blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c1c45115390ffa945df678e8069f4747e8af339b
|
596b6fa0a1c1f30885c5028527c194a571dab16d
|
/R/sample_comp.R
|
9585f429e4ea508d04a6de5d956ea7cbe8bf05c1
|
[] |
no_license
|
realsmak88/ss3sim
|
88dd30b688574cce04c591b41eb8b87eb5f4e972
|
50606b75f9486fab62d12c1b7896b78b4191a29a
|
refs/heads/main
| 2023-06-17T13:12:02.912688
| 2021-07-07T16:03:52
| 2021-07-07T16:03:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,922
|
r
|
sample_comp.R
|
#' Sample composition data from expected values
#'
#' Apply the multinomial or Dirichlet distribution to sample
#' composition data, creating a data frame that mimics
#' observed composition data.
#'
#' @details
#' Sample size, i.e., 'Nsamp', is used as a measure of precision,
#' where higher sample sizes lead to simulated samples that more accurately
#' represent the truth provided in \code{data}.
#'
#' @param data A data frame with informational columns followed by
#' columns of compositional data.
#' The informational columns must include columns labeled
#' 'Yr' and 'FltSvy' and end with a column labeled 'Nsamp'.
#' Columns of compositional data should follow 'Nsamp'.
#' Rows of compositional data do not need to sum to one.
#' @template Nsamp
#' @template lcomp-agecomp-index
#' @template lcomp-agecomp
#' @template sampledots
#' @importFrom magrittr %>%
#'
#' @author Kelli Faye Johnson
#' @return A data frame of observed composition data.
#'
sample_comp <- function(data,
                        Nsamp,
                        fleets,
                        years,
                        ESS = NULL,
                        cpar = 1,
                        ...) {
  #### Perform input checks
  # Nothing requested: return a zero-row frame with the input's columns.
  if (is.null(fleets)) return(data[0, ])
  if (is.null(Nsamp)) return(data[0, ])
  # Normalize cpar: NA selects multinomial sampling below; a numeric value
  # selects Dirichlet sampling with that over-dispersion parameter.
  cpar <- switch(class(cpar),
    list = ifelse(mapply(is.null, cpar), NA, unlist(cpar, recursive = FALSE)),
    numeric = cpar,
    logical = NA,
    vector = cpar,
    NULL = NA)
  # ESS can be (1) user input, (2) NULL -> Nsamp, (3) Dirichlet calculated ESS
  useESS <- !is.null(ESS)
  if (is.null(ESS)) {
    ESS <- Nsamp
  }
  if (useESS) { # fill NA entries of a user-supplied ESS with Nsamp
    ESS <- mapply(function(ess, nsamp) {
      if (any(is.na(ess))) {
        # Bug fix: the original condition was
        #   length(ess != nsamp) & length(nsamp == 1)
        # i.e. the lengths of two logical vectors, which is truthy for any
        # non-empty input; the intent was to compare the input lengths so a
        # scalar nsamp gets recycled to line up element-wise with ess.
        if (length(ess) != length(nsamp) && length(nsamp) == 1) {
          nsamp <- rep(nsamp, length.out = length(ess))
        }
        new_ess <- mapply(function(e, n) ifelse(is.na(e), n, e),
          e = ess, n = nsamp, SIMPLIFY = FALSE)
        new_ess <- unlist(new_ess)
      } else {
        new_ess <- ess
      }
      new_ess
    }, ess = ESS, nsamp = Nsamp, SIMPLIFY = FALSE)
  }
  # Check for bad inputs: every specification must have length 1 or
  # one entry per fleet.
  lapply(list(years, fleets, Nsamp, ESS, cpar, ...),
    function(x, fleetN = length(fleets)) {
      if (!length(x) %in% c(1, fleetN)) stop(call. = FALSE,
        "Bad input to ss3sim sampling function.\n",
        "There is only ", fleetN, " fleets, yet your input was a ",
        class(x), " with a length of ", length(x), ". See below:\n", x)
    })
  # Repeat short inputs: build one row per fleet/year combination carrying
  # its sampling specification (sample size, ESS, cpar, ...).
  new <- dplyr::bind_cols(tibble::tibble(FltSvy = fleets), tibble::tibble(Yr = years,
    newN = Nsamp, ESS = ESS, cpar = cpar, ...)) %>% dplyr::rowwise() %>%
    tidyr::unnest(dplyr::everything()) %>% dplyr::bind_rows()
  colnames(new) <- gsub("part", "Part", colnames(new))
  colnames(new) <- gsub("seas", "Seas", colnames(new))
  # todo: make the rename above more generic
  #### Multinomial or DM sampling based on case_when with cpar
  # Results are slightly different because of some seed thing with dplyr
  # sample_dm or sample_mn will give same values if used in loop
  # or force seed in the function
  all <- dplyr::inner_join(data, new,
    by = na.omit(colnames(new)[match(colnames(data), colnames(new))])) %>%
    dplyr::rowwise() %>%
    dplyr::mutate(
      # comp holds one sampled composition vector per row; cpar = NA means
      # multinomial, numeric cpar means Dirichlet-multinomial.
      comp = dplyr::case_when(
        is.na(.data[["cpar"]]) ~ list(sample_mn(
          data = dplyr::c_across(dplyr::matches("[0-9]+")),
          n = .data[["newN"]]
        )),
        is.numeric(.data[["cpar"]]) ~ list(sample_dm(
          data = dplyr::c_across(dplyr::matches("[0-9]+")),
          n = .data[["newN"]], par = .data[["cpar"]]
        ))
      ),
      # Dirichlet effective sample size shrinks with cpar^2.
      ncalc = dplyr::case_when(
        is.na(.data[["cpar"]]) ~ .data[["newN"]],
        is.numeric(.data[["cpar"]]) ~ .data[["newN"]] / .data[["cpar"]]^2
      )
    ) %>%
    dplyr::select(
      1:(dplyr::matches("Nsamp") - 1),
      Nsamp = .data[[ifelse(useESS, "ESS", "ncalc")]],
      .data[["comp"]]) %>%
    tidyr::unnest_wider(.data[["comp"]], names_sep = "") %>%
    `colnames<-`(colnames(data))
  return(data.frame(all))
}
|
9a2ec7e77e9f1e0a0a66ca3725e044ebf395333a
|
3255e1bf30d9e05b3ecf38c785d5d4186840db65
|
/run_analysis.R
|
920351a9edacbb1cf14d485569dc06413f84c5e8
|
[] |
no_license
|
jradick/coursera-getdata-013-project
|
2b5cfa46d68e511b4f6bff015bf21fe954d9ebb4
|
ad877ca9b15d35f4f55162e1b757a898dd2607da
|
refs/heads/master
| 2020-06-03T05:13:42.743703
| 2015-04-26T23:16:58
| 2015-04-26T23:16:58
| 34,584,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,083
|
r
|
run_analysis.R
|
#
# Coursera course "Getting and Cleaning Data"
# April 2015, session getdata-013
# Part of Data Science Specialization
#
# Jeffrey Radick, student
#
#
# Code for course project
#
# As specified for the assignment, the requirements for this program
# are as follows:
# 1. Merges the training and the test data sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation
# for each measurement
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriate labels the data set with descriptive variable names
# 5. From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable
# for each activity and each subject
#
# The code is structured as follows:
# - There is a set of function definitions,
# each of which performs a single well-defined step.
# These form building blocks for what the script needs to do.
# - At the end, after all the building blocks are defined,
# there is a series of calls to the proper functions in the
# proper order in order to carry out the required action.
# This set of operations is in a single funciton "run_analysis()".
# - At the very end, outside of any of the function definitions,
# the function "run_analysis()" is called so that the required
# operation is performed as the script is read in by R.
#
library(dplyr)
# Read the activity lookup table (activity_labels.txt) found in `dir`.
# Returns a data frame with columns 'levels' (numeric activity code)
# and 'labels' (activity name).
read_activities <- function(dir)
{
  activity_path <- file.path(dir, "activity_labels.txt")
  activities <- read.table(activity_path, sep = " ", stringsAsFactors = FALSE)
  names(activities) <- c("levels", "labels")
  activities
}
# Read the feature name table (features.txt) found in `dir`.
# Returns a data frame with columns 'num' (feature index) and
# 'names' (feature name string).
read_features <- function(dir)
{
  feature_path <- file.path(dir, "features.txt")
  features <- read.table(feature_path, sep = " ", stringsAsFactors = FALSE)
  names(features) <- c("num", "names")
  features
}
#
# Read the data set files and glue them together into a single data set.
# This does NOT merge the training and test data sets.
# This JUST builds one or the other of the data sets from 3 component files.
#
# The files are called
# subject_<flavor>.txt
# y_<flavor>.txt
# X_<flavor>.txt
# where
# <flavor> is either "test" or "train" as specified by the "flavor"
# argument
#
# Visual inspection of the files indicates that
# - the lines of the 3 files correspond in a direct 1-to-1 fashion
# i.e. line 1 of subject_train.txt goes with
# line 1 of y_train.txt and line 1 of X_train.txt
# - the file subject_<flavor>.txt has the numerical key of the
# subject (person) for which the data was collected
# - the file y_<flavor>.txt has the numerical activity key
# indicating the activity from which the data was collected
# - the file X_<flavor>.txt contains the 561 data points collected
# for the corresponding activity by the corresponding subject.
#
# The aim of this function is
# - read the 3 files into data frames
# - combine them into a single data frame
# - return the combined data frame
#
# Arguments are:
# - dir is the directory containing the all of the data
# (both data sets)
# - flavor is either "train" or "test" to indicate which data set to build
# - features is a character vector containing the feature names,
# to be used as column names for the data points from the
# X_<flavor>.txt part of the data set
#
# The return value is a data frame containing the combined data set.
#
read_data_set <- function(dir, flavor)
{
  # Build the three component file paths for this flavor ("train" or
  # "test"); each file lives under <dir>/<flavor>/.
  flavor_dir <- file.path(dir, flavor)
  subject_path <- file.path(flavor_dir, paste0("subject_", flavor, ".txt"))
  y_path <- file.path(flavor_dir, paste0("y_", flavor, ".txt"))
  X_path <- file.path(flavor_dir, paste0("X_", flavor, ".txt"))
  # Read each component. Subject and activity keys get descriptive
  # column names now; the X measurement columns keep their default
  # V1..Vn names, which are renamed later in the pipeline.
  subjects <- read.table(subject_path)
  names(subjects) <- c("subject")
  activities <- read.table(y_path)
  names(activities) <- c("activity")
  measurements <- read.table(X_path)
  # One row per observation: subject key, activity key, measurements.
  data.frame(subjects, activities, measurements)
}
#
# Perform step 1 of the project requirement:
# read and merge the training and test sets into a single data set.
#
# Step 1 of the assignment: read both the training and test data sets
# from `dir` and combine them into a single data frame. The two frames
# have identical column names, so merge(all = TRUE) keeps every row
# from each.
read_and_merge_data_sets <- function(dir)
{
  merge(read_data_set(dir, "train"),
        read_data_set(dir, "test"),
        all = TRUE)
}
#
# Step 2 of the project requirement:
# extract the mean and standard deviation measurements
# from the full data set, resulting in a new smaller data set.
#
# This is tricky because we just want certain columns,
# there are a *lot* of columns to choose from,
# and at this point the columns we want have names like V161.
# I don't know of an easy way to automatically extract
# the right feature variables other than by looking,
# so here's what I did.
# After reading features_info.txt for explanations
# and studying features.txt, I realized I could do
# egrep '(mean|std)\(\)' features.txt
# to find all the lines with feature names containing either
# ...mean()...
# or
# ...std()...
# This amounts to 66 feature variables, which
# can be grouped into 2 sets:
# - one set of X, Y, Z coordinate values, and
# - one set that is not broken down into coordinates
#
# The items are as follows.
# Entries in the table are the
# column names in the full merged data set.
#
# Coordinate group, 6 * 8 == 48 variables
#
# feature mean() std()
# variable X Y Z X Y Z
# -------- ---- ---- ---- ---- ---- ----
# tBodyAcc V1 V2 V3 V4 V5 V6
# tGravityAcc V41 V42 V43 V44 V45 V46
# tBodyAccJerk V81 V82 V83 V84 V85 V86
# tBodyGyro V121 V122 V123 V124 V125 V126
# tBodyGyroJerk V161 V162 V163 V164 V165 V166
# fBodyAcc V266 V267 V268 V269 V270 V271
# fBodyAccJerk V345 V346 V347 V348 V349 V350
# fBodyGyro V424 V425 V426 V427 V428 V429
#
# Non-coordinate group, 2 * 9 == 18 variables
#
# feature mean() std()
# ------- -------- ------
# tBodyAccMag V201 V202
# tGravityAccMag V214 V215
# tBodyAccJerkMag V227 V228
# tBodyGyroMag V240 V241
# tBodyGyroJerkMag V253 V254
# fBodyAccMag V503 V504
# fBodyAccJerkMag V516 V517
# fBodyGyroMag V529 V530
# fBodyGyroJerkMag V542 V543
#
# Because I can't think of an automatic way to extract this mapping
# from the features.txt file and construct usable variable names
# from them in the output data frame (at least, not something I
# could code up in a weekend), I'm doing it by hand.
# That is, I constructed the table in the comments above by hand,
# and created the corresponding code by hand.
# The way I coded it is to create 2 mapping tables.
# Thus to extract the values I want,
# I just cycle through the tables.
# There ought to be a better way.
#
extract_subset <- function(df.full)
{
  # Step 2: keep the two key columns plus the 66 mean()/std() feature
  # columns identified by hand from features.txt (see the long comment
  # block above for the Vnnn-to-feature mapping).
  df.subset <- select(df.full,
    subject, activity,
    # coordinate (X/Y/Z) mean/std features, 48 columns
    V1:V6,
    V41:V46,
    V81:V86,
    V121:V126,
    V161:V166,
    V266:V271,
    V345:V350,
    V424:V429,
    # magnitude (non-coordinate) mean/std features, 18 columns
    V201:V202,
    V214:V215,
    V227:V228,
    V240:V241,
    V253:V254,
    V503:V504,
    V516:V517,
    V529:V530,
    V542:V543)
  df.subset
}
#
# Step 3: apply descriptive activity names
# What this does is to change the "activity" column to a factor variable,
# using the level names from the activity_labels.txt file.
#
# While we're at it, make the subject variable into a factor also.
# Not explicitly part of what's required in this step,
# but useful later when summarizing.
#
descriptive_activities <- function(df.in)
{
  # Step 3: convert the numeric activity codes to a labeled factor using
  # activity_labels.txt, and factor the subject ids for later grouping.
  #
  # NOTE(review): `dir` is not a parameter -- this reads the global `dir`
  # defined at the bottom of the script. Consider passing it explicitly;
  # verify callers before refactoring.
  df.activities <- read_activities(dir)
  df.out <- df.in
  df.out$activity <- factor(df.out$activity,
    levels = df.activities$levels,
    labels = df.activities$labels)
  df.out$subject <- factor(df.out$subject)
  df.out
}
#
# Step 4: apply descriptive labels
# As in step 2, this ought to be automated in some nice way
# but the mapping is a manual renaming of the 66 feature variables
# from the names from the original data frame (Vnnn)
# to a more descriptive name.
# The descriptive name is derived from the name in features.txt
# but is not the same, so as to avoid syntax issues
# relating to minus signs and parentheses in the original names.
#
descriptive_labeling <- function(df.in)
{
  # Step 4: rename the 66 selected Vnnn columns to descriptive names.
  # The mapping below was constructed by hand from features.txt (see the
  # comment block above extract_subset()); underscores replace the '-',
  # '(' and ')' of the original feature names so the result is
  # syntactically valid in R.
  df.out <- rename(df.in,
    # coordinate (X/Y/Z) mean/std features
    tBodyAcc_mean_X = V1,
    tBodyAcc_mean_Y = V2,
    tBodyAcc_mean_Z = V3,
    tBodyAcc_std_X = V4,
    tBodyAcc_std_Y = V5,
    tBodyAcc_std_Z = V6,
    tGravityAcc_mean_X = V41,
    tGravityAcc_mean_Y = V42,
    tGravityAcc_mean_Z = V43,
    tGravityAcc_std_X = V44,
    tGravityAcc_std_Y = V45,
    tGravityAcc_std_Z = V46,
    tBodyAccJerk_mean_X = V81,
    tBodyAccJerk_mean_Y = V82,
    tBodyAccJerk_mean_Z = V83,
    tBodyAccJerk_std_X = V84,
    tBodyAccJerk_std_Y = V85,
    tBodyAccJerk_std_Z = V86,
    tBodyGyro_mean_X = V121,
    tBodyGyro_mean_Y = V122,
    tBodyGyro_mean_Z = V123,
    tBodyGyro_std_X = V124,
    tBodyGyro_std_Y = V125,
    tBodyGyro_std_Z = V126,
    tBodyGyroJerk_mean_X = V161,
    tBodyGyroJerk_mean_Y = V162,
    tBodyGyroJerk_mean_Z = V163,
    tBodyGyroJerk_std_X = V164,
    tBodyGyroJerk_std_Y = V165,
    tBodyGyroJerk_std_Z = V166,
    fBodyAcc_mean_X = V266,
    fBodyAcc_mean_Y = V267,
    fBodyAcc_mean_Z = V268,
    fBodyAcc_std_X = V269,
    fBodyAcc_std_Y = V270,
    fBodyAcc_std_Z = V271,
    fBodyAccJerk_mean_X = V345,
    fBodyAccJerk_mean_Y = V346,
    fBodyAccJerk_mean_Z = V347,
    fBodyAccJerk_std_X = V348,
    fBodyAccJerk_std_Y = V349,
    fBodyAccJerk_std_Z = V350,
    fBodyGyro_mean_X = V424,
    fBodyGyro_mean_Y = V425,
    fBodyGyro_mean_Z = V426,
    fBodyGyro_std_X = V427,
    fBodyGyro_std_Y = V428,
    fBodyGyro_std_Z = V429,
    # magnitude (non-coordinate) mean/std features
    tBodyAccMag_mean = V201,
    tBodyAccMag_std = V202,
    tGravityAccMag_mean = V214,
    tGravityAccMag_std = V215,
    tBodyAccJerkMag_mean = V227,
    tBodyAccJerkMag_std = V228,
    tBodyGyroMag_mean = V240,
    tBodyGyroMag_std = V241,
    tBodyGyroJerkMag_mean = V253,
    tBodyGyroJerkMag_std = V254,
    fBodyAccMag_mean = V503,
    fBodyAccMag_std = V504,
    fBodyAccJerkMag_mean = V516,
    fBodyAccJerkMag_std = V517,
    fBodyGyroMag_mean = V529,
    fBodyGyroMag_std = V530,
    fBodyGyroJerkMag_mean = V542,
    fBodyGyroJerkMag_std = V543)
  df.out
}
#
# Step 5: reduce the data to a nice tidy summary
#
make_tidy <- function(df.in)
{
  # Step 5: for every (subject, activity) pair, average each measurement
  # column, returning one row per pair.
  #
  # Bug fix: the original body called split() with no arguments (an
  # error) and referenced an undefined variable `xt`, so it could never
  # run. This implements the stated step-5 requirement with base
  # aggregate(): `. ~ subject + activity` means "every other column,
  # grouped by subject and activity".
  aggregate(. ~ subject + activity, data = df.in, FUN = mean)
}
#
# The principal driver function,
# organized into steps explicitly associated
# with the stated requirements
#
run_analysis <- function(dir)
{
  # Principal driver: executes assignment steps 1-5 in order and writes
  # the resulting tidy data set to "tidy.data.txt".
  #
  # step 0: read activity and feature names
  # this is useful for later in steps 1, 3, and 4
  #
  # NOTE(review): df.features is read here but never used below --
  # confirm whether it was meant to feed the step-4 renaming or can be
  # dropped.
  df.features <- read_features(dir)
  #
  # step 1: read and merge the training and test data sets
  #
  df.merged <- read_and_merge_data_sets(dir)
  # step 2: extract mean and std. dev. measurements
  df.extract <- extract_subset(df.merged)
  # step 3: apply descriptive names for the activities
  df.descriptive1 <- descriptive_activities(df.extract)
  # step 4: apply descriptive labels to the data set variables
  df.descriptive2 <- descriptive_labeling(df.descriptive1)
  # step 5: create new tidy data w/ averages for each activity, subject
  df.tidy <- make_tidy(df.descriptive2)
  # finally, write the tidy data to a file
  write.table(df.tidy, "tidy.data.txt", row.names = FALSE)
}
#
# Kick everything off as the script is read in.
# `dir` is also read as a global by descriptive_activities().
#
dir <- "./data/UCI HAR Dataset"
# NOTE: the driver call is commented out; uncomment to run the full
# pipeline once the data directory above exists.
#run_analysis(dir)
|
ba531482325f757ec8c8b24ace743c032033d307
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BETS/examples/report.Rd.R
|
19481757f0ae0b5da3ebd860a08420200e52c639
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,450
|
r
|
report.Rd.R
|
library(BETS)
### Name: report
### Title: Create dynamic reports with a full analysis of a set of time
### series
### Aliases: report
### ** Examples
##-- SARIMA
# parameters = list(lag.max = 48, n.ahead = 12 )
# report(ts = 21864, parameters = parameters)
# report(ts = 4447, series.saveas = "csv")
# series = list(BETSget(4447), BETSget(21864))
# parameters = list(lag.max = 20, n.ahead = 15 )
# report(ts = series, parameters = parameters)
# series = list(4447, 21864)
# report(ts = series, parameters = parameters)
# parameters = list(
# cf.lags = 25,
# n.ahead = 15,
# dummy = dum,
# arch.test = list(lags = 12, alpha = 0.01),
# box.test = list(type = "Box-Pierce")
# )
# report(ts = window(BETSget(21864), start= c(2002,1) , end = c(2015,10)),
#parameters = parameters)
# dum <- dummy(start= c(2002,1) , end = c(2017,1) ,
#from = c(2008,9) , to = c(2008,11))
# parameters = list(
# cf.lags = 25,
# n.ahead = 15,
# dummy = dum
# )
# report(ts = window(BETSget(21864), start= c(2002,1) , end = c(2015,10)),
#parameters = parameters)
##-- GRNN
# params = list(regs = 4382)
# report(mode = "GRNN", ts = 13522, parameters = params)
##-- HOLT-WINTERS
# params = list(alpha = 0.5, gamma = TRUE)
# report(mode = "HOLT-WINTERS", ts = 21864, series.saveas = "csv", parameters = params)
# params = list(gamma = T, beta = TRUE)
# report(mode = "HOLT-WINTERS", ts = 21864, series.saveas = "csv", parameters = params)
|
1cca3934057856b7adbd99db2d2a8c9c846db8f7
|
e8d4d0a7f118e0ea854a9e2908797140d1e1106b
|
/plot4.R
|
84e8f21838ceaeca1f2ca22d784f52942e05044e
|
[] |
no_license
|
sammnoh/ExData_Plotting1
|
a5397f1483e28c1f42eb7f7a1c78fdcc7b8621c5
|
bbd8d8593ded2e8b8afa6ae94c4c4d279d8d0c15
|
refs/heads/master
| 2021-01-21T02:03:11.607987
| 2015-10-11T23:34:03
| 2015-10-11T23:34:03
| 42,377,367
| 0
| 0
| null | 2015-09-12T23:51:52
| 2015-09-12T23:51:51
| null |
UTF-8
|
R
| false
| false
| 1,628
|
r
|
plot4.R
|
library(datasets)
# Download and unpack the UCI household power consumption data set.
URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(URL, destfile = "./data4-1.zip", mode = "wb")
unzip("./data4-1.zip", exdir = "data4-1")
list.files("data4-1")
full <- read.table("./data4-1/household_power_consumption.txt", header = TRUE, sep = ";")
full$Date <- as.Date(full$Date, format = "%d/%m/%Y")
full$Global_active_power <- as.numeric(as.character(full$Global_active_power))
full["DateTime"] <- NA
# Combine Date and Time column via paste()
full$DateTime <- paste(full$Date, full$Time)
# Subset dataframe to only include the two target dates
work <- subset(full, Date == "2007-02-02" | Date == "2007-02-01")
# Change DateTime from character to factor. Characters cannot be plotted
work$DateTime <- as.factor(work$DateTime)
# Change all sub_meterings to numeric
work$Sub_metering_1 <- as.numeric(as.character(work$Sub_metering_1))
work$Sub_metering_2 <- as.numeric(as.character(work$Sub_metering_2))
work$Sub_metering_3 <- as.numeric(as.character(work$Sub_metering_3))
# 2 x 2 panel of plots
par(mfrow = c(2, 2))
plot(work$DateTime, work$Global_active_power, ylab = "Global Active Power (kilowatts)")
plot(work$DateTime, work$Voltage, ylab = "Voltage")
plot(work$DateTime, work$Sub_metering_1, type = "l")
lines(work$DateTime, work$Sub_metering_2, type = "l", col = "red")
lines(work$DateTime, work$Sub_metering_3, type = "l", col = "blue")
# Bug fix: the legend colors were listed as black/blue/red while the
# series are drawn black (meter 1), red (meter 2), blue (meter 3);
# also use lty (line samples), not pch, since lines are drawn.
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(work$DateTime, work$Global_reactive_power, ylab = "Global Reactive Power")
dev.copy(png, file = "plot4.png")
dev.off()
|
241a6b7867e596826a2ad6e875044eea8dc33e01
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/cricketr/R/batsman4s6s.R
|
66dbe20d1dd69b05856e04be5dfbcdb2bf0f81aa
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,670
|
r
|
batsman4s6s.R
|
##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 2 Aug 2015
# Function: batsman4s6s
# This function computes and plots the percent of 4s,6s in total runs
#
###########################################################################################
batsman4s6s <- function(frames, names) {
  # For each batsman, plot the percentage of total runs contributed by
  # 4s, 6s, and other runs as a stacked bar chart, and return the
  # percentage table rounded to 2 decimals.
  #
  # frames: list of batsman data frames (each cleaned via clean())
  # names:  character vector of batsman names, parallel to `frames`
  runs4s6s <- NULL  # renamed from batsman4s6s to avoid shadowing the function
  for (i in seq_along(frames)) {  # seq_along instead of 1:length(frames)
    # Clean batsman data
    batsman <- clean(frames[[i]])
    Runs <- sum(batsman$Runs)
    batsman4s <- sum(batsman$X4s * 4)
    batsman6s <- sum(batsman$X6s * 6)
    a <- c(Runs, batsman4s, batsman6s)
    runs4s6s <- cbind(runs4s6s, a)
  }
  # NOTE(review): row 1 is labeled 'Runs(1s,2s,3s)' but holds
  # sum(batsman$Runs), which presumably includes boundary runs --
  # confirm against the data source.
  rownames(runs4s6s) <- c('Runs(1s,2s,3s)', "4s", "6s")
  colnames(runs4s6s) <- names
  # Calculate the percentages and create a table (column-wise percent)
  prop <- prop.table(runs4s6s, margin = 2) * 100
  par(mar = c(4, 4, 2, 14), xpd = TRUE)  # wide right margin for the legend
  # Create a stacked bar plot
  barplot(prop, col = heat.colors(length(rownames(prop))), width = 2,
          ylab = "% of total runs", main = "Runs(1s,2s,3s), 4s, 6s as % of Total Runs")
  legend("topright", inset = c(-0.40, 0), fill = heat.colors(length(rownames(prop))),
         legend = rownames(prop), cex = 0.8)
  mtext("Data source-Courtesy:ESPN Cricinfo", side = 1, line = 3, adj = 1.0, cex = 0.7, col = "blue")
  round(prop, 2)
}
|
a65eaea3c29c6c60860f82b9db9a802435fbdea8
|
36e74e19edbb1edcf45af968fb8dc57353532507
|
/schollbucks.R
|
b44fd17d30c0065ee9fb3aa21bff58e49212d509
|
[] |
no_license
|
LazerLambda/Schollbucks
|
688d630935b5e241939fb38dcabe047b44e7a63d
|
734e8760f4a659212f3c0201c8865bee693e6ab7
|
refs/heads/master
| 2023-01-24T12:51:32.828478
| 2020-12-03T10:20:54
| 2020-12-03T10:20:54
| 293,352,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,386
|
r
|
schollbucks.R
|
library(checkmate)
library(data.table)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(stringr)
library(telegram.bot)
# Interactively read one "<Name>-<Count>" entry from the console.
#
# Loops until the input parses against the regex "(.+)-(\\d+)". On a
# parse failure the user is offered an abort prompt; aborting leaves
# both fields NA. Returns list(name = <character or NA>,
# count = <numeric or NA>).
readNames <- function() {
  count <- NA
  name <- NA
  repeat{
    # stop once a numeric count was successfully parsed
    if (!is.na(count)) {
      break
    }
    # get input
    input <- readline(prompt=writeLines(c("<Name>-<Anzahl>", "Type \":\" to quit insert-mode!")))
    parsed <- stringr::str_match(input, "(.+)-(\\d+)")
    name <- parsed[[2]]
    count <- as.numeric(parsed[[3]])
    # parse failed: ask whether the user wants to abort input mode
    if (is.na(count)) {
      if (readline(prompt = writeLines(c("Input mode stopped! Abort? [Y/N]"))) %in% c("Y", "y")) {
        break
      } else {
        next
      }
    }
  }
  return(
    list(name = name,
      count = count)
  )
}
# Repeatedly read name/count entries from the console until the user
# aborts, accumulating them into a data.table (one row per entry).
construct_table <- function() {
  data <- data.table()
  repeat {
    new_row <- readNames()
    # readNames() returns name = NA when the user aborted input mode
    if (is.na(new_row$name)) {
      break
    } else {
      # NOTE(review): growing via rbind() in a loop is quadratic, but
      # acceptable for a handful of interactive entries.
      data <- rbind(data, new_row)
    }
  }
  return(data)
}
# Append a `costs` column (cost per coffee * count) to the tally table.
#
# dt:   data.table with at least a `count` column (one row per person)
# cost: price of a single coffee
# Returns dt with the additional `costs` column.
calculate_prices <- function(dt, cost = 0.2) {
  assertDataTable(dt)
  # nrow() is the idiomatic form of dim(dt)[[1]]
  assertTRUE(nrow(dt) > 0)
  return(
    dt %>% mutate(costs = cost * count)
  )
}
# Persist the tally as a flat file named "Schollbucks_<date>" in `path`,
# creating the directory first if necessary.
write_to_file <- function(dt, path) {
  assertDataTable(dt)
  # Plain `if` instead of the original scalar ifelse(), which misused a
  # vectorized selector for a side effect; recursive = TRUE also creates
  # missing parent directories.
  if (!dir.exists(path)) {
    dir.create(path, recursive = TRUE)
  }
  fwrite(dt, paste(file.path(path, "Schollbucks"), Sys.Date(), sep = "_"))
}
# Render the tally as a one-page PDF invoice ("Rechnung_<date>.pdf") in
# `path` and return the file path.
write_to_PDF <- function(dt, path) {
  assertDataTable(dt)
  # Plain `if` instead of the original scalar ifelse() side-effect call.
  if (!dir.exists(path)) {
    dir.create(path, recursive = TRUE)
  }
  file <- file.path(path, paste0("Rechnung_", Sys.Date(), ".pdf"))
  pdf(file)
  # Blank canvas: suppress axes and box so only the title and table show.
  plot(0:10, type = "n", xaxt = "n", yaxt = "n", bty = "n", xlab = "", ylab = "")
  text(3, 10, paste("Rechnung", Sys.Date()))
  grid.table(dt)
  dev.off()
  return(file)
}
# Bar chart of coffees per person (descending), saved as a PNG in
# `path`; returns the file path.
plot_stat <- function(dt, path) {
  assertDataTable(dt)
  file <- file.path(path, paste0("stat_", Sys.Date(), ".png"))
  coffee <- dt
  # TRUE instead of the reassignable shorthand T
  coffee <- coffee[order(coffee$count, decreasing = TRUE), ]
  # Factor levels ordered by descending count so the x axis is sorted.
  coffee$fac <- factor(coffee$name,
                       levels = coffee$name[order(coffee$count, decreasing = TRUE)])
  p <- ggplot(coffee, aes(x = fac, y = count)) +
    geom_bar(stat = "identity", fill = "#8b6245") +
    ggtitle(paste("Schollbucks Coffee", Sys.Date())) +
    ylab("Anzahl Kaffees") +
    xlab("Krasse Leute") +
    theme_dark() +
    theme(plot.title = element_text(size = 20, face = "bold"))
  # Save explicitly: chaining `+ ggsave(file)` onto the plot relied on a
  # side effect and is deprecated in newer ggplot2.
  ggsave(file, plot = p)
  return(file)
}
# Send the current tally to the Telegram chat: a header message, the PDF
# invoice, and the statistics plot.
#
# Expects a ".renv" file in the working directory (read via fread) with
# columns `Variables` and `Assigned` supplying TOKEN and DEBUG.
run_bot <- function(dt, path) {
  assertDataTable(dt)
  renv <- fread(".renv")
  TOKEN <- renv[Variables == "TOKEN",]$Assigned
  # NOTE(review): DEBUG is read here but never used in this function.
  DEBUG <- as.logical(renv[Variables == "DEBUG",]$Assigned)
  bot <- Bot(token = TOKEN)
  updates <- bot$getUpdates()
  print(updates)
  # Without at least one pending update there is no chat id to reply to.
  if(length(updates) == 0){
    stop("ERROR: No groups can be found")
  }
  chat_id <- updates[[1L]]$from_chat_id()
  bot$sendMessage(chat_id,
    text = paste("Kaffeekosten, Stand", Sys.Date()),
    parse_mode = "Markdown"
  )
  # write_to_PDF / plot_stat generate the files and return their paths
  bot$sendDocument(chat_id,
    document = write_to_PDF(dt, path)
  )
  bot$sendPhoto(chat_id,
    photo = plot_stat(dt, path))
  print("sent")
}
# Interactive entry point: collect name/count entries, price them,
# confirm with the user, persist to disk, and optionally send the
# invoice via the Telegram bot.
#
# costs:    price per coffee, forwarded to calculate_prices()
# bot_send: when TRUE, send the PDF and stats plot through the bot
# path:     output directory for all generated files
run_Script <- function(
  costs = 0.2,
  bot_send = FALSE,  # FALSE instead of the reassignable shorthand F
  path = "~/.Schollbucks") {
  repeat{
    table <- construct_table()
    # Bug fix: the original hard-coded cost = 0.2 here, silently
    # ignoring the `costs` argument.
    table <- calculate_prices(dt = table, cost = costs)
    print(table)
    if (readline(prompt="Is that correct? [Y/N]") %in% c("y", "Y")) {
      break
    }
  }
  write_to_file(table, path)
  if (bot_send) {
    run_bot(table, path)
  }
}
|
f3bb349c904205b232e5bd3f5f4ceb5382d1b108
|
7036b9ff62f3a30dd79558df2435d3b4c53f6fd0
|
/man/game_file.Rd
|
b900da3a7fedf29a4b6758b23e83f2b5cf216b84
|
[] |
no_license
|
jalapic/quacklr
|
906ef35729c47747481bcbccd727787f77d752f4
|
2ca89141f54fc8c28dd4899e97b9dea194c21e46
|
refs/heads/master
| 2021-09-09T19:10:38.163750
| 2018-03-19T04:27:23
| 2018-03-19T04:27:23
| 125,799,655
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 659
|
rd
|
game_file.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/game_file.R
\name{game_file}
\alias{game_file}
\title{Get raw game text data from a quackle or cross-tables game number or link or filepath.}
\usage{
game_file(v)
}
\arguments{
\item{v}{a quackle or cross-tables game number or link, or file path}
}
\value{
data.frame game summary
}
\description{
Get raw game text data from a quackle or cross-tables game number or link or filepath.
}
\examples{
game_file(28283)
game_file("http://www.cross-tables.com/annotated/selfgcg/281/anno28152.gcg")
game_file("http://www.cross-tables.com/annotated.php?u=28190#0#")
}
|
957d62772c70c9d523ebe4dceaba730482d4d748
|
2aa3b7455f3e17c8dbdc938f3386dae46a12bb99
|
/man/gs_auth_revoke.Rd
|
0f7efcdb2f84d44576f56fb350806e186206fe0c
|
[
"MIT"
] |
permissive
|
dennistseng/googlesheets
|
5fd4d78994251df7703754375d5aab1a21686a7a
|
8471679c621c8b5d43679cf4b9598cba2f6a3560
|
refs/heads/master
| 2021-01-18T02:17:34.149067
| 2015-06-02T06:36:06
| 2015-06-02T06:36:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
rd
|
gs_auth_revoke.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/gs_auth.R
\name{gs_auth_revoke}
\alias{gs_auth_revoke}
\title{Revoke authentication}
\usage{
gs_auth_revoke(rm_httr_oauth = FALSE, verbose = TRUE)
}
\description{
This unexported function exists so we can revoke all authentication for
testing purposes.
}
\keyword{internal}
|
68d30c64e8df1fda6c5a480689023901b5c1e960
|
5cfcd8980a97d63c97eb2bba6b662e8278c901e9
|
/test_simon/test_kai/inst_test/server.R
|
2d7a789a2dc9671b4b654a5e5a937850e8ea3dfb
|
[] |
no_license
|
lubrunn/DSP_App
|
e040de46407515c4cdfad50c9706d405b111a0d6
|
e477454e59d90749d3af203ba4b1a3ddb7e7946e
|
refs/heads/main
| 2023-03-21T13:02:01.383511
| 2021-03-17T23:50:35
| 2021-03-17T23:50:35
| 348,858,603
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,378
|
r
|
server.R
|
# Shiny server: two parallel dashboards (stocks and Corona), each with a
# reactive data source, a zoomable line plot, and a hover info box.
server <- function(input, output, session) {
  ############################################################# Stocks
  # Reactive: stock data for the selected ticker(s) within the chosen
  # date window; re-evaluates when input$Stock or input$dates change.
  stockdata_DE <- reactive({
    req(input$Stock)
    stock_dataset_DE(input$Stock, input$dates[1], input$dates[2])
  })
  # reset button for stock selection
  observeEvent(input$reset, {
    updateSelectizeInput(session, "Stock", selected = "")
  })
  # Line plot of closing prices; honors the zoom window kept in `ranges`.
  output$plot_DE <- renderPlot({
    req(input$Stock)
    if (!is.null(ranges$x)) {
      # Brush coordinates arrive numeric; convert to Date for the x axis.
      ranges$x <- as.Date(ranges$x, origin = "1970-01-01")
    }
    ggplot(stockdata_DE(), aes(Date, Close., color = name)) +
      geom_line() +
      theme_classic() +
      coord_cartesian(xlim = ranges$x, ylim = ranges$y, expand = FALSE)
  })
  # hover info box
  output$hover_info_DE <- renderUI({
    req(input$hovering)
    create_hover_info_DE(input$plot_hover_DE, stockdata_DE())
  })
  # Zoom: double-click with an active brush zooms to the brushed region;
  # double-click without a brush resets to the full extent.
  ranges <- reactiveValues(x = NULL, y = NULL)
  observeEvent(input$plot1_dblclick, {
    brush <- input$plot1_brush
    if (!is.null(brush)) {
      ranges$x <- c(brush$xmin, brush$xmax)
      ranges$y <- c(brush$ymin, brush$ymax)
    } else {
      ranges$x <- NULL
      ranges$y <- NULL
    }
  })
  #####################################################################
  ##################################################################### Corona
  # Reactive: Corona data for the selected country and date window.
  corona_data <- reactive({
    CORONA(input$CoronaCountry, input$dates_corona[1], input$dates_corona[2])
  })
  output$corona_plot <- renderPlot({
    if (!is.null(ranges2$x)) {
      # Brush coordinates arrive numeric; convert to Date for the x axis.
      ranges2$x <- as.Date(ranges2$x, origin = "1970-01-01")
    }
    # aes_string because the measurement column name arrives as a string
    # from the UI. NOTE(review): aes_string() is deprecated in newer
    # ggplot2 -- consider .data[[input$corona_measurement]] when upgrading.
    ggplot(corona_data(), aes_string("date", input$corona_measurement, color = "location")) +
      geom_line() +
      theme_classic() +
      coord_cartesian(xlim = ranges2$x, ylim = ranges2$y, expand = FALSE)
  })
  # hover info box
  output$hover_info_corona <- renderUI({
    req(input$hovering_corona)
    create_hover_info_corona(input$plot_hover_corona, corona_data(), input$corona_measurement)
  })
  # zoom functionality (same double-click/brush pattern as the stocks plot)
  ranges2 <- reactiveValues(x = NULL, y = NULL)
  observeEvent(input$plot_corona_dblclick, {
    brush <- input$plot_corona_brush
    if (!is.null(brush)) {
      ranges2$x <- c(brush$xmin, brush$xmax)
      ranges2$y <- c(brush$ymin, brush$ymax)
    } else {
      ranges2$x <- NULL
      ranges2$y <- NULL
    }
  })
}
|
12a3e4f8e60e168134fac16e5bf1f2f8948c434a
|
9b63de8e769a0cfaf0e6345687a3c814ebcbccfd
|
/translation/utils.R
|
9fc55fa72aea8ab9f56c3735e097d159aadf5d0b
|
[] |
no_license
|
PFA-WebApp/App
|
928b3721cd3ecda1d583335950c93f79ba5f7e38
|
219f748acb4508075a0cdfce3e95c360acb6146d
|
refs/heads/master
| 2023-04-22T19:56:24.761176
| 2021-04-30T07:52:01
| 2021-04-30T07:52:01
| 313,712,163
| 2
| 0
| null | 2021-02-10T17:54:29
| 2020-11-17T18:47:40
|
R
|
UTF-8
|
R
| false
| false
| 1,023
|
r
|
utils.R
|
# Rewrite the translation JSON in place with each language's keys sorted
# alphabetically, after warning (via check_names) when the languages do
# not all share the same key set.
sort_translation <- function(path = "translation/translation.json") {
  translations <- jsonlite::read_json(path)
  translations <- lapply(translations, function(entry) {
    entry[order(names(entry))]
  })
  check_names(translations)
  jsonlite::write_json(translations, path, auto_unbox = TRUE, pretty = TRUE)
}
# Warn for every element of the list `x` whose names do not match the
# first element's names (same set, same order). Used to keep translation
# languages in sync.
#
# x: a named list of named lists (one entry per language)
# Returns NULL invisibly; emits one warning per mismatching element.
check_names <- function(x) {
  if (length(x) == 0) {
    return(invisible(NULL))
  }
  # Bug fix: the original re-seeded the reference names from the next
  # element whenever they were NULL; always compare against the first
  # element so the reference is stable.
  ref <- names(x[[1]])
  for (i in seq_along(x)) {
    el <- x[[i]]
    if (identical(ref, names(el))) {
      next
    }
    missing_old <- setdiff(names(el), ref)
    missing_new <- setdiff(ref, names(el))
    msgs <- character(0)
    if (length(missing_old)) {
      msgs <- c(msgs, paste0(
        "Names in ", names(x)[i], " but not in ",
        names(x)[1], ": ", paste(missing_old, collapse = ", ")))
    }
    if (length(missing_new)) {
      msgs <- c(msgs, paste0(
        "Names in ", names(x)[1], " but not in ",
        names(x)[i], ": ", paste(missing_new, collapse = ", ")))
    }
    if (length(msgs) == 0) {
      # Bug fix: same name set in a different order previously produced a
      # warning whose message was just "\n".
      msgs <- paste0("Names in ", names(x)[i],
        " are ordered differently than in ", names(x)[1])
    }
    warning(paste(msgs, collapse = "\n"), call. = FALSE)
  }
  invisible(NULL)
}
|
6a5fe8ea4b94bcbe5220ec76031a3ec12aba49fb
|
7d0f25a190e97e0d2714c43a57a017eaf093ecd5
|
/man/computeBaseNetBenefit.Rd
|
848415cf1452c1950aa1974f08207c25fb0556e1
|
[
"MIT"
] |
permissive
|
mikeniemant/nbs
|
f192f6d6ce16d725dc0985d6f505666c2c785020
|
3d82fd553c58ea5941d5dfa60cfaefa95df76121
|
refs/heads/master
| 2022-07-03T11:21:21.170515
| 2022-06-15T08:51:37
| 2022-06-15T08:51:37
| 142,566,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
computeBaseNetBenefit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/computeBaseNetBenefit.R
\name{computeBaseNetBenefit}
\alias{computeBaseNetBenefit}
\title{Compute base net benefit}
\usage{
computeBaseNetBenefit(
y,
xstart = 0.01,
xstop = 0.99,
step = 0.01,
type = "treated"
)
}
\arguments{
\item{y}{outcome as integer}
\item{xstart}{start threshold}
\item{xstop}{stop threshold}
\item{step}{step threshold}
\item{type}{type of net benefit}
}
\value{
A data frame with the computed base net benefit
}
\description{
Compute base net benefit
}
|
d0459f49eff4f8ea19b677fc3deff334a617fc79
|
3faad8949a4e9712e242f9b978eb5ad25006af98
|
/climateR/man/createBarChartYearsLeft.Rd
|
cb2fc5b9413d9a19427520c1d41f8ea327f063b8
|
[] |
no_license
|
danielwiegand/climate-justice
|
bdeb6cb9ee64a8fa134db65b689501faac94f39a
|
ea21b5d742393c90447c676450d6db7ff3c5db16
|
refs/heads/master
| 2021-06-23T03:50:32.021557
| 2021-05-29T15:03:35
| 2021-05-29T15:03:35
| 220,316,020
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
createBarChartYearsLeft.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createBarChartYearsLeft.R
\name{createBarChartYearsLeft}
\alias{createBarChartYearsLeft}
\title{Bar chart: Years left per country}
\usage{
createBarChartYearsLeft(data, base_year, theme, cols)
}
\arguments{
\item{data}{Data frame with data to plot}
\item{base_year}{Numeric: Which base year to select?}
\item{theme}{Some ggplot theme adjustments}
\item{cols}{The colors for the points}
}
\description{
Bar chart: Years left per country
}
|
ba215d0a0b93c559b0f8b0111fe284f7035e2981
|
a7c4723087d16f75add07061726058060008c74e
|
/pattern.r
|
b66e4cdd84ef3f4c1864463237fd6891d60db9fd
|
[] |
no_license
|
c-zhong/TriageBehaviorGraphAnalysis
|
29c37aff35af2b9518c53e384c0c1188d58f6f4f
|
25a492e5ab6253e1559946835f4a9f03255ba60c
|
refs/heads/master
| 2021-05-28T21:01:06.651241
| 2015-02-25T19:16:21
| 2015-02-25T19:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,057
|
r
|
pattern.r
|
#define patterns
# NOTE: get.subisomorphism ignores the $label/$name vertex attributes.
#
# Build the query pattern(s) for a given pattern id, optionally attaching the
# vertex attributes (Hypo / NFound) and edge attributes (type / weight / color)
# the matching relies on.
#
# pattern_id: "1", "2" or "3" (character); any other id returns NULL.
# hypo:       attach V()$Hypo (and, for id "3", color/shape) attributes?
# nfound:     attach the V()$NFound attribute (id "1" only)?
# edgetype:   attach the edge attributes?
#
# Returns a single igraph pattern for id "1", a list of two patterns for
# ids "2" and "3".
getPattern_Condition = function(pattern_id, hypo, nfound, edgetype) {
  if (identical(pattern_id, "1")) {
    # definition2 -- pattern1: issub issub
    pattern <- graph.formula("B" --+ "A")
    if (hypo)
      V(pattern)$Hypo <- c("1", "0")
    if (nfound)
      V(pattern)$NFound <- c("0", "0")
    if (edgetype)
      E(pattern)$type <- c(2)
    return(pattern)
  } else if (identical(pattern_id, "2")) {
    # node with hypo followed by exact exclude; based on V$Hypo and E$weight
    patterns <- list()
    pattern1 <- graph.formula("B" --+ "A")
    pattern2 <- graph.formula("B" --+ "A")
    if (hypo) {
      V(pattern1)$Hypo <- c("1", "0")
      V(pattern2)$Hypo <- c("1", "1")
    }
    if (edgetype)
      # BUG FIX: the original assigned E(pattern)$weight, but no object
      # 'pattern' exists in this branch, so edgetype=TRUE always errored.
      # 'pattern1' mirrors the analogous branch for pattern_id "3".
      E(pattern1)$weight <- c(7)
    patterns[[1]] <- pattern1
    patterns[[2]] <- pattern2
    return(patterns)
  } else if (identical(pattern_id, "3")) {
    # (nhypo, ---link---, hypo)
    patterns <- list()
    pattern1 <- graph.formula("B" --+ "A")
    pattern2 <- graph.formula("B" --+ "A")
    if (hypo) {
      V(pattern1)$Hypo <- c("1", "0")
      V(pattern1)$color <- c("red", "blue")
      V(pattern1)$shape <- c("circle", "circle")
      V(pattern2)$Hypo <- c("1", "1")
    }
    if (edgetype) {
      # BUG FIX: the original 'if (edgetype)' was unbraced, so the edge
      # colour assignment below ran unconditionally even though the
      # indentation shows it belongs to the edge-attribute branch.  All
      # in-file callers pass edgetype=TRUE, so their behaviour is unchanged.
      E(pattern1)$weight <- c(4)
      E(pattern1)$color <- c("red")
    }
    patterns[[1]] <- pattern1
    patterns[[2]] <- pattern2
    return(patterns)
  }
}
# Convenience wrappers: each "definition" fixes which attributes the pattern
# carries when calling getPattern_Condition(pattern_id, hypo, nfound, edgetype).

# Definition 0/1: edge attributes only.
getPattern_Definition01 = function(pattern_id) {
  result <- getPattern_Condition(pattern_id, FALSE, FALSE, TRUE)
  return(result)
}

# Definition 2: Hypo vertex attribute plus edge attributes.
getPattern_Definition2 = function(pattern_id) {
  result <- getPattern_Condition(pattern_id, TRUE, FALSE, TRUE)
  return(result)
}

# Definition 3: NFound vertex attribute plus edge attributes.
getPattern_Definition3 = function(pattern_id) {
  result <- getPattern_Condition(pattern_id, FALSE, TRUE, TRUE)
  return(result)
}

# Definition 4: both vertex attributes plus edge attributes.
getPattern_Definition4 = function(pattern_id) {
  result <- getPattern_Condition(pattern_id, TRUE, TRUE, TRUE)
  return(result)
}
|
347ec16c560691e6a4927599c4cdc3a425f8db38
|
a443c2760bdf5064189fff633cf4752367b88f4e
|
/plot1.R
|
80527b7571ab236204df69313c222d0ce3afed4e
|
[] |
no_license
|
nsrepos/ExData_Plotting1
|
ba9e6f91a24a97eabc573a535082a8568fa54623
|
7e4e6e3d94e6f02eb63a24c14e28f36877a92429
|
refs/heads/master
| 2020-12-26T20:15:13.606361
| 2015-04-08T15:23:38
| 2015-04-08T15:23:38
| 33,613,199
| 0
| 0
| null | 2015-04-08T14:50:09
| 2015-04-08T14:50:08
| null |
UTF-8
|
R
| false
| false
| 473
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power (kilowatts) for 1-2 Feb 2007,
# written to a 480x480 px PNG.
#
# datafile: path to the UCI household power consumption file (';'-separated).
# outfile:  path of the PNG to create.
#
# Both defaults reproduce the original hard-coded behaviour, so plot1()
# still works unchanged.
plot1 <- function(datafile = "household_power_consumption.txt",
                  outfile = "plot1.png") {
  library(data.table)
  hpc <- fread(datafile, header = TRUE, sep = ";", colClasses = "character")
  hpc <- as.data.frame(hpc)
  # Keep only the two target days; the raw Date column is d/m/yyyy text.
  hpc.sub <- subset(hpc, hpc$Date == "1/2/2007" | hpc$Date == "2/2/2007",
                    select = c(Global_active_power))
  png(filename = outfile,
      width = 480, height = 480, units = "px")
  # Guarantee the device is closed even if hist() fails midway.
  on.exit(dev.off(), add = TRUE)
  hist(as.numeric(hpc.sub$Global_active_power),
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)", col = "red")
  invisible(NULL)
}
|
62ce81f556ff42322db6c84a0e2b28c81d4a9111
|
bb9140e05d2b493422d65084bc9df4fb6ae88ba9
|
/R/R_language_definition/functions/missing_arguments.R
|
a23c7d41d8e3c6c8f703833d27578990073628e6
|
[] |
no_license
|
8589/codes
|
080e40d6ac6e9043e53ea3ce1f6ce7dc86bb767f
|
fd879e36b6d10e5688cc855cd631bd82cbdf6cac
|
refs/heads/master
| 2022-01-07T02:31:11.599448
| 2018-11-05T23:12:41
| 2018-11-05T23:12:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
r
|
missing_arguments.R
|
# Volume of a right circular cylinder: pi * r^2 * h.
# Both dimensions are mandatory; a missing one raises an informative error.
cylinder.volume.2 <- function(height, radius) {
  if (missing(height))
    stop("Need to specify height of cylinder for calculations.")
  if (missing(radius))
    stop("Need to specify radius of cylinder for calculations.")
  pi * radius * radius * height
}

# cylinder.volume.2(3)
cylinder.volume.2(3, 4)
|
73a393210155862c07bfee4e342874df51fe4f95
|
beb59ae4fb30f320baa5ce1b31ae539868d640bd
|
/R/00jmv.R
|
365ccb66631cfa70e00e949484722b7d170e2e11
|
[] |
no_license
|
jamovi/walrus
|
efae8d4b09dc19c0229459521926388977775ed6
|
ef1a8baea2eb28d7d058ed37689fd25071be7f6a
|
refs/heads/master
| 2023-07-06T18:47:40.499457
| 2023-07-03T07:25:42
| 2023-07-03T07:25:42
| 92,730,294
| 4
| 3
| null | 2023-07-03T07:25:44
| 2017-05-29T10:38:57
|
R
|
UTF-8
|
R
| false
| false
| 932
|
r
|
00jmv.R
|
# This file is automatically generated, you probably don't want to edit this
# Citation metadata for the references used by this jamovi module: each key
# ('w', 'wrs2', 'wilcox') maps to an authors/year/title/publisher/url record.
.jmvrefs <- list(
`w`=list(
`authors`="Love, J., & Mair, P.",
`year`=2017,
`title`="Walrus: Robust Statistical Methods",
`publisher`="[jamovi module]",
`url`="https://github.com/jamovi/walrus/"),
`wrs2`=list(
`authors`="Mair, P., & Wilcox, R.",
`year`=2017,
`title`="WRS2: A Collection of Robust Statistical Methods",
`publisher`="[R package]. Retrieved from https://cran.r-project.org/package=WRS2",
`url`="https://cran.r-project.org/package=WRS2"),
`wilcox`=list(
`authors`="Wilcox, R. R.",
`year`=2011,
`title`="Introduction to robust estimation and hypothesis testing",
`publisher`="Academic press",
`url`="https://www.amazon.com/Introduction-Estimation-Hypothesis-Statistical-Modeling/dp/012804733X"))
|
e96ef75bb467b71a249b6a89a7ecd85ae89b03aa
|
94d0c518d47965692f57c4e1c8ee94d81301444c
|
/man/fuzzy_join.Rd
|
80c8522314016983565ea0f6e714d1a19f58bf86
|
[] |
no_license
|
thelayc/laycUtils
|
5179c2c3448d8186e5a973a5a9dd072492db2d3a
|
06d16592631f22eea242cf9a46c8734422cdfa63
|
refs/heads/master
| 2021-01-18T21:29:11.119308
| 2016-04-19T20:15:06
| 2016-04-19T20:15:06
| 29,616,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 529
|
rd
|
fuzzy_join.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fuzzy_join.R
\name{fuzzy_join}
\alias{fuzzy_join}
\title{fuzzy_join()}
\usage{
fuzzy_join(x, y, by)
}
\arguments{
\item{x, y}{dataframes to merge}
\item{by}{a character variable to join by. This variable is generally the output of the create_id() function}
}
\value{
dataframe
}
\description{
Merge 2 dataframes with inconsistent ID variables (due to typos, for instance).
}
\examples{
fuzzy_join(x = dataframe1, y = dataframe2, by = 'custom_id')
}
|
28d1271bce9fbfddb84b420f29261d4b9f47a2bc
|
e90d32ae525367861f469481f94df3c3118b1c6d
|
/animalShelterScript.R
|
9e831dac2cb83430a9f3891eb1e1628a2fbd1508
|
[] |
no_license
|
HenryDMcGee/Data-Science-What-Predictors-Have-The-Greatest-Effect-On-Dog-Adoption-Rates-
|
177598dc328fe8c454cdff69295c784015625032
|
e976ceec7e6bd15d2dcc07140907b237b02141f0
|
refs/heads/master
| 2020-09-27T05:26:44.784079
| 2019-12-17T00:43:04
| 2019-12-17T00:43:04
| 226,440,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,917
|
r
|
animalShelterScript.R
|
# Henry McGee
# 10/26/2019
# Animal Shelter
# Exploratory script: loads the Austin Animal Center outcomes export, keeps
# adopted dogs from 2014-2019, tabulates breed/age frequencies, and plots
# adoption counts for the most frequent breeds.
# NOTE(review): the absolute C:// paths tie this script to one machine.
#tricks
#############
# class("MonthYear") check parameter type
#write.csv(dataset, "filename.csv", row.names = FALSE) make csv file in documents for windows
#############
#read the csv
#############
animalShelter <- read.csv("C://Users//Henry//Desktop//School//Dataanalysis4300//AnimalShelter//Austin_Animal_Center_Outcomes.csv", fill=TRUE, header =TRUE, sep=",")
#View(animalShelter)
names(animalShelter)
summary(animalShelter)
#############
#dogs only contains dogs
dogs <- subset(animalShelter, AnimalType == "Dog")
animalShelter <- subset(animalShelter, AnimalType == "Dog")
# De-duplicate on the first column (presumably the animal id -- TODO confirm).
animalShelter <- animalShelter[!duplicated(animalShelter[1]),]
names(dogs)
summary(dogs)
#adoptedDogs contains dogs who were adopted
adoptedDogs <- subset(dogs, OutcomeType == "Adoption")
names(adoptedDogs)
summary(adoptedDogs)
#eliminate subtypes so that only adopted dogs remain
adoptedDogs <- subset(adoptedDogs, OutcomeSubtype == "")
names(adoptedDogs)
summary(adoptedDogs)
#for arrival date
arrivalDate <- adoptedDogs[,3]
summary(arrivalDate)
#this section limits the year or date range of animals in the sample
#######################
#convert dates and time to date object
adoptedDogs[,3] <- as.Date(arrivalDate, format= "%m/%d/%Y %H:%M")
#limits to 2016 from new years to next new years
# NOTE(review): Date vs character comparison relies on implicit coercion, and
# "2019/12/31/24:00" is not a valid date string -- verify the intended bounds.
adoptedDogs <- subset(adoptedDogs, adoptedDogs[,3] > "2013/12/31 24:00" & adoptedDogs[,3] < "2019/12/31/24:00")
#######################
summary(adoptedDogs)
#working on, limit to 100 in a breed and exclude those with less than 100
#######################
library(plyr)
# plyr::count() returns a data frame with columns Breed and freq.
breedList <- count(adoptedDogs, "Breed")
summary(breedList)
names(breedList)
summary(breedList[,2])
#gives only breeds with 100 or more dogs per a breed
breedList <- subset(breedList, breedList[,2] >= 100)
summary(breedList)
names(breedList)
write.csv(breedList, "breedList.csv", row.names = TRUE)
#creating csv of Breed-freq, AgeUponOutcome-freq, SexUponOutcome-freq, Color-freq
##########
#first find top 24 breeds
#so order by max frequency, currently using breeds with > 200
breedListTop24 <- head(breedList[order(breedList$freq, decreasing= T),], 24)
summary(breedListTop24)
Breed <- breedListTop24$Breed
Freq <- breedListTop24$freq
write.csv(breedListTop24, "Top24Breeds.csv", row.names = TRUE)
#find frequency of arrival dates
#as.Date(adoptedDogs[,3], format= "%m/%d/%Y")
#breedListArrivalDate <- count(adoptedDogs, ArrivalDate)
#summary(breedListArrivalDate)
#write.csv(breedListArrivalDate, "BreedListArrivalDate.csv", row.names = TRUE)
#histogram
####################
summary(Breed)
#plot(Breed,Freq)
summary(breedListTop24)
# Loading tidyverse here masks plyr::count with dplyr::count for the rest
# of the script.
library("tidyverse")
breedListTop20_plus <- breedListTop24 %>% rowid_to_column(var = "percentAdopted")
summary(breedListTop20_plus)
#gives a basic histogram
# NOTE(review): geom_histogram(stat = "identity") draws pre-computed counts
# (i.e. behaves like a bar chart); binwidth is ignored in that mode.
breedFrequency_plot <- ggplot(data = breedListTop24,
mapping = aes(x = reorder(Breed,Freq), y = Freq)) + geom_histogram( color="black", fill="black", stat = "identity", binwidth = 100) +
ggtitle("Number of adoptions by breed between 2013 and 2019") + theme_bw() +
theme(plot.title = element_text(size=10)) +
xlab("Breeds") + ylab("Number of Adoptions")
#flips the graph
breedFrequency_plot + coord_flip()
####################
#read in the csv containing only the top 24 breeds (principle is each animal)
####################
AnimalShelterTop24 <- read.csv("C://Users//Henry//Desktop//School//Dataanalysis4300//AnimalShelter//Austin_Animal_Center_OutcomesTop24.csv", fill=TRUE, header =TRUE, sep=",")
#convert age to a date
#AnimalShelterTop24$Age <- as.Date(AnimalShelterTop24$Age, format= "%y years %m months %d days")
Age <- AnimalShelterTop24$Age
# dplyr::count (tidyverse loaded above) names the count column "n".
AgeFreqTop24 <- count(AnimalShelterTop24, Age)
Age <- AgeFreqTop24$Age
Freq <- AgeFreqTop24$n
write.csv(AgeFreqTop24, "AgeFreqTop24.csv", row.names = TRUE)
write.csv(AnimalShelterTop24, "AnimalShelterTop24$Age.csv", row.names = TRUE)
Age <- subset(AgeFreqTop24, AgeFreqTop24$Age > 5 )
summary(Age)
#Age <- AgeFreqTop24$Age
#Freq <- AgeFreqTop24$n
#library(ggplot2)
#ageFrequency_plot <- ggplot(data = AnimalShelterTop24,
# mapping = aes(x = reorder(Age,Freq), y = Freq)) + geom_histogram( color="black", fill="black", stat = "identity", binwidth = 100) +
# ggtitle("Number of adoptions by Age between 2013 and 2019") + theme_bw() +
# xlab("Age") + ylab("Number of Adoptions")
#ageFrequency_plot + coord_flip()
#arrivalDate <- animalShelterTop24$ArrivalDate
#dateOfBirth <- animalShelterTop24$DateofBirth
#arrivalDate <- as.Date(arrivalDate, "%m/%d/%y")
#dateOfBirth <- as.Date(dateOfBirth, "%m/%d/%y")
#ageUponAdoption <- difftime(arrivalDate,dateOfBirth)
#write.csv(ageUponAdoption, "ageUponAdoption", row.names = TRUE)
####################
#ideas
#############
#use percentage of total adoptions
#############
#graphs normally
#breedFrequency_plot +
#  geom_point()
##########
#retrieve
#histogram
#################
library(ggplot2)
plot(adoptedDogs$AgeUponOutcome)
#create copy of data frame
adoptedDogsByDateofBirth <- data.frame(adoptedDogs)
#check that they have different memory addresses
# NOTE(review): tracemem() returns the address string, so this comparison
# only checks that the copy has a distinct address.
tracemem(adoptedDogsByDateofBirth)==tracemem(adoptedDogs)
#order by date of birth
# NOTE(review): the ordered result is not assigned, so this line has no
# lasting effect.
adoptedDogsByDateofBirth[order(as.Date(adoptedDogsByDateofBirth$DateofBirth, format= "%m/%d/%Y")),]
write.csv(adoptedDogsByDateofBirth, "dateBirth.csv", row.names = TRUE)
summary(adoptedDogsByDateofBirth)
plot(adoptedDogsByDateofBirth$AgeUponOutcome)
#class(adoptedDogs$DateofBirth)
#tmp <- aggregate(as.numeric(adoptedDogs$Animal_ID) ~ as.numeric(adoptedDogs$DateofBirth), FUN = min)
#summary(tmp)
#plot(tmp$DateofBirth)
#################
#now add frequency's to adoptedDogs
#Wrong code below
############################
#breedList <- subset(breedList, breedList[,2] >= 100)
#summary(breedList)
#table(breedList)
#summary(breedList[,2] >= 100)
#adoptedDogs[(adoptedDogs$Breed %in% breedList$Breed),]
summary(adoptedDogs)
#df$quantity <- quantity
#adoptedDogs$breedList <-
#for(Breed in adoptedDogs[,10]) {
#  for(freq in breedList) {
#    if(Breed = breedList[,1]) {
#      adoptedDogs$breedList[,2] <- breedList[,2]
#    }
#  }
#}
#adoptedDogs <- subset(adoptedDogs, breedList[,2] >= 100)
#summary(adoptedDogs)
write.csv(adoptedDogs, "4.csv", row.names = TRUE)
##########################
# Re-read the Top24 file and tabulate breed frequencies one more time;
# count() here resolves to dplyr::count because tidyverse is loaded.
animalShelter <- read.csv("C://Users//Henry//Desktop//School//Dataanalysis4300//AnimalShelter//Austin_Animal_Center_OutcomesTop24.csv", fill=TRUE, header =TRUE, sep=",")
breedList <- count(animalShelter, "Breed")
summary(breedList)
write.csv(breedList, "breedList.csv", row.names = TRUE)
|
4e75fc13277cfc074fde410c68be6bfd74f12309
|
19de4c75841d53d7109c58893c47567faaa3d387
|
/meta_analysis.R
|
ae24785e8071757b409f50459aaf653bd842bb73
|
[] |
no_license
|
congca/mrlocusSlides
|
41b22fbf1c53d86700575ee4b8867aeafa3d2b02
|
2b4a24498d2efb2d8f88376a78eed84220032bf8
|
refs/heads/master
| 2023-07-08T16:10:58.834004
| 2021-08-10T01:23:26
| 2021-08-10T01:23:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
meta_analysis.R
|
# Meta-analysis figure: RMAE per method against mediated heritability,
# faceted by total heritability. Writes sim_perf.png.
method_names <- c("causal", "twmr", "ptwas", "mrlocus", "ecv-mrl")
# Drop palette entries 1 and 5, then pick one colour per method
# (repeating the last so mrlocus and ecv-mrl share a colour).
method_cols <- unname(palette.colors())[-c(1, 5)]
method_cols <- setNames(method_cols[c(1, 3:5, 5)], method_names)
method_shps <- setNames(c(24, 17, 15, 16, 10), method_names)
results <- read.delim("paper_results.tsv")
library(ggplot2)
png(file="sim_perf.png", width=1200, height=500, res=150)
ggplot(results, aes(h2med, rmae, col = method, shape = method)) +
  geom_point(size = 2) +
  geom_line() +
  scale_color_manual(values = method_cols) +
  scale_shape_manual(values = method_shps) +
  facet_wrap(~h2g, labeller = label_both) +
  ylab("RMAE = |alpha-hat - alpha| / alpha") +
  scale_x_continuous(breaks = c(.001, .005, .01)) +
  coord_cartesian(xlim = c(0, 0.011))
dev.off()
|
b5be1d7c22df97dcf43b6452deffe80ecb5e7bc8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/simecol/examples/fromtoby.Rd.R
|
3eed8f464de5364c63cecaf6e8e1403b1267fee1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
r
|
fromtoby.Rd.R
|
# Auto-extracted example for simecol::fromtoby (keyword: manip), which
# expands a named c(from =, to =, by =) vector into the corresponding
# regular sequence.
library(simecol)

time_spec <- c(from = 1, to = 5, by = 0.1)
fromtoby(time_spec)
|
8da4443653c061e0219aedfc03b6315acc659912
|
612bef993f50416ed14b8faebb8af8256a5a208e
|
/R/NPVI.init.R
|
ce25c926799b22ec1705a9a6240c6075790844b9
|
[] |
no_license
|
achambaz/tmle.npvi
|
64bd665fe246052974b6947a32bde3be0c93101b
|
85431f25daa2bf213bff9bde40e486c46dcd6f22
|
refs/heads/master
| 2021-07-12T19:38:11.862665
| 2017-01-18T12:14:30
| 2017-01-18T12:14:30
| 70,006,254
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,578
|
r
|
NPVI.init.R
|
# Initialisation step of the NPVI TMLE procedure: validates the arguments,
# obtains initial estimates of the nuisance features (g, muAux, theta) --
# or uses the user-supplied true g/muAux -- stores them in 'this', then
# computes the initial parameter estimate 'psi' and the efficient influence
# curve, and records the state in the object's history.
# NOTE(review): relies on R.oo/R.methodsS3 idioms (setMethodS3, throw,
# Arguments$get*) and on other NPVI methods defined elsewhere in the package.
# NOTE(review): the 'cvControl' argument is accepted but never used here.
setMethodS3("init", "NPVI", function(this, flavor=c("learning", "superLearning", "h2oEnsembleLearning"),
cvControl=NULL,
learnG=NULL,
learnMuAux=NULL,
learnTheta=NULL,
bound=1e-1, B=1e4,
light=TRUE,
trueGMu=NULL,
SuperLearner.=NULL,
..., verbose=FALSE) {
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Validate arguments
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Argument 'flavor':
flavor <- match.arg(flavor);
## Learners must be plain functions under "learning" and character
## specifications under the two (Super)Learner-based flavors.
learnMode <- switch(flavor,
learning="function",
superLearning="character",
h2oEnsembleLearning="character");
## Argument 'learnG'
mode <- mode(learnG);
if (mode != learnMode) {
throw("Argument 'learnG' should be of mode '", learnMode, "', not '", mode, "' for flavor: ", flavor);
}
## Argument 'learnMuAux'
mode <- mode(learnMuAux);
if (mode != learnMode) {
throw("Argument 'learnMuAux' should be of mode '", learnMode, "', not '", mode, "' for flavor: ", flavor);
}
## Argument 'learnTheta'
mode <- mode(learnTheta);
if (mode != learnMode) {
throw("Argument 'learnTheta' should be of mode '", learnMode, "', not '", mode, "' for flavor: ", flavor);
}
## Argument 'bound':
bound <- Arguments$getNumeric(bound);
if (bound<=0) {
throw("Argument 'bound' must be positive!\n")
}
## Argument 'B':
B <- Arguments$getInteger(B);
## Argument 'light'
light <- Arguments$getLogical(light);
## Argument 'trueGMu': when supplied, the known true g and muAux functions
## are used instead of estimating them from the data.
useTrueGMu <- (!is.null(trueGMu))
if (useTrueGMu) {
if (!is.list(trueGMu)) {
throw("If not NULL, Argument 'trueGMu' should be a list")
}
trueG <- trueGMu[["g"]]
if (mode(trueG) != "function") {
throw("Argument 'trueGMu$g' should be a function, not a ", mode(trueG))
}
trueMuAux <- trueGMu[["muAux"]]
if (mode(trueMuAux) != "function") {
throw("Argument 'trueGMu$muAux' should be a function, not a ", mode(trueMuAux))
}
}
## Argument 'SuperLearner.'
if (flavor=="superLearning") {
if (is.null(SuperLearner.) || mode(SuperLearner.)!="function") {
throw("Argument 'SuperLearner.' should be a function")
}
}
## Argument 'verbose'
verbose <- Arguments$getVerbose(verbose);
verbose <- less(verbose, 10);
## Retrieving 'obs'
obs <- getObs(this, tabulate=FALSE);
## Retrieving 'weights'
obsWeights <- getObsWeights(this);
## Retrieving 'id'
id <- getId(this);
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## learning
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, "Estimating relevant features of the distribution");
if (!useTrueGMu) {
g <- estimateG(obs, weights=obsWeights, id=id,
flavor=flavor, learnG=learnG, light=light,
SuperLearner.=SuperLearner.,
..., verbose=verbose);
muAux <- estimateMuAux(obs, weights=obsWeights, id=id,
flavor=flavor, learnMuAux=learnMuAux, light=light,
SuperLearner.=SuperLearner.,
..., verbose=verbose);
} else {
g <- trueG
muAux <- trueMuAux
}
initializeG(this, g);
initializeMu(this, muAux, g);
theta <- estimateTheta(obs, weights=obsWeights, id=id,
flavor=flavor, learnTheta=learnTheta, light=light,
SuperLearner.=SuperLearner.,
..., verbose=verbose);
initializeTheta(this, theta);
## Weighted second moment of the X column of 'obs' -- assumes obs has an
## "X" column; TODO confirm against getObs().
sigma2 <- sum(obs[, "X"]^2 * obsWeights);
setSigma2(this, sigma2);
verbose && exit(verbose);
verbose && enter(verbose, "Updating 'psi' accordingly");
updatePsi(this, B, verbose=verbose);
psi0 <- getPsi(this);
verbose && str(verbose, psi0);
verbose && enter(verbose, "Updating efficient influence curve and 'epsilon' accordinlgy");
updateEfficientInfluenceCurve(this);
## Update history
updateHistory(this);
verbose && exit(verbose);
})
############################################################################
## HISTORY:
## 2014-02-07
## o Created.
############################################################################
|
7f3d2ed0091afac5d45f8f414a05734c4b2c9af3
|
8e39c4708b3b5eecdfa2daf699d36c1cc9423312
|
/Courses/Fall term/Machine Learning/1. Logistic regression/Homework 1/Homework1.R
|
3d5be36ddd27c0d97c6a46529f6658001a2dbf8a
|
[] |
no_license
|
geo539/CMF
|
636f988ce16870ebda54b7812b2c5ce9e688cde0
|
d5a250dfb6a41023ea282f97538ea41f343f09eb
|
refs/heads/master
| 2021-05-31T21:45:34.499027
| 2016-06-03T16:50:48
| 2016-06-03T16:50:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,209
|
r
|
Homework1.R
|
##### Initialisation #####
# NOTE: setwd() ties the script to one machine; all data paths below are
# relative to this directory.
setwd("~/CMF/Courses/Machine Learning/1. Logistic regression/Homework")
library(ggplot2)
Sys.setlocale("LC_ALL","English")
# Hold-out split, kept for reference:
# library(caTools)
# set.seed(123)
# split = sample.split(data$target_variable, SplitRatio = 0.7)
# train = subset(data, split==TRUE)
# test = subset(data, split==FALSE)

##### Loading data #####
y <- read.csv("Data/is_spam_train.csv")
X <- read.csv("Data/mail_features_train.csv")
X_test <- read.csv("Data/mail_features_test.csv")

##### Formatting data #####
# Response as a 0/1 column vector; design matrix with a leading intercept
# column. m = number of observations, n = number of features.
y <- cbind(as.numeric(y$x))
X <- as.matrix(X)
X <- cbind(1, X) # add an intercept column
m <- nrow(X)
n <- ncol(X) - 1
##### Logistic function #####
# Sigmoid: maps any real z (scalar, vector or matrix) into (0, 1).
g <- function(z) {
  1 / (1 + exp(-z))
}
# Logistic-regression cost (negative mean log-likelihood / cross-entropy).
# Relies on the globals X (design matrix) and y (0/1 column vector) and on
# the sigmoid g() defined above; theta is a column vector of coefficients.
J <- function(theta) {
  n_obs <- nrow(X)
  # Predicted probabilities h(theta) = g(X theta).
  probs <- g(X %*% theta)
  -t(y) %*% log(probs) / n_obs - t(1 - y) %*% log(1 - probs) / n_obs
}
# Gradient of the cost J with respect to theta: t(X) (g(X theta) - y) / m.
# Uses the same globals X and y as J().
gradJ <- function(theta) {
  n_obs <- nrow(X)
  residual <- g(X %*% theta) - y
  t(X) %*% residual / n_obs
}
##### Numeric optimisation #####
# BFGS minimisation of the cost J with analytic gradient gradJ; theta0 is
# the zero vector over the intercept plus n features.
theta0 <- cbind(rep(0,times=n+1)) # initial values
opt <- optim(fn=J, gr=gradJ, par=theta0, method="BFGS")
theta <- opt$par; Jval <- opt$value
# Top-level expression: prints the fit only when run interactively.
list(theta=as.vector(theta),J=Jval)


##### Results #####
# Decision rule: X %*% theta >= 0 is equivalent to g(X %*% theta) >= 0.5,
# i.e. predicted class 1.
y.result <- X %*% theta
y.result <- as.numeric(y.result >= 0)

##### Visualise #####
# head(iris)
#
#
# qplot(iris$Sepal.Length, iris$Sepal.Width, color = iris$Species, lwd = 5)
# qplot(iris$Petal.Length, iris$Petal.Width, color = iris$Species, lwd = 5)
#
# pairs(iris, pch = 16)
# panel.cor <- function(x, y, digits = 2, cex.cor, ...)
# {
# usr <- par("usr"); on.exit(par(usr))
# par(usr = c(0, 1, 0, 1))
# # correlation coefficient
# r <- cor(x, y)
# txt <- format(c(r, 0.123456789), digits = digits)[1]
# txt <- paste("cor = ", txt, sep = "")
# text(0.5, 0.6, txt, cex = 1.5)
#
# # p-value calculation
# p <- cor.test(x, y)$p.value
# txt2 <- format(c(p, 0.123456789), digits = digits)[1]
# txt2 <- paste("p.val = ", txt2, sep = "")
# if(p<0.01) txt2 <- paste("p.val ", "< 0.01", sep = "")
# text(0.5, 0.4, txt2, cex = 1.5)
# }
#
# pairs(iris, upper.panel = panel.cor, pch = 16)
# Decision boundary intercepts on the first two feature axes.
# NOTE(review): lines() draws onto an existing plot; no plot() call precedes
# this (the earlier plotting code is commented out), so sourcing the script
# as-is errors here.
x1 <- c(0,-theta[1]/theta[3])
x2 <- c(-theta[1]/theta[2],0)
lines(x1,x2,type="l",lwd=3)
|
2f6d38c247298f8836910417640a2078b1227898
|
13db5b908ee53411fca6a45c0a7d93de9cc4e3a9
|
/man/babble_ratio.Rd
|
6d9214d95405ac2765fad734c272cd3517c3dd65
|
[] |
no_license
|
LAAC-LSCP/avutils
|
67cb1d7cd41bd7bafa03cdfeec677f7951bd2f66
|
3fa75f5f7447df5a5534486cc6c19428f9509dab
|
refs/heads/master
| 2022-07-18T05:47:25.732313
| 2020-05-05T15:50:11
| 2020-05-05T15:50:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 840
|
rd
|
babble_ratio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/babble_ratio.R
\name{babble_ratio}
\alias{babble_ratio}
\title{canonical babbling ratio}
\usage{
babble_ratio(xfile)
}
\arguments{
\item{xfile}{character, file path to rttm generated by
\code{\link{divime_classify_vox}}}
}
\value{
a list with three items (\code{cns}: number of canonical utterances,
\code{ncs}: number of non-canonical utterances, \code{babble_ratio}: the
ratio)
}
\description{
proportion of canonical utterances (out of the sum of canonical and
non-canonical)
}
\details{
if the rttm is empty (i.e. contains no child vocalizations at all),
the returned list will contain only \code{NA}s. If there were some child
vocalizations but none of them were CNS or NCS, the counts in the output will
be 0 and only the ratio is returned as \code{NA}.
}
|
c8bd03cf98b286a3ba33360f2bc7cf4dd9d8bcfc
|
91e6816d28d46a5e1eabbc3224a7fbafc122c745
|
/inst/apps/myFirstApp/global.r
|
a537acdd596a283595f65a9b4c649c579cd25422
|
[] |
no_license
|
RachelRamirez/NPS
|
b5da5a71f95511d217800cfd4184ac39b3a00751
|
15f35aab2d517fad3381e2659e92b0abc335b35a
|
refs/heads/master
| 2021-09-09T14:04:42.337793
| 2018-03-16T20:12:59
| 2018-03-16T20:12:59
| 117,839,404
| 0
| 2
| null | 2018-03-16T20:13:00
| 2018-01-17T13:23:30
|
R
|
UTF-8
|
R
| false
| false
| 503
|
r
|
global.r
|
# global.r is evaluated using the source() function and stored as global objects in the parent environment
# This is a good place to call libraries
# NEVER NEVER NEVER INSTALL.PACKAGES() IN THE GLOBAL.R
# because someone using this on the web will fail
# you can also call custom-functions here
library(shiny)
library(shinythemes)
library(choroplethr)
library(choroplethrMaps)
# State-level demographics shipped with choroplethr; used as the app's map data.
data('df_state_demographics')
map_data <- df_state_demographics
# Survey data from the NPS package; 'cleandata' is presumably the cleaned
# response set -- confirm against the NPS package documentation.
library(NPS)
data('cleandata')
dataSource <- cleandata
|
8929cf316992ec2a6e44fea0f3e5a874233e922f
|
3f9db7481425c63a1fd9078c2583d096287df74f
|
/man/generate_branch_changes.Rd
|
47b94893e5f536e12943d24e22e48b0ac126fcb9
|
[
"MIT"
] |
permissive
|
ethanmoyer/ICCE
|
2f8442a1afc3b66c0bb9c0bb8958c2bf5f3d0f02
|
0f23dc13b51e35b1a387f42a2e2ddc984ee991f9
|
refs/heads/master
| 2022-12-06T14:44:16.564361
| 2020-08-20T01:23:11
| 2020-08-20T01:23:11
| 278,681,090
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,476
|
rd
|
generate_branch_changes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze.R
\name{generate_branch_changes}
\alias{generate_branch_changes}
\title{Generate list of relevant probes for gain, loss, and change in methylation
status}
\usage{
generate_branch_changes(icceTree)
}
\arguments{
\item{icceTree}{icceTree data structure}
\item{probe_node_matrix}{probe-node matrix storing the methylation status
across all probes and nodes on a tree}
}
\value{
a structure containing a list of probes with differentially
methylated probes for each node (gain and loss stored separately), a list
of branch labels for the number of total changes in methylation, a list of
branch labels for the number of gains in methylation, and a list of branch
labels for the number of losses in methylation
}
\description{
This function returns relevant probe lists for each node that exhibits
either a gain or a loss in methylation status. It also returns three sets
of branch labels: number of methylation changes, number of methylation
gains, and number of methylation losses. These labels can be given to the
show_tree function as an edge labels parameter to visualize this data.
}
\examples{
probe_information <- generate_branch_changes(icceTree)
relevant_probes <- probe_information$relevant_probes
total_changes_label <- probe_information$total_changes_label
meth_gains_label <- probe_information$meth_gains_label
meth_losses_label <- probe_information$meth_losses_label
}
|
56f09101b3402045fa11bc1814173ad13bc08a92
|
13dfdb1b95a769b214c10608459a2f4687d5fba3
|
/man/run_cyclone.Rd
|
4c178408720c684715c9082f4628bea150bebf00
|
[
"MIT"
] |
permissive
|
kwells4/mtec.10x.pipeline
|
79e4c9267f70af6a793e48502070f35cee74c1e5
|
19a7fdcc3a6ba4f3beda7af06cf925a3b6b97369
|
refs/heads/master
| 2023-01-28T00:57:24.550595
| 2019-04-03T17:22:10
| 2019-04-03T17:22:10
| 179,329,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 776
|
rd
|
run_cyclone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cell_cycle.R
\name{run_cyclone}
\alias{run_cyclone}
\title{Runs cell cycle analysis on cells}
\usage{
run_cyclone(mtec, conversion_file, seed = 0, add_to_obj = TRUE)
}
\arguments{
\item{mtec}{a Seurat object}
\item{conversion_file}{a file to use to convert gene names to ensembl ids.}
\item{seed}{OPTIONAL seed for reproducibility. Defaults to 0}
\item{add_to_obj}{OPTIONAL if the data should be added to the Seurat object.
Defaults to TRUE}
}
\description{
This function allows you to run cell cycle analysis on the population of
cells using cyclone from scran.
}
\examples{
\dontrun{
run_cyclone(mTEC.10x.data::mtec_trace)
run_cyclone(mTEC.10x.data::mtec_trace, seed = 10)
}
}
\keyword{scran}
|
77d6a18ab831a7d40ca249a0a3cc1f0d188f9558
|
f6310abd48d653e92b055a624d791397ba4d457b
|
/scripts/city-by-works.R
|
3064072bb74a4ec0eddf2bbf3b0fdbbb593eef79
|
[] |
no_license
|
akisery/patterns-of-translations
|
e21f75d9e680f4a60aa022c1abe2f9dc8db6d575
|
c33f5c14fdff364b88fefeb23552a70f5d311b88
|
refs/heads/main
| 2023-04-03T10:01:30.515457
| 2021-04-17T17:37:42
| 2021-04-17T17:37:42
| 352,882,235
| 0
| 0
| null | 2021-04-17T17:37:43
| 2021-03-30T05:35:42
| null |
UTF-8
|
R
| false
| false
| 2,953
|
r
|
city-by-works.R
|
library(tidyverse)

# Pipeline: from the raw demeter.csv, derive per-city publication counts,
# per-city/year/language work counts, and two plots of publication activity.
# Multi-city entries in normalized_city are separated by '−' (U+2212, not an
# ASCII hyphen); each work is split evenly across its cities (count/cities).
# NOTE(review): view() only works interactively; gather()/separate() are
# superseded tidyr verbs (pivot_longer()/separate_wider_delim()).
df <- read_csv('data/demeter.csv', col_types = 'dccccccccccccd')

# df %>%
#   filter(grepl('U.S.A', normalized_city)) %>%
#   view()

# Works per city, with multi-city works apportioned fractionally.
df %>%
  select(normalized_city) %>%
  mutate(
    count = 1.0,
    cities = (str_count(normalized_city, '−') + 1) * 1.0) %>%
  separate(normalized_city, c("A","B","C","D"), sep = '−', fill="right") %>%
  gather("code", "city", -c(count, cities)) %>%
  mutate(count = count/cities) %>%
  select(count, city) %>%
  group_by(city) %>%
  summarise(works = sum(count)) %>%
  write_csv('data/city-by-works.csv') %>%
  view()

# Same apportioning, but broken down by year and language (nyelv).
df %>%
  select(normalized_city, year_n, nyelv) %>%
  mutate(
    count = 1.0,
    cities = (str_count(normalized_city, '−') + 1) * 1.0) %>%
  separate(normalized_city, c("A","B","C","D"), sep = '−', fill="right") %>%
  gather("code", "city", -c(year_n, nyelv, count, cities)) %>%
  mutate(count = count/cities) %>%
  select(city, year_n, nyelv, count) %>%
  group_by(city, year_n, nyelv) %>%
  summarise(works = sum(count)) %>%
  write_csv('data/city-year-language-works.csv') %>%
  view()

# NOTE(review): 'df' is reassigned here and below; the raw data is no longer
# available past this point.
df <- read_csv('data/city-year-language-works.csv')

# Number of distinct languages per city and year.
df %>%
  group_by(city, year_n) %>%
  count() %>%
  rename(languages = n) %>%
  write_csv('data/city-year-languages.csv') %>%
  view()

df %>%
  group_by(city) %>%
  count() %>%
  rename(publications = n) %>%
  write_csv('data/city-publications.csv') %>%
  view()

# Per-city publication count, first/last year, span and density
# (years per publication); overwrites city-publications.csv from above.
df %>%
  select(city, year_n) %>%
  group_by(city) %>%
  mutate(
    count = n(),
    min_year = min(year_n, na.rm = TRUE),
    max_year = max(year_n, na.rm = TRUE),
    span = max_year+1 - min_year,
    density = span / count
  ) %>%
  ungroup() %>%
  select(-year_n) %>%
  distinct() %>%
  # rename(publications = n) %>%
  write_csv('data/city-publications.csv') %>%
  view()

df <- read_csv('data/city-publications.csv')

# Scatter of publication count vs time span, excluding Budapest and the
# "s. l" (sine loco) placeholder, for cities with more than 20 publications.
df %>%
  filter(!is.na(city) & city != 'Budapest' & city != 's. l'
         & count > 20) %>%
  ggplot(aes(count, span, label = city)) +
  # geom_point(colour = 'maroon') +
  geom_text(colour = 'brown') +
  ggtitle('Publications and timespan') +
  ylab('time span in years') +
  xlab('minimum number of publications')

ggsave("images/publications-by-timespan.png",
       width = 12, height = 6, units = 'in', dpi = 300)

cities <- read_csv('data/city-publications.csv')

top_cities <- cities %>%
  select(city, span, count) %>%
  filter(count >= 20 & !is.na(city) & city != 'Budapest' & city != 's. l')

df <- read_csv('data/city-year-language-works.csv')

# Dot plot of publication years for the top cities, ordered by time span.
df %>%
  filter(city %in% top_cities$city) %>%
  select(city, year_n) %>%
  left_join(top_cities) %>%
  ggplot(aes(year_n, reorder(city, span))) +
  geom_point(colour = 'cornflowerblue') +
  ggtitle(
    'Publication years by cities',
    subtitle = 'ordered by time span') +
  xlab('publication year') +
  ylab('city')

ggsave("images/years-by-cities.png",
       width = 4, height = 6, units = 'in', dpi = 300)
|
dce869d2561c9b1195b7a09fe616447442035407
|
31260f945c350bfd1aec4cad7d8d18088335d9b7
|
/man/read_fam.Rd
|
178a70960c0e9257178299980fa968198154c1b3
|
[
"MIT"
] |
permissive
|
signaturescience/skater
|
1c793c012f1b250ef6c3b45455e6277573053a55
|
97c2ba52c68fc1c744f778069938afc4862671c6
|
refs/heads/main
| 2023-04-15T03:27:13.724196
| 2023-01-31T14:48:43
| 2023-01-31T14:48:43
| 339,462,170
| 8
| 5
|
NOASSERTION
| 2023-01-31T14:48:45
| 2021-02-16T16:36:39
|
TeX
|
UTF-8
|
R
| false
| true
| 679
|
rd
|
read_fam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{read_fam}
\alias{read_fam}
\title{Read PLINK-formatted .fam file}
\usage{
read_fam(file)
}
\arguments{
\item{file}{Input file path}
}
\value{
A tibble containing the 6 columns from the fam file.
}
\description{
Reads in a \href{https://www.cog-genomics.org/plink/1.9/formats#fam}{PLINK-formatted .fam file}. Input \code{file} must have six columns:
\enumerate{
\item Family ID
\item Individual ID
\item Father ID
\item Mother ID
\item Sex
\item Affected Status
}
}
\examples{
famfile <- system.file("extdata", "3gens.fam", package="skater", mustWork=TRUE)
fam <- read_fam(famfile)
fam
}
|
8c060118d1483816766da0b139e08143ac1cfe67
|
6d8572fb50a9ba39e6372ff0de70aac877d50ec7
|
/R/extract_cycle_timing.R
|
16e1ceffba6cfc73ae26a86d0dd264cce7705aeb
|
[] |
no_license
|
erikerhardt/isogasex
|
aed346bf689f28dce3d8500dc799e80b7354c037
|
2e3fc9c21c1d3d8e2348b7bff28954b5a169b0e8
|
refs/heads/master
| 2020-05-22T00:32:30.670300
| 2019-07-16T04:43:20
| 2019-07-16T04:43:20
| 186,173,267
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,523
|
r
|
extract_cycle_timing.R
|
#' Cycle timing
#'
#' Write cycle timing to the timing file.
#'
#' @param TDL xxxPARAMxxx
#' @param TDL_cycle xxxPARAMxxx
#' @param TDL_site_timing_filename xxxPARAMxxx
#'
#' @return NULL xxxRETURNxxx
#'
extract_cycle_timing <-
function# Cycle timing
###
(TDL
###
, TDL_cycle
###
, TDL_site_timing_filename
###
)
{
##details<<
##
i_time <- 1; old_time <- i_time;
TDL_site_timing <- paste("date","time.begin","time.seconds","time.minutes","site","name", sep=",");
for (i_time in 2:TDL$n) {
if ((TDL$data[i_time-1,"PrevSite"] != TDL$data[i_time,"PrevSite"]) || (i_time == TDL$n)) {
TDL_site_timing <-
rbind( TDL_site_timing
,paste(
# Changed to format() when adding full TDL interp 12/12/2009 2:03PM
# strftime(TDL$time[old_time],format="%Y-%m-%d")
format(TDL$time[old_time],format="%Y-%m-%d")
,format(TDL$time[old_time],format="%H:%M:%S") # "%H:%M:%OS" ## can't use %OS format since Excel doesn't display decimal seconds automatically
,round(as.numeric(difftime(TDL$time[i_time],TDL$time[old_time],units="secs")),1)
,round(as.numeric(difftime(TDL$time[i_time],TDL$time[old_time],units="mins")),2)
,TDL$data[old_time,"PrevSite"]
,TDL_cycle$table_name[(TDL_cycle$table[,1] == TDL$data[old_time,"PrevSite"])]
, sep=","
)
)
old_time <- i_time;
};
};
write(TDL_site_timing, file = TDL_site_timing_filename, append = FALSE);
invisible(NULL);
### NULL
}
|
abb33c6eecd026542e7415296d5ec3d377c14613
|
907af44f17d7246e7fb2b967adddb937aa021efb
|
/man/fslmerge.Rd
|
35c1572b17d340164f622f1712734441e75cb863
|
[] |
no_license
|
muschellij2/fslr
|
7a011ee50cfda346f44ef0167a0cb52420f67e59
|
53276dfb7920de666b4846d9d8fb05f05aad4704
|
refs/heads/master
| 2022-09-21T07:20:18.002654
| 2022-08-25T14:45:12
| 2022-08-25T14:45:12
| 18,305,477
| 38
| 23
| null | 2019-01-10T20:57:47
| 2014-03-31T19:35:03
|
R
|
UTF-8
|
R
| false
| true
| 1,285
|
rd
|
fslmerge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fsl_merge.R, R/fslhd.R
\name{fsl_merge}
\alias{fsl_merge}
\alias{fslmerge}
\title{Merge images using FSL}
\usage{
fsl_merge(..., outfile = tempfile(fileext = ".nii.gz"), retimg = FALSE)
fslmerge(
infiles,
direction = c("x", "y", "z", "t", "a"),
outfile = NULL,
retimg = TRUE,
reorient = FALSE,
intern = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{...}{additional arguments passed to \code{\link{readnii}}.}
\item{outfile}{(character) output filename}
\item{retimg}{(logical) return image of class nifti}
\item{infiles}{(character) input filenames}
\item{direction}{(character) direction to merge over, x, y, z,
t (time), a (auto)}
\item{reorient}{(logical) If retimg, should file be reoriented when read in?
Passed to \code{\link{readnii}}.}
\item{intern}{(logical) pass to \code{\link{system}}}
\item{verbose}{(logical) print out command before running}
}
\value{
character or logical depending on intern
}
\description{
This function calls \code{fslmerge} to merge files on some dimension
and either saves the image or returns an object of class nifti
}
\note{
Functions with underscores have different defaults
and will return an output filename, so to be used for piping
}
|
de33f86cab8512c0393f721f34e1e213cc4a1aab
|
88863cb16f35cd479d43f2e7852d20064daa0c89
|
/Winton/analysis/match-test1-test2.R
|
1ff9c381f33fa7946d228f31012cce4923bba216
|
[] |
no_license
|
chrishefele/kaggle-sample-code
|
842c3cd766003f3b8257fddc4d61b919e87526c4
|
1c04e859c7376f8757b011ed5a9a1f455bd598b9
|
refs/heads/master
| 2020-12-29T12:18:09.957285
| 2020-12-22T20:16:35
| 2020-12-22T20:16:35
| 238,604,678
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,089
|
r
|
match-test1-test2.R
|
TEST1.FILE <- "../data/test.RData"
TEST2.FILE <- "../data/test_2.RData"
SCALE.UP <- 1000*1000
MERGE.KEY.COLS <- c("Ret_MinusOne", "Ret_MinusTwo")
KEEP.COLS <- c("Id", MERGE.KEY.COLS)
getTestData <- function(fname) {
cat("reading data from:", fname, "\n")
test.data <- readRDS(fname)[,KEEP.COLS]
test.data[is.na(test.data)] <- 0
test.mergekeys <- trunc(SCALE.UP * test.data)
test.mergekeys["Id"] <- test.data["Id"]
return(test.mergekeys)
}
test1.df <- getTestData(TEST1.FILE)
test2.df <- getTestData(TEST2.FILE)
cat("merging\n")
test.merge <- merge(x=test1.df, y=test2.df,
by=MERGE.KEY.COLS,
all.x = TRUE,
suffixes=c(".test1", ".test2"))
# sort by ID of test1
ordering <- order(test.merge[,"Id.test1"])
test.merge <- test.merge[ordering,]
print(head(test.merge, 20))
print(nrow(test.merge))
print(length(unique(test.merge$Id.test1)))
print(length(unique(test.merge$Id.test2)))
cat("\nStock Ids in test1 but NOT in test2\n")
mask <- is.na(test.merge$Id.test2)
print(test.merge[mask,])
|
48264c1dcb731a843d5ac8094a994c1093c6b45a
|
07fb621480b38025144d5dccfbd1056ca0af9495
|
/workout1/code/make-shots-data-script.R
|
40cb9604bbb9cfcc8e661747d7a141b2b0818b02
|
[] |
no_license
|
Agatemei/hw-stat133-Agatemei
|
8132467d8e5d551d8435e5a7fb8f1b6250a9e068
|
8a245238b6cf067b62be0bedfddc0147fb36c60b
|
refs/heads/master
| 2020-05-18T10:04:34.794720
| 2019-03-13T17:22:14
| 2019-03-13T17:22:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,216
|
r
|
make-shots-data-script.R
|
# title: make shots data
# description: create a csv data file shots-data.csv that will contain the required variables to be used in the visualization phase.
# input(s): five csv data files (raw data)
# output(s): summary of five players respectively and totally
library(readr)
# Read in the five data sets, using relative file paths
curry <- read.csv("../data/stephen-curry.csv", stringsAsFactors = FALSE)
durant <- read.csv("../data/kevin-durant.csv", stringsAsFactors = FALSE)
klay <- read.csv("../data/klay-thompson.csv", stringsAsFactors = FALSE)
draymond <- read.csv("../data/draymond-green.csv", stringsAsFactors = FALSE)
iguodala <- read.csv("../data/andre-iguodala.csv", stringsAsFactors = FALSE)
# Add a column name to each imported data frame, that contains the name of the corresponding player:
curry$name = c("Stephen Curry")
durant$name = c("Kevin Durant")
klay$name = c("Klay Thompson")
draymond$name = c("Draymond Green")
iguodala$name = c("Andre Iguodala")
# Change the original values of shot_made_flag to more descriptive values
# Replace "n" with "shot_no", and "y" with "shot_yes":
curry$shot_made_flag[curry$shot_made_flag == "n"] = "shot_no"
curry$shot_made_flag[curry$shot_made_flag == "y"] = "shot_yes"
durant$shot_made_flag[durant$shot_made_flag == "n"] = "shot_no"
durant$shot_made_flag[durant$shot_made_flag == "y"] = "shot_yes"
klay$shot_made_flag[klay$shot_made_flag == "n"] = "shot_no"
klay$shot_made_flag[klay$shot_made_flag == "y"] = "shot_yes"
draymond$shot_made_flag[draymond$shot_made_flag == "n"] = "shot_no"
draymond$shot_made_flag[draymond$shot_made_flag == "y"] = "shot_yes"
iguodala$shot_made_flag[iguodala$shot_made_flag == "n"] = "shot_no"
iguodala$shot_made_flag[iguodala$shot_made_flag == "y"] = "shot_yes"
# Add a column minute that contains the minute number where a shot occurred.
curry$minute <- curry$period * 12 - curry$minutes_remaining
durant$minute <- durant$period * 12 - durant$minutes_remaining
klay$minute <- klay$period * 12 - klay$minutes_remaining
draymond$minute <- draymond$period * 12 - draymond$minutes_remaining
iguodala$minute <- iguodala$period * 12 - iguodala$minutes_remaining
# Use sink() to send the summary() output of each imported data frame into individuals text files:
sink(file = "../output/andre-iguodala-summary.txt")
summary(iguodala)
sink()
sink(file = "../output/draymond-green-summary.txt")
summary(draymond)
sink()
sink(file = "../output/kevin-durant-summary.txt")
summary(durant)
sink()
sink(file = "../output/klay-thompson-summary.txt")
summary(klay)
sink()
sink(file = "../output/stephen-curry-summary.txt")
summary(curry)
sink()
# Use the row binding function rbind() to stack the tables into one single data frame.
shots_data <- rbind(curry, durant, draymond, iguodala, klay)
# Export (i.e. write) the assembled table as a CSV file shots-data.csv inside the folder data/.
# Use a relative path for this operation.
write_csv(x = shots_data, path = "../data/shots-data.csv")
# Use sink() to send the summary() output of the assembled table.
# Send this output to a text file named shots-data-summary.txt inside the output/ folder.
sink(file = "../output/shots-data-summary.txt")
summary(shots_data)
sink()
|
f78664444dea10ea2ea40cbbad8a77a7d063190a
|
d14bcd4679f0ffa43df5267a82544f098095f1d1
|
/R/fx.ADDT.life.quantile.gamma.R
|
5f34755f8be22c67d30daff1c9e1e9d892ea751d
|
[] |
no_license
|
anhnguyendepocen/SMRD
|
9e52aa72a5abe5274f9a8546475639d11f058c0d
|
c54fa017afca7f20255291c6363194673bc2435a
|
refs/heads/master
| 2022-12-15T12:29:11.165234
| 2020-09-10T13:23:59
| 2020-09-10T13:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 464
|
r
|
fx.ADDT.life.quantile.gamma.R
|
fx.ADDT.life.quantile.gamma <-
function (gamma.hat, p, distribution, FailLevel, xuse, transformation.response,
transformation.x, transformation.time, model, trans.of.quantile = F)
{
theta.hat <- f.ADDT.origparam(gamma.hat, model)
return(fx.ADDT.life.quantile(theta.hat, p, distribution,
FailLevel, xuse, transformation.response, transformation.x,
transformation.time = transformation.time, trans.of.quantile = trans.of.quantile))
}
|
b2c6d9064f81366496d493c467bc0290cfd55f98
|
d46af12402c6f9e83ad322d777d7db1b6bfe3706
|
/R/03_figures/10_figure_XX_03.R
|
06be48d8f2f166fa5974f24bb4c0403b9e49e47d
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
cbig/japan-zsetup
|
569ab4a3fc514d9da5d7dba2d6832c767b5f0d36
|
30b0e22602849815f5fded8ac0eeb9211408c144
|
refs/heads/master
| 2020-04-20T02:31:55.663618
| 2019-01-31T18:31:10
| 2019-01-31T18:31:10
| 168,573,921
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,042
|
r
|
10_figure_XX_03.R
|
# Figure XX: Hex-grid priority pattern maps
library(dplyr)
library(tidyr)
library(sp)
library(raster)
library(rgeos)
library(rgbif)
library(viridis)
library(gridExtra)
library(rasterVis)
set.seed(1)
study_area <- getData("GADM", country = "LKA", level = 0,
path = "tmp/hexagonal-grids/") %>%
disaggregate %>%
geometry
study_area <- sapply(study_area@polygons, slot, "area") %>%
{which(. == max(.))} %>%
study_area[.]
plot(study_area, col = "grey50", bg = "light blue", axes = TRUE)
text(81.5, 9.5, "Study Area:\nSri Lanka")
size <- 0.5
hex_points <- spsample(study_area, type = "hexagonal", cellsize = size)
hex_grid <- HexPoints2SpatialPolygons(hex_points, dx = size)
plot(study_area, col = "grey50", bg = "light blue", axes = TRUE)
plot(hex_points, col = "black", pch = 20, cex = 0.5, add = T)
plot(hex_grid, border = "orange", add = T)
make_grid <- function(x, type, cell_width, cell_area, clip = FALSE) {
if (!type %in% c("square", "hexagonal")) {
stop("Type must be either 'square' or 'hexagonal'")
}
if (missing(cell_width)) {
if (missing(cell_area)) {
stop("Must provide cell_width or cell_area")
} else {
if (type == "square") {
cell_width <- sqrt(cell_area)
} else if (type == "hexagonal") {
cell_width <- sqrt(2 * cell_area / sqrt(3))
}
}
}
# buffered extent of study area to define cells over
ext <- as(extent(x) + cell_width, "SpatialPolygons")
projection(ext) <- projection(x)
# generate grid
if (type == "square") {
g <- raster(ext, resolution = cell_width)
g <- as(g, "SpatialPolygons")
} else if (type == "hexagonal") {
# generate array of hexagon centers
g <- spsample(ext, type = "hexagonal", cellsize = cell_width, offset = c(0, 0))
# convert center points to hexagons
g <- HexPoints2SpatialPolygons(g, dx = cell_width)
}
# clip to boundary of study area
if (clip) {
g <- gIntersection(g, x, byid = TRUE)
} else {
g <- g[x, ]
}
# clean up feature IDs
row.names(g) <- as.character(1:length(g))
return(g)
}
study_area_utm <- CRS("+proj=utm +zone=44 +datum=WGS84 +units=km +no_defs") %>%
spTransform(study_area, .)
# without clipping
hex_grid <- make_grid(study_area_utm, cell_area = 625, clip = FALSE)
plot(study_area_utm, col = "grey50", bg = "light blue", axes = FALSE)
plot(hex_grid, border = "orange", add = TRUE)
box()
# with clipping
hex_grid <- make_grid(study_area_utm, cell_area = 625, clip = TRUE)
plot(study_area_utm, col = "grey50", bg = "light blue", axes = FALSE)
plot(hex_grid, border = "orange", add = TRUE)
box()
japan <- getData(name = "GADM", country = "JPN", level = 0,
path = "tmp/hexagonal-grids/") %>%
disaggregate %>%
geometry
japan_utm <- CRS("+proj=utm +zone=54 +datum=WGS84 +units=km +no_defs") %>%
spTransform(japan, .)
hex_jpn <- make_grid(japan_utm, type = "hexagonal", cell_width = 100, clip = FALSE)
plot(japan_utm, col = "grey50", bg = "light blue", axes = FALSE)
plot(hex_jpn, border = "orange", add = TRUE)
box()
amp_rank <- raster::raster("zsetup/amphibians/05_amp_caz_wgt_con/05_amp_caz_wgt_con_out/05_amp_caz_wgt_con.CAZ_DE.rank.compressed.tif") %>%
projectRaster(t_crop, to = raster(hex_jpn, res = 1)) %>%
setNames('rank')
hex_amp_rank <- extract(amp_rank, hex_jpn, fun = mean, na.rm = TRUE, sp = TRUE)
breaks <- c(0, 0.25, 0.50, 0.83, 0.91, 0.98, 1)
colors <- rev(RColorBrewer::brewer.pal(length(breaks) - 1, "RdYlBu"))
labels <- (100 - breaks * 100)
labels <- cbind(labels[1:(length(labels) - 1)], labels[2:length(labels)])
labels[,2] <- paste(labels[,2], "%")
labels[6,2] <- ""
labels <- apply(labels, 1, paste, collapse = " - ")
labels[6] <- gsub(" - ", " %", labels[6])
p2 <- spplot(hex_amp_rank,
col.regions = colors,
at = breaks,
colorkey = list(
labels = list(at = breaks,
labels = labels)
)
)
plot(japan_utm, col = "grey50", bg = "light blue", axes = FALSE)
plot(p2)
|
43f0d28f0b6b2fa70223fee1c560267e8cc9f7a8
|
87f1d6db8735e3223d5f0418a2c19c185145fc96
|
/Rcode/dev.new.R
|
c2a0c8bed6a8d729b6066f4428fb94df2d7f2493
|
[] |
no_license
|
marmundo/TouchAnalytics
|
a07c1b884da4015b509bd21c476fa1018ca4f93f
|
feea9d9e265c3d64b055494c34c5d826a3b122e8
|
refs/heads/master
| 2021-01-18T22:51:31.345382
| 2017-11-23T12:31:12
| 2017-11-23T12:31:12
| 9,913,965
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 309
|
r
|
dev.new.R
|
dev.new <- function(width = 7, height = 7) {
platform <- sessionInfo()
if (grepl("linux",platform)) {
x11(width=width, height=height)
}else if (grepl("pc",platform)) {
windows(width=width, height=height)
} else if (grepl("apple", platform)) {
quartz(width=width, height=height)
}
}
|
6b0d998b26fefa7f028f0512e6b484098d921633
|
58cc8d3e92854e81ba82379429b261f076af5a4a
|
/EnsANN.R
|
759599a1e73f254fa5d045dcbe854d40170cef5b
|
[] |
no_license
|
SWoOvOo/EnsembleLearning
|
5398af11212784f74831d22f644575aa39b5819e
|
294764360d964e10e25a6212b9b6bfcaeddbd77a
|
refs/heads/main
| 2023-01-01T20:56:45.282589
| 2020-10-26T04:18:05
| 2020-10-26T04:18:05
| 307,029,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,171
|
r
|
EnsANN.R
|
###Ensemble ANN function
#'based on R package "neuralnet"
#'
#'Siwei, 20201023
#load packages
library(neuralnet)
##Part 1
##Average ensemble of neuralnets trained with identical hyperparameters and different random starting weights
#Training function
ensANNa_train <- function(nNN, seed=1, formula, data, hidden, act.fct="logistic", algorithm="rprop+", stepmax=1e6, linear.output=T, rep=1){
#transform target into activation
target <- strsplit(paste(deparse(formula),collapse = ""),split = " ~")[[1]][1]
target_min <- min(data[target])
target_max <- max(data[target])
data[target] <- 0.1 + 0.8 * (data[target] - target_min) / (target_max - target_min)
#Training neuralnets
list_NN <- list()
for(i in 1:nNN){
set.seed(seed-1+i)
list_NN[[i]] <- neuralnet(formula, data=data, hidden=hidden, act.fct=act.fct, algorithm=algorithm, stepmax=stepmax, linear.output=linear.output, rep=rep)
}
#save target transformation data
list_NN[[nNN+1]] <- c(target_min,target_max)
return(list_NN)
}
#Prediction function
ensANN_predict <- function(list_NN, newdata,singlePred = FALSE){
#get number of neuralnets
nNN = length(list_NN)-1
#recall transformation data
target_min <- list_NN[[nNN+1]][1]
target_max <- list_NN[[nNN+1]][2]
#Prediction
pred <- matrix(nrow = nrow(newdata), ncol = nNN)
for(i in 1:nNN){
pred[,i] <- predict(list_NN[[i]],newdata = newdata)
pred[,i] <- ((pred[,i]-0.1)/0.8)*(target_max-target_min)+target_min
}
#average ensemble
meanPred <- rowSums(pred)/ncol(pred)
#Output results
if(singlePred == FALSE){
return(meanPred)
}
else{
df <- data.frame(pred)
df[,"MeanPred"] <- meanPred
return(df)
}
}
##Part 2
##Random ANN structures based on bootstraped data
#Training function
ensANNr_train <- function(nNN, seed=1, formula, data, min_nodes = 1, fraction = 0.8, replace = TRUE, act.fct="logistic", algorithm="rprop+", stepmax=1e6, linear.output=T, rep=1){
#transform target into activation
target <- strsplit(paste(deparse(formula),collapse = ""),split = " ~")[[1]][1]
target_min <- min(data[target])
target_max <- max(data[target])
data[target] <- 0.1 + 0.8 * (data[target] - target_min) / (target_max - target_min)
#Calculate number of model variables
nvar <- length(unlist(strsplit(strsplit(paste(deparse(formula),collapse = ""),split = " ~")[[1]][2],split = "\\+")))
#Training neuralnets
list_NN <- list()
for(i in 1:nNN){
set.seed(seed-1+i)
#generate bootstraped training dataset (no bootstrapping - set fraction = 1, replace = FALSE)
data_b <- sample_frac(data,size = fraction,replace = replace)
#generate random structure of ANN
nodes1 <- round(runif(1,min = min_nodes,max = nvar+1))
nodes2 <- round(runif(1,min = min(min_nodes,nodes1),max = nodes1))
#Train ANN
list_NN[[i]] <- neuralnet(formula, data=data_b, hidden=c(nodes1,nodes2), act.fct=act.fct, algorithm=algorithm, stepmax=stepmax, linear.output=linear.output, rep=rep)
}
#save target transformation data
list_NN[[nNN+1]] <- c(target_min,target_max)
return(list_NN)
}
|
dc891ee16e493f83d4f23ed557c8a582df93ab73
|
f9b5963321f1387041da217634906671eac336b9
|
/Plot3.R
|
2a5a3ea804fc5ac1ba24db3e31b2095e0133d539
|
[] |
no_license
|
naciselim/ExData_Plotting1
|
129610797d403a861ae5704e776454f705bec47d
|
98f90db02c566c2aaf681f0b7e8bdaefb12872a2
|
refs/heads/master
| 2020-12-26T11:15:44.573636
| 2015-12-11T06:30:56
| 2015-12-11T06:30:56
| 47,783,831
| 0
| 0
| null | 2015-12-10T19:49:12
| 2015-12-10T19:49:11
| null |
UTF-8
|
R
| false
| false
| 1,241
|
r
|
Plot3.R
|
# This program draws the Plot 3 mentioned in
# https://github.com/rdpeng/ExData_Plotting1
setwd("d:/Coursera") # not this is correct on my laptop only
# I convert them to POSIXct so that I can compare dates.
firstDate <- as.POSIXct("2007-02-01")
endDate <- as.POSIXct("2007-02-03") # exclusive
# my laptop memory and cpu is OK to read this huge file in short time
df <- read.table("household_power_consumption.txt",
colClasses = c("character","character",rep("numeric",5)),
header = TRUE,sep = ";", na.strings = "?")
df$Date <- strptime(paste(df$Date,df$Time,sep=" "),
"%d/%m/%Y %H:%M:%S")
df$Time <- NULL # no need to but to clean it up a bit
# select data on the given dates only
df <- subset(df, df$Date > firstDate & df$Date < endDate)
plot(df$Date,df$Sub_metering_1, type = "l",
xlab = "", ylab = "Energy sub metering")
points(df$Date, df$Sub_metering_2, type = "l", col = "red")
points(df$Date, df$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# copy plot to Plot3.png file
dev.copy(png, file = "Plot3.png", height=480, width=480)
dev.off()
|
62891e5adfdb03f09393298dc5136621a368143f
|
014d7e2d3618180e4f0c626385f728858d304d39
|
/man/compute_lppd.Rd
|
4d9497fd8ea2d6deac8d8a08b2ebe935aee1657e
|
[] |
no_license
|
StrattonCh/msocc
|
a4d9818d4e87c061f8d11f68f89d9791c3bd0eca
|
0180cb0489da46208079ae31aac46f6d83c3e058
|
refs/heads/master
| 2021-06-24T13:08:17.677146
| 2020-12-21T08:05:08
| 2020-12-21T08:05:08
| 173,383,551
| 2
| 1
| null | 2020-01-27T20:27:55
| 2019-03-02T00:00:45
|
R
|
UTF-8
|
R
| false
| true
| 548
|
rd
|
compute_lppd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_lppd.R
\name{compute_lppd}
\alias{compute_lppd}
\title{Compute log pointwise predictive density}
\usage{
compute_lppd(msocc_mod)
}
\arguments{
\item{msocc_mod}{output from \code{\link{msocc_mod}}}
}
\value{
numeric value that is the lppd
}
\description{
This function computes the log pointwise predictive density
(lppd) described in
\href{https://link.springer.com/article/10.1007/s11222-013-9416-2}{Gelman et
al. (2013)} for multi-scale occupancy models.
}
|
9e51b69d9be61b11d23791994545247fe2ec8d0d
|
387cef68bd1759aa02bffe9c097d45787e862106
|
/man/plot.mHMM_gamma.Rd
|
b72c3f8692ad4ffc345d8f607cddc3c42c907f1a
|
[] |
no_license
|
emmekeaarts/mHMMbayes
|
85c1f2cbe7c34e94f6b9c463f673007db5d575a2
|
0222eb41d7e143eae02a33199c93364fabd07b13
|
refs/heads/master
| 2023-08-31T01:12:49.606904
| 2023-08-14T12:01:50
| 2023-08-14T12:01:50
| 167,544,703
| 11
| 10
| null | 2023-07-25T22:37:58
| 2019-01-25T12:33:24
|
R
|
UTF-8
|
R
| false
| true
| 5,172
|
rd
|
plot.mHMM_gamma.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.mHMM_gamma.R
\name{plot.mHMM_gamma}
\alias{plot.mHMM_gamma}
\title{Plotting the transition probabilities gamma for a fitted multilevel HMM}
\usage{
\method{plot}{mHMM_gamma}(x, subj_nr = NULL, cex = 0.8, col, hide, ...)
}
\arguments{
\item{x}{An object of class \code{mHMM_gamma}, generated by the function
\code{\link{obtain_gamma}}.}
\item{subj_nr}{An integer specifying for which specific subject the
transition probability matrix should be plotted. Only required if the input
object represents the subject specific transition probability matrices.}
\item{cex}{An integer specifying scaling of fonts of category labels. When
not specified, defaults to \code{cex = 0.8}.}
\item{col}{An optional vector with length \code{m} * \code{m} (i.e., where
\code{m} denotes the number of hidden states) specifying the used colors in
the alluvial plot.}
\item{hide}{An optional logical vector with length \code{m} * \code{m}
(i.e., where \code{m} denotes the number of hidden states) specifying
whether particular stripes should be plotted. When not specified, omits
the lines representing a value of exactly zero.}
\item{...}{Arguments to be passed to alluvial (see
\code{\link[alluvial]{alluvial}})}
}
\value{
\code{plot.mHMM_gamma} returns a plot of the transition probability
matrix. Depending on whether the input object represents the transition
probabilities at the group level or the subject specific transition
probability matrices, the returned plot represents either the group
transition probability matrix, or the transition probability matrix for a
given subject, specified by \code{subject_nr}.
}
\description{
\code{plot.mHMM_gamma} plots the transition probability matrix for a fitted
multilevel hidden Markov model, by means of an alluvial plot (also known as
Sankey diagram or riverplot) using the R package \code{alluvial}. The plotted
transition probability matrix either represents the probabilities at the
group level, i.e., representing the average transition probability matrix
over all subjects, or at the subject level. In case of the latter, the user
has to specify for which subject the transition probability matrix should be
plotted.
}
\examples{
\donttest{
#' ###### Example on package data, see ?nonverbal
# specifying general model properties:
m <- 2
n_dep <- 4
q_emiss <- c(3, 2, 3, 2)
# specifying starting values
start_TM <- diag(.8, m)
start_TM[lower.tri(start_TM) | upper.tri(start_TM)] <- .2
start_EM <- list(matrix(c(0.05, 0.90, 0.05,
0.90, 0.05, 0.05), byrow = TRUE,
nrow = m, ncol = q_emiss[1]), # vocalizing patient
matrix(c(0.1, 0.9,
0.1, 0.9), byrow = TRUE, nrow = m,
ncol = q_emiss[2]), # looking patient
matrix(c(0.90, 0.05, 0.05,
0.05, 0.90, 0.05), byrow = TRUE,
nrow = m, ncol = q_emiss[3]), # vocalizing therapist
matrix(c(0.1, 0.9,
0.1, 0.9), byrow = TRUE, nrow = m,
ncol = q_emiss[4])) # looking therapist
# Run a model without covariate(s):
out_2st <- mHMM(s_data = nonverbal,
gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
start_val = c(list(start_TM), start_EM),
mcmc = list(J = 11, burn_in = 5))
out_2st
summary(out_2st)
# obtaining the transition probabilities at the group and subject level
est_gamma_group <- obtain_gamma(out_2st, level = "group")
# plot the obtained transition probabilities
plot(est_gamma_group, col = rep(c("green", "blue"), each = m))
}
\dontshow{
###### Example on simulated data
# Simulate data for 10 subjects with each 100 observations:
n_t <- 100
n <- 10
m <- 2
n_dep <- 1
q_emiss <- 3
gamma <- matrix(c(0.8, 0.2,
0.3, 0.7), ncol = m, byrow = TRUE)
emiss_distr <- list(matrix(c(0.5, 0.5, 0.0,
0.1, 0.1, 0.8), nrow = m, ncol = q_emiss, byrow = TRUE))
data1 <- sim_mHMM(n_t = n_t, n = n, gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss), gamma = gamma,
emiss_distr = emiss_distr, var_gamma = .5, var_emiss = .5)
# Specify remaining required analysis input (for the example, we use simulation
# input as starting values):
n_dep <- 1
q_emiss <- 3
# Run the model on the simulated data:
out_2st_sim <- mHMM(s_data = data1$obs,
gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
start_val = c(list(gamma), emiss_distr),
mcmc = list(J = 11, burn_in = 5))
# obtaining the transition probabilities at the group and subject level
est_gamma_group_sim <- obtain_gamma(out_2st_sim, level = "group")
# plot the obtained transition probabilities
plot(est_gamma_group_sim, col = rep(c("green", "blue"), each = m))
}
}
\seealso{
\code{\link{mHMM}} for fitting the multilevel hidden Markov
model, creating the object \code{mHMM}, and \code{\link{obtain_gamma}} to
obtain the transition probabilities gamma for a fitted multilevel HMM,
creating the object \code{mHMM_gamma}.
}
|
a8e5acc4967201413ba6fae500226ac429d4b7c3
|
ca0a9f8dfa1487adaf9ba2f8d21d0d06dddd79dc
|
/man/detectGlobal.Rd
|
f13ff0ef72b0b69e3e6efd18ccee7d58ac8d6dab
|
[] |
no_license
|
cran/packS4
|
c964c29f4e9e2c1f18bdcbbd91e3257f5ca15296
|
2c3a7206237f28672987275ef5696d66bb5ded34
|
refs/heads/master
| 2021-03-12T23:57:06.953797
| 2015-05-27T00:00:00
| 2015-05-27T00:00:00
| 17,698,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,457
|
rd
|
detectGlobal.Rd
|
\name{detectGlobal}
\alias{detectGlobal}
\title{
~ Function: detectGlobal ~
}
\description{Detect if there is some global variable in a function.}
\usage{
detectGlobal(realResult, tolerance = 0, theoResult = "", result = TRUE)
}
\arguments{
\item{tolerance}{Some keyword are detected as global variable whereas they are not. Tolerance is the number of false detection that should be ignored.}
\item{realResult}{Either the name of a function (see example 1), or a function with its argument (see example 2).}
\item{theoResult}{The theoritical result of the computation of the function with its argument.}
\item{result}{TRUE or FALSE: shall realResult should be the same than the theoResult, or not ? This is usefull to test conter example.}
}
\details{Detect if there is some global variable in a function.}
\value{None}
\examples{
### example 1
f <- function(x)x^2
detectGlobal(f)
g <- function(x)x^2*y
detectGlobal(g)
###########
### example 2
f <- function(x)x^2
### the next line should ring a bell, because 2^2 is not 8
detectGlobal(f(2),8)
### the next line should not ring a bell, because 2^2 is not 8, and we ask for conter-example
detectGlobal(f(2),8,FALSE)
###########
### example 3
h <- function(x){
apply(matrix(1:x),1,length)
}
### 'length' is detected as a global variable whereas it is a function
detectGlobal(h)
### So we use tolerance=1
detectGlobal(h,,,1)
}
|
a5229815cf52e926ac88385cf112296bbcc8bd28
|
e639b03c2c5db80538dc4b3ab22e094e3eeddb8e
|
/hw4.R
|
e2fbaa2ac1dc3afef28e7199d6ecaa05f1e07940
|
[] |
no_license
|
ramenkup/370HW4
|
3f2d1efb591a229cfc82ad1c735300bfcf268036
|
4b812343fc9d47ffb3a485c08c44ebe1a11e01bc
|
refs/heads/master
| 2021-01-10T05:00:46.387451
| 2016-01-13T22:38:00
| 2016-01-13T22:38:00
| 49,603,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,080
|
r
|
hw4.R
|
#Spencer Klinge
#Sunday Nov 1th, 2015
#ISTA370 - Prof. Kumar
#Homework #4
#*************************************************************
#1
sex_lies <- read.table("~/Downloads/sex_lies.txt", header = TRUE, sep = "")
sex_lies
gender.lies.aov<- aov(count~gender*lies, data=sex_lies)
summary(gender.lies.aov)
print(model.tables(gender.lies.aov, "means"),digits=3)
#Based on the two way anlysis of variance, we can say that the only significant is the effect of gender(F=(1,60)=6.547, p=.013),that is, the count of males (M=29.2) and the count of females(M=42.0) are significantly different. there is no significant effect on counts of propesity to lie(p>.05)
#******************************2**************************
#2
#1
scale.lies.aov<-aov(count~scale*lies, data=sex_lies)
summary(scale.lies.aov)
#we are givin no significant effect of scale (F(1,60)=.000,p=.983) or lies (F(1,60)=.036,p=.851), nor a combination effect. No conclusion can be drawn about propensity to lie among different scales of sexual permissveness.
#2
sex.scale.aov<-aov(count~sex*scale, data=sex_lies)
summary(sex.scale.aov)
print(model.tables(sex.scale.aov, "means"),digits=3)
#premarital sexual permissvesnnes has a significant effect (F(1,60)=4.52,p=.038) with an alpha of a=.05, lower mean being (M=41.2) and higher being (M=30.2). There is no significant combination effect.
#3
gender.religion.scale.aov<-aov(count~religion*gender*scale, data=sex_lies)
summary(gender.religion.scale.aov)
print(model.tables(gender.religion.scale.aov, "means"),digits=3)
#This is our first time running three way anova: were not lucky enough to get the three way interaction as a significant: rather,Religion does not have a siginficicant effect (F(1,56)=.958, p=.332), and gender does (F(1,56)=7.16, p=.0098) with a smaller alpha (p-value > .01). There is a significant. combination effect from both religion and gender (F(1,56)=6.881,p=.011) on count. amongst the triple interaction, Composite Males with a religion of 2 have the highest sample mean count (M=52.4), this is not significant.
|
d32e68721ca2ed3914b26f54c39dca5c5c825969
|
13004837fb945fc180867a47cf03600484b2c071
|
/_site/lessons/dplyr_tutorial_lesson_20190123.R
|
31c69c9086f57f022578781cdcab6630e4bd6b48
|
[
"Apache-2.0"
] |
permissive
|
UBC-R-Study-group/studyGroup
|
89a1cd77c7b8a6d760939e356e75b942e5de7001
|
37d7c51b7bff3315ea02273492db969bd09348f1
|
refs/heads/gh-pages
| 2022-05-04T20:58:28.439487
| 2022-04-22T21:20:51
| 2022-04-22T21:20:51
| 148,341,456
| 24
| 16
|
Apache-2.0
| 2021-03-24T19:58:59
| 2018-09-11T15:47:08
|
HTML
|
UTF-8
|
R
| false
| false
| 4,891
|
r
|
dplyr_tutorial_lesson_20190123.R
|
# Intro to Data Manipulation with dplyr!
# Here we will cover some of the most commonly used dplyr functions, using one
# of dplyr's practice datasets. Specifically, we will be looking at five
# functions used to manipulate dataframes (tables):
# 1. select() - extract columns based on name or index
# 2. filter() - extract rows based on some criteria
# 3. mutate() - add new columns based on existing ones
# 4. group_by() - group rows (observations) by value in a given column
# 5. summarize() - perform action/function on whole groups
# We will also cover the use of the pipe ( %>% ) function, used to send the
# output of one command/function into another.
# Finally, we will see how pipes allow us to chain or link multiple functions
# into a single, elegant step.
# Link to the Software Carpentry page:
# https://swcarpentry.github.io/r-novice-gapminder/13-dplyr/index.html
# Load the data -----------------------------------------------------------
# The dataset we will use is starwars, from dplyr, and can be loaded as follows
library(dplyr)
starwars <- starwars
# There are 13 columns (attributes, or variables) and 87 rows (characters, or
# observations).
# select() ----------------------------------------------------------------
# Let's use select() to extract a few columns from the overall dataset. The way
# we do this is as follows:
new_df <- select(my_df, col1, col2, col4, ...)
# For our starwars dataset, we could select the name, height, mass, homeworld,
# and species columns.
# CODE HERE
# We can rewrite the same command, this time making use of the %>% function.
# This function pushes, sends, or "pipes" the desired object into the subsequent
# function.
new_df <- my_df %>% select(.data = ., col1, col2, col4, ...)
# The "." is simply a placeholder for the object being piped (my_df in this
# example). Now let's apply this method to our starwars data, selecting the
# same columns as before.
# CODE HERE
# filter() ----------------------------------------------------------------
# This function allows us to extract rows from a dataframe, using some sort of
# logical criteria. So, you could extract rows for which the value of a
# particular column is negative (< 0). Or extract rows which contain a certain
# (non-numerical) value, such as a particular place or name.
new_df <- my_df %>% filter(col1 < 0)
# Or, for strings (i.e. "words"):
new_df <- my_df %>% filter(col2 == "blue")
# Equality in R is done using "==", while "=" is for assignment of aguments.
# Let's use filter to pick certain rows from the starwars data, choosing
# characters (rows) which are human:
# CODE HERE
# Another example, this time using a numeric column (i.e. logical criteria) to
# filter for characters above a certain height:
# CODE HERE
# mutate() ----------------------------------------------------------------
# mutate() allows us to create new columns, based on existing columns.
# Operations can be mathematical, or string (i.e. word) -related (i.e. combine
# strings). It works as follows:
new_df <- my_df %>% mutate(new_col = col1 * 2)
# Let's use mutate() to convert the mass column from kg to lbs:
# CODE HERE
# Combining functions through pipes ---------------------------------------
# Now is where we will see the real utility of pipes: they allow us to chain
# together multiple steps into a single command. The output of one function
# (e.g. select() ) can be piped into another function (e.g. filter() ). This
# allows us to combine multiple steps and run them all at once.
new_df <- my_df %>%
select(col1, col2, col4) %>%
filter(col2 == "blue") %>%
mutate(new_col = col1 * 2)
# Let's try it on our starwars dataset, selecting columns of interest, filtering
# on species, and converting mass to pounds:
# CODE HERE
# group_by() and summarise() ----------------------------------------------
# These functions allow us to perform operations on subsets of data.
# group_by() "groups" rows based on a column/value, e.g. all rows for which
# colour (a column name) is equal to "blue". Then, summarise() performs some
# operation on each group as a whole. In this example, we would be grouping
# based on values in col1, then calculating the mean of col2 for each group.
new_df <- my_df %>%
group_by(col1) %>%
summarise(mean(col2))
# So, let's use these functions to calculate the average height for each
# species in the starwars dataset.
# CODE HERE
# Another example, this time getting two different summaries, one for height
# and one for mass.
# CODE HERE
# Tying it all together ---------------------------------------------------
# Now let's use all of the functions we've learned thus far in a single command,
# strung together via pipes. Let's say we want to calculate the average mass,
# in pounds, for each species, but only for characters above a certain height.
# We could do this as follows:
# CODE HERE
|
7ae3b324fbad4e57802e584b67deada7bb8f9114
|
e36d726cb49640d4dfb56103bb5e76bf46e82034
|
/HW6.R
|
0ba0f96c3f3b013689b4f40b0aa2d88a3094413a
|
[] |
no_license
|
nickfrasco/CSCI-365
|
10552d188d90fafb927807d846acd4a0a33559c1
|
dc182f8b0754891bdcadab61d4b0e9d5cefa67dc
|
refs/heads/master
| 2022-11-27T07:43:01.993652
| 2020-07-21T15:13:19
| 2020-07-21T15:13:19
| 281,430,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,185
|
r
|
HW6.R
|
#Nicolo Frasco
#nfrasco@iu.edu
#Question 1
#A.
data = read.csv("~/Desktop/Vocab.csv", sep = ",", stringsAsFactors = FALSE);
x = as.vector(data[,4]);
y = as.vector(data[,5]);
n = length(x);
x = cbind(x,rep(1,n))
#print(as.matrix(x));
#B.
xbar = sum(x)/n; #from example
ybar = sum(y)/n;
xybar = sum(x*y)/n;
xsqbar = sum(x*x)/n;
b = (ybar*xsqbar-xbar*xybar)/(xsqbar-xbar*xbar)
a = (ybar - b)/xbar
cat("a = ", a, "b = ", b, "\n")
#C.
cat("Yes, it does appear that people with more education tend to have larger vocabularies", "\n")
#D.
cat("I believe staying in school one extra year would increase the chances of performing better on the test.", "\n")
#Question 2 - Used a little help from stack on this one
data = read.csv("~/Desktop/ais.csv", stringsAsFactors = FALSE, sep = ",")
#A.
x = as.matrix(data[,3:12])
y = as.vector(data[,2])
a = solve(t(x) %*% x, t(x) %*% y)
cat("a: ", a[1], "b: ", a[2], "\n")
#B.
y_hat = x %*% a
e = y - y_hat
sse = sum(e*e)
cat("sum of squared errors: ", sse, "\n")
#C.
for(i in 3:12) {
r = 3:12
x = as.matrix(data[,r[r!=i]]) #got from example <>
y = as.vector(data[,2])
a = solve(t(x) %*% x, t(x) %*% y)
error = y - (x %*% a)
sums = sum(error*error)
cat("sum of squared errors [",i,"]:",sums,"\n")
}
#Question 3
data(nottem)
y = nottem
n = length(y)
x = 1:n;
#A.
plot(x,y, type = "b")
#B.
X = cbind(x,rep(1,n));
A = solve(t(X) %*% X , t(X) %*% y) #alpha
a = A[1]
b = A[2]
cat("a = ", a, "b = ", b, "\n")
plot(x,y,type = "b")
abline(b,a) # add the line that fits the data
#C.
X = cbind(rep(1,n),cos(2*pi*x/12),sin(2*pi*x/12))
new_A = solve(t(X) %*% X, t(X) %*% y)
y_hat = X %*% new_A
lines(y_hat, col="green")
#D.
X = cbind(rep(1,n),x,cos(2*pi*x/12),sin(2*pi*x/12))
a1 = solve(t(X) %*% X, t(X) %*% y)
y_hat = X %*% a1
lines(y_hat, col = "red")
#Question 4
data(AirPassengers)
#print(AirPassengers)
y = AirPassengers
n = length(y)
x = 1:n
#A.plot(x,y, type = "b") plotted later on
#B.
X = cbind(x,rep(1,n));
A = solve(t(X) %*% X , t(X) %*% y) #alpha
a = A[1]
b = A[2]
plot(log(x) - log(y),type = "b")
#C.
par(new = TRUE)
plot(x,y, type = "b", axes = FALSE)
par(new = F)
|
15e7685f1d4ebbc57010488ab9d9d44561792fe2
|
1fc489e086a1d0e1d25d84f37e550193d47bf6c4
|
/plot4.R
|
b60c57a07ca8779e166f375de30d7cc06e99d54d
|
[] |
no_license
|
nadiapavlova/Exploring-Data--Project-1
|
07ef2afbc261e7ce985e8eda20019519b57f7e20
|
1dea432b840ce218969346b066b273477a9b9266
|
refs/heads/master
| 2021-05-04T14:47:10.647608
| 2018-02-04T19:25:05
| 2018-02-04T19:25:05
| 120,210,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,647
|
r
|
plot4.R
|
# Coursera Data Science: Exploratory Data Analysis
# Course project 1: plot4
# load the data
rm(list = ls())
if(!file.exists("data")) {dir.create("data")}
fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileurl, destfile = "./data/c4project1.zip", method = "curl")
unzip(zipfile = "./data/c4project1.zip",exdir = "./data")
powercon <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
str(powercon)
# Subset the data by the the dates below
powercon1 <- powercon[powercon$Date %in% c("1/2/2007","2/2/2007") ,]
# Create a new date and time variable
powercon1$DateTime <- strptime(paste(powercon1$Date, powercon1$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Graph the plots
png(filename = "plot4.png", width=480, height=480)
par(mfrow = c(2,2), mar = c(4,4,2,1), oma = c(0,0,2,0))
with (powercon1, {
plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(DateTime, Sub_metering_2, type = "l", col = "red")
lines(DateTime, Sub_metering_3, type = "l", col = "blue")
legend("topright",col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
# dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
|
ba5f7069ac54092f35a5b9eb184f38155402db35
|
880100a71e2ddee1a893599e0d0008acda6b0bd4
|
/R/DNEbar.R
|
f8a409168d6a82f9c4f7951a85be9af82828385f
|
[] |
no_license
|
cran/molaR
|
e3ebbfd4d2c9a641871e7a94b461434df1ea0f79
|
4fdd31517ba9593284f4e0678756a1c0b34775df
|
refs/heads/master
| 2023-02-09T01:03:05.551332
| 2023-01-26T23:30:02
| 2023-01-26T23:30:02
| 48,084,328
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,103
|
r
|
DNEbar.R
|
#' Plot advanced results of a DNE surface analysis
#'
#' a molaR plotting function
#'
#' @param DNE_File An object that stores the output of the DNE()
#' function
#' @param main User's title for plot.
#' @param convexCol Color for the convex DNE total. Default='hotpink'
#' @param concaveCol Color for the concave DNE total. Default='deepskyblue'
#' @param type string to determine what parameters to plot. Default=both and
#' both concave and convex DNE totals will be plotted in stacked bar plot. See details
#' @param legendPos string to determine location of the legend. Default='topright'
#' see details.
#' @param legendInset numeric value determining how far to inset the legend from plot
#' boarder. Default=0
#' @param las logical indicating orientation of the x-axis labels for each bar plot. Enter either 1 or 2.
#' @param names.arg concatenated string of surface names for labels. If none supplied function will
#' pull names from the object itself.
#' @param cex.names Font size for the bar labels. Default is 1.
#'
#' @details This function creates a stacked barplot of DNE values. It colors them according
#' to curve orientation, which is defined by the `kappa` parameter in `DNE()` function. If multiple
#' DNE objects are grouped together the barplot will return a set. When employed on a single
#' DNE object this will return a single stacked bar.
#'
#' The argument `type` accepts either 'Concave' or 'Convex' to plot only concave or convex
#' DNE totals respectively. Default=NA and results in both totals being plotted in stacked barplot.
#'
#' The argument `legendPos` is a string that determines the position of the legend. Default='topright'
#' but will accept any of the following keywords: 'bottomright', 'bottom', 'bottomleft', 'left', 'topleft', 'top',
#' 'topright', 'right', or 'center'.
#'
#'
#' @export
#' DNEbar
#'
#' @examples
#' DNEs <- list()
#' DNEs$Tooth <- DNE(Tooth)
#' DNEs$Hills <- DNE(Hills)
#' DNEbar(DNEs)
DNEbar <- function(DNE_File, main='', convexCol='hotpink', concaveCol='deepskyblue', type='both', legendPos='topright', legendInset=0, las=1, names.arg='', cex.names=1){
if(type=='both') {
if(inherits(DNE_File, 'DNE_Object')==T){
mit <- as.matrix(c(DNE_File$Convex_DNE, DNE_File$Concave_DNE))
labs <- names(DNE_File)
}
if(inherits(DNE_File, 'list')==T){
count <- length(DNE_File)
Vdne <- vector(mode='numeric', length=count)
Cdne <- vector(mode='numeric', length=count)
for (i in 1:count) {
Vdne[i] <- DNE_File[[i]]$Convex_DNE
Cdne[i] <- DNE_File[[i]]$Concave_DNE
labs <- names(DNE_File)
}
mit <- rbind(Vdne, Cdne)
mit <- as.matrix(mit)
}
if(length(names.arg)>1){
labs <- names.arg
}
barplot(mit, col=c(convexCol, concaveCol), main=main, xlab='DNE Surface', ylab='DNE Total', names.arg=c(labs), las=las, cex.names=cex.names)
legend(legendPos, inset=legendInset, legend=c('Concave DNE', 'Convex DNE'), fill=c(concaveCol, convexCol), cex=0.5)
}
if(type!='both' && type!='Convex' && type!='Concave'){
stop("Choose One Orientation to Plot, with type='Concave' or 'Convex' or leave as 'both'")
}
if(type=="Convex") {
if(inherits(DNE_File, 'list')==T){
count <- length(DNE_File)
Vdne <- vector(mode='numeric', length=count)
for (i in 1:count) {
Vdne[i] <- DNE_File[[i]]$Convex_DNE
labs <- names(DNE_File)
}
}
if(length(names.arg)>1){
labs <- names.arg
}
barplot(Vdne, col=convexCol, main=main, xlab='DNE Surface', ylab='DNE Total', names.arg=c(labs), las=las, cex.names=cex.names)
legend(legendPos, inset=legendInset, legend=c('Convex DNE'), fill=c(convexCol), cex=0.5)
}
if(type=='Concave') {
if(inherits(DNE_File, 'list')==T){
count <- length(DNE_File)
Cdne <- vector(mode='numeric', length=count)
for (i in 1:count) {
Cdne[i] <- DNE_File[[i]]$Concave_DNE
labs <- names(DNE_File)
}
}
if(length(names.arg)>1){
labs <- names.arg
}
barplot(Cdne, col=concaveCol, main=main, xlab='DNE Surface', ylab='DNE Total', names.arg=c(labs), las=las, cex.names=cex.names)
legend(legendPos, inset=legendInset, legend=c('Concave DNE'), fill=c(concaveCol), cex=0.5)
}
}
|
23490aba8c50c5588b477d1f498eed232ee5a229
|
19749d7a0180996920b35ffb4aae218ea6f36975
|
/scriptR/src/functions.R
|
b6a041a30c25656c5e336d8588d7273848cfb938
|
[
"Apache-2.0"
] |
permissive
|
Guoshuai1314/LoopExtrusion
|
40db7549f73fe4a59e3c8a240098324f596fd752
|
36a7fca573715850a890bf38d0ab3ef880f977cb
|
refs/heads/main
| 2023-01-31T18:51:40.603627
| 2020-12-16T14:05:20
| 2020-12-16T14:05:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,989
|
r
|
functions.R
|
#Functions
#For HiC
loadRData <- function(fileName){
#loads an RData file, and returns it
load(fileName)
get(ls()[ls() != "fileName"])
}
get_sub_matrix_Hic <- function(file,bed,binsize){
HTC <- loadRData(file)
rows.chr <- x_intervals(HTC)
val1 <- HTC %>% intdata %>% sum
# HTC <- normPerReads(HTC)
overlaps <- as(findOverlaps(bed,rows.chr), "List")
overlaps.bin <- lapply(overlaps,length) %>% unlist()
overlaps <- overlaps[overlaps.bin == binsize]
res <- lapply(1:length(overlaps),function(i){
x <- overlaps[[i]]
if(as.character(strand(bed[i])) == "-"){
x <- rev(x)
}
intdata(HTC)[x,x]
})
return(list(res,val1))
}
get_nb_read_by_chr <- function(file){
HTC <- loadRData(file)
HTC %>% intdata %>% sum
}
plot_ratio <-function(data.ratio,my.quantile = 0.95,facet=TRUE,window = 1000000,fixed = F,fixed.limite = c(0.1,0.1),DSB = "DSB"){
if(fixed ==T){
limite <- fixed.limite
if(length(limite)<2){
limite <- rep(limite,2)
}
}else{
limite <- data.ratio %>% pull(ratio) %>% quantile(my.quantile) %>% as.numeric()
limite <- rep(limite,2)
}
debut <- data.ratio$Var1 %>% levels %>% as.numeric() %>% min()
milieu <- data.ratio$Var1 %>% levels %>% as.numeric() %>% median()
fin <- data.ratio$Var1 %>% levels %>% as.numeric() %>% max()
p.ratio <- data.ratio %>%
mutate(ratio = ifelse(ratio > limite[2],limite[2],ratio)) %>%
mutate(ratio = ifelse(ratio < -limite[1],-limite[1],ratio)) %>%
ggplot(aes(x=Var1,y=Var2,fill=ratio)) + geom_tile() +
scale_fill_gradient2(low = "#2980b9",high = "#f1c40f",midpoint = 0,mid = "black",limits = c(-limite[1],limite[2])) +
scale_x_discrete(name = 'Position',
breaks = c(debut,milieu,fin),
labels = c(-window,DSB,window)
) +
scale_y_discrete(name = 'Position',
breaks = c(debut,milieu,fin),
labels = c(-window,DSB,window)
) +
# geom_vline(xintercept = milieu,linetype ="dashed",col="red")+
theme_classic() +
theme(legend.position="bottom")
if(facet == TRUE){
p.ratio + facet_wrap(~Replicate,ncol=1)
}else{
p.ratio
}
}
plot_telquel <-function(data.plot,my.quantile = 0.90,facet=TRUE,window = 1000000,fixed = F,fixed.limite = 1000,DSB = "DSB"){
if(fixed ==T){
limite <- fixed.limite
}else{
limite <- data.plot %>% pull(value) %>% quantile(my.quantile) %>% as.numeric()
}
debut <- data.plot$Var1 %>% levels %>% as.numeric() %>% min()
milieu <- data.plot$Var1 %>% levels %>% as.numeric() %>% median()
fin <- data.plot$Var1 %>% levels %>% as.numeric() %>% max()
p.ratio <- data.plot %>%
mutate(value = ifelse(value > limite,limite,value)) %>%
# mutate(value = log10(value)) %>%
ggplot(aes(x=Var1,y=Var2,fill=value)) + geom_tile() +
scale_fill_gradient2(low = "white",high = "#EA2027",limits = c(0,limite)) +
scale_x_discrete(name = 'Position',
breaks = c(debut,milieu,fin),
labels = c(-window,DSB,window)
) +
scale_y_discrete(name = 'Position',
breaks = c(debut,milieu,fin),
labels = c(-window,DSB,window)
) +
# geom_vline(xintercept = milieu,linetype ="dashed",col="red")+
theme_classic() +
theme(legend.position="bottom")
if(facet == TRUE){
p.ratio + facet_wrap(~Replicate+Condition,ncol=2)
}else{
p.ratio
}
}
#Boxplot 4CSeq
Get1valSum <- function(my.wigs,one.w,x){
lapply(split(x,droplevels(seqnames(x))),function(zz){
message(unique(as.character(seqnames(zz))))
cov <- one.w[[unique(as.character(seqnames(zz)))]]
score <- Views( cov, start = start(zz), end = end(zz) ) %>% sum()
tibble(wig = my.wigs,value = score,rowname = zz$name)
}) %>% bind_rows()
}
Get1val <- function (Name, one.w, x)
{
require(magrittr)
lapply(split(x, droplevels(seqnames(x))), function(zz) {
message(unique(as.character(seqnames(zz))))
cov <- one.w[[unique(as.character(seqnames(zz)))]]
score <- IRanges::Views(cov, start = start(zz), end = end(zz)) %>%
mean()
tibble::tibble(wig = Name, value = score, rowname = zz$name)
}) %>% bind_rows()
}
#Loop
process_loop <- function(my.table,sizeselector = 200000){
x1 <- my.table %>% dplyr::select(X1,X2,X3) %>%
dplyr::rename(seqnames = X1) %>%
mutate(seqnames = str_c("chr",seqnames)) %>%
dplyr::rename(start = X2) %>%
dplyr::rename(end = X3) %>%
as_granges() %>% mutate(name = str_c("chr1_",1:nrow(my.table)))
x2 <- my.table %>% dplyr::select(X4,X5,X6) %>%
dplyr::rename(seqnames = X4) %>%
mutate(seqnames = str_c("chr",seqnames)) %>%
dplyr::rename(start = X5) %>%
dplyr::rename(end = X6) %>%
as_granges() %>% mutate(name = str_c("chr2_",1:nrow(my.table)))
sizeLoop <- abs(start(x1) - end(x2))
sizeGroup <- ifelse(sizeLoop < sizeselector,str_c("<",sizeselector),str_c(">",sizeselector))
x1$sizeGroup <- sizeGroup
x2$sizeGroup <- sizeGroup
return(list(x1,x2))
}
#BIGWIG
#process BW
computeProfile = function( bed, wig, w = 20000, span = 200, seqlens ,method="mean"){
if( class( wig ) != "SimpleRleList" ){
stop( "ERROR : unknown class of wig, please provide a SimpleRleList" );
}
mat = NULL;
for( i in 1:length( bed ) ){
message( i, "/", length( bed ) );
bedi = bed[i, ];
chr = as.character( seqnames( bedi ) );
cov = wig[[chr]];
center = start( bedi ) + 4;
stW = center - w;
edW = center + w;
if( span == 1 ){
vm = as.numeric( Views( cov, start = stW, end = edW )[[1]] )
}else{
sts = seq( stW, edW - span + 1, span );
eds = seq( stW + span - 1, edW, span );
v = Views( cov, start = sts, end = eds );
if(method =="sum"){
vm = sum( v );
}else {
vm = mean( v );
}
vm[sts >= seqlens[chr] | sts > length( cov )] = 0;
vm[sts < 1] = 0;
}
mat = rbind( mat, vm );
}
#rv = colMeans( mat );
return( mat );
}
ParaleliseViewboxplot <- function(x,one.w,list.sites){
subdom <- list.sites[[x]]
lapply(split(subdom,droplevels(seqnames(subdom))),function(zz){
zz <- zz %>% anchor_center() %>% mutate(width = 2*window)
message(unique(as.character(seqnames(zz))))
cov <- one.w[[unique(as.character(seqnames(zz)))]]
scoredat <- Views( cov, start = start(zz), end = end(zz) )%>% sum()
zz %>% as_tibble() %>% mutate(score = scoredat) %>% mutate(Type = x)
}) %>% bind_rows()
}
ParaleliseViewprofile <- function(x,one.w,list.sites){
subdom <- list.sites[[x]]
d1 <- computeProfile(bed=subdom,wig = one.w,seqlens = seqlens,w = window,span=span) %>% colMeans()
data.frame(Value=d1,Windows=seq(-window,window-span+1,span),Type=x)
}
#APA
process_APA <- function(file){
seqpos <- seq(-10,10,by=1) %>% as.character()
my.dat <- file %>% read_csv(col_names = F) %>%
mutate_if(is.character,str_remove,"\\[|\\]") %>%
mutate_if(is.character,as.numeric) %>%
as.matrix()
colnames(my.dat) <- seqpos
rownames(my.dat) <- rev( seqpos )
my.dat <- my.dat %>% reshape2::melt() %>%
mutate(file = file) %>%
mutate(DSB = str_extract(file,"174clived|80best")) %>%
mutate(Type = str_extract(file,"anchorLeftAsiSI|damaged|notdamaged")) %>%
mutate(Condition = str_extract(file,"manipA_OHT|manipA_DIvA|manipB_OHT|manipB_DIvA"))
return(my.dat)
}
|
51120e6eb4d328bd4539e1988a5cbc2983f1d0ba
|
7fddad3797bd8df11c687a71c77293be3719efce
|
/clusteringBasic.R
|
900c6916946475fb08e74ba69daf735c3a5537fc
|
[] |
no_license
|
DenaliCarpenter/gradientMetricsM1
|
063acd301f9624e1915ab0feb1a5090c172e2362
|
6df5349f7e37446416f54bf5f40d49cf5853a946
|
refs/heads/master
| 2023-01-24T01:41:44.214813
| 2020-12-04T05:24:06
| 2020-12-04T05:24:06
| 316,644,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,268
|
r
|
clusteringBasic.R
|
library(tidyverse)
library(cluster)
library(factoextra)
library(haven)
library(fclust)
library(ggplot2)
library(readr)
library(Rtsne)
library(sjlabelled)
library(fpc)
library(reshape2)
library(dendextend)
library(klaR)
library(radiant)
# Data Cleaning -----------------------------------------------------------
surveyData <- read_sav("survey_data.sav")
surveyDataCategorical <- surveyData %>%
replace(is.na(.), 0) %>%
dplyr::select(-weights) %>%
column_to_rownames(var = "response_id") %>%
mutate_all(as.factor) %>%
remove_all_labels(.)
# K-Modes -----------------------------------------------------------------
set.seed(123)
totalWithinDiff <- data.frame("n" = vector(length = 9), "total within diff" = vector(length = 9))
for(i in 2:10){
totalWithinDiff$n[i - 1] <- i
totalWithinDiff$total.within.diff[i - 1] <- sum(kmodes(surveyDataCategorical, i)$withindiff)
}
p <- ggplot(aes(x = n, y = total.within.diff), data = totalWithinDiff) +
geom_point() +
geom_line()
p
kmodesModelFinal <- kmodes(surveyDataCategorical, 4, iter.max = 400)
surveyDataClustered <- surveyData %>%
bind_cols(kmodesModelFinal$cluster) %>%
rename("cluster" = ...101) %>%
group_by(cluster) %>%
dplyr::select(-response_id) %>%
mutate_all(mode)
|
06d2cf8fff3c0d26e6dd04b6689a7a9e0f4caecc
|
e17c6aec7115cb53939b784a87f5909be5fff032
|
/Matrix on a Plot.R
|
e20cd49eeb5d2c9a586bb901d9840a1478371b72
|
[] |
no_license
|
fawnshao/rexamples
|
801ca734159a46ac67ed03b578001529563d3142
|
8e61d423237da5cb409f032dd896903fe8ac68c4
|
refs/heads/master
| 2021-01-18T05:46:15.505501
| 2013-06-11T01:26:21
| 2013-06-11T01:26:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 196
|
r
|
Matrix on a Plot.R
|
library(gplots) #after install.packages("gplots")
m <- matrix(sample(9), 3)
matplot(m, type='l')
par(fig = c(0.5, 1, 0, 0.5), new = T)
textplot(m, show.rownames = F, show.colnames = F, cex = 0.9)
|
7a7be563fb7282a9a6c57c3c055a23caaaa73520
|
3e19721d348058ca7f56660d06b9873e3110b161
|
/analyze text.R
|
c0388e8a8e81a1d3b225e9f543db12bdd8d01386
|
[] |
no_license
|
dhicks/NVIC-analysis
|
1928f1ffda0e56bfdea2bd03442ce592b96fa07f
|
4bdd7f0bfb65b5bbcd882435a4009ca18e30b2b5
|
refs/heads/master
| 2021-01-02T09:15:16.651118
| 2017-01-11T17:02:47
| 2017-01-11T17:02:47
| 78,681,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,112
|
r
|
analyze text.R
|
library(tidyverse)
library(tidytext)
library(stringr)
load('www.nvic.org.Rdata')
## Remove empty documents
parsed_site = parsed_site %>%
filter(!is.na(title), !is.na(text))
## For development purposes, work with a much smaller set of documents
# parsed_site = parsed_site %>% sample_frac(size = .25)
## The simplest way to get a count of document lengths is to go through token_counts once first?
tokens = parsed_site %>%
unnest_tokens(token, text)
token_counts = tokens %>%
group_by(path, title, token) %>%
summarize(token_n = n()) %>%
ungroup() #%>%
# bind_tf_idf(term_col = token, document_col = path, n_col = token_n)
docs = token_counts %>%
group_by(path, title) %>%
summarize(length = sum(token_n)) %>%
ungroup()
## Difference between docs and parsed_site due to un-OCRed PDFs
## ECDF of document length
ggplot(docs, aes(length)) + stat_ecdf() + geom_rug()
## Manually remove some problem documents
problem_docs = c('BLF-vs--Offit---Plaintiff-s-Opposition-to-Defendan.aspx')
docs = docs %>% filter(!str_detect(path, problem_docs))
token_counts = token_counts %>%
filter(path %in% docs$path) %>%
bind_tf_idf(term_col = token, document_col = path, n_col = token_n)
vocabulary = token_counts %>%
group_by(token) %>%
summarize(idf = first(idf),
tf_idf = max(tf_idf))
ggplot(vocabulary, aes(idf, tf_idf)) + geom_point() + stat_smooth()
ggplot(vocabulary, aes(idf)) + stat_ecdf()
ggplot(vocabulary, aes(tf_idf)) + stat_ecdf()
dtm = token_counts %>%
filter(tf_idf >= quantile(vocabulary$tf_idf, probs = .8)) %>%
cast_dtm(path, token, token_n)
# dist = proxy::dist(as.matrix(tdm), method = 'cosine')
## Use this instead: http://stackoverflow.com/a/29755756/3187973
library(slam)
cosine_sim_dtm <- crossprod_simple_triplet_matrix(t(dtm))/(sqrt(row_sums(dtm^2) %*% t(row_sums(dtm^2))))
library(apcluster)
clustered = apcluster(cosine_sim_dtm, details = TRUE, q = .5)
clustered
## Uncommon to generate a Tikz version
# tikzDevice::tikz(width = 12, height = 12, file = 'cluster_heatmap.tex',
# standAlone = TRUE, sanitize = TRUE)
heatmap(clustered, cosine_sim_dtm)
# dev.off()
|
b5ce3ab65e710e76d1c162403c9c2b29131d8423
|
fa2be78cc25d98d6d5dd2a1456104dea264ef5a8
|
/R/imports.R
|
92d75c7a443a62c03353b28ed02aa8d473abbee8
|
[] |
no_license
|
sponslerdb/hivescaler
|
5929018d70f8277c663c0a5a62a743fd004a6a8f
|
3643fdaf79515fc8b9a8d744810ead9ee79f4ccf
|
refs/heads/master
| 2021-06-09T06:31:43.212438
| 2019-12-02T04:06:57
| 2019-12-02T04:06:57
| 140,870,760
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 311
|
r
|
imports.R
|
#' @import ggplot2
#' @import dplyr
#' @import tidyr
#' @import readr
#' @import purrr
#' @import tibble
#' @import stringr
#' @import forcats
#' @importFrom magrittr %>%
#' @importFrom zoo rollmean index
#' @importFrom timetk tk_xts
#' @import xts
#' @import dtwclust
#' @import harrietr
#' @import rnoaa
NULL
|
7fa781fa611c2065bdc30674f9505cffcc47cab6
|
2f33e51b4937b0c5b9ed6ef12e60ce1d38f254c6
|
/R/2019/2019_Week47_NZBirdsOfTheYear.R
|
1299fc7ea23112c17e465b937a6081904b72a534
|
[] |
no_license
|
MaiaPelletier/tidytuesday
|
54c384d5a631957e76af83ac9f734f87638536e6
|
ed25cf98fbafaaedc76df729d6170f143d7ddc5e
|
refs/heads/master
| 2021-10-12T00:13:01.730556
| 2021-09-27T20:28:35
| 2021-09-27T20:28:35
| 219,841,220
| 15
| 7
| null | 2020-12-04T14:23:40
| 2019-11-05T20:10:06
|
R
|
UTF-8
|
R
| false
| false
| 7,877
|
r
|
2019_Week47_NZBirdsOfTheYear.R
|
# Week 47 - NZ Bird of the Year -------------------------------------------
library(tidyverse)
library(tidytuesdayR)
library(Cairo)
## Get data from tidytuesdayR pagkage
tuesdata <- tt_load(2019, week = 47)
nz_bird <- tuesdata$nz_bird
## A little bit of tidying
nz_bird <-
nz_bird %>%
mutate(
vote_rank = str_replace(vote_rank, '_', ' '),
vote_rank = str_to_title(vote_rank)
)
# Get the top birds!
top_birds <-
nz_bird %>%
filter(!is.na(bird_breed)) %>%
count(bird_breed, sort = TRUE) %>%
top_n(4) %>%
pull(bird_breed)
# Lollipop plot 1 ----------------------------------------------------------------
nz_bird %>%
filter(bird_breed %in% top_birds) %>%
count(bird_breed) %>%
ggplot() +
geom_segment(aes(x = bird_breed, xend = bird_breed, y = 0, yend = n), color = 'grey35') +
geom_point(aes(reorder(x = bird_breed, -n), y = n, color = bird_breed), size = 6) +
labs(x = NULL, y = 'Total votes for candidate',
caption = 'Data from the New Zealand Forest and Bird Organization, courtesy of Dragonfly Data Science') +
ggtitle('The race between the Yellow-Eyed Penguin and its runner-ups',
subtitle = 'New Zealand bird of the year 2019 voting results') +
ggthemes::scale_color_canva(palette = 'Birds and berries', name = 'Bird breed') +
ggthemes::theme_tufte() +
theme(axis.title.y = element_text(size = 10, face = 'italic', color = 'grey40'),
axis.text.y = element_text(size = 8, color = 'grey50'),
axis.text.x = element_text(size = 10, color = 'grey35'),
legend.title = element_text(size = 9, face = 'italic', color = 'grey35'),
legend.text = element_text(size = 8, color = 'grey30'),
legend.position = 'top',
plot.title = element_text(size = 15, face = 'italic', color = 'grey30', hjust = 0.5),
plot.subtitle = element_text(size = 8, color = 'grey55', hjust = 0.5),
plot.caption = element_text(size = 6.5, color = 'grey30'))
ggsave(
'TidyTuesday/Week 47 - NZ Birds of the Year/Week47_NZBirdsOfTheYear_LollipopPlot.png',
type = 'cairo'
)
# Lollipop plot 2 ---------------------------------------------------------
nz_bird %>%
filter(bird_breed %in% top_birds) %>%
count(vote_rank, bird_breed) %>%
ggplot() +
geom_segment(aes(x = vote_rank, xend = vote_rank, y = 0, yend = n), color = 'grey45') +
geom_point(aes(reorder(x = vote_rank, -n), y = n, color = bird_breed), size = 3) +
labs(x = NULL, y = 'Number of votes in each voting tier',
caption = 'Data from the New Zealand Forest and Bird Organization, courtesy of Dragonfly Data Science') +
ggtitle('The race between the Yellow-Eyed Penguin and its runner-ups',
subtitle = 'New Zealand bird of the year 2019 voting results') +
facet_wrap(.~bird_breed) +
ggthemes::scale_color_canva(palette = 'Birds and berries', name = 'Bird breed') +
ggthemes::theme_tufte() +
theme(axis.title.y = element_text(size = 10, face = 'italic', color = 'grey40'),
axis.text.y = element_text(size = 8, color = 'grey50'),
axis.text.x = element_text(size = 10, color = 'grey35'),
legend.title = element_text(size = 9, face = 'italic', color = 'grey35'),
legend.text = element_text(size = 8, color = 'grey30'),
legend.position = 'top',
plot.title = element_text(size = 15, face = 'italic', color = 'grey30', hjust = 0.5),
plot.subtitle = element_text(size = 8, color = 'grey55', hjust = 0.5),
plot.caption = element_text(size = 6.5, color = 'grey30'),
strip.background = element_rect(fill = 'white', color = 'grey75'),
strip.text = element_text(size = 9, color = 'grey35'))
# Time plot ---------------------------------------------------------------
# Line chart of daily vote counts for the most popular breeds.
# Assumes `nz_bird` (one row per vote, with `date` and `bird_breed`) and
# `top_birds` are defined earlier in the script — TODO confirm.
nz_bird %>%
filter(bird_breed %in% top_birds) %>%
count(date, bird_breed) %>%
ggplot(aes(date, n, group = bird_breed)) +
geom_line(aes(color = bird_breed), size = 1) +
labs(x = NULL, y = 'Number of votes on given day',
caption = 'Data from the New Zealand Forest and Bird Organization, courtesy of Dragonfly Data Science') +
ggtitle('The race between the Yellow-Eyed Penguin and its runner-ups',
subtitle = 'New Zealand bird of the year 2019 voting results') +
# One labelled tick every three days, formatted like "Oct 28".
scale_x_date(date_labels = '%b %d', date_breaks = '3 day') +
ggthemes::scale_color_canva(palette = 'Birds and berries', name = 'Bird breed') +
ggthemes::theme_tufte() +
theme(axis.title.y = element_text(size = 10, face = 'italic', color = 'grey40'),
axis.text.y = element_text(size = 8, color = 'grey50'),
axis.text.x = element_text(size = 10, face = 'italic', color = 'grey35'),
legend.title = element_text(size = 9, face = 'italic', color = 'grey35'),
legend.text = element_text(size = 8, color = 'grey30'),
legend.position = 'top',
plot.title = element_text(size = 15, face = 'italic', color = 'grey30', hjust = 0.5),
plot.subtitle = element_text(size = 8, color = 'grey55', hjust = 0.5),
plot.caption = element_text(size = 6.5, color = 'grey30'))
# Saves the most recently drawn plot; the cairo type gives anti-aliased
# PNG output on Windows.
ggsave(
'TidyTuesday/Week 47 - NZ Birds of the Year/Week47_NZBirdsOfTheYear_TimePlot.png',
type = 'cairo'
)
# Dumbbell plot -----------------------------------------------------------
# For each vote rank, find the winning breed and the runner-up together
# with the fraction of that rank's votes each received. Two count()-based
# pipelines over `nz_bird` are joined on `vote_rank`: the first recovers
# the two breed names, the second their vote fractions.
winning_birds <-
right_join(
nz_bird %>%
count(vote_rank, bird_breed) %>%
filter(!is.na(bird_breed)) %>%
group_by(vote_rank) %>%
top_n(2) %>%
mutate(min_votes = min(n),
rank = ifelse(n == min_votes, 'runner_up_breed', 'winner_breed')) %>%
pivot_wider(names_from = rank, values_from = bird_breed, id_cols = vote_rank),
nz_bird %>%
count(vote_rank, bird_breed, name = 'num_of_votes') %>%
filter(!is.na(bird_breed)) %>%
group_by(vote_rank) %>%
mutate(total_votes = sum(num_of_votes)) %>%
top_n(2, num_of_votes) %>%
mutate(frac_votes = num_of_votes/total_votes) %>%
mutate(
min_votes = min(frac_votes),
rank = ifelse(frac_votes == min_votes, 'runner_up', 'winner')) %>%
select(-min_votes, -num_of_votes, -total_votes) %>%
pivot_wider(
names_from = rank,
values_from = frac_votes,
id_cols = vote_rank
),
by = 'vote_rank'
)
# Order the ranks "Vote 5" ... "Vote 1" so that after coord_flip() below
# "Vote 1" appears at the top of the chart.
winning_birds$vote_rank <-
factor(
winning_birds$vote_rank, levels = paste('Vote', 5:1)
)
# One "dumbbell" per rank: a grey segment from the runner-up's vote share
# to the winner's, with a colored point at each end.
ggplot(winning_birds) +
geom_segment(aes(x = vote_rank, xend = vote_rank, y = runner_up, yend = winner), color = 'grey60') +
geom_point(aes(x = vote_rank, y = runner_up, color = runner_up_breed), size = 4) +
geom_point(aes(x = vote_rank, y = winner, color = winner_breed), alpha = 0.8, size = 5) +
labs(y = 'Fraction of total votes received', x = NULL, caption = 'Data from the New Zealand Forest and Bird Organization, courtesy of Dragonfly Data Science') +
ggtitle('The race between the Yellow-Eyed Penguin and its runner-ups',
subtitle = 'New Zealand bird of the year 2019 voting results') +
coord_flip() +
ggthemes::scale_color_canva(palette = 'Birds and berries', name = 'Bird breed') +
ggthemes::theme_tufte() +
theme(axis.title.x = element_text(size = 10, face = 'italic', color = 'grey40'),
axis.text.x = element_text(size = 8, color = 'grey50'),
axis.text.y = element_text(size = 10, face = 'italic', color = 'grey35'),
legend.title = element_text(size = 9, face = 'italic', color = 'grey35'),
legend.text = element_text(size = 8, color = 'grey30'),
legend.position = 'top',
plot.title = element_text(size = 15, face = 'italic', color = 'grey30', hjust = 0.5),
plot.subtitle = element_text(size = 8, color = 'grey55', hjust = 0.5),
plot.caption = element_text(size = 6.5, color = 'grey30'))
ggsave(
'TidyTuesday/Week 47 - NZ Birds of the Year/Week47_NZBirdsOfTheYear.png',
type = 'cairo'
)
|
dfc9d73029a00f3dedb9ef39eb8485072f6397c1
|
b49c539a9b70f493c3f20b07fc087318b22ad0da
|
/man/new_factor.Rd
|
fea1bf4d9d47c2b376338865c1305e36137aab89
|
[] |
no_license
|
Tangjiahui26/foofactors
|
5f3c753752ead478542f9b128b248e3698a994e6
|
574c5e41179cb867901c021ee974894cb94dda1b
|
refs/heads/master
| 2021-08-21T20:31:18.865133
| 2017-11-29T01:05:06
| 2017-11-29T01:05:06
| 111,994,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
new_factor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new_factors.R
\name{new_factor}
\alias{new_factor}
\title{Set levels of a factor to the order appeared in data}
\usage{
new_factor(x)
}
\arguments{
\item{x}{a factor}
}
\value{
a factor with levels in the order appeared in x.
}
\description{
Set levels of a factor to the order appeared in data
}
\examples{
new_factor(factor(c("d","b","a","d")))
}
|
6bb56de4db9634569b2468ed5b69f9a9f9955f4c
|
cbd70b829a3dffc23bffe01929d7732f2df815f5
|
/temp.R
|
7d0d98058b4c840694b3761cea9efd449a661950
|
[] |
no_license
|
kimberlyroche/ROL
|
603ac7a71a0487b1ff999a14cc34681090e299fc
|
d33c8f63c692a55cdca9c551b718f1c04929d6f8
|
refs/heads/master
| 2021-02-18T23:58:43.631388
| 2020-12-08T23:47:55
| 2020-12-08T23:47:55
| 245,254,434
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
temp.R
|
# Scratch script: regenerate 2-D posterior summaries and diagnostic plots
# for every dataset supported by the ROL package.
# NOTE(review): rm(list = ls()) wipes the global environment — acceptable
# for an interactive scratch file, but avoid in reusable code.
rm(list = ls())
library(ROL)
# Summarize all host pairs from each dataset's precomputed 2-D summary.
d1 <- load_Johnson_2D_summary(); summarize_all_pairs_2D(d1)
d1 <- load_Caporaso_2D_summary(); summarize_all_pairs_2D(d1)
d1 <- load_David_2D_summary(); summarize_all_pairs_2D(d1)
d1 <- load_DIABIMMUNE_2D_summary(); summarize_all_pairs_2D(d1)
d1 <- load_McMahon_shallow_2D_summary(); summarize_all_pairs_2D(d1)
d1 <- load_McMahon_deep_2D_summary(); summarize_all_pairs_2D(d1)
d1 <- load_Grossart_2D_summary(); summarize_all_pairs_2D(d1)
# Re-load each summary with bar-chart visualization enabled (side effect:
# plots are produced by the loaders; return values are discarded).
viz <- "bars"
test <- load_Johnson_2D_summary(visualize = viz)
test <- load_Caporaso_2D_summary(visualize = viz)
test <- load_David_2D_summary(visualize = viz)
test <- load_DIABIMMUNE_2D_summary(visualize = viz)
test <- load_McMahon_shallow_2D_summary(visualize = viz)
test <- load_McMahon_deep_2D_summary(visualize = viz)
test <- load_Grossart_2D_summary(visualize = viz)
|
c5e160a6b8449eda694eeada6b2f1472e54610f9
|
0dc7121ee1e033ffca6575849e7ce9bed2c7d0c2
|
/man/get_append_weekly_log_returns.Rd
|
73eb27eaa3fe1a1c9f70f5b4aeb9c3097a590d4b
|
[] |
no_license
|
gmahjub/steal-basis-r
|
156341e2812eaf721890ad25c2dc2fc304130242
|
67d3db66adb1b3aeb91a167b00220a02afe50502
|
refs/heads/master
| 2021-03-31T01:11:27.351523
| 2018-08-05T03:13:19
| 2018-08-05T03:13:19
| 125,122,687
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,455
|
rd
|
get_append_weekly_log_returns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/returns.R, R/zscore_stats_funcs.R
\name{get_append_weekly_log_returns}
\alias{get_append_weekly_log_returns}
\title{get_append_weekly_log_returns}
\usage{
get_append_weekly_log_returns(tibble_obj, fill_in_between_dates = TRUE)
}
\arguments{
\item{tibble_obj}{a tibble of price data; a call to tq_get() produces this input}
\item{fill_in_between_dates}{logical, default TRUE}
}
\value{
a tibble object with the appended weekly log returns.
}
\description{
Same functionality as the get_weekly_log_returns() function but returns the
original tibble_obj with the weekly log returns calculated as a new column.
In addition, it fills in the in-between days with the last weekly log return
that was calculated. This can be turned off by setting the
"fill_in_between_dates" argument to FALSE.
}
|
e0aaf0ef501c85771ce432f7bd1ca55d39155dbd
|
3b8e2dedcbf2774585962bc163ddaaf8eef36076
|
/man/MLEp.bsci.Rd
|
e5d19e4cf0f2693037cb3f27c8895a3e79c2fd1b
|
[
"MIT"
] |
permissive
|
ville-kinnula/SPEC
|
76d4eab4aaa2dc3408816b4f3abdd31f1a90a40e
|
f3705f38520c6678bcd38efd94cad5143919e561
|
refs/heads/master
| 2023-06-17T17:14:56.124246
| 2021-07-08T13:22:21
| 2021-07-08T13:22:21
| 357,901,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 786
|
rd
|
MLEp.bsci.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Parameter_estimation_and_hypothesis_testing.R
\name{MLEp.bsci}
\alias{MLEp.bsci}
\title{Bootstrap confidence interval for the MLE of psi}
\usage{
MLEp.bsci(x, level)
}
\arguments{
\item{x}{A data vector.}
\item{level}{Level of confidence interval as number between 0 and 1.}
}
\value{
The MLE of psi as well as lower and upper bounds of the bootstrap
confidence interval.
}
\description{
A bootstrapped confidence interval for the Maximum Likelihood Estimate for
psi based on a 100 bootstrap rounds with 80\% of data used at a time.
}
\examples{
## Find a 95\% -confidence interval for the MLE of psi given a sample from the
## Poisson-Dirichlet distribution:
x<-rPD(n=10000, psi=100)
MLEp.bsci(x, 0.95)
}
|
7836cd6977df1b0fd90be54ca1468875cebf7a3d
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955670-test.R
|
61a70c77811e90594e7cbb5aeaf94e4ae65afe8b
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
r
|
1609955670-test.R
|
# Auto-generated fuzzer/valgrind regression case: calls the internal
# ggforce:::enclose_points with a degenerate argument list (empty id
# vector, -Inf x, denormal/extreme y values) to check it does not crash.
testlist <- list(id = integer(0), x = -Inf, y = c(1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, 1.42602581597035e-105, -5.04975682804633e-195, -5.04975683349975e-195, -1.37738287133826e-196, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -4.14174398718279e+306, 2.3566586212048e-306, 0, 0, 0, 0, 0, 0, 0, -5.02584741025324e-195, -5.04975683349975e-195, 1.99990042892157, -4.62477845732711e-203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result)
|
bb39c16a6cc4e07523f8fabcfafdd4e1217db624
|
d09bd36acdde95c8596287fbcebb89dd4c7fb906
|
/R/plotfuns.R
|
7011f9470c2a139884874f1688b1c5a72d496737
|
[] |
no_license
|
MingweiWilliamTang/phyloInt
|
de68a79de30880573527de3ff0700ab3cd2b9f0e
|
504b651261ed6edc5aedca4c48b656fe401f52c5
|
refs/heads/master
| 2020-04-25T14:42:46.085222
| 2019-07-29T19:11:35
| 2019-07-29T19:11:35
| 172,850,819
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,262
|
r
|
plotfuns.R
|
#Rcpp::sourceCpp('src/SIRS_period.cpp')
#Rcpp::sourceCpp("src/SIR_BD.cpp")
#source('~/Dropbox/Research/GP/code/LNAPhyloDyn/R/coal_simulation.R', echo=TRUE)
#source('~/Dropbox/Research/GP/code/LNAPhyloDyn/R/SIR_LNA_standard.R', echo=TRUE)
# Quick-look diagnostic: draw the S, I and R components of an epidemic
# trajectory (columns 2-4 of `traj`, with time in column 1) in a 2x2 layout.
# NOTE(review): dev.off() closes the active device first and errors if no
# device is open — confirm callers always have one.
plot3 = function(traj) {
  dev.off()
  par(mfrow = c(2, 2), mar = c(4, 4, 1, 1))
  panel_labels <- c("suscepitible1", "Infected", "Recovered")
  for (j in 2:4) {
    plot(traj[, 1], traj[, j], type = "l", xlab = "time",
         ylab = panel_labels[j - 1])
  }
}
# Effective population size from an epidemic trajectory.
# volz = TRUE : Ne = 1 / (2 * I * beta / S), with I in column 3 and S in
#               column 2 here.
# volz = FALSE: Ne = I / lambda (column 3 scaled by the coalescent rate).
# NOTE(review): a second `effpopfun` defined later in this file overrides
# this one and swaps columns 2 and 3 in the volz formula — confirm which
# parameterization is intended and remove the duplicate.
effpopfun = function(Traj,beta=0,lambda=1, volz = FALSE){
if(volz){
return(1 /(2 * Traj[,3] * beta / Traj[,2]))
}else{
return(Traj[,3] / lambda)
}
}
#' Add posterior effective-population-size sample paths to the current plot
#'
#' Draws one thin line per posterior sample: grey lines under the volz
#' parameterization (beta rescaled by the initial population size N), and
#' yellow lines otherwise (I(t) divided by the coalescent parameter stored
#' in column 5 of `par`).
#'
#' @param MCMC_res List returned by the MCMC functions, with a 3-D
#'   `Trajectory` array [time, component, sample] and a `par` matrix.
#' @param idxs Posterior sample indices to draw.
#' @param volz If TRUE, use the volz coalescent parameterization.
random_effpoptraj_line = function(MCMC_res,idxs,volz = FALSE){
# Initial population size N = S0 + I0 from the first stored trajectory.
N = MCMC_res$Trajectory[1,2,1] + MCMC_res$Trajectory[1,3,1]
#plot(N)
if(volz){
for(i in idxs){
lines(MCMC_res$Trajectory[,1,i],
effpopfun(MCMC_res$Trajectory[,,i],beta = MCMC_res$par[i,3] * N,volz = T),
lwd = 0.5, col = "grey")
}
}else{
for(i in idxs){
lines(MCMC_res$Trajectory[,1,i],
MCMC_res$Trajectory[,3,i] / MCMC_res$par[i,5],
lwd = 0.5, col = "yellow")
}
}
}
# Add the pointwise posterior median (med = T) or mean curve of one
# trajectory component (dimension `row`) to the current plot.
# Unless the volz parameterization is used, the component is rescaled by
# the coalescent parameter in column `lid` of the posterior draws.
# NOTE(review): that rescaling makes `scale` a vector of length(ids)
# divided into a time-by-sample matrix, so it recycles along the time
# axis — confirm this is intended.
medianCur = function(MCMC_obj, ids, scale = 1, col = "black", row = 3,
                     med = T, volz = F, lwd = 2, lid = 7, lty = 2) {
  if (volz == F) {
    scale = MCMC_obj$par[ids, lid]
  }
  draws <- MCMC_obj$Trajectory[, row, ids] / scale
  center <- if (med) apply(draws, 1, median) else apply(draws, 1, mean)
  lines(MCMC_obj$Trajectory[, 1, 1], center, col = col, lwd = lwd, lty = lty)
}
# Add the pointwise posterior median (med = T) or mean curve of one
# trajectory component to the current plot, restricted to times <= endTime
# (defaults to the full time range).
medianCur_end = function(MCMC_obj,ids,scale=1,col="black",row=3,med = T,endTime = NULL, lwd = 2,lty = 2){
if(is.null(endTime)){
endTime = max(MCMC_obj$Trajectory[,1,1])
}
# Logical mask selecting the grid points up to endTime.
time_id = (MCMC_obj$Trajectory[,1,1] <= endTime)
if(med){
lines(MCMC_obj$Trajectory[time_id ,1,1],apply(MCMC_obj$Trajectory[time_id ,row,ids]/scale,1,median),
col=col,lwd=lwd,lty=lty)
}else{
lines(MCMC_obj$Trajectory[time_id ,1,1],apply(MCMC_obj$Trajectory[time_id ,row,ids]/scale,1,mean),
col=col,lwd=lwd,lty = lty)
}
}
# Effective population size implied by an epidemic trajectory.
#
# Traj   : matrix with column 2 = S and column 3 = I over time (column 1
#          is time elsewhere in this file) — labels per `plot3` above.
# beta   : transmission rate, used only under the volz parameterization.
# lambda : coalescent scaling factor, used only when volz = FALSE.
# volz   : if TRUE, Ne(t) = 1 / (2 * S * beta / I); else Ne(t) = I / lambda.
effpopfun = function(Traj, beta = 0, lambda = 1, volz = FALSE) {
  if (volz) {
    1 / (2 * Traj[, 2] * beta / Traj[, 3])
  } else {
    Traj[, 3] / lambda
  }
}
# Shade a pointwise (1 - alpha) band for one trajectory component on the
# current plot, restricted to times <= endTime (defaults to full range).
# method "NormApp": mean +/- qnorm(1-alpha/2)*sd; "qtile": empirical
# pointwise quantiles. fill = T shades with `fill_col`; otherwise a
# hatched black band is drawn.
# NOTE(review): the `col` argument is accepted but never used.
CI_Curve_end = function(MCMC_obj,ids,endTime = NULL,scale = 1,col = "black", fill_col = "grey", row = 3,method = "qtile",alpha=0.05,fill = T){
if(is.null(endTime)){
endTime = max(MCMC_obj$Trajectory[,1,1])
}
# Grid points up to endTime.
time_id = (MCMC_obj$Trajectory[,1,1] <= endTime)
if(method == "NormApp"){
midcur = apply(MCMC_obj$Trajectory[time_id ,row,ids]/scale,1,mean)
midSd = apply(MCMC_obj$Trajectory[time_id ,row,ids]/scale,1,sd)
qt = qnorm(1-alpha/2)
upper = midcur + qt * midSd
lower = midcur - qt * midSd
}else if(method == "qtile"){
qt1 = 1 - alpha/2
qt2 = alpha/2
upper = apply(MCMC_obj$Trajectory[time_id ,row,ids]/scale,1,function(x){
return(quantile(x,qt1))
}
)
lower = apply(MCMC_obj$Trajectory[time_id ,row,ids]/scale,1,function(x){
return(quantile(x,qt2))
})
}
if(fill == T){
polygon(x = c(MCMC_obj$Trajectory[time_id ,1,1],rev(MCMC_obj$Trajectory[time_id ,1,1])),
y = c(upper,rev(lower)),col = fill_col,border = NA)
}else{
# Hatched band instead of a solid fill.
polygon(x = c(MCMC_obj$Trajectory[time_id ,1,1],rev(MCMC_obj$Trajectory[time_id ,1,1])),
y = c(upper,rev(lower)), col = "black",border = F,angle = c(45,-45),density = c(40))
}
}
# Shade a pointwise (1 - alpha) band for one trajectory component over the
# full time range of the stored trajectories (see CI_Curve_end for the
# truncated variant). method "NormApp": mean +/- z*sd; "qtile": empirical
# pointwise quantiles. fill = T shades with `fill_col`, otherwise hatched.
# NOTE(review): the `col` argument is accepted but never used.
CI_Curve = function(MCMC_obj,ids,scale = 1, col = "black", fill_col = "grey", row = 3,method = "qtile",alpha=0.05,fill = T){
if(method == "NormApp"){
midcur = apply(MCMC_obj$Trajectory[,row,ids]/scale,1,mean)
midSd = apply(MCMC_obj$Trajectory[,row,ids]/scale,1,sd)
qt = qnorm(1-alpha/2)
upper = midcur + qt * midSd
lower = midcur - qt * midSd
}else if(method == "qtile"){
qt1 = 1 - alpha/2
qt2 = alpha/2
upper = apply(MCMC_obj$Trajectory[,row,ids]/scale,1,function(x){
return(quantile(x,qt1))
}
)
lower = apply(MCMC_obj$Trajectory[,row,ids]/scale,1,function(x){
return(quantile(x,qt2))
})
}
if(fill == T){
polygon(x = c(MCMC_obj$Trajectory[,1,1],rev(MCMC_obj$Trajectory[,1,1])),
y = c(upper,rev(lower)),col = fill_col,border = NA)
}else{
# Hatched band instead of a solid fill.
polygon(x = c(MCMC_obj$Trajectory[,1,1],rev(MCMC_obj$Trajectory[,1,1])),
y = c(upper,rev(lower)), col = "black",border = F,angle = c(45,-45),density = c(40))
}
}
# Draw a pointwise (1 - alpha) band and a red center curve for one
# component of a set of posterior trajectories.
#
# times      : x-axis values (one per row of Trajectory).
# Trajectory : 3-D array [time, component, sample].
# ids        : sample indices (third dimension) to summarize.
# scale      : divisor applied to the component before summarizing.
# row        : component (second dimension) to plot.
# method     : "qtile" for pointwise quantiles, "NormApp" for a normal
#              approximation (mean +/- qnorm(1-alpha/2) * sd).
# alpha      : 1 - coverage of the band.
# fill       : if TRUE, shade the band and overlay the center curve.
# col / fill_col : fill_col shades the band; `col` is accepted for
#              interface compatibility (the center curve is hard-coded red,
#              matching the original behavior).
CI_Curve3 = function(times, Trajectory, ids, scale = 1, col = "black",
                     fill_col = "grey", row = 3, method = "qtile",
                     alpha = 0.05, fill = T) {
  if (method == "NormApp") {
    midcur = apply(Trajectory[, row, ids] / scale, 1, mean)
    midSd = apply(Trajectory[, row, ids] / scale, 1, sd)
    qt = qnorm(1 - alpha / 2)
    upper = midcur + qt * midSd
    lower = midcur - qt * midSd
    # BUGFIX: `mid` was never assigned in this branch, so fill = TRUE with
    # method = "NormApp" failed with "object 'mid' not found". Use the
    # pointwise mean as the center curve.
    mid = midcur
  } else if (method == "qtile") {
    qt1 = 1 - alpha / 2
    qt2 = alpha / 2
    upper = apply(Trajectory[, row, ids] / scale, 1, function(x) {
      return(quantile(x, qt1))
    })
    # Original passed qt1 positionally into median()'s na.rm slot, which
    # effectively enabled NA removal; make that explicit.
    mid = apply(Trajectory[, row, ids] / scale, 1, function(x) {
      return(median(x, na.rm = TRUE))
    })
    lower = apply(Trajectory[, row, ids] / scale, 1, function(x) {
      return(quantile(x, qt2))
    })
  }
  if (fill == T) {
    polygon(x = c(times, rev(times)),
            y = c(upper, rev(lower)), col = fill_col, border = NA)
    lines(times, mid, col = "red", lwd = 2, lty = 2)
  }
}
# Band and dashed center curve for the effective population size implied by
# the stored trajectories.
# likelihood "volz": Ne = I / (2 * S) pointwise (note: the transmission
# rate is NOT applied here — callers pass it via `scale` — TODO confirm);
# otherwise the raw component `row` is used.
CI_Curve_eff = function(MCMC_obj,ids,scale = 1, col = "black", fill_col = "grey", row = 3,method = "qtile",alpha=0.05,fill = T,likelihood ="volz"){
if(likelihood == "volz"){
Mx = 1/(2 * MCMC_obj$Trajectory[,2,ids]/MCMC_obj$Trajectory[,3,ids])
}else{
Mx = MCMC_obj$Trajectory[,row,ids]
}
midcur = apply(Mx/scale,1,mean)
if(method == "NormApp"){
midSd = apply(Mx/scale,1,sd)
qt = qnorm(1-alpha/2)
upper = midcur + qt * midSd
lower = midcur - qt * midSd
}else if(method == "qtile"){
qt1 = 1 - alpha/2
qt2 = alpha/2
upper = apply(Mx/scale,1,function(x){
return(quantile(x,qt1))
}
)
lower = apply(Mx/scale,1,function(x){
return(quantile(x,qt2))
})
}
if(fill == T){
polygon(x = c(MCMC_obj$Trajectory[,1,1],rev(MCMC_obj$Trajectory[,1,1])),
y = c(upper,rev(lower)),col = fill_col,border = NA)
}
# Center curve is always the pointwise mean, regardless of method.
lines(MCMC_obj$Trajectory[,1,1],midcur,col = col,lwd=2,lty=2)
}
# Annotate the current plot with vertical lines marking the center and the
# bounds of a 95% interval for `data`.
#
# cred = TRUE : Bayesian-style summary — median plus 2.5% / 97.5% quantiles.
# cred = FALSE: normal approximation — mean +/- 1.96 * sd.
# The center line is solid blue; the bounds are dashed blue.
vlineCI = function(data, cred = T) {
  if (cred) {
    m = median(data)
    s1 = quantile(data, 0.025)
    s2 = quantile(data, 1 - 0.025)
    abline(v = s1, col = "blue", lwd = 2, lty = 2)
    abline(v = s2, col = "blue", lwd = 2, lty = 2)
  } else {
    s = sd(data)
    # BUGFIX: was `meam(data)` (typo), which raised
    # 'could not find function "meam"' whenever cred = FALSE.
    m = mean(data)
    abline(v = m + 1.96 * s, col = "blue", lwd = 2, lty = 2)
    abline(v = m - 1.96 * s, col = "blue", lwd = 2, lty = 2)
  }
  abline(v = m, col = "blue", lwd = 2)
}
# Plot the posterior median and 95% band of a piecewise-constant R0(t):
# column `R0_id` of `par` is the baseline, each column in `col_id` is a
# multiplicative change-point factor. The last factor is applied once more
# so the final segment extends past the last change point — presumably to
# match length(times); TODO confirm.
# add = T overlays a hatched band on an existing plot; add2 = T overlays
# only the median; otherwise a new plot with a translucent band is started.
randomR0_traj = function(times,MCMC_obj,R0_id,col_id,idx,ylim=c(0,2),ylab = "",col = "black",lty=1,fill = rgb(1,0,0,0.3), main = "",xlab = "time",cex.lab=1.3,cex.axis = 1.2,xaxt = "s",add = F,add2 = F){
R0 = MCMC_obj$par[idx,R0_id]
R0_traj = matrix(ncol= length(col_id)+2, nrow = length(idx))
R0_traj[,1] = R0
for(i in 1:length(col_id)){
R0_traj[,i+1] = R0_traj[,i] * MCMC_obj$par[idx,col_id[i]]
}
# Repeat the final multiplication to extend the last segment.
i = length(col_id)
R0_traj[,length(col_id)+2] = R0_traj[,i] * MCMC_obj$par[idx,col_id[i]]
CIup = apply(R0_traj,2,function(x){
return(quantile(x,0.975))
})
CIlow = apply(R0_traj,2,function(x){
return(quantile(x,0.025))
})
m = apply(R0_traj,2,median)
if(add){
lines(times,m,type="l",ylab = ylab,col = col,lty = lty,lwd = 2,ylim=ylim,main=main,xlab = xlab,
cex.lab= cex.lab,cex.axis = cex.axis, xaxt = xaxt)
polygon(x = c(times,rev(times)),
y = c(CIup,rev(CIlow)),col = "black",border = F,angle = c(45,-45),density = c(40))
}else{
if(add2){
lines(times,m,type="l",ylab = ylab,col = col,lty = lty,lwd = 2,ylim=ylim,main=main,xlab = xlab,
cex.lab= cex.lab,cex.axis = cex.axis, xaxt = xaxt)
}else{
plot(times,m,type="l",ylab = ylab,col = col,lty = lty,lwd = 2,ylim=ylim,main=main,xlab = xlab,
cex.lab= cex.lab,cex.axis = cex.axis, xaxt = xaxt)
}
polygon(x = c(times,rev(times)),
y = c(CIup,rev(CIlow)),col = fill,border = NA)
}
#lines(times,m,type="l",col = "red",lwd = 2)
}
# Step-function version of randomR0_traj: draws the posterior median of the
# piecewise-constant R0(t) as a step function with a shaded 95% band.
# NOTE(review): the band's x-coordinates are hard-coded to run from 0 to
# 1.4, which looks dataset-specific — confirm before reuse.
randomR0_traj_V = function(times,MCMC_obj,R0_id,col_id,idx,xlim,ylim=c(0,2),main = "",fill_col = rgb(1,0,0,0.3)){
R0 = MCMC_obj$par[idx,R0_id]
R0_traj = matrix(ncol= length(col_id)+1, nrow = length(idx))
R0_traj[,1] = R0
for(i in 1:length(col_id)){
R0_traj[,i+1] = R0_traj[,i] * MCMC_obj$par[idx,col_id[i]]
}
i = length(col_id)
#R0_traj[,length(col_id)+2] = R0_traj[,i] * MCMC_obj$par[idx,col_id[i]]
CIup = apply(R0_traj,2,function(x){
return(quantile(x,0.975))
})
CIlow = apply(R0_traj,2,function(x){
return(quantile(x,0.025))
})
m = apply(R0_traj,2,median)
# Change-point locations for the step functions (drop the first time).
times = times[2:(length(m))]
step1 = stepfun(times, m, f = 0)
step2 = stepfun(times, CIup, f = 0)
step3 = stepfun(times, CIlow, f = 0)
plot(step1,ylab = expression(R0(t)),col = "red",lwd = 2.5,ylim=ylim,
main=main,verticals = F,xlim=xlim,xlab = "time",lty=2,xaxt = "n",cex.lab=1.3,cex.axis = 1.2)
# Band drawn as a step-shaped polygon (each value repeated twice).
polygon(x = c(c(0,rep(times,each=2),1.4), rev(c(0,rep(times,each=2),1.4))),
c(rep(CIlow,each=2), rev(rep(CIup,each=2))),col = fill_col,border = NA)
#lines(step2, lty=2,lwd = 1,verticals = F, col = "blue",xlim=xlim)
#lines(step3, lty=2,lwd = 1,verticals = F, col = "blue",xlim=xlim)
#lines(times,m,type="l",col = "red",lwd = 2)
}
# Posterior draws of the piecewise-constant R0 trajectory.
# Builds one row per posterior sample in `idx`: column 1 is the baseline R0
# (column `R0_id` of `MCMC_obj$par`); each later column multiplies the
# previous one by the change-point factor in the matching `col_id` column.
# `times` is accepted for interface compatibility but is not used.
randomR0s = function(times, MCMC_obj, R0_id, col_id, idx) {
  out <- matrix(nrow = length(idx), ncol = length(col_id) + 1)
  out[, 1] <- MCMC_obj$par[idx, R0_id]
  for (k in seq_along(col_id)) {
    out[, k + 1] <- out[, k] * MCMC_obj$par[idx, col_id[k]]
  }
  out
}
# Coalescent log-likelihood of a trajectory: builds the coalescent grid
# from the sampling/coalescent times in `coal_obj` and evaluates
# coal_loglik on the log-trajectory. coal_lik_init, LogTraj and
# coal_loglik are defined elsewhere in the package.
# NOTE(review): the `col` argument is accepted but never used.
coal_like_fast = function(traj, lambda = 1, coal_obj,t_correct,col=3){
init = coal_lik_init(coal_obj$samp_times,coal_obj$n_sampled,coal_obj$coal_times,grid = traj[,1])
return(coal_loglik(init,LogTraj(traj),t_correct = t_correct, lambda))
}
# Histogram of a posterior sample, annotated with its interval summary
# (vertical lines drawn by vlineCI; `cred` is forwarded unchanged).
histwithCI = function(data, cred = T) {
  hist(data)
  vlineCI(data, cred)
}
# Overlay nonparametric (BNPR) and LNA effective-population estimates:
# plots the BNPR posterior mean with a translucent blue band, then adds the
# LNA credible band (translucent red, via CI_Curve) and its median curve.
# NOTE(review): the default `row = id` references an undefined name `id`;
# harmless because `row` is never used in the body, but should be removed.
effpopPlot = function(NP_res,LNA_res,t_correct,idx,row=id,lid=5,ylab="",xlab="",ylim=c(0,10)){
plot(t_correct - NP_res$x,NP_res$effpopmean,type="l",ylim = ylim,xlab = xlab,ylab=ylab)
polygon(c(t_correct - NP_res$x, rev(t_correct - NP_res$x)), c(NP_res$effpop975,rev(NP_res$effpop025)),
col=rgb(0,0,1,0.3),border = F)
# LNA band/median: trajectory component scaled by the coalescent
# parameter in column 5 of the posterior draws.
CI_Curve(LNA_res,idx,scale = LNA_res$par[idx,5],fill_col = rgb(1,0,0,0.3))
medianCur(LNA_res,idx,col="red",lid = 5)
}
# Effective population size under the volz parameterization with a
# time-varying transmission rate: Ne(t) = (I / (2 S)) / beta(t), where
# beta(t) comes from betaTs (defined elsewhere in the package).
# Returns a two-column matrix: cbind(time, Ne).
volz_eff = function(Traj, param, x_r, x_i){
x = 1 / (2 * Traj[,2]/Traj[,3])
betas = betaTs(param, Traj[,1], x_r, x_i)
return(cbind(Traj[,1],x/betas))
}
#' @title Plot effective population size using output from MCMC
#'
#' @param MCMC_obj A list returned from the MCMC functions
#' @param ids Indices from the MCMC iteration output
#' @param thin Thinning applied to the stored SIR trajectories
#'
#' Under the "volz" likelihood, Ne is I/(2S) divided by the time-varying
#' transmission rate betaTs(...) recomputed per posterior sample.
#' NOTE(review): `sum(ids %% thin) == 1` is a weak divisibility guard — it
#' only fires when the remainders happen to sum to exactly 1; confirm the
#' intended check is all(ids %% thin == 0).
CI_Curve_eff2 = function(MCMC_obj,ids, thin = 1,col = "black", fill_col = "grey", Irow = 3,method = "qtile",alpha=0.05,fill = T,likelihood ="volz",
x_r,x_i,p=3){
if(sum(ids %% thin) == 1){
stop("Some indices not stored")
}
if(likelihood == "volz"){
Mx = 1/(2 * MCMC_obj$Trajectory[,2,ids / thin]/MCMC_obj$Trajectory[,Irow,ids / thin])
# Per-sample time-varying transmission rates; parameters start at
# column p+1 of the posterior draws.
scale = sapply(ids,function(x){
return(betaTs(MCMC_obj$par[x,(p+1):(p + x_i[1] + x_i[2])],
MCMC_obj$Trajectory[,1,1],x_r, x_i))})
Mx = Mx/scale
}else{
Mx = MCMC_obj$Trajectory[,Irow,ids / thin]
scale = 1
}
midcur = apply(Mx,1,mean)
if(method == "NormApp"){
midSd = apply(Mx,1,sd)
qt = qnorm(1-alpha/2)
upper = midcur + qt * midSd
lower = midcur - qt * midSd
}else if(method == "qtile"){
qt1 = 1 - alpha/2
qt2 = alpha/2
upper = apply(Mx,1,function(x){
return(quantile(x,qt1))
}
)
lower = apply(Mx,1,function(x){
return(quantile(x,qt2))
})
}
if(fill == T){
polygon(x = c(MCMC_obj$Trajectory[,1,1],rev(MCMC_obj$Trajectory[,1,1])),
y = c(upper,rev(lower)),col = fill_col,border = NA)
}
# Center curve is always the pointwise mean.
lines(MCMC_obj$Trajectory[,1,1],midcur,col = col,lwd=2,lty=2)
}
# Set up an empty time axis and draw the BNPR (nonparametric) effective
# population estimate as a dotted line with a hatched credible band.
# NOTE(review): BNPR is presumably from the phylodyn package — confirm it
# is attached by the caller. The `main` argument is accepted but never
# passed to plot().
NP_plot = function(coal_obj, ngrids, t_correct, main = "", xlim = c(0,100), ylim = c(0,10)){
#par(mgp = c(2.5,1,0),mar=c(4,4,1,1))
plot(1, type="n", xlab="time", ylab="effective population size",
xlim=xlim, ylim=ylim,cex.lab=1.3,cex.axis = 1.2,xaxt = "n")
NP_res = BNPR(coal_obj, ngrids)
# BNPR reports time before t_correct; convert to calendar-forward time.
lines(t_correct - NP_res$x, NP_res$effpopmean, type="l" , lwd=2, col = "black", lty = 3)
polygon(c(t_correct - NP_res$x,rev(t_correct - NP_res$x)),
c(NP_res$effpop025,rev(NP_res$effpop975)),
col = "black",border = F,angle = c(45,-45),density = c(10, 20))
}
# Plot the posterior median and 95% band of incidence derived from each
# stored trajectory via SIR_incidence_Traj (defined elsewhere in the
# package). The grid 0:(n-1) assumes unit-spaced trajectory time points —
# TODO confirm.
# NOTE(review): `col` and `row` arguments are accepted but never used.
random_Incidence = function(MCMC_obj, idx,col = "black", fill_col = rgb(0.15,0.15,0.15,0.3), row = 3,lty = 2,ylim = c(0,1000),ylab = "incidence"){
n = length(MCMC_obj$Trajectory[,1,1])
t = SIR_incidence_Traj(MCMC_obj$Trajectory[,,1],0:(n-1))[,1]
# One incidence series per posterior sample (columns of incid_MX).
incid_MX = apply(MCMC_obj$Trajectory[,,idx], 3, function(x){
return(SIR_incidence_Traj(x,0:(n-1))[,2])
})
mid = apply(incid_MX,1,median)
upper = apply(incid_MX,1,function(x){
return(quantile(x,0.975))
})
lower = apply(incid_MX,1,function(x){
return(quantile(x,0.025))
})
plot(t, mid, type = "l", lty = lty, ylim = ylim,ylab = ylab)
polygon(c(t,rev(t)),
c(upper,rev(lower)),border = NA, col = fill_col)
}
# Plot the posterior median and 95% band of *observed* incidence: latent
# incidence from each stored trajectory scaled by the per-sample reporting
# probability rho (column 1 of `incid_par`). Trajectories are stored every
# `thin` iterations, so `idx` must be multiples of `thin`.
# NOTE(review): `col` and `row` arguments are accepted but never used.
random_Incidence_pred = function(MCMC_obj,idx,thin, col = "black", fill_col = rgb(0.15,0.15,0.15,0.3), row = 3,lty = 2,ylim = c(0,1000),ylab = "incid.predict",add = F,xaxt = "s"){
t = SIR_incidence_Traj(MCMC_obj$Trajectory[,,1],1:dim(MCMC_obj$Trajectory)[1]-1)[,1]
incid_MX = apply(MCMC_obj$Trajectory[,,idx/thin], 3, function(x){
return(SIR_incidence_Traj(x,1:dim(x)[1]-1)[,2])
})
# Repeat each sample's rho down the time axis, then scale.
rhosMX = matrix(rep(MCMC_obj$incid_par[idx,1], each = length(t)), ncol = length(idx))
incid_MX = incid_MX * rhosMX
mid = apply(incid_MX,1,median)
upper = apply(incid_MX,1,function(x){
return(quantile(x,0.975))
})
lower = apply(incid_MX,1,function(x){
return(quantile(x,0.025))
})
if(add){
lines(t, mid, type = "l", lty = lty, ylim = ylim, ylab = ylab, lwd = 2)
}else{
plot(t, mid, type = "l", lty = lty, ylim = ylim,ylab = ylab,lwd=2,xaxt = xaxt)
}
polygon(c(t,rev(t)),
c(upper,rev(lower)),border = NA, col = fill_col)
}
# Posterior-predictive incidence: like random_Incidence_pred, but instead
# of plotting the rho-scaled mean, draws one negative-binomial observation
# per time point and sample (size = per-sample phi, column 2 of
# `incid_par`) and summarizes those draws. Plots the band and returns
# list(up, mid, low).
# NOTE(review): `col` and `row` arguments are accepted but never used.
random_Incidence_predictive = function(MCMC_obj,idx,thin, col = "black", fill_col = rgb(0.15,0.15,0.15,0.3), row = 3,lty = 2,ylim = c(0,1000),ylab = "incid.predict",add = F,xaxt = "s"){
t = SIR_incidence_Traj(MCMC_obj$Trajectory[,,1],1:dim(MCMC_obj$Trajectory)[1]-1)[,1]
incid_MX = apply(MCMC_obj$Trajectory[,,idx/thin], 3, function(x){
return(SIR_incidence_Traj(x,1:dim(x)[1]-1)[,2])
})
rhosMX = matrix(rep(MCMC_obj$incid_par[idx,1], each = length(t)), ncol = length(idx))
phiMX = MCMC_obj$incid_par[idx,2]
incid_MX = incid_MX * rhosMX
sample_MX = matrix(ncol = length(idx), nrow = length(t))
for(i in 1:length(idx)){
# max(x, 0) guards against small negative means from numerical error.
sample_MX[,i] = sapply(incid_MX[,i], function(x){
return(rnbinom(1,size = phiMX[i],mu = max(x,0)))
})
}
mid = apply(sample_MX,1,function(x){
return(quantile(x,0.5,na.rm = T))
})
upper = apply(sample_MX,1,function(x){
return(quantile(x,0.975,na.rm = T))
})
lower = apply(sample_MX,1,function(x){
return(quantile(x,0.025,na.rm = T))
})
if(add){
lines(t, mid, type = "l", lty = lty, ylim = ylim, ylab = ylab, lwd = 2)
}else{
plot(t, mid, type = "l", lty = lty, ylim = ylim,ylab = ylab,lwd=2,xaxt = xaxt)
}
polygon(c(t,rev(t)),
c(upper,rev(lower)),border = NA, col = fill_col)
return(list(up = upper, mid = mid, low = lower))
}
# Band and median curve for the infection rate: trajectory component `row`
# (for samples `ids2`) multiplied by the per-sample piecewise R0 path built
# from columns 3 and 4 of `par` (baseline * gamma * dt) and the
# change-point factors in `col_id` (samples `idx`).
# NOTE(review): the "NormApp" branch references an undefined `scale` and
# would error if used — only method = "qtile" works as written. Also note
# `idx` indexes `par` while `ids2` indexes `Trajectory`; callers must keep
# them consistent.
CI_Curve_irate = function(MCMC_obj,idx,ids2,col_id, dt,col = "black", fill_col = rgb(0.15,0.15,0.15,0.3), row = 3,method = "qtile",lty = 2,alpha=0.05,fill = T){
if(method == "NormApp"){
midcur = apply(MCMC_obj$Trajectory[,row,idx]/scale,1,mean)
midSd = apply(MCMC_obj$Trajectory[,row,idx]/scale,1,sd)
qt = qnorm(1-alpha/2)
upper = midcur + qt * midSd
lower = midcur - qt * midSd
}else if(method == "qtile"){
qt1 = 1 - alpha/2
qt2 = alpha/2
# Piecewise-constant rate path per sample; the last factor is applied
# once more to extend the final segment (matches randomR0_traj).
R0 = MCMC_obj$par[idx,3] * MCMC_obj$par[idx,4] * dt
R0_traj = matrix(ncol= length(col_id)+2, nrow = length(idx))
R0_traj[,1] = R0
for(i in 1:length(col_id)){
R0_traj[,i+1] = R0_traj[,i] * MCMC_obj$par[idx,col_id[i]]
}
i = length(col_id)
R0_traj[,length(col_id)+2] = R0_traj[,i] * MCMC_obj$par[idx,col_id[i]]
upper = apply(MCMC_obj$Trajectory[,row,ids2] * t(R0_traj),1,function(x){
return(quantile(x,qt1))
}
)
lower = apply(MCMC_obj$Trajectory[,row,ids2] * t(R0_traj),1,function(x){
return(quantile(x,qt2))
})
mid = apply(MCMC_obj$Trajectory[,row,ids2] * t(R0_traj),1,function(x){
return(median(x))
})
}
if(fill == T){
polygon(x = c(MCMC_obj$Trajectory[,1,1],rev(MCMC_obj$Trajectory[,1,1])),
y = c(upper,rev(lower)),col = fill_col,border = NA)
lines(MCMC_obj$Trajectory[,1,1], mid, col = col, lwd = 2,lty = lty)
}
}
# Pointwise posterior summary of one trajectory component: median plus the
# (1 - alpha) equal-tailed quantile band across MCMC samples `ids`.
# Returns list(upper, lower, mid), each a vector over time points.
CI_Curve_stat = function(MCMC_obj, ids, scale = 1, row = 3, alpha = 0.05) {
  draws <- MCMC_obj$Trajectory[, row, ids] / scale
  p_hi <- 1 - alpha / 2
  p_lo <- alpha / 2
  upper <- apply(draws, 1, quantile, probs = p_hi)
  lower <- apply(draws, 1, quantile, probs = p_lo)
  mid <- apply(draws, 1, median)
  list(upper = upper, lower = lower, mid = mid)
}
# Posterior summary (median and 95% band) of the piecewise-constant R0
# trajectory: column `R0_id` of `par` is the baseline, each `col_id` column
# a multiplicative change-point factor. The final value is duplicated so
# the last segment extends past the last change point, matching the
# plotting code elsewhere in this file.
# Returns list(times, up, low, mid).
R0_Curve_stat = function(times, MCMC_obj, R0_id, col_id, idx) {
  traj <- matrix(nrow = length(idx), ncol = length(col_id) + 2)
  traj[, 1] <- MCMC_obj$par[idx, R0_id]
  for (k in seq_along(col_id)) {
    traj[, k + 1] <- traj[, k] * MCMC_obj$par[idx, col_id[k]]
  }
  # Duplicate the last computed column (same product as applying the final
  # factor again).
  traj[, length(col_id) + 2] <- traj[, length(col_id) + 1]
  up <- apply(traj, 2, quantile, probs = 0.975)
  low <- apply(traj, 2, quantile, probs = 0.025)
  mid <- apply(traj, 2, median)
  list(times = times, up = up, low = low, mid = mid)
}
# Compare a posterior R0 summary (from R0_Curve_stat) against the true
# time-varying reproduction number RTs(...) (defined elsewhere in the
# package). Returns the mean absolute deviation of the posterior median
# (MAD) and the mean credible-interval width (MCIW).
R0_compare = function(RCI, param, x_r, x_i){
RR = RTs(param, RCI$times, x_r, x_i)
MAD = mean(abs(RCI$mid - RR))
MCIW = mean(RCI$up - RCI$low)
return(list(MAD = MAD, MCIW = MCIW))
}
##################
# Posterior-predictive incidence summary (no plotting): draws one
# negative-binomial observation per time point and posterior sample, then
# returns list(times, up, low, mid, sample_MX).
# NOTE(review): when incid = FALSE, the prior-sampling code for rhosMX and
# phiMX is commented out, so `phiMX` (and the rho scaling) are undefined
# and rnbinom() below errors — this path needs to be restored or removed.
random_Incidence_pred_stat = function(MCMC_obj, idx, thin, incid = T){
t = SIR_incidence_Traj(MCMC_obj$Trajectory[,,1],1:dim(MCMC_obj$Trajectory)[1]-1)[,1]
incid_MX = apply(MCMC_obj$Trajectory[,,idx/thin], 3, function(x){
return(SIR_incidence_Traj(x,1:dim(x)[1]-1)[,2])
})
if(incid == T){
# Per-sample reporting probability rho and overdispersion phi.
rhosMX = matrix(rep(MCMC_obj$incid_par[idx,1], each = length(t)), ncol = length(idx))
phiMX = MCMC_obj$incid_par[idx,2]
incid_MX = incid_MX * rhosMX
}else{
#rho_pr = MCMC_obj$MCMC_setting$prior$rho_pr
#phi_pr = MCMC_obj$MCMC_setting$prior$phi_pr
#rhosMX = sigmoid(rnorm(length(idx),rho_pr[1], rho_pr[2]))
#phiMX = rlnorm(length(idx),phi_pr[1], phi_pr[2])
}
#incid_MX = incid_MX * rhosMX
#sample_MX = incid_MX
sample_MX = matrix(ncol = length(idx), nrow = length(t))
for(i in 1:length(idx)){
# max(x, 0) guards against small negative means from numerical error.
sample_MX[,i] = sapply(incid_MX[,i], function(x){
return(rnbinom(1,size = phiMX[i],mu = max(x,0)))
})
}
##################
mid = apply(sample_MX,1,function(x){
return(quantile(x,0.5,na.rm = T))
})
upper = apply(sample_MX,1,function(x){
return(quantile(x,0.975,na.rm = T))
})
lower = apply(sample_MX,1,function(x){
return(quantile(x,0.025,na.rm = T))
})
return(list(times = t, up = upper, low = lower, mid = mid,sample_MX = sample_MX))
}
# Summarize an MCMC run: kernel densities and (2.5/50/97.5)% quantiles for
# I0, R0 and gamma (columns 2-4 of `par`), the piecewise R0 trajectory
# summary, and pointwise bands for the S (component 2) and I (component 3)
# trajectories. Trajectories are stored every `thin` iterations, so the
# trajectory index is idx/thin.
MCMC_summarize = function(times, MCMC_obj, idx, thin, alpha = 0.05){
I0 = MCMC_obj$par[idx, 2]
R0 = MCMC_obj$par[idx, 3]
gamma = MCMC_obj$par[idx, 4]
p = dim(MCMC_obj$par)[2]
pb = c(0.025, 0.5, 0.975)
parDensity = list(I0 = density(I0), R0 = density(R0), gamma = density(gamma),I0q = quantile(I0, pb), R0q = quantile(R0, pb), gammaq = quantile(gamma,pb))
# Change-point factors occupy columns 5 .. p-1 of `par`.
R0_traj = R0_Curve_stat(times, MCMC_obj, 3, 5:(p - 1), idx)
I_traj = CI_Curve_stat(MCMC_obj, idx/thin, 1, 3, alpha)
S_traj = CI_Curve_stat(MCMC_obj, idx/thin, 1, 2, alpha)
#incid_pred = random_Incidence_pred_stat(MCMC_obj, idx, thin, F)
return(list(parDensity = parDensity, R0_traj = R0_traj, S_traj = S_traj, I_traj = I_traj, niter = max(idx)))
}
# Summarise an MCMC run fitted with an incidence observation model.
# Same parameter summaries as MCMC_summarize, plus densities/quantiles for
# the incidence parameters (rho, phi) and a posterior-predictive band for
# the observed counts.
MCMC_summarize_incid <- function(times, MCMC_obj, idx, thin, alpha = 0.05, incid = TRUE) {
  probs <- c(0.025, 0.5, 0.975)
  n_par <- dim(MCMC_obj$par)[2]
  I0_draws <- MCMC_obj$par[idx, 2]
  R0_draws <- MCMC_obj$par[idx, 3]
  gamma_draws <- MCMC_obj$par[idx, 4]
  parDensity <- list(
    I0 = density(I0_draws),
    R0 = density(R0_draws),
    gamma = density(gamma_draws),
    I0q = quantile(I0_draws, probs),
    R0q = quantile(R0_draws, probs),
    gammaq = quantile(gamma_draws, probs)
  )
  # R0 curve and trajectory bands (state columns: 2 = S, 3 = I).
  R0_traj <- R0_Curve_stat(times, MCMC_obj, 3, 5:(n_par - 1), idx)
  S_traj <- CI_Curve_stat(MCMC_obj, idx / thin, 1, 2, alpha)
  I_traj <- CI_Curve_stat(MCMC_obj, idx / thin, 1, 3, alpha)
  # Incidence observation parameters: reporting fraction and NB size.
  rho_draws <- MCMC_obj$incid_par[idx, 1]
  phi_draws <- MCMC_obj$incid_par[idx, 2]
  incid_par <- list(rho = density(rho_draws), rhoq = quantile(rho_draws, probs),
                    phi = density(phi_draws), phiq = quantile(phi_draws, probs))
  incid_pred <- random_Incidence_pred_stat(MCMC_obj, idx, thin, incid)
  list(parDensity = parDensity, R0_traj = R0_traj, I_traj = I_traj,
       S_traj = S_traj, incid_par = incid_par, incid_pred = incid_pred)
}
# Summarise an MCMC run fitted with a preferential-sampling observation model.
#
# Args:
#   times: time grid for the time-varying R0 curve.
#   MCMC_obj: MCMC output; $par columns 2-4 are I0, R0, gamma and the
#     trailing columns feed the R0 curve; $pref_par holds (a, b).
#   idx: iterations to summarise; thin: thinning used for $Trajectory.
#   alpha: credible level.  Fix: now defaults to 0.05 for consistency with
#     MCMC_summarize and MCMC_summarize_incid (previously it had no default).
#   pref: accepted for interface symmetry with MCMC_summarize_incid but
#     currently unused by this function.
# Returns: list with parameter densities/quantiles, R0/I/S trajectory
#   summaries, and preferential-sampling parameter summaries.
MCMC_summarize_pref = function(times, MCMC_obj, idx, thin, alpha = 0.05, pref = T){
  I0 = MCMC_obj$par[idx, 2]
  R0 = MCMC_obj$par[idx, 3]
  gamma = MCMC_obj$par[idx, 4]
  p = dim(MCMC_obj$par)[2]
  pb = c(0.025, 0.5, 0.975)
  parDensity = list(I0 = density(I0), R0 = density(R0), gamma = density(gamma),I0q = quantile(I0, pb), R0q = quantile(R0, pb), gammaq = quantile(gamma,pb))
  R0_traj = R0_Curve_stat(times, MCMC_obj, 3, 5:(p - 1), idx)
  # Trajectory bands: state columns 3 = I, 2 = S.
  I_traj = CI_Curve_stat(MCMC_obj, idx/thin, 1, 3, alpha)
  S_traj = CI_Curve_stat(MCMC_obj, idx/thin, 1, 2, alpha)
  Pref_par = list(a = density(MCMC_obj$pref_par[idx,1]), aq = quantile(MCMC_obj$pref_par[idx,1], pb), b = density(MCMC_obj$pref_par[idx,2]), bq = quantile(MCMC_obj$pref_par[idx,2], pb))
  return(list(parDensity = parDensity, R0_traj = R0_traj, I_traj = I_traj, S_traj = S_traj,Pref_par = Pref_par))
}
# Pointwise summary of the log sampling intensity under the preferential
# sampling model: a * log(I(t) + 1) + b - log(dt), summarised across the
# retained posterior draws.
#
# Args:
#   MCMC_obj: list with $Trajectory (time x state x draw; column 1 = time,
#     column 3 = I) and $pref_par (iterations x 2: a, b).
#   idx: iteration indices; thin: thinning factor for $Trajectory slices.
# Returns: list with mid / upper / lower pointwise quantile curves
#   (50%, 97.5%, 2.5%).
pref_sample_intense <- function(MCMC_obj, idx, thin) {
  # Log-prevalence for the retained trajectories (one column per draw).
  log_prev <- log(MCMC_obj$Trajectory[, 3, idx / thin] + 1)
  # Log of the trajectory grid spacing (taken from the first two time points).
  log_dt <- log(MCMC_obj$Trajectory[2, 1, 1] - MCMC_obj$Trajectory[1, 1, 1])
  n_time <- dim(log_prev)[1]
  # Broadcast each draw's (a, b) down its column.
  slope_mx <- matrix(rep(MCMC_obj$pref_par[idx, 1], n_time),
                     byrow = TRUE, ncol = length(idx))
  icept_mx <- matrix(rep(MCMC_obj$pref_par[idx, 2], n_time),
                     byrow = TRUE, ncol = length(idx))
  intensity <- log_prev * slope_mx + icept_mx - log_dt
  # Row-wise 2.5% / 50% / 97.5% quantiles across draws.
  qs <- apply(intensity, 1, quantile,
              probs = c(0.025, 0.5, 0.975), na.rm = TRUE)
  list(mid = qs[2, ], upper = qs[3, ], lower = qs[1, ])
}
# Pointwise posterior quantiles of one trajectory state column.
#
# Args:
#   MCMC_obj: list with $Trajectory (time x state x draw; column 1 = time).
#   idx: iteration indices; thin: thinning factor; col_id: state column.
# Returns: matrix with one row per time point and columns
#   (t, 2.5%, 50%, 97.5%).
TrajQtl <- function(MCMC_obj, idx, thin, col_id) {
  qmat <- apply(MCMC_obj$Trajectory[, col_id, idx / thin], 1,
                quantile, probs = c(0.025, 0.5, 0.975))
  time_grid <- MCMC_obj$Trajectory[, 1, 1]
  t(rbind(t = time_grid, qmat))
}
# Add a shaded credible band plus its central curve to an existing plot.
#
# Args:
#   up, mid, down: upper bound, central curve, lower bound (same length as time).
#   time: x-axis values.
#   col1: fill colour of the band; col2: colour of the central line.
#   lwd, lty: line width and type of the central line.
add_interval = function(up, mid,down, time, col1 = rgb(0,0,1,0.23),col2 = "red", lwd = 2, lty = 2){
  polygon(c(time, rev(time)),c(up, rev(down)), col = col1, border = NA)
  # Bug fix: the user-supplied `lwd` was previously ignored (hard-coded 2).
  lines(time, mid, col = col2, lwd = lwd, lty = lty)
}
# Log predictive coverage: for each selected index, the log of the fraction
# of posterior samples (a row of sampleMX) that fall strictly inside the
# interval that `rule` builds around the true value.
#
# Args:
#   truth: vector of true values; ids: indices to evaluate.
#   sampleMX: matrix of posterior samples, one row per index in `truth`.
#   rule: function mapping a true value to list(up = ..., down = ...).
# Returns: numeric vector of log coverage fractions, one per id.
log_predictive <- function(truth, ids, sampleMX, rule) {
  vapply(ids, function(i) {
    bounds <- rule(truth[i])
    inside <- bounds$up > sampleMX[i, ] & bounds$down < sampleMX[i, ]
    log(mean(inside))
  }, numeric(1))
}
# Example coverage rule for log_predictive(): a +/-20 band around the true
# value, with the lower bound floored at 0.
rules <- function(x) {
  upper_bound <- x + 20
  lower_bound <- max(x - 20, 0)
  list(up = upper_bound, down = lower_bound)
}
|
51268d4e2a11990e34b61fb457d333b72c5e5fef
|
751cb702d367147470470838c89b6eff530d899e
|
/seatrackR/R/listFileArchive.R
|
e0f5761d699736a93256bd22a4c1c9b82682e011
|
[] |
no_license
|
NINAnor/seatrack-db
|
30877c9483b08f824fc2e717f44f4379e76ba35b
|
e07284fb970fb82ba922c8703cd938bfa1da384e
|
refs/heads/master
| 2023-08-16T09:44:50.246576
| 2023-08-16T09:09:22
| 2023-08-16T09:09:22
| 89,018,319
| 0
| 0
| null | 2023-08-16T09:09:02
| 2017-04-21T19:45:42
|
R
|
UTF-8
|
R
| false
| false
| 1,754
|
r
|
listFileArchive.R
|
#' Retrieve info on the status of files in the file archive
#'
#' This function checks which files are stored in the file archive, which files that are missing from the archive
#' (listed in the database but not present in the file archive), and which files are in the archive but not listed in the database (should be zero).
#'
#'
#'
#' @return A list.
#' @export
#' @examples
#' dontrun{
#' listFileArchive()
#' }
#' @seealso \code{\link{getFileArchive}} for a function that summarizes info on the files that should be in the file archive (connected to loggers that have been shut down). )
listFileArchive <- function(){
  # Verify the database connection before doing any work.
  checkCon()
  ## Get the files currently present in the archive, using curl instead of RCurl.
  url <- seatrackR:::.getFtpUrl()
  tmp <- strsplit(url$url, "//")
  # Splice the credentials into the FTP URL: scheme://pwd@host/path.
  dest <- paste0(tmp[[1]][1], "//", url$pwd, "@", tmp[[1]][2])
  list_files <- curl::new_handle()
  # NOTE(review): ssl_verifyhost/ssl_verifypeer are disabled here, so the
  # server certificate is never checked -- confirm this is intentional.
  curl::handle_setopt(list_files,
                      ftp_use_epsv = TRUE,
                      dirlistonly = TRUE,
                      use_ssl = T,
                      ssl_verifyhost = F,
                      ssl_verifypeer = F,
                      sslversion = 6L)
  con <- curl::curl(url = dest, "r", handle = list_files)
  filesInStorage <- readLines(con)
  close(con)
  filesInStorage <- as_tibble(filesInStorage)
  names(filesInStorage) <- "filename"
  # Files the database says should exist in the archive.
  filesInDatabase <- getFileArchiveSummary()
  # Listed in the database but missing from storage.
  filesNotInStorage <- filesInDatabase %>%
    filter(!(filename %in% filesInStorage$filename)) %>%
    select(filename)
  # Present in storage but unknown to the database (expected to be empty).
  filesNotInDatabase <- filesInStorage %>%
    filter(!(filename %in% filesInDatabase$filename))
  out <- list("filesInArchive" = filesInStorage, "filesNotInArchive" = filesNotInStorage, "filesNotInDatabase" = filesNotInDatabase)
  return(out)
}
|
8d5c7c80874437a993b5f7ec0027a4d65b076b9e
|
f41039eccb1f5c667927166664a30a224cbae2b8
|
/code/conditional_analysis/plots/ca_upset_plot.R
|
8967f113871d56899ec70b4f15053be7049cd6e1
|
[] |
no_license
|
brycerowland/UKB_BCT_TWAS
|
959f5e9508a708b70c1d77bba10c73357b559327
|
938866c51765e025b8b05894306af5d217dd1814
|
refs/heads/master
| 2023-09-03T00:15:45.191129
| 2021-11-16T16:19:58
| 2021-11-16T16:19:58
| 205,016,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
ca_upset_plot.R
|
library(tidyverse)
library(UpSetR)
library(fastDummies)
ca_results <- read_tsv("../../../data/conditional_analysis/REGENIE/analysis_results/UKB_BCT_TWAS_CA_results.tsv") %>%
filter(log10_conditional_p > -log10(0.05/11759)) %>%
select(gene_name,
phenotype)
fromList(ca_results)
p_dat <- ca_results %>%
mutate(phenotype = if_else(phenotype == "red_blood_cell_width",
"Red_blood_cell_Distribution_Width",
phenotype)) %>%
dummy_cols(select_columns = "phenotype") %>%
select(-phenotype) %>%
rename_with(.cols = starts_with("phenotype_"),
.fn = ~str_to_title(str_replace_all(str_remove(., "phenotype_"), "_", " "))) %>%
group_by(gene_name) %>%
summarise(across(.fns = sum)) %>%
as.data.frame()
upset(p_dat,
nsets = 10,
order.by = "freq")
|
f1a6b8b70238c1cc545b40ea720a7b487ffc3540
|
e7fac3e63d4df0a4910ef5849d84f1ab7fc290d7
|
/CombineFiles.R
|
f2d4adb219aea929b077594e1754471c659f4326
|
[
"CC-BY-3.0"
] |
permissive
|
l3atbc/verbcorner_data
|
f238e7ad01219801b4ac6eb66ca435b9b725e2ea
|
dbc3c66e7da0112c71dc7c2ee397b31d23ce1ebd
|
refs/heads/master
| 2020-03-10T02:54:03.102058
| 2018-04-11T19:31:33
| 2018-04-11T19:31:33
| 129,149,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,039
|
r
|
CombineFiles.R
|
# Combine two exported answer files from the database into one de-duplicated
# CSV, reporting how many unique rows each input contributed.
#
# Args:
#   file1, file2: file names, read from the "Downloaded/" directory.
# Side effects: prints per-file row counts; writes the merged, de-duplicated
#   table to `file1` in the current working directory (note: NOT under
#   "Downloaded/", matching the original behaviour).
combine_files = function(file1, file2){
  a <- read.csv(paste("Downloaded/", file1, sep = ""))
  b <- read.csv(paste("Downloaded/", file2, sep = ""))
  lena1 <- length(a[, 1])
  lenb1 <- length(b[, 1])
  keep <- c("question_id", "question", "answer", "user", "oversampled", "comments")
  a <- unique(a[, keep])
  b <- unique(b[, keep])
  lena2 <- length(a[, 1])
  lenb2 <- length(b[, 1])
  combined <- unique(rbind(a, b))
  print(paste(file1, ":", "originally ", lena1, "of which ", lena2, "are unique"))
  # Bug fix: this line previously reported file1's counts (lena1/lena2)
  # for file2 as well; lenb1/lenb2 were computed but never used.
  print(paste(file2, ":", "originally ", lenb1, "of which ", lenb2, "are unique"))
  print(paste("Result is ", length(combined[, 1])))
  write.csv(combined, file = file1)
}
# Merge each scenario's two export files into a single de-duplicated CSV;
# the output is written to the first file name in the working directory.
combine_files("A_Good_World.csv","b_a_AGW.csv")
combine_files("Entropy.csv","b_a_Entropy.csv")
combine_files("Equilibrium.csv","b_a_Equilibrium.csv")
combine_files("Explode_on_Contact.csv","b_a_EOC.csv")
combine_files("Fickle_Folk.csv","b_a_FF.csv")
combine_files("Philosophical_Zombie_Hunter.csv","b_a_PZH.csv")
combine_files("Simon_Says_Freeze.csv","b_a_SSF.csv")
|
9b45a775454f8397e242fc1ffeab62c51520b92a
|
711be6aefd3d76739908e06c091cef714052ec09
|
/test.R
|
ce8f65a92eba818ee8e33ec2591c254fcd202897
|
[
"MIT"
] |
permissive
|
louissutter/fun
|
33716c8da507084fe4142da960f65e2b31df8f6e
|
dd636dc4ecc5fcb6039cf6b0c7a1ef699658e2e7
|
refs/heads/master
| 2020-05-17T19:44:17.104920
| 2015-08-24T12:40:30
| 2015-08-24T12:40:30
| 41,141,676
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 105
|
r
|
test.R
|
version control system:
###something else
# edit it here online
# hoi tschumi
|
5ed017dbd757662535f001d0113543cbf1635c89
|
8e5c3a50a9d0826de579e9d54f3b67ba7e1c7bf5
|
/Analise/R/distribuicaoDosDados.R
|
f8bbc8a393f0af55109d74683c85909dee75d7ea
|
[] |
no_license
|
GIlGoncalves/BI2Semestre
|
5a1e62dd564181709034196bb4ec3fe96494faa1
|
7660a33567e3e89a72fef89f0758b65b5bb4ddf8
|
refs/heads/master
| 2021-03-16T08:41:06.113074
| 2017-07-01T09:14:27
| 2017-07-01T09:14:27
| 84,658,813
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 13,561
|
r
|
distribuicaoDosDados.R
|
setwd("cadeiras/bi/analise de dados/trabalho/")
ficheiro<-read.csv("outMovieFinal.csv",header = T)
for(i in 29:length(ficheiro)) {
ficheiro[is.na(ficheiro[i]),i]<-0
}
ficheiro<-ficheiro[,c(-4,-16,-17,-18,-22,-27,-10)]
array<-which(ficheiro[2]=="", arr.ind = T)
ficheiro<-ficheiro[-array[,1],]
#fit0 <- lm(ficheiro[,8] ~ficheiro[,20] , data=ficheiro)
#summary(fit0) # show results
#plot(fit)
#dim(residuals(fit0))
#predict(fit)
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits = digits)[1]
txt <- paste0(prefix, txt)
if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
text(0.5, 0.5, txt, cex = cex.cor * r)
}
png("pairs3.png",width = 5080,height = 5080)
pairs(ficheiro,lower.panel = panel.smooth,upper.panel = panel.cor)
dev.off()
#################################################################################
####Cold deck->Retirar de uma base de dados#####################################
array<-which(ficheiro[1]=="", arr.ind = T)
for(i in 1:length(array[,"row"])){
ficheiro[array[i,1],"color"]<-"Color"
}
################################################################################
ficheiro[is.na(ficheiro[3]),3]<--1
num_critic_for_reviews<-as.data.frame(table(ficheiro[3]),stringsAsFactors =FALSE)
num_critic_for_reviews1<-rbind(num_critic_for_reviews[num_critic_for_reviews$Var1==-1,],
nrow(num_critic_for_reviews[num_critic_for_reviews$Var1!=-1,]))
num_critic_for_reviews1[1,][1]<-"Desconhecidos"
png("num_critic_for_reviews.png",width = 1080,height = 1080)
barplot(num_critic_for_reviews1$Freq,names.arg = num_critic_for_reviews1$Var1,main="Criticas para revisao",xlab = "Criticas",col = c("yellow","blue"))
dev.off()
#################################################################################
ficheiro[is.na(ficheiro[4]),4]<- -1
director_facebook_likes<-as.data.frame(table(ficheiro[4]),stringsAsFactors =FALSE)
director_facebook_likes1<-rbind(director_facebook_likes[director_facebook_likes$Var1==-1,],
director_facebook_likes[director_facebook_likes$Var1==0,],
sum(director_facebook_likes[(director_facebook_likes$Var1!=-1 & director_facebook_likes$Var1!=0 ),]$Freq))
director_facebook_likes1[1,][1]<-"Desconhecidos"
director_facebook_likes1[2,][1]<-"Igual a zero"
director_facebook_likes1[3,][1]<-"Superiores a zero"
png("director_facebook_likes.png",width = 1080,height = 1080)
barplot(director_facebook_likes1$Freq,names.arg = director_facebook_likes1$Var1,main="Criticas para revisao",xlab = "Criticas",col = c("yellow","blue"))
dev.off()
###############################################################################
ficheiro[is.na(ficheiro[5]),5]<- -1
actor_3_facebook_likes<-as.data.frame(table(ficheiro[5]),stringsAsFactors =FALSE)
actor_3_facebook_likes1<-rbind(actor_3_facebook_likes[actor_3_facebook_likes$Var1==-1,],
actor_3_facebook_likes[actor_3_facebook_likes$Var1==0,],
sum(actor_3_facebook_likes[(actor_3_facebook_likes$Var1!=-1 & actor_3_facebook_likes$Var1!=0 ),]$Freq))
actor_3_facebook_likes1[1,][1]<-"Desconhecidos"
actor_3_facebook_likes1[2,][1]<-"Igual a zero"
actor_3_facebook_likes1[3,][1]<-"Superiores a zero"
png("actor_3_facebook_likes.png",width = 1080,height = 1080)
barplot(actor_3_facebook_likes1$Freq,names.arg = actor_3_facebook_likes1$Var1,main="Número de likes do actor 3",xlab = "Número",col = c("yellow","blue"))
dev.off()
###############################################################################################
actor_2_name<-as.data.frame(table(ficheiro[7]),stringsAsFactors = FALSE)
actor_2_name[1,][1]<-"Desconhecido"
actor_2_name1<-rbind(actor_2_name[actor_2_name$Var1=="Desconhecido",],
sum(actor_2_name[actor_2_name$Var1!="Desconhecido",]$Freq))
actor_2_name1[2,][1]<-"Restantes"
png("actor_2_name.png",width = 1080,height = 1080)
barplot(actor_2_name1$Freq,names.arg = actor_2_name1$Var1,main="Nome dos actores 2",xlab = "Nome",col = c("yellow","blue"))
dev.off()
###############################################################################
ficheiro[is.na(ficheiro[8]),8]<--1
actor_1_facebook_likes<-as.data.frame(table(ficheiro[8]),stringsAsFactors =FALSE)
actor_1_facebook_likes1<-rbind(actor_1_facebook_likes[actor_1_facebook_likes$Var1==-1,],
actor_1_facebook_likes[actor_1_facebook_likes$Var1==0,],
sum(actor_1_facebook_likes[(actor_1_facebook_likes$Var1!=-1 & actor_1_facebook_likes$Var1!=0 ),]$Freq))
actor_1_facebook_likes1[1,][1]<-"Desconhecidos"
actor_1_facebook_likes1[2,][1]<-"Igual a zero"
actor_1_facebook_likes1[3,][1]<-"Superiores a zero"
png("actor_1_facebook_likes.png",width = 1080,height = 1080)
barplot(actor_1_facebook_likes1$Freq,names.arg = actor_1_facebook_likes1$Var1,main="Número de likes do actor 1",xlab = "Número",col = c("yellow","blue"))
dev.off()
################################################################################
ficheiro[is.na(ficheiro[8]),8]<--1
gross<-as.data.frame(table(ficheiro[8]),stringsAsFactors =FALSE)
gross1<-rbind(gross[gross$Var1==-1,],
sum(gross[(gross$Var1!=-1),]$Freq))
gross1[1,][1]<-"Desconhecido"
gross1[2,][1]<-"Restantes"
png("gross.png",width = 1080,height = 1080)
barplot(gross1$Freq,names.arg = gross1$Var1,main="Dinheiro Gasto",xlab = "Valores",col = c("yellow","blue"))
dev.off()
#############################################################################
genres<-as.data.frame(table(ficheiro[9]),stringsAsFactors =FALSE)
png("genres.png",width = 1080,height = 1080)
barplot(genres$Freq,names.arg = genres$Var1,main="Genero dos filmes",xlab = "Genero",col = c("yellow","blue"))
dev.off()
################################################################################
actor_1_name <-as.data.frame(table(ficheiro[11]),stringsAsFactors =FALSE)
actor_1_name[1,][1]<-"Desconhecido"
actor_1_name1<-rbind(actor_1_name[actor_1_name$Var1=="Desconhecido",],
sum(actor_1_name[(actor_1_name$Var1!="Desconhecido"),]$Freq))
actor_1_name1[1,][1]<-"Desconhecido"
actor_1_name1[2,][1]<-"Restantes"
png("actor_1_name.png",width = 1080,height = 1080)
barplot(actor_1_name1$Freq,names.arg = actor_1_name1$Var1,main="Nome dos actores 1",xlab = "Nomes",col = c("yellow","blue"))
dev.off()
################################################################################
movie_title <- as.data.frame(table(ficheiro[12]),stringsAsFactors =FALSE)
png("movie_title.png",width = 1080,height = 1080)
barplot(movie_title$Freq,names.arg = movie_title$Var1,main="Titulo dos filmes",xlab = "Titulo",col = c("yellow","blue"))
dev.off()
################################################################################
ficheiro[is.na(ficheiro[13]),13]<--1
num_voted_users<-as.data.frame(table(ficheiro[13]),stringsAsFactors =FALSE)
################################################################################
ficheiro[is.na(ficheiro[14]),14]<--1
cast_total_facebook_likes<-as.data.frame(table(ficheiro[14]),stringsAsFactors =FALSE)
cast_total_facebook_likes1<-rbind(cast_total_facebook_likes[cast_total_facebook_likes$Var1==0,],
sum(cast_total_facebook_likes[(cast_total_facebook_likes$Var1!=0),]$Freq))
cast_total_facebook_likes1[1,][1]<-"Zero"
cast_total_facebook_likes1[2,][1]<-"Restantes"
png("cast_total_facebook_likes.png",width = 1080,height = 1080)
barplot(cast_total_facebook_likes1$Freq,names.arg = cast_total_facebook_likes1$Var1,main="Total de like dos actores",xlab = "Valores",col = c("yellow","blue"))
dev.off()
################################################################################
actor_3_name<-as.data.frame(table(ficheiro[15]),stringsAsFactors =FALSE)
actor_3_name[1,][1]<-"Desconhecido"
actor_3_name1<-rbind(actor_3_name[actor_3_name$Var1=="Desconhecido",],
sum(actor_3_name[(actor_3_name$Var1!="Desconhecido"),]$Freq))
actor_3_name1[1,][1]<-"Desconhecido"
actor_3_name1[2,][1]<-"Restantes"
png("actor_3_name.png",width = 1080,height = 1080)
barplot(actor_3_name1$Freq,names.arg = actor_3_name1$Var1,main="Nome dos actores 3",xlab = "Nomes",col = c("yellow","blue"))
dev.off()
#################################################################################
ficheiro[is.na(ficheiro[19]),19]<--1
num_user_for_reviews<-as.data.frame(table(ficheiro[19]),stringsAsFactors =FALSE)
num_user_for_reviews1<-rbind(num_user_for_reviews[num_user_for_reviews$Var1==-1,],
sum(num_user_for_reviews[(num_user_for_reviews$Var1!=-1),]$Freq))
num_user_for_reviews1[1,][1]<-"Desconhecido"
num_user_for_reviews1[2,][1]<-"Restantes"
png("num_user_for_reviews.png",width = 1080,height = 1080)
barplot(num_user_for_reviews1$Freq,names.arg = num_user_for_reviews1$Var1,main="Numero de utilizadores para revisao",xlab = "Valores",col = c("yellow","blue"))
dev.off()
#################################################################################
lingua<-as.data.frame(table(ficheiro[20]),stringsAsFactors = FALSE)
lingua[1,][1]<-"Desconhecida"
lingua1<-rbind(lingua[lingua$Var1=="Desconhecida",],
nrow(lingua[(lingua$Var1!="Desconhecida"),]))
lingua1[1,][1]<-"Desconhecida"
lingua1[2,][1]<-"Restantes"
png("lingua.png",width = 1080,height = 1080)
barplot(lingua1$Freq,names.arg = lingua1$Var1,main="Lingua dos filmes",xlab = "Lingua",col = c("yellow","blue"))
dev.off()
################################################################################
country<-as.data.frame(table(ficheiro[21]),stringsAsFactors = FALSE)
country[1,][1]<-"Desconhecido"
country1<-rbind(country[country$Var1=="Desconhecido",],
nrow(country[(country$Var1!="Desconhecido"),]))
country1[1,][1]<-"Desconhecido"
country1[2,][1]<-"Restantes"
png("country.png",width = 1080,height = 1080)
barplot(country1$Freq,names.arg = country1$Var1,main="Paises dos filmes",xlab = "Paises",col = c("yellow","blue"))
dev.off()
#################################################################################
ficheiro[is.na(ficheiro[23]),23]<--1
budget<-as.data.frame(table(ficheiro[23]),stringsAsFactors = FALSE)
budget1<-rbind(budget[budget$Var1==-1,],
sum(budget[(budget$Var1!=-1),]$Freq))
budget1[1,][1]<-"Desconhecido"
budget1[2,][1]<-"Restantes"
png("budget.png",width = 1080,height = 1080)
barplot(budget1$Freq,names.arg = budget1$Var1,main="Despesas",xlab = "Valores",col = c("yellow","blue"))
dev.off()
#################################################################################
ficheiro[is.na(ficheiro[24]),24]<--1
title_year<-as.data.frame(table(ficheiro[24]),stringsAsFactors = FALSE)
title_year1<-rbind(title_year[title_year$Var1==-1,],
sum(title_year[(title_year$Var1!=-1),]$Freq))
title_year1[1,][1]<-"Desconhecido"
title_year1[2,][1]<-"Restantes"
png("title_year.png",width = 1080,height = 1080)
barplot(title_year1$Freq,names.arg = title_year1$Var1,main="Anos dos filmes",xlab = "Anos",col = c("yellow","blue"))
dev.off()
#################################################################################
ficheiro[is.na(ficheiro[25]),25]<--1
actor_2_facebook_likes<-as.data.frame(table(ficheiro[25]),stringsAsFactors = FALSE)
actor_2_facebook_likes1<-rbind(actor_2_facebook_likes[actor_2_facebook_likes$Var1==-1,],
actor_2_facebook_likes[actor_2_facebook_likes$Var1==0,],
sum(actor_2_facebook_likes[(actor_2_facebook_likes$Var1!=-1 & actor_2_facebook_likes$Var1!=0 ),]$Freq))
actor_2_facebook_likes1[1,][1]<-"Desconhecidos"
actor_2_facebook_likes1[2,][1]<-"Igual a zero"
actor_2_facebook_likes1[3,][1]<-"Superiores a zero"
png("actor_2_facebook_likes.png",width = 1080,height = 1080)
barplot(actor_2_facebook_likes1$Freq,names.arg = actor_2_facebook_likes1$Var1,main="Número de likes do actor 2",xlab = "Número",col = c("yellow","blue"))
dev.off()
#################################################################################
ficheiro[is.na(ficheiro[26]),26]<--1
imdb_score<-as.data.frame(table(ficheiro[26]),stringsAsFactors = FALSE)
################################################################################
ficheiro[is.na(ficheiro[28]),28]<--1
movie_facebook_likes<-as.data.frame(table(ficheiro[28]),stringsAsFactors = FALSE)
movie_facebook_likes1<-rbind(movie_facebook_likes[movie_facebook_likes$Var1==0,],
sum(movie_facebook_likes[(movie_facebook_likes$Var1!=0),]$Freq))
movie_facebook_likes1[1,][1]<-"Zero"
movie_facebook_likes1[2,][1]<-"Restantes"
png("cast_total_facebook_likes.png",width = 1080,height = 1080)
barplot(movie_facebook_likes1$Freq,names.arg = movie_facebook_likes1$Var1,main="Total de like do filme",xlab = "Valores",col = c("yellow","blue"))
dev.off()
##################################################################################
png("graficaoPequeno.png",width = 2080,height = 2080)
par(mfrow = c(2,2))
barplot(imdb_score$Freq,names.arg = imdb_score$Var1,main="Imdb_Score",xlab = "Raking")
barplot(title_year$Freq,names.arg = title_year$Var1,main="Title_year",xlab = "Ano")
barplot(gross$Freq,names.arg = gross$Var1,main="Gross",xlab = "Valor")
barplot(budget$Freq,names.arg = budget$Var1,main="Budget",xlab = "Despesa")
dev.off()
save.image("trabalho")
|
e93865a1d08b9edc7a2235e9e4777217da6b136d
|
66fc06eafc6e93e3720334251874e5ee4195d6f7
|
/man/TF_to_posneg.Rd
|
ecf9402aba77ffb48b2f32426bfa038d06f54808
|
[
"MIT"
] |
permissive
|
ianhandel/epidemr
|
44361127850aebeab12e870c70486c16fc520f26
|
ce4077d5dadf0edb9225b3eaef6331dbd82fcb70
|
refs/heads/master
| 2022-05-24T11:47:03.202447
| 2022-04-13T08:17:24
| 2022-04-13T08:17:24
| 114,266,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
TF_to_posneg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{TF_to_posneg}
\alias{TF_to_posneg}
\title{Utility function to convert TRUE/FALSE to positive/negative}
\usage{
TF_to_posneg(x)
}
\arguments{
\item{x}{Input vector}
}
\description{
Utility function to convert TRUE/FALSE to positive/negative
}
|
2b7ac43383cdc804dadfdf1a2fdb0e071248694c
|
83e19da2b2847b36f2a82e8bf7f38108a4286974
|
/man/pyFunction.Rd
|
427d147d03fb95b041986d7710670f941669f72c
|
[] |
no_license
|
cran/PythonInR
|
dc710e7c21e3d67bee9644517a2fa9c138ee62cd
|
064930845cbab2e05bd25ed17a270547521b2a6f
|
refs/heads/master
| 2021-07-13T05:33:42.412009
| 2020-06-21T19:50:06
| 2020-06-21T19:50:06
| 38,826,198
| 12
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 745
|
rd
|
pyFunction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PyObject.R
\name{pyFunction}
\alias{pyFunction}
\title{creates a virtual Python function}
\usage{
pyFunction(key, regFinalizer = FALSE)
}
\arguments{
\item{key}{a string specifying the name of a Python method/function.}
\item{regFinalizer}{a logical indicating if a finalizer should be
be registered, the default value is FALSE.}
}
\description{
The function pyFunction creates a new object of type
pyFunction based on a given key.
}
\details{
The function pyFunction makes it easy to create interfaces
to Python functions.
}
\examples{
\dontshow{PythonInR:::pyCranConnect()}
if ( pyIsConnected() ){
pySum <- pyFunction("sum")
pySum(1:3)
}
}
|
839884354642288bb6e158d3ae83937ed0842fa8
|
6e47ec86ea307588d08a9bbc3094e729bea70966
|
/R/db_execute.R
|
378eaddcb3866202db647d2ed47ebf0d22341ac3
|
[] |
no_license
|
MohoWu/databaser
|
8baa7bdfb82671f8d5da359c519abbfc145dda90
|
4d80aeba7bfa8635d245abd1e0dff7b3ce8b5562
|
refs/heads/master
| 2021-07-12T22:25:14.590383
| 2017-10-18T13:51:05
| 2017-10-18T13:51:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
db_execute.R
|
#' Function to execute statements on a database.
#'
#' \code{db_execute} is a wrapper for \code{DBI::dbExecute} but is vectorised
#' over \code{statement}.
#'
#' @author Stuart K. Grange
#'
#' @param con Database connection.
#'
#' @param statement Statement to send to \code{con}.
#'
#' @param ... Other parameters passed on to methods.
#'
#' @param progress Type of progress bar to display. Default is \code{"none"}.
#'
#' @return Invisible.
#'
#' @export
db_execute <- function(con, statement, ..., progress = "none") {
  # Execute each statement in turn (vectorised over `statement`),
  # optionally showing a plyr progress bar.
  plyr::l_ply(
    statement,
    function(x) DBI::dbExecute(con, x, ...),
    .progress = progress
  )
  # Return invisibly, matching the documented "@return Invisible" contract;
  # previously the function's value was whatever l_ply happened to return.
  invisible(NULL)
}
|
321dbcb65548de539a9d849392aac0ec10dc4dfd
|
b9e54258e540f0a0447045729bb4eecb0e490426
|
/Bölüm 23 - Makine Öğrenmesi VI - Neural Networks : Yapay Sinir Ağları/25.11 - Neural Network - Model Eğitim İşlemleri.R
|
5b428a8569a60d18563b2daf3f243ba336968579
|
[] |
no_license
|
sudedanisman/RUdemy
|
b36b67b9e875206a5424f33cc784fd13506f8d8d
|
28a9814706873f5d2e5985e4ba795354144d52c4
|
refs/heads/master
| 2023-01-30T01:54:26.321218
| 2020-12-14T11:36:00
| 2020-12-14T11:36:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,417
|
r
|
25.11 - Neural Network - Model Eğitim İşlemleri.R
|
### Neural Networks - model training
install.packages("neuralnet")
library(neuralnet)
# Load the diabetes data and treat the outcome as a classification target.
diabetes <- read.csv("diabetes.csv")
View(diabetes)
modelData <- diabetes
modelData$Outcome <- as.factor(modelData$Outcome)
# Centre and scale the predictors before training.
library(caret)
modelScale <- preProcess(modelData , method = c("center" , "scale"))
modelDataScaled <- predict(modelScale , modelData)
View(modelDataScaled)
# Reproducible 75/25 train/test split.
set.seed(165)
trainIndex <- sample(1:nrow(modelDataScaled) , size = 0.75*nrow(modelDataScaled))
trainSet <- modelDataScaled[ trainIndex ,]
testSet <- modelDataScaled[ -trainIndex ,]
nrow(trainSet)
nrow(testSet)
table(trainSet$Outcome)
table(testSet$Outcome)
## NN model training
?neuralnet
modelNN_1 <- neuralnet(Outcome ~ . , data = trainSet ,
                       hidden = 1 , threshold = 0.01 ,
                       act.fct = "logistic",
                       linear.output = FALSE
)
modelNN_1
modelNN_2 <- neuralnet(Outcome ~ . , data = trainSet ,
                       hidden = c(2,2) , threshold = 0.01 ,
                       act.fct = "logistic",
                       linear.output = FALSE
)
# Bug fix: plot(modelNN_2) originally appeared BEFORE modelNN_2 was defined,
# so running the script top-to-bottom errored; it is now plotted after fitting.
plot(modelNN_2)
modelNN_3 <- neuralnet(Outcome ~ . , data = trainSet ,
                       hidden = c(3,3) , threshold = 0.08 ,
                       rep = 2,
                       act.fct = "logistic",
                       linear.output = FALSE
)
plot(modelNN_3)
|
b568314766f0df4710bf4e4a7ead4edeeae5a156
|
40c40770f7ff82c30fa6e58d948f9b90508758c8
|
/man/MP_treeplot.Rd
|
1a15da74a82fda39df0f5eb0b3cf46f6db837a5d
|
[] |
no_license
|
NatalieKAndersson/DEVOLUTION
|
c61a5ff65d1322f1ec4794435d2c17bf6f2e65fa
|
7ec9cec2ef04cdacb47db6413b41c352b0460d7f
|
refs/heads/master
| 2023-08-09T20:13:18.565107
| 2023-07-12T13:05:39
| 2023-07-12T13:05:39
| 297,145,258
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 523
|
rd
|
MP_treeplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEVOLUTION.R
\name{MP_treeplot}
\alias{MP_treeplot}
\title{Visualising the MP-tree.}
\usage{
MP_treeplot(MP_tree, limitmp, col)
}
\arguments{
\item{MP_tree}{The MP-tree object obtained from mp_tree()}
\item{limitmp}{The size of the plotting environment. Choose so that the entire phylogeny is clearly visible.}
\item{col}{If the phylogeny should be colored or not.}
}
\value{
The MP tree.
}
\description{
#' Visualizing the MP-tree object.
}
|
2eba7008528434f9571717ed9b79aad28760e060
|
1dc583069d13fa3f4b8f3894d9cc8aecae3fecac
|
/taxonomy_wrangling.R
|
212ee438736e3d54c4ec4a5ec40c96ff3526eb2a
|
[
"MIT"
] |
permissive
|
xxz19900/Schloss_PacBio16S_PeerJ_2015
|
3122bc35f8129b9f02afd1f6cc1533103c5f5d76
|
65432c7c55ea39e19109bec51ddc07e482aae9b0
|
refs/heads/master
| 2021-07-21T18:53:09.734364
| 2017-11-01T11:24:48
| 2017-11-01T11:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,562
|
r
|
taxonomy_wrangling.R
|
regions <- c("v4", "v13", "v35", "v15", "v16", "v19")
# this will go through each vector of taxnomic levels and extract
# bootstrap value. It returns whether a taxonomic level had a bootstrap
# valu over the cutoff (80%)
# Count the taxonomic levels whose bootstrap support meets the cutoff.
# Each element of `nameConf` looks like "Name(NN)"; "unclassified" entries
# carry no support value and are counted as 0.
countGoodBootstraps <- function(nameConf, cutoff=80){
  support <- gsub(".*\\((\\d*)\\).*", "\\1", nameConf)
  support[support == "unclassified"] <- 0
  sum(as.numeric(support) >= cutoff)
}
# This function will read in a taxonomy file and parse it for the
# sequence name and the depth of the classification that has a
# boostrap value over the cutoff (80%)
# Read a mothur-style taxonomy file and, for every sequence, count how many
# taxonomic levels were classified with bootstrap support >= `cutoff`.
#
# The file alternates whitespace-separated tokens: odd tokens are sequence
# names and even tokens are taxonomy strings such as "a(98);b(80);...".
#
# Args:
#   taxFileName: path to the taxonomy file.
#   cutoff: minimum bootstrap percentage to count a level as classified.
# Returns: named integer vector of classification depths, one per sequence.
getDepths <- function(taxFileName, cutoff=80){
  tax <- scan(taxFileName, what="", quiet =T)
  lines <- 1:length(tax)
  seqNames <- tax[lines %% 2 == 1]
  taxString <- tax[lines %% 2 == 0]
  taxList <- strsplit(taxString, ";") #take the taxnomy string and split it by the ; into a list
  # Bug fix: `cutoff` was accepted but never forwarded, so any non-default
  # cutoff was silently ignored (countGoodBootstraps always used 80).
  depths <- unlist(lapply(taxList, countGoodBootstraps, cutoff = cutoff))
  names(depths) <- seqNames
  return(depths)
}
# go thorugh each region and read in the data from each of the taxonomy files and get the
# classification depth for each sequence read
for(r in regions){
# here we're looking at the rdp, gg, and silva classifications for the
# "observed" data that used UCHIME to call chimeras
rdpFileName <- paste("analysis/", r, "/", r, ".trim.unique.good.filter.unique.precluster.pick.pds.wang.taxonomy", sep="")
rdp <- getDepths(rdpFileName)
ggFileName <- paste("analysis/", r, "/", r, ".trim.unique.good.filter.unique.precluster.pick.gg.wang.taxonomy", sep="")
gg <- getDepths(ggFileName)
silvaFileName <- paste("analysis/", r, "/", r, ".trim.unique.good.filter.unique.precluster.pick.bacteria.wang.taxonomy", sep="")
silva <- getDepths(silvaFileName)
# output to a file ending in *.tax.compare a list of all sequences and the
# classification depth for each sequence by the database that was used
write.table(cbind("rdp" = rdp, "gg" = gg, "silva"=silva), file=paste("analysis/", r, "/", r, ".tax.compare", sep=""), quote=F)
# i'm not sure what the following lines are for...
# rdpMock <- paste("analysis/", r, "/", r, ".mock.precluster.pds.wang.taxonomy", sep="")
# rdp <- getDepths(rdpMock)
#
# ggMock <- paste("analysis/", r, "/", r, ".mock.precluster.gg.wang.taxonomy", sep="")
# gg <- getDepths(ggMock)
#
# silvaMock <- paste("analysis/", r, "/", r, ".mock.precluster.bacteria.wang.taxonomy", sep="")
# silva <- getDepths(silvaMock)
#
# write.table(cbind("rdp" = rdp, "gg" = gg, "silva"= silva), file=paste("analysis/", r, "/mock.tax.compare", sep=""), quote=F)
}
# here we want to merge the classification depth that we got from each sequence
# as well as the library that each sequence showed up in. This calculates the
# fraction of unique sequences that classified to a given depth.
getDepthByLibrary <- function(region){
  # For one region, return the percentage of unique sequences in each library
  # (mock, human, mouse, soil) that classified to each taxonomic depth (0-7)
  # under each reference database (rdp, gg, silva).
  #
  # Returns a 12x8 matrix: rows named "<sample>.<database>", grouped by
  # database (all rdp rows first, then gg, then silva) to match the
  # composite$database / composite$sample columns built downstream;
  # columns are the depths "0".."7".
  #
  # This replaces twelve copy-pasted summary lines with one helper.
  # get the frequency data...
  count.file <- paste("analysis/", region, "/", region, ".trim.unique.good.filter.unique.precluster.pick.count_table", sep="")
  count.table <- read.table(file=count.file, header=TRUE, row.names=1)
  # get the read depth data...
  depth.file <- paste("analysis/", region, "/", region, ".tax.compare", sep="")
  depth.table <- read.table(file=depth.file, header=TRUE, row.names=1)
  databases <- c("rdp", "gg", "silva")
  samples <- c("mock", "human", "mouse", "soil")
  # use a fixed 0:7 level set so summary() always yields the same 8 bins,
  # even when some depths are absent from a given library
  for (db in databases) {
    depth.table[[db]] <- factor(depth.table[[db]], levels=0:7)
  }
  # percentage of a library's unique sequences at each depth for one database
  pct_by_depth <- function(db, smp) {
    in.library <- depth.table[count.table[[smp]] > 0, ]
    100 * summary(in.library[[db]]) / nrow(in.library)
  }
  # one row per sample within each database, stacked database-by-database
  out <- do.call(rbind, lapply(databases, function(db) {
    t(sapply(samples, function(smp) pct_by_depth(db, smp)))
  }))
  rownames(out) <- paste(rep(samples, times=length(databases)),
                         rep(databases, each=length(samples)), sep=".")
  out
}
# for each region get the percentage of sequences in each library that
# classified to each taxonomic level for each database considered.
# Each call to getDepthByLibrary() returns 12 rows: 4 samples x 3 databases.
composite <- data.frame(matrix(rep(0, 8*6*12), ncol=8))
# columns are the classification depths 0 (unclassified) through 7
colnames(composite) <- 0:7
composite[1:12,] <- getDepthByLibrary("v4");
composite[13:24,] <- getDepthByLibrary("v35")
composite[25:36,] <- getDepthByLibrary("v13")
composite[37:48,] <- getDepthByLibrary("v15")
composite[49:60,] <- getDepthByLibrary("v16")
composite[61:72,] <- getDepthByLibrary("v19")
# format the final output file: the label columns below must mirror the row
# order produced by getDepthByLibrary (databases in rdp/gg/silva order, each
# containing mock/human/mouse/soil)
composite$region <- c(rep("v4", 12), rep("v35", 12), rep("v13", 12), rep("v15", 12), rep("v16", 12), rep("v19", 12))
composite$database <- rep(c(rep("rdp", 4), rep("gg", 4), rep("silva", 4)), 6)
composite$sample <- rep(c("mock", "human", "mouse", "soil"), 18)
composite$total <- composite[,"6"] + composite[,"7"] #make a total column that has the % of genus and species-level names
write.table(file="taxonomy.depth.analysis", composite, quote=F)
|
504dc4b5d4fd8ef3a29a36637e7d726c3254b6b7
|
c9b151232ad188a38469473ec765c0f7a1defe7c
|
/R/entropy.R
|
d85b74e8a063bbadf9aad42240dee4afed3ebffe
|
[] |
no_license
|
obreschkow/cooltools
|
3b2c46ac539962153c3a9aa8fbeaeee185455015
|
3b212d077537220aec5b8162f04ed85f7f0af996
|
refs/heads/main
| 2023-08-15T02:14:07.742064
| 2023-07-24T08:47:36
| 2023-07-24T08:47:36
| 184,692,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 760
|
r
|
entropy.R
|
#' Information entropy
#'
#' @description Computes the information (Shannon) entropy
#' H = -sum(p*log_b(p)) of a probability vector p.
#'
#' @param p vector of probabilities; typically normalized, such that sum(p)=1.
#' @param b base of the logarithm (default is e)
#' @param normalize logical flag. If TRUE (default), the vector p is automatically normalized.
#'
#' @return Returns the information entropy in units that depend on b. If b=2, the units are bits; if b=exp(1), the units are nats; if b=10, the units are dits.
#'
#' @author Danail Obreschkow
#'
#' @export
entropy = function(p, b=exp(1), normalize=TRUE) {
  if (!is.vector(p)) stop('p must be a vector.')
  if (normalize) {
    p = p/sum(p)
  }
  # The tiny additive offset keeps log() finite for zero probabilities;
  # such terms then contribute (numerically) nothing to the sum.
  weighted.logs = p*log(p+1e-300)
  -sum(weighted.logs)/log(b)
}
|
a81e3d8aebc905bb74a15d831801ca4dba720729
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.lambda/man/get_function.Rd
|
132f8417042ad4bdc0eabcfe228541206cc9e10f
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,549
|
rd
|
get_function.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lambda_operations.R
\name{get_function}
\alias{get_function}
\title{Returns information about function or function version, with a link to download the deployment package that's valid for 10 minutes}
\usage{
get_function(FunctionName, Qualifier = NULL)
}
\arguments{
\item{FunctionName}{[required] The name of the Lambda function, version, or alias.
\strong{Name formats}
\itemize{
\item \strong{Function name} - \code{my-function} (name-only), \code{my-function:v1} (with alias).
\item \strong{Function ARN} - \code{arn:aws:lambda:us-west-2:123456789012:function:my-function}.
\item \strong{Partial ARN} - \code{123456789012:function:my-function}.
}
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.}
\item{Qualifier}{Specify a version or alias to get details about a published version of the function.}
}
\description{
Returns information about function or function version, with a link to download the deployment package that's valid for 10 minutes. If you specify a function version, only details specific to that version are returned.
}
\section{Accepted Parameters}{
\preformatted{get_function(
FunctionName = "string",
Qualifier = "string"
)
}
}
\examples{
# This operation retrieves a Lambda function's event source mapping
\donttest{get_function(
FunctionName = "myFunction",
Qualifier = "1"
)}
}
|
29f7145d559d98ed60fd65656397946fa047bf2f
|
5fcdd3a20bf19b949f1eda484d5af0b35df7a8de
|
/2022_mlb/R/standings.R
|
e12b3ea5f672045d83d4268d542b817530efa255
|
[] |
no_license
|
TK2575/threeIdiots
|
e28c5c2dc1837320342c682e45416bdffa3b5d3e
|
11b0fb512326a2c6a413bccb94798bbfa48934cc
|
refs/heads/main
| 2023-08-24T15:15:19.182497
| 2023-07-23T06:30:47
| 2023-07-23T06:30:47
| 178,906,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,375
|
r
|
standings.R
|
library(baseballr)
library(tidyverse)
library(janitor)
library(magrittr)
library(purrr)
library(rvest)
get_weekly_standings <- function(date=Sys.Date()) {
  # Pull scraped standings for every Sunday from the start of the 2019
  # season through `date`, one snapshot per week, stacked into one frame.
  #
  # Bug fix: the previous version ignored `date` entirely and passed each
  # Sunday into get_standings()'s FIRST argument (`html_file`), so the
  # dates were treated as file paths instead of snapshot dates.
  map(.x = sundays(thru = date),
      .f = function(d) get_standings(html_file = NULL, date = d, from = FALSE)) %>%
    bind_rows()
}
get_standings <- function(html_file, date=Sys.Date(), from=FALSE) {
  # One full MLB standings snapshot: fetch each division's table for the
  # given date and stack the six results into a single data frame.
  divisions <- c(
    "NL East",
    "NL Central",
    "NL West",
    "AL East",
    "AL Central",
    "AL West"
  )
  per_division <- map(
    .x = divisions,
    .f = get_standings_division,
    date = date,
    from = from,
    html_file = html_file
  )
  bind_rows(per_division)
}
get_standings_division <- function(division, date = Sys.Date(), from = FALSE, html_file = NULL) {
  # Fetch one division's standings, either live from baseball-reference
  # (html_file = NULL) or parsed from a locally saved HTML page.
  raw <- if (is.null(html_file)) {
    standings_on_date_bref(date = date,
                           from = from,
                           division = division)
  } else {
    standings_on_date_html(date = date,
                           from = from,
                           division = division,
                           html_file = html_file)
  }
  tidied <- raw %>%
    janitor::clean_names() %>%
    as_tibble() %>%
    mutate(division = division)
  # Stamp each row with the snapshot date; the column name records whether
  # records are cumulative up to the date (`as_of`) or from it onward (`since`).
  if (from) {
    tidied %>% mutate(since = date)
  } else {
    tidied %>% mutate(as_of = date)
  }
}
sundays <- function(thru = Sys.Date()) {
  # Every Sunday from the 2019 season opener (2019-03-31, a Sunday) through
  # `thru`; if `thru` itself is not one of those Sundays it is appended so
  # the most recent snapshot date is always included.
  weekly <- seq(
    from = as.Date("2019-03-31"),
    to = thru,
    by = "1 week"
  )
  if (thru %in% weekly) {
    return(weekly)
  }
  c(weekly, thru)
}
standings_on_date_html <- function(date, division, html_file, from = FALSE) {
  # Parse a saved baseball-reference "standings on date" HTML page and
  # return the standings table for one division.
  #
  # date: snapshot date, used only to build the table-name labels
  # division: one of the names in all_divisions below
  # html_file: path/connection to the saved HTML page
  # from: FALSE -> records up to `date`; otherwise records after `date`
  all_divisions <- c("AL East", "AL Central", "AL West", "AL Overall",
                     "NL East", "NL Central", "NL West", "NL Overall")
  if (!(division %in% all_divisions)) {
    stop("Please select a division in the following: \n'AL East', 'AL Central', 'AL West', 'AL Overall',\n'NL Central', 'NL West', 'NL Overall'")
  }
  html_doc <- html_file %>% xml2::read_html()
  tables <- html_doc %>% rvest::html_elements("table")
  # The 16 standings tables sit at the END of the page. Note min > max here,
  # so tables[min:max] takes the LAST 16 tables in REVERSE document order;
  # the hard-coded table_names vector below is written in that reversed
  # order on purpose -- keep the two in sync if this indexing ever changes.
  min <- length(tables)
  max <- length(tables) - 15
  tables <- tables[min:max] %>% rvest::html_table()
  table_names <- c("NL Overall", "AL Overall", "NL West",
                   "NL Central", "NL East", "AL West", "AL Central",
                   "AL East", "NL Overall", "AL Overall", "NL West",
                   "NL Central", "NL East", "AL West", "AL Central",
                   "AL East")
  # First 8 tables hold records AFTER `date`, last 8 records UP TO `date`.
  table_names[1:8] <- paste0(table_names[1:8], "_after_",
                             date)
  table_names[9:16] <- paste0(table_names[9:16], "_up to_",
                              date)
  names(tables) <- table_names
  after <- tables[1:8]
  current <- tables[9:16]
  # Pick the requested division from the appropriate half of the page.
  if (from == FALSE) {
    div_date <- paste0(division, "_up to_", date)
    x <- current[div_date]
    x <- x[[1]]
  } else if (from != FALSE) {
    div_date <- paste0(division, "_after_", date)
    x <- after[div_date]
    x <- x[[1]]
  }
  # Decorate with baseballr-style class/attributes so downstream code treats
  # this like a genuine baseballr result.
  x <- x %>% mock_baseballr_data("MLB Standings on Date data from baseball-reference.com",
                                 Sys.time())
  x
}
mock_baseballr_data <- function(df, type, timestamp) {
  # Re-create the class and attribute decoration that baseballr applies to
  # its returned data frames, so scraped tables interoperate with code that
  # expects genuine baseballr results.
  decorated <- tidyr::as_tibble(df)
  class(decorated) <- c("baseballr_data", "tbl_df", "tbl", "data.table",
                        "data.frame")
  attr(decorated, "baseballr_timestamp") <- timestamp
  attr(decorated, "baseballr_type") <- type
  decorated
}
|
12d4881ef1a1b2857db51a1f011c7994f0be6382
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/H2OSegmentModelsFuture-class.Rd
|
bb3bc19c94da78d8423d1a34031c8545eb67db1c
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 591
|
rd
|
H2OSegmentModelsFuture-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{class}
\name{H2OSegmentModelsFuture-class}
\alias{H2OSegmentModelsFuture-class}
\title{H2O Future Segment Models}
\description{
A class to contain the information for background segment models jobs.
}
\section{Slots}{
\describe{
\item{\code{job_key}}{a character key representing the identification of the job process.}
\item{\code{segment_models_id}}{the final identifier for the segment models collections}
}}
\seealso{
\linkS4class{H2OSegmentModels} for the final segment models types.
}
|
b28a25cabca4ab1beac67dcbe6cc82a983ca6fb5
|
8864d28e6a3c8188f353702e64077c49f1fce412
|
/man/compute_mad.Rd
|
65b570083865419e0d843aba8fc1a49746f27d8f
|
[
"MIT"
] |
permissive
|
biosurf/cyCombine
|
8e63e800dac9784a017bccf198af645a5f4ce0f3
|
58f4dc5771e2fdb85a8b5675ef68ce9101d8b7c5
|
refs/heads/master
| 2023-08-27T03:56:14.451540
| 2022-11-11T07:16:29
| 2022-11-11T07:16:29
| 291,703,738
| 11
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 752
|
rd
|
compute_mad.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evaluate_performance.R
\name{compute_mad}
\alias{compute_mad}
\title{Compute MAD}
\usage{
compute_mad(df, cell_col = "label", batch_col = "batch", markers = NULL)
}
\arguments{
\item{df}{Dataframe to compute the MADs of}
\item{cell_col}{Column name of df that contains cell population labels (or clusters)}
\item{batch_col}{Column name of df that contains batch numbers}
\item{markers}{Vector of the markers to calculate EMD for. If NULL, \code{\link{get_markers}} will be used to find markers}
}
\description{
Given a dataframe, the Median Absolute Deviation (MAD) is calculated per-marker, per-batch
}
\seealso{
Other mad:
\code{\link{evaluate_mad}()}
}
\concept{mad}
|
359a37b93d43650f545b7cac3d46bb32f84bc3a9
|
5769b1d819612c1a7bd53fbdcde1419a57b263f4
|
/Day 5/DataStructures, DataTypes and Coersion.R
|
c14adb44ca50dda392b8d719182267e097d4c4e6
|
[] |
no_license
|
Nashie-R/100DaysOfCodingR.
|
b41420a78f4b68420d650a72be466106fc6efbf8
|
7148008f794c97410169f4de0c849c4ad434b616
|
refs/heads/master
| 2020-12-13T07:00:11.255074
| 2020-05-13T11:38:35
| 2020-05-13T11:38:35
| 234,341,986
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,234
|
r
|
DataStructures, DataTypes and Coersion.R
|
# ---
# title: "Day 5"
# author: "Nashipae Waweru"
# date: "15/JAN/2020"
# segment: "Accessing Data"
# ---
# Practice script: R data types, data structures, and coercion.
#DATA TYPES####
#Numeric: integer,single,double
#Character
#Logical
#Complex numbers
#Raw
#DATA STRUCTURES###
#Vector: one or more numbers in 1D array
#Matrix: Two dimensions... #Array: 3 or more dimensions
#Data Frame: Vectors of multiple types, all of same length
#List: Most flexible, ordered collection of elements
#COERCION IS GOOD!!!: CHANGING OF AN ELEMENT FROM ONE TYPE TO ANOTHER
#DATA TYPES ####
#LOAD PACKAGES###
library(knitr)
library(tidyverse)
#Numeric (bare numbers are doubles unless suffixed with L)
n <- 15
n
typeof(n)
n1 <- 1.5
n1
typeof(n1)
#Character
# NOTE(review): `c` shadows the base combine function within this session.
c <- "Nashipae"
c
typeof(c)
c2 <- "I love r"
c2
typeof(c2)
#Logical
l <- TRUE
l
typeof(l)
l1 <- F
l1
typeof(l1)
#DATA STRUCTURES###
#Vectors####
v <- c(1,2,3,4,5)
v
is.vector(v)
v1 <- c("a","b","c","d")
v1
is.vector(v1)
# note: these are character strings, not logicals
v2 <- c("T", "T", "F")
v2
v3 <- c("TRUE","FALSE")
v3
is.vector(v3)
v4 <- c(TRUE, FALSE,TRUE,TRUE)
v4
##MATRIX####
# matrix() fills column-by-column unless byrow = TRUE
m1 <- matrix(c(T,T,F,F,T,F), nrow = 2)
m1
m2 <- matrix(c("a","b",
"c","d"),
nrow = 2,
byrow = F)
m2
####ARRAY###
# 3-dimensional array: 4 rows x 3 columns x 2 layers
a1 <- array(c(1:24), c(4,3,2))
a1
##DATA FRAME####
#Combine vectors of the same length
vNo <- c(1,2,3,4)
vCha <- c("a","b","c","d")
vLog <- c(T,F,T,F)
# cbind() coerces everything to the common type (character here)
data <- cbind(vNo, vCha, vLog)
data #Matrix of one data type
dataf <- as.data.frame(data)
dataf
##LIST####
d <- c(1,2,3,4,5)
d1 <- c("a", "b")
d2 <- c(T,T,F,F)
list1 <- list(d, d1, d2)
list1
#List within list
list2 <- list(d, d1, d2, list1)
list2
#COERCION###
#Automatic Coercion: Goes to the "least restrictive" data type##
(coerce1 <- c(1, "a", T))
typeof(coerce1)
## Coerce numeric to integer#
(coerce2 <- 5)
typeof(coerce2)
(coerce3 <- as.integer(coerce2))
typeof(coerce3)
### Coerce character to numeric####
(coerse4 <- c("a", "b", "c", "d"))
typeof(coerse4)
# letters cannot be parsed as numbers: yields NAs with a warning
(coerse5 <- as.numeric(c("a", "b", "c", "d")))
typeof(coerse5)
coerse5
#Coerce matrix into a dataframe
(coerce6 <- matrix(1:9, nrow = 3))
is.matrix(coerce6)
(coerce7 <- as.data.frame(coerce6))
is.data.frame(coerce7)
|
faee4dd3fbee920a60df0a2c2023f159fd22b553
|
486012a2ab88a1c34f00687f37454b9b71c7315e
|
/tests/testthat/helper-functions.R
|
7a415718190cd80906e38520599a70ad8ba5bf75
|
[] |
no_license
|
DivadNojnarg/periscope
|
fd61f3f23f1bf9a89288245623d37e9a57641cc0
|
e3c76bf51a565e35df7924c6a319336fbda8736f
|
refs/heads/master
| 2023-02-17T14:44:40.637555
| 2021-01-14T17:44:17
| 2021-01-14T17:44:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
helper-functions.R
|
# helper functions for tests
# (helper-*.R files are sourced before the test files run -- testthat
# convention; confirm against the project's test setup)
library(shiny)
# Package source directory, relative to the working directory used when the
# tests are launched interactively.
source_path <- "periscope/R"
if (interactive()) {
  # When run by hand (not via R CMD check), attach testthat/periscope and
  # source every file under source_path so unexported functions are visible.
  library(testthat)
  library(periscope)
  invisible(lapply(list.files(source_path), FUN = function(x) source(file.path(source_path, x))))
}
|
3d1ae5dc854c2b29e0d5a96e265406e442e1f65e
|
593d4718f37df669f0e67c94b68c554a94eab19c
|
/01 Data/Titanic Analysis_GP1.R
|
a521f782743a850687f1e8d28a15a143339b64f8
|
[] |
no_license
|
juanitotaveras/DV_RProject1
|
da80697027d6c8723f7d80dc715c720ce8b59903
|
b326d49e53cba0a67ffc97848d175b4910501781
|
refs/heads/master
| 2021-01-19T08:54:59.388573
| 2015-09-16T03:07:00
| 2015-09-16T03:07:00
| 42,364,371
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,896
|
r
|
Titanic Analysis_GP1.R
|
require("jsonlite")
require("RCurl")
# Change the USER and PASS below to be your UTEid
df <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from titanic where sex is not null and fare>100"'),httpheader=c(DB='jdbc:oracle:thin:@sayonara.microlab.cs.utexas.edu:1521:orcl', USER='C##cs347_professor', PASS='orcl_professor', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE) ))
df
#summary(df)
#head(df)
ggplot(data = df, mapping = aes(x = as.numeric(as.character(FARE)), fill=SURVIVED)) +
layer(geom = "histogram",
stat = "bin",
stat_params = list(binwidth = 0.1),
mapping = aes(y = ..count..))
ggplot(data = df, mapping = aes(x = PCLASS)) +
scale_x_continuous(limit = c(0,3))
require(extrafont)
ggplot(df, aes(SEX, AGE, color=FARE)) +
geom_boxplot() + geom_jitter()
ggplot() +
coord_cartesian() +
scale_x_continuous() +
scale_y_continuous() +
facet_grid(~SURVIVED, labeller=label_both) +
labs(title='Titanic') +
labs(x="Age", y=paste("Fare")) +
layer(data=df,
mapping=aes(x=as.numeric(as.character(AGE)), y=as.numeric(as.character(FARE)), color=SEX),
stat="identity",
stat_params=list(),
geom="point",
geom_params=list(),
#position=position_identity()
position=position_jitter(width=0.3, height=0)
)
ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_continuous() +
#facet_grid(PCLASS~SURVIVED) +
labs(title='Titanic') +
labs(x="SURVIVED", y=paste("FARE")) +
layer(data=df,
mapping=aes(x=SEX, y=as.numeric(as.character(FARE)), color=as.character(SURVIVED)),
stat="identity",
stat_params=list(),
geom="point",
geom_params=list(),
#position=position_identity()
position=position_jitter(width=0.3, height=0)
)
|
9cc7a28c2a14fe63d7febd0a4c5a1e57cf4b9611
|
3bef70f4b3d6283f2b2bfb44ccdfbf9b28c6429d
|
/man/read_ms_access.Rd
|
04c9ac9c3ee8ded70b358003153145306cc068d9
|
[
"MIT"
] |
permissive
|
KWB-R/dwc.wells
|
4c1594ea66b1792c6c955b98418982edf80675c1
|
45e8670647c4771fe70d59db0f7cfd1e80242361
|
refs/heads/main
| 2023-04-10T01:24:40.973815
| 2022-07-12T13:42:20
| 2022-07-12T13:42:20
| 351,021,733
| 0
| 0
|
MIT
| 2022-10-16T09:17:19
| 2021-03-24T09:35:15
|
R
|
UTF-8
|
R
| false
| true
| 457
|
rd
|
read_ms_access.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_functions.R
\name{read_ms_access}
\alias{read_ms_access}
\title{read table from MS Access data base via odbc connection under 64-bit-R}
\usage{
read_ms_access(path_db, tbl_name)
}
\arguments{
\item{path_db}{full path to database}
\item{tbl_name}{name of database table to be read}
}
\description{
read table from MS Access data base via odbc connection under 64-bit-R
}
|
83f46fcc5c8303c49938e0087c72f35d51263d84
|
05a5a1f17f5df9fe295b616fb8d3c2427b2430ac
|
/tests/testthat/test-return-data.R
|
35c9c238cf368e69ecebf44010d9ac6f44c9928a
|
[] |
no_license
|
AntiportaD/hrcomprisk
|
20a0961bdf986414b7c1f2a8a2a23f9c93573d8d
|
c72ae62e96d05a585575a7ae8ea8c4952f03fce5
|
refs/heads/master
| 2020-09-27T04:53:26.499249
| 2020-01-23T14:35:03
| 2020-01-23T14:35:03
| 226,434,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
test-return-data.R
|
context("Test hrcompet estimation and data return")
test_that("Wheter return gives desired data dimensions", {
set.seed(1)
data <- hrcomprisk::dat_ckid
dat_final<-npcrest(data, exit, event, exposure=b1nb0, entry, eoi=2)
expect_equal(nrow(dat_final$cuminc), 187)
expect_equal(ncol(dat_final$cuminc), 11)
})
|
d130049ed4b8216daa880f989cbab4d00f9c140a
|
c194c5236006a758b29bd4d530ad563dc9ecab7e
|
/inst/apps/acceptance_testing_mtbf/ui.R
|
86ba992062cf8e1017d6cdd5ae7252567e8a7e9c
|
[] |
no_license
|
Auburngrads/teachingApps
|
1087c20a21992433a2f8451db7b1eaa7d1d2cb89
|
b79c192e5f74c5e8376674d4fb9e0b95a426fe03
|
refs/heads/master
| 2021-03-16T07:49:56.579527
| 2020-06-14T12:10:12
| 2020-06-14T12:10:12
| 51,677,745
| 15
| 7
| null | 2018-03-01T03:44:58
| 2016-02-14T03:22:47
|
R
|
UTF-8
|
R
| false
| false
| 1,179
|
r
|
ui.R
|
# Build one MTBF control slider; all five panel sliders share this shape.
mtbf_slider <- function(id, label, min, max, step, value) {
  sliderInput(id, label, min = min, max = max, step = step, value = value)
}

# Acceptance-testing MTBF app layout: control sliders on the left,
# interactive plot on the right.
ui <- fluidPage(
  theme = add_theme(getShinyOption('theme')),
  add_css(),
  sidebarLayout(
    sidebarPanel(
      width = 3,
      mtbf_slider('ttt',       'Available Test Time', 100, 1000, 10, 400),
      mtbf_slider('fails',     'Failures Allowed',      0,   30,  1,   1),
      mtbf_slider('thresh',    'Threshold MTBF',       10,  500,  5,  40),
      mtbf_slider('objective', 'Objective MTBF',       10,  500,  5,  70),
      mtbf_slider('contract',  'Contract MTBF',        10,  500,  5,  90)
    ),
    mainPanel(plotlyOutput('mtbf', height = '650px'), width = 9)
  )
)
|
febc2b30b33cb4dd0de473b118eabab7cba51fd2
|
9d0e613597f8829edb92d69aa7edff3a2d403ecc
|
/tests/testthat.R
|
e2bd7c4eb964f4993e027e83294eb54c49c3ec5d
|
[] |
no_license
|
cran/tfestimators
|
52d81322245381915ac74c556c17f5032defe2f6
|
2daf8fc062f8288fea6a05a5d56b62804fa79e33
|
refs/heads/master
| 2021-08-27T16:24:09.668239
| 2021-08-09T21:30:02
| 2021-08-09T21:30:02
| 114,889,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 121
|
r
|
testthat.R
|
# Standard testthat entry point: runs the package's test suite.
library(testthat)
library(tfestimators)
# Only run when NOT_CRAN is set (devtools/testthat set it for local and CI
# runs) -- presumably because the suite needs external resources CRAN lacks.
if (identical(Sys.getenv("NOT_CRAN"), "true"))
  test_check("tfestimators")
|
f9d39d71e2cf1c454d2bf8c020918d3ad9bd3718
|
e4345153bb30035fcde8709ffb3d1ab22dc6aa43
|
/plot4.R
|
7c1a92d9c177dff3a58e0ba6f411fe87b95c0976
|
[] |
no_license
|
nizamo/ExData_Plotting1
|
bcaf137cf8bfc9935ef3ee4a68e57ec66ca24a4c
|
852e5397129bbbed08ca94dc1c6669af43e6ff87
|
refs/heads/master
| 2021-01-18T12:33:28.478696
| 2015-08-09T15:56:22
| 2015-08-09T15:56:22
| 40,430,143
| 0
| 0
| null | 2015-08-09T08:26:52
| 2015-08-09T08:26:50
| null |
UTF-8
|
R
| false
| false
| 1,415
|
r
|
plot4.R
|
#project 1, plot 4
# Builds a 2x2 panel of power-consumption plots for 2007-02-01/02 and writes
# it to plot4.png.
# read table from the folder
data<- "./exdata-data-household_power_consumption/household_power_consumption.txt"
data1<-read.table(data,header=TRUE,sep=";", stringsAsFactors=FALSE,na.strings="?",
dec=".")
#Subset the date to Date/Time classes
# NOTE(review): the format string contains spaces ("%d/ %m/ %Y") while the
# raw data uses "%d/%m/%Y"; strptime's whitespace handling appears to make
# this parse anyway -- confirm on the target platform.
data1$Date<-as.Date(data1$Date, format="%d/ %m/ %Y")
#new subset dates: keep only 2007-02-01 and 2007-02-02
data2<- subset(data1, subset=(Date>="2007/02/01" & Date<="2007/02/02"))
#converting dates: combine Date and Time into a single POSIXct timestamp
timeanddate<-paste(as.Date(data2$Date),data2$Time)
data2$timeanddate<- as.POSIXct(timeanddate)
submeter1<-as.numeric(data2$Sub_metering_1)
submeter2<-as.numeric(data2$Sub_metering_2)
submeter3<-as.numeric(data2$Sub_metering_3)
#Plotting all 4 graph a,b,c,d into one 2x2 PNG device
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
#graph a: global active power over time
plot(data2$Global_active_power~data2$timeanddate,type="l",xlab="",ylab="Global Active Power", cex=0.2)
#graph b: voltage over time
plot(data2$Voltage~data2$timeanddate,type="l",xlab="datetime",ylab="Voltage")
#graph c: all three sub-meter readings on one axis, with legend
plot(submeter1~data2$timeanddate, type="l",xlab="",ylab="Energy sub metering")
lines(submeter2~data2$timeanddate, type="l",col="red")
lines(submeter3~data2$timeanddate, type="l",col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=1,lwd=2.5,col=c("black","red","blue"))
#graph d: global reactive power over time
plot(data2$Global_reactive_power~data2$timeanddate,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
8402fc25eac4426ac67202cc844dd70f4eb80e1d
|
0877d83cdf78f6e3bb122c7d2c031791684506d3
|
/R/attach_files.R
|
ff18ba1921a9b76b95e12ff0de825bc339d0560f
|
[] |
no_license
|
BWAM/BAP
|
fec1dbe4475f3869f8007894e9ad9a5581cb1277
|
9dd041516b2f4c8a2269516c57d7ade41746d7e9
|
refs/heads/master
| 2023-04-30T00:25:15.586434
| 2023-04-26T16:17:49
| 2023-04-26T16:17:49
| 180,187,817
| 0
| 1
| null | 2023-04-17T16:54:43
| 2019-04-08T16:18:52
|
R
|
UTF-8
|
R
| false
| false
| 1,347
|
r
|
attach_files.R
|
# Import PMA Table
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC")
#pma.model <- read.csv("PMA_MODEL.csv")
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC.BAP\\nysdec.bap")
#devtools::use_data(pma.model, overwrite = TRUE)
# Import PMA Table
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC")
#pma.ponar <- read.csv("PMA_PONAR.csv")
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC.BAP\\nysdec.bap")
#devtools::use_data(pma.ponar, overwrite = TRUE)
# Import ISD Table
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC")
#isd.df <- read.csv("ISD.csv")
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC.BAP\\nysdec.bap")
#devtools::use_data(isd.df, overwrite = TRUE)
# Requires Packages
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC.BAP\\nysdec.bap")
#devtools::use_package("plyr", "imports")
#devtools::use_package("reshape2", "imports")
#devtools::use_package( "vegan", "imports")
#devtools::use_package("zoo", "imports")
# Import Master Taxa List
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC")
#master <- read.csv("BPA_MASTER.csv")
#master <- read.csv("specieslist2016.csv")
#(master)[names(master) %in% "GENSPECIES"] <- "GENUS_SPECIES"
#names(master)[names(master) %in% "CLAS"] <- "CLASS"
#names(master)[names(master) %in% "ORDR"] <- "ORDER"
#master <- BAP::prep_master_taxa(master)
#setwd("C:\\Users\\Owner\\Desktop\\NYSDEC.BAP\\nysdec.bap")
#devtools::use_data(master, overwrite = TRUE)
|
8727544a6affa5c346784287e72a44e01a911559
|
07d5d1cb4633a2b23ad1e7a787694e0ac83391f8
|
/R/suitable_model.R
|
7be0add8c5651bd94b19c2645572f550049de92d
|
[] |
no_license
|
gouthaman87/av-gt-bigmart_sales_predictions
|
4950b5087268b567b5c53d4fbc6d25fa1c247383
|
c483e6289308c52df547177c642bd176653a927d
|
refs/heads/master
| 2023-06-30T07:59:04.321661
| 2021-08-07T05:33:38
| 2021-08-07T05:33:38
| 392,631,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 669
|
r
|
suitable_model.R
|
#' Select the best model by RMSE on new data
#'
#' Scores every candidate model in \code{list_of_model} against
#' \code{new_data} by RMSE and returns the single best-scoring model.
#' Models that fail to predict are captured (not fatal) via
#' \code{.errorhandling = "pass"}.
#'
#' @param list_of_model A list of fitted models, each usable with
#'   \code{predict()} producing a \code{.pred} column.
#' @param new_data A data frame containing the truth column
#'   \code{item_outlet_sales}.
#'
#' @return The element of \code{list_of_model} with the lowest RMSE on
#'   \code{new_data}.
#' @importFrom magrittr %>%
#' @importFrom foreach %do%
#' @export
suitable_model <- function(
  list_of_model,
  new_data
) {
  # One RMSE row per model; bind_rows(.id = ...) records the list position.
  # NOTE(review): if two models tie on the minimum RMSE, filter() keeps both
  # rows and only the first is used below -- confirm that is acceptable.
  rmse_tbl <- foreach::foreach(mod = list_of_model, .errorhandling = "pass") %do%
    {
      mod %>%
        predict(new_data) %>%
        dplyr::bind_cols(new_data) %>%
        yardstick::rmse(
          truth = item_outlet_sales,
          estimate = .pred
        )
    } %>%
    dplyr::bind_rows(.id = "model_id") %>%
    dplyr::filter(`.estimate` == min(`.estimate`))
  logger::log_info(paste("RMSE: ", rmse_tbl[[".estimate"]]))
  list_of_model[[as.numeric(rmse_tbl$model_id)]]
}
|
fe185a36aca04d2cab8786c066efa4d48a05f7fe
|
3b4c731c68e20eb4beb8a6859fb5a124cea9189a
|
/run_analysis.R
|
323bb5cf5eccf4b68c461ef324c5af9ecf60d3a5
|
[] |
no_license
|
placeacall/datasciencecoursera
|
8020cfeacc2a879fb030d3f9fc5d6d0db47a5fdd
|
a0e32dfe4c9bebc29de2025ccf6aaf3dc1ffbc77
|
refs/heads/master
| 2020-04-01T10:22:40.756053
| 2014-05-10T17:19:54
| 2014-05-10T17:19:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,403
|
r
|
run_analysis.R
|
# UCI HAR "Getting and Cleaning Data" pipeline: merges train/test sets,
# keeps mean()/std() features, labels activities, and writes a tidy table of
# per-subject, per-activity averages.
##load reshape2 library
library(reshape2)
##get a list of all features (index + name pairs)
features <- read.table("features.txt", header=FALSE, stringsAsFactors=FALSE)
##load activity label lookup (id -> activity name)
activityLabel <- read.table("activity_labels.txt", header=FALSE)
##load the y training predictor value
y.train <- read.table("./train/y_train.txt", header=FALSE)
names(y.train) <- "Y"
# map numeric activity codes to descriptive factor labels
y.train$Activity <- factor(y.train$Y, levels=activityLabel$V1, labels=activityLabel$V2)
##load the training subject feature
train.subject <- read.table("./train/subject_train.txt", header=FALSE)
names(train.subject) <- "SubjectID"
##load the training data set
train.x <- read.table("./train/X_train.txt", header=FALSE)
##Combine all data sets related to the training set
train <- cbind(train.subject, y.train, train.x)
##load the y testing predictor value
y.test <- read.table("./test/y_test.txt", header=FALSE)
names(y.test) <- "Y"
y.test$Activity <- factor(y.test$Y, levels=activityLabel$V1, labels=activityLabel$V2)
##load the testing subject feature
test.subject <- read.table("./test/subject_test.txt", header=FALSE)
names(test.subject) <- "SubjectID"
##load the test data set
test.x <- read.table("./test/X_test.txt", header=FALSE)
##Combine testing set
test <- cbind(test.subject, y.test, test.x)
##combine row bind: stack train on top of test
data <- rbind(train, test)
##rename the measurement columns (cols 1-3 are SubjectID, Y, Activity)
names(data)[4:length(names(data))] <- features[, 2]
##identify listed features: those containing mean() or std()
##(grep on lowercased names; returned indices are used on the originals)
features.listed <- names(data)[grep("std\\(\\)|mean\\(\\)", tolower(names(data)))]
##get all listed features with mean() and std()
data.features <- data[, features.listed]
##Prepend SubjectID (col 1) and Activity (col 3) to the selected features
data.features <- cbind(data[c(1, 3)], data.features)
##4 Appropriately labels the data set with descriptive activity names.
##Clean up feature names: drop "()" and turn "-" into "."
names(data.features) <- gsub("\\(\\)", "", gsub("-", ".", names(data.features)))
##5 Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
##Using melt and dcast to aggregate data set
data.features.melt <- melt(data.features, id=c("SubjectID", "Activity"))
data.features.melt.transpose <- dcast(data.features.melt, Activity + SubjectID ~ variable, mean)
##Write dataset to tidy.txt
write.table(data.features.melt.transpose, file = "tidy.txt", sep = ",", row.names=FALSE)
|
c970e680293a025411ce3a7ccf3ed3e8cc5fdae7
|
9bd5a5ab85e68040bc9a8694692ef3dfdea3d713
|
/man/zone_sum.Rd
|
374c3466570e6ac3f17461918931f620df257851
|
[] |
no_license
|
rfsaldanha/scanstatistics
|
578e9c8eaefa9ce4823dc5dac72e1168e4034b0d
|
652f027f40928b776e01373a191e05101bd8f841
|
refs/heads/master
| 2021-01-13T16:47:38.434417
| 2017-01-04T15:28:38
| 2017-01-04T15:28:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 588
|
rd
|
zone_sum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregation_functions.R
\name{zone_sum}
\alias{zone_sum}
\title{Sum columns over all location in each zone, for each duration.}
\usage{
zone_sum(table, sumcols)
}
\arguments{
\item{table}{A \code{data.table} with columns \code{zone, location, duration}
and those given in the argument \code{sumcols}.}
\item{sumcols}{Character vector of column names, the columns to be summed
over each zone and duration.}
}
\description{
Sum columns over all location in each zone, for each duration.
}
\keyword{internal}
|
23bf5177d7351373859f16f84777c1503e33cb3b
|
78889997af8cc4faa5430bffe3addc552095e172
|
/man/convert_image.Rd
|
afc430b0826aa00a6e67999da70f899ee007f001
|
[] |
no_license
|
kmezhoud/papSmear
|
a289df9cb4bf0b9ae88086251258a864fe3ed50f
|
1e75e74e71add31b5e0458ccc280400ff49cb590
|
refs/heads/master
| 2021-04-26T23:06:05.609315
| 2018-10-26T14:03:42
| 2018-10-26T14:03:42
| 123,825,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 748
|
rd
|
convert_image.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convertImage.R
\name{convert_image}
\alias{convert_image}
\title{Convert image to jpeg or png}
\usage{
convert_image(inputFolder, outputFolder, type = ".jpeg")
}
\arguments{
\item{inputFolder}{Folder that content the subfolders of images with other extension that jpeg and png.
Each subfolder corresponds to one class.}
\item{outputFolder}{The folder where will save cponverted images.}
\item{type}{the type of extension jpeg, png all supported extesion by imager package}
}
\value{
save images as jpeg (default) or png. Each folder corresponds to a classe.
}
\description{
Convert image to jpeg or png
}
\examples{
\dontrun{
convert_image("/extdata/img_data/")
}
}
|
25b8209edd4876c4975275da6e42f5fc5f7f20f9
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8694_2/rinput.R
|
5d44dd4fd7969badbd3a7c1bf2142d063783301f
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, unroot it, and write it back out
# (presumably for downstream codeml/PAML use -- confirm; this script only
# performs the read/unroot/write round trip).
library(ape)
testtree <- read.tree("8694_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8694_2_unrooted.txt")
|
6c522010792414274d9806c97d0b2cd92f3a05b2
|
61c1c745e9ad59814f1206f611c7ae2a1847b599
|
/R/vcov.Tenv.R
|
94014f91434912574c2dbc894919a6d988385164
|
[] |
no_license
|
jingzzeng/TRES
|
4fe57d3bdb406503f4a357dbfa30f3dac13de197
|
8e1def6faba478821588a74b09d9baa55a043bdf
|
refs/heads/master
| 2023-08-25T13:53:09.847317
| 2021-11-12T00:30:57
| 2021-11-12T00:30:57
| 211,178,926
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 190
|
r
|
vcov.Tenv.R
|
#' @method vcov Tenv
#' @export
vcov.Tenv <- function(object, ...){
  # Deliberately unsupported: this S3 method exists only to give Tenv users
  # a clear error pointing them at std_err() instead of the generic default.
  stop("The vcov method is not available for Tenv class. Refer to std_err function if the standard error is desired.")
}
|
4913463563a1afa7e3bb12d89dedcabc843ada48
|
b73ba9d91f872931cbf88d50999411c0bb7c211e
|
/man/download-archive.Rd
|
baaf0e0b8463f71f5fa61225bef1fd94a17fae62
|
[
"MIT"
] |
permissive
|
weecology/portalcasting
|
73347ce66f8c1e5c080a1f1029ec17026c912588
|
a35a77214d41dbdaa50bb39452b5fe49c3763a83
|
refs/heads/main
| 2023-08-20T12:48:59.392495
| 2023-05-23T01:16:33
| 2023-05-23T01:16:33
| 129,144,321
| 8
| 12
|
NOASSERTION
| 2023-05-23T01:16:34
| 2018-04-11T19:34:03
|
R
|
UTF-8
|
R
| false
| true
| 2,356
|
rd
|
download-archive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.R
\name{download archive}
\alias{download archive}
\alias{archive}
\alias{download_archive}
\title{Download the Portal Predictions Repository Archive}
\usage{
download_archive(
main = ".",
resources_sub = "resources",
version = "latest",
source = "github",
quiet = FALSE,
verbose = FALSE,
force = FALSE,
pause = 30,
timeout = getOption("timeout")
)
}
\arguments{
\item{main}{\code{character} value defining the main component of the portalcasting directory tree.}
\item{resources_sub}{\code{character} value defining the resources subdirectory of the portalcasting directory tree.}
\item{version}{\code{character} version of the data to download. Default \code{"latest"} downloads the most recent (by date published). \code{NULL} means no download.}
\item{source}{\code{character} indicator of the source for the download. Either \code{"github"} (default) or \code{"zenodo"}.}
\item{quiet}{\code{logical} indicator if progress messages should be quieted.}
\item{verbose}{\code{logical} indicator if detailed messages should be generated.}
\item{force}{\code{logical} indicator of whether or not existing files or folders (such as the archive) should be over-written if an up-to-date copy exists (most users should leave as \code{FALSE}).}
\item{pause}{Positive \code{integer} or integer \code{numeric} seconds for pausing during steps around unzipping that require time delay.}
\item{timeout}{Positive \code{integer} or integer \code{numeric} seconds for timeout on downloads. Temporarily overrides the \code{"timeout"} option in \code{\link[base:options]{options}}.}
}
\value{
\code{NULL}, \code{\link[base:invisible]{invisible}}-ly.
}
\description{
Downloads a specific \code{version} of the Portal Predictions repository from either GitHub or Zenodo (based on \code{source}) into the \verb{<main>/raw} sub.
}
\note{
There are two calls to \code{\link[base:Sys.sleep]{base::Sys.sleep}} for \code{pause} seconds each to allow for the file unzipping, copying, and such to catch up.
}
\examples{
\dontrun{
main1 <- file.path(tempdir(), "archive")
create_dir(main = main1)
download_archive(main = main1)
unlink(main1, recursive = TRUE)
}
}
\seealso{
Other downloads:
\code{\link{download climate forecasts}}
}
\concept{downloads}
|
7c69adeeeaa525125167ade9ffdb5a9c8fa4d98b
|
25da8623c40cabf5ff4b85c12a171d0441912fb9
|
/man/center.Rd
|
6d846530ca224219995ce4eabd63e48750c5012f
|
[] |
no_license
|
VIKASMAGGO/centering
|
1f3a205b0b19ff3067557093bab3a8cb1dbf9e8a
|
2ddc0857c44df519e703c6d1a4c0355d3de4f9be
|
refs/heads/master
| 2020-06-28T20:55:58.873256
| 2016-11-22T13:34:30
| 2016-11-22T13:34:30
| 74,471,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 231
|
rd
|
center.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/center.R
\name{center}
\alias{center}
\title{Title}
\usage{
center(x)
}
\arguments{
\item{x}{}
}
\description{
Title
}
\examples{
a <- 1:10
center(a)
}
|
41f87bb23e79c4cadcc5df77a4794e4b71ca7649
|
500304e5273d8efa0319ccc986caecbafcdbfc15
|
/test.R
|
9f916b244ec734ea91a7c77418175d88a47670fd
|
[] |
no_license
|
NIVA-Denmark/WATERS
|
e3e3ba549169726430cba7a801cd87cae4f9ccce
|
bace89323005902d63fc2b48de434bcb8187f874
|
refs/heads/master
| 2021-01-19T02:33:17.251145
| 2017-09-06T15:13:54
| 2017-09-06T15:13:54
| 87,289,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,469
|
r
|
test.R
|
# Driver script for the WATERS assessment: loads monitoring data, runs the
# Monte-Carlo Assessment(), saves the indicator results, then reloads and
# aggregates them per quality element.
# NOTE(review): rm(list=ls()) wipes the user's workspace -- avoid in scripts.
rm(list=ls())
source("IndicatorFunctions.R")
source("ReadIndicatorType.R")
source("CalculateIndicatorSupport.R")
source("Assessment.R")
source("ReadBounds.R")
source("ReadIndicatorParms.R")
source("ReadMonitoringData.R")
library(tidyverse)
library(haven)
library(lme4)
library(lubridate)
library(shiny)
library(dplyr)
library(prodlim)
# Monitoring observations; drop rows with missing salinity ("sali" --
# presumably salinity, confirm against the data dictionary).
df<-read.table("data/data.txt", fileEncoding = "UTF-8", sep=";", stringsAsFactors=F, header=T)
df<-filter(df,!is.na(sali))
#df <- ReadMonitoringDataSMHI("data/danafjord_2001_2006.sas7bdat")
#df <- ReadMonitoringDataSMHI("data/danafjord_2007_2012.sas7bdat")
#df <- ReadMonitoringDataSMHI("data/danafjord_2013_2016.sas7bdat")
#df <- ReadMonitoringDataSMHI("data/byfjorden_2007_2012.sas7bdat")
# Attach district IDs and build a combined "WB_ID WB_name" label per row.
df.wb<-read.table("data/waterbodies.txt", fileEncoding = "UTF-8", sep="\t", stringsAsFactors=F, header=T)
df<-df %>% left_join(select(df.wb,WaterbodyID,DistrictID), by=c("WB_ID"="WaterbodyID"))
df$WB<-paste0(df$WB_ID," ",df$WB_name)
df$obspoint<-df$station
# wblist<-distinct(df,WB,typology)
# wbcount<-nrow(wblist)
# df <- df %>% filter(WB == wblist$WB[1])
df.indicators<-read.table("data/IndicatorList.txt", fileEncoding = "UTF-8", sep="\t", stringsAsFactors=F, header=T)
# Number of Monte-Carlo simulations passed to Assessment().
nSimMC=100
source("Assessment.R")
# Start the clock! (time the full assessment run)
ptm <- proc.time()
AssessmentResults<-Assessment(df,nsim=nSimMC)
proc.time() - ptm
# Persist the raw results, then free them from the session.
IndicatorResults<-AssessmentResults
save(IndicatorResults,file="ExampleIndResults.Rda")
rm(IndicatorResults)
rm(AssessmentResults)
#
# nSimMC
# df.resultsOverall<-AssessmentResults[[1]]
# df.resultsQE<-AssessmentResults[[2]]
# df.resultsInd<-AssessmentResults[[3]]
#
# df.chl<-AssessmentResults[[4]]
# NOTE(review): absolute Windows path below will break on other machines.
load("C:/Data/GitHub/Waters/code/WATERS/ExampleIndResults.Rda")
IndicatorResults <- IndicatorResults %>%
  select(WB,Type,Indicator,sim,Unit,Ref,HG,GM,MP,PB,Min,Value,ClassID,Class,EQR,Code) %>%
  left_join(df.indicators)
test<-IndicatorResults%>%filter(sim==1)
# Per-subelement indicator counts give each indicator an equal weight
# (1/count) within its quality subelement.
SubelementCount<-IndicatorResults %>%
  filter(!is.na(Value)) %>%
  group_by(WB,Type,Quality.element,Quality.subelement,sim) %>%
  summarise(IndCount=n()) %>%
  mutate(IndWeight=1/IndCount) %>%
  select(-IndCount)
IndicatorResults <- IndicatorResults %>%
  left_join(SubelementCount)
# Weighted-mean EQR per quality element; EQRcheck recomputes it as a
# cross-check against the direct weighted mean.
QEresults<-IndicatorResults %>%
  mutate(EQRwt=EQR*IndWeight) %>%
  group_by(WB,Type,Quality.element,sim) %>%
  summarise(sumEQR=sum(EQRwt,na.rm=T),sumWt=sum(IndWeight,na.rm=T),EQR=sum(EQR*IndWeight)/sum(IndWeight,na.rm=T)) %>%
  mutate(EQRcheck=sumEQR/sumWt)
|
516ce5cb454407a4940c8487b22e31fe4c51a110
|
27e565d716f3df661b6b042c8b7d9b3e96e13f4d
|
/lib/R/clean_descr_col.R
|
124781bd959afc4ef70b7ffd5122b15d2e7fd6af
|
[] |
no_license
|
webbedfeet/USRDS2015
|
7ab1104fc50d2d19d96b766429686251558cb704
|
bcda102f430f5c0f6af32915f2582ac9448490e8
|
refs/heads/master
| 2021-05-20T18:09:40.406445
| 2020-02-28T21:21:08
| 2020-02-28T21:21:08
| 95,592,749
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103
|
r
|
clean_descr_col.R
|
clean_descr_col <- function(x){
u <- unique(x)
for(n in u){
x[x==n][-1] <- ''
}
return(x)
}
|
ac870089c4f0c92ecb463a42ed0eb9038d7d4240
|
b4c0c88b54ffd8772bcc6a2f4cb3ad5c6eb1bd30
|
/coin_table.R
|
94ffbe9b0c0d54f495abcb171a7c8ef9eafa3f32
|
[] |
no_license
|
bluefoxr/COINrDev
|
4b7ef140e7af1597e535f38b3619d2e7dd0ce934
|
7c78352f13e50a877d81880b47085f3e544c3076
|
refs/heads/main
| 2023-01-30T13:57:03.441180
| 2020-12-16T19:48:43
| 2020-12-16T19:48:43
| 306,949,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,533
|
r
|
coin_table.R
|
#' Generate table to visualise results
#'
#' Uses the reactable package to build interactive tables
#'
#' @param country The country/unit to build the doc for.
#'
#' @examples
#'
#' @return Tables
#'
#' @export
coin_table <- function(COINobj){
# get data and reverse so that index is first
tabledata <- rev(COINobj$data$data_aggregated)
sticky_style <- list(position = "sticky", left = 0, background = "#fff", zIndex = 1,
borderRight = "1px solid #eee")
# to do: make a function which loops over columns of the data frame
# for each col, it adds to a list using the coldef function.
# hopefully should be able to subst whole list into the reactable function.
# basically what I did here below, but then with looping over cols.
# lapply probably a good bet.
coldefs <- list(Index = colDef(
style = function(value) {
normalized <- (value - min(tabledata$Index)) / (max(tabledata$Index) - min(tabledata$Index))
color <- orange_pal(normalized)
list(background = color)
}
))
reactable(tabledata,
defaultSorted = "Index", defaultSortOrder = "desc",
#groupBy = "Group_GDP",
columns = coldefs
# ),
# Code_country = colDef(
# style = sticky_style,
# headerStyle = sticky_style
# )
)
}
orange_pal <- function(x) rgb(colorRamp(c("#ffe4cc", "#ffb54d"))(x), maxColorValue = 255)
#}
|
db9c310bd53e37ec0829530b19565d659fae3298
|
fd2d3a6815f76b43d983cff88fb870419bee0cfe
|
/man/isOverlap.Rd
|
2e2c39c7f83afa5c719c28b59fa93131e5cbf69a
|
[] |
no_license
|
PatrickEslick/CWQkitr
|
15c20b8f94a6156071037647f622869620912956
|
fcf767047f838d048c32bc537fddeb1f1c7612fe
|
refs/heads/master
| 2020-06-13T16:00:31.381308
| 2019-12-11T22:16:21
| 2019-12-11T22:16:21
| 187,235,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 555
|
rd
|
isOverlap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corrections_grades_gaps.R
\name{isOverlap}
\alias{isOverlap}
\title{Determine whether two intervals overlap}
\usage{
isOverlap(start1, end1, start2, end2)
}
\arguments{
\item{start1}{the start of the first interval}
\item{end1}{the end of the first interval}
\item{start2}{the start of the second interval}
\item{end2}{the end of the second interval}
}
\value{
a logical indicating whether the two intervals overlap
}
\description{
Determine whether two intervals overlap
}
|
164e7f9572bfe43cf7c442a572e030b5abbc2504
|
94b1616a96ce7e3386d95dcbbdb8531e736164f4
|
/R-prog/ProgrammingAssignment1/pollutantmean.R
|
7ff87aab6ccb7949aa50dfb300c260ed5fe44124
|
[] |
no_license
|
ronnyma/DataSciSpec
|
babe1ef19bbdf3afd542e74987162eb71134764c
|
f4aebcaec38ecb3d141322854f6874fb213c3dd0
|
refs/heads/master
| 2020-06-01T16:00:27.657438
| 2014-12-21T16:28:19
| 2014-12-21T16:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
pollutantmean.R
|
intToFile <- function( num ) {
paste( sprintf( "%03d", num ), ".csv", sep='' )
}
pollutantmean <- function( directory, pollutant, id = 1:332 ) {
alldata <- c()
# go to working dir
setwd( "/home/ronnyma/development/DataSciSpec/R-prog/Assignment1" )
# Determine which files to read
files <- intToFile( id )
# Merge all files into one data frame
for( f in files ) {
f <- paste(directory, f, sep="/")
tmp <- read.csv(f, header=TRUE)
alldata <- rbind(tmp,alldata)
}
mean( alldata[[pollutant]], na.rm=TRUE )
}
|
817657d9728c1c50a5cb368b351ee1379873eed2
|
8dc49fa3abdb6462cfb5019f731d1c9fe994ba2c
|
/R/wiki.R
|
43bf30f2ddd57257163a398849aa539fd3983b91
|
[
"MIT"
] |
permissive
|
ropensci/wikitaxa
|
afe1679d406e32e935aeddb77fdfbafe95139d45
|
263de52d2dcef8762fe458894744cb183cbed86f
|
refs/heads/master
| 2023-05-24T02:58:05.108537
| 2023-01-10T17:45:58
| 2023-01-10T17:45:58
| 61,314,596
| 19
| 4
|
NOASSERTION
| 2018-10-19T19:11:11
| 2016-06-16T17:56:58
|
R
|
UTF-8
|
R
| false
| false
| 3,472
|
r
|
wiki.R
|
#' Wikidata taxonomy data
#'
#' @export
#' @param x (character) a taxonomic name
#' @param property (character) a property id, e.g., P486
#' @param ... curl options passed on to `httr::GET()`
#' @param language (character) two letter language code
#' @param limit (integer) records to return. Default: 10
#' @return `wt_data` searches Wikidata, and returns a list with elements:
#' \itemize{
#' \item labels - data.frame with columns: language, value
#' \item descriptions - data.frame with columns: language, value
#' \item aliases - data.frame with columns: language, value
#' \item sitelinks - data.frame with columns: site, title
#' \item claims - data.frame with columns: claims, property_value,
#' property_description, value (comma separted values in string)
#' }
#'
#' `wt_data_id` gets the Wikidata ID for the searched term, and
#' returns the ID as character
#'
#' @details Note that `wt_data` can take a while to run since when fetching
#' claims it has to do so one at a time for each claim
#'
#' You can search things other than taxonomic names with `wt_data` if you
#' like
#' @examples \dontrun{
#' # search by taxon name
#' # wt_data("Mimulus alsinoides")
#'
#' # choose which properties to return
#' wt_data(x="Mimulus foliatus", property = c("P846", "P815"))
#'
#' # get a taxonomic identifier
#' wt_data_id("Mimulus foliatus")
#' # the id can be passed directly to wt_data()
#' # wt_data(wt_data_id("Mimulus foliatus"))
#' }
wt_data <- function(x, property = NULL, ...) {
UseMethod("wt_data")
}
#' @export
wt_data.wiki_id <- function(x, property = NULL, ...) {
data_wiki(x, property = property, ...)
}
#' @export
wt_data.default <- function(x, property = NULL, ...) {
x <- WikidataR::find_item(search_term = x, ...)
if (length(x) == 0) stop("no results found", call. = FALSE)
data_wiki(x[[1]]$id, property = property, ...)
}
#' @export
#' @rdname wt_data
wt_data_id <- function(x, language = "en", limit = 10, ...) {
x <- WikidataR::find_item(search_term = x, language = language,
limit = limit, ...)
x <- if (length(x) == 0) NA else x[[1]]$id
structure(x, class = "wiki_id")
}
data_wiki <- function(x, property = NULL, ...) {
xx <- WikidataR::get_item(x, ...)
if (is.null(property)) {
claims <- create_claims(xx[[1]]$claims)
} else{
cl <- Filter(function(x) x$mainsnak$property %in% property, xx[[1]]$claims)
if (length(cl) == 0) stop("No matching properties", call. = FALSE)
claims <- create_claims(cl)
}
list(
labels = dt_df(xx[[1]]$labels),
descriptions = dt_df(xx[[1]]$descriptions),
aliases = dt_df(xx[[1]]$aliases),
sitelinks = dt_df(lapply(xx[[1]]$sitelinks, function(x)
x[names(x) %in% c('site', 'title')])),
claims = dt_df(claims)
)
}
fetch_property <- function(x) {
tmp <- WikidataR::get_property(x)
list(
property_value = tmp[[1]]$labels$en$value,
property_description = tmp[[1]]$descriptions$en$value
)
}
create_claims <- function(x) {
lapply(x, function(z) {
ff <- c(
property = paste0(unique(z$mainsnak$property), collapse = ","),
fetch_property(unique(z$mainsnak$property)),
value = {
if (inherits(z$mainsnak$datavalue$value, "data.frame")) {
paste0(z$mainsnak$datavalue$value$`numeric-id`, collapse = ",")
} else {
paste0(z$mainsnak$datavalue$value, collapse = ",")
}
}
)
ff[vapply(ff, is.null, logical(1))] <- NA
ff
})
}
|
2a269406213e9291009e20a4877fe97aed715d12
|
e2f2a73b7acf00f738450670a5f15117fb76fec2
|
/R/analise.R
|
cd98ba0ab360eeaab231b729b1b7982663a0919a
|
[] |
no_license
|
dadoscope/lulalivre
|
481e59728c7177985cb80f8f647f158b3df02042
|
76df4b2bacec0ff2f4a5e50acb0493cdb2156b67
|
refs/heads/master
| 2021-08-08T01:02:08.719661
| 2020-09-19T09:22:21
| 2020-09-19T09:22:21
| 220,492,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,121
|
r
|
analise.R
|
library(tidyverse)
library(lubridate)
setwd("/Users/isiscosta/RScript/lulalivre")
files <- system("ls data/*.rds",intern=TRUE)
df <- data.frame()
for(f in files){
termo <- unlist(strsplit(f,"_"))[2]
lang <- unlist(strsplit(f,"_"))[3]
aux_df <- readRDS(f)
aux_df$termo <- rep(termo,nrow(aux_df))
df <- rbind(df, aux_df)
}
################# figures
setwd("/Users/isiscosta/RScript/lulalivre/figures")
p0 <- df %>% group_by(lang) %>% summarise(total = sum(retweet_count)) %>% ggplot(aes(x = reorder(lang, total), y = total, fill = lang)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula'", y = "Número de Número de Retuítes", x = "Idiomas")
p1 <- df %>% group_by(screen_name) %>% mutate(total = sum(retweet_count)) %>% ungroup() %>% group_by(screen_name, lang, total) %>% summarise(parcial = sum(retweet_count)) %>% arrange(total) %>% tail(50) %>% ggplot(aes(x = reorder(screen_name, total), y = parcial, fill = lang)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula'", y = "Número de Retuítes", x = "Perfis do Twitter")
p2 <- df %>% filter(lang != "pt") %>% group_by(screen_name) %>% mutate(total = sum(retweet_count)) %>% ungroup() %>% group_by(screen_name, lang, total) %>% summarise(parcial = sum(retweet_count)) %>% arrange(total) %>% tail(20) %>% ggplot(aes(x = reorder(screen_name, total), y = parcial)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula'", y = "Número de Retuítes", x = "Perfis do Twitter")
p3 <- df %>% filter(lang == "pt") %>% group_by(screen_name) %>% mutate(total = sum(retweet_count)) %>% ungroup() %>% group_by(screen_name, lang, total) %>% summarise(parcial = sum(retweet_count)) %>% arrange(total) %>% tail(20) %>% ggplot(aes(x = reorder(screen_name, total), y = parcial)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula' - Português", y = "Número de Retuítes", x = "Perfis do Twitter")
p4 <- df %>% filter(lang == "es") %>% group_by(screen_name) %>% mutate(total = sum(retweet_count)) %>% ungroup() %>% group_by(screen_name, lang, total) %>% summarise(parcial = sum(retweet_count)) %>% arrange(total) %>% tail(20) %>% ggplot(aes(x = reorder(screen_name, total), y = parcial)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula' - Espanhol", y = "Número de Retuítes", x = "Perfis do Twitter")
p5 <- df %>% filter(lang == "en") %>% group_by(screen_name) %>% mutate(total = sum(retweet_count)) %>% ungroup() %>% group_by(screen_name, lang, total) %>% summarise(parcial = sum(retweet_count)) %>% arrange(total) %>% tail(20) %>% ggplot(aes(x = reorder(screen_name, total), y = parcial)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula' - Inglês", y = "Número de Retuítes", x = "Perfis do Twitter")
p6 <- df %>% filter(lang == "fr") %>% group_by(screen_name) %>% mutate(total = sum(retweet_count)) %>% ungroup() %>% group_by(screen_name, lang, total) %>% summarise(parcial = sum(retweet_count)) %>% arrange(total) %>% tail(20) %>% ggplot(aes(x = reorder(screen_name, total), y = parcial)) + geom_bar(stat = "identity") + theme_bw()+coord_flip()+labs(title = "Número de retuítes contendo a palavra 'Lula' - Francês", x = "Número de Retuítes", y = "Perfis do Twitter")
png("ret_por_idioma.png",width=3200,height=1800,res=300)
print(p0)
dev.off()
png("ret_por_usuario_por_idioma.png",width=3200,height=1800,res=300)
print(p1)
dev.off()
png("ret_por_usuario_no_pt.png",width=3200,height=1800,res=300)
print(p2)
dev.off()
png("ret_por_usuario_pt.png",width=3200,height=1800,res=300)
print(p3)
dev.off()
png("ret_por_usuario_es.png",width=3200,height=1800,res=300)
print(p4)
dev.off()
png("ret_por_usuario_en.png",width=3200,height=1800,res=300)
print(p5)
dev.off()
png("ret_por_usuario_fr.png",width=3200,height=1800,res=300)
print(p6)
dev.off()
|
d32893dbf09d094410e435e48da5bcbf64d834df
|
145c4690c0c34318c809fe43ce17eb0cc915d062
|
/HW0.R
|
3b0ddcf2c0825ba86b4dfaee58c01fd5b6302621
|
[] |
no_license
|
gopalmenon/Probabilistic-Modeling-HW0
|
16ac944c64c433c709c5fca68daed1c3b94fd6fc
|
dc8dfc1dd3ff38ea5016030379d39553a06cd95b
|
refs/heads/master
| 2021-05-12T09:22:08.025979
| 2018-01-23T05:03:55
| 2018-01-23T05:03:55
| 117,317,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,528
|
r
|
HW0.R
|
# Return the pth quantile for distribution of Y
get_Y_quantile <- function(quantile_value, exponential_rate) {
return(sqrt(-1* log(1 - quantile_value) / exponential_rate))
}
# Random number generation function
generate_random_number <- function(how_many, exponential_rate) {
# Uniform distribution between 0 and 1
uniform_distribution = runif(how_many, min=0, max=1)
# Declare an array to store the random numbers from the distribution of Y
ramdoms_from_exp_distr = numeric(how_many)
# For each number in the uniform distribution find the quantile of Y
for (unif_distr_counter in 1:length(uniform_distribution)) {
ramdoms_from_exp_distr[unif_distr_counter] = get_Y_quantile(uniform_distribution[unif_distr_counter], exponential_rate)
}
return(ramdoms_from_exp_distr)
}
# Return probability density of Y
y_probability_density <- function(real_number, exponential_rate) {
return (2*real_number*exponential_rate*exp(-1 * exponential_rate * real_number^2))
}
#Generate 10,000 realizations of the random variable Y with λ= 2
realizations_needed = 10000
exponential_rate_lambda = 2
random_numbers = generate_random_number(how_many = realizations_needed, exponential_rate = exponential_rate_lambda)
# Plot a histogram of these numbers, using the hist function with option freq = FALSE.
hist(random_numbers, freq=FALSE, main= "Random Number Generation", xlab="Random Number", ylab="Density", col="blue")
# Use the lines command to plot the pdf of Y on top.
number_sequence = seq(0,2,.001)
y_probability_density_trend = y_probability_density(real_number = number_sequence, exponential_rate = exponential_rate_lambda)
length(y_probability_density_trend)
lines(number_sequence, y_probability_density_trend, lty=1, lwd=3, col="red")
legend("topright", legen=c("Random Number Density", "PDF of function Y"), col=c("blue", "red"), lwd=3, lty=1)
## Compute the sample mean and variance of your 10,000 realizations
mean(random_numbers)
var(random_numbers)
## Generate 20 realizations from Y with lambda= 2
new_realizations_needed = 20
random_numbers = generate_random_number(how_many = new_realizations_needed, exponential_rate = exponential_rate_lambda)
random_numbers = sort(random_numbers)
## Compute the likelihood distribution
log_likelihood_distribution = numeric(length(random_numbers))
lambda_values = seq(0,10,.01)
# Compute the log likelihood for at each lamda point
lambda_value_counter = 0
for (lambda_value in lambda_values) {
lambda_value_counter = lambda_value_counter + 1
log_likelihood = 0
# Compute log likelihood at the point corresponding to the random number
for(realization_counter in 1 : new_realizations_needed) {
log_likelihood = log_likelihood + y_probability_density(real_number = random_numbers[realization_counter], exponential_rate = lambda_values[lambda_value_counter])
}
log_likelihood_distribution[lambda_value_counter] = log_likelihood
}
# Plot the likelihood function
plot(lambda_values, log_likelihood_distribution, type='l', lwd=3, col="red", main= "Log Likelihood Plot", xlab="Lambda", ylab="Log Likelihood")
## Maximum likelihood estimate for lambda
random_number_distributions_squared = random_numbers^2
lambda_hat = new_realizations_needed / sum(random_number_distributions_squared)
## Draw a vertical line at the computed value of maximum likelihood
abline(v = lambda_hat, col='blue', lwd = 3, lty = 2)
legend("bottomright", legen=c("Log Likelihood Plot", "MLE Estimate"), col=c("red", "blue"), lwd=3, lty=1)
|
4c102dfc53006f18229b4a456a8e309c35f6ad6d
|
f646afb72c41940b1d31dec7ed33a68f87aa6b72
|
/man/qCal.Rd
|
b1c9e95444cbebaab4bd364a3d82d293cc103759
|
[] |
no_license
|
ahb108/rcarbon
|
4a9272513d6b6d904ff72c8e32ab0063f91acea9
|
995e5a947ff8f84bd904765df66275bf1f7d4223
|
refs/heads/master
| 2023-07-21T17:13:45.291818
| 2023-07-07T19:19:43
| 2023-07-07T19:19:43
| 73,347,130
| 44
| 32
| null | 2021-07-07T16:32:05
| 2016-11-10T04:28:18
|
R
|
UTF-8
|
R
| false
| true
| 607
|
rd
|
qCal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibration.R
\name{qCal}
\alias{qCal}
\title{Computes the quantile date of calibrated dates}
\usage{
qCal(x, p = 0.5)
}
\arguments{
\item{x}{A \code{CalDates} class object.}
\item{p}{A numeric value of probability. Default is 0.5 (median).}
}
\value{
A vector of quantile dates in cal BP
}
\description{
Function for generating a vector of quantile calibrated dates from a \code{CalDates} class object.
}
\examples{
x <- calibrate(c(3050,2950),c(20,20))
qCal(x,p=0.2)
}
\seealso{
\code{\link{calibrate}}, \code{\link{barCodes}}
}
|
392cb98cc854c2820e69968bf2867dd7aca86ae8
|
e0577625c1b2d02dc7ba7fac3a6a0083573a1669
|
/man/decisionTable.Rd
|
2b1b5fedb78879f8f075ff872c04ea007bc62328
|
[] |
no_license
|
albhasan/RoughSetKnowledgeReduction
|
516ac345757a11d2703f1fc54f47fadd30045002
|
0f0d909d9cde587519c41e50d12a721acf3aa55e
|
refs/heads/master
| 2016-09-05T20:57:09.358622
| 2014-12-18T08:58:13
| 2014-12-18T08:58:13
| 15,021,715
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 784
|
rd
|
decisionTable.Rd
|
\name{decisionTable}
\alias{decisionTable}
\title{DECISION TABLE}
\description{User friendly constructor of an instance of the class Decision Table.}
\usage{
decisionTable(theDecisionTable)
}
\arguments{
\item{theDecisionTable}{A numeric matrix representing a decision table}
}
\value{It returns a Decision Table object.}
\references{Pawlak, Zdzislaw 1991 \emph{Rough Sets: Theoretical Aspects of Reasoning About Data} Dordrecht: Kluwer Academic Publishing.}
\author{Alber Sanchez \email{alber.sanchez@uni-muenster.de}}
\seealso{
\code{\link{DecisionTable-class}}
}
\examples{
exampleMatrix1 <- matrix(c(1,0,2,1,1,2,2,0,0,1,0,1,0,2,1,
1,2,1,0,0,2,0,1,1,2,1,1,2,0,1,1,0,0,2,1,2,1,1,2,1),ncol = 5)
dt <- decisionTable(exampleMatrix1)
}
\keyword{logic}
\keyword{rough}
\keyword{set}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.