blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec7aafd7632a3984ac481ec47fea0b8977c672c5
|
306e3e5c3afeb3af6a5ed7171f6cc886b7df00b4
|
/run_analysis.R
|
7abe3be55be648e84133fb35095788314aabf33b
|
[] |
no_license
|
lsablake/GettingCleaningData
|
0577076dc288d048b178351e6fd846be2d15102a
|
c9ea16be0fc3536c5203ace32de98a090323566c
|
refs/heads/master
| 2021-01-14T07:55:10.982049
| 2017-03-21T22:52:19
| 2017-03-21T22:52:19
| 81,877,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,100
|
r
|
run_analysis.R
|
#setwd("C:/Users/Logan/MyRProgram")
# run_analysis.R -- builds a tidy summary ("tidydata.txt") of the UCI HAR
# (Human Activity Recognition) smartphone dataset: downloads the zip, merges
# the test/train sets, keeps mean/std measurements, labels activities and
# variables, then averages each variable per subject and activity.
#---------------------------------------------------------------------------------------------------
#1. Download and extract raw datasets from source (unnecessary to repeat if datasets are unchanged).
if(!file.exists("./data")) {
dir.create("./data")}
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, destfile = "Dataset.zip")
unzip("Dataset.zip", overwrite = TRUE, exdir = "./data")
# Record when the data was fetched (provenance for this analysis run).
dateDownloaded <- date()
#---------------------------------------------------------------------------------------------------
#2. Read relevant datasets into R environment
# quote = "" disables quote handling so stray quote characters in the raw
# files cannot merge fields.
# Read "activity" dataset into R
actLabelFile <- "./data/UCI HAR Dataset/activity_labels.txt"
activity_labels <- read.table(actLabelFile, quote = "")
# Read "features" data into R
featuresFile <- "./data/UCI HAR Dataset/features.txt"
features <- read.table(file=featuresFile, header = FALSE, quote = "")
# Read "test" datasets into R
X_testFile <- "./data/UCI HAR Dataset/test/X_test.txt"
X_test <- read.table(X_testFile, quote = "")
y_testFile <- "./data/UCI HAR Dataset/test/y_test.txt"
y_test <- read.table(y_testFile, quote = "")
testsubjFile <- "./data/UCI HAR Dataset/test/subject_test.txt"
subject_test <- read.table(testsubjFile, quote = "")
# Read training datasets into R
X_trainFile <- "./data/UCI HAR Dataset/train/X_train.txt"
X_train <- read.table(X_trainFile, quote = "")
y_trainFile <- "./data/UCI HAR Dataset/train/y_train.txt"
y_train <- read.table(y_trainFile, quote = "")
trainsubjFile <- "./data/UCI HAR Dataset/train/subject_train.txt"
subject_train <- read.table(trainsubjFile, quote = "")
#---------------------------------------------------------------------------------------------------
#3. Merge the training and the test features datasets to create one data set
# (excludes the 'subject' data sets at this point.)
library(dplyr)
total_data <- bind_rows(X_test, X_train) #combine test and train datasets by rows
names(total_data) <- features[,2] #assign column names to the merged dataset
#---------------------------------------------------------------------------------------------------
#4. Extract only the measurements on the mean and standard deviation for each measurement.
# In the regex "()" matches the empty string, so "[Ss]td()" is effectively
# just "[Ss]td": the filter keeps any column whose name contains mean/Mean
# or std/Std.
MeanDevData <- total_data[,grepl("[Mm]ean|[Ss]td()", names(total_data))]
#select only the 'mean' and 'std()' variables from the total_data dataset
#---------------------------------------------------------------------------------------------------
#5. Use descriptive activity names to name the activities in the data set
# Add the "activityNum" vector as the first column in the MeanDevData dataset
activityNum <- bind_rows(y_test, y_train)
MeanDevData <- bind_cols(activityNum,MeanDevData)
#replace activity numbers with descriptive activity labels
activity_labels[,2] <- as.character(activity_labels[,2])
MeanDevData$V1 <- as.character(MeanDevData$V1)
# Replace each activity id (stored as character) with its label text.
for(i in seq_along(activity_labels[,1])) {
MeanDevData$V1[MeanDevData$V1==as.character(i)] <- activity_labels[i,2]
}
#---------------------------------------------------------------------------------------------------
#6. Appropriately label the data set with descriptive variable names.
library(data.table)
#rename the activity variable from "V1" to "Activity"
setnames(MeanDevData, "V1", "Activity") # {data.table package}
#replace ambiguous abbreviations with more complete descriptors.
#remove unneccesary symbolic notations. Standardize format.
names(MeanDevData) <- gsub("Acc", "Accelerometer", names(MeanDevData))
names(MeanDevData) <- gsub("Gyro", "Gyroscope", names(MeanDevData))
names(MeanDevData) <- gsub("BodyBody", "Body", names(MeanDevData))
names(MeanDevData) <- gsub("Mag", "Magnitude", names(MeanDevData))
names(MeanDevData) <- gsub("^t", "Time", names(MeanDevData))
names(MeanDevData) <- gsub("-std|-STD", "STD", names(MeanDevData))
names(MeanDevData) <- gsub("^f","Frequency", names(MeanDevData))
names(MeanDevData) <- gsub("freq","Freq", names(MeanDevData), ignore.case = FALSE)
names(MeanDevData) <- gsub("angle", "Angle", names(MeanDevData), ignore.case = FALSE)
names(MeanDevData) <- gsub("gravity", "Gravity", names(MeanDevData),ignore.case = FALSE )
names(MeanDevData) <- gsub("\\(\\)", "", names(MeanDevData))
names(MeanDevData) <- gsub("-[Mm]ean", "Mean", names(MeanDevData), ignore.case = FALSE)
names(MeanDevData) <- gsub("tBody", "TimeBody", names(MeanDevData))
#---------------------------------------------------------------------------------------------------
#7. From the data set in step 4, create a second, independent tidy data set
# with the average of each variable for each activity and each subject.
library(reshape2)
subjectsData <- bind_rows(subject_test, subject_train)
#combine test and training subjects data (not necessary before this point)
MeanDevData2 <- bind_cols(subjectsData, MeanDevData)
#append the subjectsData dataset to the MeanDeveData dataset
setnames(MeanDevData2, "V1", "Subjects") # {data.table package}
#Create descriptive variable name for newly appended data
MeanDevData2 <- MeanDevData2[order(MeanDevData2$Subjects,MeanDevData2$Activity),]
#Order rows by subect and activity for more efficient evaluation
# NOTE(review): 3:88 hard-codes the measure columns (86 of them after the
# Subjects and Activity identifiers); this silently breaks if the grepl()
# selection in step 4 changes -- verify the count.
MeanDevMolt <- melt(MeanDevData2, id =c("Subjects", "Activity"),
measure.vars = names(MeanDevData2[,3:88]))
#Melt the dataset by identifier variables "Subjects" and "Activity". All remaining measure variables
#will be stacked together in 1 column, while leaving these identifier variables in place.
#The result is a "molten" dataset ready to be "cast".
SummaryData <- dcast(MeanDevMolt, formula = Subjects + Activity ~ variable, mean)
#Reshape data into tidy dataframe via dcast function. This provides the mean for each measurement
#by Subject and Activity.
write.table(SummaryData, file = "tidydata.txt", row.names = FALSE)
#write output file txt file for review.
#End of file.
|
3d86cd41ffaed7b5e82e001333fbdcaac678d2f5
|
719500684fceaf0a7a80ce663e9cf07802e10b9a
|
/R/write-fwf.r
|
0f10fd40d536240263ecea7a97b3752eba4d30f6
|
[] |
no_license
|
pbreheny/breheny
|
964baf0670a9eb4975946eae66772d47d9affd11
|
3e15bb78a769616adb49ea50c800d026f48ef8e7
|
refs/heads/master
| 2023-08-09T18:34:35.865871
| 2023-08-01T15:38:54
| 2023-08-01T15:38:54
| 82,972,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,610
|
r
|
write-fwf.r
|
#' Generate fixed width file in R
#'
#' Based on https://gist.github.com/haozhu233/28d1309b58431f4929f78243054f1f58
#'
#' @param dt The data to be printed (coerced to a data frame if necessary)
#' @param width Either a single number or a vector of per-column widths
#' @param con Connection, as in `writeLines()`; default is `stdout()`.
#' @param align "l", "r" or something like "lrl" for left, right, left.
#' @param na What to print in places of missing values; default: "NA"
#' @param col.names Print column names? Default: TRUE
#'
#' @examples
#' dt <- data.frame(a = 1:3, b = NA, c = c('a', 'b', 'c'))
#' write_fwf(dt, width = c(4, 4, 3))
#' write_fwf(dt, 5)
#'
#' X <- matrix(LETTERS[1:9], 3, 3, dimnames=list(1:3, paste0('V', 1:3)))
#' write_fwf(X, 6)
#' @export
write_fwf <- function(dt, width, con = stdout(), align = "l", na = "NA", col.names = TRUE) {
  if (!inherits(dt, 'data.frame')) {
    dt <- as.data.frame(dt)
  }
  # Factors must become character so NA replacement and sprintf() behave.
  for (j in seq_along(dt)) {
    if (is.factor(dt[[j]])) {
      dt[[j]] <- as.character(dt[[j]])
    }
  }
  dt[is.na(dt)] <- na
  n_col <- ncol(dt)
  # Map "l" -> "-" (left-justify flag) and "r" -> "" for the sprintf format.
  just <- unlist(strsplit(align, ""))
  just <- as.character(factor(just, c("l", "r"), c("-", "")))
  # Recycle a single width/alignment across all columns.
  if (n_col != 1) {
    if (length(width) == 1) {
      width <- rep(width, n_col)
    }
    if (length(just) == 1) {
      just <- rep(just, n_col)
    }
  }
  fmt <- paste(paste0("%", just, width, "s"), collapse = "")
  body_rows <- do.call(sprintf, c(fmt = fmt, dt))
  header_row <- do.call(sprintf, c(list(fmt), names(dt)))
  writeLines(if (col.names) c(header_row, body_rows) else body_rows, con)
}
|
c2f4cef290a0d413f44a2a23806e5c00bcad2a88
|
4b77c231c94281c8b111ba762ca60693b460c278
|
/lab1/ex3.R
|
3a7b11cb62954b8a6ddb089de045a62d53ae88b5
|
[] |
no_license
|
kkosiorowska/statistical-lab
|
657a6c019ea2186b949f6bcbdad8dd0be0766af6
|
1ae367377087ea38144b3407cda204db7094b344
|
refs/heads/master
| 2022-04-06T20:37:38.400913
| 2020-03-06T10:39:05
| 2020-03-06T10:39:05
| 244,583,632
| 0
| 0
| null | null | null | null |
IBM852
|
R
| false
| false
| 1,012
|
r
|
ex3.R
|
# ex3.R -- fixed-rate mortgage amortization schedule.
# K = principal, r = annual nominal rate, L = term in years, N = term in months.
r <- 0.05
rr <- 1 + r / 12
K <- 300000
L <- 20
N <- 12*L
n <- 1:N
# Constant monthly annuity payment.
rataKredytu <- K * rr ^ N * (rr - 1) / (rr ^ N - 1)
# Remaining debt after payment n.
zadluzenie <- K * (rr ^ N - rr ^ n) / (rr ^ N - 1)
# Interest portion of payment n.
odsetki <- K * (rr ^ N - rr ^ ( n - 1)) / (rr ^ N - 1) * (rr - 1)
# Principal portion of payment n.
rataKapitalu <- rataKredytu - odsetki
kredyt <- cbind(rataKapitalu, odsetki, rataKredytu, zadluzenie) # binds the column vectors into one matrix
class(kredyt)
head(kredyt, 10)
tail(kredyt, 10)
dim(kredyt)
# The print() strings below are Polish row-range captions; they are runtime
# output and left untouched (one contains a mis-encoded character from the
# file's IBM852 encoding).
print("Wiersze od 100 do 125")
print(kredyt[100:125,])
print("Pierwsze 20 wierszow")
print(head(kredyt, 20))
print("Ostatnie 30 wierszˇw")
print(tail(kredyt, 30))
print("Wiersze od 20 do 30 i od 50 do 60")
print(kredyt[c(20:30, 50:60),])
print("Co dziesieta rate")
print(kredyt[seq(10, to=dim(kredyt)[1], by=10),])
# Totals over the whole schedule.
ratyKapitaluSum <- sum(kredyt[,1])
odsetkiSum <- sum(kredyt[,2])
ratyKredytuSum <- sum(kredyt[,3])
data.frame(ratyKapitaluSum, odsetkiSum, ratyKredytuSum)
Sum <- ratyKapitaluSum + odsetkiSum + ratyKredytuSum
# First month in which the principal portion exceeds the interest portion.
when <- which(rataKapitalu > odsetki)[1]
|
0784d3c597d65cc0cfc77cf66b6ca187dd621a3f
|
16beab4e9d61e113858cdb1fad1c09cc2ad03d26
|
/community_based_features.R
|
4f348f0d1711f5b6e7dfb68ae926322fc6cb09b8
|
[] |
no_license
|
jiunnguo/rstyle
|
ddced1de69eecdce7699db1fe4cd0054e57f8a27
|
48557a9d1a7fbc006b5608f7df1f1781eccf9bce
|
refs/heads/master
| 2020-09-11T10:49:52.683371
| 2019-07-12T08:48:23
| 2019-07-12T08:48:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,921
|
r
|
community_based_features.R
|
# Dependencies: use library() instead of require() so a missing package stops
# the script immediately instead of surfacing later as a confusing
# "could not find function" error (require() only returns FALSE).
library(tidyverse)
library(igraph)
library(rex)
library(datasets)
library(dplyr)
library(purrr)
library(stringr)
library(iterators)
# Pre-computed pipeline inputs:
#   pkgs -- per-package function/syntax features (columns used below include
#           pkg_name, pub_year, function_feat)
#   comm -- igraph community structure over CRAN packages
# NOTE(review): contents inferred from usage in this script -- verify against
# the scripts that produce these RDS files.
pkgs <- readRDS("pkgs_functions_with_syntax_feature.RDS")
comm <- readRDS("cran_community_20190518.RDS")
# One regex per naming convention. Used by match_function_style(), which
# returns the FIRST matching entry -- so list order is the precedence order
# (e.g. "foo" matches "alllowercase" before "snake_case"/"dotted.case").
style_regexes <- list(
"alllowercase" = rex(start, one_or_more(rex(one_of(lower, digit))), end),
"ALLUPPERCASE" = rex(start, one_or_more(rex(one_of(upper, digit))), end),
"UpperCamelCase" = rex(start, upper, zero_or_more(alnum), end),
"lowerCamelCase" = rex(start, lower, zero_or_more(alnum), end),
"snake_case" = rex(start, one_or_more(rex(one_of(lower, digit))), zero_or_more("_", one_or_more(rex(one_of(lower, digit)))), end),
"dotted.case" = rex(start, one_or_more(rex(one_of(lower, digit))), zero_or_more(dot, one_or_more(rex(one_of(lower, digit)))), end)
)
# Classify each function name in `x` into a naming-convention label.
# Returns a character vector (one label per non-NA name).
conv_style <- function(x, style_regexes) {
  # Drop missing names. Note: is.null() is not vectorized, so the original
  # `& !is.null(x)` term was a scalar TRUE (a no-op) and has been removed.
  x <- x[!is.na(x)]
  # The original assigned the result to a dead local (`styles <- ...`), which
  # also made the return value invisible; return it directly instead.
  map_chr(x, match_function_style, style_regexes = style_regexes)
}
# Return the name of the first regex in `style_regexes` that `x` matches,
# or "other" when none match (list order encodes precedence).
match_function_style <- function(x, style_regexes) {
  hits <- map_lgl(style_regexes, ~ str_detect(x, .))
  if (sum(hits) == 0) {
    return("other")
  }
  # First match wins.
  names(style_regexes)[min(which(hits))]
}
# Names of the packages assigned to community `x` (reads global `comm`).
get_target_pkgs <- function(x) {
  # Compute the membership vector once; the original called membership(comm)
  # twice and funneled the result through a `->` assignment before returning.
  m <- membership(comm)
  names(m)[m == x]
}
# For each package in `target_pkgs`: keep releases up to 2018, take the most
# recent release per package, pull the per-function feature results (dropping
# packages whose extraction failed and returned NULL), then column-sum the
# syntax-feature counts (fx_assign..fx_tab) and transpose to a one-column
# matrix (features as rows). Reads global `pkgs`.
get_feature_table_from_pkgs <- function(target_pkgs) {
feature_table <- pkgs %>% filter(pub_year <= 2018) %>%
filter(pkg_name %in% target_pkgs) %>%
group_by(pkg_name) %>%
top_n(1, wt = pub_year) %>%
ungroup %>% select(function_feat) %>%
pull %>% map("result") %>% Filter(Negate(is.null), .) %>%
map_dfr(function(x) x) %>%
summarise_at(vars(fx_assign:fx_tab), sum) %>% t
return(feature_table)
}
# Same selection pipeline as get_feature_table_from_pkgs(), but instead of
# summing syntax features it classifies every function name into a naming
# convention (via conv_style/style_regexes) and counts names per convention.
# Reads globals `pkgs` and `style_regexes`.
get_naming_feature_table_from_pkgs <-function(target_pkgs){
naming_features_table <- pkgs %>% filter(pub_year <= 2018) %>%
filter(pkg_name %in% target_pkgs) %>%
group_by(pkg_name) %>%
top_n(1, wt = pub_year) %>%
ungroup %>% select(function_feat) %>%
pull %>% map("result") %>% Filter(Negate(is.null), .) %>%
map_dfr(function(x) x) %>%
mutate(styles = map(fx_name, conv_style, style_regexes = style_regexes)) %>%
summarise(alllower = sum(unlist(styles) == "alllowercase"),
allupper = sum(unlist(styles) == "ALLUPPERCASE"),
upcamel = sum(unlist(styles) == "UpperCamelCase"),
lowcamel = sum(unlist(styles) == "lowerCamelCase"),
snake = sum(unlist(styles) == "snake_case"),
dotted = sum(unlist(styles) == "dotted.case"),
other = sum(unlist(styles) == "other"))
# Transpose so styles become rows and the community becomes a single column,
# matching the layout produced by get_feature_table_from_pkgs().
naming_features_table_df <- data.frame(naming_features_table)
return(as.data.frame(t(naming_features_table_df)))
}
### build two fx feature tables for all communities
### df_total: counting
### df_ratio_total: ratio
# Community ids are paired positionally with the human-readable labels in
# `column_name` -- the two must stay the same length and order.
community_ids <- list(15,9,4,60,14,35,1,36,25,39,23,19,31,8,64,73,18,20,120)
#community_ids <- list(20,120)
column_name <- c(
"Rstudio-related packages","base","image plotting",
"RCpp","GPS and GEO","ML","public health and Statistics",
"text analysis","social network analysis",
"mix of graphics and anomaly detection",
"graph and its visualization","genetics",
"finance","insurance and actuary","numerical optimization",
"sparse matrix","Java","time, date, and money","neuronal science")
# column_name <- c(
# "time, date, and money","neuronal science")

# Collect one table per community, then bind once at the end. The original
# grew the data frames with cbind() inside a while loop over an iterator,
# copying the accumulated tables on every step; a for loop with preallocated
# lists produces identical results.
feature_tables <- vector("list", length(community_ids))
naming_tables <- vector("list", length(community_ids))
for (k in seq_along(community_ids)) {
  target_pkgs <- get_target_pkgs(community_ids[[k]])
  feature_tables[[k]] <- get_feature_table_from_pkgs(target_pkgs)
  naming_tables[[k]] <- get_naming_feature_table_from_pkgs(target_pkgs)
}
df_total <- do.call(cbind, feature_tables)
df_ratio_total <- do.call(cbind, lapply(feature_tables, prop.table))
df_naming_total <- do.call(cbind, naming_tables)
df_naming_ratio_total <- do.call(cbind, lapply(naming_tables, prop.table))
colnames(df_total) <- column_name
colnames(df_ratio_total) <- column_name
colnames(df_naming_total) <- column_name
colnames(df_naming_ratio_total) <- column_name
View(df_total)
View(df_ratio_total)
View(df_naming_total)
View(df_naming_ratio_total)
df_total %>% saveRDS('community_df_total.RDS')
df_ratio_total %>% saveRDS('community_df_ratio_total.RDS')
df_naming_total %>% saveRDS('community_df_naming_total.RDS')
df_naming_ratio_total %>% saveRDS('community_df_naming_ratio_total.RDS')
###
|
5e5e6e25aec773f41d0fb071a8ab283abff96b71
|
256d3f44b60010812de16c9cfc8d361e8a7c14ed
|
/plot2.R
|
d116958f057d84e588337b980d9523b68c090770
|
[] |
no_license
|
mrcherve/ExData_Plotting1
|
b0a9f486c87d5a09c70fe27b08378bf937fe586f
|
2b3df09db4b1eee77d78fcb591ca08a598925e11
|
refs/heads/master
| 2020-09-10T20:29:17.754545
| 2019-11-15T02:34:45
| 2019-11-15T02:34:45
| 221,826,861
| 0
| 0
| null | 2019-11-15T02:24:45
| 2019-11-15T02:24:43
| null |
UTF-8
|
R
| false
| false
| 939
|
r
|
plot2.R
|
# plot2.R -- draws Global Active Power over time for 2007-02-01/02 and saves
# it as plot2.png (Exploratory Data Analysis course project, plot 2).
# NOTE(review): hard-coded absolute path; the script only runs on this machine.
data_url <- "C:/Users/mikael.herve/Documents/R/household_power_consumption.txt"
# Force numeric types for the measurement columns up front ("?" marks NA).
# NOTE(review): `class` shadows base::class within this script.
class <- c(Voltage="numeric", Global_active_power="numeric",Global_intensity="numeric",Sub_metering_1="numeric",Sub_metering_2="numeric",Sub_metering_3="numeric",Global_reactive_power="numeric")
origin <-read.table(data_url, header=TRUE,sep=";",dec=".", stringsAsFactors=FALSE, na.strings = "?",colClasses = class)
# Keep only the two target days; Date is still a d/m/Y string at this point.
data<-subset(origin,origin$Date=="1/2/2007" | origin$Date=="2/2/2007")
#data$Date <- as.Date(data$Date, format="%d/%m/%Y")
#origin$Date <- as.Date(origin$Date, format="%d/%m/%Y")
#data<-subset(origin,origin$Date=="2007-2-1" | origin$Date=="2007-2-2")
#data$Time <- as.POSIXct(data$Time,format="%H:%M:%S")
# Combine Date + Time into a datetime for the x axis.
data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
png("plot2.png",480,480)
with(data,plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
|
3b72cf4bd289fb9199385c4b17f86e38be1ae54f
|
81a2fa3228451179b12779bb0149398cbfc8e9b1
|
/man/dot-insp1dimByClustering.Rd
|
161a1f587972d0e7ddeb358e0345acdd1f2e5cd2
|
[] |
no_license
|
cran/wrMisc
|
c91af4f8d93ad081acef04877fb7558d7de3ffa2
|
22edd90bd9c2e320e7c2302460266a81d1961e31
|
refs/heads/master
| 2023-08-16T21:47:39.481176
| 2023-08-10T18:00:02
| 2023-08-10T19:30:33
| 236,959,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,210
|
rd
|
dot-insp1dimByClustering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searchLinesAtGivenSlope.R
\name{.insp1dimByClustering}
\alias{.insp1dimByClustering}
\title{Segment (1-dim vector) 'dat' into clusters}
\usage{
.insp1dimByClustering(
dat,
automClu = TRUE,
cluChar = TRUE,
silent = FALSE,
debug = FALSE,
callFrom = NULL
)
}
\arguments{
\item{dat}{matrix or data.frame, main input}
\item{automClu}{(logical) run automatic clustering}
\item{cluChar}{(logical) to display cluster characteristics}
\item{silent}{(logical) suppress messages}
\item{debug}{(logical) additional messages for debugging}
\item{callFrom}{(character) allow easier tracking of messages produced}
}
\value{
This function returns clustering (class index) or (if 'cluChar'=TRUE) list with clustering and cluster-characteristics
}
\description{
This function allows segmenting (1-dim vector) 'dat' into clusters.
If 'automClu=TRUE ..' first try automatic clustering, if too few clusters, run km with length(dat)^0.3 clusters
This function requires the package NbClust to be installed.
}
\examples{
set.seed(2016); dat1 <- matrix(c(runif(200)+rep(1:10,20)),ncol=10)
}
\seealso{
\code{\link{searchLinesAtGivenSlope}}
}
|
6a14412a082ab32593dc501b392d8ffde5bd9e47
|
74d8c1f83aa5cc608eecb91e5282b1c93cfb87db
|
/ShinyApps/teamStats/server.R
|
24d8c9f45d734168e04af1a2fd68f4b654cf7179
|
[] |
no_license
|
rjmorgan4/585-project
|
6eb053e213537944797cbd68fb6c18018c064360
|
23cd32eb882d824040bb8eae1094bb37cd71e3f8
|
refs/heads/master
| 2021-01-18T23:48:37.882000
| 2017-05-01T16:03:59
| 2017-05-01T16:03:59
| 87,127,431
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 985
|
r
|
server.R
|
###Team Stats
## Server
# Shiny server: filters team-stats rows by conference, type, and season range,
# then renders an interactive scatterplot of two user-chosen variables.
library(shiny)
library(ggplot2)
library(plotly)
# NOTE(review): %>% and filter() below require dplyr/magrittr to be attached;
# confirm dplyr is loaded elsewhere (e.g. ui.R/global.R) or rely on plotly's
# re-exports -- otherwise filter() may resolve to stats::filter.
##Read in a data set that is saved to the same folder
teamStats <- read.csv("All_Schools_Team_Stats_Post2000.csv")
shinyServer(function(input, output) {
#Put reactive something here for manipulating the data
# Reactive subset: rows matching the chosen conference and type, with Season
# inside the selected [start, end] range (integer-sequence membership).
filteredData <- reactive({
teamStats %>%
filter(Conference == input$conf) %>%filter(Type == input$Type) %>% filter(Season %in% input$Seasons[1] :input$Seasons[2])
})
# Build the ggplot lazily; x/y columns are looked up by name from the inputs.
plot<- reactive({
graph <- filteredData() %>% ggplot(aes(x=filteredData()[[input$xvariable]],y = filteredData()[[input$yvariable]], color=Team, type=Season)) +
geom_point()+
ggtitle(paste(input$conf," Teams' ",input$yvariable," vs ",input$xvariable," for ",input$Seasons[1]," through", input$Seasons[2]))+
xlab(input$xvariable)+
ylab(input$yvariable)
})
# Render the ggplot through plotly for hover/zoom interactivity.
output$graph = renderPlotly(
ggplotly(plot())
)
})
|
86b780d9d3316d35053495e47d7f4d5c53ba1e7a
|
58d06ff7d5c1e12e4033f2024e837b723951a7f7
|
/TM_Swades.R
|
c97652df5fdc698f4b6a1dd35f09d76c17396888
|
[] |
no_license
|
karthiknr2/Karthik
|
a7471280d31164db19afc0343cf448eebf22cb1e
|
ddca7b72fe4074992686f78e4781df07b7855dbd
|
refs/heads/master
| 2023-01-07T08:54:12.702385
| 2020-10-19T19:03:29
| 2020-10-19T19:03:29
| 257,606,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,937
|
r
|
TM_Swades.R
|
# TM_Swades.R -- scrapes IMDb user reviews for "Swades", then runs topic
# modelling (LDA), hierarchical clustering of topics, a word cloud, and NRC
# sentiment scoring on the review text.
library(rvest)
library(XML)
library(magrittr)
library(rJava)
library(tm)
library(SnowballC)
library(scales)
library(wordcloud)
library(RWeka)
library(textir)
library(data.table)
library(stringr)
library(slam)
library(ggplot2)
library(syuzhet)
library(reshape2)
library(dplyr)
library(lubridate)
library(topicmodels)

# --- Scrape review text ----------------------------------------------------
swades <- NULL
rev <- NULL
url <- "https://www.imdb.com/title/tt0367110/reviews?ref_=tt_ql_3"
murl <- read_html(as.character(paste(url,1, sep = "")))
rev <- murl %>% html_nodes(".show-more__control") %>% html_text()
swades <- c(swades,rev)
write.table(swades,"swades.txt")
getwd()

# --- Load and clean the corpus (file chosen interactively) -----------------
swades_movie <- readLines(file.choose())
str(swades_movie)
summary(swades_movie)
swades_corpus <- Corpus(VectorSource(swades_movie))
swades_corpus <- tm_map(swades_corpus,tolower)
swades_corpus <- tm_map(swades_corpus,removePunctuation)
swades_corpus <- tm_map(swades_corpus,removeNumbers)
# Custom stopword list, also chosen interactively, plus English stopwords.
stopwords <- readLines(file.choose())
swades_corpus <- tm_map(swades_corpus,removeWords,stopwords)
swades_corpus <- tm_map(swades_corpus,removeWords,stopwords("english"))
swades_corpus <- tm_map(swades_corpus,stripWhitespace)
inspect(swades_corpus[1:5])

# --- Topic modelling -------------------------------------------------------
s.dtm <- DocumentTermMatrix(swades_corpus)
s.dtm <- as.matrix(s.dtm)
table(s.dtm)
dim(s.dtm)
row_totals <- apply(s.dtm,1,sum)
# LDA cannot handle empty documents; keep rows with at least one term.
s.new <- s.dtm[row_totals>0,]
class(s.new)
lda <- LDA(s.new,10)
lterm <- terms(lda,1)
lterm
tops <- terms(lda)
tb <- table(names(tops),unlist(tops))
tb <- as.data.frame.matrix(tb)
tb
cls <- hclust(dist(tb),method = 'ward.D2')
par(family ='HiraKakuProN-W3')
plot(cls)

# --- Term frequencies and word cloud ---------------------------------------
a <- colSums(s.new)
a <- subset(a,a>=5)
# BUG FIX: barplot() has no `color` argument (it fell into `...` and was
# ignored); the graphical parameter is `col`.
barplot(a,las=2,col=rainbow(50))
# `words` is the documented argument name; the original's `word=` relied on
# partial argument matching.
wordcloud(words=names(a),freq = a,random.order = FALSE,max.words = 150)

# --- Sentiment analysis (NRC lexicon) --------------------------------------
senti_score <- get_nrc_sentiment(swades_movie)
head(senti_score)
# BUG FIX: `color` -> `col` here as well.
barplot(colSums(senti_score),las=2,ylab="count",col=rainbow(10),main="bar plots of sentimental analysis")
|
e2b5b9205320a06702e50a2295468a49aa42a44c
|
366397e9b2bf247a1f2be266b6ec3ccc092ed288
|
/man/cc_severe.Rd
|
69023239a185c27a602a7fedeca3bd83712e6e4d
|
[] |
no_license
|
cran/edgedata
|
c7f5356b20551f899a0990062ce5df570390e872
|
25157bf66a805e34014cbf0e400f3aa7ba1f92c0
|
refs/heads/master
| 2023-03-09T02:34:20.016139
| 2021-02-26T21:00:09
| 2021-02-26T21:00:09
| 258,765,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 852
|
rd
|
cc_severe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cc_int.R
\docType{data}
\name{cc_severe}
\alias{cc_severe}
\title{HCC to severity group mapping - Table 6}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with
8 rows and 3 columns
\describe{
\item{cc}{Hierarchical condition category (Currently includes some G*)}
\item{var}{Variable mapped to (severe_v3)}
\item{desc}{Short description of the variable}
}
}
\source{
Data import and cleaning at:
\url{https://github.com/EeethB/edgedata/tree/main/data-raw}
}
\usage{
cc_severe
}
\description{
A dataset containing the mapping from HCC to severe status.
}
\seealso{
Other Severe interaction tables:
\code{\link{cc_int_h}},
\code{\link{cc_int_m}}
}
\concept{Severe interaction tables}
\keyword{datasets}
|
4e385e3d52ab74ee044f4b95bcf0e21128531f44
|
ab85697ca3f211c4bd5e2b4f0086426fa3839298
|
/drills/plot-drills/ggplots/ggplots.r
|
f1d9c835ab244b859f245761c3b39c0e424ae665
|
[] |
no_license
|
hadley/stat405-resources
|
f248dc99e0b523f340ff3eb6f56a5e9ba39524ad
|
35fed042554e8fb29b49bf28144e98c563b565ab
|
refs/heads/master
| 2016-09-06T04:09:32.598802
| 2011-01-12T13:20:35
| 2011-01-12T13:20:35
| 288,798
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,172
|
r
|
ggplots.r
|
library(maps)
library(ggplot2)
feb13 <- read.csv("delays/delays-feb-13-2007.csv", header = T, stringsAsFactors = F)
# 1. Texas Plane flights
texas <- map_data("state", "texas")
texmap <- c(
geom_polygon(data = texas, colour = "grey70", fill = NA),
scale_x_continuous("", limits = c(-107, -93)),
scale_y_continuous("", limits = c(25.9, 37))
)
ggplot(feb13, aes(long, lat)) +
texmap +
geom_point(aes(size = ntot, colour = ndelay / ntot)) +
geom_text(aes(label = origin),
data = subset(feb13, ndelay >= 100),
size = 4, hjust = 1.5) +
scale_area("total flights", to = c(1, 8)) +
scale_colour_gradient("percent delayed")
ggsave(filename = "texmap1.png", width = 6, height = 4, dpi = 72)
# 2. Airlines point map
ggplot(feb13, aes(ntot, ncancel)) +
geom_point(data = subset(feb13, origin == "IAH"), size = 7,
colour = alpha("red", 0.5)) +
geom_point() +
geom_text(data = subset(feb13, origin == "IAH"),
aes(label = origin), hjust = -.5) +
geom_smooth(method = "lm", se = T) +
labs(y = "Number of flights cancelled",
x = "Total number of flights")
ggsave(filename = "airports2.png", width = 6, height = 4, dpi = 72)
# 3. class names comparison
# Fix: ddply() is used below but library(plyr) was only loaded much later in
# the script (section 7), so this section errored when run top-to-bottom.
library(plyr)
# baby-names.csv columns used: name, year, percent (per sex/year).
names <- read.csv("baby-names-data/baby-names.csv", header = T, stringsAsFactors = F)
class <- c("Rakesh", "Luis", "Yanli", "Yen-yin", "Sarah", "Delma", "Chandra", "Elizabeth", "Kim-chi", "Amanda", "Thomas", "Caroline", "Da", "Christine", "Debra", "Christopher", "Justin", "Lisa", "Meng", "Emilian","Rachel", "Lu", "Casper", "Jingjing", "Chengyong", "Ruo", "Zhongyu")
class_names <- subset(names, name %in% class)
# Average the percent across sexes (rows) for each name/year pair.
class_names <- ddply(class_names, c("name", "year"), summarise, percent = sum(percent) / length(percent))
# Stacked area chart of class-name popularity over time; annotation notes
# that names absent from the dataset simply contribute nothing.
ggplot(class_names, aes(year, percent)) +
geom_area(aes(group = name, fill = name)) +
geom_text(aes(year, percent,
label = "*some names did not appear in the dataset"),
data = data.frame(year = 1925, percent = 0.10), size = 3)
ggsave(filename = "classnames3.png", width = 6, height = 4, dpi = 72)
# 4. names boxplots
# One box per 5-year bin (round_any from plyr), with a smoothed mean overlaid.
ggplot(class_names, aes(year, percent)) +
geom_boxplot(aes(group = round_any(year, 5, floor))) +
geom_smooth(se = F, size = 1) +
geom_text(aes(year, percent,
label = "*blue line is a smoothed mean"), colour = "blue",
data = data.frame(year = 1906, percent = 0.029), size = 3) +
geom_text(aes(year, percent,
label = "Popularity of class names as a group"),
data = data.frame(year = 1911, percent = 0.03), size = 3)
ggsave(filename = "boxplots4.png", width = 6, height = 4, dpi = 72)
# 5a. Dodged bars: clarity counts split side-by-side per cut.
ggplot(diamonds, aes(clarity)) +
geom_bar(aes(fill = cut), position = "dodge")
ggsave(filename = "dodge5a.png", width = 6, height = 4, dpi = 72)
# 5b. Same counts, but one facet row per cut instead of dodging.
ggplot(diamonds, aes(clarity)) +
geom_bar(aes(fill = cut)) +
facet_grid(cut ~ .)
ggsave(filename = "facet5b.png", width = 6, height = 4, dpi = 72)
# batting data set
# Columns used below: team, year, r (runs), hr (home runs), g (games).
b <- read.csv("batting.csv", header = T, stringsAsFactors = F)
# 6. Tiled density games by year
# Note: won't work unless contour = F
ggplot(b, aes(year, g)) +
stat_density2d(geom = "tile", aes(fill = ..density..), contour = F) +
scale_fill_gradient(low = "black", high = "white")
ggsave(filename = "battile6.png", width = 6, height = 4, dpi = 72)
# 7. Homeruns Yankees vs. Red Sox
library(plyr)
# Relabel the team codes with display names before combining.
yankees <- subset(b, team == "NYA")
yankees <- transform(yankees, team = "Yankees")
boston <- subset(b, team == "BOS")
boston <- transform(boston, team = "Red Sox")
yb <- rbind(yankees, boston)
# Total runs per team per year.
yb_runs <- ddply(yb, c("year", "team"), summarise,
total_runs = sum(r, na.rm = T))
# Smoothed run totals with vertical markers for the "Curse of the Bambino"
# period (1918-2004).
# NOTE(review): scale_colour_manual(value =) is ggplot2 0.8-era spelling;
# modern ggplot2 requires values = -- confirm the targeted ggplot2 version.
ggplot(yb_runs, aes(year, total_runs)) +
geom_smooth(aes(colour = team)) +
scale_colour_manual(value = c("red", "blue")) +
geom_vline(aes(xintercept = c(1918, 2004))) +
geom_text(aes(x,y, label = "Curse Begins"),
data = data.frame(x = 1917, y = 400), size = 3, hjust = 0,
vjust = 0, angle = 90) +
geom_text(aes(x,y, label = "Curse Ends"),
data = data.frame(x = 2003, y = 400), size = 3, hjust = 0,
vjust = 0, angle = 90)
ggsave(filename = "hrline7.png", width = 6, height = 4, dpi = 72)
# 8. Homeruns with bars
yb_homeruns <- ddply(yb, c("year", "team"), summarise,
total_hr = sum(hr, na.rm = T))
ggplot(yb_homeruns, aes(year, total_hr)) +
geom_bar(aes(fill = team), stat = "identity", position = "dodge") +
scale_fill_manual(value = alpha(c("red", "blue"), 0.4)) +
geom_smooth(aes(colour = team)) +
scale_colour_manual(value = c("red", "blue"))
ggsave(filename = "hrbars8.png", width = 6, height = 4, dpi = 72)
# 9. Homeruns area
# Overlapping (position = "identity") translucent areas per team.
ggplot(yb_homeruns, aes(year, total_hr)) +
geom_area(aes(fill = team), position = "identity") +
scale_fill_manual(value = alpha(c("red", "blue"), 0.4)) +
geom_vline(aes(xintercept = 1918)) +
geom_text(aes(x,y, label = "Curse Begins"),
data = data.frame(x = 1919, y = -10), size = 3, hjust = 0,
vjust = 0)
ggsave(filename = "hrarea9.png", width = 6, height = 4, dpi = 72)
# 10. Homeruns boxplot facetted by curse year
# Split the combined data into curse (1919-2004) vs non-curse eras and
# compare home-runs-per-run between the teams within each era.
yb_curse <- subset(yb, year > 1918 & year <= 2004)
yb_curse <- transform(yb_curse, curse = "Curse years")
yb_noncurse <- subset(yb, year <= 1918 | year > 2004)
yb_noncurse <- transform(yb_noncurse, curse = "Non-curse Years")
yb <- rbind(yb_curse, yb_noncurse)
ggplot(yb, aes(team, hr / r)) +
geom_boxplot() +
facet_grid( . ~ curse)
ggsave(filename = "hrcurse10.png", width = 6, height = 4, dpi = 72)
# players data set
# Columns used below: id, country, height, weight, first, last, bats, throws.
p <- read.csv("players.csv", header = T, stringsAsFactors = F)
# 11. World map of players
library(maps)
world_map <- map_data("world")
# map_data's 5th column is "region"; rename so it joins against p$country.
names(world_map)[5] <- "country"
# Player count per country, merged onto the map polygons (all = T keeps
# countries with no players), then re-sorted so polygons draw correctly.
p_country <- ddply(p, "country", summarise, total = length(country))
p_map <- merge(p_country, world_map, by = "country", all = T)
p_map <- p_map[order(p_map$order), ]
# Choropleth on a log scale (counts are heavily skewed toward the USA).
ggplot(p_map, aes(long, lat)) +
geom_polygon(aes(group = group, fill = log(total)), colour = "grey60", size = .3) +
ylim(-55, 85)
ggsave(filename = "playermap11.png", width = 6, height = 4, dpi = 72)
# 12. Area map of states
# Join batting records to player bios; rank countries by record count and
# keep ranks 2-11 (rank 1, presumably USA, would swamp the plot).
bp <- merge(b, p, by = "id")
bp_country <- ddply(bp, "country", summarise, total = length(country))
bp_country <- bp_country[order(-bp_country$total), ]
bp_10 <- subset(bp, country %in% bp_country[2:11, 1])
# NOTE(review): opts() is ggplot2 0.8-era API, replaced by labs()/theme()
# in modern ggplot2 -- confirm the targeted ggplot2 version.
ggplot(bp_10, aes(year)) +
geom_area(aes(y = ..count.., fill = country), stat = "bin", binwidth = 10, position = "stack") +
opts(title = "10 most represented foreign countries in combined dataset") +
xlab("year (bin = 10 years)")
ggsave(filename = "statefill12.png", width = 6, height = 4, dpi = 72)
# 13.Right vs. left handers
# Drop rows with unrecorded batting hand before facetting by it.
bp_trimmed <- subset(bp, bats != "")
ggplot(bp_trimmed, aes(throws)) +
geom_bar() +
facet_grid (. ~ bats) +
opts(title = "Hand preference by batting preference")
ggsave(filename = "hand13.png", width = 6, height = 4, dpi = 72)
# 14. Strikeouts by height
# Heavy jitter + low alpha to expose density; heights are recorded in
# whole inches so points would otherwise overplot in columns.
ggplot(bp, aes(height, so)) +
geom_jitter(position = position_jitter(width = 5), alpha = 0.05) +
xlim(60, 85)
ggsave(filename = "soheight14.png", width = 6, height = 4, dpi = 72)
# 15. labelled home runs
# Seasons with more than 60 HR, labelled with the player's full name.
ggplot(subset(bp, hr > 60), aes(weight, hr)) +
geom_point() +
geom_smooth(method = "lm", se = F) +
geom_text(aes(label = paste(first, last, sep = " ")), hjust = -0.1) +
xlim(203, 233) +
opts(title = "Weight vs. performance among record holders")
ggsave(filename = "hrweight15.png", width = 6, height = 4, dpi = 72)
# delays data set
# Re-read the delays file (feb13 was reused above for other plots).
feb13 <- read.csv("delays/delays-feb-13-2007.csv",
header = T, stringsAsFactors = F)
# 16. US Map
# Restrict to the contiguous US, then bubble-plot the busier airports.
lower48 <- subset(feb13, long > -130)
lower48 <- subset(lower48, lat > 20)
ggplot(subset(lower48, ntot >= 100), aes(long, lat)) +
borders("state") +
geom_point(aes(size = ndelay, colour = log(avgdelay)))
ggsave(filename = "airmap16.png", width = 6, height = 4, dpi = 72)
# 17. cancelled by longitude
# Label western airports with >40% cancellations.
ggplot(feb13, aes(long, cperc)) +
geom_point(aes(colour = cperc, size = ntot)) +
geom_text(data = subset(feb13, cperc > 0.4 & long < -100),
aes(label = origin), hjust = 1.2, angle = -45,
colour = "orange")
ggsave(filename = "longdelay17.png", width = 6, height = 4, dpi = 72)
# 18. Number of flights by longitude
# Airport density along longitude, with LA and Chicago marked.
ggplot(feb13, aes(long, ntot)) +
geom_area(aes(y = ..density..), stat = "density", alpha = 0.5) +
geom_vline(xintercept = c(-118, -87)) +
geom_text(aes(x,y, label = "Los Angeles"),
data = data.frame(x = - 119, y = 0), size = 4, hjust = 0,
vjust = 0, angle = 90) +
geom_text(aes(x,y, label = "Chicago"),
data = data.frame(x = -88, y = 0), size = 4, hjust = 0,
vjust = 0, angle = 90)
ggsave(filename = "longtot18.png", width = 6, height = 4, dpi = 72)
# 19. Number of flights by airport
# Only airports with more than 400 flights; bars shaded by cancel rate.
main <- subset(feb13, ntot > 400)
ggplot(main, aes(origin, ntot)) +
geom_bar(aes(fill = cperc)) +
opts(axis.text.x = theme_text(angle = 90, hjust = 1))
ggsave(filename = "topairports19.png", width = 6, height = 4, dpi = 72)
# diamonds data set
# 20. pie chart by cut
# A stacked bar on a dummy x, wrapped into polar coordinates.
ggplot(diamonds, aes(x = "", fill = cut)) +
geom_bar(width = 1) +
coord_polar(theta = "y")
ggsave(filename = "pie20.png", width = 6, height = 4, dpi = 72)
|
200e42635c81a23c0a24278b9a1a81b00d104f04
|
e04c0d423fde5be2567111b6983cc91e63c93232
|
/R/databricks_execute.R
|
93400929ae27a198074efd53dc641999065c1d08
|
[] |
no_license
|
RafiKurlansik/bricksteR
|
b42b3b3556ef3394b7e7801568a8e228083ad336
|
9199ab34dda462601186c25cf8655483f0bbe408
|
refs/heads/master
| 2022-10-28T14:35:21.875280
| 2022-10-06T15:36:30
| 2022-10-06T15:36:30
| 227,508,502
| 25
| 6
| null | 2021-07-15T11:59:22
| 2019-12-12T03:04:36
|
R
|
UTF-8
|
R
| false
| false
| 5,679
|
r
|
databricks_execute.R
|
#'
#' Remote execution of commands on a Databricks cluster.
#'
#' This function sends commands to an execution context on an existing
#' Databricks cluster via REST API. It requires a context_id from
#' \code{create_execution_context}. Commands must be compatible with the
#' language of the execution context - 'r', 'python', 'scala', or 'sql'.
#' Will attempt to return a data.frame but if the execution hasn't finished will return
#' the status of execution. If your command does not return a data.frame output may
#' vary considerably, or fail.
#'
#' The API endpoint for creating the execution context is is '1.2/commands/execute'.
#' For all details on API calls please see the official documentation at
#' \url{https://docs.databricks.com/dev-tools/api/latest/}.
#'
#' @param command A string containing commands for remote execution on Databricks.
#' @param context The list generated by \code{create_execution_context}
#' @param verbose If TRUE, will print the API response to the console. Defaults to
#' FALSE.
#' @param ... Additional options to be passed to \code{data.table::fread} which is used to
#' parse the API response.
#' @return A list with two components:
#' \itemize{
#' \item \emph{response} - The full API response.
#' \item \emph{data} - The data as a data.frame.
#' }
#' @examples
#' # Using netrc
#' context <- create_execution_context(workspace = "https://eastus2.azuredatabricks.net",
#' language = "r",
#' cluster_id = "1017-337483-jars232")
#'
#' ## Use the context to execute a command on Databricks
#' command <- "iris[1, ]"
#' result <- databricks_execute(command, context)
#'
#' ## Access dataframe
#' result$data
#'
databricks_execute <- function(command, context, verbose = F, ...) {
# Build the JSON payload by string interpolation.
# NOTE(review): `command` is spliced in unescaped, so commands containing
# double quotes or newlines would yield invalid JSON -- confirm callers
# only pass simple one-line commands, or build this via jsonlite::toJSON.
payload <- paste0('{
"language": "', context$language, '",
"clusterId": "', context$cluster_id, '",
"contextId": "', context$context_id, '",
"command": "', command, '"
}')
## Send command via REST, using netrc for auth by default
# NOTE(review): `workspace` is neither a parameter of this function nor
# read from `context`; it is resolved from the calling environment at run
# time. Presumably it should live in `context` -- verify against
# create_execution_context.
if (is.null(context$token)) {
use_netrc <- httr::config(netrc = 1)
execute_response <- httr::with_config(use_netrc, {
httr::POST(url = paste0(workspace, "/api/1.2/commands/execute"),
httr::content_type_json(),
body = payload)
})
}
else {
## Bearer Authentication
headers <- c(
Authorization = paste("Bearer", context$token)
)
execute_response <- httr::POST(url = paste0(workspace, "/api/1.2/commands/execute"),
httr::add_headers(.headers = headers),
httr::content_type_json(),
body = payload)
}
## Extract command ID from response
command_id <- jsonlite::fromJSON(rawToChar(execute_response$content))$id
# If the command hasn't finished executing, poll the API until it has
# NOTE(review): this loop only exits on status "Finished"; a command ending
# in another terminal state (e.g. "Error" or "Cancelled") would poll
# forever -- consider breaking on those states or adding a timeout.
repeat{
# Get result from status endpoint with command_id
if (is.null(context$token)) {
use_netrc <- httr::config(netrc = 1)
status_response <- httr::with_config(use_netrc, {
httr::GET(url = paste0(workspace, "/api/1.2/commands/status",
"?clusterId=", context$cluster_id,
"&contextId=", context$context_id,
"&commandId=", command_id))
})
# Share status
message(
"Command Status: ", jsonlite::fromJSON(rawToChar(status_response$content))$status
)
} else {
status_response <- httr::GET(url = paste0(workspace, "/api/1.2/commands/status",
"?clusterId=", context$cluster_id,
"&contextId=", context$context_id,
"&commandId=", command_id),
httr::add_headers(.headers = headers))
message(
"Command Status: ", jsonlite::fromJSON(rawToChar(status_response$content))$status
)
}
if (jsonlite::fromJSON(rawToChar(status_response$content))$status == "Finished") {
break
}
# If execution hasn't finished, wait a second and try again
Sys.sleep(1)
}
# could make this nested to account for both API calls used in this function
if (verbose == T) {
## Successful request message
if (status_response$status_code[1] == 200) {
message(paste0(
"Response Code: ", status_response$status_code[1],
"\nCommand Status: ", jsonlite::fromJSON(rawToChar(status_response$content))$status,
"\nCommand ID: ", command_id
))
}
## Unsuccessful request message
# NOTE(review): jsonlite::prettify() expects a JSON string; passing the
# httr response object here likely errors -- probably meant
# rawToChar(status_response$content).
else {
return(message(paste0(
"Status: ", status_response$status_code[1],
"\nThe request was not successful:\n\n", jsonlite::prettify(status_response)
)))
}
}
# Try to extract HTML snippet from API response
tryCatch(
{
txt <- xml2::read_html(jsonlite::fromJSON(rawToChar(status_response$content))$results$data) %>%
rvest::html_children() %>%
xml2::xml_text()
# Convert text to data.table if returning a data frame
# NOTE(review): `drop = ...` forwards the whole `...` into fread()'s
# `drop` argument, which contradicts the roxygen claim that `...` are
# generic fread options -- confirm intended usage.
df <- suppressWarnings(suppressMessages(data.table::setDF(data.table::fread(txt, drop = ...))))
results <- list(response = status_response,
data = df)
results
},
error = function(e) {
cat("There was a problem parsing the results - output must be a data.frame.
Please check your code and try again.")
jsonlite::prettify(status_response)
}
)
}
|
5fc225f17f574c0305f91ff2bc53cefe1c75c9e1
|
7577bceb20befd9f54f24f1779abb7b04e3b2190
|
/R-scripts/cuffdiff.R
|
45e2fdea191e511826f0ff5a99b9aef1ba3886b3
|
[] |
no_license
|
vitaly001/G_RNASeq
|
f9be8637871a35d7b21cdf4f1eafd05374dc644c
|
1f6a0b629e083d52f60845fbef4d90b76e22bbf8
|
refs/heads/master
| 2021-08-20T00:14:38.095296
| 2017-11-27T19:15:06
| 2017-11-27T19:15:06
| 112,236,087
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,009
|
r
|
cuffdiff.R
|
# Build and print the cuffdiff command line for every sample in samples.csv.
setwd("/Volumes/HD3/NGS/G_RNASeq")
samples <- read.csv("samples.csv", stringsAsFactors = FALSE)
genome <- "/Volumes/HD3/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa"
gf <- "/Volumes/HD3/NGS/G_RNASeq/Cuffmerge/merged.gtf"
# Fix: the original grew these strings with paste0(..., sep = ","). paste0()
# has no `sep` argument, so the separator string was concatenated as an extra
# element, leaving a trailing "," on the -L label list (which cuffdiff
# rejects) and a trailing space on the BAM list. Build them with
# paste(collapse = ) instead of growing strings in a loop.
bam_files <- file.path(samples$conditions, "accepted_hits.bam")
cuff_labels <- paste(samples$conditions, collapse = ",")
testfiles <- paste(bam_files, collapse = " ")
# cuffdiff should run with genome.fa file and option -b
print(paste0('cuffdiff -o Cuffdiff -p 6 ', ' -L ', cuff_labels, " -b ", genome, ' -u ', gf, ' ', testfiles, ' >& com.cuffdiff.log &'))
print(testfiles)
#samples in each condition should be separated by comma, between condition should be space only
#system(paste0("nohup cuffmerge -g ", gf ," -p 6 ", " -o", " /Volumes/HD2/ngsmus/Cuffmerge"," /Volumes/HD2/ngsmus/Cufflinks/assem_GTF.txt", " >& com.cuffmerge.log &"))
|
f2659cd88e992844ec0272c7e52943fef82fa956
|
dd6e07d255641d3a33e305ffca842c4a149921a2
|
/teste_variablen_in_tibble.R
|
7d97b08db25bf17fc67fb8014615617398f8e85d
|
[] |
no_license
|
W-Ing/SolarPV
|
c507dd77702644066bf4af8e6070af32a61bbfdf
|
dba19c25ca5fefdf506830f714b77668bd485bfc
|
refs/heads/master
| 2021-09-03T14:18:10.433302
| 2018-01-09T18:53:03
| 2018-01-09T18:53:03
| 111,313,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 584
|
r
|
teste_variablen_in_tibble.R
|
library(tidyverse)
# library(tibbletime)
# library(lubridate)
# library(reshape2)
#require(stringr)
# 1 where `par` equals "JA", 0 otherwise (vectorised; NA comparisons stay NA,
# which is how ifelse() propagates missing values).
myfunc <- function(par) {
  ifelse(par == "JA", 1, 0)
}
# Smoke-test of myfunc; at top level this auto-prints the result.
myfunc("N")
# get first observation for each Species in iris data -- base R
mini_iris <- iris[c(1, 51, 101), ]
# gather Sepal.Length, Sepal.Width, Petal.Length, Petal.Width
# NOTE(review): tidyr::gather() is superseded by pivot_longer() in current
# tidyr -- fine here, but new code should prefer pivot_longer().
my_iris <- gather(mini_iris, key = flower_att, value = measurement,
Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)
# same result but less verbose
gather(mini_iris, key = flower_att, value = measurement, -Species)
|
f153840c7b2f111f6ba55eaa5427e33d8540d989
|
57d3aae331ff9f9907800a36eaff6c0f689b4217
|
/Script files/plot_sfs.R
|
f01c51b6c3fe8bd527f953e0ff540b154b0cccdc
|
[] |
no_license
|
carolinelennartsson/PopulationGeneticsGroup6
|
ceba82ae9cb08e144355833960ed9d0173874449
|
39bcc430f1dd5701f4b98d79b9136e39649a1497
|
refs/heads/main
| 2023-03-29T20:12:25.027102
| 2021-04-09T11:54:30
| 2021-04-09T11:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,943
|
r
|
plot_sfs.R
|
# Plot the site-frequency spectrum (SFS) from a fastsimcoal-style .obs file,
# with error bars, saving the figure to sfs_<id>.png.
.libPaths("~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/software/Rlib")
library(optparse)
# Input arguments
option_list <- list(
  make_option("--path", type = "character", help = "path to .obs file"),
  make_option("--id", type = "character",
              help = "Id for output figures. Will return sfs_id.png"))
# Parse the arguments
parser <- OptionParser(usage = "%prog [options]", option_list = option_list)
args <- parse_args(parser, positional_arguments = 0)
opt <- args$options
path <- as.character(strsplit(opt$path, ",")[[1]])
id <- strsplit(opt$id, ",")[[1]]
#___________________________________________________________________
# Read the observed SFS; the first two lines of the file are headers.
obs <- read.table(path, skip = 2)
n <- sum(obs)               # grand total over all entries
var_sites <- ncol(obs) - 1  # index of the last variable-site column
# Normalise every column to a proportion of the total.
norm <- as.data.frame(lapply(obs, function(x) x / n))
# Mean proportion per site across replicates, skipping column 1
# (it has a much larger proportion than the rest).
norm_means <- colMeans(norm[, c(2:var_sites)])
# Standard deviation per site.
# Fix: the original looped `for (i in var_sites)`, which iterates over the
# single value `var_sites` and produced a length-1 sds vector that was then
# silently recycled against norm_means; compute one sd per column instead.
sds <- vapply(norm[, c(2:var_sites)], sd, numeric(1))
# Lower and upper boundaries for error bars (mean +/- sd/2).
lower <- norm_means - sds / 2
upper <- norm_means + sds / 2
comb <- data.frame(norm_means, lower, upper)
rownames(comb) <- c(2:var_sites)
comb <- tibble::rownames_to_column(comb, var = "rownames")
comb$rownames <- as.numeric(comb$rownames)
# Plot
png(paste0("sfs_", id, ".png"))
sfs_plot <- ggplot2::ggplot(comb, ggplot2::aes(x = rownames, y = norm_means)) +
  ggplot2::geom_bar(stat = "identity") +
  ggplot2::geom_errorbar(ggplot2::aes(ymin = lower, ymax = upper), width = .3) +
  ggplot2::theme_classic() +
  ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  ggplot2::scale_x_discrete(limits = seq(0, ncol(norm), 5)) +
  ggplot2::xlab("") +
  ggplot2::ylab("Average proportion of sites")
# Fix: explicitly print the plot and close the device. Without print() the
# ggplot is not drawn when the script is source()d, and without dev.off()
# the PNG file is never finalised.
print(sfs_plot)
dev.off()
|
96bc650e6a18d53eec410bb11614c75bb958a860
|
30babe66ab1ea3648dffce92aa6973f5a18ab8f6
|
/Going_through_chapter_4.R
|
c7365cf34ed473319654f64f13d26eb59fbf530b
|
[] |
no_license
|
annelinethomsen/ExperimentalMethods
|
a6093b6690e6fc37e126204d0ea7a49202223e10
|
d3bd69b89be8b891dfc0f7b92d75b1f6eb1cb538
|
refs/heads/master
| 2020-07-28T04:46:51.924232
| 2019-11-19T16:07:56
| 2019-11-19T16:07:56
| 209,313,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
r
|
Going_through_chapter_4.R
|
# Tutorial script following "Discovering Statistics Using R" chapter
# exercises: histograms, boxplots, densities and summary bar charts.
# NOTE(review): install.packages()/setwd() inside a script re-install and
# re-point on every run; acceptable for a learning script, but new code
# should install once interactively and use project-relative paths.
setwd("/Users/anne-linethomsen/Documents/R/ExperimentalMethods/Chapter 5")
install.packages("car")
install.packages("ggplot2")
install.packages("pastecs")
install.packages("psych")
library(tidyverse)
#I import the data:
festivalData <- read.delim("DownloadFestival.dat", header = TRUE)
#I tell ggplot to use my data with the observations from day1 on the x-axis:
festivalHistogram <- ggplot(festivalData, aes(day1))
#I tell ggplot to visualise the data in a hisotgram:
festivalHistogram + geom_histogram()
#I change the width of the bins and to add labels:
# (the first geom_histogram() layer is redundant; the binwidth = 0.4 layer
# drawn on top is what is visible)
festivalHistogram + geom_histogram() + geom_histogram(binwidth = 0.4) + labs(x = "Hygiene (Day 1 of Festival)", y = "Frequency")
#I now want the gender to be at the x-axis:
festivalBoxplot <- ggplot(festivalData, aes(gender, day1))
#I create a boxplot to see
festivalBoxplot + geom_boxplot() + labs(x = "Gender", y = "Hygiene (Day 1 of Festival)")
#I try to find the outlier:
# sorting by day1 puts the extreme value at one end of the data frame
festivalData<-festivalData[order(festivalData$day1),]
#How do I change my data?
#Importing the new datafile without the outlier:
festivalData2 <- read.delim("DownloadFestival(No Outlier).dat", header = TRUE)
#Informing ggplot about my values:
density <- ggplot(festivalData2, aes(day1))
#Creating a density and adding labels:
density + geom_density() + labs(x = "Hygiene (Day 1 of Festival)", y = "Density Estimate")
#Importing ChickFlick data:
chickFlick <- read.delim("ChickFlick.dat", header = TRUE)
#Data into ggplot:
bar <- ggplot(chickFlick, aes(film, arousal))
#I had to have the Hmisc package to do the next part, so I installed it:
install.packages("Hmisc")
#I want to make a graph of a summary of my graph, so I do this:
#When you write fun.y=mean you tell ggplot to find the mean, and when you write geom = bar, you tell it how you want the mean to be displayed.
# NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 in favour of `fun` --
# works here but emits a warning on modern ggplot2.
bar + stat_summary(fun.y = mean, geom = "bar", fill = "White", colour = "Black") +
stat_summary(fun.data = mean_cl_normal, geom = "pointrange") +
labs(x = "Film", y = "Mean Arousal")
|
f790d5f85ecb1a1ebdd22855205525d8bbe5ea36
|
d83cf027f2836a2e8b8a4c91a14f617b32d02016
|
/Plot2.R
|
4e704feec0d68ed1e3bf2e8635e451e8ee606f60
|
[] |
no_license
|
ProgramLearner7/ExData_Plotting1
|
3e7792bb7cf503f418a520db61388ab36bb864ea
|
9ef64c94227728f10de8f1df8d5fd3dfb301c29e
|
refs/heads/master
| 2020-03-28T19:19:12.216308
| 2018-09-16T16:41:53
| 2018-09-16T16:41:53
| 148,964,767
| 0
| 0
| null | 2018-09-16T04:58:11
| 2018-09-16T04:58:11
| null |
UTF-8
|
R
| false
| false
| 951
|
r
|
Plot2.R
|
library(dplyr)
library(readr)
library(lubridate)
# Read the UCI household power consumption file; Date/Time are parsed
# explicitly so the date filter below compares Date objects, not strings.
household_power_consumption = read_delim("household_power_consumption.txt",
";", escape_double = FALSE, col_types = cols(Date = col_date(format = "%d/%m/%Y"),
Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
#select the data from the dates 2007-02-01 and 2007-02-02
filteredDate = filter(household_power_consumption, Date >= "2007-02-01", Date <= "2007-02-02")
#plot2.R
# Combine Date and Time into one POSIXct timestamp, drop the "?" markers the
# dataset uses for missing readings, and coerce the power column to numeric
# so plot() draws a line rather than erroring on a character column.
# Fix: the original also filtered on a 'Weekday' column, which does not exist
# in this dataset and made the pipeline fail.
filteredDate_v2 = filter(filteredDate, Global_active_power != "?") %>%
mutate(DateTime = as.POSIXct(paste(Date, Time)),
Global_active_power = as.numeric(Global_active_power))
png(filename = "plot2.png", width = 480, height = 480)
par(mfrow = c(1,1))
with(filteredDate_v2, plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
|
8cbaf6f27b4d97176186f575874bcc36215186d2
|
a1c59394a2b42d6756c2b9564697db714b27fe49
|
/R/CST_Calibration.R
|
e973c4d84dae5973e4f02653ea92747d13c98c1d
|
[] |
no_license
|
cran/CSTools
|
e06a58f876e86e6140af5106a6abb9a6afa7282e
|
6c68758da7a0dadc020b48cf99bf211c86498d12
|
refs/heads/master
| 2023-06-26T01:20:08.946781
| 2023-06-06T13:10:05
| 2023-06-06T13:10:05
| 183,258,656
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,046
|
r
|
CST_Calibration.R
|
#'Forecast Calibration
#'
#'@author Verónica Torralba, \email{veronica.torralba@bsc.es}
#'@author Bert Van Schaeybroeck, \email{bertvs@meteo.be}
#'@description Five types of member-by-member bias correction can be performed.
#'The \code{"bias"} method corrects the bias only, the \code{"evmos"} method
#'applies a variance inflation technique to ensure the correction of the bias
#'and the correspondence of variance between forecast and observation (Van
#'Schaeybroeck and Vannitsem, 2011). The ensemble calibration methods
#'\code{"mse_min"} and \code{"crps_min"} correct the bias, the overall forecast
#'variance and the ensemble spread as described in Doblas-Reyes et al. (2005)
#'and Van Schaeybroeck and Vannitsem (2015), respectively. While the
#'\code{"mse_min"} method minimizes a constrained mean-squared error using three
#'parameters, the \code{"crps_min"} method features four parameters and
#'minimizes the Continuous Ranked Probability Score (CRPS). The
#'\code{"rpc-based"} method adjusts the forecast variance ensuring that the
#'ratio of predictable components (RPC) is equal to one, as in Eade et al.
#'(2014). It is equivalent to function \code{Calibration} but for objects
#'of class \code{s2dv_cube}.
#'
#'@param exp An object of class \code{s2dv_cube} as returned by \code{CST_Load}
#' function with at least 'sdate' and 'member' dimensions, containing the
#' seasonal hindcast experiment data in the element named \code{data}. The
#' hindcast is used to calibrate the forecast in case the forecast is provided;
#' if not, the same hindcast will be calibrated instead.
#'@param obs An object of class \code{s2dv_cube} as returned by \code{CST_Load}
#' function with at least 'sdate' dimension, containing the observed data in
#' the element named \code{$data}.
#'@param exp_cor An optional object of class \code{s2dv_cube} as returned by
#' \code{CST_Load} function with at least 'sdate' and 'member' dimensions,
#' containing the seasonal forecast experiment data in the element named
#' \code{data}. If the forecast is provided, it will be calibrated using the
#' hindcast and observations; if not, the hindcast will be calibrated instead.
#' If there is only one corrected dataset, it should not have dataset dimension.
#' If there is a corresponding corrected dataset for each 'exp' forecast, the
#' dataset dimension must have the same length as in 'exp'. The default value
#' is NULL.
#'@param cal.method A character string indicating the calibration method used,
#' can be either \code{bias}, \code{evmos}, \code{mse_min}, \code{crps_min} or
#' \code{rpc-based}. Default value is \code{mse_min}.
#'@param eval.method A character string indicating the sampling method used, it
#' can be either \code{in-sample} or \code{leave-one-out}. Default value is the
#' \code{leave-one-out} cross validation. In case the forecast is provided, any
#' chosen eval.method is over-ruled and a third option is used.
#'@param multi.model A boolean that is used only for the \code{mse_min}
#' method. If multi-model ensembles or ensembles of different sizes are used,
#' it must be set to \code{TRUE}. By default it is \code{FALSE}. Differences
#' between the two approaches are generally small but may become large when
#' using small ensemble sizes. Using multi.model when the calibration method is
#' \code{bias}, \code{evmos} or \code{crps_min} will not affect the result.
#'@param na.fill A boolean that indicates what happens in case calibration is
#' not possible or will yield unreliable results. This happens when three or
#' less forecasts-observation pairs are available to perform the training phase
#' of the calibration. By default \code{na.fill} is set to true such that NA
#' values will be returned. If \code{na.fill} is set to false, the uncorrected
#' data will be returned.
#'@param na.rm A boolean that indicates whether to remove the NA values or not.
#' The default value is \code{TRUE}. See Details section for further
#' information about its use and compatibility with \code{na.fill}.
#'@param apply_to A character string that indicates whether to apply the
#' calibration to all the forecast (\code{"all"}) or only to those where the
#' correlation between the ensemble mean and the observations is statistically
#' significant (\code{"sign"}). Only useful if \code{cal.method == "rpc-based"}.
#'@param alpha A numeric value indicating the significance level for the
#' correlation test. Only useful if \code{cal.method == "rpc-based" & apply_to
#' == "sign"}.
#'@param memb_dim A character string indicating the name of the member dimension.
#' By default, it is set to 'member'.
#'@param sdate_dim A character string indicating the name of the start date
#' dimension. By default, it is set to 'sdate'.
#'@param dat_dim A character string indicating the name of dataset dimension.
#' The length of this dimension can be different between 'exp' and 'obs'.
#' The default value is NULL.
#'@param ncores An integer that indicates the number of cores for parallel
#' computations using multiApply function. The default value is one.
#'
#'@return An object of class \code{s2dv_cube} containing the calibrated
#'forecasts in the element \code{data} with the dimensions nexp, nobs and same
#'dimensions as in the 'exp' object. nexp is the number of experiment
#'(i.e., 'dat_dim' in exp), and nobs is the number of observation (i.e.,
#''dat_dim' in obs). If dat_dim is NULL, nexp and nobs are omitted. If 'exp_cor'
#'is provided the returned array will be with the same dimensions as 'exp_cor'.
#'
#'@details Both the \code{na.fill} and \code{na.rm} parameters can be used to
#'indicate how the function has to handle the NA values. The \code{na.fill}
#'parameter checks whether there are more than three forecast-observations pairs
#'to perform the computation. In case there are three or less pairs, the
#'computation is not carried out, and the value returned by the function depends
#'on the value of this parameter (either NA if \code{na.fill == TRUE} or the
#'uncorrected value if \code{na.fill == TRUE}). On the other hand, \code{na.rm}
#'is used to indicate the function whether to remove the missing values during
#'the computation of the parameters needed to perform the calibration.
#'
#'@references Doblas-Reyes F.J, Hagedorn R, Palmer T.N. The rationale behind the
#'success of multi-model ensembles in seasonal forecasting-II calibration and
#'combination. Tellus A. 2005;57:234-252. \doi{10.1111/j.1600-0870.2005.00104.x}
#'@references Eade, R., Smith, D., Scaife, A., Wallace, E., Dunstone, N.,
#'Hermanson, L., & Robinson, N. (2014). Do seasonal-to-decadal climate
#'predictions underestimate the predictability of the read world? Geophysical
#'Research Letters, 41(15), 5620-5628. \doi{10.1002/2014GL061146}
#'@references Van Schaeybroeck, B., & Vannitsem, S. (2011). Post-processing
#'through linear regression. Nonlinear Processes in Geophysics, 18(2),
#'147. \doi{10.5194/npg-18-147-2011}
#'@references Van Schaeybroeck, B., & Vannitsem, S. (2015). Ensemble
#'post-processing using member-by-member approaches: theoretical aspects.
#'Quarterly Journal of the Royal Meteorological Society, 141(688), 807-818.
#'\doi{10.1002/qj.2397}
#'
#'@seealso \code{\link{CST_Load}}
#'
#'@examples
#'# Example 1:
#'mod1 <- 1 : (1 * 3 * 4 * 5 * 6 * 7)
#'dim(mod1) <- c(dataset = 1, member = 3, sdate = 4, ftime = 5, lat = 6, lon = 7)
#'obs1 <- 1 : (1 * 1 * 4 * 5 * 6 * 7)
#'dim(obs1) <- c(dataset = 1, member = 1, sdate = 4, ftime = 5, lat = 6, lon = 7)
#'lon <- seq(0, 30, 5)
#'lat <- seq(0, 25, 5)
#'coords <- list(lat = lat, lon = lon)
#'exp <- list(data = mod1, coords = coords)
#'obs <- list(data = obs1, coords = coords)
#'attr(exp, 'class') <- 's2dv_cube'
#'attr(obs, 'class') <- 's2dv_cube'
#'a <- CST_Calibration(exp = exp, obs = obs, cal.method = "mse_min", eval.method = "in-sample")
#'
#'# Example 2:
#'mod1 <- 1 : (1 * 3 * 4 * 5 * 6 * 7)
#'mod2 <- 1 : (1 * 3 * 1 * 5 * 6 * 7)
#'dim(mod1) <- c(dataset = 1, member = 3, sdate = 4, ftime = 5, lat = 6, lon = 7)
#'dim(mod2) <- c(dataset = 1, member = 3, sdate = 1, ftime = 5, lat = 6, lon = 7)
#'obs1 <- 1 : (1 * 1 * 4 * 5 * 6 * 7)
#'dim(obs1) <- c(dataset = 1, member = 1, sdate = 4, ftime = 5, lat = 6, lon = 7)
#'lon <- seq(0, 30, 5)
#'lat <- seq(0, 25, 5)
#'coords <- list(lat = lat, lon = lon)
#'exp <- list(data = mod1, coords = coords)
#'obs <- list(data = obs1, coords = coords)
#'exp_cor <- list(data = mod2, lat = lat, lon = lon)
#'attr(exp, 'class') <- 's2dv_cube'
#'attr(obs, 'class') <- 's2dv_cube'
#'attr(exp_cor, 'class') <- 's2dv_cube'
#'a <- CST_Calibration(exp = exp, obs = obs, exp_cor = exp_cor, cal.method = "evmos")
#'
#'@importFrom s2dv InsertDim Reorder
#'@import multiApply
#'@importFrom ClimProjDiags Subset
#'@export
CST_Calibration <- function(exp, obs, exp_cor = NULL, cal.method = "mse_min",
                            eval.method = "leave-one-out", multi.model = FALSE,
                            na.fill = TRUE, na.rm = TRUE, apply_to = NULL,
                            alpha = NULL, memb_dim = 'member', sdate_dim = 'sdate',
                            dat_dim = NULL, ncores = NULL) {
  # Validate that every provided input is an 's2dv_cube'.
  if (!inherits(exp, "s2dv_cube") || !inherits(obs, "s2dv_cube")) {
    stop("Parameter 'exp' and 'obs' must be of the class 's2dv_cube'.")
  }
  if (!is.null(exp_cor) && !inherits(exp_cor, "s2dv_cube")) {
    stop("Parameter 'exp_cor' must be of the class 's2dv_cube'.")
  }
  # Delegate to the array-based Calibration() on the data slots.
  # Note: exp_cor$data is NULL when exp_cor is NULL, matching that path.
  cal_data <- Calibration(exp = exp$data, obs = obs$data, exp_cor = exp_cor$data,
                          cal.method = cal.method, eval.method = eval.method,
                          multi.model = multi.model, na.fill = na.fill,
                          na.rm = na.rm, apply_to = apply_to, alpha = alpha,
                          memb_dim = memb_dim, sdate_dim = sdate_dim,
                          dat_dim = dat_dim, ncores = ncores)
  # Return the calibrated hindcast when no forecast was given, otherwise the
  # calibrated forecast; in both cases merge dataset names and source files
  # from all inputs into the returned cube's attributes.
  if (is.null(exp_cor)) {
    out <- exp
    out$data <- cal_data
    out$attrs$Datasets <- c(exp$attrs$Datasets, obs$attrs$Datasets)
    out$attrs$source_files <- c(exp$attrs$source_files, obs$attrs$source_files)
  } else {
    out <- exp_cor
    out$data <- cal_data
    out$attrs$Datasets <- c(exp_cor$attrs$Datasets, exp$attrs$Datasets,
                            obs$attrs$Datasets)
    out$attrs$source_files <- c(exp_cor$attrs$source_files,
                                exp$attrs$source_files,
                                obs$attrs$source_files)
  }
  out
}
#'Forecast Calibration
#'
#'@author Verónica Torralba, \email{veronica.torralba@bsc.es}
#'@author Bert Van Schaeybroeck, \email{bertvs@meteo.be}
#'@description Five types of member-by-member bias correction can be performed.
#'The \code{"bias"} method corrects the bias only, the \code{"evmos"} method
#'applies a variance inflation technique to ensure the correction of the bias
#'and the correspondence of variance between forecast and observation (Van
#'Schaeybroeck and Vannitsem, 2011). The ensemble calibration methods
#'\code{"mse_min"} and \code{"crps_min"} correct the bias, the overall forecast
#'variance and the ensemble spread as described in Doblas-Reyes et al. (2005)
#'and Van Schaeybroeck and Vannitsem (2015), respectively. While the
#'\code{"mse_min"} method minimizes a constrained mean-squared error using three
#'parameters, the \code{"crps_min"} method features four parameters and
#'minimizes the Continuous Ranked Probability Score (CRPS). The
#'\code{"rpc-based"} method adjusts the forecast variance ensuring that the
#'ratio of predictable components (RPC) is equal to one, as in Eade et al.
#'(2014). Both in-sample or our out-of-sample (leave-one-out cross
#'validation) calibration are possible.
#'
#'@param exp A multidimensional array with named dimensions (at least 'sdate'
#' and 'member') containing the seasonal hindcast experiment data. The hindcast
#' is used to calibrate the forecast in case the forecast is provided; if not,
#' the same hindcast will be calibrated instead.
#'@param obs A multidimensional array with named dimensions (at least 'sdate')
#' containing the observed data.
#'@param exp_cor An optional multidimensional array with named dimensions (at
#' least 'sdate' and 'member') containing the seasonal forecast experiment
#' data. If the forecast is provided, it will be calibrated using the hindcast
#' and observations; if not, the hindcast will be calibrated instead. If there
#' is only one corrected dataset, it should not have dataset dimension. If there
#' is a corresponding corrected dataset for each 'exp' forecast, the dataset
#' dimension must have the same length as in 'exp'. The default value is NULL.
#'@param cal.method A character string indicating the calibration method used,
#' can be either \code{bias}, \code{evmos}, \code{mse_min}, \code{crps_min}
#' or \code{rpc-based}. Default value is \code{mse_min}.
#'@param eval.method A character string indicating the sampling method used,
#' can be either \code{in-sample} or \code{leave-one-out}. Default value is
#' the \code{leave-one-out} cross validation. In case the forecast is
#' provided, any chosen eval.method is over-ruled and a third option is
#' used.
#'@param multi.model A boolean that is used only for the \code{mse_min}
#' method. If multi-model ensembles or ensembles of different sizes are used,
#' it must be set to \code{TRUE}. By default it is \code{FALSE}. Differences
#' between the two approaches are generally small but may become large when
#' using small ensemble sizes. Using multi.model when the calibration method
#' is \code{bias}, \code{evmos} or \code{crps_min} will not affect the result.
#'@param na.fill A boolean that indicates what happens in case calibration is
#' not possible or will yield unreliable results. This happens when three or
#' less forecasts-observation pairs are available to perform the training phase
#' of the calibration. By default \code{na.fill} is set to true such that NA
#' values will be returned. If \code{na.fill} is set to false, the uncorrected
#' data will be returned.
#'@param na.rm A boolean that indicates whether to remove the NA values or
#' not. The default value is \code{TRUE}.
#'@param apply_to A character string that indicates whether to apply the
#' calibration to all the forecast (\code{"all"}) or only to those where the
#' correlation between the ensemble mean and the observations is statistically
#' significant (\code{"sign"}). Only useful if \code{cal.method == "rpc-based"}.
#'@param alpha A numeric value indicating the significance level for the
#' correlation test. Only useful if \code{cal.method == "rpc-based" & apply_to ==
#' "sign"}.
#'@param memb_dim A character string indicating the name of the member
#' dimension. By default, it is set to 'member'.
#'@param sdate_dim A character string indicating the name of the start date
#' dimension. By default, it is set to 'sdate'.
#'@param dat_dim A character string indicating the name of dataset dimension.
#' The length of this dimension can be different between 'exp' and 'obs'.
#' The default value is NULL.
#'@param ncores An integer that indicates the number of cores for parallel
#' computation using multiApply function. The default value is NULL (one core).
#'
#'@return An array containing the calibrated forecasts with the dimensions
#'nexp, nobs and same dimensions as in the 'exp' array. nexp is the number of
#'experiment (i.e., 'dat_dim' in exp), and nobs is the number of observation
#'(i.e., 'dat_dim' in obs). If dat_dim is NULL, nexp and nobs are omitted.
#'If 'exp_cor' is provided the returned array will be with the same dimensions as
#''exp_cor'.
#'
#'@details Both the \code{na.fill} and \code{na.rm} parameters can be used to
#'indicate how the function has to handle the NA values. The \code{na.fill}
#'parameter checks whether there are more than three forecast-observations pairs
#'to perform the computation. In case there are three or less pairs, the
#'computation is not carried out, and the value returned by the function depends
#'on the value of this parameter (either NA if \code{na.fill == TRUE} or the
#'uncorrected value if \code{na.fill == FALSE}). On the other hand, \code{na.rm}
#'is used to indicate the function whether to remove the missing values during
#'the computation of the parameters needed to perform the calibration.
#'
#'@references Doblas-Reyes F.J, Hagedorn R, Palmer T.N. The rationale behind the
#'success of multi-model ensembles in seasonal forecasting-II calibration and
#'combination. Tellus A. 2005;57:234-252. doi:10.1111/j.1600-0870.2005.00104.x
#'@references Eade, R., Smith, D., Scaife, A., Wallace, E., Dunstone, N.,
#'Hermanson, L., & Robinson, N. (2014). Do seasonal-to-decadal climate
#'predictions underestimate the predictability of the real world? Geophysical
#'Research Letters, 41(15), 5620-5628. \doi{10.1002/2014GL061146}
#'@references Van Schaeybroeck, B., & Vannitsem, S. (2011). Post-processing
#'through linear regression. Nonlinear Processes in Geophysics, 18(2),
#'147. \doi{10.5194/npg-18-147-2011}
#'@references Van Schaeybroeck, B., & Vannitsem, S. (2015). Ensemble
#'post-processing using member-by-member approaches: theoretical aspects.
#'Quarterly Journal of the Royal Meteorological Society, 141(688), 807-818.
#'\doi{10.1002/qj.2397}
#'
#'@seealso \code{\link{CST_Load}}
#'
#'@examples
#'mod1 <- 1 : (1 * 3 * 4 * 5 * 6 * 7)
#'dim(mod1) <- c(dataset = 1, member = 3, sdate = 4, ftime = 5, lat = 6, lon = 7)
#'obs1 <- 1 : (1 * 1 * 4 * 5 * 6 * 7)
#'dim(obs1) <- c(dataset = 1, member = 1, sdate = 4, ftime = 5, lat = 6, lon = 7)
#'a <- Calibration(exp = mod1, obs = obs1)
#'
#'@importFrom s2dv InsertDim Reorder
#'@import multiApply
#'@importFrom ClimProjDiags Subset
#'@export
Calibration <- function(exp, obs, exp_cor = NULL,
                        cal.method = "mse_min", eval.method = "leave-one-out",
                        multi.model = FALSE, na.fill = TRUE,
                        na.rm = TRUE, apply_to = NULL, alpha = NULL,
                        memb_dim = 'member', sdate_dim = 'sdate', dat_dim = NULL,
                        ncores = NULL) {
  # Check inputs
  ## exp, obs
  if (!is.array(exp) || !is.numeric(exp)) {
    stop("Parameter 'exp' must be a numeric array.")
  }
  if (!is.array(obs) || !is.numeric(obs)) {
    stop("Parameter 'obs' must be a numeric array.")
  }
  expdims <- names(dim(exp))
  obsdims <- names(dim(obs))
  if (is.null(expdims)) {
    stop("Parameter 'exp' must have dimension names.")
  }
  if (is.null(obsdims)) {
    stop("Parameter 'obs' must have dimension names.")
  }
  if (any(is.na(exp))) {
    warning("Parameter 'exp' contains NA values.")
  }
  if (any(is.na(obs))) {
    warning("Parameter 'obs' contains NA values.")
  }
  ## exp_cor
  if (!is.null(exp_cor)) {
    # if exp_cor is provided, it will be calibrated: "calibrate forecast instead of hindcast"
    # if exp_cor is provided, eval.method is overruled (because if exp_cor is provided, the
    # train data will be all data of "exp" and the evaluation data will be all data of "exp_cor";
    # no need for "leave-one-out" or "in-sample")
    eval.method <- "hindcast-vs-forecast"
    expcordims <- names(dim(exp_cor))
    if (is.null(expcordims)) {
      stop("Parameter 'exp_cor' must have dimension names.")
    }
    if (any(is.na(exp_cor))) {
      warning("Parameter 'exp_cor' contains NA values.")
    }
  }
  ## dat_dim
  if (!is.null(dat_dim)) {
    if (!is.character(dat_dim) | length(dat_dim) > 1) {
      stop("Parameter 'dat_dim' must be a character string.")
    }
    if (!dat_dim %in% names(dim(exp)) | !dat_dim %in% names(dim(obs))) {
      stop("Parameter 'dat_dim' is not found in 'exp' or 'obs' dimension.",
           " Set it as NULL if there is no dataset dimension.")
    }
  }
  ## sdate_dim and memb_dim
  if (!is.character(sdate_dim)) {
    stop("Parameter 'sdate_dim' should be a character string indicating the",
         "name of the dimension where start dates are stored in 'exp'.")
  }
  if (length(sdate_dim) > 1) {
    sdate_dim <- sdate_dim[1]
    warning("Parameter 'sdate_dim' has length greater than 1 and only",
            " the first element will be used.")
  }
  if (!is.character(memb_dim)) {
    stop("Parameter 'memb_dim' should be a character string indicating the",
         "name of the dimension where members are stored in 'exp'.")
  }
  if (length(memb_dim) > 1) {
    memb_dim <- memb_dim[1]
    warning("Parameter 'memb_dim' has length greater than 1 and only",
            " the first element will be used.")
  }
  # Dimensions that Apply() will operate over for each input.
  target_dims_exp <- c(memb_dim, sdate_dim, dat_dim)
  target_dims_obs <- c(sdate_dim, dat_dim)
  if (!all(target_dims_exp %in% expdims)) {
    stop("Parameter 'exp' requires 'sdate_dim' and 'memb_dim' dimensions.")
  }
  if (!all(target_dims_obs %in% obsdims)) {
    stop("Parameter 'obs' must have the dimension defined in sdate_dim ",
         "parameter.")
  }
  # Observations may carry a degenerate member dimension; keep only the
  # first member and drop the dimension.
  if (memb_dim %in% obsdims) {
    if (dim(obs)[memb_dim] != 1) {
      warning("Parameter 'obs' has dimension 'memb_dim' with length larger",
              " than 1. Only the first member dimension will be used.")
    }
    obs <- Subset(obs, along = memb_dim, indices = 1, drop = "selected")
  }
  # Ensure 'exp_cor' has a member dimension; if a singleton one is inserted
  # here, it is removed from the output at the end (exp_cor_remove_memb).
  if (!is.null(exp_cor)) {
    if (!memb_dim %in% names(dim(exp_cor))) {
      exp_cor <- InsertDim(exp_cor, posdim = 1, lendim = 1, name = memb_dim)
      exp_cor_remove_memb <- TRUE
    } else {
      exp_cor_remove_memb <- FALSE
    }
  } else {
    exp_cor_remove_memb <- FALSE
  }
  ## exp, obs, and exp_cor (2)
  # All dimensions other than member/dataset must match between 'exp' and 'obs'.
  name_exp <- sort(names(dim(exp)))
  name_obs <- sort(names(dim(obs)))
  name_exp <- name_exp[-which(name_exp == memb_dim)]
  if (!is.null(dat_dim)) {
    name_exp <- name_exp[-which(name_exp == dat_dim)]
    name_obs <- name_obs[-which(name_obs == dat_dim)]
  }
  if (!identical(length(name_exp), length(name_obs)) |
      !identical(dim(exp)[name_exp], dim(obs)[name_obs])) {
    stop("Parameter 'exp' and 'obs' must have same length of all ",
         "dimensions except 'memb_dim' and 'dat_dim'.")
  }
  if (!is.null(exp_cor)) {
    name_exp_cor <- sort(names(dim(exp_cor)))
    name_exp <- sort(names(dim(exp)))
    if (!is.null(dat_dim)) {
      # 'exp_cor' may or may not carry the dataset dimension; when it does,
      # its length must match that of 'exp'.
      if (dat_dim %in% expcordims) {
        if (!identical(dim(exp)[dat_dim], dim(exp_cor)[dat_dim])) {
          stop("If parameter 'exp_cor' has dataset dimension, it must be",
               " equal to dataset dimension of 'exp'.")
        }
        name_exp_cor <- name_exp_cor[-which(name_exp_cor == dat_dim)]
        target_dims_cor <- c(memb_dim, sdate_dim, dat_dim)
      } else {
        target_dims_cor <- c(memb_dim, sdate_dim)
      }
    } else {
      target_dims_cor <- c(memb_dim, sdate_dim)
    }
    name_exp <- name_exp[-which(name_exp %in% target_dims_exp)]
    name_exp_cor <- name_exp_cor[-which(name_exp_cor %in% target_dims_cor)]
    if (!identical(length(name_exp), length(name_exp_cor)) |
        !identical(dim(exp)[name_exp], dim(exp_cor)[name_exp_cor])) {
      stop("Parameter 'exp' and 'exp_cor' must have the same length of ",
           "all common dimensions except 'dat_dim', 'sdate_dim' and 'memb_dim'.")
    }
  }
  ## ncores
  if (!is.null(ncores)) {
    if (!is.numeric(ncores) | ncores %% 1 != 0 | ncores <= 0 |
        length(ncores) > 1) {
      stop("Parameter 'ncores' must be either NULL or a positive integer.")
    }
  }
  ## na.rm
  if (!inherits(na.rm, "logical")) {
    stop("Parameter 'na.rm' must be a logical value.")
  }
  if (length(na.rm) > 1) {
    na.rm <- na.rm[1]
    warning("Paramter 'na.rm' has length greater than 1, and only the fist element is used.")
  }
  ## cal.method, apply_to, alpha
  if (!any(cal.method %in% c('bias', 'evmos', 'mse_min', 'crps_min', 'rpc-based'))) {
    stop("Parameter 'cal.method' must be a character string indicating the calibration method used.")
  }
  if (cal.method == 'rpc-based') {
    # 'rpc-based' needs 'apply_to' (and possibly 'alpha'); fall back to the
    # Eade et al. (2014) defaults when they are not supplied.
    if (is.null(apply_to)) {
      apply_to <- 'sign'
      warning("Parameter 'apply_to' cannot be NULL for 'rpc-based' method so it ",
              "has been set to 'sign', as in Eade et al. (2014).")
    } else if (!apply_to %in% c('all','sign')) {
      stop("Parameter 'apply_to' must be either 'all' or 'sign' when 'rpc-based' ",
           "method is used.")
    }
    if (apply_to == 'sign') {
      if (is.null(alpha)) {
        alpha <- 0.1
        warning("Parameter 'alpha' cannot be NULL for 'rpc-based' method so it ",
                "has been set to 0.1, as in Eade et al. (2014).")
      } else if (!is.numeric(alpha) | alpha <= 0 | alpha >= 1) {
        stop("Parameter 'alpha' must be a number between 0 and 1.")
      }
    }
  }
  ## eval.method
  if (!any(eval.method %in% c('in-sample', 'leave-one-out', 'hindcast-vs-forecast'))) {
    stop(paste0("Parameter 'eval.method' must be a character string indicating ",
                "the sampling method used ('in-sample', 'leave-one-out' or ",
                "'hindcast-vs-forecast')."))
  }
  ## multi.model
  if (!inherits(multi.model, "logical")) {
    stop("Parameter 'multi.model' must be a logical value.")
  }
  if (multi.model & !(cal.method == "mse_min")) {
    warning(paste0("The 'multi.model' parameter is ignored when using the ",
                   "calibration method '", cal.method, "'."))
  }
  # NOTE(review): 'warning_shown' is read and assigned with '<<-' inside .cal(),
  # but .cal() is defined at top level, so that lookup resolves in the global
  # (or package) environment rather than in this function's frame -- TODO
  # confirm the intended scoping.
  warning_shown <- FALSE
  if (is.null(exp_cor)) {
    # Calibrate the hindcast itself, cross-validated according to eval.method.
    calibrated <- Apply(data = list(exp = exp, obs = obs), dat_dim = dat_dim,
                        cal.method = cal.method, eval.method = eval.method, multi.model = multi.model,
                        na.fill = na.fill, na.rm = na.rm, apply_to = apply_to, alpha = alpha,
                        target_dims = list(exp = target_dims_exp, obs = target_dims_obs),
                        ncores = ncores, fun = .cal)$output1
  } else {
    # Calibrate the forecast with parameters trained on the full hindcast.
    calibrated <- Apply(data = list(exp = exp, obs = obs, exp_cor = exp_cor),
                        dat_dim = dat_dim, cal.method = cal.method, eval.method = eval.method,
                        multi.model = multi.model, na.fill = na.fill, na.rm = na.rm,
                        apply_to = apply_to, alpha = alpha,
                        target_dims = list(exp = target_dims_exp, obs = target_dims_obs,
                                           exp_cor = target_dims_cor),
                        ncores = ncores, fun = .cal)$output1
  }
  # Reorder the output so its dimensions mirror 'exp' (with 'nexp'/'nobs'
  # appended when a dataset dimension is used).
  if (!is.null(dat_dim)) {
    pos <- match(c(names(dim(exp))[-which(names(dim(exp)) == dat_dim)], 'nexp', 'nobs'),
                 names(dim(calibrated)))
    calibrated <- aperm(calibrated, pos)
  } else {
    pos <- match(c(names(dim(exp))), names(dim(calibrated)))
    calibrated <- aperm(calibrated, pos)
  }
  # Drop the singleton member dimension that was inserted for 'exp_cor'.
  if (exp_cor_remove_memb) {
    dim(calibrated) <- dim(calibrated)[-which(names(dim(calibrated)) == memb_dim)]
  }
  return(calibrated)
}
.data.set.sufficiently.large <- function(exp, obs) {
  # A start date is usable for training when its observation is present and
  # at least one ensemble member is non-missing; calibration requires more
  # than three such forecast-observation pairs.
  min.samples <- 3
  memberless.sdates <- apply(exp, c(2), function(col) all(is.na(col)))
  usable <- !is.na(obs) & !memberless.sdates
  sum(usable) > min.samples
}
.make.eval.train.dexes <- function(eval.method, amt.points, amt.points_cor) {
  # Build the list of evaluation/training index pairs for the requested
  # cross-validation scheme. Each element holds 'eval.dexes' (dates to be
  # corrected) and 'train.dexes' (dates used to fit the correction).
  hindcast.dexes <- seq(1, amt.points)
  if (eval.method == "leave-one-out") {
    # One resample per start date: evaluate on it, train on all the others.
    dexes.lst <- lapply(hindcast.dexes, function(k) {
      list(eval.dexes = k, train.dexes = hindcast.dexes[-k])
    })
  } else if (eval.method == "in-sample") {
    # Train and evaluate on the full hindcast period.
    dexes.lst <- list(list(eval.dexes = hindcast.dexes,
                           train.dexes = hindcast.dexes))
  } else if (eval.method == "hindcast-vs-forecast") {
    # Train on the whole hindcast, evaluate on the forecast period.
    dexes.lst <- list(list(eval.dexes = seq(1, amt.points_cor),
                           train.dexes = hindcast.dexes))
  } else {
    stop(paste0("unknown sampling method: ", eval.method))
  }
  dexes.lst
}
.cal <- function(exp, obs, exp_cor = NULL, dat_dim = NULL, cal.method = "mse_min",
                 eval.method = "leave-one-out", multi.model = FALSE, na.fill = TRUE,
                 na.rm = TRUE, apply_to = NULL, alpha = NULL) {
  # Worker applied by Calibration() to one slice of the input arrays.
  # exp: [memb, sdate, (dat)]
  # obs: [sdate (dat)]
  # exp_cor: [memb, sdate, (dat)] or NULL
  # Insert a singleton dataset dimension when none is used so the nested
  # loops below can treat both cases uniformly.
  if (is.null(dat_dim)) {
    nexp <- 1
    nobs <- 1
    exp <- InsertDim(exp, posdim = 3, lendim = 1, name = 'dataset')
    obs <- InsertDim(obs, posdim = 2, lendim = 1, name = 'dataset')
  } else {
    nexp <- as.numeric(dim(exp)[dat_dim])
    nobs <- as.numeric(dim(obs)[dat_dim])
  }
  if (is.null(exp_cor)) {
    # generate a copy of exp so that the same function can run for both cases
    exp_cor <- exp
    cor_dat_dim <- TRUE
  } else {
    if (length(dim(exp_cor)) == 2) { # exp_cor: [memb, sdate]
      cor_dat_dim <- FALSE
    } else { # exp_cor: [memb, sdate, dat]
      cor_dat_dim <- TRUE
    }
  }
  expdims <- dim(exp)
  expdims_cor <- dim(exp_cor)
  memb <- expdims[1] # memb
  sdate <- expdims[2] # sdate
  sdate_cor <- expdims_cor[2]
  # Output: corrected forecast for every (exp dataset, obs dataset) pair.
  var.cor.fc <- array(dim = c(dim(exp_cor)[1:2], nexp = nexp, nobs = nobs))
  for (i in 1:nexp) {
    for (j in 1:nobs) {
      if (!.data.set.sufficiently.large(exp = exp[, , i, drop = FALSE],
                                        obs = obs[, j, drop = FALSE])) {
        # Too few forecast/observation pairs: leave NA (na.fill = TRUE) or
        # copy the uncorrected values, warning only once.
        # NOTE(review): 'warning_shown' is not defined in this function; the
        # read below and the '<<-' assignments resolve in the global (or
        # package) environment, not in the Calibration() frame that sets
        # warning_shown <- FALSE -- TODO confirm intended scoping.
        if (!na.fill) {
          exp_subset <- exp[, , i]
          var.cor.fc[, , i, j] <- exp_subset
          if (!warning_shown) {
            warning("Some forecast data could not be corrected due to data lack",
                    " and is replaced with uncorrected values.")
            warning_shown <<- TRUE
          }
        } else if (!warning_shown) {
          warning("Some forecast data could not be corrected due to data lack",
                  " and is replaced with NA values.")
          warning_shown <<- TRUE
        }
      } else {
        # Subset data for dataset dimension
        obs_data <- as.vector(obs[, j])
        exp_data <- exp[, , i]
        dim(exp_data) <- dim(exp)[1:2]
        if (cor_dat_dim) {
          expcor_data <- exp_cor[, , i]
          dim(expcor_data) <- dim(exp_cor)[1:2]
        } else {
          expcor_data <- exp_cor
        }
        eval.train.dexeses <- .make.eval.train.dexes(eval.method = eval.method,
                                                     amt.points = sdate,
                                                     amt.points_cor = sdate_cor)
        amt.resamples <- length(eval.train.dexeses)
        for (i.sample in seq(1, amt.resamples)) {
          # defining training (tr) and evaluation (ev) subsets
          # fc.ev is used to evaluate (not train; train should be done with exp (hindcast))
          eval.dexes <- eval.train.dexeses[[i.sample]]$eval.dexes
          train.dexes <- eval.train.dexeses[[i.sample]]$train.dexes
          fc.ev <- expcor_data[, eval.dexes, drop = FALSE]
          fc.tr <- exp_data[, train.dexes]
          obs.tr <- obs_data[train.dexes, drop = FALSE]
          if (cal.method == "bias") {
            # Mean bias correction: shift each member by the mean difference
            # between observations and hindcast over the training period.
            var.cor.fc[, eval.dexes, i, j] <- fc.ev + mean(obs.tr, na.rm = na.rm) - mean(fc.tr, na.rm = na.rm)
            # forecast correction implemented
          } else if (cal.method == "evmos") {
            # forecast correction implemented
            # ensemble and observational characteristics
            quant.obs.fc.tr <- .calc.obs.fc.quant(obs = obs.tr, fc = fc.tr, na.rm = na.rm)
            # calculate value for regression parameters
            init.par <- c(.calc.evmos.par(quant.obs.fc.tr, na.rm = na.rm))
            # correct evaluation subset
            var.cor.fc[, eval.dexes, i, j] <- .correct.evmos.fc(fc.ev , init.par, na.rm = na.rm)
          } else if (cal.method == "mse_min") {
            # Constrained MSE minimization (Doblas-Reyes et al., 2005).
            quant.obs.fc.tr <- .calc.obs.fc.quant(obs = obs.tr, fc = fc.tr, na.rm = na.rm)
            init.par <- .calc.mse.min.par(quant.obs.fc.tr, multi.model, na.rm = na.rm)
            var.cor.fc[, eval.dexes, i, j] <- .correct.mse.min.fc(fc.ev , init.par, na.rm = na.rm)
          } else if (cal.method == "crps_min") {
            # CRPS minimization (Van Schaeybroeck & Vannitsem, 2015): start
            # from the mse_min parameters and refine with BFGS.
            quant.obs.fc.tr <- .calc.obs.fc.quant.ext(obs = obs.tr, fc = fc.tr, na.rm = na.rm)
            init.par <- c(.calc.mse.min.par(quant.obs.fc.tr, na.rm = na.rm), 0.001)
            init.par[3] <- sqrt(init.par[3])
            # calculate regression parameters on training dataset
            optim.tmp <- optim(par = init.par, fn = .calc.crps.opt, gr = .calc.crps.grad.opt,
                               quant.obs.fc = quant.obs.fc.tr, na.rm = na.rm, method = "BFGS")
            mbm.par <- optim.tmp$par
            var.cor.fc[, eval.dexes, i, j] <- .correct.crps.min.fc(fc.ev , mbm.par, na.rm = na.rm)
          } else if (cal.method == 'rpc-based') {
            # Ensemble mean
            ens_mean.ev <- Apply(data = fc.ev, target_dims = names(memb), fun = mean, na.rm = na.rm)$output1
            ens_mean.tr <- Apply(data = fc.tr, target_dims = names(memb), fun = mean, na.rm = na.rm)$output1
            # Ensemble spread
            ens_spread.tr <- Apply(data = list(fc.tr, ens_mean.tr), target_dims = names(sdate), fun = "-")$output1
            # Mean (climatology)
            exp_mean.tr <- mean(fc.tr, na.rm = na.rm)
            # Ensemble mean variance
            var_signal.tr <- var(ens_mean.tr, na.rm = na.rm)
            # Variance of ensemble members about ensemble mean (= spread)
            var_noise.tr <- var(as.vector(ens_spread.tr), na.rm = na.rm)
            # Variance in the observations
            var_obs.tr <- var(obs.tr, na.rm = na.rm)
            # Correlation between observations and the ensemble mean
            r.tr <- cor(x = ens_mean.tr, y = obs.tr, method = 'pearson',
                        use = ifelse(test = isTRUE(na.rm), yes = "pairwise.complete.obs", no = "everything"))
            if ((apply_to == 'all') || (apply_to == 'sign' &&
                cor.test(ens_mean.tr, obs.tr, method = 'pearson', alternative = 'greater')$p.value < alpha)) {
              # Rescale the ensemble mean so the ratio of predictable
              # components equals one (Eade et al., 2014), then adjust the
              # member deviations accordingly.
              ens_mean_cal <- (ens_mean.ev - exp_mean.tr) * r.tr * sqrt(var_obs.tr) / sqrt(var_signal.tr) + exp_mean.tr
              var.cor.fc[, eval.dexes, i, j] <- Reorder(data = Apply(data = list(exp = fc.ev, ens_mean = ens_mean.ev,
                                                                                 ens_mean_cal = ens_mean_cal),
                                                                     target_dims = names(sdate), fun = .CalibrationMembersRPC,
                                                                     var_obs = var_obs.tr, var_noise = var_noise.tr, r = r.tr)$output1,
                                                        order = names(expdims)[1:2])
            } else {
              # no significant -> replacing with observed climatology
              var.cor.fc[, eval.dexes, i, j] <- array(data = mean(obs.tr, na.rm = na.rm), dim = dim(fc.ev))
            }
          } else {
            stop("unknown calibration method: ", cal.method)
          }
        }
      }
    }
  }
  # Drop the singleton nexp/nobs dimensions when no dataset dimension was used.
  if (is.null(dat_dim)) {
    dim(var.cor.fc) <- dim(exp_cor)[1:2]
  }
  return(var.cor.fc)
}
# Function to calculate different quantities of a series of ensemble forecasts and corresponding observations
.calc.obs.fc.quant <- function(obs, fc, na.rm) {
  # Summary statistics of a training set of ensemble forecasts and matching
  # observations, as needed by the 'evmos' and 'mse_min' parameter fits.
  # Coerce a bare vector to a one-column [member, sdate] matrix.
  if (is.null(dim(fc))) {
    dim(fc) <- c(length(fc), 1)
  }
  n.memb <- dim(fc)[1]
  ens.mean <- apply(fc, c(2), mean, na.rm = na.rm)
  obs.stats <- list(
    obs.per.ens = InsertDim(obs, posdim = 1, lendim = n.memb, name = 'amt.mbr'),
    cor.obs.fc = cor(ens.mean, obs, use = "complete.obs"),
    obs.av = mean(obs, na.rm = na.rm),
    obs.sd = sd(obs, na.rm = na.rm)
  )
  append(.calc.fc.quant(fc = fc, na.rm = na.rm), obs.stats)
}
# Extended function to calculate different quantities of a series of ensemble forecasts and corresponding observations
.calc.obs.fc.quant.ext <- function(obs, fc, na.rm) {
  # Same as .calc.obs.fc.quant but built on the extended set of ensemble
  # statistics (adds the absolute spread used by the CRPS minimization).
  n.memb <- dim(fc)[1]
  ens.mean <- apply(fc, c(2), mean, na.rm = na.rm)
  obs.stats <- list(
    obs.per.ens = InsertDim(obs, posdim = 1, lendim = n.memb, name = 'amt.mbr'),
    cor.obs.fc = cor(ens.mean, obs, use = "complete.obs"),
    obs.av = mean(obs, na.rm = na.rm),
    obs.sd = sd(obs, na.rm = na.rm)
  )
  append(.calc.fc.quant.ext(fc = fc, na.rm = na.rm), obs.stats)
}
# Function to calculate different quantities of a series of ensemble forecasts
.calc.fc.quant <- function(fc, na.rm) {
  # Summary statistics of an ensemble forecast fc [member, sdate] used by the
  # calibration parameter fits and corrections.
  n.memb <- dim(fc)[1]
  ens.mean <- apply(fc, c(2), mean, na.rm = na.rm)       # per-date ensemble mean
  ens.mean.rep <- InsertDim(ens.mean, posdim = 1, lendim = n.memb, name = 'amt.mbr')
  ens.sd <- apply(fc, c(2), sd, na.rm = na.rm)           # per-date ensemble spread
  deviations <- fc - ens.mean.rep                        # member anomalies
  list(
    fc.ens.av = ens.mean,
    fc.ens.av.av = mean(ens.mean, na.rm = na.rm),
    fc.ens.av.sd = sd(ens.mean, na.rm = na.rm),
    fc.ens.av.per.ens = ens.mean.rep,
    fc.ens.sd = ens.sd,
    fc.ens.var.av.sqrt = sqrt(mean(ens.sd^2, na.rm = na.rm)),
    fc.dev = deviations,
    fc.dev.sd = sd(deviations, na.rm = na.rm),
    fc.av = mean(fc, na.rm = na.rm),
    fc.sd = sd(fc, na.rm = na.rm)
  )
}
# Extended function to calculate different quantities of a series of ensemble forecasts
.calc.fc.quant.ext <- function(fc, na.rm) {
  # Extended ensemble statistics: adds the mean absolute pairwise difference
  # between members per start date ('spr.abs'), needed by the CRPS method.
  n.memb <- dim(fc)[1]
  pairs.a <- InsertDim(fc, posdim = 1, lendim = n.memb, name = 'amt.mbr')
  pairs.b <- aperm(pairs.a, c(2, 1, 3))
  spr.abs <- apply(abs(pairs.a - pairs.b), c(3), mean, na.rm = na.rm)
  extra <- list(spr.abs = spr.abs,
                spr.abs.per.ens = InsertDim(spr.abs, posdim = 1,
                                            lendim = n.memb, name = 'amt.mbr'))
  append(.calc.fc.quant(fc, na.rm = na.rm), extra)
}
# Below are the core or elementary functions to calculate the regression parameters for the different methods
.calc.mse.min.par <- function(quant.obs.fc, multi.model = FALSE, na.rm) {
  # Regression parameters for the 'mse_min' member-by-member calibration
  # (Doblas-Reyes et al., 2005): par[1] intercept, par[2] scaling of the
  # ensemble mean, par[3] inflation of the member deviations.
  # Fixes: default changed from 'F' to 'FALSE' ('F' is a reassignable alias);
  # the spurious 'na.rm = na.rm' argument previously passed to with() (where
  # it was silently ignored) has been dropped.
  par.out <- rep(NA, 3)
  # For multi-model ensembles the average ensemble variance replaces the
  # overall deviation spread in the inflation factor.
  if (multi.model) {
    par.out[3] <- with(quant.obs.fc, obs.sd * sqrt(1. - cor.obs.fc^2) / fc.ens.var.av.sqrt)
  } else {
    par.out[3] <- with(quant.obs.fc, obs.sd * sqrt(1. - cor.obs.fc^2) / fc.dev.sd)
  }
  # Ensemble-mean scaling and bias-removing intercept.
  par.out[2] <- with(quant.obs.fc, abs(cor.obs.fc) * obs.sd / fc.ens.av.sd)
  par.out[1] <- with(quant.obs.fc, obs.av - par.out[2] * fc.ens.av.av)
  return(par.out)
}
.calc.evmos.par <- function(quant.obs.fc, na.rm) {
  # Regression parameters for the 'evmos' variance-inflation calibration
  # (Van Schaeybroeck & Vannitsem, 2011): par[2] rescales the forecast so its
  # variance matches the observed variance, par[1] removes the mean bias.
  # Fix: the spurious 'na.rm = na.rm' argument previously passed to with()
  # (where it was silently ignored) has been dropped.
  par.out <- rep(NA, 2)
  par.out[2] <- with(quant.obs.fc, obs.sd / fc.sd)
  par.out[1] <- with(quant.obs.fc, obs.av - par.out[2] * fc.ens.av.av)
  return(par.out)
}
# Below are the core or elementary functions to calculate the functions necessary for the minimization of crps
.calc.crps.opt <- function(par, quant.obs.fc, na.rm) {
  # CRPS cost function minimized by the 'crps_min' method (Van Schaeybroeck &
  # Vannitsem, 2015): mean absolute error of the corrected members minus half
  # the mean corrected ensemble spread.
  with(quant.obs.fc, {
    corrected <- par[1] + par[2] * fc.ens.av.per.ens +
      ((par[3])^2 + par[4] / spr.abs.per.ens) * fc.dev
    accuracy.term <- mean(abs(obs.per.ens - corrected), na.rm = na.rm)
    spread.term <- mean(abs((par[3])^2 * spr.abs + par[4]) / 2., na.rm = na.rm)
    accuracy.term - spread.term
  })
}
.calc.crps.grad.opt <- function(par, quant.obs.fc, na.rm) {
  # Analytical gradient of .calc.crps.opt() with respect to the four CRPS
  # parameters, supplied to optim(method = "BFGS") as 'gr'.
  # sgn1: sign of the residual inside the first absolute value of the cost;
  # sgn2: sign of the member-deviation coefficient; sgn3: sign of the
  # corrected spread term.
  sgn1 <- with(quant.obs.fc,sign(obs.per.ens - (par[1] + par[2] * fc.ens.av.per.ens +
                                                  ((par[3])^2 + par[4] / spr.abs.per.ens) * fc.dev)))
  sgn2 <- with(quant.obs.fc, sign((par[3])^2 + par[4] / spr.abs.per.ens))
  sgn3 <- with(quant.obs.fc,sign((par[3])^2 * spr.abs + par[4]))
  deriv.par1 <- mean(sgn1, na.rm = na.rm)
  # NOTE(review): in the cost function par[2] multiplies fc.ens.av.per.ens and
  # par[3]/par[4] multiply fc.dev, yet the derivative for par[2] below uses
  # fc.dev while those for par[3]/par[4] use fc.ens.av.per.ens -- the factors
  # look swapped relative to .calc.crps.opt; TODO verify (BFGS may still
  # converge since only fn is used for line-search acceptance).
  deriv.par2 <- with(quant.obs.fc, mean(sgn1 * fc.dev, na.rm = na.rm))
  deriv.par3 <- with(quant.obs.fc,
                     mean(2* par[3] * sgn1 * sgn2 * fc.ens.av.per.ens, na.rm = na.rm) -
                       mean(spr.abs * sgn3, na.rm = na.rm) / 2.)
  deriv.par4 <- with(quant.obs.fc,
                     mean(sgn1 * sgn2 * fc.ens.av.per.ens / spr.abs.per.ens, na.rm = na.rm) -
                       mean(sgn3, na.rm = na.rm) / 2.)
  return(c(deriv.par1, deriv.par2, deriv.par3, deriv.par4))
}
# Below are the core or elementary functions to correct the evaluation set based on the regression parameters
.correct.evmos.fc <- function(fc, par, na.rm) {
  # Apply the 'evmos' correction: a simple linear rescaling of every member.
  # Fix: the original called .calc.fc.quant() and evaluated the expression
  # inside with(), but the returned list contains no element named 'fc', so
  # 'fc' always resolved to the function argument and the computed statistics
  # were never used. That dead computation is removed here; the returned
  # values are identical.
  par[1] + par[2] * fc
}
.correct.mse.min.fc <- function(fc, par, na.rm) {
  # Apply the 'mse_min' correction: shift/scale the ensemble mean (par[1],
  # par[2]) and inflate the member deviations (par[3]).
  q <- .calc.fc.quant(fc = fc, na.rm = na.rm)
  par[1] + par[2] * q$fc.ens.av.per.ens + q$fc.dev * par[3]
}
.correct.crps.min.fc <- function(fc, par, na.rm) {
  # Apply the 'crps_min' correction: like mse_min but the deviation inflation
  # also depends on the per-date absolute ensemble spread (par[3], par[4]).
  q <- .calc.fc.quant.ext(fc = fc, na.rm = na.rm)
  par[1] + par[2] * q$fc.ens.av.per.ens +
    q$fc.dev * abs((par[3])^2 + par[4] / q$spr.abs)
}
# Function to calibrate the individual members with the RPC-based method
.CalibrationMembersRPC <- function(exp, ens_mean, ens_mean_cal, var_obs, var_noise, r) {
  # RPC-based calibration of the individual members (Eade et al., 2014):
  # rescale each member's deviation from the ensemble mean so the noise
  # variance matches the unpredictable fraction of the observed variance,
  # then add it back onto the calibrated ensemble mean.
  deviation <- exp - ens_mean
  noise.scale <- sqrt(var_obs) * sqrt(1 - r^2) / sqrt(var_noise)
  deviation * noise.scale + ens_mean_cal
}
|
fb65f82f1acb43410e3a4e9f9a89ea3a33aafa07
|
73744a740941b13641c0175c8e583b20cfd023a1
|
/analysis/words/10_IAT_analyses/scripts/04_get_iat_by_model.R
|
7854bce47bb9d2d24b47bab37e44e79718ecd473
|
[] |
no_license
|
mllewis/WCBC_GENDER
|
8afe092a60852283fd2aa7aea52b613f7b909203
|
ed2d96361f7ad09ba70b564281a733da187573ca
|
refs/heads/master
| 2021-12-25T22:41:21.914309
| 2021-12-22T19:08:36
| 2021-12-22T19:08:36
| 248,584,454
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,840
|
r
|
04_get_iat_by_model.R
|
# get IAT for each model
# Script: compute IAT (Implicit Association Test) effect sizes for four
# gender-bias word tests across several word-embedding models and write the
# combined results to a single CSV.
# load packages etc
library(tidyverse)
library(here)
library(data.table)
library(glue)
# get_ES() (the effect-size computation) is defined in IAT_utils.R.
source(here("analysis/words/10_IAT_analyses/scripts/IAT_utils.R"))
# Outfile
ES_OUTFILE <- here("data/processed/iat/other/iat_es_by_model.csv")
# Model paths
# Each *_PREFIX points at 10 resampled model files named <prefix><i>.csv.
KIDBOOK_FULL_PATH <- here("data/processed/iat/models/trained_kid_model_5_count.csv")
KIDBOOK_SAMPLED_PREFIX <- here("data/processed/iat/models/trained_sampled_kidbook/trained_sampled_kidbook_5_count_")
COCA_SAMPLED_PREFIX <- here("data/processed/iat/models/trained_sampled_coca/trained_sampled_coca_5_count_")
# NOTE(review): absolute local path -- will not resolve on other machines.
WIKI_PATH <- "/Users/mollylewis/Documents/research/Projects/1_in_progress/VOCAB_SEEDS/exploratory_analyses/0_exploration/wiki.en.vec"
# Stimuli
# Each test is a list of two category word sets (male/female terms) and two
# attribute word sets; the relative association between categories and
# attributes in the embedding space defines the bias measure.
MATH_ARTS_KID <- list(test_name = "WEAT_7_2",
                      bias_type = "gender-bias-math-arts",
                      category_1 = c("man", "boy", "brother", "he", "him", "son"),
                      category_2 = c("woman", "girl", "sister", "she", "her", "daughter"),
                      attribute_1 = c("shapes", "count", "sort", "size", "numbers", "different"),
                      attribute_2 = c("books", "paint", "draw", "art", "dance", "story"))
MATH_LANGUAGE_KID <- list(test_name = "WEAT_7_3",  # this has been studied in kids (Cveneck,Greendwald, & Meltzoff, 2011a)
                          bias_type = "gender-bias-math-language",
                          category_1 = c("man", "boy", "brother", "he", "him", "son"),
                          category_2 = c("woman", "girl", "sister", "she", "her", "daughter"),
                          attribute_1 = c("shapes", "count", "sort", "size", "numbers", "different"),
                          attribute_2 = c("books","read", "write","story", "letters", "spell")) # triangle, added, cents
GOOD_BAD_GENDER_KID <- list(test_name = "WEAT_VALENCE_GENDER",  # this has been studied in kids (Cveneck, Greenwald, & Meltzoff, 2011b)
                            bias_type = "gender-bias-good-bad",
                            category_1 = c("man", "boy", "brother", "he", "him", "son"),
                            category_2 = c("woman", "girl", "sister", "she", "her", "daughter"),
                            attribute_1 = c("bad", "awful", "sick", "trouble", "hurt" ) , # these words are adapted from Rudman and Goodman, 2004 exp 1
                            attribute_2 = c( "good" ,"happy", "gift" , "sunshine", "heaven"))
# terrible, awful/wonderful, hate/love
CAREER_WORD_LIST_KID <- list(test_name = "WEAT_6_2",
                             bias_type = "gender-bias-career-family2",
                             category_1 = c( "man", "boy", "brother", "he", "him", "son"),
                             category_2 = c( "woman", "girl", "sister", "she", "her", "daughter"),
                             attribute_1 = c("desk", "work", "money",
                                             "office", "business", "job"),
                             attribute_2 = c("home", "parents", "children", "family", "cousins",
                                             "wedding")) #career - job; salary- money; executive - desk; professional - work
test_list <- list(MATH_ARTS_KID,
                  MATH_LANGUAGE_KID,
                  CAREER_WORD_LIST_KID,
                  GOOD_BAD_GENDER_KID)
### GET THE ES BY MODEL TYPE
## kidbook
# Full children's-book model: one effect size per test.
kid_model <- read_csv(KIDBOOK_FULL_PATH)
kidbook_full_es <- map_df(test_list, get_ES, kid_model) %>%
  mutate(corpus = "kidbook")
## kidbook sampled
# 10 resampled kidbook models: cross every test with every model and tag each
# row with the model index parsed from the file name.
kidbook_sampled_models <- map(1:10, ~glue("{KIDBOOK_SAMPLED_PREFIX}{.x}.csv"))
kidbook_sampled_es <- cross2(test_list, kidbook_sampled_models) %>%
  map_df(~get_ES(.x[[1]], read_csv(.x[[2]])) %>%
           mutate(model_id = str_remove(str_remove(.x[[2]],
                                                   KIDBOOK_SAMPLED_PREFIX), ".csv"))) %>%
  mutate(corpus = "kidbook_sampled")
# sampled coca
# Same procedure for the 10 resampled COCA models.
coca_sampled_models <- map(1:10, ~glue("{COCA_SAMPLED_PREFIX}{.x}.csv"))
coca_sampled_es <- cross2(test_list, coca_sampled_models) %>%
  map_df(~get_ES(.x[[1]], read_csv(.x[[2]])) %>%
           mutate(model_id = str_remove(str_remove(.x[[2]],
                                                   COCA_SAMPLED_PREFIX), ".csv"))) %>%
  mutate(corpus = "coca_sampled")
# wiki
# Pre-trained fastText Wikipedia vectors (300-dim; header row skipped).
wiki_model <- fread(
  WIKI_PATH,
  header = FALSE,
  skip = 1,
  quote = "",
  encoding = "UTF-8",
  data.table = TRUE,
  col.names = c("word",
                unlist(lapply(2:301, function(x) paste0("V", x)))))
wiki_es <- map_df(test_list, get_ES, wiki_model) %>%
  mutate(corpus = "wiki")
### Bind ES together
all_es <- list(kidbook_full_es,
               kidbook_sampled_es,
               coca_sampled_es,
               wiki_es) %>%
  reduce(bind_rows)
# write to csv
write_csv(all_es, ES_OUTFILE)
|
85218e12a8d5b5c2b71f06b4794a43cecfe80102
|
852b46209a2bb6839078ae48f28f24a8fbf6bfa5
|
/R/alphaPowCon.R
|
4cda47c3be16226a993b5e07db09b4169024fa3a
|
[] |
no_license
|
AKitsche/poco
|
24fd28ab0e517a8e5f1acf02bc72a38906d90cd6
|
a07c7ce7b19e38597904dab7bbfb4728fe6c4b47
|
refs/heads/master
| 2021-01-16T18:40:32.051653
| 2015-10-07T17:18:38
| 2015-10-07T17:18:38
| 18,356,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,328
|
r
|
alphaPowCon.R
|
alphaPowCon <- function(power, n, mu, sd, n.sub=2, TreatMat = "Tukey", SubMat = "GrandMean", thetas = 1, alternative = c("two.sided", "less", "greater")){
  # Find the significance level alpha at which PowCon() attains the requested
  # 'power' for the given design, by root finding on alpha in (0, 1), then
  # return the full PowCon() results at that alpha as a 'Powerpoco' object.
  # NOTE(review): assumes power is monotone in alpha over (0.0001, 0.9999) and
  # that the target power is bracketed there; uniroot() errors otherwise.
  Alpha <- function(alpha){
    # Achieved power at this alpha minus the target power; zero at the root.
    alpha <- as.numeric(alpha)
    PowCon(mu=mu,
           sd=sd,
           n = n,
           n.sub=n.sub,
           TreatMat= TreatMat,
           SubMat = SubMat,
           thetas=thetas,
           alpha=alpha,
           alternative=alternative)[[1]]-power
  }
  Alphafinal <- as.numeric(uniroot(Alpha, lower=0.0001, upper=0.9999)$root)
  # Re-evaluate PowCon() at the solved alpha to collect all output components.
  Power <- PowCon(mu=mu,
                  sd=sd,
                  n = n,
                  n.sub=n.sub,
                  TreatMat= TreatMat,
                  SubMat = SubMat,
                  thetas=thetas,
                  alpha=Alphafinal,
                  alternative=alternative)
  # Repackage the PowCon() components (positional -- relies on PowCon's
  # output order) together with the solved alpha.
  out <- list(power = power,
              n=n,
              NonCentrPar=Power[[3]],
              crit = Power[[4]],
              alternative = Power[[5]],
              CorrMat = Power[[6]],
              CMat=Power[[7]],
              DMat=Power[[8]],
              thetas=Power[[9]],
              alpha = Alphafinal,
              n.sub=Power[[11]],
              TreatMat=Power[[12]],
              SubMat=Power[[13]])
  class(out) <- "Powerpoco"
  out
}
|
78a707e153f5002f08431ba5aa065076549694be
|
f72f364b54e40f0ccac7f0c44c96e326a5a0e2d9
|
/man/EasyUpliftTree-package.Rd
|
c22e25f4d4228416476a18174ac457206759c7ce
|
[] |
no_license
|
cran/EasyUpliftTree
|
937bbb394b945d931ce3c56668cd9ac16c281abf
|
9dccbf87f8714dbd43cd7dbca57f9b8632ce12ed
|
refs/heads/master
| 2021-01-23T06:26:36.687184
| 2013-03-24T00:00:00
| 2013-03-24T00:00:00
| 17,717,451
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
rd
|
EasyUpliftTree-package.Rd
|
\name{EasyUpliftTree-package}
\alias{EasyUpliftTree-package}
\alias{EasyUpliftTree}
\docType{package}
\title{
Easy Uplift Tree Model for R
}
\description{
Easy Uplift Tree Model for R
}
\details{
\tabular{ll}{
Package: \tab EasyUpliftTree\cr
Type: \tab Package\cr
Version: \tab 0.0.2\cr
Date: \tab 2013-02-24\cr
License: \tab BSD\cr
}
}
\author{
Yohei Sato, Issei Kurahashi
Maintainer: Yohei Sato <yokkun@tkul.jp>
}
\references{
http://stochasticsolutions.com/sbut.html
}
\keyword{ package }
\seealso{
\code{\link{buildUpliftTree}},\code{\link{toDataFrame}},\code{\link{classify}}
}
\examples{
\dontrun{
uplift.tree <- buildUpliftTree(y.train, treat.train, x.train)
print(uplift.tree)
uplift.df <- toDataFrame(uplift.tree)
x.test$node.type <- sapply(1:nrow(x.test), function(i) classify(uplift.tree, x.test[i, ]))
}
}
|
78f158891d91f9138c943b7bfb649dd75eb8f828
|
dfee2e61441a20ba3101a67ae8c5479169d8f086
|
/man/installHumanGenomeAnnotation.Rd
|
413450746ffedc8593070a97bddc11f003d21bf6
|
[
"MIT"
] |
permissive
|
hyginn/BCB420.2019.ESA
|
58e8045e063aab83acc2e22738f943c4051e5630
|
cd56c0445ddc31551839e759657bc019ccd8f5b5
|
refs/heads/master
| 2020-04-29T06:07:39.178498
| 2019-04-06T04:35:25
| 2019-04-06T04:35:25
| 175,906,118
| 0
| 30
|
MIT
| 2019-04-06T05:07:32
| 2019-03-16T00:00:12
|
R
|
UTF-8
|
R
| false
| true
| 509
|
rd
|
installHumanGenomeAnnotation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeneCorVSgotermEDA.R
\name{installHumanGenomeAnnotation}
\alias{installHumanGenomeAnnotation}
\title{\code{installHumanGenomeAnnotation} install Human genome Annotation when necessary.}
\usage{
installHumanGenomeAnnotation()
}
\value{
(NULL)
}
\author{
Yuhan Zhang (aut)
}
\description{
\code{installHumanGenomeAnnotation} install Human genome Annotation when necessary.
}
\examples{
\dontrun{
installHumanGenomeAnnotation()
}
}
|
70be4f44f2f58b78474da92575476e264e902c8a
|
74bf92385b7b328d4304301fa542c814098c89bd
|
/Reading_PreparaingData.R
|
771291f2a4c8a49c8fa92c0f39e2a6d680c7d0b9
|
[] |
no_license
|
aaizenm/ExData_Plotting1
|
982418acefe95304a6016aab5655e70a22923c4f
|
6161d830cd1fa024e35a33a1cebe1737fbd43a53
|
refs/heads/master
| 2021-01-17T21:55:21.326339
| 2014-07-10T07:00:07
| 2014-07-10T07:00:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 454
|
r
|
Reading_PreparaingData.R
|
## Read the full household power consumption file (';'-separated).
hpc=read.table("./data/household_power_consumption.txt",header=TRUE,check.names=FALSE,as.is=TRUE, sep=";")
## Keep only 1 and 2 February 2007; the Date column is day/month/year,
## as the "1/2/2007" / "2/2/2007" filter values indicate.
hpc1<-hpc[which(hpc$Date =="1/2/2007" | hpc$Date =="2/2/2007"), ]
## Concatenate date and time into one timestamp string.
## FIX: plain vectorized paste() replaces the row-wise apply(..., paste).
hpc1$NewDate <- paste(hpc1$Date, hpc1$Time)
## Parse to POSIXlt.
## BUG FIX: the format was "%m/%d/%Y", which parses "1/2/2007" as 2 January
## instead of 1 February; the data (and the filter above) use day/month/year.
hpc1$NewDate<- strptime(hpc1$NewDate, format="%d/%m/%Y %H:%M:%S")
|
c9d21dbeecec4dc92dae33f67a52b149e75d4ce4
|
8a2b0cab64ac5f28bedfb06684774b2464bfa87c
|
/functions/combined_rr_pa_pa.R
|
a55678bb8fd8c07ceedbcecbfadace5f710d7db7
|
[] |
no_license
|
walkabillylab/ITHIM-R
|
037264528ffef905a8c9b32c4f9500d8601bae63
|
d6809907950af715a68d03c4a4dcd6851170994e
|
refs/heads/master
| 2020-04-02T06:13:15.504959
| 2018-11-06T15:55:35
| 2018-11-06T15:55:35
| 154,136,680
| 0
| 0
| null | 2018-11-06T15:55:37
| 2018-10-22T12:06:04
|
HTML
|
UTF-8
|
R
| false
| false
| 866
|
r
|
combined_rr_pa_pa.R
|
combined_rr_pa_pa <- function(ind_pa,ind_ap){
  # Combine individual-level relative risks from physical activity (PA) and
  # air pollution (AP) into joint RRs (the product of the two pathway RRs).
  #
  # Args:
  #   ind_pa: data frame with RR_pa_<scenario>_<acronym> columns plus
  #           participant_id, sex, age, age_cat.
  #   ind_ap: data frame with RR_ap_<scenario>_<acronym> columns plus
  #           participant_id, sex, age, age_cat.
  # Returns: ind_pa left-joined with ind_ap on participant_id, plus one
  #   RR_pa_ap_<scenario>_<acronym> column for every disease flagged for
  #   BOTH pathways in the global DISEASE_OUTCOMES table.
  #
  # Missing RRs are treated as "no effect" (RR = 1) so products stay defined.
  ind_ap[is.na(ind_ap)] <- 1
  ind_pa[is.na(ind_pa)] <- 1
  # Drop demographic columns duplicated in both inputs before joining.
  ind_ap <- dplyr::select(ind_ap, -c(sex, age, age_cat))
  # Join PA and AP individual datasets.
  ind <- left_join(ind_pa, ind_ap, by = "participant_id")
  ### Iterate over all disease outcomes.
  ### FIX: seq_len() is safe when DISEASE_OUTCOMES has zero rows (1:nrow()
  ### would yield c(1, 0)); && is the correct scalar conjunction here.
  for ( j in seq_len(nrow(DISEASE_OUTCOMES))){
    ## Only diseases affected by both pathways get a combined RR.
    if (DISEASE_OUTCOMES$physical_activity[j] == 1 && DISEASE_OUTCOMES$air_pollution[j] == 1){
      for (scen in SCEN_SHORT_NAME){
        ac <- as.character(DISEASE_OUTCOMES$acronym[j])
        ind[[paste('RR_pa_ap', scen, ac, sep = '_')]] <- ind[[paste('RR_pa', scen, ac, sep = '_')]] * ind[[paste('RR_ap', scen, ac, sep = '_')]]
      }
    }
  }
  ind
}
|
d2a7c023d32141c90fe8afbc82c4c5ffa7791e09
|
1ef693a6f51d5b66d72f29670e8318e57b4ade53
|
/R/apply.r
|
f258c8c554a29924c7c08536462f56247e456805
|
[] |
no_license
|
cran/DynamicGP
|
20f649672d3a9235d98a26f192ac51d9cb068232
|
1eb19d2fd1719137086fc19599bd48be76475c94
|
refs/heads/master
| 2022-11-11T05:05:24.731474
| 2022-11-08T09:10:09
| 2022-11-08T09:10:09
| 129,434,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 665
|
r
|
apply.r
|
# Apply `func` over `margin` of `mat`, serially for nthread <= 1 or on a
# `clutype` cluster with `nthread` workers otherwise.  Extra arguments in
# `...` are forwarded to `func`.  The cluster is always torn down, even if
# the parallel apply fails.
genericApply <- function(mat, margin, func, ..., nthread=1, clutype="PSOCK")
{
    if (nthread > 1) {
        cl <- parallel::makeCluster(nthread, type = clutype)
        on.exit(parallel::stopCluster(cl), add = TRUE)
        return(parallel::parApply(cl, mat, margin, func, ...))
    }
    apply(mat, margin, func, ...)
}
# lapply() over `x`, serially for nthread <= 1 or on a `clutype` cluster
# with `nthread` workers otherwise.  `...` is forwarded to `func`; the
# cluster is always stopped, even on error.
genericLapply <- function(x, func, ..., nthread=1, clutype="PSOCK")
{
    if (nthread > 1) {
        cl <- parallel::makeCluster(nthread, type = clutype)
        on.exit(parallel::stopCluster(cl), add = TRUE)
        return(parallel::parLapply(cl, x, func, ...))
    }
    lapply(x, func, ...)
}
|
d0a6f673a996b14ec5fd795c4b3316d643aac78f
|
09df45040befbcb4634a3a62c3b9fa7dea7a5742
|
/man/boxplot_cov.Rd
|
35af7cb32a3006e8a2c9b3e8cb0a1ace330a3714
|
[] |
no_license
|
YanruiYang/design143
|
61f18a0b0251188536b849bda330718b6d047c18
|
544112293ff0601c52117d9ccf329a7a825d0b17
|
refs/heads/main
| 2023-05-28T23:59:25.133001
| 2021-06-09T07:06:37
| 2021-06-09T07:06:37
| 375,220,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 737
|
rd
|
boxplot_cov.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testing.R
\name{boxplot_cov}
\alias{boxplot_cov}
\title{Create RMSE boxplots to compare covariance models}
\usage{
boxplot_cov(function_name, k, design_type, n, cov = "all", m = 20)
}
\arguments{
\item{function_name}{name of the testing function}
\item{k}{number of levels of the testing function}
\item{design_type}{the type of design: randomLHS, ortho_arrayLHS, maxiMin}
\item{n}{number of training and testing samples}
\item{cov}{covariance: all (all five possible cov types), gauss, matern5_2, matern3_2, exp, powexp}
\item{m}{number of repetitions}
}
\value{
the mean square error
}
\description{
Create RMSE boxplots to compare covariance models
}
|
a1e0de2db27cccfd7cce7935191c6cc6f93db7f7
|
c8ced99c717b29c3f4eff59ded1d92f4179bf0fb
|
/4. Exploratory Data Analysis/week1.R
|
0695a00ed4a4bc10ee31c4637b33bf7694eb1f50
|
[] |
no_license
|
salus0324/datasciencecoursera
|
a20f6c7dc98d1516262b701efed15903412db9a5
|
7e53a50ef89c48faf4240febbc65b274bbb8d26b
|
refs/heads/gh-pages
| 2020-03-12T08:46:16.127192
| 2019-02-02T14:36:10
| 2019-02-02T14:36:10
| 130,535,533
| 0
| 0
| null | 2019-02-02T14:36:11
| 2018-04-22T04:36:35
|
HTML
|
UTF-8
|
R
| false
| false
| 2,268
|
r
|
week1.R
|
# Exploratory Data Analysis (week 1) course script: base, lattice and ggplot2
# plotting on the avgpm25 / cars / state / mpg / airquality datasets.
# NOTE(review): this script draws to the active graphics device and mutates
# global par() settings as it goes; run it top to bottom interactively.

# Load county-level PM2.5 data; types fixed explicitly per column.
pollution <- read.csv("./data/avgpm25.csv", colClasses = c("numeric", "character", "factor", "numeric","numeric"))
head(pollution)
# One-dimensional summaries of PM2.5.
summary(pollution$pm25)
boxplot(pollution$pm25, col ="blue")
# Reference line at the 12 ug/m^3 standard.
abline(h=12)
hist(pollution$pm25, col="green", breaks=100)
rug(pollution$pm25)
abline(v=12, lwd=2)
abline(v=median(pollution$pm25), col ="magenta", lwd=4)
# County counts per region.
barplot(table(pollution$region), col="wheat", main ="Number of Counties in Each Region")
table(pollution$region)
# Two-dimensional summaries: PM2.5 by region.
boxplot(pm25 ~ region, data=pollution, col="red")
?par
?mfrow
# Stack two histograms (east above west).
par(mfrow = c(2,1), mar =c(4,4,2,1))
hist(subset(pollution, region=="east")$pm25, col ="green")
hist(subset(pollution, region =="west")$pm25, col ="green")
# PM2.5 vs latitude, coloured by region.
with(pollution, plot(latitude, pm25, col= region))
abline(h=12, lwd=2, lty=2)
# Side-by-side regional scatterplots.
par(mfrow = c(1,2), mar = c(5,4,2,1))
with(subset(pollution, region=="west"), plot(latitude, pm25, main="West"))
with(subset(pollution, region=="east"), plot(latitude, pm25, main="East"))
# Base plotting system example.
library(datasets)
data(cars)
with(cars, plot(speed,dist))
# Lattice system example: conditioned scatterplots.
library(lattice)
state <- data.frame(state.x77, region =state.region)
xyplot(Life.Exp ~ Income | region, data=state, layout =c(4,1))
# ggplot2 system example.
library(ggplot2)
data(mpg)
qplot(displ, hwy, data=mpg)
# airquality: ozone distribution and ozone vs wind.
hist(airquality$Ozone)
par(mfrow=c(1,1))
with(airquality, plot(Wind,Ozone))
# Month as a factor for the grouped boxplot.
airquality <- transform(airquality, Month =factor(Month))
boxplot(Ozone ~Month, airquality, xlab="Month", ylab="Ozone(ppb)")
library(datasets)
# Annotating a base plot after creation.
with(airquality, plot(Wind, Ozone))
title(main ="Ozone and Wind in NYC")
with(subset(airquality, Month ==5), points(Wind, Ozone, col ="Blue"))
# type="n" sets up the axes without drawing, then points are layered on.
with(airquality, plot(Wind, Ozone, main = "Ozone and Wind in NYC", type="n"))
with(subset(airquality, Month ==5), points(Wind, Ozone, col ="blue"))
with(subset(airquality, Month !=5), points(Wind, Ozone, col ="red"))
legend("topright", pch=1, col=c("blue", "red"), legend =c("May", "Other Months"))
# Scatterplot with a fitted regression line overlaid.
with(airquality, plot(Wind, Ozone, main ="Ozone and Wind in NYC", pch =20))
model <- lm(Ozone~Wind, airquality)
abline(model, lwd=2)
# Multi-panel layout with an outer title.
# NOTE(review): outer=T uses the reassignable alias T; TRUE is safer style.
par(mfrow=c(1,3), mar=c(4,4,3,1),oma=c(0,0,2,0))
with(airquality, {
plot(Wind, Ozone, main ="Ozone and Wind")
plot(Solar.R, Ozone, main ="Ozone and solar Radiation")
plot(Temp, Ozone, main="Ozone and Temperatrue")
mtext("Ozone and Weather in NYC", outer=T)
})
|
146be8b6c5efebd176aa3404cbd8a5b59bd4bcd8
|
e59452676887ae6d4052ad6bcaf6ae74c3b94aa8
|
/run_analysis.R
|
c493726ffac01dc447809491c5b7948f97c45c84
|
[] |
no_license
|
Everymans-ai/Getting-and-Cleaning-Data-Course-Project
|
9d3d815db23034d2da3f4cb37abdd935cc34e128
|
41e0b634753411bd6695690b3898e5b4b94bf267
|
refs/heads/master
| 2021-05-30T21:17:12.831326
| 2016-04-07T16:48:43
| 2016-04-07T16:48:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,679
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test sets,
# keep mean/std measurements, attach activity labels, and write a tidy
# per-subject/per-activity average table.
# NOTE(review): machine-specific absolute path — adjust before running.
filesPath <- "C:\\Users\\msunkpal\\Desktop\\R_Final\\UCI HAR Dataset"
# packages loaded are: dplyr and tidyr
#Reading trainings files
# NOTE(review): tbl_df() is deprecated in current dplyr; tibble::as_tibble()
# is the modern equivalent.
trainX <- read.table(file.path(filesPath, "train","X_train.txt"))
XtrainData <- tbl_df(trainX)
trainy <- read.table(file.path(filesPath, "train","y_train.txt"))
ytrainData <- tbl_df(trainy)
trainD <- read.table(file.path(filesPath, "train", "subject_train.txt"))
SubjectTrainD <- tbl_df(trainD)
#Reading testing files
testX <- read.table(file.path(filesPath,"test", "X_test.txt"))
XtestData <- tbl_df(testX)
testy <- read.table(file.path(filesPath,"test", "y_test.txt"))
ytestData <- tbl_df(testy)
testD <- read.table(file.path(filesPath, "test" , "subject_test.txt" ))
SubjectTestD <- tbl_df(testD)
# Read activity files
activity <- read.table(file.path(filesPath, "activity_labels.txt"))
activityData <- tbl_df(activity)
# Read feature files (second column holds the 561 feature names)
feature <- read.table(file.path(filesPath, "features.txt"))
# 1. Merges the training and the test sets to create one data set
# Assigning column names:
colnames(XtrainData) <- feature[,2]
colnames(ytrainData) <-"activity"
colnames(SubjectTrainD) <- "subject"
colnames(XtestData) <- feature[,2]
colnames(ytestData) <- "activity"
colnames(SubjectTestD) <- "subject"
colnames(activityData) <- c("activity","activityNum")
# Merge all data into a single dataframe (columns bound per split, then
# train and test rows stacked).
trainMerge <- cbind(ytrainData, SubjectTrainD, XtrainData)
testMerge <- cbind(ytestData, SubjectTestD, XtestData)
mergeAll <- rbind(trainMerge,testMerge)
# 2. Extract measurements on the mean and standard deviation for each measurement
# Reading column names
Names <- colnames(mergeAll)
# Create vector for defining ID, mean and standard deviation:
# NOTE(review): in "mean.." / "std.." the dots are regex wildcards, not
# literal parentheses — so e.g. meanFreq columns also match. Use
# "mean\\(\\)" to match only mean() features; confirm which is intended.
mean_std <- (grepl("activity" , Names) | grepl("subject" , Names) | grepl("mean.." , Names) | grepl("std.." , Names))
# Making nessesary subset from setAllInOne:
subset_Mean_Std <- mergeAll[ , mean_std == TRUE]
# 3. Use descriptive activity names to name the activities in the data set
activityNames <- merge(subset_Mean_Std, activityData , by = "activity", all.x = TRUE)
# 4. Appropriately labels the data set with descriptive variable names.
# step is complete from parts of step 1, 2, and 3. this can be verified by the following commands
# NOTE(review): str() returns NULL invisibly, so head(str(...), 4) prints the
# structure but the head() wrapper has no effect.
head(str(activityNames),4)
# 5. Creating a second, independent tidy data set with the average of each variable for each activity and each subject:
tidydataset <- aggregate(. ~subject + activity, activityNames, mean)
tidydataset <- tidydataset[order(tidydataset$subject, tidydataset$activity),]
write.table(tidydataset, "tidyData.txt", row.name=FALSE)
|
f19bdbd90fe11fbade8ec1e1d640ffbb59b94c56
|
b1eca3d685a89eb0fa20e0619e1f98aee971d231
|
/fractal-dimension/regression.r
|
2b0e8746b247845874271b3750658a5828d62d6a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tomwhite/how-far-away-is-the-sea
|
36255ec4323362871e086933e55baa8f69f0c98b
|
c1ccd2f8a2fff23b92376e6bad171b7dac4f3946
|
refs/heads/master
| 2020-12-24T06:42:19.927199
| 2013-07-28T19:53:46
| 2013-07-28T19:53:46
| 7,388,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
regression.r
|
# Enter columns as calculated by DumpCoast program
x=c(40.02177855743041,20.926887304365344,11.364379535864254,6.435809731800157,3.7523206497028117,2.268029441545984,1.4504876804847864,0.8834338574652224,0.6043891952733216)
y=c(92.62456926247125,88.59415989750448,81.57066534732749,72.09660001403037,61.82840478155156,51.145720542732256,39.986551268477555,32.82645299921803,24.81844499754266)
# Take logs
lx <- log(x)
ly <- log(y)
# Plot points
plot (lx, ly)
# Do linear regression
lin=lm(ly~lx)
# Superimpose regression line
abline(lin, col="blue", untf=TRUE)
# Print out intercept and slope
coef(lin)
# Slope is 0.31 like in Chaos and Fractals book
|
ca8fae47c9b3e700f094aa39dc2a61ddcbe808a5
|
6c2f655cd45c3a8f01f84c6024ce7b3087271749
|
/Restaurant project.R
|
18c77f1fc2a7973666b8c75e0cab102f263d7d1f
|
[] |
no_license
|
varun331/R-Code
|
d8a039d91f157c8f3b9af1df2dc0892df3260414
|
d8ec7abfcdf9464c3beda8a330b65040aca80035
|
refs/heads/master
| 2021-06-08T14:02:53.468970
| 2016-11-22T04:30:26
| 2016-11-22T04:30:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,756
|
r
|
Restaurant project.R
|
# Load the Kaggle restaurant-revenue train/test CSVs.
# NOTE(review): user-specific absolute paths — adjust per machine.
Project1 = read.csv(file="~/Documents/R Projects/train.csv",head=TRUE)
test=read.csv(file="~/Documents/R Projects/test.csv",head=TRUE)
train <- Project1
# NOTE(review): attach() puts train's columns (revenue, P1, ...) on the
# search path; the rest of the script mixes these bare names with train$...
attach(train)
# Quick scatterplots of revenue against the first six P-features, layered
# on one device with distinct colours/symbols.
plot(revenue,P1,col='1',pch=16)
points(revenue,P2,col='2',pch=16)
points(revenue,P3,col='3',pch=15)
points(revenue,P4,col='4',pch=15)
points(revenue,P5,col='5',pch=15)
points(revenue,P6,col='6',pch=15)
# One scatterplot per feature column (columns 6..49); colour index follows
# the column number.
P=c(1:49)
for (i in 6:49){
plot(revenue,train[,i],col=P[i],pch=16)}
# ---- Feature engineering ----------------------------------------------------
# Derive day / month / year factors from Open.Date for train and test.
library(lubridate)
train$day<-as.factor(day(as.POSIXlt(train$Open.Date, format="%m/%d/%Y")))
train$month<-as.factor(month(as.POSIXlt(train$Open.Date, format="%m/%d/%Y")))
train$year<-as.factor(year(as.POSIXlt(train$Open.Date, format="%m/%d/%Y")))
test$day<-as.factor(day(as.POSIXlt(test$Open.Date, format="%m/%d/%Y")))
test$month<-as.factor(month(as.POSIXlt(test$Open.Date, format="%m/%d/%Y")))
test$year<-as.factor(year(as.POSIXlt(test$Open.Date, format="%m/%d/%Y")))
# Numeric encodings (factor level codes) of city / city group / type.
train$City1 <- as.numeric(train$City)
train$City.Group1 <- as.numeric(train$City.Group)
train$Type1 <- as.numeric(train$Type)
test$City1 <- as.numeric(test$City)
test$City.Group1 <- as.numeric(test$City.Group)
test$Type1 <- as.numeric(test$Type)
# Subset train by year.
# NOTE(review): this train.1/train.2 pair is overwritten by the random
# subsample a few lines below.
train.1 = subset(train, year=='1999'|year=='2000'|year=='2013'|year=='2012')
train.1 = train.1[,c(1,6:42,44:49,43)]
train.2 = train.1[,c(2:38)]
# Random 50% subsample of train (seed fixed for reproducibility); this
# replaces the year-based subset above.
set.seed(600)
sub<- sample(nrow(train),floor(nrow(train)*.5))
train.1 = train[sub,]
# Split test: unusual years and Type code 4 go to test.2, the rest to
# test.1, which is then reduced to the modelling predictors only.
test.2=subset(test,year=='1995'|year=='2001'|year=='2003'|Type1=='4')
test.1=subset(test,year!='1995'& year!='2001'& year!='2003'&Type1!='4')
test.1=test.1[,c("P2","P3","P4","P5","P11","P21","P22","P27","P29","P30","P31","P32","year","City.Group1","Type1")]
train.1$day <- NULL
train.1$City1<- NULL
library(Correlplot)
library(calibrate)
library(MASS)
library(ellipse)
library(mgcv)
library(gam)
library(splines)
# BUG FIX: the original line `train.2 <- as.numeric(train.1[,c(1:4)]` was
# missing its closing ")" (a syntax error), and as.numeric() on a data
# frame raises an error at runtime anyway. Kept as a plain column subset;
# TODO confirm the intended numeric conversion (e.g. data.matrix()).
train.2 <- train.1[, c(1:4)]
# fit a correlation matrix to identify variables that have least correlation
# NOTE(review): cor() will fail if train.1 still contains non-numeric
# columns (Open.Date, City, factor columns) — confirm the intended input.
ctab <- cor(train.1)
# fit a GAM model
# Baseline OLS with all raw predictors.
# NOTE(review): this fit is immediately overwritten by the natural-spline
# model below, so it is dead computation — kept to preserve the original
# script's behaviour and as a reference.
gam1=lm(revenue~P1+P2+P3+P4+P5+P6+P7+P8+P9+P10+P11+P12+P13+P14+P15
+P16+P17+P18+P19+P20+P21+P22+P23+P24+P25+P26+P27+P28+P29
+P30+P31+P32+P33+P34+P35+P36+P37+year+City1+City.Group1
+Type1,data=train.1)
# Natural-spline (ns) version of the same model.
# FIX: "year++City.Group1" had a doubled "+"; R parsed the second as a
# unary plus (no behaviour change), but it was clearly a typo.
gam1=lm(revenue~ns(P1)+ns(P2)+ns(P3)+ns(P4)+ns(P5)+ns(P6)+ns(P7)+ns(P8)
+ns(P9)+ns(P10)+ns(P11)+ns(P12)+ns(P13)+ns(P14)+ns(P15)
+ns(P16)+ns(P17)+ns(P18)+ns(P19)+ns(P20)+ns(P21)+ns(P22)
+ns(P23)+ns(P24)+ns(P25)+ns(P26)+ns(P27)+ns(P28)+ns(P29)
+ns(P30)+ns(P31)+ns(P32)+ns(P33)+ns(P34)+ns(P35)+ns(P36)
+ns(P37)+year+City.Group1
+Type1,data=train.1)
# Forward-addition sequence of smoothing-spline GAMs: gam.k adds s(Pk) to
# the previous model's terms (all fit on the train.1 subsample).
# NOTE(review): both mgcv and gam are loaded above; with library(gam)
# loaded last, gam() and s() resolve to the gam package, where the second
# argument of s() is the smoothing df.
gam.1 = gam(revenue~s(P1),data=train.1)
gam.2 = gam(revenue~s(P1)+s(P2),data=train.1)
gam.3 = gam(revenue~s(P1)+s(P2)+s(P3),data=train.1)
gam.4 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4),data=train.1)
gam.5 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5),data=train.1)
gam.6 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6),data=train.1)
gam.7 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7),data=train.1)
gam.8 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8),data=train.1)
gam.9 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9),data=train.1)
gam.10 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10),data=train.1)
gam.11 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11),data=train.1)
gam.12 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11) +s(P12),data=train.1)
gam.13 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13),data=train.1)
gam.14 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14),data=train.1)
gam.15 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15),data=train.1)
gam.16 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16),data=train.1)
gam.17 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17),
data=train.1)
gam.18 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18),data=train.1)
gam.19 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19),data=train.1)
gam.20 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20),data=train.1)
gam.21 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21),data=train.1)
gam.22 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22),data=train.1)
gam.23 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23),data=train.1)
gam.24 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24),data=train.1)
gam.25 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25),data=train.1)
gam.26 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26),data=train.1)
gam.27 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27),data=train.1)
gam.28 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28),data=train.1)
gam.29 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29),data=train.1)
gam.30 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29)+s(P30),data=train.1)
gam.31 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29)+s(P30)+s(P31),data=train.1)
gam.32 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29)+s(P30)+s(P31)+s(P32),data=train.1)
gam.33 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29)+s(P30)+s(P31)+s(P32)
+s(P33),data=train.1)
gam.34 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29)+s(P30)+s(P31)+s(P32)
+s(P33)+s(P34),data=train.1)
gam.35 = gam(revenue~s(P1)+s(P2)+s(P3)+s(P4)+s(P5)+s(P6)+s(P7)+s(P8)+s(P9)+s(P10)+s(P11)+s(P12)+s(P13)+s(P14)+s(P15)+s(P16)+s(P17)
+s(P18)+s(P19)+s(P20)+s(P21)+s(P22)+s(P23)+s(P24)+s(P25)+s(P26)+s(P27)+s(P28)+s(P29)+s(P30)+s(P31)+s(P32)
+s(P33)+s(P34)+s(P35),data=train.1)
# Reduced models: selected P-features with explicit smoothing df plus
# year / city group / type covariates.
gam.36 = gam(revenue~s(P2,4)+s(P5,4)+s(P11,4)+s(P21,4)+s(P22,4)+s(P27,4)+s(P29,4)+s(P30,4)+s(P31,4)+s(P32,4)
+year+City.Group1+Type1,data=train.1)
gam.37 = gam(revenue~s(P2,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
+year+factor(City.Group1)+factor(Type1),data=train.1)
# Diagnostics for gam.37.
# NOTE(review): se=T uses the reassignable alias T; TRUE is safer style.
plot.gam(gam.37,se=T,col="red")
summary(gam.37)
# Variants of gam.37 with reordered/extra terms.
gam.38 = gam(revenue~year+factor(City.Group1)+factor(Type1)+s(P2,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
,data=train.1)
gam.39 = gam(revenue~year+factor(City.Group1)+factor(Type1)+s(P2,5)+s(P3,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
,data=train.1)
# Final candidates: fit on the FULL train set; gam.40/41 use an
# inverse-gaussian family, gam.42/43 the default gaussian.
gam.40 = gam(revenue~s(P2,5)+s(P3,5)+s(P4,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
+year+factor(City.Group1)+factor(Type1),data=train,family="inverse.gaussian")
gam.41 = gam(revenue~s(P2,5)+s(P3,5)+s(P4,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
+factor(City.Group1),data=train,family="inverse.gaussian")
gam.42 = gam(revenue~s(P2,5)+s(P3,5)+s(P4,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
+factor(City.Group1),data=train)
gam.43 = gam(revenue~s(P2,5)+s(P3,5)+s(P4,5)+s(P5,5)+s(P11,5)+s(P21,7)+s(P22,5)+s(P27,4)+s(P29,4)+s(P30,6)+s(P31,5)+s(P32,6)
+year+factor(City.Group1)+factor(Type1),data=train)
# Evaluate gam.37/38/39 on the held-out test.1 set: predict, attach Ids,
# compute MSE against revenue.
# NOTE(review): test.1 was earlier reduced to predictor columns only, so
# test.1$revenue and test.1$Id are NULL here — confirm the intended
# evaluation data (these MSE lines cannot work as written).
#Predict using the GAM.37 model
preds=predict(gam.37,newdata=test.1)
preds=data.frame(preds)
preds$Id=test.1$Id
preds=preds[,c(2,1)]
MSE = mean((test.1$revenue-preds$preds)^2)
MSE
# MSE 2.491997e+12
# Overlay actual revenue (colour 2) and predictions (colour 3) vs Id.
plot(test.1$revenue~test.1$Id,col="2")
points(preds$preds~preds$Id,col='3',pch=16)
#Predict using the GAM.38 model
preds=predict(gam.38,newdata=test.1)
preds=data.frame(preds)
preds$Id=test.1$Id
preds=preds[,c(2,1)]
MSE = mean((test.1$revenue-preds$preds)^2)
MSE
# MSE 2.491997e+12
#Predict using the GAM.39 model
preds=predict(gam.39,newdata=test.1)
preds=data.frame(preds)
preds$Id=test.1$Id
preds=preds[,c(2,1)]
MSE = mean((test.1$revenue-preds$preds)^2)
MSE
# MSE 2.456429e+12
# Predict with the gam.40 model on the training data.
preds.train=predict(gam.40,newdata=train.1)
preds.train=data.frame(preds.train)
# BUG FIX: this line originally began with a stray "(" which made the rest
# of the section a syntax error.
preds$Id=test.1$Id
preds=preds[,c(2,1)]
# NOTE(review): this MSE uses `preds` from the previous section, not the
# `preds.train` computed above — confirm which predictions were intended.
MSE = mean((test.1$revenue-preds$preds)^2)
MSE
# MSE 2.445439e+12
# Build the submission: predict on test.1 with gam.40, fall back to gam.43
# for rows whose prediction is NA/non-positive; same scheme for test.2 with
# gam.41/gam.42; then merge everything and write submission.csv.
#Predict using the GAM.40 model on test data
preds=predict(gam.40,newdata=test.1,type="response")
preds=data.frame(preds)
preds$Id=test.1$Id
#Subset all Na from Non Nas from preds
# Check is 1 for a positive prediction, NA otherwise (NA predictions also
# yield NA and are caught by is.na below).
preds$Check=ifelse(preds$preds>0,1,NA)
preds1.2=preds[is.na(preds$Check),]
preds1.2$preds <- NULL
preds1.2$Check <- NULL
# Rows needing the fallback model.
NA.test <- test.1[(test.1$Id %in% preds1.2$Id),]
preds1.1 <- subset(preds,!(Id%in%preds1.2$Id))
preds1.1$Check <- NULL
colnames(preds1.1) <- c("Prediction","Id")
#Predict using the GAM 43 model on the NA.test data
preds1.2=predict(gam.43,newdata=NA.test,type="response")
preds1.2=data.frame(preds1.2)
preds1.2$Id=NA.test$Id
colnames(preds1.2) <- c("Prediction","Id")
datamerge.2=merge(preds1.1,preds1.2,by=c("Prediction","Id"),all=T)
#Predict using the GAM.41 model on test data
preds.1=predict(gam.41,newdata=test.2,type="response")
preds.1=data.frame(preds.1)
preds.1$Id=test.2$Id
# NOTE(review): preds.1.1 is used on the next two lines but only defined
# further below (subset of preds.1) — as written this errors on first run;
# the lines appear to belong after the preds.1.1 assignment.
colnames(preds.1.1) <- c("Prediction","Id","Check")
preds.1.1$Check <- NULL
# NOTE(review): data.frame(preds.1) names its column "preds.1", so
# preds.1$preds here (and in the ifelse below) is NULL — confirm intent.
plot(preds.1$preds~preds.1$Id)
# MSE 2.445439e+12
#subset all the NA and non NAs from preds.1 data
preds.1$check=ifelse(preds.1$preds>0,1,NA)
NA.1=subset(preds.1,is.na(preds.1$check))
NA.1$preds <- NULL
NA.1$check <- NULL
# NOTE(review): NA.test from the test.1 pass is overwritten here.
NA.test <- test.2[(test.2$Id %in% NA.1$Id),]
preds.1.1=subset(preds.1,preds.1$check==1)
#Predict using the GAM.42 model on NA.test
preds1.2=predict(gam.42,newdata=NA.test,type="response")
preds1.2=data.frame(preds1.2)
preds1.2$Id=NA.test$Id
colnames(preds1.2) <- c("Prediction","Id")
datamerge.1=merge(preds.1.1,preds1.2,by=c("Prediction","Id"),all=T)
#data submittion
submission=merge(datamerge.1,datamerge.2, by=c("Prediction","Id"),all=T)
submission=submission[,c(2:1)]
submission$Prediction=round(submission$Prediction,digits=0)
write.csv(submission,"submission.csv",row.names=FALSE,quote=FALSE)
|
2c29aca910e444c490aaab66c25a5d1691c48153
|
6a475ba8392918c4837f41ceee4e3c9015ca56a5
|
/Death Vizualization in United States/server.R
|
f3b343e3c24e7096360e4adec969d9c943c366e1
|
[] |
no_license
|
dipteshnath/R
|
d2cd51cd3ffad87e0a3cc8c75a56287a27bd42d4
|
338261b15d630f0cbeff7271bf868c1d61ffd628
|
refs/heads/master
| 2021-06-29T00:30:04.120255
| 2017-09-19T00:04:30
| 2017-09-19T00:04:30
| 104,002,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,181
|
r
|
server.R
|
# Shiny server for a "Deaths in the United States" visualization: an
# uploaded CSV is shown as a table / base plot / googleVis choropleth, and
# two bundled CSVs (nchs2/nchs3) feed plotly charts.
# By default, the file size limit is 5MB. It can be changed by
# setting this option. Here we'll raise limit to 9MB.
require(googleVis)
require(shiny)
require(plotly)
## Prepare data to be displayed
library(RCurl)
options(shiny.maxRequestSize = 9*1024^2)
shinyServer(function(input, output) {
# Render the uploaded CSV (input$file1) as a table; NULL until a file is
# chosen.
output$contents <- renderTable({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
read.csv(inFile$datapath)
})
# Reactive wrapper around the year slider/input.
myYear <- reactive({
input$Year
})
# Header text showing the selected year.
output$year <- renderText({
paste("Deaths in USA",myYear())
})
# e<-read.csv(inFile$datapath)
# Base-graphics plot of deaths by state from the uploaded file.
output$plot1<-renderPlot({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
e<-read.csv(inFile$datapath)
plot(e$state,e$deaths,las=1)
})
# Plotly scatter of deaths by state from the bundled nchs3.csv.
# NOTE(review): bare column names (STATES, DEATHS, TOTAL) rely on the old
# plotly API's non-standard evaluation; current plotly expects ~STATES etc.
output$myChart<-renderPlotly({
death<-read.csv("nchs3.csv")
plot_ly(data = death, x = STATES, y = DEATHS, mode = "markers",
color = TOTAL)%>%
layout(autosize = F, width = 800, height = 600)
})
# ggplot2 smooth of deaths over years (nchs2.csv), converted via ggplotly.
# NOTE(review): the layout() call here is evaluated but its result is
# discarded; only ggplotly() is returned.
output$myChart1<-renderPlotly({
nchs<-read.csv("nchs2.csv")
p <- ggplot(nchs, aes(YEAR, DEATHS))
p + geom_point() + stat_smooth()
layout(autosize = F, width = 370, height = 270)
ggplotly()
})
# googleVis choropleth of deaths by state for the selected year, from the
# uploaded file.  Min/Max dummy rows pin the colour axis to [0, 100] for
# every year 1998-2013.
output$gvis <- renderGvis({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
dat<-read.csv(inFile$datapath)
#dat<-read.csv("e.csv")
datminmax = data.frame(state=rep(c("Min", "Max"),16),
deaths=rep(c(0, 100),16),
year=sort(rep(seq(1998,2013,1),1)))
dat <- rbind(dat[,1:3], datminmax)
myYear <- reactive({
input$Year
})
#Show the visualization
# Keep only rows for the selected year.
myData <- subset(dat,
(year > (myYear()-1)) & (year < (myYear()+1)))
gvisGeoChart(myData,
locationvar="state", colorvar="deaths",
options=list(region="US", displayMode="regions",
resolution="provinces",
width=500, height=400,
colorAxis="{colors:['#FFFFFF', '#008000']}"
))
})
})
|
2f1512877d3c27bc83c7a98c36d283c2816b4818
|
91ac969835c4460ef590bf74e61b9f8379e6efe8
|
/R/closest.R
|
54bb70ad5c9a930ceb6d90c2926800d01f18bbd6
|
[] |
no_license
|
prestevez/crimeineq
|
abd3336332aec8e46336f045afa36099e970fcb9
|
0672c79f9040b8a3d8c5208bde9ac1988ca61dcb
|
refs/heads/master
| 2021-05-15T15:15:44.520436
| 2017-12-22T23:05:12
| 2017-12-22T23:05:12
| 107,298,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
r
|
closest.R
|
#' Find the element(s) of `y` closest in value to `x`
#'
#' @param x A numeric value (or vector recycled against `y`).
#' @param y A numeric vector, optionally named.
#' @return An integer vector of the position(s) in `y` with the smallest
#'   absolute difference from `x`, carrying the matching names of `y` (if
#'   any). Ties return every tied position.
#' @export
closest <- function(x, y)
{
  gaps <- abs(x - y)
  hits <- which(gaps == min(gaps))
  # FIX: subset names(y) to the matched positions. The previous
  # `names(closest) <- names(y)` errored whenever fewer positions matched
  # than length(y) ("'names' attribute must be the same length ...").
  names(hits) <- names(y)[hits]
  return(hits)
}
|
50047109d1608c5a405209c59479694ab4555f09
|
24cf4c59481802f340e4efa527103804f7687ae9
|
/RcodeIntegration/bin/Debug/hello.R
|
08ef461d2ee5f3f9c91d5107f9b518722317b873
|
[] |
no_license
|
sachinbabladi/MyBackupRepo
|
e133adcd9bbe25bdaf6a800378495b6319c94918
|
31e00d43a5efc881b1d241b040b7c42ee72dc64c
|
refs/heads/master
| 2020-12-31T07:55:32.480446
| 2015-12-04T15:32:56
| 2015-12-04T15:32:56
| 47,407,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
hello.R
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
# Reads an email dump, cleans it with a tm text-mining pipeline, writes the
# cleaned text to disk, and classifies its polarity with the sentiment package.
#
# Side effects: reads Emails.csv and writes MyData_2.csv (hard-coded paths).
# NOTE(review): removed the leftover `browser()` debug call and the invalid
# `--View(Emails)` line -- unary minus applied to View()'s NULL return errors
# at runtime, and View() is interactive-only anyway.
hello <- function() {
  library(NLP)
  library(tm)
  library(Rstem)
  library(sentiment)
  Emails <- read.csv("C:/Users/sachin.babladi/Desktop/Emails.csv", header=FALSE, comment.char="#")
  mycorpus <- Corpus(VectorSource(Emails))
  mycorpus <- tm_map(mycorpus, removePunctuation)
  # Replace separators tm's built-in transformers do not strip
  for(j in seq_along(mycorpus))
  {
    mycorpus[[j]] <- gsub("/", " ", mycorpus[[j]])
    mycorpus[[j]] <- gsub("@", " ", mycorpus[[j]])
    mycorpus[[j]] <- gsub("\\|", " ", mycorpus[[j]])
  }
  mycorpus <- tm_map(mycorpus, tolower)
  mycorpus <- tm_map(mycorpus, removeWords, stopwords("english"))
  mycorpus <- tm_map(mycorpus, stemDocument)
  mycorpus <- tm_map(mycorpus, stripWhitespace)
  mycorpus <- tm_map(mycorpus, removeNumbers)
  mycorpus <- tm_map(mycorpus, removeWords, c("exchanged", "Password Vault"))
  mycorpus <- tm_map(mycorpus, PlainTextDocument)
  # Flatten the cleaned corpus back into a one-column data frame of text
  dataframe<-data.frame(text=unlist(sapply(mycorpus, `[`, "content")),stringsAsFactors=FALSE)
  write.csv(dataframe, file = "C:/Users/sachin.babladi/Desktop/MyData_2.csv")
  # NOTE(review): classify_polarity() expects a character vector; passing the
  # whole data frame relies on implicit coercion -- confirm with sentiment docs.
  class_pol = classify_polarity(dataframe, algorithm="bayes")
  polarity = class_pol[,4]
}
|
37afaf27ff6eae8b67f82b3668aa13d4de7bd5b8
|
6d6ee3156d44f079df9712753a9f4de77f806a24
|
/functions/fn_execution_coordinator.R
|
228f41e0a932ebaee70815b0b42b254783cd2efa
|
[
"Apache-2.0"
] |
permissive
|
chowagiken-hubacz/website-classification
|
1dc2d55bb2c4f0f8f2392d56d9d843498bbf13fe
|
99805da874eadf53e5584a7f223d6d6fc8202279
|
refs/heads/master
| 2023-03-20T09:29:22.285088
| 2020-06-11T13:50:50
| 2020-06-11T13:50:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,292
|
r
|
fn_execution_coordinator.R
|
# The intelligence of the program - the executor.
# Decides, based on the control table, which models are called with which data.
# Create the `master` namespace environment once; re-sourcing this file must
# not wipe functions already registered in it.
# FIX: `!exists(...)` instead of comparing against the reassignable `F`.
if (!exists("master")) {
  master <- new.env()
}
# FUNCTION 1: decide which model to run for one experiment.
# !!!! Always wrap calls in try() - a failing model must not kill the batch !!!!
# IN: one row of the control data frame (model name + its hyperparameters);
#     globalSeed seeds the model functions for reproducibility.
# OUT: the list of metrics returned by the dispatched master$fn_* function,
#      or list("Model type unknown") for an unrecognized model string.
master$execute_Experiment <- function(SteuerDfLine, globalSeed = 1337){
# 1. Decision: which model function gets called?
writeLines(paste(Sys.time()," ++++++++++ Testing a ",SteuerDfLine$model," Model", sep=""))
if(SteuerDfLine$model == "NaiveBayes") {
modelMetrics <- master$fn_naivebayes(master$load_dataStack(SteuerDfLine$data),
globalSeed = globalSeed)
} else if (SteuerDfLine$model == "xgboost"){
modelMetrics <- master$fn_xgboost(master$load_dataStack(SteuerDfLine$data),
maxdepth = SteuerDfLine$maxdepth,
gamma = SteuerDfLine$gamma,
nround = SteuerDfLine$nround,
earlystop = SteuerDfLine$earlystop,
globalSeed = globalSeed)
} else if (SteuerDfLine$model == "randomForest"){
modelMetrics <- master$fn_rndForest(master$load_dataStack(SteuerDfLine$data),
ntree = SteuerDfLine$ntree,
mtry = SteuerDfLine$mtry,
globalSeed = globalSeed)
} else if (SteuerDfLine$model == "svm_1vr"){
modelMetrics <- master$fn_svm_1vR(master$load_dataStack(SteuerDfLine$data),
cost = SteuerDfLine$cost,
globalSeed = globalSeed)
} else if (SteuerDfLine$model == "svm_1v1"){
modelMetrics <- master$fn_svm_1v1(master$load_dataStack(SteuerDfLine$data),
cost = SteuerDfLine$cost,
globalSeed = globalSeed)
# } else if (SteuerDfLine$model == "mlp"){
} else if (SteuerDfLine$model %in% c("mlp", "mlp_threshold")){
modelMetrics <- master$fn_mlp_1(master$load_dataStack(SteuerDfLine$data),
ModelNr = SteuerDfLine$modelnr,
epochs = SteuerDfLine$epochs,
batchSize = SteuerDfLine$batchsize,
Threshold = SteuerDfLine$threshold,
globalSeed = globalSeed)
} else if (SteuerDfLine$model == "cnn"){
# CNN is the only model fed raw (untokenized) samples instead of the DTM stack.
modelMetrics <- master$fn_cnn(master$load_rawDataStack(SteuerDfLine$data),
ModelNr = SteuerDfLine$modelnr,
epochs = SteuerDfLine$epochs,
Threshold = SteuerDfLine$threshold,
batchSize = SteuerDfLine$batchsize,
sequenceLength = SteuerDfLine$sequenceLength,
maxNumWords = SteuerDfLine$MaxNumWords,
globalSeed = globalSeed)
}else {
modelMetrics <- list("Model type unknown")
}
# 2. Return the model's metrics back to the caller's row
return(modelMetrics)
}
# Look up the preprocessed train/validation/test DTM stack for a data set.
# The object is fetched from the global `dtmData` list under key "TVT_<name>".
master$load_dataStack <- function(dataName) {
  writeLines(paste0("++++++++++ Using Data from Dataset ", dataName))
  stack_key <- paste0("TVT_", dataName)
  return(dtmData[[stack_key]])
}
# Look up the raw (untokenized) train/validation/test samples for a data set
# from the global `TrainValTestSamples` list; used by the CNN model path.
master$load_rawDataStack <- function(dataName) {
  writeLines(paste0("++++++++++ Using Data from Dataset ", dataName))
  return(TrainValTestSamples[[dataName]])
}
# Run every experiment listed in the control data frame and collect results.
#
# steuerDF: control data frame, one row per experiment (model + parameters).
# Returns: the control data frame extended with result columns (confusion
#   matrices, plots, timings and - if the model provides them - prediction
#   data frames), one row per experiment.
master$execute_steuerDF <- function(steuerDF) {
  # FIX: preallocate and use seq_len() so an empty control table is a no-op
  # (the old `for (i in 1:nrow(steuerDF))` iterated over c(1, 0)).
  Resultlist <- vector("list", nrow(steuerDF))
  for (i in seq_len(nrow(steuerDF))) {
    my_row <- steuerDF[i, ]
    my_return <- master$execute_Experiment(my_row)
    # Single-bracket indexing keeps each result wrapped in a length-1 list,
    # turning these columns into list-columns that can hold arbitrary objects.
    my_row$cfm_val <- my_return[1]
    my_row$cfm_test <- my_return[2]
    my_row$cfm_val_plot <- my_return[3]
    my_row$cfm_test_plot <- my_return[4]
    my_row$Traintime <- my_return[5]
    my_row$Valtime <- my_return[6]
    my_row$Testtime <- my_return[7]
    if (length(my_return) > 7) {
      my_row$Val_DF <- my_return[8]
      my_row$Test_DF <- my_return[9]
    } else {
      my_row$Val_DF <- NA
      my_row$Test_DF <- NA
    }
    Resultlist[[i]] <- my_row
    cat(sprintf('\nCompleted Model %i of %i in current executionlist\n\n', i, nrow(steuerDF)))
  }
  Result_DF <- do.call("rbind", Resultlist)
  return(Result_DF)
}
|
7cf100ecc1520c7933cb0dcd3b3037744b2f1bc0
|
4a2d5b3331bfcf892aecc61c52d35fb0ef4584d2
|
/tests/testthat/test_server_getOMLDataSetQualities.R
|
ce25cce26f0e057f5f1ae22c389a14177fa37664
|
[
"BSD-3-Clause"
] |
permissive
|
openml/openml-r
|
ede296748ae1b9bcf22d661f4e25f495283402dd
|
530b00d9bde9895d5ba9224dbc812aeb3095e0f3
|
refs/heads/master
| 2022-11-09T02:51:47.329148
| 2022-10-19T19:50:09
| 2022-10-19T19:50:09
| 12,809,430
| 78
| 32
|
NOASSERTION
| 2019-11-19T16:00:48
| 2013-09-13T12:52:44
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 332
|
r
|
test_server_getOMLDataSetQualities.R
|
# Contract test: getOMLDataSetQualities() against the OpenML test server must
# return a non-empty two-column name/value table of data-set qualities.
test_that("getOMLDataSetQualities", {
with_test_server({
qual = getOMLDataSetQualities(1)
# At least one quality row; exactly the columns "name" and "value"
expect_data_frame(qual, min.rows = 1L, ncol = 2L)
expect_set_equal(names(qual), c("name", "value"))
# Quality names are unique identifiers; values are numeric with no NAs
expect_character(qual$name, unique = TRUE, any.missing = FALSE)
expect_numeric(qual$value, any.missing = FALSE)
})
})
|
9a49e57201be413334ea8e3c5c55c13a6f6ad8c7
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.end.user.computing/man/workdocs_describe_document_versions.Rd
|
c30f80a59f3ab7cd638d3c2a11e31bc1d0c8c525
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,219
|
rd
|
workdocs_describe_document_versions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workdocs_operations.R
\name{workdocs_describe_document_versions}
\alias{workdocs_describe_document_versions}
\title{Retrieves the document versions for the specified document}
\usage{
workdocs_describe_document_versions(
AuthenticationToken = NULL,
DocumentId,
Marker = NULL,
Limit = NULL,
Include = NULL,
Fields = NULL
)
}
\arguments{
\item{AuthenticationToken}{Amazon WorkDocs authentication token. Not required when using Amazon Web
Services administrator credentials to access the API.}
\item{DocumentId}{[required] The ID of the document.}
\item{Marker}{The marker for the next set of results. (You received this marker from a
previous call.)}
\item{Limit}{The maximum number of versions to return with this call.}
\item{Include}{A comma-separated list of values. Specify "INITIALIZED" to include
incomplete versions.}
\item{Fields}{Specify "SOURCE" to include initialized versions and a URL for the
source document.}
}
\description{
Retrieves the document versions for the specified document.
See \url{https://www.paws-r-sdk.com/docs/workdocs_describe_document_versions/} for full documentation.
}
\keyword{internal}
|
262958f1e3d744e3a6617dd2650987bc91f654c4
|
c81f7ac57ac005ea5b7dc058715d62aaad5e6aa4
|
/plot4.R
|
e2942b5cc7b97ebd0b91103da3cbd02c38e7637f
|
[] |
no_license
|
KareemGamgoum/ExData_Plotting1
|
331faa7afef46df4022f81171c69c0bec6f51515
|
62e69534cb6e2eb7a8adb6406f05284f488ae3a7
|
refs/heads/master
| 2021-01-21T14:33:03.968391
| 2017-06-24T15:55:25
| 2017-06-24T15:55:25
| 95,298,572
| 0
| 0
| null | 2017-06-24T13:15:42
| 2017-06-24T13:15:42
| null |
UTF-8
|
R
| false
| false
| 1,508
|
r
|
plot4.R
|
# This script creates plot4: a 2x2 panel of household power-consumption plots
# for 2007-02-01/02, saved as plot4.png.
# Set Working Directory
# NOTE(review): machine-specific absolute path -- anyone else must edit this
# line (or drop it and run from the data directory) before sourcing.
setwd("C:/Users/kareem.gamgoum/Desktop/DataScience/Course 4 - Exploratory Data Analysis/Course Project 1")
# Load in the data ("?" marks missing values in this data set)
rawdata <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
# Create DateTime variable by combining the Date and Time text columns
rawdata$DateTime <-strptime(paste(rawdata$Date, rawdata$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
# Create Date variable
rawdata$Date <- as.Date(rawdata$Date, "%d/%m/%Y")
# Now the data is prepared, we shall rename appropriately
data <- rawdata
# Filter to only look at 2007-02-01 and 2007-02-02
data <- subset(data, Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
# Construct Plot 4: four panels - active power, voltage, sub-metering, reactive power
# Save to a PNG file with a width of 480 pixels and a height of 480 pixels.
png('plot4.png', width=480, height=480)
par(mfrow = c(2,2))
plot(data$DateTime, data$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(data$DateTime, data$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(data$DateTime, data$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(data$DateTime, data$Sub_metering_2, col='red')
lines(data$DateTime, data$Sub_metering_3, col='blue')
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1), bty = "n",
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(data$DateTime, data$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
fab21558668fb6f1f32b597b42b6a669d1934105
|
6e04a59a255f1ea4e684c76f0f09123fa37a3fc5
|
/man/find_lag_time.Rd
|
91e174531adb3c2b60528a5f7b5a2d38f0a3eeac
|
[
"MIT"
] |
permissive
|
Ryan-Lima/gRandcanyonsand
|
12c19946588d41385cbb93e81a1be4702768ce19
|
1482d59120211ee4f34c6b142b31837acfb0dbea
|
refs/heads/main
| 2023-06-28T11:45:16.328664
| 2021-08-06T18:53:24
| 2021-08-06T18:53:24
| 326,823,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 603
|
rd
|
find_lag_time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02-near_gage_lag.R
\name{find_lag_time}
\alias{find_lag_time}
\title{Find lag time to the nearest gage}
\usage{
find_lag_time(rm, print = F)
}
\arguments{
\item{rm}{--numeric-- river mile, miles downstream from Lees Ferry}
\item{print}{print = FALSE by default, if print = TRUE, lag time and nearest gage printed out}
}
\value{
out = list('lagtime' = --lubridate duration object--, 'nearest_gage' = "LCR" , 'nearest_gage_index' = 3)
}
\description{
Find lag time to the nearest gage
}
\examples{
out <- find_lag_time(220)
}
|
f5ecef57514a32c84e18130fbd0c640370c0995d
|
efc016d1345168cae64db251731fbb1b309e3483
|
/plot1.R
|
c80c8204873fcec93fc565e78c441999d941a780
|
[] |
no_license
|
secastro/ExData_Plotting1
|
c8410048823a48df89044c1e4e6938b6d540bf48
|
87bc1fcd61318d74995cbfb9d9d251a5e86a86cd
|
refs/heads/master
| 2021-01-20T23:32:30.975263
| 2014-08-09T07:26:52
| 2014-08-09T07:26:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
plot1.R
|
# Read the prepared household power data via the shared loader script
source("load-data.R")
power_data <- load_data()

# Figure 1: histogram of global active power, written to a 480x480 PNG
png(filename = "figure1.png", width = 480, height = 480)
hist(power_data$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")
dev.off()
|
342ee185bbfc3497e83f35998b61ffdb923bebc2
|
c6286a95d80085cd0ca3d1081c31cdb217eb916e
|
/R/genelist_specific_profile.R
|
0fad74f75ff943cbc65d277799ebc2fe792d52c1
|
[] |
no_license
|
sethiyap/wangfangscripts
|
9751c84d1a9039c7deaa42aab069c55ca1c66c58
|
c40821b6636303553072090f2770c265da3429a5
|
refs/heads/master
| 2020-04-22T13:59:27.379301
| 2019-10-30T08:51:34
| 2019-10-30T08:51:34
| 170,428,191
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,539
|
r
|
genelist_specific_profile.R
|
#--- with TBP and Pol2
# Reference annotation (A. nidulans GFF) used to derive gene coordinates.
gff_file <- "/Users/Pooja/Documents/Data-Analysis/Others/Reference_Annotation/An/A_nidulans_FGSC_A4_version_s10-m04-r07_features.gff"
# Gene lists are pasted in from the macOS clipboard via pipe("pbpaste");
# list_1 must carry an expression value in column 2 (used to sort the heatmap).
list_1 <- read_delim(pipe("pbpaste"), delim="\t", col_names =FALSE) #-- plot sorted according to this list, provide expression value as well
list_2 <- read_delim(pipe("pbpaste"), delim="\t", col_names =FALSE)
setwd(".") #make the folder with bw file as working directory
# BigWig coverage tracks: ChIP signal to test, and the H3 control used for normalisation.
bw_test <- "H3K4me3_CGGACGTGG_CL_veA_wt_spore_ChIPmix22_normalized.bw" # specify the name of bw files
bw_control <- "H3_AGAACACC_an_spore_CL1019Mix_normalized.bw"
#--- for fig2D
#' genelist_specific_profiles
#'
#' Plots enrichment heatmaps (one per gene list) of a test ChIP signal
#' normalised by a control track (e.g. H3), over gene bodies +/- 1 kb.
#' @param gff_file Provide reference gff file for your species of interest
#' @param bw_test Provide the bw file name to be tested
#' @param bw_control Provide bw file name to be normalised with i.e. H3
#' @param list_1 list of the genes of interest with expression value in second column
#' @param list_2 only one column of random or control genes
#' @param output prefix for the output PDF file name
#'
#' @return Invisibly NULL; writes a "<output>_<n>_hm.pdf" heatmap as a side effect.
#' @export
#'
#' @examples
genelist_specific_profiles <- function(gff_file, bw_test,bw_control,list_1,list_2, output){
#--- packages
library(EnrichedHeatmap)
library(rtracklayer)
library(circlize)
library(rbamtools)
library(rtracklayer)
library(GenomicFeatures)
library(tidyverse)
library(extrafont)
loadfonts(device = "pdf")
# Gene coordinates from the reference annotation
gff <- makeTxDbFromGFF(gff_file, metadata = T)
genes <- genes(gff)
#--- provide the genelist data with expression value for the list one
# Sort list_1 by descending expression so heatmap rows follow expression rank
list_1 <- list_1 %>% arrange(desc(X2))
genes_1 <- subset(genes, genes$gene_id %in% list_1$X1)
genes_1 <- genes_1[match(list_1$X1,genes_1$gene_id),]
#--- get second list co-ordinates
genes_2 <- subset(genes, genes$gene_id %in% list_2$X1)
## prepare signal data
gene_lists <- list(genes_1, genes_2)
names(gene_lists) <- paste(gsub(pattern = "_[[:upper:]]{6,}_.*_normalized.bw",replacement = "", bw_test),"_list",seq(1:length(gene_lists)), sep="")
gene_lists <- tibble(name=names(gene_lists), data=gene_lists)
print(gene_lists)
bw_file_test <- import.bw(bw_test)
bw_file_control=import.bw(bw_control)
## generate normalised matrix in tidy way
# One smoothed coverage matrix per gene list, for both test and control tracks;
# negative smoothed values are clamped to 0.
dd <- gene_lists %>%
dplyr::mutate(mat=purrr::map(data, function(i){
nn <- EnrichedHeatmap::normalizeToMatrix(bw_file_test, i, value_column = "score",background = 0,
smooth = TRUE,extend = c(1000))
nn[nn<0]=0
return(nn)
})) %>%
dplyr::mutate(mat_h3=purrr::map(data, function(i){
nn <- EnrichedHeatmap::normalizeToMatrix(bw_file_control, i, value_column = "score",background = 0,
smooth = TRUE,extend = c(1000))
nn[nn<0]=0
return(nn)
}))
#--- normalise by h3 ----
# Ratio of test over control; +0.01 pseudocount avoids division by zero
dd2 <- dd %>% mutate(norm_mat = map2(mat,mat_h3, function(x,y){ mm = (x+0.01) / (y+0.01)
return(mm)
} ))
print(dd2)
# Build an EnrichedHeatmap list: all panels share the colour scale derived
# from the quantiles of the FIRST matrix so lists are visually comparable.
get_enrichment_heatmap_list <- function(x, names, titles, ...) {
ll <- length(x)
## first heatmap
ehml <- EnrichedHeatmap(mat = x[[1]], name = names[[1]], column_title = titles[[1]], show_heatmap_legend = T,
col=colorRamp2(quantile(x[[1]], c(0.1,0.5,0.6,0.9,0.99)),
col=c("#feebe2","#fcc5c0","#fa9fb5","#c51b8a","#7a0177")),
use_raster = TRUE, ...)
## several other heatmaps if length of x > 1.
if (ll > 1) {
for (i in 2:ll) {
print(i)
ehml <- ehml +
EnrichedHeatmap(
mat = x[[i]],
col=colorRamp2(quantile(x[[1]], c(0.1,0.5,0.6,0.9,0.99)),
col=c("#feebe2","#fcc5c0","#fa9fb5","#c51b8a","#7a0177")),
name = ifelse(length(names) >= i, names[i], "NA"),
use_raster = TRUE,
column_title = ifelse(length(titles) >= i, titles[i], "NA"),
show_heatmap_legend = ifelse(length(names) >= i, TRUE, FALSE), ...
) ## legend will be shown only if the name is given for a heatmap.
}
}
return(ehml)
}
ehm_list <- get_enrichment_heatmap_list(x = dd2$norm_mat,names = dd2$name,
titles = dd2$name,
cluster_rows = FALSE,
row_order=NULL,
show_row_names = TRUE,
axis_name_rot = 90,
heatmap_legend_param = list(color_bar = "continuous",legend_direction="horizontal", legend_width = unit(3, "cm"),
title_position = "topcenter",labels_gp = gpar(fonsize=12, fontfamily="Arial")),
axis_name = c("-1kb","TSS","TES", "+1kb"),
axis_name_gp = gpar(fonsize=12, fontfamily="Arial"),
top_annotation = HeatmapAnnotation(lines = anno_enriched(axis_param =list( facing="inside",side="left",gp=gpar(fonsize=12, fontfamily="Arial")),
ylim = c(0.8,4),height = unit(2, "cm")
#ylim = c(0.7,1.8),height = unit(2, "cm")
# ylim = c(0.1,5.8),height = unit(2, "cm")
)
)
)
# row_order_list = row_order(ehm_list)
# list_1$H3K4me3 <- list_1[row_order_list,]$X1
print("plotting....")
pdf(file=paste(output, length(genes_1), "hm.pdf", sep="_"), width=6, height=60)
draw(ehm_list, heatmap_legend_side = "top", gap = unit(2, "mm"))
dev.off()
}
|
94ba51ac219c9c0afa1018308270b2a3f0786e25
|
a6c370c5411e5c9f78dc0009595305eb83e26b82
|
/GradientBoosting_Data_Modelling.R
|
4e43d61f9816ef75408f4b44b8dec98e33a084ba
|
[] |
no_license
|
Sanjanarajagopal/employee-attrition-analysis
|
09760bc42ddd0afeb6b3c728a50cab098539e312
|
5a88c789b3778db3dfaeb77692d5735fcae6a010
|
refs/heads/master
| 2020-04-05T05:19:13.342633
| 2018-12-15T16:37:55
| 2018-12-15T16:37:55
| 156,590,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,393
|
r
|
GradientBoosting_Data_Modelling.R
|
#Title : 707 Project - Gradient Boosting Algorithm
#@Author : Sanjana Rajagopala
#Start Date : November 19, 2018

#Load the required libraries
library(caret)
library(dplyr)
library(arules)
library(klaR)
library(tictoc)
library(mlbench)
library(pROC)
library(gbm)
library(ROSE)
library(rpart)

################################
#
#Introduction to Gradient Boosting classifier
# The Gradient boosting classifier is an ensemble of weak prediction models such as decision tree. It builds stage-wise models
# and generalizes each of them using optimization of loss function.
###############################

#Step 1: Data preprocessing for Gradient Boosting
##All input features are kept categorical, so every column in the dataset
#must be converted into categorical type.
#NOTE(review): depends on `employee_data_copy` created earlier in the project.
GBM_data <- employee_data_copy

#Retain the columns that were discretized and remove the corresponding base columns
GBM_data$Age <- NULL
GBM_data$HourlyRate <- NULL
GBM_data$DistanceFromHome <- NULL
GBM_data$PercentSalaryHike <- NULL
GBM_data$YearsWithCurrManager <- NULL
dim(GBM_data)

#Investigate the numerical columns
all_indices <- sapply(GBM_data, is.numeric)
str(GBM_data[, all_indices])

#MonthlyIncome and MonthlyRate hold many distinct numeric values; converting
#them to factors directly would create an unmanageable number of levels, so
#discretize them into interval bins first.
GBM_data$MonthlyIncome <- discretize(GBM_data$MonthlyIncome, method = 'interval')
GBM_data$MonthlyRate <- discretize(GBM_data$MonthlyRate, method = 'interval', categories = 4)

#Now, convert the remaining numerical columns into categorical
GBM_data[, all_indices] <- lapply(GBM_data[,all_indices], as.factor)
str(GBM_data)

#Step 2: Creating data model
#Create Data partition with 80% as training and 20% as testing
train_indices <- createDataPartition(GBM_data$Attrition
,p = 0.8
,list = FALSE)
gbm_train <- GBM_data[train_indices,]
gbm_test <- GBM_data[-train_indices,]

#Define the cross validation (5-fold, with class probabilities for ROC)
tr_control <- trainControl(method = 'cv', number = 5, classProbs = TRUE)

#Model 1 - Basic GBM Model
tic('GBM_Model_1')
GBM_model_1 = suppressWarnings(train(Attrition~., data=gbm_train
, method="gbm"
, verbose = FALSE
, metric="ROC"
, trControl=tr_control))
toc()

#Predict using the GBM Model 1
predict_GBM1 <- suppressWarnings(
predict(GBM_model_1
,newdata = gbm_test
)
)

#Evaluating the model
#FIX(review): the original evaluated `predict_RF1` (a random-forest object
#from another script) instead of this model's own predictions.
confusionMatrix(as.factor(predict_GBM1), as.factor(gbm_test$Attrition))
roc(as.numeric(gbm_test$Attrition), as.numeric(predict_GBM1))
#This model also shows a fairly poor AUC-ROC. Hence, it implies we need to handle class imbalance
#Class Imbalance problem
#In binary classification problem, when one class outnumbers the other class by a large proportion the machine learning
#algorithms do not get enough information to make accurate prediction for the minority class. This is mainly
#because the ML algorithms assume that the data set has balanced class distributions and errors obtained from different
#classes have same cost.
#This depicts the imbalance
table(GBM_data$Attrition)
#Model 2 - Gradient Boosting Model with SMOTE
#SMOTE - It is the Synthetic Minority Oversampling Technique that generates artificial data based on feature space
# rather than data space similarities from minority samples. The bootstrapping and k-nearest neighbours techniques are used to achieve this.
tic('GBM_Model_2')
#Enable SMOTE resampling inside each cross-validation fold
tr_control$sampling <-'smote'
GBM_model_2 = suppressWarnings(train(Attrition~., data=gbm_train
, method="gbm"
, metric="ROC"
, verbose = F
, trControl=tr_control))
toc()
#Predict using the smote-fit GBM Model 2
predict_gbm2 <- suppressWarnings(
predict(GBM_model_2
,newdata = gbm_test
)
)
#Evaluating the GBM model 2
confusionMatrix(as.factor(predict_gbm2), as.factor(gbm_test$Attrition))
auc(as.numeric(gbm_test$Attrition), as.numeric(predict_gbm2))
#Model 3 - GBM Model using oversampled data
#Using the oversampling method in ROSE package
over_gbm_train <- ovun.sample(Attrition ~.,data = gbm_train, method = 'over')$data
table(over_gbm_train$Attrition)
#Now, it looks balanced
over_tr_control <- trainControl(method = 'cv', number = 5, classProbs = T)
tic('GBM_Model_3')
GBM_model_3 = suppressWarnings(train(Attrition~., data=over_gbm_train
, method="gbm"
, metric="ROC"
, verbose = F
, trControl=over_tr_control))
toc()
#Predict using the oversampled GBM Model 3
predict_gbm3 <- suppressWarnings(
predict(GBM_model_3
,newdata = gbm_test
))
#Evaluating the GBM model 3
confusionMatrix(as.factor(predict_gbm3), as.factor(gbm_test$Attrition))
auc(as.numeric(gbm_test$Attrition), as.numeric(predict_gbm3))
#An increase in the AUC from 63% to 73% by oversampling
#Model 4 - GBM Model using under+over sampled data
#Using the under sampling method in ROSE package
under_gbm_train <- ovun.sample(Attrition ~.,data = gbm_train, method = 'under')$data
table(under_gbm_train$Attrition)
#Now, it looks balanced. However, there is significant loss of information as it can be observed that the number of obserrvations got reduced
#from ~980 to ~190.
#Hence, choosing the combination of over and under sampling together.
both_gbm_train <- ovun.sample(Attrition~., data = gbm_train, method = 'both', p =0.5, seed = 1)$data
tic('GBM_Model_4')
GBM_model_4 = suppressWarnings(train(Attrition~., data=both_gbm_train
, method="gbm"
, metric="ROC"
, verbose = F
, trControl=over_tr_control))
toc()
#Predict using the over+under sampled GBM Model 4
predict_gbm4 <- suppressWarnings(
predict(GBM_model_4
,newdata = gbm_test
))
#Evaluating the GBM model 4
confusionMatrix(as.factor(predict_gbm4), as.factor(gbm_test$Attrition))
auc(as.numeric(gbm_test$Attrition), as.numeric(predict_gbm4))
#Model 5 - GBM Model using the data sample by ROSE method
#ROSE generates a synthetic balanced sample (N = 1470 rows) of the training data
rose_gbm_train <- ROSE(Attrition ~.,data = gbm_train, seed = 1, N = 1470)$data
table(rose_gbm_train$Attrition)
tic('GBM_Model_5')
GBM_model_5 = suppressWarnings(train(Attrition~., data=rose_gbm_train
, method="gbm"
, metric="ROC"
, verbose = F
, trControl=over_tr_control))
toc()
#Predict using the rose sampled GBM Model 5
predict_gbm5 <- suppressWarnings(
predict(GBM_model_5
,newdata = gbm_test
))
#Evaluating the GBM model 5
confusionMatrix(as.factor(predict_gbm5), as.factor(gbm_test$Attrition))
auc(as.numeric(gbm_test$Attrition), as.numeric(predict_gbm5))
#Model 6 - GBM Model using the weighting tecnhique in the dataset
#Weighting technique helps in dealing with class imbalance by punishing the errors in the minority class
set.seed(3000)
#Inverse-frequency weights: each class contributes half of the total weight
model_weights <- ifelse(gbm_train$Attrition == "No",
(1/table(gbm_train$Attrition)[1]) * 0.5,
(1/table(gbm_train$Attrition)[2]) * 0.5)
tic('GBM_Model_6')
GBM_model_6 = suppressWarnings(train(Attrition~., data=gbm_train
, method="gbm"
, metric="ROC"
, verbose = F
, trControl=over_tr_control
, weights = model_weights))
toc()
#Predict using the weighted GBM Model 6
predict_gbm6 <- suppressWarnings(
predict(GBM_model_6
,newdata = gbm_test
))
#Evaluating the GBM model 6
confusionMatrix(as.factor(predict_gbm6), as.factor(gbm_test$Attrition))
gbm_auc <- auc(as.numeric(gbm_test$Attrition), as.numeric(predict_gbm6))
gbm_auc
#Plotting the accuracies of all the models
#par(new=T) overlays each successive ROC curve on the same device
roc.curve(gbm_test$Attrition, predict_GBM1, col = 'black')
par(new=T)
roc.curve(gbm_test$Attrition, predict_gbm2, col = 'blue')
par(new=T)
roc.curve(gbm_test$Attrition, predict_gbm3, col = 'red')
par(new=T)
roc.curve(gbm_test$Attrition, predict_gbm4, col = 'green')
par(new=T)
roc.curve(gbm_test$Attrition, predict_gbm5, col = 'pink')
par(new=T)
roc.curve(gbm_test$Attrition, predict_gbm6, col = 'burlywood')
#It can be concluded that the GBM with oversampled dataset and the GBM with weighting
#gives the best AUC metric when compared to the other models. Both give almost the same value of ~74%.
|
6a44de178932cff06c7a5a56892bf9e6df5b744b
|
e9ed3aaa01ba50bd57d88b9d918960e2010a7351
|
/plot1.R
|
a920b170f7747548ab2a94c26f17e54852a234b0
|
[] |
no_license
|
cordlepe/ExData_Plotting1
|
902ccd5a42c80afa6c23ec3541d819ae0144106a
|
276064ba51729f5fa98f0173403914c3adc131a9
|
refs/heads/master
| 2020-04-24T11:55:18.540429
| 2019-02-22T14:15:39
| 2019-02-22T14:15:39
| 171,941,410
| 0
| 0
| null | 2019-02-21T20:36:36
| 2019-02-21T20:36:35
| null |
UTF-8
|
R
| false
| false
| 507
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# NOTE(review): removed `rm(list = ls())`; wiping the caller's global
# environment is a hostile side effect a script should never have.
file <- "./data/household_power_consumption.txt"
#read in specified file ("?" marks missing values)
df <- read.table(file, header = TRUE, sep = ";", na.strings = "?")
#convert date to date type
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
#keep only data for specific dates
df <- subset(df, Date == "2007-02-01" | Date == "2007-02-02")
png("plot1.png", width = 480, height = 480)
#plot1
hist(df$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
a9aa286507dd2ab924540a2b439cd63f48007e98
|
d01f116539c9ec88b1327f61093b715f41f88e87
|
/w2_lec05_preprocess.R
|
69fd092c46c4aa4a7fe96fdaf29fa46b458c14ec
|
[] |
no_license
|
jlnguyen/08-practical-ml
|
6e8a00305791a132f4f4227cf5128d11f76f7bef
|
40b0e87c1e67fe9c4fd1f309fa10d01be2ad6fc9
|
refs/heads/master
| 2021-01-10T09:49:05.704187
| 2015-11-24T01:38:44
| 2015-11-24T01:38:44
| 46,034,993
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,376
|
r
|
w2_lec05_preprocess.R
|
# Coursera JHPH Data Science
# 08 - Practical Machine Learning
# Week 2 | Lecture 5 - Basic preprocessing
#
# Joe Nguyen | 13 Nov, 2015

# NOTE(review): removed the machine-specific `setwd()` and `rm(list = ls())`
# lines -- nothing below reads from the working directory (the spam data
# comes from the kernlab package), and clearing the caller's environment is
# a side effect scripts should not have.

library(caret); library(kernlab); data(spam)

# 75/25 train/test split, stratified on the outcome
inTrain <- createDataPartition(y=spam$type,
p=0.75, list=FALSE)
training <- spam[inTrain,]
testing <- spam[-inTrain,]

# capitalAve is heavily right-skewed
hist(training$capitalAve, main = "", xlab = "ave. capital run length")
mean(training$capitalAve)
sd(training$capitalAve)

# Standardising (manual): center and scale by the training statistics
trainCapAve <- training$capitalAve
trainCapAveS <- (trainCapAve - mean(trainCapAve)) / sd(trainCapAve)
mean(trainCapAveS)
sd(trainCapAveS)

# Standardising test set -> have to standardise using TRAINING mean/sd
# (never the test set's own statistics)
testCapAve <- testing$capitalAve
testCapAveS <- (testCapAve - mean(trainCapAve)) / sd(trainCapAve)
mean(testCapAveS)
sd(testCapAveS)

## Standardising using 'preProcess' (col 58 is label col: (nonspam, spam))
preObj <- preProcess(training[,-58], method = c("center", "scale"))
trainCapAveS <- predict(preObj, training[,-58])$capitalAve
mean(trainCapAveS)
sd(trainCapAveS)
testCapAveS <- predict(preObj, testing[,-58])$capitalAve
mean(testCapAveS)
sd(testCapAveS)

# preProcess can also be passed directly to train()
set.seed(32343)
modelFit <- train(type ~ ., data = training,
preProcess = c("center", "scale"),
method = "glm")
modelFit
hist(trainCapAveS)

## Standardising - Box-Cox transforms (makes the skewed variable more normal)
preObj <- preProcess(training[,-58], method = c("BoxCox"))
trainCapAveSTf <- predict(preObj, training[,-58])$capitalAve
par(mfrow = c(1,2))
hist(trainCapAveSTf)
qqnorm(trainCapAveSTf)

## Imputing data
set.seed(13343)
# Make ~5% of values NA to simulate missingness
training$capAve <- training$capitalAve
selectNA <- rbinom(dim(training)[1], size = 1, prob = 0.05) == 1
training$capAve[selectNA] <- NA

## Now handle missing data ##
# Impute with k-nearest neighbours and standardise
preObj <- preProcess(training[,-58], method = "knnImpute")
capAve <- predict(preObj, training[,-58])$capAve

# Standardise true values for comparison against the imputed ones
capAveTruth <- training$capitalAve
capAveTruth <- (capAveTruth - mean(capAveTruth)) / sd(capAveTruth)
quantile(capAve - capAveTruth)
quantile((capAve - capAveTruth)[selectNA])
quantile((capAve - capAveTruth)[!selectNA])
|
2812a0247d54d20a7f88bf93bf491ecd3eff63a0
|
307b0f73161701e48e24192aea10713c4c76db13
|
/man/index.cell_label.Rd
|
88257fff3f54d53067960e8018a1e3c0adfdba42
|
[] |
no_license
|
spgarbet/tangram
|
aef70355a5aa28cc39015bb270a7a5fd9ab4333c
|
bd3fc4b47018ba47982f2cfbe25b0b93d1023d4f
|
refs/heads/master
| 2023-02-21T03:07:43.695509
| 2023-02-09T17:47:22
| 2023-02-09T17:47:22
| 65,498,245
| 58
| 3
| null | 2020-03-24T15:28:05
| 2016-08-11T20:07:01
|
R
|
UTF-8
|
R
| false
| true
| 617
|
rd
|
index.cell_label.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render-index.R
\name{index.cell_label}
\alias{index.cell_label}
\title{Generate an index from a label object}
\usage{
\method{index}{cell_label}(object, id = "tangram", key.len = 4, ...)
}
\arguments{
\item{object}{cell; The cell for indexing}
\item{id}{character; an additional specifier for the object key}
\item{key.len}{numeric; length of key to generate}
\item{...}{additional arguments to renderer. Unused}
}
\value{
A list of strings containing key, source and value
}
\description{
Overrides to generate no indexing on labels
}
|
b88a1364c02172988d39c3db8715c27be6cbafc5
|
2c1805e79d915c88faa0f6c258fc41e95937dba5
|
/R/Unity/player_log_quest.R
|
aa8bc165cb3250c679874a75897266fc86f0da4e
|
[] |
no_license
|
hejtmy/VR_City_Analysis
|
b85c14ddc7aad5db8aeeb353ae02462986b20e59
|
b149d3f52d76fc8fb0104fa42ec7b38ae7470ba0
|
refs/heads/master
| 2021-01-18T16:16:53.962471
| 2017-05-21T22:01:26
| 2017-05-21T22:01:34
| 49,779,651
| 0
| 0
| null | 2017-02-18T17:35:16
| 2016-01-16T15:48:50
|
R
|
UTF-8
|
R
| false
| false
| 949
|
r
|
player_log_quest.R
|
#' Extracts player log information only for the duration of the quest
#'
#' @param quest_set quest table; important because of the information about
#'   the set in which the quest took place
#' @param trial_sets list of trial sets, each holding a `player_log` table
#' @param quest optional quest row (as returned by `get_quest`)
#' @param quest_order_session optional session order used to locate the quest
#'   when `quest` is not supplied
#' @param include_teleport should the teleport period be part of the window?
#'
#' @return player-log rows inside the quest's time window, or NULL when the
#'   quest cannot be identified unambiguously
player_log_quest = function(quest_set, trial_sets = NULL, quest = NULL, quest_order_session = NULL, include_teleport = TRUE){
  if(is.null(trial_sets)) return(NULL)
  # FIX: fail gracefully instead of erroring with "object 'quest_line' not
  # found" when neither identifier is supplied.
  if(is.null(quest) && is.null(quest_order_session)){
    print("player_log_quest:: Either quest or quest_order_session must be given")
    return(NULL)
  }
  if(!is.null(quest)) quest_line = filter(quest_set, order_session == quest$order_session)
  if(!is.null(quest_order_session)) quest_line = filter(quest_set, order_session == quest_order_session)
  if(nrow(quest_line) > 1){
    print("player_log_quest:: Multiple quests have the same name")
    return(NULL)
  }
  if(is.null(quest)) quest = get_quest(quest_set, trial_sets, quest_order_session)
  quest_times = get_quest_timewindow(quest, include_teleport = include_teleport)
  # data.table-style time filter on the owning set's player log
  player_log = trial_sets[[quest_line$set_id]]$player_log[Time > quest_times$start & Time < quest_times$finish,]
  return(player_log)
}
|
815756c90414e275b55f12aec863d44c87bca54b
|
c04075b8045b8412f8fe3aeb25e02cee2821cc05
|
/coursera/c2week1 PartialMatching.R
|
61e6693c59bbe8b78dc3f2e094b0c6182c2da018
|
[] |
no_license
|
tmPolla/R
|
f2e174b1cd75ce9cd3a191e51f403205834a8ad1
|
e6780f0baaf855075bd271fea87d781a7b996bb5
|
refs/heads/master
| 2021-01-12T05:24:06.333057
| 2018-07-21T05:15:00
| 2018-07-21T05:15:00
| 77,921,688
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 252
|
r
|
c2week1 PartialMatching.R
|
##Data Science - Johns Hopkins University
##coursera
## course 2 - R
##week1
# Demonstration of partial name matching when subsetting lists.
x<- list(aardvark=1:5)
# `$` partially matches element names, so x$a finds "aardvark" -> 1:5
x$a
# `[[` matches exactly by default; no element is named exactly "a" -> NULL
x[["a"]]
# exact = FALSE re-enables partial matching for `[[` -> 1:5 again
x[["a",exact=FALSE]]
|
8baf6917b6e41f5c58d1ac61f9f8ca7eb4530949
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.autoscaling/man/record_lifecycle_action_heartbeat.Rd
|
960633fa421a921815670fcc3b140338be9358e0
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,605
|
rd
|
record_lifecycle_action_heartbeat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.autoscaling_operations.R
\name{record_lifecycle_action_heartbeat}
\alias{record_lifecycle_action_heartbeat}
\title{Records a heartbeat for the lifecycle action associated with the specified token or instance}
\usage{
record_lifecycle_action_heartbeat(LifecycleHookName, AutoScalingGroupName,
LifecycleActionToken = NULL, InstanceId = NULL)
}
\arguments{
\item{LifecycleHookName}{[required] The name of the lifecycle hook.}
\item{AutoScalingGroupName}{[required] The name of the Auto Scaling group.}
\item{LifecycleActionToken}{A token that uniquely identifies a specific lifecycle action associated with an instance. Amazon EC2 Auto Scaling sends this token to the notification target that you specified when you created the lifecycle hook.}
\item{InstanceId}{The ID of the instance.}
}
\description{
Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using PutLifecycleHook.
}
\details{
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
\enumerate{
\item (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.
\item (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
\item Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
\item \strong{If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.}
\item If you finish before the timeout period ends, complete the lifecycle action.
}
For more information, see \href{http://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html}{Auto Scaling Lifecycle} in the \emph{Amazon EC2 Auto Scaling User Guide}.
}
\section{Accepted Parameters}{
\preformatted{record_lifecycle_action_heartbeat(
LifecycleHookName = "string",
AutoScalingGroupName = "string",
LifecycleActionToken = "string",
InstanceId = "string"
)
}
}
\examples{
# This example records a lifecycle action heartbeat to keep the instance
# in a pending state.
\donttest{record_lifecycle_action_heartbeat(
AutoScalingGroupName = "my-auto-scaling-group",
LifecycleActionToken = "bcd2f1b8-9a78-44d3-8a7a-4dd07d7cf635",
LifecycleHookName = "my-lifecycle-hook"
)}
}
|
89cc18ebc117b798db319d58e7dd719d73c1d02d
|
bbd803cd4fe2623ae8f41f46586684691a2e7f92
|
/tests/testthat/test_clusterSingle.R
|
5fee6e838950b4ea2dae7aac46ae4e965759b454
|
[] |
no_license
|
12379Monty/clusterExperiment
|
96d3359aefe60a65bfdfd3eb4f05a647347c020d
|
a26d494a9a23d467269d85c69348c4904a08bb56
|
refs/heads/master
| 2021-01-21T15:21:53.986787
| 2017-06-14T23:30:20
| 2017-06-14T23:30:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,536
|
r
|
test_clusterSingle.R
|
context("clusterSingle")
source("create_objects.R")
test_that("`clusterSingle` works with matrix, ClusterExperiment objects, and
SummarizedExperiments", {
clustNothing <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=FALSE)
expect_equal(clusterLabels(clustNothing),"clusterSingle")
expect_is(clustNothing, "ClusterExperiment")
expect_is(clustNothing, "SummarizedExperiment")
#test clusterLabel
clustNothing2 <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=FALSE,clusterLabel="myownClustering")
expect_equal(clusterLabels(clustNothing2),"myownClustering")
#test default 01 distance
x1 <- clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
isCount=FALSE)
expect_error(clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(distFunction=function(x){dist(x,method="manhattan")}),isCount=FALSE),"distance function must give values between 0 and 1")
#test default 01 distance
x2<-clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
isCount=FALSE)
#warn wrong arguments
expect_warning(clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=FALSE),"do not match the choice of typeAlg")
#turn off warning
expect_silent(clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3,checkArgs=FALSE),isCount=FALSE))
clustNothing2 <- clusterSingle(se, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=FALSE)
expect_equal(clusterMatrix(clustNothing2), clusterMatrix(clustNothing))
#test running on clusterExperiment Object -- should add the new clustering
clustNothing3 <- clusterSingle(clustNothing2, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=4),is=FALSE)
expect_equal(NCOL(clusterMatrix(clustNothing3)),2)
expect_equal(length(table(primaryCluster(clustNothing3))),4,info="Check reset primary cluster after run clusterSingle")
})
test_that("Different options algorithms of `clusterD` ", {
#check algorithms
clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
isCount=FALSE)
clusterSingle(mat, clusterFunction="hierarchical01",
subsample=FALSE, sequential=FALSE,
isCount=FALSE)
clusterSingle(mat, clusterFunction="hierarchicalK", clusterDArgs=list("k"=3),
subsample=FALSE, sequential=FALSE,
isCount=FALSE)
#K algorithm options
clusterSingle(mat, clusterFunction="hierarchicalK",
subsample=FALSE, sequential=FALSE, clusterDArgs=list(findBestK=TRUE,removeSil=TRUE),
isCount=FALSE)
clusterSingle(mat, clusterFunction="pam", clusterDArgs=list(findBestK=TRUE,removeSil=TRUE),
subsample=FALSE, sequential=FALSE,
isCount=FALSE)
########
#Check clusterD
########
###Check pam exactly same:
x<-clusterD(mat, clusterFunction="pam",k=3,
minSize=1, removeSil=FALSE)
expect_equal(length(x),ncol(mat))
x2<-cluster::pam(t(mat),k=3,cluster.only=TRUE)
expect_equal(x,x2)
###Check hierarchicalK exactly same:
x<-clusterD(mat, clusterFunction="hierarchicalK",k=3,
minSize=1, removeSil=FALSE)
expect_equal(length(x),ncol(mat))
x2<-stats::cutree(stats::hclust(dist(t(mat))),k=3)
expect_equal(x,x2)
#check giving wrong parameters gives warning:
expect_warning(clusterD(mat, clusterFunction="tight", alpha=0.1,
minSize=5, removeSil=TRUE),"do not match the choice of typeAlg")
expect_warning(clusterD(mat, clusterFunction="pam", alpha=0.1,
minSize=5, removeSil=TRUE, findBestK=TRUE),"do not match the choice of typeAlg")
expect_warning(clusterD(mat, clusterFunction="tight", alpha=0.1,
clusterArgs=list(evalClusterMethod="average")),"arguments passed via clusterArgs")
expect_warning(clusterD(mat, clusterFunction="hierarchical01", alpha=0.1,
clusterArgs=list(minSize.core=4)),"arguments passed via clusterArgs")
#check turn off if checkArgs=TRUE
expect_silent(clusterD(mat, clusterFunction="tight", alpha=0.1,checkArgs=FALSE,
minSize=5, removeSil=TRUE))
expect_silent(clusterD(mat, clusterFunction="pam", alpha=0.1,checkArgs=FALSE,
minSize=5, removeSil=TRUE, findBestK=TRUE))
expect_silent(clusterD(mat, clusterFunction="tight", alpha=0.1,checkArgs=FALSE,
clusterArgs=list(evalClusterMethod="average")))
expect_silent(clusterD(mat, clusterFunction="hierarchical01", alpha=0.1,checkArgs=FALSE,
clusterArgs=list(minSize.core=4)))
})
test_that("Different options of subsampling",{
#check subsample
clustSubsample <- clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=3, k=3),
clusterDArgs=list(k=3),isCount=FALSE)
expect_equal(NCOL(coClustering(clustSubsample)),NCOL(mat))
clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=3, k=3,clusterFunction="kmeans"),
clusterDArgs=list(k=3),isCount=FALSE)
set.seed(1045)
clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=20, k=3,classifyMethod="InSample"),
clusterDArgs=list(k=3),isCount=FALSE)
set.seed(1045)
clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=40, k=3,classifyMethod="OutOfSample"),
clusterDArgs=list(k=3),isCount=FALSE)
set.seed(1045)
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=20, k=3,classifyMethod="OutOfSample"),
clusterDArgs=list(k=3),isCount=FALSE),"NA values found in D")
#errors in missing args in subsample
expect_warning(clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=3),
clusterDArgs=list(k=3), isCount=FALSE),
"did not give 'k' in 'subsampleArgs'.")
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=TRUE, sequential=FALSE,
subsampleArgs=list(resamp.num=3), isCount=FALSE),
"must pass 'k' in subsampleArgs")
})
test_that("Different options of clusterD",{
#check errors and warnings
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=TRUE,
seqArgs=list(verbose=FALSE),
isCount=FALSE,clusterDArgs=list("typeAlg"=="K")),
"seqArgs must contain element 'k0'")
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=TRUE,
seqArgs=list(verbose=FALSE),
isCount=FALSE, clusterDArgs=list("findBestK"==TRUE)),
"seqArgs must contain element 'k0'")
expect_warning(clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3), isCount=FALSE),
"do not match the choice of typeAlg")
expect_warning(clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(findBestK=TRUE),isCount=FALSE),
"do not match the choice of typeAlg")
expect_error(clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(distFunction=function(x){abs(cor(t(x)))}),isCount=FALSE),
"distance function must have zero values on the diagonal")
})
test_that("Different options of seqCluster",{
#check sequential
clustSeq <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=TRUE,
isCount=FALSE,seqArgs=list(k0=5,verbose=FALSE))
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=TRUE,
isCount=FALSE), "must give seqArgs so as to identify k0")
clustSeq <- clusterSingle(mat, clusterFunction="tight",
subsample=FALSE, sequential=TRUE,
isCount=FALSE,seqArgs=list(k0=5,verbose=FALSE))
clustSeq <- clusterSingle(mat, clusterFunction="hierarchicalK",
subsample=FALSE, sequential=TRUE,
isCount=FALSE,seqArgs=list(k0=5,verbose=FALSE))
clustSeq <- clusterSingle(mat, clusterFunction="hierarchical01",
subsample=FALSE, sequential=TRUE,
isCount=FALSE,seqArgs=list(k0=5,verbose=FALSE))
})
test_that("Different options of `clusterSingle` ", {
#check isCount
clustCount <- clusterSingle(smSimCount, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=TRUE)
expect_error(clusterSingle(smSimData, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=TRUE),info="test error handling for isCount=TRUE when can't take log")
#check pca reduction
clustndims <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE, dimReduce="PCA",
ndims=3, clusterDArgs=list(k=3),isCount=FALSE)
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE, dimReduce="PCA",
ndims=NROW(simData)+1,
clusterDArgs=list(k=3),isCount=FALSE))
#check var reduction
clustndims <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
dimReduce="var", ndims=3,
clusterDArgs=list(k=3), isCount=FALSE)
expect_error(clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
dimReduce="var", ndims=NROW(mat)+1,
clusterDArgs=list(k=3),isCount=FALSE),
"the number of most variable features must be strictly less than the number of rows of input data matrix")
expect_warning(clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
dimReduce="none",ndims =3,
clusterDArgs=list(k=3),isCount=FALSE),
"specifying ndims has no effect if dimReduce==`none`")
clustndims <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE, dimReduce="cv",
ndims=3, clusterDArgs=list(k=3),isCount=FALSE)
clustndims <- clusterSingle(mat, clusterFunction="pam",
subsample=FALSE, sequential=FALSE, dimReduce="mad",
ndims=3, clusterDArgs=list(k=3),isCount=FALSE)
})
test_that("`clusterSingle` preserves the colData and rowData of SE", {
cl <- clusterSingle(se, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=3),isCount=FALSE)
expect_equal(colData(cl),colData(se))
expect_equal(rownames(cl),rownames(se))
expect_equal(colnames(cl),colnames(se))
expect_equal(metadata(cl),metadata(se))
expect_equal(rowData(cl),rowData(se))
})
|
d62a67b41e53afe0be8a7963db452bf8d338e4a5
|
420827a0e5b5283493e4e624063d83699a1e692b
|
/scripts/diagrams.R
|
d74491c30d778444b1ab33b7666dcd9babbc1d23
|
[] |
no_license
|
szymonm/CGMethodsForInfluence
|
7ce6d8a316b25a377e3e58c6d3d61dad895b1225
|
ac43a08ea1db3189c6f4f397fddffaab8760f7c6
|
refs/heads/master
| 2021-01-21T11:23:59.411979
| 2014-11-23T11:29:35
| 2014-11-23T11:45:51
| 16,474,066
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
diagrams.R
|
# Args: filename, name
args <- commandArgs(trailingOnly=T)
filename <- args[1]
print(paste("Reading from:", filename))
data <- read.table(args[1])
source("plotDiagram.R")
pdf(paste(filename,".pdf", sep=""))
plotDiagram(args[2], data)
dev.off()
|
33784d83fa64c936d61a3fa861e4a6cf55422062
|
f12b660880582889b0b09df015dd54ce09805a92
|
/label_summary.R
|
dda12eee2fd153002f2d039a1c6c33db443438be
|
[] |
no_license
|
KenHBS/LDA_adventures
|
833bbce62d6cc5a7d6c62b1cc41ae7419ab301bd
|
aaade1632362c47d4c17eb9786f27d2d90ff4817
|
refs/heads/master
| 2020-12-10T03:13:56.471203
| 2017-06-26T09:14:19
| 2017-06-26T09:14:19
| 95,428,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,738
|
r
|
label_summary.R
|
library(RMySQL)
library(stringr)
## Get the data in R:
con <- dbConnect(MySQL(), host = "localhost", port = 3306, user = "root",
password = "qwertz", dbname = "basecamp")
quer <- dbSendQuery(con, "SELECT * FROM abstract_set")
df <- fetch(quer, n = -1)
dbClearResult(quer)
### Three sources of wrongness:
# 1) Empty abstracts
valid_df <- df[df$abstract != "", ]
# 2) Scraped labels, that aren't JEL codes, but 3 digits (e.g 635 026)
digit_inds <- grepl("[0-9]{3}", valid_df$labels, perl = TRUE)
valid_df <- valid_df[!digit_inds, ]
# 3) No labels (labels may be recovered from full PDF, 603 articles)
valid_df <- valid_df[which(valid_df$labels != ""), ]
### SUMMARIZE THE JEL CODES IN THE 5434 USEFUL ABSTRACTS:
label_summary <- table(valid_df$labels)
length(label_summary) # 3997 unique label combinations
max(label_summary) # 31 most common label combination occurs 31 times
split_labels <- str_split(valid_df$labels, " ")
mean(sapply(split_labels, length)) # 3.68 labels per document on average
bag_of_labels <- unlist(split_labels)
length(unique(bag_of_labels)) # 661 unique labels
sum_per_label <- table(bag_of_labels)
mean(sum_per_label) # Every label occurs on average 25.91 times in corpus
sum_per_label[(sum_per_label == max(sum_per_label))] # E32 occurs 340 times
# in the corpus: Business Fluctuations and Cycles
# Most common General Category:
general_cats <- table(gsub("[^A-Z]", "", bag_of_labels))
# Y: Miscellaneous Categories least: 3 times
# D: Microeconomics most: 2950 times
sum(grepl("D", valid_df$labels)) # D present in 1896 documents
###
|
f41e666cdf7bd7d69bc581ac64c4d120a39e21a2
|
c132e78b8002ceb0ce7f06c2e2cb21e4b458e049
|
/ESPmap_ggplot_Arabidopsis.R
|
0b4317ee380d3f2db609ce33b261e7df71d9f866
|
[] |
no_license
|
abj1x/ESPmap_ggplot_Arabidopsis
|
ae0e64e85aa9713a9055e3356ab86640f1ccfa49
|
45fb5e94214d53673e5c3956b3f1ad3a7c020182
|
refs/heads/master
| 2021-05-26T02:56:21.054444
| 2020-04-08T08:03:42
| 2020-04-08T08:03:42
| 254,024,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,151
|
r
|
ESPmap_ggplot_Arabidopsis.R
|
## after https://www.rpubs.com/spoonerf/countrymapggplot2
library(maptools)
library(raster)
library(ggplot2)
library(rgdal)
library(plyr)
## download Spain shapefile data
spain<-getData("GADM",country="ES",level=0)
## make base map
esp<-ggplot()+geom_polygon(data=spain,aes(x=long,y=lat,group=group))+coord_fixed(1.3)
## input of dataset with Arabidopsis 1001 genome accession details
ecotypes_all<-read.csv('LHY_SNP_WRLD_dataset.csv', header=TRUE)
## obtaining the ESP specific accessions from dataset
ESPdata<-ecotypes_all[which(ecotypes_all$country.x=="ESP"),]
## cast ecotype locations as lat long coordinates on base map
esp+theme_dark() +
geom_point(data=ESPdata,aes(x=long,y=lat,color=factor(genotype_comp)),size=1.0) +
labs(x="longitude", y="latitude",col="haplotype") +
scale_color_manual(values=c("dodgerblue1","chartreuse2","firebrick1","lightgoldenrod2")) +
theme(legend.background = element_rect(fill="gray 90"),legend.title=element_blank()) +
theme(legend.justification = c(0,1),legend.position = c(0,1)) +
theme(legend.background = element_rect(colour="black",size=0.25))
ggsave('Spain_map_LHY_haplotype.png')
|
60139cbea704e26b1c79b47affa1ac13eb318d10
|
43419401c9bf60ba8650c5f79dfdd2e224c9943e
|
/hacker_assignment.R
|
79910cdf3eb681c32f88cfd790a995ec210aed98
|
[] |
no_license
|
p4r1t05h/Patient-Adherence
|
793daafe93e6223e2d6464bc8d8b86bb327678da
|
41099f156899a372b09985ca1602080b0822a517
|
refs/heads/master
| 2020-03-28T16:56:25.795268
| 2018-09-14T06:05:27
| 2018-09-14T06:05:27
| 148,743,010
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,139
|
r
|
hacker_assignment.R
|
rm(list=ls())
#setting working directory
setwd("D:/Data Science/Abzooba")
getwd()
#Loading important libraries
library(rpart)
library(C50)
library(randomForest)
library(class)
library(e1071)
library(caret)
library(boot)
#Loading data sets
train<-read.csv("Training Data.csv", header = T)
test<-read.csv("Test Data.csv", header = T)
str(train)
head(train)
#checking for missing data
table(is.na(train))
#As we can see there are no missing data
#Now we will convert the variables into proper data type
train$Diabetes<-as.factor(train$Diabetes)
test$Diabetes<-as.factor(test$Diabetes)
train$Alcoholism<-as.factor(train$Alcoholism)
test$Alcoholism<-as.factor(test$Alcoholism)
train$HyperTension<-as.factor(train$HyperTension)
test$HyperTension<-as.factor(test$HyperTension)
train$Smokes<-as.factor(train$Smokes)
test$Smokes<-as.factor(test$Smokes)
train$Tuberculosis<-as.factor(train$Tuberculosis)
train$Sms_Reminder<-as.factor(train$Sms_Reminder)
str(train)
str(test)
#====Univariate Analysis
#Quantitative Variables
#Numerical analysis of Age | 5 point summary
age.5p<-summary(train$Age)
age.5p
#Visual analysis for Age | Histogram and Boxplot
hist(train$Age, main = "Histogram of Age")
boxplot(train$Age, names = "Boxplot of Age")
#Numerical analysis of Prescription Period | 5 point summary
pres.5p<-summary(train$Prescription_period)
pres.5p
#Visual analysis for Prescription Period | Histogram and Boxplot
hist(train$Prescription_period, main = "Histogram of Prescription Period")
boxplot(train$Prescription_period, names = "Boxplot of Prescription PEriod")
#===Bivariate Analysis
#Numerical Analysis for Gender | Proportion Table
prop.table(table(train$Gender))*100
gen.prop<-as.matrix(table(train$Gender))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(gen.prop, col = c(3,4), horiz = FALSE)
pie(gen.prop, labels = c("1=F", "2=M"))
#Numerical Analysis for Diabetes | Proportion Table
prop.table(table(train$Diabetes))*100
diabetes.prop<-as.matrix(table(train$Diabetes))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(diabetes.prop, col = c(3,4), horiz = FALSE)
pie(diabetes.prop, labels = c("1=No", "2=Yes"))
#Numerical Analysis for Alchoholism | Proportion Table
prop.table(table(train$Alcoholism))*100
alchohol.prop<-as.matrix(table(train$Alcoholism))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(alchohol.prop, col = c(3,4), horiz = FALSE)
pie(alchohol.prop, labels = c("1=No", "2=Yes"))
#Numerical Analysis for Hypertension | Proportion Table
prop.table(table(train$HyperTension))*100
hyper.prop<-as.matrix(table(train$HyperTension))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(hyper.prop, col = c(3,4), horiz = FALSE)
pie(hyper.prop, labels = c("1=No", "2=Yes"))
#Numerical Analysis for Smokes | Proportion Table
prop.table(table(train$Smokes))*100
smoke.prop<-as.matrix(table(train$Smokes))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(smoke.prop, col = c(3,4), horiz = FALSE)
pie(smoke.prop, labels = c("1=No", "2=Yes"))
#Numerical Analysis for Tuberculosis | Proportion Table
prop.table(table(train$Tuberculosis))*100
tuber.prop<-as.matrix(table(train$Tuberculosis))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(tuber.prop, col = c(3,4), horiz = FALSE)
pie(tuber.prop, labels = c("1=No", "2=Yes"))
#Numerical Analysis for SMS Reminder | Proportion Table
prop.table(table(train$Sms_Reminder))*100
sms.prop<-as.matrix(table(train$Sms_Reminder))
#Visual Analysis for Gender | Bar Graph & Pie chart
barplot(sms.prop, col = c(3,4), horiz = FALSE)
pie(sms.prop, labels = c("1=0 Reminder", "2=1 Reminder", "3=2 Reminders"))
#====Bivariate Analysis and Dependency Check
#Y=Adherence (Y is categorical Variable)
#Numerical Analysis Gender->Adherence | 2 Way Table
gen.table<-table(train$Gender,train$Adherence)
gen.table
#Chi Squared test for Dependency
gen.chi<-chisq.test(gen.table)
gen.chi
#since p<0.05 | It is DEPENDET
#Numerical Analysis Diabetes->Adherence | 2 Way Table
dia.table<-table(train$Diabetes,train$Adherence)
dia.table
#Chi Squared test for Dependency
dia.chi<-chisq.test(dia.table)
dia.chi
#since p<0.05 | It is DEPENDET
#Numerical Analysis Alcoholism->Adherence | 2 Way Table
al.table<-table(train$Alcoholism,train$Adherence)
al.table
#Chi Squared test for Dependency
al.chi<-chisq.test(al.table)
al.chi
#since p<0.05 | It is DEPENDET
#Numerical Analysis Hypertension->Adherence | 2 Way Table
hyper.table<-table(train$HyperTension,train$Adherence)
hyper.table
#Chi Squared test for Dependency
hyper.chi<-chisq.test(hyper.table)
hyper.chi
#since p<0.05 | It is DEPENDET
#Numerical Analysis Gender->Adherence | 2 Way Table
smokes.table<-table(train$Smokes,train$Adherence)
smokes.table
#Chi Squared test for Dependency
smokes.chi<-chisq.test(smokes.table)
smokes.chi
#since p<0.05 | It is DEPENDET
#Numerical Analysis Tuberculosis->Adherence | 2 Way Table
tuber.table<-table(train$Tuberculosis,train$Adherence)
tuber.table
#Chi Squared test for Dependency
tuber.chi<-chisq.test(tuber.table)
tuber.chi
#since p>0.05 | It is InDEPENDET
#Numerical Analysis SMS Reminder->Adherence | 2 Way Table
sms.table<-table(train$Sms_Reminder,train$Adherence)
sms.table
#Chi Squared test for Dependency
sms.chi<-chisq.test(sms.table)
sms.chi
#since p<0.05 | It is InDEPENDET
#Removing the independent variables from data set
train<-train[,c(-9,-10)]
test<-test[,c(-9,-10)]
#=========================Model Building===================================
#we are splitting the training data to Validation set data because
#we cannot build Confusion Matrix without the Target Variable which is absent from the TEST DATA
#Splitting the traing set into training and Validation set
set.seed(1)
val.index = createDataPartition(train$Adherence, p = .80, list = FALSE)
train = train[ val.index,]
validation = train[-val.index,]
#Since we have to calculate Probablity score for each patient
#We'll only build Logistic Regression Model
#============================================
#======Logistic Regression Model
#============================================
model1<-glm(train$Adherence~., data = train, family = "binomial")
coef(model1)
summary(model1)$coef
summary(model1)
#Predicting using Logistic regression model
predict.model1<-predict.glm(model1, newdata = validation[,-9], type = "response")
#Converting Probablities into 0 & 1
prob.model1<-ifelse(predict.model1>0.5,1,0)
#Creating Confusion Matrix For Logistic Regression
confusion.model1<-table(Predicted=prob.model1,Actual=validation$Adherence)
confusion.model1
#Determining the Accuracy and Error Rate
sum(confusion.model1[c(1,4)]/sum(confusion.model1[1:4])) #Correct Prediction
1-sum(confusion.model1[c(1,4)]/sum(confusion.model1[1:4])) #Prediction Error
#Precision for Yes= 7173/(7173+1894)=79.11%
#Recall for Yes=7173/(7173+1496)=82.74%
#Precision for No = 18180/(18180+1496)=92.4%
#Recall for No = 18180/(18180+1894)=90.56%
#Building LOGISTIC REGRESSION MODEL USING TEST DATA
LRmodel<-glm(train$Adherence~., data = train, family = "binomial")
summary(LRmodel)
#Predicting using Logistic regression model
predict.LRmodel<-predict.glm(LRmodel, newdata = test, type = "response")
#Since there is no specification given as to how much probablity score will result in Adherence
#then we'll assume that if the probablity is more than 0.5 than the Prediction is YES if not then the Prediction is No
predict.LR<-ifelse(predict.LRmodel>0.5,"Yes","No")
final.results<-cbind(predict.LR,predict.LRmodel)
head(final.results)
colnames(final.results)<-c("Probablity Score", "Adherence")
write.csv(final.results,"./Final.csv")
### I have made other Machine Learning Models just in case
#==============================================
#====Decision tree Model
#==============================================
#model2 = C5.0(train$Adherence ~., data=train, trials = 100, rules = TRUE)
#Summary of DT model
#summary(model2)
#Predicting for test cases
#predict.model2 = predict(model2, newdata=validation[,-9], type = "class")
#Creating Confusion Matrix for Decision Tree
#confusion.model2<-table(validation$Adherence,predict.model2)
#confusionMatrix(confusion.model2)
#Determining the Accuracy and Error Rate
#sum(confusion.model2[c(1,4)]/sum(confusion.model2[1:4])) #Correct Prediction
#1-sum(confusion.model2[c(1,4)]/sum(confusion.model2[1:4])) #Prediction Error
#Precision for Yes= 7562/(8205+1946)=74.49%
#Recall for Yes= 7562/(7562+1107)= 87.23%
#Precision for No = 18128/(18128+1107)=94.24%
#Recall for No = 18128/(18128+1946)=90.30%
#=====Random Forest Model
#model3<-randomForest(train$Adherence~., data = train, importance=T, ntree=500)
#summary(model3)
#predict.model3<-predict(model3, validation[,-9])
#confusion.model3<-table(Predicted=prob.model3,Actual=validation$Adherence)
#confusionMatrix(confusion.model3)
#Determining the Accuracy and Error Rate
#sum(confusion.model3[c(1,4)]/sum(confusion.model3[1:4])) #Correct Prediction
#1-sum(confusion.model3[c(1,4)]/sum(confusion.model3[1:4])) #Prediction Error
#Precision for Yes = 7772/(7772+2093)=78.78%
#Recall for Yes= 7772/(7772+897)=89.65%
#Precision for No = 17981/(17981+897)=95.24%
#Recall for No = 17981/(17981+2093)=89.57%
#===========================
#=====KNN Model
#===========================
#model4<-knn(train[,1:8], validation[,1:8], train$Adherence, k=7)
#==========================
#====Naive Bayes Model
#==========================
#model5<-naiveBayes(train$Adherence~., data = train)
#Predicting Model
#predict.model5<-predict(model5, newdata = validation[,-9], type = "class")
#Confusion Matrix for Naive Bayes
#confusion.model5<-table(Predicted=prob.model5,Actual=validation$Adherence)
#confusionMatrix(confusion.model5)
#Determining the Accuracy and Error Rate
#sum(confusion.model5[c(1,4)]/sum(confusion.model5[1:4])) #Correct Prediction
#1-sum(confusion.model5[c(1,4)]/sum(confusion.model5[1:4])) #Prediction Error
#Precision for Yes= 7620/(7620+2840)=72.84%
#Recall for Yes= 7620/(7620+1049)=87.89%
#Precision for No = 17234/(17234+1049)=94.26%
#Recall for No = 17234/(17234+2840)=85.85%
|
fe84980c39b0d6daa86b9fa4255f4bad48efb04d
|
8101cce3db89cabfb1ab278d6e8a4cc5148d007c
|
/analysis.R
|
12e3ea9d3a5c69f2640359b88439884e8d8242d2
|
[] |
no_license
|
hedgef0g/jb_es_2020
|
0ae11349eb7259a59b44184dd0fb6644bab99139
|
fc44c27bfb2c72a1a67ea86284a866a0394b57c0
|
refs/heads/main
| 2023-01-02T08:44:13.892845
| 2020-10-23T06:49:30
| 2020-10-23T06:49:30
| 301,535,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,125
|
r
|
analysis.R
|
install.packages("tidyverse")
install.packages("foreign")
library(tidyverse)
library(foreign)
data <- read_csv("./DevEcosystem 20 external data sharing/2020_sharing_data_outside.csv")
qre <- read_csv("./DevEcosystem 20 external data sharing/DevEcosystem 2020 questions_outside.csv")
data <- data %>%
mutate(years_exp = factor(code_yrs,
levels = c("Less than 1 year", "1–2 years", "3–5 years", "6–10 years", "11+ years", "I don't have any professional coding experience"),
labels = c(0.5, 1.5, 4, 8, 12, NA))) %>%
mutate(new_age = factor(age_range,
levels = c("18–20", "21–29", "30–39", "40–49", "50–59", "60 or older"),
labels = c(19, 24, 34.5, 44.5, 54.5, 64.5))) %>%
mutate(mobile_target_os_overall = ifelse((!is.na(mobile_target_os.Android) & !is.na(mobile_target_os.iOS)), "Android & iOS",
ifelse(!is.na(mobile_target_os.Other), "Other",
ifelse(!is.na(mobile_target_os.Android), "Android",
ifelse(!is.na(mobile_target_os.iOS), "iOS", NA)))))
# Reworked function with additional options
maketable <- function(variable, dataset = data, t_country = "total", sw_type = "any", base = "weighted", sort = FALSE, filter = "none") {
if(t_country == "total") {dataset} else {dataset = filter(dataset, country == t_country)}
if(sw_type == "any") {dataset}
else {
sw_type_col <- paste("sw_types_developed.", sw_type, sep = "")
dataset <- filter(dataset, dataset[sw_type_col] == sw_type)}
dataset <- switch(filter,
"none" = dataset,
"employment" = filter(dataset, employment_status %in% unique(dataset$employment_status)[c(1,2,4,5,6)]),
"job_role" = filter_at(dataset,
vars(grep("job_role", names(dataset), value = FALSE)[c(1,2,3,5,6,7,8,10,12)]),
any_vars(!is.na(.))),
"desktop" = filter(dataset, !is.na(dataset$target_platforms.Desktop)),
"mobile" = filter(dataset, !is.na(dataset$target_platforms.Mobile)),
"pets" = filter(dataset, rowSums(is.na(dataset[grep("lifestyle_pet", names(dataset), value = FALSE)])) != 10))
colnums <- which(colnames(dataset) %in% grep(variable, names(dataset), value = TRUE))
if (length(colnums) > 1) {
output <- tibble("value" = character(), "share" = numeric())
for (i in colnums) {
v = as.character(unique(na.omit(dataset[i])))
s = switch(base,
"weighted" = weighted.mean(!is.na(dataset[i]), w = dataset$weight),
"non-weighted" = sum(dataset[i] == v, na.rm = TRUE) / nrow(dataset))
output <- add_row(output, tibble_row(value = v, share = s))}
output <- switch(base,
"weighted" = add_row(output, tibble_row(value = "Base", share = sum(dataset$weight))),
"non-weighted" = add_row(output, tibble_row(value = "Base", share = nrow(dataset))))
}
else {
v = unique(unlist(data[colnums]))
v = v[!is.na(v)]
s = numeric(length = length(v))
for (i in v) {
s[which(v == i)] = switch(base,
"weighted" = sum(filter(dataset, dataset[colnums] == i)$weight) / sum(filter(dataset, !is.na(dataset[colnums]))$weight),
"non-weighted" = nrow(filter(dataset, dataset[colnums] == i)) / nrow(filter(dataset, !is.na(dataset[colnums]))))
}
output <- tibble("value" = v, "share" = s)
output <- switch(base,
"weighted" = add_row(output, tibble_row(value = "Base", share = sum(filter(dataset, !is.na(dataset[colnums]))$weight))),
"non-weighted" = add_row(output, tibble_row(value = "Base", share = nrow(filter(dataset, !is.na(dataset[colnums]))))))
}
if (sort == FALSE) {output} else {arrange(output, value)}
}
percent_sig <- function(perc1, perc2, base1, base2, lev = 1.96) {
if(base1 >= 75 & base2 >= 75) {
perc1 = perc1 * 100
perc2 = perc2 * 100
p = (perc1 * base1 + perc2 * base2) / (base1 + base2)
output <- tibble(sig = character())
if((perc1 - perc2) / sqrt(p * (100 - p) * (1 / base1 + 1 /base2)) > lev) {sig = "high"}
else {
if((perc1 - perc2) / sqrt(p * (100 - p) * (1 / base1 + 1 /base2)) < -lev) {sig = "low"}
else {sig = "no"}}
sig}
else {sig = "no"}
}
get_sig <- function(percent_table, level = 1.96) {
add_column(percent_table, sig = vector("character", nrow(percent_table)))
for (i in 1:(nrow(percent_table) - 1)) {
output = percent_sig(percent_table[i,3],
percent_table[i,2],
percent_table[nrow(percent_table),3],
percent_table[nrow(percent_table),2],
lev = level)
percent_table$sig[i] = output
}
percent_table$sig[nrow(percent_table)] = NA
percent_table
}
sig_levels <- c("darkgreen", "darkred", "darkgrey")
names(sig_levels) <- levels(factor(c("high", "no", "low")))
# ---- Age distribution ----
# Build the share table (all respondents = "total", game developers =
# "gamedev"), flag significant differences, relabel bins with en-dashes,
# and render a horizontal bar chart to age.png.
age_range <- maketable(variable = "age_range", sort = TRUE) %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "age_range", sw_type = "Games", sort = TRUE)$share)
age_range <- get_sig(age_range)
# Levels are reversed so the youngest bin ends up on top after coord_flip().
age_range$value <- factor(c("18–20", "21–29", "30–39", "40–49", "50–59", "60 or older", "Base"),
                          levels = c("60 or older", "50–59", "40–49", "30–39", "21–29", "18–20", "Base"))
# NOTE(review): the t-test below is unweighted while the two means use the
# survey weights — confirm whether a weighted test was intended.
t.test(as.numeric(as.character(filter(data, data$sw_types_developed.Games == "Games")$new_age)), as.numeric(as.character(data$new_age)))
weighted.mean(as.numeric(as.character(filter(data, data$sw_types_developed.Games == "Games")$new_age)), w = filter(data, data$sw_types_developed.Games == "Games")$weight)
weighted.mean(as.numeric(as.character(data$new_age)), w = data$weight)
png(filename = "age.png", width = 900, height = 500)
age_range %>%
filter(value != "Base") %>%
ggplot(aes(x = value, y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Возраст разработчиков игр",
     subtitle = "Доля каждого возраста, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Professional coding experience ----
# Same table-build/flag/plot pattern; categories with a zero share in
# either column are dropped before significance flagging.
code_yrs <- maketable(variable = "code_yrs") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "code_yrs", sw_type = "Games")$share) %>%
filter(total != 0 & gamedev != 0)
code_yrs <- get_sig(code_yrs)
# Explicit level order: least experience on top after coord_flip().
code_yrs$value <- factor(code_yrs$value, levels = c("I don't have any professional coding experience",
                                                    "11+ years",
                                                    "6–10 years",
                                                    "3–5 years",
                                                    "1–2 years",
                                                    "Less than 1 year",
                                                    "Base"))
png(filename = "exp.png", width = 900, height = 500)
code_yrs %>%
filter(value != "Base") %>%
ggplot(aes(x = value, y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Опыт профессиональной разбработки",
     subtitle = "Число лет, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Employment status ----
# Long answer options are shortened inline before plotting; the replace()
# strings must match the survey wording exactly (note the trailing space
# in the "Partially employed ..." option below — present in the raw data).
employment <- maketable(variable = "employment_status") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "employment_status", sw_type = "Games")$share)
employment <- get_sig(employment)
png(filename = "employment.png", width = 900, height = 500)
employment %>%
filter(gamedev != 0 & value != "Base") %>%
mutate(value = replace(value, value == "Fully employed by a company / organization", "Fully employed")) %>%
mutate(value = replace(value, value == "Freelancer (a person pursuing a profession without a long-term commitment to any one employer)", "Freelancer")) %>%
mutate(value = replace(value, value == "Self-employed (a person earning income directly from their own business, trade, or profession)", "Self-employed")) %>%
mutate(value = replace(value, value == "Partially employed by a company / organization ", "Partially employed")) %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
geom_col() +
coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Трудоустройство",
     subtitle = "Доля каждой позиции, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Job roles ----
# Restricted to employed respondents via filter = "employment"; bars are
# ordered by game-dev share (reorder + coord_flip).
prof <- maketable(variable = "job_role", filter = "employment") %>%
rename(total = share) %>%
add_column(gamedev = (maketable(variable = "job_role", sw_type = "Games", filter = "employment")$share))
prof <- get_sig(prof)
png("job_role.png", width = 900, height = 500)
prof %>%
filter(gamedev != 0 & value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
geom_col() +
coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Что из перечисленного лучше всего описывает ваши\nдолжностные обязанности?",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      legend.position = "none")
dev.off()
# ---- Seniority / position level ----
# Restricted via filter = "job_role"; otherwise the standard pattern.
position_level <- maketable(variable = "position_level", filter = "job_role") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "position_level", sw_type = "Games", filter = "job_role")$share)
position_level <- get_sig(position_level)
png("position.png", width = 900, height = 500)
position_level %>%
filter(gamedev != 0 & value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
geom_col() +
coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Уровень, занимаемый в компании",
     subtitle = "Доля каждого уровня, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Work activities ----
# Multi-select question: shares are % of respondents performing each kind
# of activity at their main job.
activities <- maketable(variable = "activities_kind") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "activities_kind", sw_type = "Games")$share)
activities <- get_sig(activities)
png("activities.png", width = 900, height = 500)
activities %>%
filter(gamedev != 0 & value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
geom_col() +
coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Типичные деловые задачи",
     subtitle = "% выполняющих задачи подобного рода на основной работе",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust = 1),
      legend.position = "none")
dev.off()
# ---- Languages used in the last 12 months ----
# The variable is a regex ("^proglang\\.") selecting all per-language
# dummy columns; long option labels are shortened before plotting.
lang_p12m <- maketable(variable = "^proglang\\.") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "^proglang\\.", sw_type = "Games")$share)
lang_p12m <- get_sig(lang_p12m)
png("lang_p12m.png", width = 900, height = 500)
lang_p12m %>%
filter(gamedev != 0 & value != "Base") %>%
mutate(value = replace(value, value == "SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)", "SQL")) %>%
mutate(value = replace(value, value == "Shell scripting languages(bash/shell/powershell)", "Shell")) %>%
mutate(value = replace(value, value == "I don't use programming languages", "Not any")) %>%
ggplot(aes(x = reorder(value, -gamedev), y = gamedev, fill = sig)) +
geom_col() +
#coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Языки программирования, использованные за 12 месяцев",
     subtitle = "Доля каждого языка, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Primary programming languages (up to three per respondent) ----
primary_lang <- maketable(variable = "primary_proglang") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "primary_proglang", sw_type = "Games")$share) %>%
filter(total != 0 & gamedev != 0)
primary_lang <- get_sig(primary_lang)
png("primary_lang.png", width = 900, height = 500)
primary_lang %>%
filter(gamedev != 0 & value != "Base") %>%
mutate(value = replace(value, value == "SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)", "SQL")) %>%
mutate(value = replace(value, value == "Shell scripting languages(bash/shell/powershell)", "Shell")) %>%
mutate(value = replace(value, value == "I don't use programming languages", "Not any")) %>%
ggplot(aes(x = reorder(value, -gamedev), y = gamedev, fill = sig)) +
geom_col() +
#coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Основные языки программирования (не более трёх для респондента)",
     subtitle = "Доля каждого языка, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Languages planned for adoption / migration in the next 12 months ----
lang_adopt <- maketable("adopt_proglang") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "adopt_proglang", sw_type = "Games")$share) %>%
filter(total != 0 & gamedev != 0)
lang_adopt <- get_sig(lang_adopt)
png("lang_adopt.png", width = 900, height = 500)
lang_adopt %>%
filter(gamedev != 0 & value != "Base") %>%
mutate(value = replace(value, value == "SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)", "SQL")) %>%
mutate(value = replace(value, value == "Shell scripting languages(bash/shell/powershell)", "Shell")) %>%
mutate(value = replace(value, value == "No, I'm not planning to adopt / migrate", "Not any")) %>%
# NOTE(review): "Щерук" below is almost certainly "Other" typed with a
# Russian keyboard layout (O->Щ, t->е, h->р, e->у, r->к) — this label
# appears on the published chart; fix to "Other"/"Другое" and regenerate.
mutate(value = replace(value, value == "Planning to adopt / migrate to other language(s) - Write In:", "Щерук")) %>%
ggplot(aes(x = reorder(value, -gamedev), y = gamedev, fill = sig)) +
geom_col() +
#coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Языки, планируемые к изучению / миграции в следующие 12 месяцев",
     subtitle = "Доля каждого языка, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Operating systems used for development ----
os_used <- maketable(variable = "os_devenv") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "os_devenv", sw_type = "Games")$share)
os_used <- get_sig(os_used)
png("os_used.png", width = 900, height = 500)
os_used %>%
filter(gamedev != 0 & value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
geom_col() +
coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Пользование операционными системами",
     subtitle = "Доля ОС, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Target development platforms ----
target_platforms <- maketable(variable = "target_platforms") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "target_platforms", sw_type = "Games")$share)
target_platforms <- get_sig(target_platforms)
png("target_platform.png", width = 900, height = 500)
target_platforms %>%
filter(gamedev != 0 & value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
geom_col() +
coord_flip() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Целевые платформы разработки",
     subtitle = "Доля каждой платформы, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Target desktop OS (among desktop developers only) ----
# Regex variable selects all per-OS dummy columns; filter = "desktop"
# restricts the base to desktop developers.
target_os <- maketable(variable = "^target_os\\.", filter = "desktop") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "^target_os\\.", sw_type = "Games", filter = "desktop")$share)
target_os <- get_sig(target_os)
png(filename="target_os.png", width = 900, height = 500)
target_os %>%
filter(value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Целевая ОС (для тех, кто разрабатывает приложения для ПК)",
     subtitle = "Доля каждой ОС, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Target mobile OS (among mobile developers only) ----
# NOTE(review): `mobile_os` (the per-OS multiselect breakdown) is computed
# below but never plotted or otherwise used — only `mobile_os_2` (the
# "overall" single-choice variant) is charted. Either plot it or drop it.
mobile_os <- maketable(variable = "mobile_target_os\\.", filter = "mobile") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "mobile_target_os\\.", sw_type = "Games", filter = "mobile")$share)
mobile_os <- get_sig(mobile_os)
mobile_os_2 <- maketable(variable = "mobile_target_os_overall", filter = "mobile") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "mobile_target_os_overall", sw_type = "Games", filter = "mobile")$share)
mobile_os_2 <- get_sig(mobile_os_2)
png(filename="mobile_os_2.png", width = 900, height = 500)
mobile_os_2 %>%
filter(value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Целевая ОС (для тех, кто разрабатывает приложения для смартфонов)",
     subtitle = "Доля каждой ОС, %",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Open-source contribution ----
open_source <- maketable(variable = "contribute_os") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "contribute_os", sw_type = "Games")$share)
open_source <- get_sig(open_source)
png(filename="open_source.png", width = 900, height = 500)
open_source %>%
filter(value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Участие в проектах с открытым исходным кодом",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Weekly hours spent coding at work ----
# Vertical bars (no coord_flip); levels ordered from most to fewest hours.
hours_code_job <- maketable(variable = "hours_code_job") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "hours_code_job", sw_type = "Games")$share)
hours_code_job <- get_sig(hours_code_job)
hours_code_job$value <- factor(hours_code_job$value, levels = c("32 hours a week or more",
                                                                "17-31 hours a week",
                                                                "9-16 hours a week",
                                                                "3-8 hours a week",
                                                                "1-2 hours a week",
                                                                "Less than 1 hour a week",
                                                                "Base"))
png(filename = "hours_code_job.png", width = 900, height = 500)
hours_code_job %>%
filter(value != "Base") %>%
ggplot(aes(x = value, y = gamedev, fill = sig)) +
#coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Сколько часов в неделю вы программируете на работе?",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust=1),
      legend.position = "none")
dev.off()
# ---- Weekly hours spent on side / personal projects ----
# NOTE(review): this question's bin label is "17-32 hours a week" while
# the work-hours question uses "17-31" — presumably the raw survey wording
# differs between the two questions; verify against the codebook.
hours_code_hobby <- maketable(variable = "hours_code_hobby") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "hours_code_hobby", sw_type = "Games")$share)
hours_code_hobby <- get_sig(hours_code_hobby)
hours_code_hobby$value <- factor(hours_code_hobby$value, levels = c("I don’t have a side project",
                                                                    "32 hours a week or more",
                                                                    "17-32 hours a week",
                                                                    "9-16 hours a week",
                                                                    "3-8 hours a week",
                                                                    "1-2 hours a week",
                                                                    "Less than 1 hour a week",
                                                                    "Base"))
png(filename = "hours_code_hobby.png", width = 900, height = 500)
hours_code_hobby %>%
filter(value != "Base") %>%
ggplot(aes(x = value, y = gamedev, fill = sig)) +
#coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Сколько времени вы посвящаете разработке личных проектов или проектов,\nне связанных с основной работой?",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      axis.text.x = element_text(angle = 90, hjust = 1),
      legend.position = "none")
dev.off()
# ---- Information sources used ----
lifestyle_infosource <- maketable(variable = "lifestyle_infosource") %>%
rename(total = "share") %>%
add_column(gamedev = maketable(variable = "lifestyle_infosource", sw_type = "Games")$share)
lifestyle_infosource <- get_sig(lifestyle_infosource)
png(filename = "lifestyle_infosource.png", width = 900, height = 500)
lifestyle_infosource %>%
filter(value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Какие источники информации вы используете?",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust = 1),
      legend.position = "none")
dev.off()
# ---- Laptop vs desktop preference ----
laptop_or_desktop <- maketable(variable = "laptop_or_desktop") %>%
rename(total = "share") %>%
add_column(gamedev = maketable(variable = "laptop_or_desktop", sw_type = "Games")$share)
laptop_or_desktop <- get_sig(laptop_or_desktop)
png(filename = "laptop_or_desktop.png", width = 900, height = 500)
laptop_or_desktop %>%
filter(value != "Base") %>%
ggplot(aes(x = value, y = gamedev, fill = sig)) +
#coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Вы предпочитаете ноутбук или десктоп?",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust = 1),
      legend.position = "none")
dev.off()
# ---- Free-time activities / hobbies ----
lifestyle_hobbies <- maketable(variable = "lifestyle_hobbies") %>%
rename(total = "share") %>%
add_column(gamedev = maketable(variable = "lifestyle_hobbies", sw_type = "Games")$share)
lifestyle_hobbies <- get_sig(lifestyle_hobbies)
png(filename = "lifestyle_hobbies.png", width = 900, height = 500)
lifestyle_hobbies %>%
filter(value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "Чем вы занимаетесь в свободное время?",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust = 1),
      legend.position = "none")
dev.off()
# ---- Pets ----
# Restricted via filter = "pets"; zero-share rows dropped before flagging.
lifestyle_pet <- maketable(variable = "lifestyle_pet", filter = "pets") %>%
rename(total = share) %>%
add_column(gamedev = maketable(variable = "lifestyle_pet", sw_type = "Games", filter = "pets")$share) %>%
filter(total != 0 & gamedev != 0)
lifestyle_pet <- get_sig(lifestyle_pet)
png(filename = "lifestyle_pet.png", width = 900, height = 500)
lifestyle_pet %>%
filter(value != "Base") %>%
ggplot(aes(x = reorder(value, gamedev), y = gamedev, fill = sig)) +
coord_flip() +
geom_col() +
scale_y_continuous(labels = scales::percent) +
scale_fill_manual(values = sig_levels) +
geom_label(aes(label = round(gamedev * 100, 0)), fill = "white", size = 5) +
labs(x = "", y = "",
     title = "У вас есть домашние животные?",
     subtitle = "% для варианта ответа",
     caption = "На основе исследования JetBrains Developer EcoSystem 2020") +
theme(text = element_text(size = 16),
      panel.grid.minor.x = element_blank(),
      #axis.text.x = element_text(angle = 90, hjust = 1),
      legend.position = "none")
dev.off()
|
f8101123e8b935f4fc8c1f327c25ce2ec30d92fa
|
6125f56ef5651c81bfbe85eebccc05c81f49398c
|
/R/TADCompare.R
|
e4eef5955c640bb312f06734564a884156b90f87
|
[
"MIT"
] |
permissive
|
dozmorovlab/TADCompare
|
87612572160b43d754b1444f0357135d2d3965ed
|
f1b61b789eb6717ce9397cbe059b9f0f721d2bb1
|
refs/heads/master
| 2022-05-13T20:31:58.255606
| 2022-04-25T01:13:55
| 2022-04-25T01:13:55
| 207,209,435
| 19
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,064
|
r
|
TADCompare.R
|
#' Differential TAD boundary detection
#'
#' @import dplyr
#' @import magrittr
#' @import PRIMME
#' @import ggplot2
#' @param cont_mat1 Contact matrix in either sparse 3 column, n x n or
#' n x (n+3) form where the first three columns are coordinates in BED format.
#' See "Input_Data" vignette for more information.
#' If an n x n matrix is used, the column names must correspond to the start
#' point of the corresponding bin. Required.
#' @param cont_mat2 Second contact matrix, used for differential comparison,
#' must be in same format as cont_mat1. Required.
#' @param resolution Resolution of the data. Used to assign TAD boundaries
#' to genomic regions. If not provided, resolution will be estimated from
#' column names of matrix. If matrices are sparse, resolution will be estimated
#' from the column names of the transformed full matrix. Default is "auto"
#' @param z_thresh Threshold for differential boundary score. Higher values
#' result in a higher threshold for differential TAD boundaries. Default is 2.
#' @param window_size Size of sliding window for TAD detection, measured in bins.
#' Results should be consistent regardless of window size. Default is 15.
#' @param gap_thresh Required \% of non-zero interaction frequencies for a
#' given bin to be included in the analysis. Default is .2
#' @param pre_tads A list of pre-defined TADs for testing. Must contain two
#' entries with the first corresponding to TADs detected in matrix 1
#' and the second to those detected in matrix 2. Each entry must contain a BED-like
#' data frame or GenomicRanges object with columns "chr", "start", and "end",
#' corresponding to coordinates of TADs. If provided, differential TAD
#' boundaries are defined only at these coordinates. Optional.
#' @return A list containing differential TAD characteristics
#' \itemize{
#' \item TAD_Frame - Data frame containing any bin where a TAD boundary
#' was detected. Boundary refers to the genomic coordinates, Gap_Score refers
#' to the orresponding differential boundary score. TAD_Score1 and TAD_Score2
#' are boundary scores for cont_mat1 and cont_mat2. Differential is the indicator
#' column whether a boundary is differential. Enriched_In indicates which matrix
#' contains the boundary. Type is the specific type of differential boundary.
#' \item Boundary_Scores - Boundary scores for the entire genome.
#' \item Count_Plot - Stacked barplot containing the number of each type of
#' TAD boundary called by TADCompare
#' }
#' @export
#' @details Given two sparse 3 column, n x n , or n x (n+3) contact matrices,
#' TADCompare identifies differential TAD boundaries. Using a novel boundary
#' score metric, TADCompare simultaneously identifies TAD boundaries (unless
#' provided with the pre-defined TAD boundaries), and tests for the presence
#' of differential boundaries. The magnitude of differences is provided
#' using raw boundary scores and p-values.
#' @examples
#' # Read in data
#' data("rao_chr22_prim")
#' data("rao_chr22_rep")
#' # Find differential TADs
#' diff_frame <- TADCompare(rao_chr22_prim, rao_chr22_rep, resolution = 50000)
TADCompare = function(cont_mat1,
cont_mat2,
resolution = "auto",
z_thresh = 2,
window_size = 15,
gap_thresh = .2,
pre_tads = NULL) {
#Pulling out dimensions to test for matrix type
row_test = dim(cont_mat1)[1]
col_test = dim(cont_mat1)[2]
if (row_test == col_test) {
if (all(is.finite(cont_mat1)) == FALSE) {
stop("Contact matrix 1 contains non-numeric entries")
}
if (all(is.finite(cont_mat2)) == FALSE) {
stop("Contact matrix 2 contains non-numeric entries")
}
}
if (col_test == 3) {
#Convert sparse matrix to n x n matrix
message("Converting to n x n matrix")
if (nrow(cont_mat1) == 1) {
stop("Matrix 1 is too small to convert to full")
}
if (nrow(cont_mat2) == 1) {
stop("Matrix 2 is too small to convert to full")
}
cont_mat1 = HiCcompare::sparse2full(cont_mat1)
cont_mat2 = HiCcompare::sparse2full(cont_mat2)
if (all(is.finite(cont_mat1)) == FALSE) {
stop("Contact matrix 1 contains non-numeric entries")
}
if (all(is.finite(cont_mat2)) == FALSE) {
stop("Contact matrix 2 contains non-numeric entries")
}
if (resolution == "auto") {
message("Estimating resolution")
resolution = as.numeric(names(table(as.numeric(colnames(cont_mat1))-
lag(
as.numeric(
colnames(cont_mat1)
))))[1]
)
}
} else if (col_test-row_test == 3) {
message("Converting to n x n matrix")
#Find the start coordinates based on the second column of the
#bed file portion of matrix
start_coords = cont_mat1[,2]
#Calculate resolution based on given bin size in bed file
resolution = as.numeric(cont_mat1[1,3])-as.numeric(cont_mat1[1,2])
#Remove bed file portion
cont_mat1 = as.matrix(cont_mat1[,-c(seq_len(3))])
cont_mat2 = as.matrix(cont_mat2[,-c(seq_len(3))])
if (all(is.finite(cont_mat1)) == FALSE) {
stop("Contact matrix 1 contains non-numeric entries")
}
if (all(is.finite(cont_mat2)) == FALSE) {
stop("Contact matrix 2 contains non-numeric entries")
}
#Make column names correspond to bin start
colnames(cont_mat1) = start_coords
colnames(cont_mat2) = start_coords
} else if (col_test!=3 & (row_test != col_test) & (col_test-row_test != 3)) {
#Throw error if matrix does not correspond to known matrix type
stop("Contact matrix must be sparse or n x n or n x (n+3)!")
} else if ( (resolution == "auto") & (col_test-row_test == 0) ) {
message("Estimating resolution")
#Estimating resolution based on most common distance between loci
resolution = as.numeric(names(table(as.numeric(colnames(cont_mat1))-
lag(
as.numeric(colnames(cont_mat1))
)))[1])
}
#Make sure contact matrices only include shared columns
coord_sum = list(colnames(cont_mat1), colnames(cont_mat2))
#Only include shared columns in analysis
shared_cols = Reduce(intersect, coord_sum)
cont_mat1 = cont_mat1[colnames(cont_mat1) %in% shared_cols,
colnames(cont_mat1) %in% shared_cols]
cont_mat2 = cont_mat2[colnames(cont_mat2) %in% shared_cols,
colnames(cont_mat2) %in% shared_cols]
#Set maximize size of sliding window
window_size = window_size
#Remove full gaps from matrices
non_gaps = which(colSums(cont_mat1) !=0 & (colSums(cont_mat2) !=0))
#Remove gaps
cont_mat1 = cont_mat1[non_gaps,non_gaps]
cont_mat2 = cont_mat2[non_gaps,non_gaps]
#Defining window size
max_end = window_size
max_size = window_size/ceiling(200000/resolution)
min_size = ceiling(200000/resolution)
Group_over = bind_rows()
start = 1
end = max_end
end_loop = 0
#If window is larger than matrix make it equal to matrix size
if (end+window_size>nrow(cont_mat1)) {
end = nrow(cont_mat1)
}
#Pre-allocate vectors
point_dists1 = c()
point_dists2 = c()
Regions = c()
while (end_loop == 0) {
#Subsetting
sub_filt1 = cont_mat1[seq(start,end,1), seq(start,end,1)]
sub_filt2 = cont_mat2[seq(start,end,1), seq(start,end,1)]
#Removing gap regions from sub_matrices
Per_Zero1 = colSums(sub_filt1 !=0)/nrow(sub_filt1)
Per_Zero2 = colSums(sub_filt2 !=0)/nrow(sub_filt2)
#Remove columns with more zeros than threshold
sub_gaps1 = Per_Zero1>gap_thresh
sub_gaps2 = Per_Zero2>gap_thresh
comp_rows = sub_gaps1 & sub_gaps2
sub_filt1 = sub_filt1[ comp_rows, comp_rows]
sub_filt2 = sub_filt2[ comp_rows, comp_rows]
#Slide window to end if window size is less than 2
if ( (length(sub_filt1) == 0) | (length(sub_filt1) == 1) ) {
start = start+max_end
end = end+max_end
} else {
#Getting degree matrices
dr1 = rowSums(abs(sub_filt1))
dr2 = rowSums(abs(sub_filt2))
#Creating the normalized laplacian
Dinvsqrt1 = diag((1/sqrt(dr1+2e-16)))
Dinvsqrt2 = diag((1/sqrt(dr2+2e-16)))
P_Part1 = crossprod(as.matrix(sub_filt1), Dinvsqrt1)
sub_mat1 = crossprod(Dinvsqrt1, P_Part1)
P_Part2 = crossprod(as.matrix(sub_filt2), Dinvsqrt2)
sub_mat2 = crossprod(Dinvsqrt2, P_Part2)
#Reading names
colnames(sub_mat1) = colnames(sub_mat2) = colnames(sub_filt1)
#Find gaps at 2mb and remove
#Get first two eigenvectors
Eigen1 = PRIMME::eigs_sym(sub_mat1, NEig = 2)
eig_vals1 = Eigen1$values
eig_vecs1 = Eigen1$vectors
#Get order of eigenvalues from largest to smallest
large_small1 = order(-eig_vals1)
eig_vals1 = eig_vals1[large_small1]
eig_vecs1 = eig_vecs1[,large_small1]
#Repeat for matrix 2
Eigen2 = eigs_sym(sub_mat2, NEig = 2)
eig_vals2 = Eigen2$values
eig_vecs2 = Eigen2$vectors
#Get order of eigenvalues from largest to smallest
large_small2 = order(-eig_vals2)
eig_vals2 = eig_vals2[large_small2]
eig_vecs2 = eig_vecs2[,large_small2]
#Normalize the eigenvectors
norm_ones = sqrt(dim(sub_mat1)[2])
for (i in seq_len(dim(eig_vecs1)[2])) {
eig_vecs1[,i] = (eig_vecs1[,i]/sqrt(sum(eig_vecs1[,i]^2))) * norm_ones
if (eig_vecs1[1,i] !=0) {
eig_vecs1[,i] = -1*eig_vecs1[,i] * sign(eig_vecs1[1,i])
}
}
for (i in seq_len(dim(eig_vecs2)[2])) {
eig_vecs2[,i] = (eig_vecs2[,i]/sqrt(sum(eig_vecs2[,i]^2))) * norm_ones
if (eig_vecs2[1,i] !=0) {
eig_vecs2[,i] = -1*eig_vecs2[,i] * sign(eig_vecs2[1,i])
}
}
eps = 2.2204e-16
n = dim(eig_vecs1)[1]
k = dim(eig_vecs1)[2]
#Project eigenvectors onto a unit circle
vm1 = matrix(
kronecker(rep(1,k), as.matrix(sqrt(rowSums(eig_vecs1^2)))),n,k
)
eig_vecs1 = eig_vecs1/vm1
vm2 = matrix(
kronecker(rep(1,k), as.matrix(sqrt(rowSums(eig_vecs2^2)))),n,k
)
eig_vecs2 = eig_vecs2/vm2
#Get distance between points on circle
point_dist1 = sqrt(
rowSums( (eig_vecs1-rbind(NA,eig_vecs1[-nrow(eig_vecs1),]))^2)
)
point_dist2 = sqrt(
rowSums( (eig_vecs2-rbind(NA,eig_vecs2[-nrow(eig_vecs2),]))^2)
)
#Remove NA entry at start of windows
point_dists1 = c(point_dists1, point_dist1[-1])
point_dists2 = c(point_dists2, point_dist2[-1])
#Assign to regions based on column names
Regions = c(Regions, colnames(sub_filt1)[-1])
}
#Test if we've reached end of matrix
if (end == nrow(cont_mat1)) {
end_loop = 1
}
#Set new start and end for window
start = end
end = end+max_end
if ( (end + max_end) >nrow(cont_mat1)) {
end = nrow(cont_mat1)
}
if (start == end | start>nrow(cont_mat1)) {
end_loop = 1
}
}
#Calculating the difference between log gaps
dist_diff = (point_dists1)-(point_dists2)
#Getting the z-scores
sd_diff = (dist_diff-mean(dist_diff, na.rm = TRUE))/(sd(dist_diff,
na.rm = TRUE))
TAD_Score1 = (point_dists1-mean(point_dists1, na.rm = TRUE))/
(sd(point_dists1, na.rm = TRUE))
TAD_Score2 = (point_dists2-mean(point_dists2, na.rm = TRUE))/
(sd(point_dists2, na.rm = TRUE))
#Get areas with high z-scores
gaps = which(abs(sd_diff)>z_thresh)
#Put differential regions into a data frame
diff_loci = data.frame(Region = as.numeric(Regions)[gaps],
Gap_Score = sd_diff[gaps])
#Return differential TAD boundaries
Gap_Scores = data.frame(Boundary = as.numeric(Regions),
TAD_Score1 = TAD_Score1,
TAD_Score2 =TAD_Score2,
Gap_Score = sd_diff)
TAD_Frame = data.frame(Boundary = as.numeric(Regions),
Gap_Score = sd_diff,
TAD_Score1,
TAD_Score2)
#Assign labels to boundary type and identify which matrix has the boundary
if(!is.null(pre_tads)) {
pre_tads = lapply(pre_tads, as.data.frame)
#pre_tads = bind_rows(pre_tads)
TAD_Frame = TAD_Frame %>%
filter(Boundary %in% bind_rows(pre_tads)$end) %>%
mutate(Differential = ifelse(abs(Gap_Score)>z_thresh, "Differential",
"Non-Differential"),
Enriched_In = ifelse(Gap_Score>0, "Matrix 1", "Matrix 2")) %>%
arrange(Boundary) %>%
mutate(Bound_Dist = abs(Boundary-lag(Boundary))/resolution) %>%
mutate(Differential = ifelse( (Differential == "Differential") &
(Bound_Dist<=5) & !is.na(Bound_Dist) &
( Enriched_In!=lag(Enriched_In)) &
(lag(Differential)=="Differential"),
"Shifted", Differential)) %>%
mutate(Differential= ifelse(lead(Differential) == "Shifted", "Shifted",
Differential)) %>%
dplyr::select(-Bound_Dist)
#Pull out non-shared boundaries
} else {
TAD_Frame = TAD_Frame %>%
filter( (TAD_Score1>1.5) | TAD_Score2>1.5) %>%
mutate(Differential = ifelse(abs(Gap_Score)>z_thresh, "Differential",
"Non-Differential"),
Enriched_In = ifelse(Gap_Score>0, "Matrix 1", "Matrix 2")) %>%
arrange(Boundary) %>%
mutate(Bound_Dist = abs(Boundary-lag(Boundary))/resolution) %>%
mutate(Differential = ifelse( (Differential == "Differential") &
(Bound_Dist<=5) & !is.na(Bound_Dist) &
( Enriched_In!=lag(Enriched_In)) &
(lag(Differential)=="Differential"),
"Shifted", Differential)) %>%
mutate(Differential= ifelse(lead(Differential) == "Shifted", "Shifted",
Differential)) %>%
dplyr::select(-Bound_Dist)
}
#Classifying merged-split
TAD_Frame = TAD_Frame %>%
mutate(Type = ifelse( (Differential == "Differential") &
(lag(Differential) == "Non-Differential") &
(lead(Differential) == "Non-Differential"),
ifelse(Enriched_In == "Matrix 1", "Split", "Merge"),
Differential))
#Add up-down enrichment of TAD boundaries
TAD_Frame = TAD_Frame %>%
mutate(Type = ifelse( (TAD_Score1>1.5) &
(TAD_Score2>1.5) &
(Differential == "Differential"),
"Strength Change", Type))
#Classify leftovers as complex
TAD_Frame = TAD_Frame %>% mutate(Type = gsub("^Differential$",
"Complex", Type))
#Another step for pre-specified
if (!is.null(pre_tads)) {
#Pulling out shared ends by overlap
shared_ends = ((TAD_Frame$Boundary %in%
pre_tads[[1]]$end + TAD_Frame$Boundary %in%
pre_tads[[2]]$end)==1)
#Converting non-differential to non-overlap
TAD_Frame = TAD_Frame %>% mutate(Type = ifelse(
(shared_ends == TRUE)&(Type=="Non-Differential"),
"Non-Overlap", Type))
}
#Redo for gap score frame as well
#Assign labels to boundary type and identify which matrix has the boundary
Gap_Scores = Gap_Scores %>%
mutate(Differential = ifelse(abs(Gap_Score)>z_thresh, "Differential",
"Non-Differential"),
Enriched_In = ifelse(Gap_Score>0, "Matrix 1", "Matrix 2")) %>%
arrange(Boundary) %>%
mutate(Bound_Dist = pmin(abs(Boundary-lag(Boundary))/resolution,
abs((Boundary-lead(Boundary)))/resolution)) %>%
mutate(Differential = ifelse( (Differential == "Differential") &
(Bound_Dist<=5) & !is.na(Bound_Dist),
"Shifted", Differential)) %>%
dplyr::select(-Bound_Dist)
#Classifying merged-split
Gap_Scores = Gap_Scores %>%
mutate(Type = ifelse( (Differential == "Differential") &
(lag(Differential) == "Non-Differential") &
(lead(Differential) == "Non-Differential"),
ifelse(Enriched_In == "Matrix 1", "Split", "Merge"),
Differential))
#Add up-down enrichment of TAD boundaries
Gap_Scores = Gap_Scores %>%
mutate(Type = ifelse( (TAD_Score1>1.5) &
(TAD_Score2>1.5) &
(Differential == "Differential"),
"Strength Change", Type))
#Classify leftovers as complex
Gap_Scores = Gap_Scores %>% mutate(Type = gsub("^Differential$",
"Complex", Type))
TAD_Sum = TAD_Frame %>% group_by(Type) %>% summarise(Count = n())
#Fix double counting of shifted boundaries
TAD_Sum = TAD_Sum %>% mutate(Count = ifelse(Type == "Shifted",
Count/2,
Count))
Count_Plot = ggplot2::ggplot(TAD_Sum,
aes(x = 1,
y = Count, fill = Type)) +
geom_bar(stat="identity") + theme_bw(base_size = 24) +
theme(axis.title.x = element_blank(), panel.grid = element_blank(),
axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
labs(y = "Number of Boundaries")
return(list(TAD_Frame =TAD_Frame,
Boundary_Scores = Gap_Scores,
Count_Plot = Count_Plot ))
}
|
29fc3358e7f2ac224c4dd2e9c8fe6bafdf7f2bf2
|
9ac06a307c5449ae56b8dfffd3abaa6303e0feab
|
/R/lambda.R
|
878ba0ecc9302659e2e7c3860b59e7f2adb70275
|
[] |
no_license
|
Rapporter/rapportools
|
5b9d69a707e289aff34d484e2cde2668dd740dac
|
f45730af9cbdf147cafbd7c030602bae2fe915d5
|
refs/heads/master
| 2022-05-10T16:09:34.816759
| 2022-03-21T23:20:00
| 2022-03-21T23:20:00
| 15,641,997
| 5
| 1
| null | 2017-11-01T10:57:46
| 2014-01-05T00:01:26
|
R
|
UTF-8
|
R
| false
| false
| 2,005
|
r
|
lambda.R
|
#' Goodman and Kruskal's lambda
#'
#' Computes Goodman and Kruskal's lambda, a proportional-reduction-in-error
#' measure of association, for a given two-way table.
#' @param table a \code{table} of two variables or a \code{data.frame} representation of the cross-table of the two variables without marginals
#' @param direction numeric value of \code{c(0, 1, 2)} where 1 means the lambda value computed for row, 2 for columns and 0 for both
#' @return computed lambda value(s) for row/col of given table
#' @examples \dontrun{
#' ## quick example
#' x <- data.frame(x = c(5, 4, 3), y = c(9, 8, 7), z = c(7, 11, 22), zz = c(1, 15, 8))
#' lambda.test(x) # 0.1 and 0.18333
#' lambda.test(t(x)) # 0.18333 and 0.1
#'
#' ## historical data (see the references above: p. 744)
#' men.hair.color <- data.frame(
#'     b1 = c(1768, 946, 115),
#'     b2 = c(807, 1387, 438),
#'     b3 = c(189, 746, 288),
#'     b4 = c(47, 53, 16)
#'     )
#' row.names(men.hair.color) <- paste0('a', 1:3)
#' lambda.test(men.hair.color)
#' lambda.test(t(men.hair.color))
#'
#' ## some examples on mtcars
#' lambda.test(table(mtcars$am, mtcars$gear))
#' lambda.test(table(mtcars$gear, mtcars$am))
#' lambda.test(table(mtcars$am, mtcars$gear), 1)
#' lambda.test(table(mtcars$am, mtcars$gear), 2)
#' }
#' @references \itemize{
#' \item Goodman, L.A., Kruskal, W.H. (1954) Measures of association for cross classifications. Part I. \emph{Journal of the American Statistical Association} \bold{49}, 732--764
#' }
#' @export
lambda.test <- function(table, direction = 0) {
    if (!is.numeric(direction) || !direction %in% c(0, 1, 2))
        stop('Direction should be an integer between 0 and 2!')
    # direction 0: report lambda in both directions.
    if (direction == 0)
        return(list(row = lambda.test(table, 1), col = lambda.test(table, 2)))
    # Sum of the modal (largest) cell within each row (direction 1) or
    # column (direction 2).
    modal_sum <- base::sum(as.numeric(apply(table, direction, base::max)))
    # Largest marginal total of the opposite dimension: the best guess
    # without knowing the conditioning variable.
    marginal_max <- if (direction == 1) base::max(colSums(table)) else base::max(rowSums(table))
    (modal_sum - marginal_max) / (base::sum(table) - marginal_max)
}
|
354847dac8236f3922f5ef6bbff22be63551d8b2
|
1ab2d3219a33e1902d9f9c8f66893e2d0e30892c
|
/source/sensitivity-analysis.R
|
172ef51fad0fe7b1dccbfc112d7b6c3e1d2577ab
|
[] |
no_license
|
MiljanaM94/hw-git
|
1cbb0606d31441cfc0b8ad1fcdcf1e68b44f11e2
|
7594cb8f96528babb54da62f3d31c895937fcc33
|
refs/heads/master
| 2023-04-13T14:38:39.283356
| 2020-03-25T17:00:46
| 2020-03-25T17:00:46
| 248,203,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,162
|
r
|
sensitivity-analysis.R
|
library(tidyverse)
library(scales)
# Sensitivity analysis: compare probability-of-default (PD) paths computed
# from actual vs. forecast macro inputs. Forecasts cover an 8-quarter horizon
# per (year, quarter) vintage; realised CPI/GDP series are joined on.
forecast <- read_rds("results/forecast.RDS")
cpi <- read_rds("results/cpi.RDS")
gdp <- read_rds("results/gdp.RDS")
# Collects one ggplot per (model, varied input); written to disk at the end.
res <- list()
# CPI: keep the baseline ("base") forecast, index each horizon 1..8 within its
# vintage, join the realised series, and convert percentages to proportions.
cpi <- forecast %>%
  filter(indicator == "cpi") %>%
  select(date, year, quarter, base) %>%
  arrange(year, quarter, date) %>%
  group_by(year, quarter) %>%
  mutate(t = 1:8) %>%
  ungroup() %>%
  inner_join(cpi) %>%
  transmute(
    year, quarter, t, date, predicted = base / 100, actual = actual_cpi / 100
  )
# Same construction for GDP.
gdp <- forecast %>%
  filter(indicator == "gdp") %>%
  select(date, year, quarter, base) %>%
  arrange(year, quarter, date) %>%
  group_by(year, quarter) %>%
  mutate(t = 1:8) %>%
  ungroup() %>%
  inner_join(gdp) %>%
  transmute(
    year, quarter, t, date, predicted = base / 100, actual = actual_gdp / 100
  )
# One row per (vintage, horizon) holding actual and predicted values of both
# indicators; used as a file-level global by cpi_impact()/gdp_impact() below.
df <- cpi %>%
  rename("cpi_actual" = actual, "cpi_predicted" = predicted) %>%
  inner_join(
    gdp %>%
      rename("gdp_actual" = actual, "gdp_predicted" = predicted)
  )
# Sensitivity of PD to the CPI input: recompute the logistic PD with forecast
# CPI while holding GDP at its realised values, and plot both PD paths over
# the 8-quarter horizon, faceted by forecast vintage (year x quarter).
#
# model:     portfolio id ("cc", "cl", "sbb", "mra"); selects the structural
#            dummy below.
# intercept, gdp, cpi: coefficients of the logit PD model.
# Reads the file-level global `df`. Returns a ggplot object.
#
# NOTE(review): the dummy for "cc"/"cl" is 0 here but 1 in gdp_impact() --
# confirm which value is intended.
cpi_impact <- function(model, intercept, gdp, cpi) {
  result <- df %>%
    mutate(
      # Model-specific structural-break dummy; the dates mark regime ends.
      dummy = case_when(
        model == "cc" ~ 0,
        model == "cl" ~ 0,
        model == "sbb" ~ if_else(date <= as.Date("2013-06-01"), 0.85, 0),
        model == "mra" ~ if_else(date <= as.Date("2015-06-01"), 1, 0)
      ),
      # Linear predictors: actual CPI vs. forecast CPI (GDP always actual).
      z_actual = intercept + gdp * gdp_actual + cpi * cpi_actual + dummy,
      z_predicted = intercept + gdp * gdp_actual + cpi * cpi_predicted + dummy,
      # Inverse logit to get probabilities of default.
      PD_actual_cpi = 1/(1 + exp(-z_actual)),
      PD_forecast_cpi = 1/(1 + exp(-z_predicted))
    ) %>%
    pivot_longer(PD_actual_cpi:PD_forecast_cpi) %>%
    ggplot(aes(t, value, color = name)) +
    geom_line() +
    facet_grid(year ~ quarter) +
    scale_y_continuous(
      breaks = c(0.01, 0.02, 0.03, 0.04),
      labels = percent_format(accuracy = 0.1)
    )
  result
}
# Sensitivity of PD to the GDP input: recompute the logistic PD with forecast
# GDP while holding CPI at its realised values; otherwise the mirror image of
# cpi_impact(). Reads the file-level global `df`. Returns a ggplot object.
#
# NOTE(review): the dummy for "cc"/"cl" is 1 here but 0 in cpi_impact() --
# confirm which value is intended. The value columns are still suffixed
# `_cpi` even though GDP is the varied input; presumably copy-paste naming.
gdp_impact <- function(model, intercept, gdp, cpi) {
  result <- df %>%
    mutate(
      # Model-specific structural-break dummy; the dates mark regime ends.
      dummy = case_when(
        model == "cc" ~ 1,
        model == "cl" ~ 1,
        model == "sbb" ~ if_else(date <= as.Date("2013-06-01"), 0.85, 0),
        model == "mra" ~ if_else(date <= as.Date("2015-06-01"), 1, 0)
      ),
      # Linear predictors: actual GDP vs. forecast GDP (CPI always actual).
      z_actual = intercept + gdp * gdp_actual + cpi * cpi_actual + dummy,
      z_predicted = intercept + gdp * gdp_predicted + cpi * cpi_actual + dummy,
      # Inverse logit to get probabilities of default.
      PD_actual_cpi = 1/(1 + exp(-z_actual)),
      PD_forecast_cpi = 1/(1 + exp(-z_predicted))
    ) %>%
    pivot_longer(PD_actual_cpi:PD_forecast_cpi) %>%
    ggplot(aes(t, value, color = name)) +
    geom_line() +
    facet_grid(year ~ quarter) +
    scale_y_continuous(
      breaks = c(0.01, 0.02, 0.03, 0.04),
      labels = percent_format(accuracy = 0.1)
    )
  result
}
# Run both sensitivity plots for each portfolio model. The positional
# arguments are (intercept, gdp coefficient, cpi coefficient); presumably
# estimated elsewhere -- TODO confirm their source.
# Credit Cards
res$cc_cpi <- cpi_impact("cc", -4.89, -5.99, 6.46)
res$cc_gdp <- gdp_impact("cc", -4.89, -5.99, 6.46)
# Consumer
res$cl_cpi <- cpi_impact("cl", -4.74, -4.6, 5.65)
res$cl_gdp <- gdp_impact("cl", -4.74, -4.6, 5.65)
# SBB
res$sbb_cpi <- cpi_impact("sbb", -4.17, -7.8, 1.2)
res$sbb_gdp <- gdp_impact("sbb", -4.17, -7.8, 1.2)
# MRA
res$mra_cpi <- cpi_impact("mra", -5.4, -7.4, 7)
res$mra_gdp <- gdp_impact("mra", -5.4, -7.4, 7)
# Persist all plots for downstream reporting.
write_rds(res, "results/sensitivity.RDS")
|
481867d0fad83b6360c56a8b8e85c3cbec8efb49
|
0a92b4ff5d70a6473dce1bf93116227e3c8586c0
|
/man/plot_map_wqis.Rd
|
f4b77d8723b50f17bbd674c282035541c6b71dea
|
[
"Apache-2.0"
] |
permissive
|
bcgov/wqindex
|
a27524eea5d92d13d4ad781ceea234c01c57a06e
|
804ced921cda5e0301b8c743e5e8ee30f8428526
|
refs/heads/master
| 2021-04-27T09:49:51.477556
| 2020-12-16T23:09:55
| 2020-12-16T23:09:55
| 122,523,615
| 7
| 5
|
Apache-2.0
| 2020-04-15T00:34:19
| 2018-02-22T19:17:11
|
R
|
UTF-8
|
R
| false
| true
| 1,283
|
rd
|
plot_map_wqis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_map_wqis}
\alias{plot_map_wqis}
\title{Plot Map of Water Quality Index Categories.}
\usage{
plot_map_wqis(data, x = "Long", y = "Lat", size = 3, shape = 21,
keep = NULL, input_proj = NULL)
}
\arguments{
\item{data}{A data.frame of WQI values to plot.}
\item{x}{A string of the column in data to plot on the x axis.}
\item{y}{A string of the column in data to plot on the y axis.}
\item{size}{A number of the point size or string of the column in data
to represent by the size of points.}
\item{shape}{An integer of the point shape (permitted values are 21 to 25)
or string of the column in data to represent by the shape of points.}
\item{keep}{An optional character vector indicating which columns
in addition to x and y to keep before dropping duplicated rows to
avoid overplotting.}
\item{input_proj}{An optional valid proj4string. Defaults to
(\code{"+proj=longlat +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +no_defs"}).}
}
\description{
Creates a ggplot2 object with a polygon of British Columbia with
the Water Quality Index categories indicated by the fill colour of points.
}
\examples{
\dontrun{
demo(fraser)
}
}
\seealso{
\code{\link{plot_wqis}} and \code{\link{plot_map}}
}
|
281a8efb0ea825803ae65bd6e8c19b0c163b8aca
|
c547df6e3849fafb39062335578e6f52d10780e6
|
/src/20221206/20221206_challenges.R
|
2350d8c2819ba41fe4ae68de97aff4483957146d
|
[
"CC-BY-4.0"
] |
permissive
|
inbo/coding-club
|
974bec58103744c023d6a8da55880b9cc4f5a183
|
f2165b98fa495c9393dd3143638e08fbbb4889e6
|
refs/heads/main
| 2023-08-31T07:49:53.272744
| 2023-08-30T14:18:58
| 2023-08-30T14:18:58
| 168,415,968
| 7
| 12
| null | 2023-04-12T08:58:49
| 2019-01-30T21:11:16
|
HTML
|
UTF-8
|
R
| false
| false
| 2,025
|
r
|
20221206_challenges.R
|
library(tidyverse)
library(sf)
library(terra)
library(maptiles)
library(mapview)
library(leaflet)
library(htmltools)
library(leafem)
library(crosstalk)
library(DT)
## CHALLENGE 1 - Plots
# Plotting is still important. Let's warm-up by plotting some geospatial data.
# 1. GIS data (continuous variable)
# Vector data: protected areas as an sf data frame (GeoPackage).
natura2000 <- st_read("./data/20221206/20221206_protected_areas.gpkg")
# 2. Raster data (continuous variable)
nitrogen <- rast("./data/20221206/20221206_nitrogen.tif")
# 3. Raster data (categorical values)
# Land-use raster at 100 m resolution; cell values are integer codes 1-9.
lu_nara_2016 <- rast("./data/20221206/20221206_lu_nara_2016_100m.tif")
# Lookup table mapping each land-use code to its (Dutch) label and the colour
# used for plotting.
legend_land_use <- tibble( # a tibble is a "nicely printed" data.frame
  id = c(1:9),
  land_use = c(
    "Open natuur",
    "Bos",
    "Grasland",
    "Akker",
    "Urbaan",
    "Laag groen",
    "Hoog groen",
    "Water",
    "Overig"
  ),
  color = c(
    rgb(red = 223, green = 115, blue = 255, maxColorValue = 255),
    rgb(38, 115, 0, maxColorValue = 255),
    rgb(152, 230, 0, maxColorValue = 255),
    rgb(255, 255, 0, maxColorValue = 255),
    rgb(168, 0, 0, maxColorValue = 255),
    rgb(137, 205, 102, maxColorValue = 255),
    rgb(92, 137, 68, maxColorValue = 255),
    rgb(0, 197, 255, maxColorValue = 255),
    rgb(204, 204, 204, maxColorValue = 255)
  )
)
legend_land_use
## CHALLENGE 2 - static maps
## CHALLENGE 3 - dynamic maps
# read occurrences giant hogweed
occs_hogweed <- readr::read_tsv(
  file = "./data/20221206/20221206_gbif_occs_hogweed.txt",
  na = ""
)
# transform to sf spatial data.frame
# Coordinates are WGS84 longitude/latitude (EPSG:4326).
occs_hogweed <- st_as_sf(occs_hogweed,
                         coords = c("decimalLongitude", "decimalLatitude"),
                         crs = 4326)
# count number of "points" in Natura2000 areas
occs_in_areas <- st_contains(natura2000, occs_hogweed)
# get number of points in each polygon as a vector
# (equivalently: lengths(occs_in_areas), which returns integers)
natura2000$n_occs <- purrr::map_dbl(occs_in_areas, function(x) length(x))
# 3. link to image
img <- "https://raw.githubusercontent.com/inbo/coding-club/master/docs/assets/images/coding_club_logo.svg"
|
7e48dbe3c4f37abda96baa0d51f7df75a1e2aae8
|
8ff1d9fbd53f6337ffeade2edce27d0c057a6c17
|
/figures/Fig1PREOverlap.R
|
cc2df2bdb3f4faacda99839927ae47547be7b1d3
|
[
"Apache-2.0"
] |
permissive
|
robertstreeck/PolycombPaperR
|
39cd632d642c37de97e093e75ad67af3515acb36
|
fe37b8e0fd6b21ca50fd59a0d5d9292b2f416a8a
|
refs/heads/main
| 2023-06-25T08:20:58.208166
| 2021-07-20T10:39:12
| 2021-07-20T10:39:12
| 382,376,130
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,351
|
r
|
Fig1PREOverlap.R
|
library(GenomicRanges)
library(rtracklayer)
library(fmsb)
library(tidyverse)
library(rcartocolor)
# Seven-state chromatin model fit; provides `multi_chip_fit` used below.
load("data/fig1/SevenClassGenomeModel.Rdata")
# NOTE(review): absolute, user-specific path -- this only runs on the original
# author's machine; consider a relative path or a configurable location.
genome = read.delim("/Users/streeck/Genomes/DmelBDGP6.91/chrNameLength.txt", header = F, stringsAsFactors = F)
# Keep the first 7 sequences (presumably the main Drosophila chromosome
# arms -- TODO confirm against the chrNameLength file).
genome = genome[1:7,]
gr = GRanges(genome[,1], IRanges(1, as.integer(genome[,2])))
chr_lengths = gr@ranges@width
names(chr_lengths) = gr@seqnames
# Tile the genome in 200 bp bins and annotate each tile with its chromatin
# cluster from the model fit (tiles not covered by the fit stay NA).
tiled_genome = tileGenome(chr_lengths, tilewidth = 200, cut.last.tile.in.chrom = T)
tiled_genome$cluster = NA
tiled_genome$cluster[multi_chip_fit$excluded] = multi_chip_fit$Group
# Assign each genomic interval in DataSet to the chromatin-state cluster that
# is most frequent among the overlapping 200 bp genome tiles (majority vote).
#
# DataSet: data.frame whose columns include seqid (1st), start (2nd) and
#   end (3rd); start/end may arrive as character and are coerced to numeric.
# Relies on the file-level global `tiled_genome` (GRanges with a per-tile
# `cluster` annotation).
# Returns DataSet with an added character `Cluster` column (NA when no
# annotated tile overlaps); also prints the cluster tally as a side effect.
map_to_clusters = function(DataSet){
  DataSet[,2] = as.numeric(DataSet[,2])
  DataSet[,3] = as.numeric(DataSet[,3])
  DataSet$Cluster = NA
  DataSet_GR = GRanges(DataSet$seqid, ranges = IRanges(DataSet$start, DataSet$end))
  # seq_len() (not 1:length()) so a zero-row DataSet skips the loop cleanly.
  for (i in seq_len(nrow(DataSet))) {
    # Most frequent cluster label among tiles overlapping interval i.
    DataSet$Cluster[i] = names(sort(-table(subsetByOverlaps(tiled_genome, DataSet_GR[i], minoverlap = 1)$cluster)))[1]
  }
  print(table(DataSet$Cluster))
  return(DataSet)
}
# Map three published PRE (Polycomb Response Element) sets onto the chromatin
# clusters and collect their state labels for the radar plots.
load("data/PREs/Ederle_PRE_HC_dm6.Rdata")
Ederle_PRE_HC_dm6 = map_to_clusters(Ederle_PRE_HC_dm6)
RadarPlot = data.frame(Set = "Ederle_PRE_HC", state = Ederle_PRE_HC_dm6$Cluster)
load("data/PREs/Kahn_PRE_dm6.Rdata")
Kahn_PRE_dm6 = map_to_clusters(Kahn_PRE_dm6)
RadarPlot = rbind(RadarPlot, data.frame(Set = "Kahn_PRE", state = Kahn_PRE_dm6$Cluster))
load("data/PREs/Schwartz_PRE_dm6.Rdata")
Schwartz_PRE_dm6 = map_to_clusters(Schwartz_PRE_dm6)
RadarPlot = rbind(RadarPlot, data.frame(Set = "Schwartz_PRE", state = Schwartz_PRE_dm6$Cluster))
# Per-set fractions of each chromatin state, with numeric cluster ids replaced
# by the seven state names, spread to one column per PRE set.
RadarPlotSummary = RadarPlot %>%
  group_by(Set, state) %>%
  summarise(count = n()) %>%
  group_by(Set) %>%
  mutate(fraction = count/sum(count)) %>%
  mutate(state = c("EnhW", "Pc-I", "TEl", "EnhS", "Pc-H", "TSS", "Het")[as.numeric(state)]) %>%
  pivot_wider(state, values_from = fraction, names_from = Set, values_fill = 0)
# States absent from every set still need rows so all radars share 7 axes.
missing.state = data.frame(state = c("TEl", "Het"),
                           Ederle_PRE_HC = c(0,0),
                           Kahn_PRE = c(0,0),
                           Schwartz_PRE = c(0,0))
RadarPlotSummary = rbind(RadarPlotSummary, missing.state)
RadarPlotSummaryTrans = t(RadarPlotSummary[,2:4])
colnames(RadarPlotSummaryTrans) = RadarPlotSummary$state
# fmsb::radarchart() expects the first two rows to be the axis max (1) and
# min (0), followed by the data rows.
RadarPlotSummaryTrans = as.data.frame(rbind(matrix(1, nrow = 1, ncol = 7),
                                            matrix(0, nrow = 1, ncol = 7),
                                            RadarPlotSummaryTrans))
colors_border=rcartocolor::carto_pal(n = 4, "Bold")[1:3]
colors_in=rcartocolor::carto_pal(n = 4, "Bold")[1:3]
# Prepare title
mytitle <- c("Ederle et al.\n(high confidence)", "Kahn et al.", "Schwartz et al.")
plot.new()
# Split the screen in 3 parts
par(mar=rep(0.8,4))
par(mfrow=c(1,3))
## Fig1PREOverlabRadar.pdf
# One radar chart per PRE set; the "D0"/"80" suffixes add alpha to the hex
# colours for the outline and fill respectively.
for(i in 1:3){
  radarchart(RadarPlotSummaryTrans[c(1,2,i+2),], axistype=1,
             #custom polygon
             pcol=paste0(colors_in, "D0")[i] , pfcol=paste0(colors_in, "80")[i] , plwd=4, plty=1 ,
             #custom the grid
             cglcol="grey", cglty=1, axislabcol="grey", cglwd=0.8,
             #custom labels
             vlcex=0.8
  )
  title(main = mytitle[i], adj=0.5, line = -1.5)
  # Centre label: number of PREs in the set -- presumably hand-counted; TODO
  # confirm these match nrow() of each mapped set.
  text(x = 0, y = 0, c(157,200,170)[i])
}
|
2d92ef845a13d8faaf6e10fc0a474a0bb79cd076
|
b2eba40bbf555f706ad16b693e644fa09ce80222
|
/assets/R/optimization.R
|
ba32559c2fe6f3076d8e1e03c96121cb3414a50e
|
[] |
no_license
|
mlqmlq/STAT628
|
14a4da2295006a5267265231dca6d200b3d00487
|
3befc2329f05cbc18197d984b7708ce74b391e58
|
refs/heads/master
| 2023-08-05T07:40:42.325897
| 2021-07-13T17:30:29
| 2021-07-13T17:30:29
| 277,651,847
| 8
| 1
| null | 2021-09-28T05:55:00
| 2020-07-06T21:24:18
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,759
|
r
|
optimization.R
|
set.seed(0)
# Quadratic test problem f(x) = 0.5 x'Ax with diagonal A. The eigenvalues are
# spread log-uniformly and rescaled to [mu, L]; kappa = L/mu is the condition
# number of the problem.
mu = 0.01; L = 1; kappa = L/mu
n = 100
D = runif(n); D = 10^D; Dmin = min(D); Dmax = max(D)
# Rescale to [0, 1], then to [mu, L].
D = (D-Dmin) / (Dmax-Dmin)
D = mu + D*(L-mu)
A = diag(D)
# Random starting point; the minimiser of f is the origin.
x0 = runif(n, 0, 1)
x_star = rep(0, 100)
# Objective and its gradient (both read the global A).
f <- function(x) {
  0.5*t(x) %*% A %*% x
}
df <- function(x) {
  A %*% x
}
# Gradient descent with fixed step size 1/L.
#
# x0: starting point; x_star: known minimiser (used only in the stopping
#   rule f(x1) - f(x_star) <= e); L: Lipschitz constant of the gradient;
#   f/df: objective and gradient; e: suboptimality tolerance.
# Returns list(final iterate, iteration count, objective value trace).
GradientDescent <- function(x0, x_star, L, f, df, e = 1e-6) {
  iter <- 0
  value <- f(x0)
  x1 <- x0
  while (f(x1) - f(x_star) > e) {
    x1 <- x1 - (1/L)*df(x1)
    iter <- iter + 1
    value <- c(value, f(x1))
  }
  # BUG FIX: previously returned the *initial* point x0; return the final
  # iterate x1. Elements [[2]]/[[3]] are unchanged, so the plotting code
  # below is unaffected.
  return(list(x1, iter, value))
}
# Newton's method for the quadratic objective f(x) = 0.5 x'Ax.
#
# x0: starting point; x: the minimiser, used only in the stopping rule
#   f(x1) - f(x) <= e; f/df: objective and gradient; e: tolerance.
# Returns list(final iterate, iteration count, objective value trace).
# NOTE: the Hessian is taken from the global matrix A, which is exact for
# this quadratic problem.
Newton <- function(x0, x, f, df, e = 1e-6) {
  iter <- 0
  value <- f(x0)
  x1 <- x0
  # Hoisted out of the loop: the inverse Hessian never changes.
  A_inv <- solve(A)
  # BUG FIX: the stopping rule previously read the global x_star instead of
  # the parameter x (they coincide for the call in this script).
  while (f(x1) - f(x) > e) {
    x1 <- x1 - A_inv %*% df(x1)
    iter <- iter + 1
    value <- c(value, f(x1))
  }
  # BUG FIX: previously returned the initial point x0 rather than the final
  # iterate x1.
  return(list(x1, iter, value))
}
# Nesterov's accelerated gradient method for an m-strongly-convex,
# L-smooth objective.
#
# x0: starting point; x_star: known minimiser (stopping rule only);
# L, m: smoothness and strong-convexity constants; f/df: objective and
# gradient; e: suboptimality tolerance.
# Returns list(final iterate, iteration count, objective value trace).
Nesterov <- function(x0, x_star, L, m, f, df, e = 1e-6) {
  iter <- 0
  value <- f(x0)
  alpha <- 1/L
  # Constant momentum coefficient for the strongly convex setting.
  beta <- (sqrt(L) - sqrt(m))/(sqrt(L)+sqrt(m))
  x1 <- x0
  while (f(x1) - f(x_star) > e) {
    # Momentum (look-ahead) point, then a gradient step from it.
    y1 <- x1 + beta*(x1 - x0)
    x0 <- x1
    x1 <- y1 - alpha*df(y1)
    iter <- iter + 1
    value <- c(value, f(x1))
  }
  # BUG FIX: previously returned x0, which inside this loop holds the
  # *previous* iterate; return the final iterate x1 instead.
  return(list(x1, iter, value))
}
# Run each method on the same problem. GD2 deliberately uses an over-large
# smoothness constant (smaller step) for comparison.
out_GD_1 <- GradientDescent(x0, x_star, L = 1, f, df)
out_GD_2 <- GradientDescent(x0, x_star, L = 5, f, df)
out_Nest <- Nesterov(x0, x_star, L = 1, m = 0.01, f, df)
out_Newton <- Newton(x0, x_star, f, df)
# Plots:
# Convergence curves: log10 of the objective trace ([[3]] of each result)
# against the iteration count.
plot(0, type="n", xlab="number of iterations", ylab="log of function differences", xlim=c(0, 500), ylim=c(-7, 1))
lines(log(out_GD_1[[3]], 10))
lines(log(out_GD_2[[3]], 10), col = 'blue')
lines(log(out_Nest[[3]], 10), col = 'red')
lines(log(out_Newton[[3]], 10), col = 'purple')
legend("topright", legend = c("GD1", "GD2", "Nest", "Newton"), lty = rep(1,4), col = c("black", "blue", "red", "purple"))
|
56199c3bcd474de80fbb034491f2e1cb2e896416
|
e3ca1bec4bcaf4582f8dab32f0d58b13fb30c8df
|
/global.R
|
0fa1a1e71aca113e2792c6805e00944f1b4f46ca
|
[] |
no_license
|
matt-dray/dehex-challenge
|
9cdc6014df1f3ed042c434fe40eeb7d60c9daa27
|
331e3be4c13cb5fd4acdbe744588997ad8c1f0cc
|
refs/heads/main
| 2023-07-23T05:18:52.069418
| 2021-08-26T12:00:27
| 2021-08-26T12:00:27
| 395,112,733
| 1
| 0
| null | 2021-08-24T16:04:06
| 2021-08-11T20:46:51
|
R
|
UTF-8
|
R
| false
| false
| 46
|
r
|
global.R
|
library(shiny)
library(dehex)
library(bslib)
|
7b262de89dbbc97fc5196f3450187930af7416c4
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/RNOmni/R/BAT.R
|
314c44a7f85307ff250732be3853d8be5556b92d
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,795
|
r
|
BAT.R
|
# Purpose: Basic score test
# Updated: 2020/10/03
#' Partition Data
#'
#' Splits the residuals and covariates into the subjects whose genotype is
#' observed and those whose genotype is missing.
#'
#' @param e Numeric residual vector.
#' @param g Genotype vector.
#' @param X Model matrix of covariates.
#' @return List containing:
#' \itemize{
#'   \item "g_obs", observed genotype vector.
#'   \item "X_obs", covariates for subjects with observed genotypes.
#'   \item "X_mis", covariates for subjects with missing genotypes
#'     (\code{NULL} when no genotype is missing).
#'   \item "e_obs", residuals for subjects with observed genotypes.
#' }
PartitionData <- function(e, g, X) {
  # Represent genotypes and residuals as single-column matrices.
  g <- matrix(g, ncol = 1)
  e <- matrix(e, ncol = 1)
  obs <- !is.na(g)
  if (any(!obs)) {
    # At least one genotype missing: subset by the observation indicator.
    out <- list(
      "g_obs" = g[obs, , drop = FALSE],
      "X_obs" = X[obs, , drop = FALSE],
      "X_mis" = X[!obs, , drop = FALSE],
      "e_obs" = e[obs, , drop = FALSE]
    )
  } else {
    # Fully observed: pass everything through; nothing to set aside.
    out <- list(
      "g_obs" = g,
      "X_obs" = X,
      "X_mis" = NULL,
      "e_obs" = e
    )
  }
  out
}
# -----------------------------------------------------------------------------
#' Score Statistics
#'
#' Computes the score test statistic for a single SNP, restricted to the
#' subjects with observed genotypes.
#'
#' @param e Numeric residual vector.
#' @param g Genotype vector.
#' @param X Model matrix of covariates.
#' @param v Residual variance.
#' @return Numeric vector containing the "score" statistic, standard error "se",
#'   "z", and "p" value.
#'
#' @importFrom stats pchisq
ScoreStat <- function(e, g, X, v) {
  # Drop subjects with missing genotypes.
  parts <- PartitionData(e = e, g = g, X = X)
  g_obs <- parts$g_obs
  X_obs <- parts$X_obs
  # Score: g'e / v on the observed subjects.
  score <- as.numeric(matIP(g_obs, parts$e_obs)) / v
  # Efficient information via the Schur complement of the covariate block.
  eff_info <- as.numeric(
    SchurC(matIP(g_obs, g_obs), matIP(X_obs, X_obs), matIP(g_obs, X_obs))
  )
  se <- sqrt(eff_info / v)
  z_stat <- score / se
  # Two-sided p-value from the 1-df chi-square distribution of z^2.
  p <- pchisq(q = z_stat^2, df = 1, lower.tail = FALSE)
  c(
    "score" = score,
    "se" = se,
    "z" = z_stat,
    "p" = p
  )
}
#' Basic Association Score Test
#'
#' Fits the covariate-only null model once, then applies the score test to
#' every column (SNP) of the genotype matrix.
#'
#' @param y Numeric phenotype vector.
#' @param G Genotype matrix with observations as rows, SNPs as columns.
#' @param X Model matrix of covariates.
#' @return Numeric matrix, with 1 row per SNP, containing these columns:
#' \itemize{
#'   \item "score", the score statistic.
#'   \item "se", its standard error.
#'   \item "z", the Z statistic.
#'   \item "p", the p-value.
#' }
#'
#' @importFrom plyr aaply
BAT.ScoreTest <- function(y, G, X) {
  # Residuals and residual variance from the null (no-genotype) model.
  null_fit <- fitOLS(y = y, X = X)
  resid_vec <- matrix(null_fit$Resid, ncol = 1)
  resid_var <- null_fit$V
  # One score statistic per SNP.
  per_snp <- function(g) {
    ScoreStat(e = resid_vec, g = g, X = X, v = resid_var)
  }
  aaply(.data = G, .margins = 2, .fun = per_snp)
}
# -----------------------------------------------------------------------------
#' Wald Statistics
#'
#' Computes the Wald statistic for a single SNP by refitting the model with
#' the genotype included as the first regressor.
#'
#' @param y Numeric phenotype vector.
#' @param g Genotype vector.
#' @param X Model matrix of covariates.
#' @return Numeric vector containing the "wald" statistic (the genotype
#'   coefficient), its standard error "se", "z", and "p" value.
#'
#' @importFrom stats pchisq
WaldStat <- function(y, g, X) {
  # Split data: drop subjects with missing genotypes (y plays the role of
  # the residual argument here).
  split_data <- PartitionData(e = y, g = g, X = X)
  # Fit full model, genotype first so Beta[1] is the genotype effect.
  fit1 <- fitOLS(y = split_data$e_obs, X = cbind(split_data$g_obs, split_data$X_obs))
  # Genotype coefficient.
  bg <- fit1$Beta[1]
  # Variance of the genotype coefficient (inverse information, [1, 1]).
  eff_info_inv <- as.numeric(matInv(fit1$Ibb)[1, 1])
  # Standard error.
  se <- sqrt(eff_info_inv)
  # Z statistic.
  z_stat <- bg / se
  # Chi statistic.
  chi_stat <- z_stat^2
  # Two-sided p-value from the 1-df chi-square distribution.
  p <- pchisq(q = chi_stat, df = 1, lower.tail = FALSE)
  # Output.
  out <- c(
    "wald" = bg,
    "se" = se,
    "z" = z_stat,
    "p" = p
  )
  return(out)
}
#' Basic Association Wald Test
#'
#' Applies \code{WaldStat} to every column (SNP) of the genotype matrix.
#'
#' @param y Numeric phenotype vector.
#' @param G Genotype matrix with observations as rows, SNPs as columns.
#' @param X Model matrix of covariates.
#' @return Numeric matrix, with 1 row per SNP, containing these columns:
#' \itemize{
#'   \item "wald", the Wald statistic (genotype coefficient).
#'   \item "se", its standard error.
#'   \item "z", the Z statistic.
#'   \item "p", the p-value.
#' }
#'
#' @importFrom plyr aaply
BAT.WaldTest <- function(y, G, X) {
  # Column-wise application; one row of statistics per SNP.
  out <- aaply(.data = G, .margins = 2, .fun = function(g) {
    WaldStat(y = y, g = g, X = X)
  })
  return(out)
}
# -----------------------------------------------------------------------------
#' Basic Input Checks
#'
#' Validates the phenotype, genotype, and covariate inputs, stopping with an
#' informative message on the first violation. Called for its side effect.
#'
#' @param y Numeric phenotype vector.
#' @param G Genotype matrix with observations as rows, SNPs as columns.
#' @param X Covariate matrix.
BasicInputChecks <- function(y, G, X) {
  # Type checks, in argument order.
  if (!is.vector(y)) {
    stop("A numeric vector is expected for y.")
  }
  if (!is.matrix(G)) {
    stop("A numeric matrix is expected for G.")
  }
  if (!is.matrix(X)) {
    stop("A numeric matrix is expected for X.")
  }
  # Genotypes may be missing, but phenotypes and covariates may not.
  if (anyNA(y) || anyNA(X)) {
    stop("Please exclude observations missing phenotype or covariate information.")
  }
}
# -----------------------------------------------------------------------------
#' Basic Association Test
#'
#' Tests each locus (column) of \code{G} for association with the
#' untransformed phenotype \code{y}, adjusting for the model matrix \code{X}.
#'
#' @param y Numeric phenotype vector.
#' @param G Genotype matrix with observations as rows, SNPs as columns.
#' @param X Model matrix of covariates and structure adjustments. Should include
#'   an intercept. Omit to perform marginal tests of association.
#' @param test Either Score or Wald.
#' @param simple Return the p-values only?
#' @return If \code{simple = TRUE}, returns a vector of p-values, one for each
#'   column of \code{G}. If \code{simple = FALSE}, returns a numeric matrix,
#'   including the Wald or Score statistic, its standard error, the Z-score,
#'   and the p-value.
#'
#' @export
#' @seealso
#' \itemize{
#'   \item Direct INT \code{\link{DINT}}
#'   \item Indirect INT \code{\link{IINT}}
#'   \item Omnibus INT \code{\link{OINT}}
#' }
#'
#' @examples
#' set.seed(100)
#' # Design matrix
#' X <- cbind(1, rnorm(1e3))
#' # Genotypes
#' G <- replicate(1e3, rbinom(n = 1e3, size = 2, prob = 0.25))
#' storage.mode(G) <- "numeric"
#' # Phenotype
#' y <- as.numeric(X %*% c(1, 1)) + rnorm(1e3)
#' # Association test
#' p <- BAT(y = y, G = G, X = X)
BAT <- function(y, G, X = NULL, test = "Score", simple = FALSE) {
  # Default design: intercept only (marginal tests).
  if (is.null(X)) {
    X <- array(1, dim = c(length(y), 1))
  }
  BasicInputChecks(y, G, X)
  # Dispatch to the requested test statistic.
  if (test == "Score") {
    stats <- BAT.ScoreTest(y = y, G = G, X = X)
  } else if (test == "Wald") {
    stats <- BAT.WaldTest(y = y, G = G, X = X)
  } else {
    stop("Select test from among: Score, Wald.")
  }
  # A single SNP comes back as a vector; promote it to a one-row matrix.
  if (!is.matrix(stats)) {
    stats <- matrix(stats, nrow = 1)
  }
  # SNP labels: column names of G, falling back to the column index.
  snp_names <- colnames(G)
  if (is.null(snp_names)) {
    snp_names <- seq_len(ncol(G))
  }
  if (simple) {
    # p-values only (fourth column).
    out <- stats[, 4]
    names(out) <- snp_names
  } else {
    out <- stats
    colnames(out) <- c(test, "SE", "Z", "P")
    rownames(out) <- snp_names
  }
  return(out)
}
|
a4ff468d86a1edc0e77f27dc1a2ffd2fa22694e3
|
03dcfc60d68155db4be09639174c1e73dc0340de
|
/cachematrix.R
|
4823d65d9329658ae049bd56423ae7a3bbedd1fe
|
[] |
no_license
|
yleporcher/ProgrammingAssignment2
|
0e62fee1e1c479095b2947a08bfc499349c64e9b
|
824b6bc084d9fb931280e3f5644bfb4e3ce57487
|
refs/heads/master
| 2021-05-29T02:14:27.422744
| 2015-06-21T16:27:49
| 2015-06-21T16:27:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
r
|
cachematrix.R
|
## makeCacheMatrix() and cacheSolve() work together to compute the inverse of
## a matrix once and serve it from a cache on subsequent requests, avoiding
## un-necessary re-computation.

## Build a cache-aware wrapper around matrix x. The returned list exposes
## get/set for the matrix itself and getmatrix/setmatrix for the cached
## inverse; set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  set <- function(y) {
    x <<- y
    # New matrix: drop the stale cached inverse.
    inverse_cache <<- NULL
  }
  get <- function() x
  setmatrix <- function(solve) inverse_cache <<- solve
  getmatrix <- function() inverse_cache
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## Return the inverse of the matrix wrapped by x (built by makeCacheMatrix).
## Serves the cached inverse when one exists (announcing the cache hit);
## otherwise computes it with solve(), stores it, and returns it. Any extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmatrix()
  if (is.null(cached)) {
    # Cache miss: invert and remember the result for next time.
    inv <- solve(x$get(), ...)
    x$setmatrix(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
9518d78ceaeed2207addaa42942f22a66df41933
|
4b3688af9ed5dfe92ccd51c1c7c851d9bdcce4a1
|
/plot1.R
|
caefb7e7595a5bc22a6f33257fc8cb75f863fe35
|
[] |
no_license
|
stevejburr/ExData_Plotting1
|
b0636e44a0a091e8c434c7cf89fbf1b17337dc3e
|
8f661e7dd8b6daf0408761b63499c82fab1e77f3
|
refs/heads/master
| 2020-12-25T11:32:39.308809
| 2014-06-05T08:45:59
| 2014-06-05T08:45:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 800
|
r
|
plot1.R
|
# Read the full household power consumption file from the working directory.
# Semicolon-separated; "?" marks missing values. All columns are read as
# character (numeric colClasses fails on this data; character also avoids
# factors) and converted explicitly below.
Data<-read.csv2("./household_power_consumption.txt", na.strings = "?",colClasses=c("character"))
# Keep only 1-2 Feb 2007 and replace the large file in memory with the subset.
DataDay1<-subset(Data, Date=="1/2/2007")
DataDay2<-subset(Data, Date=="2/2/2007")
Data<-rbind(DataDay1,DataDay2)
# Convert the variable we want to plot to numeric.
Data$Global_active_power<-as.numeric(as.character(Data$Global_active_power))
# 480x480 PNG export with a transparent background.
png(file = "plot1.png", width =480, height =480, bg = "transparent")
# Histogram of global active power (kilowatts).
# NOTE(review): the inner as.numeric() is redundant -- the column was already
# converted above.
with(Data, hist(as.numeric(Global_active_power),col="red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
# Close the connection to the output file.
dev.off()
|
fb8ffcf7704d67235a124abccc942214056245f1
|
01e6f98609708ebdfd6d1db5fda9cb443f9f7856
|
/man/date-zone.Rd
|
04c4237b6670233a13dd43220a334ea38e117382
|
[
"MIT"
] |
permissive
|
isabella232/clock-2
|
3258459fe4fc5697ce4fb8b54d773c5d17cd4a71
|
1770a69af374bd654438a1d2fa8bdad3b6a479e4
|
refs/heads/master
| 2023-07-18T16:09:11.571297
| 2021-07-22T19:18:14
| 2021-07-22T19:18:14
| 404,323,315
| 0
| 0
|
NOASSERTION
| 2021-09-08T13:28:17
| 2021-09-08T11:34:49
| null |
UTF-8
|
R
| false
| true
| 2,180
|
rd
|
date-zone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/posixt.R
\name{date-zone}
\alias{date-zone}
\alias{date_zone}
\alias{date_set_zone}
\title{Get or set the time zone}
\usage{
date_zone(x)
date_set_zone(x, zone)
}
\arguments{
\item{x}{\verb{[POSIXct / POSIXlt]}
A date-time vector.}
\item{zone}{\verb{[character(1)]}
A valid time zone to switch to.}
}
\value{
\itemize{
\item \code{date_zone()} returns a string containing the time zone.
\item \code{date_set_zone()} returns \code{x} with an altered printed time. The
underlying duration is not changed.
}
}
\description{
\itemize{
\item \code{date_zone()} gets the time zone.
\item \code{date_set_zone()} sets the time zone. This retains the \emph{underlying
duration}, but changes the \emph{printed time} depending on the zone that is
chosen.
}
}
\details{
This function is only valid for date-times, as clock treats R's Date class as
a \emph{naive} type, which always has a yet-to-be-specified time zone.
}
\examples{
library(magrittr)
# Cannot set or get the zone of Date.
# clock assumes that Dates are naive types, like naive-time.
x <- as.Date("2019-01-01")
try(date_zone(x))
try(date_set_zone(x, "America/New_York"))
x <- as.POSIXct("2019-01-02 01:30:00", tz = "America/New_York")
x
date_zone(x)
# If it is 1:30am in New York, what time is it in Los Angeles?
# Same underlying duration, new printed time
date_set_zone(x, "America/Los_Angeles")
# If you want to retain the printed time, but change the underlying duration,
# convert to a naive-time to drop the time zone, then convert back to a
# date-time. Be aware that this requires that you handle daylight saving time
# irregularities with the `nonexistent` and `ambiguous` arguments to
# `as.POSIXct()`!
x \%>\%
as_naive_time() \%>\%
as.POSIXct("America/Los_Angeles")
y <- as.POSIXct("2021-03-28 03:30:00", "America/New_York")
y
y_nt <- as_naive_time(y)
y_nt
# Helsinki had a daylight saving time gap where they jumped from
# 02:59:59 -> 04:00:00
try(as.POSIXct(y_nt, "Europe/Helsinki"))
as.POSIXct(y_nt, "Europe/Helsinki", nonexistent = "roll-forward")
as.POSIXct(y_nt, "Europe/Helsinki", nonexistent = "roll-backward")
}
|
db6f70814c37fa8fca0fb9ad53c601a98c1c6241
|
b31c65dca75018c34c3846a4fa197c9893ec113d
|
/man/CovMat.Design.Rd
|
95588ee2f6307697e9a48d46ac077cac212fa043
|
[] |
no_license
|
cran/samplingDataCRT
|
5211ff75c8b3c843b72f6ea4f5bf40eb4203b33a
|
a0b6ea5a69c848ff0ebfebb1863587893b0a909b
|
refs/heads/master
| 2021-01-09T06:46:45.937336
| 2017-02-06T13:28:31
| 2017-02-06T13:28:31
| 81,090,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,244
|
rd
|
CovMat.Design.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CovarianceMatrix.R
\name{CovMat.Design}
\alias{CovMat.Design}
\title{covariance matrix for the multivariate normal distributed variables}
\usage{
CovMat.Design(K, J, I, sigma.1.q, sigma.2.q = NULL, sigma.3.q)
}
\arguments{
\item{K}{number of timepoints or measurments (design parameter)}
\item{J}{number of subjects}
\item{I}{number of clusters (design parameter)}
\item{sigma.1.q}{variance of the lowest level (error variance or within subject variance)}
\item{sigma.2.q}{secound level variance (e.g. within cluster and between subject variance), by default NULL and then a cross-sectional type}
\item{sigma.3.q}{third level variance (e.g. between cluster variance)}
}
\value{
V covariance matrix
}
\description{
covariance matrix of the normal distribution under cluster randomized study type given a design and a type
}
\examples{
K<-6 #measurement (or timepoints)
I<-10 #Cluster
J<-2 #number of subjects
sigma.1<-0.1
sigma.3<-0.9
CovMat.Design(K, J, I,sigma.1.q=sigma.1, sigma.3.q=sigma.3)
sigma.1<-0.1
sigma.2<-0.4
sigma.3<-0.9
CovMat.Design(K, J, I,sigma.1.q=sigma.1, sigma.2.q=sigma.2, sigma.3.q=sigma.3)
}
|
5752f72eb61da8be722f8b183800423f8b8ab61a
|
51349029aa0676a8e31c638465469dc9cd33afe9
|
/R/species.R
|
9c4b424192994845742811edb5f2310d9b9679c7
|
[] |
no_license
|
weecology/ratdat
|
07327836b89e0ce5312f02e89b39054e91461c88
|
d7599f3a44d7a338b06d677dc5b789f59385bb91
|
refs/heads/main
| 2022-05-24T04:18:54.643356
| 2022-04-05T14:17:04
| 2022-04-05T14:17:04
| 122,650,282
| 2
| 4
| null | 2023-08-29T14:17:12
| 2018-02-23T17:12:34
|
R
|
UTF-8
|
R
| false
| false
| 405
|
r
|
species.R
|
#' Species data.
#'
#' Data on species captured at the Portal Project
#'
#' @source Portal Project Teaching Database,
#' \doi{10.6084/m9.figshare.1314459}
#' @format A data frame with columns:
#' \describe{
#' \item{species_id}{Species identifier}
#' \item{genus}{The genus of the species}
#' \item{species}{The latin species name}
#' \item{taxa}{General taxonomic category of the species}
#' }
"species"
|
4b31e3ea9ca1f010d4bbdb6d035690edbc9485db
|
d3968caa658b72c858fb0765418c63d517d73de7
|
/scripts/visualization_ramon_y_cajal.R
|
eeba963e15077b1e9406bff14763ae032ee412e8
|
[] |
no_license
|
dernapo/ramon_y_cajal
|
50f5b5ee194ad67d9a5e7a59bb55cdc3b3479a38
|
284bbf162f2e5b692086a93d6a52101695c1ddbc
|
refs/heads/master
| 2022-12-19T19:04:39.982617
| 2020-10-09T07:33:04
| 2020-10-09T07:33:04
| 302,559,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,532
|
r
|
visualization_ramon_y_cajal.R
|
#####################################################
## Prepare several charts from the data
#####################################################

## Load libraries #####
pacman::p_load(data.table, here, ggplot2, hrbrthemes,
               patchwork, ggtext)

## Set theme ####
# FIX: the original called theme_set() three times in a row; only the last
# call takes effect, so the shadowed calls (theme_ipsum_rc(), theme_ipsum())
# were dead code and have been removed.
theme_set(theme_ft_rc())

## Load data ####
# read the most recent (alphabetically last) csv file found in data/
rc_dt <- fread(max(list.files(path = here("data"), pattern = ".*csv$", full.names = TRUE)))

## Visualization ####
# contracts per area, most frequent first
area_graph <- rc_dt[, .(count = .N), area][order(-count)] %>%
  ggplot(aes(y = reorder(area, count), x = count)) +
  geom_col() +
  labs(title = "Por Área",
       x = NULL,
       y = NULL)

# contracts per organisation, top 19 only
organismo_graph <- rc_dt[, .(count = .N), organismo][order(-count)][1:19] %>%
  ggplot(aes(y = reorder(organismo, count), x = count)) +
  geom_col() +
  labs(title = "Por Organismo",
       subtitle = "top 19",
       x = NULL,
       y = NULL)

## Put the charts together
(organismo_graph / area_graph) + plot_annotation(
  title = "Contratos Ramón y Cajal",
  subtitle = "Ayudas para contratos convocatoria 2019",
  caption = paste0("Fuente: https://www.ciencia.gob.es/\nAutor: @dernapo\nDate: ", format(Sys.Date(), "%d %b %y")),
  theme = theme(plot.title = element_markdown(lineheight = 1.1),
                plot.subtitle = element_markdown(lineheight = 1.1))
)

## Save the visualization ####
# ggsave() with no plot argument saves the last plot displayed
ggsave(here("output", paste0(format(Sys.time(), "%Y%m%d"), "_ramonycajal.png")),
       height = 12,
       width = 12)
|
1204ba990166dc84b9516cde03f3cd15fbc928e2
|
e4a5ffbf0b6d567b9c9dc38f3664a995e98db579
|
/R/functions.R
|
ff7abd376c4c63eb9ceb631e0ec70ada24039f0c
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
RJManners/ClimMob-analysis
|
be659caa521c39088bedcffc2968296679206cf8
|
c2d5c8481a73302ea747f2c54df8424cfccbae8b
|
refs/heads/master
| 2023-08-05T15:58:16.926883
| 2021-10-05T10:28:45
| 2021-10-05T10:28:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,082
|
r
|
functions.R
|
###Functions for Climmob Reporting Analysis
#' Validate the class of objects generated in the tryCatch(s)
#'
#' @param x any object, typically the result of a tryCatch()
#' @return TRUE if x inherits from class "error", FALSE otherwise
any_error <- function(x){
  # inherits() is the idiomatic equivalent of isTRUE("error" %in% class(x))
  # and is robust when x carries several classes
  inherits(x, "error")
}
#' Source only lines `start` to `end` of an R script
#'
#' @param file path to an R script
#' @param start first line to evaluate (1-based, inclusive)
#' @param end last line to evaluate (inclusive)
#' @param ... further arguments passed on to source(), e.g. local = an environment
source2 <- function(file, start, end, ...) {
  # read exactly the requested line range, one string per line
  selected <- scan(file, what = character(), skip = start - 1,
                   nlines = end - start + 1, sep = '\n')
  # glue the lines back into a single script and evaluate it
  script <- paste(selected, collapse = '\n')
  source(textConnection(script), ...)
}
#' Plot map using leaflet
#' @param data a data frame
#' @param xy index of data for the longitude and latitude coordinates (in that order)
#' @param make.clusters logical, if TRUE coordinates are aggregated by a defined cluster size
#' @param cut.tree numeric, to define the cluster size when make.clusters = TRUE
#' @param map_provider the name of the provider (see http://leaflet-extras.github.io/leaflet-providers/preview/
#'  and https://github.com/leaflet-extras/leaflet-providers)
#' @param minimap logical, TRUE to add the minimap
#' @param minimap_position the position of the mini map
#' @param ... currently unused inside the function body
#' @return a leaflet map widget with one marker per (possibly clustered) coordinate
#' @examples
#' lonlat <- data.frame(lon = c(15.6, 16.7, 15.55, 15.551),
#' lat = c(65.8, 66.3, 66.25, 66.251))
#'
#' p <- plot_map(lonlat, xy = c(1,2), cut.tree = 0.05)
plot_map <- function(data,
                     xy = NULL,
                     make.clusters = TRUE,
                     cut.tree = 0.05,
                     map_provider = "Esri.WorldImagery",
                     minimap = TRUE,
                     minimap_position = "bottomright",
                     ...){
  d <- data[, xy]
  # coerce to numeric
  d[1:2] <- lapply(d[1:2], as.numeric)
  # remove NAs
  d <- stats::na.omit(d)
  nd <- dim(d)[[1]]
  # abort early when nothing is left after coercion / NA removal
  if (isTRUE(nd == 0)) {
    stop("No remaining coordinates to plot. ",
         "Please check for NAs or if the values can be coerced to numeric. \n")
  }
  names(d) <- c("lon","lat")
  if (isTRUE(make.clusters)) {
    # to ensure the privacy of participants location
    # we can put the lonlat info into clusters of 0.5 resolution
    # (hierarchical clustering of the coordinate distances, cut at height cut.tree)
    h <- stats::dist(d)
    h <- stats::hclust(h)
    h <- stats::cutree(h, h = cut.tree)
    # split the d by each defined cluster
    d <- split(d, h)
    # and take the mean
    d <- lapply(d, function(x) {
      colMeans(x)
    })
    # back to data frame
    d <- do.call("rbind", d)
    d <- as.data.frame(d)
    names(d) <- c("lon","lat")
  }
  map <- leaflet::leaflet(data = d,
                          options = leaflet::leafletOptions(maxZoom = 17))
  # pad the bounding box by 0.25 degrees on every side
  map <- leaflet::fitBounds(map = map, lng1 = min(d$lon)-0.25, lat1 = min(d$lat)-0.25,
                            lng2 = max(d$lon)+0.25, lat2 = max(d$lat)+0.25)
  map <- leaflet::addProviderTiles(map = map,
                                   provider = map_provider,
                                   options = leaflet::providerTileOptions(maxNativeZoom = 17))
  #map <- leaflet::addCircleMarkers(map = map)
  map <- leaflet::addMarkers(map)
  if (isTRUE(minimap)) {
    map <- leaflet::addMiniMap(map = map, position = minimap_position,
                               width = 100, height = 100)
  }
  # disable the zoom +/- control on the rendered widget
  map$x$options = list("zoomControl" = FALSE)
  return(map)
}
# Linearly rescale a numeric vector onto [0, 1].
# NOTE(review): yields NaN when all values are equal (0 / 0).
scale01 <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Refit a Plackett-Luce model separately for each level of a grouping factor
# and plot the mean-centred item estimates with quasi-standard-error bars.
#
# NOTE(review): the body updates the global objects `mod1` and `R`, not the
# `model` argument -- `model` is never used; confirm whether `mod1` should be
# `model`.
# NOTE(review): the bare `out` before the ggplot call has no effect; the
# function returns the ggplot object (last evaluated expression).
byfac<-function(model,split){
  split<-as.factor(split)
  out<-NULL
  for(i in 1:nlevels(split)){
    # refit on the subset of rankings belonging to level i
    mod_t<-update(mod1,rankings=R[split==levels(split)[i],])
    # qvcalc()$qvframe holds the estimates plus quasi standard errors
    tmp<-data.frame(var=rownames(qvcalc(mod_t)$qvframe),split=levels(split)[i],qvcalc(mod_t)$qvframe)
    # centre estimates within each level so levels are comparable
    tmp$estimate_adj<-tmp$estimate-mean(tmp$estimate)
    out<-rbind(out,tmp)
  }
  out
  # error bars at +/- qnorm(0.92) * quasiSE, dodged by split level
  ggplot(data=out,aes(y=estimate_adj,x=var,ymax=estimate_adj+qnorm(0.92)*quasiSE,
                      ymin=estimate_adj-qnorm(0.92)*quasiSE,col=split))+
    geom_errorbar(width=0.2,position = position_dodge(width=0.25))+
    geom_point(position = position_dodge(width=0.25))
}
# Horizontal bar chart of win proportions.
# Expects a data.frame `x` with columns `wins` (proportions in [0, 1]) and
# `var` (item labels). Bars are filled on a red-white-green gradient centred
# at 0.5 and the value axis is labelled in percent.
win_plot<-function(x){
  p1<- ggplot(data=x,aes(y=wins,fill=wins,x=var))+
    geom_bar(stat="identity",col="black")+
    coord_flip()+
    scale_y_continuous(breaks=seq(0,1,by=0.1),labels=scales::percent)+
    scale_fill_gradient2(low="red",mid="white",high="forestgreen",limits=c(0,1),midpoint=0.5)
  return(p1)
}
# Likelihood-ratio test of a fitted PlackettLuce model against its null model.
#
# @param model a fitted model of class "PlackettLuce"
# @return a two-row data.frame (NULL model vs fitted model) with the
#   log-likelihoods, degrees of freedom, the chi-squared statistic and its
#   p-value in column "Pr(>Chisq)"
anova.PL <- function(model){
  # FIX: use inherits() instead of `class(model) != "PlackettLuce"`. class()
  # can return a vector of length > 1, which makes the scalar comparison in
  # `if` unreliable (and an error in recent versions of R).
  if(!inherits(model, "PlackettLuce")){
    stop("Model type is not Plackett-Luce")
  }
  LLs <- c(model$null.loglik, model$loglik)
  dfs <- c(model$df.null, model$df.residual)
  df_diff <- (-1) * diff(dfs)
  df_LL <- (-1) * diff(LLs)
  # -2 * log-likelihood ratio is asymptotically chi-squared with df_diff dof
  p <- 1 - pchisq(-2 * df_LL, df_diff)
  x <- data.frame(model = c("NULL", deparse(substitute(model))),
                  "logLikelihood" = LLs,
                  DF=dfs,
                  "Statistic" = c(NA, -2 * df_LL),
                  "Pr(>Chisq)" = c(NA, p),
                  check.names = FALSE,
                  stringsAsFactors = FALSE)
  return(x)
}
# Panel-generating function for the terminal nodes of a psychotree
# model-based tree: returns a function that draws, for a given node, the item
# worths (or coefficients) as a dotted line/point plot in a grid viewport.
# This is a tweaked copy of psychotree's node plot: items are reordered by
# colSums(cf) and the x-axis labels are rotated 90 degrees at cex = 0.4.
# NOTE(review): relies on the unexported psychotree:::apply_to_models and on
# partykit/grid helpers (nodeids, id_node, info_node, viewport, ...) being
# attached -- confirm against the calling environment.
node_terminal1<-
  function (mobobj, id = TRUE, worth = TRUE, names = TRUE, abbreviate = TRUE,
            index = TRUE, ref = TRUE, col = "black", refcol = "lightgray",
            bg = "white", cex = 0.5, pch = 19, xscale = NULL, yscale = NULL,
            ylines = 1.5)
  {
    # collect worths (or raw coefficients) for every node of the tree
    node <- nodeids(mobobj, terminal = FALSE)
    cf <- psychotree:::apply_to_models(mobobj, node, FUN = function(z) if (worth)
      worth(z)
      else coef(z, all = FALSE, ref = TRUE))
    cf <- do.call("rbind", cf)
    rownames(cf) <- node
    # sort items by their column totals so the panels share a common order
    cf<-cf[,order(colSums(cf))]
    mod <- psychotree:::apply_to_models(mobobj, node = 1L, FUN = NULL, drop = TRUE)
    if (!worth) {
      # express coefficients relative to the chosen reference item
      if (is.character(ref) | is.numeric(ref)) {
        reflab <- ref
        ref <- TRUE
      }
      else {
        reflab <- mod$ref
      }
      if (is.character(reflab))
        reflab <- match(reflab, if (!is.null(mod$labels))
          mod$labels
          else colnames(cf))
      cf <- cf - cf[, reflab]
    }
    # reference line: 1/#items for worths (equal preference), 0 for coefficients
    if (worth) {
      cf_ref <- 1/ncol(cf)
    }
    else {
      cf_ref <- 0
    }
    if (is.character(names)) {
      colnames(cf) <- names
      names <- TRUE
    }
    if (is.logical(abbreviate)) {
      # pick an abbreviation length from the longest label
      nlab <- max(nchar(colnames(cf)))
      abbreviate <- if (abbreviate)
        as.numeric(cut(nlab, c(-Inf, 1.5, 4.5, 7.5, Inf)))
      else nlab
    }
    colnames(cf) <- abbreviate(colnames(cf), abbreviate)
    if (index) {
      x <- 1:NCOL(cf)
      if (is.null(xscale))
        xscale <- range(x) + c(-0.1, 0.1) * diff(range(x))
    }
    else {
      x <- rep(0, length(cf))
      if (is.null(xscale))
        xscale <- c(-1, 1)
    }
    if (is.null(yscale))
      yscale <- range(cf) + c(-0.1, 0.1) * diff(range(cf))
    # the returned closure draws one terminal-node panel
    rval <- function(node) {
      idn <- id_node(node)
      cfi <- cf[idn, ]
      # outer layout: one header line plus the plotting region
      top_vp <- viewport(layout = grid.layout(nrow = 2, ncol = 3,
                                              widths = unit(c(ylines, 1, 1), c("lines", "null",
                                                                               "lines")), heights = unit(c(1, 1), c("lines",
                                                                                                                    "null"))), width = unit(1, "npc"),
                         height = unit(1, "npc") - unit(2, "lines"),
                         name = paste("node_btplot", idn, sep = ""))
      pushViewport(top_vp)
      grid.rect(gp = gpar(fill = bg, col = 0))
      top <- viewport(layout.pos.col = 2, layout.pos.row = 1)
      pushViewport(top)
      # panel title: "Node <id> (n = <nobs>)" when id = TRUE
      mainlab <- paste(ifelse(id, paste("Node", idn,
                                        "(n = "), ""), info_node(node)$nobs,
                       ifelse(id, ")", ""), sep = "")
      grid.text(mainlab)
      popViewport()
      plot_vpi <- viewport(layout.pos.col = 2, layout.pos.row = 2,
                           xscale = xscale, yscale = yscale, name = paste("node_btplot",
                                                                          idn, "plot", sep = ""))
      pushViewport(plot_vpi)
      # horizontal reference line at cf_ref
      grid.lines(xscale, c(cf_ref, cf_ref), gp = gpar(col = refcol),
                 default.units = "native")
      if (index) {
        grid.lines(x, cfi, gp = gpar(col = col, lty = 2),
                   default.units = "native")
        grid.points(x, cfi, gp = gpar(col = col, cex = cex),
                    pch = pch, default.units = "native")
        # rotated, shrunken item labels on the x axis
        grid.xaxis(at = x,edits = gEdit(gPath="labels", rot=90,cex=0.4), label = if (names)
          names(cfi)
          else x)
      }
      else {
        if (names)
          grid.text(names(cfi), x = x, y = cfi, default.units = "native")
        else grid.points(x, cfi, gp = gpar(col = col, cex = cex),
                         pch = pch, default.units = "native")
      }
      grid.yaxis(at = c(ceiling(yscale[1] * 100)/100, floor(yscale[2] *
                                                              100)/100))
      grid.rect(gp = gpar(fill = "transparent"))
      upViewport(2)
    }
    return(rval)
  }
# Draw a simple emoji face (happy, sad or neutral) on the current base plot.
# The face is a filled circle with two eye segments; the mouth depends on
# `type`: an upward arc ("happy"), a downward arc ("sad") or a straight
# segment ("neutral").
# NOTE(review): draw.circle() and draw.arc() are presumably from the plotrix
# package -- confirm it is attached by the caller.
draw.emojis <- function(x,y,type="happy",radius=0.3, color="grey", border="black", thickness=1.5){
  # face
  draw.circle(x,y,radius,nv=100,border=color,col=color,lty=1,density=NULL,angle=45,lwd=thickness/1.5)
  # eyes (two short vertical segments)
  segments(x0=x+radius/5, x1=x+radius/5, y0=y+radius/2.5, y1=y+radius/5, lwd = thickness*1.5, col=border)
  segments(x0=x-radius/5, x1=x-radius/5, y0=y+radius/2.5, y1=y+radius/5, lwd = thickness*1.5, col=border)
  # mouth
  if(type=="happy") draw.arc(x,y,radius=radius/2, deg1=200, deg2=340, col=border, lwd=thickness/1.2)
  if(type=="sad") draw.arc(x,y-radius/1.5,radius=radius/2, deg1=20, deg2=160, col=border, lwd=thickness/1.2)
  if(type=="neutral") segments(x0=x-radius/4, x1=x+radius/4, y0=y-radius/3, y1=y-radius/3, lwd = thickness, col=border)
}
# vectorise over all arguments so whole vectors of faces can be drawn at once
draw.emojis <- Vectorize(draw.emojis)
#' Visualise network
#' @param object an object of class rankings
#' @param ... additional arguments passed to igraph methods
#' @return an igraph object representing the comparison graph.
#'   NOTE(review): despite the function name, nothing is plotted here, and
#'   the value is returned invisibly because the last expression is an
#'   assignment -- confirm whether a plot/visible return was intended.
network <- function(object, ...) {
  R <- object
  # pairwise adjacency (win counts) between the ranked items
  adj <- adjacency(R)
  # rebuild as a plain square matrix with item names on both dimensions
  adj <- as.vector(adj)
  adj <- t(matrix(adj, nrow = ncol(R), ncol = ncol(R)))
  dimnames(adj) <- list(dimnames(R)[[2]], dimnames(R)[[2]])
  # btdata(..., return_graph = TRUE) builds the igraph comparison graph
  adj <- btdata(adj, return_graph = TRUE)
  netw <- adj$graph
}
#' Coerce rankings and explanatory variables into a readable file for Cortana
#'
#' Each row of `y` is turned into a preference string, e.g. "a>b>c", with one
#' letter per column of `y` (lowercase then uppercase, so at most 52 columns).
#' Tied values are concatenated without the ">" separator.
#' @param x a rankings object
#' @param y a data.frame with explanatory variables
#' @return `x` with an extra `ranking` column holding the preference strings
ranking4cortana <- function(x, y) {
  # one symbol per column of y: letters first, then LETTERS (52 max)
  L <- c(letters, LETTERS)
  ranking <- apply(y, 1, function(w)
  {
    # all values distinct: simple strict ordering "a>b>c..."
    if (length(unique(w)) == length(w))
    {
      prefString <- paste(L[order(w)], collapse = ">")
    } else {
      # ties present: walk items in rank order, emitting ">" only when the
      # value strictly increases over the previous one.
      # NOTE(review): the string is accumulated via <<- side effects inside
      # sapply; the sapply return value itself is discarded.
      prefString <- NULL
      nbr <- Inf
      sapply(order(w), function(i)
      {
        #if () {
        if(w[i]>nbr & !is.na(w[i])){
          prefString <<- paste(prefString, ">", L[i], sep="")
        } else {
          prefString <<- paste(prefString, L[i], sep="")
        }
        #}
        nbr <<- w[i]
      })
    }
    prefString
  })
  X <- cbind(x,ranking)
  return(X)
}
# function from https://github.com/EllaKaye/BradleyTerryScalable
# which unfortunately was removed from CRAN
# Converts a graph representation of wins into a square matrix.
#
# @param g a directed igraph object; when the graph is weighted, edge weights
#   are used as the win counts
# @return a sparse adjacency ("wins") matrix with vertex names as dimnames
graph_to_matrix <- function(g) {
  # check that graph is a directed igraph object
  if(!igraph::is.igraph(g)) stop("g must be a directed igraph object")
  if(!igraph::is.directed(g)) stop("g must be a directed igraph object")
  # check names: duplicated vertex names would give ambiguous dimnames
  if(!is.null(igraph::V(g)$name)) {
    arg <- deparse(substitute(g))
    if(anyDuplicated(igraph::V(g)$name) > 0) stop(paste0("Vertex names must be unique. Consider fixing with V(", arg, ")$name <- make.names(V(", arg, ")$name, unique = TRUE)"))
  }
  # use the weight attribute for cell values when present
  if (igraph::is.weighted(g)) W <- igraph::as_adjacency_matrix(g, sparse = TRUE, attr = "weight", names = TRUE)
  else W <- igraph::as_adjacency_matrix(g, sparse = TRUE, names = TRUE)
  return(W)
}
# function from https://github.com/EllaKaye/BradleyTerryScalable
# which unfortunately was removed from CRAN
# Converts a data frame of paired results into a square matrix.
#
# @param df a 3- or 4-column data frame: winner, loser, wins (and, in the
#   4-column form, wins of item 2 over item 1)
# @return a sparse K x K "wins" matrix whose [i, j] entry is the number of
#   times item i beat item j, with one row/column per distinct item
pairs_to_matrix <- function(df) {
  # Check for Matrix.utils
  if (!requireNamespace("Matrix.utils", quietly = TRUE)) {
    stop("The package Matrix.utils is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # Check for stringr
  if (!requireNamespace("stringr", quietly = TRUE)) {
    stop("The package stringr is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # check if data frame
  if(!(is.data.frame(df))) stop ("Argument must be a data frame")
  # ensure df is a data.frame (rather than tbl_df or tbl)
  class(df) <- "data.frame"
  # check number of columns
  if (!(ncol(df) %in% 3:4 )) stop("Argument must be a data frame with three or four columns")
  # get base data: the sorted set of all items appearing in either column
  items <- sort(base::union(df[[1]], df[[2]]))
  n <- length(items)
  # get formula for dMcast (row item ~ column item)
  f <- stats::as.formula(paste(names(df)[1:2], collapse= " ~ "))
  # convert names to factors
  if(!is.factor(df[,1])) {
    df[,1] <- factor(df[,1])
  }
  if(!is.factor(df[,2])) {
    df[,2] <- factor(df[,2])
  }
  # create empty mat if all zeros in column 3
  if(all(df[,3] == 0)) {
    mat <- Matrix::Matrix(0, n, n, sparse = TRUE)
  }
  # create matrix with wins from column 3
  else {
    # create cross-tabs matrix (not square)
    mat <- Matrix.utils::dMcast(df, f, value.var = names(df)[3], as.factors = TRUE)
    # fix colnames: dMcast prefixes them with the column-variable name
    colnames(mat) <- stringr::str_replace(colnames(mat), names(df)[2], "")
    # remove zeros, if any, taking care with dimnames
    summary_mat <- Matrix::summary(mat)
    x <- NULL # hack to avoid CRAN note
    if (any(summary_mat[,3] == 0)) {
      summary_mat <- dplyr::filter(summary_mat, x != 0)
      mat_rownames <- rownames(mat)
      mat_colnames <- colnames(mat)
      new_mat_rownames <- mat_rownames[sort(unique(summary_mat[,1]))]
      new_mat_colnames <- mat_colnames[sort(unique(summary_mat[,2]))]
      mat <- Matrix::sparseMatrix(i = summary_mat[,1], j = summary_mat[,2], x = summary_mat[,3])
      nonzero_rows <- which(Matrix::rowSums(mat) != 0)
      nonzero_cols <- which(Matrix::colSums(mat) != 0)
      mat <- mat[nonzero_rows, nonzero_cols, drop = FALSE]
      dimnames(mat) <- list(new_mat_rownames, new_mat_colnames)
    }
    # add in zeros for missing rows (items that never appear as winners)
    if (nrow(mat) < n) {
      new_rows <- Matrix::Matrix(0, n - nrow(mat), ncol(mat),
                                 dimnames = list(base::setdiff(items, rownames(mat)), colnames(mat)))
      mat <- rbind(mat, new_rows)
    }
    # add in zeros for missing columns (items that never appear as losers)
    if (ncol(mat) < n) {
      new_cols <- Matrix::Matrix(0, n, n - ncol(mat),
                                 dimnames = list(rownames(mat), base::setdiff(items, colnames(mat))))
      mat <- cbind(mat, new_cols)
    }
    # get rows and columns in same, sorted order and return
    mat <- mat[items,]
    mat <- mat[, rownames(mat)]
  }
  # repeat above steps if in 4-column format (for item2 beating item1)
  # as long as col 4 isn't all zeros
  if (ncol(df) == 4) fourth_all_zero <- all(df[,4] == 0)
  else fourth_all_zero <- TRUE
  if (ncol(df) == 4 & !fourth_all_zero) {
    # swap the roles of the two item columns (item2 ~ item1)
    f2 <- stats::as.formula(paste(names(df)[2:1], collapse= " ~ "))
    mat2 <- Matrix.utils::dMcast(df, f2, value.var = names(df)[4], as.factors = TRUE)
    colnames(mat2) <- stringr::str_replace(colnames(mat2), names(df)[1], "")
    # remove zeros, if any, taking care with dimnames
    summary_mat2 <- Matrix::summary(mat2)
    if (any(summary_mat2[,3] == 0)) {
      summary_mat2 <- dplyr::filter(summary_mat2, x != 0)
      mat2_rownames <- rownames(mat2)
      mat2_colnames <- colnames(mat2)
      new_mat2_rownames <- mat2_rownames[sort(unique(summary_mat2[,1]))]
      new_mat2_colnames <- mat2_colnames[sort(unique(summary_mat2[,2]))]
      mat2 <- Matrix::sparseMatrix(i = summary_mat2[,1], j = summary_mat2[,2], x = summary_mat2[,3])
      nonzero_rows2 <- which(Matrix::rowSums(mat2) != 0)
      nonzero_cols2 <- which(Matrix::colSums(mat2) != 0)
      mat2 <- mat2[nonzero_rows2, nonzero_cols2, drop = FALSE]
      dimnames(mat2) <- list(new_mat2_rownames, new_mat2_colnames)
    }
    # add in zeros for missing rows
    if (nrow(mat2) < n) {
      new_rows2 <- Matrix::Matrix(0, n - nrow(mat2), ncol(mat2),
                                  dimnames = list(base::setdiff(items, rownames(mat2)), colnames(mat2)))
      mat2 <- rbind(mat2, new_rows2)
    }
    # add in zeros for missing columns
    if (ncol(mat2) < n) {
      new_cols2 <- Matrix::Matrix(0, n, n - ncol(mat2),
                                  dimnames = list(rownames(mat2), base::setdiff(items, colnames(mat2))))
      mat2 <- cbind(mat2, new_cols2)
    }
    # get rows and columns in same, sorted order and return
    mat2 <- mat2[items,]
    mat2 <- mat2[, rownames(mat2)]
    # add the result to mat
    mat <- mat + mat2
  }
  if(!is.null(colnames(df)[1]) & !is.null(colnames(df)[2])) names(dimnames(mat)) <- colnames(df)[1:2]
  return(mat)
}
# function from https://github.com/EllaKaye/BradleyTerryScalable
# which unfortunately was removed from CRAN
#' Create a btdata object
#'
#' Creates a btdata object, primarily for use in the \link{btfit} function.
#'
#' The \code{x} argument to \code{btdata} can be one of four types:
#'
#' \itemize{
#'
#' \item{A matrix (either a base \code{matrix}) or a class from the \code{Matrix} package), dimension \eqn{K} by \eqn{K}, where \eqn{K} is the number of items. The \emph{i,j}-th element is \eqn{w_{ij}}, the number of times item \eqn{i} has beaten item \eqn{j}. Ties can be accounted for by assigning half a win (i.e. 0.5) to each item.}
#' \item{A contingency table of class \code{table}, similar to the matrix described in the above point.}
#' \item{An \code{igraph}, representing the \emph{comparison graph}, with the \eqn{K} items as nodes. For the edges:
#' \itemize{
#' \item{If the graph is unweighted, a directed edge from node \eqn{i} to node \eqn{j} for every time item \eqn{i} has beaten item \eqn{j}}
#' \item{If the graph is weighted, then one edge from node \eqn{i} to node \eqn{j} if item \eqn{i} has beaten item \eqn{j} at least once, with the weight attribute of that edge set to the number of times \eqn{i} has beaten \eqn{j}.}
#' }}
#' \item{
#' If \code{x} is a data frame, it must have three or four columns:
#' \itemize{
#' \item{3-column data frame}{The first column contains the name of the winning item, the second column contains the name of the losing item and the third columns contains the number of times that the winner has beaten the loser. Multiple entries for the same pair of items are handled correctly. If \code{x} is a three-column dataframe, but the third column gives a code for who won, rather than a count, see \code{\link{codes_to_counts}}.}
#' \item{4-column data frame}{The first column contains the name of item 1, the second column contains the name of item 2, the third column contains the number of times that item 1 has beaten item 2 and the fourth column contains the number of times item 2 has beaten item 1. Multiple entries for the same pair of items are handled correctly. This kind of data frame is also the output of \code{\link{codes_to_counts}}.}
#' \item{In either of these cases, the data can be aggregated, or there can be one row per comparison.}
#' \item{Ties can be accounted for by assigning half a win (i.e. 0.5) to each item.}
#' }
#' }
#'
#' }
#'
#' \code{summary.btdata} shows the number of items, the density of the \code{wins} matrix and whether the underlying comparison graph is fully connected. If it is not fully connected, \code{summary.btdata} will additional show the number of fully-connected components and a table giving the frequency of components of different sizes. For more details on the comparison graph, and how its structure affects how the Bradley-Terry model is fitted, see \code{\link{btfit}} and the vignette: \url{https://ellakaye.github.io/BradleyTerryScalable/articles/BradleyTerryScalable.html}.
#'
#' @param x The data, which is either a three- or four-column data frame, a directed igraph object, a square matrix or a square contingency table. See Details.
#' @param return_graph Logical. If TRUE, an igraph object representing the comparison graph will be returned.
#' @return An object of class "btdata", which is a list containing:
#' \item{wins}{A \eqn{K} by \eqn{K} square matrix, where \eqn{K} is the total number of players. The \eqn{i,j}-th element is \eqn{w_{ij}}, the number of times item \eqn{i} has beaten item \eqn{j}. If the items in \code{x} are unnamed, the wins matrix will be assigned row and column names 1:K.}
#' \item{components}{A list of the fully-connected components.}
#' \item{graph}{The comparison graph of the data (if return_graph = TRUE). See Details.}
#' @seealso \code{\link{codes_to_counts}} \code{\link{select_components}}
#' @author Ella Kaye
#' @examples
#' citations_btdata <- btdata(BradleyTerryScalable::citations)
#' summary(citations_btdata)
#' toy_df_4col <- codes_to_counts(BradleyTerryScalable::toy_data, c("W1", "W2", "D"))
#' toy_btdata <- btdata(toy_df_4col)
#' summary(toy_btdata)
#' @export
btdata <- function(x, return_graph = FALSE) {
  # Build a "btdata" object (wins matrix + strongly-connected components,
  # optionally the comparison graph) from a table, data frame, igraph or
  # square matrix. See the roxygen block above for the accepted formats.
  # if x is a table, convert it to a matrix
  if (is.table(x)) {
    attr(x, "class") <- NULL
    attr(x, "call") <- NULL
  }
  # if x is a df
  if (is.data.frame(x)) {
    if (!(ncol(x) %in% 3:4 )) stop("If x is a dataframe, it must have 3 or 4 columns.")
    wins <- pairs_to_matrix(x)
    g <- igraph::graph.adjacency(wins, weighted = TRUE, diag = FALSE)
  }
  # if x is a graph
  else if (igraph::is.igraph(x)) {
    if(!igraph::is.directed(x)) stop("If x is a graph, it must be a directed igraph object")
    # check names: duplicated vertex names would give ambiguous dimnames
    if(!is.null(igraph::V(x)$name)) {
      arg <- deparse(substitute(x))
      if(anyDuplicated(igraph::V(x)$name) > 0) stop(paste0("If x is a graph, vertex names must be unique. Consider fixing with V(", arg, ")$name <- make.names(V(", arg, ")$name, unique = TRUE)"))
    }
    wins <- graph_to_matrix(x)
    g <- x
  }
  else if ((methods::is(x, "Matrix") | is.matrix(x) )) {
    # check dimensions/content
    if (dim(x)[1] != dim(x)[2]) stop("If x is a matrix or table, it must be a square")
    if(is.matrix(x)) {if (!is.numeric(x)) stop("If x is a matrix or table, all elements must be numeric")}
    if(methods::is(x, "Matrix")) {if (!is.numeric(as.vector(x))) stop("If x is a matrix or table, all elements must be numeric")}
    if (any(x < 0)) stop("If x is a matrix or table, all elements must be non-negative")
    if(!identical(rownames(x), colnames(x))) stop("If x is a matrix or table, rownames and colnames of x should be the same")
    if (anyDuplicated(rownames(x)) > 0) {
      arg <- deparse(substitute(x))
      stop("If x is a matrix or table with row- and column names, these must be unique. Consider fixing with rownames(", arg, ") <- colnames(", arg, ") <- make.names(rownames(", arg, "), unique = TRUE)")
    }
    # ensure wins is a dgCMatrix
    if (is.matrix(x)) wins <- Matrix::Matrix(x, sparse = TRUE)
    else wins <- x
    # FIX: wins is an S4 Matrix object, so test its class with methods::is()
    # rather than comparing class(wins) with != (fragile scalar comparison)
    if (!methods::is(wins, "dgCMatrix")) wins <- methods::as(wins, "dgCMatrix")
    g <- igraph::graph.adjacency(wins, weighted = TRUE, diag = FALSE)
  }
  else stop("x must be a 3 or 4 column dataframe, a directed igraph object, or square matrix or contingency table.")
  ## get components: strongly-connected components of the comparison graph
  comp <- igraph::components(g, mode = "strong")
  components <- igraph::groups(comp)
  # name the rows and columns of the wins matrix, if NULL
  if (is.null(unlist(dimnames(wins)))) {
    K <- nrow(wins)
    dimnames(wins) <- list(1:K, 1:K)
  }
  # return
  result <- list(wins = wins, components = components)
  if (return_graph) result$graph <- g
  class(result) <- c("btdata", "list")
  result
}
# function from https://github.com/EllaKaye/BradleyTerryScalable
# which unfortunately was removed from CRAN
#' @rdname btdata
#' @param object An object of class "btdata", typically the result \code{ob} of \code{ob <- btdata(..)}.
#' @param ... Other arguments
#' @export
summary.btdata <- function(object, ...){
  # Print number of items, density of the wins matrix and whether the
  # comparison graph is fully connected (plus a component-size table if not).
  if (!inherits(object, "btdata")) stop("object should be a 'btdata' object")
  K <- nrow(object$wins)
  num_comps <- length(object$components)
  connected <- num_comps == 1
  # FIX: lengths() replaces sapply(..., length) (type-stable, faster); the
  # unused local `components_greater_than_one` was removed as dead code.
  my_tab <- table(lengths(object$components))
  my_df <- as.data.frame(my_tab)
  colnames(my_df) <- c("Component size", "Freq")
  # proportion of non-zero cells in the wins matrix
  density <- Matrix::mean(object$wins != 0)
  cat("Number of items:", K, "\n")
  cat("Density of wins matrix:", density, "\n")
  cat("Fully-connected:", connected, "\n")
  if (num_comps > 1) {
    cat("Number of fully-connected components:", num_comps, "\n")
    cat("Summary of fully-connected components: \n")
    print(my_df)
  }
}
# Plot worth bar: horizontal bar chart of worth parameters, expressed in
# percent, one bar per item, colored with a yellow-green-blue-dark palette.
# NOTE(review): relies on the unexported gosset:::.reduce and
# gosset:::.player_order helpers -- confirm the gosset version in use.
# @param object a data.frame with worth parameters
# @param value an integer for index in object for the column with values to plot
# @param group an integer for index in object to the column with values to group with
# @param palette optional colorRampPalette-style function; a default is built
#   when NULL
# @param ... passed on to gosset:::.reduce
plot_worth_bar <- function(object, value, group, palette = NULL, ...){
  if(is.null(palette)) {
    palette <- grDevices::colorRampPalette(c("#FFFF80", "#38E009","#1A93AB", "#0C1078"))
  }
  # keep only the grouping and value columns, in that order
  object <- object[,c(group, value)]
  names(object) <- c("group", "value")
  nr <- dim(object)[[1]]
  object$group <- as.character(object$group)
  object$group <- gosset:::.reduce(object$group, ...)
  # sort by decreasing worth and convert to rounded percentages
  object <- object[rev(order(object$value)), ]
  object$value <- round(object$value * 100, 0)
  # get order of players based on their performance
  player_levels <- rev(gosset:::.player_order(object, "group", "value"))
  object$group <- factor(object$group, levels = player_levels)
  value <- object$value
  group <- object$group
  # round the axis limit up to the next multiple of 10 above the maximum
  maxv <- round(max(value) + 10, -1)
  ggplot2::ggplot(data = object,
                  ggplot2::aes(x = value,
                               y = "",
                               fill = group)) +
    ggplot2::geom_bar(stat = "identity",
                      position = "dodge",
                      show.legend = FALSE,
                      width = 1,
                      color = "#ffffff") +
    scale_fill_manual(values = palette(nr)) +
    ggplot2::scale_x_continuous(labels = paste0(seq(0, maxv, by = 10), "%"),
                                breaks = seq(0, maxv, by = 10),
                                limits = c(0, maxv)) +
    ggplot2::theme_minimal() +
    ggplot2::theme(legend.position="bottom",
                   legend.text = element_text(size = 9),
                   panel.grid.major = element_blank(),
                   axis.text.x = element_text(color = "#000000")) +
    ggplot2::labs(y = "",
                  x = "") +
    ggplot2::geom_text(aes(label = group),
                       position = position_dodge(width = 1), hjust = -.1)
}
# Plot coefficient estimates as a flipped dot-and-errorbar chart.
# Expects a data.frame with columns `term` (a factor), `ctd` (centred
# estimate), `quasiSE` (quasi standard error), `Label` (grouping color) and
# `.group` (significance letters drawn next to the points). Error bars span
# ctd +/- 1.40 * quasiSE.
plot_coef <- function(object, ...) {
  ggplot(data = object,
         aes(x = term,
             y = ctd,
             ymax = ctd + 1.40 * quasiSE,
             ymin = ctd - 1.40 * quasiSE,
             col = Label)) +
    geom_point(position = position_dodge(width = 0.3), size = 1) +
    geom_errorbar(position = position_dodge(width = 0.3), width = 0) +
    coord_flip() +
    scale_color_brewer(palette = "Set1", name = "") +
    # significance letters, alternately nudged below/above the points
    geom_text(aes(label= .group),
              size = 2,
              fontface = 1,
              nudge_x = rep(c(-0.3, 0.5), each = nlevels(object$term))) +
    labs(y = "",
         x = "") +
    theme_bw() +
    theme(legend.position = "bottom",
          legend.text = element_text(size = 7, colour = "black"),
          panel.background = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.text.x = element_text(size = 9, colour = "black"),
          axis.text.y = element_text(size = 9, colour = "black"))
}
#' Rename duplicates
#'
#' Look for duplicated values in a vector and rename them. A running
#' suffix (number or letter) is appended to every occurrence of a
#' duplicated value, so the result contains unique values and keeps the
#' same length as the input. Non-duplicated values are left untouched.
#'
#' @param x a vector to check and rename duplicated values
#' @param rename.with choose between "numbers" and "letters"; with
#'   "letters" at most 26 occurrences of the same value are supported
#' @param sep separator placed between the original value and the suffix
#' @return a vector of the same length as \code{x} in which formerly
#'   duplicated values carry a suffix and are therefore unique
#' @examples
#'
#' v <- c("Pear", "Apple", "Pear", "Orange", "Apple", "Apple")
#'
#' rename_duplicates(v)
#'
#' @noRd
rename_duplicates <- function(x, rename.with = "numbers", sep = "") {
  # values that occur more than once in x
  dups <- duplicated(x)
  dups <- unique(x[dups])
  for(i in seq_along(dups)) {
    dups_i <- x == dups[i]
    index <- seq_len(sum(dups_i))
    if (rename.with == "letters") {
      # letters[] yields NA past "z"; fail loudly instead of silently
      # producing values like "xNA" (which are not unique).
      if (sum(dups_i) > length(letters)) {
        stop("rename.with = 'letters' supports at most 26 duplicates per value",
             call. = FALSE)
      }
      index <- letters[index]
    }
    # paste() coerces a numeric index to character automatically
    x[dups_i] <- paste(x[dups_i], index, sep = sep)
  }
  return(x)
}
|
43feedd8be6da193c961c4332496865ae4d7604d
|
dfaf36782928084c27c955e60592baffe214510f
|
/R/Plot.R
|
48f2b2e3b8435ef40630de1743fa368868c74554
|
[] |
no_license
|
SimonGrund/blm
|
05aa4c9d7f31078ea0d50c1585468d758c6ad386
|
31fb8fe2b623b612cdaaa6d7656aa6456843e1b5
|
refs/heads/master
| 2021-06-10T18:30:28.476092
| 2017-01-16T15:01:35
| 2017-01-16T15:01:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,879
|
r
|
Plot.R
|
#' Plot
#'
#' This function plots blm models
#'
#' @param x An object of class blm
#' @param fit Should target variables (y axis) be plotted from fitted or data values
#' @param newData for plot of new predicted target variable values, add new Data and assign fit = TRUE
#' @param ... Additional arguments (currently ignored)
#'
#' @return if fit = FALSE: x-y plot with a line going through it for the fit; if fit = TRUE and newData assigned, a plot is returned with original data and line (blue) and red data points for the predicted target variables
#' @export
plot.blm = function(x, fit = FALSE, newData = NULL, ...){
  data <- x$data
  intercept <- coef(x)[[1]]
  slope <- coef(x)[[2]]
  if (fit == FALSE) {
    # Observed data with the fitted regression line; title reports the
    # posterior mean intercept/slope and an R^2 computed from residuals.
    ggplot(data = data) +
      geom_point(aes(x = data[,2], y = data[,1]), col = "blue", cex = 0.4) +
      geom_abline(slope = slope, intercept = intercept, col = "blue") +
      labs(title = paste("Intercept =", round(x$mean[1],3) ,
                         " Slope =", round(x$mean[2],3), " R^2 = ",
                         round(1-sum(residuals(x)^2)/sum((data[,1]-mean(data[,1]))^2),3)),
           y = "y", x = "x")
  } else {
    # Validate newData BEFORE refitting. The original code called blm()
    # first and then tested ncol(newData); with newData = NULL that crashed
    # with an unhelpful "argument is of length zero" error.
    if (is.null(newData)) {
      stop("newData must be supplied when fit = TRUE", call. = FALSE)
    }
    if (ncol(newData) != ncol(x$data)) {
      stop("Wrong number of variables in newData for original formula")
    }
    # NOTE(review): y comes from fitted(x) -- the ORIGINAL fit -- yet is
    # plotted against newData's predictor; this silently assumes
    # nrow(newData) == nrow(x$data). TODO confirm whether fitted(pred)
    # was intended.
    y <- fitted(x)[,1]
    # Refit the model on newData with the same prior hyper-parameters.
    pred <- blm(x$formula, alpha = x$sigma, beta = x$beta, data = newData)
    intercept <- coef(pred)[[1]]
    slope <- coef(pred)[[2]]
    ggplot() +
      geom_point(data = data, aes(x = data[,2], y = data[,1]),
                 col = "blue", cex = 0.4) +
      geom_point(data = newData, aes(x = newData[,2], y = y),
                 col = "red", cex = 0.4) +
      geom_abline(slope = slope, intercept = intercept, col = "red") +
      labs(title = paste("Intercept =", round(pred$mean[1],3) ,
                         " Slope =", round(pred$mean[2],3)),
           y = "Predicted y", x = "x")
  }
}
|
e051235df4375e72802720c5234c9a1999b05110
|
3c0a74cde5a48df98e1a362e109e82c31ef2ad21
|
/app/server.R
|
bce01711c31208a82af8b54b47491302bf85d936
|
[] |
no_license
|
tomsb459/rtu-neighborhood-map
|
be499ef51fbbb47ae2f8e32f12cac6e6806672c4
|
c0f88071914c69a593c01d42944493927925a8ea
|
refs/heads/master
| 2023-01-04T01:53:00.014728
| 2020-10-19T13:38:11
| 2020-10-19T13:38:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,188
|
r
|
server.R
|
# Shiny server for an authenticated neighborhood map: shinyauthr login/logout,
# a leaflet choropleth of census-tract indicators, member markers shown only
# while logged in, and a per-tract indicator table in a side pane.
# Relies on objects defined elsewhere in the app: user_base, var_inputs,
# rw_tracts, indic_info -- TODO confirm they are loaded in global.R.
server <- function(input, output, session) {
  # Login -------------------------------------------------------------------
  # Call login module supplying data frame, user and password cols
  # and reactive trigger
  credentials <- callModule(
    shinyauthr::login,
    id = "login",
    data = user_base,
    user_col = user,
    pwd_col = password,
    sodium_hashed = TRUE,
    log_out = reactive(logout_init())
  )
  # Call the logout module with reactive trigger to hide/show
  logout_init <- callModule(
    shinyauthr::logout,
    id = "logout",
    active = reactive(credentials()$user_auth)
  )
  # Launch login screen when login button clicked
  observeEvent(input$"open-login", {
    showModal(modalDialog(
      loginUI(id = "login"),
      easyClose = TRUE
    ))
  })
  # If logged-in, hide the login button
  observe({
    shinyjs::hide("open-login")
    if(!credentials()$user_auth)
      shinyjs::show("open-login")
  })
  # After successful login, close login screen
  observeEvent(credentials()$user_auth, {
    removeModal()
  })
  # Map ---------------------------------------------------------------------
  # Create default map to start with on the app
  output$map <- renderLeaflet({
    default_var <- var_inputs[[1]]
    pal <- colorNumeric("plasma", rw_tracts[[default_var]], na.color = "#bfbfbf", reverse = TRUE)
    rw_tracts %>%
      leaflet() %>%
      addMapboxGL(style = "mapbox://styles/mapbox/light-v9") %>%
      addPolygons(
        fillColor = pal(rw_tracts[[default_var]]),
        fillOpacity = 0.6,
        color = "black",
        weight = 0.5,
        opacity = 1,
        layerId = ~geoid,
        group = "variable"
      )
  })
  # Any time a new variable is selected from the dropdown menu the choropleth
  # map is redrawn for that indicator
  observeEvent(input$variable, {
    var_name <- input$variable
    if(is.null(var_name))
      return()
    pal <- colorNumeric("plasma", rw_tracts[[var_name]], na.color = "#bfbfbf")
    # The default legend has values low/top to high/bottom so need to reverse the palette
    rev_pal <- colorNumeric("plasma", rw_tracts[[var_name]], na.color = "#bfbfbf", reverse = TRUE)
    leafletProxy("map") %>%
      clearShapes() %>%
      addPolygons(
        data = rw_tracts,
        fillColor = pal(rw_tracts[[var_name]]),
        fillOpacity = 0.6,
        color = "black",
        weight = 0.5,
        opacity = 1,
        layerId = ~geoid,
        group = "variable"
      ) %>%
      addLegend(
        "topright",
        pal = rev_pal,
        values = rw_tracts[[var_name]],
        opacity = 1,
        labFormat = labelFormat(transform = function(x) sort(x, decreasing = TRUE)),
        layerId = "legend"
      )
  })
  # Every time a tract is clicked on, a table is generated in the side pane to
  # display all its indicator values and an outline it added to the map
  observeEvent(input$map_shape_click, {
    event <- input$map_shape_click
    if(is.null(event))
      return()
    # remove the outline of the previously clicked tract
    if(event$id == "selected") {
      leafletProxy("map") %>% removeShape(layerId = "selected")
    }
    # add the outline for the clicked tract
    leafletProxy("map") %>%
      addPolygons(
        data = rw_tracts %>% filter(geoid == event$id),
        fillColor = NA,
        fillOpacity = 0,
        color = "black",
        weight = 2,
        opacity = 1,
        layerId = "selected"
      )
  })
  # Members -----------------------------------------------------------------
  # Only after successful login, load data and show member locations
  members <- eventReactive(credentials()$user_auth, {
    readRDS("data/members.rds")
  })
  observeEvent(credentials()$user_auth, {
    # popup is currently unused because the popup/label arguments below are
    # commented out -- kept so they can be re-enabled easily.
    popup <- popupTable(
      members(),
      c("name", "pronouns", "phone", "email", "address"),
      row.numbers = FALSE,
      feature.id = FALSE
    )
    # add member locations
    leafletProxy("map") %>%
      addCircleMarkers(
        data = members(),
        # label = ~name,
        # popup = popup,
        fillColor = "black",  # was "blck" -- invalid CSS colour name
        fillOpacity = 1,
        stroke = FALSE,
        radius = 3,
        layerId = ~uid,
        group = "members"
      )
  })
  observe({
    req(!credentials()$user_auth)
    # remove member locations on logout
    leafletProxy("map") %>%
      clearMarkers()
  })
  # Table -------------------------------------------------------------------
  tract_info <- eventReactive(input$map_shape_click, {
    event <- input$map_shape_click
    if(is.null(event))
      return()
    if(event$id == "selected")
      return()
    # Get the row for the selected tract, reshape the data, join in display
    # names and formatting info from the manually created csv file, then build
    # out a simple table for display in the side pane
    rw_tracts %>%
      st_drop_geometry() %>%
      filter(geoid == event$id) %>%
      select(-geoid) %>%
      pivot_longer(everything()) %>%
      right_join(indic_info, by = c("name" = "var_name")) %>%
      arrange(order) %>%
      gt(rowname_col = "display_name", groupname_col = "var_group") %>%
      cols_hide(vars(name, val_fmt, order)) %>%
      tab_style(cell_text(weight = "bold"), cells_row_groups()) %>%
      fmt_currency(vars(value), rows = val_fmt == "cur", decimals = 0) %>%
      fmt_percent(vars(value), rows = val_fmt == "pct", decimals = 1) %>%
      fmt_number(vars(value), rows = val_fmt == "num", decimals = 0) %>%
      fmt_number(vars(value), rows = val_fmt == "rt", decimals = 1) %>%
      fmt_missing(vars(value)) %>%
      tab_options(column_labels.hidden = TRUE)
  })
  # Output the map for access on the UI side
  output$tract_table <- render_gt(
    tract_info(),
    height = px(700),
    width = px(400)
  )
  # Output a UI component for the side pane
  output$sidebar <- renderUI({
    fixedPanel(
      id = "sidebar", class = "panel panel-default",
      style = "overflow-y: scroll",
      top = 80, right = 0, width = 400, height = 650,
      gt_output(outputId = "tract_table")
    )
  })
}
|
1aa455d6a6ed3e90fa36879e9e3bfe1fa20588a8
|
f8ce1034cef41685ab2387fa42084ac1ee5c96cf
|
/chapter18/rook.R
|
52c18a980028e2572b095c94529f4db0ad118ef9
|
[] |
no_license
|
AnguillaJaponica/RProgramming
|
ab7e15d99153ebe585745289490e6e3df118a35c
|
ae1401920b5a52795cffc95bd83922f15761e531
|
refs/heads/master
| 2020-11-28T10:07:02.579925
| 2019-12-23T15:28:23
| 2019-12-23T15:28:23
| 229,777,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
rook.R
|
library(Rook)

# Minimal Rook application returning a static "Hello World" HTML page.
# NOTE(review): the original file contained mojibake ("・n", "ヮ") where "\n"
# escapes were intended (apparently a broken encoding round-trip); the
# newline escapes are restored here.
hello_fun <- function(env) {
  res <- Rook::Response$new()
  res$write("<html>\n<head><title>HelloWorld</title></head>\n<body>\n")
  res$write("<h1>Hello World</h1>\n")
  res$write("</body>\n</html>\n")
  res$finish()
}

# Start an in-process HTTP server, register the app and open it in a browser.
rk <- Rhttpd$new()
rk$start(quiet = TRUE)
rk$add(app = hello_fun, name = "HelloWorld")
rk$browse("HelloWorld")
|
1930091d640f83a281aec0af554f05e49f7201b2
|
dd4eedb2d9b20284b5be5f72eb8c9d3f86208855
|
/stage_18/analysis.R
|
44c98c81c25e984daff74f77a2f67ed0362c3685
|
[] |
no_license
|
hasmitapatel18/Master_thesis
|
d3c0b6a456edb11cffc144e8138b57b519458ef1
|
43e0d68efca749901a51f2462821892edb05905a
|
refs/heads/master
| 2020-03-27T11:37:58.390429
| 2018-08-28T20:50:46
| 2018-08-28T20:50:46
| 146,498,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,800
|
r
|
analysis.R
|
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this script
# is sourced -- consider removing this side effect in shared scripts.
rm(list=ls()) # clean up the workspace
# *******************************************************************
# Constructing priors on 'g' (generation time) and
# 'u' (per-generation mutation rate)
# *******************************************************************
# Generation-time prior: gamma with mean 29.5 years; the variance is chosen
# so that (27, 32) spans roughly +/- 2 standard deviations.
m <- 29.5; v <- (32-27)^2/16
a <- m^2/v; b <- m/v
# This is how the prior on 'g' looks like:
curve(dgamma(x, a, b), from=20, to=40, main="Distribution curve for the generation, g prior", xlab="Generation time, g (y)", ylab="Gamma(g | a, b)", las=1, n=5e2)
# Mutation-rate prior: gamma with mean midway between 0.97 and 1.36,
# in units of substitutions per site per 10^8 years.
m.r <- (1.36+0.97)/2; v.r <- (1.36-0.97)^2/16
a.r <- m.r^2/v.r; b.r <- m.r/v.r
# This is how the prior on 'u' looks like:
curve(dgamma(x, a.r, b.r), from=0, to= 2, main="Distribution curve for the mutation rate u prior", xlab="Per-generation mutation rate, u (x 10^-8)", ylab="Gamma(u | a, b)", las=1, n=5e2)
# Import mcmc.txt (BPP output); must exist in the working directory.
m1 <- read.table("mcmc.txt", head=TRUE) # contains 20,000 MCMC samples
names(m1)
# By using the priors on 'g' and 'u' we will convert the tau's into
# geological divergence times (in years) and the theta's into
# effective population sizes (as numbers of individuals)
# For example, this is how posterior distribution of the root's tau
# (age of the root in substitutions per site) looks like before
# re-calibrating it to geological time:
plot(density(m1$tau_10NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMRChimpGorOrang ), xlim=c(0.0005,0.002), xlab="tau (in substitutions per site)", main="AFR/EUR Root tau")
# To obtain the calibrated times and population sizes, we simply
# obtain 20,000 samples from the priors on 'g' and 'u'.
# Recall that the per-year mutation rate is r=u/g. Thus the calibrated
# times are t = tau/r. Also recall that theta = 4Nu, thus N = theta/(4u).
# So we simply use the sampled values of 'g' and 'u' to recalibrate all
# the tau's and theta's in the m1 dataframe:
n <- nrow(m1) # 20,000
set.seed(123357) # We set the seed so that the analysis is reproducible
gi <- rgamma(n, a, b) # sample from prior on 'g'
ri <- rgamma(n, a.r, b.r) * 1e-8 # sample from prior on 'u' (rescaled to per-year units)
# Column indices for tau's and theta's in the BPP output
tau.i <- 10:17; theta.i <- 2:9
# Obtain population sizes (Ne) and geological times in years (ty):
Ne <- m1[,theta.i] / (4*ri) # N = theta / (4*u)
ty <- m1[,tau.i] * gi / ri # t = tau * g / u
# Voilá! Ne and ty contain our posterior estimates of population sizes
# and geological divergence times!
# For example, this is how the posterior distribution of the root's Ne
# and age look like:
plot(density(Ne$theta_10NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMRChimpGorOrang , from=0, to=0.2e5), xlab = "Root's Ne (number of individuals)", main="Effective size of the root's ancestral population")
plot(density(ty$tau_10NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMRChimpGorOrang /1e6, from=0, to=30e1), xlab = "Root's age (thousands of years ago)", main="Root age EAS/AM")
# Calculate posterior means and 95% credibility-intervals:
N.m <- apply(Ne, 2, mean)
N.95 <- apply(Ne, 2, quantile, prob=c(.025, .975))
t.m <- apply(ty, 2, mean)
t.95 <- apply(ty, 2, quantile, prob=c(.025, .975))
# Print out a table of posterior means and CI's for Ne and ty for all
# the populations (row names = population labels from the BPP run):
pop.names <- c("10NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMRChimpGorOrang", "11NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMRChimpGor", "12NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMRChimp", "13NeaDenYoruba_AFRSpain_EURChinese_EASPeru_AMR", "14NeaDen", "15Yoruba_AFRSpain_EURChinese_EASPeru_AMR", "16Spain_EURChinese_EASPeru_AMR", "17Chinese_EASPeru_AMR")
Ne.df <- cbind(N.m, t(N.95)); row.names(Ne.df) <- pop.names
t.df <- cbind(t.m, t(t.95)); row.names(t.df) <- pop.names
Ne.df; t.df
|
ef873d924be15686528ff1314636679423fd5af1
|
da67e60cc58adb0fe9e02b1edbf365d558cc35eb
|
/R/s3.R
|
372a356858bc2de946fa928a5a08991b1eeaaba4
|
[] |
no_license
|
brunaw/music21
|
ef506e88fb8e576b622fc62a4d0ae0c7f0b7ad56
|
b67f5d82735cf05bed3b5c366c0f74d9e6906060
|
refs/heads/master
| 2021-07-12T17:57:22.951629
| 2017-10-13T18:47:16
| 2017-10-13T18:47:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,294
|
r
|
s3.R
|
# NOTE(review): this S3 plot method is redefined later in this file with a
# different implementation; R keeps only the last definition, so this version
# is dead code at load time. Consider deleting one of the two.
#
# Renders the music21 object to "lily.png" via lilypond, prints the image
# with magick, and returns the image path invisibly.
plot.music21.base.Music21Object <- function(x, ...) {
  # Point music21's environment at the locally installed lilypond binary.
  env <- reticulate::import("music21.environment")
  env$set('graphicsPath', Sys.which("lilypond")[1])
  img <- x$write("lily.png")
  print(magick::image_read(img))
  invisible(img)
}
#' Print a music21 object
#'
#' Captures the textual output of music21's \code{show("text")} method and
#' writes it to the console.
#'
#' @param x music21 python object
#' @param ... other options (currently ignored)
#'
#' @export
print.music21.base.Music21Object <- function(x, ...) {
  cat(reticulate::py_capture_output(x$show("text")))
}
#' Shows music in the viewer pane
#'
#' This function uses \href{http://lilypond.org/}{lilypond} to save the music21
#' object to a png file, then uses the \code{magick} package to load the
#' image.
#'
#' @param x music21 python object
#' @param ... other options (currently ignored)
#'
#' @export
plot.music21.base.Music21Object <- function(x, ...) {
  img <- magick::image_read(write_lily(x))
  op <- graphics::par(mar = rep(0, 4))
  # Restore graphical parameters even if plotting fails (the original only
  # reset them on the success path).
  on.exit(graphics::par(op), add = TRUE)
  graphics::plot(grDevices::as.raster(img))
}
# S3 generic for displaying an object in the viewer; methods dispatch on
# the class of `x`.
view <- function(x, ...) {
  UseMethod("view")
}
#' @rdname plot.music21.base.Music21Object
#' @export
view.music21.base.Music21Object <- function(x, ...) {
  # write_lily() (helper defined elsewhere in this package) renders the
  # object to a PNG via lilypond; printing the magick image displays it.
  img <- write_lily(x)
  # capture.output() suppresses the textual print representation while the
  # viewer still shows the image.
  utils::capture.output(print(magick::image_read(img)))
  invisible(img)
}
|
f5f6baeb463b4d3c3d4d2cb083ee021220573feb
|
fa853f13add91b485908ac7ffec0275cbb458b0c
|
/Day_3.R
|
b3e712873b5cf1876910fffce33e3c285d5b604e
|
[] |
no_license
|
MpumalangaMnyekemfu/Biostats_2021
|
60a8bff9043297811bdfd2c3236b9a074dea1b60
|
5efc173fd3594bce5a08fde7420634430ab518a5
|
refs/heads/master
| 2023-04-05T16:46:02.478318
| 2021-04-27T12:15:51
| 2021-04-27T12:15:51
| 359,588,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,147
|
r
|
Day_3.R
|
# Day_3: linear regression on Old Faithful, correlation analysis of the
# ecklonia data set, and a correlation heat map.

# ---- Old Faithful: linear regression --------------------------------------
head(faithful)
eruption.lm <- lm(eruptions ~ waiting, data = faithful)
summary(eruption.lm)

slope <- round(eruption.lm$coef[2], 3)
p.val <- 0.001
r2 <- round(summary(eruption.lm)$r.squared, 3)

ggplot(data = faithful, aes(x = waiting, y = eruptions)) +
  geom_point() +
  annotate("text", x = 45, y = 5, label = paste0("slope == ", slope, "~(min/min)"), parse = TRUE, hjust = 0) +
  annotate("text", x = 45, y = 4.75, label = paste0("italic(p) < ", p.val), parse = TRUE, hjust = 0) +
  annotate("text", x = 45, y = 4.5, label = paste0("italic(r)^2 == ", r2), parse = TRUE, hjust = 0) +
  stat_smooth(method = "lm", colour = "salmon") +
  labs(title = "Old Faithful eruption data",
       subtitle = "Linear regression",
       x = "Waiting time (minutes)",
       y = "Eruption duration (minutes)")

# ---- Correlation analysis on the ecklonia data ----------------------------
# Loading libraries
library(tidyverse)
library(ggpubr)
library(corrplot)

# Reading in the ecklonia dataset
ecklonia <- read_csv("data/ecklonia.csv")

# Drop the non-numeric identifier columns before correlating
ecklonia_sub <- ecklonia %>%
  select(-species, - site, - ID)

# Pearson correlation between stipe length and frond length
cor.test(x = ecklonia$stipe_length, ecklonia$frond_length,
         use = "everything", method = "pearson")

# Shapiro-Wilk normality check per variable (extracts the p-value)
ecklonia_norm <- ecklonia_sub %>%
  gather(key = "variable") %>%
  group_by(variable) %>%
  summarise(variable_norm = as.numeric(shapiro.test(value)[2]))
ecklonia_norm

# Kendall rank correlation (robust to non-normal data)
cor.test(ecklonia$primary_blade_length, ecklonia$primary_blade_width, method = "kendall")

# Calculate Pearson r beforehand for plotting
r_print <- paste0("r = ",
                  round(cor(x = ecklonia$stipe_length, ecklonia$frond_length), 2))

# Then create a single panel showing one correlation
ggplot(data = ecklonia, aes(x = stipe_length, y = frond_length)) +
  geom_smooth(method = "lm", colour = "grey90", se = FALSE) +
  geom_point(colour = "mediumorchid4") +
  geom_label(x = 300, y = 240, label = r_print) +
  labs(x = "Stipe length (cm)", y = "Frond length (cm)") +
  theme_pubclean()

# Full Pearson correlation matrix -- computed BEFORE corrplot() uses it
# (the original script called corrplot(ecklonia_pearson, ...) first, which
# failed because the object did not exist yet).
ecklonia_pearson <- cor(ecklonia_sub)
ecklonia_pearson

# Multiple panel visual
corrplot(ecklonia_pearson, method = "circle")

# ---- Heat map -------------------------------------------------------------
# NOTE(review): the original file contained pasted console output here (the
# source of plyr::dlply with <bytecode>/<environment> lines and a stray ">"
# prompt), which made the script un-sourceable; it has been removed.
# NOTE(review): the original loaded both reshape and reshape2; reshape2's
# melt() names the id columns Var1/Var2 (not X1/X2), so the aes() below is
# updated accordingly.
library(reshape2)
library(plyr)
library(hrbrthemes)

# Melt the correlation matrix to long format and draw a tile heat map
melted <- melt(ecklonia_pearson)
ggplot(data = melted, mapping = aes(x = Var1, y = Var2, fill = value)) +
  geom_tile()
|
b716ebc7aebec2b91e37aa1f6d7acc78c852607f
|
4606b7fb6bec2053fa493d6a828bbf34bdb30f69
|
/tests/testthat/test-util.R
|
e2d33506bba8bc202b4b25572a22ff99e054e400
|
[] |
no_license
|
manisahni/icd9
|
705ed3fa16d3c21bb96baa7ed6a88cc2c1861e73
|
2eddaa4ae22c7a2cf76e05b949193ddd55d05d96
|
refs/heads/master
| 2021-01-18T18:56:54.207249
| 2014-07-21T15:54:18
| 2014-07-21T15:54:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 870
|
r
|
test-util.R
|
# Unit tests for strMultiMatch: regex matching that returns the capture
# groups for each input string, optionally dropping inputs with no match.
context("test utility functions")
test_that('strMultiMatch with and without dropping empties', {
  # With dropEmpty = FALSE (default): one empty character vector per
  # non-matching input, regardless of how many capture groups the pattern has.
  expect_equal(strMultiMatch(pattern="jack", text=c("not", "here")), list(character(), character()))
  expect_equal(strMultiMatch(pattern="(jack)", text=c("not", "here")), list(character(), character()))
  expect_equal(strMultiMatch(pattern="(jack)(second)", text=c("not", "here")), list(character(), character()))
  # With dropEmpty = TRUE: non-matching inputs are removed entirely.
  expect_equal(strMultiMatch(pattern="jack", text=c("not", "here"), dropEmpty = TRUE), list())
  expect_equal(strMultiMatch(pattern="(jack)", text=c("not", "here"), dropEmpty = TRUE), list())
  expect_equal(strMultiMatch(pattern="(jack)(second)", text=c("not", "here"), dropEmpty = TRUE), list())
  # Only the second input matches both groups; result is one entry holding
  # each captured group.
  expect_equal(strMultiMatch("LET (jack)(w)", c("LET jack", "LET jackw", "nothing", "LET else"), dropEmpty = TRUE),
               list(c("jack", "w")))
})
|
e57173b1dd057b12ae1d3dcb8dcc13f01873dc8b
|
1a9536036975eee9d8d7b8f1475a9bdbc36b8806
|
/man/curve_it.Rd
|
cadac1e1f580a5d97b4c2a23c64ec418b3b32a7c
|
[] |
no_license
|
chrisbrunsdon/caricRture
|
10781aa2b83678a0751acf6eebfdb5e5722bde0a
|
48acb95a7a07ea3155e1c8c625c6d5f3609feb67
|
refs/heads/master
| 2021-01-10T19:27:32.209393
| 2016-04-12T09:26:54
| 2016-04-12T09:26:54
| 39,564,788
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,134
|
rd
|
curve_it.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caricRture.R
\name{curve_it}
\alias{curve_it}
\title{'Curvify' polygon-based objects.}
\usage{
curve_it(spdf,s)
spdf %>% curve_it(s)
}
\arguments{
\item{s}{\code{shape} parameter as in \link[graphics]{xspline}}
\item{spdf}{a \link[sp]{SpatialPolygons} or \link[sp]{SpatialPolygonsDataFrame} object}
}
\value{
a \link[sp]{SpatialPolygons} curved caricature
}
\description{
Generates a curved caricature from a \link[sp]{SpatialPolygons} or \link[sp]{SpatialPolygonsDataFrame}
object, controlled by a shape parameter. This can pass through the nodes of the
original object (-1 < shape parameter < 0) or go near to them (0 < shape parameter < 1).
}
\details{
This is based on the \code{\link[graphics]{xspline}} function. In particular,
the shape parameter is the same as in that function.
}
\examples{
'indianred' \%>\% adjustcolor(alpha.f=0.3) -> ired
"POLYGON((0 0,0 2,1 3.5,3 3,4 1,3 0,0 0))" \%>\% readWKT -> p1
p1 \%>\% make_canvas \%>\% plot_it(col=ired)
p1 \%>\% curve_it(1) \%>\% plot_it(col=ired,lty=2)
p1 \%>\% curve_it(0.5) \%>\% plot_it(col=ired,lty=2)
}
|
aef5e590d0761ac021e5d028b5626b1d6bf19f67
|
3ad73d74e1323aa0e3992912bf8704cfe6f58e6c
|
/data_visualisation.R
|
dd41ecfcb6c3edf1936682dc9bfa181bd7b533d7
|
[] |
no_license
|
alyomahoney/Monopoly
|
323c539d4a67ac069739c0618031879af4f48064
|
8ada658acd7ba986aaf2e4bbade1da863850a5ab
|
refs/heads/master
| 2022-11-05T02:56:00.713170
| 2020-06-22T09:04:32
| 2020-06-22T09:04:32
| 269,175,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
data_visualisation.R
|
# Monopoly board-colour analysis: proportion of time spent on each colour set.
# HSL colour strings for the eight Monopoly property groups.
# NOTE(review): colour_sets is defined but never used below -- the plot
# hard-codes its own palette in scale_fill_manual(). Consider reusing it.
set1 <- "hsl(29, 69%, 34%)"
set2 <- "hsl(205, 82%, 73%)"
set3 <- "hsl(297, 77%, 47%)"
set4 <- "hsl(35, 100%, 54%)"
set5 <- "hsl(0, 76%, 49%)"
set6 <- "hsl(60, 88%, 60%)"
set7 <- "hsl(111, 74%, 39%)"
set8 <- "hsl(218, 62%, 33%)"
colour_sets <- c(set1,set2,set3,set4,set5,set6,set7,set8)
colour_label <- c("Brown","Light blue","Pink/purple","Orange","Red","Yellow","Green","Dark blue")
# Aggregate the steady-state probabilities per colour group.
# Relies on tidy_ss_remain and its colour_simple column defined elsewhere.
# NOTE(review): sum(tidy_ss_remain) sums the whole object, not a column of
# the grouped data -- presumably a column name was intended; verify.
prop_col_remain <- data.frame(tidy_ss_remain) %>%
  mutate(Colour = factor(colour_simple,levels=unique(colour_simple))) %>%
  group_by(Colour) %>%
  summarize(Proportion=sum(tidy_ss_remain)) %>%
  filter(Colour %in% c("brown","skyblue2","purple","orange","red","yellow","green","blue")) %>%
  mutate(Colour=factor(colour_label,levels=unique(colour_label)))
# proportion of time spent at each colour
prop_col_remain %>%
  ggplot(aes(Colour, Proportion, fill=Colour)) +
  theme_gdocs() +
  geom_bar(stat="identity") +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank()) +
  scale_fill_manual(values=c("#884403","skyblue2","#CC0099","orange","#EE0000","yellow","#44BB11","#003399"))
|
50f5e532aa31d554d9be09bd1f28d5db4a9ee781
|
f4e504d84c935accb29cf0394729372413152b93
|
/man/bootstrap_C.Rd
|
c575ae3f4aba20a15c522c8ee6e2755198a6b711
|
[] |
no_license
|
cshannum/unequalgroupoutlier
|
8fd8c2f00628358f809842a3037fef19daa71fdc
|
c1ea76a3dca80a1f2ceee7c7e37aaab155b1af8b
|
refs/heads/master
| 2020-03-31T22:54:00.884508
| 2019-02-28T23:32:18
| 2019-02-28T23:32:18
| 152,635,181
| 0
| 0
| null | 2018-11-09T18:48:07
| 2018-10-11T18:11:29
|
R
|
UTF-8
|
R
| false
| true
| 2,240
|
rd
|
bootstrap_C.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Depth_Outlier_Funcs.R
\name{bootstrap_C}
\alias{bootstrap_C}
\title{Bootstrap a cutoff value to identify anomalies}
\usage{
bootstrap_C(coeff, d.method, c.method, alpha, B)
}
\arguments{
\item{coeff}{A data frame of coefficients of interest. The first column is the \code{ID} identifier.
The remaining columns hold the parameters to be estimated. Each row contains the estimated parameters
for one curve.}
\item{d.method}{A character string determining the depth function to use: "LP", "Projection",
"Mahalanobis", or "Euclidean". It is suggested to not use "Tukey" due to singularity in
coefficient matrix. For details see \code{\link[DepthProc]{depth}}}
\item{c.method}{A character string determining the method to estimate the cutoff value.
This can be "depth" or "alpha".}
\item{alpha}{A value determining the percentage of rows to remove from \code{coeff}.
\code{alpha} should be between (0, 1) with a suggested value of 0.05. Do not need to
identify if \code{c.method} = "depth".}
\item{B}{A value determining how many bootstrap datasets should be made to estimate
the cutoff value with a suggested rate of 1000.}
}
\value{
\code{$d} the depths computed by \code{d.method} over all coefficients.
\code{$Cb} the cutoff value; depths below cutoff may be anomalous.
}
\description{
Bootstrap a cutoff value to identify anomalies
}
\details{
The function starts by computing the depths for each parameter set using \code{d.method}.
The "alpha" \code{c.method} removes the alpha proportion of least-deep coefficients. The remaining
coefficients are bootstrapped and new depths are computed for each bootstrapped set. The
1% empirical percentile of the depths is saved. The cutoff value is the median of these
1% empirical percentiles of the depths.
The "depth" \code{c.method} bootstraps the coefficients with probability related to the
original depth values. New depths are computed for each bootstrapped set. The
1% empirical percentile of the depths is saved. The cutoff value is the median of these
1% empirical percentiles of the depths.
}
\seealso{
\code{\link[DepthProc]{depth}}, \code{\link{bootstrap_C.alpha}}, and
\code{\link{bootstrap_C.depth}}
}
|
8738938395202e127ad9561e6ec4cac2cc27f0da
|
e77b87fc6aca13fe63b75bcee7ea56554c39963b
|
/man/playlist_demographics.Rd
|
14cc6433d700c7bcbf95e700e0a807df09e73a41
|
[
"MIT"
] |
permissive
|
davisj95/YTAnalytics
|
e5dacebd4fc8cdfb9e4f6dcc3b061e297225cc0a
|
8a52248e8750701c8e5ad1ea814d8f6e40e4fd03
|
refs/heads/main
| 2023-09-01T15:00:20.373935
| 2023-08-31T15:52:02
| 2023-08-31T15:52:02
| 387,549,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
playlist_demographics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demographics.R
\name{playlist_demographics}
\alias{playlist_demographics}
\title{Playlist Demographic Data}
\usage{
playlist_demographics(playlistId = NULL, ...)
}
\arguments{
\item{playlistId}{Required. String. Id of YouTube playlist.}
\item{...}{Additional arguments passed to \code{analytics_request}}
}
\value{
data.frame
}
\description{
Returns age and gender demographics
}
\examples{
\dontrun{
playlist_demographics(playlistId = "PL2MI040U_GXq1L5JUxNOulWCyXn-7QyZK")
}
}
|
65f6b1860aefc196636ad9dad7ff74c84b45b972
|
9b93b2e65b95236b1d939179d314ca49acab0d39
|
/docs/concrete-ml.R
|
fc51dbbc0fa1e158aeddc32af26366ec9976035e
|
[] |
no_license
|
anguswg-ucsb/176c-project
|
b2d701cb569004f74c086939a1cafd4d09124e1f
|
a3c3aac6a10457ec0786168e058d0f4c77e8ba94
|
refs/heads/main
| 2023-03-29T09:52:55.594129
| 2021-04-05T16:07:26
| 2021-04-05T16:07:26
| 352,705,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,602
|
r
|
concrete-ml.R
|
# Decision trees example
library(tidymodels)
library(baguette)
library(rules)
library(workflowsets)
# Concrete compressive-strength modelling with tidymodels.
data(concrete, package = "modeldata")
glimpse(concrete)
# Average replicated mixtures so each mixture appears once.
concrete <- concrete %>%
  group_by(cement, blast_furnace_slag, fly_ash, water, superplasticizer,
           coarse_aggregate, fine_aggregate, age) %>%
  summarize(compressive_strength = mean(compressive_strength),
            .groups = "drop")
# Preprocess: stratified train/test split and repeated 10-fold CV for tuning.
set.seed(1501)
concrete_split <- initial_split(concrete, strata = compressive_strength)
concrete_train <- training(concrete_split)
concrete_test <- testing(concrete_split)
set.seed(1502)
concrete_folds <- vfold_cv(
  data = concrete_train,
  strata = compressive_strength,
  repeats = 5
)
# Recipes: basic normalization, plus a polynomial + all-pairwise-interaction
# expansion for the linear model.
normalized_rec <- recipe(compressive_strength ~., data = concrete_train) %>%
  step_normalize(all_predictors())
poly_recipe <- normalized_rec %>%
  step_poly(all_predictors()) %>%
  step_interact(~all_predictors():all_predictors())
# Model specifications. Several specs below (svm_p_spec, knn_spec,
# cart_spec, bag_cart_spec, rf_spec, xgb_spec, nnet_spec) are defined but
# currently excluded from the workflow sets (see the commented-out model
# lists further down).
linear_reg_spec <-
  linear_reg(penalty = tune(), mixture = tune()) %>%
  set_engine("glmnet")
nnet_spec <-
  mlp(
    hidden_units = tune(),
    penalty = tune(),
    epochs = tune()) %>%
  set_engine("nnet", MaxNWts = 2600) %>%
  set_mode("regression")
# Constrain the neural-network hidden-unit search range to 1..27.
nnet_param <-
  nnet_spec %>%
  parameters() %>%
  update(hidden_units = hidden_units(c(1, 27)))
mars_spec <-
  mars(prod_degree = tune()) %>% #<- use GCV to choose terms
  set_engine("earth") %>%
  set_mode("regression")
svm_r_spec <-
  svm_rbf(cost = tune(), rbf_sigma = tune()) %>%
  set_engine("kernlab") %>%
  set_mode("regression")
svm_p_spec <-
  svm_poly(cost = tune(), degree = tune()) %>%
  set_engine("kernlab") %>%
  set_mode("regression")
knn_spec <-
  nearest_neighbor(neighbors = tune(), dist_power = tune(), weight_func = tune()) %>%
  set_engine("kknn") %>%
  set_mode("regression")
cart_spec <-
  decision_tree(cost_complexity = tune(), min_n = tune()) %>%
  set_engine("rpart") %>%
  set_mode("regression")
bag_cart_spec <-
  bag_tree() %>%
  set_engine("rpart", times = 50L) %>%
  set_mode("regression")
rf_spec <-
  rand_forest(mtry = tune(), min_n = tune(), trees = 1000) %>%
  set_engine("ranger") %>%
  set_mode("regression")
xgb_spec <-
  boost_tree(tree_depth = tune(), learn_rate = tune(), loss_reduction = tune(),
             min_n = tune(), sample_size = tune(), trees = tune()) %>%
  set_engine("xgboost") %>%
  set_mode("regression")
# linear models workflowset, requiring preprocessing step
normalized <-
  workflow_set(
    preproc = list(normalized = normalized_rec),
    models = list(SVM_radial = svm_r_spec)
    # models = list(SVM_radial = svm_r_spec, SVM_poly = svm_p_spec,
    #               KNN = knn_spec, neural_network = nnet_spec)
  )
# Non linear models workflowset (no preprocessing beyond variable roles)
model_vars <-
  workflow_variables(outcomes = compressive_strength,
                     predictors = everything())
no_pre_proc <-
  workflow_set(
    preproc = list(simple = model_vars),
    models = list(MARS = mars_spec)
    # models = list(MARS = mars_spec, CART = cart_spec, CART_bagged = bag_cart_spec,
    #               RF = rf_spec, boosting = xgb_spec)
  )
with_features <-
  workflow_set(
    preproc = list(full_quad = poly_recipe),
    models = list(linear_reg = linear_reg_spec)
  )
all_workflows <-
  bind_rows(no_pre_proc, normalized, with_features) %>%
  # Make the workflow ID's a little more simple:
  mutate(wflow_id = gsub("(simple_)|(normalized_)", "", wflow_id))
# Grid control: keep predictions and workflows so results can be inspected.
grid_ctrl <-
  control_grid(
    save_pred = TRUE,
    parallel_over = "everything",
    save_workflow = TRUE
  )
# Tune every workflow over the CV folds with a size-25 grid each.
grid_results <- all_workflows %>%
  workflow_map(
    seed = 1503,
    resamples = concrete_folds,
    grid = 25,
    control = grid_ctrl
  )
# ---- Single-model tuning, fitting and validation of the MARS workflow -----
# Workflow combining the polynomial/interaction recipe with the MARS model.
conc_mars_wflow <- workflow() %>%
  add_model(mars_spec) %>%
  add_recipe(poly_recipe)
# Resampling folds for tuning
conc_mars_folds <- vfold_cv(concrete_train)
# Grid over the single tuned MARS parameter (product degree: 1 or 2)
conc_grid <- grid_regular(
  dials::prod_degree(),
  levels = 2
)
# NOTE(review): the original script ran install.packages("earth") at run
# time; package installation belongs outside analysis scripts.
library(earth)
# Tune the MARS workflow over the folds and the grid
conc_tune <- tune_grid(
  conc_mars_wflow,
  resamples = conc_mars_folds,
  grid = conc_grid
)
# --- FITTING ---
# Best parameter combination (first row only)
conc_best <- conc_tune %>%
  select_best() %>%
  `[`(1, )
# NOTE(review): the original referenced undefined crime_* objects below
# (copy-paste from another project); replaced with the conc_*/concrete_*
# objects defined above.
conc_fit <- finalize_workflow(conc_mars_wflow, conc_best) %>%
  fit(data = concrete_train)
# --- VALIDATION ---
conc_validate <- predict(conc_fit, new_data = concrete_test)
# Regression metric: RMSE of predictions vs observed compressive strength.
# (The original used accuracy(), a classification metric, against a
# non-existent "region" column.)
conc_rmse <- rmse(
  data = setNames(
    cbind(conc_validate, concrete_test$compressive_strength),
    c("estimate", "truth")
  ),
  truth = truth,
  estimate = estimate
)
|
26e90c78d88d0401156e67125a26b2e2dfdaaadd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/saeSim/examples/sim_gen.Rd.R
|
745c192979c3233f2309a67383a6fded4e32d282
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 618
|
r
|
sim_gen.Rd.R
|
# Auto-extracted examples from the saeSim sim_gen help page (Rd file);
# demonstrates chaining data-generation components onto a simulation base.
library(saeSim)
### Name: sim_gen
### Title: Generation component
### Aliases: sim_gen sim_gen_generic
### ** Examples
# Data setup for a mixed model
sim_base() %>% sim_gen_x() %>% sim_gen_v() %>% sim_gen_e()
# Adding contamination in the model error
sim_base() %>% sim_gen_x() %>% sim_gen_v() %>% sim_gen_e() %>% sim_gen_ec()
# Simple user defined generator:
# Custom saeSim generator: append a standard-normal column "myVar" to the
# simulation data and return the augmented frame (generator contract:
# data frame in, data frame out).
gen_myVar <- function(dat) {
  n_obs <- nrow(dat)
  dat[["myVar"]] <- rnorm(n_obs)
  dat
}
# Plug the user-defined generator into the pipeline via sim_gen().
sim_base() %>% sim_gen_x() %>% sim_gen(gen_myVar)
# And a chi-sq(5) distributed 'random-effect':
# sim_gen_generic forwards rchisq(df = 5) and draws one value per idD group.
sim_base() %>% sim_gen_generic(rchisq, df = 5, groupVars = "idD", name = "re")
|
4653b5950464d6dc83443803009064599c0a30a0
|
af0df2be1822e2ed328f8bc1fffd0410d0e954ac
|
/montecarlo.R
|
12b9e496e5e7901b3aa8e6e2d84e8431378a01de
|
[] |
no_license
|
dabaja/StatFinDat
|
8a08d729595f0a77e693acfb44bd33a259c8157d
|
7537bcf39d0c53aaec202d66557dd9483fe2976b
|
refs/heads/master
| 2021-07-04T04:30:34.762642
| 2017-09-28T18:06:57
| 2017-09-28T18:06:57
| 103,696,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,440
|
r
|
montecarlo.R
|
library(Rsafd)
# Generating random samples
# Demo of tail behaviour: Gaussian vs Cauchy white noise of the same length.
GWN <- rnorm(1024)
CWN <- rcauchy(1024)
# NOTE(review): par() is changed without saving/restoring the old settings;
# the second par() call below resets only mfrow.
par(mfrow=c(2,1))
plot(GWN,type="l")
title("Sequential plot of a standard Gaussian sample")
plot(CWN,type="l")
title("Sequential plot of a standard Cauchy sample")
par(mfrow=c(1,1))
# Notice that the relative size of extreme values of the Cauchy sample forces
# the bulk of the other points to be crammed together (look at axis values), giving
# the false impression that they are trying to line up along the horizontal axis.
# remember the QQ-plot Cauchy vs Normal!
# Monte Carlo price of a European call under Black-Scholes dynamics.
#   N    number of simulated terminal prices
#   TAU  time to maturity (years)
#   K    strike, S spot, R risk-free rate, SIG volatility
# Returns the discounted risk-neutral mean payoff (a single number).
Call <- function(N = 10000, TAU = 0.04, K = 3.36, S = 3.36, R = 0.024, SIG = 0.6) {
  # Risk-neutral log-normal parameters of the terminal price S_T.
  mean_log <- log(S) + TAU * (R - SIG^2 / 2)
  sd_log <- SIG * sqrt(TAU)
  # Simulate N terminal prices, take the call payoff elementwise, discount.
  terminal_prices <- rlnorm(N, meanlog = mean_log, sdlog = sd_log)
  payoffs <- pmax(terminal_prices - K, 0)
  exp(-R * TAU) * mean(payoffs)
}
# Each Call() run uses fresh random draws, so repeated runs scatter around
# the closed-form price.
Call() # run several times and compare with result of bscall()
# bscall() comes from the Rsafd package loaded above -- presumably the
# closed-form Black-Scholes call price with the same parameterisation
# (TODO confirm against Rsafd docs).
bscall(TAU=0.04, K=3.36, S=3.36, R=0.024, SIG=0.6)
|
ed4d3670f193b4c19447f9b74647424902732ff3
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/dallinwebb|tidy_tuesday|2019__06_house_morgage__code.R
|
c6e61e3190b69d6c7085a15084c19189c7e9ec66
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,389
|
r
|
dallinwebb|tidy_tuesday|2019__06_house_morgage__code.R
|
library(tidyverse)
library(USAboundaries)
library(sf)
# State-level House Price Index (HPI) from the TidyTuesday 2019-02-05 dataset
# (downloaded from GitHub at run time -- requires network access).
hpi <- read_csv("https://github.com/rfordatascience/tidytuesday/raw/master/data/2019/2019-02-05/state_hpi.csv")
# Reference HPI per state.
# NOTE(review): variables are named *_now but the filter picks Nov 2001 --
# confirm whether year == 2001 was intended or is a typo for a recent year.
now <- hpi %>%
filter(year == 2001,
month == 11) %>%
select(state,
year_now = year,
price_index_now = price_index)
# For each state: its post-2007 minimum HPI (the "low"), joined with the
# reference value, plus the relative increase since that low.
# NOTE(review): group_by() without a later ungroup(); downstream use here
# happens to be unaffected.
joined <- hpi %>%
filter(year >= 2007) %>%
group_by(state) %>%
filter(price_index == min(price_index)) %>%
select(state,
year_low = year,
month_low = month,
price_index_low = price_index) %>%
left_join(now, by = "state") %>%
mutate(price_index_diff = price_index_now / price_index_low - 1,
price_index_pct = scales::percent(price_index_diff))
# Attach the HPI summary to state polygons; drop non-contiguous states, DC, PR.
state_boundaries <- USAboundaries::us_states() %>%
left_join(joined, by = c("stusps" = "state")) %>%
filter(!(stusps %in% c("AK", "DC", "HI", "PR")))
# Choropleth of the percentage increase since each state's low, labelled,
# in an Albers equal-area projection (EPSG:5070).
ggplot(state_boundaries, aes(fill = price_index_diff)) +
geom_sf(col = "white") +
geom_sf_text(aes(label = price_index_pct),
check_overlap = T) +
coord_sf(crs = 5070) +
scale_fill_gradient(low = "#87FA87",
high = "#006400") +
labs(title = "Western States had greater increases in HPI since lows",
subtitle = "Should have invested in real estate in Nevada",
fill = "Pct. Increase") +
theme_void() +
theme(panel.grid = element_line(color = "white"))
|
50c472553eafcc236b5358774ce167c8200341d9
|
e535d498001519774956adcc5b0106a5f4e555ac
|
/simulations/ASE_scripts/plot_data.r
|
70f160138b161ca652efaa58ae86e6a745a2f569
|
[] |
no_license
|
kraigrs/thesis_work
|
f73c6f130a0cf33ed079acb35208bff9cb85d4d1
|
bcc8e46b5c65f08c61d5beb8e29ac7e4df101cff
|
refs/heads/master
| 2021-01-22T16:18:29.372793
| 2015-09-10T18:48:11
| 2015-09-10T18:48:11
| 34,088,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,887
|
r
|
plot_data.r
|
# Load constitutive-exon SNP annotations, exon expression, and simulated
# allele-specific read counts; classify exons and plot SNP density / ratio
# distributions. Hard-coded absolute paths -- runs only on the author's machine.
exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/SNPs_in_const.txt",header=FALSE,sep="\t");
# Exon length from BED-style start/end columns (V2, V3).
lengths <- exons[,3]-exons[,2];
exons <- cbind(exons,lengths);
exprn <- read.table("/Users/kraigrs/Wittkopp/Simulations/zhr_z30_exons_expression.txt",sep="\t");
# V8 in `exons` is the gene_exon identifier used for all later merges.
exons <- merge(exons,exprn,by.x="V8",by.y="V1");
tiled_exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/tiled/constExons_single_bp50_error0_tiled.bowtie.exons.txt",header=TRUE,sep="\t");
equal_allele_exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/equal_allele/constExons_single_bp50_error0_equal_allele.bowtie.exons.txt",header=TRUE,sep="\t");
equal_total_exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/equal_total/constExons_single_bp50_error0_equal_total.bowtie.exons.txt",header=TRUE,sep="\t");
#############################
# choose a group to look at #
#############################
merged <- merge(tiled_exons,exons,by.x="gene_exon",by.y="V8");
# NOTE(review): naming looks inverted -- `ASE` is exons with log2(ref/alt)
# exactly 0 (i.e. NO imbalance) and `AI` is everything with a non-zero
# ratio; confirm the intended labels before publishing figures.
ASE <- subset(merged,merged$dm3_ref > 0 & merged$dm3_alt > 0 & log2(merged$dm3_ref/merged$dm3_alt) == 0);
no_ASE <- subset(merged,merged$dm3_ref == 0 & merged$dm3_alt == 0 & merged$Both > 0);
AI <- subset(merged,merged$dm3_ref > 0 & merged$dm3_alt > 0 & log2(merged$dm3_ref/merged$dm3_alt) != 0);
expressed <- subset(merged,merged$V2.y > 0);
##############
# make plots #
##############
# proportion of TUs with ASE
nrow(ASE)/nrow(merged);
nASE <- nrow(ASE);
# proportion of TUs without ASE but with measurable expression
nrow(no_ASE)/nrow(merged);
# proportion of TUs displaying allelic imbalance
nrow(AI)/nrow(merged);
nAI <- nrow(AI);
# barplot comparing number of SNPs between TU with and without allelic imbalance
# SNPs per kb (V7 = SNP count, lengths in bp).
obj1 <- hist(ASE$V7/(ASE$lengths/1000),breaks=seq(0,180,5)); # 240 is based on the maximum between each of ASE and AI, which is 236
obj2 <- hist(AI$V7/(AI$lengths/1000),breaks=seq(0,180,5));
mat <- cbind(obj1$counts/nrow(ASE),obj2$counts/nrow(AI));
obj3 <- hist(log2(AI$dm3_ref/AI$dm3_alt),breaks=40);
par( mfrow = c( 1, 2 ) );
barplot(t(mat),beside=TRUE,names.arg=seq(0,175,5),xlab="Number of SNPs/kb",ylab="Proportion of exons",xlim=c(0,60),main="",col=c("black","gray"));
plot(obj3$breaks[1:length(obj3$breaks)-1],obj3$counts/nrow(AI),type="h",col="black",ylim=c(0,0.3),xlim=c(-7,5),xlab="log2(ref/alt)",ylab="Proportion of exons",main="");
#################
# NOTE(review): this section re-reads the same files loaded above instead of
# reusing the existing objects -- redundant but harmless.
exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/SNPs_in_const.txt",header=FALSE,sep="\t");
lengths <- exons[,3]-exons[,2];
exons <- cbind(exons,lengths);
equal_allele_exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/equal_allele/constExons_single_bp50_error0_equal_allele.bowtie.exons.txt",header=TRUE,sep="\t");
merged1 <- merge(equal_allele_exons,exons,by.x="gene_exon",by.y="V8");
equal_total_exons <- read.table("/Users/kraigrs/Wittkopp/Simulations/equal_total/constExons_single_bp50_error0_equal_total.bowtie.exons.txt",header=TRUE,sep="\t");
merged2 <- merge(equal_total_exons,exons,by.x="gene_exon",by.y="V8");
exprn <- read.table("/Users/kraigrs/Wittkopp/Simulations/zhr_z30_exons_expression.txt",sep="\t");
data1 <- merge(merged1,exprn,by.x="gene_exon",by.y="V1");
data2 <- merge(merged2,exprn,by.x="gene_exon",by.y="V1");
# Side-by-side scatter plots: allelic log-ratio vs (log) read depth for the
# equal-allele and equal-total simulation approaches; red line marks ratio 0.
par(mfrow=c(1,2));
plot(log2(data1$dm3_ref/data1$dm3_alt),log2(data1$V2.y),pch=19,col=rgb(0,0,0,0.2),cex=0.3,
main="Equal allele approach",ylab="log2(number of generated reads)",xlab="log2(ref/alt)");
abline(v = 0,col="red");
plot(log2(data2$dm3_ref/data2$dm3_alt),log2(data2$dm3_ref+data2$dm3_alt+data2$Both),pch=19,col=rgb(0,0,0,0.2),cex=0.3,
main="Equal total approach",ylab="log2(number of generated reads)",xlab="log2(ref/alt)");
abline(v = 0,col="red");
########################
# binomial exact tests #
########################
# Significance threshold for calling allele-specific expression.
cut <- 0.05

# Two-sided exact binomial test of ref vs alt read counts for every exon
# (rows of `counts` must have dm3_ref and dm3_alt columns, both > 0);
# returns the vector of p-values.
# IMPROVEMENT: replaces three copy-pasted loops that grew `pvals` with c()
# (quadratic time) with one preallocated vapply helper.
binom.pvals <- function(counts) {
  vapply(seq_len(nrow(counts)), function(i) {
    binom.test(counts$dm3_ref[i], counts$dm3_ref[i] + counts$dm3_alt[i],
               p = 0.5, alternative = "two.sided", conf.level = 0.95)$p.value
  }, numeric(1))
}

# Tiled reads: false-positive rate at the chosen threshold.
tmp1 <- subset(tiled_exons, tiled_exons$dm3_ref > 0 & tiled_exons$dm3_alt > 0)
pvals <- binom.pvals(tmp1)
FPR <- sum(pvals < cut) / length(pvals)

# Equal-allele reads (overwrites FPR, matching the original script).
tmp2 <- subset(equal_allele_exons, equal_allele_exons$dm3_ref > 0 & equal_allele_exons$dm3_alt > 0)
pvals <- binom.pvals(tmp2)
FPR <- sum(pvals < cut) / length(pvals)

# Equal-total reads: print the counts and proportion (no FPR assignment,
# as in the original).
tmp3 <- subset(equal_total_exons, equal_total_exons$dm3_ref > 0 & equal_total_exons$dm3_alt > 0)
pvals <- binom.pvals(tmp3)
sum(pvals < cut)
length(pvals)
sum(pvals < cut) / length(pvals)
##############
# pie charts #
##############
# Share of exons called ASE (log-ratio exactly 0) vs AI (non-zero log-ratio),
# using the subsets computed in the "choose a group" section above.
pie(c(nrow(ASE),nrow(AI)),labels=c("ASE","AI"),col=c("black","gray"));
|
fa929553b09838e470b9b9049557c5250c0c68ba
|
1ff5773280731e9de136b796d3102cd942977e7c
|
/man/SelectControls.Rd
|
30109481d307cbb537386fafc08797523113136c
|
[] |
no_license
|
na89/SVDFunctions
|
014dc99608f4ba304e26437e3f410c34640ebab5
|
e9af744ba684fbdda85a4c0d658222b2983a5c6f
|
refs/heads/master
| 2020-05-18T14:23:21.487558
| 2019-03-01T19:11:49
| 2019-03-01T19:11:49
| 184,469,099
| 0
| 0
| null | 2019-05-01T19:22:34
| 2019-05-01T19:22:34
| null |
UTF-8
|
R
| false
| true
| 1,424
|
rd
|
SelectControls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selector.R
\name{SelectControls}
\alias{SelectControls}
\title{Selection of the optimal set of controls}
\usage{
SelectControls(genotypeMatrix, SVDReference, caseCounts,
minLambda = 0.75, softMinLambda = 0.9, softMaxLambda = 1.05,
maxLambda = 1.3, min = 500, nSV = 5, binSize = 1)
}
\arguments{
\item{genotypeMatrix}{Genotype matrix}
\item{SVDReference}{Reference basis of the left singular vectors}
\item{caseCounts}{Matrix with summary genotype counts from cases}
\item{minLambda}{Minimum possible lambda}
\item{softMinLambda}{Desirable minimum for lambda}
\item{softMaxLambda}{Desirable maximum for lambda}
\item{maxLambda}{Maximum possible lambda}
\item{min}{Minimal size of a control set that is permitted for return}
\item{nSV}{Number of singular vectors to be used for reconstruction of the}
\item{binSize}{sliding window size for optimal lambda search}
}
\description{
Finds an optimal set of controls satisfying
\eqn{\lambda_GC < softmax_lambda} and \eqn{\lambda_GC > softmin_lambda}
or if none exists – will select a set of controls with
closest \eqn{\lambda_GC} to the range \eqn{[softmin_lambda; softmax_lambda]}
satisfying \eqn{\lambda_GC < max_lambda} and \eqn{\lambda_GC > min_lambda}.
Otherwise no results will be returned. Minimal size of control set
is \code{min} samples for privacy preservation reasons.
}
|
5b06cfe4a60c6f6dcfef2eca037c148774bfd2e3
|
ee49a71e821e06bdda7a8d59486a5070cee68fa3
|
/inst/rstudio/templates/project/proj_fls/data/02_read.R
|
ac3162da0b163ba766cfc03cd67d5ee67e3beef9
|
[
"MIT"
] |
permissive
|
scholaempirica/reschola
|
728b42ba5acb7eb32c712c2ab404ab5546f09700
|
16f7d64889950cb7fe183d26ed7da1f7d8d6283e
|
refs/heads/master
| 2023-04-28T08:21:25.166619
| 2023-04-13T12:20:00
| 2023-04-13T12:20:00
| 245,384,211
| 4
| 1
|
MIT
| 2021-02-24T01:08:23
| 2020-03-06T09:53:26
|
R
|
UTF-8
|
R
| false
| false
| 51
|
r
|
02_read.R
|
library(reschola)
library(tidyverse)
library(here)
|
cc528eff1db049f478aa0c98954dd1d87c222cb5
|
b201f1f182b1828a66a2d97baf28224b39d70564
|
/man/build_tm_distplot_tbl.Rd
|
f98c0ddb944f2d7657182835220950b64b31f94e
|
[
"MIT"
] |
permissive
|
Drinchai/iatlas-app
|
147294b54f64925fb4ee997da98f485965284744
|
261b31224d9949055fc8cbac53cad1c96a6a04de
|
refs/heads/master
| 2023-02-08T08:17:45.384581
| 2020-07-20T23:27:08
| 2020-07-20T23:27:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 513
|
rd
|
build_tm_distplot_tbl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/til_map_distributions_functions.R
\name{build_tm_distplot_tbl}
\alias{build_tm_distplot_tbl}
\title{Build Tilmap Distplot Tibble}
\usage{
build_tm_distplot_tbl(tbl, id, scale_method)
}
\arguments{
\item{tbl}{A tibble with columns sample_id, sample_name, slide_barcode, group}
\item{id}{An integer in the feature_id column of the features_to_samples
table}
\item{scale_method}{A string}
}
\description{
Build Tilmap Distplot Tibble
}
|
b3e7a651272f645222b0a3325ff9d192e1032e7a
|
4e5e4dd54801402c93bb5909bab70ec3bc5a09e6
|
/script/source scrape Big3.R
|
8cca372b9ffcf3d12e0d4476400ef4c8e3b5a3c9
|
[] |
no_license
|
mguideng/text-mining-big3-reviews
|
ada2e96356e02cd15da9809a7031b49f8e3de520
|
28ff8df8afa554055be9b5b3a9360cb8ca7198d2
|
refs/heads/master
| 2020-03-23T15:23:17.583972
| 2018-08-03T00:28:55
| 2018-08-03T00:28:55
| 141,744,132
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,258
|
r
|
source scrape Big3.R
|
## Set URL
# Scrape all Glassdoor reviews for one employer, oldest first.
# NOTE(review): `company` must be defined before sourcing this chunk (it is
# not set here); `sort` below masks base::sort for the rest of the session.
baseurl <- "https://www.glassdoor.com/Reviews/"
sort <- ".htm?sort.sortType=RD&sort.ascending=true"
# Total review count parsed from the first page, e.g. "1,234 reviews".
totalreviews <- read_html(paste(baseurl, company, sort, sep="")) %>%
html_nodes(".margBot.minor") %>%
html_text() %>%
sub(" reviews", "", .) %>%
sub(",", "", .) %>%
as.integer()
maxresults <- as.integer(ceiling(totalreviews/10)) #10 reviews per page, round up to whole number
## A. Create df by scraping: Date, Summary, Title, Pros, Cons, Helpful
# One data.frame per results page, row-bound by map_df. Each page visit
# sleeps 2 s to stay polite; "!" marks progress on the console.
# NOTE(review): 1:maxresults misbehaves if maxresults is 0 or NA
# (seq_len would be safer); data.frame() errors if the per-page CSS
# selectors return different numbers of nodes.
df.z <- map_df(1:maxresults, function(i) {
Sys.sleep(2) #be a polite bot
cat("! ") #progress indicator
pg <- read_html(sprintf(paste(baseurl, company, "_P", i, sort, sep="")))
data.frame(rev.date = html_text(html_nodes(pg, ".date.subtle.small, .featuredFlag")),
rev.sum = html_text(html_nodes(pg, ".reviewLink .summary:not([class*='hidden'])")),
rev.title = html_text(html_nodes(pg, "#ReviewsFeed .hideHH")),
rev.pros = html_text(html_nodes(pg, "#ReviewsFeed .pros:not([class*='hidden'])")),
rev.cons = html_text(html_nodes(pg, "#ReviewsFeed .cons:not([class*='hidden'])")),
rev.helpf = html_text(html_nodes(pg, ".tight")),
stringsAsFactors=F)
})
|
46319a13a724b2b2cf1a227b37c1d4f88152540b
|
9e4df408b72687493cc23144408868a975971f68
|
/SMS_r_prog/r_prog_less_frequently_used/short-term_sensitivity.r
|
efb8c675b9e3dc97cca808e86584364bcd6a55ce
|
[
"MIT"
] |
permissive
|
ices-eg/wg_WGSAM
|
7402ed21ae3e4a5437da2a6edf98125d0d0e47a9
|
54181317b0aa2cae2b4815c6d520ece6b3a9f177
|
refs/heads/master
| 2023-05-12T01:38:30.580056
| 2023-05-04T15:42:28
| 2023-05-04T15:42:28
| 111,518,540
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,615
|
r
|
short-term_sensitivity.r
|
# stochastic, short term forecast,
# lines beginning with # are comments and can be put anywhere
#
#
# Specification of year range
# 1. first year for inclusion in mean
# 2. second year for inclusion in mean
# 3. include variance. 1=TRUE, 0=FALSE
###################################################
# Input
# NOTE(review): `options` here shadows base::options() for the rest of the
# session; a different name (e.g. input_opts) would be safer.
options<-c(
2006, 2008, 1, # Mean weight in the stock
2006, 2008, 1, # mean weight in the catch
2006, 2008, 0, # proportion mature
2006, 2008, 0 # Natural mortality
)
all.covariance<-F # include (T|F) covariance or the four parameters
# if TRUE remember to select the same year range for all parameters
Ry2<- 8.820E6 #Recruitment in the second year after the last assessment year
Ry2.cv<-0.57 # CV of recruitment in the second year after the last assessment year
Ry3<-8.820E6 #Recruitment in the third year after the last assessment year
Ry3.cv<-0.57 # CV of recruitment in the third year after the last assessment year
TACintermidiatYear<-1200000 # TAC or F-multiplier for intermidiate year
FmultIntermidiatYear<-NA # one of them needs to be NA
###################################################
# Derive assessment dimensions from the global SMS.control object
# (S4 object defined elsewhere in the project).
if (is.na(FmultIntermidiatYear)) doCalcFmult<-T else doCalcFmult<-F
la<-SMS.control@species.info[1,"last-age"] #last age
fa<- SMS.control@first.age # first age
na<-la-fa+1 # no of age-groups
ly<-SMS.control@last.year.model # last assessment year
agesFbar<-as.vector(SMS.control@avg.F.ages)
# Reshape the flat input vector into a 4x3 table: one row per quantity
# (WS/WC/PM/M), columns = first year, last year, include-variance flag.
options<-matrix(options,ncol=3,byrow = T)
dimnames(options)[2]<-list(c('first year','last year','Include variance'))
dimnames(options)[1]<-list(c('WS','WC','PM','M'))
# Read one age-structured SMS input table (one row per year, one column per
# age) from a whitespace-separated file in the global `data.path` directory,
# truncate it to the years fy..ly, and label rows (years) and columns
# (lab + zero-padded age). Relies on globals: data.path, SMS.control.
read_input <- function(inp = "west.in", lab = '',
                       fy = SMS.control@first.year, ly = SMS.control@last.year) {
  raw <- scan(file = file.path(data.path, inp), comment.char = "#")
  last_age <- SMS.control@species.info[1, "last-age"]
  first_age <- SMS.control@first.age
  tab <- matrix(raw, ncol = last_age - first_age + 1, byrow = TRUE)
  tab <- tab[1:(ly - fy + 1), ]  # keep only the modelled years
  colnames(tab) <- paste(lab, formatC(seq(first_age, last_age), flag = '0', width = 2), sep = '')
  rownames(tab) <- seq(fy, ly)
  tab
}
# Load stock weight (WS), catch weight (WC), maturity (PM) and natural
# mortality (M) at age, then restrict each to its averaging-year range from
# the `options` table.
WS<-read_input ("west.in",lab='WS')
WC<-read_input ("weca.in",lab='WC')
PM<-read_input ("propmat.in",lab='PM')
M<-read_input ("natmor.in",lab='M')
#filter data
WS<- WS[paste(seq(options[1,1],options[1,2])),]
WC<- WC[paste(seq(options[2,1],options[2,2])),]
PM<- PM[paste(seq(options[3,1],options[3,2])),]
M<- M[paste(seq(options[4,1],options[4,2])),]
# Full empirical covariance across all four inputs (only used when
# all.covariance is TRUE; requires identical year ranges for all four).
if (all.covariance) {
a<-cbind(WS,WC,PM,M)
cov1<-cov(a)
}
# Collapse each input to its per-age mean, keeping the per-age variance.
# A single-year range yields a vector (not a matrix), hence variance 0.
# The variance is zeroed again when the option flag asks to exclude it.
if (is.matrix(WS)) {WS.var<-apply(WS,2,var); WS<-apply(WS,2,mean)} else WS.var<-rep(0,na)
if (options['WS','Include variance']==0) WS.var<-rep(0,na)
if (is.matrix(WC)) {WC.var<-apply(WC,2,var); WC<-apply(WC,2,mean)} else WC.var<-rep(0,na)
if (options['WC','Include variance']==0) WC.var<-rep(0,na)
if (is.matrix(PM)) {PM.var<-apply(PM,2,var); PM<-apply(PM,2,mean)} else PM.var<-rep(0,na)
if (options['PM','Include variance']==0) PM.var<-rep(0,na)
if (is.matrix(M)) {M.var<-apply(M,2,var); M<-apply(M,2,mean)} else M.var<-rep(0,na)
if (options['M','Include variance']==0) M.var<-rep(0,na)
a<-c(WS,WC,PM,M)
varNames1<-names(a)
# Without full covariance: diagonal matrix holding only the per-age variances.
if (!all.covariance) {
cov1<-matrix(0,nrow=length(a),ncol=length(a))
dimnames(cov1)[2]<-list(varNames1)
dimnames(cov1)[1]<-list(varNames1)
diag(cov1)<-c(WS.var,WC.var,PM.var,M.var) # use only the variance (no co-variance)
}
#####################################
# Make covariance matrix from SMS SMS.cor and SMS.std files
#Open SMS correlation file
# NOTE(review): several base names are masked in this section: `file`, `c`,
# `cor`, and the loop variable `j` is reused as both index and counter.
file<-file.path(data.path,"sms.cor")
ofil<-file.path(data.path,"sms_out.cor")
a<-readLines(file)
len<-length(a)
# Drop the two header lines of the ADMB correlation file.
a<-a[3:len]
len<-len-2
write.table(a,file=ofil,quote=FALSE,col.names=FALSE,row.names=FALSE)
# Fixed-width parsing: columns 10-23 hold the variable name, the correlation
# values start at column 49. The second write.table overwrites the first.
var.name<-substr(a,start=10,stop=23)
b<-substr(a,start=49,stop=1e5)
write.table(b,file=ofil,quote=FALSE,col.names=FALSE,row.names=FALSE)
c<-scan(file=ofil)
# Rebuild the lower-triangular correlation matrix row by row (row i holds
# i values in the ADMB file).
cor<-matrix(NA,nrow=len,ncol=len)
j<-1
for (i in (1:len)) {
cor[i,1:(i)]<-c[j:(j+i-1)]
j<-j+i
}
# fill the upper triangle
for (i in (1:len)) {
for (j in (min(i+1,len):len)) {
cor[i,j]<-cor[j,i]
}
}
# read SMS.std
# Columns: V2 = variable name, V3 = estimate, V4 = standard deviation.
a<-read.table(file.path(data.path,"sms_sensi.std"),comment.char = "#",header=FALSE,skip=1)
std<-a$V4
value<-a$V3
var.name<-a$V2
# make covariance from correlation
# cov(i,j) = cor(i,j) * sd(i) * sd(j); diagonal = variance.
cov2<-matrix(NA,nrow=len,ncol=len) # co-variance
for (i in (1:len)) {
for (j in (1:len)) {
if (i!=j) cov2[i,j]=cor[i,j]*std[i]*std[j]
}
}
diag(cov2)<-std^2
# Keep only the terminal-year parameters (names containing 'term') and map
# them to the F../N.. naming scheme with zero-padded ages.
vars<- 'term'
varList<-grep(vars,var.name)
cov2<-cov2[varList,varList]
varNames2<-sub('term2_N','N',var.name[varList])
varNames2<-sub('term_F','F',varNames2)
varNames2<-paste(varNames2,formatC(seq(fa,la),flag='0',width=2),sep='')
dimnames(cov2)[2]<-list(varNames2)
dimnames(cov2)[1]<-list(varNames2)
#just checking. Get the correlation matrix from the new correlation matrix
# round(cov2cor(cov2),2)
#######################
# We have now two covariance matrices (cov1 and cov2)
# combine those into one, assuming no correlation between the two matrices
allNames<-sort(c(varNames1,varNames2,'Ry2','Ry3'))
# the combined covariance matrix (block-diagonal: cov1, cov2, recruitment)
cov12<-matrix(0,nrow=length(allNames),ncol=length(allNames))
dimnames(cov12)[2]<-list(allNames)
dimnames(cov12)[1]<-list(allNames)
cov12[varNames1,varNames1]<-cov1
cov12[varNames2,varNames2]<-cov2
cov12["Ry2","Ry2"]<-(Ry2.cv*Ry2)^2 # Recruitment variance
cov12["Ry3","Ry3"]<-(Ry3.cv*Ry3)^2
#cov12
#######################################
# Collect input parameters
#
FN<-value[varList] # F and stock numbers
names(FN)<-varNames2
rec<-c(Ry2,Ry3) #recruitment
names(rec)<-list('Ry2','Ry3')
# Single parameter vector, sorted by name so its layout matches the sorted
# covariance matrix cov12 (alphabetical: F.. < M.. < N.. < PM.. < Ry2 <
# Ry3 < WC.. < WS..).
values<-c(WS,WC,PM,M,FN,rec) # all of them
values<-values[order(names(values))]
#########################################
#Utility to extract the various data types from the list of parameters
# `clip` records, for each quantity, the [start, end] index of its slice
# within the sorted `values` vector; columns are in that alphabetical order.
clip<-matrix(NA,nrow=2,ncol=8)
dimnames(clip)[2]<-list(c('FI','M','N','PM','Ry2','Ry3','WC','WS'))
clip[,'FI']<-c(1,na)
i<-1+na
clip[,'M']<-c(i,i+na-1)
i<-i+na
clip[,'N']<-c(i,i+na-1)
i<-i+na
clip[,'PM']<-c(i,i+na-1)
i<-i+na
clip[,'Ry2']<-i
i<-i+1
clip[,'Ry3']<-i # recruits
i<-i+1
clip[,'WC']<-c(i,i+na-1)
i<-i+na
clip[,'WS']<-c(i,i+na-1)
#####################################
# F bars
# Mean F over the configured Fbar age range, selected by the zero-padded
# "Fxx" names.
Fs<-values[clip[1,'FI']:clip[2,'FI']]
Fbar<-mean(Fs[paste("F",formatC(seq(agesFbar[1],agesFbar[2]),flag='0',width=2),sep='')])
##################
# prediction
# Two-year stochastic projection of the stock.
#   pars    parameter vector laid out as described by the global `clip` table
#           (F, M, N, maturity, recruitment, weights)
#   Fmult1  F-multiplier in the intermediate year (recomputed from the TAC
#           by bisection when the global doCalcFmult is TRUE)
#   Fmult2  F-multiplier in the TAC year
# Returns list(TSB, SSB, Yield, N, FI, Fmult1).
# Relies on globals: clip, na, fa, la, doCalcFmult, TACintermidiatYear.
predict <- function(pars, Fmult1 = 1, Fmult2 = 1) {
  FI <- matrix(NA, nrow = 2, ncol = na)  # F-at-age, years 1-2
  N <- matrix(NA, nrow = 3, ncol = na)   # stock numbers, years 1-3
  FI[1, ] <- pars[clip[1, 'FI']:clip[2, 'FI']]
  M <- pars[clip[1, 'M']:clip[2, 'M']]
  N[1, ] <- pars[clip[1, 'N']:clip[2, 'N']]
  prop <- pars[clip[1, 'PM']:clip[2, 'PM']]
  N[2, 1] <- pars[clip[1, 'Ry2']]        # recruitment, year 2
  N[3, 1] <- pars[clip[1, 'Ry3']]        # recruitment, year 3
  Weca <- pars[clip[1, 'WC']:clip[2, 'WC']]
  West <- pars[clip[1, 'WS']:clip[2, 'WS']]
  if (doCalcFmult) {
    # Intermediate-year yield for a given F-multiplier (Baranov catch eq.).
    calcYield <- function(Fmult = 1, FF = FI[1, ]) {
      FF <- FF * Fmult
      ZZ <- FF + M
      sum(N[1, ] * (1 - exp(-ZZ)) * FF / ZZ * Weca)
    }
    # Bisection for the F-multiplier reproducing the intermediate-year TAC.
    dif <- 100.0
    iter <- 0
    x <- 1.0
    target <- TACintermidiatYear
    upper <- 3; lower <- 0
    while ((dif > 1E-8) & (iter < 100) & (x >= 1E-12)) {
      x <- (upper + lower) / 2
      y <- calcYield(Fmult = x)
      if (y >= target) upper <- x else lower <- x
      dif <- abs(upper - lower)
      iter <- iter + 1
    }
    if ((iter < 100) | (x <= 1E-8)) Fmult1 <- x else Fmult1 <- -1000.0
  }
  FI[2, ] <- FI[1, ] * Fmult2
  FI[1, ] <- FI[1, ] * Fmult1
  # BUG FIX: the original `Z <- FI[,] + M` recycled the length-na vector M
  # column-wise down the 2 x na matrix FI, adding the WRONG age's M to every
  # other element. Expand M across both rows instead (same pattern as
  # rep(Weca, each = 2) used for Yield below).
  Z <- FI + rep(M, each = 2)
  # Project numbers forward two years; the last age is a plus-group.
  # NOTE(review): the inner loop indexes matrix columns by age fa:(la-1)
  # directly, which assumes fa == 1 -- confirm for this stock.
  for (y in (1:2)) {
    for (a in ((fa:(la - 1)))) N[y + 1, a + 1] <- N[y, a] * exp(-Z[y, a])
    N[y + 1, la] <- N[y + 1, la] + N[y, la] * exp(-Z[y, la]) # plusgroup
  }
  SSB <- N * rep(West, each = 3) * rep(prop, each = 3)
  TSB <- N * rep(West, each = 3)
  Yield <- N[1:2, ] * (1 - exp(-Z)) / Z * FI * rep(Weca, each = 2)
  list(TSB, SSB, Yield, N, FI, Fmult1)
}
# test
# sum(predict(values)[[2]][1,])
###################
##########
# Partial derivatives
##########
# Forward-difference partial derivatives of a summary output with respect to
# every input parameter, plus delta-method variances.
#   parm    input parameter vector (same layout as `values` / `clip`)
#   varNo   which predict() output to summarise: 1=TSB, 2=SSB, 3=Yield, 4=N, 5=F
#   varYno  which year: 1=intermediate year, 2=TAC year, 3=TAC year + 1
#   Fmult2  F-multiplier in the TAC year
# Returns list(B1 = base value, var1 = variance ignoring covariances,
#              var2 = full delta-method variance, grad = gradient vector).
# Relies on globals: predict(), cov12.
partialDerivatives <- function(parm, varNo, varYno, Fmult2 = 1) {
  grad <- rep(NA, length(parm))
  B1 <- sum(predict(pars = parm, Fmult2 = Fmult2)[[varNo]][varYno, ])
  delta <- 0.01  # relative perturbation per parameter
  for (i in seq_along(parm)) {
    # BUG FIX: the original did `localPar <- values`, perturbing a copy of
    # the GLOBAL `values` rather than the `parm` argument (they only agreed
    # because every caller happened to pass `values`).
    localPar <- parm
    localPar[i] <- localPar[i] * (1.0 + delta)
    B2 <- sum(predict(pars = localPar, Fmult2 = Fmult2)[[varNo]][varYno, ])
    grad[i] <- (B2 - B1) / (delta * parm[i])
  }
  # without covariance
  var1 <- sum(diag(cov12) * grad^2)
  # with covariance (delta method: g' Sigma g)
  var2 <- as.numeric(t(grad) %*% cov12 %*% grad)
  list(B1, var1, var2, grad)
}
# sensitivity
# Draw two sensitivity panels for one output (varNo) in one year (varYno):
# a barplot of linear (rate) sensitivity coefficients > 0.1, and a pie of
# each parameter's share of the delta-method variance (small shares lumped
# into "other"). Relies on globals: partialDerivatives(), cov12, ly.
# NOTE(review): `tit` is only set for varNo 1-3; calling with varNo 4 or 5
# would error on the paste() below.
sensitivity<-function(pars,varNo=2,varYno=2) {
varGrad<-partialDerivatives(pars,varNo=varNo,varYno=varYno)
# rate sensitivity coefficients
sens<-varGrad[[4]]*pars/ varGrad[[1]]
sens2<-sens[order(abs(sens),decreasing = T)]
# varNo 1=TSB, 2=SSB, 3=Yield, 4=N, 5=F
if (varNo==1) tit<-'TSB'
if (varNo==2) tit<-'SSB'
if (varNo==3) tit<-'Yield'
tit2<-paste(tit, ly+varYno ,"\n Liniar coefficients")
barplot(sens2[sens2>0.1],cex.names=0.8,las=2,ylab='sensitivity',main=tit2,cex.main = 0.8)
#partial Variance
# Each parameter's contribution grad_i^2 * var_i relative to the total
# no-covariance variance.
p<-varGrad[[4]]^2*diag(cov12)/ varGrad[[2]]
p1<-p[order(p)]
# Lump the smallest 90% of contributors into a single "other" slice.
sepa<-0.9
p2<-c(sum(p1[1:(length(p1)*sepa)]),p1[(length(p1)*sepa+1):length(p1)])
names(p2)[1]<-"other"
tit2<-paste(tit, ly+varYno ,"\nProportion of variance")
pie(p2, main=tit2,cex.main=0.8)
}
# Draw the sensitivity panels: yield in the TAC year and SSB the year after.
# NOTE(review): cleanup() and newplot() are project helpers defined outside
# this file; `dev` is set to "print" and immediately overwritten by "screen".
cleanup()
nox<-3; noy<-4;
dev<-"print"
dev<-"screen"
newplot(dev,nox,noy);
sensitivity(values,varNo=3,varYno=2)
sensitivity(values,varNo=2,varYno=3)
################################################
# Assemble the forecast-input summary table: one row per age with mean
# weights, maturity, F-at-age and stock numbers, plus CVs where variance was
# requested; written to short-term_sensitivity_input.out and echoed.
B0 <- predict(values)
CV <- sqrt(diag(cov12)) / values
out <- cbind(seq(fa, la), round(WS, digits = 3)); lab <- c('Age', 'Weight in the stock (kg)')
if (sum(CV[clip[1, 'WS']:clip[2, 'WS']]) > 0) { out <- cbind(out, round(CV[clip[1, 'WS']:clip[2, 'WS']], 2)); lab <- c(lab, 'CV') }
out <- cbind(out, round(WC, digits = 3)); lab <- c(lab, 'Weight in the catch (kg)')
if (sum(CV[clip[1, 'WC']:clip[2, 'WC']]) > 0) { out <- cbind(out, round(CV[clip[1, 'WC']:clip[2, 'WC']], 2)); lab <- c(lab, 'CV') }
out <- cbind(out, round(PM, digits = 3)); lab <- c(lab, 'Proportion mature')
if (sum(CV[clip[1, 'PM']:clip[2, 'PM']]) > 0) { out <- cbind(out, round(CV[clip[1, 'PM']:clip[2, 'PM']], 2)); lab <- c(lab, 'CV') }
# BUG FIX: the original built the 'F' column from B0[[4]] (= stock numbers N)
# and the 'Stock numbers' column from B0[[3]] (= yield). predict() returns
# list(TSB, SSB, Yield, N, FI, Fmult1), so F-at-age is element 5 and
# N-at-age is element 4. (Also: `round(x, d = 3)` relied on partial argument
# matching; spelled out as `digits` throughout.)
out <- cbind(out, round(B0[[5]][1, ], digits = 3)); lab <- c(lab, 'F')
if (sum(CV[clip[1, 'FI']:clip[2, 'FI']]) > 0) { out <- cbind(out, round(CV[clip[1, 'FI']:clip[2, 'FI']], 2)); lab <- c(lab, 'CV') }
out <- cbind(out, round(B0[[4]][1, ], digits = 3)); lab <- c(lab, 'Stock numbers (thousands)')
if (sum(CV[clip[1, 'N']:clip[2, 'N']]) > 0) { out <- cbind(out, round(CV[clip[1, 'N']:clip[2, 'N']], 2)); lab <- c(lab, 'CV') }
dimnames(out)[2] <- list(lab)
dimnames(out)[1] <- list(seq(fa, la))
write.table(out, row.names = FALSE, file.path(data.path, "short-term_sensitivity_input.out"))
out
######################################################
# Forecast: run the two-year projection for each TAC-year F-multiplier in
# `steps` and collect biomass/yield summaries (plus delta-method CVs) per step.
incl.CV <- TRUE            # compute CVs via partialDerivatives()
make.contrib.plot <- TRUE  # draw per-age contribution pie charts
steps <- seq(0.0, 1.0, 0.2)
steps <- 0.5  # NOTE(review): overrides the sequence above; only one step runs
results <- matrix(NA, ncol = 15, nrow = length(steps))
dimnames(results)[2] <- list(c('TSB1','SSB1','Fmult1','Fbar1','Land1','TSB2','SSB2','Fmult2','Fbar2','Land2','CV(Land2)',
'TSB3','CV(TSB3)','SSB3','CV(SSB3)'))
i <- 0
for (s in (steps)) {
  a <- predict(pars = values, Fmult2 = s)
  i <- i + 1
  # Intermediate year (1)
  results[i,'TSB1'] <- round(sum(a[[1]][1,]))
  results[i,'SSB1'] <- round(sum(a[[2]][1,]))
  results[i,'Fmult1'] <- round(a[[6]], 2)
  results[i,'Fbar1'] <- round(Fbar, 2) * results[i,'Fmult1']
  results[i,'Land1'] <- round(sum(a[[3]][1,]))
  # TAC year (2)
  results[i,'TSB2'] <- round(sum(a[[1]][2,]))
  if (incl.CV) {
    #varGrad<-partialDerivatives(parm=values,varNo=1,varYno=2,Fmult2=s)
    #results[i,'CV(TSB2)']<- round(sqrt(varGrad[[3]])/varGrad[[1]],2)
  }
  results[i,'SSB2'] <- round(sum(a[[2]][2,]))
  if (incl.CV) {
    #varGrad<-partialDerivatives(parm=values,varNo=2,varYno=2,Fmult2=s)
    #results[i,'CV(SSB2)']<- round(sqrt(varGrad[[3]])/varGrad[[1]],2)
  }
  results[i,'Fmult2'] <- s
  results[i,'Fbar2'] <- round(Fbar * s, 3)
  results[i,'Land2'] <- round(sum(a[[3]][2,]))
  if (incl.CV) {
    varGrad <- partialDerivatives(parm = values, varNo = 3, varYno = 2, Fmult2 = s)
    if (varGrad[[1]] > 0) results[i,'CV(Land2)'] <- round(sqrt(varGrad[[3]]) / varGrad[[1]], 2)
  }
  if (make.contrib.plot) {
    # Per-age contribution pies for yield and SSB.
    yield <- a[[3]][1,]  # landings, intermediate year
    names(yield) <- paste('age', seq(fa, la))
    tit2 <- paste(ly + 1, "Yield")
    pie(yield, main = tit2, cex.main = 1)
    # BUG FIX: the original named and plotted `yield` in this and the two
    # SSB blocks below, so every "SSB" pie actually showed the yield data.
    ssb <- a[[2]][1,]    # SSB, intermediate year
    names(ssb) <- paste('age', seq(fa, la))
    tit2 <- paste(ly + 1, "SSB")
    pie(ssb, main = tit2, cex.main = 1)
    yield <- a[[3]][2,]  # landings, TAC year
    names(yield) <- paste('age', seq(fa, la))
    tit2 <- paste(ly + 2, "Yield")
    pie(yield, main = tit2, cex.main = 1)
    ssb <- a[[2]][2,]    # SSB, TAC year
    names(ssb) <- paste('age', seq(fa, la))
    tit2 <- paste(ly + 2, "SSB")
    pie(ssb, main = tit2, cex.main = 1)
    ssb <- a[[2]][3,]    # SSB, TAC year + 1
    names(ssb) <- paste('age', seq(fa, la))
    tit2 <- paste(ly + 3, "SSB")
    pie(ssb, main = tit2, cex.main = 1)
  }
  # TAC year + 1 (3)
  results[i,'TSB3'] <- round(sum(a[[1]][3,]))
  if (incl.CV) {
    varGrad <- partialDerivatives(parm = values, varNo = 1, varYno = 3, Fmult2 = s)
    results[i,'CV(TSB3)'] <- round(sqrt(varGrad[[3]]) / varGrad[[1]], 2)
  }
  results[i,'SSB3'] <- round(sum(a[[2]][3,]))
  if (incl.CV) {
    varGrad <- partialDerivatives(parm = values, varNo = 2, varYno = 3, Fmult2 = s)
    results[i,'CV(SSB3)'] <- round(sqrt(varGrad[[3]]) / varGrad[[1]], 2)
  }
  # BUG FIX: this trailing pie duplicated the ly+3 SSB plot but also used
  # `yield` instead of `ssb`. It runs regardless of make.contrib.plot
  # (kept unconditional, matching the original control flow).
  ssb <- a[[2]][3,]
  names(ssb) <- paste('age', seq(fa, la))
  tit2 <- paste(ly + 3, "SSB")
  pie(ssb, main = tit2, cex.main = 1)
}
results
|
b54b95369790a519b944aa004bf0d3b2bd765e3e
|
992a8fd483f1b800f3ccac44692a3dd3cef1217c
|
/Rstudy/tidyr and dplyr.r
|
111e728b3ef9b1ba6127be753d6930871e80c877
|
[] |
no_license
|
xinshuaiqi/My_Scripts
|
c776444db3c1f083824edd7cc9a3fd732764b869
|
ff9d5e38d1c2a96d116e2026a88639df0f8298d2
|
refs/heads/master
| 2020-03-17T02:44:40.183425
| 2018-10-29T16:07:29
| 2018-10-29T16:07:29
| 133,203,411
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
r
|
tidyr and dplyr.r
|
# tidyr and dplyr demo
# FIX: install the packages only when they are missing (the original
# re-installed them on every run), then attach them.
if (!requireNamespace("tidyr", quietly = TRUE)) install.packages("tidyr")
library(tidyr)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
# Semicolon-separated germination data, read from the working directory.
germination <- read.csv("Germination.csv", sep = ";")
head(germination)
# subset rows
germinSR <- filter(germination, Species == 'SR')
# select columns
germin_clean <- select(germination, Species, Treatment, Nb_seeds_germin)
# or, namespace-qualified to avoid clashes with other packages' select():
germin_clean <- dplyr::select(germination, Species, Treatment, Nb_seeds_germin)
# create a new column: germination percentage per record
germin_percent <- mutate(germination, Percent = Nb_seeds_germin / Nb_seeds_tot * 100)
|
2fb0a60c402cb4a6a0c29d7b7dfefa3a8fd38ce7
|
26fc0711f31ec6dcce1f1c3960a271eaa8457548
|
/Stepik4/data_table.r
|
2a64465ec948dc52b1922f0bcb188837d8d0a705
|
[] |
no_license
|
venkaDaria/rlang-demo
|
bdeee1621c7a506c7f1f520333550a7e130e34ac
|
1ba8ee3904541a86c11e9d8ace8f528c569d6b48
|
refs/heads/master
| 2022-04-13T13:39:54.597495
| 2020-04-11T16:15:43
| 2020-04-11T16:18:12
| 254,906,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,011
|
r
|
data_table.r
|
# Task: filter.expensive.available(products, brands) takes `products`
# (a data.table) and a vector of brand names, and returns only the rows for
# items whose price is greater than OR EQUAL to 5000 rubles (500000 kopecks,
# matching the units used in the data), that are in stock, and that belong
# to one of the given brands.
filter.expensive.available <- function(products, brands) {
  # BUG FIX: the task says "price >= 5000 rubles" but the original used a
  # strict `>`; also TRUE instead of the reassignable alias T.
  products[(price >= 500000) & (available == TRUE) & (brand %in% brands)]
}
# Equivalent chained-filter formulation -- kept as a comment: as a bare
# top-level expression it errors when the file is sourced, since `products`
# is undefined here:
# products[brand %in% brands][price >= 500000][available == TRUE]
# Task: ordered.short.purchase.data(purchases) takes a data.table of
# purchases and returns a table with only the order-number and product-id
# columns, ordered by decreasing price of the purchased item. Returns
# (rows with a negative item quantity) must be removed.
ordered.short.purchase.data <- function(purchases) {
purchases[order(-price)][!(quantity < 0), .(ordernumber, product_id)] # quantity >= 0
}
# Equivalent formulation -- kept as a comment: as a bare top-level
# expression it errors when the file is sourced (`purchases` is undefined):
# purchases[quantity >= 0][order(-price), .(ordernumber, product_id)]
# Task: purchases.median.order.price(purchases) returns the median order
# cost (a single number), grouping with data.table. Rows with a non-positive
# quantity (returns) are ignored. One order may span several rows
# ("positions") sharing an ordernumber, and a position may hold several
# units of one item (quantity), so order cost = sum(price * quantity).
purchases.median.order.price <- function(purchases) {
purchases[quantity > 0][, .(w = sum(price*quantity)), by = ordernumber][, median(w)]
}
# Equivalent formulation -- kept as a comment: besides referencing the
# undefined `purchases` at top level, the original line ended with a stray
# unmatched `}` that made the whole file a syntax error:
# median(purchases[quantity >= 0][, list(w = sum(price * quantity)), by=list(ordernumber)]$w)
# Task: get.category.ratings returns, for every category, the total turnover
# (discounts included) and the number of items bought, given a purchases
# table and a product-to-category mapping. An item belonging to several
# categories is counted in all of them. Keys are used for the join.
get.category.ratings <- function(purchases, product.category) {
setkey(purchases)
setkey(product.category)
tb <- merge(purchases, product.category, by = 'product_id', allow.cartesian=TRUE)
tb[, lapply(.SD,sum), by=.(category_id)][, list(category_id, totalcents, quantity)]
}
# vs
# NOTE(review): this second definition overwrites the one above when the
# file is sourced; it sums only the two columns of interest rather than
# every column of .SD.
get.category.ratings <- function(purchases, product.category) {
setkey(purchases)
setkey(product.category)
tb <- merge(purchases, product.category, by = 'product_id', allow.cartesian=TRUE)
tb[, list(totalcents=sum(totalcents), quantity=sum(quantity)), by = category_id]
}
# Add a "price.portion" column via `:=` holding each position's share of
# its order total, as a percentage with exactly two decimals (character
# type, trailing zeros kept).  Positions with a non-positive quantity
# (returns) must be removed before the calculation.
mark.position.portion <- function(purchases) {
  # Assign only within the positive-quantity subset; rows outside it get
  # NA in the new column and are dropped by the final filter.  The
  # original condition `quantity >= 0` wrongly kept zero-quantity
  # positions, which the task statement says to discard.
  purchases[quantity > 0,
            'price.portion' := sprintf("%.2f", round((price * quantity) / sum(price * quantity) * 100, 2)),
            by = 'ordernumber'][!is.na(price.portion)]
}
# vs
# Alternative solution: drop returns first, then add the share-of-order
# column in place with `:=`, grouped by order.
mark.position.portion <- function(purchases) {
  # Remove returns (non-positive quantities) before computing shares.
  purchases <- purchases[quantity > 0]
  # Share of the order total for each position, as a percentage with two
  # decimals.  NOTE(review): format() pads values to a common width within
  # each group, so results may carry leading spaces -- confirm that is
  # acceptable; also `F` would conventionally be spelled `FALSE`.
  purchases[, price.portion := format(round(100 * price * quantity / sum(price * quantity), 2),
                                      nsmall=2,digits=2, scientific = F), by=ordernumber]
}
|
5ca7033e059d1a56f8a59bc07f19d770cba9760e
|
e93365ff9ea828bb82bb691b8e88037280f26a36
|
/src/visualization/plot_saleprice_waterfront.r
|
cd422e29b88b7d15c34284f069fd6a7e3aa79d6a
|
[] |
no_license
|
YufenLin/housing_prices_project
|
42ae66e2244218e055c5263f29f0425cc6478165
|
a1d31b84d51ab39f1b7e6dd3651570837cc9f3f8
|
refs/heads/master
| 2020-09-23T08:44:13.599004
| 2019-12-06T23:25:07
| 2019-12-06T23:25:07
| 225,455,807
| 0
| 1
| null | 2019-12-06T21:25:00
| 2019-12-02T19:48:33
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,285
|
r
|
plot_saleprice_waterfront.r
|
#
# Author: Yu Fen
# Date: December 6, 2019
# Purpose: Visualize the distribution of sale price by waterfront status
#

# load necessary libraries ----
# install.packages("tidyverse")
library(tidyverse)

# set working directory ----
# NOTE(review): setwd() ties the script to one machine; a project-relative
# path helper would be more portable.
setwd("~/flatiron/project/housing_prices_project/")

# load necessary data ----
prices_df <- read_csv("data/processed/residential.csv")

# keep only sale price and the waterfront flag, with readable labels ----
waterfront_df <-
  prices_df %>%
  select(saleprice, wfntlocation) %>%
  mutate(sale_price_per_100k = saleprice / 100000,
         waterfront_new = if_else(wfntlocation == 0, "No waterfront", "Waterfront"))

# visualize the distribution of sale prices by waterfront status ----
waterfront_plot <-
  waterfront_df %>%
  ggplot(aes(x = sale_price_per_100k, fill = waterfront_new)) +
  geom_histogram() +
  xlab("Sale price per $100K") +
  ylab("Count") +
  labs(title = "The distribution of King County home sale prices in 2018") +
  theme_minimal() +
  theme(legend.position = "none", plot.title = element_text(hjust = 0.5)) +
  facet_grid(facets = vars(waterfront_new))

# Save explicitly: appending ggsave() to a plot with `+` relied on
# deprecated side-effect behavior in ggplot2 and is no longer supported.
ggsave("references/figures/waterfront_new_price_hist.png", plot = waterfront_plot)

# Make the legend human readable!
# Better yet, have a discussion if the legend is even necessary
# Make sure the color palette is something you like!
|
f44fb9916af8d26f0622f826cde0a80e73b62d59
|
ba53c61c1916301ec353def6c857d3af9c17a284
|
/man/expand_matrix.Rd
|
fd793d58c8db3ab8788481c50c316d3ff8d90cba
|
[] |
no_license
|
ivaughan/econullnetr
|
7921b08fe0b16c771afee2f7320a137c0359b36e
|
e2227492df82cef54936eb89815f6bf207b26b70
|
refs/heads/master
| 2023-06-17T11:39:19.704109
| 2021-05-28T15:46:56
| 2021-05-28T15:46:56
| 104,395,786
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,699
|
rd
|
expand_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expanding_interaction_matrix.R
\name{expand_matrix}
\alias{expand_matrix}
\title{Expand a summarised interaction matrix}
\usage{
expand_matrix(X, r.names = rownames(X), MARGIN = 1)
}
\arguments{
\item{X}{A matrix or data frame representing the interaction matrix. This
should only include the interaction data (i.e. counts of different
interactions) and no additional columns for taxon names, covariates, etc}
\item{r.names}{An optional object of identical length to the number of
rows in \code{X} listing the taxon names. In many situations these may
be the row names of \code{X} (the default). Alternatively
\code{r.names} can be use to specify a column in a data frame
containing the names or a separate vector.}
\item{MARGIN}{Similar to \code{apply}, an integer value indicating
whether the data are arranged in rows or columns. \code{MARGIN = 1}
(the default) indicates that each column relates to one consumer taxon
(the format typically used for bipartite networks), whilst
\code{MARGIN = 2} indicates that each row is one consumer taxon, with
column names being the resources.}
}
\value{
A data frame where each row represents the interaction observed
between an individual consumer and one resource species. The first
column is named \code{Consumer} and records which taxon each individual
belongs to. The remaining columns represent the resources: one column for each
taxon.
}
\description{
A simple function for converting interaction matrices that are summarised
at (typically) species-level to individual-level matrices, ready for
use with \code{generate_null_net}. This is only applicable to the
special (but common) case where one individual = one interaction
(e.g. many pollination networks, ant-seed networks).
Data can be stored either with consumers as columns and resources as
rows or vice versa. Taxon names for each row in the matrix could either
be stored as the row names of the matrix or data frame (as used, for
example, by the \code{bipartite} package), or as a column containing the
names in a data frame.
}
\examples{
# Toy example representing a typical bipartite format.
bp.inter <- matrix(c(1, 2, 2, 0, 5, 3, 3, 0, 2), nrow = 3, byrow = FALSE,
dimnames = list(c("A", "B", "C"),
c("sp1", "sp2", "sp3")))
bp.inter
expand_matrix(bp.inter)
# Use a simplified version of the Silene data set, pooling data
# across the 11 visits.
int.summ <- aggregate(Silene[, 3:7], by = list(Silene$Insect), sum)
colnames(int.summ)[1] <- "taxon"
expand_matrix(int.summ[, -1], r.names = int.summ$taxon, MARGIN = 2)
}
|
39a66ed058b86ec1fa206acb87cd9c12695f4f80
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/urca/examples/plot-methods.Rd.R
|
f26e3e138b0c1fc60b1d8723bb6fc1e593a9e539
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 968
|
r
|
plot-methods.Rd.R
|
# Auto-generated example script for the plot methods in package 'urca':
# fit several unit-root / cointegration tests and plot each fitted object.
library(urca)
### Name: plot-methods
### Title: Methods for Function plot in Package urca
### Aliases: plot plot-methods plot,ur.ers,missing-method
### plot,ur.kpss,missing-method plot,ca.jo,missing-method
### plot,ca.po,missing-method plot,ur.pp,missing-method
### plot,ur.sp,missing-method plot,ur.za,missing-method
### plot,ur.df,missing-method
### Keywords: methods
### ** Examples
# Real GNP series (presumably the Nelson-Plosser data shipped with urca --
# confirm); drop missing values, and take logs for the KPSS test below.
data(nporg)
gnp <- na.omit(nporg[, "gnp.r"])
gnp.l <- log(gnp)
#
# DF-GLS unit-root test with a linear trend.
ers.gnp <- ur.ers(gnp, type="DF-GLS", model="trend", lag.max=4)
plot(ers.gnp)
#
# KPSS stationarity test on the log series.
kpss.gnp <- ur.kpss(gnp.l, type="tau", lags="short")
plot(kpss.gnp)
#
# Phillips-Perron Z-tau test with trend.
pp.gnp <- ur.pp(gnp, type="Z-tau", model="trend", lags="short")
plot(pp.gnp)
#
# Schmidt-Phillips test, polynomial degree 1, 1% significance level.
sp.gnp <- ur.sp(gnp, type="tau", pol.deg=1, signif=0.01)
plot(sp.gnp)
#
# Zivot-Andrews test allowing a break in both intercept and trend.
za.gnp <- ur.za(gnp, model="both", lag=2)
plot(za.gnp)
#
# Johansen cointegration analysis on the Danish money-demand data.
data(denmark)
sjd <- denmark[, c("LRM", "LRY", "IBO", "IDE")]
sjd.vecm <- ca.jo(sjd, ecdet="const", type="eigen", K=2, season=4)
plot(sjd.vecm)
|
d067de495732c6aa6ec587b3eaf380d9f7e6b7fd
|
97fd888949808a0ed1734bab1c602eb8ca0fbaa2
|
/R/param_network.R
|
3f848b09b2cf39601591915f36f12acaab789f10
|
[
"MIT"
] |
permissive
|
tidymodels/dials
|
31850316efdb13c97944130a93f845a035cf88e8
|
55763e0cbd49a16a3f5a532dc92b9069258d54e7
|
refs/heads/main
| 2023-07-20T16:14:58.149708
| 2023-04-03T18:10:23
| 2023-04-03T18:10:23
| 141,954,544
| 111
| 33
|
NOASSERTION
| 2023-07-14T16:03:15
| 2018-07-23T03:07:49
|
R
|
UTF-8
|
R
| false
| false
| 1,576
|
r
|
param_network.R
|
#' Neural network parameters
#'
#' These functions generate parameters that are useful for neural network models.
#' @inheritParams Laplace
#' @details
#' * `dropout()`: The parameter dropout rate. (See `parsnip::mlp()`).
#'
#' * `epochs()`: The number of iterations of training. (See `parsnip::mlp()`).
#'
#' * `hidden_units()`: The number of hidden units in a network layer.
#' (See `parsnip::mlp()`).
#'
#' * `batch_size()`: The mini-batch size for neural networks.
#' @examples
#' dropout()
#' @export
dropout <- function(range = c(0, 1), trans = NULL) {
  # Dropout is a proportion; the upper bound is exclusive so a rate of
  # exactly 1 (dropping every unit) can never be sampled.
  new_quant_param(
    type = "double",
    range = range,
    inclusive = c(TRUE, FALSE),
    trans = trans,
    label = c(dropout = "Dropout Rate"),
    finalize = NULL
  )
}
#' @rdname dropout
#' @export
epochs <- function(range = c(10L, 1000L), trans = NULL) {
  # Number of training iterations: an integer parameter whose default
  # range endpoints are both attainable.
  param <- new_quant_param(
    label = c(epochs = "# Epochs"),
    type = "integer",
    range = range,
    inclusive = c(TRUE, TRUE),
    trans = trans,
    finalize = NULL
  )
  param
}
#' @export
#' @rdname dropout
hidden_units <- function(range = c(1L, 10L), trans = NULL) {
  # Number of hidden units in a single network layer (integer parameter,
  # both range endpoints attainable).
  param <- new_quant_param(
    label = c(hidden_units = "# Hidden Units"),
    type = "integer",
    range = range,
    inclusive = c(TRUE, TRUE),
    trans = trans,
    finalize = NULL
  )
  param
}
#' @export
#' @rdname dropout
batch_size <- function(range = c(unknown(), unknown()), trans = log2_trans()) {
  # Mini-batch size on a log2 scale.  The range is data-dependent
  # (unknown() placeholders) and is resolved later by the
  # `get_batch_sizes` finalizer.
  param <- new_quant_param(
    label = c(batch_size = "Batch Size"),
    type = "integer",
    range = range,
    inclusive = c(TRUE, TRUE),
    trans = trans,
    finalize = get_batch_sizes
  )
  param
}
|
4e29cafe8ff043bb453da9a1605de6c969dd6f5a
|
ba71ea3bd22182e6733a3b4132d18f20ed681b7d
|
/man/BatchUpdateValuesByDataFilterRequest.Rd
|
fe109d10e81d22223e78a9da6efbff2b709abb5b
|
[] |
no_license
|
key-Mustang/googleSheetsR
|
4ef61ef15e944825746bcb2ae1427f3c2850ed50
|
c904a53fccddb3dc332655f645ed2dc465eac434
|
refs/heads/master
| 2020-03-28T12:40:56.857407
| 2018-07-22T05:39:03
| 2018-07-22T05:39:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
BatchUpdateValuesByDataFilterRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{BatchUpdateValuesByDataFilterRequest}
\alias{BatchUpdateValuesByDataFilterRequest}
\title{BatchUpdateValuesByDataFilterRequest Object}
\usage{
BatchUpdateValuesByDataFilterRequest(valueInputOption = NULL, data = NULL,
responseDateTimeRenderOption = NULL, responseValueRenderOption = NULL,
includeValuesInResponse = NULL)
}
\arguments{
\item{valueInputOption}{How the input data should be interpreted}
\item{data}{The new values to apply to the spreadsheet}
\item{responseDateTimeRenderOption}{Determines how dates, times, and durations in the response should be}
\item{responseValueRenderOption}{Determines how values in the response should be rendered}
\item{includeValuesInResponse}{Determines if the update response should include the values}
}
\value{
BatchUpdateValuesByDataFilterRequest object
}
\description{
BatchUpdateValuesByDataFilterRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The request for updating more than one range of values in a spreadsheet.
}
\seealso{
Other BatchUpdateValuesByDataFilterRequest functions: \code{\link{spreadsheets.values.batchUpdateByDataFilter}}
}
|
ce54235e90c1d4b05ddf22c2ec6aa74230d6755e
|
a63298e74cb572c76046f4585e49ee5f6327c75e
|
/R/deltafunc.R
|
ce580f236a1e2bfba8b228a79a6acf6e6b686f46
|
[] |
no_license
|
leminhthien2011/CONETTravel
|
65b24f58830c0e347d938719862b8043603d88ae
|
5fbc75cfe4225c36dacb057e9e04b1122d5b9c94
|
refs/heads/main
| 2023-04-05T13:25:12.253787
| 2020-11-02T15:44:22
| 2020-11-02T15:44:22
| 308,343,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
deltafunc.R
|
#' Estimate the per-day delta (death rate) from an A, R, D sequence
#' @param data data of A,R,D sequence: column 1 holds active counts,
#'   column 3 holds cumulative deaths, one row per day
#' @export
deltafunc <- function(data) {
  deaths <- data[, 3]
  active <- data[, 1]
  n <- length(deaths)
  # Daily new deaths: difference from the previous day's cumulative count
  # (the first day keeps its raw cumulative value).
  daily.deaths <- deaths - c(0, deaths[-n])
  # Active cases lagged by one day; a sentinel 1 stands in for day one.
  lagged.active <- c(1, active[-n])
  # Drop day one and any day whose lagged active count is zero, so we
  # never divide by zero.
  drop.idx <- c(1, which(lagged.active == 0))
  delta.est <- daily.deaths[-drop.idx] / lagged.active[-drop.idx]
  # Sentinel value so callers never receive an empty vector.
  if (length(delta.est) == 0) {
    delta.est <- 10^8
  }
  return(delta.est)
}
|
52acb7f2d81849e9a7b1d22b3549d3527a911397
|
34072d4e8efe0531b20dbb9d57a930ae5d85e9d9
|
/classic-bugs-vol1/pump.R
|
1df2d0dc6b39198ef94d4e8f8918abe20339f036
|
[] |
no_license
|
datacloning/dcexamples
|
489aa223bb7e76851958ab93918ac02d575bfcde
|
c3774eda0766cc5a8061885daef634bdb9ef6289
|
refs/heads/master
| 2021-01-01T19:06:46.632020
| 2016-01-13T00:03:11
| 2016-01-13T00:03:11
| 25,499,601
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
pump.R
|
## pump: conjugate gamma-Poisson hierarchical model (BUGS Examples Vol. 1)
library(dcmle)
## Pump-failure data: x[i] failures observed over operating time t[i] for
## N = 10 pumps.  NOTE(review): units of t presumed to follow the original
## BUGS example (thousands of hours) -- confirm against the source.
pump <- makeDcFit(
  ## "N" is the dimension replicated when the data are cloned.
  multiply = "N",
  data = list(
    "N" = 10,
    "t" =
      c(94.3, 15.7, 62.9, 126, 5.24, 31.4, 1.05, 1.05, 2.1, 10.5),
    "x" =
      c(5, 1, 5, 14, 3, 19, 1, 1, 4, 22)),
  ## Model in BUGS syntax: per-pump rates theta[i] share a gamma(alpha,
  ## beta) prior; counts are Poisson with mean theta[i] * t[i].  No
  ## comments inside the function body -- it is parsed and handed to the
  ## BUGS/JAGS engine rather than executed by R.
  model = function() {
    for (i in 1:N){
      theta[i] ~ dgamma(alpha,beta);
      lambda[i] <- theta[i]*t[i];
      x[i] ~ dpois(lambda[i])
    }
    alpha ~ dexp(1.0);
    beta ~ dgamma(0.1,1.0);
  },
  params = c("theta","alpha","beta"))
#dcmle(pump,n.clones=1:2)
|
8253a50472d8ae1d255cd242875af45f1907820f
|
124bf41d015e2d72b5757c7912ff49040f93827c
|
/man/mnlogit.Rd
|
bd0e2fc395eeafdc77bd173717f8cee8ed703d67
|
[] |
no_license
|
floswald/mnlogit
|
70e5cbdaefbd062df771f8acece5a39d0baedbd0
|
40b878c4ffa69b87c1f9a6dc98600fee9511a1d2
|
refs/heads/master
| 2020-05-28T04:08:36.198242
| 2019-12-12T16:00:45
| 2019-12-12T16:00:45
| 188,875,438
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,213
|
rd
|
mnlogit.Rd
|
\name{mnlogit}
\alias{mnlogit}
\alias{print.mnlogit}
\alias{summary.mnlogit}
\alias{predict.mnlogit}
\alias{coef.mnlogit}
\alias{print.est.stats}
\alias{print.model.size}
\alias{print.summary.mnlogit}
\title{Fast estimation of multinomial logit models}
\description{
Time and memory efficient estimation of multinomial logit models using maximum likelihood method and targeted at large scale multiclass classification problems in econometrics and machine learning.
Numerical optimization is performed by the Newton-Raphson method using an optimized, parallel C++ library to achieve fast computation of Hessian matrices.
The user interface is closely related to the CRAN package \pkg{mlogit}.
}
\usage{
mnlogit(formula, data, choiceVar, maxiter = 50, ftol = 1e-6,
gtol = 1e-6, weights = NULL, ncores = 1, na.rm = TRUE,
print.level = 0, linDepTol = 1e-6, ...)
\method{predict}{mnlogit}(object, newdata = NULL, probability = FALSE, ...)
\method{coef}{mnlogit}(object, as.list = FALSE, ...)
}
\arguments{
\item{formula}{formula object or string specifying the model to be estimated (see Note).}
\item{data}{A data.frame object with data organized in the 'long' format (see Note).}
\item{choiceVar}{A string naming the column in 'data' which has the list of choices.}
\item{maxiter}{An integer indicating maximum number of Newton's iterations,}
\item{ftol}{A real number indicating tolerance on the difference of two subsequent loglikelihood values.}
\item{gtol}{A real number indicating tolerance on norm of the gradient.}
\item{weights}{Optional vector of (positive) frequency weights, one for each observation.}
\item{ncores}{An integer indicating number of processors allowed for Hessian calculations.}
\item{na.rm}{a logical variable which indicates whether rows of the data frame containing NAs will be removed.}
\item{print.level}{An integer which controls the amount of information to be printed during execution.}
\item{linDepTol}{Tolerance for detecting linear dependence between columns in input data. Dependent columns are removed from the estimation.}
\item{...}{Currently unused.}
\item{object}{A fitted mnlogit object.}
\item{newdata}{A data.frame object to used for prediction.}
\item{probability}{If TRUE, predict outputs the probability matrix; otherwise the choice with the highest probability for each observation is returned.}
\item{as.list}{Returns estimated model coefficients grouped by variable type.}
}
\value{
An object of class mnlogit, with elements:
\item{coeff}{the named vector of coefficients.}
\item{probabilities}{the probability matrix: (i,j) entry denotes the probability of the jth choice being choosen in the ith observation.}
\item{residuals}{the named vector of residuals which is the probability of not choosing the alternative which was chosen.}
\item{logLik}{the value of the log-likelihood function at exit.}
\item{df}{the number of parameters in the model.}
\item{gradient}{the gradient of the log-likelihood function at exit.}
\item{hessian}{the Hessian of the log-likelihood function at exit.}
\item{AIC}{the AIC value of the fitted model.}
\item{formula}{the formula specifying the model.}
\item{data}{the data.frame used in model estimation.}
\item{choices}{the vector of alternatives.}
\item{freq}{the frequencies of alternatives.}
\item{model.size}{Information about number of parameters in model.}
\item{est.stat}{Newton Raphson stats.}
\item{freq}{the frequency of each choice in input data.}
\item{call}{the mnlogit function call that the user made.}
}
\note{
1. The data must be in the 'long' format. This means that for each observation there must be as many rows as there are alternatives (which should be grouped together).
2. The formula should be specified in the format: responseVar ~ choice specific variables with generic coefficients | individual specific variables | choice specific variables with choice specific coefficients. These are the 3 available variable types.
3. Any type of variables may be omitted. To omit use "1" as a placeholder.
4. An alternative specific intercept is included by default in the estimation. To omit it, use a '-1' or '0' anywhere in the formula.
}
\references{
Croissant, Yves.
\emph{Estimation of multinomial logit models in R: The mlogit Packages.}
\url{http://cran.r-project.org/web/packages/mlogit/index.html}
Train, K. (2004) \emph{Discrete Choice Methods with Simulation},
Cambridge University Press.
}
\author{Wang Zhiyu, Asad Hasan}
\keyword{mnlogit, logistic, classification, multinomial, mlogit, parallel}
\examples{
library(mnlogit)
data(Fish, package = "mnlogit")
fm <- formula(mode ~ price | income | catch)
result <- mnlogit(fm, Fish, "alt", ncores = 2)
predict(result)
\dontrun{
print(result)
print(result$est.stats)
print(result$model.size)
summary(result)
# Formula examples (see also Note)
fm <- formula(mode ~ 1 | income) # Only type-2 with intercept
fm <- formula(mode ~ price - 1) # Only type-1, no intercept
fm <- formula(mode ~ 1 | 1 | catch) # Only type-3, including intercept
}
}
|
1f70ac803c2e96558677193de2ae41688542806b
|
dfb5bf243b895ee58b8b8dea5f11fc3f5472dcae
|
/man/equation_9_7.Rd
|
14852a9ac473720db4dbecd39014cb4c15726735
|
[] |
no_license
|
trollock/respiratoR
|
d6301459d80caf996e268e3676cf4838e879c86a
|
a86fd4bccfcf5f37f2756a7304500739498c8104
|
refs/heads/master
| 2020-09-01T16:39:13.359005
| 2019-11-25T15:25:08
| 2019-11-25T15:25:08
| 218,963,440
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 872
|
rd
|
equation_9_7.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equation_9_7.R
\name{equation_9_7}
\alias{equation_9_7}
\title{Baseline drift correction inspection plot}
\usage{
equation_9_7(dat, val1, val2)
}
\arguments{
\item{dat}{dataframe containing spline fits of respirometry data, this is the output from the baseline_corr function}
\item{dat1}{dataframe containing the baseline fit data, this is the output from the base_bg_rect function}
\item{val}{the channel being drift corrected, e.g. "Oxygen"}
}
\value{
a dataframe containing oxygen and carbon dioxide data drift-corrected using a
spline fit
}
\description{
This function provides a basic plot of the different spline and linear interpolation fits for the visual inspection of
the data.
}
\details{
This is a generic function:
}
\examples{
drift_plot (o2_corr, base_rect, "Oxygen")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.