blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9e602b3ce4559c818f1ec3d078c19526065e0b43
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/speedglm/examples/speedlm.Rd.R
|
a1af60c1b98c935f3abd7e0983b7dc68e0223b06
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,880
|
r
|
speedlm.Rd.R
|
# Auto-extracted example script for speedglm::speedlm(). Lines prefixed
# "##D" are example code that the Rd extractor disabled because the
# original help page wrapped them in \dontrun{} ("## Not run" blocks).
library(speedglm)
### Name: speedlm
### Title: Fitting Linear Models to Large Data Sets
### Aliases: speedlm speedlm.fit speedlm.wfit update.speedlm
### updateWithMoreData
### Keywords: models
### ** Examples
## Not run:
##D n <- 1000
##D k <- 3
##D y <- rnorm(n)
##D x <- round(matrix(rnorm(n * k), n, k), digits = 3)
##D colnames(x) <- c("s1", "s2", "s3")
##D da <- data.frame(y, x)
##D do1 <- da[1:300,]
##D do2 <- da[301:700,]
##D do3 <- da[701:1000,]
##D
##D m1 <- speedlm(y ~ s1 + s2 + s3, data = do1)
##D m1 <- update(m1, data = do2)
##D m1 <- update(m1, data = do3)
##D
##D m2 <- lm(y ~ s1 + s2 + s3, data = da)
##D summary(m1)
##D summary(m2)
## End(Not run)
## Not run:
##D # as before but recursively
##D make.data <- function(filename, chunksize,...){
##D conn <- NULL
##D function(reset=FALSE, header=TRUE){
##D if(reset){
##D if(!is.null(conn)) close(conn)
##D conn<<-file(filename,open="r")
##D } else{
##D rval <- read.table(conn, nrows=chunksize,header=header,...)
##D if (nrow(rval)==0) {
##D close(conn)
##D conn<<-NULL
##D rval<-NULL
##D }
##D return(rval)
##D }
##D }
##D }
##D
##D write.table(da,"da.txt",col.names=TRUE,row.names=FALSE,quote=FALSE)
##D x.names <- c("s1", "s2", "s3")
##D dat <- make.data("da.txt",chunksize=300,col.names=c("y",x.names))
##D dat(reset=TRUE)
##D da2 <- dat(reset=FALSE)
##D
##D # the first model runs on the first 300 rows.
##D m3 <- speedlm(y ~ s1 + s2 + s3, data=da2)
##D
##D # the last three models run on the subsequent 300, 300 and 100 rows, respectively
##D for (i in 1:3){
##D da2 <- dat(reset=FALSE, header=FALSE)
##D m3 <- update(m3, data=da2, add=TRUE)
##D }
##D all.equal(coef(m1),coef(m3))
##D file.remove("da.txt")
## End(Not run)
|
20504aad3cf6ad2e3409cb12ad5ef4d6e43f49fa
|
942b499a16ff485928e6a005ce641a204bab251a
|
/man/generate.excitation.Rd
|
c50db3334a5a6faf01897593fca7a871a78c5ffc
|
[] |
no_license
|
dcourvoisier/doremi
|
c16032fbfd8254f03d52643ebd01df1e1ce63d84
|
f2adc5037fef7e621eec4c32990896f3279e67ab
|
refs/heads/master
| 2022-01-26T21:38:26.947001
| 2022-01-13T09:42:00
| 2022-01-13T09:42:00
| 175,785,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,214
|
rd
|
generate.excitation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doremi.R
\name{generate.excitation}
\alias{generate.excitation}
\title{Excitation signal generation}
\usage{
generate.excitation(
amplitude = 1,
nexc = 1,
duration = 2,
deltatf = 0.1,
tmax = 10,
minspacing = 1
)
}
\arguments{
\item{amplitude}{is a vector of values different from 0 indicating the amplitude of the excitation. It should contain as many values
as the number of pulses (nexc). If the elements are less than the number of pulses, the amplitude vector will be "recycled" and the elements from it will be repeated until
all the pulses are covered (for instance, if the number of excitations nexc is 6 and the amplitude vector has two elements, pulses 1,3 and 5 will
have the same amplitude as the first element of the amplitude vector and pulses 2,4 and 6 that of the second element).}
\item{nexc}{is an integer greater than 0 indicating the number of pulses to generate.}
\item{duration}{is a vector of values greater or equal to 0 indicating the duration of each pulse in time units. It should have as many elements as the number of pulses (nexc). If
there are fewer elements than the number of pulses, the duration vector will be "recycled" and its elements will be repeated until
all the pulses are covered.}
\item{deltatf}{is a value greater than 0 indicating the time step between two consecutive data points.}
\item{tmax}{is a value greater than 0 indicating the maximum time range of the excitation vector in time units. The time vector generated will go from 0 to tmax.}
\item{minspacing}{as pulses are generated randomly, minspacing is a value greater than or equal to 0 that indicates minimum spacing between pulses, thus avoiding
overlapping of the pulses in time. A value of 0 indicates that pulses can follow one another.}
}
\value{
Returns two vectors:
E- vector containing the values of the excitation generated.
t- vector containing the values of time generated.
}
\description{
\code{generate.excitation} generates a vector of randomly located square pulses
with a given amplitude, duration and spacing between the pulses. A pulse is where the excitation passes from value 0 to value amplitude
for a given duration and then returns back to 0, thus producing a square shape.
}
\details{
Used for simulations in the context of the package. Beware that the following condition should apply:
\deqn{tmax >= (duration+minspacing)*nexc}
so that the pulses "fit" in the time lapse defined.
Compared to \code{pulsew} from the \code{seewave} package, this function can generate pulses of different duration and amplitude.
}
\examples{
generate.excitation (amplitude = 3,
nexc = 6,
duration = 2,
deltatf = 1,
tmax = 200,
minspacing = 2)
#Vector of length 201 (tmax/deltatf + 1, as it includes 0 as the initial time value)
generate.excitation (amplitude = c(1,10,20),
nexc = 3,
duration = c(1,2,4),
deltatf = 0.5,
tmax = 100,
minspacing = 10)
}
\keyword{excitation}
\keyword{simulation}
|
e63789b7ae6327a762dc837ec723959ad3d3dab5
|
b21426c84a4e69c69b14c983e7c5fa050b627e71
|
/R/pdl_report_td_df.R
|
723668ce0bb6dc89dd049a81aad0a2f68fdbd4c8
|
[
"MIT"
] |
permissive
|
rmsharp/snprcspf
|
ca32e15b14ba0e1bfd818f90b98755ea26f44a58
|
154ef8face204fd24e15899c2f5ac8ac908e9961
|
refs/heads/master
| 2021-03-22T08:04:36.009143
| 2020-06-14T02:49:26
| 2020-06-14T02:49:26
| 92,219,026
| 0
| 2
|
MIT
| 2020-04-02T23:33:59
| 2017-05-23T20:49:40
|
HTML
|
UTF-8
|
R
| false
| false
| 2,102
|
r
|
pdl_report_td_df.R
|
#' Returns dataframe with parsed content of pdl_report
#'
#' Extracts the report-level metadata fields from the report content,
#' strips page headers and column labels, and then assembles one record
#' per pair of content lines (each record spans two physical lines).
#'
#' @param pdl_report list containing $content and $meta that make up the
#'   complete data from PDF PDL report file.
#' @return A dataframe with one row per record found in the report,
#'   augmented with SqlMed codes and translated sample types.
#' @import stringi
#' @export
pdl_report_to_df <- function(pdl_report) {
  file_name <- get_pdl_report_file_name(pdl_report)
  order_pk <- get_pdl_meta_data_from_content(pdl_report, "Order Pk:", 14)
  print_date_tm <- get_pdl_meta_data_from_content(pdl_report,
                                                  "Report printed on:", 28)
  bill_po <- get_pdl_meta_data_from_content(pdl_report, "Bill Po:", 10)
  request_num <- get_pdl_meta_data_from_content(pdl_report, "Req Num:", 10)
  report_contact <- get_pdl_meta_data_from_content(pdl_report, "Rpt Contact:",
                                                   20)
  received_date <- get_pdl_meta_data_from_content(pdl_report, "Recd Dt:", 11)
  report_date <- get_pdl_meta_data_from_content(pdl_report, "Report Dt:", 11)
  order_comment <- get_pdl_meta_data_from_content(pdl_report, # to end of line
                                                  "Order Comment:", 1000)
  content <- remove_pdl_report_headers(pdl_report$content)
  col_boundaries <- get_col_boundaries(content)
  content <- strip_column_labels(content)
  # This is needed because the report clips the last line of the page off
  # if the report goes to the next page.
  content <- insert_missing_lines(content)
  len <- length(content)
  # Records occupy two consecutive lines, so walk the odd indices only.
  # Accumulate the one-row dataframes in a preallocated list and bind once
  # at the end; calling rbind() inside the loop would recopy the growing
  # dataframe on every iteration (quadratic in the number of records).
  odd_idx <- seq_along(content)[is_odd(seq_along(content))]
  records <- vector("list", length(odd_idx))
  for (j in seq_along(odd_idx)) {
    i <- odd_idx[j]
    line_1 <- content[i]
    if (len > i) {
      line_2 <- content[i + 1]
      records[[j]] <- get_one_record_df(file_name, order_pk,
                                        print_date_tm, bill_po, request_num,
                                        report_contact, received_date,
                                        report_date, order_comment, line_1,
                                        line_2, col_boundaries)
    }
  }
  # Unfilled slots stay NULL; rbind() ignores NULL arguments, and the empty
  # template supplies the column structure when no records were found.
  pdl_df <- do.call(rbind, c(list(get_empty_pdl_df()), records))
  pdl_df <- add_sqlmed_codes(pdl_df, "PDL")
  pdl_df$sample_type <- get_sample_type_trans(pdl_df$sample_type)
  pdl_df
}
|
c1a1a0e3344f782551580a004a598801c476b0af
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5815_0/rinput.R
|
c90fd517c2642f8fa89b7c434bd0aeda9117bf9e
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read the Newick tree in 5815_0.txt, remove its root, and write the
# unrooted tree back to disk for downstream codeml processing.
library(ape)

phylo_tree <- read.tree("5815_0.txt")
write.tree(unroot(phylo_tree), file = "5815_0_unrooted.txt")
|
dd172aa600e2873014fe31bba1ed490c2875f6e7
|
421366a39299a1a82bd0f2a42e667da7fc602b62
|
/man/MergeRfandEnetPredictions.Rd
|
d4ea8a9f0b27f9e110f9a22caba264d519a5afde
|
[] |
no_license
|
thomasferte/PredictCovidOpen
|
363ef4cc9006696d5fa16c2ac4bdf9b58882a476
|
2468b0006a6f19310a9f8c7de6aa46979f19d627
|
refs/heads/main
| 2023-03-17T19:28:52.863817
| 2023-01-26T09:51:38
| 2023-01-26T09:51:38
| 496,952,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 639
|
rd
|
MergeRfandEnetPredictions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MergeRfandEnetPredictions.R
\name{MergeRfandEnetPredictions}
\alias{MergeRfandEnetPredictions}
\title{MergeRfandEnetPredictions}
\usage{
MergeRfandEnetPredictions(df_test, lsmodelEnet, lsmodelRf, OUTCOME, FORECAST)
}
\arguments{
\item{df_test}{The test dataframe}
\item{lsmodelEnet}{The list from trainElasticNet()}
\item{lsmodelRf}{The list from trainRf()}
\item{OUTCOME}{The OUTCOME must be one of HOSP or IN_HOSP}
\item{FORECAST}{The FORECAST time parameter}
}
\value{
A data frame containing the merged predictions.
}
\description{
Merge Elastic net and random forest predictions
}
|
b25721b0dc4b946bc6557cbba6a2fca73d9ec84e
|
527ce6da569f9a58334b0c91877950a53ebc29e1
|
/R/0_ff_simulate.R
|
ffce64f971ba2d07bf450819a94ee82f3d6c33d2
|
[
"MIT"
] |
permissive
|
mrcaseb/ffsimulator
|
7967a637be2ec9d06ab4f18f285acf10f7c5c881
|
4747b37544f7339bb841d4da4a10e38df01828f3
|
refs/heads/main
| 2023-06-10T07:01:02.987403
| 2021-07-04T19:43:02
| 2021-07-04T19:43:02
| 383,100,971
| 2
| 0
|
NOASSERTION
| 2021-07-05T10:25:48
| 2021-07-05T10:25:47
| null |
UTF-8
|
R
| false
| false
| 4,222
|
r
|
0_ff_simulate.R
|
#' Simulate Fantasy Seasons
#'
#' The main function of the package
#'
#' @param conn an connection to a league made with `ff_connect()` and friends (required)
#' @param n_seasons number of seasons to simulate, default = 100
#' @param weeks_per_season number of weeks per season, default = 14
#' @param best_ball a logical: are weekly wins based on optimal lineups?
#' @param seed an integer to control reproducibility
#' @param injury_model select between "simple", "none"
#' @param base_seasons a numeric vector that selects seasons as base data, earliest available is 2012
#' @param parallel a logical - use parallel processing for optimizing lineups, default is FALSE
#' @param verbose print progress messages for debugging
#'
#' @examples \dontrun{
#'
#' conn <- mfl_connect(2021, 54040)
#'
#' auto <- ff_simulate(conn)
#'
#' reprex <- ff_simulate(conn = conn, seed = 613)
#'
#' basic <- ff_simulate(conn = conn, n_seasons = 100, weeks_per_season = 14, best_ball = FALSE)
#'
#' custom <- ff_simulate(
#'   conn = conn,
#'   n_seasons = 100,
#'   weeks_per_season = 17,
#'   seed = 613,
#'   best_ball = FALSE,
#'   injury_model = "none",
#'   parallel = TRUE
#' )
#' }
#'
#' @return a named list with per-simulation, per-season, and per-week
#'   summaries, the rankings used, and the preprocessed input data
#'
#' @export
ff_simulate <- function(conn,
                        n_seasons = 100,
                        weeks_per_season = 14,
                        best_ball = FALSE,
                        seed = NULL,
                        injury_model = c("simple", "none"),
                        base_seasons = 2012:2020,
                        parallel = FALSE,
                        verbose = TRUE) {

  #### ASSERTIONS ####

  # inherits() correctly handles objects whose class() attribute is a
  # vector of length > 1; `!class(conn) %in% c(...)` yields a length > 1
  # condition in that case, which if() rejects as of R 4.2.
  if (!inherits(conn, c("mfl_conn", "sleeper_conn", "flea_conn", "espn_conn"))) {
    stop("conn should be a connection object created by `ff_connect()` and friends!",
         call. = FALSE)
  }

  injury_model <- match.arg(injury_model)
  checkmate::assert_numeric(base_seasons)
  checkmate::assert_int(n_seasons, lower = 1)
  checkmate::assert_int(weeks_per_season, lower = 1)
  checkmate::assert_int(seed, null.ok = TRUE)
  checkmate::assert_flag(best_ball)
  if (!is.null(seed)) set.seed(seed)

  # Planned-but-unimplemented options, kept for reference:
  # # checkmate::assert_flag(verbose)
  # if(!is.null(custom_rankings)) {
  #   checkmate::assert_data_frame(custom_rankings)
  #   ## ADD ASSERTIONS FOR CORRECT RANKINGS COLUMNS
  # }
  #
  # if(!is.null(owner_efficiency)) checkmate::assert_list(owner_efficiency, names = c("average","sd"))

  #### DOWNLOAD SCORING HISTORY ####
  scoring_history <- ffscrapr::ff_scoringhistory(conn, base_seasons)

  #### CREATE ADP OUTCOMES ####
  adp_outcomes <- ffs_adp_outcomes(scoring_history = scoring_history,
                                   injury_model = injury_model)

  #### DOWNLOAD LATEST FANTASYPROS RANKINGS ####
  latest_rankings <- ffs_latest_rankings()

  #### DOWNLOAD ROSTERS ####
  rosters <- ffscrapr::ff_rosters(conn)
  lineup_constraints <- ffscrapr::ff_starter_positions(conn)

  #### JOIN DATA ####
  preprocessed_data <- ffs_preprocess_data(conn, rosters, latest_rankings, adp_outcomes)

  #### GENERATE PREDICTIONS ####
  # One projected score per (season, week) pair.
  n_weeks <- n_seasons * weeks_per_season
  projected_scores <- ffs_generate_predictions(preprocessed_data, n_weeks)

  #### OPTIMIZE LINEUPS ####
  optimal_scores <- ffs_optimize_lineups(
    projected_scores = projected_scores,
    lineup_constraints = lineup_constraints,
    best_ball = best_ball,
    parallel = parallel)

  #### GENERATE SCHEDULES ####
  schedules <- ffs_build_schedules(n_teams = length(unique(rosters$franchise_id)),
                                   n_seasons = n_seasons,
                                   n_weeks = weeks_per_season)

  #### SUMMARISE SEASON ####
  summary_week <- ffs_summarise_week(optimal_scores, schedules)
  summary_season <- ffs_summarise_season(summary_week)
  summary_simulation <- ffs_summarise_simulation(summary_season)

  #### BUILD AND RETURN ####
  out <- list(
    summary_simulation = summary_simulation,
    summary_season = summary_season,
    summary_week = summary_week,
    latest_rankings = latest_rankings,
    raw_data = preprocessed_data
  )

  return(out)
}
|
1876ce9414a9c8a37e25969b72c4de673859edea
|
5aaa149e2fad457ff61928a609a9d106d486cffd
|
/man/get_event_rsvps.Rd
|
04dd986ddc8931d435353486fe4570cb3de207ae
|
[
"MIT"
] |
permissive
|
decktools/meetupr
|
4df6031dc73f97f5f63f1e4b14b3ced595e537b2
|
113986e9361f0bfc48d9255c31987d9ba8071af0
|
refs/heads/master
| 2023-02-04T14:30:13.184976
| 2020-12-19T17:24:05
| 2020-12-19T17:24:05
| 321,782,654
| 0
| 0
|
NOASSERTION
| 2020-12-16T14:55:59
| 2020-12-15T20:34:40
| null |
UTF-8
|
R
| false
| true
| 1,426
|
rd
|
get_event_rsvps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_event_rsvps.R
\name{get_event_rsvps}
\alias{get_event_rsvps}
\title{Get the RSVPs for a specified event}
\usage{
get_event_rsvps(urlname, event_id, api_key = NULL)
}
\arguments{
\item{urlname}{Character. The name of the group as indicated in the
\url{https://meetup.com} url.}
\item{event_id}{Character. The id of the event. Event ids can be obtained
using \code{\link[=get_events]{get_events()}} or by looking at the event page URL.}
\item{api_key}{Character. Your api key. Defaults to checking your environment
for a parameter called "MEETUP_KEY" via \code{Sys.getenv("MEETUP_KEY")}. This key can be
obtained from \url{https://secure.meetup.com/meetup_api/key/}. To set the environment variable
run \code{Sys.setenv(MEETUP_KEY = "PASTE YOUR MEETUP KEY HERE")}.}
}
\value{
A tibble with the following columns:
\itemize{
\item member_id
\item member_name
\item member_is_host
\item response
\item guests
\item created
\item updated
\item resource
}
}
\description{
Get the RSVPs for a specified event
}
\examples{
\dontrun{
urlname <- "rladies-nashville"
upcoming_events <- get_events(
urlname = urlname,
event_status = "past"
)
event_id <- upcoming_events$id[1] # first event for this group
rsvps <- get_event_rsvps(urlname, event_id)
}
}
\references{
\url{https://www.meetup.com/meetup_api/docs/:urlname/events/:event_id/rsvps/#list}
}
|
d2983edcd8e2c7a469cef1238020ce7fc5e70ece
|
aed99786f997fabe16404351480bb801cde4b56f
|
/RecommendationSystem/RecommendationSystem.R
|
8eb43115f08aa46655058c73e74d7e6861787870
|
[] |
no_license
|
JaivalBhup/RDataScienceProjects
|
b333f96edd02fcc0d8962af04569094f02a6c03d
|
ed1ac2593c74d64986d908641f76fde4eb5a0b8a
|
refs/heads/master
| 2022-12-04T20:30:54.840710
| 2020-08-20T05:23:48
| 2020-08-20T05:23:48
| 288,914,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,468
|
r
|
RecommendationSystem.R
|
# Movie recommendation models on the dslabs `movielens` data: a global-mean
# baseline, a movie-effect model, a movie + user-effect model, and a
# regularised movie-effect model, compared by test-set RMSE.
library(dslabs)
library(tidyverse)
data("movielens")
library(caret)
set.seed(755)
# Hold out 20% of ratings as the test set.
test_index <- createDataPartition(y = movielens$rating, times = 1,
p = 0.2, list = FALSE)
train_set <- movielens[-test_index,]
test_set <- movielens[test_index,]
# Keep only test ratings whose movie AND user also appear in the training
# set, so every test row can be scored.
test_set <- test_set %>%
semi_join(train_set, by = "movieId") %>%
semi_join(train_set, by = "userId")
# Root mean squared error between observed and predicted ratings.
RMSE <- function(true_ratings, predicted_ratings){
sqrt(mean((true_ratings - predicted_ratings)^2))
}
#predicting by just the mean y_u_i = mu
#fit <- lm(rating ~ as.factor(userId), data = movielens)
mu_hat <- mean(train_set$rating)
naive_rmse<-RMSE(test_set$rating,mu_hat)
#adding to result table
rmse_results <- data.frame(method= "Just the Average", RMSE = naive_rmse)
# prediction with bias which is the average rating for each movie y_u_i = mu + b_i
mu <- mean(train_set$rating)
movie_avgs <- train_set %>%
group_by(movieId) %>%
summarize(b_i = mean(rating - mu))
predict_ratings <- mu + test_set%>%
left_join(movie_avgs, by = "movieId")%>%
.$b_i
model_1_rmse <- RMSE(test_set$rating, predict_ratings)
rmse_results <- bind_rows(rmse_results,
data.frame(method ="Movie Effect Model",
RMSE = model_1_rmse))
rmse_results%>%knitr::kable()
# lm(rating ~ as.factor(movieId) + as.factor(userId))
# adding user specific effect
# NOTE(review): user effects b_u are estimated from test_set here; the
# usual approach fits them on train_set — using the test set leaks test
# information into the model. Confirm whether this was intended.
user_avgs <- test_set%>%
left_join(movie_avgs, by = "movieId")%>%
group_by(userId)%>%
summarise(b_u = mean(rating - mu - b_i))
predict_ratings <- test_set%>%
left_join(movie_avgs, by = "movieId")%>%
left_join(user_avgs, by = "userId")%>%
mutate(pred = mu + b_i + b_u)%>%.$pred
model_2_rmse <- RMSE(test_set$rating, predict_ratings)
rmse_results <- bind_rows(rmse_results,
data.frame(method ="User Specific Model",
RMSE = model_2_rmse))
rmse_results%>%knitr::kable()
# regularization: shrink each movie effect toward 0 by adding lambda to
# its rating count, penalising movies with few ratings.
lambda <- 3
mu <- mean(train_set$rating)
movie_reg_avgs <- train_set %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda), n_i = n())
predict_ratings <-test_set%>%
left_join(movie_reg_avgs, by="movieId")%>%
mutate(pred = mu + b_i)%>%.$pred
model_3_rmse <- RMSE(test_set$rating, predict_ratings)
# NOTE(review): this model uses regularised movie effects only (no user
# term), so the label "Movie + User Effects Model" looks misleading —
# confirm the intended name before publishing results.
rmse_results <- bind_rows(rmse_results,
data.frame(method = "Movie + User Effects Model",
RMSE = model_3_rmse))
rmse_results%>%knitr::kable()
|
108d4c89d6ce00ce8ad2f9fb109bccbe71548e46
|
100145ba710bf41813800aff99d4748bd902fe51
|
/SourceCode.R
|
89cabfe8967b031576d504726e2a832db0697d9e
|
[] |
no_license
|
iportnoy1/General-Skills-Test-Dataset-and-Source-Code
|
6b4cd308cac124c3284739ecce28f6c34f4b6da1
|
7b07aced6c58be5d506ef3344fe9b62c00dde4a2
|
refs/heads/main
| 2023-05-08T18:48:18.775436
| 2021-06-01T03:15:43
| 2021-06-01T03:15:43
| 370,750,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,998
|
r
|
SourceCode.R
|
# Analysis of general-skills test scores before vs. during COVID:
# descriptive boxplots, normality tests, mean/variance comparisons,
# correlation structures and networks, and a Jennrich test of equality
# of the two correlation matrices.
library(vegan)
library(mdatools)
library(psych)
library(resample)
library(igraph)
library(reshape2)
library(qgraph)
library(psych)
library(corrr)
library(ggplot2)
library(dplyr)
library(GGally)
library(stats)
library(dgof)
# NOTE(review): psych is loaded twice above, and the absolute path below
# ties the script to one machine — consider making it configurable.
setwd("C:/Users/idpdl/Desktop/Paper Jessica")
#Loading Data
X <- read.csv('Deidentified Data.csv',header = T, dec = '.')
#Descriptive Statistics Visualization
#Period-wise statistics
ggplot(X, aes(x=Period, y=Average, fill=Period)) +
geom_boxplot()+ scale_fill_brewer(palette="RdBu")
##Skill-wise statistics
ggplot(X, aes(x=Period, y=RC, fill=Period)) +
geom_boxplot()+ scale_fill_brewer(palette="RdBu")
ggplot(X, aes(x=Period, y=QR, fill=Period)) +
geom_boxplot()+ scale_fill_brewer(palette="RdBu")
ggplot(X, aes(x=Period, y=CS, fill=Period)) +
geom_boxplot()+ scale_fill_brewer(palette="RdBu")
ggplot(X, aes(x=Period, y=EP, fill=Period)) +
geom_boxplot()+ scale_fill_brewer(palette="RdBu")
ggplot(X, aes(x=Period, y=WC, fill=Period)) +
geom_boxplot()+ scale_fill_brewer(palette="RdBu")
# Splitting Data
# Rows whose Period string mentions "2020" form the COVID group.
temp <- as.numeric(grepl("2020", X$Period))
X$COVID <- temp
X_COVID <- X[X$COVID==1,]
X_No_COVID <- X[X$COVID==0,]
# Columns 3:7 hold the five skill scores plotted above (RC, QR, CS, EP, WC).
temp1 <- as.data.frame(X_COVID[,3:7])
temp2 <- as.data.frame(X_No_COVID[,3:7])
# Testing Normality
NormalityTests_No_COVID <- as.data.frame(matrix(rep(0,length(temp1[1,])),1,5))
colnames(NormalityTests_No_COVID) <- colnames(temp1)
NormalityTests_COVID <- NormalityTests_No_COVID
# NOTE(review): shapiro.test() returns an htest object (a list), so
# assigning its result into a single data-frame cell is suspect — most
# likely `$p.value` was intended, as in the loops further below; confirm.
# The 5000-row subsample is presumably there because shapiro.test() caps
# its sample size at 5000 — verify against ?shapiro.test.
for (i in 1:length(temp1[1,])) {
NormalityTests_No_COVID[1,i] <- shapiro.test(temp2[sample(length(temp2[,i]), 5000, replace = F),i])
NormalityTests_COVID[1,i] <- shapiro.test(temp1[sample(length(temp1[,i]), 5000, replace = F),i])
}
#Differential Analysis: Testing mean and std. dev. equality
#Hereby we use the Welch's t-test and the Fisher's F-test
pvals_means <- as.data.frame(matrix(rep(0,length(temp1[1,])),1,5))
colnames(pvals_means) <- colnames(temp1)
pvals_std_devs <- pvals_means
for (i in 1:length(temp1[1,])) {
temp <- t.test(temp1[,i],temp2[,i],alternative="two.sided",var.equal=F)
pvals_means[1,i] <- temp$p.value
temp <- var.test(temp1[,i], temp2[,i], alternative = "two.sided")
pvals_std_devs[1,i] <- temp$p.value
}
# Correlation Structures
ggpairs(temp1)
ggpairs(temp2)
# Correlation Networks
corMat=cor(X_No_COVID[,3:7])
corMat2=cor(X_COVID[,3:7])
# Mark the upper triangle with a sentinel (2, impossible for a correlation)
# so each pair survives the melt exactly once.
CorMat_mod <- corMat
CorMat_mod[upper.tri(CorMat_mod)] <- 2
cor_df1 <- melt(CorMat_mod )
cor_df1 <- filter(cor_df1, value != 2) %>% filter(Var1 != Var2)
adj_list1 <- cor_df1 %>% filter(abs(value) > 0.01)
names(adj_list1) <- c('from', 'to', 'weight')
net1 <- graph_from_data_frame(adj_list1, directed = FALSE)
# Edge colour encodes the correlation sign: red = negative, blue = positive.
Cols <- c("red","blue")
E(net1)$color <- unlist(lapply(1:nrow(adj_list1), function(i){Cols[(adj_list1$weight[i]>0)+1]}))
E(net1)$size <- adj_list1$weight
set.seed(2)
CorMat_mod2 <- corMat2
CorMat_mod2[upper.tri(CorMat_mod2)] <- 2
cor_df2 <- melt(CorMat_mod2)
cor_df2 <- filter(cor_df2, value != 2) %>% filter(Var1 != Var2)
adj_list2 <- cor_df2 %>% filter(abs(value) > 0.01)
names(adj_list2) <- c('from', 'to', 'weight')
net2 <- graph_from_data_frame(adj_list2, directed = FALSE)
Cols <- c("red","blue")
E(net2)$color <- unlist(lapply(1:nrow(adj_list2), function(i){Cols[(adj_list2$weight[i]>0)+1]}))
E(net2)$size <- adj_list2$weight
# Side-by-side network plots; edge width scales with the correlation weight.
par(mfrow=c(1,2))
set.seed(2)
plot(net1, vertex.size=45, vertex.label =
c("RC","QR","CS","EP","WC"),
edge.width= 12*E(net1)$size)
set.seed(2)
plot(net2, vertex.size=45, vertex.label =
c("RC","QR","CS","EP","WC"),
edge.width= 12*E(net2)$size)
#Jennrich Test
n1 <- dim(X_No_COVID)[1]
n2 <- dim(X_COVID)[1]
pval_Jennrich <- cortest.jennrich(corMat,corMat2,n1,n2)$prob
|
a88d58055e00af6c214ba53138139bf214b7fdff
|
736873b12d3d5e3b3c4314a0aa1b9a029d4875c2
|
/Performing_clustering_and_evaluation_of_clusters.R
|
b28fb17932c17135c0dc4b4f5a3293700b0c804e
|
[] |
no_license
|
pushkarmittal1996/Similarities-of-World-trade-networks
|
29ca113c0bdb29abc6a0d4a7c699f4f971cedf13
|
188c2069c96ef6a01f881c19062f116c96341285
|
refs/heads/master
| 2020-07-16T03:19:44.675037
| 2019-09-01T21:37:50
| 2019-09-01T21:37:50
| 205,707,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,673
|
r
|
Performing_clustering_and_evaluation_of_clusters.R
|
# Linearly map a numeric vector onto the interval [0, 100].
rescale <- function(x) {
  lo <- min(x)
  hi <- max(x)
  (x - lo) / (hi - lo) * 100
}
# NOTE(review): `dat` is not created anywhere above in this file; it is
# presumably loaded by an earlier script — confirm before running standalone.
dat <- rescale(dat)
library(dplyr)
library(tidyr)
library(ggplot2)
## method 1. WSS :compute the total within sum square error, this measures how close
# are the points in a cluster to each other
# [Distance] : calculates the sum squared distance of a given cluster of points,
# note that "sum squared distance" is used here for measuring variance
# Total within-cluster sum of squares: the summed squared distance of every
# point (row of `cluster`) from the cluster centroid.
Distance <- function(cluster) {
  centroid <- colMeans(cluster)
  squared_errors <- apply(cluster, 1, function(point) {
    sum((point - centroid)^2)
  })
  sum(squared_errors)
}
# calculate the within sum squared error manually for hierarchical clustering
# [WSS] : pass in the dataset, and the resulting groups(cluster)
# Total within-cluster sum of squared error across all clusters.
#
# data   : matrix/data frame of observations (one row per point).
# groups : integer cluster assignment for each row, with ids in 1..max(groups).
#
# Returns the sum, over clusters, of each cluster's within sum of squares
# as computed by Distance(). seq_len() replaces the original 1:k, which
# degenerates to c(1, 0) when max(groups) is 0; vapply() replaces
# lapply() %>% unlist() for a type-stable result without the pipe.
WSS <- function(data, groups) {
  n_clusters <- max(groups)
  per_cluster <- vapply(seq_len(n_clusters), function(id) {
    # Rows assigned to this cluster only.
    Distance(subset(data, groups == id))
  }, numeric(1))
  sum(per_cluster)
}
# For k = 2..kmax, cluster `data` with the chosen method and record the
# within sum of squares (wss), mean silhouette width (sil), and the
# Calinski-Harabasz index. Returns a list with the criteria table and a
# faceted ggplot of the three curves.
#
# data          : matrix/data frame of observations (rows = points).
# kmax          : largest number of clusters to evaluate.
# clustermethod : one of "kmeanspp", "hclust", "pam".
# linkmethod    : linkage passed to hclust(); ignored by the other methods.
# ...           : forwarded to Kmeanspp() or hclust().
#
# NOTE(review): silhouette()/pam() come from the cluster package and
# Kmeanspp() from a sourced kmeanspp.R — neither is attached in this
# file's library() calls; confirm they are in scope at call time.
CHCriterion <- function( data, kmax, clustermethod,linkmethod, ... )
{
if( !clustermethod %in% c( "kmeanspp", "hclust","pam" ) )
stop( "method must be one of 'kmeanspp' or 'hclust' or 'pam'" )
# total sum squared error (independent with the number of cluster k)
tss <- Distance( cluster = data )
# initialize a numeric vector storing the score
# (index 1 of wss/sil stays 0 and is dropped below, since k starts at 2)
wss <- numeric(kmax)
sil <- numeric(kmax)
# k starts from 2, cluster 1 is meaningless
if( clustermethod == "kmeanspp" )
{
for( k in 2:kmax )
{
results <- Kmeanspp( data, k, ... )
wss[k] <- results$tot.withinss
sil[k] <- mean(silhouette(results$cluster, dist(data))[,3])
}
} else if ( clustermethod == "pam" )
{
for( k in 2:kmax )
{
results <- pam( data, k)
wss[k] <- WSS( data = data, groups = results$cluster )
sil[k] <- mean(silhouette(results$cluster, dist(data))[,3])
#sil[k] <- mean(silhouette_val[, 3])
}
}
else # "hclust"
{
for( k in 2:kmax )
{
#clustering <- hclust(as.dist(data,diag = TRUE, upper = TRUE), method= 'complete',...)
#sil[k]<- silhouette(cutree(clustering, k = k) ,as.dist(data))
#rownames(sil_cl) <- rownames(tmp)
clustering <- hclust( dist(data), method = linkmethod, ... )
groups <- cutree( clustering, k )
wss[k] <- WSS( data = data, groups = groups )
sil[k] <- mean(silhouette(groups, dist(data))[, 3])
}
}
# between sum of square
bss <- tss - wss[-1]
# cluster count start from 2!
# CH index = (bss / (k - 1)) / (wss / (n - k))
numerator <- bss / ( 1:(kmax-1) )
denominator <- wss[-1] / ( nrow(data) - 2:kmax )
criteria <- data.frame( k = 2:kmax,
CHIndex = numerator / denominator,
wss = wss[-1], sil=sil[-1])
# convert to long format for plotting
criteria_long <- gather( criteria, "index", "value", -1 )
print(paste(clustermethod,linkmethod))
# NOTE(review): guides(color = FALSE) is deprecated in recent ggplot2
# (use "none"); confirm the ggplot2 version in use still accepts it.
plot <- ggplot( criteria_long, aes( k, value, color = index)) +
geom_line() + geom_point( aes( shape = index ), size = 3 ) +
facet_wrap( ~ index, scale = "free_y" ) +
guides( color = FALSE, shape = FALSE ) #+labs(title=paste(clustermethod,linkmethod), x ="Number of clusters", y = "Score")
return( list( data = criteria,
plot = plot ) )
}
# Load the pairwise NetEMD distance matrix, make it symmetric, embed it
# with principal coordinates analysis, and write cluster-evaluation plots
# (CH index, WSS, silhouette) for several clustering methods to a PDF.
netemd <- read.csv("output.csv", header = TRUE, sep = ",")
rownames(netemd) <- netemd[, 1]
netemd$X <- NULL
diag(netemd) <- 0
library(Matrix)
# forceSymmetric() already mirrors one triangle onto the other. The
# original follow-up `netemd[lower.tri(netemd)] <- netemd[upper.tri(netemd)]`
# is removed: the column-major orders of the two triangles do not mirror
# each other for matrices larger than 3x3, so that assignment would
# scramble the lower triangle rather than reinforce symmetry.
netemd <- forceSymmetric(netemd)
netemd <- as.matrix(netemd)
library(vegan)
# PCoA with a Lingoes correction for non-Euclidean distances; keep 10 axes.
pco.result <- wcmdscale(netemd, eig = TRUE, add = "lingoes", k = 10)
source("kmeanspp.R")
somePDFPath <- "clustering_eval.pdf"
pdf(file = somePDFPath, width = 8, height = 4, paper = 'special')
criteria <- CHCriterion(data = pco.result$points, kmax = 50, clustermethod = "hclust", linkmethod = 'single')
criteria$plot
criteria <- CHCriterion(data = pco.result$points, kmax = 50, clustermethod = "hclust", linkmethod = 'complete')
criteria$plot
criteria <- CHCriterion(data = pco.result$points, kmax = 50, clustermethod = "hclust", linkmethod = 'average')
criteria$plot
criteria <- CHCriterion(data = pco.result$points, kmax = 50, clustermethod = "kmeanspp", linkmethod = 'average')
criteria$plot
criteria <- CHCriterion(data = pco.result$points, kmax = 50, clustermethod = "pam", linkmethod = 'average')
criteria$plot
dev.off()
|
689f56816820413da601be00e5f035f37cff477f
|
6a11d7abcaf6fded7f7d0b423be0dd9aa12bbc99
|
/man/validate.Rd
|
b3dd741f8230dda14de070c62af263455a838fbc
|
[] |
no_license
|
cran/spacesRGB
|
e312a05e70e1b760ec8619113015f9e6ed223f31
|
93c0fd64db8a873d3b7da9f0eb15f2dfde3a86be
|
refs/heads/master
| 2021-12-09T08:39:59.256040
| 2021-12-06T04:30:02
| 2021-12-06T04:30:02
| 135,997,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
rd
|
validate.Rd
|
% File validate.Rd
\name{validate}
\title{Validate a TransferFunction by applying some simple Tests}
\alias{validate}
\alias{validate.TransferFunction}
\description{
Each \code{TransferFunction} object is actually a list of so-called \emph{elementary} transfer functions;
for details on this see \code{\link{composition}()}.
This \code{validate()} applies an internal \code{validate()} function to each elementary function
individually.
The internal \code{validate()} function generates some points in the domain of the function
and checks that all points are transfered into the range of the function.
If the function is also invertible, it checks that the inverse transfers back to the original point.
}
\usage{
\S3method{validate}{TransferFunction}( TF, points=1300, tol=5.e-7, domain=NULL )
}
\arguments{
\item{TF}{a \code{TransferFunction} object with dimension N, and consisting of M elementary transfer functions}
\item{points}{the number of points to test, in each elementary function}
\item{tol}{the numerical tolerance for the inversion test - this is relative to the length of the corresponding side of the domain box}
\item{domain}{a 2xN matrix to use as an alternate domain, for the first elementary function in the list only.
\code{domain} can also be a vector of length 2, which is then replicated to a 2xN matrix.
}
}
\value{
The function returns a logical vector of length M.
The value of the i'th element is the validation status of the i'th elementary function.
The returned vector has the attribute \code{'message'} which is a list of length M
with explanatory text.
For nicely formatted text see \code{\link{print}()}.
}
\seealso{
\code{\link{TransferFunction}},
\code{\link{identity.TF}},
\code{\link{composition}()},
\code{\link{print.TransferFunction}()}
}
|
c09e9435f55123b1fc360c65e23a52802160e9e5
|
80d82a839691562bc2f76903c1435d48ad137e2b
|
/run_analysis.R
|
4d37b41825a9598adb4157015f050d4578545510
|
[] |
no_license
|
grimli/gettingCleaningProject
|
8b64e9e00ee94401842aeea1b1380f36b93e3858
|
f1f9a0ee387bb44fb109c1ce68ce9781940b1631
|
refs/heads/master
| 2020-03-30T14:06:52.011463
| 2015-05-19T20:20:21
| 2015-05-19T20:20:21
| 35,772,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,333
|
r
|
run_analysis.R
|
# run_analysis.R
# This script expects the raw "UCI HAR Dataset" directory (uncompressed) under
# the working directory.  It merges the test and train sets, keeps only the
# mean()/std() measurements, attaches descriptive activity names, and finally
# produces `dataset_2`: the average of each variable per activity and subject.
library(plyr)

# --- Load the raw data ------------------------------------------------------
# General lookup tables.
activity_labels <- read.table(file = "UCI HAR Dataset/activity_labels.txt")
features <- read.table(file = "UCI HAR Dataset/features.txt")
# Test set: measurements, activity ids and subject ids.
X_test <- read.table(file = "UCI HAR Dataset/test/X_test.txt")
y_test <- read.table(file = "UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table(file = "UCI HAR Dataset/test/subject_test.txt")
# Train set: measurements, activity ids and subject ids.
X_train <- read.table(file = "UCI HAR Dataset/train/X_train.txt")
y_train <- read.table(file = "UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table(file = "UCI HAR Dataset/train/subject_train.txt")

# Descriptive column names for the activity lookup table.
colnames(activity_labels) <- c("ID_activity", "activity")
# Columns to keep: the two identifier columns plus every mean()/std() feature.
relevant_variables <- c("subject", "ID_activity","tBodyAcc-mean()-X" , "tBodyAcc-mean()-Y" , "tBodyAcc-mean()-Z" , "tBodyAcc-std()-X" , "tBodyAcc-std()-Y" , "tBodyAcc-std()-Z" , "tGravityAcc-mean()-X" , "tGravityAcc-mean()-Y" , "tGravityAcc-mean()-Z" , "tGravityAcc-std()-X" , "tGravityAcc-std()-Y" , "tGravityAcc-std()-Z" , "tBodyAccJerk-mean()-X" , "tBodyAccJerk-mean()-Y" , "tBodyAccJerk-mean()-Z" , "tBodyAccJerk-std()-X" , "tBodyAccJerk-std()-Y" , "tBodyAccJerk-std()-Z" , "tBodyGyro-mean()-X" , "tBodyGyro-mean()-Y" , "tBodyGyro-mean()-Z" , "tBodyGyro-std()-X" , "tBodyGyro-std()-Y" , "tBodyGyro-std()-Z" , "tBodyGyroJerk-mean()-X" , "tBodyGyroJerk-mean()-Y" , "tBodyGyroJerk-mean()-Z" , "tBodyGyroJerk-std()-X" , "tBodyGyroJerk-std()-Y" , "tBodyGyroJerk-std()-Z" , "tBodyAccMag-mean()" , "tBodyAccMag-std()" , "tGravityAccMag-mean()" , "tGravityAccMag-std()" , "tBodyAccJerkMag-mean()" , "tBodyAccJerkMag-std()" , "tBodyGyroMag-mean()" , "tBodyGyroMag-std()" , "tBodyGyroJerkMag-mean()" , "tBodyGyroJerkMag-std()" , "fBodyAcc-mean()-X" , "fBodyAcc-mean()-Y" , "fBodyAcc-mean()-Z" , "fBodyAcc-std()-X" , "fBodyAcc-std()-Y" , "fBodyAcc-std()-Z" , "fBodyAccJerk-mean()-X" , "fBodyAccJerk-mean()-Y" , "fBodyAccJerk-mean()-Z" , "fBodyAccJerk-std()-X" , "fBodyAccJerk-std()-Y" , "fBodyAccJerk-std()-Z" , "fBodyGyro-mean()-X" , "fBodyGyro-mean()-Y" , "fBodyGyro-mean()-Z" , "fBodyGyro-std()-X" , "fBodyGyro-std()-Y" , "fBodyGyro-std()-Z" , "fBodyAccMag-mean()" , "fBodyAccMag-std()" , "fBodyBodyAccJerkMag-mean()" , "fBodyBodyAccJerkMag-std()" , "fBodyBodyGyroMag-mean()" , "fBodyBodyGyroMag-std()" , "fBodyBodyGyroJerkMag-mean()" , "fBodyBodyGyroJerkMag-std()")

# --- Test set ---------------------------------------------------------------
test_beta <- cbind(subject_test, y_test, X_test)
colnames(test_beta) <- c("subject", "ID_activity", as.character(features[, 2]))
# merge() is done last because it reorders rows; it attaches the readable
# activity name to every observation.  The by-column (ID_activity) comes
# first in the result, followed by activity, subject, then the measures.
test <- merge(activity_labels, test_beta[relevant_variables], by = "ID_activity")

# --- Train set (same steps as the test set) ---------------------------------
train_beta <- cbind(subject_train, y_train, X_train)
colnames(train_beta) <- c("subject", "ID_activity", as.character(features[, 2]))
train <- merge(activity_labels, train_beta[relevant_variables], by = "ID_activity")

# --- Combine and tidy -------------------------------------------------------
dataset_beta <- rbind(test, train)
# Drop the numeric activity id by POSITION OF THE ID ONLY: the original
# `dataset_beta[2:68]` assumed 68 columns, but the merged frame has 69
# (ID_activity, activity, subject + 66 measures), so the last measure column
# ("fBodyBodyGyroJerkMag-std()") was silently dropped.  `[-1]` removes just
# the id column regardless of how many measures are kept.
dataset <- dataset_beta[-1]
# Clean up intermediate objects (optional; keeps the workspace tidy).
rm("dataset_beta", "X_test", "X_train", "features", "subject_test",
   "subject_train", "test", "test_beta", "y_test", "y_train", "train",
   "train_beta", "activity_labels")
# Tidy data set: the mean of every variable for each activity/subject pair.
dataset_2 <- ddply(dataset, c("activity", "subject"), colwise(mean))
|
99e568e0cc9196b83fa9b6abefe2f19b991760b7
|
d4993f92e71c70ca13cde160565fbdbb165251a7
|
/example_model.r
|
190fe11f3c6fbb15ba8867e643a15628509c30ba
|
[] |
no_license
|
Frjo0083/example-scripts
|
3604dcac5f5e803ff0614b389b2daaca326aa080
|
233ca3582685342c2ab9f3d736ae80327ed87d37
|
refs/heads/master
| 2022-12-02T04:35:49.326605
| 2020-08-17T17:42:30
| 2020-08-17T17:42:30
| 288,244,426
| 0
| 0
| null | 2020-08-17T17:32:58
| 2020-08-17T17:32:58
| null |
UTF-8
|
R
| false
| false
| 12,547
|
r
|
example_model.r
|
#Tutorial and Tips for Numerai
#***
#The stock market is the hardest data science problem in the world.
#If it seems easy, you're doing it wrong.
#We don't know how to solve this problem. But here is what we know.
#We need your help. You take it from here.
#***
library(magrittr)
library(tidyverse)
library(ranger)
library(gbm)
set.seed(1337)
#function to calculate them correlation between your model's predictions and the target across the validation eras
# Per-era Spearman correlation between a model's predictions and the target.
#
# `df` must contain the columns `era`, `target_kazutsugi` and `predictions`.
# Returns one row per era with the correlation in `r_model`.
# (The original assigned the result to a local variable as its last
# expression, which returned the value invisibly; the bare pipeline below
# returns it visibly.)
corrfun <- function(df) {
  df %>%
    group_by(era) %>%
    summarise(r_model = cor(target_kazutsugi, predictions, method = "spearman")) %>%
    ungroup()
}
print(
"This script uses down sampling and random forest and will likely underperform the example_predictions.csv"
)
#the training data is used to train your model how to predict the targets
training_data <- read_csv("numerai_training_data.csv")
#the tournament data is the data that Numerai uses to evaluate your model
tournament_data <- read_csv("numerai_tournament_data.csv")
#the tournament data contains validation data, test data and live data
#validation is a hold out set out of sample and further in time (higher eras) than the training data
#let's separate the validation data from the tournament data
validation <- tournament_data %>%
filter(data_type == "validation")
#we will not be using id and data_type when training the model so these columns should be removed from the training data
training_data %<>%
select(-id,-data_type)
#the training data is large so we reduce it by 10x in order to speed up this script
#note that reducing the training data like this is not recommended for a production ready model
nrow(training_data)
#remove the following lines to skip the down sampling. Note: training will take much longer to run
training_data_downsampled <- training_data %>%
sample_frac(0.1)
#there are a large number of features
training_data_downsampled %>%
select(-era, -target_kazutsugi) %>%
colnames() %>%
length()
#features are a number of feature groups of different sizes (intelligence, charisma, strength, dexterity, constitution, wisdom)
#Numerai does not disclose what the features or feature groups mean; they are abstract and obfuscated
#however, features within feature groups tend to be related in some way
#models which have large feature importance in one group tend to do badly for long stretches of time (eras)
training_data_downsampled %>%
colnames() %>%
head(20)
#the target variable has 5 ordinal outcomes (0, 0.25, 0.5, 0.75, 1)
#we train a model on the features to predict this target
summary(training_data_downsampled$target_kazutsugi)
#on this dataset, feature importance analysis is very important
#we build a random forest to understand which features tend to improve the model out of bag
#because stocks within eras are not independent, we use small bag sizes (sample size = 10%) so the out of bag estimate is meaningful
#we use a small subset of features for each tree to build a more feature balanced forest (mtry = floor(num_features))
#we use all features except for era
forest <-
ranger(
target_kazutsugi ~ .,
data = training_data_downsampled[, -1],
num.trees = 500,
max.depth = 5,
sample.fraction = 0.1,
importance = "impurity"
)
feature_importance <- as.data.frame(importance(forest))
colnames(feature_importance) <- "feature_importance"
#a good model might drop the bad features according to the forest before training a final model
#if a feature group or feature is too good, it might also be a good idea to drop it to improve the feature balance and improve consistency of the model
#let us display the, according to our model, 20 most important features on this subset of the training data
feature_importance %>%
arrange(-feature_importance) %>%
head(20)
#based on the forest we can generate predictions on the validation set
#the validation set contains eras further in time than the training set
#the validation set is not "representative of the live data"; no one knows how the live data will behave
#usually train performance < validation performance < live performance because live data is many eras into the future from the training data
predictions <- predict(forest, validation)
head(predictions$predictions)
#Numerai measures performance based on rank correlation between your predictions and the true targets
cor(validation$target_kazutsugi,
predictions$predictions,
method = "spearman")
#a correlation of 0.017 might be on the lower end, this model could perhaps use some extra tuning or more training data
validation$predictions <- predictions$predictions
#let us look at the era to era variation when it comes to correlation
correlations <- corrfun(validation)
correlations %>% print(n = Inf)
#era 199 seems difficult, our model has a correlation of -0.0384 on that particular era
#successful models typically have few eras with large negative correlations
#consistency is the fraction of months where the model achieves better than zero correlation with the targets
sum(ifelse(correlations$r_model > 0, 1, 0)) / nrow(correlations)
#the Sharpe ratio is defined as the average era correlation divided by their standard deviation
mean(correlations$r_model) / sd(correlations$r_model)
#we can try a GBM model; we also choose a low bag fraction of 10% as a strategy to deal with within-era non-independence (which is a property of this data)
#(if you take a sample from one era and a different sample from the same era that sample is not really out of sample because the observations occured in the same era)
#having small bags also improves the out of bag estimate for the optimal number of trees
model <-
gbm(
target_kazutsugi ~ .,
data = training_data_downsampled[,-1],
n.trees = 50,
shrinkage = 0.01,
interaction.depth = 5,
train.fraction = 1,
bag.fraction = 0.1,
verbose = T
)
#looking at the relative importance of the features we can see the model relies more on some features than others
head(summary(model))
best.iter <- gbm.perf(model, method = "OOB")
best.iter
predictions <-
predict.gbm(model, validation, n.trees = best.iter, type = "response")
head(predictions)
cor(validation$target_kazutsugi, predictions, method = "spearman")
validation$predictions <- predictions
#let us look at the era to era variation when it comes to correlation for our GBM model
correlations <- corrfun(validation)
correlations %>% print(n = Inf)
#let us calculate the consistency
sum(ifelse(correlations$r_model > 0, 1, 0)) / nrow(correlations)
#and the Sharpe ratio
mean(correlations$r_model) / sd(correlations$r_model)
#the GBM model and random forest model have the same consistency (number of eras where correlation is positive) even though correlations are different
#improving consistency can be more important than improving standard machine learning metrics like RMSE
#good models might train in such a way as to minimize the error across eras (improve consistency) not just reduce the error on each training example
#so far we have ignored eras for training
#eras are in order of time; for example, era4 is before era5
#the reason to use eras is that cross validation on rows will tend to overfit (rows within eras are not independent)
#so it's much better to cross validate within eras for example: take a subset of eras, build a model and test on the out of sample subset of eras
#this will give a better estimate of how well your model will generalize
#the validation set is not special; it is just an out of sample set of eras greater than the training set
#some users might choose to train on the validation set as well
#to give you a sense of how to use eras we train a model on the first half of the eras and test it on the second half
# BUG FIX: `as.integer(as.factor(era))` ranks era labels ALPHABETICALLY, so
# "era10" sorts before "era2" and the <=60 / >60 cut below was not a split in
# time.  Stripping the non-digit prefix yields the true era number for both
# padded and unpadded labels (and is a no-op if era is already numeric).
training_data$era <- as.integer(sub("\\D*", "", training_data$era))
first_half <- training_data %>%
  filter(era <= 60)
second_half <- training_data %>%
  filter(era > 60)
#we remove id, era, data_type column and train a GBM model on the first half of the data
model_first_half <-
gbm(
target_kazutsugi ~ .,
data = first_half[, -1],
n.trees = 50,
shrinkage = 0.01,
interaction.depth = 5,
train.fraction = 1,
bag.fraction = 0.1,
verbose = T
)
best.iter1 <- gbm.perf(model_first_half, method = "OOB")
best.iter1
predictions_second_half <-
predict.gbm(model_first_half,
as.data.frame(second_half),
n.trees = best.iter1,
type = "response")
#our correlation score is good; what we appeared to learn generalizes well on the second half of the eras
cor(second_half$target_kazutsugi,
predictions_second_half,
method = "spearman")
second_half$predictions <- predictions_second_half
#let us investigate the correlations on the second half of the data
correlation_data_second_half <- second_half %>%
group_by(era) %>%
summarise(r_model = cor(target_kazutsugi, predictions, method = "spearman")) %>%
ungroup()
second_half <- second_half %>% select(-predictions)
correlation_data_second_half %>% print(n = Inf)
#let us calculate the consistency on the second half of the data
sum(ifelse(correlation_data_second_half$r_model > 0, 1, 0)) / nrow(correlation_data_second_half)
#and the Sharpe ratio
mean(correlation_data_second_half$r_model) / sd(correlation_data_second_half$r_model)
#but now we try build a model on the second half of the eras and predict on the first half
model_second_half <-
  gbm(
    target_kazutsugi ~ .,
    data = second_half[, -1],
    n.trees = 50,
    shrinkage = 0.01,
    interaction.depth = 5,
    train.fraction = 1,
    bag.fraction = 0.1,
    verbose = TRUE
  )
# Out-of-bag estimate of the optimal number of boosting iterations for THIS
# model.  BUG FIX: the original printed the stale `best.iter` (from the
# earlier full-data model) and also predicted with it; the symmetric
# first-half code correctly used `best.iter1`, so use `best.iter2` here.
best.iter2 <- gbm.perf(model_second_half, method = "OOB")
best.iter2
predictions_first_half <-
  predict.gbm(
    model_second_half,
    as.data.frame(first_half[, -1]),
    n.trees = best.iter2,
    type = "response"
  )
#let us calculate the correlation
cor(first_half$target_kazutsugi, predictions_first_half,
method = "spearman")
first_half$predictions <- predictions_first_half
#let us investigate the correlations on the second half of the data
correlation_data_first_half <- first_half %>%
group_by(era) %>%
summarise(r_model = cor(target_kazutsugi, predictions, method = "spearman")) %>%
ungroup()
correlation_data_first_half %>% print(n = Inf)
#let us calculate the consistency on the second half of the data
sum(ifelse(correlation_data_first_half$r_model > 0, 1, 0)) / nrow(correlation_data_first_half)
#and the Sharpe ratio
mean(correlation_data_first_half$r_model) / sd(correlation_data_first_half$r_model)
#our correlation score is surprisingly low and surprisingly different from when we trained on the first half
#this means our model is not very consistent, and it's possible that we will see unexpectedly low performance on the test set or live set
#it also shows that our validation performance is likely to greatly overestimate our performance--this era-wise cross validation is more valuable
#a model whose performance when training on the first half is the same as training on the second half would likely be more consistent
#and would likely perform better on the tournament data and the live data
#remove unused data and models
# `forest` is intentionally kept: it produces the final submission below.
# BUG FIX: the original call had a trailing comma (an empty argument, which
# errors at evaluation) and listed `prediction_kazutsugi`, which does not
# exist yet at this point in the script (it is created just below).
rm(
  feature_importance,
  model,
  best.iter,
  best.iter1,
  best.iter2,
  model_first_half,
  model_second_half,
  first_half,
  second_half,
  training_data,
  validation,
  training_data_downsampled,
  predictions_first_half,
  predictions_second_half,
  correlation_data_first_half,
  correlation_data_second_half,
  correlations,
  predictions
)
#Numerai only pays models with correlations that beat the benchmark on the live portion of the tournament data
#to submit predictions from your model to Numerai, predict on the entire tournament data
#we choose use our original forest model for our final submission
# `forest` is the ranger model fit earlier; predict over the full tournament
# data (validation + test + live rows).
prediction_kazutsugi <- predict(forest, tournament_data)
tournament_data$prediction_kazutsugi <-
  prediction_kazutsugi$predictions
#create your submission
# Submission format: one row per tournament id with the model's prediction.
submission <- tournament_data %>% select(id, prediction_kazutsugi)
head(submission)
#save your submission and now upload it to https://numer.ai
# NOTE(review): readr >= 1.4 deprecates `path =` in favour of `file =`; this
# still works with a warning -- confirm the readr version in use.
write_csv(submission, path = "kazutsugi_submission.csv")
|
06220cf4884bac0ee33bcc1fa5a321edf4d1dc2f
|
c32a17abe0b8838ce0625b1272f840f0d220b9e1
|
/cachematrix.R
|
122b51d82e7f02ca6b6e209380c1376b0a2414fe
|
[] |
no_license
|
sokhals/ProgrammingAssignment2
|
9630fc93851ffe5c6dd295f4aa592b804aa1bd83
|
664f6081aabfd9abbba5691e946441d2170c044d
|
refs/heads/master
| 2021-01-18T09:39:48.136129
| 2014-12-20T06:30:42
| 2014-12-20T06:30:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,756
|
r
|
cachematrix.R
|
## This program set the value of the given matrix and find the
## inverse of the matrix. If inverse already exists then it returns
## the inverse else it returns the inverse for the given matrix.
##GIVEN: The matrix
##FUNCTION: It creates a list that has function which serves following
## purposes.
## 1. It sets the value of matrix using "setMatrix" function
## 2. It gets the matrix using "getMatrix" function which will
## be used by cacheSolve function to calulate Inverse of
## given Matrix.
## 3. It stores the inverse of Matrix computed by cacheSolve
## function into the list using "setInverse" function.
## 4. It gets the inverse of already computed matrixes which
## will be used by cacheSolve function in order to check; if
## inverse already exists; return the same inverse else compute
## the inverse for new matrix.
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four closures sharing one enclosing environment that
## holds the matrix `x` and its lazily computed inverse:
##   setMatrix  - replace the stored matrix (invalidates the cached inverse)
##   getMatrix  - return the stored matrix
##   setInverse - store an inverse computed elsewhere (used by cacheSolve)
##   getInverse - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    setMatrix = function(y) {
      # Storing a new matrix drops any stale cached inverse.
      x <<- y
      inv_cache <<- NULL
    },
    getMatrix = function() x,
    setInverse = function(solve) inv_cache <<- solve,
    getInverse = function() inv_cache
  )
}
##FUNCTION: This function serves two purposes:
##          1. It checks for the inverse in the list; if the inverse exists,
##          it returns that same inverse, i.e. the cached result.
##          2. If the inverse for the given matrix comes out to be null, it
##          computes the inverse, stores it in the list for later use, and
##          returns it.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On the first call the inverse is computed with solve() and stored in the
## wrapper; later calls serve the cached copy (with a message).  Extra
## arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, store for next time, and return.
    cached <- solve(x$getMatrix(), ...)
    x$setInverse(cached)
    return(cached)
  }
  # Cache hit: announce and return the stored inverse.
  message("Cached inverse of matrix is returned")
  cached
}
|
2cf286a720999f99f14a8129d41f0ec63aca041f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rbhl/examples/bhl_namecount.Rd.R
|
8199222f4581183644681b44167028b4f984671d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
bhl_namecount.Rd.R
|
library(rbhl)
### Name: bhl_namecount
### Title: Return the number of unique names found on pages in BHL.
### Aliases: bhl_namecount
### ** Examples
## Not run:
##D bhl_namecount(startdate = '12/25/2009', enddate = '12/27/2009')
##D bhl_namecount(startdate = '10/15/2009', enddate = '10/17/2009', as='json')
## End(Not run)
|
4560b39377f5302147371dae4946917038a637f4
|
c527b9fd330ad3284230bfdbcd791acabd0d618e
|
/predictZOO.R
|
6373e2b442f29b7fcae6ad044209399f649a7d8a
|
[] |
no_license
|
RMORSEcode/NEhabitat
|
c1aa66d7e37899d080fa58d775b56e3e764bb1f1
|
b7622199b77d4fbc86ee14a21ba691a1db5edd66
|
refs/heads/master
| 2022-10-14T12:50:39.882458
| 2022-09-21T15:00:02
| 2022-09-21T15:00:02
| 206,566,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,984
|
r
|
predictZOO.R
|
# Choose
# Pick ONE season by running exactly one of the next two lines; if the whole
# block is sourced as-is, the second (SPRING) assignment wins.
slctseason="FALL"; fishseas="Fall"; SEASON='Fall'
slctseason="SPRING"; fishseas="Spr"; SEASON='Spr'
# Testing GAMs on zooplankton abundance
# NOTE(review): `FData.abn` and `month()` (presumably lubridate) are
# defined/loaded elsewhere -- this block assumes they are already in the
# workspace.
zoo=FData.abn %>% dplyr::select(YEAR, SEASON, LAT:BOTSALIN, calfin_100m3:rug)
zoo$MONTH=month(FData.abn$EST_TOWDATE)
# Keep only rows complete across all predictors and responses.
zoo=zoo[complete.cases(zoo),]
zoo2=zoo[which(zoo$SEASON==slctseason),] # subset to season
# Columns 10:19 are assumed to be the ten zooplankton abundance columns
# (calfin_100m3:rug) -- TODO confirm the column positions.
logd=zoo2[,10:19]
zoopa=ifelse(logd>0, 1, 0) # presence absence
logd10=log10(logd+1)
zoo2[,10:19]=logd10 # use in biomass (subset to only present)
zoo3=zoo2
zoo3[,10:19]=zoopa # use in PA only
set.seed(101) # Set Seed so that same sample can be reproduced in future also
# # Now Selecting 75% of data as sample from total 'n' rows of the data
## presence absence only model
sample <- sample.int(n = nrow(zoo3), size = floor(.75*nrow(zoo3)), replace = F)
trainzoo <- zoo3[sample, ]
testzoo <- zoo3[-sample, ]
zoonm='calfin' #'pseudocal' #'calfin'
## positive biomass model - select zoo taxa to use for model and subset to positive biomass only
# NOTE(review): the response is hard-coded to calfin_100m3 here and in the
# model formulas below; setting zoonm to another taxon does NOT change the
# response -- verify before switching species.
zooposbio=zoo2[which(zoo3$calfin_100m3>0),]
sample <- sample.int(n = nrow(zooposbio), size = floor(.75*nrow(zooposbio)), replace = F)
trainzoobio <- zooposbio[sample, ]
testzoobio <- zooposbio[-sample, ]
# + s(grnszmm, k=30, bs='tp') + s(rug, k=30, bs='tp')
# Baseline GAMs (no spatial term): presence/absence (binomial) and positive
# biomass (gaussian, on log10 abundance) as smooths of temperature, depth and
# salinity.  Fitted objects are saved to the season/taxon raster directory.
zoo_modG_pa = gam(calfin_100m3 ~ s(BOTTEMP, k=35, bs="tp") +s(SURFTEMP, k=35, bs='tp') + s(DEPTH, k=30, bs="tp") + s(BOTSALIN, k=30, bs='tp') + s(SURFSALIN, k=30, bs="tp"), data=trainzoo, method = "REML", family="binomial", select=T)
zoo_modG_pb = gam(calfin_100m3 ~ s(BOTTEMP, k=35, bs="tp") +s(SURFTEMP, k=35, bs='tp') + s(DEPTH, k=30, bs="tp") + s(BOTSALIN, k=30, bs='tp') + s(SURFSALIN, k=30, bs="tp"), data=trainzoobio, method = "REML", family="gaussian", select=T)
save(zoo_modG_pa, file=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/zoo_modG_pa_',fishseas,'_',zoonm,'.Rdata',sep=''))
save(zoo_modG_pb, file=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/zoo_modG_pb_',fishseas,'_',zoonm,'.Rdata',sep=''))
# ______
# Same pair of models plus an isotropic thin-plate smooth of lon/lat.
zoo_modG4_pa = gam(calfin_100m3 ~ s(LON, LAT, bs='tp') + s(BOTTEMP, k=35, bs="tp") +s(SURFTEMP, k=35, bs='tp') + s(DEPTH, k=30, bs="tp") + s(BOTSALIN, k=30, bs='tp') + s(SURFSALIN, k=30, bs="tp"), data=trainzoo, method = "REML", family="binomial", select = T)
zoo_modG4_pb = gam(calfin_100m3 ~ s(LON, LAT, bs='tp') + s(BOTTEMP, k=35, bs="tp") +s(SURFTEMP, k=35, bs='tp') + s(DEPTH, k=30, bs="tp") + s(BOTSALIN, k=30, bs='tp') + s(SURFSALIN, k=30, bs="tp"), data=trainzoobio, method = "REML", family="gaussian", select=T)
save(zoo_modG4_pa, file=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/zoo_modG4_pa_',fishseas,'_',zoonm,'.Rdata',sep=''))
save(zoo_modG4_pb, file=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/zoo_modG4_pb_',fishseas,'_',zoonm,'.Rdata',sep=''))
# ______
# run soap film smoother below with Lat Lon first
# Drop training points that fall outside the soap-film boundary polygon.
# NOTE(review): `sps`, `boundll` and `knotsll` are defined elsewhere --
# presumably the study-area polygon and soap-film boundary/knots; confirm.
trainzoopts=SpatialPoints(cbind(trainzoo$LON, trainzoo$LAT),proj4string=CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
trainzoo.in=sp::over(trainzoopts, sps)
trainzoo.sub=trainzoo
trainzoo.sub=trainzoo.sub[complete.cases(trainzoo.in),]
trainzoobiopts=SpatialPoints(cbind(trainzoobio$LON, trainzoobio$LAT),proj4string=CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
trainzoo.inbio=sp::over(trainzoobiopts, sps)
trainzoo.subbio=trainzoobio
trainzoo.subbio=trainzoo.subbio[complete.cases(trainzoo.inbio),]
# Soap-film (bs = "so") spatial smooth variants of the two models.
zoo_modG4bll_pa = gam(calfin_100m3 ~ s(LON, LAT, bs = "so", xt = list(bnd = boundll)) + s(BOTTEMP, k=20, bs="tp") +s(SURFTEMP, k=20, bs='tp') + s(DEPTH, k=10, bs="tp") + s(BOTSALIN, k=30, bs='tp') + s(SURFSALIN, k=30, bs="tp"), data=trainzoo.sub, method = "REML", knots=knotsll, family="binomial", select=T)
zoo_modG4bll_pb = gam(calfin_100m3 ~ s(LON, LAT, bs = "so", xt = list(bnd = boundll)) + s(BOTTEMP, k=20, bs="tp") +s(SURFTEMP, k=20, bs='tp') + s(DEPTH, k=10, bs="tp") + s(BOTSALIN, k=30, bs='tp') + s(SURFSALIN, k=30, bs="tp"), data=trainzoo.subbio, method = "REML", knots=knotsll, family="gaussian", select=T)
save(zoo_modG4bll_pa, file=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/zoo_modG4bll_pa_',fishseas,'_',zoonm,'.Rdata',sep=''))
save(zoo_modG4bll_pb, file=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/zoo_modG4bll_pb_',fishseas,'_',zoonm,'.Rdata',sep=''))
# draw(zoo_modG)
# gam.check(zoo_modG)
# plot(fish_modG, shade=T, rug=T, scale=0)
# AIC(fish_modG)
# Directory holding the saved models for the chosen season/taxon.
path1=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/zoop/',zoonm,'/',sep='')
#### VERIFY MODEL SPECIES AND SEASON ####
# Bare expressions below print their values interactively as a sanity check.
fishseas
zoonm
trainzoo$SEASON[1] #verify season
modlistpa=list.files(path1, pattern = '_pa_') # presence-absence models
modlistpb=list.files(path1, pattern = '_pb_') # positive biomass models
### verify models as separate files
# Plot ROC curves (one page per presence/absence model) to assess AUC.
# NOTE(review): `testPA`, `loadRData` and `plotROC` are defined elsewhere --
# this assumes testPA carries the observed presence/absence in `pa`; confirm.
pdf(paste(path1, 'PAmodels_AUC.pdf', sep=''), height=4, width=6)
for (i in seq_along(modlistpa)) {
  usemodel <- loadRData(paste(path1, modlistpa[i], sep=''))
  pred.test <- predict(usemodel, testPA, type='response')
  preds.obs <- data.frame(pred.test=pred.test, testPA$pa)
  colnames(preds.obs) <- c("predicted","observed")
  # BUG FIX: the original filtered `preds.obs2` before it existed (error on
  # the first iteration); filter the freshly built `preds.obs` instead.
  preds.obs2 <- preds.obs[complete.cases(preds.obs$predicted), ]
  plotROC(preds.obs2$observed, preds.obs2$predicted, colorize = TRUE,
          main=paste('All Stages ', modlistpa[i], sep=''))
}
dev.off()
# Ad-hoc check of the last loaded model against the zooplankton test split.
pred.test <- predict(usemodel, testzoo, type='response')
preds.obs <- data.frame(pred.test=pred.test, testzoo$calfin_100m3)
colnames(preds.obs) <- c("predicted","observed")
# BUG FIX: use the current predictions, not the stale `preds.obs2` left over
# from the loop above.
preds.obs2 <- preds.obs[complete.cases(preds.obs$predicted), ]
plotROC(preds.obs2$observed, preds.obs2$predicted, colorize = TRUE,
        main=paste('All Stages ', modlistpa[i], sep=''))
## list data files in each folder (yearly raster files per environmental driver)
btlist=list.files(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/BT2', sep=''))
stlist=list.files(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/ST2', sep=''))
zlist=list.files(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/pseudo', sep=''))
sslist=list.files(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/SS2', sep=''))
bslist=list.files(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/BS2', sep=''))
## parse year from filenames e.g #"RAST_NESREG_1977.04.03.BT.TEMP.YEAR.000066596.RData"
# Extract the 4-digit year from each file name.  Replaces five copy-pasted
# strsplit/sapply stanzas with a single helper.
# NOTE(review): the original also left intermediates (tb, ttb, ts, ...) in the
# workspace; nothing in view uses them -- confirm before relying on this.
parse_years <- function(files) {
  # Split off the common prefix, then take the leading token of the
  # remainder (the year) and convert to numeric.
  pieces <- strsplit(files, split=('RAST_NESREG_'))
  yrs <- sapply(pieces, function(x) strsplit(x, "[.]")[[2]][1], USE.NAMES=FALSE)
  as.numeric(yrs)
}
ttb2=parse_years(btlist)   # bottom temperature
tts2=parse_years(stlist)   # surface temperature
ttz2=parse_years(zlist)    # zooplankton (pseudocal)
ttss2=parse_years(sslist)  # surface salinity
ttbs2=parse_years(bslist)  # bottom salinity
## DO for zooplankton species to model abundance
# Hindcast loop: combine the presence/absence HGAM (zoo_modG_pa) and the
# abundance HGAM (zoo_modG_pb) into a delta-style prediction
# (P(presence) * abundance) for every year in zooyrlist, rasterise each
# yearly prediction and accumulate them into `keepstack`.
usemodelpa=zoo_modG_pa #loadRData(paste(path1,modlist[modchoice], sep='')) #fish_modS #_spr_had
usemodelbio=zoo_modG_pb
zooyrlist=seq(from=1992, to=2019, by=1)
fishnm='zoop'
zoosp='pseudocal' #'calfin' #'pseudocal'
rm(bt)
# NOTE(review): `zoonm` is not defined anywhere in this script (only `zoosp`
# is); presumably this should be `zoosp` -- confirm before running.
wd2=paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/', fishnm, '/', zoonm, '/', sep='')
### NOW loop over files, load yearly dynamic raster files and predict habitat from HGAM models
for (i in 1:length(zooyrlist)){
# Look up this year's raster file in each environmental folder (ttb2/tts2/
# ttss2/ttbs2 are the parsed file years from the inventory section above).
bi=which(zooyrlist[i]==ttb2) # index of year
bt=loadRData(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/BT2/', btlist[[bi]], sep=''))
bi=which(zooyrlist[i]==tts2) # index of year
st=loadRData(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/ST2/', stlist[[bi]], sep=''))
bi=which(zooyrlist[i]==ttss2) # index of year
ss=loadRData(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/SS2/', sslist[[bi]], sep=''))
bi=which(zooyrlist[i]==ttbs2) # index of year
bs=loadRData(paste('/home/ryan/Git/NEhabitat/rasters/', SEASON,'/BS2/', bslist[[bi]], sep=''))
# Assemble one prediction data frame: dynamic layers (temps, salinities)
# plus static layers from the session (gd4 = depth, phi2 = grain size,
# rug2 = rugosity -- assumed; confirm against where they are created).
ef <- data.frame(coordinates(bt), val=values(bt))
colnames(ef)=c("LON", "LAT", "BOTTEMP")
ef$SURFTEMP=values(st)
ef$DEPTH=values(gd4)
ef$SURFSALIN=values(ss)
ef$BOTSALIN=values(bs)
ef$grnszmm=values(phi2)
ef$rug=values(rug2)
ef2=ef[complete.cases(ef),]
# tt=ef2$Stg
# tt2=fl[[1]][[1]][2][[1]][tt] # subsets to stage
# ef$Stg=factor(ef$Stg, levels=c("Adt", "Juv", "ich"))
# Delta model: probability of presence times predicted abundance
test1 <- predict.gam(usemodelpa, ef2, type='response')
test2 <- predict.gam(usemodelbio, ef2, type='response')
ef2$predpa=test1
ef2$predbio=test2
ef2$combinedout=test1*test2
wd3=paste(zooyrlist[i], '_', SEASON, '_', zoosp, '_', '.RData', sep="")
save(ef2, file=paste(wd2, wd3, sep=""))
# Rasterise the combined prediction and grow the yearly stack
spg1=ef2[,c('LON', 'LAT', 'combinedout')]
wd4=paste(zooyrlist[i], '_', 'RASTER', '_', SEASON, '_', zoosp, '_', '.RData', sep="")
# tes1=rasterFromXYZ(spg1[complete.cases(spg1$Stg),])
# save(tes1, file=paste(wd2,wd4, sep=''))
coordinates(spg1)= ~ LON + LAT
gridded(spg1)=T
rastDF=raster(spg1)
if (i == 1){
keepstack=rastDF
}
# plot(rastDF)
# save(rastDF, file=paste(wd2,wd4, sep=''))
if (i >1){
keepstack=stack(keepstack, rastDF)
}
# The full stack is re-saved every iteration (overwrites the same file)
save(keepstack, file=paste(wd2,'stacked_', SEASON, '_', zoosp, '_', '.RData', sep=""))
}
### save raster layers to new file:
# Layers 26-28 correspond to 2017-2019 given zooyrlist starts at 1992
t1=subset(keepstack, 26)
save(t1, file='/home/ryan/Git/NEhabitat/rasters/Fall/pseudo/RAST_NESREG_2017.04.01.07.TEMP.YEAR.000066596.RData')
t1=subset(keepstack, 27)
save(t1, file='/home/ryan/Git/NEhabitat/rasters/Fall/pseudo/RAST_NESREG_2018.04.01.07.TEMP.YEAR.000066596.RData')
t1=subset(keepstack, 28)
save(t1, file='/home/ryan/Git/NEhabitat/rasters/Fall/pseudo/RAST_NESREG_2019.04.01.07.TEMP.YEAR.000066596.RData')
### SAVE out model performance data to CSV file - deviance explained, AIC
# One row per PA/BIO model pair: deviance explained, AIC, effective degrees
# of freedom and residual df for both the presence/absence (PA) and the
# abundance (BIO) GAMs.
modeval=data.frame(matrix(nrow=length(modlistpa), ncol=9, data=NA))
for (i in 1:length(modlistpa)){
modchoice=i
usemodel=loadRData(paste(path1,modlistpa[modchoice], sep=''))
usemodelbio=loadRData(paste(path1,modlistpb[modchoice], sep=''))
modeval[i,1]=modlistpa[modchoice]
modeval[i,2]=summary(usemodel)$dev.expl
modeval[i,3]=summary(usemodelbio)$dev.expl
modeval[i,4]=usemodel$aic
modeval[i,5]=usemodelbio$aic
modeval[i,6]=sum(usemodel$edf)
modeval[i,7]=sum(usemodelbio$edf)
modeval[i,8]=df.residual(usemodel)
modeval[i,9]=df.residual(usemodelbio)
}
colnames(modeval)=c('model', 'PA.dev.exp','BIO.dev.exp','PA.aic','BIO.aic','PA.edf','BIO.edf','PA.res.df','BIO.res.df')
write.csv(modeval, file=paste(path1,'model_evaluation_', fishseas, '.csv', sep=""), row.names = F)
#### Save model hindcast output trends (mean, trend, variance)
## Load rasters
# Stacked hindcast rasters for the haddock model, split by life stage
# (ich/Juv/Adt) and by PA-only vs combined predictions.
p1=paste('/home/ryan/Git/NEhabitat/rasters/',fishseas,'/', fishnm, '/', sep='')
p2='fish_modGSe_fall_haddock' #'fish_modG4_spr_Haddock/'
p3=paste('/PA_only_stacked_', SEASON, '_', fishnm, '_', sep='') #'PA_only_stacked_Spr_Haddock_'
p4=paste('/stacked_', SEASON, '_', fishnm, '_', sep='') #'stacked_Spr_Haddock_'
ichpa=loadRData(paste(p1,p2,p3,'ich.RData', sep=''))
juvpa=loadRData(paste(p1,p2,p3,'Juv.RData', sep=''))
adtpa=loadRData(paste(p1,p2,p3,'Adt.RData', sep=''))
ich=loadRData(paste(p1,p2,p4,'ich.RData', sep=''))
juv=loadRData(paste(p1,p2,p4,'Juv.RData', sep=''))
adt=loadRData(paste(p1,p2,p4,'Adt.RData', sep=''))
### Run script and save as PDF
pdf(paste(path1, 'PA_Hindcast_',p2,'_Ich.pdf', sep=''), height=4, width=6)
plotRasterTrends(ichpa)
dev.off()
# Map of training observations binned by calfin_100m3 abundance
# (red: 3-6, green: 1-3, blue: <1); `addTrans` adds transparency.
map("worldHires", xlim=c(-77,-65),ylim=c(35,45), fill=T,border=0,col="gray70")
map.axes(las=1)
points(trainPA$LON[which(trainPA$calfin_100m3>3 & trainPA$calfin_100m3<6)], trainPA$LAT[which(trainPA$calfin_100m3>3 & trainPA$calfin_100m3<6)], col=addTrans('red', 5), pch=19)
points(trainPA$LON[which(trainPA$calfin_100m3>1 & trainPA$calfin_100m3<3)], trainPA$LAT[which(trainPA$calfin_100m3>1 & trainPA$calfin_100m3<3)], col=addTrans('green', 5), pch=19)
points(trainPA$LON[which(trainPA$calfin_100m3<1)], trainPA$LAT[which(trainPA$calfin_100m3<1)], col=addTrans('blue', 5), pch=19)
|
b30fbd3103e6b9c3d528d09a5aac74140f5667ff
|
b5c1b6ddcd5bbee3fa72d31f3bae261cadc3268f
|
/R/Utils.R
|
d26ac9c06de0058d14b321ab74be2d11fc5c0399
|
[] |
no_license
|
APCC21/rAPEX
|
2835ee59d2287efb6b5140858a726365558ee2ae
|
5c771ddbf38233308876b1553c15371f991fd36c
|
refs/heads/master
| 2021-05-13T20:33:08.075946
| 2018-01-10T05:58:09
| 2018-01-10T05:58:09
| 116,914,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,153
|
r
|
Utils.R
|
# ctrldir <- NewEnvList$ctrldir
# ctrlfile <- NewEnvList$ctrlfile
# rundir <- NewEnvList$rundir
# soldir <- NewEnvList$soldir
# wnddir <- NewEnvList$wnddir
# wp1dir <- NewEnvList$wp1dir
# dlydir <- NewEnvList$dlydir
# opsdir <- NewEnvList$opsdir
# RunNo <- 1
################################################################
# Stage all APEX input files for one control-table row (RunNo) into rundir.
# Resolves the soil / weather-station / operations identifiers listed in the
# control file to concrete file names via the lookup tables that live in
# rundir, then copies each matching file from its source folder.
Copy.APEX.Input.From.DBase <- function(ctrldir, ctrlfile, rundir, soldir, wnddir, wp1dir, dlydir, opsdir, RunNo,...){
  setwd(rundir)
  # Row of the control table that drives this run
  ctrl <- call.control.data (ctrldir, ctrlfile)
  # --- soil file: look up the file name for this run's SoilID ---
  soil.id <- ctrl$SoilID[RunNo]
  soil.tbl <- read.table(file.path(rundir, "SOIL.DAT"), header=F)
  soil.fn <- soil.tbl[which(soil.tbl[1] == soil.id), 2]
  file.copy(file.path(soldir, soil.fn), file.path(rundir, soil.fn), overwrite = T)
  # --- wind file: a single shared file, no lookup needed ---
  file.copy(file.path(wnddir, "1.WND"), file.path(rundir, "1.WND"), overwrite = T)
  # --- monthly weather (WP1) file for this run's station ---
  stn.id <- ctrl$WStnID[RunNo]
  wp1.tbl <- read.table(file.path(rundir, "WPM1.DAT"), header=F)
  wp1.fn <- wp1.tbl[which(wp1.tbl[1] == stn.id), 2]
  file.copy(file.path(wp1dir, wp1.fn), file.path(rundir, wp1.fn), overwrite = T)
  # --- daily weather (DLY) file, same station id ---
  dly.tbl <- read.table(file.path(rundir, "WDLST.DAT"), header=F)
  dly.fn <- dly.tbl[which(dly.tbl[1] == stn.id), 2]
  file.copy(file.path(dlydir, dly.fn), file.path(rundir, dly.fn), overwrite = T)
  # --- operations schedule (OPS); the lookup table is fixed-width ---
  ops.id <- ctrl$OpsID[RunNo]
  ops.tbl <- read.fwf(file.path(rundir, "OPSC.DAT"), widths=c(5,10,90), fileEncoding="UTF-8")
  ops.fn <- str_trim(ops.tbl[which(ops.tbl[1] == ops.id), 2])
  file.copy(file.path(opsdir, ops.fn), file.path(rundir, ops.fn), overwrite = T)
}
################################################################
# Move all APEX input and output files belonging to one run from the scratch
# run directory (EnvList$rundir) into its per-plot / per-scenario archive
# folder under EnvList$prjdir.
#
# @param EnvList list of environment paths; uses $rundir and $prjdir
# @param RunNo   row index into the control table selecting the run
#
# NOTE(review): `Call.control.data` (capital C) is not defined in this file
# (only `call.control.data` is); it is kept as-is on the assumption it is
# defined elsewhere in the package -- confirm.
Move.APEX.Input.Output.Files <- function(EnvList, RunNo){
  rundir <- EnvList$rundir # rundir : folder address where APEX model runs
  prjdir <- EnvList$prjdir
  # Get required information from Control file
  ctrldf <- Call.control.data (EnvList)
  PlotNm <- ctrldf$PlotNm[RunNo]
  ScnNm <- ctrldf$ScnNm[RunNo]
  outdir <- paste(prjdir, PlotNm, ScnNm, sep="/")
  SetWorkingDir(outdir)
  # One glob per file family to archive; replaces eleven copy-pasted
  # list.files()/file.rename() stanzas with a single data-driven loop.
  patterns <- c(
    "ApexOut.*",    # APEX output tables
    "*.OUT",        # other output files
    "APEXRUN.DAT",  # run-configuration input
    "APEXCONT.DAT", # control-configuration input
    "*.sit", "*.sub", "*.OPS", "*.SOL", "*.WND", "*.WP1", "*.DLY"
  )
  for (pat in patterns) {
    flist <- list.files(rundir, pattern = glob2rx(pat))
    if (length(flist) > 0) {
      file.rename(from = paste(rundir, flist, sep="/"),
                  to = paste(outdir, flist, sep="/"))
    }
  }
}
################################################################
# Read the run-control table (CSV) that drives the APEX batch runs.
#
# BUG FIX: the original immediately overwrote both arguments from a global
# `EnvList` (ctrldir <- EnvList$ctrldir, ...), which fails when no such
# global exists and silently ignores the caller's arguments otherwise
# (e.g. Copy.APEX.Input.From.DBase passes ctrldir/ctrlfile explicitly).
# The arguments are now honoured.
#
# @param ctrldir  directory containing the control file
# @param ctrlfile name of the control CSV file
# @return data.frame with one row per planned APEX run
call.control.data <- function(ctrldir, ctrlfile,...){
  setwd(ctrldir)  # cwd change preserved: downstream code may rely on it
  CtrlData <- read.csv(file = ctrlfile)
  return(CtrlData)
}
|
02d4f1c6f9c56f8102129c9b17261301d4b4f185
|
2c46f227e81ceec714b4a0170ae39bb0b74bfd5d
|
/R/extra.R
|
e88c125cdbf13514e531c9638877874791d1079d
|
[] |
no_license
|
tengfei/chromatoplots
|
f25d95a521ca09eceb79498588df9b1f93047ae6
|
858cec990aafbf58b8e95bbdc37414a7ac6b833c
|
refs/heads/master
| 2021-01-02T09:33:06.358088
| 2013-07-12T07:03:15
| 2013-07-12T07:03:15
| 926,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,691
|
r
|
extra.R
|
## cplotViewBL2 uses base graphics, so it can easily be embedded into an animation
## cplotViewBL2 <- function(object,protocal,raw,mz,xlim=NULL,ylim=NULL,type='l',subtract=T){
## time <- raw@scantime
## mzmin <- raw@mzrange[1]
## rawint <- raw@env$profile[mz-mzmin+1,]
## corint <- object@env$profile[mz-mzmin+1,]
## if (subtract) {
## res <- corint
## fit <- rawint - corint
## } else {
## res <- rawint - corint
## fit <- corint
## }
## if(is.null(xlim)){xlim=range(time)}
## if(is.null(ylim)){ylim=c(min(res),max(rawint))}
## par(mfrow=c(2,1),mar=c(5,2,1,1))
## plot(x=time,y=rawint,xlim=xlim,ylim=ylim,type=type,pch=20)
## lines(time,fit,col='gray50',lwd=2)
## plot(time,res,ylab='residuals',xlim=xlim,ylim=ylim,type=type,pch=20)
## }
## cplotAniBL <- function(raw,by=10,ylim=c(0,6000),size=c(800,300),mzrange=NULL,outdir=NULL){
## require(animation)
## if(is.null(mzrange)){
## mzrange <- raw@mzrange
## }
## mz <- seq(mzrange[1],mzrange[2],by=by)
## n <- length(mz)
## if(is.null(outdir)){
## ani.options(nmax=n+1,ani.width=size[1],ani.height=size[2])
## }else{
## ani.options(nmax=n+1,ani.width=size[1],ani.height=size[2],outdir=outdir)
## }
## ani.start()
## for(i in mz){
## cplotViewBL(raw,mz=i,ylim=ylim)
## }
## ani.stop()
## }
## cplotAniBL2 <- function(object,raw,by=10,ylim=c(0,6000),size=c(800,300),mzrange=NULL,outdir=NULL){
## require(animation)
## if(is.null(mzrange)){
## mzrange <- raw@mzrange
## }
## mz <- seq(mzrange[1],mzrange[2],by=by)
## n <- length(mz)
## if(is.null(outdir)){
## ani.options(nmax=n+1,ani.width=size[1],ani.height=size[2])
## }else{
## ani.options(nmax=n+1,ani.width=size[1],ani.height=size[2],outdir=outdir)
## }
## ani.start()
## for(i in mz){
## cplotViewBL2(object=object,raw=raw,mz=i,xlim=xlim,ylim=ylim)
## }
## ani.stop()
## }
## #cplotAniBL(raw_prof,ylim=c(0,2000),mzrange=c(50,80),by=1,outdir='~/Desktop/by1_50_70')
## cplotAniPK <- function(object,raw,by=10,size=c(800,300),xlim=NULL,mzrange=NULL,outdir=NULL){
## require(animation)
## if(is.null(xlim)) {xlim <- range(raw@scantime)}
## if(is.null(mzrange)){
## mzrange <- raw@mzrange
## }
## mz <- seq(mzrange[1],mzrange[2],by=by)
## n <- length(mz)
## if(is.null(outdir)){
## ani.options(nmax=n+1,ani.width=size[1],ani.height=size[2])
## }else{
## ani.options(nmax=n+1,ani.width=size[1],ani.height=size[2],outdir=outdir)
## }
## ani.start()
## for(i in mz){
## cplotPeaks2(obj=object,raw=raw,mz=i,rt_range=xlim)
## }
## ani.stop()
## }
#cplotAniPK(peaks,raw1,outdir='~/Desktop/pk/')
|
38056487972abd1744d48cdfa7c934e2c3524255
|
76b146e6530e98a8fcd15a93ac821ca09fad22e7
|
/gt-compare.r
|
c66ab8adc93757236461369063f637c75cfc30ae
|
[] |
no_license
|
ctan2020/test
|
af8e4ce8aca05979b2f49cdeba7bd1d9c1d2f6f0
|
28e093ed4a4dee31b0fd73ef1d126d54a51068aa
|
refs/heads/master
| 2020-09-08T12:12:03.068436
| 2019-11-13T07:33:55
| 2019-11-13T07:33:55
| 221,129,575
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
gt-compare.r
|
# Pairwise variant comparison between seven barley cultivars.
# argv[1]: tab-separated genotype table; columns 6-12 hold the variant calls
#          for the cultivars in `vt`; the value 2 encodes the Morex
#          reference allele.
# argv[2]: output file; receives the pairwise-difference matrix, the
#          shared-variant matrix and the per-cultivar counts, in that order.
argv <- commandArgs(T)
df <- read.table(argv[1],header=F,sep="\t")
vt <- c("Commander","Fleet","Hindmash","La_Trobe","Scope","Vlamingh","WI4304")
diff <- matrix(0, ncol=7, nrow=7, dimnames=list(vt, vt))
# FIX: the original re-labelled `diff` here a second time and never named
# `comm`; both matrices now get their own row/column names.
comm <- matrix(0, ncol=7, nrow=7, dimnames=list(vt, vt))
self <- numeric(7)
names(self) <- vt
for (i in 6:12){
  # FIX: genotype columns 6..12 map onto matrix indices 1..7, so the column
  # offset is 5; the original used i-4/j-4, which indexes past the 7x7
  # matrices for the last column (out-of-bounds assignment error).
  ii <- i - 5
  for (j in i:12){
    jj <- j - 5
    # positions where the two cultivars' calls differ
    d <- df[,i] != df[,j]
    diff[ii,jj] <- sum(d, na.rm=TRUE)
    diff[jj,ii] <- diff[ii,jj]
    # shared non-reference variants; FIX: elementwise `&` (the original used
    # the scalar `&&`, which errors on vectors in modern R) and numeric 2
    # for consistency with the reference encoding used below.
    cm <- df[,i] == df[,j] & df[,i] != 2
    comm[ii,jj] <- sum(cm, na.rm=TRUE)
    comm[jj,ii] <- comm[ii,jj]
  }
  # mutation count against the Morex reference
  s <- df[,i] != 2
  self[ii] <- sum(s, na.rm=TRUE)
}
write.table(diff,argv[2],col.names=T,row.names=T)
write.table(comm,argv[2],col.names=T,row.names=T,append=T)
write.table(self,argv[2],append=T)
|
8f043d9616333abb1d0c23ce038694db54c897ca
|
92626a21f23ab35e82cb439255e10cde2a7047c1
|
/man/Tsoil.Rd
|
99f3bc11e8ed0a51814d3e977ca4beae42859c16
|
[
"MIT"
] |
permissive
|
ArchiYujie/TrenchR
|
04630ddd078eca187a517c0c98e59065b3054a74
|
f45c2f0b54eab4ce578c0b3b631f9d93058ba731
|
refs/heads/master
| 2023-07-16T22:25:21.419072
| 2021-08-26T21:30:12
| 2021-08-26T21:30:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,451
|
rd
|
Tsoil.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{Tsoil}
\alias{Tsoil}
\title{Statistical approximation of soil temperature}
\usage{
Tsoil(Tg_max, Tg_min, hour, depth)
}
\arguments{
\item{Tg_max}{daily maximum soil surface temperature (°C)}
\item{Tg_min}{daily minimum soil surface temperature (°C)}
\item{hour}{hour of the day}
\item{depth}{depth below the soil surface (cm) at which temperature is estimated}
}
\value{
soil temperature (°C)
}
\description{
Statistical approximation of soil temperature
}
\details{
This function allows you to estimate soil temperature at a given depth and hour approximating diurnal variation as sinusoidal (adapted from Campbell and Norman 1998). Source: Riddell EA. 2017. Physical calculations of resistance to water loss improve predictions of species range models. Ecological Monographs 87: 21-23.
}
\examples{
\dontrun{
Tsoil(Tg_max=30, Tg_min=15, hour=12, depth=5)
}
}
\seealso{
Other biophysical models:
\code{\link{Free_or_forced_convection}()},
\code{\link{Grashof_number_Gates}()},
\code{\link{Grashof_number}()},
\code{\link{Nu_from_Gr}()},
\code{\link{Nu_from_Re}()},
\code{\link{Nusselt_number}()},
\code{\link{Prandtl_number}()},
\code{\link{Qconduction_animal}()},
\code{\link{Qconduction_substrate}()},
\code{\link{Qconvection}()},
\code{\link{Qemitted_thermal_radiation}()},
\code{\link{Qevaporation}()},
\code{\link{Qmetabolism_from_mass_temp}()},
\code{\link{Qmetabolism_from_mass}()},
\code{\link{Qnet_Gates}()},
\code{\link{Qradiation_absorbed}()},
\code{\link{Qthermal_radiation_absorbed}()},
\code{\link{Reynolds_number}()},
\code{\link{Tb_CampbellNorman}()},
\code{\link{Tb_Fei}()},
\code{\link{Tb_Gates2}()},
\code{\link{Tb_Gates}()},
\code{\link{Tb_butterfly}()},
\code{\link{Tb_grasshopper}()},
\code{\link{Tb_limpetBH}()},
\code{\link{Tb_limpet}()},
\code{\link{Tb_lizard}()},
\code{\link{Tb_mussel}()},
\code{\link{Tb_salamander_humid}()},
\code{\link{Tb_snail}()},
\code{\link{Tbed_mussel}()},
\code{\link{actual_vapor_pressure}()},
\code{\link{boundary_layer_resistance}()},
\code{\link{external_resistance_to_water_vapor_transfer}()},
\code{\link{heat_transfer_coefficient_approximation}()},
\code{\link{heat_transfer_coefficient_simple}()},
\code{\link{heat_transfer_coefficient}()},
\code{\link{saturation_vapor_pressure}()},
\code{\link{saturation_water_vapor_pressure}()}
}
\author{
Eric Riddell
}
\concept{biophysical models}
\keyword{soil}
\keyword{temperature}
|
7ac058d2908eed38aa28fe8f127a13edec5164ee
|
8c1daa6967fd693652dd1eac38a9f666fc65c8ee
|
/man/classify.variant.Rd
|
16f8139c847c80345c20790d11fcc83287c74029
|
[] |
no_license
|
cran/varitas
|
ae90d05e61f5004a07d09ec5861724218215fdcd
|
603e4ec1d6d90678eb54486f7a0faf6a76a14114
|
refs/heads/master
| 2021-01-13T21:42:36.802682
| 2020-11-13T23:30:03
| 2020-11-13T23:30:03
| 242,504,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
classify.variant.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{classify.variant}
\alias{classify.variant}
\title{classify.variant}
\usage{
classify.variant(ref, alt)
}
\arguments{
\item{ref}{Vector of reference bases}
\item{alt}{Vector of alternate bases}
}
\value{
Character vector giving type of variant.
}
\description{
Classify a variant as SNV, MNV, or indel based on the reference and alternative alleles
}
|
211f1bcd9d974e1541d6bb6d4a332dc601021e77
|
4359d75816ac645b6b80e72b75068f1d4ffc5164
|
/man/stacked_bar_ses.Rd
|
fe867e9c480a28a2f224ef14275ebac2dceba990
|
[] |
no_license
|
Changing-Cities-Research-Lab/seattleViz
|
04e5e3cfad30a57b632614fed310729ebc2b0a7b
|
fbcb42776e3dbf74153f24d509801d7b5cfb288d
|
refs/heads/main
| 2023-04-13T15:55:07.183707
| 2021-04-12T23:06:48
| 2021-04-12T23:06:48
| 337,885,525
| 0
| 2
| null | 2021-02-25T06:54:14
| 2021-02-10T23:48:54
| null |
UTF-8
|
R
| false
| true
| 980
|
rd
|
stacked_bar_ses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stacked_bar_ses.R
\name{stacked_bar_ses}
\alias{stacked_bar_ses}
\title{Produce panel of stacked bar charts of SES by a 'periods' column}
\usage{
stacked_bar_ses(
dat,
var,
y_title = NULL,
save = F,
savename = "plot.png",
title = NULL,
caption = "test"
)
}
\arguments{
\item{dat}{Data with a column containing variables of interest and grouping variables.}
\item{var}{Name of column containing variable to plot}
\item{y_title}{Title to display along y-axis.}
\item{save}{T if user would like to return plot object and save file, F (default) to just return object.}
\item{savename}{File name of map for saving.}
\item{title}{Title for the plot.}

\item{caption}{Caption for the figure.}
}
\value{
Panel of stacked bar charts of different groupings
}
\description{
This function takes in data and produces a panel of stacked bar charts,
based on different groupings. Intended to be used with oakViz::aggregate_categories()
}
|
961875b02af9dd8ef170f34f7d6e558fe1df1b99
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MASS/examples/rnegbin.Rd.R
|
4d67ac4a3d6bfedb0b35831209625fe3917c0264
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
rnegbin.Rd.R
|
# Extracted example for MASS::rnegbin (simulate negative-binomial variates).
library(MASS)
### Name: rnegbin
### Title: Simulate Negative Binomial Variates
### Aliases: rnegbin
### Keywords: distribution
### ** Examples
# Negative Binomials with means fitted(fm) and theta = 4.5
# Fit a negative-binomial GLM to the `quine` school-absence data, then draw
# one simulated count per observation using the fitted means as mu.
fm <- glm.nb(Days ~ ., data = quine)
dummy <- rnegbin(fitted(fm), theta = 4.5)
|
f47382068f5738174d16982ef2b127765e08fb21
|
ea1f64b248492c2ade00097b7de4e63880965f0f
|
/man/summary.textmodel_wordshoal_fitted.Rd
|
1d441d6d4952751d5cb9e9cbf500f04766adfc30
|
[] |
no_license
|
amatsuo/wordshoal
|
e485e4b90ff4c00d5479d9c43c70998ffbe85666
|
951a57671ab536921435167e756599d4b98effce
|
refs/heads/master
| 2021-05-10T13:38:52.497458
| 2017-12-27T19:23:14
| 2017-12-27T19:23:14
| 118,483,291
| 0
| 0
| null | 2018-01-22T16:27:17
| 2018-01-22T16:27:17
| null |
UTF-8
|
R
| false
| true
| 549
|
rd
|
summary.textmodel_wordshoal_fitted.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textmodel_wordshoal.R
\name{summary.textmodel_wordshoal_fitted}
\alias{summary.textmodel_wordshoal_fitted}
\title{Summarize a fitted textmodel_wordshoal object.}
\usage{
\method{summary}{textmodel_wordshoal_fitted}(object, ...)
}
\arguments{
\item{object}{results of \code{\link{textmodel_wordshoal}} to be summarized}
\item{...}{additional arguments passed to \code{print}}
}
\description{
\code{summary} method for a fitted \code{\link{textmodel_wordshoal}} object.
}
|
3d1cf1e18b00bd747d4a19cb15cd1fe6efae8ee7
|
37220ae10e03d433b9574d92671958f3efc3e18f
|
/R/AllClasses.R
|
992b2a8049757625f2f571c9919c48f95ddd4ea6
|
[] |
no_license
|
ge11232002/JASPAR2016
|
20ca180b3d842baa8274b497e50f022a5a1e8a9a
|
2a2354b063e4e3170b2ffca1b47a618509e5e47c
|
refs/heads/master
| 2021-01-10T11:20:24.971892
| 2015-12-31T15:10:53
| 2015-12-31T15:10:53
| 48,853,136
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103
|
r
|
AllClasses.R
|
# S4 class "JASPAR2016": a lightweight handle whose single character slot
# `db` stores the location/identifier of the JASPAR 2016 database.
setClass("JASPAR2016", slots = c(db = "character"))
|
45963a82152760aa7f395034a76cdf11fd0ab8b1
|
e4c1423e571869c1247a40c90dae0615adc62bcf
|
/parse_aux/aux_results.R
|
95d43c57c12d66c0a319391a43dbe9f50d2a6295
|
[
"MIT"
] |
permissive
|
dquesadacr/Rep_SDDL
|
23621c8ba7d3af41966f061a6922f48fdfdf2954
|
1c77a1809f119b1247d4345127f7012a4374309a
|
refs/heads/main
| 2023-04-09T15:23:40.729104
| 2022-01-16T13:53:34
| 2022-01-16T13:53:34
| 448,561,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,302
|
r
|
aux_results.R
|
# Written by Dánnell Quesada-Chacón
# Collection of functions to parse and plot the validation metrics
# Reshape a climate4R validation grid into a gridcells x models data.frame.
#
# `x` carries a model ("var") dimension plus spatial dimensions; after
# dropping singleton dimensions the array is flattened to one row per model
# and one column per grid cell.  Rows that are entirely NA (models without
# results) are removed, as are columns containing any NA (presumably
# no-data/ocean cells -- confirm).  The surviving matrix is transposed so
# each column is one model, named after the matching entry of `models`.
fix_index <- function(x, models) {
# redim(drop = TRUE) removes length-1 dimensions before flattening
index <- (x %>% redim(drop = TRUE))$Data
if (length(dim(index)) == 2) {
# single model: collapse both spatial dims into one long row
dim(index) <- c(1, prod(dim(index)[1:2]))
} else {
# several models: keep the model dimension, flatten the spatial ones
dim(index) <- c(nrow(index), prod(dim(index)[2:3]))
}
# drop models whose every grid cell is NA
na_models <- (rowSums(is.na(index)) == ncol(index)) %>% which()
if (length(na_models) > 0) {
index <- index[-na_models, ]
}
# keep only grid cells with complete data across the remaining models
indLand <- (!apply(index, MARGIN = 2, anyNA)) %>% which()
index <- as.data.frame(index[, indLand] %>% t())
if (length(na_models) > 0) {
colnames(index) <- models[-na_models]
} else {
colnames(index) <- models
}
return(index)
}
# Variant of fix_index() for validation grids with no model dimension:
# always flattens the (dropped) array into a single row before applying the
# same NA-model / NA-cell filtering and naming as fix_index().
fix_index_2 <- function(x, models) {
index <- (x %>% redim(drop = TRUE))$Data
# collapse the two remaining dimensions into one long row
dim(index) <- c(1, prod(dim(index)[1:2]))
na_models <- (rowSums(is.na(index)) == ncol(index)) %>% which()
if (length(na_models) > 0) {
index <- index[-na_models, ]
}
# keep only columns (grid cells) without NA
indLand <- (!apply(index, MARGIN = 2, anyNA)) %>% which()
index <- as.data.frame(index[, indLand])
if (length(na_models) > 0) {
colnames(index) <- models[-na_models]
} else {
colnames(index) <- models
}
return(index)
}
# Five-number summary for ggplot2::stat_summary(fun.data = ...):
# whiskers at the 10th/90th percentiles, hinges at the quartiles,
# middle line at the median.
calc_stat <- function(x) {
qs <- quantile(x, probs = c(0.1, 0.25, 0.5, 0.75, 0.9))
setNames(qs, c("ymin", "lower", "middle", "upper", "ymax"))
}
# As calc_stat(), but with wider whiskers: 2nd/98th percentiles instead of
# 10th/90th.  Hinges stay at the quartiles, middle line at the median.
calc_stat2 <- function(x) {
qs <- quantile(x, probs = c(0.02, 0.25, 0.5, 0.75, 0.98))
setNames(qs, c("ymin", "lower", "middle", "upper", "ymax"))
}
# Harvest training/validation summaries for every iteration folder in
# `folders` (resolved relative to the global `path` as "V-<folder>").
# For each seed subfolder and each saved run it:
#   * loads hist_train_CNN_* -> per-model best val_loss/loss and epoch count
#   * loads validation_CNN_* -> per-model spatial medians of nine metrics
# and binds everything into one data.frame with Run/seed/iter columns.
# NOTE(review): relies on names supplied by the session and the .rda files:
# the global `path`, `history_trains`, `validation.list`, and helpers from
# dplyr/stringr/tibble/matrixStats/transformeR -- confirm they are loaded.
raw_read <- function(folders) {
sapply(folders, simplify = F, function(u) {
setwd(paste0(path, "/V-", u))
# seed folders = first path component of every file matching "validation"
seeds <- dir(path = "./", pattern = "validation", recursive = T) %>%
str_split("/", simplify = T) %>%
.[, 1] %>%
unique()
df <- sapply(seeds, simplify = F, function(x) {
runs_CNN <- dir(path = x, pattern = "hist_train_CNN")
validation_CNN <- dir(path = x, pattern = "validation_CNN")
vl_tib <- sapply(1:length(runs_CNN), simplify = F, function(y) {
print(paste0(u, "--", x, "--", y))
# loads `history_trains` into the global environment
load(paste0("./", x, "/", runs_CNN[y]), .GlobalEnv)
# per model: loss/val_loss at the epoch with minimal val_loss, plus epochs
bern_loss <- rownames_to_column(as.data.frame(t(sapply(names(history_trains), FUN = function(z) {
m_index <- which.min(history_trains[[z]]$metrics$val_loss)
if (length(m_index) == 1) {
return(c(
loss = history_trains[[z]]$metrics$loss[m_index],
val_loss = history_trains[[z]]$metrics$val_loss[m_index],
epochs = length(history_trains[[z]]$metrics$val_loss)
))
} else {
# which.min() found nothing (e.g. all-NA val_loss): keep row, flag with NAs
return(c(
loss = NA,
val_loss = NA,
epochs = length(history_trains[[z]]$metrics$val_loss)
))
}
}))), var = "models")
# loads `validation.list`; element 10 holds model names and is dropped
load(paste0("./", x, "/", validation_CNN[y]))
ylabs <- c(
"ROCSS", "RMSE (mm)",
"Spearman", "RB (%)",
"RBp98 (DET, %)", "RBp98 (STO, %)",
"RAAC", "WetAMS (days)",
"DryAMS (days)"
)
validation.list <- validation.list[-10]
names(validation.list) <- ylabs
# relative-bias metrics come as fractions; convert to percent
for (t in grep("%)", names(validation.list))) {
validation.list[[t]]$Data <- validation.list[[t]]$Data * 100
}
nicenames <- c("ROCSS", "RMSE", "Spear", "RB", "RB98Det", "RB98Sto", "RAAC", "WetAMS", "DryAMS")
names(validation.list) <- nicenames
if (length(names(history_trains)) == 1) {
# single model: the metric grid can be flattened directly
models_summ <- NULL
for (v in names(validation.list)) {
models_summ <- bind_cols(
models_summ,
validation.list[[v]]$Data %>%
as.vector() %>%
t() %>%
as.tibble() %>%
summarise("median.{v}" := rowMedians(as.matrix(.), na.rm = T))
)
}
models_all <- bind_cols(bern_loss, models_summ)
} else {
# several models: subset the "var" dimension first, then flatten per model
models_summ <- NULL
for (v in names(validation.list)) {
models_summ <-
bind_cols(
models_summ,
subsetDimension(validation.list[[v]],
dimension = "var",
indices = 1:length(names(history_trains))
) %>%
redim(drop = TRUE) %>%
.$Data %>%
apply(., 1L, c) %>%
t() %>%
as.tibble() %>%
summarise("median.{v}" := rowMedians(as.matrix(.), na.rm = T))
)
}
models_all <- bind_cols(bern_loss, models_summ)
}
models_all$Run <- as.character(y)
return(as.data.frame(models_all))
}) %>% do.call(rbind.data.frame, .)
vl_tib$seed <- as.character(x)
return(vl_tib)
}) %>% do.call(rbind.data.frame, .)
df$iter <- as.character(u)
return(df)
}) %>%
do.call(rbind.data.frame, .) %>%
relocate(Run, seed, iter, .after = epochs)
}
# Faceted boxplot panel of validation metrics for the selected best models.
# For each row of `df` the matching validation_CNN_*.rda is reloaded,
# reshaped via fix_index(), restricted to that row's model, and the result
# feeds a ggplot (one facet per metric) saved under <path>/boxplots.
# NOTE(review): depends on the globals `path` and `nm2plot` (number of
# models, only used to pick the colour scale) and on the .rda providing
# `validation.list` -- confirm they are set before calling.
df_filter_plot <- function(df, plot_name) {
setwd(path)
df_filter <- sapply(1:nrow(df), function(x) {
load(paste0("./V-", df$iter[x], "/", df$seed[x], "/validation_CNN_", df$Run[x],".rda"))
ylabs <- c(
"ROCSS", "RMSE (mm)",
"Spearman", "RB (%)",
"RBp98D (%)", "RBp98S (%)",
"RAAC", "WetAMS (days)",
"DryAMS (days)"
)
# element 10 holds the model names; the first nine are the metrics
names2 <- validation.list[[10]]
validation.list <- validation.list[-10]
names(validation.list) <- ylabs
# bias metrics come as fractions; express them in percent
for (y in grep("%)", names(validation.list))) {
validation.list[[y]]$Data <- validation.list[[y]]$Data * 100
}
validation_fix <- sapply(validation.list, fix_index, models = names2, simplify = F) %>%
reshape2::melt() %>%
filter(variable == df$models[x]) %>%
droplevels()
colnames(validation_fix) <- c("Model", "value", "metric")
# compact facet label: trimmed model name + rank, seed and epoch count
models_nice <- df$models[x] %>%
str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
str_remove("T-") %>%
paste0(., ", R=", df$Rank[x], "\nS=", df$seed[x], ", E=", df$epochs[x])
validation_fix$Model <- models_nice
levels(validation_fix$Model) <- models_nice
validation_fix$metric <- factor(validation_fix$metric, levels = c(
"ROCSS", "Spearman", "RMSE (mm)",
"RB (%)", "RBp98D (%)", "RBp98S (%)",
"RAAC", "WetAMS (days)", "DryAMS (days)"
))
return(validation_fix)
}, simplify = F) %>% do.call(rbind.data.frame, .)
# boxes span the 10th-90th percentiles (see calc_stat)
cnn_plot <- ggplot(df_filter, aes(x = Model, y = value, color = Model)) +
facet_wrap(~metric, scales = "free") +
stat_summary(fun.data = calc_stat, geom = "boxplot", width = 0.6, lwd = 0.35) + # , size=0.33
theme_light(base_size = 10, base_family = "Helvetica") +
guides(color = guide_legend(ncol = 4)) +
theme(
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
axis.title.y = element_blank(),
strip.background = element_rect(fill = "white"),
strip.text = element_text(color = "black", margin = margin(0, 0, 0.5, 0, unit = "mm")),
legend.key.size = unit(2.5, "mm"),
legend.box.margin = margin(-3.25, 8, 0, 0, unit = "mm"),
legend.position = "bottom",
legend.title = element_blank(),
legend.text = element_text(margin = margin(0.2, 0.2, 0.2, 0.2, unit = "mm")),
panel.spacing = unit(1, "mm"),
plot.margin = margin(0, 0, -1.5, 0, unit = "mm")
)
# colour scale chosen from the model count (global `nm2plot`);
# note the conditions overlap, so later branches can override earlier ones
if (nm2plot <= 20 && nm2plot >= 10) {
cnn_plot <- cnn_plot + scale_color_ucscgb()
}
if (nm2plot <= 10) {
cnn_plot <- cnn_plot + scale_color_jco()
}
if (nm2plot >= 10) {
cnn_plot <- cnn_plot + scale_color_viridis_d(option = "turbo")
}
if (nm2plot == 12) {
cnn_plot <- cnn_plot + scale_color_manual(values = c(
"#0073c2", "#EFC000", "#A73030", "#868686",
"#641ea4", "#76CD26", "#E67300", "#1929C8",
"#cd2926", "#3c3c3c", "#1B6F1B", "#82491E"
))
}
ggsave(
plot = cnn_plot, filename = paste0("./boxplots/Best-", nm2plot, "_", plot_name, ".pdf"),
height = 100, width = 175, units = "mm"
)
return(paste0("Check plot in: ", path, "/boxplots"))
}
# Spatial (map) panel of validation metrics for the selected models.
# Rows of `df` picked by `ind_selection` are re-validated from their saved
# validation_CNN_*.rda, rasterised per metric (indices `ind_metrics`) and
# drawn as a metric x model facet grid with two fill scales: "A" for
# ROCSS/Spearman (sequential) and "B" for the remaining metrics
# (diverging around 0).  The figure is saved under <path>/spatial and the
# assembled data frame is returned.
# NOTE(review): uses the global `path`, writes `names2` into the calling
# environment via `<<-`, and needs raster/sp/sf/transformeR/magrittr/
# ggnewscale helpers -- confirm they are attached.
spatial_plot <- function(df, ind_selection, ind_metrics, plot_name) {
setwd(path)
df <- df[ind_selection, ]
ylabs <- c(
"ROCSS", "RMSE (mm)",
"Spearman", "RB (%)",
"RBp98D (%)", "RBp98S (%)",
"RAAC", "WetAMS",
"DryAMS"
)
df_s <- df %>% group_split(iter)
validation.df <- lapply(df_s, function(m) {
validations <- sapply(1:nrow(m), function(x) {
load(paste0(path, "/V-", m$iter[x], "/", m$seed[x], "/validation_CNN_", m$Run[x],".rda"))
# `<<-`: model names are pushed up so the code after this sapply can use them
names2 <<- validation.list[[10]]
validation.list <- validation.list[-10]
names(validation.list) <- ylabs
# relative-bias metrics as percent
for (y in grep("RB", names(validation.list))) {
validation.list[[y]]$Data <- validation.list[[y]]$Data * 100
}
return(validation.list)
}, simplify = F)
names(validations) <- m$models
# trim model names the same way in both tables so match() lines them up
models2plot <- m$models %>%
str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
str_remove("T-")
models_sp <- names2 %>%
unique() %>%
str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
str_remove("T-")
ind2plot <- match(models2plot, models_sp)
models_SR <- models2plot %>%
paste0(., "\nR=", m$Rank, ", S=", m$seed)
# one long data frame: metric grid -> raster -> SpatialPixels -> x/y/value
vdf <- lapply(ind_metrics, FUN = function(z) {
lapply(1:length(validations), FUN = function(w) {
index <- subsetDimension(validations[[w]][[z]],
dimension = "var",
indices = ind2plot[w]
) %>% redim(drop = TRUE)
index_ext <- extent(index$xyCoords)
rast.spdf <- raster(index$Data,
xmn = index_ext[1], xmx = index_ext[2],
ymn = index_ext[3], ymx = index_ext[4]
) %>%
flip(direction = "y") %>%
as(., "SpatialPixelsDataFrame") %>%
as.data.frame() %>%
set_colnames(c("value", "x", "y"))
rast.spdf$metric <- as.factor(ylabs[z])
rast.spdf$model <- models_SR[w]
return(rast.spdf)
}) %>% do.call(rbind.data.frame, .)
}) %>% do.call(rbind.data.frame, .)
}) %>%
do.call(rbind.data.frame, .) %>%
as.tibble()
# fix the facet (column) order of models
mod_facts <- df$models %>%
unique() %>%
str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
str_remove("T-") %>%
paste0(., "\nR=", df$Rank, ", S=", df$seed)
validation.df %<>% mutate(model = factor(model, levels = mod_facts))
# scale "A": ROCSS & Spearman (ylabs 1 and 3); scale "B": everything else
validation.df$scales <- as.factor(ifelse(validation.df$metric %in% ylabs[c(1, 3)], "A", "B"))
colors2plot <- colorRampPalette(colors = c("#32b732", "#b3de32", "#efef19", "#ff8c19"))(20)
spatial_plot_nice <- ggplot() +
geom_tile(data = validation.df %>% filter(scales == "A"), aes(x = x, y = y, fill = value)) +
facet_grid(metric ~ model) +
coord_sf(crs = sf::st_crs(4326)) +
scale_x_continuous(breaks = c(13.2, 13.6, 14)) +
scale_y_continuous(breaks = c(50.7, 50.9)) +
theme_light(base_size = 10, base_family = "Helvetica") +
scale_fill_gradientn(
colours = colors2plot,
limits = c(0.62, 0.881),
breaks = c(0.65, 0.75, 0.85),
name = "", labels = c(0.65, 0.75, 0.85)
) +
new_scale_fill() +
geom_tile(data = validation.df %>% filter(scales == "B"), aes(x = x, y = y, fill = value)) +
facet_grid(metric ~ model) +
coord_sf(crs = sf::st_crs(4326), ) +
geom_text(
data = validation.df %>% group_by(model, metric) %>%
summarise(median = signif(median(value), 3)),
aes(x = 13.8, y = 50.65, label = median), size = 2.75
) +
scale_fill_gradient2(
low = "dodgerblue2", high = "red2", mid = "gray95",
midpoint = 0, name = "", limits = c(-43, 43), na.value = "red2"
) +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1),
strip.background = element_rect(fill = "white"),
strip.text = element_text(color = "black"),
strip.text.x = element_text(margin = margin(c(0, 0, 1, 0), unit = "mm")),
strip.text.y = element_text(margin = margin(c(0, 0, 0, 1), unit = "mm")),
legend.key.size = unit(3, "mm"),
legend.key.width = unit(10, "mm"),
legend.position = "bottom",
legend.box.margin = margin(-3.5, 0, -2, 0, unit = "mm"),
panel.spacing = unit(1.25, "mm"),
plot.margin = margin(0, 0, 0, 0, unit = "mm")
)
ggsave(paste0(path, "/spatial/Best_spatial_", plot_name, ".pdf"),
plot = spatial_plot_nice, dpi = 600, width = 175, height = 125, units = "mm"
)
return(validation.df)
}
# Collect, for every (iteration, seed, model) row of `df`, the best epoch
# (minimum validation loss) of each saved CNN training run on disk and return
# a single data frame with columns: models, loss, val_loss, epochs, Run, seed, iter.
#
# NOTE(review): relies on the global `path`, changes the working directory via
# setwd(), and load()s RData files into .GlobalEnv (each file is expected to
# define `history_trains`) — global side effects inherited from the original
# pipeline; confirm before reusing outside it.
reprodf_runs <- function(df) {
  df2 <- sapply(1:nrow(df), simplify = F, function(u) {
    # One "V-<iter>" folder per validation iteration; run artifacts live under the seed dir.
    setwd(paste0(path, "/V-", df$iter[u]))
    seed <- as.character(df$seed[u])
    label <- as.character(df$label[u])
    runs_CNN <- dir(path = seed, pattern = "hist_train_CNN")
    validation_CNN <- dir(path = seed, pattern = "validation_CNN")
    vl_tib <- sapply(1:length(runs_CNN), simplify = F, function(y) {
      # Defines `history_trains` (list of keras-style history objects) in .GlobalEnv.
      load(paste0("./", seed, "/", runs_CNN[y]), .GlobalEnv)
      bern_loss <- rownames_to_column(as.data.frame(t(sapply(as.character(df$models[u]),
        FUN = function(z) {
          # Epoch with the lowest Bernoulli-Gamma validation loss for model z.
          m_index <- which.min(history_trains[[z]]$metrics$val_loss)
          if (length(m_index) == 1) {
            return(c(
              loss = history_trains[[z]]$metrics$loss[m_index],
              val_loss = history_trains[[z]]$metrics$val_loss[m_index],
              epochs = length(history_trains[[z]]$metrics$val_loss)
            ))
          } else {
            # which.min() found no minimum (e.g. all-NA losses): keep the row, mark NA.
            return(c(
              loss = NA,
              val_loss = NA,
              epochs = length(history_trains[[z]]$metrics$val_loss)
            ))
          }
        }
      ))), var = "models") # %>% arrange(., val_loss)
      bern_loss$Run <- as.character(y)
      return(as.data.frame(bern_loss))
    }) %>% do.call(rbind.data.frame, .)
    vl_tib$seed <- as.character(seed)
    vl_tib$epochs %<>% as.character()
    vl_tib$Run %<>% as.character(.) %>% factor(., levels = as.character(1:length(runs_CNN)))
    # Shorten the model id into a plot label. Assumes one model per `df` row:
    # unique() yields a single value that is then recycled across the column.
    vl_tib$models %<>% unique() %>%
      str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
      str_remove("T-") %>%
      paste0(., "\nS=", seed)
    if (!identical(label, character(0))) {
      vl_tib$models %<>% paste0(., ", ", label)
    }
    levels(vl_tib$models) <- vl_tib$models %>% unique()
    vl_tib$iter <- as.character(df$iter[u])
    return(vl_tib)
  }) %>% do.call(rbind.data.frame, .)
}
# Gather per-run validation metrics for each (iteration, seed, model) row of
# `df` into one long-format data frame (Model, value, metric, Run, seed, iter).
#
# @param to_use integer indices selecting which of the nine stored metrics
#   (ROCSS, RMSE, Spearman, RB, RBp98D, RBp98S, RAAC, WetAMS, DryAMS) to keep.
#
# NOTE(review): relies on globals `path`, `fix_index` and `fix_index_2`;
# load() is expected to define `history_trains` and `validation.list`;
# setwd() mutates the working directory — all inherited from the pipeline.
reprodf_metrics <- function(df, to_use = c(1, 4, 7, 8, 9)) {
  df2 <- sapply(1:nrow(df), simplify = F, function(u) {
    setwd(paste0(path, "/V-", df$iter[u]))
    seed <- as.character(df$seed[u])
    label <- as.character(df$label[u])
    runs_CNN <- dir(path = seed, pattern = "hist_train_CNN")
    validation_CNN <- dir(path = seed, pattern = "validation_CNN")
    metric_tib <- sapply(1:length(runs_CNN), simplify = F, function(y) {
      # Defines `history_trains` and `validation.list` for this run.
      load(paste0("./", seed, "/", runs_CNN[y]))
      load(paste0("./", seed, "/", validation_CNN[y]))
      ylabs <- c(
        "ROCSS", "RMSE",
        "Spearman", "RB",
        "RBp98D", "RBp98S",
        "RAAC", "WetAMS",
        "DryAMS"
      )
      validation.list <- validation.list[to_use]
      names(validation.list) <- ylabs[to_use]
      # Relative-bias metrics are stored as fractions; rescale to percent.
      for (t in grep("RB", names(validation.list))) {
        validation.list[[t]]$Data <- validation.list[[t]]$Data * 100
      }
      # Single-model runs use a different index fixer than multi-model runs.
      if (length(names(history_trains)) == 1) {
        validation_fix <- sapply(validation.list, fix_index_2,
          simplify = F,
          models = names(history_trains)
        ) %>%
          reshape2::melt()
        colnames(validation_fix) <- c("Model", "value", "metric")
      } else {
        validation_fix <- sapply(validation.list, fix_index,
          simplify = F,
          models = names(history_trains)
        ) %>%
          reshape2::melt()
        colnames(validation_fix) <- c("Model", "value", "metric")
      }
      # Keep only the model named in this `df` row (substring match).
      validation_fix %<>% filter(str_detect(Model, as.character(df$models[u])))
      validation_fix$metric <- factor(validation_fix$metric, levels = ylabs[to_use])
      validation_fix$Run <- as.character(y)
      return(as.data.frame(validation_fix))
    }) %>% do.call(rbind.data.frame, .)
    metric_tib$Run %<>% as.character(.) %>% factor(., levels = as.character(1:length(runs_CNN)))
    metric_tib$seed <- as.character(seed)
    # Shorten model id to a plot label; assumes one model per row (value recycled).
    metric_tib$Model %<>% unique() %>%
      str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
      str_remove("T-") %>%
      paste0(., "\nS=", seed)
    if (!identical(label, character(0))) {
      metric_tib$Model %<>% paste0(., ", ", label)
    }
    levels(metric_tib$Model) <- metric_tib$Model %>% unique()
    metric_tib$iter <- as.character(df$iter[u])
    return(metric_tib)
  }) %>% do.call(rbind.data.frame, .)
}
# Build and save the reproducibility figure: per-run metric boxplots (top)
# stacked over a per-run validation-loss scatter (bottom), written to
# <path>/boxplots/Best_repr_<plot_name>.pdf.
#
# @param runs_df       output of reprodf_runs() (one row per model/run/seed).
# @param metrics_df    output of reprodf_metrics() (long-format metric values).
# @param plot_name     suffix used in the output PDF file name.
# @param breaks_epochs approximate number of y-axis breaks for the loss panel.
# Relies on globals `path` and `calc_stat` (boxplot summary helper).
repro_plot <- function(runs_df, metrics_df, plot_name, breaks_epochs = 3) {
  setwd(path)
  epochs_plot <- ggplot(runs_df, aes(x = epochs, y = val_loss, color = Run, shape = Run)) +
    facet_wrap(~models, scales = "free", nrow = 1) +
    geom_point(size = 0.9, position = position_dodge(width = 0.25, preserve = "total")) +
    theme_light(base_size = 8.5, base_family = "Helvetica") +
    scale_color_manual(values = c(
      "#0073c2", "#EFC000", "#A73030", "#868686",
      "#641ea4", "#76CD26", "#E67300", "#1929C8",
      "#cd2926", "#3c3c3c"
    )) +
    scale_shape_manual(values = c(15, 1, 17, 6, 3, 18, 4, 20, 5, 0)) +
    labs(x = "Epochs", tag = "BG val loss") +
    scale_y_continuous(
      labels = scales::number_format(accuracy = 0.001),
      breaks = breaks_pretty(n = breaks_epochs)
    ) +
    guides(color = guide_legend(nrow = 1, override.aes = list(size = 1.5))) +
    theme(
      axis.title.x = element_text(margin = margin(0.5, 0, 0, 0, unit = "mm")),
      axis.text.x = element_text(angle = 90, vjust = 0.5),
      axis.text.y = element_text(angle = 55, vjust = 0.5, hjust = 0.8),
      axis.title.y = element_blank(),
      axis.title.y.right = element_text(),
      strip.background = element_rect(fill = "white"),
      strip.text = element_blank(),
      legend.position = "bottom",
      plot.tag.position = "right",
      plot.tag = element_text(angle = 270, size = 7, margin = margin(c(-14, 0.1, 0, 0.6), unit = "mm")),
      legend.text = element_text(margin = margin(0, 0.3, 0, 0.3, unit = "mm")),
      legend.key.size = unit(3, "mm"),
      legend.box.margin = margin(-3.25, 0, -1.5, 0, unit = "mm"),
      panel.spacing = unit(0, "mm"),
      plot.margin = margin(0, 0, 0, 0, unit = "mm")
    )
  cnn_plot <- ggplot(metrics_df, aes(x = Run, y = value, color = Run)) +
    facet_grid(vars(metric), vars(Model), scales = "free") +
    stat_summary(fun.data = calc_stat, geom = "boxplot", width = 0.5, lwd = 0.35) +
    theme_light(base_size = 8.5, base_family = "Helvetica") +
    scale_color_manual(values = c(
      "#0073c2", "#EFC000", "#A73030", "#868686",
      "#641ea4", "#76CD26", "#E67300", "#1929C8",
      "#cd2926", "#3c3c3c"
    )) +
    # `guides(color = FALSE)` is deprecated in ggplot2; "none" is the
    # behavior-identical modern spelling.
    guides(color = "none") +
    theme(
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      strip.background = element_rect(fill = "white"),
      strip.text = element_text(color = "black"),
      strip.text.x = element_text(margin = margin(c(0, 0, 1, 0), unit = "mm")),
      strip.text.y = element_text(margin = margin(c(0, 0, 0, 0.5), unit = "mm")),
      legend.key.size = unit(4, "mm"),
      legend.position = "bottom",
      legend.box.margin = margin(0, 0, 0, 0, unit = "mm"),
      panel.spacing = unit(1, "mm"),
      plot.margin = margin(0, 0, 1, 0, unit = "mm")
    )
  repr_plot_2 <- grid.arrange(cnn_plot, epochs_plot, heights = c(3.2, 1))
  setwd(path)
  # showWarnings = FALSE: the folders normally already exist on re-runs.
  dir.create("boxplots", recursive = TRUE, showWarnings = FALSE)
  dir.create("spatial", recursive = TRUE, showWarnings = FALSE)
  ggsave(
    plot = repr_plot_2, filename = paste0("./boxplots/Best_repr_", plot_name, ".pdf"),
    height = 120, width = 175, units = "mm"
  )
}
# Variant of repro_plot(): boxplots of validation loss grouped by dropout label
# ("ND", "D #1", "D #2") and faceted by epoch count, stacked under the metric
# boxplots, saved to <path>/boxplots/Best_repr_<plot_name>.pdf.
#
# @param runs_df       output of reprodf_runs(); `models` must look like
#                      "<model>, <label>" so the label can be split off.
# @param metrics_df    output of reprodf_metrics().
# @param plot_name     suffix used in the output PDF file name.
# @param breaks_epochs approximate number of y-axis breaks for the loss panel.
# Relies on globals `path` and `calc_stat`.
repro_plot_2 <- function(runs_df, metrics_df, plot_name, breaks_epochs = 3) {
  setwd(path)
  runs_df <- runs_df %>%
    mutate(
      # Everything before the first comma is the model id; after it, the label.
      Model = str_remove_all(models, ",.+$"),
      label = str_split(models, ",.", simplify = TRUE)[, 2]
    ) %>%
    mutate(
      label = factor(label, levels = c("ND", "D #1", "D #2")),
      epochs = str_c("Epochs=", epochs)
    )
  epochs_plot <- ggplot(runs_df, aes(x = label, y = val_loss)) +
    facet_wrap(~epochs, scales = "free", nrow = 1) +
    stat_summary(fun.data = calc_stat, geom = "boxplot", width = 0.4, lwd = 0.3) +
    theme_light(base_size = 8.5, base_family = "Helvetica") +
    labs(tag = "BG val loss") +
    scale_y_continuous(
      breaks = breaks_pretty(n = breaks_epochs),
      labels = scales::number_format(accuracy = 0.001)
    ) +
    theme(
      axis.title.x = element_blank(),
      axis.text.y = element_text(angle = 55, vjust = 0.5, hjust = 0.5),
      axis.title.y = element_blank(),
      axis.title.y.right = element_text(),
      strip.background = element_rect(fill = "white"),
      strip.text = element_text(color = "black", margin = margin(0, 0, 0.5, 0, unit = "mm")),
      plot.tag.position = "right",
      plot.tag = element_text(angle = 270, size = 7, margin = margin(c(-1, 0.1, 0, 0.6), unit = "mm")),
      legend.text = element_text(margin = margin(0, 0.3, 0, 0.3, unit = "mm")),
      legend.key.size = unit(3, "mm"),
      panel.spacing = unit(1, "mm"),
      plot.margin = margin(0, 0, 0, 0, unit = "mm")
    )
  cnn_plot <- ggplot(metrics_df, aes(x = Run, y = value, color = Run)) +
    facet_grid(vars(metric), vars(Model), scales = "free") +
    stat_summary(fun.data = calc_stat, geom = "boxplot", width = 0.4, lwd = 0.25) +
    theme_light(base_size = 8.5, base_family = "Helvetica") +
    scale_color_manual(values = c(
      "#0073c2", "#EFC000", "#A73030", "#868686",
      "#641ea4", "#76CD26", "#E67300", "#1929C8",
      "#cd2926", "#3c3c3c"
    )) +
    # `guides(color = FALSE)` is deprecated in ggplot2; "none" is equivalent.
    guides(color = "none") +
    theme(
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      strip.background = element_rect(fill = "white"),
      strip.text = element_text(color = "black"),
      strip.text.x = element_text(margin = margin(c(0, 0, 1, 0), unit = "mm")),
      strip.text.y = element_text(margin = margin(c(0, 0, 0, 0.5), unit = "mm")),
      legend.key.size = unit(4, "mm"),
      legend.position = "bottom",
      legend.box.margin = margin(0, 0, 0, 0, unit = "mm"),
      panel.spacing = unit(1, "mm"),
      plot.margin = margin(0, 0, 1, 0, unit = "mm")
    )
  repr_plot_2 <- grid.arrange(cnn_plot, epochs_plot, heights = c(3.35, 0.8))
  setwd(path)
  # showWarnings = FALSE: the folders normally already exist on re-runs.
  dir.create("boxplots", recursive = TRUE, showWarnings = FALSE)
  dir.create("spatial", recursive = TRUE, showWarnings = FALSE)
  ggsave(
    plot = repr_plot_2, filename = paste0("./boxplots/Best_repr_", plot_name, ".pdf"),
    height = 120, width = 175, units = "mm"
  )
}
# Plot train vs. validation loss curves for every stored run of the models in
# `df`, faceted Run x Model, and save to <path>/loss_plots/Repr_<plot_name>.pdf.
# Returns the long-format loss data frame used for plotting.
#
# @param df        table with columns iter, seed, label, models (one model per row).
# @param plot_name suffix used in the output PDF file name.
# Relies on the global `path`; load()s RData files that define `history_trains`.
# NOTE(review): y-axis is clipped to [1.1, 1.7]; losses outside that range are
# dropped by ggplot — confirm that is intended for new experiments.
train_loss_plot <- function(df, plot_name) {
  df2 <- sapply(1:nrow(df), simplify = F, function(u) {
    setwd(paste0(path, "/V-", df$iter[u]))
    seed <- as.character(df$seed[u])
    label <- as.character(df$label[u])
    runs_CNN <- dir(path = seed, pattern = "hist_train_CNN")
    train_df <- sapply(1:length(runs_CNN), simplify = F, function(y) {
      # Defines `history_trains` in .GlobalEnv for this run.
      load(paste0("./", seed, "/", runs_CNN[y]), .GlobalEnv)
      tibble(
        Train = history_trains[[df$models[u]]]$metrics$loss,
        Validation = history_trains[[df$models[u]]]$metrics$val_loss,
        Seed = seed,
        Model = df$models[u] %>%
          str_remove_all("-0.25|-le_0.3-le_0.3|_0.3|RUE|ALSE") %>%
          str_remove("T-") %>%
          paste0(., "\nS=", seed) %>%
          as.factor(),
        Run = as.character(y) %>% as.factor(),
        Epoch = 1:length(history_trains[[df$models[u]]]$metrics$loss),
        steps = history_trains[[df$models[u]]]$params$steps
      )
    }) %>% do.call(rbind.data.frame, .)
  }) %>%
    do.call(rbind.data.frame, .) %>%
    as_tibble() %>%
    pivot_longer(c("Train", "Validation"), names_to = "Data", values_to = "loss")
  train_plot <- ggplot(df2, aes(y = loss, x = Epoch, color = Data)) +
    geom_line(size = 0.4) +
    facet_grid(vars(Run), vars(Model), scales = "free") +
    scale_y_continuous(
      limits = c(1.1, 1.7), labels = c("1.2", "1.4", "1.6"),
      breaks = c(1.2, 1.4, 1.6)
    ) +
    scale_x_continuous(breaks = breaks_pretty(n = 3)) +
    theme_light(base_size = 9, base_family = "Helvetica") +
    labs(y = "Bernoulli Gamma loss function", color = "") +
    scale_colour_manual(values = c("#0073c2", "#cd2926")) +
    theme(
      axis.text.x = element_text(angle = 45, hjust = 1),
      panel.spacing = unit(1.5, "mm"),
      strip.background = element_rect(fill = "white"),
      strip.text = element_text(color = "black"),
      strip.text.x = element_text(margin = margin(c(0, 0, 1, 0), unit = "mm")),
      strip.text.y = element_text(margin = margin(c(0, 0, 0, 1), unit = "mm")),
      legend.margin = margin(-3.5, 0, -1.5, 0, unit = "mm"),
      legend.key.size = unit(6, "mm"),
      legend.key.width = unit(6, "mm"),
      legend.position = "bottom",
      plot.margin = margin(0, 0, 0, 0.25, unit = "mm")
    )
  # Fix: ggsave() errors when the target directory is missing; sibling plot
  # functions create their output folders, so do the same here.
  dir.create(paste0(path, "/loss_plots"), recursive = TRUE, showWarnings = FALSE)
  ggsave(paste0(path, "/loss_plots/Repr_", plot_name, ".pdf"),
    plot = train_plot, width = 175, height = 120, units = "mm"
  )
  return(df2)
}
|
2afc1f300d06b9b499d5076732768fb7497d60a3
|
fdf19c5e406df9d9f52409a18bd77e4b120eb87f
|
/R/tmt_transformer.R
|
138d2151e7ef710a6f57fd0d8fbde8e334111f80
|
[
"MIT"
] |
permissive
|
MassDynamics/lfq_processing
|
d2325cabb0d50779d9ea5beb8596fefb815e0bca
|
5480744fbdfc4aea014dec6589e86b3dc2b0f632
|
refs/heads/main
| 2023-05-12T02:28:48.982754
| 2023-05-04T03:28:00
| 2023-05-04T03:28:00
| 341,805,035
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,995
|
r
|
tmt_transformer.R
|
#' Run Pairwise Protein Quantification on each level of TMT Maxquant output
#'
#' @param protein_groups A data.table of the Maxquant proteinGroups output.
#' @param des The experiment design table (reporter channels, conditions,
#' replicates, experiments).
#' @param output_folder An output folder to store produced files.
#' @param imputeStDev The Standard Deviation parameter for MNAR Imputation
#' @param imputePosition The Position parameter for MNAR Imputation
#' @return A list of the quantified protein table, the per-channel intensity
#' table, the decoded design table and the condition comparison mapping.
#' @examples
#' \dontrun{
#' tmp = tmt_transformer(protein_groups, des,
#' output_folder = output_folder,
#' imputeStDev=0.3,
#' imputePosition=1.8)
#' prot = tmp[[1]]
#' prot_int = tmp[[2]]
#' des = tmp[[3]]
#' conditionComparisonMapping = tmp[[4]]
#' }
#' @import data.table
#' @export tmt_transformer
tmt_transformer <- function(protein_groups, des, output_folder, imputeStDev=0.3, imputePosition=1.8) {
  dir.create(output_folder, showWarnings = FALSE)
  # Drop reporter channels that are not annotated in the design table.
  protein_groups <- remove_not_annotated_channels(protein_groups, des)
  # Encode condition names to safe identifiers; keep the dictionary to decode later.
  des_list <- condition_name_encoder_tmt(des = des)
  des <- des_list[[1]]
  conditions_dict <- des_list[[2]]
  # BUG FIX: the user-supplied imputation parameters were previously ignored —
  # the literals 0.3 and 1.8 were hard-coded here regardless of the arguments.
  tmp <- tmt_quant_analysis(protein_groups,
                            des,
                            "id",
                            imputeStDev = imputeStDev,
                            imputePosition = imputePosition)
  prot <- tmp[[1]]
  prot_int <- tmp[[2]]
  conditionComparisonMapping <- tmp[[3]]
  # Restore the original (human-readable) condition names everywhere.
  prot_int <- condition_name_decode_intensity_data_tmt(dt = prot_int, dict = conditions_dict)
  prot <- condition_name_decode_quantitative_data(dt = prot, dict = conditions_dict)
  des <- condition_name_decode_intensity_data_tmt(dt = des, dict = conditions_dict, writerunid = FALSE)
  conditionComparisonMapping = decodeComparisonConditionMapping(conditionComparisonMapping, conditions_dict)
  write_output(prot, output_folder, "proteinGroups_quant.txt")
  write_output(prot_int, output_folder, "proteinGroups_quant_intensities.txt")
  return(list(prot, prot_int, des, conditionComparisonMapping))
}
# utilities
# Validate that a TMT experiment design table has the required columns
# (checked case-insensitively): reporter_channel, condition, replicate and
# experiment, plus mixture when fractions are present per detect_fractions().
# Fails with a stopifnot() error on the first missing column.
#' @export verify_tmt_des
verify_tmt_des <- function(des){
  stopifnot("reporter_channel" %in% tolower(colnames(des)))
  stopifnot("condition" %in% tolower(colnames(des)))
  stopifnot("replicate" %in% tolower(colnames(des)))
  stopifnot("experiment" %in% tolower(colnames(des)))
  # Fractionated designs additionally need a mixture column.
  if (detect_fractions(des)){
    stopifnot("mixture" %in% tolower(colnames(des)))
  }
}
# Drop "reporter intensity corrected ..." columns whose channel/experiment
# combination is not annotated in the design table `des`. The data.table is
# modified by reference and also returned.
#' @export remove_not_annotated_channels
remove_not_annotated_channels <- function(protein_groups, des){
  all_intensity_cols <- grep("reporter intensity corrected [0-9]* ",
                             colnames(protein_groups), value = TRUE)
  # Columns to keep: "<prefix> <channel> <experiment>" for every design row.
  annotated_cols <- str_c(
    str_c("reporter intensity corrected ", des$reporter_channel),
    tolower(des$experiment),
    sep = " "
  )
  unwanted <- setdiff(all_intensity_cols, annotated_cols)
  # data.table idiom: delete all unwanted columns in a single by-reference call.
  if (length(unwanted) > 0) {
    protein_groups[, (unwanted) := NULL]
  }
  return(protein_groups)
}
|
c31b56905194221cb2f02b4f8964d962ef53c9e8
|
c910ef2f737ae2b109e35d5c483f18a686d42baf
|
/R/juveniles_BioEnObservedRates.R
|
8c464e26ba1f1fa4722d3c9fd5272b74b21a26a6
|
[
"MIT"
] |
permissive
|
wStockhausen/rPacificCod
|
132318a9d0961c1ce487b2edecd44a2e7f00c6b0
|
a2339cd1d0a5af6ed94e7ef11e7e64c8ff4473a6
|
refs/heads/master
| 2021-08-07T14:20:52.672997
| 2021-03-19T16:55:37
| 2021-03-19T16:55:37
| 246,152,660
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,297
|
r
|
juveniles_BioEnObservedRates.R
|
#'
#' @title Calculate observed growth (J/g fish/day) of a (nominally) 6.8 g fish as function of temperature
#'
#' @description Observed growth rate of a (nominally) 6.8 g juvenile Pacific
#' cod as a function of temperature, converted from percent body mass per day
#' to energy units using the juvenile energy density.
#'
#' @param T - temperature (deg C)
#' @param EDc - energy density of Pacific cod juveniles (J/g)
#'
#' @return growth rate of juvenile Pacific cod in J/g fish/day.
#'
#' @details Based on eq. S8 (from Laurel et al. 2016, in % body mass/day) as
#' given in Supplement to Hurst et al. 2018: \cr
#' \itemize{
#' \item{\eqn{ GT = [(0.2494 + 0.3216*T - 0.0069*T^2 - 0.0004*T^3)/100]*EDc},[J/g fish/day]}
#' }
#'
#' @family juvenile bioenergetic functions
#'
#' @export
#'
juv_GT <- function(T, EDc = 4138) {
  # Cubic polynomial in temperature: growth in % wet weight per day.
  pct_per_day <- 0.2494 + 0.3216 * T - 0.0069 * T^2 - 0.0004 * T^3
  # Scale by energy density to obtain J/g fish/day.
  (pct_per_day / 100) * EDc
}
#'
#' @title Calculate consumed energy (J/g fish/day) of a (nominal) 6.8 g fish as function of temperature
#'
#' @description Consumed energy (J/g fish/day) of a (nominal) 6.8 g fish as a
#' function of temperature: observed growth plus the respiration cost.
#'
#' @param T - temperature (deg C)
#' @param W - weight of fish (nominally 6.8 g)
#'
#' @return value of consumed energy in [J/g fish/day]
#'
#' @details Based on eq. S9 (in J/day) in Supplement to Hurst et al. 2018:\cr
#' \eqn{ CT = juv_GT(T) + juv_Ra(W)*juv_fR(T)} [J/g fish/day], \cr
#' where W=6.8 g.
#'
#' @family juvenile bioenergetic functions
#'
#' @export
#'
juv_CT <- function(T,
                   W = 6.8) {
  # Consumption = observed growth + weight-specific respiration scaled by
  # its thermal response.
  growth <- juv_GT(T)
  respiration <- juv_Ra(W) * juv_fR(T)
  growth + respiration
}
#'
#' @title Calculate functional response of respiration to temperature
#'
#' @description Functional response of respiration to temperature for
#' juvenile Pacific cod.
#'
#' @param T - temperature (deg C)
#'
#' @return value for functional response (units??)
#'
#' @details Based on eq. S5 (from Oh et al. 2010) as
#' given in Supplement to Hurst et al. 2018:
#' \itemize{
#' \item{\eqn{mt = (-1.04e-5)*T^2 + (3.38e-4)*T + (-1.02e-3)}, units??}
#' }
#'
#' @family juvenile bioenergetic functions
#'
#' @export
#'
juv_MT <- function(T) {
  # Quadratic thermal response (eq. S5); vectorized over T.
  (-1.04e-5) * T^2 + (3.38e-4) * T + (-1.02e-3)
}
|
96b2e3d6a17da9174e7ae2818f8dd2f45b9e4102
|
2e3acb734fb139d33619cae6f7dab9da22ab98e5
|
/tests/testthat/test_adjacent_thresholds.R
|
c3ac05953f6e59d36855dcf15bb9328ebe21bb75
|
[] |
no_license
|
avalcarcel9/rtapas
|
72e5b4dd0416809438be19b39949cc102cbb4327
|
f1968a5962d82f10c776a01c4103be3caa443eac
|
refs/heads/master
| 2020-04-14T21:38:08.361861
| 2020-03-27T13:37:34
| 2020-03-27T13:37:34
| 164,134,749
| 1
| 8
| null | 2020-03-25T14:45:49
| 2019-01-04T17:13:30
|
R
|
UTF-8
|
R
| false
| false
| 1,624
|
r
|
test_adjacent_thresholds.R
|
###########################
# Alessandra Valcarcel
# Tests for adjacent threshold detection in training
# Created: November 14, 2019
# Updated: November 14, 2019
###########################
# Non-adjacent thresholds are detected and error out
testthat::test_that("Test non-adjacent tied thresholds during tapas_train errors.", {
  # Simulate 50 subjects, each evaluated at three candidate thresholds.
  tapas_input <- tibble::tibble(
    threshold = rep(c(0.1, 0.2, 0.3), 50),
    dsc = runif(150, min = 0, max = 1),
    volume = rnorm(150, 25, 20),
    subject_id = rep(1:50, 3)
  ) %>%
    dplyr::arrange(subject_id, threshold)
  # Subject 1: tie the DSC maximum at thresholds 0.1 and 0.3 (non-adjacent),
  # which tapas_train must reject.
  tapas_input$dsc[tapas_input$subject_id == 1] <- c(0.3, 0.1, 0.3)
  testthat::expect_error(
    rtapas::tapas_train(data = tapas_input, dsc_cutoff = 0.03, verbose = TRUE)
  )
})
# Adjacent thresholds are detected and returns the median
testthat::test_that("Test adjacent tied thresholds during tapas_train runs and returns a message about tie.", {
  # Simulate 50 subjects, each evaluated at three candidate thresholds.
  tapas_input <- tibble::tibble(
    threshold = rep(c(0.1, 0.2, 0.3), 50),
    dsc = runif(150, min = 0, max = 1),
    volume = rnorm(150, 25, 20),
    subject_id = rep(1:50, 3)
  ) %>%
    dplyr::arrange(subject_id, threshold)
  # Subject 1: tie the DSC maximum at adjacent thresholds 0.1 and 0.2,
  # which tapas_train resolves via the median, with a message.
  tapas_input$dsc[tapas_input$subject_id == 1] <- c(0.3, 0.3, 0.1)
  testthat::expect_message(
    rtapas::tapas_train(data = tapas_input, dsc_cutoff = 0.03, verbose = TRUE),
    "Subject ties are adjacent using the median value for training."
  )
})
|
bba21f168c4192d00fe915b73c8470938be9097f
|
2c4a048e0b3063f003fc5179690cda49ea182f40
|
/tests/testthat.R
|
4ffc6c6e4e4f0e9178fea3653eb29eaf66a0c8e6
|
[] |
no_license
|
md0u80c9/SSNAPInterface
|
ec2782a1be700cb8bb2112148dbe620b35d804ff
|
ddd3d46ca080d8d677c459ac8c398375b99c0a86
|
refs/heads/master
| 2020-09-07T01:51:30.652250
| 2019-11-09T09:40:29
| 2019-11-09T09:40:29
| 220,620,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
testthat.R
|
# testthat bootstrap: executed by R CMD check to run every test file under
# tests/testthat/ against the installed ssnapinterface package.
library(testthat)
library(ssnapinterface)
test_check("ssnapinterface")
|
cd5ecbca53811c743595825f07389a616a04efbd
|
4ae8357183a22e92625b11541af76a2c0ebb9d8e
|
/R/shiny.R
|
531f6daf8079fbd865ccd1f0251cb93809204eff
|
[
"MIT"
] |
permissive
|
ropensci/iheatmapr
|
795a25a6f772e59fcb23bcc1e51901651546c654
|
2fc34790d5af20e9fe99e7752c958ef8f470e2ad
|
refs/heads/main
| 2023-08-13T15:40:10.593628
| 2023-07-25T05:07:22
| 2023-07-25T05:07:22
| 85,225,301
| 202
| 33
|
NOASSERTION
| 2023-07-25T05:13:46
| 2017-03-16T17:44:27
|
R
|
UTF-8
|
R
| false
| false
| 3,243
|
r
|
shiny.R
|
#' Shiny bindings for iheatmapr
#'
#' Output and render functions for embedding iheatmapr widgets in Shiny apps.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{"100\%"},
#' \code{"400px"}, \code{"auto"}) or a number, which will be coerced to a
#' string and have \code{"px"} appended.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @importFrom htmlwidgets shinyWidgetOutput shinyRenderWidget
#' @name iheatmapr-shiny
#'
#' @export
iheatmaprOutput <- function(outputId, width = "100%", height = "400px") {
  # Delegate to htmlwidgets, which wires up the widget's HTML/JS dependencies.
  htmlwidgets::shinyWidgetOutput(
    outputId = outputId,
    name = "iheatmapr",
    width = width,
    height = height,
    package = "iheatmapr"
  )
}
#' @param expr An expression that generates an Iheatmap object
#' @param env The environment in which to evaluate \code{expr}.
#' @rdname iheatmapr-shiny
#' @export
renderIheatmap <- function(expr, env = parent.frame(), quoted = FALSE) {
  if (!quoted) { expr <- substitute(expr) } # force quoted
  # Turn the captured expression into a zero-arg function evaluated in `env`,
  # so shinyRenderWidget can re-run it reactively.
  func <- shiny::exprToFunction(expr, env, quoted = TRUE)
  # Wrap the user's Iheatmap result with the package-internal to_widget()
  # converter (accessed via getFromNamespace because it is not exported).
  expr <- quote(getFromNamespace("to_widget", "iheatmapr")(func()))
  htmlwidgets::shinyRenderWidget(expr, iheatmaprOutput, environment(), quoted = TRUE)
}
#' Access iheatmapr user input event data in shiny
#'
#' This function must be called within a reactive shiny context.
#'
#' @param object \code{\link{Iheatmap-class}} object
#' @param event The type of event. Currently "hover", "click", and "relayout"
#' are supported (see \code{match.arg} below).
#' @param session a shiny session object (the default should almost always be used).
#' @export
#' @examples \dontrun{
#' shiny::runApp(system.file("examples", "shiny_example", package = "iheatmapr"))
#' }
iheatmapr_event <- function(object,
                            event = c("hover", "click", "relayout"),
                            session = shiny::getDefaultReactiveDomain())
{
  if (is.null(session)) {
    stop("No reactive domain detected. This function can only be called \n",
         "from within a reactive shiny context.")
  }
  source <- object@source
  event <- match.arg(event)
  event <- paste0("iheatmapr_", event)
  # The JS side publishes events under ".clientValue-<event>-<source>".
  src <- sprintf(".clientValue-%s-%s", event, source)
  val <- session$rootScope()$input[[src]]
  if (is.null(val)) {
    # No event has fired yet.
    out <- val
  } else if (event == "iheatmapr_hover" || event == "iheatmapr_click"){
    raw <- jsonlite::fromJSON(val)
    out <- list(raw = raw)
    # Map the plotly curve number back to the named plot it belongs to.
    curve <- names(plots(object))[raw$curveNumber + 1]
    xname <- xaxis_name(plots(object)[[curve]])
    yname <- yaxis_name(plots(object)[[curve]])
    # On main (categorical) axes, translate the numeric position into the
    # row/column label via the stored axis order; otherwise pass through.
    if (is(xaxes(object)[[xname]], "IheatmapMainAxis")){
      co <- axis_order(xaxes(object)[[xname]])
      out$col <- co[raw$x]
    } else{
      out$x <- raw$x
    }
    if (is(yaxes(object)[[yname]], "IheatmapMainAxis")){
      ro <- axis_order(yaxes(object)[[yname]])
      out$row <- ro[raw$y]
    } else{
      out$y <- raw$y
    }
    # Only main heatmaps carry a z value worth reporting.
    if (is(plots(object)[[curve]],"MainHeatmap")){
      out$value <- raw$z
    }
  } else if (event == "iheatmapr_relayout"){
    out <- jsonlite::fromJSON(val)
    #out <- list(raw = raw)
  }
  return(out)
}
|
c9d5a95d77efd2267b66e04f43ec9ef0468a5331
|
ac9c0d6dff162a23018ad56fcbca468703ad051c
|
/introduction.R
|
2885f7e2d5c2ed478f785885a1246544d0b4e5c1
|
[] |
no_license
|
WL-Biol185-ShinyProjects/COVID-and-Race-analysis
|
ae59f5e9f91cd2f6bc5681711ab779a64bacd934
|
8e271173905823a8324e9fb472840eb028b7be35
|
refs/heads/main
| 2023-01-23T00:04:36.592230
| 2020-11-20T20:06:07
| 2020-11-20T20:06:07
| 300,344,672
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,439
|
r
|
introduction.R
|
library(shiny)
library(shinydashboard)
library(markdown)
# Static introduction page for the COVID-and-race Shiny dashboard:
# a title, three informational boxes, a links box, and an infographic image.
introductiontext <-
fluidPage(
titlePanel("The Coronavirus Disease 2019 in the United States"),
column(width = 6,
# Box 1: background on the pandemic and SARS-CoV-2 transmission.
box(
title = "Background on the COVID-19 Pandemic", width = NULL, background = "purple",
"Since the initial outbreak of the COVID-19 disease in Wuhan, Hubei Province,
China in December 2019, the virus has rapidly spread across the globe.
As of November 11th, 2020 there are 10,170,846 cases in the United
States and 51 million worldwide. However, in the past few decades, COVID-19
is not the first disease to be caused by zoonotic coronaviruses. In 2002, an
episode of severe respiratory disease emerged and scientists determined that
the etiological agent of severe acute respiratory syndromes (SARS) is SARS coronavirus
(SARS-CoV) – a member of the Severe acute respiratory syndrome-related coronavirus species.
Both the SARS epidemic of 2002 and the Middle East respiratory syndrome coronavirus were
associated with upper respiratory tract infections. Thus when multiple cases of pneumonia
like symptoms emerged in December, the causative agent was identified as the coronavirus
2019-nCoV. Based on its genetic resemblance to SARS-CoV and a number of other SARS-like
animal coronaviruses, the novel coronavirus that causes COVID-19 was identified as SARS-CoV-2.
Clinical evidence suggests that the primary mode of infection with SARS-CoV-2 is through
exposure to infectious respiratory droplets. Infection through respiratory viruses can
happen through direct transmission, droplet transmission, and airborne transmission.
Droplet transmission is defined as infection through exposure to respiratory droplets
from a contagious person who is within six feet of distance. Airborne transmission is
infection from smaller respiratory droplets and particles that can remain in the air
for extended periods of time."
),
# Box 2: racial/ethnic disparities in cases, hospitalizations and deaths.
box(
title = "Demographic Disparities in COVID-19 Cases and Deaths", width = NULL, background = "purple",
"Race and ethnicity are risks for higher chance of disease and for other underlying
conditions that impact health such as, socioeconomic status, access to health case,
and increased exposure to COVID-19 due to occupation. Jobs that increase exposure
include frontline works like doctors or nurses, essential workers, and infrastructure
workers. Black people have a ratio that is 2.6x higher than white persons of being infected with SARS-CoV-2, including a 4.7x higher ratio of
being hospitalized, and 2.1x higher ratio of death. Additionally, Hispanic or Latinos have a ratio that is 2.8x higher
than white persons for cases, 4.6x higher for hospitalizations, and 1.1x higher for deaths."
),
# Box 3: external reference links (styled light gray on the purple box).
box(title = "Links for More Information", width = NULL, background = "purple",
a("COVID-19 Information (World Health Organization)", href = "https://www.who.int/news-room/q-a-detail/coronaviruse-disease-covid-19", style = "color:LightGray;"),
br(),
a("SARS-CoV-2 Transmission", href = "https://www.cdc.gov/coronavirus/2019-ncov/more/scientific-brief-sars-cov-2.html", style = "color:LightGray;"),
br(),
a("COVID-19 Cases and Deaths by Race", href = "https://www.cdc.gov/coronavirus/2019-ncov/covid-data/investigations-discovery/hospitalization-death-by-race-ethnicity.html", style = "color:LightGray;"),
br(),
a("COVID-19 Cases Dashboard (CDC)", href = "https://covid.cdc.gov/covid-data-tracker", style = "color:LightGray;"),
br(),
a("COVID-19 Cases Dashboard (World Health Organization)", href = "https://covid19.who.int", style = "color:LightGray;"))
),
# Infographic served from the app's www/ directory, centered at half width.
img(src = "COVID19infographic.png",
height="25%", width="50%",
align = "middle",
style="display: block; margin-left: auto; margin-right: auto;",
h5()
)
)
|
7cbafaea68c1a951192f42d96bb51ff99cd8030e
|
abd91368a4e3842af3fe15bc7aad78fc8c96a81e
|
/tests/testthat.R
|
c545e23b04561b58aed98b696f38b3eaa19661a2
|
[
"MIT"
] |
permissive
|
RMHogervorst/nsapi
|
5d5df709ebbf576f58d8be5a1fe8cdb1a9d2fab4
|
9b97dc6c71557892093f616fc51facdf24542646
|
refs/heads/master
| 2023-03-18T00:37:19.422575
| 2023-03-06T16:21:18
| 2023-03-06T16:21:18
| 141,177,406
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# testthat bootstrap: executed by R CMD check to run every test file under
# tests/testthat/ against the installed nsapi package.
library(testthat)
library(nsapi)
test_check("nsapi")
|
4081caabafd0910ecf1b64a9afd2bd310924a4be
|
49d99fe88067e9e0db38fcd4058df6dc722d6886
|
/man/slice.eventlog.Rd
|
11adf39891e18d7b29ead5db1b46c5ec4ebe57dd
|
[] |
no_license
|
marijkeswennen/bupaR
|
e7616ae191f21bccfb7f0494b090f750d054e0c1
|
568f6d882f5babe68afc1e00101353e7eab4e045
|
refs/heads/master
| 2021-08-11T10:18:48.674592
| 2017-09-04T20:12:26
| 2017-09-04T20:12:26
| 110,561,508
| 0
| 0
| null | 2017-11-13T14:54:02
| 2017-11-13T14:54:02
| null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
slice.eventlog.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slice.eventlog.R
\name{slice.eventlog}
\alias{slice.eventlog}
\title{Slice function for event log}
\usage{
\method{slice}{eventlog}(.data, ...)
}
\arguments{
\item{.data}{Eventlog}
\item{...}{Additional Arguments}
}
\description{
Return a slice of an event log object
}
|
d7d743790d5ff3d7850adc771c9e99ed594abfef
|
a4359fa2784abb2b619d1e74b8b989e02bc19d0f
|
/R/deprecated.R
|
133b365028bbae90143799ba859cccd61c3bb1b1
|
[
"MIT"
] |
permissive
|
syncrosim/rsyncrosim
|
2decddd287399b388838e80f1e0f1caf401dbcd6
|
cd0a5853c3e3beed5216087c03f60a73a1dd5dca
|
refs/heads/dev
| 2023-08-31T23:35:57.073921
| 2023-08-31T22:45:46
| 2023-08-31T22:45:46
| 169,151,965
| 8
| 8
|
NOASSERTION
| 2023-08-17T13:40:51
| 2019-02-04T21:23:14
|
R
|
UTF-8
|
R
| false
| false
| 6,221
|
r
|
deprecated.R
|
#' Add module
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{addPackage}} instead.
#'
#' @param filename character string or vector of these. The path to an .ssimpkg
#' file on disk, or a vector of filepaths.
#' @param session \code{\link{Session}} object
#'
#' @keywords internal
#'
#' @export
addModule <- function(filename, session = NULL) {
  # Deprecated shim: warn once per session, then delegate to the new API.
  lifecycle::deprecate_warn("1.2.11", "addModule()", "addPackage()")
  addPackage(filename, session)
}
#' Adds a package to SyncroSim
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{addPackage}} instead.
#'
#' @param filename character string. The path to a SyncroSim package file
#' @param session \code{\link{Session}} object
#'
#' @keywords internal
#'
#' @export
addPackageFile <- function(filename, session = NULL) {
  # Deprecated shim: warn once per session, then delegate to the new API.
  lifecycle::deprecate_warn("1.2.11", "addPackageFile()", "addPackage()")
  addPackage(filename, session)
}
#' Installed base packages
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{package}} instead.
#'
#' @param ssimObject \code{\link{Session}} or \code{\link{SsimLibrary}} object
#'
#' @keywords internal
#'
#' @export
basePackage <- function(ssimObject = NULL) {
  # Deprecated shim: delegates to package(), restricted to BASE packages.
  lifecycle::deprecate_warn("1.2.11", "basePackage()", "package()")
  package(ssimObject, installed = "BASE")
}
#' Delete module or modules
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{removePackage}} instead.
#'
#' @param name character string or vector of these. A module or vector of modules
#' to remove. See modules() for options
#' @param session \code{\link{Session}} object
#' @param force logical. If \code{FALSE} (default), require confirmation from user
#' before deletion
#'
#' @keywords internal
#'
#' @export
deleteModule <- function(name, session = NULL, force = FALSE) {
  # Deprecated shim: warn once per session, then delegate to the new API.
  lifecycle::deprecate_warn("1.2.11", "deleteModule()", "removePackage()")
  removePackage(name, session, force)
}
#' Deletes a package from your SyncroSim installation
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{removePackage}} instead.
#'
#' @param name character string or vector of these. A package or vector of
#' packages to remove
#' @param session \code{\link{Session}} object
#' @param force logical. If \code{FALSE} (default), require confirmation from user
#' before deletion
#'
#' @keywords internal
#'
#' @export
deletePackage <- function(name, session = NULL, force = FALSE) {
  # Deprecated shim: warn once per session, then delegate to the new API.
  lifecycle::deprecate_warn("1.2.11", "deletePackage()", "removePackage()")
  removePackage(name, session, force)
}
#' Installed models
#'
#' `r lifecycle::badge("deprecated")`
#'
#' Models are now distributed in Packages;
#' please use \code{\link{package}} instead.
#'
#' @param ssimObject \code{\link{Session}} or \code{\link{SsimLibrary}} object
#'
#' @keywords internal
#'
#' @export
model <- function(ssimObject = NULL) {
  # Deprecated shim: models are now packages; delegate to package().
  lifecycle::deprecate_warn("1.2.11", "model()", "package()")
  package(ssimObject)
}
#' Installed modules
#'
#' `r lifecycle::badge("deprecated")`
#' modules are now distributed in Packages;
#' Please use \code{\link{package}} instead.
#'
#' @param session \code{\link{Session}} object
#'
#' @keywords internal
#'
#' @export
module <- function(session = NULL) {
  # Deprecated shim: modules are now packages; delegate to package().
  lifecycle::deprecate_warn("1.2.11", "module()", "package()")
  package(session)
}
#' SyncroSim DataSheet Input Folder
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{runtimeInputFolder}} instead.
#'
#' @param scenario \code{\link{Scenario}} object. A SyncroSim result Scenario
#' @param datasheetName character. The input Datasheet name
#'
#' @return The result of \code{\link{runtimeInputFolder}}.
#'
#' @keywords internal
#'
#' @export
envInputFolder <- function(scenario, datasheetName) {
  # Emit a deprecation warning, then forward both arguments unchanged.
  lifecycle::deprecate_warn("1.2.11", "envInputFolder()", "runtimeInputFolder()")
  runtimeInputFolder(scenario, datasheetName)
}
#' SyncroSim DataSheet Output Folder
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{runtimeOutputFolder}} instead.
#'
#' @param scenario \code{\link{Scenario}} object. A SyncroSim result Scenario
#' @param datasheetName character. The output Datasheet name
#'
#' @return The result of \code{\link{runtimeOutputFolder}}.
#'
#' @keywords internal
#'
#' @export
envOutputFolder <- function(scenario, datasheetName) {
  # Emit a deprecation warning, then forward both arguments unchanged.
  lifecycle::deprecate_warn("1.2.11", "envOutputFolder()", "runtimeOutputFolder()")
  runtimeOutputFolder(scenario, datasheetName)
}
#' SyncroSim Temporary Folder
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{runtimeTempFolder}} instead.
#'
#' @param folderName character. The folder name
#'
#' @return The result of \code{\link{runtimeTempFolder}}.
#'
#' @keywords internal
#'
#' @export
envTempFolder <- function(folderName) {
  # Emit a deprecation warning, then forward the argument unchanged.
  lifecycle::deprecate_warn("1.2.11", "envTempFolder()", "runtimeTempFolder()")
  runtimeTempFolder(folderName)
}
#' Reports SyncroSim simulation progress
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{progressBar}} instead.
#'
#' @param iteration integer. The current iteration
#' @param timestep integer. The current timestep
#'
#' @return The result of \code{\link{progressBar}}.
#'
#' @keywords internal
#'
#' @export
envReportProgress <- function(iteration, timestep) {
  # Emit a deprecation warning, then forward to progressBar() as a
  # "report" call carrying the current iteration and timestep.
  lifecycle::deprecate_warn("1.2.11", "envReportProgress()", "progressBar()")
  progressBar(type = "report", iteration = iteration, timestep = timestep)
}
#' Begins a SyncroSim simulation
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{progressBar}} instead.
#'
#' @param totalSteps integer. The total number of steps in the simulation
#'
#' @return The result of \code{\link{progressBar}}.
#'
#' @keywords internal
#'
#' @export
envBeginSimulation <- function(totalSteps) {
  # Emit a deprecation warning, then forward to progressBar() as a
  # "report" call carrying the total number of steps.
  lifecycle::deprecate_warn("1.2.11", "envBeginSimulation()", "progressBar()")
  progressBar(type = "report", totalSteps = totalSteps)
}
#' Steps a SyncroSim simulation
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{progressBar}} instead.
#'
#' @return The result of \code{\link{progressBar}}.
#'
#' @keywords internal
#'
#' @export
envStepSimulation <- function() {
  # Emit a deprecation warning, then advance the progress bar one step.
  lifecycle::deprecate_warn("1.2.11", "envStepSimulation()", "progressBar()")
  progressBar(type = "step")
}
#' Ends a SyncroSim simulation
#'
#' `r lifecycle::badge("deprecated")`
#' Please use \code{\link{progressBar}} instead.
#'
#' @return The result of \code{\link{progressBar}}.
#'
#' @keywords internal
#'
#' @export
envEndSimulation <- function() {
  # Emit a deprecation warning, then signal the end of the progress bar.
  lifecycle::deprecate_warn("1.2.11", "envEndSimulation()", "progressBar()")
  progressBar(type = "end")
}
|
7d348354277f33a50684f92671ef1a424afff0a5
|
4e99758fd592b1cdf575c3fbb6177c26bb006af4
|
/code/proprotional test.R
|
50e1df29b2d79483a8127e8a4af30a601d3603de
|
[] |
no_license
|
kyle810828/advancedatascience
|
e731c059e038d1984782d1dfbbec52a3c278fe41
|
5d2eba090b657350b41f5487531b8044cfb1a818
|
refs/heads/master
| 2021-01-21T13:29:39.153796
| 2017-10-24T23:13:45
| 2017-10-24T23:13:45
| 102,121,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
proprotional test.R
|
# Compare skill-requirement frequencies between Glassdoor and Indeed job
# postings using a two-sample test of proportions, one test per skill.
setwd("~/Desktop/DS1001/Data/Indeed") # NOTE(review): hard-coded absolute path; not portable
Indeed<-read.csv("Indeed.csv")
Indeed<-as.data.frame(Indeed)
setwd("~/Desktop/DS1001/Data/Glassdoor")
GlassDoor<-read.csv("GlassDoor.csv")
GlassDoor<-as.data.frame(GlassDoor)
# Keep only the five skill columns from each source.
# (Presumably 0/1 indicator flags, since they are summed below -- TODO confirm.)
testG<-GlassDoor[,c('R','Python','Math','C++','Statistics')]
testI<-Indeed[,c('R','Python','Math','C++','Statistics')]
#proportional test
# Pre-allocate a 5x2 results matrix: skill name and p-value per row.
proptest<-matrix(rep(NA,10),ncol=2)
for(i in 1:5){
# Two-sample test (no continuity correction) of the proportion of postings
# mentioning skill i on each site.
x<-prop.test(c(sum(testG[,i]),sum(testI[,i])),c(nrow(testG),nrow(testI)),correct=FALSE)
# Storing name + p-value in one matrix coerces everything to character.
proptest[i,]<-c(colnames(testG)[i],x$p.value)
}
proptest<-as.data.frame(proptest)
colnames(proptest)<-c("Skills","p-value")
## proportional test
# NOTE(review): kable() is from the knitr package, which is never attached in
# this script -- confirm knitr is loaded by the calling document.
kable(proptest)
|
4be4f44ccdd100240f00a841899bf3973a24e6e5
|
7c5caeca7735d7909c29ee3ed6074ad008320cf0
|
/man/osd.Rd
|
fe67bcea7f34ad9c8983d33633bfc86209c40e50
|
[] |
no_license
|
ncss-tech/aqp
|
8063e800ed55458cfa7e74bc7e2ef60ac3b1e6f5
|
c80591ee6fe6f4f08b9ea1a5cd011fc6d02b5c4a
|
refs/heads/master
| 2023-09-02T07:45:34.769566
| 2023-08-31T00:14:22
| 2023-08-31T00:27:14
| 54,595,349
| 47
| 12
| null | 2023-08-17T15:33:59
| 2016-03-23T21:48:50
|
R
|
UTF-8
|
R
| false
| true
| 441
|
rd
|
osd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-documentation.R
\docType{data}
\name{osd}
\alias{osd}
\title{Example Output from soilDB::fetchOSD()}
\format{
A \code{SoilProfileCollection}
}
\usage{
data(osd)
}
\description{
An example \code{SoilProfileCollection} object created by \code{soilDB::fetchOSD()}, derived from the Cecil, Appling, and Bonneau Official Series Descriptions.
}
\keyword{datasets}
|
58091cbdab2536f685233fc2a7dd227839544270
|
d39b8ee4f436043101f576a57bc70f72b3719145
|
/OLD/Readability_test(02.26.13).R
|
5225cae5b9c767e2f867cab385b015aff4589dfc
|
[] |
no_license
|
sbraddaughdrill/Mutual_Fund_Strategy
|
4200c202ff1807559c323968722a53b7f957cda3
|
e42547517a999d3e2680c32ee4a6a982fb79149c
|
refs/heads/master
| 2020-04-06T04:58:58.424803
| 2015-02-05T20:15:23
| 2015-02-05T20:15:23
| 29,552,028
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,212
|
r
|
Readability_test(02.26.13).R
|
#==============================================================================;
#PROGRAM DETAILS;
#==============================================================================;
#Program: Readability_test.R;
#Version: 1.0;
#Author: S. Brad Daughdrill;
#Date: 02.17.2013;
#Purpose: Orange Model of underlying option securities;
#==============================================================================;
#==============================================================================;
#INITIAL SETUP;
cat("SECTION: INITIAL SETUP", "\n")
#==============================================================================;
#String-as-factors default is FALSE -- used for read.csv
#FIX(review): the real option name is "stringsAsFactors" (lower-case s); the
#original spelling "StringsAsFactors" set an unrelated, unused option and
#silently had no effect on read.csv()/data.frame().
options(stringsAsFactors=FALSE)
options(install.packages.check.source = FALSE)
#Default maxprint option
#options(max.print=99999)
#Maximum maxprint option
options(max.print=500)
#Memory limit default
#memory.limit(size=2047)
#Increase memory limit to 3000 mb (3 gb)
#memory.limit(size=3000)
#Expand the saved command history so it can hold up to 99999 lines
Sys.setenv(R_HISTSIZE=99999)
#Clear workspace
#NOTE(review): rm(list=ls()) inside a script is fragile; prefer running the
#script in a fresh R session.
rm(list=ls(all=TRUE))
#Set working directory (machine-specific; switch Home/Work lines as needed)
setwd("C:/Users/Brad/Dropbox/Research/3rd-Year_Paper/R") #Home
#setwd("C:/Users/bdaughdr/Dropbox/Research/3rd-Year_Paper/R") #Work
#Create output directory
output_directory <- normalizePath("C:/Users/Brad/Dropbox/Research/3rd-Year_Paper/R/",winslash = "\\", mustWork = NA) #Home
#output_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research/3rd-Year_Paper/R/",winslash = "\\", mustWork = NA) #Work
#Data data directory
data_directory <- normalizePath("C:/Users/Brad/Dropbox/Research/3rd-Year_Paper/Data/",winslash = "\\", mustWork = NA) #Home
#data_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research/3rd-Year_Paper/Data/",winslash = "\\", mustWork = NA) #Work
#Create function directory
function_directory <- normalizePath("C:/Users/Brad/Dropbox/Research/R/",winslash = "\\", mustWork = NA) #Home
#function_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research/R/",winslash = "\\", mustWork = NA) #Work
#Create package directory
#package_directory <- normalizePath("C:/Users/Brad/Documents/R/win-library/2.15/",winslash = "\\", mustWork = NA) #Home
#package_directory <- normalizePath("C:/Users/bdaughdr/Documents/R/win-library/2.15/",winslash = "\\", mustWork = NA) #Work
#Create treetag directory
treetag_directory <- normalizePath("C:/TreeTagger",winslash = "\\", mustWork = NA) #Home
#==============================================================================;
#INPUT START DATE AND END DATE;
#==============================================================================;
#Start_File_Name <- "Final_1999_QTR_1_2"
#End_File_Name <- "Final_2012_QTR_3_4"
#==============================================================================;
#INPUT OBSERVATIONS TO KEEP (MONTHS)
#==============================================================================;
#observations_to_keep <- 84 #CHANGE TO 36 or 60 as needed (or other #)
#==============================================================================;
#LIBRARIES;
cat("SECTION: LIBRARIES", "\n")
#==============================================================================;
#First-time install libraries
#source(file=paste(function_directory,"install_libraries.R",sep=""),echo=FALSE)
#Load add-on packages
library(cwhmisc) #Package for Miscellaneous Functions for maths, plotting, printing, statistics, strings, and tools
#library(dynlm) #Package for Dynamic linear models and time series regression
library(fastmatch) #Package for Fast match() function
library(foreign) #Package for Reading Data from Minitab,S,SAS,SPSS,...
library(formatR) #Package for ..........
library(gdata) #Package for ..........
#library(gmodels) #Package for Various R programming tools for model fitting
#library(googleVis) #Interface between R and the Google Visualisation API
library(gtools) #Package for ..........
#library(gWidgets) #Package for gWidgets API for building toolkit-independent, interactive GUIs
#library(gWidgetstcltk) #Package for Toolkit implementation of gWidgets for tcltk package (or gWidgetsGtk2 or gWidgetsrJava or gWidgetsWWW or gWidgetsQt)
library(Hmisc) #Package for data imputation
library(koRpus) #Package for ..........
#library(lmtest) #Package for testing Linear Regression Models
library(mitools) #Package for imputation of missing data
#library(PERregress) #Package for Regression Functions and Datasets
#library(plm) #Package for creating Linear Models for Panel Data
library(plyr) #Package for splitting, applying and combining data
#library(quantmod) #Package for Quantitative Financial Modelling Framework
library(R.oo) #Package for trim() function
#library(RODBC) #Package for ODBC Database Access in R
#library(rpanel) #Package for Simple Interactive Controls with the tcltk Package.
#library(RSiteSearch) #Package for RSiteSearch
#library(sandwich) #Package for Robust Covariance Matrix Estimators
library(sqldf) #Package for performing SQL selects on R Data Frames
library(stringr) #Package for additional string functions
#library(tcltk) #Package for Tcl/Tk Interface
#library(tseries) #Package for time series analysis & computational finance
library(XML)
#library(zoo) #Package for performing time-series analysis
#==============================================================================;
#FUNCTIONS;
cat("SECTION: FUNCTIONS", "\n")
#==============================================================================;
#External Functions
# Load shared helpers from the project-wide functions.R script.
# (progress_function(), called at the bottom of the import loop, is
# presumably defined there -- confirm.)
source(file=paste(function_directory,"functions.R",sep=""),echo=FALSE)
# import_across_row_function <- function(x,data_temp, file_temp,row_NA_first_temp,temp_row_count,temp_headers) {
#
# file_temp <- as.character(file_temp)
#
# cat("COLUMN ",as.numeric(grep(x,sample_data_cols2)),"\n")
# cat("ROW ",row_NA_first_temp," IS FIRST EMPTY ROW", "\n")
# #cat("X ",x, "\n")
# #cat("file_temp ",file_temp, "\n")
# #cat("temp_row_count ",temp_row_count, "\n")
# #cat("temp_headers ",temp_headers, "\n")
# #cat("data_temp col yr ",data_temp[,names(data_temp)==x], "\n")
#
# if (row_NA_first_temp==1)
# {
# #cat("IF", "\n")
#
# xxx_col_num <- as.numeric(match(x,temp_headers))
#
# xxx1 <- as.character(read.csv(file=file_temp,header=TRUE,na.strings="NA",stringsAsFactors=FALSE)[1:temp_row_count,xxx_col_num])
# xxx2 <- as.character(data_temp[(temp_row_count+1):nrow(data_temp),names(data_temp)==x])
# data_temp[,names(data_temp)==x] <- append(xxx1,xxx2)
#
# } else if (row_NA_first_temp>1)
# {
# #cat("ELSE", "\n")
#
# #temp <- data_temp[1:row_NA_first_temp,]
#
#
# } else
# {
# cat("ERROR!!", "\n")
#
# }
#
# return(data_temp)
# }
#Merge two scalar values into one string, treating NA as "absent".
#Returns NA when both inputs are NA, the single non-NA input when exactly one
#is present, and paste(col_one, col_two, sep = separator) when both are
#present.  Applied element-wise through mapply() elsewhere in this script, so
#the inputs are always length-1.
merge_cols_function <- function(col_one,col_two,separator){
  one_na <- is.na(col_one)
  two_na <- is.na(col_two)
  # Scalar && (not the vectorized &) is the correct operator inside if();
  # the original used & on scalar conditions.
  if (one_na && two_na)
  {
    return(NA)
  }
  if (two_na)
  {
    return(col_one)
  }
  if (one_na)
  {
    return(col_two)
  }
  paste(col_one, col_two, sep = separator)
}
#Coerce each column of temp_df to the type declared for it in the
#specification table temp_df_col.
#
#temp_df_col must contain a "colnames" column plus 0/1 indicator columns
#"isnum", "ischar", "isdate" and "isfactor".  A column of temp_df whose name
#is flagged in one of the indicator sets is first cleaned of the various
#"missing" encodings ("", "NA", ...) via gdata::unknownToNA() and then
#coerced to the matching type; dates are parsed from month/day/year text
#("%m/%d/%Y").  Columns not listed in any indicator set are left untouched.
#
#Returns temp_df with the coerced columns.
#(The original header comment, "Function to add path to file names", was
#wrong and has been replaced.)
format_function <- function(temp_df,temp_df_col){
  spec_names <- temp_df_col[,names(temp_df_col)=="colnames"]
  # Helper: names of the spec rows whose given flag column equals 1.
  flagged <- function(flag) {
    spec_names[temp_df_col[,names(temp_df_col)==flag]==1]
  }
  num_cols <- flagged("isnum")
  char_cols <- flagged("ischar")
  date_cols <- flagged("isdate")
  factor_cols <- flagged("isfactor")
  # seq_len() (rather than 1:ncol) is safe when temp_df has zero columns.
  for (i in seq_len(ncol(temp_df)))
  {
    col_name <- names(temp_df)[i]
    # Check order mirrors the original: numeric, character, date, factor.
    target <- if (col_name %in% num_cols)
    {
      "num"
    } else if (col_name %in% char_cols)
    {
      "char"
    } else if (col_name %in% date_cols)
    {
      "date"
    } else if (col_name %in% factor_cols)
    {
      "factor"
    } else
    {
      NA_character_  # not in the spec: leave the column untouched
    }
    if (!is.na(target))
    {
      # Normalize the assorted "unknown" encodings to real NA before coercion
      # (previously duplicated verbatim in all four branches).
      temp_df[,i] <- unknownToNA(temp_df[,i], unknown=c("","NA","NA_character_","NA_Real_", NA),force=TRUE)
      temp_df[,i] <- switch(target,
                            num = as.numeric(temp_df[,i]),
                            char = as.character(temp_df[,i]),
                            # Dates arrive as m/d/Y text (e.g. "12/31/1999").
                            date = as.Date(as.character(temp_df[,i]),format = "%m/%d/%Y"),
                            factor = as.factor(temp_df[,i]))
    }
  }
  return(temp_df)
}
#==============================================================================;
#PREALLOCATE DATA;
cat("SECTION: PREALLOCATE DATA", "\n")
#==============================================================================;
#Start the run timer (elapsed time can be read later with proc.time() - ptm)
ptm <- proc.time()
#Create base column table
#Template column-specification table: one row per column with a display
#order, 0/1 type flags (isnum/ischar/isdate/isfactor) and the column name.
#It is sliced below to build the spec for each preallocated table, and each
#spec is later consumed by format_function() to coerce column types.
temp_data_cols <- as.data.frame(matrix(NA, ncol = 6, nrow = 200))
colnames(temp_data_cols) <- c("order","isnum","ischar","isdate","isfactor","colnames")
temp_data_cols[,1] <- as.numeric(temp_data_cols[,1])
temp_data_cols[,2] <- as.numeric(temp_data_cols[,2])
temp_data_cols[,3] <- as.numeric(temp_data_cols[,3])
temp_data_cols[,4] <- as.numeric(temp_data_cols[,4])
temp_data_cols[,5] <- as.numeric(temp_data_cols[,5])
temp_data_cols[,6] <- as.character(temp_data_cols[,6])
#Sample data table
#44 columns of the merged CRSP/EDGAR fund sample, preallocated to 500k rows.
#Trailing "#DATE" marks columns that hold dates but are kept as character
#(their isdate flag is 0, so they are not parsed by format_function).
sample_data_cols_count <- 44
sample_data <- as.data.frame(matrix(NA, ncol = sample_data_cols_count, nrow = 500000))
#sample_data <- as.data.frame(matrix(NA, ncol = sample_data_cols_count, nrow = 50000))
sample_data_cols <- temp_data_cols[1:sample_data_cols_count,]
sample_data_cols[1,] <- data.frame(order=1,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="yr",stringsAsFactors = FALSE)
sample_data_cols[2,] <- data.frame(order=2,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="cusip8",stringsAsFactors = FALSE)
sample_data_cols[3,] <- data.frame(order=3,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="crsp_fundno",stringsAsFactors = FALSE)
sample_data_cols[4,] <- data.frame(order=4,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="chgdt",stringsAsFactors = FALSE) #DATE
sample_data_cols[5,] <- data.frame(order=5,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="chgenddt",stringsAsFactors = FALSE) #DATE
sample_data_cols[6,] <- data.frame(order=6,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="crsp_portno",stringsAsFactors = FALSE)
sample_data_cols[7,] <- data.frame(order=7,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="crsp_cl_grp",stringsAsFactors = FALSE)
sample_data_cols[8,] <- data.frame(order=8,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="fund_name",stringsAsFactors = FALSE)
sample_data_cols[9,] <- data.frame(order=9,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="nasdaq",stringsAsFactors = FALSE)
sample_data_cols[10,] <- data.frame(order=10,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="ncusip",stringsAsFactors = FALSE)
sample_data_cols[11,] <- data.frame(order=11,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="first_offer_dt",stringsAsFactors = FALSE) #DATE
sample_data_cols[12,] <- data.frame(order=12,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="mgmt_name",stringsAsFactors = FALSE)
sample_data_cols[13,] <- data.frame(order=13,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="mgmt_cd",stringsAsFactors = FALSE)
sample_data_cols[14,] <- data.frame(order=14,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="mgr_name",stringsAsFactors = FALSE)
sample_data_cols[15,] <- data.frame(order=15,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="mgr_dt",stringsAsFactors = FALSE) #DATE
sample_data_cols[16,] <- data.frame(order=16,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="adv_name",stringsAsFactors = FALSE)
sample_data_cols[17,] <- data.frame(order=17,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="open_to_inv",stringsAsFactors = FALSE)
sample_data_cols[18,] <- data.frame(order=18,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="retail_fund",stringsAsFactors = FALSE)
sample_data_cols[19,] <- data.frame(order=19,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="inst_fund",stringsAsFactors = FALSE)
sample_data_cols[20,] <- data.frame(order=20,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="m_fund",stringsAsFactors = FALSE)
sample_data_cols[21,] <- data.frame(order=21,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="index_fund_flag",stringsAsFactors = FALSE)
sample_data_cols[22,] <- data.frame(order=22,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="vau_fund",stringsAsFactors = FALSE)
sample_data_cols[23,] <- data.frame(order=23,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="et_flag",stringsAsFactors = FALSE)
sample_data_cols[24,] <- data.frame(order=24,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="fyear",stringsAsFactors = FALSE)
sample_data_cols[25,] <- data.frame(order=25,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="accession_num",stringsAsFactors = FALSE)
sample_data_cols[26,] <- data.frame(order=26,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="GVKEY",stringsAsFactors = FALSE)
sample_data_cols[27,] <- data.frame(order=27,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="CIK",stringsAsFactors = FALSE)
sample_data_cols[28,] <- data.frame(order=28,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="FDATE",stringsAsFactors = FALSE) #DATE
sample_data_cols[29,] <- data.frame(order=29,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="FINDEXDATE",stringsAsFactors = FALSE) #DATE
sample_data_cols[30,] <- data.frame(order=30,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="LINDEXDATE",stringsAsFactors = FALSE) #DATE
sample_data_cols[31,] <- data.frame(order=31,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="Form",stringsAsFactors = FALSE)
sample_data_cols[32,] <- data.frame(order=32,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="CoName",stringsAsFactors = FALSE)
sample_data_cols[33,] <- data.frame(order=33,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="Fname",stringsAsFactors = FALSE)
sample_data_cols[34,] <- data.frame(order=34,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="PortName",stringsAsFactors = FALSE)
sample_data_cols[35,] <- data.frame(order=35,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="investment_objective",stringsAsFactors = FALSE)
sample_data_cols[36,] <- data.frame(order=36,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="investment_strategy",stringsAsFactors = FALSE)
sample_data_cols[37,] <- data.frame(order=37,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="principal_risks",stringsAsFactors = FALSE)
sample_data_cols[38,] <- data.frame(order=38,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="investment_objective_f",stringsAsFactors = FALSE)
sample_data_cols[39,] <- data.frame(order=39,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="investment_strategy_f",stringsAsFactors = FALSE)
sample_data_cols[40,] <- data.frame(order=40,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="investment_objective_strategy_f",stringsAsFactors = FALSE)
sample_data_cols[41,] <- data.frame(order=41,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="principal_risks_f",stringsAsFactors = FALSE)
sample_data_cols[42,] <- data.frame(order=42,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="Process_IS",stringsAsFactors = FALSE)
sample_data_cols[43,] <- data.frame(order=43,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="Process_R",stringsAsFactors = FALSE)
sample_data_cols[44,] <- data.frame(order=44,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="File",stringsAsFactors = FALSE)
colnames(sample_data) <- sample_data_cols[,6]
#Sample data statistics table
#27 text metrics (counts and readability indices), computed twice per
#filing: once for the investment objective/strategy text ("_iois" suffix)
#and once for the principal-risks text ("_pr" suffix) -- 54 columns total.
sample_data_statistics_cols_count <- 54
sample_data_statistics <- as.data.frame(matrix(NA, ncol = sample_data_statistics_cols_count, nrow = 500000))
#sample_data_statistics <- as.data.frame(matrix(NA, ncol = sample_data_statistics_cols_count, nrow = 50000))
sample_data_statistics_cols <- temp_data_cols[1:(sample_data_statistics_cols_count/2),]
sample_data_statistics_cols[1,] <- data.frame(order=1,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="lines",stringsAsFactors = FALSE)
sample_data_statistics_cols[2,] <- data.frame(order=2,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="sentences",stringsAsFactors = FALSE)
sample_data_statistics_cols[3,] <- data.frame(order=3,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="words",stringsAsFactors = FALSE)
sample_data_statistics_cols[4,] <- data.frame(order=4,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="all_chars",stringsAsFactors = FALSE)
sample_data_statistics_cols[5,] <- data.frame(order=5,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="chars_no_space",stringsAsFactors = FALSE)
sample_data_statistics_cols[6,] <- data.frame(order=6,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="letters_only",stringsAsFactors = FALSE)
sample_data_statistics_cols[7,] <- data.frame(order=7,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="digits",stringsAsFactors = FALSE)
sample_data_statistics_cols[8,] <- data.frame(order=8,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="punct",stringsAsFactors = FALSE)
sample_data_statistics_cols[9,] <- data.frame(order=9,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="conjunctions",stringsAsFactors = FALSE)
sample_data_statistics_cols[10,] <- data.frame(order=10,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="prepositions",stringsAsFactors = FALSE)
sample_data_statistics_cols[11,] <- data.frame(order=11,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="pronouns",stringsAsFactors = FALSE)
sample_data_statistics_cols[12,] <- data.frame(order=12,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="foreign",stringsAsFactors = FALSE)
sample_data_statistics_cols[13,] <- data.frame(order=13,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="num_syll",stringsAsFactors = FALSE)
sample_data_statistics_cols[14,] <- data.frame(order=14,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="normalized_space",stringsAsFactors = FALSE)
sample_data_statistics_cols[15,] <- data.frame(order=15,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="Flesch_Kincaid",stringsAsFactors = FALSE)
sample_data_statistics_cols[16,] <- data.frame(order=16,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="ARI",stringsAsFactors = FALSE)
sample_data_statistics_cols[17,] <- data.frame(order=17,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="Coleman_Liau",stringsAsFactors = FALSE)
sample_data_statistics_cols[18,] <- data.frame(order=18,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="SMOG",stringsAsFactors = FALSE)
sample_data_statistics_cols[19,] <- data.frame(order=19,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="FOG_hard_words",stringsAsFactors = FALSE)
sample_data_statistics_cols[20,] <- data.frame(order=20,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="TTR",stringsAsFactors = FALSE)
sample_data_statistics_cols[21,] <- data.frame(order=21,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="sntc_per_word",stringsAsFactors = FALSE)
sample_data_statistics_cols[22,] <- data.frame(order=22,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="avg_sentc_length",stringsAsFactors = FALSE)
sample_data_statistics_cols[23,] <- data.frame(order=23,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="avg_word_length",stringsAsFactors = FALSE)
sample_data_statistics_cols[24,] <- data.frame(order=24,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="avg_syll_word",stringsAsFactors = FALSE)
sample_data_statistics_cols[25,] <- data.frame(order=25,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="sntc_per100",stringsAsFactors = FALSE)
sample_data_statistics_cols[26,] <- data.frame(order=26,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="syll_per100",stringsAsFactors = FALSE)
sample_data_statistics_cols[27,] <- data.frame(order=27,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="lett_per100",stringsAsFactors = FALSE)
#Double this data.frame because investment objective/strategy & principal_risks
#NOTE(review): the next line is dead code -- its value is immediately
#overwritten by the rbind() that follows.
sample_data_statistics_cols2 <- temp_data_cols[1:sample_data_statistics_cols_count,]
sample_data_statistics_cols2 <- rbind(sample_data_statistics_cols,sample_data_statistics_cols)
sample_data_statistics_cols2[,1] <- rep(1:54, 1)
#Append the "_iois" / "_pr" suffixes to the duplicated metric names.
sample_data_statistics_cols2[1:27,6] <- unlist(mapply(merge_cols_function,col_one=sample_data_statistics_cols2[1:27,6],col_two="_iois",separator="", SIMPLIFY = FALSE,USE.NAMES = FALSE))
sample_data_statistics_cols2[28:54,6] <- unlist(mapply(merge_cols_function,col_one=sample_data_statistics_cols2[28:54,6],col_two="_pr",separator="", SIMPLIFY = FALSE,USE.NAMES = FALSE))
colnames(sample_data_statistics) <- sample_data_statistics_cols2[,6]
#Tokens table
#Per-token tagging output: record ID, the token itself, its description,
#and a 0/1 removal flag.
tokens_all_cols_count <- 4
tokens_all <- as.data.frame(matrix(NA, ncol = tokens_all_cols_count, nrow = 10000000))
#tokens_all <- as.data.frame(matrix(NA, ncol = tokens_all_cols_count, nrow = 1000000))
tokens_all_cols <- temp_data_cols[1:tokens_all_cols_count,]
tokens_all_cols[1,] <- data.frame(order=1,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="ID",stringsAsFactors = FALSE)
tokens_all_cols[2,] <- data.frame(order=2,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="token",stringsAsFactors = FALSE)
tokens_all_cols[3,] <- data.frame(order=3,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="desc",stringsAsFactors = FALSE)
tokens_all_cols[4,] <- data.frame(order=4,isnum=1,ischar=0,isdate=0,isfactor=0,colnames="Remove",stringsAsFactors = FALSE)
colnames(tokens_all) <- tokens_all_cols[,6]
#Readability table
#One row per readability index (index name, flavour, raw score, grade, age).
readbl_all_df_cols_count <- 5
readbl_all_df <- as.data.frame(matrix(NA, ncol = readbl_all_df_cols_count, nrow = 44))
readbl_all_df_cols <- temp_data_cols[1:readbl_all_df_cols_count,]
readbl_all_df_cols[1,] <- data.frame(order=1,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="index",stringsAsFactors = FALSE)
readbl_all_df_cols[2,] <- data.frame(order=2,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="flavour",stringsAsFactors = FALSE)
readbl_all_df_cols[3,] <- data.frame(order=3,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="raw",stringsAsFactors = FALSE)
readbl_all_df_cols[4,] <- data.frame(order=4,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="grade",stringsAsFactors = FALSE)
readbl_all_df_cols[5,] <- data.frame(order=5,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="age",stringsAsFactors = FALSE)
colnames(readbl_all_df) <- readbl_all_df_cols[,6]
#Files table
#Input CSV file names; the file paths column is filled in the IMPORT section.
files_cols_count <- 2
files <- as.data.frame(matrix(NA, ncol = files_cols_count, nrow = 10))
files_cols <- temp_data_cols[1:files_cols_count,]
files_cols[1,] <- data.frame(order=1,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="filename",stringsAsFactors = FALSE)
files_cols[2,] <- data.frame(order=2,isnum=0,ischar=1,isdate=0,isfactor=0,colnames="filepath",stringsAsFactors = FALSE)
colnames(files) <- files_cols[,6]
#Format data
#Apply the declared column types to every preallocated table.
#NOTE(review): sample_data_statistics is formatted against the UNsuffixed
#spec (sample_data_statistics_cols) while its columns carry _iois/_pr
#suffixes, so no names match and no coercion happens -- confirm whether
#sample_data_statistics_cols2 was intended here.
sample_data <- format_function(sample_data,sample_data_cols)
sample_data_statistics <- format_function(sample_data_statistics,sample_data_statistics_cols)
tokens_all <- format_function(tokens_all,tokens_all_cols)
readbl_all_df <- format_function(readbl_all_df,readbl_all_df_cols)
files <- format_function(files,files_cols)
#==============================================================================;
#IMPORT DATA;
cat("SECTION: IMPORT DATA", "\n")
#==============================================================================;
#One CSV of filings per year; files[,2] becomes the full on-disk path.
files[,1] <- c("1999.csv","2000.csv","2001.csv","2002.csv","2003.csv","2004.csv","2005.csv","2006.csv","2007.csv","2008.csv")
files[,2] <- unlist(mapply(merge_cols_function,col_one=data_directory,col_two=files[,1],separator="", SIMPLIFY = FALSE,USE.NAMES = FALSE))
#For each yearly CSV: find the first completely-empty (all-NA) row of the
#preallocated sample_data table, splice the CSV's rows in at that position
#column by column, re-apply the column types, and write a per-file snapshot.
#NOTE(review): each CSV is re-read once per matched column (plus twice more
#for row count and headers) -- correct, but very slow for wide files.
for (j in 1:nrow(files))
{
#j <- 1
#j <- 2
#Index of the next free (all-NA) row in the preallocated table.
row_NA_index_data <- which(rowSums(is.na(sample_data[,1:ncol(sample_data)]))==ncol(sample_data))
row_NA_first_data <- as.numeric(min(row_NA_index_data))
temp_row_count <- as.numeric(nrow(read.csv(file=files[j,2],header=TRUE,na.strings="NA",stringsAsFactors=FALSE)))
temp_headers <- names(read.csv(file=files[j,2],header=TRUE,na.strings="NA",stringsAsFactors=FALSE))
#sample_data_cols2 <- sample_data_cols[1:2]
#zzz1 <- sapply(sample_data_cols,import_across_row_function,
# data_temp=sample_data, file_temp=files[j,2], row_NA_first_temp=row_NA_first_data,temp_row_count=temp_row_count,temp_headers=temp_headers,
# simplify = FALSE, USE.NAMES = FALSE)
#zzz2 <- ldply(zzz1, data.frame)
#colnames(zzz1) <- sample_data_cols
if (row_NA_first_data==1)
{
#Table still empty: write the CSV's rows at the top.
temp_sample_data_col_num <- as.numeric(match("File",names(sample_data)))
sample_data[1:temp_row_count,temp_sample_data_col_num] <- files[j,1]
for (k in 1:nrow(sample_data_cols))
{
#k <- 1
#Position of this sample column in the CSV (NA when the CSV lacks it).
temp_csv_col_num <- as.numeric(match(sample_data_cols[k,6],temp_headers))
if(!(is.na(temp_csv_col_num)))
{
#cat(sample_data_cols[k,6]," is in ",files[j,2], "\n")
#New values from the CSV followed by the untouched remainder of the
#column; append() keeps the total (preallocated) row count unchanged.
xxx1 <- as.character(read.csv(file=files[j,2],header=TRUE,na.strings="NA",stringsAsFactors=FALSE)[1:temp_row_count,temp_csv_col_num])
xxx2 <- as.character(sample_data[(temp_row_count+1):nrow(sample_data),names(sample_data)==sample_data_cols[k,6]])
sample_data[,names(sample_data)==sample_data_cols[k,6]] <- append(xxx1,xxx2)
#sample_data[,names(sample_data)==sample_data_cols[k,6]] <- append(as.character(read.csv(file=files[j,2],header=TRUE,na.strings="NA",stringsAsFactors=FALSE)[1:temp_row_count,temp_csv_col_num]),
# as.character(sample_data[(temp_row_count+1):nrow(sample_data),names(sample_data)==sample_data_cols[k]]))
} else
{
#cat(sample_data_cols[k,6]," not in ",files[j,2], "\n")
}
}
} else if (row_NA_first_data>1)
{
#Table partially filled: splice the CSV's rows in after the existing data.
temp_sample_data_col_num <- as.numeric(match("File",names(sample_data)))
sample_data[row_NA_first_data:(row_NA_first_data+temp_row_count-1),temp_sample_data_col_num] <- files[j,1]
for (k in 1:nrow(sample_data_cols))
{
#k <- 1
temp_csv_col_num <- as.numeric(match(sample_data_cols[k,6],temp_headers))
if(!(is.na(temp_csv_col_num)))
{
#cat(sample_data_cols[k,6]," is in ",files[j,2], "\n")
#Existing rows, then the CSV's rows, then the remaining empty rows.
yyy1 <- sample_data[1:(row_NA_first_data-1),names(sample_data)==sample_data_cols[k,6]]
yyy2 <- as.character(read.csv(file=files[j,2],header=TRUE,na.strings="NA",stringsAsFactors=FALSE)[1:(temp_row_count),temp_csv_col_num])
yyy3 <- as.character(sample_data[(row_NA_first_data+temp_row_count):nrow(sample_data),names(sample_data)==sample_data_cols[k,6]])
sample_data[,names(sample_data)==sample_data_cols[k,6]] <- append(append(yyy1,yyy2),yyy3)
} else
{
#cat(sample_data_cols[k,6]," not in ",files[j,2], "\n")
}
}
} else
{
#Defensive fallback -- row_NA_first_data should always be >= 1.
cat("ERROR!!", "\n")
}
#==============================================================================;
#CLEAN DATA;
#==============================================================================;
#lapply(1:nrow(sample_data), function(x), Equity_Data_import_split[[x]], envir = .GlobalEnv))
#xxx <- apply(sample_data,1,function(x) html2txt(x[1]))
#x <- paste("i", "s", "n", "&", "a", "p", "o", "s", ";", "t", sep = "")
#xmlValue(getNodeSet(htmlParse(x, asText = TRUE), "//p")[[1]])
#Format data
#Re-apply declared column types (the splice above stores everything as text).
sample_data <- format_function(sample_data,sample_data_cols)
#==============================================================================;
#OUTPUT DATA;
#==============================================================================;
#Create temp data.frame of Smaple data where not NA after each iteration
#Snapshot of all filled rows so far, e.g. sample_data000003 after file 3.
assign(paste("sample_data", formatC(j, width=6, format="d", flag="0"), sep = ""), sample_data[!(rowSums(is.na(sample_data[,1:ncol(sample_data)]))==ncol(sample_data)),], envir = .GlobalEnv)
#Create temp data.frame of for each CSV file
temp_output_name <- paste("input_csv", formatC(j, width=6, format="d", flag="0"), sep = "")
#assign(temp_output_name, sample_data[sample_data[,names(sample_data)=="File"]==files[j,1] & !is.na(sample_data[,names(sample_data)=="File"]),], envir = .GlobalEnv)
#Write only the rows that came from this CSV back out to disk.
write.csv(sample_data[sample_data[,names(sample_data)=="File"]==files[j,1] & !is.na(sample_data[,names(sample_data)=="File"]),], file = paste(data_directory,temp_output_name,".csv",sep=""))
#==============================================================================;
#CREATE PROGRESS OUTPUTS;
#==============================================================================;
#Initiate garbage collection
#'NUL' is the Windows null device -- this silences gc()'s printed table.
capture.output(gc(),file='NUL')
#progress_function is expected to come from the sourced functions.R script.
progress_function(outer_loop_count=j, outer_loop_start_val=1, outer_loop_end_val=nrow(files), inner_loop_count=1, inner_loop_start_val=1, inner_loop_end_val=1)
}
#==============================================================================;
#CLEAN DATA;
#==============================================================================;
#sample_data <- sample_data000010
#Find most recent sample_dataXXXXXX
#sample_data_vector <- ls(pattern = 'sample_data[0-9]_*')
#sample_data_last <- max(sample_data_vector)
# Merge the formatted objective and strategy columns into the single
# investment_objective_strategy_f column, separated by a blank line.
#Merge investment_objective_f and investment_strategy_f and place in investment_objective_strategy_f
sample_data[,names(sample_data)=="investment_objective_strategy_f"] <- unlist(mapply(merge_cols_function,col_one=sample_data[,names(sample_data)=="investment_objective_f"],col_two=sample_data[,names(sample_data)=="investment_strategy_f"],separator="\n\n", SIMPLIFY = FALSE,USE.NAMES = FALSE))
# Blank out the source columns now that the merged column holds their text.
#Replace unformatted text with N/A's
sample_data[,names(sample_data)=="investment_objective"] <- rep(NA, nrow(sample_data))
sample_data[,names(sample_data)=="investment_strategy"] <- rep(NA, nrow(sample_data))
sample_data[,names(sample_data)=="principal_risks"] <- rep(NA, nrow(sample_data))
sample_data[,names(sample_data)=="investment_objective_f"] <- rep(NA, nrow(sample_data))
sample_data[,names(sample_data)=="investment_strategy_f"] <- rep(NA, nrow(sample_data))
# Drop rows that are NA in every column.
# NOTE(review): the filtered result is assigned to sample_data2, but the
# statistics loop below still iterates over sample_data — confirm whether
# the loop was meant to use sample_data2.
#Remove all NAs rows left
sample_data2 <- sample_data[!(rowSums(is.na(sample_data[,1:ncol(sample_data)]))==ncol(sample_data)),]
#test <- sample_data[245:260,]
#test_iois <- test[,names(sample_data)=="investment_objective_strategy_f"]
#test_pr <- test[,names(sample_data)=="principal_risks_f"]
#==============================================================================;
#GET STATISTICS;
cat("SECTION: GET STATISTICS", "\n")
#==============================================================================;
# Per-document NLP statistics via koRpus/TreeTagger. For each row of
# sample_data: write the text to a scratch file, POS-tag it, hyphenate,
# score readability, splice this document's tokens into the pre-allocated
# tokens_all dictionary, and record summary statistics in
# sample_data_statistics (both pre-allocated earlier in this script).
for (i in seq_len(nrow(sample_data)))  # seq_len() is safe for 0 rows; 1:nrow() would yield c(1, 0)
{
  # Zero-padded row id used to tag tokens with their source document.
  str_id <- paste("", formatC(i, width=6, format="d", flag="0"), sep = "")
  sample_cell <- as.character(sample_data[i,1])
  # Write the document text, one line per element, to a scratch file for TreeTagger.
  temp_text <- unlist(strsplit(sample_cell, "\n"))
  temp_text_df <- as.data.frame(temp_text)
  names(temp_text_df)[1] <- "temp_text"
  fileConn <- file(paste(data_directory,"temptext.txt",sep=""))
  writeLines(temp_text, fileConn)
  close(fileConn)
  #==============================================================================;
  #kRp.tagged-class;
  #==============================================================================;
  # NOTE(review): "../Data/temptext.txt" must resolve to the same file written
  # via data_directory above — confirm data_directory points at ../Data/.
  tagged_text <- treetag("../Data/temptext.txt", treetagger="manual",lang="en", TT.options=c(path="C:/TreeTagger", preset="en"),debug=FALSE)
  #==============================================================================;
  #kRp.hyphen-class;
  #==============================================================================;
  hyph_text_en <- hyphen(tagged_text,quiet=TRUE)
  #==============================================================================;
  #kRp.readability-class;
  #==============================================================================;
  readbl_text <- suppressWarnings(readability(tagged_text, hyphen=hyph_text_en, index="all",quiet=TRUE))
  # First 44 rows of the readability summary (index name / flavour / grade etc.).
  readbl_all_df[1:44,] <- summary(readbl_text)[1:44,]
  #==============================================================================;
  #FILL IN GLOBAL DICTIONARY;
  #==============================================================================;
  # Find the first still-empty (all-NA) row of tokens_all and splice this
  # document's token/desc columns in at that position, preserving the rest.
  row_NA_index_dict <- which(rowSums(is.na(tokens_all[,1:ncol(tokens_all)]))==ncol(tokens_all))
  row_NA_first_dict <- as.numeric(min(row_NA_index_dict))
  if (row_NA_first_dict==1)
  {
    tokens_all[1:nrow(readbl_text@TT.res),names(tokens_all)=="ID"] <- str_id
    tokens_all[,names(tokens_all)=="token"] <- append(as.character(readbl_text@TT.res[1:nrow(readbl_text@TT.res),names(readbl_text@TT.res)=="token"]),
      as.character(tokens_all[(nrow(readbl_text@TT.res)+1):nrow(tokens_all),names(tokens_all)=="token"]))
    tokens_all[,names(tokens_all)=="desc"] <- append(as.character(readbl_text@TT.res[1:nrow(readbl_text@TT.res),names(readbl_text@TT.res)=="desc"]),
      as.character(tokens_all[(nrow(readbl_text@TT.res)+1):nrow(tokens_all),names(tokens_all)=="desc"]))
  } else if (row_NA_first_dict>1)
  {
    tokens_all[row_NA_first_dict:(row_NA_first_dict+nrow(readbl_text@TT.res)-1),names(tokens_all)=="ID"] <- str_id
    tokens_all[,names(tokens_all)=="token"] <- append(append(as.character(tokens_all[1:(row_NA_first_dict-1),names(tokens_all)=="token"]),
      as.character(readbl_text@TT.res[1:nrow(readbl_text@TT.res),names(readbl_text@TT.res)=="token"])),
      as.character(tokens_all[(row_NA_first_dict+nrow(readbl_text@TT.res)):nrow(tokens_all),names(tokens_all)=="token"]))
    tokens_all[,names(tokens_all)=="desc"] <- append(append(as.character(tokens_all[1:(row_NA_first_dict-1),names(tokens_all)=="desc"]),
      as.character(readbl_text@TT.res[1:nrow(readbl_text@TT.res),names(readbl_text@TT.res)=="desc"])),
      as.character(tokens_all[(row_NA_first_dict+nrow(readbl_text@TT.res)):nrow(tokens_all),names(tokens_all)=="desc"]))
  } else
  {
    # Guard: only reachable if min(row_NA_index_dict) is non-positive/empty.
    cat("ERROR!!", "\n")
  }
  #==============================================================================;
  #COMBINE STATISTICS DATA;
  #==============================================================================;
  # Counts come from the tagged/readability/hyphenation descriptor slots;
  # grade-level indices are pulled out of the readability summary table.
  sample_data_statistics[i,names(sample_data_statistics)=="lines"] <- as.data.frame(tagged_text@desc$lines)
  sample_data_statistics[i,names(sample_data_statistics)=="sentences"] <- as.data.frame(readbl_text@desc$sentences)
  sample_data_statistics[i,names(sample_data_statistics)=="words"] <- as.data.frame(readbl_text@desc$words)
  sample_data_statistics[i,names(sample_data_statistics)=="all_chars"] <- as.data.frame(readbl_text@desc$all.chars)
  sample_data_statistics[i,names(sample_data_statistics)=="chars_no_space"] <- as.data.frame(tagged_text@desc$chars.no.space)
  sample_data_statistics[i,names(sample_data_statistics)=="letters_only"] <- as.data.frame(tagged_text@desc$letters.only)
  sample_data_statistics[i,names(sample_data_statistics)=="digits"] <- as.data.frame(tagged_text@desc$digits)
  sample_data_statistics[i,names(sample_data_statistics)=="punct"] <- as.data.frame(readbl_text@desc$punct)
  sample_data_statistics[i,names(sample_data_statistics)=="conjunctions"] <- as.data.frame(readbl_text@desc$conjunctions)
  sample_data_statistics[i,names(sample_data_statistics)=="prepositions"] <- as.data.frame(readbl_text@desc$prepositions)
  sample_data_statistics[i,names(sample_data_statistics)=="pronouns"] <- as.data.frame(readbl_text@desc$pronouns)
  sample_data_statistics[i,names(sample_data_statistics)=="foreign"] <- as.data.frame(readbl_text@desc$foreign)
  sample_data_statistics[i,names(sample_data_statistics)=="num_syll"] <- as.data.frame(hyph_text_en@desc$num.syll)
  sample_data_statistics[i,names(sample_data_statistics)=="normalized_space"] <- as.data.frame(tagged_text@desc$normalized.space)
  sample_data_statistics[i,names(sample_data_statistics)=="Flesch_Kincaid"] <- as.numeric(readbl_all_df[readbl_all_df[,1]=="Flesch-Kincaid" & readbl_all_df[,2]=="", names(readbl_all_df)=="grade"])
  sample_data_statistics[i,names(sample_data_statistics)=="ARI"] <- as.numeric(readbl_all_df[readbl_all_df[,1]=="ARI" & readbl_all_df[,2]=="", names(readbl_all_df)=="grade"])
  sample_data_statistics[i,names(sample_data_statistics)=="Coleman_Liau"] <- as.numeric(readbl_all_df[readbl_all_df[,1]=="Coleman-Liau" & readbl_all_df[,2]=="", names(readbl_all_df)=="grade"])
  sample_data_statistics[i,names(sample_data_statistics)=="SMOG"] <- as.numeric(readbl_all_df[readbl_all_df[,1]=="SMOG" & readbl_all_df[,2]=="", names(readbl_all_df)=="grade"])
  sample_data_statistics[i,names(sample_data_statistics)=="FOG_hard_words"] <- as.data.frame(readbl_text@desc$FOG.hard.words)
  sample_data_statistics[i,names(sample_data_statistics)=="TTR"] <- as.data.frame(readbl_text@desc$TTR)
  sample_data_statistics[i,names(sample_data_statistics)=="sntc_per_word"] <- as.data.frame(readbl_text@desc$sntc.per.word)
  sample_data_statistics[i,names(sample_data_statistics)=="avg_sentc_length"] <- as.data.frame(readbl_text@desc$avg.sentc.length)
  sample_data_statistics[i,names(sample_data_statistics)=="avg_word_length"] <- as.data.frame(readbl_text@desc$avg.word.length)
  sample_data_statistics[i,names(sample_data_statistics)=="avg_syll_word"] <- as.data.frame(readbl_text@desc$avg.syll.word)
  sample_data_statistics[i,names(sample_data_statistics)=="sntc_per100"] <- as.data.frame(readbl_text@desc$sntc.per100)
  sample_data_statistics[i,names(sample_data_statistics)=="syll_per100"] <- as.data.frame(readbl_text@desc$syll.per100)
  sample_data_statistics[i,names(sample_data_statistics)=="lett_per100"] <- as.data.frame(readbl_text@desc$lett.per100)
  sample_data_statistics[i,names(sample_data_statistics)=="investment_strategy"] <- as.character(sample_data[i,1])
  #==============================================================================;
  #CREATE PROGRESS OUTPUTS;
  #==============================================================================;
  #Initiate garbage collection
  capture.output(gc(),file='NUL')
  progress_function(outer_loop_count=1, outer_loop_start_val=1, outer_loop_end_val=1, inner_loop_count=i, inner_loop_start_val=1, inner_loop_end_val=nrow(sample_data))
}
#==============================================================================;
#COMBINE INDIVIDUAL TOKEN;
cat("COMBINE INDIVIDUAL TOKEN", "\n")
#==============================================================================;
# Drop all-NA rows left over from the pre-allocated tokens_all dictionary.
#Remove all NAs rows left
tokens_all <- tokens_all[!(rowSums(is.na(tokens_all[,1:ncol(tokens_all)]))==ncol(tokens_all)),]
# Flag punctuation/number/symbol tokens with Remove = 1; everything else gets 0.
#Find which rows to remove
tokens_all[!(tokens_all$desc %in% c("Cardinal number","Comma","Sentence ending punctuation","Symbol",
  "Opening bracket","Closing bracket","Quote","End quote"))
  & !(tokens_all$token %in% c("%","&")), "Remove"] <- 0
tokens_all[(tokens_all$desc %in% c("Cardinal number","Comma","Sentence ending punctuation","Symbol",
  "Opening bracket","Closing bracket","Quote","End quote"))
  |(tokens_all$token %in% c("%","&")), "Remove"] <- 1
# Aggregate the kept tokens to one row per (document ID, upper-cased token)
# with a per-document count, via sqldf.
query_tokens <- "select distinct ID, Upper(token) token, Count(token) Count
from tokens_all
where Remove=0
group by ID, Upper(token)"
tokens_all2 <- sqldf(query_tokens)
# Per-document totals: uTotal = number of unique tokens, gTotal = gross count.
#Create uTotal and gTotal
tokens_all2 <- ddply(tokens_all2, "ID", function(x) data.frame(x, uTotal=nrow(x),gTotal=sum(x$Count)) )
# NOTE(review): the raw tokens_all is written out here, not the aggregated
# tokens_all2 computed just above — confirm which table downstream analysis expects.
#Output file
write.csv(tokens_all, file = paste(data_directory,"tokens_all.csv",sep=""))
#==============================================================================;
#DONE;
cat("DONE", "\n")
#==============================================================================;
#Remove temporary data.frames
#rm(i,row_NA_index_dict,row_NA_first_dict,str_id,sample_cell, temp_text,temp_text_df,fileConn,tagged_text,hyph_text_en)
#rm(readbl_text,readbl_all_df,sample_data,sample_data_statistics,query_tokens)
# Elapsed time since ptm was captured at the start of the script.
proc.time() - ptm
|
6914c1e16b9d55eaf444b17d65b0fa0832fb651a
|
deb8293b706ba213c330d43ee2e547227c5365a2
|
/BSCOV/R/RcppExports.R
|
89115dd07b0db6433de1b2ede3156dbb46bf464b
|
[] |
no_license
|
markov10000/BSCOV
|
c6c9acea1bb091e16435159c781548fa0c5d8ddb
|
87cc5e2c914b0b3528274a9585a2aa1ac9dfae00
|
refs/heads/master
| 2023-08-23T17:54:23.949831
| 2021-10-23T22:14:09
| 2021-10-23T22:14:09
| 277,444,752
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 628
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around compiled C++ routines registered with the BSCOV
# package. NOTE(review): this file is generated by Rcpp::compileAttributes()
# (see header above); edits here are overwritten on regeneration — the
# semantics of each routine live in the corresponding C++ source.
func_dc <- function(z) {
  .Call('_BSCOV_func_dc', PACKAGE = 'BSCOV', z)
}
# Variant of func_dc taking additional dmby/dtby arguments — see C++ source.
func_dc_by <- function(z, dmby, dtby) {
  .Call('_BSCOV_func_dc_by', PACKAGE = 'BSCOV', z, dmby, dtby)
}
# Coefficient computation on z with a scale argument — see C++ source.
func_coef <- function(z, scale) {
  .Call('_BSCOV_func_coef', PACKAGE = 'BSCOV', z, scale)
}
# Builds an input object from coefficients and signs — see C++ source.
func_input <- function(coef, sgn) {
  .Call('_BSCOV_func_input', PACKAGE = 'BSCOV', coef, sgn)
}
# Sign-free variant of func_input — see C++ source.
func_input_on <- function(coef) {
  .Call('_BSCOV_func_input_on', PACKAGE = 'BSCOV', coef)
}
|
1a62951298c036e295a7b63648263e01b3088054
|
519b45c11d428a9440b62ab89f20c43341a08419
|
/scripts/3_1_atributos_candidatos.R
|
989671c6a01fd8a1c0d8b55287d0768afaed6a13
|
[
"MIT"
] |
permissive
|
elateneoac/ponencia-saap-2021
|
7cc3ce3cfcb6723cec8fba6247f2f0e913992e1e
|
b75535b0412b893cc6cb1c4a8c22da1d742e4a45
|
refs/heads/main
| 2023-08-16T01:02:49.126403
| 2021-10-13T00:13:53
| 2021-10-13T00:13:53
| 412,637,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,165
|
r
|
3_1_atributos_candidatos.R
|
library(data.table)
library(jsonlite)
library(stringr)
library(ggplot2)
library(ggpubr)
library(ggthemes)
library(ggExtra)
library(treemap)
# 1. General
# Shared palette and data location; oraciones holds the classified sentences.
paleta = 'Dark2'
path_data = '~/repos/elateneoac/ponencia-saap-2021/data/' # path to the 'ponencia-unl/data' folder
oraciones = fread(paste0(path_data,'/oraciones_clasificadas.csv'))
## 3.1. Cabinet ("Gabinete")
# Frequency tables of the substantive (dsustantiva) and evaluative
# (dvalorativa) attribute labels, tagged with their type.
atributos_sustantivos = as.data.table(table(oraciones[,dsustantiva]))[, tipo := 'sustantivo']
atributos_valorativos = as.data.table(table(oraciones[,dvalorativa]))[, tipo := 'valorativo']
# Re-label level codes to display names (the Spanish labels below are
# user-facing plot text and are intentionally left untranslated).
atributos_sustantivos[V1 == 'etica', V1 := 'Ética']
atributos_sustantivos[V1 == 'gestion', V1 := 'Gestión']
atributos_sustantivos[V1 == 'ideologia', V1 := 'Ideología']
atributos_sustantivos[V1 == 'personalidad', V1 := 'Personalidad']
atributos_sustantivos[V1 == 'otras', V1 := 'Otras']
# Convert counts to percentages of the column total.
atributos_sustantivos[, porcentaje := N * 100 / sum(N)]
atributos_valorativos[V1 == 'positiva', V1 := 'Positiva']
atributos_valorativos[V1 == 'negativa', V1 := 'Negativa']
atributos_valorativos[V1 == 'neutra', V1 := 'Neutral']
atributos_valorativos[, porcentaje := N * 100 / sum(N)]
# NOTE(review): atributos is built but not used by the plots below — confirm
# whether a combined chart was intended.
atributos = rbind(atributos_valorativos, atributos_sustantivos)
# Bar chart of substantive attributes, sorted descending by percentage.
ggbarplot(atributos_sustantivos,
          x = 'V1',
          y = 'porcentaje',
          fill = 'V1',
          sort.val = 'desc',
          sort.by.groups = F,
          ggtheme = theme_pander(nomargin = F, lp = 'none'),
          palette = paleta,
          title = 'Atributos sustantivos de los candidatxs',
          xlab = "",
          ylab = "Porcentaje",
          rotate = F)
# ggsave(filename = '~/Documentos/ponencia-ateneo/dibujos/3_1_gabinete_sustantiva.jpeg', limitsize = F)
# Bar chart of evaluative attributes, same layout.
ggbarplot(atributos_valorativos,
          x = 'V1',
          y = 'porcentaje',
          fill = 'V1',
          sort.val = 'desc',
          sort.by.groups = F,
          ggtheme = theme_pander(nomargin = F,lp = 'none'),
          palette = paleta,
          title = 'Atributos valorativos de los candidatxs',
          xlab = "",
          ylab = "Porcentaje",
          rotate = F)
# ggsave(filename = '~/Documentos/ponencia-ateneo/dibujos/3_1_gabinete_valorativa.jpeg', limitsize = F)
|
297aa719c11616f25648792dfd4809f698c60b01
|
1ee90109eb327d979b228174d4c310022439ed0b
|
/app.R
|
59175f4b2447e2639071b08fb0d469e39c961865
|
[] |
no_license
|
jakubsob/plants
|
d66346d7360c44d95fb8c49edb57287b234f3cb9
|
bef449f4d62f23250256d2474ceb8ab719f7b8d3
|
refs/heads/master
| 2023-04-30T16:41:25.054063
| 2021-05-13T15:49:42
| 2021-05-13T15:49:42
| 365,287,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,534
|
r
|
app.R
|
library(shiny)
library(shiny.fluent)
library(shiny.react)
library(shiny.router)
library(shinyjs)
library(sass)
library(R6)
library(owmr)
library(purrr)
library(DT)
library(glue)
library(pool)
library(dplyr)
library(dbplyr)
library(leaflet)
library(promises)
library(future)
library(showtext)
library(ggplot2)
library(ggrepel)
library(DBI)
library(RSQLite)
library(stringr)
# App-wide setup: parallel backend for promises/future, custom font, and the
# search path for box modules.
plan(multicore)
font_add_google("Montserrat", "Montserrat")
showtext_auto()
options(box.path = getwd())
# Load the application's box modules (each exposes ui()/server() pairs).
box::use(
  modules/data_manager,
  modules/header,
  modules/sidebar,
  modules/search_modal,
  modules/home,
  modules/map,
  modules/info,
  modules/download,
  modules/upload_modal
)
# Force-reload modules on every app start (development convenience; a
# production build would typically drop these).
box::reload(data_manager)
box::reload(header)
box::reload(sidebar)
box::reload(map)
box::reload(info)
box::reload(search_modal)
box::reload(home)
box::reload(download)
box::reload(upload_modal)
# Client-side routing: three pages, two with their own server logic.
router <- make_router(
  route("home", home$ui()),
  route("info", info$ui("info"), info$server),
  route("map", map$ui("map"), map$server)
)
# Static assets and SCSS compiled to www/style.css on startup.
addResourcePath("www", "./www")
sass(sass_file("styles/main.scss"), output = "www/style.css", cache = FALSE)
# SQLite connection pool; location comes from config.yml ("db_loc").
pool <- dbPool(
  drv = RSQLite::SQLite(),
  dbname = config::get("db_loc")
)
# Release the pool when the app shuts down.
onStop(function() {
  poolClose(pool)
})
# Page layout: modal dialogs plus a CSS-grid shell of header/sidebar/main,
# with the router rendering the active page into "main".
ui <- fluentPage(
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "www/style.css"),
    tags$script(src = "www/script.js"),
    tags$link(rel = "shortcut icon", href = "www/favicon.ico")
  ),
  suppressDependencies("bootstrap"),
  useShinyjs(),
  tags$body(
    search_modal$ui("search_modal"),
    upload_modal$ui("upload"),
    div(
      class = "grid-container",
      div(class = "header", header$ui()),
      div(class = "sidebar", sidebar$ui("sidebar")),
      div(class = "main", router$ui)
    )
  )
)
# Shiny server: wires per-session state into session$userData so the box
# modules can share it, then delegates to each module's server function.
server <- function(input, output, session) {
  # Session-scoped data manager backed by the pooled SQLite connection.
  session$userData$data_manager <- data_manager$DataManager$new(pool)$reactive()
  # Open/closed state of the two modals, toggled by the buttons below.
  session$userData$search_is_open <- reactiveVal(FALSE)
  session$userData$upload_is_open <- reactiveVal(FALSE)
  # Sidebar selection exposed to the routed pages.
  session$userData$selected <- sidebar$server("sidebar")
  router$server(input, output, session, info_id = "info", map_id = "map")
  download$server(input, output, session)
  upload_modal$server("upload")
  # ids() is a reactive of the ids chosen in the search modal.
  ids <- search_modal$server("search_modal")
  # Header buttons open their respective modals.
  observeEvent(input$add, {
    session$userData$search_is_open(TRUE)
  })
  observeEvent(input$upload, {
    session$userData$upload_is_open(TRUE)
  })
  # Any ids picked in the search modal are added to the data manager.
  observeEvent(ids(), {
    session$userData$data_manager()$add(ids())
  })
}
shinyApp(ui = ui, server = server)
|
0f58d7a0efd4fad695a0060805aff91b5b85b540
|
9972106e39f5cc87ec7c85a3c890d09a253de6bb
|
/man/mhor.rd
|
fcbb5b5b44ddf2ea010da865b3965cc26e335c82
|
[] |
no_license
|
cran/epiDisplay
|
ed688f06f5f7101d851a28857930dd43748b51ee
|
e90693c88a54fe1ecb7c64638de43c3992375de9
|
refs/heads/master
| 2022-05-28T12:16:44.261031
| 2022-05-18T13:20:02
| 2022-05-18T13:20:02
| 38,376,568
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,209
|
rd
|
mhor.rd
|
\name{mhor}
\alias{mhor}
\title{Mantel-Haenszel odds ratio}
\description{Mantel-Haenszel odds ratio calculation and graphing from a stratified case-control study}
\usage{
mhor(..., mhtable = NULL, decimal=2, graph = TRUE, design = "cohort")
}
\arguments{
\item{...}{Three variables viz. 'outcome', 'exposure' and 'stratification'.}
\item{mhtable}{a 2-by-2-by-s table, where s (strata) is more than one}
\item{decimal}{number of decimal places displayed}
\item{graph}{If TRUE (default), produces an odds ratio plot}
\item{design}{Specification for graph; can be "case control","case-control", "cohort" or "prospective"}
}
\details{
'mhor' computes stratum-specific odds ratios with 95 percent confidence intervals; the Mantel-Haenszel odds ratio and its chi-squared test are given, as well as the test of homogeneity across strata. A stratified odds ratio graph is displayed.
}
\author{Virasakdi Chongsuvivatwong
\email{ cvirasak@gmail.com}
}
\seealso{'fisher.test', 'chisq.test'}
\examples{
data(Oswego)
with(Oswego, cc(ill, chocolate))
with(Oswego, mhor(ill, chocolate, sex))
mht1 <- with(Oswego, table(ill, chocolate, sex))
dim(mht1)
mhor(mhtable=mht1) # same results
}
\keyword{array}
|
4839b2afbad2fb918e4a65c919c414e5669e1387
|
c2d1de906b14c5b1f5574d731bc9fc21fcdeecb0
|
/man/varianceBasedfilter.Rd
|
9f6642bcd69f295ecf62f0b94b61a1c6ab707ba3
|
[] |
no_license
|
cran/DSviaDRM
|
b251e0ff6961dc37f2817da4af7571b351e26ed8
|
b81e818ee47dc09206c6dac9d64027e297b89cff
|
refs/heads/master
| 2021-01-01T06:50:55.109878
| 2015-05-12T00:00:00
| 2015-05-12T00:00:00
| 35,482,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,351
|
rd
|
varianceBasedfilter.Rd
|
\name{varianceBasedfilter}
\alias{varianceBasedfilter}
\title{To filter genes according to expression variability}
\description{
Those genes not significantly more variable than the median gene are filtered out.
}
\usage{
varianceBasedfilter(exprs,p)
}
\arguments{
\item{exprs}{a data frame or matrix with rows for variables (genes) and columns for samples.}
\item{p}{the probability cut-off of the chi-squared model of the gene-specific variance-like statistics.}
}
\details{
This is an approximate test of the hypothesis that gene has the same variance as the median variance. A statistical significance criterion
based on the variance can be used. If the significance criterion is chosen, then the variance of the log-values for each gene is compared to
the median of all the variances. The quantity for each gene is compared to a percentile of the chi-square distribution with n-1 degrees of freedom.
Those genes not significantly more variable than the median gene are filtered out [BRB-ArrayTools Version 3.7].
}
\value{
A data frame or matrix with a reduced number of rows.
}
\references{
Dr. Richard Simon & Amy Peng Lam, BRB-ArrayTools (v3.7) User's Manual: 'Log expression variation filter'.
}
\author{Jing Yang}
\examples{
data(exprs1)
varianceBasedfilter(exprs1,0.05)
}
\keyword{gene filtering}
|
f4d6251eba669ba4228e0e9f9cc67867da627286
|
814c9360135107b70a1099ee585b2a5010b2a88e
|
/man/selectionTray.Rd
|
45d8842faee88c93cca54901a7284be4baecfbac
|
[] |
no_license
|
wjawaid/bglab
|
bb5f7844cb956cd184847be8396063e4ae8551f1
|
9c04f65ae15887ba64d5b635f97c84f27a087ec7
|
refs/heads/master
| 2021-01-09T06:18:17.675740
| 2017-04-09T21:39:54
| 2017-04-09T21:39:54
| 80,956,007
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 646
|
rd
|
selectionTray.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap.R
\name{selectionTray}
\alias{selectionTray}
\title{Selection Heatmap}
\usage{
selectionTray(scd, retHeat, pdf = FALSE, data = NULL)
}
\arguments{
\item{scd}{Single Cell Dataset object.}
\item{retHeat}{List returned from heatmap original heatmap.}
\item{pdf}{Will it be output to a pdf}
\item{data}{If the data to heatmap was altered, enter the new data as a matrix.}
}
\value{
Plots heatmap and returns the display.
}
\description{
Displays heatmap to choose area for submap
}
\details{
Displays heatmap to choose area for submap
}
\author{
Wajid Jawaid
}
|
be4b0730fd5feb3be1816c21a603a3ae9fbe0364
|
19969ee90b110cb0e183ae88239be7b277dea257
|
/R several/R_lemon_folder/brkdn.R
|
dc7828e08aff7f06d27c5bd13c2ed11426327b51
|
[] |
no_license
|
atom-box/notes
|
ffcd333dbb151c93d2a77c3e291431855867654d
|
5cd3675d57c8098ab7f702d741b2e34151505e0f
|
refs/heads/master
| 2021-04-06T20:27:55.801962
| 2021-01-09T20:10:47
| 2021-01-09T20:10:47
| 125,290,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,902
|
r
|
brkdn.R
|
# brkdn is a function that attempts to calculate and display means,
# variances and valid ns for the variable that appears on the left
# side of the formula.
# It expects the variables on the right side of the formula to be
# factors, or at worst integers with a very small range.
# It returns a list of "dstat" objects (or a list of lists of "dstat"
# objects if there are two breakdown variables, and so on.)
# A "dstat" object is a matrix that looks like:
# vname1 vname2 ...
# Mean mean1 mean2 ...
# Variance var1 var2 ...
# Valid n n1 n2 ...
# brkdn(formula, dataframe, maxlevels = 10)
#
# Recursively break down the LHS variable of `formula` by the RHS factor(s),
# printing mean/variance/valid-n at each level and returning (invisibly)
# either a matrix of stats (one RHS variable) or a nested list of them
# (multiple RHS variables). Recursion peels off the LAST RHS variable first.
# NOTE(review): output is printed via cat() as a side effect; the returned
# "dstat" class is set on both matrices and lists — confirm that downstream
# print methods handle both shapes.
brkdn<-function(formula,dataframe,maxlevels=10) {
 if(!missing(dataframe) && !missing(formula)) {
  # bn = character vector of variable names: bn[1] is the response,
  # bn[2..nbn] are the breakdown factors.
  bn<-as.character(attr(terms(formula),"variables")[-1])
  nbn<-length(bn)
  cat("\nBreakdown of",bn[1],"by",bn[nbn],"\n")
  if(nbn > 2) {
   # More than one breakdown variable: split on the last one and recurse
   # on the remaining formula within each subset.
   # get the factor for this level
   by.factor<-as.factor(dataframe[[bn[nbn]]])
   factor.levels<-levels(by.factor)
   nlevels<-length(factor.levels)
   if(nlevels > maxlevels) {
    # Guard against exploding output for high-cardinality factors.
    nlevels<-maxlevels
    cat("Too many levels - only using first",maxlevels,"\n")
   }
   brkstats<-as.list(rep(0,nlevels))
   names(brkstats)<-factor.levels
   # calculate the mean for this level
   for(i in 1:nlevels) {
    currentdata<-subset(dataframe,by.factor == factor.levels[i])
    cat(paste("\n",bn[1],sep="",collapse=""),"for",bn[nbn],"- level",factor.levels[i],"\n\n")
    gstat<-desc.stat(currentdata[bn[1]],na.rm=TRUE)
    cat("Mean ",gstat[1],"\n")
    cat("Variance ",gstat[2],"\n")
    cat("n ",gstat[3],"\n")
    # Rebuild the formula without the last RHS term for the recursive call.
    next.formula<-as.formula(paste(paste(bn[1],"~"),paste(bn[2:(nbn-1)],collapse="+")))
    # and call yourself for the next level down
    brkstats[[i]]<-brkdn(next.formula,currentdata)
   }
   class(brkstats)<-"dstat"
   invisible(brkstats)
  }
  else {
   # Base case: a single breakdown variable. Build a 3 x nlevels matrix of
   # Mean/Variance/n, one column per factor level.
   by.factor<-as.factor(dataframe[[bn[2]]])
   factor.levels<-levels(by.factor)
   nlevels<-length(factor.levels)
   if(nlevels > maxlevels) {
    nlevels<-maxlevels
    cat("Too many levels - only using first",maxlevels,"\n")
   }
   gstats<-matrix(NA,ncol=nlevels,nrow=3)
   colnames(gstats)<-factor.levels[1:nlevels]
   rownames(gstats)<-c("Mean","Variance","n")
   # calculate the basic descriptive stats
   if(is.numeric(dataframe[[bn[1]]])) {
    for(i in 1:nlevels) {
     currentdata<-subset(dataframe[[bn[1]]],by.factor == factor.levels[i])
     if(length(currentdata)) gstats[,i]<-desc.stat(currentdata,na.rm=TRUE)
    }
    class(gstats)<-"dstat"
    print(gstats)
   }
   invisible(gstats)
  }
 }
 else cat("Usage: brkdn(formula, dataframe, maxlevels=10)\n")
}
# Basic descriptive statistics for a numeric vector; helper for brkdn().
#
# Args:
#   datavec: numeric vector, possibly containing NA.
#   na.rm:   drop NA values when computing the mean and variance (default TRUE).
#
# Returns:
#   Numeric vector of length 3: mean, variance, count of non-NA values.
desc.stat <- function(datavec, na.rm = TRUE) {
  c(
    mean(datavec, na.rm = na.rm),
    var(datavec, na.rm = na.rm),
    sum(!is.na(datavec))
  )
}
|
46eaa65903996244a3674203b14f1e4c05c58543
|
b60300a6092ae0aea077033b7a57811900a0e970
|
/plot2.R
|
076ca973706c46f4c9ac3df578ccfd43f060e440
|
[] |
no_license
|
iramosp/ExData_Plotting1
|
ebef9a294430db126a91ec4e2360726c76bb87d0
|
703c68e0ac2ff7ce20761f6b1f76cf053b262781
|
refs/heads/master
| 2021-01-09T08:02:52.975416
| 2015-05-10T22:35:15
| 2015-05-10T22:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
plot2.R
|
# Read data
# NOTE(review): nrows = 173000 is a hard-coded cap sized to reach the two
# target dates; it silently truncates if the file layout changes.
data <- read.table("household_power_consumption.txt",
                   nrows = 173000,
                   sep = ";",
                   header = TRUE)
# NOTE(review): s[[2]] and s[[46]] pick the two dates by the ordering of the
# Date factor levels — selecting by explicit date strings would be more
# robust; confirm these positions match the intended dates.
s <- split(data, data$Date)
data <- rbind(s[[2]], s[[46]]) # These are the two dates we need
rm(s) # We no longer need s
# Convert Date and Time columns into date-time (POSIXlt) format:
data[, "Date"] <- as.Date(data$Date, format="%d/%m/%Y")
dates <- data[, 1]
times <- data[, 2]
datetimes <- paste(dates, times)
datetimes <- strptime(datetimes, "%Y-%m-%d %H:%M:%S")
# Plot 2
# Line plot of global active power over time, written to plot2.png.
# Global_active_power was read as a factor (the raw file uses "?" for
# missing), hence the as.character/as.numeric round-trip.
png("plot2.png")
plot(datetimes,
     as.numeric(as.character(data$Global_active_power)), # Convert factor into numeric
     ylab ="Global Active Power (kilowatts)",
     type = "l",
     xlab = "")
dev.off()
|
d87c5ad3be073dd4aace26e96f5e0ebafcee0ecf
|
d8859d0a38e3faa79c72471e4e189e027b7cd096
|
/collate_extra_data.R
|
6159a3f7c47e9d990f319903052b6639fb01666d
|
[] |
no_license
|
m20ty/decoupling_emt_stroma
|
306110017aad4213167951e1a6f575cf0f4da31d
|
00ac33849ccb67e4494817671d67a544f0864b5f
|
refs/heads/master
| 2023-03-09T20:58:17.538150
| 2021-01-05T10:26:05
| 2021-01-05T10:26:05
| 319,970,323
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,148
|
r
|
collate_extra_data.R
|
library(data.table)
library(plyr)
# Metadata for each single-cell dataset: quoted fread() expressions that are
# evaluated lazily, one at a time, inside the loop below so only a single
# dataset is held in memory at once.  Each expression drops cells classified
# as 'ambiguous', a few known problem cell IDs, and bookkeeping columns.
sc_metadata <- list(
    breast_qian = quote(
        fread('../data_and_figures/qian_breast_2020_reclassified.csv')[
            cell_type != 'ambiguous' & id != 'sc5rJUQ064_CCATGTCCATCCCATC',
            -c('patient', 'cell_type_author', 'cell_type_lenient')
        ]
    ),
    crc_lee_smc = quote(
        fread('../data_and_figures/lee_crc_2020_smc_reclassified.csv')[
            cell_type != 'ambiguous',
            -c('patient', 'cell_type_author', 'cell_type_lenient')
        ]
    ),
    hnscc_puram = quote(
        fread('../data_and_figures/puram_hnscc_2017_reclassified.csv')[
            cell_type != 'ambiguous',
            -c('patient', 'cell_type_author')
        ]
    ),
    liver_ma = quote(
        fread('../data_and_figures/ma_liver_2019_reclassified.csv')[
            cell_type != 'ambiguous',
            -c('patient', 'cell_type_author', 'cell_type_lenient')
        ]
    ),
    luad_kim = quote(
        fread('../data_and_figures/kim_luad_2020_reclassified.csv')[
            cell_type != 'ambiguous',
            -c('patient', 'cell_type_author')
        ]
    ),
    lusc_qian = quote(
        fread('../data_and_figures/qian_lung_2020_reclassified.csv')[
            disease == 'LUSC' & cell_type != 'ambiguous',
            -c('patient', 'disease', 'cell_type_author', 'cell_type_lenient')
        ]
    ),
    ovarian_qian = quote(
        fread('../data_and_figures/qian_ovarian_2020_reclassified.csv')[
            cell_type != 'ambiguous' & !(id %in% c('scrSOL001_TCATTTGTCTGTCAAG', 'scrSOL004_TTGCCGTTCTCCTATA')),
            -c('patient', 'cell_type_author', 'cell_type_lenient')
        ]
    ),
    pdac_peng = quote(
        fread('../data_and_figures/peng_pdac_2019_reclassified.csv')[
            cell_type != 'ambiguous' & !(id %in% c('T8_TGGTTCCTCGCATGGC', 'T17_CGTGTAACAGTACACT')),
            -c('patient', 'cell_type_author')
        ]
    )
)
# For each dataset: evaluate its loader, then compute the per-gene difference
# between mean expression in cancer cells and mean expression in CAFs.
# All per-dataset results are stacked into one long table
# (columns: source, gene, diff).
extra_data <- rbindlist(
    lapply(
        names(sc_metadata),
        function(ref) {
            cat(paste0(ref, '\n'))  # progress indicator
            sc_data <- eval(sc_metadata[[ref]])
            # cancer-minus-CAF difference of per-gene column means
            sc_diff <- sc_data[cell_type == 'cancer', colMeans(.SD), .SDcols = -c('id', 'cell_type')] -
                sc_data[cell_type == 'caf', colMeans(.SD), .SDcols = -c('id', 'cell_type')]
            cat('\tDone!\n')
            data.table(source = ref, gene = names(sc_diff), diff = sc_diff)
        }
    )
)
fwrite(extra_data, '../data_and_figures/collated_extra_data.csv')
|
68b9245120be4729ccc5a811079921c9bebfba13
|
6b36aa4957c3d0a6a25113a55a2480e37bd7d537
|
/bigmemory.R
|
885f45aedc2d86a4f54cf1c2d59c6f16e83a6c35
|
[] |
no_license
|
TotallyBullshit/Large-R
|
159ceb0e80f09d55a7838a4a0c0a0ab88a917455
|
b4804090df7f061c4a71ae6fa639daaccfe3b350
|
refs/heads/master
| 2021-01-16T20:58:05.256743
| 2014-05-26T15:23:19
| 2014-05-26T15:23:19
| 20,217,827
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,687
|
r
|
bigmemory.R
|
# install.packages('bigmemory')
library(bigmemory)
# Benchmark scratch script: compares allocation and assignment timings for a
# base R matrix, an in-RAM bigmemory::big.matrix, and file-backed
# big.matrix objects.  The "Time difference" comments are measurements
# recorded on two machines ("MacAir" and "Ledger").
n0 = 2^15
m0 = 2^15
# Check vector (Norm)
# Ram drive (Jason)
# bigmemory - shared and file based
# library use (David)
# --- base R matrix allocation (2^16 x 2^16, ~32 GB of doubles) ---
# Time difference of 59.30802 secs (MacAir)
# Time difference of 58.14366 secs (Ledger)
t0=Sys.time();
a.matrix = matrix(nrow=2^16,ncol=2^16);
Sys.time()-t0
# --- bigmemory in-RAM allocation of the same size ---
# Time difference of 3.551736 mins (MacAir)
# Time difference of 1.410767 mins (Ledger)
t0=Sys.time();
a.big.matrix = big.matrix(nrow=2^16,ncol=2^16);
Sys.time()-t0
# --- single element assignment: base matrix ---
t0=Sys.time();
a.matrix[n0,m0] = 1
Sys.time()-t0
# --- single element assignment: big.matrix ---
# Time difference of 4.76923 mins (MacAir)
# Time difference of 1.26127 mins (Ledger)
# Time difference of 41.0511 secs (Ledger)
t0=Sys.time();
a.big.matrix[n0,m0] = 1
Sys.time()-t0
# --- file-backed matrix on a RAM disk ---
# Time difference of 37.10874 secs (MacAir)
# Time difference of 0.01183 secs (Ledger)
# Time difference of 1.430479 mins (MacAir)
# Time difference of 1.297428 mins (Ledger)
t0=Sys.time();
a.file.matrix =
  filebacked.big.matrix(2^16,
                        2^16,
                        type='double',
                        init=NULL,
                        backingpath='/mnt/ramdisk/',
                        backingfile="Mat32GB.bin",
                        descriptorfile="Mat32GB.desc")
Sys.time()-t0
# --- smaller (8 GB) file-backed matrix, initialized to 1 ---
# NOTE(review): this rebinds a.file.matrix, so the 32 GB matrix above is no
# longer referenced until it is re-attached below.
# Time difference of 15.69226 secs
t0=Sys.time();
a.file.matrix =
  filebacked.big.matrix(2^15,
                        2^15,
                        type='double',
                        init=1,
                        backingfile="Mat8GB.bin",
                        descriptorfile="Mat8GB.desc")
Sys.time()-t0
start.time=Sys.time();
a.file.matrix[n0,m0] = 1
Sys.time()-start.time
# Time difference of 0.005546808 secs (MacAir, Ledger)
# NOTE(review): printing a.file.matrix right after rm() errors with
# "object not found" -- presumably an intentional demonstration that the
# binding is gone while the backing file persists on disk.
rm(a.file.matrix)
a.file.matrix
a.file.matrix = attach.big.matrix("Mat32GB.desc")
a.file.matrix[n0,m0]
# --- row vs column assignment (row access is faster here) ---
# Time difference of 23.82744 secs
# Time difference of 46.74579 secs
# Time difference of 0.01596069 secs
start.time=Sys.time();
a.file.matrix[n0,]=4 # faster
a.file.matrix[,m0]=5 # slower
Sys.time()-start.time
# --- whole-column writes ---
# Time difference of 4.853491 secs
# Time difference of 0.043221 secs
start.time=Sys.time();
a.file.matrix[,m0+0]=1:2^16
a.file.matrix[,m0+1]=runif(2^16)
a.file.matrix[,m0+2]=runif(2^16)
a.file.matrix[,m0+3]=runif(2^16)
Sys.time()-start.time
a.file.matrix[n0,m0] # OK
a.file.matrix[n0,m0+1] # OK
a.file.matrix[n0,m0:(m0+1)] # OK PRECEDENCE!
# --- ordering and in-place permutation by column n0 ---
# Time difference of 4.853491 secs
# Time difference of 0.0122726 secs
start.time=Sys.time();
an.order = morder(x=a.file.matrix,
                  cols=n0,
                  decreasing=TRUE)
Sys.time()-start.time
an.order[1:10]
start.time=Sys.time();
mpermute(a.file.matrix, order=an.order)
Sys.time()-start.time
|
d807923313043a03ed0ddd989d5de68be74fd0eb
|
9d178498df8dcdecac074ba9d392f7a922c08f11
|
/man/add_datetime_columns.Rd
|
587c27b3dd9e311398edb297e6d567e718ad2f2d
|
[] |
no_license
|
Keegan-Evans/pitDataR
|
6b5e7d6c7409810e656fb7d049991e62e768eea0
|
0b6880329c2adac538cb9087271d750e9e035109
|
refs/heads/master
| 2020-04-21T11:30:43.989883
| 2019-07-08T20:41:50
| 2019-07-08T20:41:50
| 169,528,070
| 0
| 0
| null | 2019-07-08T20:41:51
| 2019-02-07T06:19:19
|
R
|
UTF-8
|
R
| false
| true
| 676
|
rd
|
add_datetime_columns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_in.R
\name{add_datetime_columns}
\alias{add_datetime_columns}
\title{add_datetime_columns Adds columns that allow for easier temporal delineation}
\usage{
add_datetime_columns(detection_dataframe)
}
\arguments{
\item{detection_dataframe}{Data read in using
\code{\link{get_detection_data}} that contains the column 'detected_at'
with date/time data in it.}
}
\value{
Original dataframe with additional columns containing integer values
for year, month, week, day, and hour.
}
\description{
add_datetime_columns Adds columns that allow for easier temporal delineation
}
|
09f08afc915ef4ef57e2b24c9194a379237bdd23
|
763e4186462dc996df083d78d99630204d48ecd6
|
/database/db_interface.R
|
ad78a38024cd336d4f0cea19ab812304483c8b4a
|
[] |
no_license
|
ccagrawal/folio
|
0bd8fab15c12a4a955541c2c8073e9eaf35d6f2a
|
34d62fb8bac4b8ae0bc0aa995ff81f6468d242bd
|
refs/heads/master
| 2021-03-12T20:37:22.205724
| 2015-08-22T23:02:06
| 2015-08-22T23:02:06
| 40,704,765
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,476
|
r
|
db_interface.R
|
library(quantmod)
source('./database/db_management.R')
# Query the Stocks table, optionally filtered by any combination of:
#   ticker   - character vector of ticker symbols (matched via IN)
#   name     - character vector of stock names (matched via IN)
#   quantity - one of 'positive', 'negative', 'nonzero'
#   volume   - 'positive', or a numeric lower bound (Volume >= volume)
#   notes    - SQL LIKE pattern matched against Notes
# Missing arguments impose no filter.  Returns the data frame produced by
# RunQuery().
# SECURITY NOTE(review): the SQL is assembled by string concatenation, so
# arguments are trusted -- do not pass untrusted input (SQL injection risk).
GetStocks <- function(ticker, name, quantity, volume, notes) {
  sql <- "SELECT * FROM Stocks"
  # Conditions will contain each WHERE clause
  conditions <- c()
  # Go through each parameter and add a clause if necessary
  if (!missing(ticker)) {
    conditions <- c(conditions, paste0("Ticker IN ('", paste(ticker, collapse = "\', \'"), "')"))
  }
  if (!missing(name)) {
    conditions <- c(conditions, paste0("Name IN ('", paste(name, collapse = "\', \'"), "')"))
  }
  if (!missing(quantity)) {
    if (quantity == 'positive') {
      conditions <- c(conditions, "Quantity > 0")
    } else if (quantity == 'negative') {
      conditions <- c(conditions, "Quantity < 0")
    } else if (quantity == 'nonzero') {
      conditions <- c(conditions, "Quantity != 0")
    }
  }
  if (!missing(volume)) {
    if (volume == 'positive') {
      conditions <- c(conditions, "Volume > 0")
    } else {
      # Any non-'positive' value is treated as a numeric lower bound.
      conditions <- c(conditions, paste0("Volume >= ", volume))
    }
  }
  if (!missing(notes)) {
    conditions <- c(conditions, paste0("Notes LIKE \"", notes, "\""))
  }
  # Create full sql statement by collapsing conditions
  if (length(conditions) > 0) {
    sql <- paste0(sql, " WHERE ", paste(conditions, collapse = " AND "), ";")
  } else {
    sql <- paste0(sql, ";")
  }
  return(RunQuery(sql))
}
# Insert a new row into Stocks or update an existing one (keyed by Ticker)
# using SQLite's INSERT OR REPLACE.  `quantity` is added to the stored
# position and abs(quantity) to the cumulative traded Volume; name, price
# and notes overwrite stored values only when supplied, otherwise the
# existing values are retained via correlated subqueries.
# action = 'delete' reverses a previously recorded trade: the quantity is
# negated and its volume subtracted.
# SECURITY NOTE(review): SQL is built by string concatenation; arguments
# must be trusted.
UpdateStock <- function(ticker, name, price, quantity, notes, action = 'add') {
  sql <- "INSERT OR REPLACE INTO Stocks(ID, Ticker, Quantity, Volume, Name, Price, Notes) "
  values <- paste0("VALUES((SELECT ID FROM Stocks WHERE Ticker = \"", ticker, "\"), \"", ticker, "\"")
  # If quantity not included, just set it to 0
  if (missing(quantity)) {
    quantity <- 0
  }
  # If we're deleting an action, flip the quantity and volume
  if (action == 'delete') {
    quantity <- -quantity
    volume <- -abs(quantity)
  } else {
    volume <- abs(quantity)
  }
  # COALESCE keeps the arithmetic valid when the row does not exist yet.
  values <- paste0(values, ",
    COALESCE((SELECT Quantity FROM Stocks WHERE Ticker = \"", ticker, "\"), 0) + ", quantity, ",
    COALESCE((SELECT Volume FROM Stocks WHERE Ticker = \"", ticker, "\"), 0) + ", volume)
  # Add name, price, and notes if they were supplied
  if (!missing(name)) {
    values <- paste0(values, ", \"", name, "\"")
  } else {
    values <- paste0(values, ", (SELECT Name FROM Stocks WHERE Ticker = \"", ticker, "\")")
  }
  if (!missing(price)) {
    values <- paste0(values, ", ", price)
  } else {
    values <- paste0(values, ", (SELECT Price FROM Stocks WHERE Ticker = \"", ticker, "\")")
  }
  if (!missing(notes)) {
    values <- paste0(values, ", \"", notes, "\"")
  } else {
    values <- paste0(values, ", (SELECT Notes FROM Stocks WHERE Ticker = \"", ticker, "\")")
  }
  sql <- paste0(sql, values, ");")
  RunQuery(sql)
}
# Refresh Name and Price for every ticker in Stocks using Yahoo quotes
# (quantmod::getQuote).  Price is a size-weighted bid/ask midpoint
# (labelled VWAP here) when bid/ask sizes are available, falling back to the
# last trade price when that midpoint is NA.  Updated values are written
# back through a temporary table joined on Ticker.
UpdateStockPrices <- function() {
  sql <- "SELECT Ticker FROM Stocks;"
  tickers <- paste(RunQuery(sql)$Ticker, collapse = ";")
  # Get current VWAP if exists, else just get Last Trade Price
  results <- getQuote(tickers, what = yahooQF(c("Name", "Last Trade (Price Only)", "Bid Size", "Bid", "Ask", "Ask Size")))
  # Size-weighted midpoint: bid weighted by ask size, ask by bid size.
  results$VWAP <- (results$Bid * results$`Ask Size` + results$Ask * results$`Bid Size`) / (results$`Ask Size` + results$`Bid Size`)
  results$Price <- results$VWAP
  results[which(is.na(results$Price)), 'Price'] <- results[which(is.na(results$Price)), 'Last']
  # Clean up data frame
  results$Price <- round(results$Price, digits = 2)
  results$Ticker <- row.names(results)
  results <- results[, c('Ticker', 'Name', 'Price')]
  # Create temp table; join tables to update price and name; then remove table
  WriteTable("Temp", results)
  sql <- "UPDATE Stocks
          SET Name = (SELECT Name
                      FROM temp
                      WHERE Stocks.Ticker = Temp.Ticker),
              Price = (SELECT Price
                       FROM temp
                       WHERE Stocks.Ticker = Temp.Ticker)
          WHERE Ticker IN (SELECT Ticker
                           FROM Temp);"
  RunQuery(sql)
  RemoveTable("Temp")
}
# Query stock transactions (Actions_Stock joined to Stocks), optionally
# filtered by any combination of the arguments.  Range arguments
# (timestamp, price, cash.change) accept a lower bound or a c(lower, upper)
# pair; quantity is 'positive'/'negative'/'nonzero'; purpose and notes are
# SQL LIKE patterns.  Missing arguments impose no filter.
# Fixes relative to the original:
#   * WHERE-clause column references are table-qualified -- ID, Quantity,
#     Price and Notes exist in both joined tables, so unqualified names were
#     ambiguous;
#   * ticker values are quoted as string literals, matching GetStocks().
# SECURITY NOTE(review): SQL is built by string concatenation; arguments
# must be trusted (SQL injection risk otherwise).
GetActionsStock <- function(id, timestamp, ticker, price, quantity, cash.change, purpose, notes) {
  sql <- "SELECT Actions_Stock.ID,
          Actions_Stock.Timestamp,
          Stocks.Ticker,
          Actions_Stock.Price,
          Actions_Stock.Quantity,
          Actions_Stock.Fees,
          Actions_Stock.CashChange,
          Actions_Stock.Purpose,
          Actions_Stock.Notes
          FROM Actions_Stock
          JOIN Stocks ON Actions_Stock.Stock = Stocks.ID"
  # Conditions will contain each WHERE clause
  conditions <- c()
  # Go through each parameter and add a clause if necessary
  if (!missing(id)) {
    conditions <- c(conditions, paste0("Actions_Stock.ID = ", id))
  }
  if (!missing(timestamp)) {
    timestamp <- as.numeric(timestamp)
    conditions <- c(conditions, paste0("Actions_Stock.Timestamp >= ", timestamp[1]))
    if (length(timestamp) == 2) {
      conditions <- c(conditions, paste0("Actions_Stock.Timestamp <= ", timestamp[2]))
    }
  }
  if (!missing(ticker)) {
    # Tickers are strings, so they must be quoted (same style as GetStocks).
    conditions <- c(conditions, paste0("Stocks.Ticker IN ('", paste(ticker, collapse = "\', \'"), "')"))
  }
  if (!missing(price)) {
    conditions <- c(conditions, paste0("Actions_Stock.Price >= ", price[1]))
    if (length(price) == 2) {
      conditions <- c(conditions, paste0("Actions_Stock.Price <= ", price[2]))
    }
  }
  if (!missing(quantity)) {
    if (quantity == 'positive') {
      conditions <- c(conditions, "Actions_Stock.Quantity > 0")
    } else if (quantity == 'negative') {
      conditions <- c(conditions, "Actions_Stock.Quantity < 0")
    } else if (quantity == 'nonzero') {
      conditions <- c(conditions, "Actions_Stock.Quantity != 0")
    }
  }
  if (!missing(cash.change)) {
    conditions <- c(conditions, paste0("Actions_Stock.CashChange >= ", cash.change[1]))
    if (length(cash.change) == 2) {
      conditions <- c(conditions, paste0("Actions_Stock.CashChange <= ", cash.change[2]))
    }
  }
  if (!missing(purpose)) {
    conditions <- c(conditions, paste0("Actions_Stock.Purpose LIKE \"", purpose, "\""))
  }
  if (!missing(notes)) {
    conditions <- c(conditions, paste0("Actions_Stock.Notes LIKE \"", notes, "\""))
  }
  # Create full sql statement by collapsing conditions
  if (length(conditions) > 0) {
    sql <- paste0(sql, " WHERE ", paste(conditions, collapse = " AND "), ";")
  } else {
    sql <- paste0(sql, ";")
  }
  return(RunQuery(sql))
}
# Delete a stock transaction by its Actions_Stock ID, reversing the effects
# it had when recorded: the cash movement is undone in Funds and the
# position/volume change is undone in Stocks.  Returns the deleted action's
# row (as fetched before deletion).
DeleteActionsStock <- function(id) {
  # Fetch the row first so its effects can be reversed.
  removed <- GetActionsStock(id)
  # Undo the cash movement this action caused.
  UpdateFunds(-removed[1, "CashChange"])
  # Undo the position/volume change ('delete' flips the signs).
  UpdateStock(
    ticker = removed[1, "Ticker"],
    quantity = removed[1, "Quantity"],
    action = "delete"
  )
  # Finally drop the action record itself.
  RunQuery(paste0("DELETE FROM Actions_Stock WHERE ID = ", id, ";"))
  removed
}
# Record a new stock transaction, or -- when `id` is given -- replace an
# existing one.  Updating works by deleting the old action (which reverses
# its effect on Funds and the stock position) and re-inserting with the new
# values; Purpose/Notes fall back to the old action's values when not
# supplied.  cash.change defaults to -(price * quantity) - fees.
# Fix relative to the original: the fallback conditions used the
# non-short-circuiting `&`, so calling without `id` (and without purpose or
# notes) errored on the undefined `action.info`; `&&` short-circuits and
# only touches action.info when id was supplied.
# NOTE(review): this function does not call UpdateFunds() with the new
# cash.change -- confirm whether callers apply the cash movement separately.
# SECURITY NOTE(review): SQL is built by string concatenation; arguments
# must be trusted.
UpdateActionsStock <- function(id, timestamp, ticker, price, quantity, fees, cash.change, purpose, notes) {
  sql <- "INSERT OR REPLACE INTO Actions_Stock("
  values <- paste0("VALUES(")
  if (!missing(id)) {
    # Remove the old action (reversing its effects) and keep its row so
    # Purpose/Notes can be carried over below.
    action.info <- DeleteActionsStock(id)
    sql <- paste0(sql, "ID, ")
    values <- paste0(values, id, ", ")
  }
  # Make timestamp an integer if it isn't
  timestamp <- as.numeric(timestamp)
  # If cash.change not supplied, just calculate it
  if (missing(cash.change)) {
    cash.change <- -1 * price * quantity - fees
  }
  # Update Stock table
  UpdateStock(ticker = ticker, quantity = quantity)
  # Get Stock ID
  stock.sql <- paste0("SELECT ID FROM Stocks WHERE Ticker = \"", ticker, "\"")
  stock.id <- RunQuery(stock.sql)[1, 1]
  sql <- paste0(sql, "Timestamp, Stock, Price, Quantity, Fees, CashChange")
  values <- paste0(values,
                   timestamp, ", ",
                   stock.id, ", ",
                   price, ", ",
                   quantity, ", ",
                   fees, ", ",
                   cash.change)
  # Add purpose and notes if they were supplied (or carry over old values).
  if (!missing(purpose)) {
    sql <- paste0(sql, ", Purpose")
    values <- paste0(values, ", \"", purpose, "\"")
  } else if (!missing(id) && !is.na(action.info[1, 'Purpose'])) {
    sql <- paste0(sql, ", Purpose")
    values <- paste0(values, ", \"", action.info[1, 'Purpose'], "\"")
  }
  if (!missing(notes)) {
    sql <- paste0(sql, ", Notes")
    values <- paste0(values, ", \"", notes, "\"")
  } else if (!missing(id) && !is.na(action.info[1, 'Notes'])) {
    sql <- paste0(sql, ", Notes")
    values <- paste0(values, ", \"", action.info[1, 'Notes'], "\"")
  }
  sql <- paste0(sql, ") ", values, ");")
  RunQuery(sql)
}
# Return the current cash balance (the Quantity of the single Funds row).
GetFunds <- function() {
  funds <- RunQuery("SELECT * FROM Funds")
  funds[1, "Quantity"]
}
# Apply a signed delta to the cash balance (the single Funds row, ID = 1).
UpdateFunds <- function(quantity) {
  RunQuery(paste0(
    "UPDATE Funds
    SET Quantity = Quantity + ", quantity, "
    WHERE ID = 1"
  ))
}
# Outdated
# Record a stock-option trade (marked "Outdated" by the author).  Builds the
# OCC-style option name, ensures the underlying exists in Stocks, inserts or
# updates the StockOptions row, records the transaction in
# Actions_StockOption, and adjusts Funds by cash.change
# (default: -price * quantity - fees).
# Fixes relative to the original:
#   * the option name embedded a hard-coded strike of 120 instead of
#     `strike`;
#   * the underlying INSERT referenced the undefined variable `ticker`
#     (should be `underlying`);
#   * the StockOptions INSERT had a trailing comma before ")" (SQL syntax
#     error) and inserted the expiration date unquoted (SQLite evaluates an
#     unquoted "2020-01-17" as integer subtraction);
#   * both UPDATE statements had a trailing comma before WHERE (syntax
#     error).
# SECURITY NOTE(review): all SQL is built by string concatenation --
# arguments must be trusted.
add.action.stock.option <- function(timestamp, underlying, type, expiration, strike, price, quantity, fees, cash.change, purpose, notes) {
  # Make option name: underlying + yymmdd + C/P + strike*1000 zero-padded.
  name <- paste0(
    underlying,
    format(expiration, '%y%m%d'),
    substr(type, 0, 1),
    gsub("\\.", "", sprintf("%09.03f", strike))
  )
  # Make timestamp an integer if it isn't
  timestamp <- as.numeric(timestamp)
  # If cash.change not supplied, just calculate it
  if (missing(cash.change)) {
    cash.change <- -1 * price * quantity - fees
  }
  conn <- dbConnect(drv = SQLite(), dbname = db.name)
  # Check if underlying exists in underlying table
  sql <- paste0(
    "SELECT * FROM Stocks
    WHERE Ticker = \"", underlying, "\";"
  )
  underlying.info <- dbGetQuery(conn, sql)
  # If underlying doesn't exist, add it
  if (nrow(underlying.info) == 0) {
    sql <- paste0(
      "INSERT INTO Stocks(Ticker, Quantity, Volume)
      VALUES(\"", underlying, "\", 0, 0);"
    )
    dbGetQuery(conn, sql)
    sql <- "SELECT last_insert_rowid();"
    underlying.id <- dbGetQuery(conn, sql)[1, 1]
  } else {
    underlying.id <- underlying.info[1, 'ID']
  }
  # Check if stock option exists in stock options table
  sql <- paste0(
    "SELECT * FROM StockOptions
    WHERE Name = \"", name, "\";"
  )
  asset.info <- dbGetQuery(conn, sql)
  # If stock option doesn't exist, add it; else, update stock option's quantity and volume
  if (nrow(asset.info) == 0) {
    sql <- paste0(
      "INSERT INTO StockOptions(Name, Underlying, Type, Expiration, Strike, Quantity, Volume)
      VALUES(
        \"", name, "\", ",
      underlying.id, ", ",
      "\"", type, "\", ",
      "\"", expiration, "\", ",
      strike, ", ",
      quantity, ", ",
      quantity,
      ");"
    )
    dbGetQuery(conn, sql)
    sql <- "SELECT last_insert_rowid();"
    asset.id <- dbGetQuery(conn, sql)[1, 1]
  } else {
    asset.id <- asset.info[1, 'ID']
    sql <- paste0(
      "UPDATE StockOptions
      SET Quantity = Quantity + ", quantity, ",
      Volume = Volume + ", abs(quantity), "
      WHERE Name = \"", name, "\";"
    )
    dbGetQuery(conn, sql)
  }
  # Add transaction into actions table
  sql <- paste0(
    "INSERT INTO Actions_StockOption(Timestamp, Option, Price, Quantity, Fees, CashChange)
    VALUES(",
    timestamp, ", ",
    asset.id, ", ",
    price, ", ",
    quantity, ", ",
    fees, ", ",
    cash.change,
    ");"
  )
  dbGetQuery(conn, sql)
  sql <- "SELECT last_insert_rowid();"
  action.id <- dbGetQuery(conn, sql)[1, 1]
  # Add purpose and notes if they exist
  if (!missing(purpose)) {
    sql <- paste0(
      "UPDATE Actions_StockOption
      SET Purpose = \"", purpose, "\"
      WHERE ID = ", action.id, ";"
    )
    dbGetQuery(conn, sql)
  }
  if (!missing(notes)) {
    sql <- paste0(
      "UPDATE Actions_StockOption
      SET Notes = \"", notes, "\"
      WHERE ID = ", action.id, ";"
    )
    dbGetQuery(conn, sql)
  }
  # Update funds
  sql <- paste0(
    "UPDATE Funds
    SET Quantity = Quantity + ", cash.change, "
    WHERE ID = 1;"
  )
  dbGetQuery(conn, sql)
  dbDisconnect(conn)
}
# Record a cash movement (deposit/withdrawal via `method`) in Actions_Fund
# and apply cash.change to the Funds balance.
# Fix relative to the original: the Notes UPDATE had a trailing comma before
# WHERE, which is a SQL syntax error.
# SECURITY NOTE(review): SQL is built by string concatenation; arguments
# must be trusted.
add.action.fund <- function(timestamp, method, cash.change, notes) {
  # Make timestamp an integer if it isn't
  timestamp <- as.numeric(timestamp)
  conn <- dbConnect(drv = SQLite(), dbname = db.name)
  # Add transaction into actions table
  sql <- paste0(
    "INSERT INTO Actions_Fund(Timestamp, Method, CashChange)
    VALUES(",
    timestamp, ", ",
    "\"", method, "\", ",
    cash.change,
    ");"
  )
  dbGetQuery(conn, sql)
  sql <- "SELECT last_insert_rowid();"
  action.id <- dbGetQuery(conn, sql)[1, 1]
  # Add notes if they are there
  if (!missing(notes)) {
    sql <- paste0(
      "UPDATE Actions_Fund
      SET Notes = \"", notes, "\"
      WHERE ID = ", action.id, ";"
    )
    dbGetQuery(conn, sql)
  }
  # Update funds
  sql <- paste0(
    "UPDATE Funds
    SET Quantity = Quantity + ", cash.change, "
    WHERE ID = 1;"
  )
  dbGetQuery(conn, sql)
  dbDisconnect(conn)
}
|
007d171c75298f07e2d80e7605d72cad3db813fd
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/modules/assim.sequential/inst/sda_backup/bmorrison/extract_lai_agb_data.R
|
8b6b7754ea2f8b5382588fb71382f31e96cc5229
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| false
| 11,364
|
r
|
extract_lai_agb_data.R
|
# Workflow script: extract MODIS LAI and LandTrendr AGB observations for the
# PEcAn multi-site state data assimilation (SDA) "500 site run".
# NOTE(review): rm(list = ls()) / closeAllConnections() in a script is a
# known anti-pattern (wipes the caller's workspace); kept because this is a
# standalone batch script.
rm(list=ls(all=TRUE)) # clear workspace
graphics.off() # close any open graphics
closeAllConnections() # close any open connections to files
#--------------------------------------------------------------------------------------------------#
#---------------- Load required libraries ---------------------------------------------------------#
library(PEcAn.all)
library(PEcAn.SIPNET)
library(PEcAn.LINKAGES)
library(PEcAn.visualization)
library(PEcAnAssimSequential)
library(nimble)
library(lubridate)
# NOTE(review): PEcAn.visualization is loaded twice above/below.
library(PEcAn.visualization)
#PEcAnAssimSequential::
library(rgdal) # need to put in assim.sequential
library(ncdf4) # need to put in assim.sequential
library(purrr)
library(listviewer)
library(dplyr)
library(furrr)
library(tictoc)
# Base directory for all inputs/outputs of this run.
work_dir <- "/data/bmorrison/sda/500_site_run"
# delete an old run
#unlink(c('run','out','SDA'),recursive = T)
# grab multi-site XML file
# Read the multi-site SDA settings and, when a sitegroup is declared, expand
# it into one run block per site (optionally capped at nSite sites).
settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_sitegroup.xml")
if ("sitegroup" %in% names(settings)){
  if (is.null(settings$sitegroup$nSite)){
    # No site cap: expand the whole sitegroup.
    settings <- PEcAn.settings::createSitegroupMultiSettings(settings,
                                                             sitegroupId = settings$sitegroup$id)
  } else {
    # Expand only the first nSite sites of the group.
    settings <- PEcAn.settings::createSitegroupMultiSettings(settings,
                                                             sitegroupId = settings$sitegroup$id,
                                                             nSite = settings$sitegroup$nSite)
  }
  settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading
}
# doesn't work for one site
# Collect the site ID from every run block of the multi-site settings.
# Fixes relative to the original:
#   * direct list indexing replaces the eval(parse(text = ...)) string hack
#     (run elements are named "settings.1", "settings.2", ... exactly as the
#     original string construction assumed);
#   * seq_len() replaces seq_along(1:length(...)), which iterated twice when
#     settings$run was empty.
observation <- c()
for (i in seq_len(length(settings$run))) {
  obs <- settings$run[[paste0("settings.", i)]]$site$id
  observation <- c(observation, obs)
}
# what is this step for???? is this to get the site locations for the map??
# Build a character vector of site IDs when settings is a MultiSettings
# collection (used later for labelling/mapping).
# NOTE(review): inherits(settings, "MultiSettings") would be the idiomatic
# class test.
if ("MultiSettings" %in% class(settings)) site.ids <- settings %>%
  map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character()
# sample from parameters used for both sensitivity analysis and Ens
# get.parameter.samples(settings,
# ens.sample.method = settings$ensemble$samplingspace$parameters$method)
# ## Aside: if method were set to unscented, would take minimal changes to do UnKF
# #--------------------------------------------------------------------------------------------------#
############################ EXTRACT SITE INFORMATION FROM XML TO DOWNLOAD DATA + RUN SDA ###########################
################ Not working on interactive job on MODEX
observations = observation
# Extract MODIS LAI standard deviation for the sites in batches of 10
# (MODISTools requests are slow, and intermediate results are checkpointed
# to disk after every batch).
# NOTE(review): 16 batches x 10 sites only covers the first 160 entries of
# `observations` -- confirm this matches the intended site count for the
# "500 site run".
lai_data = data.frame()
for (i in 1:16)
{
  # Index window of this batch within `observations`.
  start = (1+((i-1)*10))
  end = start+9
  obs = observations[start:end]
  # print() used for progress logging; assignments are incidental.
  working = print(paste("working on: ", i))
  sites = print(obs)
  PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****")
  # Open a BETY database connection and look up lat/lon for the batch sites.
  bety <- list(user='bety', password='bety', host='localhost',
               dbname='bety', driver='PostgreSQL',write=TRUE)
  con <- PEcAn.DB::db.open(bety)
  bety$con <- con
  site_ID <- obs
  suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon,
                                              ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})",
                                              ids = site_ID, .con = con))
  suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry))
  suppressWarnings(qry_results <- DBI::dbFetch(qry_results))
  site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat,
                    lon=qry_results$lon, time_zone=qry_results$time_zone)
  # Pull the MODIS LAI standard-deviation band for the full product period.
  lai = call_MODIS(outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"),
                   run_parallel = TRUE, ncores = 10, product = "MOD15A2H", band = "LaiStdDev_500m",
                   package_method = "MODISTools", QC_filter = TRUE, progress = FALSE)
  lai_data = rbind(lai_data, lai)
  # Checkpoint the accumulated results after each batch.
  lai_sd = lai_data
  save(lai_sd, file = paste('/data/bmorrison/sda/500_site_run/lai_sd_sites_', i, '.Rdata', sep = ""))
}
# Re-query site metadata for the complete site list, then extract LandTrendr
# aboveground biomass (median and stdev) for every site and reshape the
# wide (one column per date) tables into long format.
# Fix relative to the original: the "save AGB data into long style" line
# was missing its leading '#', which was a parse error that prevented the
# script from sourcing at all.
observation = observations
PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****")
bety <- list(user='bety', password='bety', host='localhost',
             dbname='bety', driver='PostgreSQL',write=TRUE)
con <- PEcAn.DB::db.open(bety)
bety$con <- con
site_ID <- observation
suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon,
                                            ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})",
                                            ids = site_ID, .con = con))
suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry))
suppressWarnings(qry_results <- DBI::dbFetch(qry_results))
site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat,
                  lon=qry_results$lon, time_zone=qry_results$time_zone)
# # output folder for the data
data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data"
# # extract the data
med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean",
                                       data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]]
sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean",
                                        data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]]
#
# ndates = colnames(med_agb_data)[-c(1:2)]
#
# Melt the per-date columns into a long (Site_ID, Date, value) layout.
med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE)
med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)])
sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE)
sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)])
agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value))
names(agb_data) = c("Site_ID", "Date", "Median", "SD")
agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE)
# save AGB data into long style
save(agb_data, file = '/data/bmorrison/sda/500_site_run/agb_data_sites.Rdata')
######### calculate peak_lai
# already in long format style for dataframe
# Fix relative to the original: a stray lone `h` on its own line (a leftover
# typo) has been removed -- it aborted the script with
# "object 'h' not found".
names(lai_sd) = c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc")
# Attach the LAI standard-deviation column alongside the LAI values.
output = cbind(lai_data, lai_sd$sd)
names(output) = c(names(lai_data), "sd")
#output = as.data.frame(data)
save(output, file = '/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata')
# remove extra data: keep site_id, date, value and sd columns, then rename
# to match the AGB long-format layout (Site_ID, Date, Median, SD).
output = output[,c(5, 2, 9, 11)]
colnames(output) = names(agb_data)
# compute peak lai per year
# For every year x site: the "peak" LAI is the maximum observation at or
# below the 95th percentile (to discard outlier spikes).
# Fix relative to the original: the inner loop iterates over `sites`
# (sites actually present in year i) but indexed site_info$site_id[j] --
# the two vectors need not share length or ordering, so the wrong site
# could be selected or sites skipped.  It now indexes sites[j].
data = output
peak_lai = data.frame()
years = unique(year(as.Date(data$Date, "%Y-%m-%d")))
for (i in seq_along(years))
{
  # All observations from year i.
  d = data[grep(data$Date, pattern = years[i]),]
  sites = unique(d$Site_ID)
  for (j in seq_along(sites))
  {
    index = which(d$Site_ID == sites[j])
    site = d[index,]
    if (length(index) > 0)
    {
      # peak lai is the max value that is the value <95th quantile to remove potential outlier values
      max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],]
      peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD)
      peak_lai = rbind(peak_lai, peak)
    }
  }
}
# a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014.
peak_lai$SD[peak_lai$SD < 0.66] = 0.66
#output data
names(peak_lai) = c("Site_ID", "Date", "Median", "SD")
save(peak_lai, file = '/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata')
# ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ########################
peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F))
peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F)
observed_vars = c("AbvGrndWood", "LAI")
# merge agb and lai dataframes and places NA values where data is missing between the 2 datasets
observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T)
names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai")
# order by year
observed_data = observed_data[order(observed_data$Date),]
#sort by date
dates = sort(unique(observed_data$Date))
# create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon)
obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai)
obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE)
obs.mean = obs.mean %>%
split(.$date)
# change the dates to be middle of the year
date.obs <- strsplit(names(obs.mean), "_") %>%
map_chr(~.x[2]) %>% paste0(.,"/07/15")
obs.mean = names(obs.mean) %>%
map(function(namesl){
obs.mean[[namesl]] %>%
split(.$site_id) %>%
map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI")) %>% `row.names<-`(NULL))
#setNames(site.ids)
}) %>% setNames(date.obs)
#remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary)
names = date.obs
# Drop NA entries (together with their names) from every per-site leaf of obs.mean.
for (name in names)
{
  for (site in names(obs.mean[[name]]))
  {
    na_index = which(!(is.na(obs.mean[[ name]][[site]])))
    colnames = names(obs.mean[[name]][[site]])
    if (length(na_index) > 0)
    {
      obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index]
    }
  }
}
# fillers are 0's for the covariance matrix. This will need to change for differing size matrixes when more variables are added in.
# filler_0 = as.data.frame(matrix(0, ncol = length(observed_vars), nrow = nrow(observed_data)))
# names(filler_0) = paste0("h", seq_len(length(observed_vars)))
# create obs.cov dataframe -->list by date
obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai)#, filler_0)
obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F)
obs.cov = obs.cov %>%
  split(.$date)
# Per date and site: square the two sdev columns and place them on the
# diagonal of a 2x2 covariance matrix (AbvGrndWood, LAI), off-diagonals 0.
obs.cov = names(obs.cov) %>%
  map(function(namesl){
    obs.cov[[namesl]] %>%
      split(.$site_id) %>%
      map(~.x[3:4]^2 %>% unlist %>% diag(nrow = 2, ncol = 2) )
  }) %>% setNames(date.obs)
names = date.obs
# Strip rows/columns containing NA from each covariance matrix; when only one
# variable remains, the subset collapses to a vector (the is.null(dim()) branch).
for (name in names)
{
  for (site in names(obs.cov[[name]]))
  {
    bad = which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE)
    if (length(bad) > 0)
    {
      obs.cov[[name]][[site]] = obs.cov[[name]][[site]][,-bad]
      if (is.null(dim(obs.cov[[name]][[site]])))
      {
        obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad]
      } else {
        obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad,]
      }
    }
  }
}
save(obs.mean, file = '/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata')
save(obs.cov, file = '/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata')
|
ed74b3d55fec6163419986fcbe9f59b2cf24cf88
|
f75602bd6cce18930a1e04a50208df19140c4d98
|
/man/langcog.Rd
|
fb79e3723bbe7dbe7306dd2f7b416d1ad94c736c
|
[] |
no_license
|
langcog/langcog-package
|
951bde3325ca24a73e5217717f2baac83e911d85
|
947511e68759386d3a55d43f0721326abca123d3
|
refs/heads/master
| 2021-07-01T08:31:28.454157
| 2017-09-20T20:41:27
| 2017-09-20T20:41:27
| 37,636,200
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
langcog.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/langcog.R
\docType{package}
\name{langcog}
\alias{langcog}
\alias{langcog-package}
\title{langcog: Language and Cognition Lab Things}
\description{
The langcog package provides several different components:
\describe{
\item{solarized_palette}{Colour schemes based on the solarized accent colours.}
\item{clm}{Linear models with coefficient constraints.}
\item{multiboot}{Non-parametric bootstrap with multiple sample statistics.}
\item{util}{Miscellaneous utility functions.}
}
}
|
e32d8b0582168c29c44f931855984be079d76662
|
1efc1f24f77e19192e4dc25e621d5bd8dc08b372
|
/R/versionr.R
|
51cc5524ed2a4adc645e6e8ffaa51326490c6731
|
[] |
no_license
|
zsigmas/versionr
|
8f57def4e615534513630c35d4690ff1faf7e5c0
|
c044d483d99ebcc5662f7a5402477ff955c68b92
|
refs/heads/master
| 2023-02-07T11:35:18.974742
| 2020-11-19T19:09:08
| 2020-11-19T19:09:08
| 303,978,982
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,968
|
r
|
versionr.R
|
update_version <- function(path = '.', print_output = FALSE) {
  # Rewrite the Version/Date/Branch/ParentCommit fields of the DESCRIPTION
  # file found under `path`, using git metadata from the helper functions.
  # Fix: the original hard-coded 'DESCRIPTION' and silently ignored `path`.
  # NOTE(review): the git helpers still look for .git in the current working
  # directory, not under `path` — confirm whether they should take `path` too.
  con <- file(file.path(path, 'DESCRIPTION'))
  on.exit(close(con))
  desc_content <- readLines(con)
  version_idx <- which(startsWith(desc_content, 'Version:'))
  date_idx <- which(startsWith(desc_content, 'Date:'))
  branch_idx <- which(startsWith(desc_content, 'Branch:'))
  commit_idx <- which(startsWith(desc_content, 'ParentCommit:'))
  desc_content[version_idx] <- paste('Version:', clean_version_number(get_describe_head()))
  # Any field that is missing is appended at the end of the file.
  if (length(date_idx) == 0) { date_idx <- length(desc_content) + 1 }
  desc_content[date_idx] <- paste('Date:', date())
  if (length(branch_idx) == 0) { branch_idx <- length(desc_content) + 1 }
  desc_content[branch_idx] <- paste('Branch:', get_branch_name())
  if (length(commit_idx) == 0) { commit_idx <- length(desc_content) + 1 }
  desc_content[commit_idx] <- paste('ParentCommit:', get_parent_commit_id())
  writeLines(desc_content, con = con)
  # Return the new DESCRIPTION contents only on request (original behavior).
  if (print_output) { return(desc_content) }
}
current_version <- function(path = '.') {
  # Return the Version field (as a string) of the DESCRIPTION file under `path`.
  # Fix: the original hard-coded 'DESCRIPTION' and silently ignored `path`.
  con <- file(file.path(path, 'DESCRIPTION'))
  on.exit(close(con))
  desc_content <- readLines(con)
  version_idx <- which(startsWith(desc_content, 'Version:'))
  current_version <- unlist(strsplit(desc_content[version_idx], split = ': ', fixed = TRUE))[2]
  return(current_version)
}
current_branch <- function(path = '.') {
  # Return the Branch field (as a string) of the DESCRIPTION file under `path`.
  # Fix: the original hard-coded 'DESCRIPTION' and silently ignored `path`.
  con <- file(file.path(path, 'DESCRIPTION'))
  on.exit(close(con))
  desc_content <- readLines(con)
  branch_idx <- which(startsWith(desc_content, 'Branch:'))
  current_branch <- unlist(strsplit(desc_content[branch_idx], split = ': ', fixed = TRUE))[2]
  return(current_branch)
}
current_date <- function(path = '.') {
  # Return the Date field (as a string) of the DESCRIPTION file under `path`.
  # Fix: the original hard-coded 'DESCRIPTION' and silently ignored `path`.
  con <- file(file.path(path, 'DESCRIPTION'))
  on.exit(close(con))
  desc_content <- readLines(con)
  date_idx <- which(startsWith(desc_content, 'Date:'))
  current_date <- unlist(strsplit(desc_content[date_idx], split = ': ', fixed = TRUE))[2]
  return(current_date)
}
# Shell out to `git describe HEAD --tags` and reshape its "vX.Y-N-ghash"
# output (via rev/sed) into "vX.Y+N.hash" for clean_version_number().
# NOTE(review): checks for .git in the current working directory only,
# unlike the DESCRIPTION helpers that accept a `path` — confirm intended.
get_describe_head <- function() {
  if(!dir.exists('.git')){stop("Can't find .git directory in specified path")}
  system2(command="git", args="describe HEAD --tags | rev | sed 's/g-/./' | sed 's/-/+/' | rev", stdout=T)
}
# Return the current git branch name of the repo in the working directory.
get_branch_name <- function() {
  if(!dir.exists('.git')){stop("Can't find .git directory in specified path")}
  system2(command="git", args="rev-parse --abbrev-ref HEAD", stdout=T)
}
# Return the full SHA of HEAD for the repo in the working directory.
get_parent_commit_id <- function(){
  if(!dir.exists('.git')){stop("Can't find .git directory in specified path")}
  system2(command="git", args="rev-parse HEAD", stdout=T)
}
clean_version_number <- function(ver_num) {
  # Turn a git-describe-derived string like "v1.0+5.abc12" into "v1.0-5";
  # a plain version with no "+" part is returned unchanged.
  parts <- unlist(strsplit(ver_num, split = '+', fixed = TRUE))
  base_version <- parts[1]
  ahead_count <- unlist(strsplit(parts[2], split = '.', fixed = TRUE))[1]
  suffix <- ifelse(!is.na(ahead_count), paste0('-', ahead_count), '')
  return(paste0(base_version, suffix))
}
# Install the versionr git hook into `path`/.git/hooks: copies the packaged
# pre-commit script and update-version.R helper from the installed versionr
# package. If a pre-commit hook already exists, the block to paste manually
# is printed instead of overwriting. `.force_update_copy` forces overwriting
# of update-version.R only.
init <- function(path='.', .force_update_copy=F) {
  origin_pre_commit_file <- system.file('pre-commit', package='versionr', mustWork = T)
  origin_update_version_file <- system.file('update-version.R', package='versionr', mustWork = T)
  target_pre_commit_file <- file.path(path, '.git', 'hooks', 'pre-commit')
  target_update_version_file <- file.path(path, '.git', 'hooks', 'update-version.R')
  if(file.exists(target_pre_commit_file)){
    # Never clobber an existing pre-commit hook; show the snippet to merge by hand.
    cat(paste0('File pre-commit already exists, paste the block below in the pre-commit file\n\n',
               paste0(readLines(origin_pre_commit_file), collapse='\n'),
               '\n')
    )
  }else{
    print(paste('Copying Origin:', origin_pre_commit_file, '->', 'Target:', target_pre_commit_file))
    file.copy(origin_pre_commit_file, target_pre_commit_file)
  }
  if(!file.exists(target_update_version_file) | .force_update_copy){
    print(paste('Copying Origin:', origin_update_version_file, '->', 'Target:', target_update_version_file))
    file.copy(origin_update_version_file, target_update_version_file, overwrite = T)
  }else{
    warning('Update version file is already present, use .force_update_copy if you want it copied')
  }
}
|
89c1d1bcc5e365ce4823a4746cbe0d317770c0f4
|
efb67b529095add05d77312f981305690655b45a
|
/ggplot2/Layers/Geoms/geom_violin/example7.R
|
2b5591ab0de79768796f783a213c0463ea3615db
|
[] |
no_license
|
plotly/ssim_baselines
|
6d705b8346604004ae16efdf94e425a2989b2401
|
9d7bec64fc286fb69c76d8be5dc0899f6070773b
|
refs/heads/main
| 2023-08-14T23:31:06.802931
| 2021-09-17T07:19:01
| 2021-09-17T07:19:01
| 396,965,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77
|
r
|
example7.R
|
# Violin plot of mpg by cylinder count, with a narrower density bandwidth.
p <- ggplot(mtcars, aes(factor(cyl), mpg)) +
  geom_violin(adjust = .5)
|
7528a9c7400f8bbae8ba44f01284cf759c052ee7
|
0e92c0b362b230341f9cc31207df8139dbc3ac18
|
/R/dropLayer.R
|
f4a0451c028d8a108b1df9cea2396810712ef17b
|
[] |
no_license
|
cran/raster
|
b08740e15a19ad3af5e0ec128d656853e3f4d3c6
|
dec20262815cf92b3124e8973aeb9ccf1a1a2fda
|
refs/heads/master
| 2023-07-09T20:03:45.126382
| 2023-07-04T10:40:02
| 2023-07-04T10:40:02
| 17,699,044
| 29
| 35
| null | 2015-12-05T19:06:17
| 2014-03-13T06:02:19
|
R
|
UTF-8
|
R
| false
| false
| 1,084
|
r
|
dropLayer.R
|
# Author: Robert J. Hijmans
# Date : June 2008
# Version 0.9
# Licence GPL v3
# Register the 'dropLayer' S4 generic unless another package already did.
if (!isGeneric("dropLayer")) {
	setGeneric("dropLayer", function(x, i, ...)
		standardGeneric("dropLayer"))
}
# Index of the first occurrence of each `name` in `allnames` (NA when absent).
# The original grew a vector with c() inside a loop to reimplement match();
# this is exactly match(), as the author's own comment noted.
# (For zero-length `name` this now returns integer(0) instead of NULL; both
# are no-ops when used for subsetting.)
...nameToIndex <- function(name, allnames) {
	match(name, allnames)
}
setMethod('dropLayer', signature(x='RasterStack'),
	function(x, i, ...) {
		# Remove the layers named/indexed by `i` from a RasterStack.
		# Layer names are translated to indices first.
		if (is.character(i)) {
			i = match(i, names(x))
		}
		# Normalize: rounded, unique, sorted, and restricted to 1..nlayers(x);
		# out-of-range or NA entries are silently dropped.
		i <- sort(unique(round(i)))
		i <- i[i > 0 & i <= nlayers(x)]
		if (length(i) > 0) {
			x@layers <- x@layers[-i]
		}
		return(x)
	}
)
setMethod('dropLayer', signature(x='RasterBrick'),
	function(x, i, ...) {
		# Remove the layers named/indexed by `i` from a RasterBrick.
		if (is.character(i)) {
			i <- match(i, names(x))
		}
		# Normalize indices as in the RasterStack method.
		i <- sort(unique(round(i)))
		nl <- nlayers(x)
		i <- i[i > 0 & i <= nl]
		if (length(i) < 1) {
			# Nothing valid to drop: return the brick unchanged.
			return(x)
		} else {
			# Keep the complement; a brick is rebuilt via subset().
			sel <- which(! 1:nl %in% i )
			if (length(sel) == 0) {
				# All layers dropped: return an empty brick with x's geometry.
				return(brick(x, values=FALSE))
			} else {
				return(subset(x, sel, ...))
			}
		}
	}
)
|
55d584fec584d7d5eb672b3791186fd7313b1543
|
b8ca52003cd634c3e2d5ff77fd01d0811efd15e5
|
/Plot2.R
|
94157bddc30a3d7f607cadd6a8c6022f1e8060ac
|
[] |
no_license
|
gholler/ExData_Plotting1
|
925d4ada4086db425477466c20c4d97a810f7071
|
2e9b754598bf9c80faed26975bde6d51a567a419
|
refs/heads/master
| 2021-01-24T07:18:12.482525
| 2017-06-04T20:21:53
| 2017-06-04T20:21:53
| 93,338,881
| 0
| 0
| null | 2017-06-04T20:14:50
| 2017-06-04T20:14:50
| null |
UTF-8
|
R
| false
| false
| 460
|
r
|
Plot2.R
|
#load common functions
source("common_functions.R")
# load the dataset
# read_data() comes from common_functions.R; the exists() guard caches the
# data in the session so re-running the script skips the (slow) re-read.
if (!exists("hpc")) {
  hpc <- read_data()
}
#construct the plot2.png plot
# this is a line plot that shows
# evolution of Global Active Power with time
# do_plot() (from common_functions.R) handles the png device; the callback
# draws the actual line plot.
do_plot(hpc, "Plot2.png",
  function(hpc) {
    with(
      hpc,
      plot(Time, Global_active_power, type="l",
           ylab = "Global Active Power (kilowatts)", xlab = NA)
    )
  }
)
|
48979f0cf40b5453ebc8c956cc183dd038124b12
|
04d0a997364ad1bab775fb920edfe5b60cf6d740
|
/man/SpearmanRho.Rd
|
175b3766f98a49794fa7853c7d9a0ba6cacfbca6
|
[] |
no_license
|
mainwaringb/DescTools
|
a2dd23ca1f727e8bbfc0e069ba46f44567e4be24
|
004f80118d463c3cb8fc2c6b3e934534049e8619
|
refs/heads/master
| 2020-12-22T15:12:41.335523
| 2020-03-21T17:30:52
| 2020-03-21T17:30:52
| 236,836,652
| 0
| 0
| null | 2020-01-28T20:40:03
| 2020-01-28T20:40:02
| null |
UTF-8
|
R
| false
| false
| 2,505
|
rd
|
SpearmanRho.Rd
|
\name{SpearmanRho}
\alias{SpearmanRho}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Spearman Rank Correlation
%% ~~function to do ... ~~
}
\description{Calculate Spearman correlation coefficient and it's confidence interval.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
SpearmanRho(x, y = NULL, use = c("everything", "all.obs", "complete.obs",
"na.or.complete","pairwise.complete.obs"),
conf.level = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a numeric vector, an ordered factor, matrix or data frame. An ordered factor will be coerced to numeric.
%% ~~Describe \code{x} here~~
}
\item{y}{\code{NULL} (default) or a vector, an ordered factor, matrix or data frame with compatible dimensions to x.
An ordered factor will be coerced to numeric.
%% ~~Describe \code{y} here~~
}
\item{use}{
an optional character string giving a method for computing covariances in the presence of missing values.
This must be (an abbreviation of) one of the strings \code{"everything"}, \code{"all.obs"}, \code{"complete.obs"},
\code{"na.or.complete"}, or \code{"pairwise.complete.obs"}.
}
\item{conf.level}{confidence level of the interval. If set to \code{NA} (which is the default) no confidence interval will be calculated.
%% ~~Describe \code{conf.level} here~~
}
}
\details{The function calculates Spearman's rho statistic by means of \code{cor(..., method="spearman")}.
The confidence intervals are calculated via z-Transformation.\cr
%% ~~ If necessary, more details than the description above ~~
}
\value{
Either a single numeric value, if no confidence interval is required, \cr
or a vector with 3 elements for estimate, lower and upper confidence interval.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Conover W. J. (1999) \emph{Practical Nonparametric Statistics (3rd edition)}. Wiley
%% ~put references to the literature/web site here ~
}
\author{Andri Signorell <andri@signorell.net>
%% ~~who you are~~
}
\seealso{\code{\link{cor}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
SpearmanRho(d.diamonds$clarity, d.diamonds$cut)
SpearmanRho(d.diamonds$clarity, d.diamonds$cut, conf.level = 0.95)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ multivar }
|
4f9c0698909247742996f2b5a179ef1f25a30f04
|
4a7718b5618d75bdcfb3fb71324569d0d11ac749
|
/R/C_log_exp.R
|
76bf960782047b2afce2a3b77829afaeb53af1de
|
[] |
no_license
|
EwaMarek/FindReference
|
859676f1744ea2333714634fd420d6b91b367956
|
eedc8c80809b6f3e4439999bac4cb09ec2b228f2
|
refs/heads/master
| 2018-08-01T08:39:05.326467
| 2018-06-02T17:05:06
| 2018-06-02T17:05:06
| 104,148,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 627
|
r
|
C_log_exp.R
|
# Input: z-scores plus the mapping of which control was used for which
# experiment (C_for_IR: row 1 = sample/experiment names, row 2 = the
# matching control column names).
# Output: z-scores of the controls only.
C_log_exp = function(data_z, C_for_IR){
  # Fix: `class(x) == 'matrix'` breaks on R >= 4.0, where a matrix has class
  # c("matrix", "array") and the length-2 condition errors inside if().
  # inherits() handles both old and new R correctly.
  if (inherits(data_z, 'matrix')) {
    # Keep only mapping columns whose sample actually appears in data_z,
    # then pull the corresponding control columns.
    wh_samples = which(C_for_IR[1,] %in% colnames(data_z))
    C_exp = data_z[, C_for_IR[2, wh_samples]]
  } else if (inherits(data_z, 'list')) {
    # Same extraction, applied element-wise over paired lists.
    C_exp = rep(list(list()), length(data_z))
    for (j in 1:length(data_z)) {
      wh_samples = which(C_for_IR[[j]][1,] %in% colnames(data_z[[j]]))
      C_exp[[j]] = data_z[[j]][, C_for_IR[[j]][2, wh_samples]]
    }
  }
  return(C_exp)
}
|
5a4196e50ea695c1148b2cf12bf554d14458e6a8
|
8ff5342585cdebd98e697be6982c582aa3d92fe8
|
/list_to_matrix_to_list_function.v2.R
|
06a9866bfd998e9e52cab4ca2b34168d57356b5e
|
[] |
no_license
|
ialves19/SingleCellProject
|
739ae65105c79f3e231f9603aa4ad38866497660
|
8fe40377d20ef05263db00de0e4323063635efeb
|
refs/heads/master
| 2022-12-21T07:02:32.478826
| 2020-09-14T09:06:37
| 2020-09-14T09:06:37
| 275,119,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,552
|
r
|
list_to_matrix_to_list_function.v2.R
|
#creating a list with HQ haplotypes
# For each locus pair, decide the supported phase (00/11 vs 01/10) from the
# top-two link counts, then chain the pairs together with merge() on the
# shared position. Returns one consensus haplotype as a named numeric vector
# (names = positions, sorted ascending; the complementary haplotype is implied).
creatingHQhapList <- function(l_counts_order) { #it takes subMatrix and subOrder and return a matrix with all haplotypes
  #l_counts_order <- listHQ[[2]]
  for (col in 1:nrow(l_counts_order[[1]])) {
    vectorList <- list()
    focalPosFirst <- as.character(l_counts_order[[1]][col,2])
    focalPosSec <- as.character(l_counts_order[[1]][col,3])
    linkSupported <- l_counts_order[[2]][col,][1:2]
    # vectorHap_one <- rep(NA, length(names_subM))
    # vectorHap_two <- rep(NA, length(names_subM))
    # Link categories 1 and 4 support phase 00/11; categories 2 and 3
    # support phase 01/10.
    if (sum(is.element(1, linkSupported) & is.element(4, linkSupported))) {
      vectorList[[col]] <- rbind(c(0,0),c(1,1))
    } else if (sum(is.element(2, linkSupported) & is.element(3, linkSupported))) {
      vectorList[[col]] <- rbind(c(0,1),c(1,0))
    }
    colnames(vectorList[[col]]) <- c(focalPosFirst,focalPosSec)
    if (col == 1) {
      newHap <- vectorList[[col]]
    } else {
      # Chain onto the haplotype built so far via the shared first position.
      newHap <- merge(newHap, vectorList[[col]], by = focalPosFirst)
    }
  }
  # Order columns by genomic position, keep the haplotype carrying 0 at the
  # first position, and return it as a named numeric vector.
  newHap <- newHap[,order(as.numeric(colnames(newHap)), decreasing = F)]
  namesNewHap <- colnames(newHap)
  newHap <- as.numeric(newHap[which(newHap[,1] == 0),])
  names(newHap) <- namesNewHap
  #colnames(m_hap_tmp) <- names_subM
  return(newHap)
}
##-------------
#-------
# Convert a haplotype matrix (rows = partial haplotypes, cols = positions,
# NA where a haplotype does not cover a position) into a per-position list:
# element `pos` is a 2-row matrix with the consensus (mean over covering
# haplotypes) of the 0-allele and 1-allele haplotypes at that position,
# NAs removed.
creatingHapListFromLinkCountMatrix <- function(haplotypeMatrix) {
  ## converting from matrix to list and removing all the NAs
  list_hap_site_tmp <- list()
  for (pos in 1:ncol(haplotypeMatrix)) { #pass from hap matrix to hap list
    if (length(which(haplotypeMatrix[,pos] == 0)) == 1) { #in case there is only one haplotype covering a site
      # Single covering haplotype: row subsetting yields a plain vector,
      # so apply() cannot be used here.
      v.new <- haplotypeMatrix[which(haplotypeMatrix[,pos] == 0),]
      v.new_one <- v.new[which(!is.na(v.new))]
      v.new <- haplotypeMatrix[which(haplotypeMatrix[,pos] == 1),]
      v.new_two <- v.new[which(!is.na(v.new))]
      list_hap_site_tmp[[pos]] <- rbind(v.new_one,v.new_two)
    } else { #in case there is >1 haplotype covering a site
      # Average column-wise over all haplotypes sharing each allele.
      v.new <- apply(haplotypeMatrix[which(haplotypeMatrix[,pos] == 0),],2,mean, na.rm=T)
      v.new_one <- v.new[which(!is.na(v.new))]
      v.new <- apply(haplotypeMatrix[which(haplotypeMatrix[,pos] == 1),],2,mean, na.rm=T)
      v.new_two <- v.new[which(!is.na(v.new))]
      list_hap_site_tmp[[pos]] <- rbind(v.new_one,v.new_two)
    }
  }
  return(list_hap_site_tmp)
}
##---------------
#------------
#creating a matrix with all overlapping haps
# For each locus pair in linkCountM_subset (cols 2-3 = positions) emit the
# two complementary partial haplotypes implied by the top-two supported link
# categories (orderLinkCountM_subset cols 1-2), placed on the union of all
# positions; uncovered positions stay NA. Returns a matrix with two rows per
# pair and one column per position.
creatingHapMatrixFromLinkCountMatrix <- function(linkCountM_subset, orderLinkCountM_subset) { #it takes subMatrix and subOrder and return a matrix with all haplotypes
  # NOTE(review): this bare nrow() call has no effect — confirm and remove.
  nrow(linkCountM_subset)
  names_subM <- sort(as.numeric(union(linkCountM_subset[,2],linkCountM_subset[,3])))
  for (col in 1:nrow(linkCountM_subset)) {
    focalPosFirst <- linkCountM_subset[col,2]
    focalPosSec <- linkCountM_subset[col,3]
    linkSupported <- orderLinkCountM_subset[col,][1:2]
    vectorHap_one <- rep(NA, length(names_subM))
    vectorHap_two <- rep(NA, length(names_subM))
    # Categories 1 and 4 support phase 00/11; otherwise phase 01/10 is assumed.
    if (sum(is.element(1, linkSupported) & is.element(4, linkSupported))) {
      vectorHap_one[c(which(names_subM == focalPosFirst),which(names_subM == focalPosSec))] <- c(0,0)
      vectorHap_two[c(which(names_subM == focalPosFirst),which(names_subM == focalPosSec))] <- c(1,1)
    } else {
      vectorHap_one[c(which(names_subM == focalPosFirst),which(names_subM == focalPosSec))] <- c(0,1)
      vectorHap_two[c(which(names_subM == focalPosFirst),which(names_subM == focalPosSec))] <- c(1,0)
    }
    if (col == 1) {
      m_hap_tmp <- rbind(vectorHap_one,vectorHap_two)
    } else {
      m_hap_tmp <- rbind(m_hap_tmp,rbind(vectorHap_one,vectorHap_two))
    }
  }
  colnames(m_hap_tmp) <- names_subM
  return(m_hap_tmp)
}
##------------------
#------------
convertingIntoMatrix <- function(l, l_names) { #l = hapList; l_names=namesHapList
  # Assemble a matrix with one row per haplotype vector in `l` and one
  # column per position name in `l_names`; positions a haplotype does not
  # cover remain NA.
  hap_matrix <- matrix(NA, nrow = length(l), ncol = length(l_names))
  colnames(hap_matrix) <- l_names
  for (row_idx in seq_along(l)) {
    # Place each haplotype's values into the columns matching its names.
    target_cols <- match(names(l[[row_idx]]), colnames(hap_matrix))
    hap_matrix[row_idx, target_cols] <- as.numeric(unlist(l[[row_idx]]))
  }
  return(hap_matrix)
}
##--------------
#------------
|
105206e1ecbfc8954a2f5d94e081c917032aabe6
|
51250726e0ce12a81f75572be193d0b6742554cf
|
/tests/testthat/test-exists.R
|
75e51dc5a911ecd25f99de21956532ab7707e60c
|
[] |
no_license
|
dpastoor/pacman
|
6ead1b9913e7d2a6b018fc2e6390fd2d86ff4673
|
3b4c2c7f47f2d7faf7563f7b76a92953da47f884
|
refs/heads/master
| 2021-01-23T22:01:25.517006
| 2014-11-04T00:00:09
| 2014-11-04T00:00:09
| 26,233,734
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
test-exists.R
|
context("Checking p_exists")
# p_exists(local = TRUE) checks the local library; local = FALSE queries CRAN,
# so those tests need network access.
test_that("p_exists works for local packages",{
    expect_true(p_exists(pacman, local = TRUE))
    expect_false(p_exists(thisisnotarealpackage235asdkjlakl3lkj, local = TRUE))
})
test_that("p_exists works for packages on CRAN",{
    expect_true(p_exists(ggplot2, local = FALSE))
    expect_false(p_exists(thisisnotarealpackage235asdkjlakl3lkj, local = FALSE))
})
# p_isinstalled accepts both bare symbols and character names.
test_that("p_isinstalled works", {
    expect_true(p_isinstalled(base))
    expect_true(p_isinstalled("base"))
    expect_false(p_isinstalled(thisdoesntexistsasdfasdf23s))
    expect_false(p_isinstalled("thisdoestasdflkjasflkjlkj2d"))
})
|
ecf107d9eff6070c0af023f2ef5f25316c4f51be
|
d21fe3466e1c9e25d5bc1a599400fcfc9edbac2d
|
/R/decimalLength.R
|
e717af60ce675e9ac42ab84dc3a6f8dd2f96c4f3
|
[] |
no_license
|
ctloftin/NBADraft
|
8a98dafd353d7dad821f547b545bb521bd6ceaaa
|
8ea0b8442f994709b0c6b12a9db5b6aee7748602
|
refs/heads/master
| 2021-03-27T18:53:56.049422
| 2018-03-10T02:37:05
| 2018-03-10T02:37:05
| 61,965,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
decimalLength.R
|
#' Convert a feet-inches string to decimal feet
#'
#' Parses strings like \code{'6-7"'} (6 feet 7 inches) into decimal feet
#' (6.58). Vectorized over \code{length}. Inches are rounded to 2 decimals.
#'
#' @param length Character vector of "feet-inches" measurements, optionally
#'   with a trailing inch mark and surrounding whitespace.
#' @return Numeric vector of decimal feet.
#' @export
decimalLength <- function(length) {
  # trimws() is base R, so the stringi dependency (stri_trim) is no longer
  # needed; also avoid reusing `length` (masks base::length inside the body).
  txt <- trimws(length)
  txt <- gsub("\"", "", txt, fixed = TRUE)  # strip inch marks
  feet <- as.numeric(unlist(lapply(strsplit(txt, "-"), head, 1)))
  inches <- round(as.numeric(unlist(lapply(strsplit(txt, "-"), tail, 1))) / 12, 2)
  feet + inches
}
|
cffe9f2597bbe23e5afadac30a95176370534dbd
|
46b2a0410724f8414ee7ae4fb97b5fb4b1d8ed9d
|
/mask.R
|
9e5a0b0096c48a944b09012df2d5b7489a9e9fed
|
[] |
no_license
|
dsidavis/RFundamentals
|
07c7dccdcce58ef0054eb83f571e3e130b06d4c6
|
a7a426d18a8a9461b5791e31c02f8a886a3a7feb
|
refs/heads/master
| 2020-06-28T00:52:49.992751
| 2020-03-07T23:26:00
| 2020-03-07T23:26:00
| 97,078,032
| 10
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,016
|
r
|
mask.R
|
# NOTE(review): par() applies to the device that is current *now*; the png()
# devices opened below start with their own default margins, so this call
# likely has no effect on the generated files — confirm and move it after
# each png() if zero margins are intended.
par(mar = rep(0, 4))
# Figure 1: a plain 9-cell vector drawn as numbered boxes.
png("vec.png", 500, 200, bg = "transparent")
plot(0, type = "n", axes = FALSE, xlab = "", ylab = "", xlim = c(0, 1), ylim = c(0, 1))
x = seq(0, 1, length = 10)
rect(x[- length(x)], .25, x[-1], .75)
text((x[-length(x)] + x[-1])/2, .5, 1:9)
dev.off()
# Figure 2: the same vector with a logical mask; TRUE cells shaded grey,
# mask values printed above each cell.
png("logicalMask.png", 500, 200, bg = "transparent")
plot(0, type = "n", axes = FALSE, xlab = "", ylab = "", xlim = c(0, 1), ylim = c(0, 1))
x = seq(0, 1, length = 10)
i = c(FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE)
rect(x[- length(x)], .25, x[-1], .75, col = c("#FFFFFFFF", "#D3D3D3FF")[i+1])
text((x[-length(x)] + x[-1])/2, .85, i)
dev.off()
# Figure 3: the result of subsetting — only indices where the mask is TRUE
# are labelled below the shaded cells.
png("logicalSubset.png", 500, 200, bg = "transparent")
plot(0, type = "n", axes = FALSE, xlab = "", ylab = "", xlim = c(0, 1), ylim = c(0, 1))
x = seq(0, 1, length = 10)
i = c(FALSE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE)
rect(x[- length(x)], .25, x[-1], .75, col = c("#FFFFFFFF", "#D3D3D3FF")[i+1])
text(((x[-length(x)] + x[-1])/2)[i], .15, (1:9)[i])
dev.off()
|
e9e9adf13ee1da640e27642c376aa81f1fe60582
|
f855ffb8b52577950381b78bcfa31d87b39dc6b3
|
/docs/R/cellDensity.R
|
2248c4043604eb7538e5c4b91324204ee06b3c7d
|
[] |
no_license
|
PhillipMogensen/SmB-I-2020
|
0d4cd559b46981d3187be8c02f72228258d0360d
|
2c3a0bd7048e66541b23d637296d0bf495fb28ba
|
refs/heads/master
| 2023-02-14T17:52:28.790584
| 2021-01-02T15:25:15
| 2021-01-02T15:25:15
| 318,582,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
cellDensity.R
|
# -------------------------------------------------------------
# Applied Statistics / Statistical methods in the Biosciences
# Day 2
# Example on density of nerve cells
# Bo Markussen
# November 22, 2018
# -------------------------------------------------------------
# Hard code data into two vectors
# Paired nerve-cell density measurements (same 9 subjects, two regions).
mid <- c(50.6,39.2,35.2,17.0,11.2,14.2,24.2,37.4,35.2)
mes <- c(38.0,18.6,23.2,19.0, 6.6,16.4,14.4,37.6,24.2)
# Make t-test
# Check normality of the paired differences first, then run the paired t-test.
qqnorm(mid-mes)
shapiro.test(mid-mes)
t.test(mid,mes,paired=TRUE)
# Make sign test
# Sign test = binomial test on the number of pairs with mid > mes.
binom.test(sum(mid>mes),length(mid))
# Make Wilcoxon rank sum test
# NOTE(review): with paired=TRUE this is the Wilcoxon *signed-rank* test,
# not the rank-sum test the comment above names.
wilcox.test(mid,mes,paired=TRUE)
|
b992cc909b6b21114ff69a6a7069e10c4dbcf8dd
|
863dc57a25d91d5206ac652176d6e16f82e847df
|
/Analysis/RScripts/rq14-cases-for-manual- analysis.R
|
d3e08dd2063a559eed4cbc637955c31420afc949
|
[
"Apache-2.0"
] |
permissive
|
STAMP-project/Botsing-model-seeding-application
|
17ce2907e62398621655e6d0f1fd366c6dcb1e33
|
3be2df5627d544aef76f9cc5c900ab5a0bd34b15
|
refs/heads/master
| 2020-08-20T17:43:25.905477
| 2020-02-13T12:43:37
| 2020-02-13T12:43:37
| 216,042,346
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 744
|
r
|
rq14-cases-for-manual- analysis.R
|
# R script
#
# author: Pouria Derakhshanfar
library(ggplot2)
library(dplyr)
source('dataclean.r')
source('graphs.r')
source('tables.r')
# Labels for the test-seeding configurations compared below.
# Fix: the script defined `Configs` but passed lowercase `configs` (undefined
# in this file) to two of the helpers below; `Configs` is now used throughout.
# NOTE(review): confirm the sourced helper scripts do not define a global
# `configs` that was relied upon instead.
Configs <- c("test s. 0.2", "test s. 0.5", "test s. 0.8", "test s. 1.0")
noseeding <- getNoSeedingResults()
testseeding <- getTestSeedingResults()
# Restrict to frames that are in both test and no seeding
results <- testseeding %>%
  bind_rows(noseeding %>%
              filter(case_frame %in% testseeding$case_frame))
# Search initialization
writeDifferingCrashesInStarting(results, Configs)
# Crash reproduction
reproduction <- getReproduceStatus(results)
writeDifferingCrashes(reproduction, Configs, "reproduced")
# ff evals
ffEvals <- getFFEvals(results)
writeffEvalsInterestingCases(ffEvals, Configs)
|
534d4cd1816fe9f7f8e22d502200c9aa08653b07
|
789dd3039ae8c7a1b29582e563c66f2f3b573e9b
|
/ML/stocks.r
|
42746838f7e0bacd66e76ba2e4a4d6c3c885bd99
|
[] |
no_license
|
Aurametrix/R
|
44ecb2969e0eb39120176692761304adae7a3539
|
affb2b2e06b94ff8a1c8d552aa3b996b0158911f
|
refs/heads/master
| 2023-01-31T22:28:15.893079
| 2023-01-27T01:17:57
| 2023-01-27T01:17:57
| 16,440,534
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,282
|
r
|
stocks.r
|
##Install and load required packages
##install.packages("quantmod")
library(quantmod)
library(reshape2)
##Merge the NASDAQ and NYSE csv files. Files found at http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nasdaq&render=download
##and http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nyse&render=download
##Bind new column to these datasets to include stock type of NASDAQ or NYSE
NASDAQ <- read.csv("NASDAQ.csv")
NASDAQ <- cbind(NASDAQ, "Stock.Exchange"="NASDAQ")
NYSE <- read.csv("NYSE.csv")
NYSE <- cbind(NYSE, "Stock.Exchange"="NYSE")
# Stocks_df serves as a global ticker -> sector/industry lookup table for
# the functions defined below.
Stocks_df <- data.frame(rbind(NASDAQ, NYSE))
##write.csv(Stocks_df, "AllStocks.csv", row.names=FALSE)
##Create function to get stock prices/info using a function and bind industry/sector info
# Download ~657 days of daily OHLC data for `symbol` via quantmod::getSymbols
# (network call), then append the trading date, the open-to-close percent
# change, and sector/industry/exchange looked up in the global Stocks_df.
stockprices <- function(symbol){
  # Columns 1 and 4 of a getSymbols data frame are Open and Close.
  TICKERS.DF <- data.frame(getSymbols(symbol, from=Sys.Date()-657, to=Sys.Date(), env=NULL))
  TICKERS.DF <- cbind(TICKERS.DF, "Date"=attr(TICKERS.DF , "row.names"))
  TICKERS.DF <- cbind(TICKERS.DF, "PctChange"=round(((TICKERS.DF[,4]-TICKERS.DF[,1])/TICKERS.DF[,1]*100), digits=4))
  IndustryInfo <- Stocks_df[Stocks_df$Symbol==symbol,]
  TICKERS.DF <- cbind(TICKERS.DF, "Sector"=IndustryInfo$Sector)
  TICKERS.DF <- cbind(TICKERS.DF, "Industry"=IndustryInfo$industry)
  TICKERS.DF <- cbind(TICKERS.DF, "Stock.Exchange"=IndustryInfo$Stock.Exchange)
  TICKERS.DF$Date <- as.Date(TICKERS.DF$Date)
  TICKERS.DF
}
##Create function to get stock ratios, make sure to load 'reshape2' package
# Fetch quarterly financial statements via quantmod::getFinancials (network
# call), melt them into long form, then compute one row of profitability,
# liquidity, debt and efficiency ratios per reporting date.
stockratios <- function(symbol=""){
  finances <- getFinancials(symbol, env=NULL)
  # Long-format balance sheet: one (Variable, Date, value) row per cell.
  BalanceSheet <- data.frame(viewFinancials(finances, type='BS', period="Q"))
  BalanceSheet <- cbind(BalanceSheet, "Variables"=attr(BalanceSheet, "row.names"))
  BalanceSheet <- cbind(BalanceSheet, "Statement"="Balance Sheet")
  BalanceSheet <- melt(BalanceSheet)
  # Same reshaping for the income statement.
  IncomeStatement <- data.frame(viewFinancials(finances, type='IS', period="Q"))
  IncomeStatement <- cbind(IncomeStatement, "Variables"=attr(IncomeStatement, "row.names"))
  IncomeStatement <- cbind(IncomeStatement, "Statement"="Income Statement")
  IncomeStatement <- melt(IncomeStatement)
  finances <- rbind(BalanceSheet, IncomeStatement)
  # Date columns were mangled into names like "X2016.03.31"; restore them.
  finances$variable <- sub("X", "", finances$variable)
  finances$variable <- gsub("\\.", "-", finances$variable)
  colnames(finances)[3] <- "Date"
  # Pull out the individual statement lines needed for the ratios.
  NetIncome <- finances[finances$Variables=="Net Income",]
  TotalAssets <- finances[finances$Variables=="Total Assets",]
  TotalEquity <- finances[finances$Variables=="Total Equity",]
  TotalRevenue <- finances[finances$Variables=="Total Revenue",]
  CurrentAssets <- finances[finances$Variables=="Total Current Assets",]
  CurrentLiabilities <- finances[finances$Variables=="Total Current Liabilities",]
  Inventory <- finances[finances$Variables=="Total Inventory",]
  TotalLiabilities <- finances[finances$Variables=="Total Liabilities",]
  EBIT <- finances[finances$Variables=="Operating Income",]
  IBIT <- finances[finances$Variables=="Income Before Tax",]
  TotalInventory <- finances[finances$Variables=="Total Inventory",]
  #Profitability Ratios
  ReturnOnAssets <- data.frame("ReturnOnAssets"=round(NetIncome$value/TotalAssets$value*100, digits=4))
  ReturnOnEquity <- data.frame("ReturnOnEquity"=round(NetIncome$value/TotalEquity$value*100, digits=4))
  ProfitMargin <- data.frame("ProfitMargin"=round(NetIncome$value/TotalRevenue$value*100, digits=4))
  #Liquidity Ratios
  CurrentRatio <- data.frame("CurrentRatio"=round(CurrentAssets$value/CurrentLiabilities$value, digits=4))
  QuickRatio <- data.frame("QuickRatio"=round((CurrentAssets$value-Inventory$value)/CurrentLiabilities$value, digits=4))
  #Debt Ratios, subtract liabilities from assets to get shareholders equity, Interest Expense is Operating Income(EBIT) minus Income before tax(IBIT)
  DebtToEquityRatio <- data.frame("DebtToEquityRatio"=round(TotalLiabilities$value/(TotalAssets$value-TotalLiabilities$value), digits=4))
  InterestCoverageRatio <- data.frame("InterestCoverageRatio"=round(EBIT$value/(EBIT$value-IBIT$value), digits=4))
  #Efficiency Ratios, sales(revenue) divided by Inventory to calculate the Inventory Turnover Ratio
  AssetTurnoverRatio <- data.frame("AssetTurnoverRatio"=round(TotalRevenue$value/TotalAssets$value, digits=4))
  InventoryTurnoverRatio <- data.frame("InventoryTurnoverRatio"=round(TotalRevenue$value/TotalInventory$value, digits=4))
  #Final ratios and cash flow dataframe
  # One row per quarterly reporting date, one column per ratio.
  Ratios_df <- cbind("Date"=NetIncome$Date, ReturnOnAssets)
  Ratios_df <- cbind(Ratios_df, ReturnOnEquity)
  Ratios_df <- cbind(Ratios_df, ProfitMargin)
  Ratios_df <- cbind(Ratios_df, CurrentRatio)
  Ratios_df <- cbind(Ratios_df, QuickRatio)
  Ratios_df <- cbind(Ratios_df, DebtToEquityRatio)
  Ratios_df <- cbind(Ratios_df, InterestCoverageRatio)
  Ratios_df <- cbind(Ratios_df, AssetTurnoverRatio)
  Ratios_df <- cbind(Ratios_df, InventoryTurnoverRatio)
  Ratios_df$Date <- as.Date(Ratios_df$Date)
  Ratios_df
}
##Create the final function to merge both stock prices and financial ratio data
##Create the final function to merge both stock prices and financial ratio data
# For each trading day, attaches the financial ratios of the latest reporting
# period at or before that day. Returns the price data frame (newest first)
# with the nine ratio columns appended.
#
# Fixes over the original: works for any number of reporting periods (the old
# if/else chain was hard-coded to 5), no undefined `stock_df` when the first
# price date precedes the newest ratio date, no O(n^2) rbind growth, and the
# ratio table is fetched only once.
PredictStock <- function(symbol = ""){
  ratios <- stockratios(symbol)
  # Sort newest period first so the first match below is the most recent
  # reporting period at or before a given trading day.
  ratios <- ratios[order(ratios$Date, decreasing = TRUE), ]
  stockprice <- stockprices(symbol)
  # Keep only trading days covered by at least one reporting period.
  stockprice <- stockprice[stockprice$Date >= min(ratios$Date), ]
  stockprice <- stockprice[order(stockprice$Date, decreasing = TRUE), ]
  # For every trading day, the row index of the latest period it falls in.
  idx <- vapply(stockprice$Date,
                function(d) which(d >= ratios$Date)[1],
                integer(1))
  # Columns 2:10 are the nine ratio columns (column 1 is Date).
  stockprice <- cbind(stockprice, ratios[idx, 2:10])
  stockprice
}
##Example how to plot stock prices
# Fetch once and reuse: each PredictStock() call re-downloads and re-merges
# the data, so the original's seven repeated calls were very wasteful.
aapl <- PredictStock("AAPL")
plot(aapl$Date, aapl[, 4], type = "l", ylim = c(0, 140))
lines(aapl$Date, aapl$ReturnOnEquity, col = "red")
var1 <- aapl$AssetTurnoverRatio
var2 <- aapl$ReturnOnEquity
var3 <- aapl$Date
# Simple linear trend of price (column 4) over time.
regress <- lm(aapl[, 4] ~ var3)
# Prediction interval for a future date; only var3 enters the model, so the
# stray extra column passed in the original newdata has been dropped.
predict(regress, data.frame(var3 = as.Date("2016-02-15")), interval = "prediction")
|
0fcdccd33c80a62bca3a3bec1e57775194370c59
|
a48797beca55474d7b39676389f77f8f1af76875
|
/man/graph_plusone.Rd
|
70695d4b78ba8b17d4d1b9c8c1761908680df4cf
|
[] |
no_license
|
uqrmaie1/admixtools
|
1efd48d8ad431f4a325a4ac5b160b2eea9411829
|
26759d87349a3b14495a7ef4ef3a593ee4d0e670
|
refs/heads/master
| 2023-09-04T02:56:48.052802
| 2023-08-21T21:15:27
| 2023-08-21T21:15:27
| 229,330,187
| 62
| 11
| null | 2023-01-23T12:19:57
| 2019-12-20T20:15:32
|
R
|
UTF-8
|
R
| false
| true
| 770
|
rd
|
graph_plusone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toposearch.R
\name{graph_plusone}
\alias{graph_plusone}
\title{Find all graphs which result from adding one admixture edge}
\usage{
graph_plusone(graph, ntry = Inf)
}
\arguments{
\item{graph}{Admixture graph in \code{igraph} format}
\item{ntry}{Specify this to return only a subset of all possible graphs with one more edge}
}
\value{
A data frame with columns \code{from}, \code{to}, and \code{graph}
}
\description{
Find all graphs which result from adding one admixture edge
}
\examples{
\dontrun{
newgraphs = graph_plusone(example_igraph)
# now evaluate the new graphs
newgraphs \%>\%
rowwise \%>\%
mutate(res = list(qpgraph(example_f2_blocks, graph))) \%>\%
unnest_wider(res)
}
}
|
f4b24e69f1c4c9f121af8974585ada5c59617c84
|
903a04d3b2ad601eac56531a6b81692d96d40b2d
|
/man/AIC.secr.Rd
|
0d2649663e3b072d23f1eb676944dc91326b1483
|
[] |
no_license
|
cran/secr
|
a2b058587a4655e4688c912d3fecb5fa7838dca8
|
50b315739c723f61bcd82bbbb5c6770973fcb132
|
refs/heads/master
| 2023-07-20T04:45:01.190001
| 2023-07-10T23:40:02
| 2023-07-11T07:34:31
| 17,699,535
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,924
|
rd
|
AIC.secr.Rd
|
\name{AIC.secr}
\alias{AIC.secr}
\alias{logLik.secr}
\alias{AIC.secrlist}
\alias{secrlist}
\alias{[.secrlist}
\title{ Compare SECR Models }
\description{
Terse report on the fit of one or more spatially explicit
capture--recapture models. Models with smaller values of AIC (Akaike's
Information Criterion) are preferred. Extraction ([) and logLik methods
are included.
}
\usage{
\method{AIC}{secr}(object, ..., sort = TRUE, k = 2, dmax = 10, criterion = c("AICc","AIC"), chat = NULL)
\method{AIC}{secrlist}(object, ..., sort = TRUE, k = 2, dmax = 10, criterion = c("AICc","AIC"), chat = NULL)
\method{logLik}{secr}(object, ...)
secrlist(...)
\method{[}{secrlist}(x, i)
}
\arguments{
\item{object}{ \code{secr} object output from the function
\code{\link{secr.fit}}, or a list of such objects with class c("secrlist", "list") }
\item{\dots}{ other \code{secr} objects }
\item{sort}{ logical for whether rows should be sorted by ascending AICc }
\item{k}{ numeric, penalty per parameter to be used; always k = 2 in this method}
\item{dmax}{ numeric, maximum AIC difference for inclusion in confidence set}
\item{criterion}{ character, criterion to use for model comparison and
weights}
\item{chat}{numeric optional variance inflation factor for quasi-AIC}
\item{x}{secrlist}
\item{i}{indices}
}
\details{
Models to be compared must have been fitted to the same data and use the
same likelihood method (full vs conditional). From version 4.1 a warning is
issued if \code{\link{AICcompatible}} reveals a problem.
AIC is given by
\deqn{ \mbox{AIC} = -2\log(L(\hat{\theta})) + 2K}{AICc = -2log(L(theta-hat)) + 2K}
where \eqn{K} is the number of "beta" parameters estimated.
AIC with small sample adjustment is given by
\deqn{ \mbox{AIC}_c = -2\log(L(\hat{\theta})) + 2K +
\frac{2K(K+1)}{n-K-1}.}{AICc = -2log(L(theta-hat)) + 2K +
2K(K+1)/(n-K-1).}
The sample size \eqn{n} is the number of individuals observed at least once (i.e. the
number of rows in \code{capthist}).
Model weights are calculated as \deqn{w_i = \frac{\exp(-\Delta_i / 2)}{
\sum{\exp(-\Delta_i / 2)}},}{w_i = exp(-delta_i / 2) / sum{ exp(-delta_i
/ 2) },} where \eqn{\Delta}{delta} refers to differences in AIC or AICc depending on the
argument `criterion'. AICc is widely used, but AIC may be better (Fletcher 2018, p. 60).
Models for which delta > \code{dmax} are given a weight of zero and are
excluded from the summation. Model weights may be used to form
model-averaged estimates of real or beta parameters with
\code{\link{modelAverage}} (see also Buckland et al. 1997, Burnham and
Anderson 2002).
The argument \code{k} is included for consistency with the generic method \code{AIC}.
\code{secrlist} forms a list of fitted models (an object of class
`secrlist') from the fitted models in \dots. Arguments may include
secrlists. If secr components are named the model names will be retained
(see Examples).
If chat (\eqn{\hat c}) is provided then quasi-AIC values are computed (\pkg{secr} >= 4.6.0):
\deqn{ \mbox{QAIC} = -2\log(L(\hat{\theta}))/ \hat c + 2K.}{QAIC = -2log(L(theta-hat))/c-hat + 2K.}
}
\value{
A data frame with one row per model. By default, rows are sorted by ascending
'criterion' (default AICc).
\item{model }{character string describing the fitted model}
\item{detectfn }{shape of detection function fitted (halfnormal vs hazard-rate) }
\item{npar }{number of parameters estimated}
\item{logLik }{maximized log likelihood}
\item{AIC }{Akaike's Information Criterion}
\item{AICc }{AIC with small-sample adjustment of Hurvich & Tsai
(1989)}
And depending on \code{criterion}:
\item{dAICc }{difference between AICc of this model and the one with smallest AICc}
\item{AICcwt }{AICc model weight}
or
\item{dAIC }{difference between AIC of this model and the one with smallest AIC}
\item{AICwt }{AIC model weight}
\code{logLik.secr} returns an object of class `logLik' that has
attribute \code{df} (degrees of freedom = number of estimated
parameters).
If the variance inflation factor 'chat' is provided then outputs AIC, AICc
etc. are replaced by the corresponding quasi-AIC values labelled QAIC, QAICc etc.
}
\references{
Buckland S. T., Burnham K. P. and Augustin, N. H. (1997) Model selection: an integral part of inference. \emph{Biometrics} \bold{53}, 603--618.
Burnham, K. P. and Anderson, D. R. (2002) \emph{Model Selection and Multimodel Inference: A Practical Information-Theoretic Approach}. Second edition. New York: Springer-Verlag.
Fletcher, D. (2019) Model averaging. SpringerBriefs in Statistics. Berlin: Springer-Verlag.
Hurvich, C. M. and Tsai, C. L. (1989) Regression and time series model selection in small samples. \emph{Biometrika} \bold{76}, 297--307.
Turek, D. and Fletcher, D. (2012) Model-averaged Wald confidence
intervals. \emph{Computational statistics and data analysis} \bold{56},
2809--2815.
}
\note{
It is not meaningful to compare models by AIC if they relate to
different data (see \code{\link{AICcompatible}}).
Specifically:
\itemize{
\item an `secrlist' generated and saved to file by \code{mask.check}
may be supplied as the object argument of \code{AIC.secrlist}, but the
results are not informative
\item models fitted by the conditional likelihood (\code{CL = TRUE}) and
full likelihood (\code{CL = FALSE}) methods cannot be compared
\item hybrid mixture models (using hcov argument of secr.fit) should not
be compared with other models
\item grouped models (using groups argument of secr.fit) should not be
compared with other models
\item multi-session models should not be compared with single-session
models based on the same data.
}
A likelihood-ratio test (\code{\link{LR.test}}) is a more direct way to
compare two models.
The issue of goodness-of-fit and possible adjustment of AIC for
overdispersion has yet to be addressed (cf QAIC in MARK).
From version 2.6.0 the user may select between AIC and AICc for
comparing models, whereas previously only AICc was used and AICc weights
were reported as `AICwt'). There is evidence that AIC may be better for
model averaging even when samples are small sizes - Turek and Fletcher
(2012).
}
\seealso{
\code{\link{AICcompatible}},
\code{\link{modelAverage}},
\code{\link[stats]{AIC}},
\code{\link{secr.fit}},
\code{\link{print.secr}},
\code{\link{score.test}},
\code{\link{LR.test}},
\code{\link{deviance.secr}} }
\examples{
## Compare two models fitted previously
## secrdemo.0 is a null model
## secrdemo.b has a learned trap response
AIC(secrdemo.0, secrdemo.b)
## Form secrlist and pass to AIC.secr
temp <- secrlist(null = secrdemo.0, learnedresponse = secrdemo.b)
AIC(temp)
}
\keyword{ models }
|
36e9d420183424809a6e138a91e48b510cc2d4df
|
530474c7537d174c797f8be66da1087bf7cf1c59
|
/R/siteAgeSummary_PSP.R
|
63d135af7abf28f96fd2e7c089480802d023937a
|
[
"Apache-2.0"
] |
permissive
|
bcgov/FAIBCompiler
|
409d88e9444ca26847b62e43668b41eb945f84e0
|
3baf38a21c5493b7d7cf0f4695e1cc6322eeabe3
|
refs/heads/master
| 2023-08-05T08:47:43.934344
| 2023-08-02T21:35:23
| 2023-08-02T21:35:23
| 195,121,227
| 0
| 0
|
Apache-2.0
| 2020-02-12T18:00:30
| 2019-07-03T20:19:19
|
R
|
UTF-8
|
R
| false
| false
| 31,533
|
r
|
siteAgeSummary_PSP.R
|
#' Summarize site age data by cluster and cluster/species for PSP data
#'
#'
#' @description This function takes compiled site age tree data, an output of \code{\link{siteAgeCompiler}}, to
#' derive mean age and height results. The compiled data must have breast height age,
#' total age, and site index.
#'
#' @param cpldSiteAgeData data.table, Compiled site age tree data, an output of \code{\link{siteAgeCompiler}}.
#' @param treemsmt data.table, Tree measurement data.
#'
#' @return A named list of three data.tables: \code{smry_by_clsp} (age/height summary
#' by cluster and species), \code{smry_by_cl} (cluster-level summary with leading and
#' secondary species, standardized site index and stand age), and \code{tree_order}
#' (the tree list annotated with site-tree selection flags).
#'
#'
#' @importFrom data.table ':='
#' @importFrom FAIBBase merge_dupUpdate
#'
#'
#' @export
#' @docType methods
#' @rdname siteAgeSummary_PSP
#'
#' @author Yong Luo
siteAgeSummary_PSP <- function(cpldSiteAgeData,
                               treemsmt){
  # to fill missing site sector,
  # 1) use the last observed site sector number to populate for a given tree
  # this is a revised method from sas codes
  # *fill in site sector no;
  # if sitesect_fill(j-1) ~= . and sitesect_fill(j) = . then
  # sitesect_fill(j) = sitesect_fill(j-1);
  # else if sitesect_fill(j-1) = . and sitesect_fill(j) ~= . then
  # sitesect_fill(j-1) = sitesect_fill(j);
  # else if sitesect_fill(j-1) ~= . and sitesect_fill(j) ~= . and
  # sitesect_fill(j-1) ~= sitesect_fill(j) then do;
  # tree_err18 = "_chgsitsec";
  # end;
  lastsitesector_tb <- treemsmt[!is.na(SITE_SECTOR_NO),
                                .(SITE_IDENTIFIER, PLOT, TREE_NO,
                                  VISIT_NUMBER, SITE_SECTOR_NO)]
  lastsitesector_tb[, visit_last := max(VISIT_NUMBER),
                    by = c("SITE_IDENTIFIER", "PLOT", "TREE_NO")]
  lastsitesector_tb <- lastsitesector_tb[visit_last == VISIT_NUMBER,
                                         .(SITE_IDENTIFIER, PLOT, TREE_NO,
                                           SITE_SECTOR_NO_last = SITE_SECTOR_NO)]
  treemsmt <- merge(treemsmt,
                    lastsitesector_tb,
                    by = c("SITE_IDENTIFIER", "PLOT", "TREE_NO"),
                    all.x = TRUE)
  treemsmt[is.na(SITE_SECTOR_NO) & !is.na(SITE_SECTOR_NO_last),
           SITE_SECTOR_NO := SITE_SECTOR_NO_last]
  treemsmt[, SITE_SECTOR_NO_last := NULL]
  # 2) based on tagging sector : site sector computed lookup table;
  # *for each sample id, based on the majority of trees
  # with valid tagging sector numbers and site sector numbers;
  # this method is original SAS method
  bestsitesector_tb <- treemsmt[TAGGING_SECTOR_NO > 0 &
                                  SITE_SECTOR_NO > 0,
                                .(nobsved = length(DBH)),
                                by = c("CLSTR_ID", "PLOT", "TAGGING_SECTOR_NO",
                                       "SITE_SECTOR_NO")]
  bestsitesector_tb <- bestsitesector_tb[order(CLSTR_ID, PLOT, TAGGING_SECTOR_NO,
                                               -nobsved),]
  # keep the site sector most frequently paired with each tagging sector
  bestsitesector_tb <- unique(bestsitesector_tb,
                              by = c("CLSTR_ID", "PLOT", "TAGGING_SECTOR_NO"))
  setnames(bestsitesector_tb, "SITE_SECTOR_NO", "SITE_SECTOR_NO_best")
  treemsmt <- merge(treemsmt,
                    bestsitesector_tb,
                    by = c("CLSTR_ID", "PLOT", "TAGGING_SECTOR_NO"),
                    all.x = TRUE)
  treemsmt[is.na(SITE_SECTOR_NO) & !is.na(SITE_SECTOR_NO_best),
           SITE_SECTOR_NO := SITE_SECTOR_NO_best]
  treemsmt[, nobsved := NULL]
  # fill missing tagging sector using the last observed value per tree
  lasttaggsector_tb <- treemsmt[!is.na(TAGGING_SECTOR_NO),
                                .(SITE_IDENTIFIER, PLOT, TREE_NO,
                                  VISIT_NUMBER, TAGGING_SECTOR_NO)]
  lasttaggsector_tb[, visit_last := max(VISIT_NUMBER),
                    by = c("SITE_IDENTIFIER", "PLOT", "TREE_NO")]
  lasttaggsector_tb <- lasttaggsector_tb[visit_last == VISIT_NUMBER,
                                         .(SITE_IDENTIFIER, PLOT, TREE_NO,
                                           TAGGING_SECTOR_NO_last = TAGGING_SECTOR_NO)]
  treemsmt <- merge(treemsmt,
                    lasttaggsector_tb,
                    by = c("SITE_IDENTIFIER", "PLOT", "TREE_NO"),
                    all.x = TRUE)
  treemsmt[is.na(TAGGING_SECTOR_NO) & !is.na(TAGGING_SECTOR_NO_last),
           TAGGING_SECTOR_NO := TAGGING_SECTOR_NO_last]
  treemsmt[, TAGGING_SECTOR_NO_last := NULL]
  # attach the compiled site-age attributes to the full tree list
  cpldSiteAgeData <- cpldSiteAgeData[,.(CLSTR_ID, PLOT, TREE_NO,
                                        AGE_BH, AGE_TOT, SI_TREE,
                                        AGE_MEASURE_CODE, BORED_AGE_FLAG,
                                        siteAgeTree = TRUE)]
  treelist_whole <- merge(treemsmt,
                          cpldSiteAgeData,
                          by = c("CLSTR_ID", "PLOT", "TREE_NO"),
                          all.x = TRUE)
  treelist_whole[siteAgeTree == TRUE &
                   BORED_AGE_FLAG == "ADDED_FROM_REFERENCE" &
                   is.na(LV_D),
                 LV_D := "L"]
  treelist_whole <- treelist_whole[!is.na(PLOT_AREA_MAIN),] # select plot with area
  treelist_whole[, MAXN_HTOP := round(PLOT_AREA_MAIN*100)] # determine how many trees for a given plot size for the smry
  treelist_whole[MAXN_HTOP == 0, MAXN_HTOP := 1]
  treelist_whole[SITE_SECTOR_NO > 0,
                 SECTOR_NO := SITE_SECTOR_NO]
  treelist_whole[is.na(SECTOR_NO) & TAGGING_SECTOR_NO > 0,
                 SECTOR_NO := TAGGING_SECTOR_NO]
  # in the original sas code, it used max sector_no as the number of the sectors for a
  # given samp_id
  # this needs to be discussed
  treelist_whole[, MAX_SECTORS := max(SECTOR_NO, na.rm = TRUE),
                 by = "SITE_IDENTIFIER"]
  # map tagging sectors onto the top-height sectors and decide how many
  # top-height trees to take from each sector
  treelist_whole[MAXN_HTOP == MAX_SECTORS,
                 ':='(TAGGING_SECTOR_HTOP = SECTOR_NO,
                      NUM_HTOP_PERSEC = 1)]
  treelist_whole[MAXN_HTOP > MAX_SECTORS,
                 ':='(TAGGING_SECTOR_HTOP = SECTOR_NO,
                      NUM_HTOP_PERSEC = as.integer(MAXN_HTOP/MAX_SECTORS))]
  treelist_whole[MAXN_HTOP < MAX_SECTORS,
                 ':='(TAGGING_SECTOR_HTOP = round(SECTOR_NO/round(MAX_SECTORS/MAXN_HTOP)),
                      NUM_HTOP_PERSEC = 1)]
  treelist_whole[TAGGING_SECTOR_HTOP == 0, TAGGING_SECTOR_HTOP := 1]
  treelist_whole[is.na(NUM_HTOP_PERSEC), NUM_HTOP_PERSEC := 1]
  # rank live, non-residual trees by DBH within sector and within sample
  treelist_whole_live <- treelist_whole[LV_D == "L" & RESIDUAL == "N",
                                        .(SITE_IDENTIFIER, PLOT, TREE_NO, VISIT_NUMBER,
                                          TAGGING_SECTOR_HTOP, SPECIES, DBH)]
  treelist_whole_live <- treelist_whole_live[order(SITE_IDENTIFIER, PLOT, VISIT_NUMBER,
                                                   TAGGING_SECTOR_HTOP, SPECIES, -DBH),]
  treelist_whole_live[, DBH_SEC_ORDER := 1:length(DBH),
                      by = c("SITE_IDENTIFIER", "PLOT", "VISIT_NUMBER",
                             "TAGGING_SECTOR_HTOP", "SPECIES")]
  treelist_whole_live <- treelist_whole_live[order(SITE_IDENTIFIER, PLOT, VISIT_NUMBER,
                                                   SPECIES, -DBH),]
  treelist_whole_live[, DBH_SAM_ORDER := 1:length(DBH),
                      by = c("SITE_IDENTIFIER", "PLOT", "VISIT_NUMBER",
                             "SPECIES")]
  treelist_whole <- merge(treelist_whole,
                          treelist_whole_live[,.(SITE_IDENTIFIER, PLOT, TREE_NO,
                                                 VISIT_NUMBER, DBH_SEC_ORDER, DBH_SAM_ORDER)],
                          by = c("SITE_IDENTIFIER", "PLOT", "TREE_NO",
                                 "VISIT_NUMBER"),
                          all.x = TRUE)
  ## to define suitable for si
  # 1. use what database has
  treelist_whole[, SUIT_SI_org := SUIT_SI]
  treelist_whole[RESIDUAL == "Y" | LV_D == "D",
                 SUIT_SI := "N"]
  # 2. if suit_si missing, use original sas codes to define suit_si
  # *define a site tree suitability flag, based on a selection of damage codes,
  # suitability for taking heights;
  # *cores with pith reached or pith estimated, and assessment of age suppression;
  # *for site tree screening, the sisuitability flag is used, modified under age2 dataset based on additional criteria;
  # *conversation with khardy 2013-mar-26, ;
  # if top_damage = "Y" or tree_suitable_for_ht = "N" or pith_code in ("R") or
  #   suppression_ind in ("Y") or tr_class in (5,6) then do;
  # *for this definition, if not suitable for si, then also not suitable for ht, if missing;
  # sitree_suit = "N";
  # if tree_suitable_for_ht = "" then tree_suitable_for_ht = "N";
  # end;
  # else do;
  #   *for this definition, if suitable for si, and htsuit flag is missing,
  # then assume also suitable for ht;
  # sitree_suit = "Y";
  # if tree_suitable_for_ht = "" then tree_suitable_for_ht = "Y";
  # end;
  treelist_whole[is.na(SUIT_SI) & (BTOP == "Y" | SUIT_HT == "N" | substr(AGE_MEASURE_CODE, 1, 1) %in% c("R", "C") |
                                     TREE_SUPPRESSION_CODE == "Y" | TREE_CLASS_CODE %in% c(5, 6)),
                 SUIT_SI := "N"]
  treelist_whole[is.na(SUIT_SI_org) & SUIT_SI == "N" & is.na(SUIT_HT),
                 SUIT_HT := "N"]
  treelist_whole[is.na(SUIT_SI), ':='(SUIT_SI = "Y")]
  treelist_whole[is.na(SUIT_HT), ':='(SUIT_HT = "Y")]
  treelist_whole[CROWN_CLASS_CODE %in% c("S", "I"),
                 SUIT_SI := "N"]
  treelist_whole[AGE_BH < 20 & HT_TOTAL_SOURCE == "Estimated based on DBH",
                 SUIT_SI := "N"]
  # *si method 1 based on selecting the largest suitable dbh
  # tree from each sector (where each sector is an approximation of 0.01ha);
  treelist_whole[DBH_SEC_ORDER > 0 &
                   DBH_SEC_ORDER <= NUM_HTOP_PERSEC &
                   SUIT_SI == "Y"]
  # presence flags used as counts in the method summaries below
  treelist_whole[, si_flag := ifelse(is.na(SI_TREE), 0,
                                     ifelse(SI_TREE > 0, 1,
                                            0))]
  treelist_whole[, agebh_flag := ifelse(is.na(AGE_BH), 0,
                                        ifelse(AGE_BH > 0, 1,
                                               0))]
  treelist_whole[, httop_flag := ifelse(is.na(HT_TOTAL), 0,
                                        ifelse(HT_TOTAL > 0, 1,
                                               0))]
  treelist_whole[DBH_SEC_ORDER > 0 &
                   DBH_SEC_ORDER <= NUM_HTOP_PERSEC &
                   SUIT_SI == "Y",
                 IN_METHOD1 := TRUE]
  smry_method1 <- treelist_whole[DBH_SEC_ORDER > 0 &
                                   DBH_SEC_ORDER <= NUM_HTOP_PERSEC &
                                   SUIT_SI == "Y",
                                 .(SI1 = mean(SI_TREE, na.rm = TRUE),
                                   AGE_BH1 = mean(AGE_BH, na.rm = TRUE),
                                   AGE_TOT1 = mean(AGE_TOT, na.rm = TRUE),
                                   HTOP1 = mean(HT_TOTAL, na.rm = TRUE),
                                   N_SI1 = sum(si_flag),
                                   N_AGE1 = sum(agebh_flag),
                                   N_HTOP1 = sum(httop_flag)),
                                 by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES")]
  # *method 2, based on selecting the largest suitable 100 topht trees/ha without sectoring (ie., 4 largest in 0.04ha, 8/0.08, etc);
  # *adjust by applying oscar garcias simple adjustment equation: 1.6 * A - 0.6, to account for bias;
  # *Theoretical arguments suggest the 1.6 A - 0.6 largests as a reasonable approximation (Garcia, O., Can.J.For.Res. 28: 1509-1517, 1998);
  treelist_whole[, MAXN_HTOP_ADJ := round((1.6 * MAXN_HTOP - 0.6))]
  treelist_whole[DBH_SAM_ORDER > 0 &
                   DBH_SAM_ORDER <= MAXN_HTOP_ADJ &
                   SUIT_SI == "Y",
                 IN_METHOD2 := TRUE]
  smry_method2 <- treelist_whole[DBH_SAM_ORDER > 0 &
                                   DBH_SAM_ORDER <= MAXN_HTOP_ADJ &
                                   SUIT_SI == "Y",
                                 .(SI2 = mean(SI_TREE, na.rm = TRUE),
                                   AGE_BH2 = mean(AGE_BH, na.rm = TRUE),
                                   AGE_TOT2 = mean(AGE_TOT, na.rm = TRUE),
                                   HTOP2 = mean(HT_TOTAL, na.rm = TRUE),
                                   N_SI2 = sum(si_flag),
                                   N_AGE2 = sum(agebh_flag),
                                   N_HTOP2 = sum(httop_flag)),
                                 by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES")]
  # *prep for method 3, based on first computing average age and average calc height
  # of the 100 largest suitable dbh trees (4/0.04) by species;
  # *faib way;
  # *as of june 11, 2014, decided to relax age criteria, to be all available si suitable trees;
  # *otherwise, no tree data may get averaged here, if not the largest 4 dbh trees/0.04ha plot were bored for age;
  # *this may lower the average, but at least plots with aged tree data of si suitable trees will get a stand age assigned with this method;
  treelist_whole[DBH_SAM_ORDER > 0 &
                   (DBH_SAM_ORDER <= MAXN_HTOP_ADJ |
                      (SUIT_SI == "Y" & AGE_BH > 0)),
                 IN_METHOD3 := TRUE]
  smry_method3 <- treelist_whole[DBH_SAM_ORDER > 0 &
                                   (DBH_SAM_ORDER <= MAXN_HTOP_ADJ |
                                      (SUIT_SI == "Y" & AGE_BH > 0)),
                                 .(AGE_BH = mean(AGE_BH, na.rm = TRUE),
                                   AGE_TOT = mean(AGE_TOT, na.rm = TRUE),
                                   HTOP = mean(HT_TOTAL, na.rm = TRUE),
                                   N_AGE3 = sum(agebh_flag),
                                   N_HTOP3 = sum(httop_flag)),
                                 by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES")]
  # derive method-3 site index from mean age/height through SIndexR curves
  specieslkup <- lookup_species()
  smry_method3 <- merge(smry_method3,
                        unique(specieslkup[,.(SPECIES, SP_SINDEX)]),
                        by = "SPECIES",
                        all.x = TRUE)
  smry_method3[, sp_index := SIndexR::SIndexR_SpecMap(SP_SINDEX)]
  # NOTE: the original called SIndexR_DefCurve without the namespace prefix,
  # which fails unless SIndexR is attached; qualified for consistency.
  smry_method3[, ':='(gi_curve = SIndexR::SIndexR_DefGICurve(sp_index),
                      si_curve = SIndexR::SIndexR_DefCurve(sp_index))]
  SI_GI_temp <- SIndexR::SIndexR_HtAgeToSI(curve = smry_method3$gi_curve,
                                           age = smry_method3$AGE_BH,
                                           ageType = 1,
                                           height = smry_method3$HTOP,
                                           estType = 1)
  SI_SI_temp <- SIndexR::SIndexR_HtAgeToSI(curve = smry_method3$si_curve,
                                           age = smry_method3$AGE_BH,
                                           ageType = 1,
                                           height = smry_method3$HTOP,
                                           estType = 1)
  smry_method3[, ':='(SI_GI = SI_GI_temp$output,
                      SI_SI = SI_SI_temp$output)]
  smry_method3[, SI3 := ifelse(SI_GI > 0 & AGE_BH > 10, SI_GI,
                               ifelse(SI_SI > 0 & AGE_BH > 10, SI_SI,
                                      NA))]
  smry_method3 <- smry_method3[,.(SITE_IDENTIFIER, VISIT_NUMBER, SPECIES,
                                  AGE_BH3 = AGE_BH,
                                  AGE_TOT3 = AGE_TOT,
                                  HTOP3 = HTOP,
                                  SI3, N_AGE3, N_HTOP3)]
  # maximum possible number of top-height trees per sample
  maxnhtop <- treelist_whole[,.(MAXN_HTOP = max(MAXN_HTOP, na.rm = TRUE)),
                             by = c("SITE_IDENTIFIER", "PLOT")]
  maxnhtop <- maxnhtop[,.(MAXN_HTOP = sum(MAXN_HTOP)), by = "SITE_IDENTIFIER"]
  smry_method_all <- merge(smry_method1,
                           smry_method2,
                           by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES"),
                           all = TRUE)
  smry_method_all <- merge(smry_method_all,
                           smry_method3,
                           by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES"),
                           all = TRUE)
  smry_method_all <- merge(smry_method_all,
                           maxnhtop,
                           by = c("SITE_IDENTIFIER"),
                           all.x = TRUE)
  smry_method_all[,':='(PER_AGE1 = round(100*N_AGE1/MAXN_HTOP),
                        PER_AGE2 = round(100*N_AGE2/MAXN_HTOP),
                        PER_AGE3 = round(100*N_AGE3/MAXN_HTOP),
                        YRSFRM_50BH1 = abs(50 - AGE_BH1),
                        YRSFRM_50BH2 = abs(50 - AGE_BH2),
                        YRSFRM_50BH3 = abs(50 - AGE_BH3))]
  # *prioritize site index by measurement, to assign one standardized site index across all measurements;
  # *based on msmt that is closest to 50yrs breast height age, and has at least 50% of the maximum number of site trees available for si calculation;
  # *repeat for each si method;
  #
  # /************************/
  # *method 1;
  # *first keep only measurements with at least 1 site tree sampled;
  si_sort1 <- smry_method_all[N_SI1 > 0,]
  si_sort1 <- si_sort1[order(SITE_IDENTIFIER, SPECIES, YRSFRM_50BH1),]
  # *next, pick first measurement closest to 50yrs bh;
  si_sort1 <- unique(si_sort1[,.(SITE_IDENTIFIER, SPECIES,
                                 SI_FINAL1 = SI1,
                                 SI_FINAL_SOURCE1 = VISIT_NUMBER,
                                 sort1 = TRUE)],
                     by = c("SITE_IDENTIFIER", "SPECIES"))
  # do the same for the rests
  si_sort2 <- smry_method_all[N_SI2 > 0,]
  si_sort2 <- si_sort2[order(SITE_IDENTIFIER, SPECIES, YRSFRM_50BH2),]
  si_sort2 <- unique(si_sort2[,.(SITE_IDENTIFIER, SPECIES,
                                 SI_FINAL2 = SI2,
                                 SI_FINAL_SOURCE2 = VISIT_NUMBER,
                                 sort2 = TRUE)],
                     by = c("SITE_IDENTIFIER", "SPECIES"))
  si_sort3 <- smry_method_all[N_AGE3 > 0,]
  si_sort3 <- si_sort3[order(SITE_IDENTIFIER, SPECIES, YRSFRM_50BH3),]
  si_sort3 <- unique(si_sort3[,.(SITE_IDENTIFIER, SPECIES,
                                 SI_FINAL3 = SI3,
                                 SI_FINAL_SOURCE3 = VISIT_NUMBER,
                                 sort3 = TRUE)],
                     by = c("SITE_IDENTIFIER", "SPECIES"))
  si_master <- unique(treelist_whole[,.(SITE_IDENTIFIER, VISIT_NUMBER, SPECIES)])
  si_master <- merge(si_master,
                     smry_method_all,
                     by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES"),
                     all.x = TRUE)
  si_master <- merge(si_master,
                     si_sort1,
                     by = c("SITE_IDENTIFIER", "SPECIES"),
                     all.x = TRUE)
  si_master <- merge(si_master,
                     si_sort2,
                     by = c("SITE_IDENTIFIER", "SPECIES"),
                     all.x = TRUE)
  si_master <- merge(si_master,
                     si_sort3,
                     by = c("SITE_IDENTIFIER", "SPECIES"),
                     all.x = TRUE)
  si_master <- si_master[sort1 == TRUE | sort2 == TRUE | sort3 == TRUE,]
  # equv si_allmethod3
  si_master[,':='(sort1 = NULL,
                  sort2 = NULL,
                  sort3 = NULL)]
  # *get leading species (by ba) site index;
  # *note, that the leading species is specific to a given measurement;
  # *leading species can change over subsequent measurements, so that the leading SI is species specific;
  # *if a given leading species has no SI site trees, then no leading SI will be available for that measurement, even though;
  # *si is standardized across all measurements;
  # summarize the leading and secondary species for a given msmt
  # based on BA of the trees with dbh>=4, and live trees
  ba_smry <- treelist_whole[volumeTree == TRUE &
                              DBH >= 4 & LV_D == "L",
                            .(ba_tot_sp = sum(BA_TREE)),
                            by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES")]
  ba_smry[, ba_tot := sum(ba_tot_sp),
          by = c("SITE_IDENTIFIER", "VISIT_NUMBER")]
  ba_smry[, BA_PCT := round(100*ba_tot_sp/ba_tot, 2)]
  ba_smry <- ba_smry[order(SITE_IDENTIFIER, VISIT_NUMBER, -BA_PCT),]
  ba_smry[, BA_PCT_RANK := 1:length(SPECIES), by = c("SITE_IDENTIFIER", "VISIT_NUMBER")]
  ba_smry[,':='(ba_tot_sp = NULL,
                ba_tot = NULL)]
  smry_by_sp <- merge(ba_smry,
                      si_master,
                      by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES"),
                      all.x = TRUE)
  smry_by_sp <- smry_by_sp[order(SITE_IDENTIFIER, VISIT_NUMBER, BA_PCT_RANK),
                           .(CLSTR_ID = paste0(SITE_IDENTIFIER, "-PSP", VISIT_NUMBER),
                             SITE_IDENTIFIER, VISIT_NUMBER, SPECIES,
                             BA_PCT, BA_PCT_RANK,
                             SI1, N_SI1,
                             SI2, N_SI2,
                             SI3, N_SI3 = N_AGE3,
                             AGE_BH1 = round(AGE_BH1),
                             N_AGE1,
                             AGE_BH2 = round(AGE_BH2),
                             N_AGE2,
                             AGE_BH3 = round(AGE_BH3),
                             N_AGE3,
                             AGE_TOT1 = round(AGE_TOT1),
                             AGE_TOT2 = round(AGE_TOT2),
                             AGE_TOT3 = round(AGE_TOT3),
                             HTOP1, N_HTOP1,
                             HTOP2, N_HTOP2,
                             HTOP3, N_HTOP3)]
  # leading species (rank 1 by basal area percent) per measurement
  spcomp_4cm_lead <- ba_smry[BA_PCT_RANK == 1,
                             .(SITE_IDENTIFIER, VISIT_NUMBER, SPECIES, BA_PCT)]
  spcomp_4cm_lead <- merge(spcomp_4cm_lead,
                           si_master,
                           by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES"),
                           all.x = TRUE)
  names(spcomp_4cm_lead) <- paste0("LEAD_", names(spcomp_4cm_lead))
  setnames(spcomp_4cm_lead, c("LEAD_SITE_IDENTIFIER", "LEAD_VISIT_NUMBER", "LEAD_SPECIES"),
           c("SITE_IDENTIFIER", "VISIT_NUMBER", "LEAD_SP"))
  # for the second leading species
  spcomp_4cm_secd <- ba_smry[BA_PCT_RANK == 2,
                             .(SITE_IDENTIFIER, VISIT_NUMBER, SPECIES, BA_PCT)]
  spcomp_4cm_secd <- merge(spcomp_4cm_secd,
                           si_master,
                           by = c("SITE_IDENTIFIER", "VISIT_NUMBER", "SPECIES"),
                           all.x = TRUE)
  names(spcomp_4cm_secd) <- paste0("SECD_", names(spcomp_4cm_secd))
  setnames(spcomp_4cm_secd, c("SECD_SITE_IDENTIFIER", "SECD_VISIT_NUMBER", "SECD_SPECIES"),
           c("SITE_IDENTIFIER", "VISIT_NUMBER", "SECD_SP"))
  si_lead_secd <- merge(spcomp_4cm_lead,
                        spcomp_4cm_secd,
                        by = c("SITE_IDENTIFIER", "VISIT_NUMBER"),
                        all = TRUE)
  si_lead_secd <- si_lead_secd[,.(CLSTR_ID = paste0(SITE_IDENTIFIER, "-PSP", VISIT_NUMBER),
                                  SITE_IDENTIFIER, VISIT_NUMBER,
                                  LEAD_SP, LEAD_BA_PCT,
                                  LEAD_SI1, LEAD_N_SI1,
                                  LEAD_SI2, LEAD_N_SI2,
                                  LEAD_SI3, LEAD_N_SI3 = LEAD_N_AGE3,
                                  LEAD_SI_FINAL1, LEAD_SI_FINAL_SOURCE1,
                                  LEAD_SI_FINAL2, LEAD_SI_FINAL_SOURCE2,
                                  LEAD_SI_FINAL3, LEAD_SI_FINAL_SOURCE3,
                                  LEAD_AGE_BH1 = round(LEAD_AGE_BH1),
                                  LEAD_N_AGE1,
                                  LEAD_AGE_BH2 = round(LEAD_AGE_BH2),
                                  LEAD_N_AGE2,
                                  LEAD_AGE_BH3 = round(LEAD_AGE_BH3),
                                  LEAD_N_AGE3,
                                  LEAD_AGE_TOT1 = round(LEAD_AGE_TOT1),
                                  LEAD_AGE_TOT2 = round(LEAD_AGE_TOT2),
                                  LEAD_AGE_TOT3 = round(LEAD_AGE_TOT3),
                                  LEAD_HTOP1, LEAD_N_HTOP1,
                                  LEAD_HTOP2, LEAD_N_HTOP2,
                                  LEAD_HTOP3, LEAD_N_HTOP3,
                                  SECD_SP, SECD_BA_PCT,
                                  SECD_SI1, SECD_N_SI1,
                                  SECD_SI2, SECD_N_SI2,
                                  SECD_SI3, SECD_N_SI3 = SECD_N_AGE3,
                                  SECD_SI_FINAL1, SECD_SI_FINAL_SOURCE1,
                                  SECD_SI_FINAL2, SECD_SI_FINAL_SOURCE2,
                                  SECD_SI_FINAL3, SECD_SI_FINAL_SOURCE3,
                                  SECD_AGE_BH1 = round(SECD_AGE_BH1),
                                  SECD_N_AGE1,
                                  SECD_AGE_BH2 = round(SECD_AGE_BH2),
                                  SECD_N_AGE2,
                                  SECD_AGE_BH3 = round(SECD_AGE_BH3),
                                  SECD_N_AGE3,
                                  SECD_AGE_TOT1 = round(SECD_AGE_TOT1),
                                  SECD_AGE_TOT2 = round(SECD_AGE_TOT2),
                                  SECD_AGE_TOT3 = round(SECD_AGE_TOT3))]
  # *some cases where leading species changes over measurements, but site tree data only available for
  # one species (eg., leading species at later msmts;
  # *stand total age is based on leading species at establishment. but if the leading species
  # at first measure doesnot have site tree data;
  # *then need to have a surrogate age assigned;
  # *site index stays specific to the species at given measure;
  # *but a last resort total age assigned to sample id will be based on the leading species
  # at last measure, if no site tree data are available;
  # *for the leading species at establishment;
  # *this part address this;
  surage1 <- si_master[,.(SITE_IDENTIFIER, VISIT_NUMBER,
                          SPECIES, SPC1_LMEAS_BHAGE = round(AGE_BH3),
                          SPC1_LMEAS_TOTAGE = round(AGE_TOT3))]
  surage2 <- ba_smry[BA_PCT_RANK == 1,
                     .(SITE_IDENTIFIER, VISIT_NUMBER, SPECIES)]
  surage2[, lastvisit := max(VISIT_NUMBER),
          by = "SITE_IDENTIFIER"]
  surage2 <- surage2[VISIT_NUMBER == lastvisit,
                     .(SITE_IDENTIFIER, sp_lead = SPECIES)]
  surage1 <- merge(surage1,
                   surage2,
                   by = "SITE_IDENTIFIER",
                   all.x = TRUE)
  surage1 <- surage1[SPECIES == sp_lead,
                     .(SITE_IDENTIFIER, VISIT_NUMBER, SPECIES,
                       SPC1_LMEAS_BHAGE, SPC1_LMEAS_TOTAGE)]
  si_lead_secd <- merge(si_lead_secd,
                        surage1,
                        by = c("SITE_IDENTIFIER", "VISIT_NUMBER"),
                        all.x = TRUE)
  # *note that since stand age is based on the average age
  # of the site trees of the leading species (using si method 1) at the first measurement;
  # *all subsequent measurements are based on this age. so if the later
  # measurements have a different leading species, then the total age may no longer;
  # *be relevent. at this time, total age is kept consistent with
  # measurement interval, and is assigned blank if the total age goes less than zero;
  si_lead_secd_standage <- si_lead_secd[,.(SITE_IDENTIFIER,
                                           VISIT_NUMBER,
                                           LEAD_AGE_TOT1,
                                           LEAD_AGE_BH1,
                                           LEAD_AGE_TOT2,
                                           LEAD_AGE_BH2,
                                           LEAD_AGE_TOT3,
                                           LEAD_AGE_BH3,
                                           SPC1_LMEAS_TOTAGE,
                                           SPC1_LMEAS_BHAGE,
                                           SECD_AGE_TOT3,
                                           SECD_AGE_BH3,
                                           LEAD_SP)]
  si_lead_secd_standage[, visit_ref := min(VISIT_NUMBER),
                        by = "SITE_IDENTIFIER"]
  si_lead_secd_standage <- si_lead_secd_standage[VISIT_NUMBER == visit_ref,]
  # *base stand age on average age from si method 1, 2, or 3, in that order of priority, depending whether a age is available for each si method;
  # *first use method 1 if available;
  si_lead_secd_standage[LEAD_AGE_TOT1 >= 4,
                        ':='(tot_age_ref = LEAD_AGE_TOT1,
                             bh_age_ref = LEAD_AGE_BH1,
                             STAND_AGE_SOURCE = "FIRSTM_METH1")]
  si_lead_secd_standage[LEAD_AGE_TOT2 >= 4 & is.na(STAND_AGE_SOURCE),
                        ':='(tot_age_ref = LEAD_AGE_TOT2,
                             bh_age_ref = LEAD_AGE_BH2,
                             STAND_AGE_SOURCE = "FIRSTM_METH2")]
  si_lead_secd_standage[LEAD_AGE_TOT3 >= 4 & is.na(STAND_AGE_SOURCE),
                        ':='(tot_age_ref = LEAD_AGE_TOT3,
                             bh_age_ref = LEAD_AGE_BH3,
                             STAND_AGE_SOURCE = "FIRSTM_METH3")]
  si_lead_secd_standage[SPC1_LMEAS_TOTAGE >= 4 & is.na(STAND_AGE_SOURCE),
                        ':='(tot_age_ref = SPC1_LMEAS_TOTAGE,
                             bh_age_ref = SPC1_LMEAS_BHAGE,
                             STAND_AGE_SOURCE = "LASTM_METH3")]
  si_lead_secd_standage[SECD_AGE_TOT3 >= 4 & is.na(STAND_AGE_SOURCE),
                        ':='(tot_age_ref = SECD_AGE_TOT3,
                             bh_age_ref = SECD_AGE_BH3,
                             STAND_AGE_SOURCE = "FIRSTM_METH3_S2")]
  si_lead_secd_standage[, LAST_SPC1 := LEAD_SP]
  si_lead_secd_standage <- si_lead_secd_standage[,.(SITE_IDENTIFIER,
                                                    visit_ref,
                                                    tot_age_ref,
                                                    bh_age_ref,
                                                    STAND_AGE_SOURCE,
                                                    LAST_SPC1)]
  si_lead_secd <- merge(si_lead_secd,
                        si_lead_secd_standage,
                        by = "SITE_IDENTIFIER",
                        all.x = TRUE)
  # to get sample msmt summaries
  sampmsmt <- unique(treemsmt[,.(SITE_IDENTIFIER, VISIT_NUMBER, MEAS_YR)])
  si_lead_secd <- merge(si_lead_secd,
                        sampmsmt[,.(SITE_IDENTIFIER, VISIT_NUMBER,
                                    MEAS_YR)],
                        by = c("SITE_IDENTIFIER", "VISIT_NUMBER"),
                        all.x = TRUE)
  si_lead_secd[visit_ref == VISIT_NUMBER,
               MEAS_YR_ref := MEAS_YR]
  si_lead_secd[, MEAS_YR_ref := max(MEAS_YR_ref, na.rm = TRUE),
               by = "SITE_IDENTIFIER"]
  # roll the reference age forward/backward by measurement-year difference
  si_lead_secd[,':='(TOT_STAND_AGE = MEAS_YR - MEAS_YR_ref + tot_age_ref,
                     BH_STAND_AGE = MEAS_YR - MEAS_YR_ref + bh_age_ref)]
  si_lead_secd[,':='(SPECIES = NULL,
                     SPC1_LMEAS_BHAGE = NULL,
                     SPC1_LMEAS_TOTAGE = NULL,
                     tot_age_ref = NULL,
                     bh_age_ref = NULL,
                     LAST_SPC1 = NULL,
                     visit_ref = NULL,
                     MEAS_YR = NULL,
                     MEAS_YR_ref = NULL)]
  return(list(smry_by_clsp = smry_by_sp,
              smry_by_cl = si_lead_secd,
              tree_order = treelist_whole))
}
|
4435935bbb4f9771986fce7ea522942259358a7d
|
5e37ee5e60def89a5eb4c9e938994a88c15c55d8
|
/Project3-MachineLearning/aull_dobbins_ganemccalla_schott/ganemccalla/kaggle EDA.R
|
20e6a52af8ae379becd3ee8f1731aa018f824f9c
|
[] |
no_license
|
xiyuansun/bootcamp009_project
|
2c079c94d2339a61397d5ea8765bf4bbc7a5f5b7
|
53bad9ea33d665db222bfa4a38a92580d811b53d
|
refs/heads/master
| 2020-05-02T18:08:48.813998
| 2017-06-02T15:30:42
| 2017-06-02T15:30:42
| 178,120,031
| 1
| 0
| null | 2019-03-28T03:33:46
| 2019-03-28T03:33:45
| null |
UTF-8
|
R
| false
| false
| 1,382
|
r
|
kaggle EDA.R
|
# Exploratory analysis + baseline linear model for the Kaggle housing data.
# Reads train/test CSVs from local paths (edit them!), keeps the most
# relevant columns, and fits a simple lm() on price_doc.

# change these paths to your local copies!
fileLoc <- "C:/Users/Robin/Desktop/Kaggle Competition/train.csv"
testFileLoc <- "C:/Users/Robin/Desktop/Kaggle Competition/test.csv"

library(dplyr)

trainData <- read.csv(fileLoc)
testData <- read.csv(testFileLoc)

# These 13 columns are most relevant. The rest contain information about the
# raion (region), which could be put into a smaller table, and there's also
# lots of information about nearby facilities, which will probably be reduced.
reducedTrainData <- dplyr::select(trainData, price_doc, timestamp, full_sq,
                                  life_sq, floor, max_floor, material,
                                  build_year, num_room, kitch_sq, state,
                                  product_type, sub_area)
reducedTestData <- dplyr::select(testData, timestamp, full_sq, life_sq, floor,
                                 max_floor, material, build_year, num_room,
                                 kitch_sq, state, product_type, sub_area)

# We see there are lots of NA values; we should probably impute some values,
# as deleting all the NAs would reduce the dataset largely.
which(is.na(reducedTrainData))
hist(reducedTrainData$price_doc)
# We do not have a normal distribution of prices.

# We remove the categorical variables for now.
model <- lm(price_doc ~ . - timestamp - sub_area, data = reducedTrainData)
# Bug fix: `na.action = na.remove` referenced a function that does not exist,
# so this call errored. na.pass keeps NA rows so predictions stay aligned
# with the test-set rows (useful for building a submission).
modelPredict <- predict(model, newdata = testData, na.action = na.pass)
library(tree)
prediction <- as.data.frame(modelPredict)
submission <- cbind()  # placeholder: submission assembly not implemented yet
summary(model)
# The R-squared value is .4322; life_sq, max_floor and kitch_sq
# are outside the p-value threshold.
|
57791a9e79e0b5c66b7cce0ad80cb02e6841f2ee
|
ceef8f6f738da7d8cb2148f250d750872b0b9378
|
/Cheat Sheet Quizz2.R
|
d2b8307ef7e14110fdcd497e36f06919e216f49e
|
[] |
no_license
|
amarques09/R
|
e6f02678eab5a9dd3051661f5d57e406d0b9e1d9
|
65e076b4f5e0f64d9a311027f6895fe9cc8a7a53
|
refs/heads/master
| 2021-02-03T21:25:26.603969
| 2020-02-27T15:04:03
| 2020-02-27T15:04:03
| 243,543,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,860
|
r
|
Cheat Sheet Quizz2.R
|
############Data.frame
##indexation
#numeric
###########Loops
#### Create a loop that iterates through all the positions of 'v' and, for each
#### iteration, prints "FALSE value found" if the value of 'v' in that position
#### is FALSE. Do this using both a for and a while loop. Each loop should contain an if
#### (or if-else) clause.
v <- sample(c(TRUE, FALSE), 100, replace = TRUE);
for (index in 1:length(v)){
if (!v[index]){
print("FALSE value found")
} else {
}
}
index <- 1;
while (index <= length(v)){
if (!v[index]){
print("FALSE value found")
} else {
}
index <- index + 1;
}
#### Your objective now is to create a loop (you can choose which type) that iterates through the positions
#### of 'v' until it finds an index where 'v[index]' is distinct from 'v[index+1]', in that moment you should
#### print the value of 'index' and stop the loop using the "break" command. Beware infinite loops.
v <- sample(c(TRUE, FALSE), 100, replace = TRUE);
index;
for (index in 1:length(v)){
if (v[index] != v[index+4]){
print(index);
break;
}
}
#### Code two nested for loops in order to add a different random value
#### to each cell of the matrix. Suggestion: use runif to compute
#### each random number.
new_M <- matrix(0, nrow = nrow(M), ncol = ncol(M));
for (i in 1:nrow(new_M)){
for (j in 1:ncol(new_M)){
M[i,j] = M[i,j] + runif(1, 0, 1);
}
}
identical(M, new_M);
head(M);
head(new_M);
#######Matrix
df <- matrix(c("Rex", "Tom", "Jerry", "Bobby",
"dog", "cat", "mouse", "cat",
3, 5, 2, 7), nrow = 4, ncol = 3);
colnames(df) <- c("name", "type", "age");
df;
##indexation
#numeric
### Use numeric indexation to get the value of column 'type' in the third row.
df[3, 2];
#logical
### Use logical indexation to get the rows where 'type' is equal to cat. Do not create
### the logical index manually, it should be the output of a logical operator (>, <, ==, !=, etc.)
df[df[, colnames(df) == "type"] == "cat",];
#byname
### Use indexation by name to get the age of Tom. You can select the row of 'df'
### corresponding to Tom using any indexation method, but the age column must be
### selected using indexation by name.
df[2, "age"];
###########FUNCTIONS
#### (*) EXERCISE 5: Create a function, called 'my_or', that returns as output ####
#### TRUE only if the first argument, named 'x', or the second argument, named 'y',
#### are TRUE. Use default values so that the output of
my_or();
#### is FALSE
# Logical OR of two values; both default to FALSE, so `my_or()` is FALSE.
# Uses the elementwise `|`, so vector inputs are OR-ed position by position.
my_or <- function(x = FALSE, y = FALSE) {
  x | y
}
###########Data.table
#Creating a data.table
dt1 <- data.table(v1=v1,v2=v2);
#Converting to data.table
dt <- as.data.table(iris);
# data.table using dt[i, j, by] operator
res_3 <- dt[Sepal.Length >= 5.4 & Petal.Length <= 2.0];
res_3;
#counting the number of rows with Petal.Length greater than 5 using
dt[Petal.Length > 5, list(n= .N)];
#Counting the number of cars (rows) with 6 cylinders
dt[ cyl ==6, list(n = .N)]
#creating a new column and summing two rows
dt[Sepal.Length >= 5.4, list(sum_width = sum(Sepal.Width + Petal.Width))];
#computing mean for each value of Species.
dt[, list(mean_sepallenght = mean(Sepal.Length)), by = "Species"]
dt[, list(avg_sepal_length = mean(Sepal.Length)), by = "Species"]
#compute the maximum weight for each value of gear.
dt[,list(max_w = max(wt)),by = "gear" ]
#computing the number of something, grouped by x, wiht a condition on the rows (>4);
dt[Petal.Length > 4,list(n=sum(Sepal.Length>5)), by = "Species"];
#using all the 3 [i,j,by]
#One variable
dt[Sepal.Length > 5, list(n = .N), by = "Species"];
res <- dt[Sepal.Length > 5, list(n = .N), by = c("Species", "Sepal.Length")];
#Multiple variables
res <- dt[Sepal.Length > 5, list(n = .N, avg_sepal_widgt = mean(Sepal.Width)),
by = c("Species", "Sepal.Length")];
View(res);
dat <-readRDS ("/Users/amand/Documents/IE/Programming R/Quizz2/userbase.RData");
head(dat[, list(n= .N), by = "origin"][order(n, decreasing = TRUE)],1);
head(dat[, list(n = .N), by = "destination"][order(n, decreasing = TRUE)],5);
#compute by number of cylinders, rows of automatic transmission and manual. Only cars with more than 3 gears.
dt[ gear > 3, list(automatic= sum(am == 0), manual = sum(am ==1) ) , by = "cyl"]
#compute the minimum value of 'mpg' for each value of 'am', taking into account only cars with more than 4 cylinders.
dt[cyl >4, min(.SD[,"mpg"]), by = "am"];
# Take into account only flights bought after "2018-11-01"
head(dat)
dat[bookind_date>=as.Date("2018-11-01")];
###Chaining
dt[Sepal.Length > 5, list(avg_sepal_width = mean(Sepal.Width)),
by = "Species"][order(avg_sepal_width)]
# Get the top 5 users in terms of number of flights purchased via online channels
head(dat[sale_channel=="online",list(n_purchases= .N), by = "user"][order(n_purchases,
decreasing = TRUE)],5);
# Get also the top 5 in terms of price per purchase.
head(dat[canal_venta == "online", list(avg_price = mean(price)), by = "user"][order(avg_price,
decreasing = TRUE)], 5)
###.SD (applies function to all the dt)
# Returns the LEAST frequent level of `x` (despite the name): the frequency
# table is sorted ascending and the first level name is taken.
my_mode <- function(x) {
  counts <- sort(table(as.factor(x)), decreasing = FALSE)
  names(counts)[1]
}
#Decreasing = FALSE because it wants the less frequent value;
dt <- as.data.table(mtcars);
less_frequent <- dt[, sapply(.SD, my_mode)];
names(less_frequent) <- colnames(mtcars);
less_frequent
# Third quartile (75th percentile) of a numeric vector. Returns the named
# value produced by stats::quantile() (name "75%").
my_percentile <- function(x) {
  quantile(x, probs = 0.75)
}
dt <- as.data.table(mtcars);
third_quartiles <- dt[, sapply(.SD, my_percentile)];
names(third_quartiles) <- colnames(mtcars);
third_quartiles
# Counts how many elements of `x` exceed 5 (the name is historical; it does
# not count NULLs). NA elements propagate to an NA count, as in the original.
null_counter <- function(x) {
  sum(x > 5)
}
dt <- as.data.table(mtcars);
large_values <- dt[, sapply(.SD, null_counter)];
names(large_values) <- colnames(mtcars);
###########Lists
###########Plots
#general
plot(v, col = "red", fg = "blue", pch = 2, type = "l",
main = "My first plot", xlab = "x", ylab = "y",
xlim = c(10,90), ylim = c(20,100),
lty = 2, font = 2, las = 1, cex.axis = 1);
# Table of frequencies
tf <- table(v);
barplot(tf);
#histogram
hist(v, col = "purple", fg = "red",
main = "TITLE", sub= "subtitle",xlab = "x values",
ylim = c(-10,30),
font = 2, las = 2, cex.axis = 0.6,
breaks = 5
);
#barplot
barplot(frequencies, col = "green", font = 2);
#boxplot
boxplot(v);
v <- c(v, 1000);
boxplot(v, outline = FALSE);
boxplot(v, col = "yellow",
main = "my_blox", sub = "subtitle", xlab = "x",
ylab = "value",
ylim = c(-100,500),
las = 3, cex.axis = 1,
outline = FALSE);
#density
plot(density(v), type = "l", col = "blue");
# Replace missing values
sapply(dat, function(x){sum(is.na(x))});
dat$price[is.na(dat$price)] <- mean(dat$price, na.rm = TRUE);
sapply(dat, function(x){sum(is.na(x))});
# Plot density
plot(density(dat$price), col = "blue");
#cumulative distribution
plot(ecdf(v), col = "blue");
###########Ggplot
### Create plot
library(ggplot2)
my_plot <- ggplot(as.data.table(mtcars),
aes(x=1:nrow(mtcars),y=cyl));
my_plot <- my_plot + geom_point(col="purple"); (#layer)
my_plot <- my_plot + labs(subtitle = "From mtcars dataset", x = "index");
my_plot
### Add layer to set palette
my_plot <- my_plot +
scale_colour_brewer(palette = "YlOrRd")
print(my_plot);
library(data.table);
library(ggplot2);
### Create plot
my_plot <- ggplot(as.data.table(mtcars),
aes(x=1:nrow(mtcars), y = mpg));
### Add layer of points
my_plot <- my_plot + geom_line(col = "blue");
### Add layer to set axis ranges
my_plot <- my_plot + xlim(c(0, 50))
### Add layer to set main and axis titles
my_plot <- my_plot +
labs(title="Car mpg", subtitle="From mtcars dataset",
y="mpg", caption="mtcars dataset mpg");
### Print plot
my_plot;
### Create plot
my_plot <- ggplot(as.data.table(mtcars),
aes(y = wt));
### Add layer of points
my_plot <- my_plot + geom_boxplot(col = "blue",
fill = "green",
outlier.colour ="red", notch=FALSE);
### Add layer to set main and axis titles
my_plot <- my_plot +
labs(subtitle="From mtcars dataset",
y="wt");
### Add layer to set axis ranges
my_plot <- my_plot + ylim(c(0, 10))
### Print plot
my_plot;
### Create plot
my_plot <- ggplot(as.data.table(mtcars),
aes(y = mpg));
### Add layer of points
my_plot <- my_plot + geom_boxplot(col = "purple",
fill = "blue",
outlier.colour ="red");
### Add layer to set main and axis titles
my_plot <- my_plot +
labs(subtitle="From mtcars dataset (again)",
y="mpg");
### Add layer to reverse y axis
my_plot <- my_plot + scale_y_reverse();
### Print plot
my_plot;
#############Mark-Down
##########Shiny
############################# UI ##################################
# Define UI for application that draws a histogram
shinyUI(fluidPage(
title = 'mtcars Analysis',
mainPanel(
img(src='logo.png', align = "right"),
tags$head(
tags$style(type='text/css',
".nav-tabs {font-size: 25px} ")),
tabsetPanel(
type = "tabs",
tabPanel("dt[i, j, by]",
### SELECTORS
# Slider
sliderInput("selected_rows", label = h3("Rows Selected"), min = 1,
max = nrow(mtcars), value = c(1, 10),
round = -2, step = 1),
# Multiple response choice
selectInput("selected_columns", label = h3("Columns Selected"),
choices = c("",colnames(mtcars)),
selected = 1,
width='55%',
multiple = TRUE),
# Single response choice
selectInput("selected_op", label = h3("Operation to Compute"),
choices = c("", "mean", "min", "max"),
selected = 1,
width='55%',
multiple = FALSE),
# Multiple response choice
selectInput("selected_group", label = h3("Group By"),
choices = c("",colnames(mtcars)),
selected = 1,
width='55%',
multiple = TRUE),
### TABLE 1
# Print table
fluidRow(
column(12, DT::dataTableOutput('data'))
),
# Download button
sidebarPanel(
radioButtons("filetype", "File type:",
choices = c("excel", "csv", "tsv")),
downloadButton('downloadData', 'Download')
)
,
### TABLE 2
# Print table
fluidRow(
column(12, DT::dataTableOutput('operation'))
),
# Download button
sidebarPanel(
radioButtons("filetype2", "File type:",
choices = c("excel", "csv", "tsv")),
downloadButton('downloadData2', 'Download')
)
),
tabPanel("Plot",
# Single response choice
selectInput("selected_column_plot", label = h3("Column to Plot"),
choices = c("",colnames(mtcars)),
selected = 1,
width='55%',
multiple = FALSE),
fluidRow(
column(12, plotOutput("plot"))
)
)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Show Data Set"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
selectInput("dataset", label = h3("Select the data set"),
choices = c("iris","mtcars"),
selected = 1,
width='55%',
multiple = FALSE)
),
# Show a plot of the generated distribution
fluidRow(
column(12, DT::dataTableOutput('tablechoosen'))
)
)
))
########################## Server
library(shiny);
library(data.table);
library(ggplot2);
# Define server logic required to draw a histogram
shinyServer(function(input, output){
### Reactive functions
# Function to filter data
compute_data <- reactive({
if (length(input$selected_columns) > 0){
dat <- as.data.table(mtcars[input$selected_rows[1]:input$selected_rows[2], input$selected_columns]);
}else{
dat <- data.table();
}
return(dat)
})
# Function to compute operation over filtered data
compute_operation <- reactive({
dat <- compute_data();
if (input$selected_op!=""){
if (input$selected_op == "mean"){
dat <- dat[, sapply(.SD, mean), by = eval(input$selected_group),
.SDcols = setdiff(input$selected_columns,input$selected_group)];
} else if (input$selected_op == "max"){
dat <- dat[, sapply(.SD, max), by = eval(input$selected_group),
.SDcols = setdiff(input$selected_columns,input$selected_group)];
} else if (input$selected_op == "min"){
dat <- dat[, sapply(.SD, min), by = eval(input$selected_group),
.SDcols = setdiff(input$selected_columns,input$selected_group)];
}
}else{
dat <- data.table();
}
return(dat)
})
### Tables
# Print 'data' table
output$data = DT::renderDataTable(
compute_data(), filter = 'top', rownames=FALSE)
# Print 'operation' table
output$operation = DT::renderDataTable(
compute_operation(), filter = 'top', rownames=FALSE)
### Download button
# 'data' table button
output$downloadData <- downloadHandler(
# This function returns a string which tells the client
# browser what name to use when saving the file.
filename = function() {
paste("filtered_data", gsub("excel", "csv",input$filetype), sep = ".")
},
# This function should write data to a file given to it by
# the argument 'file'.
content = function(file) {
if (input$filetype == "excel"){
write.csv2(compute_data(), file);
} else {
sep <- switch(input$filetype, "csv" = ",", "tsv" = "\t");
write.table( compute_data(), file, sep = sep,
row.names = FALSE);
}
}
)
# 'operation' table button
output$downloadData2 <- downloadHandler(
# This function returns a string which tells the client
# browser what name to use when saving the file.
filename = function() {
paste("operation", gsub("excel", "csv",input$filetype2), sep = ".")
},
# This function should write data to a file given to it by
# the argument 'file'.
content = function(file) {
if (input$filetype2 == "excel"){
write.csv2(compute_operation(), file);
} else {
sep <- switch(input$filetype2, "csv" = ",", "tsv" = "\t");
write.table( compute_operation(), file, sep = sep,
row.names = FALSE);
}
})
### Plots
compute_plot <- reactive({
dat <- compute_data();
if (input$selected_column_plot != ""){
### Create plot
my_plot <- ggplot(dat, aes(x=1:nrow(dat),
y = as.numeric(t(dat[,input$selected_column_plot, with = F]))));
### Add layer of points
my_plot <- my_plot + geom_point(col = "red");
### Add layer to set main and axis titles
my_plot <- my_plot +
labs(title="Scatterplot", subtitle="From mtcars dataset",
y= input$selected_column_plot, x="n", caption="mtcars dataset");
### Add layer to set axis values
my_plot <- my_plot + scale_x_continuous(breaks=seq(0, nrow(dat), 2));
### Add layer to change theme
my_plot <- my_plot + theme_dark();
my_plot;
}
})
output$plot <- renderPlot({
compute_plot();
})
}
)
library(shiny)
library(data.table)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
compute_data <- reactive({
if (input$dataset == "iris"){
dat <- data.table(iris);
} else {
dat <- data.table(mtcars);
}
return(dat)
});
# Print table
output$tablechoosen = DT::renderDataTable(compute_data(), filter = 'top', rownames=FALSE)
})
|
817f256f5267f92c190c75c8654755673ae098b0
|
71f0ab7e5f296645273dbf967a3500300036d32c
|
/R/mapalign.R
|
1079ffeae6a3ded6ef20671386515548a3428535
|
[] |
no_license
|
tcgriffith/RNAmrf
|
c9eeb3f3732df045a379cf4ada23bad8d6f3cf8b
|
964d992f3fea5c5e90f613e7d3a591dc6a83c96d
|
refs/heads/master
| 2023-02-25T17:23:39.355326
| 2021-02-05T00:53:22
| 2021-02-05T00:53:22
| 252,070,631
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,206
|
r
|
mapalign.R
|
# falign_R: score-only local alignment over a precomputed score matrix.
#
# Fills a (rows+1) x (cols+1) DP table where each cell is the best of a
# diagonal match (previous diagonal + score), a step down, or a step right,
# with no gap penalty, and returns the maximum cell value reached.
#
# Changes vs. the original: removed the unused `gap_o` local and the dead
# commented-out return; collapsed the nested if/else into max(); added a
# guard so empty input returns 0 instead of erroring on `2:(rows+1)`.
#
# @param score_mtx numeric matrix of per-position match scores (rows x cols).
# @param rows number of rows of `score_mtx` to use.
# @param cols number of columns of `score_mtx` to use.
# @return the maximum DP score reached (single numeric, >= 0).
falign_R=function(score_mtx, rows,cols){
  # Degenerate input: nothing to align.
  if (rows < 1 || cols < 1) {
    return(0)
  }
  max_sco = 0
  sco = matrix(0, rows + 1, cols + 1)
  for (i in seq_len(rows) + 1) {
    for (j in seq_len(cols) + 1) {
      A = sco[i - 1, j - 1] + score_mtx[i - 1, j - 1]  # diagonal (match)
      D = sco[i - 1, j]                                # step down
      R = sco[i, j - 1]                                # step right
      sco[i, j] = max(A, D, R)
      if (sco[i, j] > max_sco) {
        max_sco = sco[i, j]
      }
    }
  }
  return(max_sco)
}
# align_R: local (Smith-Waterman-style) alignment over a precomputed score
# matrix with gap penalties, returning a traceback mapping.
#
# Traceback label codes: 0 = stop/zero cell, 1 = diagonal (match),
# 2 = step down (gap), 3 = step right (gap). Opening a gap right after a
# match (label 1) costs `gap_open`; continuing a gap costs
# `gap_open * gap_e` (a cheaper extension).
#
# Returns a2b: for each row r of score_mtx, the matched column (1-based) or
# -1 when unaligned. With debug=TRUE the DP score matrix is returned instead.
align_R=function(score_mtx, gap_open=-1,gap_e=0.2,debug=FALSE){
  rows=nrow(score_mtx)
  cols=ncol(score_mtx)
  # DP score matrix and traceback-label matrix, padded by a zero row/column.
  sco=matrix(0, rows+1,cols+1)
  label=matrix(0, rows+1,cols+1)
  max_sco=0
  # a2b[r] will hold the column aligned to row r; -1 means unaligned.
  a2b=integer(rows)
  a2b[]=-1
  max_i=0
  max_j=0
  for (i in 2:(rows+1)){
    for (j in 2:(cols+1)){
      # Candidate moves: A = diagonal match, D = gap (down), R = gap (right).
      A = sco[i-1,j-1] + score_mtx[i-1,j-1]
      D = sco[i-1,j]
      R = sco[i,j-1]
      # Gap cost depends on whether the neighboring cell ended in a match
      # (label 1 => opening a new gap) or was already in a gap (extension).
      if(label[i-1,j] == 1){
        D = D+ gap_open
      } else{
        D = D+ gap_open * gap_e
      }
      if(label[i,j-1] == 1){
        R = R+ gap_open
      } else{
        R = R+ gap_open * gap_e
      }
      # if(label[i-1,j] == 1){D =D+ gap_b[j-1]}else{D =D+ gap_b[j-1] * gap_e}
      # if(label[i,j-1] == 1){R =R+ gap_a[i-1]}else{R =D+ gap_a[i-1] * gap_e}
      # Local alignment: clamp at zero; otherwise keep the best move and
      # remember the position of the global maximum (traceback start).
      if(A <= 0 && D <= 0 && R <= 0){label[i,j] = 0;sco[i,j] = 0;}
      else{
        if(A >= R){if(A >= D){label[i,j] = 1;sco[i,j] = A;}else{label[i,j] = 2;sco[i,j] = D;}}
        else{if(R >= D){label[i,j] = 3;sco[i,j] = R;}else{label[i,j] = 2;sco[i,j] = D;}}
        if(sco[i,j] > max_sco){max_i = i;max_j = j;max_sco = sco[i,j];}
      }
    }
  }
  # Trace back from the best-scoring cell until a zero (label 0) cell.
  i = max_i;
  j = max_j;
  while (1) {
    if (label[i,j] == 0) {
      break
    }
    # Diagonal move: matrix row i-1 aligns to matrix column j-1.
    else if (label[i,j] == 1) {
      a2b[i - 1] = j - 1
      i=i-1
      j=j-1
    }
    else if (label[i,j] == 2) {
      i=i-1
    }
    else if (label[i,j] == 3) {
      j=j-1
    }
  }
  if (debug){
    return(sco)
  }
  return(a2b)
}
#' read_mrf read a MRF model from GREMLIN output, column renumbered
#'
#' @param filemrf path to a GREMLIN output file containing `V[...]` (field)
#'   lines and `W[...][...]` (coupling) lines
#'
#' @return list object of mrf with: `len` (model length), `h` (field table),
#'   `j` (coupling table), `mat_mrf` (symmetric per-pair coupling-strength
#'   matrix), `mat_apc` (APC-corrected version), `mrf_mat` (flattened
#'   coupling matrix), `mrf_h` (field matrix)
#' @export
#' @import dplyr
#' @importFrom data.table fread
#'
read_mrf= function(filemrf){
  myalphabet = c("a", "u", "c", "g", "-")
  # Field (1-body) terms: lines starting with "V", one row per position.
  v1 = data.table::fread(cmd = paste("grep '^V'", filemrf))
  names(v1)[-1] = myalphabet
  # Coupling (2-body) terms: lines starting with "W", one row per pair.
  w1 = data.table::fread(cmd = paste("grep '^W'", filemrf))
  len=nrow(v1)
  len_a=length(myalphabet)
  # Original position indices are embedded in the row labels (e.g. "V[12]");
  # keep them as *_ori and renumber positions consecutively via match().
  v1$i_ori=as.integer(gsub(".*\\[(.*?)\\].*","\\1",v1$V1))
  v1$i=1:nrow(v1)
  w1$i_ori=as.integer(gsub(".*\\[(.*?)\\]\\[(.*?)\\].*","\\1",w1$V1))
  w1$i=match(w1$i_ori,v1$i_ori)
  w1$j_ori=as.integer(gsub(".*\\[(.*?)\\]\\[(.*?)\\].*","\\2",w1$V1))
  w1$j=match(w1$j_ori, v1$i_ori)
  # Expand all couplings into one (len*len_a) x (len*len_a) matrix.
  mrf_mat=RNAmrf:::mrf2mrf_mat(w1,len)
  mrfh = as.matrix((v1[, 2:6]))
  w1mat=as.matrix(w1[,2:(len_a^2+1)])
  # Per-pair coupling strength: Frobenius norm of each 5x5 coupling block,
  # excluding the gap row/column (index 5).
  mat_score = sapply(1:nrow(w1), function(i) {
    tmpmat = matrix(w1mat[i,], 5, 5)
    score = sqrt(sum(tmpmat[-5,-5] ^ 2))
    return(score)
  })
  ids=data.frame(i=w1$i,j=w1$j)
  ids$score = mat_score
  # Symmetric len x len coupling-strength matrix (filled in both triangles).
  mat_mrf = matrix(0, nrow(v1), nrow(v1))
  mat_mrf[as.matrix(ids[, c(1, 2)])] = ids[, 3]
  mat_mrf[as.matrix(ids[, c(2, 1)])] = ids[, 3]
  mat_apc=RNAmrf:::APC_correction(mat_mrf)
  mrf = list(
    len = len,
    h = v1,
    j = w1,
    mat_mrf = mat_mrf,
    mat_apc = mat_apc,
    mrf_mat=mrf_mat,
    mrf_h=mrfh
  )
  return(mrf)
}
# Look up the coupling J(i,a; j,b) in the flattened coupling matrix, where
# each position occupies a block of `len_a` consecutive rows/columns.
retrieve_matj_R <- function(i, a, j, b, mat_j, len_a) {
  row_idx <- (i - 1) * len_a + a
  col_idx <- (j - 1) * len_a + b
  mat_j[row_idx, col_idx]
}
# mrf2mrf_mat: expand the per-pair coupling table `w1` into one large
# (mrflen*len_a) x (mrflen*len_a) matrix, where the 5x5 block at
# (position i, position j) holds the couplings between all alphabet states
# of those two positions. `w1` is expected to carry renumbered positions in
# columns i/j and the 25 coupling values in columns 2..26 (see read_mrf).
mrf2mrf_mat = function(w1,mrflen) {
  myalphabet = c("a", "u", "c", "g", "-")
  len = mrflen
  len_a = length(myalphabet)
  mat_j = matrix(0, len * len_a, len * len_a)
  for (m in 1:nrow(w1)) {
    id_i = w1$i[m]
    id_j = w1$j[m]
    # Top-left corner (linear index) of the 5x5 block for this (i, j) pair.
    id_ia = id2_to_id1(1, id_i, len_a)
    id_ja = id2_to_id1(1, id_j, len_a)
    # Row m of w1 stores the 25 coupling values in row-major order.
    mat = matrix(as.matrix(w1[m, 2:26]), 5, 5, byrow = TRUE)
    # array_j[id_i, id_j, ,] = mat
    mat_j[id_ia:(id_ia + len_a - 1), id_ja:(id_ja + len_a - 1)] = mat
  }
  return(mat_j)
}
# Convert 1-based linear (column-major) indices into (row, column) pairs for
# a matrix with `dim` rows. Vectorized over `id`.
id1_to_id2 <- function(id, dim) {
  row <- id %% dim
  row[row == 0] <- dim          # exact multiples of `dim` land on the last row
  col <- ceiling(id / dim)
  data.frame(i = row, j = col)
}
# Inverse of id1_to_id2(): (row, column) -> 1-based column-major linear
# index for a matrix with `dim` rows. Vectorized.
id2_to_id1 <- function(i, j, dim) {
  (j - 1) * dim + i
}
#' Encode an RNA sequence as 0-based integer codes
#'
#' Letters a/u/c/g map to 0/1/2/3; any other character (gaps, ambiguity
#' codes) is treated as a gap and dropped from the ungapped outputs.
#'
#' @param seq character vector of single lowercase bases.
#' @return list with `seq_ungapped` (the gap-free letters),
#'   `seq_int_ungapped` (their 0-based codes) and `seq_int_ref`
#'   (1-based positions of the kept letters in the input).
#' @export
#'
encode_seq = function(seq) {
  alphabet <- c("a", "u", "c", "g")
  codes <- match(seq, alphabet, nomatch = 0) - 1  # 0-based; non-bases -> -1
  keep <- codes > -1
  list(
    seq_ungapped = seq[keep],
    seq_int_ungapped = codes[keep],
    seq_int_ref = which(keep)
  )
}
# Sequence identity: fraction of positions where `seq` equals `seqref`,
# relative to the reference length.
bench_seqid <- function(seq, seqref) {
  matches <- seq == seqref
  sum(matches) / length(seqref)
}
# bench_pair=function(seq,seqref,ctref, debug=FALSE){
#
# npair=sum(ctref$j>0)
#
# pairs=paste0(seq[ctref$i[ctref$j>0]],seq[ctref$j[ctref$j>0]])
#
# pairs=toupper(pairs)
#
# if(debug){
# print(paste(pairs))
# }
#
# return(sum(pairs %in% RNASSP::energy2)/npair)
# }
#
# bench_aln=function(seq,seqref,ctref,debug=FALSE){
# seqid=bench_seqid(seq,seqref)
# pairid=bench_pair(seq,seqref,ctref,debug)
# return(c(
# seqid=seqid,
# pairid=pairid
# ))
# }
# Project an ungapped sequence onto MRF coordinates. `a2b_1b[k]` is the
# 1-based MRF column of sequence position k (values <= 0 mean unaligned).
# Unfilled columns become "-". With type = "s" the vector is collapsed to a
# single string; the default "c" returns the character vector.
a2b2seq = function(a2b_1b, seq, mrf_len, type = c("c", "s")) {
  type <- match.arg(type)
  aligned <- rep("-", mrf_len)
  mapped <- a2b_1b > 0
  aligned[a2b_1b[mapped]] <- seq[mapped]
  if (type == "s") {
    return(paste0(aligned, collapse = ""))
  }
  aligned
}
#' Align a sequence to an MRF model
#'
#' @param seq sequence (character vector, may contain gaps)
#' @param mrf mrf read by read_mrf
#' @param iteration number of iterations
#' @param wt_h weight of field term H
#' @param wt_j weight of coupling term J
#' @param init_method method to generate the initial score matrix:
#'   1 (default): fast, based on the 1-body term only;
#'   2: slow, also uses the coupling matrix
#' @param gap_ext gap extension penalty
#' @param gap_open gap open penalty
#' @param debug verbose
#'
#' @return a2b, mapping index to sequence. Length is the same as mrf
#' @export
#'
align_seq2mrf = function(seq, mrf,iteration=20,wt_h=1.0,wt_j=1.0,init_method=1,gap_ext=0.1, gap_open=-1,debug=TRUE) {
  encoded = encode_seq(seq)
  # Initial score matrix: method 2 also uses the couplings (slower).
  if (init_method == 2) {
    sco0 = ini_SCO(encoded$seq_int_ungapped,
                   mrf_h = mrf$mrf_h,
                   mrf_mat = mrf$mrf_mat)
  } else {
    sco0 = ini_SCO_simple(encoded$seq_int_ungapped,
                          mrf_h = mrf$mrf_h)
  }
  # Iteratively refine the score matrix with the field and coupling terms.
  sco = mod_SCO(
    sco0,
    iteration = iteration,
    encoded$seq_int_ungapped,
    mrf_mat = mrf$mrf_mat,
    mrf_h = mrf$mrf_h,
    wt_h = wt_h,
    wt_j = wt_j,
    gap_o = gap_open,
    gap_e = gap_ext,
    DEBUG = debug
  )
  # Final alignment / traceback on the refined score matrix.
  align(sco, gap_ext = gap_ext, gap_open = gap_open)
}
#' Align a sequence to an MRF with position-specific gap handling
#'
#' @param seq sequence (character vector, may contain gaps)
#' @param mrf mrf read by read_mrf
#' @param iteration number of iterations
#' @param wt_h weight of field term H
#' @param wt_j weight of coupling term J
#' @param gap_ext gap extension penalty
#' @param gap_open gap open penalty (no default; must be supplied)
#' @param debug verbose
#'
#' @return a2b, mapping index to sequence. Length is the same as mrf
#' @export
#'
align_seq2mrf_PSgap = function(seq, mrf,iteration=20,wt_h=1.0,wt_j=1.0,gap_ext=0.1, gap_open,debug=TRUE) {
  encoded = encode_seq(seq)
  # Initial score matrix from the 1-body (field) term only.
  sco0 = ini_SCO_simple(encoded$seq_int_ungapped,
                        mrf_h = mrf$mrf_h)
  # Iterative refinement with position-specific gap handling.
  sco = mod_SCO_PSgap(
    sco0,
    iteration = iteration,
    encoded$seq_int_ungapped,
    mrf_mat = mrf$mrf_mat,
    mrf_h = mrf$mrf_h,
    wt_h = wt_h,
    wt_j = wt_j,
    gap_o = gap_open,
    gap_e = gap_ext,
    DEBUG = debug
  )
  align_PSgap(sco, gap_ext = gap_ext, gap_open = gap_open)
}
#' Align a sequence to an MRF with separate insertion/deletion gap penalties
#'
#' @param seq sequence (character vector, may contain gaps)
#' @param mrf mrf read by read_mrf
#' @param iteration number of iterations
#' @param wt_h weight of field term H
#' @param wt_j weight of coupling term J
#' @param gap_ext gap extension penalty
#' @param gap_ins gap open penalty for insertions
#' @param gap_del gap open penalty for deletions
#' @param debug verbose
#'
#' @return a2b, mapping index to sequence. Length is the same as mrf
#' @export
#'
align_seq2mrf_psgap2=function(seq, mrf,iteration=20,wt_h=1.0,wt_j=1.0,gap_ext=0.1, gap_ins,gap_del,debug=TRUE) {
  encoded = RNAmrf::encode_seq(seq)
  # Initial score matrix from the 1-body (field) term only.
  sco0 = RNAmrf:::ini_SCO_simple(encoded$seq_int_ungapped,
                                 mrf_h = mrf$mrf_h)
  # Iterative refinement with distinct insertion/deletion open penalties.
  sco = RNAmrf:::mod_SCO_PSgap2(
    sco0,
    iteration = iteration,
    encoded$seq_int_ungapped,
    mrf_mat = mrf$mrf_mat,
    mrf_h = mrf$mrf_h,
    wt_h = wt_h,
    wt_j = wt_j,
    gap_ins = gap_ins,
    gap_del = gap_del,
    gap_e = gap_ext,
    DEBUG = debug
  )
  RNAmrf:::align_PSgap2(sco, gap_ext = gap_ext, gap_ins = gap_ins, gap_del = gap_del)
}
# Align a sequence against an MRF given directly as its matrices (rather
# than the full mrf object used by align_seq2mrf()).
#
# @param seq sequence (character vector, may contain gaps).
# @param mrf_mat coupling matrix of the MRF (flattened, len*len_a square).
# @param mrf_h field-term matrix of the MRF.
# @param iteration number of score-refinement iterations.
# @param wt_h,wt_j weights of the field and coupling terms.
# @param debug verbose flag passed through to mod_SCO().
# @return a2b mapping of MRF positions to sequence positions.
align_seq2mrf_mtx = function(seq, mrf_mat, mrf_h,iteration=20,wt_h=1.0,wt_j=1.0,debug=TRUE) {
  exp_seq = encode_seq(seq)
  # Bug fix: this used `mrf$mrf_h`, but no `mrf` object exists in this
  # function's scope -- the `mrf_h` argument is what was intended.
  SCO_init = ini_SCO_simple(exp_seq$seq_int_ungapped,
                            mrf_h = mrf_h)
  SCO_mod = mod_SCO(
    SCO_init,
    iteration = iteration,
    exp_seq$seq_int_ungapped,
    mrf_mat = mrf_mat,
    mrf_h = mrf_h,
    wt_h = wt_h,
    wt_j = wt_j,
    DEBUG = debug
  )
  a2b = align(SCO_mod, gap_ext = 0.1, gap_open = -1)
  return(a2b)
}
#' bencha2b
#'
#' Benchmark an alignment mapping against its MRF (pseudo-energy terms) and,
#' optionally, against a reference sequence/structure.
#'
#' @param a2b_0b a2b 0 based (output of the Rcpp aligner)
#' @param seq sequence (character vector, may contain gaps)
#' @param mrf mrf from read_mrf
#' @param seq_ref reference sequence (optional)
#' @param ct_ref reference ct with BP (optional)
#'
#' @return named vector of benchmark metrics
#' @export
#'
bench_a2b = function(a2b_0b, # a2b output of Rcpp, 0 based
                     seq,
                     mrf,
                     seq_ref = NULL,
                     ct_ref = NULL) {
  exp_seq = encode_seq(seq)
  # MRF pseudo-energy of the alignment: [1] field (single) and [2] coupling
  # (pair) contributions.
  rslt_aln = score_aln(a2b_0b, exp_seq$seq_int_ungapped, mrf$mrf_mat, mrf$mrf_h, DEBUG = FALSE)
  rslt = c(mrf_total = rslt_aln[1]+rslt_aln[2],
           mrf_single = rslt_aln[1],
           mrf_pair = rslt_aln[2])
  a2b_1b = a2b_0b + 1
  seq_final = a2b2seq(a2b_1b, exp_seq$seq_ungapped, mrf$len)
  if (!is.null(seq_ref)) {
    # Bug fix: this previously compared against the *global* `seqs[[1]]`
    # instead of the `seq_ref` argument, making the parameter a no-op.
    benchaln = bench_aln(seq_final, seqref = seq_ref, ctref = ct_ref)
    rslt = c(rslt, benchaln)
  }
  return(rslt)
}
#' pair_a2b2aln convert two a2b to aligned sequence.
#'
#' Positions that are unaligned (-1) in either mapping get a fresh column
#' inserted into the shared coordinate system so that no letter is dropped;
#' both sequences are then painted onto a common gap scaffold.
#'
#' @param a2b_1 integer vector of length(seq1), encoding the alignment of seq1 to MRF
#' @param a2b_2 integer vector of length(seq2), encoding the alignment of seq2 to MRF
#' @param seqs unaligned seq list, read from seqinr::read.fasta()
#'
#' @return list of aligned seqs
#' @export
#'
pair_a2b2aln = function(a2b_1, a2b_2, seqs) {
  # First pass: for each unaligned position of seq1, open a new column right
  # after the last used one, shifting every later column of BOTH mappings up
  # by one so the two coordinate systems stay consistent.
  last_idx = -1
  for (i in 1:length(a2b_1)) {
    if (a2b_1[i] == -1) {
      a2b_1[a2b_1 > last_idx] = a2b_1[a2b_1 > last_idx] + 1
      a2b_2[a2b_2 > last_idx] = a2b_2[a2b_2 > last_idx] + 1
      a2b_1[i] = last_idx + 1 # fill unaligned
    }
    last_idx = a2b_1[i]
  }
  # Second pass: same treatment for unaligned positions of seq2.
  last_idx = -1
  for (i in 1:length(a2b_2)) {
    if (a2b_2[i] == -1) {
      a2b_1[a2b_1 > last_idx] = a2b_1[a2b_1 > last_idx] + 1
      a2b_2[a2b_2 > last_idx] = a2b_2[a2b_2 > last_idx] + 1
      a2b_2[i] = last_idx + 1 # fill unaligned
    }
    last_idx = a2b_2[i]
  }
  # Switch to 1-based columns and paint each sequence onto a common "-"
  # scaffold whose width is the largest column used by either mapping.
  a2b_1_1b=a2b_1+1
  a2b_2_1b=a2b_2+1
  seq_aln1 = character(max(a2b_1_1b, a2b_2_1b))
  seq_aln1[] = "-"
  seq_aln2 = character(max(a2b_1_1b, a2b_2_1b))
  seq_aln2[] = "-"
  seq_aln1[a2b_1_1b] = seqs[[1]]
  seq_aln2[a2b_2_1b] = seqs[[2]]
  return(list(
    seq_aln1=seq_aln1,
    seq_aln2=seq_aln2
  ))
}
#' mrfaln_seqs
#'
#' Re-align each input sequence against an MRF model, with a progress bar.
#'
#' @param seqs seqs read from seqinr::read.fasta
#' @param mrf mrf model read from RNAmrf::read_mrf_renum
#'
#' @return a list of re-aligned sequences in a2m format.
#' Can be converted to other alignment format using esl-reformat
#' @export
#' @import pbapply
#'
mrfaln_seqs = function(seqs, mrf) {
  pbapply::pboptions(type = "timer")  # console progress bar
  a2m_list <- pbapply::pblapply(seqs, function(one_seq) {
    # Align this sequence to the MRF, then convert the mapping to a2m.
    mapping <- align_seq2mrf(
      one_seq,
      mrf = mrf,
      gap_open = -3,
      debug = FALSE,
      iteration = 25
    )
    encoded <- RNAmrf:::encode_seq(one_seq)
    RNAmrf:::a2b2a2m(mapping, encoded$seq_int_ungapped, mrflen = mrf$len)
  })
  return(a2m_list)
}
|
c2dc23456ba824a39e549f7f6ed2ceb5f444ea6a
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/blackbox/R/writeFinalInfo.R
|
695e2520d02c15cb0b2df6a4cab417008d425a24
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,391
|
r
|
writeFinalInfo.R
|
# writeFinalInfo: final reporting step of a blackbox run. Prints the point
# estimates held in the "rosglobal" option, writes model-specific derived
# quantities to the clean-results connection, emits the machine-readable
# summary line (read by external C code), then closes connections.
# Side effects only; returns invisible(NULL).
writeFinalInfo <- function(cleanResu="") {
  rosglobal <- blackbox.getOption("rosglobal")
  plotOptions <- blackbox.getOption("plotOptions")
  oneDimCIvars <- blackbox.getOption("oneDimCIvars")
  message.redef("...done.")
  message.redef("\n*** Final likelihood estimates, and predicted logL: ***")
  message.redef(prettyNamedUserValues(c(rosglobal$canonVP, "ln(L)"=rosglobal$value), extradigits=2))
  # Return code: integer part is the optimizer's convergence code; a nonzero
  # edge level is folded into the decimal part.
  returncode <- rosglobal$convergence
  tmp <- rosglobal$edgelevel
  if (tmp>0) returncode <- returncode+tmp/(10^ceiling(log(tmp, 10))) ## second summand goes in decimal part of returcode
  write("\n*** Point estimates *** \n", file=cleanResu)
  writeCleanNAMED(prettyNamedUserValues(rosglobal$canonVP), file=cleanResu)
  # Model-specific derived quantities, one branch per demographic model.
  DemographicModel <- blackbox.getOption("DemographicModel")
  if ("IBD" %in% DemographicModel) {
    D1IBDbool <- "1D" %in% DemographicModel
    Nbfactor <- blackbox.getOption("Nbfactor")
    write(paste("\n Neighborhood: ", prettynum(rosglobal$latt2Ns2*Nbfactor), " ",
                if (D1IBDbool) {blackbox.getOption("GeoUnit")} else {""}, sep=""), file=cleanResu)
    if (D1IBDbool) write(paste("## Conversion factor from Nb in lattice units to Nb in geographic distance units as deduced from input:", Nbfactor),
                         file=blackbox.getOption("estimOutf"))
  } else if (length(intersect(DemographicModel, c("OnePopVarSize", "IM")))>0) {
    write(paste("\n N ratio: ", prettynum(rosglobal$Nratio), " ", sep=""), file=cleanResu)
  } else if ("OnePopFounderFlush" %in% DemographicModel) {
    write(paste("\n Nanc ratio: ", prettynum(rosglobal$Nratio), " ", sep=""), file=cleanResu)
    write(paste("\n NactNfounder ratio: ", prettynum(rosglobal$NactNfounderratio), " ", sep=""), file=cleanResu)
    write(paste("\n NfounderNanc ratio: ", prettynum(rosglobal$NfounderNancratio), " ", sep=""), file=cleanResu)
  }
  # Quantities only reported when the corresponding profile plot or 1D CI
  # was requested in the options.
  if (length(intersect(DemographicModel, c("OnePopVarSize", "OnePopFounderFlush", "IM")))>0) {
    if ( !("IM" %in% DemographicModel) && ( ("DgmuProf" %innc% plotOptions) || ("Dgmu" %innc% oneDimCIvars) ) ) write(paste("\n Dg*mu: ", prettynum(rosglobal$Dgmu), " ", sep=""), file=cleanResu)
    if ( ("TgmuProf" %innc% plotOptions) || ("Tgmu" %innc% oneDimCIvars) ) write(paste("\n Tg*mu: ", prettynum(rosglobal$Tgmu), " ", sep=""), file=cleanResu)
  }
  if (length(intersect(DemographicModel, c("Npop", "IM")))>0) {
    write(paste("\n NMratio: ", prettynum(rosglobal$NMratio), " ", sep=""), file=cleanResu)
    write(paste("\n mratio: ", prettynum(rosglobal$mratio), " ", sep=""), file=cleanResu)
    if ( ("movermuProf" %innc% plotOptions) || ("m1overmu" %innc% oneDimCIvars) ) write(paste("\n m1/mu: ", prettynum(rosglobal$m1overmu), " ", sep=""), file=cleanResu)
    if ( ("movermuProf" %innc% plotOptions) || ("m2overmu" %innc% oneDimCIvars) ) write(paste("\n m2/mu: ", prettynum(rosglobal$m2overmu), " ", sep=""), file=cleanResu)
  }
  ## note that the C codes seeks estimates in the VERY LAST line of the output.txt file: do not write comments after the following output:
  upperPred_crits <- blackbox.getOption("upperPred_crits")
  if (is.null(upperPred_crits)) upperPred_crits <- list(RMSpred=NA,GOP=NA) ## if sampleByResp has not been run (which should not occur)
  writeoutput(paste(blackbox.getOption("dataFile"), "(final)", sep=""),
              returncode=returncode,
              NA,
              upperPred_crits$RMSpred, ## RMSpred for upper points only
              upperPred_crits$GOP) ## GOP root mean relative squared error of prediction
  # In non-interactive mode, close every graphics device and the estimation
  # output connection.
  if ( ! blackbox.getOption("interactiveGraphics")) { ##
    plotFiles <- blackbox.getOption("plotFiles")
    if(!is.null(plotFiles) & length(plotFiles)>0)
      message.redef(paste("See file(s) ", paste(names(plotFiles), sep="", collapse=", "), " for figures", sep=""))
    graphics.off() ## close all graphic files, but note that plotFiles is not cleaned.
    close(blackbox.getOption("estimOutf")) ## ! leaves connection open if interactive run !
  }
  write("\nNormal ending.", file=cleanResu)
  close(cleanResu) ##
  blackbox.options(cleanResu="") ## so that all 'cleanResu' output now goes to the standard output connection; see ?write
  alarm() ## Sounds when execution of the R source file reaches this point. Except in Rstudio...
  invisible(NULL)
}
|
3605f8916fd0cf283094d01c6b87894a15c489b1
|
a0340fe520ed82859d7145605fba1aca5b21ad1b
|
/man/plot.pi.Rd
|
5fb54b7c684a568990e63ceaea334fc3c474324a
|
[] |
no_license
|
vincentlomazzi/ptds2018hw4g5
|
314dd6592e6ef685be67505eaaa4991b52bf7a83
|
7563e5b754e4bee5af6f65186dd6f9533a699d9e
|
refs/heads/master
| 2020-04-08T12:08:05.502735
| 2018-12-02T17:29:53
| 2018-12-02T17:29:53
| 159,334,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 594
|
rd
|
plot.pi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pi.R
\name{plot.pi}
\alias{plot.pi}
\title{Plot objects of resulting from the estimate_pi function}
\usage{
\method{plot}{pi}(x)
}
\arguments{
\item{x}{An output from the estimate_pi function}
}
\value{
A plot of the simulated points
}
\description{
This function draws a plot representing the approximation of the
number pi obtained with the estimate_pi function.
}
\examples{
estimated_pi <- estimate_pi(5000)
plot(estimated_pi)
}
\author{
Germano David
Lomazzi Vincent
Bron Luca
Raisin Edgar
Grandadam Patrik
}
|
99eee8b4fa0500221511b80a36f88bb438352546
|
597f8d0728ee40f03680aaee00351a9ee0c38cb8
|
/R/cleanWdLogField.R
|
1d6a2f13b284d0dcd53c7aabfe3bdba3509a17e6
|
[] |
no_license
|
michelk/logbuch.R
|
878b606ab233e676e3c8b626d09b0b6d5026189a
|
264021a8cdef715b8174c9c3e7735233863873d7
|
refs/heads/master
| 2021-01-25T00:22:37.774440
| 2017-11-30T18:48:20
| 2017-11-30T18:48:20
| 4,222,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
cleanWdLogField.R
|
# Strip every whitespace character (spaces, tabs, etc.) from each element of
# `x`, a field of a day-record file; returns the cleaned character vector.
cleanWdLogField <- function(x) {
  no_spaces <- gsub("\\s+", "", x)
  # Second pass kept from the original; \\s+ already covers tabs, so this is
  # effectively a no-op retained for identical behavior.
  gsub("\t+", "", no_spaces)
}
|
039e0eea8d976fa3cafaa569d5542e679e9c99d4
|
dd8132404e8c7b028cb13cba904c50aace01c6a7
|
/swt/src/spc/vcg.u/src/otg$uii.r
|
02b159355eb8b582de085a39bec6fb985afed04e
|
[] |
no_license
|
arnoldrobbins/gt-swt
|
d0784d058fab9b8b587f850aeccede0305d5b2f8
|
2922b9d14b396ccd8947d0a9a535a368bec1d6ae
|
refs/heads/master
| 2020-07-29T09:41:19.362530
| 2019-10-04T11:36:01
| 2019-10-04T11:36:01
| 209,741,739
| 15
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
otg$uii.r
|
# otg$uii --- generate a uii request group
#
# def: integer hardware definition word to forward with the request.
#
# Packs a two-word request group and hands it to the group dispatcher.
# Word 1 combines UIIREQ_GROUP (scaled by BIT8) with a count of 1 --
# presumably a type byte plus payload length; confirm against group().
# Word 2 carries def unchanged.
subroutine otg$uii (def)
integer def
integer group_data (2)
# DB-prefixed lines are debug tracing, active only when DB is defined.
DB call print (ERROUT, "otg$uii: hardware def = *,-8i*n"s, def)
group_data (1) = UIIREQ_GROUP * BIT8 + 1
group_data (2) = def
call group (group_data)
return
end
|
f0899bd3db1044d76c590fcdd8f1349a0bca50a2
|
814c9360135107b70a1099ee585b2a5010b2a88e
|
/R/bglab.R
|
97acfd51edd3381afedbbdc222d366fb8322758a
|
[] |
no_license
|
wjawaid/bglab
|
bb5f7844cb956cd184847be8396063e4ae8551f1
|
9c04f65ae15887ba64d5b635f97c84f27a087ec7
|
refs/heads/master
| 2021-01-09T06:18:17.675740
| 2017-04-09T21:39:54
| 2017-04-09T21:39:54
| 80,956,007
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 223
|
r
|
bglab.R
|
##' @name bglab
##' @title Gottgens Lab Toolkit
##' @description Package of scripts frequently used in Berthold Gottgen's lab for
##' processing scRNA-seq data.
##' @author Wajid Jawaid
NULL
# Register symbols referenced via non-standard evaluation so that
# "no visible binding for global variable" notes are suppressed
# during R CMD check.
globalVariables("heatmapCols")
|
e880e73fe1beaaffe9631caef00c3a9a342fe33e
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/conting/R/print.modprobs.R
|
eb1e913bfe007834714c5a71857e5fe07a24e5f6
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
print.modprobs.R
|
#' Print method for "modprobs" objects: shows the posterior model
#' probability table followed by the total number of models visited.
print.modprobs <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  cat("Posterior model probabilities:\n")
  # Left-justified table of posterior probabilities.
  print(x$table, right = FALSE, digits = digits)
  cat("\n")
  n_visited <- round(x$totmodsvisit, digits = digits)
  cat("Total number of models visited = ", n_visited, "\n")
}
|
566f48fd5b182784b5d48e35c64dd7db0a4c25a0
|
ec29146e9381661afdc8ae329db4576319862271
|
/R/performMultipleRegression.R
|
7dc5773d48ae5ce36a1b330d0a489cbc2a6dfa39
|
[] |
no_license
|
imkrishsub/VisiStat
|
b49f77ae3d59b5448a591b3a50af192103fe6e92
|
019a3f6f0f0fd8b763358b6c0a1665ebb48bd6e9
|
refs/heads/master
| 2021-05-01T07:56:15.327519
| 2017-06-30T13:04:31
| 2017-06-30T13:04:31
| 14,532,263
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,313
|
r
|
performMultipleRegression.R
|
# Fit a multiple linear regression of `outcomeVariable` on the given
# `explanatoryVariables` using the columns of `dataset`.
#
# Args:
#   outcomeVariable:      name (string) of the response column.
#   explanatoryVariables: character vector of predictor column names.
#   dataset:              anything coercible to a data.frame containing
#                         those columns.
#
# Returns a list with:
#   intercept    - fitted intercept,
#   coefficients - unnamed vector with one slope per explanatory
#                  variable (in the order supplied),
#   rSquared     - R^2 of the fit,
#   len          - number of explanatory variables.
performMultipleRegression <- function(outcomeVariable, explanatoryVariables, dataset)
{
  table <- as.data.frame(dataset)
  explanatoryVariables <- c(explanatoryVariables)
  print(explanatoryVariables)  # kept for backward-compatible console output
  # Build the model formula programmatically instead of pasting code into
  # eval(parse(...)), which was fragile and unsafe.
  model <- lm(reformulate(explanatoryVariables, response = outcomeVariable),
              data = table)
  results <- summary(model)
  fitted_coefs <- coef(model)
  intercept <- unname(fitted_coefs[["(Intercept)"]])
  # Mirror the original behaviour: take the coefficient immediately
  # following the intercept for each explanatory variable, positionally.
  coefficients <- unname(fitted_coefs[seq_along(explanatoryVariables) + 1])
  list(intercept = intercept, coefficients = coefficients,
       rSquared = results$r.squared, len = length(explanatoryVariables))
}
|
da7191b298deff574e3b882960781f0655304bbf
|
4a292e34ae6a0fdca63ab07b7b0ccc3f57bdc566
|
/man/findNLCD.Rd
|
d4ea2c8ec03a0fffbcd01c1a8c33cf760d8c36c6
|
[
"MIT"
] |
permissive
|
mikejohnson51/HydroData
|
c122ba9d559e46bf2c58db609cff43411d7922d9
|
6b7f08656d7299bca3a544652cb69ae064049d41
|
refs/heads/master
| 2021-07-09T13:51:37.180796
| 2019-01-17T06:36:21
| 2019-01-17T06:36:21
| 112,221,493
| 38
| 9
|
MIT
| 2018-11-29T14:50:59
| 2017-11-27T16:39:08
|
R
|
UTF-8
|
R
| false
| true
| 1,195
|
rd
|
findNLCD.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findNLCD.R
\name{findNLCD}
\alias{findNLCD}
\title{Find National Land Cover Products (NLCD)}
\usage{
findNLCD(AOI = NULL, year = 2011, type = "landcover")
}
\arguments{
\item{AOI}{A Spatial* or simple features geometry, can be piped from \link[AOI]{getAOI}}
\item{year}{the year(s) to download. Options include 2001, 2006, 2011. Default = 2011}
\item{type}{the type of data to downlaod. Options include landcover, canopy, and impervious. Default = landcover}
}
\value{
a list() of minimum length 2: AOI and NLCD
}
\description{
\code{findNLCD} returns \code{Raster} land cover data from the National Land Cover Dataset (\href{https://www.mrlc.gov}{NLCD}) for an AOI.
Data comes the the USA National Map and is avaialble for years 2001, 2006, 2011.
In additon to landcover, users can get data reflecting impervious surface and conaopy cover.
}
\examples{
\dontrun{
dt = getAOI(clip = list("Devil Tower")) \%>\% findNLDC(2006, 'landcover')
dt = getAOI(clip = list("Devil Tower")) \%>\% findNLDC(2011, 'canopy')
dt = getAOI(clip = list("Devil Tower")) \%>\% findNLDC(2011, 'impervious')
}
}
\author{
Mike Johnson
}
|
16029cd00ff8152e24a99dac5cb08a346040c55b
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/lsbclust/man/ClustMeans.Rd
|
6d1123834133781941917a92bf387ae4453ce574
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
ClustMeans.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ClustMeans}
\alias{ClustMeans}
\title{C++ Function for Cluster Means}
\usage{
ClustMeans(nclust, start, data)
}
\arguments{
\item{nclust}{The number of clusters.}
\item{start}{The current clustering vector.}
\item{data}{The concatenated data, with J * K rows and N columns}
}
\value{
A numeric matrix with \code{nclust} rows and \code{J*K} columns.
}
\description{
This function calculates the cluster means in vectorized form based on the current
value of the clustering vector.
}
|
4952be3e7dd95c6eca0a4614984e05ecd5c549af
|
3c2f772f9facb95517f23a9e481aa9787016aa6f
|
/man/estimate_tongfen_correspondence.Rd
|
ca05445113ee7249f8282f1f507e59d0582e21dc
|
[
"MIT"
] |
permissive
|
cgauvi/tongfen
|
c8a53ba8c6bee02406d3263ba00a443d9e7f7bbc
|
fc32292fb18a749f78d6a67c2c5719cb2d1d6612
|
refs/heads/master
| 2022-12-02T13:58:06.283156
| 2020-08-21T22:06:14
| 2020-08-21T22:06:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,571
|
rd
|
estimate_tongfen_correspondence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tongfen.R
\name{estimate_tongfen_correspondence}
\alias{estimate_tongfen_correspondence}
\title{Generate tongfen correspondence for a list of geographies}
\usage{
estimate_tongfen_correspondence(
data,
geo_identifiers,
method = "estimate",
tolerance = 50,
computation_crs = NULL
)
}
\arguments{
\item{data}{list of geometries of class sf}
\item{geo_identifiers}{vector of unique geographic identifiers for each list entry in data.}
\item{method}{aggregation method. Possible values are "estimate" or "identifier". "estimate" estimates the
correspondence purely from the geographic data. "identifier" assumes that regions with identical geo_identifiers are the same,
and uses the "estimate" method for the remaining regions. Default is "estimate".}
\item{tolerance}{tolerance (in projected coordinate units of `computation_crs`) for feature matching}
\item{computation_crs}{optional crs in which the computation should be carried out,
defaults to crs of the first entry in the data parameter.}
}
\value{
A correspondence table linking geo1_uid and geo2_uid with unique TongfenID and TongfenUID columns
that enumerate the common geometry.
}
\description{
\lifecycle{maturing}
Get correspondence data for arbitrary congruent geometries. Congruent means that one can obtain a common
tiling by aggregating several sub-geometries in each of the two input geo data. Worst case scenario the
only common tiling is given by unioning all sub-geometries and there is no finer common tiling.
}
|
c39bfcb2c25528335814db3756d953eedcde4679
|
cfbda84beab0d0710a688363278d7c2354541d39
|
/plot1.R
|
f39a42e9bf2302f817df25b7f61502fdfaaa3e86
|
[] |
no_license
|
VivianeSan/ExData_Plotting1
|
761987ecee4b217f8dc4582ca3c3fc511e8d9737
|
6aeb2a645ee4bff21ecaeb353e2952f787fd48a5
|
refs/heads/master
| 2021-01-09T06:52:14.238771
| 2015-01-12T00:19:15
| 2015-01-12T00:19:15
| 29,110,262
| 0
| 0
| null | 2015-01-11T23:36:09
| 2015-01-11T23:36:09
| null |
UTF-8
|
R
| false
| false
| 349
|
r
|
plot1.R
|
# plot1.R -- histogram of household global active power for the two days
# 2007-02-01 and 2007-02-02, written to plot1.png.
# NOTE(review): require() returns FALSE instead of erroring when sqldf is
# missing; library(sqldf) would fail fast here.
require('sqldf')
# Filter the two target dates while reading, so the full (large) file
# never has to be loaded into memory.  ('1/2/2007' appears to be
# 1 Feb 2007 in the dataset's d/m/yyyy format -- confirm against the
# raw file.)
query <- "SELECT * from file WHERE Date IN('1/2/2007', '2/2/2007')"
data <- read.csv.sql("./household_power_consumption.txt", sql = query, sep=";")
# Render the histogram to a 480x480 PNG file.
png('plot1.png', width = 480, height = 480)
hist(
  data$Global_active_power,
  col = 'red',
  xlab = 'Global Active Power (kilowatts)',
  main = 'Global Active Power')
dev.off()
|
1bd3c8a161af5cb3199b4a19fec2a9ed1887aca5
|
a5bbcb2b8c60e803c0bc6c5f3b6acd6f76f608cd
|
/R/bsNavBarInputs.R
|
70043c1640504cb5ea53f02029d2f0ddcf3f6007
|
[] |
no_license
|
DataXujing/shinyBS
|
fdfaf0784b40c3693e43ade945bec22efa411bd1
|
6bfa2a44b6d05cebd251d7470b039878510fce3d
|
refs/heads/master
| 2021-07-05T21:12:42.048441
| 2015-01-23T15:12:03
| 2015-01-23T15:12:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,988
|
r
|
bsNavBarInputs.R
|
# Create a link input for a navbar that acts like an actionButton.
# inputId: input slot name; label: link text (HTML string or shiny tag);
# href: link target, "#" by default.
bsNavLink <- function(inputId, label, href = "#") {
  # Raw character labels are marked as HTML so they render verbatim.
  if (!inherits(label, "shiny.tag")) {
    label <- HTML(label)
  }
  link <- tags$a(id = inputId, type = "button", href = href,
                 class = "action-button", label)
  tags$li(link)
}
# Create a link input for a navbar that acts like a checkboxInput.
# value gives the initial toggle state; TRUE renders the item "active".
bsNavToggleLink <- function(inputId, label, value = FALSE) {
  if (!inherits(label, "shiny.tag")) {
    label <- HTML(label)
  }
  li_class <- if (value) "active" else ""
  tags$li(class = li_class,
          tags$a(id = inputId, href = "#", class = "sbs-toggle", label))
}
# Update a toggleLink object
# Sends only the non-NULL fields (label and/or value) to the
# client-side input binding identified by inputId over the given
# Shiny session.
updateToggleLink <- function(session, inputId, label=NULL, value=NULL) {
  data <- dropNulls(list(label=label, value = value))
  session$sendInputMessage(inputId, data)
}
# Create a vertical divider between navbar elements
# Returns an empty <li> carrying Bootstrap's "divider-vertical" class.
bsNavDivider <- function() {
  tags$li(class="divider-vertical")
}
# Wraps actionbutton in a li so it works with bsNavBar
# inputId and label are passed straight through to actionButton; the
# "navbar-form" form keeps the button vertically aligned in the navbar.
bsNavButton <- function(inputId, label) {
  tags$li(tags$form(class="navbar-form", actionButton(inputId, label)))
}
# Same as textInput but shows the label as a placeholder and accepts an
# optional pixel width so the control stays compact inside a navbar.
bsNavTextInput <- function(inputId, label, value = "", width = NULL) {
  css <- if (is.null(width)) "" else paste0("width: ", width, "px;")
  field <- tags$input(id = inputId, style = css, type = "text",
                      value = value, placeholder = label)
  tags$li(tags$form(class = "navbar-form", field))
}
# dateInput element for navbars.
# Returns the bootstrap-datepicker JS/CSS dependencies (wrapped in
# singleton() so they are emitted at most once per page) together with
# an <li><form> holding a text input; all widget options travel as
# data-* attributes read by the client-side datepicker binding.
bsNavDateInput <- function(inputId, label, value = NULL, min = NULL,
                           max = NULL, format = "yyyy-mm-dd", startview = "month",
                           weekstart = 0, language = "en", width = NULL) {
  # Normalise Date objects to the ISO strings the widget expects.
  if (inherits(value, "Date"))
    value <- format(value, "%Y-%m-%d")
  if (inherits(min, "Date"))
    min <- format(min, "%Y-%m-%d")
  if (inherits(max, "Date"))
    max <- format(max, "%Y-%m-%d")
  # Optional fixed pixel width keeps the control compact in a navbar.
  style = ""
  if(!is.null(width)) style = paste0("width: ", width, "px;")
  tagList(singleton(tags$head(tags$script(src = "shared/datepicker/js/bootstrap-datepicker.min.js"),
                              tags$link(rel = "stylesheet", type = "text/css", href = "shared/datepicker/css/datepicker.css"))),
          tags$li(tags$form(id = inputId, class = "shiny-date-input navbar-form", tags$input(type = "text", style = style, class = "input-medium datepicker", placeholder = label,
                            `data-date-language` = language, `data-date-weekstart` = weekstart,
                            `data-date-format` = format, `data-date-start-view` = startview,
                            `data-min-date` = min, `data-max-date` = max, `data-initial-date` = value)
          )
          )
  )
}
# Same as dateRangeInput with slight formatting modification. Would like to figure out how to remove space from between date inputs
# Emits two datepicker text inputs (start/end) inside a single
# "input-daterange" form; see bsNavDateInput for how the data-*
# attributes are consumed client-side.
bsNavDateRangeInput <- function(inputId, label, start = NULL, end = NULL,
                                min = NULL, max = NULL, format = "yyyy-mm-dd",
                                startview = "month", weekstart = 0, language = "en", width=NULL) {
  # Normalise Date objects to the ISO strings the widget expects.
  if (inherits(start, "Date"))
    start <- format(start, "%Y-%m-%d")
  if (inherits(end, "Date"))
    end <- format(end, "%Y-%m-%d")
  if (inherits(min, "Date"))
    min <- format(min, "%Y-%m-%d")
  if (inherits(max, "Date"))
    max <- format(max, "%Y-%m-%d")
  style = ""
  if(!is.null(width)) style = paste0("width: ", width, "px;")
  x <- label # Just a placeholder -- `label` is otherwise unused here; no visible label is rendered
  tagList(singleton(tags$head(tags$script(src = "shared/datepicker/js/bootstrap-datepicker.min.js"),
                              tags$link(rel = "stylesheet", type = "text/css", href = "shared/datepicker/css/datepicker.css"))),
          tags$li(tags$form(id = inputId, class = "shiny-date-range-input input-daterange navbar-form",
                            tags$input(class = "input-small", style = style, placeholder="Start Date",
                                       type = "text", `data-date-language` = language,
                                       `data-date-weekstart` = weekstart, `data-date-format` = format,
                                       `data-date-start-view` = startview, `data-min-date` = min,
                                       `data-max-date` = max, `data-initial-date` = start),
                            tags$input(class = "input-small", style = style, placeholder="End Date",
                                       type = "text", `data-date-language` = language,
                                       `data-date-weekstart` = weekstart, `data-date-format` = format,
                                       `data-date-start-view` = startview, `data-min-date` = min,
                                       `data-max-date` = max, `data-initial-date` = end)
          )
          )
  )
}
|
42ee5a95403ce0eb5329e0f7731192abcf305ce2
|
fc9e46acb47b9cf444746941c46cc1471988a750
|
/R/ErrorCheck.R
|
fad465ce5c0eafd5739dff7733d2ec6ce51968bf
|
[] |
no_license
|
SharonLutz/Umediation
|
63c70d5ac27a95ea746b8241a3957bd6ebbc4874
|
b1d5c234d5c2b43044f059cadf6a6b9a0d7c2086
|
refs/heads/master
| 2022-06-13T12:12:30.164593
| 2021-05-25T16:04:05
| 2021-05-25T16:04:05
| 95,026,761
| 2
| 3
| null | 2019-09-26T13:54:02
| 2017-06-21T16:59:13
|
R
|
UTF-8
|
R
| false
| false
| 5,586
|
r
|
ErrorCheck.R
|
# Validate every argument of the Umediation simulation.  Each failed
# check stop()s with a descriptive message; nothing is returned on
# success.  A = exposure, M = mediator, Y = outcome, C = measured
# confounder(s), U = unmeasured confounder(s); the *type arguments take
# "C" (continuous) or "D" (dichotomous), mu*/var* are means/variances,
# gamma*/alpha*/beta* are the model coefficients listed below, and
# n/nSim/nBoot/seed must be positive integers.
ErrorCheck<-function(n=1000,Atype="D",Mtype="C",Ytype="C",Ctype="C",Utype="C",interact=FALSE,muC=0,varC=1,muU=0,varU=1,gamma0=0,gammaC=0,gammaU=0,varA=1,alpha0=0,alphaA=0,alphaC=0,alphaU=0,varM=1,beta0=0,betaA=0,betaM=0,betaI=0,betaC=0,betaU=0,varY=1,alpha=0.05,nSim=300,nBoot=500,seed=1,atreat=1,acontrol=0){
  ##################
  # Models of exposure A,
  # mediator M, outcome Y
  ##################
  # logit(P(A=1)) or E[A]=gamma0+gammaC*C+gammaU*U
  # Logit(P(M=1)) or E[M]=alpha0+alphaA*A+alphaC*C+alphaU*U)
  # logit(P(Y=1)) or E[Y]=beta0+betaA*A+betaM*M+betaI*A*M+betaC*C+betaU*U
  ##################
  # check A,M,Y,C,U type vectors are only C or D
  ##################
  if(Atype!="C"& Atype!="D"){stop("Error: Atype must be D for dichtomous or C for continuous for exposure A.")}
  if(Mtype!="C"& Mtype!="D"){stop("Error: Mtype must be D for dichtomous or C for continuous for mediator M.")}
  if(Ytype!="C"& Ytype!="D"){stop("Error: Ytype must be D for dichtomous or C for continuous for outcome Y.")}
  for(jj in 1:length(Ctype)){if(Ctype[jj]!="C" & Ctype[jj]!="D"){stop("Error: Ctype must be D for dichtomous or C for continuous measured confounders C.")}}
  for(jj in 1:length(Utype)){if(Utype[jj]!="C" & Utype[jj]!="D"){stop("Error: Utype must be D for dichtomous or C for continuous unmeasured confounders U.")}}
  ##################
  # check A variables
  ##################
  # Only a single exposure is supported, so every A-related parameter
  # must have length 1.
  if((length(Atype)!=length(varA))| (length(Atype)!=length(gamma0))| (length(Atype)!=length(alphaA))|(length(Atype)!=length(betaA)) |(length(Atype)!=1 ) ){stop("Error: The length of Atype does not equal the length of varA, gamma0, alphaA, and or betaA, which does not equal one. This function does not accomodate multiple exposures A.")}
  ##################
  # check M variables
  ##################
  if( (length(Mtype)!=length(varM))|(length(Mtype)!=length(alpha0)) |(length(Mtype)!=length(betaM)) |(length(Mtype)!=1 ) ){stop("Error: The length of Mtype does not equal the length of varM, alpha0, and or betaM, which does not equal one. This function does not accomodate multiple mediators M.")}
  ##################
  # check Y variables
  ##################
  if((length(Ytype)!=length(varY))| (length(Ytype)!=length(beta0)) |(length(Ytype)!=1)){stop("Error: The length of Ytype does not equal the length of varY, beta0, and or betaI, which does not equal one. This function does not accomodate multiple outcomes Y.")}
  if(interact==TRUE & (length(betaI)!=1)){stop("Error: length of betaI must be 1 when interact flag is TRUE.")}
  ##################
  # check C variables
  ##################
  # C parameters may be vectors (one entry per measured confounder) but
  # must all agree in length.
  if((length(Ctype)!=length(muC)) |(length(Ctype)!=length(varC)) |(length(Ctype)!=length(gammaC)) |(length(Ctype)!=length(alphaC))|(length(Ctype)!=length(betaC))){stop(paste("Error: Vectors for Ctype, muC, varC, gammaC, alphaC, and betaC must all be the same length."))}
  # For dichotomous confounders muC is Prob(C=1), which must lie
  # strictly inside (0, 1).
  for(j1 in 1:length(Ctype)){
    if(Ctype[j1]=="D"){
      if(muC[j1]<0|muC[j1]==0|muC[j1]==1|muC[j1]>1){stop(paste("Error: Prob(C",j1,"=1) is ",muC[j1],". Prob(C",j1,"=1) must be between 0 and 1 for Ctype",j1,". ",sep=""))}
  }}
  ##################
  # check U variables
  ##################
  if((length(Utype)!=length(muU)) |(length(Utype)!=length(varU)) |(length(Utype)!=length(gammaU))|(length(Utype)!=length(alphaU))|(length(Utype)!=length(betaU))){stop(paste("Error: Vectors for Utype, muU, varU, gammaU, alphaU, and betaU must all be the same length."))}
  for(j2 in 1:length(Utype)){
    if(Utype[j2]=="D"){
      if(muU[j2]<0|muU[j2]==0|muU[j2]==1|muU[j2]>1){stop(paste("Error: Prob(U",j2,"=1) is ",muU[j2],". Prob(U",j2,"=1) must be between 0 and 1 for Utype",j2,". ",sep=""))}
  }}
  ##################
  #check interact flag
  ##################
  if(interact!=FALSE & interact!=TRUE){stop("Error: interact must be TRUE or FALSE. This flag is case sensitive.")}
  if(length(interact)!=1){stop("Error: length of the vector for interact must be 1.")}
  ##################
  #check alpha
  ##################
  # Significance level must be strictly between 0 and 1.
  if(alpha<0 | alpha>1|alpha==0|alpha==1){stop("Error: alpha must be greater than 0 and less than 1")}
  if(length(alpha)!=1){stop("Error: length of the vector for alpha must be 1")}
  ##################
  #check n, nSim, nBoot, seed
  ##################
  if(length(n)!=1){stop("Error: length of the vector for n must be 1")}
  if(length(nSim)!=1){stop("Error: length of the vector for nSim must be 1")}
  if(length(nBoot)!=1){stop("Error: length of the vector for nBoot must be 1")}
  if(length(seed)!=1){stop("Error: length of the vector for seed must be 1")}
  # floor(x) != ceiling(x) flags non-integer values.
  if((floor(n)!=ceiling(n))|(n<0)|(n==0)){stop(paste("Error: the sample size n must be an integer greater than 0"))}
  if((floor(nSim)!=ceiling(nSim))|(nSim<0)|(nSim==0)){stop(paste("Error: the number of simulations nSim must be an integer greater than 0"))}
  if((floor(nBoot)!=ceiling(nBoot))|(nBoot<0)|(nBoot==0)){stop(paste("Error: the number of bootstrap samples nBoot must be an integer greater than 0"))}
  if((floor(seed)!=ceiling(seed))|(seed<0)|(seed==0)){stop(paste("Error: the seed must be an integer greater than 0"))}
  ##################
  #atreat and acontrol
  ##################
  # Exposure levels used for the treatment/control contrast.
  if(length(atreat)!=1 |length(acontrol)!=1 ){stop("Error: length of the vector for atreat and acontrol must be 1")}
  ###################
}#end of function
|
8233d701e8702dc2141a6a6a5264022f5f616e8e
|
dbfe5ce272e204a8e1663ced35c9d48ef4870496
|
/R/graphics.R
|
15ab1c048c05034c54a18be2c54db8eb86000b8f
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hmito/hmRLib
|
fac91a4e2ddfcd899283ec0b63c87c31965fb17f
|
f2cfd54ea491ee79d64f7dd976a94086092b8ef5
|
refs/heads/master
| 2023-08-31T07:21:31.825394
| 2023-08-28T10:02:07
| 2023-08-28T10:02:07
| 41,907,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,744
|
r
|
graphics.R
|
#' Draw an empty plot frame without any data
#' @description Opens a plotting frame by calling \code{plot} with
#'   \code{type = "n"}, so axes are set up but nothing is drawn.
#' @param ... Arguments passed on to \code{plot}.
#' @importFrom graphics plot
#' @export
plot.null <- function(...) {
  plot(0, 0, type = "n", ...)
}
#' Add polygon on image
#' @description Drow polygon on image where z==true
#' @param x vector of x-axis
#' @param y vector of y-axis
#' @param z matrix where polygon is drawn if TRUE
#' @param ... arguments for polygon function
#' @export
image_polygon=function(x,y,z,...){
ez = matrix(FALSE,length(x)+2,length(y)+2)
ez[1+(1:length(x)),1+(1:length(y))]=z
s = hmRLib::small_step_planeroot_from_matrix(
c(2*x[1] - x[2] ,x, 2*x[length(x)] - x[length(x)-1]),
c(2*y[1] - y[2] ,y, 2*y[length(y)] - y[length(y)-1]),ez)
polygon(s$x,s$y,...)
}
#' Add points with error bars
#' @description Drow points with error bars.
#' @param x vector of x-axis.
#' @param y vector of y-axis.
#' @param upper Error value of the upper boundary.
#' @param lower Error value of the lower boundary. In default, lower = -upper.
#' @param head.length Length of the head of error bar.
#' @param horizontal Logical: if true the error bar for x will be drawn.
#' @param ... arguments for arrows and points function
#' @export
error_points = function(x,y,upper,lower=NULL,head.length = 0.05, horizontal = FALSE,...){
if(is.null(lower)){
lower = -upper
}
asel = (upper>0 | lower>0)
if(sum(asel)>0){
if(horizontal){
arrows(x[asel]+lower[asel], y[asel], x[asel]+upper[asel], y[asel], length=head.length, angle=90, code=3,...)
}else{
arrows(x[asel], y[asel]+lower[asel], x[asel], y[asel]+upper[asel], length=head.length, angle=90, code=3,...)
}
}
points(x, y,...)
}
#' @importFrom grDevices extendrange
# Variant of graphics::curve(): samples the function `expr` at n points
# over [from, to] (defaulting to the current plot's x-range) and draws
# it with plot(), or lines() when add = TRUE.  The sampled (x, y)
# coordinates are returned invisibly.
virtual_curve=function (expr, from = NULL, to = NULL, n = 101, add = FALSE,
    type = "l", ylab = NULL, log = NULL, xlim = NULL, ...){
  sexpr <- substitute(expr)
  fcall <- paste(sexpr, "(x)")
  # Default y label is "<fname>(x)".
  if (is.null(ylab)){
    ylab <- fcall
  }
  # Lazily read the usable x-range of the current device; evaluated
  # only if from/to are not both supplied.
  if (is.null(xlim))
    delayedAssign("lims", {
      pu <- par("usr")[1L:2L]
      if (par("xaxs") == "r")
        pu <- extendrange(pu, f = -1/27)
      if (par("xlog"))
        10^pu
      else pu
    })
  else lims <- xlim
  if (is.null(from))
    from <- lims[1L]
  if (is.null(to))
    to <- lims[2L]
  # Determine the log-axis flags, inheriting from the current plot
  # when add = TRUE.
  lg <- if (length(log))
    log
  else paste(if (add && par("xlog"))
    "x", if (add && par("ylog"))
    "y", sep = "")
  if (length(lg) == 0)
    lg <- ""
  # Sample x on a logarithmic or linear grid accordingly.
  x <- if (lg != "" && "x" %in% strsplit(lg, NULL)[[1L]]) {
    if (any(c(from, to) <= 0))
      stop("'from' and 'to' must be > 0 with log=\"x\"")
    exp(seq.int(log(from), log(to), length.out = n))
  }
  else seq.int(from, to, length.out = n)
  # y=numeric(length(x))
  # for(i in 1:length(x))y[i] <- eval(expr, envir = list(x = x[i]), enclos = parent.frame())
  y=sapply(x,expr)
  if (add)
    lines(x, y, type = type, ...)
  else plot(x, y, type = type, ylab = ylab, xlim = xlim, log = lg,
    ...)
  invisible(list(x = x, y = y))
}
#' @importFrom grDevices extendrange
# Shade the region between two curves.  expr1/expr2 may be function
# names, calls or expressions in the variable `x`; both are sampled at
# n points over [from, to] (defaulting to the current plot's x-range).
# One polygon() is drawn for every maximal run of samples where
# y1 >= y2; NA samples and crossings terminate a run.  The combined
# outline coordinates are returned invisibly.
fill.interval=function (expr1, expr2, from = NULL, to = NULL, n = 101, add = FALSE,
    type = "l", ylab = NULL, log = NULL, xlim = NULL, ...){
  # Accept a bare function name for expr1; otherwise require an
  # expression that mentions `x`.
  sexpr1 <- substitute(expr1)
  if (is.name(sexpr1)) {
    fcall1 <- paste(sexpr1, "(x)")
    expr1 <- parse(text = fcall1)
    ylab1 <- fcall1
  }else {
    if (!((is.call(sexpr1) || is.expression(sexpr1)) && match("x",
      all.vars(sexpr1), nomatch = 0L)))
      stop("'expr1' must be a function, call or an expression containing 'x'")
    expr1 <- sexpr1
    ylab1 <- deparse(sexpr1)
  }
  # Same treatment for expr2.
  sexpr2 <- substitute(expr2)
  if (is.name(sexpr2)) {
    fcall2 <- paste(sexpr2, "(x)")
    expr2 <- parse(text = fcall2)
    ylab2 <- fcall2
  }else {
    if (!((is.call(sexpr2) || is.expression(sexpr2)) && match("x",
      all.vars(sexpr2), nomatch = 0L)))
      stop("'expr2' must be a function, call or an expression containing 'x'")
    expr2 <- sexpr2
    ylab2 <- deparse(sexpr2)
  }
  if (is.null(ylab))
    ylab <- paste(ylab1,ylab2,sep="_")
  # Lazily read the device's x-range only when actually needed.
  if (is.null(xlim))
    delayedAssign("lims", {
      pu <- par("usr")[1L:2L]
      if (par("xaxs") == "r")
        pu <- extendrange(pu, f = -1/27)
      if (par("xlog"))
        10^pu
      else pu
    })
  else lims <- xlim
  if (is.null(from))
    from <- lims[1L]
  if (is.null(to))
    to <- lims[2L]
  # Log-axis flags, inherited from the current plot when add = TRUE.
  lg <- if (length(log))
    log
  else paste(if (add && par("xlog"))
    "x", if (add && par("ylog"))
    "y", sep = "")
  if (length(lg) == 0)
    lg <- ""
  x <- if (lg != "" && "x" %in% strsplit(lg, NULL)[[1L]]) {
    if (any(c(from, to) <= 0))
      stop("'from' and 'to' must be > 0 with log=\"x\"")
    exp(seq.int(log(from), log(to), length.out = n))
  }
  else seq.int(from, to, length.out = n)
  # Evaluate both expressions pointwise in the caller's environment.
  y1=numeric(length(x))
  for(i in 1:length(x))y1[i] <- eval(expr1, envir = list(x = x[i]), enclos = parent.frame())
  y2=numeric(length(x))
  for(i in 1:length(x))y2[i] <- eval(expr2, envir = list(x = x[i]), enclos = parent.frame())
  if (!add)plot.null(ylab = ylab, xlim = xlim, log = lg, ...)
  # sep[i] is TRUE where the fill must break (NA sample or y1 < y2);
  # the appended trailing TRUE flushes the final run.
  sep=c(((is.na(y1)|is.na(y2))|(y1<y2)),TRUE)
  sep[is.na(sep)]=TRUE
  if(sep[1]){
    Last=NA
  }else{
    Last=1
  }
  # `Last` marks the start index of the current run; at each break,
  # emit a polygon bounded above by y1 and below by y2 (traversed in
  # reverse to close the outline).
  for(i in 1:(length(sep)-1)){
    if(sep[i+1]){
      if(!is.na(Last)){
        polygon(c(x[Last:i],x[i:Last]),c(y1[Last:i],y2[i:Last]), ...)
        Last=NA
      }
    }else{
      if(is.na(Last))Last=i
    }
  }
  invisible(list(x = c(x,rev(x)),y = c(y1,rev(y2))))
}
# Draw f as a two-colour image: f(x, y) is evaluated on a num x num
# grid spanning xlim x ylim, optionally AND-ed with a mask predicate
# limit(x, y).  FALSE cells are painted fully transparent white, TRUE
# cells with `col`.
limited_image=function(f,limit=NULL,num=400,xlim=c(0,1),ylim=c(0,1),col=rgb(0,0,0,1),add=FALSE,...){
  x=seq(xlim[1],xlim[2],length=num)
  y=seq(ylim[1],ylim[2],length=num)
  # Evaluate f over the full grid; x varies fastest (column-major fill).
  z=matrix(mapply(f,rep(x,times=num),rep(y,each=num)),num,num)
  if(!is.null(limit)){
    limit_area=matrix(mapply(limit,rep(x,times=num),rep(y,each=num)),num,num)
    z=z&limit_area
  }
  # First colour (transparent) maps to FALSE cells, second to TRUE.
  image(x,y,z,col=c(rgb(1.,1.,1.,0.),col),add=add,...)
}
# Draw lines(x, y) restricted to points where limit(x, y) is TRUE.
# Excluded or NA stretches are replaced by NA vertices so that lines()
# lifts the pen instead of connecting across them.  Returns the number
# of retained (non-NA) vertices.
limited_lines=function(x,y,limit=NULL,...){
  if(!is.null(limit)){
    tmpx=x
    tmpy=y
    limit_area=mapply(limit,tmpx,tmpy)
    x=numeric(0)
    y=numeric(0)
    # NOTE(review): rebuilding x/y with c() inside the loop is O(n^2);
    # fine for short paths, worth vectorising if inputs grow.
    for(i in 1:length(limit_area)){
      if(is.na(limit_area[i])){
        # Unknown mask value: break the line here.
        x=c(x,NA)
        y=c(y,NA)
      }else if(limit_area[i]==FALSE){
        # Emit a single NA only on the transition out of a visible run.
        if(i>1 && (!is.na(limit_area[i-1])) && limit_area[i-1]==TRUE){
          x=c(x,NA)
          y=c(y,NA)
        }
      }else{
        x=c(x,tmpx[i])
        y=c(y,tmpy[i])
      }
    }
  }
  lines(x,y,...)
  return(length(x[!is.na(x)]))
}
|
385c77916a7c3a7abffa886fd6cfe809105a81df
|
e78ae3fdd458fc4b8bee318025614d5d6fd2af8f
|
/R/cdtFilterCDTdata_dlgBox.R
|
05d65cdb84ea2a2cb74398f25c0a1d7c4044f4ce
|
[] |
no_license
|
rijaf-iri/CDT
|
b42fd670adfad59c08dcf990a95f9f1ebea9a9e4
|
e1bb6deac6e814656bf5ed13b8d4af4d09475c9d
|
refs/heads/master
| 2023-07-27T09:16:15.407835
| 2023-07-21T02:16:42
| 2023-07-21T02:16:42
| 136,133,394
| 10
| 12
| null | 2018-09-28T03:56:51
| 2018-06-05T06:53:11
|
R
|
UTF-8
|
R
| false
| false
| 10,664
|
r
|
cdtFilterCDTdata_dlgBox.R
|
filterCDTData_getParams <- function(){
listOpenFiles <- openFile_ttkcomboList()
if(WindowsOS()){
largeur1 <- 47
largeur2 <- 45
largeur3 <- 20
}else{
largeur1 <- 42
largeur2 <- 40
largeur3 <- 21
}
############################################
tt <- tktoplevel()
tkgrab.set(tt)
tkfocus(tt)
frMRG0 <- tkframe(tt, relief = 'raised', borderwidth = 2)
frMRG1 <- tkframe(tt)
############################################
xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtFilterCDTdata_dlgBox.xml")
lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
#############
frInput <- tkframe(frMRG0, relief = "groove", borderwidth = 2)
file.stnfl1 <- tclVar(.cdtData$GalParams$filein)
txtStnfl1 <- tklabel(frInput, text = lang.dlg[['label']][['1']], anchor = 'w', justify = 'left')
cbStnfl1 <- ttkcombobox(frInput, values = unlist(listOpenFiles), textvariable = file.stnfl1, width = largeur2)
btStnfl1 <- tkbutton(frInput, text = "...")
tkgrid(txtStnfl1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 10, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(cbStnfl1, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 9, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(btStnfl1, row = 1, column = 9, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
#############
tkconfigure(btStnfl1, command = function(){
tcl('wm', 'attributes', tt, topmost = FALSE)
dat.opfiles <- getOpenFiles(tt)
tcl('wm', 'attributes', tt, topmost = TRUE)
if(!is.null(dat.opfiles)){
update.OpenFiles('ascii', dat.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- dat.opfiles[[1]]
tclvalue(file.stnfl1) <- dat.opfiles[[1]]
tkconfigure(cbStnfl1, values = unlist(listOpenFiles), textvariable = file.stnfl1)
}
})
############################################
frPeriod <- tkframe(frMRG0, relief = "groove", borderwidth = 2)
timeSteps <- tclVar()
CbperiodVAL <- .cdtEnv$tcl$lang$global[['combobox']][['1']][1:6]
periodVAL <- c('minute', 'hourly', 'daily', 'pentad', 'dekadal', 'monthly')
tclvalue(timeSteps) <- CbperiodVAL[periodVAL %in% .cdtData$GalParams$tstep]
all.period <- tclVar(.cdtData$GalParams$all.period)
retminhr <- set.hour.minute(.cdtData$GalParams$tstep, .cdtData$GalParams$minhour)
minhour.tclVar <- tclVar(retminhr$val)
statePeriod0 <- if(tclvalue(all.period) == "1") "disabled" else "normal"
statePeriod1 <- if(tclvalue(all.period) == "1") "disabled" else retminhr$state
chk.Period <- tkcheckbutton(frPeriod, variable = all.period, text = lang.dlg[['label']][['4']], anchor = 'w', justify = 'left')
txt.Tstep <- tklabel(frPeriod, text = lang.dlg[['label']][['5']], anchor = 'e', justify = 'right')
cb.Tstep <- ttkcombobox(frPeriod, values = CbperiodVAL, textvariable = timeSteps, state = statePeriod0, width = largeur3)
cb.minhour <- ttkcombobox(frPeriod, values = retminhr$cb, textvariable = minhour.tclVar, state = statePeriod1, width = 2)
bt.Period <- ttkbutton(frPeriod, text = lang.dlg[['button']][['1']], state = statePeriod0)
txt.Period1 <- tklabel(frPeriod, text = "", anchor = 'e', justify = 'right', width = 10)
tkgrid(chk.Period, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 10, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.Period1, row = 1, column = 0, sticky = 'e', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.Tstep, row = 1, column = 2, sticky = 'e', rowspan = 1, columnspan = 3, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.Tstep, row = 1, column = 5, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.minhour, row = 1, column = 9, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.Period, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 10, padx = 1, pady = 2, ipadx = 1, ipady = 1)
#############
tkconfigure(bt.Period, command = function(){
intstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(timeSteps))]
.cdtData$GalParams[["date.range"]] <- getInfoDateRange(tt, .cdtData$GalParams[["date.range"]], intstep)
})
tkbind(chk.Period, "<Button-1>", function(){
statePeriod0 <- if(tclvalue(all.period) == '1') 'normal' else 'disabled'
tkconfigure(cb.Tstep, state = statePeriod0)
tkconfigure(bt.Period, state = statePeriod0)
statePeriod1 <- if(tclvalue(all.period) == "1") retminhr$state else "disabled"
tkconfigure(cb.minhour, state = statePeriod1)
})
tkbind(cb.Tstep, "<<ComboboxSelected>>", function(){
intstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(timeSteps))]
minhour <- as.numeric(str_trim(tclvalue(minhour.tclVar)))
retminhr <<- set.hour.minute(intstep, minhour)
tkconfigure(cb.minhour, values = retminhr$cb, state = retminhr$state)
tclvalue(minhour.tclVar) <- retminhr$val
})
############################################
frPercent <- tkframe(frMRG0, relief = "groove", borderwidth = 2)
filter.crt <- tclVar(.cdtData$GalParams$opfilter)
filter.val <- tclVar(.cdtData$GalParams$valfilter)
txtFilter1 <- tklabel(frPercent, text = lang.dlg[['label']][['2']], anchor = 'e', justify = 'right')
cbFilter <- ttkcombobox(frPercent, values = c(">=", ">", "<=", "<"), textvariable = filter.crt, width = 4)
enFilter <- tkentry(frPercent, textvariable = filter.val, width = 4)
txtFilter2 <- tklabel(frPercent, text = '%', anchor = 'w', justify = 'left')
tkgrid(txtFilter1, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(cbFilter, row = 0, column = 1, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(enFilter, row = 0, column = 2, sticky = 'e', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(txtFilter2, row = 0, column = 3, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
############################################
frOutput <- tkframe(frMRG0, relief = "groove", borderwidth = 2)
file.save1 <- tclVar(.cdtData$GalParams$file2save)
txtFileSave <- tklabel(frOutput, text = lang.dlg[['label']][['3']], anchor = 'w', justify = 'left')
enFileSave <- tkentry(frOutput, textvariable = file.save1, width = largeur1)
btFileSave <- tkbutton(frOutput, text = "...")
tkgrid(txtFileSave, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 10, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(enFileSave, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 9, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(btFileSave, row = 1, column = 9, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
#############
tkconfigure(btFileSave, command = function(){
tcl('wm', 'attributes', tt, topmost = FALSE)
file2save1 <- tk_get_SaveFile(filetypes = .cdtEnv$tcl$data$filetypesA)
tclvalue(file.save1) <- if(is.na(file2save1)) "" else file2save1
tcl('wm', 'attributes', tt, topmost = TRUE)
})
############################################
tkgrid(frInput, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frPeriod, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frPercent, row = 2, column = 0, sticky = 'e', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frOutput, row = 3, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
############################################
bt.prm.OK <- ttkbutton(frMRG1, text = .cdtEnv$tcl$lang$global[['button']][['1']])
bt.prm.CA <- ttkbutton(frMRG1, text = .cdtEnv$tcl$lang$global[['button']][['2']])
tkconfigure(bt.prm.OK, command = function(){
if(str_trim(tclvalue(file.stnfl1)) %in% c("", "NA")){
cdt.tkmessageBox(tt, message = lang.dlg[['message']][['6']], icon = "warning", type = "ok")
tkwait.window(tt)
}else if(str_trim(tclvalue(file.save1)) %in% c("", "NA")){
cdt.tkmessageBox(tt, message = lang.dlg[['message']][['7']], icon = "warning", type = "ok")
tkwait.window(tt)
}else{
.cdtData$GalParams$filein <- str_trim(tclvalue(file.stnfl1))
.cdtData$GalParams$all.period <- switch(tclvalue(all.period), '0' = FALSE, '1' = TRUE)
.cdtData$GalParams$tstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(timeSteps))]
.cdtData$GalParams$minhour <- as.numeric(str_trim(tclvalue(minhour.tclVar)))
.cdtData$GalParams$opfilter <- str_trim(tclvalue(filter.crt))
.cdtData$GalParams$valfilter <- as.numeric(str_trim(tclvalue(filter.val)))
.cdtData$GalParams$file2save <- str_trim(tclvalue(file.save1))
.cdtData$GalParams$message <- lang.dlg[['message']]
tkgrab.release(tt)
tkdestroy(tt)
tkfocus(.cdtEnv$tcl$main$win)
}
})
tkconfigure(bt.prm.CA, command = function(){
tkgrab.release(tt)
tkdestroy(tt)
tkfocus(.cdtEnv$tcl$main$win)
})
tkgrid(bt.prm.OK, row = 0, column = 0, sticky = 'w', padx = 5, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.prm.CA, row = 0, column = 1, sticky = 'e', padx = 5, pady = 1, ipadx = 1, ipady = 1)
############################################
tkgrid(frMRG0, row = 0, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 2, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frMRG1, row = 1, column = 1, sticky = 'se', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
############################
tkwm.withdraw(tt)
tcl('update')
tt.w <- as.integer(tkwinfo("reqwidth", tt))
tt.h <- as.integer(tkwinfo("reqheight", tt))
tt.x <- as.integer(.cdtEnv$tcl$data$width.scr*0.5 - tt.w*0.5)
tt.y <- as.integer(.cdtEnv$tcl$data$height.scr*0.5 - tt.h*0.5)
tkwm.geometry(tt, paste0('+', tt.x, '+', tt.y))
tkwm.transient(tt)
tkwm.title(tt, lang.dlg[['title']])
tkwm.deiconify(tt)
tcl('wm', 'attributes', tt, topmost = TRUE)
tkfocus(tt)
tkbind(tt, "<Destroy>", function(){
tkgrab.release(tt)
tkfocus(.cdtEnv$tcl$main$win)
})
tkwait.window(tt)
}
|
408514e75d75d585c0b732a20c1903b30ab08c60
|
cd31134688875ecd83c993efb70cfc75d0022c57
|
/tests/testthat/test_force_of_inf.R
|
23b1c5adc525ea9361315492e7167ac6f4e344df
|
[] |
no_license
|
caleb-easterly/msid
|
ba703d82193fc08b9b15b0d29818803df7ed9d47
|
cafb17ae76b577914adcdfe6f78ac78b0ebb034c
|
refs/heads/master
| 2021-07-22T23:31:18.734314
| 2021-07-07T02:05:49
| 2021-07-07T02:05:49
| 168,190,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
test_force_of_inf.R
|
context("foi")
test_that("force of infection is calculated properly", {
suff_contact <- matrix(c(5, 1, 4, 2), nrow = 2, byrow = TRUE)
prev <- c(1, 0.5)
exp_foi <- c(5.5, 5)
calc_foi <- calculate_force_of_inf(suff_contact, prev)
expect_equal(exp_foi, calc_foi)
suff_contact2 <- matrix(1:9, nrow = 3, byrow = TRUE)
prev2 <- c(0.1, 0.5, 1)
exp_foi2 <- c(0.1+1+3, 0.4+2.5+6, 0.7+4+9)
calc_foi2 <- calculate_force_of_inf(suff_contact2, prev2)
expect_equal(exp_foi2, calc_foi2)
# check that this automatically throws an error
# if ncol(suff_contact) != length(prev)
expect_error(calculate_force_of_inf(suff_contact, prev2))
})
|
e782b3911e122b1b7516b684765ef735b5ba14d0
|
03818820f3088fd53fd6f7ae14b3aad40c210560
|
/Feed Forward Neural Network.R
|
27ddf357808e11794ffd79cd166b0c0024e5bee3
|
[] |
no_license
|
jyoungeconomics/feedforwardneuralnetwork
|
83dea631d958d6ffcb4f6d76b8c608b192850cbf
|
1eb3d2db8a42b397675c726a8412524381e884ff
|
refs/heads/master
| 2021-10-22T09:44:22.698220
| 2019-03-09T17:33:54
| 2019-03-09T17:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,620
|
r
|
Feed Forward Neural Network.R
|
#JEFFREY YOUNG, 2017
#there are 4 options for activation functions, 3 with a derivative
# Logistic (sigmoid) activation.
#
# x:  numeric vector/matrix of pre-activations.
# dx: if FALSE (default) return sigmoid(x); if TRUE return the derivative
#     sigmoid(x) * (1 - sigmoid(x)), evaluated at the pre-activation x.
# Fix: compute the sigmoid once and reuse it for the derivative instead of
# evaluating 1/(1 + exp(-x)) twice; dx also gains a default so plain
# sigmoid(x) works (backward-compatible).
sigmoid <- function(x, dx = FALSE){
  s <- 1/(1+exp(-x))
  if(dx==FALSE)
    temp <- s else temp <- s*(1-s)
  return(temp)
}
# Hyperbolic-tangent activation.
#
# x:  numeric vector/matrix of pre-activations.
# dx: if FALSE (default) return tanh(x); if TRUE return the derivative
#     d/dx tanh(x) = 1/cosh(x)^2 (sech^2, equal to 1 - tanh(x)^2).
# Improvement: dx gains a default so tootoo(x) works, matching the call
# pattern of the other activation helpers (backward-compatible).
tootoo <- function(x, dx = FALSE){
  if(dx==FALSE)
    temp <- (tanh(x)) else temp <- (1/(cosh(x))^2)
  return(temp)
}
# "Leaky" ReLU activation: slope 0.01 (not 0) on the negative side.
#
# x:  numeric vector/matrix of pre-activations.
# dx: if FALSE (default) return the activation
#     (0.01 * x for x <= 0, x otherwise); if TRUE return its derivative
#     (0.01 for x <= 0, 1 otherwise).
# The logical-mask arithmetic keeps both branches fully vectorized.
# Improvement: dx gains a default so relu(x) works (backward-compatible).
relu <- function(x, dx = FALSE){
  if(dx==FALSE)
    temp <- (x<=0)*(0.01*x)+(x>0)*(x) else temp <- (x<=0)*(0.01)+(x>0)*(1)
  return(temp)
}
# Numerically stable softmax via the log-sum-exp trick: shift by max(x)
# before exponentiating so exp() cannot overflow.
# (Siraj, https://www.youtube.com/watch?v=-7scQpJT7uo: ReLU for hidden
# layers, softmax output for classification, linear output for regression.)
# Adapted from http://tr.im/hH5A
softmax <- function (x){
  biggest <- max(x)
  # log(sum(exp(x))) computed safely, then exp(x - lse) = exp(x)/sum(exp(x)).
  lse <- biggest + log(sum(exp(x - biggest)))
  return(exp(x - lse))
}
# Mini-batch of 5 subjects with 3 binary input features each
# (rows = subjects, columns = features).
input <- t(cbind(c(0,0,1),c(1,1,1),
                 c(1,0,1),c(0,1,1),c(1,0,0)))
colnames(input) <- c("feature 1","feature 2","feature 3")
rownames(input) <- c("subject 1","subject 2",
                     "subject 3","subject 4","subject 5")
# Binary target, one value per subject (5 x 1 column matrix).
output <- rbind(0,1,1,0,1)
colnames(output) <- c("output")
rownames(output) <- c("subject 1","subject 2",
                      "subject 3","subject 4","subject 5")
# Activation used for every hidden layer (one of the four helpers above).
activate <- relu
# Layer widths: two hidden layers (3 and 2 units) plus the output layer.
# This code only works with 1 output node (last entry of q): output is a
# column vector. The last entry could be 1,2,...,k nodes, but then the
# training targets need k columns (cbind(output,...,output)) to match.
q <- c(3,2,ncol(as.matrix(output)))
H <- length(q)
W <- list()
# First weight matrix maps (inputs + 1 bias row) -> q[1] hidden units;
# the extra random row is the bias, initialised like the other weights.
W[[1]] <- (matrix(runif((q[1])*(ncol(input)+1)),nrow=ncol(input)+1,ncol=q[1]))
# Remaining weight matrices. Even-indexed ones are built transposed, odd
# ones are not, but the resulting shape is formulaic either way:
# (previous width + bias) x q[i]. Try plugging in i = 2, 3, ... to see.
for (i in 2:(H)){
  if(i%%2==0) W[[i]] <- t(matrix(runif((ncol(W[[i-1]])+1)*(q[i])),nrow=q[i],ncol=ncol(W[[i-1]])+1))
  if(i%%2==1) W[[i]] <- (matrix(runif((q[i])*(ncol(W[[i-1]])+1)),nrow=ncol(W[[i-1]])+1,ncol=q[i]))
}
# Final weights form a column vector: (last hidden width + bias) x 1.
W[[H]] <- matrix(runif((ncol(W[[H-1]])+1)*(1)),nrow=ncol(W[[H-1]])+1,ncol=q[H])
# Hyperparameters. ReLU activations can blow up if the learning rate is
# too large, but at least they avoid the vanishing gradient common to
# tanh(x) and sigmoid(x).
learn <- 0.001
momentum <- 0.5
# Per-epoch scratch: per-layer errors and gradients, layer activations,
# mean-error history for plotting, and previous weight deltas (momentum).
errors <- list()
gradients <- list()
layers <- list()
descent <- list()
# Number of training epochs.
EEP <- 100000
WW <- list()
# Train the network: full-batch gradient descent with momentum.
for (i in 1:EEP){
  # Layer 1 ("zero-th" layer) is the raw, unweighted, unbiased input.
  layers[[1]] <- input
  # Forward pass through the hidden layers: prepend a bias column of 1s,
  # multiply by the weights, then apply the activation.
  for (k in 2:(H)){
    layers[[k]] <- activate(cbind(1,layers[[k-1]])%*%(W[[k-1]]),dx=FALSE)
  }
  # Output layer (layer H+1 in the list): linear -- no activation applied.
  layers[[H+1]] <- (cbind(1,layers[[k]])%*%(W[[k]]))
  # Output error = target minus prediction.
  errors[[1]] <- output-layers[[length(layers)]]
  # Record the mean absolute error at epoch i for the plot below.
  descent[[i]] <- mean(abs(errors[[1]]))
  if (i %% 1000 == 0) {print(c("Error=",mean(abs(errors[[1]]))))}
  # NOTE(review): the forward pass leaves the output layer linear, yet this
  # gradient multiplies by the activation derivative evaluated at the
  # output; for a truly linear output that factor should be 1. Confirm
  # whether the mismatch is intentional.
  gradients[[1]] <- errors[[1]]*(activate(layers[[length(layers)]],dx=T))
  # Backpropagate: each layer's error is the next layer's gradient times
  # the transposed weights, with the bias row ([-1,]) excluded.
  for (j in 2:(H)){
    errors[[j]] <- gradients[[j-1]]%*%t(W[[length(layers)-j+1]][-1,])
    gradients[[j]] <- errors[[j]]*(activate(layers[[length(layers)-(j-1)]],dx=T))
  }
  # Weight deltas for the momentum term.
  for (g in 1:H){
    if(i==1)
      # First epoch: nothing has changed yet, so seed WW with zeros of the
      # same shape as W without its bias row.
      WW[[g]] <- 0*W[[g]][-1,] else WW[[g]] <- ((t(layers[[g]])%*%gradients[[(length(1:H)+1)-g]])*learn+WW[[g]]*momentum)
  }
  # Gradient step: current gradient scaled by the learning rate plus the
  # previous delta scaled by momentum. Only W[[m]][-1,] is updated, so the
  # bias rows are never changed from their random initial values.
  for (m in 1:(H)){
    W[[m]][-1,] <- W[[m]][-1,]+(t(layers[[m]])%*%gradients[[(length(1:H)+1)-m]])*learn+WW[[m]]*momentum
  }
}
# Plot the (hopefully descending) average training error per epoch.
plot(NULL,xlim = c(1,length(descent)),ylim = c(min(as.numeric(descent)),
     0.1*max(as.numeric(descent))),
     main = "Gradient Descent",xlab="Epoch",ylab = "Average Error")
lines(as.numeric(descent),col="red")
# Sanity check 1: forward-pass a held-out observation through the trained
# network exactly as in training (bias column prepended at every layer).
TEST <- cbind(0,1,1) # answer should be 0 (see data)
# Prepend the bias term to form the input layer.
neurons <- list(cbind(1,TEST))
# Propagate through the H-1 hidden transforms.
for (i in 1:(H - 1)) {
  temp <- neurons[[i]] %*% W[[i]]
  act.temp <- activate(temp,dx=F)
  neurons[[i + 1]] <- cbind(1, act.temp)
}
# Linear output layer gives the prediction (print it).
temp <- (neurons[[H]] %*% W[[H]])
temp
# Sanity check 2: same forward pass for a second observation.
TEST <- cbind(1,0,0) # answer should be 1 (see data)
# Prepend the bias term.
neurons <- list(cbind(1,TEST))
# Propagate through the hidden layers.
for (i in 1:(H - 1)) {
  temp <- neurons[[i]] %*% W[[i]]
  act.temp <- activate(temp,dx=F)
  neurons[[i + 1]] <- cbind(1, act.temp)
}
# Predicted output (print it).
temp <- (neurons[[H]] %*% W[[H]])
temp
|
ee4523a9fec9efb545016c391b806652444ecab9
|
3e86297a031840551385109cc588f859865be2ed
|
/change_analysis/change_analysis.R
|
369cc56ed0f93419867bb00df61ef31f7171a972
|
[
"MIT"
] |
permissive
|
shmh40/detectreeRGB
|
692d386009f532822f2d3eac97aa6be7703e74e1
|
4335fc168729ddb96f1cd655fcdf721f3476804d
|
refs/heads/main
| 2023-05-23T16:51:20.670558
| 2021-11-29T14:53:24
| 2021-11-29T14:53:24
| 382,028,803
| 24
| 9
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,848
|
r
|
change_analysis.R
|
# ---- Setup: packages, canopy-height models and terrain attributes --------
# Load up some packages!
# NOTE(review): rm(list = ls()) and install.packages() inside a script are
# side-effectful for anyone sourcing this file; consider removing them.
rm(list=ls())
install.packages('bbplot')
library(bbplot)
library(reshape2)
library(raster)
library(dplyr); library(tidyr); library(magrittr)
library(ggplot2); library(RColorBrewer); library(rgdal)
# LiDAR packages
library(raster); library(sf); library(spatialEco)
library(rasterVis) # This is just for plotting, you probably don't need it
library(velox)
# Canopy-height models (CHM) at 0.5 m: 2014 survey, 2020 survey, and their
# difference. The stack is built but the three single rasters are what the
# analysis below actually uses.
Sep = raster::stack(paste0("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sep_2014_coarse_CHM_g10_sub0.01_0.5m.tif"), paste0("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sep_2020_CHM_g10_sub0.2_0.5m.tif"), paste0("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sepilok_CHM_diff_0.5m.tif"))
Sep_2014 = raster(paste0("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sep_2014_coarse_CHM_g10_sub0.01_0.5m.tif"))
Sep_2020 = raster(paste0("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sep_2020_CHM_g10_sub0.2_0.5m.tif"))
Sep_diff = raster(paste0("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sepilok_CHM_diff_0.5m.tif"))
# Digital terrain model (1 m), coarsened by a factor of 20 before deriving
# slope (degrees), aspect and topographic position index (TPI).
DTM = raster("C:/Users/sebhi/ai4er/mres_project/work/data/sepilok_lidar/Sepilok_2014_DTM_g10_sub0.2_1m.tif")
plot(DTM)
DTM_agg = raster::aggregate(DTM, 20)
plot(DTM_agg)
slope_agg <- terrain(DTM_agg, opt=c('slope'), unit='degrees',na.rm=TRUE)
aspect_agg <- terrain(DTM_agg, opt=c('aspect'), na.rm=TRUE)
TPI_agg <- terrain(DTM_agg, opt=c('TPI'), na.rm=TRUE)
plot(TPI_agg)
# ---- Crown polygons, crop/mask to the plot, per-crown CHM statistics -----
## Load in Mask RCNN crowns and remove the grid boxes and ones off the edge of the RGB
Sep_detectron_crowns = shapefile("C:/Users/sebhi/ai4er/mres_project/work/data/detectron_predicted_crowns/full_sepilok_deploy/left_plot.shp")
Sep_detectron_crowns$perimeter=as.numeric(polyPerimeter(Sep_detectron_crowns))
# Keep crowns with > 100 pixels and perimeter < 220 (drops grid artefacts
# and edge fragments).
Sep_detectron_crowns = Sep_detectron_crowns[Sep_detectron_crowns$pixelvalue>100,]
Sep_detectron_crowns = Sep_detectron_crowns[Sep_detectron_crowns$perimeter<220,]
#file_name<-paste("C:/Users/sebhi/ai4er/mres_project/work/data/detectron_predicted_crowns/full_sepilok_deploy/maskrcnn_preds_sepilok")
#writeOGR(Sep_detectron_crowns,dsn = paste(file_name, '.shp', sep=''),layer = basename(file_name),drive = 'ESRI Shapefile')
plot(Sep_detectron_crowns)
### Load in itcfast predictions
# NOTE(review): this overwrites the filtered Mask R-CNN crowns loaded above
# with the itcfast delineations -- only one of the two crown sources is
# actually analysed below.
Sep_detectron_crowns = shapefile("C:/Users/sebhi/ai4er/mres_project/work/data/shape/sepilok_deploy/left_plot_itc.shp")
Sep_plot_outline = shapefile("C:/Users/sebhi/ai4er/mres_project/work/data/shape/sepilok_2014_preds_outline.shp")
### CROP the LiDAR data to the plot outline's extent, then mask cells
### outside the polygon to NA.
#bound = st_bbox(Sep_detectron_crowns)
Sep_2014_cropped = raster::crop(Sep_2014, Sep_plot_outline)
Sep_2020_cropped = raster::crop(Sep_2020, Sep_plot_outline)
Sep_diff_cropped = raster::crop(Sep_diff, Sep_plot_outline)
Sep_2014_masked = raster::mask(Sep_2014_cropped, Sep_plot_outline)
Sep_2020_masked = raster::mask(Sep_2020_cropped, Sep_plot_outline)
Sep_diff_masked = raster::mask(Sep_diff_cropped, Sep_plot_outline)
plot(Sep_2014_masked)
plot(Sep_2020_masked)
plot(Sep_diff_masked)
# stack them up...currently failing
Sep = raster::stack(Sep_2014_masked, Sep_2020_masked, Sep_diff_masked)
## CROP and MASK the terrain layers the same way.
TPI_cropped = raster::crop(TPI_agg, Sep_plot_outline)
TPI_masked = raster::mask(TPI_cropped, Sep_plot_outline)
plot(TPI_masked)
#plot(dfs, add=TRUE)
slope_cropped = raster::crop(slope_agg, Sep_plot_outline)
slope_masked = raster::mask(slope_cropped, Sep_plot_outline)
plot(slope_masked)
#plot(dfs, add=TRUE)
aspect_cropped = raster::crop(aspect_agg, Sep_plot_outline)
aspect_masked = raster::mask(aspect_cropped, Sep_plot_outline)
plot(aspect_masked)
# Attach per-crown height statistics (2014, 2020, change, geometry) to the
# polygons; get_poly_CHM_info_velox() is defined at the bottom of this file.
#dfs=get_poly_CHM_info_velox(rpoly=Sep_detectron_crowns,CHM_org=Sep_cropped$Sep_2014_coarse_CHM_g10_sub0.01_0.5m,CHM_diff=Sep_cropped$Sepilok_CHM_diff_0.5m)
dfs=get_poly_CHM_info_velox(rpoly=Sep_detectron_crowns,CHM_org=Sep_2014_masked,CHM_2020=Sep_2020_masked,CHM_diff=Sep_diff_masked)
# Tag every crown with the site name for later grouping/plot colouring.
dfs$Site="Sepilok"
### COUNT THE NUMBER OF TREES ABOVE CERTAIN HEIGHT THRESHOLDS
dfs_2020 = dfs@data[dfs@data$H_max_2020>70,]
dfs_2020_drop_na = dfs_2020[!is.na(dfs_2020$H_max_2020), ]
# NOTE(review): dfs_30_drop_na is used before it is ever defined -- the next
# three lines error when this script is run top to bottom. Presumably a
# leftover from an earlier draft that built dfs_30_drop_na from a 30 m
# threshold; confirm and either define it or delete these lines.
dfs_30_drop_na$H_max_2020 = dfs_30_drop_na$Org_H_max + dfs_30_drop_na$Change_H_max
dfs_30_drop_na = dfs_30_drop_na[!is.na(dfs_30_drop_na$H_max_2020), ]
dfs_H = dfs_30_drop_na[dfs_30_drop_na$H_max_2020>100,]
### Height-change vs initial height, all trees (including apparent deaths).
ggplot(dfs@data,aes(Org_H_max,Change_H_mean,size=area,color=Site))+geom_point()
ggplot(dfs@data,aes(Org_H_max, Change_H_mean))+
  geom_point(color='red',aes(Org_H_max,Change_H_mean, size=area))+
  ggtitle('Change in tree height predicted by Mask R-CNN')+
  theme_classic()+
  theme(plot.title=element_text(size=10,face='bold',margin=margin(10,10,10,0), hjust = 0.5))+
  labs(x='2014 tree height / m', y='Change in tree height / m')+
  theme(legend.title = element_text(colour="black", size=8, face="bold"))+
  labs(size='Crown Area')+
  guides(size = guide_legend(reverse=TRUE))+
  xlim(30,80)+
  ylim(-60, 20)
# Surviving trees only: a mean height drop of more than 5 m is treated as
# death throughout this script.
dfs_drop_na = dfs[!is.na(dfs$Change_H_mean), ]
dfs_drop_dead = dfs_drop_na[dfs_drop_na$Change_H_mean>=-5,]
ggplot(dfs_drop_dead@data,aes(Org_H_max,Change_H_mean,size=area,color=Site))+geom_point()
# NOTE(review): this plot uses model_intercept/model_slope, which are only
# defined a few lines further down -- run the lm() block first, or move
# this plot after it.
ggplot(dfs_drop_dead@data,aes(Org_H_max, Change_H_mean))+
  geom_point(color='red',aes(Org_H_max,Change_H_mean, size=area))+
  ggtitle('Mask R-CNN predicts decrease in tree growth with height')+
  theme_classic()+
  theme(plot.title=element_text(size=10,face='bold',margin=margin(10,10,10,0), hjust = 0.5))+
  labs(x='2014 tree height / m', y='Change in tree height / m')+
  theme(legend.title = element_text(colour="black", size=8, face="bold"))+
  labs(size='Crown Area')+
  guides(size = guide_legend(reverse=TRUE))+
  geom_abline(intercept = model_intercept, slope = model_slope, color = 'black')+
  xlim(30,80)+
  ylim(-5,12)
# Linear fit of growth on initial height for the surviving trees.
model_linear1 <- lm(Change_H_mean ~ Org_H_max, data = dfs_drop_dead@data)
summary(model_linear1)
model_intercept <- coef(model_linear1)[1]
model_slope <- coef(model_linear1)[2]
ggplot(data = dfs_drop_dead@data, aes(Org_H_max, Change_H_mean, size=area, color=Site)) +
  geom_point() +
  geom_abline(intercept = model_intercept, slope = model_slope, color = 'black')+xlim(30,80)
### Percentage of deaths per 10 m height bin (digits = -1 rounds to tens).
dfs$Org_H_max_round=round(dfs$Org_H_max,digits=-1)
dfs$Org_H_mean_round=round(dfs$Org_H_mean,digits=-1)
# NOTE(review): pct_died_per_year takes the 6th root of the 6-year percent
# died; the usual annualisation is 1 - (1 - p)^(1/6) on the proportion --
# confirm the intended formula.
df_summary=dfs@data %>% group_by(Site,Org_H_mean_round) %>%
  summarize(num_tot=n(),num_died=sum(Change_H_mean<=-5, na.rm=TRUE),pct_died_per_year=(100*num_died/num_tot)^(1/6))
# Plot the results of summary
ggplot(df_summary,aes(Org_H_mean_round,pct_died_per_year,color=Site))+geom_point()+xlim(20,60)
ggplot(df_summary,aes(Org_H_mean_round, pct_died_per_year))+
  geom_point(color='red',aes(Org_H_mean_round,pct_died_per_year))+
  ggtitle('Annual mortality rate of Mask R-CNN trees')+
  theme_classic()+
  theme(plot.title=element_text(size=10,face='bold',margin=margin(10,10,10,0), hjust = 0.5))+
  labs(x='Binned tree heights / m', y='Annual tree mortality / %')+
  theme(legend.title = element_text(colour="black", size=12, face="bold"))+
  xlim(20,60)+
  ylim(1, 2)
### Carbon calculation. Diameter is derived from crown area assuming a
### circular crown; agb = 0.136*(H*D)^1.52 appears to be an allometric
### above-ground-biomass equation -- TODO confirm its source and units.
dfs_50 = dfs@data[dfs@data$Org_H_max>0,]
dfs_50_drop_na = dfs_50[!is.na(dfs_50$Org_H_max), ]
dfs_50_drop_na$diameter = 2*sqrt((dfs_50_drop_na$area)/pi)
dfs_50_drop_na$agb = 0.136 * (dfs_50_drop_na$Org_H_max * dfs_50_drop_na$diameter)^1.52
# Carbon taken as 50% of above-ground biomass.
dfs_50_drop_na$carbon = 0.5 * dfs_50_drop_na$agb
sum_carbon = sum(dfs_50_drop_na$carbon, na.rm=TRUE)
# NOTE(review): the first agb_change line references dfs$diameter, which is
# never created on dfs (only on dfs_50_drop_na); it is immediately
# overwritten by the next line anyway. The following carbon_change lines
# then read dfs_50$agb_change, which is also never assigned (agb_change
# lives on dfs_50_drop_na) -- this section needs a consistent data frame.
dfs_50_drop_na$agb_change = (0.136 * ((dfs_50_drop_na$Org_H_max + dfs_50_drop_na$Change_H_max) * dfs_50_drop_na$diameter)^1.52) - (0.136 * (dfs$Org_H_max * dfs$diameter)^1.52)
dfs_50_drop_na$agb_change = (0.136 * ((dfs_50$Org_H_mean + dfs_50$Change_H_mean) * dfs_50$diameter)^1.52) - (0.136 * (dfs_50$Org_H_mean * dfs_50$diameter)^1.52)
dfs_50$carbon_change = 0.5 * dfs_50$agb_change
sum_carbon_change = sum(dfs_50$carbon_change, na.rm=TRUE)
ggplot(dfs_50_drop_na,aes(Org_H_max, carbon))+
  geom_point(color='red',aes(Org_H_max,carbon, size=area))+
  ylim(0, 40000)+ggtitle('Carbon stored in each tree predicted by Mask R-CNN')+
  theme_classic()+
  theme(plot.title=element_text(size=10,face='bold',margin=margin(10,10,10,0), hjust = 0.5))+
  labs(x='Tree height / m', y='Carbon / kg')+
  theme(legend.title = element_text(colour="black", size=8, face="bold"))+
  labs(size='Crown Area')+
  guides(size = guide_legend(reverse=TRUE))
### Locate the DEAD trees (mean height drop of more than 5 m) and compare
### the terrain they sit on against the whole plot.
#dfs = dfs@data[dfs@data$Org_H_max>0,]
dfs_drop_na = dfs[!is.na(dfs$Change_H_mean), ]
dfs_dead = dfs_drop_na[dfs_drop_na$Change_H_mean<=-5,]
plot(TPI_masked)
plot(dfs_dead, add=TRUE)
# Plot-wide terrain distributions.
hist(TPI_masked,
     main = "Distribution of TPI in Sepilok",
     xlab = "TPI", ylab = "Frequency",
     col = "springgreen")
# NOTE(review): the titles of the next two histograms were copy-pasted and
# still say "TPI" conceptually correct only for the first; confirm the main=
# strings (they do say Slope/Aspect here, but see the dead-tree histograms
# below where the labels are wrong).
hist(slope_masked,
     main = "Distribution of Slope in Sepilok",
     xlab = "Slope", ylab = "Frequency",
     col = "springgreen")
hist(aspect_masked,
     main = "Distribution of Aspect in Sepilok",
     xlab = "Aspect", ylab = "Frequency",
     col = "springgreen")
# Mean terrain attributes under each dead-tree crown (helper defined below).
dfs_dead_velox_info=get_poly_TPI_slope_aspect_info_velox(rpoly=dfs_dead,TPI=TPI_masked,slope=slope_masked,aspect=aspect_masked)
plot(dfs_dead_velox_info)
hist(dfs_dead_velox_info$TPI,
     main = "Distribution of TPI in Sepilok",
     xlab = "TPI", ylab = "Frequency",
     col = "black")
# NOTE(review): the next two histograms plot slope and aspect but their
# main= / xlab= strings still say "TPI" / "Slope" from copy-paste --
# the labels should be corrected.
hist(dfs_dead_velox_info$slope,
     main = "Distribution of TPI in Sepilok",
     xlab = "Slope", ylab = "Frequency",
     col = "black")
hist(dfs_dead_velox_info$aspect,
     main = "Distribution of TPI in Sepilok",
     xlab = "Slope", ylab = "Frequency",
     col = "black")
### Density curves with ggplot2: plot-wide raster cells vs dead-tree crowns.
dfs_dead_velox_df = dfs_dead_velox_info@data
dfs_dead_velox_df_rm_na = dfs_dead_velox_df[!is.na(dfs_dead_velox_df$TPI), ]
#dfs_dead_velox_df[['TPI','slope','aspect']]
#dfs_dead_velox_df.m = melt(dfs_dead_velox_df)
# One-column data frames of raster cell values; the column names (tpi,
# slope, aspect) come from the raster layer names.
TPI_df = as.data.frame(TPI_masked)
slope_df = as.data.frame(slope_masked)
aspect_df = as.data.frame(aspect_masked)
# NOTE(review): all six density plots share xlim(-1.5, 1.5), which is only
# sensible for TPI; slope (degrees) and aspect (radians/degrees) are
# clipped almost entirely by it. Consider a small helper function, too --
# the six calls differ only in data frame and aesthetic.
ggplot(TPI_df, aes(x=tpi))+
  geom_vline(xintercept=0, color="black", size=1)+
  stat_density(alpha = 1,size = 1, geom="line", position = "identity")+
  xlab('TPI Distribution')+ylab("")+xlim(-1.5,1.5)+theme_light()+
  theme(axis.text.y = element_blank(),axis.ticks.y = element_blank(),
        legend.title = element_blank(),legend.position = c(0.2,0.8))+
  guides(color=guide_legend(nrow=4))+
  scale_color_manual(values=c(brewer.pal(6,"Paired")[c(1,2,5,6)]))
ggplot(dfs_dead_velox_df, aes(x=TPI))+
  geom_vline(xintercept=0, color="black", size=1)+
  stat_density(alpha = 1,size = 1, geom="line", position = "identity")+
  xlab('TPI Distribution')+ylab("")+xlim(-1.5,1.5)+theme_light()+
  theme(axis.text.y = element_blank(),axis.ticks.y = element_blank(),
        legend.title = element_blank(),legend.position = c(0.2,0.8))+
  guides(color=guide_legend(nrow=4))+
  scale_color_manual(values=c(brewer.pal(6,"Paired")[c(1,2,5,6)]))
ggplot(slope_df, aes(x=slope))+
  geom_vline(xintercept=0, color="black", size=1)+
  stat_density(alpha = 1,size = 1, geom="line", position = "identity")+
  xlab('Slope Distribution')+ylab("")+xlim(-1.5,1.5)+theme_light()+
  theme(axis.text.y = element_blank(),axis.ticks.y = element_blank(),
        legend.title = element_blank(),legend.position = c(0.2,0.8))+
  guides(color=guide_legend(nrow=4))+
  scale_color_manual(values=c(brewer.pal(6,"Paired")[c(1,2,5,6)]))
ggplot(dfs_dead_velox_df, aes(x = slope))+
  geom_vline(xintercept=0, color="black", size=1)+
  stat_density(alpha = 1,size = 1, geom="line", position = "identity")+
  xlab('Slope Distribution')+ylab("")+xlim(-1.5,1.5)+theme_light()+
  theme(axis.text.y = element_blank(),axis.ticks.y = element_blank(),
        legend.title = element_blank(),legend.position = c(0.2,0.8))+
  guides(color=guide_legend(nrow=4))+
  scale_color_manual(values=c(brewer.pal(6,"Paired")[c(1,2,5,6)]))
ggplot(aspect_df, aes(x=aspect))+
  geom_vline(xintercept=0, color="black", size=1)+
  stat_density(alpha = 1,size = 1, geom="line", position = "identity")+
  xlab('Aspect Distribution')+ylab("")+xlim(-1.5,1.5)+theme_light()+
  theme(axis.text.y = element_blank(),axis.ticks.y = element_blank(),
        legend.title = element_blank(),legend.position = c(0.2,0.8))+
  guides(color=guide_legend(nrow=4))+
  scale_color_manual(values=c(brewer.pal(6,"Paired")[c(1,2,5,6)]))
ggplot(dfs_dead_velox_df, aes(x = aspect))+
  geom_vline(xintercept=0, color="black", size=1)+
  stat_density(alpha = 1,size = 1, geom="line", position = "identity")+
  xlab('Aspect Distribution')+ylab("")+xlim(-1.5,1.5)+theme_light()+
  theme(axis.text.y = element_blank(),axis.ticks.y = element_blank(),
        legend.title = element_blank(),legend.position = c(0.2,0.8))+
  guides(color=guide_legend(nrow=4))+
  scale_color_manual(values=c(brewer.pal(6,"Paired")[c(1,2,5,6)]))
###
# Attach mean terrain attributes to each polygon.
#
# rpoly:             SpatialPolygonsDataFrame of tree crowns.
# TPI, slope, aspect: terrain rasters covering the polygons.
# Returns rpoly with new columns $TPI, $slope and $aspect, each holding the
# mean of the raster cells falling inside the polygon (NA cells ignored).
get_poly_TPI_slope_aspect_info_velox = function(rpoly, TPI, slope, aspect) {
  terrain_layers <- list(TPI = TPI, slope = slope, aspect = aspect)
  for (layer_name in names(terrain_layers)) {
    # velox extraction with fun = NULL returns, per polygon, the raw cell
    # values; we then average them ourselves with NA removal.
    cell_values <- velox(terrain_layers[[layer_name]])$extract(rpoly, fun = NULL)
    rpoly[[layer_name]] <- sapply(cell_values, FUN = mean, na.rm = TRUE)
  }
  return(rpoly)
}
# Attach per-polygon canopy-height statistics to a set of crown polygons.
#
# rpoly:    SpatialPolygonsDataFrame of tree crowns.
# CHM_org:  canopy-height model for the original (2014) survey.
# CHM_2020: canopy-height model for the 2020 survey.
# CHM_diff: height-difference raster (2020 minus 2014).
# Returns rpoly with per-polygon mean/max/min/variance of cell heights for
# each raster, plus area (cell count in raster units), perimeter and two
# dimensionless shape indices.
get_poly_CHM_info_velox=function(rpoly,CHM_org,CHM_2020,CHM_diff){
  CHM_orgv=velox(CHM_org)
  CHM_2020v=velox(CHM_2020)
  CHM_diffv=velox(CHM_diff)
  # Raw per-polygon cell values from the original (2014) raster.
  poly_original_raster_data_list= CHM_orgv$extract(rpoly, fun = NULL)
  # Fix: area was previously assigned here AND again from the 2020
  # extraction, with the second silently overwriting the first; compute
  # the per-polygon cell count exactly once.
  rpoly$area=sapply(poly_original_raster_data_list,FUN=length)
  # NOTE(review): only the means drop NA cells; max/min/var return NA for
  # any polygon containing masked cells -- downstream code filters on
  # is.na(), but confirm this asymmetry is intended.
  rpoly$Org_H_mean=sapply(poly_original_raster_data_list,FUN=mean,na.rm=TRUE)
  rpoly$Org_H_max =sapply(poly_original_raster_data_list,FUN=max)
  rpoly$Org_H_min =sapply(poly_original_raster_data_list,FUN=min)
  rpoly$Org_H_var =sapply(poly_original_raster_data_list,FUN=var)
  # Same statistics from the 2020 raster.
  poly_2020_raster_data_list= CHM_2020v$extract(rpoly, fun = NULL)
  rpoly$H_mean_2020=sapply(poly_2020_raster_data_list,FUN=mean,na.rm=TRUE)
  rpoly$H_max_2020 =sapply(poly_2020_raster_data_list,FUN=max)
  rpoly$H_min_2020 =sapply(poly_2020_raster_data_list,FUN=min)
  rpoly$H_var_2020 =sapply(poly_2020_raster_data_list,FUN=var)
  # Height-change statistics from the difference raster.
  poly_change_raster_data_list=CHM_diffv$extract(rpoly, fun = NULL)
  rpoly$Change_H_mean=sapply(poly_change_raster_data_list,FUN=mean)
  rpoly$Change_H_max =sapply(poly_change_raster_data_list,FUN=max)
  rpoly$Change_H_min =sapply(poly_change_raster_data_list,FUN=min)
  rpoly$Change_H_var =sapply(poly_change_raster_data_list,FUN=var)
  # Geometry descriptors: perimeter of each polygon plus two shape indices
  # (complexity relative to a circle of equal area, and "circleness"
  # 4*pi*A/P^2, which equals 1 for a perfect circle).
  rpoly$perimeter=as.numeric(polyPerimeter(rpoly))
  rpoly$shape_complexity = as.numeric(rpoly$perimeter/(2*sqrt(rpoly$area*pi)))
  rpoly$shape_circleness=as.numeric(4*pi*(rpoly$area)/((rpoly$perimeter)^2))
  return(rpoly)
}
## Legacy variant: per-polygon height statistics from the original CHM and
## the difference raster only (no separate 2020 layer).
get_poly_CHM_info_velox_old = function(rpoly, CHM_org, CHM_diff) {
  org_cells  <- velox(CHM_org)$extract(rpoly, fun = NULL)
  diff_cells <- velox(CHM_diff)$extract(rpoly, fun = NULL)

  # 2014 height statistics; area is the per-polygon cell count, i.e. the
  # polygon area in raster units. Only the mean drops NA cells.
  rpoly$area       <- sapply(org_cells, FUN = length)
  rpoly$Org_H_mean <- sapply(org_cells, FUN = mean, na.rm = TRUE)
  rpoly$Org_H_max  <- sapply(org_cells, FUN = max)
  rpoly$Org_H_min  <- sapply(org_cells, FUN = min)
  rpoly$Org_H_var  <- sapply(org_cells, FUN = var)

  # Height-change statistics (no na.rm: NA cells propagate to the result).
  rpoly$Change_H_mean <- sapply(diff_cells, FUN = mean)
  rpoly$Change_H_max  <- sapply(diff_cells, FUN = max)
  rpoly$Change_H_min  <- sapply(diff_cells, FUN = min)
  rpoly$Change_H_var  <- sapply(diff_cells, FUN = var)

  # Geometry: perimeter plus two dimensionless shape descriptors.
  rpoly$perimeter        <- as.numeric(polyPerimeter(rpoly))
  rpoly$shape_complexity <- as.numeric(rpoly$perimeter / (2 * sqrt(rpoly$area * pi)))
  rpoly$shape_circleness <- as.numeric(4 * pi * (rpoly$area) / ((rpoly$perimeter)^2))
  return(rpoly)
}
|
2c0cda1bcbe9ab164a6085733d57b530f3015d3f
|
cfd5ed592da9c7bbf7e4590070c60e09e98d4e8b
|
/dashboard/6 Dashboard-Rush hour.R
|
778862e65946f537d23934917e14f978073c9c14
|
[] |
no_license
|
ThoffyLi/LA-Metro-Bike-Share-Analysis
|
a010ff2fd2021857055e2eb36e6da624d6cfcb5c
|
87c44a9ffa97ae9b964e4c6a8a238c2b3d58a604
|
refs/heads/master
| 2020-05-15T23:56:11.558074
| 2019-04-22T17:20:34
| 2019-04-22T17:20:34
| 182,565,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,755
|
r
|
6 Dashboard-Rush hour.R
|
# Shiny dashboard of rush hour~year/quarter/region
library(ggplot2)
library(dplyr)
library(lubridate)
library(shiny)
# Trip-level LA Metro bike-share data; the code below reads columns
# weekday, hour, minute, year, quarter, start_region and duration.
dataset = read.csv("LA_metro.csv")
# The User Interface: filter inputs on the left, two plots on the right.
ui = fluidPage(
  sidebarLayout(
    # Inputs: free-text titles for the two plots, plus Year/Quarter/Region
    # filters where 'Total' means "do not filter on this field".
    sidebarPanel(
      textInput(inputId = "title1", label = "Title1"),
      textInput(inputId = "title2", label = "Title2"),
      selectInput(inputId = "year", label = "Year",
                  choices = c('2017','2018','Total'),selected = 'Total'),
      selectInput(inputId = "quarter", label= "Quarter",
                  choices = c('1','2','3','4','Total'),selected = 'Total'),
      # Region choices are derived from the data itself.
      selectInput(inputId = "region", label= "Region",
                  choices = c(levels(as.factor(dataset$start_region)),'Total'),selected = 'Total')
    ),
    # Outputs: a weekday x half-hour ride-count heatmap and a trip-duration
    # histogram.
    mainPanel(
      plotOutput(outputId = "plot1"),
      plotOutput(outputId = "plot2")
    )
  )
)
# The server: both plots re-filter the trip data according to the
# Year/Quarter/Region selections ('Total' == no filter on that field).
server = function(input, output){
  # Bucket each trip into half-hour slots (e.g. 7.0 and 7.5) using the
  # start minute.
  dataset$time = ifelse(dataset$minute<30,dataset$hour,dataset$hour+0.5)
  # plot1: heatmap of ride counts by weekday and half-hour slot.
  # NOTE(review): the eight branches below differ only in which filters
  # are applied; they could be collapsed into three sequential conditional
  # filter() calls. Also note the grouping column is literally named
  # "as.factor(time)" until renamed after the pipeline.
  output$plot1 = renderPlot({
    if(input$year == 'Total'){
      if(input$quarter == 'Total'){
        if(input$region =='Total'){
          # No filters at all.
          genRH = dataset %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
        else{
          genRH = dataset %>% filter(start_region ==input$region) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
      }
      else{
        if(input$region =='Total'){
          genRH = dataset %>%filter(quarter ==as.numeric(input$quarter)) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
        else{
          genRH = dataset %>% filter(start_region ==input$region & quarter ==as.numeric(input$quarter)) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
      }
    }
    else{
      if(input$quarter == 'Total'){
        if(input$region == 'Total'){
          genRH = dataset %>% filter(year ==as.numeric(input$year)) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
        else{
          genRH = dataset %>% filter(year ==as.numeric(input$year)&start_region ==input$region) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
      }
      else{
        if(input$region == 'Total'){
          genRH = dataset %>% filter(quarter ==as.numeric(input$quarter) & year ==as.numeric(input$year)) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
        else{
          genRH = dataset %>% filter(quarter ==as.numeric(input$quarter) & year ==as.numeric(input$year)&start_region ==input$region) %>% group_by(weekday,as.factor(time)) %>% summarise(count = n())
        }
      }
    }
    # Rename the "as.factor(time)" grouping column and convert it back to
    # numeric half-hours (factor -> character -> numeric to avoid getting
    # the factor level codes).
    colnames(genRH)[2] = "time"
    genRH$time = as.numeric(as.character(genRH$time))
    genRH %>% ggplot(aes(x = weekday, y = time, fill = count))+
      geom_tile()+scale_fill_gradient(low = "white", high = "darkred")+
      scale_y_continuous(breaks = seq(0,23,2))+
      xlab("Weekday")+ggtitle(input$title1)+
      theme(plot.title = element_text(hjust = 0.5))+
      scale_x_discrete(limits = c("Sun","Mon","Tue","Wed","Thu","Fri","Sat"))
  })
  # plot2: density histogram of trip durations (0-100) under the same
  # filters. NOTE(review): unlike plot1, the comparisons here use the raw
  # input strings (no as.numeric); this relies on R's implicit coercion
  # when comparing e.g. "2" with a numeric quarter column -- confirm the
  # two plots are meant to filter identically.
  output$plot2 = renderPlot({
    if(input$year == 'Total'){
      if(input$quarter == 'Total'){
        if(input$region == 'Total'){
          dataset = dataset
        }
        else{
          dataset = dataset %>% filter(start_region == input$region)
        }
      }
      else{
        if(input$region == 'Total'){
          dataset = dataset %>% filter(quarter == input$quarter)
        }
        else{
          dataset = dataset %>% filter(start_region == input$region & quarter == input$quarter)
        }
      }
    }
    else{
      if(input$quarter == 'Total'){
        if(input$region == 'Total'){
          dataset = dataset %>% filter(year == input$year)
        }
        else{
          dataset = dataset %>% filter(year == input$year & start_region == input$region)
        }
      }
      else{
        if(input$region == 'Total'){
          dataset = dataset %>% filter(year == input$year & quarter == input$quarter)
        }
        else{
          dataset = dataset %>% filter(year == input$year & start_region == input$region & quarter == input$quarter)
        }
      }
    }
    dataset %>% ggplot(aes(x = duration, y =..density..))+
      geom_histogram(fill = 'lightblue',color ='white',binwidth = 1,size = 0.8)+
      xlim(c(0,100))+
      xlab('Duration')+
      ylab('Density')+
      ggtitle(input$title2)+theme(plot.title = element_text(hjust = 0.5))
  })
}
# Combine the UI and server and run the app.
shinyApp(ui, server)
|
b1b1bbacc6956ef059a37ddb589657fa2221555a
|
51c1bbe2355aa47cf52016c24413c9f7b0c57972
|
/hw1-1.R
|
19322a0b4043303801c1dcf1786a7c09b41c3c42
|
[] |
no_license
|
yujunkuo/Business-Analytics-with-R
|
cfe6f875dc29f5e2a3d251c9e55cb96b277f660f
|
5d7bc0d03091a80ab4b82cf97ac94c010d28c95a
|
refs/heads/master
| 2022-12-16T02:15:12.074743
| 2020-09-09T11:00:57
| 2020-09-09T11:00:57
| 294,084,163
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 556
|
r
|
hw1-1.R
|
# Solution 1: full pairwise Euclidean distance matrix over the 4 numeric
# iris columns. Only the lower triangle is computed; the mirror image is
# copied because the matrix is symmetric. (Equivalent to as.matrix(dist(data)).)
data(iris)
data <- iris[, -5]          # drop the Species factor column, keep 4 numeric ones
n <- nrow(data)             # generalizes the previously hard-coded 150
dis_matrix <- matrix(0, nrow = n, ncol = n)
for (i in 2:n) {
  for (j in seq_len(i - 1)) {
    d <- sqrt(sum((data[i, ] - data[j, ])^2))
    dis_matrix[i, j] <- d
    dis_matrix[j, i] <- d   # symmetry: d(i, j) == d(j, i)
  }
}
# Solution 2: same distances via a small helper, computing every (i, j) cell
# including the zero diagonal, as the original assignment did.
dis_matrix2 <- matrix(NA_real_, nrow = n, ncol = n)
com_dis <- function(i, j) {
  # Euclidean distance between iris rows i and j over columns 1:4.
  sqrt(sum((iris[i, 1:4] - iris[j, 1:4])^2))
}
for (i in seq_len(n)) {
  for (j in seq_len(n)) {
    dis_matrix2[i, j] <- com_dis(i, j)
  }
}
|
5b47cbcd91ef1969bd00138da81280eb78f215e4
|
244393a89b3f8a836ee5afdd2ec9c91f5e52a6cd
|
/Machine_Learning/knn_overtraining_and_oversmoothing.R
|
80fb7cc5fb779cbd0fdc6562551d9aa342195e25
|
[] |
no_license
|
mjchenko/R_for_Data_Science
|
c33e470bb7b054ba5255df99aa06f60c2940976d
|
a2d228b738400a80fa2ab6fbf9df7af40a2ad83e
|
refs/heads/main
| 2023-02-01T13:39:57.324999
| 2020-12-18T20:27:20
| 2020-12-18T20:27:20
| 322,691,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,305
|
r
|
knn_overtraining_and_oversmoothing.R
|
# kNN vs logistic regression on the dslabs mnist_27 dataset (digits 2 vs 7),
# illustrating overtraining (small k) and oversmoothing (large k).
library(tidyverse)
library(dslabs)
library(caret)
data("mnist_27")
#showing all the data
mnist_27$test %>% ggplot(aes(x_1, x_2, color = y)) + geom_point()
#logistic regression as the baseline we are trying to beat
fit_glm <- glm(y~x_1+x_2, data=mnist_27$train, family="binomial")
# NOTE(review): predict.glm defaults to type = "link", so p_hat_logistic is on
# the log-odds scale; thresholding it at 0.5 is not the same as probability
# 0.5 — confirm whether type = "response" was intended.
p_hat_logistic <- predict(fit_glm, mnist_27$test)
y_hat_logistic <- factor(ifelse(p_hat_logistic > 0.5, 7, 2))
confusionMatrix(data = y_hat_logistic, reference = mnist_27$test$y)$overall[1]
#fit knn model (~ . means use all predictors in the data set. i.e. x_1 + x_2)
knn_fit <- knn3(y ~ ., data = mnist_27$train)
## we could also fit using this method
# the train data set has two predictors x_1 and x_2. putting these in x
x <- as.matrix(mnist_27$train[,2:3])
#putting the actual number (2,7) in y
y <- mnist_27$train$y
##example how to fit
#fitting using knn3 (in caret package)
knn_fit <- knn3(x,y)
# k is the number of nearest neighbors. The default is 5 but we write explicitly
knn_fit <- knn3(y ~ ., data = mnist_27$train, k=5)
# type = "class" means it will give the actual prediction (2 or 7) instead of the probability
y_hat_knn <- predict(knn_fit, mnist_27$test, type = "class")
confusionMatrix(data = y_hat_knn, reference = mnist_27$test$y)$overall["Accuracy"]
###overtraining and oversmoothing
# we see that when we test accuracy on the train set it is higher than on the
# test set indicating over training
y_hat_knn <- predict(knn_fit, mnist_27$train, type = "class")
confusionMatrix(data = y_hat_knn, reference = mnist_27$train$y)$overall["Accuracy"]
y_hat_knn <- predict(knn_fit, mnist_27$test, type = "class")
confusionMatrix(data = y_hat_knn, reference = mnist_27$test$y)$overall["Accuracy"]
#fit knn with k=1
# when k = 1 it only takes the one current point so it should be perfect training on the train set
# but this will not be accurate on the test set due to overtraining
knn_fit_1 <- knn3(y ~ ., data = mnist_27$train, k = 1)
#train set
y_hat_knn_1 <- predict(knn_fit_1, mnist_27$train, type = "class")
confusionMatrix(data=y_hat_knn_1, reference=mnist_27$train$y)$overall[["Accuracy"]]
#test set
y_hat_knn_1 <- predict(knn_fit_1, mnist_27$test, type = "class")
confusionMatrix(data=y_hat_knn_1, reference=mnist_27$test$y)$overall[["Accuracy"]]
#fit knn with k=401
#larger k will lead to more smoothing but too large of a k will lead to almost linear behavior
#because it considers too many points
knn_fit_401 <- knn3(y ~ ., data = mnist_27$train, k = 401)
y_hat_knn_401 <- predict(knn_fit_401, mnist_27$test, type = "class")
confusionMatrix(data=y_hat_knn_401, reference=mnist_27$test$y)$overall["Accuracy"]
#pick the k in knn
ks <- seq(3, 251, 2)
library(purrr)
# For each candidate k, fit on the training set and record accuracy on both
# the training and the test set (columns are named *_error but hold accuracy).
accuracy <- map_df(ks, function(k){
  fit <- knn3(y ~ ., data = mnist_27$train, k = k)
  y_hat <- predict(fit, mnist_27$train, type = "class")
  cm_train <- confusionMatrix(data = y_hat, reference = mnist_27$train$y)
  train_error <- cm_train$overall["Accuracy"]
  y_hat <- predict(fit, mnist_27$test, type = "class")
  cm_test <- confusionMatrix(data = y_hat, reference = mnist_27$test$y)
  test_error <- cm_test$overall["Accuracy"]
  tibble(train = train_error, test = test_error)
})
#pick the k that maximizes accuracy using the estimates built on the test data
# NOTE(review): selecting k on the test set leaks test information; this is
# acceptable only for illustration, not for model selection in practice.
ks[which.max(accuracy$test)]
max(accuracy$test)
|
d28097ab38a0f80f14d8d674d4f904e1fa9d3180
|
1c5baab48957be97c27b82676e66681a7b8e5742
|
/data/make_heatmap_data.R
|
be66e0987464ed281eef8fa423cd647296509a85
|
[] |
no_license
|
eduardobsg/intraday-fx-seasonality-app
|
50ce69cb37969db180ea00c200ae0c7013757a57
|
5e2b6de947dd0cb89c14f8ee886f124c6c10baf3
|
refs/heads/master
| 2022-11-17T14:19:29.233611
| 2020-07-13T05:29:28
| 2020-07-13T05:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,804
|
r
|
make_heatmap_data.R
|
library(tidyverse)
library(here)
# get list of performance files, calculate lengths of filenames
temp <- list.files(path = here::here("data", "perf_logs"), pattern = "*.csv")
# Exploratory check only: the part counts are printed and the result discarded.
map_dbl(temp, ~length(flatten_chr(str_split(.x, "_"))))
# length == 8: detrended, length == 7: not detrended
process_perf_log <- function(filename) {
chars <- str_split(filename, "_") %>% flatten_chr()
if(length(chars) == 8)
{
detrended = TRUE
ccy = chars[3]
offset = chars[6]
years = glue::glue("{chars[7]}-{str_split(chars[8], '.csv')[[1]][1]}")
} else if(length(chars) == 7) {
detrended = FALSE
ccy = chars[3]
offset = chars[5]
years = glue::glue("{chars[6]}-{str_split(chars[7], '.csv')[[1]][1]}")
}
df <- read_csv(here::here("data", "perf_logs", filename))
df %>%
mutate(
Detrended = detrended,
LocalCcy = ccy,
Offset = offset,
Years = years
)
}
# Parse every perf log and stack them into one data frame.
performance_df <- temp %>%
  map(process_perf_log) %>%
  bind_rows()
# Map each local currency to its market timezone and normalise tickers
# (drop the "/" from pairs like "EUR/USD").
performance_df <- performance_df %>%
  mutate(
    Timezone = case_when(
      LocalCcy == "USD" ~ "ET",
      LocalCcy == "EUR" ~ "CET",
      LocalCcy == "JPY" ~ "JST"
    ),
    Asset = str_remove(Asset, "/")
  ) %>%
  rename("Ticker" = Asset)
# check we have expected number of observations
assets_per_tz <- 7
params <- list(
  timezones = c("ET", "CET", "JST"),
  offsets = c(0, 17, 33),
  detrended = c(TRUE, FALSE),
  year_subsets = c("2009-2011", "2012-2014", "2015-2017", "2018-2020", "2009-2020")
)
start_hours <- 7
end_hours <- 7
# Note: %>% binds tighter than *, so the last factor parses as
# prod(map_dbl(params, length)) — which is the intended product of the
# parameter-grid sizes, not an elementwise product of the whole expression.
expected <- assets_per_tz * start_hours * end_hours * map_dbl(params, length) %>% prod()
actual <- performance_df %>% distinct() %>% nrow()
# Only persist the combined data frame when the row count matches expectations.
if(expected == actual) {
  save(performance_df, file = here::here("data", "performance_df.RData"))
}
|
701a1f90115ead61b4a4eb905fa9189eccb55718
|
3db305c9b6f9f791d2668f88e9f42c0cbfbaf4cf
|
/argosTrack/man/DCRW-class.Rd
|
bd8be6c78bcb96060557b6413a9ebb98bafe03d3
|
[] |
no_license
|
calbertsen/argosTrack
|
4789f170f0b53cf2afa83195c55d57c25d3bd591
|
d09d54082bcf03c555f3553ff444bb5dc2246b34
|
refs/heads/master
| 2022-09-02T05:37:29.760935
| 2020-11-25T12:59:55
| 2020-11-25T12:59:55
| 24,145,844
| 10
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,695
|
rd
|
DCRW-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Movement_DCRW.R
\docType{class}
\name{DCRW-class}
\alias{DCRW-class}
\title{A Reference Class for fitting a discrete time first Difference Correlated Random Walk model.}
\description{
The reference class implements a discrete time first Difference Correlated Random Walk (Jonsen et al. 2005). The locations are modelled by
\deqn{X_t = X_{t-\Delta} + \gamma T(\phi) (X_{t-\Delta} - X_{t-2\Delta}) + \epsilon_t}
Here, \eqn{\epsilon_t} is zero mean Gaussian noise with covariance \eqn{ \pmatrix{ \sigma_1^2 & \rho\sigma_1\sigma_2 \cr \rho\sigma_1\sigma_2 & \sigma_2^2 }}. \eqn{T(\phi)} is the rotation matrix \eqn{ \pmatrix{ \cos(\phi) & -\sin(\phi) \cr \sin(\phi) & \cos(\phi) }}. \eqn{\gamma} is a scalar.
}
\section{Methods}{
\describe{
\item{\code{getTMBmap(...)}}{Function to return a map list for TMB::MakeADFun.}
\item{\code{simulate(x0 = c(0, 0))}}{Function to simulate from the movement model. The initial states (latitudinal/y-coordinate location and longitudinal/x-coordinate location) must be given. If nauticalStates==TRUE, the result is returned in nautical miles.}
}}
\references{
Jonsen, I., J. Mills Flemming, and R. Myers. (2005) Robust state-space modeling of animal movement data. Ecology 86, 2874-2880.
}
\seealso{
\link{Movement-class} \link{DCRW}
Other "Movement models": \code{\link{CSB-class}},
\code{\link{CTCRW-class}}, \code{\link{DIRAC-class}},
\code{\link{DSBHN-class}}, \code{\link{DSBW-class}},
\code{\link{GDCRW-class}}, \code{\link{MPCTCRW-class}},
\code{\link{Movement-class}}, \code{\link{OUL-class}},
\code{\link{OUV-class}}, \code{\link{RW-class}}
}
\concept{"Movement models"}
|
21274af6acd676bea0dbe82ed99457d1c33d42af
|
a978a2a95d989b6df0941dc3cce8c691ccd1151f
|
/results/assignments/assignment2.R
|
3c864b581917c118cc911ac7c698d57ce66e835d
|
[] |
no_license
|
Denis-png/eda_rhine
|
11f85aeaf6c8a5da8b8399987cbc5c984eb70d40
|
8092e866d5cfad28b0c50df909b34391ca05ec21
|
refs/heads/master
| 2021-05-23T19:12:22.028155
| 2020-04-13T11:53:12
| 2020-04-13T11:53:12
| 253,431,201
| 0
| 0
| null | 2020-04-06T07:53:04
| 2020-04-06T07:53:03
| null |
UTF-8
|
R
| false
| false
| 1,262
|
r
|
assignment2.R
|
#Task 1-3
library(data.table)
library(ggplot2)
# Station metadata: abbreviate station names and tidy the id/altitude columns.
runoff_stations <- fread('./data/raw/runoff_stations.csv')
runoff_stations[, sname := factor(abbreviate(station))]
runoff_stations[, id := factor(id)]
runoff_stations[, altitude := round(altitude, 0)]
runoff_stations_new <- runoff_stations[,.(sname, area, altitude)]
# Point-size heuristic: twice the catchment area per unit altitude.
runoff_stations_new$size <- runoff_stations_new[,(2*(area/altitude))]
# Scatter of catchment area vs altitude, labelled with station abbreviations.
ggplot(data = runoff_stations_new, aes(x = area, y = altitude, color = size)) +
  geom_point() +
  geom_text(aes(label=sname),hjust=0, vjust=0)
# Map-style plot: station coordinates coloured by altitude.
runoff_stations[, lat := round(lat, 3)]
runoff_stations[, lon := round(lon, 3)]
runoff_stations_very_new <- runoff_stations[,.(sname, lon, lat)]
runoff_stations_very_new$altitude <- runoff_stations_new[,altitude]
ggplot(data = runoff_stations_very_new, aes(x = lon, y = lat, color = altitude)) +
  geom_point()+
  geom_text(aes(label=sname),hjust=0, vjust=0) +
  scale_color_gradient(low="darkgreen", high="darkred")
#Task 4-5
runoff_day <- readRDS('./data/runoff_day_raw.rds')
# Rows with value < 0 are counted as missing observations per station/date
# (presumably a negative sentinel encodes "missing" — TODO confirm upstream).
missing_values <- runoff_day[value < 0, .(missing = .N), by = .(sname,date)]
# Boxplot of observation dates per station, with missing-value dates overlaid.
ggplot(data = runoff_day, aes(x = sname, y = date, color = sname)) +
  geom_boxplot() +
  geom_point(data = missing_values, aes(x = sname , y = date, color = 'red'))
|
1556414bde3619b4ca5582c4b06e5364a89e99b1
|
54e47fa6756bab639986dc3c1f3c2d8964f79d09
|
/run_analysis.R
|
71ed42b102f8c00053ab637f65579a1a54e5db67
|
[] |
no_license
|
pleap/ExDataProject
|
63068b8b30ec956c577ef7065604b9ae975bc176
|
5a1a8a32ed65b95eb6045c9b92746b99333deba2
|
refs/heads/master
| 2021-01-20T07:03:24.086069
| 2014-12-18T04:13:56
| 2014-12-18T04:13:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,605
|
r
|
run_analysis.R
|
# Tidy the UCI HAR (Human Activity Recognition) dataset: merge train/test,
# keep mean/std measurements, attach activity names, and write per
# subject/activity averages to tidydata.txt.
library(dplyr)
## STEP 1 (Includes STEP 4) Merges the training and the test sets to create one data set
## Create the variable name list from features.txt
tmpfeatures<-read.table("./UCI HAR Dataset-2/features.txt")
features<-tmpfeatures[2]
# Strip punctuation so the feature strings become valid R column names.
features<-gsub( "[^[:alnum:][:space:]']" , "", features[,1])
features<-data.frame(features)
colnames(features)<-"feature"
## Open the Y data
ydata <- read.table("./UCI HAR Dataset-2/test/y_test.txt", col.names="activityID")
ydata1 <- read.table("./UCI HAR Dataset-2/train/y_train.txt", col.names="activityID")
ydata<-rbind(ydata, ydata1)
rm(ydata1)
##Open the X data
Xdata <- read.table("./UCI HAR Dataset-2/test/X_test.txt", col.names = features$feature)
Xdata1 <- read.table("./UCI HAR Dataset-2/train/X_train.txt", col.names = features$feature)
Xdata<-rbind(Xdata, Xdata1)
rm(Xdata1)
##Open the subject data
subjectdata <- read.table("./UCI HAR Dataset-2/test/subject_test.txt", col.names="subject")
subjectdata1 <- read.table("./UCI HAR Dataset-2/train/subject_train.txt", col.names="subject")
subjectdata<-rbind(subjectdata, subjectdata1)
rm(subjectdata1)
##Combine columns
dataset<-cbind(subjectdata, ydata, Xdata)
## Remove Temp variables
rm(tmpfeatures, subjectdata, Xdata, ydata, features)
## STEP 2 - Extract only the measurements on the mean and standard deviation for each measurement.
## Select the std and mean columns
# Note: contains("mean") also matches meanFreq-style columns.
dataset<-select(dataset, subject, activityID, contains("mean"), contains("std"))
## STEP 3 - Use descriptive activity names to name the activities in the data set
## Create table "activitylabels" with the activity names from the activity_labels.txt file
activitylabels<-read.table("./UCI HAR Dataset-2/activity_labels.txt")
names(activitylabels)<-c("activityID", "activity")
dataset<-inner_join(dataset, activitylabels, by = "activityID")
# NOTE(review): this select mixes inclusion and exclusion (-activityID) in one
# call; recent tidyselect versions warn or error on mixed selections — verify
# against the dplyr version in use.
dataset<-select(dataset,subject, activity, -activityID, contains("mean"), contains("std"))
rm(activitylabels)
## STEP 4 - Appropriately labels the data set with descriptive variable names.
# This was needed in STEP 2 to extract the mean and std measurements. Reused in STEP 3 to collect the columns in order
## STEP 5 - From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
##Compute the mean of each test
# NOTE(review): summarise_each()/funs() are deprecated in modern dplyr
# (superseded by across()); kept as-is to preserve behavior.
result <- dataset %>%
  group_by(subject) %>%
  group_by(activity, add = TRUE) %>%
  summarise_each(funs(mean), contains("mean"), contains("std")) %>%
  arrange(subject, activity)
## Write out tidy datafile for submission
write.table(result, "tidydata.txt", row.name=FALSE)
|
d19d3016621bd05a3025e49131648378fb69402a
|
b29e2cd95341baf97f21a665d7298bf9b88121a7
|
/man/select_door.Rd
|
fee2dc9dca23c34c8bc4b8d6ce40872000ba421d
|
[] |
no_license
|
radwan-a/montyhall
|
7f038e0c5f56756945a3c3b14967f71a553cec45
|
286e77b860608ca6754edd418e6d00e4fc7630a7
|
refs/heads/main
| 2023-08-11T01:41:51.671390
| 2021-10-07T06:35:52
| 2021-10-07T06:35:52
| 414,487,265
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 668
|
rd
|
select_door.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-problem.R
\name{select_door}
\alias{select_door}
\title{Contestant selects a door.}
\usage{
select_door()
}
\arguments{
\item{...}{no arguments are used by the function.}
}
\value{
The function returns a numeric vector of length one
indicating the chosen door by contestant.
}
\description{
\code{select_door()} allows contestant to choose one door out of the three available doors at random.
}
\details{
Contestant is asked to pick a door out of the three available doors.
Two of the doors are goat doors and only one of the doors is a car door.
}
\examples{
select_door()
}
|
a9f5868ca75d80061358b7873b3d3bdfa843db37
|
26f5865f149e0e7f87c074c035ebb10fc1bf3800
|
/R/kriga.R
|
8e97a3ae5920398235b0b6ded840156302bff4f7
|
[] |
no_license
|
elpidiofilho/labgeo
|
bfa3580b67f0f777f1c28e82dd826ee6ae89da32
|
aebd001557b18832a2a6080c54892a3262a45162
|
refs/heads/master
| 2021-01-22T12:38:39.902086
| 2018-08-11T13:42:37
| 2018-08-11T13:42:37
| 102,354,346
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,932
|
r
|
kriga.R
|
#' kriga - Automatic kriging with repeated hold-out validation
#' @param df dataframe with target variable, coordinate x and coordinate y
#' @param target_var Name of column with target_var
#' @param nrep Number of repetitions
#' @param name_px names of column with coordinate x
#' @param name_py names of column with coordinate y
#' @param p proportion between training and validation
#' @param seed seed number to allow reproducibility
#' @keywords krige geostatics
#' @importFrom automap autoKrige
#' @importFrom dplyr select filter
#' @importFrom caret createDataPartition
#' @importFrom sp coordinates
#' @importFrom stats as.formula
#' @importFrom knitr kable
#' @author Elpidio Filho, \email{elpidio@ufv.br}
#' @details details
#' @export
#' @examples
#' \dontrun{
#' result.krig = kriga(df = d,target_var = "argila", name_px = 'point_x',
#' name_py = 'point_y',nrep = 100, p = 0.75 , seed = 313)
#' media = result.krig %>% select(-(model:repet))
#' %>% summarise_all(funs(med = mean, desvpad = sd ))
#' print(knitr::kable(media))
#' }
kriga <- function(df, target_var, nrep = 10,
                  name_px, name_py, p = 0.75, seed = NULL) {
  repet <- NULL  # silences the R CMD check NOTE for the NSE use of `repet` below
  ng <- nrep
  if (!is.null(seed)) {
    set.seed(seed)
  }
  # BUG FIX: `p` was documented but never forwarded, so createDataPartition
  # always used its default split (p = 0.5) regardless of the argument.
  ld <- caret::createDataPartition(df[, 1], times = ng, p = p)
  varsel <- c(target_var, name_px, name_py)
  dsel <- df %>% select(one_of(varsel))
  # Build stacked train/validation tables, tagging each partition with `repet`.
  for (i in seq_along(ld)) {
    df1 <- dsel[unlist(ld[[i]]), ]
    df1$repet <- i
    df2 <- dsel[-unlist(ld[[i]]), ]
    df2$repet <- i
    if (i == 1) {
      dftreino <- df1
      dfvalida <- df2
    } else {
      dftreino <- rbind(dftreino, df1)
      dfvalida <- rbind(dfvalida, df2)
    }
  }
  # Preallocate one result row per repetition.
  dfresult <- data.frame(
    model = character(ng), vars = character(ng),
    repet = numeric(ng),
    r2 = numeric(ng), rmse = numeric(ng),
    mae = numeric(ng), mbe = numeric(ng),
    stringsAsFactors = FALSE
  )
  for (i in seq_len(ng)) {
    dsel.treino <- dftreino %>% filter(repet == i)
    dsel.valida <- dfvalida %>% filter(repet == i)
    # Promote the train/validation frames to SpatialPointsDataFrame objects.
    f1 <- as.formula(paste("~", name_px, "+", name_py))
    f2 <- as.formula(paste(target_var, " ~ 1"))
    sp::coordinates(dsel.treino) <- f1
    sp::coordinates(dsel.valida) <- f1
    # Ordinary kriging with a Stein (Ste) variogram model.
    kr <- automap::autoKrige(f2, dsel.treino, dsel.valida, model = c("Ste"))
    ddd <- data.frame(
      predito = kr$krige_output$var1.pred,
      observado = select(dsel.valida@data, one_of(target_var))
    )
    names(ddd) <- c("predito", "observado")
    # pred_acc() is a sibling helper in this package returning accuracy metrics.
    result <- pred_acc(ddd$observado, ddd$predito)
    dfresult$model[i] <- "krigging"
    dfresult$vars[i] <- as.character(kr$var_model[2, 1])
    dfresult$repet[i] <- i
    dfresult$r2[i] <- result$rsquared
    dfresult$rmse[i] <- result$root_mean_square_error
    dfresult$mae[i] <- result$mean_absolute_error
    dfresult$mbe[i] <- result$mean_bias_error
  }
  print(knitr::kable(dfresult))
  return(dfresult)
}
|
6ee99770942935f50940d52c05ac4132da36d874
|
cf37901bf9a214d3f68c699ab8c8aa5b88528a0b
|
/Football/football_functions.R
|
1f9ae450a6937e31b234dbd1294f472b7389759b
|
[] |
no_license
|
tcampbell8/sports_projects
|
ab3f372f9b20ca7c4e4ba91568ae1f960ca0e377
|
e428cdfc4b50a5563674ec7b3c8639bb7e32e70f
|
refs/heads/master
| 2021-01-10T02:47:58.265126
| 2015-10-29T17:08:22
| 2015-10-29T17:08:22
| 43,761,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,280
|
r
|
football_functions.R
|
#random sampling for fantasy football teams using aggregated week 1 data
# Interactive file chooser: expects a CSV with (at least) Name, Position,
# Salary and AvgPointsPerGame columns.
week1 <- read.csv(file.choose())
week1$Index <- 1:length(week1$Name)
#names(week1)[6] <- "Index"
# Split the player pool by position; columns 1:7 keep the original fields
# plus the Index column added above.
rbs = subset(week1, week1$Position =="RB", 1:7)
dst = subset(week1, Position =="DST", 1:7)
qbs = subset(week1, Position =="QB", 1:7)
tes = subset(week1, Position =="TE", 1:7)
wrs = subset(week1, Position =="WR", 1:7)
# Pool sizes used as sampling bounds by random_football_team().
n_rbs <- length(rbs[,1])
n_dst <- length(dst[,1])
n_qbs <- length(qbs[,1])
n_tes <- length(tes[,1])
n_wrs <- length(wrs[,1])
# Draw one random fantasy lineup (1 QB, 3 RB, 3 WR, 1 TE, 1 DST) whose total
# salary fits under the $50,000 cap. Uses rejection sampling: keep drawing
# lineups until an affordable one appears. Relies on the position pools
# (qbs/rbs/wrs/tes/dst) defined at the top of this file.
random_football_team <- function(N_rbs=n_rbs, N_dst=n_dst, N_qbs=n_qbs, N_tes=n_tes, N_wrs=n_wrs){
  repeat{
    # Sample row indices per position (no duplicate players within a position).
    # seq_len() is safe for small pools, unlike 1:N when N could be 0.
    nums_rbs <- sample(seq_len(N_rbs), 3, replace = FALSE)
    nums_qbs <- sample(seq_len(N_qbs), 1, replace = FALSE)
    nums_dst <- sample(seq_len(N_dst), 1, replace = FALSE)
    nums_tes <- sample(seq_len(N_tes), 1, replace = FALSE)
    nums_wrs <- sample(seq_len(N_wrs), 3, replace = FALSE)
    # Assemble the lineup in one call instead of nine sequential rbinds;
    # row order (QB, RB1-3, WR1-3, TE, DST) matches the original.
    team_new <- rbind.data.frame(
      qbs[nums_qbs[1], ],
      rbs[nums_rbs, ],
      wrs[nums_wrs, ],
      tes[nums_tes[1], ],
      dst[nums_dst[1], ]
    )
    if (sum(team_new$Salary) <= 50000)
      break
  }
  return(team_new)
}
# Generate `trials` random teams, rank them by projected points, and replace
# the player indices of the top (up to 100) teams with player names.
gen_football_teams <- function(trials){
  # Preallocate: growing a data.frame with rbind inside the loop is O(n^2).
  idx_rows <- vector("list", trials)
  team_points <- numeric(trials)
  for (i in seq_len(trials)){
    t <- random_football_team()
    idx_rows[[i]] <- t$Index
    team_points[i] <- sum(t$AvgPointsPerGame) #Insert appropriate predictive statistic to sum
  }
  team_indices <- as.data.frame(do.call(rbind, idx_rows))
  team_indices <- cbind.data.frame(team_indices, team_points)
  team_indices$Trial <- seq_len(nrow(team_indices))
  names(team_indices) <- c("QB", "RB1", "RB2", "RB3", "WR1", "WR2", "WR3",
                           "TE", "DST", "TeamPoints", "Trial")
  df <- team_indices[order(-team_points),]
  # BUG FIX: the original looped over a hard-coded 1:100, indexing past the
  # end of `df` (creating NA rows) whenever trials < 100.
  for (j in seq_len(min(100, nrow(df)))){
    for (k in 1:9){
      value <- df[j,k]
      df[j,k] <- as.character(week1$Name[as.integer(value)])
    }
  }
  return(df)
}
head(gen_football_teams(1000))
|
152cd49d94b3abe63b1d1d60788c1a2cc60f34cf
|
087898ec4573a2a0fbe3c62aa4d2dea9fec2e301
|
/HW3_group8/hw3-2.R
|
ecdd242872acf198373d2040b9e1956a0043b8b0
|
[] |
no_license
|
405520002/R-Business-analytics
|
8af4eb26e7b85a094bc65ebd20362a6d4b73d727
|
851212e9c503686300de3e8a06afd8a5e5144992
|
refs/heads/master
| 2023-03-09T13:45:20.776236
| 2021-02-25T14:29:06
| 2021-02-25T14:29:06
| 342,250,328
| 0
| 0
| null | null | null | null |
BIG5
|
R
| false
| false
| 4,768
|
r
|
hw3-2.R
|
# Newsvendor-style simulation: jersey sales around a game whose outcome
# (win/loss) changes post-game demand and price.
cost=10 # unit cost per shirt
before_price=25 # selling price before the game starts
win_price=before_price # selling price after the Eagles win
lose_price=12.5 # selling price after the Eagles lose
n=1000 # number of simulated games
win_prob=0.4 # probability the Eagles win
# mean 2000 and standard deviation of 1000
#ex=shape*scale,varx=shape*scale^2
scale=(1000**2)/2000 # gamma scale parameter (var/mean)
sh=2000/scale # gamma shape parameter (mean/scale)
supply=seq(from=1000,to=50000,by=200) # candidate supply levels (1000 to 50000 in steps of 200)
profit_one<-c() # profits across all supply levels for one simulated game
demand_one<-c()
profit_table<-data.frame(matrix(ncol = length(supply), nrow = 0)) # one row of profits per simulated game
game=sample(c(1,0),n,replace=TRUE,prob=c(win_prob,1-win_prob)) # game outcomes: 1 = win, 0 = loss
demand_table=data.frame(matrix(ncol = length(supply), nrow = 0)) # pre-/post-/total demand per game
for (i in c(1:1000)){
  before_demand=rnorm(1,9000,2000) # pre-game demand
  if(game[i]==1){ # win
    demand=rnorm(1,6000,2000) # post-game demand after a win
  }
  else{ # loss
    demand=rgamma(1,shape=sh,scale=scale) # post-game demand after a loss
  }
  for(s in supply){ # evaluate profit at every candidate supply level
    if(game[i]==1){
      profit=min(s,before_demand)*(before_price-cost)+max(0,min(s-before_demand,demand))*(win_price-cost)-(s-before_demand-max(0,min(s-before_demand,demand)))*cost
      # profit = min(supply, pre-game demand) * pre-game margin
      #        + max(0, min(leftover stock, post-win demand)) * post-win margin
      #        - unsold shirts after the game * cost
    }
    else{
      profit=min(s,before_demand)*(before_price-cost)+max(0,min(s-before_demand,demand))*(lose_price-cost)-(s-before_demand-max(0,min(s-before_demand,demand)))*cost
      # profit = min(supply, pre-game demand) * pre-game margin
      #        + max(0, min(leftover stock, post-loss demand)) * post-loss margin
      #        - unsold shirts after the game * cost
    }
    d=before_demand+demand # NOTE(review): `d` is never used afterwards
    profit_one=c(profit_one,profit) # append the profit for this supply level
  }
  total=demand+before_demand # pre-game + post-game demand
  demand_one=c(before_demand,demand,total) # record pre-, post-, and total demand for this game
  profit_table=rbind(profit_table, profit_one) # append to the profit table
  demand_table<-rbind(demand_table,demand_one) # append to the demand table
  profit_one<-c()
  demand_one<-c()
  # reset the per-game buffers
}
profitmean=colMeans(profit_table) # mean profit at each supply level
library(ggplot2)
# (1) plot mean profit vs supply, highlighting the optimum in red
mean<-data.frame(x = supply, y = profitmean)
ggplot(data=mean,mapping = aes(x = supply, y = profitmean))+geom_line(colour="blue")+geom_point(x=supply[which.max(profitmean)],y=max(profitmean),colour="red")
#(2)
cvar10<-c()
# For each supply level, compute CVaR(10%): the mean of the profits at or
# below the 10th percentile.
for(i in 1:length(supply)){
  data=profit_table[,i]
  q=quantile(data, probs =0.1)
  m=mean(data[data<=q])
  cvar10<-c(cvar10,m)
}
# (2) plot CVaR(10%) vs supply, highlighting the optimum
cvar10_p<-data.frame(x = supply, y = cvar10)
ggplot(data=cvar10_p,aes(x = supply, y = cvar10))+geom_line(colour="red")+geom_point(x=supply[which.max(cvar10)],y=max(cvar10),colour="black")
# (3) Newsvendor benchmark: order at the critical-ratio quantile of total
# demand regardless of the game outcome (service level for the win case).
win_threshold=(win_price-cost)/((win_price-cost)+cost) # cu/(cu+co) = 0.6 for a win
# Order quantity: the 0.6 quantile of simulated total demand.
q=quantile(demand_table[,3],probs=win_threshold)
perfect_profit<-c()
# Evaluate the profit for every simulated game whose total demand is below q.
for(i in 1:n){
  before_demand=demand_table[i,1]
  after_demand=demand_table[i,2]
  total_demand=demand_table[i,3]
  if(total_demand<q){
    # BUG FIX: the original formulas used the stale loop variable `s`
    # (left at the last supply value, 50000) in place of the order
    # quantity `q`, mixing two different supply levels in one profit.
    after_price=if(game[i]==1) win_price else lose_price
    sold_after=max(0,min(q-before_demand,after_demand))
    profit=min(q,before_demand)*(before_price-cost)+sold_after*(after_price-cost)-(q-before_demand-sold_after)*cost
    perfect_profit<-c(perfect_profit,profit)
  }
}
mean(perfect_profit)
# Combined plot of (1) mean profit and (2) CVaR(10%) against supply
ggplot()+geom_line(aes(supply,profitmean),colour="blue")+geom_line(aes(supply,cvar10),colour="red")
# Print the optimal production quantity and profit for (1), (2) and (3)
cat("optimal_profit of (1):",round(max(profitmean)),"optimal_supply of (1): ",supply[which.max(profitmean)])
cat("optimal_profit of (2):",round(max(cvar10)),"optimal_supply of (2): ",supply[which.max(cvar10)])
cat("optimal_profit of (3):",round(mean(perfect_profit)),"optimal_supply of (3): ",round(q))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.