blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
572807f08d30511e291aca355144da8522eada40
|
67c2a90c7edfac3cfd891cb332c45e71cf4a6ad1
|
/R/din.equivalent.class.R
|
14cf4cecebc0e659a694a47604d7d30117a1a736
|
[] |
no_license
|
alexanderrobitzsch/CDM
|
48316397029327f213967dd6370a709dd1bd2e0a
|
7fde48c9fe331b020ad9c7d8b0ec776acbff6a52
|
refs/heads/master
| 2022-09-28T18:09:22.491208
| 2022-08-26T11:36:31
| 2022-08-26T11:36:31
| 95,295,826
| 21
| 11
| null | 2019-06-19T09:40:01
| 2017-06-24T12:19:45
|
R
|
UTF-8
|
R
| false
| false
| 1,630
|
r
|
din.equivalent.class.R
|
## File Name: din.equivalent.class.R
## File Version: 0.191
#**** calculation of equivalent skill classes
din.equivalent.class <- function( q.matrix, rule="DINA")
{
    # Identify skill classes that are statistically equivalent, i.e. that
    # produce the same latent response pattern under the condensation rule(s).
    Q <- q.matrix
    J <- nrow(Q)
    # all 2^K binary skill profiles, K = number of skills (columns of Q)
    S <- expand.grid( as.data.frame( t( matrix( rep( c(0,1), each=ncol(Q)), ncol=2 ))))
    # recycle a single rule so that every item has its own rule entry
    if ( length(rule)==1){
        rule <- rep( rule, J )
    }
    skill_pattern <- apply( S, 1, FUN=function(ll){ paste(ll, collapse="" ) } )
    rownames(S) <- paste0("Skills_", skill_pattern)
    # latent response of every skill class; transpose so rows = skill classes
    A <- t( din_equivalent_class_latent_response(q.matrix=Q, S=S, rule=rule) )
    I <- nrow(A)
    # string label of each latent response pattern
    latent.response <- paste0("LatResp_",
        apply( A, 1, FUN=function(rr){ paste( rr, collapse="" ) } ) )
    skillclasses <- data.frame( "skillclass"=rownames(S) )
    skillclasses$latent.response <- latent.response
    # skill classes sharing a latent response receive the same index
    skillclasses$distinguish.class <- match( latent.response, unique( latent.response ) )
    # frequency of each latent response pattern, sorted decreasingly
    latent.response <- table( latent.response )
    six <- sort( latent.response, index.return=FALSE, decreasing=TRUE)
    gini_mod <- cdm_gini( as.numeric(six) )
    res <- list( "latent.responseM"=A, "latent.response"=latent.response,
        "S"=S, "gini"=gini_mod, "skillclasses"=skillclasses )
    cat( nrow(S), "Skill classes |", max( skillclasses$distinguish.class ),
        " distinguishable skill classes |",
        "Gini coefficient=", round( gini_mod,3 ), "\n")
    return(res)
}
|
7f5cc8a6738ebe9438c07aece18a68ad5cc76a6d
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mgpd/R/Alog.R
|
00e0a4df3aa4168e2e5c7f3bc624ebd8beff50ac
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
Alog.R
|
# Evaluates the precomputed expression object `.Alog` (defined elsewhere in
# the mgpd package; presumably the bivariate logistic dependence function
# A(t1, t2) -- confirm against the package source).  The self-assignments
# force the argument promises so that `.Alog` sees concrete values for t1,
# t2 and alpha; extra arguments in `...` are accepted and ignored.
Alog <-
function(t1, t2, alpha=2 ,... ) eval({t1<-t1; t2<-t2; alpha<-alpha; .Alog})
|
541261bae139ff349056188533c05b4f9d584927
|
80c98dbfc7f47256233c4a164482530ad20dd523
|
/primer/ejp-1/primero.R
|
702ee91e3754fd87b67cb61d5110588ef088d79d
|
[] |
no_license
|
davidlechuga/Remedia-est-des
|
de21f35842e48a997e70243629cac984e820b974
|
1ade475392d7503a332bb2f32b4298ebb6e3b5b7
|
refs/heads/master
| 2020-05-18T21:13:27.838597
| 2019-05-02T21:42:17
| 2019-05-02T21:42:17
| 184,655,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
primero.R
|
library(readxl)   # read Excel files
library(dplyr)    # data-frame manipulation
library(graphics) # base graphics

# Load the spreadsheet (no header row).
primero <- read_excel("primero.xlsx", col_names = FALSE)
typeof(primero)                       # inspect the storage type of the object
View(primero)
primero1 <- as.matrix(primero)        # coerce the table to a matrix
typeof(primero1)
sort(primero1, decreasing = FALSE)    # sorted view (result intentionally not stored)
segundo <- data.frame(sort(primero1, decreasing = FALSE))  # sorted values as a data frame
typeof(segundo)
segundo1 <- as.matrix(segundo)        # matrix form of the sorted values
typeof(segundo1)
colnames(segundo) <- c("edades")      # name the column ("edades" = ages)
colnames(segundo1) <- c("edades")
length(segundo1)                      # sample size n
# BUG FIX: the parentheses were misplaced.  The position of the median is
# (n + 1) / 2; the old code computed length(segundo1 + 1) / 2, which is n / 2.
lugardelamediana <- (length(segundo1) + 1) / 2   # position of the median
print(segundo1[15:16])                # the median positions are 15 and 16
(46 + 47) / 2                         # the median value is 46.5
stem(segundo$edades)                  # stem-and-leaf diagram of edades
tabulate(segundo$edades)              # counts for every integer 1..max
table(segundo$edades)                 # frequencies of observed values only
|
6b63a6587d5444b6653fb406de44671fbef56f8f
|
0fa414cecc28b3eae79096922a72924981247d0d
|
/taylor_sin.R
|
e557fd8d51cf63a27806163a0ee3eb84d15dc59e
|
[] |
no_license
|
Asmith9555/R_Functions
|
5e8c67c80892cf0533e2ead543499eb8f71c1011
|
a60cd817bdbb49dd9096a774a19049259b30409b
|
refs/heads/main
| 2023-03-15T20:30:45.185519
| 2021-03-10T19:55:48
| 2021-03-10T19:55:48
| 346,475,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 211
|
r
|
taylor_sin.R
|
#---
taylor_sin <- function(x, n){
taylor_approx <- 0
sin_function <- sin(x)
for (k in 0:n){
taylor_approx <- taylor_approx + ((-1)^k)*((x^((2*k)+1))/(factorial((2*k)+1)))
}
return(taylor_approx)
}
|
117454a9d7ce21c9151ab91dbea511f28534eac4
|
31fb3920c121d0f174e81d68b0185df98645e60b
|
/Code/install_pkg.R
|
8894e15a14610acbb10cc28c8423dac2d8cc35cb
|
[] |
no_license
|
omega-eol/ds_boilerplate
|
8af23660c236888d01f4c9ca352c8762f1c10f62
|
d3912cc9b9d7b308ce318e94d8e63d83b34eb4c2
|
refs/heads/master
| 2021-01-10T05:55:01.838847
| 2015-12-31T23:24:10
| 2015-12-31T23:24:10
| 44,648,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 743
|
r
|
install_pkg.R
|
# Packages I routinely use; update everything already installed first.
update.packages()

# Parallel computation support
install.packages("codetools")
install.packages("foreach")
install.packages("doMC")
install.packages("parallel")

# Sparse matrices
install.packages("Matrix")

# C++ integration
install.packages("Rcpp")
install.packages("RcppEigen")
install.packages("RcppArmadillo")

# Database support (requires the system libpq-dev package!)
install.packages("RPostgreSQL")

# Optimization
install.packages("DEoptim")
install.packages("GenSA")

# Classifiers
install.packages("randomForest")
install.packages("gbm")
install.packages("rpart")

# Data-science helpers
install.packages("caret")
install.packages("PRROC")
|
06e85b75a6ed819bb96d5fcb32afc3a6b89b703b
|
65cc65e883596f91bd8cf5c571127bd825b77026
|
/Weekend_Homework.R
|
5fc29abff67b6777c69fcd7aa651fcbcd50fbad9
|
[] |
no_license
|
Nkomo1997/Intro_to_R_UWC
|
4fa60b0e915e9899303ad65d735a5bbe99894475
|
0eb1a3021f4bba58172fdbabdf5c1c681cf61cd4
|
refs/heads/master
| 2020-04-19T16:06:20.164987
| 2019-05-14T20:23:17
| 2019-05-14T20:23:17
| 168,294,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,575
|
r
|
Weekend_Homework.R
|
# Instructions:
# Submit: Tuesday morning (before 10-am)
# Answer each of the sections in an individual script
# Answer all sections
# Add comments and notes throughout the script
# Section 1:
# Make use of the rast_feb and rast_aug dataset:
# Explore the dataset (Hint* head, tail, glimpse etc) - Make use of google for more functions on exploring a dataset
# Create a map by making use of the lat and long variables(for each of the datasets)
# Create a colour palette using the link in the document and make use of this colour palette on the map
# Add complete labels and titles to the map
# Add the names of the oceans (Atlantic and Indian Ocean) on the map, increase the size of the labels
# The map should include the north arrow and scale bar(both maps)
# Bonus marks for insetting (having a smaller map inside another map)
# Get creative, try new things.
# Section 2:(make details on top, load libraries, load data then explore it before coding)
# Make use of the ecklonia.csv dataset:
# Explore the data (Hint* head, tail, glimpse functions)
# Demonstrate the dimensions of the dataset
# Create three graphs; bargraph, line graph and boxplot: Write hypothesis for each of the graphs and answer these hypotheses
# Make use of the ggarrange function and arrange these three graphs created above into 1 plot
# All graphs must have labels as well as titles
# Calculate the mean,max,min,median and variance for the stipe_length, stipe_diameter for each of the sites (Hint* group_by site)
# Calculate standard error
# Determine the min and maximum frond length and stipe length
# Determine the overall summary of the dataset(use summary and put dataset name)
# Section 3:
# Make use of the SACTN_day1 data:
# Here create a graph showing temperature variation between sites(group_by=site)
# Select all the temperatures recorded at the site Port Nolloth during August or September.
# Select all the monthly temperatures recorded in Port Nolloth during the year 1994
# Calculate the average temperature by depth
# Work through the tidyverse section within the document. Show what you have done by creating comments/ notes throughout the script(tidy to tidiest)
# Section 4:
# Make use of any two built in datasets:
# Make use of the summarise, select, group_by functions(include in one code or run separately)
# Create at least two visualisations that were not done in the Intro R workshop(eg.density plot-type geom_ and select any graph that pops up and create)
## Good luck!!!
---------------------------------------------------------------------------------------------
|
b92704f38b2c4108032105445294b2597689cee3
|
1ed875767a44b0458a6ef079e51d0a0cbb545fff
|
/server.R
|
c924b113182094b63c75c911d16185ff23956142
|
[] |
no_license
|
fleschgordon/Coursera_DevDataProd
|
09835b8209736f6d4476eadae0a74d3c81359ec2
|
e3d40a0364d59b94b3766e89bcacc4db8f991ca1
|
refs/heads/master
| 2020-06-11T15:32:48.909293
| 2016-12-05T15:45:55
| 2016-12-05T15:45:55
| 75,625,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,140
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application.
#
# FIX: removed a duplicated library(shiny) call.
library(shiny)
library(dplyr)
library(tidyr)
library(ggplot2)
library(plotly)
library(RSQLite)
library(reshape2)
library(RColorBrewer)
library(gplots)
library(data.table)
library(d3heatmap)
library(viridis)

# Server: builds a per-season league points table from the soccer SQLite
# database and renders an interactive heatmap of club points per season.
shinyServer(function(input, output) {

  # Connect to data base ----------------------------------------------------
  con <- dbConnect(SQLite(), dbname="database.sqlite")
  player <- tbl_df(dbGetQuery(con,"SELECT * FROM player"))
  Match <- tbl_df(dbGetQuery(con,"SELECT * FROM Match"))
  Team <- tbl_df(dbGetQuery(con,"SELECT * FROM Team"))
  Country <- tbl_df(dbGetQuery(con,"SELECT * FROM Country"))
  League <- tbl_df(dbGetQuery(con,"SELECT * FROM League"))

  # Keep only the columns needed for the join keys and display values.
  player <- select(player,player_api_id, player_name) # use player_api_id as key for join
  Team <- select(Team, team_api_id, team_long_name, team_short_name) # use team_api_id as key for join
  Country <-select(Country, id, name) %>% rename(country_id = id) %>% rename(country_name = name) # use country_id as key for join
  League <- select(League, country_id, name) %>% rename(league_name = name) # use country_id as key for join
  Match <-select(Match, id, country_id, league_id, season, stage, date, match_api_id, home_team_api_id, away_team_api_id, home_team_goal, away_team_goal, home_player_1, home_player_2, home_player_3, home_player_4, home_player_5, home_player_6, home_player_7, home_player_8, home_player_9, home_player_10, home_player_11, away_player_1, away_player_2, away_player_3, away_player_4, away_player_5, away_player_6, away_player_7, away_player_8, away_player_9, away_player_10, away_player_11, goal, shoton, shotoff, foulcommit, card, cross, corner, possession)
  dbDisconnect(con)

  # Points per match: 3 for a win, 1 for a draw, 0 for a loss.
  PointsDf <-Match %>%
    select(1:11) %>%
    mutate(homePoint = if_else((home_team_goal > away_team_goal),3,if_else((home_team_goal == away_team_goal),1,0))) %>%
    mutate(awayPoint = if_else((home_team_goal > away_team_goal),0,if_else((home_team_goal == away_team_goal),1,3)))

  # Aggregate home points per season/league/team, keyed for the join below.
  tableHomeDt <- PointsDf %>%
    group_by(season, league_id, home_team_api_id) %>%
    summarise(pointsHome = sum(homePoint)) %>%
    ungroup() %>% data.table
  keycols = c("season", "league_id", "home_team_api_id" )
  setkeyv(tableHomeDt,keycols)

  # Same aggregation for away points.
  tableAwayDt <- PointsDf %>%
    group_by(season, league_id, away_team_api_id) %>%
    summarise(pointsAway = sum(awayPoint)) %>%
    ungroup() %>% data.table
  keycols = c("season", "league_id", "away_team_api_id" )
  setkeyv(tableAwayDt,keycols)

  # Keyed data.table join (home + away), then rank clubs within each league/season.
  tableHomeAwayDt <- tableHomeDt[tableAwayDt, nomatch=0] %>%
    mutate(points = pointsHome + pointsAway) %>%
    group_by(season, league_id) %>%
    mutate(rank = min_rank(desc(points)))

  # Attach readable league and team names for display.
  tableLong <- tableHomeAwayDt %>%
    left_join(League, by = c("league_id" = "country_id")) %>%
    left_join(Team, by = c("home_team_api_id" = "team_api_id")) %>%
    ungroup() %>%
    select(season, league_name, rank, team_long_name, points)

  output$Heatmap <- renderPlotly({
    seasonsdata <- subset(tableLong, season %in% input$selSeason)
    seasonsdata$points <- as.factor(seasonsdata$points)
    p <- ggplot(filter(seasonsdata, league_name %in% input$selLeague), mapping = aes(x = season, y = team_long_name)) +
      geom_tile(mapping = aes(fill = points),color="white", size=0.1 ) + facet_grid(league_name~., scales = "free_y") +scale_fill_viridis(discrete=TRUE) + theme(legend.position = "none") # free y scale to avoid that all clubs are on Y axis in all leagues
    ggplotly(p)
    #p
  })
})
|
c2d5ebfc94ab6f1e77b7d1cacec076a339e77689
|
99227ef3ea18a4ca96e9d1e6b1f6ea77dcf19dba
|
/readcsv.r
|
0392ece12f1e8a416d7c93d2c99a9fa8bc3961f2
|
[] |
no_license
|
sayefi/test-repo
|
4655df193df4e01b88926ba8d9a11a4cb7c0ef1f
|
5639756e4f6b960cdd840b2785392f31713c0a99
|
refs/heads/master
| 2021-01-02T23:05:13.755078
| 2017-08-29T03:11:22
| 2017-08-29T03:11:22
| 99,464,865
| 0
| 0
| null | 2017-08-06T16:38:25
| 2017-08-06T04:50:02
| null |
UTF-8
|
R
| false
| false
| 305
|
r
|
readcsv.r
|
# Quiz data: weekly SST values in fixed-width format (the remote file is a
# .for fixed-width file despite the .csv name used locally).
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
# FIX: make sure the target directory exists before downloading.
if (!dir.exists("data")) {
  dir.create("data")
}
download.file(fileUrl, "data/quizdata.csv")
list.files("./data")
# FIX: removed a dead read.csv() call whose result was immediately
# overwritten below; the file is fixed-width, so read.fwf is the right reader.
fData <- read.fwf("data/quizdata.csv", skip = 4,
                  widths = c(12, 7, 4, 9, 4, 9, 4, 9, 4))
sum(fData$V4)
|
a53bb5478e1bcf3b798a4a11e9dc307542a6c657
|
cd69c92a74016f7957dcd7a81c2f3a1fb91fba01
|
/test.R
|
401238c83d1dd8a8df22b5c6a2632feb9d591b1c
|
[] |
no_license
|
kalexanderk/TestTask_SAS
|
cff7f48861ac6fb3f67fb843ca3b24aa5abddb9d
|
78635f94b482e230891cc620cebcf2511708e7e8
|
refs/heads/master
| 2021-07-09T10:13:11.958354
| 2017-10-08T09:55:22
| 2017-10-08T09:55:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
r
|
test.R
|
library("openxlsx")

# Read the raw payments table (one row per lead_id / date).
df_initial <- read.xlsx("Data/payments.xlsx", sheet=1)
typeof(df_initial)
class(df_initial)

# SAS-style numeric dates count days from 1960-01-01.
change_date <- function(x) as.Date(x ,origin='1960-01-01')
df <- data.frame(df_initial['lead_id'], lapply(df_initial['dt'], change_date), df_initial['amount'])
# NOTE(review): "%A" produces locale-dependent weekday names; the
# comparisons below assume an English locale ('Saturday'/'Sunday') -- confirm.
df['weekday'] <- format(df['dt'], "%A")
summary(df)

# Circular shift: move the last k rows of d to the top.
shift <- function(d, k) rbind( tail(d,k), head(d,-k), deparse.level = 0 )
# previous row's lead_id, used to detect where a new lead starts
df['id_shifted'] <- shift(df['lead_id'], 1)

# For each row, count consecutive days on which the same lead has
# amount == 0 (money not returned); weekend days leave the counter unchanged.
find_days_not_returned <- function(df){
  # FIX: the running counter is initialised locally instead of relying on a
  # global variable `coun` defined outside the function.
  coun <- 0
  df['days_not_returned'] <- 0
  for (i in seq_len(nrow(df))){
    if ((df[i, 'lead_id']==df[i, 'id_shifted']) & (df[i, 'amount']==0)){
      if ((df[i, 'weekday']=='Saturday') | (df[i, 'weekday']=='Sunday')) {
        # weekends do not add to the overdue-day count
        df[i, 'days_not_returned'] <- coun
      }
      else{
        coun <- coun + 1
        df[i, 'days_not_returned'] <- coun
      }
    }
    else if ((df[i, 'lead_id']!=df[i, 'id_shifted']) | (df[i, 'amount']>0)){
      if (df[i, 'amount'] != 0){
        # payment made: reset the counter
        coun <- 0
        df[i,'days_not_returned'] <- coun
      }
      if (df[i, 'amount'] == 0){
        # new lead starting with a missed payment
        coun <- 1
        df[i, 'days_not_returned'] <- coun
      }
    }
  }
  return(df)
}

df_t <- find_days_not_returned(df)
df_t$id_shifted <- NULL   # drop the helper column
# df_t is the result
write.xlsx(df_t, "Files/result.xlsx")
|
1ecc58e74768765c1b12b9d3631239535b4e0c64
|
c79c01fc45ddf21fcd482563ff023d483374efd4
|
/R/sperrorest.R
|
5603345492032ea2bbf3655910a801e6cda79d61
|
[] |
no_license
|
cran/sperrorest
|
75700bd7da78449c5e5d25ac44d836a1a2600f40
|
00e8c435a36e08b8592373e8487271daad0ab2b9
|
refs/heads/master
| 2022-11-12T17:15:09.873658
| 2022-10-16T11:50:02
| 2022-10-16T11:50:02
| 17,700,028
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,762
|
r
|
sperrorest.R
|
#' @title Perform spatial error estimation and variable importance assessment
#'
#' @description {sperrorest} is a flexible interface for multiple types of
#' parallelized spatial and non-spatial cross-validation and bootstrap error
#' estimation and parallelized permutation-based assessment of spatial variable
#' importance.
#'
#' @details Custom predict functions passed to `pred_fun`, which consist of
#' multiple child functions, must be defined in one function.
#'
#' @section Parallelization:
#'
#' Running in parallel is supported via package \CRANpkg{future}.
#' Have a look at `vignette("future-1-overview", package = "future")`.
#' In short: Choose a backend and specify the number of workers, then call
#' `sperrorest()` as usual. Example:
#'
#' ```r
#' future::plan(future.callr::callr, workers = 2)
#' sperrorest()
#' ```
#' Parallelization at the repetition is recommended when using
#' repeated cross-validation. If the 'granularity' of parallelized
#' function calls is too fine, the overall runtime will be very
#' poor since the overhead for passing arguments and handling
#' environments becomes too large. Use fold-level parallelization
#' only when the processing time of individual folds is very
#' large and the number of repetitions is small or equals 1.
#'
#' Note that nested calls to `future` are not possible.
#' Therefore a sequential `sperrorest` call should be used for
#' hyperparameter tuning in a nested cross-validation.
#'
#' @importFrom future.apply future_lapply
#' @importFrom utils packageVersion tail
#' @importFrom stringr str_replace_all
#'
#' @param data a `data.frame` with predictor and response variables. Training
#' and test samples will be drawn from this data set by `train_fun` and
#' `test_fun`, respectively.
#'
#' @param formula A formula specifying the variables used by the `model`. Only
#' simple formulas without interactions or nonlinear terms should be used,
#' e.g. `y~x1+x2+x3` but not `y~x1*x2+log(x3)`. Formulas involving interaction
#' and nonlinear terms may possibly work for error estimation but not for
#' variable importance assessment, but should be used with caution.
#' The formula `y~...` is not supported, but `y~1` (i.e. no predictors) is.
#' @param coords vector of length 2 defining the variables in `data` that
#' contain the x and y coordinates of sample locations.
#' @param model_fun Function that fits a predictive model, such as `glm` or
#' `rpart`. The function must accept at least two arguments, the first one
#' being a formula and the second a data.frame with the learning sample.
#' @param model_args Arguments to be passed to `model_fun` (in addition to the
#' `formula` and `data` argument, which are provided by {sperrorest})
#' @param pred_fun Prediction function for a fitted model object created by
#' `model`. Must accept at least two arguments: the fitted `object` and a
#' `data.frame` `newdata` with data on which to predict the outcome.
#' @param pred_args (optional) Arguments to `pred_fun` (in addition to the
#' fitted model object and the `newdata` argument, which are provided by
#' {sperrorest}).
#' @param smp_fun A function for sampling training and test sets from `data`.
#' E.g. [partition_kmeans] for spatial cross-validation using spatial
#' *k*-means clustering.
#' @param smp_args (optional) Arguments to be passed to `smp_fun`.
#' @param train_fun (optional) A function for resampling or subsampling the
#' training sample in order to achieve, e.g., uniform sample sizes on all
#' training sets, or maintaining a certain ratio of positives and negatives in
#' training sets. E.g. [resample_uniform] or [resample_strat_uniform].
#' @param train_param (optional) Arguments to be passed to `resample_fun`.
#' @param test_fun (optional) Like `train_fun` but for the test set.
#' @param test_param (optional) Arguments to be passed to `test_fun`.
#' @param err_fun A function that calculates selected error measures from the
#' known responses in `data` and the model predictions delivered by
#' `pred_fun`. E.g. [err_default] (the default).
#' @param imp_variables (optional; used if `importance = TRUE`). Variables for
#' which permutation-based variable importance assessment is performed. If
#' `importance = TRUE` and `imp_variables` == `NULL`, all variables in
#' `formula` will be used.
#' @param imp_sample_from (default: `"test"`): specified if the permuted feature
#' values should be taken from the test set, the training set (a rather unlikely
#' choice), or the entire sample (`"all"`). The latter is useful in
#' leave-one-out resampling situations where the test set is simply too small
#' to perform any kind of resampling. In any case importances are
#' always estimated on the test set. (Note that resampling with replacement is
#' used if the test set is larger than the set from which the permuted values
#' are to be taken.)
#' @param imp_permutations (optional; used if `importance = TRUE`). Number of
#' permutations used for variable importance assessment.
#' @param importance logical (default: `FALSE`): perform permutation-based
#' variable importance assessment?
#' @param distance logical (default: `FALSE`): if `TRUE`, calculate mean
#' nearest-neighbour distances from test samples to training samples using
#' [add.distance.represampling].
#' @param do_gc numeric (default: 1): defines frequency of memory garbage
#' collection by calling [gc]; if `< 1`, no garbage collection; if `>= 1`, run
#' a [gc] after each repetition; if `>= 2`, after each fold.
#' @param progress character (default: `all`): Whether to show progress
#' information (if possible). Default shows repetition, fold and (if enabled)
#' variable importance progress. Set to `"rep"` for repetition information
#' only or `FALSE` for no progress information.
#' @param mode_rep,mode_fold character (default: `"future"` and `"sequential"`,
#' respectively): specifies whether to parallelize the execution at the repetition
#' level, at the fold level, or not at all.
#' Parallel execution uses `future.apply::future_lapply()` (see details below).
#' It is only possible to parallelize at the repetition level or at
#' the fold level.
#' The `"loop"` option uses a `for` loop instead of an `lapply`
#' function; this option is for debugging purposes.
#' @param benchmark (optional) logical (default: `FALSE`): if `TRUE`, perform
#' benchmarking and return `sperrorestbenchmark` object.
#' @param verbose Controls the amount of information printed while processing.
#' Defaults to 0 (no output).
#'
#' @return A list (object of class {sperrorest}) with (up to) six components:
#' - error_rep: `sperrorestreperror` containing
#' predictive performances at the repetition level
#' - error_fold: `sperroresterror` object containing predictive
#' performances at the fold level
#' - represampling: [represampling] object
#' - importance: `sperrorestimportance` object containing
#' permutation-based variable importances at the fold level
#' - benchmark: `sperrorestbenchmark` object containing
#' information on the system the code is running on, starting and
#' finishing times, number of available CPU cores and runtime performance
#' - package_version: `sperrorestpackageversion` object containing
#' information about the {sperrorest} package version
#'
#' @references Brenning, A. 2012. Spatial cross-validation and bootstrap for
#' the assessment of prediction rules in remote sensing: the R package
#' 'sperrorest'.
#' 2012 IEEE International Geoscience and Remote Sensing Symposium (IGARSS),
#' 23-27 July 2012, p. 5372-5375.
#' <https://ieeexplore.ieee.org/document/6352393>
#'
#' Brenning, A. 2005. Spatial prediction models for landslide hazards: review,
#' comparison and evaluation. Natural Hazards and Earth System Sciences,
#' 5(6), 853-862. \doi{10.5194/nhess-5-853-2005}
#'
#' Brenning, A., S. Long & P. Fieguth. 2012. Detecting rock glacier
#' flow structures using Gabor filters and IKONOS imagery.
#' Remote Sensing of Environment, 125, 227-237.
#' \doi{10.1016/j.rse.2012.07.005}
#'
#' Russ, G. & A. Brenning. 2010a. Data mining in precision agriculture:
#' Management of spatial information. In 13th International Conference on
#' Information Processing and Management of Uncertainty, IPMU 2010; Dortmund;
#' 28 June - 2 July 2010. Lecture Notes in Computer Science, 6178 LNAI: 350-359.
#'
#' Russ, G. & A. Brenning. 2010b. Spatial variable importance assessment for
#' yield prediction in Precision Agriculture. In Advances in Intelligent
#' Data Analysis IX, Proceedings, 9th International Symposium,
#' IDA 2010, Tucson, AZ, USA, 19-21 May 2010.
#' Lecture Notes in Computer Science, 6065 LNCS: 184-195.
#'
#' @examples
#'
#' ## ------------------------------------------------------------
#' ## Classification tree example using non-spatial partitioning
#' ## ------------------------------------------------------------
#'
#' # Muenchow et al. (2012), see ?ecuador
#' fo <- slides ~ dem + slope + hcurv + vcurv + log.carea + cslope
#'
#' library(rpart)
#' mypred_part <- function(object, newdata) predict(object, newdata)[, 2]
#' ctrl <- rpart.control(cp = 0.005) # show the effects of overfitting
#' # show the effects of overfitting
#' fit <- rpart(fo, data = ecuador, control = ctrl)
#'
#' ### Non-spatial cross-validation:
#' mypred_part <- function(object, newdata) predict(object, newdata)[, 2]
#' nsp_res <- sperrorest(
#' data = ecuador, formula = fo,
#' model_fun = rpart,
#' model_args = list(control = ctrl),
#' pred_fun = mypred_part,
#' progress = TRUE,
#' smp_fun = partition_cv,
#' smp_args = list(repetition = 1:2, nfold = 3)
#' )
#' summary(nsp_res$error_rep)
#' summary(nsp_res$error_fold)
#' summary(nsp_res$represampling)
#' # plot(nsp_res$represampling, ecuador)
#'
#' ### Spatial cross-validation:
#' sp_res <- sperrorest(
#' data = ecuador, formula = fo,
#' model_fun = rpart,
#' model_args = list(control = ctrl),
#' pred_fun = mypred_part,
#' progress = TRUE,
#' smp_fun = partition_kmeans,
#' smp_args = list(repetition = 1:2, nfold = 3)
#' )
#' summary(sp_res$error_rep)
#' summary(sp_res$error_fold)
#' summary(sp_res$represampling)
#' # plot(sp_res$represampling, ecuador)
#'
#' smry <- data.frame(
#' nonspat_training = unlist(summary(nsp_res$error_rep,
#' level = 1
#' )$train_auroc),
#' nonspat_test = unlist(summary(nsp_res$error_rep,
#' level = 1
#' )$test_auroc),
#' spatial_training = unlist(summary(sp_res$error_rep,
#' level = 1
#' )$train_auroc),
#' spatial_test = unlist(summary(sp_res$error_rep,
#' level = 1
#' )$test_auroc)
#' )
#' boxplot(smry,
#' col = c("red", "red", "red", "green"),
#' main = "Training vs. test, nonspatial vs. spatial",
#' ylab = "Area under the ROC curve"
#' )
#' @export
sperrorest <- function(formula,
data,
coords = c("x", "y"),
model_fun,
model_args = list(),
pred_fun = NULL,
pred_args = list(),
smp_fun = partition_cv,
smp_args = list(),
train_fun = NULL,
train_param = NULL,
test_fun = NULL,
test_param = NULL,
err_fun = err_default,
imp_variables = NULL,
imp_permutations = 1000,
imp_sample_from = c("test", "train", "all"),
importance = !is.null(imp_variables),
distance = FALSE,
do_gc = 1,
progress = "all",
benchmark = FALSE,
mode_rep = c("future", "sequential", "loop"),
mode_fold = c("sequential", "future", "loop"),
verbose = 0) {
if (verbose >= 1) {
cat("sperrorest version", as.character(packageVersion("sperrorest")), "\n")
cat("(c) A. Brenning, P. Schratz, and contributors\n")
cat("Cite as Brenning (2012), doi: 10.1109/igarss.2012.6352393\n")
}
# set global variables for R CMD Check
current_res <- NULL
pooled_obs_train <- NULL
pooled_obs_test <- NULL
# if benchmark = TRUE, start clock
if (benchmark) {
start_time <- Sys.time()
}
# Some checks:
if (missing(model_fun)) {
stop("'model_fun' is a required argument")
}
if (any(all.vars(formula) == "...")) {
stop("formula of the form lhs ~ ... not accepted by 'sperrorest'\n
specify all predictor variables explicitly")
}
stopifnot(is.function(model_fun))
stopifnot(is.function(smp_fun))
if (!is.null(train_fun)) {
stopifnot(is.function(train_fun))
}
if (!is.null(test_fun)) {
stopifnot(is.function(test_fun))
}
stopifnot(is.function(err_fun))
if (importance) {
stopifnot(is.numeric(imp_permutations))
if (!is.null(imp_variables)) {
stopifnot(is.character(imp_variables)) # nocov
}
imp_sample_from <- match.arg(imp_sample_from)
}
stopifnot(is.character(coords))
stopifnot(length(coords) == 2)
mode_rep <- match.arg(mode_rep)
mode_fold <- match.arg(mode_fold)
if ((mode_rep == "future") & (mode_fold == "future")) {
warning("Only parallelization at either the repetition level or the fold level\nis supported. Using mode_fold = 'sequential'.")
mode_fold <- "sequential"
}
mode_dist <- mode_rep
# Check if user is trying to bypass the normal mechanism for
# generating training and test data sets and for passing formulas:
if (any(names(model_args) == "formula")) {
stop("'model_args' cannot have a 'formula' element")
}
if (any(names(model_args) == "data")) {
stop("'model_args' cannot have a 'data' element")
}
if (any(names(pred_args) == "object")) {
stop("'pred_args' cannot have an 'object' element:\n
this will be generated by 'sperrorest'")
}
if (any(names(pred_args) == "newdata")) {
stop("'pred_args' cannot have a 'newdata' element:\n
this will be generated by 'sperrorest'")
}
# account for tibbles as input
if (any(class(data) == "tbl")) {
data <- as.data.frame(data)
}
# Name of response variable:
response <- all.vars(formula)[1]
if (verbose >= 1)
cat(date(), "Creating resampling object...\n")
smp_args$data <- data
smp_args$coords <- coords
resamp <- do.call(smp_fun, args = smp_args)
if (distance) {
if (verbose >= 1)
cat(date(), "Adding distance information to resampling object...\n")
resamp <- add.distance(object = resamp, data = data,
coords = coords, fun = mean,
mode = mode_dist[1])
if (verbose >= 3) {
cat("\n-----------------------------\nResampling object:",
"\n-----------------------------\n")
print(resamp)
cat("\n-----------------------------\n")
}
}
res <- lapply(resamp, unclass)
class(res) <- "sperroresterror"
pooled_error <- NULL
### Permutation-based variable importance assessment (optional):
impo <- NULL
if (importance) {
# Importance of which variables:
if (is.null(imp_variables)) {
imp_variables <- all.vars(formula)[-1]
# imp_variables <- strsplit(as.character(formula)[3], " + ",
# fixed = TRUE)[[1]]
}
if (length(imp_variables) == 0) {
importance <- FALSE
warning("importance is TRUE, but there are no predictors,\n",
"or no predictors have been selected; using importance = FALSE.")
}
}
if (importance) {
# Dummy data structure that will later be populated with the results:
impo <- resamp
# Create a template that will contain results of variable importance
# assessment:
imp_one_rep <- as.list(rep(NA, length(imp_variables)))
names(imp_one_rep) <- imp_variables
tmp <- as.list(rep(NA, imp_permutations))
names(tmp) <- as.character(seq_len(imp_permutations))
for (vnm in imp_variables) {
imp_one_rep[[vnm]] <- tmp
}
rm(tmp)
}
# runreps call Sun Apr 9 13:28:31 2017 ------------------------------
# mode = "future" Sun May 21 12:04:55 2017 -----------------------------
if (verbose >= 1)
cat(date(), "Running the model assessment...\n")
if (mode_rep == "sequential") {
my_res <- lapply(seq_along(resamp), function(x) {
runreps(
current_sample = resamp[[x]],
data = data,
formula = formula,
response = response,
do_gc = do_gc,
imp_one_rep = imp_one_rep,
pred_fun = pred_fun,
model_args = model_args,
model_fun = model_fun,
imp_permutations = imp_permutations,
imp_variables = imp_variables,
imp_sample_from = imp_sample_from,
importance = importance,
current_res = current_res,
pred_args = pred_args,
coords = coords,
progress = progress,
mode_fold = mode_fold,
pooled_obs_train = pooled_obs_train,
train_fun = train_fun,
train_param = train_param,
test_fun = test_fun,
test_param = test_param,
pooled_obs_test = pooled_obs_test,
err_fun = err_fun,
i = x
)
}
)
} else if (mode_rep == "future") {
my_res <- future.apply::future_lapply(seq_along(resamp), function(x) {
runreps(
current_sample = resamp[[x]],
data = data,
formula = formula,
response = response,
do_gc = do_gc,
imp_one_rep = imp_one_rep,
pred_fun = pred_fun,
model_args = model_args,
model_fun = model_fun,
imp_permutations = imp_permutations,
imp_variables = imp_variables,
imp_sample_from = imp_sample_from,
importance = importance,
current_res = current_res,
pred_args = pred_args,
coords = coords,
progress = progress,
mode_fold = mode_fold,
pooled_obs_train = pooled_obs_train,
train_fun = train_fun,
train_param = train_param,
test_fun = test_fun,
test_param = test_param,
pooled_obs_test = pooled_obs_test,
err_fun = err_fun,
i = x
)
},
future.seed = TRUE
)
} else if (mode_rep == "loop") {
# for loop as a safety net for debugging purposes:
my_res <- list()
for (i_rep in seq_along(resamp)) {
my_res[[i_rep]] <-
runreps(
current_sample = resamp[[i_rep]],
data = data,
formula = formula,
response = response,
do_gc = do_gc,
imp_one_rep = imp_one_rep,
pred_fun = pred_fun,
model_args = model_args,
model_fun = model_fun,
imp_permutations = imp_permutations,
imp_variables = imp_variables,
imp_sample_from = imp_sample_from,
importance = importance,
current_res = current_res,
pred_args = pred_args,
coords = coords,
progress = progress,
mode_fold = mode_fold,
pooled_obs_train = pooled_obs_train,
train_fun = train_fun,
train_param = train_param,
test_fun = test_fun,
test_param = test_param,
pooled_obs_test = pooled_obs_test,
err_fun = err_fun,
i = i_rep
)
if (verbose >= 3) {
cat("\n-----------------------------\nResults:",
"\n-----------------------------\n")
print(my_res[[i_rep]])
cat("-----------------------------\n\n")
}
}
} else stop("invalid mode_rep")
### format parallel outputs ----
if (verbose >= 1)
cat(date(), "Postprocessing...\n")
# overwrite resamp object with possibly altered resample object from
# runfolds
# this applies if a custom test_fun or train_fun with a sub-resampling
# method is used
if (!is.null(test_fun) | !is.null(train_fun)) {
if (verbose >= 2)
cat(date(), " - Copy possibly altered resampling object...")
for (i in seq_along(resamp)) {
for (j in seq_along(resamp[[i]])) {
# ...was [[1]], which assumes that all repetitions have equal
# number of folds.
resamp[[i]][[j]] <- my_res[[i]][["resampling"]][[j]][[j]]
}
}
}
## 2021-06-21:
## removed NA check; NAs should be handled by
## summary methods...
# check if any rep is NA in all folds and if, remove entry
# this happens e.g. in maxent #nolint
# if (verbose >= 2)
# cat(date(), " - Check NAs...\n")
#
# check_na <- lapply(my_res, function(x) all(is.na(x))) # nolint
# check_na_flat <- unlist(check_na)
#
# if (any(check_na_flat) == TRUE) {
# check_na <- as.numeric(which(lapply(my_res, function(x) {
# all(is.na(x))
# }) ))
#
# my_res <- my_res[-check_na]
#
# }
# assign names to sublists - otherwise `transfer_parallel_output` doesn't work
if (verbose >= 2)
cat(date(), " - Rename sublists...\n")
for (i in seq_along(my_res)) {
names(my_res[[i]]) <- c(
"error", "pooled_error", "importance",
"non-converged-folds"
)
}
# flatten list & calc sum
if (verbose >= 2)
cat(date(), " - Flatten lists...\n")
not_converged_folds <- sum(
unlist(lapply(my_res,
function(x) unlist(x[["non-converged-folds"]]))))
# transfer results of lapply() to respective data objects
if (verbose >= 2)
cat(date(), " - Transfer outputs...\n")
my_res_mod <- transfer_parallel_output(my_res, res, impo, pooled_error) # nolint
pooled_error <- as.data.frame(my_res_mod$pooled_error)
rownames(pooled_error) <- NULL
class(pooled_error) <- "sperrorestreperror"
if (importance) {
impo <- my_res_mod$impo
class(impo) <- "sperrorestimportance"
}
if (benchmark) {
end_time <- Sys.time()
my_bench <- list(
system.info = Sys.info(), t_start = start_time,
t_end = end_time, cpu_cores = future::nbrOfWorkers(),
# was: parallel::detectCores(), which is the number of physically
# available cores, but only nbrOfWorkers() can be used by this process.
runtime_performance = end_time - start_time
)
class(my_bench) <- "sperrorestbenchmark"
} else {
my_bench <- NULL
}
package_version <- packageVersion("sperrorest")
class(package_version) <- "sperrorestpackageversion"
if (verbose >= 1)
cat(date(), "Done.\n")
res <- list(
error_rep = pooled_error, error_fold = my_res_mod$res,
represampling = resamp, importance = impo, benchmark = my_bench,
package_version = package_version
)
class(res) <- "sperrorest"
if (not_converged_folds > 0) {
if (length(smp_args$repetition) > 1) {
smp_args$repetition <- tail(smp_args$repetition, n = 1)
}
# print counter
cat(sprintf(
"%s folds of %s total folds (%s rep * %s folds) caused errors or returned NA (e.g., did not converge).", # nolint
not_converged_folds, smp_args$repetition * smp_args$nfold,
smp_args$repetition, smp_args$nfold
))
}
res
}
|
c79d3277dd036e765197c768eeb25295574eb3b1
|
5757fcdbcf06bec35c9c72fb07a5523f99f137f2
|
/R/get-targets.r
|
1ab4444246ac6ffb5914741b69a80c9447a0845d
|
[] |
no_license
|
nrminor/ebird-target-map
|
080eff43ca5d309e43f18faa357b9e7d52d70938
|
a4d121d8afc4fd6f2633d263f79afcc9a17b7a94
|
refs/heads/master
| 2023-07-06T08:25:41.105670
| 2019-03-12T15:59:05
| 2019-03-12T15:59:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
get-targets.r
|
#' Compute target species for each region.
#'
#' Averages eBird frequency by region/species (optionally restricted to one
#' month), keeps species at or above the frequency threshold, flags whether
#' each species is already on the user's life list, and sorts by region and
#' descending frequency.
#'
#' @param life_list Character vector of species codes already seen.
#' @param region_list Data frame with columns region_code, species_code,
#'   frequency, and month.
#' @param freq_thresh Minimum mean frequency to keep a species.
#' @param period Month number to restrict to; 0 (or negative) means all months.
get_targets <- function(life_list, region_list, freq_thresh, period) {
  # A non-positive period means "use the whole year".
  if (period > 0) {
    region_list <- dplyr::filter(region_list, month == period)
  }
  grouped <- dplyr::group_by(region_list, region_code, species_code)
  mean_freq <- dplyr::summarise(grouped, frequency = mean(frequency))
  mean_freq <- dplyr::ungroup(mean_freq)
  frequent <- dplyr::filter(mean_freq, frequency >= freq_thresh)
  flagged <- dplyr::mutate(frequent, seen = (species_code %in% life_list))
  dplyr::arrange(flagged, region_code, dplyr::desc(frequency))
}
|
e10854fd721b5d1ce3d76ec319cbed1165b146fd
|
fb63f02100ac34f6d050e22d2028904e1c7dd462
|
/R/RIT.R
|
8ebd21d02a7a9995a2c3a4f012408c0c6da7aec7
|
[] |
no_license
|
chanzuckerberg/FSInteractX
|
f99a687428ef97f2fb902203fc2f68475e1c398b
|
ea8a9e7996d720a973ad0b2abca2dc01662b6561
|
refs/heads/master
| 2020-04-07T00:24:27.623146
| 2018-11-20T21:38:29
| 2018-11-20T21:38:29
| 157,902,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,882
|
r
|
RIT.R
|
# Random Intersection Trees (RIT).
#
# Front end for the compiled back-ends RIT_1class() / RIT_2class().  Runs the
# one-class variant on z alone, or the two-class variant when z0 is supplied.
# Dense matrices and sparse matrices from the Matrix package are both
# accepted; sparse inputs are transposed and unpacked into their @i / @p
# slots for the C++ side ('ExportedcppFunctions.cpp').
#
# Arguments:
#   z, z0        observation-by-feature binary matrices (z0 optional; its
#                presence selects the two-class variant)
#   branch       branching factor of each tree (>= 1)
#   depth        depth of each tree (>= 2)
#   n_trees      number of trees to grow (>= 1)
#   theta0, theta1  prevalence thresholds in [0, 1] for the two classes
#   min_inter_sz smallest interaction size reported (>= 2)
#   L            size parameter forwarded to the C++ back-end (>= 1)
#                -- NOTE(review): exact meaning not visible here; confirm
#                against the package documentation.
#   n_cores      number of threads for the C++ back-end (>= 1)
#   output_list  if TRUE, return raw lists; otherwise convert each result to
#                a data.frame via convert.to.data.frame()
#
# Value: interactions with their prevalences, ordered by decreasing
# prevalence; for the two-class variant a list with elements Class1 and
# Class0.
RIT <- function(z, z0, branch=5, depth=10L, n_trees=100L, theta0=0.5, theta1=theta0,
                min_inter_sz=2L, L=100L, n_cores=1L, output_list=FALSE) {
    ## Coerce tuning parameters to the types expected by the C++ back-end.
    L <- as.integer(L)
    branch <- as.double(branch)
    depth <- as.integer(depth)
    n_trees <- as.integer(n_trees)
    theta0 <- as.double(theta0)
    theta1 <- as.double(theta1)
    min_inter_sz <- as.integer(min_inter_sz)
    n_cores <- as.integer(n_cores)
    output_list <- as.logical(output_list)
    if (L < 1L)
        stop("L must be >= 1")
    if (branch < 1)
        stop("branch must be >= 1")
    if (depth <= 1L)
        stop("depth must be >= 2")
    if (n_trees <= 0L)
        # FIX: message previously said ">= 0", contradicting the check above,
        # which rejects n_trees == 0.
        stop("n_trees must be >= 1")
    if (min_inter_sz < 2L)
        stop("min_inter_sz must be >= 2")
    if (n_cores <1L)
        stop("n_cores must be >= 1")
    if(theta0<0 || theta0>1){
        stop("theta0 must be between 0 and 1")
    }
    if(theta1<0 || theta1>1){
        stop("theta1 must be between 0 and 1")
    }
    ## check z,z0 and convert to suitable data types for 'ExportedcppFunctions.cpp', then carry out RIT
    is_2_class <- !missing(z0)
    is_sparse <- is(z,"Matrix")
    # If z or z0 is sparse then also make the other one sparse, so that they are of the same type
    if (is_2_class) {
        if (is_sparse && !is(z0,"Matrix")) {
            z0 <- Matrix(z0, sparse=TRUE)
        }
        else if (is(z0,"Matrix") && !is_sparse) {
            z <- Matrix(z, sparse=TRUE)
            is_sparse <- TRUE
        }
    }
    if (is_sparse) {
        # Transpose and unpack into the (row-index, column-pointer) slots of
        # the compressed-sparse representation expected by the C++ code.
        if (is_2_class) {
            if (nrow(z0) == 0) stop("z0 must have at least one row")
            if (ncol(z0) != ncol(z)) stop("z and z0 must have the same number of columns")
            z0 <- Matrix::t(z0)
            z0 <- list(z0@i, z0@p)
        }
        if (nrow(z) == 0) stop("z must have at least one row")
        z <- t(z)
        z <- list(z@i, z@p)
    }
    ## if z, z0 are not sparse matrices, check that they are matrices
    if (!is_sparse) {
        if (!is.matrix(z)) stop("z must be a matrix")
        if (nrow(z) == 0) stop("z must have more than 0 rows")
        if (is_2_class) {
            if (!is.matrix(z0)) stop("z0 must be a matrix")
            if (nrow(z0) == 0) stop("z0 must have more than 0 rows")
            if (ncol(z) != ncol(z0)) stop("z and z0 must have the same number of columns")
        }
    }
    # Two-class variant: run the back-end, then order each class's
    # interactions by decreasing prevalence within that class.
    if (is_2_class) {
        output <- RIT_2class(z, z0, L, branch, depth, n_trees, theta0, theta1, min_inter_sz, n_cores, is_sparse)
        # reorder output in decreasing prevalence
        prev_order <- order(output$Class1$Prevalence1, decreasing=TRUE)
        prev_order0 <- order(output$Class0$Prevalence0, decreasing=TRUE)
        output$Class1$Prevalence1 <- output$Class1$Prevalence1[prev_order]
        output$Class1$Prevalence0 <- output$Class1$Prevalence0[prev_order]
        output$Class1$Interactions <- output$Class1$Interactions[prev_order]
        output$Class0$Prevalence0 <- output$Class0$Prevalence0[prev_order0]
        output$Class0$Prevalence1 <- output$Class0$Prevalence1[prev_order0]
        output$Class0$Interactions <- output$Class0$Interactions[prev_order0]
        # check whether output should be a list or a data.frame
        if(!output_list) {
            data1 <- convert.to.data.frame(output$Class1)
            data0 <- convert.to.data.frame(output$Class0)
            output <- list("Class1"=data1, "Class0"=data0)
        }
        return(output)
    }
    # One-class variant.
    output<-RIT_1class(z, L, branch, depth, n_trees, min_inter_sz, n_cores, is_sparse)
    # reorder output in decreasing prevalence
    prev_order <- order(output$Prevalence, decreasing=TRUE)
    output$Prevalence <- output$Prevalence[prev_order]
    output$Interactions <- output$Interactions[prev_order]
    # check whether output should be a list or a data.frame
    if (!output_list) {
        output<-convert.to.data.frame(output)
    }
    return(output)
}
|
5ae27923fb37dd41fc3b241fe390393bc20737a3
|
9247d61661679ce45b608f778a7783a9d57b4557
|
/pda.R
|
ed76b54a89d4b9f96296c119334e8ed0658c0491
|
[] |
no_license
|
smartbenben/platform
|
3ab887918d86e201e0d3a2294974cc6134221f43
|
1873260a5c063f733961e73c83e801a05639bff5
|
refs/heads/master
| 2022-12-26T06:29:27.682053
| 2020-10-03T00:06:09
| 2020-10-03T00:06:09
| 290,269,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,523
|
r
|
pda.R
|
# Closed-form probability distribution of treatment allocation.
#
# Arguments:
#   N          number of patients (scalar)
#   p          success probabilities, one per treatment arm (vector)
#   nmin       minimum-allocation parameter forwarded to firstEN()/nextEN()/ERN1()
#   early.stop whether early stopping is allowed (forwarded)
#   priora, priorb  priors for beta(alpha, beta) (forwarded)
#
# Value: the summarized design-point table for N patients, augmented with
# EaN (expected treatment allocation per patient count) and p_alloc
# (allocation probabilities per patient count).
pda <- function(N, p, nmin, early.stop=FALSE,
                priora=NULL, priorb=NULL) {
  # number of treatment arms
  k <- length(p)
  print(noquote(c("Number of design points", choose(N + k - 1, k - 1))))
  # tree set for the first patient
  EN <- firstEN(p, nmin, early.stop,
                priora, priorb)
  # Build the allocation table once and reuse it: the original evaluated
  # Ea(buildp(EN)) twice (for p_alloc and EaN) and buildp(EN) a third time
  # in the N == 1 early return below.
  alloc <- buildp(EN)
  first_row <- c(1, Ea(alloc))
  p_alloc <- first_row  # allocation probabilities, one row per patient count
  EaN <- first_row      # expected allocation, one row per patient count
  # we are done if N = 1
  if (N == 1)
    return(summary(alloc))
  # for each additional patient
  for (i in 2:N) {
    # tree set for the next patient
    EN <- nextEN(EN, p, nmin, early.stop,
                 priora, priorb)
    # table of expected treatment-arm allocation
    alloc <- buildp(EN)
    # probability of each arm being allocated to patient i: weight each
    # design point's allocation probability by its row weight (last column)
    p_new <- rowSums(apply(alloc, 1, function(x) {
      x[length(p) + 1] * ERN1(n = unlist(x[1:length(p)]), p,
                              nmin, early.stop, priora, priorb)
    }))
    p_alloc <- rbind(p_alloc, c(i, p_new))
    EaN <- rbind(EaN, c(i, Ea(alloc)))
    # progress report; flush so it appears before the loop completes
    print(noquote(c("N: ", i)))
    flush.console()
  }
  # Output, summarized.  'alloc' already holds buildp(EN) for the final
  # iteration, so it is not rebuilt here.
  # NOTE(review): the N == 1 path uses summary() while this path uses
  # summarize() -- confirm which is intended; both kept as written.
  pda <- summarize(alloc)
  colnames(EaN) <- c("N", LETTERS[1 : k])
  # expected treatment allocations
  pda$EaN <- EaN
  pda$p_alloc <- p_alloc
  return(pda)
}
|
47e71db8dbaeea9e8cf27588a8dfe87bdecb474c
|
4136fec9aeffcf809ee5f5e2d9dc1c7f8a413ba9
|
/code.R
|
9f798c5ce741599b65f4e191880d54783a691da1
|
[] |
no_license
|
atamagnini/r-project-happiness-report
|
1c4831b09205e058f5c46ffe1858adae70fbb89a
|
86367576ddafecb290d75c4222c7531a24fa2035
|
refs/heads/main
| 2023-08-19T10:30:48.135718
| 2021-09-29T20:19:05
| 2021-09-29T20:19:05
| 411,814,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,278
|
r
|
code.R
|
# World Happiness Report (2019) choropleth: reads the report CSV, matches
# country names to Highcharts' world map, and renders/saves the map.
library(tidyverse)
library(highcharter)
library(webshot)
library(htmlwidgets)
dataset <-
  read.csv('~/dataset.csv')
# Reshape dataframe: drop unused columns 3-5 and name the country column.
df <- as.data.frame(dataset)
df <- df[, -c(3:5)]
colnames(df)[1] <- 'Country'
glimpse(df)
df$Country <- as.character(as.factor(df$Country))
# Replace value in specific cells so the name matches the map's naming.
df[df == 'United States'] <- 'United States of America'
# map
# NOTE(review): map_data is computed but never used below; kept only in case
# the download warms highcharter's map-data cache -- confirm before removing.
map_data <- get_data_from_map(download_map_data("custom/world"))
p <- hcmap(map = 'custom/world', download_map_data = getOption("highcharter.download_map_data"), data = df,
           value = 'Ladder', joinBy = c('name', 'Country')) %>%
  hc_tooltip(useHTML = TRUE, headerFormat = '<b>World ranking position of<br>',
             pointFormat = '{point.name}: N°{point.value}') %>%
  hc_title(text = 'World happiness report (2019)',
           style = list(fontWeight = 'bold', fontSize = '20px'),
           align = 'left') %>%
  # FIX: corrected user-visible typo "Minsitry" -> "Ministry" in the credits.
  hc_credits(enabled = TRUE, text = 'Map by Antonela Tamagnini
             <br> Source: WHO, Ministry of Health Department') %>%
  hc_mapNavigation(enabled = TRUE) %>%
  hc_colorAxis(stops = color_stops(8, c("#0000CD","#8ba9fe","#fee08b","#434348")))
p
saveWidget(widget = p, file = "plot.html")
webshot(url = 'plot.html', file = 'plot.png')
|
8db12f0308292197f80798f4fa846128af327b3d
|
a4bcc17bab2f4ac27c37f8f0027168211a9bc24d
|
/man/subsetTxi.Rd
|
c869093d171c2379aae2ed8d5ed344fdf65f32c5
|
[] |
no_license
|
hcnh174/hlsgr
|
7da6307f535038d00e9030eab61e1a0045fc745a
|
23b0d0cce887eef200c90fccb5a667b4fc1f37cd
|
refs/heads/master
| 2023-04-13T18:44:28.519716
| 2023-04-03T05:38:45
| 2023-04-03T05:38:45
| 262,727,333
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
subsetTxi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rnaseq.R
\name{subsetTxi}
\alias{subsetTxi}
\title{Subset txi data by sample or gene}
\usage{
subsetTxi(txi, samples, include_genes = rownames(txi$counts))
}
\arguments{
\item{txi}{A tximport-style list whose elements include a \code{counts} matrix (genes in rows, samples in columns).}
\item{samples}{Samples (columns) to retain.}
\item{include_genes}{Genes (rows) to retain; defaults to all genes, i.e. \code{rownames(txi$counts)}.}
}
\value{
The \code{txi} list subset to the requested samples and genes.
}
\description{
Subset txi data by sample or gene
}
|
aecd883eec83d90fccb70dd13d335e7fb3b2407f
|
f43ff1e09138649558c2e90a75bd2d4f3cbbdbb6
|
/source/Windows/R-Portable-Win/library/digest/tinytest/test_raw.R
|
902a1e2721ceb04ed701258abaff687a13dbe7ce
|
[
"GPL-2.0-only",
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
romanhaa/Cerebro
|
5b2d9371403c52f60341894f84cd0f6a006cc930
|
946ed178c986027d60af6013e63d1fc51ae8b371
|
refs/heads/master
| 2022-12-02T15:49:57.705873
| 2021-11-20T11:47:12
| 2021-11-21T17:09:37
| 164,686,297
| 87
| 23
|
MIT
| 2022-11-10T18:21:44
| 2019-01-08T16:09:59
|
HTML
|
UTF-8
|
R
| false
| false
| 692
|
r
|
test_raw.R
|
## Tests for raw = TRUE output of hmac() and digest(), plus sha1() applied
## to a raw vector (exercising the sha1.raw method).
suppressMessages(library(digest))
# HMAC-MD5 with key "Jefe" must reproduce the known 16-byte digest.
hmac_md5_raw <- hmac("Jefe", 'what do ya want for nothing?', "md5", raw=TRUE)
hmac_md5_ref <- as.raw(c(0x75, 0x0c, 0x78, 0x3e, 0x6a, 0xb0, 0xb5, 0x03, 0xea,
                         0xa8, 0x6e, 0x31, 0x0a, 0x5d, 0xb7, 0x38))
expect_equal(hmac_md5_raw, hmac_md5_ref)
# SHA-1 of a short string, returned as a 20-byte raw vector.
sha1_raw <- digest("The quick brown fox", algo="sha1", raw=TRUE)
sha1_ref <- as.raw(c(0x5f, 0x79, 0x8c, 0xb4, 0xd8, 0x14, 0x4e, 0xec, 0x35,
                     0xf4, 0xd0, 0x79, 0x3e, 0xf2, 0x1e, 0x55, 0xce, 0xb6, 0xa7, 0x88))
expect_equal(sha1_raw, sha1_ref)
## feed raw to sha1() to test sha1.raw() as well
expect_true(sha1(sha1_ref) == "75a2995eeec0fcb5d7fa97c676a37f4e224981a1")
|
e3a4200344ff66bb5814dd81e94b562768599c00
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/bpca/demo/rock.var.rd.R
|
8e4867e95772f5dbf6aebdfb404c2a6cfb086d99
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
rock.var.rd.R
|
# Demo: biplot of the 'rock' data with variable representation diagnostics
# (var.rb = projected correlations, var.rd = representation quality).
oask <- devAskNewPage(dev.interactive(orNone=TRUE))
bp <- bpca(rock,
var.rb=TRUE,
var.rd=TRUE)
summary(bp)
# The total variance explained is satisfactory (>= .80)!
plot(bp)
# A more accurate diagnostic
bp$var.rd
# It is possible to observe that the variable 'perm'
# has not a good representation (bpca.2d)!
# Observed correlations:
cor(rock)
# Projected correlations:
bp$var.rb
# Additional diagnostic
plot(qbpca(rock,
bp))
# This variable remains as important in a dimension not contemplated
# by the biplot reduction (PC3):
bp$eigenvectors
bp1 <- bpca(rock,
d=3:4)
summary(bp1)
plot(bp1)
# The recommendation, knowing that this variable has a poor
# representation is:
# 1- Avoid discussing it;
# 2- Consider to incorporate the information with a bpca.3d
bp2 <- bpca(rock,
d=1:3,
var.rb=TRUE,
var.rd=TRUE)
summary(bp2)
plot(bp2) # Static
plot(bp2,
rgl.use=TRUE) # Dynamic (interactive rgl device)
bp2$var.rd # Nice!
# Additional diagnostic
plot(qbpca(rock,
bp2))
devAskNewPage(oask)
ebb5420a95b85651553d43c1ac3105fd6f366c26
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/oce/tests/testthat/test_flags.R
|
8c85f3e6e5ec162d4cb8cd85fb97648d0e661997
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 3,975
|
r
|
test_flags.R
|
library(oce)
context("Flags")
# Flag accessors: read flags with [[, overwrite them with [[<-, and use them
# to mask out bad data values.
test_that("[[ and [[<- work with ctd flags", {
    data(section)
    ctd <- section[["station", 100]]
    expect_equal(c(2,2,2,2,2,3), ctd[["salinityFlag"]][1:6])
    # Inject a bad value and flag every negative salinity as 3 (bad).
    ctd[["salinity"]][2] <- -999
    ctd[["salinityFlag"]] <- ifelse(ctd[["salinity"]] < 0, 3, ctd[["salinityFlag"]])
    expect_equal(c(2,3,2,2,2,3), ctd[["salinityFlag"]][1:6])
    # Mask every non-good (flag != 2) salinity as NA.
    ctd[["salinity"]] <- ifelse(ctd[["salinityFlag"]]!=2, NA, ctd[["salinity"]])
    expect_equal(is.na(ctd[["salinity"]][1:6]), c(FALSE, TRUE, FALSE, FALSE, FALSE, TRUE))
})
test_that("handleFLags works with ctd data", {
    data(section)
    ctd <- section[["station", 100]]
    ## this stn has a few points with salinityFlag==3
    ctdNew <- handleFlags(ctd, flags=list(salinity=c(1, 3:9)))
    ##cat("ctd salinity: orig had", sum(is.na(ctd[['salinity']])), "NA values; new has",
    ##    sum(is.na(ctdNew[['salinity']])), "\n")
    expect_equal(sum(is.na(ctd[["salinity"]])), 0)
    nbad <- sum(ctd[['salinityFlag']] != 2)
    expect_equal(2, nbad)
    ## test replacement via function
    f <- function(object) rep(30, length.out=length(object[['salinity']]))
    ctdNew2 <- handleFlags(ctd, flags=list(salinity=4:5), actions=list(salinity=f))
    # FIX: assert on ctdNew2 (the object built with the replacement action),
    # not ctdNew; the parallel argo test below correctly uses argoNew3.
    expect_equal(sum(ctdNew2[['salinity']]==30, na.rm=TRUE),
                 sum(ctd[['salinityFlag']] == 4 | ctd[['salinityFlag']] == 5, na.rm=TRUE))
})
test_that("handleFLags works with the built-in argo dataset", {
    data(argo)
    argoNew <- handleFlags(argo, flags=list(salinity=4:5))
    ## Test a few that are identified by printing some values
    ## for argo[["salinityFlag"]].
    expect_true(is.na(argoNew[["salinity"]][13, 2]))
    expect_true(is.na(argoNew[["salinity"]][53, 8]))
    ## Test whether data with salinity flag of 4 get changed to NA
    expect_true(all(is.na(argoNew[["salinity"]][4==argo[["salinityFlag"]]])))
    expect_true(!all(is.na(argoNew[["salinity"]][1==argo[["salinityFlag"]]])))
    ## Similar for temperature. First, check that it is *not* NA, with
    ## the call to handleFlags() above, which was restricted to salinity.
    expect_true(!is.na(argoNew[["temperature"]][10, 2]))
    ## Now, handle *all* the flags, and check temperature again, and also salinity.
    argoNew2 <- handleFlags(argo)
    expect_true(is.na(argoNew2[["temperature"]][10, 2]))
    expect_true(all(is.na(argoNew2[["temperature"]][4==argo[["temperatureFlag"]]])))
    # Tests of overall numbers
    expect_equal(sum(is.na(argo[["salinity"]])), 90)
    expect_equal(sum(is.na(argoNew[["salinity"]])), 110)
    ## test replacement via function
    f <- function(object) rep(30, length.out=length(object[['salinity']]))
    argoNew3 <- handleFlags(argo, flags=list(salinity=4:5), actions=list(salinity=f))
    expect_equal(sum(argoNew3[['salinity']]==30, na.rm=TRUE),
                 sum(argo[['salinityFlag']] == 4 | argo[['salinityFlag']] == 5, na.rm=TRUE))
})
test_that("handleFLags works with the built-in section dataset", {
    data(section)
    SECTION <- handleFlags(section)
    ## Inspection reveals that salinity are triggered in the first CTD entry, i.e.
    ## the station named "3" in this dataset.
    ## The default for `handleFlags,ctd-method` is the WOCE standard, with 2=good, 3=bad, ...
    stn1 <- section[["station", 1]]
    STN1 <- SECTION[["station", 1]]
    expect_equal(c(2, 3, 3, 2, 2), stn1[["salinityFlag"]])
    # Good (flag 2) values must be untouched ...
    ok <- which(2 == stn1[["salinityFlag"]])
    expect_equal(stn1[["salinity"]][ok], STN1[["salinity"]][ok])
    # ... and non-good values must be replaced with the bottle salinity.
    replace <- which(2 != stn1[["salinityFlag"]])
    expect_equal(stn1[["salinityBottle"]][replace], STN1[["salinity"]][replace])
})
|
85f06b4b8f6c01720f539cf172a3ba1680978a74
|
1b874124f7a4492b23028ebb7878fb6db7ec6ccb
|
/man/gene_pos_counts.Rd
|
748409ed636ea8514869d3ac0c95025936923a77
|
[
"MIT"
] |
permissive
|
sariya/GARCOM
|
c149c40483742d68c72be54311e0df1dace68bb9
|
53ad17a654c71ddbc6aacbf9065d2b736c3855fd
|
refs/heads/master
| 2023-04-11T21:41:49.020295
| 2022-12-23T11:37:19
| 2022-12-23T11:37:19
| 290,058,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,441
|
rd
|
gene_pos_counts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gene_pos_counts.R
\name{gene_pos_counts}
\alias{gene_pos_counts}
\title{gene position counts}
\usage{
gene_pos_counts(dt_gen,dt_snp,dt_gene,keep_indiv=NULL,
extract_SNP=NULL,filter_gene=NULL,
impute_missing=FALSE,impute_method="mean")
}
\arguments{
\item{dt_gen}{a dataframe for genetic data that follows PLINK format (.raw)}
\item{dt_snp}{a dataframe for SNP information with SNP BP as column names.}
\item{dt_gene}{a dataframe for gene boundaries with CHR START END GENE as column names. Where CHR should be integer 1-22. START and END column should be integer. GENE column contains gene names}
\item{keep_indiv}{an option to specify individuals to retain. Mutation counts will be provided for individuals provided in the list only. Default is all individuals.}
\item{extract_SNP}{an option to specify SNPs for which mutation counts are needed. Mutation counts will be provided for SNPs included in the list only. Default is all SNPs.}
\item{filter_gene}{an option to filter in Genes. Mutation counts will be provided for genes included in the list only. Default is all genes.}
\item{impute_missing}{an option to impute missing genotypes. Default is FALSE.}
\item{impute_method}{an option to specify the imputation method. The default method imputes to the mean; alternatively, imputation can be carried out with the median. The function accepts the method in quotes: "mean" or "median". Data are rounded to two decimal places (e.g. 0.1234 will become 0.12).}
}
\value{
Returns an object of data.table class as an output with allelic gene counts within each sample where each row corresponds to gene and column to individual IDs from column second. The first column contains gene names.
}
\description{
Function returns matrix with allelic counts per gene per individual for SNP and gene coordinates as inputs
}
\details{
Inputs needed are: recoded genetic data formatted in PLINK format, SNP name with BP (position) and gene name with START and END position. The first six columns of the input genetic data follow standard PLINK .raw format. Column names as FID, IID, PAT, MAT, SEX and PHENOTYPE followed by SNP information as recoded by the PLINK software. The function returns allelic counts per gene per sample (where each row represents a gene and each column represents an individual starting with the second column where first column contains gene information).
}
\examples{
#Package provides sample data that are loaded with package loading.
#not RUN
data(recodedgen) # PLINK raw formatted data of 10 individuals with 10 SNPs
data(genecoord) #gene coordinates with START, END, CHR and GENE names.
#Five genes with start and end genomic coordinates
data(snppos) #SNP and BP column names with SNP names and SNP genomic location in BP.
#10 SNPs with genomic location
gene_pos_counts(recodedgen, snppos, genecoord) #run the function
#subset individuals
gene_pos_counts(recodedgen, snppos, genecoord,keep_indiv=c("IID_sample2","IID_sample4"))
#subset genes
gene_pos_counts(recodedgen,snppos,genecoord,filter_gene=c("GENE1","GENE2"))
#subset genes and individual iids
gene_pos_counts(recodedgen,snppos,genecoord,filter_gene=c("GENE1","GENE2"),
keep_indiv=c("IID_sample10","IID_sample4"))
##impute by mean
gene_pos_counts(recodedgen,snppos,genecoord,impute_missing=TRUE,impute_method="mean")
#end not RUN
}
\author{
Sanjeev Sariya
}
|
6ed72734d4b385a36f47cc388a8c42a4bf10b350
|
aff3c6907f74ee86a8661cf9280b3049c60e02cc
|
/man/addIntercept.Rd
|
9cf7de5c2455e82d61a3eedf92e19dcf3cd9d04a
|
[] |
no_license
|
josherrickson/pbph
|
1c701e07928c84b70f62bc31b43fbd3f45bb2c93
|
2f322d46af1c2f1e8c1431cfc7a2af3f27d135d2
|
refs/heads/main
| 2023-06-23T04:10:03.698604
| 2023-06-07T13:31:25
| 2023-06-07T13:31:25
| 42,535,789
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 696
|
rd
|
addIntercept.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{addIntercept}
\alias{addIntercept}
\title{Internal function to add a column for the intercept to a matrix.}
\usage{
addIntercept(x)
}
\arguments{
\item{x}{A \code{matrix} or \code{data.frame}.}
}
\value{
A \code{matrix} (if \code{x} is a \code{matrix}) or
\code{data.frame} (if \code{x} is a \code{data.frame})
guaranteed to have an intercept column.
}
\description{
Given a \code{matrix} or \code{data.frame} of size nxp, returns an
nx(p+1) object of the same type where the first column is an
intercept if the intercept is not already in the object (defined
as a first column being all 1's).
}
|
29759323c8422fff8c2d1c65cb52a2cbe1df5c56
|
9ad7f8213adb1064d4b95e240a36572fdd413dbe
|
/GeraAlerta/fun/pop.r
|
bba78a02fdfaf9adccd0bc6412c7ce541073443f
|
[] |
no_license
|
claudia-codeco/AlertaDengueAnalise
|
5bb2e0a05fc54cf88652df8bc22238d2d7ac3fc7
|
a39d8df5ce95e0dbd5881b1511935502a3585a37
|
refs/heads/master
| 2021-01-16T20:39:26.028225
| 2015-10-10T20:31:03
| 2015-10-10T20:31:03
| 22,836,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
pop.r
|
# Aggregate 2010 population by bairro up to APS (health planning area) and
# write the per-APS totals to CSV.
d <- read.csv(file="pop.csv")        # population per bairro
head(d)
class(d$pop)
b <- read.csv(file="bairro2AP.csv")  # bairro -> APS lookup table
names(b)[2] <- "BAIRRO"
head(b)
# BUG FIX: the original called merge(d, b, all.d=TRUE).  'all.d' is not a
# merge() argument (the valid ones are 'all', 'all.x', 'all.y'), so it was
# silently absorbed by '...' and the merge was a plain inner join.
# 'all.x = TRUE' keeps every row of d, which the typo apparently intended
# -- NOTE(review): confirm against expected row counts.
bd <- merge(d, b, all.x = TRUE)
head(bd)
class(bd$pop)
popap <- aggregate(bd$pop, by=list(bd$APS), FUN=sum)
names(popap) <- c("APS", "Pop2010")
write.csv(popap, file="populacao2010porAPS_RJ.csv", row.names=FALSE)
|
fdc61fb57a9a427df88536aa20354d97708d6d0a
|
bb2cf94c9925c755e8d838e5e7547c3d82ca48bb
|
/RSI NORMAL.R
|
2ef1a49ec44bc7ee393ccd07474a30120714cf9f
|
[] |
no_license
|
VivekVerma25/codingground
|
8a6706dfa3293a577e3594ec5ddbad7bc02268bf
|
7387e33d8cd142982e8ba0f44d1251f3523c96e9
|
refs/heads/master
| 2021-01-22T06:11:56.225998
| 2017-06-16T17:24:06
| 2017-06-16T17:24:06
| 92,528,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,467
|
r
|
RSI NORMAL.R
|
library(zoo)
library(TTR)
# library(timeSeries)
# library(slam)
# library(quantmod)
# library(quantreg)
# library(allelematch)
path_input<-"C:/Users/YashV/Desktop/Framework Data/"
path_output<-"C:/Users/YashV/Desktop/Framework Data/"
stck_lvl<-data.frame(read.csv(paste(path_input,"Roll adjusted returns.csv",sep="")))
stck_lvl$Returns<-as.Date(stck_lvl$Returns,"%d/%m/%Y")
tc<-(0) # This is one side trnsaction cost
minp=2 #min of 5 stocks should be there or else no trade
fa=1 #whether fixed allocation or not
alloc=1
lookback_period=c(14,21)
rsi_entry_long = c(70,80) #entry for long
rsi_entry_short = c(30,20) #entry for short
for(li in 1:length(lookback_period))
{
for(lo in 1:length(rsi_entry_long)){
pric<-read.csv(paste(path_input,"PX_LAST.csv",sep="")) #Closing prices CAsh
clafic<-data.frame(read.csv(paste(path_input,"Nifty 50.csv",sep="")))[,-1] #Universe
stck_lvl<-data.frame(read.csv(paste(path_input,"Roll adjusted returns.csv",sep="")))
stck_lvl$Returns<-as.Date(stck_lvl$Returns,"%d/%m/%Y")
names<-colnames(pric)
dt<-pric[(lookback_period[li]+1):nrow(pric),1] #Dates from where NAs are not present
pric<-pric[,-1]
#calculating rsi for each stock
for(j in 1:ncol(pric)){
if(j==1) {
roll<-RSI(pric[,j],lookback_period[li])
rsi<-data.frame(roll)
}
else{
roll<-RSI(pric[,j],lookback_period[li])
rsi<-cbind(rsi,roll)
}
}
rsi<-rsi[(lookback_period[li]+1):nrow(rsi),] #from where NAs arent present
fin<-matrix(0,nrow(rsi),ncol(rsi)) #matrix of long and short signal
rsi[rsi==0] <- NA
clafic<-clafic[(lookback_period[li]+1):nrow(clafic),]
rsi[clafic==0]<-NA #removing stocks which arent in universe
#matrix of long and short signal
fin[1,which(as.matrix(rsi[1,])>rsi_entry_long[lo])]<-1
fin[2,which(as.matrix(rsi[2,])>rsi_entry_long[lo])]<-1
for(i in 3:nrow(fin)){
fin[i,which(fin[i-1,]==0 & fin[i-2,]==0 & as.matrix(rsi[i,])>rsi_entry_long[lo])]<-1
}
fin[1,which(as.matrix(rsi[1,])<(rsi_entry_short[lo]))]<--1
fin[2,which(as.matrix(rsi[2,])<(rsi_entry_short[lo]))]<--1
for(i in 3:nrow(fin)){
fin[i,which(fin[i-1,]==0 & fin[i-2,]==0 & as.matrix(rsi[i,])<(rsi_entry_short[lo]))]<--1
}
new<-data.frame(DATE = dt,fin)
write.csv(new,"long short signal.csv",row.names = FALSE)
pric<-data.frame(read.csv(paste(path_input,"PX_LAST.csv",sep="")))[,-1]
pricn<-pric[(lookback_period[li]+1):nrow(pric),]
pricn[is.na(pricn)] =0
pricn[which(!is.finite(as.matrix(pricn)))] <- 0
#Code for entry exit with stoploss
st<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
high<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
entry_l<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
st_short<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
low<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
entry_s<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
test_shortprice<-as.matrix(pricn)
testprice<-as.matrix(pricn)
if(minp!=0) netp<-0
if(sum(fin[1,]==1 | fin[1,]==-1)>=minp){
entry_l[1,which(fin[1,]==1)]=1
entry_s[1,which(fin[1,]==(-1))]=(-1)
low[1,which(fin[1,]==(-1))]<-test_shortprice[1,which(fin[1,]==(-1))]
high[1,which(fin[1,]==(1))]<-testprice[1,which(fin[1,]==(1))]
if(sum(entry_l[1,]==1 | fin[1,]==(-1) )>0)
{
st[1,which(entry_l[1,]==1)]<-0.97*testprice[1,which(entry_l[1,]==1)]
st_short[1,which(fin[1,]==(-1))]<-1.03*test_shortprice[1,which(fin[1,]==(-1))]
}
netp<-netp+sum(fin[1,]==1)+sum(fin[1,]==-1)
}
for(i in 2:nrow(testprice)){
if(sum(entry_l[i-1,]==1 | entry_s[i-1,]==-1)>0){
j<-sum(entry_l[i-1,]==1)
while(j!=0){
c<-which(entry_l[i-1,]==1)
if(testprice[i,c[j]]>high[i-1,c[j]]) {
st[i,c[j]]<-0.97* (testprice[i,c[j]])
high[i,c[j]]<- (testprice[i,c[j]])
}
else {
st[i,c[j]]<-st[i-1,c[j]]
high[i,c[j]]<-high[i-1,c[j]]
}
if(testprice[i,c[j]]<st[i,c[j]]){ #exit criteria
entry_l[i,c[j]]=0
netp=netp-1
}
else{
entry_l[i,c[j]]=1
}
j=j-1
}
j<-sum(entry_s[i-1,]==-1)
while(j!=0){
c<-which(entry_s[i-1,]==-1)
if(test_shortprice[i,c[j]]<low[i-1,c[j]]) {
st_short[i,c[j]]<-1.03* (test_shortprice[i,c[j]])
low[i,c[j]]<- (test_shortprice[i,c[j]])
}
else {
st_short[i,c[j]]<-st_short[i-1,c[j]]
low[i,c[j]]<-low[i-1,c[j]]
}
if(test_shortprice[i,c[j]]>st_short[i,c[j]]){ #exit criteria
entry_s[i,c[j]]=0
netp=netp-1
}
else{
entry_s[i,c[j]]=-1
}
j=j-1
}
}
if((sum(fin[i,]==1 | fin[i,]==-1)+netp)>=minp) {
entry_l[i,which(fin[i,]==1)]=1
st[i,which(entry_l[i-1,]==0 & entry_l[i,]==1)]<-0.97*(testprice[i,which(entry_l[i-1,]==0 & entry_l[i,]==1)])
high[i,which(entry_l[i-1,]==0 & entry_l[i,]==1)]<-testprice[i,which(entry_l[i-1,]==0 & entry_l[i,]==1)]
entry_s[i,which(fin[i,]==-1)]=-1
st_short[i,which(entry_s[i-1,]==0 & entry_s[i,]==-1)]<-1.03*(test_shortprice[i,which(entry_s[i-1,]==0 & entry_s[i,]==-1)])
low[i,which(entry_s[i-1,]==0 & entry_s[i,]==-1)]<-test_shortprice[i,which(entry_s[i-1,]==0 & entry_s[i,]==-1)]
netp<-netp+(sum(fin[i,]==1 & entry_l[i-1,]==0)) +(sum(fin[i,]==-1 & entry_s[i-1,]==0))
}
}
# Persist the raw long/short entry matrices for inspection.
write.csv(entry_l,paste(path_output,"entry_l.csv"),row.names = FALSE)
write.csv(entry_s,paste(path_output,"entry_s.csv"),row.names = FALSE)
#Final Matrix with both L & S
# Merge the long (entry_l) and short (entry_s) books into one signed matrix:
# +1 long, -1 short, 0 flat. When both books hold the same stock on the same
# day, the side that opened more recently wins; if both were already open
# yesterday, yesterday's combined state is carried forward.
entry<-matrix(0,dim(as.matrix(pricn))[1],dim(as.matrix(pricn))[2])
for(i in 1:nrow(entry)){
if(i==1){
entry[i,which(entry_l[i,]==1)]=1
entry[i,which(entry_s[i,]==-1)]=-1
}
else{
entry[i,which(entry_l[i,]==1 & entry_s[i,]==0 )]=1
# entry[i,which(entry_l[i,]==1 & entry_s[i,]==0 & entry_s[i-1,]==-1 & fin[i,]==1)]=1
entry[i,which(entry_s[i,]==-1 & entry_l[i,]==0)]=-1
# entry[i,which(entry_s[i,]==-1 & entry_l[i,]==0 & entry_l[i-1,]==1 & fin[i,]==-1)]=-1
# Conflicting signals: the book that is newly opened today takes priority.
entry[i,which(entry_l[i,]==1 & entry_s[i,]==-1 & entry_l[i-1,]==0)]=1
entry[i,which(entry_l[i,]==1 & entry_s[i,]==-1 & entry_s[i-1,]==0)]=-1
# Both sides open yesterday and today: keep yesterday's resolution.
entry[i,which(entry_l[i,]==1 & entry_s[i,]==-1 & entry_s[i-1,]==-1 & entry_l[i-1,]==1 )]=entry[i-1,which(entry_l[i,]==1 & entry_s[i,]==-1 & entry_s[i-1,]==-1 & entry_l[i-1,]==1 )]
}
}
# Label columns with the stock tickers; `names` (DATE + tickers) is reused
# by the report matrices written below.
names<-colnames(clafic)
colnames(entry)<-names
newentry<-data.frame(DATE =dt,entry)
names<-colnames(newentry)
write.csv(newentry,paste(path_output,"entry.csv"),row.names = FALSE) #Entry and exit matrix
# Trade start dates: stamp the date on which each position is first opened.
start_date <- matrix(0, dim(entry)[1], dim(entry)[2])
for (day in seq_len(nrow(entry))) {
  opened <- if (day == 1) {
    which(entry[day, ] != 0)                          # day 1: anything held is new
  } else {
    which(entry[day, ] != 0 & entry[day - 1, ] == 0)  # flat yesterday, held today
  }
  start_date[day, opened] <- paste(dt[day])
}
start_date <- data.frame(DATE = dt, start_date)
colnames(start_date) = names
write.csv(start_date, paste(path_output, "start date.csv"), row.names = FALSE)
# Trade end dates: stamp the date on which each position is closed out.
end_date <- matrix(0, dim(entry)[1], dim(entry)[2])
for (day in 2:nrow(entry)) {
  closed <- which(entry[day, ] == 0 & entry[day - 1, ] != 0)
  end_date[day, closed] <- paste(dt[day])
}
end_date <- data.frame(DATE = dt, end_date)
colnames(end_date) = names
write.csv(end_date, paste(path_output, "end date.csv"), row.names = FALSE)
# Direction flag at trade inception: "L" for a new long, "S" for a new short.
LorS <- matrix(0, dim(entry)[1], dim(entry)[2])
for (day in seq_len(nrow(entry))) {
  if (day == 1) {
    new_longs  <- which(entry[day, ] == 1)
    new_shorts <- which(entry[day, ] == -1)
  } else {
    new_longs  <- which(entry[day, ] == 1 & entry[day - 1, ] == 0)
    new_shorts <- which(entry[day, ] == -1 & entry[day - 1, ] == 0)
  }
  LorS[day, new_longs]  <- "L"
  LorS[day, new_shorts] <- "S"
}
LorS <- data.frame(DATE = dt, LorS)
colnames(LorS) = names
write.csv(LorS, paste(path_output, "LorS.csv"), row.names = FALSE)
#NetPosition
# Daily net position: count of open positions, long or short.
np <- as.matrix(rowSums(abs(entry)))
np <- data.frame(DATE = dt, np)
write.csv(np, paste(path_output, "net_position_Long_Short.csv"), row.names = FALSE)
# newst<-data.frame(DATE=dt,st)
# write.csv(newst,"stop_loss.csv",row.names = FALSE)
#
# newp<-data.frame(DATE=dt,pricn)
# # write.csv(newp,paste(path_output,"prices.csv"),row.names = FALSE)
# #
# stock_names<-matrix(0,nrow = nrow(entry),120)
# for(i in 1:nrow(entry)){
# k=1
# j<-sum(entry[i,]==1)
# while(j!=0){
# c<-which(entry[i,]==1)
# stock_names[i,k]=names[c[j]]
# k=k+1
# j=j-1
# }
# }
# stock_names<-data.frame(DATE = dt,stock_names)
# write.csv(stock_names,paste(path_output,"stock in zpcr.csv"),row.names=FALSE)
#
#
#TO Calculate Returns
# Drop the last row: weights on day t are applied to returns realised on t+1.
entry_final<-entry[-1*dim(entry)[1],]
# Align the stock-return panel with the signal window, then drop its date
# column and first row so stck_lvl_calc[t, ] is the return following day t.
stck_lvl<-stck_lvl[(lookback_period[li]+1):nrow(stck_lvl),]
stck_lvl_calc<-stck_lvl[-1,-1]
#FIXED ALLOCATION
#ALLOCATION ON EACH STOCK
# Equal-weight every open position: divide each row by that day's gross
# position count so absolute weights sum to 1.
exposure_long<-as.matrix(rowSums(abs(entry_final)))
for(i in 1:ncol(entry_final)){
entry_final[,i]<-entry_final[,i]/exposure_long
}
# Days with no positions give 0/0 = NaN; zero those weights out.
entry_final[is.na(entry_final)] =0
entry_final<-as.matrix(entry_final)
entry_final[which(!is.finite(entry_final))] <- 0
# Same normalisation on the untrimmed matrix, used for transaction-cost and
# traded-value calculations. NOTE: `entry` is overwritten in place here —
# from this point on it holds weights, not the +/-1 signals.
exposure_long_og<-as.matrix(rowSums(abs(entry)))
for(i in 1:ncol(entry)){
entry[,i]<-entry[,i]/exposure_long_og
}
entry[is.na(entry)] =0
entry<-as.matrix(entry)
entry[which(!is.finite(entry))] <- 0
write.csv(entry,paste(path_output,"entry_after.csv"),row.names=FALSE)
#For Both Longs And Shorts
#traded Value
# Daily traded value = absolute day-over-day change in normalised weights.
tv<-matrix(0,dim(entry)[1],dim(entry)[2])
tv[1,]<-entry[1,]
for(i in 2:nrow(entry)){
tv[i,]=abs(entry[i,]-entry[i-1,])
}
tot_tv<-rowSums(abs(tv))
tot_tv<-data.frame(DATE = dt,Tv = tot_tv)
write.csv(tot_tv,paste(path_output,"GTV each day.csv"),row.names=FALSE)
# write.csv(stck_lvl_calc,paste(path_output,"stcklvlcalc.csv"),row.names=FALSE)
#
# Per-stock daily P&L = weight * next-day return; cst_tb charges transaction
# cost tc on every weight change.
fr<-data.frame(entry_final*stck_lvl_calc)
lng_ret_tb<-data.frame(rowSums(entry_final*stck_lvl_calc))
cst_tb<-data.frame(rowSums(abs(entry[-1,]-entry[-1*dim(entry)[1],])*tc))
# alloc<-rowSums(abs(clafic[-1]))
# exposure_long<-as.matrix(rowSums(abs(entry_final)))
tot_ret_tb<-data.frame((lng_ret_tb -cst_tb))
tot_ret_tb[is.na(tot_ret_tb)] =0
tot_ret_tb<-as.matrix(tot_ret_tb)
tot_ret_tb[which(!is.finite(tot_ret_tb))] <- 0
mtm<-tot_ret_tb
mtm<-data.frame(DATE=dt[-1],MTM_Daily = mtm)
name<-c("DATE","MTM")
colnames(mtm)=name
write.csv(mtm,paste(path_output,"MTM.csv"),row.names=FALSE)
# Annual aggregation. NOTE(review): substr(date, 7, 10) assumes a
# "dd-mm-yyyy" date string, while lines below use substr(date, 1, 4)
# ("yyyy-..."); only one can match the actual format of dt — confirm
# against the input data.
ann_mtm<-data.frame(aggregate(mtm[,2],by=list((substr(mtm[,1],7,10))),sum))
ann_tv<-data.frame(aggregate(tot_tv[,2],by=list((substr(tot_tv[,1],7,10))),sum))
ann_mtmtv<-ann_mtm[,2]/ann_tv[,2]
# BUG FIX: was `MTMTV<-ann_mtmtv`, which assigned a stray global named MTMTV
# instead of naming the data.frame column; `=` names the column as intended.
# (Downstream access is positional, so behaviour of final_res is unchanged.)
ann_mtmtv<-data.frame(DATE = ann_mtm[,1],MTMTV = ann_mtmtv)
ret_fin_tb<-data.frame(stck_lvl[2:nrow(stck_lvl),1], tot_ret_tb, lng_ret_tb)
colnames(ret_fin_tb) <- c("Date","Total Ret","Ret without tc")
write.csv(ret_fin_tb,paste(path_output,"Long Only.csv"),row.names=FALSE)
ann_ret_rsi<-data.frame(aggregate(ret_fin_tb[,2],by=list((substr(ret_fin_tb[,1],1,4))),sum))
ann_mean_rsi<-data.frame(aggregate(ret_fin_tb[,(2)],by=list((substr(ret_fin_tb[,1],1,4))),mean))
ann_sd_rsi<-data.frame(aggregate(ret_fin_tb[,(2)],by=list((substr(ret_fin_tb[,1],1,4))),sd))
t1<-ann_mean_rsi[,2]
t2<-ann_sd_rsi[,2]
# Annualised Sharpe ratio from daily mean/sd (252 trading days per year).
ann_sharpe_rsi<- data.frame(DATE=ann_mean_rsi[,1],Sharpe = (t1/t2)*sqrt(252))
#calculation Of cumalative returns
# cum_ret[i, s] accumulates stock s's P&L from the day its trade opened up to
# day i; it stays 0 while the position is flat.
cum_ret<-matrix(0,dim(fr)[1],dim(fr)[2]) #ret per day
fr<-as.matrix(fr)
entry_final<-as.matrix(entry_final)
for(i in 1:nrow(fr)){
if(i==1){
cum_ret[i,which(entry_final[i,]!=0)]=fr[i,which(entry_final[i,]!=0)]
}
else{
# Newly opened today: start the running total at today's P&L.
cum_ret[i,which(entry_final[i-1,]==0 & entry_final[i,]!=0)]=fr[i,which(entry_final[i-1,]==0 & entry_final[i,]!=0)]
# Still open: carry yesterday's running total forward.
c=which(entry_final[i-1,]!=0 & entry_final[i,]!=0)
j=length(c)
if(j>0){
for(k in 1:j){
cum_ret[i,c[k]]=fr[i,c[k]]+cum_ret[i-1,c[k]]
}
}
}
}
cum_ret_dt<-data.frame(DATE = dt[-1],cum_ret)
write.csv(cum_ret_dt,paste(path_output,"cum_returns.csv"),row.names=FALSE)
# Pad cum_ret back onto the full date axis (first date row is all zeros).
cum_rt<-matrix(0,length(dt),ncol = ncol(cum_ret))
cum_rt[2:nrow(cum_rt),]=cum_ret
# trade_ret records each trade's final cumulative P&L on its last open day
# (detected as: running total non-zero yesterday, zero today).
trade_ret<-matrix(0,dim(cum_rt)[1],dim(cum_rt)[2])
for(i in 2:nrow(entry)){
c=which(cum_rt[i,]==0 & cum_rt[i-1,]!=0)
j=length(c)
if(j>0){
for(k in 1:j){
trade_ret[i-1,c[k]]=cum_rt[i-1,c[k]]
}
}
}
# Trades still open on the final day are marked at their current running P&L.
trade_ret[nrow(entry),]=cum_rt[nrow(entry),]
trade_ret<-data.frame(DATE = dt,trade_ret)
colnames(trade_ret)<-names
write.csv(trade_ret,paste(path_output,"Trade Ret.csv"),row.names = FALSE)
# --- Trade statistics (combined book) ---------------------------------------
# A trade closes when its cumulative return goes from non-zero to zero; it is
# counted as positive/negative by the sign of its final cumulative P&L.
no_of_pos_trades<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
no_of_neg_trades<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
for(i in 2:nrow(no_of_pos_trades)){
# Last row: count still-open trades by the sign of their running P&L.
if(i==nrow(no_of_pos_trades)){
no_of_pos_trades[i,which(cum_ret[i,]>0)]=1
no_of_neg_trades[i,which(cum_ret[i,]<0)]=1
}
c=which(cum_ret[i-1,]!=0 & cum_ret[i,]==0)
j=length(c)
if(j>0){
for(k in 1:j){
if(cum_ret[i-1,c[k]]>0) no_of_pos_trades[i-1,c[k]]=1
if(cum_ret[i-1,c[k]]<0) no_of_neg_trades[i-1,c[k]]=1
}
}
}
no_of_trades=no_of_pos_trades+no_of_neg_trades
write.csv(no_of_trades,paste(path_output,"Total trades.csv"),row.names = FALSE)
pos<-data.frame((apply(no_of_pos_trades,1,sum,na.rm=TRUE)))
neg<-data.frame((apply(no_of_neg_trades,1,sum,na.rm=TRUE)))
pos<-data.frame(DATE = dt[-1],pos_No_OF_TRADES = pos[,1])
neg<-data.frame(DATE = dt[-1],neg_No_OF_TRADES = neg[,1])
# Yearly totals; substr(., 7, 10) extracts the year assuming "dd-mm-yyyy".
ann_pos<-data.frame(aggregate(pos[,2],by=list((substr(pos[,1],7,10))),sum))
ann_neg<-data.frame(aggregate(neg[,2],by=list((substr(neg[,1],7,10))),sum))
ann_no_of_trades<-ann_pos[,2]+ann_neg[,2]
ann_no_of_trades<-data.frame(DATE = ann_pos[,1], ann_no_of_trades = ann_no_of_trades)
# Success ratio = winning trades / total trades, per year.
success_ratio<-ann_pos[,2]/ann_no_of_trades[,2]
success_ratio<-data.frame(DATE = ann_pos[,1], suc_ratio = success_ratio)
# avg_pos/avg_neg hold each closed trade's final P&L (positive vs negative),
# later summed per year and divided by trade counts to give averages.
avg_pos<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
avg_neg<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
for(i in 2:nrow(avg_pos)){
if(i==nrow(avg_pos)){
avg_pos[i,which(cum_ret[i,]>0)]=cum_ret[i,which(cum_ret[i,]>0)]
avg_neg[i,which(cum_ret[i,]<0)]=cum_ret[i,which(cum_ret[i,]<0)]
}
c=which(cum_ret[i-1,]!=0 & cum_ret[i,]==0)
j=length(c)
if(j>0){
for(k in 1:j){
if(cum_ret[i-1,c[k]]>0) avg_pos[i-1,c[k]]=cum_ret[i-1,c[k]]
else avg_neg[i-1,c[k]]=cum_ret[i-1,c[k]]
}
}
}
write.csv(avg_pos,paste(path_output,"avg_pos.csv"),row.names=FALSE)
write.csv(avg_neg,paste(path_output,"avg_neg.csv"),row.names=FALSE)
avg_pos<-data.frame((apply(avg_pos,1,sum,na.rm=TRUE)))
avg_neg<-data.frame((apply(avg_neg,1,sum,na.rm=TRUE)))
avg_pos<-data.frame(DATE = dt[-1],avg_pos_ret = avg_pos[,1])
avg_neg<-data.frame(DATE = dt[-1],avg_neg_ret = avg_neg[,1])
ann_avg_pos<-data.frame(aggregate(avg_pos[,2],by=list((substr(avg_pos[,1],7,10))),sum))
ann_avg_neg<-data.frame(aggregate(avg_neg[,2],by=list((substr(avg_neg[,1],7,10))),sum))
#Divide total ret pos/total pos
ann_avg_pos<-ann_avg_pos[,2]/ann_pos[,2]
ann_avg_neg<-ann_avg_neg[,2]/ann_neg[,2]
ann_avg_neg<-data.frame(DATE = ann_neg[,1],ann_avg_neg)
ann_avg_pos<-data.frame(DATE = ann_pos[,1],ann_avg_pos)
# Win-to-lose ratio = |average winner / average loser| per year.
wl<-abs(ann_avg_pos[,2]/ann_avg_neg[,2])
wl<-data.frame(DATE = ann_avg_pos[,1],Win_to_lose = wl)
#for long side
# Isolate the long book: keep positive weights, zero everything else, then
# repeat the P&L / MTM / trade-count pipeline on it.
entry_long<-matrix(0,dim(entry)[1],dim(entry)[2])
for(i in 1:nrow(entry)){
entry_long[i,which(entry[i,]>0)]<-entry[i,which(entry[i,]>0)]
}
write.csv(entry_long,paste(path_output,"entry_long.csv"),row.names=FALSE)
entry_long_final<-entry_long[-1*dim(entry_long)[1],]
fr<-data.frame(entry_long_final*stck_lvl_calc)
lng_ret_tb<-data.frame(rowSums(entry_long_final*stck_lvl_calc))
cst_tb<-data.frame(rowSums(abs(entry_long[-1,]-entry_long[-1*dim(entry_long)[1],])*tc))
# alloc<-rowSums(abs(clafic[-1]))
# exposure_long<-as.matrix(rowSums(abs(entry_long_final)))
tot_ret_tb<-data.frame((lng_ret_tb -cst_tb))
tot_ret_tb[is.na(tot_ret_tb)] =0
tot_ret_tb<-as.matrix(tot_ret_tb)
tot_ret_tb[which(!is.finite(tot_ret_tb))] <- 0
mtm_long<-tot_ret_tb
mtm_long<-data.frame(DATE=dt[-1],mtm_long_Daily = mtm_long)
name<-c("DATE","mtm_long")
colnames(mtm_long)=name
write.csv(mtm_long,paste(path_output,"Long_mtm.csv"),row.names=FALSE)
ann_mtm_long<-data.frame(aggregate(mtm_long[,2],by=list((substr(mtm_long[,1],7,10))),sum))
ann_tv<-data.frame(aggregate(tot_tv[,2],by=list((substr(tot_tv[,1],7,10))),sum))
ann_mtm_longtv<-ann_mtm_long[,2]/ann_tv[,2]
# NOTE(review): `mtm_longTV<-...` inside data.frame() assigns a stray global
# and mis-names the column; likely intended `mtm_longTV = ...` (downstream
# access is positional, so results are unaffected).
ann_mtm_longtv<-data.frame(DATE = ann_mtm_long[,1],mtm_longTV<-ann_mtm_longtv)
# NOTE(review): these "long" annual stats aggregate ret_fin_tb, which holds
# the COMBINED long+short returns computed earlier — looks like a copy-paste
# from the combined section; confirm whether long-only returns were intended.
ann_ret_rsi_long<-data.frame(aggregate(ret_fin_tb[,2],by=list((substr(ret_fin_tb[,1],1,4))),sum))
ann_mean_rsi_long<-data.frame(aggregate(ret_fin_tb[,(2)],by=list((substr(ret_fin_tb[,1],1,4))),mean))
ann_sd_rsi_long<-data.frame(aggregate(ret_fin_tb[,(2)],by=list((substr(ret_fin_tb[,1],1,4))),sd))
t1<-ann_mean_rsi_long[,2]
t2<-ann_sd_rsi_long[,2]
ann_sharpe_rsi_long<- data.frame(DATE=ann_mean_rsi_long[,1],Sharpe = (t1/t2)*sqrt(252))
# Cumulative per-trade P&L for the long book (same scheme as combined book).
cum_ret<-matrix(0,dim(fr)[1],dim(fr)[2]) #ret per day
fr<-as.matrix(fr)
entry_long_final<-as.matrix(entry_long_final)
for(i in 1:nrow(fr)){
if(i==1){
cum_ret[i,which(entry_long_final[i,]!=0)]=fr[i,which(entry_long_final[i,]!=0)]
}
else{
cum_ret[i,which(entry_long_final[i-1,]==0 & entry_long_final[i,]!=0)]=fr[i,which(entry_long_final[i-1,]==0 & entry_long_final[i,]!=0)]
c=which(entry_long_final[i-1,]!=0 & entry_long_final[i,]!=0)
j=length(c)
if(j>0){
for(k in 1:j){
cum_ret[i,c[k]]=fr[i,c[k]]+cum_ret[i-1,c[k]]
}
}
}
}
# Long-book winner/loser trade counts (closed trades plus still-open ones
# counted on the last row).
no_of_pos_trades<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
no_of_neg_trades<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
for(i in 2:nrow(no_of_pos_trades)){
if(i==nrow(no_of_pos_trades)){
no_of_pos_trades[i,which(cum_ret[i,]>0)]=1
no_of_neg_trades[i,which(cum_ret[i,]<0)]=1
y=i
}
c=which(cum_ret[i-1,]!=0 & cum_ret[i,]==0)
j=length(c)
if(j>0){
for(k in 1:j){
if(cum_ret[i-1,c[k]]>0) no_of_pos_trades[i-1,c[k]]=1
if(cum_ret[i-1,c[k]]<0) no_of_neg_trades[i-1,c[k]]=1
}
}
}
no_of_trades=no_of_pos_trades+no_of_neg_trades
write.csv(no_of_trades,paste(path_output,"Total long trades.csv"),row.names = FALSE)
pos<-data.frame((apply(no_of_pos_trades,1,sum,na.rm=TRUE)))
neg<-data.frame((apply(no_of_neg_trades,1,sum,na.rm=TRUE)))
pos<-data.frame(DATE = dt[-1],pos_No_OF_TRADES = pos[,1])
neg<-data.frame(DATE = dt[-1],neg_No_OF_TRADES = neg[,1])
ann_pos_long<-data.frame(aggregate(pos[,2],by=list((substr(pos[,1],7,10))),sum))
ann_neg_long<-data.frame(aggregate(neg[,2],by=list((substr(neg[,1],7,10))),sum))
ann_no_of_trades_long<-ann_pos_long[,2]+ann_neg_long[,2]
ann_no_of_trades_long<-data.frame(DATE = ann_pos_long[,1], ann_no_of_trades_long = ann_no_of_trades_long)
#for short side
# Isolate the short book: keep negative weights, zero everything else.
entry_short<-matrix(0,dim(entry)[1],dim(entry)[2])
for(i in 1:nrow(entry)){
entry_short[i,which(entry[i,]<0)]<-entry[i,which(entry[i,]<0)]
}
entry_short_final<-entry_short[-1*dim(entry_short)[1],]
write.csv(entry_short,paste(path_output,"entry_short_after.csv"),row.names=FALSE)
# Short-book daily P&L and transaction costs (variable names reuse the
# long-side names — lng_ret_tb here actually holds short-book returns).
fr<-data.frame(entry_short_final*stck_lvl_calc)
lng_ret_tb<-data.frame(rowSums(entry_short_final*stck_lvl_calc))
cst_tb<-data.frame(rowSums(abs(entry_short[-1,]-entry_short[-1*dim(entry_short)[1],])*tc))
# alloc<-rowSums(abs(clafic[-1]))
# exposure_short<-as.matrix(rowSums(abs(entry_short_final)))
tot_ret_tb<-data.frame((lng_ret_tb -cst_tb))
tot_ret_tb[is.na(tot_ret_tb)] =0
tot_ret_tb<-as.matrix(tot_ret_tb)
tot_ret_tb[which(!is.finite(tot_ret_tb))] <- 0
# Daily mark-to-market for the short book only.
mtm_short<-tot_ret_tb
mtm_short<-data.frame(DATE=dt[-1],mtm_short_Daily = mtm_short)
name<-c("DATE","mtm_short")
colnames(mtm_short)=name
# BUG FIX: this previously wrote to "Long_mtm.csv", silently overwriting the
# long-side MTM file produced earlier; the short book gets its own file.
write.csv(mtm_short,paste(path_output,"Short_mtm.csv"),row.names=FALSE)
# Annual short-book MTM and MTM-to-traded-value ratio.
ann_mtm_short<-data.frame(aggregate(mtm_short[,2],by=list((substr(mtm_short[,1],7,10))),sum))
ann_tv<-data.frame(aggregate(tot_tv[,2],by=list((substr(tot_tv[,1],7,10))),sum))
ann_mtm_shorttv<-ann_mtm_short[,2]/ann_tv[,2]
# BUG FIX: was `mtm_shortTV<-...` (a stray global assignment inside the
# data.frame call); `=` names the column as intended.
ann_mtm_shorttv<-data.frame(DATE = ann_mtm_short[,1],mtm_shortTV = ann_mtm_shorttv)
# NOTE(review): these "short" annual stats aggregate ret_fin_tb, which holds
# the COMBINED long+short returns computed earlier — looks like a copy-paste
# from the combined section; confirm whether short-only returns were intended.
ann_ret_rsi_short<-data.frame(aggregate(ret_fin_tb[,2],by=list((substr(ret_fin_tb[,1],1,4))),sum))
ann_mean_rsi_short<-data.frame(aggregate(ret_fin_tb[,(2)],by=list((substr(ret_fin_tb[,1],1,4))),mean))
ann_sd_rsi_short<-data.frame(aggregate(ret_fin_tb[,(2)],by=list((substr(ret_fin_tb[,1],1,4))),sd))
t1<-ann_mean_rsi_short[,2]
t2<-ann_sd_rsi_short[,2]
ann_sharpe_rsi_short<- data.frame(DATE=ann_mean_rsi_short[,1],Sharpe = (t1/t2)*sqrt(252))
# Cumulative per-trade P&L for the short book (same scheme as combined book).
cum_ret<-matrix(0,dim(fr)[1],dim(fr)[2]) #ret per day
fr<-as.matrix(fr)
entry_short_final<-as.matrix(entry_short_final)
for(i in 1:nrow(fr)){
if(i==1){
cum_ret[i,which(entry_short_final[i,]!=0)]=fr[i,which(entry_short_final[i,]!=0)]
}
else{
cum_ret[i,which(entry_short_final[i-1,]==0 & entry_short_final[i,]!=0)]=fr[i,which(entry_short_final[i-1,]==0 & entry_short_final[i,]!=0)]
c=which(entry_short_final[i-1,]!=0 & entry_short_final[i,]!=0)
j=length(c)
if(j>0){
for(k in 1:j){
cum_ret[i,c[k]]=fr[i,c[k]]+cum_ret[i-1,c[k]]
}
}
}
}
# Short-book winner/loser trade counts (closed trades plus still-open ones
# counted on the last row; `y` looks unused beyond marking the last index).
no_of_pos_trades<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
no_of_neg_trades<-matrix(0,dim(cum_ret)[1],dim(cum_ret)[2])
for(i in 2:nrow(no_of_pos_trades)){
if(i==nrow(no_of_pos_trades)){
no_of_pos_trades[i,which(cum_ret[i,]>0)]=1
no_of_neg_trades[i,which(cum_ret[i,]<0)]=1
y=i
}
c=which(cum_ret[i-1,]!=0 & cum_ret[i,]==0)
j=length(c)
if(j>0){
for(k in 1:j){
if(cum_ret[i-1,c[k]]>0) no_of_pos_trades[i-1,c[k]]=1
if(cum_ret[i-1,c[k]]<0) no_of_neg_trades[i-1,c[k]]=1
}
}
}
no_of_trades=no_of_pos_trades+no_of_neg_trades
write.csv(no_of_trades,paste(path_output,"Total short trades.csv"),row.names = FALSE)
pos<-data.frame((apply(no_of_pos_trades,1,sum,na.rm=TRUE)))
neg<-data.frame((apply(no_of_neg_trades,1,sum,na.rm=TRUE)))
pos<-data.frame(DATE = dt[-1],pos_No_OF_TRADES = pos[,1])
neg<-data.frame(DATE = dt[-1],neg_No_OF_TRADES = neg[,1])
ann_pos_short<-data.frame(aggregate(pos[,2],by=list((substr(pos[,1],7,10))),sum))
ann_neg_short<-data.frame(aggregate(neg[,2],by=list((substr(neg[,1],7,10))),sum))
ann_no_of_trades_short<-ann_pos_short[,2]+ann_neg_short[,2]
ann_no_of_trades_short<-data.frame(DATE = ann_pos_short[,1], ann_no_of_trades_short = ann_no_of_trades_short)
# Assemble the per-year summary table from all metrics computed above.
final_res<-data.frame(Date=ann_avg_pos[,1],Number_Of_trades=ann_no_of_trades[,2],No_of_pos_Trades = ann_pos[,2], No_of_Neg_Trades = ann_neg[,2], Average_pos_Trade=ann_avg_pos[,2], Avg_Neg_trade = ann_avg_neg[,2], Success_Ratio=success_ratio[,2], Win_to_Lose = wl[,2], Total_mtm=ann_mtm[,2],Tot_tv=ann_tv[,2],Mtm_tv=ann_mtmtv[,2], Sharpe= ann_sharpe_rsi[,2],No_of_long_Trades=ann_no_of_trades_long[,2],Mtm_long = ann_mtm_long[,2],No_of_short_Trades=ann_no_of_trades_short[,2],Mtm_short = ann_mtm_short[,2])
# NOTE(review): hardcoded absolute Windows output path, and the [1:8,] slice
# keeps only the first 8 yearly rows — confirm both before reuse.
write.csv(final_res[1:8,],paste("C:/Users/YashV/Desktop/Framework Data/Results/","final_res RSI ",lookback_period[li] ,"entry exit",rsi_entry_long[lo]," ",rsi_entry_short[lo],"Nifty 50_5tst_exit.csv"),row.names=FALSE)
}
}
|
5a4cd4215f0cf5c4003ef5a9ce58ca80b0882322
|
a9a48fd57852f07d84e874579050ca124345868f
|
/R/Healthcareai/Diabetes/diabetes.R
|
efd25eb9e134a9b986acae75321f3766f338527c
|
[] |
no_license
|
nicolaumatarrodona/AI
|
f28689d3f56192fd24e3622427a6b0e20c1d72da
|
3d024541a902a05855e3273d527417f29fe1b76d
|
refs/heads/master
| 2020-08-04T13:57:46.176130
| 2019-10-13T06:30:19
| 2019-10-13T06:30:19
| 212,159,883
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,392
|
r
|
diabetes.R
|
# Load healthcareai and inspect the structure of its bundled example dataset.
library(healthcareai)
str(pima_diabetes)
#--------------------------------------------------------
# Easy Machine Learning
#--------------------------------------------------------
# One-call pipeline: prep the data, then train and tune classifiers that
# predict diabetes, ignoring the patient_id identifier column.
quick_models <- machine_learn(pima_diabetes, patient_id, outcome = diabetes)
print(quick_models)
# Out-of-fold predictions on the training data, printed then plotted.
predictions <- predict(quick_models)
print(predictions)
plot(predictions)
# Same predictions split into two outcome groups before plotting.
plot(predict(quick_models, outcome_groups = 2))
# Data Profiling: visualise per-column missingness.
plot(missingness(pima_diabetes))
#--------------------------------------------------------
# Data Preparation
#--------------------------------------------------------
# 80/20 train/test split, seeded for reproducibility.
split_data <- split_train_test(d = pima_diabetes,
outcome = diabetes,
p = 0.8,
seed = 1)
# Prep the training partition: centre/scale numerics, keep rare factor
# levels (collapse_rare_factors = FALSE), exclude patient_id from prep.
prepped_training_data <- prep_data(split_data$train, patient_id, outcome = diabetes,
center = TRUE, scale = TRUE,
collapse_rare_factors = FALSE)
# write.csv(prepped_training_data,'diabetes_prepared.csv')
head(prepped_training_data)
# prep_data object with only center, scale and impute missing values set to true. Rest is set to false (default is true)
# Intentionally disabled alternative prep call (while (FALSE) never runs);
# kept as a reference for the fully-explicit argument set.
while (FALSE) {
prepped_training_data <- prep_data(split_data$train, patient_id, outcome = diabetes,
center = TRUE, scale = TRUE,
collapse_rare_factors = FALSE,
impute = TRUE,
remove_near_zero_variance = FALSE,
add_levels = FALSE,
logical_to_numeric = FALSE,
factor_outcome = FALSE)
}
#--------------------------------------------------------
# Model Training
#--------------------------------------------------------
# Tune models on the prepped data; 25 hyperparameter combinations per
# algorithm, optimising area under the precision-recall curve ("PR").
models <- tune_models(d = prepped_training_data,
outcome = diabetes,
tune_depth = 25,
metric = "PR")
# Performance summary across all trained algorithms.
evaluate(models, all_models = TRUE)
# Hyperparameter-performance plot for the random forest only.
models["Random Forest"] %>%
plot()
#--------------------------------------------------------
# Faster Model Training
# flash_models use fixed sets of hyperparameter values to train the models
# so you still get a model customized to your data,
# but without burning the electricity and time to precisely optimize all the details.
# Here we’ll use models = "RF" to train only a random forest.
# If you want to train a model on fixed hyperparameter values, but you want to choose those values,
# you can pass them to the hyperparameters argument of tune_models.
# Run get_hyperparameter_defaults() to see the default values and get a list you can customize.
#--------------------------------------------------------
#--------------------------------------------------------
# Model Interpretation
#--------------------------------------------------------
# Coefficient-style interpretation plot. A low value for weight_class_normal
# means normal-weight people are less likely to have diabetes; plasma glucose
# is associated with increased diabetes risk after accounting for the other
# variables.
plot(interpret(models))
# Tree-based methods (random forest, boosted trees) have no coefficients like
# regularized regression does, but they do report how important each feature
# is for making accurate predictions.
plot(get_variable_importance(models))
# explore() shows how the model makes its predictions: it takes the most
# important features and sweeps "counterfactual" observations across them to
# see what the model predicts at various feature combinations.
plot(explore(models))
#--------------------------------------------------------
# Prediction
#--------------------------------------------------------
# Out-of-fold predictions on the training data.
predict(models)
# Predictions on the held-out test set, bucketed into four named risk groups
# (the numbers give the relative size of each group).
test_predictions <-
predict(models,
split_data$test,
risk_groups = c(low = 30, moderate = 40, high = 20, extreme = 10)
)
# > Prepping data based on provided recipe
plot(test_predictions)
#--------------------------------------------------------
# Saving, Moving, and Loading Models
#--------------------------------------------------------
# Round-trip the trained models through an RDS file on disk.
save_models(models, file = "my_models.RDS")
models <- load_models("my_models.RDS")
#--------------------------------------------------------
# A Regression Example:
#
# All the examples above have been classification tasks,
# predicting a yes/no outcome. Here's an example of a full
# regression modeling pipeline on a silly problem:
# predicting individuals' ages. The code is very similar to classification.
#--------------------------------------------------------
# BUG FIX: this previously used `outcome = diabetes`, which trains yet
# another classification model; the regression example described above
# (and in healthcareai's documentation) predicts `age`.
regression_models <- machine_learn(pima_diabetes, patient_id, outcome = age)
summary(regression_models)
# Let's make a prediction on a hypothetical new patient. Note that the model handles missingness in
# insulin and a new category level in weight_class without a problem (but warns about it).
# Since `age` is the outcome here, the new patient supplies `diabetes` as a
# predictor instead of supplying `age`.
new_patient <- data.frame(
pregnancies = 0,
plasma_glucose = 80,
diastolic_bp = 55,
skinfold = 24,
insulin = NA,
weight_class = "???",
pedigree = .2,
diabetes = "N")
predict(regression_models, new_patient)
|
6cb7d8010b1850a66aaaa82c6e7db4e87f0a0783
|
35f40a71768518309a394e584e25aa7c043311db
|
/ui.R
|
77a9af0ce19b367ed3ec0312912410952d803c88
|
[] |
no_license
|
sandrafdo/Shopping-Analysis
|
593e1d4b524ec7b6fdcb7bbfa31ab9b96417d272
|
5a823baa20cb4b5bc1b5616e63e67868c9e4b8d7
|
refs/heads/master
| 2020-03-20T00:13:29.516246
| 2018-06-12T07:53:19
| 2018-06-12T07:53:19
| 137,036,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
ui.R
|
# Shiny UI definition for the online-shopping analysis dashboard.
library(shinythemes)
library(datasets)
library(rsconnect)

# NOTE(review): hardcoded absolute Windows path — will fail anywhere else;
# confirm before deployment.
InputData <- read.csv("D:/Analysis.csv")

# Sidebar: category selector populated from the input file's column names.
category_sidebar <- sidebarPanel(
  selectInput("category", "Select Category",
              choices = colnames(InputData)),
  hr(),
  helpText("Data")
)

# Main panel: placeholder for the bar plot rendered by the server.
plot_panel <- mainPanel(
  plotOutput("storePlot")
)

# Fluid Bootstrap page with the dark "slate" theme; this expression is the
# file's return value and therefore the app's UI.
fluidPage(
  theme = shinytheme("slate"),
  titlePanel("ONLINE SHOPPING ANALYSIS"),
  sidebarLayout(category_sidebar, plot_panel)
)
|
7a78cbddb4ff502cf96301326dba404daec65e69
|
728315d8c5d09e13c67641030b92d59c5e7e2222
|
/moderate/predict_the_number.r
|
2a6bbb475cac92d44161f3bf761b0913024d8cd1
|
[
"MIT"
] |
permissive
|
shortthirdman/code-eval-challenges
|
88ea93c0e9385b2a0db95a05b1f3f753c900a62d
|
cf2197927830326539399fdd3e16c9b8a4468f7d
|
refs/heads/master
| 2023-03-06T19:34:44.607154
| 2023-02-26T13:30:56
| 2023-02-26T13:30:56
| 92,970,178
| 4
| 0
|
MIT
| 2023-02-26T13:30:57
| 2017-05-31T17:14:59
|
Go
|
UTF-8
|
R
| false
| false
| 242
|
r
|
predict_the_number.r
|
# Read one number per line from the file named by the last command-line
# argument; for each, print popcount(value) mod 3.
cat(sapply(as.double(readLines(tail(commandArgs(), n = 1))), function(value) {
  bit_count <- 0
  # bitwAnd() only handles 31-bit signed integers, so peel off bit 31
  # manually and count it before converting to integer.
  if (value >= 2^31) {
    value <- value - 2^31
    bit_count <- 1
  }
  remaining <- as.integer(value)
  # Kernighan's trick: each AND with (x - 1) clears the lowest set bit.
  while (remaining > 0) {
    remaining <- bitwAnd(remaining, remaining - 1)
    bit_count <- bit_count + 1
  }
  bit_count %% 3
}), sep = "\n")
|
c1528624680f94f1f67517b6ae17ae465e7f001e
|
f32f521476b59d0a3f1ecc15be45a08c544603b6
|
/other_code/make_tpm_files.R
|
91bfd7e6f3491ee3372000a762e88d56d60f4612
|
[] |
no_license
|
buttelab/harmonyrna
|
e32e4ff373ab2fe59ed5947f66848c5200545b57
|
6059618c761d5e635bb13a27a23c2c322141dc99
|
refs/heads/master
| 2022-11-29T13:05:20.272831
| 2020-08-12T20:21:27
| 2020-08-12T20:21:27
| 286,588,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,003
|
r
|
make_tpm_files.R
|
# Code created by Matthew Elliott
# Contact: melliot1@ucsc.edu (valid until 2023)
# Phone: 231-392-1263 (just in case)
#############
### Set Up Notebook
#############
library(plyr)
library(biomaRt)
#############
### Load Data
#############
# We reharmonize the wholeblood dataset using the harmony method.
# We load in the datasets from CSV files:
raw_guinea= read.table("~/test_data/guinea_rna.csv", header=TRUE , sep =",", stringsAsFactors=FALSE) #quote = input$quote,
raw_tanzi = read.table("~/test_data/tanzania_rna.csv", header=TRUE , sep =",", stringsAsFactors=FALSE)
raw_mali = read.table("~/test_data/mali_rna.csv", header=TRUE , sep =",", stringsAsFactors=FALSE)
# we create the south africa dataset using resutls from the unharmonized_counts file
# The south africa data is counts, so the unharmonized counts should have th correctly formatted data
raw_all = read.table("~/test_data/unharmonized_counts.csv", header=TRUE , sep =",", stringsAsFactors=FALSE)
# South Africa samples occupy columns 111-355; column 2 holds gene symbols.
raw_sa = raw_all[ , c(2,111:355) ]
raw_guinea[1:10,1:10]
# check that mali is actually counts per million
raw_mali[1:10,1:10]
dim(raw_mali)
colSums(raw_mali[,2:81] )
# IT's currently not summing to 1 million
dim(raw_sa)
raw_sa[1:10,1:10]
#############
### compare to unharmonized_data.csv
#############
# Exploratory checks comparing the per-country files against the combined
# unharmonized counts file (duplicate symbols, ordering, column layout).
#Compare data against the unharmonized counts file
length( unique( raw_guinea[,1] ))
length( raw_guinea[,1] )
raw_all = read.table("~/test_data/unharmonized_counts.csv", header=TRUE , sep =",", stringsAsFactors=FALSE)
# check raw_all against raw_guinea
dim(raw_guinea)
dim(raw_tanzi)
names(raw_all)
raw_all[1:10,1:4]
raw_guinea[1:10,1:3]
raw_guinea[ which( raw_guinea$hgnc=="AARS" ), 1:3]
# The datasets are the same just in different order
# The mali dataset is changed
names(raw_mali)
raw_mali[1:10,1:3]
names(raw_all)
raw_all[1:10,c(2,31,32)]
#############
### Remove Duplicates
#############
# Guinea - Duplicates
# Guinea has duplicated gene symbols; collapse them by summing counts of all
# numeric columns per hgnc symbol.
#length(unique(raw_guinea$hgnc))
#length(raw_guinea$hgnc)
raw_guinea2 = ddply( raw_guinea,"hgnc", numcolwise(sum) )
# check that it works
#length(unique(raw_guinea$hgnc))
#length(raw_guinea$hgnc)
#raw_guinea$hgnc[ which( duplicated(raw_guinea$hgnc) ) ]
#raw_guinea[ which(raw_guinea$hgnc=="RPS27") ,]
#raw_guinea2[ which(raw_guinea2$hgnc=="RPS27") ,]
# Tanzania - No duplicates
length(unique(raw_tanzi$GeneSymbol))
# BUG FIX: was `raw_tanzi$GeneSymbo` (missing "l"), which only worked via
# `$` partial matching; spell the column name out in full.
length(raw_tanzi$GeneSymbol)
raw_tanzi[1:10,1:10]
# South Africa - No duplicates
#length( raw_sa$hgnc )
#length( unique(raw_sa$hgnc ))
# Mali - No duplicates
#length( raw_mali$hgnc )
#length( unique(raw_mali$hgnc ))
#############
### TPM Function
#############
#counts = raw_guinea2 # for testing
#counts = raw_tanzi # for testing
# TPM Function
# Convert a raw counts table to TPM (transcripts per million).
#
# Args:
#   counts: data.frame whose first column holds HGNC gene symbols and whose
#           remaining columns hold raw counts, one column per sample.
# Returns:
#   data.frame with an "hgnc" symbol column followed by TPM values; genes
#   without a retrievable gene length are dropped.
# Note: queries Ensembl via biomaRt, so it requires network access.
TPM_Converter <- function( counts ) {
# setup: Get gene lengths from Ensembl gene coordinates.
human <- useMart("ensembl", dataset="hsapiens_gene_ensembl")
gene_coords=getBM(attributes=c("hgnc_symbol", "start_position","end_position"), filters="hgnc_symbol", values=counts[,1], mart=human)
gene_coords$size=gene_coords$end_position - gene_coords$start_position
gene_coords$end_position = NULL
gene_coords$start_position = NULL
#dim(gene_coords2)
#dim(counts)
# BUG FIX: removed a stray `dim(counts_small)` here — counts_small is not
# defined until below, so the call errored in a clean session.
# Only keep genes with corresponding gene lengths, sorted alphabetically so
# sizes and counts refer to the same gene on every row.
gene_coords2 = gene_coords[ !duplicated(gene_coords$hgnc_symbol) ,] # remove duplicates from gene sizes
counts_small = counts[ counts[,1] %in% gene_coords2$hgnc_symbol ,] # only keep genes with gene sizes
counts_small = counts_small[order(counts_small[,1]),] # order alphabetically
gene_coords3 = gene_coords2[ gene_coords2$hgnc_symbol %in% counts_small[,1] ,] # only keep genes with gene sizes
gene_coords3 = gene_coords3[order(gene_coords3$hgnc_symbol),] # order alphabetically
# Sanity check: prints TRUE when counts and gene sizes are row-aligned.
print( all( gene_coords3$hgnc_symbol == counts_small[,1] ) )
# Length-normalise counts, then scale each sample so its TPM sums to 1e6.
counts_gene = sweep( counts_small[,-1], MARGIN = 1, STATS = gene_coords3$size, FUN = "/")
scaling_factor=colSums(counts_gene)/1000000
TPM = sweep( counts_gene, MARGIN = 2, STATS = scaling_factor, FUN = "/")
TPM_final = cbind( counts_small[,1], TPM )
names(TPM_final)[1] = "hgnc"
return(TPM_final)
#counts[1:6,1:10] #head(gene_coords2$size) #141403.840 / 1491100 #TPM2[1:6,1:10]
#counts2 = round( counts ) #counts2[1:10,1:10]
}
#############
### Convert Data to TPM
#############
# Convert Guinea, Tanzania, and South Africa
tpm_guinea = TPM_Converter(raw_guinea2)
tpm_tanzi = TPM_Converter( raw_tanzi )
tpm_sa = TPM_Converter( raw_sa )
# Make datasets look slightly different
# NOTE(review): sample() is unseeded, so the row shuffles below are not
# reproducible across runs — add set.seed() if determinism matters.
# Guinea: shuffle rows and rename first column and add a duplicate row
rows <- sample(nrow(tpm_guinea))
tpm_guinea2 = tpm_guinea[rows,]
names(tpm_guinea2)[1]="gene_symbol"
tpm_guinea2 = rbind( tpm_guinea2, tpm_guinea2[1,] ) # duplicate a row
# Tanzania: shuffle rows
rows <- sample(nrow(tpm_tanzi))
tpm_tanzi2 = tpm_tanzi[rows,]
# Rescale Mali
# Mali is already length-normalised, so only rescale each sample to sum 1e6.
scaling_factor=colSums(raw_mali[,-1])/1000000
tpms = sweep( raw_mali[,-1], MARGIN = 2, STATS = scaling_factor, FUN = "/")
tpm_mali = cbind( raw_mali[,1], tpms )
names(tpm_mali)[1] = "hgnc"
#tpm_mali[1:20,1:10]
#dim(raw_mali)
#olSums( tpm_mali[,2:81] )
#head(raw_mali[,-1])
#colSums(tpm_guinea[,2:5])
#colSums(tpm_tanzi[,3:6])
#colSums(tpm_sa[,33:35])
# Check it works
#tpm_guinea[1:10,1:10]
#dim(TPM_final)
#TPM_final[1:10,1:10]
#colSums(TPM_final[,-1])
#safety=TPM_final
#############
### Save Datasets
#############
write.csv( tpm_tanzi2, "tpm_data/tanzania_tpm.csv", row.names=FALSE)
write.csv( tpm_guinea2, "tpm_data/guinea_tpm.csv", row.names=FALSE)
write.csv( tpm_mali, "tpm_data/mali_tpm.csv", row.names=FALSE)
write.csv( tpm_sa, "tpm_data/south_africa_tpm.csv", row.names=FALSE)
|
96bff821923b8f8ef9519573cf7ad5f590fd5bb9
|
bd4a9e6c4e7f83f2f6ac1a750cefd17fe3ee3598
|
/proc_CITIPM_v1_0.R
|
1a351a84ef872f034294fe836a96c303b0d1eef4
|
[] |
no_license
|
castillosebastian/JUSTAT
|
e125ece65a8e9f032ecef4cf442ba220ce111a5f
|
63f11a714a8a446f0a90bf14fda3963c455db26e
|
refs/heads/master
| 2021-01-18T15:40:52.211027
| 2017-08-15T14:51:13
| 2017-08-15T14:51:13
| 100,383,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,458
|
r
|
proc_CITIPM_v1_0.R
|
# JUSTAT is a free software developed for official statistics of Supreme Court
# of Entre Ríos (STJER), Argentina, Office of Planification Management and
# Statistics (APGE)
# V.1.0
# 13-06-17
# Authors
# JUSTAT: Lic. Sebastián Castillo, Ing. Federico Caballaro y Lic. Paolo Orundes
############################ proc_CITIPM ###################################
# Librerías
library(data.table)
library(lubridate)
library(stringr)
library(purrr)
library(stringdist)
library(dplyr)
library(tidyr)
library(tibble)
proc_CITIPM <- function(id_archivo) {
  # Process newly admitted CITIPM submissions and append per-court
  # (radicacion) / per-proceeding-type (tproc) case counts to the
  # accumulated CITIPM database on disk.
  # NOTE(review): 'id_archivo' is accepted but never used — confirm whether
  # it was meant to restrict processing to a single file.
  BD_adm_ingresos <- fread("~/JUSTAT/BD/BD_adm_ingresos.csv")
  BD_CITIPM <- fread("~/JUSTAT/BD/BD_CITIPM.csv")
  # Resume after the last file id already folded into BD_CITIPM.
  ultimo_procesado <- BD_CITIPM$id_archivos[which.max(BD_CITIPM$id_archivos)]
  BD_adm_ingresos <- BD_adm_ingresos[BD_adm_ingresos$id_archivos >
                                       ultimo_procesado &
                                       BD_adm_ingresos$id_operacion == "CITIPM" &
                                       BD_adm_ingresos$rec_estado == "admitido"
                                     , ]
  # Read every admitted raw file (Latin-1 encoded; empty strings become NA).
  lista_tbs_prim_CITIPM <- map(BD_adm_ingresos$ruta,
                               fread, encoding = "Latin-1", na.strings = "")
  # Standardise column names, but only for well-formed 6-column tables.
  # (The original wrapped this in try() and a redundant 'else next()';
  # assigning a fixed 6-name vector to a verified 6-column table is safe.)
  for (i in seq_along(lista_tbs_prim_CITIPM)) {
    if (length(lista_tbs_prim_CITIPM[[i]]) == 6) {
      colnames(lista_tbs_prim_CITIPM[[i]]) <-
        c("caratula", "tproc", "finic", "nro_receptoria",
          "radicacion", "origenOmedpriv")
    }
  }
  # Keep only the 6-column tables; malformed files are silently skipped.
  indice_6col <- lengths(lista_tbs_prim_CITIPM) == 6
  lista_tbs_prim_CITIPM <- lista_tbs_prim_CITIPM[indice_6col]
  # Summarise each file and tag it with its period / file-id / reporting
  # office taken from the admissions registry (preallocated, not grown).
  lista_inic_xorgano <- vector("list", length(lista_tbs_prim_CITIPM))
  for (i in seq_along(lista_tbs_prim_CITIPM)) {
    resumen <- lista_tbs_prim_CITIPM[[i]] %>%
      group_by(radicacion, tproc) %>%
      summarise(cantidad_procesos = n())
    resumen$periodo <- BD_adm_ingresos$id_periodo[indice_6col][[i]]
    resumen$id_archivos <- BD_adm_ingresos$id_archivos[indice_6col][[i]]
    resumen$informante <- BD_adm_ingresos$id_organismo[indice_6col][[i]]
    lista_inic_xorgano[[i]] <- resumen
  }
  BD_dfs <- bind_rows(lista_inic_xorgano)
  BD_dfs <- select(BD_dfs, id_archivos, informante, periodo, radicacion,
                   tproc, cantidad_procesos)
  # Append (no header) so the accumulated database grows incrementally.
  write.table(BD_dfs, file = "~/JUSTAT/BD/BD_CITIPM.csv",
              sep = ",", row.names = FALSE, col.names = FALSE, append = TRUE)
}
|
fa46a0fa3f57b21cd483cf09465c75055249d7da
|
52586df6b1df22e19750306185ee69a7b09abf42
|
/FastIEM/src/GMM/utils/algos.R
|
3a39c39f1a22bdfc88886cb3bd92aa7ef35fbc21
|
[] |
no_license
|
BelhalK/AccelerationTrainingAlgorithms
|
5d1390f5a5cb6f24f59f2c06073040056014aa64
|
0cc5f4405ad103f704cd7c6259762a66fb6bf37f
|
refs/heads/master
| 2023-07-25T02:28:38.095277
| 2020-10-30T09:14:28
| 2020-10-30T09:14:28
| 94,530,148
| 0
| 0
| null | 2023-07-06T21:20:14
| 2017-06-16T09:46:26
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 5,616
|
r
|
algos.R
|
require(ggplot2)
require(gridExtra)
require(reshape2)
mixt.simulate <- function(n, weight, mu, sigma)
{
  # Draw n observations from a univariate Gaussian mixture.
  #
  # n      : number of observations to simulate
  # weight : mixing proportions (length G)
  # mu     : component means (length G)
  # sigma  : component standard deviations (length G)
  #
  # Returns a numeric vector of length n.  NOTE: observations are returned
  # grouped by component (all component-1 draws first, then component 2,
  # ...), not in the order the latent labels Z were drawn — this preserves
  # the original behaviour.
  G <- length(mu)
  Z <- sample(1:G, n, prob = weight, replace = TRUE)
  # Preallocate one slot per component instead of growing with c() in a loop.
  draws <- vector("list", G)
  for (g in seq_len(G)) {
    draws[[g]] <- rnorm(sum(Z == g), mu[g], sigma[g])
  }
  unlist(draws)
}
#-------------------------------------
mixt.em <- function(x, theta0, K)
{
  # Batch EM for a univariate Gaussian mixture.
  #
  # x      : numeric data vector
  # theta0 : initial parameters, a list with components p, mu, sigma
  # K      : number of EM iterations
  #
  # Returns a data frame with one row per iteration (plus initial values)
  # and columns iteration, p1..pG, mu1..muG, sigma1..sigmaG.
  # Relies on step.E() and step.M() being defined elsewhere.
  G <- length(theta0$mu)  # BUG FIX: was length(mu), an undefined global
  n <- length(x)          # BUG FIX: step.M(s, n) previously read a global n
  col.names <- c("iteration", paste0("p", 1:G), paste0("mu", 1:G), paste0("sigma", 1:G))
  theta.est <- matrix(NA, K + 1, 3 * G + 1)
  theta.est[1, ] <- c(0, theta0$p, theta0$mu, theta0$sigma)
  theta <- theta0
  for (k in seq_len(K))  # seq_len avoids the 1:0 trap when K == 0
  {
    s <- step.E(x, theta)
    theta <- step.M(s, n)
    theta.est[k + 1, ] <- c(k, theta$p, theta$mu, theta$sigma)
  }
  df <- as.data.frame(theta.est)
  names(df) <- col.names
  return(df)
}
mixt.iem <- function(x, theta0, K, nbr)
{
  # Incremental EM for a univariate Gaussian mixture: each iteration
  # refreshes the responsibilities of only 'nbr' observations before
  # re-running the M step on the full sufficient statistics.
  #
  # x      : numeric data vector
  # theta0 : initial parameters, a list with components p, mu, sigma
  # K      : number of iterations
  # nbr    : mini-batch size (observations updated per iteration)
  #
  # Returns a data frame of parameter estimates per iteration.  Relies on
  # compute.tau(), compute.stat(), compute.stat_iem() and step.M() being
  # defined elsewhere.
  G <- length(theta0$mu)  # BUG FIX: was length(mu), an undefined global
  n <- length(x)          # BUG FIX: previously read from the global workspace
  col.names <- c("iteration", paste0("p", 1:G), paste0("mu", 1:G), paste0("sigma", 1:G))
  theta.est <- matrix(NA, K + 1, 3 * G + 1)
  theta.est[1, ] <- c(0, theta0$p, theta0$mu, theta0$sigma)
  tau <- compute.tau(x, theta0)
  theta <- theta0
  tau.old <- compute.tau(x[1], theta0)
  s <- compute.stat_iem(x, tau, tau.old, 1)
  # Fixed visiting order: one random permutation of 1..n, recycled K/n times.
  l <- rep(sample(1:n, n), K / n)
  i <- 1:nbr
  for (k in seq_len(K))
  {
    # Restart the sweep over the data every n/nbr iterations.
    if (k %% (n / nbr) == 1)
    {
      i <- 1:nbr
    }
    # Refresh responsibilities of the current mini-batch only.
    tau[l[i], ] <- compute.tau(x[l[i]], theta)
    s <- compute.stat(x, tau)
    theta <- step.M(s, n)
    theta.est[k + 1, ] <- c(k, theta$p, theta$mu, theta$sigma)
    i <- i + nbr
  }
  df <- as.data.frame(theta.est)
  names(df) <- col.names
  return(df)
}
# mixt.oem <- function(x, theta0, K,nbr)
# {
# G<-length(mu)
# kiter = 1:K
# rho = 1/(kiter+1)
# col.names <- c("iteration", paste0("p",1:G), paste0("mu",1:G), paste0("sigma",1:G))
# theta.est <- matrix(NA,K+1,3*G+1)
# theta.est[1,] <- c(0, theta0$p, theta0$mu, theta0$sigma)
# theta<-theta0
# tau<-compute.tau(x,theta)
# s<-compute.stat(x,tau)
# s.old <- s
# theta<-step.M(s,n)
# n<-length(x)
# l <- NULL
# l <- rep(sample(1:n,n), K/n)
# i <- 1:nbr
# for (k in 1:K)
# {
# if (k%%(n/nbr) == 1)
# {
# i<-1:nbr
# }
# tau.new <- compute.tau(x[l[i]],theta)
# s <- compute.stat_oem(x,tau,s.old, tau.new, l[i],rho[k])
# s.old <- s
# i <- i+nbr
# theta<-step.M(s,n)
# theta.est[k+1,] <- c(k, theta$p, theta$mu, theta$sigma)
# }
# df <- as.data.frame(theta.est)
# names(df) <- col.names
# return(df)
# }
# mixt.oemvr <- function(x, theta0, K,nbr,rho)
# {
# G<-length(mu)
# col.names <- c("iteration", paste0("p",1:G), paste0("mu",1:G), paste0("sigma",1:G))
# rho = 0.0001
# theta.est <- matrix(NA,K+1,3*G+1)
# theta.est[1,] <- c(0, theta0$p, theta0$mu, theta0$sigma)
# tau <- compute.tau(x,theta0)
# tau.old.init <- tau[1,]
# theta<-theta0
# s<-compute.stat(x,tau)
# s.old <- s
# s.old.init <- s
# l <- rep(sample(1:n,n), K/n)
# i <- 1:nbr
# for (k in 1:K)
# {
# if (k%%(n/nbr) == 1)
# {
# i<-1:nbr
# tau.old.init <- compute.tau(x[i],theta)
# s.old.init <- compute.stat(x,tau)
# }
# tau.new <- compute.tau(x[i],theta)
# s <- compute.stat_oemvr(x,tau, tau.new,s.old,s.old.init,tau.old.init, i,rho)
# s.old <- s
# i <- i+nbr
# theta<-step.M(s,n)
# theta.est[k+1,] <- c(k, theta$p, theta$mu, theta$sigma)
# }
# df <- as.data.frame(theta.est)
# names(df) <- col.names
# return(df)
# }
mixt.oem <- function(x, theta0, K, nbr, rho)
{
  # Online EM for a univariate Gaussian mixture: sufficient statistics are
  # updated by stochastic approximation with step sizes rho[k] from
  # mini-batches of size 'nbr'.
  #
  # x      : numeric data vector
  # theta0 : initial parameters, a list with components p, mu, sigma
  # K      : number of iterations
  # nbr    : mini-batch size
  # rho    : vector of step sizes, indexed by iteration
  #
  # Returns a data frame of parameter estimates per iteration.  Relies on
  # compute.tau(), compute.stat() and step.M() being defined elsewhere.
  G <- length(theta0$mu)  # BUG FIX: was length(mu), an undefined global
  n <- length(x)          # BUG FIX: previously read from the global workspace
  col.names <- c("iteration", paste0("p", 1:G), paste0("mu", 1:G), paste0("sigma", 1:G))
  theta.est <- matrix(NA, K + 1, 3 * G + 1)
  theta.est[1, ] <- c(0, theta0$p, theta0$mu, theta0$sigma)
  theta <- theta0
  # Initialise the sufficient statistics from the full data set.
  tau <- compute.tau(x, theta)
  s <- compute.stat(x, tau)
  # Fixed visiting order: one random permutation of 1..n, recycled K/n times.
  l <- rep(sample(1:n, n), K / n)
  i <- 1:nbr
  for (k in seq_len(K))
  {
    if (k %% (n / nbr) == 1)
    {
      i <- 1:nbr
    }
    tau.indiv.new <- compute.tau(x[l[i]], theta)
    # Stochastic-approximation update of the sufficient statistics.
    s$s1 <- s$s1 + rho[k] * (tau.indiv.new - s$s1)
    s$s2 <- s$s2 + rho[k] * (x[l[i]] * tau.indiv.new - s$s2)
    s$s3 <- s$s3 + rho[k] * (x[l[i]]^2 * tau.indiv.new - s$s3)
    theta <- step.M(s, n)
    theta.est[k + 1, ] <- c(k, theta$p, theta$mu, theta$sigma)
    i <- i + nbr
  }
  df <- as.data.frame(theta.est)
  names(df) <- col.names
  return(df)
}
mixt.oemvr <- function(x, theta0, K, nbr, rho)
{
  # Online EM with a variance-reduced update (SVRG-style structure:
  # per-epoch anchor parameters plus a full-data correction term —
  # presumably; confirm against the accompanying paper/notes).
  #
  # x      : numeric data vector
  # theta0 : initial parameters, a list with components p, mu, sigma
  # K      : number of iterations
  # nbr    : mini-batch size
  # rho    : scalar step size (unlike mixt.oem, not indexed by iteration)
  #
  # Returns a data frame of parameter estimates per iteration.  Relies on
  # compute.tau(), compute.stat() and step.M() being defined elsewhere.
  G <- length(theta0$mu)  # BUG FIX: was length(mu), an undefined global
  n <- length(x)          # BUG FIX: previously read from the global workspace
  col.names <- c("iteration", paste0("p", 1:G), paste0("mu", 1:G), paste0("sigma", 1:G))
  theta.est <- matrix(NA, K + 1, 3 * G + 1)
  theta.est[1, ] <- c(0, theta0$p, theta0$mu, theta0$sigma)
  theta <- theta0
  # Initialise the sufficient statistics from the full data set.
  tau <- compute.tau(x, theta)
  s <- compute.stat(x, tau)
  l <- rep(sample(1:n, n), K / n)
  i <- 1:nbr
  for (k in seq_len(K))
  {
    if (k %% (n / nbr) == 1)
    {
      i <- 1:nbr
      theta.e.0 <- theta  # epoch anchor for the variance-reduced correction
    }
    tau.indiv.new <- compute.tau(x[l[i]], theta)
    s.indiv.new <- x[l[i]] * tau.indiv.new
    tau.indiv.e.0 <- compute.tau(x[l[i]], theta.e.0)
    s.indiv.e.0 <- x[l[i]] * tau.indiv.e.0
    tau.e.0 <- compute.tau(x, theta.e.0)
    s.e.0 <- x %*% tau.e.0
    # Update statistics: mini-batch difference plus full-data anchor term.
    s$s1 <- s$s1 + rho * (tau.indiv.new - tau.indiv.e.0 + colSums(tau.e.0) - s$s1)
    s$s2 <- s$s2 + rho * (s.indiv.new - s.indiv.e.0 + s.e.0 - s$s2)
    s$s3 <- s$s3 + rho * (x[l[i]] * s.indiv.new - x[l[i]] * s.indiv.e.0 + (x^2) %*% tau.e.0 - s$s3)
    theta <- step.M(s, n)
    theta.est[k + 1, ] <- c(k, theta$p, theta$mu, theta$sigma)
    i <- i + nbr
  }
  df <- as.data.frame(theta.est)
  names(df) <- col.names
  return(df)
}
|
3176f072b452bd4063bb3eebbd6d360bb8d2d0b4
|
dc0dfacaa2d82b87ea71a9e951ab2716d5459dd7
|
/R/baseline.norm.cl.R
|
049507cf46e954946d71e1df15e84dcbf6e0f748
|
[] |
no_license
|
navinlabcode/copykat
|
7e797eaad48a5a98883024dc0ee2194f9d7010e0
|
b795ff793522499f814f6ae282aad1aab790902f
|
refs/heads/master
| 2023-09-05T13:42:47.124206
| 2022-09-23T17:43:44
| 2022-09-23T17:43:44
| 231,153,766
| 158
| 53
| null | 2021-03-05T22:20:36
| 2019-12-31T22:45:07
|
R
|
UTF-8
|
R
| false
| false
| 2,121
|
r
|
baseline.norm.cl.R
|
#' find a cluster of diploid cells with integrative clustering method
#'
#' @param norm.mat.smooth smoothed data matrix; genes in rows; cell names in columns.
#' @param min.cells minimal number of cells per cluster.
#' @param n.cores number of cores for parallel computing.
#'
#' @return 1) predefined diploid cell names; 2) clustering results; 3) inferred baseline.
#'
#' @examples
#' test.bnc <- baseline.norm.cl(norm.mat.smooth=norm.mat.smooth, min.cells=5, n.cores=10)
#'
#' test.bnc.cells <- test.bnc$preN
#' @export
baseline.norm.cl <- function(norm.mat.smooth, min.cells = 5, n.cores = 1){
  # BUG FIX: the default was 'n.cores = n.cores', a self-referential promise
  # that errors ("promise already under evaluation") whenever the argument
  # is omitted; 1 is a safe serial default and callers passing n.cores are
  # unaffected.
  #
  # Cluster cells on the smoothed expression matrix and take the cluster
  # with the smallest fitted Gaussian sigma as the presumed-diploid
  # baseline.  Returns list(basel, WNS, preN, cl).
  d <- parallelDist::parDist(t(norm.mat.smooth), threads = n.cores) ## use smooth and segmented data to detect intra-normal cells
  km <- 6
  fit <- hclust(d, method = "ward.D2")
  ct <- cutree(fit, k = km)
  # Reduce k until every cluster has more than min.cells members (floor k = 2).
  while (!all(table(ct) > min.cells)) {
    km <- km - 1
    ct <- cutree(fit, k = km)
    if (km == 2) {
      break
    }
  }
  SDM <- NULL
  SSD <- NULL
  # Fit a 3-component Gaussian mixture to each cluster's per-gene medians;
  # record the (shared) mixture sigma and the raw SD per cluster.
  # (The original loop body ended with a dead 'i <- i + 1'; 'for' manages
  # its own index, so that line has been removed.)
  for (i in min(ct):max(ct)) {
    data.c <- apply(norm.mat.smooth[, which(ct == i)], 1, median)
    sx <- max(c(0.05, 0.5 * sd(data.c)))
    GM3 <- mixtools::normalmixEM(data.c, lambda = rep(1, 3) / 3,
                                 mu = c(-0.2, 0, 0.2), sigma = sx,
                                 arbvar = FALSE, ECM = FALSE, maxit = 5000)
    SDM <- c(SDM, GM3$sigma[1])
    SSD <- c(SSD, sd(data.c))
  }
  # Average silhouette width of a 2-way cut: low values suggest a weak split.
  wn <- mean(cluster::silhouette(cutree(fit, k = 2), d)[, "sil_width"])
  ####
  # F-test comparing the largest to the smallest cluster sigma.
  PDt <- pf(max(SDM)^2 / min(SDM)^2, nrow(norm.mat.smooth), nrow(norm.mat.smooth), lower.tail = FALSE)
  if (wn <= 0.15 || (!all(table(ct) > min.cells)) || PDt > 0.05) {
    WNS <- "unclassified.prediction"
    print("low confidence in classification")
  } else {
    WNS <- ""
  }
  # Baseline = per-gene median over the lowest-sigma (presumed diploid) cluster.
  basel <- apply(norm.mat.smooth[, which(ct %in% which(SDM == min(SDM)))], 1, median)
  preN <- colnames(norm.mat.smooth)[which(ct %in% which(SDM == min(SDM)))]
  ### return both baseline and warning message
  RE <- list(basel, WNS, preN, ct)
  names(RE) <- c("basel", "WNS", "preN", "cl")
  return(RE)
}
|
de024a647786e86441600e633162d088b5e01e37
|
b8edf0c8c2e7ace0427623bb7d0915d757261c9d
|
/man/choose_rho.Rd
|
873566ec9eae813adbfaacad53e1b55a5d6ac712
|
[
"MIT"
] |
permissive
|
ressomlab/INDEED
|
63326276b316c1248f1473573571a572067396c7
|
b935993c28d4e136297075d50549d7036eeddc13
|
refs/heads/master
| 2022-08-12T02:00:51.612094
| 2022-07-29T19:40:12
| 2022-07-29T19:40:12
| 130,506,907
| 9
| 8
| null | 2019-10-30T20:36:20
| 2018-04-21T19:45:42
|
R
|
UTF-8
|
R
| false
| true
| 561
|
rd
|
choose_rho.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_function.R
\name{choose_rho}
\alias{choose_rho}
\title{Draw error curve}
\usage{
choose_rho(data, n_fold, rho)
}
\arguments{
\item{data}{This is a matrix.}
\item{n_fold}{This parameter specifies the n in n-fold cross-validation.}
\item{rho}{These are the regularization parameter values to be evaluated in terms of their errors.}
}
\value{
A list of errors and their corresponding \eqn{log(rho)}.
}
\description{
This function draws error curve using cross-validation.
}
|
9334d61fefbdddc2103e7bdf666bfaa3e041c4c4
|
d11dba6dafe5f5204743e03662d8d6d216672393
|
/man/ip_random.Rd
|
2a416f7c8e08ae9f78bb0c8cf749bcbe0bef346c
|
[] |
no_license
|
ktargows/iptools
|
d7b6e260296750198444b0edde26a09df4ad3630
|
d3d85680cd85d276672a42f4bbdeb8fac3d8758e
|
refs/heads/master
| 2021-01-11T01:55:03.682784
| 2016-10-06T01:54:41
| 2016-10-06T01:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 821
|
rd
|
ip_random.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generators.R
\name{ip_random}
\alias{ip_random}
\title{generate random IPv4 IP addresses}
\usage{
ip_random(n)
}
\arguments{
\item{n}{the number of IP addresses to randomly generate.}
}
\value{
a character vector of randomly-generated IPv4 addresses,
in their dotted-decimal form.
}
\description{
\code{ip_random} generates random IP addresses.
These currently only follow IPv4 standards, since IPv6 addresses
are too large to be stored in R in their numeric form. All
IPs generated this way are valid.
}
\examples{
ip_random(1)
#[1] "49.20.57.31"
}
\seealso{
\code{\link{ip_to_numeric}} for converting \code{ip_random}'s
output to its numeric form, and \code{\link{range_generate}} for
generating all IP addresses within a specific range.
}
|
e2e7a0479dd6e866a09eb180a71355a400c89daf
|
2b864fa89488650a9840c49f8312ebccc3fefffc
|
/ggplot2 - hadley wickham/chapter4.R
|
017e66aa49086c129f99fbb173e219172bc3a011
|
[] |
no_license
|
harryyang1982/r-codes
|
8147d3f70fd7cf435ecb34d1bc1acd921b75f7bd
|
89fb033f11f26c285837c0e04b74d6453c16be50
|
refs/heads/master
| 2020-12-30T11:52:12.212063
| 2018-04-12T06:25:58
| 2018-04-12T06:25:58
| 91,541,975
| 0
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 740
|
r
|
chapter4.R
|
# 4.2 Building a Scatterplot
# Worked examples from "ggplot2" (Wickham), chapter 4: the same aesthetic
# mapping rendered with different geoms to show how layers compose.
library(tidyverse)
# Scatterplot coloured by cylinder count.
ggplot(mpg, aes(displ, hwy, color=factor(cyl))) +
geom_point()
# Same mapping drawn as lines (one line per colour group); legend suppressed.
ggplot(mpg, aes(displ, hwy, color=factor(cyl))) +
geom_line() +
theme(legend.position = "none")
# Same mapping as outline-only bars (fill=NA keeps only the borders).
ggplot(mpg, aes(displ, hwy, color=factor(cyl))) +
geom_bar(stat="identity", position = "identity", fill=NA) +
theme(legend.position="none")
# Points plus a per-group linear fit.
ggplot(mpg, aes(displ, hwy, color=factor(cyl))) +
geom_point() +
geom_smooth(method = "lm")
# Points plus the default (loess/gam) smoother.
ggplot(mpg, aes(displ, hwy, color=factor(cyl))) +
geom_point() +
geom_smooth()
# 4.2.2 Scaling
vignette("ggplot2-specs")
# 4.3 Adding Complexity
# One panel per model year via facetting.
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth() +
facet_wrap(~year)
# 4.4 Components of the Layered Grammar
|
7f552a406cf81e128f771069ae7537c01593b2c1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ggtern/R/utilities.R
|
42d687dab4dd2228ccba7cf521499e99af95125f
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,739
|
r
|
utilities.R
|
#'Internal Functions
#'
#'@description INTERNAL FUNCTIONS: \code{ggtern} makes use of several non-exported internal functions, list are as follows:
#'@keywords internal
#'@name zzz-internal
#'@rdname undocumented
NULL
#' \code{ifthenelse} function takes input arguments \code{x}, \code{a} and \code{b} and returns \code{a} if \code{x} is \code{TRUE}, else, returns \code{b}
#' @param x logical input to check
#' @param a value to return if \code{x} is TRUE
#' @param b value to return if \code{x} is FALSE
#' @keywords internal
#' @rdname undocumented
ifthenelse <- function(x, a, b) {
  # Scalar conditional helper: return a when x is TRUE, otherwise b.
  # The condition must be logical; anything else is a caller error.
  if (!is.logical(x)) stop("x argument must be logical")
  if (x) a else b
}
#' \code{is.numericor} function takes input arguments \code{A} and \code{B} and returns \code{A} if \code{A} is numeric, else, returns \code{B}
#' @param A value to return if numeric
#' @param B numeric value to return if \code{A} is NOT numeric
#' @keywords internal
#' @rdname undocumented
is.numericor <- function(A, B) {
  # Return A when it is numeric, otherwise fall back to B.
  # The fallback itself is required to be numeric.
  if (!is.numeric(B)) {
    stop("b must be numeric")
  }
  if (is.numeric(A)) A else B
}
# Null-coalescing infix helper: fall back to b only when a is NULL.
"%||%" <- function(a, b) {
  if (is.null(a)) b else a
}
#' \code{find_global_tern} is a function that conducts a named search for the \code{name} object instance, within the \code{env} environment.
#' If an instance doesn't exist within the \code{env} environment, a search is then conducted within the \code{ggtern} and \code{ggplot2}
#' namespaces \emph{(in that order)}. This is a modified version of the original source as provided in \code{ggplot2}, which has the same functionality, however, the modification is such that the function
#' now additionally searches within the \code{ggtern} namespace prior to the \code{ggplot2} namespace.
#' @param name character name of object to search for
#' @param env environment to search within as first priority
#' @keywords internal
#' @rdname undocumented
find_global_tern <- function (name, env = environment()){
  # Resolve 'name' in 'env' first, then in the ggtern namespace, then in
  # the ggplot2 namespace; returns NULL when it is found nowhere.
  if (!is.character(name)) { stop("'name' must be provided as a character") }
  # BUG FIX: the original tested inherits(environment(), "environment"),
  # which is always TRUE; the intent was to validate the 'env' argument.
  if (!inherits(env, "environment")) { stop("'env' must inherit the environment class") }
  if (exists(name, env)) { return(get(name, env)) }
  nsenv <- asNamespace("ggtern")
  if (exists(name, nsenv)) { return(get(name, nsenv)) }
  nsenv <- asNamespace("ggplot2")
  if (exists(name, nsenv)) { return(get(name, nsenv)) }
  NULL
}
#' \code{getBreaks} is a function that calculates the Breaks for Major or Minor Gridlines based
#' on the input limits.
#' @param limits the scale limits
#' @param isMajor major or minor grids
#' @param nMajor number of major breaks
#' @param nMinor number of minor breaks
#' @keywords internal
#' @rdname undocumented
getBreaks <- function(limits, isMajor, nMajor = 5, nMinor = 2 * nMajor) {
  # Compute major or minor gridline breaks for the given scale limits.
  # The minimum of the limits is always excluded from the result.
  # Fall back to the unit interval when limits are absent or non-numeric.
  if (is.null(limits)) { limits <- c(0, 1) }
  if (!all(is.numeric(limits))) { limits <- c(0, 1) }
  if (diff(range(limits)) == 0) {
    # Degenerate (zero-width) limits: use the package-level defaults.
    opt <- if (isMajor) "tern.breaks.default" else "tern.breaks.default.minor"
    return(getOption(opt))
  }
  breaks <- pretty(limits, n = nMajor)
  if (!isMajor) {
    # Minor breaks sit halfway between majors, clipped to the limits,
    # with the major positions themselves removed.
    span <- range(breaks)
    step <- diff(span) / (length(breaks) - 1)
    halves <- seq(min(breaks) - step, max(breaks) + step, by = step / 2)
    halves <- halves[halves >= min(limits) & halves <= max(limits)]
    breaks <- halves[!halves %in% breaks]
  }
  breaks[!breaks %in% min(limits)]
}
#'
#'
#' \code{tern_dep} is a function that gives a deprecation error, warning, or messsage,
#' depending on version number, it is based of the \code{\link[ggplot2]{gg_dep}} function which is
#' used inside the \code{ggplot2} package
#' @inheritParams ggplot2::gg_dep
#' @keywords internal
#' @rdname undocumented
tern_dep <- function(version, msg) {
# Signal deprecation of a ggtern feature with escalating severity:
# error (defunct), warning (deprecated), or message, depending on how far
# the installed ggtern version has moved past 'version' (the last version
# in which the feature was valid).  Mirrors ggplot2's gg_dep().
v <- as.package_version(version)
cv <- packageVersion("ggtern")
# If current major number is greater than last-good major number, or if
# current minor number is more than 1 greater than last-good minor number,
# give error.
if (cv[[1,1]] > v[[1,1]] || cv[[1,2]] > v[[1,2]] + 1) {
stop(msg, " (Defunct; last used in version ", version, ")",
call. = FALSE)
# If minor number differs by one, give warning
} else if (cv[[1,2]] > v[[1,2]]) {
warning(msg, " (Deprecated; last used in version ", version, ")",
call. = FALSE)
# If only subminor number is greater, give message
} else if (cv[[1,3]] > v[[1,3]]) {
message(msg, " (Deprecated; last used in version ", version, ")")
}
# Called for its side effect; return nothing visibly.
invisible()
}
#internal
# internal: sanitise the first element of x into a plotmath-friendly label
.makeValid <- function(x){
  x <- x[[1]]
  # BUG FIX / idiom: was class(x) == 'character', which is fragile for
  # multi-class objects (length > 1 condition); is.character() is the
  # correct test.
  if (is.character(x)) {
    x <- gsub("%", "'%'", x)                  # quote percent signs for plotmath
    x <- gsub('([[:punct:]])\\1+', '\\1', x)  # collapse repeated punctuation
    x <- gsub(" ", "~", x)                    # plotmath uses ~ for spaces
  }
  x
}
#' \code{arrow_label_formatter} is a function that formats the labels directly adjacent to the ternary arrows.
#' @param label character label
#' @param suffix chacater suffix behind each label
#' @param sep the seperator between label and suffix
#' @keywords internal
#' @rdname undocumented
arrow_label_formatter = function(label, suffix = NULL, sep = "/") UseMethod("arrow_label_formatter")
# Fallback: coerce unknown types to character and reuse the character method.
arrow_label_formatter.default = function(label, suffix = NULL, sep = "/") arrow_label_formatter.character(as.character(label), suffix, sep)
# Calls are handled via the expression method.
arrow_label_formatter.call = function(label, suffix = NULL, sep = "/") arrow_label_formatter.expression(as.expression(label), suffix, sep)
arrow_label_formatter.expression = function(label, suffix = NULL, sep = "/"){
  # BUG FIX: 'suffix == ""' raised "argument is of length zero" whenever
  # suffix was NULL — which is the documented default.
  suffix = if (is.null(suffix) || suffix == "") NULL else suffix
  sep = if (is.null(suffix)) "" else .trimAndPad(sep)
  parse(text = paste(as.character(label), suffix, sep))
}
arrow_label_formatter.character = function(label, suffix = NULL, sep = "/") {
  # BUG FIX: same NULL-suffix guard as the expression method above.
  suffix = if (is.null(suffix) || suffix == "") NULL else suffix
  sep = if (is.null(suffix)) "" else .trimAndPad(sep)
  TeX(paste(label, suffix, sep = sep))
}
.trimAndPad <- function(x){
  # Strip surrounding whitespace; then, if a single character remains,
  # pad it with one space on each side.
  trimmed <- gsub("^(\\s+)", "", gsub("(\\s+)$", "", x))
  if (nchar(trimmed) == 1) {
    trimmed <- sprintf(" %s ", trimmed)
  }
  trimmed
}
#' \code{label_formatter} is a function that formats / parses labels for use in the grid.
#' It delegates to \code{arrow_label_formatter} with an empty suffix and separator,
#' so only the label itself is formatted.
#' @param label character label
#' @keywords internal
#' @rdname undocumented
label_formatter = function(label){ arrow_label_formatter(label,suffix="",sep="") }
#' \code{joinCharacterSeries} is a function will turn a character vector
#' from the format \code{c('a','b','c')} to a single string
#' in the following format: \code{"'a','b' and 'c'"}
#' @param x character vector
#' @author Nicholas Hamilton
#' @keywords internal
#' @rdname undocumented
joinCharacterSeries <- function(x, lastWord = 'and'){
  # Collapse a character vector into a quoted series such as
  # "'a', 'b' and 'c'"; the connective before the final item is lastWord.
  if (!is.character(x) | !is.vector(x)) stop("'x' must be character vector", call. = FALSE)
  n <- length(x)
  if (n > 1) {
    leading <- paste(x[-n], collapse = "', '")
    x <- paste(leading, x[n], sep = sprintf("' %s '", lastWord))
  }
  sprintf("'%s'", x)
}
#' \code{identityInv} is a function which returns exactly the same as \code{\link{identity}} however
#' it can be used within transformation logic via \code{do.call(...)} in the same way as for example
#' \code{\link{ilrInv}} is to \code{\link{ilr}}.
#' @param x input object
#' @author Nicholas Hamilton
#' @keywords internal
#' @rdname undocumented
# Inverse of the identity transformation -- trivially the identity itself;
# provided so transformation code can resolve "<name>Inv" via do.call().
identityInv <- function(z) z
#' \code{getFormulaVars} is a function that returns a list of either dependent or independent variables used
#' in an input formula
#' @param x formula object
#' @param dependent whether to return the dependent variables (TRUE) or the indpenedent variables (FALSE)
#' @rdname undocumented
#' @keywords internal
#' @author Nicholas Hamilton
getFormulaVars = function(x, dependent = TRUE) {
  # Return the variable names from one side of a formula.
  # NOTE(review): dependent = TRUE returns the right-hand side (x[[3]]),
  # matching the original behaviour — the naming may be inverted relative
  # to common usage; confirm with callers before changing.
  # Idiom fix: inherits() replaces the fragile class(x) != 'formula' test.
  if (!inherits(x, 'formula')) stop("x argument must be a formula", call. = FALSE)
  all.vars(x[[if (dependent) 3 else 2]])
}
#' Function to add missing scales and other items to the plot and its coordinates sytem
#' @param ggplot object
#' @rdname undocumented
#' @keywords internal
#' @author Nicholas Hamilton
scales_add_missing_tern <- function(plot){
# Ensure the plot carries every scale its CoordTern coordinate system
# requires, then copy scale/limit/label/theme details onto the coordinate
# object so downstream rendering can read them from one place.
# Returns the (modified) plot.
#Run some checks
stopifnot(inherits(plot,'ggplot'))
stopifnot(inherits(plot$coordinates,'CoordTern'))
#Ensure required scales have been added
rs = plot$coordinates$required_scales
ggint$scales_add_missing(plot,rs,plot$plot_env) ##NH
#plot$scales$scales = plot$scales$scales[!sapply(plot$scales$scales,is.null)]
#Push some details to the coordinates: the scale objects themselves,
#their limits (one entry per required scale), the plot labels, and the
#resolved theme.
plot$coordinates$scales = sapply(rs,plot$scales$get_scales) ##NH
for(r in rs)
plot$coordinates$limits[[r]] = plot$scales$get_scales(r)$limits
plot$coordinates$labels_coord = plot$labels
plot$coordinates$theme = ggint$plot_theme(plot) #NH
#done
plot
}
#' Function to add clipping mask if it isn't already present
#' @param plot ggplot object
#' @rdname undocumented
#' @keywords internal
#' @author Nicholas Hamilton
layers_add_missing_mask = function(plot){
# Append a clipping-mask layer (geom_mask) unless one of the plot's
# existing layers already uses GeomMask.  Returns the (possibly
# augmented) plot.
if(!"GeomMask" %in% unlist(lapply(plot$layers,function(x){ class(x$geom) })))
plot = plot + geom_mask()
plot
}
|
099bc1814b5bc79a5d0d43d37e1b93b4d235df65
|
b4ff9e10ae4ee08998429208ae74586ff402deed
|
/man/plot_worms_grid.Rd
|
6db834669621cf22b010988c85e6790b77aa54c7
|
[] |
no_license
|
chuanboguo/gfdlm
|
61e5869a3e3227a8e70cd5eb86e6da74c69e308a
|
8e878a8b06f14ba7d92f12766de73463f275359c
|
refs/heads/master
| 2022-11-15T01:54:34.299145
| 2020-04-22T05:36:34
| 2020-04-22T05:36:34
| 269,184,929
| 0
| 0
| null | 2020-06-03T20:13:09
| 2020-06-03T20:13:09
| null |
UTF-8
|
R
| false
| true
| 767
|
rd
|
plot_worms_grid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/figures-worm.R
\name{plot_worms_grid}
\alias{plot_worms_grid}
\title{Make a Kobe "worm" timeseries plot with uncertainty}
\usage{
plot_worms_grid(object_list, prob = 0.5, include_historical = TRUE)
}
\arguments{
\item{object_list}{A named list of MSE objects from DLMtool.
Names become scenario names.}
\item{prob}{Tail probability for the quantiles. E.g., 0.5 refers to an
interquartile range.}
\item{include_historical}{Logical: include the historical time?}
}
\value{
A ggplot object
}
\description{
Make a Kobe "worm" timeseries plot with uncertainty
}
\examples{
x <- list()
x[[1]] <- mse_example
x[[2]] <- mse_example
names(x) <- c("Scenario 1", "Scenario 2")
plot_worms_grid(x)
}
|
e8a9245bccbc7c8d84f2478ae0e89a84ea3fe285
|
7ebe128fc17cdc0e2f534dbe5940774e98da4ce8
|
/man/WAIC.Rd
|
26795572ba63c8de8b63a102c100199ad1272dd5
|
[] |
no_license
|
cran/bamlss
|
89f8d08be4599c03120acb9ed097c31916d1ef21
|
5535fb038104cdd3df08eccb92863a778cd56e75
|
refs/heads/master
| 2023-07-17T04:24:23.253981
| 2023-07-04T06:30:02
| 2023-07-04T06:30:02
| 82,776,249
| 2
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,014
|
rd
|
WAIC.Rd
|
\name{WAIC}
\alias{WAIC}
\title{
Watanabe-Akaike Information Criterion (WAIC)
}
\description{
Function returning the Watanabe-Akaike Information Criterion (WAIC) of a fitted model object.
}
\usage{
WAIC(object, ..., newdata = NULL)
}
\arguments{
\item{object}{A fitted model object which contains MCMC samples.}
\item{\dots}{Optionally more fitted model objects.}
\item{newdata}{Optionally, use new data for computing the WAIC.}
}
\value{
A data frame containing the WAIC and estimated number of parameters.
}
\references{
Watanabe S. (2010). Asymptotic Equivalence of {B}ayes Cross Validation and Widely
Applicable Information Criterion in Singular Learning Theory. \emph{The Journal of Machine
Learning Research}, \bold{11}, 3571--3594.
\url{https://jmlr.org/papers/v11/watanabe10a.html}
}
\examples{
\dontrun{d <- GAMart()
b1 <- bamlss(num ~ s(x1), data = d)
b2 <- bamlss(num ~ s(x1) + s(x2), data = d)
WAIC(b1, b2)
}
}
\keyword{regression}
|
88b830f3ec82b3fc857f8f63bbaaa8eee1f441bf
|
6e9eb1a6eee8b578b6d8a530eadef32ccdb4ef4a
|
/content/mapk/mapk_VP.R
|
c76ca7396aa7a328ad8fb42bff11b4ef3499fd99
|
[] |
no_license
|
metrumresearchgroup/ub-cdse-2019
|
72fcbd1544b79b7ed78de5bc00244b8728a9c331
|
018859a3aebbb1d114a39f2684ef501080e68b99
|
refs/heads/master
| 2020-05-07T09:55:00.952992
| 2019-04-18T22:06:00
| 2019-04-18T22:06:00
| 180,396,320
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,583
|
r
|
mapk_VP.R
|
library(mrgsolve)
library(tidyverse)
library(magrittr)
library(reshape2)
library(R.utils)
# Load the virtual-population helper functions.
sourceDirectory("content/script/VPop/")
# Load simulation results (We are trying to match this Population)
sims <- readRDS("content/mapk/mapk_sims.RDS")
sims %<>% filter(label == "GDC")
# Load treatment regimens
reg <- readRDS("content/mapk/mapk_setup.RDS")
reg %<>% filter(label == "GDC")%>%pull(object)
reg <- reg[[1]]
# Compile the mrgsolve model and record its parameter names.
model <- mread("mapk", 'content/mapk/', soloc = 'content/mapk')
m_params <- names(param(model))
# What parameters do we need to explore?
# Parameters with non-zero SD across the reference vpop vary between
# patients; zero-SD columns are treated as fixed initial conditions.
foo <- readRDS("content/mapk/s10vpop_pk.RDS")
(foo %>% summarise_all(sd)%>%gather()%>%filter(value!=0))%>%pull(key)
length((foo %>% summarise_all(sd)%>%gather()%>%filter(value!=0))%>%pull(key))
p_names <- foo %>% summarise_all(sd)%>%gather()%>%filter(value!=0)
# 33 of them
ICs <- foo %>% summarise_all(sd)%>%gather()%>%filter(value==0)
ICnames <- ICs$key
ICs <- foo%>%summarise_all(mean)%>%gather()%>%filter(key %in% ICnames)
ICs %<>% spread(key,value)
# Filter only to ones present in model
p_names %<>% filter(key %in% m_params)%>%pull(key)
# Left with 27
# parameters <- foo%>%group_by(VPOP)%>%slice(1)%>%filter(VPOP==910)%>%gather(key="Name",value="Values")%>%filter(Name %in% p_names)
# Get Parameter Limits
# Search range = observed min/max per parameter, widened by 15% each way.
pLower <- foo %>% summarise_all(.funs = function(x){min(x)*0.85})
pUpper <- foo %>% summarise_all(.funs = function(x){max(x)*1.15})
pLower %<>% gather(key="Name",value="Lower")
pUpper %<>% gather(key="Name",value="Upper")
paramLims <- left_join(pLower,pUpper,by="Name")
paramLims %<>% filter(Name %in% p_names)
# Load model
# Simulate one parameter set through the mapk model and return the tumour
# size at day 56.
#   parameters : numeric vector matched positionally to pnames (NULL = use
#                the model's own defaults)
#   model      : compiled mrgsolve model object
#   pnames     : names for 'parameters'
#   dosing     : dosing event data passed to as.ev()
#   ICs        : one-row data frame of fixed initial-condition columns
#   simulate   : 0 -> return list(NSS = <data.frame>) (the format the
#                plausible-patient generator expects); otherwise return the
#                bare data frame.
sim_fcn <- function(parameters=NULL,model,pnames,dosing,ICs,simulate=0){
# Re-attach the compiled shared object (needed inside parallel workers).
loadso(model)
if(!is.null(parameters)){
param_in <- data.frame(Names = pnames,Values=parameters)
param_in <- spread(param_in,key=Names,value=Values)
}else{
param_in <- data.frame(ID=1)
}
# Append the fixed initial conditions to the sampled parameters.
param_in %<>% cbind(ICs)
# Simulate only the day-56 TUMOR observation.
output <- model%>%idata_set(param_in) %>%Req(TUMOR)%>%obsonly%>%mrgsim(delta=56,end=56,events=as.ev(dosing))%>%
filter(time==56)%>%as.data.frame()
if(simulate==0){
return(list(NSS=output))
}else{
return(output)
}
}
# Plausibility window: day-56 tumour size must fall in [0, 4].
stateLims <- data.frame(Name = 'TUMOR','Lower'=0.0,'Upper'=4.0,Time=56)
model_args = list(model=model,pnames=p_names,dosing=reg,ICs=ICs)
control=list(runParallel="parallel",nCores = 4,
             parallel_libs="mrgsolve")
# Generate plausible patients by simulated annealing over the parameter box.
plausiblePatients <- generatePPs(model_fn = sim_fcn, NP=1e2, paramLims=paramLims,stateLims = stateLims,
                                method='SA',
                                model_args = model_args,
                                scoreThreshold = 0)
# Compare the plausible-patient distribution against the target simulation.
hist_data <- plausiblePatients$simulation_results%>%mutate(Source="Plausible")
hist_data %<>% rbind(sims%>%select(ID,time,TUMOR)%>%mutate(Source="Simulation"))
ggplot(hist_data,aes(x=TUMOR))+geom_density(aes(x=TUMOR,y=..scaled..,fill=Source),alpha=0.5)
# Select virtual patients whose distribution matches the target data.
VPs <- getVPs(plausiblePatients, sims%>%select(ID,time,TUMOR),runs=20,plausible_pdf = 'auto',data_pdf='auto',
              alpha_algorithm = 'PSO')
hist_data <- plausiblePatients$simulation_results%>%mutate(Source="Plausible")
# BUG FIX: legend label was the typo "Simfaculation", inconsistent with the
# "Simulation" label used in the density plot above.
hist_data %<>% rbind(sims%>%select(ID,time,TUMOR)%>%mutate(Source="Simulation"))
hist_data %<>% rbind((VPs$VPs)%>%mutate(Source="VP"))
# Count rows per source ('source_counts' avoids shadowing base::unique).
source_counts <- hist_data%>%group_by(Source)%>%count(Source)
hist_data <- merge(hist_data,y = source_counts, by='Source',all.x=TRUE)
ggplot(hist_data,aes(x=TUMOR, color=Source,fill=Source)) + geom_histogram(alpha=0.4, size = 1.5 )
ggplot(hist_data) + geom_density(aes(x=TUMOR,y=..scaled..,fill=Source),alpha=0.5)
|
30dd2c5b104444ffb76cabe23c5666bbbc45323e
|
0505b9118b71b919d8cae3684ce15f1328ef9c72
|
/man/binQTLScan.Rd
|
e6d04b6bf418aa89f27c2c000f8c407a70b1b374
|
[] |
no_license
|
YaoLab-Bioinfo/binQTL
|
851a1d090f0932c0bd71f38a9a79ca5b72f7ddb2
|
97c7dce106b673d7af880cc6c387f7008c4e7f36
|
refs/heads/master
| 2022-01-11T19:57:21.731539
| 2019-05-18T13:54:57
| 2019-05-18T13:54:57
| 93,064,482
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,630
|
rd
|
binQTLScan.Rd
|
\name{binQTLScan}
\alias{binQTLScan}
\title{QTL mapping with binmap data}
\usage{
binQTLScan(phenotype="", genotype="", population="RIL")
}
\arguments{
\item{phenotype}{A data frame representing the phenotype of a population.}
\item{genotype}{A data frame representing the genotype of a population.}
\item{population}{A character string indicating the type of the population.}
}
\description{
The phenotype data frame should contain two columns. The first column is the ID
of each accession of a population. The second column contains the phenotypic
value of specific trait for each accession.
For RIL (Recombinant Inbred Lines) or F2 populations, each row of the
genotype data frame represents a Bin. The first column is the
ID of each Bin. The second column is the chromosome ID of each Bin. The 3rd
column is the start coordinate of each Bin. The 4th column is the end coordinate
of each Bin. Each of the rest columns gives the genotype of each accession
at different bins. The names of the rest columns can be any characters
specified by the User.
For MAGIC population with 8 parents, each bin should be represented as 8 lines.
The population type can be RIL, F2 or magic.
}
\author{Wen Yao, Shizhong Xu}
\examples{
ril.phe <- read.csv(system.file("examples/ril.phe.csv", package="binQTL"), as.is=TRUE)
dim(ril.phe)
head(ril.phe)
ril.geno <- read.csv(system.file("examples/ril.geno.csv", package="binQTL"), as.is=TRUE)
dim(ril.geno)
ril.geno[1:2, 1:9]
qtl.res <- binQTLScan(phenotype = ril.phe, genotype = ril.geno, population="RIL")
head(qtl.res)
}
|
6539e4d8daf388fc4b63fc8e73c6cb450473fe10
|
9cc6a9e0035cbfa4f05e7e6a7bc986fb28d0b8f8
|
/tests/test.matharray.R
|
172ff92c4c8df5d00f0f403bc07988165ed8c98b
|
[] |
no_license
|
cran/xtable
|
5718ef14118ecf648f45a9cb0235a2121249426f
|
137fbfebc88cfd4aa5e7d2463e8889049b033dce
|
refs/heads/master
| 2021-06-08T07:10:15.108553
| 2019-04-21T11:20:03
| 2019-04-21T11:20:03
| 17,700,942
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 898
|
r
|
test.matharray.R
|
require(xtable)
# Fixture: a symmetric 3x3 covariance-like matrix used throughout the tests.
V <- matrix(c(1.140380e-03, 3.010497e-05, 7.334879e-05,
3.010497e-05, 3.320683e-04, -5.284854e-05,
7.334879e-05, -5.284854e-05, 3.520928e-04), nrow = 3)
### Simple test of print.xtableMatharray
print.xtableMatharray(xtable(V, display = rep("E", 4)))
# Manually tagging the class should behave like the constructor.
class(V) <- c("xtableMatharray")
class(V)
### Test without any additional arguments
mth <- xtableMatharray(V)
str(mth)
print(mth)
### Test with arguments to xtable
mth <- xtableMatharray(V, display = rep("E", 4))
str(mth)
print(mth)
mth <- xtableMatharray(V, digits = 6)
str(mth)
print(mth)
### Test with additional print.xtableMatharray arguments
mth <- xtableMatharray(V, digits = 6)
str(mth)
# Each print call below exercises one print-time option in isolation.
print(mth, format.args = list(decimal.mark = ","))
print(mth, scalebox = 0.5)
print(mth, comment = TRUE)
print(mth, timestamp = "2000-01-01")
print(mth, comment = TRUE, timestamp = "2000-01-01")
|
0bd0c55d889ce7059868f8918080cc7460a51b6b
|
242e7e98e5cade53b253664f69f77afeaa8895e7
|
/TestFunctions.R
|
285f09833912cc299e6d532c221e53e696695025
|
[] |
no_license
|
MarkBell1310/PGSMs
|
a7446d86353229710d0f7d43f20c7952c2c66ce2
|
01f6ec55f84030a9091082ad41a882a83ba72791
|
refs/heads/master
| 2021-01-24T21:25:23.786728
| 2018-04-16T13:09:58
| 2018-04-16T13:09:58
| 123,270,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,539
|
r
|
TestFunctions.R
|
#****************************************************
#
#**** Particle Gibbs for SBMs - Test functions ******
#
#****************************************************
## Smoke-test script: generates a stochastic block model, then calls each
## function from PGSMsFunctions.R once to check that it runs.
## NOTE(review): rm(list = ls()) wipes the caller's workspace -- avoid in
## scripts that may be sourced interactively.
rm(list = ls())
library(Matrix)
library(igraph)
library(matrixStats)
#library(blockmodels)
source("PGSMsFunctions.R")
#****************************************************
# generate SBM
num.nodes <- 20
num.clusters <- 4
sbm <- sample_sbm(n = num.nodes,
                  pref.matrix = forceSymmetric(matrix(runif(num.clusters^2),
                  (c(num.clusters, num.clusters)))),
                  #pref.matrix = matrix(c(0.99, 0.01, 0.01, 0.99), c(num.clusters, num.clusters)),
                  block.sizes = c(6, 4, 7, 3),
                  directed = FALSE,
                  loops = FALSE)
plot(sbm)
adj <- as_adj(sbm)
# define clusters: 20 data points in 4 clusters
# NOTE(review): the list below actually defines FIVE clusters, not four --
# confirm whether the comment or the partition is wrong.
all.clusters <- list(c(18, 14, 3, 5), c(12, 16, 20), c(1, 4, 19), c(7, 9, 13, 15),
                     c(2, 6, 8, 10, 11, 17))
# select anchors at random
s <- SelectAnchors(all.clusters)
# calculate c.bar and s.bar
closure <- CalculateClosureOfAnchors(s, all.clusters)
c.bar <- closure$c.bar
s.bar <- closure$s.bar
# uniform permutation on closure of anchors
sigma <- SamplePermutation(s, s.bar)
# get particle from c.bar
particle <- MapClustersToAllocations(sigma, c.bar)
MapAllocationsToClusters(sigma, particle, s) # should have same elements as c.bar
c.bar
# intermediate targets
alpha <- 1
beta1 <- 0.1
beta2 <- 0.2
# NOTE(review): 't' shadows base::t() in this script's environment.
t <- 3
n <- length(particle)
log.previous.weight <- log(0.1)
# NOTE(review): tau1 and tau2 are used below but never defined in this
# script -- presumably they come from PGSMsFunctions.R; verify, otherwise
# every call below errors with "object 'tau1' not found".
LogIntermediateTarget(sigma, s, particle, all.clusters, c.bar, adj, tau1, tau2,
                      t, alpha, beta1, beta2)
LogImprovedIntermediateTarget(sigma, s, particle, all.clusters, c.bar, adj, tau1, tau2,
                              t, n, alpha, beta1, beta2)
# proposal and weights
PossibleAllocations(sigma, s, particle, all.clusters, c.bar, adj,
                    tau1, tau2, t, n, alpha, beta1, beta2)
Proposal(sigma, s, particle, all.clusters, c.bar, adj, tau1, tau2,
         t, n, alpha, beta1, beta2)
LogUnnormalisedWeight(sigma, s, particle, log.previous.weight, all.clusters,
                      c.bar, adj, tau1, tau2, t, n, alpha, beta1, beta2)
# PGSM
N <- 10
resampling.threshold <- 0.5
ParticleGibbsSplitMerge(all.clusters, adj, s, s.bar, c.bar, N, resampling.threshold,
                        alpha, beta1, beta2)
SplitMerge(all.clusters, adj, N, resampling.threshold, alpha, beta1, beta2)
|
8befba189cb38a0382e85bb8b3865f7917a7a9fe
|
9f372e4eda2adcdbdfc20883d4a66083fcf42d7c
|
/Scripts/Benchmark_script.R
|
fb9801ebf5bd0b3457efa30259664ec6ddd2474d
|
[] |
no_license
|
mftth/Deko_Projekt
|
bb4f68f6b83fffb9ff29ae8e6086830c3148cc72
|
9b7d13483055f3c2012260b11f96e20d55e451a8
|
refs/heads/master
| 2023-02-22T11:33:10.189233
| 2021-01-19T09:36:48
| 2021-01-19T09:36:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,052
|
r
|
Benchmark_script.R
|
### benchmark runs
## Driver script: runs a cell-type deconvolution benchmark (run_benchmark from
## Benchmark.R) on one transcriptome file and writes the predictions to disk.
# missing_samples = c("105103","130002","PNET08","130003","145702","1344","127403","PNET55")
library(devtools)
load_all("~/artdeco")
source("~/Deko_Projekt/CIBERSORT_package/CIBERSORT.R")
library("stringr")
library("bseqsc")
library("MuSiC")
###
#transcriptome_data = read.table("~/Deko_Projekt/Data/Bench_data/Scarpa.S29.tsv",sep = "\t",header = T,row.names = 1)
#colnames(transcriptome_data) = str_replace(colnames(transcriptome_data) , pattern ="^X","")
#transcriptome_data[1:5,1:5]
#models_ductal = list(c("Alpha_Beta_Gamma_Delta_Baron","Alpha_Beta_Gamma_Delta_Hisc_Baron"))
## Each model entry pairs a baseline signature with an extended signature;
## one pair per reference dataset (Baron / Segerstolpe / Lawlor).
models_ductal = c(
    list(c("Alpha_Beta_Gamma_Delta_Baron","Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron")),
    list(c("Alpha_Beta_Gamma_Delta_Segerstolpe","Alpha_Beta_Gamma_Delta_Acinar_Ductal_Segerstolpe")),
    list(c("Alpha_Beta_Gamma_Delta_Lawlor","Alpha_Beta_Gamma_Delta_Acinar_Ductal_Lawlor"))
)
models_hisc = c(
    list(c("Alpha_Beta_Gamma_Delta_Baron","Alpha_Beta_Gamma_Delta_Acinar_Ductal_Hisc_Baron")),
    list(c("Alpha_Beta_Gamma_Delta_Segerstolpe","Alpha_Beta_Gamma_Delta_Acinar_Ductal_Hisc_Segerstolpe")),
    list(c("Alpha_Beta_Gamma_Delta_Lawlor","Alpha_Beta_Gamma_Delta_Acinar_Ductal_Hisc_Lawlor"))
)
nr_models = length(models_ductal)
## Each transcriptome file is repeated 3 times, once per reference dataset;
## the ((i-1) %% 3) + 1 indexing below relies on this.
transcriptome_files = list.files("~/Deko_Projekt/Data/Bench_data/",full.names = T,pattern = "[0-9].tsv")
transcriptome_files = as.character(sapply(transcriptome_files,FUN=rep,3))
visualization_files = str_replace_all(transcriptome_files,pattern ="\\.tsv",".vis.tsv")
#meta_info = read.table("~/MAPTor_NET/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
meta_info = read.table("~/Deko_Projekt/Misc/Meta_information.tsv",sep = "\t",header = T,stringsAsFactors = F)
rownames(meta_info) = meta_info$Name
colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_")
source("~/Deko_Projekt/Scripts/Benchmark.R")
algorithm = "bseqsc" # NMF # music # bseqsc
type = "hisc"
high_threshold = 66
low_threshold = 33
confidence_threshold = 1.1
transcriptome_files
## NOTE(review): the benchmark loop below is commented out and the index is
## hard-coded to 10, so only a single file is processed -- re-enable the
## for-loop for a full run.
i = 10
## NOTE(review): '<<-' assigns into the global environment; a plain '<-'
## would suffice at top level.
fractions <<- matrix( as.character(), ncol = 6)
#for( i in 1:length(transcriptome_files)){
#for( i in 16:18){
dataset_query = tail(as.character(unlist(str_split(transcriptome_files[i],pattern = "/"))),1)
## NOTE(review): ".tsv" is interpreted as a regex, where '.' matches any
## character; use "\\.tsv" to match a literal dot.
dataset_query = str_replace_all(dataset_query,".tsv","")
if (type == "ductal") {
    models = models_ductal#[[1]]
} else if (type == "hisc") {
    models = models_hisc#[[1]]
}
models = models[((i-1) %% 3) + 1]
dataset_training = as.character(unlist(models))[2]
path_benchmark_files = paste0(
    "~/Deko_Projekt/Results/Cell_fraction_predictions/",
    paste0(
        c(dataset_query,
          dataset_training,
          #paste0(models[2], collapse = ".", sep =""),
          algorithm,"tsv"
        ),
        collapse = "."
    )
)
path_benchmark_files_dec_res = paste0(
    "~/Deko_Projekt/Results/Cell_fraction_predictions/",
    paste0(
        c(dataset_query,
          dataset_training,
          #paste0(models[2], collapse = ".", sep =""),
          algorithm,".dec_res.tsv"
        ),
        collapse = "."
    )
)
transcriptome_file = transcriptome_files[i]
visualization_file = visualization_files[i]
print(i)
print(dataset_query)
print(dataset_training)
res = run_benchmark(
    dataset_query = dataset_query,
    dataset_training = dataset_training,
    type = type,
    algorithm = algorithm,
    transcriptome_file = transcriptome_file,
    visualization_file = visualization_file,
    path_benchmark_files = path_benchmark_files,
    high_threshold = high_threshold,
    low_threshold = low_threshold,
    confidence_threshold = confidence_threshold,
    path_benchmark_files_dec_res = path_benchmark_files_dec_res
)
write.table(res, path_benchmark_files, sep ="\t", quote = F, row.names = F)
#fractions = rbind(fractions,res)
#}
|
7339254052d668cb0167050d2709b3a7ae97e862
|
3ab3abeca359d7690de4bb3d0cd7d3b55db349c3
|
/Impute NA.R
|
37256ff07f5f5b7ec73f58558a720cd5313f558d
|
[] |
no_license
|
ravindrareddytamma/EDA-Techniques
|
7eed4308bd095a0efb7dad41eae7f6c1d3b4560a
|
59faa76132405e9fa8b347651912124f1d783f0f
|
refs/heads/master
| 2020-03-14T08:11:58.542173
| 2018-05-01T17:47:22
| 2018-05-01T17:47:22
| 131,519,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 177
|
r
|
Impute NA.R
|
#' Impute missing values in a numeric vector by linear interpolation.
#'
#' @param colname A numeric vector (e.g. a data-frame column) possibly
#'   containing NA values.
#' @return The vector with NA values replaced via \code{zoo::na.approx}.
#' @details Fix: the previous \code{suppressWarnings(require(zoo))} silently
#'   continued when 'zoo' was not installed and then failed with a confusing
#'   "could not find function" error; it also attached the package as a side
#'   effect. We now use the namespace explicitly and fail with a clear message.
impute.na <- function(colname)
{
  if (!is.numeric(colname))
    stop("Required Numeric Column as Input!")
  if (!requireNamespace("zoo", quietly = TRUE))
    stop("Package 'zoo' is required for impute.na() but is not installed.")
  zoo::na.approx(colname)
}
|
0230c0ef1f847c8205c7a8d9fc589e56d5a69f7d
|
7cde2a767abd22950471a6ef9d83b4122c6aa674
|
/scripts/original_simulations/4-plotResults.R
|
98a11a819f1ad126511f10ac04a361e48e49a82a
|
[] |
no_license
|
jfiksel/ReVAMP_Simulations
|
c6c73211106497efcaa39d6f96632fb881dc5ecf
|
6a142be0a53e2828573309efe62b7d118f449f6d
|
refs/heads/master
| 2020-03-28T04:52:57.748212
| 2018-09-17T15:08:55
| 2018-09-17T15:08:55
| 147,742,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,402
|
r
|
4-plotResults.R
|
## Plot CSMF and CCC accuracy distributions of verbal-autopsy methods from
## saved simulation results, faceted by calibration size and CSMF split.
library(ggplot2)
library(dplyr)
## Path is relative to the script's location -- run from scripts/original_simulations/.
results <- readRDS("../../data/simulation_results.rds")
results$csmf.acc.split <- round(results$csmf.acc.split, 2)
### relevel results
## Fix the facet/box ordering: tariff variants first, then insilico variants.
results$method <- factor(results$method,
                         levels = c('tariff_calib', 'tariff_train_and_calib',
                                    'tariff_train', 'tariff_mle', 'tariff_revamp',
                                    'insilico_calib', 'insilico_train_and_calib',
                                    'insilico_train', 'insilico_mle', 'insilico_revamp'))
## Flag the two "revamp" methods so they get a distinct color.
results$is.revamp <- grepl("revamp", results$method)
csmf.plot <-
    results %>%
    filter(measure == "csmf") %>%
    ggplot(aes(x = method, y = accuracy, color = is.revamp)) +
    facet_grid(calib.size ~ csmf.acc.split) +
    geom_boxplot() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
ccc.plot <-
    results %>%
    filter(measure == "ccc") %>%
    ggplot(aes(x = method, y = accuracy, color = is.revamp)) +
    facet_grid(calib.size ~ csmf.acc.split) +
    geom_boxplot() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
## Write both plots to ../../visualizations, creating the directory if needed.
viz.dir <- "../../visualizations"
if(!dir.exists(viz.dir)){
    dir.create(viz.dir)
}
ggsave(filename = file.path(viz.dir, "csmf_results.jpg"),
       plot = csmf.plot, width = 14, height = 12)
ggsave(filename = file.path(viz.dir, "ccc_results.jpg"),
       plot = ccc.plot, width = 14, height = 12)
|
21547739d9ea051dbe4ff5ccbc95c724d8df0bd2
|
5ded80783b7c77fba142feae93509f0dc062b31b
|
/R/plugin.R
|
08ac2f33f9d2133292fb9275bb25e22e83ad84bd
|
[] |
no_license
|
cran/ROI.plugin.scs
|
daf5f4714784f0ab1c97fe0794cef96bf837746e
|
efc0509c21b38aedefb4049daea9502bbc84c4c1
|
refs/heads/master
| 2023-08-05T04:06:41.381302
| 2023-07-07T11:40:02
| 2023-07-07T11:40:02
| 61,485,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,644
|
r
|
plugin.R
|
## ROI plugin: SCS
## based on scs interface
## Sizes of the cones of a given type in a cone specification.
##
## @param x presumably a sparse (slam-style) cone membership structure with
##     one named column per cone type and fields $v / $j -- TODO confirm
##     against the C_constraint representation used elsewhere in this plugin.
## @param ctype character string naming the cone-type column to inspect.
## @return integer vector tabulating the $v entries belonging to the matching
##     column, or NULL when no column of that type exists.
cone_dims <- function(x, ctype) {
    wcol <- which(colnames(x) == ctype)
    if ( length(wcol) == 0 ) return(NULL)
    as.integer(table(x$v[x$j == wcol]))
}
## Total of the $v entries belonging to the cones of a given type.
##
## @param x same structure as in cone_dims() (named columns, $v / $j fields).
## @param ctype character string naming the cone-type column.
## @return the sum of the matching $v values, or 0 when the column is absent.
cone_counts <- function(x, ctype) {
    wcol <- which(colnames(x) == ctype)
    if ( length(wcol) == 0 ) return(0)
    sum(x$v[x$j == wcol])
}
## SLAM - VECH
##
## unvech
## ======
## Inverse of the svec() packing: rebuilds a symmetric n x n matrix (as a
## slam::simple_triplet_matrix) from its scaled half-vectorization, undoing
## the sqrt(2) scaling that svec() applied to off-diagonal entries.
unvech <- function(x) {
    ## length(vech(M)) := m * (m-1) / 2; where n is the dimension of the m x m matrix M
    n <- as.integer((- 1 + sqrt(1 + 8 * length(x))) / 2)
    ## k: positions of the off-diagonal entries within the vech vector
    k <- scale_which(n)
    x[k] <- x[k] / sqrt(2)
    ## (i, j): row/column indices of the lower triangle in vech order
    idx <- seq_len(n)
    i <- unlist(lapply(idx, seq.int, to=n), recursive=FALSE, use.names=FALSE)
    j <- unlist(mapply(rep_len, idx, rev(idx), SIMPLIFY=FALSE, USE.NAMES=FALSE))
    ## mirror the off-diagonal entries (j[k], i[k]) to make the matrix symmetric
    simple_triplet_matrix(c(i, j[k]), c(j, i[k]), c(x, x[k]))
}
##
## svec
## ====
## Scaled half-vectorization of a symmetric matrix.
##
## @param x a symmetric numeric matrix (anything as.matrix() accepts).
## @return a numeric vector of length n * (n + 1) / 2 containing the lower
##     triangle in column-major (vech) order, with every strictly-lower
##     (off-diagonal) entry multiplied by sqrt(2).
##
## Example: for A = [a11 a12 a13; a21 a22 a23; a31 a32 a33] the result is
## c(a11, sqrt(2)*a21, sqrt(2)*a31, a22, sqrt(2)*a32, a33).
svec <- function(x) {
    m <- as.matrix(x)
    off_diag <- lower.tri(m)
    m[off_diag] <- sqrt(2) * m[off_diag]
    as.numeric(m[lower.tri(m, diag = TRUE)])
}
##
## svec_inv
## ========
## Inverse of svec(): reconstructs the dense symmetric matrix from its
## scaled half-vectorization, dividing the off-diagonal entries by sqrt(2).
##
## @param x numeric vector of length n * (n + 1) / 2 as produced by svec().
## @return the dense symmetric n x n matrix.
svec_inv <- function(x) {
    n <- as.integer((-1 + sqrt(1 + 8 * length(x))) / 2)
    out <- matrix(0, nrow = n, ncol = n)
    out[lower.tri(out, diag = TRUE)] <- x
    strict <- lower.tri(out)
    out[strict] <- out[strict] / sqrt(2)
    out[upper.tri(out)] <- t(out)[upper.tri(out)]
    out
}
##
## vector_to_psd
## =============
## Unpacks an UNSCALED half-vectorization (plain vech, no sqrt(2) factors)
## back into the dense symmetric matrix.
##
## @param x numeric vector of length n * (n + 1) / 2 (lower triangle,
##     column-major order).
## @return the dense symmetric n x n matrix.
vector_to_psd <- function(x) {
    n <- as.integer((-1 + sqrt(1 + 8 * length(x))) / 2)
    out <- matrix(0, nrow = n, ncol = n)
    out[lower.tri(out, diag = TRUE)] <- x
    out[upper.tri(out)] <- t(out)[upper.tri(out)]
    out
}
## scale_which
## ===========
##
## Positions (within vech(M), the column-major lower triangle of an n x n
## matrix M) of the strictly-lower-triangular entries, i.e. exactly the
## entries that svec()/unvech() scale by sqrt(2).
##
## @param n an integer giving the dimension of the n x n matrix
## @return an integer vector of positions into the n * (n + 1) / 2 vech vector
##
## Fix: the previous implementation errored for n <= 1 (do.call(rbind, ...)
## over an empty list yields NULL, then NULL[, 1] fails); this version
## returns integer(0) for degenerate dimensions.
scale_which <- function(n) {
    ## TRUE at strictly-lower-triangular cells; reading that mask out in
    ## vech order (lower triangle incl. diagonal, column-major) and taking
    ## which() yields the off-diagonal positions directly.
    strict_lower <- lower.tri(matrix(0L, n, n))
    which(strict_lower[lower.tri(strict_lower, diag = TRUE)])
}
## sym_vec_scale_lower_tri
## scales the off diagonal elements of a vectorized lower triangular matrix
## by a given scalar factor (default sqrt(2)); diagonal entries are left
## unchanged.
## @param x a numeric vector containing a lower triangular matrix in
##     column-major vech order
## @param scale the factor applied to each strictly-lower-triangular entry
## @returns the scaled vector
sym_vec_scale_lower_tri <- function(x, scale=sqrt(2)) {
    ## Walk vech(x) column by column: 'i' is the position inside the current
    ## column, 'n' the number of rows remaining in that column. The first
    ## entry of every column is a diagonal element.
    i <- 1L
    n <- calc_psd_matrix_dim(length(x))
    fun <- function(y) {
        if ( i == 1 ) {
            ## do nothing -- diagonal entry of the first column
        } else if ( i > n ) {
            ## crossed into the next column: its first entry is a diagonal
            ## element, so reset the counter, shrink the column length and
            ## leave y unscaled
            i <<- 1L
            n <<- n - 1
        } else {
            ## strictly-lower-triangular entry: apply the scale factor
            y <- y * scale
        }
        i <<- i + 1L
        return(y)
    }
    ## NOTE(review): this relies on sapply() visiting elements left-to-right
    ## one at a time (the closure mutates i and n via <<-).
    sapply(x, fun)
}
## Expand a sparse index/value pair (e.g. an ROI bounds slot with fields
## $ind and $val) into a dense vector of the given length, zero elsewhere.
##
## @param x a list with integer positions in $ind and values in $val;
##     a NULL $ind yields an all-zero vector.
## @param len the length of the dense result.
to_dense_vector <- function(x, len) {
    dense <- rep.int(0L, len)
    if ( !is.null(x$ind) ) {
        dense[x$ind] <- x$val
    }
    dense
}
## Number of distinct primal exponential cones in a cone spec, or NULL
## when the spec contains none (so calc_dims() drops the entry entirely).
calc_expp_dims <- function(x) {
    matched <- x$id[x$cone == scs_cones["expp"]]
    if (length(matched) == 0L) NULL else length(unique(matched))
}
## Number of distinct dual exponential cones in a cone spec, or NULL
## when the spec contains none (so calc_dims() drops the entry entirely).
calc_expd_dims <- function(x) {
    matched <- x$id[x$cone == scs_cones["expd"]]
    if (length(matched) == 0L) NULL else length(unique(matched))
}
## Sizes of the second-order cones (number of rows per cone id), or NULL
## when the spec contains none.
calc_soc_dims <- function(x) {
    matched <- x$id[x$cone == scs_cones["soc"]]
    if (length(matched) == 0L) NULL else as.integer(table(matched))
}
## Recover the matrix dimension n from the packed lower-triangle length
## m = n * (n + 1) / 2 (inverse of the vech length formula).
calc_psd_matrix_dim <- function(m) {
    as.integer((sqrt(8 * m + 1) - 1) / 2)
}
## Matrix dimensions of the PSD cones, derived from their per-id row counts
## (each PSD cone occupies n * (n + 1) / 2 rows); NULL when none present.
calc_psd_dims <- function(x) {
    matched <- x$id[x$cone == scs_cones["psd"]]
    if (length(matched) == 0L)
        return(NULL)
    sapply(table(matched), calc_psd_matrix_dim)
}
## Exponents of the power cones, primal first then dual; dual-cone exponents
## are negated (SCS encodes dual power cones via negative parameters).
## Returns NULL when the spec contains no power cones.
calc_pow_dims <- function(x) {
    powp <- powd <- NULL
    ids <- unique(x$id[x$cone == scs_cones['powp']])
    if ( length(ids) )
        ## x$params is presumably keyed by cone id, each entry holding the
        ## exponent under name 'a' -- TODO confirm against the C_constraint
        ## cone representation.
        powp <- sapply(as.character(ids), function(id) x$params[[id]]['a'], USE.NAMES = FALSE)
    ids <- unique(x$id[x$cone == scs_cones['powd']])
    if ( length(ids) )
        powd <- sapply(as.character(ids), function(id) -x$params[[id]]['a'], USE.NAMES = FALSE)
    unname(c(powp, powd))
}
## Assemble the SCS 'cone' dimension list from a cone specification.
## The $<- assignments are deliberate: assigning NULL drops the entry,
## so cone types absent from the problem do not appear in the result.
calc_dims <- function(cones) {
    dims <- list()
    dims$z <- sum(cones$cone == scs_cones["zero"])    # rows in the zero (equality) cone
    dims$l <- sum(cones$cone == scs_cones["nonneg"])  # rows in the nonnegative cone
    dims$ep <- calc_expp_dims(cones)  # primal exponential cone count (or dropped)
    dims$ed <- calc_expd_dims(cones)  # dual exponential cone count (or dropped)
    dims$q <- calc_soc_dims(cones)    # second-order cone sizes (or dropped)
    dims$s <- calc_psd_dims(cones)    # PSD cone matrix dimensions (or dropped)
    dims$p <- calc_pow_dims(cones)    # power cone exponents (or dropped)
    dims
}
## Indices of variables whose declared lower bound is infinite (i.e. already
## SCS's implicit default, so no extra constraint row is needed).
## Returns NULL when every declared bound is finite.
which_scs_default_lower_bounds <- function(lower_bounds) {
    infinite <- is.infinite(lower_bounds$val)
    if (any(infinite)) {
        return(lower_bounds$ind[which(infinite)])
    }
    NULL
}
## get the indices of the conic bounds which are not the free cone
## (power-cone entries carry their indices in an "i" field).
## Returns integer() when no cone bounds are present.
get_indizes_nonfree <- function(bo) {
    if ( is.null(bo) || is.null(bo$cones) )
        return(integer())
    cones <- bo$cones
    c(unlist(cones$nonneg), unlist(cones$soc), unlist(cones$psd),
      unlist(cones$expp), unlist(cones$expd),
      unlist(lapply(cones$powp, "[[", "i")),
      unlist(lapply(cones$powd, "[[", "i")))
}
## Mapping from ROI cone names to the integer codes used in C_constraint
## cone specifications; solve_OP() sorts constraint rows by these codes.
scs_cones <- c("zero" = 1L, "nonneg" = 2L, "soc" = 3L, "psd" = 4L,
               "expp" = 5L, "expd" = 6L, "powp" = 7L, "powd" = 8L)
## Solve an ROI optimization problem (linear objective, conic constraints)
## with the SCS solver.
##
## @param x an ROI OP object.
## @param control list of SCS options; special entries: 'dry_run' returns the
##     unevaluated solver call, 'NO_PSD_SCALING' (testing only) skips the
##     sqrt(2) scaling of PSD off-diagonal rows.
## @return a canonicalized ROI solution (ROI_plugin_canonicalize_solution).
solve_OP <- function(x, control = list()) {
    constr <- as.C_constraint(constraints(x))
    ## check if "scs" supports the provided cone types
    stopifnot(all(constr$cones$cone %in% scs_cones))
    obj <- as.vector(terms(objective(x))[["L"]])
    ## SCS minimizes; negate the objective for maximization problems
    ## (undone below via (-1)^x$maximum when computing 'optimum')
    if ( maximum(x) )
        obj <- -obj
    AL <- AU <- NULL
    AL.rhs <- AU.rhs <- double()
    ## lower bounds: every variable with a finite lower bound lb_i gets an
    ## extra linear-cone row -x_i <= -lb_i (SCS's implicit default is -Inf)
    lower_bounds <- to_dense_vector(bounds(x)$lower, length(objective(x)))
    not_is_scs_default <- !is.infinite(lower_bounds)
    if ( any(not_is_scs_default) ) {
        li <- which(not_is_scs_default)
        AL <- simple_triplet_matrix(i = seq_along(li), j = li,
                                    v = rep.int(-1, length(li)),
                                    nrow = length(li), ncol = length(obj))
        AL.rhs <- -lower_bounds[not_is_scs_default]
    }
    ## upper bounds: one row x_i <= ub_i per explicitly bounded variable
    ui <- bounds(x)$upper$ind
    ub <- bounds(x)$upper$val
    if ( length(ui) ) {
        AU <- simple_triplet_matrix(i = seq_along(ui), j = ui,
                                    v = rep.int(1, length(ui)),
                                    nrow = length(ui), ncol = length(obj))
        AU.rhs <- ub
    }
    ## stack original constraints and bound rows; the bound rows live in
    ## the linear (nonnegative) cone
    A <- rbind(constr$L, AL, AU)
    A.rhs <- c(constr$rhs, AL.rhs, AU.rhs)
    cones <- c(constr$cones, K_lin(length(AL.rhs)), K_lin(length(AU.rhs)))
    if ( nrow(constr) > 0 ) {
        ## SCS expects rows grouped by cone type (in scs_cones code order),
        ## then by cone id
        i <- with(cones, order(cone, id))
        ordered_cones <- list(cone = cones$cone[i], id = cones$id[i])
        A <- A[i,]
        A.rhs <- A.rhs[i]
        dims <- calc_dims(cones)
    } else {
        ## NOTE(review): 'ordered_cones' is not defined on this branch but is
        ## read below when PSD cones are present -- verify this combination
        ## cannot occur.
        dims <- calc_dims(cones)
    }
    ## apply the svec convention: scale the off-diagonal rows of every PSD
    ## block (and their rhs entries) by sqrt(2)
    ## The NO_PSD_SCALING mode is only for testing purposes
    ## NOTE(review): scalar `&&` would be more idiomatic than `&` in this
    ## scalar if(); also 'roi_cones' is only defined inside this branch but
    ## is used later for out$psd, so NO_PSD_SCALING with PSD cones would fail.
    if ( !is.null(dims$s) & is.null(control$NO_PSD_SCALING) ) {
        psd_j <- list()
        b <- ordered_cones$cone == scs_cones["psd"]
        roi_cones <- split(seq_along(ordered_cones$cone)[b], ordered_cones$id[b])
        for ( i in seq_along(roi_cones) ) {
            psd_dim <- dims$s[i]
            psd_j[[i]] <- roi_cones[[i]][scale_which( psd_dim )]
            k <- A$i %in% psd_j[[i]]
            A$v[k] <- sqrt(2) * A$v[k]
            A.rhs[psd_j[[i]]] <- sqrt(2) * A.rhs[psd_j[[i]]]
        }
    }
    ## fill in default solver options without overriding user choices
    if ( is.null(control$verbose) ) control$verbose <- FALSE
    if ( is.null(control$eps_rel) ) control$eps_rel <- 1e-6
    ## build the call explicitly so dry_run can hand it back unevaluated
    solver_call <- list(scs, A = A, b = A.rhs, obj = obj,
                        cone = dims, control = control)
    mode(solver_call) <- "call"
    if ( isTRUE(control$dry_run) )
        return(solver_call)
    out <- eval(solver_call)
    out$len_objective <- length(objective(x))
    out$len_dual_objective <- nrow(constraints(x))
    ## attach reconstructed PSD solution matrices (from the dual vector y)
    if ( "s" %in% names(dims) ) {
        out$psd <- lapply(roi_cones, function(j) unvech(out$y[j]))
    } else {
        out$psd <- NULL
    }
    ## undo the earlier sign flip for maximization; NA when x is unavailable
    optimum <- (-1)^x$maximum * tryCatch({as.numeric(out$x %*% obj)}, error=function(e) as.numeric(NA))
    ROI_plugin_canonicalize_solution(solution = out$x,
                                     optimum = optimum,
                                     status = out[["info"]][["status_val"]],
                                     solver = "scs",
                                     message = out)
}
## S3 method: dual solution of an scs result -- the first
## len_dual_objective entries of SCS's dual vector y (the rows belonging
## to the original constraints, before the appended bound rows).
ROI_plugin_solution_dual.scs_solution <- function(x) {
    msg <- x$message
    msg$y[seq_len(msg$len_dual_objective)]
}
## S3 method: the list of PSD solution matrices that solve_OP() attached
## to the solver message (NULL when the problem had no PSD cones).
ROI_plugin_solution_psd.scs_solution <- function(x) {
    x[["message"]][["psd"]]
}
|
2a92698842393df12c91f4b347080301e8d3c0b6
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/gluedatabrew_delete_schedule.Rd
|
dfde919193870603f6d301f2cb565d25892343e4
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 521
|
rd
|
gluedatabrew_delete_schedule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gluedatabrew_operations.R
\name{gluedatabrew_delete_schedule}
\alias{gluedatabrew_delete_schedule}
\title{Deletes the specified DataBrew schedule}
\usage{
gluedatabrew_delete_schedule(Name)
}
\arguments{
\item{Name}{[required] The name of the schedule to be deleted.}
}
\description{
Deletes the specified DataBrew schedule.
See \url{https://www.paws-r-sdk.com/docs/gluedatabrew_delete_schedule/} for full documentation.
}
\keyword{internal}
|
93be39609545533105cb9ab8937b37bf19b5b776
|
bcb827857c741281b6a99616c8244d02a0d0bfc6
|
/R/split2csv.R
|
da056728e3b6f486c34a738a173ef08d9fe40525
|
[] |
no_license
|
ellyck/upgraded-waffle
|
d6cd301b7af6941ec523f0000e994118e476c8b1
|
f1fd6c8f160d6cc182123c50b62181b02c79a8e0
|
refs/heads/master
| 2022-02-22T00:52:38.209064
| 2017-11-21T22:21:58
| 2017-11-21T22:21:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,339
|
r
|
split2csv.R
|
#' Split List to CSV by Factor
#'
#' This function allows you to split a data frame into a list of data frames by a chosen factor and write them out as separate CSV files in the chosen directory.
#' @param x the vector or data frame containing values to be divided into groups.
#' @param f a ‘factor’ in the sense that as.factor(f) defines the grouping, or a list of such factors in which case their interaction is used for the grouping.
#' @param directory the directory that the CSV files will be written out to. Default is current working directory.
#' Credit: agstudy, Tyler Rinker
# https://stackoverflow.com/questions/9713294/split-data-frame-based-on-levels-of-a-factor-into-new-data-frames
# https://stackoverflow.com/questions/17018138/using-lapply-to-apply-a-function-over-list-of-data-frames-and-saving-output-to-f
#' @keywords split data frame df csv list factor
#' @export
#' @examples
#' split2csv(mtcars, 'carb') # writes CSV files to the current working directory using the element name as filenames.
#' Split a data frame by a factor and write each group to a CSV file named
#' after the factor level.
#'
#' @param x the vector or data frame containing values to be divided into groups.
#' @param f the name of the column whose values define the grouping.
#' @param directory the directory the CSV files are written to; defaults to
#'   the current working directory.
#' @return (invisibly relevant) the list returned by lapply over the groups.
#' @details Fix: the previous version called setwd(directory) and never
#'   restored the caller's working directory -- a lasting side effect. The
#'   output paths are now built with file.path() instead. Also replaces
#'   1:length(df_list) with seq_along().
split2csv <- function(x, f, directory = getwd()) {
  df_list <- split(x, as.factor(x[[f]]))
  lapply(seq_along(df_list),
         function(i) write.csv(df_list[[i]],
                               file = file.path(directory,
                                                paste0(names(df_list)[i], ".csv")),
                               row.names = FALSE))
}
|
1d35178bcc030e1cebf8ebf58ce30d38c5b5c815
|
75e7cb1ada7b0edc7b62c02d9698217255738660
|
/Binomial Distribution in R.R
|
b6dedd79d2fd0ff5a3fe336c12d164eabf035423
|
[
"MIT"
] |
permissive
|
JatinRanaV1/Working-on-R
|
4faf478ea95d99755031523dddded95f32f7b84d
|
83f5a228677ba89dfd65295e4e5f5563e2b6331f
|
refs/heads/main
| 2023-01-21T21:53:03.722994
| 2020-12-04T18:30:30
| 2020-12-04T18:30:30
| 308,795,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,078
|
r
|
Binomial Distribution in R.R
|
## Demonstration of R's four binomial-distribution functions:
## dbinom (density), pbinom (CDF), qbinom (quantile), rbinom (random draws).
# dbinom(x,size,prob) --> This function gives the probability density distribution at each point where prob = probability of success
# Create a sample of 100 numbers which are incremented by 1.
x1 <- seq(0,100,by = 1)
# Create the binomial distribution.
y <- dbinom(x1,100,0.5)
# Plot the graph for this sample.
plot(x1,y)
# pbinom(x, size, prob) --> This function gives the cumulative probability of an event. It is a single value representing the probability.
# Probability of getting 26 or less heads from a 51 tosses of a coin.
x2 <- pbinom(26,51,0.5)
print(x2)
# qbinom(p, size, prob) --> This function takes the probability value and gives a number whose cumulative value matches the probability value.
# How many heads will have a probability of 0.25 will come out when a coin is tossed 51 times.
x3 <- qbinom(0.25,51,0.5)
print(x3)
# rbinom(n, size, prob) --> This function generates required number of random values of given probability from a given sample.
# Find 10 random values from a sample of 500 with probability of 0.4.
# (Results vary per run; no seed is set.)
x4 <- rbinom(10,500,.4)
print(x4)
|
c796626ee1790cf36b2b49df1419d715235bf025
|
466673bd6d98827f9705e2643d838d10db3c950a
|
/testing-checkboxinput/app.R
|
6f0fadd1aa63541aca64b98d5c5d356b66caa0f5
|
[] |
no_license
|
jyuu/shiny-apps
|
7a7d59c5e30689473a067eabaddb6724985a1344
|
5ec1ea989f587a7a4f2a616346bca242cd5a2178
|
refs/heads/master
| 2020-07-04T07:04:12.648296
| 2019-08-22T00:48:07
| 2019-08-22T00:48:07
| 202,197,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,251
|
r
|
app.R
|
## Shiny module UI: a picker, a conditional checkbox panel, and an action
## button; every input/output id is namespaced via NS(id) so the module can
## be instantiated multiple times.
testUI <- function(id) {
  ns <- NS(id)
  tagList(
    tags$div(
      shinyWidgets::pickerInput(
        ns('tidyselector'),
        label = 'Pivot variables selection helper',
        choices = c("Starts With", "Ends With", "Select All"),
        width = "100%"
      )
    ),
    tags$div(
      style = "width:100%;",
      ## Shown only while the picker is not on "Select All"; the JS condition
      ## must use the namespaced id, hence the sprintf with ns().
      conditionalPanel(
        sprintf("input['%s'] != 'Select All'", ns("tidyselector")),
        # textInput("helper", "Enter helper text"),
        checkboxInput(ns("dropna"), "Drop NA values?"),
        verbatimTextOutput(ns("dropout"))
      )
    ),
    actionButton(
      inputId = ns("valid_helpers"),
      label = "Apply selection rule",
      width = "100%",
      class = "btn-primary",
      disabled = "disabled"
    )
  )
}
## Module server: reacts to picker changes and mirrors the checkbox state
## into the verbatim text output. Inside callModule, input ids are already
## namespace-relative, so plain "valid_helpers"/"dropna" are correct here.
testUIServer <- function(input, output, session) {
  observeEvent(input$tidyselector, {
    updateActionButton(
      session = session,
      inputId = "valid_helpers",
      label = "Apply selection rule"
    )
  })
  output$dropout <- renderText({
    input$dropna
  })
}
## App assembly: one instance of the module under the "myNamespace" id.
new_ui <- fluidPage(
  testUI('myNamespace')
)
server <- function(input, output, session) {
  callModule(testUIServer, "myNamespace")
}
shinyApp(ui = new_ui, server = server)
|
2c6d070b936014a12e35fd597ebfbfd171698715
|
1448728f802319af18b58ee89f34ef1a07a277c6
|
/test.script.r
|
92b390557a090c629acc1a68370f4f426e5c6451
|
[] |
no_license
|
brianscheng/cheng-lab-test
|
83133e5b2619fa5c00839705e22d3a75e82bdd20
|
847401a68f1bbe368ce81262be805abeed4274b1
|
refs/heads/master
| 2022-08-10T06:20:30.629843
| 2020-05-19T19:38:55
| 2020-05-19T19:38:55
| 265,344,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40
|
r
|
test.script.r
|
# This is a test script.
# Some awesome code goes here.
|
e1309c9f68590a318cda901620decf55b8f95e09
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phangorn/examples/cladePar.Rd.R
|
30c21ed95712fd67804b10de9c79313c9f038c7b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 262
|
r
|
cladePar.Rd.R
|
library(phangorn)
### Name: cladePar
### Title: Utility function to plot.phylo
### Aliases: cladePar
### Keywords: plot
### ** Examples
## Demonstrates cladePar: collect plotting parameters for the clade below
## internal node 12, then draw the clade below node 18 in blue.
tree <- rtree(10)
plot(tree)
nodelabels()
x <- cladePar(tree, 12)
cladePar(tree, 18, "blue", "blue", x=x, plot=TRUE)
|
bba00922b9a122b421f60e765106974ab2df151c
|
6f646a9dd32f79184b6bf512247c27c280fa39a2
|
/classify_with_words.R
|
cacae2208e2b541e15bd99185e79412d0cc16318
|
[] |
no_license
|
mjockers/no_reasonable_person
|
31fbd459c90d15c451d202ed84b132dc835374cb
|
889abab710cc6e15dad0d1a2aa10a13530d5b6e4
|
refs/heads/master
| 2021-09-14T23:42:29.923719
| 2018-05-22T13:43:52
| 2018-05-22T13:43:52
| 114,029,433
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,439
|
r
|
classify_with_words.R
|
library(caret)
library(dplyr)
library(readr)
library(pamr)
library(tidyr)
library(pROC)
library(ROCR)
# Load the wide form word frequencies data
load("data/wide_form_word_freqs.RData")
# load the metadata
load("data/metadata_word_punc.RData")
# Ignore very short documents: e.g. docs < 1000 wds
word_threshold <- 1000
long_docs <- filter(metadata, NumWords >= word_threshold) %>%
mutate(ID=paste(Author, Text_ID, sep="_")) %>%
select(Author, Text_ID, ID, NumWords)
long_doc_data <- wide_df[which(wide_df$ID %in% long_docs$ID),]
# Merge in the meta data
meta_full <- merge(long_docs, long_doc_data)
# Now winnow the data to high frequency words that are used by all authors.
# Calculate the means for winnowing
the_means <- colMeans(meta_full[, 5:ncol(meta_full)])
# First set a maximum number of possible features to retain
# We tested values from 100 to 1000. Ideally the feature set
# should be small and limited to high frequency "context insensative"
# features. Here we begin with 150 and then winnow further in the
# next step
max_cols <- 150
if(length(the_means) < max_cols){
max_cols <- length(the_means)
}
# We'll need to know the names of the meta columns
metacols <- colnames(meta_full)[1:4]
# we collect the 150 most frequent features into a vector "keepers"
keepers <- names(sort(the_means, decreasing = TRUE)[1:max_cols])
# form a new dataframe with just the metadata and feature columns
temp_data <- meta_full[,c(metacols, keepers)]
# now check that every feature appears at least once in every author
# and also in the unknown doc. Second winnowing step
zero_value_test <- group_by(temp_data, Author) %>%
select(-ID, -Text_ID, -NumWords) %>%
summarise_each(funs(sum))
# reset any 0 values to NA, so we can use a function to find
# any columns containing an "NA" (i.e. zero value)
zero_value_test[zero_value_test == 0] <- NA
# This function will identify columns that have NA values
nacols <- function(df) {
colnames(df)[unlist(lapply(df, function(x) any(is.na(x))))]
}
# Sending zero_value_test to the function returns a set of features
# that were not present in all three authors and also in the
# unknown file. we will remove these
remove <- nacols(zero_value_test)
# remove any features that are not common to all authors.
if(length(remove) > 0){
classing_data_full <- temp_data[, -which(colnames(temp_data) %in% remove)]
} else {
classing_data_full <- temp_data
}
# Balance the classes by undersampling
# Setting seed for repeatability during testing.
set.seed(8675309) # go Jenny!
# figure out which rows are which and then sample from the
# larger classes based on the size of the smaller class
r_ids <- which(classing_data_full$Author == "Rehnquist")
s_ids <- which(classing_data_full$Author == "Scalia")
t_ids <- which(classing_data_full$Author == "Thomas")
u_ids <- which(classing_data_full$Author == "Unknown")
small_class_size <- min(c(length(r_ids), length(s_ids), length(t_ids)))
r_keep <- sample(r_ids, small_class_size)
s_keep <- sample(s_ids, small_class_size)
t_keep <- sample(t_ids, small_class_size)
# a new data frame from the sampled data
classing_data <- classing_data_full[c(r_keep, s_keep, t_ids),]
# compare composition of authors before balancing . . .
table(classing_data_full$Author)
# . . . to composition after Balancing
table(classing_data$Author)
################################################################################
# Classify the data using SVM and 3/4 of data for training
################################################################################
# TRAIN ON 3/4 OF DATA
trainIndex <- createDataPartition(factor(classing_data$Author), p = .75, list = FALSE, times = 1)
training <- classing_data[trainIndex,5:ncol(classing_data)]
testing <- classing_data[-trainIndex,5:ncol(classing_data)]
unknown <- classing_data_full[u_ids, 5:ncol(classing_data_full)]
# 10-fold x-validation with 5 repeats
fitControl <- trainControl(method = "repeatedcv", repeats = 5, classProbs = T)
# Build an SVM model from training data
svmFit <- train(x=training, y = factor(classing_data$Author[trainIndex]), method = "svmRadial", preProcess = c("center","scale"),trControl = fitControl)
# Examine the model
svmFit
# Examine how the training data was classified in x-validation
# This is not the best measure of performance. We'll also look at
# the performace on the held out data (below).
training_data_class_pred <- predict(svmFit, newdata = training, type = "raw")
confusionMatrix(data = training_data_class_pred, reference = factor(classing_data$Author[trainIndex]))
################################################################################
# NOTE:
# Good explanation of the kappa statistic here:
# https://stats.stackexchange.com/questions/82162/cohens-kappa-in-plain-english
################################################################################
# Now make predictions using the unseen data and examine performance again
class_pred <- predict(svmFit, newdata = testing, type = "raw")
class_probs <- predict(svmFit, newdata = testing, type = "prob")
confusionMatrix(data = class_pred, reference = factor(classing_data$Author[-trainIndex]))
################################################################################
# NOTE:
# Some find the multi-class AUC a useful performance metric.
# This function builds multiple ROC curve to compute the multi-class
# AUC as defined by Hand and Till. A multiclass AUC is a mean of AUCs.
# See David J. Hand and Robert J. Till (2001). A Simple Generalisation
# of the Area Under the ROC Curve for Multiple Class Classification Problems.
# Machine Learning 45(2), p. 171–186. DOI: 10.1023/A:1010920819831.
mx <- multiclass.roc(factor(classing_data$Author[-trainIndex]), as.numeric(factor(class_pred)), percent=TRUE)
mx$auc
################################################################################
# now classify the "unknown" Bush V. Gore Document
class_pred <- predict(svmFit, newdata = unknown, type = "raw")
class_probs <- predict(svmFit, newdata = unknown, type = "prob")
# Show final classification result and probabilities
# Show posterior class probabilities for the unknown document (from the previous run)
class_probs
################################################################################
# Rerun the same test using all available data for model training
################################################################################
# NOTE(review): p = 1 puts every row in the training set, so `testing` is empty;
# only the unknown document is scored below.
trainIndex <- createDataPartition(factor(classing_data$Author), p = 1, list = FALSE, times = 1)
# Feature columns start at column 5; columns 1-4 are presumably metadata — TODO confirm
training <- classing_data[trainIndex,5:ncol(classing_data)]
testing <- classing_data[-trainIndex,5:ncol(classing_data)]
unknown <- classing_data_full[u_ids, 5:ncol(classing_data_full)]
# 5x repeated 10-fold cross-validation, keeping class probabilities
# (style note: `T` would be safer spelled out as TRUE)
fitControl <- trainControl(method = "repeatedcv", repeats = 5, classProbs = T)
# Radial-kernel SVM on centered/scaled features
svmFit <- train(x=training, y = factor(classing_data$Author[trainIndex]), method = "svmRadial", preProcess = c("center","scale"),trControl = fitControl)
svmFit # Examine the model
# Examine how the training data was classified in x-validation
training_data_class_pred <- predict(svmFit, newdata = training, type = "raw")
confusionMatrix(data = training_data_class_pred, reference = factor(classing_data$Author[trainIndex]))
# Predict the Bush V. Gore Doc.
class_pred <- predict(svmFit, newdata = unknown, type = "raw")
class_probs <- predict(svmFit, newdata = unknown, type = "prob")
# Show final classification result and probabilities
class_probs
# Multiclass AUC on the (in-sample) training predictions
training_data_class_pred <- predict(svmFit, newdata = training, type = "raw")
mx <- multiclass.roc(factor(classing_data$Author[trainIndex]), as.numeric(factor(training_data_class_pred)), percent=TRUE)
mx$auc
# Save the probabilities for each document?:
training_data_class_probs <- predict(svmFit, newdata = training, type = "prob")
################################################################################
# Rerun classification USING NSC (nearest shrunken centroids) instead of SVM
################################################################################
# TRAIN ON 3/4 OF DATA
set.seed(8675309) # Hi Jenny!
trainIndex <- createDataPartition(factor(classing_data$Author), p = .75, list = FALSE, times = 1)
training <- classing_data[trainIndex,5:ncol(classing_data)]
testing <- classing_data[-trainIndex,5:ncol(classing_data)]
unknown <- classing_data_full[u_ids, 5:ncol(classing_data_full)]
# 5x repeated 10-fold x-validation
fitControl <- trainControl(method = "repeatedcv", repeats = 5, classProbs = T)
# Build the NSC model (caret method "pam" wraps the pamr package)
nscFit <- train(x=training, y = factor(classing_data$Author[trainIndex]), method = "pam", preProcess = c("center","scale"),trControl = fitControl)
# Examine the model
nscFit
# Examine how the training data was classified in x-validation
training_data_class_pred <- predict(nscFit, newdata = training, type = "raw")
confusionMatrix(data = training_data_class_pred, reference = factor(classing_data$Author[trainIndex]))
# Now make predictions using the unseen hold-out set and examine
class_pred <- predict(nscFit, newdata = testing, type = "raw")
class_probs <- predict(nscFit, newdata = testing, type = "prob")
confusionMatrix(data = class_pred, reference = factor(classing_data$Author[-trainIndex]))
# now classify the Bush V. Gore Document
class_pred <- predict(nscFit, newdata = unknown, type = "raw")
class_probs <- predict(nscFit, newdata = unknown, type = "prob")
# Show final classification result and probabilities
class_probs
################################################################################
# Rerun USING out-of-the-box NSC (pamr directly) so that we can access the
# feature weights / shrunken centroids
################################################################################
# Author column is a factor and needs to be a character vector for this algo
classing_data$Author <- as.character(classing_data$Author)
set.seed(8675309) # set seed for repeatability during testing.
trainIndex <- createDataPartition(factor(classing_data$Author), p = .75, list = FALSE, times = 1)
training <- classing_data[trainIndex,5:ncol(classing_data)]
testing <- classing_data[-trainIndex,5:ncol(classing_data)]
unknown <- classing_data_full[u_ids, 5:ncol(classing_data_full)]
feature_cols <- 5:ncol(classing_data)
train_data <- classing_data[trainIndex, feature_cols]
test_data <- classing_data[-trainIndex, feature_cols]
train_signal_colors <- classing_data[trainIndex, "Author"]
test_signal_colors <- classing_data[-trainIndex, "Author"]
unknown_data <- classing_data_full[u_ids, feature_cols]
features <- colnames(train_data)
# pamr expects a list with x = features-by-samples matrix (hence t()),
# y = class labels, geneid = feature names
data.train <- list(x=t(train_data), y=train_signal_colors, geneid=features)
data.test <- list(x=t(test_data), y=test_signal_colors, geneid=features)
data.unk <- list(x=t(unknown_data), y="UNK", geneid=features)
# uniform prior over the observed classes
prior <- rep(1/length(levels(as.factor(data.train$y))), length(levels(as.factor(data.train$y))))
pamr.train.out <- pamr.train(data.train, prior=prior)
# pamr.cv.out <- pamr.cv(pamr.train.out, data.train)
# refit after adaptively rescaling the class-wise shrinkage thresholds
new.scales <- pamr.adaptthresh(pamr.train.out)
pamr.train.out <- pamr.train(data.train, prior=prior, threshold.scale=new.scales)
pamr.cv.out <- pamr.cv(pamr.train.out, data.train)
# pick the first threshold achieving the minimum cross-validated error
thresh.row <- which(pamr.cv.out$error == min(pamr.cv.out$error))[1]
the.thresh <- pamr.cv.out$threshold[thresh.row]
# confusion matrix at the chosen threshold, augmented with per-class error rate
tt <- pamr.confusion(pamr.cv.out, threshold=the.thresh, FALSE)
tt1 <- tt
diag(tt1) <- 0
tt <- cbind(tt, apply(tt1, 1, sum)/apply(tt, 1, sum))
dimnames(tt)[[2]][ncol(tt)] <- "Class Error rate"
tt
# Held out data. . .
# NOTE(review): threshold=0 here uses the unshrunken model rather than
# the.thresh chosen above — presumably intentional; confirm.
pamr.test.pred <- pamr.predict(pamr.train.out, data.test$x, threshold=0, type="class")
theProbs <- as.data.frame(pamr.predict(pamr.train.out, data.test$x, threshold=0, type="posterior"))
signalskey <- c("Rehnquist", "Scalia", "Thomas")
predicted.class <- as.character(signalskey[as.numeric(pamr.test.pred)])
pred.results <- as.data.frame(cbind(Author=classing_data[-trainIndex, "Author"], predicted.class, theProbs))
colnames(pred.results)[3:5] <- c("Rehnquist", "Scalia", "Thomas")
confusionMatrix(data = pred.results$predicted.class, reference = pred.results$Author)
# Unknown Text
pamr.test.pred <- pamr.predict(pamr.train.out, data.unk$x, threshold=0, type="class")
theProbs <- as.data.frame(pamr.predict(pamr.train.out, data.unk$x, threshold=0, type="posterior"))
signalskey <- c("Rehnquist", "Scalia", "Thomas")
predicted.class <- as.character(signalskey[as.numeric(pamr.test.pred)])
pred.results <- as.data.frame(cbind(Author="Unknown", predicted.class, theProbs))
colnames(pred.results)[3:5] <- c("Rehnquist", "Scalia", "Thomas")
pred.results
# List the surviving (most discriminative) features at the chosen threshold
pamr.listgenes(pamr.train.out, data.train, the.thresh, pamr.cv.out)
|
13598c171b549bd65da4231ee8d875c82156d174
|
dbd1ce9b5cd4dea4f39a6f5a4a717210bc77649e
|
/Mevo/mevo_daily_bikes.R
|
77218ef43bae912601cc8049de68b43bf1ef21b4
|
[] |
no_license
|
hrpunio/Data
|
7b26d18f39d7e8b38bf2e951d85283cdbacfec14
|
8d894237e6a0cbe140c075d09ee1231a56788cd5
|
refs/heads/master
| 2023-07-24T06:33:29.212661
| 2022-01-27T19:38:22
| 2022-01-27T19:38:22
| 2,735,606
| 4
| 4
| null | 2022-02-03T13:55:26
| 2011-11-08T17:41:23
|
HTML
|
UTF-8
|
R
| false
| false
| 3,504
|
r
|
mevo_daily_bikes.R
|
## Daily MEVO (Tricity bike-share) usage plots and monthly summaries.
## Reads MEVO_DAILY_BIKES.csv and produces four panels saved to
## mevo_daily_bikes.pdf, then aggregates selected columns by month.
## References:
## http://www.statmethods.net/stats/regression.html
## http://www.cookbook-r.com/Statistical_analysis/Regression_and_correlation/
## google:: trust wvs Heston et al.
## http://tdhock.github.io/animint/geoms.html
require(ggplot2)
require(ggpubr)
# semicolon-separated daily data; "NA" marks missing values
d <- read.csv("MEVO_DAILY_BIKES.csv", sep = ';', header=T, na.string="NA");
##rains <- read.csv("mevo_rains_daily.csv", sep = ';', header=T, na.string="NA");
##
##d["rains"] <- rains$opad
# nzb = bikes in use (total fleet minus idle bikes `zb`)
nzb <- d$bikes - d$zb
d["nzb"] <- nzb
# Panel 1: ridden (nzb) vs idle (zb) bikes over time, with loess smoothers
p1 <- ggplot(d, aes(x = as.Date(day))) +
  ggtitle("MEVO: rowery jeżdżone (nzb) vs niejeżdżone (zb)") +
  geom_point(aes(y = bikes, colour = 'bikes'), size=1) +
  geom_point(aes(y = zb, colour = 'zb'), size=1) +
  geom_point(aes(y = nzb, colour = 'nzb'), size=1) +
  ##geom_line(aes(y = rains, colour = 'nzb'), size=1) +
  geom_smooth(aes(x = as.Date(day), y=bikes, colour='bikes'), method="loess", size=.5) +
  geom_smooth(aes(x = as.Date(day), y=zb, colour='zb'), method="loess", size=.5) +
  geom_smooth(aes(x = as.Date(day), y=nzb, colour='nzb'), method="loess", size=1) +
  ylab(label="#") +
  ##theme(legend.title=element_blank()) +
  labs(colour = "Rowery: ") +
  theme(legend.position="top") +
  theme(legend.text=element_text(size=10));
# Panel 2: daily distance in Gdansk (gd) and Gdynia (ga)
p2 <- ggplot(d, aes(x = as.Date(day))) +
  ggtitle("MEVO: dzienny dystans (Gdańsk/Gdynia)") +
  geom_point(aes(y = ga, colour = 'ga'), size=1) +
  geom_point(aes(y = gd, colour = 'gd'), size=1) +
  geom_smooth(aes(x = as.Date(day), y=ga, colour='ga'), method="loess", size=.5) +
  geom_smooth(aes(x = as.Date(day), y=gd, colour='gd'), method="loess", size=.5) +
  ylab(label="km") +
  labs(colour = "Miasta: ") +
  theme(legend.position="top") +
  theme(legend.text=element_text(size=10));
# Panel 3: daily distance in the smaller towns (Tczew/Rumia/Sopot)
p3 <- ggplot(d, aes(x = as.Date(day))) +
  ggtitle("MEVO: dzienny dystans (Tczew/Rumia/Sopot)") +
  geom_point(aes(y = sop, colour = 'sop'), size=1) +
  geom_point(aes(y = tczew, colour = 'tczew'), size=1) +
  geom_point(aes(y = rumia, colour = 'rumia'), size=1) +
  geom_smooth(aes(x = as.Date(day), y=sop, colour='sop'), method="loess", size=.5) +
  geom_smooth(aes(x = as.Date(day), y=tczew, colour='tczew'), method="loess", size=.5) +
  geom_smooth(aes(x = as.Date(day), y=rumia, colour='rumia'), method="loess", size=.5) +
  ylab(label="km") +
  labs(colour = "Miasta: ") +
  theme(legend.position="top") +
  theme(legend.text=element_text(size=10));
# Panel 4: total daily distance across all cities
p4 <- ggplot(d, aes(x = as.Date(day))) +
  ggtitle("MEVO: dzienny dystans łącznie") +
  geom_line(aes(y = dist.total, colour = 'dist.total'), size=.5) +
  geom_smooth(aes(x = as.Date(day), y=dist.total, colour='dist.total'), method="loess", size=1) +
  ylab(label="km") +
  labs(colour = "") +
  theme(legend.position="top") +
  theme(legend.text=element_text(size=10));
p1;p2;p3;p4
# 2x2 grid of the four panels, written to PDF
ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2)
ggsave(file="mevo_daily_bikes.pdf", width=12)
# Monthly aggregation; see
# https://stackoverflow.com/questions/16652199/compute-monthly-averages-from-daily-data
d$day <- as.Date(d$day);
d$mm <- months(d$day)
d$yy <- format(d$day, format="%y")
aggregate(nzb ~ mm + yy, d, mean)
aggregate(zb ~ mm + yy, d, mean)
aggregate(bikes ~ mm + yy, d, mean)
# percentage of the fleet actually ridden each day
d$nzbp <- d$nzb/d$bikes * 100
## average share of ridden bikes in the total fleet, by month
aggregate(nzbp ~ mm + yy, d, mean)
## Gdansk / Gdynia monthly means
aggregate(gd ~ mm + yy, d, mean)
aggregate(ga ~ mm + yy, d, mean)
## overall means of the remaining station/percentage columns
mean(d$zstat)
mean(d$sstat)
mean(d$gd0p)
mean(d$ga0p)
mean(d$sop0p)
mean(d$tczew0p)
mean(d$rumia0p)
mean(d$gd1p)
mean(d$ga1p)
mean(d$sop1p)
mean(d$tczew1p)
mean(d$rumia1p)
|
740bb5abbd15f9dc98966fe73f5e77ed917292ec
|
0c5b5d084fbdb1954752c7eed2943f22ffbcde96
|
/central_limit_theorem_homework.R
|
a3507d99e5937ca83ad3516f307756509e4dc32f
|
[] |
no_license
|
miltoss/HWZ_DataAnalysis_2015
|
82c0e9947d3c9493007255a108e00c78baa0af71
|
68d48bedf689ad0d881d1a4adba1ce3bae6730db
|
refs/heads/master
| 2021-01-10T06:34:58.138172
| 2016-01-26T20:08:31
| 2016-01-26T20:08:31
| 43,833,432
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,681
|
r
|
central_limit_theorem_homework.R
|
# HOMEWORK EXERCISE
# This is a simulation of the central limit theorem
# (Chapter 6.4 of the book)
# The idea is the following: Iteratively generate a vector data_dist of random numbers
# following a probability density function (for example the uniform distribution).
# There will be n_samples iterations (i.e. generated vectors)
# The vector has length (n_observations). Add each vector to a sum vector
# data_sum (update the sum vector). When finished plot the resulting vector
# as a histogram and check if it is a normal distribution.
# This is the size of one random sample
n_observations = 5000
# This is the number of random samples to draw. Vary this number
# from 1 to 5000 and see what happens. For example, try the following
# values for n: 1, 2, 3, 5, 10, 100, 1,000, 10,000
n_samples = 1000
# Take n_samples samples each of sample_size
# Initialise vector with zeros
data_sum = rep(0, n_observations)
# Check some things out
length(data_sum)
head(data_sum)
# Iteratively generate a sample and add it to the sum.
# (The loop body is intentionally left blank for the student to complete.)
for (i in 1:n_samples) {
  # Fill in the code here. Generate vector data_dist and
  # add it to data_sum
}
# Let's see what came out:
# Split the plot window in two columns
par(mfrow = c(1, 2))
# Plot the resulting distribution
hist(data_sum)
# Test resulting distribution for normality in a graphical way.
# If a straight line is drawn, then a normal distribution (data_sum)
# has been created
qqnorm(data_sum)
# Reset the plot window
par(mfrow = c(1, 1))
# Another way to test normality
# If the p-value printed on the console
# is under 0.05, then we know distribution is not normal
shapiro.test(data_sum)
|
fdf43d9404c062622ba4a732e417c0a08a0abae8
|
b934fa93e660667ec7e5193639a02137f29e746e
|
/ikde.Rcheck/00_pkg_src/ikde/R/data.R
|
e28c9dd1030860b9e34e5ab6b1500659b69a394c
|
[] |
no_license
|
tkmckenzie/ikde-scripts
|
b5fe5ec86de11905a7bfd7c03f3640dea37ea106
|
989c2dbc416cd489788d5a6071282d1c109d8c3e
|
refs/heads/master
| 2020-04-10T19:35:48.600541
| 2019-01-09T17:36:21
| 2019-01-09T17:36:21
| 161,240,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,237
|
r
|
data.R
|
#' Randomly generated multivariate linear model data
#'
#' A dataset for estimation of linear models
#'
#' @format A list with two components:
#' \describe{
#'   \item{X}{Matrix of independent variables}
#'   \item{y}{Vector of dependent variable observations}
#' }
#' @details
#' Generated with the following code:
#' \preformatted{
#' set.seed(100)
#'
#' N <- 100
#' k <- 4
#' sd <- 10
#'
#' X <- cbind(1, matrix(runif(N * (k - 1), -10, 10), ncol = k - 1))
#' beta <- runif(k, -5, 5)
#' y <- X %*% beta + rnorm(N, sd = sd)
#' y <- c(y)
#' }
"lm.generated"
#' Prostatic nodal development data
#'
#' A dataset replicated from Chib (1995) indicating presence of prostatic nodal development among patients with prostate cancer
#'
#' @format A data.frame with 53 observations of 7 variables:
#' \describe{
#'   \item{Case}{Patient identifier}
#'   \item{y}{Binary outcome indicating nodal development}
#'   \item{X.1}{Explanatory variable}
#'   \item{X.2}{Explanatory variable}
#'   \item{X.3}{Binary explanatory variable}
#'   \item{X.4}{Binary explanatory variable}
#'   \item{X.5}{Binary explanatory variable}
#' }
#' @details
#' These data were replicated from Chib (1995)
#' @references
#' \insertRef{Chib}{ikde}
"prostatic.nodes"
|
d7506f9ea0dc69ca9c8d5b54eda06d593192bff6
|
c3979af4d5e88510fc6bc204d15a89999173c78f
|
/man/ozone.Rd
|
7f063901a43283984d1148c6da1b9d9d1b0bafc8
|
[] |
no_license
|
cran/faraway
|
a82ac66b6e3696ce4b3d3959c61e7136d2ef7aa9
|
fd738166e58ee12f02afe35029b4e5e7ebff58d1
|
refs/heads/master
| 2022-08-31T20:44:43.259095
| 2022-08-23T13:50:02
| 2022-08-23T13:50:02
| 17,695,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,042
|
rd
|
ozone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/faraway-package.R
\docType{data}
\name{ozone}
\alias{ozone}
\title{Ozone in LA in 1976}
\format{
A data frame with 330 observations on the following 10 variables.
\describe{ \item{O3}{Ozone conc., ppm, at Sandbug AFB.}
\item{vh}{a numeric vector} \item{wind}{wind speed}
\item{humidity}{a numeric vector} \item{temp}{temperature}
\item{ibh}{inversion base height} \item{dpg}{Daggett
pressure gradient} \item{ibt}{a numeric vector}
\item{vis}{visibility} \item{doy}{day of the year} }
}
\source{
Breiman, L. and J. H. Friedman (1985). Estimating optimal
transformations for multiple regression and correlation. Journal of the
American Statistical Association 80, 580-598.
}
\description{
A study of the relationship between atmospheric ozone concentration and
meteorology in the Los Angeles Basin in 1976. A number of cases with
missing variables have been removed for simplicity.
}
\examples{
data(ozone)
## maybe str(ozone) ; plot(ozone) ...
}
\keyword{datasets}
|
06460d0d1495ade79d48a2c0cac69bd7524c6a36
|
a6d2fd0960626ba07f222e37b4ad9299d7e061e0
|
/man/getVersion.Rd
|
19bc8a4b58aa04558da1f0570ec201414b7dbab0
|
[] |
no_license
|
cran/SpectralGEM
|
b61104c48f355f6035a4fa5da614c0b68060a9a4
|
6d424c701c3936bc22ab8b8a6bbd048719a436b7
|
refs/heads/master
| 2021-01-01T18:18:03.499673
| 2009-07-07T00:00:00
| 2009-07-07T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
rd
|
getVersion.Rd
|
\name{getVersion}
\alias{getVersion}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ getVersion}
\description{
Internal function.
}
\usage{
getVersion(InputFile)
}
\arguments{
\item{InputFile}{Input file name.}
}
\keyword{file}
|
a6058ce28d69f1de749e8ba30ed630539b8c203a
|
6f2f5a6cae8f8aca53041a45ec9c81b3f4a221b1
|
/MAIN - Additional Data Wrangling.R
|
07484927485531ddb009a1f370d8ead3d5c24463
|
[] |
no_license
|
pierremichelhardy/Master-Thesis
|
ba8246ce62a9e5879ca336ea034cad41eeee2405
|
caf9aef42c3c4b9459824739d07db81b3c29effd
|
refs/heads/main
| 2023-01-01T02:44:25.360325
| 2020-10-28T14:51:03
| 2020-10-28T14:51:03
| 308,048,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,550
|
r
|
MAIN - Additional Data Wrangling.R
|
# This code is written by Pierre Michel B. Hardy in partial fulfillment of his master thesis
# This code contains additional data wrangling before running the regression
# Namely, it's about taking out the effects of individual artist effects from the variables
# (i.e. demeaning each numeric feature within artist).
# Packages #########
library(readxl)
library(dplyr)
library(xlsx)
library(varhandle)
# Data #########
dat <- read_excel("DATA COLLECTED - READY FOR ANALYSIS.xlsx")
# Main Code ########
# turn T2H into numeric. Apparently, it's in character
dat$T2H <- as.numeric(dat$T2H)
# make another copy to reduce risk of having to repeat
dat2 <- dat
# group by artist, take the mean value of variable, then subtract the mean from each
# this first one is my test to make sure the code does what I want it to
# NOTE(review): the grouping column really is spelled "arist.id" in the data.
dat3 <- dat2 %>%
  group_by(arist.id) %>%
  transmute(dance.mean = mean(danceability),
            danceability2 = danceability - mean(danceability),
            dancebility.orig = danceability)
# this code snippet has been tested against excel on a few artists
# the results are consistent with what I want to happen
# this code works!
# okay, time to do this for all columns
# (the trailing comma after T2H2 is tolerated by dplyr's dynamic dots)
dat4 <- dat2 %>%
  group_by(arist.id) %>%
  transmute(danceability2 = danceability - mean(danceability),
            loudness2 = loudness - mean(loudness),
            speechiness2 = speechiness - mean(speechiness),
            acousticness2 = acousticness - mean(acousticness),
            instrumentalness2 = instrumentalness - mean(instrumentalness),
            liveness2 = liveness - mean(liveness),
            tempo2 = tempo - mean(tempo),
            track.duration_ms2 = track.duration_ms - mean(track.duration_ms),
            track.popularity2 = track.popularity - mean(track.popularity),
            T2H2 = T2H - mean(T2H),)
# just going to put back the columns I didn't transform but lost from the transmutation
leftover <- select(dat2, c("key","mode","track.id","track.name","track.album.id",
                           "track.album.name","artist.name","time.signature.dummy"))
# Last job: turning the moods into binary (one-hot) columns prefixed "mood"
mood.binary <- select(dat2, c("track.mood"))
mood.binary <- to.dummy(mood.binary$track.mood, "mood")
# bind them all back together, column-wise
dat5 <- cbind(as.data.frame(dat4), leftover, mood.binary)
# Export the dataset ########
write.xlsx(dat5, "DATA COLLECTED - READY FOR ANALYSIS - ARTIST EFFECTS.xlsx")
# Next Steps #####
# That's it! Artist effects are taken care of and now we're ready for analysis
|
890b0b315e546ea23a1d78c7b4f8eb02a4934fb1
|
2c7ffda73e9a3f3bdacfb11c615a420217b638e0
|
/run_analysis.R
|
1f964a08da2bb79a33d8c5c1d3279d4495d36d80
|
[] |
no_license
|
stormy-ua/humanactivityrecognition
|
61970659f63a02d08f8764d7fa02a8b93356886d
|
073ba5c840c63b2a057e6897f05fe7fac611d79d
|
refs/heads/master
| 2021-01-01T06:49:46.014759
| 2015-02-25T14:14:46
| 2015-02-25T14:14:46
| 30,977,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,848
|
r
|
run_analysis.R
|
library(dplyr)
library(magrittr)
# Loads a tidy train or test data set for the UCI HAR data.
#
# Args:
#   xFile:         path to the measurements file (X_train.txt / X_test.txt)
#   yFile:         path to the activity-index file (y_train.txt / y_test.txt)
#   subjectsFiles: path to the subject-id file (subject_train.txt / subject_test.txt)
# Returns:
#   data.frame with SubjectId, the mean/std feature columns, and an Activity label,
#   rows in the same order as the input files.
loadDataSet <- function(xFile, yFile, subjectsFiles) {
  # load list of all features from features.txt
  featureNames <- as.character(read.table("features.txt", stringsAsFactors = FALSE)[, 2])
  # we want to load only std and mean features so filter out other columns;
  # colClasses = "NULL" tells read.table to skip a column entirely
  featuresToLoad <- grepl("std", featureNames) | grepl("mean", featureNames)
  colClassesToLoad <- rep("NULL", times = length(featureNames))
  colClassesToLoad[featuresToLoad] <- "numeric"
  # load measurements data
  data <- read.table(xFile, colClasses = colClassesToLoad)
  # set column names
  names(data) <- featureNames[featuresToLoad]
  # load subjects and prepend as the first column
  subjects <- read.table(subjectsFiles, colClasses = c("numeric"), col.names = c("SubjectId"))
  data <- cbind(subjects, data)
  # load activity labels from activity_labels.txt
  activityLabels <- read.table("activity_labels.txt", col.names = c("ActivityLabelIndex", "ActivityLabel"))
  # load activities (one index per measurement row)
  activities <- read.table(yFile, stringsAsFactors = TRUE,
                           colClasses = c("numeric"), col.names = c("ActivityLabelIndex"))
  # BUG FIX: the original used merge(), which sorts rows by the join key and
  # therefore misaligned activity labels with the measurement rows. Looking the
  # label up with match() preserves the original row order.
  data$Activity <- activityLabels$ActivityLabel[
    match(activities$ActivityLabelIndex, activityLabels$ActivityLabelIndex)
  ]
  # return tidy data set
  data
}
# load and merge tidy train and tidy test data sets
# (note: Windows-style "\\" path separators; use file.path() for portability)
data <- rbind(loadDataSet("train\\X_train.txt", "train\\y_train.txt", "train\\subject_train.txt"),
              loadDataSet("test\\X_test.txt", "test\\y_test.txt", "test\\subject_test.txt"))
# summarize: mean of every feature per (subject, activity) pair
# (summarise_each/funs are deprecated in modern dplyr; across() is the successor)
data <- data %>% group_by(SubjectId, Activity) %>% summarise_each(funs(mean))
head(data)
# save tidy data set to tidy.txt
write.table(data, "tidy.txt", row.names = F, quote = F)
|
0194998c030bce0a2aa76fc03f468002239e9159
|
4ba01df88418e1877fc115a8c90bc1da1b81dc3d
|
/R/predict.R
|
6cb80de81406a6ffdc3f4bc5d72659aba86331ae
|
[] |
no_license
|
kaixinhuaihuai/OncoCast
|
9c9839f68058b67e6fd027ed6bd079e11b5c0e2a
|
beea742940deb7b7d44d0e903ba4388954e51f0a
|
refs/heads/master
| 2023-01-22T16:29:52.166583
| 2020-12-08T23:02:48
| 2020-12-08T23:02:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,357
|
r
|
predict.R
|
#' predIncomingSurv
#'
#' This function let's user predict the genetic risk score of new/incoming patients. This function
#' makes use of the output of the OncoCast function and an inputed data set similar to the one use to generate
#' those results. The user can retrieve the genetic risk score of those new patients and their predicted survival curves.
#'
#' @param OC_object Output of the OncoCast function.
#' @param new.data New data set containing the information of the incoming patients. Should be a dataframe
#' with patients as rows and features as columns.
#' @param surv.print A numeric vector indicating the patients for which the user wishes to print the predicted
#' survival curves.
#' @param riskRefit The refitted cox proportional hazard model with the risk score as predictor.
#'
#' @return data.out : The data frame inputted in the function with an additional column giving the predicted risk
#' score of the incoming patients.
#' @return RiskHist : A histogram of the distribution of the risk scores of patients in the given dataset.
#' @return IncKM : An interactive Kaplan-Meier plot of the selected patients in the surv.print argument.
#' @export
#' @examples library(OncoCast)
#' test <- OncoCast(data=survData,formula = Surv(time,status)~.,
#' method = "LASSO",runs = 30,
#' save = FALSE,nonPenCol = NULL,cores =1)
#' results <- getResults_OC(OC_object=test$LASSO,data=survData,
#' cuts=c(0.2,0.4,0.6,0.8),
#' geneList=NULL,mut.data=TRUE)
#' new.data <- as.data.frame(matrix(rbinom(5*20,1,0.5),nrow=20,ncol = 5))
#' colnames(new.data) <- c("ImpCov1","ImpCov2","ImpCov3","ImpCov4","Cov7")
#' rownames(new.data) <- paste0("Incoming",1:20)
#' Incoming <- predIncomingSurv(test$LASSO,new.data=new.data,
#' surv.print = c(5,10,15),riskRefit = results$RiskRefit)
#' @import
#' magrittr
#' dtplyr
#' ggplot2
#' survminer
#' reshape2
#' scales
#' pheatmap
#' @importFrom plotly plot_ly layout toRGB add_ribbons
#' @importFrom dplyr select filter mutate group_by rename summarise arrange
predIncomingSurv <- function(OC_object,new.data,surv.print= NULL,riskRefit){
  # Drop cross-validation runs that failed (NULL entries)
  OC_object <- Filter(Negate(is.null), OC_object)
  # get all information needed from the oncocast object
  # 1. risk: average predicted risk per training patient across all CV runs;
  #    used later only to fix the rescaling range for incoming patients
  final.pred <- sapply(OC_object,"[[","predicted")
  ori.risk <- apply(final.pred,1,function(x){
    mean(as.numeric(x),na.rm = TRUE)
  })
  ### FOR PENALIZED REG ###
  if(OC_object[[1]]$method %in% c("LASSO","RIDGE","ENET")){
    # 2. Fits: one row of coefficients per CV run; NA coefficients mean
    #    "not selected" and are treated as zero
    LassoFits <- t(sapply(OC_object,"[[","fit"))
    LassoFits[is.na(LassoFits)] <- 0
    ################################
    features <- colnames(LassoFits)
    # flag columns that fail numeric coercion (i.e. character/factor columns)
    dums <- apply(new.data,2,function(x){anyNA(as.numeric(as.character(x)))})
    if(sum(dums) > 0){
      # one-hot encode the character columns and rebuild new.data as numeric
      tmp <- new.data %>%
        select(which(dums)) %>%
        fastDummies::dummy_cols(remove_first_dummy = T) %>%
        select(-one_of(names(which(dums))))
      new.data <- as.data.frame(cbind(
        new.data %>% select(-one_of(names(which(dums)))),
        tmp
      ) %>% mutate_all(as.character) %>%
        mutate_all(as.numeric)
      )
      warning("Character variables were transformed to dummy numeric variables. If you didn't have any character variables make sure all columns in your input data are numeric. The transformed data will be saved as part of the output.")
    }
    if(!all(is.na(match(colnames(new.data),features)))){
      # keep only features known to the model ...
      matched.genes <- c(na.omit(match(colnames(new.data),features)))
      new.dat <- new.data[,which(!is.na(match(colnames(new.data),features)))]
      ## ADD ALL MISSING GENES TO BE ALL zero ##
      # ... and pad any model feature absent from the new data with zeros,
      # then reorder columns to the model's feature order
      missing <- features[which(is.na(match(features,colnames(new.dat))))]
      to.add <- as.data.frame(matrix(0L,nrow=nrow(new.dat),ncol=length(missing)))
      colnames(to.add) <- missing
      rownames(to.add) <- rownames(new.dat)
      new.dat <- as.data.frame(cbind(new.dat,to.add))
      new.dat <- new.dat[,match(features,colnames(new.dat))]
      #############################################
      # one risk prediction per CV run: center by that run's TRAINING means,
      # then apply the run's nonzero coefficients
      all.pred <- lapply(1:nrow(LassoFits),function(x){
        ### Subset to the coefs of that cv ###
        coefs <- LassoFits[x,LassoFits[x,] != 0]
        new.temp <- select(new.dat,names(coefs))
        ## subtract mean mutation rate of TRAINING SET !!!###
        new.x <- new.temp - rep(OC_object[[x]]$means[match(names(coefs),names(OC_object[[x]]$means))], each = nrow(new.temp))
        cal.risk.test <- drop(as.matrix(new.x) %*% coefs)
        return(cal.risk.test)
      })
    }
    else{
      stop("No gene overlapped be sure they are correctly matched.")
    }
  }
  ### For GBM / other machine-learning back-ends ###
  if(OC_object[[1]]$method %in% c("GBM","RF","SVM","NN")){
    # each back-end stores its feature names in a different slot
    if(OC_object[[1]]$method == "GBM") features <- OC_object[[1]]$GBM$var.names
    if(OC_object[[1]]$method == "RF") features <- OC_object[[1]]$RF$forest$independent.variable.names
    if(OC_object[[1]]$method == "SVM") features <- names(OC_object[[1]]$Vars)
    if(OC_object[[1]]$method == "NN") features <- names(OC_object[[1]]$Vars)
    # same character-to-dummy conversion as in the penalized branch
    dums <- apply(new.data,2,function(x){anyNA(as.numeric(as.character(x)))})
    if(sum(dums) > 0){
      tmp <- new.data %>%
        select(which(dums)) %>%
        fastDummies::dummy_cols(remove_first_dummy = T) %>%
        select(-one_of(names(which(dums))))
      new.data <- as.data.frame(cbind(
        new.data %>% select(-one_of(names(which(dums)))),
        tmp
      ) %>% mutate_all(as.character) %>%
        mutate_all(as.numeric)
      )
      warning("Character variables were transformed to dummy numeric variables. If you didn't have any character variables make sure all columns in your input data are numeric. The transformed data will be saved as part of the output.")
    }
    if(!all(is.na(match(colnames(new.data),features)))){
      # same feature matching / zero-padding / reordering as above
      matched.genes <- c(na.omit(match(colnames(new.data),features)))
      new.dat <- new.data[,which(!is.na(match(colnames(new.data),features)))]
      ## ADD ALL MISSING GENES TO BE ALL zero ##
      missing <- features[which(is.na(match(features,colnames(new.dat))))]
      to.add <- as.data.frame(matrix(0L,nrow=nrow(new.dat),ncol=length(missing)))
      colnames(to.add) <- missing
      rownames(to.add) <- rownames(new.dat)
      new.dat <- as.data.frame(cbind(new.dat,to.add))
      new.dat <- new.dat[,match(features,colnames(new.dat))]
      # one prediction per CV run, via the back-end's own predict method
      if(OC_object[[1]]$method == "GBM") {
        all.pred <- lapply(OC_object,function(x){
          predict(x$GBM,newdata=new.dat,
                  n.trees = x$bestTreeForPrediction,
                  type="response")
        })}
      if(OC_object[[1]]$method == "RF") {
        all.pred <- lapply(OC_object,function(x){
          predict(x$RF,new.dat)$predictions
        })}
      if(OC_object[[1]]$method == "SVM") {
        all.pred <- lapply(OC_object,function(x){
          predict(x$SVM,new.dat)
        })}
      if(OC_object[[1]]$method == "NN") {
        all.pred <- lapply(OC_object,function(x){
          predict(x$NN,new.dat)
        })}
    }
    else{
      stop("No gene overlapped be sure they are correctly matched.")
    }
  }
  # average the per-run risk predictions into one score per incoming patient
  all.pred <- do.call("cbind",all.pred)
  Risk <- apply(all.pred,1,mean)
  names(Risk) <- rownames(new.dat)
  # Risk.all <- as.matrix(coefs) %*% as.matrix(t(new.dat))
  # Risk <- apply(Risk.all,2,mean)
  #new.data$Risk <- Risk
  ##########################################
  # rescale to 0-10 using the TRAINING risk range so scores are comparable
  # with the original OncoCast cohort
  ori.risk.range <- range(ori.risk)
  new.data$OncoCastRiskScore <- rescale(Risk, to = c(0, 10), from = ori.risk.range) #WithOriginal
  #new.data$rescaledRisk <- rescale(new.data$Risk, to = c(0, 10), from = range(new.data$Risk, na.rm = TRUE, finite = TRUE))
  RiskHistogram.new <- ggplot(new.data, aes(x = OncoCastRiskScore, y = ..density..)) +
    geom_histogram(show.legend = FALSE, aes(fill=..x..),
                   breaks=seq(min(new.data$OncoCastRiskScore,na.rm = T), max(new.data$OncoCastRiskScore,na.rm = T))) +#, by=20/nrow(new.data))) +
    geom_density(show.legend = FALSE) +
    theme_minimal() +
    labs(x = "Average risk score", y = "Density") +
    scale_fill_gradient(high = "red", low = "green")
  #return(list("RiskHistogram.new"=RiskHistogram.new,"out.data"=new.data))
  ####################################################
  ## Create survival curves for patients of interest ##
  ####################################################
  # default: plot every incoming patient
  if(is.null(surv.print)) surv.print <- 1:nrow(new.data)
  mut <- new.data[surv.print,]
  colnames(mut)[ncol(mut)] <- "RiskScore"
  # Survival estimates at 3-month intervals (0, 3, ..., 42 months) per patient;
  # allSurvs starts with a dummy column that is dropped after the loop
  allSurvs <- data.frame(nrow= 5)
  for(j in 1:nrow(mut)){
    survival.probs <- as.data.frame(matrix(nrow=6,ncol=15))
    rownames(survival.probs) <- c("Patient","Surv","Lower","Upper","Time","OncoRiskScore")
    surv.temp <- survfit(riskRefit, newdata = mut[j,])
    for(i in 1:ncol(survival.probs)){
      # try(): summary() errors when the requested time exceeds follow-up
      survival.probs[,i] <- try(c(rownames(mut)[j],as.numeric(summary(surv.temp, times = (i*3-3))$surv),
                                  round(summary(surv.temp, times = (i*3-3))$lower,digits=2),
                                  round(summary(surv.temp, times = (i*3-3))$upper,digits=2),
                                  i*3-3,as.numeric(mut$RiskScore[j])),silent=T)
    }
    allSurvs <- cbind(allSurvs,survival.probs)
  }
  allSurvs <- allSurvs[,-1]
  # plotly axis configuration
  a <- list(
    autotick = FALSE,
    dtick = 6,
    tickcolor = toRGB("black")
  )
  # transpose so each row is one (patient, time) point; coerce back to numeric
  t.survival.probs <- as.data.frame(t(allSurvs))
  for(k in 2:ncol(t.survival.probs)){
    t.survival.probs[,k] <- as.numeric(as.character(t.survival.probs[,k]))
  }
  y <- list(
    title = "Survival Probability"
  )
  # interactive KM-style curves with confidence ribbons, one trace per patient
  IndSurvKM <- plot_ly(t.survival.probs, x = ~Time, y = ~Surv, name = ~Patient, type = 'scatter',
                       mode = 'lines+markers',hoverinfo="hovertext",color = ~Patient,
                       hovertext = ~paste("Genetic Risk Score :",round(OncoRiskScore,digits=3))
  ) %>% layout(yaxis = y,xaxis = ~a) %>%
    layout(xaxis = list(title = paste0("Time (Months)"), showgrid = TRUE),showlegend = FALSE) %>%
    add_ribbons(data = t.survival.probs,
                ymin = ~Lower,
                ymax = ~Upper,
                line = list(color = 'rgba(7, 164, 181, 0.05)'),
                fillcolor = 'rgba(7, 164, 181, 0.2)',
                name = "Confidence Interval") #%>% layout(showlegend = FALSE)
  # }
  # else{
  #   IndSurvKM = NULL
  #   t.survival.probs
  # }
  return(list("data.out" = new.data,"RiskHist"=RiskHistogram.new,"IncKM" = IndSurvKM,"survivalEst"=t.survival.probs))
}
|
889e9e6fdffb9c5e862e057934f0426afa98b116
|
d11d7fe9df513536af20898f7dd8c36beec2ed06
|
/US_Scrapping.R
|
8cfb5f9e4e0a4ee2ccc23f9eb954facfd78fd6c5
|
[] |
no_license
|
urosgodnov/Trademarks
|
b54a1b3e913825ad95c6563504709e1392d707b8
|
3cd8e9685e54c961b96bd13a591c527355ecaf36
|
refs/heads/master
| 2021-09-06T10:48:14.471968
| 2018-02-05T19:08:24
| 2018-02-05T19:08:24
| 105,199,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,526
|
r
|
US_Scrapping.R
|
# Extract the current owner's name and address from a USPTO trademark
# assignment-history page.
#
# Args:
#   dataOwner: parsed HTML document (rvest/xml2) of the assignments page.
# Returns:
#   One-row data.frame with columns OwnerName and OwnerAddr; both NA when no
#   qualifying assignment is found.
GetOwner <- function(dataOwner) {
  # ids and types of assignment containers of interest
  assignements <- dataOwner %>%
    html_nodes(xpath = "//div[@class='assignmentsContainer persist-area' and (@data-assign-type='Ownership and Name Change' or @data-assign-type='Others')]/@id") %>%
    html_text()
  assignementsStatus <- dataOwner %>%
    html_nodes(xpath = "//div[@class='assignmentsContainer persist-area' and (@data-assign-type='Ownership and Name Change' or @data-assign-type='Others')]/@data-assign-type") %>%
    html_text()
  # keep only the numeric suffix of each container id ("assignments-3" -> "3")
  assignementsL <- as.list(gsub(".*-([0-9]+).*", "\\1", assignements))
  # BUG FIX: default both outputs to NA up front. The original only assigned
  # them inside the loop (or in an else branch), so when assignments existed
  # but none matched the conditions below, data.frame() failed on undefined
  # objects.
  OwnerName <- NA
  OwnerAddr <- NA
  if (length(assignementsL) > 0 && is.list(assignementsL)) {
    # walk assignments from most recent (last) to oldest; stop at first match
    for (i in length(assignementsL):1) {
      # XPaths scoped to this assignment's container
      x <- paste("//div[@id='assignments-", assignementsL[[i]], "']//div[@class='value']", sep = "")
      name <- paste("//div[@id='assignments-", assignementsL[[i]], "']//div[contains(text(),'Assignee')]/following::div[1]", sep = "")
      address <- paste("//div[@id='assignments-", assignementsL[[i]], "']//div[contains(text(),'Address')]/following::div[1]", sep = "")
      Conveyance <- dataOwner %>%
        html_nodes(xpath = x) %>% html_text()
      Conveyance <- Conveyance[1]
      # Accept a "Legal"-type conveyance flagged 'Others', or any
      # 'Ownership and Name Change' assignment. Parentheses make R's native
      # && -over- || precedence explicit; behavior unchanged.
      if ((grepl("Legal", Conveyance) && assignementsStatus[[i]] == 'Others') || assignementsStatus[[i]] == 'Ownership and Name Change') {
        OwnerName <- dataOwner %>%
          html_node(xpath = name) %>% html_text()
        OwnerName <- gsub("\r", "", OwnerName)
        OwnerName <- gsub("\n", "", OwnerName)
        OwnerName <- gsub("Name:", "", OwnerName)
        OwnerName <- trimws(OwnerName)
        OwnerAddr <- dataOwner %>%
          html_node(xpath = address) %>% html_text()
        OwnerAddr <- gsub("\r", "", OwnerAddr)
        # NOTE(review): only the FIRST newline is stripped here (sub, not gsub),
        # presumably to keep multi-line addresses readable — confirm intent.
        OwnerAddr <- sub("\n", "", OwnerAddr)
        break
      }
    }
  }
  return(data.frame(OwnerName, OwnerAddr, stringsAsFactors = FALSE))
}
# Scrape international class numbers, descriptions and statuses from a parsed
# TSDR trademark status page and spread them into one wide row with columns
# class1..class9 and description1..description9 (only ACTIVE classes kept,
# falling back to the first class when none is ACTIVE).
# NOTE(review): relies on cbind.fill(), presumably from the 'rowr' package —
# confirm it is loaded elsewhere in this file.
USClasses <- function(data) {
  tmpDF <- data.frame(matrix(ncol = 18, nrow = 1))
  # build the 18 column names: class1..class9, description1..description9
  class <-
    sapply(as.list(1:9), function(x) {
      return(paste("class", x, sep = ""))
    })
  desc <-
    sapply(as.list(1:9), function(x) {
      return(paste("description", x, sep = ""))
    })
  colnames(tmpDF) <- c(class, desc)
  # goods/services description: the div following the "For:" label
  classdesc <- data %>% html_nodes(xpath = "//div[text()='For:']/following::div[1]") %>% html_text()
  classdesc <- gsub("\r\n", "", classdesc)
  # strip bracketed (deleted) text, asterisks and space-before-comma artifacts
  classdesc <- gsub('\\[.*?\\]', '', classdesc)
  classdesc <- gsub('\\*', '', classdesc)
  classdesc <- gsub(' ,', ',', classdesc)
  classStatus <- data %>% html_nodes(xpath = "//div[text()='Class Status:']/following::div[1]") %>% html_text()
  classStatus <- gsub("\r\n", "", classStatus)
  classn <- data %>% html_nodes(xpath = "//div[text()='International Class(es):']/following::div[1]") %>% html_text()
  if (length(classn) == 1 && grepl(",", classn)) {
    # single node listing several classes ("001, 009, ...") — split it and
    # strip leading zeros; cbind.fill pads unequal-length vectors
    classn <- gsub("(^|[^0-9])0+", "\\1", unlist(str_split(classn, ",", simplify = FALSE)))
    classn <- gsub("\r\n", "", classn, perl = TRUE)
    classes <- as.data.frame(cbind.fill(classn, classdesc, classStatus), stringsAsFactors = FALSE)
    colnames(classes) <- c("classn", "classdesc", "classStatus")
  } else {
    # one class per node: keep digits only, strip leading zeros
    classn <- gsub("\\D", "", classn)
    classn <- gsub("(^|[^0-9])0+", "\\1", classn, perl = TRUE)
    classes <- data.frame(classn, classdesc, classStatus, stringsAsFactors = FALSE)
  }
  rows <- nrow(classes)
  # keep only ACTIVE classes
  classes <- classes[classes$classStatus == "ACTIVE", ]
  if (nrow(classes) == 0 && rows > 0) {
    # nothing ACTIVE: fall back to the first class found
    classes <- data.frame(classn, classdesc, classStatus, stringsAsFactors = FALSE)
    classes <- head(classes, 1)
  }
  if (length(classes) > 0 && nrow(classes) > 0) {
    # spread class i into column i (number) and column i+9 (description)
    for (i in 1:nrow(classes))
    {
      tmpDF[, i] <- classes[i, 1]
      tmpDF[, i + 9] <- classes[i, 2]
    }
    return(tmpDF)
  } else {return(tmpDF <- as.data.frame(NULL))}
}
# Scrape the USPTO TSDR status page for one trademark serial number (AppNo)
# and return a one-row data.frame of bibliographic data: dates, numbers,
# owner, agent, status, renewal/DAU deadlines and up to nine classes.
# Returns an empty data.frame when the serial number is malformed or the
# page cannot be fetched/parsed.
# Side effects: temporarily switches LC_TIME to English (restored before
# returning, so month-name date parsing works) and downloads the mark's
# image to ./logos/<AppNo>.jpeg.
# Convention: dates that are missing on the page are encoded as the
# sentinel "01.01.1800".
USScrap <- function(AppNo) {
#AppNo <-"73802871"
#Making URL and Reading data
current<-Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME","English")
# Normalize the serial number: drop separators the user may have typed.
AppNo<-gsub(",","",AppNo)
AppNo<-gsub("/","",AppNo)
AppNo<-gsub("-","",AppNo, fixed=TRUE)
# Defensive cleanup of locals possibly left over from a previous run.
try(rm("data"), silent = TRUE)
try(rm("tmpDF"), silent = TRUE)
try(rm("dataOwner"), silent = TRUE)
try(rm("statusURL"), silent = TRUE)
try(rm("imageUrl"), silent = TRUE)
# Bail out early on anything that is not purely numeric.
if (!grepl('^[0-9]+$', AppNo)) {
tmpDF = as.data.frame(NULL)
return(tmpDF)
}
url <-
paste(
"http://tsdr.uspto.gov/#caseNumber=",
AppNo,
"&caseType=SERIAL_NO&searchType=statusSearch",
sep = ""
)
statusURL<-paste("http://tsdr.uspto.gov/statusview/sn",AppNo,sep="")
# Fetch the status page; on failure `data` stays undefined and the class
# check below returns an empty frame.
try(data <- statusURL %>% read_html(), silent=TRUE)
if (!(class(data)[1] %in% "xml_document")) {
tmpDF = as.data.frame(NULL)
return(tmpDF)
}
## --- Filing date: try "%B. %d, %Y" first, then "%B %d, %Y" (TSDR uses
## both month-name formats). Same two-step pattern for all dates below.
application <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Application Filing Date:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(application) && length(application)>0) {
application <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Application Filing Date:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
application<-format(application, "%d.%m.%Y")
if (length(application)==0 || is.na(application)) {
application<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
registrationNo<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='US Registration Number:']/following::div[1]") %>% html_text())
if (length(registrationNo)==0) {
registrationNo<-NA
}
## --- Registration date (named `acceptance` throughout).
acceptance <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Registration Date:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(acceptance) && length(acceptance)>0) {
acceptance <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Registration Date:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
acceptance<-format(acceptance, "%d.%m.%Y")
if (length(acceptance)==0 || is.na(acceptance)) {
acceptance<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
## --- Priority: foreign application filing date/number; falls back to the
## foreign registration date/number further down when absent.
priority <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Application Filing Date:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(priority) && length(priority)>0) {
priority <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Application Filing Date:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
priority<-format(priority, "%d.%m.%Y")
if (length(priority)==0 || is.na(priority)) {
priority<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
NoticeOfAllowanceDate <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Notice of Allowance Date:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(NoticeOfAllowanceDate) && length(NoticeOfAllowanceDate)>0) {
NoticeOfAllowanceDate <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Notice of Allowance Date:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
NoticeOfAllowanceDate<-format(NoticeOfAllowanceDate, "%d.%m.%Y")
if (length(NoticeOfAllowanceDate)==0 || is.na(NoticeOfAllowanceDate)) {
NoticeOfAllowanceDate<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
priorityNo<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Application Number:']/following::div[1]") %>% html_text())
priorityNo<-tail(priorityNo,1)
if (length(priorityNo)==0) {
priorityNo<-NA
}
# Fallback: use the foreign registration number/date as the priority claim.
if (is.na(priorityNo)) {
priorityNo<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Registration Number:']/following::div[1]") %>% html_text())
priorityNo<-tail(priorityNo,1)
if (length(priorityNo)==0) {
priorityNo<-NA
}
priority <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Registration Date:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(priority) && length(priority)>0) {
priority <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Registration Date:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
priority<-format(priority, "%d.%m.%Y")
if (length(priority)==0 || is.na(priority)) {
priority<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
}
priorityCountry<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Foreign Application/Registration Country:']/following::div[1]") %>% html_text())
priorityCountry<-tail(priorityCountry,1)
if (length(priorityCountry)==0) {
priorityCountry<-NA
}
publication <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Publication Date:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(publication) && length(publication)>0) {
publication <-
as.Date(
gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Publication Date:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
publication<-format(publication, "%d.%m.%Y")
if (length(publication)==0 || is.na(publication)) {
publication<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
#First use date
FirstUseDate<-
as.Date(gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='First Use:']/following::div[1]") %>% html_text()),
"%B. %d, %Y"
)
if (is.na(FirstUseDate) && length(FirstUseDate)>0) {
FirstUseDate<-
as.Date(gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='First Use:']/following::div[1]") %>% html_text()),
"%B %d, %Y"
)
}
FirstUseDate<-format(FirstUseDate, "%d.%m.%Y")
if (length(FirstUseDate)==0 || is.na(FirstUseDate)) {
FirstUseDate<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
# NOTE(review): min() on a "%d.%m.%Y" string compares lexicographically,
# not chronologically — presumably intended to pick the earliest of several
# first-use dates; verify.
FirstUseDate<-min(FirstUseDate)
#TM Type: map USPTO mark-drawing codes 1-5 onto WORD / DEVICE / WORD-DEVICE.
kind<- gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Mark Drawing Type:']/following::div[1]") %>% html_text())
kind<-gsub(".*([0-9]+).*$", "\\1", kind)
kind<-ifelse(kind %in% c("1","2","3","4","5"),kind,"")
kind<-switch(as.numeric(kind),"WORD","DEVICE","WORD-DEVICE","WORD","WORD-DEVICE")
kind<-ifelse(is.null(kind),NA,kind)
# Presence of an "International Registration" section implies a Madrid filing.
AppType<- try(gsub("\r\n","",data %>% html_nodes(xpath = "//span[contains(@data-sectiontitle,'International Registration')]/following::div[1]") %>% html_text()), silent=TRUE)
AppType<-ifelse(length(AppType)>0,"International","National")
agentOnRecord<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Attorney Name:']/following::div[1]") %>% html_text())
if (length(agentOnRecord)==0) {
agentOnRecord<-NA
}
# Correspondent block repeats the attorney name; strip it to keep the address.
agentOnRecordAddr<-gsub("\r","",data %>% html_nodes(xpath = "//div[text()='Correspondent Name/Address:']/following::div[1]") %>% html_text())
agentOnRecordAddr<-gsub(agentOnRecord,"",agentOnRecordAddr)
if (length(agentOnRecordAddr)==0) {
agentOnRecordAddr<-NA
}
agentOnRecord<-paste(agentOnRecord,agentOnRecordAddr,sep="")
associatedTMs<-gsub("\r\n","",data %>%
html_nodes(xpath = "//div[contains(text(),'Claimed Ownership')]/following::div[1]//a")%>% html_text())
associatedTMs<-paste(associatedTMs,collapse = ",")
if (length(associatedTMs)==0) {
associatedTMs<-NA
}
## --- Image download: fetch the mark image into ./logos/; tiny files
## (< 400 bytes) are treated as error pages and deleted.
imageUrl<-paste("http://tmsearch.uspto.gov/ImageAgent/ImageAgentProxy?getImage=",AppNo,sep="")
if (length(imageUrl) == 1 && !is.na(imageUrl)) {
cat(paste("\n","Downloading image...",sep=""))
imageName<-paste("./logos/", AppNo, ".jpeg", sep ="")
try(download.file(imageUrl,imageName, mode = 'wb',cacheOK=FALSE), silent = TRUE)
size<-file.info(imageName)$size
#delete files with problems
if (size<400)
{
file.remove(imageName)
}
} else {imageUrl<-NA}
## --- Classes: delegate to USClasses(); rebuild an all-NA 18-column frame
## when the page had none so the cbind below keeps a fixed shape.
tmpDF<-USClasses(data)
if (length(tmpDF)==0) {
tmpDF <- data.frame(matrix(ncol = 18, nrow = 1))
class <-
sapply(as.list(1:9), function(x) {
return(paste("class", x, sep = ""))
})
desc <-
sapply(as.list(1:9), function(x) {
return(paste("description", x, sep = ""))
})
colnames(tmpDF) <- c(class, desc)
}
#Color: "color is/not claimed" wording collapses to "Black and white".
color<- gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Color(s) Claimed:']/following::div[1]") %>% html_text())
if (length(color)>0) {
color<-ifelse(grepl('not',color),"Black and white",color)
} else {color<-NA}
# Owner, the convoluted way: the current owner lives on the separate
# assignments tab, which must be fetched on its own.
urlA<-paste("http://tsdr.uspto.gov/assignments/",AppNo,"?searchprefix=sn",sep="")
try(dataOwner <- urlA %>% read_html(), silent=TRUE)
if ( !exists("dataOwner")) {
# Assignments tab unreachable: fall back to the owner on the status page.
owner<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Owner Name:']/following::div[1]") %>% html_text())
if (length(owner)==0) {
owner<-NA
}
ownerAddr<-gsub("\r","",data %>% html_nodes(xpath = "//div[text()='Owner Address:']/following::div[1]") %>% html_text())
ownerAddr<-sub("\n","",ownerAddr)
if (length(ownerAddr)==0) {
ownerAddr<-NA
}
} else
{
# GetOwner() (defined elsewhere in this file) parses the latest assignee.
owner1<-GetOwner(dataOwner)
if (nrow(na.omit(owner1))==0) {
owner<-gsub("\r\n","",data %>% html_nodes(xpath = "//div[text()='Owner Name:']/following::div[1]") %>% html_text())
if (length(owner)==0) {
owner<-NA
}
ownerAddr<-gsub("\r","",data %>% html_nodes(xpath = "//div[text()='Owner Address:']/following::div[1]") %>% html_text())
ownerAddr<-sub("\n","",ownerAddr)
if (length(ownerAddr)==0) {
ownerAddr<-NA
}
}
else {
ownerAddr<-owner1$OwnerAddr
owner<-trimws(gsub("Name:","",owner1$OwnerName))
}
}
LimDis<-gsub('"','',data %>% html_nodes(xpath = "//div[text()='Disclaimer:']/following::div[1]") %>% html_text())
if (length(LimDis)==0) {
LimDis<-NA
}
# Status: collapse the TM5 descriptor badges into REGISTERED/FILED/INACTIVE.
statusw<-data %>% html_nodes(xpath = "//div[text()='TM5 Common Status Descriptor:']/../div") %>% html_text()
statusw<-paste(statusw,collapse = ",")
if (grepl("LIVE/REGISTRATION",statusw)) {
status<-"REGISTERED"
} else if (grepl("LIVE/APPLICATION",statusw)) {
status<-"FILED"
}else if (grepl("DEAD/",statusw)) {
status<-"INACTIVE"
} else {status<-NA}
## --- Deadlines: US registrations renew every 10 years; the first
## Declaration of Use (DAU, Section 8) is due 6 years after registration.
## Each deadline gets a 6-month grace period.
## NOTE(review): if `status` is NA this condition evaluates to NA and
## errors — confirm whether unmapped statuses can reach this point.
if (acceptance!="01.01.1800" && status!="INACTIVE") {
x <- 0
# Find the first 10-year anniversary of registration in the future.
while (x<100) {
x<-x+10
tmpDate<-as.Date(acceptance,"%d.%m.%Y") %m+% years(x)
if (tmpDate>today()) {
renewal<- tmpDate
break
}
}
tmpDAU<-tmpDate<-as.Date(acceptance,"%d.%m.%Y") %m+% years(6)
if (tmpDAU>today()) {
DAU<-tmpDAU
renewalGP<-as.Date(renewal,"%d.%m.%Y") %m+% months(6)
DAUGP<-as.Date(DAU,"%d.%m.%Y") %m+% months(6)
} else {
# 6-year DAU already passed: the next DAU coincides with the renewal.
DAU<- renewal
renewalGP<-as.Date(renewal,"%d.%m.%Y") %m+% months(6)
DAUGP<-as.Date(DAU,"%d.%m.%Y") %m+% months(6)
}
} else {
renewal <-as.Date("01.01.1800", "%d.%m.%Y")
renewalGP<-as.Date("01.01.1800", "%d.%m.%Y")
DAU <-as.Date("01.01.1800", "%d.%m.%Y")
DAUGP <-as.Date("01.01.1800", "%d.%m.%Y")
}
renewal<-format(renewal, "%d.%m.%Y")
if (length(renewal)==0 || is.na(renewal)) {
renewal<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
renewalGP<-format(renewalGP, "%d.%m.%Y")
if (length(renewalGP)==0 || is.na(renewalGP)) {
renewalGP<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
DAU<-format(DAU, "%d.%m.%Y")
if (length(DAU)==0 || is.na(DAU)) {
DAU<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
DAUGP<-format(DAUGP, "%d.%m.%Y")
if (length(DAUGP)==0 || is.na(DAUGP)) {
DAUGP<-format(as.Date("1800-01-01","%Y-%m-%d"),"%d.%m.%Y")
}
# Placeholder columns kept for schema compatibility with other scrapers.
words<-NA
image<-NA
trademark<-data %>% html_nodes(xpath = "//div[@id='summary']//div[text()='Mark:']/following::div[1]") %>% html_text()
trademark<-gsub("\r","",trademark)
trademark<-gsub("\t","",trademark)
trademark<-gsub("\n","",trademark)
trademark<-trimws(trademark)
agComment<-data %>% html_nodes(xpath = "//div[text()='Status:']/following::div[1]") %>% html_text()
agComment<-gsub("\r","",agComment)
agComment<-gsub("\t","",agComment)
agComment<-gsub("\n","",agComment)
agComment<-trimws(agComment)
##AppNumber: re-read the serial number as displayed by TSDR and normalize.
AppNumber<-data %>% html_nodes(xpath = "//div[text()='US Serial Number:']/following::div[1]")%>% html_text()
AppNumber<-gsub("\r","",AppNumber)
AppNumber<-gsub("\n","",AppNumber)
AppNumber<-trimws(gsub("\t","",AppNumber))
AppNumber<-gsub(",","",AppNumber)
AppNumber<-gsub("/","",AppNumber)
AppNumber<-gsub("-","",AppNumber, fixed=TRUE)
#return DF: bibliographic columns first, then the 18 class columns.
tmpDF <- cbind(
data.frame(
AppNumber,
trademark,
registrationNo,
renewal,
application,
acceptance,
FirstUseDate,
priority,
priorityNo,
priorityCountry,
NoticeOfAllowanceDate,
publication,
agentOnRecord,
associatedTMs,
status,
kind,
AppType,
DAU,
renewalGP,
DAUGP,
color,
words,
image,
imageUrl,
agComment,
LimDis,
owner,
ownerAddr,
stringsAsFactors = FALSE
),
tmpDF
)
# Rename to the human-readable report schema.
tmpDF<-tmpDF%>%dplyr::rename(
`Application no.`=AppNumber,
Trademark=trademark,
`Application date`=application,
`Registration no.`=registrationNo,
`Registration date`=acceptance,
`First use date`=FirstUseDate,
`Next renewal date`=renewal,
`Priority date`=priority,
`Priority no.`=priorityNo,
`Priority country`=priorityCountry,
`Notice of allowance date`=NoticeOfAllowanceDate,
`Publication date`=publication,
`TM Type`=kind,
`Application type`=AppType,
`Next DAU date`=DAU,
`Is color`=color,
`Next renewal date: end of grace period`=renewalGP,
`Next DAU date: end of grace period`=DAUGP,
Status=status,
`Limitations & Disclaimers`=LimDis,
`Agent on record`=agentOnRecord,
Owner=owner,
`Owner address`=ownerAddr,
`Associated TMs`=associatedTMs,
`1st Class`= class1,
`1st Goods & Services` =description1,
`2nd Class` = class2,
`2nd Goods & Services`=description2,
`3rd Class` =class3,
`3rd Goods & Services`=description3,
`4th Class` =class4,
`4th Goods & Services`=description4,
`5th Class` =class5,
`5th Goods & Services`=description5,
`6th Class` =class6,
`6th Goods & Services`=description6,
`7th Class` =class7,
`7th Goods & Services`=description7,
`8th Class` =class8,
`8th Goods & Services`=description8,
`9th Class` =class9,
`9th Goods & Services`=description9,
`Agent's comment`=agComment
)
# NOTE(review): class(x) != "data.frame" is fragile — inherits() would be
# the idiomatic check.
if (class(tmpDF) != "data.frame")
{
tmpDF = as.data.frame(NULL)
}
# Drop everything except the result and restore the caller's locale.
rm(list=setdiff(ls(), "tmpDF"))
Sys.setlocale("LC_TIME",current)
return(tmpDF)
}
|
ff15bb2e6dd42473fb8d8ee7a5f0b6912846ec48
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Tentrup/mult-matrix/mult_bool_matrix_dyn_9_6.unsat/mult_bool_matrix_dyn_9_6.unsat.R
|
49f455350b74430ce8545f69534d21a6c6057ff0
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
mult_bool_matrix_dyn_9_6.unsat.R
|
ac1500c0e85bb933126d3dac996a3ed5 mult_bool_matrix_dyn_9_6.unsat.qdimacs 5492 16388
|
36d3289b89107fbd190f0e8a00d19e26ab709ce6
|
e5b3c5ceb002c9aa926e5a1caee2cfe6beb43e1b
|
/7-figures_and_tables/t_test_table.r
|
af14d14cc24c67f14ce45999c5cef11a607c9090
|
[] |
no_license
|
brendane/symbiosis_gene_evolution_initial
|
4e4f9edc146fd2f1efe79e49b7d8e63d8b55941b
|
c69a2357c561794c79e2ec1189886b58e9354dad
|
refs/heads/master
| 2022-11-20T23:13:48.105230
| 2020-07-26T23:53:21
| 2020-07-26T23:53:21
| 282,684,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,817
|
r
|
t_test_table.r
|
#!/usr/bin/env Rscript
#
# Make table of medians (or means) and t-test p-values for several key
# comparisons. Has a lot of overlap with the gene_count_table.
#
# Also reports some correlations.
#
# --- Input data -------------------------------------------------------------
# Copy the gene lists and the per-gene statistics table into the working
# directory, then read them. `genes` maps each gene-set name to its gene IDs.
projdir = '/home/tiffinp/epste051/project/symbiosis_gene_evolution'
indir = file.path(projdir, 'notes/table')
genes_files = list('Classic Signaling'='symbiosis_signaling_genes.edited.2020-05-07.txt',
'Classic Fixation'='symbiosis_fixation_genes.edited.2020-05-07.txt',
'Host Benefit'=c('gwas_candidates/2020-05-11/plant_biomass.A17.top10.txt',
'gwas_candidates/2020-05-11/plant_biomass.R108.top10.txt'),
'Symbiont Fitness'=c('gwas_candidates/2020-05-11/rhizobial_fitness.A17.top10.txt',
'gwas_candidates/2020-05-11/rhizobial_fitness.R108.top10.txt'))
sym_sets = names(genes_files)
genes = structure(vector('list', length=length(genes_files)),
names=names(genes_files))
# Each gene set may come from several files (e.g. one per host genotype);
# pool and de-duplicate the gene IDs.
for(geneset in names(genes)) {
for(gf in genes_files[[geneset]]) {
file.copy(file.path(indir, gf), '.', overwrite=TRUE)
}
genes[[geneset]] = unique(as.character(unlist(sapply(genes_files[[geneset]],
function(fn) {
scan(basename(fn), what='character', sep='\n')
}, USE.NAMES=FALSE))))
}
file.copy(file.path(projdir, 'results/gene_comparison/73strains_alpha_complete/2020-05-11/data.tsv'), '.',
overwrite=TRUE)
gene_data = read.csv('data.tsv', sep='\t', comment.char='#', header=TRUE, as.is=TRUE, check.names=FALSE)
# Treat missing Ka/Ks pair counts as zero so the "> 2 pairs" filters work.
gene_data[, 'ka_ks.n.all'] = ifelse(is.na(gene_data[, 'ka_ks.n.all']), 0, gene_data[, 'ka_ks.n.all'])
genes[['Genome']] = unique(gene_data[, 'gene'])
genes[['Non-Symbiosis']] = genes[['Genome']][!(genes[['Genome']] %in% unlist(genes[sym_sets]))]
# --- Result containers ------------------------------------------------------
# `output`: per-set medians plus rows reserved for two head-to-head t-tests.
output = matrix(ncol=14, nrow=7, data=NaN,
dimnames=list(c('Non-Symbiosis', 'Classic Signaling', 'Classic Fixation', 't_test_sig_fix',
'Host Benefit', 'Symbiont Fitness', 't_test_ben_fit'),
c('genomes', 'copies_per_genome', 'duplication', 'transfer',
'median_ka_ks', 'mean_ka_ks', 'median_ka_ks_3', 'mean_ka_ks_3',
'r2', 'delta', 'delta_pav', 'delta_pav_3', 'loss', 'n_gene')))
# `output_n`: sample sizes for the two filtered statistics.
output_n = matrix(ncol=2, nrow=7, data=NaN,
dimnames=list(c('Non-Symbiosis', 'Classic Signaling', 'Classic Fixation', 't_test_sig_fix',
'Host Benefit', 'Symbiont Fitness', 't_test_ben_fit'),
c( 'median_ka_ks_3', 'delta_pav_3')))
# `output_mean`: same statistics as `output` but using means.
output_mean = matrix(ncol=14, nrow=5, data=NaN,
dimnames=list(c('Non-Symbiosis', 'Classic Signaling', 'Classic Fixation',
'Host Benefit', 'Symbiont Fitness'),
c('genomes', 'copies_per_genome', 'duplication', 'transfer',
'median_ka_ks', 'mean_ka_ks', 'median_ka_ks_3', 'mean_ka_ks_3',
'r2', 'delta', 'delta_pav', 'delta_pav_3', 'loss', 'n_gene')))
# P-values for test for difference with Non-Symbiosis
output_p = matrix(ncol=15, nrow=10, data=NaN,
dimnames=list(c('Classic Signaling', 'Classic Fixation',
'Host Benefit', 'Symbiont Fitness',
'fix_vs_sig', 'ben_vs_fit',
'sig_vs_ben', 'sig_vs_fit', 'fix_vs_ben', 'fix_vs_fit'),
c('genomes', 'copies_per_genome', 'duplication', 'transfer',
'transfer_minus_dup',
'median_ka_ks', 'mean_ka_ks', 'median_ka_ks_3', 'mean_ka_ks_3',
'r2', 'delta', 'delta_pav', 'delta_pav_3', 'loss', 'n_gene')))
# Correlations with median pairwise Ka/Ks
output_kaks_cor = matrix(ncol=12, nrow=5, data=NaN,
dimnames=list(c('Non-Symbiosis', 'Classic Signaling', 'Classic Fixation',
'Host Benefit', 'Symbiont Fitness'),
c('transfer', 'duplication', 'delta', 'delta_pav', 'delta_pav_3', 'r2',
'transfer_df', 'duplication_df', 'delta_df', 'delta_pav_df', 'delta_pav_3_df', 'r2_df')))
## Median and mean stats for each measurement
# Per gene set: medians of each statistic; "_3" variants restrict to genes
# with more than two Ka/Ks pairs (or more than two gene copies for delta_pav).
for(ss in c('Non-Symbiosis', sym_sets)) {
d = gene_data[gene_data[, 'gene'] %in% genes[[ss]], ]
output[ss, 'genomes'] = median(d[, 'n_strains'])
output[ss, 'copies_per_genome'] = median(d[, 'n_genes'] / d[, 'n_strains'])
output[ss, 'duplication'] = median(d[, 'duplication'], na.rm=TRUE)
output[ss, 'transfer'] = median(d[, 'transfer'], na.rm=TRUE)
output[ss, 'median_ka_ks'] = median(d[, 'ka_ks.median.all'], na.rm=TRUE)
output[ss, 'mean_ka_ks'] = median(d[, 'ka_ks.mean.all'], na.rm=TRUE)
output[ss, 'median_ka_ks_3'] = median(d[d[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'], na.rm=TRUE)
output[ss, 'mean_ka_ks_3'] = median(d[d[, 'ka_ks.n.all'] > 2, 'ka_ks.mean.all'], na.rm=TRUE)
output[ss, 'r2'] = median(d[, 'r2'], na.rm=TRUE)
output[ss, 'delta'] = median(d[, 'delta'], na.rm=TRUE)
output[ss, 'delta_pav'] = median(d[, 'delta_pav'], na.rm=TRUE)
output[ss, 'delta_pav_3'] = median(d[d[, 'n_genes'] > 2, 'delta_pav'], na.rm=TRUE)
output[ss, 'loss'] = median(d[, 'loss'], na.rm=TRUE)
output[ss, 'n_gene'] = median(d[, 'n_genes'], na.rm=TRUE)
# Record how many genes actually contribute to the filtered statistics.
output_n[ss, 'median_ka_ks_3'] = sum(!is.na(d[d[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all']))
output_n[ss, 'delta_pav_3'] = sum(!is.na(d[d[, 'n_genes'] > 2, 'delta_pav']))
}
# Same statistics as above but summarized with means instead of medians.
for(ss in c('Non-Symbiosis', sym_sets)) {
d = gene_data[gene_data[, 'gene'] %in% genes[[ss]], ]
output_mean[ss, 'genomes'] = mean(d[, 'n_strains'])
output_mean[ss, 'copies_per_genome'] = mean(d[, 'n_genes'] / d[, 'n_strains'])
output_mean[ss, 'duplication'] = mean(d[, 'duplication'], na.rm=TRUE)
output_mean[ss, 'transfer'] = mean(d[, 'transfer'], na.rm=TRUE)
output_mean[ss, 'median_ka_ks'] = mean(d[, 'ka_ks.median.all'], na.rm=TRUE)
output_mean[ss, 'mean_ka_ks'] = mean(d[, 'ka_ks.mean.all'], na.rm=TRUE)
output_mean[ss, 'median_ka_ks_3'] = mean(d[d[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'], na.rm=TRUE)
output_mean[ss, 'mean_ka_ks_3'] = mean(d[d[, 'ka_ks.n.all'] > 2, 'ka_ks.mean.all'], na.rm=TRUE)
output_mean[ss, 'r2'] = mean(d[, 'r2'], na.rm=TRUE)
output_mean[ss, 'delta'] = mean(d[, 'delta'], na.rm=TRUE)
output_mean[ss, 'delta_pav'] = mean(d[, 'delta_pav'], na.rm=TRUE)
output_mean[ss, 'delta_pav_3'] = mean(d[d[, 'n_genes'] > 2, 'delta_pav'], na.rm=TRUE)
output_mean[ss, 'loss'] = mean(d[, 'loss'], na.rm=TRUE)
output_mean[ss, 'n_gene'] = mean(d[, 'n_genes'], na.rm=TRUE)
}
# Pearson correlations of each statistic with the median pairwise Ka/Ks,
# within genes having > 2 Ka/Ks pairs; stores both r and degrees of freedom.
for(ss in rownames(output_kaks_cor)) {
d = gene_data[gene_data[, 'gene'] %in% genes[[ss]], ]
d = d[d[, 'ka_ks.n.all'] > 2, ]
for(st in grep('_df', colnames(output_kaks_cor), value=TRUE, invert=TRUE)) {
# delta_pav_3 is the same column as delta_pav (the > 2 filter is already
# applied above), so remap the column name.
stc = st
if(st == 'delta_pav_3') stc = 'delta_pav'
if(sum(!is.na(d[, stc]) & !is.na(d[, 'ka_ks.median.all'])) < 3) {
cat('Skipping', st, 'correlation because not enough complete observations\n')
} else {
ct = cor.test(d[, stc], d[, 'ka_ks.median.all'])
output_kaks_cor[ss, st] = ct[['estimate']]
output_kaks_cor[ss, paste0(st, '_df')] = ct[['parameter']]
}
}
}
## T-test (unequal variances)
# Welch (unequal-variance) two-sample t-test: return only the two-sided
# p-value for unpaired samples x1 and x2. Extra arguments are forwarded to
# stats::t.test().
tt <- function(x1, x2, ...) {
  fit <- t.test(x1, x2, var.equal = FALSE, paired = FALSE,
                alternative = 'two.sided', ...)
  fit[['p.value']]
}
# Welch t-tests of each symbiosis gene set against the Non-Symbiosis
# background (`dns`); p-values go into the per-set rows of `output_p`.
dns = gene_data[gene_data[, 'gene'] %in% genes[['Non-Symbiosis']], ]
for(ss in sym_sets) {
d = gene_data[gene_data[, 'gene'] %in% genes[[ss]], ]
output_p[ss, 'genomes'] = tt(d[, 'n_strains'], dns[, 'n_strains'])
output_p[ss, 'copies_per_genome'] = tt(d[, 'n_genes'] / d[, 'n_strains'],
dns[, 'n_genes'] / dns[, 'n_strains'])
output_p[ss, 'duplication'] = tt(d[, 'duplication'], dns[, 'duplication'])
output_p[ss, 'transfer'] = tt(d[, 'transfer'], dns[, 'transfer'])
output_p[ss, 'median_ka_ks'] = tt(d[, 'ka_ks.median.all'], dns[, 'ka_ks.median.all'])
# Restrict both samples to genes with > 2 Ka/Ks pairs.
output_p[ss, 'median_ka_ks_3'] = tt(d[d[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'],
dns[dns[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'])
output_p[ss, 'r2'] = tt(d[, 'r2'], dns[, 'r2'])
output_p[ss, 'delta'] = tt(d[, 'delta'], dns[, 'delta'])
output_p[ss, 'delta_pav'] = tt(d[, 'delta_pav'], dns[, 'delta_pav'])
output_p[ss, 'delta_pav_3'] = tt(d[d[, 'n_genes'] > 2, 'delta_pav'], dns[dns[, 'n_genes'] > 2, 'delta_pav'])
output_p[ss, 'transfer_minus_dup'] = tt(d[, 'transfer_minus_dup'], dns[, 'transfer_minus_dup'])
output_p[ss, 'loss'] = tt(d[, 'loss'], dns[, 'loss'])
output_p[ss, 'n_gene'] = tt(d[, 'n_genes'], dns[, 'n_genes'])
}
# Head-to-head Welch t-tests: Classic Fixation (d1) vs Classic Signaling
# (d2); p-values land in the 't_test_sig_fix' row of the medians table.
d1 <- gene_data[gene_data[, 'gene'] %in% genes[['Classic Fixation']], ]
d2 <- gene_data[gene_data[, 'gene'] %in% genes[['Classic Signaling']], ]
output['t_test_sig_fix', 'genomes'] <- tt(d1[, 'n_strains'], d2[, 'n_strains'])
output['t_test_sig_fix', 'copies_per_genome'] <- tt(d1[, 'n_genes']/d1[, 'n_strains'],
d2[, 'n_genes']/d2[, 'n_strains'])
output['t_test_sig_fix', 'duplication'] <- tt(d1[, 'duplication'], d2[, 'duplication'])
output['t_test_sig_fix', 'transfer'] <- tt(d1[, 'transfer'], d2[, 'transfer'])
output['t_test_sig_fix', 'median_ka_ks'] <- tt(d1[, 'ka_ks.median.all'], d2[, 'ka_ks.median.all'])
# Restrict both samples to genes with > 2 Ka/Ks pairs.
output['t_test_sig_fix', 'median_ka_ks_3'] <- tt(d1[d1[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'],
d2[d2[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'])
output['t_test_sig_fix', 'r2'] <- tt(d1[, 'r2'], d2[, 'r2'])
output['t_test_sig_fix', 'delta'] <- tt(d1[, 'delta'], d2[, 'delta'])
output['t_test_sig_fix', 'delta_pav'] <- tt(d1[, 'delta_pav'], d2[, 'delta_pav'])
# BUG FIX: the d1 subset previously used `d` (a stale variable left over
# from the preceding loop, i.e. the Symbiont Fitness rows) as the filter,
# silently misaligning the > 2-copy mask. The parallel loop below uses
# `d1[d1[, 'n_genes'] > 2, ...]`, which is what is done here now.
output['t_test_sig_fix', 'delta_pav_3'] <- tt(d1[d1[, 'n_genes'] > 2, 'delta_pav'],
d2[d2[, 'n_genes'] > 2, 'delta_pav'])
# All pairwise Welch t-tests between symbiosis gene sets; each `comp` is
# (set A, set B, output_p row name).
for(comp in list(c('Classic Fixation', 'Classic Signaling', 'fix_vs_sig'),
c('Host Benefit', 'Symbiont Fitness', 'ben_vs_fit'),
c('Classic Fixation', 'Host Benefit', 'fix_vs_ben'),
c('Classic Fixation', 'Symbiont Fitness', 'fix_vs_fit'),
c('Classic Signaling', 'Host Benefit', 'sig_vs_ben'),
c('Classic Signaling', 'Symbiont Fitness', 'sig_vs_fit'))) {
ss = comp[3]
d1 = gene_data[gene_data[, 'gene'] %in% genes[[comp[1]]], ]
d2 = gene_data[gene_data[, 'gene'] %in% genes[[comp[2]]], ]
output_p[ss, 'genomes'] = tt(d1[, 'n_strains'], d2[, 'n_strains'])
output_p[ss, 'copies_per_genome'] = tt(d1[, 'n_genes'] / d1[, 'n_strains'],
d2[, 'n_genes'] / d2[, 'n_strains'])
output_p[ss, 'duplication'] = tt(d1[, 'duplication'], d2[, 'duplication'])
output_p[ss, 'transfer'] = tt(d1[, 'transfer'], d2[, 'transfer'])
output_p[ss, 'median_ka_ks'] = tt(d1[, 'ka_ks.median.all'], d2[, 'ka_ks.median.all'])
# Restrict both samples to genes with > 2 Ka/Ks pairs.
output_p[ss, 'median_ka_ks_3'] = tt(d1[d1[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'],
d2[d2[, 'ka_ks.n.all'] > 2, 'ka_ks.median.all'])
output_p[ss, 'r2'] = tt(d1[, 'r2'], d2[, 'r2'])
output_p[ss, 'delta'] = tt(d1[, 'delta'], d2[, 'delta'])
output_p[ss, 'delta_pav'] = tt(d1[, 'delta_pav'], d2[, 'delta_pav'])
output_p[ss, 'delta_pav_3'] = tt(d1[d1[, 'n_genes'] > 2, 'delta_pav'],
d2[d2[, 'n_genes'] > 2, 'delta_pav'])
output_p[ss, 'transfer_minus_dup'] = tt(d1[, 'transfer_minus_dup'], d2[, 'transfer_minus_dup'])
output_p[ss, 'loss'] = tt(d1[, 'loss'], d2[, 'loss'])
output_p[ss, 'n_gene'] = tt(d1[, 'n_genes'], d2[, 'n_genes'])
}
# Write each column of a matrix/data frame to `fname` as its own small
# tab-separated table (preceded by a blank line and a tab), so the file is
# easy to scan by eye.
#
# object: matrix or data frame with row and column names.
# fname:  output file path (overwritten).
# Returns NULL invisibly.
write_readable_table <- function(object, fname) {
  handle <- file(fname, 'w')
  # FIX: guarantee the connection is closed even if write.table() errors
  # part-way through (the original leaked the connection on failure).
  on.exit(close(handle), add = TRUE)
  for (i in seq_len(ncol(object))) {
    cat('\n\t', file = handle)
    write.table(object[, i, drop = FALSE], handle, sep = '\t',
                col.names = TRUE, row.names = TRUE, quote = FALSE)
  }
  invisible(NULL)
}
# Emit all summary tables as readable TSVs in the working directory.
write_readable_table(output, 'medians.tsv')
write_readable_table(output_n, 'n.tsv')
write_readable_table(output_mean, 'means.tsv')
write_readable_table(output_p, 'pvalues_vs_all.tsv')
write_readable_table(output_kaks_cor, 'correlations_with_median_kaks.tsv')
|
0ae66a60500083e432e57111f18023f86bfe3eb9
|
5ee4a55ce230452b5b537feb4528a16c71928970
|
/runShinyApp.R
|
615105e82d72bee9cf063395f7a6e624e6865296
|
[] |
no_license
|
leegang1990/Local-App-Deployment-Shiny-Portable-R
|
7302246a2798cc41de5960e463a041bbada3f163
|
24e11108f855592282c807a70dacf9cbc2154144
|
refs/heads/master
| 2022-05-30T13:56:10.444035
| 2020-05-03T06:14:02
| 2020-05-03T06:14:02
| 260,732,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
runShinyApp.R
|
# Log the active library search paths (useful when debugging a portable R
# bundle), then launch the Shiny app in the user's default browser.
message('library paths:\n', paste('... ', .libPaths(), sep='', collapse='\n'))
shiny::runApp('./App/app.R', launch.browser = TRUE)
|
1478d99a172255b332577b81c6afb32b95cb3cd2
|
8f045a1293610a0bb955da7022418beea0d80c6f
|
/ChicagoChlamydiaModel/ChicagoChlamydiaModel/ChicagoChlaModel.R
|
52152220b551ccc46ab7d9a945009823044ffb2f
|
[] |
no_license
|
chrisporras/Chicago-chlamydia-spatialsim
|
11fbca7369c1ec26d237df06d6a0547d49f481a0
|
0256b142178c902750e0fd5874d8711300a73e64
|
refs/heads/master
| 2020-03-23T08:16:30.350473
| 2018-08-07T16:20:39
| 2018-08-07T16:20:39
| 141,317,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,740
|
r
|
ChicagoChlaModel.R
|
# Load spatial/plotting dependencies, read the Chicago community-area
# shapefile and population table, and precompute pairwise distances.
library(pracma)
library(ggplot2)
library(MASS)
library(compositions)
library(ggmap)
library(rgdal)
library(rgeos)
library(maptools)
library(dplyr)
library(tidyr)
library(tmap)
# NOTE(review): absolute, machine-specific path — breaks on any other
# machine; consider here::here() or a relative path.
setwd( "/Users/marlinfiggins/Desktop/ChicagoChlamydiaModel/")
Chicago= readOGR(dsn = "chicomm", layer = "chicomm")
ExtraData = read.csv(file="Chicago Pop Data.csv", header=TRUE, sep=",")
# Reorder the population rows to match the shapefile's district ordering.
ExtraData=ExtraData[as.numeric(Chicago@data[["DISTNAME"]]),]
DistanceMat=spDists(Chicago, Chicago)
# Gaussian-kernel bandwidth for the transition probabilities.
alpha=0.015
##########Switch to Area to Area
# Build a row-stochastic transition-probability matrix between areas.
#
# For each source area J, the pairwise distances DistanceMat[J, ] are
# partitioned into 20 distance classes. Each class gets a total
# Gaussian-kernel weight (bandwidth `alpha`, computed from the coordinate
# differences in GridPositions), which is then shared among the areas in
# that class proportionally to their land area. Rows are normalized to 1.
#
# GridPositions: n x 2 matrix of area coordinates (e.g. centroids).
# ExtraData:     data frame with a Land.Area..Acres. column, row-aligned
#                with GridPositions.
# DistanceMat:   n x n matrix of pairwise distances.
# alpha:         kernel bandwidth.
# Returns: n x n matrix; entry [J, K] is the probability of moving J -> K.
GTransProb = function(GridPositions, ExtraData, DistanceMat, alpha) {
  n <- nrow(GridPositions)
  ProbMat <- matrix(NA, nrow = n, ncol = n)
  # 20 bin edges spanning [0, max distance]; the +0.001 guarantees the
  # farthest pair falls strictly inside the last bin.
  DistanceClasses = seq(0, max(DistanceMat + 0.001), length.out = 20)
  for (J in seq_len(n)) {
    for (i in 2:length(DistanceClasses)) {
      # Areas whose distance from J falls in bin [edge i-1, edge i].
      NeighClass <- which(DistanceMat[J, ] <= DistanceClasses[i] &
                            DistanceMat[J, ] >= DistanceClasses[i - 1])
      # Total kernel weight of the bin ...
      DistanceClassProb <- 0
      for (K in NeighClass) {
        DistanceClassProb <- exp(-sum(((GridPositions[K, ] - GridPositions[J, ]) / alpha)^2)) + DistanceClassProb
      }
      # ... shared among bin members in proportion to land area.
      for (K in NeighClass) {
        ProbMat[J, K] <- DistanceClassProb * ExtraData$Land.Area..Acres.[K] /
          sum(ExtraData$Land.Area..Acres.[NeighClass])
      }
    }
  }
  # Normalize each row so probabilities out of J sum to 1.
  for (J in seq_len(n)) {
    ProbMat[J, ] <- ProbMat[J, ] / sum(ProbMat[J, ])
  }
  # Cleanups vs original: removed the unused GP copy of GridPositions and
  # replaced 1:nrow() loops with seq_len(); iteration order is unchanged,
  # so boundary ties between adjacent bins resolve exactly as before.
  return(ProbMat)
}
P=GTransProb(coordinates(Chicago), ExtraData, DistanceMat, alpha)
|
0fd6a51f035fd62f66e0b89c3137c083475d2d41
|
13750542b2e5406b948059ae7d2bbe9967bb58bc
|
/plotGrid (woodgrain).R
|
1e25fa6e099916594055a188321654c5d7179a3e
|
[] |
no_license
|
jrevenaugh/Riquity
|
ce65cd3e9f0f30b92b8e773e723b1cb34793e226
|
64ce1eed78467f6a6849744819829f9aa469f4c7
|
refs/heads/master
| 2020-03-15T01:57:34.876482
| 2018-05-07T21:07:17
| 2018-05-07T21:07:17
| 131,906,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,727
|
r
|
plotGrid (woodgrain).R
|
# plotGrid
#
# Make a simple plot of a Riquity grid
# Render a Riquity board state. `grid` is a logical vector, one element per
# hole (TRUE = pegged), aligned with the file-level `gCenter` coordinates.
# Also depends on file-level objects: grob (woodgrain image), mask1/mask2,
# triangle, diagonals. Layer order matters: background, masks, outline,
# diagonals, then holes/pegs on top. Returns the ggplot object.
plotGrid <- function(grid) {
centers <- gCenter
centers$gc <- grid
openCenters <- centers %>% filter(gc == FALSE)
filledCenters <- centers %>% filter(gc == TRUE)
g <- ggplot() +
scale_y_continuous(limits = c(-1, 5)) +
scale_x_continuous(limits = c(-1, 5)) +
coord_equal(expand = FALSE) +
# Add woodgrain background
annotation_custom(grob, xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf) +
# Mask off woodgrain outside of board area
geom_polygon(data = mask1, aes(x, y),
fill = "white",
color = "white") +
geom_polygon(data = mask2,
aes(x, y),
fill = "white",
color = "white") +
# Outline board
geom_path(data = triangle,
aes(x, y),
color = "black",
size = 2) +
# Add diagonals
geom_path(data = diagonals, aes(x, y),
color = "steelblue",
size = 3) +
# Add open holes
geom_point(data = openCenters, aes(x, y),
size = 10,
color = "gray20",
fill = "gray20",
pch = 21) +
# Add "pegged" holes: a large dark ring with a smaller lighter center.
geom_point(data = filledCenters, aes(x, y),
size = 30,
color = "black",
fill = "darkgoldenrod",
pch = 21) +
geom_point(data = filledCenters, aes(x, y),
size = 20,
color = "black",
fill = "goldenrod",
pch = 21) +
theme_void()
return(g)
}
|
fcfd9cdcdd2e29183a49642876d7a7ac2e484d3f
|
8843d262a2721cc9995169b45ac808731b0b6457
|
/Plot5.R
|
c68ea1d49e1fdf3c2ec3606b137de734abebb89f
|
[] |
no_license
|
JackIsrael2020/ExData_Plotting2
|
0a015604d6b6408c465294fddfdfedac5eaf2397
|
ffc7a5f3795ea1b8fade41760803e47a2554f81e
|
refs/heads/master
| 2022-10-24T22:31:56.099745
| 2020-06-19T13:25:28
| 2020-06-19T13:25:28
| 272,690,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 784
|
r
|
Plot5.R
|
## Plot 5: motor-vehicle (ON-ROAD) PM2.5 emissions in Baltimore City
## (fips "24510") per year, with a linear trend line.
## This solution uses the "downloader" package from CRAN. If you already have it loaded,
## then please ignore the next section.
## NOTE(review): install.packages() inside a script re-installs on every
## run; kept for compatibility with the original workflow.
##########
install.packages("downloader")
library(downloader)
##########
# Fetch and unpack the NEI dataset, then load the two RDS tables.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download(url, dest="dataset.zip", mode="wb")
unzip("dataset.zip")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##########
install.packages("dplyr")
library(dplyr)
library(ggplot2)
##########
# Motor-vehicle emissions are approximated by type == "ON-ROAD".
E <- filter(NEI, type == "ON-ROAD", fips == "24510")
g <- ggplot(E, aes(year, Emissions))
# FIX: corrected the misspelled plot title ("Emmisions" -> "Emissions").
g + geom_point(color = "steelblue", size = 2, alpha = 1/3) +
geom_smooth(method="lm") + xlab("Year") +
ggtitle("Annual Motor Vehicle PM25 Emissions - Baltimore City")
|
b097f257ac845546c8bca733c7b5824491ebe369
|
71f5a07ab52e014d43cb74274b14662ba5e9279a
|
/8.Practical Machine Learning/ex4.R
|
b11b5b29c1f77a7852c67916f66510ac6e8040b4
|
[] |
no_license
|
xiangacadia/DataSciencCoursera
|
b95c6b82abbac76873a3e681f83f421a856975c0
|
20e064039be87228eb25d21764fdb5f3e0d4e834
|
refs/heads/master
| 2016-09-03T06:30:03.361941
| 2014-12-01T19:39:30
| 2014-12-01T19:39:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 100
|
r
|
ex4.R
|
## Quiz 4, question 1: load the vowel data and seed the RNG.
## Fix: the file began with a bare `Q1.` token, which parses as a
## symbol and errors at run time ("object 'Q1.' not found"); it is
## now a comment.
# Q1.
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
set.seed(33833)
summary(vowel.train)
|
b2414e572e5b1854712bdea74ec6eca2b087ce8d
|
5c065353ded3ccb1798904ce2ec0c8d38cbcd9ac
|
/plot2.R
|
7dfa60536959e0360b0bed078a92e0ee1ea844c6
|
[] |
no_license
|
naveen74/ExData_Plotting1
|
3334542c2ec532b701cc266bd4589a5f546e7fa9
|
50c7fc0f1a8d279454be7d525d37dd05b17a2f9b
|
refs/heads/master
| 2021-01-15T20:53:34.023079
| 2015-02-08T17:28:57
| 2015-02-08T17:28:57
| 30,351,190
| 0
| 0
| null | 2015-02-05T10:59:17
| 2015-02-05T10:59:16
| null |
UTF-8
|
R
| false
| false
| 514
|
r
|
plot2.R
|
df<- read.table("household_power_consumption.txt", header = TRUE, sep=";", as.is=TRUE, na.strings="?")
DATE1 <- as.Date("01/02/2007", "%d/%m/%Y")
DATE2 <- as.Date("02/02/2007", "%d/%m/%Y")
df$Date <- as.Date(df$Date, "%d/%m/%Y")
df1 <- df[df$Date >= DATE1 & df$Date <= DATE2, ]
x <- paste(df1$Date, df1$Time)
x <- strptime(x, "%Y-%m-%d %H:%M:%S")
plot(x, df1$Global_active_power, type = "l", ylab = "Global Active Power(kilowatts", xlab = "")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
27e4cfce90e2edb323adc2e41aae3fad4e2fd95a
|
879896ca6b4c167e3cc8f2cfd59c5b7e6a571d2e
|
/R/b_parsing.R
|
28b2ef31e1f4961a5e42aa8b603b3a795d322b6b
|
[
"MIT"
] |
permissive
|
brshallo/flipbookr
|
b2861fe8a9697c8b1bf6c60a007592d746f4c474
|
2013c7e389dd8ce8caf76de9938dcbe6127387a7
|
refs/heads/master
| 2023-07-08T15:25:13.092571
| 2021-08-12T06:17:42
| 2021-08-12T06:17:42
| 395,095,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,176
|
r
|
b_parsing.R
|
####### Get code from source chunk #####
# Return the source of a knitr chunk as a single newline-joined string.
chunk_code_get <- function(chunk_name){
  chunk_lines <- knitr::knit_code$get(chunk_name)
  paste(chunk_lines, collapse = "\n")
}
# Normalize knitr's "py" engine label to "python"; all other labels
# pass through unchanged.
#
# Fix: the original body was only the `if` statement, so the function
# returned NULL (the value of an untaken `if`) for every language
# other than "py".
correct_py <- function(lang){
  if (lang == "py") {
    lang <- "python"
  }
  lang
}
# Drop every line of `code` that matches the `omit` pattern and
# re-join the remaining lines with newlines.
code_remove_omit <- function(code, omit = "#OMIT"){
  code_lines <- stringr::str_split(code, pattern = "\n")[[1]]
  kept_lines <- code_lines[!stringr::str_detect(code_lines, omit)]
  paste(kept_lines, collapse = "\n")
}
#
# create_code() %>%
# code_as_table() %>%
# code_as_table_process_break_messages()
#### Code parsing #########
# Split `code` into a one-row-per-line tibble with columns:
#   raw_code — the line text (lines matching `omit` already removed)
#   line     — 1-based line index
code_as_table <- function(code, omit = "#OMIT"){
  code %>%
    code_remove_omit(omit = omit) %>%
    stringr::str_split(pattern = "\n") %>%
    .[[1]] %>%
    tibble::tibble(raw_code = .) %>%
    dplyr::mutate(line = 1:dplyr::n())
}
# Decode flipbookr's trailing break markers on each code line:
#   non_seq — the N in a trailing "#BREAKN" (may be negative); lines
#             without a numbered marker default to 1
#   user    — TRUE when the line ends with a bare "#BREAK"
#   rotate  — TRUE when the line ends with "#ROTATE"
# Trailing whitespace is stripped first so the $-anchored regexes match.
code_as_table_process_break_messages <- function(code_as_table){
  code_as_table %>%
    dplyr::mutate(raw_code = stringr::str_remove(raw_code, "\\s+$")) %>%
    dplyr::mutate(non_seq = stringr::str_extract(raw_code, "#BREAK\\-?\\d+")) %>%
    # Keep only the (possibly signed) number from the marker.
    dplyr::mutate(non_seq = stringr::str_extract(non_seq, "-?\\d+")) %>%
    dplyr::mutate(non_seq = as.numeric(non_seq)) %>%
    dplyr::mutate(non_seq = tidyr::replace_na(non_seq, 1)) %>%
    dplyr::mutate(user = stringr::str_detect(raw_code, "#BREAK$")) %>%
    dplyr::mutate(rotate = stringr::str_detect(raw_code, "#ROTATE$"))
}
# create_code() %>%
# code_as_table() %>%
# code_as_table_process_break_messages()
# create_code() %>%
# code_as_table() %>%
# code_as_table_process_break_messages()
#
# create_code_remove() %>%
# code_simple_parse()
# Convenience wrapper: tabulate code lines, then decode break markers.
code_simple_parse <- function(code, omit = "#OMIT"){
  line_table <- code_as_table(code, omit = omit)
  code_as_table_process_break_messages(line_table)
}
#### Real Parsing ####
# Parse R `code` with the base parser and return the token table from
# utils::getParseData(). The parse() call is wrapped in try() so that
# syntactically broken code still yields whatever parse data was
# recorded rather than aborting.
r_code_base_parse <- function(code, omit = "#OMIT") {
  code <- code_remove_omit(code = code, omit = omit)
  # code <- stringr::str_remove_all(code, "#BREAK\\d+|#BREAK|#ROTATE|#OMIT")
  # NOTE(review): srcfile() nominally takes a file name; passing the
  # code text works here because parse(text=, srcfile=) attaches its
  # parse data to this srcfile object — confirm before changing.
  sf <- srcfile(code)
  try(parse(text = code, srcfile = sf))
  utils::getParseData(sf)
}
# Summarise base-parser token data into one row per source line with:
#   full_line — the line's tokens re-concatenated (whitespace-free)
#   comment   — any COMMENT tokens found on the line
#   all_parentheses_balanced — TRUE once every (, { and [ opened so
#     far has been closed, i.e. the line ends a complete expression.
# Token-less (blank) lines are joined back in with empty strings and
# zero counts so the output covers lines 1:max(line1).
r_base_parsed_count_parentheses <- function(base_parsed){
  num_lines <- max(base_parsed$line1)
  tibble::tibble(line = 1:num_lines) ->
    all_lines
  base_parsed %>%
    dplyr::rename(line = line1) %>%
    dplyr::mutate(open_par = text == "(") %>%
    dplyr::mutate(closed_par = text == ")") %>%
    dplyr::mutate(open_curly = text == "{") %>%
    dplyr::mutate(closed_curly = text == "}") %>%
    dplyr::mutate(open_square = text == "[") %>%
    # "[[" opens two bracket levels, so it counts double.
    dplyr::mutate(open_square = ifelse(text == "[[", 2, open_square)) %>%
    dplyr::mutate(closed_square = text == "]") %>%
    dplyr::group_by(line) %>%
    dplyr::summarise(
      full_line = paste0(text, collapse = ""),
      comment = stringr::str_trim(paste0(ifelse(token == "COMMENT", text, ""),
                                         collapse = " ")),
      num_open_par = sum(open_par),
      num_closed_par = sum(closed_par),
      num_open_curly = sum(open_curly),
      num_closed_curly = sum(closed_curly),
      num_open_square = sum(open_square),
      num_closed_square = sum(closed_square)
    ) %>%
    # Re-introduce blank lines dropped by the parser.
    dplyr::full_join(all_lines, by = "line") %>%
    dplyr::arrange(line) %>%
    dplyr::mutate(
      full_line = tidyr::replace_na(full_line, ""),
      comment = tidyr::replace_na(comment, ""),
      num_open_par = tidyr::replace_na(num_open_par, 0),
      num_closed_par = tidyr::replace_na(num_closed_par, 0),
      num_open_curly = tidyr::replace_na(num_open_curly, 0),
      num_closed_curly = tidyr::replace_na(num_closed_curly, 0),
      num_open_square = tidyr::replace_na(num_open_square, 0),
      num_closed_square = tidyr::replace_na(num_closed_square, 0)
    ) %>%
    # Running open/close balance over the whole file, per bracket kind.
    dplyr::mutate(balanced_paren = (cumsum(num_open_par) - cumsum(num_closed_par)) == 0) %>%
    dplyr::mutate(balanced_curly = (cumsum(num_open_curly) - cumsum(num_closed_curly)) == 0) %>%
    dplyr::mutate(balanced_square = (cumsum(num_open_square) - cumsum(num_closed_square)) == 0) %>%
    dplyr::mutate(all_parentheses_balanced = balanced_paren & balanced_curly & balanced_square) %>%
    dplyr::select(line, full_line, comment, all_parentheses_balanced)
}
# create_code() %>%
# r_code_base_parse() %>%
# r_base_parsed_count_parentheses()
# create_code_remove() %>%
# r_code_full_parse()
#### Full parse R, python, stata ####
# Full per-line parse of R code. Combines the simple line table (break
# markers) with base-parser output (comments, bracket balance), then
# derives:
#   connector — a trailing continuation operator (+, %>%, |>, ->, …)
#               stripped from the end of the line
#   code      — the line with comment and connector removed
#   auto      — TRUE when the line completes a balanced, non-empty
#               expression (a natural pause point)
# NOTE(review): the default `code = code` is self-referential and will
# error if the argument is ever omitted — confirm callers always pass it.
r_code_full_parse <- function(code = code, omit = "#OMIT"){
  # $-anchored regexes for every operator that continues an expression
  # onto the next line.
  arithmetic <- "\\+$|-$|\\/$|\\*$|\\^$|%%$|%\\/%$"
  matrix <- "%\\*%$|%o%$"
  ggplot_change_data <- "%\\+%$"
  the_magrittr <- "%>%$|%\\$%$"
  base_pipe <- "\\|\\>$"
  right_assign <- "->$"
  combine_booleans <- "\\|$|\\&$"
  connectors <- paste(arithmetic, matrix, ggplot_change_data,
                      the_magrittr, base_pipe,
                      right_assign, combine_booleans, sep = "|")
  raw_code_table <- code_simple_parse(code = code, omit = omit)
  parsed_code_table <- code %>%
    r_code_base_parse(omit = omit) %>%
    r_base_parsed_count_parentheses()
  raw_code_table %>%
    dplyr::full_join(parsed_code_table, by = "line") %>%
    # we need this XXXXXXX so that we don't get a bunch of warnings
    dplyr::mutate(comment = tidyr::replace_na(comment, "XXXXXXXXX")) %>%
    dplyr::mutate(comment = stringr::str_replace(comment, "^$", "XXXXXXXXX")) %>%
    dplyr::mutate(code = stringr::str_remove(raw_code, comment)) %>%
    dplyr::mutate(connector = stringr::str_extract(stringr::str_trim(code), connectors)) %>%
    dplyr::mutate(connector = tidyr::replace_na(connector, "")) %>%
    # Strip flipbookr's own control markers from the comment column so
    # they never appear in the rendered output.
    dplyr::mutate(comment = stringr::str_remove(comment, "#BREAK-?\\d?\\d?")) %>%
    dplyr::mutate(comment = stringr::str_remove(comment, "#ROTATE")) %>%
    dplyr::mutate(comment = stringr::str_remove(comment, "#[[A-Z]]+")) %>%
    dplyr::mutate(comment = stringr::str_remove(comment, "XXXXXXXXX")) %>%
    dplyr::mutate(code = stringr::str_remove(stringi::stri_trim_right(code), connectors)) %>%
    dplyr::mutate(auto = all_parentheses_balanced & code != "") %>%
    dplyr::select(line, raw_code, code, connector, comment, auto, user, non_seq, rotate)
}
# create_python_code() %>%
# python_code_full_parse()
# create_python_code_pipeline() %>%
# python_code_full_parse()
#
# code <- create_python_code_pipeline()
# Full per-line parse of Python code. A line is an automatic pause
# point (`auto`) when cumulative brackets are balanced, the line is
# non-empty, and it does not open a block (no trailing ":"). The only
# recognised line connector is a trailing backslash.
python_code_full_parse <- function(code, omit = "#OMIT"){
  connectors <- "\\\\"
  code %>%
    code_simple_parse(omit = omit) %>%
    dplyr::mutate(code = raw_code) %>%
    # Count all bracket kinds together; Python blocks are indentation-
    # based so only overall balance matters here.
    dplyr::mutate(open_par = stringr::str_count(code, "\\{|\\(|\\[")) %>%
    dplyr::mutate(closed_par = stringr::str_count(code, "\\}|\\)|\\]")) %>%
    dplyr::mutate(auto = cumsum(open_par) == cumsum(closed_par)) %>%
    dplyr::mutate(auto = ifelse(raw_code == "", FALSE, auto)) %>%
    # Lines ending in ":" start an indented block — not a pause point.
    dplyr::mutate(auto = ifelse(stringr::str_detect(raw_code, ":\\s?$"), FALSE, auto)) %>%
    dplyr::mutate(indented = stringr::str_detect(code, "^\\s+")) %>%
    # dplyr::mutate(indented_follows = dplyr::lead(indented, default = FALSE)) %>%
    # dplyr::mutate(auto = ifelse(indented_follows, FALSE, auto)) %>%
    dplyr::mutate(connector = stringr::str_extract(stringr::str_trim(code), connectors)) %>%
    dplyr::mutate(connector = tidyr::replace_na(connector, "")) %>%
    # dplyr::mutate(connector = stringr::str_replace(connector, "\\\\", "\\")) %>%
    dplyr::mutate(code = stringr::str_remove(stringi::stri_trim_right(code), connectors)) %>%
    # Comment detection is not implemented for Python.
    dplyr::mutate(comment = "")
}
# Full per-line parse of Stata code. Every non-empty line is treated
# as a complete, auto-displayable statement; connector and comment
# detection are not implemented for Stata.
stata_code_full_parse <- function(code, omit = "#OMIT"){
  parsed <- code_simple_parse(code, omit = omit)
  parsed <- dplyr::mutate(parsed, code = raw_code)
  parsed <- dplyr::mutate(parsed, auto = ifelse(raw_code == "", FALSE, TRUE))
  parsed <- dplyr::mutate(parsed, connector = "")
  dplyr::mutate(parsed, comment = "")
}
#### Combined code parsing all languages ####
# Dispatch to the language-specific full parser. For R, additionally
# extract `func`: the first function name called on each line (the
# word immediately before the first "(").
code_parse <- function(code = create_code(), lang = "r", omit = "#OMIT") {
  if (lang == "r") {
    r_code_full_parse(code = code, omit = omit) %>%
      dplyr::mutate(func = stringr::str_extract(code, "\\w+\\(")) %>%
      dplyr::mutate(func = stringr::str_remove(func, "\\("))
  } else if (lang %in% c("python", "py")) {
    python_code_full_parse(code = code, omit = omit)
  } else if (lang == "stata") {
    # Fix: this branch previously evaluated to NULL even though
    # stata_code_full_parse() exists for exactly this case.
    stata_code_full_parse(code = code, omit = omit)
  }
}
|
2e9afb6ea9608efad33d0f7d25aea482dc6ad6fc
|
49be6f5d1dcacdbb9589398a26167e203ea0f525
|
/R/get_cache_site.R
|
50c2cde1ccabe9e0922c04ac4bb77abebf6189a9
|
[
"MIT"
] |
permissive
|
NIVANorge/aquamonitR
|
09ece04387ba4b0db43b355fc393c579379bbfbd
|
8a27d3ac83eda4e0fcc4387bff543e7380af0967
|
refs/heads/main
| 2023-07-03T08:21:44.108362
| 2021-08-09T14:29:42
| 2021-08-09T14:29:42
| 323,634,747
| 1
| 1
|
NOASSERTION
| 2021-02-05T08:50:03
| 2020-12-22T13:32:49
|
R
|
UTF-8
|
R
| false
| false
| 81
|
r
|
get_cache_site.R
|
# Name of the Aquamonitor site used as the cache store.
.get_cache_site <- function() {
  "AquaCache"
}
|
a274b906e8293908b229bf6534e82198d26f7569
|
98fd03ebd9de52038f06cd89200a460432f9cc5c
|
/R/pkg_ref_class_extract.R
|
f63ec74133372132036306809a695e88295d72aa
|
[
"MIT"
] |
permissive
|
pharmaR/riskmetric
|
51d3b067da6db6ad1252f3ba706db1d922b5df64
|
3d1501880edc07cff5cd72129c0df0899db83029
|
refs/heads/master
| 2023-07-26T07:33:56.471690
| 2023-05-31T14:58:21
| 2023-05-31T14:58:21
| 173,354,970
| 148
| 32
|
NOASSERTION
| 2023-09-12T20:41:31
| 2019-03-01T19:11:16
|
R
|
UTF-8
|
R
| false
| false
| 4,204
|
r
|
pkg_ref_class_extract.R
|
#' @export
`$.pkg_ref` <- function(x, name) {
  # Route $ access through the pkg_ref [[ method, forcing the field
  # name to character so lazy caching can key on it.
  field <- as.character(name)
  `[[`(x, field)
}
#' @export
`$<-.pkg_ref` <- function(x, name, value) {
  # Route $<- assignment through the pkg_ref [[<- method, forcing the
  # field name to character.
  field <- as.character(name)
  `[[<-`(x, field, value = value)
}
#' Lazily instantiated, immutable metadata access
#'
#' If errors are thrown upon instantiation, they are saved and rethrown any time
#' the value is attempted to be accessed. These then propegate through
#' assessment and scoring functions to affect any downstream metrics.
#'
#' @param x pkg_ref object to extract metadata from
#' @param name name of metadata field to extract
#' @param ... additional arguments used to extract from internal environment
#'
#' @return a pkg_ref object
#' @export
#' @keywords internal
`[[.pkg_ref` <- function(x, name, ...) {
  if (!name %in% bare_env(x, names(x))) {
    # Field not cached yet: compute it once, cache the result (or the
    # error), and rethrow errors so they reach the caller.
    allow_mutation(x, {
      # Fix: a bare pkg_ref_cache(x, name) call preceded the tryCatch,
      # so every cache fill ran twice and any error escaped before it
      # could be cached — contradicting the documented contract above.
      ret <- tryCatch(pkg_ref_cache(x, name), error = function(e) e)
      x[[name]] <- ret
      if (inherits(ret, "error")) stop(ret)
      ret
    })
  } else {
    # Cached: return the stored value, rethrowing a cached error.
    bare_env(x, {
      ret <- x[[name, ...]]
      if (inherits(ret, "error")) stop(ret)
      ret
    })
  }
}
#' @export
`[[<-.pkg_ref` <- function(x, name, value) {
  # Assignment is only legal while allow_mutation() has set the
  # "allowed_mutations" attribute (i.e. during a cache fill).
  if (is.null(attr(x, "allowed_mutations")))
    stop(pkg_ref_mutability_error(name))
  # NOTE(review): this returns the inner assignment's value, not `x`;
  # because pkg_ref is environment-backed the mutation still sticks,
  # but the caller's `x` gets rebound to `value` — confirm intended.
  bare_env(x, x[[name]] <- value)
}
#' @export
`[.pkg_ref` <- function(x, names, ...) {
  # Vectorized [[: extract each requested field into a plain list.
  extract_one <- function(field, ...) x[[field, ...]]
  lapply(names, extract_one, ...)
}
#' @export
`[<-.pkg_ref` <- function(x, names, value) {
  # Element-wise [[<- over parallel name/value vectors.
  # NOTE(review): the Map() result (not `x`) is returned invisibly;
  # this works only because the underlying environment is mutated by
  # reference inside `[[<-.pkg_ref` — confirm before changing.
  invisible(Map(function(name, value) {
    `[[<-`(x, name = name, value = value)
  }, names, value))
}
#' evaluate an expression with a pkg_ref object reclassed as a bare environment
#' object, used to sidestep pkg_ref assignment guardrails
#'
#' @param x a \code{pkg_ref} object
#' @param expr an expression to evaluate, avoiding \code{pkg_ref} extraction
#'   handlers
#' @param envir an environment in which the expression is to be evaluated
#'
#' @return the result of \code{expr}
#' @keywords internal
bare_env <- function(x, expr, envir = parent.frame()) {
  # Temporarily strip the pkg_ref class so `[[`/`[[<-` dispatch to the
  # default environment methods instead of the guarded pkg_ref ones.
  old_class <- class(x)
  class(x) <- "environment"
  # Restore the class even if evaluating `expr` throws.
  on.exit(class(x) <- old_class)
  eval(expr, envir = envir)
}
#' Construct the classed error signalled when assignment into a
#' pkg_ref is attempted outside of an allow_mutation() window
#'
#' @param name name of field for which mutation was attempted
#' @return a \code{simpleError} with subclasses
#'   \code{pkg_ref_mutability_error}, \code{pkg_ref_error}
#' @keywords internal
pkg_ref_mutability_error <- function(name) {
  parts <- list(paste0(
    "Assignment to a pkg_ref environment can only be done in a ",
    "pkg_ref_cache call."))
  # When a field name is known, tell the developer which cache method
  # to implement for it.
  if (!missing(name)) {
    parts <- append(parts, list(paste0(
      "Extend the pkg_ref class by implementing function '",
      "pkg_ref_cache.", name, "'")))
  }
  err <- simpleError(message = paste(parts, collapse = " "))
  class(err) <- c("pkg_ref_mutability_error", "pkg_ref_error", class(err))
  err
}
#' a wrapper to assert that a pkg_ref has been permitted to do an additional
#' mutation, used to handle recursive initialization of cached fields
#'
#' @param x a \code{pkg_ref} object
#' @param expr an expression to evaluate, and possible do a mutation within
#' @param envir an environment in which the expression is to be evaluated
#'
#' @return the result of \code{expr}
#' @keywords internal
allow_mutation <- function(x, expr, envir = parent.frame()) {
  # Open a mutation window for the duration of `expr`; the counter is
  # decremented even if `expr` throws, so windows never leak.
  inc_mutations_count(x)
  on.exit(dec_mutations_count(x))
  # Capture the unevaluated expression so it runs in the caller's
  # frame, not inside this wrapper.
  expr <- substitute(expr)
  eval(expr, envir = envir)
}
#' increment the number of allowed mutations
#'
#' @param x pkg_ref object to increment mutation counter for
#' @return a pkg_ref object
#' @keywords internal
inc_mutations_count <- function(x) {
  # A missing attribute means no mutation window is open yet.
  n <- attr(x, "allowed_mutations")
  if (is.null(n)) {
    n <- 0
  }
  attr(x, "allowed_mutations") <- n + 1
}
#' decrement the number of allowed mutations
#'
#' @param x pkg_ref object to decrement mutation counter for
#' @return pkg_ref object
#' @keywords internal
dec_mutations_count <- function(x) {
  n <- attr(x, "allowed_mutations") - 1
  attr(x, "allowed_mutations") <- n
  # Once the last window closes, drop the attribute entirely so
  # `[[<-.pkg_ref` treats further assignment as illegal.
  if (n <= 0) {
    attr(x, "allowed_mutations") <- NULL
  }
}
|
0c8b238fe0a1b1148a6666231273bde3dd8cf517
|
7c712df431e316689cd9204e98621662a2f4a42d
|
/codes/.ipynb_checkpoints/Tobias_Try-checkpoint.R
|
8ee9f15632860c9c45ea96d66ca4d054b0858cf6
|
[] |
no_license
|
Tobiaspk/Dynamic_Centrality
|
94f3edc479d65d2adb5e423b48f182f3addabc1e
|
a92f84bda94753113cdc3a89cdae5a5b1de3dccb
|
refs/heads/master
| 2023-03-06T08:38:17.275075
| 2021-02-16T18:14:27
| 2021-02-16T18:14:27
| 339,482,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,926
|
r
|
Tobias_Try-checkpoint.R
|
# Exploratory script: build a user->user vote network from forum data
# and compute a dynamic centrality measure over time.
# NOTE(review): relies on package helpers loaded via
# devtools::load_all() (read_csv_data, map_id, create_adj_mat_time,
# store_alpha, ...), so it only runs inside the package checkout.
devtools::load_all()
library(fasttime)
library(Matrix)
## GLOBAL
data_names <- c("Votes", "Postings", "Following")
# store data as .fst
read_csv_data(user = "tobias",
              data_names = data_names,
              save_as_fst = TRUE,
              return_df = FALSE)
# read .fst
votes <- read_fst_data(user = "tobias", data_name = "Votes")
posts <- read_fst_data(user = "tobias", data_name = "Postings")
# combine data
df <- create_user_vote_user_data(df_votes = votes, df_posts = posts)
## Prepare Data
# map user ids from random ids (199, 30, 5000000,...) to (1, 2, 3, 4...)
# date columns to actual dates
users_uq <- unique(c(df$ID_PostUser, df$ID_VoteUser))
df1 <- df[, .(ID_Posting = map_id(ID_Posting),
              ID_PostUser = map_id(ID_PostUser, levels = users_uq),
              ID_VoteUser = map_id(ID_VoteUser, levels = users_uq),
              ID_Article = map_id(ID_Article),
              ArticleChannel = ArticleChannel,
              VoteCreated = fastdate(VoteCreatedAt),
              PostingCreated = fastdate(PostingCreatedAt),
              ArticleCreated = fastdate(ArticlePublishingDate))]
### insights all data
cat_("Dimension = ", collapse_(c("Rows:", "Cols:"), dim(df1)))
for (i in grep_get("ID_", colnames(df1)))
  cat_("Unique ", i, ": ", length(unique(df1[[i]])))
head(df1)
### Create Sparse Matrices at each timepoint
adj_mat <- create_adj_mat_time(df = df1,
                               time_var = "VoteCreated",
                               x = "ID_VoteUser",
                               y = "ID_PostUser")
### Insights time Points
# NOTE(review): `time_points` is used below but never defined in this
# script — it must come from the package namespace or an earlier
# session; confirm before running standalone.
y_n <- sapply(adj_mat, sum)
plot(time_points, y_n, main = "Votes Per Day", type = "l")
points(time_points, y_n, pch = 19, cex = 1, col = "white")
points(time_points, y_n, pch = 19, cex = .4)
###
# Persist the alpha-scaled adjacency matrices, then compute the
# dynamic centrality measure.
path_temp <- paste0(get_path("tobias"), "adj_mat_inv.rds")
B <- store_alpha(A = adj_mat, a = .3, path = path_temp)
DCM <- dcm_simple(B = B)
|
7b49a388032e4fbe1ceab7f8135f6741d7343311
|
3a9f1487a7afa658cf2a2c4c3cab36955eaec89c
|
/Funny_String.R
|
bc019c0d5d217bab697d490672b02ef0b4016640
|
[] |
no_license
|
yusukeaoki1223/Hackerrank-1
|
0427d7c7b0f56b9a6bf495eccd45b7d8a286d17c
|
42df160ac440556e8c550a020de7af1efd16c3be
|
refs/heads/master
| 2022-08-19T10:07:47.541862
| 2017-08-30T14:44:14
| 2017-08-30T14:44:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,381
|
r
|
Funny_String.R
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
library(stringr)
# Read all of stdin; the first line is the test-case count, the rest
# are the strings to classify. readLines warnings (e.g. incomplete
# final line) are deliberately suppressed.
nums <- suppressWarnings(readLines(file("stdin")))
nums2<-nums[-1]
# Byte value(s) of a character string: one integer per byte, obtained
# by converting each raw hex byte back to an integer.
asc <- function(x) {
  raw_bytes <- charToRaw(x)
  strtoi(raw_bytes, 16L)
}
# Reverse each string in a character vector, character by character.
strReverse <- function(x)
  sapply(strsplit(x, NULL), function(chars) paste(rev(chars), collapse = ""))
# NOTE(review): despite the name, this tests for exactly 5 distinct
# values, not 1. It is unused below — confirm intent before relying
# on it.
allSame <- function(x) length(unique(x)) == 5
# For each input string, decide whether it is "Funny": the absolute
# difference between consecutive character codes must match between
# the string and its reverse at every position.
for(i in 1:length(nums2))
{
  # Normalise the string: lower-case, strip spaces, then strip any
  # remaining non-alphabetic characters, split into single characters.
  nums3<-tolower(str_replace_all(as.matrix(nums2[i]), fixed(" "), ""))
  nums3<-str_replace_all(unlist(strsplit(unlist(nums3),"")),"[^[:alnum:]]", "")
  nums3<-gsub("[^A-Za-z ]", "", nums3)
  nums3<-na.omit(nums3)
  nums4<-length(nums3)
  # Apply the same normalisation to the reversed string.
  rev1<-strReverse(nums2[i])
  revs3<-tolower(str_replace_all(as.matrix(rev1), fixed(" "), ""))
  revs3<-str_replace_all(unlist(strsplit(unlist(revs3),"")),"[^[:alnum:]]", "")
  revs3<-gsub("[^A-Za-z ]", "", revs3)
  revs3<-na.omit(revs3)
  revs4<-length(revs3)
  # write.table(rev1, sep = " ", append=T, row.names = F, col.names = F,quote = FALSE,)
  isFunny<-"Funny"
  # Compare |s[j]-s[j-1]| with |r[j]-r[j-1]| for every adjacent pair.
  # NOTE(review): if the cleaned string has fewer than 2 characters,
  # 2:nums4 counts down (e.g. 2:1) and indexes out of range — confirm
  # inputs are always at least 2 characters long.
  for(j in 2:nums4)
  {
    nums5<-asc(as.character(nums3[j]))
    nums6<-asc(as.character(nums3[j-1]))
    revs5<-asc(as.character(revs3[j]))
    revs6<-asc(as.character(revs3[j-1]))
    # write.table(nums5-nums6, sep = " ", append=T, row.names = F, col.names = F,quote = FALSE,)
    # write.table(revs5-revs6, sep = " ", append=T, row.names = F, col.names = F,quote = FALSE,)
    if(abs(nums5-nums6)==abs(revs5-revs6))
    {
      funornot<-"Funny"
      #write.table("Funny", sep = " ", append=T, row.names = F, col.names = F,quote = FALSE,)
    }
    else
    {
      # First mismatch settles the verdict; stop comparing.
      funornot<-"Not Funny"
      isFunny<-"Not Funny"
      break;
    }
  }
  # write.table(isFunny, sep = " ", append=T, row.names = F, col.names = F,quote = FALSE,)
  # Emit one verdict line per input string.
  write.table(funornot, sep = " ", append=T, row.names = F, col.names = F,quote = FALSE,)
}
|
df4aa48fc55845745ba9d5eee56e750482098195
|
ff53eb4044c2006e7ccf4a5a92e666a7fdebfebc
|
/CCM/CCMBaseExperiment.R
|
722724310da897c67756e7532f62a0da930197aa
|
[] |
no_license
|
AhahaJade/netinf
|
d31b8a96782ca9dc6d1b998929f6074820351448
|
282fc2411d609523d0faa9e32b5564c8c72bc1e2
|
refs/heads/master
| 2022-11-05T04:01:19.701124
| 2020-06-26T17:08:42
| 2020-06-26T17:08:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,231
|
r
|
CCMBaseExperiment.R
|
# Run CCM (convergent cross mapping) network inference against each of
# the ground-truth adjacency matrices in `mats` and score the results.
#
# Arguments (as used below):
#   data         time-series data; may be a list (per network), a 2-D
#                matrix, a 3-D array (with trials), or a 4-D array
#                (trials x networks) — see the branching below
#   mats         nvars x nvars (x num_mats) ground-truth adjacency
#   E, tau       CCM embedding dimension and lag
#   num_libs     number of library sizes to evaluate per network
#   num_trials   trials per network
#   num_samples  CCM bootstrap samples
#   preprocfn    preprocessing applied to each data slice
#   freq         how often (in networks) to save intermediate results
#   result_path  directory for saved results; NULL disables saving
#   data_obs_idx logical matrix of observed variables per network
#
# Returns a list: pred_mats (estimated adjacency), graphs (raw CCM rho
# values), table_results (TPR/FPR/accuracy per network).
CCMBaseExperiment <- function(data, mats, E, num_libs, tau=1, num_trials=dim(data)[3], num_samples=100,
                              preprocfn=identity, freq=1, result_path=NULL, data_obs_idx=NULL) {
  # return: votingMats, est, tableResults
  save_data <- TRUE
  if (is.null(result_path)) {
    save_data <- FALSE
  }
  # Make directory to hold results files if one does not already exist
  if (save_data && !dir.exists(result_path)) {
    print(sprintf("Result path not found: %s", result_path))
    stop()
  }
  nvars <- dim(mats)[1] # number of variables / oscillators
  num_mats <- 1
  if (length(dim(mats)) == 3) {
    num_mats <- dim(mats)[3] # number of matrices we try
  }
  # Default: every variable observed in every network.
  if (is.null(data_obs_idx)) {
    data_obs_idx <- matrix(TRUE, nrow=num_mats, ncol=nvars)
  }
  # Tables to hold results
  table_results_TPR <- numeric(length=num_mats)
  table_results_FPR <- numeric(length=num_mats)
  table_results_acc <- numeric(length=num_mats)
  # est holds CCM's estimate of the networks
  est <- array(NaN, c(nvars, nvars, num_mats))
  ccm_rho_graphs <- array(NaN, c(nvars, nvars, num_libs, num_trials, num_mats))
  count <- 1 # number of times we have run network inference method (so know how often to save work)
  # Loop over the networks
  for (i in 1:num_mats) {
    obs_idx <- data_obs_idx[i,]
    num_obs <- sum(obs_idx)
    hasMats = length(dim(mats)) == 3
    truth <- mats
    if (hasMats) {
      truth <- mats[,,i]
    }
    # Off-diagonal positive/negative edge counts for TPR/FPR later.
    num_positives <- sum(truth[obs_idx, obs_idx] & !diag(nvars))
    num_negatives <- sum(!truth[obs_idx, obs_idx] & !diag(nvars))
    hasTrials = length(dim(data)) >= 3
    # Select the slice of `data` matching its layout (see header note).
    if (is.list(data)) {
      preproc_data <- preprocfn(data[[i]][[1]][obs_idx,,])
    } else if (!hasTrials && !hasMats) {
      preproc_data <- preprocfn(data[obs_idx,])
    } else if (hasTrials && !hasMats) {
      preproc_data <- preprocfn(data[obs_idx,,])
    } else {
      preproc_data <- preprocfn(data[obs_idx,,,i])
    }
    # Evenly spaced library sizes up to ~time_length.
    time_length <- dim(preproc_data)[2]
    delta <- floor(time_length/(num_libs + 1))
    lib_sizes <- delta * 1:num_libs
    # Run network inference on this data
    graphs <- get_ccm_rho(preproc_data, E, lib_sizes, tau, num_trials, num_samples)
    ccm_rho_graphs[,,,, i] <- graphs
    # Compute the adjacency matrix predicted by averaging the CCM trials
    voted_graphs <- apply(graphs, c(1, 2, 3), function(x) mean(na.omit(x)))
    # An edge is predicted when the mean rho across library sizes > 0.5.
    predMat <- apply(voted_graphs, c(1, 2), function(x) mean(na.omit(x))) > 0.5
    diag(predMat) <- 0
    est[,, i] <- predMat
    # Save results
    table_results_TPR[i] <- sum((est[,, i] + truth == 2) * !diag(nvars)) / num_positives
    table_results_FPR[i] = sum((est[,, i] - truth == 1) * !diag(nvars)) / num_negatives
    table_results_acc[i] = sum((est[,, i] == truth) * !diag(nvars)) / (num_obs^2-num_obs)
    # NOTE(review): both save branches below are empty — saving is
    # declared (save_data/freq/result_path) but not implemented.
    if (save_data && count %% freq == 0) {
      # save necessary files
    }
    count <- count + 1
  }
  # TODO: Save whole workspace (including all those tables of results)
  if (save_data) {
  }
  table_results <- list("tpr"=table_results_TPR, "fpr"=table_results_FPR, "acc"=table_results_acc)
  result <- list("pred_mats"=est, "graphs"=ccm_rho_graphs, "table_results"=table_results)
  return(result)
}
|
cdc42281de4c56af8c3c8d28522e185fe365fe22
|
096192b28074e28f19acaf69a04667c6ae62d397
|
/input/000_BDRLE/sesion000_crearGuia.R
|
7ec26968a51e895c31989dca6963c453e9c77f75
|
[
"MIT"
] |
permissive
|
red-list-ecosystem/RLE-xml-db
|
a022726fc9b0821e71f661d4dbc02288c1442df9
|
e8decc879cd866b5e820fcb87b01eec0b8bef032
|
refs/heads/master
| 2022-08-09T14:08:52.191003
| 2021-01-14T06:33:58
| 2021-01-14T06:33:58
| 240,814,561
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
sesion000_crearGuia.R
|
##R --vanilla
# Build the RLE XML-guide report: Sweave the .Rnw source and compile
# the resulting .tex to PDF.
# NOTE(review): uses setwd() and require() and repeatedly reassigns
# mi.path (only the last assignment wins) — machine-specific paths;
# confirm before running elsewhere.
setwd("~/tmp/Provita")
require(foreign)
require(gdata)
require(vegan)
require(xtable)
require(rgeos)
require(rgdal)
require(XML)
require(RColorBrewer)
# Today's date stamp, used for naming the archived PDF below.
hoy <- format(Sys.time(), "%Y%m%d")
mi.path <- "~/doc/"
mi.path <- "~/Dropbox/Provita/doc/"
mi.path <- "~/Provita_JRFP/doc/"
mi.dir <- "000_BDRLE"
mi.arch <- "Document1_RLE_XMLguide"
titulo <- "Report1_RLE_XMLdevelopment"
xml.db <- "~/Provita_JRFP/xml/"
cdg.doc <- sprintf("Provita.RLE.2017.%s",1) ## put the document code here...
options(width=80)
##system(sprintf("rm %s.*",mi.arch))
# Weave the .Rnw into .tex, then compile to PDF.
Sweave(file=paste(mi.path,mi.dir,"/",mi.arch,".Rnw",sep=""),eps=F)
##Stangle(file=paste(mi.path,mi.dir,"/",mi.arch,".Rnw",sep=""))
tools::texi2dvi(paste(mi.arch,".tex",sep=""), pdf=TRUE)
##system(sprintf("evince %s.pdf &",mi.arch))
##system(sprintf("mv %s.pdf %s/%s/%s_%s.pdf",mi.arch,mi.path,mi.dir,hoy,titulo))
|
441e21b1b9a312a41acb20de8724b7d87ae735e4
|
9b29740c13ba8a40228da4ba233973e88aec83bb
|
/R/make_stan_model.R
|
21400571435572f13f36df51cba8d9029a1dddaa
|
[] |
no_license
|
boryspaulewicz/bhsdtr
|
2cb8f33e518184426dbd55dc962b504f649cf733
|
3411f3ef80aaaa3320f66fb514604c808726f34c
|
refs/heads/master
| 2022-04-07T02:38:40.392834
| 2020-02-05T18:37:43
| 2020-02-05T18:37:43
| 103,666,088
| 12
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,360
|
r
|
make_stan_model.R
|
## -*- coding: utf-8 -*-
#' Creates the SDT stan model code.
#'
#' \code{make_stan_model} function creates the stan model code
#' defining the SDT model with additional regression structure and
#' random effects if these are specified using the \code{random}
#' argument. See \code{\link{make_stan_data}} for details on how to
#' specify a random effects structure. The model is described in detail
#' in the paper (TODO reference)
#'
#' @param random an optional list specifying random effects
#' structure. See \code{\link{make_stan_data}} for details.
#' @param gamma_link is either 'softmax' (described in the paper), 'log_distance' or 'log_ratio'
#' (See the Readme file in the github repository)
#' @param metad if TRUE meta-d' model code (only with the softmax gamma link function) is created,
#' default is FALSE.
#' @return a string containing the full model definition in the stan
#' modelling language.
#' @examples
#' data(gabor)
#' model = make_stan_model(list(list(group = ~ id, delta = ~ -1 + duration, gamma = ~ 1)))
#' cat(model)
#' @export
make_stan_model = function(random = NULL, gamma_link = 'softmax', metad = FALSE){
  # Validate the link function up front; everything else is template
  # assembly.
  if(!(gamma_link %in% c('softmax', 'log_ratio', 'log_distance')))
    stop("The gamma_link function must be one of the following: 'softmax', 'log_ratio', 'log_distance'")
  # NOTE(review): the model accumulator starts as '' so the final
  # paste(collapse='\n') begins with an empty first line — confirm
  # harmless for Stan before changing.
  model = ''
  # Choose the SDT or meta-d' template shipped with the package.
  if(!metad){
    f = file(paste(path.package('bhsdtr'), '/stan_templates/sdt_template.stan', sep = ''))
  }else{
    f = file(paste(path.package('bhsdtr'), '/stan_templates/metad_template.stan', sep = ''))
  }
  ## We go line by line
  for(part in readLines(f)){
    ## If this is part of the random effects' specification ...
    if(rmatch('//(common|delta|gamma)', part)){
      ## ... and there are some random effects in the model ...
      if(length(random) > 0)
        for(l in 1:length(random)){
          ## ... then add the line read from the template with % replaced by the grouping factor number ...
          if(rmatch('//common', part))
            model[length(model)+1] = gsub('%', l, part)
          ## ... and do the same with parts of delta/gamma
          ## random effects' specification if delta/gamma is
          ## associated with random effects of the given
          ## grouping factor ...
          for(par in c('delta', 'gamma'))
            if(!is.null(random[[l]][[par]]) & rmatch(sprintf('//%s', par), part))
              model[length(model)+1] = gsub('%', l, part)
        }
    }else if(rmatch('//link-gamma', part)){
      ## Replace the gamma link function specification with the
      ## chosen link function
      model = c(model,
                readLines((f2 = file(paste(path.package('bhsdtr'), sprintf('/stan_templates/link_%s.stan', gamma_link), sep = '')))))
      close(f2)
    }else{
      ## This line is not about the random effects' specification
      model[length(model)+1] = part
    }
  }
  close(f)
  # Return the assembled model as a single string.
  paste(model, collapse = '\n')
}
}
# Vectorized pattern match: TRUE for each element of `vector` that
# contains `pattern` (a regular expression).
#
# Fix: replaces a hand-rolled loop over 1:length(vector) — which is
# exactly what grepl() does, and which additionally misbehaved on
# zero-length input (1:0 counts down) — with the vectorized builtin.
rmatch <- function (pattern, vector){
  grepl(pattern, vector)
}
|
2027301a38a199b3e000a6ccfa871ba1eaca94f0
|
3f7efd40023f39eb3cfde82413596f602211d0f1
|
/R/funcs.R
|
e07458237f26deaf921b583390c23abb6126d4ff
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tbep-tech/piney-point
|
72881b5392e88f2a806774d76b696e10883b87c1
|
d26d62958d5e2d141ce322ba8251b1f87e39d15d
|
refs/heads/main
| 2023-08-31T06:39:31.994455
| 2023-08-31T01:34:01
| 2023-08-31T01:34:01
| 353,409,644
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,031
|
r
|
funcs.R
|
# Extract the legend ("guide-box") grob from a built ggplot object.
g_legend <- function(a.gplot){
  built <- ggplot_gtable(ggplot_build(a.gplot))
  grob_names <- sapply(built$grobs, function(g) g$name)
  legend_idx <- which(grob_names == "guide-box")
  return(built$grobs[[legend_idx]])
}
#' Build a stacked plotly panel of six water-quality time series
#' (TP, TN, NH34, chlorophyll-a, pH, salinity) for one station.
#'
#' wqdat   long-format data with columns station, date, var, val
#' station station identifier (character) to plot
#' cols    vector of at least six colours, one per parameter
#' parms   lookup table with columns var and lbs (axis labels)
plo_fun <- function(wqdat, station, cols, parms){
  # scale ranges
  # NOTE(review): `rngs` is computed but unused — the per-panel
  # `range =` arguments below are all commented out.
  rngs <- wqdat %>%
    group_by(var) %>%
    summarise(
      minv = min(val, na.rm = T),
      maxv = max(val, na.rm = T),
      .groups = 'keep'
    ) %>%
    nest %>%
    deframe %>%
    lapply(., unlist)
  # parameter labels, keyed by variable name
  lbs <- parms %>%
    select(var, lbs) %>%
    deframe
  # subset to the requested station and spread parameters into columns
  toplo <- wqdat %>%
    filter(station %in% !!station) %>%
    spread(var, val)
  # y-axis title font size shared by all panels
  ylbsz <- 13
  # One scatter panel per parameter, stacked below.
  p1 <- plot_ly(toplo) %>%
    add_markers(x = ~date, y = ~tp, type = 'scatter', color = I(cols[6]), mode = 'markers', line = list(shape = 'linear'), showlegend = F) %>%
    layout(
      yaxis = list(title = lbs['tp'], titlefont = list(size = ylbsz))#, range = rngs$tp)
    )
  p2 <- plot_ly(toplo) %>%
    add_markers(x = ~date, y = ~tn, type = 'scatter', color = I(cols[5]), mode = 'markers', line = list(shape = 'linear'), showlegend = F) %>%
    layout(
      yaxis = list(title = lbs['tn'], titlefont = list(size = ylbsz))#, range = rngs$tn)
    )
  p3 <- plot_ly(toplo) %>%
    add_markers(x = ~date, y = ~nh34, type = 'scatter', color = I(cols[4]), mode = 'markers', line = list(shape = 'linear'), showlegend = F) %>%
    layout(
      yaxis = list(title = lbs['nh34'], titlefont = list(size = ylbsz))#, range = rngs$nh34)
    )
  p4 <- plot_ly(toplo) %>%
    add_markers(x = ~date, y = ~chla, type = 'scatter', color = I(cols[3]), mode = 'markers', line = list(shape = 'linear'), showlegend = F) %>%
    layout(
      yaxis = list(title = lbs['chla'], titlefont = list(size = ylbsz))#, range = rngs$chla)
    )
  p5 <- plot_ly(toplo) %>%
    add_markers(x = ~date, y = ~ph, type = 'scatter', color = I(cols[2]), mode = 'markers', line = list(shape = 'linear'), showlegend = F) %>%
    layout(
      yaxis = list(title = lbs['ph'], titlefont = list(size = ylbsz))#, range = rngs$ph)
    )
  p6 <- plot_ly(toplo) %>%
    add_markers(x = ~date, y = ~sal, type = 'scatter', color = I(cols[1]), mode = 'markers', line = list(shape = 'linear'), showlegend = F) %>%
    layout(
      yaxis = list(title = lbs['sal'], titlefont = list(size = ylbsz))#, range = rngs$sal)
    )
  # Stack the six panels with a shared, fixed-range x axis.
  p <- subplot(p1, p2, p3, p4, p5, p6, nrows = 6, shareX = T, shareY = F, titleY = T) %>%
    layout(
      xaxis = list(title = NA, range = c(as.Date('1995-01-01'), as.Date('2022-01-01')))
    ) %>%
    plotly::config(
      toImageButtonOptions = list(
        format = "svg",
        filename = "myplot"
      )
    )
  return(p)
}
# function for plotting rapid response transect data
# modified from show_transect in tbpetools
#
# savdat: SAV transect data (date, location, taxa, abundance, bb columns)
# mcrdat: macroalgae transect data (same structure as savdat)
# savsel: character vector of SAV taxa to display
# mcrsel: character vector of macroalgae groups to display
# base_size: base font size passed to ggplot2::theme_minimal
#
# returns a patchwork object stacking the SAV panel above the macroalgae panel
show_rstransect <- function(savdat, mcrdat, savsel, mcrsel, base_size = 12){

  # fixed factor levels for SAV species and macroalgae groups, plus
  # Braun-Blanquet abundance breaks and their percent-cover labels
  savlevs <- c('Thalassia testudinum', 'Halodule wrightii', 'Syringodium filiforme', 'Ruppia maritima', 'Halophila engelmannii', 'Halophila decipiens')
  grplevs <- c('Red', 'Green', 'Brown', 'Cyanobacteria')
  abulabs <- c('<1%', '1-5%', '6-25%', '26-50%', '51-75%', '76-100%')
  abubrks <- c(0, 1, 2, 3, 4, 5)
  colpal <- colorRampPalette(RColorBrewer::brewer.pal(n = 8, name = 'Dark2'))
  szrng <- c(2, 16)

  # x-axis breaks per dataset and a shared x-axis range for both panels
  savxlms <- savdat %>%
    pull(location) %>%
    unique %>%
    sort
  mcrxlms <- mcrdat %>%
    pull(location) %>%
    unique %>%
    sort
  xlms <- range(savxlms, mcrxlms)

  # get dates for factor levels
  # this makes sure that y values on plots are shared
  dts1 <- savdat %>%
    pull(date)
  dts2 <- mcrdat %>%
    pull(date)
  # bug fix: combine dates from BOTH datasets (was c(dts1, dts1), which
  # dropped the macroalgae dates from the shared y-axis levels)
  dts <- c(dts1, dts2) %>%
    unique %>%
    sort %>%
    format('%b %d, %Y')

  # prep sav plot data: numeric transect distance, presence/absence flag,
  # and date formatted as a factor over the shared levels
  savdatfrm <- savdat %>%
    dplyr::mutate(
      Year = lubridate::year(date),
      location = as.numeric(as.character(location)),
      pa = ifelse(bb == 0, 0, 1),
      date = format(date, '%b %d, %Y'),
      date = factor(date, levels = dts)
    ) %>%
    dplyr::select(Year, date, location, taxa, abundance, pa, bb)

  # sort color palette so it's the same regardless of species selected
  savcol <- colpal(length(savlevs))
  names(savcol) <- savlevs
  savcol <- savcol[savsel]

  # legend labels
  leglab <- 'Abundance (bb)'

  # data with species present
  toplo1a <- savdatfrm %>%
    dplyr::filter(taxa %in% !!savsel) %>%
    dplyr::filter(pa == 1) %>%
    dplyr::mutate(
      bb = round(bb, 1),
      tltp = paste0(taxa, ', ', abundance)
    ) %>%
    dplyr::arrange(date, location)

  # find overplots (same date/location with multiple taxa) and spread them
  # slightly along the transect so all points remain visible
  dups1 <- duplicated(toplo1a[, c('date', 'location')])
  dups2 <- duplicated(toplo1a[, c('date', 'location')], fromLast = TRUE)
  dups <- apply(cbind(dups1, dups2), 1, any)
  toplo1a <- toplo1a %>%
    mutate(
      dups = dups
    ) %>%
    group_by(date, location) %>%
    mutate(
      location = case_when(
        dups ~ location + seq(-1 * length(dups) / 3, length(dups) / 3, length.out = length(dups)),
        TRUE ~ location
      )
    ) %>%
    ungroup()

  # samples where nothing was found (all pa zero at a date/location)
  toplo2a <- savdatfrm %>%
    group_by(date, location) %>%
    filter(sum(pa) == 0) %>%
    ungroup() %>%
    select(date, location) %>%
    unique()

  # SAV panel: black dots mark empty samples, bubbles scaled by abundance
  pa <- ggplot2::ggplot(toplo1a, ggplot2::aes(y = date, x = location)) +
    ggplot2::geom_point(data = toplo2a, alpha = 1, colour = 'black', size = 2) +
    ggplot2::geom_point(aes(size = bb, fill = taxa), alpha = 0.8, pch = 21) +
    ggplot2::scale_fill_manual(values = savcol) +
    ggplot2::scale_radius(limits = range(abubrks), labels = abulabs, breaks = abubrks, range = szrng) +
    ggplot2::theme_minimal(base_size = base_size, base_family = 'Roboto') +
    ggplot2::scale_y_discrete(limits = dts, breaks = dts) +
    ggplot2::scale_x_continuous(breaks = savxlms) +
    ggplot2::coord_cartesian(xlim = xlms) +
    ggplot2::theme(
      panel.grid.major.y = ggplot2::element_blank(),
      panel.grid.minor.y = ggplot2::element_blank(),
      panel.grid.minor.x = ggplot2::element_blank(),
      legend.title = ggplot2::element_blank(),
      strip.text = ggplot2::element_text(hjust = 0),
      axis.title.y = element_blank(),
      axis.title.x = element_blank()
    ) +
    ggplot2::labs(
      x = 'Transect distance (m)',
      title = 'Submerged aquatic vegetation'
    ) +
    guides(fill = guide_legend(override.aes = list(size = 7), order = 1))

  # prep mcr plot data (same transformations as the SAV data)
  mcrdatfrm <- mcrdat %>%
    dplyr::mutate(
      Year = lubridate::year(date),
      location = as.numeric(as.character(location)),
      pa = ifelse(bb == 0, 0, 1),
      date = format(date, '%b %d, %Y'),
      date = factor(date, levels = dts)
    ) %>%
    dplyr::select(Year, date, location, taxa, abundance, pa, bb)

  # sort color palette so it's the same regardless of group selected
  mcrcol <- c('tomato1', 'lightgreen', 'burlywood3', 'lightblue')
  names(mcrcol) <- grplevs
  mcrcol <- mcrcol[mcrsel]

  # legend labels
  leglab <- 'Abundance (bb)'

  # data with groups present
  toplo1b <- mcrdatfrm %>%
    dplyr::filter(taxa %in% mcrsel) %>%
    dplyr::filter(pa == 1) %>%
    dplyr::mutate(
      bb = round(bb, 1),
      tltp = paste0(taxa, ', ', abundance)
    )

  # jitter duplicates
  dups1 <- duplicated(toplo1b[, c('date', 'location')])
  dups2 <- duplicated(toplo1b[, c('date', 'location')], fromLast = TRUE)
  dups <- apply(cbind(dups1, dups2), 1, any)
  toplo1b <- toplo1b %>%
    mutate(
      dups = dups
    ) %>%
    group_by(date, location) %>%
    mutate(
      location = case_when(
        dups ~ location + seq(-1 * length(dups) / 3, length(dups) / 3, length.out = length(dups)),
        TRUE ~ location
      )
    ) %>%
    ungroup()

  # samples where nothing was found
  toplo2b <- mcrdatfrm %>%
    group_by(date, location) %>%
    filter(sum(pa) == 0) %>%
    ungroup() %>%
    select(date, location) %>%
    unique()

  # macroalgae panel; the all-NA layer only exists so 'Empty sample'
  # appears in the collected legend
  pb <- ggplot2::ggplot(toplo1b, ggplot2::aes(y = date, x = location)) +
    ggplot2::geom_point(data = toplo2b, colour = 'black', alpha = 1, size = 2) +
    ggplot2::geom_point(inherit.aes = FALSE, aes(colour = 'Empty sample'), x = NA, y = NA) +
    ggplot2::geom_point(aes(size = bb, fill = taxa), alpha = 0.8, pch = 21) +
    ggplot2::scale_fill_manual(values = mcrcol) +
    ggplot2::scale_colour_manual(values = 'black') +
    ggplot2::scale_radius(limits = range(abubrks), labels = abulabs, breaks = abubrks, range = szrng, guide = FALSE) +
    ggplot2::theme_minimal(base_size = base_size, base_family = 'Roboto') +
    ggplot2::scale_y_discrete(limits = dts, breaks = dts) +
    ggplot2::scale_x_continuous(breaks = mcrxlms) +
    ggplot2::coord_cartesian(xlim = xlms) +
    ggplot2::theme(
      panel.grid.major.y = ggplot2::element_blank(),
      panel.grid.minor.y = ggplot2::element_blank(),
      panel.grid.minor.x = ggplot2::element_blank(),
      legend.title = ggplot2::element_blank(),
      strip.text = ggplot2::element_text(hjust = 0),
      axis.title.y = element_blank()
    ) +
    ggplot2::labs(
      x = 'Transect distance (m)',
      title = 'Macroalgae'
    ) +
    guides(
      fill = guide_legend(override.aes = list(size = 7), order = 1),
      colour = guide_legend(override.aes = list(size = 2))
    )

  # stack the two panels with a shared (collected) legend
  p <- pa + pb + plot_layout(ncol = 1, heights = c(0.9, 1), guides = 'collect')

  return(p)
}
# function for creating popup plot with mapview click
#
# station: station identifier to subset datin with
# datin: monitoring data including a "nrmrng" column formatted as "min-max"
#
# returns a ggplot of observed values over time with monthly normal-range
# rectangles overlaid
rswqpopup_plo <- function(station, datin){

  # observed monitoring data for the selected station; the normal range
  # string "min-max" is split into numeric min/max columns
  obsdat <- datin %>%
    filter(station == !!station) %>%
    mutate(
      minrng = as.numeric(gsub('(^.*)\\-.*$', '\\1', nrmrng)),
      maxrng = as.numeric(gsub('^.*\\-(.*)$', '\\1', nrmrng))
    ) %>%
    select(-nrmrng)

  # one rectangle per month spanning the monthly normal range
  rngdat <- obsdat %>%
    select(date, minrng, maxrng) %>%
    mutate(
      mo = lubridate::month(date)
    ) %>%
    group_by(mo) %>%
    mutate(
      datestr = floor_date(date, unit = 'month'),
      dateend = ceiling_date(date, unit = 'month')
    ) %>%
    ungroup %>%
    select(-date) %>%
    unique

  # axis label and y limits covering both observations and reference ranges
  ylb <- unique(obsdat$lbs)
  ylm <- range(c(unique(obsdat$minrng), unique(obsdat$maxrng), obsdat$val), na.rm = TRUE)

  out <- ggplot() +
    geom_line(data = obsdat, aes(x = date, y = val)) +
    geom_point(data = obsdat, aes(x = date, y = val), size = 2) +
    geom_rect(data = rngdat, aes(xmin = datestr, xmax = dateend, ymin = minrng, ymax = maxrng, fill = 'Monthly normal range', group = mo), alpha = 0.2) +
    coord_cartesian(ylim = ylm, xlim = range(obsdat$date)) +
    scale_fill_manual(values = 'blue') +
    theme_minimal(base_size = 18) +
    theme(
      legend.title = element_blank(),
      axis.title.x = element_blank(),
      legend.position = 'top'
    ) +
    labs(
      y = ylb,
      subtitle = paste('Station', station)
    )

  return(out)
}
# estimate TBBI, used in dat_proc, source code from tbeptools
#
# Compute the Tampa Bay Benthic Index (TBBI) from raw taxa counts.
#
# datin: raw benthic data with Date, StationNumber, FAMILY, NAME, Count,
#   and AdjCount columns
# salin: a single salinity value (Practical Salinity Scale) applied to
#   every station when standardizing species richness
#
# returns one row per station with total abundance (individuals/m2),
# species richness, the TBBI score and its category/color, the sample date,
# a "Month Year" label, and a nested per-family abundance table
tbbi_fun <- function(datin, salin){

  # standardize column names and drop records with no family assignment
  taxdat <- datin %>%
    select(date = Date, station = StationNumber, FAMILY, NAME, TaxaCount = Count, AdjCount) %>%
    filter(FAMILY != 'NULL') %>%
    mutate(
      date = as.Date(date)
    )

  # taxa counts aggregated by station & taxa list id
  taxasums <- taxdat %>%
    dplyr::group_by(station, FAMILY, NAME) %>%
    dplyr::summarise(
      SumofCount = sum(TaxaCount, na.rm = TRUE),
      SumofAdjCount = sum(AdjCount, na.rm = TRUE),
      .groups = 'drop'
    )

  # biology stats aggregated by station
  biostats <- taxasums %>%
    dplyr::group_by(station) %>%
    dplyr::summarise(
      SpeciesRichness = length(na.omit(NAME)),
      RawCountAbundance = sum(SumofCount, na.rm = TRUE),
      AdjCountAbundance = sum(SumofAdjCount, na.rm = TRUE),
      .groups = 'drop'
    )

  # indicator-family abundances used by the TBBI formula
  spionid <- taxasums %>%
    dplyr::filter(FAMILY %in% 'Spionidae') %>%
    dplyr::group_by(station) %>%
    dplyr::summarise(SpionidAbundance = sum(SumofAdjCount, na.rm = TRUE), .groups = 'drop')
  capitellid <- taxasums %>%
    dplyr::filter(FAMILY %in% 'Capitellidae') %>%
    dplyr::group_by(station) %>%
    dplyr::summarise(CapitellidAbundance = sum(SumofAdjCount, na.rm = TRUE), .groups = 'drop')

  # calculate biology populations/abundance by station, filling missing
  # values with zero and standardizing log species richness by salinity
  biostatspopulation <- biostats %>%
    dplyr::left_join(spionid, by = 'station') %>%
    dplyr::left_join(capitellid, by = 'station') %>%
    dplyr::mutate(
      Salinity = salin,
      StandPropLnSpecies = dplyr::case_when(
        is.na(SpeciesRichness) | is.na(Salinity) ~ 0,
        TRUE ~ ((log(SpeciesRichness + 1) / log(10))
                / ((( 3.2983 - 0.23576 * Salinity ) + 0.01081 * Salinity^2) - 0.00015327 * Salinity^3)
                - 0.84227
        ) / 0.18952
      ),
      SpeciesRichness = ifelse(is.na(SpeciesRichness), 0, SpeciesRichness),
      RawCountAbundance = ifelse(is.na(RawCountAbundance), 0, RawCountAbundance),
      TotalAbundance = ifelse(is.na(AdjCountAbundance), 0, AdjCountAbundance),
      SpionidAbundance = ifelse(is.na(SpionidAbundance), 0, SpionidAbundance),
      CapitellidAbundance = ifelse(is.na(CapitellidAbundance), 0, CapitellidAbundance)
    ) %>%
    dplyr::select(station, SpeciesRichness, RawCountAbundance, TotalAbundance, SpionidAbundance, CapitellidAbundance, StandPropLnSpecies)

  # discriminant-score TBBI, rescaled to 0-100; the four cases handle
  # zero abundances of the indicator families (asin of 0/x is undefined
  # in the standardized form, so fixed substitutes are used)
  biostatstbbi <- biostatspopulation %>%
    dplyr::mutate(
      TBBI = dplyr::case_when(
        CapitellidAbundance == 0 & SpionidAbundance != 0 ~
          (((-0.11407) + (StandPropLnSpecies * 0.32583 ) +
              (((asin(SpionidAbundance / TotalAbundance) - 0.11646 ) / (0.18554)) *
                 (-0.1502)) + ((-0.51401) * (-0.60943))) - (-3.3252118)) / (0.7578544 + 3.3252118),
        CapitellidAbundance != 0 & SpionidAbundance == 0 ~
          (((-0.11407) + (StandPropLnSpecies * 0.32583) + ((-0.62768) * (-0.1502)) +
              ((( asin( CapitellidAbundance / TotalAbundance) - 0.041249) / 0.08025) *
                 (-0.60943))) - (-3.3252118)) / (0.7578544 + 3.3252118),
        CapitellidAbundance == 0 & SpionidAbundance == 0 & TotalAbundance != 0 ~
          (((-0.11407) + (StandPropLnSpecies * 0.32583) + ((-0.62768) * (-0.1502)) +
              ((-0.51401) * (-0.60943))) - (-3.3252118)) / ( 0.7578544 + 3.3252118),
        TotalAbundance == 0 ~ 0,
        TRUE ~ ((( -0.11407) + (StandPropLnSpecies * 0.32583) +
                   (((asin(SpionidAbundance / TotalAbundance) - 0.11646) / 0.18554) * (-0.1502)) +
                   (((asin( CapitellidAbundance / TotalAbundance) - 0.041249) / 0.08025) *
                      (-0.60943))) - (-3.3252118)) / (0.7578544 + 3.3252118)
      ),
      TBBI = round(100 * TBBI, 2)
    ) %>%
    dplyr::select(station, TotalAbundance, SpeciesRichness, TBBI) %>%
    dplyr::filter(!is.na(station))

  # sample dates per station
  dts <- taxdat %>%
    select(station, date) %>%
    unique

  # family sums, nested so each station carries its own abundance table
  taxfams <- taxdat %>%
    group_by(station, FAMILY) %>%
    summarise(
      TotalAbundance = sum(AdjCount, na.rm = TRUE),
      .groups = 'drop'
    ) %>%
    group_by(station) %>%
    nest() %>%
    rename(family_abu = data)

  # total abundance is individuals/m2
  out <- biostatstbbi %>%
    dplyr::mutate(
      TBBICat = dplyr::case_when(
        TBBI == 0 ~ 'Empty Sample',
        TBBI < 73 ~ 'Degraded',
        TBBI >= 73 & TBBI < 87 ~ 'Intermediate',
        TBBI >= 87 ~ 'Healthy',
        TRUE ~ NA_character_
      ),
      col = dplyr::case_when(
        TBBICat == 'Empty Sample' ~ 'grey',
        TBBICat == 'Degraded' ~ 'red',
        TBBICat == 'Intermediate' ~ 'yellow',
        TBBICat == 'Healthy' ~ 'darkgreen'
      )
    ) %>%
    left_join(dts, ., by = 'station') %>%
    mutate(
      # bug fix: derive the year from the sample date instead of the
      # previous hard-coded ' 2021' suffix (identical output for 2021 data)
      modt = paste(as.character(lubridate::month(date, abbr = FALSE, label = TRUE)), lubridate::year(date))
    ) %>%
    left_join(taxfams, by = 'station')

  return(out)
}
#' Convert DO into % saturation for 1-m depth
#' Use convention of expressing saturation at 1 atm.
#' data(sfbay)
#'
#' @param do, dissolved oxygen mg/l
#' @param t temperature, degrees C
#' @param S salinity, on the Practical Salinity Scale
#' @param P pressure, atm
#' @return percent saturation: 100 * do / (oxygen solubility at 1 atm)
DOsat <- function (do, t, S, P = NULL)
{
    # absolute temperature in K; NOTE: this local `T` shadows the TRUE
    # shorthand inside the function body
    T = t + 273.15
    # Benson-Krause-style equation: ln of oxygen solubility (mg/l) at 1 atm
    # as a function of absolute temperature and salinity
    lnCstar = -139.34411 + 157570.1/T - 66423080/T^2 + 1.2438e+10/T^3 -
        862194900000/T^4 - S * (0.017674 - 10.754/T + 2140.7/T^2)
    Cstar1 <- exp(lnCstar)
    if (is.null(P)) {
        out <- Cstar1
    }
    else {
        # pressure-corrected solubility: Pwv is the vapor pressure of water
        # (atm) and theta the second virial coefficient term
        Pwv = (1 - 0.000537 * S) * exp(18.1973 * (1 - 373.16/T) +
            3.1813e-07 * (1 - exp(26.1205 * (1 - T/373.16))) -
            0.018726 * (1 - exp(8.03945 * (1 - 373.16/T))) +
            5.02802 * log(373.16/T))
        theta = 0.000975 - 1.426e-05 * t + 6.436e-08 * t^2
        out <- Cstar1 * P * (1 - Pwv/P) * (1 - theta * P)/((1 - Pwv) *
            (1 - theta))
    }
    # NOTE(review): the if/else above is effectively dead code -- `out` is
    # overwritten here, so the percent saturation is always expressed
    # relative to the 1-atm solubility (consistent with the header comment)
    # and the `P` argument has no effect on the result. Confirm whether the
    # pressure-corrected value should be used when P is supplied.
    out <- 100 * do / Cstar1
    return(out)
}
|
92d90bbf9d778e6100ff56de5e4aed18dde2ac1e
|
80048b095e0aeff1a9285a1d11425c4cbab1f9bd
|
/man/Bonferroni_m.Rd
|
95a303601f36424ec3d3d1a1c4e8b8bff3e2dbcb
|
[] |
no_license
|
alexmarzel/svisits
|
1af7b9ac8b35b7593585347eaf7fe45e41c3bac2
|
9b6f13e251a3eb443096a772331447b8b7874035
|
refs/heads/master
| 2020-05-25T23:25:32.143389
| 2017-01-22T17:37:00
| 2017-01-22T17:37:00
| 63,353,396
| 0
| 0
| null | 2016-07-15T13:55:59
| 2016-07-14T16:45:09
|
R
|
UTF-8
|
R
| false
| false
| 2,404
|
rd
|
Bonferroni_m.Rd
|
\name{Bonferroni_m}
\alias{Bonferroni_m}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Detecting the pairs using Bonferroni correction
}
\description{
This method is much faster than the shuffling method, but less precise.
}
\usage{
Bonferroni_m(unadjusted_pairs,
ids=ids,
prob = 1/75,
alpha = 0.01,
only_significant = TRUE)
}
\arguments{
\item{unadjusted_pairs}{
a \code{"table"} object containing the pairs and the number of shared visits. See \code{\link{get_observed_pairs}}.
}
\item{ids}{a \code{"data.table"} object with columns "ids" and "N_visits". The "ids" column represents patient identifiers and is the reference (see \code{setkey} from package \code{data.table} for more details). The "N_visits" contains the number of visits for each patient.}
\item{prob}{
probability of sharing a single visit. Default \code{1/75}.
}
\item{alpha}{
type I error
}
\item{only_significant}{
returns only significant pairs above the threshold (default) or all the pairs}
}
\details{
}
\value{a data frame with
\describe{
\item{\code{"allPairs"}}{pair identifier}
\item{\code{"Freq"}}{the unadjusted number of shared visits}
\item{\code{"id_1"}}{identifier of the first pair member}
\item{\code{"id_2"}}{identifier of the second pair member}
\item{\code{"N_visits.x"}}{total number of visits of the first pair member}
\item{\code{"N_visits.y"}}{total number of visits of the second pair member}
\item{\code{"Prob_for_Bonferr"}}{probability of sharing the given number of shared visits}
\item{\code{"BP"}}{row number}
\item{\code{"ltp"}}{the minus log10-transformed probability of sharing the given number of shared visits}
}
}
\examples{
# load data
data("simulated_data")
db_dates <- prepare_db(your_database = simulated_data,
ids_column = "subject",
dates_column = "sim_dates")
# first get unadjusted pairs
unadjusted_observed_pairs <- get_observed_pairs(db_dates)
# prepare ids
ids <- data.table(ids = as.character(names(table(db_dates$subject))),
N_visits = as.character(as.numeric(table(db_dates$subject))))
setkey(ids, "ids")
# run
Bonferroni_m_output <- Bonferroni_m(unadjusted_observed_pairs,
ids = ids, prob = 1/75, alpha = 0.01)
# number of significant pairs
nrow(Bonferroni_m_output)
}
|
5bb0f295db995f014cf119cc7542cfac80682697
|
c66cf0b8cedef9cc8294ee94a26ad8b4fcb0c609
|
/MIW/base.R
|
4b36275fa60ca04c88e777010d9993ce5e7b7cde
|
[] |
no_license
|
tkachenia/multimedia_course
|
931b11d2b1f95543c6ce2e8eab296325e6d215ab
|
56d6401edec2b2c0a07b7a3b8306cc4f1712fb7a
|
refs/heads/master
| 2021-11-29T03:46:05.843055
| 2021-11-25T15:10:25
| 2021-11-25T15:10:25
| 157,021,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,324
|
r
|
base.R
|
# Hex-encode each character of a string (UTF-8 bytes), one result per
# character. Multi-byte characters yield their bytes joined by tabs; the
# returned vector is named by the original characters.
#
# str_abc: string whose characters are encoded
# prefix: optional prefix prepended to every byte (e.g. "0x")
#
# Bug fix: the original sapply/dim approach produced deparse garbage when
# characters had differing byte lengths (ragged sapply returns a list).
# Processing each character independently handles any mix of byte lengths
# while preserving the original output for uniform-length input.
hexAbc <- function(str_abc, prefix = "") {
  abc <- strsplit(str_abc, "")[[1]]
  utf8 <- vapply(
    enc2utf8(abc),
    function(ch) {
      # bytes of this character in UTF-8, uppercased and prefixed
      bytes <- toupper(as.character(charToRaw(ch)))
      paste(paste0(prefix, bytes), collapse = "\t")
    },
    character(1),
    USE.NAMES = FALSE
  )
  names(utf8) <- abc
  utf8
}
# Hex-encode a whole string: its UTF-8 bytes, uppercased, each optionally
# prefixed, joined by single spaces into one string.
hexStr <- function(str, prefix = "") {
  bytes <- charToRaw(enc2utf8(str))
  encoded <- paste0(prefix, toupper(as.character(bytes)))
  paste(encoded, collapse = " ")
}
# Hex-encode an integer as a fixed number of bytes, little-endian by
# default; bytes are uppercased, optionally prefixed, and space-joined.
#
# int: the value to encode (interpreted as a 32-bit integer)
# bytesCount: number of bytes in the output (truncated or zero-padded)
# endian: "little" (default) or "big" byte order
# prefix: optional per-byte prefix (e.g. "0x")
hexInt <- function(int, bytesCount = 4, endian = c("little", "big"), prefix = "") {
  # 32-bit little-endian byte representation as two-digit hex strings
  raw_le <- as.character(packBits(intToBits(int), type = "raw"))
  if (length(raw_le) > bytesCount) {
    raw_le <- raw_le[seq_len(bytesCount)]
  } else {
    raw_le <- c(raw_le, rep("00", bytesCount - length(raw_le)))
  }
  hex <- paste0(prefix, toupper(raw_le))
  if (endian[1] == "big") {
    hex <- rev(hex)
  }
  paste(hex, collapse = " ")
}
# Build a named "field" (wrapped in a single-element list) from a vector of
# values: character input is hex-encoded with hexStr(), anything else with
# hexInt(). Extra arguments are forwarded to hexInt().
makeField <- function(values = c(""), ...) {
  if (is.character(values[1])) {
    field <- vapply(values, hexStr, character(1))
  } else {
    field <- vapply(values, hexInt, character(1), ...)
  }
  names(field) <- values
  list(field)
}
|
2d6b7a0cd23c49a9402b11db54c3491bfa4f126d
|
ac2762a5038694f43e41ec93a5b1ad721f70062c
|
/man/residual.plots.Rd
|
7ccc315b4ccc8bcd34e63e9cdf92acb1dedb3786
|
[] |
no_license
|
adamleerich/alr3-aa1f43f9
|
8de29bbacd14a8baccd934b2c337244ba4e81d38
|
ccd7ec645559a0258e07bd31e773e16565457a88
|
refs/heads/main
| 2023-04-29T18:24:42.166791
| 2021-05-10T14:54:27
| 2021-05-10T14:54:27
| 366,079,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,151
|
rd
|
residual.plots.Rd
|
\name{residual.plots}
\alias{residual.plots}
\alias{residual.plots.lm}
\alias{resplot}
\alias{resid.curv.test}
\alias{tukey.nonadd.test}
\title{Residual plots and curvature tests for linear model fits}
\description{
Plots the residuals versus each term in a mean function and versus
fitted values. Also computes a curvature test for each of the plots
by adding a quadratic term and testing the quadratic to be zero. This
is Tukey's test for nonadditivity when plotting against fitted values.
}
\usage{
### This is a generic function with only one required argument:
residual.plots (m, ...)
### When the first argument is a linear model (of class lm), the form of the
### function is
\method{residual.plots}{lm}(m,vars=~.,fitted=TRUE,plot=TRUE,
layout=NULL,ask,...)
### The following are three related functions:
resplot(m,varname="tukey",type="pearson",
plot=TRUE,add.quadratic=TRUE,
ylab=paste(string.capitalize(type),"Residuals"),...)
resid.curv.test(m,varname)
tukey.nonadd.test(m)
}
\arguments{
\item{m}{ \code{lm} regression object }
\item{vars}{ A one-sided formula that specifies a subset of the predictors.
One
residual plot is drawn for each column specified. The default
\code{~.} is to plot against all predictors. For example, the
specification \code{vars = ~.-X3} would plot against all predictors
except for \code{X3}.}
\item{fitted}{If TRUE, the default, plot against fitted values.}
\item{tukey}{If TRUE, draw plot of residuals versus fitted values and compute
Tukey's test of non-additivity.}
\item{layout}{ If set to a value like \code{c(1,1)} or \code{c(4,3)}, the layout
of the graph will have this many rows and columns. If not set, the program will
select an appropriate layout. If the number of graphs exceed nine, you must
select the layout yourself, or you will get a maximum of nine per page.}
\item{ask}{If TRUE, ask the user before drawing the next plot; FALSE if don't ask.}
\item{\dots}{\code{residual.plots} passes these arguments to \code{resplot}.
\code{resplot}
passes them to \code{plot}. }
\item{varname}{Quoted variable name for the horizontal axis,
\code{"tukey"} by
default for Tukey's test and the plot versus fitted values.}
\item{type}{Type of residuals to be used. Pearson residuals are
appropriate for \code{lm} objects since there are equivalent to ordinary residuals
with ols and correctly weighted residuals with wls.}
\item{ylab}{Label for the y-axis. The default is the residual type.}
\item{add.quadratic}{if TRUE, fits the quadratic regression of the
vertical axis on the horizontal axis.}
\item{plot}{If TRUE, draw the plot(s).}
}
\details{
\code{residual.plots} draws all residuals plots, versus
each term specified first-order term in the model (interactions are
automatically skipped) and versus fitted values, and returns all the
curvature tests. \code{resplot}, which is called by \code{residual.plots},
should be viewed as an internal function, and is included here to display its
arguments, which can be used with \code{residual.plots} as well.
\code{resid.curv.test} computes the curvature test only. For any factors a
boxplot will be drawn.
}
\value{
Returns a data.frame with one row for each plot drawn, one column for
the curvature test statistic, and a second column for the corresponding
p-value. This function is used primarily for its side effect of drawing
residual plots.
}
\references{S. Weisberg (2005), \emph{Applied
Linear Regression}, third edition, Wiley, Chapter 8}
\author{Sanford Weisberg, \email{sandy@stat.umn.edu}}
\seealso{See Also \code{\link{lm}}}
\examples{
data(highway)
highway$Sigs <- (round(highway$Sigs*highway$Len)+1)/highway$Len
attach(highway)
d <- data.frame(Rate=Rate,logLen=logb(Len,2),
logADT=logb(ADT,2),logTrks=logb(Trks,2),
Slim=Slim,Shld=Shld,logSigs1=logb(Sigs,2))
attach(d)
m2 <- lm(Rate~logLen+logADT+logTrks+Slim+Shld+logSigs1,d)
residual.plots(m2)
}
\keyword{ hplot }% at least one, from doc/KEYWORDS
\keyword{ regression }% __ONLY ONE__ keyword per line
|
39c8adf067f55c03d5e4b92ef055fbc32641eb09
|
3c1e14e19ae1b763232dc9dae81d59958ab1225d
|
/plot2.R
|
823ddfe27a37f16922e579d6c54433df74dc6bed
|
[] |
no_license
|
rodmel/ExData_Plotting1
|
3ac6b47443fc3ba00f566afa20ceb2920db64c98
|
37fc2741fdc8ac7ce6671d104129657a95d2c4ea
|
refs/heads/master
| 2020-12-25T12:47:57.727048
| 2015-05-22T22:51:24
| 2015-05-22T22:51:24
| 21,773,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,340
|
r
|
plot2.R
|
## Exploratory Data Analysis
## version 1.0
## By Rodrigo Melgar - July 2014
##================================================================##
## Household energy usage over 2-day period in February 2007 ##
## Based on Individual household electric power consumption data ##
##================================================================##

library(data.table)

## Step 1: read the large file quickly with data.table::fread, keeping only
## the rows for the two target dates (Feb. 1-2, 2007)
##----------------------------------------------------------------------------
raw_subset <- fread("household_power_consumption.txt", sep=";"
                    , header=TRUE, colClasses="character"
                    , na.strings=c("?","NA")
                    )[Date=="1/2/2007" | Date=="2/2/2007"]

## Combine Date and Time into one POSIXct column and coerce the power
## reading to numeric
power_dt <- data.table(
  DateTime=as.POSIXct(strptime( paste(raw_subset$Date,raw_subset$Time)
                              , format="%d/%m/%Y %H:%M:%S"))
  ,Global_active_power = as.numeric(raw_subset$Global_active_power)
)
rm(raw_subset)           ## release the memory held by the raw subset
setkey(power_dt, DateTime)  ## key the table on DateTime

## Step 2: extract the columns to be plotted
##----------------------------------------------------------------------------
plot_df = as.data.frame( power_dt[, list(DateTime, Global_active_power)])
rm(power_dt)             ## tidy up unwanted object

## Step 3: render the line chart to a png file
##----------------------------------------------------------------------------
png(filename="plot2.png", width=480, height=480, bg="transparent")
plot( plot_df$DateTime, plot_df$Global_active_power, type="n"
      ,xlab="", ylab="Global Active Power (kilowatts)")
lines(plot_df$DateTime, plot_df$Global_active_power, col="black")
dev.off()
rm(plot_df)              ## tidy up unwanted object
|
f8af8c65f82657847339a673b19e6f24206ce753
|
ab3ba20d8a8d1deeb49a30c10186d44763afeccc
|
/man/nl_reverse.Rd
|
019cbbac62ea788be2e59d680155eadf23eea2d6
|
[] |
no_license
|
cran/nlgeocoder
|
f6c30f4e3d16226d9a1233e6d1e1a4b81aa6f26c
|
97c919b0f651eb01eaba9d521a95344409ba3e6e
|
refs/heads/master
| 2020-03-31T10:13:44.864225
| 2018-10-08T17:30:06
| 2018-10-08T17:30:06
| 152,127,650
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 225
|
rd
|
nl_reverse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nl_reverse.R
\name{nl_reverse}
\alias{nl_reverse}
\title{experimental reverse api}
\usage{
nl_reverse()
}
\description{
experimental reverse api
}
|
5c49f248fa847b2c17909191ad10db1c133f27d1
|
d63882da14a027b82bdc9b4906c77da037e204a0
|
/tests/testthat/test-function-calculate_operational_parameters_berlin_t.R
|
9b2ab8d47d54e18ca1d443f21e6acc6e8e5cae35
|
[
"MIT"
] |
permissive
|
KWB-R/aquanes.report
|
f0dd0db38b99bf26b94b5cd24f8a0d0aabbbccf8
|
7df99ab71b6fbdf05a7b64024d8f7555361e3903
|
refs/heads/master
| 2021-03-30T20:25:32.869920
| 2019-09-09T07:14:46
| 2019-09-09T07:14:46
| 83,431,353
| 0
| 3
| null | 2018-05-08T11:57:43
| 2017-02-28T12:44:26
|
R
|
UTF-8
|
R
| false
| false
| 221
|
r
|
test-function-calculate_operational_parameters_berlin_t.R
|
#
# This test file has been generated by kwb.test::create_test_files()
#
# Smoke test: calling the internal function with no arguments must raise an
# error (its required arguments are missing). This also guards against the
# function being removed or renamed in the package namespace.
test_that("calculate_operational_parameters_berlin_t() works", {
  expect_error(aquanes.report:::calculate_operational_parameters_berlin_t())
})
|
00337758fb660524cba92c3907a682d340eed3ce
|
0b63dedc1660e619fbea468ad3e34dbd383ccf94
|
/scratch.R
|
b4db29e4ce086bf550bb518710f95e8811852c25
|
[] |
no_license
|
OMahoneyM/GGBN_sample_query
|
cb76d8ce9419cd0f5238045d8d8d52dc351da7e7
|
4ca01f7761d7acdc58d1dea90a2125ab54d9be69
|
refs/heads/main
| 2023-03-13T21:24:55.633404
| 2021-03-11T19:01:13
| 2021-03-11T19:01:13
| 344,582,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,139
|
r
|
scratch.R
|
# needed to run fromJSON(),
library('jsonlite')
# needed to run flatten()
library('purrr')
#library('tidyverse')
# needed to run RCurl()
library('RCurl')
# needed to run rbind.fill()
library('plyr')
# needed to run the progress_bar
library('progress')
# needed to run str_replace_all()
library('stringr')
# needed to run write_tsv()
library('readr')
# The base URL used to query the API
base <- "http://data.ggbn.org/ggbn_portal/api/search?getSampletype&name="
# Load in the data containing the taxonomy to query
data <- read.csv('GP_kozloff_edits.csv', header = TRUE, sep = ",")
# Remove the header from "data"
names(data) <- NULL
# Extract species from "data" and store it as a vector "taxa"
## Note the double brackets "[[]]" in "data[[2]]" as it extracts the column
## from the data frame and returns it as a vector. You could also write it as
## "data[,2]" and get the same result. Simply writing "data[2]" returns a
## a sublist of the "data" with the class data.frame.
# NOTE(review): "test" is defined here but not used anywhere in the visible
# portion of this script -- confirm it is needed downstream or remove it.
test <- c("Hermissenda crassicornis", "Arthropoda", "Polychaeta","Cnidaria", "Annelida", "Echinodermata","Nudibranchia", "Asteroidea", "Nemertea","Nematoda", "Mollusca", "Copepoda")
taxa <- data[[2]]
# trims off leading and trailing whitespace
taxa <- trimws(taxa)
# Create an empty data frame to add the query output to
result <- data.frame()
# Create new instance of the progress bar with ETA
pb <- progress_bar$new(
  format = " downloading [:bar] :percent eta: :eta",
  total = length(taxa), clear = FALSE, width= 60)
# For loop that builds URL, makes GET request, and uses rbind.fill
# to append "request" variable to the data frame "result"
# the flatten() function allows null results to be coerced into dataframes
# NOTE(review): growing "result" with rbind.fill inside the loop copies the
# whole frame on every iteration (quadratic cost); collecting responses in a
# list and binding once afterwards would scale better. Also, 1:length(taxa)
# iterates c(1, 0) when "taxa" is empty -- seq_along(taxa) is safer.
for (i in 1:length(taxa)) {
  # Initiate progress bar with ETA
  pb$tick(0)
  pb$tick()
  # Query GGBN API and store flattened JSON response in request
  request <- flatten(fromJSON(getURL(URLencode(paste(base,taxa[i], sep = "")))))
  # use rbind.fill to append each request to the result dataframe and
  # autopopulate the null values with NA
  result <- rbind.fill(result, as.data.frame(request))
}
# Remove "fullScientificName_nc=" from the species query
result$filters <- str_replace_all(result$filters, "^.*=", "")
# write "result" to TSV
write_tsv(result, 'GGBN_Query_results_Complete.tsv', na = "NA")
# DONE
################################################################################
# --------------------- FUNCTIONIZE THE SCRIPT ABOVE --------------------------#
################################################################################
# turn above into two functions. one function for the loop
# and the second function for the data cleanup
# Query the GGBN API for available sample-type data for each taxon.
#
# taxa: character vector of taxonomic names to query.
# Returns a single data frame with one set of rows per taxon, in the same
# order as `taxa`; columns missing from an individual response are filled
# with NA by plyr::rbind.fill().
GGBN_query <- function(taxa) {
  # The base URL used to query the API
  base <- "http://data.ggbn.org/ggbn_portal/api/search?getSampletype&name="
  # Progress bar with ETA, ticked once per taxon
  pb <- progress_bar$new(
    format = " downloading [:bar] :percent eta: :eta",
    total = length(taxa), clear = FALSE, width = 60)
  pb$tick(0)
  # Preallocate one slot per taxon instead of growing a data frame inside
  # the loop (the old rbind.fill-per-iteration pattern is O(n^2) and, with
  # rbind.fill(request, result), also emitted results in reverse order).
  responses <- vector("list", length(taxa))
  for (i in seq_along(taxa)) {
    pb$tick()
    # Build the request URL, fetch it, and flatten the JSON response so
    # that null/empty results can still be coerced to a data frame.
    responses[[i]] <-
      base %>%
      paste0(taxa[i]) %>%
      URLencode() %>%
      getURL() %>%
      fromJSON() %>%
      flatten() %>%
      as.data.frame()
  }
  # Bind everything at once, NA-filling columns that differ between taxa.
  do.call(rbind.fill, responses)
}
df <- GGBN_query(test)
# DONE
################################################################################
# --------------------------- USING SAPPLY ------------------------------------#
################################################################################
# Turning the above code into a function that uses lapply instead of a for loop
# Load in the data containing the taxonomy to query
data <- read.csv('GP_kozloff_edits.csv', header = TRUE, sep = ",")
# The base URL used to query the API
base <- "http://data.ggbn.org/ggbn_portal/api/search?getSampletype&name="
# Extract species from "data" and store it as a vector "taxa"
## Note the double brackets "[[]]" in "data[[2]]" as it extracts the column
## from the data frame and returns it as a vector. You could also write it as
## "data[,2]" and get the same result. Simply writing "data[2]" returns a
## a sublist of the "data" with the class data.frame.
# trims off leading and trailing whitespace with trimws()
taxa_list <-
data[[2]] %>%
trimws()
# Create new instance of the progress bar with ETA
pb <- progress_bar$new(
format = " downloading [:bar] :percent eta: :eta",
total = length(taxa_list), clear = FALSE, width= 60)
# Query GGBN for available sample-type data for a single taxon.
#
# taxa: a single taxonomic name (scalar character), typically supplied one
#   element at a time via the sapply() call below this definition.
# NOTE(review): relies on two globals defined earlier in the script --
#   `base` (the API URL prefix) and `pb` (a progress bar shared across
#   calls) -- confirm both exist before calling.
# Returns the flattened JSON response as a data frame (the value of the
# final assignment).
GGBN_query <- function(taxa) {
  # Initiate progress bar with ETA
  pb$tick(0)
  # Update progress bar each time the function is run
  pb$tick()
  # Query GGBN API and store flattened JSON response in request
  request <-
    base %>%
    paste0(taxa) %>%
    URLencode() %>%
    getURL() %>%
    fromJSON() %>%
    flatten() %>%
    as.data.frame()
}
# Run sapply() to query each species in the taxa list using GGBN_query()
df <-
taxa_list %>%
sapply(FUN = GGBN_query)
# Convert df from a list of lists to a data.frame
df <- do.call(rbind.fill, df)
# Remove "fullScientificName_nc=" from the species query
df$filters <- str_replace_all(df$filters, "^.*=", "")
# write "result" to TSV
write_tsv(df, 'GGBN_Query_results_Complete.tsv', na = "NA")
# DONE
################################################################################
# -----------------If/else check on doc type and header -----------------------#
################################################################################
# writing function to check if data inputed was csv, tsv/txt
# also check is data has a header or not
wrapper_test <- function(data_file = NULL, head = NULL, column = NULL) {
# if else check for null values.
if (is.null(data_file) & is.null(head) & is.null(column)) {
print("You didn't enter any arguments... Are you even trying to use this function?")
} else if (is.null(data_file) & is.null(head)) {
print("data_file argument and head argument are both NULL Enter the correct values for both to proceed with function")
} else if (is.null(data_file) & is.null(column)) {
print("data_file argument and column argument are both NULL Enter the correct values for both to proceed with function")
} else if (is.null(head) & is.null(column)) {
print("head argument and column argument are both NULL Enter the correct values for both to proceed with function")
} else if (is.null(data_file)) {
print("data_file argument is NULL Please supply appropriate data file")
} else if (is.null(head)) {
print("head argument is NULL Please indicate the presence of table header with TRUE or FALSE")
} else if (is.null(column)) {
print("column argument is NULL Please indicate the header name or numeric column position of the taxonomic names to be queried")
} else {
if (data_file == 1) {
data <- data.frame(x = 1:4, y = 5:8, z = 9:12)
} else if (data_file == 2) {
data <- "neatwo"
} else {
print("Hello. We have been trying to reach you about your car's extended warranty")
}
if (column == 1) {
taxa_list <-
data[[column]] %>%
trimws()
} else {
print("Please enter TRUE or FALSE for the header argument and check the
spelling of the column argument")
}
}
}
wrapper_test()
wrapper_test(column = 1)
wrapper_test(head = TRUE)
wrapper_test(data_file = 1)
wrapper_test(head = TRUE, column = 1)
wrapper_test(data_file = 1, column = 1)
wrapper_test(data_file = 1, head = TRUE)
x <- wrapper_test(data_file = 1, head = TRUE, column = 1)
# Load in the data containing the taxonomy to query
data <- read.csv('GP_kozloff_edits_test.csv', header = TRUE, sep = ",")
column <- "scientificname_verbatim"
data4 <- data[[2]]
test_df <- data.frame(uno = 1:4,
test_header = 5:8)
test <- "test_header"
test2 <- 2
head <- "poo"
is.logical(head)
if (!is.logical(head)) {
print("do it right")
} else {
print("cool")
}
if (grepl("^.*\\.tsv|^.*\\.txt", test) == TRUE) {
print("success")
} else {
print("Hello. We have been trying to reach you about your car's extended warranty")
}
if (grepl("^.*\\.csv", data_file) == TRUE) {
data <- read.table(file = data_file, header = head, sep = ",")
} else if (grepl("^.*\\.tsv|^.*\\.txt", data_file) == TRUE) {
data <- read.table(file = data_file, header = head, sep = "\t")
} else {
print("Incorrect data format. Please load .csv, .tsv, or .txt file")
}
is.character(test2)
is.integer(test2)
test2 %% 1 == 0
is.numeric()
if (test %in% names(test_df) | is.numeric(column)) {
zoop <- "pass"
} else {
print("Please enter TRUE or FALSE for the header argument and check the spelling of the column argument")
}
if (head == FALSE & is.character(test) == TRUE) {
print("You entered FALSE for the header argument and a string for the column argument. Please check your file again and re-enter a vaild combination")
} else {
print("Please enter TRUE or FALSE for the header argument and check the spelling of your column argument")
}
# if column string is found in data_file colnames | column is int
grepl(column, colnames(data_file)) == TRUE | is.integer(column) == TRUE
# else if head == FALSE and column is string
# ---------------------------------------------------------------------------
# QC'd if statements to check if entered arguments are valid
# ---------------------------------------------------------------------------
if (grepl("^.*\\.csv", data_file) == TRUE) {
data <- read.table(file = data_file, header = head, sep = ",")
} else if (grepl("^.*\\.tsv|^.*\\.txt", data_file) == TRUE) {
data <- read.table(file = data_file, header = head, sep = "\t")
} else {
print("Incorrect data format. Please load .csv, .tsv, or .txt file")
}
if (column %in% names(data_file) || is.numeric(column)) {
taxa_list <-
data[[column]] %>%
trimws()
} else if (head == FALSE && is.character(column) == TRUE) {
print("You entered FALSE for the header argument and a string for the column
argument. Please check your file again and re-enter a vaild combination")
} else {
print("Please enter TRUE or FALSE for the header argument and check the
spelling of the column argument")
}
################################################################################
################# PUTTING EVERYTHING TOGETHER FOR REAL #########################
################################################################################
# Validate the arguments, load a .csv/.tsv/.txt taxonomy table, and query
# the GGBN API for every name in the chosen column.
#
# data_file: path to a .csv, .tsv or .txt file containing the taxa to query.
# head:      TRUE/FALSE, whether the file's first row is a header.
# column:    column name (string) or numeric position of the taxonomy column.
# Returns a data frame of GGBN responses (one block of rows per taxon) with
# the "fullScientificName_nc=" echo stripped from the `filters` column; on
# invalid input it prints a diagnostic message instead.
wrapper_test <- function(data_file = NULL, head = NULL, column = NULL) {
  # Validation check for NULL values function arguments
  if (is.null(data_file) & is.null(head) & is.null(column)) {
    print("You didn't enter any arguments... Are you even trying to use this function?")
  } else if (is.null(data_file) & is.null(head)) {
    print("data_file argument and head argument are both NULL Enter the correct values for both to proceed with function")
  } else if (is.null(data_file) & is.null(column)) {
    print("data_file argument and column argument are both NULL Enter the correct values for both to proceed with function")
  } else if (is.null(head) & is.null(column)) {
    print("head argument and column argument are both NULL Enter the correct values for both to proceed with function")
  } else if (is.null(data_file)) {
    print("data_file argument is NULL Please supply appropriate data file")
  } else if (is.null(head)) {
    print("head argument is NULL Please indicate the presence of table header with TRUE or FALSE")
  } else if (is.null(column)) {
    print("column argument is NULL Please indicate the header name or numeric column position of the taxonomic names to be queried")
  } else {
    # Validation check on file type for data_file argument and Boolean entry for
    # head argument. If pass the file is loaded as a data frame
    if (!is.logical(head)) {
      print("head argument was provided in incorrect format. Please enter either TRUE or FALSE")
    } else if (grepl("^.*\\.csv", data_file)) {
      data <- read.table(file = data_file, header = head, sep = ",")
    } else if (grepl("^.*\\.tsv|^.*\\.txt", data_file)) {
      data <- read.table(file = data_file, header = head, sep = "\t")
    } else {
      print("Incorrect data format. Please load .csv, .tsv, or .txt file")
    }
    # Validation check that column value can be found in header values if it is
    # a string or if it is a numeric value instead. If pass the taxa column is
    # extracted, white space is trimmed, and stored as taxa_list vector
    # FIX: the original tested `names(data_file)` -- the names of the path
    # string, which are always NULL -- so valid string column names were
    # rejected. Test the loaded table's names instead.
    if (column %in% names(data) || is.numeric(column)) {
      taxa_list <-
        data[[column]] %>%
        trimws()
      # Create new instance of the progress bar with ETA
      pb <- progress_bar$new(
        format = " downloading [:bar] :percent eta: :eta",
        total = length(taxa_list), clear = FALSE, width= 60)
      # The base URL used to query the API
      base <- "http://data.ggbn.org/ggbn_portal/api/search?getSampletype&name="
      # function that queries GGBN for available sample type data
      GGBN_query <- function(taxa) {
        # Initiate progress bar with ETA
        pb$tick(0)
        # Update progress bar each time the function is run
        pb$tick()
        # Query GGBN API and store flattened JSON response in request
        request <-
          base %>%
          paste0(taxa) %>%
          URLencode() %>%
          getURL() %>%
          fromJSON() %>%
          flatten() %>%
          as.data.frame()
      }
      # Run sapply() to query each species in the taxa list using GGBN_query()
      df <-
        taxa_list %>%
        sapply(FUN = GGBN_query)
      # Convert df from a list of lists to a data.frame
      df <- do.call(rbind.fill, df)
      # Remove "fullScientificName_nc=" from the species query
      df$filters <- str_replace_all(df$filters, "^.*=", "")
      return(df)
    } else if (head == FALSE && is.character(column) == TRUE) {
      print("You entered FALSE for the head argument and a string for the column argument. Please check your file again and re-enter a vaild combination")
    } else {
      print("Please enter TRUE or FALSE for the head argument and check the spelling of the column argument")
    }
  }
}
df <- wrapper_test(data_file = 'GP_kozloff_edits_test.csv', head = TRUE, column = 2)
# write "result" to TSV
write_tsv(df, 'GGBN_Query_results_Complete.tsv', na = "NA")
|
8be6c38cf0a39d3d0e3f55ff1c058b17e82a9436
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HWEintrinsic/examples/HWEdata-class.Rd.R
|
2c70166264fcdd6a893cef2b10c46517954b736a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
HWEdata-class.Rd.R
|
library(HWEintrinsic)
### Name: HWEdata-class
### Title: Class "HWEdata". Data specification for the Hardy-Weinberg
###   Testing Problem Using the Intrinsic Prior Approach.
### Aliases: HWEdata-class HWEdata initialize,HWEdata-method
### Keywords: classes methods
### ** Examples
# Example genotype counts -- assumes the ordering expected by the HWEdata
# constructor; TODO confirm against the package documentation.
data.tmp <- c(3, 9, 8)
# Construct the S4 data object consumed by the package's testing functions.
dataset <- new("HWEdata", data = data.tmp)
|
3fd3d8fe7d670312196d77d625d3e1367c71e772
|
2c751b858462d002f9afe0c31ea233dc2c148796
|
/plot3.R
|
248822bdabb92167f0c3aba9c0f0d717249c9b92
|
[] |
no_license
|
speedy3d/Coursera-ExploratoryDataAnalysis
|
78d0ec6aedf88618b333f1ed0ab11119a7e565d0
|
2891a9733166511a727ae8156fc498884571f76a
|
refs/heads/master
| 2020-05-17T01:23:32.092259
| 2014-11-23T19:46:01
| 2014-11-23T19:46:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
plot3.R
|
##Plot 3 - shows total PM2.5 emission for each of the years (1999,2002,2005,2008) in baltimore broken down by source
#get required library
library(ggplot2)
#set working directory
# NOTE(review): setwd() with a relative path ties the script to the launch
# directory; consider project-relative paths instead.
setwd("../Exploratory Data Analysis/Project 2")
#import data from PM2.5 emissions RDS file
NEI <- readRDS("summarySCC_PM25.rds")
#import data from source classification code table RDS file
SCC <- readRDS("Source_Classification_Code.rds")
#Subset baltimore only data (fips=24510)
baltimore_Subset <- subset(NEI, fips == "24510")
#Use aggregate function to find emission for 1999,2002,2005, and 2008 using baltimore subset of the data broken down by source
data_TotalEmissions <- aggregate(baltimore_Subset[c("Emissions")], list(type=baltimore_Subset$type, year=baltimore_Subset$year), sum)
#create the plot utilizing ggplot2 qplot method. Using smoother (method loess) to smooth connections between points
# NOTE(review): passing method= through qplot() relies on ...-forwarding to
# the smooth layer and is removed in current ggplot2 -- confirm the version.
png("plot3.png")
chart <- qplot(year, Emissions, data = data_TotalEmissions, geom=c("point","smooth"), method="loess", colour=type, ylab= "Emissions (PM 2.5)", xlab= "Year", main= "Total Emissions for Baltimore by Type")
print(chart)
dev.off()
|
2d9b723cd5795eaf388750614e4493447de06adf
|
62710fa1d384c68cce4f703c54782778c38e8032
|
/get_county_pop.R
|
a5d99165dc205dbcc0ad7ef30e19b477e5c0750e
|
[] |
no_license
|
kfaiola22/covid_maps
|
acdde5d2d0775a49a4d499b4858080f2a923c88d
|
8a89cd291294f0aba5e4ec8b061c84e7a0c3ff5c
|
refs/heads/master
| 2022-04-28T16:26:53.884944
| 2020-04-30T23:38:02
| 2020-04-30T23:38:02
| 260,337,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
get_county_pop.R
|
# Build county population and state boundary layers and cache them to disk.
# SECURITY FIX: the Census API key was previously hard-coded in this file
# (committed to source control); read it from the environment instead.
# Set CENSUS_API_KEY in .Renviron before running.
tidycensus::census_api_key(Sys.getenv("CENSUS_API_KEY"))
# create table of county demographics
options(tigris_use_cache = TRUE)
#county_map <- tigris::counties(class = "sf", cb = TRUE, resolution = "20m")
# B01003_001 is the ACS total-population variable; geometry = TRUE attaches
# county polygons to the result.
county_pop <- tidycensus::get_acs(
  geography = "county",
  variables = "B01003_001",
  geometry = TRUE,
  resolution = "5m"  # forwarded to the geometry download -- TODO confirm
)
#county_pop <- sf::st_transform(county_pop, crs = 2163 )
state_map <- tigris::states( cb = TRUE, resolution = "5m", class = "sf" )
save( county_pop, state_map, file = "stco_maps.RData" )
|
8abcba980754c0776d3c326f49e4f94ab2d47370
|
c529e1776d0073d1c122ee88def416f6f69d6c87
|
/R/stat-identity.r
|
6be2822f218426fd684a6a70130852899560f7d1
|
[] |
no_license
|
lixinyao/ggplot2
|
db919577d8c53bc0522b7c47d9d56cd10ff28452
|
7be4c8944bca845c9b9e189ec8c44231f6b4dc2b
|
refs/heads/master
| 2021-01-18T18:22:59.926529
| 2016-01-31T22:22:40
| 2016-01-31T22:22:40
| 50,810,838
| 1
| 0
| null | 2016-02-01T03:18:43
| 2016-02-01T03:18:43
| null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
stat-identity.r
|
#' Identity statistic.
#'
#' The identity statistic leaves the data unchanged.
#'
#' @param mapping The aesthetic mapping, usually constructed with
#' \code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set
#' at the layer level if you are overriding the plot defaults.
#' @param data A layer specific dataset - only needed if you want to override
#' the plot defaults.
#' @param geom The geometric object to use display the data
#' @param position The position adjustment to use for overlapping points
#' on this layer
#' @param show.legend logical. Should this layer be included in the legends?
#' \code{NA}, the default, includes if any aesthetics are mapped.
#' \code{FALSE} never includes, and \code{TRUE} always includes.
#' @param inherit.aes If \code{FALSE}, overrides the default aesthetics,
#' rather than combining with them. This is most useful for helper functions
#' that define both data and aesthetics and shouldn't inherit behaviour from
#' the default plot specification, e.g. \code{\link{borders}}.
#' @param ... other arguments passed on to \code{\link{layer}}. This can
#' include aesthetics whose values you want to set, not map. See
#' \code{\link{layer}} for more details.
#' @export
#' @examples
#' p <- ggplot(mtcars, aes(wt, mpg))
#' p + stat_identity()
stat_identity <- function(mapping = NULL, data = NULL,
                          geom = "point", position = "identity",
                          ...,
                          show.legend = NA,
                          inherit.aes = TRUE) {
  # Thin wrapper around layer(): the identity stat performs no statistical
  # transformation, so this simply wires StatIdentity into a new layer and
  # forwards every user-supplied argument unchanged.
  layer(
    data = data,
    mapping = mapping,
    stat = StatIdentity,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = FALSE,  # identity stat removes nothing, so NA handling is fixed
      ...
    )
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatIdentity <- ggproto("StatIdentity", Stat,
  # compute_layer is the whole-layer hook of the Stat class; returning
  # `data` unmodified makes this stat the identity transformation.
  compute_layer = function(data, scales, params) {
    data
  }
)
|
b5b6426fcc00b113b7014fade337f938bce4eba6
|
23b6a147398a1c5f376c4386de5731cb1f063637
|
/data/preprocessing/scripts/superSeq_summary.R
|
61e777b5308b326160bff213247e3ce4a539e249
|
[
"MIT"
] |
permissive
|
StoreyLab/superSeq-manuscript
|
fe6de4afb27205766b38cea57427f3cb807f8caa
|
99c27e0bbbcf5f692e17abacfeebe9beb07a9194
|
refs/heads/master
| 2021-07-30T23:58:27.831879
| 2021-07-29T02:05:49
| 2021-07-29T02:05:49
| 185,483,248
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
r
|
superSeq_summary.R
|
#! /usr/bin/env Rscript
# Summarise a saved subSeq subsampling run, annotate the summary with the
# experiment metadata passed on the command line, and write it to disk.
library(subSeq)
library(DESeq2)
library(dplyr)
library(qvalue)
library(data.table)
# Positional command-line arguments:
#   1 experiment name, 2 contrast name, 3 presample (numeric),
#   4 output folder, 5 input folder, 6 replication/trial index (numeric).
args <- commandArgs(trailingOnly = TRUE)
experiment <- args[1]
contrast <- args[2]
presample = as.numeric(args[3])
outfolder = args[4]
infolder = args[5]
trial = as.numeric(args[6])
# Loads the subSeq result object `ss` saved by the upstream script --
# TODO confirm the .rda actually contains an object named `ss`.
load(paste0(infolder, "/", experiment, "_", contrast, "_", trial, ".rda"))
ss_out <- summary(ss)
# Attach run metadata so summaries from many runs can be concatenated later.
ss_out$experiment <- experiment
ss_out$contrast <- contrast
ss_out$presample <- presample
ss_out$replication <- trial
write.table(ss_out, file = paste0(outfolder, "/", experiment, "_", contrast, "_", trial, ".summary"))
|
626d02d1bfaa0cbe051f9be968590d4cd06987b7
|
cbbbfb067b2189cb8a3f3c36d37f4f56092086b8
|
/man/filter.Rd
|
b2ab3c817cdb7b0ecad23ab7133ecee97f7e6dc3
|
[] |
no_license
|
mstewie/liveregr
|
cf7ae982cc312602a1a6e2aeb7d3656d8598f356
|
a533f84181515c5867cce87a0410e4cf6946dbd4
|
refs/heads/master
| 2020-03-28T20:53:08.222419
| 2018-09-17T11:34:35
| 2018-09-17T11:34:35
| 149,110,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,108
|
rd
|
filter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter.livereg.R
\name{filter}
\alias{filter}
\title{Filter CSO Live Register data}
\usage{
filter(object, gender = c("both", "male", "female"), age.group = c("all",
"under_25", "over_and_25"), start.year = 1967, end.year = 2018)
}
\arguments{
\item{object}{\code{livereg} S3 object}
\item{gender}{a character vector of gender choices: "both", "male", "female"}
\item{age.group}{a character vector of age group choices: "all", "under_25", "over_and_25"}
\item{start.year}{the start year of the data}
\item{end.year}{the end year of the data}
}
\value{
A filtered \code{livereg} S3 object which contains the live register unemployment data based on the input filtering parameters.
}
\description{
\code{filter} filters the CSO Live Register unemployment data based on certain input options. These options are gender, age group, start year, and end year.
}
\examples{
dat = load_livereg(use.offline.data = TRUE)
the_1990s_data = filter(dat, start.year = 1990, end.year = 1999)
female_data = filter(dat, gender = c("female"))
}
|
925fc2bf98bc07d829e770954e9064d5257b0f49
|
375e98a79ccefec3d226edbb3cb8e03e8e3b01a2
|
/man/parse_taxonomy_silva_128.Rd
|
851999a41d5b59a37021787227264cd6f4449f52
|
[
"MIT"
] |
permissive
|
sekingsley/MicrobiomeR
|
e1325c519e34bcbc32bf75cab0a65a4f0df07a67
|
dfc8f67d88f7a9dfaa15753a369df73ccc948396
|
refs/heads/master
| 2022-12-01T13:29:05.960005
| 2019-08-30T20:35:06
| 2019-08-30T20:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,243
|
rd
|
parse_taxonomy_silva_128.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyloseq.R
\name{parse_taxonomy_silva_128}
\alias{parse_taxonomy_silva_128}
\title{Parse elements of a taxonomy vector}
\usage{
parse_taxonomy_silva_128(char.vec)
}
\arguments{
\item{char.vec}{(Required). A single character vector of taxonomic
ranks for a single OTU, unprocessed (ugly).}
}
\value{
A character vector in which each element is a different
taxonomic rank of the same OTU, and each element name is the name of
the rank level. For example, an element might be \code{"Firmicutes"}
and named \code{"phylum"}.
These parsed, named versions of the taxonomic vector should
reflect embedded information, naming conventions,
desired length limits, etc; or in the case of \code{\link{parse_taxonomy_default}},
not modified at all and given dummy rank names to each element.
}
\description{
These are provided as both example and default functions for
parsing a character vector of taxonomic rank information for a single taxa.
As default functions, these are intended for cases where the data adheres to
the naming convention used by greengenes
the naming convention used by greengenes and silva.
(\url{http://greengenes.lbl.gov/cgi-bin/nph-index.cgi})
or where the convention is unknown, respectively.
To work, these functions -- and any similar custom function you may want to
create and use -- must take as input a single character vector of taxonomic
ranks for a single OTU, and return a \strong{named} character vector that has
been modified appropriately (according to known naming conventions,
desired length limits, etc.).
The length (number of elements) of the output named vector does \strong{not}
need to be equal to the input, which is useful for the cases where the
source data files have extra meaningless elements that should probably be
removed, like the ubiquitous
``Root'' element often found in greengenes/QIIME taxonomy labels.
In the case of \code{parse_taxonomy_default}, no naming convention is assumed and
so dummy rank names are added to the vector.
More usefully if your taxonomy data is based on greengenes, the
\code{parse_taxonomy_greengenes} function clips the first 3 characters that
identify the rank, and uses these to name the corresponding element according
to the appropriate taxonomic rank name used by greengenes
(e.g. \code{"p__"} at the beginning of an element means that element is
the name of the phylum to which this OTU belongs).
If your taxonomy data is based on SILVA, the \code{parse_taxonomy_silva_128} function
clips the first 5 characters that identify rank, and uses these to name the
corresponding element according to the appropriate taxonomic rank name used
by SILVA (e.g. \code{"D_1__"} at the beginning of an element means that element
is the name of the phylum to which this OTU belongs).
Alternatively you can create your own function to parse this data.
Most importantly, the expectations for these functions described above
make them compatible to use during data import,
specifically the \code{\link{import_biom}} function, but
it is a flexible structure that will be implemented soon for all phyloseq
import functions that deal with taxonomy (e.g. \code{\link{import_qiime}}).
}
\details{
This function is currently under PR review by phyloseq in a well supported
pull request: \url{https://github.com/joey711/phyloseq/pull/854}. If you use this function,
then please comment on the GitHub PR to encourage merging this feature.
}
\examples{
\dontrun{
> taxvec1 = c("Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Bacillales",
"f__Staphylococcaceae")
> parse_taxonomy_default(taxvec1)
> parse_taxonomy_greengenes(taxvec1)
> taxvec2 = c("Root;k__Bacteria;p__Firmicutes;c__Bacilli;o__Bacillales;f__Staphylococcaceae")
> parse_taxonomy_qiime(taxvec2)
> taxvec3 = c("D_0__Bacteria", "D_1__Firmicutes", "D_2__Bacilli", "D_3__Staphylococcaceae")
> parse_taxonomy_silva_128(taxvec3)
}
}
\seealso{
\code{\link[phyloseq:parseTaxonomy-functions]{parse_taxonomy_default}}
\code{\link[phyloseq:parseTaxonomy-functions]{parse_taxonomy_greengenes}}
\code{\link[phyloseq:parseTaxonomy-functions]{parse_taxonomy_qiime}}
\code{\link[phyloseq]{import_biom}}
\code{\link[phyloseq]{import_qiime}}
}
|
8abf8dcef3e03c941c6b6e2667e1cf149bcf54b0
|
7eb128f9b7899c33d4854009edbd38dd566cba72
|
/R Tutorials/Book spuRs/scripts/binom.sim.r
|
13023912554259adbd741d4796bfc9aa2933da32
|
[] |
no_license
|
chengjun/Research
|
1149add090ec563f544c4b5a886c01b1392a25d4
|
c01e3d2eac2bca74671abb9cd63e1b06e5566fc8
|
refs/heads/master
| 2021-06-15T16:35:15.005107
| 2019-08-06T09:03:29
| 2019-08-06T09:03:29
| 11,498,113
| 4
| 6
| null | 2021-04-15T09:27:11
| 2013-07-18T08:36:12
|
Mathematica
|
UTF-8
|
R
| false
| false
| 231
|
r
|
binom.sim.r
|
# program spuRs/resources/scripts/binom.sim.r
#
# Simulate a single draw from a Binomial(n, p) distribution by inversion:
# walk the CDF upward until it exceeds one uniform variate.
#
# n: number of trials (non-negative).
# p: success probability in [0, 1].
# Returns a single integer in 0..n.
binom.sim <- function(n, p) {
  stopifnot(n >= 0, p >= 0, p <= 1)
  # Degenerate cases: the recurrence below divides by (1 - p), so p == 1
  # would produce NaN and break the while condition. Handle both ends
  # explicitly: every trial succeeds / no trial succeeds.
  if (p == 1) return(n)
  if (p == 0) return(0)
  X <- 0
  px <- (1 - p)^n   # P(X = 0)
  Fx <- px          # running CDF value P(X <= X)
  U <- runif(1)
  while (Fx < U) {
    X <- X + 1
    # Recurrence: P(X = k) = P(X = k-1) * p/(1-p) * (n-k+1)/k
    px <- px * p / (1 - p) * (n - X + 1) / X
    Fx <- Fx + px
  }
  return(X)
}
|
f70d6f2ddef3adcbbc5038d59898ac0b1b9a6962
|
54c82ee4afc95ee439b9c625dfaeed76984db5af
|
/R/roundx_n.R
|
930948da5a83553c668e94c2583d18feb7d9d1fb
|
[
"MIT"
] |
permissive
|
johnaclouse/curios
|
06c48db7848c269332e216d7ae971ccda4d2def8
|
cfebfde9ed90cdeac565660d8b783a869ab10f38
|
refs/heads/main
| 2022-06-17T15:42:35.938675
| 2022-06-05T06:55:53
| 2022-06-05T06:55:53
| 167,911,423
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 741
|
r
|
roundx_n.R
|
#' Round values based on magnitude
#'
#' @param x A numeric vector to be rounded. Elements with absolute value
#'   >= 100 are rounded to 1 decimal place, elements in [1, 100) to 2
#'   decimal places, and elements below 1 to 3 decimal places.
#'
#' @return A numeric vector the same length as `x`, rounded per element.
#' @export
#'
#' @examples
#' library(curios)
#' roundx_n(c(0.1234, 1.1234, 100.123))
roundx_n <- function(x) {
  magnitude <- abs(x)
  ifelse(magnitude >= 100, round(x, 1),
         ifelse(magnitude >= 1, round(x, 2), round(x, 3)))
}
|
afaaa2d15a605200747a6c11923b0985e7452939
|
a7940556142d37c569ececafc29e08859778fafe
|
/analysis.R
|
be0313548190a2fa4b231a0f7924506f26ec7ef0
|
[
"MIT"
] |
permissive
|
info201a-w21/a3-lechauthaoquynh
|
fee3abe363ebbe04649084b22ee46d4cca20db37
|
66ef71d0ed46dd61d70a38ad5d575b4afbde2741
|
refs/heads/main
| 2023-03-14T14:19:27.280863
| 2021-03-02T07:07:39
| 2021-03-02T07:07:39
| 338,643,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,859
|
r
|
analysis.R
|
#Assignment 3: Incarceration trends
# Loading data -----------------------------------------------------------------
# Install and load the packages
library(tidyverse)
install.packages("maps")
install.packages("mapproj")
install.packages("patchwork")
library(maps)
library(mapproj)
library(patchwork)
# Load incarceration_trends.csv file
incarceration_trends <- read.csv(
"https://raw.githubusercontent.com/vera-institute/incarceration-trends/master/incarceration_trends.csv")
# Variable: Black Jail Population ----------------------------------------------

# Average black jail population across all counties in the most recent year.
recent_average_black_jail_pop <- incarceration_trends %>%
  filter(year == max(year, na.rm = TRUE)) %>%
  summarize(mean_black_jail = mean(black_jail_pop, na.rm = TRUE))

# Year in which the single highest black jail population was recorded.
year_highest_black_jail_pop <- incarceration_trends %>%
  filter(black_jail_pop == max(black_jail_pop, na.rm = TRUE)) %>%
  pull(year)

# Location (state, county) of that highest black jail population.
place_highest_black_jail_pop <- incarceration_trends %>%
  filter(black_jail_pop == max(black_jail_pop, na.rm = TRUE)) %>%
  select(state, county_name)

# Top 5 counties by total black jail population summed over all years.
top_5_places <- incarceration_trends %>%
  group_by(county_name) %>%
  summarize(total_in_each_county = sum(black_jail_pop, na.rm = TRUE)) %>%
  arrange(desc(total_in_each_county)) %>%
  slice(1:5)

# Full yearly records for those top 5 counties.
# FIX: left_join() has no `na.rm` argument (the original passed one); and
# missing values are dropped with is.na(), not by comparing to the string "NA".
top_5_chart <- top_5_places %>%
  left_join(incarceration_trends, by = "county_name") %>%
  filter(!is.na(black_jail_pop))

# Total black jail population in Washington state in the most recent year.
total_wa <- incarceration_trends %>%
  filter(state == "WA") %>%
  filter(year == max(year, na.rm = TRUE)) %>%
  summarize(total_black_jail = sum(black_jail_pop, na.rm = TRUE))

# Trends over time chart ------------------------------------------------------
# Scatter plus smoothed trend of black jail population, one colour per county.
black_jail_pop_over_time_in_top_5 <- ggplot(data = top_5_chart) +
  geom_point(mapping = aes(x = year, y = black_jail_pop, color = county_name)) +
  geom_smooth(mapping = aes(x = year, y = black_jail_pop, color = county_name),
              se = FALSE) +
  labs(x = "Year", y = "Black Jail Population", title = "Top 5 Counties Has
       Highest Black Jail Population Over Time") +
  scale_color_discrete("County Name")

# Variable comparison charts--------------------------------------------------
# Pair up black and white jail populations for a scatter comparison.
two_variables_data <- data.frame(incarceration_trends$black_jail_pop,
                                 incarceration_trends$white_jail_pop)
# Scatter of white vs. black jail population across all county-years.
variable_comparison_chart <- ggplot(data = two_variables_data) +
  geom_point(mapping = aes(x = incarceration_trends.black_jail_pop,
                           y = incarceration_trends.white_jail_pop)) +
  labs(x = "Black Jail Pop", y = "White Jail Pop", title = "Comparison between
       Black Jail Population and White Jail Population in All Counties")

# Map -------------------------------------------------------------------------
# Most recent year's records, used to fill the county choropleth.
black_jail_data <- incarceration_trends %>%
  filter(year == max(year, na.rm = TRUE))

# County polygons joined to FIPS codes ("region,subregion" becomes polyname).
join_map <- map_data("county") %>%
  unite(polyname, region, subregion, sep = ",") %>%
  left_join(county.fips, by = "polyname")

# Attach incarceration data to the polygons; drop counties with no data.
merge_map <- join_map %>%
  left_join(black_jail_data, by = "fips") %>%
  filter(!is.na(black_jail_pop))

# Minimalist theme stripping axes and grids so only the map shows.
blank_theme <- theme_bw() +
  theme(
    axis.line = element_blank(),        # remove axis lines
    axis.text = element_blank(),        # remove axis labels
    axis.ticks = element_blank(),       # remove axis ticks
    axis.title = element_blank(),       # remove axis titles
    plot.background = element_blank(),  # remove gray background
    panel.grid.major = element_blank(), # remove major grid lines
    panel.grid.minor = element_blank(), # remove minor grid lines
    panel.border = element_blank()      # remove border around plot
  )

# Choropleth of black jail population by county, yellow (low) to red (high).
black_jail_map <- ggplot(merge_map) +
  geom_polygon(
    mapping = aes(x = long, y = lat, group = group, fill = black_jail_pop),
    color = "gray", size = 0.3) +
  coord_map() +
  scale_fill_continuous(limits = c(0, max(merge_map$black_jail_pop)), na.value =
                          "white", low = "yellow", high = "red") +
  blank_theme +
  ggtitle("Black Jail Population in the U.S")
|
2c3377e16f16a8f95369340db15fcc5bfcbec913
|
94d1a45dfeeb0c6aa151e6d3a2a6d090fa82897d
|
/man/extract_tags.Rd
|
43de161bfef280147856047d6b8a649ad15be634
|
[] |
no_license
|
lwjohnst86/orgclockr
|
15c83b1b56b0299040b933baa29e576c6e5d3658
|
baaf62675967388eeb61171f2727edc41ca4379d
|
refs/heads/master
| 2021-01-22T01:34:43.430367
| 2015-05-07T09:53:41
| 2015-05-07T09:56:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,362
|
rd
|
extract_tags.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{extract_tags}
\alias{extract_tags}
\title{Extract the tags of an org file.}
\usage{
extract_tags(x, inherit = TRUE)
}
\arguments{
\item{x}{org object as character vector.}
\item{inherit}{logical; if \code{TRUE} (default), inherit tags
from level one headlines.}
}
\value{
the tags of an orgfile as a character vector.
}
\description{
This function is used to extract the tags matching the regexp
pattern.
}
\details{
If there is more than one tag for a headline, they will
be bundled in one character object separated by one space
character. The headlines without any tag will give a \code{NA}
value, effectively resulting in a vector of as many elements as
the number of headlines in the org file.
If the \code{inherit} parameter is set to TRUE (default), the
function will make use of tag inheritance given by the org-mode
variable \code{org-use-tag-inheritance}. So far, the inheritance
only works for level one tags.
}
\examples{
system.file("extdata", "sample.org", package = "orgclockr") \%>\%
readLines() \%>\%
extract_tags(inherit = FALSE)
## [1] "TagOne" "TagTwo" NA
## [4] NA "TagThree" "TagTwo"
## [7] "TagTwo" "TagOne TagThree TagTwo" NA
## [10] NA NA NA
}
|
09602caead12cf88cfbfeb5daa6ce655d7cf6dce
|
5667104d156cd74946ae4c6660926a89dc9aa552
|
/Pop Mod code(1).R
|
cf0d706ca36aec10f203e4b311400325601163a5
|
[] |
no_license
|
Robertcavaye/R-Practice-
|
b51921adec269531eb83c776da0e70d66905af75
|
c11b09235db41e5ee4724c7a91cbaa4a2e499a54
|
refs/heads/master
| 2021-04-30T04:18:03.749305
| 2018-02-18T13:42:24
| 2018-02-18T13:42:24
| 121,532,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,588
|
r
|
Pop Mod code(1).R
|
# Population modelling practical: exponential/logistic growth, matrix
# projection models, and eigen-analysis of the transition matrix.
# (The original began with rm(list = ls()) and later called setwd() on a
# hard-coded Windows path; both are anti-patterns, the script reads/writes
# no files, and every object it uses is defined here, so they were removed.)

# 2. Simple population models ---------------------------------------------------

# Exponential (density-independent) growth: N[t] = N[t-1] * (1 + R).
Years <- 100                  # duration of the simulation (years)
R <- 0.2                      # the population's growth rate
N <- c(10, rep(0, Years - 1)) # population sizes: initial size 10, rest filled below
for (t in 2:Years) N[t] <- N[t - 1] + N[t - 1] * R # step one year at a time
plot(1:Years, N, bty = "l", xlab = "Year", ylab = "Population size", type = "l")

# Logistic (density-dependent) growth: growth slows as N approaches K.
Years <- 100                  # duration of the simulation (years)
R <- 3.0                      # the population's maximum growth rate
K <- 100                      # carrying capacity
N <- c(10, rep(0, Years - 1))
for (t in 2:Years) N[t] <- N[t - 1] + N[t - 1] * R * (1 - N[t - 1] / K)
plot(1:Years, N, bty = "l", xlab = "Year", ylab = "Population size", type = "l")

# 3. The effect of growth rate ----------------------------------------------------
# For each growth rate, run the logistic model and keep only the last 100
# years of population sizes (after transients have died away).
Rs <- seq(1.5, 3, 0.01)
Years <- 250 # duration of the simulation
K <- 100     # carrying capacity
stable.sizes <- lapply(Rs, function(R) {
  N <- c(10, rep(0, Years - 1))
  for (t in 2:Years) N[t] <- N[t - 1] + N[t - 1] * R * (1 - N[t - 1] / K)
  sizes <- N[(Years - 99):Years] # last 100 years' worth of population sizes
  return(data.frame(PGR = R, Pop.sizes = sizes)) # one data frame per growth rate
})
stable.sizes <- do.call('rbind', stable.sizes) # bind all data frames together
# Colour points by whether they sit above (red) or below (blue) K.
stable.sizes$colour <- 'red'
stable.sizes$colour[stable.sizes$Pop.sizes < K] <- 'blue'
par(las = 1) # horizontal tick-mark labels
plot(stable.sizes$PGR, stable.sizes$Pop.sizes, cex = 0.3, pch = 19, col = stable.sizes$colour, xlab = 'Population growth rate', ylab = 'Stable sizes')

# 4. Matrices, simulations and population characteristics -------------------------
M <- matrix(c(0, 0.024, 0, 52, 0.08, 0.25, 279.5, 0, 0.43), nrow = 3) # transition matrix
N <- c(10, 10, 10) # note that this can also be written rep(10, 3)
N <- M %*% N       # NB: matrix multiplication is order-sensitive (M %*% N != N %*% M)

# set the duration of the simulation
Years <- 15
# define the transition matrix
M <- matrix(c(0, 0.024, 0, 52, 0.08, 0.25, 279.5, 0, 0.43), nrow = 3)
# population-size matrix: 3 stage classes (rows) x 'Years' columns; the first
# column holds the initial sizes (10 each), the rest are computed below
N <- matrix(c(rep(10, 3), rep(0, (Years - 1) * 3)), nrow = 3)
# compute population sizes
for (t in 2:Years) N[, t] <- M %*% N[, t - 1]
# plot the outcome on a log scale
par(las = 1)
plot(c(0, Years), c(1, max(N)), xlab = "Time", ylab = "Number", type = "n", log = "y", bty = "l")
cols <- c("black", "brown", "darkgreen")
for (L in 1:3) lines(1:Years, N[L, ], col = cols[L], lwd = 3)
# add a legend
labs <- c("Pre-juveniles", "Juveniles", "Adults")
legend(x = 0,            # x coordinate of the legend's top-left corner
       y = 3000000,      # y coordinate of the legend's top-left corner
       legend = labs,    # legend text
       lty = 1, lwd = 3, # line characteristics
       col = cols,       # line colours
       box.lty = 0)      # suppress the surrounding box

# Estimate the asymptotic growth rate (lambda) from the last two years.
lambda.est <- sum(N[, Years]) / sum(N[, Years - 1])
round(lambda.est, 2) # reduce to 2 decimal places
# Estimate the stable stage distribution (SSD).
SSD.est <- N[, Years] / sum(N[, Years])
round(SSD.est, 3)
# Estimate reproductive values: final total population reached when starting
# with a single individual in each stage class in turn.
Final <- numeric(3) # empty numeric vector of length 3
for (s in 1:3) {
  N[, 1] <- c(0, 0, 0)
  N[s, 1] <- 1
  for (t in 2:Years) N[, t] <- M %*% N[, t - 1]
  Final[s] <- sum(N[, Years])
}
RV.est <- Final / Final[1]
round(RV.est, 2)

# 5. Population characteristics and matrix properties -----------------------------
# Lambda is the dominant eigenvalue of M (Re() drops the zero imaginary part).
lambda.true <- eigen(M)$values[1]
( lambda.true <- round(Re(eigen(M)$values[1]), 2) )
# The SSD is the dominant right eigenvector, scaled to sum to 1.
SSD.true <- eigen(M)$vectors[, 1] / sum(eigen(M)$vectors[, 1])
( SSD.true <- round(Re(SSD.true), 3) )
# Reproductive values come from the dominant left eigenvector (eigen of t(M)).
RV.true <- eigen(t(M))$vectors[, 1]
RV.true <- Re(RV.true / RV.true[1]) # relative to the 1st stage class
round(RV.true, 3)

# 6. Sensitivities, elasticities and other uses of matrix models ------------------
M <- matrix(c(0, 0.024, 0, 52, 0.08, 0.25, 279.5, 0, 0.43), nrow = 3) # the matrix
w <- eigen(M)$vectors[, 1] / sum(eigen(M)$vectors[, 1])               # the SSD
v <- eigen(t(M))$vectors[, 1] / eigen(t(M))$vectors[1, 1]             # reproductive values
s <- 3 # number of life stages
# Summed product of RV and SSD over stage classes:
v.w <- sum(v * w)
# Sensitivity of lambda to each matrix element: sens[i, j] = v[i] * w[j] / <v, w>
sensitivity <- matrix(nrow = s, ncol = s, 0)
for (i in 1:s)
  for (j in 1:s)
    sensitivity[i, j] <- v[i] * w[j] / v.w
( sensitivity <- round(Re(sensitivity), 4) )
# Elasticities (proportional sensitivities) can then be computed by:
Lambda <- eigen(M)$values[1]
elasticity <- sensitivity * M / Lambda
( elasticity <- round(Re(elasticity), 4) )
|
74821a092f6e598b78d1ca5092f47ec1ae22bec3
|
ea35efa70d0a10b4b6708de564577da8e5b92680
|
/Prediction Models/LM_Model_for_edr.R
|
72f29fa972b2ecc7902352935b43bc9b8647037d
|
[] |
no_license
|
akuyper/minneMUDAC
|
57f6604da41d4bb4268a750f554560edd33a5d0a
|
f6bcf72fb65aa9e72ec8fcfd30e12751232a976f
|
refs/heads/master
| 2020-03-31T23:45:29.379818
| 2018-11-04T21:28:22
| 2018-11-04T21:28:22
| 152,668,414
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,612
|
r
|
LM_Model_for_edr.R
|
#### LM Model for edr (Precinct Level)
#### Ziyi Lu
# Fits per-year linear models of election-day registration (edr) on county
# covariates, averages their coefficients, and predicts edr for 2018.

# Load packages
library(tidyverse)
library(janitor)
library(skimr)
library(readxl)

# Load data --------------------------------------------------------------
data <- readRDS("data/processed/competition_data.rds") %>%
  select(-pctname, -signatures, -county, -date) # total = signatures + ab_mb
data$mailballot <- ifelse(data$mailballot == "Yes", 1, 0)
# FIX: funs() is defunct in dplyr >= 1.0; pass the function directly.
data <- mutate_all(data, as.numeric)

data2018 <- readRDS("data/processed/county_dat_2018.rds") %>% rename(countycode = COUNTYCODE)
voter2018_reg7am <- readRDS("data/processed/voter2018.rds") #%>% select_if(~sum(!is.na(.)) > 0)
data_2018 <- voter2018_reg7am %>%
  left_join(data2018, by = c("countycode"))

#### Precinct-level training data: one frame per election year
df2010 <- data %>%
  filter(year == 2010)
df2012 <- data %>%
  filter(year == 2012)
df2014 <- data %>%
  filter(year == 2014)
df2016 <- data %>%
  filter(year == 2016)

# Shared regression formula: edr on registration plus all FRED covariates.
# (Hoisted out of the four lm() calls, which previously duplicated it.)
edr_formula <- edr ~ reg7am + age_med + assoc_deg + buildings + burd_house +
  commute + discon_yth + eq_sprime + hisp_latin +
  homeowner + house_singp + inc_ineq + income_med +
  income_percap + income_person + perc_pop_u18 + pop_est +
  pop_perc + pop_res + pop_u18 + poverty + poverty_u18 + prec_rel_chil_518 +
  privat_ests + race_white + racical_dissim + rel_chil_518 + undergrad + unemployment

# One linear model per election year.
mod_2010 <- lm(edr_formula, data = df2010)
mod_2012 <- lm(edr_formula, data = df2012)
mod_2014 <- lm(edr_formula, data = df2014)
mod_2016 <- lm(edr_formula, data = df2016)

lm_coef_2010 <- as.matrix(mod_2010$coefficients)
lm_coef_2012 <- as.matrix(mod_2012$coefficients)
lm_coef_2014 <- as.matrix(mod_2014$coefficients)
lm_coef_2016 <- as.matrix(mod_2016$coefficients)
# Weighted average of coefficients: midterm years (2010, 2014) get weight
# 1/3 each, presidential years (2012, 2016) 1/6 each.
lm_coef <- (1/3)*lm_coef_2014 + (1/3)*lm_coef_2010 + (1/6)*lm_coef_2016 + (1/6)*lm_coef_2012

# Design matrix for 2018: same covariates, plus a leading column of 1s for
# the intercept.
test_data <- data_2018 %>%
  ungroup() %>%
  select(reg7am, age_med, assoc_deg, buildings, burd_house,
         commute, discon_yth, eq_sprime, hisp_latin,
         homeowner, house_singp, inc_ineq, income_med,
         income_percap, income_person, perc_pop_u18, pop_est,
         pop_perc, pop_res, pop_u18, poverty, poverty_u18, prec_rel_chil_518,
         privat_ests, race_white, racical_dissim, rel_chil_518, undergrad, unemployment)
test_data <- as.matrix(data.frame(1, test_data))

# Predicted edr, floored at zero (a count cannot be negative).
edr_hat <- as.vector(test_data %*% lm_coef)
edr_hat <- pmax(edr_hat, 0)

voter2018_edr_lm <- voter2018_reg7am %>%
  add_column(edr = edr_hat)
saveRDS(voter2018_edr_lm, "data/processed/voter2018_edr_lm.rds")
|
cea7eaa6a84c37122810e1f80a184f4cca5377ca
|
cbe6eb38d57711b27fb63bfc8768d02e21f501ef
|
/plot4.R
|
180502c3ae7442f679996da37b0194f190dc4831
|
[] |
no_license
|
Rongbin-Ye-94/JHU-DS-Explantory-Data-Analysis
|
3e03fec717a11a0284a14dffde357eed14d38317
|
6fcfafcd5f0358e6c733f5d6bb9eabbcf0900c68
|
refs/heads/master
| 2020-06-12T23:14:32.615977
| 2019-07-01T01:57:01
| 2019-07-01T01:57:01
| 194,525,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,510
|
r
|
plot4.R
|
# plot4.R: four-panel summary of the UCI household power consumption data
# for 2007-02-01 and 2007-02-02, copied to plot4.png (480x480).

# Download and unzip the data set on first run only.
FileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
Dest_1 <- "Elec_UCI.zip"
if (!file.exists(Dest_1)) {download.file(FileUrl, destfile = Dest_1, method = "curl")}
if (file.exists(Dest_1)) {unzip(Dest_1)}

library(grDevices)

# Loading data: "?" marks missing values in this file.
data_full <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';', na.strings = "?", nrows = 2075259, check.names = FALSE, stringsAsFactors = FALSE, comment.char = "", quote = '\"')
data_1 <- subset(data_full, Date %in% c("1/2/2007", "2/2/2007"))
data_1$Date <- as.Date(data_1$Date, format = "%d/%m/%Y")
datetime <- paste(as.Date(data_1$Date), data_1$Time)
data_1$Datetime <- as.POSIXct(datetime)

# Plot 4: 2x2 grid of panels.  (The panel labels below were bare top-level
# strings in the original, which R auto-prints when the script is run;
# comments are the intended form.)
par(mfcol = c(2, 2))
# Subgraph 1: global active power over time
plot(data_1$Datetime, data_1$Global_active_power, xlab = "date", ylab = "Global Active Power(kilowatts)", type = "l")
# Subgraph 2: the three energy sub-metering series with a legend
with(data_1,
     {plot(data_1$Sub_metering_1~data_1$Datetime, col = "black", xlab = "", ylab = "Energy Sub Metering", type = "l")
       lines(Sub_metering_2~Datetime, col ="red")
       lines(Sub_metering_3~Datetime, col = "blue")
       legend("topright", col = c("black","red","blue"), lty = 1, lwd = 2, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
     })
# Subgraph 3: voltage over time
plot(data_1$Voltage~data_1$Datetime, type = "l", xlab = "datetime", ylab = "Voltage")
# Subgraph 4: global reactive power over time
plot(data_1$Global_reactive_power~data_1$Datetime, type = "l", xlab = "datetime", ylab = "Global Reactive Power")
# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png, file = "plot4.png", height = 480, width = 480)
dev.off()
|
ecf2c4f23800821c11dd895e905e9b7338625475
|
31b0773626f57cf0a4d2e4007b80ba853613c99c
|
/R/metaROC.R
|
858130298ae0b6196611c5eabbfd060ff9f3db3b
|
[] |
no_license
|
cran/nsROC
|
d63918034cce7b9e7f6a89f7545282df45e70518
|
653970764286dee9054ceb92a003dcb2d7d655d7
|
refs/heads/master
| 2021-01-21T23:16:35.840779
| 2018-08-07T08:10:03
| 2018-08-07T08:10:03
| 95,219,628
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,851
|
r
|
metaROC.R
|
# S3 generic for summary ROC meta-analysis; dispatches on the class of
# `data`.  See metaROC.default for the data-frame implementation.
metaROC <- function(data, ...) {
  UseMethod("metaROC")
}
metaROC.default <- function(data, Ni=1000, model=c("fixed-effects", "random-effects"), plot.Author=FALSE, plot.bands=TRUE, plot.inter.var=FALSE, cex.Author=0.7, lwd.Author=12, col.curve='blue', col.bands='light blue', alpha.trans=0.5, col.border='blue', ...){
  # Summary ROC meta-analysis from per-study 2x2 counts.
  #
  # Args:
  #   data:          data frame with at least 'Author', 'TP', 'TN', 'FP', 'FN'.
  #                  Each row is one cut-off of one study; rows sharing an
  #                  Author must share the same numbers of positives/negatives.
  #   Ni:            number of grid points on [0,1] for the FPR axis.
  #   model:         "fixed-effects" or "random-effects" pooling.
  #   plot.Author:   plot each study's empirical ROC points/curves.
  #   plot.bands:    shade the pointwise 95% confidence band.
  #   plot.inter.var: (random-effects only) plot the inter-study variability.
  #   cex.Author, lwd.Author, col.*, alpha.trans: graphical settings.
  #   ...:           further arguments passed on to plot().
  #
  # Returns: a list with the FPR grid t, the monotonised summary curve sRA,
  # its pointwise SE, the Youden-optimal (Sp, Se) pair, the AUC, per-study
  # curves and weights, and the model used.

  # Check if input arguments are correct
  model <- match.arg(model)
  if(!is.numeric(Ni) || length(Ni)!=1 || Ni%%1!=0 || Ni <= 0){
    stop("Ni should be a positive integer.")
  }
  if(!is.numeric(alpha.trans) || length(alpha.trans)!=1 || alpha.trans < 0 || alpha.trans > 1){
    stop("alpha.trans should be a number in the unit interval.")
  }
  if(!is.data.frame(data) || sum(names(data)=="Author")!=1 || sum(names(data)=="TP")!=1 || sum(names(data)=="TN")!=1 || sum(names(data)=="FP")!=1 || sum(names(data)=="FN")!=1){
    stop("data should be a data frame with at least 5 variables called 'Author', 'TP', 'TN', 'FP' and 'FN'.")
  }

  # Per-row totals and empirical true/false-positive rates.
  n <- data$n <- data$TP+data$FN
  m <- data$m <- data$FP+data$TN
  data$tpr <- data$TP/n
  data$fpr <- data$FP/m

  S <- unique(data$Author)
  cat("Number of papers included in meta-analysis:", length(S), '\n', sep=' ')

  # Validate (for side effect only): each Author's rows must agree on n and m.
  sapply(S, function(i){
    data.S <- data[data$Author==i,]
    n.S <- dim(data.S)[1]
    if(!identical(data.S$n, rep(data.S$n[1], n.S))){
      stop("The number of positives does not coincide for some Author. Check it.")
    }else if(!identical(data.S$m, rep(data.S$m[1], n.S))){
      stop("The number of negatives does not coincide for some Author. Check it.")
    }
  })

  # Plot pairs (False-Positive Rate, True-Positive Rate) for each Author
  if(plot.Author){
    plot(data$fpr, data$tpr, xlim=c(0,1), ylim=c(0,1), lwd=lwd.Author, pch=1, col='gray', xlab="False-Positive Rate", ylab="True-Positive Rate", cex.lab=1.5, main=paste("ROC curve (",model," model)", sep=''), ...)
    axis(1, at=seq(0,1,0.01), labels=FALSE, tck=-0.01)
    axis(1, at=seq(0,1,0.1), labels=FALSE, tck=-0.02)
    axis(2, at=seq(0,1,0.01), labels=FALSE, tck=-0.01)
    axis(2, at=seq(0,1,0.1), labels=FALSE, tck=-0.02)
  }

  index.ord <- order(data$Author, data$fpr, data$tpr)
  data <- data[index.ord,]
  n.points <- dim(data)[1]

  # Common FPR grid; each study's ROC is linearly interpolated onto it.
  t <- seq(0,1,length=Ni)
  roc.j <- sapply(S, function(j){
    f <- approxfun(c(0,data$fpr[data$Author==j],1), c(0,data$tpr[data$Author==j],1), ties="ordered") # If there exist a point (1,Se) with Se<1, it will not be considered
    if(plot.Author){lines(c(0,t,1), c(0,f(t),1), col='gray')}
    f(t)})

  if(plot.Author){
    for (i in 1:n.points) {
      text(data$fpr[i],data$tpr[i],data$Author[i],cex=cex.Author)
    }
  }

  # Variance of fixed-effects model ROC estimate (for each Author).
  # The ROC derivative is approximated by 1 (constant placeholder matrix).
  der.roc.j <- matrix(1, Ni, length(S))
  size.j <- sapply(S, function(j){
    M <- m[min(which(data$Author==j))]
    N <- n[min(which(data$Author==j))]
    c(M,N)})
  M <- matrix(rep(size.j[1,],Ni),Ni,length(S),byrow=TRUE)
  N <- matrix(rep(size.j[2,],Ni),Ni,length(S),byrow=TRUE)
  # FIX: this matrix was named T in the original, shadowing the TRUE shorthand.
  t.mat <- matrix(rep(t,length(S)),Ni,length(S))
  var.j <- (der.roc.j)^2*t.mat*(1-t.mat)/M + (roc.j)*(1-roc.j)/N
  var.j[1,] <- var.j[2,] # To avoid infinite weights (var.j=0 iff t=0 or t=1)
  var.j[Ni,] <- var.j[(Ni-1),]

  # Inverse-variance weights and their row sums.
  w.j <- 1/var.j
  W <- apply(w.j, 1, sum)

  cat("Model considered:", model, '\n', sep=' ')

  # ROC curve estimate (fixed-effects model); sRA is its monotonised version.
  RA.fem <- apply(w.j*roc.j, 1, sum)/W
  sRA.fem <- RA.fem
  for(i in 1:(Ni-1)){sRA.fem[i+1] <- max(sRA.fem[i],sRA.fem[i+1])}

  # Plot ROC curve estimate
  if(model=="fixed-effects"){
    se.RA.fem <- W^(-1/2)
    if(!plot.Author){
      plot(t, sRA.fem, 'l', xlim=c(0,1), ylim=c(0,1), lwd=1, col=col.curve, xlab="False-Positive Rate", ylab="True-Positive Rate", cex.lab=1.5, main=paste("ROC curve (",model," model)", sep=''), ...)
      abline(0,1,col='gray', lty=2)
      axis(1, at=seq(0,1,0.01), labels=FALSE, tck=-0.01)
      axis(1, at=seq(0,1,0.1), labels=FALSE, tck=-0.02)
      axis(2, at=seq(0,1,0.01), labels=FALSE, tck=-0.01)
      axis(2, at=seq(0,1,0.1), labels=FALSE, tck=-0.02)
      abline(0, 1, col='gray')
    }
    RA <- RA.fem; sRA <- sRA.fem; se.RA <- se.RA.fem
  }else if(model=="random-effects"){
    # Inter-study variability (tau^2), estimated around the fixed-effects curve.
    inter.var <- apply(w.j*(roc.j - RA.fem)^2, 1, sum)/W # RA.fem is considered to compute tau^2
    inter.var[1] <- inter.var[2]
    inter.var[Ni] <- inter.var[Ni-1]
    w.j.rem <- 1/(var.j + inter.var)
    W.rem <- apply(w.j.rem, 1, sum)
    # ROC curve estimate (random-effects model)
    RA.rem <- apply(w.j.rem*roc.j, 1, sum)/W.rem
    sRA.rem <- RA.rem
    for(i in 1:(Ni-1)){sRA.rem[i+1] <- max(sRA.rem[i],sRA.rem[i+1])}
    se.RA.rem <- (apply((w.j.rem)^2*var.j, 1, sum)/(W.rem^2))^(1/2)
    if(!plot.Author){
      plot(t, sRA.rem, 'l', xlim=c(0,1), ylim=c(0,1), lwd=1, col=col.curve, xlab="False-Positive Rate", ylab="True-Positive Rate", cex.lab=1.5, main=paste("ROC curve (",model," model)", sep=''), ...)
      abline(0, 1, col='gray', lty=2)
      axis(1, at=seq(0,1,0.01), labels=FALSE, tck=-0.01)
      axis(1, at=seq(0,1,0.1), labels=FALSE, tck=-0.02)
      axis(2, at=seq(0,1,0.01), labels=FALSE, tck=-0.01)
      axis(2, at=seq(0,1,0.1), labels=FALSE, tck=-0.02)
      abline(0, 1, col='gray')
    }
    RA <- RA.rem; sRA <- sRA.rem; se.RA <- se.RA.rem
  }

  # Helper: colour with an alpha channel (0-255) applied.
  makeTransparent <- function(Color, alpha=255){
    newColor <- col2rgb(Color)
    apply(newColor, 2, function(col){
      rgb(red=col[1], green=col[2], blue=col[3], alpha=alpha, maxColorValue=255)
    })
  }

  # Plot pointwise 95% confidence band around the summary curve.
  if(plot.bands){
    polygon(c(t,rev(t)),c(sRA-1.96*se.RA,rev(sRA+1.96*se.RA)),col=makeTransparent(col.bands,alpha.trans*255), border=makeTransparent(col.border,alpha.trans*255))
  }

  lines(c(0,t,1), c(0,sRA,1), lwd=2, col=col.curve)
  abline(0, 1, col='gray', lty=2)

  # Area under the curve estimate (AUC), by the trapezoidal rule.
  auc <- mean(sRA[-1]+sRA[-Ni])/2
  text(0.8, 0.1, paste("AUC =", round(auc,3)))
  cat("The area under the summary ROC curve (AUC) is ", round(auc,3),".\n", sep="")

  # Youden index: the grid point maximising Se + Sp - 1.
  ind.youden <- which.max(1-t+sRA)
  youden.index <- c(1-t[ind.youden], sRA[ind.youden])
  cat("The optimal specificity and sensitivity (in the Youden index sense) for summary ROC curve are ", round(youden.index[1],3)," and ", round(youden.index[2],3),", respectively.\n", sep="")

  # Plot inter-study variability estimate
  if(model=="random-effects" && plot.inter.var){
    dev.new()
    par(mar=c(6,5,4,2))
    plot(t, inter.var, 'l', lwd=2, xlab="t", ylab=expression(tau[M]^2~(t)), cex.lab=1.2, main="Inter-study variability")
  }

  results <- list(data=data, t=t, sRA=sRA, RA=RA, se.RA=se.RA, youden.index=youden.index, auc=auc, roc.j=roc.j, w.j=w.j, model=model)
  if(model=="random-effects"){
    results <- list(data=data, t=t, sRA=sRA, RA=RA, se.RA=se.RA, youden.index=youden.index, auc=auc, roc.j=roc.j, w.j=w.j, w.j.rem=w.j.rem, inter.var=inter.var, model=model)
  }
  results
}
|
9994d78a2aca44bad04a8d00cdf5b4ccc82c026e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ramps/examples/corRExp2.Rd.R
|
c38996a3358c3cbd176632276a39fa422c569e5c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
corRExp2.Rd.R
|
library(ramps)
### Name: corRExp2
### Title: Non-Separable Exponential Spatio-Temporal Correlation Structure
### Aliases: corRExp2
### Keywords: models
### ** Examples

# Uninitialized structure over spatial coordinates x, y and time t.
sp1 <- corRExp2(form = ~ x + y + t)
# Five points along the diagonal of the unit cube (space-time grid).
spatDat <- data.frame(x = (0:4)/4, y = (0:4)/4, t=(0:4)/4)
# Euclidean metric (default): supply range parameters, initialize on the
# data, then extract the resulting correlation matrix.
cs1Exp <- corRExp2(c(1, 1, 1), form = ~ x + y + t)
cs1Exp <- Initialize(cs1Exp, spatDat)
corMatrix(cs1Exp)
# Same structure with the Manhattan ("man") distance metric.
cs2Exp <- corRExp2(c(1, 1, 1), form = ~ x + y + t, metric = "man")
cs2Exp <- Initialize(cs2Exp, spatDat)
corMatrix(cs2Exp)
|
d65ba03c81651d1194dd190750cf510e8195b832
|
3653a5e85dca41ca724b03c83fad08e92c433244
|
/Leaflet.R
|
b03c262b691beacdc896f025a0afeb046d985ebf
|
[] |
no_license
|
ChanningC12/Developing-Data-Products
|
3742e035ddd05403673c4e329cb44ebe175422ec
|
a3165c3ebf12a6bbd8e207dafbded7c216dfffad
|
refs/heads/master
| 2020-07-02T09:53:23.507313
| 2016-11-21T01:44:09
| 2016-11-21T01:44:09
| 74,312,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,755
|
r
|
Leaflet.R
|
# Leaflet: JavaScript library bindings for creating interactive maps.
# Guard the install so sourcing the script does not reinstall every time.
if (!requireNamespace("leaflet", quietly = TRUE)) install.packages("leaflet")
library(leaflet)

# Base map with default OpenStreetMap tiles.  The pipe x %>% f() is
# equivalent to f(x), so this is addTiles(leaflet()).
my_map <- leaflet() %>%
  addTiles()
my_map

# Adding a single marker with a popup.
my_map <- my_map %>%
  addMarkers(lat = 39.2980803, lng = -76.5898801,
             popup = "Jeff Leek's Office")
my_map

# Adding many markers at random locations.
set.seed(2016-11-20)
# FIX: the original used min = 39.2, max = 39.2, so every latitude was the
# same constant; 39.2-39.3 gives the intended random scatter.
df <- data.frame(lat = runif(20, min = 39.2, max = 39.3),
                 lng = runif(20, min = -76.6, max = -76.5))
df %>%
  leaflet() %>%
  addTiles() %>%
  addMarkers()

# Making custom markers.
# FIX: iconWidth was 31*215*230 (a product of ~1.5e6 pixels); the intended
# aspect-ratio scaling is 31 * 215 / 230, matching iconAnchorX below.
hopkinsIcon <- makeIcon(
  iconUrl = "http://brand.jhu.edu/content/uploads/2014/06/university.shield.small_.blue_.png",
  iconWidth = 31*215/230, iconHeight = 31,
  iconAnchorX = 31*215/230/2, iconAnchorY = 16
)
hopkinsLatLong <- data.frame(lat = c(39.2973166, 39.3288851, 39.2906617, 39.2970681, 39.2824806),
                             lng = c(-76.5929798, -76.6206598, -76.5469683, -76.6150537, -76.6016766))
hopkinsLatLong %>%
  leaflet() %>%
  addTiles() %>%
  addMarkers(icon = hopkinsIcon)

# Mapping clusters: 500 random points grouped into zoom-dependent clusters.
df <- data.frame(lat = runif(500, min = 39.25, max = 39.35),
                 lng = runif(500, min = -76.65, max = -76.55))
df %>%
  leaflet() %>%
  addTiles() %>%
  addMarkers(clusterOptions = markerClusterOptions())

# Circle markers instead of pins.
df_20 <- data.frame(lat = runif(20, min = 39.25, max = 39.35),
                    lng = runif(20, min = -76.65, max = -76.55))
df_20 %>%
  leaflet() %>%
  addTiles() %>%
  addCircleMarkers()

# Drawing circles scaled by city population.
md_cities <- data.frame(name = c("Baltimore", "Frederick", "Rockville", "Gaithersburg", "Bowie", "Hagerstown",
                                 "Annapolis", "College Park", "Salisbury", "Laurel"),
                        pop = c(619493, 66169, 62334, 61045, 55232, 39890, 38880, 30587, 30484, 25346),
                        latitude = c(39.292592, 39.4143921, 39.0840, 39.1434, 39.0068, 39.6418, 38.9784, 38.9897,
                                     38.3607, 39.0993),
                        longitude = c(-76.6077852, -77.4204875, -77.1528, -77.2014, -76.7791, -77.7200, -76.4922,
                                      -76.9378, -75.5994, -76.8483))
md_cities %>%
  leaflet() %>%
  addTiles() %>%
  addCircles(weight = 1, radius = sqrt(md_cities$pop) * 30)

# Drawing rectangles from two corner coordinates.
leaflet() %>%
  addTiles() %>%
  addRectangles(lat1 = 37.3858, lng1 = -122.0595,
                lat2 = 37.3890, lng2 = -122.0625)

# Adding legends.  NOTE(review): the legend pairs labels A/B/C with
# blue/red/green, which does not necessarily match the marker colours
# sampled into df$col -- kept as in the original demo.
df <- data.frame(lat = runif(20, min = 39.25, max = 39.35),
                 lng = runif(20, min = -76.65, max = -76.55),
                 col = sample(c("red", "blue", "green"), 20, replace = TRUE),
                 stringsAsFactors = FALSE)
str(df)
df %>%
  leaflet() %>%
  addTiles() %>%
  addCircleMarkers(color = df$col) %>%
  addLegend(labels = LETTERS[1:3], colors = c("blue", "red", "green"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.