blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1278b51918b453a40540bc95a61aa59390998f95
|
3d93a8b31c1ca565df5a8dfd0079a8892da44e63
|
/1M_project.R
|
2bce348c349619b7ecba0d541c102e34e8d310e7
|
[] |
no_license
|
KFc4dinnah/1M_Project
|
bf79dac596414eb92765f73109a270eb74e7ddec
|
e30c088141864a7ef0f51aa7ba9ab991a40bb44a
|
refs/heads/main
| 2023-08-23T02:39:32.792274
| 2021-09-29T07:27:04
| 2021-09-29T07:27:04
| 411,541,272
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,384
|
r
|
1M_project.R
|
# 1M_project.R -- customer service-usage analysis:
#   1. ("SQL test")  per-user summary of first, second and last service used
#   2. ("Analysis")  customer age = days between first and last service
#   3. market-basket association rules over services used on the same day
#   4. (bonus) distinct services used per day, plotted over time
#
# NOTE(review): input/output paths are absolute Windows paths; parameterize
# them before running on another machine.

# BUG FIX: attach plyr BEFORE dplyr/tidyverse. The original script loaded
# plyr last, which masks dplyr's arrange/count/mutate/rename/summarise and
# breaks the grouped count() and the rename(new = old) calls below.
# ddply() is unaffected by the new order because dplyr does not define it.
library(plyr)
library(dplyr)
library(tidyverse)
library(readxl)
library(lubridate)
library(arules)
library(arulesViz)
#import the data
df <- read_excel("C:/Users/khiem.phung/Downloads/Test_Data_Skill.xlsx",
                 sheet = "Data")
###SQL test
#first two services and the date: sort by Date, keep first two rows per user
df_first2 <- df %>%
  select(User_id,Serviceid,Date) %>%
  group_by(User_id) %>%
  arrange(Date) %>%
  group_by(User_id) %>%
  slice(1:2)
#last service and the date
df_last <- df %>%
  select(User_id,Serviceid,Date) %>%
  group_by(User_id) %>%
  arrange(desc(Date)) %>%
  group_by(User_id) %>%
  slice(1)
#number of distinct service ids each user has used
df_service <- df %>%
  distinct(User_id, Serviceid) %>%
  group_by(User_id) %>%
  count()
#put data in wide format: self-join df_first2 against itself shifted by one
#row so a user's first and second service land on the same row
df_first_wide <- df_first2 %>%
  merge(df_first2[-1,], by = 'User_id') %>%
  group_by(User_id) %>%
  slice(1)
#merge all tables together
df_sql <- df_first_wide %>%
  inner_join(df_last, by = 'User_id') %>%
  inner_join(df_service, by = 'User_id')
#rename the columns (.x/.y suffixes come from the self-merge above)
df_sql <- df_sql %>%
  rename(FirstServiceid = Serviceid.x,
         FirstServiceDate = Date.x,
         SecondServiceid = Serviceid.y,
         SecondServiceDate = Date.y,
         LastServiceid = Serviceid,
         LastServiceDate = Date,
         TotalService = n)
#reorder columns
df_sql <- df_sql[c(1,2,4,3,5,6,7,8)]
###Analysis test
#duplicate to a new df
df_sql2 <- df_sql
#customer age = time between first and last service, converted to days
df_sql2$Customer_age <- (df_sql$LastServiceDate
                         - df_sql$FirstServiceDate)
df_sql2$Customer_age <- time_length(df_sql2$Customer_age,
                                    unit = 'days')
#create table with user id and their age
df_age <- df_sql2 %>%
  select(User_id, Customer_age)
#join that table with the original table to get user id, their age, and all
#service ids they use
df_age <- df_age %>%
  inner_join(df, by = 'User_id')
#select only age groups and all service id used
df_age2 <- df_age %>%
  ungroup(User_id) %>%
  select(Customer_age, Serviceid) %>%
  distinct()
#initially visualize the clusters using scatterplot
ggplot(df_age2, aes(x = Customer_age,
                    y = Serviceid)) +
  geom_point()
#print out serviceid with customer age < 90
df_age2 %>%
  group_by(Customer_age) %>%
  filter(Customer_age < 90) %>%
  ungroup(Customer_age) %>%
  select(Serviceid)
#print out serviceid with customer age > 90 and < 365
df_age2 %>%
  group_by(Customer_age) %>%
  filter(Customer_age > 90, Customer_age < 365) %>%
  ungroup(Customer_age) %>%
  select(Serviceid)
#print out serviceid with customer age > 365
df_age2 %>%
  group_by(Customer_age) %>%
  filter(Customer_age > 365) %>%
  ungroup(Customer_age) %>%
  select(Serviceid)
#get transaction data by putting all service ids on one row, grouped by
#users and the date users used those services
df_transaction <- ddply(df,c('User_id','Date'),
                        function(df1)paste(df1$Serviceid,
                                           collapse = ','))
#keep only the collapsed service-id column
df_transaction <- df_transaction %>%
  select(V1)
#store the data to a csv file
write.csv(df_transaction,
          'C:/Users/khiem.phung/Downloads/basket_transaction.csv',
          quote = FALSE, row.names = FALSE)
#load the data into transaction class
tr <- read.transactions('C:/Users/khiem.phung/Downloads/basket_transaction.csv',
                        format = 'basket', sep=',')
#see the service with most frequent appearance
itemFrequencyPlot(tr,topN=10,type="absolute")
#mine the rules using the APRIORI algorithm
association.rules <- apriori(tr,
                             parameter = list(supp=0.001, conf=0.8))
summary(association.rules)
#sort the rules by count
association.rules <- sort(association.rules, by="count", decreasing=TRUE)
#print the top 20 rules (the original comment said 10 but inspected 20)
inspect(association.rules[1:20])
##Bonus question
#count number of distinct service ids used each date
df_date <- df %>%
  distinct(Date, Serviceid) %>%
  group_by(Date) %>%
  count()
#visualize the findings
ggplot(df_date, aes(x = Date, y = n)) +
  geom_point()
#filter the date to 2018
df_date_2018 <- df_date %>%
  filter(Date > '2017-12-31')
#visualize the new series
ggplot(df_date_2018, aes(x = Date, y = n)) +
  geom_line()
|
cb26ea00b68d5fcf56f94716b35e7d0b6d1a66d9
|
0844c816ade1b06645fd9b3ae661c0691978117b
|
/prep/example_json.R
|
bbb9c2eef17d94e812199a18536ea6b446aeeb3c
|
[
"MIT"
] |
permissive
|
petrbouchal/pragr
|
d92966395ac79982c72ba3a14fae6203176b256e
|
858fbb96f69eda9ac0922e8dfbc966948e545073
|
refs/heads/master
| 2022-12-21T20:44:41.420783
| 2022-12-17T21:18:35
| 2022-12-17T21:18:35
| 186,895,587
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 252
|
r
|
example_json.R
|
# Read one Czech municipality boundary (KOD_OBEC LIKE '%554782%') from the
# ArcGIS administrative-units open-data REST service (f=json) and plot its
# first attribute only.
# NOTE(review): the pipe relies on sf re-exporting magrittr's %>% -- confirm,
# otherwise magrittr/dplyr must be attached as well.
library(sf)
st_read("https://ags.arcdata.cz/arcgis/rest/services/OpenData/AdministrativniCleneni_v12/MapServer/10/query?where=KOD_OBEC%20like%20%27%25554782%25%27&returnGeometry=true&outFields=*&f=json&&resultRecordCount=200") %>%
plot(max.plot = 1)
|
b7fb994e6a63d7576dc64cc9d324ef1453bb7f2a
|
9ec64d2b674f77c8c61e9023f08f286a13ecc2e0
|
/Exploratory-Data/course-project/plot6.R
|
0a56048cc4652f358047a3cf4e1eeb11e7ff810f
|
[] |
no_license
|
anabaraldi/data-science
|
677fafc52378b6c3d1656162ee090e0b8db13398
|
8ddbfc364a87472e3a0054b2ed5bf0ab5a984eb5
|
refs/heads/master
| 2020-04-06T07:01:12.833087
| 2016-09-04T23:17:26
| 2016-09-04T23:17:26
| 30,598,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,247
|
r
|
plot6.R
|
# plot6.R -- compares motor-vehicle PM2.5 emissions in Baltimore City vs
# Los Angeles County (1999-2008) from the EPA NEI dataset, written to plot6.png.
#libraries
library(dplyr)
library(ggplot2)
# Reading and cleaning data
# Extracts summarySCC_PM25.rds and Source_Classification_Code.rds into the
# working directory.
unzip("exdata-data-NEI_data.zip")
# This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# The following code will filter the SCC file so we can subset the data with only the Motor Vehicle Source
# By definition Motor Vehicle sources are considered Onroad Category in USA
motorVehicleSCC <- filter(SCC, grepl("[Oo]nroad", SCC$Data.Category))
# The following code will filter only the Motor Vehicle Sources in Baltimore and Los Angeles, then group the data by year
# and take the sum for each year
# (fips "24510" = Baltimore City, "06037" = Los Angeles County)
motorVehicleNEI <- filter(NEI, SCC %in% motorVehicleSCC$SCC) %>%
filter(fips %in% c("24510", "06037")) %>%
mutate(fips = factor(fips, levels=c("24510", "06037"), labels=c("Baltimore", "Los Angeles"))) %>%
group_by(year, fips) %>%
summarise(emissionsSum=sum(Emissions))
# Creating the plot (one smoothed trend line per city)
png("plot6.png")
ggplot(motorVehicleNEI, aes(year, emissionsSum, color=fips)) +
geom_smooth() +
xlab("Year") +
ylab("Total PM2.5 Emission (in ton)") +
ggtitle("Motor Vehicle Sources PM2.5 Emission\n in Baltimore and Los Angeles - 1999 to 2008")
dev.off()
|
47583f0fe210a33fe0ebd0a399a18fa65ff0706a
|
b77b91dd5ee0f13a73c6225fabc7e588b953842b
|
/shared_functions/point_to_nearest_coastline.R
|
0c2623b79cde3061be1e3f7dff00d74f7546e307
|
[
"MIT"
] |
permissive
|
ksamuk/gene_flow_linkage
|
a1264979e28b61f09808f864d5fa6c75568147b0
|
6182c3d591a362407e624b3ba87403a307315f2d
|
refs/heads/master
| 2021-01-18T09:18:02.904770
| 2017-04-02T16:51:40
| 2017-04-02T16:51:40
| 47,041,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 791
|
r
|
point_to_nearest_coastline.R
|
# Move a point to the nearest coastline.
#
# bat  -- bathymetry object passed straight to dist2isobath() (marmap).
# loc  -- data frame with columns x and y holding two points (two rows);
#         on success its coordinates are replaced by the snapped positions.
# mode -- when 1, negative longitudes are wrapped into [0, 360).
#
# Returns a one-row data.frame: the dist2isobath() result, the two distances
# to the coast (km), and the (possibly snapped/wrapped) coordinates of both
# points. When dist2isobath() fails, all of the first three fields are NA and
# the input coordinates are returned unchanged.
point_to_nearest_coastline <- function (bat, loc, mode){
  nearest.coastline <- NA
  dist.to.coast1 <- NA
  dist.to.coast2 <- NA
  # dist2isobath() may fail (e.g. point outside the bathymetry grid); the try()
  # keeps the NA defaults in that case.
  try(nearest.coastline <- dist2isobath(bat, loc, isobath = -10))
  # BUG FIX: the original guard indexed nearest.coastline[,1] unconditionally,
  # which raises "incorrect number of dimensions" when dist2isobath() failed
  # and nearest.coastline is still the scalar NA. Require a dimensioned
  # (matrix/data.frame) result before indexing.
  if (!is.null(dim(nearest.coastline)) && !is.na(nearest.coastline[,1][1])){
    loc$x <- nearest.coastline[,4]
    loc$y <- nearest.coastline[,5]
    dist.to.coast1 <- nearest.coastline[,1][1] / 1000 # meters -> km
    dist.to.coast2 <- nearest.coastline[,1][2] / 1000 # meters -> km
    if (mode == 1){
      # Wrap longitudes into [0, 360) for Pacific-centred maps.
      if (loc$x[1] < 0){
        loc$x[1] <- loc$x[1] + 360
      }
      if (loc$x[2] < 0){
        loc$x[2] <- loc$x[2] + 360
      }
    }
  }
  return(data.frame(nearest.coastline,
                    dist.to.coast1,
                    dist.to.coast2,
                    loc$x[1],
                    loc$y[1],
                    loc$x[2],
                    loc$y[2]))
}
|
22c10ecb604878563720a99410e2961ace122fe9
|
efeba9f5aff2e7afbf96a57e0baf62a8fb1a3b94
|
/Part2/Stage3-Structured data/SeoulClinic analysis.R
|
242931eb4abf072862c168ea23d5194ac86e9d43
|
[] |
no_license
|
psm9619/R_Data_Analysis
|
b1db04295607b5b0811eb2151ce5378a812b2aa3
|
b6b8186a582174533ab41a68aeab77bdcf0ea854
|
refs/heads/master
| 2020-05-29T13:27:26.350660
| 2019-10-10T01:07:53
| 2019-10-10T01:07:53
| 189,161,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
SeoulClinic analysis.R
|
# NOTE(review): the Korean text in this script is mojibake from an encoding
# round-trip (some string literals even contain embedded line breaks).
# Re-export the script as UTF-8 from the original source before trusting the
# literals below; they are kept byte-for-byte here.
library(ggplot2)
data1 <- read.csv("2013๋
_์์ธ_์ฃผ์๊ตฌ๋ณ_๋ณ์ํํฉ.csv")
data1
# Grouped barplot of rows 1:9 x columns 2:11 of the table.
barplot(as.matrix(data1[1:9,2:11]),
main=paste("์์ธ์ ์ฃผ์๊ตฌ๋ณ ๊ณผ๋ชฉ๋ณ ๋ณ์ํํฉ-2013๋
", "\n", "์ถ์ฒ "),
ylab = "๋ณ์์", beside = T, col= rainbow(8))
# Faint horizontal grid lines every 10 units up to 350.
abline (h=seq(0,350,10), lty=3, lwd=0.2)
name <- data1$ํ์๊ณผ๋ชฉ
## ggplot needs long-format data, so the wide table is reshaped with melt()
## using the first column as the id (translated from the original Korean
## comments -- TODO confirm intent against the original encoding).
# NOTE(review): install.packages() inside a script re-installs on every run;
# this normally belongs outside the script (or behind requireNamespace()).
install.packages("reshape")
library(reshape)
df_long <- melt(data1, id=c('ํ์๊ณผ๋ชฉ'))
colnames(df_long) <- c('ํ์๊ณผ๋ชฉ','์ง์ญ๋ช
', '์์์')
df_long
# NOTE(review): `+ geom_bar` adds the *function object* (parentheses are
# missing) and will error when the plot is built; presumably
# geom_bar(stat = "identity") was intended -- confirm.
p <- ggplot(df_long, aes(x= ์ง์ญ๋ช
, y=์์์, fill=ํ์๊ณผ๋ชฉ)) +
geom_bar
######## TODO from the original (Korean, mojibake): check GitHub later
|
21f8c962c1509f7e35b917d859f06948b3a68277
|
74d8df7e5a0fd61394fd0494f35ce82dfaa30c96
|
/man/generateFreqs.Rd
|
e8d22e196f31d01f22c9490f29528148a38f77ca
|
[
"MIT"
] |
permissive
|
immunogenomics/scpost
|
8bde0fff6be217aa92e5b2cb48d145cd35031343
|
9e6ce336addc7e0d50e266299e8b46bed7df78d0
|
refs/heads/main
| 2023-04-13T13:15:08.708526
| 2021-07-22T14:14:36
| 2021-07-22T14:14:36
| 312,683,900
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,230
|
rd
|
generateFreqs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateFrequencies.R
\name{generateFreqs}
\alias{generateFreqs}
\title{Generate cell state frequency distributions for samples}
\usage{
generateFreqs(
batchStructure,
log_prior,
clus,
fc = 1,
cond_induce = "cases",
cf_sigma
)
}
\arguments{
\item{batchStructure}{The structure of the study design in which cases and controls are split into batches. These
structures are output by the "distributeSample" functions (which can then be modified if specific structure is desired).}
\item{log_prior}{A named vector containing the mean frequencies of the prototype dataset's cell states (log space). The
"estimateFreqVar" function returns this a mean frequency vector in linear space (can be transformed into log space via the
"log" function).}
\item{clus}{The name of the cluster in which a fold change will be induced.}
\item{fc}{The magnitude of the fold change that will be induced in the chosen cluster. If no fold change is desired, set
fc = 1.}
\item{cond_induce}{The condition you wish to induce a fold change in. Setting cond_induce = "cases" will induce a fold
change into cases, while setting cond_induce = "ctrls" will induce a fold change into controls.}
\item{cf_sigma}{A matrix containing the covariance between cell states. This matrix is received as output from
the "estimateFreqVar" function}
}
\value{
Returns a list containing: a list of cell state frequencies for all case samples and a list
of cell state frequencies for all control samples
}
\description{
Given a batchStructure and baseline frequency distribution (in log space), this function will
generate a cell state frequency distribution for each sample. This function also allows users to induce
a designated fold change (fc) into either case samples or control samples, and control the magnitude
of covariance that cell states have with each other via cf_sigma (e.g. increase or decrease the cell state frequency
variation across samples). The magnitude of the fold change will be the ratio of case to control cells (e.g. inducing
a fold change of 2 in cases will result in there being, on average, 2 times more case cells than control cells of that
cluster).
}
|
13db35ecada560ff0fa515ac64a917c8947e71d7
|
db78542ec83aa66cb8a543a94463bb99c58151e7
|
/Pairwise T Test.r
|
7d7cf1b6e2d7b32efc43042982289c02c86623fb
|
[] |
no_license
|
chunhuayu/R-Learning
|
59ee2567fb910c5124492da84603069ee5b9e2f1
|
36ede3bb562dca07029a8411e230b970e69f22e5
|
refs/heads/master
| 2020-05-09T14:59:05.094442
| 2019-06-29T03:06:52
| 2019-06-29T03:06:52
| 181,216,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
Pairwise T Test.r
|
# NOTE(review): this file is a pasted R console transcript ("> " prompts and
# printed output interleaved), not a runnable script -- it will not parse
# as-is. Kept verbatim; comments added for orientation only.
# It fits a one-way ANOVA of days.mgraine.5 on group, prints per-group
# means/SDs, then runs Bonferroni-adjusted pairwise t-tests.
> library(multcomp, pos=4)
> library(abind, pos=4)
> AnovaModel.1 <- aov(days.mgraine.5 ~ group, data=Dataset)
> summary(AnovaModel.1)
Df Sum Sq Mean Sq F value Pr(>F)
group 3 76.7 25.5776 2.6472 0.04858 *
Residuals 435 4203.0 9.6621
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
37 observations deleted due to missingness
> numSummary(Dataset$days.mgraine.5 , groups=Dataset$group,
+ statistics=c("mean", "sd"))
mean sd n NA
group A 2.314815 3.911756 108 13
group B 2.027273 1.988310 110 9
group C 2.495495 3.011716 111 7
group D 3.163636 3.229813 110 8
# NOTE(review): the call below uses bare column names (not Dataset$...) and
# paired=T although the groups have unequal sizes (108/110/111/110); a paired
# test requires equal-length pairings, so this call presumably failed -- verify.
> pairwise.t.test(days.mgraine.5, group, p.adj="bonferroni", paired=T)
|
c655d62cdfabb3c3f489aca2a380599b46a39f5f
|
267aa85b975d3348c5557505298364d6f1b5b7f4
|
/inst/shiny/server.R
|
a8ea84058d1e8b43166296a8f28387c1f1a983a1
|
[] |
no_license
|
matdoering/openPrimeRui
|
d2a6dad1038ddbe8e3541ccba6a2172271ccaf09
|
9b3f330bffff554986733cc85d2d40f7f8e2953f
|
refs/heads/master
| 2021-01-19T19:40:13.861823
| 2020-08-14T07:52:24
| 2020-08-14T07:52:24
| 101,199,926
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,270
|
r
|
server.R
|
##########
# Shiny server functionalities
##########
# Top-level shiny server function. It (1) sources static backend definitions,
# (2) swaps the loading screen for the real UI, (3) creates the app-wide
# reactiveValues containers, and (4) sources every server_*.R fragment into
# this function's local environment so the fragments share these variables.
server <- function(input, output, session) {
##############
# define static variables for the shiny app:
source(system.file("shiny", "shiny_server", "extra_shiny_backend.R",
package = "openPrimeRui"))
#print("Require namespace test:")
#print(requireNamespace("openPrimeRui"))
#openPrimeRui:::reset.reactive.values(values = NULL)
#stop("TEST")
####################
shinyjs::hide(selector = "#light") # don't show traffic light for design difficulty when difficulty hasn't been evaluated yet.
shinyjs::hide(id = "loadingContent", anim = TRUE, animType = "fade") # after dependencies have loaded, hide the loading message
shinyjs::show("app-content") # show the true app content
############
# convention: reactiveValues (rv) should start with the prefix rv_
#############
# rv_values: other general reactive values that do not fit into existing reactive values
# relax_info: bsmodal code when filtering relaxation occurred
# last_filtering_constraints: last applied filtering constraints
rv_values <- reactiveValues(
"relax_info" = NULL,
"last_filtering_constraints" = NULL
)
###########################
# rv_cur input data:
###########################
# templates_exon: template sequence file
# templates_leader: allowed binding regions fw file
# templates_leader_rev: allowed binding regions rev file
# primers: file with primer sequences
# settings: xml file for constraint settings
rv_cur.input.data <- reactiveValues("templates_exon" = NULL,
"templates_leader" = NULL,
"templates_leader_rev" = NULL,
"primers" = NULL,
"settings" = NULL)
# load all server source files:
# NOTE(review): pattern is a regex, so the unescaped '.' / unanchored end of
# "server_.*.R" also matches names like "server_fooQR" -- presumably harmless
# for this directory, but "server_.*\\.R$" would be precise.
sources <- list.files(system.file("shiny", "shiny_server",
package = "openPrimeRui"),
pattern="server_.*.R",
full.names = TRUE)
# local = TRUE keeps the sourced definitions inside this server function's
# scope (so they can see input/output/session and the rv_* containers).
for (s in sources) {
#message("Loading shiny server source: ", s)
source(s, local = TRUE)
}
}
|
7dfd720ca0a1ffb5169432c6fbaff7d2ea1cbd26
|
7676c2d5b77b588adde0e787a501dac27ad8efcd
|
/work/r๋ฐ์ดํฐ๋ถ์_์์ ํ์ผ/์์ /2_01.R
|
c6fe40a04808b64ad6e2a62094ce9aa7f85cd1c6
|
[] |
no_license
|
bass4th/R
|
a834ce1e455ca2c018364a48ea8ba0fbe8bf95e9
|
fb774770140a5792d9916fc3782708e09c12273d
|
refs/heads/master
| 2020-05-05T09:02:52.210737
| 2019-04-06T19:48:37
| 2019-04-06T19:48:37
| 179,888,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65
|
r
|
2_01.R
|
# Demonstrates how attaching a package changes R's search path.
search()
# NOTE(review): install.packages() inside a script re-installs on every run;
# this is usually done once interactively or guarded with requireNamespace().
install.packages("igraph")
library(igraph)
# The search path now includes "package:igraph".
search()
|
7402953aca7d9e91934980cfa68591202944cd10
|
cc00b5cdf7898fa1d960f09cecbced322443ecbf
|
/R/diff_days.R
|
135dde6a05e7d1977252293e4ab2ce6cc2d65abc
|
[
"MIT"
] |
permissive
|
edgararuiz-zz/maya
|
6a7392da41a61ef36574267de6718823cc7d5494
|
51129990671b06d50d62435a4f99dce9098fa918
|
refs/heads/master
| 2022-01-06T15:39:44.970647
| 2019-05-12T16:15:54
| 2019-05-12T16:15:54
| 184,825,567
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,632
|
r
|
diff_days.R
|
#' Number of days between two gregorian dates
#'
#' It determines the number of days between two gregorian dates. It works
#' independently from any R date/time function. An advantage of this function
#' is that it accepts dates older than year 1 CE. It uses calendar and
#' not astronomical year numbering.
#'
#' @param from_date Start date: a \code{Date} or a \code{gregorian_date}
#'   object (see \code{as_gregorian_date()}).
#' @param to_date End date; same accepted classes as \code{from_date}.
#'
#' @return The number of days between the two dates; negative when
#'   \code{to_date} is earlier than \code{from_date}.
#'
#' @examples
#'
#' diff_days(as.Date("2012-12-21"), as.Date("2019-05-12"))
#'
#' @export
diff_days <- function(from_date, to_date) UseMethod("diff_days")
#' @export
diff_days.Date <- function(from_date, to_date) {
  # Convert both R Date objects to the package's gregorian_date
  # representation and re-dispatch to the gregorian_date method.
  diff_days(as_gregorian_date(from_date), as_gregorian_date(to_date))
}
#' @export
diff_days.gregorian_date <- function(from_date, to_date) {
  # Pull the calendar components out of both dates, then delegate the
  # actual day arithmetic to the internal worker.
  y1 <- from_date$year; m1 <- from_date$month
  d1 <- from_date$day;  b1 <- from_date$bce
  y2 <- to_date$year;   m2 <- to_date$month
  d2 <- to_date$day;    b2 <- to_date$bce
  diff_days2(y1, m1, d1, b1, y2, m2, d2, b2)
}
# Internal worker: signed number of days from (year_1, month_1, day_1) to
# (year_2, month_2, day_2), each with a BCE flag. Positive when date 2 is
# later than date 1. Relies on the package helpers date_as_number() (total
# ordering of dates) and is_leap_year().
diff_days2 <- function(year_1, month_1, day_1, bce_1,
                       year_2, month_2, day_2, bce_2) {
  month_days <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
  nd1 <- date_as_number(year_1, month_1, day_1, bce_1)
  nd2 <- date_as_number(year_2, month_2, day_2, bce_2)
  # Map BCE years onto a continuous scale (1 BCE -> 0, 2 BCE -> -1, ...).
  if(bce_1) year_1 <- -(year_1 - 1)
  if(bce_2) year_2 <- -(year_2 - 1)
  # Order the endpoints so (a) <= (b); remember the sign for the result.
  if(nd1 >= nd2) {
    year_a <- year_2; month_a <- month_2; day_a <- day_2
    year_b <- year_1; month_b <- month_1; day_b <- day_1
    negative <- TRUE
  } else {
    year_a <- year_1; month_a <- month_1; day_a <- day_1
    year_b <- year_2; month_b <- month_2; day_b <- day_2
    negative <- FALSE
  }
  if(year_a == year_b) {
    if(month_a == month_b) {
      res <- day_b - day_a
    } else {
      md <- month_days
      # BUG FIX: the original evaluated `month_days[[2]] + 1` and discarded
      # the result, so Feb 29 was never counted in leap years. Adjust the
      # working copy instead.
      if(is_leap_year(year_a)) md[[2]] <- md[[2]] + 1
      md <- md[month_a:month_b]
      md[[1]] <- md[[1]] - day_a
      md[length(md)] <- day_b
      res <- sum(md)
    }
  } else {
    # One entry per calendar year in [year_a, year_b]: full-year lengths,
    # with the first and last entries overwritten by the partial-year counts.
    yrs <- as.integer(lapply(year_a:year_b, is_leap_year))
    yrs <- 365 + yrs
    md <- month_days
    # BUG FIX: the first year's leap day was added to January (md[[1]])
    # instead of February, so it was dropped whenever month_a > 1. Add it
    # to February, mirroring the year_b handling below.
    md[[2]] <- md[[2]] + is_leap_year(year_a)
    md <- md[month_a:12]
    md[[1]] <- md[[1]] - day_a
    yrs[[1]] <- sum(md)
    md <- month_days
    md[[2]] <- md[[2]] + is_leap_year(year_b)
    md <- md[1:month_b]
    md[[length(md)]] <- day_b
    yrs[[length(yrs)]] <- sum(md)
    res <- sum(yrs)
  }
  if(negative) res <- -(res)
  res
}
|
91d9a857c8631c68c10ea239b9f181ce5eb223b6
|
e5ff475e7fec1c7cccebbd3eff284b8ba1f9ba53
|
/man/nessy_examples.Rd
|
6d0a023c225a62aaf03a5a6d82f84fc3097b276d
|
[
"MIT"
] |
permissive
|
rpodcast/nessy
|
d4230125ed3f0fc460c6a78eaf7f6bf02cc794e7
|
8c2dd9e0cfe1f1ab29172d6c3e162b515ac60e1d
|
refs/heads/master
| 2020-04-24T21:50:56.820584
| 2019-02-14T10:10:49
| 2019-02-14T10:10:49
| 172,291,748
| 0
| 0
|
NOASSERTION
| 2019-02-24T03:33:58
| 2019-02-24T03:33:57
| null |
UTF-8
|
R
| false
| true
| 393
|
rd
|
nessy_examples.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examples.R
\name{nessy_examples}
\alias{nessy_examples}
\title{Get a NES example}
\usage{
nessy_examples(which = NULL)
}
\arguments{
\item{which}{The example to run. If empty, all the available examples are listed.}
}
\value{
A path to the example.
}
\description{
Get a NES example
}
\examples{
nessy_examples()
}
|
53b50e9a5da9bcda0227694892e65913c57920e7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/untb/examples/volkov.Rd.R
|
41e921d8a7b82389b89c069aa997dbca874f434c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
volkov.Rd.R
|
# Example code extracted from the untb package's volkov.Rd help page:
# expected species frequencies under neutral biodiversity theory.
library(untb)
### Name: volkov
### Title: Expected frequency of species
### Aliases: volkov
### Keywords: math
### ** Examples
## Not run:
##D volkov(J=21457,c(theta=47.226, m=0.1)) # Example in figure 1
## End(Not run)
volkov(J=20,params=c(theta=1,m=0.4))
data(butterflies)
# Preston-style octave plot of the butterflies dataset; r keeps the bin layout
# so the fitted curve below can be overlaid at the same positions.
r <- plot(preston(butterflies,n=9,orig=TRUE))
## Not run: jj <- optimal.params(butterflies) # needs PARI/GP
# Pre-computed optimal (theta, m), used instead of the PARI/GP-dependent call.
jj <- c(9.99980936124759, 0.991791987473506)
points(r,volkov(no.of.ind(butterflies), jj, bins=TRUE),type="b")
|
ff8500d0dd28ce8eae67523316740e491697b4ab
|
c2a6015d964e0a004fa4ac9c59df8aed039cc4fc
|
/man/knitAndSave.Rd
|
6451628de8365c9fcbad4ae50637fc4e7f8ae8b0
|
[] |
no_license
|
cran/ufs
|
27083e54b6e4c89f802c4de9218dbbd7c7d4260d
|
74bcfb60160bced552d79d301b739bb965d1a156
|
refs/heads/master
| 2023-06-23T09:48:11.331297
| 2023-06-09T15:30:03
| 2023-06-09T15:30:03
| 145,907,951
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,686
|
rd
|
knitAndSave.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knitAndSave.R
\name{knitAndSave}
\alias{knitAndSave}
\title{knitAndSave}
\usage{
knitAndSave(
plotToDraw,
figCaption,
file = NULL,
path = NULL,
figWidth = ufs::opts$get("ggSaveFigWidth"),
figHeight = ufs::opts$get("ggSaveFigHeight"),
units = ufs::opts$get("ggSaveUnits"),
dpi = ufs::opts$get("ggSaveDPI"),
catPlot = ufs::opts$get("knitAndSave.catPlot"),
...
)
}
\arguments{
\item{plotToDraw}{The plot to knit using \code{\link[=knitFig]{knitFig()}} and save using \code{\link[=ggSave]{ggSave()}}.}
\item{figCaption}{The caption of the plot (used as filename if no filename is specified).}
\item{file, path}{The filename to use when saving the plot, or the path where to save the
file if no filename is provided (if \code{path} is also omitted, \code{getwd()} is used).}
\item{figWidth, figHeight}{The plot dimensions, by default specified in inches (but 'units' can
be set which is then passed on to \code{\link[=ggSave]{ggSave()}}).}
\item{units, dpi}{The units and DPI of the image which are then passed on to \code{\link[=ggSave]{ggSave()}}.}
\item{catPlot}{Whether to use \code{\link[=cat]{cat()}} to print the knitr fragment.}
\item{...}{Additional arguments are passed on to \code{\link[=ggSave]{ggSave()}}. Note that file (and ...) are
vectorized (see the \code{\link[=ggSave]{ggSave()}} manual page).}
}
\value{
The \code{\link[=knitFig]{knitFig()}} result, visibly.
}
\description{
knitAndSave
}
\examples{
\dontrun{plot <- ggBoxplot(mtcars, 'mpg');
knitAndSave(plot, figCaption="a boxplot", file=tempfile(fileext=".png"));}
}
|
558d71085f9678ca8c434a7828ceaaa268f5366b
|
46ebd01f819e499506c5f16f44ea96556225172f
|
/Getting and Cleaning Data/Week 2/quiz.R
|
b3e0ae3af956f61fe72e1f205f958db77bfe3f5e
|
[] |
no_license
|
peterchiappini/datasciencecoursera
|
8ea7b0896fabde95e78f86cc04de13fd18193b3b
|
9e40202af1bb177a88d7bc4152c02b3a26f76ae0
|
refs/heads/master
| 2023-06-15T01:45:18.294878
| 2021-07-16T22:46:51
| 2021-07-16T22:46:51
| 295,849,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
quiz.R
|
library(httr)
library(sqldf)
# PROBLEM 1
# Register a GitHub OAuth app and query the GitHub API for jtleek's repos.
# SECURITY NOTE(review): a real OAuth key/secret pair is hard-coded and
# committed here; these credentials should be revoked and loaded from the
# environment (e.g. Sys.getenv()) instead.
oauth_endpoints("github")
myapp <- oauth_app("github",
key = "e60d1c464f658054d81a",
secret = "5fde2ac1529b22d3cf1d4b0b0a38005478a9ff3d"
)
# Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
gtoken <- config(token = github_token)
# Use API
req <- with_config(gtoken, GET("https://api.github.com/users/jtleek/repos"))
# Parsed response body: a list with one entry per repository.
con_request <- content(req)
# Print the creation timestamp of the repo entry `x` when its html_url
# matches `myurl`; otherwise do nothing (returns NULL invisibly).
find_create <- function(x, myurl) {
  is_target <- x$html_url == myurl
  if (is_target) {
    print(x$created_at)
  }
}
# Print the creation date of the jtleek/datasharing repository.
lapply(con_request, find_create, myurl ="https://github.com/jtleek/datasharing")
# Problem 2
acs <- read.csv("getdata_data_ss06pid.csv")
# sqldf runs SQL against the in-memory data frame `acs`.
head(sqldf("select pwgtp1 from acs where AGEP < 50"))
# Problem 3
sqldf("select distinct AGEP from acs")
# Problem 4
con = url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlCode = readLines(con)
close(con)
# Number of characters on line 100 of the fetched page.
nchar(htmlCode[100])
# Problem 5
# Fixed-width data file; the widths vector splits each record into columns,
# then column 4 is summed.
data <- read.fwf("getdata_wksst8110.for", skip=4,
widths=c(12, 7, 4, 9, 4, 9, 4, 9, 4))
sum(data[,4])
|
6582dba5e2a9ae44dc25039011dd9ba71c93838b
|
52d489c2491476428a9a0cd11b200c63be4794eb
|
/man/gen.arch.wge.Rd
|
97f56f9f21146e92d006f518df1f2f50053c9865
|
[] |
no_license
|
cran/tswge
|
2ffabc4794652937b86a701ec4772b2e07697531
|
435566d44f7652da48e9d257040fc78b47a08101
|
refs/heads/master
| 2023-04-01T22:52:04.649970
| 2023-01-31T12:10:02
| 2023-01-31T12:10:02
| 236,955,358
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,042
|
rd
|
gen.arch.wge.Rd
|
\name{gen.arch.wge}
\alias{gen.arch.wge}
\title{Generate a realization from an ARCH(q0) model}
\description{Generates a realization of length n from the ARCH(q0) model (4.23) in "Applied Time Series Analysis with R, 2nd edition" by Woodward, Gray, and Elliott}
\usage{
gen.arch.wge(n, alpha0, alpha, plot = TRUE,sn=0)
}
\arguments{
\item{n}{Length of realization to be generated}
\item{alpha0}{The constant alpha0 in model (4.23)}
\item{alpha}{A vector of length q0 containing alpha1 through alphaq0}
\item{plot}{If plot=TRUE (default) the generated realization is plotted}
\item{sn}{determines the seed used in the simulation. sn=0 produces new/random realization each time. sn=positive integer produces same realization each time}
}
\value{returns the generated realization}
\references{"Applied Time Series Analysis with R, 2nd edition" by Woodward, Gray, and Elliott}
\author{Wayne Woodward}
\examples{gen.arch.wge(n=200,alpha0=.1,alpha=c(.36,.27,.18,.09))}
\keyword{ ARCH }
\keyword{ Conditional variance}
|
ce5e8a2b163db96e6ede8014d1b5691bbe2c2f29
|
08da636974bcd1dfd35c6265a0687c32c609571e
|
/R_coco/crawling_basic2.R
|
10d2edde000c554d7f5800f3e3b11cf7682e73e0
|
[] |
no_license
|
edgestory/DS
|
0698ac4d4fdc3958289b052aac40917efe35052e
|
5d3c3ad8b0f4b24a73ac75b776448973ad3b149f
|
refs/heads/master
| 2023-01-13T07:37:38.604708
| 2020-11-17T09:33:06
| 2020-11-17T09:33:06
| 33,327,820
| 0
| 0
| null | 2020-06-05T01:41:47
| 2015-04-02T19:31:16
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 563
|
r
|
crawling_basic2.R
|
# Scrape the pages listed in column 3 of final_data.csv. For each URL keep the
# text between the "post_content" and "post_ccls" markers, with HTML tags and
# tab/space noise removed. Failed downloads contribute an empty string so
# `content` stays aligned with `url_list`.
library(stringr)
getwd()
data <- read.csv("final_data.csv")
head(data)
url_list <- data[,3]
length(url_list)
content <- c()
for (i in seq_along(url_list)) {
  ## try_error: on any download error (bad URL, timeout, ...) record "".
  if (inherits(try(b <- readLines(as.character(url_list[i]), encoding = 'UTF-8')), "try-error")) {
    b6 <- ""
    content <- c(content, b6)
    # next;
  } else {
    # Keep only the lines between the content-start and content-end markers.
    b2 <- b[which(str_detect(b, "post_content")):which(str_detect(b, "post_ccls"))]
    b3 <- paste(b2, collapse = "")
    b4 <- gsub("<.*?>", "", b3)  # strip HTML tags
    b5 <- gsub("\t| ", "", b4)   # strip tabs and stray spaces
    b6 <- str_trim(b5)
    # BUG FIX: the original did `content <- c(content)`, discarding b6, so
    # successfully scraped pages were never stored.
    content <- c(content, b6)
    cat("\n", i)
  }
}
|
d2bfd51b27ab75cfa8483beab7a3e994c7ccd4f4
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/inlabru/R/bru.integration.R
|
1808f6eba1f861f27e20a38e5488e45615f13ab3
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,121
|
r
|
bru.integration.R
|
#' @title Generate integration points
#'
#' @description
#' This function generates points in one or two dimensions with a weight attached to each point.
#' The weighted sum of a function evaluated at these points is the integral of that function approximated
#' by linear basis functions. The parameter \code{region} describes the area(s) integrated over.
#'
#' In case of a single dimension \code{region} is supposed to be a two-column \code{matrix} where
#' each row describes the start and end point of the interval to integrate over. In the two-dimensional
#' case \code{region} can be either a \code{SpatialPolygon}, an \code{inla.mesh} or a
#' \code{SpatialLinesDataFrame} describing the area to integrate over. If a \code{SpatialLineDataFrame}
#' is provided it has to have a column called 'weight' in order to indicate the width of the line.
#'
#' The domain parameter is an \code{inla.mesh.1d} or \code{inla.mesh} object that can be employed to
#' project the integration points to the vertices of the mesh. This reduces the final number of
#' integration points and reduces the computational cost of the integration. The projection can also
#' prevent numerical issues in spatial LGCP models where each observed point is ideally surrounded
#' by three integration points sitting at the corresponding mesh vertices. For convenience, the
#' \code{domain} parameter can also be a single integer setting the number of equally spaced integration
#' points in the one-dimensional case.
#'
#' @aliases ipoints
#' @export
#'
#' @author Fabian E. Bachl <\email{bachlfab@@gmail.com}>
#'
#' @param region Description of the integration region boundary.
#' In 1D either a vector of two numerics or a two-column matrix where each row describes an interval.
#' In 2D either a \code{SpatialPolygon} or a \code{SpatialLinesDataFrame} with a weight column defining the width of the line.
#' @param domain In 1D a single numeric setting the number of integration points or an \code{inla.mesh.1d}
#' defining the locations to project the integration points to. In 2D \code{domain} has to be an
#' \code{inla.mesh} object describing the projection and granularity of the integration.
#' @param name Character array stating the name of the domains dimension(s)
#' @param group Column names of the \code{region} object (if applicable) for which the integration points are calculated independently and not merged by the projection.
#' @param project If TRUE, project the integration points to mesh vertices
#'
#' @return A \code{data.frame} or \code{SpatialPointsDataFrame} of 1D and 2D integration points, respectively.
#'
#' @examples
#' \donttest{
#' if (require("INLA", quietly = TRUE)) {
#'
#' # Create 50 integration points covering the dimension 'myDim' between 0 and 10.
#'
#' ips = ipoints(c(0,10), 50, name = "myDim")
#' plot(ips)
#'
#' # Create integration points for the two intervals [0,3] and [5,10]
#'
#' ips = ipoints(matrix(c(0,3, 5,10), nrow = 2, byrow = TRUE), 50)
#' plot(ips)
#'
#' # Convert a 1D mesh into integration points
#' mesh = inla.mesh.1d(seq(0,10,by = 1))
#' ips = ipoints(mesh, name = "time")
#' plot(ips)
#'
#'
#' # Obtain 2D integration points from a SpatialPolygon
#'
#' data(gorillas, package = "inlabru")
#' ips = ipoints(gorillas$boundary)
#' ggplot() + gg(gorillas$boundary) + gg(ips, aes(size = weight))
#'
#'
#' #' Project integration points to mesh vertices
#'
#' ips = ipoints(gorillas$boundary, domain = gorillas$mesh)
#' ggplot() + gg(gorillas$mesh) + gg(gorillas$boundary) + gg(ips, aes(size = weight))
#'
#'
#' # Turn a 2D mesh into integration points
#'
#' ips = ipoints(gorillas$mesh)
#' ggplot() + gg(gorillas$boundary) + gg(ips, aes(size = weight))
#' }
#' }
ipoints = function(region = NULL, domain = NULL, name = "x", group = NULL, project) {
  # NOTE(review): 'project' is accepted but never used inside this function,
  # and 'pregroup' is never populated from 'group' (the polygon branch below
  # subsets region@data by it, so grouping columns are currently dropped
  # there) -- confirm whether this is intended.
  pregroup = NULL
  # If region is null treat domain as the region definition
  if ( is.null(region) ) {
    # BUG FIX: error message read "regio ... can not"
    if ( is.null(domain) ) { stop("region and domain cannot be NULL at the same time.") }
    else { region = domain ; domain = NULL }
  }
  if ( is.data.frame(region) ) {
    # Data frames are taken as ready-made integration points; add unit
    # weights when no weight column is present
    if (!("weight" %in% names(region))) { region$weight = 1 }
    ips = region
  }
  else if (is.integer(region)){
    # Integer vectors: one integration point per value, unit weight
    ips = data.frame(weight = rep(1,length(region)))
    ips[name] = region
  }
  else if (is.numeric(region)) {
    if ( is.null(dim(region)) ){ region = matrix(region, nrow = 1) }
    if ( ncol(region) == 1) {
      # Single column: treat values as point locations with unit weight
      ips = data.frame(x = region[,1], weight = 1)
      colnames(ips) = c(name, "weight")
    } else {
      # Each row of the matrix is an interval; integrate each via a 1D mesh
      ips = list()
      for (j in 1:nrow(region) ) {
        subregion = region[j,]
        # If domain is NULL set domain to a 1D mesh with 30 equally spaced vertices and boundary according to region
        # If domain is a single numeric set domain to a 1D mesh with n=domain vertices and boundary according to region
        if ( is.null(domain) ) { subdomain = INLA::inla.mesh.1d(seq(min(subregion), max(subregion), length.out = 30)) }
        else if ( is.numeric(domain)) { subdomain = INLA::inla.mesh.1d(seq(min(subregion), max(subregion), length.out = domain)) }
        else { subdomain = stop("1D weight projection not yet implemented") }
        # Lumped mass matrix diagonal gives the quadrature weights
        fem = INLA::inla.mesh.1d.fem(subdomain)
        ips[[j]] = data.frame(weight = Matrix::diag(fem$c0))
        ips[[j]][name] = subdomain$loc
        ips[[j]] = ips[[j]][,c(2,1)] # make weights second column
      }
      ips = do.call(rbind, ips)
    }
  } else if ( inherits(region, "inla.mesh") ){
    # A 2D mesh: its vertices become the integration points
    # If domain is provided: break
    if ( !is.null(domain) ) stop("Integration region provided as 2D and domain is not NULL.")
    # transform to equal area projection so the FEM weights are areas
    if ( !is.null(region$crs) && !(is.na(region$crs@projargs))) {
      crs = region$crs
      region = stransform(region, crs = CRS("+proj=cea +units=km"))
    }
    ips = vertices(region)
    ips$weight = INLA::inla.mesh.fem(region, order = 1)$va
    # backtransform to the original CRS
    if ( !is.null(region$crs) && !(is.na(region$crs@projargs))) { ips = stransform(ips, crs = crs) }
  } else if ( inherits(region, "inla.mesh.1d") ){
    # A 1D mesh: vertices with lumped mass matrix weights
    ips = data.frame(x = region$loc)
    colnames(ips) = name
    ips$weight = Matrix::diag(INLA::inla.mesh.fem(region)$c0)
  } else if ( class(region) == "SpatialPoints" ){
    # exact class match on purpose: SpatialPointsDataFrame is handled below
    ips = region
    ips$weight = 1
  } else if ( class(region) == "SpatialPointsDataFrame" ){
    if (!("weight" %in% names(region))) {
      warning("The integration points provided have no weight column. Setting weights to 1.")
      region$weight = 1
    }
    ips = region
  } else if ( inherits(region, "SpatialLines") || inherits(region, "SpatialLinesDataFrame") ){
    # If SpatialLines are provided convert into SpatialLinesDataFrame and attach weight = 1
    if ( class(region)[1] == "SpatialLines" ) {
      region = SpatialLinesDataFrame(region, data = data.frame(weight = rep(1, length(region))))
    }
    # Set weights to 1 if not provided
    if (!("weight" %in% names(region))) {
      warning("The integration points provided have no weight column. Setting weights to 1.")
      region$weight = 1
    }
    ips = int.slines(region, domain, group = group)
  } else if (inherits(region,"SpatialPolygons")){
    # If SpatialPolygons are provided convert into SpatialPolygonsDataFrame and attach weight = 1
    if ( class(region)[1] == "SpatialPolygons" ) {
      region = SpatialPolygonsDataFrame(region,
                                        data = data.frame(weight = rep(1, length(region))),
                                        match.ID = FALSE)
    }
    cnames = coordnames(region)
    p4s = proj4string(region)
    # Convert region and domain to an equal area CRS so weights are areas
    if ( !is.null(domain$crs) && !is.na(domain$crs@projargs)){
      region = stransform(region, crs = CRS("+proj=cea +units=km"))
    }
    # Boundary coordinates of each polygon (reversed to outer-ring order),
    # tagged with the polygon index as 'group'
    polyloc = do.call(rbind, lapply(1:length(region),
                                    function(k) cbind(
                                      x = rev(coordinates(region@polygons[[k]]@Polygons[[1]])[,1]),
                                      y = rev(coordinates(region@polygons[[k]]@Polygons[[1]])[,2]),
                                      group = k)))
    # If domain is NULL, make a mesh with the polygons as boundary
    if ( is.null(domain) ) {
      max.edge = max(diff(range(polyloc[,1])), diff(range(polyloc[,2])))/20
      domain = INLA::inla.mesh.2d(boundary = region, max.edge = max.edge)
      domain$crs = CRS(proj4string(region))
    } else {
      if ( !is.null(domain$crs) && !is.na(domain$crs@projargs))
        domain = stransform(domain, crs = CRS("+proj=cea +units=km"))
    }
    ips = int.polygon(domain, loc = polyloc[,1:2], group = polyloc[,3])
    df = data.frame(region@data[ips$group, pregroup, drop = FALSE],
                    weight = ips[,"weight"])
    ips = SpatialPointsDataFrame(ips[,c("x","y")], data = df, match.ID = FALSE)
    proj4string(ips) = proj4string(region)
    # Transform back to the original CRS if there was one
    if ( !is.na(p4s) ) {
      ips = stransform(ips, crs = CRS(p4s))
    }
  }
  ips
}
#' @title Cross product of integration points
#'
#' @description
#' Calculates the cross product of integration points in different dimensions
#' and multiplies their weights accordingly. If the object defining points in a particular
#' dimension has no weights attached to it all weights are assumed to be 1.
#'
#' @aliases cprod
#' @export
#'
#' @author Fabian E. Bachl <\email{bachlfab@@gmail.com}>
#'
#' @param ... \code{data.frame} or \code{SpatialPointsDataFrame} objects, each one usually obtained by a call to the \link{ipoints} function.
#' @return A \code{data.frame} or \code{SpatialPointsDataFrame} of multidimensional integration points and their weights
#'
#' @examples
#' \donttest{
#' # ipoints needs INLA
#' if (require("INLA", quietly = TRUE)) {
#' # Create integration points in dimension 'myDim' and 'myDiscreteDim'
#' ips1 = ipoints(c(0,8), name = "myDim")
#' ips2 = ipoints(as.integer(c(1,2,3)), name = "myDiscreteDim")
#'
#' # Calculate the cross product
#' ips = cprod(ips1, ips2)
#'
#' # Plot the integration points
#' plot(ips$myDim, ips$myDiscreteDim, cex = 10*ips$weight)
#' }
#' }
# Compute the cross product of one or more integration point sets, pairing
# every location of the first set with every location of the cross product of
# the rest and multiplying the corresponding weights. Inputs lacking a
# 'weight' column get unit weights; NULL inputs are dropped.
cprod = function(...) {
  pointsets = Filter(Negate(is.null), list(...))
  if (length(pointsets) == 0) {
    return(NULL)
  }
  if (length(pointsets) == 1) {
    return(pointsets[[1]])
  }
  # Recursive reduction: combine the first set with the cross product of the rest
  first.ips = pointsets[[1]]
  rest.ips = do.call(cprod, pointsets[-1])
  if (!("weight" %in% names(first.ips))) { first.ips$weight = 1 }
  if (!("weight" %in% names(rest.ips))) { rest.ips$weight = 1 }
  # Split each set into locations and weights
  first.loc = first.ips[, setdiff(names(first.ips), "weight"), drop = FALSE]
  first.w = data.frame(weight = first.ips$weight)
  rest.loc = rest.ips[, setdiff(names(rest.ips), "weight"), drop = FALSE]
  rest.w = data.frame(weight2 = rest.ips[, "weight"])
  # Merge the locations. Spatial objects require sp::merge, which replicates
  # entries in a different order than base merge, so the weight tables are
  # merged in reversed order in that case to keep rows aligned.
  if (inherits(first.loc, "Spatial")) {
    ips = sp::merge(first.loc, rest.loc, duplicateGeoms = TRUE)
    combined.w = merge(rest.w, first.w)
  } else if (inherits(rest.loc, "Spatial")) {
    ips = sp::merge(rest.loc, first.loc, duplicateGeoms = TRUE)
    combined.w = merge(rest.w, first.w)
  } else {
    ips = merge(first.loc, rest.loc)
    combined.w = merge(first.w, rest.w)
  }
  ips$weight = combined.w$weight * combined.w$weight2
  ips
}
# Integration points for log Gaussian Cox process models using INLA
#
# prerequisites:
#
# - List of integration dimension names, extend and quadrature
# - Samplers: These may live in a subset of the dimensions, usually space and time
# ("Where and wehen did a have a look at the point process")
# - Actually this is a simplified view. Samplers should have start and end time !
#
# Procedure:
# - Select integration strategy by type of samplers:
# 1) SpatialPointsDataFrame: Assume these are already integration points
# 2) SpatialLinesDataFrame: Use simplified integration along line with (width provided by samplers)
# 3) SpatialPolygonDataFrame: Use full integration over polygons
#
# - Create integration points from samplers. Do NOT perform simplification projection here!
# - Simplify integration points.
# 1) Group by non-mesh dimensions, e.g. time, weather
# 2) For each group simplify with respect to mesh-dimensions, e.g. space
# 3) Merge
#
# Dependencies (iDistance):
# int.points(), int.polygon(), int.1d(), int.expand(), recurse.rbind()
#
# @aliases ipoints
# @export
# @param samplers A Spatial[Points/Lines/Polygons]DataFrame objects
# @param points A SpatialPoints[DataFrame] object
# @param config An integration configuration. See \link{iconfig}
# @return Integration points
# Build integration points for LGCP fitting by combining sampler geometry
# with per-dimension domain definitions (see comment block above for the
# overall strategy).
#
# samplers: Spatial[Points/Lines/Polygons]DataFrame describing where sampling happened
# domain:   named list of per-dimension meshes / ranges (filled in when missing)
# dnames:   names of all integration dimensions ("coordinates" = space)
# model:    used to harvest meshes from effects whose map matches a dimension name
# data:     observed points; used as a fallback to derive domain extents
ipmaker = function(samplers, domain, dnames, model = NULL, data = NULL) {
  # Fill missing domain definitions using meshes from effects where map equals the domain name
  meshes = list()
  for (e in effect(model)) {meshes[[paste0(as.character(e$map), collapse ="")]] = e$mesh}
  for ( nm in dnames) {
    if ( is.null(domain[[nm]]) ) { domain[[nm]] = meshes[[nm]] }
  }
  # Fill missing domain definitions with data ranges
  for ( nm in dnames) {
    # scalar condition: use && (was the elementwise &)
    if ( !(nm %in% names(domain)) && !is.null(data) && !(nm %in% names(samplers))){
      if ( nm == "coordinates" ) {
        # Default spatial domain: mesh over the data locations
        domain[["coordinates"]] = INLA::inla.mesh.2d(loc.domain = coordinates(data), max.edge = diff(range(coordinates(data)[,1]))/10)
        domain[["coordinates"]]$crs = INLA::inla.CRS(proj4string(data))
      } else {
        domain[[nm]] = range(data[[nm]])
      }
    }
  }
  if ( "coordinates" %in% dnames ) { spatial = TRUE } else { spatial = FALSE }
  # Dimensions provided via samplers (except "coordinates")
  samp.dim = intersect(names(samplers), dnames)
  # Dimensions provided via domain but not via samplers
  nosamp.dim = setdiff(names(domain), c(samp.dim, "coordinates"))
  # Check if a domain definition is missing
  missing.dims = setdiff(dnames, c(names(domain), samp.dim))
  # BUG FIX: was 'length(missing.dims > 0)' -- the comparison belongs outside length()
  if ( length(missing.dims) > 0 ) stop(paste0("Domain definitions missing for dimensions: ", paste0(missing.dims, collapse = ", ")))
  # Spatial integration points from the samplers, then cross product with
  # the purely domain-defined dimensions
  if ( spatial ) {
    ips = ipoints(samplers, domain$coordinates, project = TRUE, group = samp.dim)
  } else {
    ips = NULL
  }
  lips = lapply(nosamp.dim, function(nm) ipoints(NULL, domain[[nm]], name = nm))
  ips = do.call(cprod, c(list(ips), lips))
}
# Project data to mesh vertices under the assumption of lineariity
#
#
# @aliases vertex.projection
# @export
# @param points A SpatialPointsDataFrame object
# @param mesh An inla.mesh object
# @param columns A character array of the points columns which shall be projected
# @param group Character array identifying columns in \code{points}. These columns are interpreted as factors and the projection is performed independently for each combination of factor levels.
# @return SpatialPointsDataFrame of mesh vertices with projected data attached
# Project point data onto mesh vertices assuming linear (barycentric)
# interpolation within each triangle; see comment block above for parameters.
vertex.projection = function(points, mesh, columns = names(points), group = NULL, fill = NULL){
  # scalar condition: use || (was the elementwise |)
  if ( is.null(group) || (length(group) == 0) ) {
    # Barycentric coordinates of each point within its containing triangle
    res = INLA::inla.fmesher.smorg(mesh$loc, mesh$graph$tv, points2mesh = coordinates(points))
    tri = res$p2m.t
    data = list()
    for (k in seq_along(columns)){
      cn = columns[k]
      # BUG FIX: was 'points@data[,columns]' -- with more than one column the
      # whole data frame was multiplied against the barycentric weights.
      # Each column must be weighted separately.
      nw = points@data[,cn] * res$p2m.b
      # Sum the weighted contributions per receiving vertex
      w.by = by(as.vector(nw), as.vector(mesh$graph$tv[tri,]), sum, simplify = TRUE)
      data[[cn]] = as.vector(w.by)
    }
    data = data.frame(data)
    # 'w.by' from the last iteration is reused here: the vertex index set only
    # depends on 'tri', so it is identical for every column
    coords = mesh$loc[as.numeric(names(w.by)),c(1,2)]
    data$vertex = as.numeric(names(w.by))
    ret = SpatialPointsDataFrame(coords,
                                 proj4string = CRS(proj4string(points)),
                                 data = data,
                                 match.ID = FALSE)
    coordnames(ret) = coordnames(points)
    # If fill is not NULL, add vertices to which no data was projected
    # and set their projected data according to `fill`
    if ( !is.null(fill) ) {
      vrt = vertices(mesh)
      vrt = vrt[setdiff(vrt$vertex, data$vertex),]
      if ( nrow(vrt) > 0 ){
        for (nm in setdiff(names(data), "vertex")) vrt[[nm]] = fill
        ret = rbind(ret, vrt)
      }
      # Order rows by vertex index so row i corresponds to vertex i
      ret = ret[match(1:mesh$n, ret$vertex),]
    }
  } else {
    # Group-wise projection: recurse on each factor-level combination and
    # carry the grouping values along
    fn = function(X) {
      coordinates(X) = coordnames(points)
      ret = vertex.projection(X, mesh, columns = columns)
      for (g in group) { ret[[g]] = X[[g]][1] }
      ret
    }
    idx = as.list(data.frame(points)[,group,drop=FALSE])
    ret = by(points, idx, fn)
    ret = do.call(rbind, ret)
    proj4string(ret) = proj4string(points)
  }
  ret
}
# Project data to mesh vertices under the assumption of lineariity
#
#
# @aliases vertex.projection
# @export
# @param points A SpatialPointsDataFrame object
# @param mesh An inla.mesh object
# @param columns A character array of the points columns which shall be projected
# @param group Character array identifying columns in \code{points}. These columns are interpreted as factors and the projection is performed independently for each combination of factor levels.
# @return SpatialPointsDataFrame of mesh vertices with projected data attached
# @example
#
# pts = data.frame(x = 50 * runif(10), weight = abs(rnorm(100)))
# msh = inla.mesh.1d(seq(0,50,by=1))
# pts$year = c(rep(1,5), rep(2,5))
# ip = vertex.projection.1d(pts, msh)
# ggplot(ip) + geom_point(aes(x=x, y=weight))
#
# ip = vertex.projection.1d(pts, msh, group = "year", fill = 0, column = "weight")
# head(ip)
# ggplot(ip) + geom_point(aes(x=x, y=weight, color = year))
# Distribute 1D point weights onto the two neighbouring vertices of a 1D mesh
# by linear interpolation. Optionally aggregates per vertex (and per group)
# and fills untouched vertices with a constant.
vertex.projection.1d = function(points, mesh, group = NULL, column = "weight", simplify = TRUE, fill = NULL) {
  # The coordinate dimension is whatever column is neither weight nor group
  coord.name = setdiff(names(points), c(column, group))
  if (length(coord.name) > 1) { coord.name = coord.name[1] }
  loc = points[, coord.name]
  wt = points[, column]
  # Index of the mesh interval containing each point
  interval = findInterval(loc, mesh$loc)
  lower = mesh$loc[interval]
  upper = mesh$loc[interval + 1]
  # Fractional position inside the interval: weight split between the two ends
  frac.upper = (loc - lower) / (upper - lower)
  frac.lower = 1 - frac.upper
  # One row per (point, neighbouring vertex) pair: lower ends first
  ips = rbind(data.frame(x = lower, vertex = interval),
              data.frame(x = upper, vertex = interval + 1))
  ips[column] = c(wt * frac.lower, wt * frac.upper)
  # Aggregate contributions per vertex (and per group level, if requested)
  if (simplify) {
    bygroup = list(vertex = ips$vertex)
    if (!is.null(group)) {
      bygroup = c(bygroup, as.list(rbind(points[, group, drop = FALSE], points[, group, drop = FALSE])))
    }
    ips = aggregate(ips[, column, drop = FALSE], by = bygroup, FUN = sum)
  }
  # Attach the vertex coordinate under the original dimension name
  ips[coord.name] = mesh$loc[ips$vertex]
  # Add untouched vertices with a constant value when fill is given
  if (!is.null(fill)) {
    untouched = setdiff(1:length(mesh$loc), ips$vertex)
    filler = data.frame(vertex = untouched, x = mesh$loc[untouched])
    filler[, column] = fill
    ips = rbind(ips, merge(filler, ips[, group, drop = FALSE]))
  }
  ips
}
#' Weighted summation (integration) of data frame subsets
#'
#' A typical task in statistical inference to integrate a (multivariate) function along one or
#' more dimensions of its domain. For this purpose, the function is evaluated at some points
#' in the domain and the values are summed up using weights that depend on the area being
#' integrated over. This function performs the weighting and summation conditional for each level
#' of the dimensions that are not integrated over. The parameter \code{dims} states the the
#' dimensions to integrate over. The set of dimensions that are held fixed is the set difference
#' of all column names in \code{data} and the dimensions stated by \code{dims}.
#'
#' @aliases int
#' @export
#' @param data A \code{data.frame} or \code{Spatial} object. Has to have a \code{weight} column with numeric values.
#' @param values Numerical values to be summed up, usually the result of function evaluations.
#' @param dims Column names (dimension names) of the \code{data} object to integrate over.
#' @return A \code{data.frame} of integrals, one for each level of the cross product of all dimensions not being integrated over.
#'
#' @examples
#' \donttest{
#' # ipoints needs INLA
#' if (require("INLA", quietly = TRUE)) {
#' # Create integration points in two dimensions, x and y
#'
#' ips = cprod(ipoints(c(0,10), 10, name = "x"),
#' ipoints(c(1,5), 10, name = "y"))
#'
#' # The sizes of the domains are 10 and 4 for x and y, respectively.
#' # Integrating f(x,y) = 1 along x and y should result in the total
#' # domain size 40
#'
#' int(ips, rep(1, nrow(ips)), c("x","y"))
#' }
#' }
int = function(data, values, dims = NULL) {
  # Dimensions that are held fixed: everything except the integration
  # dimensions and the weight column
  keep = setdiff(names(data), c(dims, "weight"))
  # scalar condition: use && (was the elementwise &)
  if (length(keep) > 0 && !is.null(dims)) {
    # Weighted sum within each level combination of the remaining dimensions
    agg = aggregate(values * data$weight, by = as.list(data[, keep, drop = FALSE]), FUN = sum)
    names(agg)[ncol(agg)] = "integral" # paste0("integral_{",dims,"}(",deparse(values),")")
    agg
  } else {
    # Nothing to condition on: integrate over everything at once
    sum(data$weight * values)
  }
}
|
1e8d3b75880fe43de8d9672687018f39169323e6
|
117936196834fbda370de297d6f5a77846bf45e9
|
/old/testingHMSCv2/functions/data_wrangling_fx.R
|
343931fb58d203db351a25e42c48732325300814
|
[] |
no_license
|
javirudolph/testingHMSC
|
a79dc2ffcdec967ed45d23e46151044d1365ab51
|
61c3e1b035b8095c45755833d2ab0ebc1179a6fb
|
refs/heads/master
| 2021-06-16T04:27:22.878177
| 2021-03-11T18:46:51
| 2021-03-11T18:46:51
| 170,368,566
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,762
|
r
|
data_wrangling_fx.R
|
# Functions to modify and organize the dataframes from the VP output and get ready for plotting
# Assemble a tidy variation-partitioning summary for one simulation scenario.
#
# Reads the scenario's RDS outputs from `outPath` (files named
# "<scenarioNum>-metacomSim.RDS", "-vpsites.RDS"/"-vpspp.RDS" and
# "-params.RDS") and returns a data frame of env/spa/codist fractions plus
# metadata, either per site (indSites = TRUE) or per species (default).
#
# outPath:     directory prefix where the scenario RDS files live
# scenarioNum: scenario identifier used as file-name prefix and label
# indSites:    TRUE = summarise per site; FALSE = per species
#
# NOTE(review): assumes exactly 5 iterations and 15 replicates per overlap
# array (the hard-coded 1:5 and /15 below) -- confirm against the simulation
# settings. Requires tidyverse (purrr, dplyr, tidyr, tibble) to be attached.
doItAll_dataWrangling <- function(outPath, scenarioNum, indSites = FALSE){
  if(indSites == TRUE){
    # Per-site species richness for each iteration, keyed by "site<j>_iter<i>"
    richness <- readRDS(paste0(outPath, scenarioNum, "-metacomSim.RDS")) %>%
      set_names(imap(., ~ paste0("iter", .y))) %>%
      map(., rowSums) %>%
      bind_rows() %>%
      rownames_to_column(var = "sites") %>%
      gather(., key = "iteration", value = "richness", -sites) %>%
      mutate(identifier = paste0("site", sites, "_", iteration)) %>%
      select(., -c(sites, iteration))
    # Site-level variation partitioning components, one 3D array per overlap
    vp <- readRDS(paste0(outPath, scenarioNum, "-vpsites.RDS"))
    overlap1 <- map(vp, "overlap1")
    overlap2 <- map(vp, "overlap2")
    overlap3 <- map(vp, "overlap3")
    vpALL <- vector("list", length = 5)
    for(i in 1:5){
      workingVP1 <- overlap1[[i]]
      workingVP2 <- overlap2[[i]]
      workingVP3 <- overlap3[[i]]
      # Average each fraction over the 15 replicates; letters follow the
      # classic variation-partitioning Venn diagram labels
      c <- rowSums(workingVP1[,,1])/15
      b <- rowSums(workingVP1[,,2])/15
      a <- rowSums(workingVP1[,,3])/15
      e <- rowSums(workingVP2[,,1])/15
      f <- rowSums(workingVP2[,,2])/15
      d <- rowSums(workingVP2[,,3])/15
      g <- rowSums(workingVP3)/15
      # Combine fractions into environment / space / co-distribution, clamping
      # small negative estimates to zero
      env <- a + f + 1/2 * d + 1/2 * g
      env <- ifelse(env < 0, 0, env)
      spa <- b + e + 1/2 * d + 1/2 * g
      spa <- ifelse(spa < 0, 0, spa)
      random <- c
      codist <- ifelse(random < 0, 0, random)
      r2 <- env + spa + codist
      iteration <- factor(paste0("iter", i), levels = paste0("iter", 1:5))
      cleanData <- cbind.data.frame(env, spa, codist, r2, iteration)
      cleanData$site <- paste0(row.names(cleanData))
      vpALL[[i]] <- cleanData
    }
    # Stack iterations and attach richness by the shared identifier column
    vpALL %>%
      bind_rows() %>%
      mutate(identifier = paste(site, iteration, sep = "_"),
             scenario = scenarioNum) %>%
      left_join(., richness) -> vpALL
    return(vpALL)
  }
  # --- species-level branch ---
  # Per-species simulation parameters (niche optimum/breadth, colonisation,
  # dispersal, interaction strengths)
  params <- with(readRDS(paste0(outPath, scenarioNum, "-params.RDS")), {
    enframe(u_c[1,], name = "species",value = "nicheOpt") %>%
      left_join(., enframe(s_c[1,], name = "species", value = "nicheBreadth")) %>%
      left_join(., enframe(c_0, name = "species", value = "colProb")) %>%
      mutate(dispersal = alpha,
             species = as.character(species),
             intercol = d_c,
             interext = d_e)
  })
  # Per-species prevalence (number of occupied sites) per iteration
  prevalence <- readRDS(paste0(outPath, scenarioNum, "-metacomSim.RDS")) %>%
    set_names(imap(., ~ paste0("iter_", .y))) %>%
    map(., colSums) %>%
    bind_cols() %>%
    rownames_to_column(var = "species") %>%
    gather(., key = "iteration", value = "prevalence", -species) %>%
    mutate(identifier = paste0("spp", species, "_", iteration)) %>%
    select(., -c(species, iteration))
  readRDS(paste0(outPath, scenarioNum, "-vpspp.RDS")) %>%
    set_names(imap(., ~ paste0("iter_", .y))) -> VPdata
  fullData <- list()
  for(i in 1:length(VPdata)){
    # Same Venn-fraction recombination as the site branch, but one row per
    # species, joined with the simulation parameters
    fullData[[i]] <- VPdata[[i]] %>%
      map(as_tibble) %>%
      bind_cols() %>%
      rownames_to_column() %>%
      set_names(c("species", "c", "b", "a", "e", "f", "d", "g")) %>%
      transmute(species = species,
                env = a + f + 0.5 * d + 0.5 * g,
                env = ifelse(env < 0, 0, env),
                spa = b + e + 0.5 * d + 0.5 * g,
                spa = ifelse(spa < 0, 0, spa),
                codist = c,
                codist = ifelse(codist < 0, 0, codist),
                r2 = env + spa + codist,
                iteration = names(VPdata[i])) %>%
      left_join(., params)
  }
  # Stack iterations and attach prevalence by the shared identifier column
  fullData %>%
    bind_rows() %>%
    mutate(identifier = paste0("spp", species, "_", iteration),
           scenario = scenarioNum) %>%
    left_join(., prevalence) -> fullData
  return(fullData)
}
# csv and figures ---------------------------------------------------------
# Write the per-species and per-site summaries for one scenario as CSV files
# and save the matching ternary plots as PNGs.
#
# scenario: scenario identifier passed through to doItAll_dataWrangling()
#
# NOTE(review): relies on globals defined elsewhere -- `folderpath` (output
# directory, must end with a path separator) and `make_tern_plot()` -- and on
# the "csvFiles/" and "figures/" subdirectories already existing. Consider
# passing these as arguments.
save_csv_and_plots <- function(scenario){
  # Species-level table (one row per species x iteration)
  sppcsv <- doItAll_dataWrangling(outPath = folderpath, scenarioNum = scenario, indSites = FALSE)
  write.csv(sppcsv, file = paste0(folderpath, "csvFiles/", scenario, "spp.csv"))
  sppcsv %>%
    make_tern_plot(., varShape = "iteration", varColor = "nicheOpt") +
    labs(title = scenario)
  # NOTE(review): ggsave() is called without an explicit plot object, so it
  # saves ggplot2::last_plot(); confirm the plot built above is the one
  # registered when this runs non-interactively.
  ggsave(filename = paste0(folderpath, "figures/", scenario, "spp.png"), dpi = 300, width = 9, height = 4.5)
  # Site-level table (one row per site x iteration)
  sitescsv <- doItAll_dataWrangling(outPath = folderpath, scenarioNum = scenario, indSites = TRUE)
  write.csv(sitescsv, file = paste0(folderpath, "csvFiles/", scenario, "sites.csv"))
  make_tern_plot(sitescsv, varShape = "iteration", varColor = "richness") +
    labs(title=scenario)
  ggsave(filename = paste0(folderpath, "figures/", scenario, "sites.png"), dpi = 300, width = 9, height = 4.5)
}
|
d1f8a2a6fe3926e788ca2176355b630d712e0d02
|
3e9f4fef38f2fbcee67db40525d2cf343d5dd264
|
/R/Basics/02_check_data.R
|
0ff1900287eb9b5b61abfbfe5071e1f1866dc35c
|
[] |
no_license
|
LS-2ednar/statistics_cheatsheat
|
727a9b2b376639ea3b03d4f17797db763cb84e57
|
e525d33fe79c41c434c97665a22c8ee26aab11b3
|
refs/heads/main
| 2023-01-13T05:44:07.687749
| 2020-11-23T16:22:18
| 2020-11-23T16:22:18
| 315,372,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,200
|
r
|
02_check_data.R
|
# Basic data-checking walkthrough: missing values, variable types, and
# exploratory plots of the built-in 'morley' dataset.
# NOTE(review): package installs are guarded so they only run when the
# package is absent (the original installed unconditionally on every run).
if (!requireNamespace('mice', quietly = TRUE)) install.packages('mice')
library(mice)
# Load the example dataset FIRST.
# BUG FIX: the original called md.pattern(data) before 'data' was assigned,
# which passed the base function utils::data instead of a data frame.
data <- morley
# md.pattern() tabulates missing-data patterns; all 1s (plus a 0 total row)
# means the dataset is complete.
md.pattern(data)
# use str() to figure out if some parts of your data should be a factor or not
# then use as.factor() to change the values to a factor
str(data)
data$Expt <- as.factor(data$Expt)
str(data)
# depending on the data type you can now inspect the data further with, for
# example, a boxplot; use ggplot or the built-in boxplot() function.
# ggplot example
if (!requireNamespace('ggplot2', quietly = TRUE)) install.packages('ggplot2')
library(ggplot2)
ggplot(data = data, aes(Expt, Speed)) +
  geom_boxplot() +
  stat_boxplot(geom = 'errorbar', width = 0.3)
# base R example
boxplot(data$Speed ~ data$Expt)
# one can use a different approach: bar graph with confidence intervals
if (!requireNamespace('sciplot', quietly = TRUE)) install.packages('sciplot')
library(sciplot)
bargraph.CI(Expt, Speed, col = (gray(0.88)), data = data, xlab = "Experiment", ylab = "count", ylim = c(0,20))
# lineplot.CI(Expt, Speed, type = "p", data = data, xlab = "spray", ylab = "count", ylim = c(0,20))
|
85791c243734d690afe620b923c765dfb3f77fc3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clues/examples/Maronna.Rd.R
|
6f537fdb5a255577f52d5d24cb4a10cc0a461750
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
Maronna.Rd.R
|
# Example script for the Maronna data set shipped with the 'clues' package.
library(clues)
### Name: Maronna
### Title: The Maronna Data Set
### Aliases: Maronna maronna maronna.mem
### Keywords: cluster
### ** Examples
# Load the bundled example: a list with the data matrix and true memberships
data(Maronna)
# data matrix
maronna <- Maronna$maronna
# cluster membership
maronna.mem <- Maronna$maronna.mem
# 'true' number of clusters
nClust <- length(unique(maronna.mem))
# scatter plots of the variables, coloured by cluster membership
plotClusters(maronna, maronna.mem)
|
473e1fc4e6a85c0b943f58c4700d207258a3451d
|
bc7cb0d6281727d4283b8635143cec8e1c864287
|
/man/coef.estimate.Rd
|
0300cf7f52a44d19d16996f92175a363c36143d3
|
[] |
no_license
|
josue-rodriguez/GGMnonreg
|
e8cfb3e1db1c96c226e91705642227cf4a3ee7eb
|
33ecd010df57525411261a08005a4d2f946327d3
|
refs/heads/master
| 2021-01-02T00:30:37.231210
| 2020-01-27T20:45:52
| 2020-01-27T20:45:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,145
|
rd
|
coef.estimate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef.GGM_bootstrap.R
\name{coef.estimate}
\alias{coef.estimate}
\alias{coef.GGM_bootstrap}
\title{Precision Matrix to Multiple Regression}
\usage{
\method{coef}{GGM_bootstrap}(object, node = 1, ci = 0.95, ...)
}
\arguments{
\item{object}{object of class \code{estimate} (analytic = F)}
\item{node}{which variable (node) to summarise}
\item{ci}{confidence interval used in the summary output}
\item{...}{currently ignored}
}
\value{
A list of class \code{coef.estimate} containing the summarised regression coefficients.
}
\description{
There is a direct correspondence between the covariance matrix and multiple regression. In the case of GGMs, it is possible
to estimate the edge set with multiple regression (i.e., neighborhood selection). In \strong{GGMnonreg}, the precision matrix
is first bootstrapped, and then each sample is converted to the corresponding coefficients and error variances.
This results in bootstrap distributions for a multiple regression.
}
\examples{
# data
X <- scale(GGMnonreg::ptsd)
# fit model
fit <- GGM_bootstrap(X)
# summary for predicting the first variable
coef(fit, node = 1)
}
|
5adfd8b8f8e0bca46f7f9160d4ba41de038ba923
|
11d4d6cb6be6f3a5b45c8470045ae6a7b03e11e9
|
/app.R
|
9e491ca2cd2c37a4d58b3a5c4bfed47cd09c14e6
|
[] |
no_license
|
tobias-heuser/classification-digitalisation-projects
|
8a51aa619a18f61ccaa97e739cd914176bdbaca8
|
ced2c1cd34c573486736545b3cdff69363990037
|
refs/heads/main
| 2023-04-02T21:28:48.430489
| 2021-03-27T01:08:42
| 2021-03-27T01:08:42
| 351,126,848
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,201
|
r
|
app.R
|
# install.packages("ggdendro")
# install.packages("reshape2")
# install.packages("grid")
# install.packages("dendextend")
library(dplyr)
# Build the project catalogue: 58 digitalisation projects described by six
# categorical attributes (all stored as factors for the Gower dissimilarity
# computed below).
#   pro_involved:   professions involved ("+C"/"+D"/"1D")
#   pro_focus:      project focus area (P&S, OE&A, I&DM, C)
#   pro_complexity: complexity rating as High/Low pairs (e.g. "H/L")
#   pro_impact:     impact direction ("+R" revenue, "-C" cost, "O" other)
#   pro_mode:       innovation mode ("explore" vs "exploit")
#   pro_pathway:    implementation pathway (CX, I&S, new, iterate)
projects <- data.frame(
  #id.s = c(1:58),
  pro_involved = c("+C", "1D", "+D", "+C", "+C", "+D", "+D", "+D", "1D", "1D", "+C", "+D", "+C", "+D", "+C", "+C", "+C", "+D", "+C", "+C", "+C", "1D", "+D", "+C", "1D", "+C", "+D", "+C", "+C", "+C", "+C", "+D", "+C", "+D", "+D", "+D", "+D", "+C", "+D", "+C", "+C", "+C", "+C", "+C", "+C", "+D", "+C", "+D", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C"),
  pro_focus = c("P&S", "OE&A", "I&DM", "I&DM", "C", "OE&A", "OE&A", "OE&A", "OE&A", "OE&A", "P&S", "P&S", "C", "OE&A", "P&S", "OE&A", "C", "I&DM", "C", "I&DM", "OE&A", "I&DM", "C", "P&S", "I&DM", "C", "OE&A", "OE&A", "I&DM", "I&DM", "OE&A", "I&DM", "I&DM", "OE&A", "OE&A", "OE&A", "OE&A", "C", "I&DM", "I&DM", "I&DM", "P&S", "I&DM", "P&S", "I&DM", "OE&A", "C", "P&S", "P&S", "OE&A", "OE&A", "C", "OE&A", "I&DM", "OE&A", "OE&A", "I&DM", "P&S"),
  pro_complexity = c("H/H", "L/L", "L/H", "H/H", "H/H", "H/L", "H/L", "H/H", "H/H", "H/H", "L/H", "H/L", "L/H", "H/H", "L/H", "H/H", "L/H", "H/H", "H/H", "H/H", "L/H", "H/H", "H/L", "H/H", "H/L", "H/H", "H/H", "H/L", "H/L", "H/H", "L/L", "L/H", "H/L", "L/L", "H/H", "L/L", "H/H", "H/H", "L/H", "L/H", "H/H", "H/H", "H/H", "L/H", "H/H", "H/H", "H/L", "H/L", "H/H", "H/L", "L/L", "L/L", "H/L", "L/L", "H/L", "H/L", "L/H", "H/H"),
  pro_impact = c("+R", "-C", "+R", "-C", "+R", "-C", "+R", "-C", "O", "O", "+R", "O", "+R", "O", "O", "-C", "+R", "O", "+R", "O", "-C", "O", "+R", "+R", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "-C", "-C", "-C", "+R", "O", "-C", "O", "+R", "O", "O", "O", "-C", "+R", "+R", "+R", "+R", "O", "-C", "-C", "-C", "-C", "-C", "O", "+R"),
  pro_mode = c("explore", "explore", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore", "explore", "exploit", "exploit", "explore", "explore", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "explore", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore"),
  pro_pathway = c("CX", "CX", "I&S", "I&S", "CX", "iterate", "new", "I&S", "I&S", "I&S", "CX", "CX", "CX", "I&S", "new", "new", "new", "I&S", "new", "I&S", "I&S", "I&S", "CX", "I&S", "I&S", "new", "I&S", "I&S", "new", "new", "I&S", "I&S", "I&S", "I&S", "I&S", "I&S", "I&S", "CX", "CX", "I&S", "CX", "CX", "new", "CX", "CX", "I&S", "CX", "CX", "CX", "I&S", "CX", "I&S", "new", "I&S", "I&S", "I&S", "I&S", "iterate"),
  stringsAsFactors=TRUE
)
#----- Dissimilarity Matrix -----#
library(cluster)
# to perform different types of hierarchical clustering
# package functions used: daisy(), diana(), clusplot()
# Gower distance handles the all-categorical columns of 'projects'
gower.dist <- daisy(projects[ ,1:6], metric = c("gower"))
# class(gower.dist)
## dissimilarity , dist
#------------ DIVISIVE CLUSTERING ------------#
# Top-down clustering of the Gower dissimilarities
divisive.clust <- diana(as.matrix(gower.dist),
                  diss = TRUE, keep.diss = TRUE)
plot(divisive.clust,
     main = "Divisive")
#------------ AGGLOMERATIVE CLUSTERING ------------#
# Bottom-up clustering with complete linkage on the same dissimilarities
aggl.clust.c <- hclust(gower.dist, method = "complete")
plot(aggl.clust.c,
     main = "Agglomerative, complete linkages")
# Cluster stats comes in a list form, it is more convenient to look at it as a table
# This code below will produce a dataframe with observations in columns and variables in row
# Not quite tidy data, but it's nicer to look at
library(fpc)
# Summarise fpc::cluster.stats() for 2..k cluster solutions of a hierarchical
# tree as one data frame: one column per tested solution ("Test 1" = 2
# clusters, ...), assessment statistics and per-cluster sizes in the rows.
#
# dist: dissimilarity ('dist') object the tree was built from
# tree: hclust/diana-style tree accepted by cutree()
# k:    largest number of clusters to evaluate
cstats.table <- function(dist, tree, k) {
  clust.assess <- c("cluster.number","n","within.cluster.ss","average.within","average.between",
                    "wb.ratio","dunn2","avg.silwidth")
  clust.size <- c("cluster.size")
  stats.names <- c()
  row.clust <- c()
  output.stats <- matrix(ncol = k, nrow = length(clust.assess))
  cluster.sizes <- matrix(ncol = k, nrow = k)
  for (i in seq_len(k)) {
    row.clust[i] <- paste("Cluster-", i, " size")
  }
  for (i in 2:k) {
    stats.names[i] <- paste("Test", i - 1)
    # PERF FIX: compute the (expensive) cluster statistics once per solution
    # instead of once per extracted value, as the original inner loops did.
    cs <- cluster.stats(d = dist, clustering = cutree(tree, k = i))
    assess.vals <- unlist(cs[clust.assess])
    for (j in seq_along(clust.assess)) {
      output.stats[j, i] <- assess.vals[j]
    }
    # cluster.size has length i, so entries beyond i are NA here and get
    # replaced by 0 below. (A dead 'dim<-' assignment from the original was
    # removed; it had no effect.)
    size.vals <- unlist(cs[clust.size])
    for (d in seq_len(k)) {
      cluster.sizes[d, i] <- size.vals[d]
    }
  }
  output.stats.df <- data.frame(output.stats)
  cluster.sizes <- data.frame(cluster.sizes)
  cluster.sizes[is.na(cluster.sizes)] <- 0
  rows.all <- c(clust.assess, row.clust)
  # Drop the empty first column (there is no 1-cluster "test")
  output <- rbind(output.stats.df, cluster.sizes)[ ,-1]
  colnames(output) <- stats.names[2:k]
  rownames(output) <- rows.all
  is.num <- sapply(output, is.numeric)
  output[is.num] <- lapply(output[is.num], round, 2)
  output
}
# Evaluate up to 10 clusters for both trees (the comments below kept the
# author's original intent of capping the number of candidate solutions)
# I want to choose a reasonable number, based on which I will be able to see basic differences between customer groups
stats.df.divisive <- cstats.table(gower.dist, divisive.clust, 10)
stats.df.divisive
stats.df.aggl <- cstats.table(gower.dist, aggl.clust.c, 10)
stats.df.aggl
# --------- Choosing the number of clusters ---------#
# Using "Elbow" and "Silhouette" methods to identify the best number of clusters
library(ggplot2)
# Elbow
# Divisive clustering: within-cluster sum of squares vs number of clusters
ggplot(data = data.frame(t(stats.df.divisive)), aes(x=cluster.number, y=within.cluster.ss)) + geom_point()+
  geom_line()+
  ggtitle("") +
  labs(x = "Num.of clusters", y = "Within sum of squares") +
  theme(plot.title = element_text(hjust = 0.5)) +
  theme_bw(base_size=20)
# Silhouette (divisive): larger average width = better-separated clusters
ggplot(data = data.frame(t(stats.df.divisive)), aes(x=cluster.number, y=avg.silwidth)) + geom_point()+
  geom_line()+
  ggtitle("Divisive clustering") +
  labs(x = "Num.of clusters", y = "Average silhouette width") +
  theme(plot.title = element_text(hjust = 0.5))
# Agglomerative clustering
# Elbow
ggplot(data = data.frame(t(stats.df.aggl)), aes(x=cluster.number, y=within.cluster.ss)) + geom_point()+
  geom_line()+
  ggtitle("") +
  labs(x = "Num.of clusters", y = "Within sum of squares") +
  theme(plot.title = element_text(hjust = 0.5)) +
  theme_bw(base_size=20)
# Silhouette (agglomerative)
ggplot(data = data.frame(t(stats.df.aggl)), aes(x=cluster.number, y=avg.silwidth)) + geom_point()+
  geom_line()+
  ggtitle("Agglomorative clustering") +
  labs(x = "Num.of clusters", y = "Average silhouette width") +
  theme(plot.title = element_text(hjust = 0.5))
# Finally, assigning the cluster number (k = 3) to each observation,
# once per clustering method
clust.num <- cutree(divisive.clust, k = 3)
id.s = c(1:58)
projects.cl <- cbind(id.s, projects, clust.num)
clust.aggl.num <- cutree(aggl.clust.c, k = 3)
id.s = c(1:58)
projects.aggl.cl <- cbind(id.s, projects, clust.aggl.num)
#projects.cl <- cbind(projects, clust.num)
library("ggplot2")
library("reshape2")
library("purrr")
library("dplyr")
library("dendextend")
dendro <- as.dendrogram(aggl.clust.c)
dendro.col <- dendro %>%
set("branches_k_color", k = 3,
value = c("gold3", "darkcyan", "cyan3")) %>%
set("branches_lwd", 0.6) %>%
set("labels_colors",
value = c("darkslategray")) %>%
set("labels_cex", 0.5)
ggd1 <- as.ggdend(dendro.col)
ggplot(ggd1, theme = theme_minimal()) +
labs(x = "Num. observations", y = "Height", title = "Dendrogram (aggl), k = 3")
# Create a radial plot
ggplot(ggd1, labels = T) +
scale_y_reverse(expand = c(0.2, 0)) +
coord_polar(theta="x")
# cust.order <- order.dendrogram(dendro)
# projects.cl.ord <- projects.cl[cust.order, ]
# 1 variable per row
# factors have to be converted to characters in order not to be dropped
cust.long <- melt(data.frame(lapply(projects.cl, as.character), stringsAsFactors=FALSE),
id.vars = c("id.s", "clust.num"), factorsAsStrings=T)
cust.aggl.long <- melt(data.frame(lapply(projects.aggl.cl, as.character), stringsAsFactors=FALSE),
id.vars = c("id.s", "clust.aggl.num"), factorsAsStrings=T)
cust.long.q <- cust.long %>%
group_by(clust.num, variable, value) %>%
mutate(count = n_distinct(id.s)) %>%
distinct(clust.num, variable, value, count)
cust.aggl.long.q <- cust.aggl.long %>%
group_by(clust.aggl.num, variable, value) %>%
mutate(count = n_distinct(id.s)) %>%
distinct(clust.aggl.num, variable, value, count)
cust.long.p <- cust.long.q %>%
group_by(clust.num, variable) %>%
mutate(perc = count / sum(count)) %>%
arrange(clust.num)
cust.aggl.long.p <- cust.aggl.long.q %>%
group_by(clust.aggl.num, variable) %>%
mutate(perc = count / sum(count)) %>%
arrange(clust.aggl.num)
# ---- Heatmaps of characteristic distributions per cluster ----
# Rows are the (hand-ordered) characteristic values, columns are clusters;
# tile fill is the within-(cluster, variable) proportion computed above.
# The horizontal lines separate the variable groups in the ordered factor.
heatmap.p <- ggplot(cust.long.p, aes(x = clust.num, y = factor(value, levels = c("1D","+D","+C",
                                                                                 "C", "P&S", "I&DM", "OE&A",
                                                                                 "L/L","L/H", "H/L", "H/H",
                                                                                 "+R","-C","O",
                                                                                 "exploit","explore",
                                                                                 "I&S","CX","iterate","new"),
                                                               ordered = T))) +
  geom_tile(aes(fill = perc), alpha = 0.85)+
  labs(title = "Distribution of characteristics across clusters", x = "Cluster number", y = NULL) +
  geom_hline(yintercept = 3.5) +
  geom_hline(yintercept = 7.5) +
  geom_hline(yintercept = 11.5) +
  geom_hline(yintercept = 14.5) +
  geom_hline(yintercept = 16.5) +
  scale_fill_gradient2(low = "darkslategray1", mid = "yellow", high = "turquoise4")
heatmap.p
# Same heatmap for the agglomerative clustering solution.
heatmap.aggl.p <- ggplot(cust.aggl.long.p, aes(x = clust.aggl.num, y = factor(value, levels = c("1D","+D","+C",
                                                                                                "C", "P&S", "I&DM", "OE&A",
                                                                                                "L/L","L/H", "H/L", "H/H",
                                                                                                "+R","-C","O",
                                                                                                "exploit","explore",
                                                                                                "I&S","CX","iterate","new"),
                                                                              ordered = T))) +
  geom_tile(aes(fill = perc), alpha = 0.85)+
  labs(title = "Distribution of characteristics across (aggl) clusters", x = "Cluster number", y = NULL) +
  geom_hline(yintercept = 3.5) +
  geom_hline(yintercept = 7.5) +
  geom_hline(yintercept = 11.5) +
  geom_hline(yintercept = 14.5) +
  geom_hline(yintercept = 16.5) +
  scale_fill_gradient2(low = "darkslategray1", mid = "yellow", high = "turquoise4")
heatmap.aggl.p
|
b80f1e2882f77666da50b18fc51bc4b90daa4372
|
bf745c74482e237f777aa00c20e3bbf9f4cfa0bf
|
/EjBrandEval/R/process.Ej.cjs.r
|
1a1f4c4689d7f665033ab6f30bf7bc57dd6c3058
|
[] |
no_license
|
jlaake/EjBrandEval
|
5c3f11a2d3c3e320b56c555ab81db9afde3a9a6f
|
e284c94e4156139e46349d2ccfa57dfa206383df
|
refs/heads/master
| 2021-01-18T21:56:57.349758
| 2016-05-24T22:09:59
| 2016-05-24T22:09:59
| 2,009,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,923
|
r
|
process.Ej.cjs.r
|
#' Prepares data for running RMark models
#'
#' Prepares data by running the process.data and make.design.data steps for
#' RMark. Creates occasion-specific design data for groups and for occasions
#' based on the platform used for re-sighting.
#'
#' @export
#' @param ej.list list that results from running \code{\link{extract.Ej}}
#' @return \item{data.proc}{Processed data list for RMark} \item{ddl}{Design
#' data list for RMark}
#' @author Jeff Laake
#' @examples
#'
#' ej.list=extract.Ej()
#' ej.list=process.Ej.cjs(ej.list)
#' p=vector("list",2)
#' p[[1]]=list(formula=~late:Tag:time + time + patch1:Tag + patch2:Tag + patch3:Tag + scope:Tag + camera:Tag)
#' p[[2]]=list(formula=~late:Tag + time + patch1:Tag + patch2:Tag + patch3:Tag + scope:Tag + camera:Tag)
#' Phi=vector("list",1)
#' Phi[[1]]=list(formula=~early:trt+Brand:batch)
#' results=run.cjs.models(p,Phi,ej.list)
#'
process.Ej.cjs <- function(ej.list)
{
  # Create the time interval vector with a unit interval of 7 days
  # (intervals between successive occasions, in weeks).
  ti=as.numeric(diff(ej.list$times)/7)
  # Process data for a CJS model, grouped by the study design factors.
  ej.proc=process.data(ej.list$data,model="CJS",groups=c("trt","tag","experiment","brand","batch","brander","anesthesiologist","sex","treatno"),time.intervals=ti,begin.time=0)
  # Create design data with time-varying PIMs for both Phi and p.
  ej.ddl=make.design.data(ej.proc,parameters=list(Phi=list(pim.type="time"),p=list(pim.type="time")))
  # Add an early/late split on Phi and p.
  # early: survival occasions before Time 10; late: detection from Time 11 on.
  ej.ddl$Phi$early=0
  ej.ddl$Phi$early[ej.ddl$Phi$Time<10]=1
  ej.ddl$p$late=1
  ej.ddl$p$late[ej.ddl$p$Time<11]=0
  # Numeric Brand indicator (brand is a factor of "0"/"1") and its
  # complement Tag, for both p and Phi design data.
  ej.ddl$p$Brand=as.numeric(as.character(ej.ddl$p$brand))
  ej.ddl$p$Tag=1-ej.ddl$p$Brand
  ej.ddl$Phi$Brand=as.numeric(as.character(ej.ddl$Phi$brand))
  ej.ddl$Phi$Tag=1-ej.ddl$Phi$Brand
  # Create an occasion data frame to merge with the p design data.
  times=cumsum(ti)
  # Hard-coded occasion indicators over 63 occasions; these need to be
  # changed if occasions are added.
  # NOTE(review): patch1 = occasions 1-5, patch2 = 6-10, patch3 = 11-15,
  # yp = first 15 occasions -- confirm these windows against the study design.
  patch1=c(rep(1,5),rep(0,58))
  patch2=c(rep(0,5),rep(1,5),rep(0,53))
  patch3=c(rep(0,10),rep(1,5),rep(0,48))
  yp=c(rep(1,15),rep(0,48))
  # Platform indicators by occasion (columns of ej.list$platform).
  camera=ej.list$platform[,1]
  vessel=ej.list$platform[,2]
  scope=ej.list$platform[,3]
  xcov=data.frame(patch1=patch1,patch2=patch2,patch3=patch3,yp=yp,camera=camera,vessel=vessel,scope=scope)
  # Scenario-specific trimming of the occasion covariates.
  if(ej.list$scenario==2)xcov=xcov[1:50,]
  if(ej.list$scenario==4)xcov=xcov[-c(54,55,57,59),]
  ej.ddl=merge_design.covariates(ej.ddl,"p",cbind(times=times,xcov))
  # extra flags detections outside the experiment (experiment == 0).
  ej.ddl$p$extra=0
  ej.ddl$p$extra[ej.ddl$p$experiment==0]=1
  # Threshold indicators splitting the survival timeline into windows:
  # threshold1: Time < 4; threshold2: 4 <= Time < 8; threshold3: 4 <= Time < 6;
  # threshold4: 6 <= Time < 8; threshold5: 8 <= Time < 10.
  ej.ddl$Phi$threshold1=0
  ej.ddl$Phi$threshold1[ej.ddl$Phi$Time<4]=1
  ej.ddl$Phi$threshold2=0
  ej.ddl$Phi$threshold2[ej.ddl$Phi$Time>=4&ej.ddl$Phi$Time<8]=1
  ej.ddl$Phi$threshold3=0
  ej.ddl$Phi$threshold3[ej.ddl$Phi$Time>=4&ej.ddl$Phi$Time<6]=1
  ej.ddl$Phi$threshold4=0
  ej.ddl$Phi$threshold4[ej.ddl$Phi$Time>=6&ej.ddl$Phi$Time<8]=1
  ej.ddl$Phi$threshold5=0
  ej.ddl$Phi$threshold5[ej.ddl$Phi$Time>=8&ej.ddl$Phi$Time<10]=1
  return(list(data.proc=ej.proc,ddl=ej.ddl))
}
|
f95e42650906c1a1c0a17119ddf31731761933af
|
29891624cdb77ca6a43b683cc8d668612590e877
|
/R/get.index.mat.R
|
dd4892439dd94debc5e183724e0515dd5532b228
|
[] |
no_license
|
kellijohnson-NOAA/saconvert
|
e8f3d0aa853cf58a050826ccdf4aa35804b1556e
|
d004f5cee8af1edb27fe8a15ffac41cfc1ac61d6
|
refs/heads/master
| 2022-07-07T16:04:06.041578
| 2022-01-16T15:41:23
| 2022-01-18T14:39:01
| 230,995,952
| 0
| 2
| null | 2021-07-09T17:24:02
| 2019-12-30T23:56:04
|
R
|
UTF-8
|
R
| false
| false
| 1,134
|
r
|
get.index.mat.R
|
#' Build a survey index matrix in the assessment input format.
#'
#' @param x Matrix of survey observations with year labels as rownames and
#'   one column per survey age. \code{NA} entries are treated as zero.
#' @param cv Coefficient of variation applied to every observed year.
#' @param neff Effective sample size applied to every observed year.
#' @param first.year First model year.
#' @param nyears Number of model years.
#' @param catch.ages Vector of catch ages; its length fixes the number of
#'   age columns in the output.
#' @param survey.ages Vector of ages observed by the survey (a subset of
#'   \code{catch.ages}); must match the columns of \code{x}.
#' @return Matrix with \code{nyears} rows and \code{length(catch.ages) + 4}
#'   columns: year, total index, CV, one column per catch age (survey ages
#'   filled, others zero), and effective sample size. Years without survey
#'   data are left as zeros (except the year column).
get.index.mat <- function(x, cv, neff, first.year, nyears, catch.ages, survey.ages) {
  n.ages <- length(catch.ages)
  # All model years, and the rows of x that fall inside that range.
  all.years <- first.year - 1 + seq_len(nyears)
  obs.years <- as.numeric(rownames(x))
  years.use.ind <- which(obs.years %in% all.years)
  # Output columns corresponding to the survey's ages.
  survey.ages.index <- which(catch.ages %in% survey.ages)
  i.mat <- matrix(0, nyears, n.ages + 4)
  i.mat[, 1] <- all.years
  # Treat missing observations as zero catch before summing.
  x[is.na(x)] <- 0
  # drop = FALSE keeps x a matrix even when only one year is used; the
  # original apply() call failed in that case. (Leftover debug print()
  # calls have been removed.)
  tmp.ind.total <- apply(x[years.use.ind, , drop = FALSE], 1, sum)
  # Output rows corresponding to the observed years.
  i.mat.ind <- which(all.years %in% obs.years[years.use.ind])
  i.mat[i.mat.ind, 2:3] <- cbind(tmp.ind.total, rep(cv, length(years.use.ind)))
  i.mat[i.mat.ind, 3 + survey.ages.index] <- x[years.use.ind, , drop = FALSE]
  i.mat[i.mat.ind, n.ages + 4] <- rep(neff, length(years.use.ind))
  i.mat
}
|
9f34517ce67308434ac19640406e6a3f254de609
|
246f3d934ab5aad0a6ea7f34e46ccf80cc11b8fd
|
/code/part_3.R
|
ff010ebf6494673b037041cb42b4fbe72aa48b09
|
[] |
no_license
|
ewong027/stats133-final-project
|
944299a2678a4dcfde390ad22f71c39efa739389
|
6d4b76ba5e62bc25fffecdeb87c7878ae9c2d169
|
refs/heads/master
| 2020-06-10T08:26:11.924729
| 2015-12-10T21:28:27
| 2015-12-10T21:28:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,276
|
r
|
part_3.R
|
# ======================================================================
# Part 3: Type analysis
# Description: Here we are looking at how the most common types of
# airplanes to crash changed over different decades.
# ======================================================================
# Note: must have run part_2 first -- it defines `data` and `decade_names`
# used below.
## ---- Preliminary ----
# packages needed
library(stringr)
library(ggplot2)
# functions needed
source('../code/fun_top3.R')
## ---- comment ----
# Here we are parsing out the data by decade so that we can plot by type and
# extracting just the top three values.
## ---- Top3 Array ----
top3_array <- fun_top3(data, decade_names)
## ---- comment ----
# Now we want to create a data frame that combines the data from each
# decade in a way that we can plot it efficiently.
## ---- Reorganizing ----
type <- names(top3_array)
freq <- as.vector(top3_array)
decade <- rep(decade_names, each = 3)  # three (type, freq) entries per decade
top3 <- data.frame(type = type, freq = freq, decade = decade)
## ---- comment ----
# These are the two sets of decades we want to focus on.
# Because of the differences among our data sets, we want to look at:
# - from 1940-1980
# - from 1980-2010
## ---- Parsing: 1940-1980 ----
decade_names_1 <- decade_names[3:6]
type_1 <- type[7:18]
freq_1 <- freq[7:18]
decade_1 <- rep(decade_names_1, each = 3)
top3_1 <- data.frame(type = type_1, freq = freq_1, decade = decade_1)
# plotting the trend (on screen)
ggplot(top3_1, aes(x = decade, y = freq, fill = type))+
  geom_bar( stat = 'identity', position = position_dodge())+
  ggtitle('Most Common Planes in Accidents 1940s to 1970s')+
  theme(plot.title = element_text(size = rel(.75)))
## ---- Parsing: 1980-2010 ----
decade_names_2 <- decade_names[7:9]
type_2 <- type[19:27]
freq_2 <- freq[19:27]
decade_2 <- rep(decade_names_2, each = 3)
top3_2 <- data.frame(type = type_2, freq = freq_2, decade = decade_2)
# plotting the trend (on screen)
ggplot(top3_2, aes(x = decade, y = freq, fill = type))+
  geom_bar( stat = 'identity', position = position_dodge())+
  ggtitle('Most Common Planes in Accidents 1980s to 2000s')
## ---- Exporting the Graphics ----
# Exporting the Graphics
# PDF
pdf('../plots_and_graphics/most_common_planes_in_accidents_1940s_to_1970s.pdf')
ggplot(top3_1, aes(x = decade, y = freq, fill = type))+
  geom_bar( stat = 'identity', position = position_dodge())+
  ggtitle('Top 3 Planes in Accidents 1940s to 1970s')
dev.off()
pdf('../plots_and_graphics/most_common_planes_in_accidents_1980s_to_2000s.pdf')
ggplot(top3_2, aes(x = decade, y = freq, fill = type))+
  geom_bar( stat = 'identity', position = position_dodge())+
  ggtitle('Top 3 Planes in Accidents 1980s to 2000s')
dev.off()
# PNG
png('../plots_and_graphics/most_common_planes_in_accidents_1940s_to_1970s.png',
    res = 96, width = 700, height = 500)
ggplot(top3_1, aes(x = decade, y = freq, fill = type))+
  geom_bar( stat = 'identity', position = position_dodge())+
  ggtitle('Top 3 Planes in Accidents 1940s to 1970s')
dev.off()
# FIX: width/height added so this export matches the 700x500 dimensions of
# the PNG above (it previously fell back to the 480x480 device default).
png('../plots_and_graphics/most_common_planes_in_accidents_1980s_to_2000s.png',
    res = 96, width = 700, height = 500)
ggplot(top3_2, aes(x = decade, y = freq, fill = type))+
  geom_bar( stat = 'identity', position = position_dodge())+
  ggtitle('Top 3 Planes in Accidents 1980s to 2000s')
dev.off()
|
ef55269907ea1364f995fe54dd5d70dfd2902631
|
11dd3782354ca82cc5dfc6996d3b707e5f563010
|
/man/taxa_rollup.Rd
|
75c0a59c3bbfb27b84ff703b9c81f8fd7f48806f
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
pmartR/pmartRseq
|
49010cceff1a173e924e7b467781cec86c0aaae6
|
75d573e528d14a6563a69be46c9a326c13423738
|
refs/heads/master
| 2020-12-31T07:33:06.348449
| 2018-02-09T00:39:18
| 2018-02-09T00:39:18
| 86,605,696
| 2
| 1
|
BSD-2-Clause
| 2018-02-09T00:39:19
| 2017-03-29T16:35:39
|
R
|
UTF-8
|
R
| false
| true
| 1,314
|
rd
|
taxa_rollup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxa_rollup.R
\name{taxa_rollup}
\alias{taxa_rollup}
\title{Roll up data to a specified taxonomic level}
\usage{
taxa_rollup(omicsData, level, taxa_levels = NULL)
}
\arguments{
\item{omicsData}{an object of the class 'seqData' created by \code{\link{as.seqData}}.}
\item{level}{taxonomic level to roll up to.}
\item{taxa_levels}{The levels of taxonomy (or other e_meta object) which might be used in the roll up. If NULL, will use c("Kingdom","Phylum","Class","Order","Family","Genus","Species"), in that order. Default is NULL.}
}
\value{
A seqData object of the same class as the input, where e_data and e_meta are rolled up to the specified level.
}
\description{
This function rolls up data to the specified taxonomic level so that statistics may be calculated at various taxonomic levels.
}
\details{
Data will be rolled (summed) up to a specified taxonomic level. For example, data at the OTU level could be rolled (summed) up to the Genus level before statistics are computed.
}
\examples{
\dontrun{
library(mintJansson)
data(rRNA_data)
rRNA_split <- split_emeta(rRNA_data)
rRNA_rollup <- taxa_rollup(omicsData = rRNA_split, level = "Phylum")
dim(rRNA_rollup$e_data)
attributes(rRNA_rollup)
}
}
\author{
Allison Thompson
}
|
1d93e92dbd3dd1686ccb693d119e709fd4b4b213
|
4200785bd7e0ba6c0b83bea00357cefd9a550510
|
/plot1.R
|
187076326df51a304e94b178d9d07fd78c6fe68b
|
[] |
no_license
|
meganminshew/ExData_Plotting1
|
245d7efebb2851db6c4c3c78823ae374d9bb842d
|
271c269b6c02fab1eab5b0cf2e84a7dcfa99f33e
|
refs/heads/master
| 2021-01-18T05:06:44.038840
| 2014-05-09T15:54:30
| 2014-05-09T15:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
r
|
plot1.R
|
## plot1.R -- histogram of Global Active Power for 2007-02-01/02.
## Expects household_power_consumption.txt in the working directory.

# Load the raw file: semicolon-delimited, every column read as character,
# with "?" entries becoming NA.
power <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE, colClasses = "character")
# Parse the Date column so the two target days can be selected.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
# Keep only the two days of interest.
power <- subset(power, power$Date == "2007-02-01" | power$Date == "2007-02-02")
# The measurement of interest, converted from character to numeric.
gap <- as.numeric(power$Global_active_power)
# Render the histogram straight to a PNG device.
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
hist(gap, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
2c966ae010fe65346653de89036a6f9bf3b7a7fa
|
cfde857379a0b0a9b216cf08fa99449045bf4997
|
/supervised/generative_learning/naive_bayes/golf_play.R
|
6cefa80538c7b71f131bdc8c9b8fe40f380f79f4
|
[] |
no_license
|
sureshpodeti/Machine-Learning
|
c0a1922e77bba2b6cbbd596ae075c99a287ee7f2
|
32922a1c307c9666b0143e98c975e5a0c0ce26b4
|
refs/heads/master
| 2021-06-02T06:27:41.698147
| 2020-06-29T03:43:03
| 2020-06-29T03:43:03
| 131,320,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 677
|
r
|
golf_play.R
|
# golf_play.R -- load the golf-play dataset, split it into train/test sets,
# and partition the training rows by class label (last column).
library(caret)

# Read the raw data; the final column holds the binary class label.
df <- read.csv("/home/podeti/Desktop/AI/Machine-Learning/Data/golf_play.csv")
n <- ncol(df)

# Work with a numeric matrix representation of the data frame.
data <- data.matrix(df)

# Stratified 70/30 split on the label column.
partitionIndex <- createDataPartition(data[, n], p = 0.7, list = FALSE)
data_train <- data[partitionIndex, ]
data_test <- data[-partitionIndex, ]

# Separate the training rows by class (y == 0 vs y == 1).
data_train_y_0 <- data_train[data_train[, n] == 0, ]
data_train_y_1 <- data_train[data_train[, n] == 1, ]

# Features (all but the last column) and targets (last column) per class.
data_train_features_y_0 <- data_train_y_0[, -n]
data_train_target_y_0 <- data_train_y_0[, n]
data_train_features_y_1 <- data_train_y_1[, -n]
data_train_target_y_1 <- data_train_y_1[, n]
|
a7174d367bb2a6581fc54b6e7ae5cc367675ee19
|
5c1426dddbe4f3e13929b0f9c44569be2e0885fa
|
/man/get.run.par.Rd
|
73b80fdd5c3f773a0070c5a07070c3389cbd41c1
|
[] |
no_license
|
cran/dynatopmodel
|
1ae2d5695d24be40f0f5c1f5e30526fd8bfb06ed
|
0d7d074b022452f925c8de4271d4a843882ac84c
|
refs/heads/master
| 2021-01-21T21:54:23.182728
| 2018-01-19T13:37:01
| 2018-01-19T13:37:01
| 19,747,316
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 837
|
rd
|
get.run.par.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defs.r
\name{get.run.par}
\alias{get.run.par}
\title{get.run.par
Initialise model run parameters. Note this function is maintained for backward compatibility only}
\usage{
get.run.par(tms = NULL, dt = NULL, units = "secs", ...)
}
\arguments{
\item{tms}{xts time series or an object that has a POSIXct index}
\item{dt}{Numeric Time step in seconds}
\item{units}{string Units for dt.}
\item{...}{Any other parameters returned by get.run.par}
}
\value{
Structure to maintain run information.
}
\description{
get.run.par
Initialise model run parameters. Note this function is maintained for backward compatibility only
}
\details{
The returned value includes the simulation times calculated from the supplied time range and interval
}
|
0e8463e2711920068daedfc3b4a7d62e086523e7
|
29587cb4ef25b1db3338bd2cc72d944da87d4f1d
|
/alt-as-sin-2016.R
|
a45891da13f1aa2bd78fe250770871cbd43efbcb
|
[] |
no_license
|
kleinschmidt/brewnotes
|
687ddaed6ebce32083eedb64cdaf4ad5abd70b4b
|
30b71f81ecb74a05377369336cec723b701987bc
|
refs/heads/master
| 2023-02-27T16:27:26.756927
| 2021-01-30T02:32:59
| 2021-01-30T02:32:59
| 334,311,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
alt-as-sin-2016.R
|
# Brew-day water calculation for the 2016 "Alt as Sin" batch.
library(magrittr)
devtools::load_all()

# Strike/decoction/top-off/sparge water volume (gallons) for 10.5 lb of
# grain at a 1.5 mash thickness.
water_gal <- brewnotes::strike_decoc_topoff_sparge_gal(grain_lbs = 10.5,
                                                       mash_thickness = 1.5)
print(water_gal)

# Convert to pounds and add the tare weight of the bucket (2.3 lb).
brewnotes::gal_to_lbs(water_gal) + 2.3
|
e946b8dbea5d26eee26f9718c97e70dcb5ae60a0
|
d1aba4f6a11564cc8f1f9d3b64801cb916a27687
|
/plot2.R
|
beb3099ebaab548178f56f1ee8c35a751984a99c
|
[] |
no_license
|
dvsdimas/exploratory-analysis-project-2
|
9a1e412eeef661c69a13acaade4448d5957e1b91
|
fee9e4f5275b6c42da843e953d7ca1d9e4fff9ae
|
refs/heads/master
| 2021-06-10T15:37:11.930301
| 2016-12-12T17:50:44
| 2016-12-12T17:50:44
| 76,126,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,195
|
r
|
plot2.R
|
require(dplyr)
# Download (if necessary) and load the EPA National Emissions Inventory data.
#
# Ensures ./data exists, downloads and unpacks the course zip archive when
# either RDS file is missing, then reads both files and sanity-checks their
# dimensions, stopping with an error at any failed step.
#
# Returns a list with:
#   NEI - PM2.5 emissions summary data (expected 6497651 x 6)
#   SCC - source classification code table (expected 11717 x 15)
get_data <- function() {
  data_folder <- file.path(getwd(), "data")
  file_source <- file.path(data_folder, "Source_Classification_Code.rds")
  file_summary <- file.path(data_folder, "summarySCC_PM25.rds")
  # Create the data folder on first run; fail loudly if that is impossible.
  if(!dir.exists(data_folder)) {
    dir.create(data_folder)
    if(!dir.exists(data_folder)){
      stop(paste0("Cannot create folder ", data_folder))
    }
  }
  # Fetch and unpack the archive only when the RDS files are not yet present.
  if(!file.exists(file_source) || !file.exists(file_summary)) {
    zip_file <- file.path(data_folder, "exdata.zip")
    if(!file.exists(zip_file)) {
      data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
      download.file(data_url, destfile = zip_file)
      if(!file.exists(zip_file)) {
        stop(paste0("Cannot download zip file : ", data_url))
      }
    }
    unzip(zip_file, exdir = data_folder)
    if(!file.exists(file_source) || !file.exists(file_summary)) {
      stop(paste0("Cannot unpack zip file : ", zip_file))
    }
    # Remove the archive once it has been successfully unpacked.
    unlink(zip_file)
  }
  # Load both datasets and verify they have the expected dimensions.
  NEI <- readRDS(file_summary)
  if( (dim(NEI)[1] != 6497651) || (dim(NEI)[2] != 6) ) {
    stop("NEI data has wrong format")
  }
  SCC <- readRDS(file_source)
  if( (dim(SCC)[1] != 11717) || (dim(SCC)[2] != 15) ) {
    stop("SCC data has wrong format")
  }
  list(NEI = NEI, SCC = SCC)
}
# Module-level cache for the loaded datasets (NULL until first load).
ret <<- NULL

# Lazily load and memoise the NEI/SCC datasets: the RDS files are large,
# so read them at most once per session via get_data().
get_cdata <- function() {
  if (is.null(ret)) {
    ret <<- get_data()
  }
  # FIX: return the cache explicitly so the result is visible in both the
  # hit and the miss case (previously a cache miss returned the value of a
  # superassignment, which is invisible).
  ret
}
# Question: Have total emissions from PM2.5 decreased in the Baltimore City,
# Maryland (fips == "24510") from 1999 to 2008?
# Use the base plotting system to make a plot answering this question.
D <- get_cdata()
# Yearly emission statistic for Baltimore City only.
# NOTE(review): this computes the yearly *mean* emission per record, although
# the question asks about *total* emissions (a sum) -- confirm intent.
trend <- D$NEI %>%
  filter(fips == "24510") %>%
  group_by(year) %>%
  summarise(mean = mean(Emissions, na.rm = TRUE))
# Render the trend as a line plot straight to a PNG device.
png("plot2.png", width = 480, height = 480)
plot(trend$year, trend$mean, main = "Total emissions PM2.5 in the Baltimore City, MD",
     ylab = "PM2.5, tons", xlab = "Year", type = "l")
dev.off()
|
10d50bfeb4a12fa7dba26f595e7305040d3662cc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LilRhino/examples/Codes_done.Rd.R
|
f4a95c5e38e13a6d2f9ef080706ebdb3f1e6df23
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
Codes_done.Rd.R
|
# Extracted example for LilRhino::Codes_done -- a notification helper for
# announcing when long-running code has finished.
library(LilRhino)
### Name: Codes_done
### Title: For announcing when code is done.
### Aliases: Codes_done
### ** Examples
# Announce completion; with sound = TRUE, effect 1 is played.
# (See ?Codes_done for the exact meaning of the first two arguments.)
Codes_done("done", "check it", sound = TRUE, effect = 1)
|
ad81dc83bbe0ccc31e5698c0fa7c49a80bb77254
|
60d84fe954bc0dbcf75d254991ec730dd71e67f3
|
/Rfiles/tables/table_3_html_ETo_tower_MOD16.R
|
bd121b996adc6edcce028869e1ca32cfba8092c3
|
[] |
no_license
|
tbiggsgithub/SEBAL_ET_CA
|
024cd94b604684ec10807380b1b0685d10e2370b
|
99b9d5349aba55c0109de15a5bc85b934a96dba7
|
refs/heads/master
| 2021-01-01T06:04:57.573882
| 2015-02-15T20:56:15
| 2015-02-15T20:56:15
| 30,841,717
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 627
|
r
|
table_3_html_ETo_tower_MOD16.R
|
# Export Table 3 (ETo comparison between towers and MOD16) as an HTML file.
# Assumes `statsout2` already exists in the workspace -- it is produced by
# the plotting script named in `sourcefile` below.
library(htmlTable)
# Output directory for tables, and provenance of the statsout2 object.
outdir.tables = "G:/mydocuments/SDSU/research/CA/ET_MOD16_SEBAL_towers/writeups/tables/"
sourcedir = "G:/mydocuments/SDSU/research/CA/ET_MOD16_SEBAL_towers/Rfiles/plots/"
sourcefile = "plot_ts_PET_MOD16_tower_multiple_in_one.R"
# Column headers for the exported table.
names(statsout2) = c("Tower","MOD16","Error %")
outhtml = htmlTable(statsout2,
                    rowlabel = "Tower name",
                    cgroup = c("ETo",""),
                    n.cgroup = c(2,1),
                    caption = "Table 3. Comparison of mean seasonal reference evapotranspiration (ETo) from MOD16 and towers.",
)
# Write the HTML table to disk by redirecting print output.
setwd(outdir.tables)
sink("Table_xx_ETo_tower_MOD16_error.html")
print(outhtml,type="html",useViewer=FALSE)
sink()
|
be4bb64216599097635503cb7ba91fa1e7b98088
|
466a14350411044a071faa1702294d06b1543edf
|
/man/rattle.print.summary.multinom.Rd
|
587b8520abefb9f93134d22672650e4d101a0d2b
|
[] |
no_license
|
cran/rattle
|
8fc67846c6dac6c282e905fe87ff38f0694056da
|
3875c10d0ae6c7a499d918bc501e121861067e06
|
refs/heads/master
| 2022-05-02T14:29:57.688324
| 2022-03-21T12:10:02
| 2022-03-21T12:10:02
| 17,699,048
| 18
| 33
| null | null | null | null |
UTF-8
|
R
| false
| false
| 694
|
rd
|
rattle.print.summary.multinom.Rd
|
\name{rattle.print.summary.multinom}
\alias{rattle.print.summary.multinom}
\title{
Print information about a multinomial model
}
\description{
Displays a textual review of the performance of a multinom model.
}
\usage{
rattle.print.summary.multinom(x, digits = x$digits, ...)
}
\arguments{
\item{x}{An rpart object.}
\item{digits}{Number of digits to print for numbers.}
\item{...}{Other arguments.}
}
\details{
Print a summary of a multinom model. This is simply a modification of
the print.summary.multinom function to add the number of entities.
}
\references{Package home page: \url{https://rattle.togaware.com}}
\author{\email{Graham.Williams@togaware.com}}
|
17466ed5d0967ca74826fc7a5f5e32f395fdae8a
|
599c2cf0ad1b158138c78b5c6c4c2804bbeb45d0
|
/R/oneHot.R
|
175ce7816e63c05034ced0905f0b8629e584de88
|
[] |
no_license
|
tlarzg/rtemis
|
b12efae30483c52440cc2402383e58b66fdd9229
|
ffd00189f6b703fe8ebbd161db209d8b0f9f6ab4
|
refs/heads/master
| 2023-07-07T20:53:43.066319
| 2021-08-27T03:42:19
| 2021-08-27T03:42:19
| 400,347,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
oneHot.R
|
# oneHot.R
# ::rtemis::
# 2019 E.D. Gennatas lambdamd.org
#' One hot encoding
#'
#' One hot encode a vector or factors in a data.frame
#'
#' A vector input will be one-hot encoded regardless of type by looking at all unique values. With data.frame input,
#' only columns of type factor will be one-hot encoded. This function is used by \link{preprocess}
#' @param x Vector or data.frame
#' @param verbose Logical: If TRUE, print messages to console. Default = FALSE
#' @return For vector input, a one-hot-encoded matrix, for data.frame input, an expanded data.frame where all
#' factors are one-hot encoded
#' @author E.D. Gennatas
#' @export
# S3 generic: dispatches on the class of x (methods defined elsewhere).
oneHot <- function(x, verbose = FALSE) {
  UseMethod("oneHot", x)
} # rtemis::oneHot
|
b2b381831832a5baffc1cc1f3161fcbad9b25610
|
e49c37520df710db3c236c7a764dcad107332540
|
/plot4.R
|
881daededcd8f6cf2763621e382b36d8a426935f
|
[] |
no_license
|
gvaljak/ExData_Plotting1
|
61e53841a3d51a37ab9043a1ec0b99f2e3e61e6b
|
fa0bbe6e85f9dedfba19094459bedda31e1dbf58
|
refs/heads/master
| 2021-05-31T21:19:27.594918
| 2016-04-07T10:05:31
| 2016-04-07T10:05:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,592
|
r
|
plot4.R
|
## plot4.R -- four-panel base-graphics summary of household power use on
## 2007-02-01 and 2007-02-02. Expects household_power_consumption.txt in
## the working directory.

# Read the raw semicolon-delimited file; "?" entries become NA.
power_raw <- read.csv("household_power_consumption.txt", sep = ";", na.strings = c("?"))
# Drop rows with any missing measurement.
power_clean <- na.omit(power_raw)
# Build the full timestamp first (Date is still character here), then parse Date.
power_clean$Time <- strptime(paste(power_clean$Date, power_clean$Time), format = "%d/%m/%Y %H:%M:%S")
power_clean$Date <- as.Date(power_clean$Date, format = "%d/%m/%Y")
# Keep only the two days of interest.
power <- subset(power_clean, Date >= "2007-02-01" & Date <= "2007-02-02")

# Plot directly to PNG; copying from the screen device can lose parts of
# the graph.
png("plot4.png")
# 2 x 2 panel layout, filled column by column.
par(mfcol = c(2, 2))

# Panel 1 (top left): global active power over time.
plot(power$Time, power$Global_active_power, type = "n", xlab = "", ylab = "Global Active Power")
lines(power$Time, power$Global_active_power)

# Panel 2 (bottom left): the three sub-metering series, with a legend.
plot(power$Time, power$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(power$Time, power$Sub_metering_1)
lines(power$Time, power$Sub_metering_2, col = "red")
lines(power$Time, power$Sub_metering_3, col = "blue")
legend("topright", bty = "n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1, 1, 1))

# Panel 3 (top right): voltage over time.
plot(power$Time, power$Voltage, xlab = "datetime", ylab = "Voltage", type = "n")
lines(power$Time, power$Voltage)

# Panel 4 (bottom right): global reactive power over time.
plot(power$Time, power$Global_reactive_power, xlab = "datetime", type = "n")
lines(power$Time, power$Global_reactive_power)

# Close the device to flush the file.
dev.off()
|
a3e27717e3be0754676b1928cc961250fa829387
|
a7840bbed633bf24e8040d9a28fb87b48190483a
|
/man/CreateFlowChart.Rd
|
356bc63944472e66d9adac510e06c632b3136a20
|
[
"MIT"
] |
permissive
|
DavideMessinaARS/CreateFlowChart
|
1a1abf55564bdd186e688cc60d3dfca6dd353e0e
|
9fb860905f2e458b650d0dfcf8786b8f3b9fc90b
|
refs/heads/main
| 2023-03-04T14:54:55.582627
| 2021-02-16T09:59:01
| 2021-02-16T09:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,279
|
rd
|
CreateFlowChart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateFlowChart.R
\name{CreateFlowChart}
\alias{CreateFlowChart}
\title{'CreateFlowChart'}
\usage{
CreateFlowChart(dataset, listcriteria, weight, strata, flowchartname)
}
\arguments{
\item{dataset}{input dataset to work with}
\item{listcriteria}{list of boolean/binary variables}
\item{weight}{(optional) weight variable: in the input dataset each row may represent multiple unit of observations, if this is the case weight contains the weight of each row}
\item{strata}{(optional) categorical variable representing strata}
\item{flowchartname}{filename (possibly with path) of the output dataset containing the flowchart}
}
\description{
CreateFlowChart takes as input a dataset where a list of exclusion criteria is represented as binary or boolean variables. The outputs are two datasets: (a) the input dataset itself, restricted to the sole rows which don't match any exclusion criterion; this dataset is returned at the end of the function, and (b) the flowchart representing how many units were discarded by each criterion; this dataset is saved in the R environment. Criteria are considered to be hierarchical. As an option, the count is performed across strata of a categorical variable.
}
|
501be72617809c06bd1e82c15d72e7a66f91fde7
|
275791c3aad442f01680b1ef443ce1b615008ef2
|
/day24.R
|
76b7f0e3304e6007f791cbe55a1aeb50f81244ee
|
[] |
no_license
|
mdequeljoe/aoc2020
|
429532169b7413d976dd5294c55d16adc4bae14b
|
31483376883c538ce4edcbfff4c946fb35d1ad71
|
refs/heads/master
| 2023-02-15T17:42:01.100379
| 2021-01-10T12:50:12
| 2021-01-10T12:50:12
| 318,455,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,554
|
r
|
day24.R
|
# Build a hexagon from one known corner.
#
# Given the name of a corner (`point`, one of nw/n/ne/se/s/sw) and its
# coordinate, walk around all six corners and return the hexagon as a list
# of its six sides (w/nw/ne/e/se/sw), each side being the pair of corner
# coordinates that bound it.
hex <- function(point, coord) {
  # Offset to the next corner and that corner's name, for each corner.
  step <- list(
    nw = list(d = c(1, -1), link = "n"),
    n = list(d = c(1, 1), link = "ne"),
    ne = list(d = c(0, 1), link = "se"),
    se = list(d = c(-1, 1), link = "s"),
    s = list(d = c(-1, -1), link = "sw"),
    sw = list(d = c(0, -1), link = "nw")
  )
  # Start from the known corner and visit the remaining five.
  corners <- setNames(list(coord), point)
  for (k in seq_len(length(step))) {
    move <- step[[point]]
    coord <- coord + move$d
    point <- move$link
    corners[[point]] <- coord
  }
  # Each side is delimited by two adjacent corners.
  list(
    w = corners[c("sw", "nw")],
    nw = corners[c("nw", "n")],
    ne = corners[c("n", "ne")],
    e = corners[c("se", "ne")],
    se = corners[c("s", "se")],
    sw = corners[c("sw", "s")]
  )
}
# Map each hexagon side to the opposite side (the one a neighbouring
# hexagon shares with it).
hex_links <- function() {
  as.list(setNames(
    c("e", "w", "se", "nw", "sw", "ne"),
    c("w", "e", "nw", "se", "ne", "sw")
  ))
}
# Follow a tokenised direction sequence across the hexagonal grid.
#
# `x` is a character vector of single characters; two-letter directions
# (nw/ne/sw/se) arrive as two consecutive tokens and are re-joined here.
# Returns the list of hexagons visited, starting from a hexagon anchored
# at nw = (0, 0).
hex_path <- function(x) {
  i <- 1
  links <- hex_links()
  sides <- names(links)
  # Note: `h <- hex(...)` inside list() both seeds the result list and
  # binds `h` for use in the loop below.
  res <- list(h <- hex('nw', c(0, 0)))
  repeat {
    if (i > length(x))
      break()
    p <- x[i]
    # Not a complete side name yet: consume the next token as well
    # (e.g. "n" + "w" -> "nw").
    if (!p %in% sides) {
      p <- paste0(p, x[i + 1])
      i <- i + 1
    }
    side <- h[[p]]
    # The shared side, seen from the neighbouring hexagon, carries the
    # corner names of the opposite direction.
    adj_points <- names(h[[links[[p]]]])
    adj_side <- setNames(side, adj_points)
    # Rebuild the hexagon on the far side of the shared edge and record it.
    h <- hex(names(adj_side)[1], adj_side[[1]])
    res[[length(res) + 1]] <- h
    i <- i + 1
  }
  res
}
# ---- Part one ----
# Read the puzzle input (one direction string per line) and split it into
# single characters; hex_path() re-joins two-letter directions.
# l <- readLines('data/day24_example.txt', warn = FALSE)
# l <- strsplit(l, '')
l <- readLines('data/day24.txt', warn = FALSE)
l <- strsplit(l, '')
# Vectorised md5 digester used to identify tiles by hashing their hexagon.
md5 <- digest::getVDigest()
# Hash the final hexagon reached by each path -- i.e. the tile each path
# flips.
tiles <- vapply(l, function(x) {
  p <- hex_path(x)
  md5(p[length(p)])
}, character(1))
# Tiles flipped an odd number of times end up black.
# NOTE(review): `f == 1` counts tiles flipped exactly once; a tile flipped
# three times would be missed -- presumably the input has no such case.
f <- table(tiles)
print(length(f[f == 1]))
# part two
# Hash a hexagon into a stable tile identifier (the md5 digester is created
# by the driver script above).
.h <- function(x) paste0('hex_', md5(x))

# Label every tile in a list with its hash identifier.
set_names <- function(tiles) {
  vapply(tiles, function(t) .h(list(t)), character(1))
}

# A tile flipped an even number of times shows white; odd shows black.
is_white <- function(n) n %% 2 == 0
is_black <- function(n) !is_white(n)
# Construct the six neighbours of hexagon `x`, one per side, each named by
# its tile hash.
adj_hex <- function(x) {
  links <- hex_links()
  neighbours <- lapply(seq_along(x), function(idx) {
    side_name <- names(x)[idx]
    # The shared side carries the corner names of the opposite direction.
    corner_names <- names(x[[links[[side_name]]]])
    shared <- setNames(x[[idx]], corner_names)
    # Rebuild the hexagon on the far side of that shared edge.
    hex(names(shared)[1], shared[[1]])
  })
  names(neighbours) <- set_names(neighbours)
  neighbours
}
# Advance the tile floor by one day.
#
# `o` is a list with:
#   black_tiles - named list of currently-black hexagons (names = hashes)
#   adj         - cached neighbour lists from the previous day (NULL on day 1)
# Rules: a black tile with zero or more than two black neighbours flips
# white; a white tile with exactly two black neighbours flips black.
# Returns the next day's state in the same shape.
update_tiles <- function(o) {
  black_tiles <- o$black_tiles
  adj <- o$adj
  # Compute neighbours on the first day; afterwards they are carried over.
  if (is.null(adj))
    adj <- lapply(black_tiles, adj_hex)
  nms <- names(black_tiles)
  res <- res_adj <- white_tiles <- list()
  # Pass 1: decide which black tiles stay black, collecting their white
  # neighbours as candidates for pass 2.
  for (i in seq_along(black_tiles)) {
    x <- nms[i]
    # `a` is the neighbour list of tile x; x.adj its neighbour hashes.
    x.adj <- names(a <- adj[[x]])
    n.b <- length(x.adj[x.adj %in% nms])
    a.w <- x.adj[!x.adj %in% nms]
    white_tiles[a.w] <- a[a.w]
    # Stays black with 1 or 2 black neighbours.
    if (n.b <= 2 && n.b != 0) {
      res[[x]] <- black_tiles[[x]]
      res_adj[[x]] <- a
    }
  }
  # Pass 2: a white neighbour flips black when exactly two of its own
  # neighbours are currently black.
  white_tiles <- white_tiles[unique(names(white_tiles))]
  for (i in seq_along(white_tiles)) {
    x.tile <- white_tiles[[i]]
    x <- names(white_tiles)[i]
    x.adj <- names(a <- adj_hex(x.tile))
    n.b <- length(x.adj[x.adj %in% nms])
    if (n.b == 2) {
      res[[x]] <- x.tile
      res_adj[[x]] <- a
    }
  }
  list(black_tiles = res, adj = res_adj)
}
# Run the daily tile-flipping automaton for n days.
#
# `tiles` is a named list of hexagons where each name (hash) appears once
# per flip; names occurring an odd number of times start black. Prints the
# black-tile count after each day and invisibly returns the final set of
# black tiles.
run_days <- function(tiles, n = 100) {
  # Keep only the tiles flipped an odd number of times (i.e. black).
  colors <- table(names(tiles))
  black <- colors[is_black(colors)]
  tiles <- tiles[names(black)]
  o <- list(black_tiles = tiles, adj = NULL)
  for (i_ in 1:n) {
    o <- update_tiles(o)
    # Progress: day number and current black-tile count.
    cat(i_, ':', length(o[[1]]), '\n')
  }
  invisible(o[[1]])
}
# ---- Part two ----
# Re-read and tokenise the input, then keep the final hexagon of each path.
# l <- readLines('data/day24_example.txt', warn = FALSE)
# l <- strsplit(l, '')
l <- readLines('data/day24.txt', warn = FALSE)
l <- strsplit(l, '')
tiles <- lapply(l, function(x) {
  p <- hex_path(x)
  p[[length(p)]]
})
# Name each tile by its hash so duplicates (repeat flips) can be counted.
names(tiles) <- set_names(tiles)
# Simulate 100 days and report the number of black tiles.
res <- run_days(tiles, n = 100)
print(length(res))
# 3937
|
cb2da27a49e5c4390e22a539e189e73195f0a6e5
|
54182c6ac646b9c4aecc75fdcdc290cd67e0beb8
|
/R/layeropts.R
|
1f127fd6e19b7bfd70a15922f222bdb5ee0b37e4
|
[
"MIT"
] |
permissive
|
vincenzocoia/copsupp
|
92ed0b9a00cf77d8c4146b841614d8fe6da3b70c
|
9b92e11670aff9a7b6ef8365fc2c9b3a8c904923
|
refs/heads/master
| 2021-03-19T16:01:28.754408
| 2020-08-21T16:04:50
| 2020-08-21T16:04:50
| 46,912,656
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,310
|
r
|
layeropts.R
|
#' Fitting options for a new Vine Layer
#'
#' When fitting a new layer(s) to a vine, use this function to specify
#' "known" components of the new layer(s).
#'
#' @param var Vector of integers: the new variables that these layers add,
#' not necessarily in order. Ignored (with a warning) when \code{G} is given.
#' @param G Vine array for the new layers (newer form: variables in row 1),
#' or \code{NULL}.
#' @param ntrunc Truncation level. Could be a vector corresponding to the
#' truncation level for the variables \code{var} or \code{G[1, ]}.
#' @param cops Copula matrix for these layers (no blank column to the left;
#' some entries may be \code{NA}), or \code{NULL}.
#' @param cpar Copula parameter matrix for these layers (no blank column to
#' the left; some entries may contain \code{NA}'s), or \code{NULL}.
#' @param families Character vector of candidate copula families.
#' @note The arrays here use the newer form, where variables go in row 1.
#' @return A list of partially-specified layers of a vine.
#'
#' Regarding the vine array info:
#'
#' \itemize{
#'      \item \code{$var} Vector of new variables that these layers add, not
#'      necessarily in order.
#'      \item \code{$ntrunc} Depending on how much information is input, could be
#'      \code{NULL}, an integer for maximum tree depth of these layers, or a
#'      vector of tree depth for each layer.
#'      \item \code{$G} Either \code{NULL}, or a vine array.
#' }
#'
#' Regarding copula and parameter info:
#'
#' \itemize{
#'      \item \code{$copmat} Copula matrix for these layers. No blank column
#'      to the left. Some entries may be \code{NA}.
#'      \item \code{$cparmat} Copula parameter matrix for these layers. No
#'      blank column to the left. Some entries may contain \code{NA}'s.
#' }
#' @export
layeropts <- function(var=NULL, G=NULL, ntrunc=NULL, cops=NULL, cpar=NULL,
                      families = c("bvncop","bvtcop","mtcj","gum","frk",
                                   "joe","bb1","bb7","bb8")){
  ## Deal with array-related things first.
  ## (Scalar conditions now use && / || rather than the vectorized & / |.)
  if (is.null(G) && is.null(var))
    stop("At least one of 'var' or 'G' must be specified.")
  if (!is.null(G) && !is.null(var)) {
    warning("Both 'var' and 'G' are specified -- ignoring 'var'.")
    var <- G[1, ]
  }
  if (is.null(G)) { # In this case, var is specified, G is not.
    ## Check that the var input is numeric with no NA's...
    if (any(is.na(var)) || !is.numeric(var))
      stop("'var' must be entirely integers, containing no NA's.")
    ## ...and integer-valued. (is.integer won't work: doubles like 3 count.)
    if (any(var %% 1 != 0))
      stop("'var' must be entirely integers, containing no NA's.")
  }
  if (is.null(var)) { # In this case, G is specified, var is not.
    var <- G[1, ]
  }
  list(var=var, G=G, ntrunc=ntrunc, copmat=cops, cparmat=cpar, families=families)
}
|
4f627b5ea3f9bb63c5bf71372d2a1f246a25bc39
|
b211639b9d1b4ca253df986eb2344d029bd415d5
|
/Problem Set 1/Scripts/summarizePlotQuartet.R
|
a3e2dac5538ac778447ab9ce6a8c4da0787be6ad
|
[] |
no_license
|
cortbreuer/ENGRD-2700-Problem-Sets
|
ca318aef280f3e872f2199139b46439862572e6d
|
3738314fb760e22d4eb33fe2f320d43d54e91324
|
refs/heads/master
| 2020-07-17T23:30:00.698135
| 2019-12-14T00:39:55
| 2019-12-14T00:39:55
| 206,123,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,273
|
r
|
summarizePlotQuartet.R
|
# Load libraries
library(tidyverse)
library(knitr)
library(kableExtra)
library(gridExtra)
theme_set(theme_bw())

# Import the quartet data set (four x/y pairs with near-identical statistics)
quartet <- read.csv("Data/Quartet.csv")

# Labels for the rows of the summary table
colNames <- c("X1", "Y1", "X2", "Y2", "X3", "Y3", "X4", "Y4")

# Column-wise summary statistics: mean, median, standard deviation
col_means   <- apply(quartet, 2, mean)
col_medians <- apply(quartet, 2, median)
col_sds     <- apply(quartet, 2, sd)

# Assemble the summary table, round numeric columns to 2 decimals, render it
summaryTable <- tibble(
  Column = colNames,
  `Sample Mean` = col_means,
  `Sample Median` = col_medians,
  `Sample Standard Deviation` = col_sds
) %>%
  mutate_if(is.numeric, round, digits = 2)
kable(summaryTable) %>% kable_styling(bootstrap_options = c("striped", "hover"))

# Plot the four paired data sets on identical axes so they can be compared
# against the (deceptively similar) summary statistics above
p1 <- ggplot(quartet, aes(x1, y1)) + geom_point(size = 3) + xlim(0, 19) + ylim(0, 15)
p2 <- ggplot(quartet, aes(x2, y2)) + geom_point(size = 3) + xlim(0, 19) + ylim(0, 15)
p3 <- ggplot(quartet, aes(x3, y3)) + geom_point(size = 3) + xlim(0, 19) + ylim(0, 15)
p4 <- ggplot(quartet, aes(x4, y4)) + geom_point(size = 3) + xlim(0, 19) + ylim(0, 15)
grid.arrange(p1, p2, p3, p4)
|
f8969f12393499df7171b17723b24692b556f93f
|
69cba8c80765e2c57d5cdf1901833a697bfaa05f
|
/PrisonerProblem.R
|
7e686fc899e5ca521352f50990cca6f495b24854
|
[] |
no_license
|
jtownball/Sandbox
|
d81453e2ade709b74fa0ef44efe0ee7b8b273c99
|
cea575d424627b0aeb7e355006b5cc32690e641f
|
refs/heads/master
| 2023-05-13T04:58:15.579487
| 2023-05-07T03:09:14
| 2023-05-07T03:09:14
| 14,943,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,013
|
r
|
PrisonerProblem.R
|
# The director of a prison offers 100 death row prisoners,
# who are numbered from 1 to 100, a last chance. A room
# contains a cupboard with 100 drawers. The director randomly
# puts one prisoner's number in each closed drawer. The prisoners
# enter the room, one after another. Each prisoner may open and
# look into 50 drawers in any order. The drawers are closed again
# afterwards. If, during this search, every prisoner finds their
# number in one of the drawers, all prisoners are pardoned. If even
# one prisoner does not find their number, all prisoners die. Before
#the first prisoner enters the room, the prisoners may discuss
# strategy โ but may not communicate once the first prisoner enters to
# look in the drawers. What is the prisoners' best strategy?
# This function takes the list of drawers and the current prisoner number.
# It then follows the strategy to check the drawers.
# Follow the cycle-chasing strategy: open the drawer matching your own number,
# then keep following the number found inside. The prisoner succeeds iff the
# permutation cycle containing their number is no longer than maxOpens.
#
# listOfDrawers          permutation: drawer i contains listOfDrawers[i]
# currentPrisonerNumber  the number this prisoner is searching for
# maxOpens               how many drawers may be opened; defaults to half the
#                        drawers (the classic 50-of-100 rule), so existing
#                        two-argument callers behave exactly as before
#
# Returns TRUE if the prisoner finds their own number, FALSE otherwise.
prisonerStrategy <- function(listOfDrawers, currentPrisonerNumber,
                             maxOpens = length(listOfDrawers) %/% 2)
{
  nextDrawerInSequence <- listOfDrawers[currentPrisonerNumber]
  # seq_len() (unlike 1:maxOpens) correctly does nothing when maxOpens == 0
  for (opened in seq_len(maxOpens))
  {
    if (nextDrawerInSequence == currentPrisonerNumber)
    {
      return(TRUE)
    }
    nextDrawerInSequence <- listOfDrawers[nextDrawerInSequence]
  }
  FALSE
}
listOfPrisoners <- 1:100

# simCount controls how many times we see if all the prisoners live or die.
# Set this value lower for a quicker response, and higher for more accuracy.
simCount <- 100000

# One entry per simulated round: 1 if every prisoner found their number,
# 0 otherwise. Each round shuffles the 100 numbers into the drawers and
# runs the strategy for every prisoner.
listOfSimulationResults <- vapply(seq_len(simCount), function(simulationIteration) {
  drawers <- sample(c(1:100), 100)
  everyoneLived <- all(vapply(listOfPrisoners,
                              function(prisonerNumber) {
                                prisonerStrategy(drawers, prisonerNumber)
                              },
                              logical(1)))
  if (everyoneLived) 1 else 0
}, numeric(1))

# This prints the average of the simulation results.
# The higher the number of simulation runs the closer
# to ~0.31183 the value should get.
print(mean(listOfSimulationResults))
|
5b0766f63b58293af915d9877f3c081c53f4187c
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/tests/testthat/test-jit-ops.R
|
5d52159a191c6711fa5398b6582bfed5425fb7dc
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| false
| 1,480
|
r
|
test-jit-ops.R
|
# Exercises calling TorchScript operators through the jit_ops proxy
# (jit_ops$<namespace>$<operator>), comparing against the regular torch API.
# NOTE(review): depends on the torch package being installed and loaded.
test_that("can access operators via ops object", {
  # matmul, default use: (5x4) %*% (4x5) -> 5x5 result
  res <- jit_ops$aten$matmul(torch::torch_ones(5, 4), torch::torch_rand(4, 5))
  expect_equal(dim(res), c(5, 5))
  # matmul, passing out tensor: the product is written into `out`
  # (ones %*% identity == ones, hence comparing t1 against out)
  t1 <- torch::torch_ones(4, 4)
  t2 <- torch::torch_eye(4)
  out <- torch::torch_zeros(4, 4)
  jit_ops$aten$matmul(t1, t2, out)
  expect_equal_to_tensor(t1, out)
  # split, returning two tensors in a list of length 2; the jit op takes
  # jit_scalar-wrapped arguments where torch_split takes plain R values
  res_torch <- torch_split(torch::torch_arange(0, 3), 2, 1)
  res_jit <- jit_ops$aten$split(torch::torch_arange(0, 3), torch::jit_scalar(2L), torch::jit_scalar(0L))
  expect_length(res_jit, 2)
  expect_equal_to_tensor(res_jit[[1]], res_torch[[1]])
  expect_equal_to_tensor(res_jit[[2]], res_torch[[2]])
  # split, returning a single tensor (split size covers the whole tensor)
  res_torch <- torch_split(torch::torch_arange(0, 3), 4, 1)
  res_jit <- jit_ops$aten$split(torch::torch_arange(0, 3), torch::jit_scalar(4L), torch::jit_scalar(0L))
  expect_length(res_jit, 1)
  expect_equal_to_tensor(res_jit[[1]], res_torch[[1]])
  # linalg_qr always returns a list; compare the second component (R factor)
  m <- torch_eye(5)/5
  res_torch <- linalg_qr(m)
  res_jit <- jit_ops$aten$linalg_qr(m, torch::jit_scalar("reduced"))
  expect_equal_to_tensor(res_torch[[2]], res_jit[[2]])
})
# Snapshot tests pinning the printed representation of the jit_ops proxy at
# several levels: the root object, a namespace, and individual operators.
test_that("can print ops objects at different levels", {
  local_edition(3)  # snapshot expectations require testthat 3rd edition
  expect_snapshot(jit_ops)                  # root proxy object
  expect_snapshot(jit_ops$sparse)           # an operator namespace
  expect_snapshot(jit_ops$prim$ChunkSizes)  # a single prim operator
  expect_snapshot(jit_ops$aten$fft_fft)     # a single aten operator
})
|
2303897d89b41f1409a1fc7af923f5041d079949
|
999f6296b3102c5374af78e8f19f783db7ae0f22
|
/R/testing_fun.R
|
911fa7dcb205da9c24b80534abd5138fb51e5454
|
[] |
no_license
|
felix28dls/ddCt_QPCR_Analysis
|
b052a4beb308e7650cda03f864829e6266c1ca20
|
0539f31edb2b3b506de0a9c2db90a01190fb0e3c
|
refs/heads/master
| 2020-06-16T20:55:48.974964
| 2019-07-07T22:28:42
| 2019-07-07T22:28:42
| 195,700,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,266
|
r
|
testing_fun.R
|
#' Statistical testing of PCR data
#'
#' A unified interface to different statistical significance tests for qPCR data
#'
#' @inheritParams pcr_ddct
#' @param test A character string; 't.test' default, 'wilcox.test' or 'lm'
#' @param ... Other arguments for the testing methods
#'
#' @return A data.frame of 5 columns in addition to term when test == 'lm'
#' \itemize{
#' \item term The linear regression comparison terms
#' \item gene The column names of df. reference_gene is dropped
#' \item estimate The estimate for each term
#' \item p_value The p-value for each term
#' \item lower The low 95\% confidence interval
#' \item upper The high 95\% confidence interval
#' }
#' For details about the test methods themselves and different parameters,
#' consult \code{\link[stats]{t.test}}, \code{\link[stats]{wilcox.test}}
#' and \code{\link[stats]{lm}}
#'
#' @details The simple t-test can be used to test the significance of the
#' difference between two conditions \eqn{\Delta C_T}. t-test assumes in addition,
#' that the input \eqn{C_T} values are normally distributed and the variance
#' between conditions are comparable.
#' Wilcoxon test can be used when sample size is small and those two last
#' assumptions are hard to achieve.
#'
#' Two use the linear regression here. A null hypothesis is formulated as following,
#' \deqn{
#' C_{T, target, treatment} - C_{T, control, treatment} =
#' C_{T, target, control} - C_{T, control, control}
#' \quad \textrm{or} \quad \Delta\Delta C_T
#' }
#' This is exactly the \eqn{\Delta\Delta C_T} as explained earlier. So the
#' \eqn{\Delta\Delta C_T} is estimated and the null is rejected when
#' \eqn{\Delta\Delta C_T \ne 0}.
#'
#' @references Yuan, Joshua S, Ann Reed, Feng Chen, and Neal Stewart. 2006.
#' โStatistical Analysis of Real-Time PCR Data.โ BMC Bioinformatics 7 (85).
#' BioMed Central. doi:10.1186/1471-2105-7-85.
#'
#' @examples
#' # locate and read data
#' fl <- system.file('extdata', 'ct4.csv', package = 'pcr')
#' ct4 <- readr::read_csv(fl)
#'
#' # make group variable
#' group <- rep(c('control', 'treatment'), each = 12)
#'
#' # test using t-test
#' pcr_test(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control',
#' test = 't.test')
#'
#' # test using wilcox.test
#' pcr_test(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control',
#' test = 'wilcox.test')
#'
#' # testing using lm
#' pcr_test(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control',
#' test = 'lm')
#'
#' # testing advanced designs using a model matrix
#' # make a model matrix
#' group <- relevel(factor(group), ref = 'control')
#' dose <- rep(c(100, 80, 60, 40), each = 3, times = 2)
#' mm <- model.matrix(~group:dose, data = data.frame(group, dose))
#'
#' # test using lm
#' pcr_test(ct4,
#' reference_gene = 'ref',
#' model_matrix = mm,
#' test = 'lm')
#'
#' # using linear models to check the effect of RNA quality
#' # make a model matrix
#' group <- relevel(factor(group), ref = 'control')
#' set.seed(1234)
#' quality <- scale(rnorm(n = 24, mean = 1.9, sd = .1))
#' mm <- model.matrix(~group + group:quality, data = data.frame(group, quality))
#'
#' # testing using lm
#' pcr_test(ct4,
#' reference_gene = 'ref',
#' model_matrix = mm,
#' test = 'lm')
#'
#' # using linear model to check the effects of mixing separate runs
#' # make a model matrix
#' group <- relevel(factor(group), ref = 'control')
#' run <- factor(rep(c(1:3), 8))
#' mm <- model.matrix(~group + group:run, data = data.frame(group, run))
#'
#' # test using lm
#' pcr_test(ct4,
#' reference_gene = 'ref',
#' model_matrix = mm,
#' test = 'lm')
#'
#' @export
pcr_test <- function(df, test = 't.test', ...) {
  # Validate the requested method up front: an unrecognised `test` string now
  # fails loudly via match.arg() instead of silently returning NULL from
  # switch(). match.arg() also accepts unambiguous partial matches
  # (e.g. 'wilcox' -> 'wilcox.test'), which is backward-compatible since any
  # previously valid value dispatches exactly as before.
  test <- match.arg(test, c('t.test', 'wilcox.test', 'lm'))
  switch(test,
    't.test' = pcr_ttest(df, ...),
    'wilcox.test' = pcr_wilcox(df, ...),
    'lm' = pcr_lm(df, ...)
  )
}
#' t-test qPCR data
#'
#' @inheritParams pcr_ddct
#' @param tidy A \code{logical} whether to return a \code{list} of \code{htest}
#' or a tidy \code{data.frame}. Default TRUE.
#' @param ... Other arguments to \code{\link[stats]{t.test}}
#'
#' @return A data.frame of 5 columns
#' \itemize{
#' \item gene The column names of df. reference_gene is dropped
#' \item estimate The estimate for each term
#' \item p_value The p-value for each term
#' \item lower The low 95\% confidence interval
#' \item upper The high 95\% confidence interval
#' }
#' When \code{tidy} is FALSE, returns a \code{list} of \code{htest} objects.
#'
#' @examples
#' # locate and read data
#' fl <- system.file('extdata', 'ct4.csv', package = 'pcr')
#' ct4 <- readr::read_csv(fl)
#'
#' # make group variable
#' group <- rep(c('control', 'treatment'), each = 12)
#'
#' # test
#' pcr_ttest(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control')
#'
#' # test using t.test method
#' pcr_test(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control',
#' test = 't.test')
#'
#' @importFrom purrr map
#' @importFrom stats t.test relevel
#' @importFrom dplyr data_frame bind_rows
#'
#' @export
pcr_ttest <- function(df, group_var, reference_gene, reference_group,
                      tidy = TRUE, ...) {
  ## Normalise each gene column against the reference gene (delta_ct)
  delta_ct <- .pcr_normalize(df, reference_gene = reference_gene)

  ## t.test only supports two groups. Relevel so the non-reference group is
  ## the baseline, which keeps the sign of the estimate consistent.
  levels_found <- unique(group_var)
  if (length(levels_found) != 2) {
    stop('t.test is only applied to two group comparisons.')
  }
  baseline <- levels_found[levels_found != reference_group]
  group_var <- relevel(factor(group_var), ref = baseline)

  ## One t-test per gene column
  tests <- map(delta_ct, function(ct) t.test(ct ~ group_var, ...))

  ## Either return the raw htest objects, or collapse them into one tidy
  ## row per gene (estimate = group-mean difference, plus p-value and CI).
  if (!tidy) {
    return(tests)
  }
  bind_rows(
    map(tests, function(ht) {
      data_frame(
        estimate = unname(ht$estimate[1] - ht$estimate[2]),
        p_value = ht$p.value,
        lower = ht$conf.int[1],
        upper = ht$conf.int[2]
      )
    }),
    .id = 'gene'
  )
}
#' Wilcoxon test qPCR data
#'
#' @inheritParams pcr_ddct
#' @param tidy A \code{logical} whether to return a \code{list} of \code{htest}
#' or a tidy \code{data.frame}. Default TRUE.
#' @param ... Other arguments to \code{\link[stats]{wilcox.test}}
#'
#' @return A data.frame of 5 columns
#' \itemize{
#' \item gene The column names of df. reference_gene is dropped
#' \item estimate The estimate for each term
#' \item p_value The p-value for each term
#' \item lower The low 95\% confidence interval
#' \item upper The high 95\% confidence interval
#' }
#'
#' When \code{tidy} is FALSE, returns a \code{list} of \code{htest} objects.
#'
#' @examples
#' # locate and read data
#' fl <- system.file('extdata', 'ct4.csv', package = 'pcr')
#' ct4 <- readr::read_csv(fl)
#'
#' # make group variable
#' group <- rep(c('control', 'treatment'), each = 12)
#'
#' # test
#' pcr_wilcox(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control')
#'
#' # test using wilcox.test method
#' pcr_test(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control',
#' test = 'wilcox.test')
#'
#' @importFrom purrr map
#' @importFrom stats wilcox.test relevel
#' @importFrom dplyr data_frame bind_rows
#'
#' @export
pcr_wilcox <- function(df, group_var, reference_gene, reference_group,
                       tidy = TRUE, ...) {
  ## Normalise each gene column against the reference gene (delta_ct)
  delta_ct <- .pcr_normalize(df, reference_gene = reference_gene)

  ## wilcox.test only supports two groups. Relevel so the non-reference
  ## group is the baseline, keeping the sign of the estimate consistent.
  levels_found <- unique(group_var)
  if (length(levels_found) != 2) {
    stop('wilcox.test is only applied to two group comparisons.')
  }
  baseline <- levels_found[levels_found != reference_group]
  group_var <- relevel(factor(group_var), ref = baseline)

  ## One Wilcoxon test per gene column; conf.int = TRUE is required so
  ## the location estimate and confidence interval are computed.
  tests <- map(delta_ct, function(ct) {
    wilcox.test(ct ~ group_var, conf.int = TRUE, ...)
  })

  ## Either return the raw htest objects, or collapse them into one tidy
  ## row per gene.
  if (!tidy) {
    return(tests)
  }
  bind_rows(
    map(tests, function(ht) {
      data_frame(
        estimate = unname(ht$estimate),
        p_value = ht$p.value,
        lower = ht$conf.int[1],
        upper = ht$conf.int[2]
      )
    }),
    .id = 'gene'
  )
}
#' Linear regression qPCR data
#'
#' @inheritParams pcr_ddct
#' @param model_matrix A model matrix for advanced experimental design. for
#' constructing such a matrix with different variables check
#' \code{\link[stats]{model.matrix}}
#' @param mode A character string for the normalization mode. Possible values
#' are "subtract" (default) or "divide".
#' @param tidy A \code{logical} whether to return a \code{list} of
#' \code{\link[stats]{lm}} or a tidy \code{data.frame}. Default TRUE.
#' @param ... Other arguments to \code{\link[stats]{lm}}
#'
#' @return A data.frame of 6 columns
#' \itemize{
#' \item term The term being tested
#' \item gene The column names of df. reference_gene is dropped
#' \item estimate The estimate for each term
#' \item p_value The p-value for each term
#' \item lower The low 95\% confidence interval
#' \item upper The high 95\% confidence interval
#' }
#' When \code{tidy} is FALSE, returns a \code{list} of \code{\link[stats]{lm}}
#' objects.
#'
#' @examples
#' # locate and read data
#' fl <- system.file('extdata', 'ct4.csv', package = 'pcr')
#' ct4 <- readr::read_csv(fl)
#'
#' # make group variable
#' group <- rep(c('control', 'treatment'), each = 12)
#'
#' # test
#' pcr_lm(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control')
#'
#' # testing using lm method
#' pcr_test(ct4,
#' group_var = group,
#' reference_gene = 'ref',
#' reference_group = 'control',
#' test = 'lm')
#'
#' @importFrom purrr map
#' @importFrom stats lm confint relevel
#' @importFrom dplyr data_frame bind_rows
#'
#' @export
pcr_lm <- function(df, group_var, reference_gene, reference_group,
                   model_matrix = NULL, mode = 'subtract', tidy = TRUE,
                   ...) {
  ## Normalise each gene column against the reference gene (delta_ct)
  norm <- .pcr_normalize(df, reference_gene = reference_gene, mode = mode)

  ## Without a user-supplied model matrix, build the design from group_var
  ## with the reference group as the baseline level.
  use_group_formula <- is.null(model_matrix)
  if (use_group_formula) {
    group_var <- relevel(factor(group_var), ref = reference_group)
  }

  ## Fit one linear model per gene column. With a model matrix the
  ## intercept is dropped (+ 0) so each design column gets a coefficient.
  fits <- map(norm, function(y) {
    if (use_group_formula) {
      lm(y ~ group_var, ...)
    } else {
      lm(y ~ model_matrix + 0, ...)
    }
  })

  ## Either return the raw lm objects, or collapse them into a tidy frame
  ## with one row per non-intercept term per gene.
  if (!tidy) {
    return(fits)
  }
  bind_rows(
    map(fits, function(fit) {
      conf_int <- confint(fit)
      data_frame(
        term = names(fit$coefficients)[-1],
        estimate = unname(fit$coefficients)[-1],
        p_value = summary(fit)$coefficients[-1, 4],
        lower = conf_int[-1, 1],
        upper = conf_int[-1, 2]
      )
    }),
    .id = 'gene'
  )
}
|
f3480014bf6aaf416aacf29583e28961c45e4880
|
5e42a668e417fd55fe28ecee719c759016f963b9
|
/tests/testthat/test-expect_s3_class_linter.R
|
d071a8f7748c2707bb09366cc370f4a402eda934
|
[
"MIT"
] |
permissive
|
cordis-dev/lintr
|
2120e22820e8499ca3066fa911572fd89c49d300
|
cb694d5e4da927f56c88fa5d8972594a907be59a
|
refs/heads/main
| 2023-08-05T08:50:42.679421
| 2023-07-25T13:21:29
| 2023-07-25T13:21:29
| 225,583,354
| 0
| 0
|
NOASSERTION
| 2019-12-03T09:41:30
| 2019-12-03T09:41:30
| null |
UTF-8
|
R
| false
| false
| 3,306
|
r
|
test-expect_s3_class_linter.R
|
# Negative cases: constructs that look s3-class-related but must NOT be
# linted (no equivalent expect_s3_class() call exists, or the replacement
# would lose information such as info=/label= arguments).
test_that("expect_s3_class_linter skips allowed usages", {
  linter <- expect_s3_class_linter()
  # expect_s3_class doesn't have an inverted version
  expect_lint("expect_true(!inherits(x, 'class'))", NULL, linter)
  # NB: also applies to tinytest, but it's sufficient to test testthat
  expect_lint("testthat::expect_true(!inherits(x, 'class'))", NULL, linter)
  # other is.<x> calls are not suitable for expect_s3_class in particular
  expect_lint("expect_true(is.na(x))", NULL, linter)
  # case where expect_s3_class() *could* be used but we don't enforce
  # (is.data.table is not a base-R predicate)
  expect_lint("expect_true(is.data.table(x))", NULL, linter)
  # expect_s3_class() doesn't have info= or label= arguments
  expect_lint("expect_equal(class(x), k, info = 'x should have class k')", NULL, linter)
  expect_lint("expect_equal(class(x), k, label = 'x class')", NULL, linter)
  expect_lint("expect_equal(class(x), k, expected.label = 'target class')", NULL, linter)
  expect_lint("expect_true(is.data.frame(x), info = 'x should be a data.frame')", NULL, linter)
})
# Positive cases: usages that must produce a lint. The linter is
# instantiated once and reused, consistent with the "skips allowed usages"
# test, and the repeated expected-message literals are named once.
test_that("expect_s3_class_linter blocks simple disallowed usages", {
  linter <- expect_s3_class_linter()
  equal_msg <- rex::rex("expect_s3_class(x, k) is better than expect_equal(class(x), k)")
  true_msg <- rex::rex("expect_s3_class(x, k) is better than expect_true(is.<k>(x))")

  expect_lint(
    "expect_equal(class(x), 'data.frame')",
    equal_msg,
    linter
  )
  # works when testing against a sequence of classes too
  expect_lint(
    "expect_equal(class(x), c('data.table', 'data.frame'))",
    equal_msg,
    linter
  )
  # expect_identical is treated the same as expect_equal
  expect_lint(
    "testthat::expect_identical(class(x), 'lm')",
    rex::rex("expect_s3_class(x, k) is better than expect_identical(class(x), k)"),
    linter
  )
  # yoda test with string literal in first arg also caught
  expect_lint(
    "expect_equal('data.frame', class(x))",
    equal_msg,
    linter
  )
  # different equivalent usages
  expect_lint(
    "expect_true(is.table(foo(x)))",
    true_msg,
    linter
  )
  expect_lint(
    "expect_true(inherits(x, 'table'))",
    true_msg,
    linter
  )
  # TODO(michaelchirico): consider more carefully which sorts of class(x) %in% . and
  #   . %in% class(x) calls should be linted
  #> expect_lint(
  #>   "expect_true('lm' %in% class(x))",
  #>   "expect_s3_class\\(x, k\\) is better than expect_equal\\(class\\(x\\), k",
  #>   expect_s3_class_linter
  #> )
})
# Parameterised tests (via patrick): every base-R is.<class> predicate listed
# below should trigger the linter when wrapped in expect_true(). The local()
# keeps `is_classes` out of the test file's top-level environment.
local({
  # test for lint errors appropriately raised for all is.<class> calls
  is_classes <- c(
    "data.frame", "factor", "numeric_version",
    "ordered", "package_version", "qr", "table",
    "relistable", "raster", "tclObj", "tkwin", "grob", "unit",
    "mts", "stepfun", "ts", "tskernel"
  )
  # One generated test per class name; .test_name labels each case.
  patrick::with_parameters_test_that(
    "expect_true(is.<base class>) is caught",
    expect_lint(
      sprintf("expect_true(is.%s(x))", is_class),
      rex::rex("expect_s3_class(x, k) is better than expect_true(is.<k>(x))"),
      expect_s3_class_linter()
    ),
    .test_name = is_classes,
    is_class = is_classes
  )
})
|
841ff67402ac255d1f04b5e773f66aa5a849cea9
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8734_0/rinput.R
|
ac611a14df07c87fa836e4a652250d6ba090eb24
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted topology out.
library(ape)
phylo_rooted <- read.tree("8734_0.txt")
phylo_unrooted <- unroot(phylo_rooted)
write.tree(phylo_unrooted, file = "8734_0_unrooted.txt")
|
7b3431e0aea653b152481ca397c752f0e6f90796
|
92220d3bc952901e2423745771de1725c34e5c86
|
/dataproc/workerMetrics.R
|
054fa771da9f986dfbf1cf1e9a196e5186bcd943
|
[] |
no_license
|
laroyo/watsonc
|
9489605d94c1336a82350a2dc4494e54374dc624
|
a55b62945ed75b85e81ec5be453ccb8a3edd6a25
|
refs/heads/master
| 2016-09-05T23:13:48.119320
| 2015-06-17T10:21:34
| 2015-06-17T10:21:34
| 7,225,727
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,674
|
r
|
workerMetrics.R
|
#!/usr/bin/Rscript
## Per-worker quality metrics for a crowdsourcing job.
## Read file 90-sents-all-batches-GS-sentsv3.csv and applies the filters.
## The filter output is the same as 90-sents-all-batches-CS-sentsv3.csv (Dropbox/data/CF-Results-processed/)
## Side effects: saves filter results and worker metrics via the DB helpers,
## updates history_table, and writes an Excel workbook of metrics.

# envars.R defines `libpath` (and presumably DB settings) used below -- TODO confirm.
source('/var/www/html/wcs/dataproc/envars.R')
library(XLConnect)
# Project helpers: DB access, metric computation, filters, simplification,
# and file-storage bookkeeping (all sourced, definitions not visible here).
source(paste(libpath,'/db.R',sep=''),chdir=TRUE)
source(paste(libpath,'/measures.R',sep=''),chdir=TRUE)
source(paste(libpath,'/filters.R',sep=''),chdir=TRUE)
source(paste(libpath,'/simplify.R',sep=''),chdir=TRUE)
source(paste(libpath,'/fileStorage.R',sep=''),chdir=TRUE)
#For calculating the cosine.
library(lsa)

# The job id comes from the command line, or from a pre-existing `job.id`
# variable when the script is sourced interactively.
args <- commandArgs(trailingOnly = TRUE)
if(length(args) > 0){
  job.id <- args[1]
} else {
  if(!exists('job.id')){
    stop('Error: you should provide a Job id (parameter)')
  }
}
#FIXME: this be obtained when storing the file on the file storage.
file_id <- -1
raw.data <- getJob(job.id)
# Ad-hoc clean-up for one specific job: drop rows with an empty relation.
if(job.id == 196344){
  raw.data <- raw.data[raw.data$relation != '',]
}
worker.ids <- sort(unique(raw.data$worker_id))
# Optionally drop "singleton" workers (below the threshold of 3 per
# belowFactor) before computing any metrics.
without.singletons <- TRUE
if(without.singletons) {
  numSent <- numSentences(raw.data)
  singletons <- belowFactor(numSent,'numSent',3)
  worker.ids <- setdiff(worker.ids,singletons)
  raw.data <- raw.data[!(raw.data$worker_id %in% singletons),]
}
if(dim(raw.data)[1] == 0){
  cat('JOB_NOT_FOUND')
} else {
  # Sentence x relation pivot used by the sentence-level filters.
  sentenceTable <- pivot(raw.data,'unit_id','relation')
  sentenceDf <- getDf(sentenceTable)
  #Calculate the measures to apply the filters.
  filters <- list('SQRT','NormSQRT','NormR', 'NormRAll')
  #Calculate the measures to apply the filters.filters <- list('SQRT','NormSQRT')
  mdf <- calc_measures(sentenceDf,filters)
  discarded <- list()
  filtered <- list()
  for (f in filters){
    #Apply the filters: each one returns the discarded rows (those below the threshold)
    discarded[[f]] <- belowDiff(mdf,f)
    #The filtered *in*
    filtered[[f]] <- setdiff(rownames(sentenceDf),discarded[[f]])
    saveFilteredSentences(job.id, file_id, f, discarded[[f]])
  }
  #After applying the filters, add the "NULL" filter (no sentences removed).
  filters <- append('NULL', filters)
  filtered[['NULL']] <- rownames(sentenceDf)
  discarded[['NULL']] <- NULL
  worker.ids <- sort(unique(raw.data$worker_id))
  out <- NULL
  spamCandidates <- list()
  # For each filter, compute per-worker metrics (agreement, cosine,
  # annotations-per-sentence) on the surviving sentences, and flag spam
  # candidates that fall outside the thresholds.
  for (f in filters){
    print(paste('computing metrics for filter ',f))
    filt <- raw.data[raw.data$unit_id %in% filtered[[f]],]
    filtWorkers <- sort(unique(filt$worker_id))
    numSent <- numSentences(filt)
    numAnnot <- numAnnotations(filt)
    annotSentence <- numAnnot / numSent
    colnames(annotSentence) <- 'annotSentence'
    #sentMat <- list()
    agrValues <- agreement(filt)
    cosValues <- cosMeasure(filt)
    #sentRelScoreValues <- sentRelScoreMeasure(filt)
    saveWorkerMetrics(cbind(agrValues, cosValues,annotSentence,numSent), job.id, f,without.singletons)
    #df <- data.frame(row.names=filtWorkers,numSents=numSent, cos=cosValues, agr=agrValues, annotSentence=(numAnnot/numSent))
    df <- cbind(numSent,cosValues, agrValues,annotSentence)
    #Add empty values for filtered out workers
    missingworkers <- setdiff(worker.ids,filtWorkers)
    emptyCol <- rep(0,length(missingworkers))
    filtrows <- data.frame(row.names=missingworkers,numSent=emptyCol,cos=emptyCol,agr=emptyCol,annotSentence=emptyCol)
    df <- rbind(df, filtrows)
    df <- df[order(as.numeric(row.names(df))),]
    #Empty dataframe
    # One indicator column per spam criterion; 1 marks a flagged worker.
    spamFilters <- data.frame(row.names=worker.ids,cos=rep(0,length(worker.ids)),annotSentence=rep(0,length(worker.ids)),agr=rep(0,length(worker.ids)))
    candidateRows <- belowDiff(df,'cos')
    if(length(candidateRows) > 0 & dim(spamFilters[rownames(spamFilters) %in% candidateRows,])[1]>0){
      spamFilters[rownames(spamFilters) %in% candidateRows,]$cos = 1
    }
    # NOTE(review): `spammers` is assigned but never used below -- presumably
    # a leftover hard-coded worker list; confirm before removing.
    spammers <- c(14067668,9705524,12974606,14119448,9844590,8071333,13997142,8885952,7478095,9767020,13617382,5254360,8947442)
    candidateRows <- overDiff(df,'annotSentence')
    if(length(candidateRows) > 0 & dim(spamFilters[rownames(spamFilters) %in% candidateRows,])[1]>0){
      #if(length(candidateRows) > 0){
      spamFilters[rownames(spamFilters) %in% candidateRows,]$annotSentence = 1
    }
    candidateRows <- belowDiff(df,'agr')
    if(length(candidateRows) > 0 & dim(spamFilters[rownames(spamFilters) %in% candidateRows,])[1]>0){
      #if(length(candidateRows) > 0){
      spamFilters[rownames(spamFilters) %in% candidateRows,]$agr = 1
    }
    spamCandidates[[f]] <- spamFilters
    if(is.null(out)){
      out <- df
    } else {
      out <- cbind(out, df)
    }
  }
  # One column per sentence filter; each cell counts how many spam criteria
  # flagged that worker under that filter.
  spamFilterOutput <- data.frame(row.names=worker.ids,
                                 filter1=rowSums(spamCandidates[['NULL']]),
                                 filter2=rowSums(spamCandidates[['SQRT']]),
                                 filter3=rowSums(spamCandidates[['NormSQRT']]),
                                 filter4=rowSums(spamCandidates[['NormR']]),
                                 filter5=rowSums(spamCandidates[['NormRAll']])
                                 )
  #Combine spamFilterOutput.
  # A worker is labelled spam when flagged by more than one criterion under
  # more than one filter.
  sf <- as.data.frame(rowSums(spamFilterOutput > 1) > 1)
  colnames(sf) = 'label'
  spamLabels <- rownames(sf[sf$label==TRUE,,drop=FALSE])
  # Output workbook location (via the fileStorage helpers).
  fname <- getFileName(job.id,fileTypes[['workerMetrics']])
  path <- getFilePath(job.id, folderTypes[['analysisFiles']], FALSE)
  wb.new <- loadWorkbook(paste(path,fname,sep='/'), create = TRUE)
  # Sentence/relation clarity scores (computed here; only partially used below).
  sentRelDf <- sentRelScoreMeasure(raw.data)
  sClarity <- sentenceClarity(sentRelDf)
  rClarity <- relationClarity(sentRelDf)
  workerSentCos <- workerSentenceCosTable(raw.data)
  workerSentScore <- workerSentenceScoreTable(raw.data, workerSentCos, sClarity)
  #workerRelScore <- workerRelationScore(raw.data, rClarity, workerSentCos)
  ## createSheet(wb.new, name = "pivot-worker")
  ## writeOutputHeaders(wb.new,"pivot-worker")
  ## writeWorksheet(wb.new,data=cbind(out,spamFilterOutput[rownames(out),],spam=sf[rownames(out),]),sheet=1,startRow=2,startCol=1,header=TRUE,rownames='Worker ID')
  # Behavioural filters computed from the raw responses pulled from the DB.
  query <- sprintf("select worker_id, relation,explanation,selected_words,sentence from cflower_results where job_id = %s", job.id)
  res <- dbGetQuery(con,query)
  res$selected_words <- apply(res[,'selected_words',drop=FALSE],1,FUN=correctMisspells)
  res$explanation <- apply(res[,'explanation',drop=FALSE],1,FUN=correctMisspells)
  # Rows whose relation mentions OTHER/NONE alongside other picks (multi-line).
  oth.non <- res[intersect(grep('OTHER|NONE',res$relation),grep('\n',res$relation)),]
  filtWorkers <- list()
  if(dim(oth.non)[1] > 0){
    filtWorkers[['none_other']] <- noneOther(oth.non)
  }
  filtWorkers[['rep_response']] <- repeatedResponse(res)
  filValWords <- validWords(res)
  filtWorkers[['valid_words']] <- sort(unique(filValWords$worker_id))
  filtWorkers[['rep_text']] <- repeatedText(job.id,'both')
  beh.filters <- c('none_other', 'rep_response','valid_words', 'rep_text')
  # A behavioural spammer is any worker flagged by at least two of the
  # behavioural filters (pairwise intersections, unioned together).
  bspammers <- c()
  for (f in beh.filters){
    for (f2 in beh.filters){
      if(f != f2)
        bspammers <- union(bspammers,intersect(filtWorkers[[f]],filtWorkers[[f2]]))
    }
  }
  saveFilteredWorkers(job.id,unique(bspammers),'beh_filters')
  ## for (filter in names(filtWorkers)){
  ##   saveFilteredWorkers(job.id, filtWorkers[[filter]], filter)
  ## }
  saveFilteredWorkers(job.id, spamLabels, 'disag_filters')
  # Persist summary counts for this job in history_table.
  numFilteredSentences <- length(unlist(discarded))
  numWorkers <- length(unique(raw.data$worker_id))
  numFilteredWorkers <- length(union(spamLabels, unique(unlist(filtWorkers))))
  query <- sprintf("update history_table set no_workers = %s, no_filtered_workers = %s where job_id = %s", numWorkers, numFilteredWorkers, job.id)
  rs <- dbSendQuery(con, query)
  # ---- Excel workbook output ----
  createSheet(wb.new, name = "singleton-workers-removed")
  writeOutputHeaders(wb.new,"singleton-workers-removed")
  writeWorksheet(wb.new,data=out[rownames(out) %in% setdiff(rownames(out),singletons),],sheet="singleton-workers-removed",startRow=2,startCol=1,header=TRUE,rownames='Worker ID')
  ## createSheet(wb.new, name="workerRelationScore")
  ## wrs <- workerRelScore
  ## wrs[is.na(workerRelScore)] <- 0
  ## wrs$worker_id <- rownames(wrs)
  ## writeWorksheet(wb.new,data=wrs[,all],sheet="workerRelationScore",startRow=1,startCol=1,header=TRUE,rownames='Worker ID')
  # Discarded sentences, two columns per filter.
  createSheet(wb.new, name = "filtered-out-sentences")
  writeFilteredOutHeaders(wb.new,"filtered-out-sentences")
  currentCol <- 1
  for (f in filters){
    if(f != 'NULL'){
      writeWorksheet(wb.new,data=discarded[[f]],sheet='filtered-out-sentences',startRow=2,startCol=currentCol,header=FALSE)
      currentCol <- currentCol + 2
      #write.csv(discarded[[f]], paste(outputdirectory,paste(job_id,'filtered-out-sentences',f,'.csv',sep="_"),sep=""),row.names=FALSE)
    }
  }
  createSheet(wb.new, name = "spammer-labels")
  writeWorksheet(wb.new,data=spamLabels,sheet='spammer-labels',startRow=1,startCol=1,header=FALSE)
  # One column pair per behavioural filter.
  createSheet(wb.new, name = "beh-filters")
  writeBehFiltersHeaders(wb.new, 'beh-filters')
  writeWorksheet(wb.new,filtWorkers[['none_other']],sheet='beh-filters',startRow=2,startCol=1,header=FALSE)
  writeWorksheet(wb.new,filtWorkers[['rep_response']],sheet='beh-filters',startRow=2,startCol=3,header=FALSE)
  writeWorksheet(wb.new,filtWorkers[['valid_words']],sheet='beh-filters',startRow=2,startCol=5,header=FALSE)
  writeWorksheet(wb.new,filtWorkers[['rep_text']],sheet='beh-filters',startRow=2,startCol=7,header=FALSE)
  saveWorkbook(wb.new)
  #FIXME: get the adecuate value for the creator
  creator = 'script'
  saveFileMetadata(fname,path,mimeTypes[['excel']],-1,creator)
  dbDisconnect(con)
  cat('OK')
}
|
71b6b358012ba21845bc2ea6c118d40c863fede7
|
c5904577c015ffd7254fef31eae73484aa0fc6a7
|
/fitAndCompareModels.R
|
92efad90591a072c5aa8dcb8729cc1260f681ad6
|
[] |
no_license
|
RetoSchmucki/SURPASS_WP1
|
a11e56a6eaa79d8b5e78f9bf9db42715e6514683
|
e103caea6b83dbb009511954acd70b96db19653c
|
refs/heads/main
| 2023-06-11T03:03:08.761942
| 2021-07-01T10:55:35
| 2021-07-01T10:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,490
|
r
|
fitAndCompareModels.R
|
#### fit models to estimate temporal trends in species' distributions ####
library(occAssess)
library(raster)
library(reshape2)
library(plyr)
library(dplyr)
library(ggplot2)
library(gridExtra)
# Load sparta
library(sparta)

## setup model grid: Chile outline, masked raster aggregated to coarser cells
shp <- raster::shapefile("C:/Users/Rob.Lenovo-PC/Documents/surpass/Data/South America country boundaries/South America country boundaries/data/commondata/data0/southamerica_adm0.shp")
shp <- shp[shp$COUNTRY == "CHILE", ]
shp <- spTransform(shp, crs("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))
grid <- raster("C:/Users/Rob.Lenovo-PC/Documents/surpass/Data/maskLayers/mask_CHL.asc")
grid <- crop(grid, shp)
grid <- aggregate(grid, fact = 6)

## load species data and split into pre-/post-digitization record sets
dat <- read.csv("C:/Users/Rob.Lenovo-PC/Documents/surpass/Data/GBIF/07.04.21/preAndPostDigBeesChile.csv")
# Drop records without a species name. Using !is.na() rather than
# dat[-which(is.na(...)), ] is safe when there are no NAs:
# -integer(0) would otherwise select zero rows and drop everything.
dat <- dat[!is.na(dat$species), ]
pre <- dat[dat$identifier == "pre-digitization", ]
post <- dat[dat$identifier == "post-digitization", ]
## Map one occurrence record (row `x` of `data`) onto the model grid.
## Returns a one-row data.frame with the species name, the raster cell
## number the record falls in, and the record's year.
## NOTE(review): relies on the global `grid` raster built above. Assumes
## raster::extract(..., cellnumbers = TRUE) puts the cell number first,
## so cell[1] keeps the cell id -- confirm against the raster docs.
formatDat <- function(data, x) {
  # Which grid cell do this record's coordinates fall in?
  cell <- extract(grid, data.frame(x = data$x[x],
                                   y = data$y[x]),
                  cellnumbers = TRUE)
  cell <- cell[1]  # keep the cell number, drop the extracted raster value
  data.frame(species = data$species[x],
             cell = cell,
             year = data$year[x])
}
# Convert every occurrence row to (species, cell, year), one call per row.
# seq_len() is safe when a data set happens to be empty (1:nrow would
# produce c(1, 0) and index out of bounds).
fDatPre <- lapply(seq_len(nrow(pre)),
                  formatDat,
                  data = pre)
fDatPre <- do.call("rbind", fDatPre)
fDatPost <- lapply(seq_len(nrow(post)),
                   formatDat,
                   data = post)
fDatPost <- do.call("rbind", fDatPost)

## drop data from Chilean island outside of domain
# !is.na() is robust when no NA cells exist; -which(...) would drop all rows.
fDatPost <- fDatPost[!is.na(fDatPost$cell), ]

# Decadal periods used as the models' discrete time steps; a record's
# Period is the index of the decade its year falls in (NA otherwise).
periods <- list(1950:1959, 1960:1969, 1970:1979, 1980:1989, 1990:1999, 2000:2010, 2011:2019)
fDatPre$Period <- NA
fDatPost$Period <- NA
for (i in seq_along(periods)) {
  fDatPre$Period <- ifelse(fDatPre$year %in% periods[[i]], i, fDatPre$Period)
  fDatPost$Period <- ifelse(fDatPost$year %in% periods[[i]], i, fDatPost$Period)
}

## fit models
# first the reporting rate model with list length as a covariate and a random site intercept
rrPre <- reportingRateModel(taxa = fDatPre$species,
                            site = fDatPre$cell,
                            time_period = as.numeric(fDatPre$Period),
                            list_length = TRUE,
                            site_effect = TRUE)
rrPost <- reportingRateModel(taxa = fDatPost$species,
                             site = fDatPost$cell,
                             time_period = as.numeric(fDatPost$Period),
                             list_length = TRUE,
                             site_effect = TRUE)

## check for models that didn't converge (rows carrying an error message)
length(rrPre[!is.na(rrPre$error_message), ])
length(rrPost[!is.na(rrPost$error_message), ])

## then a simpler model without the random site intercept
rr2Pre <- reportingRateModel(taxa = fDatPre$species,
                             site = fDatPre$cell,
                             time_period = as.numeric(fDatPre$Period),
                             list_length = TRUE,
                             site_effect = FALSE)
rr2Post <- reportingRateModel(taxa = fDatPost$species,
                              site = fDatPost$cell,
                              time_period = as.numeric(fDatPost$Period),
                              list_length = TRUE,
                              site_effect = FALSE)

## compare models: join pre- and post-digitization fits species by species
rrMods <- merge(rrPre, rrPost, by = "species_name")
rr2Mods <- merge(rr2Pre, rr2Post, by = "species_name")

## check species converged in BOTH models
nrow(rrMods[is.na(rrMods$error_message.x) & is.na(rrMods$error_message.y), ])
nrow(rr2Mods)

## plot predictions from post-digitization data against those from pre digitization data
plot(rrMods$year.estimate.y ~ rrMods$year.estimate.x)
plot(rr2Mods$year.estimate.y ~ rr2Mods$year.estimate.x)
cor.test(rrMods$year.estimate.y, rrMods$year.estimate.x)
cor.test(rr2Mods$year.estimate.y, rr2Mods$year.estimate.x)

## now format the data for the Telfer model
# p1 = decades 1, 2 and 3; p2 = decades 5, 6 and 7 (decade 4 is skipped)
fDatTelfPre <- fDatPre[fDatPre$Period %in% c(1, 2, 3, 5, 6, 7), ]
fDatTelfPre$Period <- ifelse(fDatTelfPre$Period %in% c(1, 2, 3), 1, 2)
fDatTelfPost <- fDatPost[fDatPost$Period %in% c(1, 2, 3, 5, 6, 7), ]
fDatTelfPost$Period <- ifelse(fDatTelfPost$Period %in% c(1, 2, 3), 1, 2)
## fit Telfer model
telferPre <- sparta::telfer(taxa = fDatTelfPre$species,
site = fDatTelfPre$cell,
time_period = as.numeric(fDatTelfPre$Period),
minSite = 2)
telferPost <- sparta::telfer(taxa = fDatTelfPost$species,
site = fDatTelfPost$cell,
time_period = as.numeric(fDatTelfPost$Period),
minSite = 2)
colnames(telferPre)[1] <- "species_name"
colnames(telferPost)[1] <- "species_name"
## compare Telfer models
telferMods <- merge(telferPre, telferPost, by = "species_name")
## establish number of species which could be fitted using both pre and post-digitization data
nrow(telferMods[!is.na(telferMods$Telfer_1_2.x) & !is.na(telferMods$Telfer_1_2.y), ])
## plot model predictions from post-digitization data on predictions from pre-digitization data
pTelfer <- ggplot(data = telferMods, aes(x = Telfer_1_2.x, y = Telfer_1_2.y)) +
geom_point() +
theme_linedraw() +
xlab("pre digitization index") +
ylab("post digitization index") +
geom_abline(slope = 1,
intercept = 0) +
ggtitle("A) Telfer")
pRR <- ggplot(data = rrMods, aes(x = year.estimate.x, y = year.estimate.y)) +
geom_point() +
theme_linedraw() +
xlab("pre digitization index") +
ylab("post digitization index") +
geom_abline(slope = 1,
intercept = 0) +
ggtitle("C) RR + site")
pRR2 <- ggplot(data = rr2Mods, aes(x = year.estimate.x, y = year.estimate.y)) +
geom_point() +
theme_linedraw() +
xlab("pre digitization index") +
ylab("post digitization index") +
geom_abline(slope = 1,
intercept = 0) +
ggtitle("B) RR")
png("preVsPostMods.png", width = 3, height = 9, units = "in", res = 500)
grid.arrange(pTelfer,
pRR2,
pRR,
ncol = 1)
dev.off()
cor.test(telferMods$Telfer_1_2.x, telferMods$Telfer_1_2.y)
plot(telferMods$Telfer_1_2.y ~ rrMods$year.estimate.y)
cor.test(mods$year.estimate, mods$Telfer_1_2, method = "spearman")
plot(log(mods$year.estimate) ~ log(mods$Telfer_1_2))
meanRR <- median(mods$year.estimate, na.rm = T)
mods$agree <- ifelse(mods$year.estimate > meanRR & mods$Telfer_1_2 > 0 | mods$year.estimate < meanRR & mods$Telfer_1_2 < 0, "agree", "disagree")
head(mods)
png("cor.png", width = 5, height = 5, units = "in", res = 500)
ggplot(data = mods, aes(x = Telfer_1_2, y = year.estimate, group = agree, colour = agree)) +
geom_point() +
theme_linedraw() +
ylim(c(-3, 3)) +
xlab("Telfer index") +
ylab("RR period effect") + ggtitle("Spearman's rho = 0.59") +
theme(legend.position = "none")
dev.off()
|
9d1c235208299a5fa3fd5306ae06bf90553b82c7
|
6ceab1bf9c435b523d2f8e7e9440da39770d741b
|
/R/f7List.R
|
4cc4cf33559ffc26ec8bba3b9af7a2d393c230b4
|
[] |
no_license
|
RinteRface/shinyMobile
|
a8109cd39c85e171db893d1b3f72d5f1a04f2c62
|
86d36f43acf701b6aac42d716adc1fae4f8370c6
|
refs/heads/master
| 2023-07-25T16:28:41.026349
| 2022-11-25T17:04:29
| 2022-11-25T17:04:29
| 139,186,586
| 328
| 92
| null | 2023-03-26T05:58:53
| 2018-06-29T19:13:06
|
R
|
UTF-8
|
R
| false
| false
| 18,734
|
r
|
f7List.R
|
#' Create a framework 7 contact list
#'
#' @param ... Slot for \link{f7ListGroup} or \link{f7ListItem}.
#' @param mode List mode. NULL or "media" or "contacts".
#' @param inset Whether to display a card border. FALSE by default.
#' @export
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#'
#' shinyApp(
#' ui = f7Page(
#' title = "My app",
#' f7SingleLayout(
#' navbar = f7Navbar(title = "f7List"),
#'
#' # simple list
#' f7List(
#' lapply(1:3, function(j) f7ListItem(letters[j]))
#' ),
#'
#' # list with complex items
#' f7List(
#' lapply(1:3, function(j) {
#' f7ListItem(
#' letters[j],
#' media = f7Icon("alarm_fill"),
#' right = "Right Text",
#' header = "Header",
#' footer = "Footer"
#' )
#' })
#' ),
#'
#' # list with complex items
#' f7List(
#' mode = "media",
#' lapply(1:3, function(j) {
#' f7ListItem(
#' title = letters[j],
#' subtitle = "subtitle",
#' "Lorem ipsum dolor sit amet, consectetur adipiscing elit.
#' Nulla sagittis tellus ut turpis condimentum, ut dignissim
#' lacus tincidunt. Cras dolor metus, ultrices condimentum sodales
#' sit amet, pharetra sodales eros. Phasellus vel felis tellus.
#' Mauris rutrum ligula nec dapibus feugiat. In vel dui laoreet,
#' commodo augue id, pulvinar lacus.",
#' media = tags$img(
#' src = paste0(
#' "https://cdn.framework7.io/placeholder/people-160x160-", j, ".jpg"
#' )
#' ),
#' right = "Right Text"
#' )
#' })
#' ),
#'
#' # list with links
#' f7List(
#' lapply(1:3, function(j) {
#' f7ListItem(url = "https://google.com", letters[j])
#' })
#' ),
#'
#' # grouped lists
#' f7List(
#' mode = "contacts",
#' lapply(1:3, function(i) {
#' f7ListGroup(
#' title = LETTERS[i],
#' lapply(1:3, function(j) f7ListItem(letters[j]))
#' )
#' })
#' )
#' )
#' ),
#' server = function(input, output) {}
#' )
#' }
f7List <- function(..., mode = NULL, inset = FALSE) {
listCl <- "list chevron-center"
if (!is.null(mode)) listCl <- paste0(listCl, " ", mode, "-list")
if (inset) listCl <- paste0(listCl, " inset")
shiny::tags$div(
class = listCl,
if (is.null(mode)) {
shiny::tags$ul(...)
} else if (mode == "media") {
shiny::tags$ul(...)
} else {
shiny::tagList(...)
}
)
}
#' Create a Framework 7 contact item
#'
#' @param ... Item text.
#' @param title Item title.
#' @param subtitle Item subtitle.
#' @param header Item header. Do not use when \link{f7List} mode is not NULL.
#' @param footer Item footer. Do not use when \link{f7List} mode is not NULL.
#' @param href Item external link.
#' @param media Expect \link{f7Icon} or \code{img}.
#' @param right Right content if any.
#' @export
f7ListItem <- function(..., title = NULL, subtitle = NULL, header = NULL, footer = NULL,
href = NULL, media = NULL, right = NULL) {
# avoid to have crazy large images
if (!is.null(media)) {
if (!is.null(media$name)) {
if (media$name == "img") media$attribs$width <- "50"
}
}
itemContent <- shiny::tagList(
# left media
if (!is.null(media)) {
shiny::tags$div(
class = "item-media",
media
)
},
# center content
shiny::tags$div(
class = "item-inner",
if (is.null(title)) {
shiny::tagList(
shiny::tags$div(
class = "item-title",
if (!is.null(header)) {
shiny::tags$div(
class = "item-header",
header
)
},
...,
if (!is.null(footer)) {
shiny::tags$div(
class = "item-footer",
footer
)
}
),
# right content
if (!is.null(right)) {
shiny::tags$div(
class = "item-after",
right
)
}
)
} else {
shiny::tagList(
shiny::tags$div(
class = "item-title-row",
shiny::tags$div(
class = "item-title",
if (!is.null(header)) {
shiny::tags$div(
class = "item-header",
header
)
},
title,
if (!is.null(footer)) {
shiny::tags$div(
class = "item-footer",
footer
)
}
),
# right content
if (!is.null(right)) {
shiny::tags$div(
class = "item-after",
right
)
}
),
# subtitle
if (!is.null(subtitle)) {
shiny::tags$div(
class = "item-subtitle",
subtitle
)
},
# text
shiny::tags$div(
class = "item-text",
...
)
)
}
)
)
itemContentWrapper <- if (is.null(href)) {
shiny::tags$div(
class = "item-content",
itemContent
)
} else {
shiny::tags$a(
class = "item-link item-content external",
href = href,
target = "_blank",
itemContent
)
}
shiny::tags$li(itemContentWrapper)
}
#' Create a framework 7 group of contacts
#'
#' @param ... slot for \link{f7ListItem}.
#' @param title Group title.
#' @export
f7ListGroup <- function(..., title) {
shiny::tags$div(
class = "list-group",
shiny::tags$ul(
shiny::tags$li(class = "list-group-title", title),
...
)
)
}
#' Create a Framework 7 list index
#'
#' List index must be attached to an existing list view.
#'
#' @param id Unique id.
#' @param target Related list element. CSS selector like .class, #id, ...
#' @param ... Other options (see \url{https://v5.framework7.io/docs/list-index#list-index-parameters}).
#' @param session Shiny session object.
#' @export
#'
#' @note For some reason, unable to get more than 1 list index working. See
#' example below. The second list does not work.
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#' shinyApp(
#' ui = f7Page(
#' title = "List Index",
#' f7TabLayout(
#' navbar = f7Navbar(
#' title = "f7ListIndex",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' f7Tabs(
#' f7Tab(
#' tabName = "List1",
#' f7List(
#' mode = "contacts",
#' lapply(1:26, function(i) {
#' f7ListGroup(
#' title = LETTERS[i],
#' lapply(1:26, function(j) f7ListItem(letters[j]))
#' )
#' })
#' )
#' ),
#' f7Tab(
#' tabName = "List2",
#' f7List(
#' mode = "contacts",
#' lapply(1:26, function(i) {
#' f7ListGroup(
#' title = LETTERS[i],
#' lapply(1:26, function(j) f7ListItem(letters[j]))
#' )
#' })
#' )
#' )
#' )
#' )
#' ),
#' server = function(input, output, session) {
#' observeEvent(TRUE, {
#' f7ListIndex(id = "list-index-1", target = ".list")
#' }, once = TRUE)
#' }
#' )
#' }
f7ListIndex <- function(id, target, ..., session = shiny::getDefaultReactiveDomain()) {
message <- list(el = id, listEl = target, ...)
sendCustomMessage("listIndex", message, session)
}
#' Framework7 virtual list
#'
#' \code{f7VirtualList} is a high performance list container.
#' Use if you have too many components in \link{f7List}.
#'
#' @param id Virtual list unique id.
#' @param items List items. Slot for \link{f7VirtualListItem}.
#' @param rowsBefore Amount of rows (items) to be rendered before current
#' screen scroll position. By default it is equal to double amount of
#' rows (items) that fit to screen.
#' @param rowsAfter Amount of rows (items) to be rendered after current
#' screen scroll position. By default it is equal to the amount of rows
#' (items) that fit to screen.
#' @param cache Disable or enable DOM cache for already rendered list items.
#' In this case each item will be rendered only once and all further
#' manipulations will be with DOM element. It is useful if your list
#' items have some user interaction elements (like form elements or swipe outs)
#' or could be modified.
#'
#' @export
#' @rdname virtuallist
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#' shinyApp(
#' ui = f7Page(
#' title = "Virtual List",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "Virtual Lists",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' # main content
#' f7VirtualList(
#' id = "vlist",
#' rowsBefore = 2,
#' rowsAfter = 2,
#' items = lapply(1:2000, function(i) {
#' f7VirtualListItem(
#' title = paste("Title", i),
#' subtitle = paste("Subtitle", i),
#' header = paste("Header", i),
#' footer = paste("Footer", i),
#' right = paste("Right", i),
#' content = i,
#' media = img(src = "https://cdn.framework7.io/placeholder/fashion-88x88-1.jpg")
#' )
#' })
#' )
#' )
#' ),
#' server = function(input, output) {
#'
#' }
#' )
#'
#' # below example will not load with classic f7List
#' #shinyApp(
#' # ui = f7Page(
#' # title = "My app",
#' # f7SingleLayout(
#' # navbar = f7Navbar(
#' # title = "Virtual Lists",
#' # hairline = FALSE,
#' # shadow = TRUE
#' # ),
#' # # main content
#' # f7List(
#' # lapply(1:20000, function(i) {
#' # f7ListItem(
#' # title = paste("Title", i),
#' # subtitle = paste("Subtitle", i),
#' # header = paste("Header", i),
#' # footer = paste("Footer", i),
#' # right = paste("Right", i),
#' # content = i
#' # )
#' # })
#' # )
#' # )
#' # ),
#' # server = function(input, output) {
#' #
#' # }
#' #)
#' }
f7VirtualList <- function(id, items, rowsBefore = NULL, rowsAfter = NULL,
cache = TRUE) {
config <- dropNulls(
list(
items = items,
rowsBefore = rowsBefore,
rowsAfter = rowsAfter,
cache = cache
)
)
shiny::tags$div(
id = id,
shiny::tags$script(
type = "application/json",
`data-for` = id,
jsonlite::toJSON(
x = config,
auto_unbox = TRUE,
json_verbatim = TRUE
)
),
class = "list virtual-list media-list searchbar-found"
)
}
#' Framework7 virtual list item
#'
#' \code{f7VirtualListItem} is an item component for \link{f7VirtualList}.
#'
#' @inheritParams f7ListItem
#' @rdname virtuallist
#' @export
f7VirtualListItem <- function(..., title = NULL, subtitle = NULL, header = NULL, footer = NULL,
href = NULL, media = NULL, right = NULL) {
dropNulls(
list(
content = ...,
title = title,
subtitle = subtitle,
header = header,
footer = footer,
url = href,
media = as.character(media), # avoid issue on JS side
right = right
)
)
}
#' Update an \link{f7VirtualList} on the server side
#'
#' This function wraps all methods from \url{https://framework7.io/docs/virtual-list.html}
#'
#' @param id \link{f7VirtualList} to update.
#' @param action Action to perform. See \url{https://framework7.io/docs/virtual-list.html}.
#' @param item If action is one of appendItem, prependItem, replaceItem, insertItemBefore.
#' @param items If action is one of appendItems, prependItems, replaceAllItems.
#' @param index If action is one of replaceItem, insertItemBefore, deleteItem.
#' @param indexes If action if one of filterItems, deleteItems.
#' @param oldIndex If action is moveItem.
#' @param newIndex If action is moveItem.
#' @param session Shiny session.
#'
#' @export
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#' shinyApp(
#' ui = f7Page(
#' title = "Update virtual list",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "Virtual Lists",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' # main content
#' f7Segment(
#' container = "segment",
#'
#' f7Button(inputId = "appendItem", "Append Item"),
#' f7Button(inputId = "prependItems", "Prepend Items"),
#' f7Button(inputId = "insertBefore", "Insert before"),
#' f7Button(inputId = "replaceItem", "Replace Item")
#' ),
#' f7Segment(
#' container = "segment",
#' f7Button(inputId = "deleteAllItems", "Remove All"),
#' f7Button(inputId = "moveItem", "Move Item"),
#' f7Button(inputId = "filterItems", "Filter Items")
#' ),
#' f7Flex(
#' uiOutput("itemIndexUI"),
#' uiOutput("itemNewIndexUI"),
#' uiOutput("itemsFilterUI")
#' ),
#' f7VirtualList(
#' id = "vlist",
#' items = lapply(1:5, function(i) {
#' f7VirtualListItem(
#' title = paste("Title", i),
#' subtitle = paste("Subtitle", i),
#' header = paste("Header", i),
#' footer = paste("Footer", i),
#' right = paste("Right", i),
#' content = i,
#' media = img(src = "https://cdn.framework7.io/placeholder/fashion-88x88-3.jpg")
#' )
#' })
#' )
#' )
#' ),
#' server = function(input, output, session) {
#'
#' output$itemIndexUI <- renderUI({
#' req(input$vlist$length > 2)
#' f7Stepper(
#' inputId = "itemIndex",
#' label = "Index",
#' min = 1,
#' value = 2,
#' max = input$vlist$length
#' )
#' })
#'
#' output$itemNewIndexUI <- renderUI({
#' req(input$vlist$length > 2)
#' f7Stepper(
#' inputId = "itemNewIndex",
#' label = "New Index",
#' min = 1,
#' value = 1,
#' max = input$vlist$length
#' )
#' })
#'
#' output$itemsFilterUI <- renderUI({
#' input$appendItem
#' input$prependItems
#' input$insertBefore
#' input$replaceItem
#' input$deleteAllItems
#' input$moveItem
#' isolate({
#' req(input$vlist$length > 2)
#' f7Slider(
#' inputId = "itemsFilter",
#' label = "Items to Filter",
#' min = 1,
#' max = input$vlist$length,
#' value = c(1, input$vlist$length)
#' )
#' })
#' })
#'
#' observe(print(input$vlist))
#'
#' observeEvent(input$appendItem, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "appendItem",
#' item = f7VirtualListItem(
#' title = "New Item Title",
#' right = "New Item Right",
#' content = "New Item Content",
#' media = img(src = "https://cdn.framework7.io/placeholder/fashion-88x88-1.jpg")
#' )
#' )
#' })
#'
#' observeEvent(input$prependItems, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "prependItems",
#' items = lapply(1:5, function(i) {
#' f7VirtualListItem(
#' title = paste("Title", i),
#' right = paste("Right", i),
#' content = i,
#' media = img(src = "https://cdn.framework7.io/placeholder/fashion-88x88-1.jpg")
#' )
#' })
#' )
#' })
#'
#' observeEvent(input$insertBefore, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "insertItemBefore",
#' index = input$itemIndex,
#' item = f7VirtualListItem(
#' title = "New Item Title",
#' content = "New Item Content",
#' media = img(src = "https://cdn.framework7.io/placeholder/fashion-88x88-1.jpg")
#' )
#' )
#' })
#'
#' observeEvent(input$replaceItem, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "replaceItem",
#' index = input$itemIndex,
#' item = f7VirtualListItem(
#' title = "Replacement",
#' content = "Replacement Content",
#' media = img(src = "https://cdn.framework7.io/placeholder/fashion-88x88-1.jpg")
#' )
#' )
#' })
#'
#' observeEvent(input$deleteAllItems, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "deleteAllItems"
#' )
#' })
#'
#' observeEvent(input$moveItem, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "moveItem",
#' oldIndex = input$itemIndex,
#' newIndex = input$itemNewIndex
#' )
#' })
#'
#' observeEvent(input$filterItems, {
#' updateF7VirtualList(
#' id = "vlist",
#' action = "filterItems",
#' indexes = input$itemsFilter[1]:input$itemsFilter[2]
#' )
#' })
#'
#' }
#' )
#' }
updateF7VirtualList <- function(id, action = c("appendItem", "appendItems", "prependItem",
"prependItems", "replaceItem", "replaceAllItems",
"moveItem", "insertItemBefore", "filterItems",
"deleteItem", "deleteAllItems", "scrollToItem"),
item = NULL, items = NULL, index = NULL, indexes = NULL,
oldIndex = NULL, newIndex = NULL,
session = shiny::getDefaultReactiveDomain()) {
# JavaScript starts from 0!
index <- index - 1
indexes <- indexes - 1
oldIndex <- oldIndex - 1
newIndex <- newIndex - 1
message <- dropNulls(
list(
action = action,
item = item,
items = items,
index = index,
indexes = indexes,
oldIndex = oldIndex,
newIndex = newIndex
)
)
session$sendInputMessage(inputId = id, message)
}
|
f9d6eedae94927a0bb00ea0ad3abaa3804a18df4
|
00f3affa6100239eba6a49728cbcaa5e03e8c4d2
|
/code/mouse/R/gtf-munging.R
|
49da952cdd035287cb456b654fe4c9117b58aef1
|
[] |
no_license
|
nehiljain/pgi-analysis
|
72ae7953cc7797e8db92806deb2ec2afaf94fc44
|
1f31621fcc21ce15c91411b5369d00a053debcda
|
refs/heads/master
| 2021-01-10T21:06:34.180321
| 2015-04-29T17:48:06
| 2015-04-29T17:48:06
| 17,648,428
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,227
|
r
|
gtf-munging.R
|
rm(list=ls())
library(plyr)
library(dplyr)
library(stringr)
# library(rattle)
# This script loads the GTF file for mouse, creates a header, munges the strings to be made useful, converts to a dataframe and writes it to RData and CSV Files
# change the path to point to the GTF file from ensemble ftp://ftp.ensembl.org/pub/release-77/gtf/mus_musculus
# previewDf gives us idea about the structure of the file
# Header is derived from http://uswest.ensembl.org/info/website/upload/gff.html
# change the path to point to the GTF file from ensemble ftp://ftp.ensembl.org/pub/release-77/gtf/mus_musculus
# WARNING: No gene name found for line 21995 to 22008 in mouse GTF file path and url above. Replaced with "NA"
gtfFilePath <- "/home/data/reference/77/Mus_musculus.GRCm38.77.gtf"
# previewDf <- read.table(file = gtfFilePath,
# header = FALSE,
# comment.char = "#",
# nrow = 100,
# na.strings = "NA",
# fill = TRUE,
# sep = "\t")
headerName <- c("chromosome_name", "source", "feature", "start", "end", "score", "strand", "frame", "attribute")
colClassNames <- c("character", "factor", "factor", "integer", "integer", "character", "character", "character", "character")
gtfData <- read.table(file = gtfFilePath,
header = FALSE,
comment.char = "#",
na.strings = "NA",
fill = TRUE,
sep = "\t",
col.names = headerName)
## Reading is complete.
## Next we manipulate the attribute column to create 3 additional columns for gene id, gene name and gene biotype
removeGeneNameTag <- function(row) {
s <- as.character(row)
# print(s)
gene_name_loc <- str_locate(s, "gene_name ")
if (!is.na(gene_name_loc[1])) {
return(str_trim(str_sub(s, start = gene_name_loc[2])))
}
# print(s)
return(str_trim(s))
}
removeGeneIdTag <- function(row) {
s <- as.character(row)
# print(s)
gene_id_loc <- str_locate(s, "gene_id ")
# print(gene_id_loc)
if (!is.na(gene_id_loc[1])) {
return(str_trim(str_sub(s, start = gene_id_loc[2])))
}
return(str_trim(row))
}
removeGeneBiotypeTag <- function(row) {
s <- as.character(row)
# print(s)
gene_biotype_loc<- str_locate(s, "gene_biotype ")
if (!is.na(gene_biotype_loc[1])) {
return(str_trim(str_sub(s, start = gene_biotype_loc[2])))
}
return(str_trim(s))
}
splitAttributes <- str_split(gtfData$attribute, "; ")
formattedAttributes <- ldply(splitAttributes, function (row) {
id <- removeGeneIdTag(row[grep("gene_id", row)])
if ( length(grep("gene_name", row)) == 0 ) {
g_name <- "NA"
# this is done for line 21995 to 22008 in mouse GTF file path and url above.
} else {
g_name <- removeGeneNameTag(row[grep("gene_name", row)])
}
biotype <- removeGeneBiotypeTag(row[grep("gene_biotype", row)])
print(g_name)
df <- data.frame(gene_id = id, gene_name = g_name, gene_biotype = biotype)
})
resultGtfData <- cbind(gtfData, formattedAttributes)
str(resultGtfData)
# names(resultGtfData) <- normVarNames(names(resultGtfData))
save(resultGtfData, file = "/home/data/reference/77/Mus_musculus.GRCm38.77.mouse_gtf.RData")
write.csv(resultGtfData, file = "/home/data/reference/77/Mus_musculus.GRCm38.77.mouse_gtf.csv",
quote = FALSE, na = "NA", row.names = FALSE)
#
# refGeneIdData <- read.csv(, file = "Downloads/mouse_gene_list-NCBIM37.67-mm9.txt",
# header = TRUE,
# comment.char = "#",
# na.strings = "NA",
# fill = TRUE)
#
#
# names(refGeneIdData) <- normVarNames(names(refGeneIdData))
# save(refGeneIdData, file = "mouse_gene_list_mm9.RData")
# write.csv(refGeneIdData, file = "mouse_gene_list_mm9.csv",
# quote = FALSE, na = "NA", row.names = FALSE)
#
# write.csv(refGeneIdData[, 1], file = "test_mouse_gene_list_mm9.csv",
# quote = FALSE, na = "NA", row.names = FALSE)
|
95aae3708fcb06bb712045fd5be75341be27389f
|
0ba374b61d485f17ae45bcf6033d7deae7a84925
|
/Laura_Pipeline/Clean_Lineage_genes.R
|
155ca6b9386301be7bd8026338dd250de34adf4c
|
[] |
no_license
|
tallulandrews/LiverTumouroidsScripts
|
cb6cd9ec4dbbb6dd51864ec5f6cb5cc3901cc66a
|
ed7e4a7a30912076cd91e208a485df4d47657e8a
|
refs/heads/master
| 2022-03-02T18:07:38.553300
| 2022-02-28T11:30:35
| 2022-02-28T11:30:35
| 232,075,946
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,311
|
r
|
Clean_Lineage_genes.R
|
# Lineage markers
Chol_lineage <- read.table("/nfs/users/nfs_t/ta6/Collaborations/LiverOrganoids/Markers_130418_Chol.txt", header=TRUE)
Hep_lineage <- read.table("/nfs/users/nfs_t/ta6/Collaborations/LiverOrganoids/Markers_130418_Hep.txt", header=TRUE)
Hep_both <- Hep_lineage[ grepl("Prog", Hep_lineage[,2]) & grepl("Hep", Hep_lineage[,2]), 1]
Chol_both <- Chol_lineage[ grepl("Prog", Chol_lineage[,2]) & grepl("Chol", Chol_lineage[,2]), 1]
Prog_both <- Hep_lineage[ grepl("Prog", Hep_lineage[,2]) & Hep_lineage[,1] %in% Chol_lineage[Chol_lineage[,2] == "Prog",1], 1]
Conflict1 <- Hep_lineage[ grepl("Hep", Hep_lineage[,2]) & Hep_lineage[,1] %in% Chol_lineage[Chol_lineage[,2] == "Chol",1], 1]
Conflict2 <- Hep_lineage[ grepl("Prog", Hep_lineage[,2]) & Hep_lineage[,1] %in% Chol_lineage[Chol_lineage[,2] == "Chol",1], 1]
Conflict3 <- Hep_lineage[ grepl("Hep", Hep_lineage[,2]) & Hep_lineage[,1] %in% Chol_lineage[Chol_lineage[,2] == "Prog",1], 1]
Conflict4 <- Chol_lineage[ grepl("Prog", Chol_lineage[,2]) & Chol_lineage[,1] %in% Hep_lineage[Hep_lineage[,2] == "Hep",1], 1]
Conflicts <- c(as.character(Conflict1), as.character(Conflict2), as.character(Conflict3), as.character(Conflicts4))
Chol_lineage <- Chol_lineage[Chol_lineage[,1] %in% marker_genes & Chol_lineage[,1] %in% keep_genes,]
Hep_lineage <- Hep_lineage[Hep_lineage[,1] %in% marker_genes & Hep_lineage[,1] %in% keep_genes,]
Chol_lineage[,2] <- as.character(Chol_lineage[,2])
Chol_lineage[Chol_lineage[,2] == "Prog",2] <- "Chol-Prog"
Chol_lineage[Chol_lineage[,2] == "Chol",2] <- "Chol-Mature"
Chol_lineage[Chol_lineage[,1] %in% Chol_both,2] <- "Chol-Both"
Chol_lineage <- Chol_lineage[!(Chol_lineage[,1] %in% Conflicts),]
Hep_lineage[,2] <- as.character(Hep_lineage[,2])
Hep_lineage[Hep_lineage[,2] == "Prog",2] <- "Hep-Prog"
Hep_lineage[Hep_lineage[,2] == "Hep",2] <- "Hep-Mature"
Hep_lineage[Hep_lineage[,1] %in% Hep_both,2] <- "Hep-Both"
Hep_lineage <- Hep_lineage[!(Hep_lineage[,1] %in% Conflicts),]
Lineage <- rbind(Chol_lineage,Hep_lineage)
Lineage[Lineage[,1] %in% Prog_both,2] <- "Common-Prog"
Lineage <- Lineage[!duplicated(Lineage[,1]),]
Lineage[,1] <- as.character(Lineage[,1])
Lineage[Lineage[,1] == "05-Mar",1] <- "MARCH5"
Lineage <- unique(Lineage)
write.table(Lineage, file="Cleaned_Lineage.txt", row.names=F, col.names=F)
|
d14297b6a9e14b0a7592ad409fb7941444c7ecdd
|
ecb38a2803f102e08c59813270d638111d41d10c
|
/FindFeaturesFrom PCa.R
|
c01006a442138112e920782062f50c1d807bd555
|
[] |
no_license
|
WeichselRiver/stamps
|
52f0badb23d386a16b3d68cb1b74a6717ca9b604
|
271f241b12322dfdcdd6f386599dd140689177b8
|
refs/heads/master
| 2022-04-09T15:50:12.575177
| 2020-01-25T08:05:30
| 2020-01-25T08:05:30
| 115,778,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,513
|
r
|
FindFeaturesFrom PCa.R
|
# Stamp Recognition using PCA
#.libPaths(c("C:/Daten/RStudio/R-3.3.2/library", "C:/Daten/R-3.1.2/library" ))
library(EBImage)
library(dplyr)
library(readxl)
t1 = read_excel("StampList.xlsx") %>%
dplyr::filter(bild == "Ziffern im Kreis")
pic_prep = function(x) {
x1 = EBImage::readImage(x) %>%
resize(w = 100, h = 100)
colorMode(x1) = "Grayscale"
as.vector(imageData(x1[,,1]))
}
# get data.frame of all pictures
pics_array = plyr::laply(t1$file, pic_prep) %>% t %>% data.frame
pca = princomp(pics_array)
# rescale function
linMap <- function(x, from = 0, to = 1)
(x - min(x)) / max(x - min(x)) * (to - from) + from
pca_pic = pca$scores[,1] %>%
linMap %>%
Image(dim = c(100,100), colormode = "Grayscale")
display(pca_pic, method = "raster")
# find features
bwlabel(pca_pic)
computeFeatures.shape(pca_pic)
## load and segment nucleus
y = pca_pic
x = thresh(y, 10, 10, 0.05)
# x = opening(x, makeBrush(5, shape='disc'))
x = bwlabel(x)
display(y, title="Cell nuclei", method = "raster")
display(x, title="Segmented nuclei", method = "raster")
## compute shape features
fts = computeFeatures.shape(x)
data.frame(fts) %>% dplyr::arrange(desc(s.area))
## compute features
ft = computeFeatures(x, y, xname="nucleus")
cat("median features are:\n")
apply(ft, 2, median)
## compute feature properties
ftp = computeFeatures(x, y, properties=TRUE, xname="nucleus")
ftp
cx = colorLabels(x)
display(cx, method = "raster")
|
f8e69d40582b2275ee2c455c7656738d0aefc510
|
9fe45af18aeb00a6de72d11f15916cd652913dd5
|
/dmeas.R
|
691598785dacd7d6c5bf9c8e807d8b09275050dc
|
[] |
no_license
|
dkenned1/KennedyDunnRead
|
804929ef873aaa1c4dbce00ff2ae329ab02a2292
|
0acd499f9c94a8d63ffbafbbb585a18179f44e3e
|
refs/heads/master
| 2020-12-30T09:11:41.294172
| 2017-08-15T17:04:47
| 2017-08-15T17:04:47
| 100,393,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,137
|
r
|
dmeas.R
|
dmeas <- Csnippet("
double tol=1.0e-17;
double DetectionLimit=2;
double probit_beta_0= -2.206;
double probit_beta_1= 1.555;
double vcn_beta_0= 1.127;
double vcn_beta_1= -0.151;
double lVirusConc = log10(V/D +1);
double ProbitValue=probit_beta_0 + probit_beta_1*lVirusConc;
double dNormValue=vcn_beta_0 + vcn_beta_1 *lVirusConc;
if (log10(VCN1+1)<DetectionLimit)
{
lik = pnorm(ProbitValue,0,1,0,1); //prob that virus is undetectable //last argument is log=TRUE, second to last is lower.tail=FALSE
}
else
{
lik = pnorm(ProbitValue,0,1,1,1); //prob that virus is detectable
lik += dnorm(log10(VCN1+1), lVirusConc, dNormValue, 1); //prob that virus is exactly equal to data
}
if (Ndata>1)
{
if (log10(VCN2+1)<DetectionLimit)
{
lik += pnorm(ProbitValue,0,1,0,1);
}
else
{
lik += pnorm(ProbitValue,0,1,1,1);
lik += dnorm(log10(VCN2+1), lVirusConc, dNormValue, 1);
}
if (Ndata>2)
{
if (log10(VCN3+1)<DetectionLimit)
{
lik += pnorm(ProbitValue,0,1,0,1);
}
else
{
lik += pnorm(ProbitValue,0,1,1,1);
lik += dnorm(log10(VCN3+1), lVirusConc, dNormValue, 1);
}
if (Ndata>3)
{
if (log10(VCN4+1)<DetectionLimit)
{
lik += pnorm(ProbitValue,0,1,0,1);
}
else
{
lik += pnorm(ProbitValue,0,1,1,1);
lik += dnorm(log10(VCN4+1), lVirusConc, dNormValue, 1);
}
if (Ndata>4)
{
if (log10(VCN5+1)<DetectionLimit)
{
lik += pnorm(ProbitValue,0,1,0,1);
}
else
{
lik += pnorm(ProbitValue,0,1,1,1);
lik += dnorm(log10(VCN5+1), lVirusConc, dNormValue, 1);
}
if (Ndata>5)
{
if (log10(VCN6+1)<DetectionLimit)
{
lik += pnorm(ProbitValue,0,1,0,1);
}
else
{
lik += pnorm(ProbitValue,0,1,1,1);
lik += dnorm(log10(VCN6+1), lVirusConc, dNormValue, 1);
}
}
}
}
}
}
if(isnan(lik))
{
//Rprintf(\"%lg\\n\",lik);
lik=-300;
}
lik=exp(lik) + tol;
")
|
8eb3a39cd449bd8043476e1668e39560dcad7c17
|
597f9a80945008773bfc3fde0c333913fa17395b
|
/weatherType2Table.R
|
767d26ebc1b01795e0f749b6f6744c120aaa075c
|
[
"MIT"
] |
permissive
|
drmingle/Wallmart-II
|
0e97cb4775430dd1a0dcbca522f164773d5d0a1c
|
a8a262e5e5968fae0e4691fd0272c084b913ba53
|
refs/heads/master
| 2021-01-15T13:13:35.495976
| 2015-05-25T01:43:57
| 2015-05-25T01:43:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
weatherType2Table.R
|
weatherType2Table <- function(weatherSting){
types2check <- c("TS", "GR", "RA", "DZ", "SN", "SG", "GS", "PL", "FG", "BR", "UP", "HZ", "FU", "DU",
"SS", "SQ", "FZ", "MI", "PR", "BC", "BL", "VC")
logicalString <- sapply(types2check, function(type){
return(grepl(type, weatherSting))
})
return(logicalString)
}
|
6ac06b24a9c899a953bae240af128fc5896bdc74
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gaston/man/lik.contour.Rd
|
54580cbc923db67a21e091b8f5ee5bb723890f43
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,574
|
rd
|
lik.contour.Rd
|
\name{lik.contour}
\alias{lik.contour}
\title{ Contour plot for two parameters likelihood }
\description{ Create a contour plot (superimposed with a heat map) }
\usage{ lik.contour(x, y, z, levels = NULL, nlevels = 11, heat = TRUE, col.heat = NULL, ...) }
\arguments{
\item{x, y, z}{ As in \code{contour} }
\item{levels}{ As in \code{contour}. If \code{NULL}, the function computes appropriate levels. }
\item{nlevels}{ As in \code{contour} }
\item{heat}{ If \code{TRUE}, a heat map is superimposed to the contour plot }
\item{col.heat}{ Vector of heat colors}
\item{\dots}{ Additional arguments to \code{image} and \code{contour}}
}
\details{ This function is a wrapper for \code{contour}, with a different method to compute
a default value for levels. If \code{heat = TRUE}, a heatmap produced by \code{image} is added to the plot.
See \code{\link{contour}} for details on parameters. }
\author{ Hervรฉ Perdry and Claire Dandine-Roulland }
\seealso{ \code{\link{lmm.diago.likelihood}}, \code{\link[graphics:contour]{contour}}, \code{\link[graphics:image]{image}} }
\examples{
data(AGT)
x <- as.bed.matrix(AGT.gen, AGT.fam, AGT.bim)
# Compute Genetic Relationship Matrix
K <- GRM(x)
# eigen decomposition of K
eiK <- eigen(K)
# simulate a phenotype
set.seed(1)
y <- 1 + lmm.simu(tau = 1, sigma2 = 2, eigenK = eiK)$y
# Likelihood
TAU <- seq(0.5,2.5,length=30)
S2 <- seq(1,3,length=30)
lik1 <- lmm.diago.likelihood(tau = TAU, s2 = S2, Y = y, eigenK = eiK)
lik.contour(TAU, S2, lik1, heat = TRUE, xlab = "tau", ylab = "sigma^2")
}
\keyword{ Heat map }
|
d7825648a216034df4de0ff301cf2b77324cc1ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rms/examples/orm.fit.Rd.R
|
a5e28f55cfe2822061097c4bbf2911481fb11f97
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
orm.fit.Rd.R
|
# Extracted example script for rms::orm.fit (Ordinal Regression Model Fitter).
# Only `library(rms)` is executable; the example call is kept commented out in
# the upstream help page because it requires user-supplied vectors
# (age, blood.pressure, sex, death) that are not defined here.
library(rms)
### Name: orm.fit
### Title: Ordinal Regression Model Fitter
### Aliases: orm.fit
### Keywords: models regression

### ** Examples

#Fit an additive logistic model containing numeric predictors age,
#blood.pressure, and sex, assumed to be already properly coded and
#transformed
#
# fit <- orm.fit(cbind(age,blood.pressure,sex), death)
1fd6fba9a67b47c7eeffc9f4c8dca333741c9257
|
e6af6862edb1e783a00cb51a79a35a4496ffdc79
|
/preprocessing/sample-data-test.R
|
c3220e15f332286aea698feafa0701a2aba3dabd
|
[] |
no_license
|
jodeleeuw/219-2020-eeg-analysis
|
19fc9b13305ade1dd79bad4541dc6e21e0f93048
|
c3d5bb2811b53ee09bb69df4a478be5d46a31173
|
refs/heads/master
| 2023-07-27T06:40:57.453364
| 2021-09-13T12:21:17
| 2021-09-13T12:21:17
| 238,238,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
r
|
sample-data-test.R
|
# Sanity checks on behavioral test data: count trials at several levels of
# aggregation within the "test" phase.
library(readr)
library(dplyr)

test_data <- read_csv('data/behavioral/longtest_csv.csv')

# Bug fix: the original script assigned all three summaries to `trials`,
# silently discarding the first two results. Each summary now has its own name.
trials_by_match <- test_data %>% filter(phase=="test") %>% group_by(audio_type, match_type) %>% summarize(n=n())
trials_by_stimulus <- test_data %>% filter(phase=="test") %>% group_by(audio_type, match_type, stimulus) %>% summarize(n=n())
trials_by_category <- test_data %>% filter(phase=="test") %>% group_by(image_category, sound_category) %>% summarize(n=n())

# Keep `trials` bound to its original (last-assigned) value so any downstream
# code that referenced it still works.
trials <- trials_by_category
|
b6391ef1ec7958a6eadc5d373999b5e67c3f935f
|
d3da6daa98914ca5e140930257c7e4f8f34fb7d2
|
/algorithm/iterative_cmi_greedy_flexible_parallel.R
|
3e7b9a775a6bc97ce045f40ad9d8af4ec1d47d5e
|
[
"MIT"
] |
permissive
|
ylincen/CMI-adaptive-hist
|
0a836051910f9def29818be000f96b3e8011613f
|
264e65e55da0e3796d06ccb6e75147c7c66a6bf1
|
refs/heads/main
| 2023-03-27T02:04:33.397777
| 2021-03-25T09:57:24
| 2021-03-25T09:57:24
| 327,953,841
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,895
|
r
|
iterative_cmi_greedy_flexible_parallel.R
|
# Iteratively build an adaptive multivariate histogram by greedily re-splitting
# one dimension at a time so as to minimize a two-part (L + R) stochastic
# complexity score, parallelizing the per-dimension candidate updates.
#
# Arguments:
#   data             data frame / matrix of observations (columns = dimensions)
#   eps              tolerance parameter (currently unused in this body —
#                    NOTE(review): confirm whether it is consumed downstream)
#   Kmax             max number of bins passed through to the splitting routine
#   v                verbosity flag (unused here — TODO confirm)
#   isCat            integer indices of categorical columns
#   max_num_updating max number of greedy update sweeps
#   cores            number of cores for parallel::mclapply
#
# Returns: the result object produced by multi_hist_splitting_seed_based_simple
# (a data-frame-like structure carrying per-cell info plus score components
# L/R and L1/R1), with columns restored to the original dimension order.
#
# Depends on project helpers not visible in this file:
# multi_hist_splitting_seed_based_simple() and duplicated_row_indices()
# (the latter documented upstream as living in utils.R).
iterative_cmi_greedy_flexible_parallel = function(data, eps = 0, Kmax = 0, v=F, isCat = c(), max_num_updating = 20, cores=1){
  require("parallel")
  cores = min(cores, detectCores()) ## check if number of available cores is sufficient
  # transform the categorical data into 1,2,3,...,(number of categories)
  if(length(isCat)>0){
    data[,isCat] = apply(data[,isCat,drop = F],2, function(x){as.integer(as.factor(x))})
  }
  # In case it is 1D data, then no iteration is needed.
  if(ncol(data) == 1){
    if(length(isCat) == 1){
      res = multi_hist_splitting_seed_based_simple(data, isCat = T)
    } else {
      res = multi_hist_splitting_seed_based_simple(data)
    }
    return(res)
  }
  # initialize the grid: split each dimension once, in order, feeding the
  # previous result forward so splits accumulate across dimensions
  prev_res = NULL
  for(i in 1:ncol(data)){
    if(i %in% isCat){
      res = multi_hist_splitting_seed_based_simple(data[,i,drop=F], res = prev_res, isCat = T)
    } else {
      res = multi_hist_splitting_seed_based_simple(data[,i,drop=F], res = prev_res, Kmax = 1)
    }
    prev_res = res
  }
  # remember the first-level score components; restored on the result at the end
  l1 = res$L1
  r1 = res$R1
  if(length(res$L1) > 1){
    l1 = res$L1[1]
    r1 = res$R1[1]
  }
  # iteratively updating each dimension in a greedy manner
  min_sc = res$L[1] + res$R[1]   # current best (minimum) total score
  min_res = NULL
  min_dim = 1:ncol(data)
  remaining_cat = isCat
  dims_in_order = 1:ncol(data)
  for(iter in 1:max_num_updating){
    min_dim_here = 0   # dimension whose re-split improved the score this sweep
    num_updates = 0
    # evaluate, in parallel, the effect of dropping and re-splitting each
    # (non-categorical) dimension; categorical dims return NULL
    result_list = mclapply(1:(ncol(data)), function(jj){
      # drop the current split of this dimension
      dim = dims_in_order[jj]
      if(dim %in% isCat){
        return(NULL)
      }
      sub_cols_to_remove = c(2*jj-1, 2*jj)
      sub_cols = setdiff(1:(2*ncol(data)), sub_cols_to_remove)
      duplicated_info_list = prev_res[,sub_cols] %>%
        as.matrix()
      duplicated_info_list = duplicated_row_indices(duplicated_info_list) # The function duplicated_row_indices() is in utils.R
      if(length(duplicated_info_list$ind_dup) == 0){
        prev_res_drop = prev_res[,-sub_cols_to_remove,drop=F] # no duplicated rows to remove
      } else{
        prev_res_drop = prev_res[duplicated_info_list$ind_base,-sub_cols_to_remove, drop=F] # the result if we remove the split of one dimension
        # merge the local indices of cells that became identical after the drop
        for(i in 1:length(duplicated_info_list$ind_dup)){
          prev_res_drop[duplicated_info_list$corresponding_ind[i],"local_index"][[1]] =
            c(unlist(prev_res_drop[duplicated_info_list$corresponding_ind[i],"local_index"]),
              unlist(prev_res[duplicated_info_list$ind_dup[i],"local_index"])) %>% list()
        }
      }
      # update
      res = multi_hist_splitting_seed_based_simple(data[,dim,drop=F], res = prev_res_drop, isCat = dim %in% isCat, Kmax = Kmax)
      return(res)
    }, mc.cores=cores)
    # pick, sequentially, the candidate with the lowest total score
    for(jj in 1:(ncol(data))){
      dim = dims_in_order[jj]
      if(dim %in% isCat){
        next
      }
      num_updates = num_updates + 1
      res = ((result_list[jj]))[[1]]
      if(res$L[1] + res$R[1] < min_sc){ # note that min_sc is not equal to min_sc if we are considering a categorical dimension
        min_res = res # update min_res
        min_sc = res$L[1] + res$R[1] # update min_sc
        min_dim_here = dim
      }
    }
    if(min_dim_here == 0){ # break if further split will not have lower SC
      break
    }
    min_dim = c(min_dim, min_dim_here) # update the min_dim
    if(min_dim_here %in% isCat){
      remaining_cat = setdiff(remaining_cat, min_dim_here)
    }
    prev_res = min_res # update the prev_res
    # move the just-updated dimension to the end of the visiting order
    dims_in_order = c(setdiff(dims_in_order, min_dim_here),min_dim_here)
    if(num_updates <= 1){
      break
    }
  }
  res = prev_res
  # make the res in the right order
  min_dim = unique(min_dim, fromLast = T)
  correct_order = c(min_dim * 2 - 1, min_dim * 2) %>% matrix(byrow = T, nrow = 2) %>% as.numeric()
  res[,correct_order] = res[,1:(2*ncol(data))]
  res$L1 = l1
  res$R1 = r1
  return(res)
}
049f656f4085780fa01ca65a77121b8bec286ee2
|
00f898ae9d13abce71fb6680b85955487f2642f7
|
/R/est_lucid.R
|
fcb94f147a088fc387d6503ddd0a8c23121fa976
|
[] |
no_license
|
cran/LUCIDus
|
6e912853c0e7a801836164af8e917b4cef47c7c6
|
6370b1e66496f3c81a9671a9e8f8a5a38a98459c
|
refs/heads/master
| 2022-11-23T09:05:57.209899
| 2022-11-08T09:10:02
| 2022-11-08T09:10:02
| 162,729,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,644
|
r
|
est_lucid.R
|
#' @title Fit LUCID model to conduct integrated clustering
#'
#' @description The Latent Unknown Clustering with Integrated Data (LUCID) performs
#' integrative clustering using multi-view data. LUCID model is estimated via EM
#' algorithm for model-based clustering. It also features variable selection,
#' integrated imputation, bootstrap inference and visualization via Sankey diagram.
#'
#' @param G Exposures, a numeric vector, matrix, or data frame. Categorical variable
#' should be transformed into dummy variables. If a matrix or data frame, rows
#' represent observations and columns correspond to variables.
#' @param Z Omics data, a numeric matrix or data frame. Rows correspond to observations
#' and columns correspond to variables.
#' @param Y Outcome, a numeric vector. Categorical variable is not allowed. Binary
#' outcome should be coded as 0 and 1.
#' @param CoG Optional, covariates to be adjusted for estimating the latent cluster.
#' A numeric vector, matrix or data frame. Categorical variable should be transformed
#' into dummy variables.
#' @param CoY Optional, covariates to be adjusted for estimating the association
#' between latent cluster and the outcome. A numeric vector, matrix or data frame.
#' Categorical variable should be transformed into dummy variables.
#' @param K Number of latent clusters. An integer greater or equal to 2. User
#' can use \code{\link{lucid}} to determine the optimal number of latent clusters.
#' @param family Distribution of outcome. For continuous outcome, use "normal";
#' for binary outcome, use "binary". Default is "normal".
#' @param useY Flag to include information of outcome when estimating the latent
#' cluster. Default is TRUE.
#' @param tol Tolerance for convergence of EM algorithm. Default is 1e-3.
#' @param max_itr Max number of iterations for EM algorithm.
#' @param max_tot.itr Max number of total iterations for \code{est_lucid} function.
#' \code{est_lucid} may conduct EM algorithm for multiple times if the algorithm
#' fails to converge.
#' @param Rho_G A scalar. This parameter is the LASSO penalty to regularize
#' exposures. If user wants to tune the penalty, use the wrapper
#' function \code{lucid}
#' @param Rho_Z_Mu A scalar. This parameter is the LASSO penalty to
#' regularize cluster-specific means for omics data (Z). If user wants to tune the
#' penalty, use the wrapper function \code{lucid}
#' @param Rho_Z_Cov A scalar. This parameter is the graphical LASSO
#' penalty to estimate sparse cluster-specific variance-covariance matrices for omics
#' data (Z). If user wants to tune the penalty, use the wrapper function \code{lucid}
#' @param modelName The variance-covariance structure for omics data.
#' See \code{mclust::mclustModelNames} for details.
#' @param seed An integer to initialize the EM algorithm or imputing missing values.
#'Default is 123.
#' @param init_impute Method to initialize the imputation of missing values in
#' LUCID. "mclust" will use \code{mclust:imputeData} to implement EM Algorithm
#' for Unrestricted General Location Model to impute the missing values in omics
#' data; \code{lod} will initialize the imputation via relacing missing values by
#' LOD / sqrt(2). LOD is determined by the minimum of each variable in omics data.
#' @param init_par Method to initialize the EM algorithm. "mclust" will use mclust
#' model to initialize parameters; "random" initialize parameters from uniform
#' distribution.
#' @param verbose A flag indicates whether detailed information for each iteration
#' of EM algorithm is printed in console. Default is FALSE.
#'
#'
#'
#' @return A list which contains the several features of LUCID, including:
#' \item{pars}{Estimates of parameters of LUCID, including beta (effect of
#' exposure), mu (cluster-specific mean for omics data), sigma (cluster-specific
#' variance-covariance matrix for omics data) and gamma (effect estimate of association
#' between latent cluster and outcome)}
#' \item{K}{Number of latent cluster}
#' \item{modelName}{Geometric model to estiamte variance-covariance matrix for
#' omics data}
#' \item{likelihood}{The log likelihood of the LUCID model}
#' \item{post.p}{Posterior inclusion probability (PIP) for assigning observation i
#' to latent cluster j}
#' \item{Z}{If missing values are observed, this is the complet dataset for omics
#' data with missing values imputed by LUCID}
#'
#' @importFrom nnet multinom
#' @import mclust
#' @importFrom glmnet glmnet
#' @importFrom glasso glasso
#' @import stats
#' @import utils
#' @import mix
#' @export
#'
#' @references
#' Cheng Peng, Jun Wang, Isaac Asante, Stan Louie, Ran Jin, Lida Chatzi,
#' Graham Casey, Duncan C Thomas, David V Conti, A Latent Unknown Clustering
#' Integrating Multi-Omics Data (LUCID) with Phenotypic Traits, Bioinformatics,
#' btz667, https://doi.org/10.1093/bioinformatics/btz667.
#'
#'
#' @examples
#' \dontrun{
#' # use simulated data
#' G <- sim_data$G
#' Z <- sim_data$Z
#' Y_normal <- sim_data$Y_normal
#' Y_binary <- sim_data$Y_binary
#' cov <- sim_data$Covariate
#'
#' # fit LUCID model with continuous outcome
#' fit1 <- est_lucid(G = G, Z = Z, Y = Y_normal, family = "normal", K = 2,
#' seed = 1008)
#'
#' # fit LUCID model with block-wise missing pattern in omics data
#' Z_miss_1 <- Z
#' Z_miss_1[sample(1:nrow(Z), 0.3 * nrow(Z)), ] <- NA
#' fit2 <- est_lucid(G = G, Z = Z_miss_1, Y = Y_normal, family = "normal", K = 2)
#'
#' # fit LUCID model with sporadic missing pattern in omics data
#' Z_miss_2 <- Z
#' index <- arrayInd(sample(length(Z_miss_2), 0.3 * length(Z_miss_2)), dim(Z_miss_2))
#' Z_miss_2[index] <- NA
#' # initialize imputation by replacing missing values with LOD / sqrt(2)
#' fit3 <- est_lucid(G = G, Z = Z_miss_2, Y = Y_normal, family = "normal",
#' K = 2, seed = 1008, init_impute = "lod")
#' # initialize imputation by mclust
#' fit4 <- est_lucid(G = G, Z = Z_miss_2, Y = Y_normal, family = "normal", K = 2,
#' seed = 123, init_impute = "mclust")
#'
#' # fit LUCID model with binary outcome
#' fit5 <- est_lucid(G = G, Z = Z, Y = Y_binary, family = "binary", K = 2,
#' seed = 1008)
#'
#' # fit LUCID model with covariates
#' fit6 <- est_lucid(G = G, Z = Z, Y = Y_binary, CoY = cov, family = "binary",
#' K = 2, seed = 1008)
#'
#' # use LUCID model to conduct integrated variable selection
#' # select exposure
#' fit6 <- est_lucid(G = G, Z = Z, Y = Y_normal, CoY = NULL, family = "normal",
#' K = 2, seed = 1008, Rho_G = 0.1)
#' # select omics data
#' fit7 <- est_lucid(G = G, Z = Z, Y = Y_normal, CoY = NULL, family = "normal",
#' K = 2, seed = 1008, Rho_Z_Mu = 90, Rho_Z_Cov = 0.1, init_par = "random")
#'
#' }
# Fit the LUCID model via EM. See the roxygen block above for the public
# contract. Internally this relies on helpers defined elsewhere in the
# package and not visible in this chunk: check_na(), normal(), binary(),
# Estep(), Mstep_G(), Mstep_Z(), Istep_Z(), lse_vec(), fill_data_lod(),
# gen_cov_matrices() — comments below describe them only as used here.
est_lucid <- function(G,
                      Z,
                      Y,
                      CoG = NULL,
                      CoY = NULL,
                      K = 2,
                      family = c("normal", "binary"),
                      useY = TRUE,
                      tol = 1e-3,
                      max_itr = 1e3,
                      max_tot.itr = 1e4,
                      Rho_G = 0,
                      Rho_Z_Mu = 0,
                      Rho_Z_Cov = 0,
                      modelName = NULL,
                      seed = 123,
                      init_impute = c("mclust", "lod"),
                      init_par = c("mclust", "random"),
                      verbose = FALSE) {
  # 1. basic setup for estimation function =============
  family <- match.arg(family)
  init_impute <- match.arg(init_impute)
  init_par <- match.arg(init_par)
  # Nonzero penalties switch on variable selection for exposures (G)
  # and/or omics data (Z).
  Select_G <- FALSE
  Select_Z <- FALSE
  if(Rho_G != 0) {
    Select_G <- TRUE
  }
  if(Rho_Z_Mu != 0 | Rho_Z_Cov != 0) {
    Select_Z <- TRUE
  }
  ## 1.1 check data format ====
  # Coerce each input to a numeric matrix and assign default column names
  # (G1..Gp, Z1..Zm, "outcome", CoG*/CoY*) when none are supplied.
  if(is.null(G)) {
    stop("Input data 'G' is missing")
  } else {
    if(!is.matrix(G)) {
      G <- as.matrix(G)
      if(!is.numeric(G)) {
        stop("Input data 'G' should be numeric; categorical variables should be transformed into dummies")
      }
    }
  }
  if(is.null(colnames(G))){
    Gnames <- paste0("G", 1:ncol(G))
  } else {
    Gnames <- colnames(G)
  }
  colnames(G) <- Gnames
  if(is.null(Z)) {
    stop("Input data 'Z' is missing")
  } else {
    if(!is.matrix(Z)) {
      Z <- as.matrix(Z)
      if(!is.numeric(Z)) {
        stop("Input data 'Z' should be numeric")
      }
    }
  }
  if(is.null(colnames(Z))){
    Znames <- paste0("Z", 1:ncol(Z))
  } else {
    Znames <- colnames(Z)
  }
  if(is.null(Y)) {
    stop("Input data 'Y' is missing")
  } else {
    if(!is.matrix(Y)) {
      Y <- as.matrix(Y)
      if(!is.numeric(Y)) {
        stop("Input data 'Y' should be numeric; binary outcome should be transformed them into dummies")
      }
      # NOTE(review): this multi-column check only runs when Y was NOT already
      # a matrix — a matrix Y with >1 column slips through; confirm intent.
      if(ncol(Y) > 1) {
        stop("Only continuous 'Y' or binary 'Y' is accepted")
      }
    }
  }
  if(is.null(colnames(Y))) {
    Ynames <- "outcome"
  } else {
    Ynames <- colnames(Y)
  }
  colnames(Y) <- Ynames
  if(family == "binary") {
    if(!(all(Y %in% c(0, 1)))) {
      stop("Binary outcome should be coded as 0 and 1")
    }
  }
  CoGnames <- NULL
  if(!is.null(CoG)) {
    if(!is.matrix(CoG)) {
      CoG <- as.matrix(CoG)
      if(!is.numeric(CoG)) {
        stop("Input data 'CoG' should be numeric; categroical variables should be transformed into dummies")
      }
    }
    if(is.null(colnames(CoG))) {
      CoGnames <- paste0("CoG", 1:ncol(CoG))
    } else {
      CoGnames <- colnames(CoG)
    }
    colnames(CoG) <- CoGnames
  }
  CoYnames <- NULL
  if(!is.null(CoY)) {
    if(!is.matrix(CoY)) {
      CoY <- as.matrix(CoY)
      if(!is.numeric(CoY)) {
        stop("Input data 'CoY' should be numeric; categorical variables should be transformed into dummies")
      }
    }
    if(is.null(colnames(CoY))) {
      CoYnames <- paste0("CoY", 1:ncol(CoY))
    } else {
      CoYnames <- colnames(CoY)
    }
    colnames(CoY) <- CoYnames
  }
  ## 1.2 record input dimensions, family function ====
  N <- nrow(Y)
  dimG <- ncol(G)
  dimZ <- ncol(Z);
  dimCoG <- ifelse(is.null(CoG), 0, ncol(CoG))
  dimCoY <- ifelse(is.null(CoY), 0, ncol(CoY))
  # CoG covariates are appended to G: both act on latent-cluster assignment.
  G <- cbind(G, CoG)
  Gnames <- c(Gnames, CoGnames)
  # family.list bundles the outcome-specific M-step (f.maxY) and the
  # label-switching fix (f.switch) for normal vs binary outcomes.
  family.list <- switch(family, normal = normal(K = K, dimCoY),
                        binary = binary(K = K, dimCoY))
  Mstep_Y <- family.list$f.maxY
  switch_Y <- family.list$f.switch
  ## 1.3. check missing pattern ====
  # check_na() classifies rows of Z by missingness; indicator_na == 3 marks
  # rows treated as entirely missing.
  na_pattern <- check_na(Z)
  if(na_pattern$impute_flag) {
    # initialize imputation
    if(init_impute == "mclust") {
      cat("Intializing imputation of missing values in 'Z' via the mix package \n\n")
      invisible(capture.output(Z <- mclust::imputeData(Z, seed = seed)))
      Z[na_pattern$indicator_na == 3, ] <- NA
    }
    if(init_impute == "lod") {
      cat("Intializing imputation of missing values in 'Z' via LOD / sqrt(2) \n\n")
      Z <- apply(Z, 2, fill_data_lod)
      colnames(Z) <- Znames
    }
  }
  # 2. EM algorithm for LUCID ================
  # Outer loop restarts EM (with a shifted seed) whenever a run collapses
  # due to numerical problems, until convergence or max_tot.itr.
  tot.itr <- 0
  convergence <- FALSE
  while(!convergence && tot.itr <= max_tot.itr) {
    if(tot.itr > 0) {
      seed <- seed + 10
    }
    set.seed(seed)
    ## 2.1 initialize model parameters ====
    # initialize beta; first cluster is the reference level (all zeros)
    res.beta <- matrix(data = runif(K * (dimG + dimCoG + 1)), nrow = K)
    res.beta[1, ] <- 0
    # initialize mu and sigma
    # initialize by mclust
    if(init_par == "mclust") {
      cat("Initialize LUCID with mclust \n")
      invisible(capture.output(mclust.fit <- Mclust(Z[na_pattern$indicator_na != 3, ],
                                                    G = K,
                                                    modelNames = modelName)))
      if(is.null(mclust.fit)) {
        stop("mclust failed for specified model - please set modelName to `NULL` to conduct automatic model selection ")
      }
      if(is.null(modelName)){
        model.best <- mclust.fit$modelName
      } else{
        model.best <- modelName
      }
      res.mu <- t(mclust.fit$parameters$mean)
      res.sigma <- mclust.fit$parameters$variance$sigma
      # browser()
    } else { # initialize by random guess
      cat("Initialize LUCID with random values from uniform distribution \n")
      if(is.null(modelName)){
        model.best <- "VVV"
        cat("GMM model for LUCID is not specified, 'VVV' model is used by default \n")
      } else{
        model.best <- modelName
      }
      res.mu <- matrix(runif(dimZ * K, min = -0.5, max = 0.5),
                       nrow = K)
      res.sigma <- gen_cov_matrices(dimZ = dimZ, K = K)
    }
    # initialize family specific parameters gamma
    res.gamma <- family.list$initial.gamma(K, dimCoY)
    # start EM algorithm
    # NOTE(review): the printed parameter list below has an unmatched ")" —
    # cosmetic only.
    cat("Fitting LUCID model",
        paste0("K = ", K, ", Rho_G = ", Rho_G, ", Rho_Z_Mu = ", Rho_Z_Mu, ", Rho_Z_Cov = ", Rho_Z_Cov, ")"),
        "\n")
    res.loglik <- -Inf
    itr <- 0
    while(!convergence && itr <= max_itr){
      itr <- itr + 1
      tot.itr <- tot.itr + 1
      check.gamma <- TRUE
      # 2.2 E-step ====
      # calculate log-likelihood for observation i being assigned to cluster j
      new.likelihood <- Estep(beta = res.beta,
                              mu = res.mu,
                              sigma = res.sigma,
                              gamma = res.gamma,
                              G = G,
                              Z = Z,
                              Y = Y,
                              CoY = CoY,
                              N = N,
                              K = K,
                              family.list = family.list,
                              itr = itr,
                              useY = useY,
                              dimCoY = dimCoY,
                              ind.na = na_pattern$indicator_na)
      # normalize the log-likelihood to probability (row-wise log-sum-exp)
      res.r <- t(apply(new.likelihood, 1, lse_vec))
      if(!all(is.finite(res.r))){
        cat("iteration", itr,": EM algorithm collapsed: invalid estiamtes due to over/underflow, try LUCID with another seed \n")
        break
      } else{
        if(isTRUE(verbose)) {
          cat("iteration", itr,": E-step finished.\n")
        }
      }
      # 2.3 M-step - parameters ====
      # update model parameters to maximize the expected likelihood
      invisible(capture.output(new.beta <- Mstep_G(G = G,
                                                   r = res.r,
                                                   selectG = Select_G,
                                                   penalty = Rho_G,
                                                   dimG = dimG,
                                                   dimCoG = dimCoG,
                                                   K = K)))
      new.mu.sigma <- Mstep_Z(Z = Z,
                              r = res.r,
                              selectZ = Select_Z,
                              penalty.mu = Rho_Z_Mu,
                              penalty.cov = Rho_Z_Cov,
                              model.name = model.best,
                              K = K,
                              ind.na = na_pattern$indicator_na,
                              mu = res.mu)
      if(is.null(new.mu.sigma$mu)){
        cat("variable selection failed, try LUCID with another seed \n")
        break
      }
      if(useY){
        new.gamma <- Mstep_Y(Y = Y, r = res.r, CoY = CoY, K = K, CoYnames)
        check.gamma <- is.finite(unlist(new.gamma))
      }
      # 2.4 M step - impute missing values ====
      # Replace missing Z entries with their conditional expectation given
      # the current posterior (res.r) and cluster parameters.
      if(na_pattern$impute_flag){
        Z <- Istep_Z(Z = Z,
                     p = res.r,
                     mu = res.mu,
                     sigma = res.sigma,
                     index = na_pattern$index)
      }
      # 2.5 control step ====
      # Accept the new parameters only if every estimate is finite;
      # otherwise abandon this run and restart with a new seed.
      check.value <- all(is.finite(new.beta),
                         is.finite(unlist(new.mu.sigma)),
                         check.gamma)
      if(!check.value){
        cat("iteration", itr,": Invalid estimates, try LUCID with another seed \n")
        break
      } else{
        res.beta <- new.beta
        res.mu <- new.mu.sigma$mu
        res.sigma <- new.mu.sigma$sigma
        if(useY){
          res.gamma <- new.gamma
        }
        # Expected complete-data log-likelihood, minus LASSO penalties when
        # variable selection is active.
        new.loglik <- sum(rowSums(res.r * new.likelihood))
        if(Select_G) {
          new.loglik <- new.loglik - Rho_G * sum(abs(res.beta))
        }
        if(Select_Z) {
          new.loglik <- new.loglik - Rho_Z_Mu * sum(abs(res.mu)) - Rho_Z_Cov * sum(abs(res.sigma))
        }
        if(isTRUE(verbose)) {
          if(Select_G | Select_Z) {
            cat("iteration", itr,": M-step finished, ", "penalized loglike = ", sprintf("%.3f", new.loglik), "\n")
          } else{
            cat("iteration", itr,": M-step finished, ", "loglike = ", sprintf("%.3f", new.loglik), "\n")
          }
        } else {
          cat(".")
        }
        # Converged when the (penalized) log-likelihood change drops below tol.
        if(abs(res.loglik - new.loglik) < tol){
          convergence <- TRUE
          cat("Success: LUCID converges!", "\n\n")
        }
        res.loglik <- new.loglik
      }
    }
  }
  # 3. summarize results ===============
  # When Y was excluded from estimation, fit gamma once post hoc.
  if(!useY){
    res.gamma <- Mstep_Y(Y = Y, r = res.r, CoY = CoY, K = K, CoYnames = CoYnames)
  }
  # Recompute the likelihood and posterior with the final parameters.
  res.likelihood <- Estep(beta = res.beta,
                          mu = res.mu,
                          sigma = res.sigma,
                          gamma = res.gamma,
                          G = G,
                          Z = Z,
                          Y = Y,
                          family.list = family.list,
                          itr = itr,
                          CoY = CoY,
                          N = N,
                          K = K,
                          dimCoY = dimCoY,
                          useY = useY,
                          ind.na = na_pattern$indicator_na)
  res.r <- t(apply(res.likelihood, 1, lse_vec))
  res.loglik <- sum(rowSums(res.r * res.likelihood))
  if(Select_G) {
    res.loglik <- res.loglik - Rho_G * sum(abs(res.beta))
  }
  if(Select_Z) {
    res.loglik <- res.loglik - Rho_Z_Mu * sum(abs(res.mu)) - Rho_Z_Cov * sum(abs(res.sigma))
  }
  # browser()
  # switch_Y reorders clusters to a canonical labeling (fixes label switching);
  # pars$index gives the permutation applied to columns of res.r.
  pars <- switch_Y(beta = res.beta, mu = res.mu, sigma = res.sigma, gamma = res.gamma, K = K)
  res.r <- res.r[, pars$index]
  colnames(pars$beta) <- c("intercept", Gnames)
  colnames(pars$mu) <- Znames
  # A variable counts as "selected" when its effect varies across clusters
  # by more than a small threshold.
  if(Select_G){
    tt1 <- apply(pars$beta[, -1], 2, range)
    selectG <- abs(tt1[2, ] - tt1[1, ]) > 0.001
  } else{
    selectG <- rep(TRUE, dimG)
  }
  if(Select_Z){
    tt2 <- apply(pars$mu, 2, range)
    selectZ <- abs(tt2[2, ] - tt2[1, ]) > 0.001
  } else{
    selectZ <- rep(TRUE, dimZ)
  }
  results <- list(pars = list(beta = pars$beta,
                              mu = pars$mu,
                              sigma = pars$sigma,
                              gamma = pars$gamma),
                  K = K,
                  var.names =list(Gnames = Gnames,
                                  Znames = Znames,
                                  Ynames = Ynames),
                  modelName = model.best,
                  likelihood = res.loglik,
                  post.p = res.r,
                  family = family,
                  select = list(selectG = selectG, selectZ = selectZ),
                  useY = useY,
                  Z = Z,
                  init_impute = init_impute,
                  init_par = init_par,
                  Rho = list(Rho_G = Rho_G,
                             Rho_Z_Mu = Rho_Z_Mu,
                             Rho_Z_Cov = Rho_Z_Cov)
  )
  class(results) <- c("lucid")
  return(results)
}
|
5b3acce235137988b86a7a8a43702bc3fb806162
|
5385cba7f1961943ef93a09e79372bd8ae4ce787
|
/plots/plot1.R
|
374ff822770a33ca1c31b8f4c40ca72609d8f20e
|
[] |
no_license
|
Deano24/ExData_Plotting1
|
1961584e9e6fe868635d85600b1543d0187c667e
|
2360b2a97e1a1605979d86e2f5a3a8023ab9edba
|
refs/heads/master
| 2021-01-16T21:34:53.529466
| 2014-07-13T03:48:39
| 2014-07-13T03:48:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
plot1.R
|
# plot1.R — histogram of Global Active Power for 2007-02-01..2007-02-02
# from the UCI "Individual household electric power consumption" file.

# Reading in the data; "?" marks missing values in this dataset
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   row.names = NULL, na.strings = "?")
# Formatting the date field (stored as dd/mm/yyyy)
data$Date <- as.Date(as.character(data$Date), "%d/%m/%Y")
# Subsetting the data to the two days of interest
subsetdata <- subset(data, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Removing all NA rows. Bug fix: the original called na.omit() without
# assigning its result, so the NAs were never actually removed.
subsetdata <- na.omit(subsetdata)
# Plots histogram to screen
hist(subsetdata$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (killowatts)", ylab = "Frequency", breaks = 12)
# Copying image displayed on screen to png file
dev.copy(png, file = "plot1.png")
# Closing device
dev.off()
74e7b6ffe42f87b6850108f46b11b02cb9653a00
|
abb7fe666cf31c5c77a0632b84090be758637b53
|
/tests/testthat/can_download.R
|
bb7865361141b0448140ba40c126469b78b2ab5d
|
[] |
no_license
|
tanio003/argoFloats
|
d9a220673cec60ff06801f7a1ff939ca3495ee5d
|
510100fc42e80a468920fc6e3510abbf4d10d64e
|
refs/heads/main
| 2023-04-02T16:50:57.951812
| 2020-07-23T16:51:52
| 2020-07-23T16:51:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 620
|
r
|
can_download.R
|
## Do we have permission to download files? (NOT EXPORTED)
##
## @param destdir character value giving directory into which to download files
## @return logical value: TRUE only when the current user is a known developer
##   account AND destdir exists and is a directory
canDownload <- function(destdir="~/data/argo")
{
    ## FIXME(dek): add username for @harbinj below (also, I was guessing on @richardsc's linux name)
    developers <- c("kelley", "jaimieharbin", "richardsc")
    isDeveloper <- Sys.getenv("USER") %in% developers
    canWrite <- file.exists(destdir) && file.info(destdir)$isdir
    isDeveloper && canWrite
}
|
84cc0173b46a7922b05f31f1614ff0498174b847
|
94129bfca0b1d8a9508be81b9ac6faf96bd4e07b
|
/R/sessionQuestions.R
|
38b4439a05fd99f67bedc43e6478aa3a6809527a
|
[] |
no_license
|
lgnbhl/polyglot
|
8601b68fc1a81c82195e4b4fd258e975db915d59
|
665c05cdbfc982622f223e09b215297e4d8f7029
|
refs/heads/master
| 2021-12-26T04:32:34.540666
| 2021-10-10T10:43:53
| 2021-10-10T10:43:53
| 126,710,810
| 25
| 3
| null | 2021-09-28T20:38:45
| 2018-03-25T15:20:23
|
R
|
UTF-8
|
R
| false
| false
| 9,031
|
r
|
sessionQuestions.R
|
#' Ask questions in the interactive environment
#'
#' This function will print the questions in the interactive learning environment.
#'
#' @details
#' This function reads the selected dataset and prints the first row of its first
#' column, i.e. the question. Then it presents the user with a menu offering
#' multiple choices. According to the choice made by the user, the function gives a
#' score point and will return a due date, inspired by the SuperMemo-2 and Anki algorithms.
#' The menu also proposes to show the answer (the 2nd column of the row), to give a
#' hint/example, or to go back to the main menu. Finally, the function reorders the dataset
#' in order to get the lower points score in its first row and return the function once again.
#'
#' @note
#' In order to quit, simply type 0.
#'
#' @param assign.env An environment
#'
#' @importFrom utils menu read.csv select.list write.csv browseURL
#' @importFrom magick image_read image_scale
#'
#' @source \url{https://www.supermemo.com/english/ol/sm2.htm}{ SuperMemo-2 algorithm}
#' @source \url{https://apps.ankiweb.net/docs/manual.html#what-spaced-repetition-algorithm-does-anki-use}{ Anki algorithm}
sessionQuestions <- function(assign.env = parent.frame(1)) {
# Re-read the deck from disk on every call. NOTE(review): `datasetAbsolutePath`
# is a global set elsewhere in the package (not visible in this file).
sessionDataset <- read.csv(paste0("", datasetAbsolutePath,""), stringsAsFactors = FALSE)
# order dataset by dueDate and Score
#sessionDataset <- sessionDataset[order(sessionDataset$dueDate, sessionDataset$Score), ]
#assign("sessionDataset", sessionDataset, envir = assign.env)
# check if rows to learn for current session and print question
# The deck is kept sorted (see end of this function), so row 1 is the next card.
if(as.Date(sessionDataset$dueDate[1]) <= as.Date(Sys.Date())) {
## list image extension
image_ext <- c(".jpg", ".JPG", ".jpeg", ".JPEG", ".png", ".PNG", ".svg", ".SVG", ".gif", ".GIF", ".avi", ".AVI", ".ico", ".ICO", ".icon", ".ICON", ".tiff", ".TIFF")
# If the question cell (column 1) looks like an image path, render it scaled
# to 300px height; otherwise print it as text.
if(any(sapply(image_ext, function(x) grepl(x, sessionDataset[1,1], fixed = TRUE)))) {
message(paste("| Question: [see image]"))
## PRINT IMAGE
image1 <- tryCatch(magick::image_read(sessionDataset[1,1]), error = function(e) paste0("Could not read image at ", sessionDataset[1,1]))
image1 <- tryCatch(magick::image_scale(image1, "x300"), error = function(e) paste0("Could not read image at ", sessionDataset[1,1]))
print(image1, info = FALSE)
} else {
message(paste("| Question:", sessionDataset[1,1],""))
}
} else {
# Nothing due today: hand control back to the main menu.
message(paste("| 0 row to learn... Back to menu. \n"))
return(learn())
}
# menu 1, inspired by Anki app
# ref: https://apps.ankiweb.net/
# menu() returns 0 on exit, so `+ 1` maps: exit -> sessionExit(),
# 1 -> show answer (column 2), 2 -> show hint (column 3, then recurse),
# 3 -> back to the main menu.
switch(menu(c("Show answer", "Hint", paste0("Back to menu (",length(which(sessionDataset$dueDate <= as.Date(Sys.Date())))," left to learn)"))) + 1,
return(sessionExit()),
# "Show answer"
## list image extension
if(any(sapply(c(".jpg", ".JPG", ".jpeg", ".JPEG", ".png", ".PNG", ".svg", ".SVG", ".gif", ".GIF", ".avi", ".AVI", ".ico", ".ICO", ".icon", ".ICON", ".tiff", ".TIFF"), function(x) grepl(x, sessionDataset[1,2], fixed = TRUE)))) {
message(paste("| Answer: [see image]"))
## PRINT IMAGE
image2 <- tryCatch(magick::image_read(sessionDataset[1,2]), error = function(e) paste0("Could not read image at ", sessionDataset[1,2]))
image2 <- tryCatch(magick::image_scale(image2, "x300"), error = function(e) paste0("Could not scale image at ", sessionDataset[1,2]))
print(image2, info = FALSE)
} else {
message(paste("| Answer:", sessionDataset[1,2],""))
},
# "Hint/Example"
# Column 3 holds the hint only when it is not already the "Score" column.
if (names(sessionDataset[3]) != "Score") {
if(any(sapply(c(".jpg", ".JPG", ".jpeg", ".JPEG", ".png", ".PNG", ".svg", ".SVG", ".gif", ".GIF", ".avi", ".AVI", ".ico", ".ICO", ".icon", ".ICON", ".tiff", ".TIFF"), function(x) grepl(x, sessionDataset[1,3], fixed = TRUE)))) {
message(paste("| Hint: [see image]"))
## If image, open in default browse
## Not in viewer because can overwrite image of Question or Answer
utils::browseURL(sessionDataset[1,3])
return(sessionQuestions())
} else {
message(paste("| Hint:", sessionDataset[1,3],""))
return(sessionQuestions())
}
} else {
message(paste("| No Hint in this dataset."))
return(sessionQuestions())
},
return(learn()))
# space repetition learning algorithm, inspired by SuperMemo 2.
# Grading menu. The "Easy" label previews the next interval, which depends on
# how many successful repetitions the card already has.
switch(menu(c("Hard", "Good",
if(sessionDataset$Repetition[1] == 0){ paste0("Easy (+1 day)")}
else if(sessionDataset$Repetition[1] == 1){ paste0("Easy (+4 days)")}
else if(sessionDataset$Repetition[1] > 1){paste0("Easy (+", (sessionDataset$Interval[[1]] - 1)*max(1.3, sessionDataset$eFactor[[1]]+(0.1-(5-5)*(0.08+(5-5)*0.02))), " days)")}
else{ paste0("Easy")})) + 1,
return(sessionExit()),
# "Hard" (fail and again)
# Resets the card: small score bump, default ease factor, interval back to 0.
if(exists("sessionDataset")) {
sessionDataset$Score[1] <- sessionDataset$Score[1] + 1
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$eFactor[1] <- 2.5 #default eFactor
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Interval[1] <- as.difftime(0, units = "days") #0 day interval
assign("sessionDataset", sessionDataset, envir = assign.env)
},
# "Good" (again)
# Score bump only; due date and interval are left unchanged.
if(exists("sessionDataset")) {
sessionDataset$Score[1] <- sessionDataset$Score[1] + 2
assign("sessionDataset", sessionDataset, envir = assign.env)
},
# "Easy" (pass)
# First pass: +1 day; second pass: +4 days (Anki-style); later passes use the
# SuperMemo-2 update of eFactor/Interval. Each branch persists to disk.
if(sessionDataset$Repetition[1] == 0) {
sessionDataset$Repetition[1] <- sessionDataset$Repetition[1] + 1
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Score[1] <- sessionDataset$Score[1] + 4
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Interval[1] <- as.difftime(1, units = "days") #+1 day
assign("sessionDataset", sessionDataset, envir = assign.env)
dueDate_new <- Sys.Date() + sessionDataset$Interval[1]
assign("dueDate_new", dueDate_new, envir = assign.env)
sessionDataset$dueDate[1] <- as.character.Date(dueDate_new)
assign("sessionDataset", sessionDataset, envir = assign.env)
write.csv(sessionDataset, file = paste0("", datasetAbsolutePath, ""), row.names = FALSE)
} else if (sessionDataset$Repetition[1] == 1) {
sessionDataset$Repetition[1] <- sessionDataset$Repetition[1] + 1
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Score[1] <- sessionDataset$Score[1] + 4
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Interval[1] <- as.difftime(4, units = "days") #+4 days (like Anki)
assign("sessionDataset", sessionDataset, envir = assign.env)
dueDate_new <- Sys.Date() + sessionDataset$Interval[1]
assign("dueDate_new", dueDate_new, envir = assign.env)
sessionDataset$dueDate[1] <- as.character.Date(dueDate_new)
assign("sessionDataset", sessionDataset, envir = assign.env)
write.csv(sessionDataset, file = paste0("", datasetAbsolutePath, ""), row.names = FALSE)
} else if (sessionDataset$Repetition[1] > 1) {
sessionDataset$Repetition[1] <- sessionDataset$Repetition[1] + 1
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Score[1] <- sessionDataset$Score[1] + 4
# SuperMemo 2 algorithm below:
sessionDataset$eFactor[1] <- max(1.3, sessionDataset$eFactor[[1]]+(0.1-(5-5)*(0.08+(5-5)*0.02)))
assign("sessionDataset", sessionDataset, envir = assign.env)
sessionDataset$Interval[1] <- (sessionDataset$Interval[[1]] - 1)*sessionDataset$eFactor[[1]]
assign("sessionDataset", sessionDataset, envir = assign.env)
dueDate_new <- Sys.Date() + sessionDataset$Interval[[1]]
assign("dueDate_new", dueDate_new, envir = assign.env)
sessionDataset$dueDate[1] <- as.character.Date(dueDate_new)
assign("sessionDataset", sessionDataset, envir = assign.env)
write.csv(sessionDataset, file = paste0("", datasetAbsolutePath, ""), row.names = FALSE)
})
# reorder dataset by dueDate and Score
# Keeps the lowest-score, earliest-due card in row 1 for the next iteration.
sessionDataset <- sessionDataset[order(sessionDataset$dueDate, sessionDataset$Score), ]
assign("sessionDataset", sessionDataset, envir = assign.env)
write.csv(sessionDataset, file = paste0("", datasetAbsolutePath, ""), row.names = FALSE)
invisible()
return(sessionQuestions()) # create loop
}
|
5dc3bf85aa60f4132f471b0e16210d09e1133f49
|
356373c526fe9d3c490a3b20f324a85b9aca8e3b
|
/DevSF/archived/man_for_v1/plot.mcmckingui0.Rd
|
d39de59eafa696f61c8e7c99d32a3128614891c4
|
[] |
no_license
|
zhenglei-gao/StudyKin
|
89885e0a5cefd4cc443deee37c6173f0c94f1543
|
7f1adca875f93e5af4c718f0ce9f7bdc2f127df8
|
refs/heads/master
| 2021-01-25T08:43:08.087744
| 2013-04-08T23:28:45
| 2013-04-08T23:28:45
| 8,031,257
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,795
|
rd
|
plot.mcmckingui0.Rd
|
\name{plot.mcmckingui}
\alias{plot.mcmckingui}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
S3 method to plot for class 'mcmckingui'
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
plot.mcmckingui(object, fname1, fname2, pch = 1, device = "wmf", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
%% ~~Describe \code{object} here~~
An object of class 'mcmckingui'
}
\item{fname1}{
%% ~~Describe \code{fname1} here~~
The file name of the density plot.
}
\item{fname2}{
%% ~~Describe \code{fname2} here~~
The file name of the correlation plot.
}
\item{pch}{
%% ~~Describe \code{pch} here~~
What kind of points to use in the plots.
}
\item{device}{
%% ~~Describe \code{device} here~~
The plot device to be used.
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
Other arguments to be passed to 'plot'.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
Density and Correlation plots of the sampled parameters in 'wmf' or other format.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
Zhenglei Gao
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Summary Statistics and Plots }
|
5a829b578b660e37942580a871296ddf71061052
|
4866242878090b27f05c1af37a192f3c730a5476
|
/bvi_plot.R
|
b39e1d833b7bc2b09a072fcd57f061d98e83d31e
|
[] |
no_license
|
jcvdav/bvi
|
d2afe03f3ca7ff29c8bd959f56a824e67adf2e0e
|
ef049421ab9dff03878776abd0f276a7d64c023a
|
refs/heads/master
| 2021-06-07T06:51:20.640455
| 2021-04-22T02:32:58
| 2021-04-22T02:32:58
| 55,379,803
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
bvi_plot.R
|
#' Stacked proportional bar plot of BVI scores per sample.
#'
#' @param bvi_scores Data frame whose first column holds the taxonomic unit,
#'   followed by one column per sample, plus `BVI` and `rBVI` summary columns
#'   (the two summary columns are dropped before plotting).
#' @return A ggplot object: one filled bar per sample, segmented by taxon.
bvi_plot <- function(bvi_scores){
  library(ggplot2)
  library(dplyr)
  library(tidyr)
  bvi_scores %>%
    select(-c(BVI, rBVI)) %>%
    # Wide -> long: every remaining column except the first becomes a
    # (Sample, Score) pair.
    gather(Sample, Score, -1) %>%
    # BUG FIX: the original called magrittr::set_colnames(), but magrittr is
    # never loaded here; base setNames() does the same job for a data frame.
    setNames(c("Spp", "Sample", "Score")) %>%
    ggplot(aes(x = Sample, y = Score, fill = Spp)) +
    # position = "fill" normalises each bar to proportions.
    geom_col(position = "fill", color = "black") +
    theme_bw()
}
|
ae0787d33bcf9b47a7f59d53bc01e09a26897353
|
8c4a74b0a344440a15a2edee5bb761bcd2dfcad9
|
/R/zzz.R
|
4f41daa039ccab1ffcfdce80f9ea41fe91193e3c
|
[
"MIT"
] |
permissive
|
xoopR/set6
|
341950b7649629dc9594b9230710df5140679bf7
|
e65ffeea48d30d687482f6706d0cb43b16ba3919
|
refs/heads/main
| 2023-05-22T22:46:30.493943
| 2022-08-27T17:20:08
| 2022-08-27T17:20:08
| 197,164,551
| 9
| 0
|
NOASSERTION
| 2021-11-16T15:02:05
| 2019-07-16T09:36:22
|
R
|
UTF-8
|
R
| false
| false
| 241
|
r
|
zzz.R
|
#' @import ooplah
#' @importFrom R6 R6Class
NULL
# nocov start
.onLoad <- function(libname, pkgname) {
  # Default the set6.unicode option to whether the session locale is UTF-8.
  utf8_locale <- l10n_info()[["UTF-8"]]
  options(set6.unicode = utf8_locale)
}
.onUnload <- function(libpath) {
  # Clear the option set in .onLoad so unloading leaves no state behind.
  # FIX: R calls this hook with a single `libpath` argument (see Writing R
  # Extensions); the previous (libname, pkgname) signature only worked because
  # the second argument was never supplied or used.
  options(set6.unicode = NULL)
}
# nocov end
|
6f7bc34f8c99023e43fabb3d20caed0f2b01d39f
|
889ba2e1d818e3f32a8e0febf17839f960841623
|
/HW7/1.R
|
37b34ceff3d482647ed559fab52c4a051aee40e8
|
[] |
no_license
|
WillMc93/EN.605.657
|
f9b5a80599192eafa1ce98890fc87bef0b22eaa8
|
febf01617025b99365e954dae42e2321c14f4a4f
|
refs/heads/master
| 2022-11-05T17:43:43.650873
| 2022-10-27T20:13:56
| 2022-10-27T20:13:56
| 238,077,182
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,711
|
r
|
1.R
|
library(HMM)
# Profile-HMM-style model: five match states (M1..M5) and four insert states
# (I1..I4) over the DNA alphabet, decoded with the Viterbi algorithm.
states <- c('M1', 'M2', 'M3', 'M4', 'M5', 'I1', 'I2', 'I3', 'I4')
symbols <- c('A', 'T', 'C', 'G')
# Transition probabilities:
#   t1 = match -> next match, t2 = match -> insert,
#   t3 = insert self-loop,    t4 = insert -> next match,
#   t5/t6 = M4/M5 self-terms (zero here).
t1 <- 0.9
t2 <- 0.1
t3 <- 0.4
t4 <- 0.6
t5 <- 0
t6 <- 0
# Row = from-state, column = to-state (row-major fill, labelled both ways).
transitions <- matrix(data=c(0, t1, 0, 0, 0, t2, 0, 0, 0,
0, 0, t1, 0, 0, 0, t2, 0, 0,
0, 0, 0, t1, 0, 0, 0, t2, 0,
0, 0, 0, t5, t1, 0, 0, 0, t2,
0, 0, 0, 0, t6, 0, 0, 0, 0,
0, t4, 0, 0, 0, t3, 0, 0, 0,
0, 0, t4, 0, 0, 0, t3, 0, 0,
0, 0, 0, t4, 0, 0, 0, t3, 0,
0, 0, 0, 0, t4, 0, 0, 0, t3),
byrow=TRUE, nrow=9, dimnames=list(states, states))
# Insert states emit uniformly over the four bases.
ei <- 0.25
# Row = state, column = symbol: match states have position-specific emissions.
emissions <- matrix(data=c(0.7, 0.1, 0.1, 0.1,
0.1, 0.1, 0.7, 0.1,
0.1, 0.8, 0.1, 0.0,
0.1, 0.1, 0.1, 0.7,
0.8, 0.0, 0.0, 0.2,
ei, ei, ei, ei,
ei, ei, ei, ei,
ei, ei, ei, ei,
ei, ei, ei, ei),
byrow=TRUE, nrow=9, dimnames=list(states, symbols))
# Only match states can start a path.
startProbs <- c(0.2, 0.2, 0.2, 0.2, 0.2, 0, 0, 0, 0)
hmm = initHMM(states, symbols, startProbs=startProbs, transProbs=transitions,
emissionProbs=emissions)
# Viterbi decoding of three observation sequences of increasing length.
observations <- c('A', 'C', 'T', 'G', 'A')
print(viterbi(hmm, observations))
observations <- c('A', 'G', 'C', 'T', 'G', 'A')
print(viterbi(hmm, observations))
observations <- c('A', 'G', 'C', 'C', 'T', 'G', 'A')
print(viterbi(hmm, observations))
# Fundamental matrix N = (I - Q)^-1 of the absorbing chain, with the absorbing
# state M5 (row/column 5) removed; N[i, j] = expected visits to j from i.
fundamental <- transitions[-5,-5]
fundamental <- solve(diag(length(fundamental[,1])) - fundamental)
print(fundamental)
# Average expected path length starting from the match states (first half of
# the remaining rows). NOTE(review): `1:(length(fundamental[,1])/2)` would be
# safer as seq_len(nrow(fundamental) / 2).
avg_len <- 0
for (i in 1:(length(fundamental[,1])/2)) {
print(i)
avg_len <- avg_len + sum(fundamental[i,])
}
avg_len <- avg_len / (length(fundamental[,1])/2)
print(avg_len)
observations<-c("T", "A", "A", "A", "C", "T", "G", "A", "T", "T", "T")
print(viterbi(hmm,observations))
|
d954829f4525c32d17196be29495501f5286442f
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.compute/man/ec2_describe_network_insights_access_scope_analyses.Rd
|
0a9ea8429cdd414687c95d12df8d325e50114d0c
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,782
|
rd
|
ec2_describe_network_insights_access_scope_analyses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_network_insights_access_scope_analyses}
\alias{ec2_describe_network_insights_access_scope_analyses}
\title{Describes the specified Network Access Scope analyses}
\usage{
ec2_describe_network_insights_access_scope_analyses(
NetworkInsightsAccessScopeAnalysisIds = NULL,
NetworkInsightsAccessScopeId = NULL,
AnalysisStartTimeBegin = NULL,
AnalysisStartTimeEnd = NULL,
Filters = NULL,
MaxResults = NULL,
DryRun = NULL,
NextToken = NULL
)
}
\arguments{
\item{NetworkInsightsAccessScopeAnalysisIds}{The IDs of the Network Access Scope analyses.}
\item{NetworkInsightsAccessScopeId}{The ID of the Network Access Scope.}
\item{AnalysisStartTimeBegin}{Filters the results based on the start time. The analysis must have
started on or after this time.}
\item{AnalysisStartTimeEnd}{Filters the results based on the start time. The analysis must have
started on or before this time.}
\item{Filters}{There are no supported filters.}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned \code{nextToken}
value.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{NextToken}{The token for the next page of results.}
}
\description{
Describes the specified Network Access Scope analyses.
See \url{https://www.paws-r-sdk.com/docs/ec2_describe_network_insights_access_scope_analyses/} for full documentation.
}
\keyword{internal}
|
53684accabfdd966abfafa0bdf470562a6697807
|
7d71864a94808408a08b1b73c7302987d4492f7e
|
/R/filter_variants.R
|
612b02ebfab7002dbcdd788b59b914fe257deb96
|
[] |
no_license
|
komalsrathi/MendelianRNA-seq-analysis
|
ed77e3990efb59d32e3f2b756827ec26d605ce29
|
301b0e01bea6e0b9db810268c0331992e3385c4e
|
refs/heads/master
| 2020-07-03T07:00:02.523330
| 2020-01-20T20:35:51
| 2020-01-20T20:35:51
| 201,830,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,048
|
r
|
filter_variants.R
|
####################################################################
# Author: Komal S Rathi
# Date: 01/31/2019
# Function: script to filter variants from GATK, Vardict and Strelka
# Mahdi's pipeline filters
# 1. ROI filter: exons +/- 10
# 2. Qual by depth: 5 for low GQ/low DP variants
# 3. Population Filters:
# HGMD variants 1%, synonymous variants 0.1%, other variants 0.5%
# how many remain - save to another file
# Step 1 (after annotating maf with HGMD)
####################################################################
library(data.table)
library(hutils)
library(tidyr)
library(GenomicRanges)
library(reshape2)
library(dplyr)
# NOTE(review): setwd() in a script makes it non-portable; consider project-
# relative paths (e.g. the here package) instead.
# setwd('/mnt/isilon/cbmi/variome/rathik/mendelian_rnaseq')
setwd('~/Projects/DGD_Mendelian_RNASeq/')
# use exon file from biomart output
exon.dat <- read.delim('data/gencode.v19.cdl_canonical_transcripts.v7.patched_contigs.exons.txt', stringsAsFactors = F)
# Pad every exon by +/- 10 bp (the ROI filter described in the header), then
# keep one row per unique (gene, chrom, start, end).
exon.dat <- exon.dat %>% group_by(gene_symbol) %>%
mutate(exon_start = exon_start - 10,
exon_end = exon_end + 10) %>%
unique() %>% dplyr::select(gene_symbol, chrom, exon_start, exon_end) %>%
as.data.frame()
# final exons/intron list
# introns <- read.delim('data/variant_filtering/final_splicevariants_exons.txt', stringsAsFactors = F)
# introns <- unique(introns[,c("Sample","HGVSc","Introns","Label","Hugo_Symbol")])
# vars to test (for testing)
# vars.to.test <- c("c.64","c.359","c.4561")
# vars.to.test <- paste(vars.to.test, collapse = "|")
# folder
# Default input folder; each filter.out() call below passes its own.
folder <- 'data/variant_filtering/rawdata/gatk3-source/'
# Apply the ROI / quality / population-frequency filters to every MAF file in
# `folder`. Relies on the global `exon.dat` built above.
# Returns a list of three data frames:
#   [[1]] per-sample counts after each filter (exonic, F1, F2, F3),
#   [[2]] the fully filtered variants,
#   [[3]] the full MAF annotated with per-filter pass flags (F1/F2/F3 == "Y").
filter.out <- function(folder){
lf <- list.files(path = folder, pattern = '*.maf', full.names = TRUE)
# NOTE(review): 1:length(lf) misbehaves when lf is empty; seq_along(lf) is safer.
for(i in 1:length(lf)){
print(paste0("Sample no.: ",i))
# Derive the sample name by stripping the path and known pipeline suffixes.
n <- gsub('.*/|-hgmdannotated.maf|-gatk-haplotype-annotated-hgmdannotated.maf|.variants-hgmdannotated.maf|.vardict-annotated-rnaedit-annotated-gemini-hgmdannotated.maf|Sample_1__','',lf[i])
print(paste0("Sample: ",n))
dat <- data.table::fread(lf[i], verbose = FALSE)
dat <- as.data.frame(dat)
# Drop rows with a missing ('.') quality value.
dat <- dat[which(dat$variant_qual != "."),]
dat$Tumor_Sample_Barcode <- n # add Tumor Sample Barcode
# Stable per-row id used to flag which rows pass each filter.
dat$var_id <- paste0(dat$Tumor_Sample_Barcode,'_', rownames(dat))
# ROI filter:
# identify all exonic variants
# add sequential identifiers
exonic.vars <- dat
exonic.vars$id <- seq(1:nrow(exonic.vars))
# only keep positions that are within the exons in the exon file
subject <- with(exon.dat, GRanges(chrom, IRanges(start = exon_start, end = exon_end, names = gene_symbol)))
query <- with(exonic.vars, GRanges(Chromosome, IRanges(start = Start_Position, end = End_Position, names = id)))
# find overlaps and subset maf; type = "within" keeps variants fully inside
# a padded exon.
res <- findOverlaps(query = query, subject = subject, type = "within")
res.df <- data.frame(exonic.vars[queryHits(res),], exon.dat[subjectHits(res),])
exonic.vars <- exonic.vars[which(exonic.vars$id %in% res.df$id),]
exonic.vars$id <- NULL
print(paste0("Dimensions of exonic vars: ", nrow(exonic.vars)))
dat <- exonic.vars
maf <- dat # this is for writing out full maf
maf[which(maf$var_id %in% dat$var_id),"F1"] <- "Y"
s0 <- nrow(dat)
print(s0)
# Quality Filters:
# keep variants with variant_qual >= 30
# (a previous comment said "depth >= 2", which did not match the code)
dat$variant_qual <- as.numeric(dat$variant_qual)
dat <- dat[which(dat$variant_qual >= 30),]
maf[which(maf$var_id %in% dat$var_id),"F2"] <- "Y"
s1 <- nrow(dat)
print(s1)
# Population Filters
# if HGMD annotated, AF <= 0.01 else Syn variants <= 0.001 and others <= 0.005
# NOTE(review): the code actually uses 0.005 for HGMD-annotated (CLASS present)
# and 0.001 otherwise — confirm against the 1% threshold in the file header.
# replace gnomAD NAs with 0s
dat[,grep('gnomAD_.*AF$', colnames(dat))][is.na(dat[,grep('gnomAD_.*AF$', colnames(dat))])] <- 0
dat$AF_filter <- FALSE
# Max allele frequency across the per-population gnomAD columns.
dat$gnomAD_max_AF <- apply(dat[,grep('gnomAD_[A-Z]{3}_AF',colnames(dat))], 1, max)
print(summary(dat$gnomAD_max_AF))
dat$AF_filter <- ifelse(is.na(dat$CLASS),
ifelse(dat$gnomAD_max_AF <= 0.001, TRUE, FALSE),
ifelse(dat$gnomAD_max_AF <= 0.005, TRUE, FALSE))
dat <- dat[which(dat$AF_filter == TRUE),]
maf[which(maf$var_id %in% dat$var_id),"F3"] <- "Y"
s2 <- nrow(dat)
print(s2)
# add sample name to tumor_sample_barcode
# Per-sample summary row of counts surviving each filter.
t <- data.frame(sample = n, exonic = nrow(exonic.vars), F1 = s0, F2 = s1, F3 = s2)
if(i == 1){
total1 <- t
total2 <- dat
total3 <- maf
} else {
total1 <- rbind(total1, t)
total2 <- rbind(total2, dat)
total3 <- rbind(total3, maf)
}
}
# return results
return(list(total1, total2, total3))
}
# exonic pipeline
# For each caller: run the filters and write the summary counts ([[1]]),
# filtered variants ([[2]]) and flag-annotated MAF ([[3]]) to disk.
vardict.total <- filter.out(folder = 'data/variant_filtering/rawdata/vardict/')
write.table(vardict.total[[1]], file = 'data/variant_filtering/vardict_filtered_variants.txt', quote = F, sep = "\t", row.names = F)
write.table(vardict.total[[2]], file = 'data/variant_filtering/vardict_filtered_variants.maf', quote = F, sep = "\t", row.names = F)
write.table(vardict.total[[3]], file = 'data/variant_filtering/vardict_filter_breakdown_variants.maf', quote = F, sep = "\t", row.names = F)
gatk4.total <- filter.out(folder = 'data/variant_filtering/rawdata/gatk4/')
write.table(gatk4.total[[1]], file = 'data/variant_filtering/gatk4_filtered_variants.txt', quote = F, sep = "\t", row.names = F)
write.table(gatk4.total[[2]], file = 'data/variant_filtering/gatk4_filtered_variants.maf', quote = F, sep = "\t", row.names = F)
write.table(gatk4.total[[3]], file = 'data/variant_filtering/gatk4_filter_breakdown_variants.maf', quote = F, sep = "\t", row.names = F)
strelka.total <- filter.out(folder = 'data/variant_filtering/rawdata/strelka/')
write.table(strelka.total[[1]], file = 'data/variant_filtering/strelka_filtered_variants.txt', quote = F, sep = "\t", row.names = F)
write.table(strelka.total[[2]], file = 'data/variant_filtering/strelka_filtered_variants.maf', quote = F, sep = "\t", row.names = F)
write.table(strelka.total[[3]], file = 'data/variant_filtering/strelka_filter_breakdown_variants.maf', quote = F, sep = "\t", row.names = F)
# gatk 3.8 (bcbio)
gatk3.total <- filter.out(folder = 'data/variant_filtering/rawdata/gatk3/')
write.table(gatk3.total[[1]], file = 'data/variant_filtering/gatk3_filtered_variants.txt', quote = F, sep = "\t", row.names = F)
write.table(gatk3.total[[2]], file = 'data/variant_filtering/gatk3_filtered_variants.maf', quote = F, sep = "\t", row.names = F)
write.table(gatk3.total[[3]], file = 'data/variant_filtering/gatk3_filter_breakdown_variants.maf', quote = F, sep = "\t", row.names = F)
# gatk 3.8 (source)
# NOTE(review): gatk3.total is deliberately reused here; the bcbio results were
# already written out above, so no data is lost by the reassignment.
gatk3.total <- filter.out(folder = 'data/variant_filtering/rawdata/gatk3-source/')
write.table(gatk3.total[[1]], file = 'data/variant_filtering/gatk3_source_filtered_variants.txt', quote = F, sep = "\t", row.names = F)
write.table(gatk3.total[[2]], file = 'data/variant_filtering/gatk3_source_filtered_variants.maf', quote = F, sep = "\t", row.names = F)
write.table(gatk3.total[[3]], file = 'data/variant_filtering/gatk3_source_filter_breakdown_variants.maf', quote = F, sep = "\t", row.names = F)
|
435071f7d57ee97139b3bdad7f3ed5a814ecbdd8
|
d5c6d9895e35047d47718fec3decce9e728906b1
|
/R/parse_taxolist.R
|
ff6703f72883dae200c964ec3c3312e870dbcc87
|
[] |
no_license
|
XingXiong/bioparser
|
db44cf1ec5e3e49cb73af3cd93b4bffd209f7b23
|
1f11a018563fe5c7ad737c4e3d01dcb0f220c5a7
|
refs/heads/master
| 2021-01-23T13:30:47.438492
| 2017-08-29T16:11:21
| 2017-08-29T16:11:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,318
|
r
|
parse_taxolist.R
|
#' Parse and extract taxonomic names from txt files
#'
#' \code{parse_taxolist} reads and parses all text lines from a file which contains
#' taxonomic names, authors and distribution in each row and writes the tabular output
#' to a csv file automatically or based on the configuration \code {config} specified
#' by the users.
#'
#' @import RCurl
#' @import plyr
#' @import stringr
#'
#' @param input_file Required. The path and name of the file which the data is to be
#' read from. If it does not contain an absolute path, the file name is relative to the
#' current working directory.
#' @param output_file Required. The path and name of the file for writing. If it does
#' not contain an absolute path, the file name is relative to the current working
#' directory.
#' @param location_detail Optional.A logical value indicating whether the detailed
#' information including longitude, latitude and detail location names of distributions
#' is to be exported. Defaults to TRUE.
#' @param language Optional.The language of detailed distribution information which
#' must be one of "English" or "Chinese". Defaults to "English".
#' @param evaluation Optional. A logical value indicating whether the evaluation of the
#' parsing result is to be exported. Defaults to TRUE.
#' @param config Optional. If it is not specified by users, the output will be generated
#' automatically based on the structure of input texts. If it is indicated explicitly by
#' users, the function will parse the input texts based on the rules specified in the
#' \code{config}. Some examples of config are provided in the "Examples" part. Note that:
#' Author_year should be regarded as a whole part; The separator between author_year part
#' and distribution part should be stated clearly; If '\n' exists, it can only appear
#' right after the \code{genus} part.
#'
#' @return A data frame containing the result of parsing taxonomic names in the input
#' file and detailed distribution information about species. For those taxonomic names
#' which have more than one distribution, if \code{location_detail} is \code{TRUE}, each
#' row in the data frame will only contain one distribution. If \code{location_detail}
#' is \code{FALSE}, all distributions for a single species will be written in one row.
#'
#' A CSV file written from the above data frame.
#'
#' @examples \dontrun{
#' # example1:
#' parse_taxolist(input_file = "./Examples/input_data/test_example.txt",
#' output_file = "./Examples/output_data/test_example_output.csv",
#' location_detail = TRUE,
#' language = "English",
#' evaluation = TRUE,
#' config = "")
#'
#' input example:
#' Charmus indicus Chiang, Chen & Zhang 1996.Distribution: Andhra Pradesh, Kerala,Pondicherry and Tamil Nadu.
#' Isometrus maculatus De Geer, 1778.Distribution: Kerala, Andhra Pradesh, Madhya Pradesh, Karnataka,Maharashtra, Meghalaya and Tamil Nadu.
#' Lychas hendersoni Pocock, 1897) Distribution: Kerala and Tamil Nadu.
#'
#'
#' # example2:
#' parse_taxolist(input_file = "./Examples/input_data/test_example_config_1.txt",
#' output_file = "./Examples/output_data/test_example_output_config_1.csv",
#' location_detail = FALSE,
#' language = "English",
#' evaluation = TRUE,
#' config = "Genus, Species, Author_Year, 'Distribution:', distribution")
#'
#' input example:
#' Pachliopta pandiyana Moore, 1881 Distribution: Goa, Karnataka, Kerala
#'
#'
#' # example3:
#' parse_taxolist(input_file = "./Examples/input_data/test_example_config_2.txt",
#' output_file = "./Examples/output_data/test_example_output_config_2.csv",
#' location_detail = FALSE,
#' language = "English",
#' evaluation = FALSE,
#' config = "Genus, Species, Author_Year, ':', distribution")
#'
#' input example:
#' Pachliopta pandiyana Moore, 1881 : Goa, Karnataka, Kerala
#'
#'
#' # example4:
#' parse_taxolist(input_file = "./Examples/input_data/test_example_config_3.txt",
#' output_file = "./Examples/output_data/test_example_output_config_3.csv",
#' location_detail = TRUE,
#' language ="English",
#' evaluation = FALSE,
#' config = "Genus, '\n', Species, Author_Year, ':',distribution")
#'
#' input example:
#' Pachliopta
#' pandiyana Moore, 1881 : Goa, Karnataka, Kerala
#' aristolochiae Fabricius, 1775 : Meghalaya, Paschimbanga, Kerala, Karnataka, Arunachal Pradesh, Telangana, Andhra Pradesh, Maharashtra, Gujarat, Odisha, Chhattisgarh
#'}
#'
#' @export
parse_taxolist <- function(input_file, output_file, location_detail, language, evaluation, config){
  # Read every line of the input; warn = FALSE silences the
  # "incomplete final line" warning for files without a trailing newline.
  lines <- readLines(input_file, warn = FALSE)
  final_result <- c()
  if (config == "") {
    # No user-supplied format: parse each non-empty line heuristically.
    for (i in seq_along(lines)) {
      if (lines[i] != "") {
        cur_sci_name <- get_full_sciname_one_line(lines[i])
        cur_result <- get_tabular_output(cur_sci_name, location_detail, language, evaluation)
        final_result <- rbind(final_result, cur_result)
      }
    }
    write.csv(final_result, file = output_file, row.names = FALSE)
  } else {
    # A single "\n" in the config means genus sits on its own line and the
    # following lines carry species entries; more than one is unsupported.
    line_break_num <- length(str_locate_all(config, "\n")[[1]][, "start"])
    if (line_break_num == 0) {
      for (i in seq_along(lines)) {
        if (lines[i] != "") {
          cur_sci_name <- get_sciname_by_config_one_line(lines[i], config)
          cur_result <- get_tabular_output(cur_sci_name, location_detail, language, evaluation)
          final_result <- rbind(final_result, cur_result)
        }
      }
      write.csv(final_result, file = output_file, row.names = FALSE)
    } else if (line_break_num == 1) {
      output_sciname_by_config_multi_line(config, lines, output_file, location_detail, language, evaluation)
    } else {
      # BUG FIX: was `Stop(...)` — R is case-sensitive and has no Stop().
      stop("You can't have more than one line break symbols in config.")
    }
  }
  return(final_result)
}
# Split the leading part of a scientific-name line into genus / species /
# subspecies, with a quality score and a human-readable evaluation.
# Returns list(genus, species, subspecies, score, evaluation).
# NOTE(review): str_locate_all() returns a start/end matrix; linear indexing
# `capital_index[[1]][1]` / `[2]` reads the start positions of the FIRST and
# SECOND capitalized words (column-major), so lines with fewer than two
# capitalized words would index out of range — confirm inputs always have both.
getGenus <- function(science_name){
# if the first character is capitalized, then the first word must be Genus
# quality: score to evaluate results (1: good, 2: uncertainty in some part, 3: possibly error in input)
if (str_detect(science_name, "^[A-Z]")){
capital_index <- str_locate_all(science_name,"[[:blank:]]?[A-Z]+")
# Text from the first capital up to just before the second capitalized word
# (i.e. genus + species [+ subspecies], before the author name).
genus_species_sub <- str_sub(science_name, capital_index[[1]][1], capital_index[[1]][2]-1)
split <- str_split(genus_species_sub, " ")
split <- unlist(split)
if (length(split) == 1){ # if there's only one word
genus <- split[1]
species <- 'NA'
subspecies <- 'NA'
score <- 1
evaluation <- "High credibility."
} else if (length(split) == 2){ # if there are two words
genus <- split[1]
species <- split[2]
subspecies <- 'NA'
score <- 1
evaluation <- "High credibility."
} else if (length(split) == 3){ # if there are three words
genus <- split[1]
species <- split[2]
subspecies <- split[3]
score <- 1
evaluation <- "High credibility."
} else { # if there are more than three words in the first part
genus <- split[1]
species <- split[2]
subspecies <- split[3]
score <- 3
evaluation <- "Possibly error or confusion in the input."
}
# if the first character is not capitalized, then this entry doesn't include Genus
} else {
capital_index <- str_locate_all(science_name,"[[:blank:]]?[A-Z]+")
# Everything before the first capitalized word (the author) is
# species [+ subspecies].
genus_species_sub <- str_sub(science_name, 1, capital_index[[1]][1]-1)
split <- str_split(genus_species_sub, " ")
split <- unlist(split)
if (length(split) == 1){
# to be continued (how to distinguish between species and subspecies? )
genus <- 'NA'
species <- split[1]
subspecies <- 'NA'
score <- 2
evaluation <- "Cannot distinguish species or subspecies."
} else if (length(split) == 2) {
genus <- 'NA'
species <- split[1]
subspecies <- split[2]
score <- 1
evaluation <- "High credibility."
} else {
genus <- 'NA'
species <- split[1]
subspecies <- split[2]
score <- 3
evaluation <- "Possibly error or confusion in the input."
}
}
return (list(genus, species, subspecies, score, evaluation))
}
# Extract the "Author, Year" portion of a scientific-name line: the text
# between the author's capitalized surname and the last 4-digit year.
# Returns c(author_year, score, evaluation).
getAuthorYear <- function(science_name){
if(str_detect(science_name, "[0-9]{4}.?")){
year_list <- str_locate_all(science_name, "[0-9]{4}.?")[[1]]
# end_index is the position of the last character of the last year
end_index <- year_list[, "end"][length(year_list[, "end"])]
capital_index <- str_locate_all(science_name,"[[:blank:]]?[A-Z]+")
# NOTE(review): linear indexing of the start/end matrix — [1]/[2] are the
# start positions of the first/second capitalized words (column-major).
if(str_detect(science_name, "^[A-Z]")){
# if scientific name starts with a capital letter (genus), then the second capital letter is the start of author_year part
start_index <- capital_index[[1]][2]
} else {
# if not, then the first capital letter is the start of author_year part
start_index <- capital_index[[1]][1]
}
author_year_list <- str_sub(science_name, start_index + 1, end_index - 1)
score <- 1
evaluation <- "High credibility."
} else {
# No 4-digit year found: treat the entry as having no author information.
author_year_list <- 'NA'
score <- 1
evaluation <- "There's no author info in this entry."
}
return(c(author_year_list, score, evaluation))
}
# Extract the distribution (location) names from a scientific-name line.
# The distribution text follows the last 4-digit year (or, without a year,
# the author's capitalized name). Returns c(list-of-locations, score, evaluation).
getDistribution <- function(science_name){
  # if there is year part in the scientific name, the distribution info is right after the last year.
  if(str_detect(science_name, "[0-9]{4}.?")){
    year_list <- str_locate_all(science_name, "[0-9]{4}.?")[[1]]
    # start_index is the position of the last character of the last year
    start_index <- year_list[, "end"][length(year_list[, "end"])]
  # if there's no author_year part, the distribution info starts with the second (or first) capital letter in the scientific name
  } else {
    capital_index <- str_locate_all(science_name,"[[:blank:]]?[A-Z]+")
    if(str_detect(science_name, "^[A-Z]")){
      start_index <- capital_index[[1]][2]
    } else{
      start_index <- capital_index[[1]][1]
    }
  }
  distribution_list <- substring(science_name, start_index + 1)
  # Normalise " and " to a comma so it acts as another separator.
  clean_list <- gsub("\\sand\\s", ",", distribution_list)
  clean_list <- gsub("Distribution:", "", clean_list)
  # BUG FIX: the original used gsub("[:punct:]", ...), which is not a valid
  # POSIX class (it matches the literal characters : p u n c t, mangling
  # names like "Pondicherry"). Strip all punctuation EXCEPT commas, since
  # commas are the separator used by the split below.
  clean_list <- str_trim(gsub("[^[:alnum:][:space:],]", "", clean_list))
  # Comma-free multi-word lists are split on spaces, otherwise on commas.
  if(str_detect(science_name, ",") == FALSE & length(str_split(clean_list, " ")[[1]]) != 1){
    distribution <- str_split(clean_list, " ")
  } else {
    distribution <- str_split(clean_list, ",")
  }
  score <- 1
  evaluation <- "High credibility."
  # if there's no distribution info, to be continued
  return(c(distribution, score, evaluation))
}
# connect with google map api to get the longitude, altitude and detailed information about the distribution
# Build a Google Geocoding API request URL for `address`; the .cn host is
# used for Chinese so the service is reachable from mainland China.
# NOTE(review): this shadows base::url() within the package namespace.
url <- function(address, language, return.call = "json", sensor = "false") {
  # Single branch point for the host; the rest of the URL is identical.
  if (language == "English") {
    root <- "http://maps.google.com/maps/api/geocode/"
  } else {
    root <- "http://maps.google.cn/maps/api/geocode/"
  }
  # paste0 instead of paste(..., sep = "") — same result, idiomatic form.
  u <- paste0(root, return.call, "?address=", address, "&sensor=", sensor)
  return(URLencode(u))
}
# Geocode `address` via the Google Maps API (through url()/getURL()).
# Returns c(latitude, longitude, location_type, formatted_address), or four
# NAs when the service does not answer with status "OK".
geoCode <- function(address, language, verbose = FALSE) {
  if(verbose) cat(address,"\n")
  request <- url(address, language)
  response <- getURL(request)
  parsed <- fromJSON(response, simplify = FALSE)
  if(parsed$status != "OK") {
    return(c(NA, NA, NA, NA))
  }
  hit <- parsed$results[[1]]
  c(hit$geometry$location$lat,
    hit$geometry$location$lng,
    hit$geometry$location_type,
    hit$formatted_address)
}
# Collapse the per-component scores/evaluations of a parsed sci_name record
# into a single (score, evaluation-text) pair. The overall score is the
# maximum of the three component scores.
evaluation_output <- function(sci_name){
  overall <- max(sci_name$genus_score,
                 sci_name$author_year_score,
                 sci_name$distribution_score)
  summary_text <- paste("genus:", sci_name$genus_evaluation,
                        "author:", sci_name$author_year_evaluation,
                        "distribution:", sci_name$distribution_evaluation)
  c(overall, summary_text)
}
# Parse one free-form scientific-name line into genus/species/subspecies,
# author+year, and distribution, each with a credibility score and an
# evaluation message, using the heuristic helpers getGenus(),
# getAuthorYear() and getDistribution().
get_full_sciname_one_line <- function(line){
  # Fully-defaulted record; -1 scores mean "not yet evaluated".
  sci_name <- list(
    genus = 'NA',
    species = 'NA',
    subspecies = 'NA',
    author_year = 'NA',
    distribution = 'NA',
    genus_score = -1,
    genus_evaluation = '',
    author_year_score = -1,
    author_year_evaluation = '',
    distribution_score = -1,
    distribution_evaluation = ''
  )
  # Genus / species / subspecies share one score and evaluation.
  taxon <- unlist(getGenus(line))
  sci_name$genus <- taxon[1]
  sci_name$species <- taxon[2]
  sci_name$subspecies <- taxon[3]
  sci_name$genus_score <- taxon[4]
  sci_name$genus_evaluation <- taxon[5]
  # Author and year information plus its evaluation.
  author_info <- unlist(getAuthorYear(line))
  sci_name$author_year <- author_info[1]
  sci_name$author_year_score <- author_info[2]
  sci_name$author_year_evaluation <- author_info[3]
  # Distribution: element 1 is the locality list, then score and evaluation.
  dist_info <- getDistribution(line)
  sci_name$distribution_score <- dist_info[2][[1]]
  sci_name$distribution_evaluation <- dist_info[3][[1]]
  sci_name$distribution <- paste(dist_info[[1]], collapse = ",")
  sci_name
}
# Parse one scientific-name line according to a comma-separated config
# string naming each field in order (e.g. "Genus, Species, Author_Year,
# distribution"). Returns a list with the parsed fields plus a
# score/evaluation pair for genus, author_year and distribution.
get_sciname_by_config_one_line <- function(line, config){
  # Initialise every output field; -1 scores mean "not evaluated yet".
  sci_name <-c()
  sci_name$genus <- 'NA'
  sci_name$species <- 'NA'
  sci_name$subspecies <- 'NA'
  sci_name$author_year <- 'NA'
  sci_name$distribution <- 'NA'
  sci_name$genus_score <- -1
  sci_name$genus_evaluation <- ''
  sci_name$author_year_score <- -1
  sci_name$author_year_evaluation <- ''
  sci_name$distribution_score <- -1
  sci_name$distribution_evaluation <- ''
  # Split the config into trimmed field names.
  config_list <- as.list(strsplit(config, ",")[[1]])
  config_list <- str_trim(config_list)
  len = length(config_list)
  if("Genus" %in% str_trim(config_list)){
    # Genus is taken from its configured word position; rest_part is what
    # remains of the line after removing the matched genus text.
    genus_pos <- get_config_position(config_list, "Genus")
    sci_name$genus <- str_split(line, " ")[[1]][genus_pos]
    rest_part <- str_trim(gsub(sci_name$genus, "", line))
    sci_name$genus_score <- 1
    sci_name$genus_evaluation <- "High credibility."
  } else {
    # NOTE(review): when "Genus" is absent, rest_part is never initialised,
    # so the Species/Subspecies branches below would fail with an undefined
    # object -- confirm whether a config without Genus is really supported.
    sci_name$genus_score <- 1
    sci_name$genus_evaluation <- "Missing 'genus' in the scientific name."
  }
  if("Species" %in% str_trim(config_list)){
    species_pos <- get_config_position(config_list, "Species")
    sci_name$species <- str_split(line, " ")[[1]][species_pos]
    rest_part <- str_trim(gsub(sci_name$species, "", rest_part))
  } else {
    # Missing species: start or extend the genus evaluation message.
    if (sci_name$genus_evaluation == "High credibility."){
      sci_name$genus_score <- 1
      sci_name$genus_evaluation <- "Missing 'species' in the scientific name."
    } else {
      sci_name$genus_score <- 1
      sci_name$genus_evaluation <- paste(sci_name$genus_evaluation, "Missing 'species' in the scientific name.")
    }
  }
  if("Subspecies" %in% str_trim(config_list)){
    subspecies_pos <- get_config_position(config_list, "Subspecies")
    sci_name$subspecies <- str_split(line, " ")[[1]][subspecies_pos]
    rest_part <- str_trim(gsub(sci_name$subspecies, "", rest_part))
  } else {
    if (sci_name$genus_evaluation == "High credibility."){
      sci_name$genus_score <- 1
      sci_name$genus_evaluation <- "Missing 'subspecies' in the scientific name."
    } else {
      sci_name$genus_score <- 1
      sci_name$genus_evaluation <- paste(sci_name$genus_evaluation, "Missing 'subspecies' in the scientific name.")
    }
  }
  # if config list ends with distribution
  if(str_trim(config_list[len]) == 'distribution'){
    # extract distribution part
    # NOTE(review): "'*'" is used as a regex pattern here and in str_split
    # below; as written it means "zero or more quote characters followed by
    # a quote" -- confirm the intended literal separator convention.
    if (str_detect(config_list[len-1], "'*'")){
      split_word <- str_split(config_list[len-1], "'*'")[[1]][2]
      sci_name$distribution <- str_trim(str_split(rest_part, split_word)[[1]][2])
      rest_part <- str_trim(gsub(split_word, "", rest_part))
      sci_name$author_year <- str_trim(gsub(sci_name$distribution , "", rest_part))
      sci_name$distribution_score <- 1
      sci_name$distribution_evaluation <- "High credibility."
      if (sci_name$author_year != ""){
        sci_name$author_year_score <- 1
        sci_name$author_year_evaluation <- "High credibility."
      } else {
        sci_name$author_year_score <- 1
        sci_name$author_year_evaluation <- "Missing author and year part in the scientific name."
      }
    }
    # if config list ends with author and year part
    else if (str_trim(config_list[len-1]) == 'Author_Year'){
      year_list <- str_locate_all(rest_part, "[0-9]{4}.?")[[1]]
      # start_index is the position of the last character of the last year
      start_index <- year_list[, "end"][length(year_list[, "end"])]
      sci_name$distribution <- str_trim(substring(rest_part, start_index + 1))
      sci_name$author_year <- str_trim(gsub(sci_name$distribution, "", rest_part))
      sci_name$author_year_score <- 1
      sci_name$author_year_evaluation <- "High credibility."
      sci_name$distribution_score <- 1
      sci_name$distribution_evaluation <- "Missing distribution part in the scientific name."
    }
    # if config list does not contain author_year and distribution
    else {
      sci_name$distribution <- rest_part
      sci_name$distribution_score <- 1
      sci_name$distribution_evaluation <- "High credibility."
      sci_name$author_year_score <- 1
      sci_name$author_year_evaluation <- "Missing author and year part in the scientific name."
    }
  } else if (str_trim(config_list[len]) == 'Author_Year'){
    # No distribution configured: everything left is author/year.
    sci_name$author_year <- rest_part
    sci_name$distribution_score <- 1
    sci_name$distribution_evaluation <- "Missing distribution part in the scientific name."
  } else{
    # Neither author/year nor distribution configured.
    sci_name$author_year_score <- 1
    sci_name$author_year_evaluation <- "Missing author and year part in the scientific name."
    sci_name$distribution_score <- 1
    sci_name$distribution_evaluation <- "Missing distribution part in the scientific name."
  }
  return(sci_name)
}
# Parse multi-line input where, per the config, a genus stands alone on its
# own line ("Genus, '\n', ...") and each following non-blank line supplies
# species/subspecies/author/distribution for that genus. Rows are
# accumulated and written to `output_file` as CSV.
# Only configs with exactly one line break, in second position, are
# supported; anything else raises an error.
output_sciname_by_config_multi_line <- function(config, lines, output_file, location_detail, language, evaluation){
  # Working record; fields persist across iterations so a genus-only line
  # applies to every subsequent detail line until the next genus appears.
  sci_name <-c()
  sci_name$genus <- 'NA'
  sci_name$species <- 'NA'
  sci_name$subspecies <- 'NA'
  sci_name$author_year <- 'NA'
  sci_name$distribution <- 'NA'
  sci_name$genus_score <- -1
  sci_name$genus_evaluation <- ''
  sci_name$author_year_score <- -1
  sci_name$author_year_evaluation <- ''
  sci_name$distribution_score <- -1
  sci_name$distribution_evaluation <- ''
  final_result <- c()
  config_list <- as.list(strsplit(config, ",")[[1]])
  config_list <- str_trim(config_list)
  line_break_pos <- get_config_position(config_list, "'\n'")
  line_break_num <- length(str_locate_all(config, "\n")[[1]][,"start"])
  if(line_break_num == 1 & line_break_pos ==2){
    len <- length(lines)
    # seq_len() keeps the loop empty (instead of running 1:0) when there
    # are no input lines.
    for(i in seq_len(len)){
      if(lines[i] == ""){
        # BUG FIX: R's loop-continue keyword is `next`; the original used
        # `continue`, which is not an R keyword and would raise
        # "object 'continue' not found" on the first blank line.
        next
      }
      line_split <- str_split(str_trim(lines[i]), " ")[[1]]
      if(length(line_split) == 1){
        # A single word is a genus line; remember it for the lines below.
        cur_line_index <- i
        sci_name$genus <- str_trim(lines[i])
      } else {
        # Detail line: drop "Genus" and "'\n'" from the config, then parse
        # the remaining fields in their configured order.
        cur_config_list <- config_list[-1]
        cur_config_list <- cur_config_list[-1]
        cur_config_len <- length(cur_config_list)
        if("Species" %in% str_trim(cur_config_list)){
          species_pos <- get_config_position(cur_config_list, "Species")
          sci_name$species <- str_split(lines[i], " ")[[1]][species_pos]
          lines[i] <- str_trim(gsub(sci_name$species, "", lines[i]))
        } else {
          sci_name$genus_score <- 1
          sci_name$genus_evaluation <- "Missing 'species' in the scientific name."
        }
        if("Subspecies" %in% str_trim(cur_config_list)){
          subspecies_pos <- get_config_position(cur_config_list, "Subspecies")
          sci_name$subspecies <- str_split(lines[i], " ")[[1]][subspecies_pos]
          lines[i] <- str_trim(gsub(sci_name$subspecies, "", lines[i]))
        } else {
          if (sci_name$genus_evaluation == "High credibility."){
            sci_name$genus_score <- 1
            sci_name$genus_evaluation <- "Missing 'subspecies' in the scientific name."
          } else {
            sci_name$genus_score <- 1
            sci_name$genus_evaluation <- paste(sci_name$genus_evaluation, "Missing 'subspecies' in the scientific name.")
          }
        }
        if(str_trim(cur_config_list[cur_config_len]) == 'distribution'){
          # extract distribution part, split from author/year by the
          # quoted separator word in the config (if any)
          if (str_detect(cur_config_list[cur_config_len-1], "'*'")){
            split_word <- str_split(cur_config_list[cur_config_len-1], "'*'")[[1]][2]
            sci_name$distribution <- str_trim(str_split(lines[i], split_word)[[1]][2])
            lines[i] <- str_trim(gsub(split_word, "", lines[i]))
            sci_name$author_year <- str_trim(gsub(sci_name$distribution , "", lines[i]))
            sci_name$distribution_score <- 1
            sci_name$distribution_evaluation <- "High credibility."
            if (sci_name$author_year != ""){
              sci_name$author_year_score <- 1
              sci_name$author_year_evaluation <- "High credibility."
            } else {
              sci_name$author_year_score <- 1
              sci_name$author_year_evaluation <- "Missing author and year part in the scientific name."
            }
          } else if (str_trim(cur_config_list[cur_config_len-1]) == 'Author_Year'){
            year_list <- str_locate_all(lines[i], "[0-9]{4}.?")[[1]]
            # start_index is the position of the last character of the last year
            start_index <- year_list[, "end"][length(year_list[, "end"])]
            sci_name$distribution <- str_trim(substring(lines[i], start_index + 1))
            sci_name$author_year <- str_trim(gsub(sci_name$distribution, "", lines[i]))
            sci_name$author_year_score <- 1
            sci_name$author_year_evaluation <- "High credibility."
            sci_name$distribution_score <- 1
            sci_name$distribution_evaluation <- "Missing distribution part in the scientific name."
          } else {
            sci_name$distribution <- lines[i]
            sci_name$distribution_score <- 1
            sci_name$distribution_evaluation <- "High credibility."
            sci_name$author_year_score <- 1
            sci_name$author_year_evaluation <- "Missing author and year part in the scientific name."
          }
        } else if (str_trim(cur_config_list[cur_config_len]) == 'Author_Year'){
          sci_name$author_year <- lines[i]
          sci_name$distribution_score <- 1
          sci_name$distribution_evaluation <- "Missing distribution part in the scientific name."
        } else {
          sci_name$author_year_score <- 1
          sci_name$author_year_evaluation <- "Missing author and year part in the scientific name."
          sci_name$distribution_score <- 1
          sci_name$distribution_evaluation <- "Missing distribution part in the scientific name."
        }
        # write the current line to tabular output
        cur_result <- get_tabular_output(sci_name, location_detail, language, evaluation)
        final_result <- rbind(final_result, cur_result)
      }
    }
    write.csv(final_result, file = output_file, row.names = F)
  } else {
    stop("Can't support current config.")
  }
}
# Convert a parsed sci_name record into one or more data-frame rows.
# When location_detail == "TRUE", each comma-separated locality is geocoded
# (latitude/longitude/formatted address) on its own row; taxonomic columns
# are printed only on the first row of an entry to keep the table readable.
# When evaluation == "TRUE", a score/evaluation pair is appended per row.
get_tabular_output <- function(sci_name, location_detail, language, evaluation){
  # Seed data frames; the numeric seed rows are dropped before returning.
  result <- data.frame(genus = c(0), species = c(0), subspecies = c(0), author_year = c(0), distribution = c(0),
                       latitude = c(0), longitude = c(0), detail = c(0))
  eva_df <- data.frame(score = c(0), evaluation = c(0))
  if (location_detail == "TRUE"){
    distribution_list <- str_split(sci_name$distribution, ",")[[1]]
    for (j in seq_along(distribution_list)){
      # To keep the table clear, genus/author/etc. are shown only once for
      # the multiple locations belonging to the same entry.
      if (j == 1) {
        distribution_list[j] <- gsub("[[:punct:]]", "", distribution_list[j])
        address <- geoCode(distribution_list[j], language)
        new_row <- c(sci_name$genus, sci_name$species, sci_name$subspecies, sci_name$author_year, distribution_list[j], address[1], address[2], address[4])
        result <- rbind(result, new_row)
        if (evaluation == "TRUE"){
          eva_result <- evaluation_output(sci_name)
          eva_df <- rbind(eva_df, eva_result)
        }
      } else {
        address <- geoCode(distribution_list[j], language)
        new_row <- c(" ", " ", " ", " ", distribution_list[j], address[1], address[2], address[4])
        result <- rbind(result, new_row)
        if (evaluation == "TRUE"){
          eva_df <- rbind(eva_df, c(" ", " "))
        }
      }
      # The API server rejects too-frequent access, so pause after each
      # request. BUG FIX: this sleep originally sat *after* the loop,
      # pausing once per entry instead of once per geocoding request as
      # the accompanying comment intended.
      Sys.sleep(1)
    }
  } else{
    # No geocoding: a single row with the five taxonomic/distribution columns.
    new_row <- c(sci_name$genus, sci_name$species, sci_name$subspecies, sci_name$author_year, sci_name$distribution)
    result <- rbind(result[,1:5], new_row)
    if (evaluation == "TRUE"){
      eva_result <- evaluation_output(sci_name)
      eva_df <- rbind(eva_df, eva_result)
    }
  }
  # Drop the seed row(s) and, if requested, attach the evaluation columns.
  if (evaluation == "TRUE"){
    result = cbind(result[-1,],eva_df[-1,])
  } else {
    result = result[-1,]
  }
  return(result)
}
# Entry point: parse `input_str` either heuristically (empty config) or
# according to a single-line config string. Multi-line configs are rejected
# here (see output_sciname_by_config_multi_line for that case).
# Returns the parsed sci_name record.
parse_taxoname <- function(input_str, location_detail, language, evaluation, config){
  if(config == ""){
    # No config supplied: fall back to the heuristic full parser.
    cur_sci_name <- get_full_sciname_one_line(input_str)
  } else {
    line_break_num <- length(str_locate_all(config, "\n")[[1]][,"start"])
    if(line_break_num == 0){
      cur_sci_name <- get_sciname_by_config_one_line(input_str, config)
    } else{
      # BUG FIX: R is case-sensitive and the original called the undefined
      # `Stop()`; the error-raising function is `stop()`.
      # NOTE(review): the message wording ("more than one") fires for any
      # line break at all -- confirm the intended wording.
      stop("You can't have more than one line break symbols in config.")
    }
  }
  return(cur_sci_name)
}
|
73eb3a888cafc52dca023fdedf5c3a0105355643
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051626-test.R
|
92c3715ddb78ae8d5931006601cca89f86938c65
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,829
|
r
|
1610051626-test.R
|
testlist <- list(rates = c(NaN, 7.29112072938316e-304, -1.64816262214147e-307, -2.35343736497682e-185, 7036874417766.4, -2.35343736826454e-185, 7.17736025324585e-310, 7.32777351949015e-15, 9.14021444806306e-322, 1.00891829368495e-309, 2.67904643304077e+301, -1.26836459123889e-30, 9.37339630957792e-312, 1.09509791288755e-303, -5.66365833702221e+303, 5.43230922486616e-312, -3.9759940224262e-34, -1.26823100659151e-30, -1.26836459270829e-30, 2.39422219319154e-301, -4.93185008441161e-31, NaN, -5.79189576537157e-34, -9.21253817446353e-280, 1.23269447171475e-30, -1.26836459270829e-30, 5.5869437297374e-319, -6.76385503750878e-231, -2.97578981702996e-288, 7.29112203319188e-304, 8.74601785204247e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), thresholds = numeric(0), x = c(NaN, NaN, NaN, 9.70488469130173e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, -5.82789329454073e+303, -5.82900681339507e+303, 1.08646184497373e-311, 8.97712454626148e-308, -2.11840698478091e-289, 1.24351972100265e-13, -3.72626152437281e+304, Inf, -7.40367110377773e-171, NaN, NaN, NaN, -8.59702596077467e-171, 5.7418150925011e+199, -2.97598959778408e-288, 4.0083522360489e-306, 2.34012289634757e-269, -1.26836459270829e-30, 3.94108708470682e-312, -5.96890832358674e+306, 2.81218450871091e-312, 5.85363771866079e+170, 8.62805110310557e-307, -4.25550648705951e+305, NaN, NaN, NaN, NaN, -1.36573625663878e-151, 5.74181509254692e+199, -2.11852534547344e-289, NaN, -5.96890832411666e+306, NaN, -2.30879999750655e-289, 8.74601785371863e-310, 8.5451750570825e+194, -1.404447759072e+306, NaN, 7.2911220195564e-304, -8.8144298991562e-280, -2.30331110774114e-156, -2.30331110816477e-156, -1.12583501772562e-305, -2.30331110816477e-156, -6.36358120662016e+305, 3.65365169506523e-306, 2.62480682658967e-301, -6.56793027847815e-287, 
-8.81442988493713e-280, -2.64494692448922e+154, 1.86165782692909e-130, -1.40946656124468e-52, -6.5692055167788e-287, 2.11057949781812e-309, -2.29827867994584e-185, 9.11389926506012e-306, 2.39701938834909e-94, -2.18056649082045e-289, NaN, 2.7271398649941e-312, 7.14190420369699e-304, 5.43226988934558e-312, 0, 0, 2.71615461243555e-312, 7.14190420369699e-304, 5.43226988934558e-312, -1.03066467131803e-228, 2.71615461306795e-312, -1.90877083252549e-287, 7.41606077195142e-310, -1.74162172578232e-248, 7.20047258077813e-310, 3.98264587882868e-317, 8.28904605845809e-317, 2.84809453888922e-306, 6.9881578015912e-310, NA, -2.30331110816477e-156, -2.30331110816477e-156, -2.30331110816272e-156, -5.5516993870748e+306, 4.65661286890991e-10, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
751186950e6b5da574d7a76c4373be228599161d
|
6b01573263a626f7125dd3cba6f59c1c5024cc09
|
/functions.R
|
97dffcd62ed6266540cda65e5ec5865d6b7a1acb
|
[] |
no_license
|
escanillans/eulerian_path_finding_algorithm
|
eb987b4bb45872d5eddb0cbea23b7a7fa6be65b6
|
617045c2d095060ae592f48ac2222d0b29530d06
|
refs/heads/master
| 2020-03-11T09:27:59.678758
| 2018-04-17T14:03:28
| 2018-04-17T14:03:28
| 129,912,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,980
|
r
|
functions.R
|
# Forward algorithm for a hidden Markov model.
# Input:
#   n = number of states (begin and end states are added internally)
#   seq = observed symbol sequence (assumed non-empty)
#   transitions = table of (from, to, prob) edges, read via findPointers()
#   emissions = table of per-state emission probabilities, read via
#               emissionProb()
# Output: (n+2) x (length(seq)+1) matrix of log forward probabilities for
# each sequence position (column) and state (row).
forward <- function(n, seq, transitions, emissions)
{
  logProbMatrix <- matrix(0, nrow = n + 2, ncol = length(seq) + 1)
  # The begin state generates the empty prefix with probability 1.
  logProbMatrix[1, 1] <- 1
  # Fill Top-Down, Left-Right: column j = sequence position, row i = state.
  for (j in 2:ncol(logProbMatrix)) {
    for (i in 2:nrow(logProbMatrix)) {
      # 1. Find the states that point to state i.
      pointerInfo <- findPointers(transitions, i)
      # Sum the probability mass arriving from every predecessor.
      # seq_len() correctly yields an empty loop when a state has no
      # predecessors (the original 1:numPointers form would run for
      # k = 1, 0 and index out of bounds).
      total <- 0
      for (k in seq_len(pointerInfo$numPointers)) {
        total <- total + logProbMatrix[pointerInfo$pointers[k], j - 1] *
          pointerInfo$transProb[k]
      }
      # 2. Emission probability of the observed symbol in state i.
      emitProb <- emissionProb(emissions, i - 1, seq[i - 1])
      # 3. Store the log probability.
      # NOTE(review): the cells summed above already hold log values, yet
      # they are combined as plain probabilities before taking log() here;
      # verify this matches the intended forward recursion.
      logProbMatrix[i, j] <- log(emitProb * total)
    }
  }
  return(logProbMatrix)
}
# Look up the probability that state `stateID` emits `symbol`.
# Input:
#   emissions = table with columns statNum, emissionSymb and the
#               probability in the third column
#   stateID = state ID
#   symbol = emission symbol
# Output: the matching probability value(s) from column 3.
emissionProb <- function(emissions, stateID, symbol)
{
  row_match <- emissions$statNum == stateID & emissions$emissionSymb == symbol
  emissions[row_match, 3]
}
# For the current state, report the incoming transitions:
# how many states point to it, which states those are (column 1 of
# `transitions`), and the corresponding transition probabilities (column 3).
findPointers <- function(transitions, currState)
{
  incoming <- transitions[transitions$to == currState, ]
  list(numPointers = length(incoming[, 1]),
       pointers = incoming[, 1],
       transProb = incoming[, 3])
}
|
bbe56a4000a400c15e953f99e80d70663d021204
|
50851ba9027ece2a6f0a23d493d5d15e4d3cc208
|
/script/corr.R
|
c08e2b136146111ed0f07c6a8f574af63c18282f
|
[] |
no_license
|
JonathanRyanW/R_Programming_Quiz2
|
b6b8faca6600ebf71cb5586a6f9250eb6d0e63b2
|
3017a8079b8fc623f5735dcf02bc6e66abfae1df
|
refs/heads/main
| 2023-03-05T12:31:17.324566
| 2021-02-24T00:10:23
| 2021-02-24T00:10:23
| 341,727,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,936
|
r
|
corr.R
|
# Zero-pad each monitor id to three characters ("001" .. "332").
# `id` is a vector of positive integers; ids above 332 are dropped, matching
# the original loop's behaviour. Returns NULL for an empty result (the
# original grew a vector with c(), which yields NULL when nothing matches).
id_into_character <- function(id){ #id is an integer vector
  # Vectorised sprintf replaces the original grow-in-a-loop digit cases.
  kept <- id[id <= 332]
  if (length(kept) == 0) {
    return(NULL)
  }
  sprintf("%03d", kept)
}
# Map monitor ids to their CSV file paths under ./specdata/.
# Returns NULL for an empty id vector, mirroring the original loop.
url_list <- function(id){ # id is an integer vector
  padded <- id_into_character(id) #turning id into a character vector
  if (length(padded) == 0) {
    return(NULL)
  }
  paste("./specdata/", padded, ".csv", sep = "")
}
# Count, for each monitor id, the rows where both sulfate and nitrate are
# observed (not NA). Returns a data frame with columns id and nobs.
# NOTE(review): the `directory` argument is ignored -- url_list() hard-codes
# the "./specdata/" path; confirm whether paths should be built from
# `directory` instead.
complete <- function(directory, id){
  nobs <- c() #creating an empty vector to store complete cases count
  for (i in seq_along(id)){
    # url_list(id)[i] re-derives the whole path vector each iteration and
    # picks the i-th entry.
    data <- read.csv(url_list(id)[i])
    complete_cases <- !is.na(data$nitrate) & !is.na(data$sulfate)
    nobs <- c(nobs, sum(complete_cases))
  }
  return(as.data.frame(cbind(id, nobs)))
}
# Precompute complete-case counts for all 332 monitors; corr() reads this
# global data frame.
# BUG FIX: `specdata` was an unquoted symbol (an undefined object that would
# error at run time); the directory argument must be a string.
df <- complete("specdata", 1:332)
# Compute the sulfate/nitrate correlation for every monitor whose number of
# complete cases exceeds `threshold`. Returns a numeric vector of
# correlations (possibly empty).
# NOTE(review): relies on the global data frame `df` built at the top level
# and ignores the `directory` argument (paths are hard-coded to
# "./specdata/") -- confirm both are intended. Also note which() returns
# row positions, which only equal monitor ids because df$id is 1:332.
corr <- function(directory, threshold = 0){
  above_threshold <- which(df$nobs > threshold) #location of data above threshold
  above_threshold <- id_into_character(above_threshold)
  correlation <- c()
  for (i in seq_along(above_threshold)){ #obtaining all the correlations
    data <- read.csv(paste("./specdata/", above_threshold[i], ".csv",sep = ""))
    complete_cases <- !is.na(data$nitrate) & !is.na(data$sulfate)
    correlation <- c(correlation, cor(data$sulfate[complete_cases],
                                      data$nitrate[complete_cases]))
  }
  #printing the correlations
  if (length(correlation) > 0){
    return(correlation)
  }
  # Empty case: coerce NULL to numeric(0) so callers always get a numeric.
  if (length(correlation) == 0){
    return(as.numeric(correlation))
  }
}
|
857c45e446ed605298acdf51d1c2b0e332b9d377
|
32e0458f7a034d1bbc63b2e251ed485c8672fc53
|
/man/pcc.Rd
|
fe1aa7a8b031a12091cda0b1e6aa142700695993
|
[] |
no_license
|
matthewwolak/nadiv
|
8ac285b4d5d5e1de558b3de9019db1c81bdd6bce
|
4d60f7c2a71149780c0cd33aee2b7735e8650619
|
refs/heads/master
| 2023-08-02T13:14:04.450579
| 2023-06-16T02:00:38
| 2023-06-16T02:00:38
| 33,896,065
| 16
| 7
| null | 2023-06-16T02:00:39
| 2015-04-13T21:52:53
|
R
|
UTF-8
|
R
| false
| true
| 2,463
|
rd
|
pcc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remlSupport.R
\name{pcc}
\alias{pcc}
\title{REML convergence checks}
\usage{
pcc(object, traces = NULL, tol = 0.01, silent = FALSE)
}
\arguments{
\item{object}{A list with at least one element named: \code{monitor} (see
Details)}
\item{traces}{Optionally, a matrix to substitute instead of the monitor
element to \code{object}. Each row corresponds to a different variance
component in the model and each column is a different iteration of the
likelihood calculation (column 1 is the first iterate).}
\item{tol}{The tolerance level for which to check against all of the changes
in variance component parameter estimates}
\item{silent}{Optional argument to silence the output of helpful (indicating
default underlying behavior) messages}
}
\value{
Returns \code{TRUE} if all variance parameters change less than the
value specified by \code{tol}, otherwise returns \code{FALSE}. Also see the
\code{details} section for other circumstances when \code{FALSE} might be
returned.
}
\description{
Mainly checks to ensure the variance components in a REML mixed model do not
change between the last two iterations more than what is allowed by the
tolerance value. See details for extra check on asreml-R models.
}
\details{
Object is intended to be an asreml-R model output. NOTE, The first 3 rows
are ignored and thus should not be variance components from the model (e.g.,
they should be the loglikelihood or degrees of freedom, etc.). Also, the
last column is ignored and should not be an iteration of the model (e.g., it
indicates the constraint).
The function also checks \code{object} to ensure that the output from the
asreml-R model does not contain a log-likelihood value of exactly 0.00. An
ASReml model can sometimes fail while still returning a \code{monitor}
object and \code{TRUE} value in the \code{converge} element of the output.
This function will return \code{FALSE} if this is the case.
}
\examples{
# Below are the last 3 iterations of the trace from an animal model of
# trait1 of the warcolak dataset.
# Re-create the output from a basic, univariate animal model in asreml-R
tracein <- matrix(c(0.6387006, 1, 0.6383099, 1, 0.6383294, 1, 0.6383285, 1),
nrow = 2, ncol = 4, byrow = FALSE)
dimnames(tracein) <- list(c("ped(ID)!ped", "R!variance"), c(6, 7, 8, 9))
pcc(object = NULL, traces = tracein)
}
\author{
\email{matthewwolak@gmail.com}
}
|
d114468fec6da1ad8fab1f5bc9b294312ffe3358
|
73ad0d1d6afe0beb099efd6817e2136326f20991
|
/AutoCorrelation.R
|
657dacce3698f51ee13533824975cca82ee6af27
|
[] |
no_license
|
prabasiva/cchaos
|
7edce0367b53e685ff642dadee3b38adb140af5a
|
07a399abaf559cb1bbecc9405b491a2e64c547ed
|
refs/heads/master
| 2021-06-02T14:07:39.186551
| 2018-01-09T15:29:37
| 2018-01-09T15:29:37
| 67,543,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,052
|
r
|
AutoCorrelation.R
|
# Autocorrelation analysis of the SP500 and NASDAQ indices plus three toy
# signals (sine, polynomial, noisy sine). Produces a series of diagnostic
# plots and prints detrending statistics for both indices.
# NOTE(review): heavy side effects -- calls setwd() twice and expects
# 'fspcom.dat' / 'nasdaq_ready.csv' under a hard-coded absolute path.
ac<-function()
{
  #Program used to create AutoCorrelation Analysis for sample, SP500 & NASDAQ
  #Filename:AutoCorrelation.R
  # Praba Siva
  # praba@umich.edu
  # @prabasiva
  library(mFilter);
  library(latex2exp)
  setwd("/Users/sivasp1/Documents/2016/Personal/Praba/MATH599/program")
  # ---- SP500: load monthly data, take logs, build the fractional-year axis
  fspcom=read.table('fspcom.dat')
  dat = (fspcom[,5])
  mort=log(dat)
  year=fspcom[,2]+1/12*fspcom[,3]
  # First differences of the log series (month-over-month log changes).
  le=length(dat)
  x=mort[2:le]
  y=mort[1:le-1]
  diffxy=x-y
  #plot(diffxy,type='l')
  # Log-linear detrending: OLS fit of log index on time; cycle = residuals.
  dur=1:length(year)
  lmr=lm(mort~dur)
  intercept=coef(lmr)[1]
  slope=coef(lmr)[2]
  dftrend=intercept+slope*dur
  dfcycle=mort-dftrend
  dfacf=acf(dfcycle,plot=FALSE,100);
  # Hodrick-Prescott filter with the conventional monthly parameter 14400.
  hpf=hpfilter(mort,freq=14400)
  layout(matrix(c(1,2,3,4), 4,1, byrow = TRUE))
  color={'blue'}  # NOTE(review): assigned but never used
  ac1=acf(hpf$cycle, ci.type = "ma",plot=FALSE,100)
  plot(year,mort,main='Log SP500 index',
       xlab='Year',ylab=TeX('log (SP500(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='red');
  bc1=acf(diffxy,ci.type="ma",plot=FALSE,100)
  # ACF panels: HP cycle, first-difference (FD) cycle, log-linear cycle.
  plot(ac1,main='Autocorrelation of log SP500 HP Cycles'
       ,xlab='Lag',ylab='AC(1)')
  lines(ac1$lag,ac1$acf,main='Autocorrelation of log SP500 HP Cycles',
        xlab='Lag',ylab='AC(1)',type='l',
        col='blue',cex.axis=1.1,cex.lab=1.1,lwd=3)
  plot(bc1,main='Autocorrelation of log SP500 FD ',
       xlab='Lag',ylab='AC(1)')
  lines(bc1$lag,bc1$acf,main='Autocorrelation of log SP500 FD',
        xlab='Lag',ylab='AC(1)',type='l',col='blue',lwd=3)
  plot(dfacf,main='Autocorrelation of log-linear SP500 ',
       xlab='Lag',ylab='AC(1)')
  lines(dfacf$lag,dfacf$acf,main='Autocorrelation of log-linear SP500 ',
        xlab='Lag',ylab='AC(1)',type='l',
        col='blue',lwd=3)
  # Trend/cycle overview plots for the log-linear decomposition.
  layout(matrix(c(1,2), 2,1, byrow = TRUE))
  plot(year,mort,main='Log SP500 index',
       xlab='Year',ylab=TeX('log (SP500(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='red');
  lines(year,dftrend,main='Trend of Log SP500 index using Log-linear',
        xlab='Year',ylab=TeX('log-linear(SP500(t))'),
        type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='blue');
  legend("bottomright",c("Trend"),lty=c(1),lwd=c(2.5),col=c("blue"))
  plot(year,dfcycle,main='Cycle of Log SP500 index using Log-linear',
       xlab='Year',ylab=TeX('log-linear(SP500(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='green');
  layout(matrix(c(1,2), 2,1, byrow = TRUE))
  plot(year,mort,main='Log SP500 index',
       xlab='Year',ylab=TeX('log (SP500(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='red');
  plot(year[1:length(diffxy)],diffxy,
       main='Cycle of Log SP500 index using Log-linear trend',
       xlab='Year',ylab=TeX('log-linear(SP500(t))'),type='l',
       cex.axis=1.1,cex.lab=1.1,lwd=3,col='green');
  # Summary stats (mean/sd/var/correlation length) for each SP500 ACF.
  sta.sp500=list(mean(dfacf$acf),sd(dfacf$acf),var(dfacf$acf),corrlength(dfacf),
                 mean(ac1$acf),sd(ac1$acf),var(ac1$acf),corrlength(ac1),
                 mean(bc1$acf),sd(bc1$acf),var(bc1$acf),corrlength(bc1));
  # ---- NASDAQ: same pipeline on the monthly NASDAQ index ----
  layout(matrix(c(1,2,3,4), 4,1, byrow = TRUE))
  setwd("/Users/sivasp1/Documents/2016/Personal/Praba/MATH599/program")
  dat <- read.csv(file="nasdaq_ready.csv",head=TRUE,sep=",")
  year=dat[,1]+1/12*dat[,2]
  dat=dat[,3]
  mort=log(dat)
  le=length(dat)
  x=mort[2:le]
  y=mort[1:le-1]
  diffxy=x-y
  dur=1:length(year)
  lmr=lm(mort~dur)
  intercept=coef(lmr)[1]
  slope=coef(lmr)[2]
  dftrend=intercept+slope*dur
  dfcycle=mort-dftrend
  dfacf=acf(dfcycle,plot=FALSE,100);
  hpf=hpfilter(mort,freq=14400)
  ac1=acf(hpf$cycle, ci.type = "ma",plot=FALSE,100)
  bc1=acf(diffxy,ci.type="ma",plot=FALSE,100)
  layout(matrix(c(1,2), 2,1, byrow = TRUE))
  plot(year,mort,main='Log NASDAQ index',
       xlab='Year',ylab=TeX('log (NASDAQ(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='red');
  lines(year,dftrend,main='Trend of Log NASDAQ index using Log-linear',
        xlab='Year',ylab=TeX('log-linear(NASDAQ(t))'),
        type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='blue');
  legend("bottomright",c("Trend"),lty=c(1),lwd=c(2.5),col=c("blue"))
  plot(year,dfcycle,main='Cycle of Log NASDAQ index using Log-linear',
       xlab='Year',ylab=TeX('log-linear(NASDAQ(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='green');
  layout(matrix(c(1,2), 2,1, byrow = TRUE))
  plot(year,mort,main='Log NASDAQ index',
       xlab='Year',ylab=TeX('log (NASDAQ(t))'),
       type='l',cex.axis=1.1,cex.lab=1.1,lwd=3,col='red');
  plot(year[1:length(diffxy)],diffxy,
       main='Cycle of Log NASDAQ index using Log-linear trend',
       xlab='Year',ylab=TeX('log-linear(NASDAQ(t))'),type='l',
       cex.axis=1.1,cex.lab=1.1,lwd=3,col='green');
  layout(matrix(c(1,2,3,4), 4,1, byrow = TRUE))
  plot(year,mort,main='Log NASDAQ index',
       xlab='Year',ylab=TeX('log (NASDAQ(t))'),type='l',
       col='red',cex.axis=1.1,cex.lab=1.1,lwd=3);
  plot(ac1,main='Autocorrelation of log NASDAQ HP Cycles',
       xlab='Lag',ylab='AC(1)')
  lines(ac1$lag,ac1$acf,main='Autocorrelation of log NASDAQ HP Cycles',
        xlab='Lag',ylab='AC(1)',type='l',
        col='blue',cex.axis=1.1,cex.lab=1.1,lwd=3)
  plot(bc1,main='Autocorrelation of log NASDAQ FD ',
       xlab='Lag',ylab='AC(1)')
  lines(bc1$lag,bc1$acf,main='Autocorrelation of log NASDAQ FD',
        xlab='Lag',ylab='AC(1)',type='l',
        col='blue',cex.axis=1.1,cex.lab=1.1,lwd=3)
  plot(dfacf,main='Autocorrelation of log-linear NASDAQ ',
       xlab='Lag',ylab='AC(1)')
  lines(dfacf$lag,dfacf$acf,main='Autocorrelation of log-linear NASDAQ ',
        xlab='Lag',ylab='AC(1)',type='l',col='blue',lwd=3)
  # ---- Toy examples: sine, polynomial, noisy sine ----
  # NOTE(review): these demos reassign ac1 three times below, so the
  # sta.nasdaq statistics computed afterwards use the ACF of the *noisy
  # sine demo*, not the NASDAQ HP cycle -- verify this is intended.
  layout(matrix(c(1,2,3,4,5,6), 3, 2, byrow = TRUE))
  #par(mfrow=c(2,1),mar=c(3,3,2,1),cex=.8)
  x=seq(-15,15,.1);
  y=sin(x)
  ac1=acf(y,lag.max=100,plot=FALSE);
  plot(x,y,main='Sin wave',xlab='T',ylab='Sin(t)',type='l',
       col='red',cex.axis=1.1,cex.lab=1.1,lwd=3)
  plot(ac1,main='Autocorrelation of Sin wave',xlab='Lag',ylab='AC(1)',
       cex.axis=1.1,cex.lab=1.1,lwd=.2)
  lines(ac1$lag,ac1$acf,type='l',col='blue',lwd=2)
  x=seq(-15,15,.1);
  y=x^2+x^3
  ac1=acf(y,lag.max=100,plot=FALSE);
  plot(x,y,main='Polynomial',
       xlab='T',ylab=TeX('y=x^3(t)+x^2(t)'),type='l',col='red',
       cex.axis=1.1,cex.lab=1.1,lwd=3)
  plot(ac1,main='Autocorrelation of Polynomial',
       xlab='Lag',ylab='AC(1)',cex.axis=1.1,cex.lab=1.5,lwd=.2)
  lines(ac1$lag,ac1$acf,type='l',col='blue',lwd=2)
  x=seq(-15,15,.1);
  y=sin(x)*rnorm(length(x),mean=0,sd=1)  # nondeterministic: random noise
  ac1=acf(y,lag.max=100,plot=FALSE);
  plot(x,y,main='Sin wave with random noise',
       xlab='t',
       ylab=TeX('Sin(t) * r($\\mu=0 ,\\sigma^2=1)'),type='l',col='red',
       cex.axis=1.1,cex.lab=1.1,lwd=2)
  plot(ac1,main='Autocorrelation of Sin wave with random noise',
       xlab='Lag',ylab='AC(1)',cex.axis=1.1,cex.lab=1.5,lwd=3)
  lines(ac1$lag,ac1$acf,type='l',col='blue',lwd=.2)
  corrlength(ac1)
  # Print the mean/sd/var/correlation-length tables for both indices.
  sta.nasdaq=list(mean(dfacf$acf),sd(dfacf$acf),var(dfacf$acf),corrlength(dfacf),
                  mean(ac1$acf),sd(ac1$acf),var(ac1$acf),corrlength(ac1),
                  mean(bc1$acf),sd(bc1$acf),var(bc1$acf),corrlength(bc1))
  print("Dtrend statistics for SP500")
  print(matrix(sta.sp500,nrow=4))
  print("Dtrend statistics for NASDAQ")
  print(matrix(sta.nasdaq,nrow=4))
}
corrlength <- function(acfvector) {
  # Correlation length estimate from an acf object (or any list with an
  # $acf component): locate the first negative coefficient and interpolate.
  # NOTE(review): warns and yields NA when no coefficient is negative
  # (min(integer(0)) is Inf) -- confirm callers never hit that case.
  first_neg <- min(which(acfvector$acf < 0))
  cur_val <- abs(acfvector$acf[first_neg])
  prev_val <- abs(acfvector$acf[first_neg - 1])
  (cur_val + prev_val / 10) * prev_val + first_neg - 1
}
|
a62df1774fd2dfdf48bf9192dd0b472cb0831f51
|
2077b291538c221a6f2ceb36849d0d0630a5fb26
|
/INF-0612/aula3/aula3.R
|
1014c7ae30bbd8691ff554b24289bbb8c524ae15
|
[] |
no_license
|
arthurlustosa/MDC-Unicamp
|
44ba0967019a22d375200cf070161a5f906c82fb
|
edaec371f16a7aaa661da6b87852d216ffb1c763
|
refs/heads/master
| 2021-02-27T21:06:20.458374
| 2020-03-07T14:04:52
| 2020-03-07T14:04:52
| 245,635,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,880
|
r
|
aula3.R
|
# Defining functions
mysum <- function(a, b) {
  # Element-wise sum of two arguments (vectorized, like `+` itself).
  a + b
}
makePower <- function(n) {
  # Closure factory: returns a function that raises its input to the
  # captured power `n`.
  function(x) x^n
}
# Specialized power functions created from the makePower() closure factory.
square <- makePower(2)
cube <- makePower(3)
subvector <- function(vector, begin = 1, end = length(vector)) {
  # Elements begin..end (inclusive); defaults return the whole vector.
  # Note: when begin > end the `:` operator counts down, so the slice
  # comes back reversed.
  vector[begin:end]
}
mydist <- function(x = c(0, 0), y = c(0, 0)) {
  # Euclidean distance between two 2-D points given as (x, y) pairs;
  # defaults measure from the origin.
  sqrt(sum((x[1:2] - y[1:2])^2))
}
truncd <- function(n, d = 0) {
  # Truncate n (toward zero) to d decimal places.
  shift <- 10^d
  trunc(n * shift) / shift
}
# Conditional statements
# Three equivalent definitions of an odd-number test; the last binding wins,
# so odd() ultimately is the vectorized comparison.
odd <- function(x) {
# Version 1: explicit if/else on the remainder (scalar input only).
if (x %% 2 == 1) {
TRUE
} else {
FALSE
}
}
odd <- function(x) {
# Version 2: vectorized ifelse() form of the same test.
ifelse(x %% 2 == 1, TRUE, FALSE)
}
odd <- function(x) {
# Version 3: the comparison itself already yields the logical result.
x %% 2 == 1
}
myabs <- function(a) {
  # Absolute value via a sign test (scalar input).
  if (a < 0) -a else a
}
bhaskara <- function(a = 0, b = 0, c = 0) {
  # Roots of a*x^2 + b*x + c = 0 via the quadratic formula.
  # Falls back to the linear root -c/b when a == 0; returns complex
  # roots when the discriminant is negative.
  if (a != 0) {
    delta <- as.complex(b^2 - 4 * a * c)  # complex so sqrt() of a negative works
    if (delta != 0) {
      c((-b + sqrt(delta)) / (2 * a), (-b - sqrt(delta)) / (2 * a))
    } else {
      -b / (2 * a)  # repeated real root
    }
  } else if (b != 0) {
    -c / b  # linear equation b*x + c = 0
  } else {
    # Previously this silently returned Inf/NaN from -c/0.
    stop("'a' and 'b' cannot both be zero", call. = FALSE)
  }
}
# Repetition statements
printVector <- function(v) {
  # Print each element of v on its own line (while-loop version).
  pos <- 1
  total <- length(v)
  while (pos <= total) {
    print(v[pos])
    pos <- pos + 1
  }
}
printVector <- function(v) {
  # Print each element of v on its own line (for-loop version).
  for (element in v) print(element)
}
mysum <- function(...) {
  # Sum of all values passed via ..., accumulated left to right starting
  # from a double 0 (so the result is double even for integer inputs,
  # matching the original loop's behavior). Returns 0 with no arguments.
  Reduce(`+`, c(...), 0)
}
mylength <- function(vector) {
  # Count elements by iterating; returns a double (like the original loop),
  # unlike base length(), which returns an integer.
  count <- 0
  for (ignored in vector) {
    count <- count + 1
  }
  count
}
mylength <- function(...) {
  # Count the values collected from ... ; returns a double.
  tally <- 0
  for (ignored in c(...)) {
    tally <- tally + 1
  }
  tally
}
multlength <- function(...) {
  # Length of each argument, in order. Returns NULL (not integer(0))
  # when called with no arguments, matching the original.
  lengths_out <- NULL
  for (arg in list(...)) {
    lengths_out <- c(lengths_out, length(arg))
  }
  lengths_out
}
multlength(25:30, matrix(1:12, 3, 4), rnorm(5), sample(10))
mymin <- function(...) {
  # Smallest value among ...; returns Inf when called with no arguments.
  # NOTE(review): an NA among the inputs makes `if (NA)` error, as in the
  # original -- confirm callers never pass NA.
  smallest <- Inf
  for (value in c(...)) {
    if (value < smallest) {
      smallest <- value
    }
  }
  smallest
}
mymin <- function(...) {
  # Smallest value among ... ; with no arguments it warns and returns Inf.
  smallest <- Inf
  if (missing(...)) {
    warning("missing arguments; returning Inf")
  } else {
    for (value in c(...)) {
      if (value < smallest) {
        smallest <- value
      }
    }
  }
  smallest
}
subset <- function(set1, set2) {
  # TRUE when every element of set1 occurs in set2; an empty set1 is a
  # subset of anything. (Shadows base::subset in this script.)
  all(set1 %in% set2)
}
subset <- function(set1, set2) {
  # Loop version: bail out at the first element of set1 absent from set2.
  # (Shadows base::subset in this script.)
  for (candidate in set1) {
    if (!(candidate %in% set2)) {
      return(FALSE)
    }
  }
  TRUE
}
index <- function(vector, element) {
  # Positions in `vector` whose value equals `element`, as an integer
  # vector (empty when there is no match).
  # Replaces the original 1:n loop, which (a) iterated over c(1, 0) and
  # errored for a length-0 vector, (b) errored on `if (NA)` when the
  # comparison produced NA, and (c) grew the result with c(). A no-match
  # result is now integer(0) rather than NULL (equivalent under length()
  # and c(), which is how such results are normally consumed).
  which(vector == element)
}
# lapply: apply a function to every element of a list, returning a list.
L <- list(a = 25:30,
b = matrix(1:6, 2, 3),
c = rnorm(5),
d = sample(10))
lapply(L, mean)
# Extra arguments after the function are forwarded to each call.
lapply(2:4, runif)
lapply(2:4, runif, min = 0, max = 10)
# Data frames are lists of columns, so lapply() iterates over columns.
lapply(datasets::faithful, max)
lapply(faithful, min)
lapply(faithful, function(x) {max(x) - min(x)})
# sapply: like lapply() but simplifies the result to a vector/matrix when possible.
sapply(L, mean)
sapply(L, range)
lapply(faithful, range)
lapply(faithful, quantile)
sapply(faithful, quantile)
# apply: collapse a matrix/array along a margin (1 = rows, 2 = columns).
m <- matrix(sample(12), nrow = 3, ncol = 4); m
apply(m, 1, min)
apply(m, 2, max)
# Reshape into a 2x2x2 array; margins then index array dimensions.
m <- matrix(sample(8))
dim(m) <- c(2, 2, 2); m
apply(m, 1, mean)
apply(m[ , , 1], 1, mean)
apply(m, 2, mean)
apply(m[ , , 2], 2, mean)
# Marginal proportions of the HairEyeColor contingency table.
total <- sum(datasets::HairEyeColor); total
apply(HairEyeColor, 1, sum) / total
apply(HairEyeColor, 2, sum) / total
apply(HairEyeColor, 3, sum) / total
# mapply: multivariate apply, recycling its vector arguments in parallel.
mapply(rep, 1:3, 5:3)
mapply("^", 1:6, 2:3)
tipo1 <- sample(10:99, 10); tipo1
tipo2 <- sample(10:99, 10); tipo2
tipo3 <- sample(10:99, 10); tipo3
tipo4 <- sample(10:99, 10); tipo4
mapply(min, tipo1, tipo2, tipo3, tipo4)
mapply(max, tipo1, tipo2, tipo3, tipo4)
# tapply: apply a function to subsets of a vector defined by a factor.
x <- c(rnorm(100), runif(100), sample(100))
f <- gl(n = 3, k = 100,
labels = c("norm", "unif", "sample"))
tapply(x, f, range)
# Shuffling the rows does not change the per-group results.
s <- sample(length(x))
df <- data.frame(x[s], f[s])
tapply(df$x, df$f, range)
tapply(datasets::mtcars$mpg,
datasets::mtcars$cyl, mean)
tapply(mtcars$qsec, mtcars$cyl, mean)
tapply(mtcars$hp, mtcars$vs, mean)
qfactor <- function(vector) {
  # Bin each value of `vector` into its quartile ("q1".."q4"), using the
  # sample quantiles as inclusive upper cut points, and return a factor.
  q <- quantile(vector)
  # Preallocate instead of growing the result with c() on every iteration
  # (the original was O(n^2)); labels are assigned by position.
  labels <- character(length(vector))
  for (j in seq_along(vector)) {
    value <- vector[j]
    if (value <= q["25%"]) {
      labels[j] <- "q1"
    } else if (value <= q["50%"]) {
      labels[j] <- "q2"
    } else if (value <= q["75%"]) {
      labels[j] <- "q3"
    } else {
      labels[j] <- "q4"
    }
  }
  as.factor(labels)
}
# Summarise mtcars measurements within the quartile bins from qfactor().
tapply(mtcars$mpg, qfactor(mtcars$hp), mean)
tapply(mtcars$mpg, qfactor(mtcars$qsec), max)
tapply(mtcars$hp, qfactor(mtcars$mpg), mean)
# Loblolly pine heights summarised by tree age.
tapply(datasets::Loblolly$height,
datasets::Loblolly$age, min)
tapply(Loblolly$height, Loblolly$age, mean)
tapply(Loblolly$height, Loblolly$age, max)
# Air-quality measurements summarised by month (NAs removed where needed).
tapply(datasets::airquality$Temp,
datasets::airquality$Month, mean)
tapply(airquality$Solar.R, airquality$Month, mean, na.rm = TRUE)
tapply(airquality$Ozone, airquality$Month, mean, na.rm = TRUE)
# iris petal measurements summarised by species.
tapply(datasets::iris$Petal.Length,
datasets::iris$Species, mean)
tapply(iris$Petal.Width, iris$Species, mean)
tapply(iris$Petal.Length / iris$Petal.Width, iris$Species, mean)
tapply(iris$Petal.Length, iris$Species, summary)
# simplify2array() turns the list of per-species summaries into a matrix.
simplify2array(tapply(iris$Petal.Length, iris$Species, summary))
|
cf3822253d433ac548b68b7925b8c848af00b1cf
|
4024b9299759390486555af4f8211e8544a2ac50
|
/run.analysis.R
|
cec6f7364ef2f2e43efe4e379407922f62f98e74
|
[] |
no_license
|
JasonSklikas/Getting-and-cleaning-data-project
|
a888b126be741fb8246a50ab9e0d7b0df0567bc1
|
fd2ceeecc8b5c244438c8ec3ae87f68d58bdfb79
|
refs/heads/master
| 2021-06-08T05:55:37.673805
| 2016-11-01T15:47:51
| 2016-11-01T15:47:51
| 72,547,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,622
|
r
|
run.analysis.R
|
# Download and unzip the UCI HAR dataset, then build the tidy data sets.
if (!file.exists("./Project")) {dir.create("./Project")}
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "./Project/dataset.zip")
# BUG FIX: the archive is downloaded to ./Project/dataset.zip, but the
# original called unzip("dataset.zip"), which looked in the working
# directory. Point unzip() at the actual download location.
# NOTE(review): the read.table() paths below assume the archive's contents
# end up directly in the working directory -- confirm the zip's layout.
unzip("./Project/dataset.zip")
# 1. Merge the training and the test sets to create one data set.
# Load the train info
x_train <- read.table("train/x_train.txt")
y_train <- read.table("train/y_train.txt")
trainsubject <- read.table("train/subject_train.txt")
# Load the test info
x_test <- read.table("test/x_test.txt")
y_test <- read.table("test/y_test.txt")
testsubject <- read.table("test/subject_test.txt")
# Stack train on top of test for each component.
xdata <- rbind(x_train, x_test)
ydata <- rbind(y_train, y_test)
subjectdata <- rbind(trainsubject, testsubject)
# 2. Extract only the measurements on the mean and standard deviation.
features <- read.table("features.txt")
mean_n_std_features <- grep(".*mean.*|.*std.*", features[, 2])
xdata <- xdata[, mean_n_std_features]
names(xdata) <- features[mean_n_std_features, 2]
# 3. Use descriptive activity names to name the activities in the data set.
activities <- read.table("activity_labels.txt")
activities[, 2] <- gsub("_", " ", tolower(as.character(activities[, 2])))
# 4. Expand the terse feature names into descriptive variable names.
names(xdata) <- gsub("-mean", "mean", names(xdata))
names(xdata) <- gsub("-std", "std", names(xdata))
names(xdata) <- gsub("[()-]", "", names(xdata))
names(xdata) <- tolower(names(xdata))
names(xdata) <- gsub("^t", "time", names(xdata))
names(xdata) <- gsub("^f", "frequency", names(xdata))
names(xdata) <- gsub("acc", "accelerometer", names(xdata))
names(xdata) <- gsub("gyro", "gyroscope", names(xdata))
names(xdata) <- gsub("bodybody", "body", names(xdata))
names(xdata) <- gsub("mag", "magnitude", names(xdata))
# Replace activity codes 1-6 with their descriptive names.
ydata[, 1] <- activities[ydata[, 1], 2]
# Set the column names "activity" and "subject".
names(ydata) <- "activity"
names(subjectdata) <- "subject"
# Combine subject, activity and measurements into the tidy data set.
clean_data <- cbind(subjectdata, ydata, xdata)
write.table(clean_data, "clean_data.txt", row.name = FALSE)
# 5. Second tidy data set: average of each variable per activity and subject.
# (plyr is kept from the original script; aggregate() itself is from stats.)
library(plyr)
data2 <- aggregate(. ~ subject + activity, clean_data, mean)
data2 <- data2[order(data2$subject, data2$activity), ]
Average_data <- data2
write.table(Average_data, file = "tidy_data_average.txt", row.name = FALSE)
|
0ff96593f97023e332eb6b8c315d83cb00bb71ff
|
d7746351cdbf75c90e1fbcfbe337da50eb6dc1b1
|
/man/disp2D-package.Rd
|
45d323e191579dd4a1792f3911932371060ae75d
|
[] |
no_license
|
cran/disp2D
|
7295625024c1dd59936b33ef0bbc355994adcb61
|
e2927c57f1a38ad405a07b165a7070e53773ca02
|
refs/heads/master
| 2021-01-23T15:03:27.066160
| 2012-05-24T00:00:00
| 2012-05-24T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,953
|
rd
|
disp2D-package.Rd
|
\name{disp2D-package}
\alias{disp2D-package}
\alias{disp2D}
\docType{package}
\title{ Hausdorff and Simplex Dispersion orderings }
\description{
Given a 2D point set, different three-point subsets are selected.
The Hausdorff distances between the convex hulls are calculated
exactly.
}
\details{
\tabular{ll}{
Package: \tab disp2D\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-05-24\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
}
\author{
Guillermo Ayala <Guillermo.Ayala@uv.es>
Maintainer: Guillermo Ayala
}
\references{
Ayala G. and Lopez M. The simplex dispersion ordering and its
application to the evaluation of human corneal endothelia. Journal of
Multivariate Analysis, 100:1447-1464, 2009.
G. Ayala, M.C. Lopez-Diaz, M. Lopez-Diaz, and
L. Martinez-Costa. Studying hypertension in ocular fundus images
using Hausdorff dispersion ordering.
Mathematical Medicine and Biology: A journal of the IMA, 2011.
Miguel Lopez-Diaz. An indexed multivariate dispersion ordering based
on the Hausdorff distance. Journal of Multivariate Analysis,
97(7):1623 - 1637, 2006.
G. Ayala, M.C. Lopez-Diaz, M. Lopez-Diaz and
L. Martinez-Costa. Methods and algorithms to test the simplex
and Hausdorff dispersion orders with a simulation study and an
Ophthalmological application. Technical Report. 2012
}
\keyword{ package }
\examples{
library(disp2D)
library(geometry)
library(mvtnorm)
sigma1 = matrix(c(0.912897,1.092679,1.092679,1.336440),byrow=TRUE,ncol=2)
sigma2 = sigma1 + diag(1,ncol=2,nrow=2)
A = rmvnorm(200,mean=rep(0,2),sigma=sigma1)
B = rmvnorm(200,mean=rep(0,2),sigma=sigma2)
r=.1
prob = probA = probB = rep(1/200,200)
HA = exactHausdorff(A,probA,r)
HB = exactHausdorff(B,probB,r)
plot(HA$distance, cumsum(HA$probability), type = "l", xlab = "",
ylab = "DF", xlim = range(c(HA,HB)))
lines(HB$distance, cumsum(HB$probability), lty = 2)
d1 = simplex(A,bootstrap=TRUE,nresamples=100)
}
|
c2828e6faca563891665787e19741cbea4a467de
|
52f95b07a1d460d90350d5dced856363d96b5aa0
|
/Data Analysis 3 test file.R
|
e2f67ced77525e4e083634edf93cbd3a4d9214e8
|
[] |
no_license
|
twgg201/datan3_2019
|
16122d6f5a7271ba6932c677db97855f2a87e877
|
1a1c833eacce1cde029ad8aa504ae9d2983d1a39
|
refs/heads/master
| 2020-04-17T04:57:58.115699
| 2019-01-17T16:37:47
| 2019-01-17T16:37:47
| 166,255,456
| 0
| 0
| null | 2019-01-17T16:12:30
| 2019-01-17T16:12:30
| null |
UTF-8
|
R
| false
| false
| 26
|
r
|
Data Analysis 3 test file.R
|
#data analysis 3 test file
|
84242d78e6522449036c65c40e66d589134c475c
|
1be41b26870593e2c1320bfa7a5a8a207f7ef9ba
|
/dataset_splits/stratify.R
|
d3ec57712d9d49caf72b97db363a8b35c9a2d7ce
|
[] |
no_license
|
NUNLP/AD_modeling
|
a133f1f4c53ec3c320bdbdbb0b53e4e9a84436de
|
3323ce527aa8ec8a61d4a6e1dfd78df49ea9cadd
|
refs/heads/master
| 2020-12-06T17:22:07.722750
| 2017-06-30T20:30:00
| 2017-06-30T20:30:00
| 95,596,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
stratify.R
|
library(dplyr)
library(magrittr)
strat_sample <- function(data, gr_variab, tr_percent, seed) {
# Stratified Train/Test split: within each level of the grouping column
# named by the string `gr_variab`, roughly `tr_percent` of the rows are
# labelled "Train" and the rest "Test". Returns `data` with an extra SET
# column appended; `seed`, when supplied, fixes the RNG for reproducibility.
stopifnot(tr_percent > 0 & tr_percent < 1)
if(require(dplyr) & require(magrittr)) {
if (!missing(seed)) set.seed(seed)
names0 <- names(data)
gr_variab <- which(names0 == gr_variab)
# Temporarily rename the columns so the helper names n/tRows/SET cannot
# collide with existing columns (make.unique de-duplicates against them).
names(data) <- make.unique(c("n", "tRows", "SET", names0))[-(1:3)]
gr_variab <- names(data)[gr_variab]
# Shuffle all rows, then record each group's size and its target number
# of training rows.
# NOTE(review): group_by_() is deprecated in modern dplyr -- consider
# group_by(.data[[gr_variab]]) when upgrading.
data %<>%
sample_frac %>%
group_by_(gr_variab) %>%
mutate(n = n(), tRows = round(tr_percent * n))
# The first tRows rows of each (already shuffled) group become "Train".
data %<>%
mutate(SET = ifelse(row_number() <= tRows, "Train", "Test")) %>%
select(-n, -tRows) %>%
ungroup
# Restore the original column names and label the appended column SET.
names(data) <- make.unique(c(names0, "SET"))
data
}
}
extract_set <- function(data, whichSET) {
  # Return the rows of `data` that belong to the requested split ("Train"
  # or "Test"), dropping the split-indicator column, which is assumed to
  # be the last column (as produced by strat_sample()).
  stopifnot(is.element(whichSET, c("Train", "Test")))
  if (require(dplyr)) {
    variab <- names(data)[ncol(data)]
    # Use tidy-eval pronouns instead of the deprecated underscore verbs
    # filter_()/select_() removed from modern dplyr.
    data %>%
      filter(.data[[variab]] == whichSET) %>%
      select(-all_of(variab))
  }
}
## example ##
#n <- 1e+5
#set.seed(386)
#Df <- data.frame(V1 = rnorm(n),
# V2 = rt(n, df = 4),
# V3 = rpois(n, lambda = 1),
# y = sample(letters[1:4], n, replace = T,
# prob = c(.33, .33, .33, .01)))
#groups <- strat_sample(Df, "y", .75)
#with(groups, prop.table(table(y, SET), 1))
# a tibble
#extract_set(groups, "Train")
#extract_set(groups, "Test")
#samples <- strat_sample(dat_grouped_codes, "label", 0.8)
#with(samples, prop.table(table(label, SET), 1)) # check!
#train_set <- extract_set(samples, "Train")
#test_set <- extract_set(samples, "Test")
|
da41109b6dfb0c6f85b2ebf495b9faf84ee1ac08
|
e40d274ff6b9bd7e7f20998379f483543582c81f
|
/apputils/man/update_toastr_css.Rd
|
79ee44119690116ec68ef58557a33c84d82fdcaf
|
[
"MIT"
] |
permissive
|
ua-snap/snap-r-tools
|
5be2dcc5171cf7289504f20e98ad3ec603e4ed57
|
c3f573c2abf11633b5262c4d98cfbde39854dbf4
|
refs/heads/master
| 2020-03-22T05:57:29.239067
| 2019-01-08T03:11:17
| 2019-01-08T03:11:17
| 139,602,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,151
|
rd
|
update_toastr_css.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apputils.R
\name{update_toastr_css}
\alias{update_toastr_css}
\title{Update shinytoastr css}
\usage{
update_toastr_css(container = NULL, toast = NULL, rgba = NULL,
hover.rgba = NULL, opacity = NULL, hover.opacity = NULL,
radius = NULL, position = "top-center")
}
\arguments{
\item{container}{list of style arguments for the container div. See details and example.}
\item{toast}{list of style arguments for the toast. See details and example.}
\item{rgba}{numeric, vector of four css rgba property values for background color, e.g., \code{c(0, 0, 0, 0.5)}. See details.}
\item{hover.rgba}{numeric, vector of four css rgba property values for background color on mouse hover. See details.}
\item{opacity}{numeric, toast opacity. Appended to \code{container}.}
\item{hover.opacity}{numeric, toast opacity on mouse hover.}
\item{radius}{character, border radius, e.g., \code{"0px"}.}
\item{position}{character, defaults to \code{"top-center"}.}
}
\value{
an html style tag.
}
\description{
Update toast css from shinytoastr package.
}
\details{
\code{apputils} already contains some toastr css overrides (loaded via \code{use_apputils}).
This function allows for injecting additional or different css overrides for a specific toast container
that may not already be as specified by \code{apputils}. This is typically used to adjust the app intro toast,
hence the default for \code{position} is \code{"top-center"}.
Note that list names and values may be quoted if necessary. See example.
Should be familiar with source toastr css in addition to running the example
in order to understand which elements apply to \code{container} vs. \code{toast}.
If wanting to keep text fully opaque in the toast while using semi-transparency,
especially useful when adding a background image, use css rgba instead of opacity.
\code{rgba} and \code{hover.rgba} take precedence over \code{opacity} and \code{hover.opacity}, respectively, when both are provided.
}
\examples{
update_toastr_css(
list('overflow-y' = 'auto', width = '70\%', height = '700px'),
list(top = '100px', margin = '0 auto', left = '115px')
)
}
|
61df754e3cdb802482e174335607cce9fcaa8b79
|
ab0ec24c7b111c5b333cc5b1f459f56db7adb5d6
|
/plot2.R
|
3ad9ac68f8b8f8ecacbcfaaba2e55cd9b1a289e4
|
[] |
no_license
|
RichardSobota/ExData_Plotting1
|
d8a2572c88c998a18f00bfd47fa4cb9c36b381de
|
181cf87f869b920f3417fdd31b0b4caceb3d7afd
|
refs/heads/master
| 2021-01-22T11:03:23.080679
| 2015-02-08T19:30:12
| 2015-02-08T19:30:12
| 28,894,790
| 0
| 0
| null | 2015-01-07T02:41:43
| 2015-01-07T02:41:42
| null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
plot2.R
|
##
## plot2.R
##
## Written by Richard Sobota as part of programming assignment
## in Exploratory Data Analysis course.
##
## Function uses packages dplyr and lubridate.
##
plot2 <- function() {
## Plot Global_active_power for 1-2 Feb 2007 and save it to plot2.png.
## NOTE(review): relies on dplyr and lubridate being attached by the
## caller -- this file never calls library() for them; confirm the
## execution context before running.
## read data from file
csv.data <- read.csv("household_power_consumption.txt",
sep=";",
na.strings=c("?"),
stringsAsFactors=FALSE)
## use PNG file as graphics device
png(file = "plot2.png")
## convert data to table
tbl_df(csv.data) %>%
## convert date from string to Date
mutate(Date = dmy(Date)) %>%
## select required interval
filter(Date >= dmy("01/02/2007"), Date <= dmy("02/02/2007")) %>%
## add datetime
mutate(datetime = Date + hms(Time)) %>%
## convert data to numbers where needed
mutate(Global_active_power = as.numeric(Global_active_power)) %>%
## draw plot
with(plot(datetime,
Global_active_power,
type="l",
main="",
ylab="Global Active Power (kilowatts)",
xlab=""))
## close the PNG device so the file is flushed to disk
dev.off()
}
|
5f55a8b763474c93d54a0372e128f119470e5030
|
f15700323e86bd4cc22886fe0329757c9bf7fc34
|
/man/lre_auto_bk.Rd
|
88eca49a9ee4f2b6c12317efce9f61c871c7e711
|
[] |
no_license
|
kenjisato/lrem
|
699fac35a3f6d5d77801dd77d62ddc1608389d50
|
33068384970231d169704108866c1b9de369059c
|
refs/heads/master
| 2020-05-07T12:19:28.330011
| 2017-06-12T04:16:32
| 2017-06-12T04:16:32
| 180,499,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 709
|
rd
|
lre_auto_bk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithm-bk.R
\name{lre_auto_bk}
\alias{lre_auto_bk}
\title{LRE solution method based on Blanchard and Kahn (1980, ECTA)}
\usage{
lre_auto_bk(A, nx)
}
\arguments{
\item{A}{Square matrix}
\item{nx}{The number of predetermined variables, \code{nx} is required
by the algorithm.}
}
\value{
List of two functions (g, h), passed to \code{\link{simulate}}
}
\description{
This function solves for a linear policy function for the Linear Rational
Expectations model of \deqn{(x_{t+1}, y_{t+1}) = A (x_{t}, y_{t})}{
(x_{t+1}, y_{t+1}) = A (x_{t}, y_{t})}, where x and y are predetermined and
non-predetermined variables, respectively.
}
|
733b24a9f044ac090d9c3f9cd63449fcf81c930e
|
0ac12d92c092548517fd4c3f8bcb2849567b290a
|
/man/create_seurat_obj.Rd
|
e4f99e16efccb5fc3c8eaec0e02d803fbd4be524
|
[
"MIT"
] |
permissive
|
igordot/scooter
|
9422ea52ab634870ddac1bb4ce435f10057f0207
|
cff289d15e8b2ac7704ade6e1c962e7bf0c1f674
|
refs/heads/master
| 2023-02-09T14:04:13.117319
| 2023-01-23T23:19:20
| 2023-01-23T23:19:20
| 145,920,527
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 776
|
rd
|
create_seurat_obj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.R
\name{create_seurat_obj}
\alias{create_seurat_obj}
\title{Create a new Seurat object from a matrix.}
\usage{
create_seurat_obj(
counts_matrix,
assay = "RNA",
min_cells = 1,
min_genes = 1,
log_file = NULL,
project = "proj"
)
}
\arguments{
\item{counts_matrix}{A matrix of raw counts.}
\item{assay}{Seurat assay to add the data to.}
\item{min_cells}{Include genes/features detected in at least this many cells.}
\item{min_genes}{Include cells where at least this many genes/features are detected.}
\item{log_file}{Filename for the logfile.}
\item{project}{Project name for Seurat object.}
}
\value{
Seurat object.
}
\description{
Create a new Seurat object from a matrix.
}
|
b988a51671653364a8c3cc38ec448e91af5b98b5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GpGp/examples/order_dist_to_point.Rd.R
|
c7dc2255df385823aafde6d89382eac7fd2401f9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
order_dist_to_point.Rd.R
|
library(GpGp)
### Name: order_dist_to_point
### Title: Distance to specified point ordering
### Aliases: order_dist_to_point
### ** Examples
# Auto-extracted example: order 100 random 2-D locations by their
# distance to the domain centre (1/2, 1/2).
n <- 100 # Number of locations
d <- 2 # dimension of domain
locs <- matrix( runif(n*d), n, d )
loc0 <- c(1/2,1/2)
ord <- order_dist_to_point(locs,loc0)
|
3b6676b7f66c949a58bf8607fd79d9a0e14f6389
|
447b1e30413599ff5306408b9383937f5c1bef36
|
/R/discrete-gamma-distribution.R
|
b6875d398e7ad535ef1411aa151b5ff05c2be4c1
|
[] |
no_license
|
twolodzko/extraDistr
|
874768df1d0c1af75924be8f2cc872e222c3bb6d
|
6cdbe85a98c3a34d8360b8c0ffe6eb78517e0fc3
|
refs/heads/master
| 2022-11-11T03:35:25.808964
| 2022-11-08T10:41:21
| 2022-11-08T10:41:21
| 55,365,786
| 41
| 10
| null | 2022-06-25T20:24:44
| 2016-04-03T19:51:19
|
C++
|
UTF-8
|
R
| false
| false
| 2,464
|
r
|
discrete-gamma-distribution.R
|
#' Discrete gamma distribution
#'
#' Probability mass function, distribution function and random generation
#' for discrete gamma distribution.
#'
#' @param x,q vector of quantiles.
#' @param n number of observations. If \code{length(n) > 1},
#' the length is taken to be the number required.
#' @param rate an alternative way to specify the scale.
#' @param shape,scale shape and scale parameters. Must be positive, scale strictly.
#' @param log,log.p logical; if TRUE, probabilities p are given as log(p).
#' @param lower.tail logical; if TRUE (default), probabilities are \eqn{P[X \le x]}
#' otherwise, \eqn{P[X > x]}.
#'
#' @details
#'
#' Probability mass function of discrete gamma distribution \eqn{f_Y(y)}{f}
#' is defined by discretization of continuous gamma distribution
#' \eqn{f_Y(y) = S_X(y) - S_X(y+1)}{f(y) = S(x) - S(x+1)}
#' where \eqn{S_X}{S} is a survival function of continuous gamma distribution.
#'
#' @references
#' Chakraborty, S. and Chakravarty, D. (2012).
#' Discrete Gamma distributions: Properties and parameter estimations.
#' Communications in Statistics-Theory and Methods, 41(18), 3301-3324.
#'
#' @seealso \code{\link[stats]{GammaDist}}, \code{\link{DiscreteNormal}}
#'
#' @examples
#'
#' x <- rdgamma(1e5, 9, 1)
#' xx <- 0:50
#' plot(prop.table(table(x)))
#' lines(xx, ddgamma(xx, 9, 1), col = "red")
#' hist(pdgamma(x, 9, 1))
#' plot(ecdf(x))
#' xx <- seq(0, 50, 0.1)
#' lines(xx, pdgamma(xx, 9, 1), col = "red", lwd = 2, type = "s")
#'
#' @name DiscreteGamma
#' @aliases DiscreteGamma
#' @aliases ddgamma
#'
#' @keywords distribution
#' @concept Univariate
#' @concept Discrete
#'
#' @export
ddgamma <- function(x, shape, rate = 1, scale = 1/rate, log = FALSE) {
# Probability mass function of the discrete gamma distribution.
# Mirrors stats::dgamma's guard: supplying both 'rate' and 'scale' is an
# error unless they are numerically consistent (rate * scale == 1, within
# 1e-15), in which case only a warning is raised.
if (!missing(rate) && !missing(scale)) {
if (abs(rate * scale - 1) < 1e-15)
warning("specify 'rate' or 'scale' but not both")
else stop("specify 'rate' or 'scale' but not both")
}
# The actual PMF is computed in compiled code; log[1L] truncates a
# vector-valued 'log' argument to its first element.
cpp_ddgamma(x, shape, scale, log[1L])
}
#' @rdname DiscreteGamma
#' @export
pdgamma <- function(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) {
  # P(Y <= floor(q)) for the discretized gamma equals P(X < floor(q) + 1)
  # for the underlying continuous gamma, so delegate to pgamma().
  q_discrete <- floor(q) + 1
  pgamma(q_discrete, shape, scale = scale,
         lower.tail = lower.tail[1L], log.p = log.p[1L])
}
#' @rdname DiscreteGamma
#' @export
rdgamma <- function(n, shape, rate = 1, scale = 1/rate) {
  # Discretize continuous gamma draws by taking their integer part.
  continuous_draws <- rgamma(n, shape, scale = scale)
  floor(continuous_draws)
}
|
aa0555c5076e3876a58bd6363bc965bf0ae7facb
|
53010da0027d6b6b7a44309d36e26010e0852681
|
/dvnorm_paper_v2.r
|
0182cfa80845dcc5730b419ad73483b631caf9b2
|
[] |
no_license
|
jhhughes256/LEN_PK
|
7575ea8e92ecf2638de2ffdf641b9c1b052c7280
|
6f19454fc314728c536e94ccd762748d603132e2
|
refs/heads/master
| 2022-01-07T14:43:38.819059
| 2019-06-14T02:22:56
| 2019-06-14T02:22:56
| 62,599,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,919
|
r
|
dvnorm_paper_v2.r
|
###nmprep.r
##Goal: To collate tables of missing data contained within nonclinical raw data obtained on 23rd March 2016
##Note: Based heavily off of datacheck_cyt_script2.r -> Richards code
# Remove any previous objects in the workspace
rm(list=ls(all=TRUE))
graphics.off()
# Set the working directory
master.dir <- "E:/Hughes/Data"
scriptname <- "nmprep_clin"
setwd(master.dir)
# Load libraries
library(ggplot2)
library(doBy)
library(Hmisc)
library(plyr)
library(grid)
library(reshape)
library(stringr)
library(scales)
library(cowplot)
library(gridExtra)
# Source utility functions file
source("E:/Hughes/functions_utility.r")
# Customize ggplot2 theme - R 2.15.3
theme_bw2 <- theme_set(theme_bw(base_size = 22))
theme_bw2 <- theme_update(plot.margin = unit(c(1, 0.5, 3, 0.5), "lines"),
axis.title.x = element_text(size = 18, vjust = 0),
axis.title.y = element_text(size = 18, vjust = 0, angle = 90),
strip.text.x = element_text(size = 16),
strip.text.y = element_text(size = 16, angle = 90),
legend.title = element_text(size = 18),
legend.text = element_text(size = 16))
# Organise working and output directories
working.dir <- paste(master.dir,"RAW_Clinical",sep="/")
workspacefilename <- paste(getwd(),"/",scriptname,".RData", sep="")
output.dir <- paste(working.dir,"/",scriptname,"_Output",sep="")
if(!file.exists(output.dir)){
dir.create(output.dir)
}
filename <- paste(output.dir,"nmprep_flagged.csv",sep="/")
nmprep <- read.csv(filename, na.strings = ".")
locf <- function(x) {
  # Last observation carried forward: each NA is replaced by the most
  # recent non-NA value preceding it; leading NAs remain NA.
  # seq_along(x) * !is.na(x) leaves the position where x is observed and
  # 0 where it is missing; cummax() then gives the last observed position.
  idx_last_good <- cummax(seq_along(x) * !is.na(x))
  idx_last_good[idx_last_good == 0] <- NA
  x[idx_last_good]
}
nmprep$DOSE <- locf(nmprep$AMT)  # carry each dose amount forward to later rows
nmprep$DVNORM <- nmprep$DV/nmprep$DOSE  # dose-normalised concentration
# Bin time-after-dose; the open-ended last bin is relabelled 24 before the
# level labels (bin means) are converted back to numeric values.
# NOTE(review): levels.mean = T uses the reassignable T binding -- prefer TRUE.
bin_cuts <- c(0.52, 1.02, 2.02, 3.02, 5.02, 8.02, 49)
nmprep$TADBIN <- cut2(nmprep$TAD, cuts = bin_cuts, levels.mean = T)
levels(nmprep$TADBIN)[length(bin_cuts)] <- 24
nmprep$TADBIN <- as.numeric(paste(nmprep$TADBIN))
# Dose groups for plotting.
dose_bins <- c(8, 26, 80)
nmprep$DOSEf <- cut2(nmprep$DOSE, cuts = dose_bins)
levels(nmprep$DOSEf) <- c("<10mg", "10-25mg", ">25mg")
# Diagnosis category codes -> labels.
# NOTE(review): assumes sorted DXCATNUM codes correspond to CLL/AML/ALL/MM
# in that order -- confirm against the data dictionary.
nmprep$DXCATf <- factor(nmprep$DXCATNUM)
levels(nmprep$DXCATf) <- c("CLL", "AML", "ALL", "MM")
# Define colourblind palette
cbPalette <- c("#0072B2", "#D55E00", "#009E73", "#CC79A7")
# Create plot function
dvnormPlot <- function(xCol, guideName) {
# Scatter of dose-normalised concentration (scaled by 1000) versus time
# after dose from the global `nmprep`, coloured by the column named in
# `xCol`, with a per-TADBIN median summary line and a log10 y axis.
# NOTE(review): get(xCol) inside aes() resolves the column by name;
# confirm this works with the ggplot2 version in use (.data[[xCol]] is
# the modern alternative).
p <- NULL
p <- ggplot(aes(x = TAD, y = DVNORM*1000), data = nmprep)
p <- p + geom_point(aes(colour = get(xCol)), alpha = 0.2)
p <- p + stat_summary(aes(x = TADBIN, y = DVNORM*1000, colour = get(xCol)), fun.y = median,
geom = "line", size = 1.2)
p <- p + scale_y_log10(NULL, labels = comma)
p <- p + scale_x_continuous(NULL, breaks = 0:6*4)
p <- p + scale_colour_manual(name = guideName, values = cbPalette)
p
}
# Create plots and use cowplot to create grid
p1 <- dvnormPlot("DOSEf", "Dosage")
p2 <- dvnormPlot("DXCATf", "Cancer")
p3 <- plot_grid(p1, p2, align = "vh", labels = c("A", "B"),
ncol = 1, hjust = -5)
# Create text grobs for common y and x axis labels
y.grob <- textGrob("Dose Normalised Concentrations (ng/mL)\n", vjust = 0.7,
gp = gpar(fontface = "plain", col = "black", fontsize = 18), rot = 90)
x.grob <- textGrob("Time After Last Dose (hours)", hjust = 0.6, vjust = -1,
gp = gpar(fontface = "plain", col = "black", fontsize = 18))
# Produce final figure
plot_grid(grid.arrange(arrangeGrob(p3, left = y.grob, bottom = x.grob)))
ggsave("dvnormplot_v2.png", width = 17.4, height = 23.4, units = c("cm"))
ggsave("dvnormplot_v2.eps", width = 17.4, height = 23.4, units = c("cm"),
dpi = 1200, device = cairo_ps, fallback_resolution = 1200)
|
71fa075d9caea77d286c9b1f10bbe387c2fbe723
|
8baf888790f25fb9cdfb45cbda64cc9bba38c2a2
|
/obs_fixes.R
|
315ba2f9585b0ecbec98f25ed9d1feb90ba0f023
|
[] |
no_license
|
NIVA-Denmark/NISAR_app
|
6a79aa489dc0047f7aad2883b2779c77eac47952
|
b4aed56e3ba1a48e94725a9f0f70c3f99cf207cc
|
refs/heads/master
| 2021-07-25T15:16:35.562414
| 2020-12-11T13:43:25
| 2020-12-11T13:43:25
| 228,263,375
| 0
| 0
| null | 2020-12-11T13:43:26
| 2019-12-15T22:41:23
|
R
|
UTF-8
|
R
| false
| false
| 1,295
|
r
|
obs_fixes.R
|
# One-off data-fix script kept for reference; the if (FALSE) wrapper
# guarantees it never executes. Changed the guard from `F` to `FALSE` and
# the T/F argument values to TRUE/FALSE: `T`/`F` are ordinary bindings
# that can be reassigned, while TRUE/FALSE are reserved constants.
if (FALSE) {
  # Drop known-bad shapes, taxa and sources from the observation table.
  dfObs <- read.table("data/NISAR_obs.csv", stringsAsFactors = FALSE,
                      header = TRUE, fileEncoding = "UTF-8", sep = ";")
  dfObs <- dfObs %>%
    filter(!ShapeID %in% c(168, 169))
  dfObs <- dfObs %>%
    filter(AphiaID != 127188)
  dfObs <- dfObs %>%
    filter(Source != "KU Fish")
  write.table(dfObs, file = "data/NISAR_obs.csv", col.names = TRUE,
              row.names = FALSE, sep = ";", na = "", quote = TRUE,
              fileEncoding = "UTF-8")
  # Export the distinct MONIS-5 station coordinates for region assignment.
  df1 <- dfObs %>%
    filter(Source == "MONIS-5") %>%
    distinct(Lat, Lon) %>%
    mutate(MID = row_number())
  write.table(df1, file = "../NISAR/20200512/monis5stns.csv",
              col.names = TRUE, row.names = FALSE, sep = ";", na = "",
              quote = TRUE, fileEncoding = "UTF-8")
  # Read back the externally assigned regions and join on station id.
  df2 <- read.table("../NISAR/20200512/monis5stnsRegions.csv",
                    stringsAsFactors = FALSE, header = TRUE,
                    fileEncoding = "UTF-8", sep = ";") %>%
    select(MID, REGIONID)
  df1 <- df1 %>%
    left_join(df2, by = "MID")
  df1 <- df1 %>%
    select(-MID) %>%
    rename(REGIONIDfix = REGIONID)
  # Patch REGIONID in the observation table wherever a fix is available.
  dfObs <- read.table("data/NISAR_obs.csv", stringsAsFactors = FALSE,
                      header = TRUE, fileEncoding = "UTF-8", sep = ";")
  dfObs <- dfObs %>%
    left_join(df1, by = c("Lon", "Lat"))
  dfObs <- dfObs %>%
    mutate(REGIONID = ifelse(is.na(REGIONIDfix), REGIONID, REGIONIDfix))
  dfObs <- dfObs %>%
    select(-REGIONIDfix)
  write.table(dfObs, file = "data/NISAR_obs.csv", col.names = TRUE,
              row.names = FALSE, sep = ";", na = "", quote = TRUE,
              fileEncoding = "UTF-8")
}
|
817ccadc8eaf7df5ad6c29d6a523a52eb09ffd94
|
be8c9660ff29a44d1835b74b3ec861cd76adb834
|
/results/fig3_gsebias-sim-plots-tables.R
|
e6ad7446086c1bfd7bc62050d28fd15943d0bc37
|
[] |
no_license
|
metamaden/recountmethylation_flexible-blood-analysis_manuscript
|
ec9ba3666db953430ec1be509a826d45fba97f57
|
ec835f346da6bcb628ac262d22c5827936610981
|
refs/heads/main
| 2023-04-16T20:18:31.234484
| 2023-02-02T20:33:38
| 2023-02-02T20:33:38
| 401,501,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,219
|
r
|
fig3_gsebias-sim-plots-tables.R
|
#!/usr/bin/env R
# Author: Sean Maden
#
# Make heatmaps of GSE bias simulations results.
library(ggplot2); library(data.table)
library(scales); library(gridExtra)
library(ggpubr)
# library(magick)
#----------
# load data
#----------
# load data tables
# sum of squared variances table
msq.fname <- "msq-gse-bias_all-blood-2-platforms.rda"
msq <- get(load(msq.fname))
# fev differences table
mdif.fname <- "mdiff-gse-bias_all-blood-2-platforms.rda"
mdif <- get(load(mdif.fname))
#---------------------------------------
# fraction explained variances plot data
#---------------------------------------
# Median FEV per model term across all simulations.
# NOTE(review): columns 1:39 are assumed to hold the per-term variance
# fractions for all three models -- confirm against the msq table layout.
dfp.fev <- apply(msq[,c(1:39)], 2, function(ci){
  median(as.numeric(ci), na.rm=T)})
# format heatmap data
# model terms, in the order they should appear on the plot axis
lvlv <- c("gse", "predsex", "predcell.Mono", "predcell.NK",
          "predcell.CD4T", "predage", "predcell.Bcell", "predcell.CD8T",
          "predcell.Gran", "platform", "glint.epi.pc2", "glint.epi.pc1",
          "Residuals")
dfp.fev <- data.frame(var = names(dfp.fev), value = as.numeric(dfp.fev))
# column names encode "<term>_<model>"; split into separate variables
dfp.fev$model <- gsub(".*_", "", dfp.fev$var)
dfp.fev$var <- gsub("_.*", "", dfp.fev$var)
dfp.fev$`Median\nFEV` <- as.numeric(dfp.fev$value)
dfp.fev$var <- factor(dfp.fev$var, levels = lvlv)
dfp.fev$model <- factor(dfp.fev$model,
                        levels = c("unadj", "adj1", "adj2"))
# percentage labels for plot annotation
dfp.fev$value.label <- round(100*dfp.fev$value, digits = 2)
#-----------------------------------------------
# main fig -- compare var cat dist, violin plots
#-----------------------------------------------
# Main figure: distributions of category-level FEV (technical, demographic,
# biological) across simulations, one violin per model type.
# get plot data
# get fev binned on type
typev <- c("unadj", "adj1", "adj2")
# model terms grouped into variable categories
lvarv <- list(technical = c("platform", "gse"),
              demographic = c("predage", "predsex", "glint.epi.pc2",
                              "glint.epi.pc1"),
              biological = c("predcell.CD8T", "predcell.CD4T", "predcell.NK",
                             "predcell.Bcell", "predcell.Mono", "predcell.Gran"))
# drop residual-variance columns; total the remaining FEV per simulation/model
msqf <- msq[,!grepl("^Residuals.*", colnames(msq))]
ltot.fev <- lapply(typev, function(ti){apply(msqf[,grepl(ti, colnames(msqf))], 1, sum, na.rm = T)})
names(ltot.fev) <- typev
# get plot data object -- long format: one row per (simulation, category, model)
dfp <- do.call(rbind, lapply(names(lvarv), function(vari){
  varvii <- lvarv[[vari]]
  do.call(rbind, lapply(names(ltot.fev), function(ti){
    fev.fract.denom <- ltot.fev[[ti]]
    msqff <- msqf[,grepl(ti, colnames(msqf))]
    # get vector of category ssq var
    which.cnamev <- grepl(paste0(varvii, collapse = "|"), colnames(msqff))
    msqff <- msqff[, which.cnamev, drop = F]
    ssqv <- apply(msqff, 1, sum, na.rm = T)
    fev.cat.fractv <- ssqv/fev.fract.denom # get fraction fev by cat
    dfi <- data.frame(fev.fract = fev.cat.fractv)
    dfi$vartype <- vari
    dfi$modeltype <- ti
    return(dfi)
  }))
}))
# get plot objects -- human-readable model labels, fixed plotting order
dfp$`Model type` <- ifelse(dfp$modeltype=="unadj", "unadjusted",
                           ifelse(dfp$modeltype=="adj1", "adjustment 1", "adjustment 2"))
lvlv <- c("unadjusted", "adjustment 1", "adjustment 2")
dfp$`Model type` <- factor(dfp$`Model type`, levels = lvlv)
dfp$FEV <- dfp$fev.fract
# format plot vars
catv <- c("technical", "demographic", "biological")
# leading spaces pad the panel titles so they sit roughly centered
tech.str <- paste0(paste0(rep(" ", 13), collapse = ""), "Technical", collapse = "")
biol.str <- paste0(paste0(rep(" ", 5), collapse = ""), "Biological", collapse = "")
demo.str <- paste0(paste0(rep(" ", 2), collapse = ""), "Demographic", collapse = "")
# get list of plot objects -- one violin panel per variable category
text.size <- 10; title.size <- 12
lgg <- lapply(catv, function(cati){
  dfpi <- dfp[dfp$vartype == cati,]
  ggvp <- ggplot(dfpi, aes(y = FEV, x = `Model type`, fill = `Model type`)) +
    geom_violin(draw_quantiles = 0.5) + theme_bw() +
    theme(axis.text.x = element_blank(), axis.title.x = element_blank(),
          legend.position = "none", plot.title = element_text(size = title.size),
          axis.text.y = element_text(size = text.size))
  # only the leftmost (biological) panel keeps its y-axis title
  if(cati == "biological"){
    ggvp <- ggvp + ggtitle(biol.str) +
      theme(axis.title.y = element_text(size = title.size))
  }
  if(cati == "demographic"){
    ggvp <- ggvp + ggtitle(demo.str) +
      theme(axis.title.y = element_blank())
  }
  if(cati == "technical"){
    ggvp <- ggvp + ggtitle(tech.str) +
      theme(axis.title.y = element_blank())
  }
  return(ggvp)
})
names(lgg) <- catv
# get zoom panel for technical
# get plot legend -- extract the shared legend from a throwaway plot
pl <- ggplot(dfp, aes(y = FEV, x = `Model type`, fill = `Model type`)) +
  geom_violin(draw_quantiles = 0.5) + theme_bw() +
  theme(legend.title = element_text(size = title.size),
        legend.text = element_text(size = text.size))
lgg[["legend"]] <- get_legend(pl)
# save new plot
plot.fname <- "ggvp_fev-byvarcat_gsebias"
mg.pgn.fname <- "magnifying_glass_bgtransparent.png"
# get plot params -- three equal panels plus a narrower legend column
lm <- matrix(c(1,1,2,2,3,3,4), nrow = 1)
# save new pdf
pdf(paste0(plot.fname, ".pdf"), 7.8, 1.8)
grid.arrange(lgg[["biological"]], lgg[["demographic"]], lgg[["technical"]],
             lgg[["legend"]], layout_matrix = lm)
dev.off()
#--------------------
# sfigs, compare fevs
#--------------------
# Supplemental figures: compare each simulation's total FEV before vs.
# after model adjustment (adjustment 1 and adjustment 2).
# get plot data
dfp1 <- data.frame(unadj = ltot.fev$unadj, adj.val = ltot.fev$adj1)
dfp2 <- data.frame(unadj = ltot.fev$unadj, adj.val = ltot.fev$adj2)
dfp1$adj.type <- "adj. 1"; dfp2$adj.type <- "adj. 2"
dfp <- rbind(dfp1, dfp2)
# fract fev -- fraction of unadjusted FEV retained after adjustment
dfp$fract.fev <- dfp$adj.val/dfp$unadj
# plot scatterplot fev
# FIX: `draw_quantiles` is a geom_violin() argument, not a geom_point()
# argument; ggplot2 ignored it with a warning, so it is removed here.
ggpt <- ggplot(dfp, aes(x = unadj, y = adj.val)) +
  geom_point() + theme_bw()
ggpt <- ggpt + facet_wrap(~adj.type, ncol = 2)
pdf("ggpt_fev-adj-unadj_gsebias.pdf", 5.5, 3.5)
print(ggpt); dev.off()
# 2d density plot
ggpt <- ggplot(dfp, aes(x = unadj, y = adj.val)) + geom_bin2d(bins = 70) +
  scale_fill_continuous(type = "viridis") + theme_bw() +
  xlab("Unadjusted FEV") + ylab("Adjusted FEV") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
ggpt <- ggpt + facet_wrap(~adj.type, ncol = 2)
pdf("ggdensity_fev-adj-unadj_gsebias.pdf", 3.5, 1.8)
print(ggpt); dev.off()
# plot fraction fev
# get plot object
# FIX: the original passed `show_quantiles = 0.5`, which geom_violin()
# silently ignores (the argument is `draw_quantiles`), so the intended
# median line was never drawn on the violins.
ggvp <- ggplot(dfp, aes(x = adj.type, y = fract.fev, group = adj.type)) +
  geom_violin(draw_quantiles = 0.5) + theme_bw() +
  ylab("FEV fraction\n(Adj./Unadj.)") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1),
        axis.title.x = element_blank())
# save new plot
pdf('ggvp_fev-fract_gsebias.pdf', 2.5, 1.5)
print(ggvp);dev.off()
# summary stats for reporting (inline values are from the original run)
median(dfp[dfp$adj.type=="adj. 1",]$fract.fev) # 0.6882031
median(dfp[dfp$adj.type=="adj. 2",]$fract.fev) # 0.6841613
var(dfp[dfp$adj.type=="adj. 1",]$fract.fev) # 0.07646976
var(dfp[dfp$adj.type=="adj. 2",]$fract.fev) # 0.07648388
sd(dfp[dfp$adj.type=="adj. 1",]$fract.fev) # 0.2765317
sd(dfp[dfp$adj.type=="adj. 2",]$fract.fev) # 0.2765572
#----------------------------
# get data for fev dist plots
#----------------------------
# Per-simulation FEV fractions by variable category, streamed to CSV and
# also collected into `dfp` for the saved .rda.
# get fev binned on type
varv.technical <- c("platform")
varv.dem <- c("predage", "predsex", "glint.epi.pc2", "glint.epi.pc1")
varv.bio <- c("predcell.CD8T", "predcell.CD4T",
              "predcell.NK", "predcell.Bcell",
              "predcell.Mono", "predcell.Gran")
lvarv <- list(technical = varv.technical,
              demographic = varv.dem,
              biological = varv.bio)
typev <- c("unadj", "adj1", "adj2")
# write header row to the new results table
dfp.fname <- "dfp_fev-bycat_gse-bias_blood-4stypes.csv"
mcname <- matrix(c("vartype", "fev", "modeltype"), nrow = 1)
data.table::fwrite(mcname, file = dfp.fname, sep = ",",
                   row.names = F, col.names = F, append = F)
# iterate on sims -- one (category, model) row set per simulation
dfp <- do.call(rbind, lapply(seq(nrow(msq)), function(ri){
  #message(ri);
  ridat <- msq[ri,]
  dfi <- do.call(rbind, lapply(typev, function(ti){
    rii <- ridat[grepl(ti, names(ridat))]
    rii <- rii[!grepl('Residuals', names(rii))]
    total.var <- sum(as.numeric(rii), na.rm = T)
    do.call(rbind, lapply(names(lvarv), function(vi){
      rii.vi <- rii[paste0(lvarv[[vi]], "_", ti)]
      if(!vi == "technical"){rii.vi <- sum(rii.vi, na.rm = T)}
      rfract <- as.numeric(rii.vi)/total.var
      data.frame(vartype = vi, fev = rfract, modeltype = ti)
    }))
  }))
  data.table::fwrite(dfi, file = dfp.fname, sep = ",",
                     row.names = F, col.names = F, append = T)
  # FIX: fwrite() returns NULL invisibly; the original ended the lambda
  # with the fwrite() call, so do.call(rbind, ...) produced NULL and the
  # saved `dfp` object was empty. Return dfi so rows accumulate.
  dfi
}))
# save plot data
dfp.fname <- "dfp_fev-bycat_gse-bias_blood-4stypes.rda"
save(dfp, file = dfp.fname)
#--------------------------------------
# table s2 -- median fevs by model, var
#--------------------------------------
# get var categories -- median FEV per category (rows) by model (columns)
grpv <- c("unadj", "adj1", "adj2")
filtv <- c("biological", "demographic", "technical")
tfev <- do.call(cbind, lapply(grpv, function(grpi){
  dfpi <- dfp[dfp$modeltype==grpi,]
  unlist(lapply(filtv, function(filti){
    dfpii <- dfpi[dfpi$vartype==filti,]
    median(dfpii$fev, na.rm = T)
  }))
}))
colnames(tfev) <- grpv
rownames(tfev) <- filtv
# get variable-wise fev
dim(msq)
# drop residual-variance columns before totaling
msq.filt <- msq[,!grepl("^Residuals.*", colnames(msq))]
dim(msq.filt)
# total vars by sim
ltot.fev <- lapply(grpv, function(grpi){
  apply(msq.filt[,grepl(grpi, colnames(msq.filt))], 1,
        function(ri){sum(ri, na.rm = T)})
})
names(ltot.fev) <- grpv
# parse fevs by var
# NOTE(review): assumes the first 36 columns are the per-term columns --
# confirm against the msq table layout.
filtv <- unique(gsub("_.*", "", colnames(msq.filt)[1:36]))
tfev.bind <- do.call(cbind, lapply(grpv, function(grpi){
  msqi <- msq.filt[,grepl(grpi, colnames(msq.filt))]
  tot.fevi <- ltot.fev[[grpi]]
  unlist(lapply(filtv, function(filti){
    fractvi <- msqi[,grepl(filti, colnames(msqi))]/tot.fevi
    median(fractvi, na.rm = T)
  }))
}))
colnames(tfev.bind) <- grpv
rownames(tfev.bind) <- filtv
# bind all results -- category medians on top, per-term medians below
st2 <- rbind(tfev, tfev.bind)
t(round(st2, 3))
#---------------------------------
# violin plots with technical zoom -- OLD
#---------------------------------
# Older version of the main figure that adds a zoomed inset on the
# technical panel (via a local facet_zoom2); superseded by the version
# above but retained for reference.
source("facet_zoom2.R")
# library(png)
catv <- c("technical", "demographic", "biological")
# leading spaces pad the panel titles so they sit roughly centered
tech.str <- paste0(paste0(rep(" ", 13), collapse = ""),
                   "Technical", collapse = "")
biol.str <- paste0(paste0(rep(" ", 5), collapse = ""),
                   "Biological", collapse = "")
demo.str <- paste0(paste0(rep(" ", 2), collapse = ""),
                   "Demographic", collapse = "")
text.size <- 10
title.size <- 12
lgg <- lapply(catv, function(cati){
  dfpi <- dfp[dfp$vartype == cati,]
  ggvp <- ggplot(dfpi, aes(y = FEV, x = `Model type`, fill = `Model type`)) +
    geom_violin(draw_quantiles = 0.5) + theme_bw() +
    theme(axis.text.x = element_blank(), axis.title.x = element_blank(),
          legend.position = "none", plot.title = element_text(size = title.size),
          axis.text.y = element_text(size = text.size))
  if(cati == "biological"){
    ggvp <- ggvp + ggtitle(biol.str) +
      theme(axis.title.y = element_text(size = title.size))
  }
  if(cati == "demographic"){
    ggvp <- ggvp + ggtitle(demo.str) +
      theme(axis.title.y = element_blank())
  }
  if(cati == "technical"){
    # zoom in on the near-zero technical FEV values
    ggvp <- ggvp + ggtitle(tech.str) +
      theme(axis.title.y = element_blank()) +
      facet_zoom2(ylim = c(0, 0.01))
  }
  return(ggvp)
})
names(lgg) <- catv
# get zoom panel for technical
# get plot legend -- extract the shared legend from a throwaway plot
pl <- ggplot(dfp, aes(y = FEV, x = `Model type`, fill = `Model type`)) +
  geom_violin(draw_quantiles = 0.5) + theme_bw() +
  theme(legend.title = element_text(size = title.size),
        legend.text = element_text(size = text.size))
lgg[["legend"]] <- get_legend(pl)
# save new plot
plot.fname <- "ggviolin_fev-byvarcat_gsebias"
mg.pgn.fname <- "magnifying_glass_bgtransparent.png"
# get plot params -- technical column is wider to fit the zoom inset
lm <- matrix(c(1,1,1,1,1,1,1,1,1,
               2,2,2,2,2,2,2,2,
               3,3,3,3,3,3,3,3,3,3,3,3,3,
               4,4,4,4,4,4), nrow = 1)
# save new pdf
pdf(paste0(plot.fname, ".pdf"), 7.8, 1.8)
grid.arrange(lgg[["biological"]], lgg[["demographic"]],
             lgg[["technical"]], lgg[["legend"]],
             layout_matrix = lm)
dev.off()
# Produce image using graphics device
# (commented-out magick-based compositing of a magnifying-glass icon)
# fig <- image_graph(width = 800, height = 200, res = 110)
# ggplot2::qplot(mpg, wt, data = mtcars, colour = cyl)
#grid.arrange(lgg[["biological"]], lgg[["demographic"]], lgg[["technical"]],
#             lgg[["legend"]], layout_matrix = lm)
#dev.off()
#mg.image <- image_scale(image_read(mg.pgn.fname), "x22")
#out <- image_composite(fig, mg.image,
#                       offset = geometry_point(475, -175))
#print(out)
|
d99949f033fc334728e8bac8e7bb83cfb1482b96
|
ed640b2eab34ddbde1435b83aa29d49d2c01422d
|
/man/vda.Rd
|
b30981c711d5c8308adbfc854b0caa5d1df62d2e
|
[] |
no_license
|
cran/rcompanion
|
4cf285cf6d43197e55df85de86d23904f9418c37
|
dea4b790b5d78fe350ff303e5c04603c7e672ae1
|
refs/heads/master
| 2023-05-12T14:48:28.937161
| 2023-05-05T07:20:05
| 2023-05-05T07:20:05
| 67,362,460
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,059
|
rd
|
vda.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vda.r
\name{vda}
\alias{vda}
\title{Vargha and Delaney's A}
\usage{
vda(
formula = NULL,
data = NULL,
x = NULL,
y = NULL,
ci = FALSE,
conf = 0.95,
type = "perc",
R = 1000,
histogram = FALSE,
reportIncomplete = FALSE,
brute = FALSE,
verbose = FALSE,
digits = 3,
...
)
}
\arguments{
\item{formula}{A formula indicating the response variable and
the independent variable. e.g. y ~ group.}
\item{data}{The data frame to use.}
\item{x}{If no formula is given, the response variable for one group.}
\item{y}{The response variable for the other group.}
\item{ci}{If \code{TRUE}, returns confidence intervals by bootstrap.
May be slow.}
\item{conf}{The level for the confidence interval.}
\item{type}{The type of confidence interval to use.
Can be any of "\code{norm}", "\code{basic}",
"\code{perc}", or "\code{bca}".
Passed to \code{boot.ci}.}
\item{R}{The number of replications to use for bootstrap.}
\item{histogram}{If \code{TRUE}, produces a histogram of bootstrapped values.}
\item{reportIncomplete}{If \code{FALSE} (the default),
\code{NA} will be reported in cases where there
are instances of the calculation of the statistic
failing during the bootstrap procedure.}
\item{brute}{If \code{FALSE}, the default, the statistic is based on the
U statistic from the \code{wilcox.test} function.
If \code{TRUE}, the function will compare values
in the two samples directly.}
\item{verbose}{If \code{TRUE}, reports the proportion of ties and
the proportions of (Ya > Yb) and (Ya < Yb).}
\item{digits}{The number of significant digits in the output.}
\item{...}{Additional arguments passed to the \code{wilcox.test} function.}
}
\value{
A single statistic, VDA.
Or a small data frame consisting of VDA,
and the lower and upper confidence limits.
}
\description{
Calculates Vargha and Delaney's A (VDA)
with confidence intervals by bootstrap
}
\details{
VDA is an effect size statistic appropriate
in cases where a Wilcoxon-Mann-Whitney test might be used.
It ranges from 0 to 1, with 0.5 indicating stochastic equality,
and 1 indicating that the first group dominates the second.
By default, the function calculates VDA from the "W" U statistic
from the \code{wilcox.test} function.
Specifically, \code{VDA = U/(n1*n2)}.
The input should include either \code{formula} and \code{data};
or \code{x}, and \code{y}. If there are more than two groups,
only the first two groups are used.
Currently, the function makes no provisions for \code{NA}
values in the data. It is recommended that \code{NA}s be removed
beforehand.
When the data in the first group are greater than
in the second group, VDA is greater than 0.5.
When the data in the second group are greater than
in the first group, VDA is less than 0.5.
Be cautious with this interpretation, as R will alphabetize
groups in the formula interface if the grouping variable
is not already a factor.
When VDA is close to 0 or close to 1,
or with small sample size,
the confidence intervals
determined by this
method may not be reliable, or the procedure may fail.
}
\note{
The parsing of the formula is simplistic.
The first variable on the
left side is used as the measurement variable.
The first variable on the
right side is used for the grouping variable.
}
\examples{
data(Catbus)
vda(Steps ~ Gender, data=Catbus)
}
\references{
\url{http://rcompanion.org/handbook/F_04.html}
}
\seealso{
\code{\link{cliffDelta}},
\code{\link{multiVDA}}
}
\author{
Salvatore Mangiafico, \email{mangiafico@njaes.rutgers.edu}
}
\concept{Vargha and Delaney's A}
\concept{Wilcoxon-Mann-Whitney}
\concept{confidence interval}
\concept{effect size}
|
d5aa6a34d882ea9cc42f2637cef2f633cd2785cf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kmconfband/examples/noe.compute.cgh.Rd.R
|
1b5670746c2ce0444f6cd4e9b9901ed3885a2698
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 406
|
r
|
noe.compute.cgh.Rd.R
|
# Generated example script for kmconfband::noe.compute.cgh.
library(kmconfband)
### Name: noe.compute.cgh
### Title: Intermediate Steps in the Noe Recursions for the Exact Coverage
###   Probability of a Nonparametric Confidence Band for the Survivor
###   Function
### Aliases: noe.compute.cgh
### ** Examples
## Check of Noe recursion calculations.
# a = lower band limits, b = upper band limits, for a sample of size 4
a<-c(0.001340,0.028958,0.114653,0.335379)
b<-c(0.664621,0.885347,0.971042,0.998660)
noe.compute.cgh(4,a,b)
|
988ea5e6b48fbba3aaf1af388d91be9a69a0edd4
|
79f67b255bf060cbe2847541df4aff09d4c339e9
|
/no1.R
|
4c423c619645206b283fd6edba507aec19bb23d3
|
[] |
no_license
|
mspub/R_TEST
|
6c7fabe10016c4984e6c2b8397dd1b2480a23285
|
28f8a2dd6cc8373607744905f4fc9b429e341f7b
|
refs/heads/master
| 2021-01-19T00:01:36.674840
| 2016-07-13T04:46:29
| 2016-07-13T04:46:29
| 63,214,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
no1.R
|
# Interactive practice script: basic exploration of iris, vector
# subsetting, and data-frame subsetting.
?install.packages
old.packages()
head(iris,n=10)
summary(iris)
var(iris$Sepal.Length)
(v<-c(1,3,4,6))
a<-v[c(1,2,3)]
a
v
v[v>2]
v>2
v[-1]
v[-3]
# FIX: removed the line `v[-lenghth(v)]` -- `lenghth` is a typo for
# `length`; the "could not find function" error aborts the script when
# it is sourced. The corrected call follows.
v[-length(v)]
data <- read.table(header=T, text='
 subject sex size
1   M    7
2   F    6
3   M    11
')
data
data[1,3]
data
data[1:2,]
# FIX: added the missing comma -- `data[data$subject <3]` is single-index
# data-frame subsetting, which selects *columns*, not the intended rows.
data[data$subject < 3, ]
(b<-4)
v
subset(data, subject<3, select=-subject)
data
subset(data, subject<3 & sex=="M")
|
608d69d6212c47a481bf6c193c524a0d55c0ebcb
|
7eb2898925eb5b0f04729cc8c151cd751277569a
|
/R/query.r
|
92c389ccb2f027d531811dfbe474400aa765cdc4
|
[
"MIT"
] |
permissive
|
hrbrmstr/sergeant
|
d23a7443136d921a9a5fdf4d8c219076afa55af5
|
9408208e41ecb2636d4a9856d32789ffb93b9d96
|
refs/heads/master
| 2022-05-06T11:56:21.690396
| 2021-11-29T18:06:59
| 2021-11-29T18:06:59
| 60,310,735
| 137
| 20
|
NOASSERTION
| 2021-11-29T18:07:00
| 2016-06-03T02:03:16
|
R
|
UTF-8
|
R
| false
| false
| 7,088
|
r
|
query.r
|
#' Submit a query and return results
#'
#' This function can handle REST API connections or JDBC connections. There is a benefit to
#' calling this function for JDBC connections vs a straight call to \code{dbGetQuery()} in
#' that the function result is a `tbl_df` vs a plain \code{data.frame} so you get better
#' default printing (which can be helpful if you accidentally execute a query and the result
#' set is huge).
#'
#' @param drill_con drill server connection object setup by \code{drill_connection()} or
#'                  \code{drill_jdbc()})
#' @param query query to run
#' @param uplift automatically run \code{drill_uplift()} on the result? (default: \code{TRUE},
#'               ignored if \code{drill_con} is a \code{JDBCConnection} created by
#'               \code{drill_jdbc()})
#' @param .progress if \code{TRUE} (default if in an interactive session) then ask
#'                  \code{httr::RETRY} to display a progress bar
#' @references \href{https://drill.apache.org/docs/}{Drill documentation}
#' @family Drill direct REST API Interface
#' @export
#' @examples
#' try({
#'   drill_connection() %>%
#'     drill_query("SELECT * FROM cp.`employee.json` limit 5")
#' }, silent=TRUE)
drill_query <- function(drill_con, query, uplift=TRUE, .progress=interactive()) {
  # normalize the query: strip surrounding whitespace and a trailing
  # semicolon before submitting
  query <- trimws(query)
  query <- gsub(";$", "", query)
  if (inherits(drill_con, "JDBCConnection")) {
    try_require("rJava")
    try_require("RJDBC")
    try_require("sergeant.caffeinated")
    tibble::as_tibble(dbGetQuery(drill_con, query))
  } else {
    drill_server <- make_server(drill_con)
    # FIX: the original duplicated the entire httr::RETRY() call in two
    # branches that differed only by httr::progress(); build the argument
    # list once and add the progress config conditionally instead.
    retry_args <- list(
      verb = "POST",
      url = sprintf("%s/query.json", drill_server),
      encode = "json",
      body = list(
        queryType = "SQL",
        query = query
      ),
      terminate_on = c(403, 404)
    )
    if (.progress) retry_args <- c(retry_args, list(httr::progress()))
    res <- do.call(httr::RETRY, retry_args)
    jsonlite::fromJSON(
      httr::content(res, as="text", encoding="UTF-8"),
      flatten=TRUE
    ) -> out
    if ("errorMessage" %in% names(out)) {
      # surface Drill's error message; return the raw payload invisibly so
      # callers can still inspect it
      message(sprintf("Query ==> %s\n%s\n", gsub("[\r\n]", " ", query), out$errorMessage))
      invisible(out)
    } else {
      if (uplift) out <- drill_uplift(out)
      out
    }
  }
}
#' Turn columnar query results into a type-converted tbl
#'
#' If you know the result of `drill_query()` will be a data frame, then
#' you can pipe it to this function to pull out `rows` and automatically
#' type-convert it.
#'
#' Not really intended to be called directly, but useful if you accidentally ran
#' \code{drill_query()} without `uplift=TRUE` but want to then convert the structure.
#'
#' @param query_result the result of a call to `drill_query()`
#' @references \href{https://drill.apache.org/docs/}{Drill documentation}
#' @export
drill_uplift <- function(query_result) {
  if (length(query_result$columns) != 0) {
    # FIX: the original also performed this reorder *unconditionally*
    # before the is.data.frame() guard below. That first copy was redundant
    # with this one and would error whenever `rows` is not a data frame
    # (e.g. an empty result set parsed by jsonlite), making the typed
    # empty-frame fallback unreachable. Only the guarded version is kept.
    if (is.data.frame(query_result$rows)) {
      if (nrow(query_result$rows) > 0) {
        # restrict/reorder columns to the order Drill reported
        query_result$rows <- query_result$rows[,query_result$columns,drop=FALSE]
      }
    } else {
      # no row data: construct a zero-row data frame with one column per
      # reported column, typed according to the REST metadata, and return it
      lapply(seq_len(length(query_result$columns)), function(col_idx) {
        ctype <- query_result$metadata[col_idx]
        if (ctype == "INT") {
          integer(0)
        } else if (ctype == "VARCHAR") {
          character(0)
        } else if (ctype == "TIMESTAMP") {
          cx <- integer(0)
          class(cx) <- "POSIXct"
          cx
        } else if (ctype == "BIGINT") {
          integer64(0)
        } else if (ctype == "BINARY") {
          character(0)
        } else if (ctype == "BOOLEAN") {
          logical(0)
        } else if (ctype == "DATE") {
          cx <- integer(0)
          class(cx) <- "Date"
          cx
        } else if (ctype == "FLOAT") {
          numeric(0)
        } else if (ctype == "DOUBLE") {
          double(0)
        } else if (ctype == "TIME") {
          character(0)
        } else if (ctype == "INTERVAL") {
          character(0)
        } else {
          character(0)
        }
      }) -> xdf
      xdf <- set_names(xdf, query_result$columns)
      class(xdf) <- c("data.frame")
      return(xdf)
    }
  } else {
    # no columns reported at all: nothing to convert
    xdf <- dplyr::tibble()
    return(xdf)
  }
  # ** only available in Drill 1.15.0+ **
  # be smarter about type conversion now that the REST API provides
  # the necessary metadata
  if (length(query_result$metadata)) {
    if ("BIGINT" %in% query_result$metadata) {
      # warn once per session (unless disabled via option) that BIGINT
      # precision is lost because jsonlite parses 64-bit ints as doubles
      if (!.pkgenv$bigint_warn_once) {
        if (getOption("sergeant.bigint.warnonce", TRUE)) {
          warning(
            "One or more columns are of type BIGINT. ",
            "The sergeant package currently uses jsonlite::fromJSON() ",
            "to process Drill REST API result sets. Since jsonlite does not ",
            "support 64-bit integers BIGINT columns are initially converted ",
            "to numeric since that's how jsonlite::fromJSON() works. This is ",
            "problematic for many reasons, including trying to use 'dplyr' idioms ",
            "with said converted BIGINT-to-numeric columns. It is recommended that ",
            "you 'CAST' BIGINT columns to 'VARCHAR' prior to working with them from ",
            "R/'dplyr'.\n\n",
            "If you really need BIGINT/integer64 support, consider using the ",
            "R ODBC interface to Apache Drill with the MapR ODBC drivers.\n\n",
            "This informational warning will only be shown once per R session and ",
            "you can disable them from appearing by setting the 'sergeant.bigint.warnonce' ",
            "option to 'FALSE' (i.e. options(sergeant.bigint.warnonce = FALSE)).",
            call.=FALSE
          )
        }
        .pkgenv$bigint_warn_once <- TRUE
      }
    }
    # map each Drill column type to a readr column-type shorthand
    # ("?" = let readr guess)
    sapply(seq_len(length(query_result$columns)), function(col_idx) {
      cname <- query_result$columns[col_idx]
      ctype <- query_result$metadata[col_idx]
      case_when(
        ctype == "INT" ~ "i",
        ctype == "VARCHAR" ~ "c",
        ctype == "TIMESTAMP" ~ "?",
        ctype == "BIGINT" ~ "?",
        ctype == "BINARY" ~ "c",
        ctype == "BOOLEAN" ~ "l",
        ctype == "DATE" ~ "?",
        ctype == "FLOAT" ~ "d",
        ctype == "DOUBLE" ~ "d",
        ctype == "TIME" ~ "c",
        ctype == "INTERVAL" ~ "?",
        TRUE ~ "?"
      )
    }) -> col_types
    suppressMessages(
      tibble::as_tibble(
        readr::type_convert(
          df = query_result$rows,
          col_types = paste0(col_types, collapse=""),
          na = character()
        )
      )
    ) -> xdf
  } else {
    # no metadata (Drill < 1.15): let readr guess every column type
    suppressMessages(
      tibble::as_tibble(
        readr::type_convert(df = query_result$rows, na = character())
      )
    ) -> xdf
  }
  xdf
}
|
edd91a6f6676957e9e0575fc6d4b637cb7a8b246
|
f7ec59a2df6950794de998015e3b6372865e34df
|
/A549/scripts/chris/NB_balance/analysis_GainLossNB_genes.R
|
f693df9290e0d3673d979ae491a384f01ad6c469
|
[] |
no_license
|
ArnaudDroitLab/sb_cofactor
|
e89e0ec8ce54033be723ec34d2455834dba9bf2d
|
77e5c922ecae7b4c66f140f5525085ab03344ec1
|
refs/heads/master
| 2021-01-23T05:29:40.591863
| 2019-09-27T17:30:41
| 2019-09-27T17:30:41
| 92,969,805
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,844
|
r
|
analysis_GainLossNB_genes.R
|
# setwd("/Users/chris/Desktop/sb_cofactor_hr/A549")
# Analysis of NB binding-site gains/losses after DEX treatment, split by
# overlap with GR binding (ovGR / notovGR): peak widths, annotation, and
# fold-change time-series plots for the associated genes.
setwd("/home/chris/Bureau/sb_cofactor_hr/A549")
source("scripts/ckn_utils.R")
library(ChIPseeker)
# Loading peaks -- inline counts are from the original run
peaks_dir <- "output/chip-pipeline-GRCh38/peak_call/A549_NB"
gainNB_ovGR <- rtracklayer::import(con = file.path(peaks_dir, "NB_DEX_to_None_CTRL_ovGR_hg38.bed")); print(length(gainNB_ovGR)) # 399
gainNB_notovGR <- rtracklayer::import(con = file.path(peaks_dir, "NB_DEX_to_None_CTRL_notovGR_hg38.bed")); print(length(gainNB_notovGR)) # 3
lossNB_ovGR <- rtracklayer::import(con = file.path(peaks_dir, "NB_CTRL_to_None_DEX_ovGR_hg38.bed")); print(length(lossNB_ovGR)) # 561
lossNB_notovGR <- rtracklayer::import(con = file.path(peaks_dir, "NB_CTRL_to_None_DEX_notovGR_hg38.bed")); print(length(lossNB_notovGR)) # 904
# Width -- peak-width distributions per set
summary(width(gainNB_ovGR))
hist(width(gainNB_ovGR), breaks = 60)
summary(width(gainNB_notovGR))
hist(width(gainNB_notovGR), breaks = 60)
summary(width(lossNB_ovGR))
hist(width(lossNB_ovGR), breaks = 60)
summary(width(lossNB_notovGR))
hist(width(lossNB_notovGR), breaks = 60)
# Annotation -- annotatePeaks() is presumably defined in ckn_utils.R
gainNB_ovGR_annodf <- annotatePeaks(gainNB_ovGR, output = "df")
gainNB_notovGR_annodf <- annotatePeaks(gainNB_notovGR, output = "df")
lossNB_ovGR_annodf <- annotatePeaks(lossNB_ovGR, output = "df")
lossNB_notovGR_annodf <- annotatePeaks(lossNB_notovGR, output = "df")
# Retrieve genes which gain or lose NBC at the promoters
geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(Annot %in% c("Promoter")) %>% pull(geneId) %>% unique
geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(Annot == "Promoter") %>% pull(geneId) %>% unique
geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(Annot == "Promoter") %>% pull(geneId) %>% unique
geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(Annot == "Promoter") %>% pull(geneId) %>% unique
# gene symbols: from all annotated peaks ("all") vs promoter peaks only ("prom")
symbol_all_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_all_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_all_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_all_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% pull(SYMBOL) %>% unique
symbol_prom_geneGainNB_ovGR <- gainNB_ovGR_annodf %>% filter(Annot == "Promoter") %>% pull(SYMBOL) %>% unique
symbol_prom_geneGainNB_notovGR <- gainNB_notovGR_annodf %>% filter(Annot == "Promoter") %>% pull(SYMBOL) %>% unique
symbol_prom_geneLossNB_ovGR <- lossNB_ovGR_annodf %>% filter(Annot == "Promoter") %>% pull(SYMBOL) %>% unique
symbol_prom_geneLossNB_notovGR <- lossNB_notovGR_annodf %>% filter(Annot == "Promoter") %>% pull(SYMBOL) %>% unique
#########
# check whether selected DEX-responsive genes fall in the gain-with-GR set
upDEX <- c("PER1", "ZFP36", "ERRFI1", "ANGPTL4", "NR1D2", "CRY2")
upDEX_in_gainNB <- upDEX %in% symbol_all_geneGainNB_ovGR; names(upDEX_in_gainNB) <- upDEX
upDEX_in_gainNB
downDEX <- c("IL11")
downDEX_in_gainNB <- downDEX %in% symbol_all_geneGainNB_ovGR; names(downDEX_in_gainNB) <- downDEX
downDEX_in_gainNB
######################
# Draw FC time series
######################
source("scripts/reddy_time_series/draw_graph_log2FC_0-12h.R")
geneGroupList <- list("GainNB_ovGR_withGR" = geneGainNB_ovGR,
                      "GainNB_notovGR_withGR" = geneGainNB_notovGR,
                      "LossNB_withGR" = geneLossNB_ovGR,
                      "LossNB_withoutGR" = geneLossNB_notovGR)
draw_time_course_FC(geneGainNB_ovGR)
draw_time_course_FC(gainNB_ovGR_annodf %>% pull(geneId) %>% unique)
draw_time_course_FC(geneGainNB_notovGR)
draw_time_course_FC(geneLossNB_ovGR)
draw_time_course_FC(geneLossNB_notovGR)
draw_time_course_pergroup_FC(geneGroupList)
# geneLossNBC_ovGR: repressive action of GR via direct binding
# geneLossNBC_notovGR: first observations show no large changes in gene-expression fold change; a reservoir of cofactors?
gainNB_ovGR_annodf %>% filter(distanceToTSS > 500000)
|
9ce85901e01b6cbd3d57b7ff69769b237d4aa5e8
|
b6ca93afe5eecaf5bb8a5f2989095da324c58b8c
|
/RealData/Code/JSMultistateInfFunctions.R
|
9e669c4ec2d13dacc2186aca4837f99a4090f5f7
|
[] |
no_license
|
angieluis/BayesianMarkRecapSNV
|
51a6428770cb39202c090b4a287008f00ab74916
|
b67a59194b3282cd41f3b13946a7437eecc46eb8
|
refs/heads/master
| 2023-02-16T15:10:48.702511
| 2023-02-10T20:16:33
| 2023-02-10T20:16:33
| 119,726,664
| 7
| 2
| null | 2018-09-28T20:17:12
| 2018-01-31T18:31:42
|
R
|
UTF-8
|
R
| false
| false
| 3,318
|
r
|
JSMultistateInfFunctions.R
|
# Log-odds (logit) transform: maps a probability in (0, 1) onto the
# real line.
logit <- function(x) {
  log(x / (1 - x))
}
# Inverse-logit transform: maps real values back to probabilities.
# The exp(x) / (1 + exp(x)) form is kept exactly as in the original
# (including its behavior when exp(x) overflows for very large x).
revlogit <- function(x) {
  exp(x) / (1 + exp(x))
}
# Create a primary capture history from the secondary capture histories.
# Each element of CH.secondary is an individuals-by-secondary-occasions
# matrix for one primary session; for each individual the primary record
# is the minimum observation code across that session's occasions (so an
# observed code, e.g. 1 or 2, takes precedence over "not seen" = 3).
primaryMS.fun <- function(CH.secondary) {
  session.mins <- lapply(CH.secondary, function(occ.mat) apply(occ.mat, 1, min))
  matrix(unlist(session.mins),
         nrow = dim(CH.secondary[[1]])[1],
         ncol = length(CH.secondary))
}
# functions to add dummy occasion
# Prepend a dummy first primary occasion in which every individual carries
# the "not seen" code.
primary.dummy.fun <- function(CH.primary, notseen = 3) {
  cbind(rep(notseen, nrow(CH.primary)), CH.primary)
}
# Prepend a dummy primary session -- a full matrix of "not seen" codes with
# the same dimensions as the first real session -- to the secondary list.
secondary.dummy.fun <- function(CH.secondary, notseen = 3) {
  n.ind <- dim(CH.secondary[[1]])[1]
  n.occ <- dim(CH.secondary[[1]])[2]
  c(list(matrix(notseen, nrow = n.ind, ncol = n.occ)), CH.secondary)
}
# functions to Augment data
# Data augmentation: append `num.aug` pseudo-individual rows, all coded
# "not seen", to the (dummy-augmented) primary capture history.
primary.augment.fun <- function(CH.primary.du, notseen = 3, num.aug = 500) {
  rbind(CH.primary.du,
        matrix(notseen, nrow = num.aug, ncol = ncol(CH.primary.du)))
}
# Data augmentation for the secondary histories: append `num.aug` rows of
# "not seen" codes to every session matrix in the list.
secondary.augment.fun <- function(CH.secondary.du, notseen = 3, num.aug = 500) {
  lapply(CH.secondary.du, function(session.mat) {
    rbind(session.mat,
          matrix(notseen, nrow = num.aug, ncol = ncol(session.mat)))
  })
}
# Function to create known latent states z.
# Fill in latent states that are known even though unobserved: an animal
# cannot return to S after becoming I, so every occasion between two I
# observations is I, and every occasion between two S observations is S.
# Observation codes: 1 = seen as S, 2 = seen as I, `notseen` = not seen.
# State codes: 1 = not yet entered, 2 = S, 3 = I, 4 = dead.
# Only states 2 and 3 can be filled in; all other occasions stay NA.
# NOTE(review): the default `ms` references the global CH.primary.ms --
# pass `ms` explicitly to avoid depending on the global environment.
known.state.SImsJS <- function(ms = CH.primary.ms, notseen = 3) {
  state <- ms
  state[state == notseen] <- NA
  for (ind in seq_len(dim(ms)[1])) {
    seen.I <- which(ms[ind, ] == 2)  # occasions observed as I (obs code 2)
    if (length(seen.I) > 0) {
      state[ind, min(seen.I):max(seen.I)] <- 3  # state code for I
    }
    seen.S <- which(ms[ind, ] == 1)  # occasions observed as S (obs code 1)
    if (length(seen.S) > 0) {
      # S fills second, as in the original, so it overwrites any overlap
      state[ind, min(seen.S):max(seen.S)] <- 2  # state code for S
    }
  }
  state
}
# Specify initial values
# Build an initial latent-state matrix for the MCMC sampler: default state
# is S (2); entries fixed by known.state.SImsJS() are set to NA (initial
# values must be NA wherever the state is supplied as data); augmented
# (never-observed) rows start as "not yet entered" (1).
# NOTE(review): the default `ch` references the global CH.primary.ms --
# pass `ch` explicitly to avoid depending on the global environment.
jsmsinf.init <- function(ch=CH.primary.ms, num.aug=500){
  # ch is primary capture histories after augmentation
  # nz is number of rows added for augmentation
  nz <- num.aug
  kn.state <- known.state.SImsJS(ms=ch)
  state <- matrix(2, nrow=dim(ch)[1], ncol=dim(ch)[2]) # default is S (2)
  # occasions with known states must be NA in the inits
  state <- replace(state,!is.na(kn.state),NA)
  # loop over real (non-augmented) individuals only
  for(i in 1:(dim(state)[1]-nz)){
    f <- min(which(is.na(state[i,]))) # before ever caught
    if(f>1){state[i,1:(f-1)] <- 2} # tried both 1 and 2 here, still get errors
    # after the last known-I occasion, the animal must remain I
    if(length(which(kn.state[i,] == 3)) > 0){
      maxI <- max(which(kn.state[i,]==3))
      if(maxI<dim(state)[2] ){
        state[i,(maxI+1):dim(state)[2]] <- 3 # all after caught as I are I (3)
      }
    }
  }
  # augmented pseudo-individuals start as "not yet entered" (1)
  state[(dim(state)[1]-nz+1):dim(state)[1],] <- 1
  state[,1] <- NA #this is specified in likelihood
  return(state)
}
|
e16472a9bfea902a19a72ac8c23e870913b46586
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RCircos/examples/RCircos.Get.Heatmap.Color.Scale.Rd.R
|
f2bf6583193ba3ffdc51a52d05c5e3977d5f8882
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
RCircos.Get.Heatmap.Color.Scale.Rd.R
|
library(RCircos)
### Name: RCircos.Get.Heatmap.Color.Scale
### Title: Generate Color Scales for Heatmap Plot
### Aliases: RCircos.Get.Heatmap.Color.Scale
### Keywords: methods
### ** Examples
# NOTE(review): this second library(RCircos) call (from the extracted
# example) is redundant -- the package is already attached above.
library(RCircos)
colorScales <- RCircos.Get.Heatmap.Color.Scale(heatmap.color="BlueWhiteRed")
|
ed2660b4499a70323fa663fd635fb6c47b58b5e2
|
7209adb2c925cce4dcdb83499f92485790ddb84d
|
/oldScripts/aylinsMonster/Franken_SPACE_5-4-15.R
|
992330259dd367b324292553a10c92bf814164e9
|
[] |
no_license
|
TinasheMTapera/Reward
|
e45942891261fa28bf79d2d13b4f87fbbc6736af
|
ed3e7a5c10a06a0a4a8dd4d6ac0ab1aefce3bb25
|
refs/heads/master
| 2023-08-19T20:23:37.374939
| 2021-06-22T17:45:03
| 2021-06-22T17:45:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,130
|
r
|
Franken_SPACE_5-4-15.R
|
### converts NSRS, FRANKEN & FRANKEN ADOL. item-level data from giant redcap project (Wolf Satterthwaite Repository) into New Franken Space ###
# updated on 2/1/16 to run from Selkie server instead of Banshee
# records must be updated on selkie from banshee for active projects (like effort) before it is run otherwise the output won't be the most up-to-date
# --- Setup: HTTP/SSL configuration for the REDCap API client -------------
library("bitops")
library("RCurl")
library("REDCapR")
# Disable peer verification / pin SSL version for the Selkie server.
# NOTE(review): ssl_verifypeer = 0 turns off certificate checking — confirm
# this is still required for the current server configuration.
set_config(config(ssl_verifypeer = 0L))
set_config(config(sslversion = 1))
#Create a redcap.cfg file in Users directory with ALL User-Projects and User-specific Tokens
redcap_uri <- "https://selkie.uphs.upenn.edu/API/"
# ~/.redcap.cfg is expected to be a CSV with project name in column 1 and
# the per-user API token in column 2.
ALL_Projects<-read.csv("~/.redcap.cfg")
#List of projects needed for full data import
projects<-ALL_Projects[which(ALL_Projects[,1] == "Wolf Satterthwaite Repository"),]
####Importing selected Selkie Redcap Project and Dictionary####
i<-1
p.token<-projects[i,2]
name<-projects[i,1]
#print(p.token)
#print(name)
# Pull the full project in batches of 1000 records (network call).
project_data<-redcap_read_rdh(
redcap_uri = redcap_uri,
token = p.token,
#config_options = list(ssl.verifypeer=FALSE), commented out for this version of R but may be required in future
batch=1000
)$data
# Data dictionary: used below to select the fields belonging to each form.
project_dictionary<-redcap_metadata_read(redcap_uri=redcap_uri, token=p.token)$data
#####
# --- Extract one data frame per instrument (nsrs / franken / frankenadol),
#     keeping the "general" fields plus that instrument's fields, and
#     recoding the REDCap missing sentinel -9999 to NA. -------------------
measure="nsrs"
measure_data<-project_data[which(project_data$procedure==measure),
c(project_dictionary$field_name[which(project_dictionary$form_name %in% c("general",measure))])]
giant1<-measure_data
giant1[giant1 ==-9999] <- NA
measure="franken"
measure_data<-project_data[which(project_data$procedure==measure),
c(project_dictionary$field_name[which(project_dictionary$form_name %in% c("general",measure))])]
giant2<-measure_data
giant2[giant2 ==-9999] <- NA
measure="frankenadol"
measure_data<-project_data[which(project_data$procedure==measure),
c(project_dictionary$field_name[which(project_dictionary$form_name %in% c("general",measure))])]
giant3<-measure_data
giant3[giant3 ==-9999] <- NA
# Sort by subject id so the three frames line up for inspection.
giant1<-giant1[order(giant1$bblid),]
giant2<-giant2[order(giant2$bblid),]
giant3<-giant3[order(giant3$bblid),]
# Keep only the id columns plus the numbered item columns of each instrument.
nsrs<-giant1[,c('participant_id','bblid', grep('nsrs[1-9]', names(giant1), value=T)),drop=F]
franken<-giant2[,c('participant_id','bblid', grep('franken_[1-9]', names(giant2), value=T)),drop=F]
frankenadol<-giant3[,c('participant_id','bblid', grep('franken_a_[1-9]', names(giant3), value=T)),drop=F]
#merge all three data frames together
# Full outer joins: a subject appears once per (participant_id, bblid) with
# NA in the columns of any instrument they did not complete.
df1<-merge(nsrs,franken, by=c("participant_id","bblid"), all.x=T, all.y=T)
NEW_frank<-merge(df1,frankenadol, by=c("participant_id","bblid"), all.x=T, all.y=T)
# --- Determine which instrument version each row's scores come from, then
#     build the harmonized "newfrank1..newfrank25" item set. The pattern for
#     every item is: prefer the franken column; if NA fall back to the nsrs
#     column (when one maps); if still NA fall back to the frankenadol
#     column. Items with no nsrs equivalent have only the two-way fallback.
# NOTE(review): on the next line the parentheses look misplaced —
# is.na(A & is.na(B)) was probably intended as is.na(A) & is.na(B).
# For numeric inputs the two happen to agree, but confirm intent.
NEW_frank$frankenversion<-ifelse(is.na(NEW_frank[,c("franken_1")] & is.na(NEW_frank[,c("franken_8")])),NA,"franken") #determines version that the scores come from
NEW_frank$frankenversion<-ifelse(is.na(NEW_frank[,c("frankenversion")]) & ! is.na(NEW_frank[,c("nsrs1")]),"nsrs",NEW_frank$frankenversion) #determines version that the scores come from
NEW_frank$frankenversion<-ifelse(is.na(NEW_frank[,c("frankenversion")]) & ! is.na(NEW_frank[,c("franken_a_1")]),"frankenadol",NEW_frank$frankenversion) #determines version that the scores come from
NEW_frank$newfrank1<-ifelse(is.na(NEW_frank[,c("franken_1")]), NEW_frank[,c("nsrs1")], NEW_frank[,c("franken_1")]) #if franken is NA, then use nsrs score, else use franken score
NEW_frank$newfrank1<-ifelse(is.na(NEW_frank[,c("newfrank1")]), NEW_frank[,c("franken_a_1")], NEW_frank[,c("newfrank1")]) #if nsrs score is NA, then use frankenadol score, else use nsrs score
NEW_frank$newfrank2<-ifelse(is.na(NEW_frank[,c("franken_2")]), NEW_frank[,c("nsrs2")], NEW_frank[,c("franken_2")])
NEW_frank$newfrank2<-ifelse(is.na(NEW_frank[,c("newfrank2")]), NEW_frank[,c("franken_a_2")], NEW_frank[,c("newfrank2")])
NEW_frank$newfrank3<-ifelse(is.na(NEW_frank[,c("franken_3")]), NEW_frank[,c("nsrs3")], NEW_frank[,c("franken_3")])
NEW_frank$newfrank3<-ifelse(is.na(NEW_frank[,c("newfrank3")]), NEW_frank[,c("franken_a_3")], NEW_frank[,c("newfrank3")])
# Items 4, 7, 10, 11, 13, 14, 23: no nsrs equivalent — franken, else adol.
NEW_frank$newfrank4<-ifelse(is.na(NEW_frank[,c("franken_4")]), NEW_frank[,c("franken_a_4")], NEW_frank[,c("franken_4")])
NEW_frank$newfrank5<-ifelse(is.na(NEW_frank[,c("franken_4a")]), NEW_frank[,c("nsrs8")], NEW_frank[,c("franken_4a")])
NEW_frank$newfrank5<-ifelse(is.na(NEW_frank[,c("newfrank5")]), NEW_frank[,c("franken_a_4a")], NEW_frank[,c("newfrank5")])
NEW_frank$newfrank6<-ifelse(is.na(NEW_frank[,c("franken_4b")]), NEW_frank[,c("nsrs9")], NEW_frank[,c("franken_4b")])
NEW_frank$newfrank6<-ifelse(is.na(NEW_frank[,c("newfrank6")]), NEW_frank[,c("franken_a_4b")], NEW_frank[,c("newfrank6")])
NEW_frank$newfrank7<-ifelse(is.na(NEW_frank[,c("franken_5")]), NEW_frank[,c("franken_a_5")], NEW_frank[,c("franken_5")])
NEW_frank$newfrank8<-ifelse(is.na(NEW_frank[,c("franken_5a")]), NEW_frank[,c("nsrs10b")], NEW_frank[,c("franken_5a")])
NEW_frank$newfrank8<-ifelse(is.na(NEW_frank[,c("newfrank8")]), NEW_frank[,c("franken_a_5a")], NEW_frank[,c("newfrank8")])
NEW_frank$newfrank9<-ifelse(is.na(NEW_frank[,c("franken_6")]), NEW_frank[,c("nsrs5")], NEW_frank[,c("franken_6")])
NEW_frank$newfrank9<-ifelse(is.na(NEW_frank[,c("newfrank9")]), NEW_frank[,c("franken_a_6")], NEW_frank[,c("newfrank9")])
NEW_frank$newfrank10<-ifelse(is.na(NEW_frank[,c("franken_7")]), NEW_frank[,c("franken_a_7")], NEW_frank[,c("franken_7")])
NEW_frank$newfrank11<-ifelse(is.na(NEW_frank[,c("franken_8")]), NEW_frank[,c("franken_a_8")], NEW_frank[,c("franken_8")])
NEW_frank$newfrank12<-ifelse(is.na(NEW_frank[,c("franken_9")]), NEW_frank[,c("nsrs6")], NEW_frank[,c("franken_9")])
NEW_frank$newfrank12<-ifelse(is.na(NEW_frank[,c("newfrank12")]), NEW_frank[,c("franken_a_9")], NEW_frank[,c("newfrank12")])
NEW_frank$newfrank13<-ifelse(is.na(NEW_frank[,c("franken_10")]), NEW_frank[,c("franken_a_10")], NEW_frank[,c("franken_10")])
NEW_frank$newfrank14<-ifelse(is.na(NEW_frank[,c("franken_11")]), NEW_frank[,c("franken_a_11")], NEW_frank[,c("franken_11")])
NEW_frank$newfrank15<-ifelse(is.na(NEW_frank[,c("franken_11a")]), NEW_frank[,c("nsrs15")], NEW_frank[,c("franken_11a")])
NEW_frank$newfrank15<-ifelse(is.na(NEW_frank[,c("newfrank15")]), NEW_frank[,c("franken_a_10a")], NEW_frank[,c("newfrank15")])
NEW_frank$newfrank16<-ifelse(is.na(NEW_frank[,c("franken_11b")]), NEW_frank[,c("nsrs16b")], NEW_frank[,c("franken_11b")])
NEW_frank$newfrank16<-ifelse(is.na(NEW_frank[,c("newfrank16")]), NEW_frank[,c("franken_a_11a")], NEW_frank[,c("newfrank16")])
NEW_frank$newfrank17<-ifelse(is.na(NEW_frank[,c("franken_11c")]), NEW_frank[,c("nsrs11")], NEW_frank[,c("franken_11c")])
NEW_frank$newfrank17<-ifelse(is.na(NEW_frank[,c("newfrank17")]), NEW_frank[,c("franken_a_12")], NEW_frank[,c("newfrank17")])
NEW_frank$newfrank18<-ifelse(is.na(NEW_frank[,c("franken_11d")]), NEW_frank[,c("nsrs12")], NEW_frank[,c("franken_11d")])
NEW_frank$newfrank18<-ifelse(is.na(NEW_frank[,c("newfrank18")]), NEW_frank[,c("franken_a_12a")], NEW_frank[,c("newfrank18")])
NEW_frank$newfrank19<-ifelse(is.na(NEW_frank[,c("franken_11e")]), NEW_frank[,c("nsrs13b")], NEW_frank[,c("franken_11e")])
NEW_frank$newfrank19<-ifelse(is.na(NEW_frank[,c("newfrank19")]), NEW_frank[,c("franken_a_13")], NEW_frank[,c("newfrank19")])
NEW_frank$newfrank20<-ifelse(is.na(NEW_frank[,c("franken_11f")]), NEW_frank[,c("nsrs14")], NEW_frank[,c("franken_11f")])
NEW_frank$newfrank20<-ifelse(is.na(NEW_frank[,c("newfrank20")]), NEW_frank[,c("franken_a_10b")], NEW_frank[,c("newfrank20")])
NEW_frank$newfrank21<-ifelse(is.na(NEW_frank[,c("franken_12")]), NEW_frank[,c("nsrs17")], NEW_frank[,c("franken_12")])
NEW_frank$newfrank21<-ifelse(is.na(NEW_frank[,c("newfrank21")]), NEW_frank[,c("franken_a_14")], NEW_frank[,c("newfrank21")])
NEW_frank$newfrank22<-ifelse(is.na(NEW_frank[,c("franken_13")]), NEW_frank[,c("nsrs18")], NEW_frank[,c("franken_13")])
NEW_frank$newfrank22<-ifelse(is.na(NEW_frank[,c("newfrank22")]), NEW_frank[,c("franken_a_15")], NEW_frank[,c("newfrank22")])
NEW_frank$newfrank23<-ifelse(is.na(NEW_frank[,c("franken_14")]), NEW_frank[,c("franken_a_16")], NEW_frank[,c("franken_14")])
NEW_frank$newfrank24<-ifelse(is.na(NEW_frank[,c("franken_15")]), NEW_frank[,c("nsrs19")], NEW_frank[,c("franken_15")])
NEW_frank$newfrank24<-ifelse(is.na(NEW_frank[,c("newfrank24")]), NEW_frank[,c("franken_a_17")], NEW_frank[,c("newfrank24")])
NEW_frank$newfrank25<-ifelse(is.na(NEW_frank[,c("franken_16")]), NEW_frank[,c("nsrs22")], NEW_frank[,c("franken_16")])
NEW_frank$newfrank25<-ifelse(is.na(NEW_frank[,c("newfrank25")]), NEW_frank[,c("franken_a_18")], NEW_frank[,c("newfrank25")])
# Keep only ids, version flag and the 25 harmonized items for export.
NEW_frank2<-NEW_frank[,c('participant_id','bblid','frankenversion','newfrank1','newfrank2','newfrank3','newfrank4','newfrank5','newfrank6','newfrank7','newfrank8','newfrank9','newfrank10','newfrank11','newfrank12',
'newfrank13','newfrank14','newfrank15','newfrank16','newfrank17','newfrank18','newfrank19','newfrank20','newfrank21','newfrank22','newfrank23','newfrank24','newfrank25'), drop=FALSE]
# Date-stamped output file, e.g. Newfrank_2016-02-01.csv.
currentDate<-Sys.Date()
write.csv(NEW_frank2, paste("/import/monstrum/Users/adaldal/Newfrank_", currentDate, ".csv", sep=''), row.names=F)
|
25b4ebf9b83aa20bf62ac50a2384e119a9385831
|
cafff9b400a5f31e92176ec294517cdc43a8dc86
|
/Zero models.R
|
70d2b420693903d973f532703b97306d9fddb366
|
[] |
no_license
|
camillemellin/TrueAbsencesInSDMs
|
df279c65efc6d5da41130e7180920154d9a79018
|
7af2357c28b028ef0fdc7870d6e698aee43069b8
|
refs/heads/main
| 2023-01-23T08:59:35.589144
| 2020-11-30T03:48:40
| 2020-11-30T03:48:40
| 304,510,467
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,343
|
r
|
Zero models.R
|
##################################################################
# ZERO MODELS - CM 09/07/19 #
##################################################################
# Compares five strategies for inserting absences (zeros) into reef-fish
# survey data and their effect on downstream occupancy models.
# Load libraries ------------
# NOTE(review): rm(list = ls()) wipes the user's workspace as a side effect;
# generally discouraged in shared scripts — consider removing.
rm(list = ls())
library(dplyr)
library(stringr)
library(RLSPrivate)
library(psych)
library(vegan)
library(goeveg)
library(RColorBrewer)
library(tidyr)
library(boral)
library(corrplot)
library(broom)
library(visreg)
library(metafor)
library(mgcv)
library(ggplot2)
library(gridExtra)
library(pscl)
library(sp)
library(ks)
library(gstat)
library(PBSmapping)
library(sf)
library(cowplot)
# NOTE(review): psych is loaded a second time here (already loaded above).
library(psych)
library(spatialkernel)
library(lme4)
library(pROC)
library(caret)
library(ecospat)
#' Z-score standardize a numeric vector: (x - mean) / sd.
#'
#' @param x Numeric vector; may contain NA.
#' @param na.rm Drop NA values when computing mean/sd. Defaults to TRUE,
#'   preserving the original behaviour (which hard-coded the alias `T`;
#'   `TRUE` is used here since `T` can be reassigned).
#' @return Numeric vector the same length as `x`; NA elements stay NA.
#'   All-NA or zero-variance input yields NaN/Inf, as with the original.
scale2 <- function(x, na.rm = TRUE) {
  (x - mean(x, na.rm = na.rm)) / sd(x, na.rm = na.rm)
}
# Load and filter data --------------
# Side effects: changes working directory and loads objects from .RData /
# .rda files and local CSVs into the global environment.
setwd("~/Dropbox/My documents/Projects/UTAS/xxx MS Statistical report/modelling")
load('Zero models.RData')
# Australian coastline polygons for mapping (PBSmapping + sf copies).
aus <- importShapefile("~/Dropbox/My documents/Projects/UTAS/NESP/SoE/250K_coastline", readDBF=FALSE)
aus2 <- aus %>% dplyr::select(group=PID, POS=POS,long=X,lat=Y)
aus.sf <- st_read("~/Dropbox/My documents/Projects/UTAS/NESP/SoE/250K_coastline.shp")
data(fdat)
names(fdat)[5] <- "SPECIES_NAME"
load('rls-sites-plectropomus.rda')
GBR_fish_data <- read.table("GBR_fish_data.csv", header = TRUE, quote = "'", sep = ",")
GBR_site_data <- read.table("GBR_site_data.csv", header = TRUE, quote = "'", sep = ",")
GBR_fish_data$SurveyDate <- as.Date(GBR_fish_data$SurveyDate, "%d/%m/%y")
# Build metadata and list of sites surveyed both pre and post bleaching
GBR_metadata <- GBR_fish_data %>%
filter(Pre.or.post.bleach != "during" & include == "Y" & Method != 0) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, Reef, SurveyID, SurveyDate, Pre.or.post.bleach, Year) %>%
summarize()
GBR_metadata_pre <- GBR_metadata %>% filter(Pre.or.post.bleach == "Pre")
GBR_metadata_post <- GBR_metadata %>% filter(Pre.or.post.bleach == "Post")
# Keep only sites that appear in both the pre- and post-bleaching surveys.
GBR_site.ls <- GBR_metadata %>% filter(SiteCode %in% GBR_metadata_pre$SiteCode & SiteCode %in% GBR_metadata_post$SiteCode) %>%
group_by(SiteCode, SiteLat, SiteLong) %>% summarize()
# Compute pre vs. post species-site matrices based on different methods for zero insertion ------
# 1- Ignore zeros
# 2- Add zeros for all species in the data matrix when not recorded at a site.
# 3- Add zeros for species recorded at each individual site on at least one occasion (this is what I have done for the population trend analysis).
# 4- Add zeros for species that occur within a convex hull that encompasses the site (i.e. extent of occurrence), where the kernel is calculated using all data.
# 5- Add zeros for species that occur within a convex hull that encompasses the site, where the kernel is calculated for that year/time slice only. This scenario takes into account changes in distributional ranges through time.
GBR_fish_data <- GBR_fish_data %>% filter(CLASS %in% fish_classes() & Method == 1)
GBR_fish_data$SPECIES_NAME <- factor(GBR_fish_data$SPECIES_NAME)
#1- Ignore zeros
# Abundance per survey, averaged within year then across years per site.
GBR_fish_site_1 <- GBR_fish_data %>%
filter(SiteCode %in% GBR_site.ls$SiteCode & Pre.or.post.bleach != "during" & include == "Y" & Method != 0) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, SurveyID, Year, Pre.or.post.bleach, SPECIES_NAME) %>%
summarise(N = sum(N, na.rm=T)) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, Pre.or.post.bleach, Year, SPECIES_NAME) %>%
summarise(N = mean(N, na.rm=T)) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, Pre.or.post.bleach, SPECIES_NAME) %>%
summarise(N.1 = mean(N, na.rm=T))
#2- Add zeros everywhere when not recorded at a site
# tidyr::complete() expands every survey x species combination, filling
# unrecorded species with N = 0.
GBR_fish_site_2 <- GBR_fish_data %>%
filter(SiteCode %in% GBR_site.ls$SiteCode & Pre.or.post.bleach != "during" & include == "Y" & Method != 0) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, SurveyID, Year, Pre.or.post.bleach, SPECIES_NAME) %>%
summarise(N = sum(N, na.rm=T)) %>%
ungroup() %>%
complete(nesting(SiteCode, Site.name, SiteLat, SiteLong, SurveyID, Year, Pre.or.post.bleach), SPECIES_NAME, fill = list(N = 0)) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, Pre.or.post.bleach, Year, SPECIES_NAME) %>%
summarise(N = mean(N, na.rm=T)) %>%
group_by(SiteCode, Site.name, SiteLat, SiteLong, Pre.or.post.bleach, SPECIES_NAME) %>%
summarise(Year = mean(Year), N.2 = mean(N, na.rm=T))
#3- Add zeros for species recorded at each individual site on at least one occasion, and at sites whithin their vicinity (i.e. within 1-degree radius)
# Or use KDE?
# Coordinates rounded to whole degrees so that a species "occurs" at every
# site in the same 1-degree cell as any of its records.
SiteSpecies.ls <- GBR_fish_site_1 %>% group_by(SiteCode, SiteLat, SiteLong, SPECIES_NAME) %>% summarise()
GBR_spp.ls <- names(table(SiteSpecies.ls$SPECIES_NAME)[table(SiteSpecies.ls$SPECIES_NAME)>0])
SiteSpecies.ls.rd <- SiteSpecies.ls
SiteSpecies.ls.rd$SiteLong <- round(SiteSpecies.ls.rd$SiteLong)
SiteSpecies.ls.rd$SiteLat <- round(SiteSpecies.ls.rd$SiteLat)
GBR_site.ls.rd <- GBR_site.ls
GBR_site.ls.rd$SiteLong <- round(GBR_site.ls.rd$SiteLong)
GBR_site.ls.rd$SiteLat <- round(GBR_site.ls.rd$SiteLat)
SiteSpecies.ls.rd <- SiteSpecies.ls.rd %>% left_join(GBR_site.ls.rd, by = c("SiteLong", "SiteLat"))
# Restrict the fully-zero-filled matrix (method 2) to site/species pairs
# allowed by the 1-degree vicinity rule.
GBR_fish_site_3 <- subset(GBR_fish_site_2, paste(SiteCode, SPECIES_NAME, sep = "_") %in% with(SiteSpecies.ls.rd, paste(SiteCode.y, SPECIES_NAME, sep = "_")))
#GBR_fish_site_3 <- subset(GBR_fish_site_2, paste(SiteCode, SPECIES_NAME, sep = "_") %in% with(SiteSpecies.ls, paste(SiteCode, SPECIES_NAME, sep = "_")))
names(GBR_fish_site_3)[ncol(GBR_fish_site_3)] <- "N.3"
# 4- Add zeros for species that occur within a convex hull that encompasses the site (i.e. extent of occurrence), where the kernel is calculated using all data.
# For each species: build the convex hull of its occurrence sites (all years
# pooled), record hull area, and flag which candidate sites fall inside it.
SiteSpecies.ls.4 <- data.frame(SPECIES_NAME = as.character(NA), SiteCode = NA)
GBR_spp_range.area <- data.frame(SPECIES_NAME = GBR_spp.ls, range.area = NA)
for (i in 1:length(GBR_spp.ls)) {
hull.data <- subset(SiteSpecies.ls, SPECIES_NAME == GBR_spp.ls[i], select = c(SiteLong, SiteLat))
# chull() returns hull vertex indices; repeat the first to close the ring.
hull <- chull(hull.data)
hull <- c(hull, hull[1])
GBR_spp_range.area$range.area[i] <- areapoly(as.matrix(hull.data[hull,]))$area
# point.in.polygon: 1 = strictly inside, 3 = on a vertex (both kept below).
GBR_site.in.hull <- point.in.polygon(GBR_site.ls$SiteLong, GBR_site.ls$SiteLat, hull.data$SiteLong[hull], hull.data$SiteLat[hull])
SiteSpecies.ls.4 <- rbind(SiteSpecies.ls.4, data.frame(SPECIES_NAME = GBR_spp.ls[i], SiteCode = GBR_site.ls$SiteCode[GBR_site.in.hull %in% c(1,3)]))
}
# Drop the all-NA seed row used to initialise the data frame.
SiteSpecies.ls.4 <- SiteSpecies.ls.4[-1,]
# Check convex hulls and inside/outside sites
# (visual sanity check on the last species processed by the loop above)
plot(hull.data)
lines(hull.data[hull,])
polygon(hull.data[hull,], col = "lightgrey")
points(GBR_site.ls$SiteLong, GBR_site.ls$SiteLat, col = "blue", pch = 19)
points(hull.data, pch = 19, col = "green")
points(GBR_site.ls$SiteLong[GBR_site.in.hull %in% c(1,3)], GBR_site.ls$SiteLat[GBR_site.in.hull %in% c(1,3)], col = "red", pch = 19)
GBR_fish_site_4 <- subset(GBR_fish_site_2, paste(SiteCode, SPECIES_NAME, sep = "_") %in% with(SiteSpecies.ls.4, paste(SiteCode, SPECIES_NAME, sep = "_")))
names(GBR_fish_site_4)[ncol(GBR_fish_site_4)] <- "N.4"
# 5- Add zeros for species that occur within a convex hull that encompasses the site, where the kernel is calculated for that year/time slice only. This scenario takes into account changes in distributional ranges through time.
# Same convex-hull logic as method 4, but one hull per bleaching period
# (Pre / Post), so range shifts between periods change which sites get zeros.
SiteSpecies.ls.pre.post <- GBR_fish_site_1 %>% group_by(SiteCode, SiteLat, SiteLong, Pre.or.post.bleach, SPECIES_NAME) %>% summarise()
SiteSpecies.ls.5 <- data.frame(SPECIES_NAME = as.character(NA), SiteCode = NA, Pre.or.post.bleach = NA)
for (i in 1:length(GBR_spp.ls)) {
pre.hull.data <- subset(SiteSpecies.ls.pre.post, SPECIES_NAME == GBR_spp.ls[i] & Pre.or.post.bleach == "Pre", select = c(SiteLong, SiteLat))
pre.hull <- chull(pre.hull.data)
pre.hull <- c(pre.hull, pre.hull[1])
GBR_site.in.pre.hull <- point.in.polygon(GBR_site.ls$SiteLong, GBR_site.ls$SiteLat, pre.hull.data$SiteLong[pre.hull], pre.hull.data$SiteLat[pre.hull])
if(length(GBR_site.ls$SiteCode[GBR_site.in.pre.hull %in% c(1,3)]) > 0) {
SiteSpecies.ls.5 <- rbind(SiteSpecies.ls.5, data.frame(SPECIES_NAME = GBR_spp.ls[i], SiteCode = GBR_site.ls$SiteCode[GBR_site.in.pre.hull %in% c(1,3)], Pre.or.post.bleach = "Pre"))
}
post.hull.data <- subset(SiteSpecies.ls.pre.post, SPECIES_NAME == GBR_spp.ls[i] & Pre.or.post.bleach == "Post", select = c(SiteLong, SiteLat))
post.hull <- chull(post.hull.data)
post.hull <- c(post.hull, post.hull[1])
GBR_site.in.post.hull <- point.in.polygon(GBR_site.ls$SiteLong, GBR_site.ls$SiteLat, post.hull.data$SiteLong[post.hull], post.hull.data$SiteLat[post.hull])
if(length(GBR_site.ls$SiteCode[GBR_site.in.post.hull %in% c(1,3)]) > 0) {
SiteSpecies.ls.5 <- rbind(SiteSpecies.ls.5, data.frame(SPECIES_NAME = GBR_spp.ls[i], SiteCode = GBR_site.ls$SiteCode[GBR_site.in.post.hull %in% c(1,3)], Pre.or.post.bleach = "Post"))
}
}
# Drop the all-NA seed row.
SiteSpecies.ls.5 <- SiteSpecies.ls.5[-1,]
# Check convex hulls and inside/outside sites
# (visual sanity check, pre vs. post hulls of the last species in the loop)
par(mfcol = c(2,1))
plot(pre.hull.data)
lines(pre.hull.data[pre.hull,])
polygon(pre.hull.data[pre.hull,], col = "lightgrey")
points(GBR_site.ls$SiteLong, GBR_site.ls$SiteLat, col = "blue", pch = 19)
points(pre.hull.data, pch = 19, col = "green")
points(GBR_site.ls$SiteLong[GBR_site.in.pre.hull %in% c(1,3)], GBR_site.ls$SiteLat[GBR_site.in.pre.hull %in% c(1,3)], col = "red", pch = 19)
plot(post.hull.data)
lines(post.hull.data[post.hull,])
polygon(post.hull.data[post.hull,], col = "lightgrey")
points(GBR_site.ls$SiteLong, GBR_site.ls$SiteLat, col = "blue", pch = 19)
points(post.hull.data, pch = 19, col = "green")
points(GBR_site.ls$SiteLong[GBR_site.in.post.hull %in% c(1,3)], GBR_site.ls$SiteLat[GBR_site.in.post.hull %in% c(1,3)], col = "red", pch = 19)
# Match on SiteCode x period x species so the zeros are period-specific.
GBR_fish_site_5 <- subset(GBR_fish_site_2, paste(SiteCode, Pre.or.post.bleach, SPECIES_NAME, sep = "_") %in% with(SiteSpecies.ls.5, paste(SiteCode, Pre.or.post.bleach, SPECIES_NAME, sep = "_")))
names(GBR_fish_site_5)[ncol(GBR_fish_site_5)] <- "N.5"
# Build single table with 5 abundance estimates, one for each method
# Method 2 (fully zero-filled) is the spine; the others left-join onto it,
# leaving NA where a method inserted no value for that site/species.
GBR_fish_site_all <- GBR_fish_site_2 %>%
left_join(GBR_fish_site_1, by = c("SiteCode","Site.name","SiteLat","SiteLong","SPECIES_NAME","Pre.or.post.bleach")) %>%
left_join(GBR_fish_site_3, by = c("SiteCode","Site.name","SiteLat","SiteLong","SPECIES_NAME","Pre.or.post.bleach")) %>%
left_join(GBR_fish_site_4, by = c("SiteCode","Site.name","SiteLat","SiteLong","SPECIES_NAME","Pre.or.post.bleach")) %>%
left_join(GBR_fish_site_5, by = c("SiteCode","Site.name","SiteLat","SiteLong","SPECIES_NAME","Pre.or.post.bleach")) %>%
dplyr::select("SiteCode","Site.name","SiteLat","SiteLong","Year", "Pre.or.post.bleach","SPECIES_NAME","N.1","N.2","N.3","N.4","N.5")
# Presence/absence version: any positive abundance becomes 1.
# NOTE(review): columns 7:11 are assumed to be N.1..N.5 — position-based;
# confirm if the column order above ever changes.
GBR_fish_site_P <- GBR_fish_site_all
GBR_fish_site_P[,7:11][GBR_fish_site_P[,7:11] > 0] <- 1
names(GBR_fish_site_P)[7:11] <- c("P.1", "P.2", "P.3", "P.4", "P.5")
# Illustrate the method with Ctenochaetus cyanocheilus -----
# Maps show: all sites (grey), inserted zeros (blue), presences (red) for
# one example species, before (pre) and after (post) bleaching.
plot.data.pre <- GBR_fish_site_all %>% filter(SPECIES_NAME == "Ctenochaetus cyanocheilus" & Pre.or.post.bleach == "Pre")
plot.data.post <- GBR_fish_site_all %>% filter(SPECIES_NAME == "Ctenochaetus cyanocheilus" & Pre.or.post.bleach == "Post")
# Convert abundances to presence/absence for plotting.
plot.data.pre[,7:11][plot.data.pre[,7:11] > 0] <- 1
plot.data.post[,7:11][plot.data.post[,7:11] > 0] <- 1
# Method 1 (no inserted zeros), pre-bleaching.
map.1_pre <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
# xlab(expression(paste(Longitude^o, ~'E'))) +
# ylab(expression(paste(Latitude^o, ~'S'))) +
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.pre[plot.data.pre$N.1 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.pre[plot.data.pre$N.1 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank()) + theme_void()
# Method 1, post-bleaching.
map.1_post <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
# xlab(expression(paste(Longitude^o, ~'E'))) +
# ylab(expression(paste(Latitude^o, ~'S'))) +
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.post[plot.data.post$N.1 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.post[plot.data.post$N.1 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 2 (zeros everywhere), pre-bleaching.
map.2_pre <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.pre[plot.data.pre$N.2 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.pre[plot.data.pre$N.2 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 2, post-bleaching.
map.2_post <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.post[plot.data.post$N.2 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.post[plot.data.post$N.2 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 3 (zeros within 1-degree vicinity of records), pre-bleaching.
map.3_pre <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.pre[plot.data.pre$N.3 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.pre[plot.data.pre$N.3 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 3, post-bleaching.
map.3_post <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.post[plot.data.post$N.3 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.post[plot.data.post$N.3 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 4 (all-data convex hull, light-blue polygon), pre-bleaching.
# NOTE(review): hull.data/hull come from the method-4 loop and refer to the
# LAST species processed, not necessarily C. cyanocheilus — confirm intent.
map.4_pre <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_polygon(data = hull.data[hull,], aes(SiteLong, SiteLat), fill = "lightblue", alpha = .8)+
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.pre[plot.data.pre$N.4 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.pre[plot.data.pre$N.4 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 4, post-bleaching.
map.4_post <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_polygon(data = hull.data[hull,], aes(SiteLong, SiteLat), fill = "lightblue", alpha = .8)+
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.post[plot.data.post$N.4 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.post[plot.data.post$N.4 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 5 (period-specific hulls), pre-bleaching.
map.5_pre <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_polygon(data = pre.hull.data[pre.hull,], aes(SiteLong, SiteLat), fill = "lightblue", alpha = .8)+
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.pre[plot.data.pre$N.5 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.pre[plot.data.pre$N.5 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Method 5, post-bleaching.
map.5_post <- ggplot() +
geom_polygon(data=aus2, aes(long, lat, group=group), fill="lightgray", color="darkgray") +
coord_map(xlim=c(143,156), ylim=c(-22,-10)) +
xlab(expression(paste(Longitude^o, ~'E'))) +
ylab(expression(paste(Latitude^o, ~'S'))) +
geom_polygon(data = post.hull.data[post.hull,], aes(SiteLong, SiteLat), fill = "lightblue", alpha = .8)+
geom_point(data=GBR_site.ls, aes(SiteLong, SiteLat), size=1, shape=19, colour="dimgrey") +
geom_point(data = plot.data.post[plot.data.post$N.5 == 0,], aes(SiteLong, SiteLat), size = 2, shape = 19, colour="cornflowerblue")+
geom_point(data = plot.data.post[plot.data.post$N.5 == 1,], aes(SiteLong, SiteLat), size = 3, shape = 19, colour="tomato")+
theme(text=element_text(size=12, family="Calibri"), plot.margin = unit(c(.1,.1,.1,.1), "cm"), axis.title = element_blank())+ theme_void()
# Arrange all 10 panels: rows = methods 1-5, columns = pre / post.
map.pre.post <- plot_grid(plotlist = list(map.1_pre, map.1_post, map.2_pre, map.2_post, map.3_pre, map.3_post, map.4_pre, map.4_post, map.5_pre,
map.5_post), ncol=2, nrow=5)
# Compare species frequency distribution --------
# Per species, frequency of occurrence = presences / number of sites where
# that method defined a presence/absence value; computed separately for
# methods 2-5, pre and post bleaching. F.5 == 1 (present everywhere the
# method allows) is excluded.
spp_freq_pre <- GBR_fish_site_P %>% filter(Pre.or.post.bleach == "Pre") %>% group_by(SPECIES_NAME) %>%
summarize(F.2 = sum(P.2, na.rm = T)/n_distinct(SiteCode[P.2 %in% c(0,1)]),
F.3 = sum(P.3, na.rm = T)/n_distinct(SiteCode[P.3 %in% c(0,1)]),
F.4 = sum(P.4, na.rm = T)/n_distinct(SiteCode[P.4 %in% c(0,1)]),
F.5 = sum(P.5, na.rm = T)/n_distinct(SiteCode[P.5 %in% c(0,1)])) %>%
filter(F.5 < 1)
# Kernel density estimates of the frequency distributions (base plot check).
d.2.pre <- with(spp_freq_pre, density(F.2, na.rm = T))
d.3.pre <- with(spp_freq_pre, density(F.3, na.rm = T))
d.4.pre <- with(spp_freq_pre, density(F.4, na.rm = T))
d.5.pre <- with(spp_freq_pre, density(F.5, na.rm = T))
spp_freq_post <- GBR_fish_site_P %>% filter(Pre.or.post.bleach == "Post") %>% group_by(SPECIES_NAME) %>%
summarize(F.2 = sum(P.2, na.rm = T)/n_distinct(SiteCode[P.2 %in% c(0,1)]),
F.3 = sum(P.3, na.rm = T)/n_distinct(SiteCode[P.3 %in% c(0,1)]),
F.4 = sum(P.4, na.rm = T)/n_distinct(SiteCode[P.4 %in% c(0,1)]),
F.5 = sum(P.5, na.rm = T)/n_distinct(SiteCode[P.5 %in% c(0,1)])) %>%
filter(F.5 < 1)
d.2.post <- with(spp_freq_post, density(F.2, na.rm = T))
d.3.post <- with(spp_freq_post, density(F.3, na.rm = T))
d.4.post <- with(spp_freq_post, density(F.4, na.rm = T))
d.5.post <- with(spp_freq_post, density(F.5, na.rm = T))
# Quick base-graphics overlay: solid = pre, dashed = post, colour = method.
plot(d.2.pre, type = "l", ylim = c(0,5))
lines(d.2.post, lty = 2)
lines(d.3.pre, col = "green")
lines(d.3.post, col = "green", lty = 2)
lines(d.4.pre, col = "red")
lines(d.4.post, col = "red", lty = 2)
lines(d.5.pre, col = "orange")
lines(d.5.post, col = "orange", lty = 2)
# Pairwise correlations among the four frequency estimates.
pairs.panels(spp_freq_pre[,-1])
pairs.panels(spp_freq_post[,-1])
# Attach each species' convex-hull range area (from the method-4 loop).
spp_freq_pre <- spp_freq_pre %>% left_join(GBR_spp_range.area)
spp_freq_post <- spp_freq_post %>% left_join(GBR_spp_range.area)
# Publication version of the density comparison (ggplot).
density.plot <- ggplot() +
geom_density(data = spp_freq_pre, aes(F.2, color = "M.2", lty = "Before"), size = 1)+
geom_density(data = spp_freq_post, aes(F.2, color = "M.2", lty = "After"), size = 1)+
geom_density(data = spp_freq_pre, aes(F.3, color = "M.3", lty = "Before"), size = 1)+
geom_density(data = spp_freq_post, aes(F.3, color = "M.3", lty = "After"), size = 1)+
geom_density(data = spp_freq_pre, aes(F.4, color = "M.4", lty = "Before"), size = 1)+
geom_density(data = spp_freq_post, aes(F.4, color = "M.4", lty = "After"), size = 1)+
geom_density(data = spp_freq_pre, aes(F.5, color = "M.5", lty = "Before"), size = 1)+
geom_density(data = spp_freq_post, aes(F.5, color = "M.5", lty = "After"), size = 1)+
xlab("Species frequency")+
theme(legend.position = 'right') +
scale_color_manual(values = c('#172A3A','#006166','#508991',"#09BC8A"),
labels = c("M.2", "M.3", "M.4", "M.5"),
name = "Method")+
scale_linetype_manual(values = factor(c(1,3)), labels = c("Before", "After"), name = "Bleaching")
biplot <- ggplot() +
geom_point(data = spp_freq_pre, aes(x = F.2, y = F.4, color = range.area))+
xlab("Species frequency (M1)") + ylab("Species frequency (M4)")
fig2 <- plot_grid(density.plot, biplot, ncol=1, nrow=2)
fig2
# Build corresponding Site matrix ---------------
# Process SST data from coral trout dataset
# Site-level environmental covariates averaged per site x bleaching period.
dat_plectro2 <- dat_plectro %>% dplyr::select(SiteCode, PrePost, sst_mean, sst_year, sst_anom, Live_hard_coral) %>%
group_by(SiteCode, PrePost) %>% summarize_all(mean) %>% data.frame()
# Harmonize period labels: Before/After -> Pre/Post.
dat_plectro2$PrePost <- factor(dat_plectro2$PrePost, levels = c("Before", "After", "Pre", "Post"))
dat_plectro2$PrePost <- ifelse(dat_plectro2$PrePost == "Before", "Pre", "Post")
dat_plectro2$PrePost <- factor(dat_plectro2$PrePost)
# Join covariates onto the presence/absence table.
all.dat <- GBR_fish_site_P %>% left_join(dat_plectro2, by = c('SiteCode', 'Pre.or.post.bleach' = 'PrePost'))
# Logistic GLMM for each species ---------
# See link on Cohen's kappa: https://stats.stackexchange.com/questions/82162/cohens-kappa-in-plain-english
# Calculate spp.frequency (only include spp occurring at 15 sites or more, i.e. 155 species)
# NOTE(review): comment says 15 sites, but the filter below requires > 20
# occurrences overall and > 10 in each period — confirm which is intended.
spp.frequency <- GBR_fish_site_P %>% group_by(SPECIES_NAME) %>%
summarize(Nocc = sum(P.1, na.rm = T),
Nocc.pre = sum(P.1[Pre.or.post.bleach == "Pre"], na.rm = T),
Nocc.post = sum(P.1[Pre.or.post.bleach == "Post"], na.rm = T)) %>%
filter(Nocc > 20 & Nocc.pre > 10 & Nocc.post > 10) %>%
left_join(GBR_spp_range.area)
all.dat.sub <- all.dat %>% filter(SPECIES_NAME %in% spp.frequency$SPECIES_NAME)
all.dat.sub <- data.frame(all.dat.sub)
# Standardize the covariate columns (positions 12:15) with scale2().
for (i in 12:15) all.dat.sub[,i] <- scale2(all.dat.sub[,i])
# Preallocate per-species coefficient / p-value / accuracy containers for
# the model-fitting loop that follows.
M2.coef <- M3.coef <- M4.coef <- M5.coef <-
M2.pval <- M3.pval <- M4.pval <- M5.pval <-
data.frame(SPECIES_NAME = spp.frequency$SPECIES_NAME, "(Intercept)"=NA, "Live_hard_coral"=NA, "sst_mean"=NA, "sst_anom"=NA, "sst_mean:sst_anom"=NA)
AUC <- accuracy <- kappa <- boyce <- tss <- data.frame(SPECIES_NAME = spp.frequency$SPECIES_NAME, "m2"=NA, "m3"=NA, "m4"=NA, "m5"=NA)
# Fit one binomial mixed model per survey method (responses P.2-P.5) for each
# shortlisted species, with a random intercept for bleaching period, then
# record fixed effects, p-values and predictive skill (AUC, Boyce index,
# accuracy, Cohen's kappa, TSS).
# Fix: kappa for m3/m4/m5 was read from confusionMatrix()$overall at indices
# 3/4/5 — those slots are accuracy confidence bounds, not kappa (m2 correctly
# used index 2). All four now use the named "Kappa" element.
for (i in seq_along(spp.frequency$SPECIES_NAME)) {
  sp.dat <- subset(all.dat.sub, SPECIES_NAME == spp.frequency$SPECIES_NAME[i])
  m2 <- glmer(P.2 ~ Live_hard_coral + sst_mean*sst_anom + (1|Pre.or.post.bleach), data = sp.dat, family = binomial)
  m3 <- glmer(P.3 ~ Live_hard_coral + sst_mean*sst_anom + (1|Pre.or.post.bleach), data = sp.dat, family = binomial)
  m4 <- glmer(P.4 ~ Live_hard_coral + sst_mean*sst_anom + (1|Pre.or.post.bleach), data = sp.dat, family = binomial)
  m5 <- glmer(P.5 ~ Live_hard_coral + sst_mean*sst_anom + (1|Pre.or.post.bleach), data = sp.dat, family = binomial)
  # Fixed-effect estimates (column 1) and p-values (column 4).
  M2.coef[i, -1] <- summary(m2)$coefficients[, 1]
  M3.coef[i, -1] <- summary(m3)$coefficients[, 1]
  M4.coef[i, -1] <- summary(m4)$coefficients[, 1]
  M5.coef[i, -1] <- summary(m5)$coefficients[, 1]
  M2.pval[i, -1] <- summary(m2)$coefficients[, 4]
  M3.pval[i, -1] <- summary(m3)$coefficients[, 4]
  M4.pval[i, -1] <- summary(m4)$coefficients[, 4]
  M5.pval[i, -1] <- summary(m5)$coefficients[, 4]
  # In-sample fitted probabilities for skill metrics.
  pred.m2 <- predict(m2, newdata = sp.dat, type = "response")
  pred.m3 <- predict(m3, newdata = sp.dat, type = "response")
  pred.m4 <- predict(m4, newdata = sp.dat, type = "response")
  pred.m5 <- predict(m5, newdata = sp.dat, type = "response")
  # Remove NA predictions, keeping observations aligned row-for-row.
  obs.m2 <- sp.dat$P.2[!is.na(pred.m2)]
  obs.m3 <- sp.dat$P.3[!is.na(pred.m3)]
  obs.m4 <- sp.dat$P.4[!is.na(pred.m4)]
  obs.m5 <- sp.dat$P.5[!is.na(pred.m5)]
  pred.m2 <- na.omit(pred.m2)
  pred.m3 <- na.omit(pred.m3)
  pred.m4 <- na.omit(pred.m4)
  pred.m5 <- na.omit(pred.m5)
  AUC$m2[i] <- auc(roc(obs.m2, pred.m2, quiet = TRUE))
  AUC$m3[i] <- auc(roc(obs.m3, pred.m3, quiet = TRUE))
  AUC$m4[i] <- auc(roc(obs.m4, pred.m4, quiet = TRUE))
  AUC$m5[i] <- auc(roc(obs.m5, pred.m5, quiet = TRUE))
  # Continuous Boyce index: prediction values at presences vs. all predictions.
  boyce$m2[i] <- ecospat.boyce(as.numeric(pred.m2), as.numeric(pred.m2[which(obs.m2 == 1)]),
                               nclass = 0, window.w = "default", res = 100, PEplot = FALSE)$Spearman.cor
  boyce$m3[i] <- ecospat.boyce(as.numeric(pred.m3), as.numeric(pred.m3[which(obs.m3 == 1)]),
                               nclass = 0, window.w = "default", res = 100, PEplot = FALSE)$Spearman.cor
  boyce$m4[i] <- ecospat.boyce(as.numeric(pred.m4), as.numeric(pred.m4[which(obs.m4 == 1)]),
                               nclass = 0, window.w = "default", res = 100, PEplot = FALSE)$Spearman.cor
  boyce$m5[i] <- ecospat.boyce(as.numeric(pred.m5), as.numeric(pred.m5[which(obs.m5 == 1)]),
                               nclass = 0, window.w = "default", res = 100, PEplot = FALSE)$Spearman.cor
  # Threshold at 0.5 and score the confusion matrix per method.
  p2 <- as.numeric(pred.m2 > 0.5)
  accuracy$m2[i] <- mean(p2 == obs.m2, na.rm = TRUE)
  c2 <- confusionMatrix(factor(p2), factor(obs.m2))
  kappa$m2[i] <- c2$overall["Kappa"]
  tss$m2[i] <- c2$byClass["Sensitivity"] + c2$byClass["Specificity"] - 1
  p3 <- as.numeric(pred.m3 > 0.5)
  accuracy$m3[i] <- mean(p3 == obs.m3, na.rm = TRUE)
  c3 <- confusionMatrix(factor(p3), factor(obs.m3))
  kappa$m3[i] <- c3$overall["Kappa"]  # was $overall[3] (AccuracyLower)
  tss$m3[i] <- c3$byClass["Sensitivity"] + c3$byClass["Specificity"] - 1
  p4 <- as.numeric(pred.m4 > 0.5)
  accuracy$m4[i] <- mean(p4 == obs.m4, na.rm = TRUE)
  c4 <- confusionMatrix(factor(p4), factor(obs.m4))
  kappa$m4[i] <- c4$overall["Kappa"]  # was $overall[4] (AccuracyUpper)
  tss$m4[i] <- c4$byClass["Sensitivity"] + c4$byClass["Specificity"] - 1
  p5 <- as.numeric(pred.m5 > 0.5)
  accuracy$m5[i] <- mean(p5 == obs.m5, na.rm = TRUE)
  c5 <- confusionMatrix(factor(p5), factor(obs.m5))
  kappa$m5[i] <- c5$overall["Kappa"]  # was $overall[5] (AccuracyNull)
  tss$m5[i] <- c5$byClass["Sensitivity"] + c5$byClass["Specificity"] - 1
  print(i)  # progress indicator
}
# Blank out coefficients that are not significant at the 0.05 level.
M2.coef[,-1][M2.pval[,-1] > 0.05] <- NA
M3.coef[,-1][M3.pval[,-1] > 0.05] <- NA
M4.coef[,-1][M4.pval[,-1] > 0.05] <- NA
M5.coef[,-1][M5.pval[,-1] > 0.05] <- NA
# Logistic GLM for each species: fit on PRE data, predict POST data ---------
# Transferability check: plain GLMs trained on pre-bleaching surveys predict
# post-bleaching occurrence; kappa and TSS score the transfer.
# Fixes:
#  * Observations were sliced from the "Pre" subset while predictions were
#    made for the "Post" subset (different rows, so obs/pred were
#    misaligned); observations now come from the same "Post" rows being
#    predicted.
#  * kappa for m3/m4/m5 read confusionMatrix()$overall at indices 3/4/5
#    (accuracy confidence bounds); the named "Kappa" element is used.
kappa.prepost <- tss.prepost <- data.frame(SPECIES_NAME = spp.frequency$SPECIES_NAME, "m2"=NA, "m3"=NA, "m4"=NA, "m5"=NA)
for (i in seq_along(spp.frequency$SPECIES_NAME)) {
  sp.dat <- subset(all.dat.sub, SPECIES_NAME == spp.frequency$SPECIES_NAME[i])
  pre.dat <- sp.dat[sp.dat$Pre.or.post.bleach == "Pre", ]
  post.dat <- sp.dat[sp.dat$Pre.or.post.bleach == "Post", ]
  m2 <- glm(P.2 ~ Live_hard_coral + sst_mean*sst_anom, data = pre.dat, family = binomial)
  m3 <- glm(P.3 ~ Live_hard_coral + sst_mean*sst_anom, data = pre.dat, family = binomial)
  m4 <- glm(P.4 ~ Live_hard_coral + sst_mean*sst_anom, data = pre.dat, family = binomial)
  m5 <- glm(P.5 ~ Live_hard_coral + sst_mean*sst_anom, data = pre.dat, family = binomial)
  pred.m2 <- predict(m2, newdata = post.dat, type = "response")
  pred.m3 <- predict(m3, newdata = post.dat, type = "response")
  pred.m4 <- predict(m4, newdata = post.dat, type = "response")
  pred.m5 <- predict(m5, newdata = post.dat, type = "response")
  # Drop rows the model could not predict, keeping obs/pred aligned.
  obs.m2 <- post.dat$P.2[!is.na(pred.m2)]
  obs.m3 <- post.dat$P.3[!is.na(pred.m3)]
  obs.m4 <- post.dat$P.4[!is.na(pred.m4)]
  obs.m5 <- post.dat$P.5[!is.na(pred.m5)]
  pred.m2 <- na.omit(pred.m2)
  pred.m3 <- na.omit(pred.m3)
  pred.m4 <- na.omit(pred.m4)
  pred.m5 <- na.omit(pred.m5)
  # Threshold at 0.5 and score each method's confusion matrix.
  p2 <- as.numeric(pred.m2 > 0.5)
  c2 <- confusionMatrix(factor(p2), factor(obs.m2))
  kappa.prepost$m2[i] <- c2$overall["Kappa"]
  tss.prepost$m2[i] <- c2$byClass["Sensitivity"] + c2$byClass["Specificity"] - 1
  p3 <- as.numeric(pred.m3 > 0.5)
  c3 <- confusionMatrix(factor(p3), factor(obs.m3))
  kappa.prepost$m3[i] <- c3$overall["Kappa"]  # was $overall[3]
  tss.prepost$m3[i] <- c3$byClass["Sensitivity"] + c3$byClass["Specificity"] - 1
  p4 <- as.numeric(pred.m4 > 0.5)
  c4 <- confusionMatrix(factor(p4), factor(obs.m4))
  kappa.prepost$m4[i] <- c4$overall["Kappa"]  # was $overall[4]
  tss.prepost$m4[i] <- c4$byClass["Sensitivity"] + c4$byClass["Specificity"] - 1
  p5 <- as.numeric(pred.m5 > 0.5)
  c5 <- confusionMatrix(factor(p5), factor(obs.m5))
  kappa.prepost$m5[i] <- c5$overall["Kappa"]  # was $overall[5]
  tss.prepost$m5[i] <- c5$byClass["Sensitivity"] + c5$byClass["Specificity"] - 1
  print(i)  # progress indicator
}
# Negative agreement carries no skill; floor at zero.
kappa.prepost[kappa.prepost < 0] <- 0
tss.prepost[tss.prepost < 0] <- 0
# Heat matrices of model coefficients (Fig. 3) ---------------
# Heat matrix for the Live_hard_coral coefficient across methods.
# Data columns m2-m5 are relabelled M1-M4 in the figure section below, with
# the last column (M5 here) used as the reference method.
coral.all <- data.frame(SPECIES_NAME = M2.coef$SPECIES_NAME,
M2 = M2.coef$Live_hard_coral,
M3 = M3.coef$Live_hard_coral,
M4 = M4.coef$Live_hard_coral,
M5 = M5.coef$Live_hard_coral)
# Keep species with at least one significant coefficient in M3-M5;
# remaining NAs (non-significant) become 0.
coral.all <- subset(coral.all, !(is.na(M3) & is.na(M4) & is.na(M5)))
coral.all[is.na(coral.all)] <- 0
coral.all <- coral.all[order(coral.all$M5, coral.all$M4, coral.all$M3, decreasing = F),]
coral.all <- data.matrix(coral.all[,-1])
# Winsorise at the 10th/90th percentiles so outliers don't dominate the scale.
coral.all[coral.all > quantile(coral.all, .9)] <- quantile(coral.all, .9)
coral.all[coral.all < quantile(coral.all, .1)] <- quantile(coral.all, .1)
corrplot(t(coral.all), col = rev(brewer.pal(11,"RdBu")),
method = "color", is.corr= FALSE,
cl.pos = "n", addgrid.col = "lightgrey",
tl.pos = "n")
cor(coral.all)
# Reclassify to significant (1) / not significant (0), regardless of sign.
coral.all.recl <- coral.all
coral.all.recl[coral.all.recl < 0] <- 1
coral.all.recl[coral.all.recl > 0] <- 1
# False positives: method flags a significant coral effect where the
# reference (column 4) does not. The percentages quoted in the original
# comments (10% / 15.8% / 13.3%) were recorded from a previous run —
# recompute to confirm.
# column 3 (publication M3)
length(coral.all.recl[,3][coral.all.recl[,3] == 1 & coral.all.recl[,4] == 0]) * 100/dim(coral.all.recl)[1]
# column 2 (publication M2)
length(coral.all.recl[,2][coral.all.recl[,2] == 1 & coral.all.recl[,4] == 0]) * 100/dim(coral.all.recl)[1]
# column 1 (publication M1)
length(coral.all.recl[,1][coral.all.recl[,1] == 1 & coral.all.recl[,4] == 0]) * 100/dim(coral.all.recl)[1]
# False negatives: method misses an effect the reference (column 4) finds.
# column 3 (publication M3)
length(coral.all.recl[,3][coral.all.recl[,3] == 0 & coral.all.recl[,4] == 1]) * 100/dim(coral.all.recl)[1]
# column 2 (publication M2)
length(coral.all.recl[,2][coral.all.recl[,2] == 0 & coral.all.recl[,4] == 1]) * 100/dim(coral.all.recl)[1]
# column 1 (publication M1)
length(coral.all.recl[,1][coral.all.recl[,1] == 0 & coral.all.recl[,4] == 1]) * 100/dim(coral.all.recl)[1]
# Heat matrix for the sst_anom coefficient across methods (same layout and
# processing as the coral-coefficient matrix above).
sst.all <- data.frame(SPECIES_NAME = M2.coef$SPECIES_NAME,
M2 = M2.coef$sst_anom,
M3 = M3.coef$sst_anom,
M4 = M4.coef$sst_anom,
M5 = M5.coef$sst_anom)
# Keep species with at least one significant coefficient in M3-M5.
sst.all <- subset(sst.all, !(is.na(M3) & is.na(M4) & is.na(M5)))
sst.all[is.na(sst.all)] <- 0
sst.all <- sst.all[order(sst.all$M5, sst.all$M4, sst.all$M3, decreasing = F),]
sst.all <- data.matrix(sst.all[,-1])
# Winsorise at the 10th/90th percentiles.
sst.all[sst.all > quantile(sst.all, .9)] <- quantile(sst.all, .9)
sst.all[sst.all < quantile(sst.all, .1)] <- quantile(sst.all, .1)
corrplot(t(sst.all), col = rev(brewer.pal(11,"RdBu")),
method = "color", is.corr= FALSE,
cl.pos = "n", addgrid.col = "lightgrey",
tl.pos = "n")
cor(sst.all)
# Reclassify to significant (1) / not significant (0), regardless of sign.
sst.all.recl <- sst.all
sst.all.recl[sst.all.recl < 0] <- 1
sst.all.recl[sst.all.recl > 0] <- 1
# False positives relative to the reference (column 4). The percentages in
# the original comments were copied verbatim from the coral block above —
# they do not apply here; recompute.
# column 3 (publication M3)
length(sst.all.recl[,3][sst.all.recl[,3] == 1 & sst.all.recl[,4] == 0]) * 100/dim(sst.all.recl)[1]
# column 2 (publication M2)
length(sst.all.recl[,2][sst.all.recl[,2] == 1 & sst.all.recl[,4] == 0]) * 100/dim(sst.all.recl)[1]
# column 1 (publication M1)
length(sst.all.recl[,1][sst.all.recl[,1] == 1 & sst.all.recl[,4] == 0]) * 100/dim(sst.all.recl)[1]
# False negatives relative to the reference (column 4).
# column 3 (publication M3)
length(sst.all.recl[,3][sst.all.recl[,3] == 0 & sst.all.recl[,4] == 1]) * 100/dim(sst.all.recl)[1]
# column 2 (publication M2)
length(sst.all.recl[,2][sst.all.recl[,2] == 0 & sst.all.recl[,4] == 1]) * 100/dim(sst.all.recl)[1]
# column 1 (publication M1)
length(sst.all.recl[,1][sst.all.recl[,1] == 0 & sst.all.recl[,4] == 1]) * 100/dim(sst.all.recl)[1]
# Identify range shifting species -------
# Latitudinal range per species before vs. after bleaching, from presence
# records (P.1 == 1). Counts quoted in trailing comments are from a previous
# run of this script.
Species_lat_ranges <- GBR_fish_site_P %>%
#filter(SPECIES_NAME %in% spp.frequency$SPECIES_NAME) %>%
group_by(SPECIES_NAME) %>%
summarize(min.lat.pre = min(SiteLat[P.1 == 1 & Pre.or.post.bleach == "Pre"], na.rm = T),
max.lat.pre = max(SiteLat[P.1 == 1 & Pre.or.post.bleach == "Pre"], na.rm = T),
min.lat.post = min(SiteLat[P.1 == 1 & Pre.or.post.bleach == "Post"], na.rm = T),
max.lat.post = max(SiteLat[P.1 == 1 & Pre.or.post.bleach == "Post"], na.rm = T)) %>%
filter(is.finite(min.lat.pre))
# Latitudinal extent (degrees) and its change after bleaching.
Species_lat_ranges$lat.ext.pre <- with(Species_lat_ranges, max.lat.pre - min.lat.pre)
Species_lat_ranges$lat.ext.post <- with(Species_lat_ranges, max.lat.post - min.lat.post)
Species_lat_ranges$lat.ext.change <- with(Species_lat_ranges, lat.ext.post - lat.ext.pre)
length(Species_lat_ranges$lat.ext.change[Species_lat_ranges$lat.ext.change < -1]) #25 species with range contraction (9.9%)
length(Species_lat_ranges$lat.ext.change[Species_lat_ranges$lat.ext.change > 1]) #29 species with range extension (11.5%)
# Range midpoint and its displacement; |change| > 1 degree counts as a shift.
Species_lat_ranges$lat.mp.pre <- with(Species_lat_ranges, (min.lat.pre + max.lat.pre)/2)
Species_lat_ranges$lat.mp.post <- with(Species_lat_ranges,(min.lat.post + max.lat.post)/2)
Species_lat_ranges$lat.mp.change <- with(Species_lat_ranges, lat.mp.post - lat.mp.pre)
length(Species_lat_ranges$lat.mp.change[Species_lat_ranges$lat.mp.change < -1]) #22 species with southern midpoint displacement
length(Species_lat_ranges$lat.mp.change[Species_lat_ranges$lat.mp.change > 1]) #8 species with northern midpoint displacement
# Binary flags: contraction, extension, midpoint displacement, any change.
Species_lat_ranges$range.cont <- ifelse(Species_lat_ranges$lat.ext.change < -1, 1, 0)
Species_lat_ranges$range.ext <- ifelse(Species_lat_ranges$lat.ext.change > 1, 1, 0)
Species_lat_ranges$range.displ <- ifelse(Species_lat_ranges$lat.mp.change > 1 | Species_lat_ranges$lat.mp.change < -1, 1, 0)
Species_lat_ranges$range.change <- ifelse(Species_lat_ranges$range.ext == 1 | Species_lat_ranges$range.cont == 1 | Species_lat_ranges$range.displ == 1, 1, 0)
# Proportion of species in each shift category.
length(Species_lat_ranges$range.ext[Species_lat_ranges$range.ext ==1])/length(Species_lat_ranges$range.ext)
length(Species_lat_ranges$range.cont[Species_lat_ranges$range.cont ==1])/length(Species_lat_ranges$range.ext)
length(Species_lat_ranges$range.displ[Species_lat_ranges$range.displ ==1])/length(Species_lat_ranges$range.ext)
length(Species_lat_ranges$range.change[Species_lat_ranges$range.change ==1])/length(Species_lat_ranges$range.ext)
# Distribution of TSS and Kappa for all species vs. range-shifting species --------
# Columns m2-m5 are relabelled M1-M4 here (publication method numbering).
stats <- rbind(data.frame(method = "M1", SPECIES_NAME = tss$SPECIES_NAME, tss = tss$m2, kappa = kappa$m2),
data.frame(method = "M2", SPECIES_NAME = tss$SPECIES_NAME, tss = tss$m3, kappa = kappa$m3),
data.frame(method = "M3", SPECIES_NAME = tss$SPECIES_NAME, tss = tss$m4, kappa = kappa$m4),
data.frame(method = "M4", SPECIES_NAME = tss$SPECIES_NAME, tss = tss$m5, kappa = kappa$m5))
# Clamp negative skill to a small positive value so it stays on the plot axes.
stats$tss[stats$tss < 0] <- 0.001
stats$kappa[stats$kappa < 0] <- 0.001
g.kappa <- ggplot(stats, aes(factor(method), kappa)) +
#geom_violin(draw_quantiles = c(0.25, 0.5, 0.75))
geom_boxplot(notch = T) + xlab("Method") + ylab("Kappa") + ylim(0,1) + ggtitle("All species")
g.kappa.range.change <- ggplot(stats[stats$SPECIES_NAME %in% Species_lat_ranges$SPECIES_NAME[Species_lat_ranges$range.change ==1],], aes(factor(method), kappa)) +
#geom_violin(draw_quantiles = c(0.25, 0.5, 0.75))
geom_boxplot(notch = T) + xlab("Method") + ylab("Kappa") + ylim(0,1) + ggtitle("Range-shifting species")
g.tss <- ggplot(stats, aes(factor(method), tss)) +
#geom_violin(draw_quantiles = c(0.25, 0.5, 0.75))
geom_boxplot(notch = T) + xlab("Method") + ylab("TSS") + ylim(0,.6)
g.tss.range.change <- ggplot(stats[stats$SPECIES_NAME %in% Species_lat_ranges$SPECIES_NAME[Species_lat_ranges$range.change ==1],], aes(factor(method), tss)) +
#geom_violin(draw_quantiles = c(0.25, 0.5, 0.75))
geom_boxplot(notch = T) + xlab("Method") + ylab("TSS") + ylim(0,.6)
# 2x2 panel: kappa and TSS, all species vs. range shifters.
plot_grid(g.kappa, g.kappa.range.change, g.tss, g.tss.range.change, nrow = 2, ncol = 2)
# TSS agreement between methods m2 and m5, with density contours overlaid
# (blue = all species, green = range-shifting species); 1:1 line in grey.
g.biplot <- ggplot() +
geom_point(data = tss, aes(x = m2, y = m5), color = "dimgrey")+
xlab("TSS (M.2)") + ylab("TSS (M.4)") +
geom_abline(aes(slope = 1, intercept = 0), color = "dimgrey") +
geom_density_2d(data = tss, aes(x = m2, y = m5), color = "blue")+
geom_density_2d(data = tss[tss$SPECIES_NAME %in% Species_lat_ranges$SPECIES_NAME[Species_lat_ranges$range.change ==1],], aes(x = m2, y = m5), color = "green")+
#stat_density_2d(data = tss, aes(x = m2, y = m5, fill = after_stat(level)), alpha = .5, geom = "polygon")+
coord_cartesian(xlim = c(0,1), ylim = c(0,1))
# TSS m2 vs m4, coloured by log10 of each species' occurrence count.
ggplot() +
geom_point(data = tss, aes(x = m2, y = m4, color = log10(spp.frequency$Nocc)))+
xlab("TSS (M.2)") + ylab("TSS (M.4)")
|
f7c9467ec0028c9c54b7ad886d397580f20d325f
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831663-test.R
|
4905af3f68601581bf6b8ceae855275f7045eb73
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 683
|
r
|
1615831663-test.R
|
# Auto-generated fuzz/valgrind harness: calls the internal
# meteor:::ET0_ThornthwaiteWilmott with degenerate inputs (all-zero doy,
# empty latitude, pathological temperatures) and prints the structure of
# whatever comes back. Do not edit the literals — they are the fuzz case.
testlist <- list(doy = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), latitude = numeric(0), temp = c(8.5728629954997e-312, 1.5688525430436e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61599376411615e+76, 4.82650578930004e+76, -1.94295658750812e-157, 5.21464652810224e-302, -7.5949865592493e+118, 1.07054513907543e-219, -4.2324579017604e+95, -1.3199888952305e+101, -9.4183172679602e+144))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
48da30463329abf37009844f11b06501f44b0985
|
8a97255cb66455dbef0cf01864a3b334cf20a66b
|
/MMModellerEngine/ModellerEngineFunctions.R
|
cc4fecf71736c4ffc7bdb8d9bbc15aabddc32f79
|
[] |
no_license
|
AshutoshAgrahari/R_Practice
|
c56bbb3c0893e101305f150c0b74045f24cf5a44
|
4c31ce94f130b363f894177a1505ccac290547e0
|
refs/heads/master
| 2020-03-19T17:51:05.826260
| 2020-01-25T10:34:55
| 2020-01-25T10:34:55
| 136,781,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216,653
|
r
|
ModellerEngineFunctions.R
|
#############################################################################
####################### ModellerEngien Licensecing & Login ##################
#############################################################################
# sidebar Modelling Technique functions
# Sidebar menu tree for the AutoModeller technique: bucketing, data
# acquisition/transformation, and results-analysis tabs.
# Returns a shinydashboard menuItem (tag object).
Am_function <- function(){
menuItem("AutoModeller",tabName = "ols_am",icon = icon("user"),
menuItem("Bucketing",tabName = "am_Bucketing",icon = icon("user")),
menuItem("Acquire", tabName = "Acquire", icon = icon("upload"),
menuItem("Transformation",tabName = "directCsvUpload", icon = icon("database"))
),
menuItem("Analyse", tabName = "Analyse", icon = icon("th"),
menuItem("Data review",tabName = "AM_Data_Review", icon = icon("tasks")),
menuItem("Results",tabName = "AM_Result", icon = icon("line-chart")),
menuItem("Filter Results",tabName = "AM_FilterResult", icon = icon("filter", lib = "glyphicon"),badgeLabel = "new",badgeColor = "red"),
menuItem("Top Models",tabName = "AM_TopResult", icon = icon("line-chart"),badgeLabel = "new",badgeColor = "red")
)
)
}
# Sidebar menu tree for the manual MMM (marketing-mix modelling) technique.
# Returns a shinydashboard menuItem (tag object).
MMM_function <- function(){
menuItem("MMM",tabName = "ols_manual",icon = icon("user"),
menuItem("AF Scope",tabName = "olsm_csvUp", icon = icon("database")),
menuItem("Model Scope",tabName = "olsm_modelScope",icon = icon("filter", lib = "glyphicon")),
menuItem("Model Manager", tabName = "olsm_modelManager", icon = icon("tasks")),
menuItem("Results",tabName = "olsm_results", icon = icon("line-chart"))
)
}
# Sidebar menu tree for the DLM (dynamic linear model) project: a
# transformation sub-menu plus the modelling workflow tabs.
# NOTE(review): "DLM_Transformtion" is a visible label typo ("Transformation")
# — it is a runtime string, so left unchanged here; fix with a UI review.
DLM_function <- function(){
menuItem("DLM Project",tabName = "karma_dlm",icon = icon("user"),
#### Side Bars ####
menuItem("DLM_Transformtion",tabName = "dlm_trans",icon = icon("user"),
#### Side Bars ####
shinydashboard:: menuItem(tabName = "dlm_Trans_data", text = "Data Transformation", icon = icon("tasks"), selected = TRUE)
),
menuItem("DLM_Modelling",tabName = "dlm_Modelling",icon = icon("user"),
shinydashboard:: menuItem(tabName = "data", text = "Data", icon = icon("home"), selected = TRUE),
shinydashboard:: menuItem(tabName = "explore", text = "Explore", icon = icon("cogs")),
shinydashboard:: menuItem(tabName = "par", text = "DLM Parameters", icon = icon("edit")),
shinydashboard:: menuItem(tabName = "dlm", text = "DLM", icon = icon("download")),
shinydashboard:: menuItem(tabName = "info", text = "Info & Documentation", icon = icon("file")))
)
}
# Sidebar menu tree for the exploratory data analysis (EDA) module.
# All leaves are badged "WIP"; returns a shinydashboard menuItem (tag object).
EDA_function <- function(){
menuItem("EDA",tabName = "MEEDAProcess",icon = icon("user"),
menuItem("Data Viewer",tabName = "kDataViewer",icon = icon("bar-chart-o"),badgeLabel = "WIP",badgeColor = "red"),
menuItem("DataAnalysis",tabName = "kEDA",icon = icon("line-chart"),
menuItem("Summary",tabName = "kSummary",icon = icon("upload"),badgeLabel = "WIP",badgeColor = "red"),
menuItem("Univariate",tabName = "kUnivariate", icon = icon("columns"),badgeLabel = "WIP",badgeColor = "red"),
menuItem("Bivariate",tabName = "kBivariate", icon = icon("columns"),badgeLabel = "WIP",badgeColor = "red"),
menuItem("Multivariate",tabName = "kMultivariate", icon = icon("columns"),badgeLabel = "WIP",badgeColor = "red")),
menuItem("Clustering ",tabName = "kClustering",icon = icon("columns"),badgeLabel = "WIP",badgeColor = "red")
)
}
# Sidebar menu tree for the Variable Console module (console + help tabs).
# Returns a shinydashboard menuItem (tag object).
VOF_function <- function(){
menuItem("Variable Console",tabName = "VOF",icon = icon("book"),
menuItem("Variable Console",tabName = "kVOF",icon = icon("book")),
menuItem("Console Help",tabName = "Console_help",icon = icon("upload")))
}
# build sidebar based on UserAccess to display
# Assemble the left-hand sidebar for the logged-in user, including only the
# modules enabled in their access record. Returns an HTML() object of the
# concatenated menu markup.
#
# NOTE(review): the `userAccess` argument is immediately overwritten from the
# global `MEUsersLoginDetail` table via `loggedUser`, so the value passed in
# is ignored — confirm whether the parameter can be dropped.
# NOTE(review): paste0() coerces the shiny tag objects returned by menuItem()
# to character markup, which HTML() then re-wraps.
UserAccessSidebarConditions <- function(userAccess, loggedUser){
userAccess <- MEUsersLoginDetail[which(MEUsersLoginDetail$username == loggedUser),]
# MMM module: project management menus plus EDA, variable console and MMM.
tmpSidebarMMM <- NULL
if(userAccess[["MMM"]] == TRUE){
tmpSidebarMMM <- paste0(tmpSidebarMMM,
menuItem("MMM Project",tabName = "MEProjectTab",icon = icon("user"),
menuItem("New Project",tabName = "newProjectTab",icon = icon("database")),
menuItem("Saved Project",tabName = "oldProjectTab",icon = icon("database")),
menuItem("Import/Export Project",tabName = "sharedProjectTab",icon = icon("database")),
EDA_function(),VOF_function(),MMM_function()
)
)
}
# DLM module.
tmpSidebarDLM <- NULL
if(userAccess[["DLM"]] == TRUE){
tmpSidebarDLM <- paste0(tmpSidebarDLM,DLM_function())
}
# GeoMMM is built here but excluded from the final sidebar (see return below).
tmpSidebarZippyGeo <- NULL
if(userAccess[["GeoMMM"]] == TRUE){
tmpSidebarZippyGeo <- paste0(tmpSidebarZippyGeo,menuItem("GeoMMM Project",tabName = "karmaZippyGeo",icon = icon("user")))
}
# Bayesian branch is currently disabled (body commented out) so this stays NULL.
tmpSidebarBayes <- NULL
if(userAccess[["Bayesian"]] == TRUE){
#tmpSidebarBayes <- paste0(tmpSidebarBayes,menuItem("Bayesian Project",tabName = "karmaBayes",icon = icon("user")))
}
# ML Workbench module; several model tabs are kept commented for later use.
tmpSidebarML_Workbench <- NULL
if(userAccess[["ML_Workbench"]] == TRUE){
tmpSidebarML_Workbench <- paste0(tmpSidebarML_Workbench,
menuItem("ML WorkBench Project",tabName = "karmaML_WorkBench",icon = icon("user"),
menuItem("Data Load", tabName = "ML_FileUpload", icon = icon("upload")),
menuItem("Parameter Setup", tabName = "ML_VariableShortlist", icon = icon("table")),
menuItem("Models Comparison", tabName = "ML_ModelCompare", icon = icon("filter", lib = "glyphicon"))
#menuItem("Linear Regression", tabName = "ML_LinearRegression", icon = icon("cogs")),
#menuItem("Gradient Boosting",tabName = "ML_GBM",icon = icon("cogs")),
#menuItem("XGBoost",tabName = "ML_XGBoost",icon = icon("cogs")),
#menuItem("ANN Modelling", tabName = "ML_H2oANN", icon = icon("cogs")),
#menuItem("Bayesian",tabName = "ML_Bayes",icon = icon("cogs"))
#menuItem("Bayesian Belief",tabName = "ML_BayesBelief",icon = icon("cogs"))
#menuItem("Hierarchical Bayesian",tabName = "ML_HBayes",icon = icon("cogs")),
#menuItem("IMR",tabName = "ML_IMR",icon = icon("cogs"))
)
)
}
# Beta module hosts the AutoModeller tree.
tmpSidebarBeta <- NULL
if(userAccess[["Beta"]] == TRUE){
tmpSidebarBeta <- paste0(tmpSidebarBeta,menuItem("Beta",tabName = "karmaBeta",icon = icon("user"),Am_function()))
}
return(HTML(paste0(HTML(tmpSidebarMMM),
HTML(tmpSidebarDLM),
#HTML(tmpSidebarZippyGeo),
HTML(tmpSidebarBayes),
HTML(tmpSidebarML_Workbench),
HTML(tmpSidebarBeta))
)
)
}
# Render the modelling-technique launch buttons after an AF upload, showing
# only the techniques the user's access record enables.
# `userAccess` is a one-row record with logical flags per module; returns an
# HTML() object of the concatenated button markup (paste0 coerces the shiny
# column() tags to character).
UserModellingButtonDisplay <- function(userAccess) {
  buttonHtml <- NULL
  if (userAccess[["MMM"]] == TRUE) {
    mmmButton <- column(3, actionButton("olsmProceed", "MMM", style = "color: #ffffff ; background-color: #455a64 ; border-color: #455a64;margin: 2px"))
    buttonHtml <- paste0(buttonHtml, mmmButton)
  }
  if (userAccess[["Beta"]] == TRUE) {
    betaButton <- column(4, actionButton("am_Proceed", "AutoModeller", style = "color: #ffffff ; background-color: #455a64; border-color: #455a64;margin: 2px"))
    buttonHtml <- paste0(buttonHtml, betaButton)
  }
  return(HTML(buttonHtml))
}
#############################################################################
####################### AutoModeller functions ##########################
#############################################################################
# Root Mean Square Error: sqrt of the average squared difference between two
# equal-length numeric vectors `x` (actuals) and `y` (predictions).
rmse <- function(x, y) {
  squared_error <- (x - y)^2
  sqrt(sum(squared_error) / length(x))
}
# Mean Absolute Percentage Error function
# MAPE (as a fraction, not x100) between actuals `x` and predictions `y`:
# mean(|x - y| / |x|) over equal-length numeric vectors. Undefined (Inf/NaN)
# where x contains zeros, as in the original.
# Improvement: the original built per-element errors with
# sapply(1:length(x), ...) and then rescaled by (100/n)/100; this is
# algebraically identical to the vectorized mean below.
mape <- function(x, y) {
  mean(abs((x - y) / x))
}
# Restrict `df` to the modelling window: rows whose `period` column falls
# inside [startDate, endDate] inclusive. Dates are parsed with lubridate's
# ymd(), so the bounds may be strings like "2020-01-31".
applyModellingPeriod <- function(df, startDate, endDate) {
  windowStart <- ymd(startDate)
  windowEnd <- ymd(endDate)
  subset(df, df$period >= windowStart & df$period <= windowEnd)
}
# Alpha (S-curve) transformation for the AutoModeller.
# For each named element of `alpha`, replaces the matching column of
# `df_lagged` with (beta/1e10)^(alpha^(100 * value / max(df[[df_variable]]))),
# i.e. a diminishing-returns response scaled by the reference series in `df`.
# `alpha`/`beta` are named numeric vectors keyed by transformed column name.
# NOTE(review): the guard reads max(dfDT[[name]]) where `name` is the
# transformed column name, but `df` only carries the base/reference column —
# that lookup is likely NULL (max(NULL) warns and returns -Inf, so the guard
# passes); confirm the intended column.
#Changine alpha function for automodeller
getAlpha <- function(df_lagged,df,alpha,beta,df_variable){
df_laggedDT <- as.data.table(df_lagged)
dfDT <- as.data.table(df)
alpha <- alpha[complete.cases(alpha)]
for(name in names(alpha)){
if(max(dfDT[[name]],na.rm = T) != 0){
set(x = df_laggedDT,j = name,value = (as.numeric(unname(beta[name]))/(10^10))^((as.numeric(unname(alpha[name]))^((as.numeric(df_laggedDT[[name]])/max(as.numeric(dfDT[[df_variable]]),na.rm = T))*100))))
#Refresnce formula
#(1/(10^10))^(as.numeric(unname(alpha[name]))^((df_lagged[,name]/max(df_lagged[,name]))*100))
} else{
df_laggedDT[[name]] <- 0
}
}
df <- as.data.frame(df_laggedDT)
return(df)
}
# Lag step of the transformation pipeline.
# Shift each named column of `df` down by its entry in `lag` (a named numeric
# vector of lag lengths), padding the vacated leading rows with 0.
# Columns with an NA lag are left untouched. Returns a data.frame.
applyDfLag <- function(df,lag){
df_Lag <- as.data.table(df)
for(name in names(lag)){
if(!is.na(lag[name])) {
df_Lag[,(name):=shift(df_Lag[[name]],as.numeric(unname(lag[name])),fill = 0,type = "lag")]
}
}
df_lagged <- as.data.frame(df_Lag)
return(df_lagged)
}
#power
# Raise each named column of `df` to the exponent given in `powerRange`
# (a named numeric vector keyed by column name; NA entries are skipped).
# NOTE(review): unlike getDecay()/applyDfLag(), this returns a plain list
# (as.list.data.frame), not a data.frame — callers depend on this
# (decayPowerTrans concatenates it; powerDecayTrans re-wraps it with
# as.data.frame.list), so the inconsistency is intentional-looking but
# worth confirming before changing.
getPower <- function(df,powerRange){
dfPowerDt <- as.data.table(df)
powerSeries <- powerRange[complete.cases(powerRange)]
for(name in names(powerSeries)) {
set(dfPowerDt,j = name,value=dfPowerDt[[name]]^as.numeric(unname(powerSeries[name])))
}
df <- as.list.data.frame(dfPowerDt)
return(df)
}
#Decay
# Geometric carry-over (adstock): for each named column of `df`, accumulate
# row i as raw_i + accumulated_(i-1) * (1 - decay), using the per-column
# retention parameter in `decay` (named numeric vector; NA entries dropped).
# Returns a data.frame.
getDecay <- function(df,decay){
df_DecayDT <- as.data.table(df)
decay <- decay[complete.cases(decay)]
# Sequential accumulator over one column; inherently order-dependent, so a
# plain loop rather than a vectorized form.
calcDecay <- function(col,decay){
for(i in 1:length(col)){
if(i ==1){
col[i] <- as.numeric(col[i])
} else if(!is.na(col[i - 1])){
col[i] <- as.numeric(col[i])+ as.numeric(col[i - 1]*(1-decay))
}
}
return(col)
}
for(name in names(decay)) {
set(df_DecayDT,j = name,value=calcDecay(df_DecayDT[[name]],as.numeric(unname(decay[name]))))
}
df <- as.data.frame(df_DecayDT)
return(df)
}
# transform the data as per bucket
# For every variable in the transformation buckets selected in `trnsList`,
# generate the full grid of lag x decay/alpha/power transformed series; all
# other variables pass through unchanged. Returns a named list with one entry
# per input variable, each holding that variable's (possibly many) series.
# `amInputBucketList` maps bucket name -> vector of variable names.
# NOTE(review): if no column matches c("Period", transVar), the negative
# subscript below becomes -integer(0) and drops every column — confirm the
# input always contains a "Period" column.
# NOTE(review): `colNames` is assigned but never used.
CreateAllTransformations <- function(df, trnsList, amInputBucketList){
colNames <- names(df)
transVar <- as.character(unlist(amInputBucketList[names(amInputBucketList) %in% trnsList$selectedTransBucket]))
dfToNonTrans <- df[,-which(names(df) %in% c("Period",transVar))]
dfToTrans <- data.frame(df[,which(names(df) %in% transVar)],stringsAsFactors = F)
names(dfToTrans) <- names(df)[which(names(df) %in% transVar)]
# Coerce the to-transform columns to numeric (single-column case handled
# separately because apply() would drop the data.frame shape).
if(length(dfToTrans)==1){
dfToTrans[,1] <- as.numeric(dfToTrans[,1])
}else{
dfToTrans <- as.data.frame(apply(dfToTrans,2,as.numeric))
}
transformedDf <- list()
# Untransformed variables: one pass-through series each.
for (name in names(dfToNonTrans)) {
transformedDf[[name]][[name]] <- as.numeric(dfToNonTrans[,name])
}
for(name in names(dfToTrans)){
# Transforming the data as per the bucket selection: first build every lag
# L(min..max) of the series, then expand each lag by the chosen decay grid.
transformedDf[[name]]<- list()
lagTrans <- as.data.frame(replicate(as.numeric(as.character(dfToTrans[,name])), n = (trnsList$getLagMax-trnsList$getLagMin)+1),stringsAsFactors = F)
lagSeries <- as.numeric(trnsList$getLagMin:trnsList$getLagMax)
names(lagSeries) <- paste0(name,"_L",trnsList$getLagMin:trnsList$getLagMax)
names(lagTrans) <- names(lagSeries)
lagTrans <- applyDfLag(df = lagTrans,lag = lagSeries)
# Dispatch on the decay family; order of operations differs per branch
# (e.g. "Decay Power" decays first, then raises to powers).
if(trnsList$decaySelection == "Alpha Decay"){
alphaDecayTransList <- alphaDecayTrans(dfToTrans,lagTrans,name,trnsList)
transformedDf[[name]] <- append(transformedDf[[name]],values = alphaDecayTransList)
}else if(trnsList$decaySelection == "Power Decay"){
powerDecayTransList <- powerDecayTrans(dfToTrans,lagTrans,name,trnsList)
transformedDf[[name]] <- append(transformedDf[[name]],values = powerDecayTransList)
}else if(trnsList$decaySelection == "Decay Power"){
decayPowerTransList <- decayPowerTrans(dfToTrans,lagTrans,name,trnsList)
transformedDf[[name]] <- append(transformedDf[[name]],values = decayPowerTransList)
}else if(trnsList$decaySelection == "Decay Alpha"){
decayAlphaTransList <- decayAlphaTrans(dfToTrans,lagTrans,name,trnsList)
transformedDf[[name]] <- append(transformedDf[[name]],values = decayAlphaTransList)
}
}
return(transformedDf)
}
#capturing Alpha Decay data
# "Alpha Decay" branch: for each lagged series, apply the alpha (S-curve)
# transformation over the alpha grid, then geometric decay over the decay
# grid. Column names encode the pipeline, e.g. var_L1_A0.85_D0.2.
# Returns a flat named list of transformed series.
alphaDecayTrans <- function(dfToTrans,lagTrans,name,trnsList){
alphaTransformedList <- list()
for(lagName in names(lagTrans)){
AlphaSeries <- as.numeric(seq(from=trnsList$getAlphaMin,to=trnsList$getAlphaMax,by=trnsList$getAlphaSteps))
# Replicate the lagged series once per alpha value in the grid.
lagTransAlpha <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])),n = length(AlphaSeries)),stringsAsFactors = F)
names(AlphaSeries) <- paste0(lagName,"_A",seq(from=trnsList$getAlphaMin,to=trnsList$getAlphaMax,by=trnsList$getAlphaSteps))
names(lagTransAlpha) <- names(AlphaSeries)
# Beta fixed at 1 for every alpha column (see getAlpha's formula).
betaSeries <- rep(1,times=length(AlphaSeries))
names(betaSeries) <- names(AlphaSeries)
# Reference series for the alpha scaling is the raw (unlagged) variable.
df <- as.data.frame(dfToTrans[,name])
colnames(df) <- name
lagTransAlpha <- getAlpha(df_lagged = lagTransAlpha,df = df,alpha = AlphaSeries,beta = betaSeries,df_variable = name)
for(alphaName in names(lagTransAlpha)){
decaySeries <- as.numeric(seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
lagTransAlphaDecay <- as.data.frame(replicate(as.numeric(as.character(lagTransAlpha[,alphaName])),n = length(decaySeries)),stringsAsFactors = F)
names(decaySeries) <- paste0(alphaName,"_D",seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
names(lagTransAlphaDecay) <- names(decaySeries)
lagTransAlphaDecay <- getDecay(lagTransAlphaDecay,decaySeries)
alphaTransformedList <- c(alphaTransformedList,lagTransAlphaDecay)
}
}
return(alphaTransformedList)
}
#capturing Power Decay data
# "Power Decay" branch: for each lagged series, raise to each exponent in the
# power grid, then apply geometric decay over the decay grid. Column names
# encode the pipeline, e.g. var_L1_P0.5_D0.2.
# Returns a flat named list of transformed series.
powerDecayTrans <- function(dfToTrans,lagTrans,name,trnsList){
powerTransformedList <- list()
for(lagName in names(lagTrans)){
powerSeries <- as.numeric(seq(from=trnsList$getPowerMin,to=trnsList$getPowerMax,by=trnsList$getPowerSteps))
# Replicate the lagged series once per power value in the grid.
lagTransPower <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(powerSeries)),stringsAsFactors = F)
names(powerSeries) <- paste0(lagName,"_P",seq(from=trnsList$getPowerMin,to=trnsList$getPowerMax,by=trnsList$getPowerSteps))
names(lagTransPower) <- names(powerSeries)
# getPower returns a list, so re-wrap as a data.frame before indexing.
lagTransPower <- as.data.frame.list(getPower(lagTransPower,powerSeries))
for(powerName in names(lagTransPower)){
decaySeries <- as.numeric(seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
lagTransPowerDecay <- as.data.frame(replicate(as.numeric(as.character(lagTransPower[,powerName])),n = length(decaySeries)),stringsAsFactors = F)
names(decaySeries) <- paste0(powerName,"_D",seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
names(lagTransPowerDecay) <- names(decaySeries)
lagTransPowerDecay <- getDecay(lagTransPowerDecay,decaySeries)
powerTransformedList <- c(powerTransformedList,lagTransPowerDecay)
}
}
return(powerTransformedList)
}
#capturing Decay Power data
# "Decay Power" branch: reverse order of powerDecayTrans — geometric decay
# first over the decay grid, then each exponent in the power grid. Column
# names encode the pipeline, e.g. var_L1_D0.2_P0.5.
# Returns a flat named list of transformed series (getPower yields lists,
# which c() splices directly).
decayPowerTrans <- function(dfToTrans,lagTrans,name,trnsList){
powerTransformedList <- list()
for(lagName in names(lagTrans)){
decaySeries <- as.numeric(seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
# Replicate the lagged series once per decay value in the grid.
lagTransDecay <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(decaySeries)),stringsAsFactors = F)
names(decaySeries) <- paste0(lagName,"_D",seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
names(lagTransDecay) <- names(decaySeries)
lagTransDecay <- getDecay(lagTransDecay,decaySeries)
for(powerName in names(lagTransDecay)){
powerSeries <- as.numeric(seq(from=trnsList$getPowerMin,to=trnsList$getPowerMax,by=trnsList$getPowerSteps))
lagTransDecayPower <- as.data.frame(replicate(as.numeric(as.character(lagTransDecay[,powerName])), n = length(powerSeries)),stringsAsFactors = F)
names(powerSeries) <- paste0(powerName,"_P",seq(from=trnsList$getPowerMin,to=trnsList$getPowerMax,by=trnsList$getPowerSteps))
names(lagTransDecayPower) <- names(powerSeries)
lagTransDecayPower <- getPower(lagTransDecayPower,powerSeries)
powerTransformedList <- c(powerTransformedList,lagTransDecayPower)
}
}
return(powerTransformedList)
}
#capturing Decay Alpha data
# Apply a decay transformation followed by an alpha transformation to every
# lagged variant of a variable.
#
# dfToTrans: source data frame holding the raw column `name`.
# lagTrans:  data frame of lagged versions of that column, one per column.
# name:      name of the base variable inside dfToTrans.
# trnsList:  settings holding decay/alpha min, max and step sizes.
#
# Returns a flat list of series named <lag>_D<decay>_A<alpha>.
# NOTE(review): getDecay()/getAlpha() are defined elsewhere in this file;
# their exact semantics are assumed here, not visible in this block.
decayAlphaTrans <- function(dfToTrans,lagTrans,name,trnsList){
alphaTransformedList <- list()
for(lagName in names(lagTrans)){
# One identical copy of the lagged series per decay candidate.
decaySeries <- as.numeric(seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
lagTransDecay <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(decaySeries)),stringsAsFactors = F)
names(decaySeries) <- paste0(lagName,"_D",seq(from=trnsList$getDecayMin,to=trnsList$getDecayMax,by=trnsList$getDecaySteps))
names(lagTransDecay) <- names(decaySeries)
lagTransDecay <- getDecay(lagTransDecay,decaySeries)
for(alphaName in names(lagTransDecay)){
# One identical copy of each decayed series per alpha candidate.
AlphaSeries <- as.numeric(seq(from=trnsList$getAlphaMin,to=trnsList$getAlphaMax,by=trnsList$getAlphaSteps))
lagTransDecayAlpha <- as.data.frame(replicate(as.numeric(as.character(lagTransDecay[,alphaName])),n = length(AlphaSeries)),stringsAsFactors = F)
names(AlphaSeries) <- paste0(alphaName,"_A",seq(from=trnsList$getAlphaMin,to=trnsList$getAlphaMax,by=trnsList$getAlphaSteps))
names(lagTransDecayAlpha) <- names(AlphaSeries)
# Beta is held constant at 1 for every alpha candidate.
betaSeries <- rep(1,times=length(AlphaSeries))
names(betaSeries) <- names(AlphaSeries)
# getAlpha() also needs the raw (untransformed) series, supplied under the
# transformed column's name so the two line up.
df <- as.data.frame(dfToTrans[,name])
colnames(df) <- alphaName
lagTransDecayAlpha <- getAlpha(df_lagged = lagTransDecayAlpha,df = df,alpha = AlphaSeries,beta = betaSeries,df_variable = alphaName)
alphaTransformedList <- c(alphaTransformedList,lagTransDecayAlpha)
}
}
return(alphaTransformedList)
}
# Collect the transformed series requested by the parameter table into one
# wide data frame, cbind-ing each variable's selected lag/decay/power/alpha
# combinations side by side.
#
# amTransDataList: nested list amTransDataList[[bucket]][[variable]] holding
#                  every pre-computed transformed series for that variable.
# parametersDf:    per-variable settings; only rows whose Transformation is
#                  not "Linear" are processed here.
#
# Returns a data frame with one column per selected transformed series.
getTransformedVariables <- function(amTransDataList,parametersDf){
transformedVariablesIndexDf <- parametersDf[which(parametersDf$Transformation != "Linear"),]
transformedVariablesIndexDf$Bucket <- as.character(transformedVariablesIndexDf$Bucket)
transformedVariablesDf <- data.frame()
variables <- NULL
for(i in 1:nrow(transformedVariablesIndexDf)){
# Each branch expands the requested lag/decay/power/alpha grids into the
# full set of series names (<var>_L<lag>_D<d>_P<p> etc.) for this row; the
# suffix order encodes which transformation was applied first.
if(transformedVariablesIndexDf$Transformation[i] == "Decay Power"){
variables <- sapply(paste0(transformedVariablesIndexDf$VariableName[i],"_L",as.numeric(as.character(transformedVariablesIndexDf$LagMin[i])):as.numeric(as.character(transformedVariablesIndexDf$LagMax[i]))),FUN = function(x)sapply(paste0(x,"_D",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$DecayMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$DecayMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$DecaySteps[i]))))),FUN = function(y)paste0(y,"_P",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$PowerMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$PowerMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$PowerSteps[i]))))),simplify = T),simplify = T)
} else if (transformedVariablesIndexDf$Transformation[i] == "Alpha Decay"){
variables <- sapply(paste0(transformedVariablesIndexDf$VariableName[i],"_L",as.numeric(as.character(transformedVariablesIndexDf$LagMin[i])):as.numeric(as.character(transformedVariablesIndexDf$LagMax[i]))),FUN = function(x)sapply(paste0(x,"_A",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$AlphaMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$AlphaMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$AlphaSteps[i]))))),FUN = function(y)paste0(y,"_D",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$DecayMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$DecayMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$DecaySteps[i]))))),simplify = T),simplify = T)
} else if(transformedVariablesIndexDf$Transformation[i] == "Power Decay"){
variables <- sapply(paste0(transformedVariablesIndexDf$VariableName[i],"_L",as.numeric(as.character(transformedVariablesIndexDf$LagMin[i])):as.numeric(as.character(transformedVariablesIndexDf$LagMax[i]))),FUN = function(x)sapply(paste0(x,"_P",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$PowerMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$PowerMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$PowerSteps[i]))))),FUN = function(y)paste0(y,"_D",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$DecayMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$DecayMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$DecaySteps[i]))))),simplify = T),simplify = T)
} else if (transformedVariablesIndexDf$Transformation[i] == "Decay Alpha"){
variables <- sapply(paste0(transformedVariablesIndexDf$VariableName[i],"_L",as.numeric(as.character(transformedVariablesIndexDf$LagMin[i])):as.numeric(as.character(transformedVariablesIndexDf$LagMax[i]))),FUN = function(x)sapply(paste0(x,"_D",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$DecayMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$DecayMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$DecaySteps[i]))))),FUN = function(y)paste0(y,"_A",paste0(seq(from=as.numeric(as.character(transformedVariablesIndexDf$AlphaMin[i])),to=as.numeric(as.character(transformedVariablesIndexDf$AlphaMax[i])),by=as.numeric(as.character(transformedVariablesIndexDf$AlphaSteps[i]))))),simplify = T),simplify = T)
}
# Pull only the series whose generated names exist in the cache for this
# bucket/variable, then grow the output column-wise.
tempDf <- as.data.frame.list(amTransDataList[[transformedVariablesIndexDf$Bucket[i]]][[transformedVariablesIndexDf$VariableName[i]]][names(amTransDataList[[transformedVariablesIndexDf$Bucket[i]]][[transformedVariablesIndexDf$VariableName[i]]]) %in% variables])
if(ncol(transformedVariablesDf) ==0 ){
transformedVariablesDf <- tempDf
}else{
transformedVariablesDf <- cbind(transformedVariablesDf,tempDf)
}
}
return(transformedVariablesDf)
}
# Generate every candidate regression formula implied by the bucket
# configuration: for each allowed count of variables per bucket, enumerate
# all variable combinations (expanding non-linear variables into all of
# their transformed series) and join them into "dep ~ v1 + v2 + ..." strings.
#
# amTransDataList: per-bucket list of variable series (nested one level
#                  deeper for non-linear/transformed variables).
# modelScopeDf:    NOTE(review): accepted but not referenced in this body --
#                  appears to exist for signature compatibility; confirm.
# bucketData:      per-bucket MinVariables/Max counts.
# parametersDf:    variable configuration (Bucket, Transformation, flags).
#
# Returns a list of formula strings, hard-capped at 200,000 entries.
# NOTE(review): depends on data.table::rbindlist and rlist::list.clean,
# which must be attached/available elsewhere in the app.
buildFormulaList <- function(amTransDataList,modelScopeDf,bucketData,parametersDf){
# Buckets whose modelled variables are used as-is vs. those expanded into
# transformed series.
linearNames <- names(amTransDataList)[names(amTransDataList) %in% parametersDf$Bucket[parametersDf$Transformation == "Linear" & parametersDf$ModellingFlag == "Yes" & parametersDf$Bucket != "Dependent"]]
nonLinearNames <- names(amTransDataList)[names(amTransDataList) %in% parametersDf$Bucket[parametersDf$Transformation != "Linear" & parametersDf$ModellingFlag == "Yes"]]
# calculating the range of combination for each bucket
bucketData[,-1] <- apply(bucketData[,-1],2,as.numeric)
bucketRangeList <- lapply(bucketData$Bucket, function(x) bucketData$MinVariables[bucketData$Bucket == x]:bucketData$Max[bucketData$Bucket == x])
names(bucketRangeList) <- bucketData$Bucket
# generating bucket wise combination for formula building
bucketComb <- data.frame(expand.grid(bucketRangeList),stringsAsFactors = F)
# getting all variables by bucket.
bucketVarList <- list()
bucketVarList[["Dependent"]] <- as.character(parametersDf$VariableName[which(parametersDf$Bucket == "Dependent")])
# collecting all linear variables by bucket
if(length(linearNames)!= 0){
bucketVarListLr <- lapply(linearNames, function(x) {names(amTransDataList[[x]])[names(amTransDataList[[x]]) %in% parametersDf$VariableName[parametersDf$Bucket==x & parametersDf$ModellingFlag == "Yes"]]})
names(bucketVarListLr) <- linearNames
bucketVarList <- append(bucketVarList, bucketVarListLr)
}
# collecting all nonlinear variables by bucket
if(length(nonLinearNames)!= 0){
bucketVarListNLr <- lapply(nonLinearNames, function(x) {names(amTransDataList[[x]])[names(amTransDataList[[x]]) %in% parametersDf$VariableName[parametersDf$Bucket==x & parametersDf$ModellingFlag == "Yes"]]})
names(bucketVarListNLr) <- nonLinearNames
# Replace each non-linear variable name with the names of all of its
# transformed series, so combinations are built over the series.
for(name in names(bucketVarListNLr)){
tempList <- lapply(bucketVarListNLr[[name]], function(x){names(amTransDataList[[name]][[x]])})
names(tempList) <- bucketVarListNLr[[name]]
bucketVarListNLr[[name]] <- tempList
}
bucketVarList <- append(bucketVarList, bucketVarListNLr)
}
# generating all possible combination of each bucket by min and max. (by default generating NULL for min 0)
bucketVarComb <- list()
for(name in names(bucketRangeList)){
# name <- names(bucketRangeList)[1]
range <- bucketRangeList[[name]]
if(name %in% linearNames){
# Linear buckets: choose `x` variables and pre-join them with " + ".
bucketVarComb[[name]] <- lapply(range, function(x){
if(x!=0){
unlist(combn(bucketVarList[[name]],x,simplify = F,FUN = function(x){paste0(x,collapse = " + ")}))
}
})
}else{
# Non-linear buckets: choose `x` variables, then cross every choice of
# transformed series per chosen variable (expand.grid) before joining.
bucketVarComb[[name]] <- lapply(range, function(x){
if(x!=0){
varCombList <- as.list(as.data.frame(combn(names(bucketVarList[[name]]),x)))
apply(varList <- sapply(data.frame(rbindlist(lapply(varCombList, function(x){
data.frame(do.call(expand.grid,bucketVarList[[name]][names(bucketVarList[[name]]) %in% unlist(x)]),stringsAsFactors = F)
}))), as.character),1,FUN = function(x){paste(x,collapse = "+")})
}
})
}
names(bucketVarComb[[name]])<- range
}
# remove all NULL from nested bucketvVarComb list
bucketVarComb <- rlist::list.clean(bucketVarComb,fun = is.null, recursive = T)
# baseformula with dependent only
baseformula <- paste0(bucketVarList$Dependent," ~ ")
# building formula by bucketComb row wise
# remove 0 from bucketComb after converting into list.
formulaList <- NULL
for(i in 1:nrow(bucketComb)){
# Hard cap to keep the enumeration (and downstream lm() fits) bounded.
if(length(formulaList) < 200000){
bucket <- as.list(bucketComb[i,])
bucket[bucket == 0] <- NULL
if(length(bucket)!= 0){
formulaList <- c(formulaList, paste0(baseformula, apply(expand.grid(lapply(names(bucket), function(x){bucketVarComb[[x]][[as.character(bucket[[x]])]]}),stringsAsFactors = F),1,paste0, collapse = " + ")))
}
}
}
return(as.list(formulaList))
}
getIterCount <- function(RegDataTemp, amTransDataList, bucketData, parametersDf, startDate, endDate) {
  # Build the regression-ready table for the requested date window, then
  # derive every candidate model formula from it. Despite the name, this
  # returns the formula list itself; callers presumably take its length as
  # the iteration count -- confirm at call sites.
  scopeDf <- getRegDataTable(RegDataTemp, amTransDataList, parametersDf, startDate, endDate)
  candidateFormulas <- buildFormulaList(amTransDataList, scopeDf, bucketData, parametersDf)
  return(candidateFormulas)
}
getActualVsPredictedDf <- function(modelScopeDf, model) {
  # Assemble a period-indexed comparison of the fitted model's observed,
  # predicted and residual values.
  #
  # modelScopeDf: data frame with a "period" column aligned row-for-row with
  #               the observations the model was fitted on.
  # model:        a fitted lm object.
  #
  # Returns a data frame with columns Period, Actual, Predicted, Residual.
  observed <- model$model[, 1]
  predicted <- fitted(model)
  resids <- residuals(model)
  actPred <- cbind.data.frame(
    Period = modelScopeDf[, "period"],
    Actual = observed,
    Predicted = predicted,
    Residual = resids
  )
  return(actPred)
}
# Compute per-coefficient contribution and elasticity figures for a fitted
# model, over the whole modelling period and over its last 12 observations.
#
# model:        fitted lm object.
# parametersDf: variable configuration; used to find the dependent variable
#               and each term's Transformation.
#
# Returns broom::tidy(model) extended with contribution, VIF and two
# elasticity columns (a leading 0 is prepended to align the intercept row).
# NOTE(review): depends on broom::tidy, car::vif and stringr::str_extract.
# The constant 5 and the (1.01^power - 1) factor (a 1% lift on a power
# curve) look like business-calibrated scalings -- confirm with the model
# owners before changing.
getElasticity <- function(model,parametersDf){
modelScopeMean <- colMeans(model$model)
modelScopeMean_12 <- colMeans(tail(model$model,n = 12))
depVar <- parametersDf$VariableName[parametersDf$Bucket=="Dependent"]
df <- NULL
contribution <- NULL
elasticity <- NULL
elasticity_12 <- NULL
for(name in names(model$coefficients)){
if(name == "(Intercept)"){
# Intercept contribution relative to the mean of the dependent variable.
contribution[name] <- (model$coefficients[name] *100)/modelScopeMean[depVar]
} else {
contribution[name] <- model$coefficients[name]*(modelScopeMean[name]/modelScopeMean[depVar])* 100
# Linear terms (and dummies) use a simple mean-ratio elasticity;
# transformed terms use the power-curve formula below.
if(length(parametersDf$Transformation[parametersDf$VariableName == name]) != 0 && parametersDf$Transformation[parametersDf$VariableName == name] == "Linear" || grepl("Dummy",name)){
elasticity[name] <- 5*model$coefficients[name]*(modelScopeMean[name]/modelScopeMean[depVar])
elasticity_12[name] <- 5*model$coefficients[name]*(modelScopeMean_12[name]/modelScopeMean_12[depVar])
} else {
# Pull the decay and power values back out of the generated series name
# (suffixes _D<value> / _P<value>). decayValue is computed but not used
# below -- NOTE(review): confirm whether that is intentional.
decayValue <- as.numeric(gsub("D","",str_extract(name,"D\\d+.\\d+")))
powerValue <- as.numeric(gsub("P","",str_extract(name,"P\\d+.\\d+")))
elasticity[name] <- (((1.01^(powerValue))-1)*100*(model$coefficients[name])*(modelScopeMean[name])/(modelScopeMean[depVar]))*5
elasticity_12[name] <- (((1.01^(powerValue))-1)*100*(model$coefficients[name])*(modelScopeMean_12[name])/(modelScopeMean_12[depVar]))*5
}
}
}
# Leading 0s align the intercept row, which has no VIF/elasticity.
parameterDetails <- cbind(tidy(model),contribution = contribution,VIF=c(0,vif(model)),Elasticity_Modelling_Period = c(0,elasticity),Elasticity_L12_Modelling_Period = c(0,elasticity_12))
return(parameterDetails)
}
#get data for transformation
getDataForTransformation <- function(RegDataTemp, amTransDataList, parametersDf, bucketData, startDate, endDate, baseFormula, rankType) {
  # Run every candidate regression over the scoped data and package the
  # (optionally ranked) results for the UI layer.
  #
  # Returns a list with elements Models (summary table, possibly with
  # Model_Rank/Model_Score columns, or the sentinel when nothing survived),
  # ModelScopeDf, finalDf and modelList.
  modelScopeDf <- getRegDataTable(RegDataTemp, amTransDataList, parametersDf, startDate, endDate)
  # Ashutosh: parametersDf is passed through so allPossibleRegressions() can
  # apply the user-specified T-stat filters per variable.
  regOutput <- allPossibleRegressions(modelScopeDf = modelScopeDf, baseFormula, parametersDf)
  modelTable <- regOutput$result

  # Keep the element order stable for downstream consumers.
  packageResult <- function(models) {
    list(
      Models = models,
      ModelScopeDf = modelScopeDf,
      finalDf = modelScopeDf,
      modelList = regOutput$ModelList
    )
  }

  if (length(modelTable) == 1) {
    # Sentinel (no model survived the filters): pass it through unchanged so
    # the caller can prompt the user to change parameters.
    return(packageResult(modelTable))
  }
  rankInfo <- rankModels(data = modelTable, rankType)
  if (length(rankInfo) != 1) {
    rankedTable <- cbind(modelTable, Model_Rank = rankInfo$rank, Model_Score = rankInfo$score)
    rankedTable$Model_Score <- round(rankedTable$Model_Score, digits = 2)
    return(packageResult(rankedTable))
  }
  # Too few rows to rank: hand the unranked table back.
  return(packageResult(modelTable))
}
# Function to get data table for linear regression input with specified period range.
getRegDataTable <- function(RegDataTemp, amTransDataList, parametersDf, startDate, endDate) {
  # Assemble the regression input table: untransformed ("Linear") variables
  # taken straight from the uploaded sheet, transformed variables pulled from
  # the transformation cache, plus a parsed `period` column; finally the rows
  # are clipped to the requested modelling window.
  #
  # RegDataTemp row 1 is a header/marker row and is dropped throughout.
  linearNames <- parametersDf$VariableName[which(parametersDf$Transformation == "Linear")]
  linearDf <- as.data.frame(RegDataTemp[-1, ][, linearNames])
  names(linearDf) <- linearNames
  # First column of the sheet holds day-month-year dates.
  periodCol <- lubridate::dmy(RegDataTemp[-1, 1])
  if (any(as.character(parametersDf$Transformation) != "Linear")) {
    transformedDf <- getTransformedVariables(amTransDataList, parametersDf)
    fullDf <- cbind(period = periodCol, linearDf, transformedDf)
  } else {
    fullDf <- cbind(period = periodCol, linearDf)
  }
  modelScopeDf <- applyModellingPeriod(fullDf, startDate, endDate)
  return(modelScopeDf)
}
# Refit a candidate model with the active dummy variables added.
#
# model:                fitted candidate lm object (without dummies).
# modelScopeDummyTable: data frame with a Period column plus one 0/1 column
#                       per dummy; dummies that are all-zero are dropped.
# RegDataTemp:          uploaded sheet whose first row marks the dependent
#                       column with the literal value "Dependent".
# modelScopeDf:         regression table with a `period` column; rows are
#                       clipped to the dummy table's Period range.
#
# Returns the refitted lm object (dependent ~ original terms + dummies).
getDummyModelResult <- function(model, modelScopeDummyTable, RegDataTemp, modelScopeDf){
# FIX: wrap grepl() in any(). grepl() returns one logical per coefficient
# name, and a length > 1 condition in if() is an error as of R 4.2; the
# intent is simply "does the model have an intercept term".
if(any(grepl("Intercept",names(coef(model))))){
candidateModelVar <- names(coef(model))[-1]
}else{
candidateModelVar <- names(coef(model))
}
# Candidate regressors restricted to the dummy table's date window.
candidateModelDf <- as.data.frame(modelScopeDf[which(modelScopeDf$period >= min(modelScopeDummyTable$Period) & modelScopeDf$period <= max(modelScopeDummyTable$Period)),which(names(modelScopeDf) %in% as.character(candidateModelVar))])
names(candidateModelDf) <- as.character(candidateModelVar)
# Keep only dummies that are actually switched on somewhere (non-zero sum).
dummyDFTable <- as.data.frame(modelScopeDummyTable[,which(names(modelScopeDummyTable) %in% names(which(apply(modelScopeDummyTable[,-1],2,sum)!=0)))])
names(dummyDFTable) <- names(which(apply(modelScopeDummyTable[,-1],2,sum)!=0))
# The dependent variable is the column whose first-row marker is "Dependent".
modelScopeDfDepVar <- names(unlist(sapply(RegDataTemp[1,], function(x) which(x == "Dependent"))))
modelScopeDfDep <- data.frame(modelScopeDf[which(modelScopeDf$period >= min(modelScopeDummyTable$Period) & modelScopeDf$period <= max(modelScopeDummyTable$Period)), names(modelScopeDf) %in% modelScopeDfDepVar], stringsAsFactors = FALSE)
colnames(modelScopeDfDep) <- modelScopeDfDepVar
candidateModelScopeDf <- cbind(modelScopeDfDep, candidateModelDf, dummyDFTable)
candidateModelScopeDf <- as.data.frame(lapply(candidateModelScopeDf, function(x) as.numeric(as.character(x))))
candidateModelVarList <- list()
NonDependent <- names(candidateModelDf)
DummyVar <- names(dummyDFTable)
NonDependent <- append(NonDependent, DummyVar)
candidateModelVarList[["NonDependent"]] <- NonDependent
candidateModelVarList[["Dependent"]] <- names(modelScopeDfDep)
baseFormula <- as.formula(paste0(candidateModelVarList$Dependent," ~ ", paste0(unlist(candidateModelVarList$NonDependent),collapse = "+")))
modelDummy <- lm(formula = baseFormula, data = candidateModelScopeDf)
return(modelDummy)
}
# Append the newest (dummy-augmented) model to the results table and re-rank.
#
# allModelsResults: list of fitted models; the LAST element is the new one.
# model.index:      row index of the parent candidate in resultTable.
# resultTable:      current summary table of candidates.
# allModelsList:    per-candidate counters of how many dummy variants exist.
# rankType:         ranking scheme passed through to rankModels().
#
# Returns list(ranked_result = updated+ranked table,
#              allModelsList  = updated counters).
updateAllModelsResults <- function(allModelsResults,model.index,resultTable, allModelsList, rankType){
# Fit statistics of the newly added model (last list element).
result <- as.data.frame(t(extractModelParameterValue(allModelsResults[[length(allModelsResults)]])))
# Recover the parent candidate number from its "CANDIDATE_<n>..." label.
modelNumber <- strsplit(as.character(resultTable$`Model No`[model.index]), split = "_")
modelNumber <- as.numeric(modelNumber[[1]][2])
# Bump this candidate's dummy-variant counter.
allModelsList[[modelNumber]] <- allModelsList[[modelNumber]]+1
result <- cbind(nrow(resultTable)+1 ,Model_No = paste0("CANDIDATE_",modelNumber,"_Dummy_",as.numeric(allModelsList[[modelNumber]])), result)
colnames(result) <- c("Index","Model No","%R2","%R2.adj","2-DW","T.stat.avg","VIF.Avg","RootMSE","F_Stat","MAPE")
result$`%R2` <- sapply(result$`%R2`, function(x) x <- round((x * 100),digits = 2))
result$`%R2.adj` <- sapply(result$`%R2.adj`, function(x) x <- round((x * 100),digits = 2))
# If resultTable already carries Model_Rank/Model_Score columns, strip them
# before binding so the columns line up.
# NOTE(review): the literal 8 as "no rank columns yet" looks inconsistent
# with the 10 base columns defined above -- confirm which table shapes
# actually reach this function.
if(length(resultTable)==8){
result <- rbind(resultTable, result)
} else {
result <- rbind(resultTable[,!names(resultTable) %in% c("Model_Rank","Model_Score")], result)
}
rankResultList <- rankModels(result, rankType)
ranked_result <- cbind(result, Model_Rank = rankResultList$rank, Model_Score = rankResultList$score)
ranked_result$Model_Score <- round(ranked_result$Model_Score,digits = 2)
resultList <- as.list(NULL)
resultList[["ranked_result"]] <- ranked_result
resultList[["allModelsList"]] <- allModelsList
return(resultList)
}
# Fit one lm per candidate formula, filter the fits by the user's T-stat
# directions, and return both the surviving models and their summary table.
#
# modelScopeDf: regression table; the `period` column is dropped and all
#               remaining columns are coerced to numeric before fitting.
# baseFormula:  list of formula strings from buildFormulaList().
# parametersDf: variable configuration (TstatDir filters).
#
# Returns list(ModelList = surviving lm objects, result = summary table).
allPossibleRegressions <- function(modelScopeDf, baseFormula, parametersDf){
# NOTE(review): `n` is computed but never used in this body.
n <- nrow(modelScopeDf)
modelScopeDfFinal <- modelScopeDf
modelScopeDfFinal$period <- NULL
modelScopeDfFinal <- as.data.frame(lapply(modelScopeDfFinal, function(x) as.numeric(as.character(x))))
# NOTE(review): `<<-` publishes the unfiltered fits as a global; other parts
# of the app (e.g. Shiny reactives) may depend on that side effect --
# confirm before changing to a local assignment. The `data` argument of the
# anonymous function is ignored; the data frame is captured lexically.
allModelsResults <<- lapply(baseFormula,function(x, data) lm(x, data=modelScopeDfFinal),data=modelScopeDfFinal)
allModelsResults1 <- modelFilterByTStat(allModelsResults, parametersDf)
allModelsResults <- allModelsResults1
result <- NULL
result[["ModelList"]] <- allModelsResults
result[["result"]] <- extractModelParameter(allModelsResults)
return(result)
}
# Function to extract model parameters to display on screen
extractModelParameter <- function(allModelsResults) {
  # Turn a list of fitted lm objects into the candidate summary table shown
  # on screen (one row per model, labelled CANDIDATE_<n>).
  #
  # Returns the table, or the sentinel 0 when no model survived filtering so
  # the caller can prompt the user to change parameters.
  n.models <- length(allModelsResults)
  if (n.models == 0) {
    return(0)
  }
  # Per-model fit statistics, flattened into one row each.
  statsPerModel <- lapply(allModelsResults, extractModelParameterValue)
  summaryTable <- as.data.frame(matrix(unlist(statsPerModel), nrow = n.models, byrow = T))
  summaryTable <- cbind(index = c(1:nrow(summaryTable)), paste0("CANDIDATE_", 1:nrow(summaryTable)), summaryTable)
  rownames(summaryTable) <- NULL
  colnames(summaryTable) <- c("Index","Model No","%R2","%R2.adj","2-DW","T.stat.avg","VIF.Avg","RootMSE","F_Stat","MAPE")
  # Express R-squared figures as rounded percentages.
  summaryTable$`%R2` <- sapply(summaryTable$`%R2`, function(x) x <- round((x * 100), digits = 2))
  summaryTable$`%R2.adj` <- sapply(summaryTable$`%R2.adj`, function(x) x <- round((x * 100), digits = 2))
  return(summaryTable)
}
# function to extract the model's fit statistics used to rank the model
# Extract the fit statistics used to rank a single model.
#
# fit: a fitted lm object.
#
# Returns a named numeric vector (R2, R2.adj, DurbinWatson, T.Stat.Avg,
# VIF.Avg, RootMSE, F_Stat, MAPE); NaN entries are replaced with 0.
# NOTE(review): depends on car::durbinWatsonTest, car::vif, broom::tidy and
# a mape() defined elsewhere. If summary(fit)$fstatistic is NULL (model
# with no predictors) the data.frame() call below would fail -- presumably
# such models never reach this point; confirm.
extractModelParameterValue <- function(fit) {
R2 <- summary(fit)$r.squared
R2.adj <- summary(fit)$adj.r.squared
# Distance of the Durbin-Watson statistic from its ideal value of 2
# (element [[2]] of car::durbinWatsonTest is the DW statistic).
dw <- abs(2-durbinWatsonTest(fit)[[2]])
model_t_stat_avg <- mean(abs(tidy(fit)$statistic))
VIF.Avg <- mean(abs(vif(fit)))
RootMSE <- sqrt(mean(fit$residuals^2))
F_Stat <- round(summary(fit)$fstatistic[1],digits = 5)
MAPE <- mape(y = fit$fitted.values, x = fit$model[,1])
out <- data.frame(R2=R2, R2.adj=R2.adj,DurbinWatson=dw, T.Stat.Avg = model_t_stat_avg, VIF.Avg = VIF.Avg,RootMSE = RootMSE, F_Stat = F_Stat, MAPE = MAPE)
# Replace NaN statistics with 0 so downstream ranking never sees NaN.
out <- sapply(out,function(x) if(!is.nan(x)) {x <- x}
else{x <- 0}
)
return(out)
}
# check the t-stat of each directed (TstatDir) variable and filter out non-conforming models from allModelsResults.
# Keep only the models whose t-statistics respect the per-variable sign and
# magnitude constraints (TstatDir) supplied by the user.
#
# allModelsResults: list of fitted lm objects.
# parametersDf:     variable configuration; column 1 is VariableName and
#                   TstatDir holds the signed t-stat threshold (0 = no
#                   constraint).
#
# Returns the filtered model list (unchanged when no constraints are set).
modelFilterByTStat <- function(allModelsResults,parametersDf){
# Extract the variables that carry a non-zero TstatDir constraint.
tstatParameterDF <- parametersDf[which(parametersDf$TstatDir != 0) ,c(1,which(colnames(parametersDf)=="TstatDir"))]
# Check one model against one constraint row: for a positive TstatDir the
# term's t-stat must exceed the threshold; for a negative TstatDir it must
# be negative with |t| above |threshold|. Returns the model's index when the
# model passes (or does not contain the constrained variable at all), and
# NULL otherwise.
tStatCheck <- function(modelIndex, tstatParam){
modelData <- allModelsResults[[modelIndex]]
# Columns 1 and 4 of broom::tidy(): term name and t-statistic.
modelDf <- tidy(modelData)[c(1,4)]
# Strip the generated _L<lag>... suffixes to recover base variable names.
modelTerm <- gsub("_L+[0-9].*", "", modelDf$term)
flag <- 0
if(all(tstatParam$VariableName %in% modelTerm)){
for (i in 1:nrow(tstatParam)) {
if(tstatParam$TstatDir[i] < 0 & round(modelDf[grep(tstatParam$VariableName[i], modelTerm),2],5) < 0 & abs(round(modelDf[grep(tstatParam$VariableName[i], modelTerm),2],5)) > abs(round(as.numeric(as.character(tstatParam$TstatDir[i])),2))){
flag <- flag + 1
}else if(tstatParam$TstatDir[i] > 0 & round(modelDf[grep(tstatParam$VariableName[i], modelTerm),2],5) > round(as.numeric(as.character(tstatParam$TstatDir[i])),2)){
flag <- flag + 1
}
}
if(flag == nrow(tstatParam)){
return(modelIndex)
}
}else {
# Models that do not include the constrained variable pass automatically.
return(modelIndex)
}
}
if(nrow(tstatParameterDF) >= 1){
# Evaluate every model against every constraint row, then keep only the
# model indices that satisfy ALL constraints (set intersection).
tstatModel <- lapply(1:nrow(tstatParameterDF),function(y){
unlist(sapply(1:length(allModelsResults), function(x, tstatParam){tStatCheck(x,tstatParam)}, tstatParam = tstatParameterDF[y,]))
})
tstatFilteredModelIndex <- Reduce(intersect, tstatModel)
tstatFinalModel <- allModelsResults[tstatFilteredModelIndex]
return(tstatFinalModel)
}else{
return(allModelsResults)
}
}
#Function definition to Ranking the Model based on score
rankModels <- function(data, rankType) {
  # Rank candidate models by composite fit statistics.
  #
  # data:     candidate summary table produced by extractModelParameter();
  #           columns include Index, "%R2", "%R2.adj", "2-DW", "T.stat.avg",
  #           "RootMSE" and "F_Stat".
  # rankType: "Ranking1" or "Ranking2", selecting one of two scoring schemes.
  #
  # Returns list(rank = ..., score = ...) aligned with the input rows, or
  # the sentinel list(0) when there are fewer than two models to rank.
  if (nrow(data) <= 1) {
    return(list(0))
  }
  if (rankType == "Ranking1") {
    # Ranking formula used by Nimish's team: score = %R2 - %R2.adj (smaller
    # is better), ties broken by higher F statistic.
    factors_for_ranking <- data
    factors_for_ranking$RankAverage <- factors_for_ranking$`%R2` - factors_for_ranking$`%R2.adj`
    factors_for_ranking <- factors_for_ranking[order(factors_for_ranking$RankAverage, -factors_for_ranking$F_Stat), ]
    factors_for_ranking <- within(factors_for_ranking, FinalRankForModels <- rank(order(factors_for_ranking$RankAverage, -factors_for_ranking$F_Stat), ties.method = 'average'))
    # Restore the original row order so ranks line up with the input table.
    factors_for_ranking <- factors_for_ranking[order(factors_for_ranking$Index), ]
    return(list(rank = factors_for_ranking$FinalRankForModels, score = factors_for_ranking$RankAverage))
  } else if (rankType == "Ranking2") {
    # Ranking formula used by Sounava's team: average the per-statistic ranks.
    # BUG FIX: this branch previously read data$R2, data$R2.adj,
    # data$T.stat.avg, data$DW and data$RootMSE, but the summary table's
    # actual column names are "%R2", "%R2.adj", "T.stat.avg", "2-DW" and
    # "RootMSE", so every `$` lookup returned NULL and the branch failed.
    factors_for_ranking <- data
    # Higher is better for R2, adjusted R2 and the average |t|-statistic:
    # rank in descending order.
    rank_desc <- as.data.frame(
      apply(cbind(factors_for_ranking[["%R2"]],
                  factors_for_ranking[["%R2.adj"]],
                  factors_for_ranking[["T.stat.avg"]]),
            2, FUN = function(x) {
              rank(-x, ties.method = "average")
            }))
    # "2-DW" already stores |2 - DW| (see extractModelParameterValue), so a
    # smaller value means the Durbin-Watson statistic is closer to its ideal
    # of 2; rank it ascending together with RootMSE (smaller error better).
    rank_asc <- as.data.frame(
      apply(cbind(factors_for_ranking[["RootMSE"]],
                  factors_for_ranking[["2-DW"]]),
            2, FUN = function(x) {
              rank(x, ties.method = "average")
            }))
    all_ranks <- cbind(rank_desc, rank_asc)
    # Score = mean rank across the five statistics; the final model rank is
    # the (floored) rank of that score.
    factors_for_ranking$RankAverage <- apply(all_ranks, 1, mean)
    factors_for_ranking <- transform(
      factors_for_ranking, FinalRankForModels = rank(factors_for_ranking$RankAverage, ties.method = "average")
    )
    factors_for_ranking$FinalRankForModels <- floor(factors_for_ranking$FinalRankForModels)
    return(list(rank = factors_for_ranking$FinalRankForModels, score = factors_for_ranking$RankAverage))
  }
}
sortModelResult <- function(modelResult, flag) {
  # When flag is truthy (TRUE, or 1 as some callers pass), order the
  # candidate table by its Model_Rank column; otherwise return it untouched.
  # The `== TRUE` comparison is kept deliberately so numeric 1 still counts.
  if (flag == TRUE) {
    return(arrange(modelResult, Model_Rank))
  }
  return(modelResult)
}
# Function to create bucket wise variable name in data frame to filter the model based on variables.
createBucketVarData <- function(amInputBuckets){
  # Build a bucket-by-variable lookup table: one column per (non-Dependent)
  # bucket, each holding that bucket's variable names, NA-padded so every
  # column has equal length. Used to filter models by variable.
  #
  # amInputBuckets: table with at least `bucket` and `variableName` columns
  #                 (split(..., by=) implies a data.table).
  #
  # BUG FIX: the row filter previously used `amInputBuckets[!which(...),]`;
  # negating the integer positions from which() yields a short logical
  # vector, which after recycling dropped EVERY row instead of only the
  # Dependent bucket. A plain logical filter keeps the intended rows.
  amInputBuckets <- amInputBuckets[amInputBuckets$bucket != "Dependent",]
  # data.table split: one element per bucket, without the bucket column.
  bucketList <- split(amInputBuckets, by = "bucket",keep.by = FALSE)
  ## Compute maximum length of each bucket
  bucketVarLength <- as.vector(NULL)
  for (i in seq_along(bucketList)) {
    bucketVarLength <- append(bucketVarLength, length(bucketList[[i]]$variableName))
  }
  max.bucketLen <- max(bucketVarLength)
  ## Add NA values to list elements so all buckets reach the same length
  for (i in seq_along(bucketList)) {
    bucketList[[i]] <- lapply(bucketList[[i]], function(v) { c(v, rep(NA, max.bucketLen-length(v)))})
  }
  bucketVar <- as.data.frame.list(bucketList)
  colnames(bucketVar) <- names(bucketList)
  return(bucketVar)
}
# Function to extract variable name from all generated models through regression.
# Build a table of the variable names used by each candidate model.
#
# allModelsResults: list of fitted lm objects.
#
# Returns a data frame with one row per model: column Var1 is the model's
# index, the remaining columns its variable names (dependent first, as they
# appear in model$model), NA-padded to a common width. Dummy variables
# ("Dummy_Var...") are blanked to NA, and for multi-model input the
# generated _L<lag>... suffixes are stripped so only base names remain.
extractModelVarName <- function(allModelsResults){
modelVarNames <- list()
# extracting model variable with model index from all model results.
for (i in 1:length(allModelsResults)) {
test <- allModelsResults[[i]]
temp <- names(test$model)
# Dummy columns are not real variables; blank them out.
temp[grep("Dummy_Var",temp)] <- NA
#modelVarNames[[i]] <- c(i,names(test$model))
modelVarNames[[i]] <- c(i,temp)
}
## Compute maximum length
max.length <- max(sapply(modelVarNames, length))
## Add NA values to model variable list elements to make same length
modelVarNames <- lapply(modelVarNames, function(v) { c(v, rep(NA, max.length-length(v)))})
modelVar <- do.call(rbind.data.frame, modelVarNames)
colnames(modelVar) <- paste("Var", 1:ncol(modelVar), sep="")
if(length(allModelsResults)== 1){
return(modelVar)
}else {
# removing lag, decay parts from model variable names.
modelVar <- as.data.frame(apply(modelVar, 2, function(x){
x <- gsub("_L.*","",as.character(x))
}))
return(modelVar)
}
}
# Function to extract allModelResults filtered by variable given by user.
# Filter the candidate summary table down to models that contain ALL of the
# user-selected variables.
#
# bucketSelectedVarName: character vector of selected base variable names.
# modelVar:              per-model variable table from extractModelVarName()
#                        (column Var1 is the model index).
# allModelsResults:      list of fitted models (unused here; kept for
#                        signature parity).
# resultTable:           candidate summary table to subset.
#
# Returns the matching rows (statistic columns rounded to 5 digits), or an
# empty data frame when nothing matches or the selection size is out of the
# supported range (2 .. ncol(modelVar)-1).
extractFilterModelResults <- function(bucketSelectedVarName, modelVar, allModelsResults, resultTable){
test <- bucketSelectedVarName
filterResultTable <- data.frame()
if(length(test)< length(modelVar) & length(test)>=2){
# Row-wise membership mask over the variable columns (Var2..), then count
# how many of the selected variables each model contains.
testResult <- as.data.frame(t(apply(modelVar[,2:(length(test)+1)], 1, function(x){
x %in% test
})))
testResult <- cbind(ModelIndex = modelVar$Var1, testResult)
testResult$TrueNumber <- apply(testResult[,2:(length(test)+1)],1,function(x){
length(which(x==TRUE))
})
# Keep only models containing every selected variable.
modelFilter <- as.numeric(as.character(testResult$ModelIndex[which(testResult$TrueNumber == length(test))]))
if(length(modelFilter) == 0){
return(filterResultTable)
}else{
filterResultTable <- resultTable[modelFilter, ]
# Round the numeric statistic columns for display.
filterResultTable[,3:7] <- sapply(filterResultTable[,3:7], function(x) round(x, digits = 5))
return(filterResultTable)
}
}else{
return(filterResultTable)
}
}
# Function to extract top model for each variable combination.
extractVarCombModel <- function(modelVarCombList, modelVarCombIndex, rankType){
# Summarise and rank the models that share one variable combination, and
# return the single top row.
#
# modelVarCombList:  list of fitted lm objects for this combination.
# modelVarCombIndex: candidate index used to label the rows.
# rankType:          ranking scheme passed through to rankModels().
n.models <- length(modelVarCombList)
# calling the function to extract the parameter of each model to rank.
result <- lapply(modelVarCombList, extractModelParameterValue)
result <- as.data.frame(matrix(unlist(result), nrow=n.models, byrow=T))
# BUG FIX: use paste0() so the label is "CANDIDATE_<n>" exactly as produced
# everywhere else in this file; paste() inserted a spurious space
# ("CANDIDATE_ <n>") which broke label comparisons downstream.
result <- cbind(paste0("CANDIDATE_",modelVarCombIndex), result)
rownames(result) <- NULL
colnames(result) <- c("Model No","%R2","%R2.adj","2-DW","T.stat.avg","VIF.Avg","RootMSE", "F_Stat","MAPE")
result$`%R2` <- sapply(result$`%R2`, function(x) x <- round((x * 100),digits = 2))
result$`%R2.adj` <- sapply(result$`%R2.adj`, function(x) x <- round((x * 100),digits = 2))
if(nrow(result)<=1){
ranked_result = result
}else {
rankResult <- rankModels(result, rankType)
ranked_result <- cbind(result, Model_Rank = rankResult$rank, Model_Score = rankResult$score)
}
if(nrow(ranked_result) >= 2){
# NOTE(review): desc(Model_Score) keeps the HIGHEST score; for both
# ranking schemes a lower score appears to mean a better model -- confirm
# this ordering is intentional before relying on "top" here.
topModelVarCombResult <- arrange(ranked_result, desc(Model_Score))
return(topModelVarCombResult[1,])
}else{
return(ranked_result)
}
}
# function to get the unique variable combination table.
getModelVarTable <- function(allModelsResults) {
  # Unique variable combinations across all candidate models, as a character
  # data frame (the model-index column is dropped first).
  varTable <- extractModelVarName(allModelsResults)
  combTable <- varTable[, -1]
  combTable <- combTable[!duplicated(combTable), ]
  if (nrow(combTable) == 1) {
    # apply() collapses a single row; transpose to keep it as one row.
    combTable <- as.data.frame(t(apply(combTable, 2, as.character)), stringsAsFactors = FALSE)
  } else {
    combTable <- as.data.frame(apply(combTable, 2, as.character), stringsAsFactors = FALSE)
  }
  return(combTable)
}
# function to extract the top model for each variable combination in the list.
# For each unique variable combination, find the best-ranked model and return
# the list of their "Model No" labels.
#
# allModelsResults:  list of fitted models.
# resultTableDetail: ranked candidate summary table (used when a combination
#                    maps to several models).
# resultTable:       unranked candidate summary table (used when a
#                    combination maps to exactly one model).
extractTopModelVariableCombResult <- function(allModelsResults, resultTableDetail, resultTable){
modelVarComb <- getModelVarTable(allModelsResults)
modelVar <- extractModelVarName(allModelsResults)
# One NA-free character vector of variable names per unique combination.
varCombList <- list()
for (i in 1:nrow(modelVarComb)) {
varCombList[[i]] <- modelVarComb[i,]
varCombList[[i]] <- unlist(lapply(varCombList[[i]], na.omit))
}
# NOTE(review): `<<-` publishes varCombList as a global; other parts of the
# app may read it -- confirm before making this a local assignment.
varCombList <<- varCombList
varCombModelList <- list()
# Collect, for every combination, the indices of all models using exactly
# those variables (membership mask + count, as in extractFilterModelResults).
for (i in 1:length(varCombList)) {
test <- varCombList[[i]]
testResult <- as.data.frame(t(apply(modelVar[,2:(length(test)+1)], 1, function(x){
x %in% test
})))
testResult <- cbind(ModelIndex = modelVar$Var1, testResult)
testResult$TrueNumber <- apply(testResult[,2:(length(test)+1)],1,function(x){
length(which(x==TRUE))
})
varCombModelList[[i]] <- as.numeric(as.character(testResult$ModelIndex[which(testResult$TrueNumber == length(test))]))
}
# NOTE(review): the bare expression below is a no-op outside interactive use.
varCombModelList
modelCombList <- list()
# Pick the top model per combination: rank-sorted first row when several
# models share the combination, otherwise the single model's label.
for (i in 1:length(varCombModelList)) {
if(length(varCombModelList[[i]])>1){
modelCombList[i] <- sortModelResult(resultTableDetail[varCombModelList[[i]],],flag = 1)[1,1]
}else {
modelCombList[i] <- resultTable[varCombModelList[[i]][1],1]
}
}
return(modelCombList)
}
# function to compare model.
compareModelResult <- function(s, allModelsResults, resultTableDetail, parametersDf) {
  # Stack the elasticity/contribution rows of every selected model into one
  # comparison table, tagging each row with its model label.
  #
  # s: integer indices of the selected models (rows of resultTableDetail and
  #    positions in allModelsResults).
  pickedModels <- allModelsResults[s]
  detailRows <- vector("list", length(pickedModels))
  for (i in 1:length(pickedModels)) {
    detail <- data.frame(getElasticity(pickedModels[[i]], parametersDf = parametersDf), row.names = NULL)
    # Drop columns 3, 5 and 6 of the elasticity output, keep the rest.
    detail <- detail[, -c(3, 5, 6)]
    detail$Model <- resultTableDetail[s[i], c("Model No")]
    # Put the model label first.
    detailRows[[i]] <- detail[, c(7, 1:6)]
  }
  compareModel <- do.call(rbind, detailRows)
  return(compareModel)
}
# Build the printable "REG Procedure" report for one model plus its
# actual-vs-predicted data.
#
# model:             fitted lm object (possibly dummy-augmented).
# modelScopeDf:      regression table with a `period` column.
# parametersDf:      variable configuration, forwarded to getElasticity().
# modelResult:       the model's summary row (column 2 holds "Model No").
# RegDataTemp:       uploaded sheet; row count (minus header) is reported as
#                    observations read.
# dummyModelScopeDf: per-dummy-model table with Model_No/Start_Date/End_Date,
#                    used to clip the scope for dummy models.
#
# Returns list(modelDetails = CSV-style report split into columns,
#              actPredData  = actual vs predicted data frame).
# NOTE(review): depends on splitstackshape::cSplit; the report is built by
# capturing write.csv() output line by line.
extractModelDetail <- function(model, modelScopeDf, parametersDf, modelResult, RegDataTemp, dummyModelScopeDf){
tmpModelScopeDf <- modelScopeDf
# Dummy models were fitted on a narrower window; clip the scope to match.
if(any(grepl("Dummy",names(model$coefficients)))){
dummyModelIndex <- which(as.character(dummyModelScopeDf$Model_No) == modelResult[,2])
tmpModelScopeDf <- subset(tmpModelScopeDf, period >= dummyModelScopeDf[dummyModelIndex,"Start_Date"] & period <= dummyModelScopeDf[dummyModelIndex,"End_Date"])
}
modelParameters <- getElasticity(model,parametersDf)
# Assemble the report as plain text lines; tables are rendered by capturing
# their CSV output so cSplit below can split them back into columns.
output <- NULL
output <- c(output,"The REG Procedure")
output <- c(output,"\n\n")
output <- c(output,paste("Model:",modelResult$`Model No`))
output <- c(output,paste("Dependant Variable:",names(model$model[1])))
output <- c(output,"\n\n")
output <- c(output,paste("Number of Observations Read:",nrow(RegDataTemp[-1,])))
output <- c(output,paste("Number of Observations Used:",nrow(tmpModelScopeDf)))
output <- c(output,"\n\n")
output <- c(output,noquote(capture.output(write.csv(modelResult,stdout(),row.names = F,quote = F))))
output <- c(output,"\n\n")
output <- c(output,noquote(capture.output(write.csv(modelParameters,file = stdout(),row.names = F,quote = F))))
output <- c(output,"\n\n")
output <- as.data.frame(output,quote=F)
colnames(output) <- "output"
resultOutput <- list()
resultOutput[["modelDetails"]] <- cSplit(output,"output",sep = ",",type.convert = F)
resultOutput[["actPredData"]] <- getActualVsPredictedDf(tmpModelScopeDf,model)
return(resultOutput)
}
extractModelData <- function(model, modelScopeDummyTable, modelScopeDf, parametersDf, dummyModelScopeDf, modelResult) {
  # Return the model's design data (dependent + regressors) prefixed with a
  # Period column. For dummy-augmented models the periods are clipped to the
  # dummy model's Start_Date/End_Date window from dummyModelScopeDf.
  hasDummyTerms <- any(grepl("Dummy", names(model$coefficients)))
  if (hasDummyTerms) {
    rowIdx <- which(as.character(dummyModelScopeDf$Model_No) == modelResult[, 2])
    inWindow <- which(modelScopeDf$period >= dummyModelScopeDf[rowIdx, "Start_Date"] &
                        modelScopeDf$period <= dummyModelScopeDf[rowIdx, "End_Date"])
    Period <- modelScopeDf[inWindow, "period"]
  } else {
    Period <- modelScopeDf$period
  }
  # cbind names the first column "Period" from the variable's symbol.
  return(cbind(Period, model$model))
}
################################################################################
##################### OLS manual process Acquire ###########################
################################################################################
##################### Function related to Model Manager ########################
olsm_createModelManagerData <- function(olsm_SelectedVar) {
  # Build the default Model Manager grid for the OLS manual workflow: one
  # row per selected variable, every setting initialised to its default.
  #
  # olsm_SelectedVar: character vector of variable names.
  #
  # Returns a data frame (strings kept as characters) whose factor columns
  # carry the full set of choices the UI offers as levels.
  n_vars <- length(olsm_SelectedVar)
  int_col <- function(value) rep(as.integer(value), times = n_vars)
  num_col <- function(value) rep(as.numeric(value), times = n_vars)
  fct_col <- function(value, choices) factor(rep(value, times = n_vars), levels = choices)
  df <- data.frame(
    VariableName = olsm_SelectedVar,
    # Variable role; everything starts outside the model.
    Type = fct_col("Not in Model",
                   c("DepVar", "Manual No Trans", "Outside No Trans", "Fixed Var No Trans",
                     "Manual TOF", "Outside TOF", "Fixed Var TOF", "Not in Model")),
    # Functional form applied to the variable.
    Transformation = fct_col("Linear", c("Linear", "S-Curve", "S-origin", "Power", "Testing")),
    # Decay type.
    Decay = fct_col("Media", c("Media", "Promo")),
    # Lag bounds.
    LagMin = int_col(0),
    LagMax = int_col(0),
    # Decay grid.
    DecaySteps = int_col(1),
    DecayMin = num_col(1),
    DecayMax = num_col(1),
    # Alpha grid.
    AlphaSteps = int_col(1),
    AlphaMin = num_col(0),
    AlphaMax = num_col(0),
    # Beta settings.
    BetaMin = num_col(1),
    BetaMultiplier = int_col(0),
    BetaSteps = int_col(1),
    SeriesMax = num_col(1),
    # Normalization mode.
    Normalization = fct_col("None", c("None", "Division", "Subtraction")),
    # Min/max adjustment mode.
    Min_Max_Adjustment = fct_col("None", c("None", "Min", "Max", "Average")),
    Fixed_Coefficient = num_col(0),
    Combined_Column = int_col(0),
    # Mixed-effect flag (0/1).
    Random_Effect = fct_col(0, c(0, 1)),
    stringsAsFactors = F
  )
  return(df)
}
##################### Function related to Transformation #######################
# Build the transformed modelling data frame for one geography.
#
# olsm_RegDataModelList : named list of per-geography raw regression data
#   frames; the element for modelFeatureList$TransGeo is transformed here.
# olsm_parametersDF : one row per variable (shape of
#   olsm_createModelManagerData) with Type, Transformation, lag/decay/
#   alpha/beta settings and the decay family ("Media"/"Promo").
# modelFeatureList : run-level switches — adStockChoice ("AdStock First"/
#   "AdStock Last"), elasticity flags/value, S-curve SeriesMax and
#   variable-max scoping ("All" vs "Geo"), and the modelling period.
#
# Returns a data frame whose first columns are Geography/Period followed by
# one column per kept variable: untransformed for the "No Trans" types, a
# single lag+curve+adstock series for "Manual TOF"/"Fixed Var TOF", and the
# full lag x alpha x beta x decay grid for "Outside TOF" variables.
createOlsmTransformation <- function(olsm_RegDataModelList, olsm_parametersDF,modelFeatureList){
  olsm_RegDataModelDF <- olsm_RegDataModelList[[modelFeatureList$TransGeo]]
  ## condition to check Transformation, Media Decay or Promo Decay and call respective functions
  callDecayfunctions <- function(i,olsm_parametersDF, df_laggedDT, decayMin){
    # i<- 1
    if(olsm_parametersDF[i,"Decay"] == "Media"){
      #capturing Media Decay data
      df_laggedDT <- olsmCalcMediaDecay(col = df_laggedDT,decay = decayMin)
    }else if(olsm_parametersDF[i,"Decay"] == "Promo"){
      #capturing Promo Decay data
      df_laggedDT <- olsmCalcPromoDecay(df_laggedDT,decayMin)
    }
    return(df_laggedDT)
  }
  # Drop columns for variables flagged "Not in Model".
  varsToBeDropped <- NULL
  varsToBeDropped <- olsm_parametersDF$VariableName[olsm_parametersDF$Type == "Not in Model"]
  if(length(varsToBeDropped)!=0){
    tempData <- olsm_RegDataModelDF[,-which(colnames(olsm_RegDataModelDF) %in% varsToBeDropped)]
  }else {
    tempData <- olsm_RegDataModelDF
  }
  # Seed the result with the identifier columns; transformed series are
  # cbind-ed on as each variable is processed.
  TransformedDf <- tempData[,which(names(tempData) %in% c("Geography","Period"))]
  for(i in 1:nrow(olsm_parametersDF)){
    # i =4
    name <- as.character(olsm_parametersDF[i,"VariableName"])
    olsm_varDetails <- as.list(olsm_parametersDF[i,])
    olsm_varDetails[["elasticityFlag"]] <- modelFeatureList$elasticityFlag
    olsm_varDetails[["elasticityValue"]] <- modelFeatureList$elasticityValue
    olsm_varDetails[["elasticityL12Flag"]] <- modelFeatureList$elasticityL12Flag
    # this will execute only for scurve and sorigin transformation for SeriesMax and ScurveVarMax
    if(any(as.character(olsm_parametersDF[i,"Transformation"]) %in% c("S-Curve","Testing","S-origin")) & olsm_parametersDF[i,"Type"] != "Not in Model" ){
      # Changes included to implement SeriesMax by geography or All.
      if(modelFeatureList$ScurveSeriesMaxChoice == "All"){
        olsm_varDetails[["SeriesMax"]] <- as.numeric(as.character(olsm_parametersDF$SeriesMax[i]))
      }else if(modelFeatureList$ScurveSeriesMaxChoice == "Geo"){
        seriesMaxDF <- modelFeatureList$ScurveSeriesMaxDF
        olsm_varDetails[["SeriesMax"]] <- seriesMaxDF[seriesMaxDF$Geography == modelFeatureList$TransGeo & seriesMaxDF$VariableName == name,"SeriesMax"]
      }
      # Guard against division by zero in the S-curve denominator.
      if(olsm_varDetails[["SeriesMax"]] == 0){
        olsm_varDetails[["SeriesMax"]] = 0.01
      }
      # Taking raw data to calculate max which is used in s-curve formula.
      # Changes included to implement variable Max from raw data by geography or All.
      if(modelFeatureList$ScurveVarMaxChoice == "All"){
        olsm_varDetails[["ScruveVarMax"]] <- max(rbindlist(olsm_RegDataModelList)[,..name],na.rm = T)
      }else if(modelFeatureList$ScurveVarMaxChoice == "Geo"){
        olsm_varDetails[["ScruveVarMax"]] <- max(tempData[, name],na.rm = T)
      }
    }
    # S-Shaped_New --- Currently ME will not do any transformation for S-Shaped_New. Not implemented Yet.
    if(olsm_parametersDF[i,"Type"] %in% c("DepVar","Fixed Var No Trans","Manual No Trans","Outside No Trans")){
      # Untransformed types: pass the raw column through, optionally scaled
      # by the elasticity percentage.
      transVec <- as.data.frame(tempData[,colnames(tempData) %in% olsm_parametersDF[i,"VariableName"]])
      if(olsm_varDetails$elasticityFlag == TRUE){
        transVec <- transVec + (transVec*as.numeric(olsm_varDetails$elasticityValue/100))
      }
      colnames(transVec) <- name
      TransformedDf<-cbind(TransformedDf,transVec)
    } else
      if(olsm_parametersDF[i,"Type"] %in% c("Manual TOF","Fixed Var TOF")){
        # Manual/fixed TOF: a single transformed series using the Min values
        # of each grid (no search), built as lag -> curve/adstock below.
        transVec <- as.data.frame(tempData[, name])
        colnames(transVec) <- name
        #apply lag over the transVec data
        dfDT <- data.table(name = transVec)
        names(dfDT) <- name
        dfDTElastic <- dfDT
        # increase the full data by elasticity value if elasticityFlag is true.
        if(olsm_varDetails$elasticityFlag == TRUE){
          dfDTElastic <- dfDTElastic + (dfDTElastic*as.numeric(olsm_varDetails$elasticityValue/100))
        }
        ModellingPeriodData <- NULL
        # increase the last 12 months data by elasticity value if elasticityL12Flag is true.
        if(olsm_varDetails$elasticityL12Flag == TRUE){
          # Modelling Period
          ModellingPeriodData <- zoo::as.yearmon(dmy(modelFeatureList$modellingPeriod))
          # Af period
          tempL12 <- data.frame(Period = zoo::as.yearmon(dmy(tempData[,"Period"])),dfDT)
          ModellingPeriodData <- data.frame(Period = tempL12$Period[which(tempL12$Period %in% ModellingPeriodData)],stringsAsFactors = F)
          tempL12[which(tempL12$Period %in% tail(unique(ModellingPeriodData$Period),n = 12)),name] <- tempL12[which(tempL12$Period %in% tail(unique(ModellingPeriodData$Period),n = 12)),name] + (tempL12[which(tempL12$Period %in% tail(unique(ModellingPeriodData$Period),n = 12)),name] * as.numeric(olsm_varDetails$elasticityValue/100))
          dfDTElastic <- data.table(tempL12[,name])
          colnames(dfDTElastic) <- name
        }
        dfDT <- dfDTElastic
        # Shift the series down by LagMin rows (leading rows filled with 0).
        df_laggedDT <- dfDT[,(name):=shift(dfDT[[name]],olsm_varDetails$LagMin,fill = 0,type = "lag")]
        if(olsm_parametersDF[i,"Transformation"] == "S-Curve" | olsm_parametersDF[i,"Transformation"] == "Testing"){
          # adStockChoice controls whether decay is applied before or after
          # the S-curve; the curve formula itself is identical in both arms.
          if(modelFeatureList$adStockChoice == "AdStock First"){
            df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT, olsm_varDetails$DecayMin)
            if(olsm_varDetails$ScruveVarMax != 0){
              set(x = df_laggedDT,j = name,value = (as.numeric(olsm_varDetails$BetaMin)/(10^10))^(as.numeric(olsm_varDetails$AlphaMin)^((as.numeric(df_laggedDT[[name]])/(olsm_varDetails$ScruveVarMax * olsm_varDetails$SeriesMax))*100)))
            }else {
              df_laggedDT[[name]] <- 0
            }
          }else if(modelFeatureList$adStockChoice == "AdStock Last"){
            if(olsm_varDetails$ScruveVarMax != 0){
              set(x = df_laggedDT,j = name,value = (as.numeric(olsm_varDetails$BetaMin)/(10^10))^(as.numeric(olsm_varDetails$AlphaMin)^((as.numeric(df_laggedDT[[name]])/(olsm_varDetails$ScruveVarMax * olsm_varDetails$SeriesMax))*100)))
            }else {
              df_laggedDT[[name]] <- 0
            }
            df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT, decayMin = olsm_varDetails$DecayMin)
          }
          # Updating Testing transformation value with cap of 1 incase it is greater than 1.
          if(olsm_parametersDF$Transformation[i] == "Testing"){
            df_laggedDT[df_laggedDT > 1] <- 1
          }
          TransformedDf<-cbind(TransformedDf,as.data.frame(df_laggedDT))
        }else
          if(olsm_parametersDF[i,"Transformation"] == "S-origin"){
            # S-origin: same shape as S-Curve but shifted so the curve
            # passes through the origin (note the 10^9 scale and the
            # subtracted BetaMin/(10^9) term).
            if(modelFeatureList$adStockChoice == "AdStock First"){
              df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT, olsm_varDetails$DecayMin)
              if(olsm_varDetails$ScruveVarMax != 0){
                set(x = df_laggedDT,j = name,value = ((as.numeric(olsm_varDetails$BetaMin)/(10^9))^(as.numeric(olsm_varDetails$AlphaMin)^((as.numeric(df_laggedDT[[name]])/( olsm_varDetails$ScruveVarMax * olsm_varDetails$SeriesMax))*100)) - (as.numeric(olsm_varDetails$BetaMin)/(10^9))))
              }else {
                df_laggedDT[[name]] <- 0
              }
            }else if(modelFeatureList$adStockChoice == "AdStock Last"){
              if(olsm_varDetails$ScruveVarMax != 0){
                set(x = df_laggedDT,j = name,value = ((as.numeric(olsm_varDetails$BetaMin)/(10^9))^(as.numeric(olsm_varDetails$AlphaMin)^((as.numeric(df_laggedDT[[name]])/( olsm_varDetails$ScruveVarMax * olsm_varDetails$SeriesMax))*100)) - (as.numeric(olsm_varDetails$BetaMin)/(10^9))))
              }else {
                df_laggedDT[[name]] <- 0
              }
              df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT, olsm_varDetails$DecayMin)
            }
            TransformedDf<-cbind(TransformedDf,as.data.frame(df_laggedDT))
          }else
            if (olsm_parametersDF[i,"Transformation"] == "Power"){
              # Power: raise to AlphaMin, with decay before or after per
              # adStockChoice.
              if(modelFeatureList$adStockChoice == "AdStock First"){
                df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT,as.numeric(olsm_varDetails$DecayMin))
                set(df_laggedDT,j = name,value=df_laggedDT[[name]]^as.numeric(olsm_varDetails$AlphaMin))
              }else if(modelFeatureList$adStockChoice == "AdStock Last"){
                set(df_laggedDT,j = name,value=df_laggedDT[[name]]^as.numeric(olsm_varDetails$AlphaMin))
                df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT, as.numeric(olsm_varDetails$DecayMin))
              }
              TransformedDf<-cbind(TransformedDf,as.data.frame(df_laggedDT))
            }else
              if(olsm_parametersDF[i,"Transformation"] %in% c("Linear")){
                # Linear: adstock only, no curve.
                df_laggedDT <- callDecayfunctions(i,olsm_parametersDF, df_laggedDT, olsm_varDetails$DecayMin)
                TransformedDf<-cbind(TransformedDf,df_laggedDT)
              }
      } else
        if(olsm_parametersDF[i,"Type"] == "Outside TOF"){
          # Outside TOF: expand the variable into the full transformation
          # grid (all lag/alpha/beta/decay combinations).
          TransformedDf<-cbind(TransformedDf,createOlsmTransOutsideTOF(olsm_RegDataTemp = tempData, olsm_parametersDF,name,olsm_varDetails,modelFeatureList))
        }
  }
  return(TransformedDf)
}
# Expand one "Outside TOF" variable into its full transformation grid.
# Applies the optional elasticity scaling (whole series and/or last 12
# modelling months), builds the lagged copies via olsmCreateLagSeries, then
# dispatches to the matching grid-expansion helper depending on the
# transformation and on whether decay runs before ("AdStock First") or after
# ("AdStock Last") the curve. Returns a data frame with one column per
# lag x alpha x beta x decay combination.
createOlsmTransOutsideTOF <- function(olsm_RegDataTemp, olsm_parametersDF, name, olsm_varDetails,modelFeatureList){
  #tempolsm_varDetails <- olsm_varDetails
  TransformedOutsideTOF_Df <- NULL
  transVec <- olsm_RegDataTemp[,name]
  transElastic <- transVec
  # increase the full data elasticity value if elasticity flag is true.
  if(olsm_varDetails$elasticityFlag == TRUE){
    transElastic <- transVec + (transVec*as.numeric(olsm_varDetails$elasticityValue/100))
  }
  ModellingPeriodData <- NULL
  # increase the last 12 months data by elasticity value if elasticityL12Flag is true.
  if(olsm_varDetails$elasticityL12Flag == TRUE){
    ModellingPeriodData <- zoo::as.yearmon(dmy(modelFeatureList$modellingPeriod))
    tempL12 <- data.frame(Period = zoo::as.yearmon(dmy(olsm_RegDataTemp[,"Period"])),transElastic)
    ModellingPeriodData <- data.frame(Period = tempL12$Period[which(tempL12$Period %in% ModellingPeriodData)],stringsAsFactors = F)
    tempL12[which(tempL12$Period %in% tail(unique(ModellingPeriodData$Period),n = 12)),-1] <- tempL12[which(tempL12$Period %in% tail(unique(ModellingPeriodData$Period),n = 12)),-1] + (tempL12[which(tempL12$Period %in% tail(unique(ModellingPeriodData$Period),n = 12)),-1] * as.numeric(olsm_varDetails$elasticityValue/100))
    transElastic <- data.frame(tempL12[,-1])
    colnames(transElastic) <- names(transVec)
  }
  transVec <- as.data.frame(transElastic)
  names(transVec) <- name
  # One column per lag in [LagMin, LagMax], named "<name>_L<lag>".
  lagTrans <- olsmCreateLagSeries(name = name, df = transVec[,name], olsm_varDetails$LagMin, olsm_varDetails$LagMax)
  if(as.character(olsm_varDetails$Transformation) == "S-Curve" | as.character(olsm_varDetails$Transformation) == "Testing"){
    # decayAlpha = decay first; alphaDecay = curve first, decay last.
    if(modelFeatureList$adStockChoice == "AdStock First"){
      TransformedOutsideTOF_Df <- olsmdecayAlphaTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
    }else if (modelFeatureList$adStockChoice == "AdStock Last"){
      TransformedOutsideTOF_Df <- olsmalphaDecayTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
    }
    # Updating Testing transformation value with cap of 1 incase it is greater than 1.
    if(as.character(olsm_varDetails$Transformation) == "Testing"){
      TransformedOutsideTOF_Df[TransformedOutsideTOF_Df > 1] <- 1
    }
  }else
    if(as.character(olsm_varDetails$Transformation) == "S-origin"){
      if(modelFeatureList$adStockChoice == "AdStock First"){
        TransformedOutsideTOF_Df <- olsmdecayAlphaSoriginTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
      }else if (modelFeatureList$adStockChoice == "AdStock Last"){
        TransformedOutsideTOF_Df <- olsmalphaDecaySoriginTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
      }
    }else
      if(as.character(olsm_varDetails$Transformation) == "Power"){
        if(modelFeatureList$adStockChoice == "AdStock First"){
          TransformedOutsideTOF_Df <- olsmdecayPowerTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
        }else if (modelFeatureList$adStockChoice == "AdStock Last"){
          TransformedOutsideTOF_Df <- olsmpowerDecayTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
        }
      }else
        if(as.character(olsm_varDetails$Transformation) == "Linear"){
          TransformedOutsideTOF_Df<-olsmDecayTrans(olsm_RegDataTemp,lagTrans,name, olsm_varDetails)
        }
  return(as.data.frame(TransformedOutsideTOF_Df))
}
# Media Decay for Manual TOF
# Geometric (media) adstock for a single series: each row accumulates
# (1 - decay) of the already-adstocked previous row. A row whose predecessor
# is NA is left unchanged, which restarts the carry-over after a gap.
#
# col   : one-column table-like object (colnames() must work on it).
# decay : decay rate; 1 means no carry-over.
# Returns a one-column data.table carrying the original column name.
olsmCalcMediaDecay <- function(col,decay){
  name <- colnames(col)
  col <- unlist(col, use.names = FALSE)
  # seq_along() instead of 1:length(col): a zero-length column is now a
  # no-op instead of erroring on the bogus index sequence c(1, 0).
  for (i in seq_along(col)) {
    if (i == 1) {
      col[i] <- as.numeric(col[i])
    } else if (!is.na(col[i - 1])) {
      # col[i - 1] is the updated value — this is a recursive filter.
      col[i] <- as.numeric(col[i]) + as.numeric(col[i - 1] * (1 - decay))
    }
  }
  col <- data.table(col, stringsAsFactors = FALSE)
  colnames(col) <- name
  return(col)
}
# Promo Decay for Manual TOF
# Promo adstock for a single series: each row becomes an exponentially
# weighted blend decay*current + (1-decay)*previous(adstocked). A row whose
# predecessor is NA is left unchanged.
#
# col   : one-column table-like object (colnames() must work on it).
# decay : weight on the current observation; 1 means no carry-over.
# Returns a one-column data.table carrying the original column name.
olsmCalcPromoDecay <- function(col,decay){
  name <- colnames(col)
  col <- unlist(col, use.names = FALSE)
  # seq_along() instead of 1:length(col): a zero-length column is now a
  # no-op instead of erroring on the bogus index sequence c(1, 0).
  for (i in seq_along(col)) {
    if (i == 1) {
      col[i] <- as.numeric(col[i])
    } else if (!is.na(col[i - 1])) {
      # col[i - 1] is the updated value — this is a recursive filter.
      col[i] <- ((decay * as.numeric(col[i])) + (as.numeric(col[i - 1] * (1 - decay))))
    }
  }
  col <- data.table(col, stringsAsFactors = FALSE)
  colnames(col) <- name
  return(col)
}
# Expand one raw series into lagged copies, one column per lag offset in
# [lagMin, lagMax]. Columns are named "<name>_L<lag>" and each holds the
# series shifted down by that many rows (leading positions filled with 0).
# Returns a plain data.frame of the lagged columns.
olsmCreateLagSeries <- function(name, df, lagMin, lagMax){
  lo <- as.numeric(as.character(lagMin))
  hi <- as.numeric(as.character(lagMax))
  # Offsets keyed by their eventual column names.
  lagOffsets <- as.numeric(lo:hi)
  names(lagOffsets) <- paste0(name, "_L", lo:hi)
  # Start from identical copies of the raw (numeric-coerced) series.
  baseCopies <- as.data.frame(replicate(as.numeric(as.character(df)), n = hi - lo + 1), stringsAsFactors = FALSE)
  names(baseCopies) <- names(lagOffsets)
  laggedDT <- as.data.table(baseCopies)
  for (colName in names(lagOffsets)) {
    if (!is.na(lagOffsets[colName])) {
      laggedDT[, (colName) := shift(laggedDT[[colName]], as.numeric(unname(lagOffsets[colName])), fill = 0, type = "lag")]
    }
  }
  as.data.frame(laggedDT)
}
# Apply the S-curve transformation to every beta-named column of df_lagged.
# `beta` is a named vector (one per column, NA entries skipped), `alpha` a
# single shape parameter, and varMax*seriesMax the normalising denominator.
# Columns are zeroed when varMax is 0 (cannot normalise). Returns a
# data.frame with the same column names.
olsmgetAlpha <- function(df_lagged,varMax,alpha,beta,df_variable, seriesMax){
  # Reference formula: (beta/(10^10))^(alpha^((x / (varMax*seriesMax)) * 100))
  df_laggedDT <- as.data.table(df_lagged)
  beta <- beta[complete.cases(beta)]
  for(name in names(beta)){
    if(varMax != 0){
      set(x = df_laggedDT,j = name,value = (as.numeric(unname(beta[name]))/(10^10))^((as.numeric(unname(alpha))^((as.numeric(df_laggedDT[[name]])/(varMax * seriesMax))*100))))
    } else{
      # Degenerate series (max of 0): no curve can be fitted.
      df_laggedDT[[name]] <- 0
    }
  }
  df <- as.data.frame(df_laggedDT)
  return(df)
}
# alpha for S-origin type
# Same S-curve family as olsmgetAlpha but anchored at the origin: uses a
# 10^9 scale and subtracts beta/(10^9) so a zero input maps to zero.
# `beta` is a named vector (NA entries skipped); columns are zeroed when
# varMax is 0. Returns a data.frame with the same column names.
olsmgetS_OriginAlpha <- function(df_lagged,varMax,alpha,beta,df_variable, seriesMax){
  # Reference formula:
  #   (beta/(10^9))^(alpha^((x / (varMax*seriesMax)) * 100)) - beta/(10^9)
  df_laggedDT <- as.data.table(df_lagged)
  beta <- beta[complete.cases(beta)]
  for(name in names(beta)){
    if(varMax != 0){
      set(x = df_laggedDT,j = name,value = ((as.numeric(unname(beta[name]))/(10^9))^(as.numeric(unname(alpha))^((as.numeric(df_laggedDT[[name]])/(varMax*seriesMax))*100)) - (as.numeric(unname(beta[name]))/(10^9))))
    } else{
      # Degenerate series (max of 0): no curve can be fitted.
      df_laggedDT[[name]] <- 0
    }
  }
  df <- as.data.frame(df_laggedDT)
  return(df)
}
# Power transformation: raise each named column of `df` to its matching
# exponent from `powerRange` (NA exponents are skipped). Returns the
# transformed columns as a plain list (callers re-wrap with
# as.data.frame.list).
olsmgetPower <- function(df,powerRange){
  powerDT <- as.data.table(df)
  exponents <- powerRange[complete.cases(powerRange)]
  for (colName in names(exponents)) {
    exponent <- as.numeric(unname(exponents[colName]))
    set(powerDT, j = colName, value = powerDT[[colName]] ^ exponent)
  }
  as.list.data.frame(powerDT)
}
# Media Decay: apply geometric adstock column-by-column. Row i of each
# column accumulates (1 - decay) of the already-adstocked previous row;
# rows whose predecessor is NA are left unchanged. `decay` is a named
# vector (one rate per column, NA entries skipped). Returns a data.frame.
olsmgetMediaDecay <- function(df,decay){
  decayDT <- as.data.table(df)
  rates <- decay[complete.cases(decay)]
  # Recursive filter over one column — each step reads the updated
  # previous value, so the loop cannot be vectorised.
  adstockColumn <- function(series, rate) {
    for (pos in 1:length(series)) {
      if (pos == 1) {
        series[pos] <- as.numeric(series[pos])
      } else if (!is.na(series[pos - 1])) {
        series[pos] <- as.numeric(series[pos]) + as.numeric(series[pos - 1] * (1 - rate))
      }
    }
    series
  }
  for (colName in names(rates)) {
    set(decayDT, j = colName, value = adstockColumn(decayDT[[colName]], as.numeric(unname(rates[colName]))))
  }
  as.data.frame(decayDT)
}
# Promo Decay: apply the exponentially weighted promo adstock
# decay*current + (1-decay)*previous(adstocked) column-by-column; rows
# whose predecessor is NA are left unchanged. `decay` is a named vector
# (one rate per column, NA entries skipped). Returns a data.frame.
olsmgetPromoDecay <- function(df,decay){
  decayDT <- as.data.table(df)
  rates <- decay[complete.cases(decay)]
  # Recursive filter over one column — each step reads the updated
  # previous value, so the loop cannot be vectorised.
  blendColumn <- function(series, rate) {
    for (pos in 1:length(series)) {
      if (pos == 1) {
        series[pos] <- as.numeric(series[pos])
      } else if (!is.na(series[pos - 1])) {
        series[pos] <- ((rate * as.numeric(series[pos])) + (as.numeric(series[pos - 1] * (1 - rate))))
      }
    }
    series
  }
  for (colName in names(rates)) {
    set(decayDT, j = colName, value = blendColumn(decayDT[[colName]], as.numeric(unname(rates[colName]))))
  }
  as.data.frame(decayDT)
}
#capturing Scurve Decay data
# "AdStock Last" grid expansion for an Outside-TOF S-Curve/Testing variable:
# each lagged column is crossed with the alpha grid, then the beta grid
# (S-curve applied), then the decay grid (adstock applied last). Column
# names accumulate suffixes: <var>_L<lag>_A<alpha>_B<beta>_D<decay>.
# Returns a data frame with one column per grid combination.
olsmalphaDecayTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  alphaTransformedList <- list()
  # Alpha
  for(lagName in names(lagTrans)){
    #lagName <- names(lagTrans)[2]
    # Single alpha when the step count is 0/1; otherwise an evenly spaced
    # grid from AlphaMin to AlphaMax.
    if(as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 0 | as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 1){
      alphaSteps <- 1
      AlphaSeries <- as.numeric(as.character(olsm_varDetails$AlphaMin))
    }else {
      alphaSteps <- (as.numeric(as.character(olsm_varDetails$AlphaMax))-as.numeric(as.character(olsm_varDetails$AlphaMin)))/(as.numeric(as.character(olsm_varDetails$AlphaSteps))-1)
      AlphaSeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
    }
    lagTransAlpha <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])),n = length(AlphaSeries)),stringsAsFactors = F)
    names(AlphaSeries) <- paste0(lagName,"_A",seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
    names(lagTransAlpha) <- names(AlphaSeries)
    # Beta
    for (alphaName in names(lagTransAlpha)) {
      #alphaName <- names(lagTransAlpha)[1]
      # Beta grid is geometric: BetaMin * BetaMultiplier^(0..BetaSteps-1);
      # zero steps or a zero multiplier collapse it to BetaMin alone.
      if(olsm_varDetails$BetaSteps==0){
        BetaSeries <- olsm_varDetails$BetaMin
      }else {
        if(olsm_varDetails$BetaMultiplier==0){
          BetaSeries <- olsm_varDetails$BetaMin
        }else{
          BetaSeries <- rep(olsm_varDetails$BetaMin*(olsm_varDetails$BetaMultiplier^(0:(olsm_varDetails$BetaSteps-1))))
        }
      }
      lagTransAlphaBeta <- as.data.frame(replicate(as.numeric(as.character(lagTransAlpha[,alphaName])),n = length(BetaSeries)),stringsAsFactors = F)
      names(BetaSeries) <- paste0(alphaName,"_B",BetaSeries)
      names(lagTransAlphaBeta) <- names(BetaSeries)
      # Apply the S-curve before any decay ("AdStock Last" ordering).
      lagTransAlphaBeta <- olsmgetAlpha(lagTransAlphaBeta,olsm_varDetails$ScruveVarMax,AlphaSeries[[alphaName]],BetaSeries,name, olsm_varDetails$SeriesMax)
      #Decay
      for(betaName in names(lagTransAlphaBeta)){
        if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
          decaySteps <- 1
          decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
        }else {
          decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
          decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
        }
        lagTransAlphaBetaDecay <- as.data.frame(replicate(as.numeric(as.character(lagTransAlphaBeta[,betaName])),n = length(decaySeries)),stringsAsFactors = F)
        names(decaySeries) <- paste0(betaName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
        names(lagTransAlphaBetaDecay) <- names(decaySeries)
        ## condition to check if transformation is Media Decay or Promo Decay .
        if(olsm_varDetails$Decay == "Media"){
          lagTransAlphaBetaDecay <- olsmgetMediaDecay(lagTransAlphaBetaDecay,decaySeries)
        }else if(olsm_varDetails$Decay == "Promo"){
          lagTransAlphaBetaDecay <- olsmgetPromoDecay(lagTransAlphaBetaDecay,decaySeries)
        }
        alphaTransformedList <- c(alphaTransformedList,lagTransAlphaBetaDecay)
      }
    }
  }
  return(as.data.frame.list(alphaTransformedList))
}
#capturing Decay Scurve data
# "AdStock First" grid expansion for an Outside-TOF S-Curve/Testing
# variable: each lagged column is first crossed with the decay grid
# (adstock applied), then the alpha grid, then the beta grid (S-curve
# applied last). Column names accumulate suffixes:
# <var>_L<lag>_D<decay>_A<alpha>_B<beta>. Returns a data frame with one
# column per grid combination.
olsmdecayAlphaTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  alphaTransformedList <- list()
  # Decay
  for(lagName in names(lagTrans)){
    # Single decay when the step count is 0/1; otherwise an evenly spaced
    # grid from DecayMin to DecayMax.
    if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
      decaySteps <- 1
      decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
    }else {
      decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
      decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    }
    lagTransDecay <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(decaySeries)),stringsAsFactors = F)
    names(decaySeries) <- paste0(lagName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    names(lagTransDecay) <- names(decaySeries)
    ## condition to check if Transformation is Media Decay and Promo Decay.
    if(olsm_varDetails$Decay == "Media"){
      lagTransDecay <- olsmgetMediaDecay(lagTransDecay,decaySeries)
    }else if(olsm_varDetails$Decay == "Promo"){
      lagTransDecay <- olsmgetPromoDecay(lagTransDecay,decaySeries)
    }
    #Alpha
    for(alphaName in names(lagTransDecay)){
      if(as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 0 | as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 1){
        alphaSteps <- 1
        AlphaSeries <- as.numeric(as.character(olsm_varDetails$AlphaMin))
      }else {
        alphaSteps <- (as.numeric(as.character(olsm_varDetails$AlphaMax))-as.numeric(as.character(olsm_varDetails$AlphaMin)))/(as.numeric(as.character(olsm_varDetails$AlphaSteps))-1)
        AlphaSeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
      }
      lagTransDecayAlpha <- as.data.frame(replicate(as.numeric(as.character(lagTransDecay[,alphaName])),n = length(AlphaSeries)),stringsAsFactors = F)
      names(AlphaSeries) <- paste0(alphaName,"_A",seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
      names(lagTransDecayAlpha) <- names(AlphaSeries)
      # Beta
      for (betaName in names(lagTransDecayAlpha)) {
        # Beta grid is geometric: BetaMin * BetaMultiplier^(0..BetaSteps-1);
        # zero steps or a zero multiplier collapse it to BetaMin alone.
        if(olsm_varDetails$BetaSteps==0){
          BetaSeries <- olsm_varDetails$BetaMin
        }else {
          if(olsm_varDetails$BetaMultiplier==0){
            BetaSeries <- olsm_varDetails$BetaMin
          }else{
            BetaSeries <- rep(olsm_varDetails$BetaMin*(olsm_varDetails$BetaMultiplier^(0:(olsm_varDetails$BetaSteps-1))))
          }
        }
        lagTransDecayAlphaBeta <- as.data.frame(replicate(as.numeric(as.character(lagTransDecayAlpha[,betaName])),n = length(BetaSeries)),stringsAsFactors = F)
        names(BetaSeries) <- paste0(betaName,"_B",BetaSeries)
        names(lagTransDecayAlphaBeta) <- names(BetaSeries)
        # NOTE: despite its name, 'betaName' iterates the alpha-level column
        # names, and names(lagTransDecayAlpha) == names(AlphaSeries), so
        # AlphaSeries[[betaName]] is a valid lookup of this column's alpha.
        lagTransDecayAlphaBeta <- olsmgetAlpha(lagTransDecayAlphaBeta,olsm_varDetails$ScruveVarMax,AlphaSeries[[betaName]],BetaSeries,name, olsm_varDetails$SeriesMax)
        alphaTransformedList <- c(alphaTransformedList,lagTransDecayAlphaBeta)
      }
    }
  }
  return(as.data.frame.list(alphaTransformedList))
}
# capturing S-origin Decay data
# "AdStock Last" grid expansion for an Outside-TOF S-origin variable:
# each lagged column is crossed with the alpha grid, then the beta grid
# (origin-anchored S-curve applied via olsmgetS_OriginAlpha), then the
# decay grid (adstock applied last). Column names accumulate suffixes:
# <var>_L<lag>_A<alpha>_B<beta>_D<decay>. Returns a data frame with one
# column per grid combination.
olsmalphaDecaySoriginTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  alphaTransformedList <- list()
  # Alpha
  for(lagName in names(lagTrans)){
    #lagName <- names(lagTrans)[3]
    # Single alpha when the step count is 0/1; otherwise an evenly spaced
    # grid from AlphaMin to AlphaMax.
    if(as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 0 | as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 1){
      alphaSteps <- 1
      AlphaSeries <- as.numeric(as.character(olsm_varDetails$AlphaMin))
    }else {
      alphaSteps <- (as.numeric(as.character(olsm_varDetails$AlphaMax))-as.numeric(as.character(olsm_varDetails$AlphaMin)))/(as.numeric(as.character(olsm_varDetails$AlphaSteps))-1)
      AlphaSeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
    }
    lagTransAlpha <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])),n = length(AlphaSeries)),stringsAsFactors = F)
    names(AlphaSeries) <- paste0(lagName,"_A",seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
    names(lagTransAlpha) <- names(AlphaSeries)
    # Beta
    for (alphaName in names(lagTransAlpha)) {
      #alphaName <- names(lagTransAlpha)[1]
      # Beta grid is geometric: BetaMin * BetaMultiplier^(0..BetaSteps-1);
      # zero steps or a zero multiplier collapse it to BetaMin alone.
      if(olsm_varDetails$BetaSteps==0){
        BetaSeries <- olsm_varDetails$BetaMin
      }else {
        if(olsm_varDetails$BetaMultiplier==0){
          BetaSeries <- olsm_varDetails$BetaMin
        }else{
          BetaSeries <- rep(olsm_varDetails$BetaMin*(olsm_varDetails$BetaMultiplier^(0:(olsm_varDetails$BetaSteps-1))))
        }
      }
      lagTransAlphaBeta <- as.data.frame(replicate(as.numeric(as.character(lagTransAlpha[,alphaName])),n = length(BetaSeries)),stringsAsFactors = F)
      names(BetaSeries) <- paste0(alphaName,"_B",BetaSeries)
      names(lagTransAlphaBeta) <- names(BetaSeries)
      # Apply the origin-anchored S-curve before any decay.
      lagTransAlphaBeta <- olsmgetS_OriginAlpha(lagTransAlphaBeta,olsm_varDetails$ScruveVarMax,AlphaSeries[[alphaName]],BetaSeries,name, olsm_varDetails$SeriesMax)
      #Decay
      for(betaName in names(lagTransAlphaBeta)){
        if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
          decaySteps <- 1
          decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
        }else {
          decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
          decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
        }
        lagTransAlphaBetaDecay <- as.data.frame(replicate(as.numeric(as.character(lagTransAlphaBeta[,betaName])),n = length(decaySeries)),stringsAsFactors = F)
        names(decaySeries) <- paste0(betaName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
        names(lagTransAlphaBetaDecay) <- names(decaySeries)
        ## condition to check if Transformation is Media Decay or Promo Decay .
        if(olsm_varDetails$Decay == "Media"){
          lagTransAlphaBetaDecay <- olsmgetMediaDecay(lagTransAlphaBetaDecay,decaySeries)
        }else if(olsm_varDetails$Decay == "Promo"){
          lagTransAlphaBetaDecay <- olsmgetPromoDecay(lagTransAlphaBetaDecay,decaySeries)
        }
        alphaTransformedList <- c(alphaTransformedList,lagTransAlphaBetaDecay)
      }
    }
  }
  return(as.data.frame.list(alphaTransformedList))
}
# capturing Decay Alpha data for S-origin
# "AdStock First" grid expansion for an Outside-TOF S-origin variable:
# each lagged column is first crossed with the decay grid (adstock
# applied), then the alpha grid, then the beta grid (origin-anchored
# S-curve applied last via olsmgetS_OriginAlpha). Column names accumulate
# suffixes: <var>_L<lag>_D<decay>_A<alpha>_B<beta>. Returns a data frame
# with one column per grid combination.
olsmdecayAlphaSoriginTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  alphaTransformedList <- list()
  # Decay
  for(lagName in names(lagTrans)){
    # Single decay when the step count is 0/1; otherwise an evenly spaced
    # grid from DecayMin to DecayMax.
    if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
      decaySteps <- 1
      decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
    }else {
      decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
      decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    }
    lagTransDecay <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(decaySeries)),stringsAsFactors = F)
    names(decaySeries) <- paste0(lagName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    names(lagTransDecay) <- names(decaySeries)
    ## Condition to Check if Transformation is Media Decay or Promo Decay .
    if(olsm_varDetails$Decay == "Media"){
      lagTransDecay <- olsmgetMediaDecay(lagTransDecay,decaySeries)
    }else if(olsm_varDetails$Decay == "Promo"){
      lagTransDecay <- olsmgetPromoDecay(lagTransDecay,decaySeries)
    }
    #Alpha
    for(alphaName in names(lagTransDecay)){
      if(as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 0 | as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 1){
        alphaSteps <- 1
        AlphaSeries <- as.numeric(as.character(olsm_varDetails$AlphaMin))
      }else {
        alphaSteps <- (as.numeric(as.character(olsm_varDetails$AlphaMax))-as.numeric(as.character(olsm_varDetails$AlphaMin)))/(as.numeric(as.character(olsm_varDetails$AlphaSteps))-1)
        AlphaSeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
      }
      lagTransDecayAlpha <- as.data.frame(replicate(as.numeric(as.character(lagTransDecay[,alphaName])),n = length(AlphaSeries)),stringsAsFactors = F)
      names(AlphaSeries) <- paste0(alphaName,"_A",seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
      names(lagTransDecayAlpha) <- names(AlphaSeries)
      # Beta
      for (betaName in names(lagTransDecayAlpha)) {
        # Beta grid is geometric: BetaMin * BetaMultiplier^(0..BetaSteps-1);
        # zero steps or a zero multiplier collapse it to BetaMin alone.
        if(olsm_varDetails$BetaSteps==0){
          BetaSeries <- olsm_varDetails$BetaMin
        }else {
          if(olsm_varDetails$BetaMultiplier==0){
            BetaSeries <- olsm_varDetails$BetaMin
          }else{
            BetaSeries <- rep(olsm_varDetails$BetaMin*(olsm_varDetails$BetaMultiplier^(0:(olsm_varDetails$BetaSteps-1))))
          }
        }
        lagTransDecayAlphaBeta <- as.data.frame(replicate(as.numeric(as.character(lagTransDecayAlpha[,betaName])),n = length(BetaSeries)),stringsAsFactors = F)
        names(BetaSeries) <- paste0(betaName,"_B",BetaSeries)
        names(lagTransDecayAlphaBeta) <- names(BetaSeries)
        # NOTE: despite its name, 'betaName' iterates the alpha-level column
        # names, and names(lagTransDecayAlpha) == names(AlphaSeries), so
        # AlphaSeries[[betaName]] is a valid lookup of this column's alpha.
        lagTransDecayAlphaBeta <- olsmgetS_OriginAlpha(lagTransDecayAlphaBeta,olsm_varDetails$ScruveVarMax,AlphaSeries[[betaName]],BetaSeries,name,olsm_varDetails$SeriesMax)
        alphaTransformedList <- c(alphaTransformedList,lagTransDecayAlphaBeta)
      }
    }
  }
  return(as.data.frame.list(alphaTransformedList))
}
#capturing Power Decay data
# "AdStock Last" grid expansion for an Outside-TOF Power variable: each
# lagged column is crossed with the exponent grid (power applied, using
# the Alpha settings as exponents), then the decay grid (adstock applied
# last). Column names accumulate suffixes: <var>_L<lag>_P<power>_D<decay>.
# Returns a data frame with one column per grid combination.
olsmpowerDecayTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  powerTransformedList <- list()
  # Power
  for(lagName in names(lagTrans)){
    #lagName <- names(lagTrans)[1]
    # Single exponent when the step count is 0/1; otherwise an evenly
    # spaced grid from AlphaMin to AlphaMax.
    if(as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 0 | as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 1){
      alphaSteps <- 1
      AlphaSeries <- as.numeric(as.character(olsm_varDetails$AlphaMin))
    }else {
      alphaSteps <- (as.numeric(as.character(olsm_varDetails$AlphaMax))-as.numeric(as.character(olsm_varDetails$AlphaMin)))/(as.numeric(as.character(olsm_varDetails$AlphaSteps))-1)
      AlphaSeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
    }
    lagTransPower <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])),n = length(AlphaSeries)),stringsAsFactors = F)
    names(AlphaSeries) <- paste0(lagName,"_P",seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
    names(lagTransPower) <- names(AlphaSeries)
    lagTransPower <- as.data.frame.list(olsmgetPower(lagTransPower,AlphaSeries))
    # Decay
    for(powerName in names(lagTransPower)){
      # powerName <-names(lagTransPower)[1]
      if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
        decaySteps <- 1
        decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
      }else {
        decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
        decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
      }
      lagTransPowerDecay <- as.data.frame(replicate(as.numeric(as.character(lagTransPower[,powerName])), n = length(decaySeries)),stringsAsFactors = F)
      names(decaySeries) <- paste0(powerName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
      names(lagTransPowerDecay) <- names(decaySeries)
      ## check for Transformation type, if it is Media Decay or Promo Decay.
      if(olsm_varDetails$Decay == "Media"){
        lagTransPowerDecay <- olsmgetMediaDecay(lagTransPowerDecay,decaySeries)
      }else if(olsm_varDetails$Decay == "Promo"){
        lagTransPowerDecay <- olsmgetPromoDecay(lagTransPowerDecay,decaySeries)
      }
      powerTransformedList <- c(powerTransformedList,lagTransPowerDecay)
    }
  }
  return(as.data.frame.list(powerTransformedList))
}
#capturing Decay Power data
olsmdecayPowerTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  # Apply a Decay transformation followed by a Power transformation to every
  # lagged series in `lagTrans`, over a grid of decay rates and alpha (power)
  # exponents read from the model-manager row `olsm_varDetails`.
  #
  # Args:
  #   olsm_RegDataTemp: raw regression data (not referenced here; kept for a
  #     signature shared with the sibling transformation helpers).
  #   lagTrans: data.frame of lag-transformed series, one column per lag.
  #   name: base variable name (not referenced here; signature consistency).
  #   olsm_varDetails: one model-manager row carrying DecayMin/DecayMax/
  #     DecaySteps, AlphaMin/AlphaMax/AlphaSteps and the Decay type
  #     ("Media" or "Promo").
  # Returns:
  #   data.frame with one column per lag x decay x alpha combination, named
  #   "<lag>_D<decay>_P<alpha>".
  powerTransformedList <- list()
  #Decay
  for(lagName in names(lagTrans)){
    # 0 or 1 configured steps collapses the decay grid to the single DecayMin value.
    if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
      decaySteps <- 1
      decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
    }else {
      decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
      decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    }
    # Replicate the lag column once per decay value, so each copy can be
    # decayed with its own rate by the olsmget*Decay helper below.
    lagTransDecay <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(decaySeries)),stringsAsFactors = F)
    names(decaySeries) <- paste0(lagName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    names(lagTransDecay) <- names(decaySeries)
    ## condition to check if transformation is Media Decay or Promo Decay
    if(olsm_varDetails$Decay == "Media"){
      lagTransDecay <- olsmgetMediaDecay(lagTransDecay,decaySeries)
    }else if(olsm_varDetails$Decay == "Promo"){
      lagTransDecay <- olsmgetPromoDecay(lagTransDecay,decaySeries)
    }
    #Power
    for(decayName in names(lagTransDecay)){
      # 0 or 1 configured steps collapses the alpha grid to the single AlphaMin value.
      if(as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 0 | as.numeric(as.character(olsm_varDetails$AlphaSteps)) == 1){
        alphaSteps <- 1
        AlphaSeries <- as.numeric(as.character(olsm_varDetails$AlphaMin))
      }else {
        alphaSteps <- (as.numeric(as.character(olsm_varDetails$AlphaMax))-as.numeric(as.character(olsm_varDetails$AlphaMin)))/(as.numeric(as.character(olsm_varDetails$AlphaSteps))-1)
        AlphaSeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
      }
      # One copy of the decayed column per alpha value; olsmgetPower raises
      # each copy to its matching exponent.
      lagTransDecayPower <- as.data.frame(replicate(as.numeric(as.character(lagTransDecay[,decayName])),n = length(AlphaSeries)),stringsAsFactors = F)
      names(AlphaSeries) <- paste0(decayName,"_P",seq(from=as.numeric(as.character(olsm_varDetails$AlphaMin)),to=as.numeric(as.character(olsm_varDetails$AlphaMax)),by=alphaSteps))
      names(lagTransDecayPower) <- names(AlphaSeries)
      lagTransDecayPower <- as.data.frame.list(olsmgetPower(lagTransDecayPower,AlphaSeries))
      powerTransformedList <- c(powerTransformedList,lagTransDecayPower)
    }
  }
  return(as.data.frame.list(powerTransformedList))
}
#capturing Decay data
olsmDecayTrans <- function(olsm_RegDataTemp,lagTrans,name, olsm_varDetails){
  # Apply only a Decay transformation (Media or Promo) to every lagged series
  # in `lagTrans`, over a grid of decay rates read from `olsm_varDetails`.
  #
  # Args:
  #   olsm_RegDataTemp: raw regression data (not referenced here; kept for a
  #     signature shared with the sibling transformation helpers).
  #   lagTrans: data.frame of lag-transformed series, one column per lag.
  #   name: base variable name (not referenced here; signature consistency).
  #   olsm_varDetails: one model-manager row carrying DecayMin/DecayMax/
  #     DecaySteps and the Decay type ("Media" or "Promo").
  # Returns:
  #   data.frame with one column per lag x decay combination, named
  #   "<lag>_D<decay>".
  decayTransformedList <- list()
  # Decay
  # 0 or 1 configured steps collapses the decay grid to the single DecayMin value.
  if(as.numeric(as.character(olsm_varDetails$DecaySteps)) == 0 | as.numeric(as.character(olsm_varDetails$DecaySteps)) == 1){
    decaySteps <- 1
    decaySeries <- as.numeric(as.character(olsm_varDetails$DecayMin))
  }else {
    decaySteps <- (as.numeric(as.character(olsm_varDetails$DecayMax))-as.numeric(as.character(olsm_varDetails$DecayMin)))/(as.numeric(as.character(olsm_varDetails$DecaySteps))-1)
    decaySeries <- as.numeric(seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
  }
  for(lagName in names(lagTrans)){
    # Replicate the lag column once per decay value, so each copy can be
    # decayed with its own rate by the olsmget*Decay helper below.
    lagTransDecay <- as.data.frame(replicate(as.numeric(as.character(lagTrans[,lagName])), n = length(decaySeries)),stringsAsFactors = F)
    names(decaySeries) <- paste0(lagName,"_D",seq(from=as.numeric(as.character(olsm_varDetails$DecayMin)),to=as.numeric(as.character(olsm_varDetails$DecayMax)),by=decaySteps))
    names(lagTransDecay) <- names(decaySeries)
    ## check for transformation, if it is Media Decay or Promo Decay.
    if(olsm_varDetails$Decay == "Media"){
      lagTransDecay <- olsmgetMediaDecay(lagTransDecay,decaySeries)
    }else if(olsm_varDetails$Decay == "Promo"){
      lagTransDecay <- olsmgetPromoDecay(lagTransDecay,decaySeries)
    }
    decayTransformedList <- c(decayTransformedList,lagTransDecay)
  }
  return(as.data.frame.list(decayTransformedList))
}
createOlsmNormalization <- function(olsmFinalRegDf,olsm_parametersDF){
  # Normalize every model column (all but the first) according to the
  # Normalization rule configured for it in the model manager:
  #   "Division"    -> divide by the column mean (skipped when the mean is 0),
  #   "Subtraction" -> subtract the column mean,
  #   anything else -> leave the column untouched.
  # A "Geography" column, when present, is set aside during normalization and
  # re-attached in front of the result; otherwise the first (untouched) column
  # is dropped from the returned frame.
  hasGeography <- any(names(olsmFinalRegDf) %in% "Geography")
  if (hasGeography) {
    geoColumn <- olsmFinalRegDf$Geography
    olsmFinalRegDf <- olsmFinalRegDf[, which(names(olsmFinalRegDf) != "Geography")]
  }
  for (colName in names(olsmFinalRegDf)[-1]) {
    # The model-manager row is found by matching the column's base name
    # (lag suffix "_L<n>..." stripped) as a pattern against VariableName.
    baseName <- gsub("_L+[0-9].*", "", colName)
    normRule <- as.character(olsm_parametersDF$Normalization[which(grepl(baseName, olsm_parametersDF$VariableName))])
    if (normRule == "Division") {
      colMean <- mean(olsmFinalRegDf[[colName]], na.rm = TRUE)
      # Guard against division by zero: keep the column unchanged.
      if (colMean != 0) {
        olsmFinalRegDf[[colName]] <- olsmFinalRegDf[[colName]] / colMean
      }
    } else if (normRule == "Subtraction") {
      olsmFinalRegDf[[colName]] <- olsmFinalRegDf[[colName]] - mean(olsmFinalRegDf[[colName]], na.rm = TRUE)
    }
    # Any other rule (e.g. "None") keeps the original values.
  }
  if (hasGeography) {
    return(cbind("Geography" = geoColumn, olsmFinalRegDf))
  }
  olsmFinalRegDf[, -1]
}
##################### Function related to Regression & Model Result ############
olsmGetFixedEffectDF <- function(olsmFinalRegDf, olsm_parametersDF, modelFeatureList){
  # Remove the fixed-coefficient variables from the regression frame by
  # subtracting their (coefficient * value) contribution from the dependent
  # variable, then dropping the fixed columns. Fixed coefficients can apply
  # globally ("All") or per geography ("Geo", via geoFixedEstimatesDF).
  # Returns the frame unchanged when no "Fixed Var" rows exist in the manager.
  # Helper: scale each fixed column by its coefficient, subtract the total
  # fixed contribution from `depVar`, and drop the fixed columns from `df`.
  olsmExcludeFixedEst <- function(df,depVar, fixedVarCoef){
    for (i in 1:nrow(fixedVarCoef)) {
      df[,which(names(df) %in% fixedVarCoef$VariableName[i])] <- df[,which(names(df) %in% fixedVarCoef$VariableName[i])]* fixedVarCoef$Fixed_Coefficient[i]
    }
    fixedVar <- fixedVarCoef$VariableName
    # rowSums needs >= 2 columns, so the single-variable case is handled apart.
    if(length(fixedVar)==1){
      df[,which(names(df)==depVar)] <- df[,which(names(df)==depVar)]- df[,which(names(df) %in% fixedVar)]
    }else{
      df[,which(names(df)==depVar)] <- df[,which(names(df)==depVar)]- rowSums(df[,which(names(df) %in% fixedVar)])
    }
    # removing fixed var from olsmFinalFixedRegDf
    df <- df[,-which(names(df) %in% fixedVar)]
    return(df)
  }
  df <- olsmFinalRegDf # copying the olsmFinalRegDf for making fixed df file.
  fixedVar <- olsm_parametersDF[grepl("Fixed Var",olsm_parametersDF$Type),1]
  if(length(fixedVar)!=0){
    depVar <- olsm_parametersDF[grepl("DepVar",olsm_parametersDF$Type),1]
    # subtracting fixed df value from dependent variable.
    if(modelFeatureList$FixedVarChoice == "All"){
      # Single coefficient set applied to the whole frame.
      fixedVarCoef <- olsm_parametersDF[grepl("Fixed Var",olsm_parametersDF$Type),c("VariableName","Fixed_Coefficient")]
      nonFixedFinalRegDF <- olsmExcludeFixedEst(df, depVar, fixedVarCoef)
    }else if(modelFeatureList$FixedVarChoice == "Geo"){
      # Geography-specific coefficients: split both the estimates and the data
      # by Geography, apply per geo, then stack back together (data.table::rbindlist).
      fixedVarCoefByGeo <- split(modelFeatureList$geoFixedEstimatesDF,modelFeatureList$geoFixedEstimatesDF$Geography)
      geoData <- split(df, df$Geography)
      nonFixedFinalRegDF <- data.frame(rbindlist(lapply(names(fixedVarCoefByGeo), function(x){return(olsmExcludeFixedEst(geoData[[x]], depVar, fixedVarCoefByGeo[[x]]))})),row.names = NULL)
    }
    return(nonFixedFinalRegDF)
  }else {
    return(df)
  }
}
olsmCreateCombinedColumn <- function(df,combinedCol){
  # Collapse groups of variables into summed "Combined_<id>" columns.
  #
  # `combinedCol` maps VariableName -> Combined_Column group id; id 0 means the
  # variable is not combined. Each non-zero group becomes a new column holding
  # the row-wise sum of its member columns, and the member columns are dropped.
  #
  # Args:
  #   df: data.frame containing (at least) the columns named in `combinedCol`.
  #   combinedCol: data.frame with columns VariableName and Combined_Column.
  # Returns:
  #   `df` with combined columns appended and member columns removed, or `df`
  #   unchanged when no combination is configured.
  combinedColumns <- combinedCol[which(combinedCol$Combined_Column != 0),]
  if(nrow(combinedColumns)!=0){
    columnsToBeDeleted <- combinedColumns$VariableName
    # FIX: these were assigned with `<<-`, leaking the lists into the global
    # environment; local assignment is sufficient (other callers, e.g.
    # olsmSplitCombinedEstimateData, recompute their own copy).
    combinedColumnsList <- split(combinedColumns$VariableName,combinedColumns$Combined_Column)
    combinedColumnsList <- setNames(combinedColumnsList,paste0("Combined_",names(combinedColumnsList)))
    # drop = FALSE keeps a one-variable group as a data.frame so rowSums works.
    combinedColumnsdf <- do.call("cbind",lapply(combinedColumnsList,function(x){data.frame(rowSums(df[,x,drop = FALSE]))}))
    colnames(combinedColumnsdf) <- names(combinedColumnsList)
    finalCombined <- cbind(df,combinedColumnsdf)
    finalCombined <- finalCombined[ , !(names(finalCombined) %in% columnsToBeDeleted)]
    return(finalCombined)
  }else {
    return(df)
  }
}
olsmExtractOutsideVar <- function(fit, modelManager){
  # Report which of the model's coefficients are "Outside" variables according
  # to the model manager. Coefficient names carry lag suffixes ("_L<n>...")
  # that are stripped before matching against VariableName. Returns the
  # matching coefficient names, or a fixed sentinel string when none match.
  coefNames <- names(fit$coefficients)
  outsideNames <- modelManager$VariableName[grep("Outside", modelManager$Type)]
  isOutside <- gsub("_L+[0-9].*", "", coefNames) %in% outsideNames
  if (any(isOutside)) {
    as.character(coefNames[isOutside])
  } else {
    "NO Outside (Base Model)"
  }
}
olsmGetOutsideTstatVIF <- function(model, outsideVar){
  # Extract the t-statistic and VIF of the outside variable(s) from a fitted
  # model. Both default to 0 when no model term matches `outsideVar` (after
  # stripping the "_L<n>..." lag suffix). Relies on broom::tidy() for the
  # coefficient table and vif() for variance inflation factors.
  # Returns a one-row matrix of c(tstat, vifValue) values.
  tstat <- 0
  vifValue <- 0
  if(any(gsub("_L+[0-9].*","",tidy(model)[,"term"]) %in% outsideVar)){
    # tidy(model)[-1] drops the term column; row 3 of its transpose is the
    # statistic row of the coefficient table — TODO confirm this stays aligned
    # with the broom::tidy column order (estimate, std.error, statistic, ...).
    test <- as.data.frame(t(tidy(model)[-1]))[3,]
    colnames(test) = tidy(model)[,1]
    tstat <- round(test[,which(gsub("_L+[0-9].*","",colnames(test)) %in% outsideVar)],digits = 5)
    vifValue <- vif(model)
    vifValue <- vifValue[gsub("_L+[0-9].*","",names(vifValue)) %in% outsideVar]
  }
  return(t(data.frame(c(tstat,vifValue),row.names = NULL)))
}
#Rebuilding estimates for combined column by split
olsmSplitCombinedEstimateData <- function(parameterDetails, parametersDf, transData){
  # Split the estimate of every "Combined_<id>" model term back into one row
  # per member variable. When all members have a normalization other than
  # "None", each member's share is proportional to its mean relative to the
  # mean of the summed (combined) series; otherwise members share equally
  # (1/n each). The combined estimate is first scaled by the number of
  # members, then apportioned by the per-member ratio.
  #
  # Args:
  #   parameterDetails: model coefficient table (has `term` plus `estimate`
  #     or `Rolledup_Estimate`).
  #   parametersDf: model manager (VariableName, Combined_Column, Normalization).
  #   transData: transformed input data used to compute member means.
  # Returns:
  #   parameterDetails with combined rows replaced by per-member rows.
  index <- grep("Combined",parameterDetails$term)
  # Non-combined rows are kept as-is; note this assumes at least one combined
  # term exists (the caller only invokes this when "Combined" terms are present).
  df <- parameterDetails[-index,]
  temp <- NULL
  combinedColumns <- parametersDf[which(parametersDf$Combined_Column != 0),]
  combinedColumnsList <- split(combinedColumns$VariableName,combinedColumns$Combined_Column)
  combinedColumnsList <- setNames(combinedColumnsList,paste0("Combined_",names(combinedColumnsList)))
  for (i in 1:length(index)) {
    # i = 3
    combinedModelVar <- as.character(parameterDetails$term[index[i]])
    combineVars <- combinedColumnsList[[combinedModelVar]]
    if(all(parametersDf$Normalization[parametersDf$VariableName %in% combineVars] != "None")){
      # Ratio of each member's mean to the mean of the combined (summed) series;
      # a zero-mean member keeps a zero ratio to avoid 0/0.
      combinedModelVarMean <- mean(rowSums(transData[,names(transData) %in% combineVars]),na.rm = T)
      combineVarsMeans <- colMeans(transData[,names(transData) %in% combineVars],na.rm = T)
      combinedVarRatio <- sapply(combineVarsMeans,FUN = function(x){if(x == 0){return(x)}else{return(x/combinedModelVarMean)}})
    }else{
      # Equal split when any member is un-normalized.
      combinedVarRatio <- rep(1/length(combineVars), times = length(combineVars))
      names(combinedVarRatio) <- combineVars
    }
    ####### Multiplying RolledEstimates with Number of Variables/length of combinedVarRatio [combined column]
    if(any(names(parameterDetails) %in% "Rolledup_Estimate")){
      parameterDetails$Rolledup_Estimate[parameterDetails$term == parameterDetails$term[[index[i]]]] <- parameterDetails$Rolledup_Estimate[parameterDetails$term == parameterDetails$term[[index[i]]]] * length(combinedVarRatio)
    }else{
      parameterDetails$estimate[parameterDetails$term == parameterDetails$term[[index[i]]]] <- parameterDetails$estimate[parameterDetails$term == parameterDetails$term[[index[i]]]] * length(combinedVarRatio)
    }
    # Duplicate the (now scaled) combined row once per member and rename the
    # copies to the member variable names.
    temp <- parameterDetails[rep(index[i], each=length(combinedColumnsList[[as.character(parameterDetails$term[index[i]])]])),]
    temp$term <- combinedColumnsList[[as.character(parameterDetails$term[index[i]])]]
    for(var in names(combinedVarRatio)){
      # var = names(combinedVarRatio)[1]
      if(any(names(temp) %in% "Rolledup_Estimate")){
        temp$Rolledup_Estimate[temp$term == var] <- temp$Rolledup_Estimate[temp$term == var] * combinedVarRatio[names(combinedVarRatio) == var]
      }else{
        temp$estimate[temp$term == var] <- temp$estimate[temp$term == var] * combinedVarRatio[names(combinedVarRatio) == var]
      }
    }
    names(temp) <- names(df)
    df <- data.frame(rbind(df, temp), row.names = NULL)
  }
  return(df)
}
olsmGetContribution <- function(olsmFullDecomp, depVar, unrolled){
  # Compute each term's percentage contribution from a full-decomposition
  # frame: the column means (excluding Period/Geography/depVar), rescaled to
  # sum to 100.
  #
  # When `unrolled` is NULL, `olsmFullDecomp` is a list: a two-element list
  # (non-stacked / stacked-appended model) supplies FulldecompUnRolledDf,
  # otherwise Fulldecomposition_BaseModel is used; the result column is named
  # "Contribution%". When `unrolled` is non-NULL, `olsmFullDecomp` is already
  # a per-geography data.frame and the result column is named `unrolled`.
  if (is.null(unrolled)) {
    olsmFullDecomp <- if (length(olsmFullDecomp) == 2) {
      olsmFullDecomp$FulldecompUnRolledDf
    } else {
      olsmFullDecomp$Fulldecomposition_BaseModel
    }
  }
  dropCols <- c("Period", "Geography", depVar)
  avgVals <- colMeans(olsmFullDecomp[, !names(olsmFullDecomp) %in% dropCols])
  # Rescale to percentages and transpose so terms become row names.
  shares <- t(as.data.frame.list(avgVals / sum(avgVals) * 100))
  contributionDF <- data.frame(row.names(shares), shares, row.names = NULL)
  names(contributionDF) <- if (is.null(unrolled)) {
    c("term", "Contribution%")
  } else {
    c("term", unrolled)
  }
  contributionDF
}
# TODO: build the new elasticity calculation here and remove the function above. Please do not delete the commented sections below.
olsmGetElasticity <- function(model,olsm_RegDataTemp,olsmModelFeatureList, olsm_parametersDF, olsmModelData, transData, actPredData){
  # Compute elasticities by re-running the full transformation + decomposition
  # pipeline on input data perturbed by `olsmModelFeatureList$elasticityValue`
  # percent, and comparing the perturbed decomposition to the base one.
  # Elasticity = (mean change in a term's decomposed contribution / mean
  # predicted value) * 100, reported for the full modelling period and for the
  # last 12 months, both overall and split by geography.
  #
  # Returns a list with ElasticityByGeography, ElasticityL12ByGeography and
  # ElasticityFullPeriod_L12 (Intercept forced to 0, NAs replaced by 0, and
  # the dependent-variable column/row removed or NA'd).
  # olsmModelFeatureList$elasticityValue <- 1
  # Inner helper: rebuild the (perturbed) transformed data per selected
  # geography, recompute the full decomposition, and de-normalize each term
  # back using per-geography means in `varMean`.
  olsmGetElastctContribution <- function(model,olsmElasticRegData, olsmModelFeatureList, olsm_parametersDF, olsmModelData,olsm_RegDataTemp, varMean, transData){
    olsmElasticRegData$Geography <- as.character(olsmElasticRegData$Geography)
    olsm_SplitElasticAF <- split(olsmElasticRegData, olsmElasticRegData$Geography)
    olsm_splitByGeoSubset <- olsm_SplitElasticAF[which(names(olsm_SplitElasticAF) %in% olsmModelFeatureList$selectedGeos)]
    olsmTransElasticAF <- NULL
    for (name in names(olsm_splitByGeoSubset)) {
      #name = names(olsm_splitByGeoSubset)[1]
      olsmModelFeatureList[["TransGeo"]] <- name
      # Re-run the project transformation pipeline for this geography, then
      # keep only rows inside the modelling period (dates parsed via dmy).
      df <- data.frame(createOlsmTransformation(olsm_RegDataModelList = olsm_splitByGeoSubset, olsm_parametersDF, modelFeatureList = olsmModelFeatureList),stringsAsFactors = FALSE)
      df <- df[dmy(df$Period) %in% dmy(olsmModelFeatureList$modellingPeriod),]
      olsmTransElasticAF[[name]] <- df
    }
    olsmTransElasticAF <- as.data.frame(rbindlist(olsmTransElasticAF))
    olsmTransElasticAF$Period <- lubridate::dmy(olsmTransElasticAF$Period)
    dummyVarName <- NULL
    if(olsmModelFeatureList$DummyFlag == TRUE){
      # Carry the model's Dummy_Var<N> columns over, perturbed by
      # elasticityValue percent like the rest of the inputs.
      olsmTransElasticAF <- olsmTransElasticAF[olsmTransElasticAF$Period %in% olsmModelData$Period,]
      dummyVarName <- grep("Dummy_Var+[0-9]",names(olsmModelData),value = T)
      dummyVar <- olsmModelData[,grep("Dummy_Var+[0-9]",names(olsmModelData))]
      tmp <- cbind(olsmTransElasticAF, dummyVarName = dummyVar + (dummyVar*olsmModelFeatureList$elasticityValue/100))
      names(tmp) <- c(names(olsmTransElasticAF),dummyVarName)
      olsmTransElasticAF <- tmp
    }
    #olsmFullDecompList <- olsmExtractFullDecomp(olsmFullDecompList <- olsmExtractFullDecomp(model, olsm_parametersDF, olsmModelData = olsmTransElasticAF,modelFeatureList = olsmModelFeatureList, olsm_RegDataTemp, transData))
    olsmFullDecompList <- olsmExtractFullDecomp(model, olsm_parametersDF, olsmModelData = olsmTransElasticAF,modelFeatureList = olsmModelFeatureList, olsm_RegDataTemp, transData)
    olsmElasticFullDecomp <- list()
    olsmElasticFullDecomp[["FulldecompUnRolledDf"]] <- as.data.frame(rbindlist(olsmFullDecompList))
    #olsmElasticFullDecomp[["FulldecompUnRolledDf"]] <- as.data.frame(rbindlist(lapply(olsmFullDecompList, function(x) olsmMinMaxAdjust(x, olsmModelFeatureList))))
    olsmElasticFullDecomp$FulldecompUnRolledDf$Period <- as.character(olsmElasticFullDecomp$FulldecompUnRolledDf$Period)
    olsmElasticFullDecompList <- split(olsmElasticFullDecomp$FulldecompUnRolledDf, olsmElasticFullDecomp$FulldecompUnRolledDf$Geography)
    # Undo each term's normalization per geography (divide/subtract by the
    # geography's mean from varMean); ID columns, Intercept and dummy columns
    # pass through unchanged.
    olsmElasticFullDecomp <- lapply(names(olsmElasticFullDecompList), function(x){
      sapply(names(olsmElasticFullDecompList[[x]]),function(y){
        if(any(y %in% c("Geography","Period", "Intercept",dummyVarName))){
          olsmElasticFullDecompList[[x]][,names(olsmElasticFullDecompList[[x]]) %in% y]
        }else{
          if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName %in% gsub("_L+[0-9].*", "", y)] %in% c("Division")){
            olsmElasticFullDecompList[[x]][,names(olsmElasticFullDecompList[[x]]) %in% y]/as.numeric(varMean[which(varMean$Geography ==x), names(varMean) %in% y])
          }else if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName %in% gsub("_L+[0-9].*", "", y)] %in% c("Subtraction")){
            olsmElasticFullDecompList[[x]][,names(olsmElasticFullDecompList[[x]]) %in% y]-as.numeric(varMean[which(varMean$Geography ==x), names(varMean) %in% y])
          }else if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName %in% gsub("_L+[0-9].*", "", y)] %in% c("None")){
            olsmElasticFullDecompList[[x]][,names(olsmElasticFullDecompList[[x]]) %in% y]
          }
        }
      })
    })
    # olsmElasticFullDecompList$Total$Geography <- as.character(olsmElasticFullDecompList$Total$Geography)
    olsmElasticFullDecomp <- as.data.frame(rbindlist(lapply(olsmElasticFullDecomp, function(x) data.frame(x,stringsAsFactors = F))))
    return(olsmElasticFullDecomp)
  }
  olsmElasticRegData <- olsm_RegDataTemp
  olsmElasticRegData$Geography <- as.character(olsmElasticRegData$Geography)
  # Base (unperturbed) decomposition used as the reference point.
  olsmFullDecompList <- olsmExtractFullDecomp(model, olsm_parametersDF, olsmModelData,modelFeatureList = olsmModelFeatureList, olsm_RegDataTemp, transData)
  olsmFullDecomp <- list()
  olsmFullDecomp[["FulldecompUnRolledDf"]] <- as.data.frame(rbindlist(olsmFullDecompList))
  modelData <- transData
  # Per-geography variable means used to undo normalization in the helper.
  geoDepMean <- modelData[modelData$Geography %in% olsmModelFeatureList$selectedGeos,]
  varMean <- aggregate(geoDepMean[,!names(geoDepMean) %in% c("Geography","Period")], by = list(geoDepMean$Geography), FUN=mean)
  names(varMean)[which(names(varMean) == "Group.1")] <- "Geography"
  # calculating elasticity of all full data points
  baseFullDecomp <- olsmFullDecomp$FulldecompUnRolledDf[,!names(olsmFullDecomp$FulldecompUnRolledDf) %in% c("Geography", "Period")]
  olsmElasticFullDecomp <- olsmGetElastctContribution(model,olsmElasticRegData, olsmModelFeatureList, olsm_parametersDF, olsmModelData,olsm_RegDataTemp, varMean, transData)
  elasticFullDecomp <- as.data.frame(apply(olsmElasticFullDecomp [,!names(olsmElasticFullDecomp) %in% c("Geography","Period")], 2, as.numeric))
  Elasticity_Modelling_Period <- (colMeans(elasticFullDecomp - baseFullDecomp)/mean(actPredData$Predicted))*100
  ## Calculation of Elasticity by Geography... olsmElasticFullDecomp split by geo
  olsmGeoElasticFullDecomp <- split(olsmElasticFullDecomp,olsmElasticFullDecomp$Geography)
  # olsmFullDecomp$FulldecompUnRolledDf split by geo
  OlsmFullDecompUnRolledGeoSplit <- split(olsmFullDecomp$FulldecompUnRolledDf,olsmFullDecomp$FulldecompUnRolledDf$Geography)
  #actualVsPrediacted Geo Split
  GeoSplitActPredData <- split(actPredData,actPredData$Geography)
  ElasticityByGeography <- data.frame()
  ## Elasticity by Geography
  for(name in names(olsmGeoElasticFullDecomp)){
    # name = names(olsmGeoElasticFullDecomp)[1]
    GeoSplitbaseFullDecomp <- OlsmFullDecompUnRolledGeoSplit[[name]][,!names(olsmFullDecomp$FulldecompUnRolledDf) %in% c("Geography", "Period")]
    GeoSplitElasticFullDecomp <- as.data.frame(apply(olsmGeoElasticFullDecomp[[name]] [,!names(olsmGeoElasticFullDecomp[[name]]) %in% c("Geography","Period")], 2, as.numeric))
    ## Elasticity after Geo Split
    GoeSplit_Elasticity_Modelling_Priod <- (colMeans(GeoSplitElasticFullDecomp - GeoSplitbaseFullDecomp)/mean(GeoSplitActPredData[[name]]$Predicted))*100
    GoeSplit_Elasticity_Modelling_Priod <- data.frame(t(data.frame(GoeSplit_Elasticity_Modelling_Priod)),row.names = NULL)
    GoeSplit_Elasticity_Modelling_Priod <- cbind(Geography = name, GoeSplit_Elasticity_Modelling_Priod)
    ElasticityByGeography <- rbind(ElasticityByGeography, GoeSplit_Elasticity_Modelling_Priod)
  }
  # calculating elasticity of 12 months data points
  olsmModelFeatureList$elasticityL12Flag <- TRUE
  olsmElasticFullDecomp <- olsmGetElastctContribution(model,olsmElasticRegData, olsmModelFeatureList, olsm_parametersDF, olsmModelData,olsm_RegDataTemp, varMean, transData)
  # Convert Period to year-month so the "last 12" selection is month-based.
  olsmFullDecomp$FulldecompUnRolledDf$Period <- zoo::as.yearmon(olsmFullDecomp$FulldecompUnRolledDf$Period)
  olsmElasticFullDecomp$Period <- zoo::as.yearmon(as.Date(olsmElasticFullDecomp$Period))
  baseFullDecomp_12 <- olsmFullDecomp$FulldecompUnRolledDf[olsmFullDecomp$FulldecompUnRolledDf$Period %in% tail(unique(olsmFullDecomp$FulldecompUnRolledDf$Period),n = 12),!names(olsmFullDecomp$FulldecompUnRolledDf) %in% c("Geography", "Period")]
  elasticFullDecomp_12 <- apply(olsmElasticFullDecomp [olsmElasticFullDecomp$Period %in% tail(unique(olsmElasticFullDecomp$Period),n = 12),!names(olsmElasticFullDecomp) %in% c("Geography", "Period")],2,as.numeric)
  Elasticity_L12_Modelling_Period <- (colMeans(elasticFullDecomp_12 - baseFullDecomp_12)/mean(actPredData$Predicted[olsmFullDecomp$FulldecompUnRolledDf$Period %in% tail(unique(olsmFullDecomp$FulldecompUnRolledDf$Period),n = 12)]))*100
  ## Elasticity by Geography of last 12 months
  # olsmElasticFullDecomp split by geo
  olsmGeoElasticFullDecomp <- split(olsmElasticFullDecomp,olsmElasticFullDecomp$Geography)
  # olsmFullDecomp$FulldecompUnRolledDf split by geo after conversion of period to month year format.
  OlsmFullDecompUnRolledGeoSplit <- split(olsmFullDecomp$FulldecompUnRolledDf,olsmFullDecomp$FulldecompUnRolledDf$Geography)
  ## Elasticity by Geography
  ElasticityL12ByGeography <- data.frame()
  for(name in names(olsmGeoElasticFullDecomp)){
    # name = names(olsmGeoElasticFullDecomp)[1]
    baseFullDecomp_12_GeoSplit <- OlsmFullDecompUnRolledGeoSplit[[name]][OlsmFullDecompUnRolledGeoSplit[[name]]$Period %in% tail(unique(olsmFullDecomp$FulldecompUnRolledDf$Period),n = 12),!names(olsmFullDecomp$FulldecompUnRolledDf) %in% c("Geography", "Period")]
    #write.csv(baseFullDecomp_12_GeoSplit, file = "baseFullDecomp_12_GeoSplit.csv", row.names = F)
    elasticFullDecomp_12_GeoSplit <- apply(olsmGeoElasticFullDecomp[[name]] [olsmGeoElasticFullDecomp[[name]]$Period %in% tail(unique(olsmGeoElasticFullDecomp[[name]]$Period),n = 12),!names(olsmGeoElasticFullDecomp[[name]]) %in% c("Geography", "Period")],2,as.numeric)
    #write.csv(elasticFullDecomp_12_GeoSplit, file = "elasticFullDecomp_12_GeoSplit.csv", row.names = F)
    ## Elasticity after Geo Split
    Elasticity_L12_Modelling_Period_GeoSplit <- (colMeans(elasticFullDecomp_12_GeoSplit - baseFullDecomp_12_GeoSplit)/mean(GeoSplitActPredData[[name]]$Predicted[OlsmFullDecompUnRolledGeoSplit[[name]]$Period %in% tail(unique(olsmFullDecomp$FulldecompUnRolledDf$Period),n = 12)]))*100
    Elasticity_L12_Modelling_Period_GeoSplit <- data.frame(t(data.frame(Elasticity_L12_Modelling_Period_GeoSplit)),row.names = NULL)
    Elasticity_L12_Modelling_Period_GeoSplit <- cbind(Geography = name, Elasticity_L12_Modelling_Period_GeoSplit)
    ElasticityL12ByGeography <- rbind(ElasticityL12ByGeography, Elasticity_L12_Modelling_Period_GeoSplit)
  }
  # Intercept has no input to perturb, so its elasticity is defined as 0.
  Elasticity_L12_Modelling_Period["Intercept"] <- Elasticity_Modelling_Period["Intercept"] <- ElasticityL12ByGeography["Intercept"] <- ElasticityByGeography["Intercept"] <- 0
  ElasticityFullPeriod_L12 <- data.frame(term = names(Elasticity_Modelling_Period),Elasticity_Modelling_Period, Elasticity_L12_Modelling_Period,row.names = NULL)
  Calculated_Elasticity <- list()
  # Reorder so Geography is the leading column of the per-geo tables.
  Calculated_Elasticity[["ElasticityByGeography"]] <- data.frame(ElasticityByGeography[,c(which(names(ElasticityByGeography) == "Geography"),which(names(ElasticityByGeography) != "Geography"))],row.names = NULL)
  Calculated_Elasticity[["ElasticityL12ByGeography"]] <- data.frame(ElasticityL12ByGeography[,c(which(names(ElasticityL12ByGeography) == "Geography"),which(names(ElasticityL12ByGeography) != "Geography"))],row.names = NULL)
  Calculated_Elasticity[["ElasticityFullPeriod_L12"]] <- ElasticityFullPeriod_L12
  Calculated_Elasticity <- lapply(Calculated_Elasticity, function(x){
    x[is.na(x)]<- 0
    return(x)
  })
  # Drop/blank the dependent variable from the outputs.
  Calculated_Elasticity$ElasticityByGeography[,which(names(Calculated_Elasticity$ElasticityByGeography) %in% olsmModelFeatureList$depVar)] <- NULL
  Calculated_Elasticity$ElasticityL12ByGeography[,which(names(Calculated_Elasticity$ElasticityL12ByGeography) %in% olsmModelFeatureList$depVar)] <- NULL
  Calculated_Elasticity$ElasticityFullPeriod_L12[which(Calculated_Elasticity$ElasticityFullPeriod_L12$term == olsmModelFeatureList$depVar),-1] <- NA
  return(Calculated_Elasticity)
}
olsmGetModelParameter <- function(model, olsm_parametersDF,olsmModelData, olsmModelFeatureList, olsm_RegDataTemp, transData){
  # Build the final model parameter table: coefficient estimates (broom::tidy)
  # merged with VIFs, combined columns split back to members, fixed-coefficient
  # rows appended, multicollinear (dropped) terms re-added with 0 estimates,
  # rows reordered to the model-manager order, and contribution% merged in
  # from the full decomposition.
  depVar <- olsm_parametersDF$VariableName[olsm_parametersDF$Type=="DepVar"]
  # getting VIF of variable
  VIF <- data.frame(term = names(rms::vif(model)), VIF = rms::vif(model),row.names = NULL)
  # checking for combined column, if present then split the value of combined columns.
  if(any(grepl("Combined",names(model$coefficients)))){
    modelParam <- merge(tidy(model),VIF,by = "term",all = T)
    parameterDetails <- olsmSplitCombinedEstimateData(parameterDetails = modelParam, parametersDf = olsm_parametersDF, transData)
  }else{
    parameterDetails <- merge(tidy(model),VIF,by = "term",all = T)
  }
  # checking for fixed estimates, if present in model manager, then add to the model parameterDetails
  if(any(grepl("Fixed",olsm_parametersDF$Type))){
    if(olsmModelFeatureList$FixedVarChoice == "All"){
      # Fixed rows carry only name + coefficient; the remaining stat columns are NA.
      fixedVar <- cbind(olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),c("VariableName","Fixed_Coefficient")],matrix(NA,length(grep("Fixed",olsm_parametersDF$Type)),length(parameterDetails)-2))
      names(fixedVar) <- names(parameterDetails)
      parameterDetails <- rbind(parameterDetails,fixedVar)
    }else if(olsmModelFeatureList$FixedVarChoice == "Geo"){
      # Per-geography fixed coefficients are averaged across geographies.
      fixedGeoDf <- olsmModelFeatureList$geoFixedEstimatesDF[,c("VariableName","Fixed_Coefficient")]
      fixedGeoDf <- data.frame(aggregate(fixedGeoDf$Fixed_Coefficient,by = list(fixedGeoDf$VariableName),FUN = mean,na.rm = T), matrix(NA,length(grep("Fixed",olsm_parametersDF$Type)),length(parameterDetails)-2))
      names(fixedGeoDf) <- names(parameterDetails)
      parameterDetails <- rbind(parameterDetails,fixedGeoDf)
    }
  }
  # checking for multicollinearity, if present then adding multicollinear variable with 0 estimate and NA in other columns.
  if(nrow(tidy(model)) != length(names(model$coefficients))){
    # here is handling multicollinearity in model result.
    multiCorVar <- names(model$coefficients)[!names(model$coefficients) %in% tidy(model)$term]
    multiCorVar <- data.frame(multiCorVar,matrix(0, nrow = length(multiCorVar), ncol = 1),matrix(NA, nrow = length(multiCorVar), ncol = length(parameterDetails)-2))
    names(multiCorVar) <- names(parameterDetails)
    parameterDetails <- rbind(parameterDetails, multiCorVar)
  }
  # reordering parameterDetails as per model manager
  varRowName <- parameterDetails$term
  varOrderMM <- olsm_parametersDF$VariableName[olsm_parametersDF$Type != "Not in Model"]
  # The intercept row is keyed by the dependent variable's name for ordering.
  if(any(varRowName %in% "(Intercept)")){
    varRowName[varRowName %in% "(Intercept)"] <- depVar
  }
  # Row names use the base variable name (lag suffix "_L<n>..." stripped).
  rownames(parameterDetails) <- as.character(sapply(varRowName, function(x) gsub("_L+[0-9].*","",x)))
  varOrderMM <- varOrderMM[varOrderMM %in% row.names(parameterDetails)]
  olsmResult <- data.frame(parameterDetails[order(match(row.names(parameterDetails),varOrderMM)),],row.names = NULL)
  # "(Intercept)" -> "Intercept" for display.
  olsmResult$term[grep("Intercept",olsmResult$term)] <- gsub("\\(|\\)","",olsmResult$term[grep("Intercept",olsmResult$term)])
  #olsmFullDecomp <- olsmExtractFullDecomp(model, olsm_parametersDF, olsmModelData,olsmModelFeatureList, olsm_RegDataTemp, transData)
  olsmFullDecompList <- olsmExtractFullDecomp(model, olsm_parametersDF, olsmModelData,olsmModelFeatureList, olsm_RegDataTemp, transData)
  if(length(olsmModelFeatureList$min_max_var$VariableName) > 0){
    olsmModelFeatureList[["fullDecompMinMaxAdjustDF"]] <- olsmExtractBaseFullDecompMinMax(olsmFullDecompList, olsmModelFeatureList)
  }
  olsmFullDecomp <- list()
  olsmFullDecomp[["FulldecompUnRolledDf"]] <- as.data.frame(rbindlist(lapply(olsmFullDecompList, function(x)olsmMinMaxAdjust(x, olsmModelFeatureList))))
  olsmFullDecomp[["FulldecompRolledDf"]] <- olsmExtractFullDecompRolledUp(olsmFullDecomp[["FulldecompUnRolledDf"]])
  modelContribution <- olsmGetContribution(olsmFullDecomp, olsmModelFeatureList$depVar, unrolled = NULL)
  olsmResult <- merge(olsmResult, modelContribution, by = "term",sort = F,all = T)
  return(olsmResult)
}
# Function to extract model parameters to display on screen
# Builds the on-screen summary table (one row per candidate model) from the
# ranking statistics produced by olsmExtractModelParameterValue().
# Returns 0 when no model is available so the caller can prompt the user to
# change the model parameters.
olsmExtractModelParameter <- function(olsmAllModelList, olsm_parametersDF){
  n.models <- length(olsmAllModelList)
  # No candidate models: signal the caller with 0.
  if(n.models == 0){
    return(0)
  }
  # One row of ranking statistics (R2, adj-R2, DW, RootMSE) per model.
  statRows <- lapply(olsmAllModelList, olsmExtractModelParameterValue)
  statTable <- as.data.frame(matrix(unlist(statRows), nrow = n.models, byrow = TRUE))
  # Outside-variable label for each model, taken from the model-manager sheet.
  outsideVars <- unlist(lapply(olsmAllModelList,
                               function(x, modelManager) olsmExtractOutsideVar(x, modelManager = olsm_parametersDF),
                               modelManager = olsm_parametersDF))
  summaryTable <- cbind(paste0("Model_", seq_len(nrow(statTable))), outsideVars, statTable)
  rownames(summaryTable) <- NULL
  colnames(summaryTable) <- c("Model_No", "Outside_Variable", "%R2", "%R2.adj", "DW", "RootMSE")
  # Express the R-squared statistics as percentages rounded to 2 decimals.
  summaryTable$`%R2` <- sapply(summaryTable$`%R2`, function(x) round(x * 100, digits = 2))
  summaryTable$`%R2.adj` <- sapply(summaryTable$`%R2.adj`, function(x) round(x * 100, digits = 2))
  return(summaryTable)
}
# Function to extract the ranking statistics for a single fitted model.
# Returns a named numeric vector (R2, R2.adj, DurbinWatson, RootMSE);
# NaN statistics (e.g. from degenerate fits) are reported as 0, NA values
# are deliberately left untouched (previous behaviour).
olsmExtractModelParameterValue <- function(fit) {
  fitSummary <- summary(fit)            # hoisted: summary() was computed twice before
  R2 <- fitSummary$r.squared
  R2.adj <- fitSummary$adj.r.squared
  dw <- durbinWatsonTest(fit)[[2]]      # car::durbinWatsonTest, the D-W statistic
  RootMSE <- sqrt(mean(fit$residuals^2))
  out <- c(R2 = R2, R2.adj = R2.adj, DurbinWatson = dw, RootMSE = RootMSE)
  # Vectorised NaN -> 0 replacement (is.nan(NA) is FALSE, so NA is kept).
  out[is.nan(out)] <- 0
  return(out)
}
# Builds the Actual-vs-Predicted table (Geography, Period, Actual, Predicted,
# Residual) for a fitted model, adding the contribution of any fixed-coefficient
# variables back onto the Predicted column.
#
# Args:
#   model           : fitted model; fitted()/residuals() supply Predicted/Residual.
#   olsm_parametersDF: model-manager sheet; identifies the DepVar row and fixed vars.
#   regDf           : regression data with Geography, Period, DepVar and fixed-var columns.
#   modelFeatureList: run settings; FixedVarChoice selects "All" vs per-"Geo" fixed estimates.
olsm_getActualVsPredictedDf <- function(model, olsm_parametersDF, regDf, modelFeatureList){
  # Helper: scales each fixed variable column in df by its fixed coefficient,
  # then adds the (row-)sum of those scaled columns onto `predictedVar`.
  olsmIncludeFixedEst <- function(df, actPredData, predictedVar, fixedVarCoef){
    for (i in 1:nrow(fixedVarCoef)) {
      df[,which(names(df) %in% fixedVarCoef$VariableName[i])] <- df[,which(names(df) %in% fixedVarCoef$VariableName[i])] * fixedVarCoef$Fixed_Coefficient[i]
    }
    fixedVar <- fixedVarCoef$VariableName
    # A single fixed var selects a vector; several select a data.frame, hence rowSums().
    if(length(fixedVar)==1){
      actPredData[,which(names(actPredData)==predictedVar)] <- actPredData[,which(names(actPredData)==predictedVar)] + df[,which(names(df) %in% fixedVar)]
    }else{
      actPredData[,which(names(actPredData)==predictedVar)] <- actPredData[,which(names(actPredData)==predictedVar)] + rowSums(df[,which(names(df) %in% fixedVar)])
    }
    return(actPredData)
  }
  data <- regDf
  # Actual comes from the raw regression data; Predicted/Residual from the model fit.
  olsm_actPred <- cbind.data.frame("Geography" = data[,"Geography"],Period =data[,"Period"], Actual = data[,olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"]], Predicted = fitted(model), Residual = residuals(model))
  # code to add fixed effect value to predData.
  if(any(grepl("Fixed",olsm_parametersDF$Type))){
    if(modelFeatureList$FixedVarChoice == "All"){
      # NOTE(review): the outer guard greps for "Fixed" but this filter greps for
      # "Fixed Var" -- confirm both patterns cover the same Type values.
      fixedVarCoef <- olsm_parametersDF[grepl("Fixed Var",olsm_parametersDF$Type),c("VariableName","Fixed_Coefficient")]
      olsm_actPred <- olsmIncludeFixedEst(df = data ,actPredData = olsm_actPred, predictedVar = "Predicted", fixedVarCoef)
    }else if(modelFeatureList$FixedVarChoice == "Geo"){
      # Geography-specific fixed estimates: apply each geography's coefficients
      # to its own slice, then stack the slices back together (data.table::rbindlist).
      fixedVarCoefByGeo <- split(modelFeatureList$geoFixedEstimatesDF,modelFeatureList$geoFixedEstimatesDF$Geography)
      geoData <- split(data, data$Geography)
      olsm_actPred <- split(olsm_actPred, olsm_actPred$Geography)
      olsm_actPred <- data.frame(rbindlist(lapply(names(fixedVarCoefByGeo), function(x){
        return(olsmIncludeFixedEst(df = geoData[[x]] ,actPredData = olsm_actPred[[x]], predictedVar = "Predicted", fixedVarCoefByGeo[[x]]))
      })),row.names = NULL)
    }
  }
  return(olsm_actPred)
}
# De-normalises the OLS model estimates per geography, producing a data.frame
# with one row per geography and one column per model term.
# The rescaling rule depends on how the DepVar and each IndepVar were
# normalised ("Division" vs anything else); per-geography variable means from
# the transformed regression data drive the rescaling.
#
# Args:
#   ModelDf             : data.frame with columns term / estimate (tidy model output).
#   olsm_parametersDF   : model-manager sheet (VariableName, Type, Normalization, Fixed_Coefficient).
#   olsmFinalTransRegDf : transformed regression data (Geography + variable columns).
#   olsmModelFeatureList: run settings; depVar used for the dependent-variable mean.
olsmDenormModelEstimate <- function(ModelDf, olsm_parametersDF, olsmFinalTransRegDf, olsmModelFeatureList){
  # Expand any "Combined_*" terms back into their component variables.
  if(any(grepl("Combined",ModelDf$term))){
    combinedDf <- olsmSplitCombinedEstimateData(ModelDf, olsm_parametersDF, olsmFinalTransRegDf)
    ModelDf <- data.frame(combinedDf[,which(names(combinedDf) %in% c("term","estimate"))],row.names = NULL)
  }
  # Append fixed-coefficient variables as extra rows (estimate = fixed coefficient).
  if(any(grepl("Fixed",olsm_parametersDF$Type))){
    fixedDf <- data.frame(olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% c("VariableName","Fixed_Coefficient")],row.names = NULL)
    names(fixedDf) <- c("term","estimate")
    fixedVarDf <- data.frame(fixedDf$term, fixedDf$estimate, matrix(NA, nrow = nrow(fixedDf), ncol = ncol(ModelDf)-ncol(fixedDf)))
    names(fixedVarDf) <- names(ModelDf)
    ModelDf <- rbind(ModelDf,fixedVarDf)
  }
  # Per-geography means of the dep var and every model term (column 1 = Geography is dropped).
  olsmSplittedTransRegDf <- split(olsmFinalTransRegDf[names(olsmFinalTransRegDf) %in% c("Geography", olsmModelFeatureList$depVar,ModelDf$term)], olsmFinalTransRegDf$Geography)
  ModelGeoVarAvg <- data.frame(cbind(Geography= names(olsmSplittedTransRegDf),rbindlist(lapply(names(olsmSplittedTransRegDf), function(x){data.frame(t(colMeans(olsmSplittedTransRegDf[[x]][,-1],na.rm = T)))}))))
  # Result skeleton: one row per geography, one column per term.
  DenormModelEst <- data.frame(Geography = names(olsmSplittedTransRegDf), matrix(ncol = length(ModelDf$term),nrow = length(names(olsmSplittedTransRegDf))), stringsAsFactors = F)
  names(DenormModelEst)[-1] <- ModelDf$term
  # Case 1: dependent variable was division-normalised.
  if(olsm_parametersDF$Normalization[olsm_parametersDF$Type == "DepVar"] == "Division"){
    for(name in ModelDf$term){
      # name = ModelDf$term[4]
      # Strip lag suffixes (e.g. "_L1...") to look the variable up in the model manager.
      varName <- gsub("_L+[0-9].*","",name)
      if(varName == "(Intercept)"){
        # Intercept rescales by the geography's dep-var mean.
        DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name] * ModelGeoVarAvg[,olsmModelFeatureList$depVar]
      }else if(grepl("Dummy_Var",name)){
        # Dummy variables are never normalised; keep the raw estimate.
        DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name]
      }else{
        if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName == varName] == "Division"){
          # Both sides division-normalised: rescale by depMean / varMean,
          # guarding against divide-by-zero (Inf -> 0).
          varAvg <- ModelGeoVarAvg[,olsmModelFeatureList$depVar]/ModelGeoVarAvg[,name]
          varAvg[is.infinite(varAvg)] <- 0
          DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name] * varAvg
        }else if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName == varName] != "Division"){
          DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name]
        }
      }
    }
  }else
    # Case 2: dependent variable not division-normalised.
    if(olsm_parametersDF$Normalization[olsm_parametersDF$Type == "DepVar"] != "Division"){
      for(name in ModelDf$term){
        varName <- gsub("_L+[0-9].*","",name)
        if(name == "(Intercept)" | grepl("Dummy_Var",name)){
          DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name]
        }else{
          if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName == varName] == "Division"){
            # Only the independent variable was division-normalised:
            # rescale its estimate by 1 / varMean.
            DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name]/ModelGeoVarAvg[,name]
          }else if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName == varName] != "Division"){
            DenormModelEst[,name] <- ModelDf$estimate[ModelDf$term == name]
          }
        }
      }
    }
  return(DenormModelEst)
}
# De-normalises the per-geography mixed-model estimates back to the original
# scale using per-geography variable means.
# If the dependent variable was division-normalised, every estimate in a
# geography column is multiplied by that geography's dep-var mean; otherwise
# only division-normalised independent variables are divided by their own
# per-geography mean. The "(Intercept)" row is never rescaled in the latter case.
olsm_MixedModelDenormEstimate <- function(olsm_parametersDF,unrolledEstimate,VarMeanGeoData){
  depVar <- olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"]
  # Loop-invariant: whether the dependent variable was division-normalised.
  depVarIsDivision <- olsm_parametersDF$Normalization[olsm_parametersDF$Type == "DepVar"] == "Division"
  geoCols <- names(unrolledEstimate)[!names(unrolledEstimate) %in% "term"]
  for(geoName in geoCols){
    if(depVarIsDivision){
      # Scale the whole geography column by its dependent-variable mean.
      geoDepMean <- VarMeanGeoData[VarMeanGeoData$Geography == geoName, which(names(VarMeanGeoData) == depVar)]
      unrolledEstimate[, geoName] <- unrolledEstimate[, geoName] * geoDepMean
    }else{
      # Rescale only division-normalised independent variables.
      termNames <- as.character(unrolledEstimate$term[!unrolledEstimate$term %in% "(Intercept)"])
      for(termName in termNames){
        if(olsm_parametersDF$Normalization[olsm_parametersDF$VariableName == termName] == "Division"){
          rowIdx <- unrolledEstimate$term == termName
          unrolledEstimate[rowIdx, geoName] <- unrolledEstimate[rowIdx, geoName] / VarMeanGeoData[VarMeanGeoData$Geography == geoName, termName]
        }
      }
    }
  }
  return(unrolledEstimate)
}
##################### Function related to Model Result Download ################
# Builds the header text block for the model-result download file: a character
# vector of caption lines followed by the result table rendered as CSV text.
# "\n\n" entries become blank separator lines when the vector is written out.
#
# Args:
#   model               : fitted model (lm); supplies the dependent-variable name.
#   modelResult         : one-row result table; column 1 holds the model label.
#   olsm_parametersDF   : model-manager sheet (unused here; kept for interface stability).
#   obsCount            : number of observations used in the fit.
#   olsmModelFeatureList: run settings (dates, adstock, intercept, weight, ...).
olsmExtractModelDetail <- function(model, modelResult,olsm_parametersDF, obsCount,olsmModelFeatureList){
  captionLines <- c(
    "The REG Procedure",
    "\n\n",
    paste("Model:", modelResult[1, 1]),
    paste("Dependant Variable:", names(model$model[1])),
    "\n\n",
    paste("Number of Observations Used in Model:", obsCount),
    "\n\n",
    paste("AF Start Date:", olsmModelFeatureList$startDate),
    "\n\n",
    paste("AF End Date:", olsmModelFeatureList$endDate),
    "\n\n",
    paste("Model Start Date:", min(lubridate::dmy(olsmModelFeatureList$modellingPeriod))),
    "\n\n",
    paste("Model End Date:", max(lubridate::dmy(olsmModelFeatureList$modellingPeriod))),
    "\n\n",
    paste("Adstock Choice:", olsmModelFeatureList$adStockChoice),
    "\n\n",
    paste("Intercept:", olsmModelFeatureList$hasIntercept),
    "\n\n",
    paste("Weight:", olsmModelFeatureList$wLSChoice),
    "\n\n",
    # BUGFIX: a second, duplicated "Intercept:" caption was emitted here by a
    # copy-paste error; it has been removed. The "Chioce" label typo is also
    # corrected (the list element keeps its legacy name `mixedModelChioce`).
    paste("Mixed Model Choice:", olsmModelFeatureList$mixedModelChioce),
    "\n\n"
  )
  # Append the model-result table rendered as CSV text (single vector build
  # instead of the previous repeated c(output, ...) growth).
  output <- c(captionLines,
              noquote(capture.output(write.csv(modelResult, stdout(), row.names = F, quote = F))),
              "\n\n")
  return(output)
}
# Assembles the full mixed-model result bundle for download / display:
#   - model fit statistics (method, AIC, BIC, -2logLik),
#   - rolled-up (population-level) estimates with contribution,
#   - unrolled (per-geography) estimates,
#   - per-geography contribution table and random effects,
#   - a character vector ("output") of caption lines plus CSV-rendered tables.
# Returns a list: output, rolledEstimate, unrolledEstimate, randomEffect.
olsmExtractMixedModel <- function(model,olsmFinalNormRegDf,modelFeatureList,olsm_parametersDF,olsm_RegDataTemp, olsmFinalTransRegDf){
  # Model Features
  modelFeature <- data.frame(Method = model$method,AIC = summary(model)["AIC"],BIC = summary(model)["BIC"],logLik = -2*model$logLik)
  names(modelFeature)[names(modelFeature) %in% "logLik"] <- "-2logLik"
  # Rolled Up Mixed Model Result (population-level t-table from the nlme fit)
  rolledEstimate <- data.frame(summary(model)$tTable)[c("Value","Std.Error","t.value","p.value")]
  names(rolledEstimate) <- c("Rolledup_Estimate","Rolledup_Std.Error","Rolledup_t.value","Rolledup_p.value")
  rolledEstimate <- data.frame(term = rownames(rolledEstimate), rolledEstimate, row.names = NULL,stringsAsFactors = F)
  # Expand any "Combined_*" terms back into their component variables.
  if(any(grepl("Combined",rolledEstimate$term))){
    rolledEstimate <- data.frame(olsmSplitCombinedEstimateData(rolledEstimate, olsm_parametersDF, olsmFinalTransRegDf),row.names = NULL)
  }
  if(any(grepl("(Intercept)",rolledEstimate$term))){
    rolledEstimate$term[grep("(Intercept)",rolledEstimate$term)] <- "Intercept"
  }
  # Append fixed-coefficient variables as extra rows (estimate only; other stats NA).
  if(any(grepl("Fixed",olsm_parametersDF$Type))){
    if(modelFeatureList$FixedVarChoice == "All"){
      fixedDf <- data.frame(olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% c("VariableName","Fixed_Coefficient")],row.names = NULL)
      # NOTE(review): length(fixedDf) counts COLUMNS of the data.frame, not rows --
      # confirm nrow(fixedDf) was not intended for the NA padding dimension.
      fixedDf <- data.frame(fixedDf, matrix(NA,nrow = length(fixedDf),ncol = length(rolledEstimate)-2))
      names(fixedDf) <- names(rolledEstimate)
      rolledEstimate <- rbind(rolledEstimate,fixedDf)
    }else if(modelFeatureList$FixedVarChoice == "Geo"){
      fixedDf <- olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% "VariableName"]
      fixedGeoDf <- modelFeatureList$geoFixedEstimatesDF[,c("VariableName","Fixed_Coefficient")]
      # Average the geography-specific fixed coefficients per variable for the rolled-up view.
      # NOTE(review): same length(fixedDf) columns-vs-rows concern as above.
      fixedGeoDf <- data.frame(aggregate(fixedGeoDf$Fixed_Coefficient,by = list(fixedGeoDf$VariableName),FUN = mean,na.rm = T), matrix(NA,nrow = length(fixedDf),ncol = length(rolledEstimate)-2))
      names(fixedGeoDf) <- names(rolledEstimate)
      rolledEstimate <- rbind(rolledEstimate,fixedGeoDf)
    }
  }
  ## calculating Contribution
  olsmModelData <- olsmFinalNormRegDf
  olsmModelData$Period <- lubridate::dmy(olsmModelData$Period)
  olsmFullDecompList <- olsmExtractFullDecomp(model, olsm_parametersDF, olsmModelData,modelFeatureList, olsm_RegDataTemp, olsmFinalTransRegDf)
  if(length(modelFeatureList$min_max_var$VariableName) > 0){
    modelFeatureList[["fullDecompMinMaxAdjustDF"]] <- olsmExtractBaseFullDecompMinMax(olsmFullDecompList, modelFeatureList)
  }
  olsmFullDecomp <- list()
  olsmFullDecomp[["FulldecompUnRolledDf"]] <- as.data.frame(rbindlist(lapply(olsmFullDecompList, function(x)olsmMinMaxAdjust(x, modelFeatureList))))
  olsmFullDecomp[["FulldecompRolledDf"]] <- olsmExtractFullDecompRolledUp(olsmFullDecomp[["FulldecompUnRolledDf"]])
  rolledEstimate <- merge(rolledEstimate,olsmGetContribution(olsmFullDecomp, modelFeatureList$depVar, unrolled = NULL),by = "term",sort = F)
  # Unrolled Up Mixed Model Result (per-geography coefficients from coef(model))
  unrolledEstimate <- data.frame(term = rownames(t(coef(model))),data.frame(t(coef(model)),row.names = NULL))
  if(any(grepl("Combined",unrolledEstimate$term))){
    # Split combined terms separately within each geography column.
    modelDFList <- list()
    modelDFList <- lapply(as.list(unrolledEstimate[-1]), function(x){data.frame(unrolledEstimate[1],estimate = x, stringsAsFactors = F)})
    geoTransData <- split(olsmFinalTransRegDf, olsmFinalTransRegDf$Geography)
    modelDFList <- lapply(names(modelDFList), function(x){return(olsmSplitCombinedEstimateData(modelDFList[[x]], olsm_parametersDF, geoTransData[[x]]))})
    unrolledEstimate <- data.frame(modelDFList[[1]][,1],do.call(cbind, lapply(modelDFList, function(df) df$estimate)), row.names = NULL)
    names(unrolledEstimate) <- c("term", names(geoTransData))
  }
  unrolledEstimate$term <- as.character(unrolledEstimate$term)
  if(any(grepl("(Intercept)",unrolledEstimate$term))){
    unrolledEstimate$term[grep("(Intercept)",unrolledEstimate$term)] <- "Intercept"
  }
  # Append fixed-coefficient rows to the unrolled (per-geography) estimates.
  if(any(grepl("Fixed",olsm_parametersDF$Type))){
    if(modelFeatureList$FixedVarChoice == "All"){
      fixedDf <- data.frame(olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% c("VariableName","Fixed_Coefficient")],row.names = NULL)
      fixedDf <- data.frame(fixedDf, matrix(fixedDf$Fixed_Coefficient, nrow = nrow(fixedDf), ncol = ncol(unrolledEstimate)-2))
      names(fixedDf) <- names(unrolledEstimate)
      unrolledEstimate <- rbind(unrolledEstimate,fixedDf)
    }else if(modelFeatureList$FixedVarChoice == "Geo"){
      fixedDf <- olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% "VariableName"]
      fixedGeoDf <- modelFeatureList$geoFixedEstimatesDF[,c("Geography","VariableName","Fixed_Coefficient")]
      # Wide format: one column per geography.
      fixedGeoDf <- reshape2::dcast(fixedGeoDf,formula = VariableName ~ Geography,value.var = "Fixed_Coefficient")
      names(fixedGeoDf) <- names(unrolledEstimate)
      unrolledEstimate <- rbind(unrolledEstimate,fixedGeoDf)
    }
  }
  # Unrolled Contribution by Geography
  olsmFullDecompList <- split(olsmFullDecomp$FulldecompUnRolledDf,olsmFullDecomp$FulldecompUnRolledDf$Geography)
  geoContrList <- lapply(names(olsmFullDecompList), function(x) olsmGetContribution(olsmFullDecompList[[x]], modelFeatureList$depVar,x))
  geoContrTable <- Reduce(function(x, y) merge(x, y, all=TRUE), geoContrList)
  # Random Effect by geography
  randomEffect <- data.frame(Term = rownames(t(random.effects(model))),data.frame(t(random.effects(model)),row.names = NULL))
  # Build the downloadable text header ("\n\n" entries become blank lines).
  output <- NULL
  output <- c(output,"The Mixed Model Result")
  output <- c(output,"\n\n")
  output <- c(output,paste("Dependant Variable:",modelFeatureList$depVar))
  output <- c(output,"\n\n")
  output <- c(output,paste("Model Statistics:"))
  output <- c(output,noquote(capture.output(write.csv(modelFeature,stdout(),row.names = F,quote = F))))
  output <- c(output,"\n\n")
  output <- c(output,paste("Number of Observations Read:",modelFeatureList$TotalDataCount))
  output <- c(output,paste("Number of Observations Used:",nrow(model$data)))
  output <- c(output,"\n\n")
  output <- c(output,paste("AF Start Date:",modelFeatureList$startDate))
  output <- c(output,"\n\n")
  output <- c(output,paste("AF End Date:",modelFeatureList$endDate))
  output <- c(output,"\n\n")
  output <- c(output,paste("Model Start Date:",min(lubridate::dmy(modelFeatureList$modellingPeriod))))
  output <- c(output,"\n\n")
  output <- c(output,paste("Model End Date:",max(lubridate::dmy(modelFeatureList$modellingPeriod))))
  output <- c(output,"\n\n")
  output <- c(output,paste("Adstock Choice:",modelFeatureList$adStockChoice))
  output <- c(output,"\n\n")
  output <- c(output,paste("Intercept:",modelFeatureList$hasIntercept))
  output <- c(output,"\n\n")
  output <- c(output,paste("Weight:",modelFeatureList$wLSChoice))
  output <- c(output,"\n\n")
  # NOTE(review): "Intercept:" is emitted a second time here (duplicate of the
  # caption above) -- looks like a copy-paste slip; confirm and remove.
  output <- c(output,paste("Intercept:",modelFeatureList$hasIntercept))
  output <- c(output,"\n\n")
  # NOTE(review): "Chioce" typo in the label (and in the list element name
  # mixedModelChioce); left untouched here as it is runtime output text.
  output <- c(output,paste("Mixed Model Chioce:",modelFeatureList$mixedModelChioce))
  output <- c(output,"\n\n")
  output <- c(output,paste("Class Level Information:"))
  output <- c(output,noquote(capture.output(write.csv(data.frame(Class = "Geography",Levels = length(modelFeatureList$selectedGeos), Values = paste(modelFeatureList$selectedGeos,collapse = "-")),stdout(),row.names = F,quote = F))))
  output <- c(output,"\n\n")
  # output <- c(output,paste("Rolled-up Estimate with Contribution:"))
  # output <- c(output,noquote(capture.output(write.csv(rolledEstimate,stdout(),row.names = F,quote = F))))
  # output <- c(output,"\n\n")
  # output <- c(output,paste("Unrolled Estimate by Geography:"))
  # output <- c(output,noquote(capture.output(write.csv(unrolledEstimate,stdout(),row.names = F,quote = F))))
  # output <- c(output,"\n\n")
  # output <- c(output,paste("Unrolled Contribution% by Geography:"))
  # output <- c(output,noquote(capture.output(write.csv(geoContrTable,stdout(),row.names = F,quote = F))))
  # output <- c(output,"\n\n")
  # output <- c(output,paste("Random Effect for Each Variable within Geography:"))
  # output <- c(output,noquote(capture.output(write.csv(randomEffect,stdout(),row.names = F,quote = F))))
  # output <- c(output,"\n\n")
  mixedModelOutput <- list()
  mixedModelOutput[["output"]] <- output
  mixedModelOutput[["rolledEstimate"]] <- rolledEstimate
  mixedModelOutput[["unrolledEstimate"]] <- unrolledEstimate
  mixedModelOutput[["randomEffect"]] <- randomEffect
  return(mixedModelOutput)
  #return(output)
}
# Returns the model frame (response + regressors actually used in the fit)
# for download alongside the model results.
#
# Args:
#   model : fitted model object; its `$model` component IS the data used.
#   olsm_parametersDF, olsmFinalTransRegDf, olsmFinalRegDf, regDf,
#   olsmDummyModelDateScope, modelResult :
#         retained only for interface stability with existing callers; the
#         previous hand-built reconstruction of the model data from these
#         frames was superseded by reading `model$model` directly, and the
#         dead commented-out implementation has been removed (see version
#         control for the history).
olsmExtractModelData <- function(model, olsm_parametersDF, olsmFinalTransRegDf, olsmFinalRegDf,regDf,olsmDummyModelDateScope,modelResult){
  return(model$model)
}
# Computes, per geography, the base value (min / max / mean) of every variable
# flagged for min-max adjustment; these bases are later subtracted from the
# full-decomposition columns (see olsmMinMaxAdjust).
#
# Args:
#   olsmFullDecompList  : named list (one data.frame per geography) of decomposition columns.
#   olsmModelFeatureList: list holding min_max_var (VariableName, Min_Max_Adjustment).
# Returns: data.frame with Geography plus one base column per adjusted variable.
olsmExtractBaseFullDecompMinMax <- function(olsmFullDecompList, olsmModelFeatureList){
  fullDecompMinMaxAdjustDF <- data.frame(Geography = names(olsmFullDecompList))
  minMaxVar <- olsmModelFeatureList$min_max_var
  for(i in seq_along(minMaxVar$VariableName)){  # seq_along(): safe when the table is empty
    var <- minMaxVar$VariableName[i]
    adjustment <- as.character(minMaxVar$Min_Max_Adjustment[i])
    # Select the per-geography summary function once instead of duplicating
    # the extraction expression in three branches; unknown adjustment types
    # now fail loudly instead of silently reusing a stale result.
    summaryFun <- switch(adjustment,
                         "Min" = function(x) min(x[,names(x) %in% var], na.rm = TRUE),
                         "Max" = function(x) max(x[,names(x) %in% var], na.rm = TRUE),
                         "Average" = function(x) mean(x[,names(x) %in% var], na.rm = TRUE),
                         stop("Unknown Min_Max_Adjustment: ", adjustment))
    df <- data.frame(sapply(olsmFullDecompList, summaryFun), row.names = NULL)
    names(df) <- var
    fullDecompMinMaxAdjustDF <- cbind(fullDecompMinMaxAdjustDF, df)
  }
  return(fullDecompMinMaxAdjustDF)
}
# Applies the min-max base adjustment to ONE geography's decomposition frame:
# subtracts the precomputed base (from fullDecompMinMaxAdjustDF, keyed by the
# frame's single Geography value) from each flagged variable's column and,
# when the model has an intercept, adds the same base back into the Intercept
# column so row totals are preserved.
olsmMinMaxAdjust <- function(df, modelFeatureList){
  min_max_var <- modelFeatureList$min_max_var
  baseFullDecompMinMix <- modelFeatureList$fullDecompMinMaxAdjustDF
  # df is expected to hold a single geography; unique(df$Geography) below
  # relies on that -- TODO confirm callers always pass one-geography slices.
  df$Geography <- as.character(df$Geography)
  if(nrow(min_max_var)!= 0){
    for(i in 1:nrow(min_max_var)){
      # NOTE(review): column matching uses grepl(), so a variable name that is
      # a substring of another column name (or contains regex metacharacters)
      # would also match -- confirm variable names are unambiguous.
      if(any(grepl(min_max_var$VariableName[i],names(df)))){
        varTmp <- df[,which(grepl(min_max_var$VariableName[i],names(df)))]
        # Subtract the geography's base value from the flagged column(s).
        df[,which(grepl(min_max_var$VariableName[i],names(df)))] <- varTmp-baseFullDecompMinMix[baseFullDecompMinMix$Geography== unique(df$Geography),min_max_var$VariableName[i]]
        if(modelFeatureList$hasIntercept == "Yes"){
          # Compensate in the Intercept column so the decomposition still sums up.
          df[,which(grepl("Intercept",names(df)))] <- df[,which(grepl("Intercept",names(df)))] + baseFullDecompMinMix[baseFullDecompMinMix$Geography== unique(df$Geography),min_max_var$VariableName[i]]
        }
      }
    }
  }
  return(df)
}
# Rolls the per-geography full decomposition up to one row per period by
# summing every decomposition column across geographies.
olsmExtractFullDecompRolledUp <- function(olsmFulldecompUnRolledDf){
  rolledDf <- olsmFulldecompUnRolledDf
  rolledDf$Geography <- NULL
  # Period may arrive as "dd-mm-yyyy" text; coerce to Date before aggregating.
  if(!is.Date(rolledDf$Period)){
    rolledDf$Period <- lubridate::dmy(rolledDf$Period)
  }
  rolledDf <- aggregate(rolledDf[,-1], by = list(rolledDf$Period), sum)
  names(rolledDf)[1] <- "Period"
  return(rolledDf)
}
# Builds the per-geography full decomposition: for every geography, each model
# term's column in the (normalised) model data is scaled back to the original
# dependent-variable units using the term's estimate and the geography's
# dependent-variable mean.
# Returns a named list of data.frames, one per geography.
#
# Args:
#   model           : fitted model (lm or nlme mixed model).
#   olsm_parametersDF: model-manager sheet (VariableName, Type, Normalization, ...).
#   olsmModelData   : normalised regression data (Geography, Period, dep + indep columns).
#   modelFeatureList: run settings (mixedModelChioce, depVar, selectedGeos, FixedVarChoice, ...).
#   olsm_RegDataTemp: raw (un-normalised) data; supplies per-geography dep-var means.
#   transData       : transformed regression data, used to split Combined terms.
olsmExtractFullDecomp <- function(model, olsm_parametersDF, olsmModelData,modelFeatureList, olsm_RegDataTemp, transData){
  # Helper: de-normalisation when the DEPENDENT variable was normalised.
  olsmDenormbyDep <- function(olsmFulldecompDf, depAvg, ModelDf, modelFeatureList, olsm_parametersDF, denormType){
    # This function will call if Depvar is Normalized.
    # And IndepVar may or may not be normalized.
    # IF IndepVar is Normalized so denormalized indepVar with its estimate and depAvg,
    # otherwise denormalized indepVar with its estimate only.
    # DepVar will denormalized by depAvg.
    # IF Intercept is present then it will get denormalized by depAvg.
    # NOTE(review): `denormType` here refers to the INDEPENDENT variable's
    # normalisation in the ModelDf lookup branches below -- verify each
    # term's own Normalization is the intended driver, not the DepVar's.
    for (j in 1:length(olsmFulldecompDf)) {
      #names(olsmFulldecompDf)
      #j = 9
      if(any(ModelDf$term == names(olsmFulldecompDf)[j])){
        # Column j is a model term: scale by estimate (column 2 of ModelDf).
        if(denormType == "Division"){
          olsmFulldecompDf[,j] <- olsmFulldecompDf[,j]* ModelDf[which(ModelDf$term == names(olsmFulldecompDf)[j]),2]* depAvg
        }else if(denormType == "Subtraction"){
          olsmFulldecompDf[,j] <- olsmFulldecompDf[,j]* (ModelDf[which(ModelDf$term == names(olsmFulldecompDf)[j]),2]+ depAvg)
        }else {
          # indepVar is not normalized.
          olsmFulldecompDf[,j] <- olsmFulldecompDf[,j]* ModelDf[which(ModelDf$term == names(olsmFulldecompDf)[j]),2]
        }
      }else if(names(olsmFulldecompDf)[j] == modelFeatureList$depVar){
        # Column j is the dependent variable itself: undo its normalisation.
        if(denormType == "Division"){
          olsmFulldecompDf[,j] <- olsmFulldecompDf[,j] * depAvg
        }else if(denormType == "Subtraction"){
          olsmFulldecompDf[,j] <- olsmFulldecompDf[,j] + depAvg
        }
      }
    }
    # The intercept has no data column; append it as a constant column.
    if(modelFeatureList$hasIntercept == "Yes"){
      if(denormType == "Division"){
        intercept <- ModelDf[grep("Intercept",ModelDf$term),2] * depAvg
      }else if(denormType == "Subtraction"){
        intercept <- ModelDf[grep("Intercept",ModelDf$term),2] + depAvg
      }
      olsmFulldecompDf <- cbind(olsmFulldecompDf, intercept)
      names(olsmFulldecompDf)[length(names(olsmFulldecompDf))] <- "Intercept"
    }
    return(olsmFulldecompDf)
  }
  # Helper: de-normalisation when the DEPENDENT variable was NOT normalised.
  olsmDenormWithoutDep <- function(olsmFulldecompDf, ModelDf, modelFeatureList, olsm_parametersDF){
    # This function will call if Depvar is not Normalized.
    # And IndepVar may or may not be normalized, and It will get denormalized with its estimate only.
    # IF Intercept is present then it will just add to data.
    for (j in 1:length(olsmFulldecompDf)) {
      if(any(ModelDf$term == names(olsmFulldecompDf)[j])){
        olsmFulldecompDf[,j] <- olsmFulldecompDf[,j]* ModelDf[which(ModelDf$term == names(olsmFulldecompDf)[j]),2]
      }
    }
    if(modelFeatureList$hasIntercept == "Yes"){
      olsmFulldecompDf <- cbind(olsmFulldecompDf, ModelDf[grep("Intercept",ModelDf$term),2])
      names(olsmFulldecompDf)[length(names(olsmFulldecompDf))] <- "Intercept"
    }
    return(olsmFulldecompDf)
  }
  # Build one term/estimate data.frame per geography: per-geography coefficients
  # for mixed models, a shared tidy() coefficient table otherwise.
  modelDFList <- list()
  if(modelFeatureList$mixedModelChioce == "Yes"){
    ModelDf <- data.frame(term = rownames(t(coef(model))), t(coef(model)), row.names = NULL)
    names(ModelDf)[-1] <- modelFeatureList$selectedGeos
    modelDFList <- lapply(as.list(ModelDf[-1]), function(x){data.frame(ModelDf[1],estimate = x, stringsAsFactors = F)})
  }else {
    ModelDf <- tidy(model)
    ModelDf <- ModelDf[,names(ModelDf) %in% c("term","estimate")]
    for(geo in as.character(unique(olsmModelData$Geography))){
      modelDFList[[geo]] <- list()
      modelDFList[[geo]] <- ModelDf
    }
  }
  olsmFullDecomp <- NULL
  # Expand "Combined_*" terms back to their component variables per geography.
  if(any(grepl("Combined",modelDFList[[1]]$term))){
    if(modelFeatureList$mixedModelChioce == "Yes"){
      geoTransData <- split(transData, transData$Geography)
      modelDFList <- lapply(names(modelDFList), function(x){return(olsmSplitCombinedEstimateData(modelDFList[[x]], olsm_parametersDF, geoTransData[[x]]))})
      names(modelDFList) <- names(geoTransData)
    }else {
      #combinedDf <- cbind(rep(paste0("Model_",1), times = nrow(modelDFList[[1]])),rep("No OutsideVar", times = nrow(modelDFList[[1]])),modelDFList[[1]])
      combinedDf <- modelDFList[[1]]
      combinedDf <- olsmSplitCombinedEstimateData(combinedDf, olsm_parametersDF, transData)
      ModelDf <- data.frame(combinedDf[,which(names(combinedDf) %in% c("term","estimate"))],row.names = NULL)
      for(geo in as.character(unique(olsmModelData$Geography))){
        modelDFList[[geo]] <- list()
        modelDFList[[geo]] <- ModelDf
      }
    }
  }
  # Append fixed-coefficient variables as extra term rows (globally or per geography).
  if(any(grepl("Fixed",olsm_parametersDF$Type))){
    if(modelFeatureList$mixedModelChioce == "Yes"){
      if(modelFeatureList$FixedVarChoice == "All"){
        fixedDf <- data.frame(olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% c("VariableName","Fixed_Coefficient")],row.names = NULL)
        names(fixedDf) <- c("term","estimate")
        modelDFList <- lapply(modelDFList, function(x){return(data.frame(rbind(x,fixedDf), row.names = NULL))})
        names(modelDFList) <- as.character(unique(olsmModelData$Geography))
      }else if(modelFeatureList$FixedVarChoice == "Geo"){
        fixedDfByGeo <- split(modelFeatureList$geoFixedEstimatesDF,modelFeatureList$geoFixedEstimatesDF$Geography)
        fixedDfByGeo <- lapply(fixedDfByGeo, function(x){data.frame(term = x$VariableName, estimate = x$Fixed_Coefficient, row.names = NULL)})
        modelDFList <- lapply(names(modelDFList), function(x){return(data.frame(rbind(modelDFList[[x]],fixedDfByGeo[[x]]), row.names = NULL))})
        names(modelDFList) <- as.character(unique(olsmModelData$Geography))
      }
    }else {
      if(modelFeatureList$FixedVarChoice == "All"){
        fixedDf <- data.frame(olsm_parametersDF[grep("Fixed",olsm_parametersDF$Type),names(olsm_parametersDF) %in% c("VariableName","Fixed_Coefficient")],row.names = NULL)
        names(fixedDf) <- names(modelDFList[[1]])
        modelDFList <- lapply(modelDFList, function(x){return(rbind(x,fixedDf))})
      }else if(modelFeatureList$FixedVarChoice == "Geo"){
        fixedDfByGeo <- split(modelFeatureList$geoFixedEstimatesDF,modelFeatureList$geoFixedEstimatesDF$Geography)
        fixedDf <- NULL
        fixedDfByGeo <- lapply(fixedDfByGeo, function(x){
          fixedDf <- data.frame(term = x$VariableName, estimate = x$Fixed_Coefficient, row.names = NULL)
          names(fixedDf) <- names(modelDFList[[1]])
          return(fixedDf)
        })
        modelDFList <- lapply(names(modelDFList), function(x){return(rbind(modelDFList[[x]],fixedDfByGeo[[x]]))})
        names(modelDFList) <- as.character(unique(olsmModelData$Geography))
      }
    }
  }
  indepVar <- as.character(modelDFList[[1]]$term[!modelDFList[[1]]$term %in% modelFeatureList$depVar])
  # Per-geography dependent-variable means over the modelling window,
  # computed from the RAW data (used to undo the normalisation).
  df <- olsm_RegDataTemp[which(dmy(olsm_RegDataTemp$Period) %in% dmy(modelFeatureList$modellingPeriod)),]
  geoDepMean <- df[df$Geography %in% modelFeatureList$selectedGeos,names(df) %in% c("Geography", modelFeatureList$depVar)]
  geoDepMean <- aggregate(geoDepMean[,which(names(geoDepMean) %in% modelFeatureList$depVar)], by = list(geoDepMean$Geography), FUN=mean)
  names(geoDepMean) <- c("Geography", "DepMean")
  # Decomposition frame: Geography, Period, dep var and all term columns,
  # split per geography (empty slices are dropped).
  olsmFulldecompDf <- olsmModelData[,names(olsmModelData)%in% c("Geography", "Period", modelFeatureList$depVar,indepVar)]
  olsmFulldecompList <- split(olsmFulldecompDf, olsmFulldecompDf$Geography)
  olsmFulldecompList <- lapply(olsmFulldecompList, function(x){if(nrow(x) == 0){return(NULL)}else {return(x)}})
  olsmFulldecompList <- olsmFulldecompList[!sapply(olsmFulldecompList,is.null)]
  if(olsm_parametersDF$Normalization[olsm_parametersDF$Type == "DepVar"] != "None"){
    denormType <- as.character(olsm_parametersDF$Normalization[olsm_parametersDF$Type == "DepVar"])
    olsmFulldecompList <- lapply(names(olsmFulldecompList), function(x){
      #x <- names(olsmFulldecompList)[1]
      return(olsmDenormbyDep(olsmFulldecompDf = olsmFulldecompList[[x]],
                             depAvg = geoDepMean[geoDepMean$Geography == x,"DepMean"],
                             ModelDf = modelDFList[[x]], modelFeatureList, olsm_parametersDF, denormType))
    })
  }else
    if(olsm_parametersDF$Normalization[olsm_parametersDF$Type == "DepVar"] == "None"){
      olsmFulldecompList <- lapply(names(olsmFulldecompList), function(x){
        return(olsmDenormWithoutDep(olsmFulldecompList[[x]], modelDFList[[x]], modelFeatureList, olsm_parametersDF))
      })
    }
  # NOTE(review): names are reassigned from unique(olsmFulldecompDf$Geography),
  # but empty geography slices were filtered out above -- confirm the two sets
  # always coincide, otherwise names could misalign with the list entries.
  names(olsmFulldecompList) <- unique(olsmFulldecompDf$Geography)
  return(olsmFulldecompList)
}
# Function to denormalize actual vs predicted output of the OLSM model.
# Denormalizes the actual-vs-predicted output of an OLSM model back to the
# original dependent-variable scale and also rolls it up across geographies.
#
# Args:
#   modelParam:        list of model settings; uses $modellingPeriod,
#                      $selectedGeos and $MixedModelChoice here.
#   actPredData:       data.frame with Geography, Period and the (possibly
#                      normalized) actual/predicted/residual columns.
#   olsm_parametersDF: parameter sheet; the Type == "DepVar" row carries the
#                      dependent variable name and its Normalization method.
#   olsm_RegDataTemp:  raw (un-normalized) regression data, used to rebuild
#                      the per-geography dependent-variable means.
#
# Returns: a list with two data.frames:
#   $actPredDataUnRolled - denormalized data at Geography x Period level
#   $actPredDataRolled   - the same data summed over geographies per Period
olsmDenormActvsPred <- function(modelParam, actPredData, olsm_parametersDF, olsm_RegDataTemp ){
  actPredDenormList <- list()
  # Roll up across geographies: sum every value column per Period.
  actPredDataRolled <- actPredData
  actPredDataRolled$Geography <- NULL
  if(class(actPredDataRolled$Period) != "Date"){
    actPredDataRolled$Period <- lubridate::dmy(actPredDataRolled$Period)
  }
  actPredDataRolled <- aggregate(actPredDataRolled[,-1],by = list(actPredDataRolled$Period),sum)
  names(actPredDataRolled)[1] <- "Period"
  # Default result: when Normalization == "None" the input is returned as-is.
  actPredDenormList[["actPredDataUnRolled"]] <- actPredData
  actPredDenormList[["actPredDataRolled"]] <- actPredDataRolled
  if(olsm_parametersDF$Normalization[which(olsm_parametersDF$Type == "DepVar")] != "None"){
    depVar <- olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"]
    # Per-geography mean of the dependent variable over the modelling period;
    # this is the factor/offset the normalization originally applied.
    df <- olsm_RegDataTemp[which(dmy(olsm_RegDataTemp$Period) %in% dmy(modelParam$modellingPeriod)),]
    geoDepMean <- df[df$Geography %in% modelParam$selectedGeos,names(df) %in% c("Geography", depVar)]
    geoDepMean <- aggregate(geoDepMean[,which(names(geoDepMean) %in% depVar)], by = list(geoDepMean$Geography), FUN=mean)
    names(geoDepMean) <- c("Geography", "DepMean")
    # Undo the normalization geography by geography (drop empty splits first).
    actPredList <- split(actPredData, actPredData$Geography)
    actPredList <- lapply(actPredList, function(x){if(nrow(x) == 0){return(NULL)}else {return(x)}})
    actPredList <- actPredList[!sapply(actPredList,is.null)]
    for(i in 1:length(actPredList)){
      depAvg <- geoDepMean$DepMean[geoDepMean$Geography == names(actPredList)[i]]
      if(olsm_parametersDF$Normalization[which(olsm_parametersDF$Type == "DepVar")] == "Division"){
        # "Division" normalization: multiply every value column (all but
        # Geography/Period in positions 1,2) back by the geo mean.
        actPredList[[i]][,-c(1,2)] <- actPredList[[i]][,-c(1,2)] * depAvg
      }else if(olsm_parametersDF$Normalization[which(olsm_parametersDF$Type == "DepVar")] == "Subtraction"){
        # "Subtraction" normalization: add the geo mean back.  For the plain
        # model column 5 is excluded (assumed to be residuals — confirm against
        # the actual/predicted table layout); for mixed models residual columns
        # are excluded by name.
        if(modelParam$MixedModelChoice == "No"){
          actPredList[[i]][,-c(1,2,5)] <- actPredList[[i]][,-c(1,2,5)] + depAvg
        }else if(modelParam$MixedModelChoice == "Yes"){
          actPredList[[i]][,!names(actPredList[[i]]) %in% c("Geography","Period", names(actPredList[[i]])[grep("Residuals",names(actPredList[[i]]))])] <- actPredList[[i]][,!names(actPredList[[i]]) %in% c("Geography","Period", names(actPredList[[i]])[grep("Residuals",names(actPredList[[i]]))])] + depAvg
        }
      }
    }
    actPredDataUnRolled <- as.data.frame(rbindlist(actPredList))
    # Re-roll the now-denormalized data over geographies.
    actPredDataRolled <- actPredDataUnRolled
    actPredDataRolled$Geography <- NULL
    if(class(actPredDataRolled$Period) != "Date"){
      actPredDataRolled$Period <- lubridate::dmy(actPredDataRolled$Period)
    }
    actPredDataRolled <- aggregate(actPredDataRolled[,-1],by = list(actPredDataRolled$Period),sum)
    names(actPredDataRolled)[1] <- "Period"
    actPredDenormList[["actPredDataUnRolled"]] <- actPredDataUnRolled
    actPredDenormList[["actPredDataRolled"]] <- actPredDataRolled
  }
  return(actPredDenormList)
}
# Extracts a tidy parameter table for every fitted model in
# `olsmAllModelList`: coefficient estimates, std errors, p-values and VIFs,
# joined with the per-variable metadata (transformation/lag/decay settings)
# from `olsm_parametersDF`, plus any fixed-coefficient variables.  Outside
# variable transformation settings (Lag/Alpha/Beta/Decay) are parsed back out
# of the encoded term-name suffix (e.g. "_L2D0.5A0.85B5").
#
# Args:
#   olsmAllModelList:    list of fitted lm model objects.
#   olsm_parametersDF:   parameter sheet (VariableName/Type/TOF settings).
#   hasIntercept:        "Yes"/"No" - whether models were fit with intercept.
#   olsmFinalTransRegDf: transformed regression data, needed to split
#                        combined-column estimates back to member variables.
#
# Returns: one data.frame with a row per model term across all models.
olsmExtractAllModelData <- function(olsmAllModelList, olsm_parametersDF,hasIntercept, olsmFinalTransRegDf){
  olsmModelDataDfFinal <- NULL
  parColName <- names(olsm_parametersDF)[which(!names(olsm_parametersDF) %in% c("VariableName","Type" ))]
  fixedVar <- olsm_parametersDF[which(olsm_parametersDF$Type %in% c("Fixed Var No Trans","Fixed Var TOF")),c("VariableName", "Fixed_Coefficient")]
  for (i in seq_along(olsmAllModelList)) {
    ModelDf <- as.data.frame(tidy(olsmAllModelList[[i]]))
    # getting VIF of each variable (the intercept has no VIF).
    VIF <- rbind(data.frame(term = "(Intercept)", VIF = NA), data.frame(term = names(rms::vif(olsmAllModelList[[i]])), VIF = rms::vif(olsmAllModelList[[i]]),row.names = NULL))
    ModelDf <- merge(ModelDf, VIF, by = "term")
    # Outside variables carry their TOF settings as a "_L..." suffix on the
    # term name; strip it to recover the base variable name.
    outsideVarfull <- ModelDf$term[gsub("_L+[0-9].*","",ModelDf$term) %in% olsm_parametersDF$VariableName[grep("Outside",olsm_parametersDF$Type)]]
    outsideVar <- gsub("_L+[0-9].*","",outsideVarfull[!is.na(outsideVarfull)])
    term <- list()
    if(length(outsideVar)==0){
      # --- Model without an outside variable ---
      olsmModelDataDf <- cbind(rep(paste0("Model_",i), times = nrow(ModelDf)),rep("No OutsideVar", times = nrow(ModelDf)),ModelDf)
      if(any(grepl("Combined",olsmModelDataDf$term))==TRUE){
        names(olsmModelDataDf)[1:2] <- c("Model_Number","Outside_Variable")
        olsmModelDataDf <- olsmSplitCombinedEstimateData(olsmModelDataDf, olsm_parametersDF, olsmFinalTransRegDf)
      }
      colnames(olsmModelDataDf) <- c("Model_Number", "Outside_Variable","Model Terms", "Estimate", "Std.Error", "Statistic","p.Value","VIF")
      # Append fixed-coefficient variables (no stats available for them).
      if(nrow(fixedVar)!=0){
        fixedVarDf <- data.frame(rep(olsmModelDataDf$Model_Number[1],nrow(fixedVar)), olsmModelDataDf$Outside_Variable[1],fixedVar$VariableName, fixedVar$Fixed_Coefficient, NA, NA, NA, NA)
        colnames(fixedVarDf) = c("Model_Number","Outside_Variable","Model Terms","Estimate","Std.Error","Statistic","p.Value","VIF")
        olsmModelDataDf <- rbind(olsmModelDataDf, fixedVarDf)
      }
      orderModelTerm <- olsmModelDataDf$`Model Terms`
      linearDecayVarPos <- grep("_L+[0-9].*", orderModelTerm)
      orderTerm <- gsub("_L+[0-9].*","",orderModelTerm)
      # Look up the parameter-sheet row for each term, in model-term order.
      if(hasIntercept == "Yes"){
        # The intercept has no sheet row; reuse the DepVar row as placeholder.
        term[[orderModelTerm[1]]] <- olsm_parametersDF[which(olsm_parametersDF$Type == "DepVar"),]
        term$`(Intercept)`[1] <- "(Intercept)"
        term <- append(term,sapply(orderTerm[-1], function(x) term[[x]] <- olsm_parametersDF[which(olsm_parametersDF$VariableName == x),],simplify = FALSE))
      }else {
        term <- append(term,sapply(orderTerm, function(x) term[[x]] <- olsm_parametersDF[which(olsm_parametersDF$VariableName == x),],simplify = FALSE))
      }
      parDf <- as.data.frame(rbindlist(term))
      parDf$VariableName[linearDecayVarPos] <- olsmModelDataDf$`Model Terms`[linearDecayVarPos]
      olsmModelDataDf <- merge(olsmModelDataDf, parDf, by.x = "Model Terms", by.y = "VariableName",all.x = TRUE)
      # Blank out metadata columns that do not apply to each variable type.
      olsmModelDataDf[which(olsmModelDataDf$Type == "DepVar"),parColName] <- NA
      olsmModelDataDf[which(olsmModelDataDf$Type %in% c("Fixed Var No Trans")),parColName[-which(parColName=="Fixed_Coefficient")]] <- NA
      olsmModelDataDf[which(olsmModelDataDf$Type %in% c("Fixed Var TOF")),parColName[-which(parColName %in% c("Transformation","DecayMin","AlphaMin","BetaMin","Normalization","Fixed_Coefficient"))]] <- NA
      olsmModelDataDf <- olsmModelDataDf[,c("Model_Number","Outside_Variable","Model Terms","Estimate","Std.Error","Statistic","p.Value","VIF",names(parDf)[-1])]
      olsmModelDataDf <- olsmModelDataDf[match(orderModelTerm, olsmModelDataDf$`Model Terms`),]
      if(i > 1){
        olsmModelDataDfFinal <- rbind(olsmModelDataDfFinal, olsmModelDataDf)
      }else {
        olsmModelDataDfFinal <- olsmModelDataDf
      }
    }else{
      # --- Model containing an outside variable ---
      df <- cbind(rep(paste0("Model_",i), times = nrow(ModelDf)),rep(outsideVar, times = nrow(ModelDf)),ModelDf)
      if(any(grepl("Combined",df$term))==TRUE){
        # FIX: was `names(olsmModelDataDf)[1:2]` which renamed a stale object
        # from a previous iteration (or errored on the first); `df` is the
        # frame being processed in this branch.
        names(df)[1:2] <- c("Model_Number","Outside_Variable")
        df <- olsmSplitCombinedEstimateData(parameterDetails = df,parametersDf = olsm_parametersDF, transData = olsmFinalTransRegDf)
      }
      colnames(df) <- c("Model_Number", "Outside_Variable","Model Terms", "Estimate", "Std.Error", "Statistic","p.Value","VIF")
      df$`Model Terms` <- gsub("_L+[0-9].*","",df$`Model Terms`)
      if(nrow(fixedVar)!= 0){
        fixedVarDf <- data.frame(rep(df$Model_Number[1],nrow(fixedVar)), df$Outside_Variable[1],fixedVar$VariableName, fixedVar$Fixed_Coefficient, NA, NA, NA,NA)
        colnames(fixedVarDf) = c("Model_Number","Outside_Variable","Model Terms","Estimate","Std.Error","Statistic","p.Value","VIF")
        df <- rbind(df, fixedVarDf)
      }
      orderTerm <- df$`Model Terms`
      indvar <- orderTerm[!orderTerm %in% outsideVarfull]
      if(hasIntercept == "Yes"){
        term[[orderTerm[1]]] <- olsm_parametersDF[which(olsm_parametersDF$Type == "DepVar"),]
        term$`(Intercept)`[1] <- "(Intercept)"
        indvar <- indvar[-1]
      }
      term <- append(term,sapply(indvar, function(x) term[[x]] <- olsm_parametersDF[which(olsm_parametersDF$VariableName == x),],simplify = FALSE))
      term <- append(term,sapply(outsideVar, function(x) term[[x]] <- olsm_parametersDF[which(olsm_parametersDF$VariableName == x),],simplify = FALSE))
      parDf <- as.data.frame(rbindlist(term))
      # Dummy terms have no sheet row; add all-NA metadata rows for them.
      if(any(grepl("Dummy",names(term)))){
        dummyDf <- as.data.frame(t(sapply(names(term)[grep("Dummy",names(term))], function(x) c(x,rep(NA,length(parDf)-1)))))
        names(dummyDf) <- names(parDf)
        parDf <- rbind(parDf,dummyDf)
      }
      df <- merge(df, parDf, by.x = "Model Terms", by.y = "VariableName")
      df <- df[,c("Model_Number","Outside_Variable","Model Terms","Estimate","Std.Error","Statistic","p.Value","VIF",names(parDf)[-1])]
      # Restore the full (suffixed) outside term names, then re-impose the
      # original term order.
      df$`Model Terms`[df$`Model Terms` %in% outsideVar] <- outsideVarfull
      orderTerm[orderTerm == outsideVar] <- outsideVarfull
      df <- df[match(orderTerm, df$`Model Terms`),]
      df[which(df$Type == "DepVar"),parColName] <- NA
      df[which(df$Type %in% c("Fixed Var No Trans")),parColName[-which(parColName=="Fixed_Coefficient")]] <- NA
      olsmModelDataDfFinal <- rbind(olsmModelDataDfFinal, df)
    }
  }
  # Separate columns for outside-variable TOF settings, parsed from the
  # encoded term names of "Outside TOF" rows.
  olsmModelDataDfFinal$S.No <- seq_len(nrow(olsmModelDataDfFinal))
  olsm_Outside_Data <- olsmModelDataDfFinal[which(olsmModelDataDfFinal$Type == "Outside TOF"),]
  termData <- data.frame()
  for(term in olsm_Outside_Data$`Model Terms`){
    # "L<digits>" -> Lag
    Outside_Lag <- as.numeric(str_extract(stringr::str_extract(term,"L\\d+"),pattern = "\\d.*"))
    # "D<decimal>" -> Decay
    Outside_Decay <- as.numeric(str_extract(stringr::str_extract(term,"D\\d\\.\\d+"),pattern = "\\d.*"))
    # "A<decimal>" -> Alpha (older encodings use "P" instead of "A")
    Outside_Alpha <- as.numeric(str_extract(stringr::str_extract(term,"A\\d\\.\\d+"),pattern = "\\d.*"))
    if(is.na(Outside_Alpha)){
      Outside_Alpha <- as.numeric(str_extract(stringr::str_extract(term,"P\\d\\.\\d+"),pattern = "\\d.*"))
    }
    # "B<decimal>" -> Beta (fall back to integer-valued "B<digits>")
    Outside_Beta <- as.numeric(str_extract(stringr::str_extract(term,"B\\d\\.\\d+"),pattern = "\\d.*"))
    if(is.na(Outside_Beta)){
      Outside_Beta <- as.numeric(str_extract(stringr::str_extract(term,"B\\d+"),pattern = "\\d.*"))
    }
    # rbind-ing to an empty data.frame is a no-op, so no first-row special case
    # is needed (the original's length(nrow(...)) == 0 guard was always FALSE).
    termData <- rbind(termData,data.frame(Outside_Lag=Outside_Lag,Outside_Alpha=Outside_Alpha,Outside_Beta=Outside_Beta,Outside_Decay=Outside_Decay))
  }
  if(nrow(olsm_Outside_Data)!=0){
    olsm_toMerge_OutsideData <- data.frame(cbind(S.No = olsm_Outside_Data$S.No,termData),stringsAsFactors = T)
    olsmModelDataDfFinal <- merge(olsmModelDataDfFinal,olsm_toMerge_OutsideData,by = "S.No",all = T)
    olsmModelDataDfFinal$S.No <- NULL
    olsmModelDataDfFinal$`Model Terms` <- gsub(pattern = "\\_\\L\\d.*$","",olsmModelDataDfFinal$`Model Terms`)
    olsmModelDataDfFinal <- olsmModelDataDfFinal[,c("Model_Number","Outside_Variable","Model Terms", "Estimate","Std.Error","Statistic","p.Value","VIF", "Type" ,"Transformation","Decay","Outside_Lag","Outside_Alpha","Outside_Beta","Outside_Decay","LagMin" ,"LagMax","DecaySteps","DecayMin","DecayMax","AlphaSteps","AlphaMin","AlphaMax","BetaMin","BetaMultiplier","BetaSteps","SeriesMax","Normalization","Min_Max_Adjustment","Fixed_Coefficient","Combined_Column","Random_Effect")]
  }
  return(olsmModelDataDfFinal)
}
##################### Function related MMM Modelling ##########################
# function to transform and normalized the data by geography.
# Applies the configured transformations and normalization to the regression
# data, one geography at a time, for the geographies selected in
# `modelFeatureList$selectedGeos`.
#
# Returns a list with:
#   $olsmFinalTransRegDf - transformed (pre-normalization) data, all geos
#   $olsmFinalNormRegDf  - transformed and normalized data, all geos
olsmGetTransformData <- function(olsm_SplitByGeoList,olsm_parametersDF,modelFeatureList){
  geoSubset <- olsm_SplitByGeoList[which(names(olsm_SplitByGeoList) %in% modelFeatureList$selectedGeos)]
  transByGeo <- list()
  normByGeo <- list()
  for (geoName in names(geoSubset)) {
    # The transformation helper reads the current geography from the
    # feature list, so set it before each call.
    modelFeatureList[["TransGeo"]] <- geoName
    transDf <- createOlsmTransformation(olsm_RegDataModelList = geoSubset, olsm_parametersDF = olsm_parametersDF, modelFeatureList = modelFeatureList)
    transDf <- data.frame(transDf, stringsAsFactors = FALSE)
    # Keep only rows inside the modelling period.
    transDf <- transDf[dmy(transDf$Period) %in% dmy(modelFeatureList$modellingPeriod), ]
    transByGeo[[geoName]] <- transDf
    normByGeo[[geoName]] <- createOlsmNormalization(olsmFinalRegDf = transDf, olsm_parametersDF)
  }
  # Stack the per-geography frames back together.
  list(
    olsmFinalTransRegDf = as.data.frame(rbindlist(transByGeo)),
    olsmFinalNormRegDf  = as.data.frame(rbindlist(normByGeo))
  )
}
# function to call for Modelling
# Orchestrates one modelling run: transforms/normalizes the data, builds the
# fixed-effect and combined columns, constructs the formula list, and fits
# every candidate model of the requested `type` ("OLS", "WLS" or "Mixed").
#
# Args:
#   olsm_RegDataTemp:    raw regression data (used for WLS geo weights).
#   olsm_parametersDF:   parameter sheet.
#   olsm_SplitByGeoList: raw data pre-split by geography.
#   modelFeatureList:    run settings (selected geos, period, intercept,
#                        WLS choice, etc.).
#   type:                "OLS" | "WLS" | "Mixed".
#
# Returns: a list with the transformed/normalized/final data frames, the
# fitted model list, the formula list and (for OLS/WLS) the extracted
# model-parameter summary.
olsmGenerateModel <- function(olsm_RegDataTemp,olsm_parametersDF,olsm_SplitByGeoList,modelFeatureList,type){
  olsmTransDFList <- olsmGetTransformData(olsm_SplitByGeoList,olsm_parametersDF,modelFeatureList)
  olsmFinalTransRegDf <- olsmTransDFList$olsmFinalTransRegDf
  olsmFinalNormRegDf <- olsmTransDFList$olsmFinalNormRegDf
  olsmFinalRegDf <- olsmGetFixedEffectDF(olsmFinalRegDf = olsmFinalNormRegDf, olsm_parametersDF, modelFeatureList)
  olsmFinalRegDf <- olsmCreateCombinedColumn(df = olsmFinalRegDf, combinedCol = olsm_parametersDF[olsm_parametersDF$Type != "Not in Model",c("VariableName","Combined_Column")])
  modelParamList <- list()
  if(type == "OLS"){
    # OLS stacked modelling
    formulaList <- olsmBuildFormula(olsmFinalRegDf, olsm_parametersDF,modelFeatureList$hasIntercept, mixed = FALSE)
    modelParamList <- c("OLS", list(formulaList))
    names(modelParamList) <- c("type", "formulaList")
    olsmAllModelList <- olsmAllPossibleRegressions(modelParamList,olsmFinalRegDf)
    olsmModelResult <- olsmExtractModelParameter(olsmAllModelList, olsm_parametersDF)
  }else if(type == "WLS"){
    # WLS stacked modelling: each geography is weighted by its mean
    # dependent-variable level over the modelling period.
    depVar <- olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"]
    regDF <- olsm_RegDataTemp[dmy(olsm_RegDataTemp$Period) %in% dmy(modelFeatureList$modellingPeriod),]
    geoMean <- aggregate(regDF[, which(names(regDF) %in% depVar)], list(regDF$Geography), mean)
    olsmFinalRegDf <- merge(olsmFinalRegDf, geoMean, by.x = "Geography", by.y = "Group.1")
    colnames(olsmFinalRegDf)[names(olsmFinalRegDf)%in% "x"] <- "weight"
    formulaList <- olsmBuildFormula(olsmFinalRegDf, olsm_parametersDF,modelFeatureList$hasIntercept, mixed = FALSE)
    modelParamList <- c("WLS", list(formulaList))
    names(modelParamList) <- c("type", "formulaList")
    olsmAllModelList <- olsmAllPossibleRegressions(modelParamList,olsmFinalRegDf)
    olsmModelResult <- olsmExtractModelParameter(olsmAllModelList, olsm_parametersDF)
  }else if(type == "Mixed"){
    # Mixed Modelling
    formulaList <- olsmBuildFormula(olsmFinalRegDf, olsm_parametersDF,modelFeatureList$hasIntercept, mixed = TRUE)
    # Build the random-effects part of the formula: every non-dependent
    # variable flagged Random_Effect == 1, with combined columns entering as
    # their "Combined_<id>" column instead of individually.
    randomVar <- olsm_parametersDF$VariableName[olsm_parametersDF$VariableName != olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"] & olsm_parametersDF$Random_Effect == 1 & olsm_parametersDF$Type != "Not in Model"]
    combinedVar <- unique(olsm_parametersDF$Combined_Column[olsm_parametersDF$Type != "Not in Model" & olsm_parametersDF$Random_Effect == 1 ])
    if(any(combinedVar==0)){
      combinedVar <- combinedVar[combinedVar!=0]
    }
    if(length(combinedVar)!= 0){
      randomVar <- c(randomVar,paste0("Combined_",combinedVar))
    }
    # Drop the member variables of multi-member combined columns; only the
    # combined column itself stays in the random part.
    olsm_varTypeDf <- olsm_parametersDF[,c("VariableName","Type","Combined_Column")]
    uniqueCombValue <- plyr::count(as.factor(olsm_varTypeDf$Combined_Column))
    combinedColumns <- as.character(olsm_varTypeDf[-c(which(olsm_varTypeDf$Combined_Column==uniqueCombValue[which(uniqueCombValue$freq <= 1),1]),which(olsm_varTypeDf$Combined_Column==uniqueCombValue[which(uniqueCombValue$x == 0),1])),1])
    randomVar <- randomVar[!randomVar %in% combinedColumns]
    randomVar <- paste0(randomVar,collapse = "+")
    # Random intercept on/off mirrors the fixed-intercept choice.
    if(modelFeatureList$hasIntercept=="No"){
      randomVar <- paste0("0 + ",randomVar)
    }else if(modelFeatureList$hasIntercept=="Yes"){
      randomVar <- paste0("1 + ",randomVar)
    }
    modelParamList <- c("Mixed", formulaList, randomVar)
    names(modelParamList) <- c("type", "formulaList", "randomVar")
    if(modelFeatureList$wLSChoice == "No"){
      modelParamList[["weight"]] <- FALSE
      olsmAllModelList <- olsmAllPossibleRegressions(modelParamList,olsmFinalRegDf)
    }else if(modelFeatureList$wLSChoice == "Yes"){
      # Weighted mixed model: geo weight = mean of the (transformed)
      # dependent variable per geography.
      modelParamList[["weight"]] <- TRUE
      modelParamList[["depVar"]] <- olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"]
      geoMean <- aggregate(olsmFinalTransRegDf[, which(names(olsmFinalTransRegDf) %in% modelParamList$depVar)], list(olsmFinalTransRegDf$Geography), mean)
      olsmFinalRegDf <- merge(olsmFinalRegDf, geoMean, by.x = "Geography", by.y = "Group.1")
      colnames(olsmFinalRegDf)[names(olsmFinalRegDf)%in% "x"] <- "Geoweight"
      olsmAllModelList <- olsmAllPossibleRegressions(modelParamList,olsmFinalRegDf)
    }
  }
  olsmResult <- NULL
  olsmResult[["olsmFinalTransRegDf"]] <- olsmFinalTransRegDf
  olsmResult[["olsmFinalNormRegDf"]] <- olsmFinalNormRegDf
  olsmResult[["olsmFinalRegDf"]] <- olsmFinalRegDf
  olsmResult[["olsmAllModelList"]] <- olsmAllModelList
  olsmResult[["formulaList"]] <- formulaList
  if(type != "Mixed"){
    olsmResult[["olsmModelResult"]] <- olsmModelResult
  }
  return(olsmResult)
}
# Function to build the list of model formulas (base + one per outside-variable candidate).
# Builds the candidate model formulas: the first formula contains only the
# always-in "Manual" variables; then one additional formula is generated per
# outside-variable candidate ("Outside No Trans" variables as-is, and every
# transformed/lagged variant found in the data columns for "Outside TOF"
# variables).  `mixed` is part of the call interface but unused here.
#
# Returns: a list of formula strings.
olsmBuildFormula <- function(olsmFinalRegDf, olsm_parametersDF, hasIntercept, mixed){
  varName <- names(olsmFinalRegDf)
  olsm_varTypeDf <- olsm_parametersDF[,c("VariableName","Type","Combined_Column")]
  # Variables whose Combined_Column id is non-zero and shared by more than
  # one row enter the model through their combined column, not individually.
  uniqueCombValue <- plyr::count(as.factor(olsm_varTypeDf$Combined_Column))
  combinedColumns <- olsm_varTypeDf[-c(which(olsm_varTypeDf$Combined_Column==uniqueCombValue[which(uniqueCombValue$freq <= 1),1]),which(olsm_varTypeDf$Combined_Column==uniqueCombValue[which(uniqueCombValue$x == 0),1])),]
  formulaList <- list()
  depVar <- as.character(olsm_parametersDF$VariableName[which(olsm_parametersDF$Type == "DepVar")])
  baseFormula <- paste0(depVar," ~ ")
  # Generate formula without intercept when requested.
  if(hasIntercept=="No"){
    baseFormula <- paste0(depVar," ~ ","0 +")
  }
  IndepVariable <- olsm_varTypeDf$VariableName[which(olsm_varTypeDf$Type %in% c("Manual No Trans","Manual TOF"))]
  if(nrow(combinedColumns)!=0){
    # FIX: the original referenced an undefined `combinedColumnsList`; derive
    # the combined column names locally, matching the "Combined_<id>" naming
    # used by olsmCreateCombinedColumn / olsmGenerateModel (confirm naming if
    # the combined-column helper changes).
    IndepVariable <- IndepVariable[-which(IndepVariable %in% combinedColumns$VariableName)]
    IndepVariable <- c(IndepVariable, paste0("Combined_", unique(combinedColumns$Combined_Column)))
  }
  firstFormula <- paste0(paste0(baseFormula,paste(IndepVariable[-length(IndepVariable)],"+",collapse = " " ),collapse = " ")," ",IndepVariable[length(IndepVariable)])
  formulaList[[1]]<- firstFormula
  # Base for the outside-variable formulas: all manual variables plus a
  # trailing "+", ready to append one outside candidate.
  baseFormula <- paste0(baseFormula,paste(IndepVariable,"+",collapse = " " ),collapse = " ")
  linearDecayList <- NULL
  outsideLinear <- olsm_varTypeDf$VariableName[which(olsm_varTypeDf$Type == "Outside No Trans")]
  outsideTOF <- olsm_varTypeDf$VariableName[which(olsm_varTypeDf$Type == "Outside TOF")]
  if(any(grepl("Outside",olsm_parametersDF$Type))){
    # One formula per untransformed outside variable.
    if(length(outsideLinear)>0){
      for (i in seq_along(outsideLinear)) {
        formulaCount <- length(formulaList)
        formulaList[[formulaCount+1]] <- paste0(baseFormula," ",outsideLinear[i])
      }
    }
    # One formula per transformed/lagged variant of each TOF outside
    # variable; variants are identified by the "_L..." suffix on column names.
    if(length(outsideTOF)>0){
      for (i in seq_along(outsideTOF)) {
        outsideTOFVar <- outsideTOF[i]
        varTOF <- varName[gsub("_L+[0-9].*","",varName) == outsideTOFVar]
        for (j in seq_along(varTOF)) {
          formulaCount <- length(formulaList)
          formulaList[[formulaCount+1]] <- paste0(baseFormula," ",varTOF[j])
        }
      }
    }
  }
  return(formulaList)
}
# Function that fits all candidate regressions (OLS / WLS / Mixed).
# Fits every formula in `modelParamList$formulaList` against `olsmFinalRegDf`.
# Supported types:
#   "OLS"   - plain lm per formula.
#   "WLS"   - lm with per-row `weight` column (Geography/Period dropped).
#   "Mixed" - nlme::lme with geography-level random effects; after each fit,
#             variables whose per-geography coefficient flips sign relative to
#             the fixed effect are zeroed out in those geographies and the
#             model is refit recursively until no flips remain.
#
# Returns: a list of fitted model objects, one per formula.
olsmAllPossibleRegressions <- function(modelParamList,olsmFinalRegDf){
  if(modelParamList$type == "OLS"){
    olsmFinalRegDf <- as.data.frame(lapply(olsmFinalRegDf, function(x) as.numeric(as.character(x))))
    olsmModelsResults <- lapply(modelParamList$formulaList,function(x, data) lm(x, data=olsmFinalRegDf,na.action = na.exclude),data=olsmFinalRegDf)
  }else
    if(modelParamList$type == "WLS"){
      if(any(names(olsmFinalRegDf) %in% c("Geography","Period"))){
        modelScopeDfFinal <- olsmFinalRegDf[,-which(names(olsmFinalRegDf) %in% c("Geography","Period"))]
      }else{
        modelScopeDfFinal <- olsmFinalRegDf
      }
      modelScopeDfFinal <- as.data.frame(lapply(modelScopeDfFinal, function(x) as.numeric(as.character(x))))
      olsmModelsResults <- lapply(modelParamList$formulaList,function(x, data) lm(x, data=modelScopeDfFinal, weights = weight,na.action = na.exclude),data=modelScopeDfFinal)
    }else
      if(modelParamList$type == "Mixed"){
        # Fits one mixed model and removes coefficient sign-flips iteratively:
        # when a geography's total coefficient for a random-effect variable has
        # the opposite sign to the fixed effect, that variable's data is set to
        # 0 for that geography and the model is refit (recursion terminates
        # when no flips remain).
        randModelFunction <- function(fixedFormula,randFormula,nlmeData, modelParamList){
          if(modelParamList$weight == FALSE){
            mixedeffectmodel <- lme(fixed = as.formula(fixedFormula),
                                    random = list(Geography = pdDiag(randFormula)),data = nlmeData, method = "REML", correlation = NULL,weights = NULL,contrasts = NULL,control = lmeControl(maxIter = 500000,msMaxIter = 500000,tolerance = 1e-6,niterEM = 25,msMaxEval = 200,msTol = 1e-12,msVerbose = F,returnObject = TRUE,gradHess = TRUE,apVar = TRUE,minAbsParApVar = 0.05,opt = "nlminb",optimMethod = "BFGS"))
          }else if(modelParamList$weight == TRUE){
            # Weighted fit: residual variance proportional to 1/Geoweight.
            mixedeffectmodel <- lme(fixed = as.formula(fixedFormula),random = list(Geography = pdDiag(randFormula)),data = nlmeData, method = "REML", correlation = NULL,weights = varFixed(value = ~ 1/Geoweight),contrasts = NULL,
                                    control = lmeControl(maxIter = 500000,msMaxIter = 500000,tolerance = 1e-6,niterEM = 25,msMaxEval = 200,msTol = 1e-12,msVerbose = F,returnObject = TRUE,gradHess = TRUE,apVar = TRUE,minAbsParApVar = 0.05,opt = "nlminb",optimMethod = "BFGS"))
          }
          # Per-geography coefficients and the fixed effects.
          estimateDf <- as.data.frame.list(coef(mixedeffectmodel))
          fixedDf <- fixef(mixedeffectmodel)
          if(any(grepl("Intercept", names(estimateDf)))){
            names(estimateDf)[1] <- "Intercept"
            names(fixedDf)[1] <- "Intercept"
          }
          # Multiply each geo coefficient by its fixed effect: a negative
          # product marks a sign flip for that geography.
          estimateDfRatio <- NULL
          for(name in names(fixedDf)){
            estimateDfRatio <- estimateDf[,name]* fixedDf[name]
            estimateDf[,name] <- estimateDfRatio
          }
          # For each variable, collect the row indices (geographies, in split
          # order) where the sign flipped.
          getcolnameList <- as.list(NULL)
          getcolnameList <- apply(estimateDf,2,function(x){
            if(!any(x < 0)){
              return(NULL)
            }else{
              as.vector(which(x < 0))
            }
          })
          # The intercept is allowed to flip sign; ignore it.
          if(any(grepl("Intercept",names(getcolnameList)))){
            getcolnameList[[grep("Intercept",names(getcolnameList))]] <- NULL
          }
          if(any(unlist(lapply(getcolnameList,FUN = function(x) length(x))))){
            print("Flipped")
            # Zero out the flipped variable's data in the flagged geographies
            # and refit.
            splitData_geography <- split(nlmeData,nlmeData[,"Geography"])
            for(name in names(getcolnameList)){
              if(length(getcolnameList[[name]]) != 0){
                for(i in 1:length(getcolnameList[[name]])){
                  splitData_geography[[getcolnameList[[name]][i]]][,name] <- 0
                }
              }
            }
            nlmeData <- as.data.frame(rbindlist(splitData_geography,fill = T))
            randModelFunction(fixedFormula,randFormula,nlmeData, modelParamList)
          }else{
            return(mixedeffectmodel)
          }
        }
        modelScopeDfFinal <- olsmFinalRegDf[,-which(names(olsmFinalRegDf) %in% c("Period"))]
        modelScopeDfFinal[,-which(names(modelScopeDfFinal) %in% c("Geography"))] <- as.data.frame(lapply(modelScopeDfFinal[,-which(names(modelScopeDfFinal) %in% c("Geography"))], function(x) as.numeric(as.character(x))))
        # getting random part of lme formula.
        randFormula <- eval(parse(text = as.character(paste0("~ ",modelParamList$randomVar))))
        # Fit (with sign-flip correction) for every candidate fixed formula.
        olsmModelsResults <- lapply(modelParamList$formulaList, function(x,randFormula, data, modelParamList) randModelFunction(fixedFormula = x,randFormula = randFormula,nlmeData = data, modelParamList = modelParamList), randFormula = randFormula, data = modelScopeDfFinal, modelParamList = modelParamList)
      }
  return(olsmModelsResults)
}
# Generate Dummy Model
# Refits a selected model with user-defined dummy variables added.
#
# Args:
#   olsmAllModelList:         list of previously fitted (base) models.
#   olsmModelResult:          summary table of those models; used to resolve a
#                             "Model_<n>_Dummy_<m>" selection back to its base.
#   olsmModelScopeDummyTable: Geography/Period table of dummy columns (0/1).
#   finalDf:                  regression data frame aligned with model$model
#                             (supplies Geography and Period).
#   olsm_parametersDF:        parameter sheet (dependent variable lookup).
#   dummyModelProp:           list with $olsm.model.index, $WLSChoice,
#                             $hasIntercept.
#
# Returns: the refitted model list from olsmAllPossibleRegressions (OLS or
# WLS, matching $WLSChoice).
olsmGetDummyModelResult <- function(olsmAllModelList, olsmModelResult,olsmModelScopeDummyTable, finalDf, olsm_parametersDF,dummyModelProp){
  # If the selected entry is itself a dummy model, refit from its base model.
  if(grepl("Dummy",olsmModelResult$Model_No[as.numeric(dummyModelProp$olsm.model.index)])){
    baseModel <- as.numeric(gsub("Model_|_Dummy_+[0-9]*","",olsmModelResult$Model_No[as.numeric(dummyModelProp$olsm.model.index)]))
    model <- olsmAllModelList[[baseModel]]
  }else {
    model <- olsmAllModelList[[as.numeric(dummyModelProp$olsm.model.index)]]
  }
  # Rebuild the model frame with Geography/Period attached (model$model rows
  # are assumed to align row-for-row with finalDf — confirm upstream).
  dummyModelData <- data.frame(finalDf$Geography,lubridate::dmy(finalDf$Period),model$model)
  if(any(grepl("weights",names(dummyModelData)))){
    dummyModelData <- c("Geography","Period",names(model$model)[-length(model$model)],"weight") -> names(dummyModelData)
  }else{
    names(dummyModelData) <- c("Geography","Period",names(model$model))
  }
  # Restrict to the period covered by the dummy table, then append the
  # non-empty dummy columns.
  dummyModelData <- subset(dummyModelData, Period >= min(olsmModelScopeDummyTable$Period) & Period <= max(olsmModelScopeDummyTable$Period))
  dummyDFTable <- as.data.frame(olsmModelScopeDummyTable[,which(names(olsmModelScopeDummyTable) %in% names(which(apply(olsmModelScopeDummyTable[,!names(olsmModelScopeDummyTable) %in% c("Geography","Period")],2,sum)!=0)))])
  names(dummyDFTable) <- names(which(apply(olsmModelScopeDummyTable[,!names(olsmModelScopeDummyTable) %in% c("Geography","Period")],2,sum)!=0))
  dummyModelData <- cbind(dummyModelData,dummyDFTable)
  dummyModelData <- dummyModelData[,!names(dummyModelData) %in% c("Geography","Period")]
  # Build the refit formula (all columns, dummies included).
  depVar <- olsm_parametersDF$VariableName[olsm_parametersDF$Type == "DepVar"]
  if(dummyModelProp$WLSChoice == "No"){
    indepVar <- names(dummyModelData)[!names(dummyModelData) %in% depVar]
  }else if(dummyModelProp$WLSChoice == "Yes"){
    indepVar <- names(dummyModelData)[!names(dummyModelData) %in% c(depVar,"weight")]
  }
  if(dummyModelProp$hasIntercept == "No"){
    baseFormula <- paste0(depVar," ~ ", paste0(c(indepVar,0),collapse = "+"))
  }else if(dummyModelProp$hasIntercept == "Yes"){
    baseFormula <- paste0(depVar," ~ ", paste0(indepVar,collapse = "+"))
  }
  dummyFormula <- list()
  dummyFormula[[1]] <- baseFormula
  modelParamList <- list()
  if(dummyModelProp$WLSChoice == "No"){
    # Stacked OLS Model
    modelParamList <- c("OLS", as.list(dummyFormula))
    names(modelParamList) <- c("type", "formulaList")
    modelDummy <- olsmAllPossibleRegressions(modelParamList,olsmFinalRegDf = dummyModelData)
  }else if(dummyModelProp$WLSChoice == "Yes"){
    # Stacked WLS Model
    modelParamList <- c("WLS", dummyFormula)
    names(modelParamList) <- c("type", "formulaList")
    modelDummy <- olsmAllPossibleRegressions(modelParamList,dummyModelData)
  }
  return(modelDummy)
}
# Create response curve data (transformation curves plus breakthrough/saturation points).
# Builds response-curve data for the selected variables: evaluates their
# fitted transformation (S-Curve / S-origin / Power) over a synthetic input
# series, and derives breakthrough, saturation and full-saturation points
# from the curve's derivatives.
#
# Args:
#   ResponseVariables: variable name(s) to build curves for.
#   ResponseSteps:     step size of the synthetic input series.
#   ResponseMaxLimit:  upper bound of the synthetic input series.
#   MEUserProjectList: project state; uses $olsm_parametersDF (Alpha/Beta/
#                      SeriesMax settings), $olsmModelFeatureList and
#                      $meRawData (for per-variable maxima).
#
# Returns: list with BP/SP/FSP x-values, the x-axis series, and melted
# data frames for plotting the curves and the break/saturation charts.
CreateResponseCurve <- function(ResponseVariables,ResponseSteps,ResponseMaxLimit,MEUserProjectList){
  ResponseCurveDataList <- list()
  # Response curve series: synthetic input values from 0 to the max limit.
  ResponseCurveSeries <- data.frame(ResponseSeries = seq(0,ResponseMaxLimit,ResponseSteps))
  # Transformation parameters taken from the project parameter sheet.
  Parameters <- data.frame(MEUserProjectList$olsm_parametersDF[which(MEUserProjectList$olsm_parametersDF$VariableName %in% ResponseVariables),c("VariableName","Transformation","BetaMin","AlphaMin","SeriesMax")],row.names = NULL)
  ResponseTransformation <- data.frame(ResponseCurveSeries)
  # With geo-level SeriesMax, average the per-geo values for this variable.
  if(MEUserProjectList$olsmModelFeatureList$ScurveSeriesMaxChoice == "Geo"){
    seriesMaxGeoDf <- MEUserProjectList$olsmModelFeatureList$ScurveSeriesMaxDF
    if(any(seriesMaxGeoDf$VariableName == ResponseVariables)){
      Parameters$SeriesMax <- mean(seriesMaxGeoDf$SeriesMax[seriesMaxGeoDf$VariableName == ResponseVariables],na.rm = T)
    }
  }
  for(name in 1:nrow(Parameters)){
    # Evaluate the variable's transformation over the series.  The input is
    # first rescaled to percent-of-(observed max * SeriesMax).
    if(Parameters[name,"Transformation"] == "S-Curve"){
      tempData <- data.frame(TransformedData = (Parameters[,"BetaMin"][name]/(10^10))^(Parameters[,"AlphaMin"][name]^((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)),stringsAsFactors = F)
    }else if(Parameters[name,"Transformation"] == "S-origin"){
      # S-origin: same family but shifted so the curve passes through 0.
      tempData <- data.frame(TransformedData = (Parameters[,"BetaMin"][name]/(10^9))^(Parameters[,"AlphaMin"][name]^((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)) - (Parameters[,"BetaMin"][name]/(10^9)),stringsAsFactors = F)
    }else if(Parameters[name,"Transformation"] == "Power"){
      tempData <- data.frame(TransformedData = ResponseCurveSeries^Parameters[,"AlphaMin"][name],stringsAsFactors = F
      )
    }
    ## Breakthrough & saturation points, from the curve's derivatives.
    # NOTE(review): the derivative formulas below use the S-Curve (10^10)
    # constants even for S-origin/Power variables — confirm intended.
    FirstDerivatives <- data.frame(Parameters[,"AlphaMin"][name]^((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)*tempData*log(Parameters[,"AlphaMin"][name])*log(Parameters[,"BetaMin"][name]/(10^10)))
    # 2nd derivative: its maximum marks the breakthrough point.
    SecondDerivatives <- FirstDerivatives*log(Parameters[,"AlphaMin"][name]) + Parameters[,"AlphaMin"][name]^(2*((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100))*tempData*(log(Parameters[,"AlphaMin"][name])^2)*(log((Parameters[,"BetaMin"][name]/(10^10)))^2)
    # 3rd derivative: its max past the breakthrough marks the saturation point.
    ThirdDerivatives <- FirstDerivatives*log(Parameters[,"AlphaMin"][name])^2+3*Parameters[,"AlphaMin"][name]^(2*((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100))*tempData*log(Parameters[,"AlphaMin"][name])^3*log((Parameters[,"BetaMin"][name]/(10^10)))^2+Parameters[,"AlphaMin"][name]^(3*((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100))*tempData*log(Parameters[,"AlphaMin"][name])^3*log((Parameters[,"BetaMin"][name]/(10^10)))^3
    BreakThroughPoint <- ((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)[match(max(SecondDerivatives[,1],na.rm = T),SecondDerivatives[,1]),]
    SaturationPoint <- ((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)[match(max(ThirdDerivatives[match(max(SecondDerivatives[,1],na.rm = T),SecondDerivatives[,1]):nrow(ThirdDerivatives),1],na.rm = T),ThirdDerivatives[,1]),]
    # Full saturation: last point where the curve is still <= 0.98.
    FullSaturationPoint <- ((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)[max(which(tempData$ResponseSeries <= 0.98)),]
    # BP :- breakthrough point, SP :- saturation point,
    # FSP :- full saturation point; convert back from percent to x-values.
    BP_XValue <- BreakThroughPoint*((as.numeric(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T))*Parameters[,"SeriesMax"][name])/100)
    SP_XValue <- SaturationPoint*((max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name])/100)
    FSP_XValue <- FullSaturationPoint*((max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name])/100)
    ## X-axis for breakthrough & saturation point calculation.
    XAxis <- ResponseCurveSeries
    XAveragePercent <- ((ResponseCurveSeries/(max(MEUserProjectList$meRawData[Parameters[name,1]],na.rm = T)*Parameters[,"SeriesMax"][name]))*100)
    ResponseTransformation <- cbind(ResponseTransformation,XAveragePercent,tempData)
    names(ResponseTransformation)[length(names(ResponseTransformation))] <- Parameters[name,"VariableName"]
    names(ResponseTransformation)[c(1,2)] <- c("ResponseCurveSeries","XAveragePercent")
  }
  # Long-format frames for plotting.  NOTE(review): BP/SP/FSP/XAxis keep only
  # the values from the LAST variable in the loop — confirm single-variable
  # usage upstream.
  ResponseCurvePlotData <- melt(ResponseTransformation[,!names(ResponseTransformation) %in% "ResponseCurveSeries"],id.vars = c("XAveragePercent"))
  ### BreakThrough
  Break_SaturationPlotData <- melt(ResponseTransformation[,!names(ResponseTransformation) %in% "XAveragePercent"],id.vars = c("ResponseCurveSeries"))
  ResponseCurveDataList[["BP_XValue"]] <- BP_XValue
  ResponseCurveDataList[["SP_XValue"]] <- SP_XValue
  ResponseCurveDataList[["FSP_XValue"]] <- FSP_XValue
  ResponseCurveDataList[["XAxis"]] <- XAxis
  ResponseCurveDataList[["ResponseCurvePlotData"]] <- ResponseCurvePlotData
  ResponseCurveDataList[["Break_SaturationPlotData"]] <- Break_SaturationPlotData
  # Returns a list of data frames for the response-curve plots.
  return(ResponseCurveDataList)
}
################################################################################
##################### ML_Workbench functions ###########################
################################################################################
# BMA Modelling
# BMA Modelling
# Fits a Bayesian Model Averaging model with BMS::bms on ModelData.
# Zero-variance columns are dropped first because bms() cannot handle
# constant regressors.
#
# BUG FIX: the original looped `while(errorflag == F)` around a tryCatch with
# an empty error handler, so if bms() failed persistently the function spun
# forever. Retries are now bounded; on total failure NA is returned (the same
# initial value the original used).
ML_GetBMAModel <- function(ModelData){
  set.seed(100)
  data <- ModelData
  # bms() errors on constant columns; remove them up front.
  data <- Filter(function(x) sd(x) != 0, data)
  BMAModel <- NA
  for (attempt in seq_len(10)) {
    fitted <- tryCatch(
      BMS::bms(data, mprior = "random", g = "UIP", user.int = F),
      error = function(e) NULL
    )
    if (!is.null(fitted)) {
      BMAModel <- fitted
      break
    }
  }
  return(BMAModel)
}
# ML OLS Modelling
# Fits an OLS model on the variables flagged InModel in varSelectionGrid and
# returns the fitted model, the chronological train/test splits, and a
# per-variable elasticity table. Relies on the file-level helper
# olsmAllPossibleRegressions() and on pdp::partial() for elasticities.
ML_GetOLSModel <- function(ML_ModelParameterList,varSelectionGrid,ML_ModelDF,norm){
  olsParameterList <- list()
  # Formula: Dependent ~ sum of every variable currently flagged InModel.
  olsParameterList[["formulaList"]][[1]] <- as.formula(paste0(ML_ModelParameterList$Dependent,"~ ",paste0(varSelectionGrid$Variable[varSelectionGrid$InModel == TRUE],collapse = "+ ")))
  olsParameterList[["type"]] <- "OLS"
  modelVar <- c(ML_ModelParameterList[["Dependent"]], as.character(varSelectionGrid[varSelectionGrid$InModel == TRUE,1]))
  olsModelData <- ML_ModelDF[,modelVar]
  if(norm == TRUE){
    # Mean-normalise every column: value / column mean, rounded to 5 dp.
    olsModelData<-as.data.frame(lapply(olsModelData, function(x) round((x)/(mean(x)), 5)))
  }
  # Chronological split: first ML_TrainPt% of rows train, last ML_TestPt% test
  # (the two windows may overlap if the percentages sum to more than 100).
  trainPt <- floor(nrow(olsModelData)*(as.numeric(ML_ModelParameterList[["ML_TrainPt"]])/100))
  testPt <- floor(nrow(olsModelData)*(as.numeric(ML_ModelParameterList[["ML_TestPt"]])/100))
  training<-olsModelData[1:trainPt,]
  testing<-olsModelData[(nrow(olsModelData)-testPt+1):nrow(olsModelData),]
  OLSModelList <- list()
  # olsmAllPossibleRegressions() is defined elsewhere in this file; element
  # [[1]] is assumed to be the chosen lm-like fit — TODO confirm its contract.
  model <- olsmAllPossibleRegressions(modelParamList = olsParameterList,training)
  # Elasticity per regressor: average relative change of the partial-dependence
  # response around the grid midpoint (NaN at the midpoint itself mapped to 0).
  Elasticity <- unlist(lapply(names(model[[1]]$coefficients)[-1], function(x){
    y <- pdp::partial(model[[1]],pred.var=x,plot=F,returngrid =TRUE, rug=TRUE,train = training,type="regression")
    midPoint <- floor(nrow(y)/2)
    elasticity <- (y[,2]/y[midPoint,2]-1)/(y[,1]/y[midPoint,1]-1)
    elasticity[is.nan(elasticity)] <- 0
    finalAvgEl <- mean(elasticity)
  }))
  OLSModelList[["ExternalModel"]] <- model[[1]]
  OLSModelList[["training"]] <- training
  OLSModelList[["testing"]] <- testing
  OLSModelList[["Elasticity"]] <- data.frame(Variable = names(model[[1]]$coefficients)[-1], OLS_Elasticity = Elasticity,row.names = NULL,stringsAsFactors = F)
  return(OLSModelList)
}
# Bayesian Modelling using Rstan
# Fits a brms regression of the dependent on every InModel variable and
# returns the model, the train/test splits and fixed-effect "elasticities"
# (posterior means of the coefficients).
ML_GetBayesianModel <- function(ML_ModelParameterList,varSelectionGrid,ML_ModelDF,norm){
  set.seed(101)
  modelVar <- c(ML_ModelParameterList[["Dependent"]], as.character(varSelectionGrid[varSelectionGrid$InModel == TRUE,1]))
  bayesModelData <- ML_ModelDF[,modelVar]
  # First column is renamed so the fixed formula `Dependant ~ .` works.
  names(bayesModelData)[1]<- "Dependant"
  if(norm == TRUE){
    # Mean-normalise every column (value / column mean, 5 dp).
    bayesModelData<-as.data.frame(lapply(bayesModelData, function(x) round((x)/(mean(x)), 5)))
  }
  # NOTE(review): the monotonic constraint vector below is computed but never
  # passed to brm() — dead code, presumably copied from the GBM/XGBoost
  # variants. Confirm whether sign constraints were intended here.
  ModelConstraintsDF <- varSelectionGrid[,c("Variable","Expected_PostMeanSign")]
  PosIndex <- which(ModelConstraintsDF$Variable %in% modelVar & ModelConstraintsDF[,2]== 1)
  NegIndex <- which(ModelConstraintsDF$Variable %in% modelVar & ModelConstraintsDF[,2]== -1)
  monotonic_constraints <- as.numeric(rep(1,times = 1+length(PosIndex)+length(NegIndex)))
  monotonic_constraints[NegIndex] <- -1
  monotonic_constraints <- monotonic_constraints[-1]
  # Chronological split: first ML_TrainPt% rows train, last ML_TestPt% test.
  trainPt <- floor(nrow(bayesModelData)*(as.numeric(ML_ModelParameterList[["ML_TrainPt"]])/100))
  testPt <- floor(nrow(bayesModelData)*(as.numeric(ML_ModelParameterList[["ML_TestPt"]])/100))
  training<-bayesModelData[1:trainPt,]
  testing<-bayesModelData[(nrow(bayesModelData)-testPt+1):nrow(bayesModelData),]
  # MCMC settings come straight from the user-supplied parameter list.
  bayesModel <-brm(Dependant ~ .,
                   data = training,
                   iter = as.numeric(ML_ModelParameterList[["BayesianParameter"]][["ML_BayesIterNO"]]),
                   chains = as.numeric(ML_ModelParameterList[["BayesianParameter"]][["ML_BayesChains"]]),
                   control = list(adapt_delta = as.numeric(ML_ModelParameterList[["BayesianParameter"]][["ML_BayesAdaptDelta"]]),
                                  max_treedepth = as.numeric(ML_ModelParameterList[["BayesianParameter"]][["ML_BayesMaxtreedepth"]])))
  # "Elasticity" here = posterior mean of each fixed effect (intercept dropped).
  Elasticity<- data.frame(Bayesian_Elasticity = fixef(bayesModel)[-1,1])
  BayesModelList <- list()
  BayesModelList[["training"]] <- training
  BayesModelList[["testing"]] <- testing
  BayesModelList[["bayes_model"]] <- bayesModel
  BayesModelList[["Elasticity"]] <- data.frame(Variable = rownames(Elasticity), Bayesian_Elasticity = Elasticity[1],row.names = NULL,stringsAsFactors = F)
  return(BayesModelList)
}
# Bayesian Belief Network Modelling
# Stub: not implemented yet. The empty body evaluates to NULL, so callers
# receive NULL for this modelling type.
ML_GetBayesianBeliefNetworkModel <- function(ML_ModelParameterList,varSelectionGrid,ML_ModelDF,norm){
}
# GBM Modelling
# Fits a gradient boosting machine (gbm) with per-variable monotonicity
# constraints taken from varSelectionGrid$Expected_PostMeanSign, selects the
# best iteration by cross-validation, and derives elasticities from the
# partial-dependence grids.
ML_GetGBMModel <- function(ML_ModelParameterList,varSelectionGrid,ML_ModelDF,norm){
  set.seed(101)
  modelVar <- c(ML_ModelParameterList[["Dependent"]], as.character(varSelectionGrid[varSelectionGrid$InModel == TRUE,1]))
  gbmModelData <- ML_ModelDF[,modelVar]
  # First column renamed so the fixed formula `Dependant ~ .` works.
  names(gbmModelData)[1]<- "Dependant"
  if(norm == TRUE){
    # Mean-normalise every column (value / column mean, 5 dp).
    gbmModelData<-as.data.frame(lapply(gbmModelData, function(x) round((x)/(mean(x)), 5)))
  }
  # Build the var.monotone vector: +1 by default, -1 for variables whose
  # expected posterior-mean sign is negative; the leading slot for the
  # dependent is dropped at the end.
  ModelConstraintsDF <- varSelectionGrid[,c("Variable","Expected_PostMeanSign")]
  PosIndex <- which(ModelConstraintsDF$Variable %in% modelVar & ModelConstraintsDF[,2]== 1)
  NegIndex <- which(ModelConstraintsDF$Variable %in% modelVar & ModelConstraintsDF[,2]== -1)
  monotonic_constraints <- as.numeric(rep(1,times = 1+length(PosIndex)+length(NegIndex)))
  monotonic_constraints[NegIndex] <- -1
  monotonic_constraints <- monotonic_constraints[-1]
  # Chronological split: first ML_TrainPt% rows train, last ML_TestPt% test.
  trainPt <- floor(nrow(gbmModelData)*(as.numeric(ML_ModelParameterList[["ML_TrainPt"]])/100))
  testPt <- floor(nrow(gbmModelData)*(as.numeric(ML_ModelParameterList[["ML_TestPt"]])/100))
  training<-gbmModelData[1:trainPt,]
  testing<-gbmModelData[(nrow(gbmModelData)-testPt+1):nrow(gbmModelData),]
  # All hyperparameters come from the user-supplied GBMParameter list.
  best_model <- gbm(Dependant ~ .,data = training,
                    var.monotone = monotonic_constraints,
                    bag.fraction = as.numeric(ML_ModelParameterList[["GBMParameter"]][["ML_GBMBagFraction"]]),
                    shrinkage = as.numeric(ML_ModelParameterList[["GBMParameter"]][["ML_GBMShrinkage"]]),
                    n.minobsinnode = as.numeric(ML_ModelParameterList[["GBMParameter"]][["ML_GBMNMinobsinnode"]]),
                    n.trees = as.numeric(ML_ModelParameterList[["GBMParameter"]][["ML_GBMNTree"]]),
                    cv.folds = as.numeric(ML_ModelParameterList[["GBMParameter"]][["ML_GBMCVFolds"]]),
                    distribution = as.character(ML_ModelParameterList[["GBMParameter"]][["ML_GBMDistType"]]),
                    keep.data = FALSE,verbose = F)
  # Best number of trees by cross-validated error.
  best.iter <- gbm.perf(best_model,method="cv",plot.it = F)
  # Elasticity per regressor: average relative change of the partial-dependence
  # response around the grid midpoint (NaN at the midpoint mapped to 0).
  Elasticity <- unlist(lapply(best_model$var.names, function(x){
    y <- plot.gbm(best_model,x,n.trees = best.iter,return.grid = TRUE)
    midPoint <- floor(nrow(y)/2)
    elasticity <- (y[,2]/y[midPoint,2]-1)/(y[,1]/y[midPoint,1]-1)
    elasticity[is.nan(elasticity)] <- 0
    finalAvgEl <- mean(elasticity)
  }))
  GBMModelList <- list()
  GBMModelList[["GBMModel"]] <- best_model
  GBMModelList[["GBMBestIteration"]] <- best.iter
  GBMModelList[["training"]] <- training
  GBMModelList[["testing"]] <- testing
  GBMModelList[["Elasticity"]] <- data.frame(Variable = best_model$var.names, GBM_Elasticity = Elasticity,row.names = NULL,stringsAsFactors = F)
  return(GBMModelList)
}
# XGBoost Modelling
# Fits an xgboost regression with monotonicity constraints, choosing the
# number of rounds by xgb.cv early stopping, then derives per-feature
# elasticities from pdp partial-dependence grids.
# NOTE(review): objective "reg:linear" is the legacy name (newer xgboost
# releases warn and prefer "reg:squarederror") — confirm the pinned version.
ML_GetXGBoostModel <- function(ML_ModelParameterList,varSelectionGrid,ML_ModelDF,norm){
  set.seed(101)
  modelVar <- c(ML_ModelParameterList[["Dependent"]], as.character(varSelectionGrid[varSelectionGrid$InModel == TRUE,1]))
  xgboostModelData <- ML_ModelDF[,modelVar]
  # First column renamed so the fixed formula `Dependant ~ .` works.
  names(xgboostModelData)[1]<- "Dependant"
  if(norm == TRUE){
    # Mean-normalise every column (value / column mean, 5 dp).
    xgboostModelData<-as.data.frame(lapply(xgboostModelData, function(x) round((x)/(mean(x)), 5)))
  }
  # Monotone constraint vector: +1 default, -1 where the expected sign is
  # negative; the leading slot for the dependent is dropped.
  ModelConstraintsDF <- varSelectionGrid[,c("Variable","Expected_PostMeanSign")]
  PosIndex <- which(ModelConstraintsDF$Variable %in% modelVar & ModelConstraintsDF[,2]== 1)
  NegIndex <- which(ModelConstraintsDF$Variable %in% modelVar & ModelConstraintsDF[,2]== -1)
  monotonic_constraints <- as.numeric(rep(1,times = 1+length(PosIndex)+length(NegIndex)))
  monotonic_constraints[NegIndex] <- -1
  monotonic_constraints <- monotonic_constraints[-1]
  # Chronological split: first ML_TrainPt% rows train, last ML_TestPt% test.
  trainPt <- floor(nrow(xgboostModelData)*(as.numeric(ML_ModelParameterList[["ML_TrainPt"]])/100))
  testPt <- floor(nrow(xgboostModelData)*(as.numeric(ML_ModelParameterList[["ML_TestPt"]])/100))
  train<-xgboostModelData[1:trainPt,]
  test<-xgboostModelData[(nrow(xgboostModelData)-testPt+1):nrow(xgboostModelData),]
  # Sparse design matrices (intercept column included by sparse.model.matrix).
  trainm <- sparse.model.matrix(Dependant ~ ., data = train)
  train_label <- train[,"Dependant"]
  train_matrix <- xgb.DMatrix(data = trainm, label = train_label)
  testm <- sparse.model.matrix(Dependant~ ., data = test)
  test_label <- test[,"Dependant"]
  test_matrix <- xgb.DMatrix(data = testm, label = test_label)
  watchlist <- list(train = train_matrix, test = test_matrix)
  # Cross-validation run: only used to pick best_iteration for the final fit.
  crossvalidresult <- xgb.cv(params = list("booster"="gbtree","objective" = "reg:linear","eval_metric" = "rmse"),
                             monotone_constraints = monotonic_constraints,
                             data = train_matrix,
                             early_stopping_rounds = as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostESRounds"]]),
                             watchlist = watchlist,
                             eta = as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostETA"]]),
                             max.depth = as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostMaxDepth"]]),
                             nrounds = as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostNrounds"]]),
                             maximize = FALSE,
                             nfold=as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostNFolds"]]),
                             verbose = F)
  # Final model refit on the full training matrix at the CV-chosen rounds.
  best_model <- xgb.train(params = list("booster"="gbtree","objective" = "reg:linear","eval_metric" = "rmse"),
                          monotone_constraints = monotonic_constraints,
                          data = train_matrix,
                          watchlist = watchlist,
                          verbose = F,
                          eta = as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostETA"]]),
                          max.depth = as.numeric(ML_ModelParameterList[["XGBoostParameter"]][["ML_XGBoostMaxDepth"]]),
                          nrounds = crossvalidresult$best_iteration,
                          maximize = FALSE)
  # Elasticity per feature via pdp (partial() is called unqualified here but
  # pdp:: elsewhere in the file — presumably pdp is attached; verify).
  Elasticity <- unlist(lapply(best_model$feature_names, function(x){
    y <- partial(best_model,pred.var=x,plot=F,returngrid =TRUE, rug=TRUE,train = sparse.model.matrix(Dependant ~ ., data = train),type="regression")
    midPoint <- floor(nrow(y)/2)
    elasticity <- (y$yhat/y$yhat[midPoint]-1)/(y[,1]/y[midPoint,1]-1)
    elasticity[is.nan(elasticity)] <- 0
    finalAvgEl <- mean(elasticity)
  }))
  xgboostModelList <- list()
  xgboostModelList[["best_model"]] <- best_model
  xgboostModelList[["best.iter"]] <- crossvalidresult
  xgboostModelList[["training"]] <- train
  xgboostModelList[["testing"]] <- test
  xgboostModelList[["Elasticity"]] <- data.frame(Variable = best_model$feature_names, XGBoost_Elasticity = Elasticity,row.names = NULL,stringsAsFactors = F)
  return(xgboostModelList)
}
# ANN Modelling
# Random-grid search over h2o deeplearning hyperparameters; returns the grid,
# the best model (by validation R^2), the h2o train/test frames and
# partial-dependence-based elasticities.
# WARNING (side effects): h2o.init() starts/attaches a cluster and
# h2o.removeAll() deletes EVERY object in that cluster, not just this
# function's frames.
ML_GetANNModel <- function(ML_ModelParameterList,varSelectionGrid,ML_ModelDF,norm){
  h2o.init()
  h2o.removeAll()
  response <- ML_ModelParameterList[["Dependent"]]
  predictors <- as.character(varSelectionGrid[varSelectionGrid$InModel == TRUE,1])
  h2oRawData <- ML_ModelDF[,which(names(ML_ModelDF) %in% c(ML_ModelParameterList[["Dependent"]], as.character(varSelectionGrid[varSelectionGrid$InModel == TRUE,1])))]
  if(norm == TRUE){
    # Mean-normalise every column (value / column mean, 5 dp).
    h2oRawData<-as.data.frame(lapply(h2oRawData, function(x) round((x)/(mean(x)), 5)))
  }
  # Chronological split: first ML_TrainPt% rows train, last ML_TestPt% test.
  trainPt <- floor(nrow(h2oRawData)*(as.numeric(ML_ModelParameterList[["ML_TrainPt"]])/100))
  testPt <- floor(nrow(h2oRawData)*(as.numeric(ML_ModelParameterList[["ML_TestPt"]])/100))
  Training.hex <- as.h2o(h2oRawData[1:trainPt,])
  Testing.hex <- as.h2o(h2oRawData[(nrow(h2oRawData)-testPt+1):nrow(h2oRawData),])
  ####hyper_params ###
  # Fixed search space: activation, architecture, dropout and L1/L2 penalties.
  hyper_params <- list(activation=c("Rectifier","Tanh","Maxout","RectifierWithDropout","TanhWithDropout","MaxoutWithDropout"),
                       hidden=list(c(20,20),c(50,50),c(30,30,30),c(25,25,25,25)),
                       input_dropout_ratio=c(0,0.05),l1=seq(0,1e-4,1e-6),l2=seq(0,1e-4,1e-6)
  )
  ####search criteria ###
  # Random search, hard-capped at 360 s wall clock and the user's model count.
  search_criteria = list(strategy = "RandomDiscrete", max_runtime_secs = 360,
                         max_models = as.numeric(ML_ModelParameterList[["ANNParameter"]][["ML_ANNMaxModels"]]),
                         seed=1234567, stopping_rounds=5, stopping_tolerance=1e-2)
  dl_random_grid <- h2o.grid(
    algorithm="deeplearning",
    grid_id = "dl_grid_random",
    training_frame=Training.hex,
    validation_frame=Testing.hex,
    x=predictors,
    y=response,
    epochs= as.numeric(ML_ModelParameterList[["ANNParameter"]][["ML_ANNNoEpochs"]]),
    stopping_metric= as.character(ML_ModelParameterList[["ANNParameter"]][["ML_ANNstoppingmetric"]]),
    stopping_tolerance=1e-2, ## stop when logloss does not improve by >=1% for 2 scoring events
    stopping_rounds=2,
    score_validation_samples=10000, ## downsample validation set for faster scoring
    score_duty_cycle=0.025, ## don't score more than 2.5% of the wall time
    max_w2=10, ## can help improve stability for Rectifier
    hyper_params = hyper_params,
    search_criteria = search_criteria
  )
  # Best model = highest validation R^2 in the grid.
  grid <- h2o.getGrid("dl_grid_random",sort_by="r2",decreasing=TRUE)
  h2obest_model <- h2o.getModel(grid@model_ids[[1]])
  # Elasticity per predictor: average relative change of the h2o partial plot
  # response around its midpoint (NaN at the midpoint mapped to 0).
  Elasticity <- unlist(lapply(h2obest_model@parameters$x, function(x){
    y <- h2o.partialPlot(h2obest_model,Training.hex,x,plot = F)
    midPoint <- floor(nrow(y[1])/2)
    elasticity <- (y$mean_response/y$mean_response[midPoint]-1)/(y[,1]/y[midPoint,1]-1)
    elasticity[is.nan(elasticity)] <- 0
    finalAvgEl <- mean(elasticity)
  }))
  ANNModelList <- list()
  ANNModelList[["ANNModelgrid"]] <- grid
  ANNModelList[["ANNBestModel"]] <- h2obest_model
  ANNModelList[["training"]] <- Training.hex
  ANNModelList[["testing"]] <- Testing.hex
  ANNModelList[["Elasticity"]] <- data.frame(Variable = h2obest_model@parameters$x, ANN_Elasticity = Elasticity,row.names = NULL,stringsAsFactors = F)
  return(ANNModelList)
}
# Generate Compared AVM Table
# Builds an Actual-vs-Model (AVM) table: one "Actual" column (the dependent
# series) plus one fitted column per model present in ML_ModelDataList, each
# formed as train predictions followed by test predictions. Models marked
# "MeanNorm" in SelectedModel (column 3) were fitted on mean-normalised data,
# so their fits are scaled back by the dependent's mean.
ML_generateCompareAVM <- function(ML_ModelDataList,ML_ModelParameterList,SelectedModel){
  ML_allAVM <- data.frame()
  # Actual
  ML_allAVM <- rbind(ML_ModelDataList$ML_ModelDF[ML_ModelParameterList[["Dependent"]]])
  names(ML_allAVM) <- "Actual"
  # OLS Fitted
  if(any(names(ML_ModelDataList) %in% "ExternalModel")){
    olsTrainFitted <- ML_ModelDataList$ExternalModel$ExternalModel$fitted.values
    olsTestFitted <- predict(ML_ModelDataList$ExternalModel$ExternalModel,ML_ModelDataList$ExternalModel$testing)
    if(SelectedModel[SelectedModel$ModellingType == "ExternalModel",3] == "MeanNorm"){
      olsTrainFitted <- olsTrainFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
      olsTestFitted <- olsTestFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
    }
    ML_allAVM <- cbind(ML_allAVM,data.frame(ExternalModel_Fitted = c(olsTrainFitted,olsTestFitted)))
  }
  # GBM Fitted
  if(any(names(ML_ModelDataList) %in% "GBMModel")){
    gbmTrainFitted <- predict.gbm(ML_ModelDataList$GBMModel$GBMModel,ML_ModelDataList$GBMModel$training)
    gbmTestFitted <- predict.gbm(ML_ModelDataList$GBMModel$GBMModel,ML_ModelDataList$GBMModel$testing)
    if(SelectedModel[SelectedModel$ModellingType == "GBM",3] == "MeanNorm"){
      gbmTrainFitted <- gbmTrainFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
      gbmTestFitted <- gbmTestFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
    }
    ML_allAVM <- cbind(ML_allAVM,data.frame(GBM_Fitted = c(gbmTrainFitted,gbmTestFitted)))
  }
  # XGBoost Fitted
  if(any(names(ML_ModelDataList) %in% "XGBoostModel")){
    xgbTrainFitted <- predict(ML_ModelDataList$XGBoostModel$best_model,xgb.DMatrix(data = sparse.model.matrix(Dependant ~ ., data = ML_ModelDataList$XGBoostModel$training), label = ML_ModelDataList$XGBoostModel$training[,"Dependant"]))
    xgbTestFitted <- predict(ML_ModelDataList$XGBoostModel$best_model,xgb.DMatrix(data = sparse.model.matrix(Dependant ~ ., data = ML_ModelDataList$XGBoostModel$testing), label = ML_ModelDataList$XGBoostModel$testing[,"Dependant"]))
    if(SelectedModel[SelectedModel$ModellingType == "XGBoost",3] == "MeanNorm"){
      # BUG FIX: the original rescaled `bayesTrainFitted` here — a Bayesian
      # variable that is undefined at this point — instead of the XGBoost
      # train fit.
      xgbTrainFitted <- xgbTrainFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
      xgbTestFitted <- xgbTestFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
    }
    ML_allAVM <- cbind(ML_allAVM,data.frame(XGBoost_Fitted = c(xgbTrainFitted,xgbTestFitted)))
  }
  # ANN Fitted
  if(any(names(ML_ModelDataList) %in% "ANNModel")){
    h2obest_model <- ML_ModelDataList$ANNModel$ANNBestModel
    ANNTrainFitted <- as.data.frame(h2o.predict(h2obest_model, ML_ModelDataList$ANNModel$training))
    ANNTestFitted <- as.data.frame(h2o.predict(h2obest_model, ML_ModelDataList$ANNModel$testing))
    if(SelectedModel[SelectedModel$ModellingType == "ANN",3] == "MeanNorm"){
      ANNTrainFitted <- ANNTrainFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
      ANNTestFitted <- ANNTestFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
    }
    # h2o.predict returns a one-column frame named "predict"; rename it.
    ML_allAVM <- cbind(ML_allAVM,data.frame(ANN_Fitted = rbind(ANNTrainFitted,ANNTestFitted)))
    names(ML_allAVM)[names(ML_allAVM) %in% "predict"] <- "ANN_Fitted"
  }
  # Bayesian Fitted
  if(any(names(ML_ModelDataList) %in% "BayesianModel")){
    bayesTrainFitted <- predict(ML_ModelDataList$BayesianModel$bayes_model)[,1]
    bayesTestFitted <- predict(ML_ModelDataList$BayesianModel$bayes_model,ML_ModelDataList$BayesianModel$testing)[,1]
    if(SelectedModel[SelectedModel$ModellingType == "Bayesian",3] == "MeanNorm"){
      bayesTrainFitted <- bayesTrainFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
      bayesTestFitted <- bayesTestFitted * mean(ML_ModelDataList[["ML_ModelDF"]][,ML_ModelParameterList[["Dependent"]]])
    }
    ML_allAVM <- cbind(ML_allAVM,data.frame(Bayesian_Fitted = c(bayesTrainFitted,bayesTestFitted)))
  }
  # Bayesian Belief Network Fitted — modelling type not implemented yet.
  if(any(names(ML_ModelDataList) %in% "BayesianBeliefNetworkModel")){
    # bayesFitted <- predict(ML_ModelDataList$BayesianModel$bayes_model)[,1]
    # ML_allAVM <- cbind(ML_allAVM,data.frame(Bayesian_Fitted = c(bayesFitted,rep(0,nrow(ML_allAVM)-length(bayesFitted)))))
  }
  return(ML_allAVM)
}
# Generate Compared Elasticity Table
# Starts from the BMA-selected variables and attaches each fitted model's
# elasticity column (merged by variable name, unsorted, keeping all rows).
# The intercept row is dropped and every elasticity is rounded to 3 dp.
ML_getCompareElasticityGrid <- function(ML_ModelDataList){
  ElasticityGrid <- data.frame(
    Variable = ML_ModelDataList$BMAModelResult$Variable[ML_ModelDataList$BMAModelResult$InModel == TRUE]
  )
  # Same fixed merge order as the individual fitters produce their slots.
  model_slots <- c("ExternalModel", "GBMModel", "XGBoostModel", "ANNModel", "BayesianModel")
  for (slot in model_slots) {
    if (any(names(ML_ModelDataList) %in% slot)) {
      ElasticityGrid <- merge(ElasticityGrid, ML_ModelDataList[[slot]]$Elasticity,
                              by = "Variable", all = TRUE, sort = FALSE)
    }
  }
  ElasticityGrid <- ElasticityGrid[ElasticityGrid$Variable != "(Intercept)", ]
  # as.data.frame() guards the single-elasticity-column case, where [, -1]
  # would otherwise drop to a bare vector.
  ElasticityGrid[, -1] <- apply(as.data.frame(ElasticityGrid[, -1]), 2, function(x) round(x, digits = 3))
  return(ElasticityGrid)
}
# Compare Models Stat
# Collects in-sample fit statistics (MAPE, RMSE, R2 as a percent string) for
# every fitted model present in ML_ModelDataList and returns one row per
# model. Relies on the file-level helpers mape() and rmse().
# (A stray no-op statement that merely evaluated the OLS model frame was
# removed from the ExternalModel branch.)
ML_getCompareModelsStat <- function(ML_ModelDataList){
  ModelStats <- list()
  # OLS Fitted
  if(any(names(ML_ModelDataList) %in% "ExternalModel")){
    olsStat <- list()
    ##### MAPE ####
    olsStat[["MAPE"]] <- mape(x = ML_ModelDataList$ExternalModel$ExternalModel$model[,1],y = ML_ModelDataList$ExternalModel$ExternalModel$fitted.values)
    ##### RMSE #####
    olsStat[["RMSE"]] <- rmse(y = ML_ModelDataList$ExternalModel$ExternalModel$fitted.values,x = ML_ModelDataList$ExternalModel$ExternalModel$model[,1])
    olsStat[["R2"]] <- paste0(round(summary(ML_ModelDataList$ExternalModel$ExternalModel)$r.squared,digits = 3) * 100,"%")
    ModelStats[["External Model"]] <- olsStat
  }
  # GBM Fitted
  if(any(names(ML_ModelDataList) %in% "GBMModel")){
    gbmStat <- list()
    gbmStat[["MAPE"]] <- mape(x = ML_ModelDataList$GBMModel$training[,1],y = ML_ModelDataList$GBMModel$GBMModel$fit)
    gbmStat[["RMSE"]] <- rmse(y = ML_ModelDataList$GBMModel$GBMModel$fit,x = ML_ModelDataList$GBMModel$training[,1])
    # R2 computed as squared correlation between fit and actual.
    gbmStat[["R2"]] <- paste0(round((cor(ML_ModelDataList$GBMModel$GBMModel$fit,ML_ModelDataList$GBMModel$training[,1]))^2,digits = 3) * 100,"%")
    ModelStats[["GBM Model"]] <- gbmStat
  }
  # XGBoost Fitted
  if(any(names(ML_ModelDataList) %in% "XGBoostModel")){
    xgbStat <- list()
    actual <- ML_ModelDataList$XGBoostModel$training[,1]
    fitted <- predict(ML_ModelDataList$XGBoostModel$best_model,xgb.DMatrix(data = sparse.model.matrix(Dependant ~ ., data = ML_ModelDataList$XGBoostModel$training), label = ML_ModelDataList$XGBoostModel$training[,"Dependant"]))
    xgbStat[["MAPE"]] <- mape(x = as.vector(actual),y = as.vector(fitted))
    xgbStat[["RMSE"]] <- rmse(y = fitted,x = actual)
    xgbStat[["R2"]] <- paste0(round((cor(fitted,actual))^2,digits = 3) * 100,"%")
    ModelStats[["XGBoost Model"]] <- xgbStat
  }
  # ANN Fitted
  if(any(names(ML_ModelDataList) %in% "ANNModel")){
    annStat <- list()
    actual = ML_ModelDataList$ANNModel$training[,1]
    fitted = h2o.predict(ML_ModelDataList$ANNModel$ANNBestModel, ML_ModelDataList$ANNModel$training)
    annStat[["MAPE"]] <- mape(x = as.vector(actual),y = as.vector(fitted))
    annStat[["RMSE"]] <- rmse(y = as.vector(fitted),x = as.vector(actual))
    annStat[["R2"]] <- paste0(round((cor(fitted,actual))^2,digits = 3) * 100,"%")
    ModelStats[["ANN Model"]] <- annStat
  }
  # Bayesian Fitted
  if(any(names(ML_ModelDataList) %in% "BayesianModel")){
    bayesStat <- list()
    actual <- ML_ModelDataList$BayesianModel$bayes_model$data[,1]
    fitted <- predict(ML_ModelDataList$BayesianModel$bayes_model)[,1]
    bayesStat[["MAPE"]] <- mape(x = as.vector(actual),y = as.vector(fitted))
    bayesStat[["RMSE"]] <- rmse(y = fitted,x = actual)
    bayesStat[["R2"]] <- paste0(round((cor(fitted,actual))^2,digits = 3) * 100,"%")
    ModelStats[["Bayesian Model"]] <- bayesStat
  }
  # Flatten: one data.frame row per model, Model name as the first column.
  ModelStats <- do.call(rbind,lapply(names(ModelStats), function(x){
    data.frame(Model = x,as.data.frame.list(ModelStats[[x]]))
  }))
  return(ModelStats)
}
######################################################################
###################### Functions Related to EDA ######################
######################################################################
################ Function to plot a Scatterplot
################ Function to plot a Scatterplot
# Bivariate scatter plot of column[2] against column[1].
# BUG FIX: the original left scale_color_gradient() on its own line after the
# `+` chain had already ended, so it was evaluated and silently discarded;
# it is now part of the plot.
kFunBiScatterPlot <- function(column, df){
  p <- ggplot(df, aes_string(x = column[1], y = column[2])) +
    geom_point() +
    theme(axis.text = element_text(size = 10, colour = "blue"),
          axis.title = element_text(size = 15, face = "bold")) +
    scale_color_gradient(low = "#0091ff", high = "#f0650e")
  return(p)
}
################## Function to check correlation between Variables
# Returns the full correlation matrix (rounded to 3 dp) across the columns of
# `df` named in `column`. Note the result is a matrix, not a single r value.
KFuncorrelation <- function(column, df)
{
  selected <- df[column]
  correlation_matrix <- round(cor(selected, selected), 3)
  return(correlation_matrix)
}
crosstbl<-data.frame()
############# Function to check Contingency between Variables
# Cross-tabulates the two columns of `df` named in `column` and returns the
# table as a data.frame: first column "variable" holds the row labels, the
# remaining columns (one per level of column[2]) hold the counts as character
# strings (a consequence of cbind-ing labels with counts).
KFunContingencyTbl <- function(column, df)
{
  tab <- table(df[[column[1]]], df[[column[2]]])
  out <- data.frame(cbind(variable = rownames(tab), tab))
  rownames(out) <- NULL
  return(out)
}
################## Function to create chisquare
# Chi-square test of independence between the columns of `df` named in
# `column`; returns a one-row data.frame with p-value, statistic and df.
# BUG FIX: the original ignored its `df` argument and read the global
# MEUserProjectList$meRawData; it now uses the data frame it is given.
kFunChiSquare <- function(column, df){
  kVarchisquaredf <- data.frame()
  data <- subset(df, select = column)
  t <- table(data)
  test <- chisq.test(t)
  kVarchisquaredf <- data.frame(pvalue = test$p.value,
                                xsquared = test$statistic,
                                degreeOfFreedom = test$parameter)
  return(kVarchisquaredf)
}
######### Function for ANOVA
############## Function for per-group summary statistics (count / mean / sd)
# Takes a two-column data frame: column 1 is the numeric value, column 2 the
# grouping variable. Returns one row per group with count, mean and sd; the
# first output column is named after the grouping variable.
#
# BUG FIX: the original called mean(df[[1]]) / sd(df[[1]]) inside summarise(),
# which evaluates over the WHOLE column, so every group received identical
# statistics; its trailing colnames(df[2]) <- ... also modified a copy (no-op).
kFunStatsConCat<-function(df){
  groups <- split(df[[1]], df[[2]])
  kVartble <- data.frame(
    group = names(groups),
    count = vapply(groups, length, integer(1)),
    mean = vapply(groups, function(v) mean(v, na.rm = TRUE), numeric(1)),
    sd = vapply(groups, function(v) sd(v, na.rm = TRUE), numeric(1)),
    row.names = NULL,
    stringsAsFactors = FALSE
  )
  names(kVartble)[1] <- names(df)[2]
  return(kVartble)
}
################## Function to plot a Boxplot
# Boxplot (ggpubr::ggboxplot) of the first column of `df`, grouped and
# coloured by the second column, with rotated bold x-axis labels.
KFunVisualize <- function(df)
{
  value_col <- names(df)[1]
  group_col <- names(df)[2]
  axis_style <- theme(
    axis.text.x = element_text(face = "bold", color = "#993333", size = 8, angle = 45),
    plot.margin = margin(1, 2, 4, 1, "cm")
  )
  p1 <- ggboxplot(df, x = group_col, y = value_col, color = group_col) +
    axis_style
  return(p1)
}
######### Function to display the summary of ANOVA
# One-way ANOVA of the first column on the second; returns summary(aov(...)).
# NOTE(review): this definition is overridden later in the file by a second
# KFunAnnovaTest that returns tidy(res.aov) instead of summary(res.aov);
# only the later definition is effective after the file is sourced.
KFunAnnovaTest<-function(df)
{
  res.aov<-aov(df[[1]]~df[[2]])
  return(summary(res.aov))
}
################## Function to plot a Heatmap
# Correlation heatmap of the columns of `df` named in `inheatmap`, rendered
# as an interactive plotly object via ggplotly.
kFunheatmap<-function(inheatmap,df){
  df<-subset(df,select=inheatmap)
  cormat<-cor(df)
  # Long format (Var1, Var2, value) for the tile plot.
  melted_cordata <- melt(cormat)
  # NOTE(review): col.plot is assigned but never used; the `label = value`
  # aesthetic is also unused because no geom_text layer is added.
  col.plot<-c('red','green')
  plot <- ggplot(data = melted_cordata, aes(x=Var1, y=Var2, fill=value, label= value))+
    scale_fill_gradient(low="#58D68D",high="#FA8072")+theme(axis.text.x = element_text(size=8, angle=45),plot.margin = margin(1,2,6,1,"cm"))+theme(axis.text.y = element_text(size=8, angle=25),plot.margin = margin(1,2,6,1,"cm"))
  plot_tile<-plot+geom_tile()
  return(ggplotly(plot_tile))
}
######### Function to display the summary of ANOVA
# One-way ANOVA of the first column on the second, returned as a tidy
# (broom) data frame. This definition replaces the earlier KFunAnnovaTest
# above, which returned summary(res.aov) instead.
KFunAnnovaTest<-function(df){
  res.aov<-aov(df[[1]]~df[[2]])
  return(tidy(res.aov))
}
####### Function to create a model formula
# Builds "response ~ x1 + x2 + ..." from a character vector: the first
# element is the response, the remaining elements become additive predictors.
kFunanovaform <- function(contvar){
  response <- contvar[1]
  predictors <- paste(contvar[2:length(contvar)], collapse = "+")
  form <- as.formula(paste(response, "~", predictors))
  return(form)
}
# Multi-factor ANOVA: formula built from `inaov` by kFunanovaform() (first
# element response, rest predictors), fitted on `df`, tidied via broom.
kFunAnovaMulti <- function(inaov, df){
  fitted_aov <- aov(kFunanovaform(inaov), df)
  return(tidy(fitted_aov))
}
###################### Function related to DLM ######################
# Centred moving-average transform over odd window sizes in [min, max].
# Uses pracma::movavg(type = "s"). Returns one column per window, named
# "<col>_CMA_<window>".
# NOTE(review): when max == min the if-branch has no explicit return(), so
# the input `series` is returned invisibly as the value of the assignment.
# NOTE(review): replicate() is seeded from series[,1] only — for multi-column
# input every replicated column starts from the FIRST column; confirm whether
# multi-column series are ever passed in.
MovingAverage <- function(series,min,max){
  if(max-min ==0){
    Movavgdata <- series
  }else{
    series <- as.data.frame(series)
    # Keep only odd window sizes > 1 (centred moving averages).
    avgseries <- min:max
    avgseries <- avgseries[which((min:max)%%2 ==1)]
    avgseries <- avgseries[which(avgseries>1)]
    Movavgdata <- as.data.frame(replicate(expr = series[,1],n = length(avgseries)),stringsAsFactors = F)
    Movavgdata <- data.frame(do.call(cbind,replicate(Movavgdata,n= ncol(series))),stringsAsFactors = F)
    avgseries <- rep(avgseries,times= ncol(series))
    names(Movavgdata) <- paste0(names(series),"_CMA_",avgseries)
    names(avgseries) <- names(Movavgdata)
    for(name in names(Movavgdata)){
      # Simple ("s") moving average with that column's window size.
      Movavgdata[,name] <- movavg(x = Movavgdata[,name],n = avgseries[name],type = "s")
    }
    Movavgdata <- as.data.frame(Movavgdata)
    return(Movavgdata)
  }
}
# Lag transform: shifts the series by each lag in [min, max] (data.table::shift,
# zero fill) and returns one column per lag, named "<col>_L<lag>".
# NOTE(review): when max == min the if-branch has no explicit return(), so
# the input `series` is returned invisibly as the value of the assignment.
# NOTE(review): replicate() is seeded from series[,1] only — multi-column
# input would lag copies of the FIRST column; confirm intended usage.
lagTransformation <- function(series,min,max){
  FinalLagdata <- data.frame()
  if(max-min== 0){
    FinalLagdata <- series
  }else {
    series <- as.data.frame(series)
    Ldata <- as.data.frame(replicate(expr = series[,1],n = c(max - min + 1)),stringsAsFactors = F)
    names(Ldata) <- paste0(names(series),"_L",min:max)
    # One lag value per output column.
    lagseries <- as.numeric(as.numeric(as.character(min)):as.numeric(as.character(max)))
    names(lagseries) <- names(Ldata)
    LagDataframe <- as.data.frame(Ldata)
    for(namel in names(lagseries)){
      if(!is.na(lagseries[namel])){
        # Shift down by the lag; vacated leading positions filled with 0.
        LagDataframe[,namel] <- shift(LagDataframe[namel],as.numeric(unname(lagseries[namel])),fill = 0,type = "lag")
      }
    }
    if(nrow(FinalLagdata)==0){
      FinalLagdata <- LagDataframe
    }else{
      FinalLagdata <- cbind(FinalLagdata,LagDataframe)
    }
    return(FinalLagdata)
  }
}
# Log transform of the first column of `series` in base `value`, with zeros
# mapped to 0 (log(0) would be -Inf); the column is renamed "<col>_log_<base>".
# When value == 0 the input is returned unchanged (identity transform).
#
# FIX: both exit paths now return explicitly — the original's value == 0
# branch fell off the end of the function and returned the series invisibly.
logTransformation <- function(series,value){
  if(value == 0){
    return(series)
  }
  series <- as.data.frame(series)
  # Guard zeros explicitly; negative inputs still produce NaN, as before.
  series[,1] <- ifelse(series[,1]==0,0,log(series[,1],base = value))
  names(series) <- paste0(names(series),"_log_",value)
  return(series)
}
# "Past average" transform over window sizes in (min, max]: for each window w,
# row i is replaced by the sum of the previous w values divided by 2, with the
# first w rows zeroed. Output columns are named "<col>_PA_<w>".
# NOTE(review): the divisor is hard-coded to 2 regardless of window size — for
# w != 2 this is NOT an average; confirm whether sum(...)/pastseries[name]
# was intended.
# NOTE(review): the loop mutates Pastavgdata in place, so later rows average
# over already-transformed (not original) values — confirm this rolling
# self-reference is intentional.
# NOTE(review): when max == min the if-branch has no explicit return(), so
# the input `series` is returned invisibly as the value of the assignment.
PastAverage <- function(series,min,max){
  if(max-min == 0){
    Pastavgdata <-series
  }else{
    series <- as.data.frame(series)
    pastseries <- min:max
    pastseries <- pastseries[which(pastseries>1)]
    Pastavgdata <- as.data.frame(replicate(expr = series[,1],n = length(pastseries)),stringsAsFactors = F)
    Pastavgdata <- data.frame(do.call(cbind,replicate(Pastavgdata,n = ncol(series))),stringsAsFactors = F)
    names(Pastavgdata) <- paste0(names(series),"_PA_",pastseries)
    pastseries <- rep(pastseries,times= ncol(Pastavgdata))
    names(pastseries) <- names(Pastavgdata)
    for(name in names(Pastavgdata)){
      for(i in pastseries[name]:nrow(Pastavgdata[name])){
        # Sum of the preceding `w` (possibly already-transformed) values / 2.
        Pastavgdata[i,name] <- sum(Pastavgdata[(i-pastseries[name]):(i-1),name])/2
      }
      # Rows inside the first window carry no history: force to zero.
      Pastavgdata[1:pastseries[name],name]<-0
    }
    return(Pastavgdata)
  }
}
# Z-score normalisation per column, with the first observation pinned to 0
# (consistent with the other DLM transforms, which zero the first period).
# Zero-variance columns normalise to all zeros. Output columns are named
# "<col>_norm"; note that columns are PREPENDED, so multi-column input comes
# back in reverse column order (behaviour preserved from the original).
#
# BUG FIX: the original used ifelse(sd == 0, 0, <vector>) with a scalar
# condition — ifelse() returns a result the length of its test, so only the
# FIRST normalised value was kept and recycled across all remaining rows.
Normalization <- function(series){
  series <- as.data.frame(series)
  col_mean <- sapply(X = series,FUN =mean)
  col_sd <- sapply(X = series,FUN =sd)
  NormalizedData <- data.frame()
  for(name in names(series)){
    normalized <- data.frame(matrix(data = 0,nrow = nrow(series),ncol = 1))
    names(normalized) <- name
    # First period is pinned to zero.
    normalized[1,name] <- 0
    if (col_sd[name] == 0) {
      normalized[2:nrow(series),name] <- 0
    } else {
      normalized[2:nrow(series),name] <- (series[2:nrow(series),name]-col_mean[name])/col_sd[name]
    }
    names(normalized) <- paste0(name,"_norm")
    if(nrow(NormalizedData) == 0){
      NormalizedData <- normalized
    }else{
      NormalizedData <- cbind(normalized,NormalizedData)
    }
  }
  return(NormalizedData)
}
# Advertising adstock (geometric carry-over) transform.
# For each retention rate r — `steps` values evenly spaced over [min, max] —
# computes a[1] = 0; a[i] = x[i] * r + (1 - r) * a[i-1], one output column per
# (input column, rate) pair, named "<col>_A<r>". When min == max the input is
# returned unchanged.
#
# BUG FIX: the recursion was written x*r + (1 - r*prev) — a misplaced
# parenthesis that effectively added 1 every period instead of decaying the
# previous stock; the geometric decay term is (1 - r) * prev.
AdStock <- function(series,min,max,steps){
  if(max-min == 0){
    return(series)
  }
  series <- as.data.frame(series)
  if(as.numeric(steps) == 0 | as.numeric(steps) == 1){
    steps <- 1
    AdStockSeries <- as.numeric(min)
  }else{
    steps <- (as.numeric(max)-as.numeric(min))/(as.numeric(steps)-1)
    AdStockSeries <- as.numeric(seq(from=as.numeric(min),to=as.numeric(max),by=steps))
  }
  # First period carries no history.
  series[1,] <- 0
  AdStockData <- do.call(cbind, replicate(series, n = length(AdStockSeries), simplify=FALSE))
  names(AdStockData) <- as.character(sapply(names(series), function(x) paste0(x,"_A",AdStockSeries)))
  AdStockSeries <- rep(AdStockSeries,ncol(series))
  names(AdStockSeries) <- names(AdStockData)
  for(name in names(AdStockData)){
    rate <- as.numeric(AdStockSeries[name])
    for( i in 2:length(AdStockData[,name])){
      AdStockData[i,name] <- AdStockData[i,name] * rate + (1 - rate) * AdStockData[i-1,name]
    }
  }
  return(AdStockData)
}
|
f828d26d8b1de57632621d53d509c7cc8f4ac2e6
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Introduction_To_Probability_And_Statistics_For_Engineers_And_Scientists_by_Sheldon_M._Ross/CH5/EX5.8.e/Ex5_8e.R
|
af07ae4112b18cd7159a03bcce14535817472eda
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 32
|
r
|
Ex5_8e.R
|
#a
# P(T <= 1.4) for a t-distribution with 12 degrees of freedom.
pt(1.4,12)
#b
# Upper 2.5% critical value for 9 degrees of freedom; by symmetry
# -qt(0.025, 9) equals qt(0.975, 9).
-qt(0.025,9)
5adc1ab4ecd53b19ad3138888f5f36e6f732b3cf
|
164f95bc07a0655806d4169e67e8e4aeb61bfbb7
|
/tools/music_deconvolution/scripts/estimateprops.R
|
92e57337aa391744a91ac6a0330e8d7c05feac91
|
[] |
no_license
|
Delphine-L/galaxytools
|
4ce7cf82e9302820b84b7f2a50530174d3f02f13
|
479bcc60983d5957ec5609dc6a58d8e927ce4af6
|
refs/heads/master
| 2021-11-23T22:18:16.046656
| 2021-11-09T08:36:48
| 2021-11-09T08:36:48
| 250,547,957
| 0
| 0
| null | 2020-03-27T13:56:05
| 2020-03-27T13:56:05
| null |
UTF-8
|
R
| false
| false
| 4,074
|
r
|
estimateprops.R
|
## Estimate cell-type proportions in bulk RNA-seq with MuSiC and compare the
## MuSiC weighted estimator against the all-gene (NNLS) estimator, both as
## jitter plots and as linear models against the phenotype factors.
suppressWarnings(suppressPackageStartupMessages(library(xbioc)))
suppressWarnings(suppressPackageStartupMessages(library(MuSiC)))
suppressWarnings(suppressPackageStartupMessages(library(reshape2)))
suppressWarnings(suppressPackageStartupMessages(library(cowplot)))
## We use this script to estimate the effectiveness of proportion methods
## Load Conf: args[1] is an R file expected to define bulk_eset, scrna_eset,
## celltypes_label, samples_label, celltypes, methods, sample_groups,
## healthy_phenotype, sample_disease_group, sample_disease_group_scale,
## phenotype_factors, phenotype_gene, compare_title and outfile_pdf.
args <- commandArgs(trailingOnly = TRUE)
source(args[1])
## Estimate cell type proportions
est_prop <- music_prop(
    bulk.eset = bulk_eset, sc.eset = scrna_eset,
    clusters = celltypes_label,
    samples = samples_label, select.ct = celltypes, verbose = TRUE)
## Derive dataset dimensions from the estimate matrices instead of
## hard-coding 89 subjects x 6 cell types, so the script generalizes to
## other datasets. Rows of Est.prop.weighted are bulk subjects, columns
## are the selected cell types.
n_subjects <- nrow(est_prop$Est.prop.weighted)
n_celltypes <- ncol(est_prop$Est.prop.weighted)
## Show difference in estimation methods
## Jitter plot of estimated cell type proportions
jitter.fig <- Jitter_Est(
    list(data.matrix(est_prop$Est.prop.weighted),
         data.matrix(est_prop$Est.prop.allgene)),
    method.name = methods, title = "Jitter plot of Est Proportions")
## Make a Plot
## A more sophisticated jitter plot is provided as below. We separated
## the T2D subjects and normal subjects by their HbA1c levels.
## melt() of a matrix emits rows in column-major order with the subject
## (Var1) varying fastest, so recycling per-subject vectors below stays
## aligned with the melted rows.
m_prop <- rbind(melt(est_prop$Est.prop.weighted),
                melt(est_prop$Est.prop.allgene))
colnames(m_prop) <- c("Sub", "CellType", "Prop")
m_prop$CellType <- factor(m_prop$CellType, levels = celltypes) # nolint
## One method label per melted matrix (n_subjects * n_celltypes rows each).
m_prop$Method <- factor(rep(methods, each = n_subjects * n_celltypes), # nolint
                        levels = methods)
## hba1c has one value per subject; repeat across both methods and all
## cell types. NOTE(review): the hba1c column is hard-coded here even
## though phenotype_gene is configurable — confirm they always agree.
m_prop$HbA1c <- rep(bulk_eset$hba1c, 2 * n_celltypes) # nolint
m_prop <- m_prop[!is.na(m_prop$HbA1c), ]
## HbA1c > 6.5 is the clinical T2D threshold used to split the groups.
m_prop$Disease <- factor(sample_groups[(m_prop$HbA1c > 6.5) + 1], # nolint
                         levels = sample_groups)
m_prop$D <- (m_prop$Disease == # nolint
               sample_disease_group) / sample_disease_group_scale
m_prop <- rbind(subset(m_prop, Disease == healthy_phenotype),
                subset(m_prop, Disease != healthy_phenotype))
jitter.new <- ggplot(m_prop, aes(Method, Prop)) +
    geom_point(aes(fill = Method, color = Disease, stroke = D, shape = Disease),
               size = 2, alpha = 0.7,
               position = position_jitter(width = 0.25, height = 0)) +
    facet_wrap(~ CellType, scales = "free") +
    scale_colour_manual(values = c("white", "gray20")) +
    scale_shape_manual(values = c(21, 24)) + theme_minimal()
## Plot to compare method effectiveness
## Create dataframe for beta cell proportions and HbA1c levels
## NOTE(review): column 2 of the estimate matrices is assumed to be the
## beta-cell proportion — confirm against the ordering of `celltypes`.
m_prop_ana <- data.frame(pData(bulk_eset)[rep(seq_len(n_subjects), 2),
                                          phenotype_factors],
                         ct.prop = c(est_prop$Est.prop.weighted[, 2],
                                     est_prop$Est.prop.allgene[, 2]),
                         Method = factor(rep(methods, each = n_subjects),
                                         levels = methods))
## Restore the configured factor names (data.frame() may have mangled them);
## sized by phenotype_factors rather than a hard-coded 1:4.
colnames(m_prop_ana)[seq_along(phenotype_factors)] <- phenotype_factors
m_prop_ana <- subset(m_prop_ana, !is.na(m_prop_ana[phenotype_gene]))
m_prop_ana$Disease <- factor(sample_groups[( # nolint
    m_prop_ana[phenotype_gene] > 6.5) + 1], sample_groups)
m_prop_ana$D <- (m_prop_ana$Disease == # nolint
                   sample_disease_group) / sample_disease_group_scale
jitt_compare <- ggplot(m_prop_ana, aes_string(phenotype_gene, "ct.prop")) +
    geom_smooth(method = "lm",  se = FALSE, col = "black", lwd = 0.25) +
    geom_point(aes(fill = Method, color = Disease, stroke = D, shape = Disease),
               size = 2, alpha = 0.7) +  facet_wrap(~ Method) +
    ggtitle(compare_title) + theme_minimal() +
    scale_colour_manual(values = c("white", "gray20")) +
    scale_shape_manual(values = c(21, 24))
pdf(file = outfile_pdf, width = 8, height = 8)
plot_grid(jitter.fig, jitter.new, labels = "auto", ncol = 1, nrow = 2)
jitt_compare
dev.off()
## Summary table: one linear model of the cell-type proportion against the
## configured phenotype factors per estimation method, written to
## report_data/summ_<method>.txt.
for (meth in methods) {
    ##lm_beta_meth = lm(ct.prop ~ age + bmi + hba1c + gender, data =
    ##subset(m_prop_ana, Method == meth))
    lm_beta_meth <- lm(as.formula(
        paste("ct.prop", paste(phenotype_factors, collapse = " + "),
              sep = " ~ ")),
        data = subset(m_prop_ana, Method == meth))
    print(paste0("Summary: ", meth))
    capture.output(summary(lm_beta_meth),
                   file = paste0("report_data/summ_", meth, ".txt"))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.