blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
48719a5b1191cf5688ac9211c6432097ce841965
|
16e42803c4b1ecd097c2d0152f99f3890e12c1db
|
/app.R
|
c4e05acf0fa292287e24e9c7f7dec035f4bd8f60
|
[
"Apache-2.0"
] |
permissive
|
w4356y/GeoViz
|
a2fb9d63d137bd7d0e7f685a51b9fe0eefd32367
|
b1db8e4c4a7c9e2a993030836da9281b394106d7
|
refs/heads/master
| 2021-05-18T12:52:21.944384
| 2021-04-21T08:49:59
| 2021-04-21T08:49:59
| 251,196,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
app.R
|
# Launch the ShinyApp (Do not remove this comment)
# To deploy, run: rsconnect::deployApp()
# Or use the blue button on top of this file

# Load this package's functions from source (golem development convention).
pkgload::load_all()
# Set the locale to simplified Chinese. NOTE(review): "chs" is a Windows-only
# locale name; on Linux/macOS this call returns "" with a warning — confirm
# the deployment platform.
Sys.setlocale(category = "LC_ALL", locale = "chs")
# Tell golem to run the app in production mode.
options( "golem.app.prod" = TRUE)
#source('R/run_app.R')
# Start the Shiny application defined by this package.
run_app()
|
33a510b8e2e5b1fe385081985c214de421a4399f
|
132c868650be85a4eaf605832e6cd57d9aa8faf3
|
/R/summary_misc.R
|
6b2af635d716f544d41e4a8ba20fa98d49403c94
|
[] |
no_license
|
EvoNetHIV/RoleSPVL
|
9797fe146afa1e750ef1cfdaf231b62e0f19e848
|
113b55fedbdd2ac6627b751df3e102e801e36c5c
|
refs/heads/master
| 2021-09-14T17:46:30.726252
| 2018-05-16T22:16:13
| 2018-05-16T22:16:13
| 103,449,083
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,320
|
r
|
summary_misc.R
|
#' @title Per-timestep summary bookkeeping
#'
#' @description Updates the master simulation list \code{dat} at timestep
#' \code{at}: fills the per-agent viral-load list, assigns viral/SPVL founder
#' lineages on the final timestep, optionally saves the coital-acts table,
#' applies EpiModel's edges correction to the network formation coefficient,
#' prints progress to the console, snapshots the age distribution at fixed
#' fractions of the run, and records session/version metadata at the end.
#'
#' @param dat Master EvoNet data list (\code{param}, \code{popsumm},
#'   \code{pop}, \code{nwparam}, \code{control}, ...).
#' @param at Current timestep (integer, starting at 2).
#' @return The updated \code{dat} list.
#' @details Console output is suppressed entirely when \code{dat$param$hpc}
#'   is \code{TRUE}.
#' @export
summary_misc <- function(dat, at){
  #-----------------------------------------------------------------
  #1
  # Populate the per-agent vl/cd4 list, but only on reporting timesteps.
  if (dat$param$popsumm_frequency == 1) {
    if ((at == 2) || (at %% dat$param$print_frequency == 0)) {
      dat <- summary_vl_list(dat, at)
    }
  }
  if (dat$param$popsumm_frequency > 1) {
    if (at %% dat$param$popsumm_frequency == 0) {
      dat <- summary_vl_list(dat, at)
    }
  }
  #-----------------------------------------------------------------
  #2
  # At the last timestep, assign a "founder" lineage to each infected agent
  # (except the initially infected, who are themselves the founders) and
  # assign the founder SPVL.
  if (at == dat$param$n_steps) {
    viral_list <- summary_viral_lineage(dat$pop)
    dat$pop <- viral_lineage_fnx2(vl_list = viral_list, poplist = dat$pop)
    dat$pop <- summary_spvl_lineage(poplist = dat$pop)
  }
  #-----------------------------------------------------------------
  #3
  # QA/QC: save the per-timestep coital-acts table if flagged.
  if (dat$param$save_coital_acts)
    dat$coital_acts_list[[at - 1]] <- dat$discord_coital_df
  #-----------------------------------------------------------------
  #4
  # EpiModel's edges-correction: adjust the formation coefficient for the
  # change in population size. MSM only; hetero/bipartite part still TODO.
  if (dat$param$popsumm_frequency == 1) {
    old.num <- dat$popsumm$alive[at - 1]
    new.num <- dat$popsumm$alive[at]
    dat$nwparam[[1]]$coef.form[1] <- (dat$nwparam[[1]]$coef.form[1] +
                                        log(old.num) - log(new.num))
  } else {
    if (at %% dat$param$popsumm_frequency == 0 && at > 2) {
      # popsumm rows are recorded every popsumm_frequency steps, so map the
      # timestep onto the two most recent rows.
      index1 <- (at / dat$param$popsumm_frequency) + 1
      index2 <- (at / dat$param$popsumm_frequency)
      old.num <- dat$popsumm$alive[index2]
      new.num <- dat$popsumm$alive[index1]
      dat$nwparam[[1]]$coef.form[1] <- (dat$nwparam[[1]]$coef.form[1] +
                                          log(old.num) - log(new.num))
    }
  }
  #-----------------------------------------------------------------
  #5 (Version 5a -- refresh the screen every timestep; "\f" clears it)
  if (!dat$param$hpc && !dat$param$scrolling_output) {
    cat("\f")
    cat("\nEvoNet HIV Simulation")
    cat("\n----------------------------")
    cat("\nModel name:", dat$param$model_name)
    cat("\nSimulation:", dat$simulation)
    cat("\nTimestep: ", at, "/", dat$control$nsteps, sep = " ")
    cat("\nTotal population (alive):", dat$popsumm$alive[at])
    cat("\nMean SPVL (untreated):", dat$popsumm$mean_spvl_pop_untreated[at])
    cat("\nTotal infections:", dat$popsumm$total_infections_alive[at])
    cat("\nTotal susceptibles:", dat$popsumm$susceptibles[at])
    cat("\nAIDS deaths", sum(dat$popsumm$aids_deaths[1:at], na.rm = TRUE))
    cat("\nOther deaths", sum(dat$popsumm$natural_deaths[1:at], na.rm = TRUE))
    cat("\nAged-out", sum(dat$popsumm$aged_out[1:at], na.rm = TRUE))
    cat("\n----------------------------")
  }
  # 5 (Version 5b -- scrolling output; don't overwrite the screen)
  if (!dat$param$hpc && dat$param$scrolling_output) {
    if (at <= 2) {
      cat("\nStarting simulation of ", dat$control$nsteps, " time steps\n")
    }
    if (at <= 2) {
      cat ("sim\t time\t Tot\t Inf\t InfNot\t Sus\t VL \t SPVL\t dAIDS\t dNat\t AgeOut\t Pills\n")
    }
    if (dat$param$popsumm_frequency == 1) {
      if ((at == 2) || (at %% dat$param$print_frequency == 0)) {
        cat(
          dat$simulation, "\t",
          at, "\t",
          dat$popsumm$alive[at], "\t",
          dat$popsumm$total_infections_alive[at], "\t",
          dat$popsumm$total_infections_not_treated[at], "\t",
          dat$popsumm$susceptibles[at], "\t",
          round(dat$popsumm$mean_vl_pop_all[at], 2), "\t",
          round(dat$popsumm$mean_spvl_pop_untreated[at], 2), "\t",
          sum(dat$popsumm$aids_deaths[1:at], na.rm = TRUE), "\t",
          sum(dat$popsumm$natural_deaths[1:at], na.rm = TRUE), "\t",
          sum(dat$popsumm$aged_out[1:at], na.rm = TRUE), "\t",
          dat$popsumm$total_pills_taken[at], "\n")
      }
    }
    if (dat$param$popsumm_frequency > 1) {
      if (at %% dat$param$popsumm_frequency == 0) {
        time_index <- (at / dat$param$popsumm_frequency) + 1
        cat(
          dat$simulation, "\t",
          at, "\t",
          dat$popsumm$alive[time_index], "\t",
          dat$popsumm$total_infections_alive[time_index], "\t",
          dat$popsumm$total_infections_not_treated[time_index], "\t",
          dat$popsumm$susceptibles[time_index], "\t",
          round(dat$popsumm$mean_vl_pop_all[time_index], 2), "\t",
          round(dat$popsumm$mean_spvl_pop_untreated[time_index], 2), "\t",
          sum(dat$popsumm$aids_deaths[1:time_index], na.rm = TRUE), "\t",
          sum(dat$popsumm$natural_deaths[1:time_index], na.rm = TRUE), "\t",
          sum(dat$popsumm$aged_out[1:time_index], na.rm = TRUE), "\t",
          dat$popsumm$total_pills_taken[time_index], "\n")
      }
    }
    if (at == dat$control$nsteps) { # Remind users what the columns mean
      # Fix: the original reminder omitted the "VL" column present in the
      # header printed at the start of the run.
      cat ("sim\t time\t Tot\t Inf\t InfNot\t Sus\t VL \t SPVL\t dAIDS\t dNat\t AgeOut\t Pills\n")
    }
  }
  #----------------------------------------------
  #6
  # Snapshot the age distribution at 5 points of the run for graphing.
  # Status >= 0 presumably selects living agents — TODO confirm coding.
  if (at == 2)
    dat$age_list[[1]] <- dat$pop$age[which(dat$pop$Status >= 0)]
  if (at == round(dat$param$n_steps * .25))
    dat$age_list[[2]] <- dat$pop$age[which(dat$pop$Status >= 0)]
  if (at == round(dat$param$n_steps * .50))
    dat$age_list[[3]] <- dat$pop$age[which(dat$pop$Status >= 0)]
  if (at == round(dat$param$n_steps * .75))
    dat$age_list[[4]] <- dat$pop$age[which(dat$pop$Status >= 0)]
  if (at == dat$param$n_steps)
    dat$age_list[[5]] <- dat$pop$age[which(dat$pop$Status >= 0)]
  #-----------------------------------------------------------------
  #7
  # Attach sessionInfo to the output object for reproducibility.
  if (at == dat$param$n_steps) {
    dat$sessionInfo <- sessionInfo()
  }
  #-----------------------------------------------------------------
  #8
  # Record the evonet (svn) version in the output object.
  # Note: the working directory needs to be a versioned checkout.
  if (at == dat$param$n_steps) {
    # Bug fix: the original stored system()'s exit status (0) instead of the
    # version string, and invoked the command twice. intern = TRUE captures
    # svnversion's stdout; try() guards against the tool being absent.
    aa <- try(suppressWarnings(system("svnversion", intern = TRUE)),
              silent = TRUE)
    if (!inherits(aa, "try-error"))
      dat$evonet_version <- aa
  }
  #-----------------------------------------------------------------
  return(dat)
}
|
4c469f6040ad0440f583a6027983deb43a7619ad
|
0ff25191d3e874528463d244afaa140a258eaea0
|
/plot1.R
|
713ddb75c697b8a5a02e414f07ba4624018ad96f
|
[] |
no_license
|
viswanth/ExData_Plotting1
|
2065e155bf3480275692fc7840b92e1a7dd2546a
|
c1ceac0154a9a633c6352c4b455cfd4797e0073d
|
refs/heads/master
| 2020-04-30T09:23:18.129608
| 2014-07-11T19:11:13
| 2014-07-11T19:11:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
plot1.R
|
## plot1.R — histogram of Global Active Power for 2007-02-01 and 2007-02-02.
##
## This script assumes that the household_power_consumption.txt file is present
## in the working directory. It also assumes the system has enough memory to
## handle this large data set.
fileName <- "household_power_consumption.txt"
# "?" marks missing values in the source file; fields are semicolon-separated.
pwrData <- read.table(fileName, header = TRUE, sep = ";", na.strings = "?")
# Keep only the two target days (dates are in d/m/yyyy format).
reqData <- subset(pwrData, pwrData$Date == "1/2/2007" | pwrData$Date == "2/2/2007")
png(file = "plot1.png")
# Fix: the original hist() call contained a stray empty argument (", ,").
hist(reqData$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
772c37092b44e23a336c6201003a98de07764792
|
42ffb55368ff15dbdb7e33782119cb1876334147
|
/FoodSecurity/FS_SimpleIndices.R
|
3c363a3fa6a94d738fb0ce6b4fb3921557ff35b4
|
[] |
no_license
|
austindrichards/vs-indicators-calc
|
ccafee0b3061cb63a78edfe9fdbc75c97d7e95f2
|
7d275ae0e763961eb0e3cc6c0793a14a74fa31b7
|
refs/heads/master
| 2020-04-18T16:08:18.177105
| 2018-02-01T22:14:04
| 2018-02-01T22:14:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,050
|
r
|
FS_SimpleIndices.R
|
# FS_SimpleIndices.R — compute household food-security indices from the
# vitalsigns Postgres database and aggregate them to landscape level.
library(dplyr)
library(lubridate)
setwd('../FoodSecurity/')
# DB credentials are read from a settings CSV one directory up.
pg_conf <- read.csv('../rds_settings', stringsAsFactors=FALSE)
vs_db <- src_postgres(dbname='vitalsigns', host=pg_conf$host,
user=pg_conf$user, password=pg_conf$pass,
port=pg_conf$port)
# hh_i031
# How many meals, including breakfast are taken per day in your household? (Adults: over 5 Years Old)
#
# hh_i08
# In the last 12 months, have you been faced with a situation when you did not have enough food to feed the household?
fs <- tbl(vs_db, 'c__household') %>%
collect
# HFIAS component questions (weights shown are the questionnaire's groupings):
# 2 Rely on less preferred foods?
# 2 Limit the variety of foods eaten?
# 5 Limit portion size at meal-times?
# 5 Reduce number of meals eaten in a day?
# 7 Restrict consumption by adults for small children to eat?
# 7 Borrow food, or rely on help from a friend or relative?
# 10 Have no food of any kind in your house-hold?
# 10 Go a whole day and night without eating anything?
# Count of months flagged insecure across the two 12-month question blocks.
fs$months_insecurity <- rowSums(fs[ , c(paste0('hh_i09a_', seq(1,12)), paste0('hh_i09b_', seq(1,12)))], na.rm=T)
fs$hfias <- rowSums(fs[ , c('hh_i02_1', 'hh_i02_2', 'hh_i02_3', 'hh_i02_4', 'hh_i02_5', 'hh_i02_6', 'hh_i02_7', 'hh_i02_8')], na.rm=T)
# Food-group columns used for the dietary-diversity score.
f_groups <- c("hh_k2_8_a", "hh_k2_8_b", "hh_k2_8_c", "hh_k2_8_d", "hh_k2_8_e",
"hh_k2_8_f","hh_k2_8_g", "hh_k2_8_h", "hh_k2_8_i", "hh_k2_8_j")
# Diversity in [0, 1]: mean over groups of (days consumed / 7).
# NOTE(review): the /7 presumably converts days-per-week to a fraction —
# confirm against the questionnaire coding.
fs$diversity <- rowSums(fs[f_groups] / 7, na.rm=T) / length(f_groups)
fs <- fs %>%
select(country, landscape_no, hh_refno, round, shortage_year=hh_i08,
months_insecurity, number_meals=hh_i031, hfias, diversity,
hh_interview_date)
##Nonfood spending
##Do we need Sec L?
nfs <- tbl(vs_db, "c__household_expenditure") %>%
collect
# Annualize reported amounts (52.14 weeks per year).
nfs$hh_paid[nfs$hh_period=='week'] <- nfs$hh_paid[nfs$hh_period=='week']*52.14
# NOTE(review): amounts already reported per *year* are multiplied by 12 here —
# this looks like it was meant for a 'month' period; confirm against the
# expenditure schema before trusting Nonfood.Spending.
nfs$hh_paid[nfs$hh_period=='year'] <- nfs$hh_paid[nfs$hh_period=='year']*12
nfs <- nfs %>%
group_by(country, landscape_no, hh_refno, round) %>%
summarize(Nonfood.Spending = sum(hh_paid, na.rm=T))
##Food Spending
food <- tbl(vs_db, "c__household_food") %>%
collect
# FCV = purchased (hh_k_04) plus own-production value (hh_k_05a), per week,
# then annualized with *52.14.
food <- food %>%
rowwise() %>%
mutate(FCV = sum(hh_k_04, hh_k_05a, na.rm=T)) %>%
group_by(country, landscape_no, hh_refno, round) %>%
summarise(Food.Consumption.Value = sum(FCV, na.rm = TRUE)*52.14, Food.Spending = sum(hh_k_04, na.rm=T)*52.14)
#Combine and aggregate (full outer merge across the three tables)
out <- Reduce(function(x, y){merge(x, y, all=T)}, list(fs, nfs, food))
out$Food_As_Percent_Total_Spending <- (out$Food.Spending/(out$Food.Spending + out$Nonfood.Spending))*100
exchange_rates <- read.csv('../exchange_rates_2009usd.csv')
exchange_rates$date <- mdy(exchange_rates$date)
#match rate to interview date and country
out$date <- ymd(ceiling_date(out$hh_interview_date, "week")) #find the next Sunday
out<-merge(out, exchange_rates, all.x=T, all.y=F)
rateadjust <- c('Nonfood.Spending', 'Food.Spending', 'Food.Consumption.Value')
# Convert to USD via the matched exchange rate.
# NOTE(review): the *1.1 factor is undocumented (inflation adjustment to 2009
# USD?) — confirm and name it as a constant.
out[ , rateadjust] <- (out[ , rateadjust]/out$rate)*1.1
# Landscape-level means of the household indices.
out_ls <- out %>% group_by(country, landscape_no) %>%
summarize(avg_meals = mean(number_meals, na.rm=T),
Percent_Shortage_Past_Year = mean(shortage_year, na.rm=T)*100,
Mean_Months_Insecurity = mean(months_insecurity, na.rm=T),
Mean_Diet_Diversity = mean(diversity, na.rm=T),
Mean_Nonfood_Spending = mean(Nonfood.Spending, na.rm=T),
Mean_Food_Consumption_Value = mean(Food.Consumption.Value, na.rm=T),
Mean_Food_Spending = mean(Food.Spending, na.rm=T),
Food_As_Percent_Total_Spending = mean(Food_As_Percent_Total_Spending, na.rm=T))
#########################################
#Write
#################################
library(aws.s3)
aws.signature::use_credentials()
#' Upload a data frame to the vs-cdb-indicators S3 bucket as CSV.
#'
#' Serializes `df` into an in-memory raw connection (no temp file on disk)
#' and uploads the resulting bytes with aws.s3::put_object().
#'
#' @param df   Data frame to upload.
#' @param name Object key (file name) to create in the bucket.
writeS3 <- function(df, name){
  zz <- rawConnection(raw(0), "r+")
  # Fix: release the connection even if the upload throws (the original only
  # closed it on the success path, leaking the connection on error).
  on.exit(close(zz), add = TRUE)
  write.csv(df, zz, row.names = FALSE)
  aws.s3::put_object(file = rawConnectionValue(zz),
                     bucket = "vs-cdb-indicators", object = name)
}
writeS3(out, 'FoodSecurity_HH.csv')
writeS3(out_ls, 'FoodSecurity_Landscape.csv')
|
bcc0a0ad73eb3bd46e3984c4e33969144713ea41
|
432f56ada3ff869f709f001cdaf808be24bbdd1d
|
/R/bar_plot_update_manual.R
|
c06ea571fa80f5101283dbb3177b8c6ed6646d58
|
[] |
no_license
|
juyeki/MetabR
|
30c6f1e711997ffa5c6c7c6b8f694c0a9f500a13
|
fde5baded23c4a602f149396a502ac715995aa16
|
refs/heads/master
| 2020-08-09T05:08:46.545707
| 2020-07-31T19:21:48
| 2020-07-31T19:21:48
| 214,005,006
| 3
| 3
| null | 2020-03-02T07:28:44
| 2019-10-09T19:23:25
|
R
|
UTF-8
|
R
| false
| false
| 2,330
|
r
|
bar_plot_update_manual.R
|
#' Dodged bar plot with a manual fill palette
#'
#' Layers dodged bars (with a highlight outline driven by the
#' `under_50_percent` aesthetic), facets, error bars, a theme, and a manual
#' fill palette onto an existing ggplot object.
#'
#' @param a ggplot object (aesthetics already mapped) to add layers to.
#' @param met Not used by this function; kept for call-site compatibility.
#' @param Title Plot title.
#' @param x X-axis label.
#' @param y Y-axis label.
#' @param axis.text.x ggplot2 element for the x-axis text theme setting.
#' @param scales Passed through to `facet_wrap(scales = ...)`.
#' @param type Not used by this function; kept for call-site compatibility.
#' @param num_cond Number of conditions; required when `index` is supplied.
#' @param index Optional list of three integer vectors naming the conditions
#'   coloured yellow, grey, and with the extra QC palette respectively; all
#'   remaining conditions draw from the default palette in order.
#' @return The ggplot object with all layers added.
#' @export
#'
bar_plot_update_manual <- function(a, met, Title, x, y, axis.text.x, scales, type = NULL, num_cond=NULL,index=NULL)
{
  # Default palette, consumed in order by conditions not listed in `index`.
  col <- c("turquoise","red","plum4","steelblue1","red4","springgreen2","slateblue2","sienna1","darkgreen","lightpink1","navy","olivedrab1",
           "orangered","darkslateblue","lightseagreen","magenta2","royalblue","yellowgreen","lightsalmon","cyan","maroon1","indianred3","mediumseagreen",
           "slateblue3","hotpink","lemonchiffon1","orangered4","lightcoral","tomato")
  if (!is.null(index))
  {
    j <- 1  # next unused default colour
    k <- 1  # next unused QC colour
    extra_qc <- c("peachpuff1", "seashell1", "wheat2", "snow1")
    res <- character(0)
    for (i in seq_len(num_cond))
    {
      if (i %in% index[[1]])
        res <- c(res, "yellow1")
      else if (i %in% index[[2]])
        res <- c(res, "grey45")
      else if (i %in% index[[3]])
      {
        res <- c(res, extra_qc[k])
        k <- k + 1
      }
      else
      {
        res <- c(res, col[j])
        j <- j + 1
      }
    }
    col <- res
  }
  # Second geom_bar draws the conditional outline; linetype "58" is a dashed
  # pattern. guide = FALSE replaces the original's unsafe `F` literal.
  a + geom_bar(position="dodge", stat="identity", width=0.9) +
    geom_bar(aes(linetype=under_50_percent,color = under_50_percent, size=under_50_percent),position="dodge", stat="identity", width=0.9) +
    guides(linetype=FALSE)+
    scale_size_manual(values=c(0.3,0.8), guide = FALSE) + scale_colour_manual(values = c("black", "gray29"), guide = FALSE) +
    facet_wrap( ~ Name, scales=scales) +
    theme_bw() +scale_linetype_manual(values=c("solid","58"))+
    labs(x=x, y=y, title=Title, fill=element_blank()) +
    theme(
      plot.title=element_text(size=20, face="bold", vjust=2), #sets title properties
      axis.title=element_text(size=16, lineheight=20, face="bold"), #sets theme for axis font
      axis.text=element_text(size=11, face="bold"),
      axis.text.x=axis.text.x,
      legend.title=element_text(face="bold", size=12),
      legend.text=element_text(face="bold",size=12), #sets legend text
      strip.text=element_text(face="bold", size=15), #sets theme for title in facets
      panel.grid.major=element_blank()) +
    # Error bars span mean to mean + sd only (one-sided).
    geom_errorbar(aes(ymin=RelAmounts_Ave, ymax=RelAmounts_Ave+RelAmounts_Std), position=position_dodge(0.9), width=.2)+
    scale_fill_manual(values = col)
}
|
bf891c196f161d7243e6d21d9da82844db70dafc
|
d7edad430461de78735dcb8625817bb98164688b
|
/retrosheet/framing_gputools.R
|
f6b9376c428317e50f4a06bf8741b58a2008b2c8
|
[] |
no_license
|
drbryan/baseball-public
|
4a7d7c54c749a77abc4434e1ab3998f46c09b417
|
f7d24dd498ecb7e67112e66dd25a7c5d10d38c95
|
refs/heads/master
| 2020-07-10T11:31:34.733378
| 2015-02-12T17:16:51
| 2015-02-12T17:16:51
| 29,069,650
| 1
| 0
| null | 2015-01-10T20:15:00
| 2015-01-10T20:15:00
| null |
UTF-8
|
R
| false
| false
| 577
|
r
|
framing_gputools.R
|
## framing_gputools.R
## Fits a GPU-accelerated logistic regression for called-strike probability
## ("framing") and writes all model output to framing_gputools.txt.
sink("framing_gputools.txt")  # divert console output to the log file
library(gputools)
#source("read_postgresql.R")
source("read_csv.R")  # loads the `pitches` data frame
dim(pitches)
# Model terms enter as factors so each level gets its own coefficient.
pitches$year <- as.factor(pitches$year)
pitches$field <- as.factor(pitches$field)
pitches$count <- as.factor(pitches$count)
pitches$uhp_id <- as.factor(pitches$uhp_id)
pitches$c_id <- as.factor(pitches$c_id)
#pitches$b_id <- as.factor(pitches$b_id)
#pitches$p_id <- as.factor(pitches$p_id)
# Called-strike probability by year, venue, count, catcher and umpire.
model <- cs_p ~ year + field + count + c_id + uhp_id
outcome <- gpuGlm(model, data = pitches, weights = n,
                  family = binomial(link = "logit"))
outcome
summary(outcome)
AIC(outcome)
# Fix: restore console output so the sink is flushed and closed before the
# session terminates (the original left the diversion open).
sink()
q("no")
|
284acf0a0e4fcb3dd686428029e2171f4200d607
|
2a2b4bcf198c0a93bd5bc95c65959b1340a8f033
|
/08_Final_Applied_Algorithm.R
|
480066efe34d60d3ac5ea43db4d131063eb2448b
|
[] |
no_license
|
AdrienHdz/SUMO_Travel_time_estimation
|
b5e94f970ae5a392b0e6c42b8223f56ad40ba865
|
4a27033046fab71b85ffd51850294a7235b00c9c
|
refs/heads/master
| 2023-01-24T03:01:48.760962
| 2020-11-28T20:54:06
| 2020-11-28T20:54:06
| 287,548,514
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,938
|
r
|
08_Final_Applied_Algorithm.R
|
# Created by Adrien Hernandez
#
# Fits a travel-time estimation model (traveltimeCLT) on SUMO-simulated trip
# data and predicts travel times for a held-out test set.
#install.packages("devtools")
#devtools::install_github("melmasri/traveltimeHMM")
# NOTE(review): this installs from GitHub on every run of the script —
# consider moving it to a one-time setup step.
devtools::install_github("AdrienHdz/traveltimeCLT")
library(traveltimeHMM)
library(traveltimeCLT)
library(dplyr)
library(tidyr)
library(data.table)
# Loading data
Sumo_data <- read.csv("Quebec_data_2/Sumo_data.csv")
# Recover linear speed from the stored log-speed column.
Sumo_data$speed <- exp(Sumo_data$logspeed)
# Transforming variables
Sumo_data <- as.data.table(Sumo_data)
Sumo_data$timeBins <- as.character(Sumo_data$timeBins)
# Creating test trips (seed fixed for a reproducible train/test split)
set.seed(2020)
test.trips.Sumo <- create_test_trips(M = 500, Sumo_data, min.n = 1)
# Splitting data into train and test set
test = Sumo_data[trip %in% test.trips.Sumo]
train = Sumo_data[!trip %in% test.trips.Sumo]
# Counting the number of trips in each set
print(paste0("Number of trips inside the test set: ", test[, 1, trip][, sum(V1)]))
print(paste0("Number of trips inside the train set: ", train[, 1, trip][, sum(V1)]))
print(paste0("Number of trips in total: ", test[, 1, trip][, sum(V1)] + train[, 1, trip][, sum(V1)]))
# Time-bin rules: morning rush (MR) and evening rush (ER) on all days;
# everything else falls into "Other".
myrules = list(
list(start='6:30', end= '9:00', days = 0:6, tag='MR'),
list(start='15:00', end= '18:00', days = 0:6, tag='ER')
#list(start='7:00', end= '10:00', days = 0:6, tag='MR'),
#list(start='12:00', end='15:00', days = 0:6, tag='NR'),
#list(start='17:00', end= '20:00', days = 0:6, tag='ER')
)
mytimebins = c("MR", "ER", "Other")
# We run the travel time estimation method
ttCLTmodel <- traveltimeCLT(data.train = train,
M = 1000,
L = 2,
bin = "MR",
rules = myrules,
data.timebins = mytimebins)
# Predict travel times for the held-out trips in the same time bin.
ttCLTresults <- predict_traveltimeCLT(ttCLTmodel,
test,
"MR",
myrules)
|
b659a56af731e78188a4178c2cd989c60ea1a931
|
9df69dbe58ff3a85b9e62b3249b36b3bdc3f828e
|
/P556HW4/Prob2-KNN_Helper-CalcDist-InfNorm.R
|
39f1e93a44a384040e9f3560415ab88703586cf7
|
[] |
no_license
|
chuckjia/P556-AppliedML-Fall2017
|
e14ab07d56b75afd70cebb017e62e762e8ce584e
|
5f5fe35363dc86bccbd48b4edcf91a87b9d0a0ff
|
refs/heads/master
| 2021-09-27T22:09:51.151350
| 2018-11-12T05:10:38
| 2018-11-12T05:10:38
| 112,528,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80
|
r
|
Prob2-KNN_Helper-CalcDist-InfNorm.R
|
# Infinity-norm (Chebyshev) distance from each training row to the query
# point: max over features of |x_ij - dataPt_j|. scale(..., scale = FALSE)
# subtracts dataPt from every row of tr_feat.
# Fix: abs() is required for a true inf-norm — the original took the max of
# *signed* differences, which is not a distance (it can even be negative).
allDist <- apply(abs(scale(tr_feat, center = as.numeric(dataPt), scale = FALSE)), 1, max)
|
4336c797f3a2b1ba20ba95c6db6bf863c4f935d6
|
11084e0b1c39feca6d647deebe3baebf92e043aa
|
/man/shypFun.01310.Rd
|
3fe808d088c9728ad10a295b68dd456fddc8a170
|
[] |
no_license
|
cran/spsh
|
c2b89af235dfad8556f3057ecda088c09895b774
|
2139fe8faaa791eebad1b1bc1eba6bf0949fcd47
|
refs/heads/master
| 2020-12-22T19:05:06.246628
| 2020-04-06T15:30:02
| 2020-04-06T15:30:02
| 236,901,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,473
|
rd
|
shypFun.01310.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shypFun.01310.R
\name{shypFun.01310}
\alias{shypFun.01310}
\title{van Genuchten-Mualem trimodal Soil Hydraulic Property Model}
\usage{
shypFun.01310(p, h)
}
\arguments{
\item{p}{vector of the 12 van Genuchten-Mualem model parameters, order is sensitive and has to be given as:
\tabular{lll}{
\code{thr}\tab{residual water content [cm cm-3]}\cr
\code{ths}\tab{saturated water content [cm cm-3]}\cr
\code{alf1}\tab{van Genuchten alpha [cm-3]}\cr
\code{n1}\tab{van Genuchten n [-]}\cr
\code{w1}\tab{fraction of the first modality [-], \code{w2} is internally computed as \code{w2 = 1-w1}}\cr
\code{alf2}\tab{van Genuchten alpha of the second modality[cm-3]}\cr
\code{n2}\tab{van Genuchten n of the second modality [-]}\cr
\code{w2}\tab{fraction of the second modality [-], \code{w3} is internally computed as \code{w3 = 1-w1-w2}, in \code{resFun} ensures \code{w3 >=0} }\cr
\code{alf3}\tab{van Genuchten alpha of the third modality[cm-3]}\cr
\code{n3}\tab{van Genuchten n of the third modality [-]}\cr
\code{Ks}\tab{saturated conductivity [cm d-1]}\cr
\code{tau}\tab{exponent of \code{Se} in the capillary conductivity model, sometimes denoted in the literature as \code{l} [-]}
}}
\item{h}{pressure heads [cm] for which the corresponding retention and conductivity values are calculated.}
}
\value{
returns a \code{list} with calculations at specified \code{h}:
\item{theta}{calculated volumetric moisture content}
\item{Se}{calculated saturation}
\item{cap}{specific water capacity function}
\item{psd}{pore size distribution}
\item{Kh}{Hydraulic conductivity values}
}
\description{
trimodal van Genuchten-Mualem functions for the retention and hydraulic conductivity curves \insertCite{Durner.1994}{spsh}.
}
\details{
The function solves analytically the spec. water capacity function and integral to the capillary bundle model.
}
\examples{
p <- c("thr" = 0.1, "ths" = 0.4, alf1 = .5, "n1" = 3,
"w1" = .5, "alf2" = 0.01, "n2" = 2,
"w2" = .3, "alf3" = 0.01, "n3" = 1.6,
"Ks" = 100, "tau" = .5)
h <- 10^seq(-2, 6.8, length = 197)
shyp.L <- shypFun.01310(p, h)
}
\references{
\insertRef{Durner.1994}{spsh}\cr
}
\seealso{
\insertRef{Weber.2019}{spsh}\cr
\insertRef{Weber.2017a}{spsh}\cr
\insertRef{Weber.2017b}{spsh}\cr
}
\author{
Tobias KD Weber , \email{tobias.weber@uni-hohenheim.de}
}
|
7eafc8c0fbf209ca6485c0db222dc9f6302cecac
|
73dec85ac61fa6d651b1cdad804f01f79c5047f0
|
/Server-ShinyDashboard-OKCupid.R
|
48dcbb6826e97a13ea2bd6e537bb7f449fcd50df
|
[] |
no_license
|
dmassihp/StatCodeEx
|
c8e6c30257ba5e23b47740f5ae7fa48ad4ef2220
|
2fa391130fe682be047ada4ab8ac4a06e89a2b36
|
refs/heads/master
| 2021-10-09T21:54:38.433471
| 2019-01-03T22:25:22
| 2019-01-03T22:25:22
| 118,522,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,419
|
r
|
Server-ShinyDashboard-OKCupid.R
|
library(shiny)
library(ggplot2)
library(wordcloud)
library(tm)
library(SnowballC)
shinyServer(function(input, output) {
# Shared ggplot2 theme fragment: enlarges title, axis and legend text for
# readability in the dashboard. The returned theme() is added (+) to plots.
webapp_theme <- function(){
theme(
plot.title = element_text(size = 18),
axis.text = element_text(size=12),
axis.title = element_text(size=14),
legend.title = element_text(size=14),
legend.text = element_text(size=14)
)
}
okcupid <- read.csv("profiles.csv", header = T)
##Jun---------------------------------------------------------------------------------------------
okcupid2 <- okcupid
okcupid2$diet <- as.character(okcupid2$diet)
okcupid2$religion <- as.character(okcupid2$religion)
okcupid2$offspring <- as.character(okcupid2$offspring)
religion <- c("agnosticism", "atheism", "buddhism",
"catholicism", "christianity", "hinduism",
"islam", "judaism", "other")
religion_caps <- c("Agnosticism", "Atheism", "Buddhism",
"Catholicism", "Christianity", "Hinduism",
"Islam", "Judaism", "Other")
for (i in 1:9)
{
curr_religion <- grepl(pattern = religion[i], x = okcupid2$religion)
okcupid2$religion[curr_religion] <- religion_caps[i]
}
generaldiet <- okcupid2
generaldiet <- generaldiet[!(is.na(generaldiet$diet) |
generaldiet$diet == ""), ]
diet <- c("anything", "halal", "kosher",
"vegetarian", "vegan", "other")
diet_caps <- c("Anything", "Halal", "Kosher",
"Vegetarian", "Vegan", "Other")
for (i in 1:6)
{
curr_diet <- grepl(pattern = diet[i], x = generaldiet$diet)
generaldiet$diet[curr_diet] <- diet_caps[i]
}
vegetarian <- subset(okcupid2,
diet == "strictly vegetarian" |
diet == "mostly vegetarian" |
diet == "vegetarian")
vegetarian <- vegetarian[!(is.na(vegetarian$religion) |
vegetarian$religion==""), ]
no_kids <- grepl(pattern = "doesn’t", x = vegetarian$offspring)
vegetarian$offspring[no_kids] = "Doesn't Have Children"
one_kid <- grepl(pattern = "has a kid", x = vegetarian$offspring)
vegetarian$offspring[one_kid] = "Has a Child"
multiple_kids <- grepl(pattern = "has kids", x = vegetarian$offspring)
vegetarian$offspring[multiple_kids] = "Has Children"
vegetarian <- subset(vegetarian,
offspring %in% c("Doesn't Have Children",
"Has a Child",
"Has Children"))
output$religion_plot <- renderPlot({
ggplot(vegetarian) +
geom_bar(aes(x = religion), fill = "lightgreen") +
ggtitle("Number of Vegetarians Based on Religion") +
labs(x = "Religion", y = "Number of Vegetarians")
})
output$age <- renderPlot({
if (input$age_input == 1)
age_plot <- ggplot(vegetarian) +
geom_density(aes(x = age)) + xlim(0, 80) +
ggtitle("Age Distribution of Vegetarians") +
labs(x = "Age", y = "Density") +
scale_colour_discrete(name = "")
else if (input$age_input == 2) {
age_plot <- ggplot(vegetarian) +
geom_density(aes(x = age, colour = offspring)) +
xlim(0, 80) +
ggtitle("Age Distribution of Vegetarians based on Offspring") +
labs(x = "Age", y = "Density") +
theme(legend.position = c(.8, .8),
legend.title=element_blank())
}
else {
age_plot <- ggplot(generaldiet) +
geom_density(aes(x = age, colour = diet)) +
xlim(0, 80) +
ggtitle("Age Distribution based on Diet") +
labs(x = "Age", y = "Density") +
theme(legend.position = c(.8, .8),
legend.title=element_blank())
}
age_plot
})
##Jim---------------------------------------------------------------------------------------------
profiles <- okcupid
height_weight <- data.frame(
diet = profiles$diet,
height = profiles$height,
body_type = profiles$body_type,
is_vegetarian = TRUE
)
height_weight$is_vegetarian[profiles$diet != "strictly vegetarian" &
profiles$diet != "mostly vegetarian" &
profiles$diet != "vegetarian" &
profiles$diet != "vegan" &
profiles$diet != "mostly vegan" &
profiles$diet != "strictly vegan"] = FALSE
height_weight <- height_weight[profiles$diet != "",]
profiles <- profiles[profiles$diet == "strictly vegetarian" |
profiles$diet == "mostly vegetarian" |
profiles$diet == "vegetarian",]
profiles_non_v <- profiles[profiles$diet != "strictly vegetarian" &
profiles$diet != "mostly vegetarian" &
profiles$diet != "vegetarian" &
profiles$diet != "vegan" &
profiles$diet != "mostly vegan" &
profiles$diet != "strictly vegan" &
profiles$diet != "",]
smoke_drink <- data.frame(
smokes = profiles$smokes,
drinks = profiles$drinks
)
smoke_drink <- smoke_drink[levels(smoke_drink$smokes)[smoke_drink$smokes] != "" &
levels(smoke_drink$drinks)[smoke_drink$drinks] != "",]
smoke_drink$smokes[smoke_drink$smokes != 'no'] <- 'yes'
order_var <- vector(length = length(smoke_drink$drinks))
order_var[smoke_drink$drinks == 'not at all'] <- 1
order_var[smoke_drink$drinks == 'rarely'] <- 2
order_var[smoke_drink$drinks == 'socially'] <- 3
order_var[smoke_drink$drinks == 'often'] <- 4
order_var[smoke_drink$drinks == 'very often'] <- 5
order_var[smoke_drink$drinks == 'desperately'] <- 6
output$smoke_drink_plot_1 <- renderPlot({
smoke_drink_hist <- ggplot(data = smoke_drink) + aes(x = reorder(drinks, order_var), fill = smokes) +
geom_bar(position = "dodge") +
labs(title = "Histogram of Smoking Habit Given Drinking Habit", x="Drinking Habit")
if(input$sqrt_scale) {
smoke_drink_hist <- ggplot(data = smoke_drink) + aes(x = reorder(drinks, order_var), fill = smokes) +
geom_bar(position = "dodge") +
labs(title = "Histogram of Smoking Habit Given Drinking Habit (Sqrt Scale)", x="Drinking Habit") +
coord_trans(y = "sqrt")
}
smoke_drink_hist + webapp_theme()
})
smoke_drink_tab <- table(smoke_drink$smokes, smoke_drink$drinks)
smoke_drink_tab <- smoke_drink_tab[c(2, 6), 2:7]
smoke_drink_proportion <- smoke_drink_tab[2,] / (smoke_drink_tab[1,] + smoke_drink_tab[2,])
output$smoke_drink_plot_2 <- renderPlot({
ggplot() + aes(x = reorder(c("desperately", "not at all", "often", "rarely", "socially", "very often"),c(6, 1, 4, 2, 3, 5)), y = smoke_drink_proportion) +
geom_bar(stat = "identity", fill = "#408040") +
labs(title = "Proportion of Smokers Given Drinking Habit", x = "Drinking Habit", y = "Proportion") +
webapp_theme()
})
output$smoke_drink_plot_3 <- renderPlot({
mosaicplot(smoke_drink_tab[,c(2,4,5,3,6,1)], shade = T, las=1, main="Association Between Drinking and Smoking Habits", xlab="Smoke", ylab="Drink", cex=1.25)
})
output$height_weight_plot_1 <- renderPlot({
if(input$show_stats){
ggplot(data = height_weight) + aes(x = is_vegetarian, y = height) +
geom_boxplot(fill = c("#da9826", "#408040")) +
labs(title="Height Distributions of Vegetarians vs Non-vegetarians", x="Vegetarian", y="Inches") +
webapp_theme() +
geom_text(size=6,aes(x = 1, y = 25, label=paste("Median:", median(height_weight$height[height_weight$is_vegetarian == FALSE])))) +
geom_text(size=6,aes(x = 1, y = 21, label=paste("Mean:", round(mean(height_weight$height[height_weight$is_vegetarian == FALSE]), digits=1)))) +
geom_text(size=6,aes(x = 2, y = 25, label=paste("Median:", median(height_weight$height[height_weight$is_vegetarian == TRUE])))) +
geom_text(size=6,aes(x = 2, y = 21, label=paste("Mean:", round(mean(height_weight$height[height_weight$is_vegetarian == TRUE]), digits=1))))
}
else{
ggplot(data = height_weight) + aes(x = is_vegetarian, y = height) +
geom_boxplot(fill = c("#da9826", "#408040")) +
labs(title="Height Distributions of Vegetarians vs Non-vegetarians", x="Vegetarian", y="Inches") +
webapp_theme()
}
})
height_weight <- height_weight[height_weight$body_type != "" &
height_weight$body_type != "rather not say",]
body_veg_tab <- table(height_weight$is_vegetarian, height_weight$body_type)
body_veg_tab <- body_veg_tab[,c(-1,-10)]
body_veg_proportion <- body_veg_tab[2,] / (body_veg_tab[1,] + body_veg_tab[2,])
output$height_weight_plot_2 <- renderPlot({
if(input$prop_veg){
ggplot() + aes(x = colnames(body_veg_tab), y = body_veg_proportion) +
geom_bar(stat = "identity", fill = "#408040") +
labs(title = "Proportion of Vegetarians Given Body Type", x = "Body Type", y = "Proportion") +
webapp_theme() +
theme(
axis.text = element_text(size=10)
)
}
else{
ggplot(data = height_weight) + aes(x = body_type, fill = is_vegetarian) +
geom_bar(position = "dodge") +
labs(title="Distribution of Body Type and Diet", x="Body Type") +
webapp_theme() +
scale_fill_manual("Vegetarian", values = c("#da9826", "#408040")) +
theme(
axis.text = element_text(size=10)
)
}
})
##Jennifer----------------------------------------------------------------------------------------
okcupid_all <- okcupid
okcupid_veg <- subset(okcupid, grepl("vegetarian", diet))
# Build a cleaned text corpus from vegetarian users' essay responses, for
# use with wordcloud().
#
# essay_num: "4" (essay4; the prompt words book/movie/show/music/food/favorite
#            are stripped first) or "5" (essay5).
# gender:    "m" or "f", matched against the sex column of okcupid_veg.
#
# Returns a tm corpus with HTML artifacts, English stopwords and punctuation
# removed, and words stemmed.
essay <- function(essay_num, gender) {
okcupid_gender <- subset(okcupid_veg, sex==gender)
if(essay_num == "4") {
essay <- Corpus(VectorSource(okcupid_gender$essay4))
essay <- lapply(essay, function(x)
gsub("book|movie|show|music|food|favorite", " ", x))
}
if(essay_num == "5") {
essay <- Corpus(VectorSource(okcupid_gender$essay5))
}
# Strip HTML/profile markup tokens left over from the scraped text.
plain_essay <- lapply(essay, function(x)
gsub("ilink|<br />|href|class|amp|nbsp", " ", x))
plain_essay <- Corpus(VectorSource(plain_essay))
plain_essay <- tm_map(plain_essay, PlainTextDocument)
plain_essay <- tm_map(plain_essay, removeWords, stopwords('english'))
plain_essay <- tm_map(plain_essay, removePunctuation)
plain_essay <- tm_map(plain_essay, stemDocument)
return(plain_essay)
}
  # Precompute the cleaned corpora once at startup (essays 4 and 5, by sex)
  # so the renderPlot handlers below stay fast.
  essay_4_m <- essay("4", "m")
  essay_4_f <- essay("4", "f")
  essay_5_m <- essay("5", "m")
  essay_5_f <- essay("5", "f")
output$body_type_plot <- renderPlot({
okcupid <- okcupid[okcupid$diet != "",]
okcupid <- okcupid[okcupid$body_type != "",]
okcupid$gen_diet <- ifelse(okcupid$diet == "vegetarian" |
okcupid$diet == "mostly vegetarian" |
okcupid$diet == "strictly vegetarian", "vegetarian",
ifelse(okcupid$diet == "vegan" |
okcupid$diet == "mostly vegan" |
okcupid$diet == "strictly vegan", "vegan", "other"))
okcupid$specific_veg_diet <- ifelse(okcupid$diet == "vegetarian", "vegetarian",
ifelse(okcupid$diet == "strictly vegetarian", "strictly vegetarian",
ifelse(okcupid$diet == "mostly vegetarian", "mostly vegetarian", "other")))
if (input$diet_type == "All Diets") {
p <- ggplot(okcupid, (aes(x=okcupid$diet, fill=okcupid$body_type))) +
geom_bar(position="fill") +
ggtitle("Body Type Proportions by Diet") +
labs(x = "Diet", y = "Proportion of Users") +
theme(axis.text.x = element_text(angle=90, hjust=1)) +
scale_fill_discrete(name="Body Type")
}
if (input$diet_type == "Diets Generalized") {
p <- ggplot(okcupid, (aes(x=okcupid$gen_diet,fill=okcupid$body_type))) +
geom_bar(position="fill") +
ggtitle("Body Type Proportions by Diet") +
labs(x = "Diet", y = "Proportion of Users") +
theme(axis.text.x = element_text(angle=90, hjust=1)) +
scale_fill_discrete(name="Body Type")
}
if (input$diet_type == "Vegetarian Diets and Other") {
p <- ggplot(okcupid, (aes(x=okcupid$specific_veg_diet,
fill=okcupid$body_type))) +
geom_bar(position="fill") +
ggtitle("Body Type Proportions by Diet") +
labs(x = "Diet", y = "Proportion of Users") +
theme(axis.text.x = element_text(angle=90, hjust=1)) +
scale_fill_discrete(name="Body Type")
}
print(p)
})
output$word_cloud_4 <- renderPlot({
if (input$gender == "Males") {
wordcloud(essay_4_m, max.words = 100, random.order = FALSE,
colors = brewer.pal(8,"Dark2"))
}
if (input$gender == "Females") {
wordcloud(essay_4_f, max.words = 100, random.order = FALSE,
colors = brewer.pal(8,"Dark2"))
}
})
output$word_cloud_5 <- renderPlot({
if (input$gend == "Males") {
wordcloud(essay_5_m, max.words = 100, random.order = FALSE,
colors = brewer.pal(8,"Dark2"))
}
if (input$gend == "Females") {
wordcloud(essay_5_f, max.words = 100, random.order = FALSE,
colors = brewer.pal(8,"Dark2"))
}
})
  ##Dorsa--------------------------------------------------------------------------------------------
  profiles <- okcupid
  profiles$age <- as.numeric(as.character(profiles$age))
  # Vegetarian subset vs. every other explicit diet label.
  profiles.subset <- subset(profiles, (diet == "mostly vegetarian" | diet == "vegetarian" | diet == "strictly vegetarian"))
  profiles.subset.nonveg <- subset(profiles, diet == "mostly anything" | diet == "strictly anything" | diet == "anything" | diet == "mostly vegan" | diet == "vegan" | diet == "strictly vegan" | diet == "mostly other" | diet == "strictly other" | diet == "other" | diet == "halal" | diet == "strictly halal" | diet == "mostly halal" | diet == "kosher" | diet == "mostly kosher" | diet == "strictly kosher")
  # Combining pet categories for vegetarian dataset:
  # collapse the free-text pets field into likes both / cats / dogs / neither.
  dogs <- grepl(pattern = "(likes dogs)|(has dogs)", x = profiles.subset$pets)
  cats <- grepl(pattern = "(likes cats)|(has cats)" , x = profiles.subset$pets)
  both <- dogs & cats
  neither <- !dogs & !cats
  profiles.subset$pet_categories <- ifelse(both, "likes both",
                                    ifelse(cats, "likes cats",
                                    ifelse(dogs, "likes dogs", "likes neither")))
  # Combining pet categories: same manipulation for the non-vegetarian subset.
  dogs <- grepl(pattern = "(likes dogs)|(has dogs)", x = profiles.subset.nonveg$pets)
  cats <- grepl(pattern = "(likes cats)|(has cats)" , x = profiles.subset.nonveg$pets)
  both <- dogs & cats
  neither <- !dogs & !cats
  profiles.subset.nonveg$pet_categories <- ifelse(both, "likes both",
                                           ifelse(cats, "likes cats",
                                           ifelse(dogs, "likes dogs", "likes neither")))
  # Collapse the non-vegetarian diet labels into five broad groups.
  vegan <- grepl(pattern = "(mostly vegan)|(vegan)|(strictly vegan)", x = profiles.subset.nonveg$diet)
  other <- grepl(pattern = "(other)|(strictly other)|(mostly other)" , x = profiles.subset.nonveg$diet)
  anything <- grepl(pattern = "(anything)|(strictly anything)|(mostly anything)" , x = profiles.subset.nonveg$diet)
  halal <- grepl(pattern = "(halal)|(strictly halal)|(mostly halal)" , x = profiles.subset.nonveg$diet)
  kosher <- grepl(pattern = "(kosher)|(strictly kosher)|(mostly kosher)" , x = profiles.subset.nonveg$diet)
  profiles.subset.nonveg$diet_simple <- ifelse(vegan, "vegan",
                                        ifelse(other, "other",
                                        ifelse(halal, "halal",
                                        ifelse(kosher, "kosher", "anything"))))
  # NOTE(review): library() call mid-file; needed for grid.arrange() below.
  library(gridExtra)
  # Vegetarians vs non-vegetarians side by side: gender bar charts when
  # input$sex == "Sex", age density curves otherwise.
  output$bar_ageorsex <- renderPlot({
    # p/q: gender counts by detailed (veg) and simplified (non-veg) diet.
    p <- ggplot(data = profiles.subset, aes(x = diet)) +
      geom_bar(position = position_dodge()) +
      aes(fill = sex) +
      ggtitle("Bar Chart of Gender Given Vegetarian") +
      labs(x = "Diet", y = "Count") +
      scale_fill_manual(values = c("purple", "green4"), guide = guide_legend(title = "Gender"))
    q <- ggplot(data = profiles.subset.nonveg, aes(x = diet_simple)) +
      geom_bar(position = position_dodge()) +
      aes(fill = sex) +
      ggtitle("Bar Chart of Gender Given Non Vegetarians") +
      labs(x = "Diet", y = "Count") +
      scale_fill_manual(values = c("purple", "green4"), guide = guide_legend(title = "Gender"))
    # r/s: age densities for each group.
    r <- (ggplot(profiles.subset) + aes(x = age) +
            geom_density() +
            ggtitle("Conditional Distribution of Age Given Vegetarian") +
            labs(x = "Age", y = "Density"))
    s <- (ggplot(profiles.subset.nonveg) + aes(x = age) + geom_density() +
            ggtitle("Conditional Distribution of Age Given Non-Vegetarian") +
            labs(x = "Age", y = "Density"))
    if (input$sex == "Sex") {
      grid.arrange(p, q, ncol = 2)
    }
    else {
      grid.arrange(r, s, ncol = 2)
    }
  })
output$pets_plot <- renderPlot({
pets <- ggplot(data = profiles.subset,
aes(x = pet_categories)) +
geom_bar() + aes(fill = diet) +
ggtitle("Bar Chart of Pet Preferences of Vegetarians") +
labs(x = "Pet Preferences", y = "Count") +
scale_fill_manual(values = c("purple", "green4", "deepskyblue"), guide = guide_legend(title = "diet"))
pets.nonveg <- ggplot(data = profiles.subset.nonveg,
aes(x = pet_categories)) +
geom_bar() + aes(fill = diet_simple) +
ggtitle("Bar Chart of Pet Preferences of Vegetarians") +
labs(x = "Pet Preferences", y = "Count") +
scale_fill_manual(values = c("purple", "green4", "deepskyblue",
"yellow", "tan1"), guide = guide_legend(title = "Diet"))
if (input$pets == "Vegetarians") {
print(pets)
}
else {
print(pets.nonveg)
}
})
})
|
79b917a042c1e1a26f507c12575a19004dcc4db8
|
17c7b99185eb37422435fb0342a074302fc08ac7
|
/1/1/T_1.R
|
5aa5727e58304cd428437f0440127b8020589484
|
[] |
no_license
|
galikeeva/ML_course
|
24d8408f3e3d32f4c9e6d4387b7e8e071ff572ff
|
b8bba5e2058a3079e8af4a2b8e586bd29b1fbff0
|
refs/heads/main
| 2023-04-21T03:57:12.618664
| 2021-04-27T18:41:38
| 2021-04-27T18:41:38
| 362,211,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,891
|
r
|
T_1.R
|
# T_1.R -- how many predictors are needed to reach R^2 > 0.9 using the
# original predictors versus PCA factors ordered by relative importance?
#
# Cleaned from a saved console transcript: a plot of the undefined variable
# `rSquared.10` (which errored), two typo'd `orederPCA.R2` lines and the
# redundant multi-metric calc.relimp() calls were removed.

# test_sample.csv: first column is the response Y, the remaining columns
# are predictors (491 of them in the original session -- TODO confirm).
data <- read.table("test_sample.csv", header = TRUE)
Y <- data[, 1]
data$Y <- NULL  # drop the response so only predictors remain
X <- data
rm(data)
p <- ncol(X)  # number of predictors (491 in the original transcript)

# R^2 of nested OLS fits on the first z predictors, z = 1..p.
rSquared <- sapply(seq_len(p), function(z)
  summary(lm(Y ~ ., data = data.frame(Y, X[, 1:z])))$r.squared)
matplot(seq_len(p), rSquared, type = "l", lty = 1, lwd = 2, col = "black",
        main = "OriginalR2",
        xlab = "Number of Predictors", ylab = "Determination Coefficient")
legend("bottomright", legend = "Original", lty = 1, lwd = 2, col = "black")

# Smallest model crossing R^2 = 0.9 with the original predictor ordering;
# 312 was read off which(rSquared > 0.9) in the original session.
indices <- which(rSquared > 0.9)
N.orig <- 312

# PCA rotation of the predictors; rank factors by "first" relative importance.
pca.x <- princomp(X)
plot(pca.x)
suppressWarnings(library(relaimpo))
factorScores <- pca.x$scores
m_all.PCA <- lm(Y ~ ., data = data.frame(Y, factorScores))
# Only type = "first" is used downstream.
metrics.PCA <- calc.relimp(m_all.PCA, type = "first")
(first.PCA.rank <- metrics.PCA@first.rank)

# Refit the nested models with the factors ordered by importance.
orderedFactors <- factorScores[, order(first.PCA.rank)]
orderedPCA.R2 <- sapply(seq_len(p), function(z)
  summary(lm(Y ~ ., data = data.frame(Y, orderedFactors[, 1:z])))$r.squared)
matplot(seq_len(p), cbind(rSquared, orderedPCA.R2), type = "l", lty = 1,
        lwd = 2, col = c("black", "red"),
        main = "Improvement of Fit with Number of Predictors",
        xlab = "Number of Predictors", ylab = "Determination Coefficient")
legend("bottomright", legend = c("Original", "PCA"),
       lty = 1, lwd = 2, col = c("black", "red"))

# PCA ordering crosses R^2 = 0.9 at 144 factors (read off the session).
r2pca <- which(orderedPCA.R2 > 0.9)
N.Pca <- 144
(res <- N.orig - N.Pca)           # predictors saved by the PCA ordering
(det_coef <- orderedPCA.R2[144])  # fit quality at the PCA cutoff
|
18af6e36dcfd3de3b488768af64c4e52068f3daf
|
1b1241229f6386662bef24be67ca94e0ac8f7ca5
|
/R/br_contribReadin2002.R
|
1293a28fd5fcb52e93cd5b95e50a7800d9bc3bcc
|
[] |
no_license
|
rafaeldjacome/CongressoAberto
|
c928815bef71008ffadefc7d2ea1d07fd75129a1
|
a4785785cb37e8095893dc411f0a030a57fd30f8
|
refs/heads/master
| 2020-03-18T00:33:50.482718
| 2010-04-14T14:46:51
| 2010-04-14T14:46:51
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 6,705
|
r
|
br_contribReadin2002.R
|
#### To do:
#### Deal with paths
#### Export output table to mySQL
####
#### Reads campaign contributions data obtained by Leoni
#### Uses only a subset of "Deputados Federais", but could run for all
#### 1-Cleans donor and candidate names
#### 2-Classifies type of donor based on CPF/CGC in PJ,PF
#### 3-Finds invalid CPF/CGC that can be internally corrected
#### invalid CPF/CGCs and "other" types of contributions are coded NAs in the cpfcgc column
#### 4-Assembles list of unique donors
#### 5-Redoes donortype classification:
####      6.1 Separates (roughly) "other" type of donation from "invalid" CPF/CGC entries,
#### 6.2 Identifies party donations
#### so type is PF,PF,Other,PP,NA
#### 6-Replaces all variations of names with the most common name for each CPF
####    Note that when there are 2 variations, the algorithm selects the "first"
####    The first variation, in these cases, might be the misspelled one. There is no real consequence
####    but it might be worth correcting this in the future (from the CPF/CGC online database?)
####
#### Inputs:
#### contrib2002.Rdta: just the raw data for all offices, read into R format
####
#### Outputs:
#### br_donorsunique2002fd.Rdta: unique donors for the 2002 federal deputy campaigns
#### br_donorsvariations2002fd.Rdta: list with the same length as the above, with all the name variations for each donor
#### br_contrib2002fd.csv:: donation data with corrected names and cpf/cgcs for fed. deptuty 2002
#### to read these data use read.csv("br_contrib2002fd.csv",as.is=TRUE)
####
####
rm(list=ls(all=TRUE))  # NOTE(review): clearing the workspace in a script is discouraged
run.from <-"C:/reps/CongressoAberto/DATA/CampaignContributions"
setwd(run.from)  # hard-coded path; adjust per machine
load("contrib2002.Rdta")  # loads the raw contributions data frame `d`
# Keep only federal-deputy races and tag the election year.
d <- d[d$office=="Deputado Federal",]
d$year <- 2002
# Normalise a character vector of names for matching:
# upper-case, map accented letters to plain ASCII, strip/space punctuation,
# and collapse double spaces (single pass, exactly like the original).
clean.text <- function(x) {
  out <- toupper(x)
  # Accent -> ASCII replacements, applied in the original order.
  accented <- c("Â", "Á", "Ã", "É", "Ê", "Í", "Ó", "Ô", "Õ", "Ú", "Ü", "Ç")
  plain    <- c("A", "A", "A", "E", "E", "I", "O", "O", "O", "U", "U", "C")
  for (k in seq_along(accented)) {
    out <- gsub(accented[k], plain[k], out)
  }
  out <- gsub("*", "", out, fixed = TRUE)   # drop literal asterisks
  out <- gsub("'", " ", out)                # apostrophes -> spaces
  out <- gsub(".", " ", out, fixed = TRUE)  # literal dots -> spaces
  out <- gsub("-", " ", out, fixed = TRUE)
  out <- gsub("/", "", out, fixed = TRUE)
  gsub("  ", " ", out)                      # collapse double spaces once
}
# Clean candidate and donor names (accents, punctuation, double spaces).
d$name <- clean.text(d$name)
d$donorname <- clean.text(d$donorname) #clean donornames
# Classify donors by CPF/CGC length: 11 digits = person (PF), 14 = company
# (PJ). All-zero ids of either length are treated as invalid (NA).
d$donortype <- ifelse(is.element(d$cpfcgc,c("00000000000","00000000000000")),NA, #watch out for invalids with 11 and 14 chars.
                ifelse(nchar(d$cpfcgc)==11,"PF",
                ifelse(nchar(d$cpfcgc)==14,"PJ",NA)))
d$cpfcgc[is.na(d$donortype)]<-NA #Use NA for invalid or no CPF-CGC
                                 #This category includes OTHER and INVALID
#Check for cases with "invalid" CPF/CGCs that also appear with valid CPF/CGCs:
#This is done before name replacement to make use of different spellings!!!!
unique.invalid <- unique(d[is.na(d$cpfcgc),"donorname"]) #invalid cpfcgc
cases.invalid <- nrow(d[is.na(d$cpfcgc),]) #just for on screen reporting
cat("Found",length(unique.invalid),"names/",sum(is.na(d$cpfcgc)),"cases, with invalid CPF/CGC\n")
unique.valid <- unique(d[is.na(d$cpfcgc)==FALSE,"donorname"]) #valid cpfcgc
unique.invalid.matched <- unique.invalid[is.element(unique.invalid,unique.valid)] #which are matched
for(i in unique.invalid.matched){
  # Bug fix: sort() is ascending by default, so [1] previously picked the
  # LEAST common CPF/CGC for the name; decreasing = TRUE picks the most
  # common one, as the comment always intended.
  d$cpfcgc[which(is.na(d$cpfcgc)&d$donorname==i)] <-          #replace missing CPFCGCs for matched name
    names(sort(table(d$cpfcgc[d$donorname==i]),decreasing=TRUE)[1]) #with most common CPFCGC for that name
}
unique.invalid <- unique(d[is.na(d$cpfcgc),"donorname"]) #invalid cpfcgc after corrections
cat("\t",length(unique.invalid.matched),"names/",cases.invalid-nrow(d[is.na(d$cpfcgc),]),"cases with invalid CPF/CGC were corrected\n")
#Assemble unique donors ###############################################
unique.donors <- data.frame(donor=NA,
cpfcgc=as.character(na.omit(unique(d$cpfcgc))), #drop NA's
variations=NA,
e2002=TRUE) #create dataframe to store unique donor info
variations <- list() #create object to store different spellings
for(i in 1:nrow(unique.donors)){
donors <- sort(table(as.character(d$donorname[d$cpfcgc==unique.donors$cpfcgc[i]])),decreasing=TRUE)#find all name variations for a given cpfcgc
unique.donors$donor[i] <- names(donors)[1] #use most comon name variation
unique.donors$variations[i] <- length(donors) #take note of number of different spellings
variations[[i]] <- names(donors) #store, in a separate object, all different spellings
if(round(i/500)-i/500==0){cat("Done with first",i,"unique donors\n")} #report advances periodically
flush.console()
}
write.csv(unique.donors,file="br_donorsunique2002fd.csv",row.names=FALSE)
save(variations,file="br_donorsvariations2002fd.Rdta")
#Redo "donortype" classification (some NAs were filled with valid ids above).
d$donortype <- ifelse(is.element(d$cpfcgc,c("00000000000","00000000000000")),NA, #watch out for invalids with 11 and 14 chars.
                ifelse(nchar(d$cpfcgc)==11,"PF",
                ifelse(nchar(d$cpfcgc)==14,"PJ",NA)))
# Donations whose donor name looks like a party organ are classified "PP".
party.donors <- union(union(union(union( #Classify party donations as such
  grep("COMITE",d$donorname),
  grep("DIRETORIO",d$donorname)),
  grep("PARTIDO",d$donorname)),
  grep("CANDIDATO",d$donorname)),
  grep("DIRECAO",d$donorname))
d$donortype[party.donors]<-"PP"
other <- union(grep("EVENTOS",d$donorname),grep("APLICACOES",d$donorname))#Separate INVALIDS from OTHER sources #
d$donortype[other] <- "Other" #change donortype from NA to "OTHER"
### Standardize: One name for each CPF.
d$donor<- d$donorname #create new name field, typically the same as old names
for(i in which(unique.donors$variations>1)){ #replace new names of cases with variations with the most common name
  d$donor[which(as.character(d$cpfcgc)==as.character(unique.donors$cpfcgc[i]))] <- as.character(unique.donors$donor[i])
}
write.csv(d,file="br_contrib2002fd.csv",row.names=FALSE)
#dd<-read.csv("br_contrib2002fd.csv",as.is=TRUE)
|
9075f62c837b448d0770f3401515fb3aa632eb2c
|
39e16bc6a2054d010e5d046a4733bc9b16ef40f9
|
/R/tf_map.R
|
86818c73313a8ba26cc85e972c73befc9657766a
|
[] |
no_license
|
cran/tfautograph
|
9ce2c748bc2abac3835cebb298ea036c00bdd835
|
3e07c23bc4a1f52f6fed3e71734736d5fabf99de
|
refs/heads/master
| 2021-09-29T22:19:59.516819
| 2021-09-17T19:30:02
| 2021-09-17T19:30:02
| 236,950,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,440
|
r
|
tf_map.R
|
#' `tf.map_fn()`
#'
#' @description Thin wrapper around `tf.map_fn()` with the following
#' differences:
#'
#' + accepts `purrr` style `~` lambda syntax to define function `fn`.
#'
#' + The order of `elems` and `fn` is switched to make it more pipe `%>%`
#' friendly and consistent with R mappers `lapply()` and `purrr::map()`.
#'
#' @param elems A tensor or (possibly nested) sequence of tensors, each of which
#' will be unpacked along their first dimension. The nested sequence of the
#' resulting slices will be applied to `fn`.
#' @param fn An R function, specified using `purrr` style ~ syntax, a character
#' string, a python function (or more generally, any python object with a
#' `__call__` method) or anything coercible via `as.function()`. The function
#' will be be called with one argument, which will have the same (possibly
#' nested) structure as `elems`. Its output must return the same structure as
#' `dtype` if one is provided, otherwise it must return the same structure as
#' `elems`.
#' @param dtype (optional) The output type(s) of fn. If fn returns a structure
#' of Tensors differing from the structure of elems, then dtype is not
#' optional and must have the same structure as the output of fn.
#' @param parallel_iterations (optional) The number of iterations allowed to
#' run in parallel. When graph building, the default value is 10. While
#' executing eagerly, the default value is set to 1.
#' @param back_prop (optional) True enables support for back propagation.
#' @param swap_memory (optional) True enables GPU-CPU memory swapping.
#' @param infer_shape (optional) False disables tests for consistent output
#' shapes.
#' @param name (optional) Name prefix for the returned tensors.
#'
#' @return A tensor or (possibly nested) sequence of tensors. Each tensor packs
#' the results of applying fn to tensors unpacked from elems along the first
#' dimension, from first to last.
#' @export
tf_map <- function(elems, fn,
                   dtype = NULL,
                   parallel_iterations = NULL,
                   back_prop = TRUE,
                   swap_memory = FALSE,
                   infer_shape = TRUE,
                   name = NULL) {
  # A one-sided formula is converted into a unary function of `.x`.
  if (inherits(fn, "formula")) {
    # compat purrr::as_mapper() but without `.y` and a positional first match
    if (length(fn) > 2L)
      stop("Left hand side in `~`` not allowed")
    fn_body <- fn[[2L]]
    # replace all `.` symbols with `.x`. More robust than having multiple
    # symbols in the fn formals, because it allows you to assign to one and
    # return the other
    # (the inner substitute() call is built unevaluated by the outer one,
    # then eval() runs it on the *value* of fn_body, not the symbol)
    fn_body <- eval(substitute(substitute(fn_body, alist(. = .x))))
    fn <- as.function(c(alist(.x = ), fn_body), envir = environment(fn))
  } else if (!inherits(fn, "python.builtin.object"))
    # Strings and other coercibles become R functions; python callables
    # are passed through to tf.map_fn untouched.
    fn <- as.function(fn, envir = parent.frame())
  # tf.map_fn expects an integer here; R numeric literals are doubles.
  if(is.double(parallel_iterations))
    storage.mode(parallel_iterations) <- "integer"
  tf$map_fn(
    fn = fn,
    elems = elems,
    dtype = dtype,
    parallel_iterations = parallel_iterations,
    back_prop = back_prop,
    swap_memory = swap_memory,
    infer_shape = infer_shape,
    name = name
  )
}
# alternative names tf_map_along_rows tf_apply_rows tf_map_rows
#
# ? also integrate with listarrays?, autograph listarrays::map_along_rows? ...
# ag_map_along_rows()? can do modify_along_rows for doing infer_shape = FALSE?
# wrapper around tf$unstack() in eager mode?
|
71ccecb2379f0f605aba392ab50df34d302a8923
|
c3ed88a4aabbb507a059734a49e259f97867abe3
|
/R/data.R
|
d242cda330d16dfe3e42303b8fed2f47ce311929
|
[] |
no_license
|
RECETOX/Retip
|
53ce7efa2eba237a4b1c49b6e5179ad804290fd0
|
0835edd0858c85da362d839dbcea0315eb47e0d5
|
refs/heads/master
| 2023-03-24T09:44:36.210640
| 2021-03-18T12:33:40
| 2021-03-18T12:33:40
| 296,605,098
| 0
| 0
| null | 2021-03-18T12:33:41
| 2020-09-18T11:44:53
|
R
|
UTF-8
|
R
| false
| false
| 280
|
r
|
data.R
|
#' FiehnHilic positive dataset.
#'
#' Dataset acquired in HILIC positive mode (Fiehn Lab), shipped with the
#' package and loadable via \code{data(HILIC)}.
#'
#' @docType data
#'
#' @usage data(HILIC)
#'
#'
#' @keywords datasets Fiehn Lab Hilic
#'
#' @references TODO: choose one reference for this dataset (placeholder
#'   left by the author -- fill in before release)
#'
#' @source \href{http://mona.fiehnlab.ucdavis.edu/}{QTL Archive}
#'
#' @examples
#' data(HILIC)
"HILIC"
|
8092c1f7e560dbfb5cacc264f70ed2a23251e0b3
|
c06ce7fc23add73e0a034c491c97e3cc4fd42cf8
|
/scriptLCdP_DDM.R
|
b04dba60a8dc38b5e4427493621014d189ef79e6
|
[] |
no_license
|
gamerino/DSwR-DDM
|
4113a8727759d5cecf09c9f621861a2971d4b11b
|
1c20d3b40d164829cd3beb28fb03e418ae274dc5
|
refs/heads/master
| 2020-06-29T16:41:12.964495
| 2019-08-05T02:48:11
| 2019-08-05T02:48:11
| 200,569,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,719
|
r
|
scriptLCdP_DDM.R
|
# Datos de Miércoles exercise: how often is each La Casa de Papel character
# mentioned in the subtitles, per season and episode.
setwd("/myDirectory/DatosDeMiercoles/")
# install.packages("readr")  # one-time setup (original comment had a typo)
library(readr)
library(dplyr)
library(tidytext)
library(widyr)
library(ggpubr)
library(cowplot)
la_casa_de_papel <- readr::read_csv("https://raw.githubusercontent.com/cienciadedatos/datos-de-miercoles/master/datos/2019/2019-07-31/la_casa_de_papel.csv")
# Tokenize the dialogue into single words and drop stopwords.
la_casa_de_papel_subs <- la_casa_de_papel %>%
  unnest_tokens(word,texto) %>%
  anti_join(stop_words)
# Character aliases, accounting for the cover names adopted after season 2
# (see https://github.com/gamerino/DSwR-DDM/blob/master/LCDPPersonajes.png)
personajes<-c("profesor", "tokio", "rio", "denver", "helsinki", "nairobi",
              "lisboa", "lisboa", "lisboa", "estocolmo", "estocolmo")
# raquel = inspectora = lisboa, and mónica = estocolmo
names(personajes)<-c("profesor", "tokio", "río", "denver", "helsinki", "nairobi",
                     "inspectora", "raquel", "lisboa", "mónica", "estocolmo")
# Count how many times each character is named per episode and season.
la_casa_de_papel_pers=la_casa_de_papel_subs %>% filter(word %in%names(personajes)) %>% group_by(word, episodio,temporada) %>% tally()
# Collapse the aliases to the canonical character name.
la_casa_de_papel_pers=mutate(la_casa_de_papel_pers, wordSumm = personajes[word])
# Per character: percentage of a season's mentions falling in each episode.
la_casa_de_papel_pers_perc=la_casa_de_papel_pers %>%
  group_by(wordSumm,temporada) %>%
  dplyr::mutate(sumper=n/sum(n))%>%group_by(wordSumm,temporada,episodio)%>%
  dplyr::summarise(Ret=sum(sumper)*100)
# Plots!
graficos=list()
for( p in unique(personajes)){
  # Load the character's portrait to use as the plot background.
  pen <- png::readPNG(paste(p, ".png", sep=""))
  # Bar chart of mention percentages by season, stacked by episode.
  graficos[[p]]=ggplot(la_casa_de_papel_pers_perc %>% filter(wordSumm %in% p),
                       aes(x=temporada, y=Ret, fill=factor(episodio)))+background_image(as.raster(pen))+geom_bar(color="black",
                       stat = "identity", alpha=0.7)+theme(legend.position = "none")+guides(fill =guide_legend(nrow=1))+labs(
                       fill="Episodios", x="Temporada", y="% de veces que son nombrados")+scale_y_continuous(limits=c(0,100), expand=c(0,0))+scale_fill_brewer(palette="Set3")
}
leyenda=get_legend(graficos[["profesor"]]+theme(legend.position = "bottom", legend.justification = "center"))
figura=plot_grid(plot_grid(plotlist = graficos, nrow=2), leyenda, ncol=1, rel_heights = c(0.9,0.1))
ggplot2::ggsave(figura, file="PorcentajeNombrePersonajes.tiff", height = 9, width=12, dpi=400)
|
7263c83fa06df8afd8fb272964a1e6600c84ad56
|
8033362b590ce650297c63add261fde2cad75368
|
/tests/testthat/test-helpers.R
|
07708a71023bea6a1c613e622e0245209f521eff
|
[] |
no_license
|
cran/jstor
|
6a50559e3f608fe12fffe87e1941b68f8e3adc63
|
cf541804654ca572f59a76d9cd39adac29e19a78
|
refs/heads/master
| 2023-08-31T18:12:43.259268
| 2023-08-16T13:14:39
| 2023-08-16T15:30:42
| 145,900,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,145
|
r
|
test-helpers.R
|
# Fixture paths shared across the tests below.
invalid_file <- "testfiles/invalid_file.xml"
standard_case <- "testfiles/standard_case.xml"
# tests ------
test_that("Files other than `book` and `article` raise an error", {
  file <- xml2::read_xml("testfiles/wrong_doc_type.xml")
  expect_error(validate_article(file), "Unknown input file")
  expect_error(validate_book(file), "Unknown input file")
})
test_that("unvalid file paths raise an error", {
  expect_error(validate_file_path("abc.txt", "xml"))
})
test_that("Warnings for invalid URI are suppressed", {
  expect_silent(read_jstor(invalid_file))
})
# read_jstor should mirror xml2::read_xml for plain files and zip entries,
# and reject non-xml or vector input.
test_that("read_jstor works", {
  expect_equal(read_jstor(standard_case), xml2::read_xml(standard_case))
  expect_error(read_jstor("test.txt"))
  expect_error(read_jstor(rep(standard_case, 2)), "file_path should be")
  zip_loc <- specify_zip_loc("testfiles/standard_case.zip",
                             "standard_case.xml")
  expect_equal(read_jstor(zip_loc), xml2::read_xml(standard_case))
})
test_that("Construction of zip location works", {
  zip_loc <- specify_zip_loc("testfiles/standard_case.zip",
                             "standard_case.xml")
  expect_s3_class(zip_loc, "jstor_zip")
  expect_identical(zip_loc$zip_archive, "testfiles/standard_case.zip")
  expect_identical(zip_loc$file_path, "standard_case.xml")
})
# File-name extraction works for both plain paths and jstor_zip objects.
test_that("jst_get_file_name works", {
  standard_xml_file <- "testfiles/standard_case.xml"
  expect_identical(jst_get_file_name(standard_xml_file), "standard_case")
  zip_loc <- specify_zip_loc("testfiles/standard_case.zip",
                             "standard_case.xml")
  expect_identical(jst_get_file_name(zip_loc), "standard_case")
})
test_that("unavailable path raises error", {
  expect_error(check_path("abc"))
})
test_that("getting footnotes or references on book raises informative error", {
  expect_error(jst_get_references(jst_example("book.xml")),
               class = "article_function_for_book")
})
test_that("validating input for tibbles works", {
  expect_error(validate_tibble(list(1:3, 1:2)))
  expect_equal(validate_tibble(list(1:5, 1:5)), 5L)
  expect_equal(validate_tibble(letters), 26L)
})
908093b2f63186cd4a9c91018edd51bde6b0754d
|
d25c39bb883df168abbf0fa25d946e9845c699cc
|
/pollutantmean.R
|
b05ef770727bd902baf01bae2eb46f7a270e94b3
|
[] |
no_license
|
ysusuk/datasciencecoursera
|
b8f412e4a25fc190f8aad84b27a46a38cdfba4a8
|
950f3802f97bf64fa648a738f91236bb88eb001f
|
refs/heads/master
| 2021-01-17T05:59:47.463482
| 2014-09-05T15:39:07
| 2014-09-05T15:39:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 967
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'pollutant' is a character vector of length 1 indicating
  ## the name of the pollutant for which we will calculate the
  ## mean; either "sulfate" or "nitrate".
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Return the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values)

  # Monitor files are named with zero-padded three-digit ids, e.g. "007.csv";
  # sprintf replaces the hand-rolled nchar/paste0 padding helper.
  paths <- file.path(directory, sprintf("%03d.csv", as.integer(id)))
  # Silently skip ids whose file is missing (the original returned NULL for
  # them); read everything else into a list of data frames.
  frames <- lapply(paths[file.exists(paths)], read.csv)
  # Concatenate the requested pollutant column across all monitors.
  # This replaces the fragile sapply(...)[pollutant, ] matrix indexing.
  values <- unlist(lapply(frames, function(df) df[[pollutant]]))
  round(mean(values, na.rm = TRUE), 3)
}
|
22e9dc1095545d39ad87be06f3db08ccf849b94d
|
bb7a620d494e1757add7bb6b8702875cf832b400
|
/R/day-10.R
|
df799c1c1fad82291429edb4505c8d89546d21db
|
[] |
no_license
|
anguswg-ucsb/geog176A-daily-exercises
|
213b05b49c8f8a8244c653d3598b8a3e5e119542
|
75f2f8fdbe67ab1caba8b25e1337583d966b5f61
|
refs/heads/master
| 2022-12-13T06:44:23.819430
| 2020-09-03T18:37:48
| 2020-09-03T18:37:48
| 286,771,402
| 1
| 0
| null | 2020-08-11T14:50:41
| 2020-08-11T14:50:40
| null |
UTF-8
|
R
| false
| false
| 383
|
r
|
day-10.R
|
# Daily exercise 10: dissolve the lower-48 state polygons into a single
# boundary, comparing st_combine() and st_union().
library(tidyverse)
library(USAboundaries)
library(USAboundariesData)
library(sf)
USAboundaries::us_states()  # quick interactive peek at the data
states = us_states()
# Continental US only: drop Puerto Rico, Alaska and Hawaii.
lower48 = states %>%
  filter(!name %in% c('Puerto Rico', 'Alaska', 'Hawaii'))
#Question 1:
# st_combine() merges the geometries without dissolving shared borders.
usa_c= st_combine(lower48) %>% st_cast('MULTILINESTRING')
plot(usa_c)
#Question 2:
# st_union() dissolves the internal state borders into one outline.
usa_u= st_union(lower48) %>% st_cast('MULTILINESTRING')
plot(usa_u)
|
6880a75e68b25f5d43b3b5dd5dffec390ea28fef
|
d1c7db4224306f1e48b2f44e17105e32990a9e6c
|
/ejercicio.R
|
89eb3719f9b2439fbf598ccd63e9ae340419f672
|
[] |
no_license
|
luiandresgonzalez/cursoR2020
|
c2c7a3fe4f81e448201fb8c8ee588326f11c4a1a
|
9f96b77a76b4e83cd42856f05384ed19d322a76a
|
refs/heads/master
| 2023-05-07T08:57:35.618426
| 2021-06-03T20:19:01
| 2021-06-03T20:19:01
| 276,419,909
| 0
| 0
| null | null | null | null |
ISO-8859-10
|
R
| false
| false
| 830
|
r
|
ejercicio.R
|
# Exercise: tidyr reshaping and dplyr verbs on the EDAWR demo data sets.
library(tidyverse)
# Pull the example data sets straight from the EDAWR repository.
load(url("https://github.com/rstudio/EDAWR/raw/master/data/cases.rdata"))
load(url("https://github.com/rstudio/EDAWR/raw/master/data/storms.rdata"))
load(url("https://github.com/rstudio/EDAWR/raw/master/data/tb.rdata"))
load(url("https://github.com/rstudio/EDAWR/raw/master/data/pollution.rdata"))
pollution
# Wide format: one column per particle size.
spread(pollution,size, amount)
# Split the storm date into year/month/day columns, then glue them back
# as day/month/year. NOTE(review): "aņo" looks like a mis-encoded "año";
# left untouched because it is used as a column name at runtime.
st <- separate(storms, date, c("aņo", "mes", "dia"), sep = "-")
unite(st, date, c("dia","mes","aņo"), sep = "/")
# Storms with wind >= 50, strongest first.
storms %>%
  select(storm, wind) %>%
  filter(wind >= 50) %>%
  arrange(desc(wind))
# Mean pollution amount per city.
pollution %>%
  group_by(city) %>%
  summarise(mean = mean(amount))
# Total 2013 TB cases per country, largest first.
tb %>%
  filter(year == 2013) %>%
  mutate(cases = child + adult + elderly) %>%
  group_by(country) %>%
  summarise(Cases = sum(cases)) %>%
  arrange(desc(Cases)) %>%
  View()
|
91f416183182b2e791a1762c108c31a3cf9e5aea
|
c15f74ad13ec0656df788d0ab9de922ca35206b0
|
/man/fars_map_state.Rd
|
a9b72284d098f3545321e3ffb79e337b73547cd1
|
[] |
no_license
|
aliciatb/fars
|
f4d409207c60efeaa8feb07c9f393867add81cb6
|
7341de93fb0e85022fcf992ff3924fccea69aa5f
|
refs/heads/master
| 2021-03-22T00:11:54.828201
| 2018-03-24T18:36:08
| 2018-03-24T18:36:08
| 114,496,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 880
|
rd
|
fars_map_state.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Print "Fars Map State"}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{A character string signifying the code for a state. For example,
Washington state equals '53'}
\item{year}{A character string representing 4 character length year value like '2015'.}
}
\value{
This function returns a map with accidents plotted for a given state and year.
Any records with invalid latitude and longitude coordinates will be excluded from the plot.
}
\description{
This function plots any accidents for a single state and year.
}
\note{
An invalid year and/or state code will result in a warning message describing the failed condition.
}
\examples{
\dontrun{
fars_map_state('53', '2015')
fars_map_state(state.num = '53', year = '2015')
}
}
|
d863b736bcf1f349d22157a06c542bdd240d406a
|
61c188bba8f228b0f14f4bae7c2fa3dcd1f7b3a2
|
/R/write.nowcasting.R
|
763ccea76e780ba53e963e240de2b43978a86ca1
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
covid19br/now_fcts
|
24cb3b7bcbf47b827e50fec43f0dd9647c89dde4
|
44479971618513ef23e82ac277c749b8384e12f8
|
refs/heads/master
| 2023-02-27T01:34:07.757658
| 2021-02-05T20:41:10
| 2021-02-05T20:41:10
| 273,057,036
| 0
| 1
|
CC0-1.0
| 2020-07-07T00:27:17
| 2020-06-17T19:04:42
|
R
|
UTF-8
|
R
| false
| false
| 1,004
|
r
|
write.nowcasting.R
|
#' Write nowcasting outputs to csv files
#'
#' Saves the estimates and posterior parameter tables of a nowcasting
#' object, plus the sampled trajectories when present. File names follow
#' the pattern `nowcasting_<tipo>_<label>_<data>.csv`.
#'
#' @param now nowcasting object: a list with `estimates`, `params.post`
#'   and optionally `trajectories`
#' @param output.dir output path prefix the file names are appended to
#' @param tipo case/death type: "covid", "srag", "obitos_covid", "obitos_srag"
#' @param data date stamp used in the output file names
#'
#' @importFrom utils write.csv
#' @export
#'
write.nowcasting <- function(now,
                             output.dir,
                             tipo = "covid",
                             data) {
  # Build the target path and dump one component of `now` as csv.
  write_component <- function(tbl, label) {
    target <- paste0(output.dir, "nowcasting_", tipo, "_", label, "_",
                     data, ".csv")
    write.csv(tbl, file = target, row.names = FALSE)
  }
  write_component(now$estimates, "previstos")
  write_component(now$params.post, "post")
  # would it be better to use an argument? (review note kept from original)
  if ("trajectories" %in% names(now)) {
    write_component(now$trajectories, "traj")
  }
}
|
3e612217768c4cf3f8abc238566ef35d98e33f31
|
51ad96e9f8b58e55a32cd40e4160f409d7291e0e
|
/R/builder.cacheSweave.R
|
e0a722f676ad3c6eb516c5ae20966dbacfbb2e17
|
[] |
no_license
|
jbryer/makeR
|
f95035956ec0f1a928bf5685ba01c9747145da5e
|
6f4b283f9ac5e58d1513466e4a6c8a28b700e76d
|
refs/heads/master
| 2020-05-18T16:23:23.058957
| 2019-03-20T16:41:05
| 2019-03-20T16:41:05
| 3,110,499
| 14
| 11
| null | 2019-03-20T16:41:07
| 2012-01-05T14:14:33
|
R
|
UTF-8
|
R
| false
| false
| 1,563
|
r
|
builder.cacheSweave.R
|
#' This function will build Sweave (Rnw) files using the cacheSweave driver.
#'
#' @param project the project to be built.
#' @param theenv the environment to build in.
#' @param fork if true Sweave will be executed in a separate R instance.
#' @param debug debug option sent to the Sweave function. If true, the output
#'        of R code from the Rnw file will be printed as it is running.
#' @param ... other unspecified parameters
#' @return the name of the file if successfully built.
#' @export
builder.cacheSweave <- function(project, theenv, fork=TRUE, debug=TRUE, ...) {
	require(cacheSweave)  # NOTE(review): require() returns FALSE rather than erroring if missing
	# Pattern for locating source files; defaults to any .rnw in the directory.
	sourceFile = ifelse(is.null(project$SourceFile), '.rnw$', project$SourceFile)
	wd = eval(getwd(), envir=theenv)
	files = list.files(path=wd, pattern=sourceFile, ignore.case=TRUE)
	built = character()
	for(i in seq_len(length(files))) {
		file = files[i]
		message('Running Stangle...\n')
		Stangle(file)  # extract the R code chunks alongside the weave
		message('Running Sweave with cacheSweave...\n')
		if(fork) {
			# Build a shell command that runs Sweave in a fresh Rscript process.
			# env2string (project helper, not shown here) presumably serialises
			# theenv into assignment statements -- TODO confirm.
			envstr = env2string(theenv)
			thecall = paste('Rscript -e "require(cacheSweave); ', envstr,
							' Sweave(\'', file, '\', driver=cacheSweaveDriver, debug=',
							debug, ')"', sep='')
			message(paste(thecall, '\n'))
			system(thecall)
		} else {
			# Copy everything from theenv into the global env so Sweave sees it.
			# NOTE(review): this inner loop reuses the index `i`; harmless for
			# R's for semantics, but confusing to read.
			for(i in ls(theenv)) { assign(i, get(i, envir=theenv), envir=globalenv()) }
			Sweave(file, driver=cacheSweaveDriver, debug=debug)
		}
		message('Running texi2dvi...\n')
		# Compile the generated .tex to PDF and record the output file name.
		texi2pdf(paste(substr(file, 1, (nchar(file)-4)), '.tex', sep=''))
		built = c(built, paste(substr(file, 1, (nchar(file)-4)), '.pdf', sep=''))
	}
	return(built)
}
|
a68f156169e2c2accde8599e8a6faf10dfdf5a45
|
675dfd3561aa645fc48d11f79b10ceca3c93a58e
|
/get_price_history.R
|
23e55b58945076328f68215446eaf2f3036fea03
|
[] |
no_license
|
nlovin/mtgdb
|
3ff7df5c49a3eeaf58c1e793c3da03304ccb5288
|
60811e0584717aa58ccddb45d771f594e8e9fdca
|
refs/heads/master
| 2022-12-11T11:45:19.711324
| 2020-09-18T16:56:13
| 2020-09-18T16:56:13
| 282,291,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,103
|
r
|
get_price_history.R
|
#get price history
#### Function ----
# Scrape the complete paper price history for every card of one Magic: The
# Gathering set from mtggoldfish.com and append it to the price_history
# table of the local Postgres "mtgdb" database.
#
# set: the set name exactly as stored in the cards_all table.
#
# Side effects: prompts for the DB password (askpass), performs one HTTP GET
# per card with a random 2.5-5 s sleep between requests, writes a temporary
# RDS snapshot under data/tmp/, and appends rows to price_history.
get_set_prices <- function(set){
  library(tidyverse)
  library(httr)
  library(rvest)
  ## Set name
  # mtggoldfish names a few sets differently from the DB; remap those, then
  # URL-encode the name (spaces -> '+', strip ':' and apostrophes).
  ifelse(set == "Magic 2014", "Magic 2014 Core Set", set) %>%
    ifelse(. == "Magic 2015", "Magic 2015 Core Set", .) %>%
    ifelse(. == "Time Spiral Timeshifted", "Timeshifted", .) %>%
    str_replace_all(" ", "+") %>%
    str_remove_all(":") %>%
    str_remove_all("'") -> set_url
  ### Get set cards
  library(DBI)
  library(RSQLite)
  library(RPostgres)
  pw <- askpass::askpass()
  con <- dbConnect(RPostgres::Postgres(),
                   host='localhost',
                   port='5432',
                   dbname='mtgdb',
                   user='postgres',
                   password=pw)
  # One mtggoldfish price-page URL per card.  Basic lands are skipped, and
  # for "masters" sets all printings are kept while other sets use main == 1.
  set_urls <- dbReadTable(con,"cards_all") %>%
    as_tibble() %>%
    filter(set_name == set) %>%
    filter(!str_detect(type, "Basic Land")) %>%
    filter(ifelse(set_type == "masters", !is.na(id), main == 1)) %>%
    mutate(name = str_remove_all(name, "'"),
           name = str_remove_all(name, ","),
           # split cards are addressed as "Left+Right" on mtggoldfish
           name = ifelse(is_split==T, str_replace(name, " // ","+"),name),
           name = str_remove_all(name, " //.+"),
           name = str_replace_all(name, " ", "+"),
           name = paste0("https://www.mtggoldfish.com/price/",
                         set_url,
                         "/",
                         name,
                         "#paper")) %>%
    distinct() %>%
    pull(name)
  # The set's code (single value, stamped onto every output row below).
  set_code <- dbReadTable(con,"cards_all") %>%
    as_tibble() %>%
    filter(set_name == set) %>%
    filter(!str_detect(type, "Basic Land")) %>%
    filter(ifelse(set_type == "masters", !is.na(id), main == 1)) %>%
    select(set_code) %>%
    distinct() %>%
    pull(set_code)
  # Card display names, built with the same filters as set_urls.
  set_card_names <- dbReadTable(con,"cards_all") %>%
    as_tibble() %>%
    filter(set_name == set) %>%
    filter(!str_detect(type, "Basic Land")) %>%
    filter(ifelse(set_type == "masters", !is.na(id), main == 1)) %>%
    distinct() %>%
    pull(name)
  # Matching setcard ids, parallel to the two vectors above.
  set_card_ids <- dbReadTable(con,"cards_all") %>%
    as_tibble() %>%
    filter(set_name == set) %>%
    filter(!str_detect(type, "Basic Land")) %>%
    filter(ifelse(set_type == "masters", !is.na(id), main == 1)) %>%
    distinct() %>%
    pull(setcard_id)
  # One row per card to scrape; Guildgates are excluded.
  # NOTE(review): bind_cols() on bare vectors assumes all three pulls above
  # stayed aligned and equal length -- confirm distinct() cannot diverge.
  loop_tbl <- bind_cols(set_urls,set_card_names,set_card_ids) %>%
    rename(set_urls = 1, set_card_names = 2, setcard_id=3) %>%
    filter(!str_detect(set_card_names, "Guildgate")) %>%
    left_join(., dbReadTable(con,"cards_all") %>%
                as_tibble() %>%
                select(id, setcard_id), by = "setcard_id")
  dbDisconnect(con)
  hist <- tibble()
  # Scrape each card page; failures are logged (error handler) and skipped.
  for (i in 1:length(loop_tbl$set_urls)) {
    indx <- match(loop_tbl$set_card_names[i], loop_tbl$set_card_names)
    tryCatch(
      expr = {
        s <- str_squish(content(GET(loop_tbl$set_urls[i]), as = 'text'))
        # The price series is embedded in an inline Dygraph JS block; cut
        # that block out and reduce it to "date,price" text chunks.
        str_extract(s, 'price-sources-paper"\\)\\.toggle\\(true\\).+containing div') %>%
          str_remove_all('\\+= \\"\\\\n') %>%
          str_remove_all('\\"') %>%
          str_remove_all("g = new Dygraph.+") %>%
          str_remove_all(" d ") %>%
          str_split(";") %>%
          unlist() -> tmp
        # Second chunk holds the series header (the card name).
        nme <- tmp[2] %>%
          str_remove(" var= Date,")
        if (str_detect(nme,"'")) {
          # Names containing an apostrophe arrive HTML-escaped and split
          # across two chunks; rejoin the header, then parse from chunk 4.
          nme <- paste(tmp[c(2,3)], collapse = "") %>%
            str_replace("&#39;", "'") %>%
            str_remove(" var= Date,")
          tmp[-1:-3] %>%
            str_remove_all(" d ") %>%
            str_split(",", simplify = T) %>%
            as_tibble(.name_repair = "minimal") %>%
            rename(date = 1, price = 2) %>%
            filter(date != "") %>%
            mutate(price = as.numeric(price),
                   name = loop_tbl$set_card_names[i],
                   set_code = set_code,
                   set_name = set,
                   setcard_id = loop_tbl$setcard_id[i]) %>%
            filter(!is.na(price)) %>%
            mutate(date = lubridate::as_date(date)) -> tmp
        } else{
          # Normal card name: data rows start at chunk 3.
          tmp[-1:-2] %>%
            str_remove_all(" d ") %>%
            str_split(",", simplify = T) %>%
            as_tibble(.name_repair = "minimal") %>%
            rename(date = 1, price = 2) %>%
            filter(date != "") %>%
            mutate(price = as.numeric(price),
                   name = loop_tbl$set_card_names[i],
                   set_code = set_code,
                   set_name = set,
                   setcard_id = loop_tbl$setcard_id[i]) %>%
            filter(!is.na(price)) %>%
            mutate(date = lubridate::as_date(date)) -> tmp
        }
        hist <- bind_rows(hist, tmp)
        print(paste0("Finished with ", loop_tbl$set_card_names[i]))
        message("Iteration ", indx, " successful.")
        # Random delay between requests to stay polite to mtggoldfish.
        t <- runif(1,2.5,5)
        print(paste0("Sleeping for ", t, " seconds"))
        Sys.sleep(t)
      },
      error = function(e){
        message("* Caught an error on itertion ", indx, ". URL: ", loop_tbl$set_urls[i])
        print(e)
        Sys.sleep(1)
      }
    )
  }
  print("Finished. Writing to DB now.")
  # Reconnect -- the scraping loop above can take a long time.
  con <- dbConnect(RPostgres::Postgres(),
                   host='localhost',
                   port='5432',
                   dbname='mtgdb',
                   user='postgres',
                   password=pw)
  # add name
  hist <- hist %>%
    select(date,
           price,
           name,
           set_code,
           set_name,
           setcard_id)
  # Write tmp file for inspection
  write_rds(hist, paste0("data/tmp/tmp_price_",lubridate::today() %>% str_replace_all("-","_"),".rds"))
  # Return top of file
  hist %>% head(15)
  # write to DB
  # Dates are stored as numeric day counts; card_id is re-joined from
  # cards_all so price_history carries the canonical card identifier.
  dbWriteTable(con,
               "price_history",
               hist %>%
                 select(date,
                        price,
                        setcard_id) %>% left_join(.,dbReadTable(con,"cards_all") %>%
                                                    as_tibble() %>%
                                                    select(card_id = id, setcard_id), by = "setcard_id") %>%
                 mutate(date = as.numeric(date)),
               overwrite=F,
               append=T)
  dbDisconnect(con)
}
|
1d174e8a5301445b7984dc11b881c8b2d118d33b
|
7665057b28fb224108b09ce4231981c472de38e3
|
/tests/testthat/test-rc.expand.sample.names.R
|
f7e25a0beb7758a44b0287387f920f03a5fc37d3
|
[] |
no_license
|
cbroeckl/RAMClustR
|
e60e01a0764d9cb1b690a5f5f73f3ecc35c40a32
|
e0ca67df1f993a89be825fcbf3bfa6eaabbadfef
|
refs/heads/master
| 2023-08-03T10:26:16.494752
| 2023-06-21T14:24:06
| 2023-06-21T14:24:06
| 13,795,865
| 10
| 13
| null | 2023-08-01T01:42:17
| 2013-10-23T07:04:07
|
R
|
UTF-8
|
R
| false
| false
| 472
|
r
|
test-rc.expand.sample.names.R
|
test_that("RAMClustR rc.expand.sample.names", {
  # Input fixture produced by rc.get.xcms.data and the matching reference
  # output, both stored under testdata/.
  ramclustObj <- readRDS(file.path("testdata", "rc.get.xcms.data.rds"))
  expected <- readRDS(file.path("testdata", "rc.expand.sample.names.rds"))
  actual <- rc.expand.sample.names(ramclustObj = ramclustObj, quiet = TRUE)
  # phenoData column names are copied over because R CMD check runs without
  # the interactive user input that would normally supply them.
  colnames(actual$phenoData) <- colnames(expected$phenoData)
  expect_equal(actual, expected)
})
|
2587ce052735807da662ff37cc54838ee3e749d8
|
f65ae497d68d9d842febebb5e23bb33df636c266
|
/R/SRA_Entrez_API.R
|
22a8a03229099599c7ed543eeb4682cddb183b2b
|
[
"MIT"
] |
permissive
|
Roleren/ORFik
|
3da7e00f2033f920afa232c4c9a76bd4b5eeec6d
|
e6079e8433fd90cd8fc682e9f2b1162d145d6596
|
refs/heads/master
| 2023-08-09T06:38:58.862833
| 2023-08-08T08:34:02
| 2023-08-08T08:34:02
| 48,330,357
| 28
| 7
|
MIT
| 2023-07-06T16:17:33
| 2015-12-20T17:21:15
|
R
|
UTF-8
|
R
| false
| false
| 9,733
|
r
|
SRA_Entrez_API.R
|
# Resolve an SRA study accession (SRP) from a GEO series accession (GSE) by
# downloading the GEO text record and parsing its Series_relation lines.
# Falls back to the BioProject relation when no direct SRP link exists.
# Stops with an informative error when neither relation is present or when
# multiple, non-identical SRPs are found.
SRP_from_GSE <- function(SRP) {
  message("GSE inserted, trying to find SRP from the GSE")
  url <- "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc="
  url <- paste0(url, SRP, "&form=text")
  # Get GSE info ("!"-separated SOFT-style record)
  a <- fread(url, sep = "!", header = FALSE, col.names = c("row.info", "info"))
  # Now find SRP from GSE
  SRP_line <- grepl("Series_relation = ", a$info)
  # BUG FIX: grepl() returns a logical vector the same length as its input,
  # so the old `length(SRP_line) == 0` check could never fire; the correct
  # "no relation lines at all" test is !any(SRP_line).
  if (!any(SRP_line)) stop("GSE does not have a recorded Bioproject or SRP; check that it is correct!")
  b <- a[SRP_line,]$info
  d <- b[grepl("=SRP", b)]
  if (length(d) == 0) {
    # No direct SRP relation; fall back to the BioProject relation and take
    # the accession after the final '/'.
    d <- b[grepl("= BioProject:", b)]
    SRP <- gsub(".*/", replacement = "", d)
  } else SRP <- gsub(".*term=", replacement = "", d)
  # Still nothing after the fallback -> the GSE has no usable relation.
  if (length(d) == 0) stop("GSE does not have a recorded Bioproject or SRP; check that it is correct!")
  SRP <- unique(SRP)
  if (length(SRP) > 1) stop("Found multiple non identical SRPs for GSE:", SRP)
  message(paste("Found SRP, will continue using:", SRP))
  return(SRP)
}
# Collect the attribute tables found at `xml_path` in a parsed XML document
# (XML::xmlParse output) and return them as one wide data.table: one row per
# matched node, one column per attribute tag.
fetch_xml_attributes <- function(xml, xml_path) {
  dt <- XML::getNodeSet(xml, xml_path)
  dt <- lapply(dt, XML::xmlToDataFrame)
  # Transpose each per-node frame so its rows become columns.
  dt <- lapply(dt, function(x) as.data.table(t(x)))
  # Use the first row as the column names and keep the second row as values.
  dt <- lapply(dt, function(x) {colnames(x) <- as.character(x[1,]); return(x[2,])})
  # Stack all nodes; fill = TRUE pads columns missing from some nodes.
  dt <- rbindlist(dt, fill = TRUE)
  # Drop columns whose every entry is the literal string "NA".
  # NOTE(review): true missing values would make `x == "NA"` yield NA and
  # all(NA) is NA -- confirm inputs hold the string "NA", not real NAs.
  to_keep <- sapply(dt, function(x) !all(x == "NA"))
  # data.table: `..to_keep` looks the selector up in the calling scope.
  dt <- dt[,..to_keep]
  return(dt)
}
# Download and assemble SRA metadata for a study (SRP accession) into one
# data.table with a row per sequencing run, mirroring the legacy "runinfo"
# columns.  Optionally prints/saves the study abstract and, when
# rich.format = TRUE, appends every free-form sample/experiment attribute
# as extra columns.
#
# SRP               - SRA study accession.
# destfile          - path used only by the deprecated runinfo fallback.
# abstract_destfile - where the abstract is written (see abstract_save).
# abstract          - "print", "save", "printsave", or anything else (no-op).
# remove.invalid    - drop runs reporting zero reads (see filter_empty_runs).
# rich.format       - append XML attribute tables as columns.
sample_info_append_SRA <- function(SRP, destfile, abstract_destfile, abstract,
                                   remove.invalid, rich.format = FALSE) {
  # Download xml and add more data
  xml <- sample_info_download(SRP)
  if (rich.format) {
    # Re-parse with the XML package to harvest the attribute tables.
    sample_xml <- XML::xmlParse(xml)
    sample_dt <- fetch_xml_attributes(sample_xml, "//SAMPLE/SAMPLE_ATTRIBUTES")
    exp_attr_dt <- fetch_xml_attributes(sample_xml, "//EXPERIMENT/EXPERIMENT_ATTRIBUTES")
  }
  xml <- xml2::as_list(xml)
  dt <- data.table()
  # Some responses wrap the packages in an EXPERIMENT_PACKAGE_SET element;
  # unwrap it so the loop below always iterates over packages.
  is_EXP_SET <- !is.null(xml$EXPERIMENT_PACKAGE_SET)
  EXP <- if(is_EXP_SET) {xml$EXPERIMENT_PACKAGE_SET} else xml
  # browser()
  for(i in seq_along(EXP)) { # Per sample in study
    EXP_SAMPLE <- EXP[i]$EXPERIMENT_PACKAGE
    dt_single <- sample_info_single(EXP_SAMPLE)
    dt <- rbind(dt, dt_single)
  }
  # An eFetchResult element here means the efetch response carried no
  # experiment packages at all.
  no_linker <- !is.null(EXP$eFetchResult)
  if (no_linker) {
    # Download runinfo from Trace / SRA server
    warning("Could not find SRA linker, falling back to deprecated search",
            "This might not find all the samples!")
    dt <- study_runinfo_download_deprecated(SRP, destfile)
  } else {
    # NOTE(review): EXP_SAMPLE below is whatever the last loop iteration
    # left behind; fine for study-level fields shared by all samples, but
    # worth confirming.
    dt <- add_pubmed_id(EXP_SAMPLE, dt)
  }
  dt <- add_author(dt)
  # Save abstract
  if (rich.format) dt <- cbind(dt, sample_dt, exp_attr_dt)
  abstract_save(EXP_SAMPLE, abstract, abstract_destfile)
  return(filter_empty_runs(dt, remove.invalid, SRP))
}
# Flatten one EXPERIMENT_PACKAGE (a single SRA sample and its runs) into a
# data.table with one row per run, reproducing the legacy runinfo columns.
# Navigates the nested list produced by xml2::as_list(); missing elements
# come back as NULL and are handled per field.
sample_info_single <- function(EXP_SAMPLE) {
  # For each run in sample
  dt_run_all <- data.table()
  for (j in seq_along(EXP_SAMPLE$RUN_SET)) {
    RUN <- EXP_SAMPLE$RUN_SET[j]$RUN
    xml.RUN <- unlist(RUN$IDENTIFIERS$PRIMARY_ID)
    # Run-level size statistics come from XML attributes, not child nodes.
    spots <- as.integer(attr(RUN, "total_spots"))
    bases <- as.numeric(attr(RUN, "total_bases"))
    # spots_with_mates <- 0
    avgLength <- as.integer(attr(RUN$Statistics$Read, "average"))
    size_MB <- floor(as.numeric(attr(RUN, "size"))/1024^2)
    Experiment <- EXP_SAMPLE$EXPERIMENT$IDENTIFIERS$PRIMARY_ID[[1]]
    # if (length(xml.RUN) == 0) xml.RUN <- ""
    dt_run <- data.table(Run = xml.RUN, spots, bases,
                         avgLength, size_MB, Experiment)
    dt_run_all <- rbind(dt_run_all, dt_run)
  }
  # Sample info (only 1 row per sample)
  # Library
  lib_design <- EXP_SAMPLE$EXPERIMENT$DESIGN$LIBRARY_DESCRIPTOR
  if (length(lib_design$LIBRARY_NAME) == 0) {
    LibraryName <- NA
  } else LibraryName <- lib_design$LIBRARY_NAME[[1]]
  LibraryStrategy <- lib_design$LIBRARY_STRATEGY[[1]]
  LibrarySelection <- lib_design$LIBRARY_SELECTION[[1]]
  LibrarySource <- lib_design$LIBRARY_SOURCE
  # Layout is encoded as the name of the child element (SINGLE / PAIRED).
  LibraryLayout <- names(lib_design$LIBRARY_LAYOUT)
  dt_library <- data.table(LibraryName, LibraryStrategy,
                           LibrarySelection, LibrarySource, LibraryLayout)
  # Insert
  # Placeholders kept for column compatibility with the old runinfo format.
  InsertSize <- 0
  InsertDev <- 0
  dt_insert <- data.table(InsertSize, InsertDev)
  # Instrument
  Platform_info <- EXP_SAMPLE$EXPERIMENT$PLATFORM
  Platform <- names(Platform_info)
  Model <- unlist(Platform_info[[1]]$INSTRUMENT_MODEL)
  dt_platform <- data.table(Platform, Model)
  # Sample
  SRAStudy <- EXP_SAMPLE$STUDY$IDENTIFIERS$PRIMARY_ID[[1]]
  # NOTE(review): the first BioProject assignment is immediately overwritten
  # by the second -- only the EXTERNAL_ID value survives; confirm intended.
  BioProject <- attr(EXP_SAMPLE$STUDY$IDENTIFIERS$EXTERNAL_ID, "namespace")
  BioProject <- EXP_SAMPLE$STUDY$IDENTIFIERS$EXTERNAL_ID[[1]]
  # Placeholder; overwritten later by add_pubmed_id() when a link exists.
  Study_Pubmed_id <- 1
  ProjectID <- ""
  Sample_info <- EXP_SAMPLE$SAMPLE
  Sample <- Sample_info$IDENTIFIERS$PRIMARY_ID[[1]]
  BioSample <- Sample_info$IDENTIFIERS$EXTERNAL_ID[[1]]
  SampleType <- "simple"
  dt_sample <- data.table(SRAStudy, BioProject, Study_Pubmed_id, ProjectID,
                          Sample, BioSample, SampleType)
  # Organism
  TaxID <- Sample_info$SAMPLE_NAME$TAXON_ID[[1]]
  ScientificName <- Sample_info$SAMPLE_NAME$SCIENTIFIC_NAME[[1]]
  dt_organism <- data.table(TaxID, ScientificName)
  # Submission
  SampleName <- attr(Sample_info, "alias")
  CenterName <- attr(EXP_SAMPLE$SUBMISSION, "center_name")
  Submission <- attr(EXP_SAMPLE$SUBMISSION, "accession")
  # NOTE(review): RUN here is the loop variable left over from the run loop
  # above, so the release date is that of the *last* run in the set.
  ReleaseDate <- attr(RUN, "published")
  MONTH <- substr(ReleaseDate, 6, 7)
  YEAR <- gsub("-.*", "", ReleaseDate)
  AUTHOR <- unlist(EXP_SAMPLE$Organization$Contact$Name$Last)
  # Treat generic submitter names as "no author"; add_author() may fill this
  # in later from PubMed.
  AUTHOR <- ifelse(is.null(AUTHOR), "",
                   ifelse(AUTHOR %in% c("Curators", "GEO"), "", AUTHOR))
  dt_submission <- data.table(SampleName, CenterName, Submission,
                              MONTH, YEAR, AUTHOR)
  # Source and title
  # Check if sample has alternative name (SOURCE)
  sample_source <- c()
  for (j in seq_along(EXP_SAMPLE$SAMPLE$SAMPLE_ATTRIBUTES)) {
    tag <- unlist(EXP_SAMPLE$SAMPLE$SAMPLE_ATTRIBUTES[[j]]$TAG)
    if (!is.null(tag)) {
      if (tag == "source_name") {
        sample_source <- c(sample_source, unlist(EXP_SAMPLE$SAMPLE$SAMPLE_ATTRIBUTES[[j]]$VALUE))
      }
    }
  }
  sample_source <- ifelse(is.null(sample_source), "", sample_source)
  # Get Sample title
  sample_title <- unlist(EXP_SAMPLE$SAMPLE$TITLE)
  sample_title <- ifelse(is.null(sample_title), "", sample_title)
  dt_additional <- data.table(sample_source, sample_title)
  # Per-sample columns are recycled across the per-run rows by cbind().
  dt_single <- data.table(cbind(dt_run_all, dt_library, dt_insert, dt_platform,
                                dt_sample, dt_organism, dt_submission,
                                dt_additional))
}
# Legacy fallback: fetch the study's runinfo CSV from the (deprecated) SRA
# Trace endpoint, save it to `destfile`, and read it back as a data.table.
study_runinfo_download_deprecated <- function(SRP, destfile) {
  runinfo_url <- paste0(
    "https://trace.ncbi.nlm.nih.gov/Traces/sra-db-be/runinfo?term=", SRP)
  download.file(runinfo_url, destfile)
  return(fread(destfile))
}
# Download the full SRA metadata XML for a study via the Entrez two-step:
# esearch for the numeric record ids, then efetch them all in one request.
# (The old Trace db endpoint is gone, hence the two calls.)
sample_info_download <- function(SRP) {
  search_url <- paste0(
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=sra&term=",
    SRP, "&retmax=2000&retmode=xml")
  id_listing <- xml2::as_list(xml2::read_xml(search_url))
  record_ids <- sort(unlist(id_listing$eSearchResult$IdList, use.names = FALSE))
  fetch_url <- paste0(
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=sra&id=",
    paste(record_ids, collapse = ","), "&rettype=fasta&retmode=xml")
  message("Downloading metadata from:")
  message(fetch_url)
  return(xml2::read_xml(fetch_url))
}
# Print and/or persist the study abstract, controlled by `abstract`
# ("print", "save", or "printsave"); any other mode is a no-op.
abstract_save <- function(EXP_SAMPLE, abstract, abstract_destfile) {
  if (!(abstract %in% c("print", "save", "printsave"))) {
    return(invisible(NULL))
  }
  # Nested-list lookup; guarded so a NULL package short-circuits cleanly.
  abstract_text <- if (is.null(EXP_SAMPLE)) {
    NULL
  } else {
    EXP_SAMPLE$STUDY$DESCRIPTOR$STUDY_ABSTRACT[[1]]
  }
  if (is.null(abstract_text)) {
    message("Could not find abstract for project")
    return(invisible(NULL))
  }
  if (abstract != "save") {   # i.e. "print" or "printsave"
    cat("Study Abstract:\n")
    cat(abstract_text, " \n")
    cat("------------------\n")
  }
  if (abstract != "print") {  # i.e. "save" or "printsave"
    fwrite(data.table(abstract = abstract_text), abstract_destfile)
  }
  invisible(NULL)
}
# Copy the pubmed id from the study's cross-reference link into the
# Study_Pubmed_id column of `file` when the link's DB is "pubmed";
# otherwise return `file` untouched.
add_pubmed_id <- function(EXP_SAMPLE, file) {
  xref <- EXP_SAMPLE$STUDY$STUDY_LINKS$STUDY_LINK$XREF_LINK
  db_name <- unlist(xref$DB[[1]])
  if (!is.null(db_name) && db_name == "pubmed") {
    file$Study_Pubmed_id <- as.integer(unlist(xref$ID[[1]]))
  }
  return(file)
}
# Look up the first listed author on PubMed for file$Study_Pubmed_id[1] and
# store the text before the first space (presumed last name) in AUTHOR.
# A Study_Pubmed_id of 3 is treated as an error code and skipped, as are
# missing/NA ids -- in those cases `file` is returned unchanged.
add_author <- function(file) {
  pm_id <- file$Study_Pubmed_id
  if (is.null(pm_id)) {
    return(file)
  }
  if (is.na(pm_id[1]) || pm_id[1] == 3) {  # 3 is error code
    return(file)
  }
  summary_url <- paste0(
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&id=",
    pm_id[1])
  doc <- xml2::as_list(xml2::read_xml(summary_url))
  authors <- doc$eSummaryResult$DocSum[[5]]
  if (!is.null(unlist(authors)[1])) {
    file$AUTHOR <- gsub(" .*", "", unlist(authors)[1])
    #TODO: Decide if I add Last author:
    # file$LAST_AUTHOR <- gsub(" .*", "", tail(unlist(authors), 1))
  }
  return(file)
}
# Drop runs whose metadata reports zero reads (spots) -- such runs cannot be
# downloaded.  Warns about them first; removal itself is controlled by
# remove.invalid.  Returns the (possibly filtered) data.table.
filter_empty_runs <- function(dt, remove.invalid = TRUE, SRP) {
  # Filter
  if (nrow(dt) == 0) {
    warning("Experiment not found on SRA, are you sure it is public?")
    return(dt)
  }
  # NOTE(review): msg is built before the NA -> 0 replacement below, so runs
  # whose spot count was NA are not named in the warning text.
  msg <- paste("Found Runs with 0 reads (spots) in metadata, will not be able
to download the run/s:", dt[spots == 0,]$Run)
  # data.table in-place update: treat missing spot counts as zero.
  dt[is.na(spots), spots := 0]
  if (any(dt$spots == 0)) {
    warning(msg)
    if (remove.invalid) {
      message("Removing invalid Runs from final metadata list")
      dt <- dt[spots > 0,]
    }
  }
  # Everything was filtered out -> report which study ended up empty.
  if (nrow(dt) == 0) {
    warning(paste("No valid runs found from experiment:", SRP))
    return(dt)
  }
  return(dt)
}
|
8b4955d9971b689a59750d581c3340cedceba863
|
784b8a10943c39d07d81420ce7b7c43f13b7aba3
|
/man/Menu.General.Rd
|
a64f36fd2bb2b9455a12a489db2fae4116b9cb70
|
[] |
no_license
|
cran/RcmdrPlugin.DoE
|
329bd8373636234acd0963d534828048c5139f66
|
797521b0902e88a56d8c4ce26e2a74e4a291782c
|
refs/heads/master
| 2022-05-12T08:53:01.137562
| 2022-04-23T22:50:12
| 2022-04-23T22:50:12
| 17,693,091
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,364
|
rd
|
Menu.General.Rd
|
\name{Menu.General}
\alias{Menu.General}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{General factorial designs}
\description{This menu covers full factorial designs and orthogonal main effects designs
for cases for which not all factors are at two levels.
Furthermore, Taguchi-parameter designs are covered. This
help file is about how and when to apply these.}
%- maybe also 'usage' for other objects documented here.
\section{Quantitative or Qualitative}{
Both types of design are suitable for quantitative and qualitative factors alike.
If you have quantitative factors only, you may want to consider the special menu for these.
}
\section{Full factorial designs}{
Full factorial designs are straight-forward to generate and are generated by
function \code{\link[DoE.base]{fac.design}} from package \pkg{DoE.base}. The
number of runs needed for a full factorial experiment is the product of the numbers
of levels of all factors. This may be more than is feasible. In such situations, the
orthogonal main effects plans may be helpful. If interactions are also of interest,
it may be useful to combine several plans or to pay attention to specific properties of
orthogonal arrays (automatic support for such possibilities is currently poor and will be improved
in the future).\cr
Full factorial designs can be run in blocks. This is advisable, whenever the experimental
units are not homogeneous, but smaller groups of units (the blocks) can be made
reasonably homogeneous (e.g., batches of material, etc.). }
\section{Orthogonal main effects plans}{
If a full factorial experiment is too large, an orthogonal main effects plan may be useful.
As long as there are no interactions between the factors represented by columns of the array,
all such arrays work well, provided they are large enough for stable estimation. Some arrays also
work well in the presence of interactions or even allow estimation of interactions for special subsets
of variables. However, there is no automated support for selection of an array that
has desirable properties. It may therefore be useful to specifically select an array
the properties of which are known to the experimenter.}
\section{Warning}{Important: For all factorial designs,
the experimenter must conduct all experimental runs as determined in the design,
because the design properties will deteriorate in ways not easily foreseeable,
if some combinations are omitted.
It must be carefully considered in the planning phase, whether it is
possible to conduct all experimental runs, or whether there might be restrictions
that do not permit all combinations to be run (e.g.: three factors, each with levels
\dQuote{small} and \dQuote{large}, where the combination with all three factors
at level \dQuote{large} is not doable because of space restrictions).
If such restrictions are encountered, the design should be devised in a different way from the beginning.
If possible, reasonable adjustments to levels should ensure that a factorial design
becomes feasible again. Alternatively, a non-orthogonal D-optimal design can take
the restrictions into account. \emph{Unfortunately, this functionality is not yet implemented in this GUI.}
}
\section{Taguchi inner-outer array designs - also called parameter designs}{
With the menu item \emph{Create Taguchi inner-outer array}, two \bold{existing} designs
can be combined into a crossed inner-outer array design. For more detail, see the
literature and the help in the Taguchi design menu.}
\references{
Box G. E. P, Hunter, W. C. and Hunter, J. S. (2005)
\emph{Statistics for Experimenters, 2nd edition}.
New York: Wiley.
}
\author{ Ulrike Groemping }
\seealso{ See Also \code{\link[FrF2]{pb}} for the function behind the screening designs,
\code{\link[FrF2]{FrF2}} for the function behind the regular fractional factorial designs,
and \code{\link[FrF2:CatalogueAccessors]{catlg}} for a catalogue of regular fractional factorial designs,
and \code{\link{DoEGlossary}} for a glossary of terms relevant in connection with
orthogonal 2-level factorial designs.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ array }% __ONLY ONE__ keyword per line
|
d7f0f63144a1e418f60f2598570c5ffed98a7933
|
63a054b3e1fde6b2faadc61fa5587e33945376d0
|
/data-raw/make_ECDMS_electric_county_data.R
|
4323ad077235d73ba6f58381028eb4e25566785e
|
[] |
no_license
|
BAAQMD/ECDMS
|
217dfec37785e44beab22fe5e1cdfb74f5fcc335
|
75f231cf3076777b2772bce4eafa6fef2b7afc43
|
refs/heads/master
| 2021-07-17T03:46:37.329016
| 2021-04-22T19:43:51
| 2021-04-22T19:43:51
| 246,622,274
| 0
| 0
| null | 2021-04-22T19:43:52
| 2020-03-11T16:25:00
|
R
|
UTF-8
|
R
| false
| false
| 1,107
|
r
|
make_ECDMS_electric_county_data.R
|
# Read a raw ECDMS county-level electricity-consumption export and tidy it:
# returns one row per (county, sector, year) with consumption (tput_qty)
# labeled in GWh.  Sector components are cross-checked against the "Total"
# rows before those rows are dropped.
make_ECDMS_electric_county_data <- function (
  path
) {
  raw_data <-
    tbltools::read_tbl(
      path,
      verbose = TRUE)
  names(raw_data)[1] <-
    "County" # fix double-quoted column name
  renamed <-
    rename(
      raw_data,
      county = County,
      sector = Sector)
  # Wide year columns -> long format; coerce year labels to calendar years
  # and drop the redundant "Total Usage" column.
  reshaped <-
    renamed %>%
    gather_years("tput_qty") %>%
    mutate_at(
      vars(year),
      ~ CY(elide_year(.))) %>%
    vartools::drop_vars(
      `Total Usage`)
  # Consistency check: Residential + Non-Residential must agree with Total
  # both absolutely (|Diff| < 0.1) and relatively (|log(Ratio)| < 0.1,
  # i.e. within roughly ten percent); ensurer::ensure() aborts otherwise.
  validated <-
    reshaped %>%
    spread(
      sector,
      tput_qty) %>%
    mutate(
      Ratio = (`Non-Residential` + `Residential`) / Total,
      Diff = `Non-Residential` + `Residential` - Total) %>%
    ensurer::ensure(
      all(abs(.$Diff) < 0.1, na.rm = TRUE),
      all(abs(log(.$Ratio)) < 0.1, na.rm = TRUE)) %>%
    drop_vars(
      Total, Ratio, Diff) %>%
    gather(
      sector,
      tput_qty,
      `Non-Residential`,
      `Residential`)
  # Final tidy-up: drop any residual Total rows, title-case county names,
  # and tag the unit of measure.
  tidied <-
    validated %>%
    filter(
      sector != "Total") %>%
    mutate(
      county = str_to_title(county)) %>%
    mutate(
      tput_unit = "GWh")
  return(tidied)
}
|
d34e7ee45f1bf45d861dc56c31aac567ee591249
|
8981917d4fb02453c0e772becdf12e08070956da
|
/Rashmi_eaxam_R.R
|
f1876705c2e4e16ac10639088f7ce0d661dc990e
|
[] |
no_license
|
rashmikc17/rfiles
|
fd7fa4ef0c53d03a7ff3c82d8f0335bf5020dafc
|
266e17f0a245a485a12fb48df3fdabd17fcd15b1
|
refs/heads/master
| 2022-04-15T17:13:13.517876
| 2020-04-15T08:50:48
| 2020-04-15T08:50:48
| 255,858,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 660
|
r
|
Rashmi_eaxam_R.R
|
# Read a csv, named "data"
# Loads the exam data set from the working directory; a "Marks" column is
# read by the summary-statistic calls at the bottom of this script.
my_data <- read.csv("data.csv")
my_data
head(my_data)
# [1] Compute the MEAN, STDEV of the numeric columns. Do not use available R functions but create your own script.
# Function to calculate the arithmetic mean without using base::mean().
#
# x:     numeric vector
# npar:  unused; kept for backward compatibility with existing callers
# print: if TRUE (default), echo the result before returning it
mean_UD <- function(x,npar=TRUE,print=TRUE) {
  result <- sum(x)/length(x)
  # FIX: honor the `print` argument instead of printing unconditionally.
  if (print) print(result)
  return(result)
}
# Function to calculate the sample standard deviation (n - 1 denominator)
# without using base::sd().  (The previous comment wrongly said "mean".)
#
# x:     numeric vector
# npar:  unused; kept for backward compatibility with existing callers
# print: if TRUE (default), echo the result before returning it
standardDeviation_UD <- function(x,npar=TRUE,print=TRUE) {
  m <- sum(x)/length(x)  # hoist the mean instead of recomputing it inline
  result <- sqrt(sum((x - m)^2) / (length(x) - 1))
  if (print) print(result)
  return(result)
}
# invoking the function
set.seed(1234)  # fixed seed (the two functions below are deterministic anyway)
mean_UD(my_data$Marks)
standardDeviation_UD(my_data$Marks)
|
537d572b4f16468b27aad4412140429997a99022
|
1b3cba401b594a5755353f8a52e2f4bbecdcb457
|
/src/alphadiversity_boxplot.R
|
a89b4fa19615baa8c4927c95dc33005ac5ab1db3
|
[] |
no_license
|
jeremy29tien/imrep
|
2c230ef6f29d19aadfa509d45e205237747da27d
|
a597d9e400d05bf36d66dff3714ff77387f609b7
|
refs/heads/master
| 2020-07-05T10:43:11.238284
| 2019-08-16T00:08:14
| 2019-08-16T00:08:14
| 202,627,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
r
|
alphadiversity_boxplot.R
|
library(ggplot2)
# Paired alpha-diversity (CDR3) per subject: rows of the input CSV alternate
# the age-70 measurement and the age-80 measurement for the same subject.
df <- read.csv("/users/j29tien/ROP/results/IMMUNE/samples_cdr3.csv", header=TRUE)
rownames(df) <- NULL
# Split interleaved rows into the two age groups (odd rows = 70, even = 80).
spl <- split(df, rep(c(1, 2), length.out = nrow(df)))
age_70 <- spl$"1"
age_80 <- spl$"2"
# Build one Locus block: paired values, their 80-minus-70 difference, and a
# Locus label column.  Replaces three copy-pasted sections and the
# hard-coded row count of 65 (now sized from the data via NROW()).
locus_block <- function(locus, a70, a80) {
  v70 <- a70[,grep(locus, ignore.case=TRUE, colnames(a70))]
  v80 <- a80[,grep(locus, ignore.case=TRUE, colnames(a80))]
  block <- cbind(v70, v80)
  block <- cbind(block, v80 - v70)
  block <- cbind(rep(locus, NROW(v70)), block)
  colnames(block) <- c("Locus", "Age_70", "Age_80", "Eighty_minus_Seventy")
  as.data.frame(block)
}
## Combine and graph
alphaD <- rbind(locus_block("alphaIGH", age_70, age_80),
                locus_block("alphaIGK", age_70, age_80),
                locus_block("alphaIGL", age_70, age_80))
# cbind() above coerced everything to character; restore numeric / factor
# types before plotting.
alphaD$Age_70 <- as.numeric(as.character(alphaD$Age_70))
alphaD$Age_80 <- as.numeric(as.character(alphaD$Age_80))
alphaD$Eighty_minus_Seventy <- as.numeric(as.character(alphaD$Eighty_minus_Seventy))
alphaD$Locus <- as.factor(alphaD$Locus)
ggplot(alphaD, aes(y=Eighty_minus_Seventy, fill=Locus)) + geom_boxplot()
ggsave("/users/j29tien/imrep/results/paired_alpha_boxplot.png")
|
517568c4bf07120e4ac1ffcf1e14ccc75bc785de
|
2c170474a479f0582d1685a8df22ca98dd157798
|
/tests/testthat/test-new-ggplot.R
|
263969ff86e503684b12122ffc1fbefffdd2324a
|
[] |
no_license
|
wcmbishop/gogoplot
|
80574a1161a44222265f9478d891ac6d4a696033
|
1857b750305c15a9bb90dfdb12b96230c14a0ff8
|
refs/heads/master
| 2021-03-27T17:13:16.813628
| 2018-03-30T18:08:07
| 2018-03-30T18:08:07
| 106,642,044
| 3
| 2
| null | 2018-03-30T18:08:08
| 2017-10-12T03:53:19
|
R
|
UTF-8
|
R
| false
| false
| 425
|
r
|
test-new-ggplot.R
|
context("new_ggplot")
# A ggplot with aes() but no layers: get_plot_code() should render it back
# as exactly one line of code.
test_that("blank plot", {
  data_name <- "mtcars"
  input <- list(xvar = "disp", yvar = "hp")
  # !!sym() injects the string names as bare symbols so the generated code
  # reads `ggplot(mtcars, aes(disp, hp))` rather than quoted strings.
  # NOTE(review): this only works if new_gogoplot() captures/deparses the
  # unevaluated call -- confirm against the package implementation.
  p_blank <- new_gogoplot(ggplot(!!sym(data_name), aes(!!sym(input$xvar),
                                                       !!sym(input$yvar))))
  code <- get_plot_code(p_blank)
  expect_is(p_blank, "ggplot")
  expect_equal(length(code), 1)
  expect_equal(code[1], "ggplot(mtcars, aes(disp, hp))")
})
|
c01f38d6bab444d871619159bcd5a2d6956492b8
|
07c5e08887d321eb3c7f9947dd6b3848251306c1
|
/political_urls/political_classifier_evaluation/figure_s4.R
|
127b057725f6996da48c4e14650e6cb2a0232b36
|
[] |
no_license
|
LazerLab/twitter-fake-news-replication
|
b26c6a7ffcc2482558e5181c600391d99bb5fdd2
|
ff58308d46e5531d9c37836ac473083cdfe37517
|
refs/heads/master
| 2020-04-12T21:00:14.255705
| 2019-12-05T04:26:30
| 2019-12-05T04:26:30
| 162,752,654
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,818
|
r
|
figure_s4.R
|
# Figure S4: compare hand labels (MTurk) with the political-tweet
# classifier by plotting, for each labeling method and label class, the
# share of tweets that contain a link to a fake news site.
library(binom)
source("util/plotting.R")
theme_set(my_custom_theme())
# Load in the annotation data, with an additional field for our labeling
survey_data <- fread("restricted_data/political_classifier_eval_data.csv")
# Keep only answered rows; "idk" responses are excluded from denominators.
survey_min <- survey_data[!is.na(answer) & answer != "idk" ]
survey_min[, answer_is_pol := answer %in% c("election","politics")]
# confidence intervals for error rates
# Agresti-Coull intervals for P(tweet links to fake news) in four groups:
# hand-labeled political, classifier political, hand-labeled non-political,
# classifier non-political (in that row order).
fn_p1 <- rbind(binom.confint(sum(survey_min$answer_is_pol & survey_min$fake_url_count >0),
                             sum(survey_min$answer_is_pol),
                             methods="ac"),
               binom.confint(sum(survey_min$is_pol & survey_min$fake_url_count >0),
                             sum(survey_min$is_pol),
                             methods="ac"),
               binom.confint(sum((!survey_min$answer_is_pol) & survey_min$fake_url_count >0),
                             sum((!survey_min$answer_is_pol)),
                             methods="ac"),
               binom.confint(sum((!survey_min$is_pol) & survey_min$fake_url_count >0),
                             sum((!survey_min$is_pol)),
                             methods="ac"))
# Labels and facet variable matching the row order above.
fn_p1$lab <- c("By Hand\n(MTurk)","Classifier","By Hand\n(MTurk)", "Classifier")
fn_p1$typev <- factor(c("Political Tweets","Political Tweets", "Non-political Tweets","Non-political Tweets"),
                      levels=c("Political Tweets","Non-political Tweets"))
# Point estimate with CI whiskers, faceted by tweet class, saved as PDF.
p <- ggplot(fn_p1, aes(lab,mean,ymin=lower,ymax=upper)) + geom_pointrange(size=1.4) + my_custom_theme() + facet_wrap(~typev)
p <- p + xlab("How tweets were labeled (N=19,819)") + scale_y_continuous("Percentage of Tweets\nContaining Link to\n Fake News Site",
                                                                         labels=percent)
ggsave("img/percentage_fake_news.pdf",p,h=5,w=10)
|
b45ec98f87a7ae97321c02142593677ff89bc18d
|
b81875d1dc66033329e6e82914cd08727dffc8bf
|
/man/Bolstad-package.Rd
|
cfa3921a6413e31781af09a96e7165548008652c
|
[] |
no_license
|
cran/Bolstad
|
b4cb3d49c8edca8ebcc51fe89539a3d144e8de32
|
3dc15d83e44e4e5e120e91465ae7ca213ba4e699
|
refs/heads/master
| 2021-01-21T00:52:47.450624
| 2020-10-05T05:50:02
| 2020-10-05T05:50:02
| 17,678,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 963
|
rd
|
Bolstad-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bolstad-package.R
\docType{package}
\name{Bolstad-package}
\alias{Bolstad-package}
\alias{Bolstad}
\title{Bolstad Functions}
\description{
A set of R functions and data sets for the book Introduction to Bayesian
Statistics, Bolstad, W.M. (2007), John Wiley & Sons ISBN 0-471-27020-2. Most
of the package functions replicate the Minitab macros that are provided with
the book. Some additional functions are provided to simplfy inference about
the posterior distribution of the parameters of interest.
}
\details{
\tabular{ll}{ Package: \tab Bolstad\cr Type: \tab Package\cr Version: \tab
0.2-26\cr Date: \tab 2015-05-01\cr License: \tab GPL 2\cr }
}
\references{
Bolstad, W.M. (2007), Introduction to Bayesian Statistics, John
Wiley & Sons.
}
\author{
James Curran Maintainer: James Curran <j.curran@auckland.ac.nz> ~~
The author and/or maintainer of the package ~~
}
\keyword{package}
|
013c5c3d284d3667ebfd900650bd6080531b6927
|
48ec227d3b47fb7be58d7508ec22881262c1a767
|
/man/parse_annotation_row.Rd
|
10051c3dd35fe1f5074873fb96efaf137bb89d1f
|
[] |
no_license
|
MaksimRudnev/annotated
|
b76a80687fa45d35a7bcd9506c95658c1d6f1c09
|
1e78fbd8c5c93ee5be03f9ba92e5cd5fa565ce16
|
refs/heads/master
| 2020-03-27T07:25:50.635386
| 2019-02-11T14:58:15
| 2019-02-11T14:58:15
| 146,190,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 351
|
rd
|
parse_annotation_row.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unexported_helpers.R
\name{parse_annotation_row}
\alias{parse_annotation_row}
\title{Helper to run inline R code (à la .Rmd)}
\usage{
parse_annotation_row(rowtxt)
}
\arguments{
\item{rowtxt}{Annotation to parse.}
}
\description{
Helper to run inline R code (à la .Rmd)
}
|
df8c26815b2088686326e7ddb0df61d84def7d5d
|
738024f66cd839fbe5206b3a6763cf9e750b48be
|
/National Map Scripts/Weed_maps.R
|
721bd7e00ba854dc8a5fce1cfa53c016f9c993e4
|
[] |
no_license
|
prabinrs/PRISM-Crop-Health-Data-Filter-and-Visualize
|
1eea9ddf1036a7fb58f6060941689b41b7f1619c
|
787c706ecc696e57c2b212e5d4318b8b3877ff87
|
refs/heads/master
| 2020-12-28T19:33:04.135749
| 2015-05-12T06:57:51
| 2015-05-12T06:57:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,728
|
r
|
Weed_maps.R
|
##############################################################################
# title : Weed_maps.R;
# purpose : Map weed data from PRISM;
# producer : prepared by A. Sparks;
# last update : in Los Baños, Philippines, Nov. 2014;
# inputs : Aggregated PRISM data;
# outputs : Maps of PRISM data;
# remarks 1 : ;
# Licence: : GPL2;
##############################################################################
#### load packages ####
library(rgdal)
library(ggplot2)
library(RColorBrewer)
# FIX: summaryBy() (used in every plot below) comes from doBy, which was
# never loaded in this script's own package block; make it explicit rather
# than relying on Filter_Aggregator_Data.R having loaded it.
library(doBy)
#### end load packages ####
#### Load data for mapping
# Supplies the survey objects referenced below (visit, weedabove, weedbelow,
# grass, broadleaf, sedge, small, plus coordinates).
source("Filter_Aggregator_Data.R")
# Philippine outline (Natural Earth 1:50m), fortified for ggplot's geom_map.
PHL <- readOGR(dsn = "Data", layer = "PHL_NE_50m")
PHL.fortify <- fortify(PHL)
map <- ggplot(PHL.fortify) + geom_map(map = PHL.fortify, aes(x = long, y = lat, map_id = id), fill = "#333333")
#### End load data ####
#### Begin mapping #####
# Each block: median rating per municipality and visit, drawn as points
# sized and coloured by the rating, faceted by visit, then saved to PNG.
# NOTE(review): data.frame(visit, <var>) contains only those two columns,
# yet the summaryBy() formulas also reference lat, lon and Municipality --
# confirm these are supplied by Filter_Aggregator_Data.R's environment.
# Weedabove
map + geom_point(data = summaryBy(weedabove+lat+lon~Municipality+visit, data = data.frame(visit, weedabove), FUN = median, na.rm = TRUE, keep.names = TRUE), aes(x = lon, y = lat, size = weedabove, colour = weedabove)) +
  scale_size_continuous("Median\nWeed\nAbove\nRating", range = c(3, 15)) +
  scale_colour_gradientn(colours = brewer.pal(7, "YlOrRd"), "Median\nWeed\nAbove\nRating") +
  scale_x_continuous("Longitude") +
  scale_y_continuous("Latitude") +
  guides(size = "none") +
  ggtitle("Weeds Above Canopy") +
  coord_map() +
  facet_grid(. ~ visit)
ggsave("Graphs/Weed_above_map.png", width = 8, height = 8, units = "in")
# Weedbelow
map + geom_point(data = summaryBy(weedbelow+lat+lon~Municipality+visit, data = data.frame(visit, weedbelow), FUN = median, na.rm = TRUE, keep.names = TRUE), aes(x = lon, y = lat, size = weedbelow, colour = weedbelow)) +
  scale_size_continuous("Median\nWeed\nBelow\nRating", range = c(3, 15)) +
  scale_colour_gradientn(colours = brewer.pal(7, "YlOrRd"), "Median\nWeed\nBelow\nRating") +
  scale_x_continuous("Longitude") +
  scale_y_continuous("Latitude") +
  guides(size = "none") +
  ggtitle("Weed Below the Canopy") +
  coord_map() +
  facet_grid(. ~ visit)
ggsave("Graphs/Weed_below_map.png", width = 8, height = 8, units = "in")
# Grass
map + geom_point(data = summaryBy(grass+lat+lon~Municipality+visit, data = data.frame(visit, grass), FUN = median, na.rm = TRUE, keep.names = TRUE), aes(x = lon, y = lat, size = grass, colour = grass)) +
  scale_size_continuous("Median\nGrassy\nWeed\nRating", range = c(3, 15)) +
  scale_colour_gradientn(colours = brewer.pal(7, "YlOrRd"), "Median\nGrassy\nWeed\nRating") +
  scale_x_continuous("Longitude") +
  scale_y_continuous("Latitude") +
  guides(size = "none") +
  ggtitle("Grasses") +
  coord_map() +
  facet_grid(. ~ visit)
ggsave("Graphs/Grassy_weed_map.png", width = 8, height = 8, units = "in")
# Broadleaf weed
map + geom_point(data = summaryBy(broadleaf+lat+lon~Municipality+visit, data = data.frame(visit, broadleaf), FUN = median, na.rm = TRUE, keep.names = TRUE), aes(x = lon, y = lat, size = broadleaf, colour = broadleaf)) +
  scale_size_continuous("Median\nBroadleaf\nWeed\nRanking", range = c(3, 15)) +
  scale_colour_gradientn(colours = brewer.pal(7, "YlOrRd"), "Median\nBroadleaf\nWeed\nRanking") +
  scale_x_continuous("Longitude") +
  scale_y_continuous("Latitude") +
  guides(size = "none") +
  ggtitle("Broadleaf Weeds") +
  coord_map() +
  facet_grid(. ~ visit)
ggsave("Graphs/Broadleaf_weed_map.png", width = 8, height = 8, units = "in")
# Sedge
map + geom_point(data = summaryBy(sedge+lat+lon~Municipality+visit, data = data.frame(visit, sedge), FUN = median, na.rm = TRUE, keep.names = TRUE), aes(x = lon, y = lat, size = sedge, colour = sedge)) +
  scale_size_continuous("Median\nSedge\nWeed\nRank", range = c(3, 15)) +
  scale_colour_gradientn(colours = brewer.pal(7, "YlOrRd"), "Median\nSedge\nWeed\nRank") +
  scale_x_continuous("Longitude") +
  scale_y_continuous("Latitude") +
  guides(size = "none") +
  ggtitle("Sedge Ranking") +
  coord_map() +
  facet_grid(. ~ visit)
ggsave("Graphs/Sedge_map.png", width = 8, height = 8, units = "in")
# Small weeds
map + geom_point(data = summaryBy(small+lat+lon~Municipality+visit, data = data.frame(visit, small), FUN = median, na.rm = TRUE, keep.names = TRUE), aes(x = lon, y = lat, size = small, colour = small)) +
  scale_size_continuous("Median\nSmall\nWeed\nRanking", range = c(3, 15)) +
  scale_colour_gradientn(colours = brewer.pal(7, "YlOrRd"), "Median\nSmall\nWeed\nRanking") +
  scale_x_continuous("Longitude") +
  scale_y_continuous("Latitude") +
  guides(size = "none") +
  ggtitle("Small Weed Ranking") +
  coord_map() +
  facet_grid(. ~ visit)
ggsave("Graphs/Small_weed_map.png", width = 8, height = 8, units = "in")
#### End mapping ####
# eos
|
1f42fab682a6c60c5b36655910f38dd620704667
|
f670ba62c6bbeac81f7c071422f1b12de1e61081
|
/src/PredictiveAnalysis/ExcutedFiles/K MEANS CLUSTERING ON UTILITES.R
|
bffbe1e22d823de69f01397c10b318e08e37d5b9
|
[] |
no_license
|
ravireddy07/LPU-Material
|
c1d3cd32da81b240ee9fb18cb3872e96f190d1ad
|
be97485f0ec1682e410fc2dd4aee0ae34b034211
|
refs/heads/master
| 2022-12-18T18:25:39.323285
| 2020-09-22T03:01:38
| 2020-09-22T03:01:38
| 297,216,910
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,576
|
r
|
K MEANS CLUSTERING ON UTILITES.R
|
# Cluster Analysis
# Hierarchical and k-means clustering of a utilities data set.  The CSV is
# chosen interactively; its first column is the company name, the rest are
# numeric attributes.
mydata <- read.csv(file.choose(),sep=',')
str(mydata)
head(mydata)
pairs(mydata)
# Scatter plot
plot(mydata$Fuel_Cost~ mydata$Sales, data = mydata)
with(mydata,text(mydata$Fuel_Cost ~ mydata$Sales, labels=mydata$Company,pos=4))
# Normalize
# Drop the company-name column, then z-score every variable so that columns
# on different scales contribute equally to the distance matrix.
z = mydata[,-c(1,1)]
means = apply(z,2,mean)
sds = apply(z,2,sd)
nor = scale(z,center=means,scale=sds)
nor
##calculate distance matrix (default is Euclidean distance)
distance = dist(nor)
print(distance,digits=3)
#DENDROGRAM WITH COMPLETE LINKAGE
# Hierarchical agglomerative clustering using default complete linkage
mydata.hclust = hclust(distance)
plot(mydata.hclust)
plot(mydata.hclust,labels=mydata$Company,main='Default from hclust')
plot(mydata.hclust,hang=-1)
# Hierarchical agglomerative clustering using "average" linkage
mydata.avg<-hclust(distance,method="average")
plot(mydata.avg,labels=mydata$Company,main='Default from hclust')
plot(mydata.avg,hang=-1)
# Cluster membership
member.c = cutree(mydata.hclust,3)
member.a = cutree(mydata.avg,3)
table(member.c,member.a)
#Characterizing clusters
# BUG FIX: the original referenced an undefined object `member`; use the
# complete-linkage membership vector computed above.
aggregate(nor,list(member.c),mean)
aggregate(mydata[,-c(1,1)],list(member.c),mean)
#Silhouette Plot
library(cluster)
plot(silhouette(cutree(mydata.hclust,3),distance))
#`` Scree Plot
# kmeans() uses random starts; fix the seed so the scree plot and the final
# fit below are reproducible.
set.seed(123)
wss <- (nrow(nor)-1)*sum(apply(nor,2,var))
for (i in 2:20) wss[i] <- sum(kmeans(nor, centers=i)$withinss)
plot(1:20, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares")
# K-means clustering
kc<-kmeans(nor,3)
kc
kc$cluster
kc$centers
plot(Sales~Demand_growth,mydata,col=kc$cluster)
mydata
|
9e92200e50bcf678da83faab4b5b43699b3dae15
|
2a69e9a61af5bb0df8bc4103cfa1f5608c4b5cf4
|
/install.R
|
e1ca141285f2c91bb2f00be4b476e9813d9970ff
|
[
"MIT"
] |
permissive
|
khaors/binder-GQAnalyzer
|
6dea314e84f20bc8d2c9101edc0548dcd3736a53
|
20b3d2724bf92500108277026fe4242fe013329a
|
refs/heads/main
| 2023-03-11T23:12:07.386141
| 2021-03-04T01:34:24
| 2021-03-04T01:34:24
| 344,245,002
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 232
|
r
|
install.R
|
# Install the CRAN dependencies one by one (same set and order as before),
# then install the GQAnalyzer package from GitHub via devtools.
cran_pkgs <- c(
  "devtools",
  "shiny",
  "ggplot2",
  "grid",
  "gridExtra",
  "DT",
  "dplyr"
)
for (pkg in cran_pkgs) {
  install.packages(pkg)
}
devtools::install_github("khaors/GQAnalyzer")
|
a996c862b9bb803c2e5b111c43ee5d013c162ce1
|
2dfc1715d53363b0c4572c5305d82b36f2d7cee3
|
/man/grouping-methods.Rd
|
259f139950add99965c1ce61ebff6d375e29839f
|
[] |
no_license
|
cran/haplotypes
|
53833ab28b3e6b9995e21226efe93e2315065052
|
d161571530d08d75de14ed2c153d95776099d54d
|
refs/heads/master
| 2023-07-25T22:49:01.985321
| 2023-07-15T05:40:03
| 2023-07-15T06:35:14
| 34,604,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
rd
|
grouping-methods.Rd
|
\name{grouping-methods}
\docType{methods}
\alias{grouping}
\alias{grouping-methods}
\alias{grouping,Haplotype-method}
\title{Groups haplotypes according to the grouping variable (populations, species, etc.)
}
\description{
Function for creating a matrix with haplotypes as rows, grouping factor (populations, species, etc.) as columns and abundance as entries.
}
\usage{
\S4method{grouping}{Haplotype}(x,factors)
}
\arguments{
\item{x}{an object of class \code{\link{Haplotype}}. }
\item{factors}{a vector or factor giving the grouping variable (populations, species, etc.), with one element per individual. }
}
\value{
a list with two components:
\describe{
\item{\code{hapmat}:}{a matrix with haplotypes as rows, levels of the grouping factor (populations, species, etc.) as columns and abundance as entries.}
\item{\code{hapvec}:}{a vector giving the haplotype identities of individuals. }
}}
\section{Methods}{
\describe{
\item{\code{signature(x = "Haplotype")}}{
}
}}
\seealso{
\code{\link{haplotype}}}
\author{
Caner Aktas, \email{caktas.aca@gmail.com}
}
\examples{
data("dna.obj")
x<-dna.obj[1:6,,as.matrix=FALSE]
# inferring haplotypes from DNA sequences
h<-haplotype(x)
## Grouping haplotypes.
# character vector 'populations' is a grouping factor.
populations<-c("pop1","pop1","pop2","pop3","pop3","pop3")
# length of the argument 'factor' is equal to the number of sequences
g<-grouping(h,factors=populations)
g
}
\keyword{HAPLOTYPE ANALYSIS}
|
b25cbc22188f2d31e9e2a115a4bf953b538f9687
|
eedf388d4b622989f4cc1ec550792ae29053a9b8
|
/bird_lists_maps_traits/birdlife_scraper.R
|
4c5ccd6d26713bc065cf87a94b6a5f7ff5dd0014
|
[] |
no_license
|
jsocolar/colombiaBeta
|
8d1d68b73c2034e7b14ede81dd891262af61a1a4
|
2187270b5d35e4ed244f9950b982af4c56af6ab5
|
refs/heads/master
| 2023-08-03T16:30:49.504699
| 2023-03-22T19:07:03
| 2023-03-22T19:07:03
| 215,572,259
| 2
| 4
| null | 2023-02-16T14:02:08
| 2019-10-16T14:43:21
|
R
|
UTF-8
|
R
| false
| false
| 5,190
|
r
|
birdlife_scraper.R
|
# Scrape the BirdLife DataZone factsheet page for every neotropical forest
# species and cache the raw HTML lines; then parse the habitat table and
# altitudinal limits out of the cached pages.
load('/Users/jacobsocolar/Dropbox/Work/Useful_data/BirdlifeTraits/nf_species.Rdata')
birdlife_list <- readxl::read_xlsx('/Users/jacobsocolar/Dropbox/Work/Colombia/Data/Birds/species_list_creation/HBW-BirdLife_Checklist_v4_Dec19/HBW-BirdLife_List_of_Birds_v4.xlsx')
neotrop_pages <- list()
for(i in 1:length(nf_species)){
print(i)
sn <- nf_species[i]
# Look up the common name and strip characters that break factsheet URLs
cn <- birdlife_list$`Common name`[birdlife_list$`Scientific name` == sn]
cn <- gsub("'", "", cn)
cn <- gsub('á', 'a', cn)
cn <- gsub('ç', 'c', cn)
# Factsheet URLs are "<common-name>-<scientific-name>" joined with hyphens
urlname <- paste(c(strsplit(cn, ' ')[[1]], strsplit(sn, ' ')[[1]]), sep = '-', collapse = '-')
# Special case: this species' factsheet uses the scientific name only
# (presumably the default URL form fails — TODO confirm)
if(sn == 'Vireo chivi'){urlname <- 'vireo-chivi'}
theurl <- paste0('http://datazone.birdlife.org/species/factsheet/', urlname, '/details')
neotrop_pages[[i]] = readLines(theurl)
}
save(neotrop_pages, file = "/Users/jacobsocolar/Dropbox/Work/Useful_data/BirdlifeTraits/neotrop_pages.Rdata")
load('/Users/jacobsocolar/Dropbox/Work/Useful_data/BirdlifeTraits/neotrop_pages.Rdata')
# Confirm that all species pages include the two anchor lines that bracket
# the habitat table; stop() if any page is missing either anchor
for(i in 1:length(neotrop_pages)){
begin_line <- grep('Habitat \\(level 1\\)', neotrop_pages[[i]])
end_line <- grep('Occasional altitudinal limits', neotrop_pages[[i]])
if(length(end_line) == 0 | length(begin_line) == 0){stop()}
}
# Habitat extraction: within the anchored region, find rows flagged
# "suitable" or "major"; for each, the four <td> cells preceding the flag
# (offsets -2..+1 around the match) are pasted into one "; "-separated string
habitats <- as.list(rep(NA, length(nf_species)))
names(habitats) <- nf_species
for(i in 1:length(nf_species)){
begin_line <- grep('Habitat \\(level 1\\)', neotrop_pages[[i]])
end_line <- grep('Occasional altitudinal limits', neotrop_pages[[i]])
lines_suitable1 <- grep('suitable', neotrop_pages[[i]])
lines_suitable <- lines_suitable1[lines_suitable1 > begin_line & lines_suitable1 < end_line]
lines_major1 <- grep('major', neotrop_pages[[i]])
lines_major <- lines_major1[lines_major1 > begin_line & lines_major1 < end_line]
a <- list()
for(k in 1:4){
# k-3 walks the lines at offsets -2, -1, 0, +1 relative to each match
thestring1 <- trimws(neotrop_pages[[i]][lines_suitable + (k-3)])
thestring2 <- stringr::str_remove(thestring1, "<td>")
thestring3 <- stringr::str_remove(thestring2, "</td>")
thestring4 <- trimws(neotrop_pages[[i]][lines_major + (k-3)])
thestring5 <- stringr::str_remove(thestring4, "<td>")
thestring6 <- stringr::str_remove(thestring5, "</td>")
a[[k]] <- c(thestring3, thestring6)
}
habitats[[i]] <- paste(a[[1]], a[[2]], a[[3]], a[[4]], sep = "; ")
}
# Altitude extraction: take the last "Altitude" row before the occasional
# limits section, then split its "low - high" text into two numerics
altitude <- as.list(rep(NA, length(nf_species)))
names(altitude) <- nf_species
for(i in 1:length(nf_species)){
end_line <- grep('Occasional altitudinal limits', neotrop_pages[[i]])
lines_altitude1 <- grep('Altitude', neotrop_pages[[i]])
line_altitude <- max(lines_altitude1[lines_altitude1 < end_line])
thestring1 <- trimws(neotrop_pages[[i]][line_altitude + 2])
thestring2 <- stringr::str_remove(thestring1, "<td>")
thestring3 <- stringr::str_remove(thestring2, '</td>')
a <- strsplit(thestring3, "-")
# gsub("\\D", ...) strips everything but digits before numeric conversion
a1 <- as.numeric(gsub("\\D", "", a[[1]][1]))
a2 <- as.numeric(gsub("\\D", "", a[[1]][2]))
altitude[[i]] <- c(a1, a2)
}
# --- Per-species trait extraction from the cached factsheet pages ---------
# Each loop scans a species' HTML lines for a labelled field and pulls the
# value out of the <td> cell that follows the label. The repeated
# trimws/str_remove boilerplate is factored into one helper.

# Helper: the cell text `offset` lines below `line` on `page`, with the
# surrounding <td></td> tags stripped. Returns character(0) if `line` is
# empty, matching the original inline behavior.
td_text <- function(page, line, offset = 1) {
  txt <- trimws(page[line + offset])
  txt <- stringr::str_remove(txt, "<td>")
  stringr::str_remove(txt, "</td>")
}
# Generation length in years (numeric)
generation <- as.list(rep(NA, length(nf_species)))
names(generation) <- nf_species
for (i in seq_along(nf_species)) {
  line_gen <- grep('Generation length \\(yrs\\)', neotrop_pages[[i]])
  if (length(line_gen) > 1) {stop()}
  generation[[i]] <- as.numeric(td_text(neotrop_pages[[i]], line_gen))
}
# Forest dependency category (character)
forest_dep <- as.list(rep(NA, length(nf_species)))
names(forest_dep) <- nf_species
for (i in seq_along(nf_species)) {
  line_forest <- grep('Forest dependency', neotrop_pages[[i]])
  if (length(line_forest) != 1) {stop()}
  forest_dep[[i]] <- td_text(neotrop_pages[[i]], line_forest)
}
# Migratory status (character)
migratory_status <- as.list(rep(NA, length(nf_species)))
names(migratory_status) <- nf_species
for (i in seq_along(nf_species)) {
  line_migstat <- grep('Migratory status', neotrop_pages[[i]])
  if (length(line_migstat) != 1) {stop()}
  migratory_status[[i]] <- td_text(neotrop_pages[[i]], line_migstat)
}
# Average body mass in grams; "-" on the page means missing (left as NA)
body_mass <- as.list(rep(NA, length(nf_species)))
names(body_mass) <- nf_species
for (i in seq_along(nf_species)) {
  line_avgmass <- grep('Average mass', neotrop_pages[[i]])
  if (length(line_avgmass) != 1) {stop()}
  val <- td_text(neotrop_pages[[i]], line_avgmass)
  if (val != "-") {body_mass[[i]] <- as.numeric(stringr::str_remove(val, ' g'))}
}
# Bundle all scraped traits and cache to disk
birdlife_traits <- list(names = nf_species, habitats = habitats, altitude = altitude,
                        generation = generation, forest_dep = forest_dep,
                        migratory_status = migratory_status, body_mass = body_mass)
save(birdlife_traits, file = '/Users/jacobsocolar/Dropbox/Work/Useful_data/BirdlifeTraits/birdlife_traits.Rdata')
|
53e9e5e3337951af5459ec143b57c32cb42e0588
|
0c02264351bf960a07139d5c02f72921eb41658c
|
/tests/testthat.R
|
2e88d61b37ac855771b501a55238f41bcb59b52a
|
[
"MIT"
] |
permissive
|
geoffwlamb/redditr
|
103ecb240cfff4a459b6d8fd316225edd7797efc
|
1416ba1f611560f70ee9a4cb0d5d09a0bdfe42f7
|
refs/heads/master
| 2020-03-25T07:33:26.250266
| 2019-03-12T03:41:11
| 2019-03-12T03:41:11
| 143,568,023
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
library(testthat)
library(redditr)
test_check("redditr")
|
68de184daecd41ecd7a335c6cd07c68a5e1c607a
|
a913d91888c84c5fcb3954bf5999515339f652aa
|
/ui.R
|
cef2e092d6d65726814c19c518cdd64cee4f7333
|
[] |
no_license
|
6AM-Health/Shiny-App
|
5b0d348372a082c92615732a446b3e827c9e6506
|
5d7bbdbc880f98218c22c3210cf1728aae340807
|
refs/heads/master
| 2020-07-27T13:51:30.782730
| 2019-10-01T14:55:46
| 2019-10-01T14:55:46
| 209,113,088
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,324
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
# BUG FIX: read_csv, %>%, group_by and summarise below are tidyverse
# functions that were used without being attached; only shiny and
# shinydashboard were loaded. (If global.R already attaches them this is a
# harmless no-op — confirm against the rest of the app.)
library(readr)
library(dplyr)
# Transaction-level sales data; Date arrives as m/d/Y text
df <- read_csv('Sales_Byte_ARS_Data_Management - Sales Transactions.csv')
df$Date <- as.Date(df$Date,"%m/%d/%Y")
summary(df)
# Pre-aggregated views feeding the dashboard inputs and plots:
# dollar totals and unit quantities per day ...
daily_sales <- df %>% group_by(Date) %>%
  summarise(total_sales_dollar = sum(Total), total_sales_qty = sum(Qty))
# ... per day and fridge type ...
daily_sales_by_ftype <- df %>% group_by(Date, Fridge_Type) %>%
  summarise(total_sales_dollar = sum(Total), total_sales_qty = sum(Qty))
# ... per day, kiosk and fridge type ...
daily_sales_by_location <- df %>%
  group_by(Date, Kiosk, Fridge_Type) %>%
  summarise(total_sales_dollar = sum(Total), total_sales_qty = sum(Qty))
# ... and per day and product.
daily_sales_by_product <- df %>%
  group_by(Date, Product) %>%
  summarise(total_sales_dollar = sum(Total), total_sales_qty = sum(Qty))
# Dashboard UI: a shinydashboard page with a four-item sidebar. Only the
# "Charts" and "Reports" tabs are implemented; "Dashboard" and "Analysis"
# are placeholder menu entries with no tab body.
shinyUI(
dashboardPage(skin = "black",
# Header logo links back to the 6AM Health storefront
dashboardHeader(title = tags$a(href='https://www.6amhealth.com/',tags$img(src="//cdn.shopify.com/s/files/1/2639/0012/files/6AM_BB_b555713e-b5ab-47ec-a4a3-881dfa1c0567_500x.png?v=1568994487",
height = "30px"),
style = "padding-top:10px; padding-bottom:10px;")),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard"),
menuItem("Charts",tabName = 'visuals'),
menuItem("Reports", tabName = 'reports'),
menuItem("Analysis")
)
),
dashboardBody(
tabItems(
# "Charts" tab: date-range + fridge/kiosk/product filters driving a
# time-series plot rendered by the server as 'timeseriesplot'
tabItem(tabName = "visuals",
fluidPage(
titlePanel('Daily Sales'),
sidebarLayout(
sidebarPanel(
dateRangeInput(inputId = 'Date',
label = 'Select a date range:',
start = '2019-03-19',
end = '2019-09-08',
format = 'yyyy-mm-dd'),
selectizeInput(inputId = "Fridge_Type", label = strong("Fridge Type"),
choices = unique(daily_sales_by_ftype$Fridge_Type)
),
selectizeInput(inputId = 'Kiosk', label=strong('Kiosk'),
choices = unique(daily_sales_by_location$Kiosk),selected = NULL),
selectizeInput(inputId= 'Product', label=strong('Product'),
choices = unique(daily_sales_by_product$Product))
),
mainPanel(
helpText('To view sales breakdown by one criterion, remove your selection for others. To view aggregate sales, remove all selections'),
verbatimTextOutput('description'),
plotOutput(outputId = 'timeseriesplot'),
# Toggle between dollar sales and unit volume on the plot
checkboxInput('qty', 'Click to view sales volume', value=FALSE)
)
)
)
),
# "Reports" tab: the same filter set (input IDs suffixed with 'T')
# driving a data table rendered by the server as 'table'
tabItem(tabName = 'reports',
fluidPage(
titlePanel('Daily Sales Reports'),
sidebarLayout(
sidebarPanel(
dateRangeInput(inputId = 'DateT',
label = 'Select a date range:',
start = '2019-03-19',
end = '2019-09-08',
format = 'yyyy-mm-dd'),
selectizeInput(inputId = "Fridge_TypeT", label = strong("Fridge Type"),
choices = unique(daily_sales_by_ftype$Fridge_Type)
),
selectizeInput(inputId = 'KioskT', label=strong('Kiosk'),
choices = unique(daily_sales_by_location$Kiosk),selected = NULL),
selectizeInput(inputId= 'ProductT', label=strong('Product'),
choices = unique(daily_sales_by_product$Product))
),
mainPanel(
helpText('To view sales breakdown by one criterion, remove your selection for others. To view aggregate sales, remove all selections'),
verbatimTextOutput('descriptionT'),
DT::dataTableOutput("table")
)
)
))
))
)
)
|
7890162102c128f95d161cd34c1709fbc2754e8b
|
bcc3bf661017041dfc14adc9be256996ce0fce56
|
/2019-accountability/district/district_designations.R
|
158f3e5ecc58ef29b6affb4fe5b0fe69e6dbb4e9
|
[] |
no_license
|
tnedu/accountability
|
60bc867c76342bc43e66464150439b10360331e1
|
395c4d880d02cede1ff37ee3d4980046e0bcf783
|
refs/heads/master
| 2021-11-02T03:30:30.545850
| 2019-10-23T18:39:11
| 2019-10-23T18:39:11
| 42,124,295
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,258
|
r
|
district_designations.R
|
# 2019 district accountability designations: average the "All Students"
# overall scores and the subgroup overall scores per district, combine them
# 60/40, rank districts, and assign final determinations.
library(acct)
library(zeallot)
library(tidyverse)
district_accountability <- read_csv("N:/ORP_accountability/data/2019_final_accountability_files/district_accountability_file.csv")
# zeallot destructuring: split() orders groups FALSE then TRUE, so the first
# element receives the subgroup rows and the second the "All Students" rows
c(subgroups, all_students) %<-% (district_accountability %>% split(.$subgroup == "All Students"))
# Mean overall score across indicators for All Students, per district
all_students_overall <- all_students %>%
  group_by(system) %>%
  summarise(achievement_average = mean(overall_score, na.rm = TRUE))
# Per-subgroup means first, then averaged across subgroups, per district
subgroups_overall <- subgroups %>%
  group_by(system, subgroup) %>%
  summarise_at("overall_score", mean, na.rm = TRUE) %>%
  group_by(system) %>%
  summarise(subgroup_average = mean(overall_score, na.rm = TRUE))
final <- left_join(all_students_overall, subgroups_overall, by = "system") %>%
  transmute(
    system,
    achievement_average,
    # Determination bands apply identically to both averages
    achievement_determination = case_when(
      achievement_average >= 3.1 ~ "Exemplary",
      achievement_average >= 2.1 ~ "Advancing",
      achievement_average >= 1.1 ~ "Satisfactory",
      achievement_average < 1.1 ~ "Marginal"
    ),
    subgroup_average,
    subgroup_determination = case_when(
      subgroup_average >= 3.1 ~ "Exemplary",
      subgroup_average >= 2.1 ~ "Advancing",
      subgroup_average >= 1.1 ~ "Satisfactory",
      subgroup_average < 1.1 ~ "Marginal"
    ),
    # 60% All Students / 40% subgroup weighting; round5 is banker's-proof
    # rounding from the acct package
    overall_average = round5(0.6 * achievement_average + 0.4 * subgroup_average, 1),
    rank = if_else(not_na(overall_average), rank(overall_average, ties.method = "min"), NA_integer_),
    denom = sum(not_na(overall_average)),
    percentile = round(100 * rank/denom, 1),
    # Bottom 5% of districts fail the minimum-performance gate
    met_minimum_performance = as.integer(percentile > 5),
    final_determination = case_when(
      met_minimum_performance == 0 ~ "In Need of Improvement",
      overall_average >= 3.1 ~ "Exemplary",
      overall_average >= 2.1 ~ "Advancing",
      overall_average >= 1.1 ~ "Satisfactory",
      overall_average < 1.1 ~ "Marginal"
    )
  ) %>%
  # Districts with no scores produce NaN means; normalize those to NA
  mutate_at(c("achievement_average", "subgroup_average", "overall_average"), ~ if_else(is.nan(.), NA_real_, .))
write_csv(final, path = "N:/ORP_accountability/data/2019_final_accountability_files/district_designations.csv", na = "")
|
28ca65cc04a83d0bac842c07b3c584fac15fe87e
|
31e8d39ef230c71a00021c43f67c20fb005ef86f
|
/plot3.R
|
1b039ca5759dde82951a0fdd862a1024f11e19f3
|
[] |
no_license
|
kfirestone/ExData_Plotting1
|
92496d8c08490aa953ce6ece48ae395807d02bd2
|
c7af19c4b15a544c311373e22945a55898074589
|
refs/heads/master
| 2021-01-24T14:45:40.089813
| 2014-10-11T20:20:15
| 2014-10-11T20:20:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
plot3.R
|
# plot3.R — energy sub-metering time series for 2007-02-01..2007-02-02.
# The skip/nrows window selects exactly those two days from the full file,
# so column names must be reattached manually.
data <- read.table("household_power_consumption.txt", header = TRUE, sep =";",
                   na.strings = "?", skip = 66636, nrows = 2880) #Reads in data with dates from 02/01/2007 to 02/02/2007
colnames(data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage",
                    "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3") #Assigns column names
                                                                                              #to data
data$Time_Date <- strptime(paste(data$Date, data$Time, sep = ","), format = "%d/%m/%Y,%H:%M:%S") #Creates new column
                                                                                                 #to combine "Date" and
                                                                                                 #"Time" columns.
png(filename = "plot3.png", height = 480, width = 480) #Open png file device
plot(data$Time_Date, data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering") #Creates plot
lines(data$Time_Date, data$Sub_metering_2, type = "l", col = "red") #Adds Sub_metering_2 data to plot in a red line
lines(data$Time_Date, data$Sub_metering_3, type = "l", col = "blue") #Adds Sub_metering_3 data to plot in a blue line
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) #Creates legend in top right corner
dev.off() #Closes file device
|
26176ad4bf31d5a99786ef1b076d18ff661c3e73
|
23cad221b4fd1656e27038880f500eed6695fde0
|
/R/split_clusters.R
|
fca14dc070b11ba1b45eef29ff331c3ee8d15a08
|
[
"GPL-2.0-only",
"MIT"
] |
permissive
|
campbio/celda
|
91f8c64424fe24a74a1359b6dde371ab8ff2aea1
|
92905bda2833c9beda48c6a9404a86a102cd0553
|
refs/heads/master
| 2023-02-17T09:41:27.551599
| 2023-02-15T19:01:52
| 2023-02-15T19:01:52
| 158,611,235
| 134
| 32
|
MIT
| 2023-02-17T01:39:55
| 2018-11-21T22:01:57
|
R
|
UTF-8
|
R
| false
| false
| 17,468
|
r
|
split_clusters.R
|
# .cCCalcLL = function(mCPByS, nGByCP, s, z, K, nS, nG, alpha, beta)
# Try to improve the cell clustering `z` by moving one cluster's cells to
# their second-best assignment and splitting another cluster in two, keeping
# whichever candidate (including "no change") has the best log-likelihood.
#
# counts: gene-by-cell count matrix; mCPByS/nGByCP/nCP: current count
# aggregates; s: sample labels; z: current cluster labels (1..K); zProb:
# per-cell cluster probability matrix used to find each cell's second-best
# cluster. Returns a list with the (possibly updated) z, updated aggregates,
# and a status message.
.cCSplitZ <- function(counts,
                      mCPByS,
                      nGByCP,
                      nCP,
                      s,
                      z,
                      K,
                      nS,
                      nG,
                      alpha,
                      beta,
                      zProb,
                      maxClustersToTry = 10,
                      minCell = 3) {
  ## Identify clusters large enough (>= minCell cells) to be split
  zTa <- tabulate(z, K)
  zToSplit <- which(zTa >= minCell)
  zNonEmpty <- which(zTa > 0)
  if (length(zToSplit) == 0) {
    m <- paste0(
      date(),
      " .... Cluster sizes too small. No additional splitting was",
      " performed."
    )
    ## BUG FIX: mCPByS and nGByCP were returned unnamed here, so callers
    ## reading result$mCPByS / result$nGByCP got NULL; the sibling
    ## .cCGSplitZ names these elements in its early return.
    return(list(
      z = z,
      mCPByS = mCPByS,
      nGByCP = nGByCP,
      nCP = nCP,
      message = m
    ))
  }
  ## Precompute a binary split of every splittable cluster
  clustSplit <- vector("list", K)
  for (i in zToSplit) {
    clustLabel <- .celda_C(
      counts[, z == i],
      K = 2,
      zInitialize = "random",
      maxIter = 5,
      splitOnIter = -1,
      splitOnLast = FALSE,
      verbose = FALSE,
      reorder = FALSE
    )
    clustSplit[[i]] <- as.integer(celdaClusters(clustLabel)$z)
  }
  ## Find second best assignment given current assignments for each cell
  zProb[cbind(seq(nrow(zProb)), z)] <- NA
  zSecond <- apply(zProb, 1, which.max)
  ## Candidate assignments and their log-likelihoods; column 1 = no change
  zSplit <- matrix(NA,
    nrow = length(z),
    ncol = length(zToSplit) * maxClustersToTry
  )
  zSplitLl <- rep(NA, times = length(zToSplit) * maxClustersToTry)
  zSplitLl[1] <- .cCCalcLL(mCPByS, nGByCP, s, z, K, nS, nG, alpha, beta)
  zSplit[, 1] <- z
  ## Rank clusters by the likelihood after moving their cells to each
  ## cell's second-best cluster; the top candidates are tried below
  previousZ <- z
  llShuffle <- rep(NA, K)
  for (i in zNonEmpty) {
    ix <- z == i
    newZ <- z
    newZ[ix] <- zSecond[ix]
    p <- .cCReDecomposeCounts(counts, s, newZ, previousZ, nGByCP, K)
    nGByCP <- p$nGByCP
    mCPByS <- p$mCPByS
    ## NOTE(review): `z` (not newZ) is passed here as in the original;
    ## confirm this is intentional before changing.
    llShuffle[i] <- .cCCalcLL(mCPByS, nGByCP, s, z, K, nS, nG, alpha, beta)
    previousZ <- newZ
  }
  zToShuffle <- utils::head(order(llShuffle, decreasing = TRUE, na.last = NA),
    n = maxClustersToTry
  )
  pairs <- c(NA, NA)
  splitIx <- 2
  for (i in zToShuffle) {
    otherClusters <- setdiff(zToSplit, i)
    for (j in otherClusters) {
      newZ <- z
      ## Assign cluster i to the next most similar cluster (excluding
      ## cluster j)
      ixToMove <- z == i
      newZ[ixToMove] <- zSecond[ixToMove]
      ## Split cluster j according to the clustering defined above,
      ## reusing label i for one half
      ixToSplit <- z == j
      newZ[ixToSplit] <- ifelse(clustSplit[[j]] == 1, j, i)
      p <- .cCReDecomposeCounts(counts, s, newZ, previousZ, nGByCP, K)
      nGByCP <- p$nGByCP
      mCPByS <- p$mCPByS
      ## Calculate likelihood of split
      zSplitLl[splitIx] <- .cCCalcLL(
        mCPByS,
        nGByCP,
        s,
        z,
        K,
        nS,
        nG,
        alpha,
        beta
      )
      zSplit[, splitIx] <- newZ
      splitIx <- splitIx + 1L
      previousZ <- newZ
      pairs <- rbind(pairs, c(i, j))
    }
  }
  ## Keep the best candidate; index 1 is the unchanged assignment
  select <- which.max(zSplitLl)
  if (select == 1) {
    m <- paste0(date(), " .... No additional splitting was performed.")
  } else {
    m <- paste0(
      date(),
      " .... Cluster ",
      pairs[select, 1],
      " was reassigned and cluster ",
      pairs[select, 2],
      " was split in two."
    )
  }
  p <- .cCReDecomposeCounts(counts, s, zSplit[, select], previousZ, nGByCP, K)
  return(list(
    z = zSplit[, select],
    mCPByS = p$mCPByS,
    nGByCP = p$nGByCP,
    nCP = p$nCP,
    message = m
  ))
}
# .cCGCalcLL = function(K, L, mCPByS, nTSByCP, nByG, nByTS, nGByTS,
# nS, nG, alpha, beta, delta, gamma)
# Cell-cluster splitting for the combined cell/gene model: same strategy as
# .cCSplitZ (reassign one cluster, split another, keep the best candidate)
# but working on module-level count aggregates (nTSByC/nTSByCP) and scoring
# with the joint likelihood .cCGCalcLL.
.cCGSplitZ <- function(counts,
                       mCPByS,
                       nTSByC,
                       nTSByCP,
                       nByG,
                       nByTS,
                       nGByTS,
                       nCP,
                       s,
                       z,
                       K,
                       L,
                       nS,
                       nG,
                       alpha,
                       beta,
                       delta,
                       gamma,
                       zProb,
                       maxClustersToTry = 10,
                       minCell = 3) {
  ## Identify clusters large enough (>= minCell cells) to be split
  zTa <- tabulate(z, K)
  zToSplit <- which(zTa >= minCell)
  zNonEmpty <- which(zTa > 0)
  if (length(zToSplit) == 0) {
    m <- paste0(
      date(),
      " .... Cluster sizes too small. No additional splitting was",
      " performed."
    )
    return(list(
      z = z,
      mCPByS = mCPByS,
      nTSByCP = nTSByCP,
      nCP = nCP,
      message = m
    ))
  }
  ## Precompute a binary split of every splittable cluster
  clustSplit <- vector("list", K)
  for (i in zToSplit) {
    clustLabel <- .celda_C(counts[, z == i],
      K = 2,
      zInitialize = "random",
      maxIter = 5,
      splitOnIter = -1,
      splitOnLast = FALSE,
      verbose = FALSE,
      reorder = FALSE
    )
    clustSplit[[i]] <- as.integer(celdaClusters(clustLabel)$z)
  }
  ## Find second best assignment given current assignments for each cell
  zProb[cbind(seq(nrow(zProb)), z)] <- NA
  zSecond <- apply(zProb, 1, which.max)
  ## Candidate assignments and their log-likelihoods; column 1 = no change
  zSplit <- matrix(NA,
    nrow = length(z),
    ncol = length(zToSplit) * maxClustersToTry
  )
  ## BUG FIX: this was `rep(NA, ncol = ...)` — rep() has no `ncol`
  ## argument, so the intended preallocation silently produced a length-1
  ## vector (later assignments grew it element by element). Results were
  ## unaffected, but the preallocation now works as intended.
  zSplitLl <- rep(NA, length(zToSplit) * maxClustersToTry)
  zSplitLl[1] <- .cCGCalcLL(
    K,
    L,
    mCPByS,
    nTSByCP,
    nByG,
    nByTS,
    nGByTS,
    nS,
    nG,
    alpha,
    beta,
    delta,
    gamma
  )
  zSplit[, 1] <- z
  ## Rank clusters by the likelihood after moving their cells to each
  ## cell's second-best cluster; the top candidates are tried below
  previousZ <- z
  llShuffle <- rep(NA, K)
  for (i in zNonEmpty) {
    ix <- z == i
    newZ <- z
    newZ[ix] <- zSecond[ix]
    p <- .cCReDecomposeCounts(nTSByC, s, newZ, previousZ, nTSByCP, K)
    nTSByCP <- p$nGByCP
    mCPByS <- p$mCPByS
    llShuffle[i] <- .cCGCalcLL(
      K,
      L,
      mCPByS,
      nTSByCP,
      nByG,
      nByTS,
      nGByTS,
      nS,
      nG,
      alpha,
      beta,
      delta,
      gamma
    )
    previousZ <- newZ
  }
  zToShuffle <- utils::head(order(llShuffle, decreasing = TRUE, na.last = NA),
    n = maxClustersToTry
  )
  pairs <- c(NA, NA)
  splitIx <- 2
  for (i in zToShuffle) {
    otherClusters <- setdiff(zToSplit, i)
    for (j in otherClusters) {
      newZ <- z
      ## Assign cluster i to the next most similar cluster (excluding
      ## cluster j)
      ixToMove <- z == i
      newZ[ixToMove] <- zSecond[ixToMove]
      ## Split cluster j according to the clustering defined above,
      ## reusing label i for one half
      ixToSplit <- z == j
      newZ[ixToSplit] <- ifelse(clustSplit[[j]] == 1, j, i)
      p <- .cCReDecomposeCounts(nTSByC, s, newZ, previousZ, nTSByCP, K)
      nTSByCP <- p$nGByCP
      mCPByS <- p$mCPByS
      ## Calculate likelihood of split
      zSplitLl[splitIx] <- .cCGCalcLL(
        K,
        L,
        mCPByS,
        nTSByCP,
        nByG,
        nByTS,
        nGByTS,
        nS,
        nG,
        alpha,
        beta,
        delta,
        gamma
      )
      zSplit[, splitIx] <- newZ
      splitIx <- splitIx + 1L
      previousZ <- newZ
      pairs <- rbind(pairs, c(i, j))
    }
  }
  ## Keep the best candidate; index 1 is the unchanged assignment
  select <- which.max(zSplitLl)
  if (select == 1) {
    m <- paste0(date(), " .... No additional splitting was performed.")
  } else {
    m <- paste0(
      date(),
      " .... Cluster ",
      pairs[select, 1],
      " was reassigned and cluster ",
      pairs[select, 2],
      " was split in two."
    )
  }
  p <- .cCReDecomposeCounts(
    nTSByC,
    s,
    zSplit[, select],
    previousZ,
    nTSByCP,
    K
  )
  return(list(
    z = zSplit[, select],
    mCPByS = p$mCPByS,
    nTSByCP = p$nGByCP,
    nCP = p$nCP,
    message = m
  ))
}
# Gene-module splitting for the combined cell/gene model. Cells are first
# collapsed into at most KSubclusters sub-clusters per z cluster to shrink
# the matrix used for estimating module splits; then modules are reassigned
# and split in the same reassign-one/split-one pattern as .cCGSplitZ.
.cCGSplitY <- function(counts,
                       y,
                       mCPByS,
                       nGByCP,
                       nTSByC,
                       nTSByCP,
                       nByG,
                       nByTS,
                       nGByTS,
                       nCP,
                       s,
                       z,
                       K,
                       L,
                       nS,
                       nG,
                       alpha,
                       beta,
                       delta,
                       gamma,
                       yProb,
                       maxClustersToTry = 10,
                       KSubclusters = 10,
                       minCell = 3) {
  #########################
  ## First, the cell dimension of the original matrix will be reduced by
  ## splitting each z cluster into 'KSubclusters'.
  #########################
  ## This will not be as big as the original matrix (which can take a lot of
  ## time to process with large number of cells), but not as small as the
  ## 'nGByCP' with current z assignments
  zTa <- tabulate(z, K)
  zNonEmpty <- which(zTa > 0)
  tempZ <- rep(0, length(z))
  currentTopZ <- 0
  for (i in zNonEmpty) {
    ix <- z == i
    if (zTa[i] <= KSubclusters) {
      ## Cluster already small: every cell becomes its own sub-cluster
      tempZ[ix] <- seq(currentTopZ + 1, currentTopZ + zTa[i])
    } else {
      clustLabel <- .celda_C(counts[, z == i],
        K = KSubclusters,
        zInitialize = "random",
        maxIter = 5,
        splitOnIter = -1,
        splitOnLast = FALSE,
        verbose = FALSE,
        reorder = FALSE
      )
      tempZ[ix] <- as.integer(celdaClusters(clustLabel)$z) + currentTopZ
    }
    currentTopZ <- max(tempZ, na.rm = TRUE)
  }
  ## Decompose counts according to new/temp z labels
  tempNGByCP <- .colSumByGroup(counts, group = tempZ, K = currentTopZ)
  #########################
  ## Second, different y splits will be estimated and tested
  #########################
  ## Identify modules large enough (>= minCell genes) to be split
  yTa <- tabulate(y, L)
  yToSplit <- which(yTa >= minCell)
  yNonEmpty <- which(yTa > 0)
  if (length(yToSplit) == 0) {
    m <- paste0(
      date(),
      " .... Cluster sizes too small. No additional splitting was",
      " performed."
    )
    ## NOTE(review): this early return's shape (mCPByS, nTSByCP, nCP)
    ## differs from the normal return (nTSByCP, nByTS, nGByTS) — confirm
    ## callers handle both shapes.
    return(list(
      y = y,
      mCPByS = mCPByS,
      nTSByCP = nTSByCP,
      nCP = nCP,
      message = m
    ))
  }
  ## Precompute a binary split of every splittable module
  clustSplit <- vector("list", L)
  for (i in yToSplit) {
    clustLabel <- .celda_G(tempNGByCP[y == i, ],
      L = 2,
      yInitialize = "random",
      maxIter = 5,
      splitOnIter = -1,
      splitOnLast = FALSE,
      verbose = FALSE,
      reorder = FALSE
    )
    clustSplit[[i]] <- as.integer(celdaClusters(clustLabel)$y)
  }
  ## Find second best assignment given current assignments for each gene
  yProb[cbind(seq(nrow(yProb)), y)] <- NA
  ySecond <- apply(yProb, 1, which.max)
  ## Candidate assignments and their log-likelihoods; column 1 = no change
  ySplit <- matrix(NA,
    nrow = length(y),
    ncol = length(yToSplit) * maxClustersToTry
  )
  ## BUG FIX: this was `rep(NA, ncol = ...)` — rep() has no `ncol`
  ## argument, so the intended preallocation silently produced a length-1
  ## vector. Results were unaffected; preallocation now works as intended.
  ySplitLl <- rep(NA, length(yToSplit) * maxClustersToTry)
  ySplitLl[1] <- .cCGCalcLL(
    K,
    L,
    mCPByS,
    nTSByCP,
    nByG,
    nByTS,
    nGByTS,
    nS,
    nG,
    alpha,
    beta,
    delta,
    gamma
  )
  ySplit[, 1] <- y
  ## Rank modules by the likelihood after moving their genes to each
  ## gene's second-best module; the top candidates are tried below
  previousY <- y
  llShuffle <- rep(NA, L)
  for (i in yNonEmpty) {
    ix <- y == i
    newY <- y
    newY[ix] <- ySecond[ix]
    p <- .cGReDecomposeCounts(nGByCP, newY, previousY, nTSByCP, nByG, L)
    nTSByCP <- p$nTSByC
    nByTS <- p$nByTS
    nGByTS <- p$nGByTS
    llShuffle[i] <- .cCGCalcLL(
      K,
      L,
      mCPByS,
      nTSByCP,
      nByG,
      nByTS,
      nGByTS,
      nS,
      nG,
      alpha,
      beta,
      delta,
      gamma
    )
    previousY <- newY
  }
  yToShuffle <- utils::head(order(llShuffle, decreasing = TRUE, na.last = NA),
    n = maxClustersToTry
  )
  pairs <- c(NA, NA)
  splitIx <- 2
  for (i in yToShuffle) {
    otherClusters <- setdiff(yToSplit, i)
    for (j in otherClusters) {
      newY <- y
      ## Assign module i to the next most similar module (excluding
      ## module j)
      ixToMove <- y == i
      newY[ixToMove] <- ySecond[ixToMove]
      ## Split module j according to the clustering defined above,
      ## reusing label i for one half
      ixToSplit <- y == j
      newY[ixToSplit] <- ifelse(clustSplit[[j]] == 1, j, i)
      p <- .cGReDecomposeCounts(nGByCP, newY, previousY, nTSByCP, nByG, L)
      nTSByCP <- p$nTSByC
      nByTS <- p$nByTS
      nGByTS <- p$nGByTS
      ## Calculate likelihood of split
      ySplitLl[splitIx] <- .cCGCalcLL(
        K,
        L,
        mCPByS,
        nTSByCP,
        nByG,
        nByTS,
        nGByTS,
        nS,
        nG,
        alpha,
        beta,
        delta,
        gamma
      )
      ySplit[, splitIx] <- newY
      splitIx <- splitIx + 1L
      previousY <- newY
      pairs <- rbind(pairs, c(i, j))
    }
  }
  ## Keep the best candidate; index 1 is the unchanged assignment
  select <- which.max(ySplitLl)
  if (select == 1) {
    m <- paste0(date(), " .... No additional splitting was performed.")
  } else {
    m <- paste0(
      date(),
      " .... Cluster ",
      pairs[select, 1],
      " was reassigned and cluster ",
      pairs[select, 2],
      " was split in two."
    )
  }
  p <- .cGReDecomposeCounts(
    nGByCP,
    ySplit[, select],
    previousY,
    nTSByCP,
    nByG,
    L
  )
  return(list(
    y = ySplit[, select],
    nTSByCP = p$nTSByC,
    nByTS = p$nByTS,
    nGByTS = p$nGByTS,
    message = m
  ))
}
# .cGCalcLL = function(nTSByC, nByTS, nByG, nGByTS, nM, nG, L, beta, delta,
# gamma) {
# Gene-module splitting for the gene-only model: reassign one module's
# genes to their second-best module and split another module in two, keeping
# whichever candidate (including "no change") has the best log-likelihood
# under .cGCalcLL.
.cGSplitY <- function(counts,
                      y,
                      nTSByC,
                      nByTS,
                      nByG,
                      nGByTS,
                      nM,
                      nG,
                      L,
                      beta,
                      delta,
                      gamma,
                      yProb,
                      minFeature = 3,
                      maxClustersToTry = 10) {
  ## Identify modules large enough (>= minFeature genes) to be split
  yTa <- table(factor(y, levels = seq(L)))
  yToSplit <- which(yTa >= minFeature)
  yNonEmpty <- which(yTa > 0)
  if (length(yToSplit) == 0) {
    m <- paste0(
      date(),
      " .... Cluster sizes too small. No additional splitting was",
      " performed."
    )
    return(list(
      y = y,
      nTSByC = nTSByC,
      nByTS = nByTS,
      nGByTS = nGByTS,
      message = m
    ))
  }
  ## Precompute a binary split of every splittable module
  clustSplit <- vector("list", L)
  for (i in yToSplit) {
    clustLabel <- .celda_G(counts[y == i, ],
      L = 2,
      yInitialize = "random",
      maxIter = 5,
      splitOnIter = -1,
      splitOnLast = FALSE,
      verbose = FALSE,
      reorder = FALSE
    )
    clustSplit[[i]] <- as.integer(celdaClusters(clustLabel)$y)
  }
  ## Find second best assignment given current assignments for each gene
  yProb[cbind(seq(nrow(yProb)), y)] <- NA
  ySecond <- apply(yProb, 1, which.max)
  ## Candidate assignments and their log-likelihoods; column 1 = no change
  ySplit <- matrix(NA,
    nrow = length(y),
    ncol = length(yToSplit) * maxClustersToTry
  )
  ## BUG FIX: this was `rep(NA, ncol = ...)` — rep() has no `ncol`
  ## argument, so the intended preallocation silently produced a length-1
  ## vector. Results were unaffected; preallocation now works as intended.
  ySplitLl <- rep(NA, length(yToSplit) * maxClustersToTry)
  ySplitLl[1] <- .cGCalcLL(
    nTSByC,
    nByTS,
    nByG,
    nGByTS,
    nM,
    nG,
    L,
    beta,
    delta,
    gamma
  )
  ySplit[, 1] <- y
  ## Rank modules by the likelihood after moving their genes to each
  ## gene's second-best module; the top candidates are tried below
  llShuffle <- rep(NA, L)
  previousY <- y
  for (i in yNonEmpty) {
    ix <- y == i
    newY <- y
    newY[ix] <- ySecond[ix]
    p <- .cGReDecomposeCounts(counts, newY, previousY, nTSByC, nByG, L)
    llShuffle[i] <- .cGCalcLL(
      p$nTSByC,
      p$nByTS,
      nByG,
      p$nGByTS,
      nM,
      nG,
      L,
      beta,
      delta,
      gamma
    )
    previousY <- newY
  }
  yToShuffle <- utils::head(order(llShuffle, decreasing = TRUE, na.last = NA),
    n = maxClustersToTry
  )
  pairs <- c(NA, NA)
  splitIx <- 2
  for (i in yToShuffle) {
    otherClusters <- setdiff(yToSplit, i)
    for (j in otherClusters) {
      newY <- y
      ## Assign module i to the next most similar module (excluding
      ## module j) as defined above by the spearman correlation
      ixToMove <- y == i
      newY[ixToMove] <- ySecond[ixToMove]
      ## Split module j according to the clustering defined above,
      ## reusing label i for one half
      ixToSplit <- y == j
      newY[ixToSplit] <- ifelse(clustSplit[[j]] == 1, j, i)
      ## Calculate likelihood of split
      p <- .cGReDecomposeCounts(counts, newY, previousY, nTSByC, nByG, L)
      ySplitLl[splitIx] <- .cGCalcLL(
        p$nTSByC,
        p$nByTS,
        nByG,
        p$nGByTS,
        nM,
        nG,
        L,
        beta,
        delta,
        gamma
      )
      ySplit[, splitIx] <- newY
      splitIx <- splitIx + 1L
      previousY <- newY
      pairs <- rbind(pairs, c(i, j))
    }
  }
  ## Keep the best candidate; index 1 is the unchanged assignment
  select <- which.max(ySplitLl)
  if (select == 1) {
    m <- paste0(date(), " .... No additional splitting was performed.")
  } else {
    m <- paste0(
      date(),
      " .... Cluster ",
      pairs[select, 1],
      " was reassigned and cluster ",
      pairs[select, 2],
      " was split in two."
    )
  }
  p <- .cGReDecomposeCounts(
    counts,
    ySplit[, select],
    previousY,
    nTSByC,
    nByG,
    L
  )
  return(list(
    y = ySplit[, select],
    nTSByC = p$nTSByC,
    nByTS = p$nByTS,
    nGByTS = p$nGByTS,
    message = m
  ))
}
|
a62c775fb0d4d8177b62c36ccd00971c91810965
|
a176626eb55b6525d5a41e2079537f2ef51d4dc7
|
/Uni/Projects/code/P047.BW_MAIAC/archive/cn004_BW_lags_v2.R
|
579c269f071dea85596ca10eb7271e6934eced71
|
[] |
no_license
|
zeltak/org
|
82d696b30c7013e95262ad55f839998d0280b72b
|
d279a80198a1dbf7758c9dd56339e8a5b5555ff2
|
refs/heads/master
| 2021-01-21T04:27:34.752197
| 2016-04-16T04:27:57
| 2016-04-16T04:27:57
| 18,008,592
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,853
|
r
|
cn004_BW_lags_v2.R
|
library(data.table)
library(plyr)
library(reshape2)
library(foreign)
library(Hmisc)
library(mgcv)
library(FNN)
library(ggplot2)
######################################
# prediction for enrollment locations
#######################################
bwfull <- read.dbf("/media/NAS/Uni/Projects/P047_BW_MAIAC/2.Gather_data/FN003_BW_data/bwall.dbf")
bwfull<-as.data.table(bwfull)
l=seq(names(bwfull));names(l)=names(bwfull);
l
xyguid<-fread("/media/NAS/Uni/Projects/P047_BW_MAIAC/2.Gather_data/FN007_Key_tables/locxy0308_guid_lpmid.csv")
# l=seq(names(xyguid));names(l)=names(xyguid);
# l
xyguid<-xyguid[,c(4,7),with=FALSE]
setkey(xyguid,uniqueid_y)
setkey(bwfull,uniqueid_y)
#make sure to allow cartesian
bwfull.g <- merge(bwfull,xyguid)
#subset data (short dataset)
bwfull.s<-bwfull.g[,c("bdob","byob","birthw","ges_calc","uniqueid_y","guid"),with=FALSE]
#create unique location
# lengthen out to each day of pregnancy
setnames(bwfull.s, c("bdob", "byob","uniqueid_y"), c("birthdate", "birthyear","id"))
bwfull.s[, birthdate := as.Date(strptime(birthdate, format = "%Y-%m-%d"))]
# new variable for start of gestation using the best gestational age (in weeks)
bwfull.s[, pregstart := birthdate - 7*ges_calc]
#subset to current expo year range (all pregnancies that start after first day of exposure)
bwfull.s <- bwfull.s[pregstart >= as.Date("2003-01-01") , ]
bwfull.s[,ges_calc:=as.integer(ges_calc)]
bwfull.s[,birthw:=as.integer(birthw)]
# create every single day of pregnancy for each pregnancy
gestlong <- bwfull.s[,list(day = seq(.SD$pregstart, .SD$birthdate -1, by = "day")),by=id]
setkey(gestlong,id)
gestlong <- merge(gestlong, bwfull.s, by = "id")
######## import pollution sets
p2003<-fread("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/FN40_steve_clean/finalprPM03.csv",select=c(1,2,3))
p2004<-fread("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/FN40_steve_clean/finalprPM04.csv",select=c(1,2,3))
p2005<-fread("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/FN40_steve_clean/finalprPM05.csv",select=c(1,2,3))
p2006<-fread("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/FN40_steve_clean/finalprPM06.csv",select=c(1,2,3))
p2007<-fread("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/FN40_steve_clean/finalprPM07.csv",select=c(1,2,3))
p2008<-fread("/media/NAS/Uni/Projects/P031_MIAC_PM/3.Work/2.Gather_data/FN40_steve_clean/finalprPM08.csv",select=c(1,2,3))
allbestpred <- rbind(p2003,p2004,p2005,p2006,p2007,p2008)
rm(p2003,p2004,p2005,p2006,p2007,p2008)
gc()
allbestpred$guid<-as.numeric(allbestpred$guid)
#common dates
allbestpred[, day := as.Date(strptime(day, format = "%Y-%m-%d"))]
gestlong$day<-gestlong$birthdate
setkey(gestlong,guid,day)
setkey(allbestpred ,guid, day)
#make sure to allow cartesian
gestlong.pm <- merge(gestlong,allbestpred,all.x=TRUE)
summary(gestlong.pm$predpm25)
####this is where we calculate the exposure per period for each participent-
#pmperg-exposure all pregnancy
gestlong.pm.lags <- gestlong.pm[, list(pmpreg = mean(predpm25),
pm3rdT = mean(tail(.SD[,predpm25], 90)),
pmlast30 = mean(tail(.SD[,predpm25], 30)),
pm1stT = mean(head(.SD[,predpm25], 90)),
pmweek12to24 = mean(.SD[84:168,predpm25]),
pm2ndT = mean(.SD[91:175,predpm25]),
pmf20w = mean(.SD[1:140,predpm25]),
guid = guid[1]),by=id]
#As far as the lags, I met with Emily yesterday, and if we proceed, we are thinking 0-12.99 weeks (1st trimester), 13 weeks-24.99 weeks (2nd trimester), 25 weeks-delivery (3rd trimester), and LMP-20 weeks (which is often considered a relevant exposure window for the outcome of gestational hypertension).
head(bw.o1,n=3)
saveRDS(gestlong.pm.lags,"/media/NAS/Uni/Projects/P047_BW_MAIAC/2.Gather_data/FN008_Fin_data/bw_pm1knodup.rds")
summary(gestlong.pm.lags)
# join back
setkey(bwfull.s ,id)
setkey(gestlong.pm.lags,id)
bw.o1 <- merge(bwfull.s , gestlong.pm.lags)
head(bw.o1,n=3)
# histogram
ggplot(bw.o1, aes(pmpreg)) + geom_histogram()
# show this with ggmap
# library(ggmap)
# MxC_Map_df <- get_map(location = 'massachusetts', maptype = "hybrid", zoom = 9)
# str(MxC_Map_df)
# P4 <- ggmap(MxC_Map_df, darken = c(0.5, "white"))
# P4 +
# geom_point(data = bw.o1 ,
# aes(-longdd, latdd, color = pmpreg, size = pmpreg)) +
# theme_bw(10) +
# ggtitle("Predictions over pregnancy")
# merge in other covariates
bwfull.s <- merge(bwfull.s, participants[etapa == "00", list(folio, peso_h, talla_h, fecha_naci_M)])
# some pre-processing
# construct seasonality terms
bwfull.s[, jday := as.numeric(format(birthdate, "%j"))]
bwfull.s[, costime := cos(2*pi*jday/365.25)]
bwfull.s[, sintime := sin(2*pi*jday/365.25)]
bwfull.s[, female := sex - 1]
# simple regression
summary(lm(Fenton_Z_score ~ pmpreg + sintime + costime, data=bwfull.s))
summary(lm(peso_h ~ monpreg + gestage_comb + female + costime + sintime, data=gestpred[gestage_comb >= 37,]))
# add random intercept for aodid
summary(lmer(Fenton_Z_score ~ pmpreg + (1|aodid), data=gestpred))
summary(lmer(peso_h ~ pmpreg + gestage_comb + female + costime + sintime + (1|aodid), data=gestpred[gestage_comb >= 37,]))
summary(lmer(peso_h ~ pmlast90 + gestage_comb + female + costime + sintime + (1|aodid), data=gestpred[gestage_comb >= 37,]))
ggplot(gestpred, aes(pmpreg, Fenton_Z_score)) + geom_point() + geom_smooth()
ggplot(gestpred, aes(pmpreg, gestage_comb)) + geom_point() + geom_smooth()
ggplot(gestpred, aes(pmpreg, peso_h)) + geom_point() + geom_smooth()
# bring in land use terms
gestpred <- merge(gestpred,aodidlur[,list(aodid,elev,rden,rden_OSM)], all.x = T, by="aodid")
describe(gestpred[, list(elev,rden)])
|
1dc6442fd47637f5b21d87b8c324ada629a5e604
|
680f44adcd020315efe719852ab4b112ad89bc83
|
/apps/195-radio-empty/tests/testthat/test-mytest.R
|
c17d6aa37ed712120288e31116f7038454ae1996
|
[
"MIT"
] |
permissive
|
rstudio/shinycoreci-apps
|
73e23b74412437982275d60ded5ac848fc900cbe
|
add53dde46fc9c31f2063f362ea30ca4da3b2426
|
refs/heads/main
| 2023-04-15T09:10:38.668013
| 2022-06-15T17:53:03
| 2022-06-15T17:57:59
| 228,669,687
| 39
| 5
|
NOASSERTION
| 2023-03-30T06:07:28
| 2019-12-17T17:38:59
|
JavaScript
|
UTF-8
|
R
| false
| false
| 417
|
r
|
test-mytest.R
|
library(shinytest2)
test_that("Migrated shinytest test: mytest.R", {
app <- AppDriver$new(variant = shinycoreci::platform_rversion())
app$expect_values()
app$expect_screenshot()
app$set_inputs(radio = "b")
app$set_inputs(radio2 = "d")
app$expect_values()
app$expect_screenshot()
app$set_inputs(empty = "click")
app$set_inputs(rerender = "click")
app$expect_values()
app$expect_screenshot()
})
|
6a0b4017e1a36937e743ab9134779590f7eae79a
|
d32f0db3e9dbc28f666664661ddff927a168a65f
|
/Figure_2.r
|
0de8ca911d93e69b282ce076145a546be204ec35
|
[
"BSD-3-Clause"
] |
permissive
|
aristotle-tek/Classif_Accuracy_Polarization
|
b921692e108dea69df6b7c4a210910497909a1e5
|
f082f9f10172544529d1cad3b9414a86e351c3e6
|
refs/heads/master
| 2021-06-26T09:55:20.763639
| 2020-04-20T09:52:18
| 2020-04-20T09:52:18
| 99,031,655
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 732
|
r
|
Figure_2.r
|
# Figure 2: Density Plot of Predicted Probability
# Conservative For Different Levels of Noise.
rm(list=ls())
library(ggplot2)
library(reshape2)
wd <- getwd()
data.dir <- paste0(wd, "/data/")
df <- read.csv(paste0(data.dir, "preds_sims_sel.csv"), header=T)
m <- melt(df, id.vars='noisefrac')
m$party <- sapply(m$variable, substring, 1,1)
m$party[m$party=='r'] <- 'Right'
m$party[m$party=='l'] <- 'Left'
left <- m[m$party=='Left',]
right <- m[m$party=='Right',]
p <- ggplot(m, aes(value, linetype=as.factor(noisefrac)))
p + geom_density() + theme_bw() + xlim(0,1)+labs(linetype = "Fraction Noise") + xlab("Predicted Probability Conservative") + ylab("density")
ggsave("density_by_noise.pdf", width=10, height=6)
dev.off()
|
8a64e631dd0ee04e23d7de27abaa4c38225291f5
|
6f355684cb68139644cb4c5b5ee66c6947deed68
|
/rScripts/s3_DMPs.R
|
1a31d1676fc2e1e6155936bdac1f58642ff7fea3
|
[
"MIT"
] |
permissive
|
QianhuiWan/Methods_DNAm
|
55cceb1bb4eee25d39f272eb2af8d3ae89a0b72c
|
61942fbad31fa35b70b19c51865bfa7d79c652c1
|
refs/heads/master
| 2021-07-24T19:26:14.772620
| 2021-04-07T00:01:13
| 2021-04-07T00:01:13
| 246,944,393
| 3
| 0
| null | 2021-04-07T00:01:14
| 2020-03-12T22:50:25
|
R
|
UTF-8
|
R
| false
| false
| 8,058
|
r
|
s3_DMPs.R
|
library(here)
library(minfi)
library(tidyverse)
library(magrittr)
library(microbenchmark)
library(missMethyl)
library(limma)
library(gamlss)
# library(betareg)
library(IlluminaHumanMethylationEPICanno.ilm10b4.hg19)
ann850k <- getAnnotation(IlluminaHumanMethylationEPICanno.ilm10b4.hg19)
BM_list <- readRDS(file = here("Method_DNAme/rds/preprocessed_PAC12_placenta.rds"))
baseDir <- file.path(here("EPICdata/IDAT_Placenta_PAC_sub12"))
phenoData <- read.metharray.sheet(baseDir)
EpicRGsetPAC12 <- read.metharray.exp(targets = phenoData, extended = TRUE, force=TRUE)
estSex <- EpicRGsetPAC12 %>% preprocessRaw() %>% mapToGenome() %>% getSex()
phenoDataPAC12 <- EpicRGsetPAC12 %>% preprocessRaw() %>% mapToGenome() %>%
addSex(sex = estSex) %>%
pData()
# DMPS with different methods #########################################################
# variables
Trimester <- factor(phenoDataPAC12$Trimester)
GA <- phenoDataPAC12$Gestational.Age %>% as.numeric()
# FetalSex <- factor(phenoDataPAC12$Fetal.Sex, levels = c("F", "M"))
ArrayDateBatch <- factor(phenoDataPAC12$ArrayDateReceived)
## design the matrix for limma
### create a matrix
design <- model.matrix(~ Trimester+ArrayDateBatch)
### rename the col of the matrix
colnames(design) <- c("Intercept","secondTrimester",
"arrayDateReceivedBatch")
############ RUVm ############################################################
# RUVm Illumina negtive controls (INCs): ====
## true negtive control probes, 411
## INCs are Illumina negtive controls # M=BM_list$Mnorm
RUVm_dmp <- function(EpicRGsetPAC12, M){
INCs <- getINCs(EpicRGsetPAC12)
## Mc means combine m-value and Illumina negtive controls
Mc <- base::rbind(M, INCs)
## ctl means ctl probes' information
ctl1 <- base::rownames(Mc) %in% base::rownames(INCs)
# RUVfit and RUVadj====
#fit microarray linear regression model
rfit1 <- missMethyl::RUVfit(Y = Mc, X= Trimester, ctl = ctl1)
# empirical bayes method: adjust variance
rfit2 <- RUVadj(Y = Mc, fit = rfit1)
#Extract a table of the top-ranked CpGs from a linear model
top1 <- topRUV(rfit2, number = Inf, p.BH = 1)
# head(top1)
## ECPs, Empirical control probes
### p.ebayes.BH is FDR-adjusted p-values, after applying the empirical bayes method
# probes without significant differenc as EPCs====
ctl_EPCs <- base::rownames(M) %in% base::rownames(top1[top1$p.BH_X1.Second >0.5,])
# table(ctl_EPCs)
# use ECPs to do 2nd Diff methylation====
# fit linear model, 2nd time
rfit1_2nd <- RUVfit(Y = M, X= Trimester, ctl = ctl_EPCs)
# adjust variance
rfit2_2nd <- RUVadj(Y = M, rfit1_2nd)
# extract topRUV result====
top2 <- topRUV(rfit2_2nd, number = Inf)
table(top2$p.BH_X1.Second <0.05) # 35937
return(top2)
}
######### limma ################################################################
Lm_dmp <- function(phenoDataPAC12, M){
# design the matrix for limma
# estimate weights for each sample
w <- arrayWeights(M, design = design)
phenoDataPAC12 %>%
cbind(w) %>%
as.data.frame() %>%
as_tibble() %>%
ggplot(aes(Trimester, w)) +
geom_violin() +
geom_jitter(width = 0.2) +
geom_hline(yintercept = 1, colour = "blue", linetype = 2) +
facet_wrap(~ArrayDateReceived) +
theme_bw()
## fit lm using limma:
fit1_limma <- lmFit(M, design = design, weights = w) %>%
eBayes()
# decide test
fit1_limma %>%
decideTests(p.value = 0.05, adjust.method = "BH") %>%
summary()
## add annotation to top_limma:
annEPIC <- ann850k %>% as.data.frame()
annEPICsub <- annEPIC[match(rownames(M), annEPIC$Name),
c(1:4, 12:19, 22:ncol(annEPIC))]
table(rownames(M)==annEPICsub$Name)
## save BY ajusted data
DMPs_limma <- topTable(fit1_limma, coef = 2,
number = Inf, adjust.method="BH",
genelist = annEPICsub)
rm(fit1_limma)
return(DMPs_limma)
}
######## Beta regression ########################################################
# Beta regression with `gamlss` R package
# if the meth and unmeth status are individual for each probe,
# the beta-values obey beta distribution, since beta ∈ {0,1} and
# beta contain maginal values e.g. round 0 or 1
# use muti cores and also add ajusted covariants in the model:
# library(doMC)
# registerDoMC(cores=3)
##functions
w <- arrayWeights(M, design = design)
## beta glm function
# beta.glm <- function(dat.run, dat.Y, dat.gp, dat.co1){ #, dat.co2
# dat.tmp <- data.frame("Y"=dat.Y,"X"=dat.gp, "W1"=dat.co1) #, "W2"=dat.co2
# zero.tab <- dat.tmp$Y<=0.02 #0.02 cutoff
# zero.prop <- length(zero.tab[zero.tab%in%TRUE])/nrow(dat.tmp)
#
# #beta regression
# #beta
# if(zero.prop<0.8){fit1 <- gamlss(Y~X+W1,data=dat.tmp,family=BE())} #+W2
# # if I use betareg package for this fitting, it can be quicker and more comparable with other 2 methods.
#
# #zero-inflated
# if(zero.prop>=0.8){fit1 <- gamlss(Y~X+W1,data=dat.tmp,family=BEZI())}
#
# return(fit1)
# }
beta.glm <- function(dat.Y, dat.gp, dat.co1){ #, dat.co2
# browser()
dat.tmp <- data.frame("Y"=dat.Y,"X"=dat.gp, "W1"=dat.co1) #, "W2"=dat.co2
zero.tab <- dat.tmp$Y<=0.02 #0.02 cutoff
zero.prop <- mean(zero.tab)
zi <- zero.prop >= 0.8
fm <- list(BE(), BEZI())[[zi + 1]]
gamlss(Y~X+W1, data = dat.tmp, family = fm, control = gamlss.control(trace = FALSE))
# #beta regression
# #beta
# if(zero.prop<0.8){fit1 <- gamlss(Y~X+W1,data=dat.tmp,family=BE())} #+W2
# #zero-inflated
# if(zero.prop>=0.8){fit1 <- gamlss(Y~X+W1,data=dat.tmp,family=BEZI())}
#
# return(fit1)
}
## calculte zero proportion
CalLowProp <- function(x){
zero.tab <- x<=0.02
zero.prop <- length(zero.tab[zero.tab%in%TRUE])/length(x)
return(zero.prop)
}
## run from here
## we can't use doMC on phoenix, so I need to rewrite
## this in to a for loop or any other ways that can deal with it
glmBeta_dmp <- function(phenoDataPAC12, B){
dat.run <- B
dat.gp <- model.matrix(~Trimester, data=phenoDataPAC12)[,2] %>% as.matrix()
dat.co1 <- model.matrix(~ArrayDateReceived, data=phenoDataPAC12)[,2] %>% as.matrix()
p.dat <- list()
# dat.co2 <- factor(phenoDataPlacenta$Fetal.Sex, levels = c("F", "M"))
zero.prop <- apply(B, 1, FUN = CalLowProp)
fit1 <- betareg.fit(x = dat.gp, y = dat.run[-which(zero.prop>=0.8), ])
fit1 <- gamlss(B[1,]~Trimester+ArrayDateBatch,family=BE())
for(k in 1:3){
fit_sum <- beta.glm(dat.run[k,],dat.Y = , dat.gp=Trimester, dat.co1=ArrayDateBatch) %>% summary()#, dat.co2
p.dat[[k]] <- fit_sum[2,4]
}
dat.run <- B
dat.gp <- factor(phenoDataPAC12$Trimester)
dat.co1 <- factor(phenoDataPAC12$ArrayDateReceived)
p.dat <- list()
# dat.co2 <- factor(phenoDataPlacenta$Fetal.Sex, levels = c("F", "M"))
library(broom)
system.time(
temp <- tibble(
probe = rownames(B)[1:1000],
fit = lapply(probe, function(x){
beta.glm(B[x,], dat.gp, dat.co1)
}),
coef = lapply(fit, tidy),
p = vapply(coef, function(x){dplyr::filter(x, term == "XSecond")$p.value}, numeric(1))
# p = vapply(fit, function(x){
# suppressWarnings(summary(x)["XSecond", "Pr(>|t|)"])
# }, numeric(1))
)
)
apply(B[1:3,], MARGIN = 1, function(x){
fit <- beta.glm(x,dat.gp, dat.co1) #, dat.co2
summary(fit)[2,4]
p.dat_df <- data.frame(matrix(unlist(p.dat), nrow=length(p.dat), byrow=T),
stringsAsFactors=FALSE) %>%
`colnames<-`("unadj.p")
rownames(p.dat_df) <- rownames(dat.run)
p.dat_df$adj.P <- p.adjust(p.dat_df$unadj.p,"BH")
# save files:
# saveRDS(fit2, file = here("Method_DNAme/rds/glmBeta_fit_T1T2.rds"))
saveRDS(p.dat_df, file = here("Method_DNAme/rds/glmBeta_pval_T1T2.rds"))
return(p.dat_df=p.dat_df)
}
############## Use functions ############################################################
mbm <- microbenchmark(
RUVm = RUVm_dmp(EpicRGsetPAC12 = EpicRGsetPAC12, M = BM_list$Mnorm),
Lm = Lm_dmp(phenoDataPAC12 = phenoDataPAC12, M = BM_list$Mnorm),
glmBeta = glmBeta_dmp(phenoDataPAC12 = phenoDataPAC12, B = BM_list$betaNorm)
)
saveRDS(mbm, file = here("Method_DNAme/rds/microbenchmark_DMPs_out.rds"))
library(ggplot2)
autoplot(mbm)
|
50620bef025dfcb20f62ddc642cefca21adf2484
|
d802ad80e991359b169d8a5cce42ccd25a442ce5
|
/R/w2nHSMM.R
|
35fa7121817c0b5cca9271722a996e7c8c488601
|
[] |
no_license
|
cran/PHSMM
|
8bf8f514d98e84abef90eb63a28cd5188bb58b49
|
a2150c306aeb122a96108ebc430360902e548539
|
refs/heads/master
| 2023-03-11T00:14:25.564620
| 2021-02-09T09:10:02
| 2021-02-09T09:10:02
| 337,383,333
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,605
|
r
|
w2nHSMM.R
|
# parameter back-transformation
w2nHSMM<-function(N,parvect,R_vec,y_dist=c("norm","gamma","pois","bern"),
stationary=TRUE,p_ref=2){
cR_vec<-cumsum(c(0,R_vec))
sR_vec<-sum(R_vec)
p_list<-lapply(1:N,function(i){
p_h<-numeric(R_vec[i]+1)
p_h[-p_ref]<-parvect[cR_vec[i]+1:R_vec[i]]
p_h<-c(exp(p_h))/(sum(exp(p_h)))
return(p_h)
})
d_r<-lapply(p_list,function(p_h) return(p_h[-length(p_h)]))
foo<-sR_vec
if(N>2){ # only needed if N>2
omega<-matrix(0,N,N)
omega[!diag(N)]<-as.vector(t(matrix(c(rep(1,N),exp(parvect[foo+1:(N*(N-2))])),N,N-1)))
omega<-t(omega)/apply(omega,2,sum)
foo<-foo+(N*(N-2))
}else{
omega<-matrix(c(0,1,1,0),2,2)
}
Gamma<-tpmHMM(N,omega,d_r,R_vec) # tpm of the HMM representing the HSMM
if(stationary){
delta<-solve(t(diag(sR_vec)-Gamma+1),rep(1,sR_vec))
}else{
delta<-c(1,exp(parvect[foo+1:(N-1)]))
delta<-delta/sum(delta)
foo<-foo+N-1
}
mu2<-NULL
sigma2<-NULL
if(y_dist=="norm"){
mu<-cumsum(parvect[foo+1:N])
sigma<-exp(parvect[foo+N+1:N])
}else if(y_dist=="gamma"){
mu2<-cumsum(exp(parvect[foo+1:N]))
sigma2<-exp(parvect[foo+N+1:N])
mu<-mu2^2/sigma2^2 # shape parameter, needed for dgamma
sigma<-mu2/sigma2^2 #rate parameter, needed for dgamma
}else if(y_dist=='pois'){
mu<-cumsum(exp(parvect[foo+1:N]))
sigma<-NULL
}else{
mu<-plogis(cumsum(parvect[foo+1:N]))
sigma<-NULL
}
return(list(p_list=p_list,d_r=d_r,omega=omega,Gamma=Gamma,delta=delta,mu=mu,sigma=sigma,mu2=mu2,sigma2=sigma2))
}
|
ea5a12ec1943d42058dc8542bbc806d7a4175f4e
|
5fd011017b9a2ec48069ba470bc6344d8374cf30
|
/packages/pegasus-dax-r/Pegasus/Workflow/R/profile.R
|
6cfad64829e493e727ea1b0a6980f8fa724d0b96
|
[
"Apache-2.0"
] |
permissive
|
pegasus-isi/pegasus
|
578c6d8f2df77a7d43c4b9e2d1edf6687db92040
|
6b7e41d7ebfacca23d853890937e593a248e6a6a
|
refs/heads/master
| 2023-08-02T08:20:18.871762
| 2023-07-20T23:00:08
| 2023-07-20T23:00:08
| 8,016,431
| 156
| 80
|
Apache-2.0
| 2023-06-15T18:55:18
| 2013-02-04T21:18:31
|
Java
|
UTF-8
|
R
| false
| false
| 1,548
|
r
|
profile.R
|
#' A Profile captures scheduler-, system-, and environment-specific parameters in a uniform fashion
#'
#' @description
#' A Profile captures scheduler-, system-, and environment-specific
#' parameters in a uniform fashion. Each profile declaration assigns
#' a value to a key within a namespace.
#'
#' Profiles can be added to \code{\link{Job}}, \code{\link{DAX}},
#' \code{\link{DAG}}, \code{\link{File}}, \code{\link{Executable}}, and \code{\link{PFN}}.
#'
#' @examples
#' path <- Profile(Pegasus.Namespace$ENV, 'PATH', '/bin')
#' vanilla <- Profile(Pegasus.Namespace$CONDOR, 'universe', 'vanilla')
#' path <- Profile(namespace='env', key='PATH', value='/bin')
#' path <- Profile('env', 'PATH', '/bin')
#'
#' @param namespace The namespace of the profile
#' @param key The key name. Can be anything that responds to as.character()
#' @param value The value for the profile. Can be anything that responds to as.character()
#' @seealso \code{\link{Pegasus.Namespace}}
#' @return Profile object with the defined key=value pair
#' @export
Profile <- function(namespace, key, value) {
object <- list(namespace=namespace, key=key, value=value)
class(object) <- "Profile"
return(object)
}
Equals.Profile <- function(profile, other) {
if (class(other) == "Profile") {
return(profile$namespace == other$namespace && profile$key == other$key)
}
return(FALSE)
}
ToXML.Profile <- function(profile) {
p <- Element('profile', list(namespace=profile$namespace, key=profile$key))
p <- Text(p, profile$value)
p <- Flatten(p)
return(p)
}
|
3f4bc7db8b9f6d8716edaf03b6ca7a6ce763f52f
|
ca5655d9d7328c44079b2fcd5f7cfb4c45d2a715
|
/man/meStack.Rd
|
52f390eb28f829debdb682bd886e423326fdc182
|
[] |
no_license
|
cran/CAWaR
|
060d107895469291ce641fa0d5268437a9252555
|
57c4dca038289f1f808301d671cb8d079ddb272a
|
refs/heads/master
| 2020-12-21T21:29:13.542811
| 2020-06-04T14:40:11
| 2020-06-04T14:40:11
| 236,568,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,246
|
rd
|
meStack.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meStack.R
\name{meStack}
\alias{meStack}
\title{meStack}
\usage{
meStack(x, y, z, agg.fun = mean, derive.stats = FALSE)
}
\arguments{
\item{x}{A \emph{list} of \emph{RasterLayer} objects or a \emph{character} vector with the paths to \emph{raster} objects.}
\item{y}{A spatial object from which an extent can be derived.}
\item{z}{Object of class \emph{Date} with the acquisition date for each element in \emph{x}.}
\item{agg.fun}{Function used to aggregate images collected in the same date. Default is the mean.}
\item{derive.stats}{Logical argument. Default is FALSE.}
}
\value{
A list containing a \emph{RasterStack} and related statistics.
}
\description{
Stacking of raster layers with different extents
}
\details{
{The function stacks the raster objects specified in \emph{x}. For each element
in \emph{x}, the function crops it by the extent of \emph{y} and, if their extents differ,
fits the extent of \emph{x} to the one of \emph{y}. All new pixels are set to NA. If \emph{z}
is provided, the function will then aggregate all bands acquired in the same date using the
function provide with \emph{agg.fun}. If \emph{derive.stats} is set to TRUE, the function will
return basic statistics for each band (i.e. min, max, mean and sd) together with a plot of the
mean values. The final output of the function is a list containing:
\itemize{
\item{\emph{stack} - \emph{RasterStack} object.}
\item{\emph{dates} - Acquisition dates for each layer in \emph{stack}.}
\item{\emph{image.stats} - Statistics for each band in the output \emph{RasterStack}.}
\item{\emph{stats.plot} - Plot showing the mean, minimum and maximum values per band.}
\item{\emph{control} - Logical vector showing which elements in \emph{x} where used to build the \emph{RasterStack}.}}}
}
\examples{
{
require(raster)
r1 <- raster(xmn=1, xmx=90, ymn=1, ymx=90, res=1, vals=1) # image 1
r2 <- raster(xmn=50, xmx=150, ymn=50, ymx=150, res=1, vals=1) # image 2
r0 <- raster(xmn=20, xmx=90, ymn=50, ymx=90, res=1, vals=1) # target extent
crs(r0) <- crs(r2) <- crs(r1)
mes <- meStack(list(r1, r2), r0)
plot(mes$stack)
}
}
|
a79b09c9f7f8f90b18b54353944735c216e59c35
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rexperigen/examples/getUsers.Rd.R
|
db9f0d647fe52702a902f58bf3ab4c3da5c31906
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
getUsers.Rd.R
|
library(Rexperigen)
### Name: getUsers
### Title: Requests the table of users from the server.
### Aliases: getUsers
### ** Examples
## Not run:
##D getUsers("https...s3.amazonaws.com.myexperiment.index.html", "running", auth = TRUE)
## End(Not run)
|
087ed66616eb071f7daf5fb84f6a6546e9b41e2d
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkWidgetGetSettings.Rd
|
1439a43063e18d04e5d1c9a73d2ee4d60d4a079f
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 701
|
rd
|
gtkWidgetGetSettings.Rd
|
\alias{gtkWidgetGetSettings}
\name{gtkWidgetGetSettings}
\title{gtkWidgetGetSettings}
\description{Gets the settings object holding the settings (global property
settings, RC file information, etc) used for this widget.}
\usage{gtkWidgetGetSettings(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkWidget}}}}
\details{Note that this function can only be called when the \code{\link{GtkWidget}}
is attached to a toplevel, since the settings object is specific
to a particular \code{\link{GdkScreen}}.}
\value{[\code{\link{GtkSettings}}] the relevant \code{\link{GtkSettings}} object. \emph{[ \acronym{transfer none} ]}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
dcbfcf28c04a893e457a8acc0ea09035255b8dd7
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/GUILDS/tests/testthat/test-generate.Guilds.R
|
e70cb133defa756f3ebd6a98e222e80cdf4fa2b1
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,614
|
r
|
test-generate.Guilds.R
|
context("generate.Guilds")
test_that("generate.Guilds: use", {
skip_on_cran()
J <- 10000
v <- generate.ESF(theta = 100, I = 10, J)
expect_equal(
sum(v),
J
)
v <- generate.ESF(theta = 100, I = Inf, J)
expect_equal(
sum(v),
J
)
v <- generate.Guilds(theta = 100, alpha_x = 0.1, alpha_y = 0.1, J)
expect_equal(sum(v$guildX,v$guildY), J)
v <- generate.Guilds(theta = 100, alpha_x = 1, alpha_y = 1, J)
expect_equal(sum(v$guildX,v$guildY), J)
})
test_that("generate.Guilds: abuse", {
skip_on_cran()
expect_error(
generate.ESF(theta = -1, I = 10, J = 100),
"theta can not be below one"
)
expect_error(
generate.ESF(theta = 100, I = -10, J = 100),
"I can not be below zero"
)
expect_error(
generate.ESF(theta = 100, I = 10, J = -100),
"J can not be below zero"
)
expect_error(
generate.Guilds(theta = -1, alpha_x = 0.1, alpha_y = 0.1, J = 100),
"theta can not be below one"
)
expect_error(
generate.Guilds(theta = 10, alpha_x = 0.1, alpha_y = 0.1, J = -100),
"J can not be below zero"
)
expect_error(
generate.Guilds(theta = 10, alpha_x = -0.1, alpha_y = 0.1, J = 100),
"alpha_x can not be below zero"
)
expect_error(
generate.Guilds(theta = 10, alpha_x = 0.1, alpha_y = -0.1, J = 100),
"alpha_y can not be below zero"
)
expect_error(
generate.Guilds(theta = 10, alpha_x = 1.1, alpha_y = 0.1, J = 100),
"alpha_x can not be above one"
)
expect_error(
generate.Guilds(theta = 10, alpha_x = 0.1, alpha_y = 1.1, J = 100),
"alpha_y can not be above one"
)
})
|
29e84527db701aa2c941cfe9eda11a843d13fe42
|
fd1535a5de4436b2cf59fd7ab2c32a1e1246a997
|
/BAP_readTrial.r
|
1fff604a33c95228ed59438f37088d34435fbe32
|
[] |
no_license
|
cboisvenue/R_collaboration
|
006c84d55338dbb46e575abcefbd98de7fd9b15e
|
d2778437d7332bc22f579b6c49cdf0c008a98ce5
|
refs/heads/master
| 2020-04-10T22:42:01.169744
| 2017-03-01T19:00:10
| 2017-03-01T19:00:10
| 68,142,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 732
|
r
|
BAP_readTrial.r
|
# Trial read of Joanne's BAP proxy data
#
# CBoisvenue, July 20th, 2016
# --------------------------------------
library(data.table)
library(foreign)

# Plot link table; key on the shared plot identifier for a keyed merge below.
links <- fread("G:/RES_Work/Work/StandReconstruction/work/data/whitejoannenrcanrncanrebapproxyvaluesforintens/Boisvenue_plots_link.csv")
setkey(links, UNIQUE)

# Zone 14 BAP extraction (dBase file), coerced to data.table and keyed the same way.
zone14 <- as.data.table(read.dbf("G:/RES_Work/Work/StandReconstruction/work/data/whitejoannenrcanrncanrebapproxyvaluesforintensTrial/zone14_BAP_extract.dbf"))
setkey(zone14, UNIQUE)

# Keyed merge of the link table with the zone-14 extract; inspect its size.
trial14 <- merge(links, zone14)
dim(trial14)

# Complete BAP set for all plots.
bap <- fread("G:/RES_Work/Work/StandReconstruction/work/data/whitejoannenrcanrncanrebapproxyvaluesforintens/CB_plots_BAP_July292016.csv")
|
7270619f48dfc326fe686ffc412f8b0de65dc47b
|
3087e43b9ac4d9547ca42a2661904f32f79af04d
|
/seqOutATACBias_workflow_Vignette/12mask_RuleEnsemble_Tn5_scaling.R
|
6b55042423925ea867860d192bf177c064691ad9
|
[] |
no_license
|
guertinlab/Tn5bias
|
a0334d91f13932a2604c06845c6570e1825aad4e
|
903740846b6b6521e2b9228e1b36b5c26f9de219
|
refs/heads/master
| 2023-04-07T14:27:04.558296
| 2023-03-28T21:18:54
| 2023-03-28T21:18:54
| 277,839,583
| 4
| 3
| null | 2021-04-13T16:51:20
| 2020-07-07T14:30:19
|
Shell
|
UTF-8
|
R
| false
| false
| 128,793
|
r
|
12mask_RuleEnsemble_Tn5_scaling.R
|
RE_scale_12mask = function (input) { input[, RuleEnsemble :=
(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX*0.488249030865301) +
(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX*0.334453503436979) +
(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX*0.255014821022294) +
(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX*0.213919435102378) +
(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX*0.180257102883671) +
(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX*0.100247733505605) +
(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX*0.0991379733752532) +
(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX*0.0314980563075302) +
(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX*0.00286370598838547) +
(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX*0.000404389291000352) +
(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX*0.0000653049216046575) +
(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX*-0.0000319457150364638) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.30079316378989 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.6960526535544 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 2.05132815048606, 0.333581385708611, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03853092561245 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.2428304683037 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 2.49071748725718, 0.290758414101882, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03926244695112 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.61872934063001 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 2.17675499575527, 0.281762287544795, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.11611367654543 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 2.17140360952328, 0.242078204976413, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.35198383094047 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 2.15548234597529 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.35773516165006, 0.163455843921196, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.12147840016148 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 2.46922717253169 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 2.08959291303171, 0.155974156600025, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.48207196627455 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.87263167890065, -0.153330290524754, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.04240646564147 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.20725218769144 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.36106016503841, 0.130486878325226, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03926244695112 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.61872934063001 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 2.11501829696828, -0.123865244444514, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.41831021088559 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.28266474528321, -0.103805599174991, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.07583580015453 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.12011077792057 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.730104175363516, -0.103458362006245, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.735114303614995 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.52963374168146, -0.0996814611787193, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.14766767581115 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.12863025866094, -0.0933309096991373, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.33218346278325 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.57198827511197, 0.093277566064033, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.834860042139387 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.01526335456444, -0.0911517386855585, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.925612539677371 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.45117215797578 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.46620167507986, 0.0897823183294665, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.31945527307936 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 2.35163019898018, 0.0795372266031353, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.77919379444136 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 2.35822910993117, -0.0787395020296189, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.29697030942244 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.21021143782173, 0.0769951184529374, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.07089147565059 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.46287225326433 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.42587713597102, -0.0761820843713193, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.96868334214953 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 2.31701743897662, -0.0754612086973898, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.29168565544984 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.3248927242855, -0.0718694251916845, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.07767124057222 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.08000601252991 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.390981197292, -0.0696192621982054, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.11362463703657 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.71951644792382, 0.0669217335814795, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03926244695112, -0.0643097241876336, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.21878591999566 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.38673378063209, 0.0632385010034038, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.35198383094047 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.9310281825675 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.61721969831197, 0.062283972747313, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.03723412051391 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.35728027740031, -0.0592510340809953, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.20437077743006 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.33812224527425, 0.0578091299765536, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.25446060826171 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.11186907860968, 0.0568283442190958, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.31945527307936 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.48207196627455 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.27226086215418, 0.0538407709776299, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.31351575916698 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.36052926587621, -0.0522106394886171, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.969669523165527 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.28227186943802 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 2.67478937799589, -0.0521586293798841, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.891679789180476 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.899736762741174, 0.0520805142479659, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.2615087722643 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.334928725825, -0.0515625292777896, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.08424801989329 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.0602409262328, -0.0503320715180922, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.866383464095255 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.912647026023851, -0.050317066362775, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.37903326610255 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.10307121217882, 0.0495813333165943, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.12449549860911, -0.0493992463850178, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.26244305443752 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.3858695293802, 0.0465069243222818, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.07583580015453 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.11969625570605 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.51169362045784, 0.0444141875501178, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.25637016476249 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.26359503181463, -0.0443130944588046, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.30655688412335 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.72967395704334 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.16009008803533, 0.0436664317300069, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.11611367654543 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 2.17140360952328 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 2.67560117996209, 0.0432969334588336, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.14689136638072, -0.0425145142854638, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.40813728356268 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.2652372245181, -0.0423270147382189, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.925612539677371 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.45117215797578 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.46620167507986, -0.0422729296078217, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03926244695112 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.61872934063001, 0.0413961535628114, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.04240646564147 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.895596433557823 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.505612707715895, -0.0413186531080911, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.2615087722643 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.334928725825, 0.0410998559860296, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.38400895652451 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.29313767504, -0.0401564471709802, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.04727380271582 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.05695714893465, 0.0399604542043851, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.20992352676344 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.30264720103575, 0.0397004568170764, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.31945527307936 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 2.35163019898018 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.826794146032151, -0.0392167157628024, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.03011344785821 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03817963259462, 0.0385187219698247, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.39461436596385 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.36193351399301, -0.0382961712593589, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.2401761913856 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.23383282890469, 0.0374969999792205, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.30729995680696 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.40578476089613, -0.0364894063709418, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.31295264211459 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.30387211938815 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.32850653934179, 0.0364516592748666, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.04240646564147 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.895596433557823 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.03285299963215, 0.036406824938302, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.635307933723307 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.723754030844885, -0.0363578056172314, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.881525413018651 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.691040100539121 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.873260876613477, -0.036092194738586, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.10779209237636 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.943243583052806 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.836142153597975, -0.0358449438825798, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.37125670890025 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.30862361660925, 0.0355107067553897, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.32482060985854 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.39413984398038, 0.0350164824080078, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.33361792692898 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.44404857107837, 0.0349952653604731, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.31945527307936 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.908375557508399 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.56388742825914, -0.0341662623948504, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.08441785829935 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.09711381782565 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.14559610734361, -0.0340237219930873, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.05583115476377 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.06233357955615, -0.0339012073827584, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.24855541042532 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.20045097218025, 0.0335974605482504, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.4053642393532, 0.0319780782481074, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.22084851191763 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.2875908266778, 0.031953637149379, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 2.11388570863066, -0.0315289572979777, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.31945527307936 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.48207196627455, 0.030498355647272, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.12449549860911 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.945024919107137 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.768703255421155, -0.0304772144680125, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.22273878439595 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.27792405537497, -0.0303294401163872, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.0172909187926 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.09765965503524, 0.0300067343404125, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03926244695112 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.61872934063001 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 2.11501829696828, 0.0297383511165928, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.07583580015453 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.12011077792057 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.3361785216097, -0.029602593194338, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.05553977589406 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.21052226855662 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.37770797190211, 0.0295660740567028, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.30655688412335 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.72967395704334 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.16009008803533, -0.0292687418829754, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.960155924852486 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.584801316911234, -0.0292046516292567, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.30878386319277 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.05312785010014 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.10325358258282, 0.0288391818328955, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.11611367654543 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 2.17140360952328 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.67560117996209, -0.0287823121895504, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.13117322124094 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.10523071425015, -0.0287042687623068, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.04249576475707 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.04329972915065 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.02641858398022, -0.0280194793416886, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.09851971983192 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 2.17675499575527, 0.0279962586043767, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.972172967383103 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.8785931627873, -0.0279062687568345, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.22305974405219 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.20738703839211, 0.0278052557654286, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34889259893576 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.68582909964961 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.94481547024542, -0.0275781573229717, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.963899778434893 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.206135681897 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.38352255840952, -0.0271033691876792, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.94729170854631 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.925499280250155, -0.0269362917182051, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.07726854464485, 0.0261552486670885, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.21815043213193 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.23655683738544, -0.0261159559300572, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.14467624547122 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.20580080229079, -0.0258908211773779, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.03438967823963 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.12586190545081, -0.0257668078264316, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.34653960993578 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.44247751084617 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.742037880178955, -0.0257212453807816, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.3910718548265 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.38682781736576, -0.0255781459541065, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.35198383094047 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 2.15548234597529 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.35773516165006, -0.02519642112129, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.883443905770571 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.864550300944261 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.797643793116308, -0.0251740857348408, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.31295264211459 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.30387211938815 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.32850653934179, -0.0246759469398984, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.915444140310548 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.910908859268247, -0.0246088493708245, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.06784332766279, -0.0244730887067157, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.01324338657153 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.974996136142036, -0.0241853837165797, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.12597425056647 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.20158463048025, 0.0241259104102726, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.29936070829162 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.53340946412691 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.39118115265477, 0.0240818290655575, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.08353235633319 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.08921135227255, -0.0240197473148145, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.09652530207821 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.0609211042925, -0.0236695699872988, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.924717081217319 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.694537694545019, 0.0232399594888254, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.936528402474118 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.954199967270602, -0.0228678791619472, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.966952685081111 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.976711181999597 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.434922353262717, -0.0228199300623289, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.816612530684486 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.812192801493859, -0.0224785566332845, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.22528106173346 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.31929885106703 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.25767119534897, -0.0224640449689602, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.10779209237636 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.66419801655982 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 2.17140360952328, -0.022453601560616, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.11113345696244 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.08160669723618, -0.0224348079881481, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.24969962086152 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.32538934678979, 0.0221928803038967, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.38586152274802 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.09969251016909, 0.0219886089125133, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.25637016476249 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.26359503181463, 0.0219331659117406, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.19756539545942 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.25060110257884, -0.0218562101002719, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.35198383094047 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 2.15548234597529 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.826794146032151, 0.0216948310292903, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.31746293623785 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.40842470060704, 0.0216650841257125, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.33498282744699 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.1575751460517 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.925952633062759, 0.0215143230877803, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.11603478096867 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.09692864092733, -0.0213550912761429, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 2.00846714728468, -0.0211353510087792, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.20120611153793 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.33049294560974, -0.0210605096384901, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.04498665909879 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.12270327170654 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.44007944543355, -0.020750844346941, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.23853579685761 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.33879181937005, -0.0206229380494388, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.838517120704225 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.736502632166762 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.808051877782748, -0.0204368726523034, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.880414757558357 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.31458678158748 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.36414197526058, -0.0201909168920721, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.97565854191988 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.05445429145245, 0.0201065101099198, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.974454122612796 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.804540086950337 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.916057006967577, -0.0198510108659761, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03853092561245 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.59000498028485 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.598161129071365, -0.0198244756317709, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.35198383094047 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.9310281825675 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.61721969831197, -0.0197689807001561, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.34451373515663 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.08925563805148, 0.019765548515853, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.840240347113306 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.791553049849546, -0.019648000188194, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.21477565183429, -0.0196382037247274, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.09638289460021 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 2.07322459300759 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.87263167890065, 0.0196225383545994, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.77919379444136 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.909351589412695, -0.019553090377912, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.33218346278325 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.57198827511197, -0.0194659507816373, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.905312363908601 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.822931867962424 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.833457150432398, -0.0192495345559907, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.863433568770227 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.902411755616272, -0.0192145534500715, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.07583580015453 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.12011077792057 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.3361785216097, 0.0191103517023764, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23315199710487 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.33406859925351, -0.0189772730290626, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.28381229003165 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.19564865536677, -0.0188535516831252, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.10779209237636 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.66419801655982 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 2.17140360952328, 0.0185922972226727, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.878199824891658 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.17257590395462, -0.0185657028362545, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.09851971983192 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.948777684216913 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.7507798732853, -0.0185468346532144, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03853092561245 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.59000498028485, -0.0185100420332572, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.25446060826171 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.28158835247122 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.20714089879689, -0.0184869702043279, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03853092561245 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.59000498028485 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.598161129071365, 0.0184067841870949, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.04874626447484 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.05440831249912, -0.0183738843865696, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.06476675732832, -0.0182877918458198, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.06373209800703 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.07282214482567, -0.01828215974263, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.88308872050366 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.722460864677487, -0.0181408927491862, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.1239026602759 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.34760064412985, 0.0178565612252725, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.06259269474577 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.40032234087794, -0.0177329080191522, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.24855541042532 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.13301795368305, -0.0176002356712432, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.03011344785821 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03817963259462, -0.0174415228314364, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.71300284078277, 0.0174365656861644, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.32864312202031 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.42517489857994, -0.0173342791849353, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.871352951292847 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.373301798134696, -0.0172372920150104, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.3910718548265 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.38682781736576, 0.0171668557394118, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.31945527307936 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.48207196627455 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.27226086215418, -0.0168793037405468, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.906284557640422 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.03835820601739, 0.016557261698822, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.05780317012475 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.988634258860862, 0.0165506272823786, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.0171666979913 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.743066832302638 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.905111586456195, 0.0165503996173084, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.23149648471163 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.32965811291227, 0.0164540426708964, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.840092461285149 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.934366438268084, 0.0160250161852655, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.826664373274069 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.901988345363132, -0.0159276761534243, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.34354256357022 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.31357469536326 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.29537679978879, 0.0158842647520707, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.831280136215299, -0.0157153366707079, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.887389088485458 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.852321842909942, -0.0156558605598496, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.450310114989889, -0.0155415000268697, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.868158008605044 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.12728449339912 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.31906155762623, -0.0153133937833975, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.35503969414523 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.23878576723927, -0.0151762946341644, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.21177463963806 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.2845644045679, -0.0150649591448768, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.03454310650492 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.966020098904103, -0.0147345506843373, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.36724840895935 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.25329187789209, 0.0147195669703807, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.28921376438208 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.09903227188179, 0.0146721094875807, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.535755758888943, -0.0145833878609318, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.1356139299085, 0.014577456560564, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.05212505589328 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.20336575692991 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.40003140327425, 0.0145440198098429, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.937367481636307 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.835997596421775 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.765781253202845, 0.0145377468307684, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.76212684492114 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.265683853283815, 0.0144454498705195, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.08612397260923 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.07169599155486, 0.0143881488173386, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.30096359190482 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.41308435185402, -0.0142991199148315, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.921003400037954 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.33852934412947, -0.0141936339288169, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.876447337294126 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.967683615073472, -0.0141629524842759, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.32124647690088 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.21773756857756, 0.0140870273805675, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.834645902642031 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.80774394285406, -0.0140738595477967, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.900112447483091 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.858294877178664, -0.0139518028156766, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.847918334039497 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.585862850421681, -0.013863074298106, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.06116488952794, -0.0137827016179029, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.907119056683761 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.921179988622474, -0.0137614235688453, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.86316457309912 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.868027405882138, 0.0137172535405474, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.05553977589406 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.924443268669402 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.816036277030848, -0.0136812650108295, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.25446060826171, 0.0136524591780673, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.11362463703657, -0.0135040897256818, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.39195126859653 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.38668478654262, -0.0134523542223176, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.19964859893924 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.2910811439993, 0.0133648243431989, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.30878386319277 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.05312785010014, 0.0133274595903127, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.837219800811416 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.942349258579398, 0.0132232612109382, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.05354478255892 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.13506265474165, 0.0132188601428071, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.962416463737224, -0.0132056891656913, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.2160896493411 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23434054002733 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.53726310127802, -0.0131783679293379, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.1776426475709, -0.0131288914950239, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.08441785829935 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.09711381782565 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.14559610734361, 0.0128859833774469, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.14766767581115, -0.0128684009998883, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.901530185204246 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.861675021188962 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.695165277064159, -0.012839611996785, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.829371996626686 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.824142530496413, -0.012808274400108, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.16915549275908, -0.0126997135680201, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.0051881313311, -0.0126842123758581, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.964019470278914 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.933158346403072, 0.0126545510973442, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.2270105973319 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.35554384783788, -0.0126362228785056, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.0350169328666 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.04874626447484, 0.0125446503925047, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.19637206011795, 0.0125041154311964, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.41410576130809 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.49562573599539, -0.0124815070653538, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.12913284768772 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.29658911318746 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.32463930332892, 0.0124478141599557, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.00110840948381 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.896841315275127 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.994654132314555, 0.0124239198039155, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.19832381322792, 0.0122565321411775, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.905312363908601 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.822931867962424, -0.0122359768734999, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.893491386992258 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.716648028050735, 0.0122250578013683, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.2939554637702, 0.0122136818131046, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.942708526670204 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.09737212886818, -0.0120039425591364, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34889259893576 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.68582909964961 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.03214538253814, -0.0119647089838243, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.11362463703657 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.71951644792382 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34934445134826, -0.0118357282099886, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.874578797738 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.854990342054128, -0.0117816432533538, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.87264259070831 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.13172801222249 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.825731116359236, 0.0115868495280291, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.85104678741516 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.679816785461653, -0.0114273743340196, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.02452150692262 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.973569564897714, -0.0113300144831388, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.880414757558357 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.31458678158748 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.36414197526058, 0.0112591660634675, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.22528106173346, 0.0111498295334555, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.01060131397351 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.04027971306826, 0.0111480117020275, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.40204338746576 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.27763943917897, 0.0110394571347851, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.29767946572844 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.54008603345232, -0.0110073568910605, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34381988576134 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.601415426063895, -0.0108483499239923, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.10102026684922 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.22213764872096, 0.0107393129673654, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.31271881766352 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.09165468920373, 0.0105096733118076, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34653960993578 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.85694499154494 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.68880174319283, 0.0104523191522444, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.04669835096704, -0.0104087420360529, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.37125670890025 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.30862361660925 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23570368234794, -0.0101758505579338, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.838517120704225 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.736502632166762, 0.0101539207076456, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.08353235633319 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.05911528244283, 0.0100519776947947, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.03723412051391 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.35728027740031, 0.0100095066225263, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.11423217031902 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.2520088331459, -0.0099935640238371, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.851305046433773 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.922441144490223, 0.00998601303024787, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.879880831503114 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.955388361304409, -0.0097737861823677, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.908738158137526, 0.00971882548209072, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.25808955182184, 0.00963504625205906, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.27891794416962 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.31393270985931 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.29535585176552, -0.00944935261683516, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.05553977589406 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.924443268669402 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.952436305961355, 0.00944858321303424, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.853808402823484 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.872150205717072, -0.00942604482457084, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.09851971983192 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 2.17675499575527 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.00609717396859, -0.00938933958064995, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.05272473714001 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.941817756724881, -0.00928089422490699, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.34381988576134 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 2.92281038542157, 0.00905281642938509, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.32482060985854, -0.00901129538212297, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.3388451500221 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.34897498189198, -0.00898349468057684, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.29197391221681 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.41529026635041, -0.00889473441643992, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.33215074411659 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.22934540335419, 0.00888962277725002, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.06784332766279 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.827415319832066, -0.00887998245394691, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.37903326610255 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.10307121217882, -0.00870884865845793, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.01340210647826 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.00046415559074 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.44440153927441, -0.00849982665931729, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.13347482991274, -0.00834104072764004, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.967493755653802 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.02610140956731, -0.00833667592611445, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.35368593586059 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.30616982171354, 0.00831746907577393, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.23682315219878 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.08972182986823, 0.00823132230122712, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.03242010327937 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.914822950137195 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.898295440574346, 0.00822066094912299, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34381988576134 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.601415426063895 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.21154723563665, -0.00814883703906425, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.852103616149724 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.931211151968348, -0.00803899822336505, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.11771290781095 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.03071833146051, 0.00802820862288355, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.3306724049374 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.24384566924619, 0.00800837220155144, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.908514811086138 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.885684240793991, 0.00791165337332054, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.969669523165527 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.28227186943802, 0.00779983138500883, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.30096359190482 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.41308435185402, 0.00778772008359409, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.13313800164818 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.2749330020556, 0.00778768433015414, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.4618595783136, 0.00775354312116308, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.832302312907622 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.847323206424849, -0.00771324627482114, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.912720009191434, 0.0077103285875748, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.29525110736674, 0.00767589939189568, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.937367481636307 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.835997596421775, 0.00765747855989284, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.12449549860911 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.945024919107137 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.768703255421155, 0.00761867824681384, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.10450265754416, -0.00753763032648994, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.2048261742829 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.28584058988372, -0.00748171043624017, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.979545157938249, -0.00746988161323649, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.883156994566195 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.957167600933645, -0.00740228180897409, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.32374527009163 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.22418212592123, -0.00736316478784713, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.31292711987062 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.31065102853183 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.13256580584681, 0.00735930169920495, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.11423217031902 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.898913477275128 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.862180614178463, 0.00734717053689467, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.80677029487693 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.20722434836927, -0.00724181107688956, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.893223446352717 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.880284745282227 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.89182472114026, -0.0071578097922698, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.03733211296583 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.06829598224007, -0.00711219768654879, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.31720055261552 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.10026908027799, 0.00710752608760784, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.24451121125274, -0.00702214858245417, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.901530185204246 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.861675021188962, 0.00699732770332793, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.937367481636307 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.835997596421775 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.928827272117719, -0.00698248181727504, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.99976973901587, -0.00697568264895463, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.77919379444136 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.909351589412695 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.31719057759504, 0.00692179420585642, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.10568074558849 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.30116977102276, -0.0069216999221546, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.24024815425569 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.1467011011408, -0.00687659343825668, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.24419512387367 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.39109226375486, -0.00687364774968977, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.21815043213193 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23655683738544, 0.00687239838149215, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.05212505589328, -0.00686338286247579, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.22186178428803, -0.00683924031619169, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.835850159118617 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.885238688811757, -0.00676105136250228, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.12449549860911 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.945024919107137 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03926244695112, -0.00672784271729028, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.859764510588721 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.884313459999119 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.593576282498002, 0.00670691268426263, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.11155949073187, 0.00666417733064435, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.34590867547216 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.94804171538424, 0.00663154582465309, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.07583580015453 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.12011077792057 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.730104175363516, 0.00661248814154433, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.04249576475707 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.0476426972969, 0.00659585727153009, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.05613867069986 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.02975176511655 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.02447533548794, 0.00657570822608808, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.11580916302441 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.05597817211217, -0.00650015220622877, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.12942545979991, -0.00645061864900697, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.07805488418889 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.02542468290017 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.05035370129856, 0.00642785312363981, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.80158468634792, 0.00638868268257538, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.04249576475707 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.0476426972969, -0.00638829122119539, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.77919379444136 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.909351589412695 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.31719057759504, -0.00638503858568645, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.33498282744699 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.1575751460517 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.31892581068652, 0.00629287805210646, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.967179920440727 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.964892230322823, 0.00628309623378816, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.29256522293539, 0.00624102369480907, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.995939198408011 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.977823220790104 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.962545957563433, -0.00623865029873561, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.877000192627709 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.918118862993646, -0.00621913435425952, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.00887186256508 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.991880367162887, -0.00618599730750511, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.37527539546748, 0.00610685913286641, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.843496458154422 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.880722108211904, -0.00606334235554325, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.39249077036098, 0.00605975889559086, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.937571342158536 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.89899287015091, -0.00604756773892086, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.35198383094047 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.9310281825675 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.506027542729424, -0.00604717325336939, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.47370013568584, -0.00603029652472856, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.866383464095255 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.912647026023851, 0.00602057460027758, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.917792039771864, -0.00599050533026723, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.11611367654543 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.94174335094815 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.816715330000596, -0.00597545721611829, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.31945527307936 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.908375557508399 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.426444402270437, -0.00592563929400692, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.11603478096867 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.09692864092733, 0.00592419385643524, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.12119139040867 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.824556412942982 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.972702846147435, -0.00592291962389388, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.835850159118617 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.885238688811757, 0.00590282890208201, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.960155924852486 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.584801316911234, 0.00581244455046229, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.906628969356515, -0.00574681355136756, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.1580228343568, -0.00566292488631122, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.03845026798249, -0.00557228904128938, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.11362463703657 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.89891038956548 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.545044224662068, -0.00546843775985624, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03926244695112 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.839654348397624, -0.00543843145791073, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.43946803812821, -0.00543313301185877, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.877999286308661 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.20298330440461 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.26127503194575, -0.00539317981381527, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.31858627099186 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.29836181051748 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.20485459996336, -0.00539238221940908, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.937602069477368, 0.00534314639941313, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.01838137558332 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.360389788917 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.36773226003927, -0.00531788054946415, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.37137149195307 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.33733551357055, 0.00531082359778395, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.13313800164818, -0.00528814472806452, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.882657172951689 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.271876541831391 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.532945493627246, -0.00528204542950472, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.97565854191988 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.884307841414835, -0.00509450407626885, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.924997921782229, -0.00504213706003296, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.961250777476212 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.91545342213287, 0.00504201956580699, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.30878386319277, 0.00503361181103491, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.965875792521157, 0.00500688070171874, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.940305048246754, -0.00494726736330688, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.40183036134436 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.30595106900718, -0.0049419309944493, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.39528009828393, 0.00483389733645513, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.897175950766723 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.896801497442592, 0.0047887465688927, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.898252131839788, -0.0047748300329521, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.03454310650492 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.966020098904103, 0.00476765548047699, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.994357924375165 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.0636152454638, -0.00475061123472865, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.04807111339042 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.04376930270378, -0.00474608255082718, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.996048930311746, 0.00473110232277878, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.920949412354188 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.02906338205278, -0.0047222337241527, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.11557541273582, 0.00470422392094913, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.898733144918979, -0.00469281448894552, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.876447337294126 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.967683615073472, 0.00459464066886307, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.819687130327619 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.627841967611459, 0.00456947921135574, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.1037349299289 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.35958328798221, 0.00452229700666518, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.00887186256508 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.991880367162887 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.9987582265531, 0.00452105590443581, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.22192162555442, -0.00446164557312442, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.05804648418695 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.0529160506457 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.918961210747972, 0.00444059533738665, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.910071358869004, 0.00444002212283148, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.2270105973319, 0.00442860939940149, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.853808402823484 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.872150205717072 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.808618738643361, 0.00442046180561293, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.36753568731191, -0.00439773353498071, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.930403722205977, -0.00438849854633702, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.11119445784276 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.00856432760507, 0.00433777956380068, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.924964271105218 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.861946997363071, 0.00432802785589064, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.848365411355446 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.837841847007574, -0.00432093612908185, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.12011077792057, -0.0043147783192404, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.22235800561365, -0.00428482566773449, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.14467624547122 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.20580080229079, 0.00426629362668623, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.23182937001241, 0.00425900324528889, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.29719829440581, 0.00421870149388374, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.29767946572844, 0.00416819788525593, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.05528258061262, -0.00416786572874433, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.31135022754399 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.953211381044623 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.958772639334226, 0.00413064644804892, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.19756539545942 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.25060110257884, 0.00402905881008084, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.08612397260923 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.37970374902683, -0.00396917105064447, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.36655045561444, -0.00390564743276686, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.863433568770227 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.902411755616272, 0.00388758509628925, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.34685481296123, -0.00375048436849568, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.0172909187926 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.09765965503524, -0.0037339159825063, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.964628928181026 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.847257761939066, 0.0037212864815694, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.893540732821182 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.906441308984658, -0.00371817761654874, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.13985164596761 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.886277863659797, -0.00370934317166178, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.00499696941144 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.800308608005861, -0.00370092298996936, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.01340210647826 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.953655923351661, 0.00369820714348253, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.912720009191434 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.92126038315127, 0.0036435647235447, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.12147840016148 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.92709719627966, -0.00360927783351769, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.3997797813121, 0.00358577544358984, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.04441788922265 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.17219423548397 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.0072888292563, 0.00357058336723833, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.05920423081913 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.905944897551095 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.969747143428071, 0.0035699666157603, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.966952685081111 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.976711181999597 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.434922353262717, 0.00348254291346545, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.909780574048893, -0.00343205155037304, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.856182856480833 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.887013316092284, 0.00340966721813803, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.33356717421016, 0.00336865503711646, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.39517460707623 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.4986617647134, 0.00332796992335919, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.922440065032667, 0.00329174838295108, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.36303625822637 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.33602035181268, -0.00326413954122969, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.874220731420897 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.97139800657778, 0.00324172446240521, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.22817670432127 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.45025856667115, 0.0032394086236675, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.09417613805274, 0.00322704000812304, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.04441788922265 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.947757281870179, -0.00322594345658104, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.854749198709277, -0.00319443217974467, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.07670090378175 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.44966482450581 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.34418180592954, 0.00318999058373766, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.12898704706676, -0.00317086998588759, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.37527539546748 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.25246204600328, 0.00313119725675155, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.05015270912332 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.40115101612719, 0.00308742657969339, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.00530502846783 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.944363536904245, 0.00302297521996049, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.877089903239066 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.858967691994239 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.827239065213497, -0.00302093845701055, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.37125670890025 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.30862361660925, -0.0029833143089365, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.978040298650981, -0.0029757263584532, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.37782683510153, 0.00297464396807444, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.831060938701923 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.792879137726857, -0.00295140608135344, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.08209504304339, -0.00294951872326333, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.07805488418889 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.02542468290017 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.05035370129856, -0.00292375587221304, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.11611367654543, -0.00292082152393483, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.2160896493411 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23434054002733 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.53726310127802, 0.00290938453466943, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.30396196941673 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.44825927662944, 0.00289411764972744, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.957474392820208 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.983234116422538, -0.00288453258658816, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.01060131397351 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.04027971306826, -0.00286501328778504, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.980640681156277, -0.0028625646137148, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.05553977589406 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.21052226855662 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.37770797190211, -0.00284925206560491, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.03293487838369, 0.00284924591921555, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.07246650285035, -0.0028390935022513, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.859764510588721 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.884313459999119 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.593576282498002, -0.00281546064403975, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.851731560997165 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.887923976779929, 0.00279044568568036, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.869686251580395 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.62520077935524, -0.00276205668220229, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.67560117996209, -0.00275745895701981, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.12041275112777, -0.00271477216880133, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.96673125688331 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.950718547119746, 0.00268341837264213, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.25417940247186 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.04186482927286 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.813188272346049, -0.00266086391277331, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.93954163912127 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.899177079611789, -0.00264030154779314, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.936528402474118 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.954199967270602, 0.00263600337588782, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.05272473714001 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.941817756724881, 0.00263444056454117, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.957025892455838, 0.00259699065745772, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.931512942919776 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.898244195999617 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.833991149515406, 0.00259680676454941, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.05528258061262 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.98802467236844, -0.002553182188146, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.04986802419074 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03467704033223, 0.00255178342947877, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.915354749850066, 0.00254699213774702, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.3224771132253, 0.00248946216204096, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.87964008269421 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.865976213406224, 0.00248786235816839, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.0445487303361 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.0384203704542, 0.00247688459289969, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.972901302826579 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.907227417542244, -0.00246613678859568, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.07190615929039, -0.00245082710377208, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.40122651546086, 0.00244718083687579, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.24135728456161 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.871658822749402, 0.00242854161180259, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.31945527307936, -0.00235831980338422, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.735114303614995 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.42317877849448 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.793744222551962, -0.00232734897097551, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.887680103858508 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.889834183812708, -0.00229041669245134, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.2787315254526 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.31199034660684, 0.00227334846262917, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.32352617949785 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.47807558718276, -0.00226806519773867, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.89135107110496 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.88154047966434, 0.00221523152567207, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.34277616381311 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.3615274623429, 0.00220354385734897, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.863359467151898 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.960307767602695, 0.00218188154309068, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.913510638113869 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.805840348246377, -0.0021818629294818, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.39968320768093, 0.00215939325465051, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.09638289460021, -0.00206193346065796, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.881525413018651 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.691040100539121, -0.00204136770514427, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.25417940247186 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.04186482927286, 0.00201266621666951, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.03310022594668, 0.0020082126184399, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.970536644302606 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.952413126464422, -0.00198688863837832, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.06600124949586, -0.0019815993759158, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.02403523003355, -0.00195636944447731, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.902451330728719 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.90315714233785, -0.00195082492548245, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.85104678741516, -0.00194930623284282, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34653960993578 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.85694499154494 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.21891969968349, 0.00190908152568137, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.31999501039319 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.00274704035344 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.919913432196862, 0.00188296096062144, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.05592751717621 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.07563979416555, -0.00184475263204281, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.39195126859653 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.38668478654262, 0.00184275670962482, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34653960993578 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.85694499154494 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.21891969968349, -0.00182135486494106, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.999689286413294 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.10258772024805 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.32772469957359, 0.00180518565063422, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 0.852134267496972 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.855130071115941, 0.00180076658851687, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.04027971306826 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.969035478627084, 0.0017499038882641, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.24266482206042 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.15565962056837, 0.00172975667264383, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.28381229003165, -0.00171282859168877, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.35198383094047, -0.00167364671011669, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.1298053800956, -0.00164695008798594, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.91197680403029 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.89501631896242, -0.0015922241403153, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.988261982027014, -0.00158979560363261, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.03653411966117, 0.00157263993431474, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.914213675163693 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.879880831503114, -0.00154826827703072, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.22181001223104, -0.00143403213463175, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.33243424843946 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.20482779114642, -0.00134414720745373, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.20354360063074 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.13350036058998, -0.00132414444002319, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.967744049890247, -0.00128773186245024, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.31945527307936 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.908375557508399 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.56388742825914, 0.00126732849257072, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.898608041800499, 0.00123226313241152, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.15954455635314, -0.00122469343627024, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.996492486422194, -0.00119576844250084, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.35152005561828, 0.0011868839675692, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.04399151317856 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.00335419380208, -0.00118133929823323, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.09524954826731, -0.00116846671731368, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.10029924067518, -0.00110925013251192, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.08507192269264, 0.00110244798037604, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.32352617949785, 0.00109395465902616, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.04346390099789, -0.00105836448319721, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.844929749593815, 0.0010385536769344, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.08353235633319, 0.000990974081875634, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.12468990300863, 0.000986200780653051, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.1239026602759, -0.000985561599742251, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.909747704785207, -0.000938585012063306, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.11845591594238, 0.00088762636890347, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.874220731420897 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.97139800657778, -0.000882195909546862, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.00715867758118, -0.000867643323908016, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.24024815425569, -0.000859684442186208, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.830995509611506 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.577317595969596 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.815764131501761, 0.000855546934228679, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.892953450438548 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.913789879135182, 0.000843299370899447, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.964019470278914 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.933158346403072, -0.000802036290575967, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.04240646564147 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.895596433557823, -0.00079602032518946, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.09851971983192 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 2.17675499575527, -0.000778691915910567, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.904798811292425, -0.000773349003332429, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.37038797452095, 0.000760515490749051, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.36346624543365, -0.000755976883486905, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.06216843464664 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.961760603345072 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.959089496337387, 0.000754477065453217, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.87610145823139, -0.000743566862787078, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.0682771317612 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.10815133288372, 0.00071408310043794, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.4986665784851 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.29898234420424, 0.000703109171193013, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.937367481636307 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.835997596421775, -0.000701806231111943, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.02344070164844 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 0.852408278599834, 0.00068731984563616, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.852103616149724 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.931211151968348, 0.000679750682767402, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.06216843464664 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 0.961760603345072 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.959089496337387, -0.000663942323434949, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.12147840016148, -0.000655600247829782, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.873194456275202 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.892523734914106, -0.000634870052627146, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.832837972866098 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.590237832374461, 0.000623384025893317, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.874882222531005 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.909037311148994, -0.00061844843799436, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.07583580015453 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 0.950110700825569, -0.000615587217199421, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.26871834232988, 0.000614603723023836, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.25583405219755, 0.000610416389805169, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.08441785829935, 0.00060859569189544, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.04807111339042 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.04376930270378, 0.000597479433244511, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.867653265475925, 0.000580711509629958, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.21084146466237 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.03275929945658, 0.000570985695038223, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 0.829657844568558, -0.000558924084046112, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.04147010182348, 0.000542685519026073, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.915611297441359 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.898534054795876, -0.0005423549224387, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.12188214971029, -0.000533845056772257, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.03653411966117 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.23401449319348, -0.000532611799858636, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.31525823030643 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.23139146458155, 0.000522831111838234, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.40204338746576 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.27763943917897, -0.000517777102080462, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.09634162118346, -0.000488275115623572, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.14818268804446 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.16312234121278, -0.0004794908662087, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.877000192627709 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.918118862993646, 0.000468905817608071, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.08612397260923 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.07169599155486, -0.000461129375414205, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.957249199893909 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.968700915290745, -0.000459977959163935, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.13985164596761 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.02181613958808, 0.000446939753694228, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.01838137558332 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.360389788917, 0.000440989341900632, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.11362463703657 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.89891038956548 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.02848874689826, -0.000425822169754574, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.22051522981686, -0.000424014060473518, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.04249576475707 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.04329972915065, -0.000407933210506314, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.852868219107228 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.904583577179706, -0.000403616359761544, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.41334718172177 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.23501393238222 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.40205299967252, -0.000386634836094581, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.03653411966117 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.23401449319348 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.24453103718277, -0.000376464730358069, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.23572233269957 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.11966187362861, -0.000374707034928239, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.35329333757217 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.26414493983061, -0.000358799431804868, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 0.957249199893909 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.928097560819421, 0.000344536294748582, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.3388451500221 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.34897498189198, 0.000339580761384243, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.05965982547378, 0.000328195142171614, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.39517460707623 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.4986617647134, -0.000324863468232887, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.906932872931764 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.931800361706014, 0.000298055181332318, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.28313766917091 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.737226259731416, 0.0002976915296855, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.72356672560156, -0.00029299110316736, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.35503969414523 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23878576723927, 0.000286605775974309, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.735114303614995 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.42317877849448, 0.000259766037912937, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.36103445517464, -0.000254746351066977, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.35605052716789, 0.000243857128232761, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.880137915011245, -0.000240296940142497, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.23315199710487 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.33406859925351, 0.000239052607713721, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.830029314454341 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.87196559261787, 0.000227534543831866, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.919036502687591 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.961351414280969, -0.000220057789090931, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 1.12597425056647 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.20158463048025, -0.000219812583217312, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.33350865269969, 0.000216433377346793, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.03853092561245 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 2.2428304683037 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 2.49071748725718, -0.000208426356073263, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.877226697347508 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.431223123567725, -0.000204098904162867, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 1.34277616381311 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.3615274623429, -0.000201430300806877, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.25669053717296, 0.000199007711419238, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.966952685081111 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 0.976711181999597, -0.000185499736464924, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.11119445784276, 0.000181982557079459, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.36303625822637 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.33602035181268, 0.00017894044245867, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.893653793808612 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.04543781355996 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.00358634837851, 0.000177466665641728, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 1.20141272345263 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.21762046364458, 0.000164999344412068, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.856233615032966, 0.000156264835107226, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.905894334241514, -0.000153817924896375, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.42358680359283, 0.000153067068914208, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.06488076398632 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.04049319727917, -0.000143207722483321, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.22415507320277 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.46993038271285, 0.000139715477320807, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.876270947014132 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 0.865898956693699, 0.000138036703172767, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.04614072583522 & XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 0.980360207968927 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.638081403181287, -0.000130954082466954, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.30079316378989 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.98913000070314 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.866652942487459, -0.000130873064487289, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.01139126488373 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.00926200577589, -0.000130216962651546, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.22051522981686 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.910294622701578 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.728547688012695, -0.000127381047516445, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.29936070829162, -0.000123465766853719, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 0.973108144310501 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.96879523324276 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 0.956513187673127, -0.000121435658241077, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.01139126488373, -0.000114043433304666, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.843131744534883 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.894238408697896, 0.000113332293136546, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.34354256357022 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.31357469536326, -0.000110294265554258, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.21177463963806 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.2845644045679, 0.000106310983155297, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.36964576791517, 0.000103983456189131, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.19040271067216, -0.000102590120233945, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.07767124057222 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 1.08000601252991 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.390981197292, 0.0000992014995002853, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.868158008605044 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 1.12728449339912 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.31906155762623, 0.00009629844086302, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX > 0.916908413156355 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.03862335582334, -0.0000926368146473989, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.23863474270592 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.09572124693659, 0.0000925756990901105, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.08634685678687, 0.000089489648737535, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.98481063677435 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.27226086215418, -0.0000885029827076689, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.853645038381408, -0.0000851328659370119, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.14083528859261 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.06923934228597, 0.0000829458903680672, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.0488418943762, -0.0000825285389967051, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.06488076398632, -0.0000820395615139841, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.29168565544984, 0.0000781090752761053, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX <= 1.37952231133207 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.33153332355589, -0.0000759393737892249, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX > 1.31945527307936 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 2.35163019898018 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.826794146032151, 0.0000758411400069336, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.834070394865509 & XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.561070189102893, 0.0000711970416440925, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.28880710021379, -0.0000699626029661113, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.01567856153012, -0.0000678105780378975, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.904123656503336 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.907196202507125, 0.0000677039112189193, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.35152089398059, 0.0000635860033068141, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.29699582231818, -0.0000571310800902501, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.834860042139387 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.01526335456444, 0.0000569877610488321, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.34420068350773, 0.0000564678643745028, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX > 1.04669835096704 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 1.17096216181466, 0.0000507045501812927, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.190085571807104 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.232153606859, 0.0000502280606445634, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.947884539024048, -0.0000488237303393659, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.07184503470502, -0.0000452086228372561, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.883156994566195 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.957167600933645, 0.0000450605433802653, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.05613867069986 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.02975176511655, -0.0000414928113449154, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.01482386360845, -0.0000406929875201535, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.97565854191988 & XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.8867111658547, -0.000040064028239912, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.20437077743006, -0.0000398419214104415, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.35424180958054, 0.000039198795728351, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.25470105441834, 0.0000386762324513461, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.00294260693498, 0.0000349313957346405, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.08441785829935 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.09711381782565, 0.0000326204009299482, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.976173375486984, -0.0000325471471116262, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.27044502146024, 0.0000322671614425443, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.943582618715492, -0.000031862562003128, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.20621789021796, -0.0000316564774230359, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.902548976214405, -0.000031431866681501, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.98188558029411, 0.0000302285607887252, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.884851532580825, -0.0000297899840877304, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.08424801989329 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.14261685429855, 0.0000289829974201233, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 0.918467365716101, 0.0000285085303041423, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.30903873636976, -0.0000283809998412198, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.24855541042532, -0.0000275504871620473, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.10796969617065, 0.0000260077124116569, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX <= 1.04430731517994, -0.0000243752711990526, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 0.901516511724948, -0.0000231762162623958, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 2.24135728456161, 0.0000226492207315773, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX > 0.967493755653802 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.02610140956731, 0.0000208594550564295, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 1.30878469853823, -0.0000200423705994655, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.27021125361213, 0.0000175309252598473, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.39461436596385 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 1.36193351399301, 0.0000173475774700173, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.08768751154509, -0.0000166575150364074, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 1.35958467327548 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.34977040955101, -0.0000140475554349537, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.07971990984129, 0.0000135926275114657, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.25446060826171 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 1.28158835247122, -0.0000130792497459048, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX > 1.12147840016148 & XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 2.46922717253169 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX <= 2.08959291303171, -0.00000980140554824843, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX > 0.894971255630995 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.66419801655982 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX > 0.882696728900872, 0.00000958415598991686, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.18288161921642, 0.00000950613829107423, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXNNNNNNNXXXXXXXX <= 1.14367389944559, 0.00000916945269530357, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.33243424843946 & XXXXXXXXXXXXXXXXXXXXNNNCNNNNXXXXXXXXXXXXXXXXXXX <= 1.20482779114642, 0.00000914750197021399, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.86818302615905 & XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXNNNNNNNXXXXXX > 0.853479672434208, -0.00000862416855517291, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXNNNNNNNXXXXXXXXXXXXXX <= 1.27704523319522, 0.00000810139567222492, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.08004358708774, 0.00000671937097672074, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXNNNNNNNXXXXXXXXX <= 1.34451373515663 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 1.08925563805148, -0.000005640059056824, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 1.35663557293265, -0.00000465381991745199, 0) +
fifelse(XXXXXXXXXXXXXXXXXNNNNNNCNXXXXXXXXXXXXXXXXXXXXXX <= 0.963730013338503, 0.00000288192017276138, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXXXXNNNNNNNXXXX > 0.861901641213631 & XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX > 0.928570101115731, 0.00000285029085217759, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCXXXXXXXXXNNNNNNNXXXXXXX <= 1.04656076791866, -0.00000273637519413457, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXXXCNNNNNNNXXXXXXXXXXXXXXXX <= 1.07364036103663, 0.00000264202387667376, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX > 0.873160883052272 & XXXXXXXXXXXXXXXXXXXNNNNCNNNXXXXXXXXXXXXXXXXXXXX > 0.949257522041606, 0.0000026129158378542, 0) +
fifelse(XXXXXXXXXXXXXXXXXXXXXNNCNNNNNXXXXXXXXXXXXXXXXXX <= 1.01440719149976, -0.00000186595913239291, 0) +
fifelse(XXXXXXXXXXXXXXXXXXNNNNNCNNXXXXXXXXXXXXXXXXXXXXX <= 0.956371933543879, 0.00000116048585856704, 0)]
return(input)
}
|
8922d6ec5382c35eb0212b7dc0cd6f0e61e49cc4
|
f4bbaaf60d7784a23fc934a8f2f42af9ac07b3a7
|
/seqDatetime_byEnddate.R
|
aa96fec63efd3698e9041d1c986ca87b47fd9886
|
[] |
no_license
|
praster1/Unit-root-test-of-Non-stationary-time-series
|
6e8fca8f47b8c71a48ab9e1022a4e10ac494ed51
|
e47b62ac7dcefde7e4bcb16b29ccd12960ee3eb8
|
refs/heads/master
| 2020-04-02T01:54:59.554348
| 2019-04-17T18:04:10
| 2019-04-17T18:04:10
| 153,881,587
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 929
|
r
|
seqDatetime_byEnddate.R
|
# 시작일(startDate)부터 종료일(endDate) 직전까지 날짜 벡터 구하기
# Build a vector of timestamps from startDate up to (but excluding) endDate,
# placing `split` evenly spaced points within each 24-hour day.
#
# startDate / endDate: anything as.POSIXct() accepts (e.g. "YYYY-MM-DD").
# split:               number of points per day; the offsets run from 1 s
#                      after midnight to exactly 24 h after midnight.
# Returns a sorted POSIXct vector of unique timestamps strictly before endDate.
seqDatetime_byEnddate = function(startDate = "2000-01-01", endDate = "2000-01-02", split = 96)
{
    # Coerce the inputs to POSIXct (YYYY-MM-DD HH:MM:SS).
    start_ts <- as.POSIXct(startDate)
    end_ts <- as.POSIXct(endDate)

    # Validate: the interval must be non-empty and split must be positive.
    if (start_ts >= end_ts) stop("endDate must be greater than startDate.")
    if (split <= 0) stop("split must be greater than 0.")

    day_secs <- 60 * 60 * 24
    # Per-day offsets in seconds: `split` points from 1 to 86400 inclusive,
    # matching the original seq(1, 60*60*24, length = split) spacing.
    day_offsets <- seq(1, day_secs, length = split)

    # Walk forward one calendar day (86400 s) at a time, collecting the
    # per-day timestamps.  No sentinel element is needed; we start from an
    # empty POSIXct vector instead.
    stamps <- as.POSIXct(character(0))
    cursor <- start_ts
    while (cursor <= end_ts) {
        stamps <- c(stamps, cursor + day_offsets)
        cursor <- cursor + day_secs
    }

    # De-duplicate, order chronologically, and drop anything at/after endDate.
    stamps <- sort(unique(stamps))
    stamps[stamps < end_ts]
}
|
2e3309c758ee5141b778808713bdf69c810bca46
|
0a9473ca331986d383ceaad6f50ed3b508bca3e0
|
/package-dplyr.R
|
708886eef2b622c150515e8346673284342ad41c
|
[] |
no_license
|
rjkhan/R-ExamPreparation
|
255ed84da9e8227edc23b29e8bf74a35f1df384a
|
f226c88074e333e2f935b5fef7a23629e1c4f174
|
refs/heads/master
| 2021-07-14T09:14:02.566170
| 2017-10-20T04:31:52
| 2017-10-20T04:31:52
| 107,160,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
package-dplyr.R
|
# Practice script for dplyr verbs using the nycflights13 data set.
library(dplyr)
library(nycflights13)

# BUG FIX: `flights` is a tibble, not a function -- `flights(3:5)` raises
# "attempt to apply non-function".  The rest of the script refers to the
# `month`, `day`, `carrier`, ... columns, so the full data set is intended.
flight_data <- flights

# dplyr filter vs. base-R subsetting -- both select the Jan 1st flights
r <- dplyr::filter(flight_data, month == 1 & day == 1)
dim(r)
r <- flight_data[flight_data$month == 1 & flight_data$day == 1, ]
dim(r)

# Extract a number of rows from the table
slice(flight_data, 1:2)

# Order the data; works like filter but for ascending/descending sorting
r <- arrange(flight_data, sched_dep_time)

# Select specific columns
r <- select(flight_data, year, carrier)
r

# Select all columns between sched_dep_time and carrier
select(flight_data, sched_dep_time:carrier)

# Select all columns except that range
select(flight_data, -(sched_dep_time:carrier))

# Distinct (unique) values
distinct(select(flight_data, dest))

# Add a new column to the table
select(mutate(flight_data, gain = 0), gain)
arran <- select(slice(arrange(flight_data, arr_delay), 1:40), arr_delay)
rename(flight_data, year = year)  # no-op rename, kept as a syntax example
k <- transform(flight_data, gain = 0)
k$gain
plyr::summarise(flight_data, sd(arr_delay, na.rm = TRUE))
dplyr::summarise(dplyr::group_by(flights, dest), M = sd(arr_delay, na.rm = TRUE))

# Random sampling: fixed row count vs. fraction of rows
sample.n <- sample_n(flight_data, 10)
sample.fr <- sample_frac(flight_data, 0.0001)

# Group/summarise on a single-column selection
one_col <- select(flight_data, dest)
one_col
by_tailnum <- group_by(one_col, dest)
delay <- summarise(by_tailnum, count = n())
delay
by_tailnum

# Miscellaneous experiments on a small data frame
a <- data.frame(a = c(1:10), b = seq(1, 10, 1))
dplyr::tbl_df(a)    # NOTE(review): tbl_df() is deprecated; as_tibble() is the modern equivalent
dplyr::glimpse(a)
tidyr::gather(a, "a", convert = TRUE, value = "ccc")
dplyr::cumall(a$a)  # NOTE(review): cumall() expects a logical vector; newer dplyr errors on numeric input
dplyr::bind_rows(a, a[1])
fruit <- c("apple", "banana", "pear", "pineapple")
fruit
# BUG FIX: removed a stray trailing `s`, which referenced an undefined object.
|
62eaa04c069381ee030461c2319381fe45d86293
|
02e16d94c252fdcba74cd8bd397bdaae9d7758c7
|
/R/attenuate.r.R
|
8a3ad89ba51d85deefe1b0b66a90ed08f65299f2
|
[] |
no_license
|
Matherion/ufs
|
be53b463262e47a2a5c4bcbc47827f85aa0c4eb2
|
9138cab0994d6b9ac0cea327a572243d66487afb
|
refs/heads/master
| 2020-03-24T21:11:05.053939
| 2019-02-12T10:33:48
| 2019-02-12T10:33:48
| 143,017,526
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
attenuate.r.R
|
#' Attenuate a Pearson's r estimate for unreliability in the measurements
#'
#' Applies the classical attenuation formula: the observed correlation is
#' the true (disattenuated) correlation multiplied by the square root of
#' the product of the two measurements' reliabilities.
#'
#' @param r The (disattenuated) value of Pearson's r
#' @param reliability1,reliability2 The reliabilities of the two variables
#'
#' @return The attenuated value of Pearson's r
#' @export
#'
#' @examples
#' attenuate.r(.5, .8, .9);
attenuate.r <- function(r, reliability1, reliability2) {
  # Joint attenuation factor contributed by the two unreliable measures.
  attenuationFactor <- sqrt(reliability1 * reliability2);
  return(r * attenuationFactor);
}
|
d182b05943529c454f437bb2f817401a2b5bca42
|
51abd749be8aba20130d819c28878c40ede3cf11
|
/main.R
|
2013d140e51bda001a81cd5efc3d5bd596d7afef
|
[] |
no_license
|
BasBoek/GreenestCity
|
fdf9a9a6f0b8a797259bcc3c7d3e92ce92f2e3a6
|
ff532e554c232ac86465ee1e26e7acebfe222ab9
|
refs/heads/master
| 2021-01-15T17:41:10.915812
| 2015-01-12T17:56:37
| 2015-01-12T17:56:37
| 29,135,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,618
|
r
|
main.R
|
# Team Bastei
# January 12, 2015
#
# Workflow: download monthly MODIS NDVI rasters for the Netherlands,
# aggregate NDVI per municipality, and report the "greenest" municipality
# per month and for the yearly mean.  Helper functions come from the
# sourced files under R/.
library(raster)
library(rgdal)
library(sp)
source('R/DownloadData.R')
# Download monthly MODIS NDVI data for The Netherlands (12 monthly layers)
DownloadNDVI('https://github.com/GeoScripting-WUR/VectorRaster/raw/gh-pages/data/MODIS.zip')
NDVImonth <- brick('data/MOD13A3.A2014001.h18v03.005.grd')
# Creating the NDVI yearly average and appending it as a 13th layer next
# to the 12 monthly layers
meanNDVI <- mean(NDVImonth)
allNDVI <- stack(NDVImonth, meanNDVI)
names(allNDVI)[13] <- paste('Mean_NDVI')
# Download municipalities of the Netherlands
# (argument 3 presumably selects the admin level -- defined in R/DownloadData.R)
Cities <- DownloadCitiesNL(3)
# Reproject the municipality polygons to the raster's CRS so they overlay
City_newCRS <- spTransform(Cities, CRS(proj4string(allNDVI)))
source('R/GreenPerCity.R')
# Extracting mean NDVI/month/City -- slow (Please take a coffee...)
GreenCity <- GreenPerCity(allNDVI, City_newCRS)
# Plain (non-spatial) data frame copy of the attributes for subsetting
GreenDF <- GreenCity@data
source('R/GreenestCity.R')
### Examples of determining the greenest city of the Netherlands for a month or averaged over the year
print(Greenest_Jan <- GreenestCity('January'))
print(Greenest_Jun <- GreenestCity('June'))
print(Greenest_Nov <- GreenestCity('November'))
print(Greenest_ALL <- GreenestCity('Mean_NDVI')) # <- MEAN OVER THE YEAR! # # # Greenest City EVER # # #
### Please try it yourself!!! (first uncomment next line and fill in month of your choice)
# GreenestMonthCity <- GreenestCity('ENTER__MONTH')
# Plot the per-municipality NDVI average for selected layers
spplot(GreenCity['March'], main=list(label="NDVI March",cex=3))
spplot(GreenCity['November'], main=list(label="NDVI November",cex=3))
spplot(GreenCity['Mean_NDVI'], main=list(label="NDVI YEAR_AVG",cex=3))
|
aa979d734206ab78ed707061a838e3ee49d9cd85
|
d55156b62c4bd353228726e303a1bb70034800be
|
/app.R
|
c863254134b03220a92a60977e3b8d147b11066d
|
[
"MIT"
] |
permissive
|
ubco-mds-2020-labs/dashboard-project---r-data_551_group15
|
4e0f8f9cabb5fbfd9dc5f4282634b73930f3769b
|
471d65181bfb52a6cf34e58508978bdccf7145b5
|
refs/heads/main
| 2023-03-24T01:15:11.476470
| 2021-03-21T23:53:54
| 2021-03-21T23:53:54
| 348,539,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,427
|
r
|
app.R
|
# This file renders the layout of the app
# Import necessary libraries
library(dash)
library(dashHtmlComponents)
library(dashCoreComponents)
library(dashBootstrapComponents)
library(tidyverse)
library(ggplot2)
library(dplyr)
library(reshape)
library(ggpubr)
library(cowplot)
library(plotly)
# Project helpers: dataWrangling.R defines `Canada_economic_indicators`;
# graphs.R presumably defines the tabX_* plotting functions used below.
source("src/dataWrangling.R")
source("src/graphs.R")
# Prepare the input values:
# Unique provinces and years observed in the data.
provinces = unique(as.list(Canada_economic_indicators$Geography))
Years = unique(as.list(Canada_economic_indicators$Year))
# Dropdown options as label/value pairs.  Built with lapply() instead of
# growing a list with append() inside a for-loop (same resulting list of
# lists, without the quadratic copying).
geos = lapply(provinces, function(p) list(label = p, value = p))
# App layout:
# One Bootstrap container holding (1) a navbar, (2) a row with the two
# shared controls (province dropdown + year slider) and (3) a row of four
# tabs, each stacking dccGraph placeholders that the callbacks fill in.
app = Dash$new(external_stylesheets = dbcThemes$BOOTSTRAP)
app$layout(
  dbcContainer(
    children = list(
      # header nav:
      dbcNavbarSimple(
        #Data Sources:
        dbcDropdownMenu(
          children = list(
            dbcDropdownMenuItem("temp",href = "#")
          ),
          nav = TRUE,
          in_navbar = TRUE,
          label = "Data Sources"
        ),
        #Other Nav paras:
        brand = "Canada 2010-2020 Economy Analysis",
        color = "primary",
        dark = TRUE,
        brand_href = "#"
      ),
      dbcRow(
        list(
          dbcCol(
            # Dropdown: province selector (options built from the data)
            list(
              htmlLabel(
                htmlH5("Select Province:")
              ),
              dccDropdown(
                id = "province",
                options = geos,
                value = "British Columbia"
              )
            )
          ),
          dbcCol(
            # Slider: whole years 2010-2020, labelled at 2010/2015/2020
            list(
              htmlLabel(
                htmlH5("Select Year:")
              ),
              dccSlider(id = "year", min = 2010, max = 2020, value = 2015, marks = list("2010" = "2010", "2015" = "2015", "2020" = "2020"), tooltip = list("placement" = "top"))
            )
          )
        )
      ),
      dbcRow(
        dbcCol(
          # Tabs: the dccGraph ids (t1_1 ... t4_4) are wired to plotting
          # functions in the callback section further down the file.
          dbcTabs(
            children = list(
              dbcTab(
                label = "GDP",
                children = list(
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t1_1")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t1_2")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t1_3")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t1_4")
                    )
                  )
                )
              ),
              dbcTab(
                label = "GDP per Capita",
                children = list(
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t2_1")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t2_2")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t2_3")
                    )
                  )
                )
              ),
              dbcTab(
                # NOTE(review): label typo ("Conusmer") left untouched --
                # it is a user-visible runtime string, not a comment.
                label = "Conusmer Price Index(CPI)",
                children = list(
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t3_1")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t3_2")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t3_3")
                    )
                  )
                )
              ),
              dbcTab(
                label = "Employment and Earnings",
                children = list(
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t4_1")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t4_2")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t4_3")
                    )
                  ),
                  dbcRow(
                    dbcCol(
                      dccGraph(id = "t4_4")
                    )
                  )
                )
              )
            )
          )
        )
      )
    )
    ,style = list('max-width' = '85%') # Change left/right whitespace for the container
  )
)
# call backs:
# Every graph shares the same two inputs -- the province dropdown and the
# year slider -- and differs only in its output id and in which plotting
# function (sourced from src/graphs.R) produces the figure.  Register them
# from a lookup table instead of repeating ten near-identical
# app$callback() blocks.
#
# The original file also carried commented-out callbacks for t2_3
# (tab2_three), t4_1 (tab4_one) and t4_3 (tab5_one), and no callback at
# all for t3_3; those graphs stay unregistered here as well because their
# plotting functions are not available.
graph_callbacks <- list(
  t1_1 = tab1_one,
  t1_2 = tab1_two,
  t1_3 = tab1_three,
  t1_4 = tab1_four,
  t2_1 = tab2_one,
  t2_2 = tab2_two,
  t3_1 = tab3_one,
  t3_2 = tab3_two,
  t4_2 = tab4_two,
  t4_4 = tab5_two  # note: t4_4 is intentionally backed by tab5_two
)

# Register one callback wiring (province, year) -> figure for `output_id`.
register_graph_callback <- function(output_id, plot_fun) {
  force(plot_fun)  # evaluate the promise now so each closure keeps its own function
  app$callback(
    list(output(output_id, "figure")),
    list(input("province", "value"),
         input("year", "value")),
    function(province, year) {
      # Plotting helpers take (year, province) in that order.
      list(plot_fun(year, province))
    }
  )
}

for (id in names(graph_callbacks)) {
  register_graph_callback(id, graph_callbacks[[id]])
}

app$run_server(host = "0.0.0.0")
# app$run_server(debug = TRUE)
|
5eed9bc83fb9c285296c368ba32f6249f9e8cf85
|
338375c4dca04671ab0ac7a80676d629afff207f
|
/02-Pacote_dplyr.R
|
b2951359770760b95dd51ae256049217d3555409
|
[] |
no_license
|
renatosarda1981/BigDataRAzure-Cap07
|
5ada08bc3b07fc57e719d6c68f21b8c20fab84d7
|
c5c1163365ba5a209e2d021485b2aea7011ce450
|
refs/heads/master
| 2023-05-31T21:17:05.394782
| 2021-06-08T19:28:25
| 2021-06-08T19:28:25
| 356,620,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
02-Pacote_dplyr.R
|
setwd('C:/FCD/BigDataRAzure/Cap07')
getwd()
library("readr")
library("dplyr")
sono_df <- read_csv("sono.csv")
View(sono_df)
head(sono_df)
class(sono_df)
str(sono_df)
# função glimpse() substitui str()
glimpse(sono_df)
#Aplicando mutate
glimpse(mutate(sono_df, peso_libras = sono_total / 0.45359237))
count(sono_df, cidade)
hist(sono_df$sono_total)
#Amostragem
sample_n(sono_df, size = 10)
# select()
sleepData <- select(sono_df, nome, sono_total)
head(sleepData)
class(sleepData)
select(sono_df, nome)
select(sono_df, nome:cidade)
select(sono_df, nome:pais)
# filter()
filter(sono_df, sono_total >= 16)
filter(sono_df, sono_total >= 16, peso >= 80)
filter(sono_df, cidade %in% c("Recife", "Curitiba"))
# arrange()
sono_df %>% arrange(cidade) %>% head
sono_df %>%
select(nome, cidade, sono_total) %>%
arrange(cidade, sono_total) %>%
head
sono_df %>%
select(nome, cidade, sono_total) %>%
arrange(cidade, desc(sono_total)) %>%
filter(sono_total >= 16)
sono_df %>%
select(nome, cidade, sono_total) %>%
arrange(cidade, sono_total) %>%
head
# mutate
head(sono_df)
sono_df %>%
mutate(novo_indice = sono_total / peso) %>%
head
sono_df %>%
mutate(novo_indice = sono_total / peso,
peso_libras = peso / 045359237)%>%
head
# summarize()
sono_df %>%
summarise(media_sono = mean(sono_total))
sono_df %>%
summarise(media_sono = mean(sono_total),
min_sono = min(sono_total),
max_sono = max(sono_total),
total = n())
# group_by()
sono_df %>%
group_by(cidade) %>%
summarise(avg_sono = mean(sono_total),
min_sono = min(sono_total),
max_sono = max(sono_total),
total = n())
# operador: %>%
head(select(sono_df, nome, sono_total))
sono_df %>%
select(nome, sono_total) %>%
head
sono_df %>%
mutate(novo_indice = round(sono_total*peso)) %>%
arrange(desc(novo_indice)) %>%
select(cidade, novo_indice)
sono_df2 <- sono_df %>%
mutate(novo_indice = round(sono_total*peso)) %>%
arrange(desc(novo_indice)) %>%
select(cidade, novo_indice)
View(sono_df2)
|
f1ccb36609079e23af78dfcfd109d677cc1d627f
|
b5cbb046412053f2202c315c640b7b728101a482
|
/Semester3/AnalytischesGrundpraktikum/Aufgabe2/SimulationTitrationskurve.R
|
bbdb47d6b7ada10476ccb21966c757291241120d
|
[
"MIT"
] |
permissive
|
Progklui/studyChemistryFloKlui
|
7da906a831f17bfcbb9cec8787bcbec218e49feb
|
7b08dcf93cd888d3a93eda5b1835814b37245aa5
|
refs/heads/master
| 2020-04-24T20:01:46.775619
| 2020-01-15T22:46:16
| 2020-01-15T22:46:16
| 172,231,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
SimulationTitrationskurve.R
|
# Simulate the acidimetric titration of carbonate (a diprotic weak base)
# with HCl and locate both equivalence points from the first derivative
# of the simulated curve.
library(titrationCurves)
pk_w = 14            # ion product of water (pKw)
pka_1 = 6.35         # first pKa (carbonic acid)
pka_2 = 10.33        # second pKa
conc_base = 0.04508 # in mol/l
conc_acid = 0.05 # in mol/l (nominal; corrected by the titer below)
titer_acid = 0.9799  # titer correction factor of the HCl solution
volume_base = 10 # in ml
volume_burette = 25 # in ml
# Simulated curve: diprotic weak base titrated with a strong acid.
titration_Curve = diwb_sa(conc.base = conc_base, conc.acid = conc_acid * titer_acid, pka1 = pka_1, pka2 = pka_2, pkw = pk_w, vol.base = volume_base, overlay = TRUE)
titration_Curve_deriv = derivative(titration_Curve)
deriv = titration_Curve_deriv$first_deriv
# Flip the sign so the equivalence points show up as maxima.
deriv$y1 = - deriv$y1
# First equivalence point: maximum of the first half of the derivative.
intervall_1 = deriv$y1[1:(length(deriv$y1)/2)]
pos_eqp_1 = which(deriv$y1 == max(intervall_1))
eqp_1 = deriv$x1[[pos_eqp_1]]
# Second equivalence point: maximum of the second half.
intervall_2 = deriv$y1[(length(deriv$y1)/2):length(deriv$y1)]
pos_eqp_2 = which(deriv$y1 == max(intervall_2))
eqp_2 = deriv$x1[[pos_eqp_2]]
# Plot the titration curve plus its derivative and mark both
# equivalence volumes with dashed vertical lines.
plot(titration_Curve, , xaxs = "i", xlim = c(0, volume_burette), yaxs = "i", ylim = c(0, 14), panel.first = grid(), type = "l", col = "red", lwd = 2, main="Acidimetrische Titration von Carbonat", xlab = "V(HCl) in ml", ylab = "pH")
lines(deriv, type = "l", col = "blue", lwd = 1)
abline(v = eqp_2, col = "red", lty = 2)
abline(v = eqp_1, col = "red", lty = 2)
cat("Äquivalenzpunkt 1 bei ", eqp_1, " ml")
cat("Äquivalenzpunkt 2 bei ", eqp_2, " ml")
|
1328ab706feeba5fb1be4fe0183c750bd3bf60bb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/xRing/examples/selectProfiles.Rd.R
|
7393897fce8aab71f051a4a7c7b9600af50a07bb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
selectProfiles.Rd.R
|
library(xRing)
### Name: selectProfiles
### Title: Select Profile(s)
### Aliases: selectProfiles
### ** Examples
# Interactive-only demo: image display and profile selection need a GUI.
if(interactive()){
# read a sample image shipped with the package
im <- imRead(file = system.file("img", "AFO1046.1200dpi.png", package="xRing"))
# to display the image
imDisplay(im)
# select a profile (the user picks it on the displayed image)
profile <- selectProfile(im)
# to display the selected profile as a line plot
plot(profile, type="l")
}
|
f4754732ed6270276cf13a16eceb8209ec6ac909
|
0081be39b9dc664eddd8c2a024885a362c64af56
|
/R/ringWidths.R
|
0809291d01e2a95ce0756f104c9b685a08ed2e2f
|
[] |
no_license
|
cran/measuRing
|
c50422fbf90defdcd0e5ce500b883a0e9db34462
|
1a1d73c85e69969266b4fc9e333965768b56141a
|
refs/heads/master
| 2021-01-13T01:41:54.200591
| 2018-02-20T14:15:55
| 2018-02-20T14:15:55
| 24,686,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,731
|
r
|
ringWidths.R
|
ringWidths <- structure(
    function#Ring widths
### This function can compute the ring widths (mm) from the ring
### borders detected on an image section.
    (
        image,##<<character or matrix. Either path of an image section
              ##or an array representing a gray matrix.
        last.yr = NULL,##<<year of formation of the newest ring. If
                       ##NULL then the rings are numbered from one
                       ##(right) to the number of detected rings
                       ##(left).
        ...##<< arguments to be passed to two functions:
           ##\code{\link{ringBorders}}, and/or
           ##\code{\link{imageTogray}}.
    )
    {
        ## Row names of the pixel-type data frame encode pixel positions.
        f.rown <- function(x)as.numeric((rownames(x)))
        ## Derive a sample title from the image file name (extension dropped).
        f.tit <- function(image){
            p <- '.tif'
            if(any(grepl('.png',image)))p <- '.png'
            bn <- basename(image)
            gsub(p,'',bn)}
        ## Detect ring borders; attributes carry resolution and image path.
        pixtypes <- ringBorders(image,...)
        attb <- attributes(pixtypes)
        ppi <- attb[['ppi']]
        names. <- f.tit(attb[['image']])
        ## Pixel-to-millimetre factor: 25.4 mm per inch / pixels per inch.
        scale <- 25.4/ppi ## (mm)
        pixtypes[,'distance'] <- f.rown(pixtypes)*scale
        ## Keep only the rows flagged as ring borders.
        finald <- pixtypes[pixtypes[,"borders"]%in%TRUE,]
        ## Label borders with calendar years and compute ring widths
        ## (delta) as distances between consecutive borders; the newest
        ## ring is last.yr, or nrow(finald)-1 when last.yr is NULL.
        f.label <- function(finald,last.yr){
            finald[,'item'] <- c(1:nrow(finald))
            finald[,'growth'] <- with(finald,(max(distance) - distance))
            if(!is.null(last.yr))year1 <- last.yr + 1
            else{year1 <- nrow(finald)}
            finald[,'year'] <- with(finald,year1-item)
            finald[,'delta'] <- with(finald,c(rev(diff(rev(growth))),0))
            finald <- finald[1:(nrow(finald)-1),c('year','delta')]}
        ## No borders detected: return an empty two-column data frame.
        if(nrow(finald)==0){
            trwd <- data.frame(year=vector(),delta=vector())}
        else{
            trwd <- f.label(finald,last.yr)
            last.yr <- max(trwd[,'year'])}
        names(trwd) <- c('year',names.)
        ## Propagate the ringBorders() attributes plus the newest year.
        attributes(trwd) <- c(attributes(trwd),## attcol,
                              rbord=attb,last.yr=last.yr)
        return(trwd)
### data frame with the ring widths.
    }
,
ex=function(){
    ## (not run) Read one image section:
    image1 <- system.file("P105_a.tif", package="measuRing")
    ## (not run) columns in gray matrix to be included/excluded:
    Toinc <- c(196,202,387,1564)
    Toexc <- c(21,130,197,207,1444,1484)
    ## (not run) tree-ring widths
    rwidths <- ringWidths(image1,inclu = Toinc,exclu = Toexc,last.yr=NULL)
    str(rwidths)
    ##plot of computed tree-ring widths:
    maint <- 'Hello ring widths!'
    plot(rwidths,type='l',col = 'red',main = maint,
         xlab = 'Year',ylab = 'Width (mm)')
}
)
|
04d8441cf597eb3c1e28b999a2e0c2bbab12c5a3
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/vcpen/R/vcexample.R
|
ab8c29ea8b4e433c130ef2b7ddc9efc2ecbd3b29
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,120
|
r
|
vcexample.R
|
#' Example data for Penalized Variance Component method
#'
#' Datasets for an example run of vcpen with 4 variance components calculated as kernel matrices from genotype dosage (dose) on 100 subjects with two covariates (covmat), and a continuous response.
#'
#' @format The example contains three data.frames and a response vector for 100 subjects at 70 SNPs across 4 variance components:
#' \describe{
#' \item{\code{covmat}}{two arbitrary covariates (columns) for 100 subjects (rows)}
#' \item{\code{dose}}{genotype dosage at 70 SNPs (columns) and 100 subjects (rows)}
#' \item{\code{doseinfo}}{2-column matrix with indices for grouping SNPs into variance components (for Kernel Matrix)}
#' \item{\code{response}}{continuous response vector for 100 subjects}
#' }
#' @examples
#' data(vcexample)
#' dim(dose)
#' dim(doseinfo)
#' dim(covmat)
#' length(response)
#' @name vcexample
NULL
#> NULL
#' @rdname vcexample
#' @name covmat
NULL
#> NULL
#' @rdname vcexample
#' @name dose
NULL
#> NULL
#' @rdname vcexample
#' @name doseinfo
NULL
#> NULL
#' @rdname vcexample
#' @name response
NULL
#> NULL
|
2ed0f366ac3a7928327a0958e67685b394a2bd2b
|
ba8dd7f1077723d7bf4ba9d592cf1484becec6bc
|
/Employer/machine learning section/machine learning basics/logistic regression/Projects/Remarketing List with predictive analysis/main.R
|
4164d5016d6db0e270d075711c41758b571c6c9f
|
[] |
no_license
|
MachineLearningWithHuman/cloud
|
12a450f1f74aa9bd79bed01ca09a68a2ec5dfc3b
|
2c505925ab6e0f9e9ab531139ef8230ac21dd5fe
|
refs/heads/master
| 2021-07-16T06:56:57.408823
| 2020-07-22T18:10:56
| 2020-07-22T18:10:56
| 192,422,593
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,801
|
r
|
main.R
|
# Google Analytics Premium / BigQuery statistics: logistic regression on
# user-level features to score conversion probability for a remarketing list.
library(corrplot)
library(car)
library(rms)
library(ROCR)
# FIX: qplot() is used below but ggplot2 was never loaded in the original.
library(ggplot2)
# loading the data
data <- read.csv("./data/train_data.csv")
# quick inspection
head(data)
View(data)
# column names
names(data)
# remove columns that are insignificant (identifiers / leakage)
data <- data[c(-1, -2, -22)]
# too many variables: use Pearson correlation to inspect collinearity
corr <- cor(data)
corrplot1 <- corrplot(corr)
symnum(abs(cor(data)), cutpoints = c(0, 0.2, 0.4, 0.6, 0.9, 1), symbols = c(" ", ".", "_", "+", "*"))
# remove a_sum_total_hits and a_diffdays (|r| > 0.9 with other columns)
data <- data[c(-3, -7)]
# removing midnight_flag
data <- data[, c(-13)]
# class balance of the response before modelling
table(data$b_CV_flag)
# heavily unbalanced data, as expected for a conversion flag
model <- glm(b_CV_flag ~ ., data, family = binomial("logit"))
result <- summary(model)
result
# benchmark model established; check multicollinearity via VIF
vif(model)
# removing _pageview for collinearity; the base-model data is unchanged
data1 <- data[, c(-2)]
# refit on the reduced feature set
model1 <- glm(formula = b_CV_flag ~ ., data = data1, family = binomial("logit"))
result1 <- summary(model1)
result1
vif(model1)
# predicted conversion probabilities and cumulative gain curve
prob <- data.frame(predict(model1, data1, type = "response"))
# FIX: sum() over a one-column data frame is deprecated; sum the column.
gain <- cumsum(sort(prob[, 1], decreasing = TRUE)) / sum(prob[, 1])
png('gain_curve_plot.png')
plot(gain, main = "Gain chart", xlab = "number of users", ylab = "cumulative conversion rate")
dev.off()
# ROC CURVE
pred <- prediction(prob, data1$b_CV_flag)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
qplot(x = perf@x.values[[1]], y = perf@y.values[[1]], xlab = perf@x.name, ylab = perf@y.name, main = "ROC curve")
# coefficients as a tidy data frame
coef <- names(model1$coefficients)
value <- as.vector(model1$coefficients)
result <- data.frame(coef, value)
result
|
2c46e9e6238f57cce25584e9cc4a1a0d3cf50e29
|
42f01671fb60c8273e429ecba61042f3f70d7c01
|
/R/daily_helpers.R
|
2b0e5c08448393166742abf2ded6ca57f3a5b58c
|
[] |
no_license
|
leighseverson/countyweather
|
3a674ac59393c9e2ad6ccad581daa6b6ac5d54a5
|
31f9c7589f78b0dab9dc64b51d512e5849c13b7c
|
refs/heads/master
| 2022-05-17T05:54:44.572448
| 2022-04-06T22:47:52
| 2022-04-06T22:47:52
| 52,288,023
| 10
| 14
| null | 2020-12-04T04:20:56
| 2016-02-22T16:38:30
|
R
|
UTF-8
|
R
| false
| false
| 11,723
|
r
|
daily_helpers.R
|
#' NOAA NCDC station IDs per county.
#'
#' Returns a dataframe with NOAA NCDC station IDs for
#' a single U.S. county. This function has options to filter stations based on
#' maximum and minimum dates, as well as percent data coverage.
#'
#' @note Because this function uses the NOAA API to identify the weather
#' monitors within a U.S. county, you will need to get an access token from
#' NOAA to use this function. Visit NOAA's token request page
#' (\url{http://www.ncdc.noaa.gov/cdo-web/token}) to request a token by
#' email. You then need to set that API code in your R session (e.g., using
#' \code{options(noaakey = "your key")}, replacing "your key" with the API
#' key you've requested from NOAA). See the package vignette for more details.
#'
#' @param fips A string with the five-digit U.S. FIPS code of a county
#' in numeric, character, or factor format.
#' @param date_min A string with the desired starting date in character, ISO
#' format ("yyyy-mm-dd"). The dataframe returned will include only stations
#' that have data for dates including and after the specified date. In other words,
#' if you specify that this equals "1981-02-16", then it will return only
#' the stations with at least some data recorded after Feb. 16, 1981. If a station
#' stopped recording data before Feb. 16, 1981, it will be removed from the set of stations. If not
#' specified, the function will include available stations, regardless of the date
#' when the station started recording data.
#' @param date_max A string with the desired ending date in character, ISO
#' format ("yyyy-mm-dd"). The dataframe returned will include only stations
#' that have data for dates up to and including the specified date. If not
#' specified, the function will include available stations, regardless of the date
#' when the station stopped recording data.
#' @param limit_20_longest A logical value, indicating whether the stations should
#' be limited to the 20 with the longest records of data (otherwise, there may
#' be so many stations that it will take extremely long to pull data from all of
#' them). The default is FALSE, but you may want to change to TRUE if it is taking
#' a long time to pull your data.
#' @param exclude_less_than_one_year A logical value, indicating whether stations with
#' less than one year's worth of data should be automatically excluded. The default
#' value is TRUE.
#'
#' @return A dataframe with NOAA NCDC station IDs for a single U.S. county.
#'
#' @examples
#' \dontrun{
#' stations_36005 <- daily_stations("36005")
#' stations_36005
#'
#' miami_stations <- daily_stations("12086", date_min = "1999-01-01",
#' date_max = "2012-12-31")
#' miami_stations
#' }
#'
#' @importFrom dplyr %>%
#' @export
daily_stations <- function(fips, date_min = NULL, date_max = NULL,
                           limit_20_longest = FALSE, exclude_less_than_one_year = TRUE) {
  # Query the NOAA NCDC API for all GHCND stations in the county, then
  # filter by record dates.  Requires a NOAA API key via options(noaakey).
  FIPS <- paste0('FIPS:', fips)
  station_ids <- rnoaa::ncdc_stations(datasetid = 'GHCND', locationid = FIPS,
                                      limit = 10)
  station_df <- station_ids$data
  # The API pages results: if more than the first 10 stations exist,
  # fetch the remainder in a second request starting at offset 11.
  if (station_ids$meta$totalCount > 10) {
    how_many_more <- station_ids$meta$totalCount - 10
    more_stations <- rnoaa::ncdc_stations(datasetid = 'GHCND',
                                          locationid = FIPS,
                                          limit = how_many_more,
                                          offset = 10 + 1)
    station_df <- rbind(station_df, more_stations$data)
  }
  # If either `min_date` or `max_date` option was null, set to a date that
  # will keep all monitors in the filtering.
  if (is.null(date_max)) {
    date_max <- max(lubridate::ymd(station_df$mindate))
  }
  if (is.null(date_min)) {
    date_min <- min(lubridate::ymd(station_df$maxdate))
  }
  date_max <- lubridate::ymd(date_max)
  date_min <- lubridate::ymd(date_min)
  # Keep stations whose record overlaps [date_min, date_max].
  tot_df <- dplyr::mutate_(station_df,
                           mindate = ~ lubridate::ymd(mindate),
                           maxdate = ~ lubridate::ymd(maxdate)) %>%
    dplyr::filter_(~ maxdate >= date_min & mindate <= date_max)
  # Optionally drop stations with under one year (~52.14 weeks) of data.
  if(exclude_less_than_one_year){
    tot_df <- tot_df %>%
      dplyr::mutate_(dftime = ~ difftime(maxdate, mindate, units = "weeks")) %>%
      dplyr::filter(dftime >= 52.14)
  }
  # Optionally cap the result at the 20 stations with the longest records.
  if(limit_20_longest & nrow(tot_df) > 20){
    tot_df <- tot_df %>%
      dplyr::mutate_(dftime = ~ difftime(maxdate, mindate)) %>%
      dplyr::slice_max(order_by = dftime, n = 20)
  }
  # Strip the "GHCND:" prefix so the ids match rnoaa's monitor functions.
  tot_df <- tot_df %>%
    dplyr::select_(.dots = c("id", "latitude", "longitude", "name")) %>%
    dplyr::mutate_(id = ~ gsub("GHCND:", "", id))
  return(tot_df)
}
#' Average daily weather data across multiple stations.
#'
#' Returns a dataframe with daily weather averaged across
#' stations, as well as columns showing the number of stations contributing
#' to the average for each variable and each day.
#'
#' @param weather_data A dataframe with daily weather observations. This
#' dataframe is returned from the \code{rnoaa} function
#' \code{meteo_pull_monitors}.
#'
#' @importFrom dplyr %>%
ave_daily <- function(weather_data) {
  # Per-day mean of each weather variable across stations, plus one
  # "<var>_reporting" column counting stations with non-missing values.
  all_cols <- colnames(weather_data)
  not_vars <- c("id", "date")
  g_cols <- all_cols[!all_cols %in% not_vars]
  # not sure about -id -date cols - how to do NSE here
  # Long format -> per-day/per-variable mean -> back to wide format.
  averaged_data <- tidyr::gather_(weather_data, key_col = "key",
                                  value_col = "value",
                                  gather_cols = g_cols) %>%
    dplyr::group_by_(.dots = c("date", "key")) %>%
    dplyr::summarize_(mean = ~ mean(value, na.rm = TRUE)) %>%
    tidyr::spread_(key_col = "key", value_col = "mean") %>%
    dplyr::ungroup()
  # Count how many stations reported each variable on each day.
  n_reporting <- tidyr::gather_(weather_data, key_col = "key",
                                value_col = "value",
                                gather_cols = g_cols) %>%
    dplyr::group_by_(.dots = c("date", "key")) %>%
    dplyr::summarize_(n_reporting = ~ sum(!is.na(value))) %>%
    dplyr::mutate_(key = ~ paste(key, "reporting", sep = "_")) %>%
    tidyr::spread_(key_col = "key", value_col = "n_reporting")
  # One row per date: means joined with the reporting counts.
  averaged_data <- dplyr::left_join(averaged_data, n_reporting,
                                    by = "date")
  return(averaged_data)
}
#' Filter stations based on "coverage" requirements.
#'
#' Filters available weather stations based on a specified required minimum
#' coverage (i.e., percent non-missing daily observations). Weather stations
#' with non-missing data for fewer days than specified by \code{coverage} will
#' be excluded from the county average.
#'
#' @param coverage_df A dataframe as returned by the \code{meteo_coverage}
#' function in the \code{rnoaa} package
#' @param coverage A numeric value in the range of 0 to 1 that specifies
#' the desired percentage coverage for the weather variable (i.e., what
#' percent of each weather variable must be non-missing to include data from
#' a monitor when calculating daily values averaged across monitors).
#'
#' @return A dataframe with stations that meet the specified coverage
#' requirements for weather variables included in the \code{coverage_df}
#' dataframe passed to the function.
#'
#' @importFrom dplyr %>%
filter_coverage <- function(coverage_df, coverage = 0) {
  # Treat a missing threshold as "no coverage requirement".
  if (is.null(coverage)) {
    coverage <- 0
  }
  all_cols <- colnames(coverage_df)
  not_vars <- c("id", "start_date", "end_date", "total_obs")
  g_cols <- all_cols[!all_cols %in% not_vars]
  # Long format: one row per station/variable with its coverage fraction.
  # A station is kept when at least one variable meets the threshold.
  filtered <- dplyr::select_(coverage_df,
                             .dots = list("-start_date", "-end_date",
                                          "-total_obs")) %>%
    tidyr::gather_(key_col = "key", value_col = "covered",
                   gather_cols = g_cols) %>%
    dplyr::filter_(~ covered >= coverage) %>%
    dplyr::mutate_(covered_n = ~ 1) %>%
    dplyr::group_by_(.dots = list("id")) %>%
    dplyr::mutate_(good_monitor = ~ sum(!is.na(covered_n)) > 0) %>%
    dplyr::ungroup() %>%
    dplyr::filter_(~ good_monitor) %>%
    dplyr::select_(.dots = list("-good_monitor", "-covered_n"))
  # The third column holds the coverage fraction; give it a stable name.
  colnames(filtered)[3] <- "calc_coverage"
  return(filtered)
}
#' Plot daily weather stations for a particular county.
#'
#' Produces a map with points indicating stations that contribute
#' to the weather data in the \code{daily_data} data frame output by
#' \code{daily_fips}.
#'
#' @param fips A five-digit FIPS county code.
#' @param daily_data A list returned from the function \code{daily_df} (see
#' helpfile for \code{daily_df}).
#' @param point_color Character string with color for points
#' mapping the locations of weather stations (passes to \code{ggplot}).
#' @param fill_color Character string with color for county background fill
#' (passes to \code{ggplot}).
#' @param point_size Character string with size for for points
#' mapping the locations of weather stations (passes to \code{ggplot}).
#' @param station_label TRUE / FALSE Whether to include labels for
#' each weather station.
#'
#' @return A \code{ggplot} object mapping all weather stations for a particular
#' county satisfying the conditions present in \code{daily_df}'s
#' arguments (date range, coverage, and/or weather variables). 2011 U.S.
#' Census cartographic boundary shapefiles are used to provide county
#' outlines.
#'
#' @examples
#' \dontrun{
#' miami_stations <- daily_stations(fips = "12086", date_min = "1992-08-01",
#' date_max = "1992-08-31")
#' daily_data <- daily_df(stations = miami_stations, coverage = 0.90,
#' var = c("tmax", "tmin", "prcp"),
#' date_min = "1992-08-01", date_max = "1992-08-31")
#' daily_stationmap(fips = "12086", daily_data = daily_data)
#' }
#'
#' @importFrom dplyr %>%
daily_stationmap <- function(fips, daily_data, point_color = "firebrick",
                             fill_color = "lightgrey",
                             point_size = 2, station_label = FALSE) {
  # for plot title: county name looked up in the package's census table
  census_data <- countyweather::county_centers
  row_num <- which(grepl(fips, census_data$fips))
  title <- as.character(census_data[row_num, "name"])
  # Rename to `fc` so the filter compares the column against the function
  # argument `fips` rather than against itself.
  loc_census <- census_data %>%
    dplyr::rename(fc = fips) %>%
    dplyr::filter(fc == fips)
  # County outline from the Census cartographic boundary shapefiles;
  # last three FIPS digits identify the county within the state.
  suppressMessages(
    county_sf <- tigris::counties(state = loc_census$state,
                                  cb = T,
                                  class = "sf") %>%
      dplyr::filter(COUNTYFP == stringr::str_sub(fips, 3, 5))
  )
  map <- ggplot2::ggplot() +
    ggplot2::geom_sf(data = county_sf, color = fill_color)
  # De-duplicate stations and order north-to-south for a stable legend.
  station_df <- daily_data$station_df %>%
    dplyr::tbl_df() %>%
    dplyr::filter_(~ !duplicated(id)) %>%
    dplyr::arrange_(~ dplyr::desc(latitude))
  name_levels <- unique(station_df$name)
  station_df <- station_df %>%
    dplyr::mutate_(name = ~ factor(name, levels = name_levels))
  if (station_label == TRUE) {
    # Labelled variant: one fill colour per station, shown in the legend.
    map_out <- map +
      ggplot2::geom_point(data = station_df,
                          ggplot2::aes_(~ longitude, ~ latitude,
                                        fill = ~ name),
                          colour = "black",
                          size = point_size,
                          shape = 21) +
      ggplot2::ggtitle(title) +
      ggplot2::theme_void() +
      ggplot2::theme(legend.title = ggplot2::element_blank())
  } else {
    # Unlabelled variant: uniform point colour, no legend.
    map_out <- map +
      ggplot2::geom_point(data = station_df,
                          ggplot2::aes_(~ longitude, ~ latitude),
                          colour = point_color,
                          size = point_size) +
      ggplot2::theme_void() +
      ggplot2::ggtitle(title)
  }
  return(map_out)
}
|
5069214a59329df547b9b98cd19b876371139021
|
55e92b151e9282e8106d168629ba7a2edad67938
|
/run_analysis.R
|
cb0e304aff34ac278fa3b905c937cca66f5acb95
|
[] |
no_license
|
kelleycarr/coursera-get-clean-data
|
fc538cb3f33e1bce5f00833bc89658f7ffe7186f
|
506941fea105cef58b4e517a4ed69f0eb0a79077
|
refs/heads/master
| 2021-01-12T07:07:52.903097
| 2017-03-05T22:59:04
| 2017-03-05T22:59:04
| 76,916,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,400
|
r
|
run_analysis.R
|
# This assumes that your working directory is the repo.
# This file needs to be sourced in order for the analysis to run properly.
# source("run_analysis.R")
library(reshape2)
run_analysis <- function(summary = FALSE, long_form = TRUE) {
  # Build a tidy summary of the UCI HAR dataset.
  #
  # Args:
  #   summary: if TRUE, also write the result to "tidy_data_summary.txt".
  #   long_form: if TRUE, melt the per-subject/activity means into long
  #              form (one row per subject/activity/variable).
  if (!file.exists("UCI HAR Dataset")) {
    unzip_data_files()
  }
  data_file_path <- file.path(".","UCI HAR Dataset")
  ## Merge the training and the test sets & label the data
  all_data <- merge_sets(data_file_path)
  ## Extract measurements on the mean and standard deviation for each measurement.
  extracted_data <- all_data[ , grepl("subject|y_data|mean|std|Mean", colnames(all_data))]
  ## Name the activities in the data set
  extracted_data <- assign_activity_names(extracted_data, data_file_path)
  ## Create an independent tidy data set with the average of each variable for each
  ## activity and each subject.
  tidy_data_summary <- aggregate(. ~ subject + activity_label, extracted_data, mean)
  if (long_form) {
    tidy_data_summary <- melt(tidy_data_summary,
                              id = c("subject","activity_label"),
                              measure.vars = setdiff(colnames(tidy_data_summary), c("subject", "activity_label")))
    colnames(tidy_data_summary) <- c("subject","activity_label","variable","mean")
  }
  # Write the tidy data if option is selected
  if (summary) {
    write.table(tidy_data_summary, file="tidy_data_summary.txt", row.names = FALSE)
  }
}
unzip_data_files <- function() {
  # Download the raw dataset archive if it is not already present,
  # then extract it into the working directory.
  zip_name <- "dataset.zip"
  if (!file.exists(zip_name)) {
    print("Downloading file")
    download.file(
      "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
      zip_name
    )
  }
  print("Unzipping file")
  unzip(zip_name)
}
merge_sets <- function(data_file_path) {
  # Row-bind the "test" and "train" partitions into one data frame,
  # using the feature names from features.txt for both.
  feature_names <- read.table(file.path(data_file_path, "features.txt"))
  combined <- rbind(
    merge_separate_sets(data_file_path, "test", feature_names),
    merge_separate_sets(data_file_path, "train", feature_names)
  )
  combined
}
merge_separate_sets <- function(data_file_path, file_type, features) {
  # Read and column-bind the subject, activity (y) and measurement (X)
  # files of one partition ("test" or "train") of the UCI HAR dataset.
  #
  # Args:
  #   data_file_path: root directory of the unzipped dataset.
  #   file_type: partition sub-directory name, "test" or "train".
  #   features: two-column data frame read from features.txt; its second
  #             column supplies the measurement column names.
  #
  # Returns: data frame with columns subject, activity_code and one
  #   column per feature.
  file_path <- file.path(data_file_path, file_type)
  file_list <- list.files(file_path, pattern = ".txt", full.names = TRUE)
  parse_data <- function(pattern, file_list) {
    # Locate exactly one file matching `pattern` and read it.
    # FIX: the original had a TODO noting it would break silently if
    # several files matched; fail loudly on zero or multiple matches.
    data_file <- file_list[grep(pattern, file_list)]
    if (length(data_file) != 1) {
      stop("Expected exactly one file matching '", pattern, "' in ",
           file_path, " but found ", length(data_file), call. = FALSE)
    }
    data_points <- read.table(data_file)
    return(data_points)
  }
  subject_data <- parse_data("subject_", file_list)
  x_data <- parse_data("X_", file_list)
  y_data <- parse_data("y_", file_list)
  return_data <- cbind(subject_data,
                       y_data,
                       x_data)
  # Set the name of the columns to the "features" list
  colnames(return_data) <- c("subject","activity_code",as.character(features[ , 2]))
  return(return_data)
}
assign_activity_names <- function(extracted_data, data_file_path) {
  # Replace numeric activity codes with their text labels from
  # activity_labels.txt, dropping the code column afterwards.
  label_file <- file.path(data_file_path, "activity_labels.txt")
  labels_df <- read.table(label_file)
  colnames(labels_df) <- c("activity_code", "activity_label")
  labelled <- merge(extracted_data, labels_df)
  labelled$activity_code <- NULL
  return(labelled)
}
|
ffe5d980e360dad0f9cca5f94818f1e53707c40f
|
7c40eecf96cafbed719baa9bbd001c0e9d8e7450
|
/dev/Seurat_annalysis.R
|
a8811d10177b839b56757f78c79711e666c484ac
|
[] |
no_license
|
rseraphin/Deconvolution_Internship
|
a6da5b4d86f664b24c7b8807b418f7739c3df40a
|
08a8c215f1407eba5b642399c12f664ae0067aaa
|
refs/heads/master
| 2022-11-25T15:36:33.413417
| 2020-07-20T19:12:58
| 2020-07-20T19:12:58
| 247,586,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,647
|
r
|
Seurat_annalysis.R
|
#### Initialisation ####
# Install-on-demand package loading.
# NOTE(review): require() inside if() is a common interactive idiom, but
# library() at top level would fail more loudly in non-interactive runs.
if (!require('tidyverse')){
  install.packages("tidyverse")
  library ("tidyverse")
}
if (!require('Seurat')){
  BiocManager::install("multtest")
  install.packages('Seurat')
  library("Seurat")
  install.packages('sctransform')
  library("sctransform")
}
if (!require('nnls')){
  install.packages('nnls')
  library('nnls')
}
setwd("Master_2/")
#load('dataset/.RData')
#### Seurat lungs ####
# Standard Seurat pipeline on the GSE113530 lung counts:
# SCTransform normalisation -> PCA -> UMAP -> graph clustering -> markers.
data_lungs_unNorm <- read.table("dataset/GSE113530_countsFinal.txt", header = TRUE)
seurat_lungs_data <- CreateSeuratObject(counts = data_lungs_unNorm)
seurat_lungs_data <- SCTransform(seurat_lungs_data)
seurat_lungs_data <- RunPCA(seurat_lungs_data)
seurat_lungs_data <- RunUMAP(seurat_lungs_data, dims = 1:30)
seurat_lungs_data <- FindNeighbors(seurat_lungs_data, dims = 1:30)
seurat_lungs_data <- FindClusters(seurat_lungs_data)
DimPlot(seurat_lungs_data, label = T) + NoLegend()
# Positive markers per cluster; keep the top 20 by average log fold-change.
seurat_markers <- FindAllMarkers(seurat_lungs_data, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
seurat_final_markers <- seurat_markers %>% group_by(cluster) %>% top_n(n = 20, wt = avg_logFC)
seurat_clust1 <- subset(seurat_final_markers$gene, seurat_final_markers$cluster == 1)
seurat_clust0 <- subset(seurat_final_markers$gene, seurat_final_markers$cluster == 0)
# Same pipeline on `data_lungs` — presumably a pre-normalised matrix loaded
# elsewhere in this project (not defined in this file) — TODO confirm.
seurat_lungs_Norm_data <- CreateSeuratObject(counts = data_lungs)
seurat_lungs_Norm_data <- SCTransform(seurat_lungs_Norm_data)
seurat_lungs_Norm_data <- RunPCA(seurat_lungs_Norm_data)
seurat_lungs_Norm_data <- RunUMAP(seurat_lungs_Norm_data, dims = 1:30)
seurat_lungs_Norm_data <- FindNeighbors(seurat_lungs_Norm_data, dims = 1:30)
seurat_lungs_Norm_data <- FindClusters(seurat_lungs_Norm_data)
DimPlot(seurat_lungs_Norm_data, label = T) + NoLegend()
seurat_markers_Norm <- FindAllMarkers(seurat_lungs_Norm_data, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
seurat_final_markers_Norm <- seurat_markers_Norm %>% group_by(cluster) %>% top_n(n = 20, wt = avg_logFC)
seurat_clust1_Norm <- subset(seurat_final_markers_Norm$gene, seurat_final_markers_Norm$cluster == 1)
seurat_clust0_Norm <- subset(seurat_final_markers_Norm$gene, seurat_final_markers_Norm$cluster == 0)
#### Seurat Sciatic Nerves ####
# GSE144707 sciatic-nerve counts; first column holds the gene names,
# which become row names before building the Seurat object.
data_single <- read.table("dataset/GSE144707_countTable_aggrNerveStStD1D5.txt", header = TRUE)
data_single[1:3, 1:3]
rownames(data_single) <- data_single[,1]
data_single <- subset(data_single, select = -1)
siatic <- CreateSeuratObject(counts = data_single)
siatic <- SCTransform(siatic)
siatic <- RunPCA(siatic)
siatic <- RunUMAP(siatic, dims = 1:30)
siatic <- FindNeighbors(siatic, dims = 1:30)
siatic <- FindClusters(siatic)
DimPlot(siatic, label = T) + NoLegend()
siatic_markers <- FindAllMarkers(siatic, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
siatic_markers_final <- siatic_markers %>% group_by(cluster) %>% top_n(n = 20, wt = avg_logFC)
write_delim(siatic_markers_final, "seurat_markers_GSE144707", "\t")
#### Seurat broad ####
# Same pipeline on `data_broad` (GSE92332, loaded elsewhere — TODO
# confirm); the top 50 markers per cluster are exported.
seurat_broad_data <- CreateSeuratObject(counts = data_broad)
seurat_broad_data <- SCTransform(seurat_broad_data)
seurat_broad_data <- RunPCA(seurat_broad_data)
seurat_broad_data <- RunUMAP(seurat_broad_data, dims = 1:30)
seurat_broad_data <- FindNeighbors(seurat_broad_data, dims = 1:30)
seurat_broad_data <- FindClusters(seurat_broad_data)
DimPlot(seurat_broad_data, label = T) + NoLegend()
seurat_markers_broad <- FindAllMarkers(seurat_broad_data, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
seurat_final_markers_broad <- seurat_markers_broad %>% group_by(cluster) %>% top_n(n = 50, wt = avg_logFC)
write_delim(seurat_final_markers_broad, "seurat_markers_GSE92332", "\t")
#### Marker comparison ####
# Harmonise gene-name separators ("-" vs "_") before set comparisons.
seurat_clust0 <- gsub("-","_", seurat_clust0)
seurat_clust0_Norm <- gsub("-","_", seurat_clust0_Norm)
seurat_clust1 <- gsub("-","_", seurat_clust1)
seurat_clust1_Norm <- gsub("-","_", seurat_clust1_Norm)
# Overlap between Seurat clusters and SC3 clusters (sc3_clust* come from
# elsewhere — TODO confirm), and between normalised/un-normalised runs.
intersect(seurat_clust0,sc3_clust1)
intersect(seurat_clust0,sc3_clust3)
intersect(seurat_clust0, seurat_clust0_Norm)
intersect(seurat_clust0, seurat_clust1_Norm)
intersect(seurat_clust1,sc3_clust1)
intersect(seurat_clust1,sc3_clust3)
intersect(seurat_clust1, seurat_clust0_Norm)
intersect(seurat_clust1, seurat_clust1_Norm)
intersect(seurat_clust0_Norm, sc3_clust1)
intersect(seurat_clust0_Norm, sc3_clust3)
intersect(seurat_clust1_Norm, sc3_clust1)
intersect(seurat_clust1_Norm, sc3_clust3)
# Export cluster-specific marker lists.
write_delim(as.data.frame(seurat_clust0), 'seurat_clust0_markers_GSE113530', '\t')
write_delim(as.data.frame(seurat_clust1), 'seurat_clust1_markers_GSE113530', '\t')
#### Bulk sciatic ####
# Differential expression (DESeq2) on the GSE144705 bulk RNA-seq counts,
# restricted to the genes that came up as single-cell cluster markers above.
data_bulk <- read.table("dataset/GSE144705_processedData_bulkRNAseq_YdensEtAl.txt.gz", header = TRUE)
data_bulk
# BUG FIX: the original used data.table's `%like%` with a *vector* of gene
# names on the right-hand side (and data.table is never loaded here).
# Set membership is what is meant, so use %in%.
data_bulk_marker <- subset(data_bulk, data_bulk$Gene %in% siatic_markers$gene)
rownames(data_bulk_marker) <- data_bulk_marker$Gene
data_bulk_marker <- subset(data_bulk_marker, select= -1)
# Sample sheet: 4 SN, 4 ON and 5 SPF samples, in column order.
bulk_samples <- data.frame(matrix(nrow = 13 ,ncol =0 ))
rownames(bulk_samples) <- colnames(data_bulk_marker)[2:14]
bulk_samples$condition <- rep(c("SN","ON","SPF"),c(4,4,5))
dds <- DESeqDataSetFromMatrix(countData = data_bulk_marker,
                              colData = bulk_samples,
                              design = ~ condition)
dds <- DESeq(dds)
# Pairwise condition contrasts.
res_SN_ON <- results(dds, contrast = c("condition","SN","ON"))
res_SN_SPF <- results(dds, contrast = c("condition","SN","SPF"))
res_ON_SPF <- results(dds, contrast = c("condition","SPF","ON"))
# NOTE(review): `res` is never defined in this script — this line errors as
# written and presumably meant one of the res_* objects above; confirm.
sum(res$padj < 0.1, na.rm=TRUE)
# Number of genes significant at FDR < 0.1 for each contrast.
sum(res_ON_SPF$padj < 0.1, na.rm=TRUE)
sum(res_SN_ON$padj < 0.1, na.rm=TRUE)
sum(res_SN_SPF$padj < 0.1, na.rm=TRUE)
#### Pancreas dataset ####
# Human pancreas samples (GSE84133). The original script ran four near-identical
# copy-pasted load+cluster pipelines (one per donor); these are factored into
# two helpers below. All variable names used downstream (pancreas_h*_sup,
# pancreas_h*_long, pancreas_h*, pancreas_h*_seurat, *_markers) are preserved.

# Load one umifm_counts.csv.gz file: column X holds cell barcodes (becomes
# rownames), the next two columns are per-cell annotations (returned as $sup),
# the remaining columns are gene counts (returned as $counts, cells x genes).
load_pancreas_sample <- function(path) {
  raw <- read.csv(path, header = TRUE)
  rownames(raw) <- raw$X
  raw <- subset(raw, select = -c(1))
  list(sup = subset(raw, select = c(1, 2)),
       counts = subset(raw, select = -c(1, 2)))
}

# Standard per-sample Seurat pipeline: SCTransform normalisation, PCA,
# UMAP, SNN graph and clustering at the default resolution.
cluster_pancreas_sample <- function(counts) {
  seu <- CreateSeuratObject(counts = t(counts))
  seu <- SCTransform(seu)
  seu <- RunPCA(seu)
  seu <- RunUMAP(seu, dims = 1:30)
  seu <- FindNeighbors(seu, dims = 1:30)
  FindClusters(seu)
}

## Human 1
h1 <- load_pancreas_sample("dataset/GSE84133_Pancreas/GSM2230757_human1_umifm_counts.csv.gz")
pancreas_h1_sup <- h1$sup
pancreas_h1_long <- h1$counts
pancreas_h1 <- t(pancreas_h1_long)
pancreas_h1_seurat <- cluster_pancreas_sample(pancreas_h1_long)
DimPlot(pancreas_h1_seurat, label = T) + NoLegend()
pancreas_h1_seurat_markers <- FindAllMarkers(pancreas_h1_seurat, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
pancreas_h1_seurat_markers_c <- FindAllMarkers(pancreas_h1_seurat, only.pos = T)
pancreas_h1_seurat <- RunTSNE(pancreas_h1_seurat, )
TSNEPlot(pancreas_h1_seurat)
# PC significance diagnostics (run for h1 only in the original analysis).
pancreas_h1_seurat <- JackStraw(pancreas_h1_seurat, num.replicate = 100, dims = 20)
pancreas_h1_seurat <- ScoreJackStraw(pancreas_h1_seurat, dims = 1:20)
JackStrawPlot(pancreas_h1_seurat, dims = 1:20)
ElbowPlot(pancreas_h1_seurat,ndims = 20)
meta_data_h1 <- pancreas_h1_seurat@meta.data
treated_h1_data <- as.matrix(GetAssayData(pancreas_h1_seurat,slot='data'))
pancreas_h1_avg <- AverageExpression(pancreas_h1_seurat)
write.table(meta_data_h1, 'dataset/meta_data_h1', quote = FALSE,sep = '\t', col.names = NA)
write.table(as.data.frame(treated_h1_data), 'dataset/treated_h1_data', quote = FALSE, sep = '\t', col.names = NA)
# NOTE(review): pancreas_bulk is only read in the GSE50244 section further down;
# this script appears to be run interactively / out of order -- confirm.
selected_genes_bulk_h1 <- subset(pancreas_bulk, pancreas_bulk$id %in% pancreas_h1_seurat_markers$gene)
selected_genes_sc_h1 <- subset(pancreas_h1_avg$SCT, rownames(pancreas_h1_avg$SCT) %in% selected_genes_bulk_h1$id)
write_tsv(selected_genes_bulk_h1, 'dataset/selected_genes_bulk_h1')
write.table(selected_genes_sc_h1, 'dataset/selected_genes_sc_h1', sep='\t' , quote=FALSE, col.names = NA)

## Human 2
h2 <- load_pancreas_sample("dataset/GSE84133_Pancreas/GSM2230758_human2_umifm_counts.csv.gz")
pancreas_h2_sup <- h2$sup
pancreas_h2_long <- h2$counts
pancreas_h2 <- t(pancreas_h2_long)
pancreas_h2_seurat <- cluster_pancreas_sample(pancreas_h2_long)
DimPlot(pancreas_h2_seurat, label = T) + NoLegend()
pancreas_h2_seurat_markers <- FindAllMarkers(pancreas_h2_seurat, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
meta_data_h2 <- pancreas_h2_seurat@meta.data
treated_h2_data <- as.matrix(GetAssayData(pancreas_h2_seurat,slot='data'))
pancreas_h2_avg <- AverageExpression(pancreas_h2_seurat)
write.table(meta_data_h2, 'dataset/meta_data_h2', quote = FALSE,sep = '\t', col.names = NA)
write.table(as.data.frame(treated_h2_data), 'dataset/treated_h2_data', quote = FALSE, sep = '\t', col.names = NA)
selected_genes_bulk_h2 <- subset(pancreas_bulk, pancreas_bulk$id %in% pancreas_h2_seurat_markers$gene)
selected_genes_sc_h2 <- subset(pancreas_h2_avg$SCT, rownames(pancreas_h2_avg$SCT) %in% selected_genes_bulk_h2$id)
write_tsv(selected_genes_bulk_h2, 'dataset/selected_genes_bulk_h2')
write.table(selected_genes_sc_h2, 'dataset/selected_genes_sc_h2', sep='\t' , quote=FALSE, col.names = NA)

## Human 3
h3 <- load_pancreas_sample("dataset/GSE84133_Pancreas/GSM2230759_human3_umifm_counts.csv.gz")
pancreas_h3_sup <- h3$sup
pancreas_h3_long <- h3$counts
pancreas_h3 <- t(pancreas_h3_long)
pancreas_h3_seurat <- cluster_pancreas_sample(pancreas_h3_long)
DimPlot(pancreas_h3_seurat, label = T) + NoLegend()
pancreas_h3_seurat_markers <- FindAllMarkers(pancreas_h3_seurat, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)

## Human 4
h4 <- load_pancreas_sample("dataset/GSE84133_Pancreas/GSM2230760_human4_umifm_counts.csv.gz")
pancreas_h4_sup <- h4$sup
pancreas_h4_long <- h4$counts
pancreas_h4 <- t(pancreas_h4_long)
pancreas_h4_seurat <- cluster_pancreas_sample(pancreas_h4_long)
DimPlot(pancreas_h4_seurat, label = T) + NoLegend()
pancreas_h4_seurat_markers <- FindAllMarkers(pancreas_h4_seurat, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
# Merge all four donors (cells x genes), transpose to genes x cells and run the
# same SCTransform / PCA / clustering pipeline on the combined data.
pancreas_all <-rbind(pancreas_h1_long,pancreas_h2_long,pancreas_h3_long,pancreas_h4_long)
pancreas_all <- t(pancreas_all)
pancreas_all_seurat <- CreateSeuratObject(counts = pancreas_all)
pancreas_all_seurat <- SCTransform(pancreas_all_seurat)
pancreas_all_seurat <- RunPCA(pancreas_all_seurat)
pancreas_all_seurat <- RunTSNE(pancreas_all_seurat)
TSNEPlot(pancreas_all_seurat)
pancreas_all_seurat <- RunUMAP(pancreas_all_seurat, dims = 1:30)
pancreas_all_seurat <- FindNeighbors(pancreas_all_seurat, dims = 1:30)
# Higher resolution than the per-donor runs (see note below: 0.5-1.3 tested).
pancreas_all_seurat <- FindClusters(pancreas_all_seurat, resolution = 1.5)
DimPlot(pancreas_all_seurat, label = T) + NoLegend()
pancreas_all_seurat_markers <- FindAllMarkers(pancreas_all_seurat, only.pos = T, min.pct = 0.25, logfc.threshold = 0.25)
# NOTE(review): known_marker is only defined a few lines below -- this heatmap
# call works only when the script is (re)run interactively; confirm ordering.
DoHeatmap(pancreas_all_seurat, features = known_marker) + NoLegend()
# Tested resolution 0.5 ~ 1.3
# Testing merging clusters
# NOTE(review): do.reorder/reorder.numeric and AssessNodes are Seurat v2-era
# APIs -- confirm the installed Seurat version still supports them.
pancreas_all_seurat <- BuildClusterTree(pancreas_all_seurat, do.reorder = T, reorder.numeric = T)
node.scores <- AssessNodes(pancreas_all_seurat)
# Right-assignment (`->`) kept as-is; sorts nodes by out-of-bag error.
node.scores[order(node.scores$oobe,decreasing = T),] -> node.scores
# Components analysis
pancreas_all_seurat <- JackStraw(pancreas_all_seurat, num.replicate = 100, dims = 30)
pancreas_all_seurat <- ScoreJackStraw(pancreas_all_seurat, dims = 1:30)
JackStrawPlot(pancreas_all_seurat, dims = 10:30)
ElbowPlot(pancreas_all_seurat,ndims = 30)
# Markers comparison and visualization
length(intersect(pancreas_h2_seurat_markers$gene, pancreas_h1_seurat_markers$gene))
length(pancreas_h1_seurat_markers$gene)
length(pancreas_h2_seurat_markers$gene)
# Canonical pancreas cell-type markers and their expected cell types
# (parallel vectors; position 9 intentionally blank in cell_types).
known_marker <- c("GCG",'INS','PPY','SST','GHRL','PRSS1',"CPA1",'KRT19','SPARC','VWF','RGS5','PDGFRA','SOX10','SDS','TPSAB1','TRAC')
cell_types <- c('Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Acinar', 'Acinar', 'Ductal', '', 'Endothelial', 'quiescent stellate', 'activate stellate', 'schwann', 'Macrophage', 'Mast', 'Cytotoxyc T')
# Top 20 markers per cluster by average log fold-change.
topn_all <- pancreas_all_seurat_markers %>% group_by(cluster) %>% top_n(n=20, wt = avg_logFC)
DoHeatmap(pancreas_all_seurat, features = known_marker) + NoLegend()
known_marker %in% pancreas_all_seurat_markers$gene
# NOTE(review): comparing genes against a Seurat object looks unintended --
# likely meant pancreas_all_seurat_markers$gene; confirm.
topn_all$gene %in% pancreas_all_seurat
subset(pancreas_h1_seurat_markers, pancreas_h1_seurat_markers$gene %in% known_marker)
subset(pancreas_all_seurat_markers, pancreas_all_seurat_markers$gene %in% known_marker)
# Top-n marker tables per cluster (h1 and h2), marker heatmaps per donor,
# and export of the corresponding bulk/single-cell expression subsets.
topn_h1 <- pancreas_h1_seurat_markers %>% group_by(cluster) %>% top_n(n=5, wt = avg_logFC)
top_1_h2 <- pancreas_h2_seurat_markers %>% group_by(cluster) %>% top_n(n=1, wt = avg_logFC)
top_5_h2 <- pancreas_h2_seurat_markers %>% group_by(cluster) %>% top_n(n=5, wt = avg_logFC)
top_10_h2 <- pancreas_h2_seurat_markers %>% group_by(cluster) %>% top_n(n=10, wt = avg_logFC)
top_20_h2 <- pancreas_h2_seurat_markers %>% group_by(cluster) %>% top_n(n=20, wt = avg_logFC)
top_50_h2 <- pancreas_h2_seurat_markers %>% group_by(cluster) %>% top_n(n=50, wt = avg_logFC)
DefaultAssay(object = pancreas_h1_seurat) <- "SCT"
t1 <- DoHeatmap(pancreas_h1_seurat, features = known_marker) + NoLegend()
t2 <- DoHeatmap(pancreas_h2_seurat, features = known_marker) + NoLegend()
t3 <- DoHeatmap(pancreas_h3_seurat, features = known_marker) + NoLegend()
t4 <- DoHeatmap(pancreas_h4_seurat, features = known_marker) + NoLegend()
# NOTE(review): par(mfrow=...) controls base graphics only; it has no effect
# on ggplot objects such as t1..t4 (consider patchwork/ggarrange instead).
par(mfrow=c(2,2))
plot(t2)
# Fixed: DoHeatmap's `features` expects a character vector of gene names;
# the original passed the whole topn_all tibble.
DoHeatmap(pancreas_all_seurat, features = topn_all$gene) + NoLegend()
# Restrict the bulk table to each top-n marker set...
top1_h2_gene_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% top_1_h2$gene)
top5_h2_gene_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% top_5_h2$gene)
top10_h2_gene_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% top_10_h2$gene)
top20_h2_gene_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% top_20_h2$gene)
top50_h2_gene_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% top_50_h2$gene)
write_tsv(top1_h2_gene_bulk, "dataset/top1_h2_genes_bulk.tsv")
write_tsv(top5_h2_gene_bulk, "dataset/top5_h2_genes_bulk.tsv")
write_tsv(top10_h2_gene_bulk, "dataset/top10_h2_genes_bulk.tsv")
write_tsv(top20_h2_gene_bulk, "dataset/top20_h2_genes_bulk.tsv")
write_tsv(top50_h2_gene_bulk, "dataset/top50_h2_genes_bulk.tsv")
# ...and the per-cluster average expression to the genes present in bulk.
top1_genes_h2 <- subset(pancreas_h2_avg$SCT, rownames(pancreas_h2_avg$SCT) %in% top1_h2_gene_bulk$id)
top5_genes_h2 <- subset(pancreas_h2_avg$SCT, rownames(pancreas_h2_avg$SCT) %in% top5_h2_gene_bulk$id)
top10_genes_h2 <- subset(pancreas_h2_avg$SCT, rownames(pancreas_h2_avg$SCT) %in% top10_h2_gene_bulk$id)
top20_genes_h2 <- subset(pancreas_h2_avg$SCT, rownames(pancreas_h2_avg$SCT) %in% top20_h2_gene_bulk$id)
top50_genes_h2 <- subset(pancreas_h2_avg$SCT, rownames(pancreas_h2_avg$SCT) %in% top50_h2_gene_bulk$id)
write_tsv(top1_genes_h2, "dataset/top1_genes_h2.tsv")
write_tsv(top10_genes_h2, "dataset/top10_genes_h2.tsv")
write_tsv(top5_genes_h2, "dataset/top5_genes_h2.tsv")
write_tsv(top20_genes_h2, "dataset/top20_genes_h2.tsv")
write_tsv(top50_genes_h2, "dataset/top50_genes_h2.tsv")
pancreas_avg <- AverageExpression(pancreas_all_seurat)
#selected_genes_sc <- subset(pancreas_avg$SCT, rownames(pancreas_avg$SCT) %in% pancreas_all_seurat_markers$gene)
GetAssayData(object = pancreas_all_seurat, slot = 'data')[1:3,1:3]
# NOTE(review): slot = 'RNA' names an assay, not a slot ('counts'/'data'/
# 'scale.data') -- confirm what was intended here.
GetAssayData(object = pancreas_h1_seurat, slot = 'RNA')
#### Bulk Pancreas GSE50244 ####
# Deconvolve bulk pancreas expression against single-cell cluster profiles
# via non-negative least squares (nnls).
pancreas_bulk <- read.table('dataset/GSE50244_Genes_counts_TMM_NormLength_atLeastMAF5_expressed.txt.gz', header = T)
# Intersect bulk genes with the combined single-cell marker genes.
selected_genes_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% pancreas_all_seurat_markers$gene)
selected_genes_sc <- subset(pancreas_avg$SCT, rownames(pancreas_avg$SCT) %in% selected_genes_bulk$id)
top20_gene_bulk <- subset(pancreas_bulk, pancreas_bulk$id %in% topn_all$gene)
top20_genes_sc <- subset(pancreas_avg$SCT, rownames(pancreas_avg$SCT) %in% top20_gene_bulk$id)
write_tsv(selected_genes_bulk,'Selected_gene_bulk.tsv')
write_tsv(selected_genes_sc,'Selected_gene_sc.tsv')
write_tsv(top20_gene_bulk, "Top20_marker_bulk.tsv")
write_tsv(top20_genes_sc, "Top20_marker_sc.tsv")
# NNLS fit of one bulk sample (column 4) against cluster-average profiles.
nnls(as.matrix(selected_genes_sc), as.vector(selected_genes_bulk[,4]))
nnls(as.matrix(top20_genes_sc), as.vector(top20_gene_bulk[,4]))
dim(selected_genes_sc)
dim(selected_genes_bulk)
head(selected_genes_bulk)
head(selected_genes_sc)
# Append a row of ones so the NNLS coefficients are pushed toward summing to 1.
sum_to_one <- c(rep(1,19))
test <- rbind(selected_genes_sc,sum_to_one)
test[2863:2865,]
# NOTE(review): c(as.factor('constraint'), rep(1,90)) coerces the factor to
# its integer code (1), not the string -- confirm this constraint row is as
# intended before trusting the rbind below.
sum_to_one <- c(as.factor('constraint'),rep(1,90))
test_bulk <- rbind(selected_genes_bulk,sum_to_one)
test_bulk[2863:2865,]
nnls(as.matrix(test), as.vector(test_bulk[,4]))
|
4cbd38a1aaeed53da190df501dc4b8bcca1a283a
|
6f44fa8b69db185adad4fdd568141ab31aa9193f
|
/R/biodiversity.R
|
cc66bfc081f9a7168d9f98485c0a73aca0157b92
|
[] |
no_license
|
cran/semtree
|
c3df84fa4f712c299f18c68b4cd0f25731edd67f
|
b542f84cac056c4fa0c04f6c8475d6154edeac8b
|
refs/heads/master
| 2022-06-03T00:39:18.868941
| 2022-05-13T19:20:02
| 2022-05-13T19:20:02
| 76,660,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
biodiversity.R
|
#' Quantify bio diversity of a SEM Forest
#'
#' A function to calculate biodiversity of a \code{\link{semforest}} object.
#'
#'
#' @param x A \code{\link{semforest}} object, or a precomputed pairwise
#' diversity matrix (class \code{diversityMatrix}).
#' @param aggregate.fun Takes a function to apply to the vector of pairwise
#' diversities. By default, this is the median.
#' @return The aggregated pairwise diversity (a single number for the
#' default \code{median}).
#' @author Andreas M. Brandmaier
#' @keywords biodiversity semforest
#' @export
biodiversity <- function(x, aggregate.fun=median)
{
  # Scalar condition: use short-circuiting || rather than element-wise |
  # (since R 4.3 || errors on non-scalar operands, and | was never the
  # idiomatic choice inside if()).
  if (!(
    is(x, "semforest") ||
    is(x, "diversityMatrix")
  ))
  {
    stop("Error! x must be a SEM forest or a diversity matrix!")
  }
  if (is(x, "semforest")) {
    message("Computing diversity matrix.")
    D <- diversityMatrix(x)
  } else {
    D <- x
  }
  # Each unordered pair of trees appears exactly once in the lower triangle.
  values <- D[lower.tri(D)]
  aggregate.fun(values)
}
|
d1ed4285b0392d87298ccf52b028b1ee03b0af88
|
c294c7c8acc3fdc86b726fb2b4d94f073ad153ef
|
/log/aug-8.r
|
378b17bbf78514fca800b84c55f2c2cb843db888
|
[] |
no_license
|
alexjgriffith/r-workspace
|
347a1d764cce86de86f01a0d41a4942998d14a3a
|
229ab551ffeed1b5c60b51123e5d065cde065a7b
|
refs/heads/master
| 2020-04-06T07:09:58.395966
| 2016-09-10T07:25:31
| 2016-09-10T07:25:31
| 52,470,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,382
|
r
|
aug-8.r
|
# Exploratory setup for TSS / gene-association work: CCCA project code plus
# Bioconductor hg19 annotation. biocLite() installs are one-time and would
# normally be run interactively, not on every source().
library(CCCA)
library(Biostrings)
source("~/r-workspace/project.r")
source("~/r-workspace/project-variables.r")
source("~/r-workspace/ccca.r")
source("http://bioconductor.org/biocLite.R")
# Fixed: biocLite() takes the package name as a character string; the bare
# symbol org.Hs.eg.db is undefined before the package is installed/attached.
biocLite("org.Hs.eg.db")
source("http://bioconductor.org/biocLite.R")
biocLite("GO")  # NOTE(review): the GO annotation package is "GO.db" -- confirm
library(org.Hs.eg.db)
x <- org.Hs.egCHRLOC
# Get the entrez gene identifiers that are mapped to chromosome locations
mapped_genes <- mappedkeys(x)
biocLite("TxDb.Hsapiens.UCSC.hg19.knownGene")
# library() errors loudly on failure, unlike require() which returns FALSE.
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
# Exons grouped by Entrez gene id.
exons.db <- exonsBy(TxDb.Hsapiens.UCSC.hg19.knownGene, by='gene')
# NOTE(review): `symbols` is not defined in this script -- presumably created
# by one of the sourced project files; confirm before running top-to-bottom.
egs <- unlist( mget(symbols[ symbols %in% keys(org.Hs.egSYMBOL2EG) ],org.Hs.egSYMBOL2EG) )
str(exons.db)
exons <- exonsBy(TxDb.Hsapiens.UCSC.hg19.knownGene, by='gene')
t.db<-transcriptsBy(TxDb.Hsapiens.UCSC.hg19.knownGene,"gene")
txdb<-TxDb.Hsapiens.UCSC.hg19.knownGene
cds_by_tx1 <- cdsBy(txdb, "tx", use.names=TRUE)
# Map the Entrez ids of the exon groups to gene symbols (printed, not stored).
select(org.Hs.eg.db,exons.db@partitioning@NAMES, "SYMBOL","ENTREZID")
# Build a per-gene TSS table (chr, tss, strand, Entrez id, symbol) from a
# GRangesList of exons grouped by gene (hg19 knownGene). For "+" genes the
# TSS is the start of the first exon; for "-" genes it is the end of the
# last exon. Relies on internal slots (@partitioning, @unlistData) of the
# CompressedGRangesList representation -- fragile across Bioconductor versions.
# ensure all genes have appropriate strand values
genGeneTSS<-function(exons=NULL){
# NOTE(review): this require() check does not stop on failure -- the bare
# NULL is just an expression, so execution continues even if loading fails.
if(!(require(org.Hs.eg.db) & require(TxDb.Hsapiens.UCSC.hg19.knownGene)))
NULL
if(is.null(exons))
exons<-exonsBy(TxDb.Hsapiens.UCSC.hg19.knownGene, by='gene')
# regs[i, ] = first and last index of gene i's exons within the unlisted ranges.
t<-c(0,exons@partitioning@end)
regs<-cbind(t[1:(length(t)-1)]+1,t[2:length(t)])
df<-data.frame(chr=as.character(exons@unlistData@seqnames[regs[,1]]),
tss=0,
strands=c("+","-")[as.numeric(exons@unlistData@strand[regs[,1]])],
id=exons@partitioning@NAMES,
name=select(org.Hs.eg.db,exons@partitioning@NAMES, "SYMBOL","ENTREZID")$SYMBOL)
# For "-" genes use the last exon's end; for "+" genes the first exon's start.
hold<-rep(0,dim(regs)[[1]])
hold[df$strands=="-"]<-regs[df$strands=="-",2]
df$tss[df$strands=="-"]=exons@unlistData@ranges@start[hold[df$strands=="-"]]+exons@unlistData@ranges@width[hold[df$strands=="-"]]
hold[df$strands=="+"]<-regs[df$strands=="+",1]
df$tss[df$strands=="+"]=exons@unlistData@ranges@start[hold[df$strands=="+"]]
# Drop genes with no symbol mapping.
df[!is.na(as.character(df$name)),]
}
# Associate peaks with genes per context and build a gene-by-context
# membership matrix `om`.
t<-genGeneTSS(exons)
# NOTE(review): genGeneTSS() names the column "strands"; t$strand only works
# via data.frame partial matching -- confirm, and prefer the full name.
regions<-genomicRegions(t$chr,t$tss,t$strand,1000,5000,1000000)
# NOTE(review): env is used here but (re)assigned by getPRC20(2) a few lines
# below -- this chunk appears to depend on interactive, out-of-order execution.
a<-lapply(contexts,function(x) greatGeneAssoc(env$over[env$reg[,x],c(1,2,3)],regions,t))
names(a)<-contexts
# All gene names associated with any context.
b<-sort(unique(unlist(lapply(a,function(x) x[,"name"]))))
env<-getPRC20(2)
setdiff(b,a[[1]]$name)
# Logical matrix: gene (row) is associated with context (column).
om<-matrix(FALSE,length(b),length(a))
colnames(om)<-contexts
rownames(om)<-b
for(i in contexts){
om[as.character(a[[i]]$name),i]=TRUE
}
regs[,as.numeric(exons.db@unlistData@strand[regs[,1]])]
## speed up genomicRegions function
# Return the pair (lo, hi): swap the two elements of x if the first is larger.
# Fixed: the original returned a plain vector when already ordered but a
# 1x2 matrix (via cbind) when swapped; both branches now return a length-2
# vector, which is what apply(..., 1, swapif) consumes in either case.
swapif <- function(x) {
  if (x[1] <= x[2]) {
    x
  } else {
    c(x[2], x[1])
  }
}
# Vectorised prototype of genomicRegions(): build GREAT-style basal domains
# around each TSS and extend them toward neighbouring genes up to `distal`.
# Exploratory scratch code -- several names below (extendsMax, chrom, n) are
# not defined in this chunk, and there is a top-level return() that would
# error if this file were source()d; confirm before reuse.
chr<-t$chr
tss<-t$tss
strand<-t$strand
# Encode strand as -1/+1 so tss - strand*proxUp goes upstream on both strands.
levels(strand)<-c(-1,1)
strand<-as.numeric(strand)
proxUp<-1000
proxDown<-5000
distal<-1000000
# Basal domain per gene, swapped so column 1 <= column 2.
basalDomains<-t(apply(
cbind(tss-strand*proxUp,tss+strand*proxDown),1,swapif))
# Per chromosome: sort genes and clip each extension at the neighbouring
# gene's basal domain (lower/upper) or at +/- distal, whichever is nearer.
bound<-na.omit(do.call(rbind,lapply(levels(chr),function(lev){
y<-basalDomains[chr==lev,]
if(sum(chr==lev)==1){
print(y)
return (cbind(y[1],y[2],max(0,y[1]-distal),(y[2]+distal)))
}
else if(sum(chr==lev)<1){
return (cbind(NA,NA,NA,NA))
}
else {
y<-y[order(y[,1]),]
len<-dim(y)[1]
lower<-c(0,y[1:(len-1),2])
upper<-c(y[2:len,1],y[len,1]+distal)
extBD<-cbind(y,lower,upper,y[,1]-distal,y[,2]+distal)
# lbl selects which candidate lower bound applies (basal start, previous
# gene's end, -distal, or 0 when that would be negative).
lbl<-rowSums(cbind(extBD[,1]>extBD[,3],
extBD[,1] > extBD[,3]& extBD[,3] < extBD[,5],
extBD[,1] > extBD[,3]& extBD[,3] < extBD[,5]&extBD[,5]<0))
lb<-rep(0,len)
lb[lbl==0]=extBD[lbl==0,1]
lb[lbl==1]=extBD[lbl==1,3]
lb[lbl==2]=extBD[lbl==2,5]
lb[lbl==3]=0
# ubl likewise for the upper bound (basal end, next gene's start, +distal).
ubl<-rowSums(cbind(extBD[,2]<extBD[,4],
extBD[,2]<extBD[,4]&extBD[,4]>extBD[,6]))
ub<-rep(0,len)
ub[ubl==0]=extBD[ubl==0,2]
ub[ubl==1]=extBD[ubl==1,4]
ub[ubl==2]=extBD[ubl==2,6]
return(cbind(y[,1],y[,2],lb,ub))
##return(cbind(NA,NA))
}
})))
# Chromosome label per retained row, in the same per-chromosome order.
bc<-na.omit(do.call(c,sapply(levels(chr),function(lev){
if(sum(chr==lev)<1)
NA
else
as.character(chr[chr==lev])
})))
data.frame(bc,bound)
## if true current minimum
## if false continue
## if true minimum = lower
## if false continue
## if true then 0
## if false then distal
# NOTE(review): extendsMax, chrom and n are undefined here, and return() is
# invalid at top level -- leftover from the original genomicRegions() body.
genomeDomains<-do.call(
rbind,
lapply(seq(n),extendsMax,chrom,basalDomains,tss,distal))
return(genomeDomains)
# Ad-hoc probes of the exons GRangesList internals (interactive scratch).
exons.db@unlistData@strand
exons.db@unlistData@ranges[exons.db@partitioning[[1]]]
length(exons.db@unlistData@strand@values)*2
str(exons.db@unlistData@ranges)
t<-c(0,exons.db@partitioning@end)
(regs[,2]-regs[,1])[1:10]
exons.db@unlistData@strand@lengths[1:10]
inverse.rle(exons.db@unlistData@strand[regs[1:10,1]])
sapply(seq(10),function(i) exons.db[[i]]@strand@values)
x<-Rle(factor(rep(c(1,2,1,1,3,4,5),each=2),levels=0:5))
# Expand an rle-like list (fields $lengths and $values) back into the full
# vector, mirroring base::inverse.rle.
inverse.factor.rle <- function(x) {
  run_lengths <- x$lengths
  run_values <- x$values
  malformed <- is.null(run_lengths) || is.null(run_values) ||
    length(run_lengths) != length(run_values)
  if (malformed) {
    stop("invalid 'rle' structure")
  }
  rep.int(run_values, run_lengths)
}
# More interactive probes of Rle strand encoding (values vs. expanded codes).
as.numeric(exons.db@unlistData@strand@values[1:10])
exons.db@unlistData@strand@values[as.numeric(exons.db@unlistData@strand)]
# NOTE(review): exons.db[[i]] is a GRanges, so @strand@values varies per gene,
# but exons.db@strand does not exist on the list itself -- confirm intent.
sapply(seq(10),function(i) as.numeric(exons.db@strand@values))
|
e5084827046309514996077bcf4fca865cce9492
|
5f935eb4f7bb4de4cfecb689e2691fb1cbbf9602
|
/Main text data and analysis/04_Demographic_stochasticity_data/plotting_demographic_stochasticity_Vs_no_stochasticity.R
|
b68ace5d57d171d16f50c1195e351536c848227f
|
[] |
no_license
|
GauravKBaruah/ECO-EVO-EWS-DATA
|
be7f1d4b45121d3a06d5182c0d558c293f3c498c
|
1792eeaaecdd12798bb35a9486913e38de4ee76b
|
refs/heads/master
| 2020-05-25T14:41:42.716388
| 2019-06-22T09:31:17
| 2019-06-22T09:31:17
| 187,850,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,245
|
r
|
plotting_demographic_stochasticity_Vs_no_stochasticity.R
|
# Compare EWS (early-warning signal) Kendall's tau distributions with and
# without demographic stochasticity, for three drivers: net reproductive rate,
# plasticity strength and genetic variation. The original repeated the same
# ggplot block three times; it is factored into ews_plot() below.
# NOTE(review): this script calls ggplot2/wesanderson functions but never
# loads them -- presumably attached earlier in the session; confirm.
rm(list=ls())  # NOTE(review): wipes the whole workspace; avoid in sourced scripts
load("~/Dropbox/Zurich PhD Research/2_Chapter_2/J Animal Ecology codes/reproductive_rate.RData")
load("~/Dropbox/Zurich PhD Research/2_Chapter_2/J Animal Ecology codes/genvar.RData")
load("~/Dropbox/Zurich PhD Research/2_Chapter_2/J Animal Ecology codes/plasticity.RData")
load("Demographic_stochasticity_plasticity.RData")
load("Demographic_stochasticity_genetic_variation.RData")
load("Demographic_stochasticity_R0.RData")
best_color_paletter<- c(wes_palettes$Darjeeling1, wes_palettes$Rushmore1)

# Stack a "no stochasticity" run on top of a "with stochasticity" run and
# label the rows (n_each rows per run).
combine_runs <- function(no_stoch, with_stoch, n_each) {
  data.frame(rbind(no_stoch, with_stoch),
             Demographic_stochasticity = factor(c(rep("No", each = n_each),
                                                  rep("Yes", each = n_each))))
}
#R0 combined data
Net.repro.dat <- combine_runs(data.R, data.R.demog, 3000)
#plasticity combined data
Net.p.dat <- combine_runs(data.p, data.p.plasticity_demog, 3600)
#genvariation combined data
Net.g.dat <- combine_runs(data.g, data.g.demog, 3600)

# Jittered box plot of Kendall's tau against the column named by `xvar`,
# colored by stochasticity and faceted by EWS metric (`variation`).
ews_plot <- function(dat, xvar, xlab_text) {
  ggplot(data = dat, aes(y = Kendall.tau, x = .data[[xvar]],
                         color = Demographic_stochasticity)) +
    geom_boxplot(alpha = 0) +
    geom_point(pch = 21, alpha = 0.2, position = position_jitterdodge()) +
    ylim(c(-1, 1)) +
    ylab("Kendall's Tau") +
    theme_classic() +
    theme(plot.title = element_text(size = 10, face = "bold"),
          text = element_text(size = 10),
          axis.title = element_text(face = "bold"),
          legend.position = "right") +
    scale_color_manual(values = best_color_paletter) +
    labs(color = "Demographic stochasticity?") +
    facet_wrap(. ~ variation) +
    xlab(xlab_text) +
    ggtitle("EWS metrics")
}

# plotting R0
R01 <- ews_plot(Net.repro.dat, "R0", "Net reproductive rate")
#pdf(file = "constant_timeseries_vs_variable_timeseris__R0.pdf", width = 8, height = 8)
R01
#dev.off()
## plotting plasticity
pl1 <- ews_plot(Net.p.dat, "Plasticity", "Plasticity strength")
#pdf(file = "constant_timeseries_vs_variable_timeseris_plasticity.pdf", width = 8, height = 8)
pl1
#dev.off()
## plotting genetic variation
gg1 <- ews_plot(Net.g.dat, "Genetic_variation", "Genetic variation")
#pdf(file = "constant_timeseries_vs_variable_timeseris_genetic_variation.pdf", width = 8, height = 8)
gg1
#dev.off()
|
a72f6a200e0700f3d148c6ca710e24d1483a8d7e
|
1f74a31dce7c679d3ef4507335e2f6e763987ff1
|
/testmore/NSAS/script.R
|
ff19869ed69a0644158268d6480c7f693a46baa6
|
[] |
no_license
|
fishfollower/SAM
|
5b684c0a54d6e69f05300ebb7629829b2a003692
|
a1f1c5b17505a7a73da28736f0077805a7606b30
|
refs/heads/master
| 2023-07-22T00:50:48.411745
| 2023-04-21T10:25:20
| 2023-04-21T10:25:20
| 67,597,583
| 55
| 35
| null | 2023-02-22T08:42:23
| 2016-09-07T10:39:25
|
R
|
UTF-8
|
R
| false
| false
| 4,381
|
r
|
script.R
|
# to install the package from the multi branch
#devtools::install_github("fishfollower/SAM/stockassessment", ref="multi")
# NSAS herring SAM run: combined catch data plus fleet-split (A-D) data,
# with a "sum fleet" covering the pre-split years.
library(stockassessment)
cn<-read.ices("old/CANUM.txt")
cw<-read.ices("old/WECA.txt")
dw<-cw
lf<-cn; lf[]<-1
lw<-cw
mo<-read.ices("old/MATPROP.txt")
nm<-read.ices("old/NATMOR.txt")
pf<-read.ices("old/FPROP.txt")
pm<-read.ices("old/MPROP.txt")
sw<-read.ices("old/WEST.txt")
surveys<-read.ices("old/FLEET.txt")
#- Original split data
cnA<-read.ices("split/cn_A.dat");
cnB<-read.ices("split/cn_B.dat");
cnC<-read.ices("split/cn_C.dat");
cnD<-read.ices("split/cn_D.dat");
cwA<-read.ices("split/cw_A.dat");
cwB<-read.ices("split/cw_B.dat");
cwC<-read.ices("split/cw_C.dat");
cwD<-read.ices("split/cw_D.dat");
#- Set plusgroup to 8+ (data in 9+)
# Collapse ages 8 and 9 into an 8+ group: the 8+ mean weight is the
# catch-weighted average over ages 8-9, the 8+ numbers are their sum, and
# the redundant final age column is dropped. (Originally copy-pasted once
# per fleet.)
collapse_plusgroup <- function(cn_f, cw_f) {
  pg <- as.character(8:9)
  cw_f[, "8"] <- rowSums(cn_f[, pg] * cw_f[, pg]) / rowSums(cn_f[, pg])
  cn_f[, "8"] <- rowSums(cn_f[, pg])
  list(cn = cn_f[, -ncol(cn_f)], cw = cw_f[, -ncol(cw_f)])
}
tmp <- collapse_plusgroup(cnA, cwA); cnA <- tmp$cn; cwA <- tmp$cw
tmp <- collapse_plusgroup(cnB, cwB); cnB <- tmp$cn; cwB <- tmp$cw
tmp <- collapse_plusgroup(cnC, cwC); cnC <- tmp$cn; cwC <- tmp$cw
tmp <- collapse_plusgroup(cnD, cwD); cnD <- tmp$cn; cwD <- tmp$cw
#- Fake split: back-fill the pre-split years (rows 1:50 of cn) with each
#  fleet's average share of the total catch, then stack the true split years.
#  Zeros are bumped to 1 to keep the log-likelihood finite.
cn_total <- cnA + cnB + cnC + cnD
fake_split <- function(cn_f) {
  out <- rbind(colMeans(cn_f / cn_total) * cn[1:50, ], cn_f)
  out[out == 0] <- 1
  out
}
cnAFS <- fake_split(cnA)
cnBFS <- fake_split(cnB)
cnCFS <- fake_split(cnC)
cnDFS <- fake_split(cnD)
# Years present in the combined data but absent from the split data.
idx<-!rownames(cn)%in%rownames(cnA)
#- get catch weight by fleet full length: start from the combined weights
#  and overwrite the split years with the fleet-specific weights.
fleet_weight <- function(cw_f) {
  out <- cw
  out[-which(idx), ] <- cw_f
  out[is.nan(out)] <- 0
  out
}
cwAF <- fleet_weight(cwA)
cwBF <- fleet_weight(cwB)
cwCF <- fleet_weight(cwC)
cwDF <- fleet_weight(cwD)
#sumACDF<-(cnA+cnC+cnD+cnF)[idx,]
# Sum fleet covering the pre-split years; "sumof" flags which residual
# fleets it aggregates.
sumABCD<-cn[idx,]
attr(sumABCD, "sumof")<-c(1,2,3,4)
# temp fix to convergence problems
#cnA[cnA==0]<-1
#cnB[cnB==0]<-1
#cnC[cnC==0]<-1
#cnD[cnD==0]<-1
#sumABCD[sumABCD==0] <- 1
#cwAF[which(cwAF==0)] <- 0.01
#cwBF[which(cwBF==0)] <- 0.01
#cwCF[which(cwCF==0)] <- 0.01
#cwDF[which(cwDF==0)] <- 0.01
# Trim the survey series and set their within-year sampling windows.
surveys[[1]] <- surveys[[1]][,-ncol(surveys[[1]])]; attr(surveys[[1]],"time") <- c(0.54,0.56)
surveys[[2]] <- surveys[[2]][-nrow(surveys[[2]]),1:2]; attr(surveys[[2]],"time") <- c(0.08,0.17)
surveys[[3]] <- matrix(surveys[[3]][-nrow(surveys[[3]]),],ncol=1,dimnames=list(1992:2016,0)); attr(surveys[[3]],"time") <- c(0.08,0.17)
iSY <- 1   # first data-year index to include
cutsum<-sumABCD[iSY:50,]
attr(cutsum, "sumof")<-c(1,2,3,4)
dat<-setup.sam.data(surveys=surveys[1:3],
#residual.fleets=list(cn), # Notice list
#residual.fleets=list(cnAFS, cnBFS, cnCFS, cnDFS), # Notice list
residual.fleets=list(cnA, cnB, cnC, cnD), # Notice list
sum.residual.fleets=cutsum,
prop.mature=mo[iSY:70,],
stock.mean.weight=sw[iSY:70,],
catch.mean.weight=cw[iSY:70,],#list(cwAF,cwBF,cwCF,cwDF),
dis.mean.weight=dw[iSY:70,],
land.mean.weight=lw[iSY:70,],#list(cwAF,cwBF,cwCF,cwDF),
prop.f=pf[iSY:70,],
prop.m=pm[iSY:70,],
natural.mortality=nm[iSY:70,],
land.frac=lf[iSY:70,])
#conf<-defcon(dat)
#conf$fbarRange <- c(2,6)
#conf$corFlag<-c(0,0,0,0)
#saveConf(conf, "model.cfg")
conf<-loadConf(dat,"model.cfg")
par<-defpar(dat,conf)
par$logFpar[]<-0
fit<-sam.fit(dat,conf,par)
#save(fit,file="./truesplit.RData")
#save(fit,file="./nosplit.RData")
#save(fit,file="./fakesplit.RData")
#catchplot(fit)
#ssbplot(fit)
#fbarplot(fit)
#cat(fit$opt$objective,"\n\n", file="res.out")
#cat(capture.output(prmatrix(t(fit$pl$logF))), sep="\n", file="res.out", append=TRUE)
|
51fde342c71c95dfeed341d1b72f89664fb2ebff
|
a469d0e44d96c93e7ef6d472503061ba853e737c
|
/PutahData copy/Pielous.R
|
02979b754ee0e0edac20185d96f9b5c9843b889d
|
[] |
no_license
|
emjacinto/PutahCreekEJ
|
59e19820e2c2468d6f612b56432bb4745cd95e72
|
de5fc16ef1637c8cb736598f92a5359cedc4fb79
|
refs/heads/main
| 2023-07-16T07:11:45.485756
| 2021-08-29T21:04:45
| 2021-08-29T21:04:45
| 400,645,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,331
|
r
|
Pielous.R
|
#Pielou's Index
# Compute Pielou's evenness per site and year from Putah Creek survey counts.
# NOTE(review): setwd() in a script breaks portability -- prefer running from
# the project root (or an RStudio project) instead.
setwd("~/Desktop/R_DAVIS_2020/r-davis-in-class-project-emjacinto")
library(tidyverse)
library(vegan)
library(data.table)
library(ggpubr)
#cite diversity-vegan and vegan in R
citation("vegan")
# Rows = sites, first two columns = site/year metadata, rest = species counts.
Shannon <- read.csv("PutahData/ShannonPutah.csv")
#isolate date, remove site and year columns
# Pielou's evenness J = H / log(S) for every survey year, one value per site.
# The original computed each year with a copy-pasted five-line block (24
# times); collapsed into a single loop. The printed J vectors, the assembled
# Pielous_Table (columns J93..J17 in the same order) and the written CSV are
# unchanged. There is no data for 2009, so it is skipped.
pielou_for_year <- function(year) {
  counts <- Shannon[Shannon$YEAR == year, ]
  counts <- counts[-c(1:2)]           # drop the site and year columns
  H <- diversity(counts)              # Shannon index per site (vegan)
  J <- H / log(specnumber(counts))    # Pielou's evenness
  print(J)
  J
}
years <- c(1993:2008, 2010:2017)      # no data for 2009
pielou_by_year <- lapply(years, pielou_for_year)
# Column names mirror the original objects: J93, ..., J08, J10, ..., J17.
names(pielou_by_year) <- sprintf("J%02d", years %% 100)
Pielous_Table <- as.data.table(pielou_by_year)
print(Pielous_Table)
Pielous_table<- fwrite(Pielous_Table,"PutahData/Pielous_Table.csv")
# Per-site trend plots of Pielou's evenness over years. The original repeated
# the same ggplot block six times (sites A-F) and referenced columns as
# Map_csv$X inside aes() (an anti-pattern that bypasses the data argument);
# both are replaced by plot_site_evenness() using the .data pronoun.
Map_csv <- read.csv("PutahData/PielousTable2.csv")
Map_csv

# Scatter of evenness vs. year for one site column, with a linear trend line.
plot_site_evenness <- function(dat, site) {
  ggplot(dat, aes(x = .data$X, y = .data[[site]])) +
    geom_point() +
    geom_smooth(method = "lm") +
    xlab("Year") +
    ylab("Pielou's Evenness") +
    ylim(0, 1)
}

SiteA <- plot_site_evenness(Map_csv, "A")
SiteA
#generating a linear regression model
# lm- Fitting Linear Model, formula Y~X
mod1 <- lm(Map_csv$A ~ Map_csv$X)
SiteB <- plot_site_evenness(Map_csv, "B")
SiteC <- plot_site_evenness(Map_csv, "C")
SiteD <- plot_site_evenness(Map_csv, "D")
SiteE <- plot_site_evenness(Map_csv, "E")
SiteF <- plot_site_evenness(Map_csv, "F")
SiteF

# 2x3 panel of all six sites, labelled A-F.
ALLSITESP <- ggarrange(SiteA, SiteB, SiteC, SiteD, SiteE, SiteF,
                       labels = c("A", "B", "C", "D", "E", "F"),
                       ncol = 2, nrow = 3)
ALLSITESP
ggsave("ALLSITE_pielous.pdf")
|
e77190eda42d3493fee768f19fd3c647001b2b76
|
9a64ce7165ee1eaed9e0ed401894b54ef1a31f70
|
/funs/cleanWords.R
|
803376006f13e3a44e2c42cfe455c4459c08714a
|
[] |
no_license
|
SebastianKuzara/twitter-poleng
|
f519cf90927d1374e36af3d818c8f3370f286ea3
|
cebe0338a8dbec63ad450f2c57b034c4b728309a
|
refs/heads/master
| 2020-12-14T09:55:06.172759
| 2017-06-26T17:22:50
| 2017-06-26T17:22:50
| 95,468,810
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,721
|
r
|
cleanWords.R
|
## Clean twitter posts of unwanted characters, words, expressions, etc.
## Takes a character vector of raw tweets and returns it stripped of URLs,
## user mentions, hashtags, punctuation, emoji and campaign-specific tags.
## Implemented with base gsub()/sub() only (no stringr attachment needed).
cleanWords <- function(words) {
  clean_words <- gsub("&", "", words)
  # clean urls
  clean_words <- gsub("?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)", "", clean_words)
  # clean retweet / via headers together with the mentioned handles
  clean_words <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", clean_words)
  # clean user names
  clean_words <- gsub("@\\w+", "", clean_words)
  # replace punctuation marks with spaces
  clean_words <- gsub("[[:punct:]]", " ", clean_words)
  # drop runs of 2+ spaces/tabs (removed outright, not collapsed to one)
  clean_words <- gsub("[ \t]{2,}", "", clean_words)
  # clean spaces on begin or end of expression
  clean_words <- gsub("^\\s+|\\s+$", "", clean_words)
  # strip emoji (symbols) and unassigned code points
  clean_words <- gsub("\\p{So}|\\p{Cn}", "", clean_words, perl = TRUE)
  # clean new line signs
  clean_words <- gsub("\\n", "", clean_words)
  # get rid of the campaign hashtag words
  clean_words <- gsub("poleng", "", clean_words, ignore.case = TRUE)
  clean_words <- gsub("engpol", "", clean_words, ignore.case = TRUE)
  clean_words <- gsub("u21euro", "", clean_words, ignore.case = TRUE)
  clean_words <- gsub("eurou21", "", clean_words, ignore.case = TRUE)
  # collapse repeated spaces left by the removals above (the original code
  # replaced a single space with a single space, a no-op)
  clean_words <- gsub(" {2,}", " ", clean_words)
  # take out retweet header, there is only one
  # (fixed character class: [a-z,A-Z] also matched a literal comma)
  clean_words <- sub("RT @[a-zA-Z]*: ", "", clean_words)
  # get rid of hashtags
  clean_words <- gsub("#[a-zA-Z]*", "", clean_words)
  # get rid of references to other screennames
  clean_words <- gsub("@[a-zA-Z]*", "", clean_words)
  clean_words
}
|
1d64821c40ae8f466f5b4317299b129277bb34cb
|
d9bc032601df3e2bf2e5587fad68f7b60ed2a29e
|
/portOptimizer/man/calc_port_risk.Rd
|
3ecef782f9b84f674413b6ba650b3c16f0f0d869
|
[] |
no_license
|
jkr216/rex-sandbox
|
c69cdc7a7fb521d81dcfc16933ebcc42563927ae
|
952a763724c356e72803a0edb80d595b4d12e0e8
|
refs/heads/master
| 2020-04-11T04:57:02.530037
| 2019-01-02T21:02:42
| 2019-01-02T21:02:42
| 161,532,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 467
|
rd
|
calc_port_risk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/portfolioFunctions.r
\name{calc_port_risk}
\alias{calc_port_risk}
\title{Portfolio Risk (Standard Deviation)}
\usage{
calc_port_risk(x, expectedCov)
}
\arguments{
\item{x}{Weight vector of the portfolio}
\item{expectedCov}{Covariance matrix with dim(length(x), length(x))}
}
\value{
scalar
}
\description{
Portfolio Risk (Standard Deviation)
}
\examples{
calc_port_risk(x, expectedCov)
}
|
991c02ffa7e1e704c974e26d3fcc04bd28942e9e
|
726ff4e63db1e6ce83e83191100ff89251858b46
|
/R/clsif-functions.R
|
de77d1cfcf073a3f8a862e3474bc692839b35085
|
[] |
no_license
|
hhau/densityratiosugiyama
|
ab932a2c4a823b92ac8435afb5bbd6c1c1c18a12
|
26cc267bc798aa65eef03f6b7c96249b9e782d56
|
refs/heads/master
| 2020-04-10T06:21:37.230091
| 2019-01-04T10:37:25
| 2019-01-04T10:37:25
| 160,852,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,623
|
r
|
clsif-functions.R
|
clsif.learning <- function(X.de, X.nu, lambda, max.iteration = 100,
                           eps.list = 10^seq(3, -3, -1)) {
  ## Projected-gradient solver for constrained LSIF
  ## (cf. Sugiyama book algorithm p. 68).
  ## Minimises 0.5 * a'Ha - h'a + lambda * 1'a subject to a >= 0 and the
  ## normalisation mean(psi_de)'a = 1, trying successively smaller step
  ## sizes from `eps.list` and stopping each sweep when the objective
  ## stops improving.
  ##
  ## X.de, X.nu: n x b matrices of basis-function values at the
  ##             denominator / numerator samples.
  ## lambda:     non-negative L1 regularisation weight.
  ## Returns a list with `alpha` (b x 1 coefficient matrix, non-negative,
  ## normalised) and `score` (final objective value).
  mean.X.de <- apply(X.de, 2, mean)
  ss.mean.X.de <- sum(mean.X.de^2)     # psi_bar_de' psi_bar_de
  H <- t(X.de) %*% X.de / nrow(X.de)   # basis.fcts x basis.fcts matrix
  h <- apply(X.nu, 2, mean)
  alpha <- rep(0.01, ncol(X.de))
  score <- Inf
  for (eps in eps.list) {
    for (i in seq_len(max.iteration)) {
      ## Gradient step (direction is -grad of the objective), then project
      ## back onto the feasible set: normalisation, non-negativity,
      ## renormalisation.
      alpha.new <- alpha + eps * (-H %*% alpha + h - lambda)
      alpha.new <- alpha.new + ((1 - sum(mean.X.de * alpha.new)) / ss.mean.X.de) * mean.X.de
      alpha.new <- pmax(0, alpha.new)
      alpha.new <- alpha.new / sum(mean.X.de * alpha.new)
      score.new <- (0.5 * t(alpha.new) %*% H %*% alpha.new - sum(h * alpha.new)
                    + lambda * sum(alpha.new))
      if (score.new >= score) break # no improvement any more
      score <- score.new
      alpha <- alpha.new
    }
  }
  list(alpha = alpha, score = score)
}
#' Constrained Least Squares Importance Fitting (CLISF)
#'
#' Sugiyama density ratio estimation method, with an L1 penalty on the
#' parameters.
#'
#' \code{x.de} and \code{x.nu} should be the same dimension (same number of
#' rows), but there can an uneven number of samples (number of rows)
#'
#'
#' @param x.de A matrix with d rows, with one sample from p(x_de) per column.
#' @param x.nu A matrix with d rows, with one sample from p(x_nu) per column.
#' @param lambda Positive real number. Regularisation parameter, see Sugiyama,
#' Suzuki and Kanamori (2012) Section 6.2.1 for details
#' @param sigma.chosen Positive real number. Sigma for the Gaussian kernel
#' radial basis functions. If this is set to zero, will be chosen via cross
#' validation.
#' @param is.adaptive Boolean. Adaptively choose location of basis functions.
#' @param neigh.rank Positive integer. How many other kernels to use to compute
#' distance metrics.
#' @param kernel.low Real number. Lower bound for rescaled distances.
#' @param kernel.high Real number. Upper bound for rescaled distances.
#' @param b Positive integer. How many kernels to use.
#' @param fold Positive integer. How many cross validation folds to use to
#' select \code{sigma.chosen}
#'
#' @return list with the following elements:
#' \describe{
#' \item{alpha}{basis function parameter estimates.}
#' \item{score}{final cross validation score, used to select sigma.chosen.}
#' \item{x.ce}{the chosen centers for the density ratio.}
#' \item{sigma}{the value of sigma.chosen after the cross validation.}
#' \item{is.adaptive}{the value of is.adaptive - used to figure out which
#' basis function to call later.}
#' \item{c.dists}{vector of distances between centers, used if is.adaptive
#' is true}
#' }
#' Note that this is list is meant to be passed to \code{\link{eval.basis}}. It
#' also serves as a small way to represent the estimated density ratio.
#' @export
clsif <- function(x.de, x.nu, lambda, sigma.chosen = 0.2, is.adaptive = FALSE,
  neigh.rank = 5, kernel.low = 0.5, kernel.high = 2, b = 50, fold = 6) {
  # Thin wrapper around the generic density-ratio fitter: forward every
  # tuning knob unchanged and pin the learner to the CLSIF optimiser.
  dr.args <- list(
    x.de, x.nu,
    lambda = lambda,
    sigma.chosen = sigma.chosen,
    is.adaptive = is.adaptive,
    neigh.rank = neigh.rank,
    kernel.low = kernel.low,
    kernel.high = kernel.high,
    b = b,
    fold = fold,
    learning.fct = clsif.learning
  )
  do.call(fit.dr, dr.args)
}
|
fd7b46e68ed5e33d3295dafdf1785f22d00a5b0f
|
5cb0e73f2c04589d76df3eb5d2812801490d3757
|
/EDA Week1 Dataprep.R
|
b7b85c031ba6a537d0f4f3658ba4e67254051c5b
|
[] |
no_license
|
brlang77/ExData_Plotting1
|
496961be8c7e663cb2549aa50c7163b318b4d8f0
|
e44c89cb01736f4a467cad146bcc50bdef848b82
|
refs/heads/master
| 2021-01-20T14:48:17.258974
| 2017-05-11T03:33:34
| 2017-05-11T03:33:34
| 90,675,798
| 0
| 0
| null | 2017-05-08T21:54:02
| 2017-05-08T21:54:01
| null |
UTF-8
|
R
| false
| false
| 977
|
r
|
EDA Week1 Dataprep.R
|
##Exploratory Data Analysis
##Week 1 Assignment
## Author: brlang77
## date: May 8, 2017
## Estimated memory needed for file with 2M rows and 9 columns ~143MB
##Data source from UC Irvine Machine Learning Repository, on Electric Power Consumption
# One-shot script: download the household power data, parse dates, and
# keep only the two target days (2007-02-01 and 2007-02-02) as `data2`.
getwd()
# NOTE(review): machine-specific working directory; breaks on other machines.
setwd("~/Coursera/Exploratory Data Analysis")
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url,destfile="EPC.zip")
unzip("EPC.zip")
# ";"-separated file; "?" marks missing values
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?")
str(data) ##check formats, need to change date and time formats
head(data) ## appears to be day/month/year
##data$Time<-strptime(data$Time,"%H:%M:%S")
# Combine date + time into POSIXlt. NOTE(review): format string has no space
# between %Y and %H while paste() inserts one -- strptime tolerates this on
# common platforms, but confirm `dt` is not all NA.
data$dt<-strptime(paste(data$Date,data$Time),"%d/%m/%Y%H:%M:%S")
data$Date<-as.Date(data$Date,'%d/%m/%Y')
t1<-as.Date("2007-02-01")
t2<-as.Date("2007-02-02")
# t1:t2 yields the two day numbers; %in% matches Dates by numeric value.
data2<-data[data$Date %in% t1:t2,] ##get 2 days data (2007-02-01 and 2007-02-02)
rm(data)
|
03a79891c94982662a4857ad90a2182811c22a36
|
0ec1d97a20da6275bc3a538d74f6cf7796c1ba62
|
/workflows/GWAS_FA_MD/scripts/R/Covariates.R
|
48f8c51f4c9a48fcc88c543ca92b539c5286c315
|
[] |
no_license
|
ningjing0831/UKB_imaging_GWAS_2020
|
16ad0bc2fe293a695eb807df36c7a9375e68c772
|
018b944ddfaec520ef0271e7ae1e7439be1e3f0b
|
refs/heads/master
| 2023-01-05T05:47:38.819201
| 2020-04-23T15:31:19
| 2020-04-23T15:31:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,839
|
r
|
Covariates.R
|
# Covariates.R -- assemble the covariate table for one imaging-GWAS trait.
# Command-line contract (positional):
#   1 TRAIT     trait label used to name the output files
#   2 RDA_FILE  .rda file containing the UKB phenotype table `res_ukb`
#   3 QC_FILE   per-sample genotyping QC table (no header)
#   4 FAM_FILE  PLINK .fam file matching QC_FILE row order
#   5 PCA_FILE  genetic principal components (header; FID/IID + 10 PCs)
#   6 OUT_DIR   output directory
#   7 FIELDS    table mapping trait names (`traits`) to UKB columns (`col.name`)
args=commandArgs(trailingOnly=TRUE)
TRAIT = args[1]
RDA_FILE = args[2]
QC_FILE = args[3]
FAM_FILE = args[4]
PCA_FILE = args[5]
OUT_DIR = args[6]
FIELDS = args[7]
library(dplyr)
library(data.table)
library(mice)
library(FactoMineR)
#-------------------------------------------------
# IMPORT RDA_FILE
#-------------------------------------------------
# load() must bring `res_ukb` into scope -- it is used below.
load(RDA_FILE)
fields=fread(FIELDS, header=T)
# Interesting columns: named list mapping readable names to UKB field-ID
# patterns, later matched against colnames(res_ukb) with grep().
InterestingColumns=list(
## Main columns
IID="eid",
# WMH="f25781_2_0",
BrainSize="f25009_2_0",
## Covariates
AgeBaseline="f21022_0_0",
Sex="f31_0_0",
YearBirth="f34_0_0",
MonthBirth="f52_0_0",
DateAssessment="f53_2_0",
UKBcentre="f54_2_0",
MeanrfMRI="f25741_2_0",
MeantfMRI="f25742_2_0"
)
# Append one entry per requested trait from the FIELDS table.
for(i in 1:nrow(fields)){
InterestingColumns[[fields$traits[i]]]=fields$col.name[i]
}
# Target classes, in the same order as InterestingColumns. The names here
# are not used for matching (the recoding below is positional).
# NOTE(review): "UKBcenter" here vs "UKBcentre" above -- harmless because
# matching is positional, but confirm the two lists stay in sync.
Classes=list(
## Main columns
IID="character",
BrainSize="numeric",
## Covariates
AgeBaseline="numeric",
Sex="factor",
YearBirth="numeric",
MonthBirth="character",
DateAssessment="character",
UKBcenter="factor",
MeanrfMRI="numeric",
MeantfMRI="numeric"
## Correlation
# SysBloodPress_man=rep("numeric",2),
# DiasBloodPress_man=rep("numeric",2),
# SysBloodPress_auto=rep("numeric",2),
# DiasBloodPress_auto=rep("numeric",2)
)
# Every appended trait column is numeric.
for(i in 1:nrow(fields)){
Classes[[fields$traits[i]]]="numeric"
}
# Locate each field's column index in res_ukb by pattern match.
ColumnsIndices=lapply(InterestingColumns,function(x) grep(pattern=x, colnames(res_ukb)))
# Extract interesting columns
res_ukb_sub=res_ukb %>%
select(unlist(ColumnsIndices) )
colnames(res_ukb_sub)=names(unlist(ColumnsIndices))
# Recode each column of `data` to the class named in `classes`.
#
# data:    a data.frame whose i-th column is coerced according to classes[i]
#          (positional -- names in `classes` are ignored).
# classes: character vector with one of "numeric", "character" or "factor"
#          per column; any other value leaves that column untouched.
# Returns the recoded data.frame.
RecodeVariables=function(data, classes){
  # seq_along() is safe for an empty `classes` (1:length() would loop twice)
  for(i in seq_along(classes)){
    if(classes[i]=="numeric"){
      # via character first so factor levels are parsed, not their codes
      data[,i]=as.numeric(as.character(data[,i]))
    }else if(classes[i]=="character"){
      data[,i]=as.character(data[,i])
    }else if(classes[i]=="factor"){
      data[,i]=as.factor(data[,i])
    }
  }
  return(data)
}
# Apply the positional class coercions declared in `Classes`.
res_ukb_sub=RecodeVariables(res_ukb_sub, unlist(Classes))
#-------------------------------------------------
# IMPORT PCA_FILE
#-------------------------------------------------
# Extract samples from PCA
pca=fread(PCA_FILE, header=TRUE)
colnames(pca)=c("FID",
"IID",
paste("PC", 1:10 ,sep="_"))
pca = pca %>%
select(IID,FID, eval(paste("PC", 1:10, sep="_")))
pca$IID=as.character(pca$IID)
# right_join keeps exactly the genotyped samples present in the PCA file.
data2=res_ukb_sub %>%
right_join(pca, by= "IID")
#-------------------------------------------------
# IMPORT QC_FILE
#-------------------------------------------------
# NOTE(review): QC and FAM rows are combined positionally below
# (bind_cols), so the two files must be in the same sample order.
QC_data=fread(QC_FILE, header=FALSE)
FAM_data=fread(FAM_FILE, header=FALSE)
FAM_Columns=c(
"FID",
"IID",
"PID",
"MID",
"Sex",
"Batch"
)
QC_Columns=c(
paste("genotyping.array", 1:2, sep="_"),
paste("Batch", 1:2, sep="_"),
"Plate.Name",
"Well",
"Cluster.CR",
"dQC",
"Internal.Pico..ng.uL.",
"Submitted.Gender",
"Inferred.Gender",
"X.intensity",
"Y.intensity",
"Submitted.Plate.Name",
"Submitted.Well",
"sample.qc.missing.rate",
"heterozygosity",
"heterozygosity.pc.corrected",
"het.missing.outliers",
"putative.sex.chromosome.aneuploidy",
"in.kinship.table",
"excluded.from.kinship.inference",
"excess.relatives",
"in.white.British.ancestry.subset",
"used.in.pca.calculation",
paste("PC", 1:40, sep=""),
"in.Phasing.Input.chr1_22",
"in.Phasing.Input.chrX",
"in.Phasing.Input.chrXY"
)
colnames(FAM_data)=FAM_Columns
colnames(QC_data)=QC_Columns
QC_data=QC_data %>%
bind_cols(FAM_data)
# Keep only the sample ID and the genotyping batch.
QC_data=QC_data %>%
select(IID, Batch_1)
QC_data[["IID"]]=as.character(QC_data[["IID"]])
QC_data[["Batch_1"]]=as.factor(QC_data[["Batch_1"]])
output=data2 %>%
left_join(QC_data, by= "IID")
output=output%>%
select(FID,IID,everything())
#-------------------------------------------------
# TRANSFORMATION OF VARIABLES
#-------------------------------------------------
# Sex: keep the 0/1 code in Sex, add a labelled copy in SexCode.
output$SexCode=factor(output$Sex, levels=c("0", "1"))
levels(output$SexCode)=c("Female", "Male")
# Age at MRI : Compute from DateAssessment, MonthBirth, YearBirth
output$DateAssessment=as.Date(output$DateAssessment, format="%Y-%m-%d")
# Birthday approximated as the 1st of the birth month.
output$Birthday=unlist(lapply(1:nrow(output), function(x) paste(output$YearBirth[x],output$MonthBirth[x],1, sep="-")))
output$Birthday=as.Date(output$Birthday, format="%Y-%m-%d")
# Age in years (365-day year), plus quadratic and cubic terms.
output=output%>%
mutate( AgeMRI=as.numeric(difftime(DateAssessment,Birthday, unit="days"))/365,
AgeMRI2=AgeMRI^2,
AgeMRI3=AgeMRI^3
)
# UKB centre: label the two imaging centres, keep a named copy, then
# recode the factor levels to 0/1 for the GWAS covariate file.
output$UKBcentre=factor(output$UKBcentre, levels=c("11025", "11027"))
levels(output$UKBcentre)=c("Cheadle_imaging", "Newcastle_imaging")
output$UKBcentre_name=output$UKBcentre
levels(output$UKBcentre)=c("0", "1")
# Batch: same pattern -- named copy, then 0/1 levels.
output$Batch_name = output$Batch_1
levels(output$Batch_1)=c("0", "1")
# MeanrfMRI, MeantfMRI: Imputation of missing values
subimputation=output %>%
select(Sex,
UKBcentre,
MeanrfMRI,
MeantfMRI,
eval(paste("PC_",1:10,sep="")),
AgeMRI,
Batch_1)
# Predictive mean matching via mice; only the first imputed dataset is
# used to fill the fMRI motion covariates.
# NOTE(review): mice() is stochastic -- set a seed upstream for
# reproducible imputations.
imputeddata=mice(subimputation, method="pmm")
output[is.na(output$MeanrfMRI),"MeanrfMRI"]=imputeddata$imp$MeanrfMRI[,1]
output[is.na(output$MeantfMRI),"MeantfMRI"]=imputeddata$imp$MeantfMRI[,1]
# Add PC: PCA across the trait columns; 5 components are appended as
# PC1_trait..PC5_trait and the fitted PCA object is saved for reuse.
pca_trait_data = output[,fields$traits]
res.pca=PCA(pca_trait_data, graph=F)
EIG=res.pca$eig
PCS=res.pca$ind$coord
colnames(PCS)=paste("PC",1:5,"_trait",sep="")
output = cbind(output,PCS)
save(res.pca,file=paste(OUT_DIR, "/", TRAIT, "_pca.rda", sep=""))
#-------------------------------------------------
# OUTPUT
#-------------------------------------------------
write.table(output, paste(OUT_DIR, "/", TRAIT, "_covar.txt", sep=""), quote=FALSE, row.names=FALSE, col.names=TRUE, sep="\t")
write.table(EIG, paste(OUT_DIR, "/", TRAIT, "_eig.txt", sep=""), quote=FALSE, row.names=FALSE, col.names=TRUE, sep="\t")
|
560c640fa232c7a12abe137ad8ad7e502738f907
|
cbb7ce08926cfe11d3fcc2315661374662e099c7
|
/merge_data.R
|
f0bb75529ba7661b5dde26044f07939e6996e8db
|
[] |
no_license
|
aiddata/westbank_inpii
|
61257519d00cabf628e5737434d4247393c387e7
|
78248b7acc5e2aa19e911acd2255230f06ac974c
|
refs/heads/master
| 2020-12-23T03:50:23.532540
| 2020-03-04T19:49:29
| 2020-03-04T19:49:29
| 237,021,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,930
|
r
|
merge_data.R
|
# merge_data.R -- attach Arab Barometer Wave III/IV survey responses to
# their surveyed locations and to INP-II road-project data.
# NOTE(review): machine-specific path; update before running elsewhere.
box_loc <- '/Users/christianbaehr/Box Sync/westbank_inpii'
library(haven)
library(plyr)
library(sf)
library(raster)
library(rgdal)
library(stargazer)
#################################################
## Read in Wave III locations
# locations <- read.csv(paste0(box_loc, '/inputData/arabbarometer/WaveIII Locations.csv'), stringsAsFactors = F)
# locations <- st_read(paste0(box_loc, "/inputData/arabbarometer/waveiii_final.geojson"), stringsAsFactors=F)
# locations <- locations[!duplicated(locations$v05aENG),]
# One polygon per unique location name (v05aENG); duplicates dropped.
locations <- st_read(paste0(box_loc, "/inputData/arabbarometer_locations.geojson"), stringsAsFactors=F)
locations <- locations[!duplicated(locations$v05aENG),]
#################################################
# Wave III: match survey rows to polygons via the Stata location file
# (qid -> location name -> all qids at that location).
wave3_locations <- paste0(box_loc, '/inputData/arabbarometer/ABWaveIIIPalestinewLocation.dta') %>%
read_dta() %>%
.[,1:5]
wave3_survey <- read.csv(paste0(box_loc, "/inputData/ABIII_English.csv"), stringsAsFactors = F)
wave3_survey <- wave3_survey[wave3_survey$country=="Palestine", ]
# For each location polygon, replicate its attributes onto every Wave III
# respondent interviewed there and stack the result into `wave3`.
for(i in 1:nrow(locations)) {
x <- wave3_locations$v05aENG[which(wave3_locations$qid==locations$qid[i])]
ids <- wave3_locations$qid[which(wave3_locations$v05aENG==x)]
rows <- wave3_survey[which(wave3_survey$qid %in% ids), ]
locs <- data.frame(locations[rep(i, nrow(rows)),])
if(i==1) {
wave3 <- cbind(locs, rows)
} else {
# NOTE(review): `a` is assigned but never used (dead code).
a <- cbind(rows, locs)
wave3 <- rbind(wave3, cbind(locs, rows))
}
}
# sum(wave3$v04 == wave3$q1)
# View(wave3[wave3$v04 != wave3$q1, c("v04", "q1")])
## checks out...only deviations are spelling issues
wave3 <- wave3[, !names(wave3) %in% c("a1", "q1", "q13")]
wave3$v03 <- "West Bank"
#################################################
# Wave IV: merge on a lower-cased "governorate + location" key.
wave4_survey <- read.csv(paste0(box_loc, "/inputData/ABIV_English.csv"), stringsAsFactors = F)
wave4_survey <- wave4_survey[wave4_survey$country=="Palestine", ]
# locations$merge <- tolower(locations$v05aENG)
# wave4_survey$merge <- tolower(wave4_survey$q2)
locations$merge <- tolower(paste(locations$v04, locations$v05aENG))
wave4_survey$merge <- tolower(paste(wave4_survey$q1, wave4_survey$q2))
wave4 <- merge(locations, wave4_survey, by="merge")
sum(wave4$v04 == wave4$q1)
## checks out...all ADM2 and ADM3s match
wave4 <- wave4[, !names(wave4) %in% c("a1", "q1", "q2", "q13")]
#################################################
# Keep only the merged waves (and the base path) in the workspace.
rm(list = setdiff(ls(), c("box_loc", "wave3", "wave4")))
#################################################
#################################################
# Convert sf geometries to sp objects for over()/extract() below.
wave3$geometry <- as(wave3$geometry, 'Spatial')
wave4$geometry <- as(wave4$geometry, 'Spatial')
wave3 <- SpatialPolygonsDataFrame(Sr=wave3$geometry, data = wave3, match.ID = F)
wave4 <- SpatialPolygonsDataFrame(Sr=wave4$geometry, data = data.frame(wave4), match.ID = F)
###
# INP-II road projects as spatial lines.
roads <- st_read(paste0(box_loc, "/inputData/inpii_roads.geojson"), stringsAsFactors = F)
roads <- SpatialLinesDataFrame(sl = as(roads$geometry, 'Spatial'), data = data.frame(roads), match.ID = F)
# Overlay: for each polygon, the list of intersecting road records.
wave3roads <- over(wave3, roads, returnList = T)
# Pull the road-completion dates out of one overlay result.
#
# x: a data.frame (one element of the sp::over() return list) with a
#    character `date` column in month/day/2-digit-year format.
# Returns a character vector of the parseable dates ("YYYY-MM-DD");
# unparseable entries become NA (optional=TRUE) and are dropped.
# (TRUE spelled out instead of T, which is reassignable.)
extract.dates <- function(x) {
  parsed <- as.Date(x[["date"]], tryFormats = "%m/%d/%y", optional = TRUE)
  as.character(parsed[!is.na(parsed)])
}
# Reduce each overlay result to its vector of road end dates, then derive
# a road count and a binary treatment indicator per polygon.
wave3roads <- lapply(wave3roads, FUN = extract.dates)
# wave3 <- as.data.frame(wave3, stringsAsFactors=F)
wave3$end_dates <- wave3roads
# wave3$geometry <- as(wave3$geometry, "Spatial")
# wave3 <- SpatialPolygonsDataFrame(Sr=wave3$geometry, data = wave3, match.ID = F)
wave3$roads <- sapply(wave3$end_dates, length)
wave3$treatment <- ifelse(wave3$roads>0, 1, 0)
# Same treatment coding for Wave IV polygons.
wave4roads <- over(wave4, roads, returnList = T)
wave4roads <- lapply(wave4roads, FUN = extract.dates)
wave4$end_dates <- wave4roads
wave4$roads <- sapply(wave4$end_dates, length)
wave4$treatment <- ifelse(wave4$roads>0, 1, 0)
###
# Polygon-mean covariates extracted from rasters: DMSP nighttime lights
# 2007-2012, VIIRS 2012, gridded population, and travel time to cities.
wave3$dmsp2007 <- raster(paste0(box_loc, "/inputData/ntl/dmsp_2007.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$dmsp2008 <- raster(paste0(box_loc, "/inputData/ntl/dmsp_2008.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$dmsp2009 <- raster(paste0(box_loc, "/inputData/ntl/dmsp_2009.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$dmsp2010 <- raster(paste0(box_loc, "/inputData/ntl/dmsp_2010.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$dmsp2011 <- raster(paste0(box_loc, "/inputData/ntl/dmsp_2011.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$dmsp2012 <- raster(paste0(box_loc, "/inputData/ntl/dmsp_2012.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$viirs2012max <- raster(paste0(box_loc, "/inputData/ntl/VIIRS_2012MAX.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$population <- raster(paste0(box_loc, "/inputData/population/population_2010_gpw.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
wave3$dist_to_city <- raster(paste0(box_loc, "/inputData/city_access/accessibility_eujrc.tif")) %>%
extract(., wave3, fun=mean) %>%
as.numeric(.)
###
# Working copies for the recoding sections below.
test3 <- wave3
test4 <- wave4
#################################################
### Wave 3 Demographics ###
# Recode raw Wave III answer strings into numeric analysis variables.
# Conventions: binary dummies are 1/0 with NA for "Missing"; ordered
# scales are coded so that larger = more/better; bipolar items span
# negative-to-positive integer codes.
test3$age <- as.numeric(test3$q1001)
test3$male <- ifelse(test3$q1002=="Male", 1, 0)
# Education: 7-point ladder from no formal education (1) to MA+ (7).
test3$education <- ifelse(test3$q1003=="MA and above", 7,
ifelse(test3$q1003=="ba", 6,
ifelse(test3$q1003=="Mid-level diploma (professional or technical)", 5,
ifelse(test3$q1003=="Secondary", 4,
ifelse(test3$q1003=="Prepartory/Basic", 3,
ifelse(test3$q1003=="Elementary", 2,
ifelse(test3$q1003=="Illiterate/No formal education", 1, NA)))))))
test3$married <- ifelse(test3$q1010=="Married", 1,
ifelse(test3$q1010=="Missing", NA, 0))
test3$muslim <- ifelse(test3$q1012=="Muslim", 1,
ifelse(test3$q1012=="Missing", NA, 0))
test3$christian <- ifelse(test3$q1012=="Christian", 1,
ifelse(test3$q1012=="Missing", NA, 0))
test3$income <- ifelse(test3$q1014=="No income", 0,
ifelse(test3$q1014=="Missing", NA, as.numeric(test3$q1014)))
test3$urban <- ifelse(test3$v13=="City", 1, 0)
test3$rural <- ifelse(test3$v13=="Village", 1, 0)
test3$refugee_camp <- ifelse(test3$v13=="Refugee camp", 1, 0)
test3$employed <- ifelse(test3$q1004=="Yes", 1,
ifelse(test3$q1004=="No", 0, NA))
test3$full_time <- ifelse(test3$q1006=="Full time (30 hours or more a week)", 1,
ifelse(test3$q1006=="Part time (less than 30 hours a week)", 0, NA))
test3$public_employee <- ifelse(test3$q1006a=="Public", 1,
ifelse(test3$q1006a=="Private", 0, NA))
test3$retired <- ifelse(test3$q1005=="Retired", 1,
ifelse(test3$q1005=="Missing", NA, 0))
test3$housewife <- ifelse(test3$q1005=="A housewife", 1,
ifelse(test3$q1005=="Missing", NA, 0))
test3$student <- ifelse(test3$q1005=="A student", 1,
ifelse(test3$q1005=="Missing", NA, 0))
test3$unemployed <- ifelse(test3$q1005=="Unemployed", 1,
ifelse(test3$q1005=="Missing", NA, 0))
test3$homeowner <- ifelse(test3$q1013 %in% c("Owned", "Owned with mortgage payments to a bank"), 1,
ifelse(test3$q1013=="Missing", NA, 0))
test3$own_a_computer <- ifelse(test3$q1011a=="Yes", 1,
ifelse(test3$q1011a=="No", 0, NA))
test3$own_a_car <- ifelse(test3$q1011b=="Yes", 1,
ifelse(test3$q1011b=="No", 0, NA))
### Wave 3 Domestic Governance ###
# How would you evaluate the current economic situation in your country?
test3$q101 <- ifelse(test3$q101=="Very good", 2,
ifelse(test3$q101=="Good", 1,
ifelse(test3$q101=="Bad", -1,
ifelse(test3$q101=="Very bad", -2, NA))))
# What do you think will be the economic situation in your country during the next few years (3-5 years) compared to the current situation?
test3$q102 <- ifelse(test3$q102=="Much better", 2,
ifelse(test3$q102=="Somewhat better", 1,
ifelse(test3$q102=="Almost the same as the current situation", 0,
ifelse(test3$q102=="Somewhat worse", -1,
ifelse(test3$q102=="Much worse", -2, NA)))))
# to what extent do you trust: the government (cabinet)
test3$q2011 <- ifelse(test3$q2011=="I trust it to a great extent", 4,
ifelse(test3$q2011=="I trust it to a medium extent", 3,
ifelse(test3$q2011=="I trust it to a limited extent", 2,
ifelse(test3$q2011=="I absolutely do not trust it", 1, NA))))
# to what extent do you trust: The elected council of representatives (the parliament)
test3$q2013 <- ifelse(test3$q2013=="I trust it to a great extent", 4,
ifelse(test3$q2013=="I trust it to a medium extent", 3,
ifelse(test3$q2013=="I trust it to a limited extent", 2,
ifelse(test3$q2013=="I absolutely do not trust it", 1, NA))))
# to what extent do you trust: Public Security (the police)
# test3$q2014 <- ifelse(test3$q2014=="I trust it to a great extent", 4,
#                       ifelse(test3$q2014=="I trust it to a medium extent", 3,
#                              ifelse(test3$q2014=="I trust it to a limited extent", 2,
#                                     ifelse(test3$q2014=="I absolutely do not trust it", 1, NA))))
# to what extent do you trust: The armed forces (the army)
# test3$q2016 <- ifelse(test3$q2016=="I trust it to a great extent", 4,
#                       ifelse(test3$q2016=="I trust it to a medium extent", 3,
#                              ifelse(test3$q2016=="I trust it to a limited extent", 2,
#                                     ifelse(test3$q2016=="I absolutely do not trust it", 1, NA))))
# How would you evaluate the performance of the government in carrying out its tasks and duties?
# test3$q2031 <- ifelse(test3$q2031=="Very good", 2,
#                       ifelse(test3$q2031=="Good", 1,
#                              ifelse(test3$q2031=="Neither good nor bad", 0,
#                                     ifelse(test3$q2031=="Bad", -1,
#                                            ifelse(test3$q2031=="Very bad", -2, NA)))))
# How would you evaluate the performance of the Parliament in carrying out its tasks and duties?
# test3$q2032 <- ifelse(test3$q2032=="Very good", 2,
#                       ifelse(test3$q2032=="Good", 1,
#                              ifelse(test3$q2032=="Neither good nor bad", 0,
#                                     ifelse(test3$q2032=="Bad", -1,
#                                            ifelse(test3$q2032=="Very bad", -2, NA)))))
# How would you evaluate the performance of the judiciary in carrying out its tasks and duties?
# test3$q2033 <- ifelse(test3$q2033=="Very good", 2,
#                       ifelse(test3$q2033=="Good", 1,
#                              ifelse(test3$q2033=="Neither good nor bad", 0,
#                                     ifelse(test3$q2033=="Bad", -1,
#                                            ifelse(test3$q2033=="Very bad", -2, NA)))))
# How would you evaluate the performance of the police in carrying out its tasks and duties?
# test3$q2034 <- ifelse(test3$q2034=="Very good", 2,
#                       ifelse(test3$q2034=="Good", 1,
#                              ifelse(test3$q2034=="Neither good nor bad", 0,
#                                     ifelse(test3$q2034=="Bad", -1,
#                                            ifelse(test3$q2034=="Very bad", -2, NA)))))
# If you were to evaluate the state of democracy and human rights in your country today, would you say that they are
# test3$q504 <- ifelse(test3$q504=="Very good", 2,
#                      ifelse(test3$q504=="Good", 1,
#                             ifelse(test3$q504=="Neither good nor bad", 0,
#                                    ifelse(test3$q504=="Bad", -1,
#                                           ifelse(test3$q504=="Very bad", -2, NA)))))
# 0-10 scales: named endpoints mapped to 0/10; numeric answers kept as-is.
# In your opinion, to what extent is your country democratic?
test3$q511 <- as.numeric(ifelse(test3$q511=="Democratic to the greatest extent possible", 10,
ifelse(test3$q511=="No democracy whatsoever", 0,
ifelse(!is.na(as.numeric(test3$q511)), test3$q511, NA))))
# To what extent are you satisfied with the government’s performance?
test3$q513 <- as.numeric(ifelse(test3$q513=="Completely satisfied", 10,
ifelse(test3$q513=="Absolutely unsatisfied", 0,
ifelse(!is.na(as.numeric(test3$q513)), test3$q513, NA))))
# Democratic regimes are indecisive and full of problems. To what extent do you agree?
test3$q5162 <- ifelse(test3$q5162=="I strongly agree", 2,
ifelse(test3$q5162=="I somewhat agree", 1,
ifelse(test3$q5162=="I somewhat disagree", -1,
ifelse(test3$q5162=="I strongly disagree", -2, NA))))
### Wave 3 International Relations ###
# Relations items: 1 = stronger, 0 = same, -1 = weaker.
## future economic relations btwn palestine/us
test3$q7001 <- ifelse(test3$q7001=="Become stronger than they were in previous years", 1,
ifelse(test3$q7001=="Become weaker than they were in previous years", -1,
ifelse(test3$q7001=="Remain the same as they were in previous years", 0, NA)))
## future economic relations btwn palestine/saudi
test3$q7002 <- ifelse(test3$q7002=="Become stronger than they were in previous years", 1,
ifelse(test3$q7002=="Become weaker than they were in previous years", -1,
ifelse(test3$q7002=="Remain the same as they were in previous years", 0, NA)))
## future economic relations btwn palestine/israel. Not asked in Wave IV
test3$q7006 <- ifelse(test3$q7006=="Become stronger than they were in previous years", 1,
ifelse(test3$q7006=="Become weaker than they were in previous years", -1,
ifelse(test3$q7006=="Remain the same as they were in previous years", 0, NA)))
## future security relations btwn palestine/us. Not asked in Wave IV
test3$q700a1 <- ifelse(test3$q700a1=="Become stronger than they were in previous years", 1,
ifelse(test3$q700a1=="Become weaker than they were in previous years", -1,
ifelse(test3$q700a1=="Remain the same as they were in previous years", 0, NA)))
## future security relations btwn palestine/saudi Not asked in Wave IV
test3$q700a2 <- ifelse(test3$q700a2=="Become stronger than they were in previous years", 1,
ifelse(test3$q700a2=="Become weaker than they were in previous years", -1,
ifelse(test3$q700a2=="Remain the same as they were in previous years", 0, NA)))
## future security relations btwn palestine/israel. Not asked in Wave IV
test3$q700a6 <- ifelse(test3$q700a6=="Become stronger than they were in previous years", 1,
ifelse(test3$q700a6=="Become weaker than they were in previous years", -1,
ifelse(test3$q700a6=="Remain the same as they were in previous years", 0, NA)))
## do you want more or less foreign aid? Not asked in Wave IV
test3$q701c <- ifelse(test3$q701c=="Increase a lot", 2,
ifelse(test3$q701c=="Increase a little", 1,
ifelse(test3$q701c=="Remain at its current level", 0,
ifelse(test3$q701c=="Decrease a little", -1,
ifelse(test3$q701c=="Decrease a lot", -2, NA)))))
## impact of foreign investment on people with similar econ conditions as you? Not asked in Wave IV
test3$q701d6 <- ifelse(test3$q701d6=="Very positive", 2,
ifelse(test3$q701d6=="Somewhat positive", 1,
ifelse(test3$q701d6=="Has no impact", 0,
ifelse(test3$q701d6=="Negative", -1,
ifelse(test3$q701d6=="Very negative", -2, NA)))))
## do you think americans are generally good people?
test3$q707 <- ifelse(test3$q707=="I agree", 1,
ifelse(test3$q707=="I disagree", 0, NA))
## US intervention in the region justifies arms against the US
test3$q706 <- ifelse(test3$q706=="I strongly agree", 2,
ifelse(test3$q706=="I agree", 1,
ifelse(test3$q706=="I disagree", -1,
ifelse(test3$q706=="I strongly disagree", -2, NA))))
#################################################
### Wave 4 Demographics ###
test4$age <- as.numeric(test4$q1001)
test4$male <- ifelse(test4$q1002=="Male", 1, 0)
test4$education <- ifelse(test4$q1003=="MA and above", 7,
ifelse(test4$q1003=="BA", 6,
ifelse(test4$q1003=="Mid-level diploma/professional or technical", 5,
ifelse(test4$q1003=="Secondary", 4,
ifelse(test4$q1003=="Preparatory/Basic", 3,
ifelse(test4$q1003=="Elementary", 2,
ifelse(test4$q1003=="Illiterate/No formal education", 1, NA)))))))
test4$married <- ifelse(test4$q1010=="Married", 1, 0)
test4$muslim <- ifelse(test4$q1012=="Muslim", 1, 0)
test4$christian <- ifelse(test4$q1012=="Christian", 1, 0)
### No explicit income measure in test4
test4$urban <- ifelse(test4$v05=="city", 1, 0)
test4$rural <- ifelse(test4$v05 %in% c("Village/Town", "Village/ Town"), 1, 0)
test4$refugee_camp <- ifelse(test4$v05=="Refugee Camp", 1, 0)
test4$employed <- ifelse(test4$q1004=="Yes", 1, 0)
test4$full_time <- ifelse(test4$q1006=="Full time (30 hours or more a week)", 1, 0)
test4$public_employee <- ifelse(test4$q1006a=="Public", 1,
ifelse(test4$q1006a=="", NA, 0))
test4$retired <- ifelse(test4$q1005=="Retired", 1,
ifelse(test4$q1005=="Decline to answer (Do not read)", NA, 0))
test4$housewife <- ifelse(test4$q1005=="Housewife", 1,
ifelse(test4$q1005=="Decline to answer (Do not read)", NA, 0))
test4$student <- ifelse(test4$q1005=="Student", 1,
ifelse(test4$q1005=="Decline to answer (Do not read)", NA, 0))
test4$student <- ifelse(test4$q1005=="Student", 1,
ifelse(test4$q1005=="Decline to answer (Do not read)", NA, 0))
test4$unemployed <- ifelse(test4$q1005=="Unemployed", 1,
ifelse(test4$q1005=="Decline to answer (Do not read)", NA, 0))
### Wave 4 Domestic Governance ###
# Coding conventions in this section:
#  - 4/5-point evaluations -> symmetric scores 2..-2 (0 = neutral midpoint)
#  - trust items -> 4 (a great deal) .. 1 (no trust at all)
#  - 0-10 self-placement items -> numeric, labelled endpoints mapped to 10/0
# Unmatched labels (don't know / refusals) become NA.
# How would you evaluate the current economic situation in your country?
test4$q101 <- ifelse(test4$q101=="Very good", 2,
ifelse(test4$q101=="Good", 1,
ifelse(test4$q101=="Bad", -1,
ifelse(test4$q101=="Very bad", -2, NA))))
# What do you think will be the economic situation in your country during the next few years (3-5 years) compared to the current situation?
test4$q102 <- ifelse(test4$q102=="Much better", 2,
ifelse(test4$q102=="Somewhat better", 1,
ifelse(test4$q102=="Almost the same as the current situation", 0,
ifelse(test4$q102=="Somewhat worse", -1,
ifelse(test4$q102=="Much worse", -2, NA)))))
# to what extent do you trust: the government (cabinet)
test4$q2011 <- ifelse(test4$q2011=="A great deal of trust", 4,
ifelse(test4$q2011=="Quite a lot of trust", 3,
ifelse(test4$q2011=="Not very much trust", 2,
ifelse(test4$q2011=="No trust at all", 1, NA))))
# to what extent do you trust: The elected council of representatives (the parliament)
test4$q2013 <- ifelse(test4$q2013=="A great deal of trust", 4,
ifelse(test4$q2013=="Quite a lot of trust", 3,
ifelse(test4$q2013=="Not very much trust", 2,
ifelse(test4$q2013=="No trust at all", 1, NA))))
# NOTE(review): the commented-out recodes below reference test3 (Wave III)
# inside the Wave IV section -- copy-paste residue kept for reference only.
# to what extent do you trust: Public Security (the police)
# test3$q2014 <- ifelse(test3$q2014=="I trust it to a great extent", 4,
#                       ifelse(test3$q2014=="I trust it to a medium extent", 3,
#                              ifelse(test3$q2014=="I trust it to a limited extent", 2,
#                                     ifelse(test3$q2014=="I absolutely do not trust it", 1, NA))))
# to what extent do you trust: The armed forces (the army)
# test3$q2016 <- ifelse(test3$q2016=="I trust it to a great extent", 4,
#                       ifelse(test3$q2016=="I trust it to a medium extent", 3,
#                              ifelse(test3$q2016=="I trust it to a limited extent", 2,
#                                     ifelse(test3$q2016=="I absolutely do not trust it", 1, NA))))
# How would you evaluate the performance of the government in carrying out its tasks and duties?
# test3$q2031 <- ifelse(test3$q2031=="Very good", 2,
#                       ifelse(test3$q2031=="Good", 1,
#                              ifelse(test3$q2031=="Neither good nor bad", 0,
#                                     ifelse(test3$q2031=="Bad", -1,
#                                            ifelse(test3$q2031=="Very bad", -2, NA)))))
# How would you evaluate the performance of the Parliament in carrying out its tasks and duties?
# test3$q2032 <- ifelse(test3$q2032=="Very good", 2,
#                       ifelse(test3$q2032=="Good", 1,
#                              ifelse(test3$q2032=="Neither good nor bad", 0,
#                                     ifelse(test3$q2032=="Bad", -1,
#                                            ifelse(test3$q2032=="Very bad", -2, NA)))))
# How would you evaluate the performance of the judiciary in carrying out its tasks and duties?
# test3$q2033 <- ifelse(test3$q2033=="Very good", 2,
#                       ifelse(test3$q2033=="Good", 1,
#                              ifelse(test3$q2033=="Neither good nor bad", 0,
#                                     ifelse(test3$q2033=="Bad", -1,
#                                            ifelse(test3$q2033=="Very bad", -2, NA)))))
# How would you evaluate the performance of the police in carrying out its tasks and duties?
# test3$q2034 <- ifelse(test3$q2034=="Very good", 2,
#                       ifelse(test3$q2034=="Good", 1,
#                              ifelse(test3$q2034=="Neither good nor bad", 0,
#                                     ifelse(test3$q2034=="Bad", -1,
#                                            ifelse(test3$q2034=="Very bad", -2, NA)))))
# If you were to evaluate the state of democracy and human rights in your country today, would you say that they are
# test3$q504 <- ifelse(test3$q504=="Very good", 2,
#                      ifelse(test3$q504=="Good", 1,
#                             ifelse(test3$q504=="Neither good nor bad", 0,
#                                    ifelse(test3$q504=="Bad", -1,
#                                           ifelse(test3$q504=="Very bad", -2, NA)))))
# In your opinion, to what extent is your country democratic?
# 0-10 scale; numeric-looking strings pass through as.numeric (coercion
# warnings for non-numeric labels are expected and harmless here).
test4$q511 <- as.numeric(ifelse(test4$q511=="Complete democracy", 10,
ifelse(test4$q511=="Complete dictatorship", 0,
ifelse(!is.na(as.numeric(test4$q511)), test4$q511, NA))))
# To what extent are you satisfied with the government’s performance?
test4$q513 <- as.numeric(ifelse(test4$q513=="Completely satisfied", 10,
ifelse(test4$q513=="Completely unsatisfied", 0,
ifelse(!is.na(as.numeric(test4$q513)), test4$q513, NA))))
# Democratic regimes are indecisive and full of problems. To what extent do you agree?
test4$q5162 <- ifelse(test4$q5162=="I strongly agree", 2,
ifelse(test4$q5162=="I agree", 1,
ifelse(test4$q5162=="I disagree", -1,
ifelse(test4$q5162=="I strongly disagree", -2, NA))))
### Wave 4 International Relations ###
### No home ownership question in Wave IV
# Asset-ownership dummies (rough wealth proxies, since there is no income item).
test4$own_a_computer <- ifelse(test4$q1011a=="Yes", 1, 0)
test4$own_a_car <- ifelse(test4$q1011b=="Yes", 1, 0)
## future economic relations btwn palestine/US: stronger=1, same=0, weaker=-1
test4$q7001 <- ifelse(test4$q7001=="Become stronger than they were in previous years", 1,
ifelse(test4$q7001=="Remain the same as they were in previous years", 0,
ifelse(test4$q7001=="Become weaker than they were in previous years", -1, NA)))
## future economic relations btwn palestine/Saudi
test4$q7002 <- ifelse(test4$q7002=="Become stronger than they were in previous years", 1,
ifelse(test4$q7002=="Remain the same as they were in previous years", 0,
ifelse(test4$q7002=="Become weaker than they were in previous years", -1, NA)))
## influence of US on development of democracy in your country. Not asked in Wave III
# test4$q7011 <- ifelse(test4$q7011=="Very positive", 2,
#                       ifelse(test4$q7011=="Somewhat positive", 1,
#                              ifelse(test4$q7011=="Neither positive nor negative", 0,
#                                     ifelse(test4$q7011=="Somewhat negative", -1,
#                                            ifelse(test4$q7011=="Very negative", -2, NA)))))
## do you think (ordinary) americans are generally good people? Slight variation of Wave III
test4$q707 <- ifelse(test4$q707=="Agree", 1,
ifelse(test4$q707=="Disagree", 0, NA))
## American and Western culture have positive aspects? Not asked in Wave III
test4$q708 <- ifelse(test4$q708=="Strongly agree", 2,
ifelse(test4$q708=="Agree", 1,
ifelse(test4$q708=="Disagree", -1,
ifelse(test4$q708=="Strongly disagree", -2, NA))))
## Do you support the two-state Arab-Israeli solution?
test4$q709a <- ifelse(test4$q709a=="Support", 1,
ifelse(test4$q709a=="Oppose", 0, NA))
## Is western infuence an obstacle to reform in your country?
# NOTE(review): the "Don’t agree" labels below use a curly apostrophe (U+2019);
# matching silently fails if the raw data uses a straight quote -- confirm
# against the Wave IV codebook/export encoding.
test4$q7114 <- ifelse(test4$q7114=="Agree to a large extent", 2,
ifelse(test4$q7114=="Agree to some extent", 1,
ifelse(test4$q7114=="Don’t agree", -1,
ifelse(test4$q7114=="Don’t agree at all", -2, NA))))
## What country poses the greatest threat to stability in your country? (US dummy)
# DK/refusal -> NA (not 0) so the dummies are defined only for substantive answers.
test4$q714us <- ifelse(test4$q714=="The United States", 1,
ifelse(test4$q714 %in% c("Decline to answer (Do not read)", "Don't know (Do not read)"), NA, 0))
## What country poses the greatest threat to stability in your country? (SA dummy)
test4$q714sa <- ifelse(test4$q714=="Saudi Arabia", 1,
ifelse(test4$q714 %in% c("Decline to answer (Do not read)", "Don't know (Do not read)"), NA, 0))
## What country poses the greatest threat to stability in your country? (ISR dummy)
test4$q714is <- ifelse(test4$q714=="Israel", 1,
ifelse(test4$q714 %in% c("Decline to answer (Do not read)", "Don't know (Do not read)"), NA, 0))
## to what degree you would describe yourself as feeling angry toward the US?
test4$q8341 <- ifelse(test4$q8341=="Very angry", 4,
ifelse(test4$q8341=="Somewhat angry", 3,
ifelse(test4$q8341=="Not very angry", 2,
ifelse(test4$q8341=="Not angry at all", 1, NA))))
#################################################
# Pre-treatment night-lights trend: for each sampling unit (qid), fit an OLS
# line through its annual DMSP values 2007-2012 and store the slope on every
# respondent row of that unit. Values are constant within qid, so the series
# is read from the unit's first row only.
dmsp_cols <- paste0("dmsp", 2007:2012)
test3$dmsp_pretrend <- NA
for (unit in unique(test3$qid)) {
  in_unit <- test3$qid == unit
  ntl_series <- as.numeric(test3@data[which(in_unit)[1], dmsp_cols])
  yearly_fit <- lm(ntl_series ~ seq_len(6))
  test3$dmsp_pretrend[in_unit] <- coef(yearly_fit)[2]
}
# Harmonize village labels across waves, then keep only villages observed in
# both waves. The bare sum()/unique() calls are interactive diagnostics that
# print overlap counts and the unmatched labels.
sum(test4$v05aENG %in% test3$v05aENG)
sum(test3$v05aENG %in% test4$v05aENG)
unique(test3$v05aENG[which(!test3$v05aENG %in% test4$v05aENG)])
# Manual relabel so Wave III matches the Wave IV spelling.
test3$v05aENG[test3$v05aENG=="Old-City-Nablus"] <- "Nablus City"
# test3$v05aENG[test3$v05aENG=="Al-faraa Refugee"] <- "Al-Faraa Refugee"
# test3$v05aENG[test3$v05aENG=="Askar Refugee"] <- "Askar refugee"
unique(test3$v05aENG[!test3$v05aENG %in% test4$v05aENG])
# Drop respondents in villages that do not appear in the other wave.
test3 <- test3[which(test3$v05aENG %in% test4$v05aENG),]
unique(test4$v05aENG[!test4$v05aENG %in% test3$v05aENG])
test4 <- test4[test4$v05aENG %in% test3$v05aENG,]
# Wave III summary-statistics tables (stargazer -> LaTeX under box_loc/Results).
# Each section writes three tables: full sample, treatment only, control only.
out_names <- c("age",
"male",
"education",
"married",
"muslim",
"christian",
"income",
"employed",
"own_a_car")
# Display labels, same order as out_names.
out_labels <- c("Age",
"Male (dummy)",
"Level of Educ., 0=None, 7=MA+",
"Married (dummy)",
"Muslim (dummy)",
"Christian (dummy)",
"Income",
"Employed (dummy)",
"Owns a car (dummy)")
stargazer(test3@data[, out_names], type = "latex", title = "Wave III Demographics (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/demographics_full_wave3.tex"))
stargazer(test3@data[test3$treatment==1, out_names], type = "latex", title = "Wave III Demographics (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/demographics_treatment_wave3.tex"))
stargazer(test3@data[test3$treatment==0, out_names], type = "latex", title = "Wave III Demographics (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/demographics_control_wave3.tex"))
###
# Geographic / treatment covariates.
out_names <- c("treatment",
"roads",
"dmsp_pretrend",
"viirs2012max",
"population",
"dist_to_city",
"urban",
"rural",
"refugee_camp")
out_labels <- c("Treatment (dummy)",
"# of treatments",
"NTL Pre-trend (DMSP, 2007-12)",
"VIIRS 2012 (max)",
"Population (CIESIN)",
"Distance to nearest city (pop>50k)",
"Urban (dummy)",
"Rural (dummy)",
"Refugee Camp (dummy)")
stargazer(test3@data[, out_names], type = "latex", title = "Wave III Covariates (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/covariates_full_wave3.tex"),
digits.extra = 0)
stargazer(test3@data[test3$treatment==1, out_names], type = "latex", title = "Wave III Covariates (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/covariates_treatment_wave3.tex"),
digits.extra = 0)
stargazer(test3@data[test3$treatment==0, out_names], type = "latex", title = "Wave III Covariates (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/covariates_control_wave3.tex"),
digits.extra = 0)
###
# Wave III survey-outcome tables (same full/treatment/control pattern).
out_names <- c("q7001",
"q7002",
"q7006",
"q700a1",
"q700a2",
"q700a6",
"q706",
"q707",
"q701c")
out_labels <- c("Do you prefer economic relations btwn Pal./US improve",
"Do you prefer economic relations btwn Pal./Saudi improve",
"Do you prefer economic relations btwn Pal./Israel improve",
"Do you prefer security relations btwn Pal./US improve",
"Do you prefer security relations btwn Pal./Saudi improve",
"Do you prefer security relations btwn Pal./Israel improve",
"Do you agree: US interference justifies arms against the US?",
"Americans are generally good people",
"Should foreign aid to your country increase?")
stargazer(test3@data[, out_names], type = "latex", title = "Wave III Survey Questions (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey_full_wave3.tex"),
digits.extra = 0)
stargazer(test3@data[test3$treatment==1, out_names], type = "latex", title = "Wave III Survey Questions (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey_treatment_wave3.tex"),
digits.extra = 0)
stargazer(test3@data[test3$treatment==0, out_names], type = "latex", title = "Wave III Survey Questions (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey_control_wave3.tex"),
digits.extra = 0)
###
# writeOGR(obj = test3[names(test3)!="geometry"], dsn = paste0(box_loc, "/ProcessedData/wave3.geojson"),
#          layer = "qid", driver = "GeoJSON")
###
## mapping data
# Village-level means of selected variables for mapping; aggregate() on the
# spatial object averages coordinates/attributes within each village label.
agg_vars <- c("treatment",
"education",
"dist_to_city",
"unemployed",
"dmsp_pretrend",
"q513",
"q7001")
test3_out <- aggregate(test3[, agg_vars], by = list(test3$v05aENG), FUN=mean, na.rm=T)
# writeOGR(obj = test3_out[, names(test3_out)!="geometry"],
#          dsn = paste0(box_loc, "/ProcessedData/wave3_villagelevel.geojson"),
#          layer = "qid", driver = "GeoJSON")
# Wave IV village-level means (fewer covariates available in Wave IV).
agg_vars <- c("treatment",
"education",
"unemployed",
"q513",
"q7001")
test4_out <- aggregate(test4[, agg_vars], by = list(test4$v05aENG), FUN=mean, na.rm=T)
# writeOGR(obj = test4_out[, names(test4_out)!="geometry"],
#          dsn = paste0(box_loc, "/ProcessedData/wave4_villagelevel.geojson"),
#          layer = "qid", driver = "GeoJSON")
#################################################
# Wave IV summary-statistics tables (no income / own_a_car has no home-ownership
# counterpart -- see notes in the recode sections above).
out_names <- c("age",
"male",
"education",
"married",
"muslim",
"christian",
"employed",
"own_a_car")
out_labels <- c("Age",
"Male (dummy)",
"Level of Educ., 0=None, 7=MA+",
"Married (dummy)",
"Muslim (dummy)",
"Christian (dummy)",
"Employed (dummy)",
"Owns a car (dummy)")
stargazer(test4@data[, out_names], type = "latex", title = "Wave IV Demographics (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/demographics_full_wave4.tex"))
stargazer(test4@data[test4$treatment==1, out_names], type = "latex", title = "Wave IV Demographics (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/demographics_treatment_wave4.tex"))
stargazer(test4@data[test4$treatment==0, out_names], type = "latex", title = "Wave IV Demographics (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/demographics_control_wave4.tex"))
###
# Wave IV international-relations outcomes.
out_names <- c("q7001",
"q7002",
"q7011",
"q707",
"q708",
"q709a",
"q7114",
"q714us",
"q714sa",
"q714is",
"q8341")
out_labels <- c("Do you prefer economic relations btwn Pal./US improve",
"Do you prefer economic relations btwn Pal./Saudi improve",
"How does US influence democracy development in Pal.",
"*Ordinary* Americans are generally good people",
"American and Western culture has positive aspects",
"Do you support the two-state Arab-Israeli solution",
"Is western influence an obstacle to reform in Pal.?",
"What country poses greatest threat to your country (US Dummy)",
"What country poses greatest threat to your country (Saudi Dummy)",
"What country poses greatest threat to your country (ISR Dummy)",
"To what degree do you feel angry towards the US?")
stargazer(test4@data[, out_names], type = "latex", title = "Wave IV Survey Questions (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey_full_wave4.tex"),
digits.extra = 0)
stargazer(test4@data[test4$treatment==1, out_names], type = "latex", title = "Wave IV Survey Questions (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey_treatment_wave4.tex"),
digits.extra = 0)
stargazer(test4@data[test4$treatment==0, out_names], type = "latex", title = "Wave IV Survey Questions (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey_control_wave4.tex"),
digits.extra = 0)
###
# writeOGR(obj = test4[names(test4)!="geometry"], dsn = paste0(box_loc, "/ProcessedData/wave4.geojson"),
#          layer = "qid", driver = "GeoJSON")
###
# Domestic-governance outcome tables, written for BOTH waves (q101..q5162
# exist in test3 and test4 under the same names).
out_names <- c("q101",
"q102",
"q2011",
"q2013",
"q511",
"q513",
"q5162")
out_labels <- c("How do you evaluate the current economic state in Pal.?",
"Will the economic situation in Pal. be better in 3-5 yrs.?",
"To what extent do you trust the government (cabinet)?",
"To what extent do you trust the Parliament?",
"To what extent is your country democratic?",
"How satisfied are you with the governments performance?",
"Democratic regimes are indecisive and full of problems. Do you agree?")
stargazer(test3@data[, out_names], type = "latex", title = "Wave III Domestic Governance Survey Questions (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey-dg_full_wave3.tex"),
digits.extra = 0)
stargazer(test3@data[test3$treatment==1, out_names], type = "latex", title = "Wave III Domestic Governance Survey Questions (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey-dg_treatment_wave3.tex"),
digits.extra = 0)
stargazer(test3@data[test3$treatment==0, out_names], type = "latex", title = "Wave III Domestic Governance Survey Questions (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey-dg_control_wave3.tex"),
digits.extra = 0)
stargazer(test4@data[, out_names], type = "latex", title = "Wave IV Domestic Governance Survey Questions (Full Sample)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey-dg_full_wave4.tex"),
digits.extra = 0)
stargazer(test4@data[test4$treatment==1, out_names], type = "latex", title = "Wave IV Domestic Governance Survey Questions (Treatment Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey-dg_treatment_wave4.tex"),
digits.extra = 0)
stargazer(test4@data[test4$treatment==0, out_names], type = "latex", title = "Wave IV Domestic Governance Survey Questions (Control Only)",
covariate.labels = out_labels, out = paste0(box_loc, "/Results/survey-dg_control_wave4.tex"),
digits.extra = 0)
#################################################
# Pool Waves III and IV into one respondent-level data set (full_ab).
# Overlap sanity checks (printed interactively).
sum(test3$v05aENG %in% test4$v05aENG)
sum(test4$v05aENG %in% test3$v05aENG)
sum(test3$v04 %in% test4$v04)
sum(test4$v04 %in% test3$v04)
# Village-level means of Wave III geographic covariates; these are
# time-invariant, so they are merged onto Wave IV respondents by village.
agg_vars <- c("treatment",
"education",
"dist_to_city",
"unemployed",
"dmsp_pretrend",
"population",
"viirs2012max")
test3_out <- aggregate(test3[, agg_vars], by = list(test3$v05aENG), FUN=mean, na.rm=T)
test4 <- merge(test4@data, test3_out[, c("Group.1", "population", "dist_to_city", "dmsp_pretrend", "viirs2012max")],
by.x="v05aENG", by.y="Group.1")
# NOTE: test4 is a plain data.frame from here on (merge() drops the Spatial* class).
test3$wave4 <- 0
test4$wave4 <- 1
# Columns kept in the pooled panel (commented entries exist only in Wave IV).
cols <- c("v05aENG",
"v04",
"male",
"age",
"education",
"married",
"muslim",
#"income",
"urban",
"rural",
"refugee_camp",
"unemployed",
"own_a_car",
"population",
"dist_to_city",
"dmsp_pretrend",
"viirs2012max",
"end_dates",
"treatment",
"wave4",
"q7001",
"q7002",
"q7011",
"q707",
#"q708",
#"q709a",
#"q7114",
#"q714us",
#"q714sa",
#"q714is",
#"q8341",
"q101",
"q102",
"q2011",
"q2013",
"q511",
"q513",
"q5162")
full_ab <- rbind(test3@data[, cols], test4[, cols])
names(full_ab)[1:2] <- c("village_name", "district_name")
# Earliest project end date per respondent: each end_dates entry holds a
# vector of date strings; lapply() keeps Date class per element and
# do.call("c", ...) flattens to a single Date vector (replaces the previous
# sapply(..., simplify = F), which is just lapply spelled awkwardly).
min_enddate <- do.call("c", lapply(full_ab$end_dates, function(x) min(as.Date(x))))
full_ab$EarlyTreat <- ifelse(min_enddate<="2014-03-31", 1, 0)
# FIXME(review): row 998 is a magic index used to blank out one sentinel
# end-date value; this silently breaks if row order ever changes -- replace
# with an explicit date comparison once the sentinel value is confirmed.
full_ab$EarlyTreat[min_enddate==min_enddate[998]] <- NA
# end_dates is a list-column; drop it before writing to CSV.
full_ab <- full_ab[, names(full_ab)!="end_dates"]
write.csv(full_ab, file = paste0(box_loc, "/ProcessedData/merged_ab.csv"), row.names = F)
###
# Per-village summaries of the NTL pre-trend.
# BUG FIX: tapply() requires X and INDEX to have the same length; the old call
# passed the whole data.frame as X (length = #columns) with a row-length INDEX,
# which errors. Pass the column vector instead.
tapply(full_ab$dmsp_pretrend, full_ab$village_name, FUN = function(x) summary(x))
# Spot check: SDs of the village-constant merged covariates should be ~0
# within each village.
test <- aggregate(full_ab[, c("population", "dist_to_city", "dmsp_pretrend", "viirs2012max")], by=list(full_ab$village_name), FUN=sd)
test <- full_ab[full_ab$village_name=="Nablus City",]
# Village means (non-numeric columns yield NAs with warnings -- expected).
test2 <- aggregate(full_ab, by=list(full_ab$village_name), FUN=mean)
test2 <- test2[test2$Group.1 %in% c("Tammoun", "Tayaseer", "Tulkarem", "al-zawya"), ]
# Interactive inspection only; View() does nothing useful in batch runs.
View(full_ab[full_ab$village_name=="Tammoun",])
View(full_ab[full_ab$village_name=="Tayaseer",])
###
# Difference-in-differences style regressions: each outcome is regressed on
# wave4 x treatment plus individual/geographic controls and village fixed
# effects, on the pooled panel full_ab.
outcomes <- c("q7001",
"q7002",
#"q7011",
"q707",
#"q708",
#"q709a",
#"q7114",
#"q714us",
#"q714sa",
#"q714is",
#"q8341",
"q101",
"q102",
"q2011",
"q2013",
"q511",
"q513",
"q5162")
covars <- c("male",
"age",
"education",
"married",
"muslim",
#"income",
"urban",
"rural",
"refugee_camp",
"unemployed",
"own_a_car",
"population",
"dist_to_city",
"dmsp_pretrend",
"viirs2012max")
# Store fits in a proper list (replaces the previous assign("modi", ...)
# pattern, which scattered mod1..modN through the global workspace).
mod <- vector("list", length(outcomes))
for(i in seq_along(outcomes)) {
# BUG FIX: full_ab's first column was renamed to village_name above, so the
# village fixed effect must reference village_name (v05aENG no longer exists
# in full_ab and would make lm() error).
form <- paste0(outcomes[i], " ~ wave4*treatment + ", paste(covars, collapse = " + "), " + factor(village_name)")
mod[[i]] <- lm(form, data = full_ab[!is.na(full_ab[, outcomes[i]]), ])
}
# TODO(review): hard-coded local output path -- consider writing under box_loc.
stargazer(mod[[1]], mod[[2]], type = "html", title = "Regressions", out = "/Users/christianbaehr/Desktop/test.html")
# (a broken duplicate of the Wave IV control-group governance table was
# removed here: by this point test4 is a plain data.frame, so test4@data
# would error, and the identical table was already written above)
# Rename the raw village/district columns on the wave objects themselves
# (full_ab was already renamed when it was built).
names(test3)[names(test3)=="v05aENG"] <- "village_name"
names(test3)[names(test3)=="v04"] <- "district_name"
names(test4)[names(test4)=="v05aENG"] <- "village_name"
names(test4)[names(test4)=="v04"] <- "district_name"
# NOTE(review): the out_names/out_labels pair defined immediately below is
# dead code -- it is overwritten by the second pair before ever being used.
out_names <- c("q7001",
"q7002",
"q7011",
"q707",
"q708",
"q709a",
"q7114",
"q714us",
"q714sa",
"q714is",
"q8341")
out_labels <- c("Do you prefer economic relations btwn Pal./US improve",
"Do you prefer economic relations btwn Pal./Saudi improve",
"How does US influence democracy development in Pal.",
"*Ordinary* Americans are generally good people",
"American and Western culture has positive aspects",
"Do you support the two-state Arab-Israeli solution",
"Is western influence an obstacle to reform in Pal.?",
"What country poses greatest threat to your country (US Dummy)",
"What country poses greatest threat to your country (Saudi Dummy)",
"What country poses greatest threat to your country (ISR Dummy)",
"To what degree do you feel angry towards the US?")
out_names <- c("q101",
"q102",
"q2011",
"q2013",
"q511",
"q513",
"q5162")
out_labels <- c("How do you evaluate the current economic state in Pal.?",
"Will the economic situation in Pal. be better in 3-5 yrs.?",
"To what extent do you trust the government (cabinet)?",
"To what extent do you trust the Parliament?",
"To what extent is your country democratic?",
"How satisfied are you with the governments performance?",
"Democratic regimes are indecisive and full of problems. Do you agree?")
# FIXME(review): merge() with no arguments errors at run time -- this line is
# clearly unfinished (intended arguments unknown); complete or delete it.
full_ab <- merge()
|
385091fc4f480a4e8a6b35f1dcc8af9588e039ad
|
2f169d6f13ce556633c34f7b2378a201678e7ddf
|
/r_scripts/clim_and_veg/pww_veg_and_clim.r
|
05220b8256a9baacb606ff06319b01ff51417929
|
[] |
no_license
|
treelover/climate
|
6e10dd89f12cab4cdccd62e1f796ef4409694d76
|
80cff9b45b335fb71d36409b2ef22f5cbfed3d35
|
refs/heads/master
| 2021-01-20T00:40:00.468950
| 2015-07-10T02:46:32
| 2015-07-10T02:46:32
| 38,344,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,072
|
r
|
pww_veg_and_clim.r
|
#This code reads in the veg data for each site, the climate data for each site,
#and computes a multi-month average of various climate metrics for each plant.
#Libraries and functions
#---------------------------------------#
source('/home/adam/Dropbox/sapflow/r_scripts/fun/climread.r')
source('/home/adam/Dropbox/sapflow/r_scripts/fun/read_lau_2010.r')
source('/home/adam/Dropbox/sapflow/r_scripts/fun/read_lau_2011.r')
source('/home/adam/Dropbox/sapflow/r_scripts/fun/read_lau_2012.r')
source('/home/adam/Dropbox/sapflow/r_scripts/fun/read_palam_2010.r')
source('/home/adam/Dropbox/sapflow/r_scripts/fun/read_pww_2013.r')
library(WriteXLS)
#---------------------------------------#
#Directories
#---------------------------------------#
cdir = '/home/adam/Dropbox/sapflow/clim_screened/'
dates = '20140404'
#---------------------------------------#
#Interval over which to aggregate data, in SECONDS (used with POSIXct below).
#BECAUSE THE CLIMATE DATA DOES NOT GO BACK SIX MONTHS, THE PERIOD IS CAPPED
#AT THE LENGTH OF RECORD AVAILABLE.
#NOTE(review): the original comment said 118 days, but the code uses 110
#days (60*60*24*110) -- confirm which window is intended.
#---------------------------------------#
per = 60*60*24*110
#---------------------------------------#
#Maximum fraction of a window that may be NA and still yield a valid number;
#windows with >= 80% missing are left as NA (the checks below use "< nathresh").
#---------------------------------------#
nathresh = 0.8
#---------------------------------------#
#Load up the climate data (creates `slist`, a per-site list of climate frames).
#---------------------------------------#
load(paste(cdir,'all_stations_',dates,'.Rdat',sep=''))
#---------------------------------------#
#PUUWAAWAA
#---------------------------------------#
#Pulling out the climate data for this location
clim = slist[['pww']]
#Veg data
vdir = '/home/adam/Dropbox/sapflow/veg/pww/'
veg13 = read_pww_2013(paste(vdir,'pww_resurvey_2013_v02_clim_EDITED.csv',sep=''))
veg14 = read_pww_2013(paste(vdir,'pww_resurvey_2014_v01_clim_EDITED.csv',sep=''))
#Getting rid of entries where there's "no growth", or rather growth could not be calculated
#BUG FIX: x[-which(cond), ] silently drops EVERY row when which() matches
#nothing (x[integer(0), ]); the %in% form is safe with zero matches and,
#like which(), keeps rows where Growth. is NA.
veg13 = veg13[!(veg13$Growth. %in% 'no'),]
veg14 = veg14[!(veg14$Growth. %in% 'no'),]
#Identifying trees common to 2011 and 2010
vegmatch = match(veg14$TAG_1,veg13$TAG_2)
#BUG FIX: same -which() footgun as above; !is.na() is always safe (and avoids
#the "== T" comparison).
vegmatch = vegmatch[!is.na(vegmatch)]
#ID of trees that appear in all datasets:
treeID = sort(veg13$TAG_2[vegmatch])
#Ordering the datasets to match each other
veg13 = veg13[match(treeID,veg13$TAG_2),]
veg14 = veg14[match(treeID,veg14$TAG_1),]
#Filling in a data frame with the PWW vegetation data
#...........................................................#
# One row per tree; survey/growth columns exist for 2010-2014 but only the
# 2013/2014 slots are populated at this site (others stay NA).
veg = as.data.frame(matrix(NA,nrow = length(treeID),ncol = 14))
colnames(veg) = c('tag','site','species','dbh','survey_2010','survey_2011','survey_2012','survey_2013','survey_2014','growth_2010','growth_2011','growth_2012','growth_2013','growth_2014')
veg[,'tag'] = treeID
veg[,'site'] = rep('pww',length(treeID))
veg[,'species'] = veg13$SPECIES_2
veg[,'dbh'] = veg14$DBH_2
veg$survey_2013 = veg13$DATE_2
veg$survey_2014 = veg14$DATE_2
veg$growth_2013 = veg13$cm.day
veg$growth_2014 = veg14$cm.day
#...........................................................#
#Filling in a data frame with the climate summaries per tree
#NOTE(review): header comment said "Palamanui" but this is the PWW section.
#...........................................................#
vegclim = as.data.frame(matrix(NA,nrow = length(treeID),ncol = 40))
colnames(vegclim) = c('Tave_2010','Tave_2011','Tave_2012','Tave_2013','Tave_2014',
'Tmin_2010','Tmin_2011','Tmin_2012','Tmin_2013','Tmin_2014',
'Tmax_2010','Tmax_2011','Tmax_2012','Tmax_2013','Tmax_2014',
'RF_2010','RF_2011','RF_2012','RF_2013','RF_2014',
'Cld_2010','Cld_2011','Cld_2012','Cld_2013','Cld_2014',
'SW_2010','SW_2011','SW_2012','SW_2013','SW_2014',
'SM_2010','SM_2011','SM_2012','SM_2013','SM_2014',
'WS_2010','WS_2011','WS_2012','WS_2013','WS_2014')
#Factor of days and a vector of unique days (yyyy-mm-dd prefix of timestamps)
dayfac = substr(clim$yyyy.mm.dd.hh.mm,1,10)
dayuni = unique(substr(clim$yyyy.mm.dd.hh.mm,1,10))
#The climate variables (sensor pairs/triples averaged where duplicated)
Tair = rowMeans(clim[,c('Tair_1','Tair_2')],na.rm=T)
RF = clim$RF
SW = clim$SWup
SM = rowMeans(cbind(clim$SM_1,clim$SM_2,clim$SM_3),na.rm=T)
WS = clim$WS
# Daily min/max air temperature, indexed by day string.
daymins = tapply(Tair,dayfac,min)
daymaxs = tapply(Tair,dayfac,max)
cld = clim$Cloud
# Row indices of daytime observations (used for the cloudiness index).
dayID = which(clim$DayFlag == 1)
# For each tree, find the climate-record rows in the `per`-second window
# ending at each survey date, then fill the vegclim columns with window
# means/sums, subject to the nathresh missing-data cap.
# NOTE(review): which() returns integer(0) if a survey midnight is missing
# from clim$yyyy.mm.dd.hh.mm, and `integer(0):n` then errors -- assumes every
# survey date exists in the climate record; confirm.
for(i in 1:nrow(veg)){
win2013 = which(clim$yyyy.mm.dd.hh.mm == as.POSIXct(paste(veg$survey_2013[i],'00:00:00')))
win2013 = which(clim$yyyy.mm.dd.hh.mm == as.POSIXct(paste(veg$survey_2013[i],'00:00:00'))-per):win2013
win2014 = which(clim$yyyy.mm.dd.hh.mm == as.POSIXct(paste(veg$survey_2014[i],'00:00:00')))
win2014 = which(clim$yyyy.mm.dd.hh.mm == as.POSIXct(paste(veg$survey_2014[i],'00:00:00'))-per):win2014
#Average Temperature
if(length(which(is.na(Tair[win2013])))/length(win2013) < nathresh){vegclim$Tave_2013[i] = mean(Tair[win2013],na.rm=T)} #Tave 2013
if(length(which(is.na(Tair[win2014])))/length(win2014) < nathresh){vegclim$Tave_2014[i] = mean(Tair[win2014],na.rm=T)} #Tave 2014
#Rainfall: window total rescaled to a per-day rate (records are 10-minute,
#so 6*24 records per day)
if(length(which(is.na(RF[win2013])))/length(win2013) < nathresh){vegclim$RF_2013[i] = sum(RF[win2013],na.rm=T)/(length(win2013)/(6*24))} #RF 2013
if(length(which(is.na(RF[win2014])))/length(win2014) < nathresh){vegclim$RF_2014[i] = sum(RF[win2014],na.rm=T)/(length(win2014)/(6*24))} #RF 2014
#Shortwave radiation
if(length(which(is.na(SW[win2013])))/length(win2013) < nathresh){vegclim$SW_2013[i] = mean(SW[win2013],na.rm=T)} #SW 2013
if(length(which(is.na(SW[win2014])))/length(win2014) < nathresh){vegclim$SW_2014[i] = mean(SW[win2014],na.rm=T)} #SW 2014
#Soil Moisture
if(length(which(is.na(SM[win2013])))/length(win2013) < nathresh){vegclim$SM_2013[i] = mean(SM[win2013],na.rm=T)} #SM 2013
if(length(which(is.na(SM[win2014])))/length(win2014) < nathresh){vegclim$SM_2014[i] = mean(SM[win2014],na.rm=T)} #SM 2014
#Windspeed
if(length(which(is.na(WS[win2013])))/length(win2013) < nathresh){vegclim$WS_2013[i] = mean(WS[win2013],na.rm=T)} #WS 2013
if(length(which(is.na(WS[win2014])))/length(win2014) < nathresh){vegclim$WS_2014[i] = mean(WS[win2014],na.rm=T)} #WS 2014
#Now the tricky ones: Minimum and maximum temperature (means of daily
#extremes over the window, looked up in the dayuni/daymins/daymaxs tables)
#NOTE(review): `veg$survey_20xx[i]-per` mixes a seconds-based `per` with what
#the dayuni comparison implies is a date value; Date arithmetic is in days,
#so this window looks suspect -- verify the Tmin/Tmax results.
#+=+=+=+=+=+=+$
#2013
if(length(which(is.na(Tair[win2013])))/length(win2013) < nathresh){
vegclim$Tmin_2013[i] = mean(daymins[which(dayuni == (veg$survey_2013[i]-per)):which(dayuni == veg$survey_2013[i])],na.rm=T)
vegclim$Tmax_2013[i] = mean(daymaxs[which(dayuni == (veg$survey_2013[i]-per)):which(dayuni == veg$survey_2013[i])],na.rm=T)}
#2014
if(length(which(is.na(Tair[win2014])))/length(win2014) < nathresh){
vegclim$Tmin_2014[i] = mean(daymins[which(dayuni == (veg$survey_2014[i]-per)):which(dayuni == veg$survey_2014[i])],na.rm=T)
vegclim$Tmax_2014[i] = mean(daymaxs[which(dayuni == (veg$survey_2014[i]-per)):which(dayuni == veg$survey_2014[i])],na.rm=T)}
#+=+=+=+=+=+=+$
#Cloudiness index
#+=+=+=+=+=+=+$
#Only taking the entries in the window that correspond to day
cldwin2013 = dayID[dayID %in% win2013]
cldwin2014 = dayID[dayID %in% win2014]
if(length(which(is.na(cld[cldwin2013])))/length(cldwin2013) < nathresh){vegclim$Cld_2013[i] = mean(cld[cldwin2013],na.rm=T)} #cld 2013
if(length(which(is.na(cld[cldwin2014])))/length(cldwin2014) < nathresh){vegclim$Cld_2014[i] = mean(cld[cldwin2014],na.rm=T)} #cld 2014
#+=+=+=+=+=+=+$
#Progress message every 100 trees
if(i/100 == round(i/100,0)){print(paste(i,'trees done'))}
}
#...........................................................#
#PWW DONE! STICK THE CLIM MATRIX TO THE VEG MATRIX
# One row per tree: veg attributes + windowed climate summaries.
pwwmasta = cbind(veg,vegclim)
#write it out for its own self (WriteXLS takes the OBJECT NAME as a string)
WriteXLS(x = 'pwwmasta',ExcelFileName = paste(vdir,'Pww_master_climveg_6mo.xls',sep=''),AdjWidth = T)
#============================================================#
#------------------------------------------------------------#
#............................................................#
#------------------------------------------------------------#
#============================================================#
|
c7475eaad3d7c723fc7c629f333c8961aadf07f9
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Sampling_Regression_Experiment_Design_and_Analysis/mink.r
|
a5e3b974e87aeb6fc4cf010a43288b486f38f61a
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,516
|
r
|
mink.r
|
# Mink pelts in Saskatchewan
# Dealing with Autocorrelation in time series
# 2014-05-05 CJS First edition
# L.B. Keith (1963) collected information on the number of
# mink-pelts from Saskatchewan, Canada over a 30 year period.
# This is data series 3707 in the
# NERC Centre for Population Biology, Imperial College (1999)
# The Global Population Dynamics Database available at
# \url{http://www.sw.ic.ac.uk/cpb/cpb/gpdd.html}.
options(useFancyQuotes=FALSE) # renders summary output corrects
library(car)
library(ggplot2)
library(gridExtra)
library(Kendall)
library(lmtest)
library(lsmeans)
library(nlme)
library(plyr)
source("../../schwarz.functions.r")
# Read in the data. Declare Epoch as a factor. Remove data points when location changed in a year
sink('mink-R-010.txt', split=TRUE)
##---part010b;
mink <- read.csv("mink.csv", header=TRUE,
as.is=TRUE, strip.white=TRUE,
na.string=".")
mink$logPelts <- log(mink$Pelts)
head(mink)
str(mink)
##---part010e;
sink()
# make an initial plot of the data
# Notice how we specify a different plotting symbol for each Epoch.
##---partprelimplotb;
plotprelim <- ggplot(data=mink, aes(x=Year, y=logPelts))+
ggtitle("Mink Pelts over time")+
xlab("Year")+ylab("log(Mink Pelts)")+
geom_point(size=4)+geom_line()
plotprelim
##---partprelimplote;
ggsave(plot=plotprelim, file='mink-R-prelimplot.png')
# Using Base R graphics
with(mink, plot(Year, logPelts, type="b",
main='Mink Pelts over time',
xlab='Year',ylab="log(Mink Pelts) ") )
sink('mink-R-regfit.txt', split=TRUE)
##---partregfitb;
# Fit the regression line to the log(Pelts)
mink.fit <- lm( logPelts ~ Year, data=mink)
summary(mink.fit)
##---partregfite;
sink()
##---partresidlagb;
# Look at residual plot over time
resid <- data.frame(resid=resid(mink.fit),Year=mink$Year)
resid$lagresid <- c(NA, resid$resid[1:(length(resid$resid)-1)])
residtimeplot <- ggplot(data=resid, aes(x=Year, y=resid))+
ggtitle("Time plot of residuals from a simple linear fit")+
geom_point()+
geom_line()+
geom_hline(yintercept=0)
residtimeplot
residlagplot <- ggplot(data=resid, aes(x=lagresid, y=resid))+
ggtitle("Lag plot of residuals")+
geom_point()
residlagplot
##---partresidlage;
ggsave(plot=residtimeplot, file='mink-R-residtimeplot.png')
ggsave(plot=residlagplot, file='mink-R-residlagplot.png')
##---partdiagplotb;
# look at diagnostic plot
plotdiag <- sf.autoplot.lm(mink.fit)
plotdiag
##---partdiagplote;
ggsave(plot=plotdiag, file='mink-R-diagplot.png')
# Get the diagnostic plots using Base R graphics
layout(matrix(1:4,2,2))
plot(mink.fit)
layout(1)
sink("mink-R-dwtest.txt", split=TRUE)
##---partdwtestb;
# check for autocorrelation using Durbin-Watson test.
# You can use the durbinWatsontest in the car package or the
# dwtest in the lmtest package
# For small sample sizes both are fine; for larger sample sizes use the lmtest package
# Note the difference in the default direction of the alternative hypothesis
durbinWatsonTest(mink.fit) # from the car package
dwtest(mink.fit) # from the lmtest package
##---partdwteste;
sink()
sink('mink-R-ar1.txt', split=TRUE)
##---partar1b;
# Fit a model that allows for autocorrelation using gls
mink.fit.ar1 <- gls(logPelts ~ Year, data=mink,
correlation=corAR1(form=~1))
summary(mink.fit.ar1)
anova(mink.fit.ar1)
# Fit a model that allows for autocorrelation using ARIMA
mink.fit.arima <- with(mink, arima(logPelts, xreg=Year, order=c(1,0,0)))
mink.fit.arima
##---partar1e;
sink()
sink("mink-R-fitpieces.txt", split=TRUE)
##---partfitpiecesb;
# Extract the individual parts of the fit using the
# standard methods. Note that because Epoch is a factor
# DO NOT USE THE ESTIMATES from the summary() to estimate
# the Epoch effect because these estimates depend on the
# internal parameterization used. Use the lsmeans() function instead
summary(mink.fit.ar1)
coef(mink.fit.ar1)
sqrt(diag(vcov(mink.fit.ar1))) # gives the SE
confint(mink.fit.ar1)
names(summary(mink.fit.ar1))
summary(mink.fit.ar1)$r.squared
summary(mink.fit.ar1)$sigma
##---partfitpiecese;
sink()
# predictions are now much more complex because
# of the autocorrelation. The general problem is that
# the autocorrelation structure implies that predictions at
# a particular year must incorporate the residuals from
# the previous years. Of course, this influence will diminish
# once you get further and further past the end of the series.
#
# Contact me for details
|
68ef630e18013a1697557cfb290e11a47375f152
|
3da3704c5e87255697f75a5ec38effd2e8480ce6
|
/Kaggle/Yelp_2013/DataPreprocessing/2013_yelp_ryelp_review.R
|
49fc4b167fc941af8c47a162c0238c1123bb7803
|
[
"Apache-2.0"
] |
permissive
|
chaoannricardo/StudyNotes
|
4e83a721ab63a61a12e238136061d312e13b81ea
|
c27ef93ddd3803b7019c48457afae096a5c676a4
|
refs/heads/master
| 2022-12-25T01:04:18.184661
| 2022-03-20T14:47:39
| 2022-03-20T14:47:39
| 196,901,650
| 2
| 1
|
Apache-2.0
| 2022-12-14T12:06:30
| 2019-07-15T01:20:23
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,925
|
r
|
2013_yelp_ryelp_review.R
|
# yelp yelp_training_set_review.json===============================================
library(jsonlite)
# Stream the training-set reviews JSON into a data frame.
review_2013 <- stream_in(file("C:/Users/Student/Google_Drive/BDSE10_Project/DataSets/yelp-recsys-2013/yelp_training_set/yelp_training_set/yelp_training_set_review.json"))
# "votes" is a nested data frame with columns funny / useful / cool.
data.c <- review_2013[,c("votes")]
funny <- data.c[,1]
useful <- data.c[,2]
cool <- data.c[,3]
# Total vote count per review, used to turn raw counts into shares.
data.n <- c(funny + useful + cool)
funny <- (data.c[,1] / data.n)
useful <- (data.c[,2] / data.n)
cool <- (data.c[,3] / data.n)
data.c <- as.data.frame(cbind(funny, useful, cool))
# Reviews with zero votes divide by 0 -> NaN; is.na() is TRUE for NaN,
# so those shares are reset to 0 here.
data.c[is.na(data.c)] <- 0
# NOTE(review): `data` is never defined in this script, so ncol(data)
# resolves to base R's data() function and returns NULL; the index
# collapses to -c(1) and only column 1 is dropped. Probably meant
# ncol(review_2013) -- confirm against the intended schema.
data.k <-review_2013[,-c(1,ncol(data))]
data_clean1 <- cbind(data.c, data.k)
# Delete "text" column
data_clean1 <- data_clean1[,-8]
# Delete "type" column
data_clean1 <- data_clean1[,-8]
# Extract "Year' & "Month"
library(data.table)
setDT(data_clean1)[, Year := format(as.Date(data_clean1$date), "%Y") ]
setDT(data_clean1)[, Month := format(as.Date(data_clean1$date), "%m") ]
# Flatten "Year" & "Month"
library("nnet")
flatten = as.data.frame(class.ind(data_clean1$Year))
colnames(flatten) <- paste("Year-", colnames(flatten), sep = "")
data_clean1 <- cbind(data_clean1, flatten)
flatten = as.data.frame(class.ind(data_clean1$Month))
colnames(flatten) <- paste("Month-", colnames(flatten), sep = "")
data_clean1 <- cbind(data_clean1, flatten)
# Delete "date"
data_clean1 <- data_clean1[,-7]
data_clean1 <- data_clean1[,-8]
data_clean1 <- data_clean1[,-8]
# Rearrage Column
data_user_id <- data_clean1[,4]
data_review_id <- data_clean1[,5]
data_business_id <- data_clean1[,7]
data_clean1 <- cbind(data_review_id, data_clean1)
data_clean1 <- cbind(data_user_id, data_clean1)
data_clean1 <- cbind(data_business_id, data_clean1)
# Delete Unused Columns
data_clean1 <- data_clean1[,-7]
data_clean1 <- data_clean1[,-7]
data_clean1 <- data_clean1[,-8]
write.csv(data_clean1, "C:/Users/richi/Google_Drive/BDSE10_Project/CleanDataSets/yelp-recsys-2013/training_review_clean_190513.csv", row.names = FALSE)
# yelp yelp_test_set_review.json===============================================
library(jsonlite)
review_2013 <- stream_in(file("C:/Users/Student/Google_Drive/BDSE10_Project/DataSets/yelp-recsys-2013/yelp_test_set/yelp_test_set/yelp_test_set_review.json"))
data_clean1 <- review_2013
write.csv(data_clean1, "C:/Users/Student/Google_Drive/BDSE10_Project/CleanDataSets/yelp-recsys-2013/Test_review_clean.csv", row.names = FALSE)
# yelp yelp_finaltest_set_review.json===============================================
library(jsonlite)
review_2013 <- stream_in(file("C:/Users/Student/Google_Drive/BDSE10_Project/DataSets/yelp-recsys-2013/final_test_set/final_test_set/final_test_set_review.json"))
data_clean1 <- review_2013
write.csv(data_clean1, "C:/Users/Student/Google_Drive/BDSE10_Project/CleanDataSets/yelp-recsys-2013/finalTest_review_clean.csv", row.names = FALSE)
|
608a0d562c0b2862b0e724164510b4cd6892533c
|
2e15bafa9ef722b0c057f2cfb819059f017425f4
|
/Codes/Ubuntu/MergeDataSets.r
|
1dd36b3924978c10c5d63877c317e4dde94b2a9e
|
[] |
no_license
|
vivanreddy/DataWranglingWithR
|
50493838d5378cb1fc9051faf212ff032722c5ea
|
b460ca6846f7dd7742fc44513a06afc21ff10a90
|
refs/heads/master
| 2021-09-03T21:28:18.244796
| 2018-01-12T05:01:21
| 2018-01-12T05:01:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
r
|
MergeDataSets.r
|
# Merging Two Data Frames
# NOTE(review): the original script called join()/join_all(), which live
# in plyr, while only dplyr was loaded -- it errored at runtime. The
# dplyr equivalents below keep the same left-join-on-"id" semantics.
library(dplyr)
df1 <- data.frame(id = sample(1:10), x = rnorm(10))
df1
df2 <- data.frame(id = sample(1:10), y = rnorm(10))
df2
# Left-join the two frames on their shared "id" column, then sort by id.
arrange(left_join(df1, df2, by = "id"), id)
# If having multiple Data Frames
df1 <- data.frame(id = sample(1:10), x = rnorm(10))
df2 <- data.frame(id = sample(1:10), y = rnorm(10))
df3 <- data.frame(id = sample(1:10), z = rnorm(10))
dfList <- list(df1, df2, df3)
dfList
# Successively left-join every frame in the list on the common "id".
Reduce(function(acc, nxt) left_join(acc, nxt, by = "id"), dfList)
|
16d150c864bb53385c8489726b9ef79becc9463c
|
d1b8170deac8cb349411a7e53f05976501ef98e4
|
/my_script/exon_length/calculate_exon_lenght.R
|
8f95e5f3224be0d797f084dfe8c52cc5943b57a2
|
[] |
no_license
|
x2yline/coursera_notes
|
c62f022393d8cb778d5b0ae2ee56e27235a4b57f
|
b8951c4231247362f1fb8799e7f0e7137a1e8404
|
refs/heads/master
| 2021-01-12T05:26:03.055869
| 2017-05-21T10:57:25
| 2017-05-21T10:57:25
| 77,927,217
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,077
|
r
|
calculate_exon_lenght.R
|
setwd('E:\\r\\biotrainee_demo1')
# Start timestamp; elapsed time is reported at the end of the script.
t1 <- Sys.time()
directory = 'CCDS.current.txt'
# Read the CCDS table and keep only column 1 (chromosome) and
# column 10 (exon coordinate ranges).
data <- read.table(directory, sep='\t',
stringsAsFactors=F, header=T)[c(1,10)]
get_gene <-function(data_item){
  # Given one table row as a character vector (chromosome, exon-range
  # field), return the range string with its surrounding bracket
  # characters stripped, e.g. "[11-20, 30-40]" -> "11-20, 30-40".
  # Returns NULL (invisibly) when the range field is the placeholder "-".
  range_field <- data_item[2]
  if (range_field != '-') {
    substr(range_field, 2, nchar(range_field) - 1)
  }
}
get_exon <- function(gene){
  # Split a combined range string "a-b, c-d, ..." into its individual
  # "a-b" tokens and drop duplicates.
  tokens <- strsplit(gene, ", ", fixed = TRUE)[[1]]
  unique(tokens)
}
get_length <- function(exon){
  # Parse one "start-end" token into numerics and return
  # c(start, end - start, end): the two coordinates plus their span.
  endpoints <- as.numeric(strsplit(exon, "-")[[1]])
  c(endpoints[1], endpoints[2] - endpoints[1], endpoints[2])
}
exon_length = 0
# For each chromosome, collect all exon ranges, expand every range to
# its covered base positions, de-duplicate, and accumulate the count.
for (i in unique(data[,1])){
  # Concatenate this chromosome's exon-range strings (rows whose range
  # field is not "-") into one "a-b, c-d, ..." string.
  gene_i <- paste(apply(data[which(data[1]==i & data[2] != '-'),], 1, get_gene),collapse=', ')
  # exon_i is a list of c(start, span, end) triples, one per unique range.
  exon_i <- lapply(get_exon(gene_i), get_length)
  mat <- matrix(unlist(exon_i), ncol=3, byrow = T)
  #mat <- mat[order(mat[,2], decreasing = F),]
  #mat <- mat[order(mat[,1], decreasing = F),]
  # A matrix is used here because a plain vector of this size can hit
  # "cannot allocate vector of size n MB" memory errors.
  # Expand each range to every base position it covers (columns 1 and 3
  # are start/end), then keep unique positions only.
  base_loc <- matrix(unique(unlist(apply(mat, 1, function(x) c(x[1]:x[3])))))
  exon_length <- exon_length + dim(base_loc)[1] * dim(base_loc)[2]
}
# Elapsed wall-clock time for the whole computation.
difftime(Sys.time(), t1, units = 'secs')
print(paste('all exons length is',exon_length))
|
9555df9ffc65c04bdd419b3b9a7138fd898e31ce
|
8e67d42d194ef6a3783158f20281135bf42821ee
|
/week5/lecture/t-test.R
|
7e418cbd430bb382f3116b9d76c3bad945a46b65
|
[] |
no_license
|
Mela2014/coursera-course-statistics_one
|
b6db95df6c7ef09c5cf75a8278d3d3eea48cf51e
|
122dec0a8922a0a0808fe50ed7ca75162d6282c9
|
refs/heads/master
| 2021-01-16T17:59:14.571445
| 2012-10-17T11:33:15
| 2012-10-17T11:33:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
t-test.R
|
library(psych)
# Read data into R (working-memory training data set).
wm <- read.delim("./supplemental-stats1_ex07.txt")
# Descriptive statistics, split by experimental condition.
describeBy(wm, wm$cond)
# Split into control (train == "0") and trained (train == "1") groups.
wm.c <- subset(wm, wm$train == "0")
wm.t <- subset(wm, wm$train == "1")
# Per-group descriptives. NOTE(review): despite the file name, no t-test
# is run in this visible portion -- presumably it follows in the lecture.
wm.c.out <- describe(wm.c)
wm.t.out <- describe(wm.t)
|
4e3af7f916e45c320c4fa5820c62204b7d0334a2
|
d2415986080d77471c4216419d93067729dd3a5e
|
/1-curso/trabalhos/grupo-idat.R
|
5eef7f4ab6d74be8089025e1cc032f55f42d51d6
|
[
"MIT"
] |
permissive
|
henriquepgomide/r-ciencias-saude
|
c0d18fa053017720923016f52b4cf8415a703707
|
65bbab57b17b50b1e550273ac19c656d25b90a49
|
refs/heads/master
| 2016-09-06T06:57:28.420219
| 2014-10-08T20:59:21
| 2014-10-08T20:59:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,801
|
r
|
grupo-idat.R
|
# Escala IDAT
##--- Importando Banco de Dados ---##
# Para importar o banco de dados, vá na planilha dos arquivos, escolha Arquivo -> Download -> arquivo csv.
# Digite o número de colunas do seu questionário
ncol <- 32
# Escolha o arquivo baixado
idatBruto <- read.csv(file.choose(), header=TRUE, fill = TRUE, col.names = paste0("v", seq_len(ncol)), na.strings = c("NA",""), encoding = "UTF8")
## Limpando a caquinha de ter editado o banco original no formulário docs
# Remover casos desnecessários. Casos maiores que 36 não existem, erros de importação devido a fórmula usada na coluna 32. Coluna 32 retirada porque o R will do the job for us below!
idat <- idatBruto[1:36, 1:31]
#---- IDAT total score ----
# Sum of items v12..v31 (20 items); an NA in any item makes the total NA.
idat$somaEscala <- idat$v12 + idat$v13 + idat$v14 + idat$v15 + idat$v16 + idat$v17 + idat$v18 + idat$v19 + idat$v20 + idat$v21 + idat$v22 + idat$v23 + idat$v24 + idat$v25 + idat$v26 + idat$v27 + idat$v28 + idat$v29 + idat$v30 + idat$v31
#---- Recode following Spielberger's criterion ----
# SPIELBERGER (1979) trait-anxiety cut-offs:
# < 33 low ("baixo")
# 33-49 medium ("médio")
# >49 high ("alto")
# NOTE(review): subscripted assignment with a logical index errors if the
# index contains NA, so this assumes somaEscala has no NA -- confirm.
idat$trAnsiedade[idat$somaEscala < 33] <- "baixo"
idat$trAnsiedade[idat$somaEscala >= 33 & idat$somaEscala <= 49 ] <- "médio"
idat$trAnsiedade[idat$somaEscala > 49 ] <- "alto"
# Em word, salvo em pdf
# 0 - Faça um guia das variáveis do banco com os valores possíveis segundo este exemplo - https://www.dropbox.com/s/2lccmbyqyscdn30/descricao-banco-auto-estima.pdf
# Script do R
# 1 - Qual a frequência de distribuição de pessoas por traços de ansiedade?
# 2 - Qual a medida de tendência central mais adequada para reportar a soma da escala (idat$somaEscala)?
# 4 - Qual gráfico você utilizaria para comparar os escores da escala (idat$somaEscala) por grupo que usa e não usa suplemento (idat$v9)?
|
4f9947a0bb603bba64729d89bc84b11407648127
|
7e38971daf48e04414cb0c65facd38c98648142b
|
/R/cleanAQRBABMonthly.R
|
4af319b32ba34c6fc96e087f0b88dc3bf73d2941
|
[] |
no_license
|
yn1/FFAQR
|
338800ec7209a10b33d79630ae2c082d7df1f22c
|
c173309a9415f9d83130650b8c7e4f5132fabdf4
|
refs/heads/master
| 2021-01-02T23:02:57.359761
| 2015-02-06T21:49:02
| 2015-02-06T21:49:02
| 29,973,189
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,333
|
r
|
cleanAQRBABMonthly.R
|
#' Reads in, cleans, and subdivides monthly BAB data set in data folder.
#'
#' Downloads AQR's monthly Betting-Against-Beta workbook, extracts the
#' factor columns (2:25) and the aggregate portfolio columns (26:30) from
#' the "BAB Factors" sheet, trims trailing non-data rows, parses dates,
#' and saves both data frames into the package's data folder.
cleanAQRBABMonthly <- function() {
  temp <- tempfile()
  # Ensure the temp download is removed even if a later step errors
  # (the original only unlinked on the success path).
  on.exit(unlink(temp), add = TRUE)
  BABMonthly <- "https://www.aqr.com/~/media/files/data-sets/betting-against-beta-equity-factors-monthly.xlsx"
  download.file(BABMonthly, temp, method = "curl")
  # Shared cleaning steps for one column slice of the "BAB Factors"
  # sheet: read the given columns, name the first column "Date", locate
  # the end of the data via findBreak (package helper), parse dates,
  # and keep only the data rows. Replaces two copy-pasted blocks.
  read_bab_slice <- function(colIndex) {
    df <- read.xlsx(temp, "BAB Factors", startRow = 19, colIndex = colIndex)
    row.names(df) <- NULL
    names(df)[1] <- "Date"
    lastRow <- findBreak(df, 1000, "NA") - 1
    df[, 1] <- ymd(df[, 1])
    df[(1:lastRow), ]
  }
  # Equity BAB factors (columns 2:25 of the sheet).
  AQRBABFactorsMonthly <- read_bab_slice(c(1:25))
  # Aggregate equity portfolios (columns 26:30 of the sheet).
  AQRBABPortfoliosMonthly <- read_bab_slice(c(1, 26:30))
  start <- system.file(package="FFAQR")
  save(AQRBABFactorsMonthly, file=paste0(start, "/data/AQRBABFactorsMonthly.Rdata"))
  save(AQRBABPortfoliosMonthly, file=paste0(start, "/data/AQRBABPortfoliosMonthly.Rdata"))
}
|
35bcedee16d6bf39a64bcd371780059b481b3fde
|
2b0fc0d1e74d09172e43d2c384e3188c3bf8e0a4
|
/R/readxrf.R
|
d46d5494eddca9e62f5f2f427656df28766ed784
|
[
"MIT"
] |
permissive
|
agryt/xrfr
|
86a8afa3b70548091e0172c85ac684aa6cee08d5
|
2bfa6f87946b00a7776f9f1dd9aaeadec3083e0c
|
refs/heads/master
| 2023-04-26T06:54:54.359548
| 2021-05-11T09:54:18
| 2021-05-11T09:54:18
| 317,477,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,231
|
r
|
readxrf.R
|
#' Reading and joining data file and info file
#'
#' @description This function reads your two data frames and combines them into one. It will warn you if there are any samples that do not match between your datasets (the raw data and the information file).
#'
#' See vignette("xrfr") for more information.
#'
#' @return A data frame where your raw data and project information has been merged.
#'
#' @param raw_data The name of your data frame with raw data from the XRF machine.
#' @param project_info The name of your data frame with necessary information about the samples.
#'
#' @importFrom dplyr inner_join anti_join select contains rename_all
#' @importFrom stringr str_remove
#' @importFrom magrittr %>%
#'
#' @examples
#' \dontrun{
#' rawdata.df <- read_delim("xrf_rawdata.txt", delim = "\t", locale = locale(decimal_mark = ","))
#' projectinfo.df <- read_excel("xrf_projectinfo.xlsx")
#'
#' projectfile.df <- readxrf(raw_data = rawdata.df, project_info = projectinfo.df)
#' }
#'
#' @export
readxrf <- function(raw_data, project_info) {
  # Keep only the sample id, date, and intensity columns, and strip the
  # trailing qualifier from column names (everything after the first space).
  datafile.df <- as.data.frame(raw_data)
  datafile.df <- datafile.df %>%
    dplyr::select(c(Sample, Date, dplyr::contains("Int"))) %>%
    dplyr::rename_all(stringr::str_remove, pattern = " .*")
  # import info file
  infofile.df <- as.data.frame(project_info)
  # Validate required columns in a single pass instead of five
  # copy-pasted checks; the error message is unchanged.
  required_cols <- c("Filter_type", "Filter_size", "Filter_box_nr",
                     "Filter_blank", "Volume")
  if (!all(required_cols %in% names(infofile.df))) {
    stop("ERROR! Your project information file is missing one or more of the following columns: Filter_type, Filter_size, Filter_box_nr, Filter_blank, and Volume.")
  }
  if (is.numeric(infofile.df$Volume) == FALSE) {
    stop("ERROR! The column Volume in your project information file is not numeric. Make sure this column only contains numerical digits.")
  }
  # joining them into one data frame on the shared "Sample" column
  projectfile.df <- dplyr::inner_join(datafile.df, infofile.df, by = "Sample")
  # Samples present in the raw data but missing from the info file;
  # this should have 0 rows.
  notinprojectfile.df <- dplyr::anti_join(datafile.df, infofile.df, by = "Sample")
  # assigning warning message for if not all samples match
  if (nrow(notinprojectfile.df) > 0) {
    warning("WARNING! There are samples that do not match between your raw data file and information file.")
  }
  return(projectfile.df)
}
|
50d68111c67e0804eaba8e32b838d163f7f7059b
|
97dc93dfc0362cedeb8d28501def35308a6d48d3
|
/R/get_weather_all.R
|
df7ccbb8429577af552b124ffc034bb7290102a7
|
[] |
no_license
|
everdark/ggplot2_lecture_dsc2015
|
dbaa048ffcfe589b6db8f3c87b629b60f4c3154c
|
358477a5640eb240f7e28ceece1c4749855af287
|
refs/heads/master
| 2016-09-09T21:11:05.603583
| 2015-08-19T14:21:41
| 2015-08-19T14:21:41
| 38,595,117
| 2
| 7
| null | 2015-08-18T11:23:43
| 2015-07-06T03:16:38
|
HTML
|
UTF-8
|
R
| false
| false
| 1,259
|
r
|
get_weather_all.R
|
get_weather_all <- function() {
  # Reads every per-location monthly weather CSV under data/, stacks
  # them into one data frame, and cleans sentinel values:
  #   "T" (trace precipitation) -> 0, "x" (missing) -> NA.
  # Returns the combined data frame with location/date/month columns added.
  filepaths <- list.files("data", pattern = "[a-zA-Z]_....-...csv", full.names = TRUE)
  # Location name = file name minus the directory prefix and date suffix.
  locations <- gsub("data\\/|_....-..\\.csv", "", filepaths)
  # Collect each file in a list and bind once at the end: avoids the
  # O(n^2) rbind-in-loop pattern of the original.
  chunks <- vector("list", length(filepaths))
  for (i in seq_along(filepaths)) {
    temp <- read.csv(filepaths[i], stringsAsFactors = FALSE)
    temp$location <- locations[i]
    chunks[[i]] <- temp
  }
  weather_all <- do.call(rbind, chunks)
  weather_all$location <- factor(weather_all$location)
  # Record date taken from the first 10 characters of the T.Max.Time stamp.
  weather_all$date <- as.Date(substr(weather_all$T.Max.Time, 1, 10))
  weather_all$month <- months(weather_all$date)
  # "T" marks trace precipitation; treat as zero before numeric coercion.
  for (col in c("Precp", "PrecpMax10", "PrecpHrMax")) {
    weather_all[[col]][weather_all[[col]] == "T"] <- "0"
    weather_all[[col]] <- as.numeric(weather_all[[col]])
  }
  # "x" marks missing observations; convert to NA before numeric coercion.
  for (col in c("EvapA", "GloblRad")) {
    weather_all[[col]][weather_all[[col]] == "x"] <- NA
    weather_all[[col]] <- as.numeric(weather_all[[col]])
  }
  return(weather_all)
}
|
e2adf4600a877d9eaa7f934d72337a72cd6384b1
|
8dae905abbd5736c720561dfe8c8efa26af087a4
|
/scipts/sandbox rolling mean sum.R
|
56136bbae80f34d36bb8f2d12e4dcef8f6fdd2c8
|
[
"MIT"
] |
permissive
|
rosseji/rutils
|
99646a7d4d3e0233309798c15faa099053a28327
|
d5d58672a241ebc5b40f6bd7af62e07d95da1efc
|
refs/heads/master
| 2021-04-29T20:28:35.883416
| 2018-09-24T03:10:26
| 2018-09-24T03:10:26
| 121,598,084
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
sandbox rolling mean sum.R
|
library(dplyr)
library(purrr)
library(RcppRoll)
naive_forecast <- function(x, n) {
  # n-period naive rolling-mean forecast: position i holds the mean of
  # the n observations ending just before i; the first n entries are NA
  # (not enough history), so the result has the same length as x.
  rolled <- RcppRoll::roll_mean(x, n)
  shifted <- head(rolled, length(rolled) - 1)
  c(rep(NA, n), shifted)
}
# NOTE(review): the next two lines use x and n, which are only defined
# further below (L "x <- ..." / "n <- 3") -- this is sandbox scratch
# code and only runs cleanly if those assignments are executed first.
vals <- roll_mean(x, n)
c(rep(NA, n), head(vals, length(vals) - 1))
tibble(x = c(2,2,4,4, 2, 2,2,2)) %>%
  mutate(y = naive_forecast(!!sym("x"), 3))
RcppRoll::roll_sum
?roll_sum
x <- c(2,2,4,4, 2, 2,2,2)
n <- 3
# NOTE(review): roll_sum(x, 3) has length 6 here, so the negative index
# -length(x) == -8 is out of range and removes nothing -- the result is
# the full rolling-sum vector, and the c(rep(NA, n), vals) below ends up
# one element longer than x. Looks like the bug being explored.
vals <- roll_sum(x, n)[-length(x)]
vals
c(rep(NA, n), vals)
|
81ac7792ca56bd5c6616942e468caedc47b7f309
|
2de20569e5ee815f2b00948594835f0fdb830b34
|
/Lab3/Computer-lab-3.R
|
babe5bd6a914968a03d1d8736934737ae7df0e1a
|
[] |
no_license
|
GSternelov/Bayesian-Learning
|
3275c57012102e946145ddc762d33401f69154d0
|
9f3a9597c7febf56e33980ee0c54fe3684ac61e6
|
refs/heads/master
| 2021-05-31T21:36:29.827158
| 2016-06-04T13:45:00
| 2016-06-04T13:45:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,025
|
r
|
Computer-lab-3.R
|
#### Assignment 1 ####
rainfall <- read.delim("C:/Users/Gustav/Documents/Bayesian-Learning/Lab3/rainfall.dat", sep="", header = TRUE)
library(ggplot2)
library(gridExtra)
## a)
# priors and others
mu0 <- 1
kappa0 <- 1
v0 <- 1
sigma0 <- 1
n <- nrow(rainfall)
ybar <- colMeans(rainfall)
s2 <- var(rainfall[,1])
# Posteriors
muN <- (kappa0 / (kappa0 + n)) * mu0 + (n / (kappa0 + n)) * ybar
kappaN <- kappa0 + n
vN <- v0 +n
vNsigmaN <- v0*sigma0 + (n-1)*s2 + (kappa0*n / (kappa0 + n)) * (ybar - mu0)^2
sigmaN <- vNsigmaN / vN
# Simulations - Gibbs Sampling
sims <- data.frame(mu=0, sigma2=0)
muN <- (kappa0 / (kappa0 + n)) * mu0 + (n / (kappa0 + n)) * ybar
vNsigmaN <- v0*sigma0 + (n-1)*s2 + (kappa0*n / (kappa0 + n)) * (ybar - muN)^2
sigmaN <- vNsigmaN / vN
X <- rchisq(1, vN)
sigma2 <- (vN * sigmaN / X)
mu <- rnorm(1, muN, sqrt(sigma2/kappaN))
sims[1,1] <- mu
sims[1,2] <- sigma2
# Gibbs sampler for (mu, sigma^2): each iteration conditions on the
# previous draw stored in sims.
for (i in 2:1000){
  # Replace mu0 with the previous mu draw sims[i-1,1]
  # and sigma0 with the previous sigma^2 draw sims[i-1,2].
  muN <- (kappa0 / (kappa0 + n)) * sims[i-1,1] + (n / (kappa0 + n)) * ybar
  # Replace mu0 with muN in the scale update.
  vNsigmaN <- v0*sims[i-1,2] + (n-1)*s2 + (kappa0*n / (kappa0 + n)) * (ybar - muN)^2
  sigmaN <- vNsigmaN / vN
  # NOTE(review): the pre-loop draw above uses rchisq(1, vN) but this
  # loop uses n-1 degrees of freedom -- looks inconsistent; confirm
  # which posterior degrees of freedom are intended.
  X <- rchisq(1, n-1)
  sigma2 <- (vN * sigmaN / X)
  # Draw mu | sigma^2 from its conditional normal posterior.
  mu <- rnorm(1, muN, sqrt(sigma2/kappaN))
  sims[i,1] <- mu
  sims[i,2] <- sigma2
}
#trace plots - with burn-in!
tr_w_burn <- ggplot(sims, aes(x=1:nrow(sims), y=mu)) + geom_line() + theme_bw() + xlab("1:1000") + ylab("Mu") + ggtitle("Gibbs Sampling - Trace Plot - Mu") + geom_vline(xintercept=100, col="red", size=1.05)
tr_w_burn2 <- ggplot(sims, aes(x=1:nrow(sims), y=sigma2)) + geom_line() +
theme_bw() +ggtitle("Gibbs Sampling - Trace Plot - Sigma^2")+ xlab("1:1000") + geom_vline(xintercept=100, col="red", size=1.05)
grid.arrange(tr_w_burn, tr_w_burn2, ncol=2)
tr_wh_burn <- ggplot(sims[101:1000,], aes(x=101:nrow(sims), y=mu)) + geom_line() + theme_bw() + xlab("101:1000") + ylab("Mu") + ggtitle("Gibbs Sampling - Trace Plot - Mu\n Without burn-in")
tr_wh_burn2 <- ggplot(sims[101:1000,], aes(x=101:nrow(sims), y=sigma2)) + geom_line() + theme_bw() + ggtitle("Gibbs Sampling - Trace Plot - Sigma^2\n Without burn-in") + xlab("101:1000")
grid.arrange(tr_wh_burn, tr_wh_burn2, ncol=2)
# Looks at efficieny in terms of auto-correlation
acf_m <- acf(sims[101:1000,1], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_s <- acf(sims[101:1000,2], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_m <- data.frame(ACF=as.numeric(acf_m$acf), Lag=0:25)
acf_s <- data.frame(ACF=as.numeric(acf_s$acf), Lag=0:25)
acf_mu <- ggplot(acf_m, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw() + ggtitle("Auto-correlation for chain - Mu")
acf_sigma <- ggplot(acf_s, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw() + ggtitle("Auto-correlation for chain - Sigma^2")
grid.arrange(acf_mu, acf_sigma, ncol=2)
rainfall <- read.delim("C:/Users/Gustav/Documents/Bayesian-Learning/Lab3/rainfall.dat",
sep="", header = TRUE)
x <- as.matrix(rainfall['X136'])
# Model options
nComp <- 2 # Number of mixture components
# Prior options
alpha <- c(10, 10)
muPrior <- c(32.2681, 32.2681)
tau2Prior <- rep(10,nComp) # Prior std theta
sigma2_0 <- rep(var(x),nComp) # s20 (best guess of sigma2)
nu0 <- rep(4,nComp) # degrees of freedom for prior on sigma2
# MCMC options
nIter <- 1000 # Number of Gibbs sampling draws
# Plotting options
plotFit <- TRUE
lineColors <- c("blue", "green", "magenta", 'yellow')
################ END USER INPUT ###############
###### Defining a function that simulates from the
rScaledInvChi2 <- function(n, df, scale){
  # Draw n samples from the scaled inverse chi-squared distribution
  # Inv-ChiSq(df, scale): df * scale divided by chi-squared(df) draws.
  chi2_draws <- rchisq(n, df = df)
  (df * scale) / chi2_draws
}
####### Defining a function that simulates from a Dirichlet distribution
rDirichlet <- function(param){
  # One draw from a Dirichlet(param) distribution: independent
  # Gamma(param[j], 1) draws normalized to sum to one.
  # Returns a length(param)-by-1 matrix of component probabilities.
  nCat <- length(param)
  thetaDraws <- matrix(NA, nCat, 1)
  for (j in seq_len(nCat)) {
    thetaDraws[j] <- rgamma(1, param[j], 1)
  }
  thetaDraws / sum(thetaDraws)
}
# Simple function that converts between two different representations of the mixture allocation
S2alloc <- function(S){
  # Convert a one-hot allocation matrix S (nObs x nComp, exactly one 1
  # per row) into a vector of component indices, one per observation.
  nObs <- dim(S)[1]
  alloc <- rep(0, nObs)
  for (row in seq_len(nObs)) {
    alloc[row] <- which(S[row, ] == 1)
  }
  alloc
}
# Initial value for the MCMC
nObs <- length(x)
S <- t(rmultinom(nObs, size = 1 , prob = rep(1/nComp,nComp))) # nObs-by-nComp matrix with component allocations.
theta <- quantile(x, probs = seq(0,1,length = nComp))
sigma2 <- rep(var(x),nComp)
probObsInComp <- rep(NA, nComp)
# Setting up the plot
xGrid <- seq(min(x)-1*apply(x,2,sd),max(x)+1*apply(x,2,sd),length = 100)
xGridMin <- min(xGrid)
xGridMax <- max(xGrid)
# --- Gibbs sampler for a two-component normal mixture model -----------------
# Alternates between: (1) mixture weights w | allocations, (2) component means
# theta | allocations, (3) component variances sigma2 | allocations, and
# (4) allocations S | all parameters. Draws are stored per iteration in
# `simulations` for later trace/ACF diagnostics.
# NOTE(review): relies on objects defined earlier in the file (xGrid, x, S,
# alpha, nComp, tau2Prior, muPrior, nu0, sigma2_0, nObs, probObsInComp, nIter,
# plotFit) and on helpers S2alloc / rDirichlet / rScaledInvChi2 — not visible
# here; confirm against the full script.
mixDensMean <- rep(0,length(xGrid))
effIterCount <- 0
ylim <- c(0,2*max(hist(x,plot = FALSE)$density))
# One row per Gibbs iteration: weights, means and variances of both components.
simulations <- data.frame(w_1 = 0, w_2 = 0, mu_1 = 0, mu_2 = 0, sigma1=0, sigma2=0)
for (k in 1:nIter){
alloc <- S2alloc(S) # Just a function that converts between different representations of the group allocations
nAlloc <- colSums(S)
# Update components probabilities
w <- rDirichlet(alpha + nAlloc)
simulations[k,1] <- w[1]
simulations[k,2] <- w[2]
# Update theta's
for (j in 1:nComp){
# Conjugate normal update: posterior precision = prior + data precision,
# posterior mean = precision-weighted average of prior mean and group mean.
precPrior <- 1/tau2Prior[j]
precData <- nAlloc[j]/sigma2[j]
precPost <- precPrior + precData
wPrior <- precPrior/precPost
muPost <- wPrior*muPrior + (1-wPrior)*mean(x[alloc == j])
tau2Post <- 1/precPost
theta[j] <- rnorm(1, mean = muPost, sd = sqrt(tau2Post))
}
simulations[k,3] <- theta[1]
simulations[k,4] <- theta[2]
# Mixture mean at iteration k: sum of weight * component mean.
simulations$Expected[k] <- sum(simulations$w_1[k] * simulations$mu_1[k] +
simulations$w_2[k] * simulations$mu_2[k])
# Update sigma2's
for (j in 1:nComp){
# Scaled-inverse-chi^2 conjugate update for each component variance.
sigma2[j] <- rScaledInvChi2(1, df = nu0[j] + nAlloc[j], scale = (nu0[j]*sigma2_0[j] + sum((x[alloc == j] - theta[j])^2))/(nu0[j] + nAlloc[j]))
}
simulations[k,5] <- sigma2[1]
simulations[k,6] <- sigma2[2]
# Update allocation
for (i in 1:nObs){
for (j in 1:nComp){
probObsInComp[j] <- w[j]*dnorm(x[i], mean = theta[j], sd = sqrt(sigma2[j]))
}
# Draw a one-hot allocation row from the normalized component probabilities.
S[i,] <- t(rmultinom(1, size = 1 , prob = probObsInComp/sum(probObsInComp)))
}
# Printing the fitted density against data histogram
if (plotFit && (k%%1 ==0)){
effIterCount <- effIterCount + 1
#hist(x, breaks = 20, freq = FALSE, xlim = c(xGridMin,xGridMax), main = paste("Iteration number",k), ylim = ylim)
mixDens <- rep(0,length(xGrid))
components <- c()
for (j in 1:nComp){
compDens <- dnorm(xGrid,theta[j],sd = sqrt(sigma2[j]))
mixDens <- mixDens + w[j]*compDens
#lines(xGrid, compDens, type = "l", lwd = 2, col = lineColors[j])
components[j] <- paste("Component ",j)
}
# Running average of the mixture density over the effective iterations.
mixDensMean <- ((effIterCount-1)*mixDensMean + mixDens)/effIterCount
#lines(xGrid, mixDens, type = "l", lty = 2, lwd = 3, col = 'red')
#legend("topleft", box.lty = 1, legend = c("Data histogram",components, 'Mixture'),
#       col = c("black",lineColors[1:nComp], 'red'), lwd = 2)
}
}
# --- MCMC diagnostics: trace plots, auto-correlation, and density overlay ---
# Burn-in is taken as the first 100 of 1000 iterations (red vertical line).
# NOTE(review): uses ggplot2/gridExtra and the `rainfall` data loaded earlier.
Wh_b <- ggplot(simulations, aes(x=1:nrow(simulations), y=Expected)) + geom_line() + theme_bw() + geom_vline(xintercept=100, col="red", size=1.05) + xlab("1:1000") + ylab("Mu") + ggtitle("Trace plot for chain of values from the\n mixture model")
Wh_o_b <- ggplot(simulations[101:nrow(simulations),], aes(x=101:nrow(simulations), y=Expected)) + geom_line() + theme_bw() + xlab("101:1000") + ylab("Mu") + ggtitle("Trace plot for chain of values from the\n mixture model (without burn-in)")
grid.arrange(Wh_b, Wh_o_b, ncol=2)
# Looks at efficiency in terms of auto-correlation (column 7 = Expected)
acf_mix <- acf(simulations[101:nrow(simulations),7], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_mix <- data.frame(ACF=as.numeric(acf_mix$acf), Lag=0:25)
ggplot(acf_mix, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw() + ggtitle("Auto-correlation for values from mixture model")
## trace plots for the other parameters
mu1_b <- ggplot(simulations, aes(x=1:1000, y=mu_1)) + geom_line() + theme_bw() + ggtitle("Trace plot for mu_1")
mu2_b <- ggplot(simulations, aes(x=1:1000, y=mu_2)) + geom_line() + theme_bw()+ ggtitle("Trace plot for mu_2")
w1_b <- ggplot(simulations, aes(x=1:1000, y=w_1)) + geom_line() + theme_bw()+ ggtitle("Trace plot for w_1")
w2_b <- ggplot(simulations, aes(x=1:1000, y=w_2)) + geom_line() + theme_bw()+ ggtitle("Trace plot for w_2")
sigma1_b <- ggplot(simulations, aes(x=1:1000, y=sigma1)) + geom_line() + theme_bw()+ ggtitle("Trace plot for sigma_1")
sigma2_b <- ggplot(simulations, aes(x=1:1000, y=sigma2)) + geom_line() + theme_bw()+ ggtitle("Trace plot for sigma_2")
grid.arrange(mu1_b, mu2_b,w1_b,w2_b,sigma1_b,sigma2_b, ncol=2)
# Autocorrelation plots for the other parameters (post burn-in draws only)
acf_mu1 <- acf(simulations[101:1000,3], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_mu2 <- acf(simulations[101:1000,4], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_w1 <- acf(simulations[101:1000,1], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_w2 <- acf(simulations[101:1000,2], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_s1 <- acf(simulations[101:1000,5], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_s2 <- acf(simulations[101:1000,6], lag.max = 25, type = c("correlation"), plot=FALSE)
acf_mu1 <- data.frame(ACF=as.numeric(acf_mu1$acf), Lag=0:25)
acf_mu2 <- data.frame(ACF=as.numeric(acf_mu2$acf), Lag=0:25)
acf_w1 <- data.frame(ACF=as.numeric(acf_w1$acf), Lag=0:25)
acf_w2 <- data.frame(ACF=as.numeric(acf_w2$acf), Lag=0:25)
acf_s1 <- data.frame(ACF=as.numeric(acf_s1$acf), Lag=0:25)
acf_s2 <- data.frame(ACF=as.numeric(acf_s2$acf), Lag=0:25)
acf_mu1 <- ggplot(acf_mu1, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw() + ggtitle("Auto-correlation for chain - Mu_1")
acf_mu2 <- ggplot(acf_mu2, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw()+ ggtitle("Auto-correlation for chain - Mu_2")
acf_w1 <- ggplot(acf_w1, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw()+ ggtitle("Auto-correlation for chain - W_1")
acf_w2 <- ggplot(acf_w2, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw()+ ggtitle("Auto-correlation for chain - W_2")
acf_s1 <- ggplot(acf_s1, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw()+ ggtitle("Auto-correlation for chain - Sigma_1")
acf_s2 <- ggplot(acf_s2, aes(x=Lag, y=ACF))+geom_bar(stat="identity", fill="black")+theme_bw() + ggtitle("Auto-correlation for chain - Sigma_2")
grid.arrange(acf_mu1, acf_mu2,acf_w1,acf_w2,acf_s1,acf_s2,ncol=2)
## c)
# Compare a single-normal fit (blue, hard-coded mean/variance from part a)
# against the averaged mixture density (red) over the data histogram.
a1 <- data.frame(x=rnorm(1000, 32.27564, sqrt(1546.53868)))
b1 <- data.frame(y=mixDensMean, x=xGrid)
ggplot(rainfall, aes(X136)) + geom_histogram(aes(y = ..density..),alpha=0.9,
fill="black") + theme_bw() +
geom_density(data=a1, aes(x),col="royalblue", size=1.05) +
geom_line(data=b1, aes(x=x, y=y), col="red", size=1.05) +
ggtitle("Density for normal model and mixture of normals\n Blue = 1.a) - Red = 1.b)") + xlab("")
# --- Probit/logistic regression on spam data: configuration and data prep ---
########### BEGIN USER INPUTS ################
Probit <- 1 # If Probit <- 0, then the logistic model is used.
chooseCov <- c(1:16) # Here we choose which covariates to include in the model
tau <- 10; # Prior scaling factor such that Prior Covariance = (tau^2)*I
########### END USER INPUT ################
# install.packages("mvtnorm") # Loading a package that contains the multivariate normal pdf
library("mvtnorm") # This command reads the mvtnorm package into R's memory. NOW we can use dmvnorm function.
# Loading data from file
# NOTE(review): hard-coded absolute Windows path — not portable; consider a
# relative path or file.path() rooted at the project directory.
Data<-read.table("C:/Users/Gustav/Documents/Bayesian-Learning/Lab3/SpamReduced.dat",header=TRUE) # Spam data from Hastie et al.
y <- as.vector(Data[,1]); # Data from the read.table function is a data frame. Let's convert y and X to vector and matrix.
X <- as.matrix(Data[,2:17]);
covNames <- names(Data)[2:length(names(Data))];
X <- X[,chooseCov]; # Here we pick out the chosen covariates.
covNames <- covNames[chooseCov];
nPara <- dim(X)[2];
# Setting up the prior: beta ~ N(0, tau^2 * I)
mu <- as.vector(rep(0,nPara)) # Prior mean vector
Sigma <- tau^2*diag(nPara);
# Defining the functions that returns the log posterior (Logistic and Probit models). Note that the first input argument of
# this function must be the one that we optimize on, i.e. the regression coefficients.
# Log posterior (up to an additive constant) for probit regression with a
# multivariate normal prior on the coefficients.
#
# Args:
#   betaVect: numeric vector of regression coefficients (the argument the
#             optimizer varies; it must come first).
#   y:        binary response vector (0/1).
#   X:        design matrix, one row per observation.
#   mu:       prior mean vector (accepted for interface compatibility; the
#             prior mean is fixed at zero in the density call below).
#   Sigma:    prior covariance matrix of the coefficients.
#
# Returns: log-likelihood + log-prior as a single numeric value.
LogPostProbit <- function(betaVect,y,X,mu,Sigma){
  nPara <- length(betaVect);
  linPred <- X%*%betaVect;
  # Numerically stable log-likelihood: pnorm(..., log.p = TRUE) with
  # lower.tail = FALSE avoids log(pnorm(.)) and log(1 - pnorm(.)) underflowing
  # to -Inf for extreme linear predictors (the old expression was
  # sum(y*log(pnorm(linPred)) + (1-y)*log(1-pnorm(linPred)))).
  logLik <- sum(y*pnorm(linPred, log.p = TRUE) + (1-y)*pnorm(linPred, log.p = TRUE, lower.tail = FALSE))
  # Zero-mean multivariate normal prior with covariance Sigma.
  logPrior <- dmvnorm(betaVect, matrix(0,nPara,1), Sigma, log=TRUE);
  return(logLik + logPrior)
}
# Calling the optimization routine Optim. Note the auxilliary arguments that are passed to the function logPost
# Note how I pass all other arguments of the function logPost (i.e. all arguments except betaVect which is the one that we are trying to optimize over) to the R optimizer.
# The argument control is a list of options to the optimizer. Here I am telling the optimizer to multiply the objective function (i.e. logPost) by -1. This is because
# Optim finds a minimum, and I want to find a maximum. By reversing the sign of logPost I can use Optim for my maximization problem.
# Different starting values. Ideally, any random starting value gives you the same optimum (i.e. optimum is unique)
initVal <- as.vector(rep(0,dim(X)[2]));
# Or a random starting vector: as.vector(rnorm(dim(X)[2]))
# Set as OLS estimate: as.vector(solve(crossprod(X,X))%*%t(X)%*%y); # Initial values by OLS
# Select the posterior function for the chosen model.
# NOTE(review): LogPostLogistic is not defined in this file section — confirm
# it exists before running with Probit == 0.
if (Probit==1){
logPost = LogPostProbit;
} else{
logPost = LogPostLogistic;
}
# BFGS maximization (fnscale = -1 flips sign); hessian=TRUE gives the observed
# information for posterior-approximation standard errors.
OptimResults<-optim(initVal,logPost,gr=NULL,y,X,mu,Sigma,method=c("BFGS"),control=list(fnscale=-1),hessian=TRUE)
# --- Gibbs sampler for probit regression via data augmentation (Albert-Chib
# style): latent utilities u are drawn from truncated normals given beta, and
# beta is drawn from its conditional normal given u. Afterwards the Gibbs and
# optim estimates are compared via confusion tables.
library(msm)      # rtnorm: truncated normal draws
library(mvtnorm)  # rmvnorm: multivariate normal draws
library(coda)     # effectiveSize: MCMC diagnostics
tau <- 10
mu <- as.vector(rep(0,nPara)) # Prior mean vector
Sigma <- tau^2*diag(nPara)
nPara <- dim(X)[2]
# NOTE(review): `mu` above uses nPara before it is (re)assigned on this line —
# works only because nPara was already set earlier in the script.
mean_p <- t(as.matrix(as.vector(rep(0,dim(X)[2])), ncol=1))
sigma_p <- diag(x=tau^2, 16, 16)
sigma_p2 <- ( as.matrix(diag(sigma_p)))
# Storage: 250 Gibbs draws of 16 betas; latent utilities per observation.
# NOTE(review): `u` is allocated with 20 columns but the loop writes columns
# 2..251; R grows the data.frame on assignment, but the allocation size looks
# unintended.
emptyB <- data.frame(matrix(vector(), 250, 16))
u <- data.frame(matrix(vector(), 4601, 20))
set.seed(311015)
u[,1] <- rtnorm(4601, X%*%t(mean_p), sd = rep(1, 16))
for (i in 1:250){
# Conditional posterior for beta given the latent utilities.
# NOTE(review): solve(t(X)%*%X + sigma_p2%*%mean_p) and the sigma_p update
# below do not match the textbook conjugate-normal update (prior precision
# should enter as solve(Sigma), not accumulate each iteration) — verify.
B_n <- solve(t(X)%*%X + sigma_p2%*%mean_p) %*% t(X)%*%u[,i]
mean_p <- t(as.matrix(B_n))
sigma_p <- solve(t(X)%*%X + sigma_p)
sigma_p2 <- ( as.matrix(diag(sigma_p))) #same as sigma_p, just modified format
emptyB[i,] <- rmvnorm(1, mean_p, sigma_p)
newB <- t(matrix(as.numeric(emptyB[i,])))
# Redraw each observation's latent utility, truncated by the observed label.
for(j in 1:4601){
if(Data$spam[j] == 1){
# NOTE(review): X[i,] uses the Gibbs-iteration index where the observation
# index X[j,] looks intended — confirm against the original lab solution.
u[j,i+1] <- rtnorm(1, t(X[i,] %*% newB),sd = rep(1, 16), lower=0, upper=Inf)
}else{
u[j,i+1] <- rtnorm(1, t(X[i,] %*% newB),sd = rep(1, 16), lower=-Inf, upper=0)
}
}
print(i+1)
}
# Effective sample sizes of the beta and latent-utility chains.
effectiveSize(emptyB)
effectiveSize(u)
# Use the final Gibbs draw as the point estimate for beta.
Betas <- matrix(as.numeric(emptyB[250,]))
options(scipen = 999)
# Side-by-side comparison of optim and Gibbs estimates with standard errors.
Res <- data.frame(covs=covNames,OptimBeta = as.numeric(OptimResults$par), GibbsBeta=as.numeric(emptyB[250,]), OptimStd = sqrt(diag(-solve(OptimResults$hessian))), GibbsStd=sqrt(sigma_p2),row.names = NULL)
Res
# Trace plots for all 16 coefficients (8 per page).
par(mfrow=c(4,2))
for(i in 1:16){
plot(emptyB[,i], type="l")
}
for(i in 9:16){
plot(emptyB[,i], type="l")
}
# Classify with the Gibbs betas: predict spam when the linear score is > 0.
y_fitGibbs <- (X) %*% Betas
for(i in 1:4601){
if(y_fitGibbs[i] > 0){
y_fitGibbs[i] = 1
}else{
y_fitGibbs[i] = 0
}
}
table(y,y_fitGibbs)
# Same classification using the optim (posterior mode) betas.
betasOptim <- matrix(as.numeric(OptimResults$par))
y_fitOptim <- (X) %*% betasOptim
for(i in 1:4601){
if(y_fitOptim[i] > 0){
y_fitOptim[i] = 1
}else{
y_fitOptim[i] = 0
}
}
table(y,y_fitOptim)
## NA
|
fd861f3464b52e232f9e56d5f5d4bf029367920b
|
200412f0bd7d414124b3ea07478e7a40d48c338e
|
/man/toc.Rd
|
b2f2b4e0bae42a38041bd227e4699ebdce953fdc
|
[] |
no_license
|
dcgerard/stramash
|
6c4eb8cc168d6ae7800c5949885f32984d367c3d
|
7578dbad1bddc3d6a3fcb1bb0e0134cdb79dde40
|
refs/heads/master
| 2021-01-20T17:26:56.198069
| 2016-06-21T16:04:55
| 2016-06-21T16:04:55
| 61,399,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 269
|
rd
|
toc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stramash.R
\name{toc}
\alias{toc}
\title{Copied from the ashr package because it is not exported there but is needed here.}
\usage{
toc()
}
\description{
Copied from the ashr package because it is not exported there but is needed here.
}
|
945a1bdfec9519d75b5bf73ccb2a027846bf75a0
|
de2cafbc4de2a32a82aae7f42a1f8c59277e44f1
|
/man/ihcrit.Rd
|
b5b4a442f76c0df9185cf31a349905d37a110e1f
|
[] |
no_license
|
heike/extracat
|
77869ba3596052d03643682e0caa247d3c3b229d
|
d54052e6d6ac09a1ce560cba2b66b6d4953cbd89
|
refs/heads/master
| 2020-07-17T18:00:49.978955
| 2019-10-08T21:37:58
| 2019-10-08T21:37:58
| 206,067,778
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 620
|
rd
|
ihcrit.Rd
|
\name{WBCI}
\alias{WBCI}
\title{
The Weighted Bertin Classification Index
}
\description{
The weighted Bertin Classification Criterion, with weights based on the Hamming distance, normalized by the value expected under independence.
}
\usage{
WBCI(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A data matrix.
}
}
\value{
The criterion value.
}
\author{
Alexander Pilhoefer
}
\seealso{
\link[extracat]{kendalls}
}
\examples{
M <-arsim(1000, c(12,12), 3)
BCI(M)
WBCI(M)
M2 <- optile(M, iter = 100)
BCI(M2)
WBCI(M2)
M3 <- optile(M, fun = "WBCC", iter = 100)
BCI(M3)
WBCI(M3)
}
|
2f201f6886f58ea3aa58749e8239340259247a31
|
6003ef90542eb509ca376c63852c72356f937509
|
/run_analysis.R
|
0e94938368afb40786223f06370fe28f8eb1a4a7
|
[] |
no_license
|
jangsioco/Getting-and-Cleaning-Data
|
f330fa1cb3dcf63eb5e655149643d6aac1462909
|
fc70c2984423cda2d12df81e141a7f8d20ee6d5e
|
refs/heads/master
| 2021-01-10T10:31:57.307865
| 2016-03-04T09:27:58
| 2016-03-04T09:27:58
| 53,105,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,855
|
r
|
run_analysis.R
|
# run_analysis.R
# Builds a tidy summary of the UCI HAR dataset: merges the train/test splits,
# keeps only the mean/std measurements, averages them per subject/activity,
# and writes the result to tidyData.txt.
#
# Assumes all input files are already extracted into the current working
# directory.

# Load the raw files.
features <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt")
x_train <- read.table("X_train.txt")
subject_train <- read.table("subject_train.txt")
y_train <- read.table("y_train.txt")
x_test <- read.table("X_test.txt")
subject_test <- read.table("subject_test.txt")
y_test <- read.table("y_test.txt")

# Name the columns so the pieces can be combined meaningfully.
colnames(subject_test) <- "subject"
colnames(subject_train) <- "subject"
colnames(y_test) <- "activity_code"
colnames(y_train) <- "activity_code"
colnames(activity_labels) <- c("code","activityName")
colnames(x_test) <- features[,2]
colnames(x_train) <- features[,2]

# Combine subject ids, activity codes and measurements column-wise,
# then stack the test and train sets row-wise.
test.all <- cbind(subject_test,y_test,x_test)
train.all <- cbind(subject_train,y_train,x_train)
mydata.all <- rbind(test.all, train.all)

# Keep the id columns plus every measurement on the mean or std deviation.
mydata.filtered <- mydata.all[(grep(("subject|activity_code|mean|std"),colnames(mydata.all)))]

# Rename columns to remove non-standard characters and capitalize the first
# character of "mean"/"std" for readability.
names(mydata.filtered) <- gsub('-mean', 'Mean', names(mydata.filtered))
names(mydata.filtered) <- gsub('-std', 'Std', names(mydata.filtered))
names(mydata.filtered) <- gsub('[-()]', '', names(mydata.filtered))

# Average every measurement per activity and subject.
# across() replaces the deprecated summarise_each(funs(mean)).
library(dplyr)
mydata.agg <- mydata.filtered %>%
  group_by(activity_code, subject) %>%
  summarise(across(everything(), mean), .groups = "drop")

# Attach human-readable activity labels, then drop the numeric code column.
mydata.comp <- merge(activity_labels, mydata.agg, by.x = "code", by.y = "activity_code")
mydata.comp <- mydata.comp[,2:82]

# Save the tidy data set.
write.table(mydata.comp, file = "tidyData.txt", row.names = FALSE, quote = FALSE)
|
1f60544d800915c932c7c7bf449104d697219339
|
4d9e2ba0997f4e7168446022fab051ce56095f36
|
/bin/CNVnator/CNVnator_output_0.1_round.r
|
d4dff1ab0875cd971ff82393342ebf892664e68c
|
[] |
no_license
|
Jonathan-Abrahams/Duplications
|
da15cbbbd00d767ebbf27b8ae5a12ebcce9d9990
|
bee97dcf864169074d880eb23c64e27a007e2f80
|
refs/heads/master
| 2021-08-10T23:30:53.699697
| 2021-07-29T09:49:31
| 2021-07-29T09:49:31
| 172,690,022
| 0
| 0
| null | 2021-07-05T15:24:16
| 2019-02-26T10:30:07
|
R
|
UTF-8
|
R
| false
| false
| 2,155
|
r
|
CNVnator_output_0.1_round.r
|
# Turn the output of CNVnator into a per-gene copy-number table.
# Usage: Rscript <script> <CNVnator output file> <gene annotation file>
# Writes <args[1]>_genes_overlap.TXT with one copy-number column.
library(IRanges)
#input should be the genome name only
args = commandArgs(trailingOnly=TRUE)
#args[1]="SRR5071080"
print(as.character(args[1]))
CNVnator_output=read.delim(args[1],stringsAsFactors = F,head=F)
Tohama_genes=read.delim(args[2],stringsAsFactors = F,head=F,comment.char = "#")
colnames(CNVnator_output)=c("Type","Location","Length","Depth","E1","E2","E3","E4","Other")
# Keep only calls with a significant E1 p-value.
CNVnator_output=CNVnator_output[CNVnator_output$E1<0.0001,]
CNVnator_processed=CNVnator_output
# Location is "chrom:start-end"; strip the chromosome then split start/end.
CNVnator_processed$Location=gsub(".*:","",CNVnator_output$Location,perl = T)
cc <- strsplit(CNVnator_processed$Location,"-")
#print(cc)
CNVnator_processed$Starts <- unlist(cc)[2*(1:length(CNVnator_output$Location))-1]
CNVnator_processed$Ends <- unlist(cc)[2*(1:length(CNVnator_output$Location)) ]
head(CNVnator_processed)
# Overlap CNV calls with gene coordinates (columns 4/5 of the annotation).
CNVnator_ranges=IRanges(as.numeric(CNVnator_processed$Starts),as.numeric(CNVnator_processed$Ends))
Tohama_ranges=IRanges(as.numeric(Tohama_genes$V4),as.numeric(Tohama_genes$V5))
results=findOverlaps(CNVnator_ranges,Tohama_ranges)
overlaps <- pintersect(CNVnator_ranges[queryHits(results)],Tohama_ranges[subjectHits(results)])
# Fraction of each gene covered by the CNV call; require >= 80% coverage.
percentOverlap <- width(overlaps) / width(Tohama_ranges[subjectHits(results)])
results_filtered=results[which(percentOverlap>=0.8)]
int_list=as(results_filtered, "IntegerList")
results_frame=Tohama_genes
results_frame$Copy_number=1
print("Before loop")
#for every region
# NOTE(review): the loop length comes from the FILTERED hits (int_list) but
# the hits indexed inside the loop come from the UNFILTERED `results` — this
# looks like it should also use results_filtered; confirm intended behavior.
for(i in c(1:length(int_list)))
{
#print(CNVnator_processed$Type[i])
#print(as.numeric(unlist(as(results, "IntegerList")[i])))
hitty=as.numeric(unlist(as(results, "IntegerList")[i]))
# Assign the CNV call's read depth (rounded to 0.1) to all overlapped genes.
results_frame$Copy_number[hitty]=round(CNVnator_processed$Depth[i],1)
}
colnames(results_frame)[10]=args[1]
#output_path=paste("./","Tohama_artificial_CNV/Tohama_art_300_v2/snippy/","Tohama_1col_CNVnator_simple","_200_round_1.6.txt",sep="")
#output_path=paste("./",args[1],"/","CNVnator_genes_",args[2],".txt",sep="")
col1=results_frame[10]
write.table(col1,file=paste(args[1],"_genes_overlap.TXT",sep=""),quote=F,row.names=F,col.names=T)
|
1c1ff2d90965da068f1541e289db29f97fb2e005
|
29dbebba9a0cbd0610a1660b7b1a27673bb90d3f
|
/R/sim.data.R
|
2dba69acd850a5bf2ffeb1c53432f34163376dc8
|
[] |
no_license
|
jaroyle/SCRbayes
|
9af20e405b8eae53d80a63f137e37b60ef993041
|
6f0b484947f53d6fa8cbea806190e68919fa7fd7
|
refs/heads/master
| 2021-01-19T05:45:03.904012
| 2014-10-09T17:14:19
| 2014-10-09T17:14:19
| 3,121,871
| 4
| 2
| null | 2020-05-05T16:31:54
| 2012-01-06T23:24:08
|
R
|
UTF-8
|
R
| false
| false
| 2,927
|
r
|
sim.data.R
|
## Simulate spatial capture-recapture (SCR) data with an inhomogeneous point
## process for activity centers and optional telemetry individuals.
##
## Arguments:
##   N           - number of individuals in the population.
##   sigma       - movement scale; length 1 (shared) or length 2 (sex-specific).
##   loglam0     - log baseline encounter rate.
##   K           - number of sampling occasions.
##   statespace  - matrix/data.frame: columns 1:2 = coordinates, column 3 =
##                 habitat indicator (1 = usable point).
##   traplocs    - matrix of trap coordinates (same units as statespace).
##   Xss         - state-space covariate vector; defaults to constant 1.
##   alpha1      - resource-selection coefficient on Xss.
##   coord.scale - divisor applied to coordinates before simulation.
##   Ntel        - number of telemetered individuals (0 = none).
##   nfixes      - telemetry fixes per telemetered individual.
##
## Returns a list: encounter array Y (individuals x occasions x traps), trap
## MASK, traplocs (rescaled back), Xss, telemetry fixes Ytel, sex vector, and
## the state-space. Side effect: plots the state-space and activity centers.
sim.data <-
function (N = 200, sigma = 0.3, loglam0 = log(0.35),
    K = 12,
    statespace ,
    traplocs,
    Xss = NULL, alpha1 = 0,
    coord.scale = 5000, Ntel=2, nfixes=500)
{
    # Work in scaled coordinates for numerical convenience.
    statespace[, 1:2] <- statespace[, 1:2]/coord.scale
    traplocs <- traplocs/coord.scale
    ntraps <- nrow(traplocs)
    # Default covariate: constant over the state-space.
    # Bug fix: the original used nrow(GRID) where GRID is undefined in this
    # function; Xss must have one value per state-space row because it is
    # subset by the habitat indicator (goodbad) below.
    if (is.null(Xss))
        Xss <- rep(1, nrow(statespace))
    G <- statespace[, 1:2]
    goodbad <- statespace[, 3]
    G <- G[goodbad == 1, ]
    nG <- nrow(G)
    # Activity centers drawn with probability proportional to exp(alpha1*Xss)
    # over the usable habitat points.
    sprobs <- exp(alpha1 * Xss[goodbad == 1])
    sprobs <- sprobs/sum(sprobs)
    centers <- sample(1:nG, N, replace = TRUE, prob = sprobs)
    S <- G[centers, ]
    par(mar=c(3,3,3,6))
    spatial.plot(statespace,Xss,cx=2,add=FALSE)
    points(S,pch=20)
    Y <- array(NA, c(N, K, ntraps))
    #Ytel<- matrix(NA,nrow=nG,ncol=Ntel)
    Ytel<-NULL
    sex<-rbinom(N,1,.5) # sex ratio = .5
    # Allow a sex-specific movement scale when sigma has two elements.
    if(length(sigma)==2){
      sigma.v<- sigma[sex+1]
    }
    else{
      sigma.v<-rep(sigma,N)
    }
    for (i in 1:N) {
        # Half-normal encounter rate as a function of distance to each trap.
        lp <- loglam0 - (1/(2*sigma.v[i]^2)) * ((S[i, 1] - traplocs[, 1])^2 +
            (S[i, 2] - traplocs[, 2])^2)
        lambda<- exp(lp)
        pcap <- 1 - exp(-1 * lambda)
        for (t in 1:K) {
            ttt <- rbinom(ntraps, 1, pcap)
            Y[i, t, 1:ntraps] <- ttt
        }
        # The first Ntel individuals also receive simulated telemetry fixes
        # (bivariate normal around the activity center).
        if(Ntel>0 & i<= Ntel){
         ## Xss needs in encounter model too
          log.lambda<-
           -(1/(2*sigma.v[i]^2)) * ((S[i, 1] - G[, 1])^2 + (S[i, 2] - G[, 2])^2)  +alpha1*Xss
         # rsf.probs<- exp(log.lambda)/sum(exp(log.lambda))
         # Ytel[,i]<- rmultinom(1,nfixes,rsf.probs)
           Ytel<- rbind(Ytel,cbind( rnorm(nfixes,S[i,1],sigma.v[i]),
                                    rnorm(nfixes,S[i,2],sigma.v[i]),i))
     }
    }
    attr(Ytel,"sex")<- sex[1:Ntel]
    # Drop individuals never captured from the encounter history (and sex).
    ndet <- apply(Y, c(1, 2), sum)
    ind.captured <- apply(ndet, 1, sum)
    cat("total captures: ", sum(Y), fill = TRUE)
    cat("total individuals: ", sum(ind.captured > 0), fill = TRUE)
    Y <- Y[ind.captured > 0, , ]
    sex<-sex[ind.captured>0]
    MASK <- matrix(1, ntraps, K)
    # Restore original coordinate units for the returned trap locations.
    traplocs <- traplocs * coord.scale
    trapout <- cbind(1:nrow(traplocs), traplocs, MASK)
    dimnames(trapout) <- list(1:nrow(traplocs), c("LOC_ID", "X_Coord",
        "Y_Coord", as.character(1:ncol(MASK))))
    ## write.csv(trapout, "traps.csv", row.names = FALSE)
    # Flatten the 3-d encounter array into (trapid, individual, sample) rows.
    o <- NULL
    for (i in 1:dim(Y)[1]) {
        z <- Y[i, , ]
        o <- rbind(o, cbind(i, row(z)[z > 0], col(z)[z > 0]))
    }
    y <- cbind(o[, 3], o[, 1], o[, 2])
    y <- y[order(y[, 1]), ]
    dimnames(y) <- list(1:nrow(y), c("trapid", "individual",
        "sample"))
   # write.csv(y, "captures.csv", row.names = FALSE)
   # write.csv(statespace, "statespace.csv", row.names = FALSE)
    list(Y = Y, MASK = MASK, traplocs = traplocs, Xss = Xss,Ytel=Ytel,
         Xsex=sex,statespace=statespace)
}
|
068eaa80e026828698b561b62bf7346657c6214d
|
e84d1b806eb7561a09e9436accc3b6ef2a389bdc
|
/homework/Week4/project/ui.R
|
a603631318b594923e2efcbde408312717eb19c3
|
[] |
no_license
|
danfan1/Developing-Data-Products
|
b9a1f53a9d3d96203a8a10bb93f444d93d7e3621
|
d7883e4dbb9b1e87dddba500b0e1fcda0a3235b3
|
refs/heads/master
| 2020-03-17T13:01:11.277323
| 2018-05-24T05:22:00
| 2018-05-24T05:22:00
| 133,613,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,002
|
r
|
ui.R
|
# Shiny UI: two tabs, each pairing parameter sliders with a plot output.
# The matching server must render outputs named "plot1" (line) and
# "plot2" (sine).
library(shiny)
shinyUI(fluidPage(
  titlePanel("How Mathematical Plots Change with Parameters"),
  h3("Change the parameter values and see how the plot changes"),
  tabsetPanel(
    # Tab 1: straight line y = m*x + c, slope and intercept sliders.
    tabPanel(
      "Line",
      h3("y = m * x + c"),
      sidebarLayout(
        sidebarPanel(
          sliderInput("m", "Slope (m)", value = 2, min = -5, max = 5, step = 1),
          sliderInput("c", "Intercept (c)", value = 1, min = -5, max = 5, step = 1)
        ),
        mainPanel(
          plotOutput("plot1")
        )
      )
    ),
    # Tab 2: sine wave y = A*sin(w*x + theta); phase steps in pi/2.
    tabPanel(
      "Sine",
      h3("y = A * sin(w * x + theta)"),
      sidebarLayout(
        sidebarPanel(
          sliderInput("w", "Frequency (w)", value = 1, min = -5, max = 5, step = 1),
          sliderInput("theta", "Phase Shift (theta)", value = 0, min = -2*pi, max = 2*pi, step = pi/2),
          sliderInput("A", "Amplitude (A)", value = 1, min = -5, max = 5, step = 1)
        ),
        mainPanel(
          plotOutput("plot2")
        )
      )
    )
  )
))
|
9e054f6a433fc721938f93f1c567bfa1b38d34b4
|
576b64cc5c65c3544ba5646c695c83a1995c2fd1
|
/RBigObject/man/bigobject_import.Rd
|
c3aa7c8d56c50e1ec44b3a158a447fae9b407066
|
[] |
no_license
|
bigobject-inc/BigObjectRPkgs
|
f0fddb8983e0a85b8f64e339c3f99841393b6644
|
ba03a3ee5612e711e0739ec57b31db2b09208927
|
refs/heads/master
| 2020-04-15T15:18:44.571813
| 2015-10-26T08:39:57
| 2015-10-26T08:39:57
| 42,028,415
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
rd
|
bigobject_import.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bigobject_sql.R
\name{bigobject_import}
\alias{bigobject_import}
\title{Import data.frame to BigObject Analytics}
\usage{
bigobject_import(df, name, action = c("create", "append", "overwrite"),
ip = get_ip(), port = get_port(), verbose = get_verbose())
}
\arguments{
\item{df}{data.frame. The data which will be imported to BigObject Analytics.}
\item{name}{string. The table name of the imported}
\item{action}{string. Specify how to update object in BigObject Analytics. Only the first element is used. Please see details.}
\item{ip}{string. The ip address or domain name to the BigObject Analytics instance.}
\item{port}{string. The port number.}
\item{verbose}{logical value. Whether to print verbose message.}
}
\description{
Import data.frame to BigObject Analytics
}
\details{
The \code{action} parameter indicates the behavior of BigObject Analytics:
\itemize{
\item {"create":} Creating a new table.
\item {"append":} Appending data to an existed table.
\item {"overwrite":} Overwrite an existed table with the same schema.
}
}
\references{
\url{http://docs.bigobject.io/API/Data_Import_Service.html}
}
|
e09d30cc886b1cb93746740fbee2716d144e011d
|
6034d565642a30876b7b7a025b74a31580c44613
|
/tests/testthat/test-printing2.R
|
68fd4f26ffa75a311617d5fd015fdbe3dbd0a703
|
[] |
no_license
|
cran/parameters
|
a95beba8c8bd820a88b74ca407609cc08a62fcab
|
f19575ccdbbd303a1896a13d8b4b8210563cabfa
|
refs/heads/master
| 2023-06-08T08:58:24.080762
| 2023-05-26T09:20:02
| 2023-05-26T09:20:02
| 211,083,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,441
|
r
|
test-printing2.R
|
# Snapshot tests for printing compare_parameters() output: basic multi-model
# tables, select-template formatting (including glue templates), grouped
# parameter printing, and mixing model classes. The interaction mark is pinned
# to "*" via withr so snapshots are stable across option settings.
skip_if_not_installed("withr")
skip_if(getRversion() < "4.0.0")
withr::with_options(
  list("parameters_interaction" = "*"),
  {
    lm1 <- lm(Sepal.Length ~ Species, data = iris)
    lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
    lm3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris)
    # Basic -------
    out <- compare_parameters(lm1, lm2, lm3)
    test_that("multiple model", {
      expect_snapshot(print(out))
    })
    # templates --------------
    # Predefined template ("se_p") and glue-style column templates.
    out <- compare_parameters(lm1, lm2, lm3, select = "se_p")
    test_that("templates", {
      expect_snapshot(print(out))
    })
    out <- compare_parameters(lm1, lm2, lm3, select = "{estimate}{stars} ({se})")
    test_that("templates, glue-1", {
      expect_snapshot(print(out))
    })
    out <- compare_parameters(lm1, lm2, lm3, select = "{estimate} ({ci_low}, {ci_high}), p={p}{stars}")
    test_that("templates, glue-2", {
      expect_snapshot(print(out))
    })
    # "|" in the template splits output into separate columns.
    out <- compare_parameters(lm1, lm2, lm3, select = "{estimate} ({se})|{p}")
    test_that("templates, glue-3, separate columnns", {
      expect_snapshot(print(out))
    })
    # grouping parameters --------------
    lm1 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
    lm2 <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
    # remove intercept
    out <- compare_parameters(lm1, lm2, drop = "^\\(Intercept")
    test_that("templates, glue-3, separate columnns", {
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species (versicolor)",
            "Species (virginica)"
          ),
          Interactions = c(
            "Species (versicolor) * Petal Length",
            "Species (virginica) * Petal Length"
          ),
          Controls = "Petal Length"
        ))
      )
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species (versicolor)",
            "Species (virginica)"
          ),
          Interactions = c(
            "Species (versicolor) * Petal Length", # note the unicode char!
            "Species (virginica) * Petal Length"
          ),
          Controls = "Petal Length"
        ), select = "{estimate}{stars}")
      )
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species (versicolor)",
            "Species (virginica)"
          ),
          Interactions = c(
            "Species (versicolor) * Petal Length", # note the unicode char!
            "Species (virginica) * Petal Length"
          ),
          Controls = "Petal Length"
        ), select = "{estimate}|{p}")
      )
    })
    # Mixed model classes: glm, glmmTMB Poisson, and zero-inflated glmmTMB
    # compared in one table across all effects/components.
    test_that("combination of different models", {
      skip_on_cran()
      skip_if_not_installed("glmmTMB")
      data("fish")
      m0 <- glm(count ~ child + camper, data = fish, family = poisson())
      m1 <- glmmTMB::glmmTMB(
        count ~ child + camper + (1 | persons) + (1 | ID),
        data = fish,
        family = poisson()
      )
      m2 <- glmmTMB::glmmTMB(
        count ~ child + camper + zg + (1 | ID),
        ziformula = ~ child + (1 | persons),
        data = fish,
        family = glmmTMB::truncated_poisson()
      )
      cp <- compare_parameters(m0, m1, m2, effects = "all", component = "all")
      expect_snapshot(print(cp))
    })
  }
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.