blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
94bfd1349c31fdf3787e2285c11035d819245c43
|
49b3470e94000265077bb930eb3374fbe0d34d3a
|
/vignettes/rolling_statistics.R
|
935ec4d255cdadb8855e72a4fe8ed1e837502644
|
[] |
no_license
|
lozanof/HighFreq
|
9cddc77c03456de26687b0ee3aaca56ec2b76c3e
|
da68b7b886217b70913dd4a31382ad85d77af6f5
|
refs/heads/master
| 2022-05-29T05:48:50.004919
| 2020-05-03T20:30:09
| 2020-05-03T20:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,759
|
r
|
rolling_statistics.R
|
## ----eval=FALSE----------------------------------------------------------
# # load HighFreq to load SPY data
# library(HighFreq)
# # rolling average prices
# look_back <- 10
# prices_rolling <- rutils::roll_sum(Cl(HighFreq::SPY), look_back=look_back)/look_back
# colnames(prices_rolling) <- "SPY.Prices"
# chart_Series(HighFreq::SPY["2013-11-12"], name="SPY Prices")
# add_TA(prices_rolling["2013-11-12"], on=1, col="red", lwd=2)
# legend("top", legend=c("SPY prices", "average prices"),
# bg="white", lty=c(1, 1), lwd=c(2, 2),
# col=c("black", "red"), bty="n")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # rolling volume-weighted average prices
# look_back <- 10
# prices_rolling <- roll_vwap(oh_lc=HighFreq::SPY["2013-11-12"], look_back=look_back)
# colnames(prices_rolling) <- "SPY.Prices"
# chart_Series(HighFreq::SPY["2013-11-12"], name="SPY VWAP Prices")
# add_TA(prices_rolling["2013-11-12"], on=1, col="red", lwd=2)
# legend("top", legend=c("SPY prices", "VWAP prices"),
# bg="white", lty=c(1, 1), lwd=c(2, 2),
# col=c("black", "red"), bty="n")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # calculate variance of SPY using method yang_zhang
# # scale from minutely to daily frequency and also apply factor to compensate for secondly units
# vari_ance <- (6.5*60*60^2)*HighFreq::calc_variance(HighFreq::SPY, calc_method="yang_zhang")
# # calculate variance of SPY without accounting for overnight jumps
# vari_ance <- (6.5*60*60^2)*HighFreq::calc_variance(HighFreq::SPY, calc_method="rogers_satchell")
# # calculate daily intraday volatilities
# var_daily <- (6.5*60*60^2)*period.apply(x=HighFreq::SPY, INDEX=end_days, HighFreq::calc_variance)
# index(var_daily) <- lubridate::floor_date(index(var_daily), "day")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # calculate running variance using method rogers_satchell
# # scale from minutely to daily frequency and also apply factor to compensate for secondly units
# var_running <- (6.5*60*60^2)*run_variance(oh_lc=HighFreq::SPY,
# calc_method="rogers_satchell")
# # calculate rolling volume-weighted average daily variance
# look_back <- 21
# var_rolling <- roll_vwap(oh_lc=HighFreq::SPY, x_ts=var_running, look_back=look_back)
# colnames(var_rolling) <- "SPY.Var"
#
# # calculate rolling daily variance using roll_variance()
# var_rolling <- (6.5*60*60^2)*roll_variance(oh_lc=HighFreq::SPY,
# calc_method="rogers_satchell",
# look_back=look_back)
#
# # calculate rolling volume-weighted average skew indicator
# skew_running <- run_skew(oh_lc=HighFreq::SPY)
# skew_rolling <- roll_vwap(oh_lc=HighFreq::SPY, x_ts=skew_running, look_back=look_back)
# skew_rolling <- ifelse(var_rolling>0, skew_rolling/(var_rolling)^(1.5), 0)
# colnames(skew_rolling) <- "SPY.Skew"
# chart_Series(skew_rolling["2013-11-12"], name="SPY Rolling Skew")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # calculate rolling volume-weighted average variance and skew
# look_back <- 21
# var_rolling <- roll_moment(oh_lc=HighFreq::SPY, look_back=look_back)
# skew_rolling <- roll_moment(oh_lc=HighFreq::SPY, mo_ment="run_skew", look_back=look_back)
# skew_rolling <- ifelse(var_rolling>0, skew_rolling/(var_rolling)^(1.5), 0)
# chart_Series(skew_rolling["2013-11-12"], name="SPY Rolling Skew")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # calculate daily average open to close variance
# var_daily <- (6.5*60*60^2)*xts::apply.daily(x=HighFreq::SPY, FUN=agg_regate,
# mo_ment="run_variance", calc_method="rogers_satchell")
# colnames(var_daily) <- "SPY.Var"
# chart_Series(100*sqrt(var_daily["/2010"]), name="SPY daily standard deviation")
#
# # calculate daily average skew
# skew_daily <- xts::apply.daily(x=HighFreq::SPY, FUN=agg_regate, mo_ment="run_skew")
# skew_daily <- skew_daily/(var_daily)^(1.5)
# colnames(skew_daily) <- "SPY.Skew"
# inter_val <- "2013-10/2013-12"
# chart_Series(skew_daily[inter_val], name="SPY Skew")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# volume_seasonal <- season_ality(Vo(HighFreq::SPY))
# colnames(volume_seasonal) <- "SPY.volume_seasonal"
# chart_Series(volume_seasonal, name="SPY intraday seasonality of volume")
# var_seasonal <- season_ality((6.5*60*60^2)*run_variance(oh_lc=HighFreq::SPY))
# colnames(var_seasonal) <- "SPY.var_seasonal"
# chart_Series(var_seasonal, name="SPY intraday seasonality of variance")
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # extract a single day of SPY data
# x_ts <- SPY["2012-02-13"]
# look_back <- 11
# # calculate the rolling sums of the columns of x_ts
# agg_regations <- roll_apply(x_ts, agg_fun=sum, look_back=look_back, by_columns=TRUE)
# # define a vector-valued aggregation function
# agg_function <- function(x_ts) c(max(x_ts[, 2]), min(x_ts[, 3]))
# # apply the aggregation function over a rolling window
# agg_regations <- roll_apply(x_ts, agg_fun=agg_function, look_back=look_back)
# # define end points at 11-minute intervals (SPY is minutely bars)
# end_points <- rutils::end_points(x_ts, inter_val=look_back)
# # calculate the rolling sums of the columns of x_ts over end_points
# agg_regations <- roll_apply(x_ts, agg_fun=sum, look_back=2, end_points=end_points, by_columns=TRUE)
# # apply the vector-valued aggregation function over the end_points of x_ts
# agg_regations <- roll_apply(x_ts, agg_fun=agg_function, look_back=2, end_points=end_points)
## ----eval=FALSE, echo=(-(1:2))-------------------------------------------
# # load HighFreq
# library(HighFreq)
# # Calculate ETF prices
# sym_bols <- colnames(rutils::etf_env$price_s)
# sym_bols <- sym_bols[!(sym_bols=="VXX")]
# price_s <- rutils::etf_env$price_s[, sym_bols]
# # Carry forward non-NA prices
# price_s <- zoo::na.locf(price_s)
# price_s <- na.omit(price_s)
# # Calculate simple ETF returns
# re_turns <- rutils::diff_it(price_s)
# # Calculate the daily excess returns
# # risk_free is the daily risk-free rate
# risk_free <- 0.03/260
# ex_cess <- re_turns - risk_free
# # Define monthly end_points without initial warmup period
# end_points <- rutils::calc_endpoints(re_turns, inter_val="months")
# end_points <- end_points[end_points>50]
# len_gth <- NROW(end_points)
# # Define 12-month look_back interval and start_points over sliding window
# look_back <- 12
# start_points <- c(rep_len(1, look_back-1), end_points[1:(len_gth-look_back+1)])
# # Define shrinkage and regularization intensities
# al_pha <- 0.5
# max_eigen <- 3
# # Simulate monthly rolling portfolio optimization strategy in pure R
# roll_rets_R <- lapply(2:NROW(end_points), function(i) {
# # subset the ex_cess returns
# ex_cess <- ex_cess[start_points[i-1]:end_points[i-1], ]
# ei_gen <- eigen(cov(ex_cess))
# # Calculate regularized inverse of covariance matrix
# max_eigen <- 3
# eigen_vec <- ei_gen$vectors[, 1:max_eigen]
# eigen_val <- ei_gen$values[1:max_eigen]
# in_verse <- eigen_vec %*% (t(eigen_vec) / eigen_val)
# # Apply shrinkage to the mean returns
# col_means <- colMeans(ex_cess)
# col_means <- ((1-al_pha)*col_means + al_pha*mean(col_means))
# # Calculate weights using R
# weight_s <- in_verse %*% col_means
# weight_s <- weight_s/sum(weight_s)
# # subset the re_turns to out-of-sample returns
# re_turns <- re_turns[(end_points[i-1]+1):end_points[i], ]
# # calculate the out-of-sample portfolio returns
# xts(re_turns %*% weight_s, index(re_turns))
# } # end anonymous function
# ) # end lapply
# # Flatten the list of xts into a single xts series
# roll_rets_R <- rutils::do_call(rbind, roll_rets_R)
# colnames(roll_rets_R) <- "roll_rets"
#
# # Simulate monthly rolling portfolio optimization strategy using HighFreq::calc_weights()
# roll_rets_arma <- lapply(2:NROW(end_points), function(i) {
# # subset the ex_cess returns
# ex_cess <- ex_cess[start_points[i-1]:end_points[i-1], ]
# # apply regularized inverse to mean of ex_cess
# weight_s <- HighFreq::calc_weights(ex_cess, max_eigen, al_pha)
# # subset the re_turns to out-of-sample returns
# re_turns <- re_turns[(end_points[i-1]+1):end_points[i], ]
# # calculate the out-of-sample portfolio returns
# xts(re_turns %*% weight_s, index(re_turns))
# } # end anonymous function
# ) # end lapply
# # Flatten the list of xts into a single xts series
# roll_rets_arma <- rutils::do_call(rbind, roll_rets_arma)
# colnames(roll_rets_arma) <- "roll_rets"
# # Compare RcppArmadillo with R
# all.equal(roll_rets_arma, roll_rets_R)
#
# # Simulate monthly rolling portfolio optimization strategy using HighFreq::roll_portf()
# roll_rets <- HighFreq::roll_portf(ex_cess, re_turns,
# start_points-1, end_points-1,
# max_eigen, al_pha)
# roll_rets <- xts(roll_rets, index(re_turns))
# colnames(roll_rets) <- "roll_rets"
# # Compare RcppArmadillo with R
# all.equal(roll_rets_arma, roll_rets[index(roll_rets_arma)])
# # Plot dygraph of strategy
# dygraphs::dygraph(cumsum(roll_rets),
# main="Cumulative Returns of Max Sharpe Portfolio Strategy")
|
7a8f951cd5739273280fdc974f007ee78cfce19b
|
6c12225069086e6c544199652ef147c7d7c2e5ba
|
/refresh_Update_DB2.R
|
41199e7209e4d384f1d18a1ce18cda6fcd6fe95c
|
[] |
no_license
|
maquins/ewars_dashboard
|
237eed007b758b588c7f51a7d11acf006c359461
|
edb8c94b7d2d5162d445c0e32e564cac74ccbc9a
|
refs/heads/master
| 2023-06-08T04:26:12.012375
| 2021-06-24T14:38:43
| 2021-06-24T14:38:43
| 287,126,031
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,925
|
r
|
refresh_Update_DB2.R
|
# contact: sewemaquins@gmail.com
#options(shiny.reactlog=FALSE)
#list11<-list(x=2,y=3)
#list11[[c(1:2)]][1]
# NOTE(review): this script is source()d inside a Shiny server context --
# it assumes `input`, `output`, `session`, `dB2_pars()`, `arl_vars`,
# stringr, dplyr and DBI/RSQLite are already available in the calling
# environment; it cannot run standalone.
print("Running refresh DB script........")
print(paste("district is: ",input$district_dB2))
#print(names(dat_ou$df))
# Selected district id (numeric) and the covariate table names to pull:
# "Cases", "Population" plus the comma-separated alarm variables in arl_vars.
p<-as.numeric(input$district_dB2)
covariates_d<-c("Cases","Population",as.character(str_split(arl_vars,',',simplify =T)))
## select data from sql database
con_pros <- dbConnect(SQLite(),"prospective_data.sqlite")
# Current model run for this district; the run's timestamp is the part of
# the run name after the "model_Run_" prefix.
runs_c<-dB2_pars()$curr_Run %>% filter(district==input$district_dB2)
tim_stamp<-str_remove(runs_c$model_run_Name,"model_Run_")
dat_nam<-paste('pros_Dat_',input$district_dB2,'_',tim_stamp,sep='')
print(dat_nam)
# Load helper functions (defines get_merge_STR(), used just below) by
# evaluating the file's text in this environment.
eval(parse(text=readLines("database_Functions.R")))
# get_merge_STR() returns R code (as a string) that merges the covariate
# tables; it is evaluated later inside val_in_up().
merge_pros_dat_str<-get_merge_STR(c(covariates_d))
print(covariates_d)
cat(merge_pros_dat_str)
# Pull the first four columns of each covariate table into a same-named
# data frame here (assign() creates `Cases`, `Population`, ... that the
# generated merge code refers to by name).
con_up <- dbConnect(SQLite(),"prospective_data.sqlite")
for(hh in 1:length(covariates_d)){
assign(covariates_d[hh],dbGetQuery(con_up, paste("select * from ",covariates_d[hh],collapse = " "))[,1:4])
}
dbDisconnect(con_up)
## decide whether to update or insert new record
#ds_cu1<-eval(parse(text=merge_pros_dat_str)) %>%
#dplyr::filter(District==as.numeric(input$district_dB2))
# Reactive: merged covariate data filtered to the selected district.
# District codes 999/9999 are treated as invalid sentinels and abort.
val_in_up<-reactive({
if(as.numeric(input$district_dB2) %in% c(999,9999)){
stop("we we !!")
}else{
eval(parse(text=merge_pros_dat_str)) %>%
dplyr::filter(District==input$district_dB2)
}
})
print(val_in_up())
# Show the district's merged data as a DT table on the dashboard.
output$data_dis<-DT::renderDataTable(datatable(val_in_up(),
options = list(autoWidth = TRUE,
searching = FALSE)))
dbDisconnect(con_pros)
print(paste("year is ::",input$year,'and week is ::',input$week))
# When the selected week/year changes, look up the stored values for that
# week and push them into the matching numeric inputs on the form.
observeEvent(c(input$week,input$year),{
validate(
need(!as.numeric(input$district_dB2) %in% c(999,9999),"district wierd")
)
# Build and evaluate code extracting the district's alarm-variable string
# (first element of the stored parameter values) into `al_var1`.
al_ev<-paste("al_var1<-as.character( dB2_pars()$all_forDBS$param_values_",input$district_dB2,"$value)[1]",sep='')
print(names(val_in_up()))
eval(parse(text=al_ev))
# al_var: all inputs to refresh; al_var11: just the alarm indicators.
al_var<- c("Cases","Population",as.character(str_split(al_var1,pattern=",",simplify =T)))
al_var11<-as.character(str_split(al_var1,pattern=",",simplify =T))
# Debug snapshot kept in the global environment (note the <<-).
cuplprit<<-list(val_in_up=val_in_up(),
al_ev=al_ev)
# Row of stored values for the currently selected week/year.
fa_f<-val_in_up() %>% dplyr::filter(week==input$week & year==input$year)
print("this is where it changed")
#stop("[[omera ayudi...]]")
print(val_in_up())
# Count how many alarm indicators actually exist as columns of fa_f.
check_t<-sum(sapply(al_var11,FUN =function(x) as.numeric(x %in% names(fa_f))))
print(paste("total is::",check_t))
print(al_var11)
print(paste("length of al_var11 ::",length(al_var11)))
print(paste("check_t==length(al_var11) is ::",as.numeric(check_t==length(al_var11))))
if(check_t==length(al_var11)){
#FFF<-F
#if(FFF){
#rev<-as.numeric(fa_f[,al_var])
#rev<-as.numeric(fa_f)
# Values for the selected week, in al_var order.
# NOTE(review): `rev` masks base::rev inside this handler.
rev<-fa_f %>%
dplyr::select(all_of(al_var)) %>%
as.numeric()
print(c(input$district_dB2,input$year,input$week))
print(rev)
#print(input$Cases)
# One updateNumericInput() call per variable, generated as text
# (paste() vectorizes over al_var/rev) and then evaluated.
up_st<-paste("updateNumericInput(session,'",al_var,"'",",value=",rev,')',sep='')
print(up_st)
eval(parse(text=up_st))
}else{
# NOTE(review): safeError() only constructs an error object; it is not
# raised or shown here, so this branch is effectively a silent no-op.
safeError("why why ....")
}
})
#browser()
# model data and evaluation
# Fetch this district's evaluation data and fitted GAM from dB2_pars()
# by building the element names ("data_eval_<p>", "gam_model_<p>") as text.
d_str<-paste("dat_eval<-dB2_pars()$all_forDBS","[[",paste("'data_eval_",p,"'",sep=''),"]]",sep='')
m_str<-paste("mod<-dB2_pars()$all_forDBS","[[",paste("'gam_model_",p,"'",sep=''),"]]",sep='')
eval(parse(text=d_str))
eval(parse(text=m_str))
##get year
# Keep only the earliest evaluation year; a copy is stashed globally
# for debugging.
yrs_ch<-min(dat_eval$year)
dat_eval<-dat_eval %>% dplyr::filter(year==yrs_ch)
dat_eval11<<-dat_eval
#print(head(dat_eval))
# Skeleton of weeks 2..52 for the selected year, left-joined with the
# values entered for that year (weeks with no data stay NA).
tem.d.a<-expand.grid(District=p,year=input$year,week=2:52)
val_year_Merge<-val_in_up() %>%
dplyr::filter(year==input$year)
tem.d<-merge(tem.d.a,val_year_Merge,by=c("District","week"),sort=T,all.x=T)
temp.d_check<<-tem.d
ds_c1_check<<-val_in_up()
## merge with entered values for prediction
# District parameter table (cc_v); the alarm threshold is the row whose
# `parameter` equals "alarm threshold".
cc_d<-paste("cc_v<-dB2_pars()$all_forDBS$param_values_",input$district_dB2,sep='')
print(cc_d)
eval(parse(text=cc_d))
a_n<-which(cc_v$parameter=="alarm threshold")
print(cc_v$value[a_n])
print(cc_v[1:2,])
# Outbreak rate (cases per 1000 population) plus the moving-average
# columns copied from dat_eval.
# NOTE(review): assumes dat_eval rows align one-to-one with tem.d's 51
# weeks -- confirm, since a length mismatch would silently recycle.
tem.d<-tem.d %>% dplyr::mutate(outbreak=(Cases/Population)*1000,
alarm_threshold=as.numeric(as.character(cc_v$value[a_n])),
#alarm_threshold=0.05,
outbreak_moving=round(dat_eval$outbreak_moving,6),
outbreak_moving_sd=dat_eval$outbreak_moving_sd,
outbreak_moving_limit=round(dat_eval$outbreak_moving_limit,6),
endemic_chanel=round(dat_eval$outbreak_moving_limit,6),
season=dat_eval$season)
temppp<<-tem.d
##map the alarm indicators
print(tem.d[1:2,])
# Alarm indicator names for this district (comma-separated string).
al_ev<-paste("al_var1<-as.character( dB2_pars()$all_forDBS$param_values_",input$district_dB2,"$value)[1]",sep='')
eval(parse(text=al_ev))
al_var<- str_split(al_var1,pattern=",",simplify =T)
n_alarm_indicators<-length(al_var)
##create alarm indicators
#alarm_ind<-paste(paste(paste('mean_alarm',1:n_alarm_indicators,sep=''),'=',input$alarm_indicators),collapse =',')
#create_alarm_inds<-paste('tem.d %>%mutate(',alarm_ind,')',sep='')
##create correctly
#alarm_window<-3
# Build mean_alarm<i> columns: a trailing rolling mean of each alarm
# indicator over `alarm_window` weeks (5th stored parameter value).
# The mutate() expression is generated as text and evaluated below.
for_mean1<-paste('mean_alarm',1:n_alarm_indicators,sep='')
al_w<-paste("alarm_window<-as.numeric( dB2_pars()$all_forDBS$param_values_",input$district_dB2,"$value)[5]",sep='')
eval(parse(text=al_w))
for_mean2<-paste("rollapply(",as.character(al_var),',FUN=mean,width=list(-',alarm_window-1,':0),
align = "center",
fill = NA,na.rm = T,
partial=T)',sep='')
for_mean3<-paste('tem.d %>% mutate(',paste(for_mean1,'=',for_mean2,collapse =','),')',sep='')
cat(for_mean3)
#parse(text=for_mean3)
tem.d<-eval(parse(text=for_mean3))
#tem.d<-eval(parse(text=create_alarm_inds))
tem.d$Year_F<-yrs_ch-1
## compute outbreak probability
# Predicted outbreak probability from the GAM, rounded, then blanked
# out for weeks that have no observed data.
out_prob<-predict(mod,tem.d,'response')
tem.d$outbreak_probability<-round(as.numeric(out_prob),5)
## replace with missing unobserved weeks
rep_pro<-which(!tem.d$week %in% val_in_up()$week)
tem.d$outbreak_probability[rep_pro]<-NA
##compute the other indicators
# outbreak_period: rate above the endemic channel; alarm_signal: predicted
# probability above the district's threshold (NA when no prediction).
tem.d<-tem.d %>% mutate(outbreak_period=case_when(outbreak>endemic_chanel~1,
TRUE~0),
alarm_signal=case_when(outbreak_probability>alarm_threshold~1,
is.na(outbreak_probability)~as.double(NA),
TRUE~0))
# Response category from runs of consecutive alarm signals:
# 1 after two in a row, 1.5 after three, 2 after four or more,
# 0.5 otherwise, NA when the signal itself is NA.
tem.d<-tem.d %>% mutate(lag0=dplyr::lag(alarm_signal,0),
lag1=dplyr::lag(alarm_signal,1),
lag2=dplyr::lag(alarm_signal,2),
lag3=dplyr::lag(alarm_signal,3),
lag4=dplyr::lag(alarm_signal,4)) %>%
mutate(response_cat=case_when(lag0==1 & lag1==1 & lag2 %in% c(0,NA) ~1,
lag0==1 & lag1==1 & lag2==1 & lag3 %in% c(0,NA) ~1.5,
lag0==1 & lag1==1 & lag2==1 & lag3==1 ~2,
is.na(alarm_signal)~ as.double(NA),
TRUE~0.5))
# merge() suffixed the duplicate year columns; year.x is the
# expand.grid (selected) year.
tem.d$year<-as.numeric(as.character(tem.d$year.x))
## Reduce the variables to display
vars.left<-c("District","year","week","Cases","Population","outbreak",
"endemic_chanel","alarm_threshold",
"outbreak_probability","alarm_signal",as.character(al_var))
eval(parse(text=paste("sel.dat<-tem.d %>% dplyr::select(",paste(vars.left,collapse =','),")",sep='')))
# Prediction table shown on the dashboard.
output$pred_dis<-renderDataTable(datatable(sel.dat,
options = list(autoWidth = F,
searching = FALSE)) )
##send the plots
# Label positions for the response-category text drawn inside plot4
# (x = -20 places the labels left of the week axis; y spans the four
# category heights).
dat_lab<-data.frame(response_cat=c("No response",
"Initial response",
"Early response",
"Late/emergency response"),
x=-20,y=seq(0.65,2.5,0.5))
# plot1: endemic channel (shaded area) vs confirmed case rate by week.
plot1<-ggplot(aes(x=week,y=outbreak_moving_limit),data=tem.d)+
geom_area(aes(fill="Endemic channel"))+
geom_line(aes(y=outbreak,col="Confirmed cases"),lwd=0.3)+
geom_point(aes(y=outbreak,col="Confirmed cases"),size=2.5,pch=15)+
theme_bw()+
scale_fill_manual(values =c("Endemic channel"=grey(0.7)))+
scale_color_manual(values =c("Confirmed cases"='red1'))+
scale_x_continuous(breaks=2:52,limits =c(2,52))+
theme(panel.grid.major.x =element_blank(),
panel.grid.minor.x =element_blank(),
panel.grid.major.y =element_line(linetype=2),
panel.grid.minor.y =element_blank(),
axis.line.x.top =element_blank(),
panel.border =element_blank(),
axis.line.y =element_line(linetype=1,colour="grey",size=0.4,lineend="butt"),
axis.line.x =element_line(linetype=1,colour="grey",size=0.4,lineend="butt"),
legend.position ="top",
axis.title.y =element_blank(),
legend.text =element_text(size=14)
)+
guides(fill=guide_legend(title =NULL),
color=guide_legend(title =NULL))+
xlab("Epidemiological week")
# plot2: predicted outbreak probability vs the district's alarm threshold.
plot2<-ggplot()+
geom_line(aes(x=week,y=outbreak_probability,col="Outbreak probability"),lwd=0.3,data=tem.d)+
geom_point(aes(x=week,y=outbreak_probability,col="Outbreak probability"),size=2.5,pch=15,data=tem.d)+
geom_line(aes(x=week,y=alarm_threshold,col="Alarm threshold"),lwd=0.7,data=tem.d,lty=2)+
theme_bw()+
scale_color_manual(values =c("Outbreak probability"='dark blue',
"Alarm threshold"="forest green"))+
scale_x_continuous(breaks=2:52,limits =c(2,52))+
theme(panel.grid.major.x =element_blank(),
panel.grid.minor.x =element_blank(),
panel.grid.major.y =element_line(linetype=2),
panel.grid.minor.y =element_blank(),
axis.line.x.top =element_blank(),
panel.border =element_blank(),
axis.line.y =element_line(linetype=1,colour="grey",size=0.4,lineend="butt"),
axis.line.x =element_line(linetype=1,colour="grey",size=0.4,lineend="butt"),
legend.position ="top",
axis.title.y =element_blank(),
legend.text =element_text(size=14)
)+
guides(fill=guide_legend(title =NULL),
color=guide_legend(title =NULL))+
xlab("Epidemiological week")
# plot3: combined view -- endemic channel, case rate, outbreak
# probability and alarm threshold on one panel.
plot3<-ggplot(aes(x=week,y=outbreak_moving_limit),data=tem.d)+
geom_area(aes(fill="Endemic channel"))+
geom_line(aes(y=outbreak,col="Confirmed cases"),lwd=0.3)+
geom_point(aes(y=outbreak,col="Confirmed cases"),size=2.5,pch=15)+
geom_line(aes(x=week,y=outbreak_probability,col="Outbreak probability"),lwd=0.3,data=tem.d)+
geom_point(aes(x=week,y=outbreak_probability,col="Outbreak probability"),size=2.5,pch=15,data=tem.d)+
geom_line(aes(x=week,y=alarm_threshold,col="Alarm threshold"),lwd=0.7,data=tem.d,lty=2)+
theme_bw()+
scale_fill_manual(values =c("Endemic channel"=grey(0.7)))+
scale_color_manual(values =c("Confirmed cases"='red1',
"Outbreak probability"='blue',
"Alarm threshold"="forest green"))+
scale_x_continuous(breaks=2:52,limits =c(2,52))+
theme(panel.grid.major.x =element_blank(),
panel.grid.minor.x =element_blank(),
panel.grid.major.y =element_line(linetype=2),
panel.grid.minor.y =element_blank(),
axis.line.x.top =element_blank(),
panel.border =element_blank(),
axis.line.y =element_line(linetype=1,colour="grey",size=0.4,lineend="butt"),
axis.line.x =element_line(linetype=1,colour="grey",size=0.4,lineend="butt"),
legend.position ="top",
axis.title.y =element_blank(),
legend.text =element_text(size=14)
)+
guides(fill=guide_legend(title =NULL),
color=guide_legend(title =NULL))+
xlab("Epidemiological week")
# plot4: weekly response category as points, with colored reference
# lines at each category level and the dat_lab labels on the left.
plot4<-ggplot(aes(x=week,y=response_cat),data=tem.d)+geom_point(pch=21,size=2.5)+
geom_hline(yintercept =0.5,col="yellowgreen",lwd=0.8)+
geom_hline(yintercept =1,col="orange",lwd=0.8)+
geom_hline(yintercept =1.5,col="brown",lwd=0.8)+
geom_hline(yintercept =2,col="red",lwd=0.8)+
geom_text(aes(x=x,y=y,label=response_cat,col=response_cat),data=dat_lab,
show.legend =F,hjust=0,nudge_x =0.2)+
theme_bw()+
scale_x_continuous(breaks=seq(2,52,2))+
scale_color_manual(values=c("No response"='yellowgreen',
"Initial response"='orange',
"Early response"='brown',
"Late/emergency response"='red'))+
theme(panel.grid.minor.y =element_blank(),
panel.grid.major.y =element_blank(),
panel.grid.major.x =element_blank(),
panel.grid.minor.x =element_blank(),
panel.border =element_blank(),
axis.line.x =element_line(linetype=1,
colour="grey",
size=0.4,
lineend="butt"),
axis.title.y =element_blank(),
axis.text.y=element_blank(),
axis.ticks.y =element_blank(),
legend.text =element_text(size=14))+
coord_fixed(6,ylim =c(0.3,3),xlim = c(-20,52))+
xlab("Epidemiological week")
# Wire the four plots into the dashboard outputs.
output$db2_plot1<-renderPlot(plot1)
output$db2_plot2<-renderPlot(plot2)
output$db2_plot3<-renderPlot(plot3)
output$db2_plot4<-renderPlot(plot4)
# Re-fetch the district parameter table for the summary outputs below.
cc_d<-paste("cc_v<-dB2_pars()$all_forDBS$param_values_",input$district_dB2,sep='')
eval(parse(text=cc_d))
# Alarm variable names, one per line.
output$a_vars<-renderPrint({
cat(as.character(al_var),sep ='\n')
})
# Parameter rows 2..(n-1) are shown as a table; the last row holds the
# spline variables, shown only when the "Spline" parameter is "TRUE".
s1<-nrow(cc_v)
s2<-s1-1
sp_n<-which(cc_v$parameter=="Spline")
output$alr_vars<-renderTable(cc_v[2:s2,])
cc_v_check<<-cc_v
output$s_vars<-renderPrint({
if(cc_v$value[sp_n]=="TRUE"){
s_c<-as.character(str_split(cc_v$value[s1],',',simplify =T))
cat(as.character(s_c),sep ='\n')
}
})
|
fb8eaff998ef4b82caad23567608aa20040aa635
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CoopGame/examples/shapleyShubikIndex.Rd.R
|
5a78993d50c1bbf1bf885abf4ea6c8630e6290bf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
shapleyShubikIndex.Rd.R
|
# Extracted example for CoopGame::shapleyShubikIndex() (from its Rd file).
library(CoopGame)
### Name: shapleyShubikIndex
### Title: Compute Shapley-Shubik index
### Aliases: shapleyShubikIndex

### ** Examples

# Shapley-Shubik power index of a simple game given by its
# coalition-value vector.
shapleyShubikIndex(v = c(0, 0, 0, 0, 1, 0, 1))

## No test:
# Example from Stach (2011): a weighted voting game with four players,
# quota q = 50 and weights 10, 10, 20, 30.
v <- weightedVotingGameVector(n = 4, q = 50, w = c(10, 10, 20, 30))
shapleyShubikIndex(v)
#[1] 0.08333333 0.08333333 0.25000000 0.58333333
## End(No test)
|
eb2602e63fb972b25e7b7e0a82c74f32c3c2c556
|
8f4db2544a30e207d2d7a8dcde9b232cba571b1d
|
/server.R
|
53c33b3297740e4ed41927f99f08e406f19093f3
|
[] |
no_license
|
ktdrv/NameExplorer
|
1911e9c88b0a440ff0c24894a049ff56fc69c45a
|
a6d36282f63a56279e378ea7e3e979bc6caabd1d
|
refs/heads/master
| 2020-05-20T00:10:12.452567
| 2014-06-23T22:39:21
| 2014-06-23T22:39:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,469
|
r
|
server.R
|
# Shiny server for a baby-name explorer: loads the SSA "yobYYYY.txt"
# files from the "names" directory once at startup, then serves a count
# plot and a rank plot for the requested name and year range.
library(plyr)
library(ggplot2)
library(shiny)

# One file per year, e.g. names/yob1990.txt.
filenames <- list.files(path = "names", pattern = "yob[[:digit:]]+.txt",
                        full.names = TRUE)

# Stack all years into one data frame with columns Name (lower-cased),
# Sex, Count, Rank (per sex, 1 = most common) and Year.
# (Renamed from `data`/`t`, which masked utils::data and base::t.)
name_data <- ldply(
  .data = filenames,
  .fun = function(fname) {
    yearly <- read.csv(file = fname, header = FALSE, sep = ",",
                       col.names = c("Name", "Sex", "Count"),
                       colClasses = c("character", "factor", "numeric"))
    # (optional filter, disabled) yearly <- subset(yearly, yearly$Count >= 100)
    # Rank names within each sex; lower-case names for lookup.
    yearly <- ddply(.data = yearly, .variables = c("Sex"), transform,
                    Rank = rank(-Count, ties.method = "min"),
                    Name = tolower(Name))
    # The four-digit year is embedded in the file name.
    year_num <- as.numeric(regmatches(fname,
                                      regexpr("([[:digit:]]{4})", fname)))
    yearly$Year <- year_num
    yearly
  })

shinyServer(function(input, output) {
  # Rows matching the requested name and year range (shared by both plots
  # instead of duplicating the subset expression in each renderPlot).
  selected <- reactive({
    subset(name_data,
           name_data$Name == tolower(input$name) &
             name_data$Year >= input$from_year &
             name_data$Year <= input$to_year)
  })

  # Stacked area of yearly counts, split by sex.
  output$count_plot <- renderPlot({
    ggplot(selected(), aes(x = Year, y = Count, fill = Sex)) +
      geom_area(position = "stack") +
      ylab(paste("Babies named", input$name)) + xlab("Year")
  })

  # Per-sex rank over time on a log scale (rank 1 at the bottom).
  output$rank_plot <- renderPlot({
    ggplot(selected(), aes(x = Year, y = Rank, color = Sex)) +
      geom_line() + facet_grid(. ~ Sex, scales = "free_y") +
      scale_y_log10(limit = c(1, 10000),
                    breaks = c(1, 10, 100, 1000, 10000, 10000)) +
      ylab("Rank of name") + xlab("Year")
  })
})
|
01e48ad8e3b7af30e03541a366dcd983efa95696
|
3f6dd3134f16de2f08aa6ec52e772d7e5c5422c0
|
/man/n_dist.ChiSquare.Rd
|
2cbd9e62bb80d2952fa457735d8f12d4b9bef37f
|
[
"MIT"
] |
permissive
|
imbi-heidelberg/blindrecalc
|
1ee7045d042f20b2d3392753838fb41a41a2017f
|
b6df80d1ff7b9605fbb6ee1adc38a7aac32f08e5
|
refs/heads/master
| 2022-11-24T15:18:40.995647
| 2022-11-22T14:30:31
| 2022-11-22T14:30:31
| 228,565,854
| 8
| 2
|
NOASSERTION
| 2022-11-22T14:30:33
| 2019-12-17T08:10:34
|
R
|
UTF-8
|
R
| false
| true
| 1,859
|
rd
|
n_dist.ChiSquare.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ChiSquare.R
\name{n_dist,ChiSquare-method}
\alias{n_dist,ChiSquare-method}
\title{Distribution of the Sample Size}
\usage{
\S4method{n_dist}{ChiSquare}(
design,
n1,
nuisance,
summary = TRUE,
plot = FALSE,
allocation = c("exact", "approximate"),
...
)
}
\arguments{
\item{design}{Object of class \code{ChiSquare} created by \code{setupChiSquare}.}
\item{n1}{Either the sample size of the first stage (if
\code{recalculation = TRUE} or the total sample size (if
\code{recalculation = FALSE}).}
\item{nuisance}{Value of the nuisance parameter in (0,1). For the
Chi-Squared test this is the overall response rate.}
\item{summary}{Is a summary of the sample size distribution desired?
Otherwise, a vector with sample sizes is returned.}
\item{plot}{Should a plot of the sample size distribution be drawn?}
\item{allocation}{Whether the allocation ratio should be preserved
exactly (\code{exact}) or approximately (\code{approximate}
or \code{kf_approx}). \code{approximate} uses the unrounded calculated
sample size in the sample size recalculation, \code{kf_approx}
rounds the sample size to the next integer.}
\item{...}{Further optional arguments.}
}
\value{
Summary and/or plot of the sample size distribution for
every nuisance parameter and every value of n1.
}
\description{
Calculates the distribution of the total sample sizes of designs
with blinded sample size recalculation for different values of the
nuisance parameter or of n1.
}
\details{
Only sample sizes that occur with a probability of at least 0.01%
are considered.
The method is only vectorized in either \code{nuisance}
or \code{n1}.
}
\examples{
d <- setupChiSquare(alpha = 0.025, beta = 0.2, r = 1, delta = 0.2)
n_dist(d, n1 = 20, nuisance = 0.25, summary = TRUE, plot = FALSE)
}
|
f64c6c154ec9f4d3a5c9b34d81e4dc737eac3bc5
|
48f35d86ca1ab91a8b6cb99d42ff099e64cb7ef9
|
/practice.R
|
b87296b1c3917b25ae139c9c2334e036e36f76c9
|
[] |
no_license
|
anastasia-lubinets/practice
|
1882ee622039c824a4aec4efd34650e7c532969e
|
7597c070302a8a9f1d869f75f4254d7650ccf17e
|
refs/heads/master
| 2020-12-28T14:51:05.334093
| 2020-02-12T05:58:11
| 2020-02-12T05:58:11
| 238,377,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,656
|
r
|
practice.R
|
## Practice script: basic vector and data-frame exercises in base R.

# What is the sum of the first 1000 positive integers?
sum(1:1000)

# 1. Average January high temperatures for Beijing, Lagos, Paris,
#    Rio de Janeiro, San Juan and Toronto (degrees Fahrenheit).
temp <- data.frame(City = c("Beijing", "Lagos", "Paris", "Rio de Janeiro", "San Juan", "Toronto"),
                   Temperature = c(35, 88, 42, 84, 81, 30))
temp

# A vector with just the city names.
city <- c("Beijing", "Lagos", "Paris", "Rio de Janeiro", "San Juan", "Toronto")
city

# Associate each temperature with its city via names().
city_temp <- c(35, 88, 42, 84, 81, 30)
names(city_temp) <- city
city_temp
city_temp[1:3] # named vector

# Temperatures of the first three cities (plain numbers, from the data frame).
temp$Temperature[1:3]

# Temperatures of Paris and San Juan, three equivalent ways.
city_temp[c("Paris", "San Juan")]
city_temp[c(3, 5)] # same thing, by position
temp[c(3, 5), ]    # same purpose, as data-frame rows

# The sequence 12, 13, ..., 73.
# (Renamed from `seq` so the base function seq() is not masked.)
int_seq <- 12:73
int_seq

# All positive odd numbers smaller than 100: start at 1, step by 2.
seq(1, 100, 2)

# From 6 up to (not past) 55 in increments of 4/7; length() gives the count.
seq(6, 55, 4/7)

# Class of a regular sequence with a non-integer step ("numeric")...
a <- seq(1, 10, 0.5)
class(a)
# ...versus seq(1, 10), which yields an integer sequence.
a <- seq(1, 10)
class(a)

# R defaults to numeric; the L suffix forces an integer.
a <- 1
class(a)
a <- 1L
class(a)

# Coerce a character vector to integers (and keep the result this time).
x <- c("1", "3", "5")
class(x)
x_int <- as.integer(x)
x_int

# Convert the data-frame temperatures from Fahrenheit to Celsius:
# C = 5/9 * (F - 32)
temp$Temperature
temp$Temperature <- 5/9 * (temp$Temperature - 32)
temp
|
2497d2d650482d5903caaa61db3c30459c63e19a
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609963183-test.R
|
32f4e12193b25d84edae1448d3f4d4b633649ffa
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
1609963183-test.R
|
testlist <- list(x = 704641023L, y = c(-11776948L, -702873809L, -445680667L, 1919251292L, 1651471657L, 677605230L, -2745809L, -1L, -256L, 115L, 0L, -1L, 0L, 1929379840L, 16776986L, 805250559L, -451142628L, 471604252L, 471604252L, 471604252L, 471604252L, 471604252L, 471652095L, NA, -10497L, 692857302L, 31L, NA, -16757711L, 909522687L, 1280112639L, -536870913L, -1L, -10752L, 61439L, -65460L, -1L, -1L, -40934L, 805306146L, -2130706433L, -1L, -160L, 439353343L, 570490879L, -1L, -1L, 450244607L, -14352384L, 1996550143L, -1L, -15007745L, -13631703L, -39521L, -1616936801L, -539557889L, -2745601L, -2687191L, 1277810176L, 7977L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
884481d99523242cbb9f91a8e63f69e0105b8e8e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/miceadds/examples/Reval.Rd.R
|
9fc9a4a3609aff4d8a99a4ca70547be9e30770a2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
Reval.Rd.R
|
library(miceadds)
### Name: Reval
### Title: R Utilities: Evaluates a String as an Expression in R
### Aliases: Reval Revalpr Revalprstr Revalpr_round Revalpr_maxabs
### Keywords: R utilities
### ** Examples
# This function is simply a shortage function
# See the definition of this function:
Reval <- function( Rstring, print.string=TRUE){
if (print.string){ cat( paste( Rstring ), "\n" ) }
eval.parent( parse( text=paste( Rstring )), n=1 )
}
Reval( "a <- 2^3" )
## a <- 2^3
a
## [1] 8
|
6325f4de2e2a86c243bb1cd2c51c36f2df7c7c91
|
570b70266d3a2a857f9476dd112487bf32136c70
|
/ui.R
|
a49a1f0f6130a1819028af5ddfc483d73e2d3d69
|
[] |
no_license
|
gregtozzi/IrisClassifier
|
03b9d33d91fdd95a8ae4fbfeaa5a6fe9a45185e2
|
89a04b1b5552db8cf4ddb14da9e78de3c3615c33
|
refs/heads/master
| 2021-12-10T20:44:54.228442
| 2021-11-24T19:41:09
| 2021-11-24T19:41:09
| 29,459,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,329
|
r
|
ui.R
|
library(shiny)
library(rCharts)
#shinyUI(fluidPage(theme = "css/cosmo.css",
shinyUI(fluidPage(
# Application title
titlePanel("Iris Classifier"),
fluidRow(
column(12,
h4("Overview"))),
fluidRow(
column(6,
p("Your goal is to identify species of irises based on physical measurements. This app provides access to two methods of solving this classification problem: random forests and regression trees. Follow the step-by-step directions below to select a training set, build a machine learning model, and apply the model to the test set."))),
fluidRow(
column(12,
h4("1. Build and Explore the Training Set"))),
fluidRow(
column(3,
p("Start by dividing the iris data into a test set and a training set using the slider. Explore the training set using the pull down menus to set the plot's x- and y-axes.")),
column(3,
sliderInput("testTrain",
label = "Percent of data to use as a training set",
min = 50,
max = 80,
value = 70),
selectInput("xaxis",
label = "Choose x-axis",
choices = names(iris)[-5],
selected = names(iris)[1]),
uiOutput("uiy")),
column(6,
showOutput("plot1", "highcharts"))
),
fluidRow(
column(12,
h4("2. Pick Covariates and Choose Model Settings"))
),
fluidRow(
column(3,
p("Now that you've taken a look at the training set, consider which covariates are most likely to produce a good classification model.")),
column(3,
div("Not sure which model to choose? Each model type offers advantages and disadvantages. If you need some background, consider Wikipedia's articles on", a(href="http://en.wikipedia.org/wiki/Decision_tree_learning", "decision trees"), "and", a(href="http://en.wikipedia.org/wiki/Random_forest", "random forests."))),
column(6,
htmlOutput("warnMsg"),
selectizeInput(inputId = "coVars",
label = "Choose covariates",
choices = names(iris[, 1:4]),
selected = names(iris[, 1:4]),
multiple = TRUE,
options = list(plugins = I("['remove_button']"))
),
radioButtons("modelMethod",
label = "Choose a method",
choices = list("Regression Tree" = "rpart",
"Random Forest" = "rf")))
),
fluidRow(
column(12,
h4("3. Evaluate your model's performance"))),
fluidRow(
column(2,
p('The scatterplot shows the test set. The flowers that your model misclassified are highlighted with large orange dots. You can change how the covariates are plotted against one another to see where the model did well and where it ran into challenges. You can also toggle the orange dots on and off by clicking "False Predictions" in the legend')),
column(4,
selectInput("xaxis2", label = "Choose x-axis",
choices = names(iris)[1:4],
selected = names(iris[1])),
uiOutput("uiy2"),
tableOutput("view"),
p("The table gives a detailed breakdown of the model's performance.")),
column(6,
showOutput("plot2", "highcharts")))
))
|
560da25887266f937cff8822f9f8115c19259880
|
267aa85b975d3348c5557505298364d6f1b5b7f4
|
/inst/shiny/shiny_ui/ui_main_constraints.R
|
bfe180b4089f4b8267626365ae55790c3b965b9a
|
[] |
no_license
|
matdoering/openPrimeRui
|
d2a6dad1038ddbe8e3541ccba6a2172271ccaf09
|
9b3f330bffff554986733cc85d2d40f7f8e2953f
|
refs/heads/master
| 2021-01-19T19:40:13.861823
| 2020-08-14T07:52:24
| 2020-08-14T07:52:24
| 101,199,926
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,538
|
r
|
ui_main_constraints.R
|
########
# Constraints main panel in UI
#######
tabPanel("Constraints",
value = "constraints_view",
icon = icon("resize-small", lib = "glyphicon"),
br(),
selectizeInput("selected_constraint_result", # select a constraint result to display
"Selected result",
choices = list(
Primers =
c(`Constraint Overview` = "overview",
`Dimerization` = "dimerization",
`Other` = "other_constraints"),
Coverage =
c(`Mismatches` = "no_mismatches",
`Other` = "other_cvg_constraints"),
#`Primer Efficiencies` = "primer_efficiency"),
Templates =
c(`Secondary Structures` = "template_secondary",
`Sequence Conservation` = "template_conservation")
)
),
#######
# Overview of constraints:
###########
conditionalPanel("input.set_meta_selector != 'all' &&
input.selected_constraint_result == 'overview'",
# show filtering/optimization stats
selectInput("selected_filtering_plot", "Output",
c("Evaluation" = "evaluation",
"Filtering overview" = "overview",
"Filtering coverage" = "coverage",
"Filtering runtime" = "runtime")
),
conditionalPanel(
# filtering overview
condition = "input.selected_filtering_plot == 'overview'",
fluidRow(
column(12, plotOutput("filtering_stats"), align= "center")
)
),
conditionalPanel(
# filtering coverage
condition = "input.selected_filtering_plot == 'coverage'",
fluidRow(
column(12, plotOutput("filtering_stats_cvg"), align = "center"),
column(12, plotOutput("exclusion_stats"), align = "center")
)
),
conditionalPanel(
# filtering runtime
condition = "input.selected_filtering_plot == 'runtime'",
fluidRow(
column(12, plotOutput("filtering_runtime"), align="center")
)
)
),
conditionalPanel("input.selected_constraint_result == 'overview'",
conditionalPanel("input.set_meta_selector == 'all' ||
input.selected_filtering_plot == 'evaluation'",
# evaluation stats
#column(12, plotOutput("constraint_fulfillment_plot"), align="center"), # removed due to redundancy with 'constraint_stats' plot which provides more infos
# text summary of constraints
column(12, uiOutput("ConstraintsTotal")),
br(),
# fulfillment matrix:
column(12, uiOutput("constraint_stats_ui"), align="center"),
# deviation of constraints:
column(12, uiOutput("constraint_deviations_ui"), align = "center")
)
),
########
# Dimerization
##########
conditionalPanel("input.selected_constraint_result == 'dimerization'",
# dimerization results
selectizeInput("selected_dimerization_data", "Dimerization type", c("Self-Dimerization", "Cross-Dimerization"),
options = list(
placeholder = 'Please select a type of dimerization.',
onInitialize = I('function() { this.setValue(""); }')
)
), # selectize: don't select anything without user input
selectInput("selected_dimerization_result", "Selected result", c("Summary" = "summary",
"Details" = "details"
)),
conditionalPanel(
# dimerization summary
condition = "input.selected_dimerization_result == 'summary'",
htmlOutput("dimer_text"),
plotOutput("dimer_distribution"),
DT::dataTableOutput("dimer_table") # warning (structure()) due to NULL value (caused by shiny, WONTFIX)
),
conditionalPanel(
# dimerization details
condition = "input.selected_dimerization_result == 'details'",
DT::dataTableOutput("dimer_data")
)
),
########
# Other constraints
###########
conditionalPanel("input.selected_constraint_result == 'other_constraints'",
selectizeInput("selected_other_result",
"Selected constraints",
multiple = TRUE,
choices = NULL, # set choices dynamically based on the settings
options = list(
placeholder = 'Please select some constraints',
onInitialize = I('function() { this.setValue(""); }')
)
#choices = c("Primer coverage" = "primer_coverage",
#"Primer length" = "primer_length",
#"Melting temperature range" = "melting_temp_range",
#"Melting temperature differences" = "melting_temp_diff",
#"GC clamp" = "gc_clamp",
#"Number of runs" = "no_runs",
#"Number of repeats" = "no_repeats",
#"Secondary structures" = "secondary_structure",
#"Primer specificity" = "primer_specificity",
#"Self-Dimerization" = "self_dimerization",
#"Cross-Dimerization" = "cross_dimerization")
),
plotOutput("constraint_plot_histogram")
),
##########
# Coverage constraints
##########
conditionalPanel("input.selected_constraint_result == 'no_mismatches'",
# nbr mismatch plot
plotOutput("constraint_plots_no_mismatches"),
selectInput("selected_primer_set_mismatches_direction", "Selected primer direction", c("fw", "rev")),
br(),
DT::dataTableOutput('mismatch_table')
),
# other cvg constraints:
conditionalPanel("input.selected_constraint_result == 'other_cvg_constraints'",
selectizeInput("selected_cvg_constraints",
"Selected coverage constraints",
multiple = TRUE,
choices = NULL, # set choices dynamically based on the selected settings
options = list(
placeholder = 'Please select a coverage constraint',
onInitialize = I('function() { this.setValue(""); }')
)
#choices = c("Primer efficiency" = "primer_efficiency",
#"Primer annealing" = "annealing_DeltaG",
#"Stop codons" = "stop_codon",
#"Terminal mismatches" = "terminal_mismatch_pos",
#"Coverage Model FPR" = "coverage_model")
),
plotOutput("constraint_plots_cvg_constraints")
),
########
# TEMPLATE CONSTRAINTS
##########
conditionalPanel("input.selected_constraint_result == 'template_secondary'",
plotOutput("template_secondary_plot")
),
conditionalPanel("input.selected_constraint_result == 'template_conservation'",
plotOutput("template_conservation_plot")
)
)
|
59881f6df29bf5edf023882ba37228de304bf5d5
|
128025338a34d4751aa16adc2109c57d97b0da3f
|
/R/JS.counts.R
|
466f6238a0cc4eeb28eec7a7530704c113e37a02
|
[] |
no_license
|
MurrayEfford/openCR
|
e2de3810d9582406d598d0637fa4dd864468b961
|
949c39e3c65be41be859499fb7d3c9645e951629
|
refs/heads/master
| 2023-07-21T22:28:22.287511
| 2023-07-11T08:52:45
| 2023-07-11T08:52:45
| 116,920,401
| 2
| 0
| null | 2021-12-02T20:16:51
| 2018-01-10T06:57:54
|
R
|
UTF-8
|
R
| false
| false
| 1,645
|
r
|
JS.counts.R
|
###############################################################################
# JS.counts.R
## 2018-04-20 openCR 1.2.0
## 2021-04-18 stratified
###############################################################################
JS.counts <- function(object, primary.only = TRUE, stratified = FALSE) {
if (stratified) {
lapply(object, JS.counts)
}
else {
first <- function(y) {
w <- 1:length(y)
as.numeric(w == min(which(y)))
}
last <- function(y) {
w <- 1:length(y)
as.numeric(w == max(which(y)))
}
object <- unsqueeze(object) # 2018-02-06
if (inherits(object, 'capthist')) {
if (ms(object)) {
ch <- suppressWarnings(reduce(object, by = 'all', outputdetector = 'nonspatial', verify = FALSE))
object <- join(ch)
}
else {
if (primary.only) object <- primaryonly(object)
}
CH <- apply(abs(object), 1:2, sum)>0 ## sum over detectors
}
else {
CH <- abs(object)>0 # 0/1
}
nsess <- ncol(CH)
ni <- apply(CH,2,sum)
firsti <- as.matrix(apply(CH,1,first))
lasti <- as.matrix(apply(CH,1,last))
ui <- apply(firsti,1,sum)
li <- apply(lasti,1,sum)
mi <- ni-ui
ri <- ni-li
zi <- cumsum(c(0,ri[-nsess]) - mi)
removed <- apply(object,2, function(x) sum(x<0))
data.frame(n=ni, R=ni-removed, m=mi, r=ri, z=zi)
}
}
############################################################################################
|
af728e860c253c5371e5f4135044b3cd3e29367d
|
7cceb23f6b1518bc032ac983ca3eeaa0726fd2da
|
/man/DuffyNGS_Annotation.Rd
|
e48b35717ee6058ae05b90cdec632a90d6e16d78
|
[] |
no_license
|
sturkarslan/DuffyNGS
|
309b07351bc08d6cb0bd6defe59a8c0c00be22df
|
661bde0bda2cec95203b241a6f8560fe14f0c607
|
refs/heads/master
| 2020-05-26T19:21:58.422462
| 2014-11-26T18:08:27
| 2014-11-26T18:08:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,523
|
rd
|
DuffyNGS_Annotation.Rd
|
\name{DuffyNGS_Annotation}
\title{
Annotation File of Sample-Specific Settings
}
\description{
The Annotation file defines the SampleID and several sample-specific settings for
each dataset to be processed. See \code{\link{DuffyNGS_Options}} for processing
settings that are not specific to each sample. Each column is a named field, and
all entries are tab-delimited. See \code{\link{AnnotationTable}} for more details.
}
\section{Annotation Fields (Keys)}{
\describe{
\item{SampleID}{
The SampleID for this sample. This SampleID keys for one entire row of annotation
details in the annotation file, for getting sample-specific details.
The SampleID is also used as a sample-specific prefix for all
files created during the processing of this sample.
}
\item{Filename}{
File(s) of raw read data to be aligned; most commonly FASTQ format. In the case of paired end
data, this field will be 2 filenames, separated by a comma with no intervening spaces.
}
\item{PairedEnd}{
Logical. Is this sample a paired end dataset, with 2 separate files of raw reads.
}
\item{DataType}{
A data type for this sample. One of: RNA-seq, DNA-seq, ChIP-seq
}
\item{Group}{
A GroupID for this sample, for processing tools that combine samples by various traits.
Most such tools allow you to specify the grouping field by a \code{groupColumn} argument.
}
\item{Color}{
A color for this sample, for processing tools that visualize multiple samples.
Most such tools allow you to specify the color field by a \code{colorColumn} argument.
}
\item{ReadSense}{
The read orientation for this sample. One of: sense, antisense
This can depend on the details of sample prep, library construction, and sequencer.
It is used if you expect to have strand specific reads, and controls which strand the
reads get assigned to.
}
\item{StrandSpecific}{
Logical. Did the details of sample prep, library construction, and sequencing generate
strand specific reads, and should reads only count toward expression totals, etc., if
they land on the correct (coding) strand.
}
\item{KeepIntergenics}{
Logical. Should extra 'non-genes' be explicitly added to capture expression, etc., in
the intergenic spaces. This field is queried by each tool, so it may be necessary to
re-run some steps. (e.g. differential expression is based on transcription, so adding
non-genes to the DE results requires that the transcription tool was run with
\code{KeepIntergenics = TRUE}
}
}}
|
c6d3546e13ed6cd6b57279e5560fa1fd0e28f16c
|
3b0cfdef3d8eac5720642bf78e2f5c031bdb188c
|
/howtomakeurllist.R
|
5661f4520852a5a1540adf3d55737950dc2701b9
|
[] |
no_license
|
williamhickman/test
|
ccc3df0d0fb36eeaada71d12406ca53f4a15d47c
|
dbd64c93d4cc80c6e1f80fb237318d8ec0e2127d
|
refs/heads/main
| 2023-08-04T05:03:04.169995
| 2021-09-16T17:17:49
| 2021-09-16T17:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 890
|
r
|
howtomakeurllist.R
|
#making a list of URL's (article titles are from Excel)
library(readxl)
# load data
testurls <- read_xl("~/Desktop/Fall 2021/testurls.xlsx")
library(tidyverse)
# get rid of spaces and replace with +
testurls<-
testurls%>%
mutate(url2=str_replace_all(url2," ","+"))
# put everything into one unified url
testurls$complete <- paste(testurls$url1,testurls$url2,testurls$url3)
# cut it down to only include the column with the whole url
testurls <- testurls$complete
# make the testurls vector into a data frame
testurls<-as.data.frame(testurls)
#make sure there aren't any spaces, replace these with nothing
testurls<-
testurls%>%
mutate(testurls=str_replace_all(testurls," ",""))
# # export to a text file
write.table(testurls,"testurlsanothertry.txt",sep="\t",row.names=FALSE, col.names=FALSE,quote=FALSE)
#class(testurls)
#testurls[,colnames(testurls)=="complete"]
|
638cc65c16784b4de5fd61a751559dd91020a4b9
|
55ec4a0425e1219cf7cd1504b1efd78d60f2a32d
|
/EDA.R
|
05e958d22587dd7043bda3330c75ee2d3c7a935c
|
[
"MIT"
] |
permissive
|
nihalij2/CU_churn
|
13763c2c140dcbed80981cf3869b08f74acf5b3e
|
c212c56963d38262601d5fde3f5773651692337a
|
refs/heads/master
| 2020-04-03T13:07:21.067552
| 2018-10-29T00:29:19
| 2018-10-29T00:29:19
| 155,274,423
| 0
| 0
| null | 2018-10-29T20:10:13
| 2018-10-29T20:10:12
| null |
UTF-8
|
R
| false
| false
| 2,490
|
r
|
EDA.R
|
library("lubridate")
library("zipcode")
member = read.csv("Member Dataset.csv")
#creating age groups
member$Age_bin = ifelse(member$Age < 14, "Kids", ifelse(member$Age < 22, "Teens",
ifelse(member$Age<35, "Millenials",
ifelse(member$Age<55, "Middel_Aged","55+"))))
#combining diffrent loan and savings accounts to one of each kind
member[is.na(member)] <- 0
member$Loan_jul <- member$Visa + member$Mortgage +member$Home.Equity+member$Other.Loan+member$Vehicle
member$Savings_jul <- member$Checking +member$Savings+member$CD+member$IRA+member$Money.Market
member$Loan_aug <- member$Visa.1 + member$Mortgage.1 +member$Home.Equity.1+member$Other.Loan.1+member$Vehicle.1
member$Savings_aug <- member$Checking.1 +member$Savings.1+member$CD.1+member$IRA.1+member$Money.Market.1
member$Loan_sep <- member$Visa.2 + member$Mortgage.2 +member$Home.Equity.2+member$Other.Loan.2+member$Vehicle.2
member$Savings_sep <- member$Checking.2 +member$Savings.2+member$CD.2+member$IRA.2+member$Money.Market.2
#Adding type of customer
member$Cust_type = ifelse(member$ClosedDate == "", "Active", ifelse(mdy(member$ClosedDate) < ymd("2018-07-31"),
"Closed_bfr_jul","Churned_Recently"))
#Duration
member$loyalty_months = ifelse(member$Cust_type== "Active",
interval(mdy(member$EarliestMemAcctDate), mdy("10-01-2018")) %/% months(1),
interval(mdy(member$EarliestMemAcctDate), mdy(member$ClosedDate)) %/% months(1))
#Zip correction
Zip_correct = function(zip_old){
zip_new = rep(paste0(rep('0',5-nchar(zip_old)),zip_old,collapse = ""))
return(zip_new)
}
member$ZipCode_Validated = as.character(member$ZipCode_Validated)
for(i in 1:nrow(member)){
member$zip[i] = Zip_correct(member$ZipCode_Validated[i])
}
EDA_variables = member[,c("Member_ID","zip","Age_bin","Cust_type","loyalty_months",
"July.Trans", "August.Trans","September.Trans",
"Loan_jul","Savings_jul","Loan_aug","Savings_aug","Loan_sep","Savings_sep"
)]
#adding city and state from zipcode library
data(zipcode)
EDA_variables = merge(EDA_variables,zipcode[,c("zip","city","state")],"zip",all.x = T)
write.csv(EDA_variables,"CU_EDA_variables.csv", row.names = F)
|
2c2aefac152ad27c5094f7681a84fce877b03e12
|
a837471a5b02854fa291f72338ea709a81b9f44e
|
/man/plotTime.Rd
|
1036f299b601eecbd608264b8a1c09bcdcbe4ad5
|
[
"Artistic-2.0"
] |
permissive
|
bvieth/powsimR
|
4dab889467c698d65b66a65608183f6e49b2203d
|
d9e49ace330214513761e4be37396e4afed96e86
|
refs/heads/master
| 2023-08-14T14:52:40.322693
| 2023-07-31T12:14:42
| 2023-07-31T12:14:42
| 88,427,309
| 102
| 27
|
Artistic-2.0
| 2021-06-28T07:22:53
| 2017-04-16T16:22:12
|
R
|
UTF-8
|
R
| false
| true
| 2,588
|
rd
|
plotTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Plot.R
\name{plotTime}
\alias{plotTime}
\title{Visualize computational time}
\usage{
plotTime(evalRes, Table=TRUE, Annot=TRUE)
}
\arguments{
\item{evalRes}{The output of \code{\link{evaluateSim}}.}
\item{Table}{A logical vector. If \code{TRUE}, a table of average running time in seconds per sample size setup and pipeline step is printed.}
\item{Annot}{A logical vector. If \code{TRUE}, a short figure legend under the plot is included.}
}
\value{
A ggplot object.
}
\description{
This function plots the computational running time of the simulations.
}
\examples{
\dontrun{
# estimate gene parameters
data("SmartSeq2_Gene_Read_Counts")
Batches = data.frame(Batch = sapply(strsplit(colnames(SmartSeq2_Gene_Read_Counts), "_"), "[[", 1),
stringsAsFactors = F,
row.names = colnames(SmartSeq2_Gene_Read_Counts))
data("GeneLengths_mm10")
estparam_gene <- estimateParam(countData = SmartSeq2_Gene_Read_Counts,
readData = NULL,
batchData = Batches,
spikeData = NULL, spikeInfo = NULL,
Lengths = GeneLengths_mm10, MeanFragLengths = NULL,
RNAseq = 'singlecell', Protocol = 'Read',
Distribution = 'ZINB', Normalisation = "scran",
GeneFilter = 0.1, SampleFilter = 3,
sigma = 1.96, NCores = NULL, verbose = TRUE)
# define log fold change
p.lfc <- function(x) sample(c(-1,1), size=x,replace=T)*rgamma(x, shape = 1, rate = 2)
# set up simulations
setupres <- Setup(ngenes = 10000, nsims = 10,
p.DE = 0.1, pLFC = p.lfc,
n1 = c(20,50,100), n2 = c(30,60,120),
Thinning = c(1,0.9,0.8), LibSize = 'given',
estParamRes = estparam_gene,
estSpikeRes = NULL,
DropGenes = FALSE,
sim.seed = 66437, verbose = TRUE)
# run simulation
simres <- simulateDE(SetupRes = setupres,
Prefilter = "FreqFilter",
Imputation = NULL,
Normalisation = 'scran', Label = 'none',
DEmethod = "limma-trend", DEFilter = FALSE,
NCores = NULL, verbose = TRUE)
# evaluation
evalsimres <- evaluateSim(simRes = simres)
plotEvalSim(evalRes = evalsimres, Annot = TRUE)
plotTime(evalRes = evalsimres, Annot = TRUE)
}
}
\author{
Beate Vieth
}
|
598bb24e2c07c632f950b1a698a47acd75c7a22c
|
487be43c960999e4b0eb56eb8c574922c5eb350e
|
/test.R
|
c7f9e3f8157f26478e2fd73588f4fda041b5bcbd
|
[] |
no_license
|
river-fish/tandemQueue
|
5076d8262ebdc7648d3ce10b39f54255b45ee2d6
|
a4011c81a93d4bf9de7dbbdb2f3770884d89012a
|
refs/heads/master
| 2021-06-05T17:17:35.590327
| 2016-11-02T18:38:58
| 2016-11-02T18:38:58
| 72,096,814
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80
|
r
|
test.R
|
x = rep(1,10)
y = 1:10
plot(x, y, xlim=c(-10, 10))
z = x + y
plot(z)
plot(x, z)
|
63a388e432b7c72ed50e40f9f2a04db811f54abd
|
02f053ce70b065724d4a02619fb402adcc0ec997
|
/analysis/boot/boot890.R
|
34ba2aa891fe65a0d9c761ae575d13afc2c3c674
|
[] |
no_license
|
patperry/interaction-proc
|
27950482929240bba55c7d0f2f8c5235d770feea
|
cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4
|
refs/heads/master
| 2021-01-01T06:11:47.125853
| 2012-12-04T20:01:42
| 2012-12-04T20:01:42
| 673,564
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,766
|
r
|
boot890.R
|
seed <- 890
log.wt <- -13.968555670627097
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226174.85775653107
df.resid <- 35402
df <- 165
coefs <- c(6.764702510328753, 5.76191260705328, 5.760174753200904, 5.4139060794440566, 5.058940944175658, 4.890921999777053, 4.7991142293376035, 4.60732171248199, 4.4170640667111085, 4.284810372796421, 4.361039574856681, 4.140543879303489, 4.010914543844265, 3.9798479051084286, 3.7918626812102887, 3.5460264894724682, 3.249589278457191, 2.969747920696486, 2.496839867155912, 2.089267102393612, 1.5218699251833334, 0.8865586284265778, 0.960011645458911, 0.2467757424528636, 0.43536764310949105, -1.3537906123629249, -0.22583509510556873, 1.0300220288005466, 1.1295147963089254, -1.2405802274931725, -1.953055761723925, -2.675611166703232, -0.36817084204138817, 0.7864289869169435, 1.3215043729034182, -1.3276566838345665, -0.13835638577217996, -1.4629736611717554, 0.27503922010087484, -0.8512942769602984, 0.9366152113818522, 0.8132705336207336, -0.6883569608513482, -2.3256987901414887, -0.7428018368966357, -0.8811102629389085, -0.19726965644175684, 0.5728194928273049, 0.5987973020692604, -1.1699126627100598, -0.1834598535075854, 0.9209246372877635, -3.183682296720477, 1.6694262214197075, 0.7346884036663331, 1.0936053619956076, -2.071429238331321, -3.8661628190680794e-2, -0.23031893920936003, 1.315882442916821, 1.1274483369244952, 0.9814624378648019, -2.1605650641511183, -1.1505544693052945, -0.6169647412124076, 0.40453200322090394, 0.4471328943182846, -0.2894194688840154, -1.0095417962450512, -0.7158498646707379, -2.4953772218030323, -0.4125252760649351, 0.4437995591962163, 1.0439544358212594, 0.6744975529659746, -0.9421840811130179, -1.4268372287143691, -1.1732940893062458, -9.182744180050816e-2, 0.658558102742629, 1.1029632631342585, 0.13926561281542185, 0.14444110163669172, -2.3861552948166387, -1.3259777302403886, 0.37833000528119626, 1.2071716035315285, 0.6289330794714388, 0.976395117978346, -1.8071702301136192, 0.43963228833007667, 0.9669599710523884, 0.751838133471648, 0.30412253675176915, -0.11661232798906186, 1.3705531885932205, -0.38978388802812697, 
0.5385405956288241, 0.13472095044212448, -0.1331076830347742, 0.5740150495570225, -0.43188664650890346, 0.783084851895068, 6.984159850215468e-2, 0.5701985556568165, 0.8224183642970626, 1.0579737831276443, 1.7967772780088157e-3, -0.38525096958058924, -0.6204322176506432, 0.536303899399661, 0.6691663317550125, 1.6591376579840154, -0.7629094607526183, -0.19442764243402577, -0.7793649343498502, 0.7847397184224472, -0.394066295335442, 0.4509165201994887, 0.5526577198061001, -0.5412375844381978, -0.24659864260685516, -1.1955999266907245, -0.5871062291242192, 0.4572689117728778, 0.6697383172715347, -6.577698075269518e-2, 1.0051295174724686, -0.660896822077857, -0.3315464786934002, 0.2958343246517396, 0.8011610347901575, 0.8427193824651008, 0.5055025643553948, -1.683512286737957e-2, 1.069267973058198, -0.32518688639750964, 0.9908968711405861, 0.6338746813043911, 0.9678617941481379, 0.7083392371057157, -0.5040488812050028, -1.1939663332490045, 0.5939226089386234, 0.4543349351696752, 0.6007931068163188, -0.18148950600557168, -0.3753778930757886, -1.751307692280318, 1.2462029821802214, 0.1081232856058102, 1.11944032003823, -0.18038011838978385, -7.351071817357592e-2, 2.862730714285766e-3, -1.3292317528870174, -1.1218606008747531, 0.8051246507343836, 1.1360513418876854, -0.27868941775954503, 1.5710321003418415, -0.37822627467923936, -0.15309161755762885, 0.18869769412697282, 1.1151417461911974)
|
cd7035f671d2460e1f54962be99ad072220ae999
|
eebabe55f31dab6c9e1a435bde3be0468d7d73ce
|
/man/getBezierAdj4Arrw.Rd
|
debb3c45cfee4ecd695701de80b34db494357c4a
|
[] |
no_license
|
gforge/Gmisc
|
279763beb0bab02d75b6e44c6a7f8d5a79249a25
|
968d4edfd84453c33c483d5a1f1f0825fae98c8e
|
refs/heads/master
| 2023-08-31T09:30:16.367537
| 2023-08-25T21:55:45
| 2023-08-25T21:55:45
| 10,400,448
| 43
| 16
| null | 2022-01-03T18:23:20
| 2013-05-31T08:01:23
|
R
|
UTF-8
|
R
| false
| true
| 611
|
rd
|
getBezierAdj4Arrw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bezier_private_functions.R
\name{getBezierAdj4Arrw}
\alias{getBezierAdj4Arrw}
\title{Gets the bezier points adjusted for an arrow}
\usage{
getBezierAdj4Arrw(x, y, arrow_length, length_out = 100)
}
\arguments{
\item{x}{The x start and end points}
\item{y}{The spline control points}
\item{arrow_length}{The desired length of the arrow}
\item{length_out}{Increases the resolution for the final bezier
points, i.e. generating more fine-grained intervals}
}
\value{
list
}
\description{
Gets the bezier points adjusted for an arrow
}
|
b7776273662c5a302fa0c6050c9e19cc26586d76
|
693d88d479f96e91be7607de520875861f3f6e4d
|
/man/propcattlecheck.Rd
|
9a0a242c3df51cb4077be7f0f05970fb71141fb3
|
[] |
no_license
|
vijaydairyf/DMMongoDB
|
c4239c144f3357856177855e2fa82baf72bd34bf
|
920bbbbaed086df6d271be6c47dd2f4bcbe4341a
|
refs/heads/master
| 2020-12-05T19:20:18.297176
| 2019-12-18T21:44:41
| 2019-12-18T21:44:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,299
|
rd
|
propcattlecheck.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propcattlecheck.R
\name{propcattlecheck}
\alias{propcattlecheck}
\title{Check cattle access to ALMS accross whole property}
\usage{
propcattlecheck(property, days = NULL, username = NULL,
password = NULL)
}
\arguments{
\item{property}{this is a list or single property name as a character entry}
\item{days}{the number of days to search over, if no days entered then it only searches for cattle that have crossed the ALMS today}
\item{username}{if you don't have a username set up using the dmaccess function you can pass a username, if no value added then the function looks for a value from dmaccess via keyring}
\item{password}{if you include a username you will also need to add a password contact Lauren O'Connor if you don't have access}
}
\value{
a dataframe that provides a list including the RFID number, datetime and weights of cattle
}
\description{
This function pulls in daily weights for cattle for specified periods. It searches based on all cattle across a named property. If you need assistance please email \email{info@datamuster.net.au} to seek help or suggest improvements.
}
\author{
Dave Swain \email{dave.swain@datamuster.net.au} and Lauren O'Connor \email{lauren.oconnor@datamuster.net.au}
}
|
52e7f259d28c0be7acbfb922388e91de21d848cf
|
74d3ccdbeeee691888e89073039b47a9b737d78f
|
/tests/OandaMachineLearningHiddenMarkovModel.R
|
689ffdfc17ff724a4f7bc0e537a4a5986d5cd9b7
|
[
"MIT"
] |
permissive
|
elephann/RQuantTrader
|
e5f8813eb880ce05cf997f01b9732cfaa57b995f
|
067c715c036a5d86596b8589d617ec795a8dc3c1
|
refs/heads/master
| 2022-03-21T17:35:51.078741
| 2019-09-07T13:18:40
| 2019-09-07T13:18:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,953
|
r
|
OandaMachineLearningHiddenMarkovModel.R
|
# Regime-detection script: fit a two-state Gaussian Hidden Markov Model to
# AUD/USD 15-minute returns pulled from Oanda, then inspect per-regime
# return statistics and z-scores.
library('depmixS4')
# library('quantmod')
set.seed(1)
loadQuantPackages()
# Currency pair to analyse
Cur1 = 'AUD'
Cur2 = 'USD'
# Pull 500 bars of 15-minute prices from Oanda; use the mid series
PRICE.OA = prepareForexOandaPrices(oanda.count = 500, Cur1 = Cur1, Cur2 = Cur2, oanda.granularity = 'M15')
price.oa = PRICE.OA$OA.MID
# One-period discrete returns of the close; the first (NA) observation is dropped
oa.ret = ROC(Cl(price.oa), n = 1, type = 'discrete')[-1]; names(oa.ret) = 'target'
# Fit a Hidden Markov Model with two states to the AUD/USD returns stream
# (original comment said "S&P500", but the data above is Oanda AUD/USD)
hmm <- depmix(target ~ 1, family = gaussian(), nstates = 2, data=as.data.frame(oa.ret))
hmmfit <- fit(hmm, verbose = F)
# Posterior state sequence and per-state probabilities (column 1 = assigned state)
post_probs <- posterior(hmmfit)
# Plot the returns stream and the posterior
# probabilities of the separate regimes
layout(1:2)
plot(oa.ret, type='l', main='Regime Detection', xlab='', ylab='Returns')
chart_Series(Cl(price.oa))
matplot(post_probs[,-1], type='l', main='Regime Posterior Probabilities', ylab='Probability')
legend(x='bottomleft', c('Regime #1','Regime #2'), fill=1:2, bty='n')
# Per-regime mean return
mean(oa.ret[post_probs[,1]==1])
mean(oa.ret[post_probs[,1]==2])
# mean(oa.ret[post_probs[,1]==3])
# Per-regime and overall return volatility
sd(oa.ret[post_probs[,1]==1])
sd(oa.ret[post_probs[,1]==2])
sd(oa.ret)
# sd(oa.ret[post_probs[,1]==3])
# Number of observations assigned to each regime
table(post_probs[,1])
# Price behaviour conditional on regime
plot(Cl(price.oa)[post_probs[,1]==1])
plot(Cl(price.oa)[post_probs[,1]==2])
# NOTE(review): MaRatio(), Sd() and ZScore() are defined further down in
# this file; sourcing the script top-to-bottom fails at these lines unless
# the definitions are moved above or the file is run interactively.
spreads.MA <- MaRatio(Cl(price.oa), 20)
spreads.SD <- Sd(Cl(price.oa), 20)
Z.Score <- ZScore(Cl(price.oa),spreads.MA,spreads.SD)
chart_Series(Z.Score)
print(chart_Series(Z.Score[post_probs[,1]==2]))
plot(post_probs[,1], type = 'l')
# Rolling moving average: mean of the series over a sliding window of N
# observations (via zoo::rollapply). The result keeps the input's class;
# its single column is renamed 'Price.Ratio.MA'.
MaRatio <- function(x, N) {
  rolling_mean <- rollapply(x, N, mean)
  colnames(rolling_mean) <- "Price.Ratio.MA"
  rolling_mean
}
# Rolling standard deviation: sd of the series over a sliding window of N
# observations (via zoo::rollapply). The single result column is renamed
# 'Price.Ratio.SD'.
Sd <- function(x, N) {
  rolling_sd <- rollapply(x, N, sd)
  colnames(rolling_sd) <- "Price.Ratio.SD"
  rolling_sd
}
# Z-score of a price series relative to its rolling mean and rolling
# standard deviation: (price - mean) / sd, elementwise. The single result
# column is renamed 'Z.Score'.
ZScore <- function(Price.Ratio, Price.Ratio.MA, Price.Ratio.SD) {
  z <- (Price.Ratio - Price.Ratio.MA) / Price.Ratio.SD
  colnames(z) <- "Z.Score"
  z
}
|
27344ecead9118e31b7387fde278834439a2332b
|
3608fad236c6d1485761ee60456546c260b52eab
|
/doc/data/lch-eps/lch-pf/make-pf-plot-data.r
|
428a7c9c7249395fdbaf6810f82db62ef28cfbc9
|
[] |
no_license
|
kreuvf/phd-public
|
ae4cf4e70ec04b823540dccd4e6d6ba6ee281904
|
10a1245b93b631371ffda9c9e3a9ba399f01c1d5
|
refs/heads/master
| 2022-04-25T02:03:13.717890
| 2020-04-25T19:23:17
| 2020-04-25T20:10:50
| 258,830,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,956
|
r
|
make-pf-plot-data.r
|
# # # # # # # # # # # # # # # # # # # #
# Generate graphs of LCHF0
# LCH-PF, two figures, each with four y-axes
# # # # # # # # # # # # # # # # # # # #
library(splitstackshape) # cSplit
library(lubridate) # as.duration
library(plyr) # round_any, join
library(zoo) # rollapply
library(readxl) # read_excel
# # # # # # # # # # # # # # # # # # # #
# Data structure and type definitions
# Conversion of sample number to sampling time in seconds after inoculation
# blk1smptim = block 1 sample times: sample numbers 0-8
blk1smptim <- read.table(text =
"sample.block.1 t
0 3600
1 50400
2 90600
3 135000
4 172800
5 223800
6 259800
7 316800
8 329400", # D600 only sample at the end of the process
header = TRUE, colClasses = c("numeric", "numeric"))
# Store times as lubridate durations for consistent time arithmetic
blk1smptim$t <- as.duration(blk1smptim$t)
# blk2smptim = block 2 sample times: block 2 ran longer (samples 0-11)
blk2smptim <- read.table(text =
"sample.block.2 t
0 3600
1 50400
2 90600
3 135000
4 172800
5 223800
6 259800
7 316800
8 346800
9 403200
10 432000
11 482400",
header = TRUE, colClasses = c("numeric", "numeric"))
blk2smptim$t <- as.duration(blk2smptim$t)
# Dilution factors (multiply measured concentrations by these to recover
# the undiluted values)
# lchfa1df = LCHFA1 dilution factor
lchfa1df <- 5.05 # 61 µl for neutralization
# lchfa223df = LCHFA2_2 and LCHFA2_3 dilution factor
lchfa223df <- 400
# # # # # # # # # # # # # # # # # # # #
# Function definitions
# Function for reading in fermentation data
#
# Reads one fermenter's raw on-line log file into a data frame.
#
# Args:
#   df:   ignored; retained (now with a NULL default) only so existing
#         positional call sites such as getFermDat(indon[[i]], file) keep
#         working. The original code never evaluated this argument and
#         relied on R's lazy evaluation to avoid errors.
#   file: path to a tab-separated log file with five columns
#         (date/time, elapsed time hh:mm:ss, pH, DO in %, CO2 in A.U.)
#
# Returns:
#   A data frame with pH, DO and CO2 columns plus the elapsed time split
#   into separate HH / MM / SS columns (temporary names; see prepFermDat).
getFermDat <- function(df = NULL, file){
  # Read in data; the first column (date/time) is dropped via colClasses
  raw <- read.table(file, header = TRUE,
                    # Names for description of the expected content only
                    col.names = c("Date and time",
                                  "Time after inoculation in hh:mm:ss", "pH value",
                                  "DO in %", "CO2 in A.U."),
                    colClasses = c("NULL", "character", "numeric", "numeric",
                                   "numeric"),
                    sep = "\t", dec = '.')
  # Split the "hh:mm:ss" elapsed-time string into three columns
  out <- as.data.frame(
    cSplit(raw, splitCols = "Time.after.inoculation.in.hh.mm.ss", sep = ":",
           drop = TRUE))
  # Give temporary names (prepFermDat() later collapses HH/MM/SS into 't')
  colnames(out) <- c("pH value", "DO in %", "CO2 in A.U.", "HH", "MM", "SS")
  return(out)
}
# Function for preparing fermentation data
#
# Tidies one fermenter's raw log: collapses the HH/MM/SS columns into a
# single elapsed-time column (seconds, floored to 30 s), keeps DO and CO2,
# and smooths DO with a 50-point rolling mean.
#
# Args:
#   df:        data frame as returned by getFermDat()
#   shorthand: fermenter label (e.g. "F1") appended to the column names
#
# Returns:
#   A data frame with columns t (process time in s), DO.<shorthand>
#   (dissolved oxygen in % of the maximum calibrated at process start) and
#   CO2.<shorthand> (CO2 in off-gas).
prepFermDat <- function(df, shorthand){
  # Drop the pH column; keep DO, CO2, HH, MM, SS
  dat <- df[, c(2, 3, 4, 5, 6)]
  # Collapse HH/MM/SS into seconds and floor to the nearest 30 s so that
  # timestamps line up when the fermenters are joined later. Flooring is
  # needed because the F2 log records seconds as 14, 44, 14, 45, 15, 44,
  # 15, 45, ... which would otherwise produce ~25% duplicate values.
  elapsed <- as.duration(dat$SS + (dat$MM * 60) + (dat$HH * 3600))
  dat$SS <- round_any(elapsed, 30, floor)
  # Keep only elapsed time, DO and CO2, in that order
  dat <- dat[, c(5, 1, 2)]
  colnames(dat) <- c("t",
                     paste('DO', shorthand, sep='.'),
                     paste('CO2', shorthand, sep='.'))
  # Smooth DO with a 50-point rolling mean; pad the leading edge with the
  # mean of the first 50 values and the trailing edge with the median of
  # the last 50 values
  dat[[2]] <- rollapply(dat[[2]], 50, mean,
                        fill = c(mean(head(dat[[2]], 50)),
                                 NA, median(tail(dat[[2]], 50))))
  dat
}
# # # # # # # # # # # # # # # # # # # #
# Input fermentation/on-line data
# Data of 8 fermenters: time, pH, DO, CO2
# Vector with filenames
infiles <- c('f1.txt', 'f2.txt', 'f3.txt', 'f4.txt',
             'f5.txt', 'f6.txt', 'f7.txt', 'f8.txt')
# String with relative path to files (note the trailing slash)
directory <- 'ferm-dat/'
# Create 'indon' as empty list; indon = in data, online
indon <- list()
# Read every log file. The first argument of getFermDat() is never used;
# pass NULL explicitly instead of the original indon[[i]], which was an
# out-of-bounds subscript that only avoided an error because R never
# forced the lazily evaluated argument.
for (i in seq_along(infiles)) {
  indon[[i]] <- getFermDat(NULL, paste(directory, infiles[[i]], sep = ''))
}
# # # # # # # # # # # # # # # # # # # #
# Input off-line sample data
# Data of 8 fermenters: time, D600, CDM, Glucose concentration (glucose
# assay), Molar mass at RI peak, Furfural (found in block 2, only; data for
# all 8 available)
# Not considered: EPS concentration (at the end, from precipitation), Xylose
# concentration (PMP, too unreliable), EPS concentration and monomer
# compositions (PMP, too unreliable)
# Input D600
d600 <- as.data.frame(
cSplit(
read.table('ferm-dat/d600.txt', header = TRUE, sep = "\t", dec = ",",
col.names = c("Time after inoculation in hh:mm", "D600.F1", "D600.F2", "D600.F3", "D600.F4", "D600.F5", "D600.F6", "D600.F7", "D600.F8"),
na.strings = "-",
colClasses = c("character", rep("numeric", 8)), nrows = 13),
splitCols = "Time.after.inoculation.in.hh.mm", sep = ":", drop = FALSE))
colnames(d600) <- c("t", "D600.F1", "D600.F2", "D600.F3", "D600.F4", "D600.F5", "D600.F6", "D600.F7", "D600.F8", "HH", "MM")
# Input CDM
cdm <- read_excel("lchf0-cdm.xlsx", sheet = "BTM", col_names = TRUE, skip = 1)
# Input glucose assay data (LCHFA2); skip rows with visually appealing header
lchfa2 <- as.data.frame(
read_excel("glc-results.xlsx", sheet = "conc",
col_names = c("junk", "UID", "A.418.-A.480.", "Glc.conc."),
col_types = c("text", "text", "text", "numeric"),
skip = 4))
# Input EPS monomer data; skip rows with type, unit, analyte, signal
epsamc <- as.data.frame(
read_excel("eps-amc.xlsx", sheet = "PMP",
col_names = TRUE,
col_types = c("text", "text", rep("numeric", 21)),
na = "n.a.", skip = 4))
# Input molar mass at RI peak data
mp <- as.data.frame(
read_excel("sec-malls-results.xlsx", sheet = "Tabelle1",
col_names = c("Sample.ID", "Peak elution time", "Mn in g/mol", "Mw in g/mol", "Mp in g/mol", "Injection datetime"),
col_types = c("text", rep("numeric", 4), "text"),
na = "-", skip = 1))
# Input furfural data
fur <- as.data.frame(
read_excel("glc-xyl-fur-hplc.xlsx", sheet = "PMP",
col_names = c("junk", "UID", "FermGlc.PMP", "FermXyl.PMP", "HMF", "Fur"),
col_types = c("text", "text", rep("numeric", 4)),
na = "n.a.", skip = 4))
# # # # # # # # # # # # # # # # # # # #
# Process data
# Transform time into seconds and round to nearest 30 for comparison, get
# rid of unneeded columns, name columns according to their origin
for (i in 1:8) {
indon[[i]] <- prepFermDat(indon[[i]], paste('F', i, sep = ''))
}
# Transform D600 data
# Time data to duration in seconds
# Remove no longer necessary columns
d600$t <- as.duration((d600$MM * 60) + (d600$HH * 3600))
d600 <- d600[, 1:9]
# Split into block 1 and 2
blk1d600 <- d600[ , 1:5]
blk2d600 <- d600[ , c(1, 6:9)]
# Remove NAs
blk1d600 <- blk1d600[complete.cases(blk1d600),]
blk2d600 <- blk2d600[complete.cases(blk2d600),]
# Remove last block 1 sample
# it's unnecessary, because there is no other data to correlate it with
blk1d600 <- blk1d600[1:(nrow(blk1d600)-1),]
# Transform CDM data
# Columns fermenter, sample, concentration stay; others will be removed
cdm <- cdm[, c(1, 2, 8)]
colnames(cdm) <- c("Fermenter", "Sample", "CDM")
# Remove NA-only rows
cdm <- cdm[rowSums(is.na(cdm)) != ncol(cdm),]
# Reshape format to Sample, Fermenter n ...
cdm <- as.data.frame(reshape(cdm, idvar = "Sample", timevar = "Fermenter", direction = "wide"))
# Replace "Sample" column with sample times
# blk2smptim used, because 8th blk1 sample is D600 only
# +1, because R starts indexing content at 1
cdm[,1] <- blk2smptim[(cdm[,1]+1),2]
# Adapt column names
colnames(cdm) <- c("t", "CDM.F1", "CDM.F2", "CDM.F3", "CDM.F4", "CDM.F5", "CDM.F6", "CDM.F7", "CDM.F8")
# Split into block 1 and 2
blk1cdm <- cdm[ , 1:5]
blk2cdm <- cdm[ , c(1, 6:9)]
# Remove NAs
blk1cdm <- blk1cdm[complete.cases(blk1cdm),]
blk2cdm <- blk2cdm[complete.cases(blk2cdm),]
# Transform glucose assay data
# Remove column 'junk' and absorption differences
lchfa2 <- lchfa2[ , c(2, 4)]
# Split into two parts:
# PMP glucose (LCHFA2_0 + LCHFA2_1)
# glucose in fermenter (LCHFA2_2 + LCHFA2_3)
pmpglc <- lchfa2[1:152, ]
fermglc <- lchfa2[153:nrow(lchfa2), ]
# Transform PMP glucose data
# Keep rows from before hydrolysis only, renumber rows
pmpglc <- pmpglc[c(1:68),]
rownames(pmpglc) <- NULL
# Reverse dilution (5 µl sample + 45 µl ddH2O)
pmpglc[, 2] <- 10 * pmpglc[, 2]
# Split UID: LCHFA2_0.B9_F1.0 --> LCHFA2 0.B9 F1 0
pmpglc <- as.data.frame(cSplit(pmpglc,
splitCols = "UID", sep ="_", drop = TRUE, type.convert = FALSE))
pmpglc <- as.data.frame(cSplit(pmpglc,
splitCols = "UID_3", sep =".", drop = TRUE, type.convert = FALSE))
# Throw away columns: experiment identifier, plate + coordinates
pmpglc <- pmpglc[ , c(1, 4, 5)]
colnames(pmpglc) <- c("PMPGlc", "Fermenter", "Sample")
# Auto-convert columns now
pmpglc[, 3] <- type.convert(pmpglc[, 3])
pmpglc[, 2] <- type.convert(pmpglc[, 2])
# Set negative values to zero
pmpglc[ ,1] <- with(pmpglc, ifelse(PMPGlc < 0, 0, PMPGlc))
# Reorder columns: Sample, Fermenter, Glc. conc.
pmpglc <- pmpglc[ , c(3, 2, 1)]
# Reshape content: columns for every fermenter
pmpglc <- as.data.frame(reshape(pmpglc, idvar = "Sample", timevar = "Fermenter", direction = "wide"))
# Renumber rows
rownames(pmpglc) <- NULL
# Transform Fermentation glucose data
# Renumber rows
rownames(fermglc) <- NULL
# Remove rows with standards and 10 l fermentation
fermglc <- fermglc[-c(9:10, 19:20, 29:30, 39:41, 50:52, 61:63, 72:73, 82:84, 180:212), ]
# Renumber rows
rownames(fermglc) <- NULL
# Fix wrong labelling in raw data: sample F7.2 in (E|F)2 is actually F7.1
fermglc[c(113, 125), 1] <- "LCHFA2_3.E2_F7.1"
# Split UID: LCHFA2_2.A1_F1.0 --> LCHFA2 2.A1 F1.0
fermglc <- as.data.frame(cSplit(fermglc,
splitCols = "UID", sep ="_", drop = TRUE, type.convert = FALSE))
# Throw away columns: UID_1, UID_2
fermglc <- fermglc[ , c(1, 4)]
# Rename column
colnames(fermglc) <- c("FermGlc", "Fermenter.Sample")
# Create one column for every sample
# Max. two values per sample --> mark via duplicate
# reshape data to get two rows and lots of columns
fermglc['dpl'] <- as.numeric(duplicated(fermglc[ , c(2)]))
fermglc <- as.data.frame(reshape(fermglc, idvar = "dpl", timevar = "Fermenter.Sample", direction = "wide"))
# Add row with means ignoring NAs
fermglc <- rbind(fermglc, sapply(fermglc, mean, na.rm = 1))
# Remove unneeded rows
fermglc <- fermglc[3, ]
# Re-reshape into long format
fermglc <- as.data.frame(reshape(fermglc))
# Re-number rows
rownames(fermglc) <- NULL
# Drop now useless dpl column
fermglc <- fermglc[ , 2:3]
# Rename columns
colnames(fermglc)[2] <- "FermGlc"
# Split into fermenter and sample
fermglc <- as.data.frame(cSplit(fermglc,
splitCols = "Fermenter.Sample", sep =".", drop = TRUE, type.convert = FALSE))
# Rename columns
colnames(fermglc)[2:3] <- c("Fermenter", "Sample")
# Auto-convert columns now
fermglc[, 3] <- type.convert(fermglc[, 3])
# FAINARU RISHEIPU (final reshape): Sample FermGlc.F1 FermGlc.F2 ...
fermglc <- as.data.frame(reshape(fermglc, idvar = "Sample", timevar = "Fermenter", direction = "wide"))
# Re-number rows
rownames(fermglc) <- NULL
# Apply dilution factor
fermglc[, 2:ncol(fermglc)] <- lchfa223df*fermglc[, 2:ncol(fermglc)]
# Convert unit from mg/l to g/l
fermglc[, 2:ncol(fermglc)] <- 0.001*fermglc[, 2:ncol(fermglc)]
# Replace sample number with sample time
# blk2smptim used, because 8th blk1 sample is D600 only
# +1, because R starts indexing content at 1
fermglc[,1] <- blk2smptim[(fermglc[,1]+1),2]
# Rename sample column
colnames(fermglc)[1] <- "t"
# Transform EPS monomer data
# Use appropriate column names
# Remove unused rows and colums
# Remove NA only columns
# Subtract glucose (from glucose assay)
colnames(epsamc) <- c(
"Sample number", "Sample name", "Man", "GlcUA", "GlcN", "GalUA",
"Rib", "Rha", "Gen", "GalN", "GlcNAc", "Lac",
"Cel", "Glc", "GalNAc", "Gal", "Ara", "Xyl",
"Fuc", "2dGlc", "2dRib", "HMF", "Fur")
# Throw away junk rows start (standards)
epsamc <- epsamc[21:nrow(epsamc), ]
# Throw away junk rows at the end (standards at the end, 10 l fermentation)
epsamc <- epsamc[1:(nrow(epsamc)-17), ]
# Throw away junk cols (Ara, Xyl)
epsamc <- epsamc[, -(17:18)]
# Throw away junk cols (sample number, HMF, Fur)
epsamc <- epsamc[, 2:(ncol(epsamc)-2)]
# Set sample #10 of fermenter 8 GalUA value to "NA": no GalUA detected in MS!
epsamc[67,5] <- NA
# Remove all columns which contain only NA
epsamc <- Filter(function(x)!all(is.na(x)), epsamc)
# Split sample name column twice to get columns for fermenter and sample
epsamc <- as.data.frame(cSplit(epsamc,
splitCols = "Sample name", sep ="_", drop = TRUE, type.convert = FALSE))
epsamc <- as.data.frame(cSplit(epsamc,
splitCols = "Sample name_2", sep =".", drop = TRUE, type.convert = FALSE))
# Throw away column: plate + coordinates
epsamc <- epsamc[ , -6]
# Reorder columns: sample, fermenter and then sugars in alphabetical order
epsamc <- epsamc[ , c(7, 6, 5, 4, 2, 1, 3)]
colnames(epsamc) <- c("Sample", "Fermenter", "AMCGal", "AMCGlc", "AMCGlcN", "AMCMan", "AMCRha")
# Unify fermenter naming: capital "F" followed by fermenter number
epsamc[ , 2] <- paste("F", epsamc[ , 2], sep = "")
# Auto-convert columns now
epsamc[, 1] <- type.convert(epsamc[, 1])
epsamc[, 2] <- type.convert(epsamc[, 2])
# Add new dummy column for monomer sums
epsamc$AMCSum <- rep(NA, nrow(epsamc))
# Reshape content: columns for every fermenter
epsamc <- as.data.frame(reshape(epsamc, idvar = "Sample", timevar = "Fermenter", direction = "wide"))
# Renumber row names
rownames(epsamc) <- NULL
# Reorder columns: first step puts "Sample" at the end
epsamc <- epsamc[ , order(names(epsamc))]
epsamc <- epsamc[ , c(ncol(epsamc), 1:(ncol(epsamc)-1))]
# Apply dilution factor
epsamc[, 2:ncol(epsamc)] <- lchfa1df*epsamc[, 2:ncol(epsamc)]
# Subtract monomeric glucose before hydrolysis
for (i in 1:8) {
amccol <- paste("AMCGlc.F", i, sep = "")
pmpcol <- paste("PMPGlc.F", i, sep = "")
epsamc[ , amccol] <- epsamc[ , amccol] - pmpglc[ , pmpcol]
}
# Calculate sums
# Copy dataframe
# Replace NAs by 0s
# Finally calculate sums
temp.df <- epsamc
temp.df[is.na(temp.df)] <- 0
for (i in 1:8) { # i = Fermenters
galcol <- paste("AMCGal.F", i, sep = "")
glccol <- paste("AMCGlc.F", i, sep = "")
glcncol <- paste("AMCGlcN.F", i, sep = "")
mancol <- paste("AMCMan.F", i, sep = "")
rhacol <- paste("AMCRha.F", i, sep = "")
sumcol <- paste("AMCSum.F", i, sep = "")
for (j in 1:nrow(epsamc)) {
epsamc[j, sumcol] <-
temp.df[j, galcol] + temp.df[j, glccol] +
temp.df[j, glcncol] + temp.df[j, mancol] +
temp.df[j, rhacol]
}
}
remove(temp.df)
# Replace sample number with sample time
# blk2smptim used, because 8th blk1 sample is D600 only
# +1, because R starts indexing content at 1
epsamc[,1] <- blk2smptim[(epsamc[,1]+1),2]
# Change column name accordingly
colnames(epsamc)[1] <- "t"
# Transform molar mass data
# Throw away unneeded columns
mp <- mp[ , c(1, 5)]
# Throw away columns with standards, LiNO3, LCHF1 samples, co-worker samples
mp <- mp[-c(1:13, 15, 23, 32, 41, 50:51, 64, 77, 90, 103:nrow(mp)) ,]
# Split Sample ID: 1.0 -> 1 0
mp <- as.data.frame(cSplit(mp,
splitCols = "Sample.ID", sep =".", drop = TRUE, type.convert = FALSE))
# Rename columns
colnames(mp) <- c("Mp", "Fermenter", "Sample")
# Unify fermenter naming: capital "F" followed by fermenter number
mp[ , 2] <- paste("F", mp[ , 2], sep = "")
# Reorder columns
mp <- mp[ , c(3, 2, 1)]
# Reshape: Sample Mp.F1 Mp.F2 ...
mp <- as.data.frame(reshape(mp, idvar = "Sample", timevar = "Fermenter", direction = "wide"))
# Convert column
mp[ ,1] <- type.convert(mp[ ,1])
# Renumber rows
rownames(mp) <- NULL
# Replace sample number with sample time
# blk2smptim used, because 8th blk1 sample is D600 only
# +1, because R starts indexing content at 1
mp[,1] <- blk2smptim[(mp[,1]+1),2]
# Change column name accordingly
colnames(mp)[1] <- "t"
# Transform furfural data
# Remove unnecessary columns (junk, FermGlc.PMP, FermXyl.PMP, HMF)
fur <- fur[ , c(2, 6)]
# Remove unnecessary rows (standards, 10 l samples)
fur <- fur[-c(1:13, 90:nrow(fur)), ]
# Split UID: B01_F1.0 -> B01 F1 0
fur <- as.data.frame(cSplit(fur,
splitCols = "UID", sep ="_", drop = TRUE, type.convert = FALSE))
fur <- as.data.frame(cSplit(fur,
splitCols = "UID_2", sep =".", drop = TRUE, type.convert = FALSE))
# Remove unnecessary column
fur <- fur[, -2]
# Rename columns
colnames(fur)[2:3] <- c("Fermenter", "Sample")
# Convert column
fur[ , 3] <- type.convert(fur[ , 3])
# Set negative values to 0 (necessary for plotting)
fur[ ,1] <- with(fur, ifelse(Fur < 0, 0, Fur))
# Convert unit: from mg/l to g/l
fur[ ,1] <- fur[ ,1]/1000
# Reshape: Sample Fur.F1 Fur.F2 ...
fur <- as.data.frame(reshape(fur, idvar = "Sample", timevar = "Fermenter", direction = "wide"))
# Renumber rows
rownames(fur) <- NULL
# Replace sample number with sample time
# blk2smptim used, because 8th blk1 sample is D600 only
# +1, because R starts indexing content at 1
fur[,1] <- blk2smptim[(fur[,1]+1),2]
# Change column name accordingly
colnames(fur)[1] <- "t"
# # # # # # # # # # # # # # # # # # # #
# Merge data
# For every variable, sequence is: block 1 (blk1dat), block 2 (blk2dat)
# Start with fermentation data
blk1dat <- indon[[1]]
for (i in 2:4) {
blk1dat <- join(blk1dat, indon[[i]], by = c("t"), type = "full", match = "all")
}
blk2dat <- indon[[5]]
for (i in 6:8) {
blk2dat <- join(blk2dat, indon[[i]], by = c("t"), type = "full", match = "all")
}
# Reorder columns to cluster DO and CO2
blk1dat <- blk1dat[ , c(1, 2, 4, 6, 8, 3, 5, 7, 9)]
blk2dat <- blk2dat[ , c(1, 2, 4, 6, 8, 3, 5, 7, 9)]
# D600 data
blk1dat <- join(blk1dat, blk1d600, by = c("t"), type = "full", match = "all")
blk2dat <- join(blk2dat, blk2d600, by = c("t"), type = "full", match = "all")
# CDM data
blk1dat <- join(blk1dat, blk1cdm, by = c("t"), type = "full", match = "all")
blk2dat <- join(blk2dat, blk2cdm, by = c("t"), type = "full", match = "all")
# Fermenter glucose data
blk1dat <- join(blk1dat, fermglc[ , 1:5], by = "t", type = "full", match = "all")
blk2dat <- join(blk2dat, fermglc[ , c(1, 6:9)], by = "t", type = "full", match = "all")
# Sum of EPS aldose monomers
blk1dat <- join(blk1dat, epsamc[ , c(1, 42:45)], by = "t", type = "full", match = "all")
blk2dat <- join(blk2dat, epsamc[ , c(1, 46:49)], by = "t", type = "full", match = "all")
# Molar mass at RI peak data
blk1dat <- join(blk1dat, mp[ , 1:5], by = "t", type = "full", match = "all")
blk2dat <- join(blk2dat, mp[ , c(1, 6:9)], by = "t", type = "full", match = "all")
# Furfural data
blk1dat <- join(blk1dat, fur[ , 1:5], by = "t", type = "full", match = "all")
blk2dat <- join(blk2dat, fur[ , c(1, 6:9)], by = "t", type = "full", match = "all")
# Remove unnecessary rows of blk1dat (+4 empty)
blk1dat <- blk1dat[1:(nrow(blk1dat)-254), ]
# Remove unnecessary rows of blk2dat (every row after last sample)
blk2dat <- blk2dat[1:(nrow(blk2dat)-108), ]
# Generate statistics and join: for every variable type, compute per-row
# (per-timestamp) 10th/50th/90th percentile across the block's four
# fermenters, append the results as new columns, then save both blocks.
quantiles <- c(0.1, 0.50, 0.9)
coltypes <- c("DO", "CO2", "D600", "CDM", "FermGlc", "AMCSum", "Mp", "Fur")
for (i in seq_along(coltypes)) {
  # Each variable type occupies 4 consecutive columns (one per fermenter)
  # starting at column 2; column 1 is the time column 't'
  col.first <- 2 + (4 * (i - 1))
  col.last <- col.first + 3  # renamed from 'stop', which shadowed base::stop()
  newcols <- c(paste(coltypes[i], quantiles[1], sep = "."),
               paste(coltypes[i], quantiles[2], sep = "."),
               paste(coltypes[i], quantiles[3], sep = "."))
  statdat <- as.data.frame(t(apply(blk1dat[, col.first:col.last], 1, quantile, quantiles, na.rm = TRUE)))
  blk1dat[ , newcols] <- statdat
  statdat <- as.data.frame(t(apply(blk2dat[, col.first:col.last], 1, quantile, quantiles, na.rm = TRUE)))
  blk2dat[ , newcols] <- statdat
}
save(blk1dat, file = "block1-plot-data.Rda")
save(blk2dat, file = "block2-plot-data.Rda")
|
e6c09a6396ce83665c6c492632b9ea44a0d61994
|
1d8b2cd0581dd0874bd68815e3650466f2deddca
|
/Sec3L18/Vector_index_element_selection.R
|
3d655318b9f7d84cfe51c22ffdc3dfdc7a7cb2f8
|
[] |
no_license
|
whosivan/R-BootCamp-ML-
|
4cd616f38111c59f6c7b1c4d41c8af7d61a6d56b
|
a6a572c63e2981821631218f4f88af00a1891fbe
|
refs/heads/master
| 2021-01-23T14:15:40.454305
| 2017-12-20T05:50:26
| 2017-12-20T05:50:26
| 102,680,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
Vector_index_element_selection.R
|
# Tutorial script: vector creation and element selection by index.
x <- c(1,123, 534, 13, 4) #combine
y <- seq(201, 250, 11) #sequence
z <- rep("Hi!", 3) #replicate
w <- c("a", "b", "c", "d", "e")
w
# Positive indices select single elements (R is 1-indexed)
w[1]
w[2]
w[3]
# Negative indices exclude elements
w[-1]
w[-3]
# Index vectors select (or exclude) several elements at once
w[c(1, 3, 5)]
w[c(-2, -4)]
w[-3:-5]
w[1:2]
# Indexing past the end of the vector returns NA
w[7]
# Excluding a position that does not exist returns the vector unchanged
w[-100]
w[-800]
# Ranges extending past the end pad the result with NAs
w[1:9]
w[5:9]
#----
a <- c("a", "b", "c")
b <- c(1, 2, 3)
a
b
# Collapse the numeric vector into one comma-separated string
toString(b)
# NOTE: this line errors ("non-numeric argument to binary operator")
# because 'a' is a character vector; presumably left in deliberately to
# demonstrate the error.
a + b
is.character(b)
|
bc292ec0bb4e3a0665b6004087183a139986ca04
|
24c6301ee0b35cf45faea2dc246ed2ad45818efc
|
/SFAUR-TEC_ENERO_2017.R
|
bd21a8841ce66b00aedcbc4f35a8f54104484d68
|
[] |
no_license
|
maikelonu/Research_SFASUR-TEC_River_Analysis_Model
|
3baded960ab7a1421e4615037d5d3887fa3a1b95
|
a923f436847ecb6129e4064eeba0bbdc80894ee0
|
refs/heads/master
| 2020-09-26T10:04:22.105150
| 2019-12-06T02:54:37
| 2019-12-06T02:54:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60,492
|
r
|
SFAUR-TEC_ENERO_2017.R
|
# Streamflow and Flood Analysis Using R (SFAUR-TEC)
# Instituto Tecnologico de Costa Rica (www.tec.ac.cr)
# Maikel Mendez-M (mamendez@itcr.ac.cr);(maikel.mendez@gmail.com)
# Luis Alexander Calvo-V (lcalvo@itcr.ac.cr);(lualcava.sa@gmail.com)
# This script is structured in R (www.r-project.org)
# General purpose: Generate various graphical and numerical products
# for streamflow and flood analysis at river catchments
# Custom functions: FrequencyAnalysis() and BootstrapCI() created by
# Dave Hutchinson (mtb_dave[at]yahoo[dot]ca)
# Script conceptualization: based on SAAS (Streamflow Analysis and Assessment Software)
# by Robert A. Metcalfe (http://people.trentu.ca/~rmetcalfe/SAAS.html)
# Input files:
# Output files:
# Workspace is cleared
# NOTE(review): rm(list = ls()) in a shared script wipes the caller's
# entire workspace; prefer running the script in a fresh R session.
rm(list = ls())
# working directory is defined
# NOTE(review): a hard-coded absolute Windows path breaks portability;
# consider a project-relative path instead.
# setwd("B:\\R_ITC\\SAAS_diario")
setwd("B:\\R_ITC\\SAAS_diario")
# CRAN libraries are loaded
require(dplyr)
require(EcoHydRology)
require(evd)
require(extRemes)
require(ggplot2)
require(ggthemes)
require(hydroTSM)
require(lmom)
require(lmomco)
require(lubridate)
require(pastecs)
require(reshape)
require(reshape2)
require(scales)
require(tidyr)
require(xts)
require(zoo)
# ////////////////////////
# BLOCK: Custom Functions
# ////////////////////////
# Custom Function: FrequencyAnalysis
# Fits a given extreme value distribution to an extreme value series
# @param series A vector representing an extreme value series (e.g., annual maximum flood)
# @param distribution A three-character name of the extreme value distribution (see ?dist.list())
# @param nep A vector of non-exceedance probabilities
# @return A list object containing: (1) distribution information and (2) output
# (quantile estimates at various non-exceedance probabilities)
# @export
# @import lmomco
FrequencyAnalysis <- function(series, distribution, nep = nonexceeds()) {
  distribution <- tolower(distribution)
  # Distributions supported by lmomco, plus 'lp3' (log Pearson Type 3),
  # which this function handles as pe3 fitted to log10-transformed data
  supported <- c('lp3', dist.list())
  if (!any(distribution %in% supported)) {
    stop(sprintf("Distribution '%s' not recognized!", distribution))
  }
  transformed <- distribution == 'lp3'
  if (transformed) {
    # log transform series and fit Pearson Type 3 to the logs
    series <- log10(series)
    distribution <- 'pe3'
  }
  # sample L-moments -> distribution parameters -> quantiles at 'nep'
  sample.lmoms <- lmom.ub(series)
  fitted.par <- lmom2par(sample.lmoms, type = distribution)
  quantiles <- par2qua(f = nep, para = fitted.par)
  if (transformed) {
    # report the distribution under its original name and back-transform
    distribution <- 'lp3'
    quantiles <- 10^quantiles
  }
  # result: distribution info plus quantile estimates at each
  # non-exceedance probability (and its return period)
  list(
    distribution = list(
      name = distribution,
      logTransformed = transformed,
      parameters = fitted.par),
    output = data.frame(nep = nep, rp = prob2T(nep), estimate = quantiles))
}
# Custom Function: BootstrapCI
# Conducts bootstrap to randomly sample an extreme value series 'n' times for a
# specified distribution to estimate confidence interval for each given
# non-exceedance probability.
# @param fitted.model Fitted distribution (see ?frequencyAnalysis)
# @param series A vector representing an extreme value series (e.g., annual maximum flood)
# @param distribution A three-character name of the extreme value distribution (see ?dist.list())
# @param n.resamples An integer representing number of re-samples to conduct
# @param nep A vector of non-exceedance probabilities
# @param ci The confidence interval
# @export
# @import lmomco
# @return A list containing a data frame of confidence bounds for quantile estimates for each
# non-exceedance probability, a matrix containing estimated distribution parameters for each resample,
# and a matrix of quantile estimates for each resample
# Parametric bootstrap: fit 'distribution' to 'series' once, then draw
# n.resamples synthetic samples of the same length from the fitted
# distribution, refit each, and collect the quantile estimates to form a
# 'ci'-level confidence interval at every non-exceedance probability.
BootstrapCI <- function(series, distribution, n.resamples=1E3, nep=nonexceeds(), ci=0.90) {
  # compute frequency analysis (the central fit on the observed data)
  fa <- FrequencyAnalysis(series=series, distribution=distribution, nep=nep)
  # extract fitted model parameters and flag as to whether the
  # distribution is based on log transformed data
  base.params <- fa$distribution$parameters
  isTransformed <- fa$distribution$logTransformed
  # create output matrices to store parameter sets and quantile estimates
  # (one row per resample; quantile columns named by their NEP)
  param.sets <- matrix(NA, nrow = n.resamples, ncol = length(base.params$para))
  quantile.estimates <- matrix(NA, nrow = n.resamples, ncol = length(nep),
                               dimnames = list(NULL, nep) )
  # begin bootstrapping procedure
  for(i in 1:n.resamples) {
    valid.moments <- FALSE
    j <- 0
    # allow up to 20 re-tries to re-sample; a random sample can yield an
    # invalid L-moment set that cannot be fitted
    while(!valid.moments & j < 20) {
      # sample 'n' random variates from base distribution
      # NOTE(review): 'data' shadows utils::data() here; harmless locally
      data <- rlmomco(n=length(series), base.params)
      # compute sample l-moments
      sample.moms = lmom.ub(data)
      valid.moments <- are.lmom.valid(sample.moms)
      j <- j + 1
    }
    # error handling: abort if no valid sample was found within 20 tries
    if(!valid.moments) {
      stop("Bootstrapping failed to sample valid l-moments")
    } else {
      # estimate distribution parameters
      dist.par <- lmom2par(sample.moms, base.params$type)
      # store the distribution parameters
      param.sets[i,] <- dist.par$para
      # estimate quantiles at NEP
      estimated <- qlmomco(nep, dist.par)
      # convert quantile estimates to real values if
      # distribution was transformed
      if(isTransformed) estimated <- 10^estimated
      # store the quantiles at the desired AEP values
      quantile.estimates[i,] <- estimated
    }
  }
  # now calculate confidence limits for quantiles: two-sided interval,
  # e.g. ci = 0.90 -> 5% and 95% sample quantiles of the bootstrap runs
  # (note: 'ci' below intentionally overwrites the 'ci' level argument)
  p <- c((1-ci)/2, (1+ci)/2)
  ci <- sapply(colnames(quantile.estimates),
               FUN=function(x){
                 quantile(quantile.estimates[,x], probs=p, na.rm=TRUE)})
  # now return list object containing output: lower/true/upper estimate
  # per NEP, plus the raw bootstrap parameter sets and quantiles
  return(
    list(
      ci = data.frame(
        nonexceed_prob = nep,
        lower = as.vector(ci[1,]),
        true = fa$output$estimate,
        upper = as.vector(ci[2,]) ),
      parameters = param.sets,
      quantiles = quantile.estimates)
  )
}
boundary.LM <- function(max.X,min.X,max.Y,min.Y) {
  # Computes the label position for an LM-model ggplot2 plot: 5% in from
  # the left edge of the horizontal range and 10% above the top of the
  # vertical range.
  #
  # Args:
  #   max.X, min.X: horizontal-axis extent
  #   max.Y, min.Y: vertical-axis extent
  #
  # Returns:
  #   A numeric vector c(x, y) with the label position.
  c(min.X + 0.05 * (max.X - min.X),
    min.Y + 1.10 * (max.Y - min.Y))
}
eq.PAR <- function(lm.model) {
  # Builds a one-line parameter summary (adjusted R2, intercept, slope and
  # slope p-value, each rounded to 4 decimals) for annotating an LM-model
  # plot in ggplot2.
  #
  # Args:
  #   lm.model: a fitted lm object
  #
  # Returns:
  #   A single character string for display in ggplot2.
  model.summary <- summary(lm.model)
  adj.r2 <- round(model.summary$adj.r.squared, 4)
  intercept <- round(lm.model$coef[[1]], 4)
  slope <- round(lm.model$coef[[2]], 4)
  p.value <- round(model.summary$coef[2, 4], 4)
  paste("Adj R2 = ", adj.r2,
        "; Intercept =", intercept,
        "; Slope =", slope,
        "; p-value =", p.value)
}
# /////////////////////////////////////////////////
# BLOCK: Creating and organizing input data.frames
# /////////////////////////////////////////////////
# Observations data.frame is loaded
df.obs <- read.table("inputflow.txt",header=T,sep="\t",quote="")
# "DATE" class character is converted to class-date and added as a new column named "DATE2"
temp <- df.obs$DATE
df.obs$DATE2 <- as.Date(temp, format = "%d/%m/%Y")
# A date-class query is requested (TRUE or FALSE)
is.Date(df.obs$DATE2)
# lubridate Library functions are applied to df.obs to create new columns contaning:
# YEAR, YEAR_CH, MONTH, MONTH_CH, WEEK, DAY, DAY_MONTH
df.obs$YEAR <- year(df.obs$DATE2) # Years component of a date-time
df.obs$YEAR_CH <- as.character(year(df.obs$DATE2)) # Years component of a date-time as character
df.obs$MONTH <- month(df.obs$DATE2, label = FALSE) # Months component of a date-time
df.obs$MONTH_CH <- month(df.obs$DATE2, label = TRUE) # Months component of a date-time as character
df.obs$WEEK <- week(df.obs$DATE2) # Weeks component of a date-time
df.obs$DAY <- yday(df.obs$DATE2) # Days component of a date-time
df.obs$DAY_MONTH <- days_in_month(df.obs$DATE2) # Number of days in the month of a date-time
# Descriptive statistics are extracted for df.obs data.frame for "FLOW" column only
df.obs.desc <- round((as.data.frame(stat.desc(df.obs$FLOW))),3)
# colnames in df.obs.desc data.frame are renamed
colnames(df.obs.desc) <- c("FLOW")
# Total length of df.obs data.frame is requested
obs.length <- length(df.obs$FLOW)
# A subset data.frame containing only "YEAR","DAY" and "FLOW" is created
df.obs.sub <- df.obs[,c("YEAR","DAY","FLOW")]
# A pivot data.frame is created organizing data by "YEAR" and "FLOW"
df.pivot <- df.obs.sub %>% spread(YEAR,FLOW)
# Apply function is applied to df.pivot to generate mean, median and sd at row level
df.pivot$MEAN <- round(apply(df.pivot [ , 2:18],1,mean),3)
df.pivot$MEDIAN <- round(apply(df.pivot [ , 2:18],1,median),3)
df.pivot$SD <- round(apply(df.pivot [ , 2:18],1,sd),3)
# NA values are omitted from df.pivot (leap years)
df.pivot <- na.omit(df.pivot)
# df.pivot data.frame is ordered (sorted) by "DAY" value
df.pivot <- df.pivot[order(df.pivot[ ,1]) , ]
# A prefix "YEAR" is added to df.pivot column names
colnames(df.pivot) <- paste("YEAR", colnames(df.pivot), sep = "_")
# Descriptive statistics are extracted from df.pivot data.frame
df.pivot.desc <- round(stat.desc (df.pivot[ , 2:21]),3)
# A temporal date-class is re-incorporated in df.pivot data.frame
df.pivot$tempDATE <- as.Date(df.pivot$YEAR_DAY - 1, origin = "2015-01-01")
# lubridate Library functions are applied to create new columns to df.pivot data.frame
# contaning: MONTH, MONTH_CH, and DAY_YEAR, DAY_MONTH
df.pivot$MONTH <- month(df.pivot$tempDATE, label = FALSE) # Months component of a date-time
df.pivot$MONTH_CH <- month(df.pivot$tempDATE, label = TRUE) # Months component of a date-time as character
df.pivot$DAY_YEAR <- yday(df.pivot$tempDATE) # Days component of a date-time
# ////////////////////////////////
# BLOCK: Flood Frequency Analysis
# ////////////////////////////////
# CRAN library data.table is loaded at this point to avoid conflicts with other libraries
require(data.table)
# A subset data.frame is created base on df.obs data.frame
df.obs.sub2 <- data.table(date = as.IDate(df.obs$DATE2), df.obs[-1])
# Maximum annual flow, along with other annual statistics are extracted
# from df.obs.sub2 into a new data.frame called df.annual.flow
df.annual.flow <- as.data.frame(df.obs.sub2[, list(mean.FLOW = mean(FLOW),
median.FLOW = median(FLOW),
min.FLOW = min(FLOW),
max.FLOW = max(FLOW)),
by = year(date)])
# df.annual.flow data.frame is rounded to three significant digits
df.annual.flow <- round(df.annual.flow,3)
# CRAN library data.table is detached to avoid conflicts with other libraries
detach(package:data.table)
# The annual-maximum flow series is extracted from df.annual.flow
# and stated as input_flow (numeric vector) -- this is the FFA input series
input_flow <- df.annual.flow$max.FLOW
# A Distribution function is selected
# (see ?dist.list for available distributions)
# log Pearson 3 is not one of them, but is a log-transformed equivalent of pe3
# note: this script recognizes 'lp3' to stand for log Pearson Type 3
dist <- "lp3" # this script uses "gev" by default; not "lp3"
# Frequency distribution is fitted as function of "dist"
# and parameters "mu", "sigma" and "gamma" are calculated
# NOTE(review): FrequencyAnalysis/BootstrapCI are not base R -- presumably
# sourced earlier in this file or from a helper script; confirm availability
fa <- FrequencyAnalysis(series=input_flow, distribution=dist)
# A data.frame containing estimated-deterministic fitted values,
# non-exceedance probabilities (nep) and return period (rp) is created
df.fa.out <- fa$output
# 95% confidence intervals are estimated for the chosen frequency distribution
# via bootstrap resampling (25000 resamples)
ci <- BootstrapCI(series=input_flow, # flow data
                  distribution=dist, # distribution
                  n.resamples = 2.5E4, # number of re-samples to conduct
                  ci = 0.95) # confidence interval level
# A data.frame containing non-exceedance probabilities (nep) for "central", "lower"
# and "upper" CI is created
df.ci.out <- ci$ci
# Annual-maximum flow values are sorted ascending in df.annual.flow data.frame
df.annual.flow <- df.annual.flow[order(df.annual.flow$max.FLOW),]
# Number of annual maxima (record length in years)
n.length <- length(df.annual.flow$max.FLOW)
# Weibull plotting positions p = rank / (n + 1) are calculated and added
# to df.annual.flow data.frame (valid because max.FLOW is now sorted ascending)
df.annual.flow$PROB <- round(((1:n.length)/(1+n.length)),3)
# Frequency analysis plot is configured by defining: specific x-breaks, ceiling/floor
# range and a log10 transformation for the flow axis
bwpeaks <- data.frame(PROB = df.annual.flow$PROB , FLOW = df.annual.flow$max.FLOW)
xbreaks <- c(0.002,0.01,0.1,0.3335,0.5,0.8,0.9,0.95,0.975,0.99,0.995, 0.998) # Plotting positions are determined
log.range <- log10(range(bwpeaks$FLOW))
lower <- 10^floor(log.range[1])
upper <- 10^ceiling(log.range[2])
cap <- lower
# y-axis breaks are accumulated decade by decade: multiples 1..15 of each
# power of ten between lower and upper
ybreaks <- NULL
while(cap < upper) {
  ybreaks <- c(ybreaks, seq(cap, cap*15, by = cap))
  cap <- cap * 10
}
# Flood Frequency Analysis plot is generated and saved:
# fitted curve (red) with bootstrap 95% CI envelope (dashed grey) and the
# observed annual maxima (black points) on probability-transformed axes
g.ffa <- ggplot(bwpeaks) +
  geom_line(data=df.ci.out, aes(x=nonexceed_prob, y=true),size = 1.25, color="#cc0000", alpha = 0.50) +
  geom_line(data=df.ci.out, aes(x=nonexceed_prob, y=lower),size = 0.75, color="#333333",alpha = 0.75, lty=2) +
  geom_line(data=df.ci.out, aes(x=nonexceed_prob, y=upper),size = 0.75, color="#333333",alpha = 0.75, lty=2) +
  geom_point(aes(x=PROB, y=FLOW),size = 4.00) +
  scale_y_continuous(trans="log10", breaks=ybreaks) +
  # x axis: normal-probability transform; tick labels converted from
  # non-exceedance probability to return period via prob2T()
  scale_x_continuous(trans=probability_trans(distribution="norm"),
                     breaks=xbreaks, labels=signif(prob2T(xbreaks), digits=4),
                     name="Return period [yrs]") +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Recurrence Interval (years)') +
  ggtitle(label = 'Upper Toro River Catchment. Flood Frequency Analysisis (1994-2010). Log Pearson Type 3') +
  theme_bw(base_size = 18.0)
# Flood Frequency Analysis plot is requested (printed to the active device)
g.ffa
# Flow events exceeding the 1.5-year recurrence-interval discharge are identified
# and a new subset data.frame is created; their percentage of the record is
# also calculated
# NOTE(review): row 12 / column 3 of df.fa.out is assumed to hold the fitted
# discharge for return period = 1.5 yrs -- confirm against the fa$output layout
limit1.5 <- df.fa.out[12,3]
FFATr1.5 <- sum(df.obs$FLOW > limit1.5)
df.FFATr1.5_sel <- df.obs [df.obs$FLOW > limit1.5, ]
# obs.length is the total record length, defined earlier in this script
FFATr1.5_percent <- round(((FFATr1.5 / obs.length)*100),3)
# Flow events exceeding the 10-year recurrence-interval discharge are identified
# and a new subset data.frame is created; their percentage is also calculated
# NOTE(review): row 23 assumed to correspond to return period = 10 yrs -- confirm
limit10 <- df.fa.out[23,3]
FFATr10 <- sum(df.obs$FLOW > limit10)
df.FFATr10_sel <- df.obs [df.obs$FLOW > limit10, ]
FFATr10_percent <- round(((FFATr10 / obs.length)*100),3)
# A complete-series hydrograph is generated and saved.
# Data points over the 1.5-yr threshold are circled; horizontal reference lines
# mark the record median, mean, and the 1.5-yr / 10-yr recurrence discharges.
# NOTE(review): the y axis is clipped to [0, 70]; any flow above 70 m3/s is
# silently dropped from the plot -- confirm the record maximum is below this
g.hydro01 <- ggplot() +
  geom_point(aes(x = DATE2,y = FLOW,colour = YEAR_CH),data=df.obs,size = 1.50) +
  geom_point(aes(x = DATE2,y = FLOW),data=df.FFATr1.5_sel,shape = 21,colour = '#999900',size = 3.0) +
  geom_line(aes(x = DATE2,y = FLOW),data=df.obs,colour = '#666666',size = 0.15,alpha = 0.5126) +
  scale_y_continuous(breaks = scales::pretty_breaks(n = 8.0,min.n = 1.0),expand = c(0.05,0.50),limits = c(0,70)) +
  scale_x_date(breaks = scales::pretty_breaks(n = 6.0)) +
  # df.obs.desc rows 8 and 9 hold the record median and mean respectively
  geom_hline(data=df.obs,size = 0.7,alpha = 0.5,yintercept = (df.obs.desc[8,1])) +
  geom_hline(data=df.obs,size = 0.7,linetype = 2,alpha = 0.5,yintercept = (df.obs.desc[9,1]), color="black") +
  geom_hline(data=df.obs,colour = '#999900',yintercept = limit1.5) +
  geom_hline(data=df.obs,colour = '#999900',linetype = 2,yintercept = limit10) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Period (years)') +
  ggtitle(label = 'Upper Toro River Catchment Streamflow Analisis (1994-2010)') +
  # text annotations are anchored at the first observation date (df.obs[1,4])
  geom_text(aes(df.obs[1,4],limit1.5,label = " R.int > 1.5 years", vjust = -1)) +
  geom_text(aes(df.obs[1,4],limit10,label = " R.int > 10 years", vjust = -1)) +
  geom_text(aes(df.obs[1,4],df.obs.desc[8,1],label = "median", vjust = 1.5)) +
  geom_text(aes(df.obs[1,4],df.obs.desc[9,1],label = "mean", vjust = -1)) +
  theme_bw(base_size = 18.0)
# Complete-series hydrograph is requested (printed to the active device)
g.hydro01
# Columns whose name contains "YEAR_" are selected from df.pivot into df.L,
# the summary-statistic columns are dropped (only the raw per-year series
# remain), tempDATE is re-attached as the id column, and the result is
# melted to long format (df.L2) for plotting.
i1 <- grep("YEAR_", names(df.pivot))
df.L <- (df.pivot)[i1]
# Summary columns are not individual years and would distort the spaghetti plot
for (drop_col in c("YEAR_DAY", "YEAR_MEAN", "YEAR_MEDIAN", "YEAR_SD")) {
  df.L[[drop_col]] <- NULL
}
# "tempDATE" (dummy-calendar date axis) is carried over from df.pivot
df.L$tempDATE <- df.pivot$tempDATE
# reshape2::melt turns one column per year into (tempDATE, variable, value) rows
df.L2 <- melt(df.L,id.vars="tempDATE")
# An annual-summary hydrograph is generated: one thin grey line per year
# (df.L2, long format) with the day-of-year mean (red) and median (blue)
# overlaid from df.pivot
g.hydro02 <- ggplot() +
  geom_line(aes(x = tempDATE,y = value, group=variable),data=df.L2,size = 0.1,alpha = 0.20) +
  geom_line(aes(x = tempDATE,y = YEAR_MEAN),data=df.pivot,colour = '#ff3300',size = 0.75) +
  geom_line(aes(x = tempDATE,y = YEAR_MEDIAN),data=df.pivot,colour = '#0000cc',size = 0.75) +
  scale_y_continuous(breaks = scales::pretty_breaks(n = 8.0,min.n = 8.0)) +
  scale_x_date(breaks = scales::date_breaks(),labels = date_format(format = '%m')) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Period (months)') +
  ggtitle(label = 'Upper Toro River Catchment Streamflow Analisis (1994-2010)') +
  # annotations anchored at df.pivot[1,22] -- presumably the tempDATE column; confirm
  geom_text(aes(df.pivot[1,22],5,label = "mean", vjust = -1)) +
  geom_text(aes(df.pivot[1,22],3,label = "median", vjust = 1)) +
  theme_bw(base_size = 18.0)
# annual-summary hydrograph is requested (printed to the active device)
g.hydro02
# Same annual-summary hydrograph, but with each year's line coloured by "YEAR".
# Both df.L2 and df.pivot are called
g.hydro03 <- ggplot() +
  geom_line(aes(x = tempDATE,y = value, group=variable, colour=variable),data=df.L2,size = 0.1,alpha = 0.55) +
  geom_line(aes(x = tempDATE,y = YEAR_MEAN),data=df.pivot,colour = '#ff3300',size = 0.75) +
  geom_line(aes(x = tempDATE,y = YEAR_MEDIAN),data=df.pivot,colour = '#0000cc',size = 0.75) +
  scale_y_continuous(breaks = scales::pretty_breaks(n = 8.0,min.n = 8.0)) +
  scale_x_date(breaks = scales::date_breaks(),labels = date_format(format = '%m')) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Period (months)') +
  ggtitle(label = 'Upper Toro River Catchment Streamflow Analisis (1994-2010)') +
  geom_text(aes(df.pivot[1,22],5,label = "mean", vjust = -1)) +
  geom_text(aes(df.pivot[1,22],3,label = "median", vjust = 1)) +
  theme_grey()
# coloured annual-summary hydrograph is requested (printed to the active device)
g.hydro03
# ////////////////////////////
# BLOCK: Flow Duration Curves
# ////////////////////////////
# A subset df.obs.month data.frame containing only "MONTH_CH" and "FLOW" is created
# based on df.obs data.frame
df.obs.month <- df.obs[,c("MONTH_CH","FLOW")]
# df.obs.month data.frame is ordered (sorted) by "MONTH_CH" value for the whole period
df.obs.month <- df.obs.month[order(df.obs.month[ ,1]) , ]
# A running row id is added so that dcast below can spread FLOW into one
# column per month without aggregating
df.obs.month$ID <-rep (1:length(df.obs.month$FLOW),1)
# dcast (reshape2) converts long to wide format (unmelt); months have unequal
# lengths, so NAs are introduced by coercion to pad the shorter columns
df.obs.month <- dcast(df.obs.month, ID ~ MONTH_CH, value.var="FLOW")
# ID helper variable is erased from df.obs.month
df.obs.month$ID <- NULL
# Descriptive statistics are extracted for df.obs.month data.frame
# and rounded to three significant digits
df.obs.month.desc <- round((as.data.frame(stat.desc(df.obs.month))),3)
# Rows 2 and 3 (nbr.null / nbr.na, inflated by the dcast padding) are deleted
# from df.obs.month.desc data.frame to avoid confusion
df.obs.month.desc <- df.obs.month.desc[-c(2, 3), ]
# hydroTSM fdc function computes per-column exceedance fractions; plotting is suppressed
df.FDC <- fdc(df.obs.month, plot=FALSE)
# df.FDC matrix is transformed into data.frame class
df.FDC <- as.data.frame(df.FDC)
# ---------------------------------------------------------------------------
# Per-month flow duration tables.
# For every month a data.frame is built with:
#   Perc_Exceedance : percent exceedance from hydroTSM::fdc (fraction * 100)
#   FLOW            : the corresponding monthly flow values from df.obs.month
#   MONTH           : the month label
# NA padding rows (introduced by the earlier dcast) are dropped and each table
# is sorted ascending by percent exceedance.
# The twelve tables are kept as individual globals (df.Jan ... df.Dec) because
# later sections reference them by name; they are also stacked into
# df.rbind.FDC for plotting.
# This loop replaces the previous 12-fold copy/paste of the same statements
# (idiom improvement only -- outputs are identical).
# ---------------------------------------------------------------------------
month_fdc_list <- lapply(month.abb, function(m) {
  d <- data.frame(x = (df.FDC[[m]]) * 100, y = df.obs.month[[m]])
  d <- na.omit(d)           # drop padding NAs introduced by dcast()
  d <- d[order(d[, 1]), ]   # sort by percent exceedance
  d$Month <- m
  colnames(d) <- c("Perc_Exceedance", "FLOW", "MONTH")
  d
})
# Individual monthly data.frames (df.Jan ... df.Dec) are re-created as globals
for (k in seq_along(month.abb)) {
  assign(paste0("df.", month.abb[k]), month_fdc_list[[k]])
}
# All monthly tables are stacked by row (calendar order) for plotting purposes
df.rbind.FDC <- do.call(rbind, month_fdc_list)
# Continuous standard monthly Flow Duration Curves are generated:
# one curve per month (colour + linetype keyed on MONTH) with a log10 flow axis
g.cont <- ggplot() +
  geom_line(aes(x = Perc_Exceedance,y = FLOW,colour = MONTH, linetype = MONTH,group = MONTH),
            data=df.rbind.FDC,size = 0.75) +
  scale_y_continuous(trans='log10',
                     breaks = c(c(0.1,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  scale_x_continuous(breaks = c(c(1,2,3,4,5,10,20,30,40,50,60,70,80,90,100))) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Percent Exceedance') +
  ggtitle(label = 'Upper Toro River Catchment. Monthly Flow Duration Curves. Continuous Values (1994-2010)') +
  theme_bw(base_size = 18.0)
# Continuous standard monthly Flow Duration Curves are requested (printed)
g.cont
# ---------------------------------------------------------------------------
# Discrete flow-duration values.
# For each month and each discrete percent-exceedance break, the row of the
# monthly FDC table whose Perc_Exceedance is CLOSEST to the break is located,
# and the corresponding FLOW value is stored in vt.month.output.<Mon>.
# These twelve vectors (one flow per break) are consumed by the summary
# table and discrete-FDC plot below.
# This loop replaces the previous 12-fold NULL/vector() initialisation and
# per-month extraction statements (idiom improvement only -- the redundant
# initialisations are dropped and outputs are identical).
# ---------------------------------------------------------------------------
# Discrete Percentage Exceedance intervals are defined
breaks.fdc <- c(0.01,0.1,1,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,99,99.9,99.99)
for (m in month.abb) {
  # monthly FDC table built in the previous section (df.Jan ... df.Dec)
  d <- get(paste0("df.", m))
  # index of the row nearest each discrete break
  idx <- vapply(breaks.fdc,
                function(b) which.min(abs(d$Perc_Exceedance - b)),
                integer(1L))
  # column 2 is FLOW; one value per break
  assign(paste0("vt.month.output.", m), d[idx, 2])
}
# A monthly summary data.frame is created: one row per discrete percent-
# exceedance break, one column per month holding the nearest-break FLOW.
# The columns are named directly (Perc_Exceedance, Jan ... Dec) instead of
# building 12 positional arguments and renaming afterwards (idiom
# improvement only -- the resulting data.frame is identical).
df.comp.FDC <- data.frame(
  Perc_Exceedance = breaks.fdc,
  # sapply over month.abb returns a matrix whose columns are already
  # named "Jan" ... "Dec"; data.frame() preserves those names
  sapply(month.abb, function(m) get(paste0("vt.month.output.", m)))
)
# Month character labels are repeated as a function of length(breaks.fdc)
# (one label per discrete exceedance interval, used to key the legend)
Jan_ch <- rep("Jan",length(breaks.fdc))
Feb_ch <- rep("Feb",length(breaks.fdc))
Mar_ch <- rep("Mar",length(breaks.fdc))
Apr_ch <- rep("Apr",length(breaks.fdc))
May_ch <- rep("May",length(breaks.fdc))
Jun_ch <- rep("Jun",length(breaks.fdc))
Jul_ch <- rep("Jul",length(breaks.fdc))
Aug_ch <- rep("Aug",length(breaks.fdc))
Sep_ch <- rep("Sep",length(breaks.fdc))
Oct_ch <- rep("Oct",length(breaks.fdc))
Nov_ch <- rep("Nov",length(breaks.fdc))
# BUG FIX: the December label was "Dic" (Spanish abbreviation) while every
# other label here -- and the continuous-FDC plot -- uses English "Dec", so
# December appeared under a mismatched legend key in the discrete plot.
# The variable name Dic_ch is kept so any later reference still resolves.
Dic_ch <- rep("Dec",length(breaks.fdc))
# A sequence index (1..length(breaks.fdc)) is repeated 12 times,
# one run for each month
V.C1 <- rep(1:length(breaks.fdc), 12)
# Discrete Percentage Exceedance intervals are repeated 12 times,
# one run for each month
V.C2 <- rep(breaks.fdc,12)
# All monthly discrete-FDC flow vectors are concatenated in calendar order
V.C3 <- c(vt.month.output.Jan,
          vt.month.output.Feb,
          vt.month.output.Mar,
          vt.month.output.Apr,
          vt.month.output.May,
          vt.month.output.Jun,
          vt.month.output.Jul,
          vt.month.output.Aug,
          vt.month.output.Sep,
          vt.month.output.Oct,
          vt.month.output.Nov,
          vt.month.output.Dec)
# All month labels are concatenated in the same calendar order
V.C4 <- c(Jan_ch,
          Feb_ch,
          Mar_ch,
          Apr_ch,
          May_ch,
          Jun_ch,
          Jul_ch,
          Aug_ch,
          Sep_ch,
          Oct_ch,
          Nov_ch,
          Dic_ch)
# A long-format summary data.frame is assembled for graphical purposes
df.discrete <- data.frame(V.C1, V.C2, V.C3, V.C4)
# colnames in df.discrete data.frame are renamed
colnames(df.discrete) <- c("Sequence","Perc_Exceedance","FLOW","Month")
# Discrete standard monthly Flow Duration Curves are generated:
# one curve per month through the nearest-break flow values (df.discrete),
# log10 flow axis
g.disc <- ggplot() +
  geom_line(aes(x = Perc_Exceedance,y = FLOW,colour = Month,linetype = Month,group = Month),
            data=df.discrete,size = 0.75) +
  scale_y_continuous(trans='log10',
                     breaks = c(c(0.1,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  scale_x_continuous(breaks = c(c(1,2,3,4,5,10,20,30,40,50,60,70,80,90,100))) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Percent Exceedance') +
  ggtitle(label = 'Upper Toro River Catchment. Monthly Flow Duration Curves. Discrete Values (1994-2010)') +
  theme_grey()
# Discrete standard monthly Flow Duration Curves are requested (printed)
g.disc
# /////////////////////////////////////////
# BLOCK: Temporal and Graphical Indicators
# /////////////////////////////////////////
# A flow monthly boxplot is generated (log10 flow axis, outliers in red)
boxplot.Month <- ggplot() +
  geom_point(aes(x = MONTH_CH,y = FLOW),data=df.obs,size = 0.5) +
  geom_boxplot(aes(y = FLOW,x = MONTH_CH,colour = MONTH_CH),
               data=df.obs,size = 0.75,alpha = 0.75,outlier.colour = '#ff0000',outlier.size = 1.5) +
  scale_y_continuous(trans='log10',
                     breaks = c(c(0.1,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Month') +
  ggtitle(label = 'Upper Toro River Catchment. Monthly Boxplot (1994-2010)') +
  theme_bw(base_size = 18.0)
# A flow monthly boxplot is requested (printed to the active device)
boxplot.Month
# A log-scaled flow monthly boxplot-violin plot is generated
# (jittered raw points + violin density + slim inner boxplot)
boxplot.Month.violin.log <- ggplot() +
  geom_point(aes(x = MONTH_CH,y = FLOW),data=df.obs,
             size = 0.95,position = position_jitter(width = 0.15)) +
  geom_violin(aes(x = MONTH_CH,y = FLOW,colour = MONTH_CH),data=df.obs,size = 0.85,alpha = 0.75) +
  geom_boxplot(aes(y = FLOW,x = MONTH_CH),data=df.obs,
               size = 0.15,alpha = 0.0,outlier.size = 0.15) +
  scale_y_continuous(trans='log10',
                     breaks = c(c(0.5,2,4,6,8,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  ylab(label = 'Q (m3/s)_log scaled') +
  xlab(label = 'Month') +
  ggtitle(label = 'Upper Toro River Catchment. Monthly Boxplot-Violin (1994-2010)') +
  theme_grey()
# The log-scaled flow monthly boxplot-violin plot is requested (printed)
boxplot.Month.violin.log
# A flow monthly boxplot-violin plot is generated (same layers as the log
# version above but with a linear flow axis)
boxplot.Month.violin <- ggplot() +
  geom_point(aes(x = MONTH_CH,y = FLOW),data=df.obs,
             size = 0.95,position = position_jitter(width = 0.15)) +
  geom_violin(aes(x = MONTH_CH,y = FLOW,colour = MONTH_CH),data=df.obs,size = 0.85,alpha = 0.75) +
  geom_boxplot(aes(y = FLOW,x = MONTH_CH),data=df.obs,size = 0.15,alpha = 0.0,outlier.size = 0.15) +
  scale_y_continuous(breaks = c(c(0.5,2,4,6,8,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Month') +
  ggtitle(label = 'Upper Toro River Catchment. Monthly Boxplot-Violin (1994-2010)') +
  theme_grey()
# A flow monthly boxplot-violin plot is requested (printed)
boxplot.Month.violin
# A flow yearly boxplot is generated (log10 flow axis, one box per year)
boxplot.Year <- ggplot() +
  geom_point(aes(x = YEAR_CH,y = FLOW),data=df.obs,size = 0.5) +
  geom_boxplot(aes(y = FLOW,x = YEAR_CH,colour = YEAR_CH),data=df.obs,
               size = 0.75,alpha = 0.75,outlier.colour = '#ff0000',outlier.size = 1.5) +
  scale_y_continuous(trans='log10',
                     breaks = c(c(0.1,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  ylab(label = 'Q (m3/s)_log scaled') +
  xlab(label = 'Year') +
  ggtitle(label = 'Upper Toro River Catchment. Yearly Boxplot (1994-2010)') +
  theme_bw(base_size = 18.0)
# A flow yearly boxplot is requested (printed)
boxplot.Year
# ///////////////////////////////////////
# BLOCK: Baseflow Separation and Lowflow
# ///////////////////////////////////////
# BaseflowSeparation (EcoHydRology) is used to get an approximation of
# baseflow using a 3-pass digital filter; result rounded to 3 significant digits
df.bfs <- round((BaseflowSeparation(df.obs$FLOW, passes=3)),3)
# colnames in df.bfs data.frame are renamed (bt/qft -> BASEFLOW/QUICKFLOW)
colnames(df.bfs) <- c("BASEFLOW","QUICKFLOW")
# df.obs data.frame is cbinded to df.bfs data.frame
# (row order matches because df.bfs was computed from df.obs$FLOW directly)
df.bfs.union <- cbind(df.obs,df.bfs)
# CRAN library data.table is loaded only for this block (detached below)
require(data.table)
# A data.table copy is created with an IDate key column built from DATE2
df.bfs.union.sub2 <- data.table(date = as.IDate(df.bfs.union$DATE2), df.bfs.union[-1])
# Baseflow annual statistics (mean / median / min / max, grouped by calendar
# year) are extracted from df.bfs.union.sub2 into df.bfs.union.annual
df.bfs.union.annual <- as.data.frame(df.bfs.union.sub2[, list(mean.BASEFLOW = mean(BASEFLOW),
                                                              median.BASEFLOW = median(BASEFLOW),
                                                              min.BASEFLOW = min(BASEFLOW),
                                                              max.BASEFLOW = max(BASEFLOW)),
                                                       by = year(date)])
# df.bfs.union.annual data.frame is rounded to three significant digits
df.bfs.union.annual <- round(df.bfs.union.annual,3)
# CRAN library data.table is detached to avoid conflicts with other libraries
detach(package:data.table)
# A subset data.frame containing only "MONTH_CH" and "BASEFLOW" is created
df.bfs.union.temp <- df.bfs.union[,c("MONTH_CH","BASEFLOW")]
#//////////////////////////////////////////
# Monthly disaggregation of baseflow.
# The 3-pass filtered baseflow series is split into one numeric vector per
# month (Jan.base ... Dec.base), preserving chronological order within each
# month. This vectorised subsetting replaces the previous NULL initialisation
# block plus an element-by-element if/else-if loop (which grew NA-padded
# vectors that were stripped immediately afterwards); after the existing
# NA-removal step below, the resulting vectors are identical.
# counter01 (the old loop bound) is kept in case later code references it.
counter01 <- length(df.bfs.union.temp$MONTH_CH)
for (m in month.abb) {
  # MONTH_CH is a lubridate month label; `==` against the abbreviation works
  # for both factor and character encodings
  assign(paste0(m, ".base"),
         df.bfs.union.temp$BASEFLOW[df.bfs.union.temp$MONTH_CH == m])
}
# NA values are removed from the monthly baseflow vectors
# (Jan.base ... Dec.base); one loop instead of twelve repeated statements
for (m in month.abb) {
  nm <- paste0(m, ".base")
  v <- get(nm)
  assign(nm, v[!is.na(v)])
}
# Monthly baseflow descriptive statistics (pastecs::stat.desc), rounded to
# 3 significant digits. Each month's one-column data.frame is kept as an
# individual global (df.Jan.base ... df.Dec.base) for compatibility with any
# later reference, and all twelve are bound into df.rbind.base with one
# "<Mon>_BASEFLOW" column per month.
# This loop replaces the previous 12-fold copy/paste (idiom improvement
# only -- outputs are identical).
for (m in month.abb) {
  assign(paste0("df.", m, ".base"),
         as.data.frame(round((stat.desc(get(paste0(m, ".base")))),3)))
}
# Monthly baseflow data.frames are bound by column in calendar order
df.rbind.base <- do.call(cbind,
                         lapply(month.abb,
                                function(m) get(paste0("df.", m, ".base"))))
# colnames in df.rbind.base data.frame are renamed ("Jan_BASEFLOW" ... "Dec_BASEFLOW")
colnames(df.rbind.base) <- paste0(month.abb, "_BASEFLOW")
# A Baseflow monthly boxplot is generated (linear flow axis, outliers in red)
boxplot.baseflow.Month <- ggplot() +
  geom_boxplot(aes(y = BASEFLOW,x = MONTH_CH, colour = MONTH_CH),data=df.bfs.union,
               size = 0.75,alpha = 0.75,outlier.colour = '#ff0000',outlier.size = 1.5) +
  scale_y_continuous(breaks = c(c(1,2,3,4,5,6,7,8,9,10,15,20,25,30,50,100))) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Month') +
  ggtitle(label = 'Upper Toro River Catchment. Monthy Baseflow Boxplot (1994-2010)') +
  theme_grey()
# A Baseflow monthly boxplot is requested (printed to the active device)
boxplot.baseflow.Month
# A Quickflow/total-flow ratio is calculated and added to df.bfs.union
# NOTE(review): the column is named RATIO but is later plotted as the
# "Baseflow Index (BFI)" -- BFI is conventionally BASEFLOW/FLOW, while this
# computes QUICKFLOW/FLOW (i.e. 1 - BFI); confirm which is intended
df.bfs.union$RATIO <- round(((df.bfs.union$QUICKFLOW / df.bfs.union$FLOW)),3)
# Descriptive statistics are extracted for the "RATIO" column only
df.bfs.desc.RATIO <- round((as.data.frame(stat.desc(df.bfs.union$RATIO))),3)
# A Baseflow monthly boxplot (as fraction of total flow) is generated.
# BUG FIX: the "mean" annotation line was a bare `(aes(...))` added to the
# plot -- the geom_text() call was missing, so the label was never drawn and
# the bare aes() clobbered the plot's default mapping. Restored to geom_text()
# to match the "median" annotation line above it.
boxplot.baseflow.RATIO <- ggplot() +
  geom_boxplot(aes(y = RATIO,x = MONTH_CH, colour = MONTH_CH),data=df.bfs.union,
               size = 0.75,alpha = 0.75,outlier.colour = '#ff0000',outlier.size = 1.5) +
  scale_y_continuous(breaks = c(c(0.025,0.05,0.1,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0))) +
  # reference hlines for the median/mean ratio are currently disabled;
  # the text annotations below still mark their y positions
  #geom_hline(data=df.bfs.union,size = 0.95,alpha = 0.5,yintercept = (df.bfs.desc.RATIO[8,1])) +
  #geom_hline(data=df.bfs.union,size = 0.95,linetype = 2,
  #alpha = 0.5,yintercept = (df.bfs.desc.RATIO[9,1]), color="black") +
  geom_text(aes(df.bfs.union[1,8],df.bfs.desc.RATIO[8,1],label = "median", vjust = -0.5)) +
  geom_text(aes(df.bfs.union[1,8],df.bfs.desc.RATIO[9,1],label = "mean", vjust = -1)) +
  ylab(label = 'Baseflow Index (BFI)') +
  xlab(label = 'Month') +
  ggtitle(label = 'Upper Toro River Catchment. Monthy Baseflow Boxplot (as fraction of total flow) (1994-2010)') +
  theme_bw(base_size = 18.0)
# The Baseflow monthly boxplot (as fraction of total flow) is requested
boxplot.baseflow.RATIO
# A complete-series hydrograph including the separated baseflow (blue line)
# is generated; data points over the 1.5-yr recurrence threshold are circled
# and the median/mean/threshold reference lines are drawn.
# BUG FIX: two geom_hline() calls contained a doubled comma
# (`geom_hline(data=df.obs,,size=...)`), passing an empty positional
# argument into the mapping slot; the stray commas are removed.
g.hydro04 <- ggplot() +
  #geom_point(aes(x = DATE2,y = FLOW,colour = YEAR_CH),data=df.obs,size = 1.50) +
  geom_point(aes(x = DATE2,y = FLOW),data=df.FFATr1.5_sel,shape = 21,colour = '#999900',size = 3.0) +
  geom_line(aes(x = DATE2,y = FLOW),data=df.obs,colour = '#666666',size = 0.15,alpha = 0.75) +
  geom_line(aes(x = DATE2,y = BASEFLOW),data=df.bfs.union,colour = '#0000cc',size = 0.75) +
  scale_y_continuous(trans='log10',breaks = c(c(0.1,0.5,0.75,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10,12,14,16,18,20,30,40,50,60,70,80,90,100))) +
  scale_x_date(breaks = scales::pretty_breaks(n = 15.0)) +
  # df.obs.desc rows 8 and 9 hold the record median and mean respectively
  geom_hline(data=df.obs,size = 0.95,alpha = 0.5,yintercept = (df.obs.desc[8,1])) +
  geom_hline(data=df.obs,size = 0.95,linetype = 2,alpha = 0.5,yintercept = (df.obs.desc[9,1]), color="black") +
  geom_hline(data=df.obs,size = 0.95,colour = '#999900',yintercept = limit1.5) +
  geom_hline(data=df.obs,size = 0.95,colour = '#999900',linetype = 2,yintercept = limit10) +
  ylab(label = 'Q (m3/s)') +
  xlab(label = 'Period (years)') +
  ggtitle(label = 'Upper Toro River Catchment Streamflow Analisis (1994-2010)') +
  # text annotations are anchored at the first observation date (df.obs[1,4])
  geom_text(aes(df.obs[1,4],limit1.5,label = " R.int > 1.5 years", vjust = -1)) +
  geom_text(aes(df.obs[1,4],limit10,label = " R.int > 10 years", vjust = -1)) +
  geom_text(aes(df.obs[1,4],df.obs.desc[8,1],label = "median", vjust = -0.5)) +
  geom_text(aes(df.obs[1,4],df.obs.desc[9,1],label = "mean", vjust = -1)) +
  geom_text(aes(df.obs[1,4],df.obs.desc[9,1],label = "Baseflow", vjust = 6)) +
  theme_bw(base_size = 18.0)
# Baseflow complete-series hydrograph is requested (printed to the active device)
g.hydro04
# //////////////////////////////
# BLOCK: Flow Assessment Models
# //////////////////////////////
# Annual flow summaries are ordered chronologically (ascending year)
df.annual.flow.model <- df.annual.flow[order(df.annual.flow$year),]
# Constant character columns are added so ggplot legends show a single
# "observations" / "CI_95" entry in the trend plots below
df.annual.flow.model$data <- c("observations")
df.annual.flow.model$model <- c("CI_95")
# Same legend helper columns for the annual baseflow summary data.frame
df.bfs.union.annual$data <- c("observations")
df.bfs.union.annual$model <- c("CI_95")
# Linear trend model: annual median flow vs year
lm.01 <- lm(median.FLOW ~ year, data=df.annual.flow.model)
sm.lm.01 <- summary(lm.01)
# Linear trend model: annual mean flow vs year
lm.02 <- lm(mean.FLOW ~ year, data=df.annual.flow.model)
sm.lm.02 <- summary(lm.02)
# Linear trend model: annual maximum flow vs year
lm.03 <- lm(max.FLOW ~ year, data=df.annual.flow.model)
sm.lm.03 <- summary(lm.03)
# Linear trend model: annual median baseflow vs year
lm.04 <- lm(median.BASEFLOW ~ year, data=df.bfs.union.annual)
sm.lm.04 <- summary(lm.04)
# Linear trend model: annual mean baseflow vs year
lm.05 <- lm(mean.BASEFLOW ~ year, data=df.bfs.union.annual)
sm.lm.05 <- summary(lm.05)
# Linear trend model: annual maximum baseflow vs year
lm.06 <- lm(max.BASEFLOW ~ year, data=df.bfs.union.annual)
sm.lm.06 <- summary(lm.06)
# Trend plots for the annual FLOW statistics (median, mean, max). Each plot
# shows points + dashed connecting line + lm() smooth with 95% CI ribbon,
# and places the fitted-equation label via the boundary.LM()/eq.PAR() helpers
# (defined elsewhere in this file).
# A LM model-plot for the Annual-Median is created
g.lm01 <- ggplot(data=df.annual.flow.model) +
geom_smooth(aes(x = year,y = median.FLOW,colour = model),
fill = '#cccc00',size = 1.25,alpha = 0.25,method = lm) +
geom_line(aes(x = year,y = median.FLOW),size = 0.45,linetype = 2,alpha = 0.55) +
geom_point(aes(x = year,y = median.FLOW,fill = data),colour = '#333333',size = 4.0,alpha = 0.99) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0),
expand = c(0.05,((max(df.annual.flow.model$median.FLOW))*0.075))) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0)) +
theme_bw(base_size = 18.0) +
ylab(label = 'Q (m3/s)') +
xlab(label = 'Period (years)') +
ggtitle(label = 'Upper Toro River Catchment. Annual-Median Linear-Model (1994-2010)')
# boundary.LM computes the (x, y) position where the equation label is placed
post.lm01 <- boundary.LM(max(df.annual.flow.model$year),
min(df.annual.flow.model$year),
max(df.annual.flow.model$median.FLOW),
min(df.annual.flow.model$median.FLOW))
# The fitted-equation text label is added at the computed position
g.lm01 <- g.lm01 + (geom_text(data=df.annual.flow.model,x = post.lm01[1],y = post.lm01[2],
label = eq.PAR(lm.01), # eq.PAR function is called
colour = 'black',hjust = 0.0,vjust = 0.50,alpha = 0.05,size=5.0, parse = FALSE))
# A LM model-plot for the Annual-Median is requested
g.lm01
# A LM model-plot for the Annual-mean is created (same layout as g.lm01)
g.lm02 <- ggplot(data=df.annual.flow.model) +
geom_smooth(aes(x = year,y = mean.FLOW,colour = model),
fill = '#cccc00',size = 1.25,alpha = 0.25,method = lm) +
geom_line(aes(x = year,y = mean.FLOW),size = 0.45,linetype = 2,alpha = 0.55) +
geom_point(aes(x = year,y = mean.FLOW,fill = data),colour = '#333333',size = 4.0,alpha = 0.99) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0),
expand = c(0.05,((max(df.annual.flow.model$mean.FLOW))*0.075))) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0)) +
theme_bw(base_size = 18.0) +
ylab(label = 'Q (m3/s)') +
xlab(label = 'Period (years)') +
ggtitle(label = 'Upper Toro River Catchment. Annual-Mean Linear-Model (1994-2010)')
# boundary.LM computes the (x, y) position where the equation label is placed
post.lm02 <- boundary.LM(max(df.annual.flow.model$year),
min(df.annual.flow.model$year),
max(df.annual.flow.model$mean.FLOW),
min(df.annual.flow.model$mean.FLOW))
# The fitted-equation text label is added at the computed position
g.lm02 <- g.lm02 + (geom_text(data=df.annual.flow.model,x = post.lm02[1],y = post.lm02[2],
label = eq.PAR(lm.02), # eq.PAR function is called
colour = 'black',hjust = 0.0,vjust = 0.50,alpha = 0.05,size=5.0, parse = FALSE))
# A LM model-plot for the Annual-Mean is requested
g.lm02
# A LM model-plot for the Annual-Max is created (same layout as g.lm01)
g.lm03 <- ggplot(data=df.annual.flow.model) +
geom_smooth(aes(x = year,y = max.FLOW,colour = model),
fill = '#cccc00',size = 1.25,alpha = 0.25,method = lm) +
geom_line(aes(x = year,y = max.FLOW),size = 0.45,linetype = 2,alpha = 0.55) +
geom_point(aes(x = year,y = max.FLOW,fill = data),colour = '#333333',size = 4.0,alpha = 0.99) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0),
expand = c(0.05,((max(df.annual.flow.model$max.FLOW))*0.075))) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0)) +
theme_bw(base_size = 18.0) +
ylab(label = 'Q (m3/s)') +
xlab(label = 'Period (years)') +
ggtitle(label = 'Upper Toro River Catchment. Annual-Max Linear-Model (1994-2010)')
# boundary.LM computes the (x, y) position where the equation label is placed
post.lm03 <- boundary.LM(max(df.annual.flow.model$year),
min(df.annual.flow.model$year),
max(df.annual.flow.model$max.FLOW),
min(df.annual.flow.model$max.FLOW))
# The fitted-equation text label is added at the computed position
g.lm03 <- g.lm03 + (geom_text(data=df.annual.flow.model,x = post.lm03[1],y = post.lm03[2],
label = eq.PAR(lm.03), # eq.PAR function is called
colour = 'black',hjust = 0.0,vjust = 0.50,alpha = 0.05,size=5.0, parse = FALSE))
# A LM model-plot for the Annual-Max is requested
g.lm03
# Trend plots for the annual BASEFLOW statistics (median, mean, max), same
# layout as the g.lm01-g.lm03 flow plots but with a green CI ribbon.
# A LM model-plot for the Annual-median.BASEFLOW is created
g.lm04 <- ggplot(data=df.bfs.union.annual) +
geom_smooth(aes(x = year,y = median.BASEFLOW,colour = model),
fill = 'green',size = 1.25,alpha = 0.25,method = lm) +
geom_line(aes(x = year,y = median.BASEFLOW),size = 0.45,linetype = 2,alpha = 0.55) +
geom_point(aes(x = year,y = median.BASEFLOW,fill = data),colour = '#333333',size = 4.0,alpha = 0.99) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0),
expand = c(0.05,((max(df.bfs.union.annual$median.BASEFLOW))*0.075))) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0)) +
theme_bw(base_size = 18.0) +
ylab(label = 'Q (m3/s)') +
xlab(label = 'Period (years)') +
ggtitle(label = 'Upper Toro River Catchment. Annual-Median BaseFlow Linear-Model (1994-2010)')
# boundary.LM computes the (x, y) position where the equation label is placed
post.lm04 <- boundary.LM(max(df.bfs.union.annual$year),
min(df.bfs.union.annual$year),
max(df.bfs.union.annual$median.BASEFLOW),
min(df.bfs.union.annual$median.BASEFLOW))
# The fitted-equation text label is added at the computed position
g.lm04 <- g.lm04 + (geom_text(data=df.bfs.union.annual,x = post.lm04[1],y = post.lm04[2],
label = eq.PAR(lm.04), # eq.PAR function is called
colour = 'black',hjust = 0.0,vjust = 0.50,alpha = 0.05,size=5.0, parse = FALSE))
# A LM model-plot for the Annual-median.BASEFLOW is requested
g.lm04
# A LM model-plot for the Annual-mean.BASEFLOW is created
g.lm05 <- ggplot(data=df.bfs.union.annual) +
geom_smooth(aes(x = year,y = mean.BASEFLOW,colour = model),
fill = 'green',size = 1.25,alpha = 0.25,method = lm) +
geom_line(aes(x = year,y = mean.BASEFLOW),size = 0.45,linetype = 2,alpha = 0.55) +
geom_point(aes(x = year,y = mean.BASEFLOW,fill = data),colour = '#333333',size = 4.0,alpha = 0.99) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0),
expand = c(0.05,((max(df.bfs.union.annual$mean.BASEFLOW))*0.075))) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0)) +
theme_bw(base_size = 18.0) +
ylab(label = 'Q (m3/s)') +
xlab(label = 'Period (years)') +
ggtitle(label = 'Upper Toro River Catchment. Annual-Mean BaseFlow Linear-Model (1994-2010)')
# boundary.LM computes the (x, y) position where the equation label is placed
post.lm05 <- boundary.LM(max(df.bfs.union.annual$year),
min(df.bfs.union.annual$year),
max(df.bfs.union.annual$mean.BASEFLOW),
min(df.bfs.union.annual$mean.BASEFLOW))
# The fitted-equation text label is added at the computed position
g.lm05 <- g.lm05 + (geom_text(data=df.bfs.union.annual,x = post.lm05[1],y = post.lm05[2],
label = eq.PAR(lm.05), # eq.PAR function is called
colour = 'black',hjust = 0.0,vjust = 0.50,alpha = 0.05,size=5.0, parse = FALSE))
# A LM model-plot for the Annual-mean.BASEFLOW is requested
g.lm05
# A LM model-plot for the Annual-max.BASEFLOW is created
g.lm06 <- ggplot(data=df.bfs.union.annual) +
geom_smooth(aes(x = year,y = max.BASEFLOW,colour = model),
fill = 'green',size = 1.25,alpha = 0.25,method = lm) +
geom_line(aes(x = year,y = max.BASEFLOW),size = 0.45,linetype = 2,alpha = 0.55) +
geom_point(aes(x = year,y = max.BASEFLOW,fill = data),colour = '#333333',size = 4.0,alpha = 0.99) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0),
expand = c(0.05,((max(df.bfs.union.annual$max.BASEFLOW))*0.075))) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10.0,min.n = 10.0)) +
theme_bw(base_size = 18.0) +
ylab(label = 'Q (m3/s)') +
xlab(expression(atop("Period (years)", paste("data.frame-source: df.bfs.union.annual")))) +
ggtitle(label = 'Upper Toro River Catchment. Annual-Max BaseFlow Linear-Model (1994-2010)')
# boundary.LM computes the (x, y) position where the equation label is placed
post.lm06 <- boundary.LM(max(df.bfs.union.annual$year),
min(df.bfs.union.annual$year),
max(df.bfs.union.annual$max.BASEFLOW),
min(df.bfs.union.annual$max.BASEFLOW))
# The fitted-equation text label is added at the computed position
g.lm06 <- g.lm06 + (geom_text(data=df.bfs.union.annual,x = post.lm06[1],y = post.lm06[2],
label = eq.PAR(lm.06), # eq.PAR function is called
colour = 'black',hjust = 0.0,vjust = 0.50,alpha = 0.05,size=5.0, parse = FALSE))
# A LM model-plot for the Annual-max.BASEFLOW is requested
g.lm06
# /////////////////////////////////////////////
# BLOCK: Export and display and of data.frames
# /////////////////////////////////////////////
# All CSV/TXT paths below are relative, so output lands in the current
# working directory at the time this block runs.
# Observed daily flow data.frame
write.csv(df.obs, file = "df.obs.csv")
# Observed daily flow descriptive statistics data.frame
write.csv(df.obs.desc, file = "df.obs.desc.csv")
# Observed daily flow disaggregated by year data.frame
write.csv(df.pivot, file = "df.pivot.csv")
# Observed daily flow descriptive statistics disaggregated by year data.frame
write.csv(df.pivot.desc, file = "df.pivot.desc.csv")
# Annual descriptive statistics data.frame
write.csv(df.annual.flow, file = "df.annual.flow.csv")
# Estimated-deterministic fitted values, non-exceedance probabilities (nep),
# return period (rp) and estimated FLOW data.frame
write.csv(df.fa.out, file = "df.fa.out.csv")
# Non-exceedance probabilities (nep) for "central", "lower" and "upper" CI data.frame
write.csv(df.ci.out, file = "df.ci.out.csv")
# Observed daily flow descriptive statistics disaggregated by month data.frame
write.csv(df.obs.month.desc, file = "df.obs.month.desc.csv")
# Summarized monthly flows by discrete Percentage Exceedance intervals data.frame
write.csv(df.comp.FDC, file = "df.comp.FDC.csv")
# Monthly flow-duration-curve data.frames, one CSV per calendar month
write.csv(df.Jan, file = "df.Jan.csv")
write.csv(df.Feb, file = "df.Feb.csv")
write.csv(df.Mar, file = "df.Mar.csv")
write.csv(df.Apr, file = "df.Apr.csv")
write.csv(df.May, file = "df.May.csv")
write.csv(df.Jun, file = "df.Jun.csv")
write.csv(df.Jul, file = "df.Jul.csv")
write.csv(df.Aug, file = "df.Aug.csv")
write.csv(df.Sep, file = "df.Sep.csv")
write.csv(df.Oct, file = "df.Oct.csv")
write.csv(df.Nov, file = "df.Nov.csv")
write.csv(df.Dec, file = "df.Dec.csv")
# Daily baseflow-quickflow separation data.frame
write.csv(df.bfs, file = "df.bfs.csv")
# Observed flow daily including baseflow and quickflow data.frame
write.csv(df.bfs.union, file = "df.bfs.union.csv")
# Annual baseflow descriptive statistics data.frame
write.csv(df.bfs.union.annual, file = "df.bfs.union.annual.csv")
# Baseflow daily descriptive statistics disaggregated by month
write.csv(df.rbind.base, file = "df.rbind.base.csv")
# LM-Models summaries (sm.lm.01 .. sm.lm.06) are exported as plain-text files
capture.output(sm.lm.01, file = "lm01_median.txt")
capture.output(sm.lm.02, file = "lm02_mean.txt")
capture.output(sm.lm.03, file = "lm03_max.txt")
capture.output(sm.lm.04, file = "lm04_median_baseflow.txt")
capture.output(sm.lm.05, file = "lm05_mean_baseflow.txt")
capture.output(sm.lm.06, file = "lm06_max_baseflow.txt")
# Interactive inspection of the data.frames (disabled for batch runs)
#View(df.obs)
#View(df.obs.desc)
#View(df.pivot)
#View(df.pivot.desc)
#View(df.annual.flow)
#View(df.fa.out)
#View(df.ci.out)
#View(df.obs.month.desc)
#View(df.comp.FDC)
#View(df.bfs)
#View(df.bfs.union)
#View(df.bfs.union.annual)
#View(df.rbind.base)
|
cc8370ad46cd9428067ba33972435fab3b022085
|
7796666bf5bfd050ca3e393ef0357f17f0528e31
|
/plot-3D.R
|
793fe9de3241cd9921ea1a398500af48629c2cb1
|
[] |
no_license
|
clarajegousse/fcm
|
f16588ee928cb1eac49f7687b09f6fc0fedd693e
|
d9b65b4058dd962792cb9cf42ee04347c060e066
|
refs/heads/master
| 2020-03-20T00:32:06.080963
| 2018-06-15T11:26:57
| 2018-06-15T11:26:57
| 137,047,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,462
|
r
|
plot-3D.R
|
###LOAD PACKAGES
library(flowCore)
library(ggcyto)
library(stringr)
library(car)
library(rgl)
###VARIABLES
Folder.path <- '/Users/bland/Desktop/Flow-cytometry_data/Input/' #Path of the folder containing the FCS Files
csv.path <- '/Users/bland/Desktop/Flow-cytometry_data/Output/Dataframe/' #Path of the folder containing the csv file for the results
img.path <- '/Users/bland/Desktop/Flow-cytometry_data/Output/Figures/Plots/3D_plots/' #Path of the folder containing the PDF Files for the results
csv.name <- "_Abundance_with_all_info_results.csv" #Name of the CSV file containing the results
pdf.name <- "_Plots_with_gating.pdf" #Name of the pdf containing the plots with the gates
liste.stations <- c('LB2', 'LB8') #List of the keywords of the stations to analyse ###be sure that all the FCS files corresponding to the stations are in the folder and that the keywords correspond to a unique station
today <- '20180615'
#MINIMAL NUMBER OF BEADS AND EVENT
minEvents <- 9999 #minimal number of events
minBeads <- 999 #minimal number of beads
#TYPE OF TRANSFORMATION
Transfo.type <- logTransform(transformationId="LogTransform", logbase=10, r=1, d=1) #Type of transformation
to.transform <- c('FSC.A', 'SSC.A', 'Chlorophyll.A', 'SybrGreen.A', 'PE.A') #List of the names of the measurement parameters on which the transformations have to be performed
#BACKGROUND NOISE
NoiseSyb.min <- 0 #Minimal value of the noise in SybrGreen.A
NoiseSyb.max <- 2 #Maximal value of the noise in SybrGreen.A
NoisePE.min <- 0 #Minimal value of the noise in PE.A
NoisePE.max <- 2 #Maximal value of the noise in PE.A
NoiseChl.min <- 0 #Minimal value of the noise in Chlorophyll.A
NoiseChl.max <- 3.15 #Maximal value of the noise in Chlorophyll.A
#BEADS GATE
BeadsSyb.min <- 5 #Minimal value of the beads gate in SybrGreen.A
BeadsSyb.max <- 7 #Maximal value of the beads gate in SybrGreen.A
BeadsChl.min <- 4.5 #Minimal value of the beads gate in Chlorophyll.A
BeadsChl.max <- 5.5 #Maximal value of the beads gate in Chlorophyll.A
BeadsSSC.min <- 4.5 #Minimal value of the beads gate in SSC.A
BeadsSSC.max <- 5.5 #Maximal value of the beads gate in SSC.A
#INFORMATIONS ABOUT THE STATIONS
stn.lane.list <- c("Faxafloi","Faxafloi","Faxafloi","Faxafloi","Faxafloi","Faxafloi","Faxafloi","Faxafloi","Faxafloi","Latrabjarg","Latrabjarg","Latrabjarg","Latrabjarg","Latrabjarg","Latrabjarg","Latrabjarg","Latrabjarg","Latrabjarg","Kogur", "Kogur", "Kogur", "Kogur", "Kogur","Kogur", "Hornbanki","Hornbanki","Hornbanki","Hornbanki","Hornbanki","Hornbanki", "Siglunes","Siglunes","Siglunes","Siglunes","Siglunes","Siglunes","Siglunes","Siglunes", "Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes","Langanes", "Krossanes","Krossanes","Krossanes","Krossanes","Krossanes","Krossanes", "Stokksnes","Stokksnes","Stokksnes","Stokksnes","Stokksnes", "Ingolfshofdi","Ingolfshofdi","Ingolfshofdi", "Selvogsbanki","Selvogsbanki","Selvogsbanki","Selvogsbanki","Selvogsbanki")
stn.name.list <- c("Faxafloi 1","Faxafloi 2","Faxafloi 3","Faxafloi 4","Faxafloi 5","Faxafloi 6","Faxafloi 7","Faxafloi 8","Faxafloi 9","Latrabjarg 1","Latrabjarg 2","Latrabjarg 3","Latrabjarg 4","Latrabjarg 5","Latrabjarg 6","Latrabjarg 7","Latrabjarg 8","Latrabjarg 9", "Kogur 1", "Kogur 2", "Kogur 3", "Kogur 4", "Kogur 5","Kogur 6", "Hornbanki 1","Hornbanki 2","Hornbanki 3","Hornbanki 4","Hornbanki 5","Hornbanki 6", "Siglunes 1","Siglunes 2","Siglunes 3","Siglunes 4","Siglunes 5","Siglunes 6","Siglunes 7","Siglunes 8", "Langanes 1","Langanes 2","Langanes 3","Langanes 4","Langanes 5","Langanes 6","Langanes 1","Langanes 2","Langanes 3","Langanes 4","Langanes 5","Langanes 6","Langanes 7","Langanes 8", "Krossanes 1","Krossanes 2","Krossanes 3","Krossanes 4","Krossanes 5","Krossanes 6", "Stokksnes 1","Stokksnes 2","Stokksnes 3","Stokksnes 4","Stokksnes 5", "Ingolfshofdi 1","Ingolfshofdi 2","Ingolfshofdi 3", "Selvogsbanki 1","Selvogsbanki 2","Selvogsbanki 3","Selvogsbanki 4","Selvogsbanki 5")
stn.id.list <- c("FX1", "FX2", "FX3","FX4","FX5","FX6","FX7","FX8","FX9","LB1","LB2","LB3","LB4","LB5","LB6","LB7","LB8","LB9","KG1","KG2","KG3","KG4","KG5","KG6","HB1","HB2","HB3","HB4","HB5","HB6","SI1","SI2","SI3","SI4","SI5","SI6","SI7","SI8","LN1", "LN2","LN3","LN4","LN5","LN6","LA1","LA2","LA3","LA4","LA5","LA6","LA7","LA8","KR1","KR2","KR3","KR4","KR5","KR6","ST1","ST2","ST3","ST4","ST5","IH1","IH2","IH3","SB1","SB2","SB3","SB4","SB5")
stn.lat.list <- c(64.2,64.2,64.2,64.2,64.2,64.2,64.2,64.2,64.2,65.3,65.35,65.4,65.45,65.5,65.54,66.1,66.5,66.9,66.3,66.41,66.53,67.5,67.2,67.35,66.4,66.5,67,67.1,67.2,67.3,66.16,66.24,66.32,66.44,67,67.2,67.4,68,66.37,67,67.15,67.3,67.45,68,66.22,66.22,66.22,66.22,66.22,66.22,66.22,66.22,65,65,65,65,65,65,64.12,64.2,63.52,63.48,63.38,63.46,63.42,63.34,63.41,63.29,63.19,63.9,63)
stn.lon.list <- c(-22.25,-22.45,-23.15,-23.45,-24.2,-25,-26,-27,-27.57,-24.34,-24.55,-25.16,-25.39,-26,-26.29,-26.48,-27.3,-27.15,-23,-23.9,-23.19,-23.28,-23.42,-23.56,-21.35,-21.35,-21.35,-21.35,-21.35,-21.35,-18.5,-18.5,-18.5,-18.5,-18.5,-18.5,-18.5,-18.5,-14.16,-13.55,-13.34,-13.16,-12.58,-12.4,-14.22,-14.1,-13.35,-13,-12.5,-11,-10,-9,-13.3,-12.49,-11.4,-11.17,-10.7,-9,-14.5,-14.28,-14.8,-13.58,-13.4,-16.36,-16.3,-16.18,-20.41,-20.54,-21.7,-21.18,-21.28)
stn.max.depth.list <- c(70,40,94,190,220,235,327,430,1010,34,64,108,256,228,290,410,658,510,45,78,236,240,500,980,100,120,200,230,330,608,80,435,470,700,230,498,408,1045,188,420,1576,1760,1830,1890,60,156,275,160,1094,1400,1300,1260,54,116,230,518,600,1420,84,141,216,546,1192,72,90,108,46,90,155,510,1004)
#SETTING FOR THE PLOTS
# NOTE(review): Bins is declared here but not used in the visible 3D-plot
# code below -- presumably consumed by 2D density plots elsewhere; confirm.
Bins <- 100 #value of the bins for the plots (a high value increase the time of calculation and the size of the PDF file)
###FUNCTIONS =====================================================================
Sort.Files <- function(listouille, max.depth) {
  # Order a vector of FCS file names for one station: the blank sample (name
  # containing "BLANK") first, then one file per depth in ascending order.
  #
  # listouille: character vector of file names.
  # max.depth:  largest depth (in meters) to look for; patterns "_<d>m" are
  #             tried for every d in 0..max.depth.
  # Returns: character vector of matched names (first match per depth), or
  #          NULL when nothing matches.
  sorted_files <- c()
  blank_hits <- grep("BLANK", listouille, value = TRUE)
  if (length(blank_hits) > 0) {
    sorted_files <- c(sorted_files, blank_hits[1])
  }
  for (depth in 0:max.depth) {
    depth_hits <- grep(paste("_", depth, "m", sep = ""), listouille, value = TRUE)
    if (length(depth_hits) > 0) {
      sorted_files <- c(sorted_files, depth_hits[1])
    }
  }
  sorted_files
}
Find.Depth <- function(listouille, max.depth) {
  # Extract the depth labels for one station's files, in the same order that
  # Sort.Files() sorts them: "BLANK" first (when present), then each depth d
  # in 0..max.depth that has at least one file matching "_<d>m".
  #
  # Returns: vector of labels ("BLANK" plus depths, coerced to character when
  #          mixed), or NULL when nothing matches.
  depth_labels <- c()
  if (length(grep("BLANK", listouille, value = TRUE)) > 0) {
    depth_labels <- c(depth_labels, "BLANK")
  }
  for (depth in 0:max.depth) {
    if (length(grep(paste("_", depth, "m", sep = ""), listouille, value = TRUE)) > 0) {
      depth_labels <- c(depth_labels, depth)
    }
  }
  depth_labels
}
FFtoDF <- function(FF) {
  # Convert a flowFrame -- or a (possibly named) list of flowFrames -- into
  # data.frame(s) of raw event measurements (one row per event, one column
  # per channel), via flowCore::exprs().
  #
  # FF: a flowFrame, or a list whose elements are flowFrames.
  # Returns: a data.frame for a single flowFrame, or a list of data.frames
  #          (carrying the names of FF) for a list input; non-flowFrame list
  #          elements yield NULL with a warning.
  # Raises: an error when FF is neither a flowFrame nor a list.
  if (inherits(FF, "flowFrame")) {
    # Use the namespaced call consistently (the original mixed bare exprs()
    # and flowCore::exprs()).
    return(as.data.frame(flowCore::exprs(FF)))
  }
  if (inherits(FF, "list")) {
    frameList <- vector("list", length(FF))  # pre-sized, all NULL
    for (i in seq_along(FF)) {               # seq_along: safe for empty lists
      if (inherits(FF[[i]], "flowFrame")) {
        frameList[[i]] <- as.data.frame(flowCore::exprs(FF[[i]]))
        names(frameList)[[i]] <- names(FF)[[i]]
      } else {
        warning(paste("Object at index", i, "not of type flowFrame"))
      }
    }
    return(frameList)
  }
  stop("Object is not of type flowFrame")
}
Make.3Dplot <- function(Station.flowFrame, Beads.flowFrame, Noise.flowFrame, X.index, Y.index, Z.index, xlabel, ylabel, zlabel, titre) {
  # Draw an rgl/car 3D scatter plot of one sample, colouring events by gate
  # ("Entire sample" = filtered community, "Beads", "Noise"), with a legend.
  #
  # *.flowFrame: gated flowFrames for the sample, the beads and the noise.
  # X.index/Y.index/Z.index: column names (or indices) of the three channels.
  # xlabel/ylabel/zlabel: axis labels; titre: legend title line.
  # Returns: the scatter3d() result (the plot itself is an rgl side effect).
  AllStation <- FFtoDF(Station.flowFrame)
  AllStation$Gate <- rep("Entire sample", length(AllStation[, 1]))
  AllBeads <- FFtoDF(Beads.flowFrame)
  AllBeads$Gate <- rep("Beads", length(AllBeads[, 1]))
  AllNoise <- FFtoDF(Noise.flowFrame)
  AllNoise$Gate <- rep("Noise", length(AllNoise[, 1]))
  matr <- rbind(AllStation, AllBeads, AllNoise)
  # Drop rows that are non-finite on any plotted axis; grep matches "Inf" in
  # the character representation of the column, so both Inf and -Inf are hit.
  list.toRemove <- grep("Inf", matr[, X.index], value = FALSE)
  list.toRemove <- append(list.toRemove, grep("Inf", matr[, Y.index], value = FALSE))
  list.toRemove <- append(list.toRemove, grep("Inf", matr[, Z.index], value = FALSE))
  if (length(list.toRemove) > 0) {
    matr <- matr[-list.toRemove, ]
  }
  matr$Gate <- as.factor(matr$Gate)
  plt3D <- scatter3d(x = matr[, X.index], y = matr[, Y.index], z = matr[, Z.index],
                     xlab = xlabel, ylab = ylabel, zlab = zlabel, sphere.size = 0.1,
                     groups = matr$Gate,
                     surface.col = c("darkorange1", "steelblue4", "snow3"),
                     axis.col = c("black", "black", "black"), surface = FALSE)
  # BUG FIX: this call was previously a dangling "+ legend3d(...)" continuation
  # line (unary plus on the return value); legend3d() is a side-effecting rgl
  # call, not a ggplot layer, so it is now a statement of its own.
  legend3d("topright",
           legend = c(titre, ' ', 'Beads', 'Microbes communities', 'Background noise'),
           pch = 16, col = c("white", "white", "darkorange1", "steelblue4", "snow3"),
           cex = 1, inset = c(0.02))
  return(plt3D)
}
###===============================================================================
### Initialization of the list of channel transformations (log10 on the
### channels named in to.transform)
myTrans <- transformList(to.transform, Transfo.type)
###DETERMINATION OF BEADS GATE
# Beads = intersection of three rectangular gates on the log-transformed channels
BeadsSyb.Gate <- rectangleGate(filterId="Beads Region","SybrGreen.A"=c(BeadsSyb.min, BeadsSyb.max))
BeadsChl.Gate <- rectangleGate(filterId="Beads Region","Chlorophyll.A"=c(BeadsChl.min, BeadsChl.max))
BeadsSSC.Gate <- rectangleGate(filterId="Beads Region","SSC.A"=c(BeadsSSC.min, BeadsSSC.max))
Beads.Gate <- BeadsSyb.Gate & BeadsChl.Gate & BeadsSSC.Gate
###DETERMINATION OF NOISE GATE
# Background noise = intersection of three low-signal rectangular gates
NoiseSyb.Gate <- rectangleGate(filterId="Noise","SybrGreen.A"=c(NoiseSyb.min, NoiseSyb.max))
NoisePE.Gate <- rectangleGate(filterId="Noise","PE.A"=c(NoisePE.min, NoisePE.max))
NoiseChl.Gate <- rectangleGate(filterId="Noise","Chlorophyll.A"=c(NoiseChl.min, NoiseChl.max))
Noise.Gate <- NoiseSyb.Gate & NoisePE.Gate & NoiseChl.Gate
###WITHOUT BEADS AND NOISE
# Filtered.Gate keeps only events that are neither noise nor beads
Sans.Noise.Gate <- !Noise.Gate
Sans.Beads.Gate <- !Beads.Gate
Filtered.Gate <- Sans.Noise.Gate & Sans.Beads.Gate
###CREATION OF FLOWSET (ONE PER STATION)
# Accumulators filled by the acquisition loop below (one element per station)
Station.frames <- c()
Beads.frames <- c()
Noise.frames <- c()
Samp.Name <- c("Name of the FCS file") #List containing the names of all the analysed files
Smp.depth <- c("Depth (in meters)")
# For each station keyword: find its FCS files, read them as a flowSet,
# log-transform, and split the events into community / noise / beads subsets.
# NOTE(review): 1:length(x) is unsafe when liste.stations or list.FCSname is
# empty (yields c(1, 0)); seq_along() would be the robust form.
for (station.index in 1:length(liste.stations)) {
setwd(Folder.path)
# Files are ordered blank-first then by ascending depth (see Sort.Files)
list.FCSname <- Sort.Files(list.files(pattern=liste.stations[station.index]), 2000)
Smp.depth <- append(Smp.depth, Find.Depth(list.files(pattern=liste.stations[station.index]), 2000))
for(truc in 1:length(list.FCSname)) {
Samp.Name <- append(Samp.Name, list.FCSname[truc])
}
###READ FLOWSET
fs <- read.flowSet(files=list.FCSname, alter.names=TRUE, transformation =FALSE)
###TRANSFORMATION
fs.trans <- transform(fs, myTrans) #Transformation of the data
Without.BeadandNoise <- Subset(fs.trans, Filtered.Gate)
Station.frames <- append(Station.frames, Without.BeadandNoise)
###SUPPRESSION OF BACKGROUND NOISE
Noise <- Subset(fs.trans, Noise.Gate)
Noise.frames <- append(Noise.frames, Noise)
###BEADS GATING
Beads <- Subset(fs.trans, Beads.Gate)
Beads.frames <- append(Beads.frames, Beads)
}
###INITIALISATION OF THE VECTORS CONTAINING THE DATA (the first value of each vector is the label)
# The label-as-first-element convention means these vectors are character and
# are written as CSV columns with a header row duplicated in row 1.
Nb.Totevent <- c("Total number of events")
Nb.beads <- c("Number of beads")
Nb.Noise <- c("Number of events to remove (background noise)")
Pourc.Noise <- c("Pourcentage of background noise removed (%)")
Abundance <- c("Concentration of phytoplankton (number of events / mL)")
stn.lane <- c("Station lane")
stn.name <- c("Station name")
stn.id <- c("Station ID")
stn.lat <- c("Station latitude")
stn.lon <- c("Station longitude")
stn.max.depth <- c("Maximal depth (in meters)")
###FILLING OF THE DIFFERENT LISTS CONTAINING THE INFORMATIONS (station names, depths, localisation, abundance...)
# One iteration per (station, sample): counts events per gate, computes the
# noise percentage and a bead-normalised abundance.
for (station in 1:length(liste.stations)) {
# Index of this station in the metadata lists (stn.*.list)
ind <- match(liste.stations[station],stn.id.list)
for (prof in 1:length(Station.frames[[station]])) {
# Total = community + beads + noise events for this sample
NbEvent <- nrow(Station.frames[[station]][[prof]]) + nrow(Beads.frames[[station]][[prof]]) + nrow(Noise.frames[[station]][[prof]])
NbBeads <- nrow(Beads.frames[[station]][[prof]])
NbNoise <- nrow(Noise.frames[[station]][[prof]])
PNoise <- (NbNoise*100)/NbEvent
if (NbEvent > minEvents && NbBeads > minBeads){
# Abundance = community events scaled by the bead ratio; 1080000 is
# presumably the bead stock concentration per mL -- TODO confirm.
Abond <- ((NbEvent-NbBeads-NbNoise)/(NbBeads/1080000))
}else{
# Too few events/beads for a reliable estimate
Abond <- "ERROR"
}
Nb.Totevent <- append(Nb.Totevent, NbEvent)
Nb.beads <- append(Nb.beads, NbBeads)
Nb.Noise <- append(Nb.Noise, NbNoise)
Abundance <- append(Abundance, Abond)
stn.lane <- append(stn.lane, stn.lane.list[ind])
stn.name <- append(stn.name, stn.name.list[ind])
stn.id <- append(stn.id, stn.id.list[ind])
stn.lat <- append(stn.lat, stn.lat.list[ind])
stn.lon <- append(stn.lon, stn.lon.list[ind])
stn.max.depth <- append(stn.max.depth, stn.max.depth.list[ind])
Pourc.Noise <- append(Pourc.Noise, PNoise)
}
}
###CREATION OF A CSV FILE TO STORE THE DATA
# NOTE(review): csv.name is reassigned to the full path here, so re-running
# this block compounds the path; the write.csv call is currently disabled.
csv.name <- paste(csv.path, today, csv.name, sep="")
results <- cbind(Samp.Name, stn.lane, stn.name, stn.id, stn.lat, stn.lon, stn.max.depth, Smp.depth, Nb.Totevent, Nb.beads, Nb.Noise, Pourc.Noise, Abundance)
#write.csv(results,csv.name)
# For each station/sample, render two interactive 3D plots (SSC vs SybrGreen
# vs PE, and Chlorophyll vs SybrGreen vs PE) and export each as a WebGL HTML
# page. `index` walks Smp.depth, whose first element is a header label, so it
# is pre-incremented before use.
index <- 1
for (station in 1:length(liste.stations)) {
path3D <- paste(img.path, liste.stations[station], sep="")
path3D1 <- paste(path3D,"/SSC_vs_SYBRGREEN_vs_PE", sep="")
path3D2 <- paste(path3D,"/Chlorophyll_vs_SYBRGREEN_vs_PE", sep="")
# dir.create returns FALSE (with a warning) when the directory already exists
print(dir.create(path3D))
print(dir.create(path3D1))
print(dir.create(path3D2))
for (prof in 1:length(Station.frames[[station]])) {
index <- index + 1
# Fixed-size rgl window so exported snapshots are comparable
print(par3d(windowRect = 50 + c(0,0,640,640)))
print(Make.3Dplot(Station.frames[[station]][[prof]], Beads.frames[[station]][[prof]], Noise.frames[[station]][[prof]], "SSC.A", "SybrGreen.A", "PE.A", "log(SSC.A) [arbitratry unit]", "log(SyberGreen.A) [arbitratry unit]", "log(PE.A) [arbitratry unit]", paste("3D plot of the sample ", liste.stations[station], "_", Smp.depth[index], sep="")))
print(writeWebGL(filename = paste(path3D1, "/",today, "_3Dplot_SSC-SybrGreen-PE_", liste.stations[station], "_", Smp.depth[index], ".html", sep="")))
print(Make.3Dplot(Station.frames[[station]][[prof]], Beads.frames[[station]][[prof]], Noise.frames[[station]][[prof]], "Chlorophyll.A", "SybrGreen.A", "PE.A", "log(Chlorophyll.A) [arbitratry unit]", "log(SyberGreen.A) [arbitratry unit]", "log(PE.A) [arbitratry unit]", paste("3D plot of the sample ", liste.stations[station], "_", Smp.depth[index], sep="")))
print(writeWebGL(filename = paste(path3D2, "/",today, "_3Dplot_Chlorophyll-SybrGreen-PE_", liste.stations[station], "_", Smp.depth[index], ".html", sep="")))
}
}
|
feab48e8800e668f70de8f1ec73deb328cd2f99a
|
40db507664c28975eeaaea556307b476381edf25
|
/shiny/server.R
|
cc53e4c1243bc4dcc68cdab01dc243a37d31925c
|
[] |
no_license
|
fstrueb/tools
|
982d35092ac65840b0a60b0da88f52a1d4a852ac
|
d4547b00a7d98bbc924034e4c77419b32a5a00c9
|
refs/heads/master
| 2020-05-24T15:56:17.015405
| 2017-07-11T16:20:50
| 2017-07-11T16:20:50
| 84,855,178
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,547
|
r
|
server.R
|
library(shiny)
library(dplyr)
library(rtracklayer)
library(DT)
library(shinyjs)
path = getwd()
source('../R_functions/plotPromoter.R')
source('../R_functions/makeRangeOfInterest.R')
source('../R_functions/scanRangeForTFBS.R')
# Helper functions used below; sourced relative to the app directory.
source('../R_functions/siteSetToDataFrame.R')
source('../R_functions/unlistJASPAR.R')
source('../R_functions/standardizeSeqlevels.R')
source('../R_functions/siteSetListSummary.R')

# Server for a Shiny app that scans a genomic range for JASPAR transcription
# factor binding sites (TFBS) and renders summary tables / plots of the hits.
shinyServer(function(input, output, session) {
  ####### DATA CHOICE #######
  # FIX: this block was wrapped in reactive(), but the resulting reactive
  # expression was never read anywhere, so the disable() calls never ran.
  # observe() executes eagerly whenever input$rangeChoice changes.
  observe({
    if (input$rangeChoice == 'upload') {
      # Manual range inputs are irrelevant when an upload supplies the range.
      shinyjs::disable('chromSel')
      shinyjs::disable('from')
      shinyjs::disable('to')
    }
  })
  ############ FILE IMPORT #############
  # Browseable roots for shinyFiles: home directory, mounted volumes, app dir.
  rootVolumes = c(Home = normalizePath("~"), getVolumes()(), WD = '.')
  shinyFileChoose(input, 'file', roots = rootVolumes, session = session)
  # Reads the user-selected CSV. loadCSVFile() is presumably defined in the
  # sourced helpers or the enclosing package -- confirm.
  loadCSVObj <- reactive({
    loadCSVFile(req(as.character(
      parseFilePaths(rootVolumes,
                     input$file)$datapath
    )), input$csvsample1, input$csvsample2)
  })
  ############ REACTIVE VALUES ###########
  # Genomic range to scan; defaults to the Lin7c-Ccdc34 locus (mm10).
  rangeObj = reactiveValues(
    # species = 'mouse'
    # assembly = 'mm10'
    # standard: Lin7c-Ccdc34 locus, mm10
    assembly = 'mm10',
    chromosome = 'chr2',
    start = 109886546,
    end = 109892833)
  # JASPAR query parameters.
  scanDetails = reactiveValues(
    species = 'mouse',
    collection = 'CORE')
  # Motif IDs the scan may be restricted to.
  scanMotifs = reactiveValues(
    motif_ID = c('MA0001.1', 'MA0002.1'))
  # Scan output: per-hit data frame and a per-motif summary table.
  scanResults = reactiveValues(
    df = data.frame(),
    summary = data.frame())
  ############ MESSAGE MENU ##############
  output$notificationMenu = renderMenu({
    notifications = list()
    # NOTE(review): scanResults$df is initialised to data.frame(), which is
    # never NULL, so the warning branch is currently unreachable -- perhaps
    # nrow(scanResults$df) == 0 was intended; confirm before changing.
    if (is.null(scanResults$df)) {
      notifications[[1]] = notificationItem(text = 'Range not present, please provide one.', icon = icon('warning'))
    } else {
      notifications[[1]] = notificationItem(text = 'No notifications.', icon = icon('check'))
    }
    dropdownMenu(type = 'notification', .list = notifications)
  })
  ####### ISOLATE ###########
  # isolate checkbox button inputs
  # One-off initialisation of the reactive values from the current inputs.
  isolate({
    #rangeObj$assembly = input$annoChoice
    scanDetails$species = input$rangeSpecies
    scanDetails$collection = input$rangeCollection
    scanMotifs$motif_ID = input$resultMotifs
  })
  ########### SUBMIT AND SCAN ##########
  observeEvent(input$acceptRange, {
    # disable the button once the submit button is clicked
    shinyjs::disable('acceptRange')
    output$rangeSummary = renderDataTable(options = list(pageLength = 25, scrollX = T), {
      #input$acceptRange
      # Create a Progress object
      progress <- shiny::Progress$new(style = 'notification')
      progress$set(message = "Scanning range...", value = 0)
      # Close the progress when this reactive exits (even if there's an error)
      on.exit(progress$close())
      # Create a closure to update progress.
      # Each time this is called:
      # - If `value` is NULL, it will move the progress bar 1/5 of the remaining
      #   distance. If non-NULL, it will set the progress to that value.
      # - It also accepts optional detail text.
      updateProgress <- function(value = NULL, detail = NULL) {
        if (is.null(value)) {
          value <- progress$getValue()
          value <- value + (progress$getMax() - value) / 4
        }
        progress$set(value = value, detail = detail)
      }
      # Snapshot the UI range inputs at scan time.
      isolate({
        rangeObj$assembly = input$annoChoice
        rangeObj$chromosome = input$chromSel
        rangeObj$start = input$from
        rangeObj$end = input$to
      })
      # scanMotifs$motif_ID = motif.list$motif_ID
      range = makeRange(chromosome = rangeObj$chromosome, from = rangeObj$start, to = rangeObj$end)
      cat('worked: ', as.character(GenomicRanges::seqnames(range)), '\n')
      ######### MAIN FUNCTIONS FOR PROGRESS BAR ##########
      withProgress(message = 'Scanning...', style = 'notification', value = 0.1, {
        ###### check if only specific JASPAR IDs are selected
        isolate({
          if (is.null(input$resultMotifs)) {
            # No explicit selection: scan every motif in the chosen collection.
            motif.list = unlistJASPAR(species = scanDetails$species, collection = scanDetails$collection)
            scanRes = scanRangeForTFBS(query = range,
                                       motif.list = motif.list$motif_ID,
                                       input.assembly = rangeObj$assembly,
                                       updateProgress)
          } else {
            scanRes = scanRangeForTFBS(query = range,
                                       motif.list = isolate(input$resultMotifs),
                                       # FIX: use rangeObj$assembly, matching the
                                       # branch above; `range` is the GRanges built
                                       # by makeRange() and does not carry the
                                       # assembly selected in the UI.
                                       input.assembly = rangeObj$assembly,
                                       updateProgress)
          }
        })
      })
      scanResSum = siteSetListSummary(query = range, siteSetList = scanRes)
      ### implement pvalue and motif return functionality here, update second progress bar
      # re-enable the submit button
      shinyjs::enable('acceptRange')
      # update the scanRes object
      scanResults$summary = scanResSum
      scanResSum
    }
    # enable scroll bar for data table output
    )
    # must update reSummary output
    output$reSummaryTable = renderDataTable(scanResults$df, options = list(scrollX = TRUE))
  })
  ############# dynamically change motif ID to select from for scan ##########
  # Available motif IDs for the chosen species/collection.
  foundMotifs = reactive({
    scanDetails$species = input$rangeSpecies
    scanDetails$collection = input$rangeCollection
    motif.list = unlistJASPAR(species = scanDetails$species, collection = scanDetails$collection)
    # if (is.null(motif.list)) {
    #   warning('motif.list is empty')
    # } else {
    #   motif.list = 'nothing defined'
    # }
    scanMotifs$motif_ID = motif.list
    motif.list$motif_ID
  })
  output$foundMotifsUI = renderUI({
    selectizeInput('resultMotifs', "Select JASPAR IDs", foundMotifs(), multiple = T)
  })
  ######## dynamically change motif ID input boxes in plot ##############
  # Motif IDs that produced hits in the last scan.
  # NOTE(review): scanResults$df is never assigned by the scan observer above
  # (only $summary is), so this list stays empty -- confirm where df should
  # be populated.
  searchResult = reactive({
    #scanResults$df %>% group_by(motif_ID) %>% tally() %>% arrange(-n) %>% mutate(n = as.character(n)) %>% tidyr::unite(col = result, everything(), sep = ', hits = ')
    unique(scanResults$df$motif_ID)
  })
  output$selectUI = renderUI({
    selectizeInput("plotMotifChoices", "Select your choice", searchResult())
  })
  ######### PLOT PART #############
  observeEvent(input$plotButton, {
    output$plotResults = renderPlot({
      # NOTE(review): df is initialised to data.frame(), so this NULL check
      # (and the placeholder mtcars plot) is currently unreachable -- confirm.
      if (is.null(scanResults$df)) {
        print('bla')
        plot(mtcars)
      } else {
        # NOTE(review): chr is hard-coded to 'chr12' while the scan uses the
        # user-selected chromosome -- likely should be rangeObj$chromosome.
        # The bare `plot` below also looks like a leftover; confirm intent.
        plotPromoter(
          range = scanResults$df,
          chr = 'chr12',
          motif.selection = isolate(input$plotMotifChoices)
        )
        plot
      }
    })
  })
})
|
b1f3b90f8e7d7bf39c18be2a24c83c3e024fd197
|
762ff19db84f778a9135269cc8c0ce62f06bc0b7
|
/R/pathos.R
|
78a8552311810b62aa70e4cffeedce014da1e4ce
|
[] |
no_license
|
Trackage/pathos
|
d8ddb99bf68b17620ea04021fd6db93d21fdf9d3
|
915649b3a9fc5286f0388aa8dffaaf4887dab378
|
refs/heads/master
| 2021-06-19T15:26:56.930296
| 2017-07-19T22:56:27
| 2017-07-19T22:56:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,213
|
r
|
pathos.R
|
#' Plot segments and arrows
#'
#' Add segments or arrows to a plot from prepare_segments.
#'
#' Input is a data frame with `x0`, `y0`, `x1`, `y1` columns as per [segments]
#' @param d data frame of segment endpoints
#' @param ... arguments passed to [segments()]
plot_segments <- function(d, ...) {
  segments(x0 = d[["x0"]], y0 = d[["y0"]], x1 = d[["x1"]], y1 = d[["y1"]], ...)
}
#' Plot arrows
#'
#' Arrow variant of [plot_segments()]; draws arrows from (x0, y0) to (x1, y1)
#' with [arrows()]. (The original roxygen had `@name plot_segments` duplicated;
#' `@rdname` is the correct tag for documenting this alongside plot_segments.)
#'
#' @rdname plot_segments
plot_arrows <- function(d, ...) {
  arrows(d$x0, d$y0, d$x1, d$y1, ...)
}
#' Path to segments
#'
#' Turn a path of indexes into consecutive (from, to) index pairs.
#'
#' It's assumed that `x` is a vector of IDs to other records, but this function
#' only does the reshaping to paired indexes. Writing the path column-wise into
#' a (length + 1) x 2 matrix makes column 2 the path shifted by one element;
#' dropping the two wrapped-around rows leaves one row per segment.
#' @param x vector
path_to_seg <- function(x) {
  n_pts <- length(x)
  wrapped <- suppressWarnings(
    matrix(x, nrow = n_pts + 1, ncol = 2, byrow = FALSE)
  )
  head(wrapped, -2L)
}
#' Paths as segments
#'
#' Prepare paths of coordinates by Group as segments
#'
#' This is really specific to X, Y, G inputs but can be made more general to
#' any set of grouped (and implicitly arranged) records. Returns a data frame
#' with columns `G`, `x0`, `y0`, `x1`, `y1`, one row per within-group segment.
#' (Rewritten with base R `rbind` instead of `dplyr::bind_rows`, removing the
#' only dplyr dependency; output columns and order are unchanged.)
#' @param X input X coordinate
#' @param Y input Y coordinate
#' @param G input grouping index
prepare_path_segments <- function(X, Y, G) {
  d <- data.frame(X = X, Y = Y, G = G, ROW_ID = seq_along(X))
  # One (from, to) index matrix per group, in split() (i.e. sorted-G) order.
  grouped_seg_id <- lapply(split(d$ROW_ID, d$G), path_to_seg)
  # Look the indexes back up as coordinates; single-point groups yield 0 rows.
  seg_list <- lapply(grouped_seg_id, function(idx) {
    data.frame(x0 = d$X[idx[, 1]], y0 = d$Y[idx[, 1]],
               x1 = d$X[idx[, 2]], y1 = d$Y[idx[, 2]])
  })
  # Group label column, repeated once per segment of that group.
  group_col <- rep(names(seg_list), vapply(seg_list, nrow, integer(1)))
  ds <- do.call(rbind, c(unname(seg_list), list(make.row.names = FALSE)))
  ds <- data.frame(G = group_col, ds, stringsAsFactors = FALSE)
  ds
}
#' Segments
#'
#' Prepare segments from two-layer u-v brick.
#'
#' The input is currently assumed to be a two-layer raster brick with
#' horizontal (U) and vertical (V) components to vectors at each pixel.
#' Pixels whose U component is NA are dropped.
#' @param x [raster::brick] with two layers of vector field components (u, v)
#' @param scale multiplication factor for u-v
#' @param ... ignored
#' @export
prepare_segments <- function(x, scale = 3600, ...) {
  dx <- raster::values(x[[1]]) * scale
  dy <- raster::values(x[[2]]) * scale
  keep <- !is.na(dx)
  origin <- sp::coordinates(x)[keep, ]
  data.frame(x0 = origin[, 1], y0 = origin[, 2],
             x1 = origin[, 1] + dx[keep], y1 = origin[, 2] + dy[keep])
}
|
24894a2682bba3b94f601cee7a9f4882608cef18
|
605c9ab0818959b65d00f6c89efe296e9ec8a0f6
|
/01_diversity_analyses/bayesian_modeling/defining_strategies.R
|
612543a3cb386f1a0cd9b22f3eba35f2058c3325
|
[] |
no_license
|
nyu-cdsc/diversity
|
c5d1246911fe3fcc89a27a346a357a8c98064e8b
|
16355ce74a043cf2655c7042d441edcfe0b08854
|
refs/heads/master
| 2020-03-10T15:55:35.031602
| 2019-06-24T17:03:30
| 2019-06-24T17:03:30
| 129,461,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,589
|
r
|
defining_strategies.R
|
## Defining the hypothesis space
# This creates a matrix for each strategy, assigning probabilities to how
# likely each choice would be if the person adopted that strategy.
# These individual matrices are compiled into a single matrix that has all
# possible selections given experimenter choice.
# This will be used to compare the participants' choices to.
#
# Column layout (the columns are named later in this script):
#   1. exp_choice                   -- the experimenter's choice
#   2. agent_choice                 -- the agent's candidate response
#   3. probability                  -- probability of that response under the strategy
#   4. number_of_unexpected_choices -- count of off-strategy alternatives for that exp_choice

# "extreme" strategy
extreme_matrix = as.data.frame(matrix(c(c( 1, 1, 2, 2, 3, 3,4,5),
                                        c( 5, 4, 5, 4, 5, 4,5,4),
                                        c(.8,.2,.8,.2,.8,.2,1,1),
                                        c( 2, 2, 2, 2, 2, 2,3,3)),
                                      ncol = 4))
extreme_matrix$prob_strategy <- "extreme"
# "diverse" strategy
diverse_matrix = as.data.frame(matrix(c(c(1,1,2,2,3,3,4,4,5,5),
                                        c(5,4,5,4,1,5,1,2,1,2),
                                        c(.8,.2,.8,.2,.5,.5,.8,.2,.8,.2),
                                        c(2,2,2,2,2,2,2,2,2,2)),
                                      ncol = 4))
diverse_matrix$prob_strategy <- "diverse"
# "average" strategy
average_matrix = as.data.frame(matrix(c(c( 1, 1, 2, 2, 3, 3, 4, 4, 5, 5),
                                        c( 5, 4, 4, 5, 2, 4, 2, 1, 1, 2),
                                        c(.8,.2,.8,.2,.5,.5,.8,.2,.8,.2),
                                        c( 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)),
                                      ncol = 4))
average_matrix$prob_strategy <- "average"
# "middle" strategy
middle_matrix = as.data.frame(matrix(c(c(1,2,3,3,4,5),
                                       c(3,3,4,2,3,3),
                                       c(1,1,.5,.5,1,1),
                                       c(3,3,2,2,3,3)),
                                     ncol = 4))
middle_matrix$prob_strategy <- "middle"
# "pick1" strategy
pick1_matrix = as.data.frame(matrix(c( c( 1, 2,3,4,5),
                                       c( 2, 1,1,1,1),
                                       c( 1, 1,1,1,1),
                                       c( 3 ,3,3,3,3)),
                                    ncol = 4))
pick1_matrix$prob_strategy <- "pick1"
# "pick2" strategy
pick2_matrix = as.data.frame(matrix(c( c(1, 2, 2,3,4,5),
                                       c(2, 1, 3,2,2,2),
                                       c(1,.5,.5,1,1,1),
                                       c(3, 2, 2,3,3,3)),
                                    ncol = 4))
pick2_matrix$prob_strategy <- "pick2"
# "pick4" strategy
pick4_matrix = as.data.frame(matrix(c( c(1,2,3, 4, 4,5),
                                       c(4,4,4, 3, 5,4),
                                       c(1,1,1,.5,.5,1),
                                       c(3,3,3, 2, 2,3)),
                                    ncol = 4))
pick4_matrix$prob_strategy <- "pick4"
# "adjacent" strategy
adjacent_matrix = as.data.frame(matrix(c(c(1, 2, 2, 3, 3, 4, 4,5),
                                         c(2, 1, 3, 2, 4, 3, 5,4),
                                         c(1,.5,.5,.5,.5,.5,.5,1),
                                         c(3, 2, 2, 2, 2, 2, 2,3)),
                                       ncol = 4))
adjacent_matrix$prob_strategy <- "adjacent"
# Listing all strategy data frames and relabelling their columns in place.
dfs <- c("diverse_matrix", "extreme_matrix", "average_matrix", "middle_matrix", "pick1_matrix", "pick2_matrix", "pick4_matrix", "adjacent_matrix")
strategies <- c("diverse", "extreme", "average", "middle", "pick1", "pick2", "pick4", "adjacent")
# Shared column names applied to every matrix listed in `dfs`.
matrix_col_names <- c("exp_choice","agent_choice", "probability","number_of_unexpected_choices","prob_strategy")
for (matrix_name in dfs) {
  relabelled <- get(matrix_name)
  colnames(relabelled) <- matrix_col_names
  assign(matrix_name, relabelled)
}
# Collect the labelled strategy matrices for stacking below.
df_list <- list(diverse_matrix, extreme_matrix, average_matrix, middle_matrix, pick1_matrix, pick2_matrix, pick4_matrix, adjacent_matrix)
## Setting epsilon and adjusting probability values in accordance with epsilon
# Epsilon is the value that defines how often someone behaves randomly despite
# having an adopted strategy. Lower values of epsilon mean the participant is
# assumed to perform less randomly; higher values mean the participant is
# assumed to make random decisions more frequently.
# Probabilities in the hypothesis space are adjusted for epsilon.
# Parameter to determine how often agent behaves randomly
# Lower values mean agent behaves less randomly
epsilon <- .15
# updating hypothesis space based on epsilon value
# will be needed for computing likelihoods later
# NOTE(review): bind_rows() requires dplyr to be attached by the caller.
expected_matrix <- bind_rows(df_list)
expected_matrix$prob_strategy <- as.factor(expected_matrix$prob_strategy)
# Subtract epsilon mass, split across the expected choices, from each
# on-strategy probability.
# NOTE(review): none of the matrices above contain probability == .25, so this
# guard never fires with the current data -- confirm whether .25 is intended.
expected_matrix$probability <- ifelse(expected_matrix$probability == .25, expected_matrix$probability,
expected_matrix$probability - (epsilon/(4-expected_matrix$number_of_unexpected_choices)))
# Probability mass assigned to each unexpected (off-strategy) choice.
expected_matrix$probability_of_unexpected <- ifelse(expected_matrix$number_of_unexpected_choices == 0, 0,
epsilon/expected_matrix$number_of_unexpected_choices)
|
5b4d97a5e79e2265385514bf70168e6339192a9a
|
15107b515d45e60c7ea59cfcb63b758984c52272
|
/R/private_createEstimateArray.R
|
831e166b5d8653e4bfc092c38adb2c33aa2bb598
|
[] |
no_license
|
gforge/forestplot
|
e00e700b727758c30a530d077168d26b86c63f4b
|
b26b33561d2664933fc7a9b8258e26dffa8fe2e5
|
refs/heads/master
| 2023-09-01T12:16:44.558022
| 2023-08-27T19:52:18
| 2023-08-27T19:52:18
| 28,350,997
| 38
| 16
| null | 2022-11-23T20:55:20
| 2014-12-22T17:53:03
|
R
|
UTF-8
|
R
| false
| false
| 2,100
|
r
|
private_createEstimateArray.R
|
# Normalise the various ways callers can supply estimates (labeltext bundling
# the numbers, a 3-column mean, separate mean/lower/upper, or a 3D array with
# one slice per confidence band) into a canonical form: a list with the row
# labels and a rows x c(mean, lower, upper) x bands array.
createEstimateArray <- function(labeltext, lower, upper, mean) {
  # Legacy calling form: no estimate arguments at all -- labeltext itself
  # must carry the numbers.
  if (missing(lower) &&
      missing(upper) &&
      missing(mean)) {
    if (missing(labeltext)) {
      stop(
        "You need to provide the labeltext or",
        " the mean/lower/upper arguments"
      )
    }
    mean <- labeltext
    labeltext <- rownames(mean)
  }
  # Without lower/upper, mean must already contain all three series: a
  # 3-column matrix or a 3D array (bands in the third dimension).
  # NOTE(review): assert/check_* appear to come from the checkmate package --
  # confirm it is imported by the enclosing package.
  if (missing(lower) &&
      missing(upper)) {
    assert(
      check_matrix(mean, ncols = 3),
      check_array(mean, d = 3),
      check_integer(dim(mean)[2], lower = 3, upper = 3)
    )
  }
  # Fall back to the mean's rownames for labels when none were given.
  if (missing(labeltext)) {
    labeltext <- rownames(mean)
  }
  if (is.null(labeltext)) {
    stop(
      "You must provide labeltext either in the direct form as an argument",
      " or as rownames for the mean argument."
    )
  }
  # Assume that lower and upper are contained within
  # the mean variable
  if (missing(lower) &&
      missing(upper)) {
    if (NCOL(mean) != 3) {
      stop("If you do not provide lower/upper arguments your mean needs to have 3 columns")
    }
    # The mean can in this case be either a 2D matrix that generates a
    # regular forest plot, or a 3D array where the 3rd dimension
    # constitutes the different bands.
    # NOTE(review): prFpConvertMultidimArray() is a package-internal helper
    # (not visible here) that splits mean into mean/lower/upper.
    all <- prFpConvertMultidimArray(mean)
    mean <- all$mean
    lower <- all$lower
    upper <- all$upper
  }
  # All three series must have the same (non-zero) number of columns/bands.
  if (NCOL(mean) != NCOL(lower) ||
      NCOL(lower) != NCOL(upper) ||
      NCOL(mean) == 0) {
    stop(
      "Mean, lower and upper contain invalid number of columns",
      " Mean columns:", ncol(mean),
      " Lower bound columns:", ncol(lower),
      " Upper bound columns:", ncol(upper)
    )
  }
  # Stack into a rows x 3 x bands array; single-band input gets one slice.
  if (NCOL(mean) == 1) {
    estimates <- array(NA, dim = c(NROW(mean), 3, 1))
    estimates[,,1] <- cbind(mean, lower, upper) |> as.matrix()
  } else {
    estimates <- array(dim = c(NROW(mean), 3, NCOL(mean)))
    for (i in 1:NCOL(mean)) {
      estimates[,,i] <- cbind(mean[,i], lower[,i], upper[,i])
    }
  }
  # Label the middle dimension so downstream code can index by name.
  d <- dimnames(estimates)
  d[[2]] <- c("mean", "lower", "upper")
  dimnames(estimates) <- d
  list(labeltext = labeltext,
       estimates = estimates)
}
|
e04aa44edcea222ae547d207d6bd955c1bd5e50e
|
3b41c8c6913935a01a19cdf17136078582bf17bc
|
/R/temp_till_9am.R
|
d3daeb3dcec1026b75e861f9e6afc26676de6531
|
[] |
no_license
|
pkezich/SolarPVUWA
|
4a62db6a74dc2c7a2c9af2d135a934db988f986e
|
cadd4005ac16876ef1418408adba3abb62d2a652
|
refs/heads/master
| 2021-01-17T05:54:23.910629
| 2015-06-03T12:42:17
| 2015-06-03T12:42:17
| 36,012,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 782
|
r
|
temp_till_9am.R
|
# temp_till_9am() gives the modelled temperature for hours up to and including
# 9am, with temp_after_9am() used for the later times. The main function
# extracts the mean max and min temperatures from the closest BOM weather
# station and uses these helpers to extract the hourly temperatures.
#'
#'
#' @name temp_till_9am
#' @title temp_till_9am
#' @author Philip Kezich
#' @param hour Desired hour of the day (up to and including 9am)
#' @param max Maximum daily temperature
#' @param min Minimum daily temperature
#' @return The temperature value corresponding to our selected hour
#' @export
#'
temp_till_9am <- function(hour, max, min) {
  # Simple cosine temperature model: half a cosine period rising from the
  # daily minimum at hour 0 to the daily maximum at hour 9.
  half_range <- (max - min) / 2
  midpoint <- (max + min) / 2
  midpoint - half_range * cos(hour * pi / 9)
}
|
bf60e814b8b03955f2c4b12ea4b7bb953a2120ce
|
091ef5c6d171eac17282a656f98c02aa0eec3727
|
/R/bdiagMat.R
|
26a7690917e685463c993c750c47f2710e8b703b
|
[] |
no_license
|
mikewlcheung/metasem
|
079a7168a525950768024d7cf45e6496deb5c5b6
|
c5daae1ece3f1c9d9bd2cac1f752a89b7ed9a043
|
refs/heads/master
| 2023-01-21T23:36:42.082583
| 2023-01-08T01:27:06
| 2023-01-08T01:27:06
| 33,676,124
| 31
| 11
| null | 2021-07-11T11:37:21
| 2015-04-09T15:14:44
|
R
|
UTF-8
|
R
| false
| false
| 721
|
r
|
bdiagMat.R
|
# Build a block-diagonal matrix from a list of matrices (or objects coercible
# to matrices). Returns NULL for an empty list; errors on non-list input or
# any zero-length component. Off-diagonal entries are 0.
bdiagMat <- function(x){
  if (!is.list(x)) stop("\"x\" must be a list.")
  if (length(x) == 0) return(NULL)
  # Coerce every component; zero-length components are rejected, matching
  # the original behaviour.
  blocks <- lapply(x, function(component) {
    if (length(component) > 0) as.matrix(component) else stop("Zero-length component in x")
  })
  total_rows <- sum(vapply(blocks, nrow, integer(1)))
  total_cols <- sum(vapply(blocks, ncol, integer(1)))
  out <- matrix(0, total_rows, total_cols)
  # Place each block at the running (row, col) offset along the diagonal.
  row_off <- 0
  col_off <- 0
  for (b in blocks) {
    out[row_off + seq_len(nrow(b)), col_off + seq_len(ncol(b))] <- b
    row_off <- row_off + nrow(b)
    col_off <- col_off + ncol(b)
  }
  out
}
|
05bf4ddf686cd6cc999e30104cbf9af30ddb145c
|
f2a1cebe0c88195da10e3ad8f3184e6738bab6a8
|
/Simulation/getcumulativecounts.R
|
caf4776441183568a417d9e888a8d13db42e5769
|
[] |
no_license
|
als23/identifying_and_responding_to_outlier_demand_in_revenue_management
|
229ac85e0e723d0c2b352d0c186eeda0b16fde4a
|
bba9b8197a432bee65ea8fcf5fbc9d78d9b36726
|
refs/heads/main
| 2023-08-15T08:14:40.903841
| 2021-09-24T22:45:46
| 2021-09-24T22:45:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88
|
r
|
getcumulativecounts.R
|
# Row-wise cumulative sums.
#
# `data`: a numeric matrix (or data frame coercible to one) where each row is
#   a series of per-period counts.
# Returns a matrix of the same dimensions where entry [i, j] is the cumulative
# sum of row i up to column j.
getcumulativecounts <- function(data){
  mat <- as.matrix(data)
  # apply() over rows returns one *column* per input row, so transpose back.
  df <- t(apply(mat, 1, cumsum))
  # FIX: for a single-column input, apply() simplifies to a plain vector and
  # t() yields a 1 x n row matrix; transpose again to restore n x 1.
  if (ncol(mat) == 1L) {
    df <- t(df)
  }
  return(df)
}
|
e704b99bea415bf0d784ba705319927ffbe4626f
|
cfaec47464084ffa17bf1732ff7c87e5372084a4
|
/Stats/Scripts/sexplot/sexage.R
|
c8f2403d6fff1b6eb28a6c120ac5a547df90aec8
|
[] |
no_license
|
pushpendra42/AcademicProjects
|
f972150b898338a4aa4a370a8147938ccc233974
|
723789fdd9050fd9c9468204ca0cc4d7c62c5d36
|
refs/heads/main
| 2023-01-30T12:25:27.093261
| 2020-12-17T08:21:29
| 2020-12-17T08:21:29
| 321,918,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
r
|
sexage.R
|
######## SEXPLOT ###########
library(ggplot2)

# Pie chart (polar-transformed stacked bar) of participant sex.
sex_counts <- data.frame(
  Sex = c("Male", "Female"),
  value = c(10, 5)
)
# NOTE(review): the "Participents" typo in the title is preserved from the
# original; fixing it would change rendered output.
plot1 <- ggplot(sex_counts, aes(x = "", y = value, fill = Sex)) +
  geom_bar(width = 1, stat = "identity") +
  coord_polar("y", start = 0) +
  scale_fill_manual(values = c("#42b6f4", "#f7a0c9")) +
  labs(title = "Sex Distribution of Study Participents", x = "value", y = "Gender") +
  theme(plot.title = element_text(hjust = 0.5, size = 15),
        legend.title = element_text(size = 15))
plot1

# Polar bar chart of participant age bands, labelled with the counts.
age <- data.frame(
  Age = c("21-22", "22-23", "23-24", "24-25", "25-26"),
  agevalue = c(4, 5, 1, 2, 1)
)
ggplot(age, aes(x = Age, y = agevalue, fill = Age)) +
  geom_bar(width = 0.85, stat = "identity") +
  coord_polar(theta = "x") +
  geom_text(vjust = 0, aes(label = agevalue), size = 5)
|
4a547c7cd986b0e4b97ab3ab6856024e7bd58a3e
|
92a2fef26ccc8b48fc40a9d38d1a36c2233aaa65
|
/R/Ch9/9.2RegularizationMethods.R
|
597971b6d01fa7a95293e6556d264898994e6acb
|
[] |
no_license
|
happyrabbit/DataScientistR
|
a48eb2b9b08d315de2545d650a7b2cbfaf4e3003
|
2e32e045af051ec6b1a38e2e7c8895552440088a
|
refs/heads/master
| 2020-12-10T04:55:22.800805
| 2020-06-23T21:46:59
| 2020-06-23T21:46:59
| 89,527,935
| 18
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,077
|
r
|
9.2RegularizationMethods.R
|
#############
### Ridge regression
#############
# Customer segmentation data.
dat<-read.csv("https://raw.githubusercontent.com/happyrabbit/DataScientistR/master/Data/SegData.csv")
# Clean the data: drop erroneous observations (spending cannot be negative)
dat<-subset(dat,store_exp>0 & online_exp>0)
# Use the 10 survey-question variables as predictors
trainx<-dat[,grep("Q",names(dat))]
# Use the sum of in-store and online spending as the response:
# total spending = in-store spending + online spending
trainy<-dat$store_exp+dat$online_exp
ctrl <- trainControl(method = "cv", number = 10)
ridgeGrid <- data.frame(.lambda = seq(0, .1, length = 20))
set.seed(100)
ridgeRegTune <- train(trainx, trainy,
                      method = "ridge",
                      ## Fit the model over a grid of penalty values
                      tuneGrid = ridgeGrid,
                      trControl = ctrl,
                      ## Center and scale the predictors
                      preProc = c("center", "scale"))
ridgefit = enet(x = as.matrix(trainx), y = trainy, lambda = 0.01,
                # Standardize the predictors here
                normalize = TRUE)
ridgePred <- predict(ridgefit, newx = as.matrix(trainx),
                     s = 1, mode = "fraction", type = "fit")
names(ridgePred)
head(ridgePred$fit)
ridgeCoef<-predict(ridgefit,newx = as.matrix(trainx),
                   s=1, mode="fraction", type="coefficients")
# Results not shown here
RidgeCoef=ridgeCoef$coefficients
################
### Lasso
################
ctrl <- trainControl(method = "cv", number = 10)
lassoGrid <- data.frame(fraction = seq(.8, 1, length = 20))
set.seed(100)
lassoTune <- train(trainx, trainy,
                   method = "lars",
                   ## Fit the model over a grid of penalty values
                   tuneGrid = lassoGrid,
                   trControl = ctrl,
                   ## Center and scale the predictors
                   preProc = c("center", "scale"))
lassoTune
plot(lassoTune)
lassoModel<- enet(x = as.matrix(trainx), y = trainy, lambda = 0, normalize = TRUE)
lassoFit <- predict(lassoModel, newx = as.matrix(trainx),s = .95, mode = "fraction",type = "fit")
head(lassoFit$fit)
lassoCoef<-predict(lassoModel,newx = as.matrix(trainx),s=0.95, mode="fraction", type="coefficients")
LassoCoef=lassoCoef$coefficients
################
### Elastic net
################
enetGrid <- expand.grid(.lambda = seq(0,0.2,length=20), .fraction = seq(.8, 1, length = 20))
set.seed(100)
enetTune <- train(trainx, trainy,
                  method = "enet",
                  tuneGrid = enetGrid,
                  trControl = ctrl,
                  preProc = c("center", "scale"))
enetfit = enet(x = as.matrix(trainx), y = trainy, lambda = 0.01,
               # Standardize the predictors here
               normalize = TRUE)
enetPred <- predict(enetfit, newx = as.matrix(trainx),
                    s = 0.958, mode = "fraction", type = "fit")
# FIX: coefficients were taken from ridgefit (copy-paste slip); the elastic
# net coefficients must come from enetfit, matching enetPred above.
enetCoef<-predict(enetfit,newx = as.matrix(trainx),
                  s=0.958, mode="fraction", type="coefficients")
|
a431cc6f135692ea87727dbedf08bd74561af3b8
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/cairoScaledFontSetUserData.Rd
|
730188ff907256f3b84f3969598ea4d70d357ea5
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 911
|
rd
|
cairoScaledFontSetUserData.Rd
|
\alias{cairoScaledFontSetUserData}
\name{cairoScaledFontSetUserData}
\title{cairoScaledFontSetUserData}
\description{Attach user data to \code{scaled.font}. To remove user data from a scaled font,
call this function with the key that was used to set it and \code{NULL}
for \code{data}.}
\usage{cairoScaledFontSetUserData(scaled.font, key, user.data)}
\arguments{
\item{\verb{scaled.font}}{[\code{\link{CairoScaledFont}}] a \code{\link{CairoScaledFont}}}
\item{\verb{key}}{[\code{\link{CairoUserDataKey}}] a \code{\link{CairoUserDataKey}} to attach the user data to}
\item{\verb{user.data}}{[R object] the user data to attach to the \code{\link{CairoScaledFont}}}
}
\details{ Since 1.4}
\value{[\code{\link{CairoStatus}}] \code{CAIRO_STATUS_SUCCESS} or \code{CAIRO_STATUS_NO_MEMORY} if a
slot could not be allocated for the user data.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
384591656fb5c64dce640e0414cf6c5f52a96b87
|
1144fe71d1be1db8481d0f32cd1e7b637bc4cded
|
/src/eda.R
|
cb8ee00b786d18f9974fd8bfb26dbcb482b713b7
|
[] |
no_license
|
2ndFloorStuff/CompetitionIndexer
|
b999a68dbb7a7c696a323ff07cf6faf32bbec71f
|
ef58777b32fbd6fd51f832dfc8138fce671da343
|
refs/heads/master
| 2020-05-17T11:48:03.448173
| 2014-04-15T02:11:10
| 2014-04-15T02:11:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,785
|
r
|
eda.R
|
R version 3.0.2 (2013-09-25) -- "Frisbee Sailing"
Copyright (C) 2013 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.
Natural language support but running in an English locale
R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.
Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.
[Previously saved workspace restored]
Loading required package: JGR
Loading required package: rJava
Loading required package: JavaGD
Loading required package: iplots
starting httpd help server ... done
Loading required package: plyr
Loading required package: markdown
Loading required package: lubridate
Loading required package: knitr
Loading required package: DeducerPlugInExample
Loading required package: Deducer
Loading required package: ggplot2
Loading required package: scales
Loading required package: car
Loading required package: MASS
Loading required package: nnet
Loading required package: multcomp
Loading required package: mvtnorm
Loading required package: survival
Loading required package: splines
Loading required package: effects
Loading required package: lattice
Loading required package: grid
Loading required package: colorspace
Attaching package: ‘effects’
The following object is masked from ‘package:car’:
Prestige
The following object is masked from ‘package:datasets’:
Titanic
Loading required package: foreign
Loading required package: e1071
Loading required package: class
Attaching package: ‘Deducer’
The following object is masked from ‘package:stats’:
summary.lm
Loading required package: DeducerExtras
Loading required package: irr
Loading required package: lpSolve
> U07m2013Cleaned[,7]<-as.factor(U07m2013Cleaned[,7])
> write.csv(U07m2013Cleaned,'/home/bradfordbender/R/CompetitionIndexer/data/U07m2013Cleaned.csv')
> installPackages()
> install.packages(c("DeducerExtras","DeducerPlugInExample","DeducerPlugInScaling","DeducerSpatial","DeducerSurvival","DeducerText"))
Installing packages into ‘/home/bradfordbender/R/x86_64-pc-linux-gnu-library/3.0’
(as ‘lib’ is unspecified)
also installing the dependencies ‘RWekajars’, ‘slam’, ‘RWeka’, ‘OpenStreetMap’, ‘rgdal’, ‘tm’, ‘wordcloud’, ‘Snowball’
trying URL 'http://cran.r-project.org/src/contrib/RWekajars_3.7.10-1.tar.gz'
Content type 'application/x-gzip' length 5751089 bytes (5.5 Mb)
opened URL
==================================================
downloaded 5.5 Mb
trying URL 'http://cran.r-project.org/src/contrib/slam_0.1-31.tar.gz'
Content type 'application/x-gzip' length 46519 bytes (45 Kb)
opened URL
==================================================
downloaded 45 Kb
trying URL 'http://cran.r-project.org/src/contrib/RWeka_0.4-21.tar.gz'
Content type 'application/x-gzip' length 410287 bytes (400 Kb)
opened URL
==================================================
downloaded 400 Kb
trying URL 'http://cran.r-project.org/src/contrib/OpenStreetMap_0.3.1.tar.gz'
Content type 'application/x-gzip' length 2272103 bytes (2.2 Mb)
opened URL
==================================================
downloaded 2.2 Mb
trying URL 'http://cran.r-project.org/src/contrib/rgdal_0.8-14.tar.gz'
Content type 'application/x-gzip' length 1609221 bytes (1.5 Mb)
opened URL
==================================================
downloaded 1.5 Mb
trying URL 'http://cran.r-project.org/src/contrib/tm_0.5-10.tar.gz'
Content type 'application/x-gzip' length 503650 bytes (491 Kb)
opened URL
==================================================
downloaded 491 Kb
trying URL 'http://cran.r-project.org/src/contrib/wordcloud_2.4.tar.gz'
Content type 'application/x-gzip' length 40496 bytes (39 Kb)
opened URL
==================================================
downloaded 39 Kb
trying URL 'http://cran.r-project.org/src/contrib/Snowball_0.0-11.tar.gz'
Content type 'application/x-gzip' length 283386 bytes (276 Kb)
opened URL
==================================================
downloaded 276 Kb
trying URL 'http://cran.r-project.org/src/contrib/DeducerExtras_1.7.tar.gz'
Content type 'application/x-gzip' length 8669 bytes
opened URL
==================================================
downloaded 8669 bytes
trying URL 'http://cran.r-project.org/src/contrib/DeducerPlugInExample_0.2-0.tar.gz'
Content type 'application/x-gzip' length 1261779 bytes (1.2 Mb)
opened URL
==================================================
downloaded 1.2 Mb
trying URL 'http://cran.r-project.org/src/contrib/DeducerPlugInScaling_0.1-0.tar.gz'
Content type 'application/x-gzip' length 7511 bytes
opened URL
==================================================
downloaded 7511 bytes
trying URL 'http://cran.r-project.org/src/contrib/DeducerSpatial_0.7.tar.gz'
Content type 'application/x-gzip' length 480129 bytes (468 Kb)
opened URL
==================================================
downloaded 468 Kb
trying URL 'http://cran.r-project.org/src/contrib/DeducerSurvival_0.1-0.tar.gz'
Content type 'application/x-gzip' length 3099 bytes
opened URL
==================================================
downloaded 3099 bytes
trying URL 'http://cran.r-project.org/src/contrib/DeducerText_0.1-0.tar.gz'
Content type 'application/x-gzip' length 90996 bytes (88 Kb)
opened URL
==================================================
downloaded 88 Kb
The downloaded source packages are in
‘/tmp/RtmpIeNg4h/downloaded_packages’
Warning messages:
1: In install.packages(c("DeducerExtras", "DeducerPlugInExample", "DeducerPlugInScaling", :
installation of package ‘rgdal’ had non-zero exit status
2: In install.packages(c("DeducerExtras", "DeducerPlugInExample", "DeducerPlugInScaling", :
installation of package ‘OpenStreetMap’ had non-zero exit status
3: In install.packages(c("DeducerExtras", "DeducerPlugInExample", "DeducerPlugInScaling", :
installation of package ‘DeducerSpatial’ had non-zero exit status
> JGR::package.manager()
> model.lm <- lm(formula=dbh ~ block + name + block:name,data=U07m2013Cleaned,na.action=na.omit)
Warning messages:
1: not plotting observations with leverage one:
15, 35, 155, 156, 158, 159, 164, 186, 191, 194, 205
2: not plotting observations with leverage one:
15, 35, 155, 156, 158, 159, 164, 186, 191, 194, 205
3: not plotting observations with leverage one:
15, 35, 155, 156, 158, 159, 164, 186, 191, 194, 205
4: In sqrt(crit * p * (1 - hh)/hh) : NaNs produced
5: In sqrt(crit * p * (1 - hh)/hh) : NaNs produced
6: In analyze.model(term, mod, xlevels, default.levels) :
name is not a high-order term in the model
> Anova(model.lm,type='II')
Anova Table (Type II tests)
Response: dbh
Sum Sq Df F value Pr(>F)
block 25.62 11 0.8518 0.5888
name 274.01 12 8.3520 4.968e-12 ***
block:name 85.48 29 1.0781 0.3709
Residuals 421.04 154
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> summary(model.lm)
Call:
lm(formula = dbh ~ block + name + block:name, data = U07m2013Cleaned,
na.action = na.omit)
Residuals:
Min 1Q Median 3Q Max
-3.7750 -0.8354 0.0000 1.0708 4.0500
Coefficients: (103 not defined because of singularities)
Estimate Std. Error t value Pr(>|t|)
(Intercept) 5.600e+00 1.169e+00 4.790 3.89e-06 ***
block2 -3.775e+00 2.742e+00 -1.377 0.17059
block3 1.225e+00 2.742e+00 0.447 0.65568
block4 1.250e-01 2.742e+00 0.046 0.96370
block5 -6.750e-01 1.432e+00 -0.471 0.63804
block6 -7.500e-02 2.742e+00 -0.027 0.97821
block7 -7.667e-01 1.509e+00 -0.508 0.61224
block8 -4.750e-01 2.742e+00 -0.173 0.86270
block9 -1.375e+00 2.742e+00 -0.501 0.61677
block10 -1.575e+00 1.432e+00 -1.100 0.27310
block11 -1.070e+00 2.218e+00 -0.482 0.63026
block12 -7.988e-01 2.217e+00 -0.360 0.71910
name17XGA24 -1.266e-14 2.025e+00 0.000 1.00000
name17XGA5 -1.600e+00 2.025e+00 -0.790 0.43070
name18XAG04 -1.100e+00 2.614e+00 -0.421 0.67453
name1XTE04 -3.025e+00 2.658e+00 -1.138 0.25679
name2XT4E04 -2.250e-01 1.432e+00 -0.157 0.87535
name80XAA04 1.808e+00 1.263e+00 1.432 0.15419
name81XAA04 1.275e+00 1.432e+00 0.890 0.37465
name82XAA04 9.000e-01 1.509e+00 0.596 0.55188
name83XAA04 3.600e+00 1.432e+00 2.514 0.01296 *
name84XAA04 3.875e+00 1.263e+00 3.068 0.00254 **
name85XAA04 -2.025e+00 1.849e+00 -1.095 0.27506
nameNM6 1.975e+00 1.849e+00 1.068 0.28704
block2:name17XGA24 NA NA NA NA
block3:name17XGA24 NA NA NA NA
block4:name17XGA24 NA NA NA NA
block5:name17XGA24 NA NA NA NA
block6:name17XGA24 NA NA NA NA
block7:name17XGA24 NA NA NA NA
block8:name17XGA24 NA NA NA NA
block9:name17XGA24 NA NA NA NA
block10:name17XGA24 NA NA NA NA
block11:name17XGA24 NA NA NA NA
block12:name17XGA24 NA NA NA NA
block2:name17XGA5 NA NA NA NA
block3:name17XGA5 NA NA NA NA
block4:name17XGA5 NA NA NA NA
block5:name17XGA5 NA NA NA NA
block6:name17XGA5 NA NA NA NA
block7:name17XGA5 NA NA NA NA
block8:name17XGA5 NA NA NA NA
block9:name17XGA5 NA NA NA NA
block10:name17XGA5 NA NA NA NA
block11:name17XGA5 NA NA NA NA
block12:name17XGA5 NA NA NA NA
block2:name18XAG04 5.850e+00 3.697e+00 1.582 0.11565
block3:name18XAG04 NA NA NA NA
block4:name18XAG04 9.000e-01 3.697e+00 0.243 0.80800
block5:name18XAG04 NA NA NA NA
block6:name18XAG04 -2.500e-01 3.697e+00 -0.068 0.94618
block7:name18XAG04 NA NA NA NA
block8:name18XAG04 NA NA NA NA
block9:name18XAG04 NA NA NA NA
block10:name18XAG04 NA NA NA NA
block11:name18XAG04 NA NA NA NA
block12:name18XAG04 NA NA NA NA
block2:name1XTE04 NA NA NA NA
block3:name1XTE04 1.225e+00 3.728e+00 0.329 0.74291
block4:name1XTE04 NA NA NA NA
block5:name1XTE04 NA NA NA NA
block6:name1XTE04 4.000e-01 3.994e+00 0.100 0.92035
block7:name1XTE04 NA NA NA NA
block8:name1XTE04 NA NA NA NA
block9:name1XTE04 NA NA NA NA
block10:name1XTE04 NA NA NA NA
block11:name1XTE04 NA NA NA NA
block12:name1XTE04 NA NA NA NA
block2:name2XT4E04 NA NA NA NA
block3:name2XT4E04 NA NA NA NA
block4:name2XT4E04 NA NA NA NA
block5:name2XT4E04 NA NA NA NA
block6:name2XT4E04 NA NA NA NA
block7:name2XT4E04 -1.158e+00 2.081e+00 -0.557 0.57852
block8:name2XT4E04 NA NA NA NA
block9:name2XT4E04 NA NA NA NA
block10:name2XT4E04 NA NA NA NA
block11:name2XT4E04 NA NA NA NA
block12:name2XT4E04 NA NA NA NA
block2:name80XAA04 3.142e+00 2.844e+00 1.105 0.27102
block3:name80XAA04 -5.667e-01 2.942e+00 -0.193 0.84754
block4:name80XAA04 NA NA NA NA
block5:name80XAA04 NA NA NA NA
block6:name80XAA04 NA NA NA NA
block7:name80XAA04 NA NA NA NA
block8:name80XAA04 NA NA NA NA
block9:name80XAA04 1.292e+00 2.593e+00 0.498 0.61903
block10:name80XAA04 NA NA NA NA
block11:name80XAA04 NA NA NA NA
block12:name80XAA04 NA NA NA NA
block2:name81XAA04 1.100e+00 3.093e+00 0.356 0.72263
block3:name81XAA04 NA NA NA NA
block4:name81XAA04 NA NA NA NA
block5:name81XAA04 -5.000e-02 1.849e+00 -0.027 0.97846
block6:name81XAA04 1.000e+00 3.019e+00 0.331 0.74090
block7:name81XAA04 -2.750e-01 1.968e+00 -0.140 0.88905
block8:name81XAA04 -1.167e+00 3.019e+00 -0.386 0.69969
block9:name81XAA04 NA NA NA NA
block10:name81XAA04 NA NA NA NA
block11:name81XAA04 NA NA NA NA
block12:name81XAA04 NA NA NA NA
block2:name82XAA04 NA NA NA NA
block3:name82XAA04 -1.025e+00 3.019e+00 -0.340 0.73467
block4:name82XAA04 -7.000e-01 3.019e+00 -0.232 0.81694
block5:name82XAA04 NA NA NA NA
block6:name82XAA04 -7.500e-02 2.962e+00 -0.025 0.97983
block7:name82XAA04 NA NA NA NA
block8:name82XAA04 4.750e-01 3.019e+00 0.157 0.87518
block9:name82XAA04 NA NA NA NA
block10:name82XAA04 NA NA NA NA
block11:name82XAA04 NA NA NA NA
block12:name82XAA04 NA NA NA NA
block2:name83XAA04 NA NA NA NA
block3:name83XAA04 -3.000e+00 2.923e+00 -1.026 0.30634
block4:name83XAA04 NA NA NA NA
block5:name83XAA04 NA NA NA NA
block6:name83XAA04 NA NA NA NA
block7:name83XAA04 -9.583e-02 1.818e+00 -0.053 0.95802
block8:name83XAA04 NA NA NA NA
block9:name83XAA04 1.500e-01 2.981e+00 0.050 0.95993
block10:name83XAA04 NA NA NA NA
block11:name83XAA04 NA NA NA NA
block12:name83XAA04 NA NA NA NA
block2:name84XAA04 9.000e-01 2.903e+00 0.310 0.75700
block3:name84XAA04 NA NA NA NA
block4:name84XAA04 -2.967e+00 2.942e+00 -1.008 0.31492
block5:name84XAA04 -2.025e+00 1.721e+00 -1.177 0.24116
block6:name84XAA04 NA NA NA NA
block7:name84XAA04 NA NA NA NA
block8:name84XAA04 -3.267e+00 2.700e+00 -1.210 0.22820
block9:name84XAA04 -3.200e+00 2.658e+00 -1.204 0.23040
block10:name84XAA04 NA NA NA NA
block11:name84XAA04 NA NA NA NA
block12:name84XAA04 NA NA NA NA
block2:name85XAA04 NA NA NA NA
block3:name85XAA04 NA NA NA NA
block4:name85XAA04 -7.333e-01 3.237e+00 -0.227 0.82110
block5:name85XAA04 -1.000e-01 2.338e+00 -0.043 0.96594
block6:name85XAA04 NA NA NA NA
block7:name85XAA04 4.917e-01 2.658e+00 0.185 0.85347
block8:name85XAA04 NA NA NA NA
block9:name85XAA04 NA NA NA NA
block10:name85XAA04 NA NA NA NA
block11:name85XAA04 NA NA NA NA
block12:name85XAA04 NA NA NA NA
block2:nameNM6 NA NA NA NA
block3:nameNM6 NA NA NA NA
block4:nameNM6 NA NA NA NA
block5:nameNM6 NA NA NA NA
block6:nameNM6 NA NA NA NA
block7:nameNM6 NA NA NA NA
block8:nameNM6 NA NA NA NA
block9:nameNM6 NA NA NA NA
block10:nameNM6 NA NA NA NA
block11:nameNM6 NA NA NA NA
block12:nameNM6 NA NA NA NA
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 1.653 on 154 degrees of freedom
(45 observations deleted due to missingness)
Multiple R-squared: 0.5232, Adjusted R-squared: 0.3622
F-statistic: 3.25 on 52 and 154 DF, p-value: 9.465e-09
> summary(effect(term="name",mod=model.lm))
name effect
name
17XGA04 17XGA24 17XGA5 18XAG04 1XTE04 2XT4E04 80XAA04 81XAA04 82XAA04 83XAA04 84XAA04 85XAA04 NM6
4.825322 4.825322 3.225322 4.306965 1.951530 4.505193 6.973349 6.155757 5.610709 8.139915 7.874356 2.780314 6.800322
Lower 95 Percent Confidence Limits
name
17XGA04 17XGA24 17XGA5 18XAG04 1XTE04 2XT4E04 80XAA04 81XAA04 82XAA04 83XAA04 84XAA04 85XAA04 NM6
2.56514144 0.31534954 -1.28465046 0.81364636 -2.03575558 1.23001054 4.73661004 4.00917655 3.36355423 5.46833490 5.81607613 -0.07822658 4.89598265
Upper 95 Percent Confidence Limits
name
17XGA04 17XGA24 17XGA5 18XAG04 1XTE04 2XT4E04 80XAA04 81XAA04 82XAA04 83XAA04 84XAA04 85XAA04 NM6
7.085503 9.335295 7.735295 7.800283 5.938815 7.780376 9.210089 8.302337 7.857863 10.811496 9.932636 5.638855 8.704661
Warning message:
In analyze.model(term, mod, xlevels, default.levels) :
name is not a high-order term in the model
> rm('model.lm')
replacement has 1 row, data has 0
dev.new()
> ggplot() +
+ geom_violin(aes(x = dbh,y = name),data=U07m2013Cleaned)
replacement has 1 row, data has 0
replacement has 1 row, data has 0
replacement has 1 row, data has 0
In addition: There were 50 or more warnings (use warnings() to see the first 50)
descriptive.table(vars = d(dbh) ,
+ strata = d(name),data= U07m2013Cleaned,
+ func.names =c("Mean","St. Deviation","Valid N","Median","25th Percentile","75th Percentile","Minimum","Maximum","Skew",
+ "Kurtosis"))
Need at least 3 complete observations.
> descriptive.table(vars = d(dbh) ,
+ strata = d(name),data= U07m2013Cleaned,
+ func.names =c("Mean","St. Deviation","Valid N","Median","25th Percentile","75th Percentile","Minimum","Maximum"))
$`name: 17XGA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
4.730769 1.489063 13.000000 4.700000 3.700000 5.100000 2.900000
Maximum.dbh
8.000000
$`name: 17XGA24 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
5.6 NA 1.0 5.6 5.6 5.6 5.6
Maximum.dbh
5.6
$`name: 17XGA5 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
4 NA 1 4 4 4 4
Maximum.dbh
4
$`name: 18XAG04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
5.075000 2.086944 16.000000 5.650000 3.350000 6.375000 1.300000
Maximum.dbh
8.200000
$`name: 1XTE04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
3.662500 1.707076 8.000000 3.100000 2.400000 5.200000 1.500000
Maximum.dbh
6.000000
$`name: 2XT4E04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
3.625 0.450 4.000 3.750 3.450 3.925 3.000
Maximum.dbh
4.000
$`name: 80XAA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
7.022727 1.950807 22.000000 7.000000 5.950000 8.600000 3.000000
Maximum.dbh
10.100000
$`name: 81XAA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
6.163158 2.180013 19.000000 6.300000 4.650000 7.700000 2.700000
Maximum.dbh
10.200000
$`name: 82XAA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
6.382609 1.208174 23.000000 6.200000 5.750000 7.100000 3.500000
Maximum.dbh
8.700000
$`name: 83XAA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
8.116667 1.365942 24.000000 8.300000 7.475000 8.900000 5.000000
Maximum.dbh
10.600000
$`name: 84XAA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
6.376190 1.869466 21.000000 6.100000 4.800000 7.700000 3.400000
Maximum.dbh
9.700000
$`name: 85XAA04 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
2.7888889 0.9033887 9.0000000 2.7000000 2.0000000 3.3000000 1.8000000
Maximum.dbh
4.4000000
$`name: NM6 `
Mean.dbh St. Deviation.dbh Valid N.dbh Median.dbh 25th Percentile.dbh.25% 75th Percentile.dbh.75% Minimum.dbh
6.676087 1.507638 46.000000 6.650000 5.600000 7.775000 3.200000
Maximum.dbh
9.200000
replacement has 1 row, data has 0
replacement has 1 row, data has 0
dev.new()
There were 46 warnings (use warnings() to see them)
> ggplot() +
+ geom_boxplot(aes(y = dbh,x = name),data=U07m2013Cleaned,na.rm = TRUE) +
+ geom_jitter(aes(x = name,y = dbh),data=U07m2013Cleaned)
Warning message:
Removed 45 rows containing missing values (geom_point).
> Exception in thread "AWT-EventQueue-0" java.lang.NullPointerException
at org.rosuda.JGR.toolkit.JavaGD.getFileDlg(JavaGD.java:123)
at org.rosuda.JGR.toolkit.JavaGD.actionPerformed(JavaGD.java:151)
at javax.swing.AbstractButton.fireActionPerformed(AbstractButton.java:2018)
at javax.swing.AbstractButton$Handler.actionPerformed(AbstractButton.java:2341)
at javax.swing.DefaultButtonModel.fireActionPerformed(DefaultButtonModel.java:402)
at javax.swing.DefaultButtonModel.setPressed(DefaultButtonModel.java:259)
at javax.swing.AbstractButton.doClick(AbstractButton.java:376)
at javax.swing.plaf.basic.BasicMenuItemUI.doClick(BasicMenuItemUI.java:833)
at javax.swing.plaf.basic.BasicMenuItemUI$Handler.mouseReleased(BasicMenuItemUI.java:877)
at java.awt.Component.processMouseEvent(Component.java:6505)
at javax.swing.JComponent.processMouseEvent(JComponent.java:3311)
at java.awt.Component.processEvent(Component.java:6270)
at java.awt.Container.processEvent(Container.java:2229)
at java.awt.Component.dispatchEventImpl(Component.java:4861)
at java.awt.Container.dispatchEventImpl(Container.java:2287)
at java.awt.Component.dispatchEvent(Component.java:4687)
at java.awt.LightweightDispatcher.retargetMouseEvent(Container.java:4832)
at java.awt.LightweightDispatcher.processMouseEvent(Container.java:4492)
at java.awt.LightweightDispatcher.dispatchEvent(Container.java:4422)
at java.awt.Container.dispatchEventImpl(Container.java:2273)
at java.awt.Window.dispatchEventImpl(Window.java:2719)
at java.awt.Component.dispatchEvent(Component.java:4687)
at java.awt.EventQueue.dispatchEventImpl(EventQueue.java:735)
at java.awt.EventQueue.access$200(EventQueue.java:103)
at java.awt.EventQueue$3.run(EventQueue.java:694)
at java.awt.EventQueue$3.run(EventQueue.java:692)
at java.security.AccessController.doPrivileged(Native Method)
at java.security.ProtectionDomain$1.doIntersectionPrivilege(ProtectionDomain.java:76)
at java.security.ProtectionDomain$1.doIntersectionPrivilege(ProtectionDomain.java:87)
at java.awt.EventQueue$4.run(EventQueue.java:708)
at java.awt.EventQueue$4.run(EventQueue.java:706)
at java.security.AccessController.doPrivileged(Native Method)
at java.security.ProtectionDomain$1.doIntersectionPrivilege(ProtectionDomain.java:76)
at java.awt.EventQueue.dispatchEvent(EventQueue.java:705)
at java.awt.EventDispatchThread.pumpOneEventForFilters(EventDispatchThread.java:242)
at java.awt.EventDispatchThread.pumpEventsForFilter(EventDispatchThread.java:161)
at java.awt.EventDispatchThread.pumpEventsForHierarchy(EventDispatchThread.java:150)
at java.awt.EventDispatchThread.pumpEvents(EventDispatchThread.java:146)
at java.awt.EventDispatchThread.pumpEvents(EventDispatchThread.java:138)
at java.awt.EventDispatchThread.run(EventDispatchThread.java:91)
Exception in thread "AWT-EventQueue-0" java.lang.NullPointerException
at org.rosuda.JGR.toolkit.JavaGD.getFileDlg(JavaGD.java:123)
at org.rosuda.JGR.toolkit.JavaGD.actionPerformed(JavaGD.java:151)
at javax.swing.AbstractButton.fireActionPerformed(AbstractButton.java:2018)
at javax.swing.AbstractButton$Handler.actionPerformed(AbstractButton.java:2341)
at javax.swing.DefaultButtonModel.fireActionPerformed(DefaultButtonModel.java:402)
at javax.swing.DefaultButtonModel.setPressed(DefaultButtonModel.java:259)
at javax.swing.AbstractButton.doClick(AbstractButton.java:376)
at javax.swing.plaf.basic.BasicMenuItemUI.doClick(BasicMenuItemUI.java:833)
at javax.swing.plaf.basic.BasicMenuItemUI$Handler.mouseReleased(BasicMenuItemUI.java:877)
at java.awt.Component.processMouseEvent(Component.java:6505)
at javax.swing.JComponent.processMouseEvent(JComponent.java:3311)
at java.awt.Component.processEvent(Component.java:6270)
at java.awt.Container.processEvent(Container.java:2229)
at java.awt.Component.dispatchEventImpl(Component.java:4861)
at java.awt.Container.dispatchEventImpl(Container.java:2287)
at java.awt.Component.dispatchEvent(Component.java:4687)
at java.awt.LightweightDispatcher.retargetMouseEvent(Container.java:4832)
at java.awt.LightweightDispatcher.processMouseEvent(Container.java:4492)
at java.awt.LightweightDispatcher.dispatchEvent(Container.java:4422)
at java.awt.Container.dispatchEventImpl(Container.java:2273)
at java.awt.Window.dispatchEventImpl(Window.java:2719)
at java.awt.Component.dispatchEvent(Component.java:4687)
at java.awt.EventQueue.dispatchEventImpl(EventQueue.java:735)
at java.awt.EventQueue.access$200(EventQueue.java:103)
at java.awt.EventQueue$3.run(EventQueue.java:694)
at java.awt.EventQueue$3.run(EventQueue.java:692)
at java.security.AccessController.doPrivileged(Native Method)
at java.security.ProtectionDomain$1.doIntersectionPrivilege(ProtectionDomain.java:76)
at java.security.ProtectionDomain$1.doIntersectionPrivilege(ProtectionDomain.java:87)
at java.awt.EventQueue$4.run(EventQueue.java:708)
at java.awt.EventQueue$4.run(EventQueue.java:706)
at java.security.AccessController.doPrivileged(Native Method)
at java.security.ProtectionDomain$1.doIntersectionPrivilege(ProtectionDomain.java:76)
at java.awt.EventQueue.dispatchEvent(EventQueue.java:705)
at java.awt.EventDispatchThread.pumpOneEventForFilters(EventDispatchThread.java:242)
at java.awt.EventDispatchThread.pumpEventsForFilter(EventDispatchThread.java:161)
at java.awt.EventDispatchThread.pumpEventsForHierarchy(EventDispatchThread.java:150)
at java.awt.EventDispatchThread.pumpEvents(EventDispatchThread.java:146)
at java.awt.EventDispatchThread.pumpEvents(EventDispatchThread.java:138)
at java.awt.EventDispatchThread.run(EventDispatchThread.java:91)
dev.new()
Warning messages:
1: Removed 45 rows containing non-finite values (stat_ydensity).
2: Removed 45 rows containing missing values (geom_point).
3: Removed 45 rows containing missing values (geom_point).
> ggplot() +
+ geom_boxplot(aes(y = dbh,x = name),data=U07m2013Cleaned,na.rm = TRUE) +
+ geom_jitter(aes(x = name,y = dbh),data=U07m2013Cleaned)
Warning message:
Removed 45 rows containing missing values (geom_point).
> dev.new()
Warning messages:
1: Removed 45 rows containing non-finite values (stat_ydensity).
2: Removed 45 rows containing missing values (geom_point).
> ggplot() +
+ geom_boxplot(aes(y = dbh,x = name),data=U07m2013Cleaned,na.rm = TRUE) +
+ geom_violin(aes(x = name,y = dbh),data=U07m2013Cleaned) +
+ geom_jitter(aes(x = name,y = dbh),data=U07m2013Cleaned)
Warning messages:
1: Removed 45 rows containing non-finite values (stat_ydensity).
2: Removed 45 rows containing missing values (geom_point).
|
500e7f9b1f0cc89e7afeb3dc34344772337989ef
|
c8ca4a1e1229c0b431887a230644a83685b62cf0
|
/2 R Programming/ProgrammingAssignment2/cachematrix.R
|
89a3445fb18d318a1c8d366c7267861aea397af8
|
[] |
no_license
|
cabetodc/datasciencecoursera
|
0b633cbe6bc372760ec870dc23b86d1e0abb3d53
|
2d7faf533a47a8a6bc6a881fb0c852aaef4e3733
|
refs/heads/master
| 2021-01-10T19:40:19.783581
| 2015-04-10T23:20:58
| 2015-04-10T23:20:58
| 31,678,657
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,893
|
r
|
cachematrix.R
|
######################################
## Caching the Inverse of a Matrix ##
#####################################
## The following functions create a matrix that cache and
## compute the inverse of a square matrix with the "solve" function,
## assuming that the matrix supplied is always invertible.
## A. makeCacheMatrix: This function creates a special "matrix" object that
## can cache its inverse, which is really a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse
## 4. get the value of the inverse
## Build a cache-aware matrix wrapper: a list of four closures that share the
## matrix `x` and its (lazily supplied) inverse through their enclosing
## environment.
##   set(y)      replace the stored matrix and invalidate the cached inverse
##   get()       return the stored matrix
##   setinv(inv) store a computed inverse
##   getinv()    return the cached inverse (NULL when not yet set)
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL  # cached inverse; NULL until setinv() is called
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL  # new matrix invalidates any cached inverse
    },
    get = function() x,
    setinv = function(inv) inv_cache <<- inv,
    getinv = function() inv_cache
  )
}
## B. cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve should
## retrieve the inverse from the cache.
## Return the inverse of the special "matrix" object produced by
## makeCacheMatrix(), computing it with solve() only when no cached value
## exists.  Extra arguments in `...` are forwarded to solve().  Assumes the
## stored matrix is invertible.
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    ## Cache hit: reuse the stored inverse.
    message("getting cached data")
    return(inv)
  }
  ## Cache miss: invert the stored matrix and memoize the result.
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
## C. Testing the functions
## Smoke test: invert a 3x3 matrix through the caching wrapper.  The
## commented block below records the expected inverse for comparison.
cacheSolve(makeCacheMatrix(matrix(c(2, 1, 3, 0, 1, 7, 1, -4, -3), 3, 3)))
## [,1] [,2] [,3]
## [1,] 0.46296296 0.1296296 -0.01851852
## [2,] -0.16666667 -0.1666667 0.16666667
## [3,] 0.07407407 -0.2592593 0.03703704
|
662331c779142212974ae539b6ecf1054ea6d23b
|
f2ca5431d921b1189a6ebaacd88aef3a9a1a1820
|
/R/LearnerClustMiniBatchKMeans.R
|
dd0ed9103224da227a3587e2d1624641a6dc5454
|
[] |
no_license
|
mlr-org/mlr3cluster
|
44747d2b4fae9170b5ea20704cccfdad777f198f
|
161aee5e75aa299bea29617020339768a8d9a75c
|
refs/heads/main
| 2023-06-22T09:58:51.455583
| 2023-06-15T22:32:15
| 2023-06-15T22:32:15
| 157,852,274
| 15
| 7
| null | 2023-03-10T01:08:56
| 2018-11-16T10:32:38
|
R
|
UTF-8
|
R
| false
| false
| 4,073
|
r
|
LearnerClustMiniBatchKMeans.R
|
#' @title Mini Batch K-Means Clustering Learner
#'
#' @name mlr_learners_clust.MBatchKMeans
#' @include LearnerClust.R
#' @include aaa.R
#'
#' @description
#' A [LearnerClust] for mini batch k-means clustering implemented in [ClusterR::MiniBatchKmeans()].
#' [ClusterR::MiniBatchKmeans()] doesn't have a default value for the number of clusters.
#' Therefore, the `clusters` parameter here is set to 2 by default.
#' The predict method uses [ClusterR::predict_MBatchKMeans()] to compute the
#' cluster memberships for new data.
#' The learner supports both partitional and fuzzy clustering.
#'
#' @templateVar id clust.MBatchKMeans
#' @template learner
#' @template example
#'
#' @export
LearnerClustMiniBatchKMeans = R6Class("LearnerClustMiniBatchKMeans",
  inherit = LearnerClust,
  public = list(
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      # Training-time hyperparameters, all forwarded to
      # ClusterR::MiniBatchKmeans() via invoke() in .train().
      ps = ps(
        clusters = p_int(default = 2L, lower = 1L, tags = "train"),
        batch_size = p_int(lower = 1L, default = 10L, tags = "train"),
        num_init = p_int(lower = 1L, default = 1L, tags = "train"),
        max_iters = p_int(lower = 1L, default = 100L, tags = "train"),
        init_fraction = p_dbl(lower = 0L, upper = 1L, default = 1L, tags = "train"),
        initializer = p_fct(levels = c("optimal_init", "quantile_init", "kmeans++", "random"), default = "kmeans++", tags = "train"),
        early_stop_iter = p_int(lower = 1L, default = 10L, tags = "train"),
        verbose = p_lgl(default = FALSE, tags = "train"),
        CENTROIDS = p_uty(default = NULL, tags = "train"),
        tol = p_dbl(default = 1e-04, lower = 0, tags = "train"),
        tol_optimal_init = p_dbl(default = 0.3, lower = 0, tags = "train"),
        seed = p_int(default = 1L, tags = "train")
      )
      # ClusterR::MiniBatchKmeans() has no default cluster count (see the
      # class docs above), so default to 2 clusters here.
      ps$values = list(clusters = 2L)
      # add deps
      # init_fraction only applies to these two initializers.
      ps$add_dep("init_fraction", "initializer", CondAnyOf$new(c("kmeans++", "optimal_init")))
      super$initialize(
        id = "clust.MBatchKMeans",
        feature_types = c("logical", "integer", "numeric"),
        predict_types = c("partition", "prob"),
        param_set = ps,
        properties = c("partitional", "fuzzy", "exclusive", "complete"),
        packages = "ClusterR",
        man = "mlr3cluster::mlr_learners_clust.MBatchKMeans",
        label = "Mini Batch K-Means"
      )
    }
  ),
  private = list(
    .train = function(task) {
      # Validate a user-supplied CENTROIDS matrix (check_centers_param is a
      # package-internal helper); its row count must equal `clusters`.
      check_centers_param(self$param_set$values$CENTROIDS, task, test_matrix, "CENTROIDS")
      if (test_matrix(self$param_set$values$CENTROIDS)) {
        if (nrow(self$param_set$values$CENTROIDS) != self$param_set$values$clusters) {
          stop("`CENTROIDS` must have same number of rows as `clusters`")
        }
      }
      # Fit the model with all "train"-tagged parameter values.
      pv = self$param_set$get_values(tags = "train")
      m = invoke(ClusterR::MiniBatchKmeans, data = task$data(), .args = pv)
      if (self$save_assignments) {
        # Optionally store hard (non-fuzzy) training-set assignments as
        # integer cluster ids.
        self$assignments = unclass(ClusterR::predict_MBatchKMeans(
          data = task$data(),
          CENTROIDS = m$centroids,
          fuzzy = FALSE))
        self$assignments = as.integer(self$assignments)
      }
      return(m)
    },
    .predict = function(task) {
      if (self$predict_type == "partition") {
        # Hard partition: predict integer cluster memberships only.
        partition = unclass(ClusterR::predict_MBatchKMeans(
          data = task$data(),
          CENTROIDS = self$model$centroids,
          fuzzy = FALSE))
        partition = as.integer(partition)
        pred = PredictionClust$new(task = task, partition = partition)
      } else if (self$predict_type == "prob") {
        # Fuzzy prediction: membership probabilities plus the hard
        # partition derived from them; probability columns are named by
        # cluster index.
        partition = unclass(ClusterR::predict_MBatchKMeans(
          data = task$data(),
          CENTROIDS = self$model$centroids,
          fuzzy = TRUE))
        colnames(partition$fuzzy_clusters) = seq_len(ncol(partition$fuzzy_clusters))
        pred = PredictionClust$new(
          task = task,
          partition = as.integer(partition$clusters),
          prob = partition$fuzzy_clusters)
      }
      return(pred)
    }
  )
)
# Register the learner in the package-level dictionary (picked up by
# zzz.R at load time — presumably; confirm against the package setup).
learners[["clust.MBatchKMeans"]] = LearnerClustMiniBatchKMeans
|
7976fbf603c3267c963439ddb83838bb840a36ec
|
cf0a40ab0fb3b8f4a88f0710004900dbd3c3c1e9
|
/airbnb_trial.r
|
662e44bbc36b3d6712d0fc0cab185e13e3cb1a8e
|
[] |
no_license
|
codingfinance/misc
|
0da96fc4254bf6217d9b57c341028946ccddda52
|
2ec36a86c249089b0a95934736599f9b286d8b60
|
refs/heads/master
| 2022-05-25T01:56:10.142156
| 2022-03-27T23:37:55
| 2022-03-27T23:37:55
| 248,232,775
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 226
|
r
|
airbnb_trial.r
|
# Trial script: scrape the text of listing cards from an Airbnb search
# results page for Milan (requires network access; the CSS class below is
# specific to Airbnb's markup at the time of writing).
library(tidyquant)
library(tidyverse)
library(rvest)

# Fetch and parse the search results page.
search_page <- read_html("https://www.airbnb.com/s?query=milan&checkin=2020-04-16&checkout=2020-04-20")

# Extract the visible text of every listing card container.
text <- search_page %>%
  html_nodes("div._8ssblpx") %>%
  html_text()

text
|
70b4c290a8174b5ba0c7a5179e5f86f7bae64fd7
|
2a815dbd7c1e19628740238018a4a0f0c131af3b
|
/R/zomotu.R
|
d3dea90b3d42c8cf7c92393d95d5de382c252080
|
[] |
no_license
|
yewei369/clotu
|
227c470f19c1f6dacb068dfce2a1cac513d2cdaf
|
6b51bfc54a456c2149ef3fb915d148460d9afef2
|
refs/heads/main
| 2023-06-09T15:59:48.628243
| 2021-07-01T10:18:34
| 2021-07-01T10:18:34
| 337,001,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
zomotu.R
|
#' Zombie OTUs
#'
#' Identify "zombie" OTUs — rows whose presence score (the `blood` value
#' returned by the package helper \code{present()}) does not exceed a
#' threshold — optionally remove them, and report the reference OTUs that
#' are present in every sample of the filtered table.
#'
#' @param data, matrix of OTU data (otu x sample)
#' @param thr, presence threshold; OTUs with blood <= thr are zombies
#' @param del, when TRUE the filtered OTU table is returned alongside ref
#' @return a list with the filtered OTU table (\code{otu}, only when
#'   \code{del = TRUE}) and the row indices of reference OTUs (\code{ref})
#' @examples
#' da<-simotu.gaus(50,700,3,nref=5,full.mean=10000,unif.min=0,unif.max=0.4,seed=1234)
#' al<-data_extract(da,Target %in% c("target1","target2","target3")) # no otu names
#' zomotu(al,thr=0)
zomotu <- function(data, thr = 0, del = TRUE) {
  ## Presence score per OTU; present() is a package helper whose $blood
  ## equals 1 for OTUs observed in all samples (per the original notes).
  otu_presence <- present(data)$blood
  ## Drop zombie OTUs: keep rows scoring above the threshold.
  kept <- as.data.frame(data[which(otu_presence > thr), ])
  ## Reference OTUs: present in every sample of the filtered table.
  kept_presence <- present(kept)$blood
  ref <- which(kept_presence == 1)
  if (del) {
    list(otu = kept, ref = ref)
  } else {
    list(ref = ref)
  }
}
|
c2b410cf423c2f8012012e1388aae3e1b2201b60
|
5ec349b9abfcaaecf5916f27cad0180f5e21659a
|
/R/slider.match.R
|
1560e618354c14c37df4c7429fa4dbd153a49c9f
|
[] |
no_license
|
way2joy/LearnEDA
|
322b4abecc9df2927d9842e387c5b6e6d8a8a0be
|
e823e1ba9a174d4f2d89effe163ea9443c32e0ea
|
refs/heads/master
| 2021-01-24T04:43:24.272946
| 2015-08-28T23:27:18
| 2015-08-28T23:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 483
|
r
|
slider.match.R
|
## Interactively explore power transformations of `x` with a slider
## (RStudio's manipulate package), comparing the raw and reexpressed data
## in side-by-side horizontal boxplots.  The plot title reports the current
## power and Hinkley's d for the reexpressed data.
## Relies on mtrans() and hinkley() from this package.
slider.match <- function(x){
  power.plot <- function(power, x){
    ## A power that rounds to 0 is mapped to a tiny positive value so the
    ## transformation stays well-defined near the log case.  (Plain if/else:
    ## `power` is a scalar, so ifelse() was the wrong tool here.)
    if (round(power, 2) == 0) {
      power <- 1e-04
    }
    reexpressed <- mtrans(x, power)
    ## NOTE(review): the original assigned xlb <- "Reexpressed Data" but
    ## never used it; it was probably meant as an xlab= for the plot.
    ## Removed as dead code rather than guessing at the intent.
    tit <- paste("Power =", round(power, 1), ", d =",
                 round(hinkley(reexpressed), 2))
    boxplot(data.frame(raw = x, reexpressed),
            main=tit, horizontal=TRUE)
  }
  manipulate(
    power.plot(power, x),
    power = manipulate::slider(-2, 2, initial=1, step=0.1)
  )
}
|
6d43cfd09cbbf54bbd4b040091e12797eba40a2b
|
2b79549048707707667fbabc464cd35ed0579278
|
/R/SMITE.R
|
3941fed5b2a7efaba15b6d0e383fa896f5eee85d
|
[] |
no_license
|
GreallyLab/SMITE
|
3e49f7e1927445ed895b63ada2d953eeb0fcfcf9
|
1536ac38c699be0d548c1702a64d6d77bac4be4d
|
refs/heads/master
| 2021-01-24T02:07:06.610389
| 2017-03-27T12:23:30
| 2017-03-27T12:23:30
| 39,521,031
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62,625
|
r
|
SMITE.R
|
##SMITE_1_0_2 10/9/2015
##internal function to perform a Stouffer's test for p value combination
## Combine two-sided p-values with a weighted Stouffer Z-method and return
## the combined two-sided p-value, clamped away from exactly 0 and 1.
## NOTE(review): `weights` has no default; callers are expected to pass
## NULL explicitly for equal weighting — confirm at call sites.
setMethod(
    f="stoufferTest",
    signature="vector",
    definition=function(pvalues, weights)
    {
        ## Equal weights (summing to 1) when none are supplied.
        if (is.null(weights)) {
            n_pvals <- length(pvalues)
            weights <- rep(1, n_pvals) / n_pvals
        }
        ## Two-sided p -> z-score, weighted combination, back to two-sided p.
        z_scores <- qnorm(1 - pvalues / 2)
        combined_z <- sum(weights * z_scores) / sqrt(sum(weights^2))
        combined_p <- (1 - pnorm(combined_z)) * 2
        ## Keep the result strictly inside (0, 1) so downstream transforms
        ## (e.g. qnorm) remain finite.
        combined_p <- replace(combined_p,
                              combined_p < .0000000000000001,
                              .0000000000000001)
        combined_p <- replace(combined_p,
                              combined_p > .9999999999999999,
                              .9999999999999999)
        combined_p
    }
)
##SMITE Functions
setMethod(
    f="makePvalueAnnotation",
    signature="ANY",
    definition=function(data, other_data=NULL, other_tss_distance=10000,
        promoter_upstream_distance=1000, promoter_downstream_distance=1000,
        strand_col=NULL, gene_name_col=NULL)
    {
        ## Build a PvalueAnnotation from a gene table or GRanges: for every
        ## gene derive tss / promoter / body intervals and attach optional
        ## user-supplied annotation tracks ("other_data", e.g. enhancers)
        ## that fall within other_tss_distance upstream of the gene start.
        ##create a Granges data object
        if(!inherits(data, "GRanges")){
            ## Table input: columns 1-3 are taken as chrom/start/end; the
            ## gene-name column must be given explicitly.
            if(is.null(gene_name_col)){
                stop("Gene name column must be specified if GRanges not given")
            }
            ##if the strand column was not specified auto-detect
            ## (detected from the first row's "+"/"-" value)
            if(is.null(strand_col)){
                strand_col <- which(data[1, ] %in% c("+", "-"))[1]
            }
            data_grange <- GenomicRanges::GRanges(seqnames=data[, 1],
                ranges=IRanges::IRanges(start=data[, 2], end=data[, 3]),
                name=data[, gene_name_col],
                strand=data[, strand_col])
        }
        else {
            ## GRanges input: drop any score column; a `name` metadata
            ## column is expected by the steps below.
            data_grange <- data
            data_grange$score <- NULL
        }
        ## Gene names must be unique because features are later grouped
        ## (split) by name.
        if(any(duplicated(data_grange$name)))
        {
            message("Genes are duplicated. Removing duplicates")
            data_grange <-
                subset(data_grange,!duplicated(data_grange$name))
        }
        data_grange$feature <- "original"
        ## Name the extra tracks ("other1", "other2", ...) when the caller
        ## did not supply names.
        if(!is.null(other_data)){
            if(is.null(names(other_data))){
                otherdata_names <- paste("other", 1:length(other_data), sep="")
                names(other_data) <- otherdata_names
            }
            else{
                otherdata_names <- names(other_data)
            }
        }
        ## Recycle the first TSS distance across tracks when lengths differ,
        ## and label distances by track name for lookup below.
        if(!is.null(other_data)){
            if(length(other_tss_distance) != length(other_data)){
                other_tss_distance <- rep(other_tss_distance[1],
                    length(other_data))
            }
            if(is.null(names(other_tss_distance))){
                names(other_tss_distance) <- otherdata_names
            }
        }
        ## TSS feature: a 2-bp flank at the gene start, shifted by 1 bp.
        tss <- GenomicRanges::shift(GenomicRanges::flank(data_grange, width=2), 1)
        tss$feature <- "tss"
        if(!is.null(other_data)){
            ## For each extra track, keep only elements overlapping the
            ## strand-aware upstream flank of each gene, labelled with the
            ## gene and track name.  An element hitting several genes is
            ## duplicated once per hit.  (sapply returns a list of GRanges
            ## here, concatenated by do.call(c, ...).)
            other <- do.call(c, sapply(1:length(other_data), function(i){
                if(!inherits(other_data[[i]], "GRanges")){
                    temp_other<-c(GenomicRanges::GRanges(
                        seqnames=other_data[[i]][, 1],
                        ranges=IRanges::IRanges(start=other_data[[i]][, 2],
                            end=other_data[[i]][, 3])))
                }
                else {
                    temp_other <- other_data[[i]]
                    GenomicRanges::mcols(temp_other) <- NULL
                }
                temp_other <- unique(temp_other)
                suppressWarnings(
                    overlap <- findOverlaps(GenomicRanges::flank(data_grange,
                        other_tss_distance[otherdata_names[i]],
                        start=TRUE), temp_other)
                )
                temp_other <- temp_other[as.numeric(S4Vectors::subjectHits(overlap))]
                temp_other$name <-
                    data_grange$name[S4Vectors::queryHits(overlap)]
                temp_other$feature <- otherdata_names[i]
                temp_other
            })
            )
        }
        ## Promoter: union of the upstream flank and the first
        ## promoter_downstream_distance bp inside the gene; body: the gene
        ## minus that downstream promoter portion.
        promoters_downstream <- GenomicRanges::flank(data_grange, -promoter_downstream_distance,
            start=TRUE)
        promoters_upstream <- GenomicRanges::flank(data_grange,
            promoter_upstream_distance, start=TRUE)
        end(promoters_upstream) <- end(promoters_upstream)+1
        promoters <- GenomicRanges::punion(promoters_upstream, promoters_downstream)
        promoters$name <- data_grange$name
        promoters$feature <- "promoter"
        body <- GenomicRanges::psetdiff(data_grange, promoters_downstream)
        body$name <- data_grange$name
        body$feature <- "body"
        ## Combine all feature tracks and group them per gene name.
        if(!is.null(other_data)){
            suppressWarnings(combined_data <-
                c(data_grange, promoters, body, other, tss))
        }
        else{
            suppressWarnings(combined_data <-
                c(data_grange, promoters, body, tss))
        }
        combined_data <- split(combined_data, combined_data$name)
        ## Record construction parameters and per-feature width summaries in
        ## the GRangesList metadata for later inspection.
        slot(combined_data, "metadata")$params <-
            c(
            promoter_upstream_distance_tss=promoter_upstream_distance,
            promoter_downstream_distance_tss=promoter_downstream_distance,
            other_annotation_distance_tss=other_tss_distance
            )
        slot(combined_data, "metadata")$sizes_summary <- lapply(
            split(unlist(combined_data), unlist(combined_data)$feature),
            function(i){each_length <- width(i);
                c(summary(each_length), sds=sd(each_length))
            })
        new("PvalueAnnotation", annotation=combined_data)
    })
##Convert ids between types refseq, ensembleprot and uniprot to gene symbol
## Convert a vector of gene identifiers between ID systems using the
## org.Hs.eg.db annotation maps.  Supported ID_type values: "refseq",
## "ensembleprot", "uniprot", "ensemble", "entrez", "symbol"; ID_convert_to
## is "symbol" (or "entrez" when ID_type == "symbol").  Returns a character
## vector aligned with the input order; unmapped ids become NA.
setMethod(
    f="convertGeneIds",
    signature(gene_IDs="character", ID_type="character", ID_convert_to="character"),
    definition=function(gene_IDs, ID_type, ID_convert_to, delim=NULL, verbose=FALSE)
    {
        if(any(duplicated(gene_IDs))){stop(
            "Cannot convert duplicated ids. Please remove duplicates.")
        }
        ## For compound ids (e.g. "db|ACC"), keep the second delimited field.
        if(!is.null(delim)){
            gene_IDs <- do.call(rbind, strsplit(gene_IDs, delim))[, 2]
        }
        genes_old <- unique(as.character(gene_IDs))
        ## Pair each input id with its position so the result can be
        ## realigned with the original order after merging.
        gene_IDs <- cbind(gene_IDs, 1:length(gene_IDs))
        if(ID_type == "refseq"){
            ## Keep only ids with an entry in the RefSeq -> Entrez map.
            genes_old <- subset(genes_old, genes_old %in%
                (AnnotationDbi::ls(org.Hs.eg.db::org.Hs.egREFSEQ2EG)))
            if(ID_convert_to == "symbol"){
                eg <- unlist(AnnotationDbi::mget(genes_old,
                    org.Hs.eg.db::org.Hs.egREFSEQ2EG))
                symbol <- unlist(AnnotationDbi::mget(eg,
                    org.Hs.eg.db::org.Hs.egSYMBOL))
                out <- cbind(names(eg), symbol)
            }
        }
        else
        if(ID_type == "ensembleprot"){
            genes_old <- subset(genes_old, genes_old %in%
                (AnnotationDbi::ls(org.Hs.eg.db::org.Hs.egENSEMBLPROT2EG)))
            if(ID_convert_to == "symbol"){
                ## BUGFIX: previously queried the RefSeq map here; Ensembl
                ## protein ids must go through ENSEMBLPROT2EG.
                eg <- unlist(AnnotationDbi::mget(genes_old,
                    org.Hs.eg.db::org.Hs.egENSEMBLPROT2EG))
                symbol <- unlist(AnnotationDbi::mget(eg,
                    org.Hs.eg.db::org.Hs.egSYMBOL))
                out <- cbind(names(eg), symbol)
            }
        }
        else
        if(ID_type == "uniprot"){
            ## BUGFIX: org.Hs.egUNIPROT maps Entrez -> UniProt, so both the
            ## key filter and the lookup must use the reversed map
            ## (UniProt -> Entrez).  The original filtered UniProt ids
            ## against Entrez keys and then queried the RefSeq map.
            uniprot2eg <- AnnotationDbi::revmap(org.Hs.eg.db::org.Hs.egUNIPROT)
            genes_old <- subset(genes_old, genes_old %in%
                (AnnotationDbi::ls(uniprot2eg)))
            if(ID_convert_to == "symbol"){
                eg <- unlist(AnnotationDbi::mget(genes_old, uniprot2eg))
                symbol <- unlist(AnnotationDbi::mget(eg,
                    org.Hs.eg.db::org.Hs.egSYMBOL))
                out <- cbind(names(eg), symbol)
            }
        }
        else
        if(ID_type == "ensemble"){
            genes_old <- subset(genes_old, genes_old %in%
                (AnnotationDbi::ls(org.Hs.eg.db::org.Hs.egENSEMBL2EG)))
            if(ID_convert_to == "symbol"){
                eg <- unlist(AnnotationDbi::mget(genes_old,
                    org.Hs.eg.db::org.Hs.egENSEMBL2EG))
                symbol <- unlist(AnnotationDbi::mget(eg,
                    org.Hs.eg.db::org.Hs.egSYMBOL))
                out <- cbind(names(eg), symbol)
            }
        }
        else
        if(ID_type == "entrez"){
            if(ID_convert_to == "symbol"){
                symbol <- unlist(AnnotationDbi::mget(genes_old,
                    org.Hs.eg.db::org.Hs.egSYMBOL))
                out <- cbind(names(symbol), symbol)
            }
        }
        else
        if(ID_type == "symbol"){
            genes_old <- subset(genes_old, genes_old %in%
                (AnnotationDbi::ls(org.Hs.eg.db::org.Hs.egALIAS2EG)))
            if(ID_convert_to == "entrez"){
                eg <- unlist(AnnotationDbi::mget(genes_old,
                    org.Hs.eg.db::org.Hs.egALIAS2EG))
                out <- cbind(names(eg), eg)
            }
        }
        ## Fail loudly on unsupported combinations instead of erroring on an
        ## undefined `out` below.
        if(!exists("out", inherits=FALSE)){
            stop("Unsupported ID_type/ID_convert_to combination")
        }
        ## Realign converted ids with the original input order; ids that did
        ## not map remain NA.
        out <- merge(gene_IDs, out, by=1, all.x=TRUE)
        out <- out[order(as.numeric(out[, 2])), ]
        out <- subset(out,!duplicated(out[,1]))
        out[, 3]
    })
## Attach differential-expression results to a PvalueAnnotation.
## expr_data is a matrix/data.frame with one row per gene; effect_col and
## pval_col give the column indices of the effect size and p-value, or are
## auto-detected from the column names.  Returns the updated annotation.
setMethod(
    f="annotateExpression",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, expr_data, effect_col=NULL, pval_col=NULL){
        ## Auto-detect the effect column; fail if the match is ambiguous.
        if(is.null(effect_col)){
            effect_col <- grep("effect|odds|coeff|B", tolower(colnames(expr_data)))
            if(length(effect_col) != 1){
                stop("Cannot determine effect column. Please specify with arg:effect_col")
            }
        }
        ## Effects are expected to carry direction (both signs present).
        if(any(!c(-1,1) %in% unique(sign(expr_data[, effect_col])))){
            message("WARNING: Effects should provide a direction, but these effects
    are all in the same direction.")
        }
        ## Auto-detect the p-value column; fail if the match is ambiguous.
        if(is.null(pval_col)){
            pval_col <- grep("pval|p.val|p_val|sig", tolower(colnames(expr_data)))
            ## BUGFIX: this previously re-checked length(effect_col), so an
            ## ambiguous or missing p-value match was never caught (and the
            ## error message named the wrong argument).
            if(length(pval_col) != 1){
                stop("Cannot determine p.value column. Please specify with arg:pval_col")
            }
        }
        if(any(expr_data[, pval_col] < 0, expr_data[, pval_col] > 1)){
            stop("P-values must be between 0 and 1")
        }
        ## Clamp p-values strictly inside (0, 1) so later z-transforms
        ## (e.g. in the scoring step) stay finite.
        temp_pval_col <- expr_data[, pval_col]
        temp_pval_col <- replace(temp_pval_col, temp_pval_col < .0000001,.0000001)
        temp_pval_col <- replace(temp_pval_col, temp_pval_col > .9999999,.9999999)
        expr_data[, pval_col] <- temp_pval_col
        ## Store the table as an ExpressionSet; phenoData keeps just the
        ## effect and p-value columns under standardized labels.
        expression_output <- ExpressionSet(as.matrix(expr_data), featureNames=rownames(expr_data))
        phenoData(expression_output) <-
            new("AnnotatedDataFrame",
                data=as.data.frame(exprs(expression_output)[,
                    c(effect_col, pval_col)]))
        varLabels(expression_output) <- c("expression_effect",
            "expression_pvalue")
        slot(pvalue_annotation, "expression") <- expression_output
        pvalue_annotation
    })
## annotateModification: attach a site-level modification data set (e.g. DNA
## methylation) to a PvalueAnnotation.
##
## mod_data is a BED-like data.frame: chr, start, end, effect, p-value.
## Sites are overlapped with the annotation's genomic features, and per
## gene/feature the site p-values are combined (Stouffer / Sidak / binomial /
## Fisher, optionally weighted by distance-to-TSS or by p-value), optionally
## correcting for spatial correlation of neighbouring sites. Combined scores
## are quantile-permutation normalized and stored, together with a per-gene
## summary, in the "modifications" slot. Returns the updated annotation.
##
## FIX(review): the per-gene branch previously tested
## `length(each_pval > 1)` — the length of a logical vector, which is always
## >= 1 — so the single-p-value shortcut ("#if only one pval") was
## unreachable. Corrected to `length(each_pval) > 1`.
setMethod(
    f="annotateModification",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, mod_data, weight_by=NULL,
                        weight_by_method="Stouffer", mod_included=NULL,
                        mod_corr=TRUE, mod_type="methylation", verbose=FALSE){
        ## Refuse to load the same modification type twice.
        if(mod_type %in% names(slot(slot(pvalue_annotation,"modifications"),"metadata")$elements))
        {
            stop("Provided data set is already loaded as mod_type")
        }
        unique_feature_names <- unique(
            unlist(slot(pvalue_annotation, "annotation"))$feature)
        ## No weights provided: default every non-tss feature to p-value weighting.
        if(missing(weight_by)){
            weight_by <- rep("pval", length(unique_feature_names[!unique_feature_names %in%
                                                c("original", "tss")]))
        }
        ## No mod_included or weight names: include all non-tss features.
        if(is.null(names(weight_by))){
            if(is.null(mod_included)){
                mod_included <- unique_feature_names[!unique_feature_names %in%
                                                         c("original", "tss")]
            }
            names(weight_by) <- mod_included
        }
        ## Weight names were provided: they define which features are included.
        if(!is.null(names(weight_by))){
            mod_included <- names(weight_by)
            if(!all(mod_included %in% unique_feature_names)){
                stop("Provided weight names must match those in
                     unique(GenomicRanges::mcols(unlist(pvalue_annotation@annotation))$feature)")
            }
        }
        ## Sanity checks on effect direction and p-value range.
        if(any(!c(-1, 1) %in% unique(sign(mod_data[, 4])))){
            message("WARNING: Effects should provide a direction,
                    but these effects are all in the same direction.")
        }
        if(any(mod_data[, 5] < 0,mod_data[, 5] > 1)){
            stop("P-values must be between 0 and 1")
        }
        ## Build a GRanges of modification sites and intersect with annotation.
        mod_grange <- GenomicRanges::GRanges(seqnames=mod_data[, 1],
                                             ranges=IRanges::IRanges(start=mod_data[, 2], end=mod_data[, 3]),
                                             effect=mod_data[, 4], pval=mod_data[, 5], type=mod_type)
        temp_annotation <- unlist(slot(pvalue_annotation, "annotation"))
        overlap_mods <- GenomicRanges::findOverlaps(temp_annotation, mod_grange)
        mod_grange_overlaps <- mod_grange[S4Vectors::subjectHits(overlap_mods)]
        GenomicRanges::mcols(mod_grange_overlaps) <- cbind(GenomicRanges::mcols(
            temp_annotation[as.numeric(
                S4Vectors::queryHits(overlap_mods))]),
            GenomicRanges::mcols(mod_grange_overlaps))
        mod_grange_overlaps <- split(mod_grange_overlaps, mod_grange_overlaps$name)
        temp_annotation <- split(temp_annotation, temp_annotation$name)
        if(mod_corr == TRUE){
            ## Estimate, per distance bin, the empirical correlation between the
            ## p-values of neighbouring (preceding/following) sites; used later
            ## to decorrelate p-values via a Cholesky transform.
            if(verbose == TRUE){
                message("Computing correlation matrices")
            }
            temp_split_mod_grange <- split(mod_grange, GenomicRanges::seqnames(mod_grange))
            precede_follow_each_element <- lapply(temp_split_mod_grange,
                                                  function(chr){
                temp_chr <- IRanges(start(chr), end(chr))
                temp_precede <- precede(temp_chr)
                temp_follow <- follow(temp_chr)
                ## Chromosome-end sites have no neighbour; pair them with themselves.
                temp_precede[which(is.na(temp_precede))] <-
                    which(is.na(temp_precede))
                temp_follow[which(is.na(temp_follow))] <-
                    which(is.na(temp_follow))
                chr[c(temp_follow, temp_precede)]
            })
            mod_grange_corr <- unlist(GRangesList(precede_follow_each_element))
            duplicate_each_chr <- lapply(temp_split_mod_grange, function(chr){
                c(chr,chr)
            })
            duplicate_each_chr <- unlist(GRangesList(duplicate_each_chr))
            mod_grange_corr$distance <- IRanges::distance(duplicate_each_chr,
                                                          mod_grange_corr)
            mod_grange_corr$pval2 <- duplicate_each_chr$pval
            mod_grange_corr <- mod_grange_corr[which(mod_grange_corr$pval2<0.05)]
            ## Bin neighbour pairs into 500 distance quantiles.
            quantile_distances_mod_corr <- Hmisc::cut2(mod_grange_corr$distance,
                                                       g=500, onlycuts=TRUE)
            quantile_distances_mod_corr[length(quantile_distances_mod_corr)] <- 250000000
            mod_grange_corr$cat <- cut(mod_grange_corr$distance, breaks=quantile_distances_mod_corr)
            mod_grange_corr <- split(mod_grange_corr, mod_grange_corr$cat)
            ## Bootstrap (500x) the correlation of probit-transformed p-values per bin.
            mod_grange_corr2 <- lapply(mod_grange_corr, function(j){
                mean((sapply(1:500, function(i){
                    index <- sample(1:length(j), replace=TRUE);
                    cor(qnorm(1-j$pval[index]), qnorm(1-j$pval2[index]))
                })))
            })
            correlations <- as.data.frame(do.call(rbind, mod_grange_corr2))
            final_corr <- data.frame(correlations,
                                     as.character(names(mod_grange_corr2)),
                                     stringsAsFactors=FALSE)
            ## Prepend a high-correlation bin for zero/short distances.
            final_corr <- rbind(c(.9, paste("(-1, ", quantile_distances_mod_corr[1], "]",
                                            sep="")), final_corr)
            rm(mod_grange_corr)
        }
        ## Combine site-level p-values per gene, separately for each feature type.
        combined_pvalues_list <- sapply(mod_included, function(i){
            if(verbose == TRUE){
                message(paste("Combining p-values over:", i))
            }
            temp <- subset(unlist(mod_grange_overlaps), unlist(mod_grange_overlaps)$feature == i)
            ref_data <- unlist(slot(pvalue_annotation, "annotation"))
            ref_data <-
                subset(ref_data,
                       ref_data$feature == "tss")
            ref_data <- ref_data[temp$name]
            ## +2 keeps log(distance) finite and positive for weighting below.
            suppressWarnings(temp$distance <- distance(ref_data,
                                                       temp)+2)
            temp <- split(temp, temp$name)
            forreturn <- lapply(temp, function(each){
                each_length <- length(each)
                ## Sort all site statistics by ascending p-value.
                each_effect <- each$effect[order(each$pval)]
                each_pval <- each$pval[order(each$pval)]
                distances <- each$distance[order(each$pval)]
                ## FIX: was `length(each_pval > 1)`, which is never 0/FALSE.
                if(length(each_pval) > 1){
                    if(mod_corr == TRUE){
                        ## Decorrelate p-values using the binned neighbour correlations.
                        corr_mat <- matrix(as.numeric(final_corr[match(cut(
                            as.matrix(dist(start(each)[order(each$pval)])),
                            breaks=c(-1, quantile_distances_mod_corr)), final_corr[, 2]), 1]),
                            ncol=each_length)
                        diag(corr_mat) <- 1
                        corr_mat<-abs(corr_mat)
                        chol_d <- try(chol(corr_mat), silent=TRUE)
                        ## If not positive definite, shrink the matrix (and the
                        ## site set) to the largest leading minor that is.
                        while(is(chol_d, "try-error"))
                        {
                            index<- as.numeric(strsplit(
                                strsplit(chol_d[1],
                                         "the leading minor of order ")[[1]][2],
                                " is not positive")[[1]][1])-1
                            chol_d <- try(chol(corr_mat[1:index, 1:index]),silent=TRUE)
                            each_pval <- each_pval[1:index]
                            each_effect <- each_effect[1:index]
                            distances <- distances[1:index]
                            each_length <- index
                        }
                        each_pval <- 1-pnorm(abs(solve(t(chol_d)) %*% qnorm(
                            1-each_pval/2)))
                        each_pval<-replace(each_pval, each_pval == 0 , 0.000000001)
                        each_pval <- each_pval*2
                    }
                    if(weight_by_method == "Stouffer"){
                        if(weight_by[i] == "distance"){
                            ## mean is weighted by distance to TSS
                            out_mean <- weighted.mean(each_effect,
                                                      w=(1/log(distances)))
                            ## Stouffer test is weighted by distance
                            out_pval <- stoufferTest(each_pval, weights=(1/log(distances)))
                        }
                        else if(weight_by[i] %in%
                                c("pval", "p.value", "pvalue", "p_val")){
                            ## mean is weighted by p-value
                            out_mean <- weighted.mean(each_effect, w=-log(each_pval))
                            out_pval <- stoufferTest(each_pval, weights=NULL)
                        }
                        else {
                            ## mean is not weighted
                            out_mean <- mean(each_effect, na.rm=TRUE)
                            out_pval <- stoufferTest(each_pval, weights=NULL)
                        }
                    }
                    else if(weight_by_method %in% c("minimum", "Sidak", "sidak")){
                        ## Sidak correction applied to the minimum p-value;
                        ## ties broken by largest absolute effect.
                        index <- which(each_pval == min(each_pval))
                        if(length(index) > 1){
                            index <- index[which(
                                abs(each_effect[index]) == max(abs(each_effect[index])))][1]
                        }
                        out_mean <- each_effect[index]
                        out_pval <- 1-(1-each_pval[index])^length(each_pval)
                    }
                    else if(weight_by_method == "binomial"){
                        ## Binomial test on the count of sites with p < 0.05.
                        index <- which(each_pval == min(each_pval))
                        if(length(index) > 1){
                            index <- index[which(abs(each_effect[index]) == max(
                                abs(each_effect[index])))][1]
                        }
                        out_mean <- each_effect[index]
                        out_pval <- (1-pbinom(q=length(which(each_pval<0.05)),
                                              size=each_length, prob=0.05))
                    } else if(weight_by_method %in%
                              c("Fisher", "fisher", "chisq", "chi")){
                        ## Fisher's method: -2*sum(log p) ~ chi-square(2k).
                        out_pval <- 1-pchisq(-2*sum(log(each_pval)), each_length*2)
                        out_mean <- mean(sign(each_effect), na.rm=TRUE)
                    }
                }
                ## if only one p-value, pass it through unchanged
                else{
                    out_mean <- each_effect
                    out_pval <- each_pval
                }
                c(out_mean, out_pval, each_length)
            })
            do.call(rbind, forreturn)
        })
        if(verbose == TRUE){
            message("Quantile permuting scores")
        }
        ## Normalize combined p-values by permutation within bins of similar
        ## site count, so genes with many sites are not systematically favoured.
        combined_pvalues_list <- lapply(combined_pvalues_list, function(each_feature){
            categories <- data.frame(categories=as.numeric(
                Hmisc::cut2(each_feature[, 3], g=100)))
            categories_table <- data.frame(table(categories))
            trans_p <- cbind(trans=qnorm(1-each_feature[, 2]/2),
                             plyr::join(categories, categories_table, by="categories"))
            trans_p[, 1] <- replace(trans_p[, 1],is.infinite(trans_p[, 1]),
                                    max(subset(trans_p, !is.infinite(
                                        trans_p[, 1])), na.rm=TRUE))
            num_list <- split(trans_p$trans, trans_p$categories)
            rand_list <- sapply(1:length(num_list), function(i){
                as.matrix(sapply(1:500, function(j){
                    sample(num_list[[as.numeric(i)]], replace=TRUE)
                }))
            })
            new_pval <- apply(trans_p, 1, function(i){
                length(which(
                    rand_list[[as.numeric(i[2])]] > as.numeric(i[1])))/
                    (500*as.numeric(i[3]))
            })
            ## Avoid exact zeros so downstream log transforms stay finite.
            new_pval <- replace(new_pval, new_pval == 0,
                                min(subset(new_pval, new_pval != 0),
                                    na.rm=TRUE))
            each_feature[, 2] <- new_pval
            each_feature <- as.data.frame(each_feature)
            each_feature
        })
        if(verbose == TRUE){
            message("Scores have been adjusted")
        }
        ## Merge the new sites with any previously loaded modifications and
        ## record the per-gene effect/p-value summary plus bookkeeping metadata.
        newmods <- c(unlist(slot(pvalue_annotation, "modifications")),
                     unlist(mod_grange_overlaps))
        names(newmods) <- NULL
        newmods <- split(newmods, newmods$name)
        output_m_summary <-
            suppressWarnings(as.data.frame(c(list(names=names(mod_grange_overlaps)),
                                             lapply(combined_pvalues_list, function(x){
                                                 x[match(names(
                                                     mod_grange_overlaps),
                                                     rownames(x)), 1:2]
                                             }))))
        rownames(output_m_summary) <- output_m_summary[, 1]
        output_m_summary <- output_m_summary[, -1]
        colnames(output_m_summary) <- paste(mod_type,
                                            apply(expand.grid(c("effect", "pvalue"),
                                                              mod_included),1, function(i){
                                                                  paste(i[2], i[1],
                                                                        sep="_")
                                                              }), sep="_")
        newmetadata <- slot(slot(pvalue_annotation, "modifications"), "metadata")
        if(is.null(newmetadata$m_summary)){
            newmetadata$m_summary <- output_m_summary
        }
        else{
            newmetadata$m_summary <- merge(newmetadata$m_summary,
                                           output_m_summary, by=0, all=TRUE)
            rownames(newmetadata$m_summary) <- newmetadata$m_summary[, 1]
            newmetadata$m_summary <- newmetadata$m_summary[, -1]
        }
        newmetadata[["elements"]][[mod_type]]$weight_by <- weight_by
        newmetadata[["elements"]][[mod_type]]$weight_by_method <- weight_by_method
        newmetadata$elementnames <- c(newmetadata$elementnames,
                                      paste(mod_type, mod_included, sep="_"))
        slot(newmods, "metadata") <- newmetadata
        slot(pvalue_annotation, "modifications") <- newmods
        pvalue_annotation
    }
)
## removeModification: drop a previously loaded modification data set (by
## mod_type, e.g. "methylation") from a PvalueAnnotation, scrubbing both the
## GRanges sites and all related metadata (m_summary columns, elements entry,
## elementnames). Returns the updated PvalueAnnotation.
setMethod(
    f="removeModification",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, mod_type="methylation"){
        if(!mod_type%in%names(slot(pvalue_annotation,
                                   "modifications")@metadata$elements)){
            stop("Provided mod_type is not in the pvalue_annotation")
        }
        ## Keep metadata aside; unlist/subset operations below discard it.
        temp_meta <- slot(pvalue_annotation,"modifications")@metadata
        temp <- unlist(slot(pvalue_annotation,"modifications"))
        names(temp) <- NULL
        ## Retain only sites of OTHER modification types.
        temp <- subset(temp,!(temp$type == mod_type))
        slot(temp,"metadata") <- temp_meta
        ## Remove this mod_type's columns from the per-gene summary table.
        temp_meta_colnams <- colnames(slot(temp,"metadata")$m_summary)
        slot(temp,"metadata")$m_summary <-
            slot(temp,"metadata")$m_summary[, -grep(mod_type, temp_meta_colnams)]
        if(ncol(slot(temp,"metadata")$m_summary) == 0){
            slot(temp,"metadata")$m_summary <- NULL
        }
        ## Remove the mod_type's entry from the elements list.
        slot(temp,"metadata")$elements[which(
            names(slot(temp,"metadata")$elements) == mod_type)] <- NULL
        ## Drop elementnames whose prefix (text before the first "_") matches.
        slot(temp,"metadata")$elementnames <-
            slot(temp,"metadata")$elementnames[
                -which(do.call(rbind, lapply(strsplit(
                    slot(temp,"metadata")$elementnames, "_"),
                    function(i)i[1]))[, 1] %in% mod_type)]
        temp_meta <- slot(temp,"metadata")
        ## Re-split by gene name and restore the cleaned metadata.
        slot(pvalue_annotation,"modifications") <- split(temp, temp$name)
        slot(slot(pvalue_annotation,"modifications"),"metadata") <- temp_meta
        pvalue_annotation
    }
)
## makePvalueObject: assemble the per-gene p-value/effect tables into a
## PvalueObject stored in the "score_data" slot. effect_directions (named by
## elementnames) declares the expected relationship of each modification with
## expression: "increase" (1), "decrease" (-1), or "bidirectional" (2,
## default). Returns the updated PvalueAnnotation.
setMethod(
    f="makePvalueObject",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, effect_directions=NULL) {
        ## Default: every loaded modification element is bidirectional.
        if(is.null(effect_directions)){
            effect_directions <- rep("bidirectional",
                                     length(slot(
                                         slot(pvalue_annotation, "modifications"),
                                         "metadata")$elementnames))
        }
        if(is.null(names(effect_directions))){
            names(effect_directions)<-slot(slot(pvalue_annotation, "modifications"),
                                           "metadata")$elementnames
        }
        ## Validate names and allowed direction labels.
        if(any(!names(effect_directions) %in% slot(slot(pvalue_annotation, "modifications"),
                                                   "metadata")$elementnames)){
            stop("Effect name is invalid")
        }
        if(any(!(effect_directions %in% c("decrease", "increase", "bidirectional")
        ))){
            stop("Effect argument is invalid.")
        }
        ## exp_ind: 1 if expression data was loaded, 0 otherwise.
        exp_ind <- ifelse(nrow(Biobase::pData(pvalue_annotation@expression)) >
                              0, 1, 0)
        total_num_factor <- length(effect_directions)
        ## Map direction labels to numeric codes (increase=1, decrease=-1,
        ## bidirectional=2), preserving the user-given factor order.
        signs_index <- merge(cbind(c("increase", "decrease", "bidirectional"),
                                   c(1, -1, 2)),
                             cbind(effect_directions, 1:total_num_factor), by=1)
        signs_index <- signs_index[order(signs_index[, 3]), ]
        rownames(signs_index) <- NULL
        colnames(signs_index) <- c("expression_relationship", "B_coeff","name")
        ## Flatten annotated effects/p-values into one table; column 1 = gene.
        temp1 <- annotationOutput(pvalue_annotation)
        genenames <- temp1[, 1]
        data <- temp1[, -1]
        ## Reorder columns to the requested factors (plus expression, if any).
        if(exp_ind == 1){
            data <- data[, c(as.numeric(sapply(names(effect_directions),
                                               function(i){
                                                   grep(i, colnames(data))
                                               })),
                             grep("exp", colnames(data)))]
        }
        else {
            data <- data[, as.numeric(sapply(names(effect_directions),
                                             function(i){
                                                 grep(i, colnames(data))
                                             }))]
        }
        signs_index[, 3] <- names(effect_directions)
        ## Split into p-value and effect matrices and store as a PvalueObject.
        slot(pvalue_annotation, "score_data") <- new(Class="PvalueObject",
                                                     pval_data=data[, grep("pval",
                                                                           colnames(data))],
                                                     effect_data=data[, grep("effect",
                                                                             colnames(data))],
                                                     genes=genenames,
                                                     signs_index=signs_index)
        pvalue_annotation
    }
)
## normalizePval: bring the p-value distributions of the different data types
## onto a comparable scale, using a reference column (default the expression
## p-values). method="rescale" logit-rescales each column to the reference's
## logit range; method="Box-Cox" raises each column to an exponent, either
## user-supplied (trans) or auto-detected by maximizing a Wilcoxon similarity
## to the reference on the logit scale. Plots before/after densities.
## Returns the updated PvalueAnnotation.
##
## FIX(review): (1) the "unknown reference" stop() had a stray `paste(` baked
## into the string literal, garbling the message; (2) the auto-detect branch
## read `wilcox.test(...)$p_value`, but htest objects store `p.value`, so the
## lookup returned NULL and broke Box-Cox auto-detection.
setMethod(
    f="normalizePval",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, trans, ref="expression_pvalue",
                        method="rescale"){
        temp_pval_data <- slot(slot(pvalue_annotation,"score_data"),"pval_data")
        names_temp_pval_data <- colnames(temp_pval_data)
        if(nrow(temp_pval_data) == 0){
            stop("Run makePvalueObject function first.")
        }
        ## ref must match exactly one p-value column.
        if(!any(grepl(ref,names_temp_pval_data))){
            stop(paste("Reference is not one of the available:",
                       paste(names_temp_pval_data, collapse=", ")))
        }
        if(length(grep(ref,names_temp_pval_data)) > 1){
            stop("Reference was not specific enough.")
        }
        ref_index <- grep(ref,names_temp_pval_data)
        p_ref <- temp_pval_data[[ref_index]]
        ## Work on the logit scale so [0,1] p-values map to the real line.
        logit_ref <- log(p_ref/(1-p_ref))
        temp_signs_index<-slot(slot(pvalue_annotation,"score_data"),
                               "signs_index")[, 3]
        ## Side-by-side density plots: before (left) and after (right).
        if(!names(dev.cur()) %in% c("RStudioGD","pdf")){
            dev.new(height=7, width=14)
        }
        par(mfrow=c(1, 2))
        plotDensityPval(pvalue_annotation, ref=ref)
        if(method %in% c("Box-Cox", "box-cox", "boxcox", "Boxcox")){
            if(missing(trans)){
                message(paste("Auto-detecting best transformation"))
                optimal_boxcox_exponent <- c()
                for(x in names_temp_pval_data[-ref_index]){
                    p_temp <- temp_pval_data[[x]]
                    if(!all(is.na(p_temp))){
                        logit_temp <- log(p_temp/(1-p_temp))
                        ## Scan candidate exponents; pick the one whose scaled
                        ## logits are most similar to the reference (largest
                        ## Wilcoxon p-value). FIX: component is `p.value`.
                        nonparametric_comparison <- t(sapply(c(seq(.05, .95, .05),
                                                               rev(1/seq(.05, .95, .05))),
                                                             function(i){
                                                                 c(i,wilcox.test(logit_ref,
                                                                                 as.numeric(logit_temp)*i)$p.value)
                                                             }))
                        nonparametric_comparison <- subset(nonparametric_comparison,
                                                           nonparametric_comparison[, 2] == max(
                                                               nonparametric_comparison[, 2])[1])[, 1]
                        ## Apply the chosen exponent on the logit scale and
                        ## transform back to a probability.
                        p_temp <-
                            (exp(logit_temp)^( nonparametric_comparison))/(1+exp(logit_temp)^( nonparametric_comparison))
                    }
                    else {
                        nonparametric_comparison <- 1
                    }
                    optimal_boxcox_exponent <- c(optimal_boxcox_exponent, nonparametric_comparison)
                    names(optimal_boxcox_exponent)[length(optimal_boxcox_exponent)] <- x
                    temp_pval_data[[x]] <- p_temp
                }
            }
            else {
                ## User-supplied exponents: one per non-reference column.
                if(length(trans) !=
                   length(grep(paste(temp_signs_index, collapse="|"),
                               colnames(temp_pval_data)))){
                    stop("Length of p and transformations must equal!")
                }
                else {
                    optimal_boxcox_exponent <- trans
                    names(optimal_boxcox_exponent) <- subset(names_temp_pval_data,
                                                             !names_temp_pval_data %in% ref)
                }
                for(x in names(optimal_boxcox_exponent)){
                    p_temp <- temp_pval_data[[x]]
                    if(!all(is.na(p_temp))){
                        p_temp <- p_temp^optimal_boxcox_exponent[x]
                    }
                    temp_pval_data[[x]] <- p_temp
                }
            }
        }
        else if(method%in%c("Rescale", "rescale")){
            ## Rescale each column's logits to the reference's logit range,
            ## then map back to probabilities.
            for(i in temp_signs_index){
                if(!all(is.na(slot(
                    slot(pvalue_annotation, "score_data"), "pval_data")[[grep(i,
                                                                              names_temp_pval_data)]])
                )){
                    p_temp <- temp_pval_data[[grep(i, names_temp_pval_data)]]
                    logit_temp <- log(p_temp/(1-p_temp))
                    logit_temp <- scales::rescale(logit_temp, to=range(logit_ref,
                                                                       na.rm=TRUE))
                    temp_pval_data[[grep(i,
                                         names_temp_pval_data
                    )]] <- exp(logit_temp)/(1+exp(logit_temp))
                }
            }
        }
        slot(slot(pvalue_annotation,"score_data"),"pval_data") <- temp_pval_data
        plotDensityPval(pvalue_annotation, ref=ref)
        pvalue_annotation
    }
)
## scorePval: combine each gene's (normalized) p-values across data types into
## a single chi-square-style significance score, honouring the declared effect
## directions: unidirectional factors contribute signed probit values to a
## weighted Stouffer-like sum with expression, bidirectional factors
## contribute their absolute value. Scores are then calibrated against a
## bootstrap null and stored (as -2*log p) in the "scores" slot.
## weights (optional) must have one entry per p-value column.
##
## FIX(review): the zero-score guard previously assigned its result to
## `iscoresout` (typo) and was discarded, so genes with a permutation p of 0
## went into log() as-is and produced Inf scores. Now assigns to `scoresout`.
setMethod(
    f="scorePval",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, weights){
        total_num_factor <- ncol(slot(slot(pvalue_annotation, "score_data"), "pval_data"))
        pval_score_colnames <- colnames(slot(slot(pvalue_annotation,"score_data"),
                                             "pval_data"))
        ## Default: equal weight for every factor.
        if(missing(weights)){
            weights <- rep((1/(total_num_factor)), total_num_factor)
            names(weights) <- pval_score_colnames
        }
        else {
            if(length(weights) != total_num_factor){
                stop("ERROR: Number of factors(with expression data)
                     and weights must equal!")
            }
            else {
                if(is.null(names(weights))){
                    names(weights) <- pval_score_colnames
                }
                ## Validate user-supplied weight names against the columns.
                sorted_pval_score_colnames <- sort(do.call(rbind, strsplit(pval_score_colnames,
                                                                           "_pvalue")))
                if(!all(sort(names(weights)) == sorted_pval_score_colnames))
                {
                    stop(paste("Weight names must match the following:",
                               paste(sorted_pval_score_colnames,
                                     collapse=", ")))
                }
            }
        }
        ## Reorder weights to the p-value column order.
        weights <- weights[match(
            do.call(rbind, strsplit(pval_score_colnames, "_pvalue")),
            names(weights))]
        message("The following weights are being applied")
        print(weights)
        temp_score_data <- slot(pvalue_annotation, "score_data")
        temp_pval_data <- slot(temp_score_data, "pval_data")
        temp_effect_data <- slot(temp_score_data, "effect_data")
        temp_signs_index <- slot(temp_score_data, "signs_index")
        ## Signed p-values: magnitude = p, sign = effect direction.
        scoringdata <- temp_pval_data*sign(temp_effect_data)
        weight_names_temp <- names(weights)
        if(any(grepl("exp", names(weights)))){
            weight_names_temp <- weight_names_temp[-grep("exp", names(weights))]
        }
        slot(temp_score_data, "scoring_vector") <- weights
        ## Direction codes: 1/-1 unidirectional, 2 bidirectional. Build masks:
        ## unidirectional keeps the sign, bidirectional becomes a 0/1 flag.
        unidirectional <- as.numeric(temp_signs_index[match(temp_signs_index[, 3], weight_names_temp), 2])
        bidirectional <- unidirectional
        unidirectional[which(unidirectional == 2)] <- 0
        bidirectional[which(bidirectional != 2)] <- 0
        bidirectional[which(bidirectional == 2)] <- 1
        ## Probit-transform: signed z-scores from two-sided p-values.
        scoringdata <- qnorm(1-as.matrix(abs(scoringdata))/2)*sign(scoringdata)
        scoresout <- apply(scoringdata, 1, function(each){
            # for each gene
            if(any(!is.na(each)))
            {
                # not all missing
                if(any(grepl("exp", names(each)))){
                    # there is expression data: signed sum of expression plus
                    # unidirectional factors, then add bidirectional magnitudes
                    exp_index <- grep("exp", names(each))
                    forreturn <- (sum(
                        abs(sum(c(as.numeric(each[[exp_index]]),
                                  as.numeric(each[-exp_index]))*
                                    c(1, unidirectional)*
                                    weights[c(exp_index,
                                              which(!grepl("exp", names(each))))],
                                na.rm=TRUE)),
                        (abs(as.numeric(each[-exp_index])*bidirectional)*
                             weights[-exp_index]),
                        na.rm=TRUE)/sum(weights^2)^.5)
                }
                else {
                    # there is no expression data
                    forreturn <- (
                        sum(abs(sum(as.numeric(each)*unidirectional*weights,
                                    na.rm=TRUE)), (abs(as.numeric(each)*
                                                           bidirectional)*
                                                       weights),
                            na.rm=TRUE)/sum(weights^2)^.5)
                }
            }
            else {
                forreturn <- (NA)
            }
            forreturn
        })
        ## Convert combined z back to a two-sided p-value.
        scoresout <- (1-pnorm(as.numeric(scoresout)))*2
        ## FIX: clamp exact zeros before taking logs (was assigned to the
        ## unused variable `iscoresout`, leaving Inf scores downstream).
        scoresout <- replace(scoresout, scoresout == 0 , min(subset(scoresout,
                                                        !(scoresout == 0)), na.rm=TRUE))
        scoresout <- (-2*log(scoresout))
        ## Calibrate against a 100x bootstrap null of the score distribution.
        rand_mat <- as.matrix(sapply(1:100, function(j){
            sample(scoresout, replace=TRUE)
        }))
        new_pval <- sapply(scoresout , function(i){
            length(which(rand_mat > as.numeric(i)))/(100*length(scoresout))
        })
        new_pval <- replace(new_pval, new_pval == 0,
                            min(subset(new_pval, new_pval!=0), na.rm=TRUE))
        new_pval <- (-2)*log(new_pval)
        slot(temp_score_data, "scores") <-
            data.frame(scores=as.numeric(new_pval),
                       row.names=as.character(slot(temp_score_data,
                                                   "genes")))
        slot(pvalue_annotation, "score_data") <- temp_score_data
        pvalue_annotation
    }
)
## runSpinglass: find functional modules ("epimods") on an interaction network
## using igraph's spin-glass community detection seeded at each significant
## gene, then keep modules whose total edge weight beats a randomization null.
## Edge weights combine the two endpoint gene scores via a chi-square(4)
## transform. Results (modules, scores, subnetwork, per-module stats) are
## stored in score_data@module_output. Returns the updated PvalueAnnotation.
setMethod(
    f="runSpinglass",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, network, random_alpha = 0.05, gam = 0.5,
                        node_alpha = 0.05, maxsize = 500, minsize = 8,
                        num_iterations = 1000, simplify=TRUE)
    {
        ## Accept graphNEL input by converting to igraph.
        if(inherits(network, what="graphNEL")){
            network <- graph_from_graphnel(network)
        }
        if(length(slot(slot(pvalue_annotation,"score_data"),"module_output")) !=0 ){
            slot(slot(pvalue_annotation,"score_data"),"module_output") <- list()
            message("Overwriting existing modules.")
        }
        if(simplify == TRUE){
            network <- igraph::simplify(network, remove.multiple=TRUE,
                                        remove.loops=TRUE)
        }
        ## Restrict to scored genes present in the network.
        genes_in_network <- subset(slot(slot(pvalue_annotation, "score_data"),
                                        "genes"),
                                   slot(slot(pvalue_annotation, "score_data"),
                                        "genes") %in%
                                       igraph::V(network)$name)
        scores_in_network <- extractScores(pvalue_annotation)[genes_in_network]
        ##should be FALSE, but just in case check for NAs
        if(any(is.na(scores_in_network))){
            ##if there are NAs remove them
            scores_in_network <- subset(scores_in_network, !is.na(
                scores_in_network))
            genes_in_network <- names(scores_in_network)
        }
        nodes_with_scores <- base::intersect(genes_in_network, igraph::V(network)$name)
        network <- igraph::induced_subgraph(network, nodes_with_scores)
        network_clusters <- igraph::clusters(network)
        ## choose largest connected nodes ## may want include > minsize
        maxclust <- which(network_clusters$csize ==
                              max(network_clusters$csize))[1]
        network <- igraph::induced_subgraph(network,
                                            which(network_clusters$membership == maxclust))
        rm(network_clusters)
        genes_in_network <- intersect(genes_in_network, igraph::V(network)$name)
        scores_in_network <- scores_in_network[genes_in_network]
        network.adj <- igraph::as_adjacency_matrix(network)
        ## order of scores has to match order of adjacency rows
        scores_in_network <- scores_in_network[rownames(network.adj)]
        genes_in_network <- names(scores_in_network)
        stat_scores <- as.numeric(scores_in_network)
        ## Scores are -2*log(p); invert back to p-values for significance calls.
        pval_scores <- exp(scores_in_network/(-2))
        ## Edge weight = chi-square(4) tail transform of the sum of the two
        ## endpoint scores (each score is chi-square(2)-distributed under null).
        network_with_scores <- apply(network.adj, 1, function(v) v*stat_scores)
        W_for_spinglass <- (network_with_scores + t(network_with_scores))
        rm(network_with_scores)
        gc()
        W_vec <- (-2*log(1-pchisq(as.vector(W_for_spinglass),4)))
        ## Cap infinities (p underflow) at the largest finite weight.
        W_vec <- replace(W_vec, is.infinite(W_vec),
                         max(subset(W_vec, !is.infinite(W_vec))) )
        W_for_spinglass <- matrix(W_vec, nrow=nrow(W_for_spinglass))
        rm(W_vec)
        rownames(W_for_spinglass) <- genes_in_network
        colnames(W_for_spinglass) <- genes_in_network
        gc()
        final_network <-
            igraph::graph_from_adjacency_matrix(W_for_spinglass,
                                                mode = "undirected",
                                                weighted=TRUE)
        igraph::V(final_network)$weight <- stat_scores
        network <- final_network
        rm(final_network)
        gc()
        ## For significant genes apply Spinglass algorithm
        sig_genes <- subset(genes_in_network, pval_scores < node_alpha)
        sig_genes_counter <- 1:length(sig_genes)
        names(sig_genes_counter) <- sig_genes
        spin_glass_out <- lapply(sig_genes, function(j){
            message(paste("Computing modules: Vertex",sig_genes_counter[j],"of",
                          length(sig_genes), "significant genes is", j))
            genes_in_network[
                cluster_spinglass(network, weights = igraph::E(network)$weight,
                                  vertex=j, gamma=gam)$community]
        })
        names(spin_glass_out) <- sig_genes
        ## Select modules with size requirements
        spin.size <- do.call(c, lapply(spin_glass_out, length))
        spin_glass_out <- subset(spin_glass_out, spin.size >= minsize & spin.size
                                 <= maxsize)
        ## Total edge weight of the subgraph induced by a vertex set v.
        Modularity.edges = function(v, network)
        {
            h <- igraph::induced_subgraph(network, v);
            c(sum(igraph::E(h)$weight))
        }
        edge_sum <- do.call(c,lapply(spin_glass_out, function(j) {
            Modularity.edges(j, network)
        }));
        nspin_glass_out <- length(spin_glass_out);
        ## Null distribution: reassign scores randomly within each module's
        ## topology and recompute the total edge weight, num_iterations times.
        random_edges <- lapply(1:nspin_glass_out, function(i) {
            each_spin_result <- spin_glass_out[[i]]
            subnetwork <- igraph::induced_subgraph(network, each_spin_result);
            adjacency_of_subnetwork <- igraph::as_adjacency_matrix(subnetwork, sparse=FALSE);
            sapply(1:num_iterations, function(k){
                message(paste("Testing significance: module", i, "of",
                              nspin_glass_out, "Randomization", k, "of", num_iterations))
                random_sample_of_scores = sample(stat_scores, nrow(adjacency_of_subnetwork) , replace=TRUE)
                random_network_with_scores = apply(adjacency_of_subnetwork, 1, function(v) v*random_sample_of_scores)
                W_random <- (random_network_with_scores + t(random_network_with_scores));
                W_random <- apply(W_random, 2, function(i){
                    replace(i, i>0, (-2*log(1-pchisq(subset(i,i>0),4))))
                })
                sum(W_random)/2
            })
        })
        names(random_edges) <- names(spin_glass_out);
        ## Empirical p per module: fraction of randomizations beating the observed sum.
        random_p <- lapply(1:nspin_glass_out, function(k){
            length(which(random_edges[[k]] >
                             edge_sum[k]))/num_iterations})
        names(random_p) <- names(spin_glass_out)
        if(length(spin_glass_out[which(do.call(c,random_p) < random_alpha)]) == 0){
            stop("No modules found. Please adjust the random_alpha and node_alpha
                 parameters")
        }
        ## Assemble output data
        output <- list();
        output[[1]] <- subset(spin_glass_out, do.call(c,random_p) < random_alpha);
        output[[2]] <- pval_scores;
        output[[3]] <- stat_scores;
        output[[4]] <- igraph::induced_subgraph(network, as.character(
            unlist(output[[1]]))) ;
        output[[6]] <- "spinglass";
        names(output) <- c("modules", "p_value",
                           "statistic","network","moduleStats","moduleType");
        ## Merge modules fully contained in another module, concatenating the
        ## seed-gene names with ":".
        k <- 1
        while(k < length(output[[1]])){
            m <- k+1
            while(m <= length(output[[1]])){
                if(all(output[[1]][[m]] %in% output[[1]][[k]])){
                    names(output[[1]])[k] <- paste(names(output[[1]][k]),
                                                   names(output[[1]][m]),
                                                   sep=":")
                    output[[1]][[m]]<-NULL
                    m <- m-1
                }
                m <- m+1
            }
            k <- k+1
        }
        ## Per-module summary: summed score, chi-square p, module size.
        output[[5]] <-
            lapply(output[[1]], function(i){
                stat.mod <- sum(abs(subset(stat_scores,genes_in_network %in% i)))
                pval.mod <- 1-pchisq(stat.mod,2*length(i))
                c(statMod=stat.mod, pvalMod=pval.mod, lenMod=length(i))
            })
        ## Order modules by significance.
        index <- order(do.call(rbind,output[[5]])[,2])
        output[[1]] <- output[[1]][index]
        output[[5]] <- output[[5]][index]
        slot(slot(pvalue_annotation, "score_data"), "module_output") <- output
        pvalue_annotation
    }
)
## runBioNet: identify a single high-scoring subnetwork ("module") with the
## BioNet fast-Heinz heuristic, using genes whose permutation p-value passes
## alpha (via highScores) as the scored node set. Results are stored in
## score_data@module_output in the same layout runSpinglass uses.
## Returns the updated PvalueAnnotation.
setMethod(
    f="runBioNet",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, network, alpha = 0.05)
    {
        if(any(alpha<0, alpha>1)){
            stop("Error: alpha must be between zero and one.")
        }
        if(length(slot(slot(pvalue_annotation,"score_data"),"module_output")) !=0 ){
            slot(slot(pvalue_annotation,"score_data"),"module_output") <- list()
            message("Overwriting existing modules.")
        }
        ## Scores are -2*log(p); invert back to p-values for reporting.
        scores <- highScores(pvalue_annotation, alpha=alpha)
        pval.v <- exp(scores/(-2))
        ## Restrict the network to the scored genes; BioNet wants graphNEL.
        g <- suppressWarnings(subNetwork(names(pval.v), network))
        g <- rmSelfLoops(g)
        if(inherits(g,what="igraph")){
            g <- as_graphnel(g)
        }
        heinzOut <- suppressWarnings(runFastHeinz(g, scores))
        output <- list();
        output[[1]] <- list(heinzOut@nodes);
        output[[2]] <- pval.v;
        output[[3]] <- scores;
        output[[4]] <- g;
        output[[6]] <- "BioNet";
        names(output) <- c("modules", "p_value",
                           "statistic","network","moduleStats","moduleType");
        ## Module summary: summed score of member genes, chi-square p, size.
        sub_score <- subset(scores,names(scores)%in%output[[1]][[1]])
        stat.mod <- sum(abs(sub_score))
        pval.mod <- 1-pchisq(stat.mod,2*length(output[[1]][[1]]))
        output[[5]] <-
            list(c(statMod=stat.mod, pvalMod=pval.mod,
                   lenMod=length(output[[1]][[1]]))
            )
        ## Name the module after its highest-scoring member gene.
        names(output[[1]])[1] <-
            names(sub_score[which(sub_score==max(sub_score))[1]]
            )
        slot(slot(pvalue_annotation,"score_data"),"module_output") <- output
        pvalue_annotation
    }
)
## runGOseq: pathway enrichment (goseq) for each identified module, using
## Reactome (default) or KEGG gene sets, with a length/coverage bias
## correction. coverage may be a BED-like data.frame (cols 4-5 = id, length),
## or "refseq"/"symbol" to use geneLenDataBase lengths; with supply_cov=FALSE
## coverage is derived from modification-site counts per annotated gene.
## Enriched pathways (p < p_thresh) are stored in module_output$goseqOut.
## Returns the updated PvalueAnnotation.
##
## FIX(review): nullp() was called with genome 'h19' (typo); corrected to
## 'hg19' to match the goseq() call below. (With bias.data supplied nullp
## ignores the genome/id arguments, so prior results were unaffected.)
setMethod(
    f="runGOseq",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, p_thresh=0.05, supply_cov=TRUE, coverage=NULL, type="reactome")
    {
        ## Close any stray screen devices left over from plotting methods.
        while(!names(dev.cur()) %in% c("pdf","null device")){
            dev.off()
        }
        names.eid <- names(slot(slot(pvalue_annotation, "score_data"),
                                "module_output")$modules)
        eid <- slot(slot(pvalue_annotation, "score_data"), "module_output")$modules
        pval <- slot(slot(pvalue_annotation, "score_data"), "module_output")$p_value
        stat <- slot(slot(pvalue_annotation, "score_data"), "module_output")$statistic
        genes <- names(slot(slot(pvalue_annotation, "score_data"), "module_output")$modules)
        goseqdata <- slot(slot(pvalue_annotation, "score_data"), "module_output")$modules
        ## Symbol <-> Entrez maps for translating pathway memberships.
        sym2eg <- AnnotationDbi::as.list(org.Hs.eg.db::org.Hs.egSYMBOL2EG)
        eg2sym <- AnnotationDbi::as.list(org.Hs.eg.db::org.Hs.egSYMBOL)
        if(type == "reactome"){
            ## Re-key the Entrez->Reactome map by gene symbol.
            eg2reactome <-
                AnnotationDbi::as.list(reactome.db::reactomeEXTID2PATHID)
            temp <- sapply(names(eg2reactome), function(y){
                eg2sym[[as.character(y)]]})
            temp <- lapply(temp, function(y){if(is.null(y)){y<-NA};y})
            temp2 <- do.call("c", temp)
            sym2network <- eg2reactome
            names(sym2network) <- temp2
            PATHID2NAME <- AnnotationDbi::as.list(reactome.db::reactomePATHID2NAME)
        }
        if(type == "kegg"){
            ## Same re-keying for KEGG; pathway ids gain the "hsa" prefix.
            eg2kegg <- AnnotationDbi::as.list(KEGG.db::KEGGEXTID2PATHID)
            temp <- sapply(names(eg2kegg), function(y){
                eg2sym[[as.character(y)]]})
            temp <- lapply(temp, function(y){if(is.null(y)){y<-NA};y})
            temp2 <- do.call("c", temp)
            sym2network <- eg2kegg
            names(sym2network) <- temp2
            PATHID2NAME <- AnnotationDbi::as.list(KEGG.db::KEGGPATHID2NAME)
            names(PATHID2NAME) <- paste("hsa", names(PATHID2NAME), sep="")
        }
        names(eid) <- paste("module_", names(eid), "_network", sep="")
        nl <- unlist(goseqdata)
        nl <- cbind(names(nl), nl)
        rownames(nl) <- NULL
        nl[, 1] <- gsub("[0-9]*$", "", nl[, 1])
        nl <- split(nl[, 1], nl[, 2])
        nl <- lapply(nl, function(i){unique(i)})
        if (supply_cov == FALSE) {
            ## Derive per-gene coverage from modification-site counts over the
            ## reduced annotation intervals.
            annotations <- slot(slot(pvalue_annotation, "annotation"),"unlistData")
            annotations_split <- split(annotations, slot(annotations,"elementMetadata")$name)
            anntation_red <- reduce(annotations_split)
            # make granges for modification
            red_meth <- reduce(slot(slot(pvalue_annotation, "modifications"),"unlistData"))
            anno_CpGs2 <- countOverlaps(anntation_red,red_meth)
            a <- data.frame(id=names(anno_CpGs2), Overlaps=anno_CpGs2)
        }
        if (supply_cov == TRUE & is.null(coverage)) {
            stop("Supply coverage marked TRUE, but no coverage supplied. Please
                 supply coverage as a data.frame (bed file) or a character vector (gene Symbol or RefSeq)")
        }
        if (supply_cov == TRUE) {
            if(inherits(coverage, what="data.frame")){
                a <- coverage
                a <- a[, 4:5]
            }
            if(inherits(coverage, what="character")){
                ## Use precomputed hg19 gene lengths as the bias data.
                if(coverage == "refseq"){
                    hg19.refGene.LENGTH <- NULL
                    data(hg19.refGene.LENGTH,package="geneLenDataBase",
                         envir=environment())
                    a <- hg19.refGene.LENGTH[, c(1, 3)]
                }
                if(coverage == "symbol"){
                    hg19.geneSymbol.LENGTH <- NULL
                    data(hg19.geneSymbol.LENGTH,package="geneLenDataBase",
                         envir=environment())
                    a <- hg19.geneSymbol.LENGTH[, c(1, 3)]
                }
            }
        }
        ## Collapse duplicated ids, keeping the row with the largest length.
        if(any(duplicated(a[, 1]))){
            a <- split(a, a[, 1])
            a <- lapply(a, function(i){
                if(nrow(i) > 1){
                    i <- i[which(i[, 2] == max(i[, 2])), ]
                };
                i
            })
            a <- do.call(rbind, a)
        }
        ## Run goseq per module: 0/1 membership vector over all covered genes,
        ## bias-corrected null (nullp), then enrichment with pathway names.
        slot(slot(pvalue_annotation, "score_data"), "module_output")$goseqOut <-
            lapply(slot(slot(pvalue_annotation, "score_data"),
                        "module_output")$modules, function(i){
                            b <- cbind(a[, 1], rep(0, nrow(a)))
                            b[which(b[, 1]%in%i), 2] <- 1
                            x <- as.vector(b[, 2])
                            x <- as.numeric(x)
                            names(x) <- b[, 1]
                            ## FIX: genome was 'h19' (typo) — now 'hg19'.
                            pwf <- nullp(x, 'hg19', 'knownGene', bias.data=a[, 2])
                            path <- goseq(pwf, "hg19", "knownGene",
                                          gene2cat=sym2network)
                            path <- cbind(path,(as.character(sapply(
                                path$category, function(i){PATHID2NAME[[i]]}))))
                            colnames(path)[6] <- "cat_name"
                            subset(path, path$over_represented_pvalue < p_thresh)
                        }
            )
        pvalue_annotation
    }
)
## searchGOseq: case-insensitive search of the stored goseq enrichment results
## for a term (substring by default; whole-word when wholeword=TRUE).
## Returns a data.frame of hits (module name, module rank/p, term, rank of
## term, total terms), or a message if nothing matches.
setMethod(
    f="searchGOseq",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, search_string, wholeword=FALSE){
        options(stringsAsFactors=FALSE)
        search_string<-tolower(search_string)
        ## Indices of modules whose pathway-name column matches the query.
        nums <- which(do.call("c", lapply(
            slot(slot(pvalue_annotation, "score_data"), "module_output")$goseqOut,
            function(each){
                any(grepl(ifelse(wholeword == FALSE, search_string,
                                 paste("\\b", search_string, "\\b", sep="")),
                          tolower(each[, ncol(each)])))
            }
        ))
        )
        if(length(nums) > 0){
            ## For each matching module, collect the matching term text, its
            ## row position (enrichment rank), and the module's term count.
            out <- lapply(nums, function(i){
                pos <- grep(ifelse(wholeword == FALSE, search_string,
                                   paste("\\b", search_string, "\\b", sep="")),
                            tolower(slot(slot(pvalue_annotation, "score_data"),
                                         "module_output")$goseqOut[[i]][, 6]))
                tot <- nrow(slot(slot(pvalue_annotation, "score_data"),
                                 "module_output")$goseqOut[[i]])
                outterm <- as.data.frame(as.character(slot(
                    slot(pvalue_annotation, "score_data"),
                    "module_output")$goseqOut[[i]][pos, 6]))
                cbind(outterm, as.data.frame(pos),
                      as.data.frame(rep(tot,length(pos))))
            })
            ## Prefix each hit with its module name and index.
            out <- lapply(1:length(nums), function(i){
                old <- out[[i]]
                rownames(old) <- NULL
                old <- cbind(rep(names(nums)[i], nrow(old)),
                             rep(as.numeric(nums[i]), nrow(old)), old)
                old
            })
            out <- do.call(rbind, out)
            ## Annotate the module index with its module-level p-value.
            out[,2] <- sapply(out[,2], function(i){
                i <- as.numeric(i);
                i <- paste(i, "/", round(slot(
                    slot(pvalue_annotation, "score_data"),
                    "module_output")$moduleStats[[i]][2],4));
                i
            })
            colnames(out) <- c("epimod_name", "epimod_position_pval",
                               "term", "rank_of_term", "total_terms")
            rownames(out) <- NULL
            out
        }
        else {
            message("Search term not found.")
        }
    }
)
## extractGOseq: accessor for the stored goseq enrichment tables.
## Returns the full list of per-module results, or only the entries selected
## by which_network (index/name vector) when it is supplied.
setMethod(
    f="extractGOseq",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, which_network=NULL){
        goseq_results <- slot(slot(pvalue_annotation, "score_data"),
                              "module_output")$goseqOut
        if(is.null(which_network)){
            return(goseq_results)
        }
        goseq_results[which_network]
    }
)
## extractModification: return the loaded modification sites as a flat GRanges
## (metadata preserved), optionally restricted to one or more mod_type values.
## With mod_type=NULL all loaded types are returned.
setMethod(
    f="extractModification",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation, mod_type=NULL){
        ## Validate the requested type against the loaded elements, if given.
        loaded_types <- names(slot(pvalue_annotation,
                                   "modifications")@metadata$elements)
        if(!is.null(mod_type) && !mod_type %in% loaded_types){
            stop("Provided mod_type is not in the PvalueAnnotation object.")
        }
        saved_metadata <- slot(slot(pvalue_annotation,"modifications"),"metadata")
        flat_mods <- unlist(slot(pvalue_annotation,"modifications"))
        names(flat_mods) <- NULL
        ## Default to every type present in the data.
        if(is.null(mod_type)){
            mod_type <- unique(flat_mods$type)
        }
        flat_mods <- subset(flat_mods, flat_mods$type %in% mod_type)
        slot(flat_mods, "metadata") <- saved_metadata
        flat_mods
    }
)
)
## extractExpression: accessor for the expression effect/p-value table stored
## in the "expression" slot (as the ExpressionSet's phenoData).
##
## FIX(review): the guard previously tested `is.null(expression)`, which
## referenced the base R function `expression` and was therefore never NULL —
## a dead check. It now inspects the annotation's expression slot itself, so
## the intended "No expression data loaded." error actually fires.
setMethod(
    f="extractExpression",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation){
        temp_exp <- slot(pvalue_annotation,"expression")
        if(nrow(Biobase::pData(temp_exp)) == 0){
            stop("No expression data loaded.")
        }
        Biobase::pData(temp_exp)
    }
)
)
## extractModSummary: accessor for the per-gene modification summary table
## (combined effects and p-values) kept in the modifications metadata.
setMethod(
    f="extractModSummary",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation){
        mods_metadata <- slot(slot(pvalue_annotation, "modifications"),
                              "metadata")
        mods_metadata$m_summary
    }
)
## extractScores: return the combined gene scores as a named numeric vector
## (names = gene identifiers). Errors if scorePval has not been run yet.
setMethod(
    f="extractScores",
    signature="PvalueAnnotation",
    definition=function(pvalue_annotation){
        score_table <- slot(slot(pvalue_annotation,"score_data"),"scores")
        if(nrow(score_table) == 0){
            stop("Run scorePval function first.")
        }
        ## Flatten the one-column data.frame, carrying gene names along.
        setNames(as.vector(score_table[, 1]), rownames(score_table))
    }
)
setMethod(
  f = "highScores",
  signature = "PvalueAnnotation",
  definition = function(pvalue_annotation, alpha = 0.05) {
    score_table <- slot(slot(pvalue_annotation, "score_data"), "scores")
    if (nrow(score_table) == 0) {
      stop("Run scorePval function first.")
    }
    if (any(alpha < 0, alpha > 1)) {
      stop("alpha must be between 0 and 1")
    }
    ## First column of the score matrix as a named numeric vector.
    scores <- setNames(as.vector(score_table[, 1]), rownames(score_table))
    ## Bootstrap null: 100 resamples of the scores with replacement
    ## (same sequence of sample() calls as before, so the RNG stream and
    ## hence the result for a given seed are unchanged).
    boot_null <- as.matrix(sapply(1:100, function(rep_idx) {
      sample(scores, replace = TRUE)
    }))
    ## Empirical p-value per score: fraction of the pooled bootstrap draws
    ## that exceed it.
    emp_pval <- sapply(scores, function(score_i) {
      length(which(boot_null > as.numeric(score_i))) / (100 * length(scores))
    })
    ## Floor exact zeros at the smallest non-zero p-value estimate.
    emp_pval <- replace(emp_pval, emp_pval == 0,
                        min(subset(emp_pval, emp_pval != 0), na.rm = TRUE))
    ## Keep only the scores deemed significant at level alpha.
    scores[which(emp_pval < alpha)]
  }
)
setMethod(
  f = "addShadowText",
  signature = "ANY",
  definition = function(x, y=NULL, labels, col='white', bg='black',
                        theta=seq(pi/4, 2*pi, length.out=8), r=0.1, ...) {
    ## Draw `labels` with a shadow/halo: paint the text several times in the
    ## background color at small angular offsets, then once on top in the
    ## foreground color.
    coords <- xy.coords(x, y)
    x_offset <- r * strwidth('A')
    y_offset <- r * strheight('A')
    for (angle in theta) {
      text(coords$x + cos(angle) * x_offset,
           coords$y + sin(angle) * y_offset,
           labels, col = bg, ...)
    }
    text(coords$x, coords$y, labels, col = col, ...)
  }
)
setMethod(
  f = "extractModules",
  signature = "PvalueAnnotation",
  definition = function(pvalue_annotation, which_module = NULL) {
    ## Modules are only available after module detection has been run.
    module_list <- slot(slot(pvalue_annotation, "score_data"),
                        "module_output")$modules
    if (length(module_list) == 0) {
      stop("Spinglass or Bionet analysis has not been performed.")
    }
    ## Optionally subset to the requested module(s).
    if (is.null(which_module)) {
      module_list
    } else {
      module_list[which_module]
    }
  }
)
|
62fc3714d5dde6fc9a08fe0acdb11690256e1391
|
d89cb07a30dac2581efc14fa2b53841f476a4e5a
|
/DSM_book/rcode/intro2R/P3_Intro_R_2017_Part3.R
|
de712c644fccbcd7f1908e008c3e5722eb2926f8
|
[] |
no_license
|
brendo1001/brendo1001.github.io
|
27da7ffdc543edc3512710fa9c9e00f1c2452f0a
|
ac78ed31430f838f51954351e7808b7c69d84476
|
refs/heads/master
| 2023-08-08T00:25:13.576182
| 2023-07-31T15:37:28
| 2023-07-31T15:37:28
| 72,090,350
| 3
| 0
| null | 2020-12-02T11:15:22
| 2016-10-27T09:01:07
|
HTML
|
UTF-8
|
R
| false
| false
| 4,050
|
r
|
P3_Intro_R_2017_Part3.R
|
## R literacy: Part 3
## Teaching script covering data frames, data import/export, and basic
## data-frame manipulation. Lines of the form "## ----...----" are knitr
## chunk headers retained from the source document.
##########################################################################################################################
## Data frames, data import, and data export
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# Build a small data frame by hand: character, numeric, and logical columns.
dat<- data.frame(profile_id= c("Chromosol","Vertosol","Sodosol"),
FID=c("a1","a10","a11"), easting=c(337859, 344059,347034),
northing=c(6372415,6376715,6372740), visted=c(TRUE, FALSE, TRUE))
dat
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# str() shows the structure (column types and a preview of values).
str(dat)
##########################################################################################################################
##########################################################################################################################
## Reading data from files
## ----ERROR!-----
# This call errors unless "USYD_soil1.txt" exists in the working directory
# (that is the point of the demonstration).
soil.data<- read.table("USYD_soil1.txt", header=TRUE, sep=",")
str(soil.data)
head(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# Load the same data from the ithir package instead of a file.
library(ithir)
data(USYD_soil1)
soil.data<- USYD_soil1
str(soil.data)
head(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# Locate rows with missing CEC values, then inspect some of them.
which(is.na(soil.data$CEC))
soil.data[8:11,]
## ----May not work-----
# edit() opens an interactive spreadsheet editor; only works in interactive
# sessions with a GUI available.
soil.data<- edit(soil.data)
##########################################################################################################################
##########################################################################################################################
## Creating data frames manually
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil<- c("Chromosol", "Vertosol", "Organosol", "Anthroposol")
carbon<- c(2.1, 2.9, 5.5, 0.2)
dat<- data.frame(soil.type=soil, soil.OC=carbon)
dat
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# Rename the columns after the fact.
names(dat)<- c("soil","SOC")
dat
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# Supply explicit row names at construction time.
dat<- data.frame(soil.type=soil, soil.OC=carbon,
row.names=c("Ch","Ve","Or","An"))
dat
##########################################################################################################################
##
##########################################################################################################################
## Working with data frames
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
names(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data$ESP
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# mean() returns NA when the column contains missing values...
mean(soil.data$ESP)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# ...so drop the NAs first (mean(x, na.rm = TRUE) would also work).
mean(na.omit(soil.data$ESP))
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# NOTE: attach()/detach() are shown for teaching only; attach() is generally
# discouraged because it can silently mask objects.
attach(soil.data)
ESP
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white', error=TRUE----
## ## This will throw an error
detach(soil.data)
ESP
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
## soil.data[,10]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
# Derive new columns: convert depths from metres to centimetres.
soil.data$Upper<- soil.data$Upper.Depth*100
soil.data$Lower<- soil.data$Lower.Depth*100
head(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data$ESP
na.omit(soil.data$ESP)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
## soil.data.cleaned<- na.omit(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
is.na(soil.data$ESP)
##########################################################################################################################
##
##########################################################################################################################
## Writing data to file
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
# Export as tab-separated text with a header row and no row names.
write.table(soil.data, file= "file name.txt",
col.names=TRUE, row.names=FALSE, sep="\t")
|
ba9a5dd65cd7ccf4ead65edac7810bc3b3d943d4
|
010cdc330fbbd95423de66a264d13853606477a5
|
/man/sdists.center.align.Rd
|
7f842cc206d02651c72ad6241b5b57e7a9229d86
|
[] |
no_license
|
cran/cba
|
1cd3ed427fded73783bf98ca346dddcd62baaf2a
|
d16e229b75fa69ff5b5484176bf5f6428073837c
|
refs/heads/master
| 2022-12-23T00:51:52.160556
| 2022-12-07T08:48:43
| 2022-12-07T08:48:43
| 17,694,994
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,971
|
rd
|
sdists.center.align.Rd
|
\name{sdists.center.align}
\alias{sdists.center.align}
\title{Align Sequences to a Center}
\description{
Find a global alignment of a collection of sequences using the
center-star-tree heuristic.
}
\usage{
sdists.center.align(x, center, method = "ow", weight = c(1, 1, 0, 2),
exclude = c(NA, NaN, Inf, -Inf),
break.ties = TRUE, transitive = FALSE,
to.data.frame = FALSE)
}
\arguments{
\item{x}{a list (of vectors) or a vector of character.}
\item{center}{a vector}
\item{method}{argument to \code{sdists}.}
\item{weight}{argument to \code{sdists}.}
  \item{exclude}{argument to \code{sdists}.}
\item{break.ties}{a logical specifying whether random tie-breaking
should be performed. Otherwise the first alignment is used.}
\item{transitive}{a logical specifying whether the sequences in
\code{x} should be aligned with each other, too.}
\item{to.data.frame}{a logical specifying whether the result should
be converted to \code{data.frame}.}
}
\details{
Each component of \code{x} is aligned with \code{center} in turn
such that the latter is aligned with all sequences processed so
far.
  If \code{center} is missing \code{\link{sdists.center}} is used
  to compute an initial center.
}
\value{
Either a list of sequences with attributes \code{center} and
\code{ties}, or a \code{data.frame} with the sequences in
the columns.
}
\references{
D. Gusfield (1997). \emph{Algorithms on Strings, Trees, and
Sequences}. Cambridge University Press, Chapter XX.
}
\author{Christian Buchta}
\note{
The global alignment may depend on the order of \code{x}.
}
\seealso{
\code{\link{sdists}} for computation of distances,
\code{\link{sdists.center}} for computation of centroids.
}
\examples{
## continue example
x <- c("ABCD", "AD", "BCD", "ACF", "CDF", "BC")
sdists.center.align(x)
sdists.center.align(x, transitive = TRUE, to.data.frame = TRUE)
}
\keyword{cluster}
|
1a9fad2918121bce9833def1b0f8514ee1a13675
|
888bc7297bce0524768099a664dbb8cdb9ce6795
|
/R/class.R
|
4ed404951cbc6aff678615e5719efc75d05e5a6d
|
[
"MIT"
] |
permissive
|
chinhungtseng/wtotariffcrawler
|
b9da6814b5c1f47ff9ed5d5d7b5d06bb41935b86
|
db8f4f8a85c897ea4fd69e94f12b7a5eb398ebd3
|
refs/heads/master
| 2020-12-08T14:28:51.613865
| 2020-01-14T02:50:01
| 2020-01-14T02:50:01
| 233,005,191
| 0
| 0
|
NOASSERTION
| 2020-01-13T05:16:56
| 2020-01-10T08:49:54
|
R
|
UTF-8
|
R
| false
| false
| 2,970
|
r
|
class.R
|
#' new_wto_crawler
#'
#' Open an rvest session against the WTO center tariff-search page, retrying
#' up to 10 times on failure, optionally routed through a proxy.
#'
#' @param .verbose TRUE or FALSE; emit httr verbose output for the request.
#' @param .proxy TRUE or FALSE; when TRUE, route the request through a proxy
#'   obtained from \code{get_proxies()}/\code{proxy_pool()}.
#'
#' @return list
#' @export
new_wto_crawler <- function(.verbose = FALSE, .proxy = FALSE) {
  ATTEMPTS <- 0
  MAXTRY <- 10
  ## Hoisted out of the retry loop so the final stop() can always reference it.
  url <- "http://db2.wtocenter.org.tw/tariff/Search_byHSCode.aspx"
  while (ATTEMPTS < MAXTRY) {
    ATTEMPTS <- ATTEMPTS + 1
    tryCatch({
      userAgent <- get_useragent()
      if (.proxy) {
        proxies <- get_proxies()
        proxy <- proxy_pool(proxies)
      }
      t0 <- Sys.time()
      session <- rvest::html_session(
        url = url,
        httr::add_headers(c(
          "Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
          "Accept-Encoding" = "gzip, deflate",
          "Accept-Language" = "en-US,en;q=0.9,zh-TW;q=0.8,zh;q=0.7",
          "Cache-Control" = "max-age=0",
          "Connection" = "keep-alive",
          "Content-Type" = "application/x-www-form-urlencoded",
          "Host" = "db2.wtocenter.org.tw",
          "Origin" = "http://db2.wtocenter.org.tw",
          "Referer" = "http://db2.wtocenter.org.tw/",
          "Upgrade-Insecure-Requests" = "1"
        )),
        httr::user_agent(userAgent),
        {if (.proxy) httr::use_proxy(url = proxy$ip, port = proxy$port)},
        httr::timeout(10),
        {if (.verbose) {httr::verbose()}}
      )
      t1 <- Sys.time()
      ## A non-200 response is treated as a failure: the NULL session makes
      ## httr::status_code() below error, which triggers a retry.
      session <- if (httr::status_code(session) != 200L) NULL else session
      ## BUG FIX: the original used scalar ifelse() to select the proxy info,
      ## but ifelse() returns only the FIRST element of the chosen list, so
      ## the `proxy` field was silently truncated. Use a plain if/else.
      proxy_info <- if (!.proxy) {
        list(proxy = NULL, proxyport = NULL)
      } else {
        list(
          proxy = session$response$request$options$proxy,
          proxyport = session$response$request$options$proxyport
        )
      }
      objs <- structure(list(
        url = url,
        config = session$config,
        status_code = httr::status_code(session),
        user_agent = session$response$request$options$useragent,
        proxy = proxy_info,
        session = session,
        connet_times = ATTEMPTS,
        response_delay = as.numeric(t1 - t0)
      ), class = c("wto", class(session)))
      message(paste0("* request `", url, "` succeed."))
      return(objs)
    }, error = function(cond) {
      ## Any failure (connection, timeout, non-200) lands here; back off and
      ## let the while-loop retry.
      message(paste0("* request `", url, "` failed, we will try again later."))
      Sys.sleep.random()
    })
  }
  stop(paste0("* request `", url, "` failed too many times, stop program."), call. = FALSE)
}
print.wto <- function(x, ...) {
  ## Print one "* label: value" line; cat() does the value formatting, so the
  ## numeric rendering matches the original implementation exactly.
  field <- function(label, value) {
    cat("* ", label, ": ", value, "\n", sep = "")
  }
  cat("<wto crawler conifg>\n")
  field("status code", x$status_code)
  field("url", x$url)
  proxy_set <- !is.null(unlist(x$proxy))
  field("proxy",
        if (proxy_set) paste0(x$proxy$proxy, ":", x$proxy$proxyport) else "NULL")
  field("user agent", x$user_agent)
  field("connet times", x$connet_times)
  field("response delay", x$response_delay)
}
|
175491d553a7db064859657c3bf8617765ae807a
|
13bb1694b07014883228eaadfe29ca3774133006
|
/27-umap-tsne-plot-coloring.R
|
34649048fc9e00551488b9b253b8eff5b958b6f6
|
[] |
no_license
|
DevkotaLab/ha-et-al-2020-cell
|
0b8278ff040bf98ca8961ef73f5d9c752ba0013e
|
f5c5a724d5a89058b5d2b9cfac312222f15009c4
|
refs/heads/master
| 2022-12-08T15:52:27.632998
| 2020-09-14T19:35:08
| 2020-09-14T19:35:08
| 241,246,468
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,069
|
r
|
27-umap-tsne-plot-coloring.R
|
## Generate UMAP and tSNE plots.
## Updated 2020-06-26.
##
## Improve consistency of colors in figures, per Suzanne's request:
## - Control samples: purple
## - Crohn's samples: orange
##
## Need to set the factor levels in UMAP/tSNE plots for consistency.
source("_setup.R")
## BUG FIX: this object was previously assigned to `cellranger_all`, but all
## downstream code refers to `cellranger`, so the script failed with
## "object 'cellranger' not found". Load it under the name actually used.
cellranger <- readRDS(file.path(
    "rds",
    "2020-05-18",
    "cellranger_all_samples_seurat.rds"
))
## This dataset includes both Crohn's disease (CD) and ulcerative colitis (UC)
## samples. We're subsetting only the CD samples below.
surecell_all <- readRDS(file.path(
    "rds",
    "2020-06-26",
    "surecell_all_samples_seurat.rds"
))
## This is a subset containing only the Crohn (and relevant control samples).
## The ulcerative colitis (UC) samples aren't included here.
surecell_cd <- readRDS(file.path(
    "rds",
    "2020-02-20",
    "seurat-clustering",
    "surecell_condition_cd_seurat.rds"
))
## Set the resolution.
resolution <- import("resolution.txt", format = "lines")
Idents(cellranger) <- resolution
Idents(surecell_cd) <- resolution
## Set the condition inside combined Cell Ranger dataset.
##
## - A04: Crohn's patient
## - A16: Crohn's patient
## - D1: Non-IBD control
## - H1: Non-IBD control
##
## Match the factor levels, so the plot colors are consistent with surecell_cd.
## "Non-IBD control" is the reference level so controls get the first color.
cellranger@meta.data$condition <-
    ifelse(
        test = cellranger@meta.data$sampleID %in% c("A04", "A16"),
        yes = "Crohn's patient",
        no = "Non-IBD control"
    ) %>%
    as.factor() %>%
    relevel(ref = "Non-IBD control")
colnames(cellranger@meta.data)
## [1] "orig.ident" "nCount_RNA" "nFeature_RNA"
## [4] "sampleID" "sampleName" "nCount"
## [7] "nFeature" "nCoding" "nMito"
## [10] "log10FeaturesPerCount" "mitoRatio" "S.Score"
## [13] "G2M.Score" "Phase" "CC.Difference"
## [16] "RNA_snn_res.0.2" "RNA_snn_res.0.4" "RNA_snn_res.0.6"
## [19] "RNA_snn_res.0.8" "RNA_snn_res.1" "RNA_snn_res.1.2"
## [22] "seurat_clusters" "condition"
colnames(surecell_cd@meta.data)
## [1] "orig.ident" "nCount_RNA" "nFeature_RNA"
## [4] "condition" "description" "log10FeaturesPerCount"
## [7] "mitoRatio" "nCoding" "nCount"
## [10] "nFeature" "nMito" "nRead"
## [13] "patientID" "sampleID" "sampleName"
## [16] "tissueStatus" "S.Score" "G2M.Score"
## [19] "Phase" "CC.Difference" "RNA_snn_res.0.2"
## [22] "RNA_snn_res.0.4" "RNA_snn_res.0.6" "RNA_snn_res.0.8"
## [25] "RNA_snn_res.1" "RNA_snn_res.1.2" "seurat_clusters"
## > saveData(cellranger, surecell_cd)
## Note that these plots are too large to save to disk.
## Just regenerate, if necessary.
## Shared plot settings used by every ggsave call below.
width <- 8L
height <- 8L
point_size <- 0.5 # 0.75
label_size <- 5L # 6L
## Render one dimensionality-reduction plot (t-SNE or UMAP) and save it as a
## square PDF under results_dir. Extra arguments (interestingGroups, color)
## are forwarded to the plotting function unchanged, so each call below is
## byte-equivalent to the original hand-expanded ggsave block.
save_dim_plot <- function(filename, plot_fun, object, ...) {
    ggsave(
        filename = file.path(results_dir, filename),
        width = width,
        height = height,
        plot = plot_fun(
            object = object,
            pointSize = point_size,
            labelSize = label_size,
            ...
        ) + theme(aspect.ratio = 1L)
    )
}
## Cell Ranger t-SNE.
save_dim_plot("cellranger_tsne_ident.pdf", plotTSNE, cellranger,
              interestingGroups = NULL, color = NULL)
save_dim_plot("cellranger_tsne_condition.pdf", plotTSNE, cellranger,
              interestingGroups = "condition")
save_dim_plot("cellranger_tsne_sample_name.pdf", plotTSNE, cellranger,
              interestingGroups = "sampleName")
## Cell Ranger UMAP.
save_dim_plot("cellranger_umap_ident.pdf", plotUMAP, cellranger,
              interestingGroups = NULL, color = NULL)
save_dim_plot("cellranger_umap_condition.pdf", plotUMAP, cellranger,
              interestingGroups = "condition")
save_dim_plot("cellranger_umap_sample_name.pdf", plotUMAP, cellranger,
              interestingGroups = "sampleName")
## SureCell t-SNE.
save_dim_plot("surecell_cd_tsne_ident.pdf", plotTSNE, surecell_cd,
              interestingGroups = NULL)
save_dim_plot("surecell_cd_tsne_sample_name.pdf", plotTSNE, surecell_cd,
              interestingGroups = "sampleName")
save_dim_plot("surecell_cd_tsne_tissue_status.pdf", plotTSNE, surecell_cd,
              interestingGroups = "tissueStatus")
## SureCell UMAP.
save_dim_plot("surecell_cd_umap_ident.pdf", plotUMAP, surecell_cd,
              interestingGroups = NULL)
save_dim_plot("surecell_cd_umap_sample_name.pdf", plotUMAP, surecell_cd,
              interestingGroups = "sampleName")
save_dim_plot("surecell_cd_umap_tissue_status.pdf", plotUMAP, surecell_cd,
              interestingGroups = "tissueStatus")
|
a2cbc364552c65c46f773202cf2a5a43f4d0638a
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831070-test.R
|
51ec842b4accb2d579043b78ac7eb2a80382da75
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 575
|
r
|
1615831070-test.R
|
# Fuzz/valgrind regression driver: call the internal meteor routine
# ET0_ThornthwaiteWilmott with degenerate inputs (empty doy/latitude vectors,
# extreme subnormal/huge temp values) and print the result's structure.
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(1.67534972842843e-305, -8.94553002689695e-210, -1.56500839841835e-209, -1.5871349126006e-151, -5.81641510220318e-224, 7.60646408195443e-311, -1.07091123094137e+34, -1.82219451443966e+307, -7.76664403038719e-292, -1.49198822262059e-154, 1.25786901112938e+294, -2.80363318789445e-287, 5.56527053722931e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# do.call spreads the named list as the function's arguments.
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
1314bceddf548bc7f52d19271dd33e7a1ad0ca8d
|
882a43935f3353a94c5d67e13f8308c04f9152f9
|
/0260_time.R
|
9bf07ea6ef972adb70d13cd307304e846e7f333c
|
[] |
no_license
|
akicho8/learn_R
|
a1711c6cd5f07b004b9dbccae6d681b8148c19ec
|
8f0977dfe8da05d179d265a5805304b4ecbebf08
|
refs/heads/master
| 2021-01-10T20:39:29.459947
| 2013-06-03T15:38:05
| 2013-06-03T15:39:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
0260_time.R
|
# Measure elapsed time around a sleep call.
t <- proc.time() # capture the current process/elapsed time
Sys.sleep(0.000000001) # suspend execution for the given number of seconds
proc.time() - t # elapsed-time delta since `t` was captured
|
53fb14b48ef44f43b53d1b30cac1d8a1545d9140
|
10fbd1788ed37fd0c61403f40e8233853bc00cfc
|
/R/1DGPfunctions.R
|
8d824c4e466441ac3ca0f07688df0214282ceb9b
|
[] |
no_license
|
lmmontoya/SL.ODTR
|
50dafaa45376dc1f7da74816585ce77047e03a02
|
9ffe0a3021f7c248f59f038a2f82fab135887da2
|
refs/heads/master
| 2023-03-06T12:48:47.929749
| 2023-02-20T22:14:59
| 2023-02-20T22:14:59
| 214,280,056
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,193
|
r
|
1DGPfunctions.R
|
#' @name QAW_null
#' @aliases QAW_null
#' @title Simulate with null
#' @description Outcome regression under the null data-generating process:
#'   the conditional mean depends on W1 and W4, with only a negligible
#'   (0.01) treatment effect.
#'
#' @param W Data frame of observed baseline covariates (uses columns W1, W4)
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
QAW_null = function(A, W) {
  ## plogis(W1 + W4 + 0.01 * A): treatment contributes almost nothing.
  plogis(W$W1 + W$W4 + 0.01 * A)
}
#' @name DGP_null
#' @aliases DGP_null
#' @title Simulate with null
#' @description Generate data according to the null data-generating process
#'   (negligible treatment effect; see \code{QAW_null}) with randomized
#'   treatment.
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type for the counterfactual treatment:
#'   "simple dynamic" (treat iff W2 > 0) or "ODTR" (treat iff the blip
#'   QAW(1,W) - QAW(0,W) is <= 0). At most one of \code{dA} and \code{a}
#'   may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#'
#' @return A data.frame with covariates W1-W10, treatment A, rule-specific
#'   treatment A_star, and outcomes Y and Y_star (both derived from the same
#'   uniform draw u, so they are coupled counterfactuals).
#'
#' @export
#'
DGP_null = function(n, dA = NULL, a = NULL){
  # Covariates: W1-W4 standard normal, W5-W6 Bernoulli(0.5),
  # W7-W10 high-variance normal noise.
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  W5 = rbinom(n, 1, .5)
  W6 = rbinom(n, 1, .5)
  W7 = rnorm(n, sd = 20)
  W8 = rnorm(n, sd = 20)
  W9 = rnorm(n, sd = 20)
  W10 = rnorm(n, sd = 20)
  W = data.frame(W1, W2, W3, W4, W5, W6, W7, W8, W9, W10)
  # Randomized treatment (commented line is the confounded alternative used
  # in DGP_null_obs).
  A = rbinom(n, size = 1, prob = 0.5)
  #A = rbinom(n, size = 1, prob = plogis(W1 + W2))
  # Shared uniform draw: reused for Y_star so counterfactuals are coupled.
  u = runif(n)
  Y = as.numeric(u < QAW_null(A, W))
  # Blip function: treatment effect on the conditional mean.
  QAW1 = QAW_null(A = 1, W)
  QAW0 = QAW_null(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    # NOTE(review): treats when blip <= 0 — confirm this direction is
    # intended (other DGPs in this file use blip > 0).
    A_star = as.numeric(blip <= 0)
  }
  # Outcome under the rule (same u as for Y).
  Y_star = as.numeric(u < QAW_null(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name DGP_null_obs
#' @aliases DGP_null_obs
#' @title Simulate with null
#' @description Generate data according to the null data-generating process
#'   (see \code{QAW_null}) with an OBSERVATIONAL treatment mechanism:
#'   treatment probability depends on W1 and W2 (confounding).
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type for the counterfactual treatment:
#'   "simple dynamic" (treat iff W2 > 0) or "ODTR" (treat iff the blip is
#'   <= 0). At most one of \code{dA} and \code{a} may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#'
#' @return A data.frame with covariates W1-W10, treatment A, rule-specific
#'   treatment A_star, and coupled outcomes Y and Y_star.
#'
#' @export
#'
DGP_null_obs = function(n, dA = NULL, a = NULL){
  # Covariates: W1-W4 standard normal, W5-W6 Bernoulli(0.5),
  # W7-W10 high-variance normal noise.
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  W5 = rbinom(n, 1, .5)
  W6 = rbinom(n, 1, .5)
  W7 = rnorm(n, sd = 20)
  W8 = rnorm(n, sd = 20)
  W9 = rnorm(n, sd = 20)
  W10 = rnorm(n, sd = 20)
  W = data.frame(W1, W2, W3, W4, W5, W6, W7, W8, W9, W10)
  # Confounded treatment assignment (randomized version kept for reference).
  #A = rbinom(n, size = 1, prob = 0.5)
  A = rbinom(n, size = 1, prob = plogis(W1 + W2))
  # Shared uniform draw couples Y and Y_star.
  u = runif(n)
  Y = as.numeric(u < QAW_null(A, W))
  # Blip function
  QAW1 = QAW_null(A = 1, W)
  QAW0 = QAW_null(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip <= 0)
  }
  # Outcome under the rule (same u as for Y).
  Y_star = as.numeric(u < QAW_null(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_eff
#' @aliases QAW_eff
#' @title Simulate with eff
#' @description Outcome regression with a strong W1-by-treatment interaction
#'   (coefficient 5) and a tiny main treatment effect (0.01).
#'
#' @param W Data frame of observed baseline covariates (uses column W1)
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
QAW_eff = function(A, W) {
  ## plogis(W1 + 0.01 * A + 5 * W1 * A)
  plogis(W$W1 + 0.01 * A + 5 * W$W1 * A)
}
#' @name DGP_eff
#' @aliases DGP_eff
#' @title Simulate with eff
#' @description Generate data according to the "eff" data-generating process
#'   (strong W1-by-treatment interaction; see \code{QAW_eff}) with
#'   randomized treatment.
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type for the counterfactual treatment:
#'   "simple dynamic" (treat iff W2 > 0) or "ODTR" (treat iff the blip is
#'   <= 0). At most one of \code{dA} and \code{a} may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#'
#' @return A data.frame with covariates W1-W10, treatment A, rule-specific
#'   treatment A_star, and coupled outcomes Y and Y_star.
#'
#' @export
#'
DGP_eff = function(n, dA = NULL, a = NULL){
  # Covariates: W1-W4 standard normal, W5-W6 Bernoulli(0.5),
  # W7-W10 high-variance normal noise.
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  W5 = rbinom(n, 1, .5)
  W6 = rbinom(n, 1, .5)
  W7 = rnorm(n, sd = 20)
  W8 = rnorm(n, sd = 20)
  W9 = rnorm(n, sd = 20)
  W10 = rnorm(n, sd = 20)
  W = data.frame(W1, W2, W3, W4, W5, W6, W7, W8, W9, W10)
  # Randomized treatment (commented line is the confounded alternative used
  # in DGP_eff_obs).
  A = rbinom(n, size = 1, prob = 0.5)
  #A = rbinom(n, size = 1, prob = plogis(W1 + W2))
  # Shared uniform draw couples Y and Y_star.
  u = runif(n)
  Y = as.numeric(u < QAW_eff(A, W))
  # Blip function
  QAW1 = QAW_eff(A = 1, W)
  QAW0 = QAW_eff(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    # NOTE(review): treats when blip <= 0 — confirm intended direction.
    A_star = as.numeric(blip <= 0)
  }
  # Outcome under the rule (same u as for Y).
  Y_star = as.numeric(u < QAW_eff(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name DGP_eff_obs
#' @aliases DGP_eff_obs
#' @title Simulate with eff
#' @description Generate data according to the "eff" data-generating process
#'   (see \code{QAW_eff}) with an OBSERVATIONAL treatment mechanism:
#'   treatment probability depends on W1 and W2 (confounding).
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type for the counterfactual treatment:
#'   "simple dynamic" (treat iff W2 > 0) or "ODTR" (treat iff the blip is
#'   <= 0). At most one of \code{dA} and \code{a} may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#'
#' @return A data.frame with covariates W1-W10, treatment A, rule-specific
#'   treatment A_star, and coupled outcomes Y and Y_star.
#'
#' @export
#'
DGP_eff_obs = function(n, dA = NULL, a = NULL){
  # Covariates: W1-W4 standard normal, W5-W6 Bernoulli(0.5),
  # W7-W10 high-variance normal noise.
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  W5 = rbinom(n, 1, .5)
  W6 = rbinom(n, 1, .5)
  W7 = rnorm(n, sd = 20)
  W8 = rnorm(n, sd = 20)
  W9 = rnorm(n, sd = 20)
  W10 = rnorm(n, sd = 20)
  W = data.frame(W1, W2, W3, W4, W5, W6, W7, W8, W9, W10)
  # Confounded treatment assignment (randomized version kept for reference).
  #A = rbinom(n, size = 1, prob = 0.5)
  A = rbinom(n, size = 1, prob = plogis(W1 + W2))
  # Shared uniform draw couples Y and Y_star.
  u = runif(n)
  Y = as.numeric(u < QAW_eff(A, W))
  # Blip function
  QAW1 = QAW_eff(A = 1, W)
  QAW0 = QAW_eff(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip <= 0)
  }
  # Outcome under the rule (same u as for Y).
  Y_star = as.numeric(u < QAW_eff(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_bin_complex
#' @aliases QAW_bin_complex
#' @title Simulate with AL bin DGP
#' @description Outcome regression for the complex binary-outcome DGP: an
#'   equal mixture of two logistic response surfaces, each with its own
#'   treatment-by-covariate interactions.
#'
#' @param W Data frame of observed baseline covariates (uses W1, W2, W3)
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
###############################################
### AL DGP binary outcome #####################
###############################################
# QAW
QAW_bin_complex = function(A, W) {
  ## Keep the exact arithmetic ordering of the original expression so
  ## floating-point results are bit-identical.
  surface_one <- 1 - W$W1^2 + 3 * W$W2 + 5 * W$W3^2 * A - 4.45 * A
  surface_two <- -0.5 - W$W3 + 2 * W$W1 * W$W2 + 3 * abs(W$W2) * A - 1.5 * A
  0.5 * plogis(surface_one) + 0.5 * plogis(surface_two)
}
#' @name DGP_bin_complex
#' @aliases DGP_bin_complex
#' @title Simulate with AL bin DGP
#' @description Generate data according to the complex binary-outcome DGP
#'   (see \code{QAW_bin_complex}) with randomized treatment; optionally
#'   applies a resource-constrained rule via \code{kappa}.
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type: "simple dynamic" (treat iff W2 > 0) or
#'   "ODTR" (treat iff the blip is > 0). Only one of \code{dA}, \code{a},
#'   \code{kappa} may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#' @param kappa Optional resource constraint; when supplied, the treatment
#'   probability under the constrained rule is taken from
#'   \code{dopt.fun(blip, kappa)$Prd.is.1}.
#' @param QAW.fun Outcome regression (default \code{QAW_bin_complex}).
#'   NOTE(review): the body calls \code{QAW_bin_complex} directly, so a
#'   non-default value is currently ignored.
#'
#' @return A data.frame with covariates W1-W4, treatment A, rule-specific
#'   treatment A_star, and coupled outcomes Y and Y_star.
#'
#' @export
#'
DGP_bin_complex = function(n, dA = NULL, a = NULL, kappa = NULL, QAW.fun = QAW_bin_complex){
  # Covariates: four standard-normal baselines; randomized treatment.
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  A = rbinom(n, size = 1, prob = 0.5)
  W = data.frame(W1, W2, W3, W4)
  # Shared uniform draw couples Y and Y_star.
  u = runif(n)
  Y = as.numeric(u < QAW_bin_complex(A, W))
  # Blip function
  QAW1 = QAW_bin_complex(A = 1, W)
  QAW0 = QAW_bin_complex(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA) & is.null(kappa)) {
    A_star = A
  } else if (!is.null(a) & is.null(dA) & is.null(kappa)){
    A_star = a
  } else if (!is.null(kappa) & is.null(dA) & is.null(a)) {
    # Resource-constrained rule: stochastic assignment with the constrained
    # probability of receiving treatment.
    rc.out = dopt.fun(blip = blip, kappa = kappa)
    A_star = rbinom(n, 1, prob = rc.out$Prd.is.1)
  } else if (!is.null(dA) & is.null(a) & is.null(kappa) & dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (!is.null(dA) & is.null(a) & is.null(kappa) & dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule (same u as for Y).
  Y_star = as.numeric(u < QAW_bin_complex(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_AL_RC
#' @aliases QAW_AL_RC
#' @title Simulate with AL bin DGP RC
#' @description Piecewise outcome regression in a single covariate W. For
#'   treated units the curve is the cubic -(w - 1/3)^3, evaluated at W + 5/6
#'   when W < -1/2, flat at 0 for -1/2 <= W <= 1/3, and at W itself when
#'   W > 1/3. Untreated units get a flat value of -3/10. Everything is then
#'   shifted up by 6/10.
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
###############################################
### AL DGP binary outcome2 (RC) #####################
###############################################
# QAW
QAW_AL_RC = function(A, W) {
  shifted <- W + 5/6
  out <- rep(NA, times = length(A))
  treated <- A == 1
  ## Flat middle segment for treated units.
  out[treated & W <= 1/3 & W >= -1/2] <- 0
  ## Left tail: cubic in the shifted covariate (same arithmetic as before).
  out[treated & W < -1/2] <-
    (-shifted^3 + shifted^2 - (1/3) * shifted + 1/27)[treated & W < -1/2]
  ## Right tail: cubic in W itself.
  out[treated & W > 1/3] <-
    (-W^3 + W^2 - (1/3) * W + 1/27)[treated & W > 1/3]
  ## Everything not yet filled in (i.e., untreated units) is flat.
  out[is.na(out)] <- -(3/10)
  out + (6/10)
}
#' @name DGP_AL_RC
#' @aliases DGP_AL_RC
#' @title Simulate with AL bin DGP RC
#' @description Generate data according to the single-covariate (uniform W)
#'   binary-outcome DGP built on \code{QAW_AL_RC}; optionally applies a
#'   resource-constrained rule via \code{kappa}.
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type: "simple dynamic" (treat iff W is above its
#'   median, 0.5) or "ODTR" (treat iff the blip is > 0). Only one of
#'   \code{dA}, \code{a}, \code{kappa} may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#' @param kappa Optional resource constraint passed to \code{dopt.fun}.
#' @param QAW.fun Outcome regression (default \code{QAW_AL_RC}).
#'   NOTE(review): the body calls \code{QAW_AL_RC} directly, so a
#'   non-default value is currently ignored.
#'
#' @return A data.frame with covariate W, treatment A, rule-specific
#'   treatment A_star, and outcomes Y and Y_star.
#'
#' @export
#'
DGP_AL_RC = function(n, dA = NULL, a = NULL, kappa = NULL, QAW.fun = QAW_AL_RC){
  # Single uniform(0, 1) covariate; randomized treatment.
  W = runif(n)
  A = rbinom(n, size = 1, prob = 0.5)
  # NOTE: `u` is unused below (outcomes are drawn with rbinom) but is kept so
  # the RNG stream matches earlier versions of this DGP for a given seed.
  u = runif(n)
  Y = rbinom(n, 1, prob = QAW_AL_RC(A, W))
  # Blip function
  QAW1 = QAW_AL_RC(A = 1, W = W)
  QAW0 = QAW_AL_RC(A = 0, W = W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA) & is.null(kappa)) {
    A_star = A
  } else if (!is.null(a) & is.null(dA) & is.null(kappa)){
    A_star = a
  } else if (!is.null(kappa) & is.null(dA) & is.null(a)) {
    rc.out = dopt.fun(blip = blip, kappa = kappa)
    A_star = rbinom(n, 1, prob = rc.out$Prd.is.1)
  } else if (!is.null(dA) & is.null(a) & is.null(kappa) & dA == "simple dynamic") {
    # BUG FIX: this branch previously referenced the undefined object `W2`
    # (copied from the multi-covariate DGPs), so it always errored. For the
    # scalar uniform covariate, the analogous median-split rule treats when
    # W exceeds its median, 0.5.
    A_star = ifelse(W > 0.5, 1, 0)
  } else if (!is.null(dA) & is.null(a) & is.null(kappa) & dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule.
  Y_star = rbinom(n, 1, prob = QAW_AL_RC(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name DGP_bin_complex_obs
#' @aliases DGP_bin_complex_obs
#' @title Simulate with AL bin DGP
#' @description Generate data according to the complex binary-outcome DGP
#'   (see \code{QAW_bin_complex}) with an OBSERVATIONAL treatment mechanism:
#'   treatment probability depends on W1 and W2 (confounding).
#'
#' @param n Number of independent observations to simulate.
#' @param dA Optional rule type: "simple dynamic" (treat iff W2 > 0) or
#'   "ODTR" (treat iff the blip is > 0). At most one of \code{dA} and
#'   \code{a} may be supplied.
#' @param a Optional static treatment assignment (0 or 1).
#'
#' @return A data.frame with covariates W1-W4, treatment A, rule-specific
#'   treatment A_star, and coupled outcomes Y and Y_star.
#'
#' @export
#'
DGP_bin_complex_obs = function(n, dA = NULL, a = NULL){
  # Covariates: four standard-normal baselines.
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  # Confounded treatment assignment (randomized version kept for reference).
  #A = rbinom(n, size = 1, prob = 0.5)
  A = rbinom(n, size = 1, prob = plogis(W1 + W2))
  W = data.frame(W1, W2, W3, W4)
  # Shared uniform draw couples Y and Y_star.
  u = runif(n)
  Y = as.numeric(u < QAW_bin_complex(A, W))
  # Blip function
  QAW1 = QAW_bin_complex(A = 1, W)
  QAW0 = QAW_bin_complex(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule (same u as for Y).
  Y_star = as.numeric(u < QAW_bin_complex(A_star, W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name DGP_bin_complex_min
#' @aliases DGP_bin_complex_min
#' @title Simulate with AL bin DGP
#' @description Generate data according to AL bin DGP - want lower outcomes
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_bin_complex_min
#'
#' @export
#'
DGP_bin_complex_min = function(n, dA = NULL, a = NULL){
  # Simulate n observations from the "complex" binary DGP where LOWER outcomes
  # are desirable: unlike the maximizing DGPs in this file, the "ODTR" rule
  # here treats when the blip is NEGATIVE (blip < 0).
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment (0/1).
  # Depends on QAW_bin_complex(), defined elsewhere in this file.
  # Covariates (independent standard normals); randomized treatment
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  A = rbinom(n, size = 1, prob = 0.5)
  W = data.frame(W1, W2, W3, W4)
  u = runif(n)
  # Observed outcome (shared uniform u makes Y and Y_star comparable draws)
  Y = as.numeric(u<QAW_bin_complex(A,W))
  # Blip function: E[Y|A=1,W] - E[Y|A=0,W]
  QAW1 = QAW_bin_complex(A = 1, W)
  QAW0 = QAW_bin_complex(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    # minimizing version: treat iff treatment LOWERS the outcome probability
    A_star = as.numeric(blip < 0)
  }
  # Outcome under the rule
  Y_star = as.numeric(u<QAW_bin_complex(A_star,W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_bin_dep
#' @aliases QAW_bin_dep
#' @title Simulate with AL bin DGP dep W
#' @description Generate QAW according to AL bin dep W
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
# QAW_bin_dep
QAW_bin_dep = function(A, W) {
  # P(Y = 1 | A, W): equal-weight mixture of two logistic regression surfaces.
  # Treatment enters through a W3^2 interaction (first surface) and an |W2|
  # interaction (second surface); W4 does not enter the outcome model.
  lin1 = 1 - W$W1^2 + 3*W$W2 + 5*W$W3^2*A - 4.45*A
  lin2 = -0.5 - W$W3 + 2*W$W1*W$W2 + 3*abs(W$W2)*A - 1.5*A
  0.5*plogis(lin1) + 0.5*plogis(lin2)
}
#' @name DGP_bin_dep
#' @aliases DGP_bin_dep
#' @title Simulate with AL bin DGP with dependent (correlated) covariates
#' @description Generate data according to the AL bin DGP in which W2-W4 are drawn from a correlated multivariate normal
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_bin_dep
#'
#' @export
#'
DGP_bin_dep = function(n, dA = NULL, a = NULL){
  # Simulate n observations from the AL binary DGP with DEPENDENT covariates:
  # W2-W4 are drawn jointly from a correlated multivariate normal (requires
  # MASS::mvrnorm) while W1 is an independent standard normal.
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment (0/1).
  # Depends on QAW_bin_dep(), defined above in this file.
  # Covariates
  W1 = rnorm(n)
  Sigma = matrix(c(1,.3,.7,.3,1,.8,.7,.8,1), ncol=3)
  W234 = mvrnorm(n = n, mu = c(0,0,0), Sigma, tol = 1e-06, empirical = FALSE)
  A = rbinom(n, size = 1, prob = 0.5)
  W = data.frame(W1, W234)
  colnames(W) = c("W1", "W2", "W3", "W4")
  u = runif(n)
  # Observed outcome (shared uniform u makes Y and Y_star comparable draws)
  Y = as.numeric(u<QAW_bin_dep(A,W))
  # Blip function: E[Y|A=1,W] - E[Y|A=0,W]
  QAW1 = QAW_bin_dep(A = 1, W)
  QAW0 = QAW_bin_dep(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    # BUGFIX: `W2` was referenced as a free variable, but in this DGP the raw
    # draws live in the matrix W234 and W2 only exists as a column of W, so
    # this branch previously errored (or silently picked up a global W2).
    A_star = ifelse(W$W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule
  Y_star = as.numeric(u<QAW_bin_dep(A_star,W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_bin_simple
#' @aliases QAW_bin_simple
#' @title Simulate with AL bin DGP simple
#' @description Generate QAW according to AL bin simple
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
###############################################
### AL DGP binary outcome #####################
###############################################
# QAW_bin_simple
QAW_bin_simple = function(A, W) {
  # P(Y = 1 | A, W) for the "simple" binary DGP: logistic in W1 with a small
  # treatment main effect (0.1) and a W1-by-treatment interaction. Only W1
  # enters the outcome model; the other columns of W are noise covariates
  # (the unused W2-W4 extractions were removed).
  W1 = W$W1
  return(plogis(W1 + 0.1*A + W1*A))
}
#' @name DGP_bin_simple
#' @aliases DGP_bin_simple
#' @title Simulate with AL bin simple DGP
#' @description Generate data according to the AL bin simple DGP (outcome model QAW_bin_simple)
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_bin_simple
#'
#' @export
#'
DGP_bin_simple = function(n, dA = NULL, a = NULL){
  # Simulate n observations from the "simple" binary DGP (outcome model
  # QAW_bin_simple, defined above): four independent standard normal
  # covariates and randomized treatment with P(A = 1) = 0.5.
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment (0/1);
  #   both NULL returns the observed treatment as A_star.
  # Covariates
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  A = rbinom(n, size = 1, prob = 0.5)
  W = data.frame(W1, W2, W3, W4)
  u = runif(n)
  # Observed outcome (shared uniform u makes Y and Y_star comparable draws)
  Y = as.numeric(u<QAW_bin_simple(A,W))
  # Blip function: E[Y|A=1,W] - E[Y|A=0,W]
  QAW1 = QAW_bin_simple(A = 1, W)
  QAW0 = QAW_bin_simple(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule
  Y_star = as.numeric(u<QAW_bin_simple(A_star,W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name DGP_bin_simple_obs
#' @aliases DGP_bin_simple_obs
#' @title Simulate with AL bin simple DGP under an observational treatment mechanism
#' @description Generate data according to the AL bin simple DGP with treatment assigned as P(A = 1 | W) = plogis(W1 + W2)
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_bin_simple_obs
#'
#' @export
#'
DGP_bin_simple_obs = function(n, dA = NULL, a = NULL){
  # Simulate n observations from the "simple" binary DGP with an OBSERVATIONAL
  # treatment mechanism: P(A = 1 | W) = plogis(W1 + W2) rather than 0.5.
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment (0/1).
  # Depends on QAW_bin_simple(), defined above in this file.
  # Covariates
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  #A = rbinom(n, size = 1, prob = 0.5)
  A = rbinom(n, size = 1, prob = plogis(W1 + W2))
  W = data.frame(W1, W2, W3, W4)
  u = runif(n)
  # Observed outcome (shared uniform u makes Y and Y_star comparable draws)
  Y = as.numeric(u<QAW_bin_simple(A,W))
  # Blip function: E[Y|A=1,W] - E[Y|A=0,W]
  QAW1 = QAW_bin_simple(A = 1, W)
  QAW0 = QAW_bin_simple(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule
  Y_star = as.numeric(u<QAW_bin_simple(A_star,W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_bin6
#' @aliases QAW_bin6
#' @title Simulate with AL bin DGP6 treat all optimal
#' @description Generate QAW according to AL bin DGP6 treat all optimal
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
# QAW6
QAW_bin6 = function(A, W) {
  # P(Y = 1 | A, W) for DGP 6 ("treat all optimal"): large negative main effect
  # of A (-5) plus treatment interactions through W1 and W2*W5.
  # W4 does not enter the outcome model (its unused extraction was removed).
  W1 = W$W1
  W2 = W$W2
  W3 = W$W3
  W5 = W$W5
  return(plogis(W1 + W2*W3 - 5*A + 0.5*A*W1 + A*W2*W5))
}
#' @name DGP_bin6
#' @aliases DGP_bin6
#' @title Simulate with AL bin DGP6 with treat all optimal
#' @description Generate data according to AL bin DGP6 with treat all optimal
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_bin6
#'
#' @export
#'
DGP_bin6 = function(n, dA = NULL, a = NULL){
  # Simulate n observations from binary DGP 6, designed so that treating
  # everyone is (essentially) optimal and the treatment mechanism depends
  # strongly on W5, stressing positivity at the extremes of w.
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment (0/1).
  # Depends on QAW_bin6(), defined above in this file.
  # Covariates
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  W5 = rbinom(n, 1, .5)
  w = (W5 - .5)*4 + 0.5*(W1 + W2 + W3 + W4) # bimodal, symmetric about 0, extremes around -6 and 6
  g1 = plogis(w) # symmetric; most density at the extremes (positivity is stressed here)
  A = rbinom(n, size = 1, prob = g1) # marginal P(A = 1) is .5 by symmetry
  W = data.frame(W1, W2, W3, W4, W5)
  u = runif(n)
  # Observed outcome (shared uniform u makes Y and Y_star comparable draws)
  Y = as.numeric(u<QAW_bin6(A,W))
  # Blip function: E[Y|A=1,W] - E[Y|A=0,W]
  QAW1 = QAW_bin6(A = 1, W)
  QAW0 = QAW_bin6(A = 0, W)
  blip = QAW1 - QAW0
  # (Removed leftover interactive checks -- `mean(blip>0)`, `d0 = ...`,
  # `A == d0` -- whose results were discarded and had no effect on the output.)
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule
  Y_star = as.numeric(u<QAW_bin6(A_star,W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_bin_complex_3tx
#' @aliases QAW_bin_complex_3tx
#' @title Simulate with AL bin DGP
#' @description Generate QAW according to AL bin DGP
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
###############################################
### AL DGP binary outcome #####################
###############################################
# QAW
QAW_bin_complex_3tx = function(A, W) {
  # P(Y = 1 | A, W): equal-weight mixture of two logistic surfaces with
  # treatment interactions (W3^2 * A and |W2| * A); W4 is not used.
  # A is treated as numeric, so this evaluates at any treatment level.
  part1 = plogis(1 - W$W1^2 + 3*W$W2 + 5*W$W3^2*A - 4.45*A)
  part2 = plogis(-0.5 - W$W3 + 2*W$W1*W$W2 + 3*abs(W$W2)*A - 1.5*A)
  0.5*part1 + 0.5*part2
}
#' @name DGP_bin_complex_3tx
#' @aliases DGP_bin_complex_3tx
#' @title Simulate with AL bin DGP
#' @description Generate data according to AL bin DGP
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_bin_complex_3tx
#'
#' @export
#'
DGP_bin_complex_3tx = function(n, dA = NULL, a = NULL){
  # Simulate n observations from the complex binary DGP with a THREE-level
  # treatment A in {0, 1, 2}.
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment level.
  # Depends on QAW_bin_complex_3tx(), defined above in this file.
  # Covariates
  W1 = rnorm(n)
  W2 = rnorm(n)
  W3 = rnorm(n)
  W4 = rnorm(n)
  A = replicate(n, sample(0:2,1))
  W = data.frame(W1, W2, W3, W4)
  u = runif(n)
  # Observed outcome (shared uniform u makes Y and Y_star comparable draws)
  Y = as.numeric(u<QAW_bin_complex_3tx(A,W))
  # BUGFIX: the outcome regression is now evaluated at the treatment levels
  # actually in the support of A (0, 1, 2). Previously it was evaluated at
  # 1, 2, 3 and which.max() was returned directly, so the "ODTR" rule assigned
  # levels 1-3 that A can never take.
  QAW = data.frame(QAW0 = QAW_bin_complex_3tx(A = 0, W),
                   QAW1 = QAW_bin_complex_3tx(A = 1, W),
                   QAW2 = QAW_bin_complex_3tx(A = 2, W))
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    # which.max returns the column index (1-3); subtract 1 to map to {0, 1, 2}
    A_star = apply(QAW, 1, which.max) - 1
  }
  # Outcome under the rule
  Y_star = as.numeric(u<QAW_bin_complex_3tx(A_star,W))
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW_cont
#' @aliases QAW_cont
#' @title Simulate with DB DGP
#' @description Generate QAW according to DB DGP
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
QAW_cont = function(A, W) {
  # E[Y | A, W] for the continuous-outcome (DB) DGP: polynomial in W1..W4 with
  # a main treatment effect of +2 and interactions through W1^2, W2 and W1*W3.
  baseline = W$W1/10 + 0.25*W$W2 + 0.2*W$W4^2/5 - 0.1*W$W4
  tx_effect = A * (2 - 0.3*W$W1^2 + 0.5*W$W2 - 0.5*W$W3*W$W1)
  baseline + tx_effect
}
#' @name DGP_cont
#' @aliases DGP_cont
#' @title Simulate with DB DGP
#' @description Generate data according to DB DGP
#'
#' @param n n
#' @param dA rule type
#' @param a static txt
#'
#' @return data for DGP_cont
#'
#' @export
#'
#'
DGP_cont = function(n, dA = NULL, a = NULL){
  # Simulate n observations from the continuous-outcome (DB) DGP.
  #   dA - rule type ("simple dynamic" or "ODTR"); a - static treatment (0/1).
  # Depends on QAW_cont(), defined above in this file.
  # NOTE: the draw order W1, W2, A, W3, W4, z is unusual but must be preserved
  # -- reordering would change results under a fixed seed.
  # Covariates
  W1 <- runif(n,-4,4)
  W2 <- runif(n,-4,4)
  A <- rbinom(n, 1, 0.5)
  W3 <- rnorm(n)
  W4 <- rgamma(n, 2, 1)
  W = data.frame(W1, W2, W3, W4)
  z = rnorm(n)  # additive noise shared by Y and Y_star
  Y = QAW_cont(A,W)+z
  # Blip function: E[Y|A=1,W] - E[Y|A=0,W]
  QAW1 = QAW_cont(A = 1, W)
  QAW0 = QAW_cont(A = 0, W)
  blip = QAW1 - QAW0
  # Treatment under rule
  if (!is.null(dA) & !is.null(a)){
    stop("Can only have dA or a")
  } else if (is.null(a) & is.null(dA)) {
    A_star = A
  } else if (!is.null(a)){
    A_star = a
  } else if (dA == "simple dynamic") {
    A_star = ifelse(W2 > 0, 1, 0)
  } else if (dA == "ODTR"){
    A_star = as.numeric(blip > 0)
  }
  # Outcome under the rule (same noise z as the observed outcome)
  Y_star = QAW_cont(A_star,W)+z
  # Data and target parameter
  O = data.frame(W, A, A_star, Y, Y_star)
  return(O)
}
#' @name QAW.rc.allpos
#' @aliases QAW.rc.allpos
#' @title QAW RC all pos
#' @description Generate QAW according to DGP RC - all positive
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
# E[Y | A, W] = A + W + A*(W + 10); the blip is W + 10, positive for all W > -10.
QAW.rc.allpos = function(A, W) {
  W * (1 + A) + 11 * A
}
#' @name QAW.rc.somepos
#' @aliases QAW.rc.somepos
#' @title QAW RC some pos
#' @description Generate QAW according to DGP RC - some positive
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
# E[Y | A, W] = A + W - 2*A*W; the blip is 1 - 2*W, positive only for W < 1/2.
QAW.rc.somepos = function(A, W) {
  A * (1 - 2 * W) + W
}
#' @name QAW.rc.nopos
#' @aliases QAW.rc.nopos
#' @title QAW RC no pos
#' @description Generate QAW according to DGP RC - no positive
#'
#' @param W Data frame of observed baseline covariates
#' @param A Vector of treatment
#'
#' @return conditional mean of Y given A and W
#'
#' @export
#'
# E[Y | A, W] = A - W + A*(W - 10); the blip is W - 9, negative for all W < 9.
QAW.rc.nopos = function(A, W) {
  A * (W - 9) - W
}
#' @name DGP.rc.contW
#' @aliases DGP.rc.contW
#' @title DGP.rc.contW
#' @description Generate data according to DGP RC - cont W
#'
#' @param n n
#' @param a static txt
#' @param kappa prop can be treated in population
#' @param QAW.fun QAW function
#'
#' @return data for DGP.rc.contW
#'
#' @export
#'
#'
DGP.rc.contW = function(n, a = NULL, kappa = NULL, QAW.fun){
  # Simulate n observations for the resource-constrained (RC) setting with a
  # continuous covariate W ~ N(0,1) and randomized treatment P(A = 1) = 0.5.
  #   a       - static treatment value
  #   kappa   - max proportion that can be treated (rule built via dopt.fun)
  #   QAW.fun - outcome regression E[Y | A, W] (e.g. QAW.rc.allpos)
  # With neither a nor kappa, only the observed (W, A, Y) are returned.
  W = rnorm(n)
  A = rbinom(n, 1, 0.5)
  U = rnorm(n)  # additive noise shared by Y and Y_star
  Y = U + QAW.fun(A, W)
  blip = QAW.fun(A = 1, W = W) - QAW.fun(A = 0,W = W)
  # Treatment under rule
  if (is.null(kappa) & is.null(a)){
    toreturn = data.frame(W = W, A = A, Y = Y)
  } else if (is.null(a) & !is.null(kappa)) {
    # stochastic rule under the resource constraint (dopt.fun defined elsewhere)
    rc.out = dopt.fun(blip = blip, kappa = kappa)
    A_star = rbinom(n, 1, prob = rc.out$Prd.is.1)
    Y_star = U + QAW.fun(A_star, W)
    toreturn = data.frame(W = W, A = A, Y = Y, A_star = A_star, Y_star = Y_star, Prd.is.1 = rc.out$Prd.is.1, tauP = rc.out$tauP)
  } else if (!is.null(a) & is.null(kappa)) {
    A_star = a
    Y_star = U + QAW.fun(A_star, W)
    toreturn = data.frame(W = W, A = A, Y = Y, A_star = A_star, Y_star = Y_star)
  }
  # NOTE(review): supplying both a and kappa leaves toreturn undefined --
  # confirm whether that combination should stop() like the DGP_* functions.
  return(toreturn)
}
#' @name DGP.rc.discreteW
#' @aliases DGP.rc.discreteW
#' @title DGP.rc.discreteW
#' @description Generate data according to DGP RC - discrete W
#'
#' @param n n
#' @param a static txt
#' @param kappa prop can be treated in population
#' @param QAW.fun QAW function
#'
#' @return data for DGP.rc.discreteW
#'
#' @export
#'
#'
DGP.rc.discreteW = function(n, a = NULL, kappa = NULL, QAW.fun){
  # Simulate n observations for the resource-constrained (RC) setting with a
  # single BINARY covariate W ~ Bernoulli(0.5); otherwise identical in
  # structure to DGP.rc.contW above (see that function for argument details).
  W = rbinom(n, 1, 0.5)
  A = rbinom(n, 1, 0.5)
  U = rnorm(n)  # additive noise shared by Y and Y_star
  Y = U + QAW.fun(A, W)
  blip = QAW.fun(A = 1, W = W) - QAW.fun(A = 0,W = W)
  # Treatment under rule
  if (is.null(kappa) & is.null(a)){
    toreturn = data.frame(W = W, A = A, Y = Y)
  } else if (is.null(a) & !is.null(kappa)) {
    # stochastic rule under the resource constraint (dopt.fun defined elsewhere)
    rc.out = dopt.fun(blip = blip, kappa = kappa)
    A_star = rbinom(n, 1, prob = rc.out$Prd.is.1)
    Y_star = U + QAW.fun(A_star, W)
    toreturn = data.frame(W = W, A = A, Y = Y, A_star = A_star, Y_star = Y_star, Prd.is.1 = rc.out$Prd.is.1, tauP = rc.out$tauP)
  } else if (!is.null(a) & is.null(kappa)) {
    A_star = a
    Y_star = U + QAW.fun(A_star, W)
    toreturn = data.frame(W = W, A = A, Y = Y, A_star = A_star, Y_star = Y_star)
  }
  # NOTE(review): supplying both a and kappa leaves toreturn undefined --
  # confirm whether that combination should stop() like the DGP_* functions.
  return(toreturn)
}
|
71b685977e856129ba2d0b67bdaf277d5ece483e
|
9836c46617b7dc42d87fe9cf57dd00edda056089
|
/man/quarticity_rqq.Rd
|
027702380de348550fc8d14b7abf54893756c1c5
|
[] |
no_license
|
cran/PortfolioEffectEstim
|
577086a2d5e858caa0258f6a2c4e27afa9bea286
|
ea23b2129d544b500187e69dec5990351164a248
|
refs/heads/master
| 2020-04-06T21:07:08.344365
| 2016-09-17T19:54:52
| 2016-09-17T19:54:52
| 48,086,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,405
|
rd
|
quarticity_rqq.Rd
|
\name{quarticity_rqq}
\alias{quarticity_rqq}
\title{Realized Quadpower Quarticity}
\usage{quarticity_rqq(estimator)
}
\arguments{
\item{estimator}{Estimator object (created by \code{estimator_create}) to compute the quarticity from.}
}
\description{
Realized Quadpower Quarticity (RQQ) is an asymptotically unbiased estimator of integrated quarticity in the absence of microstructure noise.
}
\details{
- Convergence speed: \bold{\eqn{m^{1/4}}} (m - number of observations)
- Accounts for additive noise: \bold{no}
- Accounts for finite price jumps: \bold{yes}
- Accounts for time dependence in noise: \bold{no}
- Accounts for endogenous effects in noise: \bold{no}
}
\references{
O. E. Barndorff-Nielsen and N. Shephard. Power and bipower variation with stochastic volatility and jumps. Journal of Financial Econometrics, Vol.2(No.1):1-37,2004
}
\author{Kostin Andrey <andrei.kostin@snowfallsystems.com>}
\seealso{\code{\link{quarticity_rq}} \code{\link{quarticity_mrq}} \code{\link{quarticity_rtq}} \code{\link{quarticity_mtq}}}
\examples{
\dontrun{
data(spy.data)
estimator=estimator_create(priceData=spy.data)
estimator_settings(estimator,
inputSamplingInterval = '10s',
resultsSamplingInterval = '10s')
util_plot2d(quarticity_rqq(estimator),title="RQQ")
}}
\keyword{PortfolioEffectEstim,nonparametric, models}
\keyword{quarticity_rqq}
|
4263569e29f81bc99628540c581a2adddc4dc748
|
b12e833ec41c29d263f5060653da17e2d6ae7401
|
/app.R
|
a6281781ae16b35f3105ccae12a071ee9541b478
|
[] |
no_license
|
daddyprasad5/capstoneshiny
|
d14e4ddbe5d72e6a8a73504908c24c673eede4b7
|
666e0600958d4dda35f43fdf09bea777fe4cfb06
|
refs/heads/master
| 2021-01-12T06:21:57.832681
| 2016-12-27T17:40:44
| 2016-12-27T17:40:44
| 77,347,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,911
|
r
|
app.R
|
library(shiny)
# JavaScript shim: for any element carrying a data-proxy-click attribute,
# pressing Enter (keyCode 13) clicks the element named by the attribute value.
# Used below so Enter in the text area acts like pressing the "btn" button.
jscode <- '
$(function() {
var $els = $("[data-proxy-click]");
$.each(
$els,
function(idx, el) {
var $el = $(el);
var $proxy = $("#" + $el.data("proxyClick"));
$el.keydown(function (e) {
if (e.keyCode == 13) {
$proxy.click();
}
});
}
);
});
'
# UI: sidebar with the input text area (Enter proxied to "btn" via jscode),
# best-guess/top-guesses readouts and a cumulative-frequency plot; main panel
# holds instructions and project links.
ui =(pageWithSidebar(
  headerPanel("What's Next?"),
  sidebarPanel(
    tags$head(tags$script(HTML(jscode))),
    tagAppendAttributes(
      textAreaInput("textIn", NULL, ""),
      `data-proxy-click` = "btn"  # Enter in this text area triggers #btn
    ),
    actionButton("btn", "Gimme your best guess!"),
    div(HTML("Best Guess: ")),
    textOutput(outputId = "best_guess"),
    div(HTML("Top Guesses: ")),
    textOutput(outputId = "guesses"),
    # debug outputs, only rendered by the server when `testing` is TRUE
    # div(HTML("Last Leader: ")),
    # textOutput(outputId = "last_leader"),
    # div(HTML("Search State: ")),
    # textOutput(outputId = "search_state"),
    plotOutput(outputId = "top10", width = "100%", height = "300px")
  ),
  mainPanel(
    h1("Start Typing!"),
    p("After you type a few words, guesses for your next word will appear below the text box and button."),
    p("Press Enter or click the button to add the Best Guess to what you're typing."),
    p("Below the Best Guess, you'll see the top 10 guesses for your next word, presented in a cumulative distribution."),
    p("On a held-out test sample of ~850 trigrams, the model included the right next word in the top 10 guesses 37% of the time."),
    p("After typing 2 and 3 letters of the next word, it delivered an accurate best guess 47% and 53% of the time, respectively."),
    p("The model is predicting based on the last 3 words, using a back-off model with absolute discounting."),
    p("It includes some part-of-speech analysis as well, to remove from the answer set guesses of inaappropriate parts-of-speech. But that inclusion actually reduces predictive power. I'm still working on that..."),
    hr(),
    p(a(href = "https://github.com/daddyprasad5/capstoneshiny", "Here's the code for this shiny app.")),
    p(a(href = "https://github.com/daddyprasad5/capstone_final", "Here's the code for the model construction.")),
    p(a(href = "http://daddyprasad5.github.io/capstone_final.html", "Here's more info on the project."))
  )))
# Server: keeps per-session prediction state in closure variables (updated with
# <<-) and re-ranks next-word guesses as the user types. Relies on helpers
# loaded from ngramBootstrap.R (getDistro, getLastNGram, getLeader,
# isEndOfWord, getStartsWithLastWords, getTopCumFreq) and on n-gram tables
# (triGram, biGram, uniGram, POSDictionary, triplets) -- none are visible here.
server = function(input, output, session){
  source('ngramBootstrap.R')
  testing = FALSE #TRUE enables some additional debug display (search_state, last_leader)
  ll <- NULL ##last leader (last 3-gram for which a distribution was fetched)
  lw <- NULL ##last word (possibly partial) currently being typed
  rs <- NULL ##remainder of sentence, less the last word
  bg <- NULL ##best guess for next word
  g <- data.frame() ##guesses dataframe (current candidate distribution)
  # Fetch a fresh next-word distribution for the current leader n-gram
  distro <- function (line, leaderIn) {
    if (!is.null(leaderIn)) {
      last_distro <- getDistro(line, leaderIn, triGram, biGram, uniGram, POSDictionary, triplets)
      last_distro
    }
  }
  # Button (or proxied Enter key): append the current best guess to the text
  observeEvent(input$btn, {
    updateTextAreaInput(session, "textIn", value = paste0(rs, bg))
  })
  # Main reactive: recompute guesses every time the text input changes
  output$guesses <- renderPrint({
    leader <- getLastNGram(input$textIn, 3)
    lw <<- getLastNGram(input$textIn, 1)
    if (is.null(leader)) {guessesOut <- ""}
    else {
      leader_leader <- getLeader(getLastNGram(input$textIn, 4))
      last_ch <- substr(input$textIn, nchar(input$textIn), nchar(input$textIn))
      state <- "search within"
      # Determine route - either "search within" the existing suggested words
      # or "get new distro". We search within the current list of guesses
      # unless:
      # (a) no distribution has been fetched yet (ll is NULL),
      # (b) the user just finished a word, or
      # (c) no remaining guesses start with what has been typed (checked below)
      if (is.null(ll)) {state <- "get new distro"}
      else { if (isEndOfWord(last_ch)) {
        state <- "get new distro"
      }
      }
      if ( state == "search within") {
        if (testing) {output$search_state <- renderPrint({"search within"})}
        #gu <- g
        if (nrow(g) > 0) {
          guessesOut <- getStartsWithLastWords(g, lw, 10)
          guessesOut <- guessesOut[guessesOut != lw]
        }
        # NOTE(review): if g has zero rows here, guessesOut is undefined at
        # this point and this length() check would error -- confirm g is
        # always non-empty whenever state == "search within".
        if (length(guessesOut) == 0) {state = "get new distro"}
        else {
          output$best_guess <- renderPrint({guessesOut[1]})
          bg <<- guessesOut[1]
          rs <<- substr(input$textIn, 1, nchar(input$textIn) - nchar(lw))
          output$top10 <- renderPlot({getTopCumFreq(g[g$lastWord %in% guessesOut,],10)})
        }
      }
      if (state == "get new distro") {
        if (testing) {output$search_state <- renderPrint({"get new distro"})}
        g <<- distro(input$textIn, leader)
        guessesOut <- as.character(head(g$lastWord,10))
        if (testing) {output$last_leader <- renderPrint ({leader})}
        ll <<- leader
        output$best_guess <- renderPrint({guessesOut[1]})
        bg <<- guessesOut[1]
        rs <<- paste0(input$textIn," ")
        output$top10 <- renderPlot({getTopCumFreq(g,10)})
      }
    }
    return(guessesOut)
  })
}
shinyApp(ui = ui, server = server)
|
9e22125057273d639354680b83152318848b0c4e
|
a6e50afb0bfd2b4994d4bae77bfaa56a7534c2ef
|
/scripts/plot/HE_plot_15P.R
|
46bf57e102a5c269a45f70d3589cfc3d1579784b
|
[] |
no_license
|
Yuzi-00/starch-degradation
|
827336e5dd98286bd454147cf98046aadfd5abb2
|
9cf1e650803dd524c766d15cea72a66b78f20260
|
refs/heads/master
| 2021-07-11T13:16:38.323117
| 2020-12-07T19:44:29
| 2020-12-07T19:44:29
| 225,079,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,899
|
r
|
HE_plot_15P.R
|
library(tidyverse)
# ** plots are generated using the dataset with outliers replaced **
# ** scatter plot of the HE (hydrolysis extent) over digestion time **
# import the dataset; Plate is coerced to a factor so it can be used for colour
data_15P_cal_HE_outlier_replaced <- read_csv("data/tidydata/data_15P_cal_HE_outlier_replaced.csv") %>%
  mutate(Plate = as.factor(Plate))
# remove the unused wells
data_15P_cal_HE_outlier_replaced <- data_15P_cal_HE_outlier_replaced %>%
  filter(Sample != "X")
# rename the WellGroupType levels (for a proper legend later);
# "+" and "-" are bracketed because gsub() treats the pattern as a regex
data_15P_cal_HE_outlier_replaced$WellGroupType <- gsub("Control[+]", "Wx", data_15P_cal_HE_outlier_replaced$WellGroupType)
data_15P_cal_HE_outlier_replaced$WellGroupType <- gsub("Control[-]", "HAM", data_15P_cal_HE_outlier_replaced$WellGroupType)
data_15P_cal_HE_outlier_replaced$WellGroupType <- gsub("Test sample", "MAGIC", data_15P_cal_HE_outlier_replaced$WellGroupType)
# plotting
HE_scatter_15P <- ggplot(data = data_15P_cal_HE_outlier_replaced,
                         aes(x = Time,
                             y = HE,
                             color = WellGroupType)) + # colored by group (distinguish the controls from the test samples)
  geom_point(size = 1, shape = 1) + # open-circle points
  scale_y_continuous(limits = c(0,100), expand = c(0, 0)) + ## set the range of the y axis
  scale_x_continuous(limits = c(0, 2000), expand = c(0, 0)) +
  ylab("Hydrolysis extent (%)") + ## change the label for the y axis
  xlab("Time (min)") + ## change the name of the x axis
  theme(legend.title = element_blank(),
        panel.grid = element_blank(),
        axis.line = element_line(colour = "black", size = 0.5),
        panel.background = element_rect(fill = "white"),
        axis.ticks=element_line(
          colour="black",
          size=.5)) +
  labs(x = "Time (min)", y = "Hydrolysis extent (%)") +
  theme(axis.text.x = element_text(color="black", size=10),
        axis.text.y = element_text(color="black", size=10)) +
  theme(legend.key = element_blank(),
        legend.position = "bottom") +
  theme(plot.margin = unit(c(5.5,12,5.5,5.5), "pt"))
# save the plot
ggsave("figures/scatter-plot_15P.png",
       plot = HE_scatter_15P,
       width = 10,
       height = 8,
       units = "cm")
# ** line plot of the HE: one line per well, coloured by group **
HE_line_15P <- ggplot(data = data_15P_cal_HE_outlier_replaced,
                      aes(x = Time,
                          y = HE,
                          group = Well, # one line per well
                          color = WellGroupType)) + # colored by group (distinguish the controls from the test samples)
  geom_point(size = 1, shape = 1) + # open-circle points
  geom_line(size = 0.3, alpha = 0.5) +
  scale_y_continuous(limits = c(0,100), expand = c(0, 0)) + ## set the range of the y axis
  scale_x_continuous(limits = c(0, 2000), expand = c(0, 0)) +
  ylab("Hydrolysis extent (%)") + ## change the label for the y axis
  xlab("Time (min)") + ## change the name of the x axis
  theme(legend.title = element_blank(),
        panel.grid = element_blank(),
        axis.line = element_line(colour = "black", size = 0.5),
        panel.background = element_rect(fill = "white"),
        axis.ticks=element_line(
          colour="black",
          size=.5)) +
  labs(x = "Time (min)", y = "Hydrolysis extent (%)") +
  theme(axis.text.x = element_text(color="black", size=15),
        axis.text.y = element_text(color="black", size=15)) +
  theme(axis.title.x = element_text(size = 15),
        axis.title.y = element_text(size = 15)) +
  theme(legend.key = element_blank(),
        legend.position = "bottom",
        legend.spacing.x = unit(0.2, 'cm'),
        legend.text = element_text(size = 15))+
  theme(plot.margin = unit(c(7,18,5,5), "pt")) +
  scale_color_brewer(palette = "Set1")
HE_line_15P # print the plot to the active graphics device
# save the plot
ggsave("figures/line-plot_15P.png",
       plot = HE_line_15P,
       width = 15,
       height = 15,
       units = "cm")
# noted that there's one blue line lower than the negative control, let's find it
low_HE <- data_15P_cal_HE_outlier_replaced %>%
  filter(Time == 360) %>%
  arrange(HE) # seems that it's the sample 92
# let's plot the sample 92 with the negative control
Neg_92 <- data_15P_cal_HE_outlier_replaced %>%
  filter(Sample == "92" | Sample == "C-")
# NOTE(review): `low_HE` is reassigned here as a plot object, overwriting the
# inspection table created above -- intentional? consider a distinct name
low_HE <- ggplot(data = Neg_92,
                 aes(x = Time,
                     y = HE,
                     group = Well,
                     color = WellGroupType)) + # colored by group (distinguish the controls from the test samples)
  geom_point(size = 1, shape = 1) + # open-circle points
  geom_line(size = 0.005) +
  scale_y_continuous(limits = c(0,100), expand = c(0, 0)) + ## set the range of the y axis
  scale_x_continuous(limits = c(0, 2000), expand = c(0, 0)) +
  ylab("Hydrolysis extent (%)") + ## change the label for the y axis
  xlab("Time (min)") + ## change the name of the x axis
  theme(legend.title = element_blank(),
        panel.grid = element_blank(),
        axis.line = element_line(colour = "black", size = 0.5),
        panel.background = element_rect(fill = "white"),
        axis.ticks=element_line(
          colour="black",
          size=.5)) +
  labs(x = "Time (min)", y = "Hydrolysis extent (%)") +
  theme(axis.text.x = element_text(color="black", size=10),
        axis.text.y = element_text(color="black", size=10)) +
  theme(legend.key = element_blank(),
        legend.position = "bottom")+
  theme(plot.margin = unit(c(5.5,12,5.5,5.5), "pt"))
# save the plot with sample 92 and neg control
ggsave("figures/sample92.png",
       plot = low_HE,
       width = 10,
       height = 8,
       units = "cm")
# ** individual plot (pdf): one page per sample, wells coloured by plate **
pdf(file = "figures/degradability_individual plot.pdf") # open a pdf device; every plot printed below goes into this file
for(i in unique(data_15P_cal_HE_outlier_replaced$Sample)){ # i iterates over the distinct sample names
  # unique() gives every Sample name present in the dataset
  digestibility <- data_15P_cal_HE_outlier_replaced %>%
    filter(Sample == i) %>% # pipe this to the first argument on the right side
    # here, the first argument of ggplot is the data, otherwise we would have
    # to type ggplot(data = .) to let it pipe to this argument
    ggplot(aes(x = Time,
               y = HE,
               group = Well,
               color = Plate)) +
    geom_line() +
    geom_point() +
    ggtitle(i) + # set the title for each plot as the sample name i
    scale_y_continuous(limits = c(0,100), expand = c(0, 0)) + ## set the range of the y axis
    scale_x_continuous(limits = c(0, 2000), expand = c(0, 0)) +
    theme( # remove the legend
      panel.grid = element_blank(), # remove the grid
      axis.line = element_line(colour = "black", size = 0.5), # add the x axis
      panel.background = element_rect(fill = "white", color = "black"),
      #change the background color to white and the frame color to black
      axis.ticks = element_line(colour="black", size=.5)) +
    # change the color of the ticks into black and the size to 0.5
    labs(x = "Time (min)", y = "Hydrolysis extent (%)") +
    # change the title of the x and y axis
    theme(axis.text.x = element_text(color="black", size=10),
          axis.text.y = element_text(color="black", size=10)) +
    # change the color and size of the tick label for x and y axis
    theme(plot.margin = unit(c(5.5,12,5.5,5.5), "pt"))
  print(digestibility) # ggplot objects inside a loop must be print()ed explicitly
}
dev.off() # close the pdf device (stop sending plots to the pdf file)
# ** individual plot with the sd error: raw well curves (grey) plus mean +/- SE (red) **
# import the variation dataset (noted that the outliers had already been removed from this dataset)
data_15P_cal_var <- read_csv("analysis/data_15P_cal_var.csv")
# remove the unused wells
data_15P_cal_var <- data_15P_cal_var %>%
  filter(Sample != "X")
# create the loop: one pdf page per sample
pdf(file = "figures/degradability_individual plot_sd_error.pdf") # open a pdf device; every plot printed below goes into this file
for(i in unique(data_15P_cal_var$Sample)){ # i iterates over the distinct sample names
  digestibility_var <- data_15P_cal_var %>%
    filter(Sample == i) %>% # pipe this to the first argument on the right side
    ggplot() +
    # mean HE over time in red
    geom_line(aes(x = Time,
                  y = Mean_HE,
                  group = Sample),
              color = "red",
              size = 0.05) +
    # mean +/- standard error as red error bars
    geom_errorbar(aes(x = Time,
                      ymin = Mean_HE - Se_HE,
                      ymax = Mean_HE + Se_HE),
                  width=20,
                  color = "red") +
    # raw per-well observations, faded
    geom_point(aes(x = Time,
                   y = HE),
               shape = 1,
               alpha = 0.2,
               size = 1) +
    geom_line(aes(x = Time,
                  y = HE,
                  group = Well),
              size = 0.005,
              alpha = 0.2) +
    ggtitle(i) + # set the title for each plot as the sample name i
    scale_y_continuous(limits = c(0,100), expand = c(0, 0)) + ## set the range of the y axis
    scale_x_continuous(limits = c(0, 2000), expand = c(0, 0)) +
    theme( # remove the legend
      panel.grid = element_blank(), # remove the grid
      axis.line = element_line(colour = "black", size = 0.5), # add the x axis
      panel.background = element_rect(fill = "white", color = "black"),
      #change the background color to white and the frame color to black
      axis.ticks = element_line(colour="black", size=.5)) +
    # change the color of the ticks into black and the size to 0.5
    labs(x = "Time (min)", y = "Hydrolysis extent (%)") +
    # change the title of the x and y axis
    theme(axis.text.x = element_text(color="black", size=10),
          axis.text.y = element_text(color="black", size=10)) +
    # change the color and size of the tick label for x and y axis
    theme(plot.margin = unit(c(5.5,12,5.5,5.5), "pt"))
  print(digestibility_var) # ggplot objects inside a loop must be print()ed explicitly
}
dev.off() # close the pdf device (stop sending plots to the pdf file)
# ** check if the lower lines come from the same plate **
# create a vector that contains the samples that we want to check
check_items <- c(32, 196, 108, 209, 40, 204, 87, 112, 72, 104, 122, 27, 197, 188, 8, 2, 52, 183, 98, 163, 203, 121, 92)
# select the items above from the total dataset
lower_line <- data_15P_cal_HE_outlier_replaced %>%
  filter(Sample %in% check_items) %>% # keep only the samples listed in check_items
  filter(Time == 1800) %>% # choose just the 1800 min time point
  group_by(Sample) %>% # group by sample
  filter(HE == min(HE)) # keep the row with the minimum HE for each sample
# take a look at the corresponding plate number: they don't come from the same plates
|
ad99c225b869a7b0d1687b92e2df45a01efe5517
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.internet.of.things/man/iot1clickprojects_list_projects.Rd
|
23a17d20ae4a972da0301b750514dbd1a92c39da
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,114
|
rd
|
iot1clickprojects_list_projects.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot1clickprojects_operations.R
\name{iot1clickprojects_list_projects}
\alias{iot1clickprojects_list_projects}
\title{Lists the AWS IoT 1-Click project(s) associated with your AWS account
and region}
\usage{
iot1clickprojects_list_projects(nextToken, maxResults)
}
\arguments{
\item{nextToken}{The token to retrieve the next set of results.}
\item{maxResults}{The maximum number of results to return per request. If not set, a
default value of 100 is used.}
}
\value{
A list with the following syntax:\preformatted{list(
projects = list(
list(
arn = "string",
projectName = "string",
createdDate = as.POSIXct(
"2015-01-01"
),
updatedDate = as.POSIXct(
"2015-01-01"
),
tags = list(
"string"
)
)
),
nextToken = "string"
)
}
}
\description{
Lists the AWS IoT 1-Click project(s) associated with your AWS account
and region.
}
\section{Request syntax}{
\preformatted{svc$list_projects(
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
|
8bde298e9659cf3326b0aab695c2bff1a6dc58fa
|
303a19806df04acf4049c40be3eab4ef22ff09f2
|
/Computational_Statistics_and_Stochastic_Optimization/Simulation_Algorithms_and_Hypothesis_Testing.R
|
580348a8199e265232b7340c7491cf2c1e4f5faf
|
[] |
no_license
|
marmix96/Data-Science-Machine-Learning
|
2dccf08ce03474f7b117d951464a57f9dd5bf9ac
|
a112cf9ba2273ca2cfa04230b8cd52af99441929
|
refs/heads/master
| 2023-06-12T09:13:45.852792
| 2021-07-04T15:49:25
| 2021-07-04T15:49:25
| 382,886,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,752
|
r
|
Simulation_Algorithms_and_Hypothesis_Testing.R
|
#askisi_2a
set.seed(03400100)
f = function(x) {return (exp(x) / (exp(3) - 1) )}
F_inv = function(x) {return (log(1 + x*(exp(3) - 1)))}
u = runif(1000)
x = F_inv(u)
hist(x, prob = T, xlab='X', ylim = c(0, 1.2), xlim = c(0, 3), main='Inversion Sampling')
curve(f , from = 0, to = 3, lwd=2, xlab = "", ylab = "", add = T, col = "red", yaxt = "n")
#askisi_2b
set.seed(03400100)
accepted_counter = 0
total_samples = 0
accepted_samples = c()
N = 1000
M = 3 * exp(3) / (exp(3) - 1)
my_unif = function(x) {return (M * dunif(x, min = 0, max = 3))}
while (accepted_counter < N) {
y = runif(1, min = 0, max = 3)
u = runif(1)
if (u <= f(y) / (M*dunif(y, min=0, max=3))) {
accepted_counter = accepted_counter + 1
accepted_samples = c(accepted_samples, y)
}
total_samples = total_samples + 1
}
print(paste("Total samples needed: ", total_samples))
print(paste("Theoretical acceptance probability = ", signif(1/M, 5), "Estimated acceptance probability = ", signif(accepted_counter / total_samples, 5)))
hist(accepted_samples, prob = T, xlab='X', ylim = c(0, 1.2), xlim = c(0, 3), main='Rejection Sampling')
plot(f , from = 0, to = 3, lwd=2, xlab = "", ylab = "", add = T, col = "red", yaxt = "n")
curve(my_unif , from = 0, to = 3, lwd=2, xlab = "", ylab = "", add = T, col = "green", yaxt = "n")
#askisi_2c
set.seed(50)
Epanechnikov = function(x_i, x_j, h) { return (pmax(0.75 * (1 - ((x_i - x_j) / h)^2), 0))}
likelihood_CLV = function(s, h) {
L = 1
for (i in 1:length(s)) L = L * mean(Epanechnikov(s[i], s[-i], h)) / h
return (L)
}
L_opt = -Inf
h_opt = NA
samples = F_inv(runif(100))
for (h in seq(0.01, 3, 0.01)) {
L = likelihood_CLV(samples, h)
if (L > L_opt) {
L_opt = L
h_opt = h
}
}
print(paste("h_opt = ", h_opt))
f_hat = function(x) return (mean(Epanechnikov(x, samples, h_opt)) / h_opt)
X = seq(0, 3, 0.001)
Y = lapply(X, f_hat)
plot(X, Y, 'l', xlim = c(0, 3), ylim = c(0,1.1), col='green', main = "Kernel Density Estimation")
curve(f, 0, 3, add=T, col='red')
#askisi_2d
set.seed(17)
B = 1000
n = 10
samples = F_inv(runif(n))
T_test = abs(2 - mean(samples))
samples_move = samples + 2 - mean(samples)
bootstrap_ask_2d = function(B, n, s) {
boot = c()
for(i in 1:B){
sam = sample(s, n, replace=TRUE)
J = mean(sam)
boot = c(boot, J)
}
return (boot)
}
p_value = (sum(abs(2 - bootstrap_ask_2d(B, n, samples_move)) > T_test) + 1) / (B + 1)
print(paste("p_value = ", p_value))
samples_boot = bootstrap_ask_2d(B, n, samples)
Confidence_Interval = sort(samples_boot)[25:975]
print(paste("Confidence Interval 95% = (", Confidence_Interval[1], ",", Confidence_Interval[951], ")"))
|
3737086b9276bb26b9cb125dc4f822c5ff09681b
|
24fcc7a9446871f5affbc82d3ae1ed20d6a7c8aa
|
/R/data.R
|
6ec11a382f114767e531810f6b806ba2f92ee240
|
[
"MIT"
] |
permissive
|
mrc-ide/malariasimulation
|
3188657f6ff9da4ea35646189d0bd75d6e35aa52
|
397a7b7efe90958dd01f97110a1d16c71d041f33
|
refs/heads/master
| 2023-08-23T11:29:10.050424
| 2023-07-03T15:58:32
| 2023-07-03T15:58:32
| 233,609,741
| 10
| 10
|
NOASSERTION
| 2023-08-17T15:48:41
| 2020-01-13T14:06:17
|
R
|
UTF-8
|
R
| false
| false
| 263
|
r
|
data.R
|
#' Parameter draws
#'
#' 1000 draws from the joint posterior fit from
#'
#' @format ## `parameter_draws`
#' A list of lists of length 1000, each level contains a list of drawn parameters
#'
#' @source <https://www.nature.com/articles/ncomms4136>
"parameter_draws"
|
b538f2aac268409aadea0e38091cb4de1343503c
|
1a3a98a94d54bd72c6cba0dfbe72d013141fbc51
|
/man/declare-package.Rd
|
e3b65231e4f72a42d835ffc7d595d82b75fad05f
|
[] |
no_license
|
kcf-jackson/declare
|
2c86fbf488b64a72107d4f196c46b4fca96e061b
|
5c63ee97c64fb9b1d4ba7fa6eed0bb531e29f62d
|
refs/heads/master
| 2020-04-15T04:28:03.207712
| 2019-01-10T12:12:49
| 2019-01-10T12:12:49
| 164,384,476
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
declare-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/declare.R
\docType{package}
\name{declare-package}
\alias{declare}
\alias{declare-package}
\title{declare: Optional type annotation}
\description{
Allows user to add type annotation to a function via comments.
The package uses a simple mechanism, and it is designed such that the
annotation may be "switched on and off" as one pleases. This is an
experiment of programming in R with stronger type.
}
\keyword{internal}
|
3f35e915ddbb1393b2d3e0a421f25eb8e276c48c
|
ebee9629abd81143610a6352288ceb2296d111ac
|
/man/pcf_directional.Rd
|
5d52d0a06b18104ed72729ba6338f34e9a1ce2ef
|
[] |
no_license
|
antiphon/Kdirectional
|
76de70805b4537a5aff0636486eb387cb64069b0
|
98ab63c3491f1497d6fae8b7b096ddd58afc4b29
|
refs/heads/master
| 2023-02-26T02:19:41.235132
| 2023-02-12T13:07:11
| 2023-02-12T13:07:11
| 37,183,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,247
|
rd
|
pcf_directional.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcf_directional.R
\name{pcf_directional}
\alias{pcf_directional}
\title{Directed pcf function}
\usage{
pcf_directional(x, u, epsilon, r, ..., cylindrical = FALSE)
}
\arguments{
\item{x}{pp, spatstat's ppp-object, or a coordinate matrix, or a list with $x~coordinates $bbox~bounding box}
\item{u}{unit direction(s), if many then one direction per row.}
\item{epsilon}{Central angle for the directed cone (total angle is 2 epsilon)}
\item{r}{radius vector at which to evaluate K}
\item{...}{Passed on to \code{\link{pcf_anin}}}
\item{cylindrical}{If TRUE, compute the cylindrical version using \code{\link{pcf_anin_cylinder}}}
}
\value{
Returns a dataframe.
}
\description{
We use translation edge correction if we have an axis-oriented bounding box. Otherwise minus-correction.
}
\details{
Compute the sector/cone/cylindrical pcf function. This version uses the more general anisotropic-inhomonogeneous \link{pcf_anin_conical} and \code{\link{pcf_anin_cylinder}}, by setting the intensity = constant. See there for further parameters, especially kernel smoothing options.
}
\examples{
x <- matrix(runif(300), ncol=2)
k <- pcf_directional(x)
plot(k, rmax = 0.1)
}
|
aff331f6ee6e9f2f9bf3edb051ff67369714a854
|
efd0d6bec42aa38c1e62b6eecd5b1f4234581ec2
|
/man/time_n.Rd
|
eaa126b2d726a28df2871334bcf13a360af11823
|
[] |
no_license
|
jlegewie/bife
|
5bdabfd799f8075ea1f55f60002d3d427d45c2c5
|
2206789a3a8bc157fbd7c8d10aca6243d626dc1d
|
refs/heads/master
| 2020-04-06T16:52:13.632534
| 2018-11-15T02:03:50
| 2018-11-15T02:03:50
| 157,637,657
| 0
| 0
| null | 2018-11-15T02:01:55
| 2018-11-15T02:01:55
| null |
UTF-8
|
R
| false
| true
| 340
|
rd
|
time_n.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/time_n.R
\docType{data}
\name{time_n}
\alias{time_n}
\title{Computation time with varying N}
\format{A named matrix with 10 rows and 4 columns.}
\usage{
time_n
}
\description{
Results reported in the vignette.
}
\seealso{
\code{\link{bife}}
}
\keyword{datasets}
|
64ee3e63e0ba2d9d1736614173db9d14d902b6ac
|
876f5f4de8ccb50bb67a28af9d731f8d94d9988e
|
/R/coenoclinerutil.R
|
a75559588dc7850f1cf3c68e86c4dddb399e8800
|
[] |
no_license
|
jarioksa/GO
|
f9e763b5e3b0e89ada51d588de71ff5f71040a0a
|
42e5f65902d48dfe702127a8b5ef5572405ab380
|
refs/heads/master
| 2016-09-16T15:07:10.117729
| 2015-08-12T08:23:28
| 2015-08-12T08:23:28
| 32,721,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,516
|
r
|
coenoclinerutil.R
|
#' Utility Functions for 'coenocliner' Package
#'
#' Functions to automated simulation routines using \pkg{coenocliner}
#' package.
#'
#' @author Jari Oksanen
#'
#' @examples
#' require(coenocliner) || stop("examples need 'coenocliner' package")
#' ## small simulation
#' nsim <- 10
#' npoints <- 50
#' ## generate a set of species parameters over the maximum extent
#' sp <- replicate(nsim, BinomGaussPar(800, 8, 4))
#' ## sample much narrower proportion of the space
#' xy <- replicate(nsim, GradLocs(npoints, 3, 2))
#' ## Simulations: these can be easily parallelized using mclapply
#' ## (Linux, Mac) or parSapply (all).
#' sapply(seq_len(nsim), function(i)
#' coenorun1(coenocline(xy[,,i], "gaussian", sp[,i],
#' countModel="bernoulli")))
#'
#' @param n Number of SUs.
#' @param xrange,yrange Desired range of gradients.
#'
#' @importFrom stats runif
#'
#' @describeIn coenoclinerutil Gradient Locations
#' @export
`GradLocs` <-
function(n, xrange, yrange)
{
cbind("x" = runif(n, -xrange/2, xrange/2),
"y" = runif(n, -yrange/2, yrange/2))
}
#'
#' @param xy Gradient locations in two dimensions.
#' @param xmul,ymul Multipliers for each gradient
#'
#' @describeIn coenoclinerutil Multiply input gradient which
#' presumably is a unit square
#' @export
`GradMul` <-
function(xy, xmul, ymul)
{
sweep(xy, 2, c(xmul, ymul), "*")
}
#' @param nsp Number of species.
#' @param buffer Width of buffer zone for optima surrounding ranges.
#' @param tsd Standard deviation of tolerance in log-Normal
#' distribution, in log scale
#'
#' @importFrom stats runif rlnorm
#'
#' @describeIn coenoclinerutil Gaussian Parameters for Binomial Response.
#' @export
`BinomGaussPar` <-
function(nsp, xrange, yrange, buffer=2, tsd=0.1)
{
## Create Gaussian parameters for Binomial responses to be used
## with coenocliner
## uniform h in (0,1)
h <- runif(nsp)
## uniform opt in range +/- buffer
ux <- runif(nsp, -xrange/2 - buffer, xrange/2 + buffer)
uy <- runif(nsp, -yrange/2 - buffer, yrange/2 + buffer)
## lognormal tol. Mean of lognormal is exp(tsd^2/2)
tx <- rlnorm(nsp, -tsd^2/2, tsd)
ty <- rlnorm(nsp, -tsd^2/2, tsd)
## Order species centrifugally by their expected abundance at the
## origin. First species should be present in most simulations,
## even with short gradient spans, and last species are either
## globally rare or found only with longest gradients spanning the
## whole species space.
i <- rev(order(-(ux/tx)^2 - (uy/ty)^2 + 2*log(h)))
h <- h[i]
ux <- ux[i]
tx <- tx[i]
uy <- uy[i]
ty <- ty[i]
## out
list(px = cbind("opt" = ux, "tol" = tx, "h" = h),
py = cbind("opt" = uy, "tol" = ty))
}
#' @importFrom stats qbeta qnorm runif
#' @param gausspar Gaussian response parameters for species as
#' returned by \code{BinomGaussPar}.
#' @param shape Random log-uniform range of shape parameters \eqn{alpha}
#' and \eqn{gamma} of response function
#' @param cover Find range of beta response so that the same span
#' covers the same proportion of 1-dim integral as the Gaussian
#' response function.
#'
#' @describeIn coenoclinerutil Translate Gaussian parameters into
#' corresponding beta response parameters.
#' @export
`Gauss2betaPar` <-
function(gausspar, shape = c(0.5, 6.5), cover = 0.95)
{
## Define beta response so that it as similar to a Gaussian model
## as possible -- except for shape. Input **must** be similar as
## from BinomGausPar. This is not checked.
nsp <- nrow(gausspar[[1]])
## shapes uniform in (shape)
shape <- log(shape)
ax <- exp(runif(nsp, shape[1], shape[2]))
gx <- exp(runif(nsp, shape[1], shape[2]))
ay <- exp(runif(nsp, shape[1], shape[2]))
gy <- exp(runif(nsp, shape[1], shape[2]))
## Scale beta response range so that a multiply of 't' covers the
## same integral as Gaussian response: high alpha and gamma give
## narrow responses and must use wider range.
lim <- (1 - cover)/2
lim <- c(lim, 1-lim)
range <- diff(qnorm(lim))
rx <- range * gausspar$px[,"tol"] /
(qbeta(lim[2], ax+1, gx+1) - qbeta(lim[1], ax+1, gx+1))
ry <- range * gausspar$py[,"tol"] /
(qbeta(lim[2], ay+1, gy+1) - qbeta(lim[1], ay+1, gy+1))
## modal abundance at Gaussian opt
mx <- gausspar$px[,"opt"]
my <- gausspar$py[,"opt"]
## Response height A0 should be such that beta response has the
## same mass as the corresponding Gaussian. The integral of
## univariate Gaussian response is h*t*sqrt(2*pi) and the integral
## of beta response is adj*range*beta(alpha+1, gamma+1), and we
## need to find A0 giving the desired height adjustment adj, and
## here beta() is the real mathematical beta function. However, we
## do not want A0>1 because we target Binomial models.
Gmass <- with(gausspar, px[,"h"] * px[,"tol"] * py[,"tol"] * 2 * pi)
Bmass <- rx * ry * beta(ax+1, gx+1) * beta(ay+1, gy+1)
adj <- Gmass/Bmass
## bx, by and A0 are from Minchin, Vegetatio 71, 145-156 (1987),
## and they are similarly used in coenocliner.
bx <- ax/(ax+gx)
by <- ay/(ay+gy)
A0 <- pmin(adj * bx^ax * (1-bx)^gx * by^ay * (1-by)^gy, 1)
## collect
list(px = cbind("m" = mx, "r" = rx, "alpha" = ax, "gamma" = gx, "A0" = A0),
py = cbind("m" = my, "r" = ry, "alpha" = ay, "gamma" = gy))
}
#' @param comm Community data.
#'
#' @describeIn coenoclinerutil Drop missing species from the data.
#' @export
`DropMissingSpec` <-
function(comm)
{
cl <- class(comm)
locs <- locations(comm)
colnames(comm) <- paste0("sp", seq_len(ncol(comm)))
comm <- comm[, colSums(comm) > 0]
attr(comm, "locations") <- locs
class(comm) <- cl
comm
}
#' @importFrom vegan metaMDS cca decorana procrustes specnumber wisconsin
#' @importFrom coenocliner locations
#' @param sim One simulated community.
#' @param tot Binomial total in \code{sim}.
#' @param family Error family passed to \code{\link{GO}}.
#' @param far Weirdness limit passed to \code{\link{GO}}.
#' @param trace Print tracing information. If \code{FALSE} or
#' \code{0}, work as silently as possible, and higher values print more.
#' @describeIn coenoclinerutil Takes one simulated community for
#' ordination with GO, NMDS, CA and DCA and returns average Procrustes
#' precision
#'
#' @export
`coenorun1` <-
function(sim, tot=1, family = "binomial", far=4, trace = TRUE)
{
locs <- locations(sim)
sim <- DropMissingSpec(sim)
empty <- rowSums(sim) <= 0
if (any(empty)) {
locs <- locs[!empty,]
sim <- sim[!empty,]
}
n <- nrow(locs)
out <- rep(NA, 6)
names(out) <- c("GO", "NMDS", "CA", "DCA", "gamma", "alpha")
## GO can fail -- even metaGO. Hardcode metaGO options to protect
## this routine against possible changes in metaGO.
mgo <- try(metaGO(sim, k=2, family=family, tot=tot, far=far, iterlim=1000,
trace = trace, firstOK=TRUE, trymax=3))
mmds <- metaMDS(sim, maxit=500, trymax=200, sratmax=0.999999, trace= trace > 1)
mca <- cca(sim)
mdca <- decorana(sim)
if (!inherits(mgo, "try-error"))
out["GO"] <- sqrt(procrustes(locs, mgo)$ss/n)
out["NMDS"] <- sqrt(procrustes(locs, mmds)$ss/n)
out["CA"] <- sqrt(procrustes(locs, mca, scaling=1)$ss/n)
out["DCA"] <- sqrt(procrustes(locs, mdca, choices=1:2)$ss/n)
## richness statistics
out["gamma"] <- ncol(sim)
out["alpha"] <- mean(specnumber(sim))
out
}
|
2a8008a3e18aa8bd99a95dbb378d3965c922dd3d
|
957b2233b78ce98dbce9fe10b676cee213da0d18
|
/superlda-dev/man/get_top_keywords.Rd
|
39356fd12f0f13f74d1daf9eb19c96a4793ff034
|
[] |
no_license
|
traviscoan/politics_in_sermons
|
646d0a69a3f8afd111832fa0e5d245e52de3e20b
|
eebe45252f39f3ac69f761ff55891a99bf8359c7
|
refs/heads/master
| 2020-11-27T01:26:22.360047
| 2020-07-09T19:36:32
| 2020-07-09T19:36:32
| 278,427,604
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 681
|
rd
|
get_top_keywords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{get_top_keywords}
\alias{get_top_keywords}
\title{Return the most probable tokens for each label}
\usage{
get_top_keywords(word_probs, slda_obj, n = 10)
}
\arguments{
\item{word_probs}{Word (or token) probability matrix from an estimated model}
\item{slda_obj}{A superlda data object (see \link{convert_superlda} for more information on superlda data objects).}
\item{n}{Number of keywords to return (default: n = 10)}
}
\description{
Returns the "n" most probable tokens (or "keywords) for each label
}
\author{
Travis G. Coan \href{mailto:t.coan@exeter.ac.uk}{t.coan@exeter.ac.uk}
}
|
3074cb6f12718e23a31ac82a22b3087fd7f4285d
|
cc1ef8247b36e600a795ab7d2662085712806f01
|
/prophet_R.R
|
fa5779176119680a875b03e831289ba230243163
|
[] |
no_license
|
fyin-stats/prophet_time_series
|
9f6ed1bb3aa27082498f578c6efd8b17b3b35053
|
83cc8aac0447dc1f48fd93dc697a7e73f75f0f5f
|
refs/heads/main
| 2023-03-14T18:46:50.396013
| 2021-03-01T05:09:13
| 2021-03-01T05:09:13
| 343,233,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,270
|
r
|
prophet_R.R
|
##################################
##################################
ipak <- function(pkg){
new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
if (length(new.pkg))
install.packages(new.pkg, dependencies = TRUE)
try(sapply(pkg, require, character.only = TRUE), silent = TRUE)
}
packages <- c("xts", "zoo", "astsa","dplyr","prophet","ggplot2","tidyr")
ipak(packages)
####################################
####################################
df <- read.csv("./data/example_wp_log_peyton_manning.csv", header = TRUE)
####################################
####################################
m <- prophet(df)
#####################################
#### predictions are made on a dataframe with a column ds containing
#### the dates for which predictions are to be made
#####################################
future <- make_future_dataframe(m, periods = 365)
tail(future)
######################################
######################################
# use the generic predict function to get our forecast
######################################
######################################
# The forecast object is a dataframe with a column yhat containing the forecast
# it has additional columns for uncertainty intervals and seasonal components
#
forecast <- predict(m, future)
#
tail(forecast[c("ds", "yhat", "yhat_lower", "yhat_upper")])
# can use the generic plot function to plot the forecast, by passing
# in the model and the forecast dataframe
plot(m, forecast)
# You can use the prophet_plot_components function to see the forecast broken
# down into trend, weekly seasonality, and yearly seasonality.
prophet_plot_components(m, forecast)
######################################################
# forecasting growth
df <- read.csv("./data/example_wp_log_R.csv", header = TRUE)
#
df_xts <- xts(df$y, order.by = as.Date(df$ds))
#
plot(df_xts)
# specify the carrying capacity in column cap
# here we assume a particular value, but this would usually be set using data or expertise
# about the market size
df$cap <- 8.5
# note that cap must be specified for every row in the data frame, and that is does not have to be constant
# if the market size is growing, then cap can be an increasing sequence
# fit the model as before
m <- prophet(df, growth = "logistic")
# future
# make a dataframe for future predictions, we must also specify the capacity in the future
# here we keep capacity constant at the same value as in the history
# and forecast 5 years into the future
#
future <- make_future_dataframe(m, periods = 1826)
future$cap <- 8.5
fcst <- predict(m, future)
plot(m, fcst)
###############################################################
# Saturating minimum
# the logistic growth model can also handle a saturating minimum, which is specified
# with a column floor in the same way as the cap column specifies the maximum
#
df$y <- 10 - df$y
df$cap <- 6
df$floor <- 1.5
future$cap <- 6
future$floor <- 1.5
m <- prophet(m, future)
plot(m, fcst)
# To use a logistic growth trend with a saturating minimum, a maximum capacity must also be specified.
##################################################
### real time series frequently have abrupt changes
### in their trajectories.
### by default, prophet will automatically detect these changepoints
### and will allow the trend to adapt appropriately.
### if you want to have finer control, there are several input arguments
### you can use
### Prophet detects changepoints by first specifying a large number of potential
### changepoints at which the rate is allowed to change
### it then puts a sparse prior on the magnitudes of the rate changes
### L1 regularization
### n_changepoints
###
plot(m, forecast) + add_changepoints_to_plot(m)
### changepoint_range
### adjust trend flexibility
### if the trend changes are being overfit or underfit, you can adjust the strength
### of the sparse prior using the input argument changepoint_prior_scale
### by default, this parameter is set to 0.05
### increasing it will make the trend more flexible
m <- prophet(df, changepoint.prior.scale = 0.5)
forecast <- predict(m, future)
plot(m, forecast)
### decreasing it will make the trend less flexible
### cross validation
### specifying the locations of the change points
m <- prophet(df, changepoints = c('2014-01-01'))
forecast <- predict(m, future)
plot(m, forecast)
##############################################################################
##############################################################################
### Seasonality, Holiday effects and regressors
### modeling holidays and special events
##############################################################################
##############################################################################
#
# If you have holidays or other recurring events that you’d like to model,
# you must create a dataframe for them. It has two columns (holiday and ds) and a row
# for each occurrence of the holiday. It must include all occurrences of the holiday, both
# in the past (back as far as the historical data go) and in the future
# (out as far as the forecast is being made). If they won’t repeat in the future,
# Prophet will model them and then not include them in the forecast.
# can include columns lower_window and upper_window which extend the holiday out to
# lower window, upper window days around the date
#
library(dplyr)
playoffs <- data_frame(
holiday = 'playoff',
ds = as.Date(c('2008-01-13', '2009-01-03', '2010-01-16',
'2010-01-24', '2010-02-07', '2011-01-08',
'2013-01-12', '2014-01-12', '2014-01-19',
'2014-02-02', '2015-01-11', '2016-01-17',
'2016-01-24', '2016-02-07')),
lower_window = 0,
upper_window = 1
)
#
superbowls <- data_frame(
holiday = 'superbowl',
ds = as.Date(c('2010-02-07', '2014-02-02', '2016-02-07')),
lower_window = 0,
upper_window = 1
)
#
holidays <- bind_rows(playoffs, superbowls)
#
m <- prophet(df, holidays = holidays)
forecast <- predict(m, future)
# The holiday effect can be seen in the forecast dataframe:
#
# R
forecast %>%
select(ds, playoff, superbowl) %>%
filter(abs(playoff + superbowl) > 0) %>%
tail(10)
#
# The holiday effects will also show up in the components plot,
# where we see that there is a spike on the days around playoff appearances,
# with an especially large spike for the superbowl:
prophet_plot_components(m, forecast)
# built-in country holidays
m <- prophet(holidays = holidays)
m <- add_country_holidays(m, country_name = 'US')
m <- fit.prophet(m, df)
#
m$train.holiday.names
#
# R
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
#######################################################
## Fourier order of seasonalities
#######################################################
m <- prophet(df)
prophet::plot_yearly(m)
########################################################
# the default values are often appropriate, but they can be increased when
# the seasonality needs to fit higher-frequency changes, and generally less smooth
# the fourier order can be specified for each built-in seasonality when instantiating the model
# here it is increased to 20
m <- prophet(df, yearly.seasonality = 20)
prophet:::plot_yearly(m)
#########################################################
# Increasing the number of Fourier terms allows the seasonality to fit
# faster changing cycles, but can also lead to overfitting
##########################################################
# Specifying custom seasonalities
# Prophet will by default fit weekly and yearly seasonalities
# if the time series is more than two cycles long
# it will also fit daily seasonality for a sub-daily time series
# you can add other seasonalities (monthly, quarterly, hourly)
# using the add_seasonality method
# The inputs to this function are a name, the period of the seasonality in days
# and the fourier order for the seasonality
# default
# fourier order of 3 for weekly
# fourier order of 10 for yearly
m <- prophet(weekly.seasonality=FALSE)
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- fit.prophet(m, df)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
####################################
### Seasonalities that depend on other factors
####################################
# in some instances, the seasonality may depend on other factors, such as a weekly seasonal pattern
# that is different during the summer that it is during the rest of the year
# or a daily seasonal pattern that is different on weekends vs on weekdays
# these types of seasonal can be modeled using conditional seasonalities
#
is_nfl_season <- function(ds) {
dates <- as.Date(ds)
month <- as.numeric(format(dates, '%m'))
return(month > 8 | month < 2)
}
df$on_season <- is_nfl_season(df$ds)
df$off_season <- !is_nfl_season(df$ds)
#
m <- add_seasonality(m, name='weekly_on_season', period=7, fourier.order=3, condition.name='on_season')
m <- add_seasonality(m, name='weekly_off_season', period=7, fourier.order=3, condition.name='off_season')
m <- fit.prophet(m, df)
future$on_season <- is_nfl_season(future$ds)
future$off_season <- !is_nfl_season(future$ds)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
#
####################################
####################################
# Prior scale for holidays and seasonality
####################################
####################################
#
# If you find that the holidays are overfitting, you can adjust their prior scale to
# smooth them using the parameter holidays_prior_scale. By default this parameter is 10,
# which provides very little regularization. Reducing this parameter dampens holiday effects:
m <- prophet(df, holidays = holidays, holidays.prior.scale = 0.05)
forecast <- predict(m, future)
forecast %>%
select(ds, playoff, superbowl) %>%
filter(abs(playoff + superbowl) > 0) %>%
tail(10)
#
# R
m <- prophet()
m <- add_seasonality(
m, name='weekly', period=7, fourier.order=3, prior.scale=0.1)
####################################
# Additional regressors
# additional regressors can be added to the linear part of the model using the add_regressor method or
# function. A column with the regressor value will need to be present in both the fitting and prediction
# dataframes
# for example, we can add an additional effect on Sundays during the NFL season.
# On the components plot, this effect will show up in the "extra_regressors" plot
#
nfl_sunday <- function(ds) {
dates <- as.Date(ds)
month <- as.numeric(format(dates, '%m'))
as.numeric((weekdays(dates) == "Sunday") & (month > 8 | month < 2))
}
df$nfl_sunday <- nfl_sunday(df$ds)
m <- prophet()
m <- add_regressor(m, 'nfl_sunday')
m <- fit.prophet(m, df)
future$nfl_sunday <- nfl_sunday(future$ds)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
#############################################
#############################################
####### Multiplicative seasonality
#############################################
#############################################
# additive seasonality might not always work
# example: airline passengers
#
df <- read.csv('./data/example_air_passengers.csv', header = TRUE)
m <- prophet(df)
future <- make_future_dataframe(m, 50, freq = 'm')
forecast <- predict(m, future)
plot(m, forecast)
# this time series has a clear yearly cycyle
# but the seasonality in the forecast is too large at the start
# of the time series and too small at the end
# in this time series, the seasonality is not a constant additive factor
# as assumed by Prophet
# multiplicative factor
#
m <- prophet(df, seasonality.mode = 'multiplicative')
forecast <- predict(m, future)
plot(m, forecast)
#
# R
# The components figure will now show the seasonality as a percent of the trend:
prophet_plot_components(m, forecast)
# With seasonality_mode = "multiplicative", holiday effects will also be
# modeled as multiplicative
# Any added seasonalities or extra regressors will by default use whatever seasonality_mode is set to, but can be overriden by specifying mode='additive' or
# mode='multiplicative' as an argument when adding the seasonality or regressor.
#
m <- prophet(seasonality.mode = 'multiplicative')
m <- add_seasonality(m, 'quarterly', period = 91.25, fourier.order = 8, mode = 'additive')
m <- add_regressor(m, 'regressor', mode = 'additive')
# #
# Additive and multiplicative extra regressors will show up in
# separate panels on the components plot. Note, however,
# that it is pretty unlikely to have a mix of additive and multiplicative seasonalities,
# so this will generally only be used
# if there is a reason to expect that to be the case.
###############################
# Uncertainty intervals
###############################
# by default, prophet will return uncertainty intervals for the forecast yhat
# there are several important assumptions behind these uncertainty intervals
# three sources of uncertainty in the forecast
# uncertainty in the trend
# uncertainty in the seasonality estimates
# additional observation noise
# uncertainty in the trend
#
m <- prophet(df, interval.width = 0.95)
forecast <- predict(m, future)
# uncertainty in seasonality
# By default Prophet will only return uncertainty in the trend and observation noise.
# To get uncertainty in seasonality, you must do full Bayesian sampling.
# This is done using the parameter mcmc.samples (which defaults to 0).
m <- prophet(df, mcmc.samples = 300)
forecast <- predict(m, future)
#
prophet_plot_components(m, forecast)
#
# You can access the raw posterior predictive samples in Python using the method m.predictive_samples(future),
# or in R using the function predictive_samples(m, future).
###########################################
###########################################
### Outliers
###########################################
###########################################
df <- read.csv('./data/example_wp_log_R_outliers1.csv')
m <- prophet(df)
future <- make_future_dataframe(m, periods = 1096)
forecast <- predict(m, future)
plot(m, forecast)
# The trend forecast seems reasonable, but the uncertainty intervals seem way too wide.
# Prophet is able to handle the outliers in the history,
# but only by fitting them with trend changes.
# The uncertainty model then expects future trend changes of similar magnitude.
# The best way to handle outliers is to remove them - Prophet has no problem with missing data.
# If you set their values to NA in the history but leave the dates in future,
# then Prophet will give you a prediction for their values.
outliers <- (as.Date(df$ds) > as.Date('2010-01-01')
& as.Date(df$ds) < as.Date('2011-01-01'))
df$y[outliers] = NA
m <- prophet(df)
forecast <- predict(m, future)
plot(m, forecast)
#
df <- read.csv('./data/example_wp_log_R_outliers2.csv')
m <- prophet(df)
future <- make_future_dataframe(m, periods = 1096)
forecast <- predict(m, future)
plot(m, forecast)
#
outliers <- (as.Date(df$ds) > as.Date('2015-06-01')
& as.Date(df$ds) < as.Date('2015-06-30'))
df$y[outliers] = NA
m <- prophet(df)
forecast <- predict(m, future)
plot(m, forecast)
####################################
####################################
### non daily data
####################################
####################################
# Prophet can make forecasts for time series with sub-daily observations by passing
# in a dataframe with timestamps in the ds column.
# The format of the timestamps should be YYYY-MM-DD HH:MM:SS
df <- read.csv('./data/example_yosemite_temps.csv')
m <- prophet(df, changepoint.prior.scale=0.01)
future <- make_future_dataframe(m, periods = 300, freq = 60 * 60)
fcst <- predict(m, future)
plot(m, fcst)
#
prophet_plot_components(m, fcst)
# You can use Prophet to fit monthly data. However, the underlying model
# is continuous-time, which means that you can get strange results if you fit the model
# to monthly data and then ask for daily forecasts.
# Here we forecast US retail sales volume for the next 10 years:
#
df <- read.csv('./data/example_retail_sales.csv')
m <- prophet(df, seasonality.mode = 'multiplicative')
future <- make_future_dataframe(m, periods = 3652)
fcst <- predict(m, future)
plot(m, fcst)
# only predict at the month level
future <- make_future_dataframe(m, periods = 120, freq = 'month')
fcst <- predict(m, future)
plot(m, fcst)
#
###############################################
###############################################
###### Diagnostics
###############################################
###############################################
###### Prophet includes functionality for time series cross validation
###### to measure forecast error using historical data
######
# Here we do cross-validation to assess prediction performance on a horizon of 365 days,
# starting with 730 days of training data in the first cutoff and then making predictions every 180 days.
# On this 8 year time series, this corresponds to 11 total forecasts.
df.cv <- cross_validation(m, initial = 730, period = 180, horizon = 365, units = 'days')
head(df.cv)
# custom cutoffs
# Custom cutoffs can also be supplied as a list of dates to the cutoffs keyword in
# the cross_validation function in Python and R. For example, three cutoffs six months apart,
# would need to be passed to the cutoffs argument in a date format like:
cutoffs <- as.Date(c('2013-02-15', '2013-08-15', '2014-02-15'))
df.cv2 <- cross_validation(m, cutoffs = cutoffs, horizon = 365, units = 'days')
# performance metrics
# yhat, yhat_lower, yhat_upper compared to y
# as a function of the distance from the cutoff
# the statistics computed are MSE, RMSE, mean absolute error,
# mean absolute percent error
df.p <- performance_metrics(df.cv)
head(df.p)
#
plot_cross_validation_metric(df.cv, metric = 'mape')
# parallelizing cross validation
#
########################
### saving models
########################
### R rds
### python, json
##########################
## flat trends
|
e2468ed626b006ac8db81175a571d4b95a0d72f0
|
957c8d51a391ee5813d99fdd17087aa5fa986f39
|
/Barcode_analysis.R
|
75ba84daf480c0ac6cf70f3b9c2e1efc1466700f
|
[] |
no_license
|
minjdott/Barcode_Analysis
|
5d87fd75e71a4f44250e4d51e021dc302d94f03e
|
4bd5b33d79730ceac66c30e9e5eb3c328ce837e4
|
refs/heads/master
| 2020-04-19T20:52:37.532451
| 2019-02-01T04:48:29
| 2019-02-01T04:48:29
| 168,426,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,126
|
r
|
Barcode_analysis.R
|
## Batch-process ABI Sanger chromatogram (.ab1) files into a FASTA file,
## then read the plate QC table to identify sequences that passed QC.
library(sangerseqR)

# Names of all .ab1 trace files under ./Data.
list.names <- list.files(path = path.expand("./Data"), pattern = ".ab1$")

# NOTE(review): the script changes the working directory so that
# read.abif(), write.fasta() and the QC csv all resolve relative to ./Data;
# kept for behavioral compatibility, but passing full paths would be safer.
setwd("./Data")

# Read every trace file into a named, preallocated list.
# FIX: use [[i]] (not [i]) so each S4 abif object is stored as a proper
# list element instead of being coerced through `[<-` (the source of the
# warnings the original author "ignored").
list.sequence <- vector("list", length(list.names))
for (i in seq_along(list.names)) {
  list.sequence[[i]] <- read.abif(list.names[i])
}
names(list.sequence) <- list.names

# Extract the sangerseq object from each trace.
Y <- vector("list", length(list.sequence))
for (i in seq_along(list.sequence)) {
  Y[[i]] <- sangerseq(list.sequence[[i]])
}

# Primary/secondary basecalls for every sequence.
Primary <- vector("list", length(Y))
for (r in seq_along(Y)) {
  Primary[[r]] <- makeBaseCalls(Y[[r]])
}

# Keep only the primary sequence as a character string, because
# write.fasta() expects strings rather than S4 sequence objects.
sequence <- vector("list", length(Primary))
for (i in seq_along(Primary)) {
  sequence[[i]] <- as.character(Primary[[i]]@primarySeq)
}

## Write all primary sequences to a FASTA file named after the trace files.
library(seqinr) # for write.fasta()
write.fasta(sequences = sequence, names = list.names, file.out = "data.fasta")

### Quality control: keep only the chromatogram names flagged Ok == "TRUE".
control <- read.csv("BarcodePlateStats.csv")
library(dplyr)
names <- control %>% filter(Ok == "TRUE") %>%
  select(Chromatogram)
|
f739ba1da3884510614772fd73efe2474c090eb5
|
fae5be729442ae4d10cecf38eff2c27e56347be9
|
/spmutils/R/spm_stan.r
|
9a3bc51b6d43bb241209dc09c8f492c10a571b7b
|
[
"MIT"
] |
permissive
|
mlpeck/spmutils
|
80f38697c71ef41df33d95539c6b568c21212b1d
|
455cc47fc636691ce35ea5372d84861603c13166
|
refs/heads/master
| 2023-04-27T22:56:48.688934
| 2023-04-16T22:38:14
| 2023-04-16T22:38:14
| 159,246,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,471
|
r
|
spm_stan.r
|
#' Fit the stellar population model for a single spaxel/fiber with Stan.
#'
#' Compiles (or reuses) a Stan model, seeds the chains from a MAP
#' optimization initialized off the nonnegative-ls fits, then samples.
#'
#' @param gdat Galaxy data list; @param dz per-fiber redshift offsets;
#' @param nnfits nonnegative-ls fit results; @param which.spax fiber index.
#' @param prep_data,init_opt,init_sampler Pluggable helpers that build the
#'   Stan data list, the optimizer inits, and the per-chain sampler inits.
#' @param stan_model Precompiled model; compiled from stan_file/stan_filedir
#'   when NULL (compilation is expensive — pass a model in batch loops).
#' @param iter_opt Max iterations for the MAP optimization.
#' @param jv Jitter scale used when constructing initial values.
#' @param iter,warmup,thin,chains Standard Stan sampling controls.
#' @param OP Open a progress window? @param ... Forwarded to rstan::sampling.
#' @return List with the Stan data list, the stanfit object, and the
#'   normalization factors (norm_g/norm_st/norm_em) plus the in_em index
#'   needed to rescale posterior draws to physical units downstream.
stanfit_one <- function(gdat, dz, nnfits, which.spax,
                        prep_data = prep_data_mod,
                        init_opt = init_opt_mod,
                        init_sampler = init_sampler_mod,
                        stan_model=NULL, 
                        stan_file="spm_dust_mod_simpl.stan", stan_filedir="~/spmcode/",
                        iter_opt=5000,
                        jv=1.e-4,
                        iter=1000, warmup=250, thin=1, chains=4, 
                        OP=FALSE, ...) {
  # NOTE(review): library() is preferred over require() for hard deps.
  require(rstan)
  if (is.null(stan_model)) {
    stan_model <- rstan::stan_model(file=file.path(stan_filedir, stan_file))
  }
  spm_data <- prep_data(gdat, dz, nnfits, which.spax)
  # MAP estimate used as a starting point for the sampler chains.
  inits <- init_opt(spm_data, nnfits, which.spax, jv)
  spm_opt <- optimizing(stan_model, data=spm_data, init=inits, as_vector=FALSE, verbose=TRUE, iter=iter_opt)
  # One jittered init per chain, derived from the MAP point.
  init_pars <- lapply(X=1:chains, init_sampler, stan_opt=spm_opt$par, jv=jv)
  stanfit <- sampling(stan_model, data=spm_data,
                      chains=chains, iter=iter, warmup=warmup, thin=thin,
                      cores=min(chains, getOption("mc.cores")),
                      init=init_pars, open_progress=OP, ...)
  list(spm_data=spm_data, stanfit=stanfit, 
       norm_g=spm_data$norm_g, norm_st=spm_data$norm_st, norm_em=spm_data$norm_em, in_em=spm_data$in_em)
}
#' Run stanfit_one over all fibers of a galaxy, tracking summaries.
#'
#' The tracked-state helpers (init_tracked / update_tracked /
#' return_tracked) accumulate per-fiber posterior summaries and checkpoint
#' them to `fpart`, enabling restart: when `start` is supplied and `fpart`
#' exists, previously saved state is reloaded and fitting resumes there.
#' Fibers with missing redshift offset or missing nnfits tauv are skipped.
#'
#' @param start,end First/last fiber index to fit (defaults: 1 .. nrow).
#' @param fpart Checkpoint file for the tracked state.
#' @param ... Forwarded to stanfit_one / rstan::sampling.
#' @return Whatever return_tracked() assembles from the tracked state.
#' NOTE(review): `ages`, `lib.ssp`, `emlines` appear to be package-level
#' data objects — confirm; `nl` is computed but never used.
stanfit_batch <- function(gdat, dz, nnfits, 
                          init_tracked = init_tracked_mod,
                          update_tracked = update_tracked_mod,
                          return_tracked = return_tracked_mod,
                          prep_data = prep_data_mod,
                          init_opt = init_opt_mod,
                          init_sampler = init_sampler_mod,
                          stan_file="spm_dust_mod_simpl.stan", stan_filedir="~/spmcode/",
                          iter_opt=5000,
                          jv=1.e-4,
                          iter=1000, warmup=250, chains=4, 
                          OP=FALSE, 
                          start=NULL, end=NULL, fpart="bfits.rda", ...) {
  dims <- dim(gdat$flux)
  dz <- dz$dz
  nsim <- (iter-warmup)*chains
  nt <- length(ages)
  nr <- dims[1]
  n_st <- ncol(lib.ssp)-1
  n_em <- length(emlines)
  nl <- length(gdat$lambda)
  # Compile once and reuse across all fibers.
  smodel <- rstan::stan_model(file.path(stan_filedir, stan_file))
  if (is.null(start) || !file.exists(fpart)) {
    # Fresh run: allocate the tracked-summary containers.
    init_tracked(nsim, n_st, n_em, nr)
    start <- 1
  } else {
    # Restart: restore previously checkpointed tracked state.
    load(fpart)
  }
  if (is.null(end)) {
    end <- nr
  }
  for (i in start:end) {
    # Skip fibers without a usable redshift or nonnegative-ls fit.
    if (is.na(dz[i]) || is.na(nnfits$tauv[i])) next
    sfit <- stanfit_one(gdat, dz, nnfits, which.spax=i, 
                        prep_data,
                        init_opt,
                        init_sampler,
                        stan_model=smodel, 
                        iter_opt=iter_opt, jv=jv,
                        iter = iter, warmup = warmup, chains = chains,
                        OP=OP, ...)
    # Quick visual check of the posterior predictive for this fiber.
    plot(plotpp(sfit)+ggtitle(paste("fiber =", i)))
    # Fold this fiber's results into the tracked state and checkpoint.
    update_tracked(i, sfit, fpart)
    rm(sfit)
  }
  return_tracked()
}
## star formation history, mass growth history, etc.
#' Star formation history, mass growth history and derived SFR quantities
#' from posterior draws of the stellar coefficients.
#'
#' Accepts either a stanfit_one() result list as the first `...` argument
#' (posterior extracted via rstan) or explicit `b_st`/`norm_st` arguments.
#'
#' @param z Redshift; @param fibersinbin number of fibers in the bin;
#' @param tsf Recent-SF timescale in Gyr used for the SFR estimate.
#' @return List of posterior matrices/vectors: sfh_post, mgh_post
#'   (cumulative, normalized to 1 at T=0), totalmg_post, mstar,
#'   sigma_mstar, sfr, sigma_sfr, ssfr, relsfr.
#' NOTE(review): `ages`, `mstar` (SSP mass-to-light vector) and the
#' `cosmo` helpers are resolved from package scope — confirm. The global
#' `mstar` is read on the rmass line below and then SHADOWED by the local
#' log-total-mass assignment near the end; order matters here.
get_sfh <- function(..., z, fibersinbin=1, tsf=0.1) {
    ins <- list(...)
    if (is.list(ins[[1]])) {
        # A stanfit_one() result: pull draws and undo the normalizations.
        ins <- ins[[1]]
        post <- rstan::extract(ins$stanfit)
        b_st <- post$b_st
        norm_st <- ins$norm_st
        if (exists("norm_g", ins)) {
            b_st <- b_st*ins$norm_g
            norm_st <- 1/norm_st
        }
    } else {
        # Explicit b_st / norm_st supplied by the caller (batch path).
        b_st <- ins$b_st
        norm_st <- ins$norm_st
    }
    nsim <- nrow(b_st)
    nt <- length(ages)
    nz <- ncol(b_st)/nt      # number of metallicity bins in the SSP grid
    T.gyr <- 10^(ages-9)
    isf <- which.min(abs(tsf-T.gyr))   # index of the recent-SF age bin
    # log10 projected area of the bin (kpc^2) for surface densities.
    binarea <- log10(pi*fibersinbin*cosmo::ascale(z)^2)
    # Rescale coefficients to solar luminosities at redshift z.
    b_st <- t(t(b_st)*norm_st)*cosmo::lum.sol(1, z)
    rmass <- t(t(b_st) * mstar)
    sfh_post <- matrix(0, nsim, nt)
    mgh_post <- matrix(0, nsim, nt)
    # Sum over metallicity bins to get per-age totals.
    for (i in 1:nz) {
        sfh_post <- sfh_post + b_st[,((i-1)*nt+1):(i*nt)]
        mgh_post <- mgh_post + rmass[,((i-1)*nt+1):(i*nt)]
    }
    totalmg_post <- cbind(rowSums(mgh_post), rowSums(mgh_post) - t(apply(mgh_post, 1, cumsum)))
    # Cumulative mass growth, normalized so the present-day value is 1.
    mgh_post <- cbind(rep(1, nsim), 1 - (t(apply(mgh_post, 1, cumsum))/rowSums(mgh_post)))
    # log10 SFR averaged over the last tsf Gyr (the -9 converts /Gyr -> /yr).
    sfr <- log10(rowSums(sfh_post[,1:isf])/T.gyr[isf])-9.
    relsfr <- sfr - log10(rowSums(sfh_post)/(cosmo::dcos(Inf)$dT-cosmo::dcos(z)$dT)) + 6
    sigma_sfr <- sfr - binarea
    # Local reassignment: mstar now holds log10 total stellar mass draws.
    mstar <- log10(b_st %*% mstar)
    sigma_mstar <- mstar - binarea
    ssfr <- sigma_sfr - sigma_mstar
    list(sfh_post=sfh_post, mgh_post=mgh_post, totalmg_post=totalmg_post,
         mstar=mstar, sigma_mstar=sigma_mstar, 
         sfr=sfr, sigma_sfr=sigma_sfr, 
         ssfr=ssfr, relsfr=relsfr)
}
#' Apply get_sfh() to every fiber/bin of a batch of Stan fits.
#'
#' @param gdat Galaxy data list (metadata, optional fibersinbin).
#' @param sfits Batch fit results with 3-D b_st draws (nsim x nt*nz x nf).
#' @param tsf Recent-SF timescale in Gyr (currently not forwarded —
#'   NOTE(review): get_sfh is called without tsf, so its default is used).
#' @return List of per-fiber posterior arrays/matrices mirroring get_sfh,
#'   plus totalmg_post summed over fibers and sfr_tot (log10 summed SFR).
batch_sfh <- function(gdat, sfits, tsf=0.1) {
    b_st <- sfits$b_st
    norm_st <- sfits$norm_st
    dims <- dim(b_st)
    nsim <- dims[1]
    nt <- length(ages)
    nz <- dims[2]/nt
    nf <- dims[3]       # number of fibers/bins
    # Batch fits with a global normalization store it as norm_g; in that
    # case norm_st is inverted to match get_sfh's convention.
    if (exists("norm_g", sfits)) {
        norm_g <- sfits$norm_g
        norm_st <- 1/norm_st
    } else {
        norm_g <- rep(1, nf)
    }
    z <- gdat$meta$z
    if (!exists("fibersinbin", gdat)) {
        fibersinbin <- rep(1, nf)
    } else {
        fibersinbin <- gdat$fibersinbin
    }
    # Preallocate per-fiber posterior containers.
    sfh_post <- array(NA, dim=c(nsim, nt, nf))
    mgh_post <- array(0, dim=c(nsim, nt+1, nf))
    totalmg_post <- matrix(0, nsim, nt+1)
    mstar <- matrix(NA, nsim, nf)
    sigma_mstar <- matrix(NA, nsim, nf)
    sfr <- matrix(NA, nsim, nf)
    sigma_sfr <- matrix(NA, nsim, nf)
    ssfr <- matrix(NA, nsim, nf)
    relsfr <- matrix(NA, nsim, nf)
    for (i in 1:nf) {
        # Skip fibers that were never fit (all-NA draws).
        if (is.na(b_st[1, 1, i])) next
        sfi <- get_sfh(b_st=b_st[,,i]*norm_g[i], norm_st=norm_st[,i], z=z, fibersinbin=fibersinbin[i])
        sfh_post[,,i] <- sfi$sfh_post
        mgh_post[,,i] <- sfi$mgh_post
        totalmg_post <- totalmg_post + sfi$totalmg_post
        mstar[, i] <- sfi$mstar
        sigma_mstar[, i] <- sfi$sigma_mstar
        sfr[, i] <- sfi$sfr
        sigma_sfr[, i] <- sfi$sigma_sfr
        ssfr[, i] <- sfi$ssfr
        relsfr[,i] <- sfi$relsfr
    }
    # Total SFR across fibers (sum in linear units, report in log10).
    sfr_tot <- log10(rowSums(10^sfr, na.rm=TRUE))
    list(sfh_post=sfh_post, mgh_post=mgh_post, totalmg_post=totalmg_post, sfr_tot=sfr_tot,
         mstar=mstar, sigma_mstar=sigma_mstar,
         sfr=sfr, sigma_sfr=sigma_sfr,
         ssfr=ssfr, relsfr=relsfr)
}
## some sorta useful summary measures
#' Summary proxies derived from the stellar coefficient posteriors:
#' mass-weighted mean log age, r-band luminosity-weighted mean log age,
#' and the g-i color.
#'
#' Accepts either a stanfit_one() result list or explicit b_st/norm_st,
#' mirroring get_sfh(). NOTE(review): `ages`, `Z`, `gri.ssp` appear to be
#' package data — confirm. Unlike get_sfh this branch tests
#' exists("a", post) rather than exists("norm_g", ins) — verify intended.
get_proxies <- function(...) {
    ins <- list(...)
    if (is.list(ins[[1]])) {
        ins <- ins[[1]]
        post <- rstan::extract(ins$stanfit)
        b_st <- post$b_st
        norm_st <- ins$norm_st
        if (exists("a", post)) {
            b_st <- b_st*ins$norm_g
            norm_st <- 1/norm_st
        }
    } else {
        b_st <- ins$b_st
        norm_st <- ins$norm_st
    }
    # Undo the per-column normalization of the coefficients.
    b_st <- t(t(b_st)*norm_st)
    nz <- length(Z)
    nt <- length(ages)
    T.gyr <- 10^(ages-9)
    # Mass-weighted mean log10 age (yr).
    tbar <- log10((b_st %*% rep(T.gyr, nz))/rowSums(b_st)) + 9
    # r-band luminosity-weighted mean log10 age (yr).
    tbar_lum <- log10((b_st %*% (rep(T.gyr, nz) * gri.ssp["r",]))/
                      ((b_st %*% gri.ssp["r",]))) + 9
    # Model g-i color from the SSP band fluxes.
    g_i <- 2.5*log10(b_st %*% gri.ssp["i",]) - 2.5*log10(b_st %*% gri.ssp["g",])
    data.frame(tbar=tbar, tbar_lum=tbar_lum, g_i=g_i)
}
## emission line fluxes, luminosity density, equivalent width
#' Emission line fluxes, log luminosities, luminosity surface densities
#' and equivalent widths from the posterior draws of one fiber.
#'
#' Accepts either a stanfit_one() result list or explicit
#' b_st/b_em/norm_st/norm_em (+ in_em) arguments.
#'
#' @param z Redshift; @param fibersinbin fibers per bin;
#' @param ew_width Half-width (in wavelength pixels of lib.ssp) of the
#'   window used to estimate the stellar continuum under each line.
#' @return List of nsim x nline matrices: flux_em, logl_em (erg/s),
#'   sigma_logl_em (per kpc^2), ew_em; columns named by emission line.
#' NOTE(review): `emlines` and `lib.ssp` appear to be package data —
#' confirm; flux units implied by the /10000 factor are not verifiable
#' from this chunk.
get_em <- function(..., z, fibersinbin=1, ew_width=15) {
    ins <- list(...)
    if (is.list(ins[[1]])) {
        ins <- ins[[1]]
        post <- rstan::extract(ins$stanfit)
        b_st <- post$b_st
        b_em <- post$b_em
        norm_st <- ins$norm_st
        norm_em <- ins$norm_em
        if (exists("norm_g", ins)) {
            b_st <- b_st*ins$norm_g
            b_em <- b_em*ins$norm_g
            norm_st <- 1/norm_st
        }
    } else {
        b_st <- ins$b_st
        b_em <- ins$b_em
        norm_st <- ins$norm_st
        norm_em <- ins$norm_em
    }
    # Restrict to the lines actually included in this fit.
    emlines <- emlines[ins$in_em]
    b_st <- t(t(b_st)*norm_st)
    ne <- ncol(b_em)
    nsim <- nrow(b_em)
    # log10 projected bin area (kpc^2) for surface densities.
    binarea <- log10(pi*fibersinbin*cosmo::ascale(z)^2)
    em.mult <- emlines*log(10)/10000
    flux_em <- matrix(NA, nsim, ne)
    logl_em <- matrix(NA, nsim, ne)
    sigma_logl_em <- matrix(NA, nsim, ne)
    ew_em <- matrix(NA, nsim, ne)
    flux_em <- t(t(b_em)*em.mult)*norm_em
    logl_em <- cosmo::loglum.ergs(flux_em, z)
    sigma_logl_em <- logl_em - binarea
    # Stellar continuum model, used for equivalent widths.
    mu_st <- tcrossprod(b_st, as.matrix(lib.ssp[, -1]))
    il_em <- findInterval(emlines, lib.ssp$lambda)
    for (i in 1:ne) {
        # Mean continuum in a window of +/- ew_width pixels around the line.
        intvl <- (il_em[i]-ew_width):(il_em[i]+ew_width)
        fc <- rowMeans(mu_st[, intvl])
        ew_em[, i] <- flux_em[, i]/fc
    }
    colnames(flux_em) <- names(emlines)
    colnames(logl_em) <- names(emlines)
    colnames(sigma_logl_em) <- names(emlines)
    colnames(ew_em) <- names(emlines)
    list(flux_em=flux_em, logl_em=logl_em, sigma_logl_em=sigma_logl_em, ew_em=ew_em)
}
#' Apply get_em() to every fiber/bin of a batch of Stan fits.
#'
#' Lines that were excluded from a given fiber's fit stay NA in the
#' output arrays; column (line) names are attached for all outputs.
#'
#' @param gdat Galaxy data list (meta$z, fibersinbin, xpos).
#' @param sfits Batch fit results (b_em, b_st, norms, in_em per fiber).
#' @param ew_width Continuum window half-width forwarded to get_em().
#' @return List of nsim x nline x nf arrays: flux_em, sigma_logl_em, ew_em.
batch_em <- function(gdat, sfits, ew_width=15) {
    nsim <- dim(sfits$b_em)[1]
    ne <- length(emlines)
    nf <- length(gdat$xpos)
    norm_st <- sfits$norm_st
    # Global-normalization batch fits invert norm_st (see batch_sfh).
    if (exists("norm_g", sfits)) {
        norm_g <- sfits$norm_g
        norm_st <- 1/sfits$norm_st
    } else {
        norm_g <- rep(1, nf)
    }
    flux_em <- array(NA, dim=c(nsim, ne, nf))
    sigma_logl_em <- array(NA, dim=c(nsim, ne, nf))
    ew_em <- array(NA, dim=c(nsim, ne, nf))
    for (i in 1:nf) {
        # Skip fibers that were never fit.
        if (is.na(sfits$b_st[1, 1, i])) next
        # Indexes of the emission lines included in this fiber's fit.
        in_em <- sfits$in_em[!is.na(sfits$in_em[,i]), i]
        # norm_st may be per-fiber scalar or per-coefficient vector.
        if(is.null(dim(norm_st))) {
          nst <- norm_st[i]
        } else {
          nst <- norm_st[,i]
        }
        emi <- get_em(b_em=sfits$b_em[,in_em,i]*norm_g[i], b_st=sfits$b_st[,,i]*norm_g[i], 
                      norm_em=sfits$norm_em[i], norm_st=nst, 
                      in_em=in_em, z=gdat$meta$z, 
                      fibersinbin=gdat$fibersinbin[i], ew_width=ew_width)
        flux_em[,in_em,i] <- emi$flux_em
        sigma_logl_em[,in_em,i] <- emi$sigma_logl_em
        ew_em[,in_em,i] <- emi$ew_em
    }
    dimnames(flux_em)[[2]] <- names(emlines)
    dimnames(sigma_logl_em)[[2]] <- names(emlines)
    dimnames(ew_em)[[2]] <- names(emlines)
    list(flux_em=flux_em, sigma_logl_em=sigma_logl_em, ew_em=ew_em)
}
## bpt class from [N II]/Halpha
#' BPT spectral classification for each fiber from the [N II]/Halpha
#' diagnostic diagram.
#'
#' Classes: NO EM (no detected lines), EL (some line above threshold but
#' the 4 BPT lines not all detected), SF, COMP, LINER, AGN. The curve
#' constants are presumably the Kauffmann (2003) / Kewley (2001)
#' demarcations — confirm against the cited papers.
#'
#' @param flux_em nsim x nline x nfiber array of line flux draws
#'   (columns named as produced by batch_em()).
#' @param snthresh Minimum mean/sd (S/N) for a line to count as detected.
#' @return Factor of length nfiber with the class per fiber (NA where the
#'   fiber has no draws at all).
batch_bptclass <- function(flux_em, snthresh=3) {
  nb <- dim(flux_em)[3]
  bpt <- as.factor(rep("NO EM", nb))
  levels(bpt) <- c("NO EM", "EL", "SF", "COMP", "LINER", "AGN")
  # Posterior mean and sd per line and fiber; their ratio is the S/N.
  f_m <- apply(flux_em, c(2, 3), mean)
  f_sd <- apply(flux_em, c(2, 3), sd)
  for (i in 1:nb) {
    # Fiber never fit: leave class undefined.
    if (all(is.na(f_m[, i]))) {
      bpt[i] <- NA
      next
    }
    # At least one detected line upgrades NO EM -> EL.
    if(any(f_m[, i]/f_sd[, i] > snthresh, na.rm=TRUE)) bpt[i] <- "EL"
    # All four BPT lines must be present to classify further.
    if (any(is.na(f_m[c("h_beta", "oiii_5007", "h_alpha", "nii_6584"), i]))) next
    if (f_m["h_beta", i]/f_sd["h_beta", i] > snthresh &&
        f_m["oiii_5007", i]/f_sd["oiii_5007", i] > snthresh &&
        f_m["h_alpha", i]/f_sd["h_alpha", i] > snthresh &&
        f_m["nii_6584", i]/f_sd["nii_6584", i] > snthresh) {
          o3hbeta <- log10(f_m["oiii_5007", i]/f_m["h_beta", i])
          n2halpha <- log10(f_m["nii_6584", i]/f_m["h_alpha", i])
          # Below the star-forming demarcation curve.
          if ((o3hbeta <= 0.61/(n2halpha-0.05)+1.3) && 
              (n2halpha <= 0.05)) {
                bpt[i] <- "SF"
                next
          }
          # Between the SF and extreme-starburst curves: composite.
          if ((o3hbeta > 0.61/(n2halpha-0.05)+1.3 || n2halpha > 0.05) &&
              (o3hbeta <= 0.61/(n2halpha-0.47)+1.19)) {
                bpt[i] <- "COMP"
                next
          }
          # Above the starburst curve: AGN vs LINER split by a linear cut.
          if ((o3hbeta > 0.61/(n2halpha-0.47)+1.19 || n2halpha > 0.47) &&
              (o3hbeta > 1.05*n2halpha+0.45)) {
                bpt[i] <- "AGN"
          } else {
                bpt[i] <- "LINER"
          }
    }
  }
  bpt
}
## emission line ratios and various "strong line" metallicity calibrations
#' Emission-line diagnostic ratios and "strong line" gas metallicity
#' calibrations (log(O/H) estimates from Pettini & Pagel 2004, Tremonti
#' et al. 2004, and Dopita et al. 2016, per the original source).
#'
#' @param flux_em Matrix of line-flux draws with named columns
#'   ("oiii_5007", "h_beta", "oi_6300", "h_alpha", "nii_6584",
#'   "sii_6717", "sii_6730", "oii_3727", "oii_3729", "oiii_4959").
#' @param tauv V-band optical depth draws used to de-redden the blue
#'   lines entering R23.
#' @param delta Attenuation-curve slope modifier passed to `alaw`.
#' @param tauv_mult Multiplier applied to `tauv` before de-reddening.
#' @param alaw Attenuation-law function of (lambda, tauv, delta).
#' @return data.frame of line ratios and metallicity calibrations, one
#'   row per posterior draw.
get_lineratios <- function(flux_em, tauv, delta=0, tauv_mult=1, alaw=calzetti_mod) {
  # log10 ratio of two (sums of) line fluxes.
  lograt <- function(num, den) log10(num/den)
  # Multiply a flux by the attenuation correction at wavelength `lambda`.
  deredden <- function(flux, lambda) flux*alaw(lambda, -tauv*tauv_mult, delta)

  o3hbeta  <- lograt(flux_em[, "oiii_5007"], flux_em[, "h_beta"])
  o1halpha <- lograt(flux_em[, "oi_6300"], flux_em[, "h_alpha"])
  n2halpha <- lograt(flux_em[, "nii_6584"], flux_em[, "h_alpha"])
  s2halpha <- lograt(flux_em[, "sii_6717"] + flux_em[, "sii_6730"], flux_em[, "h_alpha"])

  # R23 needs de-reddened [O II], [O III] and Hbeta fluxes.
  o2 <- deredden(flux_em[, "oii_3727"] + flux_em[, "oii_3729"], 3728.)
  o3 <- deredden(flux_em[, "oiii_4959"] + flux_em[, "oiii_5007"], 4980.)
  hb <- deredden(flux_em[, "h_beta"], 4863.)
  r23 <- log10((o2 + o3)/hb)
  o3n2 <- o3hbeta - n2halpha

  # Strong-line log(O/H) calibrations.
  oh_n2 <- 9.37 + 2.03*n2halpha + 1.26*n2halpha^2 + 0.32*n2halpha^3
  oh_o3n2 <- 8.73 - 0.32*o3n2
  oh_r23 <- 9.185 - 0.313*r23 - 0.264*r23^2 - 0.321*r23^3
  oh_n2s2ha <- 8.77 + n2halpha - s2halpha + 0.264*n2halpha

  data.frame(o3hbeta=o3hbeta, o1halpha=o1halpha, n2halpha=n2halpha, s2halpha=s2halpha,
             r23=r23, o3n2=o3n2,
             oh_n2=oh_n2, oh_o3n2=oh_o3n2, oh_r23=oh_r23, oh_n2s2ha=oh_n2s2ha)
}
#' Summarize the full batch of posterior fits into one data frame, one
#' row per fiber/bin: dust, stellar mass and SFR surface densities, age
#' proxies, Balmer-decrement dust estimates, Halpha luminosities (raw and
#' de-reddened two ways), line ratios, metallicity calibrations and BPT
#' class. Columns come in mean/std/lo/hi sets where lo/hi are highest-
#' density-interval limits at credibility `clim`.
#'
#' NOTE(review): this function builds its per-variable output vectors via
#' assign()/get()-style metaprogramming (the varnames/suffixes loops), so
#' statement order is load-bearing throughout.
sum_batchfits <- function(gdat, nnfits, sfits, drpcat, alaw=calzetti_mod, intr_bd=2.86, clim=0.95) {
  # Optical depth implied by the observed Balmer decrement, floored at 0.
  tauv.bd <- function(flux_em, intr_bd, delta=0, alaw) {
    bd <- flux_em[,'h_alpha']/flux_em[,'h_beta']
    bd[!is.finite(bd)] <- NA
    tauv <- log(bd/intr_bd)/(log(alaw(6562.8,1, delta))-log(alaw(4861.3,1, delta)))
    tauv[tauv<0] <- 0
    tauv
  }
  # Attenuation-corrected log Halpha luminosity for a given tauv.
  logl.ha.cor <- function(logl.halpha, tauv, delta=0, alaw) {
    att <- alaw(lambda=6562.8, tauv, delta)
    logl.halpha - log10(att)
  }
  nf <- length(gdat$xpos)
  # Number of fibers contributing to each bin (defaults to 1).
  fibersinbin <- rep(1, nf)
  if (exists("bin.fiber", gdat)) {
    if (!exists("fibersinbin", gdat)) {
      bin.fiber <- gdat$bin.fiber
      bin.no <- unique(bin.fiber[!is.na(bin.fiber)])
      for (i in seq_along(bin.no)) {
        fibersinbin[i] <- length(which(bin.fiber == bin.no[i]))
      }
    } else {
      fibersinbin <- gdat$fibersinbin
    }
  }
  nsim <- nrow(sfits$b_st)
  nt <- length(ages)
  fiberarea <- pi*cosmo::ascale(gdat$meta$z)^2
  plateifu <- rep(gdat$meta$plateifu, nf)
  ## projected distance in kpc and relative to effective radius
  d_kpc <- cosmo::ascale(gdat$meta$z)*sqrt(gdat$xpos^2+gdat$ypos^2)
  d_re <- sqrt(gdat$xpos^2+gdat$ypos^2)/
              drpcat$nsa_petro_th50[match(gdat$meta$plateifu,drpcat$plateifu)]
  ## stuff taken from nnfits (spectral indices and their errors)
  d4000_n <- nnfits$d4000_n
  d4000_n_err <- nnfits$d4000_n_err
  lick_hd_a <- nnfits$lick[,'HdeltaA']
  lick_hd_a_err <- nnfits$lick.err[,'HdeltaA_err']
  # Combined [MgFe] index; NaN (from negative products) becomes NA.
  mgfe <- sqrt(nnfits$lick[,'Mg_b']*(0.72*nnfits$lick[,'Fe5270']+0.28*nnfits$lick[,'Fe5335']))
  mgfe[is.nan(mgfe)] <- NA
  ## tauv from batch fits
  tauv_m <- colMeans(sfits$tauv)
  tauv_std <- apply(sfits$tauv, 2, sd)
  # Attenuation slope parameter delta (zero if the model had none).
  if (exists("delta", sfits)) {
    delta <- sfits$delta
  } else {
    delta <- matrix(0, nsim, nf)
  }
  delta_m <- colMeans(delta)
  delta_std <- apply(delta, 2, sd)
  if (exists("ll", sfits)) {
    ll_m <- colMeans(sfits$ll)
  } else {
    ll_m <- rep(NA, nf)
  }
  mgh_post <- array(NA, dim=c(nsim, nt+1, nf))
  sfh_post <- array(NA, dim=c(nsim, nt, nf))
  totalmg_post <- matrix(0, nsim, nt+1)
  # All per-fiber summary variables; each gets 4 output vectors (_m, _std,
  # _lo, _hi) created dynamically by the assign() loop below.
  varnames <- c("sigma_mstar", "sigma_sfr", "ssfr", "relsfr",
                "tbar", "tbar_lum", "g_i",
                "tauv_bd", "sigma_logl_ha", "sigma_logl_ha_ctauv", "sigma_logl_ha_ctauv_bd",
                "eqw_ha", "o3hbeta", "o1halpha", "n2halpha", "s2halpha",
                "r23", "o3n2", "oh_n2", "oh_o3n2", "oh_r23", "oh_n2s2ha")
  suffixes <- c("m", "std", "lo", "hi")
  for (i in seq_along(varnames)) {
    for (j in seq_along(suffixes)) {
      assign(paste(varnames[i], suffixes[j], sep="_"), numeric(nf))
    }
  }
  # Batch star-formation history; first 4 varnames are filled directly
  # from its matrices (columns = fibers).
  sfh_all <- batch_sfh(gdat, sfits)
  for (i in 1:4) {
    assign(paste(varnames[i], suffixes[1], sep="_"), colMeans(sfh_all[[varnames[i]]]))
    assign(paste(varnames[i], suffixes[2], sep="_"), apply(sfh_all[[varnames[i]]], 2, sd))
  }
  # Batch emission-line posteriors and the per-fiber BPT classification.
  em_all <- batch_em(gdat, sfits)
  bpt <- batch_bptclass(em_all$flux_em)
  for (i in 1:nf) {
    ## star formation history, etc. — HDI limits per fiber
    quants <- hdiofmcmc(sfh_all$sigma_mstar[,i])
    sigma_mstar_lo[i] <- quants[1]
    sigma_mstar_hi[i] <- quants[2]
    quants <- hdiofmcmc(sfh_all$sigma_sfr[,i])
    sigma_sfr_lo[i] <- quants[1]
    sigma_sfr_hi[i] <- quants[2]
    quants <- hdiofmcmc(sfh_all$ssfr[,i])
    ssfr_lo[i] <- quants[1]
    ssfr_hi[i] <- quants[2]
    quants <- hdiofmcmc(sfh_all$relsfr[,i])
    relsfr_lo[i] <- quants[1]
    relsfr_hi[i] <- quants[2]
    # norm_st may be a per-fiber scalar or a per-coefficient vector.
    if (is.null(dim(sfits$norm_st))) {
      norm_st <- sfits$norm_st[i]
    } else {
      norm_st <- 1/sfits$norm_st[,i]
    }
    # Age and color proxies for this fiber.
    proxi <- get_proxies(b_st=sfits$b_st[,,i], norm_st=norm_st)
    tbar_m[i] <- mean(proxi$tbar)
    tbar_std[i] <- sd(proxi$tbar)
    quants <- hdiofmcmc(proxi$tbar, credmass=clim)
    tbar_lo[i] <- quants[1]
    tbar_hi[i] <- quants[2]
    tbar_lum_m[i] <- mean(proxi$tbar_lum)
    tbar_lum_std[i] <- sd(proxi$tbar_lum)
    quants <- hdiofmcmc(proxi$tbar_lum, credmass=clim)
    tbar_lum_lo[i] <- quants[1]
    tbar_lum_hi[i] <- quants[2]
    g_i_m[i] <- mean(proxi$g_i)
    g_i_std[i] <- sd(proxi$g_i)
    quants <- hdiofmcmc(proxi$g_i, credmass=clim)
    g_i_lo[i] <- quants[1]
    g_i_hi[i] <- quants[2]
    # Line ratios and Balmer-decrement dust estimate.
    linesi <- get_lineratios(em_all$flux_em[,,i], sfits$tauv[,i], delta[,i], alaw=alaw)
    tauv_bd <- tauv.bd(em_all$flux_em[,,i], intr_bd=intr_bd, delta[,i], alaw=alaw)
    tauv_bd_m[i] <- mean(tauv_bd)
    tauv_bd_std[i] <- sd(tauv_bd)
    quants <- hdiofmcmc(tauv_bd, credmass=clim)
    tauv_bd_lo[i] <- quants[1]
    tauv_bd_hi[i] <- quants[2]
    ## uncorrected Halpha luminosity from stan fits
    sigma_logl_ha_m[i] <- mean(em_all$sigma_logl_em[,"h_alpha", i])
    sigma_logl_ha_std[i] <- sd(em_all$sigma_logl_em[,"h_alpha", i])
    quants <- hdiofmcmc(em_all$sigma_logl_em[,"h_alpha", i], credmass=clim)
    sigma_logl_ha_lo[i] <- quants[1]
    sigma_logl_ha_hi[i] <- quants[2]
    ## correct from stan fit estimate of tauv
    logl_ha_c <- logl.ha.cor(em_all$sigma_logl_em[, "h_alpha", i], sfits$tauv[,i], delta[,i], alaw=alaw)
    sigma_logl_ha_ctauv_m[i] <- mean(logl_ha_c)
    sigma_logl_ha_ctauv_std[i] <- sd(logl_ha_c)
    quants <- hdiofmcmc(logl_ha_c, credmass=clim)
    sigma_logl_ha_ctauv_lo[i] <- quants[1]
    sigma_logl_ha_ctauv_hi[i] <- quants[2]
    ## correct from balmer decrement
    logl_ha_c <- logl.ha.cor(em_all$sigma_logl_em[, "h_alpha", i], tauv_bd, delta[,i], alaw=alaw)
    sigma_logl_ha_ctauv_bd_m[i] <- mean(logl_ha_c)
    sigma_logl_ha_ctauv_bd_std[i] <- sd(logl_ha_c)
    quants <- hdiofmcmc(logl_ha_c, credmass=clim)
    sigma_logl_ha_ctauv_bd_lo[i] <- quants[1]
    sigma_logl_ha_ctauv_bd_hi[i] <- quants[2]
    # Halpha equivalent width.
    eqw_ha_m[i] <- mean(em_all$ew_em[,"h_alpha", i])
    eqw_ha_std[i] <- sd(em_all$ew_em[,"h_alpha", i])
    quants <- hdiofmcmc(em_all$ew_em[,"h_alpha", i], credmass=clim)
    eqw_ha_lo[i] <- quants[1]
    eqw_ha_hi[i] <- quants[2]
    ## some emission line ratios
    o3hbeta_m[i] <- mean(linesi$o3hbeta)
    o3hbeta_std[i] <- sd(linesi$o3hbeta)
    quants <- hdiofmcmc(linesi$o3hbeta, credmass=clim)
    o3hbeta_lo[i] <- quants[1]
    o3hbeta_hi[i] <- quants[2]
    o1halpha_m[i] <- mean(linesi$o1halpha)
    o1halpha_std[i] <- sd(linesi$o1halpha)
    quants <- hdiofmcmc(linesi$o1halpha, credmass=clim)
    o1halpha_lo[i] <- quants[1]
    o1halpha_hi[i] <- quants[2]
    n2halpha_m[i] <- mean(linesi$n2halpha)
    n2halpha_std[i] <- sd(linesi$n2halpha)
    quants <- hdiofmcmc(linesi$n2halpha, credmass=clim)
    n2halpha_lo[i] <- quants[1]
    n2halpha_hi[i] <- quants[2]
    s2halpha_m[i] <- mean(linesi$s2halpha)
    s2halpha_std[i] <- sd(linesi$s2halpha)
    quants <- hdiofmcmc(linesi$s2halpha, credmass=clim)
    s2halpha_lo[i] <- quants[1]
    s2halpha_hi[i] <- quants[2]
    r23_m[i] <- mean(linesi$r23)
    r23_std[i] <- sd(linesi$r23)
    quants <- hdiofmcmc(linesi$r23, credmass=clim)
    r23_lo[i] <- quants[1]
    r23_hi[i] <- quants[2]
    o3n2_m[i] <- mean(linesi$o3n2)
    o3n2_std[i] <- sd(linesi$o3n2)
    quants <- hdiofmcmc(linesi$o3n2, credmass=clim)
    o3n2_lo[i] <- quants[1]
    o3n2_hi[i] <- quants[2]
    # Strong-line metallicity calibrations.
    oh_n2_m[i] <- mean(linesi$oh_n2)
    oh_n2_std[i] <- sd(linesi$oh_n2)
    quants <- hdiofmcmc(linesi$oh_n2, credmass=clim)
    oh_n2_lo[i] <- quants[1]
    oh_n2_hi[i] <- quants[2]
    oh_o3n2_m[i] <- mean(linesi$oh_o3n2)
    oh_o3n2_std[i] <- sd(linesi$oh_o3n2)
    quants <- hdiofmcmc(linesi$oh_o3n2, credmass=clim)
    oh_o3n2_lo[i] <- quants[1]
    oh_o3n2_hi[i] <- quants[2]
    oh_r23_m[i] <- mean(linesi$oh_r23)
    oh_r23_std[i] <- sd(linesi$oh_r23)
    quants <- hdiofmcmc(linesi$oh_r23, credmass=clim)
    oh_r23_lo[i] <- quants[1]
    oh_r23_hi[i] <- quants[2]
    oh_n2s2ha_m[i] <- mean(linesi$oh_n2s2ha)
    oh_n2s2ha_std[i] <- sd(linesi$oh_n2s2ha)
    quants <- hdiofmcmc(linesi$oh_n2s2ha, credmass=clim)
    oh_n2s2ha_lo[i] <- quants[1]
    oh_n2s2ha_hi[i] <- quants[2]
  }
  # One row per fiber, summary columns grouped by variable.
  data.frame(plateifu, d4000_n, d4000_n_err, 
             lick_hd_a, lick_hd_a_err, mgfe,
             d_kpc, d_re,
             tauv_m, tauv_std,
             delta_m, delta_std, ll_m=ll_m,
             sigma_mstar_m , sigma_mstar_std , sigma_mstar_lo , sigma_mstar_hi ,
             sigma_sfr_m , sigma_sfr_std , sigma_sfr_lo , sigma_sfr_hi ,
             ssfr_m , ssfr_std , ssfr_lo , ssfr_hi ,
             relsfr_m , relsfr_std , relsfr_lo , relsfr_hi ,
             tbar_m , tbar_std , tbar_lo , tbar_hi ,
             tbar_lum_m , tbar_lum_std , tbar_lum_lo , tbar_lum_hi ,
             g_i_m , g_i_std , g_i_lo , g_i_hi ,
             tauv_bd_m , tauv_bd_std , tauv_bd_lo , tauv_bd_hi ,
             sigma_logl_ha_m , sigma_logl_ha_std , sigma_logl_ha_lo , sigma_logl_ha_hi ,
             sigma_logl_ha_ctauv_m , sigma_logl_ha_ctauv_std , sigma_logl_ha_ctauv_lo , sigma_logl_ha_ctauv_hi ,
             sigma_logl_ha_ctauv_bd_m , sigma_logl_ha_ctauv_bd_std , sigma_logl_ha_ctauv_bd_lo , sigma_logl_ha_ctauv_bd_hi ,
             eqw_ha_m, eqw_ha_std, eqw_ha_lo, eqw_ha_hi,
             o3hbeta_m , o3hbeta_std , o3hbeta_lo , o3hbeta_hi ,
             o1halpha_m , o1halpha_std , o1halpha_lo , o1halpha_hi ,
             n2halpha_m , n2halpha_std , n2halpha_lo , n2halpha_hi ,
             s2halpha_m , s2halpha_std , s2halpha_lo , s2halpha_hi ,
             r23_m , r23_std , r23_lo , r23_hi ,
             o3n2_m , o3n2_std , o3n2_lo , o3n2_hi ,
             oh_n2_m , oh_n2_std , oh_n2_lo , oh_n2_hi ,
             oh_o3n2_m , oh_o3n2_std , oh_o3n2_lo , oh_o3n2_hi ,
             oh_r23_m , oh_r23_std , oh_r23_lo , oh_r23_hi ,
             oh_n2s2ha_m , oh_n2s2ha_std , oh_n2s2ha_lo , oh_n2s2ha_hi ,
             bpt=bpt)
}
## estimated mean mass fraction in broad ages bins
#' Estimated mean (and sd) mass fraction formed in broad lookback-age bins.
#'
#' @param mgh_post Array (nsim x nt+1 x nfiber) of cumulative, normalized
#'   mass growth histories as produced by batch_sfh (first column is 1).
#' @param ages log10 ages (yr) of the SSP grid.
#' @param ages.bins Interior bin edges in Gyr; bins run from 0 through the
#'   edges to the oldest grid age.
#' @return data.frame, one row per fiber: mean and sd of the mass fraction
#'   per age bin; all-zero fibers (never fit) become NA.
sum_binnedmass <- function(mgh_post, ages, ages.bins = c(0.1, 2.5, 5)) {
  T.gyr <- 10^(ages-9)
  # Indexes into the age grid closest below each requested bin edge.
  ind.bins <- findInterval(ages.bins, T.gyr)
  dims <- dim(mgh_post)
  nsim <- dims[1]
  nfib <- dims[3]
  # NOTE: nt is redefined further down for the edge vector — careful.
  nt <- length(ages.bins)+2
  # Cumulative mass at the bin edges; consecutive differences give the
  # mass fraction formed within each bin.
  mgh.binned <- mgh_post[, c(1, ind.bins, dims[2]), ]
  mdiff <- mgh.binned[, 1:(nt-1),] - mgh.binned[, 2:nt, ]
  mb_m <- apply(mdiff, c(2, 3), mean)
  mb_sd <- apply(mdiff, c(2, 3), sd)
  df <- data.frame(cbind(t(mb_m), t(mb_sd)))
  # Fibers that were never fit have exactly-zero fractions -> NA.
  df[df==0] <- NA
  # Human-readable column labels "a < T < b" with _m / _sd suffixes.
  T.ind <- c(0, T.gyr[ind.bins], T.gyr[length(T.gyr)])
  nt <- length(T.ind)
  bnames <- paste(formatC(T.ind[1:(nt-1)], format="f", digits=1), " < T < ", 
                  formatC(T.ind[2:nt], format="f", digits=1), sep="")
  names(df) <- c(paste(bnames, "_m", sep=""), paste(bnames, "_sd", sep=""))
  df
}
## computes highest density interval from a sample of representative values,
## estimated as shortest credible interval.
## arguments:
## samplevec
## is a vector of representative values from a probability distribution.
## credmass
## is a scalar between 0 and 1, indicating the mass within the credible
## interval that is to be estimated.
## value:
## hdilim is a vector containing the limits of the hdi
#' Highest density interval of an MCMC sample, estimated as the shortest
#' interval containing `credmass` of the sorted draws.
#'
#' @param samplevec Numeric vector of representative posterior draws.
#' @param credmass Mass inside the credible interval (scalar in (0, 1)).
#' @return Length-2 vector c(lower, upper) of the HDI limits.
hdiofmcmc <- function(samplevec , credmass=0.95) {
  draws <- sort(samplevec)
  # Every candidate interval spans n_in consecutive gaps of the sorted
  # draws; the narrowest such interval approximates the HDI.
  n_in <- floor(credmass * length(draws))
  n_starts <- length(draws) - n_in
  widths <- draws[seq_len(n_starts) + n_in] - draws[seq_len(n_starts)]
  first <- which.min(widths)
  c(draws[first], draws[first + n_in])
}
|
530367198dc931fb21330f0bc816562ca423aede
|
f4a3019d8055d06939abcf952f451f9a328af6a8
|
/self_test.R
|
d8268cb00ae01d050b62b775ef83c7b4787d16fb
|
[] |
no_license
|
COMP-1433/HousingPricePredictionRCode
|
048bbb02ea65cf9cfc676f125cce19b03934ca67
|
0c18c372bf863bdfbf268ea7b4aa56b9244b707c
|
refs/heads/main
| 2023-04-23T19:04:14.561183
| 2021-05-02T14:27:01
| 2021-05-02T14:27:01
| 363,667,635
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 6,839
|
r
|
self_test.R
|
## Housing price prediction: load the train/test sets and split columns
## by type ahead of the categorical recoding below.
# import data
train<- read.csv("train.csv")
test<- read.csv("test.csv")
# Drop the near-constant Utilities column from both sets.
train<-subset(train,select = -c(Utilities))
test<-subset(test,select = -c(Utilities))
train
#typeof(train)
#typeof(test)
# Character (categorical) columns of the training set.
train_char <- train[,sapply(train,is.character)]
#train_char
#the character data in train
# Integer (numeric) columns of the training set.
train_int <- train[,sapply(train,is.integer)]
train_int
#train_int
#the integer data in train
# Same type split for the test set.
test_char <- test[,sapply(test,is.character)]
#test_char
test_int <- test[,sapply(test,is.integer)]
#test_int
## Recode the categorical housing features of the training set to numeric
## codes so they can feed a numeric model.
## NOTE(review): the blanket NA replacement below also turns NA cells in
## character columns into the string "0", which as.numeric() later maps
## to 0 — presumably intentional; confirm.
# Qi Shihao
train[is.na(train)] <- 0
train$MSZoning[train$MSZoning == "C (all)"] <- 1
train$MSZoning[train$MSZoning == "FV"] <- 2
train$MSZoning[train$MSZoning == "RH"] <- 3
train$MSZoning[train$MSZoning == "RL"] <- 4
train$MSZoning[train$MSZoning == "RM"] <- 5
train$MSZoning <- as.numeric(train$MSZoning)
train$Street[train$Street == "Grvl"] <- 1
train$Street[train$Street == "Pave"] <- 2
train$Street <- as.numeric(train$Street)
train$Alley[train$Alley == "Grvl"] <- 1
train$Alley[train$Alley == "Pave"] <- 2
train$Alley <- as.numeric(train$Alley)
train$LotShape[train$LotShape == "IR1"] <- 1
train$LotShape[train$LotShape == "IR2"] <- 2
# BUGFIX: this line previously tested "IR2" a second time, leaving "IR3"
# unmapped (those rows became NA after as.numeric()).
train$LotShape[train$LotShape == "IR3"] <- 3
train$LotShape[train$LotShape == "Reg"] <- 4
train$LotShape <- as.numeric(train$LotShape)
train$LandContour[train$LandContour == "Bnk"] <- 1
train$LandContour[train$LandContour == "HLS"] <- 2
train$LandContour[train$LandContour == "Low"] <- 3
train$LandContour[train$LandContour == "Lvl"] <- 4
train$LandContour <- as.numeric(train$LandContour)
train$LotConfig[train$LotConfig == "Corner"] <- 1
train$LotConfig[train$LotConfig == "CulDSac"] <- 2
train$LotConfig[train$LotConfig == "FR2"] <- 3
train$LotConfig[train$LotConfig == "FR3"] <- 4
train$LotConfig[train$LotConfig == "Inside"] <- 5
train$LotConfig <- as.numeric(train$LotConfig)
train$LandSlope[train$LandSlope == "Gtl"] <- 1
train$LandSlope[train$LandSlope == "Mod"] <- 2
train$LandSlope[train$LandSlope == "Sev"] <- 3
# BUGFIX: the numeric conversion was missing for LandSlope, unlike every
# other recoded column (it stayed a character vector).
train$LandSlope <- as.numeric(train$LandSlope)
train$Neighborhood[train$Neighborhood == "Blmngtn"] <- 1
train$Neighborhood[train$Neighborhood == "Blueste"] <- 2
train$Neighborhood[train$Neighborhood == "BrDale"] <- 3
train$Neighborhood[train$Neighborhood == "BrkSide"] <- 4
train$Neighborhood[train$Neighborhood == "ClearCr"] <- 5
train$Neighborhood[train$Neighborhood == "CollgCr"] <- 6
train$Neighborhood[train$Neighborhood == "Crawfor"] <- 7
train$Neighborhood[train$Neighborhood == "Edwards"] <- 8
train$Neighborhood[train$Neighborhood == "Gilbert"] <- 9
train$Neighborhood[train$Neighborhood == "IDOTRR"] <- 10
train$Neighborhood[train$Neighborhood == "MeadowV"] <- 11
train$Neighborhood[train$Neighborhood == "Mitchel"] <- 12
train$Neighborhood[train$Neighborhood == "NAmes"] <- 13
train$Neighborhood[train$Neighborhood == "NoRidge"] <- 14
train$Neighborhood[train$Neighborhood == "NPkVill"] <- 15
train$Neighborhood[train$Neighborhood == "NridgHt"] <- 16
train$Neighborhood[train$Neighborhood == "NWAmes"] <- 17
train$Neighborhood[train$Neighborhood == "OldTown"] <- 18
train$Neighborhood[train$Neighborhood == "Sawyer"] <- 19
train$Neighborhood[train$Neighborhood == "Somerst"] <- 20
train$Neighborhood[train$Neighborhood == "StoneBr"] <- 21
train$Neighborhood[train$Neighborhood == "SWISU"] <- 22
train$Neighborhood[train$Neighborhood == "Timber"] <- 23
train$Neighborhood[train$Neighborhood == "Veenker"] <- 24
train$Neighborhood <- as.numeric(train$Neighborhood)
# HE Zhejun
train$MasVnrType[is.na(train$MasVnrType)] <- "0"
train$MasVnrType[train$MasVnrType == "BrkCmn"] <- "1"
train$MasVnrType[train$MasVnrType == "BrkFace"] <- "2"
train$MasVnrType[train$MasVnrType == "CBlock"] <- "3"
train$MasVnrType[train$MasVnrType == "Stone"] <- "4"
train$MasVnrType <- as.numeric(train$MasVnrType)
# Median imputation for masonry veneer area.
train$MasVnrArea[is.na(train$MasVnrArea)] <- median(na.omit(train$MasVnrArea))
# Foundation, ordered best (BrkTil=6) to worst (Wood=1) per original.
train$Foundation[train$Foundation == "BrkTil"] <- 6
train$Foundation[train$Foundation == "CBlock"] <- 5
train$Foundation[train$Foundation == "PConc"] <- 4
train$Foundation[train$Foundation == "Slab"] <- 3
train$Foundation[train$Foundation == "Stone"] <- 2
train$Foundation[train$Foundation == "Wood"] <- 1
train$Foundation <- as.numeric(train$Foundation)
# Quality/condition scales: Ex=5 ... Po=1 (NA -> 0 where applicable).
train$ExterQual <- as.character(train$ExterQual)
train$ExterQual[train$ExterQual == "Ex"] <- "5"
train$ExterQual[train$ExterQual == "Gd"] <- "4"
train$ExterQual[train$ExterQual == "TA"] <- "3"
train$ExterQual[train$ExterQual == "Fa"] <- "2"
train$ExterQual[train$ExterQual == "Po"] <- "1"
train$ExterQual <- as.numeric(train$ExterQual)
train$ExterCond <- as.character(train$ExterCond)
train$ExterCond[train$ExterCond == "Ex"] <- "5"
train$ExterCond[train$ExterCond == "Gd"] <- "4"
train$ExterCond[train$ExterCond == "TA"] <- "3"
train$ExterCond[train$ExterCond == "Fa"] <- "2"
train$ExterCond[train$ExterCond == "Po"] <- "1"
train$ExterCond <- as.numeric(train$ExterCond)
train$BsmtQual <- as.character(train$BsmtQual)
train$BsmtQual[train$BsmtQual == "Ex"] <- "5"
train$BsmtQual[train$BsmtQual == "Gd"] <- "4"
train$BsmtQual[train$BsmtQual == "TA"] <- "3"
train$BsmtQual[train$BsmtQual == "Fa"] <- "2"
train$BsmtQual[train$BsmtQual == "Po"] <- "1"
train$BsmtQual[is.na(train$BsmtQual)] <- "0"
train$BsmtQual <- as.numeric(train$BsmtQual)
train$BsmtCond <- as.character(train$BsmtCond)
train$BsmtCond[train$BsmtCond == "Ex"] <- "5"
train$BsmtCond[train$BsmtCond == "Gd"] <- "4"
train$BsmtCond[train$BsmtCond == "TA"] <- "3"
train$BsmtCond[train$BsmtCond == "Fa"] <- "2"
train$BsmtCond[train$BsmtCond == "Po"] <- "1"
train$BsmtCond[is.na(train$BsmtCond)] <- "0"
train$BsmtCond <- as.numeric(train$BsmtCond)
train$BsmtExposure <- as.character(train$BsmtExposure)
train$BsmtExposure[train$BsmtExposure == "Gd"] <- "4"
train$BsmtExposure[train$BsmtExposure == "Av"] <- "3"
train$BsmtExposure[train$BsmtExposure == "Mn"] <- "2"
train$BsmtExposure[train$BsmtExposure == "No"] <- "1"
train$BsmtExposure[is.na(train$BsmtExposure)] <- "0"
train$BsmtExposure <- as.numeric(train$BsmtExposure)
train$BsmtFinType1 <- as.character(train$BsmtFinType1)
train$BsmtFinType1[train$BsmtFinType1 == "GLQ"] <- "6"
train$BsmtFinType1[train$BsmtFinType1 == "ALQ"] <- "5"
train$BsmtFinType1[train$BsmtFinType1 == "BLQ"] <- "4"
train$BsmtFinType1[train$BsmtFinType1 == "Rec"] <- "3"
train$BsmtFinType1[train$BsmtFinType1 == "LwQ"] <- "2"
train$BsmtFinType1[train$BsmtFinType1 == "Unf"] <- "1"
train$BsmtFinType1[is.na(train$BsmtFinType1)] <- "0"
train$BsmtFinType1 <- as.numeric(train$BsmtFinType1)
# Median imputation for finished basement area (type 1).
train$BsmtFinSF1[is.na(train$BsmtFinSF1)] <- median(na.omit(train$BsmtFinSF1))
train$BsmtFinType2 <- as.character(train$BsmtFinType2)
train$BsmtFinType2[train$BsmtFinType2 == "GLQ"] <- "6"
train$BsmtFinType2[train$BsmtFinType2 == "ALQ"] <- "5"
train$BsmtFinType2[train$BsmtFinType2 == "BLQ"] <- "4"
train$BsmtFinType2[train$BsmtFinType2 == "Rec"] <- "3"
train$BsmtFinType2[train$BsmtFinType2 == "LwQ"] <- "2"
train$BsmtFinType2[train$BsmtFinType2 == "Unf"] <- "1"
train$BsmtFinType2[is.na(train$BsmtFinType2)] <- "0"
train$BsmtFinType2 <- as.numeric(train$BsmtFinType2)
train$BsmtFinSF2[is.na(train$BsmtFinSF2)] <- 0
train$BsmtUnfSF[is.na(train$BsmtUnfSF)] <- 0
# Remaining character columns (i.e. not yet recoded).
train_char <- train[,sapply(train,is.character)]
train_char
|
3fc3cff8a6a146aaf05967fce350e5a2ab55fe65
|
b432642a0d72020ee0ee22ecbfdc534721dfce6c
|
/R/count_votes.R
|
dd6b2f1d2e049f9806e99ac08761c69f81d91216
|
[] |
no_license
|
hanase/vote
|
33efd6f18ec925164c807573d67f2fe1b2eaa1de
|
c37db10367d4c433f1bf1add9580efe9def11a13
|
refs/heads/master
| 2022-02-12T16:30:36.366764
| 2022-02-04T21:11:45
| 2022-02-04T21:11:45
| 82,612,543
| 2
| 1
| null | 2021-05-28T18:32:18
| 2017-02-20T23:15:03
|
R
|
UTF-8
|
R
| false
| false
| 1,211
|
r
|
count_votes.R
|
#' Count votes using a specified or automatically detected election method.
#'
#' @param votes Raw votes (matrix/data.frame or text), cleaned by
#'   prepare.votes().
#' @param method One of "auto", "plurality", "approval", "stv", "score",
#'   "condorcet", "tworound.runoff". With "auto", the method that accepts
#'   the largest number of valid ballots is chosen (ties go to the first
#'   winner in the method list, per which.max).
#' @param fsep Field separator used when parsing votes from text.
#' @param ... Forwarded both to the per-method validity helpers and to the
#'   chosen counting function.
#' @return The result object of the selected counting method.
count.votes <- function(votes, method=c("auto", "plurality", "approval", "stv", "score", 
                                        "condorcet", "tworound.runoff"), 
                        fsep='\t', ...) {
  # Main function for counting votes.
  # If method is "auto" it determines the right method depending on the
  # number of valid records.
  x <- prepare.votes(votes, fsep=fsep)
  method <- match.arg(method)
  if(method == "auto") {
    # extract method names from the method argument (drop "auto" itself)
    all.methods <- eval(formals()[["method"]])[-1]
    # count valid records for all methods
    valid <- rep(0, length(all.methods))
    names(valid) <- all.methods
    for (meth in all.methods) {
      # Optional per-method helper assembles extra args for the validity
      # check (resolved dynamically by name; must exist in scope).
      assembly.fun <- paste0("assemble.args.for.check.", meth)
      args <- if(exists(assembly.fun)) do.call(assembly.fun, list(x, ...)) else list()
      valid[meth] <- sum(do.call(is.valid.vote, c(list(x, method=meth), args)))
    }
    method <- names(valid)[which.max(valid)]
  }
  # Dispatch to the counting function named after the method.
  return(do.call(method, list(x, ...)))
}
#' Accessor: the ballots rejected as invalid on a vote-count result.
#' @param object A vote-count result object.
#' @return The `invalid.votes` component of `object`.
invalid.votes <- function(object) {
  object[["invalid.votes"]]
}
#' Accessor: the ballots accepted as valid on a vote-count result.
#' @param object A vote-count result object.
#' @return The `data` component of `object` (the valid ballots).
valid.votes <- function(object) {
  object[["data"]]
}
#' Accessor: ballots that were automatically corrected during counting.
#' @param object A vote-count result object.
#' @return The `corrected.votes` component of `object`.
corrected.votes <- function(object) {
  object[["corrected.votes"]]
}
|
2d5f1ab34ce26008cc8b987c882df2413d9f7db0
|
1554efd5de247b393039460b2905d1dbb00136bb
|
/forest.r
|
7e9243e897a99cb8bac847aa9bf998c4588a3d82
|
[] |
no_license
|
daoleen/RAir
|
b0b71884f5470be7e83c0cf574e4ac30c929e461
|
323e70cc772dcaaa4fc1fa67db75aeeb924f6b8f
|
refs/heads/master
| 2021-01-10T20:35:35.185053
| 2014-06-09T11:54:26
| 2014-06-09T11:54:26
| 20,642,702
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,313
|
r
|
forest.r
|
## Explore Greenwich (London) air-quality readings: cluster NO readings
## and fit a random forest to the first week of data.
library(ggplot2)       # FIX: library() instead of require() for hard deps
library(randomForest)

data <- read.csv(file = "GreenwichLondonAirQuality.csv", header = TRUE)

# Clearing data: drop rows with missing value or missing timestamp.
#dataFail <- (data[is.na(data$Value),]) # failed data
data <- data[complete.cases(data$Value), ]
data <- data[complete.cases(data$ReadingDateTime), ]
# add the vibras in future

# Split by measured species.
NO <- data[data$Species == "NO", ]
NO2 <- data[data$Species == "NO2", ]
NOX <- data[data$Species == "NOX", ]
PM10 <- data[data$Species == "PM10", ]
PM2.5 <- data[data$Species == "PM2.5", ]
WDIR <- data[data$Species == "WDIR", ]
WSPD <- data[data$Species == "WSPD", ]

# Hours elapsed since the first NO reading.
# FIX: vectorized timestamp parsing + difftime replaces the original
# O(n) loop that re-parsed the first timestamp on every iteration.
timestamps <- as.POSIXct(NO$ReadingDateTime, tz = "", format = "%d/%m/%Y %H:%M")
NO$Hours <- as.numeric(difftime(timestamps, timestamps[1], units = "hours"))

# K-means clustering of NO concentration into 4 groups.
NO.clusters <- kmeans(NO$Value, centers = 4)
NO$Cluster <- NO.clusters$cluster
NO.week <- NO[NO$Hours < 168, ]
NO.week <- NO.week[complete.cases(NO.week$ReadingDateTime), ]

# Plot: NO concentration over time, colored by cluster.
ggplot(NO, aes(x = Hours, y = Value)) +
  geom_point(aes(color = factor(Cluster), group = Cluster))

# FOREST: random forest on the first week (168 hourly readings).
N <- 168
rf <- randomForest(Value ~ Hours, data = NO[1:N, ], importance = TRUE, do.trace = 100)
pred <- predict(rf)
plot(NO$Hours[1:N], NO$Value[1:N])
plot(NO$Hours[1:N], pred)
|
8e3a4baadceaa4b0f9b46ae5ad99c98f11956471
|
bc2ab255d93e0652fbb7bf72bdf91aee0d212d07
|
/buyer_ratio.R
|
87ec6a069a49efdcd3e3a923c77e5a943c79f6de
|
[] |
no_license
|
Humza-Wani/Hypothesis_Testing-WIth_R
|
46754cc787d5c2aea4972e110f7c199dd1e86491
|
4a849a9f6041ce009cf779e441626f78285ff1a0
|
refs/heads/main
| 2023-06-14T13:41:11.999722
| 2021-07-05T02:21:15
| 2021-07-05T02:21:15
| 382,989,784
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
buyer_ratio.R
|
data1 <- read.csv(file.choose())
View(data1)
#EDA on data
plot(data1)
class(data1)
data2 <- as.matrix(data1)
View(data2)
barplot(data2,col = c('red','green'))
legend("topleft",c("Males","Females"),fill = c("red","green"))
#prop.test can be used for testing the null that
#the proportions (probabilities of success) in several groups are the same, or that they equal certain given values.
#ho is : All proportions are equal
#h1: All proportions are not equal
#cleasing data for prop test
East_males=data1[1,2]
East_females=data1[2,2]
west_males=data1[1,3]
west_females=data1[2,3]
north_males=data1[1,4]
north_females=data1[2,4]
south_males=data1[1,5]
south_females=data1[2,5]
class(south_females)
#Employing prop.test for east region
a <- chisq.test(c(East_males,East_females))
#Employing prop.test for west region
b <- chisq.test(c(west_males,west_females))
#Employing prop.test for north region
c <- chisq.test(c(north_males,north_females))
#Employing prop.test for south region
d <- chisq.test(c(south_males,south_females))
#as we can see p-value in all tests is less than 0.05
#hence we say there is difference in the proportion
chisq. test(x)
p_val <- data.frame(a$p.value,b$p.value,c$p.value,d$p.value)
p_val
#as we can see all p-values are smaller than 0.05 Hence h0 is rejected
|
49d1bcefb128a5fc912211990958884b97e57416
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RCzechia/tests/test-1-data-structures.R
|
91795a35f1f1387393380079f6b2817fe6c5768d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,908
|
r
|
test-1-data-structures.R
|
library(dplyr)
library(httr)
library(sf)
context("republika")
expect_that(is.data.frame(republika()), is_true())
expect_that(is.data.frame(republika("low")), is_true())
expect_that(is.data.frame(republika("high")), is_true())
expect_that(inherits(republika(), "sf"), is_true())
expect_that(inherits(republika("high"), "sf"), is_true())
expect_that(inherits(republika("low"), "sf"), is_true())
expect_equal(nrow(republika()), 1)
expect_equal(nrow(republika("low")), 1)
expect_equal(nrow(republika("high")), 1)
expect_equal(st_crs(republika("low"))$epsg, 4326)
expect_equal(st_crs(republika("high"))$epsg, 4326)
expect_error(republika("bflm")) # neznámé rozlišení - očekávám high(default) / low
expect_that(object.size(republika("low")) < object.size(republika("high")), is_true())
# low res je menší než high res
context("kraje")
expect_that(is.data.frame(kraje()), is_true())
expect_that(is.data.frame(kraje("low")), is_true())
expect_that(is.data.frame(kraje("high")), is_true())
expect_that(inherits(kraje(), "sf"), is_true())
expect_that(inherits(kraje("high"), "sf"), is_true())
expect_that(inherits(kraje("low"), "sf"), is_true())
expect_equal(nrow(kraje()), 14)
expect_equal(nrow(kraje("low")), 14)
expect_equal(nrow(kraje("high")), 14)
expect_equal(st_crs(kraje("low"))$epsg, 4326)
expect_equal(st_crs(kraje("high"))$epsg, 4326)
expect_error(kraje("bflm")) # neznámé rozlišení - očekávám high(default) / low
expect_that(object.size(kraje("low")) < object.size(kraje("high")), is_true())
# low res je menší než high res
context("okresy")
expect_that(is.data.frame(okresy()), is_true())
expect_that(is.data.frame(okresy("low")), is_true())
expect_that(is.data.frame(okresy("high")), is_true())
expect_that(inherits(okresy(), "sf"), is_true())
expect_that(inherits(okresy("high"), "sf"), is_true())
expect_that(inherits(okresy("low"), "sf"), is_true())
expect_equal(nrow(okresy()), 77)
expect_equal(nrow(okresy("low")), 77)
expect_equal(nrow(okresy("high")), 77)
expect_equal(st_crs(okresy("low"))$epsg, 4326)
expect_equal(st_crs(okresy("high"))$epsg, 4326)
expect_error(okresy("bflm")) # neznámé rozlišení - očekávám high(default) / low
expect_that(object.size(okresy("low")) < object.size(okresy("high")), is_true()) # low res je menší než high res
context("ORP")
expect_that(is.data.frame(orp_polygony()), is_true())
expect_that(inherits(orp_polygony(), "sf"), is_true())
expect_equal(nrow(orp_polygony()), 206)
expect_equal(st_crs(orp_polygony())$epsg, 4326)
context("obce body")
expect_that(is.data.frame(obce_body()), is_true())
expect_equal(nrow(obce_body()), 6258)
expect_equal(st_crs(obce_body())$epsg, 4326)
context("obce polygony")
expect_that(is.data.frame(obce_polygony()), is_true())
expect_that(inherits(obce_polygony(), "sf"), is_true())
expect_equal(nrow(obce_polygony()), 6258)
expect_equal(st_crs(obce_polygony())$epsg, 4326)
context("městské části")
expect_that(is.data.frame(casti()), is_true())
expect_that(inherits(casti(), "sf"), is_true())
expect_equal(nrow(casti()), 142)
expect_equal(st_crs(casti())$epsg, 4326)
context("vodní plochy")
expect_that(is.data.frame(plochy()), is_true())
expect_that(inherits(plochy(), "sf"), is_true())
expect_equal(nrow(plochy()), 480)
expect_equal(st_crs(plochy())$epsg, 4326)
context("řeky")
expect_that(is.data.frame(reky()), is_true())
expect_that(inherits(reky(), "sf"), is_true())
expect_equal(nrow(reky()), 6198)
expect_equal(st_crs(reky())$epsg, 4326)
context("silnice")
expect_that(is.data.frame(silnice()), is_true())
expect_that(inherits(silnice(), "sf"), is_true())
expect_equal(nrow(silnice()), 18979)
expect_equal(st_crs(silnice())$epsg, 4326)
context("železnice")
expect_that(is.data.frame(zeleznice()), is_true())
expect_that(inherits(zeleznice(), "sf"), is_true())
expect_equal(nrow(zeleznice()), 3525)
expect_equal(st_crs(zeleznice())$epsg, 4326)
context("chráněná území")
expect_that(is.data.frame(chr_uzemi()), is_true())
expect_that(inherits(chr_uzemi(), "sf"), is_true())
expect_equal(nrow(chr_uzemi()), 36)
expect_equal(st_crs(chr_uzemi())$epsg, 4326)
context("lesy")
expect_that(is.data.frame(lesy()), is_true())
expect_that(inherits(lesy(), "sf"), is_true())
expect_equal(nrow(lesy()), 2366)
expect_equal(st_crs(lesy())$epsg, 4326)
context("integrace")
obec_praha <- obce_body() %>% # bod Praha (určitě správně)
filter(KOD_LAU1 == "CZ0100")
okres_praha <- okresy("low") %>% # low res "okres" Praha (zjednodušovaný)
filter(KOD_LAU1 == "CZ0100")
expect_equal(st_contains(republika("high"), okres_praha)[[1]], 1) # okres Praha je v republice
expect_equal(st_contains(okres_praha, obec_praha)[[1]], 1) # bod Praha je v okresu Praha
|
66be1e28270d2b116c0dd17a7d5b27e35f9100db
|
3c9651873db8b92697e71d5d6da96bf489efaf3f
|
/convexhull.R
|
93b046517238752984e2e1a88f2f433ac6dae1c8
|
[] |
no_license
|
pieterminnaar/ProgrammingAssignment2
|
0c566a66aff5d203aea73334998d9d692f45ade9
|
b15d691f19cda35ae76418409138ed9a94ec0d3a
|
refs/heads/master
| 2021-01-18T01:44:59.440436
| 2014-04-26T20:16:59
| 2014-04-26T20:16:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
convexhull.R
|
library(sp)
library(rgeos)
# Make up some 'banana shaped' points
mypts=cbind(runif(100),runif(100))
keep=4*(mypts[,1]-0.5)**2 + 0.2 > mypts[,2]
mypts=mypts[keep,]
# Coerce to SpatialPointsDataframe
mypts2=SpatialPointsDataFrame(mypts,data=data.frame(NA*mypts[,1]),
match.ID=F)
# Now take a buffer that covers up all the points
# You will need to decide on an appropriate 'width'
# argument so that the region is connected
buf1=gBuffer(mypts2, width=0.3,byid=T)
buf1_union=gUnionCascaded(buf1)
# Take the union -- so this polygon will
## contain all your points
# Now partly 'undo' the buffer -- again,
## experimentation is needed to
## choose the width
buf_final=gBuffer(buf1_union,width=-0.2)
# This should look okay
plot(buf_final)
points(mypts,col=2)
|
7bd51837242ec773f4fcb24a68f9d1de729aa04a
|
0e6c5b0fc0ee861539be1e32b0fe2d031a832066
|
/scripts/data_processing/2_calc_field_predictors.R
|
277bb912257e6c15008e27410f10f6d32e5c5349
|
[] |
no_license
|
limnoliver/EOF_SW1
|
945b7c1aa702a53232d37f3d7fe348a9d382de95
|
44f6fa95c659b54d3e507878a19ab3cd2fcce707
|
refs/heads/master
| 2020-06-13T16:33:49.262282
| 2019-07-01T16:59:52
| 2019-07-01T16:59:52
| 194,711,822
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,682
|
r
|
2_calc_field_predictors.R
|
# calculate days since field action variables
# e.g., days since planing corn, alfalfa
# days since last fertilizer/manure application
# use the field activity sheet and storm start dates to calculate
storm_filename <- file.path('data_cached', paste0(site, '_prepped_WQbystorm.csv'))
storms <- read.csv(storm_filename, stringsAsFactors = FALSE,
colClasses = c(storm_start = 'POSIXct', storm_end = 'POSIXct'))
timeline_filename <- file.path('data_raw', activity_file)
timeline <- read.csv(timeline_filename,
stringsAsFactors = FALSE, strip.white = TRUE)
timeline$date <- as.Date(timeline$date, format = date_format)
# days since planting
# for corn, after cutting, value assumes zero
# for alfalfa, resets after cutting but goes up until next cutting
field_events <- arrange(storms, storm_start) %>%
select(unique_storm_number, storm_start)
# calculate days since manure/fertilizer
# calculate days since planting, set back to zero after
# cutting
field_events <- field_events %>%
mutate(days_since_planting = NA) %>%
mutate(days_since_fertilizer = NA) %>%
mutate(days_since_cultivation = NA) %>%
mutate(days_since_disturbance = NA)
for (i in 1:nrow(field_events)) {
temp_date <- as.Date(format(field_events$storm_start[i], "%Y-%m-%d"))
#fert/manure
man_fert_filter <- grep(paste0(nut_additions_keywords, collapse = '|'), timeline$activity_group, ignore.case = T)
temp_timeline <- timeline[man_fert_filter, ]
temp_timeline <- filter(temp_timeline, date <= temp_date)
fert_diff <- temp_date - temp_timeline$date
field_events$days_since_fertilizer[i] <- as.numeric(min(fert_diff))
# cultivation
cultivation_filter <- grep(paste0(cultivation_keywords, collapse = '|'), timeline$activity_group, ignore.case = T)
temp_timeline <- timeline[cultivation_filter, ]
temp_cultivation <- filter(temp_timeline, date <= temp_date)
cultivation_diff <- temp_date - temp_cultivation$date
field_events$days_since_cultivation[i] <- as.numeric(min(cultivation_diff))
# disturbance
# basically count days since any field disturbance
date_last_disturb <- filter(timeline, date <= temp_date) %>% slice(which.max(date)) %>%
pull(date)
days_since_disturb <- as.numeric(temp_date - date_last_disturb)
field_events$days_since_disturbance[i] <- ifelse(days_since_disturb > 10, 10, days_since_disturb)
#plantings, cutting, & harvest to estimate plant cover
if (all(!is.na(cutting_keywords))) {
temp_timeline <- filter(timeline, date <= temp_date)
planting_filter <- grep(paste0(planting_keywords, collapse = '|'), temp_timeline$activity_group, ignore.case = T)
harvest_filter <- grep(paste0(harvest_keywords, collapse = '|'), temp_timeline$activity_group, ignore.case = T)
cutting_filter <- grep(paste0(cutting_keywords, collapse = '|'), temp_timeline$activity_group, ignore.case = T)
temp_plantings <- temp_timeline[planting_filter, ]
temp_harvest <- temp_timeline[harvest_filter, ]
temp_cutting <- temp_timeline[cutting_filter, ]
temp_all <- bind_rows(temp_plantings, temp_harvest, temp_cutting) %>%
arrange(date)
# now, filter out everything since the last harvest
last_harvest <- filter(temp_all, activity_group %in% c(harvest_keywords, cutting_keywords)) %>%
slice(which.max(date)) %>% pull(date)
temp_all <- filter(temp_all, date >= last_harvest)
# if harvest was the last activity, then set to 0
if (any(temp_all$activity_group[which.max(temp_all$date)] %in% harvest_keywords)) {
field_events$days_since_planting[i] <- 0
} else if (any(temp_all$activity_group[which.max(temp_all$date)] %in% cutting_keywords)) {
last_date <- max(temp_all$date)
temp_diff <- as.numeric(temp_date - most_recent$date)
field_events$days_since_planting[i] <- ifelse(temp_diff > 30, 30, temp_diff)
} else {
last_date <- max(temp_all$date)
temp_diff <- as.numeric(temp_date - last_date)
# check if there was a planting date before this. Do not want to reset if planting
# on top of a cover crop, for example.
if (temp_diff < 30 & any(temp_all$activity_group[nrow(temp_all)-1] %in% planting_keywords)) {
temp_diff <- as.numeric(temp_date - temp_all$date[nrow(temp_all)-1])
}
field_events$days_since_planting[i] <- ifelse(temp_diff > 30, 30, temp_diff)
}
}
# temp_timeline <- filter(timeline, date <= temp_date)
# planting_filter <- grep(paste0(planting_keywords, collapse = '|'), temp_timeline$activity_group, ignore.case = T)
# harvest_filter <- grep(paste0(harvest_keywords, collapse = '|'), temp_timeline$activity_group, ignore.case = T)
# temp_plantings <- temp_timeline[planting_filter, ]
# temp_harvest <- temp_timeline[harvest_filter, ]
#
# # now decide which planting/harvest date to use.
# # e.g., if a cutting happened between now and planting, use days since cutting
# temp_diff <- c((temp_date - temp_plantings$date), (temp_date - temp_harvest$date))
#
# field_events$days_since_planting[i] <- as.numeric(min(temp_diff))
}
field_events <- select(field_events, -storm_start)
temp_filename <- file.path('data_cached', paste0(site, '_field_predictors.csv'))
write.csv(field_events, temp_filename, row.names = FALSE)
if(nrow(field_events) == nrow(storms) & nrow(field_events) > 0) {
message(paste("Field events processing is complete. Please see", temp_filename, "to ensure correct processing."))
} else {
stop("Something went wrong with processing the field events data. To debug, see code in 'scripts/data_processing/2_calc_field_predictors.R'")
}
|
7a9b0d8d2cb1ff81e8f5e778449f7fb77111104a
|
643248857926aa16523e6b941cbe73e1bf9cf2c8
|
/Temp/kemaaans.R
|
27e89327b3892436258e61c2a38643fe33967e0e
|
[] |
no_license
|
ksrikanthcnc/Data-Mining
|
004135123e6c6d83d0a84bf99f38c4764f598bf0
|
1fdc62de42f8fb80e0dd2f645737317f5cfdb9fe
|
refs/heads/master
| 2020-03-16T17:56:06.725758
| 2019-05-23T13:58:15
| 2019-05-23T13:58:15
| 132,852,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,740
|
r
|
kemaaans.R
|
fold <- trainControl(method="repeatedcv", number = 10,repeats = 1,
verboseIter = TRUE,
savePredictions = TRUE
)
ratio <- trainControl(number = 5,
verboseIter = TRUE,
savePredictions = TRUE,
)
#cat("--------------------MODELS------------------\n")
data <- read_csv("finalnum.csv")
data <- data[1:1000,]
#income <- ifelse( data$income == "Low", 0,1)
#data <- data[,-14]
#data <- data.frame(data, income)
###data[sapply(data, is.numeric)] <- lapply(data[sapply(data, is.numeric)], scale)
data[sapply(data, is.character)] <- lapply(data[sapply(data, is.character)], as.factor)
f <- as.formula(paste(" ~", paste(names(data), collapse = " + ")))
m <- model.matrix( f,data = data)
d <- as.data.frame(m)
#d[sapply(d, is.numeric)] <- lapply(d[sapply(d, is.numeric)], scale)
#d <- d[,-1]
#d$`(Intercept)` <- 1
#inc = ifelse( d$`(Intercept)` == NA, 1,NA)
#inc
#d = d[,-1]
#d = data.frame(d, income)
#d <- data
View(d)
d$income <- NULL
d$`(Intercept)` <- NULL
#str(d)
print('totss')
print('tot.withinss')
k1 <- kmeans(x = d,
trace = T,
centers = 1
)
View(d)
#k1
k1$totss
k1$tot.withinss
k2 <- kmeans(x = d,
trace = T,
centers = 2
)
#k2
k2$totss
k2$tot.withinss
k3 <- kmeans(x = d,
trace = T,
centers = 3
)
#k3
k3$totss
k3$tot.withinss
View(k3)
k3$withinss
silh
print('clusinfo')
print('silinfo')
library(cluster)
p <- pam(x = d,k = 1)
p$clusinfo
p$silinfo
p <- pam(x = d,k = 2)
p$clusinfo
p$silinfo
View(p)
p <- pam(x = d,k = 3,
trace.lev = 2)
p$clusinfo
p$silinfo
summary(p)
silhouette(p)
View(silhouette(p))
pam()
c<-clara(x = d,k = 1)
View(c)
c
View(data)
?fanny
k1<-kmeans(x = d,centers = 1,trace = T)
k2<-kmeans(x = d,centers = 2,trace = T)
k3<-kmeans(x = d,centers = 3,trace = T)
f1<-fanny(x = d,k = 1,trace.lev = 2)
f2<-fanny(x = d,k = 2,trace.lev = 2)
f3<-fanny(x = d,k = 3,trace.lev = 2)
f1[["silinfo"]][["clus.avg.widths"]]
f1[["silinfo"]][["avg.width"]]
f2[["silinfo"]][["clus.avg.widths"]]
f2[["silinfo"]][["avg.width"]]
f3[["silinfo"]][["clus.avg.widths"]]
f3[["silinfo"]][["avg.width"]]
View(f3)
p1<-pam(x = d,k = 1,trace.lev = 2)
p2<-pam(x = d,k = 2,trace.lev = 2)
p3<-pam(x = d,k = 3,trace.lev = 2)
p1[["silinfo"]][["clus.avg.widths"]]
p1[["silinfo"]][["avg.width"]]
p2[["silinfo"]][["clus.avg.widths"]]
p2[["silinfo"]][["avg.width"]]
p3[["silinfo"]][["clus.avg.widths"]]
p3[["silinfo"]][["avg.width"]]
c1<-clara(x = d,k = 1)
c2<-clara(x = d,k = 2)
c3<-clara(x = d,k = 3)
View(c2)
View(p2)
View(f2)
View(k2)
library(dbscan)
db<-dbscan(d,eps=850666,minPts = 1000)
db
850666
c[["clusinfo"]]
andhulo eps=max_diss avg anukunta
minpts=minimum in size
|
f41bba66f5cf2f398fb4032f0e74487f61d3512a
|
602980a2b335336d9bac17d1a924ddc690449691
|
/man/startStandalone.Rd
|
965a799ca73ea93be50324949417ca5eeb33fe4e
|
[] |
no_license
|
epiviz/epivizr-release
|
55211407cb8bf781ce8c5706479299d81ad3a5f8
|
798b350442b74334fdf3ac834c65a878e0d436e0
|
refs/heads/master
| 2021-01-13T01:40:53.888146
| 2015-06-09T17:08:33
| 2015-06-09T17:08:33
| 18,971,179
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,848
|
rd
|
startStandalone.Rd
|
\name{startStandalone}
\alias{startStandalone}
\title{
Start the standalone epiviz interface
}
\description{
Create an epiviz session manager for the epiviz web application served
from the localhost.
}
\usage{
startStandalone(geneInfo = NULL, geneInfoName = "", seqinfo = NULL, chr = NULL, start = NULL, end = NULL, start.args = list(), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{geneInfo}{
An object containing gene annotation information. See \code{\link{register-methods}} for information on objects permitted.
}
\item{geneInfoName}{
(character) The name of the gene annotation to display on the UI.
}
\item{seqinfo}{
\code{\link[GenomeInfoDb]{Seqinfo-class}} object containing sequence names and lengths.
Ignored if \code{geneInfo} is not NULL.
}
\item{chr}{
Sequence name to load the UI. If NULL, taken from \code{seqinfo(geneInfo)} or \code{seqinfo} in order.
}
\item{start}{
Starting genomic position when loading UI. If NULL, taken from \code{seqinfo(geneInfo)} or \code{seqinfo} in order.
}
\item{end}{
Ending genomic position when loading UI. If NULL, taken from \code{seqinfo(geneInfo)} or \code{seqinfo} in order.
}
\item{start.args}{
List containing other arguments to pass \code{\link{startEpiviz}}.
}
\item{\dots}{
Arguments passed to \code{addDevice} method of \code{\link{EpivizDeviceMgr}} when
adding gene annotation. For instance, \code{keepSeqlevels}.
}
}
\details{
One of \code{geneInfo} or \code{seqinfo} must be non-NULL. Otherwise an error is raised.
}
\value{
An object of class \linkS4class{EpivizDeviceMgr}.
}
\author{
Hector Corrada Bravo
}
\seealso{
\link{EpivizDeviceMgr-class}
\link{register-methods}
}
\examples{
\dontrun{
library(Mus.musculus)
mgr <- startStandalone(Mus.musculus, geneInfoName="mm10",
keepSeqlevels=paste0("chr",c(1:19,"X","Y")))
}
}
|
a9de8cf787cf52fee1c13924d285657b7a03bd98
|
664f7a10e3478c9ce542eebdfa67dbdc2858e24f
|
/man/create_predictions.Rd
|
be7f58d6476d0ab9cab615db8566527e91e027d1
|
[] |
no_license
|
aranryan/arfredpack
|
79f245a1d05fc88f0e0f579b22537978b7bc3e36
|
48036be5561f16151e19ea88badbeec7fe63a348
|
refs/heads/master
| 2020-05-27T21:09:36.222136
| 2017-03-02T15:24:36
| 2017-03-02T15:24:36
| 83,603,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 533
|
rd
|
create_predictions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_predictions.R
\name{create_predictions}
\alias{create_predictions}
\title{Create predictions using make_predictions}
\usage{
create_predictions(working_df, df_use, area_todo)
}
\arguments{
\item{area_todo}{}
}
\description{
This takes the coefficients in the working\\_df, joins on the actual data we
had set up. Then it calculates the predictions using the make predictions
function, and iterating down each row of working\\_df.
}
\examples{
}
|
ee9c90bde8e255580c66f02f53e3ee839b7ee569
|
23a8bfd3eff73d21afd1f50bd82e7bdb6e8358c8
|
/ggplot2.R
|
9e89e75425fa36cdf8e4a06cbca7f433147e1742
|
[] |
no_license
|
michbur/PADR
|
0f1233350601c22d4aec8dc45945f0acd06772ee
|
01389ab38f6a7c8a15ab23c22a2f4974ef513f13
|
refs/heads/master
| 2021-05-16T03:47:11.321262
| 2017-10-03T10:29:16
| 2017-10-03T10:29:16
| 105,617,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,795
|
r
|
ggplot2.R
|
# https://github.com/michbur/PADR/blob/master/ggplot2.R
final_dat <- melt(dat, variable.name = "medium") %>%
mutate(medium = sapply(strsplit(as.character(medium), "_"), first),
value = ifelse(value < 0, 0, value)) %>%
group_by(active, strain, medium) %>%
summarise(value = median(value)) %>%
inner_join(pathotype)
library(ggplot2)
ggplot(final_dat, aes(x = pathotype, y = value)) +
geom_point()
set.seed(1410)
ggplot(final_dat, aes(x = pathotype, y = value)) +
geom_point(position = "jitter")
ggplot(final_dat, aes(x = pathotype, y = value)) +
geom_point(position = "jitter") +
facet_wrap(~ active)
ggplot(final_dat, aes(x = pathotype, y = value)) +
geom_point(position = "jitter") +
facet_grid(medium ~ active)
ggplot(final_dat, aes(x = pathotype, y = value)) +
geom_point(position = "jitter") +
facet_wrap(~ active + medium)
ggplot(final_dat, aes(x = pathotype, y = value)) +
geom_boxplot() +
facet_grid(medium ~ active)
ggplot(final_dat, aes(x = pathotype, y = value, color = active)) +
geom_boxplot() +
facet_wrap(~ medium)
# 1. Add points to boxplots. What happened to outliers?
# 2. Create a boxplot only for active == "W1 and pathotype == "UPEC"
library(ggbeeswarm)
ggplot(final_dat, aes(x = pathotype, y = value, color = active)) +
geom_quasirandom() +
facet_wrap(~ medium)
ggplot(filter(final_dat, active != "W3"), aes(x = pathotype, y = value, color = active)) +
geom_quasirandom() +
facet_wrap(~ medium)
ggplot(final_dat, aes(x = value)) +
geom_density()
ggplot(final_dat, aes(x = value)) +
geom_density() +
facet_wrap(~ medium)
ggplot(final_dat, aes(x = value, fill = active)) +
geom_density() +
facet_wrap(~ medium)
ggplot(final_dat, aes(x = value, fill = active)) +
geom_density(alpha = 0.2) +
facet_wrap(~ medium)
ggplot(final_dat, aes(x = value, fill = active)) +
geom_density(alpha = 0.2) +
facet_grid(pathotype ~ medium)
ungroup(final_dat) %>%
mutate(active = factor(active,
levels = c("W2", "W3", "W1"),
labels = c("A1", "W3", "W1"))) %>%
ggplot(aes(x = value, fill = active)) +
geom_density(alpha = 0.2) +
facet_wrap(~ medium)
# 1. Create a density plot for each pathotype and medium.
thr_dat <- mutate(final_dat, thr = value > 0.07)
ggplot(thr_dat, aes(x = thr)) +
geom_bar()
ggplot(thr_dat, aes(x = thr, fill = medium)) +
geom_bar()
ggplot(thr_dat, aes(x = thr, fill = medium)) +
geom_bar(position = "fill")
ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill")
# 1. Using facets and bar charts show threshold data separately
# for each active substance.
# 2. Show on a barchart number of strains from each pathotype.
thr_dat2 <- group_by(thr_dat, medium) %>%
summarise(thr = mean(thr))
rbind(mutate(thr_dat2, thr_et = TRUE),
mutate(thr_dat2, thr_et = FALSE,
thr = 1 - thr)) %>%
ggplot(aes(x = medium, y = thr, fill = thr_et, label = formatC(thr, 2))) +
geom_bar(stat = "identity") +
geom_text(vjust = 2)
ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill") +
geom_text
mean_dat <- group_by(final_dat, active, medium, pathotype) %>%
summarise(mean_value = mean(value),
sd_value = sd(value))
ggplot(mean_dat, aes(x = pathotype, y = active, fill = mean_value)) +
geom_tile(color = "black") +
facet_wrap(~ medium)
ggplot(mean_dat, aes(x = pathotype, y = mean_value, fill = medium)) +
geom_bar(position = "dodge", stat = "identity") + +
facet_wrap(~ active, ncol = 1)
ggplot(mean_dat, aes(x = pathotype, y = mean_value, fill = medium)) +
geom_col(position = "dodge") +
facet_wrap(~ active, ncol = 1)
ggplot(mean_dat, aes(x = pathotype, y = mean_value, fill = medium)) +
geom_col(position = "dodge") +
geom_errorbar(aes(ymax = mean_value + sd_value, ymin = mean_value, color = medium), position = "dodge") +
facet_wrap(~ active, ncol = 1)
# 1. Using a bar chart compare median values for each medium and pathotype.
# Use median absolute deviation (mad()) as a dispersion measure.
# 2. Using a heat map compare median values for each medium and pathotype.
ggplot(mean_dat, aes(x = pathotype, y = mean_value, fill = medium)) +
geom_col(position = "dodge") +
geom_errorbar(aes(ymax = mean_value + sd_value, ymin = mean_value, color = medium), position = "dodge") +
facet_wrap(~ active, ncol = 1) +
coord_flip()
ggplot(mean_dat, aes(x = pathotype, y = mean_value, fill = medium)) +
geom_col(position = "dodge") +
geom_errorbar(aes(ymax = mean_value + sd_value, ymin = mean_value, color = medium), position = "dodge") +
facet_wrap(~ active, nrow = 1) +
coord_flip()
p <- ggplot(mean_dat, aes(x = pathotype, y = mean_value, fill = medium)) +
geom_col(position = "dodge") +
geom_errorbar(aes(ymax = mean_value + sd_value, ymin = mean_value, color = medium), position = "dodge") +
facet_wrap(~ active, ncol = 1)
p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p + theme(axis.text.x = element_text(angle = 90, hjust = 1),
legend.position = "bottom")
my_theme <- theme(axis.text.x = element_text(angle = 90, hjust = 1),
legend.position = "bottom")
ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill") +
my_theme
# 1. Create your own theme. See ?theme
# 2. See possible themes using theme_bw
ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill")
unique(thr_dat[["medium"]])
thr_dat2 <- mutate(thr_dat,
medium = factor(medium, levels = c("LB", "BHI", "M63", "TSB")))
ggplot(thr_dat2, aes(x = medium, fill = thr)) +
geom_bar(position = "fill")
# Reverse the sequence of active (W3, W2, W1) and create a bar chart,
# with the fraction of strains above threshold for each possible value
# of active.
ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill") +
scale_fill_discrete("Threshold")
ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill") +
scale_fill_manual("Threshold", values = c("orange", "lightblue3"))
ggplot(mean_dat, aes(x = pathotype, y = active, fill = mean_value, color = sd_value)) +
geom_tile(color = "black") +
geom_point() +
facet_wrap(~ medium) +
scale_color_continuous(low = "white", high = "black")
ggplot(mean_dat, aes(x = pathotype, y = active, fill = mean_value, color = sd_value)) +
geom_tile(color = "black") +
geom_point() +
facet_wrap(~ medium) +
scale_color_continuous(low = "white", high = "black") +
scale_fill_continuous(low = "blue", high = "red")
ggplot(mean_dat, aes(x = pathotype, y = active, color = mean_value, size = sd_value)) +
geom_point() +
facet_wrap(~ medium)
ggplot(mean_dat, aes(x = pathotype, y = active, color = mean_value, size = sd_value)) +
geom_point() +
facet_wrap(~ medium) +
scale_size_continuous(range = c(5, 10))
ggplot(mean_dat, aes(x = pathotype, y = active, fill = mean_value, color = sd_value)) +
geom_tile(color = "black") +
facet_wrap(~ medium) +
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = (max(mean_dat[["mean_value"]]) - min(mean_dat[["mean_value"]]))/2)
# Create a heatmap with gradient scale, where midpoint is the median of mean_value
ggplot(final_dat, aes(x = value, fill = active)) +
geom_density(alpha = 0.2) +
facet_wrap( ~ medium)
ggplot(final_dat, aes(x = value, fill = active)) +
geom_density(alpha = 0.2) +
facet_wrap( ~ medium) +
coord_cartesian(xlim = c(0, 0.1))
library(plotly)
ggplotly(ggplot(thr_dat, aes(x = medium, fill = thr)) +
geom_bar(position = "fill"))
ggplotly(ggplot(final_dat, aes(x = pathotype, y = value, color = active)) +
geom_boxplot() +
facet_wrap(~ medium))
|
d2d97a58bfa4e7dced7a2529d0ce65a8b439fa66
|
7cff8ac1004df750d94468f594bcc71553e3dcec
|
/man/vol_salable.Rd
|
f80857387c944204938aac5d08af69eb9c362cb3
|
[] |
no_license
|
Forest-Economics-Goettingen/woodValuationDE
|
afad2be6e484e3d2e3af17ab1c16088bcb90c59a
|
44973dc02dc5d8bc1dcd1a84f07b00fd1c96e43a
|
refs/heads/master
| 2023-04-09T08:08:30.982932
| 2023-03-23T08:40:34
| 2023-03-23T08:40:34
| 473,502,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,646
|
rd
|
vol_salable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vol_salable.R
\name{vol_salable}
\alias{vol_salable}
\title{Relative share in the volume over bark that is salable}
\usage{
vol_salable(
diameter.q,
species,
value.level = 2,
logging.method = "combined",
species.code.type = "en",
method = "fuchs.orig"
)
}
\arguments{
\item{diameter.q}{Quadratic mean of the diameter at breast height (dbh) of
the harvested trees \eqn{[cm]}{[cm]}.}
\item{species}{Tree species, using an available \code{species.code.type}. For
a list with the available species and codes call
\code{\link{get_species_codes}}.}
\item{value.level}{Stand quality expressed as an integer of \code{1:3}, with
\code{1} for an extraordinarily high stand quality with
high shares of wood suitable for high-valued usages such
as furniture, \code{2} for a moderate quality, and
\code{3} for a low quality (e.g., trees with thick
branches). The \code{value.level}s refer to the applied
assortment tables of Offer and Staupendahl (2018).}
\item{logging.method}{Logging method, with \code{"manually"} for
motor-manual logging using a chain saw,
\code{"harvester"} for logging with highly mechanized
forest harvesters, or \code{"combined"} for a
combination of the previous methods dependent on the
mean diameter.}
\item{species.code.type}{Type of code in which \code{species} is given.
\code{"en"} for English species names or
\code{"nds"} for numeric species codes used in Lower
Saxony, Germany. For a list with the available
species and codes call
\code{\link{get_species_codes}}.}
\item{method}{argument that is currently not used, but offers the possibility
to implement alternative parameters and functions in the
future.}
}
\value{
A vector with relative shares of salable wood volume.
}
\description{
The function estimates the salable share of the wood volume. It is expressed
in relation to the volume over bark (German unit: Vfm m.R.) as usually
provided by yield tables and forest simulators. This includes all pulp wood,
sawlog, and fuel wood assortments. The share of salable wood is required to
derive the wood revenues per cubic meter volume over bark. The function is
based on the assortment tables from Offer and Staupendahl (2018) and its
derivation is described in Fuchs et al. (in preparation). The underlying
assortment tables are based on data from HessenForst, the public forest
service of the Federal State of Hesse in Germany. For further details see the
\pkg{woodValuationDE}
\href{https://github.com/Forest-Economics-Goettingen/woodValuationDE}{README}.
}
\examples{
vol_salable(40,
"beech")
# species codes Lower Saxony (Germany)
vol_salable(40,
211,
species.code.type = "nds")
# vector input
vol_salable(seq(20, 50, 5),
"spruce")
vol_salable(rep(seq(20, 50, 10),
2),
rep(c("beech", "spruce"),
each = 4))
vol_salable(rep(seq(20, 50, 10),
2),
rep(c("beech", "spruce"),
each = 4),
logging.method = rep(c("manually", "harvester"),
each = 4))
}
\references{
Fuchs, Jasper M.; Husmann, Kai; v. Bodelschwingh, Hilmar; Koster,
Roman; Staupendahl, Kai; Offer, Armin; Moehring, Bernhard, Paul,
Carola (in preparation): woodValuationDE: A consistent framework
for calculating stumpage values in Germany (technical note)
Offer, Armin; Staupendahl, Kai (2018): Holzwerbungskosten- und
Bestandessortentafeln (Wood Harvest Cost and Assortment
Tables). Kassel: HessenForst (publisher).
}
|
92b26f958cdac6241a4536295fcc90bf12405bd8
|
18813ee8ff46b9e3f7f3c0fc6eb1657b77323398
|
/5,练习.R
|
a42d05adaabbfd4e0c5b5c364e86be0ddc0d734d
|
[] |
no_license
|
cj2030010002-xgk/2030010002
|
4032c89353df56e782e68df04b49616d7cd44020
|
7325a6f0232c2374bdc2e6c72338470f2067f7d0
|
refs/heads/main
| 2023-01-04T02:27:09.579575
| 2020-11-07T06:04:36
| 2020-11-07T06:04:36
| 309,053,657
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,754
|
r
|
5,练习.R
|
# Practice script: lists, factors, NULL vs. NA, and data frames.
# Fixes applied to the original: `1$first`, `1$f`, `1$fourth` (the digit 1
# was typed where the list `l` was meant -- "$ operator is invalid for
# atomic vectors"), a stray `)` after is.list(3) that broke parsing, the
# month name "Noc" (now "Nov"), and the out-of-bounds demo `l[[4]]` is
# commented out so the script can be sourced end to end.

# A list can hold elements of different types, including matrices and
# functions; name the elements afterwards.
(a_list <- list(
  c(1,1,2,5,14,42),
  month.abb,
  matrix(c(3,-8,1,-3),nrow = 2),
  asin
))
names(a_list) <- c("catalan","months","involutary","arcsin")
a_list

# Lists nest arbitrarily deep.
(main_list <- list(
  middle_list = list(
    element_in_middle_list = diag(3),
    inner_list = list(
      element_in_inner_list = pi^(1:4),
      another_element_in_inner_list = "a"
    )
  ),
  element_in_main_list = log10(1:10)
))

# Lists are recursive; atomic vectors are not.
is.atomic(list())
is.recursive(list())
is.atomic(numeric())
is.recursive(numeric())

# Lists have length but no dim/nrow/ncol; NROW/NCOL treat them as 1 column.
length(a_list)
length(main_list)
dim(a_list)
nrow(a_list)
ncol(a_list)
NROW(a_list)
NCOL(a_list)

# Arithmetic works on extracted elements, not on whole lists.
l1 <- list(1:5)
l2 <- list(6:10)
l1[[1]]+l2[[1]]

# Two equivalent ways to build the same named, nested list.
l <- list(
  first = 1,
  second = 2,
  third = list(
    alpha = 3.1,
    beta = 3.2
  )
)
l <- list(first=1,second=2,third=list(alpha=3.1,beta=3.2))

# [ returns a sub-list; [[ extracts a single element.
l[1:2]
l[-3]
l[c(TRUE,TRUE,FALSE)]
l[[1]]
l[["first"]]
is.list(l[1])
is.list(l[[1]])
l$first
l$f   # $ does partial matching on lists: resolves to l$first

# Nested extraction; [[ with a name vector indexes recursively.
l[["third"]]["beta"]
l[["third"]][["beta"]]
l[[c("third","beta")]]
is.list(l[["third"]]["beta"])
is.list(l[["third"]][["beta"]])

# Indexing past the end with [ yields NULL/<NA> entries...
l[c(4,2,5)]
l[c("fourth","second","fifth")]
l[["fourth"]]
l$fourth
# ...but [[ with an out-of-range numeric index is an error:
# l[[4]]  # Error: subscript out of bounds

# Converting between vectors and lists.
busy_beaver <- c(1,6,21,107)
as.list(busy_beaver)
as.numeric(list(1,6,21,107))

# unlist() flattens a list into a named atomic vector.
(prime_factors <- list(
  two = 2,
  three = 3,
  four = c(2,2),
  five = 5,
  six = c(2,3),
  seven = 7,
  eight = c(2,2,2),
  nine = c(3,3),
  ten = c(2,5)
))
new_factors <- unlist(prime_factors)
new_factors
new_factors[1]
new_factors[[1]]
is.list(new_factors)
is.list(new_factors[1])
is.list(new_factors[[1]])
is.list(3)

# Combining lists with vectors, and an odd matrix-of-lists hybrid.
c(list(a=1,b=2),3)
matrix_list_hybrid <- cbind(list(a=1,b=2),list(c=3,list(d=4)))
matrix_list_hybrid
str(matrix_list_hybrid)

# NULL elements: months without a listed holiday.
china_holiday <- list(
  Jan = "new year's day",
  Feb = "spring festival",
  Mar = NULL,
  Apr = "qingming festival",
  May = "may day",
  Jun = "dragon boat festival",
  Jul = NULL,
  Aug = NULL,
  Sep = "moon festival",
  Oct = "national day",
  Nov = NULL,
  Dec = NULL
)

# NULL is "nothing" (length 0); NA is a missing value (length 1).
length(NULL)
length(NA)
is.null(NULL)
is.null(NA)
china_holiday$Sep
china_holiday
china_holiday$Jun
china_holiday

# Pairlists appear internally, e.g. as function formals.
(arguments_of_sd <- formals(sd))
class(arguments_of_sd)
pairlist()
list()

# Data frames: columns of equal length, possibly different types.
(a_data_frame <- data.frame(
  x = letters[1:5],
  y = rnorm(5),
  z = runif(5)>0.5
))
class(a_data_frame)

# Row names come from a named column unless overridden.
y <- rnorm(5)
names(y) <- month.name[1:5]
data.frame(
  x = letters[1:5],
  y = y,
  z = runif(5)>0.5
)
data.frame(
  x = letters[1:5],
  y = y,
  z = runif(5)>0.5,
  row.names = NULL
)
data.frame(
  x = letters[1:5],
  y = y,
  z = runif(5)>0.5,
  row.names = c("jackie","tito","jermaine","marlon","michael")
)

# Dimensions and names; length() of a data frame is its column count.
rownames(a_data_frame)
colnames(a_data_frame)
dimnames(a_data_frame)
nrow(a_data_frame)
ncol(a_data_frame)
dim(a_data_frame)
length(a_data_frame)
names(a_data_frame)

# Recycling: shorter columns repeat to match the longest.
data.frame(
  x = 1,
  y = 2:3,
  z = 4:7
)

# check.names controls whether column names are made syntactically valid.
data.frame(
  "A column" = letters[1:5],
  "..." = rnorm(5),
  "..." = runif(5) > 0.5,
  check.names = TRUE
)
data.frame(
  "A column" = letters[1:5],
  "..." = rnorm(5),
  "..." = runif(5) > 0.5,
  check.names = FALSE
)

# Indexing: selecting a single column drops to a vector by default.
a_data_frame[2:3,-3]
a_data_frame[c(FALSE,TRUE,TRUE,FALSE,FALSE),c("x","y")]
a_data_frame[2:3,1]
class(a_data_frame[2:3,-3])
class(a_data_frame[2:3,1])
a_data_frame$x[2:3]
a_data_frame[[1]][2:3]
a_data_frame[["x"]][2:3]

# Transposing coerces the whole frame to a character matrix.
t(a_data_frame)
class(t(a_data_frame))

# Combining and joining data frames.
another_data_frame <- data.frame(
  z = rlnorm(5),
  y = sample(5),
  x = letters[3:7]
)
rbind(a_data_frame,another_data_frame)
cbind(a_data_frame,another_data_frame)
merge(a_data_frame,another_data_frame,by = "x")
merge(a_data_frame,another_data_frame,by = "x",all = TRUE)

# Column-wise summaries of the numeric columns.
colSums(a_data_frame[,2:3])
colMeans(a_data_frame[,2:3])
|
4494ee9f762cdc61baf7607a1e28cbeaa8b68e70
|
b6daeeeb4d312f77ee39941545f46534219d3a51
|
/bachproef/scripts/test_rq1.R
|
5472a92e4fede2024b6f4c63488e0ff4b60198cc
|
[] |
no_license
|
JakobLierman/bachelorproef-hogent-1920
|
e2358930174ec71e3e448343659ae89241e24465
|
3fd2e4dfdbb6a87fd581d6135ccba92b79a2230d
|
refs/heads/master
| 2022-10-22T04:05:10.335514
| 2020-06-12T10:04:29
| 2020-06-12T10:04:29
| 226,527,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,353
|
r
|
test_rq1.R
|
# Research question 1: does onboarding affect help requests, task
# completion times, and SUS scores?
#
# Fixes over the original script:
#   * The original called t.test(x = dataSet$onboarding_elements, y = <col>),
#     which runs a two-sample t-test between the grouping flag itself and
#     the measurements -- statistically meaningless. The formula interface
#     below compares <col> BETWEEN the two onboarding groups, as intended.
#   * The repeated mean/sd/round/t.test boilerplate is factored into helpers.

# Data
dataSet <- read.csv(file = "dataset.csv", sep = ";")
dataSet$times_mean <- (dataSet$settings_task + dataSet$new_task +
                       dataSet$add_task + dataSet$delete_task +
                       dataSet$calculator_task + dataSet$add_task_repeat) / 6
subSetWithoutOnboarding <- subset(dataSet, onboarding_elements == FALSE,
                                  select = -c(onboarding_elements))
subSetWithOnboarding <- subset(dataSet, onboarding_elements == TRUE,
                               select = -c(onboarding_elements))

# Chi-squared test of independence between the onboarding group and a
# "help needed" indicator column, given by column name.
help_chisq <- function(help_col) {
  chisq.test(x = dataSet$onboarding_elements, y = dataSet[[help_col]],
             correct = FALSE)
}

# Print the rounded mean and sd of `col` for each group (without, then
# with onboarding), then run a Welch two-sample t-test of `col` between
# the onboarding groups.
group_summary_ttest <- function(col) {
  print(round(mean(subSetWithoutOnboarding[[col]]), 2))
  print(round(sd(subSetWithoutOnboarding[[col]]), 2))
  print(round(mean(subSetWithOnboarding[[col]]), 2))
  print(round(sd(subSetWithOnboarding[[col]]), 2))
  t.test(dataSet[[col]] ~ dataSet$onboarding_elements)
}

# Hulp nodig (help needed)
## Settings Task
#help_chisq("settings_task_help")
## New Task
#help_chisq("new_task_help")
## Add Task
#help_chisq("add_task_help")
## Delete Task
help_chisq("delete_task_help")
## Calculator Task
#help_chisq("calculator_task_help")
## Add Task Repeat
help_chisq("add_task_repeat_help")

# T-test of the mean completion time across all six tasks
group_summary_ttest("times_mean")

# T-tests per task
## Settings Task
group_summary_ttest("settings_task")
## New Task
group_summary_ttest("new_task")
## Add Task
group_summary_ttest("add_task")
## Delete Task
group_summary_ttest("delete_task")
## Calculator Task
group_summary_ttest("calculator_task")
## Add Task Repeat
group_summary_ttest("add_task_repeat")

# T-test of the SUS score
group_summary_ttest("sus")
|
0de12fed00d633059ddb8c23dc2427026f33a65c
|
3c9a1b88e0adccecc0a0570838c7e2616b164d14
|
/man/createRelationship.Rd
|
ad910ecefb3b2936b2d6eabae2ce51103748b991
|
[] |
no_license
|
pavang/R-Neo4J
|
36e662a522bb72c5b811c9de8ac8dc294d8b0114
|
0a8c8e35bfc03abf7692c60e639e068aea5ee69d
|
refs/heads/master
| 2021-01-19T18:00:57.893768
| 2014-03-19T01:41:02
| 2014-03-19T01:41:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,422
|
rd
|
createRelationship.Rd
|
\name{createRelationship}
\alias{createRelationship}
\title{This function creates a (directed) relationship between two nodes on the Neo4j graph DB specified by the handle.}
\usage{
createRelationship(start, end, type, properties = NULL, handle)
}
\arguments{
\item{start}{Numeric. The node at the bottom of the
relationship edge.}
\item{end}{Numeric. The node at the tip of the edge.}
\item{type}{A character string. The type of the
relationship between the two nodes.}
\item{properties}{A named list.The properties of the
relationship.}
\item{handle}{The handle on the Neo4j database as created
by connectGraphDb().}
}
\description{
This function creates a (directed) relationship between two
nodes on the Neo4j graph DB specified by the handle.
}
\details{
This function does not (yet) support creation of undirected
relationships. That said, the parameter "type" is Neo4j
lingo and should be understood as the relationship's label
and NOT whether it is directed or undirected.
}
\examples{
\dontrun{
## This creates a directed edge from node 1 to node 2 ##
createRelationship(1,2,type="likes",properties=list(to_do="Play with",where="At the park"),handle=neo4j_handle)
##By creating another relationship, only inverting start and end, one gets an undirected edge ##
createRelationship(2,1,type="likes",properties=list(to_do="Play with",where="At the park"),handle=neo4j_handle)
}
}
\keyword{graph}
|
b1822704f3e6192927bee3c0f8b688af13cdbaec
|
0855a3e9aad865ddcbbc02d62b7b86a329ac5814
|
/Restaurant_R/restaurant_outliers.R
|
d0102f7c4fab16e2893948592400f361ab5ba303
|
[
"MIT"
] |
permissive
|
Cat-n-Dog/follow-m
|
87bb2a37f581dc2de8e160add6dc755acd8d10d5
|
f6a52ac0dab0e315a2b32e1553f7d0ab18f0fad5
|
refs/heads/master
| 2016-09-06T09:24:59.525711
| 2015-05-05T04:44:07
| 2015-05-05T04:44:07
| 32,771,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
restaurant_outliers.R
|
# Restaurant revenue data: load the training set, derive opening-date and
# city features, then split off the three extreme-revenue outliers.
library("ggplot2")
library("lubridate")
library("dplyr")
library("tree")
library("randomForest")
library("gbm")
library("caret")

train_df <- read.csv(file = "train.csv")

# Parse the opening date, then derive year/month features plus City2,
# which keeps the city name for "Big Cities" and collapses the rest to 0.
train_df$Open.Date <- lubridate::mdy(train_df$Open.Date)
train_df <- train_df %>%
  dplyr::mutate(
    Open.Year = year(Open.Date),
    Open.Month = month(Open.Date),
    City2 = ifelse(test = City.Group == "Big Cities",
                   yes = as.character(City),
                   no = 0)
  )

# Encode the derived features as factors for the tree-based models.
train_df$City2 <- as.factor(train_df$City2)
train_df$Open.Year <- as.factor(train_df$Open.Year)
train_df$Open.Month <- as.factor(train_df$Open.Month)

# Separate the three extreme outliers (revenue >= 1e7) from the rest.
train_df_wo_3_outliers <- train_df %>% filter(revenue < 1e7)
train_df_3_outliers <- train_df %>% filter(revenue >= 1e7)
|
96af317e55c6c520f2457af25538489e8aba8f50
|
82504d5f1ed4be1ed3b09e90661c550bab24220e
|
/scripts/generate_figure.R
|
96a212f3dd711fd879535a89410a120d3a69bf76
|
[] |
no_license
|
mbi2gs/its_blast_demo
|
1b883c4db21d6aa5a572b46bf3c6ae783cdf9619
|
f68ff51cfbbee98553fd6f3322297a0004457b0b
|
refs/heads/master
| 2020-08-30T14:35:58.176888
| 2019-11-20T17:10:36
| 2019-11-20T17:10:36
| 218,410,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 403
|
r
|
generate_figure.R
|
# Read per-genus hit counts and draw a circular (polar) bar chart with a
# log10 radial scale, saving the figure under results/.
library(tidyverse)

# Summary table; the column named `0` holds the counts per genus.
genus_counts <- read_delim("results/summary_genus_counts.tsv", delim = "\t")

# One bar per genus, ordered by descending count; polar coordinates turn
# the bars into a circular layout.
count_plot <- ggplot(genus_counts, aes(x = reorder(genus, -`0`), y = `0`)) +
  geom_bar(stat = "identity", aes(fill = genus)) +
  coord_polar() +
  scale_y_log10() +
  theme_minimal() +
  theme(
    axis.title = element_blank(),
    axis.ticks = element_blank(),
    axis.text.y = element_blank()
  )

# Display the plot (matching the original's auto-print) and save it.
print(count_plot)
ggsave("results/genus_counts.png", plot = count_plot)
|
fdb16268e3bab462fc3a3ea2ecb05fd7bcecb50d
|
71fabf8fb017e64d9a0fc0bbde44204b893e2735
|
/DataScience/pset1/HW1.R
|
e8fc8f5ca1f47b273e436ae2b1015cfa18fb15b8
|
[] |
no_license
|
wconrad9/r
|
08e026d4b3a873a111a233a8cd8e56733ca71119
|
4b279ea6f6f4b8d22366a2c09e982285d101cbb7
|
refs/heads/main
| 2023-01-01T14:04:16.185130
| 2020-10-18T16:16:30
| 2020-10-18T16:16:30
| 305,004,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,642
|
r
|
HW1.R
|
# HW1: exploring the openintro marioKart eBay auction data.
# Install openintro only when it is missing -- the original script
# unconditionally reinstalled the package on every run.
if (!requireNamespace("openintro", quietly = TRUE)) {
  install.packages("openintro")
}
library(openintro)
library(tidyverse)

marioKart

# Question 1: the least expensive set with at least one wheel sold for $33.
marioKart %>%
  group_by(wheels) %>%
  filter(wheels > 0) %>%
  arrange(totalPr)

# Question 2: UPS 3-day ground had the smallest shipping range ($0);
# parcel delivery has the greatest of all 8 methods ($25.5).
marioKart %>%
  group_by(shipSp) %>%
  summarize(range = max(shipPr) - min(shipPr), n = n())

# Question 3: new mean 53.8 / median 54.0; used mean 47.1 / median 42.8.
# Used games are cheaper by ~$7 comparing means but ~$12 comparing medians;
# a few expensive used listings drag the used mean upward.
marioKart %>%
  group_by(cond) %>%
  summarize(mean = mean(totalPr), median = median(totalPr), n = n())

# Question 4: two used sets sold at very high prices. Removing prices of
# $100 or more (far outside the normal spread, above every new set) brings
# the used mean and median much closer: mean 42.9, median 42.4. Those
# listings were bundled with other products that drove the price up.
marioKart %>%
  group_by(cond) %>%
  filter(cond == "used") %>%
  filter(totalPr < 100) %>%
  arrange(totalPr) %>%
  summarize(mean = mean(totalPr), median = median(totalPr))
marioKart %>%
  ggplot(mapping = aes(totalPr)) +
  geom_histogram()

# Question 5: inspect the titles of the outlier listings (bundled sales).
marioKart %>%
  group_by(cond) %>%
  filter(cond == "used") %>%
  filter(totalPr > 100) %>%
  select(title)

# Question 6: number of steering wheels vs. median total price.
marioKart %>%
  group_by(cond, wheels) %>%
  summarize(median = median(totalPr), n = n())
# Having more wheels increases totalPr.
marioKart %>%
  group_by(cond) %>%
  ggplot(mapping = aes(x = wheels,
                       y = totalPr)) +
  geom_point()

# Question 7: proportion of games selling for less than $50, by condition.
marioKart %>%
  mutate(indicator = ifelse(totalPr < 50, 1, 0)) %>%
  group_by(cond) %>%
  summarize(proportion = mean(indicator))

# Question 8:
library(ggplot2)   # already attached via tidyverse; kept from original
library(tidyverse)
marioKart %>%
  arrange(nBids) %>%
  select(totalPr, nBids)
# Number of bids didn't seem to affect totalPr (not determinable here).
marioKart %>%
  ggplot(mapping = aes(x = nBids,
                       y = totalPr)) +
  geom_point()
# Start price doesn't seem to have any overall effect on totalPr.
marioKart %>%
  ggplot(mapping = aes(x = startPr,
                       y = totalPr)) +
  geom_point()
marioKart %>%
  group_by(startPr) %>%
  summarize(price = mean(totalPr), n = n())
# Duration doesn't seem to have any overall effect on totalPr.
marioKart %>%
  ggplot(mapping = aes(x = duration,
                       y = totalPr)) +
  geom_point() +
  xlim(0, 7.5) +
  ylim(0, 100)
marioKart %>%
  group_by(duration) %>%
  summarize(price = mean(totalPr), n = n())
# Shipping method doesn't seem to have any overall effect on totalPr.
marioKart %>%
  ggplot(mapping = aes(x = shipSp,
                       y = totalPr)) +
  geom_point() +
  ylim(25, 75)
marioKart %>%
  group_by(shipSp) %>%
  summarize(price = mean(totalPr), n = n())
# Stock photo or not -- interesting finding about new vs. used listings
# without a stock photo.
marioKart %>%
  group_by(stockPhoto, cond) %>%
  summarize(price = mean(totalPr), n = n())
|
fcd9cddf71ebbe637e78c18cde02a6cc9af0933f
|
765ace978763d7e7d0ad63112565605100fec15a
|
/data handling/Day 1_ study_.R
|
f50328f5503c3f1700b3e3e3cc4d2dc62f5b6f21
|
[] |
no_license
|
ykiseong303/DataHandlingStudy
|
56017387b56fd9e434209c0278bcb310c250ece2
|
507179a24523c7fe961241f273d838b021d4d896
|
refs/heads/master
| 2020-12-28T18:54:25.116276
| 2020-02-21T14:42:01
| 2020-02-21T14:42:01
| 238,449,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,598
|
r
|
Day 1_ study_.R
|
# Practice script (Day 1): vectors, factors, and data-frame basics.
# (Comments translated from Korean; code unchanged.)

# Create a vector
vector1 <- c(1,2,3)
vector1
# Inspect the vector's structure
str(vector1)
# Compare the number 1 with the vector c(1)
# > result: TRUE (a scalar in R is just a length-1 vector)
item <- 1
vec_item <-c(1)
identical(item,vec_item)
# Check the vector's storage mode
mode(vector1)
# Categorical data (factors)
과일 <- c("사과","오렌지","딸기","딸기","사과","오렌지","사과","오렌지")
과일_범주 <- factor(과일)
print(과일_범주)
str(과일_범주)
# Convert a factor to a character vector
과일_범주1 <- as.character(과일_범주)
과일_범주1
# Convert a factor to a numeric vector
과일_범주2 <- as.numeric(과일_범주)
과일_범주2
# Character conversion yields the strings matched to the levels;
# numeric conversion yields the internal level codes.
# Specify the categories directly:
# categories are taken in the order given in `levels`
# (values not listed there become NA).
과일_범주3 <- factor(과일_범주,levels=c("딸기","사과"))
과일_범주3
# Ordinal scale
서열 <-c("하","중","상")
서열_범주 <- factor(서열,ordered=T)
서열_범주
# To set the ordering explicitly, pass it via the `levels` option
서열_범주2 <- factor(서열, levels=c("하","중","상"),ordered=T)
서열_범주2
# Combining vectors
vec1 <- c(1:5)
vec1 <- c(vec1,6:10)
vec1
# Inserting values into the middle of a vector
vec2 <- append(vec1,c(100:103),2)
vec2
# Data frames
a <- c(1:5)
b <- c(2:6)
c <- c(3:7)
d <- c("안","녕","하","세","요")
e <- data.frame(d,a,b,c)
e
# On data-frame creation, character columns default to factors
# (NOTE(review): true for R < 4.0; since R 4.0 the default is character)
str(e)
# With the stringsAsFactors option the column stays character
e <- data.frame(d,a,b,c, stringsAsFactors = FALSE)
str(e)
# Accessing data
want <- head(iris,10)
print(want[c(1:5),c(1:2)])
# Selecting a single column automatically drops to a vector
print(want[c(1:5),c(1)])
# Using drop = FALSE keeps the data.frame structure
print(want[c(1:5),c(1),drop=FALSE])
# Accessing a column with $
# (the result is a vector)
want$Sepal.Length
# Conditional filtering with subset()
want2 <- subset(want,subset = Sepal.Length > 4.9)
want2
# Select specific columns from the rows matching a condition
want3 <- subset(want, subset=Sepal.Length > 4.9,Species)
want3
# Inside subset() no $ is needed to refer to columns,
# but values cannot be modified this way.
# [] supports both access and modification, but needs $ for columns.
want <- want[,1:2]
want
# Sorting
o_want<- order(want$Sepal.Length)
o_want <- want[order(want$Sepal.Length),]
o_want
# Attach the sort-order index as a column.
# NOTE(review): this orders the already-sorted o_want rather than `want`;
# possibly order(want$Sepal.Length) was intended -- confirm.
o_want <- data.frame(want,order = order(o_want$Sepal.Length))
o_want
|
6bf1868b87c9bf40da7b725c4f7c67592d813e59
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/grouped/R/lL.gaussian.R
|
80ff3cf641708ef1fc54a0c5c50be3188504cedf
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 157
|
r
|
lL.gaussian.R
|
"lL.gaussian" <-
function(params){
sigma <- params[p]
mu <- c(X %*% params[-p])
-sum(log(pnorm(qb, mu, sigma) - pnorm(qa, mu, sigma)))
}
|
b9f61bc9d45818e749d6fa0dc9f5fd387ef355b8
|
6312f6e7e2e22bb7cb7580b0b92c0a6bbeeb5627
|
/wltr_new/wltr.git/joinTrainAndLabel.R
|
c7d8ebecec1de046b054eea730758a1ceef05ef9
|
[] |
no_license
|
babyang/wltr
|
20708cee2661b9c6ae8b67bdf43343dfbeadac84
|
9a9a76d474aebf3fc350b9cdcf5734328b11be60
|
refs/heads/master
| 2020-05-17T02:40:21.406024
| 2014-12-02T09:30:23
| 2014-12-02T09:30:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
joinTrainAndLabel.R
|
# Read a comma-separated file with a header row into a data.frame.
readDataFromCSV <- function(filename) {
  read.table(file = filename, header = TRUE, sep = ",")
}
# Join training features with labels on tradeItemId and write the merged
# sample to a CSV file. `args` holds three paths: train CSV, label CSV,
# and the output file.
joinTrainAndLabel <- function(args) {
  train_path <- args[1]
  label_path <- args[2]
  output_path <- args[3]
  merged <- merge(readDataFromCSV(train_path),
                  readDataFromCSV(label_path),
                  by = "tradeItemId")
  write.table(merged, output_path, sep = ",", row.names = FALSE)
}
# Script entry point: forward the trailing command-line arguments
# (train CSV, label CSV, output CSV) to the join routine.
args <- commandArgs(TRUE)
joinTrainAndLabel(args)
|
473a9eec8d5fe0bbffd2eb565770bd7dbfa589bf
|
fdd5f9373bd535dda8575a681fbd5be7c8e3af1a
|
/R/fillna.R
|
70db49d9c2cafc63f7a56655f1ba93433e47150e
|
[
"MIT"
] |
permissive
|
jakesherman/jakemisc
|
941f46cd724cc15610c9dc9acb19bd6b973bc6eb
|
13dab160b6776b813c9768fd66654332741b5de1
|
refs/heads/master
| 2016-09-15T03:41:46.207665
| 2016-05-05T21:29:49
| 2016-05-05T21:29:49
| 27,505,697
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,393
|
r
|
fillna.R
|
## ============================================================================
##
## fillna() - inspired by pandas.fillna()
##
## ============================================================================

# S3 generic for filling missing values. Dispatch is on the class of
# `data`; `...` carries the replacement value(s) and method options, and
# `inplace` is only honoured by methods that can modify by reference.
fillna <- function(data, ..., inplace = FALSE) {
    UseMethod("fillna")
}
# Data.table method for fillna
#
# With inplace = TRUE, NAs are filled by reference via data.table::set()
# (no copy is made and `data` itself is modified); otherwise the
# data.frame method is reused and a new object is returned with the
# original data.table attributes restored. Supported call patterns:
#   fillna(dt, value)                      - fill every column
#   fillna(dt, value, fill_only = cols)    - fill only `cols`
#   fillna(dt, value, no_fill = cols)      - fill all but `cols`
#   fillna(dt, col1 = v1, col2 = v2, ...)  - per-column fill values
fillna.data.table <- function(data, ..., fill_only = NULL, no_fill = NULL, 
                              inplace = FALSE) {
    
    # If inplace is TRUE, use data.table's set function, otherwise use the
    # data.frame method
    if (inplace) {
        
        # Turn ... into a list
        dot_args <- list(...)
        
        # Error handling ------------------------------------------------------
        
        # General errors
        assert_that(length(dot_args) > 0)
        if (!is.null(fill_only) & !is.null(no_fill)) 
            stop("fill_only and no_fill may not be used simultaneously")
        if ((!is.null(fill_only) | !is.null(no_fill)) & length(dot_args) > 1) 
            stop("If fill_only or no_fill is set to TRUE you cannot specify ",
                 "specific columns to fill on")
        
        # Make sure column names are correct
        if (length(dot_args) > 1) assert_that(namesIn(names(dot_args), data))
        if (!is.null(fill_only)) assert_that(namesIn(fill_only, data))
        if (!is.null(no_fill)) assert_that(namesIn(no_fill, data))
        
        # Data.table must be installed for inplace to work - it does not need
        # to be in the search list because set is accessed via ::
        if (!isPackageInstalled("data.table")) {
            warning("Data.table not installed, so inplace cannot be set to ",
                    "TRUE - returning the result while setting it to FALSE")
            return(fillna(data, ..., fill_only = fill_only, no_fill = no_fill,
                          inplace = FALSE))
        }
        
        # Do NA filling -------------------------------------------------------
        
        if (length(dot_args) == 1) {
            
            # When the length of dot_args == 1, we are looping through one
            # or more col_names and modifying those columns in place
            col_names <- names(data)
            if (!is.null(fill_only)) {
                col_names <- fill_only
            } else if (!is.null(no_fill)) {
                col_names <- col_names[!col_names %in% no_fill]
            }
            
            # Fill the NAs by reference
            for (col_name in col_names) 
                data.table::set(data, which(is.na(data[[col_name]])), col_name, 
                                dot_args[[1]])
            
        } else {
            
            # When the length of dot_args is > 1, we are filling in specific
            # columns with specific values
            dot_names <- names(dot_args)
            for (i in seq_along(dot_args)) 
                data.table::set(data, which(is.na(data[[dot_names[i]]])), 
                                dot_names[i], dot_args[[i]])
        }
        
    } else {
        
        # Use the data.frame method, preserving the data.table's attributes
        # (class, key, etc.) on the returned object.
        data_attributes <- attributes(data)
        class(data) <- "data.frame"
        data <- fillna(data, ..., fill_only = fill_only, no_fill = no_fill)
        attributes(data) <- data_attributes
    }
    
    data
}
# Data.frame method for fillna
#
# Fill NA values in a data.frame. Supported call patterns:
#   fillna(df, value)                      - fill every column
#   fillna(df, value, fill_only = cols)    - fill only `cols`
#   fillna(df, value, no_fill = cols)      - fill all but `cols`
#   fillna(df, col1 = v1, col2 = v2, ...)  - per-column fill values
# Returns the modified data.frame; `inplace` is not supported by this
# method and only triggers a warning.
#
# Fix over the original: the scalar `if` conditions used the elementwise
# operators `&`/`|`; they now use the short-circuiting `&&`/`||`, which is
# correct for scalar conditions (and required by R >= 4.3, where `&&` on
# length > 1 inputs is an error).
fillna.data.frame <- function(data, ..., fill_only = NULL, no_fill = NULL, 
                              inplace = FALSE) {
    
    # Turn ... into a list
    dot_args <- list(...)
    
    # Error handling
    assert_that(length(dot_args) > 0)
    if (inplace) warning("The data.frame method of fillna does not support ",
                         "inplace modification")
    if (!is.null(fill_only) && !is.null(no_fill)) 
        stop("fill_only and no_fill may not be used simultaneously")
    if ((!is.null(fill_only) || !is.null(no_fill)) && length(dot_args) > 1) {
        stop("If fill_only or no_fill is set to TRUE you cannot specify ",
             "specific columns to fill on")
    }
    
    # Do NA filling -----------------------------------------------------------
    
    if (!is.null(fill_only)) {
        
        # Fill in the fill_only columns with dot_args[[1]]
        data[fill_only] <- lapply(data[fill_only], fillna, dot_args[[1]])
        
    } else if (!is.null(no_fill)) {
        
        # Fill in all columns BUT no_fill with dot_args[[1]]
        valid_columns <- names(data)[!names(data) %in% no_fill]
        data[valid_columns] <- lapply(data[valid_columns], fillna, 
                                      dot_args[[1]])
        
    } else if (length(dot_args) == 1 && is.null(names(dot_args))) {
        
        # Simple NA filling: one unnamed value applied to the whole frame
        data[is.na(data)] <- dot_args[[1]]
        
    } else if (all(names(dot_args) %in% names(data))) {
        
        # Match names NA filling: each named argument fills its column
        dot_names <- names(dot_args)
        for (i in seq_along(dot_args)) 
            data[[dot_names[i]]] <- fillna(data[[dot_names[i]]], dot_args[[i]])
        
    } else {
        stop("Your function input was incorrect")
    }
    
    data
}
# Default fillna method: replace every NA in an atomic vector (or any
# object supporting is.na/which/[<-) with a single replacement value.
# `inplace` is accepted for interface compatibility but has no effect.
fillna.default <- function(data, ..., inplace = FALSE) {
    fill_values <- list(...)
    # Exactly one replacement value is supported for plain vectors.
    assert_that(length(fill_values) == 1)
    na_positions <- which(is.na(data))
    data[na_positions] <- fill_values[[1]]
    data
}
## Example --------------------------------------------------------------------
# Exploratory / scratch usage examples for fillna(). NOTE(review): the
# `createna()` calls below reference a function that is not defined
# anywhere in this file, and several "other possibilities" lines (e.g.
# `fillna(test, 5, -Josh)`) will error as written -- these appear to be
# design sketches rather than runnable code.
test <- data.frame(Jake = c(1, 2, NA, 4), Josh = c(NA, NA, 10, 20))
test
fillna(test, 5)
test %>% fillna(Jake = "hi", Josh = 0)
fillna(test, Jake = 0)
fillna(test, 5, fill_only = "Jake")
fillna(test, 5, no_fill = "Jake")
# Same examples on a keyed data.table.
test <- data.table(test)
setkey(test, Jake)
attributes(test)
test1 <- fillna(test, 5)
attributes(test1)
fillna(test, Jake = "hi", Josh = 0)
fillna(test, Jake = 0)
fillna(test, 5, fill_only = "Jake")
fillna(test, 5, no_fill = "Jake")
# In place modification
fillna(test, Jake = 17, Josh = 0, inplace = TRUE)
# Other possibilities (design sketches; not all run as written)
fillna(test, 5, -Josh)
fillna(test, 5, no_fill = "Josh")
fillna(mtcars, 10.7, mpg, cyl, drat)
fillna(mtcars, 10, fill_only = "mpg")
fillna(mtcars, 12, fill_only = c("cyl", "disp"))
fillna(mtcars, 12, cyl = 12, disp = 12)
fillna(mtcars, 15, mpg, cyl, disp)
fillna(mtcars, mpg = 15, disp = 22)
createna(mtcars, c("15", "14", "7.33", "testing"))
createna(mtcars, c("15", "14"), -mpg, -cyl)
createna(mtcars, mpg = c("15", "14"), cyl = "NA")
# Logic: take a variable number of arguments
#
# 1. If length(...) is one, we are using ...[1] as the value to fill NAs with,
#    or the value(s) to create NA on for all columns ("the value to go on")
# 2. If length(...) is > 1, there are two possibilities:
#    a. ...[1] is the value to go on, and the rest of ... are columns to
#       either use or exclude. We can determine that this is the desired
#       option b/c ... will not have any names
#    b. each ... has a 1. name and 2. value(s) "to go on" for each column
#       name. We can tell that this is the desired behavior if ... has
#       one or more names
# Example 1
fillna(mtcars, 5)
fillna(mtcars, value_to_go_on)
createna(mtcars, c(5, 6, 7, "NA", "na", "NaN"))
# Example 2a
fillna(mtcars, 5, mpg, cyl, disp)
fillna(mtcars, value_to_go_on, -drat, -wt)
createna(mtcars, c(5, 6, 7, "NA", "na", "NaN"), mpg, disp, vs, am)
# Example 2b
fillna(mtcars, mpg = 5, disp = value_to_go_on)
createna(mtcars, cyl = "na", disp = c("NaN", "NA", -999))
# Prototype of the argument-dispatch logic sketched above, using
# non-standard evaluation: substitute(...()) captures the unevaluated
# `...` expressions, which are then deparsed to strings and re-evaluated
# with eval(parse(...)). Prints which dispatch case applies ("1", "2a",
# "2b") along with the recovered values. NOTE(review): eval(parse()) is
# used here only as an exploratory device; the list(...) variant below
# (fillna_logic_) is the safer approach.
fillna_logic <- function(...) {
    
    SE_args <- sapply(substitute(...()), deparse)
    
    if (length(SE_args) == 1) {
        
        # Example 1: a single value applied to all columns
        print("1")
        arg_value <- eval(parse(text = SE_args[1]))
        print(arg_value)
        
    } else {
        
        # Example 2: value plus columns, or named per-column values
        if (is.null(names(SE_args))) {
            
            # Example 2a: first arg is the value, the rest are column names
            print("2a")
            first_arg <- eval(parse(text = SE_args[1]))
            column_names <- SE_args[2:length(SE_args)]
            print(first_arg)
            print(column_names)
            
        } else {
            
            # Example 2b: named arguments map columns to values
            print("2b")
            # Evaluate each captured expression back into a value,
            # preserving the argument names.
            eval_list <- function(SE_args) {
                eval_list <- vector("list", length(SE_args))
                for (i in seq_along(SE_args)) 
                    eval_list[[i]] <- eval(parse(text = SE_args[[i]]))
                names(eval_list) <- names(SE_args)
                eval_list
            }
            new_list <- eval_list(SE_args)
            print(new_list)
        }
    }
}
# Testing example 1
fillna_logic(5)
value_to_go_on = "Testing"
fillna_logic(value_to_go_on)
# Testing example 2a
fillna_logic(10, mpg, cyl)
fillna_logic(c(10, 40, 0), -disp, -vs)
fillna_logic(c(10, value_to_go_on, 25), mpg, jake)
# Testing example 2b
fillna_logic(mpg = 10, disp = 20)
fillna_logic(jake = c(0, 10, 20), roche = "brothers", kyle = value_to_go_on)
# Standard-evaluation variant of fillna_logic: uses list(...) directly,
# so arguments are evaluated normally (bare column names must be quoted
# by the caller). Prints which dispatch case applies and the values.
fillna_logic_ <- function(...) {
    
    SE_args <- list(...)
    
    if (length(SE_args) == 1) {
        
        # Example 1: a single value applied to all columns
        print("1")
        arg_value <- unlist(SE_args[1])
        print(arg_value)
        
    } else {
        
        # Example 2: value plus column names, or named per-column values
        if (is.null(names(SE_args))) {
            
            # Example 2a: first arg is the value, the rest are column names
            print("2a")
            first_arg <- unlist(SE_args[1])
            column_names <- unlist(SE_args[2:length(SE_args)])
            print(first_arg)
            print(column_names)
            
        } else {
            
            # Example 2b: named arguments map columns to values
            print("2b")
            print(SE_args)
        }
    }
}
# Testing example 1
fillna_logic_(5)
value_to_go_on = "Testing"
fillna_logic(value_to_go_on)
# Testing example 2a (column names passed as strings)
fillna_logic_(10, "mpg", "cyl")
fillna_logic_(c(10, 40, 0), "-disp", "-vs")
fillna_logic_(c(10, value_to_go_on, 25), "mpg", "jake")
# Testing example 2b
fillna_logic_(mpg = 10, disp = 20)
fillna_logic_(jake = c(0, 10, 20), roche = "brothers", kyle = value_to_go_on)
# -----------------------------------------------------------------------------
# Micro-benchmark: is indexing with which(is.na(.)) faster than a raw
# logical mask for NA replacement? f1 and f2 are intentionally identical
# except for that one difference.
f1 <- function(data, val) {
    data[is.na(data)] <- val
    data
}
f2 <- function(data, val) {
    data[which(is.na(data))] <- val
    data
}
# 10M-element character vector with a high share of NAs.
test <- c(NA, LETTERS, NA, NA, NA, NA)
god <- sample(test, 10000000, TRUE)
library(microbenchmark)
microbenchmark(f1(god, 1), f2(god, 1))
# which() appears to speed things up
|
83de715a466b1bac78fe1f842631515c80b143ba
|
e8f6138f6bee3e95bea0255ec447226b4ef4b5f7
|
/EnhancedSpiral.js/rscripts/server.R
|
4403cb37fac9eb0f3335d068cf63ad28a4420f4b
|
[] |
no_license
|
timkonieczny/LekagulChallenge
|
4253c457d4ac0f85089ced3ef222ebda3738a9de
|
84b06bb8ed9f804c1428500f57c69f11631fd8f2
|
refs/heads/master
| 2020-06-15T09:14:36.200528
| 2017-08-09T06:56:44
| 2017-08-09T06:56:44
| 94,133,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,086
|
r
|
server.R
|
require(Rook)
require(xts)
require(rjson)
# Estimate the dominant seasonal period of a time series from its AR
# spectrum (Hyndman's find.freq heuristic). Returns the period as an
# integer, or 1 when no significant periodicity is found.
#
# Fixes over the original:
#  * The next-local-maximum search used a hard-coded upper index of 500
#    (the spec.ar default spectrum length); it now uses length(spec$spec),
#    so shorter spectra cannot cause out-of-bounds indexing. Behavior is
#    identical for the default 500-point spectrum.
#  * which.max() over the sub-vector spec$spec[j[1]:end] returns a
#    position relative to j[1], so the absolute index is j[1] + pos - 1;
#    the original's j[1] + pos overshot by one.
find.freq <- function(x)
{
  spec <- spec.ar(c(x), plot = FALSE)
  if (max(spec$spec) > 10) # Arbitrary threshold chosen by trial and error.
  {
    period <- round(1 / spec$freq[which.max(spec$spec)])
    if (period == Inf) # Peak at frequency 0: find the next local maximum
    {
      j <- which(diff(spec$spec) > 0)
      if (length(j) > 0)
      {
        nextmax <- j[1] - 1 + which.max(spec$spec[j[1]:length(spec$spec)])
        period <- round(1 / spec$freq[nextmax])
      }
      else
        period <- 1
    }
  }
  else
    period <- 1
  return(period)
}
# Detect nested seasonal periods: find the base frequency, aggregate the
# series by that frequency (period.apply -- presumably xts; confirm the
# loaded package), and repeat until no further seasonality is found.
# Returns the cumulative (absolute) periods, dropping the final entry,
# which is always 1.
find.freq.all <- function(x){
  f=find.freq(x);
  freqs=c(f);
  while(f>1){
    start=1; #also try start=f;
    x=period.apply(x,seq(start,length(x),f),mean);
    f=find.freq(x);
    freqs=c(freqs,f);
  }
  if(length(freqs)==1){ return(freqs); }
  # Convert per-level factors into absolute periods by cumulative product.
  for(i in 2:length(freqs)){
    freqs[i]=freqs[i]*freqs[i-1];
  }
  freqs[1:(length(freqs)-1)];
}
# Start an in-process Rook HTTP server bound to localhost:29499.
s <- Rhttpd$new()
s$start(listen='127.0.0.1', port=29499, quiet=FALSE)
# Rook request handler: answers POSTed queries about a CSV time series.
# POST fields: query ("computeCycles"), inputFileName (path relative to the
# app's parent directory), attr (column name). Always responds with JSON
# echoing the inputs; for "computeCycles" it also includes the seasonal
# periods detected by find.freq.all().
queryManager <- function(env) {
  req <- Request$new(env)
  res <- Response$new()
  response <- list()
  userData <- req$POST()
  if(!is.null(userData)) {
    query <- ""
    if(!is.null(userData$query))
      query <- userData$query
    response <- c(response, query = query)
    inputFileName <- "data/noise.csv"  # default dataset
    if(!is.null(userData$inputFileName))
      inputFileName <- userData$inputFileName
    response <- c(response, inputFileName = inputFileName)
    attribute <- "Noise"  # default column to analyse
    if(!is.null(userData$attr))
      attribute <- userData$attr
    response <- c(response, attr = attribute)
    # NOTE(review): the path comes straight from the client with no
    # sanitisation, so "../" traversal is possible -- confirm trust model.
    filename <- paste("../", inputFileName, sep="")
    file <- read.table(filename, header=TRUE, sep=";", fill=FALSE, strip.white=TRUE, dec=".")
    if(query == "computeCycles") {
      print(is.numeric(file[, attribute]))  # debug output to server console
      c <- find.freq.all(file[,attribute])
      response <- c(response, cycles = list(c))
    }
  }
  res$header("content-type","application/json")
  res$write(toJSON(as.list(response)))
  res$finish()
}
# Register the handler under the app name and block the console so the
# server keeps serving (suspend_console -- presumably a Rook helper; verify).
s$add(app=queryManager, name='spiralQueryManager')
suspend_console()
|
45ebc45bcee44d7690e03923d12b2dcc8b7e6b50
|
93d426e1a913d462a7969c84feae89cf59fef34e
|
/R/utils-globalVar.R
|
5a04c40d18138c48e7b7d5de6c435aff7f15b305
|
[] |
no_license
|
cran/polypharmacy
|
2c14ad772d1ccd01118b24d602388a2366fc7057
|
0f9cc8c7ffe7849356d176ece3f09b8f30c9b1b1
|
refs/heads/master
| 2023-06-21T22:59:22.184540
| 2021-07-12T08:30:02
| 2021-07-12T08:30:02
| 385,299,314
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
utils-globalVar.R
|
### Just to be sure that R CMD CHECK has no NOTE messages
### -> problem with data.table package
# Declare the column names used in data.table non-standard-evaluation
# expressions so "no visible binding for global variable" NOTEs are
# suppressed during R CMD check.
globalVariables(sort(unique(c(
  ".",
  "by_hosp", "by_hospit",
  "diff_grace", "drug_code", "drug_duration", "duration_ajust",
  "grace_per",
  "hosp", "hosp_date",
  "i_D_s_", "id", "ids", "is_present",
  "ndays", "nRx",
  "P1", "P1_date", "P2", "P2_date", "per", "per1",
  "quantile",
  "ratios", "rx_date",
  "tx_end", "tx_end_ajust", "tx_end_grace", "tx_start"
))))
|
8070801f5d0c0837f1c5daac7b0335da7f4a6226
|
67c40f58937f1ab0ee425eb026eaeb82b1ef52c3
|
/stretcher_to_zoo.R
|
c6f1d63650891e8279b22cf55f987712c0568913
|
[] |
no_license
|
katherinetanaka/scripts
|
1de668fd8e8b856f6d48bee8fdf798e869d63f87
|
2a995f3325697456d7764eb88b02984cf4b67ca4
|
refs/heads/master
| 2021-01-01T15:32:48.559127
| 2017-10-11T14:58:20
| 2017-10-11T14:58:20
| 97,637,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,168
|
r
|
stretcher_to_zoo.R
|
###############################################################################
#### Multiple alignment to variant base call -- pAsa4 ##########################
# Katherine Tanaka, May 12th 2015                                            ##
#### Attribution ####
# This script is part of the following publication:
# Tanaka KH, Vincent AT, Trudel MV, Paquet VE, Frenette M, Charette SJ:
# The mosaic architecture of Aeromonas salmonicida subsp. salmonicida
# pAsa4 plasmid and its consequences on antibiotic resistance.
# PeerJ. 2016;4:e2595.
# and is reproduced according to its Creative Commons Licence
# https://creativecommons.org/licenses/by/4.0/
# FIX: the banner line began with a stray "*" before "#", which is a parse
# error ("unexpected '*'") and prevented the whole script from being sourced.
library(zoo)
library(reshape2)
library(ggplot2)
## Use with markx0 files, it's good!
# Parse a stretcher (EMBOSS pairwise alignment) output file into two aligned
# sequences, classify each position (insertion / deletion / transition /
# transversion), and prepare rolling-window counts for plotting.
stretcher <- readLines("stretcher/stretcher_output")
# Separate stretcher in head (the information) and Seq (the sequences)
endHead <- grep("#={1,250}", stretcher)[2] # To get end of last boxe
stretcherH <- stretcher[1:endHead]
stretcherSeq <- stretcher[(endHead+1):length(stretcher)]
# Extract sequences from stretcherSeq: each alignment line ends in a run of
# bases/gaps preceded by whitespace.
find <- regexpr("\\s[ATGCatgc-]{1,50}$", stretcherSeq)
see <- regmatches(stretcherSeq, find)
extract <- strsplit(see, " ", fixed = TRUE)
# All odds: sequence 1. All evens, sequence 2, + get rid of the space
seq1 <- sapply(extract[seq(from = 1, to = length(extract)-1, by = 2)],
               FUN = "[[", 2)
seq2 <- sapply(extract[seq(from = 2, to = length(extract), by = 2)],
               FUN = "[[", 2)
seq1 <- paste0(seq1, collapse = "")
seq2 <- paste0(seq2, collapse = "")
# as seq1-track: one row per aligned position.
# NOTE(review): after collapsing, length(seq1) is 1, so rep(NA, length(seq1))
# is a single NA that data.frame() recycles to the full row count -- works,
# but rep(NA, nchar(seq1)) would say what is meant.
mutDF <- data.frame(strsplit(seq1, "", fixed = TRUE),
                    strsplit(seq2, "", fixed = TRUE),
                    Type = rep(NA, length(seq1)), stringsAsFactors = FALSE,
                    length = rep(NA, length(seq1)))
names(mutDF) <- c("Name1", "Name2", "Type", "length")
##### Insertions are the trickiest ones ####
# Positions where sequence 1 has a gap = insertions relative to sequence 1.
ins <- grep("-", mutDF$Name1, fixed = TRUE)
names(ins) <- rep(NA, length(ins))
# How to get insertion by groups: consecutive gap positions share a group id,
# assigned via the names of `ins`.
i <- 1
names(ins)[1] <- i
for(j in 2:length(ins)){
  if(ins[j] - ins[j-1] == 1){
    names(ins)[j] <- i
  } else {
    i <- i + 1
    names(ins)[j] <- i
  }
}
# Do notation for each group. // i is max group number now
# Mark the position just before each gap run as "ins" with the run length.
for(k in 1:i){
  mutDF[(ins[names(ins) == k][1])-1, 3] <- "ins"
  mutDF[(ins[names(ins) == k][1])-1, 4] <- length(ins[names(ins) == k])
}
# We can now shrink Seq1 to its original length, no insertion
mutDFclean <- mutDF[-ins, ]
##### Now the deletions ####
# Same grouping scheme, this time on gaps in sequence 2.
del <- grep("-", mutDFclean$Name2, fixed = TRUE)
names(del) <- rep(NA, length(del))
l <- 1
names(del)[1] <- l
for(m in 2:length(del)){
  if(del[m] - del[m-1] == 1){
    names(del)[m] <- l
  } else {
    l <- l + 1
    names(del)[m] <- l
  }
}
# Every deleted position is tagged "del"; the run length is stored on the
# first position of each run only.
for(n in 1:l){
  mutDFclean[(del[names(del) == n]), 3] <- "del"
  mutDFclean[(del[names(del) == n][1]), 4] <- length(del[names(del) == n])
}
#### Substitutions #### # By the way ins + del = total - sub
# NOTE(review): `sub` shadows base::sub within this script.
sub <- mutDFclean$Name1 == mutDFclean$Name2 | mutDFclean$Name2 == "-"
whichSub <- grep("FALSE", sub, fixed = TRUE)
# Build bank for transition, transversion is everything else, since no "-"
transition <- c("AG", "GA", "CT", "TC")
# Write transition and transversion
for(p in 1:length(whichSub)){
  if(is.na(mutDFclean[whichSub[p], 3])){
    if(paste0(mutDFclean[whichSub[p], 1], mutDFclean[whichSub[p], 2]) %in%
       transition){
      mutDFclean[whichSub[p], 3] <- "transi"
    } else {
      mutDFclean[whichSub[p], 3] <- "transver"
    }
  }
}
#### Preparing for ggplot ####
mutDFclean$Type <- factor(mutDFclean$Type)
mutDFclean <- cbind(mutDFclean, 1:length(mutDFclean$Name1))
names(mutDFclean) <- c("Name1", "Name2", "Type", "length", "position")
# Lets use sliding windows: mutation-type counts in 1000-bp windows.
test <- rollapply(mutDFclean$Type, width = 1000, FUN = function(x) length(grep("transi", x, fixed = TRUE)))
test2 <- rollapply(mutDFclean$Type, width = 1000, FUN = function(x) length(grep("transver", x, fixed = TRUE)))
test3 <- rollapply(mutDFclean$Type, width = 1000, FUN = function(x) length(grep("del", x, fixed = TRUE)))
test4 <- rollapply(mutDFclean$Type, width = 1000, FUN = function(x) length(grep("ins", x, fixed = TRUE)))
# Window positions are centred: offset by half the window width (500).
sliding <- cbind(501:(length(test)+500), test, test2, test3, test4)
sliding <- data.frame(sliding)
names(sliding) <- c("window", "transitions", "transversions",
                    "deletion", "insertion")
slidingGG <- melt(sliding, id.vars = "window",
                  measure.vars = c("transitions", "transversions", "insertion"))
### Sliding gg2: merge deletion counts back in, used for the alpha channel.
names(test3) <- 501:(length(test3)+500)
deletionGG <- data.frame(window = names(test3), values = test3)
slidingGG2 <- merge(slidingGG, deletionGG, by = "window")
# Better: draw big deletions
bigdel <- test3[test3 > 500]
# Plot per-window mutation counts, fading windows that overlap deletions
# (alpha decreases with the deletion count in `values`).
f <- ggplot(data = slidingGG2, aes(x = window, y = value,
                                   color = factor(variable),
                                   alpha = 5/(values+1)))
# FIX: theme(...) was a separate statement, not added with "+", so the white
# background / grey grid styling was computed and silently discarded.
f +
  geom_line() +
  scale_x_continuous(breaks=seq(0, 182000, by = 10000)) +
  ylim(c(0, 100)) +
  theme(panel.background = element_rect(fill = "white", color = "grey20"),
        panel.grid.major = element_line(color = "grey70"),
        panel.grid.minor = element_line(color = "grey70"))
|
c7388c5d8102e255c7a74ea0576ae0ccc6d31808
|
b85cb92935407d40d03405ea09a7f96d005c1954
|
/scripts/0_1_extract_covariates.R
|
17c645190de4d58db2bef049b8ab4bcaa29723c0
|
[] |
no_license
|
enerhiya/Spatio-Temporal-Cross-Covariance-Functions-under-the-Lagrangian-Framework
|
0cccffd7a98d13e4f4c7353d9c42e923ae34dbdd
|
5084f24d9b89c9bff2794b0575a44d7ea0ccaf54
|
refs/heads/master
| 2021-06-18T19:50:38.829233
| 2021-02-17T17:09:46
| 2021-02-17T17:09:46
| 177,747,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,188
|
r
|
0_1_extract_covariates.R
|
# Extract MERRA-2 reanalysis covariates over Saudi Arabia from daily netCDF
# files and save one 4-D array per year (grid x time x 4 variables x 2 levels).
directory <- '/home/salvanmo/Desktop/'
root <- paste(directory, 'Spatio-Temporal-Cross-Covariance-Functions-under-the-Lagrangian-Framework/', sep = '')
source(file = paste(root, "Functions/load_packages.R",sep=''))
source(file = paste(root, "Functions/auxiliary_functions.R",sep=''))
# Build a SpatialPolygons outline of Saudi Arabia for point-in-polygon filtering.
saudi<- map("world", "Saudi", fill = TRUE)
IDs <- sapply(strsplit(saudi$names, ":"), function(x) x[1])
saudi <- map2SpatialPolygons(saudi, IDs=IDs, proj4string=CRS("+proj=longlat +datum=WGS84"))
for (yr in 1980:2019){
  data_array <- array(, dim = c(248, 550, 4, 2))
  for(VAR in 1:2){
    # MERRA-2 file-name stream number depends on the year range.
    if(yr < 1992){
      merra_ind <- 100
    }else if(yr >= 1992 & yr < 2001){
      merra_ind <- 200
    }else if (yr >= 2001 & yr < 2011){
      merra_ind <- 300
    }else{
      merra_ind <- 400
    }
    # mnth is hard-coded to January, so the month-length / zero-padding
    # branches below are effectively constant (mnth_end = 31, mo = "01").
    mnth = 1
    if(mnth == 2){
      mnth_end <- 28
    }else if(mnth %in% c(1, 3, 5, 7, 8, 10, 12)){
      mnth_end <- 31
    }else{
      mnth_end <- 30
    }
    if(mnth < 10){
      mo <- paste("0", mnth, sep='')
    }else{
      mo <- mnth
    }
    # Read day 1 of the month.
    ncname <- paste("/home/salvanmo/Downloads/MERRA2_", merra_ind, ".inst3_3d_asm_Nv.", yr, "0101.SUB.nc", sep='')
    ncin <- nc_open(ncname)
    dname1 <- "RH"
    dname2 <- "T"
    dname3 <- "U"
    dname4 <- "V"
    u_array <- ncvar_get(ncin,dname1)
    v_array <- ncvar_get(ncin,dname2)
    a_array <- ncvar_get(ncin,dname3)
    b_array <- ncvar_get(ncin,dname4)
    # get longitude and latitude
    lon <- ncvar_get(ncin,"lon")
    lat <- ncvar_get(ncin,"lat")
    nc_close(ncin)
    # Vertical model level differs per pass (65 vs 68).
    if(VAR == 1) lev <- 65 else lev <- 68
    U <- u_array[,, lev, ]
    V <- v_array[,, lev, ]
    A <- a_array[,, lev, ]
    B <- b_array[,, lev, ]
    lon.lat <- expand.grid(lon,lat)
    lon_new <- matrix(lon.lat[, 1], ncol = length(lat))
    lat_new <- matrix(lon.lat[, 2], ncol = length(lat))
    # 8 three-hourly time steps per day; stack grid points x time.
    test1 <- data.frame(rep(lon.lat[,1], 8), rep(lon.lat[,2], 8), c(U), c(V), c(A), c(B))
    for(day in 2:31){
      cat('READING NETCDF DATA ===> year: ', yr, 'month: ', mnth, 'day: ', day, '\n')
      if(day > 9){
        ncname <- paste("/home/salvanmo/Downloads/MERRA2_", merra_ind, ".inst3_3d_asm_Nv.", yr, mo, day,".SUB.nc", sep='')
      }else{
        ncname <- paste("/home/salvanmo/Downloads/MERRA2_", merra_ind, ".inst3_3d_asm_Nv.", yr, mo, "0",day,".SUB.nc", sep='')
      }
      ncin <- nc_open(ncname)
      u_array <- ncvar_get(ncin,dname1)
      v_array <- ncvar_get(ncin,dname2)
      nc_close(ncin)
      U <- u_array[,, lev, ]
      V <- v_array[,, lev, ]
      # NOTE(review): only u_array/v_array are re-read per day; a_array and
      # b_array still hold day-1 data, so A and B repeat day 1 for every
      # day -- confirm whether this is intended.
      A <- a_array[,, lev, ]
      B <- b_array[,, lev, ]
      test1 <- rbind(test1, data.frame(rep(lon.lat[,1], 8), rep(lon.lat[,2], 8), c(U), c(V), c(A), c(B)))
    }
    # NOTE(review): test1 has 6 columns but only 4 names are assigned here;
    # names<- on a data.frame errors on a length mismatch -- verify this runs.
    colnames(test1) <- c('lon', 'lat', 'Y1', 'Y2')
    # Keep only points that fall inside the Saudi Arabia polygon.
    spdf <- SpatialPointsDataFrame(coords = test1[, c("lon", "lat")], data = test1, proj4string = CRS("+proj=longlat +datum=WGS84"))
    saudi_data_orig <- data.frame(spdf[!is.na(over(spdf, as(saudi, "SpatialPolygons"))), ])
    N <- nrow(saudi_data_orig)/(8 * mnth_end)
    data_array_temp <- array(, dim = c(248, 550, 4))
    for(tt in 1:4){
      data_temp <- matrix(saudi_data_orig[, tt + 2], ncol = N, byrow = T)
      data_array_temp[, , tt] <- data_temp
    }
    data_array[, , , VAR] <- data_array_temp
  }
  save(data_array, file = paste(root, "Data/ncdf/covariates_", yr, '.RData', sep = ''))
}
|
99ed0d15ae35a29497cb93988b62b03bc396a4d6
|
7255055a2d8552621a5839aac9c0f45761ceabf6
|
/GitHub/monografia/Scripts/CalculateCompanyTechAn.R
|
3a0097c8add261d68a788315c0f8bc5e566e6abe
|
[] |
no_license
|
camurca1/monografia
|
e78072ee7520fd417df0d38629ba0655667abee0
|
ae3df56c192fa94f435ea50b2e439ab61056ae2a
|
refs/heads/master
| 2023-08-06T05:56:47.305072
| 2021-08-10T20:53:11
| 2021-08-10T20:53:11
| 373,916,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,587
|
r
|
CalculateCompanyTechAn.R
|
# Written by: Alexandre Camurça Silva de Souza
# Environment: RStudio Desktop 1.4.1717 "Juliet Rose"
# Step 5 - Compute technical indicators
# Clear memory and disable scientific notation.
# NOTE(review): rm(list = ls()) in a script wipes the user's entire workspace.
rm(list = ls())
options(scipen = 999)
#### Package management ####
# Packages used by this script
pacotes <- c("pracma", "RcppRoll", "tidyverse")
# Install any missing packages
pacotes_instalados <- pacotes %in% rownames(installed.packages())
if (any(pacotes_instalados == FALSE)) {
  install.packages(c(pacotes[!pacotes_instalados]))
}
# Load the packages
invisible(lapply(pacotes, library, character.only = TRUE))
#### funções para cálculos dos indicadores ####
# Add moving-average columns to a price table:
#   MME.lenta  - 150-period exponential MA (pracma::movavg, type "e")
#   MMS.rapida -  50-period simple MA      (pracma::movavg, type "s")
calc_MME <- function(x){
  x$MME.lenta <- movavg(x$price.adjusted, 150, "e")
  x$MMS.rapida <- movavg(x$price.adjusted, 50, "s")
  return(x)
}
# Add MACD columns: MACD1 is the difference of the 26- and 12-period EMAs,
# MACD2 its 9-period EMA (the signal line).
# NOTE(review): the conventional MACD line is EMA12 - EMA26; here the sign is
# reversed (EMA26 - EMA12). Confirm this is intentional, since calc_decisaoCV
# derives buy/sell signals from the MACD1 > MACD2 comparison.
calc_MACD <- function(x){
  x$MACD1 <- (movavg(x$price.adjusted, 26, "e")- movavg(x$price.adjusted, 12, "e"))
  x$MACD2 <- movavg(x$MACD1, 9, "e")
  return(x)
}
# Add stochastic-oscillator columns: %K (estK) from the 14-period rolling
# min/max of the adjusted price, and %D (estD) as its 3-period simple MA.
calc_est <- function(x){
  x$price.adjusted.min <- roll_min(x$price.adjusted,
                                   n = 14,
                                   fill = NA,
                                   align = "right")
  x$price.adjusted.max <- roll_max(x$price.adjusted,
                                   n = 14,
                                   fill = NA,
                                   align = "right")
  x$estK <- (x$price.adjusted - x$price.adjusted.min)/(x$price.adjusted.max - x$price.adjusted.min)
  x$estD <- movavg(x$estK, 3, "s")
  return(x)
}
# Combine the moving-average, MACD and stochastic indicators into a single
# per-row decision ("COMPRA" = buy, "VENDA" = sell, "NEUTRO" = neutral).
# Missing values are treated as "no signal" (0) throughout.
calc_decisaoCV <- function(x){
  # Direction of each raw indicator: +1 bullish, -1 bearish, 0 neutral.
  x$mov.MM <- if_else(x$MMS.rapida > x$MME.lenta, 1,
                      if_else(x$MMS.rapida < x$MME.lenta, -1, 0, missing = 0),
                      missing = 0)
  x$mov.MACD <- if_else(x$MACD1 > x$MACD2, 1, -1, missing = 0)
  # Stochastic: overbought on both lines -> sell (-1); oversold -> buy (+1).
  x$mov.est <- if_else(x$estK > 0.8 & x$estD > 0.8,
                       -1,
                       if_else(x$estK < 0.2 & x$estD < 0.2,
                               1,
                               0,
                               missing = 0),
                       missing = 0)
  # 3-day rolling sums: |sum| == 3 means three consecutive days pointing the
  # same way, i.e. a confirmed signal.
  x$aux.sinal.MM <- roll_sum(x$mov.MM, n = 3, align = "right", fill = 0)
  x$aux.sinal.MACD <- roll_sum(x$mov.MACD, n = 3, align = "right", fill = 0)
  # BUG FIX: the -3 branch originally yielded +1 (a buy signal after three
  # consecutive bearish MA days). It now yields -1, mirroring sinal.MACD
  # below -- confirm against the thesis methodology.
  x$sinal.MM <- if_else(x$aux.sinal.MM == 3,
                        1,
                        if_else(x$aux.sinal.MM == -3,
                                -1,
                                0,
                                missing = 0),
                        missing = 0)
  x$sinal.MACD <- if_else(x$aux.sinal.MACD == 3,
                          1,
                          if_else(x$aux.sinal.MACD == -3,
                                  -1,
                                  0,
                                  missing = 0),
                          missing = 0)
  # Majority vote of the three components: at least two must agree.
  x$aux.sinal.decisao <- x$sinal.MACD + x$sinal.MM + x$mov.est
  x$sinal.decisao <- if_else(x$aux.sinal.decisao > 1,
                             1,
                             if_else(x$aux.sinal.decisao < -1,
                                     -1,
                                     0,
                                     missing = 0),
                             missing = 0)
  x$decisao.final <- if_else(x$sinal.decisao == 1,
                             "COMPRA",
                             if_else(x$sinal.decisao == -1,
                                     "VENDA",
                                     "NEUTRO",
                                     missing = "NEUTRO"),
                             missing = "NEUTRO")
  # Drop the intermediate indicator columns (positions 5:20), keeping the
  # original columns plus decisao.final.
  x[ ,5:20] <- NULL
  return(x)
}
#### Load saved tables ####
precos.empresas <- readRDS("Data/precos_acoes")
precos.analise <- readRDS("Data/retorno_volatilidade_acoes")
precos.analise$retornos <- NULL
precos.analise$volatilidade <- NULL
#### Compute indicators ####
# Keep only companies with a complete series (1736 observations -- presumably
# the number of trading days in the sample period; confirm).
obs.completas <- precos.analise %>%
  count(CD_CVM) %>%
  filter(n==1736)
precos.analise <- left_join(obs.completas, precos.analise, by= "CD_CVM")
names(precos.analise)[1] <- "CD_CVM"
precos.analise$n <- NULL
# Apply each indicator per ticker, then reassemble in the original order.
l.precos <- split(precos.analise, precos.analise$ticker)
l.precos <- lapply(l.precos, calc_MME)
l.precos <- lapply(l.precos, calc_MACD)
l.precos <- lapply(l.precos, calc_est)
l.precos <- lapply(l.precos, calc_decisaoCV)
precos.analise <- unsplit(l.precos, precos.analise$ticker)
saveRDS(precos.analise, "Data/indAT")
#### Memory cleanup ####
rm(list = ls())
gc()
|
e4cc19edf12bb8e1c60f788f5e891693e0d8745a
|
ce02b8bec171866b782e9fa700ab86863a6e002f
|
/R/Aoptrcd.maeT.R
|
ffb5ec8db59455d37abf3ee3b8c29022a5d13292
|
[] |
no_license
|
cran/optrcdmaeAT
|
f8d66c044adda02648e07c7846d75e48fb34821e
|
20d1a5e82d018ba60a0839dd720f168c5f0f2f73
|
refs/heads/master
| 2021-01-19T13:05:39.050377
| 2017-04-12T13:24:01
| 2017-04-12T13:24:01
| 88,062,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,276
|
r
|
Aoptrcd.maeT.R
|
#Subsection 2.1: Function for search of A-optimal or near-optimal row-column designs
# SubSubsection 2.1.1 (Function for construction of A-optimal row-column designs using treatment exchange algorithm)
# Search for an A-optimal 2-row row-column (microarray) design with trt.N
# treatments and col.N columns (arrays) via a treatment-exchange algorithm.
# Arguments:
#   trt.N, col.N  - number of treatments / arrays
#   theta         - dye-effect parameter passed to cmatrcd.mae
#   nrep          - number of random restarts
#   itr.cvrgval   - convergence window: stop a pass when the score has not
#                   changed over the last itr.cvrgval iterations
# Returns a list with the design dimensions, parameters, the best design
# (2 x col.N matrix, rows = dyes) and its A-score (trace of the g-inverse of
# the information matrix; smaller is better).
# Relies on package helpers: intcrcd.mae, cmatrcd.mae, ginv (MASS),
# rankMatrix (Matrix), tkmessageBox (tcltk).
Aoptrcd.maeT<-function(trt.N,col.N,theta,nrep,itr.cvrgval) {
  #House keeping
  del.1<-matrix(1000,trt.N,3)
  desbest.1<-matrix(0,nrep*2,col.N)
  aoptbest.1<-matrix(0,nrep,2)
  #Start iteration
  for(irep in 1:nrep){
    #Initial design with its corresponding Ascore value
    des<-intcrcd.mae(trt.N,col.N)
    # For square designs (trt.N == col.N > 3) the first trt.N-2 restarts use
    # a deterministic family of cyclic starting designs instead.
    if(trt.N==col.N&trt.N>3&irep<(trt.N-1)) {in.desns=matrix(0,(trt.N-3)*2,col.N)
    in.desns0=rbind(seq(1,trt.N),c(seq(1,trt.N)[2:trt.N],1))
    for(i in 1:(trt.N-3)) {in.desns01=cbind(rbind(seq(1,(trt.N-i)),c(seq(1,(trt.N-i))[2:(trt.N-i)],1)), rbind(rep(1,i),((trt.N-i+1):trt.N))); in.desns[c((i-1)*2+1,i*2),]=in.desns01}
    in.desns=rbind(rbind(seq(1,trt.N),c(seq(1,trt.N)[2:trt.N],1)),in.desns)
    des=in.desns[c((irep-1)*2+1,irep*2),]}
    cmat<-cmatrcd.mae(trt.N,col.N,theta,des)
    aopt=sum(diag(ginv(cmat)))
    acold=aopt
    descold=t(des)
    #deletion difference
    cdel=100
    # Phase 1: treatment exchange. For every array i and dye slot m, try each
    # candidate treatment j; keep the exchange with the best (lowest) A-score.
    while( abs(cdel)>=0.000000001){
      i=1;
      ivalacold={}
      for(i in 1:col.N){
        for (m in 1:2){
          j=1;
          for(j in 1:trt.N){
            temp=descold[i,]
            # Skip exchanges that would duplicate a treatment on the array.
            if(m==1) {
              if(j==descold[i,1]|j==descold[i,2]) {aopt=acold; del.1[j,]<-c(descold[i,1],(acold-aopt),aopt); next} else { descold[i,]=c(j,descold[i,2])}}
            if(m==2) {
              if(descold[i,2]==j|j==descold[i,1]) {aopt=acold; del.1[j,]<-c(descold[i,2],(acold-aopt),aopt); next} else { descold[i,]=c(descold[i,1],j)}}
            # Reject candidates that make the design disconnected or the
            # information matrix (near-)singular.
            trtin<-contrasts(as.factor(t(descold)),contrasts=FALSE)[as.factor(t(descold)),]
            R.trt<-t(trtin)%*%trtin
            if (rankMatrix(R.trt)[1]<trt.N) {aopt=acold; descold[i,]=temp; if(m==1) {del.1[j,]<-c(descold[i,1],(acold-aopt),aopt)} else {
              del.1[j,]<-c(descold[i,2],(acold-aopt),aopt)}; next}
            cmato=cmatrcd.mae(trt.N,col.N, 0,t(descold))
            egv<-sort(eigen(cmato)$values)
            if(egv[2]<0.000001) {aopt=acold; descold[i,]=temp; if(m==1){del.1[j,]<-c(descold[i,1],(acold-aopt),aopt)} else {
              del.1[j,]<-c(descold[i,2],(acold-aopt),aopt)}; next}
            cmat=cmatrcd.mae(trt.N,col.N,theta,t(descold))
            aopt=sum(diag(ginv(cmat)))
            del.n<-del.1[j,]<-c(j,(acold-aopt),aopt)
            descold[i,]=temp
          }
          # Commit the best candidate exchange for this slot, if any.
          del.1<-del.1[order(del.1[,3]),]
          delbest=t(del.1[1,])
          if (m==1) {
            if (delbest[1]==descold[i,2]) {descold[i,]=descold[i,]} else
            {descold[i,]=c(delbest[1],descold[i,2]); cdel=delbest[2]; acold=delbest[3]}} else {
              if (descold[i,1]==delbest[1]) {descold[i,]= descold[i,]} else
              {descold[i,]=c(descold[i,1],delbest[1]); cdel=delbest[2]; acold=delbest[3]}}
        }
        ivalacold=rbind(ivalacold, c(i,acold))
        # Early exit when the score has been flat for itr.cvrgval arrays.
        if(i>itr.cvrgval) if(all(ivalacold[c(i-(itr.cvrgval-2),i),2]==ivalacold[i-(itr.cvrgval-1),2])) break
      }
      #print(c(000,irep,acold,cdel,000))
    }
    #aopt0=acold
    cdel<-1000
    # Phase 2: dye swaps. Try reversing the dye assignment on each array and
    # keep the single best reversal; repeat until no improvement.
    while( abs(cdel)>=0.000000001){
      aopt=acold
      #desg<-graph(t(descold))
      #plot(desg)
      del.2<-matrix(1000,col.N+1,3)
      del.2[col.N+1,]<-c(col.N+1,0,acold)  # sentinel row = "no swap"
      for(i in 1:col.N){
        temp=descold[i,]
        descold[i,]=rev(descold[i,])
        cmato=cmatrcd.mae(trt.N,col.N, 0,t(descold))
        egv<-sort(eigen(cmato)$values)
        if(egv[2]<0.000001) {aopt2=1000; del.2[i,]<-c(i,(acold-aopt2),aopt2); next}
        cmat=cmatrcd.mae(trt.N,col.N,theta,t(descold))
        aopt2=sum(diag(ginv(cmat)))
        del.2[i,]<-c(i,(acold-aopt2),aopt2)
        descold[i,]=temp
      }
      del.2<-del.2[order(del.2[,3]),]
      delbest=t(del.2[1,])
      if(delbest[1]<=col.N) {descold[delbest[1],]=rev(descold[delbest[1],]); cdel=delbest[2]; acold=delbest[3]} else {cdel=0}
      #print(delbest[1]<=col.N)
      #desg<-graph(t(descold))
      #plot(desg,main=paste(aopt-acold,sep=" / "))
      #print(del.2)
      #print(cdel)
      #cat("\n", aopt-acold,"\n")
      #cdel<-aopt-acold
      ##print(cdel)
      #print(c(111,irep,acold,aopt0-acold,cdel,111))
    }
    #desg<-graph(t(descold))
    #plot(desg,main=paste(aopt-acold,sep=" / "))
    #print(del.2)
    #print(cdel)
    #cat("\n", aopt-acold,"\n")
    #"============================================================="
    # Record this restart's final design (2 rows per restart) and score.
    next.it<- if (irep==1) {desbest.1=t(descold)} else {desbest.1=rbind(desbest.1,t(descold))}
    aoptbest.1[irep,]=c(irep,acold)
    #print(c(222,irep,acold,aopt0-acold,222))
  }
  # Pick the restart with the smallest A-score and format the result.
  best=aoptbest.1[order(aoptbest.1[,2]),]
  #print(best)
  nb=best[1,1]
  Ascore<-best[1,2]
  Aoptde<- desbest.1[c((nb-1)*2+1,nb*2),]
  tkmessageBox(title="Search completed",message=paste("Search completed",sep=""))
  cnames=paste0("Ary",1:col.N)
  dimnames(Aoptde)=list(c("Dye 1:", "Dye 2:"),cnames)
  Aopt_sum2<-list("v"=trt.N,"b"=col.N,theta=theta,nrep=nrep,itr.cvrgval=itr.cvrgval, "OptdesF"=Aoptde,"Optcrtsv" =Ascore)
  return(Aopt_sum2)
}#End of SubSubsection 2.1.1 (Aoptrcd.maeT function) construction of A-optimal row-column design using treatment exchange algorithm
|
0f651f9bd2fd5913800f07288c26882466df9aaf
|
6eed4337c1a918c2e615198699b8271ac8d25ffc
|
/R_basics/3Rloop.R
|
3642437685b212d2b6e1b76f72c2da20185b4568
|
[] |
no_license
|
Niks056/R_basics
|
b1653d6d0cb0d6f31033fa1c822a513272c5d43d
|
67fb11246ebb5757a0f3d19543361bae23586064
|
refs/heads/master
| 2022-12-12T17:18:54.513429
| 2020-09-10T10:00:21
| 2020-09-10T10:00:21
| 294,370,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,179
|
r
|
3Rloop.R
|
# --- for-loop examples -------------------------------------------------------
# Print each element of a vector.
x <- c(5, 8, 10, 15)
for (val in x) {
  print(val)
}
# Running total, printed after each addition.
numbers <- c(6, 5, 3, 8, 4)
n <- 0
for (val in numbers) {
  n <- n + val
  print(paste('TotalAmount:', n))
}
# Classify marks as PASS/FAIL, keyed by roll number.
rollno <- c(1, 2, 3, 4, 5)
marks <- c(70, 40, 55, 35, 80)
n <- length(marks)
result <- vector()
count <- 1
for (val in marks) {
  rslt <- if (val >= 50) "PASS" else "FAIL"
  result[count] <- rslt
  count <- count + 1
}
names(result) <- rollno
result
#while loop
# Print 1, 2, 3.
i <- 1
while(i<4){
  print(i)
  i <- i+1
}
# Loan-approval demo: approve loans in order until the accumulated repayment
# (principal plus one year of 8% interest) reaches 12000.
# NOTE(review): `age1` is defined but never used; and if totalamt never
# reached 12000, `i` would run past the 5 elements of principal1 -- here the
# totals sum to 15120, so the loop does terminate.
age1 <- c(30,65,60,45,80)
principal1 <- c(1000,4000,3000,4000,2000)
NoofYears <-1
irate <- 8
approved<-rep("NO",times=5)
approved
i <-1
totalamt <- 0
while (totalamt < 12000) {
  totalamt=totalamt+(principal1[i]*(1+irate/100)^NoofYears)
  approved[i] <- "YES"
  i<- i+1
}
print(approved)
# Variant with break: stop BEFORE approving the loan that pushes the total
# over 12000 (so one fewer approval than above).
age1 <- c(30,65,60,45,80)
principal1 <- c(1000,4000,3000,4000,2000)
NoofYears <-1
irate <- 8
approved<-rep("NO",times=5)
i <-1
totalamt <- 0
while (totalamt < 12000) {
  totalamt=totalamt+(principal1[i]*(1+irate/100)^NoofYears)
  if(totalamt>12000)
  {
    break
  }
  approved[i] <- "YES"
  i<- i+1
}
print(approved)
|
62267595e091f08cc7002f370fee7c701fb5be10
|
f94fdac9ef22ded8a95e6cc3758795314c2f9814
|
/man/ORDERS.Rd
|
87f2320b8e2827758607861e7d7d800a1b1f8cc4
|
[] |
no_license
|
shawngiese/classic.models
|
0d4be31582b4552931d145cd5a40f16345aa672a
|
56c1290a278748a3796e1a9d96f86fe5a25cd591
|
refs/heads/master
| 2021-06-10T21:17:18.368100
| 2020-11-24T20:34:14
| 2020-11-24T20:34:14
| 100,576,465
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,020
|
rd
|
ORDERS.Rd
|
\name{ORDERS}
\alias{ORDERS}
\docType{data}
\title{Order data}
\description{
This data describes the information about each order, the order status, comments by sales, and the dates of order events such as the order date.
}
\usage{data("ORDERS")}
\format{
A data frame with 326 observations on the following 7 variables.
\describe{
\item{\code{ORDERNUMBER}}{a numeric vector}
\item{\code{ORDERDATE}}{date the order was created}
\item{\code{REQUIREDDATE}}{date the order is required}
\item{\code{SHIPPEDDATE}}{date the order shipped}
\item{\code{STATUS}}{a character vector}
\item{\code{COMMENTS}}{a character vector}
\item{\code{CUSTOMERNUMBER}}{a numeric vector}
}
}
\details{
Each row describes a single order placed by a customer in the Classic Models
sample database; orders are linked to customers via \code{CUSTOMERNUMBER}.
}
\source{
http://www.eclipse.org/birt/documentation/sample-database.php
}
\references{
http://www.eclipse.org/legal/epl/notice.php
}
\examples{
data(ORDERS)
## maybe str(ORDERS) ; plot(ORDERS) ...
ORDERS$ORDERDATE[2]
}
\keyword{datasets}
|
4cea227d1e7a86c3f25b9c467875389c28776387
|
7ccbc70f0348ce89aa72e28296e47d751f8a1938
|
/rebuild/ConvertToClioInfraLayout.R
|
0f9c321d2533f42a0ba981df2bfb60fa19f45a2f
|
[
"MIT"
] |
permissive
|
CLARIAH/wp4-clioinfra
|
0ccdb61ffe7799442d1cce8c65ab72cf565c384c
|
52a9f16c299d4286123b6ee16d29bed3afdaa630
|
refs/heads/master
| 2022-09-26T14:22:28.060810
| 2022-09-09T13:45:06
| 2022-09-09T13:45:06
| 83,451,537
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43,697
|
r
|
ConvertToClioInfraLayout.R
|
# Give rJava (used by the xlsx package) a 10 GB heap before it loads.
options( java.parameters = "-Xmx10g")
library(rJava)
library(WriteXLS)
library(xlsx)
library(data.table)
library(tcltk)
library(countrycode)
# NOTE(review): setwd via rstudioapi only works inside an RStudio session.
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
source('hwlfunctions.R')
# Toggle for adding pre-1500 year columns (Maddison data) to the layout.
# FIX: use the literal FALSE rather than `F`, which is an ordinary variable
# and can be reassigned elsewhere.
AddColsForMaddision <- FALSE
# Load ISO-code lookup tables and the Clio Infra layout template.
ISO2plus <- read.xlsx('../UPDATE 20210315/ISO_Codes_2_3.xlsx',sheetIndex = 1, stringsAsFactors = F)
ISO3s <- read.xlsx('../UPDATE 20210315/ISO3REF.xls',sheetIndex = 1, stringsAsFactors = F)
ClioLayoutBase<- read.xlsx('../ClioLayout.xlsx', sheetIndex = 1, startRow = 1,
                           check.names = F, stringsAsFactors = F,
                           colClasses = c('character',"numeric","numeric","character",rep("numeric",518)))
#MAD <- read.xlsx('maddison19902011regionalaverages22.xlsx',sheetIndex = 1, stringsAsFactors = F, startRow = 1,endRow = 2)
#MAD <- as.character(MAD[1,c(2:ncol(MAD))])
# Optionally extend the layout with pre-1500 year columns used by the
# Maddison series.
if (AddColsForMaddision){
  YearsToAdd <- c(1,730,1000,1150,1280:1499)
  w1 = ncol(ClioLayout)+1
  w2 = w1+length(YearsToAdd)-1
  ClioLayout[c(w1:w2)] <- NA
  names(ClioLayout)[c(w1:w2)] <- as.character(YearsToAdd)
  ClioLayout <- ClioLayout[,c("Webmapper code","Webmapper numeric code","ccode","country name","start year","end year","ISO3",as.character(c(YearsToAdd,1500:2050)))]
}
# Attach an ISO3 code to each layout row by country-name lookup; report
# current countries (end year 2012) that failed to match.
ClioLayoutBase$ISO3 <- as.character(NA)
for (i in c(1:nrow(ClioLayoutBase))){
  if (length(ISO3s$iso3[which(ISO3s$Country==ClioLayoutBase$`country name`[i])])>0){
    ClioLayoutBase$ISO3[i] <-
      ISO3s$iso3[which(ISO3s$Country==ClioLayoutBase$`country name`[i])]
  } else {
    if (ClioLayoutBase$`end year`[i]==2012){
      print(ClioLayoutBase$`country name`[i])
    }
  }
}
# in Clio template DRC Congo is ZAR (set by my script above actually)
# here I am going to use the COD convension from JL's GDP:
ClioLayoutBase$ISO3[which(grepl("DRC",ClioLayoutBase$`country name`, fixed = T))] <- 'COD'
# Append territories missing from the Clio layout template.
# REFACTOR: the original repeated the same nine-line copy/overwrite/rbind
# chunk once per territory; the helper below produces identical rows.
# (Only difference: the scratch variable `temp` is no longer left behind in
# the global environment.)
append_territory <- function(layout, name, iso3, start_year = as.numeric(NA),
                             end_year = 2012) {
  # Clone the first row so the column types match, blank the identifiers,
  # then fill in the territory-specific fields and append.
  row <- layout[1, ]
  row$`Webmapper code` <- as.character(NA)
  row$`Webmapper numeric code` <- as.numeric(NA)
  row$ccode <- as.numeric(NA)
  row$`country name` <- name
  row$`start year` <- start_year
  row$`end year` <- end_year
  row$ISO3 <- iso3
  rbind(layout, row)
}
# add PSE:
ClioLayoutBase <- append_territory(ClioLayoutBase, "State of Palestine", 'PSE', start_year = 1950)
# add COK Cook Islands
ClioLayoutBase <- append_territory(ClioLayoutBase, "Cook Islands", 'COK')
# add GUF French Guiana
ClioLayoutBase <- append_territory(ClioLayoutBase, "French Guiana", 'GUF')
# add MAC Macau, China
ClioLayoutBase <- append_territory(ClioLayoutBase, "Macau, China", 'MAC')
# add VIR Virgin Islands
ClioLayoutBase <- append_territory(ClioLayoutBase, "Virgin Islands", 'VIR')
# add "ANT" Netherlands Antilles
ClioLayoutBase <- append_territory(ClioLayoutBase, "Netherlands Antilles", 'ANT')
# add "CYM" Cayman Islands
ClioLayoutBase <- append_territory(ClioLayoutBase, "Cayman Islands", 'CYM')
# add "IMN" Isle of Man
ClioLayoutBase <- append_territory(ClioLayoutBase, "Isle of Man", 'IMN')
# add "JEY" Jersey
ClioLayoutBase <- append_territory(ClioLayoutBase, "Jersey", 'JEY')
# East Germany has no name match in the ISO table; set its code manually.
ClioLayoutBase$ISO3[which(ClioLayoutBase$`country name`=='Germany Democratic Republic')] <- "DDR"
# `total` = row indices of the layout entries that will receive data:
# all "current" countries (end year 2012) ...
total <- which(ClioLayoutBase$`end year`==2012)
#sort(unique(df$iso3))[which(!sort(unique(df$iso3)) %in% unique(ClioLayout$ISO3[which(ClioLayout$`end year`==2012)]))]
# "CSK" 1992 "HKG" 1945 "PRI" 1945 "SUN" 1991 "YUG" 1946 1992
# ... plus selected historical entities/territories present in the sources:
ToAdd <- which(ClioLayoutBase$`end year`==1992 & ClioLayoutBase$ISO3=='CSK')
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='HKG'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='PRI'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='REU'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='BMU'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='GLP'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='GRL'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='MTQ'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='NCL'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='GIB'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='GUM'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`end year`==1945 & ClioLayoutBase$ISO3=='PYF' & ClioLayoutBase$`Webmapper numeric code`==867))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`start year`==1946 & ClioLayoutBase$`end year`==1991 & ClioLayoutBase$ISO3=='SUN'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`start year`==1946 & ClioLayoutBase$`end year`==1992 & ClioLayoutBase$ISO3=='YUG'))
ToAdd <- c(ToAdd,which(ClioLayoutBase$`start year`==1954 & ClioLayoutBase$`end year`==1990 & ClioLayoutBase$ISO3=='DDR'))
total <- c(total,ToAdd)
# ... minus rows that would duplicate another entry for the same country:
ToRemove <- which(ClioLayoutBase$`start year`==1956 & ClioLayoutBase$`end year`==2012 & ClioLayoutBase$ISO3=='MAR')
ToRemove <- c(ToRemove,which(ClioLayoutBase$`start year`==1946 & ClioLayoutBase$`end year`==2012 & ClioLayoutBase$ISO3=='CAN'))
ToRemove <- c(ToRemove,which(ClioLayoutBase$`start year`==2011 & ClioLayoutBase$`end year`==2012 & ClioLayoutBase$ISO3=='SDN'))
total <- total[!total %in% ToRemove]
# Sanity check: any duplicated ISO3 among the selected rows is printed here.
ttt <- ClioLayoutBase$ISO3[total]
ttt[duplicated(ttt)]
rm(ttt,ToAdd,ToRemove)
#### 24 new variables overall ####
#### Poverty CBN OK #####
# Read the OECD poverty-rate table (comma decimal separator) and normalise
# the key column names.
pov = fread("OECD_PovertyRates_Final.csv",
            dec = ",")
setnames(pov, "ISO3", "iso3")
setnames(pov, "Year", "year")
pov = pov[!is.na(PovRate)]
# Fill the Clio layout year columns (1820-2018) with PovRate per country.
# NOTE(review): the inner assignment assumes every (iso3, year) pair exists
# in pov; a missing pair yields a length-zero replacement, which errors --
# confirm the input is complete.
ClioLayout <- ClioLayoutBase
for (i in total){
  if (ClioLayout$ISO3[i] %in% pov$iso3){
    for (j in c(1820:2018)){
      ClioLayout[i,as.character(j)] <- pov$PovRate[which(pov$iso3==ClioLayout$ISO3[i] & pov$year==j)]
    }
  } else {
    #print(ClioLayout$ISO3[i])
  }
}
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1820:2018))])
# 26268
if (!sum(ttt[as.character(c(1820:2018))])==nrow(pov)){
  stop(paste0('Not all data were transfered: ','Poverty CBN'))
} else {
  print(paste0('OK! All data were transfered: ','Poverty CBN'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Global_Extreme_Poverty_Cost_of_Basic_Needs.xlsx",row.names = F)
#### Poverty DAD OK #####
# Same transfer for the dollar-a-day measure (PCN1.9 column).
ClioLayout <- ClioLayoutBase
for (i in total){
  if (ClioLayout$ISO3[i] %in% pov$iso3){
    for (j in c(1820:2018)){
      ClioLayout[i,as.character(j)] <- pov$PCN1.9[which(pov$iso3==ClioLayout$ISO3[i] & pov$year==j)]
    }
  } else {
    #print(ClioLayout$ISO3[i])
  }
}
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1820:2018))])
# 26268
if (!sum(ttt[as.character(c(1820:2018))])==nrow(pov)){
  stop(paste0('Not all data were transfered: ','Poverty DAD'))
} else {
  print(paste0('OK! All data were transfered: ','Poverty DAD'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Global_Extreme_Poverty_Dollar_a_Day.xlsx",row.names = F)
# Drop everything except the objects the later sections still need.
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','AddColsForMaddision','total'))])
#### GDP & Pop OK ####
##### gdppc_mix #####
# Maddison-update GDP per capita: read the wide country-by-year sheet,
# melt to long (year, iso3, gdppc_mix), and patch country names for
# historical states before transferring into the Clio layout.
gdp3 = readxl::read_excel("Data_Maddison update 2020 preliminary_2.xlsx",
sheet = "Country data", skip = 2)
setDT(gdp3)
gdp3 = melt(
data = gdp3,
id.var = "year",
value.name = "gdppc_mix",
variable.name = "iso3",
variable.factor = FALSE)
gdp3[, Country:=countrycode(iso3, "iso3c", "country.name")]
# UAE 1991-1992 values deliberately dropped (set to NA).
gdp3[iso3 == "ARE" & year %in% 1991:1992, gdppc_mix := NA]
gdp3[iso3 == "CSK", Country:='Czechoslovakia']
gdp3[iso3 == "SUN", Country:='USSR']
gdp3[iso3 == "YUG", Country:='Yugoslavia']
gdp_mix <- gdp3
rm(gdp3)
gdp_mix[, gdppc_mix := as.numeric(gdppc_mix)]
gdp_mix = gdp_mix[!is.na(year)]
gdp_mix = gdp_mix[!is.na(gdppc_mix)]
df <- gdp_mix
df <- subset(df,df$year>=1500)
# Standard transfer loop: for each target row, copy each observed
# (iso3, year) value into the matching year column.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$gdppc_mix))])){
if (length(temp$gdppc_mix[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$gdppc_mix[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# check if there are iso3 without entries:
df$iso3year <- paste0(df$iso3,df$year)
df <- df[!duplicated(df$iso3year),]
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 18195
ddd <- df$gdppc_mix
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 18195
# Abort if the count of filled cells differs from the count of non-NA
# source values (i.e. some observations failed to transfer).
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','GDP mix'))
} else {
print(paste0('OK! All data were transfered: ','GDP mix'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"GDP_Per_Capita_Mix.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
#### Life Expectancy OK ####
##### Level Male #####
# Life-expectancy source: per-(iso3, year, sex) levels and Gini measures.
lif = readxl::read_excel("Chapter6Data.xls", "Sheet1")
setDT(lif)
setnames(lif, "iso_code", "iso3")
lif[iso3 == "GER", iso3 := "DEU"] # the horror
lif = lif[iso3 != "ANT"]
if (any(duplicated(lif, by = c("iso3", "year", "sex")))){
warning("duplicates")
}
lif <- lif[,c('iso3','year','sex',"LifeExpectancy_Level", "LifeExpectancy_Gini", "LifeExpectancy_RelativeGini")]
df <- lif
rm(lif)
# Recode the sex labels to 'Male'/'Female'.  NOTE(review): this assumes
# unique(df$sex) returns Male first and Female second — order depends on
# the source file; verify if the file changes.
Sex <- unique(df$sex)
df$sex[which(df$sex==Sex[1])] <- 'Male'
df$sex[which(df$sex==Sex[2])] <- 'Female'
#df <- df[which((df$sex=='Male'))]
# Transfer male life-expectancy levels; `ttt` collects the source row
# indices actually used, for the untransferred-iso3 diagnostics below.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
ttt <- c()
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$LifeExpectancy_Level))])){
if (length(temp$LifeExpectancy_Level[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Male')])>0){
ClioLayout[i,as.character(j)] <- temp$LifeExpectancy_Level[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Male')]
ttt <- c(ttt,which(df$iso3==ClioLayout$ISO3[i] & df$year==j & df$sex=='Male'))
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# Diagnostics printed to console: which iso3 had rows never transferred,
# and which source iso3 are absent from the Clio layout entirely.
unique(df$iso3[!c(1:nrow(df)) %in% ttt])
sort(unique(df$iso3[which(!df$iso3 %in% ClioLayout$ISO3 & !is.na(df$LifeExpectancy_Level))]))
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 7581
ddd <- df$LifeExpectancy_Level[which(df$sex=='Male')]
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 7581
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','LE Male'))
} else {
print(paste0('OK! All data were transfered: ','LE Male'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Life_Expectancy_Level_Male.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','df','total'))])
##### Level Female #####
# Same transfer pattern as the male block above, filtered on sex=='Female';
# writes Life_Expectancy_Level_Female.xlsx.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$LifeExpectancy_Level))])){
if (length(temp$LifeExpectancy_Level[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Female')])>0){
ClioLayout[i,as.character(j)] <- temp$LifeExpectancy_Level[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Female')]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 7562
ddd <- df$LifeExpectancy_Level[which(df$sex=='Female')]
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 7562
# add the double entries (meaning that they appear twice in the clio template with end year==2012) of CAN, Morocco, Sudan
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','LE Female'))
} else {
print(paste0('OK! All data were transfered: ','LE Female'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Life_Expectancy_Level_Female.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','df','total'))])
##### Gini Male #####
# Transfer of LifeExpectancy_Gini for males; same loop pattern as above.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$LifeExpectancy_Gini))])){
if (length(temp$LifeExpectancy_Gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Male')])>0){
ClioLayout[i,as.character(j)] <- temp$LifeExpectancy_Gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Male')]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 7581
ddd <- df$LifeExpectancy_Gini[which(df$sex=='Male')]
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 7581
# add the double entries (meaning that they appear twice in the clio template with end year==2012) of CAN, Morocco, Sudan
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','LE Gini Male'))
} else {
print(paste0('OK! All data were transfered: ','LE Gini Male'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Life_Expectancy_Gini_Male.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','df','total'))])
##### Gini Female #####
# Transfer of LifeExpectancy_Gini for females; same loop pattern as above.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$LifeExpectancy_Gini))])){
if (length(temp$LifeExpectancy_Gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Female')])>0){
ClioLayout[i,as.character(j)] <- temp$LifeExpectancy_Gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Female')]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 7562
ddd <- df$LifeExpectancy_Gini[which(df$sex=='Female')]
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 7562
# add the double entries (meaning that they appear twice in the clio template with end year==2012) of CAN, Morocco, Sudan
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','LE Gini Female'))
} else {
print(paste0('OK! All data were transfered: ','LE Gini Female'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Life_Expectancy_Gini_Female.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','df','total'))])
##### Relative Gini Male #####
# Transfer of LifeExpectancy_RelativeGini for males; same loop pattern.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$LifeExpectancy_RelativeGini))])){
if (length(temp$LifeExpectancy_RelativeGini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Male')])>0){
ClioLayout[i,as.character(j)] <- temp$LifeExpectancy_RelativeGini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Male')]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 6108
ddd <- df$LifeExpectancy_RelativeGini[which(df$sex=='Male')]
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 6108
# add the double entries (meaning that they appear twice in the clio template with end year==2012) of CAN, Morocco, Sudan
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','LE Relative Gini Male'))
} else {
print(paste0('OK! All data were transfered: ','LE Relative Gini Male'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Life_Expectancy_Relative_Gini_Male.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','df','total'))])
##### Relative Gini Female #####
# Transfer of LifeExpectancy_RelativeGini for females; same loop pattern.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$LifeExpectancy_RelativeGini))])){
if (length(temp$LifeExpectancy_RelativeGini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Female')])>0){
ClioLayout[i,as.character(j)] <- temp$LifeExpectancy_RelativeGini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j & temp$sex=='Female')]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 6086
ddd <- df$LifeExpectancy_RelativeGini[which(df$sex=='Female')]
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 6109
# NOTE(review): the recorded counts above disagree (6086 transferred vs
# 6109 source values), which would trigger the stop() below — presumably
# resolved by the CAN/Morocco/Sudan double-entry fix mentioned next;
# verify against a fresh run.
# add the double entries (meaning that they appear twice in the clio template with end year==2012) of CAN, Morocco, Sudan
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','LE Relative Gini Female'))
} else {
print(paste0('OK! All data were transfered: ','LE Relative Gini Female'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Life_Expectancy_Relative_Gini_Female.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
#### Education OK ####
##### Pop 15+ #####
# Education source (rei1.xlsx): per-(country, year) population 15+,
# average years of schooling, education Gini/SD, and relative edu Gini.
# Columns are renamed to stable identifiers before the transfer loops.
edu = read.xlsx("rei1.xlsx", sheetName = "all", check.names = T, stringsAsFactors = F)
setDT(edu)
edu[, iso3 := countrycode(country, "country.name", "iso3c")]
edu[country == "Yugoslavia", iso3 := "YUG"]
edu[country == "USSR", iso3 := "SUN"]
edu <- edu[,c('year', 'iso3','pop15..MM','av.years.MM',
'av.years.gini.MM','st.dev','rei')]
names(edu) <- c('year', 'iso3',
'pop15plus',
'aveduLLMM',
'eduginiLLMM',
'edusd',
'relative_edu_gini')
df <- edu
# Standard transfer loop for pop15plus.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$pop15plus))])){
if (length(temp$pop15plus[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$pop15plus[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 12018
ddd <- df$pop15plus
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 12018
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Edu: Pop 15+'))
} else {
print(paste0('OK! All data were transfered: ','Edu: Pop 15+'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Education_Population_Above_15.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### aveduLLMM #####
# Transfer of average years of education (aveduLLMM) from the `df` built
# in the Pop 15+ section; same loop pattern.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$aveduLLMM))])){
if (length(temp$aveduLLMM[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$aveduLLMM[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 10905
ddd <- df$aveduLLMM
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 10905
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Edu: aveduLLMM'))
} else {
print(paste0('OK! All data were transfered: ','Edu: aveduLLMM'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Education_Average_LLMM.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### eduginiLLMM #####
# Transfer of the education Gini (eduginiLLMM); same loop pattern.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$eduginiLLMM))])){
if (length(temp$eduginiLLMM[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$eduginiLLMM[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 10879
ddd <- df$eduginiLLMM
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 10879
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Edu: eduginiLLMM'))
} else {
print(paste0('OK! All data were transfered: ','Edu: eduginiLLMM'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Education_Gini_LLMM.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### edusd #####
# Transfer of the education standard deviation (edusd); same loop pattern.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$edusd))])){
if (length(temp$edusd[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$edusd[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 10879
ddd <- df$edusd
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 10879
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Edu: edusd'))
} else {
print(paste0('OK! All data were transfered: ','Edu: edusd'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Education_SD.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### relative_edu_gini #####
# Transfer of the relative education Gini; same loop pattern.  This is the
# last section using the education `df`, so the final rm() drops it.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$relative_edu_gini))])){
if (length(temp$relative_edu_gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$relative_edu_gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 10879
ddd <- df$relative_edu_gini
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 10879
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Edu: relative_edu_gini'))
} else {
print(paste0('OK! All data were transfered: ','Edu: relative_edu_gini'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Education_Relative_Gini.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
#### Wealth OK ####
##### Decadal Ginis #####
# Wealth-inequality source: decadal Ginis, top-10% shares and 2017
# private net wealth, kept from 1820 onward and renamed to stable ids.
wlt = readxl::read_xlsx("Wealth Inequality-Alfani_Schifano_FINAL.xlsx",
sheet = "Data")
setDT(wlt)
setnames(wlt, "Year", "year")
wlt <- wlt[year >= 1820,
list(year, iso3,
wealthgini = Gini,
wealth_gini_source = SG,
wealthtop10 = Top10per,
wealth_top10_source = SourceTop10,
wealth = Privatenetwealth2017)]
df <- wlt
rm(wlt)
# Standard transfer loop for wealthgini.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
  setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
  if (ClioLayout$ISO3[i] %in% df$iso3){
    temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
    for (j in c(temp$year[which(!is.na(temp$wealthgini))])){
      if (length(temp$wealthgini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
        ClioLayout[i,as.character(j)] <- temp$wealthgini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
      }
    }
    rm(temp)
  } else {
    print(ClioLayout$ISO3[i])
  }
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 240
ddd <- df$wealthgini
ddd <- ddd[!is.na(ddd)]
length(ddd)
# FIX: compare against the number of non-NA wealthgini values (ddd) —
# the original compared to nrow(df), which also counts rows where only
# the top-10% share or the 2017 wealth level is present, so the check
# could fail (or pass) for the wrong reason.  This matches the pattern
# used by every other section.
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
  stop(paste0('Not all data were transfered: ','Wealth Decadal Ginis'))
} else {
  print(paste0('OK! All data were transfered: ','Wealth Decadal Ginis'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Wealth_Decadal_Ginis.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### wealthtop10 #####
# Transfer of the top-10% wealth share from the wealth `df`; same loop
# pattern as the decadal-Gini section.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
  setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
  if (ClioLayout$ISO3[i] %in% df$iso3){
    temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
    for (j in c(temp$year[which(!is.na(temp$wealthtop10))])){
      if (length(temp$wealthtop10[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
        ClioLayout[i,as.character(j)] <- temp$wealthtop10[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
      }
    }
    rm(temp)
  } else {
    print(ClioLayout$ISO3[i])
  }
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 240
ddd <- df$wealthtop10
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 240
# FIX: compare against the number of non-NA wealthtop10 values (ddd) —
# the original compared to nrow(df), which counts rows where only other
# wealth columns are present.  Matches the pattern of the other sections.
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
  stop(paste0('Not all data were transfered: ','Wealth wealthtop10'))
} else {
  print(paste0('OK! All data were transfered: ','Wealth wealthtop10'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Wealth_Top10.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### Privatenetwealth2017 #####
# Transfer of 2017 private net wealth (column `wealth`); same loop pattern.
# Last section using the wealth `df`, so the final rm() drops it.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$wealth))])){
if (length(temp$wealth[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$wealth[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 115
ddd <- df$wealth
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 115
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Wealth wealth'))
} else {
print(paste0('OK! All data were transfered: ','Wealth wealth'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Wealth_Total_2017.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
##### Yearly Ginis ####
# Annual wealth Ginis from a separate figure spreadsheet; renamed to the
# same (year, iso3, Gini) shape and transferred with the standard loop.
annual_wealth_ginis = readxl::read_xls("Figure 5.2 -upd.xls",
sheet = "Figure 5.2", skip = 8)
setDT(annual_wealth_ginis)
setnames(annual_wealth_ginis, "Country code", "iso3")
setnames(annual_wealth_ginis, "Wealth Inequality", "Gini")
setnames(annual_wealth_ginis, "Year", "year")
annual_wealth_ginis <- annual_wealth_ginis[,c('year','iso3','Gini')]
df <- annual_wealth_ginis
rm(annual_wealth_ginis)
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$Gini))])){
if (length(temp$Gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$Gini[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 759
ddd <- df$Gini
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 759
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Wealth Ginis'))
} else {
print(paste0('OK! All data were transfered: ','Wealth Ginis'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Wealth_Yearly_Ginis.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
#### Working Hours OK ####
# Weekly working hours in manufacturing.  ISO3 codes are patched for a
# few special territories, then filled from the ISO3s lookup wherever the
# source left the code empty.
wrk = read.xlsx("workweek.xls", sheetName = "Sheet1", colIndex = c(1:4), stringsAsFactors = F)
setDT(wrk)
# NOTE(review): this renames the source column "Country.name" to iso3 —
# presumably that column actually holds ISO3 codes; verify against the
# workbook, since the commented-out countrycode() call below suggests the
# mapping was reworked at some point.
setnames(wrk, "Country.name", "iso3")
#wrk[is.na(iso3), iso3 := countrycode(`country`, "Country.name", "iso3c")]
wrk[`country` == "Netherlands Antilles", iso3 := "ANT"]
wrk[`country` == "Germany, Democratic Rep.", iso3 := "DDR"]
wrk[`country` == "Macau", iso3 := "MAC"]
wrk[`country` == "Germany (West Berlin)", iso3 := NA]
wrk[`country` == "Saar", iso3 := NA]
wrk = wrk[!is.na(Working.hours.per.week.in.Manufacturing)]
# Fill empty iso3 codes from the ISO3s lookup table by country name.
for (i in which(wrk$iso3=="")){
if (length(ISO3s$iso3[which(ISO3s$Country==wrk$country[i])])>0){
wrk$iso3[i] <-
ISO3s$iso3[which(ISO3s$Country==wrk$country[i])]
}
}
wrk = wrk[!is.na(iso3)]
df <- wrk
#rm(wrk)
# Standard transfer loop; `ttt` records used source rows for diagnostics.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
ttt <- c()
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$Working.hours.per.week.in.Manufacturing))])){
if (length(temp$Working.hours.per.week.in.Manufacturing[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$Working.hours.per.week.in.Manufacturing[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
ttt <- c(ttt,which(df$iso3==ClioLayout$ISO3[i] & df$year==j))
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# Console diagnostics: iso3 codes never used, and codes missing from the layout.
sort(unique(df$iso3[!c(1:nrow(df)) %in% ttt]))
sort(unique(df$iso3[which(!df$iso3 %in% ClioLayout$ISO3 & !is.na(df$Working.hours.per.week.in.Manufacturing))]))
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 4311
ddd <- df$Working.hours.per.week.in.Manufacturing
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 4311
# add the double entries (meaning that they appear twice in the clio template with end year==2012) of CAN????
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Working hours'))
} else {
print(paste0('OK! All data were transfered: ','Working hours'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Working_hours.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
#### Gender OK ####
##### overall index (hgi) ####
# Gender-equality imputations (1870-2010, decadal).  Country ids are
# numeric (iso3n) and are mapped to ISO3, with manual fixes for the
# historical states; id 736 (pre-split Sudan) is dropped in favour of 729.
gen = readRDS("genderimputations_1870_2010_averaged.rds.gz")
setDT(gen)
# to include:
# the overall index (hgi)
# the average years of education ratio (edu)
# the labour force participation ratio (lab)
# 728 = SSD
# 729 = SDN
# 736 = SDN before split
# the Sudan stuff needs to be checked here
# this is using 729 and 736 which makes zero sense
# also the gdp merge seems off, as if year was not included
# but it's probably the imputation madness
gen = gen[year >= 1900][order(year)]
gen[, iso3 := countrycode(countryid, "iso3n", "iso3c")]
gen[countryid == 200, iso3 := "CSK"]
gen[countryid == 729, iso3 := "SDN"]
gen[countryid == 810, iso3 := "SUN"]
gen[countryid == 890, iso3 := "YUG"]
gen = gen[countryid != 736]
df <- gen
# Transfer the overall index (hgi); `ttt` records used source rows so
# the commented diagnostics below can identify untransferred countries.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
ttt <- c()
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$hgi))])){
if (length(temp$hgi[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$hgi[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
ttt <- c(ttt,which(df$iso3==ClioLayout$ISO3[i] & df$year==j))
} else {
print(df[which(df$iso3==ClioLayout$ISO3[i] & df$year==j),])
}
}
rm(temp)
} else {
#print(ClioLayout$ISO3[i])
}
}
close(pb)
# df$iso3year[!c(1:nrow(df)) %in% ttt]
#[1] "REU1900" "REU1910" "REU1920" "REU1930" "REU1940" "REU1950" "REU1960" "REU1970" "REU1980" "REU1990" "REU2000" "REU2010"
# its La Reunion! and perhaps the same is true for other datasets above.
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 1685
ddd <- df$hgi
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 1685
# maybe missing ISO3?
#df$iso3[which(!df$iso3 %in% ClioLayout$ISO3)]
#sort(unique(ClioLayout$ISO3[which(!ClioLayout$ISO3 %in% df$iso3)]))
# No!
# maybe duplicates in df?
#df$iso3year <- paste0(df$iso3,df$year)
#df <- df[!duplicated(df$iso3year),]
# No!
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Gender hgi'))
} else {
print(paste0('OK! All data were transfered: ','Gender hgi'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Gender_HGI.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### average years of education ratio (edu) ####
# Same long-to-wide transfer as the hgi section above, now for the
# education ratio; countries missing from the source are printed.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$edu))])){
if (length(temp$edu[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$edu[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 1685
ddd <- df$edu
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 1685
# Abort when the grid cell count does not match the source count.
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Gender edu'))
} else {
print(paste0('OK! All data were transfered: ','Gender edu'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Gender_edu.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total','df'))])
##### labour force participation ratio (lab) ####
# Same long-to-wide transfer as the hgi/edu sections above, now for the
# labour force participation ratio.
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
  setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
  if (ClioLayout$ISO3[i] %in% df$iso3){
    temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
    for (j in c(temp$year[which(!is.na(temp$lab))])){
      if (length(temp$lab[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
        ClioLayout[i,as.character(j)] <- temp$lab[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
      }
    }
    rm(temp)
  } else {
    # Country present in the layout but absent from the source data.
    print(ClioLayout$ISO3[i])
  }
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 1685
# BUG FIX: the check previously compared against df$hgi (the gender
# index), not df$lab, so it validated the wrong variable.
ddd <- df$lab
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 1685
# BUG FIX: the status messages below previously said 'Gender edu'
# although this is the lab section.
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
  stop(paste0('Not all data were transfered: ','Gender lab'))
} else {
  print(paste0('OK! All data were transfered: ','Gender lab'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Gender_lab.xlsx",row.names = F)
# `df` is no longer needed after the last gender variable.
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase','total'))])
#### Social Spending OK ####
# Read social-spending data from the Excel workbook (first 14 rows are
# countries, columns are years), reshape wide -> long, and transfer the
# values into the ClioLayout grid as in the sections above.
soc = readxl::read_xlsx("Socspen 1815-1945, Table x2, Fig's x1a, x1b ••.xlsx",
sheet = "Correl's with GDPpc",
skip = 5)
setDT(soc)
soc <- soc[1:14]
setnames(soc, 1, "iso3")
soc = melt(soc, id.vars = "iso3", variable.factor = FALSE)
soc[, socspend := as.numeric(value)]
soc[, year := as.numeric(variable)]
# Drop rows from non-year columns (their names do not parse as numbers).
soc <- soc[!is.na(year)]
df <- soc
df <- subset(df,!is.na(df$socspend))
rm(soc)
ClioLayout <- ClioLayoutBase
pb <- tkProgressBar(title = "progress bar", min = 0, max = length(total), width = 300)
for (i in total){
setTkProgressBar(pb, which(total==i), label=paste( round(which(total==i)/length(total)*100, 1),"% done"))
if (ClioLayout$ISO3[i] %in% df$iso3){
temp <- subset(df,df$iso3==ClioLayout$ISO3[i])
for (j in c(temp$year[which(!is.na(temp$socspend))])){
if (length(temp$socspend[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)])>0){
ClioLayout[i,as.character(j)] <- temp$socspend[which(temp$iso3==ClioLayout$ISO3[i] & temp$year==j)]
}
}
rm(temp)
} else {
print(ClioLayout$ISO3[i])
}
}
close(pb)
# how many entries are transferred:
ttt <- apply(ClioLayout, 2, function(x) length(which(!is.na(x))))
sum(ttt[as.character(c(1:2050))], na.rm = T)
# 290
ddd <- df$socspend
ddd <- ddd[!is.na(ddd)]
length(ddd)
# 290
# Abort when the grid cell count does not match the source count.
if (!sum(ttt[as.character(c(1:2050))], na.rm = T)==length(ddd)){
stop(paste0('Not all data were transfered: ','Social Transfers'))
} else {
print(paste0('OK! All data were transfered: ','Social Transfers'))
}
ClioLayout$ISO3 <- NULL
write.xlsx2(ClioLayout,"Social_Transfers.xlsx",row.names = F)
rm(list= ls()[!(ls() %in% c('ISO2plus','ISO3s','ClioLayoutBase'))])
|
890ab1ff655f45ed45111cdcaf8b212d3e67c0d2
|
3e2422c3547f959c36bf69ad38007bcc1b958120
|
/CensusData/top_five_cities_deaths.R
|
1dc87c791d1a226d1c1d184a160429df4971d32c
|
[] |
no_license
|
mewsha/r-examples
|
e8a93a1941b532daa7370a2e6640c8ea515f945d
|
6aff24957e3ca854b2547a486568c1fef9ca73a7
|
refs/heads/master
| 2020-06-27T11:57:36.829250
| 2019-08-13T17:33:57
| 2019-08-13T17:33:57
| 199,948,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,891
|
r
|
top_five_cities_deaths.R
|
#' Census Data Script
#' Author: Jessica Diehl
#' Data Source: https://www.census.gov/data/datasets/time-series/demo/
#' popest/2010s-total-metro-and-micro-statistical-areas.html
#' Style Guide: https://style.tidyverse.org/index.html

#' Load Dependencies ---------------------------------
options(scipen = 999)
library(ggplot2)

#' Load Census Data ----------------------------------
census_data <- read.csv('D:/CSCI620_All/CSCI620_DataSets/cbsa-est2018-alldata.csv')

#' Combine the yearly deaths columns into one frame (one row per area)
death_data <- data.frame(
  Cities = census_data$NAME,
  Y2010 = census_data$DEATHS2010,
  Y2011 = census_data$DEATHS2011,
  Y2012 = census_data$DEATHS2012,
  Y2013 = census_data$DEATHS2013,
  Y2014 = census_data$DEATHS2014,
  Y2015 = census_data$DEATHS2015,
  Y2016 = census_data$DEATHS2016,
  Y2017 = census_data$DEATHS2017,
  Y2018 = census_data$DEATHS2018)

#' Extract the five cities with the most deaths in 2018
census_data_2 <- tail(death_data[with(death_data, order(Y2018)), ], 5)

#' Reshape wide -> long (Cities, variable, value), one row per city-year.
#' BUG FIX: the original called melt() without loading reshape2 or
#' data.table (only ggplot2 is attached), so the script errored; this
#' base-R equivalent produces the same three-column long layout.
year_cols <- setdiff(names(census_data_2), "Cities")
census_data_2 <- data.frame(
  Cities = rep(census_data_2$Cities, times = length(year_cols)),
  variable = rep(year_cols, each = nrow(census_data_2)),
  value = unlist(census_data_2[year_cols], use.names = FALSE))

#' Plot All Data -------------------------------------
ggplot(census_data_2, aes(Cities, value, fill = variable)) +
  #' Create bar graph (one dodged bar per year)
  geom_bar(stat = 'Identity', position = position_dodge()) +
  #' Label title and axis
  ggtitle("Top 5 Cities with the Most Deaths 2010-2018") +
  labs(x = "Cities", y = "Number of Deaths") +
  theme(plot.title = element_text(hjust = 0.5),
        text = element_text(size = 16,
                            family = "Arial"),
        axis.text.x = element_text(size = 11, angle = 5)) +
  #' Customize legend (labels replace the Y20xx column names)
  scale_fill_manual(name = "Census Years",
                    labels = c(2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018),
                    values = c("#0A2F51",
                               "#0E4D64",
                               "#137177",
                               "#188977",
                               "#1D9A6C",
                               "#39A96B",
                               "#56B870",
                               "#74C67A",
                               "#99D492"))
|
8c1d4e3ca9e18f15488267bc3f1dcac6be4abb16
|
ec9725a7107f32f5bfff68fe6832ad0d5e1942f5
|
/session11.R
|
fa06e1dc89d71314be943639a9f3835216825bf3
|
[] |
no_license
|
leezasantos/readmission_analysis
|
4b284aa2ece74a36b6b8465defce07741be04c8e
|
7cb5d33561a938168211771b3a75e2abeabe0089
|
refs/heads/main
| 2023-04-10T11:22:32.087149
| 2021-04-20T03:08:15
| 2021-04-20T03:08:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
session11.R
|
# installing packages (one-time setup)
install.packages("dplyr")
install.packages("ggplot2")
install.packages("tidyr")
install.packages("PASWR2")

# load libraries
library(dplyr)   # data cleaning
library(ggplot2) # data visuals
library(tidyr)   # data visuals
library(PASWR2)  # z test

# load data, data was converted to csv
df <- read.csv("C:/Users/Leeza A. Santos/OneDrive/MS AHI/HHA 551/strokedata.csv")

# data characteristics
View(df)   # view the full dataset in a new window
head(df)   # first rows
str(df)    # data type of each column
length(df) # number of columns
nrow(df)   # number of rows

# data cleaning
# BUG FIX: removed the stray `is.null(NULL)` line and the no-op
# `df[is.null(df)] <- 0` -- is.null() on a data frame is always FALSE
# (data frames cannot hold NULL), so NA replacement is sufficient.
df[is.na(df)] <- 0 # replace all na with 0
sum(is.na(df))     # check: should be 0

# summary statistics
summary(df)

# z test: H0 mu = 28 for BMI, using the sample sd as sigma
mean(df$bmi) # 28.89324
stdev <- sd(df$bmi)
# BUG FIX: the original referenced an undefined `x` (n = length(x)) and
# passed `sd`/`n`, which are not z.test() arguments; PASWR2::z.test()
# takes the population sd via sigma.x and a single alternative string.
z.test(df$bmi, mu = 28, sigma.x = stdev, alternative = "two.sided",
       conf.level = 0.95)

# t test
t.test(df$avg_glucose_level, df$bmi, mu = 28, alternative = 'less')

# Kolmogorov-Smirnov test comparing the two empirical distributions.
# BUG FIX: the original mixed the one- and two-sample forms and
# referenced an undefined `mydata`.
ks.test(df$avg_glucose_level, df$bmi)

# F test of equal variances (labelled "standard error" in the original)
var.test(df$avg_glucose_level, df$bmi)

# correlation
cor.test(df$avg_glucose_level, df$bmi, method = "kendall")
cor.test(df$avg_glucose_level, df$bmi, method = "pearson")
cor.test(df$avg_glucose_level, df$bmi, method = "spearman")

# covariance
cov(df$avg_glucose_level, df$bmi)
cov(df$avg_glucose_level, df$bmi, method = "kendall")
cov(df$avg_glucose_level, df$bmi, method = "pearson")
cov(df$avg_glucose_level, df$bmi, method = "spearman")
|
04469f32ea64b56c815b5933691cf7a6a358e795
|
f2a7091926c21c42b6daf8cfc2eef7682a4cc338
|
/scripts/07-figure-2.R
|
adf05d035bdde8d4ba7b6fc8e28a9592d8fd82ef
|
[] |
no_license
|
mneunhoe/wahlkreis_vorhersage
|
b17d7d4d6da6054714bd86e6d61a94eb09c7f6aa
|
4b7e50c426544446043e80dbd45f7b0d97024340
|
refs/heads/master
| 2023-01-14T21:49:47.482057
| 2020-11-17T12:44:18
| 2020-11-17T12:44:18
| 180,787,479
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,052
|
r
|
07-figure-2.R
|
###########################
# Plot win probabilities on maps
###########################
# Figure 2: one map panel per party; the alpha channel encodes the
# simulated probability that the party's candidate wins each of the
# 299 constituencies.

# Save graphics state; no.readonly = TRUE avoids warnings when restoring
# (plain par() also returns read-only parameters).
old_par <- par(no.readonly = TRUE)

nsim <- 500      # simulations per repetition
mu_nsim <- 25    # repetitions -- TODO confirm naming against upstream scripts
election <- 2017
cutoff <- 2      # days before the election

res_co_el <- readRDS("../processed-data/final_res_09_17.RDS")
res_list <- res_co_el[[paste0(election)]][[paste0(cutoff)]]

# One row per simulation, one column per constituency; entries are the
# simulated district winners (party labels).
winner <-
  matrix(
    unlist(lapply(res_list, "[[", "winner_nn")),
    nrow = nsim * mu_nsim,
    ncol = 299,
    byrow = T
  )

plot_col <- rgb(0, 0, 0, maxColorValue = 255)

wkr17 <- readOGR(dsn = "../raw-data/shp", stringsAsFactors = F)

# Per constituency: share of simulations won by each party, decreasing.
win_probs <- apply(winner, 2, function(x)
  - sort(-table(x) / nrow(winner)))

# Colour vector for one party (or pooled party pair): the base colour
# with alpha set to the party's win probability; NA where the party
# never wins. Replaces six near-identical copy-pasted loops.
party_alpha_cols <- function(probs, parties, base_col, n = 299) {
  cols <- rep(NA_character_, n)
  for (i in seq_len(n)) {
    tmp <- probs[[i]]
    hit <- names(tmp) %in% parties
    if (any(hit)) {
      cols[i] <- adjustcolor(base_col, alpha = sum(tmp[hit]))
    }
  }
  cols
}

if (save_figures) {
  tiff(
    "../figures/Abbildung2.tiff",
    height = 9 * 0.75,
    width = 16 * 0.75,
    units = "in",
    res = 300
  )
}

par(mfrow = c(2, 3),
    oma = c(0, 0, 4, 0),
    lheight = 1)

# Panel title -> party label(s) in the simulation output
# (CDU and CSU are pooled into one panel).
panels <- list(
  "CDU/CSU" = c("CDU", "CSU"),
  "SPD"     = "SPD",
  "AfD"     = "AFD",
  "FDP"     = "FDP",
  "LINKE"   = "LINKE",
  "GRÜNE"   = "GRUENE"
)

for (panel_title in names(panels)) {
  plot(
    wkr17,
    col = party_alpha_cols(win_probs, panels[[panel_title]], plot_col),
    border = "lightgrey",
    lwd = 0.2,
    main = panel_title
  )
}

mtext(
  "Gewinnwahrscheinlichkeiten der Kandidierenden der jeweiligen Parteien in den Wahlkreisen \n 2 Tage vor der Bundestagswahl 2017",
  outer = T,
  font = 2
)

if (save_figures) {
  dev.off()
}

par(old_par)
|
8f32f3e4628ae8ae7cfc5b180a76ad73d4e9aa2e
|
b9b14fdf597bcfc1f400b4e51aafadb2d7b10acf
|
/src_old_2017/remove_dups_nhanes_table_description.R
|
f35309c062021e93870bc0f1f6ae9b68c930a9d1
|
[] |
no_license
|
chiragjp/nhanes_scraper
|
b625770008b8adb89f330701ee28c192485bab23
|
75ea624045b9e8869e23dcc4056e5eeda5170241
|
refs/heads/master
| 2021-01-21T15:03:28.868170
| 2020-06-02T15:14:27
| 2020-06-02T15:14:27
| 29,565,384
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 835
|
r
|
remove_dups_nhanes_table_description.R
|
## Deduplicate nhanes_table_description: keep the first row for each
## duplicated table_name and re-insert those survivors after the
## (manually executed) DELETE statements.
source('db_connect.R')

# Count rows per table_name and keep only the duplicated names.
sql <- "select table_name, count(table_name) from nhanes_table_description group by table_name"
dupTable <- dbGetQuery(con, sql)
duplicates <- dupTable[dupTable[, 2] > 1, ]

sql <- 'select * from nhanes_table_description'
allTables <- dbGetQuery(con, sql)
dupTables <- allTables[allTables$table_name %in% duplicates$table_name, ]

# First occurrence of each duplicated table_name.
# seq_len() is safe when there are no duplicates (1:nrow() would
# produce c(1, 0) and index out of bounds).
nonDup <- data.frame()
for (ii in seq_len(nrow(duplicates))) {
  tableName <- duplicates[ii, 1]
  nonDup <- rbind(nonDup, subset(dupTables, table_name == tableName)[1, ])
}

# Print the DELETE statements for review; dbSendQuery() is left
# commented out on purpose so the deletions must be run deliberately.
for (ii in seq_len(nrow(duplicates))) {
  sql <- sprintf('delete from nhanes_table_description where table_name = \'%s\'', duplicates[ii, 1])
  cat(sprintf('%s;\n', sql))
  #dbSendQuery(con, sql)
}

dbWriteTable(con, 'nhanes_table_description', nonDup, append=TRUE, row.names=FALSE, overwrite=FALSE)
|
605e57d6968761bc45a2d49feb978cd42eef73b0
|
416550c21c0e3f49ae34ef843b4c352910c3c2f9
|
/man/MsDataSet-class.Rd
|
c6783b3b9f10e8ce8a7cb8fd89329eb8a80a66c8
|
[] |
no_license
|
thomasp85/MSsary
|
34dc8e93fd13a33ba6f78598626bb134d6cb151c
|
bf182b67b072256c4ff16b8c72678109f899ecc5
|
refs/heads/master
| 2021-01-22T12:12:39.641522
| 2015-01-26T11:44:40
| 2015-01-26T11:44:40
| 25,297,627
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 305
|
rd
|
MsDataSet-class.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/MsDataSet.R
\docType{class}
\name{MsDataSet-class}
\alias{MsDataSet-class}
\title{Store and operate on sets of samples}
\description{
This object handles multiple MsData sets and facilitates batch analysis
on them.
}
|
fa45b0bfc5edd866efafe1267bb12448d1ee3140
|
d7800c86c6f8a41bce1be6bd736339d7590da999
|
/R/dkim.R
|
179d96de9bbc29f8d9d27aee79fc3e8ab8fc4907
|
[] |
no_license
|
gyepi/aws.ses
|
e2974f8a8633b20dd15b5850c7275784c45fc81d
|
0db7875425d7089de595b9b9af0f5dd15c498380
|
refs/heads/master
| 2020-05-07T10:49:31.816279
| 2019-04-09T19:56:08
| 2019-04-09T19:56:08
| 180,434,185
| 0
| 1
| null | 2019-04-09T19:16:32
| 2019-04-09T19:16:31
| null |
UTF-8
|
R
| false
| false
| 1,316
|
r
|
dkim.R
|
#' @rdname dkim
#' @title DKIM
#' @description Manage DKIM settings for SES identities.
#' @template identity
#' @template dots
#' @return For \code{get_dkim}, the DkimAttributes list from the API
#'   response, with the AWS RequestId attached as an attribute.
#' @examples
#' \dontrun{
#' verify_dkim("example.com")
#' get_dkim("me@example.com")
#' set_dkim("me@example.com", TRUE)
#' get_dkim("me@example.com")
#' }
#' @export
get_dkim <- function(identity, ...) {
    query <- list(Action = "GetIdentityDkimAttributes")
    # Build Identities.member.1, Identities.member.2, ... parameters.
    # seq_along() is safe for zero-length input (1:length() would
    # produce c(1, 0) and mislabel the parameters).
    identity <- as.list(identity)
    names(identity) <- paste0("Identities.member.", seq_along(identity))
    query <- c(query, identity)
    r <- sesPOST(query = query, ...)
    structure(r[["GetIdentityDkimAttributesResponse"]][["GetIdentityDkimAttributesResult"]][["DkimAttributes"]],
              RequestId = r[["GetIdentityDkimAttributesResponse"]][["ResponseMetadata"]][["RequestId"]])
}
#' @rdname dkim
#' @param enabled A logical.
#' @export
set_dkim <- function(identity, enabled = TRUE, ...) {
    # SES expects the flag as a lowercase "true"/"false" string.
    params <- list(
        Action = "SetIdentityDkimEnabled",
        Identity = identity,
        DkimEnabled = tolower(as.character(enabled))
    )
    sesPOST(query = params, ...)
}
#' @rdname dkim
#' @param domain A character string containing a domain.
#' @export
verify_dkim <- function(domain, ...) {
    # Request DKIM verification tokens for the given domain.
    sesPOST(query = list(Action = "VerifyDomainDkim", Domain = domain), ...)
}
|
85f50bea59bcae23721eb245159ba3f7d6545247
|
2a631ebe0119ecb1ac2ef2ff3b270b06a05755ee
|
/R/R-class/c4/mynote_chapter4.R
|
c1a7385a4359fdede8f50ef99c4fdde2a8d56e7e
|
[] |
no_license
|
WangLiuying/R-in-SOE
|
1642ebe8b5745cfee16f2e594e40a1dce4f79d7e
|
bf582e2f1018720ce6f7fae2e7fe5bff36f3f0b9
|
refs/heads/master
| 2021-01-21T06:25:06.456610
| 2017-05-09T00:17:58
| 2017-05-09T00:17:58
| 82,865,560
| 0
| 0
| null | 2017-02-23T00:13:35
| 2017-02-23T00:13:35
| null |
UTF-8
|
R
| false
| false
| 3,967
|
r
|
mynote_chapter4.R
|
##mynote 2017-3-28
# Sum 1..100 with an explicit loop (sum(1:100) is the vectorised form).
iTotal <- 0
for(i in 1:100)
{
iTotal <- iTotal + i
}
cat("Sum of 1-100:",iTotal,"\n",sep="")
# A for loop can iterate directly over a character vector.
szSymbols <- c("MSFT","GOOG","AAPL","INTL","ORCL","SYMC")
for(SymbolName in szSymbols)
{
cat(SymbolName,"\n",sep="")
}
# ifelse() is vectorised and preserves the matrix shape of the test.
x <- matrix(1:6, 2, 3)
ifelse(x >= 0, sqrt(x), NA)
# switch() demo: "a" -> 1, "b" -> 2:3, unmatched strings return the
# unnamed default.
ccc <- c("b","QQ","a","A","bb")
for (ch in ccc)
{
cat(ch,":",switch(EXPR=ch,a=1,b=2:3,"otherwise:break"),"\n")
}
# Custom infix operator printing the four basic arithmetic results.
"%g%" <- function(x,y)
{
print(x+y)
print(x-y)
print(x*y)
print(x/y)
}
3%g%5
View(airquality)
# Column-wise means of a matrix or data frame, returned as a plain
# numeric vector (one entry per column).
columnmean <- function(y){
  n_cols <- ncol(y)
  vapply(seq_len(n_cols), function(j) mean(y[, j]), numeric(1))
}
?Map()
# Argument matching rules for R function calls:
# 1. Check for exact match for a named argument
# 2. Check for a partial match
# 3. Check for a positional match
args(sd)
# Thin wrapper around plot() with a line-type default; extra arguments
# are forwarded via `...`.
myplot <- function(x, y, type = "l", ...) {
plot(x, y, type = type, ...)
}
attach(mtcars)
opar=par()
par(mfrow=c(2,1))
plot(mpg,wt)
myplot(mpg,wt)
par(opar)
detach(mtcars)
args(paste)
args(cat)
paste("a", "b", sep = ":")
# `se` partially matches `sep` (rule 2 above).
paste("a", "b", se = ":")
x <- c(1, 9, 2, 8, 3, 7)
y <- c(9, 2, 8, 3, 7, 2)
# Medians of the elementwise (parallel) maxima and minima of two
# vectors, returned as a named two-element list.
parboth <- function(a, b) {
  upper <- pmax(a, b)
  lower <- pmin(a, b)
  list(
    "median of the parallel maxima" = median(upper),
    "median of the parallel minima" = median(lower)
  )
}
parboth(x, y)
#search path
search()
##exercise
# Two samples of machine measurements for the pooled t test below:
# A 79.98, 80.04, 80.02, 80.04, 80.03, 80.03, 80.04, 79.97, 80.05, 80.03, 80.02, 80.00, 80.02
# B 80.02, 79.94, 79.98, 79.97, 79.97, 80.03, 79.95, 79.97
x <- c(79.98, 80.04, 80.02, 80.04, 80.03, 80.03, 80.04, 79.97, 80.05, 80.03, 80.02, 80.00, 80.02)
y <- c(80.02, 79.94, 79.98, 79.97, 79.97, 80.03, 79.95, 79.97)
# Two-sample pooled-variance t statistic (equal variances assumed),
# equivalent to t.test(x, y, var.equal = TRUE)$statistic.
ttest.mine <- function(x, y) {
  n_x <- length(x)
  n_y <- length(y)
  pooled_var <- ((n_x - 1) * var(x) + (n_y - 1) * var(y)) / (n_x + n_y - 2)
  std_err <- sqrt(pooled_var) * sqrt(1 / n_x + 1 / n_y)
  (mean(x) - mean(y)) / std_err
}
ttest.mine(x,y)
# Compare with the built-in pooled-variance t test.
t.test(x,y)
##messages,warnings and errors
# message() emits a condition on stderr; the input is still returned.
f <- function(x)
{
message("'x' contains ",toString(x))
x
}
f(letters[1:5])
suppressMessages(f(letters[1:5]))
# warning() signals but does not stop execution.
g <- function(x) {
if(any(x < 0))
{
warning("'x' contains negative values: ", toString(x[x < 0]))
}
x
}
g(c(3, -7, 2, -9))
last.warning
suppressWarnings(g(c(3, -7, 2, -9)))
#*apply functions
str(apply)
?apply
x <- matrix(1:24,nrow=4,ncol=6)
x
apply(x,1,sum)
# row sums of x (see also rowSums())
apply(x,2,sum)
# column sums of x (see also colSums())
x <- matrix(rnorm(200),20,10)
x
apply(x,1,quantile,probs=c(0.25,0.75))
# mapply() iterates over two (or more) vectors/lists in parallel
mapply(rep, 1:4, 4:1)
rnorm(1:5,1:5,2)
mapply(rnorm, 1:5, 1:5, 2)
lapply
# lapply() always returns a list
sapply
# sapply() returns a simplified result (vector/matrix) when possible
x <- list(a=1:5,b=rnorm(1000))
lapply(x,mean)
str(mapply(rnorm,seq(50,500,100),1,3))
lapply(mapply(rnorm,seq(50,500,100),1,3),mean)
x <- list(a=matrix(1:4,2,2),b=matrix(1:6,3,2))
x
# first column of each matrix in the list
lapply(x,function(elt)elt[,1])
setwd("D:/DataAnalysis/R's workingspace/R-class XuHaifeng/R/R-class/c4/Data_Chap_4")
load("flags.RData")
View(flags)
str(flags)
str(lapply)
cls <- lapply(flags,class)
cls <- sapply(flags,class)
flag_colors <- flags[,11:17]
head(flag_colors)
lapply(flag_colors,sum)
sapply(flag_colors,sum)
flag_shapes <- flags[,19:23]
lshape <- lapply(flag_shapes,range)
mshape <- sapply(flag_shapes,range)
class(lshape)
lshape
subset(flags,flags$sunstars==50,name)
mshape
class(mshape)
unique(c(3,4,5,5,5,6,6))
unique_vals <- lapply(flags,unique)
sapply(unique_vals,length)
str(vapply)
# vapply() enforces the result type (one character value per column here)
vapply(flags,class,character(1))
str(tapply)
# tapply(): apply a function within groups defined by a factor
x <- c(rnorm(1000),runif(1000),rnorm(1000,1))
f <- gl(3,1000)
length(f)
tapply(x,f,mean)
table(flags$landmass)
table(flags$animate)
tapply(flags$animate, flags$landmass, mean)
##exercise
View(Titanic)
Titanic.df <- as.data.frame(Titanic)
tapply(Titanic.df$Freq,Titanic.df$Survived,sum)
##
# split(): partition a vector/data frame by a factor
str(split)
x <- c(rnorm(10),runif(10),rnorm(10,1))
f <- gl(3,10)
split(x,f)
library(datasets)
head(airquality)
s <- split(airquality,airquality$Month)
s
f1 <- gl(2, 5)
f2 <- gl(5, 2)
# interaction(): combined factor over all level pairs of f1 and f2
interaction(f1, f2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.