blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c2ab5d187ad363bc7c1ee00b23b36a1ed9b94dc1
|
26a4aaae333e1ada90d7d12f0d74c36045f5f429
|
/2-(3) plotting weather.R
|
6539923d3953668c26f7c5b96739cf7a45b30564
|
[] |
no_license
|
supportchelsea/Crawling_naverAPI
|
7425c8d6ba18bcfb647d7564f7db5e6a5b3e5770
|
52094dd27fb11ee1c84e6dc8b1072258be8353c6
|
refs/heads/master
| 2020-03-26T18:14:05.515947
| 2019-08-07T04:44:17
| 2019-08-07T04:44:17
| 145,202,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
2-(3) plotting weather.R
|
# make the new dataframe to draw a plot
weather = rbind(w2017,w2018)
head(weather)
str(weather)
#plotting
ggplot(weather, aes(x=date, y=degree,group=1)) + geom_line(linetype="solid", color="black", size=0.5)+ ggtitle("Degree",subtitle = 'from 2017-08-01 to 2018-07-24')
write.csv2(weather, file='weather.csv')
|
a18ad26c88c59c8866d290dbb724b966cb1db6b5
|
80e0469caa7900baaa6262b3b652e40db7088160
|
/complete_data_analysis/marginal_value_information_plotting_functions.R
|
dcb975c022d7df91ae4560033c242c616d0545d7
|
[] |
no_license
|
diegoleonbarido/flexbox-data-dump-analysis
|
8821e7bf897f5debbcaac0074bef46f15e3f5eb7
|
d895385a977fb2db3e7798856d92703fced5598f
|
refs/heads/master
| 2020-12-11T21:15:56.567109
| 2017-08-15T22:49:30
| 2017-08-15T22:49:30
| 39,653,179
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88,278
|
r
|
marginal_value_information_plotting_functions.R
|
######### Plotting functions for the Marginal Value of Information
######### 1. Timeseries Plots for Treatment and Control
plot_energy_cost_ft <- function(treatment_time_series_data_table,control_time_series_data_table,data_time_series_data_table,energia_text_var,energy_file){
count.plot.list <- list()
count.plot.name <- list()
text.list <- list()
count.plot <- 0
options(warn=-1)
time_series_energy_treatment <- ggplot(subset(treatment_time_series_data_table,treatment_time_series_data_table$fecha>"2015-06-01" & treatment_time_series_data_table$fecha<"2017-02-01"),aes(fecha,get(energia_text_var),group=Casa,colour=Casa)) + geom_path(alpha=0.5) + xlab("Date") + ylab("Monthly Energy Consumption (kwh)") + ggtitle("Treatment Group") + theme(panel.background = element_blank(),axis.text=element_text(size=13),axis.title=element_text(size=14,face="bold")) + labs(colour= 'ID') + theme(legend.key = element_rect(fill = "white")) + theme(legend.position="bottom")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = time_series_energy_treatment
count.plot.name[[count.plot]] = 'time_series_energy_treatment'
time_series_energy_control <- ggplot(subset(control_time_series_data_table,control_time_series_data_table$fecha>"2015-09-01"),aes(fecha,get(energia_text_var),group=Casa,colour=Casa)) + geom_path(alpha=0.5) + xlab("Date") + ylab("Monthly Energy Consumption (kwh)") + ggtitle("Control Group") + theme(panel.background = element_blank(),axis.text=element_text(size=13),axis.title=element_text(size=14,face="bold")) + labs(colour= 'ID') + theme(legend.key = element_rect(fill = "white")) + theme(legend.position="bottom")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = time_series_energy_control
count.plot.name[[count.plot]] = 'time_series_energy_control'
time_series_importe_treatment <-ggplot(subset(treatment_time_series_data_table,treatment_time_series_data_table$fecha>"2015-06-01" & treatment_time_series_data_table$fecha<"2017-02-01"),aes(fecha,importe_dl,group=Casa,colour=Casa)) + geom_path(alpha=0.5) + xlab("Date") + ylab("Monthly Energy Cost ($US)") + ggtitle("Treatment Group") + theme(panel.background = element_blank(),axis.text=element_text(size=13),axis.title=element_text(size=14,face="bold")) + labs(colour= 'ID') + theme(legend.key = element_rect(fill = "white")) + theme(legend.position="bottom")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = time_series_importe_treatment
count.plot.name[[count.plot]] = 'time_series_importe_treatment'
time_series_importe_control <-ggplot(control_time_series_data_table,aes(fecha,importe_dl,group=Casa,colour=Casa)) + geom_path(alpha=0.5) + xlab("Date") + ylab("Monthly Energy Cost ($US)") + ggtitle("Control Group") + theme(panel.background = element_blank(),axis.text=element_text(size=13),axis.title=element_text(size=14,face="bold")) + labs(colour= 'ID') + theme(legend.key = element_rect(fill = "white")) + theme(legend.position="bottom")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = time_series_importe_control
count.plot.name[[count.plot]] = 'time_series_importe_control'
# Densities Energy and Cost
# All Data
densities_t_c_e <- call_plot_subset(data_time_series_data_table,"treatment","Treatment","Control",energia_text_var,"Monthly Energy Consumption (kWh)","Density","All Data (kWH/Month): Treatment vs. Control")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = densities_t_c_e
count.plot.name[[count.plot]] = 'densities_t_c_e'
densities_t_c_c <- call_plot_subset(data_time_series_data_table,"treatment","Treatment","Control","importe_dl","Monthly Energy Expenditure ($US)","Density","All Data ($US/Month): Treatment vs. Control")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = densities_t_c_c
count.plot.name[[count.plot]] = 'densities_t_c_c'
# Pre & Post Implementation
### All data for ENERGY treatment and control
# Post and Pre Implementation
post_pre_all_data_tr <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399],"intervention_group","Treatment Pre-Intervention","Treatment Post-Intervention",energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post and Pre-Implementation (kWH/Month): Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = post_pre_all_data_tr
count.plot.name[[count.plot]] = 'post_pre_all_data_tr'
post_pre_all_data_cl <- call_plot_subset(control_time_series_data_table,"intervention_group","Control Pre-Intervention","Control Post-Intervention",energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post and Pre-Implementation (kWH/Month): Contol")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = post_pre_all_data_cl
count.plot.name[[count.plot]] = 'post_pre_all_data_cl'
#Months with and without PAPER ENERGY REPORTS
paper_all_data_tr <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"intervention_group","Treatment Pre-Intervention","Treatment Post-Intervention","energia_ajustada","Monthly Energy Consumption (kWh)","Density","Post Paper Reports Pre and Post Intervention: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_tr
count.plot.name[[count.plot]] = 'paper_all_data_tr'
paper_all_data_info_group <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"information_group","Lost Willingness to Pay Information Bid","Won Willingness to Pay Information Bid" ,energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post Paper Reports - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_info_group
count.plot.name[[count.plot]] = 'paper_all_data_info_group'
paper_all_data_wtp_tr <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"wtp_lw_md_h",'low','high','medium-low','medium-high',energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post SMS - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_wtp_tr
count.plot.name[[count.plot]] = 'paper_all_data_wtp_tr'
paper_all_data_wtpfraction_tr <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"fraction_lw_md_h",'low','high','medium-low','medium-high' ,energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post Paper Reports - Won or Lost the WTP Bid, Fraction of Bill: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_wtpfraction_tr
count.plot.name[[count.plot]] = 'paper_all_data_wtpfraction_tr'
paper_all_data_cl <- call_plot_subset(time.series.receipt.control.dt[report_intervention_month==1],"intervention_group","Control Pre-Intervention","Control Post-Intervention",energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post Paper Reports Pre and Post Intervention: Control")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_cl
count.plot.name[[count.plot]] = 'paper_all_data_cl'
#Months with and without SMS
sms_all_data_tr <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & sms_intervention_month==1],"intervention_group","Treatment Pre-Intervention","Treatment Post-Intervention",energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post SMS Pre and Post Intervention: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_tr
count.plot.name[[count.plot]] = 'sms_all_data_tr'
sms_all_data_info_group <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & sms_intervention_month==1],"information_group","Lost Willingness to Pay Information Bid","Won Willingness to Pay Information Bid" ,energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post SMS - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_info_group
count.plot.name[[count.plot]] = 'sms_all_data_info_group'
sms_all_data_wtp_tr <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & sms_intervention_month==1],"wtp_lw_md_h",'low','high','medium-low','medium-high',energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post SMS - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_wtp_tr
count.plot.name[[count.plot]] = 'sms_all_data_wtp_tr'
sms_all_data_wtpfraction_tr <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & sms_intervention_month==1],"fraction_lw_md_h",'low','high','medium-low','medium-high' ,energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post SMS - Won or Lost the WTP Bid, Fraction of Bill: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_wtpfraction_tr
count.plot.name[[count.plot]] = 'sms_all_data_wtpfraction_tr'
sms_all_data_cl <- call_plot_subset(control_time_series_data_table[sms_intervention_month==1],"intervention_group","Control Pre-Intervention","Control Post-Intervention",energia_text_var,"Monthly Energy Consumption (kWh)","Density","Post SMS - Pre and Post Intervention: Control")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_cl
count.plot.name[[count.plot]] = 'sms_all_data_cl'
### All data for COSTS treatment and control
# Post and Pre Implementation
post_pre_all_data_tr_cost <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399],"intervention_group","Treatment Pre-Intervention","Treatment Post-Intervention","importe_dl","Monthly Energy Costs ($US)","Density","Post and Pre-Implementation ($US/Month): Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = post_pre_all_data_tr_cost
count.plot.name[[count.plot]] = 'post_pre_all_data_tr_cost'
post_pre_all_data_cl_cost <- call_plot_subset(control_time_series_data_table,"intervention_group","Control Pre-Intervention","Control Post-Intervention","importe_dl","Monthly Energy Costs ($US)","Density","Post and Pre-Implementation ($US/Month): Contol")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = post_pre_all_data_cl_cost
count.plot.name[[count.plot]] = 'post_pre_all_data_cl_cost'
#Months with and without PAPER ENERGY REPORTS
paper_all_data_tr_cost <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"intervention_group","Treatment Pre-Intervention","Treatment Post-Intervention","importe_dl","Monthly Energy Cost ($US)","Density","Post Paper Reports Pre and Post Intervention: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_tr_cost
count.plot.name[[count.plot]] = 'paper_all_data_tr_cost'
paper_all_data_info_group <- call_plot_subset(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"information_group","Lost Willingness to Pay Information Bid","Won Willingness to Pay Information Bid" ,"importe_dl","Monthly Energy Cost ($US)","Density","Post Paper Reports - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_info_group
count.plot.name[[count.plot]] = 'paper_all_data_info_group'
paper_all_data_wtp_tr <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"wtp_lw_md_h",'low','high','medium-low','medium-high',"importe_dl","Monthly Energy Cost ($US)","Density","Post SMS - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_wtp_tr
count.plot.name[[count.plot]] = 'paper_all_data_wtp_tr'
paper_all_data_wtpfraction_tr_cost <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & report_intervention_month==1],"fraction_lw_md_h",'low','high','medium-low','medium-high' ,"importe_dl","Monthly Energy Cost ($US)","Density","Post Paper Reports - Won or Lost the WTP Bid, Fraction of Bill: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_wtpfraction_tr_cost
count.plot.name[[count.plot]] = 'paper_all_data_wtpfraction_tr_cost'
paper_all_data_cl_cost <- call_plot_subset(time.series.receipt.control.dt[report_intervention_month==1],"intervention_group","Control Pre-Intervention","Control Post-Intervention","importe_dl","Monthly Energy Cost ($US)","Density","Post Paper Reports Pre and Post Intervention: Control")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = paper_all_data_cl_cost
count.plot.name[[count.plot]] = 'paper_all_data_cl_cost'
#Months with and without SMS
sms_all_data_tr_cost <- call_plot_subset(treatment_time_series_data_table[sms_intervention_month==1],"intervention_group","Treatment Pre-Intervention","Treatment Post-Intervention","importe_dl","Monthly Energy Cost ($US)","Density","Post SMS Pre and Post Intervention: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_tr_cost
count.plot.name[[count.plot]] = 'sms_all_data_tr_cost'
sms_all_data_info_group <- call_plot_subset(treatment_time_series_data_table[sms_intervention_month==1],"information_group","Lost Willingness to Pay Information Bid","Won Willingness to Pay Information Bid" ,"importe_dl","Monthly Energy Cost ($US)","Density","Post SMS - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_info_group
count.plot.name[[count.plot]] = 'sms_all_data_info_group'
sms_all_data_wtp_tr <- call_plot_subset_several(treatment_time_series_data_table[get(energia_text_var)<398 | get(energia_text_var)>=399 & sms_intervention_month==1],"wtp_lw_md_h",'low','high','medium-low','medium-high',"importe_dl","Monthly Energy Cost ($US)","Density","Post SMS - Won or Lost the WTP Bid: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_wtp_tr
count.plot.name[[count.plot]] = 'sms_all_data_wtp_tr'
sms_all_data_wtpfraction_tr_cost <- call_plot_subset_several(treatment_time_series_data_table[sms_intervention_month==1],"fraction_lw_md_h",'low','high','medium-low','medium-high' ,"importe_dl","Monthly Energy Cost ($US)","Density","Post SMS - Won or Lost the WTP Bid, Fraction of Bill: Treatment")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_wtpfraction_tr_cost
count.plot.name[[count.plot]] = 'sms_all_data_wtpfraction_tr_cost'
sms_all_data_cl_cost <- call_plot_subset(control_time_series_data_table[sms_intervention_month==1],"intervention_group","Control Pre-Intervention","Control Post-Intervention","importe_dl","Monthly Energy Cost ($US)","Density","Post SMS - Pre and Post Intervention: Control")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = sms_all_data_cl_cost
count.plot.name[[count.plot]] = 'sms_all_data_cl_cost'
# All densities for each unique house (you can plot energia or importe here)
#for(i in 1:length(unique(data_time_series$Casa))){
# subset_house_data <- subset(data_time_series,data_time_series$Casa == unique(data_time_series$Casa)[i])
#mean_pre_intervention = mean(subset(subset_house_data,subset_house_data$intervention_group == unique(subset_house_data$intervention_group)[1])$energia,na.rm=TRUE)
#mean_post_intervention = mean(subset(subset_house_data,subset_house_data$intervention_group == unique(subset_house_data$intervention_group)[2])$energia,na.rm=TRUE)
#density_plot <- ggplot(subset_house_data, aes(energia, fill = intervention_group)) + geom_density(alpha = 0.2) + xlab("Energy (kWh") + ylab("Density") + geom_vline(xintercept=mean_pre_intervention,colour="blue") + geom_vline(xintercept=mean_post_intervention,colour="red")
#selected.house <- unique(data_time_series$Casa)[i]
#group_type <- unique(subset_house_data$treatment)
#plot.name = paste(selected.house,"_",group_type,sep="")
#mypath <- file.path("/Users/Diego/Desktop/Projects/Exploring the Marginal Value of Information/plots/densities",paste(plot.name,".jpg",sep=""))
#jpeg(file=mypath)
#print(density_plot)
#dev.off()
#}
# Comparing the energy report intervention and the SMS intervention
# NOTE CHANGE: report_intervention_month & sms_intervention_month
#for(i in 1:length(unique(data_time_series$Casa))){
# subset_house_data <- subset(data_time_series,data_time_series$Casa == unique(data_time_series$Casa)[i])
#subset_reports <- subset(subset_house_data,subset_house_data$sms_intervention_month ==1)
# mean_pre_intervention = mean(subset(subset_reports,subset_reports$intervention_group == unique(subset_reports$intervention_group)[1])$energia,na.rm=TRUE)
#mean_post_intervention = mean(subset(subset_reports,subset_reports$intervention_group == unique(subset_reports$intervention_group)[2])$energia,na.rm=TRUE)
#density_plot <- ggplot(subset_reports, aes(energia, fill = intervention_group)) + geom_density(alpha = 0.2) + xlab("Energy (kWh") + ylab("Density") + geom_vline(xintercept=mean_pre_intervention,colour="blue") + geom_vline(xintercept=mean_post_intervention,colour="red")
#selected.house <- unique(data_time_series$Casa)[i]
#group_type <- unique(subset_reports$treatment)
#plot.name = paste(selected.house,"_",group_type,sep="")
#mypath <- file.path("/Users/Diego/Desktop/Projects/Exploring the Marginal Value of Information/plots/densities/sms_reports",paste(plot.name,".jpg",sep=""))
#jpeg(file=mypath)
#print(density_plot)
#dev.off()
#}
##############################################################################################################
##############################################################################################################
### 1.2. Month differences: Differences for each month one year afterwards (e.g June 2016 - June 2015)
### Energy
month_e_difference <- month_diffs(treatment_time_series_data_table,energia_text_var) # Treatment
month_e_difference_control <- month_diffs(control_time_series_data_table,energia_text_var) # Control
distribution_e_differences <- rbind(month_e_difference,month_e_difference_control)
# All Data for Treatment and Control
an_e_t_c <- call_plot_subset(distribution_e_differences,'treatment','Treatment','Control','month_diff','Same Month Annual Differences ($US)','Density','Same Month Comparisons (All Data): Treatment vs Control')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_t_c
count.plot.name[[count.plot]] = 'an_e_t_c'
# Post and Pre Implementation
an_e_treatment_intervention <- call_plot_subset(month_e_difference,'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences (Treatment): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_intervention'
an_e_treatment_information_group <- call_plot_subset(month_e_difference,'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences - Won or Lost WTP")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_information_group
count.plot.name[[count.plot]] = 'an_e_treatment_information_group'
an_e_treatment_wtp_intervention <- call_plot_subset_several(subset(month_e_difference,month_e_difference$intervention_group == 'Treatment Post-Intervention'),'wtp_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Comparisons (Treatment - Willingness to Pay): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_wtp_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_wtp_intervention'
an_e_treatment_wtpfraction_intervention <- call_plot_subset_several(subset(month_e_difference,month_e_difference$intervention_group == 'Treatment Post-Intervention'),'fraction_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Comparisons (Treatment - Willingness to Pay, Fraction of Bill): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_wtpfraction_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_wtpfraction_intervention'
an_e_control_intervention <- call_plot_subset(month_e_difference_control,'intervention_group','Control Pre-Intervention','Control Post-Intervention','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences (Control): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_control_intervention
count.plot.name[[count.plot]] = 'an_e_control_intervention'
#Months with and without PAPER ENERGY REPORTS
an_e_treatment_paper_intervention <- call_plot_subset(subset(month_e_difference, month_e_difference$report_intervention_month==1),'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','month_diff',"Same Month Annual Differences (kWh)","Density","Post Paper Reports (Treatment): Pre and Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_paper_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_paper_intervention'
an_e_treatment_paper_information_group <- call_plot_subset(subset(month_e_difference, month_e_difference$report_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','month_diff',"Same Month Annual Differences (kWh)","Density","Post Paper Reports (Treatment): Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_paper_information_group
count.plot.name[[count.plot]] = 'an_e_treatment_paper_information_group'
an_e_treatment_wtp_paper_intervention <- call_plot_subset_several(subset(month_e_difference, month_e_difference$report_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences (Treatment - Willingness to Pay): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_wtp_paper_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_wtp_paper_intervention'
an_e_treatment_wtpfraction_paper_intervention <- call_plot_subset_several(subset(month_e_difference, month_e_difference$report_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences (Treatment - Willingness to Pay, Fraction of Bill): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_wtpfraction_paper_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_wtpfraction_paper_intervention'
an_e_control_paper_intervention <- call_plot_subset(subset(month_e_difference_control, month_e_difference_control$report_intervention_month==1),'intervention_group','Control Pre-Intervention','Control Post-Intervention','month_diff',"Monthly Energy Consumption (kWh)","Density","Post Paper Reports (Control): Pre and Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_control_paper_intervention
count.plot.name[[count.plot]] = 'an_e_control_paper_intervention'
#Months with and without SMS: Months with and without SMS
an_e_treatment_sms_intervention <- call_plot_subset(month_e_difference,'sms_intervention_month_text','Actively Receiving SMS','No SMS','month_diff',"Same Month Annual Differences (kWh)","Density","SMS - Treatment Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_sms_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_sms_intervention'
an_e_treatment_sms_information_group <- call_plot_subset(subset(month_e_difference, month_e_difference$sms_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','month_diff',"Same Month Annual Differences (kWh)","Density","SMS Post Intervention - Treatment Group: Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_sms_information_group
count.plot.name[[count.plot]] = 'an_e_treatment_sms_information_group'
an_e_treatment_wtp_sms_intervention <- call_plot_subset_several(subset(month_e_difference, month_e_difference$sms_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences (Treatment - Willingness to Pay): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_wtp_sms_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_wtp_sms_intervention'
an_e_treatment_wtpfraction_sms_intervention <- call_plot_subset_several(subset(month_e_difference, month_e_difference$sms_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences (kWh)","Density","Same Month Annual Differences (Treatment - Willingness to Pay, Fraction of Bill): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_treatment_wtpfraction_sms_intervention
count.plot.name[[count.plot]] = 'an_e_treatment_wtpfraction_sms_intervention'
an_e_control_sms_intervention <- call_plot_subset(month_e_difference_control,'sms_intervention_month_text','Actively Receiving SMS','No SMS','month_diff',"Same Month Annual Differences (kWh)","Density","SMS - Control Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_e_control_sms_intervention
count.plot.name[[count.plot]] = 'an_e_control_sms_intervention'
##############################################################################################################
##############################################################################################################
### Cost
#treatment_time_series_data_table$importe_dl <- as.integer(treatment_time_series_data_table$importe_dl)
#control_time_series_data_table$importe_dl <- as.integer(control_time_series_data_table$importe_dl)
month_c_difference <- month_diffs(treatment_time_series_data_table,'importe_dl') # Treatment
month_c_difference_control <- month_diffs(control_time_series_data_table,'importe_dl') # Control
distribution_differences_money <- rbind(month_c_difference,month_c_difference_control)
# All Data for Treatment and Control
an_c_t_c <- call_plot_subset(distribution_differences_money,'treatment','Treatment','Control','month_diff','Same Month Annual Differences ($US)','Density','Same Month Comparisons (All Data): Treatment vs Control')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_t_c
count.plot.name[[count.plot]] = 'an_c_t_c'
# Post and Pre Implementation
an_c_treatment_intervention <- call_plot_subset(month_c_difference,'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences (Treatment): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_intervention'
an_c_treatment_information_group <- call_plot_subset(month_c_difference,'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences - Won or Lost WTP")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_information_group
count.plot.name[[count.plot]] = 'an_c_treatment_information_group'
an_c_treatment_wtp_intervention <- call_plot_subset_several(subset(month_c_difference,month_c_difference$intervention_group == 'Treatment Post-Intervention'),'wtp_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Comparisons (Treatment - Willingness to Pay): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_wtp_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_wtp_intervention'
an_c_treatment_wtpfraction_intervention <- call_plot_subset_several(subset(month_c_difference,month_c_difference$intervention_group == 'Treatment Post-Intervention'),'fraction_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Comparisons (Treatment - Willingness to Pay, Fraction of Bill): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_wtpfraction_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_wtpfraction_intervention'
an_c_control_intervention <- call_plot_subset(month_c_difference_control,'intervention_group','Control Pre-Intervention','Control Post-Intervention','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences (Control): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_control_intervention
count.plot.name[[count.plot]] = 'an_c_control_intervention'
#Months with and without PAPER ENERGY REPORTS
an_c_treatment_paper_intervention <- call_plot_subset(subset(month_c_difference, month_c_difference$report_intervention_month==1),'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','month_diff',"Same Month Annual Differences ($US)","Density","Post Paper Reports (Treatment): Pre and Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_paper_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_paper_intervention'
an_c_treatment_paper_information_group <- call_plot_subset(subset(month_c_difference, month_c_difference$report_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','month_diff',"Same Month Annual Differences ($US)","Density","Post Paper Reports (Treatment): Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_paper_information_group
count.plot.name[[count.plot]] = 'an_c_treatment_paper_information_group'
an_c_treatment_wtp_paper_intervention <- call_plot_subset_several(subset(month_c_difference, month_c_difference$report_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences (Treatment - Willingness to Pay): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_wtp_paper_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_wtp_paper_intervention'
an_c_treatment_wtpfraction_paper_intervention <- call_plot_subset_several(subset(month_c_difference, month_c_difference$report_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences (Treatment - Willingness to Pay, Fraction of Bill): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_wtp_paper_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_wtp_paper_intervention'
an_c_control_paper_intervention <- call_plot_subset(subset(month_c_difference_control, month_c_difference_control$report_intervention_month==1),'intervention_group','Control Pre-Intervention','Control Post-Intervention','month_diff',"Monthly Energy Consumption ($US)","Density","Post Paper Reports (Control): Pre and Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_control_paper_intervention
count.plot.name[[count.plot]] = 'an_c_control_paper_intervention'
#Months with and without SMS: Months with and without SMS
an_c_treatment_sms_intervention <- call_plot_subset(month_c_difference,'sms_intervention_month_text','Actively Receiving SMS','No SMS','month_diff',"Same Month Annual Differences ($US)","Density","SMS - Treatment Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_sms_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_sms_intervention'
an_c_treatment_sms_information_group <- call_plot_subset(subset(month_c_difference, month_c_difference$sms_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','month_diff',"Same Month Annual Differences ($US)","Density","SMS Post Intervention - Treatment Group: Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_sms_information_group
count.plot.name[[count.plot]] = 'an_c_treatment_sms_information_group'
an_c_treatment_wtp_sms_intervention <- call_plot_subset_several(subset(month_c_difference, month_c_difference$sms_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences (Treatment - Willingness to Pay): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_wtp_sms_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_wtp_sms_intervention'
an_c_treatment_wtpfraction_sms_intervention <- call_plot_subset_several(subset(month_c_difference, month_c_difference$sms_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','month_diff',"Same Month Annual Differences ($US)","Density","Same Month Annual Differences (Treatment - Willingness to Pay, Fraction of Bill): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_treatment_wtpfraction_sms_intervention
count.plot.name[[count.plot]] = 'an_c_treatment_wtpfraction_sms_intervention'
an_c_control_sms_intervention <- call_plot_subset(month_c_difference_control,'sms_intervention_month_text','Actively Receiving SMS','No SMS','month_diff',"Same Month Annual Differences ($US)","Density","SMS - Control Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = an_c_control_sms_intervention
count.plot.name[[count.plot]] = 'an_c_control_sms_intervention'
##############################################################################################################
##############################################################################################################
### 1.3. Month by month reductions: (e.g Feb2016 - Jan2016 )
## Energy
mbm <- month_by_month(treatment_time_series_data_table,energia_text_var)
mbm_control <- month_by_month(control_time_series_data_table,energia_text_var)
mbm_bind <- rbind(mbm,mbm_control)
mbm_e_t_c <- call_plot_subset(mbm_bind,'treatment','Treatment','Control','diff_variable','Month by Month Differences (kWh)','Density','Month by Month Differences (All Data): Treatment vs Control')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_t_c
count.plot.name[[count.plot]] = 'mbm_e_t_c'
# Post and Pre Implementation
mbm_e_treatment_intervention <- call_plot_subset(mbm,'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','diff_variable',"Month by Month Differences (kWh)","Density","Month by Month Differences (Treatment): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_intervention'
mbm_e_treatment_information_group <- call_plot_subset(mbm,'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','diff_variable',"Same Month Annual Differences (kWh)","Density","Month by Month Differences (Treatment): - Won or Lost WTP")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_information_group
count.plot.name[[count.plot]] = 'mbm_e_treatment_information_group'
mbm_e_treatment_wtp_intervention <- call_plot_subset_several(subset(mbm,mbm$intervention_group == 'Treatment Post-Intervention'),'wtp_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Monthy by Month Differences (kWh)","Density","Month by Month Comparisons (Treatment - Willingness to Pay): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_wtp_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_wtp_intervention'
mbm_e_treatment_wtpfraction_intervention <- call_plot_subset_several(subset(mbm,mbm$intervention_group == 'Treatment Post-Intervention'),'fraction_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences (kWh)","Density","Month by Month Comparisons (Treatment - Willingness to Pay, Fraction of Bill): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_wtpfraction_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_wtpfraction_intervention'
mbm_e_control_intervention <- call_plot_subset(mbm_control,'intervention_group','Control Pre-Intervention','Control Post-Intervention','diff_variable',"Monthy by Month Differences (kWh)","Density","Monthy by Month Differences (Control): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_control_intervention
count.plot.name[[count.plot]] = 'mbm_e_control_intervention'
#Months with and without PAPER ENERGY REPORTS
mbm_e_treatment_paper_intervention <- call_plot_subset(subset(mbm, mbm$report_intervention_month==1),'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','diff_variable',"Month by Month Differences (kWh)","Density","Post Paper Reports Month by Month Differences (Treatment): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_paper_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_paper_intervention'
mbm_e_treatment_paper_information_group <- call_plot_subset(subset(mbm, mbm$report_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','diff_variable',"Month by Month Differences (kWh)","Density","Post Paper Reports (Treatment): Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_paper_information_group
count.plot.name[[count.plot]] = 'mbm_e_treatment_paper_information_group'
mbm_e_treatment_wtp_paper_intervention <- call_plot_subset_several(subset(mbm, mbm$report_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences (kWh)","Density","Month by Month Differences (Treatment - Willingness to Pay): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_wtp_paper_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_wtp_paper_intervention'
mbm_e_treatment_wtpfraction_paper_intervention <- call_plot_subset_several(subset(mbm, mbm$report_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences (kWh)","Density","Month by Month Differences (Treatment - Willingness to Pay, Fraction of Bill): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_wtpfraction_paper_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_wtpfraction_paper_intervention'
mbm_e_control_paper_intervention <- call_plot_subset(subset(mbm_control, mbm_control$report_intervention_month==1),'intervention_group','Control Pre-Intervention','Control Post-Intervention','diff_variable',"Month by Month Differences (kwh)","Density","Post Paper Reports Month by Month Differences (Control): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_control_paper_intervention
count.plot.name[[count.plot]] = 'mbm_e_control_paper_intervention'
#Months with and without SMS: Months with and without SMS
mbm_e_treatment_sms_intervention <- call_plot_subset(mbm,'sms_intervention_month_text','Actively Receiving SMS','No SMS','diff_variable',"Month by Month Differences (kWh)","Density","SMS - Treatment Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_sms_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_sms_intervention'
mbm_e_treatment_sms_information_group <- call_plot_subset(subset(mbm, mbm$sms_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','diff_variable',"Month by Month Differences (kWh)","Density","SMS Post Intervention - Treatment Group: Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_sms_information_group
count.plot.name[[count.plot]] = 'mbm_e_treatment_sms_information_group'
mbm_e_treatment_wtp_sms_intervention <- call_plot_subset_several(subset(mbm, mbm$sms_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences (kWh)","Density","Month by Month Differences (Treatment - Willingness to Pay): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_wtp_sms_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_wtp_sms_intervention'
mbm_e_treatment_wtpfraction_sms_intervention <- call_plot_subset_several(subset(mbm, mbm$sms_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences (kWh)","Density","Month by Month Differences (Treatment - Willingness to Pay, Fraction of Bill): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_treatment_wtpfraction_sms_intervention
count.plot.name[[count.plot]] = 'mbm_e_treatment_wtpfraction_sms_intervention'
mbm_e_control_sms_intervention <- call_plot_subset(mbm_control,'sms_intervention_month_text','Actively Receiving SMS','No SMS','diff_variable',"Same Month Annual Differences (kWh)","Density","SMS - Control Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_e_control_sms_intervention
count.plot.name[[count.plot]] = 'mbm_e_control_sms_intervention'
####
######
###### Cordobas
mbm_c <- month_by_month(treatment_time_series_data_table,'importe_dl')
mbm_c_control <- month_by_month(control_time_series_data_table,'importe_dl')
mbm_bind_cordobas <- rbind(mbm_c,mbm_c_control)
mbm_c_t_c <- call_plot_subset(mbm_bind_cordobas,'treatment','Treatment','Control','diff_variable','Month by Month Differences ($US)','Density','Month by Month Differences (All Data): Treatment vs Control')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_t_c
count.plot.name[[count.plot]] = 'mbm_c_t_c'
# Post and Pre Implementation
mbm_c_treatment_intervention <- call_plot_subset(mbm_c,'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','diff_variable',"Month by Month Differences ($US)","Density","Month by Month Differences (Treatment): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_intervention'
mbm_c_treatment_information_group <- call_plot_subset(mbm_c,'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','diff_variable',"Monthy by Month Differences ($US)","Density","Same Month Annual Differences - Won or Lost WTP")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_information_group
count.plot.name[[count.plot]] = 'mbm_c_treatment_information_group'
mbm_c_treatment_wtp_intervention <- call_plot_subset_several(subset(mbm_c,mbm_c$intervention_group == 'Treatment Post-Intervention'),'wtp_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Monthy by Month Differences ($US)","Density","Month by Month Comparisons (Treatment - Willingness to Pay): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_wtp_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_wtp_intervention'
mbm_c_treatment_wtpfraction_intervention <- call_plot_subset_several(subset(mbm_c,mbm_c$intervention_group == 'Treatment Post-Intervention'),'fraction_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences ($US)","Density","Month by Month Comparisons (Treatment - Willingness to Pay, Fraction of Bill): Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_wtpfraction_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_wtpfraction_intervention'
mbm_c_control_intervention <- call_plot_subset(mbm_c_control,'intervention_group','Control Pre-Intervention','Control Post-Intervention','diff_variable',"Monthy by Month Differences ($US)","Density","Monthy by Month Differences (Control): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_control_intervention
count.plot.name[[count.plot]] = 'mbm_c_control_intervention'
#Months with and without PAPER ENERGY REPORTS
mbm_c_treatment_paper_intervention <- call_plot_subset(subset(mbm_c, mbm_c$report_intervention_month==1),'intervention_group','Treatment Pre-Intervention','Treatment Post-Intervention','diff_variable',"Month by Month Differences ($US)","Density","Post Paper Reports Month by Month Differences (Treatment): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_paper_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_paper_intervention'
mbm_c_treatment_paper_information_group <- call_plot_subset(subset(mbm_c, mbm_c$report_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','diff_variable',"Month by Month Differences ($US)","Density","Post Paper Reports (Treatment): Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_paper_information_group
count.plot.name[[count.plot]] = 'mbm_c_treatment_paper_information_group'
mbm_c_treatment_wtp_paper_intervention <- call_plot_subset_several(subset(mbm_c, mbm_c$report_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences ($US)","Density","Month by Month Differences (Treatment - Willingness to Pay): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_wtp_paper_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_wtp_paper_intervention'
mbm_c_treatment_wtpfraction_paper_intervention <- call_plot_subset_several(subset(mbm_c, mbm_c$report_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences ($US)","Density","Month by Month Differences (Treatment - Willingness to Pay, Fraction of Bill): Post Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_wtpfraction_paper_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_wtpfraction_paper_intervention'
mbm_c_control_paper_intervention <- call_plot_subset(subset(mbm_c_control, mbm_c_control$report_intervention_month==1),'intervention_group','Control Pre-Intervention','Control Post-Intervention','diff_variable',"Month by Month Differences (kwh)","Density","Post Paper Reports Month by Month Differences (Control): Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_control_paper_intervention
count.plot.name[[count.plot]] = 'mbm_c_control_paper_intervention'
#Months with and without SMS: Months with and without SMS
mbm_c_treatment_sms_intervention <- call_plot_subset(mbm_c,'sms_intervention_month_text','Actively Receiving SMS','No SMS','diff_variable',"Month by Month Differences ($US)","Density","SMS - Treatment Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_sms_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_sms_intervention'
mbm_c_treatment_sms_information_group <- call_plot_subset(subset(mbm_c, mbm_c$sms_intervention_month==1),'information_group','Lost Willingness to Pay Information Bid','Won Willingness to Pay Information Bid','diff_variable',"Month by Month Differences ($US)","Density","SMS Post Intervention - Treatment Group: Won or Lost the WTP Bid")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_sms_information_group
count.plot.name[[count.plot]] = 'mbm_c_treatment_sms_information_group'
mbm_c_treatment_wtp_sms_intervention <- call_plot_subset_several(subset(mbm_c, mbm_c$sms_intervention_month==1),'wtp_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences ($US)","Density","Month by Month Differences (Treatment - Willingness to Pay): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_wtp_sms_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_wtp_sms_intervention'
mbm_c_treatment_wtpfraction_sms_intervention <- call_plot_subset_several(subset(mbm_c, mbm_c$sms_intervention_month==1),'fraction_lw_md_h','low','high','medium-low','medium-high','diff_variable',"Month by Month Differences ($US)","Density","Month by Month Differences (Treatment - Willingness to Pay, Fraction of Bill): Post SMS")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_treatment_wtpfraction_sms_intervention
count.plot.name[[count.plot]] = 'mbm_c_treatment_wtpfraction_sms_intervention'
mbm_c_control_sms_intervention <- call_plot_subset(mbm_c_control,'sms_intervention_month_text','Actively Receiving SMS','No SMS','diff_variable',"Same Month Annual Differences ($US)","Density","SMS - Control Group: Pre and Post Intervention")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_c_control_sms_intervention
count.plot.name[[count.plot]] = 'mbm_c_control_sms_intervention'
for (j in 1:count.plot) {
plot.name = count.plot.name[[j]]
mypath <- file.path("/Users/diego/Desktop/Projects/nicaragua_dr_ee_behavior/plots/energy_cost",energy_file,energia_text_var,paste(plot.name,".jpg",sep = ""))
jpeg(file=mypath)
print(count.plot.list[[j]])
dev.off()
}
}
#####################################################################################################################################################
#####################################################################################################################################################
#####################################################################################################################################################
## Treatment vs Control Plots
#####################################################################################################################################################
#####################################################################################################################################################
#####################################################################################################################################################
plot_tr_ctl <- function(time_series_data_table,energia_text_var,energy_file){
count.plot.list <- list()
count.plot.name <- list()
text.list <- list()
count.plot <- 0
options(warn=-1)
###### Monthly Differences one Year Afterwards Energia
month_diffs_dt <- month_diffs(time_series_data_table,energia_text_var)
# Adding variables to the dt
month_diffs_dt$treatment_v2 <- ifelse(month_diffs_dt$Casa == 'A3'| month_diffs_dt$Casa == 'A6'| month_diffs_dt$Casa == 'A11'| month_diffs_dt$Casa == 'A12'| month_diffs_dt$Casa == 'A17'| month_diffs_dt$Casa == 'A18'| month_diffs_dt$Casa == 'A20'| month_diffs_dt$Casa == 'A21'| month_diffs_dt$Casa == 'A25'| month_diffs_dt$Casa == 'A26','Treatment - Won Information',ifelse(month_diffs_dt$Casa == 'A1' | month_diffs_dt$Casa == 'A7' | month_diffs_dt$Casa == 'A9' | month_diffs_dt$Casa == 'A14' | month_diffs_dt$Casa == 'A16' | month_diffs_dt$Casa == 'A19' | month_diffs_dt$Casa == 'A22' | month_diffs_dt$Casa == 'A24' | month_diffs_dt$Casa == 'A28' | month_diffs_dt$Casa == 'A29','Treatment - Lost Information','Control'))
month_diffs_dt$only_paper <- ifelse(month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Febrero' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Marzo' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Abril' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Mayo','Only Paper Intervention','Other')
month_diffs_dt$sms_dr <- ifelse(month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Junio' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Julio' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Agosto' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Septiembre' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Octubre' ,'DR Intervetnion + SMS','Other')
month_diffs_dt$all_months_intervention <- ifelse(month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Junio' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Julio' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Agosto' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Septiembre' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Octubre' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Noviembre' | month_diffs_dt$Ano == 2016 & month_diffs_dt$Mes == 'Diciembre','DR Intervetnion + SMS All','Other')
only_paper <- subset(month_diffs_dt,month_diffs_dt$only_paper == "Only Paper Intervention")
only_sms_dr <- subset(month_diffs_dt,month_diffs_dt$sms_dr == "DR Intervetnion + SMS")
only_sms_dr_full <- subset(month_diffs_dt,month_diffs_dt$all_months_intervention == "DR Intervetnion + SMS All")
# Two Variable Plots
only_paper_tr_cl <- call_plot_subset(only_paper,'treatment','Treatment','Control','month_diff',"Monthly Annual Differences (kWh)","Density","Only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_tr_cl
count.plot.name[[count.plot]] = 'only_paper_tr_cl'
only_dr_sms_tr_cl <- call_plot_subset(only_sms_dr,'treatment','Treatment','Control','month_diff',"Monthly Annual Differences (kWh)","Density","DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_tr_cl
count.plot.name[[count.plot]] = 'only_dr_sms_tr_cl'
only_dr_sms_full_tr_cl <- call_plot_subset(only_sms_dr_full,'treatment','Treatment','Control','month_diff',"Monthly Annual Differences (kWh)","Density","DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_tr_cl
count.plot.name[[count.plot]] = 'only_dr_sms_full_tr_cl'
# Three Variable PLots
only_paper_info <- call_plot_subset_three(only_paper,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','month_diff','Monthly Annual Differences (kWh)','Density',"Only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_info
count.plot.name[[count.plot]] = 'only_paper_info'
only_dr_sms_info <- call_plot_subset_three(only_sms_dr,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','month_diff','Monthly Annual Differences (kWh)','Density',"DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_info
count.plot.name[[count.plot]] = 'only_dr_sms_info'
only_dr_sms_full_info <- call_plot_subset_three(only_sms_dr_full,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','month_diff','Monthly Annual Differences (kWh)','Density','DR + SMS Full Implementation')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_info
count.plot.name[[count.plot]] = 'only_dr_sms_full_info'
# Five Variable PLots
only_paper_info_wtp <- call_plot_subset_five(only_paper,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Differences (kWh)","Density","WTP: Only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_info_wtp
count.plot.name[[count.plot]] = 'only_paper_info_wtp'
only_dr_sms_info_wtp<- call_plot_subset_five(only_sms_dr,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Differences (kWh)","Density","WTP: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_info_wtp
count.plot.name[[count.plot]] = 'only_dr_sms_info_wtp'
only_dr_sms_full_info_wtp <- call_plot_subset_five(only_sms_dr_full,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Differences (kWh)","Density","WTP: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_info_wtp
count.plot.name[[count.plot]] = 'only_dr_sms_full_info_wtp'
only_paper_info_wtp_fr <-call_plot_subset_five(only_paper,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Differences (kWh)","Density","WTP Fraction: Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_info_wtp_fr
count.plot.name[[count.plot]] = 'only_paper_info_wtp_fr'
only_dr_sms_info_wtp_fr <- call_plot_subset_five(only_sms_dr,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Differences (kWh)","Density","WTP Fraction: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_info_wtp_fr
count.plot.name[[count.plot]] = 'only_dr_sms_info_wtp_fr'
only_dr_sms_full_info_wtp_fr <-call_plot_subset_five(only_sms_dr_full,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Differences (kWh)","Density","WTP Fraction: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_info_wtp_fr
count.plot.name[[count.plot]] = 'only_dr_sms_full_info_wtp_fr'
###### Monthly Differences one Year Afterwards Cost
month_diffs_cost_dt <- month_diffs(time_series_data_table,'importe_dl')
# Adding variables to the dt
month_diffs_cost_dt$treatment_v2 <- ifelse(month_diffs_cost_dt$Casa == 'A3'| month_diffs_cost_dt$Casa == 'A6'| month_diffs_cost_dt$Casa == 'A11'| month_diffs_cost_dt$Casa == 'A12'| month_diffs_cost_dt$Casa == 'A17'| month_diffs_cost_dt$Casa == 'A18'| month_diffs_cost_dt$Casa == 'A20'| month_diffs_cost_dt$Casa == 'A21'| month_diffs_cost_dt$Casa == 'A25'| month_diffs_cost_dt$Casa == 'A26','Treatment - Won Information',ifelse(month_diffs_cost_dt$Casa == 'A1' | month_diffs_cost_dt$Casa == 'A7' | month_diffs_cost_dt$Casa == 'A9' | month_diffs_cost_dt$Casa == 'A14' | month_diffs_cost_dt$Casa == 'A16' | month_diffs_cost_dt$Casa == 'A19' | month_diffs_cost_dt$Casa == 'A22' | month_diffs_cost_dt$Casa == 'A24' | month_diffs_cost_dt$Casa == 'A28' | month_diffs_cost_dt$Casa == 'A29','Treatment - Lost Information','Control'))
month_diffs_cost_dt$only_paper <- ifelse(month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Febrero' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Marzo' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Abril' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Mayo','Only Paper Intervention','Other')
month_diffs_cost_dt$sms_dr <- ifelse(month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Junio' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Julio' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Agosto' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Septiembre' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Octubre' ,'DR Intervetnion + SMS','Other')
month_diffs_cost_dt$all_months_intervention <- ifelse(month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Junio' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Julio' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Agosto' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Septiembre' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Octubre' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Noviembre' | month_diffs_cost_dt$Ano == 2016 & month_diffs_cost_dt$Mes == 'Diciembre','DR Intervetnion + SMS All','Other')
only_paper_ct <- subset(month_diffs_cost_dt,month_diffs_cost_dt$only_paper == "Only Paper Intervention")
only_sms_dr_ct <- subset(month_diffs_cost_dt,month_diffs_cost_dt$sms_dr == "DR Intervetnion + SMS")
only_sms_dr_full_ct <- subset(month_diffs_cost_dt,month_diffs_cost_dt$all_months_intervention == "DR Intervetnion + SMS All")
# Two Variable Plots
only_paper_tr_cl_ct <- call_plot_subset(only_paper_ct,'treatment','Treatment','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_tr_cl_ct
count.plot.name[[count.plot]] = 'only_paper_tr_cl_ct'
only_dr_sms_tr_cl_ct <- call_plot_subset(only_sms_dr_ct,'treatment','Treatment','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_tr_cl_ct
count.plot.name[[count.plot]] = 'only_dr_sms_tr_cl_ct'
only_dr_sms_full_tr_cl_ct <- call_plot_subset(only_sms_dr_full_ct,'treatment','Treatment','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_tr_cl_ct
count.plot.name[[count.plot]] = 'only_dr_sms_full_tr_cl_ct'
# Three Variable PLots
only_paper_info_ct <- call_plot_subset_three(only_paper_ct,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','month_diff','Same Month Annual Cost Differences ($US)','Density',"only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_info_ct
count.plot.name[[count.plot]] = 'only_paper_info_ct'
only_dr_sms_info_ct <- call_plot_subset_three(only_sms_dr_ct,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','month_diff','Same Month Annual Cost Differences ($US)','Density',"DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_info_ct
count.plot.name[[count.plot]] = 'only_dr_sms_info_ct'
only_dr_sms_full_info_ct <- call_plot_subset_three(only_sms_dr_full_ct,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','month_diff','Same Month Annual Cost Differences ($US)','Density','DR + SMS Full Implementation')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_info_ct
count.plot.name[[count.plot]] = 'only_dr_sms_full_info_ct'
# Five Variable PLots
only_paper_info_wtp_ct <- call_plot_subset_five(only_paper_ct,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","WTP: only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_info_wtp_ct
count.plot.name[[count.plot]] = 'only_paper_info_wtp_ct'
only_dr_sms_info_wtp_ct <- call_plot_subset_five(only_sms_dr_ct,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","WTP: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_info_wtp_ct
count.plot.name[[count.plot]] = 'only_dr_sms_info_wtp_ct'
only_dr_sms_full_info_wtp_ct <- call_plot_subset_five(only_sms_dr_full_ct,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","WTP: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_info_wtp_ct
count.plot.name[[count.plot]] = 'only_dr_sms_full_info_wtp_ct'
only_paper_info_wtp_fr_ct <-call_plot_subset_five(only_paper_ct,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","WTP Fraction: only Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_paper_info_wtp_fr_ct
count.plot.name[[count.plot]] = 'only_paper_info_wtp_fr_ct'
only_dr_sms_info_wtp_fr_ct <- call_plot_subset_five(only_sms_dr_ct,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","WTP Fraction: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_info_wtp_fr_ct
count.plot.name[[count.plot]] = 'only_dr_sms_info_wtp_fr_ct'
only_dr_sms_full_info_wtp_fr_ct <-call_plot_subset_five(only_sms_dr_full_ct,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','month_diff',"Same Month Annual Cost Differences ($US)","Density","WTP Fraction: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = only_dr_sms_full_info_wtp_fr_ct
count.plot.name[[count.plot]] = 'only_dr_sms_full_info_wtp_fr_ct'
###### Month by Month Analysis Energia
month_by_month_dt <- month_by_month(time_series_data_table,energia_text_var)
# Adding variables to the dt
month_by_month_dt$treatment_v2 <- ifelse(month_by_month_dt$Casa == 'A3'| month_by_month_dt$Casa == 'A6'| month_by_month_dt$Casa == 'A11'| month_by_month_dt$Casa == 'A12'| month_by_month_dt$Casa == 'A17'| month_by_month_dt$Casa == 'A18'| month_by_month_dt$Casa == 'A20'| month_by_month_dt$Casa == 'A21'| month_by_month_dt$Casa == 'A25'| month_by_month_dt$Casa == 'A26','Treatment - Won Information',ifelse(month_by_month_dt$Casa == 'A1' | month_by_month_dt$Casa == 'A7' | month_by_month_dt$Casa == 'A9' | month_by_month_dt$Casa == 'A14' | month_by_month_dt$Casa == 'A16' | month_by_month_dt$Casa == 'A19' | month_by_month_dt$Casa == 'A22' | month_by_month_dt$Casa == 'A24' | month_by_month_dt$Casa == 'A28' | month_by_month_dt$Casa == 'A29','Treatment - Lost Information','Control'))
month_by_month_dt$mbm_paper <- ifelse(month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Febrero' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Marzo' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Abril' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Mayo','Only Paper Intervention','Other')
month_by_month_dt$sms_dr <- ifelse(month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Junio' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Julio' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Agosto' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Septiembre' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Octubre' ,'DR Intervetnion + SMS','Other')
month_by_month_dt$all_months_intervention <- ifelse(month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Junio' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Julio' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Agosto' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Septiembre' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Octubre' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Noviembre' | month_by_month_dt$Ano == 2016 & month_by_month_dt$Mes == 'Diciembre','DR Intervetnion + SMS All','Other')
mbm_paper <- subset(month_by_month_dt,month_by_month_dt$mbm_paper == "Only Paper Intervention")
mbm_sms_dr <- subset(month_by_month_dt,month_by_month_dt$sms_dr == "DR Intervetnion + SMS")
mbm_sms_dr_full <- subset(month_by_month_dt,month_by_month_dt$all_months_intervention == "DR Intervetnion + SMS All")
# Two Variable Plots
mbm_paper_tr_cl <- call_plot_subset(mbm_paper,'treatment','Treatment','Control','diff_variable',"Month by Month Differences (kWh)","Density","Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_tr_cl
count.plot.name[[count.plot]] = 'mbm_paper_tr_cl'
mbm_dr_sms_tr_cl <- call_plot_subset(mbm_sms_dr,'treatment','Treatment','Control','diff_variable',"Month by Month Differences (kWh)","Density","DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_tr_cl
count.plot.name[[count.plot]] = 'mbm_dr_sms_tr_cl'
mbm_dr_sms_full_tr_cl <- call_plot_subset(mbm_sms_dr_full,'treatment','Treatment','Control','diff_variable',"Month by Month Differences (kWh)","Density","DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_tr_cl
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_tr_cl'
# Three Variable PLots
mbm_paper_info <- call_plot_subset_three(mbm_paper,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','diff_variable','Month by Month Differences (kWh)','Density',"Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_info
count.plot.name[[count.plot]] = 'mbm_paper_info'
mbm_dr_sms_info <- call_plot_subset_three(mbm_sms_dr,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','diff_variable','Month by Month Differences (kWh)','Density',"DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_info
count.plot.name[[count.plot]] = 'mbm_dr_sms_info'
mbm_dr_sms_full_info <- call_plot_subset_three(mbm_sms_dr_full,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','diff_variable','Month by Month Differences (kWh)','Density','DR + SMS Full Implementation')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_info
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_info'
# Five Variable PLots
mbm_paper_info_wtp <- call_plot_subset_five(mbm_paper,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Differences (kWh)","Density","WTP: Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_info_wtp
count.plot.name[[count.plot]] = 'mbm_paper_info_wtp'
mbm_dr_sms_info_wtp<- call_plot_subset_five(mbm_sms_dr,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Differences (kWh)","Density","WTP: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_info_wtp
count.plot.name[[count.plot]] = 'mbm_dr_sms_info_wtp'
mbm_dr_sms_full_info_wtp <- call_plot_subset_five(mbm_sms_dr_full,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Differences (kWh)","Density","WTP: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_info_wtp
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_info_wtp'
mbm_paper_info_wtp_fr <-call_plot_subset_five(mbm_paper,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Differences (kWh)","Density","WTP Fraction:Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_info_wtp_fr
count.plot.name[[count.plot]] = 'mbm_paper_info_wtp_fr'
mbm_dr_sms_info_wtp_fr <- call_plot_subset_five(mbm_sms_dr,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Differences (kWh)","Density","WTP Fraction: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_info_wtp_fr
count.plot.name[[count.plot]] = 'mbm_dr_sms_info_wtp_fr'
mbm_dr_sms_full_info_wtp_fr <-call_plot_subset_five(mbm_sms_dr_full,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Differences (kWh)","Density","WTP Fraction: Post DR + SMS Full Implementatio")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_info_wtp_fr
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_info_wtp_fr'
###### Month by Month Analysis Costo
time_series_data_table$importe_dl <- as.numeric(time_series_data_table$importe_dl)
month_by_month_costo_dt <- month_by_month(time_series_data_table,'importe_dl')
# Adding variables to the dt
month_by_month_costo_dt$treatment_v2 <- ifelse(month_by_month_costo_dt$Casa == 'A3'| month_by_month_costo_dt$Casa == 'A6'| month_by_month_costo_dt$Casa == 'A11'| month_by_month_costo_dt$Casa == 'A12'| month_by_month_costo_dt$Casa == 'A17'| month_by_month_costo_dt$Casa == 'A18'| month_by_month_costo_dt$Casa == 'A20'| month_by_month_costo_dt$Casa == 'A21'| month_by_month_costo_dt$Casa == 'A25'| month_by_month_costo_dt$Casa == 'A26','Treatment - Won Information',ifelse(month_by_month_costo_dt$Casa == 'A1' | month_by_month_costo_dt$Casa == 'A7' | month_by_month_costo_dt$Casa == 'A9' | month_by_month_costo_dt$Casa == 'A14' | month_by_month_costo_dt$Casa == 'A16' | month_by_month_costo_dt$Casa == 'A19' | month_by_month_costo_dt$Casa == 'A22' | month_by_month_costo_dt$Casa == 'A24' | month_by_month_costo_dt$Casa == 'A28' | month_by_month_costo_dt$Casa == 'A29','Treatment - Lost Information','Control'))
month_by_month_costo_dt$mbm_paper <- ifelse(month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Febrero' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Marzo' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Abril' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Mayo','Only Paper Intervention','Other')
month_by_month_costo_dt$sms_dr <- ifelse(month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Junio' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Julio' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Agosto' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Septiembre' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Octubre' ,'DR Intervetnion + SMS','Other')
month_by_month_costo_dt$all_months_intervention <- ifelse(month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Junio' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Julio' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Agosto' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Septiembre' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Octubre' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Noviembre' | month_by_month_costo_dt$Ano == 2016 & month_by_month_costo_dt$Mes == 'Diciembre','DR Intervetnion + SMS All','Other')
mbm_paper <- subset(month_by_month_costo_dt,month_by_month_costo_dt$mbm_paper == "Only Paper Intervention")
mbm_sms_dr <- subset(month_by_month_costo_dt,month_by_month_costo_dt$sms_dr == "DR Intervetnion + SMS")
mbm_sms_dr_full <- subset(month_by_month_costo_dt,month_by_month_costo_dt$all_months_intervention == "DR Intervetnion + SMS All")
# Two Variable Plots
mbm_paper_tr_cl_ct <- call_plot_subset(mbm_paper,'treatment','Treatment','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_tr_cl_ct
count.plot.name[[count.plot]] = 'mbm_paper_tr_cl_ct'
mbm_dr_sms_tr_cl_ct <- call_plot_subset(mbm_sms_dr,'treatment','Treatment','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_tr_cl_ct
count.plot.name[[count.plot]] = 'mbm_dr_sms_tr_cl_ct'
mbm_dr_sms_full_tr_cl_ct <- call_plot_subset(mbm_sms_dr_full,'treatment','Treatment','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_tr_cl_ct
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_tr_cl_ct'
# Three Variable PLots
mbm_paper_info_dt <- call_plot_subset_three(mbm_paper,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','diff_variable','Month by Month Cost Differences ($US)','Density',"Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_info_dt
count.plot.name[[count.plot]] = 'mbm_paper_info_dt'
mbm_dr_sms_info_dt <- call_plot_subset_three(mbm_sms_dr,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','diff_variable','Month by Month Cost Differences ($US)','Density',"DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_info_dt
count.plot.name[[count.plot]] = 'mbm_dr_sms_info_dt'
mbm_dr_sms_full_info_dt <- call_plot_subset_three(mbm_sms_dr_full,'treatment_v2','Treatment - Won Information','Treatment - Lost Information','Control','diff_variable','Month by Month Cost Differences ($US)','Density','DR + SMS Full Implementation')
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_info_dt
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_info_dt'
# Five Variable PLots
mbm_paper_info_wtp_dt <- call_plot_subset_five(mbm_paper,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","WTP: Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_info_wtp_dt
count.plot.name[[count.plot]] = 'mbm_paper_info_wtp_dt'
mbm_dr_sms_info_wtp_dt <- call_plot_subset_five(mbm_sms_dr,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","WTP: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_info_wtp_dt
count.plot.name[[count.plot]] = 'mbm_dr_sms_info_wtp_dt'
mbm_dr_sms_full_info_wtp_dt <- call_plot_subset_five(mbm_sms_dr_full,"wtp_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","WTP: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_info_wtp_dt
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_info_wtp_dt'
mbm_paper_info_wtp_fr_dt <-call_plot_subset_five(mbm_paper,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","WTP Fraction:Paper Reports")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_paper_info_wtp_fr_dt
count.plot.name[[count.plot]] = 'mbm_paper_info_wtp_fr_dt'
mbm_dr_sms_info_wtp_fr_dt <- call_plot_subset_five(mbm_sms_dr,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","WTP Fraction: Post DR + SMS Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_info_wtp_fr_dt
count.plot.name[[count.plot]] = 'mbm_dr_sms_info_wtp_fr_dt'
mbm_dr_sms_full_info_wtp_fr_dt <-call_plot_subset_five(mbm_sms_dr_full,"fraction_lw_md_h",'low','high','medium-low','medium-high','Control','diff_variable',"Month by Month Cost Differences ($US)","Density","WTP Fraction: Post DR + SMS Full Implementation")
count.plot <- count.plot + 1
count.plot.list[[count.plot]] = mbm_dr_sms_full_info_wtp_fr_dt
count.plot.name[[count.plot]] = 'mbm_dr_sms_full_info_wtp_fr_dt'
for (j in 1:count.plot) {
plot.name = count.plot.name[[j]]
mypath <- file.path("/Users/diego/Desktop/Projects/nicaragua_dr_ee_behavior/plots/treatment_control",energy_file,energia_text_var,paste(plot.name,".jpg",sep = ""))
jpeg(file=mypath)
print(count.plot.list[[j]])
dev.off()
}
}
|
24c5a3831573d4b417fda0b5ef6fa8c14dc5bd30
|
e9b3174732943e94cd1144d620608e2614e5773b
|
/sep_26_backend.r
|
1253e930a9992110e34a15491f97f83ca6b00472
|
[] |
no_license
|
johnchower/top_500_users
|
a0ce7804f49944ca628b3d11654ca31d05634d56
|
82c01dbdaa38fd1878b70f7e3219d63b3e3c49ad
|
refs/heads/master
| 2020-12-02T12:42:15.808159
| 2016-09-21T20:34:19
| 2016-09-21T20:34:19
| 66,874,400
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,923
|
r
|
sep_26_backend.r
|
# (Cohort, account_type) breakdown by tier
user_cohort_breakdown_data_list <- user_tier_cutoffs %>%
as.list %>% {
names(.) <- paste("top", ., sep = "_")
return(.) } %>%
llply(.fun = function(cutoff){
y <- char_tidy_user_table %>%
filter(variable %in% "Account_Type") %>%
dcast(user_id ~ variable, value.var = "value")
x <- num_tidy_user_table %>%
filter(variable %in% c("pa_rank")) %>%
dcast(user_id ~ variable, value.var = "value") %>%
filter(pa_rank <= cutoff) %>%
select(-pa_rank) %>% {
left_join(., y) }
cohortdata_char_user_belongs_to_cohort %>%
filter(variable=="cohort_group_name") %>%
select(user_id, belongs_to_cohort = value) %>% {
inner_join(., x) } %>%
group_by(belongs_to_cohort, Account_Type) %>%
summarise(number_of_users = length(unique(user_id))) %>%
mutate(Account_Type = gsub(pattern = " ", replacement = "_", Account_Type)) %>%
group_by(belongs_to_cohort) %>%
mutate(total_users = sum(number_of_users),
percent_users = total_users/cutoff) %>%
dcast(belongs_to_cohort + total_users + percent_users ~ Account_Type,
value.var = "number_of_users") %>%
arrange(desc(total_users)) %>% {
.[is.na(.)] <- 0
return(.) }
})
# Most active cohort groups
most_active_cohort_groups <- user_cohort_breakdown_data_list %>%
lapply(FUN = function(df){
df %>%
filter(percent_users >= .05) %>%
{.$belongs_to_cohort} %>%
unique
}) %>%
unlist %>%
unique
# Dominant Platform Action breakdown by tier, cohort_group, account_type
dominant_pa_breakdown_list <- user_tier_cutoffs %>%
as.list %>% {
names(.) <- paste("top", ., sep = "_")
return(.) } %>%
llply(.fun = function(cutoff){
y <- char_tidy_user_table %>%
filter(variable %in% "Account_Type") %>%
dcast(user_id ~ variable, value.var = "value")
x <- num_tidy_user_table %>%
filter(variable %in% c("pa_rank", "pa_count")|
grepl(pattern = "cornerstone_count_", x = variable)) %>%
dcast(user_id ~ variable, value.var = "value") %>%
filter(pa_rank <= cutoff) %>%
select(-pa_rank) %>% {
left_join(., y) } %>% {
.[is.na(.)] <- 0
return(.) }
cohortdata_char_user_belongs_to_cohort %>%
filter(variable == "cohort_group_name") %>%
select(user_id, belongs_to_cohort = value) %>% {
inner_join(., x) } %>%
select(-user_id) %>%
melt(id.vars = c("belongs_to_cohort", "Account_Type")) %>%
group_by(belongs_to_cohort, Account_Type, variable) %>%
summarise(total_actions = sum(value)) %>% head
})
# Response rate by (tier, account_type)
response_rate_list <- user_tier_cutoffs %>%
as.list %>% {
names(.) <- paste("top", ., sep = "_")
return(.) } %>%
llply(.fun = function(cutoff){
y <- char_tidy_user_table %>%
filter(variable %in% "Account_Type") %>%
dcast(user_id ~ variable, value.var = "value")
x <- num_tidy_user_table %>%
filter(variable %in% c("pa_rank", "response_rate")) %>%
dcast(user_id ~ variable, value.var = "value") %>%
filter(pa_rank <= cutoff) %>%
select(-pa_rank) %>% {
left_join(., y) } %>% {
.[is.na(.)] <- 0
return(.) }
cohortdata_char_user_belongs_to_cohort %>%
filter(variable == "cohort_group_name") %>%
select(user_id, belongs_to_cohort = value) %>% {
inner_join(., x) } %>%
select(-user_id) %>%
melt(id.vars = c("belongs_to_cohort", "Account_Type")) %>%
group_by(belongs_to_cohort, Account_Type, variable) %>%
summarise(avg_response_rate = mean(value))
})
# Triangle diagrams for active cohort groups
triangle_diagram_data_list <- most_active_cohort_groups %>%
as.list %>% {
names(.) <- .
return(.) } %>%
llply(.fun = function(cohort_group){
user_set <- cohortdata_char_user_belongs_to_cohort %>%
filter(variable == "cohort_group_name",
value == cohort_group) %>%
{.$user_id} %>%
unique
triangle_diagram_data %>%
filter(user_id %in% user_set)
})
# Triangle diagrams for specific cohorts.
# First extract specific cohorts:
active_cohort_id_data <- user_cohort_groups %>%
filter(cohort_group_name %in% most_active_cohort_groups,
belongs_to_cohort) %>% {
inner_join(cohort_to_champion_bridges, .) } %>%
select(cohort_id, cohort_group_name) %>%
unique %>%
ddply(.variables = "cohort_group_name",
.fun = function(df){
top_500 <- num_tidy_user_table %>%
filter(variable == "pa_rank", value <= 500) %>%
{.$user_id} %>%
unique
user_to_cohort_bridges %>%
filter(cohort_id %in% df$cohort_id,
user_id %in% top_500) %>%
group_by(cohort_id) %>%
summarise(number_of_users = length(unique(user_id))) %>%
arrange(desc(number_of_users))
})
# Get triangle diagram data for user_ids belonging to each cohort:
# active_cohort_id_data %>%
# dlply(.variables = c("'cohort_group_name", "cohort_id"),
# .fun = function(df){
# group <- df$cohort_group_name[1]
# current_cohort_id <- df$cohort_id[1]
# active_users <- df$user_id
# users_belonging_to_current_cohort <-
#
#
# triangle_diagram_data %>%
# filter(
#
# })
# Number of champion connectiosn vs number of actions
champ_connections_vs_activity <- num_tidy_user_table %>%
filter(variable %in% c("number_of_champion_connections", "pa_count")) %>%
dcast(user_id ~ variable, value.var = "value") %>% {
.[is.na(.)] <- 0 }
# Percent of 'champion_only' actions vs number of actions
# pct_champ_only_vs_activity <- num_tidy_user_table %>%
# filter(variable %in% c("
|
a550f946847ea299fad5110f3612f1d1d7f8accf
|
4b78f55a6b31e6f285d2d1dee360919ef1f311cb
|
/profiling_script/utils/exception_handling.R
|
90852e9381a4e3aa1e5ffbd7d9644d628099a5ef
|
[] |
no_license
|
mostafa-zakaria/Python_R_Automation-Tests
|
06d1a9366f3389f6cdbe76ef5f50e2e3cf4e2e36
|
b9348878a3e9798ce9b21963e0250531e200ae41
|
refs/heads/master
| 2021-01-23T08:03:49.561841
| 2017-04-06T10:38:02
| 2017-04-06T10:38:02
| 86,473,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
exception_handling.R
|
throw_invalid_number <- function (x) {
custom_stop("invalid number", paste('invalid number:',x,"column is not a number. Check the decimal_separator parameter in profiling_sales_item.R file."))
}
custom_stop <- function(subclass, message, call = sys.call(-1), ...) {
c <- condition(c(subclass, "error"), message, call = call, ...)
stop(c)
}
condition <- function(subclass, message, call = sys.call(-1), ...) {
structure(
class = c(subclass, "condition"),
list(message = message, call = call),
...
)
}
is.condition <- function(x) inherits(x, "condition")
|
efcc5a85cec23b8263905430cc4a112ae2156d6f
|
727e96e85a03cf01d46c132225e171218c8dd1e5
|
/inst/asmntDashboard/helpers/splitMod.R
|
fceaf13a44c4bcea1fb946ba328056cebe39c9ca
|
[
"MIT"
] |
permissive
|
utah-dwq/irTools
|
6faf9da88514cf72b2166f48074348c00a6f2137
|
da34c77f363a00767563d76112ea87004c3be0d4
|
refs/heads/master
| 2023-08-31T23:58:52.981989
| 2023-08-25T15:31:22
| 2023-08-25T15:31:22
| 147,577,302
| 2
| 0
|
MIT
| 2022-08-30T21:54:40
| 2018-09-05T20:44:04
|
R
|
UTF-8
|
R
| false
| false
| 1,291
|
r
|
splitMod.R
|
# AU split module
splitModUI <-function(id){
ns <- NS(id)
tagList(
shinycssloaders::withSpinner(editModUI(ns("auSplit"), height='800px', width='800px'),size=2, color="#0080b7")
)
}
splitMod <- function(input, output, session, selected_aus, au_asmnt_poly, site_asmnt, na_sites, rejected_sites){
reactive_objects=reactiveValues()
# Get data & format
observe({
req(req(selected_aus(), au_asmnt_poly(), site_asmnt(), na_sites(), rejected_sites())
reactive_objects$selected_aus = selected_aus()
reactive_objects$au_asmnt_poly = au_asmnt_poly()
reactive_objects$site_asmnt = site_asmnt()
reactive_objects$na_sites = na_sites()
reactive_objects$rejected_sites = rejected_sites()
})
print(selected_aus)
print(au_asmnt_poly)
au_asmnt_poly=au_asmnt_poly[au_asmnt_poly$ASSESS_ID %in% selected_aus,]
view=sf::st_bbox(au_asmnt_poly)
site_asmnt=subset(site_asmnt, IR_MLID %in% sel_sites)
sel_aus_map=asmntMap(au_asmnt_poly, site_asmnt, na_sites, rejected_sites) %>%
fitBounds(paste(view[1]),paste(view[2]),paste(view[3]),paste(view[4])) %>%
showGroup('Assessed sites')
splits<-callModule(editMod, "auSplit", sel_aus_map, targetLayerId='split_shapes')
observe({
req(splits()$finished)
print(splits()$finished)
})
#return(splits()$finished)
}
|
d2afdbf1f9fd4f49ce3e49f74b37a1c61fad0010
|
9ba9709a05ef94046d80dfee2720e1c7baf1c8b4
|
/publish_examples.r
|
70a71d27af19ad8f20ed1d8a19fed2195bb8e901
|
[] |
no_license
|
biips/biips-examples
|
fa9452dd580b307cd31333f43ef8480fac37d82f
|
544289de3e3335f59d6ccaef92e6d5689524704e
|
refs/heads/master
| 2021-01-11T15:52:32.007782
| 2018-05-07T15:16:03
| 2018-05-07T15:16:03
| 79,945,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,173
|
r
|
publish_examples.r
|
library(rmarkdown)
root_dir = getwd()
output_dir = file.path(root_dir, "docs/rbiips")
clear_cache = TRUE
example_scripts = list()
example_scripts$tutorial = c("tutorial1", "tutorial2", "tutorial3")
example_scripts$object_tracking = c("hmm_4d_nonlin")
example_scripts$stoch_kinetic = c("stoch_kinetic", "stoch_kinetic_gill")
example_scripts$stoch_volatility = c("stoch_volatility",
"switch_stoch_volatility",
"switch_stoch_volatility_param")
# loop over all example directories
for (ex_dir in names(example_scripts)) {
# set example output directory
out_dir = file.path(output_dir, ex_dir)
dir.create(out_dir, recursive = TRUE, showWarnings=FALSE)
# loop over all scripts in example directory
for (ex_script in example_scripts[[ex_dir]]) {
# change directory
setwd(file.path(root_dir, ex_dir))
# clear cache
if (clear_cache)
unlink(paste0(ex_script, "_cache"), recursive=TRUE)
# render html
input = paste0(ex_script, ".r")
rmarkdown::render(input,
output_dir = out_dir,
output_format="html_document")
}
}
|
2a6de6cf4fc17672b0a4d43ac176a3906aecbbeb
|
a40ece4b297a4f4c08d59b65b198f584bf965f59
|
/init_png.R
|
c228e9402de871fd42e394153f67368dda64ff67
|
[] |
no_license
|
qqchito/ExData_Plotting1
|
8ed642c0f7db55c59feaeb3ad117f4072f408a5f
|
e7dbc9a7fa4be8ae5766228a9b32d5dfac09b113
|
refs/heads/master
| 2022-12-10T04:45:34.244837
| 2020-09-04T00:50:23
| 2020-09-04T00:50:23
| 292,407,934
| 0
| 0
| null | 2020-09-02T22:27:44
| 2020-09-02T22:27:43
| null |
UTF-8
|
R
| false
| false
| 177
|
r
|
init_png.R
|
#Code to initialize a PNG file to save the plot
init_png <- function(file, w, h) {
#Call to png function to save the plot to
png(file = file, width = w, height = h)
}
|
fbd2d6e21c6b0f2c058819ab935958f513e0cdad
|
5e6ecbbb128562a065f576684c66c8c1ed07e50b
|
/ex7p3regressiontest.R
|
19572076463db6a4fed8311136767dd5a0951ba3
|
[] |
no_license
|
jodietrich/compstat
|
4630630b75048aa66babe90e3359d67399340a91
|
418a1ce9902c44ec35b1b6b68fbfa955d266ff44
|
refs/heads/master
| 2021-04-15T10:13:12.410523
| 2018-05-15T16:08:24
| 2018-05-15T16:08:24
| 126,856,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,917
|
r
|
ex7p3regressiontest.R
|
# 3a load data and fit polynomial up to degree 3
polynomial.degree <- 3
filename <- "C:\\Users\\aaaa\\Documents\\Code\\compstat\\data_ex3.csv"
# plot distribution of errors
t <- seq(-4,4,.001)
z <- dgamma(t+1,2,2)
plot(t,z)
# plot data and fit
data <- read.csv(filename)
x <- data$x
y <- data$y
poly.x <- poly(x, degree = polynomial.degree, raw = T)
poly.fit <- lm(y~ poly.x)
poly.fit.summary <- summary(poly.fit)
all.points <- c(poly.fit$fitted.values, y)
ylim = c(min(all.points), max(all.points))
plot(x,y, ylim = ylim)
points(x,poly.fit$fitted.values, col = "red")
# 3b compute F-test pval
poly.fit.summary
global.fstatistic <- poly.fit.summary$fstatistic
fstat.pval <- 1-pf(global.fstatistic[1], global.fstatistic[2], global.fstatistic[3])
# 3c type I error simulation
n <- 20
N.sims <- 250
alpha <- 0.05 # significance level for F test
add.noise <- function(v){
noise <- rgamma(length(v), shape=2, rate = 2) - 1
v.noisy <- v + noise
}
compute.fstat <- function(y, x, degree = 3){
stopifnot(length(x)==length(y))
poly.x <- poly(x, degree = degree, raw = T)
poly.fit <- lm(y~ poly.x)
global.fstatistic <- summary(poly.fit)$fstatistic
return(global.fstatistic)
}
pval.from.fstat <- function(fstat){
fstat.pval <- 1-pf(fstat[1], fstat[2], fstat[3])
return(fstat.pval)
}
pval.fstat <- function(y, x){
fstat <- compute.fstat(y, x)
pval <- pval.from.fstat(fstat)
return(pval)
}
is.test.significant <- function(y, x, sign.level, test.func, ...){
pval <- test.func(y, x, ...)
significant <- pval <= sign.level
return(significant)
}
significance.proportion <- function(x, y.matrix, sign.level, test.func){
significance <- apply(y.matrix, 1, is.test.significant, x = x, sign.level = sign.level, test.func = test.func)
significant.total <- sum(significance)
significance.rate <- significant.total/length(significance)
return(significance.rate)
}
x <- seq(from = 25, to = 30, length.out = n)
y.matrix.null <- apply(matrix(0, nrow = length(x), ncol = N.sims), 1, add.noise)
typeI.error.rate.ftest <- significance.proportion(x, y.matrix.null, alpha, pval.fstat)
# 3d power test
beta <- c(0.5, -0.003, 0.0001)
poly.function <- function(x, beta){
res <- beta[1]*x + beta[2]*x^2 + beta[3]*x^3
return(res)
}
y.from.x <- function(x, beta){
y <- poly.function(x, beta) + add.noise(x)
return(y)
}
y.matrix.alt <- apply(replicate(N.sims, x), 1, y.from.x, beta = beta)
power.ftest <- significance.proportion(x, y.matrix.alt, alpha, pval.fstat)
# 3e permutation test
data <- read.csv(filename)
x <- data$x
y <- data$y
N.perm <- 1000
fstat.func <- function(y, x, degree = 3){
fstatistic <- compute.fstat(y, x, degree = degree)[1]
return(fstatistic)
}
permutation.fstat.dist <- function(x, y, N.perm){
y.permutations <- random.permutations(y, N.perm)
fstats <- apply(y.permutations, 2, fstat.func, x=x, degree=3)
return(fstats)
}
random.permutations <- function(v, N.perm){
# each column is a permutation of v
permutation.matrix <- apply(replicate(N.perm, v), 2, permute)
}
permute <- function(v){
permutation <- sample(v, length(v), replace = F)
return(permutation)
}
empirical.pval <- function(statistic, distribution){
pval <- (sum(distribution >= statistic) + 1)/(length(distribution) + 1)
return(pval)
}
permutation.test <- function(y, x){
# returns p value
fstat.distribution <- permutation.fstat.dist(x, y, N.perm)
fstat.value <- fstat.func(y, x, degree = 3)
pval <- empirical.pval(fstat.value, fstat.distribution)
return(pval)
}
permutation.test(y, x)
# 3f test type I error rate and power
N.sims <- 250
y.matrix.null <- apply(matrix(0, nrow = length(x), ncol = N.sims), 1, add.noise)
y.matrix.alt <- apply(replicate(N.sims, x), 1, y.from.x, beta = beta)
typeI.error.rate.perm <- significance.proportion(x, y.matrix.null, alpha, permutation.test)
power.perm <- significance.proportion(x, y.matrix.alt, alpha, permutation.test)
|
5818d8cb1d36963d321cbb807fcb82f6c18ea493
|
bda15a37285411d70ae8432fabdf27096070199b
|
/man/comm_open.Rd
|
c8bc6f850f64ec9b3e4923011c2084bd32b478d1
|
[] |
no_license
|
cran/JuniperKernel
|
27ea3304bb36590096bb6a7289bc89f441884ea1
|
b118eab508b11c6016c4b67cbf6bbfbb75a893b8
|
refs/heads/master
| 2018-11-12T21:30:52.017957
| 2018-07-09T15:20:03
| 2018-07-09T15:20:03
| 114,757,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 628
|
rd
|
comm_open.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comm_open.R
\name{comm_open}
\alias{comm_open}
\title{Comm Open}
\usage{
comm_open(request_msg)
}
\arguments{
\item{request_msg}{A list passed in from \code{doRequest} representing the
deserialized \code{comm_open} message JSON.}
}
\description{
Handler for the comm_open Message Type
}
\examples{
\dontrun{
request_msg <- list("comm_id"="uniq_comm_id", "target_name"="my_comm", "data"=list())
comm_open(request_msg)
}
}
\references{
\url{http://jupyter-client.readthedocs.io/en/latest/messaging.html#opening-a-comm}
}
\author{
Spencer Aiello
}
|
e0f22981521ad216a3f47942fa56ed0f197bea57
|
040978be49651c2ffc63104fa971d6be39821c62
|
/testthat/test-inner-product.R
|
bbdb59da057a7cc4217575619da530e8fea7deb1
|
[] |
no_license
|
RcppCore/RcppParallelTests
|
28ee9744e7e845b0d0d7bb500e5d1f65b57859f7
|
b5701bf7f4b4a949c69ce8def6c8e2690a630ba5
|
refs/heads/master
| 2021-01-21T11:46:13.222562
| 2014-07-24T22:49:09
| 2014-07-24T22:49:09
| 22,141,842
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
test-inner-product.R
|
context( "inner product" )
test_that( "parallelInnerProduct works with Rcpp", {
sourceCpp( "cpp/innerproduct.cpp" )
x <- runif(1000000)
y <- runif(1000000)
expect_equal(innerProduct(x, y), parallelInnerProduct(x, y))
})
|
ea0050af78ad0efc99d9492f87783340de19f855
|
501b73fe00433ddb89ee25e9317807619427d3b1
|
/R/bootci_prop.R
|
0e2728029a6843a7abfe9920de8bea963eef4b74
|
[] |
no_license
|
Rianka2016/AdaptSlopeCut
|
360a6a481974839b329fab5ba0507e3c23923f65
|
4938076fd5cb08ce225804c15114549afd35ad1f
|
refs/heads/master
| 2016-08-12T13:52:28.993862
| 2016-04-22T21:57:58
| 2016-04-22T21:57:58
| 53,029,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,105
|
r
|
bootci_prop.R
|
mulsimci_prop <- function(nsamp,
k,
cfj,
par,
vcovpara,
sc,
scenario, # dataframe representing different effect-size scenarios for diff combination of median and cure rate improvement.First column called "impmed" has different values for median improvement and the second column "impcure" represents the different values of cure-rate improvement.
tpred=0.25, # time of additional follow up (in years) after study ends. Default value: 0.25.
tadd=0.25, # time of additional follow up (in years) after the first additional follow up time in study. Default value: 0.25
al=0.05, # (1-al)*100 % CI is calculated. Default value: 0.05.
entry = rep(lpi/npat * 1:npat, 2), # entry time of parients, twice for control group and then treatment group.
cencut=8 # time point (in years) after which a patient will no longer be followed.
){
output=replicate(nsamp, scenariosimci_prop(k,cfj,par,vcovpara,sc,scenario,tpred,tadd,entry,cencut=8),simplify=TRUE)
output=matrix(unlist(output),nrow=5,ncol=nsamp,byrow=F)
data.out=data.frame(mean_ctrhaz=mean(output[2,]),med_ctrhaz=median(output[2,]),
ctrhaz_l=quantile(output[2,],probs=al/2),ctrhaz_u=quantile(output[2,],probs=1-(al/2)),
mean_ctrevep=mean(output[3,]),med_ctrevep=median(output[3,]),
ctrevep_l=quantile(output[3,],probs=al/2),ctrevep_u=quantile(output[3,],probs=1-(al/2)),
mean_ctrevepa=mean(output[4,]),med_ctrevepa=median(output[4,]),
ctrevepa_l=quantile(output[4,],probs=al/2),ctrevepa_u=quantile(output[4,],probs=1-(al/2)),
mean_ctrevemax=mean(output[5,]),med_ctrevemax=median(output[5,]),
ctrevemax_l=quantile(output[5,],probs=al/2),ctrevemax_u=quantile(output[5,],probs=1-(al/2)))
data.out
}
|
f0c5dae966c10ef3fe61c5f993a2603026ab6e7f
|
1ebcc51f06fb337cc80940c33f4926e5a5669641
|
/tests/testthat/test-precision.R
|
f49f76042d6af7dbeb1dc41db92dee90281dfffa
|
[
"MIT"
] |
permissive
|
malcolmbarrett/precisely
|
7baf2e80666f5a18de6cd271af115038629b55ea
|
8e4c2f2db3bfb9ac8b853a675608f7f02d301021
|
refs/heads/master
| 2021-10-11T06:12:55.323059
| 2021-10-10T20:29:32
| 2021-10-10T20:29:32
| 152,901,375
| 91
| 4
|
NOASSERTION
| 2021-10-10T00:27:12
| 2018-10-13T18:18:04
|
R
|
UTF-8
|
R
| false
| false
| 1,809
|
r
|
test-precision.R
|
test_that("precision OR functions work", {
or <- precision_odds_ratio(
n_cases = 500,
exposed_cases = .6,
exposed_controls = .4,
group_ratio = 2
)
expected_names <- c(
"precision", "odds_ratio", "n_cases", "n_controls", "n_total",
"exposed_cases", "exposed_controls", "group_ratio", "ci"
)
returns_well_formed(or, .ncol = 9, .nrow = 1, expected_names = expected_names)
})
test_that("precision Risk Difference functions work", {
rd <- precision_risk_difference(500, .5, .1, group_ratio = 3)
expected_names <- c(
"precision", "risk_difference", "n_exposed", "n_unexposed",
"n_total", "exposed", "unexposed", "group_ratio", "ci"
)
returns_well_formed(rd, .ncol = 9, .nrow = 1, expected_names = expected_names)
})
test_that("precision Rate Difference functions work", {
rd <- precision_rate_difference(500, .5, .1, group_ratio = 3)
expected_names <- c(
"precision", "rate_difference", "n_exposed", "n_unexposed",
"n_total", "exposed", "unexposed", "group_ratio", "ci"
)
returns_well_formed(rd, .ncol = 9, .nrow = 1, expected_names = expected_names)
})
test_that("precision Risk Ratio functions work", {
rr <- precision_risk_ratio(500, .5, .1, group_ratio = 3)
expected_names <- c(
"precision", "risk_ratio", "n_exposed", "n_unexposed",
"n_total", "exposed", "unexposed", "group_ratio", "ci"
)
returns_well_formed(rr, .ncol = 9, .nrow = 1, expected_names = expected_names)
})
test_that("precision Rate Ratio functions work", {
rr <- precision_rate_ratio(500, .5, .1, group_ratio = 3)
expected_names <- c(
"precision", "rate_ratio", "n_exposed", "n_unexposed",
"n_total", "exposed", "unexposed", "group_ratio", "ci"
)
returns_well_formed(rr, .ncol = 9, .nrow = 1, expected_names = expected_names)
})
|
8bee6aa0a9df65320243ce4022a1cd4f79233c46
|
58b6270f3a45133a7e2dc8a08f80165a881bab91
|
/miprimerscript.R
|
7ee9d3b19a1c73e5b07bca86e7c317a11c260396
|
[
"Unlicense"
] |
permissive
|
barbarazambrano/trabajo_en_clases
|
bc8da62974393c9514fe7457ed2dfad2b21ea88a
|
fd5e8f75c3bbc644ef4df814bb97052b1241bfb1
|
refs/heads/master
| 2020-05-30T03:29:01.274168
| 2019-05-31T02:53:35
| 2019-05-31T02:53:35
| 189,516,747
| 0
| 0
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 1,604
|
r
|
miprimerscript.R
|
#instalar paquete ggplot2
#install.packages('ggplot2')
library(ggplot2)
variableUno <- 1
variableDos <- 5
variableUno <- 'a'
#se le asignů un caracter a la variableUno, se cambio el valor 1, por el caracter a
variableDos <- TRUE
variableTres <- "fui a la esquina y volvi"
variableTres <- 1.6
#eliminar variables
rm(variableDos)
rm(variableTres)
variableUno <- variableDos
#para eliminar todas las variables
rm(list = ls())
variableUno == variableDos
variableDos <- variableUno == variableDos
variableUno <- variableDos
variableTres <- variableUno != variableDos
# != es distinto, == es igual
# || comparador logico OR
FALSE || TRUE
# && comparador logico AND
FALSE && TRUE
variableUno <- 1
variableDos <- 5
variableTres <- 1.6
(variableUno < variableDos) || (variableUno > variableTres)
#variableUno < variableDos TRUE
#variableUno > variableTres FALSE
#(variableUno < variableDos) || (variableUno > variableTres) TRUE
!((variableUno < variableDos) || (variableUno > variableTres))
#!((variableUno < variableDos) || (variableUno > variableTres)) FALSE !(NEGACION)
variableCuatro <- !(!((variableUno < variableDos) || (variableUno > variableTres))
)
#operaciones aritmeticas
variableUno <- variableUno + variableUno
variableUno <- variableUno^2+1
funcionUno()
#borramops todo y ejecutamos funcion, respondio Error in funcionUno() : could not find function "funcionUno"
#ahora para ejecutar, con funciones.r cerrado
source("funciones.R")
funcionUno(3,4)
#funciones son para hacer tareas repetitivas y se mostrara cada vez que se ejecute
variable <- funcionUno(1,2)
|
cf1ff14fef999dd76ab04647497eebc1dfec5ab6
|
6aa1252972ca74ee76cb78381f05d0cdab7fb2dc
|
/Plot1.R
|
eb1911e7eb9a29c7b72622646a6db7be673fca37
|
[] |
no_license
|
monazhu/ExData_Plotting1
|
39c9ec0e64a30691294d0770d411f756b32f1398
|
ab44c1275c7c34a1f595d4c3b80fb6741c574b01
|
refs/heads/master
| 2021-01-16T19:21:33.749552
| 2014-11-09T06:42:55
| 2014-11-09T06:42:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 825
|
r
|
Plot1.R
|
# setting working directory
setwd("C:/Users/Evil Plushie/Dropbox/Coursera/Module 4")
# reading file as data frame
DF<-read.table("C:/Users/Evil Plushie/Dropbox/Coursera/Module 4/household_power_consumption.txt", sep=";", header=T)
# changing Date into date format
DF$Date<-as.Date(DF$Date, "%d/%m/%Y")
# obtaining subset of the data based on date (only obtain rows with date stamped "2007-02-01" or "2007-02-02")
df.subset<-subset(DF, (DF$Date=="2007-02-01" | DF$Date=="2007-02-02"))
# converting Global_active_power into integers
df.subset$Global_active_power<-as.numeric(as.character(df.subset$Global_active_power))
# creating histogram
png(file="plot1.png", width=504, height=504)
hist(df.subset$Global_active_power, breaks=12, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
5ed1d7b33def126a936ef5f4cedd59f4ccd6ce1a
|
d65313edbb18f0920bc157492cdf94f6dfc86605
|
/10.Operações matemáticas em vetores.r
|
3e2e344b6cc6a58222eb10584cf745db85bdccef
|
[] |
no_license
|
HenriqueACF/Fundamentos-R
|
0cd32c84105ee3b935ae478a7123b130c619be71
|
31a17788e1002a25b5a58cc849e174ace46073fc
|
refs/heads/master
| 2023-01-12T13:41:16.445567
| 2020-11-13T20:15:06
| 2020-11-13T20:15:06
| 309,875,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
r
|
10.Operações matemáticas em vetores.r
|
#Vetor a ser calculado
x <- c(1:10)
x
#soma
x + 10
#Mutiplicação
x * 10
#O valor de X continua o mesmo que foi declarado, não sofrendo nenhuma alteração
#Se feita alguma operação matematica em um vetor, e quiser os dados alterados
#se cria um novo vetor
Xsoma <- x + 10
Xsoma
#Já na multiplicação entre vetores o comprimento tem que ser igual ou multiplo
Xsoma * x
|
d8d779553b19ae04afedeaa26e2c9f68398850b3
|
0f8d1c1435f1417aaf3b01bfdb1fba0a6dce6942
|
/man/getImages-Dataset-method.Rd
|
20d31961858edff1b9809a44ceb6441cc62bdb61
|
[] |
no_license
|
jburel/rOMERO-gateway
|
281711608447c74139096b344bba729dacefd88b
|
c1f0119675f566b4ae7ab0f91ea65783f5dcd159
|
refs/heads/master
| 2022-09-26T21:06:34.710460
| 2022-09-07T17:11:38
| 2022-09-07T17:11:38
| 45,210,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 388
|
rd
|
getImages-Dataset-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dataset.R
\docType{methods}
\name{getImages,Dataset-method}
\alias{getImages,Dataset-method}
\title{Get all images of a dataset}
\usage{
\S4method{getImages}{Dataset}(omero)
}
\arguments{
\item{omero}{The dataset}
}
\value{
The images @seealso \linkS4class{Image}
}
\description{
Get all images of a dataset
}
|
6365b89323f10a099d92d4d2ab586a961e3069e2
|
132a9a34f29bc11e9912c8858f14f44c302455b9
|
/R/immigrant.R
|
3ca08b288adddf42be6982d2810d2b047d66e47a
|
[] |
no_license
|
tloux/slubst3100
|
e28b1fb7df37825c82b3fba7321d0904dd9ca6f2
|
abe9a95069974c335d30abbb890d54fb713c7f3e
|
refs/heads/master
| 2021-07-08T20:48:30.437237
| 2020-09-15T16:00:01
| 2020-09-15T16:00:01
| 191,233,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
immigrant.R
|
#' Fictional data set on immigrant depression
#'
#' A dataset containing clinical depression diagnosis for 275 U.S. immigrants.
#'
#' @format A data frame with 275 rows and 2 variables:
#' \describe{
#' \item{id}{Study participant identification number}
#' \item{depr}{Indicator variable for diagnosis of clinical depression}
#' }
"immigrant"
|
d99afa48f3791d542e336d8d6a5a7124ec68a89a
|
4115c98348bf0e7fe944272d91fe351d58d22a96
|
/R/ISSR800.R
|
1334a449177886a513e05ea9b4123284ede45a5f
|
[] |
no_license
|
ncss-tech/soilDB
|
a933bf98a674fd54b5e1073a4497ee38a177bdf2
|
380440fc7b804b495aa711c130ab914c673a54be
|
refs/heads/master
| 2023-09-02T14:19:17.348412
| 2023-09-02T00:56:16
| 2023-09-02T00:56:16
| 54,595,470
| 68
| 20
| null | 2023-09-01T19:00:48
| 2016-03-23T21:51:10
|
R
|
UTF-8
|
R
| false
| false
| 7,708
|
r
|
ISSR800.R
|
#' @title Get 800m gridded soil properties from SoilWeb ISSR-800 Web Coverage Service (WCS)
#' @author D.E. Beaudette and A.G. Brown
#' @description Intermediate-scale gridded (800m) soil property and interpretation maps from aggregated SSURGO and STATSGO data. These maps were developed by USDA-NRCS-SPSD staff in collaboration with UCD-LAWR. Originally for educational use and \href{https://casoilresource.lawr.ucdavis.edu/soil-properties/}{interactive thematic maps}, these data are a suitable alternative to gridded STATSGO-derived thematic soil maps. The full size grids can be \href{https://casoilresource.lawr.ucdavis.edu/soil-properties/download.php}{downloaded here}.
#' @param aoi area of interest (AOI) defined using a \code{Spatial*}, \code{RasterLayer}, \code{sf}, \code{sfc} or \code{bbox} object, OR a \code{list}, see details
#' @param var ISSR-800 grid name (case insensitive), see details
#' @param res grid resolution, units of meters. The native resolution of ISSR-800 grids (this WCS) is 800m.
#' @param quiet logical, passed to \code{curl::curl_download} to enable / suppress URL and progress bar for download.
#'
#' @details \code{aoi} should be specified as a \code{SpatRaster}, \code{Spatial*}, \code{RasterLayer}, \code{SpatRaster}/\code{SpatVector}, \code{sf}, \code{sfc}, or \code{bbox} object or a \code{list} containing:
#'
#' \describe{
#' \item{\code{aoi}}{bounding-box specified as (xmin, ymin, xmax, ymax) e.g. c(-114.16, 47.65, -114.08, 47.68)}
#' \item{\code{crs}}{coordinate reference system of BBOX, e.g. 'OGC:CRS84' (EPSG:4326, WGS84 Longitude/Latitude)}
#' }
#'
#' The WCS query is parameterized using a rectangular extent derived from the above AOI specification, after conversion to the native CRS (EPSG:5070) of the ISSR-800 grids.
#'
#' Variables available from this WCS can be queried using \code{WCS_details(wcs = 'ISSR800')}.
#'
#' @note There are still some issues to be resolved related to the encoding of NA Variables with a natural zero (e.g. SAR) have 0 set to NA.
#'
#' @return A SpatRaster (or RasterLayer) object containing indexed map unit keys and associated raster attribute table or a try-error if request fails. By default, spatial classes from the `terra` package are returned. If the input object class is from the `raster` or `sp` packages a RasterLayer is returned.
#' @examples
#' \dontrun{
#' library(terra)
#'
#' # see WCS_details() for variable options
#' WCS_details(wcs = 'ISSR800')
#'
#' # get wind erodibility group
#' res <- ISSR800.wcs(list(aoi = c(-116, 35, -115.5, 35.5), crs = "EPSG:4326"),
#' var = 'weg', res = 800)
#' plot(res)
#' }
#' @export
ISSR800.wcs <- function(aoi, var, res = 800, quiet = FALSE) {
# sanity check: aoi specification
if (!inherits(aoi, c('list', 'Spatial', 'sf', 'sfc', 'bbox', 'RasterLayer', 'SpatRaster', 'SpatVector'))) {
stop('invalid `aoi` specification', call. = FALSE)
}
# reasonable resolution
if (res < 400 || res > 1600) {
stop('`res` should be within 400 <= res <= 1600 meters')
}
# match variable name in catalog
var.cat <- sapply(.ISSR800.spec, '[[', 'dsn')
var <- match.arg(tolower(var), choices = var.cat)
# get variable specs
var.spec <- .ISSR800.spec[[var]]
# compute BBOX / IMG geometry in native CRS
wcs.geom <- .prepare_AEA_AOI(aoi, res = res, native_crs = 'EPSG:5070')
## TODO: investigate why this is so
# sanity check: a 1x1 pixel request to WCS results in a corrupt GeoTiff
if (wcs.geom$width == 1 && wcs.geom$height == 1) {
stop('WCS requests for a 1x1 pixel image are not supported, try a smaller resolution', call. = FALSE)
}
# sanity check: keep output images within a reasonable limit
# limits set in the MAPFILE
max.img.dim <- 5000
# check image size > max.img.dim
if (wcs.geom$height > max.img.dim || wcs.geom$width > max.img.dim) {
msg <- sprintf(
'AOI is too large: %sx%s pixels requested (%sx%s pixels max)',
wcs.geom$width,
wcs.geom$height,
max.img.dim,
max.img.dim
)
stop(msg, call. = FALSE)
}
# base URL + parameters
base.url <- 'http://soilmap2-1.lawr.ucdavis.edu/cgi-bin/mapserv?'
service.url <- 'map=/soilmap2/website/wcs/issr800.map&SERVICE=WCS&VERSION=2.0.1&REQUEST=GetCoverage'
# unpack BBOX for WCS 2.0
xmin <- wcs.geom$bbox[1]
ymin <- wcs.geom$bbox[2]
# xmax and ymax are now calculated from AOI dimensions and resolution
# xmax <- wcs.geom$bbox[3]
# ymax <- wcs.geom$bbox[4]
# recalculate x/ymax based on xmin + resolution multiplied by AOI dims
xmax2 <- xmin + res * wcs.geom$width
ymax2 <- ymin + res * wcs.geom$height
## TODO: source data are LZW compressed, does it make sense to alter the compression (e.g. Deflate) for delivery?
# compile WCS 2.0 style URL
u <- paste0(
base.url,
service.url,
'&COVERAGEID=', var.spec$dsn,
'&FORMAT=image/tiff',
'&GEOTIFF:COMPRESSION=LZW',
'&SUBSETTINGCRS=EPSG:5070',
'&FORMAT=', var.spec$type,
'&SUBSET=x(', xmin, ',', xmax2, ')',
'&SUBSET=y(', ymin, ',', ymax2, ')',
'&RESOLUTION=x(', res, ')',
'&RESOLUTION=y(', res, ')'
)
# get data
tf <- tempfile()
dl.try <- try(
suppressWarnings(
curl::curl_download(u, destfile = tf, mode = 'wb', handle = .soilDB_curl_handle(), quiet = quiet)
),
silent = TRUE
)
if (inherits(dl.try, 'try-error')) {
message('bad WCS request')
return(dl.try)
}
# load pointer to file and return
r <- try(terra::rast(tf), silent = TRUE)
if (inherits(r, 'try-error')) {
message(attr(r, 'condition'))
stop('result is not a valid GeoTIFF', call. = FALSE)
}
## NOTE: terra (as of 1.6-28) will create 0-indexed grids sometimes when
## values(x) <- function_that_returns_factors(...)
##
## soil texture grids remain indexed from 1 for this reason
##
## TODO: this isn't quite right... '0' is returned by the WCS sometimes
# specification of NODATA using local definitions
# NAvalue(r) <- var.spec$na
terra::NAflag(r) <- 0
# load all values into memory
terra::values(r) <- terra::values(r)
# remove tempfile
unlink(tf)
# set layer name in object
names(r) <- var.spec$desc
# and as an attribute
attr(r, 'layer name') <- var.spec$desc
# optional processing of RAT
if (!is.null(var.spec$rat)) {
# get rat
rat <- try(read.csv(var.spec$rat, stringsAsFactors = FALSE), silent = quiet)
if (inherits(rat, 'try-error')) {
message("Failed to download RAT from ", var.spec$rat, "; returning non-categorical grid")
return(r)
}
# the cell value / ID column is always the 2nd colum
# name it for reference later
names(rat)[2] <- 'ID'
## TODO: changes since previous version
## * the raster-based version set only existing levels
## * there may be more than ID, code in the RAT: see texture RATS
## * very large RATs with mostly un-used levels (series name) will be a problem
# re-order columns by name
# there may be > 2 columns (hex colors, etc.)
col.names <- c('ID', names(rat)[-2])
# unique cell values
u.values <- terra::unique(r)[[1]]
# index those categories present in r
cat.idx <- which(rat$ID %in% u.values)
# register categories in new order
levels(r) <- rat[cat.idx, col.names]
}
input_class <- attr(wcs.geom, '.input_class')
if ((!is.null(input_class) && input_class == "raster") ||
getOption('soilDB.return_Spatial', default = FALSE)) {
if (requireNamespace("raster")) {
r <- raster::raster(r)
}
}
return(r)
}
|
aec10ffe541fec49e5a21d74c6baae7551c3eca4
|
a5625f5ebf2ad7d28e21ccbcc4f6e446b8862068
|
/avances/fourierR.R
|
cd12e16e258604b551bede2321acb81dda6d3a11
|
[] |
no_license
|
dhasane/numerico
|
00c5b6e6baf78f8765dc86b57370dd269dcf0eaa
|
96d55873299d18cbe8a6b533a94999013ab90163
|
refs/heads/master
| 2020-04-20T00:39:43.326472
| 2019-07-03T19:20:24
| 2019-07-03T19:20:24
| 168,526,643
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,061
|
r
|
fourierR.R
|
intAn<- function(bin , numero , frecuencia )
{
tam = length(bin)
x = 0
pin = pi * numero *2 * frecuencia
dat = 0
for( nbin in bin ){
print( nbin )
if (nbin != '0')
{
A = x
B = x + 1
dat = dat + (cos(pin*A) - cos(pin*B))
}
x = x + 1
}
return (2/(periodo*pin))* dat
}
intBn <- function(bin , numero , frecuencia ){
tam = length(bin)
x = 0
pin = pi * numero * 2 * frecuencia
dat = 0
for (nbin in bin)
{
if (nbin != '0'){
A = x
B = x+1
dat = dat + (sin(pin*B) - sin(pin*A))
}
x = x + 1
}
return (2/(periodo*pin))* dat
}
bin = 10100011
armonicos = 10
frecuencia = 5
periodo = 3
an = c()
bn = c()
cn = c()
teta = c()
x = 0
for (arm in armonicos){
x = x + 1
num = x - 1
van = intAn(bin , x , frecuencia)
vbn = intBn(bin , x , frecuencia)
vcn = sqrt( van**2 + vbn**2 )
vteta = atan2( vbn , van )
an= c(an, van)
bn= c(bn, vbn)
cn= c(cn, vcn)
teta= c(teta, vteta )
}
|
d3e041a862c79d9ffc998cf106f70c77508fbf68
|
efb0d95a4774d8c2edaedbf18e98f768b1bc29cf
|
/R/mrmr.R
|
c4818d59cfa02c17326c7a0698a32f3011472214
|
[] |
no_license
|
Sandy4321/fmrmr
|
255293a7aec0b8b5a1303dbe39f9f18c6df27e98
|
71bab6cd4431022719aab7a503032b322e692b0e
|
refs/heads/master
| 2021-01-20T16:44:30.010403
| 2015-02-02T13:21:59
| 2015-02-02T13:21:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
mrmr.R
|
#' @title Minimum Redundancy, maximum relevance
#' @param time [\code{numeric}]\cr
#' Vector of survival times.
#' @param status [\code{logical}]\cr
#' Vector of survival event indicators.
#' @param x [\code{matrix[double]}]\cr
#' Matrix of \code{[observations]x[features]}.
#' @param nselect [\code{integer(1)}]\cr
#' Number of features to score. Default is all features.
#' @return [\code{named numeric}]: Vector of scores, named with column
#' names of \code{x}, in order of selection.
#' @export
calcMRMR = function(time, status, x, nselect = ncol(x)) {
assertNumeric(time, lower = 0, any.missing = FALSE)
assert(
checkLogical(status, any.missing = FALSE, len = length(time)),
checkIntegerish(status, any.missing = FALSE, len = length(time), lower = 0, upper = 1)
)
assertMatrix(x, mode = "double", any.missing = FALSE, nrows = length(time), col.names = "unique")
assertInt(nselect, lower = 0L, upper = ncol(x))
if (nselect == 0L)
return(setNames(numeric(0L), character(0L)))
res = mrmr(time, as.logical(status), x, as.integer(nselect))
setNames(res$score, colnames(x)[res$index + 1L])
}
|
797cd0819f616982e236fde9173fc5eb5f517b7b
|
5407b1a5daacb59daaf8f4871cd60c9c5b900e8b
|
/R/R/glmnet.R
|
2f0172f30f4f3fbb5bd92a94bf9552414e7d286a
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Nathaniel-Haines/easyml
|
c93e82346446d7debade609138512c8fd37af3a1
|
2f888c6e7133270f2a841fdef7de19baa6469006
|
refs/heads/master
| 2021-01-20T16:58:49.604381
| 2017-05-08T13:41:54
| 2017-05-08T13:41:54
| 82,847,200
| 1
| 0
| null | 2017-02-22T20:04:02
| 2017-02-22T20:04:02
| null |
UTF-8
|
R
| false
| false
| 10,054
|
r
|
glmnet.R
|
#' Fit penalized gaussian regression model.
#'
#' @param X input matrix, of dimension nobs x nvars; each row is an observation vector. Can be in sparse matrix format (inherit from class "sparseMatrix" as in package Matrix; not yet available for family="cox")
#' @param y response variable. Quantitative for family="gaussian", or family="poisson" (non-negative counts). For family="binomial" should be either a factor with two levels, or a two-column matrix of counts or proportions (the second column is treated as the target class; for a factor, the last level in alphabetical order is the target class). For family="multinomial", can be a nc>=2 level factor, or a matrix with nc columns of counts or proportions. For either "binomial" or "multinomial", if y is presented as a vector, it will be coerced into a factor. For family="cox", y should be a two-column matrix with columns named 'time' and 'status'. The latter is a binary variable, with '1' indicating death, and '0' indicating right censored. The function Surv() in package survival produces such a matrix. For family="mgaussian", y is a matrix of quantitative responses.
#' @param ... Arguments to be passed to \code{\link[glmnet]{glmnet}}. See that function's documentation for more details.
#' @return A list, the model and the cross validated model.
#' @export
glmnet_fit_model_gaussian <- function(X, y, ...) {
# capture additional arguments
kwargs <- list(...)
# process kwargs
kwargs[["family"]] <- "gaussian"
kwargs[["standardize"]] <- FALSE
kwargs[["x"]] <- as.matrix(X)
kwargs[["y"]] <- y
# build cv_model
cv_model <- do.call(glmnet::cv.glmnet, kwargs)
# build model
kwargs[["nfolds"]] <- NULL
model <- do.call(glmnet::glmnet, kwargs)
# write output
list(model = model, cv_model = cv_model)
}
#' Fit penalized binomial regression model.
#'
#' @param X input matrix, of dimension nobs x nvars; each row is an observation vector. Can be in sparse matrix format (inherit from class "sparseMatrix" as in package Matrix; not yet available for family="cox")
#' @param y response variable. Quantitative for family="gaussian", or family="poisson" (non-negative counts). For family="binomial" should be either a factor with two levels, or a two-column matrix of counts or proportions (the second column is treated as the target class; for a factor, the last level in alphabetical order is the target class). For family="multinomial", can be a nc>=2 level factor, or a matrix with nc columns of counts or proportions. For either "binomial" or "multinomial", if y is presented as a vector, it will be coerced into a factor. For family="cox", y should be a two-column matrix with columns named 'time' and 'status'. The latter is a binary variable, with '1' indicating death, and '0' indicating right censored. The function Surv() in package survival produces such a matrix. For family="mgaussian", y is a matrix of quantitative responses.
#' @param ... Arguments to be passed to \code{\link[glmnet]{glmnet}}. See that function's documentation for more details.
#' @return A list, the model and the cross validated model.
#' @export
glmnet_fit_model_binomial <- function(X, y, ...) {
# capture additional arguments
kwargs <- list(...)
# process kwargs
kwargs[["family"]] <- "binomial"
kwargs[["standardize"]] <- FALSE
kwargs[["x"]] <- as.matrix(X)
kwargs[["y"]] <- y
# build cv_model
cv_model <- do.call(glmnet::cv.glmnet, kwargs)
# build model
kwargs[["nfolds"]] <- NULL
model <- do.call( glmnet::glmnet, kwargs)
# write output
list(model = model, cv_model = cv_model)
}
#' Extract coefficients from a penalized regression model.
#'
#' @param results The results of \code{\link{glmnet_fit_model_gaussian}} or \code{\link{glmnet_fit_model_binomial}}.
#' @return A data.frame of replicated penalized regression coefficients.
#' @export
glmnet_extract_coefficients <- function(results) {
model <- results[["model"]]
cv_model <- results[["cv_model"]]
coefs <- stats::coef(model, s = cv_model$lambda.min)
.data <- data.frame(t(as.matrix(as.numeric(coefs), nrow = 1)))
colnames(.data) <- rownames(coefs)
.data
}
#' Predict values for a penalized regression model.
#'
#' @param results The results of \code{\link{glmnet_fit_model_gaussian}} or \code{\link{glmnet_fit_model_binomial}}.
#' @param newx A data.frame, the new data to use for predictions.
#' @return A vector, the predicted values for a penalized regression model using the new data.
#' @export
glmnet_predict_model <- function(results, newx = NULL) {
newx <- as.matrix(newx)
model <- results[["model"]]
cv_model <- results[["cv_model"]]
stats::predict(model, newx = newx, s = cv_model$lambda.min, type = "response")
}
#' Easily build and evaluate a penalized regression model.
#'
#' @param ... Arguments to be passed to \code{\link[glmnet]{glmnet}} or \code{\link[glmnet]{cv.glmnet}}. See those functions' documentation for more details on possible arguments and what they mean. Examples of applicable arguments are \code{alpha}, \code{nlambda}, \code{nlambda.min.ratio}, \code{lambda}, \code{standardize}, \code{intercept}, \code{thresh}, \code{dfmax}, \code{pmax}, \code{exclude}, \code{penalty.factor}, \code{lower.limits}, \code{upper.limits}, \code{maxit}, and \code{standardize.response} for \code{\link[glmnet]{glmnet}} and \code{weights}, \code{offset}, \code{lambda}, \code{type.measure}, \code{nfolds}, \code{foldid}, \code{grouped}, \code{keep}, \code{parallel} for \code{\link[glmnet]{cv.glmnet}}.
#' @inheritParams easy_analysis
#' @return A list with the following values:
#' \describe{
#' \item{resample}{A function; the function for resampling the data.}
#' \item{preprocess}{A function; the function for preprocessing the data.}
#' \item{measure}{A function; the function for measuring the results.}
#' \item{fit_model}{A function; the function for fitting the model to the data.}
#' \item{extract_coefficients}{A function; the function for extracting coefficients from the model.}
#' \item{predict_model}{A function; the function for generating predictions on new data from the model.}
#' \item{plot_predictions}{A function; the function for plotting predictions generated by the model.}
#' \item{plot_metrics}{A function; the function for plotting metrics generated by scoring the model.}
#' \item{data}{A data.frame; the original data.}
#' \item{X}{A data.frame; the full dataset to be used for modeling.}
#' \item{y}{A vector; the full response variable to be used for modeling.}
#' \item{X_train}{A data.frame; the train dataset to be used for modeling.}
#' \item{X_test}{A data.frame; the test dataset to be used for modeling.}
#' \item{y_train}{A vector; the train response variable to be used for modeling.}
#' \item{y_test}{A vector; the test response variable to be used for modeling.}
#' \item{coefficients}{A (n_variables, n_samples) matrix; the replicated coefficients.}
#' \item{coefficients_processed}{A data.frame; the coefficients after being processed.}
#' \item{plot_coefficients_processed}{A ggplot object; the plot of the processed coefficients.}
#' \item{predictions_train}{A (nrow(X_train), n_samples) matrix; the train predictions.}
#' \item{predictions_test}{A (nrow(X_test), n_samples) matrix; the test predictions.}
#' \item{predictions_train_mean}{A vector; the mean train predictions.}
#' \item{predictions_test_mean}{A vector; the mean test predictions.}
#' \item{plot_predictions_train_mean}{A ggplot object; the plot of the mean train predictions.}
#' \item{plot_predictions_test_mean}{A ggplot object; the plot of the mean test predictions.}
#' \item{metrics_train_mean}{A vector of length n_divisions; the mean train metrics.}
#' \item{metrics_test_mean}{A vector of length n_divisions; the mean test metrics.}
#' \item{plot_metrics_train_mean}{A ggplot object; the plot of the mean train metrics.}
#' \item{plot_metrics_test_mean}{A ggplot object; the plot of the mean test metrics.}
#' }
#' @family recipes
#' @examples
#' library(easyml) # https://github.com/CCS-Lab/easyml
#'
#' # Gaussian
#' data("prostate", package = "easyml")
#' results <- easy_glmnet(prostate, "lpsa",
#' n_samples = 10, n_divisions = 10,
#' n_iterations = 2, random_state = 12345,
#' n_core = 1, alpha = 1.0)
#'
#' # Binomial
#' data("cocaine_dependence", package = "easyml")
#' results <- easy_glmnet(cocaine_dependence, "diagnosis",
#' family = "binomial",
#' exclude_variables = c("subject"),
#' categorical_variables = c("male"),
#' preprocess = preprocess_scale,
#' n_samples = 10, n_divisions = 10,
#' n_iterations = 2, random_state = 12345,
#' n_core = 1, alpha = 1.0)
#' @export
easy_glmnet <- function(.data, dependent_variable, family = "gaussian",
resample = NULL, preprocess = NULL, measure = NULL,
exclude_variables = NULL, categorical_variables = NULL,
train_size = 0.667, foldid = NULL,
survival_rate_cutoff = 0.05,
n_samples = 1000, n_divisions = 1000,
n_iterations = 10, random_state = NULL,
progress_bar = TRUE, n_core = 1, ...) {
easy_analysis(.data, dependent_variable, algorithm = "glmnet",
family = family, resample = resample,
preprocess = preprocess, measure = measure,
exclude_variables = exclude_variables,
categorical_variables = categorical_variables,
train_size = train_size, foldid = foldid,
survival_rate_cutoff = survival_rate_cutoff,
n_samples = n_samples, n_divisions = n_divisions,
n_iterations = n_iterations, random_state = random_state,
progress_bar = progress_bar, n_core = n_core, ...)
}
|
c41c12bef7b803b3e457a16823cf57d1feab675e
|
69d1e67b21c443856652b330d7a3d764df757a45
|
/R/constructTfIdf.R
|
a04c27559b18e3fddd0aecef692d18680f33afa3
|
[] |
no_license
|
iben-mickrick/minicaret
|
55df246cbc621bc285e3069eeac840728db670fe
|
2f6bfca37e188e4151cc3606032c6db80d0b8ad1
|
refs/heads/master
| 2020-05-15T03:48:16.022028
| 2014-05-13T20:36:30
| 2014-05-13T20:36:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
constructTfIdf.R
|
constructTfIdf <- function(inputdf) {
colnames <- names(inputdf)
idf <- log(nrow(inputdf)/colSums(inputdf))
tf <- as.matrix(inputdf) / rowSums(inputdf)
tmp <- as.matrix(tf) %*% diag(idf)
ret <- as.data.frame(tmp)
names(ret) <- colnames
return(ret)
}
|
95c19cdce73c014b2b97c4a610231dcd31133a66
|
706bbe374869615eca6f2cfe1c576fdd304057a4
|
/InSilicoVA/man/plot.insilico.Rd
|
55d363d7a646a7b32a3b5b15decfd6b4741ae5e6
|
[] |
no_license
|
verbal-autopsy-software/InSilicoVA
|
45213f56834cc6d8467763f76a5288db59cd6117
|
9a2eb1750a050ac29ce35ad9825b8cc3ad5a022c
|
refs/heads/master
| 2023-04-26T21:59:30.121905
| 2023-04-19T05:25:57
| 2023-04-19T05:25:57
| 31,554,655
| 4
| 6
| null | 2019-08-21T19:08:21
| 2015-03-02T18:02:43
|
R
|
UTF-8
|
R
| false
| true
| 4,265
|
rd
|
plot.insilico.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.r
\name{plot.insilico}
\alias{plot.insilico}
\title{plot CSMF from a "insilico" object}
\usage{
\method{plot}{insilico}(
x,
type = c("errorbar", "bar", "compare")[1],
top = 10,
causelist = NULL,
which.sub = NULL,
xlab = "Causes",
ylab = "CSMF",
title = "Top CSMF Distribution",
horiz = TRUE,
angle = 60,
fill = "lightblue",
err_width = 0.4,
err_size = 0.6,
point_size = 2,
border = "black",
bw = TRUE,
...
)
}
\arguments{
\item{x}{fitted \code{"insilico"} object}
\item{type}{An indicator of the type of chart to plot. "errorbar" for line
plots of only the error bars on single population; "bar" for bar chart with
error bars on single population; "compare" for line charts on multiple
sub-populations.}
\item{top}{The number of top causes to plot. If multiple sub-populations are
to be plotted, it will plot the union of the top causes in all
sub-populations.}
\item{causelist}{The list of causes to plot. It could be a numeric vector
indicating the position of the causes in the InterVA cause list (see
\code{\link{causetext}}), or a vector of character string of the cause
names. The argument supports partial matching of the cause names. e.g.,
"HIV/AIDS related death" could be abbreviated into "HIV"; "Other and
unspecified infect dis" could be abbreviated into "Other and unspecified
infect".}
\item{which.sub}{Specification of which sub-population to plot if there are
multiple and \code{type} is set to "bar".}
\item{xlab}{Labels for the causes.}
\item{ylab}{Labels for the CSMF values.}
\item{title}{Title of the plot.}
\item{horiz}{Logical indicator indicating if the bars are plotted
horizontally.}
\item{angle}{Angle of rotation for the texts on x axis when \code{horiz} is
set to FALSE}
\item{fill}{The color to fill the bars when \code{type} is set to "bar".}
\item{err_width}{Size of the error bars.}
\item{err_size}{Thickness of the error bar lines.}
\item{point_size}{Size of the points.}
\item{border}{The color to color the borders of bars when \code{type} is set
to "bar".}
\item{bw}{Logical indicator for setting the theme of the plots to be black
and white.}
\item{\dots}{Not used.}
}
\description{
Produce a bar plot of the CSMFs for a fitted \code{"insilico"} object.
}
\details{
To-do
}
\examples{
\dontrun{
data(RandomVA1)
##
## Scenario 1: without sub-population specification
##
fit1<- insilico(RandomVA1, subpop = NULL,
Nsim = 1000, burnin = 500, thin = 10 , seed = 1,
auto.length = FALSE)
# basic line plot
plot(fit1)
# basic bar plot
plot(fit1, type = "bar")
# line plot with customized look
plot(fit1, top = 15, horiz = FALSE, fill = "gold",
bw = TRUE, title = "Top 15 CSMFs", angle = 70,
err_width = .2, err_size = .6, point_size = 2)
##
## Scenario 2: with sub-population specification
##
data(RandomVA2)
fit2<- insilico(RandomVA2, subpop = list("sex"),
Nsim = 1000, burnin = 500, thin = 10 , seed = 1,
auto.length = FALSE)
summary(fit2)
# basic side-by-side line plot for all sub-populations
plot(fit2, type = "compare", main = "Top 5 causes comparison")
# basic line plot for specific sub-population
plot(fit2, which.sub = "Women", main = "Top 5 causes for women")
# customized plot with only specified causes
# the cause names need not be exact as InterVA cause list
# substrings in InterVA cause list is enough for specification
# e.g. the following two specifications are the same
some_causes_1 <- c("HIV/AIDS related death", "Pulmonary tuberculosis")
some_causes_2 <- c("HIV", "Pulmonary")
plot(fit2, type = "compare", horiz = FALSE, causelist = some_causes_1,
title = "HIV and TB fractions in two sub-populations",
angle = 20)
}
}
\references{
Tyler H. McCormick, Zehang R. Li, Clara Calvert, Amelia C. Crampin,
Kathleen Kahn and Samuel J. Clark Probabilistic cause-of-death assignment
using verbal autopsies, \emph{Journal of the American Statistical
Association} (2016), 111(515):1036-1049.
}
\seealso{
\code{\link{insilico}}, \code{\link{summary.insilico}}
}
\author{
Zehang Li, Tyler McCormick, Sam Clark
Maintainer: Zehang Li <lizehang@uw.edu>
}
\keyword{InSilicoVA}
|
1e63a5c24ced94718214506d9a2a535695691507
|
c78f343872185d5a45d1e1bad9777dc998087c4b
|
/R/code_package.r
|
8681d1fa4671bc08f5dd9dbf4ced83a2e78e4669
|
[] |
no_license
|
freakonometrics/TopIncome
|
bf1f95ebae036420814dadb6540bb2f9df04c9c3
|
f9ae8d402f80e34fb19a65ecb5c7f9f343fff9ad
|
refs/heads/master
| 2021-10-13T12:55:37.592908
| 2021-10-12T15:09:31
| 2021-10-12T15:09:31
| 188,045,950
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,665
|
r
|
code_package.r
|
#' import HMisc
#' importFrom("graphics", "abline", "legend", "lines", "par", "plot")
#' importFrom("stats", "integrate", "optim", "optimise", "runif", "var")
#' importFrom("utils", "install.packages")
#' Density of the Pareto 1 distribution
#'
#' @param x a (positive) vector
#' @param mu a number (the lower bound)
#' @param alpha a number (the tail index)
#' @return the density of the Pareto 1 distribution at points \code{x}
#' @seealso [ppareto1()], [qpareto1()] and [rpareto1()]
#' @examples
#' dpareto1(2, 1, 1.5)
dpareto1 <- function (x, mu, alpha) { alpha*mu^alpha/x^(alpha+1) }
#' Cumulative distribution function of the Pareto 1 distribution
#'
#' @param x a (positive) vector
#' @param mu a number (the lower bound)
#' @param alpha a number (the tail index)
#' @return the c.d.f. of the Pareto 1 distribution at points \code{x}
#' @examples
#' ppareto1(2, 1, 1.5)
ppareto1 <- function (x, mu, alpha) { 1 - ( x/mu )^(-alpha) }
#' Quantile function of the Pareto 1 distribution
#'
#' @param p a vector of probabilities (with values in [0,1])
#' @param mu a number (the lower bound)
#' @param alpha a number (the tail index)
#' @return the quantile function of the Pareto 1 distribution at points \code{p}
#' @examples
#' qpareto1(.5, 1, 1.5)
qpareto1 <- function (p, mu, alpha) { mu*(1-p)^(-1/alpha) }
#' Random generation of the Pareto 1 distribution
#'
#' @param n an integer
#' @param mu a number (the lower bound)
#' @param alpha a number (the tail index)
#' @return generates \code{n} values of the Pareto 1 distribution
#' @examples
#' set.seed(123)
#' rpareto1(6, 1, 1.5)
rpareto1 <- function (n, mu, alpha) { mu*(1-runif(n))^(-1/alpha) }
#' Maximum Likelihood estimation of the Pareto 1 distribution, with weights
#'
#' @param data a vector of observations
#' @param weights a vector of weights (default = 1)
#' @param threhold the threshold parameter of the Pareto 1 distribution (\code{mu})
#' @return a list with the index \code{alpha} and \code{k}, the number of observations above \code{threshold}
#' @examples
#' set.seed(123)
#' x <- rpareto1(100, 1, 1.5)
#' w <- rgamma(100,10,10)
#' estim <- MLE.pareto1(data=x, weights=w, threshold=1)
#' estim
MLE.pareto1 <- function(data, weights=rep(1,length(x)), threshold=min(x))
{
foo=cbind(data,weights)
foo=foo[foo[,1]>threshold,]
xx=foo[,1]
ww=foo[,2]/sum(foo[,2])
m <- as.numeric(threshold)
a <- 1/(sum(ww*log(xx))-log(m))
k <- NROW(xx)
return(list(alpha=a,k=k))
}
#' Density of the Generalized Pareto distribution (GPD)
#'
#' @param x a (positive) vector
#' @param xi a number (the tail index)
#' @param mu a number (the lower bound)
#' @param beta a number (the scaling paramater, default = 1)
#' @return the c.d.f. of the Generalized Pareto distribution at points \code{x}
#' @examples
#' pgpd(2, 1/1.5, 1, 1)
pgpd <- function (x, xi, mu = 0, beta = 1) { (1 - (1+(xi*(x-mu))/beta)^(-1/xi)) }
#' Density of the Generalized Pareto distribution (GPD)
#'
#' @param x a (positive) vector
#' @param xi a number (the tail index)
#' @param mu a number (the lower bound)
#' @param beta a number (the scaling paramater, default = 1)
#' @return the density of the Pareto 1 distribution at points \code{x}
#' @examples
#' dgpd(2, 1/1.5, 1, 1)
dgpd <- function (x, xi, mu = 0, beta = 1) { (beta^(-1))*(1+(xi*(x-mu))/beta)^((-1/xi)-1) }
#' Random generation of the Generalized Pareto distribution (GPD)
#'
#' @param n an integer
#' @param xi a number (the tail index)
#' @param mu a number (the lower bound)
#' @param beta a number (the scaling paramater, default = 1)
#' @return generates \code{n} values of the Pareto 1 distribution at points \code{x}
#' @examples
#' rgpd(10, 1/1.5, 1, 1)
rgpd <- function (n, xi, mu = 0, beta = 1) { mu + (beta/xi)*((1-runif(n))^(-xi)-1) }
#' Maximum Likelihood estimation of the Generalized Pareto distribution, with weights
#'
#' @param data a vector of observations
#' @param weights a vector of weights (default = 1)
#' @param threhold the threshold parameter of the Generalized Pareto distribution (\code{mu})
#' @param nextrmes the number of largest values considered (integer)
#' @param method method used for inference (\code{"ml"} for maximum likelihood)
#' @param information (not used)
#' @return a list with \code{n} the (total) number of observations, \code{threshold} the threshold, \code{p.less.thresh}, \code{n.exceed}, \code{k}, \code{method}, \code{converged}, \code{nllh.final} and \code{par.ests} a named vector with \code{"xi"} the tail index and \code{"beta"} the scaling coefficient
#' @examples
#' set.seed(123)
#' x <- rpareto1(100, 1, 1.5)
#' w <- rgamma(100,10,10)
#' estim <- MLE.gpd(data=x, weights=w, threshold=1)
#' estim$par.ests
MLE.gpd <- function (data, weights=rep(1,length(x)), threshold = NA, nextremes = NA, method="ml", information = c("observed", "expected"), ...)
{
n <- length(data)
if (is.na(nextremes) && is.na(threshold))
stop("Enter either a threshold or the number of upper extremes")
if (!is.na(nextremes) && !is.na(threshold))
stop("Enter EITHER a threshold or the number of upper extremes")
if (!is.na(nextremes))
data <- as.numeric(data)
foo=cbind(data,weights)
foo=foo[foo[,1]>threshold,]
x=foo[,1]
w=foo[,2]/sum(foo[,2])
exceedances <- x
excess <- exceedances - threshold
Nu <- length(excess)
xbar <- sum(w*excess)
method <- "ml"
s2 <- sum(w*(excess-xbar)^2)
xi0 <- -0.5 * (((xbar * xbar)/s2) - 1)
beta0 <- 0.5 * xbar * (((xbar * xbar)/s2) + 1)
theta <- c(xi0, beta0)
negloglik <- function(theta, tmp) {
xi <- theta[1]
beta <- theta[2]
cond1 <- beta <= 0
cond2 <- (xi <= 0) && (max(tmp) > (-beta/xi))
if (cond1 || cond2)
f <- 1e+06
else {
y <- logb(1 + (xi * tmp)/beta)
y <- w*y/xi
f <- logb(beta) + (1 + xi) * sum(y)
}
f
}
fit <- optim(theta, negloglik, hessian = TRUE, ..., tmp = excess)
if (fit$convergence)
warning("optimization may not have succeeded")
par.ests <- fit$par
converged <- fit$convergence
nllh.final <- fit$value
p.less.thresh <- 1 - Nu/n
out <- list(n = length(data), threshold = threshold,
p.less.thresh = p.less.thresh, n.exceed = Nu, k= Nu, method = method,
par.ests = par.ests, converged = converged, nllh.final = nllh.final)
names(out$par.ests) <- c("xi", "beta")
return(out)
}
.EPDinput <- function(y, gamma, kappa, tau, kappaTau = TRUE) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
if (!is.numeric(gamma)) {
stop("gamma should be numeric.")
}
if (!is.numeric(kappa)) {
stop("kappa should be numeric.")
}
if (!is.numeric(tau)) {
stop("tau should be numeric.")
}
if (any(tau >= 0)) {
stop("tau should be strictly negative.")
}
if (any(gamma <= 0)) {
stop("gamma should be strictly positive.")
}
if (kappaTau) {
if (any(kappa <= pmax(-1, 1/tau))) {
stop("kappa should be larger than max(-1,1/tau).")
}
}
ly <- length(y)
lg <- length(gamma)
lk <- length(kappa)
lt <- length(tau)
l <- c(ly, lg, lk, lt)
ind <- which(l > 1)
if (length(ind) > 1) {
if (!length(unique(l[ind])) == 1) {
stop("All input arguments should have length 1 or equal length.")
}
}
}
#' Density of the Extended Pareto distribution
#'
#' @param x a (positive) vector
#' @param gamma a (strictly positive) number (the tail index)
#' @param kappa a number - must be larger than max{-1,1/tau}
#' @param tau a (negative) number (default is -1)
#' @param log logical indicating if logarithm of density should be returned
#' @return the density of the Extended Pareto distribution at points \code{x}
#' @source \url{https://github.com/TReynkens/ReIns/blob/master/R/EPD.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' depd(2,.5,1,-1)
depd <- function(x, gamma, kappa, tau = -1, log = FALSE) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
.EPDinput(x, gamma, kappa, tau, kappaTau = TRUE)
d <- 1 / (gamma*x^(1/gamma+1)) * (1+kappa*(1-x^tau))^(-1/gamma-1) *
(1+kappa*(1-(1+tau)*x^tau))
d[x <= 1] <- 0
if (log) d <- log(d)
return(d)
}
#' Cumulative Distribution Function of the Extended Pareto distribution
#'
#' @param x a (positive) vector
#' @param gamma a (strictly positive) number (the tail index)
#' @param kappa a number - must be larger than max{-1,1/tau}
#' @param tau a (negative) number (default is -1)
#' @param log logical indicating if logarithm of density should be returned
#' @return the c.d.f. of the Extended Pareto distribution at points \code{x}
#' @source \url{https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' pepd(2,.5,1,-1)
pepd <- function(x, gamma, kappa, tau = -1, lower.tail = TRUE, log.p = FALSE) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
.EPDinput(x, gamma, kappa, tau, kappaTau = FALSE)
p <- 1 - (x * (1+kappa*(1-x^tau)))^(-1/gamma)
p[x <= 1] <- 0
if (any(kappa <= pmax(-1, 1/tau))) {
if (length(kappa) > 1 | length(tau) > 1) {
p[kappa <= pmax(-1, 1/tau)] <- NA
} else {
p <- NA
}
}
if (!lower.tail) p <- 1-p
if (log.p) p <- log(p)
return(p)
}
#' Quantile Function of the Extended Pareto distribution
#'
#' @param p a vector of probabilities (in the interval [0,1])
#' @param gamma a (strictly positive) number (the tail index)
#' @param kappa a number - must be larger than max{-1,1/tau}
#' @param tau a (negative) number (default is -1)
#' @param log logical indicating if logarithm of density should be returned
#' @return the c.d.f. of the Extended Pareto distribution at points \code{x}
#' @source \url{https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' qepd(.5,.5,1,-1)
qepd <- function(p, gamma, kappa, tau = -1, lower.tail = TRUE, log.p = FALSE) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
.EPDinput(p, gamma, kappa, tau, kappaTau = TRUE)
if (log.p) p <- exp(p)
if (!lower.tail) p <- 1-p
if (any(p < 0 | p > 1)) {
stop("p should be between 0 and 1.")
}
l <- length(p)
Q <- numeric(l)
endpoint <- 10
if (any(p < 1)) {
mx <- max(p[p < 1])
while (pepd(endpoint, gamma, kappa, tau) <= mx) {
endpoint <- endpoint*10
}
}
for (i in 1:l) {
if (p[i] < .Machine$double.eps) {
# p=0 case
Q[i] <- 1
} else if (abs(p[i]-1) > .Machine$double.eps) {
# 0<p<1 case
# Function to minimise
f <- function(x) {
((1-p[i])^(-gamma) - x*(1+kappa*(1-x^tau)))^2
}
# If minimising fails return NA
Q[i] <- tryCatch(optimise(f, lower=1, upper=endpoint)$minimum, error=function(e) NA)
} else {
# p=1 case
Q[i] <- Inf
}
}
return(Q)
}
#' Random Generation of the Extended Pareto distribution
#'
#' @param n integer, number of generations
#' @param gamma a (strictly positive) number (the tail index)
#' @param kappa a number - must be larger than max{-1,1/tau}
#' @param tau a (negative) number (default is -1)
#' @param log logical indicating if logarithm of density should be returned
#' @return a vector of \code{n} values generated from an Extended Pareto distribution
#' @source \url{https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' set.seed(123)
#' repd(6, gamma=.5,kappa=1,tau=-1)
repd <- function(n, gamma, kappa, tau = -1) {
return(qepd(runif(n), gamma=gamma, kappa=kappa, tau=tau))
}
#' Hill estimator of the tail index, with weights
#'
#' @param data the vector of observations
#' @param weights the vector of weights
#' @return Hill estimator of \code{gamma} (inverse of \code{alpha})
#' @examples
#' set.seed(123)
#' x <- rpareto1(100, 1, 1.5)
#' w <- rgamma(100,10,10)
#' Hill(x,w)
Hill = function(data,weights=rep(1,length(data))){
w <- weights/sum(weights)
n <- length(data)
X <- as.numeric(sort(data))
Hill <- sum(w[2:n]*(log(X[2:n])-log(X[1])))
return(list(gamma = Hill))
}
#' Fit the Extended Pareto distribution to a vector of observations, with weights
#'
#' @param data vector of observations
#' @param weights vector of (positive) weights
#' @param rho parameter of Fraga Alves et al. (2003) estimate
#' @param start vector of length 2 containing the starting values for the optimisation
#' @param direct logical indicating if the parameters are obtained by directly maximising the log-likelihood function
#' @param warnings logical indicating if possible warnings from the optimisation function are shown
#' @return a list with \code{k} the vector of the values of the tail parameter, \code{gamma} the vector of the corresponding estimates for the tail parameter of the EPD, \code{kappa} the vector of the corresponding MLE estimates for the kappa parameter of the EPD and \code{tau} the vector of the corresponding estimates for the second order tail index parameter of the EPD using Hill estimates and values for \code{rho}
#' @source adapted from \url{https://github.com/TReynkens/ReIns/blob/master/R/EPD.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' set.seed(123)
#' x <- rpareto1(100,1,.5)
#' w <- rgamma(100,10,10)
#' EPD(data=x, weights=w)
EPD <- function(data, weights=rep(1,length(data)), rho = -1, start = NULL, direct = TRUE, warnings = FALSE, ...) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
df=data.frame(data,weights)
df=df[order(df$data),]
w <- df$weights/sum(df$weights)
X <- as.numeric(df$data)
n <- length(X)
K <- (n-1)
if (n == 1) {
stop("We need at least two data points.")
}
if (direct) {
EPD <- .EPDdirectMLE(data=data, weights=w, rho=rho, start=start, warnings=warnings)
} else {
# Select parameter using approach of Beirlant, Joosens and Segers (2009).
EPD <- .EPDredMLE(data=data, weights=w, rho=rho)
}
if (length(rho) == 1) {
EPD$gamma <- as.vector(EPD$gamma)
EPD$kappa <- as.vector(EPD$kappa)
EPD$tau <- as.vector(EPD$tau)
}
if (length(rho) == 1) {
return(list(k=K, gamma=EPD$gamma[K], kappa=EPD$kappa[K], tau=EPD$tau[K]))
} else {
return(list(k=K, gamma=EPD$gamma[K,], kappa=EPD$kappa[K,], tau=EPD$tau[K,]))
}
}
.EPDredMLE <- function(data, weights=rep(1,length(data)), rho = -1) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
# Fit EPD using approach of Beirlant, Joosens and Segers (2009).
df=data.frame(data,weights)
df=df[order(df$data),]
w <- df$weights/sum(df$weights)
X <- as.numeric(df$data)
n <- length(X)
K <- (n-1)
if (n == 1) {
stop("We need at least two data points.")
}
nrho <- length(rho)
rho.orig <- rho
H <- Hill(data, w)$gamma
if (all(rho > 0) & nrho == 1) {
rho <- .rhoEst(data, alpha=1, tau=rho, weights=w)$rho
beta <- -rho
} else if (all(rho < 0)) {
beta <- -rho
} else {
stop("rho should be a single positive number or a vector (of length >=1) of negative numbers.")
}
gamma <- matrix(0, n-1, nrho)
kappa <- matrix(0, n-1, nrho)
tau <- matrix(0, n-1, nrho)
beta <- -rho
for (j in 1:nrho) {
if (nrho == 1 & all(rho.orig > 0)) {
tau[K, 1] <- -beta[K]/H
rhovec <- rho
} else {
tau[K, j] <- -beta[j]/H
rhovec <- rep(rho[j], n-1)
}
E <- numeric(n-1)
for (k in K) {
i <- 1:k
E[k] <- sum( w[n-k+i] * (X[n-k+i]/X[n-k])^tau[k,j] )
}
kappa[K,j] <- H * (1-2*rhovec[K]) * (1-rhovec[K])^3 / rhovec[K]^4 * (E[K] - 1 / (1-rhovec[K]))
gamma[K,j] <- H - kappa[K,j] * rhovec[K] / (1 - rhovec[K])
}
return(list(gamma=gamma, kappa=kappa, tau=tau))
}
.EPDdirectMLE <- function(data, weights=rep(1,length(data)), rho = -1, start = NULL, warnings = FALSE) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
df=data.frame(data,weights)
df=df[order(df$data),]
w <- df$weights/sum(df$weights)
X <- as.numeric(df$data)
n <- length(X)
if (n == 1) {
stop("We need at least two data points.")
}
nrho <- length(rho)
rho.orig <- rho
H <- Hill(data, w)$gamma
if (all(rho > 0) & nrho == 1) {
rho <- .rhoEst(data, alpha=1, tau=rho, weights=w)$rho
beta <- -rho
} else if (all(rho < 0)) {
beta <- -rho
} else {
stop("rho should be a single positive number or a vector (of length >=1) of negative numbers.")
}
gamma <- matrix(0, n-1, nrho)
kappa <- matrix(0, n-1, nrho)
tau <- matrix(0, n-1, nrho)
for (j in 1:nrho) {
for (k in (n-1):(n-1)) {
epddf <- df[df$data > X[n-k],]
epddata <- epddf$data/X[n-k]
epdw <- epddf$w
if (nrho == 1 & all(rho.orig > 0)) {
tau[k,1] <- -beta[k]/H
} else {
tau[k,j] <- -beta[j]/H
}
if (is.null(start)) {
start2 <- numeric(2)
start2[1] <- H
start2[2] <- 0
} else if (is.matrix(start)) {
if (nrow(start >= n-1)) {
start2 <- numeric(2)
start2[1] <- start[k,1]
start2[2] <- start[k,2]
} else {
stop("start does not contain enough rows.")
}
} else {
start2 <- start
}
if (tau[k,j] < 0) {
tmp <- EPDfit(epddata, start=start2, tau=tau[k,j], weights=epdw)
gamma[k,j] <- tmp[1]
kappa[k,j] <- tmp[2]
} else {
gamma[k,j] <- kappa[k,j] <- NA
}
}
}
return(list(gamma=gamma, kappa=kappa, tau=tau))
}
#' Fit the Extended Pareto distribution to a vector of observations, with weights, using maximum likelihood estimation
#'
#' @param data vector of observations
#' @param tau the value for tau in the EPD distribution
#' @param weight vector of (positive) weights
#' @param rho parameter of Fraga Alves et al. (2003) estimate
#' @param start vector of length 2 containing the starting values for the optimisation (default are \code{c(.1,1})
#' @param warnings logical indicating if possible warnings from the optimisation function are shown
#' @return a vector with \code{gamma} the vector of the corresponding estimates for the tail parameter of the EPD, \code{kappa} the vector of the corresponding MLE estimates for the kappa parameter of the EPD
#' @source adapted from \url{https://github.com/TReynkens/ReIns/blob/master/R/EPD.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' set.seed(123)
#' x <- rpareto1(100, mu=1, alpha=.5)
#' w <- rgamma(100,10,10)
#' EPDfit(data=x, tau=-3.3, weights=w)
EPDfit <- function(data, tau, start = c(0.1, 1), warnings = FALSE, weights=rep(1,length(data))) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
w <- weights/sum(weights)
if (is.numeric(start) & length(start) == 2) {
gamma_start <- start[1]
kappa_start <- start[2]
} else {
stop("start should be a 2-dimensional numeric vector.")
}
if (ifelse(length(data) > 1, var(data) == 0, 0)) {
sg <- c(NA, NA)
} else {
fit <- optim(par=c(gamma_start, kappa_start), fn=.EPDneglogL, Y=data, tau=tau, weights=w)
sg <- fit$par
if (fit$convergence > 0 & warnings) {
warning("Optimisation did not complete succesfully.")
if (!is.null(fit$message)) {
print(fit$message)
}
}
}
return(sg)
}
.EPDneglogL <- function(theta, Y, tau, weights) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
w <- weights/sum(weights)
gamma <- theta[1]
kappa <- theta[2]
if (kappa <= max(-1, 1/tau) | gamma <= 0) {
logL <- -10^6
} else {
logL <- sum( w*log(depd(Y, gamma=gamma, kappa=kappa, tau=tau)) )
}
return(-logL)
}
.rhoEst <- function(data, alpha = 1, theta1 = 2, theta2 = 3, tau = 1, weights=rep(1,length(data))) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
if (alpha <= 0) {
stop("alpha should be strictly positive.")
}
if (tau <= 0) {
stop("tau should be strictly positive.")
}
df=data.frame(data,weights)
df=df[order(df$data),]
w <- df$weights/sum(df$weights)
X <- as.numeric(df$data)
n <- length(X)
rho <- numeric(n)
Tn <- numeric(n)
K <- (n-1)
M_alpha <- numeric(n)
M_alpha_theta1 <- numeric(n)
M_alpha_theta2 <- numeric(n)
l <- log(X[n-K+1])
for (k in K) {
M_alpha[k] <- sum( (l[1:k]-log(X[n-k]))^alpha ) / k
M_alpha_theta1[k] <- sum( (l[1:k]-log(X[n-k]))^(alpha*theta1) ) / k
M_alpha_theta2[k] <- sum( (l[1:k]-log(X[n-k]))^(alpha*theta2) ) / k
}
Tn[K] <- ( (M_alpha[K]/gamma(alpha+1))^tau - (M_alpha_theta1[K]/gamma(alpha*theta1+1))^(tau/theta1) ) /
( (M_alpha_theta1[K]/gamma(alpha*theta1+1))^(tau/theta1) - (M_alpha_theta2[K]/gamma(alpha*theta2+1))^(tau/theta2) )
rho[K] <- 1 - ( 2 * Tn[K] / ( 3 - Tn[K]) ) ^ (1/alpha)
return(list(k=K, rho=rho[K], Tn=Tn[K]))
}
ProbEPD <- function(data, q, gamma, kappa, tau, ...) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
if ( length(gamma) != length(kappa) | length(gamma) != length(tau)) {
stop("gamma, kappa and tau should have equal length.")
}
X <- as.numeric(sort(data))
n <- length(X)
prob <- numeric(n)
K <- (n-1)
K2 <- K[which(gamma[K] > 0)]
prob[K2] <- (K2+1)/(n+1) * (1 - pepd(q/X[n-K2], gamma=gamma[K2], kappa=kappa[K2], tau=tau[K2]))
prob[prob < 0 | prob > 1] <- NA
return(list(k=K, P=prob[K], q=q))
}
#' Large Return Period associated to the Extended Pareto distribution
#'
#' @param data a vector of observations
#' @param q the used large quantile - to estimate 1/P[X>q]
#' @param gamma vector of \code{n-1} estimates for the EVD obtained from [EPD]
#' @param kappa vector of \code{n-1} estimates for the EVD obtained from [EPD]
#' @param tau vector of \code{n-1} estimates for the EVD obtained from [EPD]
#' @return a list with \code{k} the vector of the values of the tail parameter k, \code{R} the vector of the corresponding return period and \code{q} the used large quantile
#' @source \url{https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R} Tom Reynkens, ReIns package version 1.0.7
#' @examples
#' set.seed(123)
#' x <- rpareto1(100, mu=1, alpha=1.5)
#' fit <- EPD(x)
#' ReturnEPD(data=x, q=.01, gamma=fit$gamma, kappa=fit$kappa, tau=fit$tau)
ReturnEPD <- function(data, q, gamma, kappa, tau, ...) {
#
# Modified EPD function from the ReIns R package to have:
# - weighted ML estimation
# - results from only one cutoff
# - direct ML estimation by default
# original R code : ReIns package version 1.0.7
# https://github.com/TReynkens/ReIns/blob/master/R/EPD.R
# https://github.com/TReynkens/ReIns/blob/master/R/Distributions.R
#
if ( length(gamma) != length(kappa) | length(gamma) != length(tau)) {
stop("gamma, kappa and tau should have equal length.")
}
X <- as.numeric(sort(data))
n <- length(X)
r <- numeric(n)
K <- (n-1)
K2 <- K[which(gamma[K] > 0)]
r[K2] <- (n+1)/(K2+1) / (1 - pepd(q/X[n-K2], gamma=gamma[K2], kappa=kappa[K2], tau=tau[K2]))
r[which(gamma[K] <= 0)] <- NA
r[r <= 0] <- NA
return(list(k=K, R=r[K], q=q))
}
#' Estimate Top Share
#'
#' @param data a vector of observations
#' @param weight a vector of weights
#' @param p (default \code{0.1})
#' @param q (default \code{0.1})
#' @param method (default \code{"edf"}, but can be \code{"pareto1"} or \code{"gpd"})
#' @param edp.direct logical (default \code{TRUE})
#' @return estimation of the share of income/wealth owned by the top p percent of the population, assuming that the top q percent of the distribution is Pareto distributed
#' @examples
#' url_1 <- "https://github.com/freakonometrics/TopIncome/raw/master/data_csv/dataframe_yw_1.csv"
#' df <- read.table(url_1,sep=";",header=TRUE)
#' data_1 <- tidy_income(income = df$y, weights = df$w)
#' TopShare(data_1)
TopShare <- function(data, p=.1, q=.1, method="edf", epd.direct=TRUE) {
if (!require("Hmisc")) install.packages("Hmisc")
require(Hmisc)
#
# p : top 100p% share
# q : top 100q% of the distribution being Pareto
#
# method="edf" : sample top share, that is, based on the EDF
# method="pareto1" : EDF+Pareto1
#
x = data$y
weights = data$weights
if (p>1) stop('Error: p should be smaller than 1 \n\n')
if (p<0) stop('Error: p should be greater than 0 \n\n')
up=Hmisc::wtd.quantile(x, weights=weights, probs=1-p, normwt=TRUE) # weighted (1-p)-quantile
up=as.numeric(up)
## Top Share based on the Empirical Distribution Function (EDF)
if(method=="edf") {
tis=sum(weights*x*(x>up))/sum(weights*x)
return(list(index=tis,share=p,method="edf"))
}
## Top Share based on Pareto I and GPD Models
if (q>1) stop('Error: q should be smaller than 1 \n\n')
if (q<0) stop('Error: q should be greater than 0 \n\n')
u=Hmisc::wtd.quantile(x, weights=weights, probs=1-q, normwt=TRUE)
u=as.numeric(u) # threshold = weighted (1-q)-quantile
datax=cbind(x,weights)
dataq=datax[x<u,]
xq = Hmisc::wtd.mean(dataq[,1], weights=dataq[,2])
if(method=="pareto1" || method=="pareto2" || method=="gpd") {
# Estimate the Pareto distribution with weighted data
if(method=="pareto1") {
coef=MLE.pareto1(x,weights=weights,threshold=u)
sigma=u
alpha=coef$alpha
}
if(method=="pareto2" || method=="gpd") {
coef=MLE.gpd(x,weights=weights,threshold=u)
sigma=coef$par.ests["beta"]/coef$par.ests["xi"]
alpha=1/coef$par.ests["xi"]
}
# Top Income Shares with weighted data
if (alpha<1) { tis=NaN
} else if(p<=q) {
num = (alpha/(alpha-1))*sigma*(p/q)^(-1/alpha) + u - sigma
den = (1-q)*xq + q*sigma/(alpha-1) + q*u
tis = p*num/den
} else if(p>q) {
up=Hmisc::wtd.quantile(x, weights=weights, probs=1-p, normwt=TRUE)
up=as.numeric(up)
datap=data[x<up,]
xp = Hmisc::wtd.mean(datap[,1], weights=datap[,2])
den = (1-q)*xq + q*sigma/(alpha-1) + q*u
tis = 1 - (1-p)*xp/den
}
return(list(index=tis,alpha=alpha,coef=coef,share.index=p,share.pareto=q,threshold=u))
}
## Top Share based on EPD Model
if(method=="epd") {
dataqq=data[data>=u,]
coef=EPD(dataqq[,1], weights=dataqq[,2], direct=epd.direct)
delta=coef$kappa
tau=coef$tau
alpha=1/coef$gamma
up=Hmisc::wtd.quantile(x, weights=weights, probs=1-p, normwt=TRUE)
up=as.numeric(up)
datap=data[x<up,]
xp = Hmisc::wtd.mean(datap[,1], weights=datap[,2])
pextpareto=function(x, u=1, delta=0, tau=-1, alpha){#Compute CDF
d=1-((x/u)*(1+delta-delta*(x/u)^tau))^(-alpha)
d[x<=u] <- 0
return(d) }
ff_bis <- function(x) (1-pextpareto(1/x, u=u, delta=delta, tau=tau, alpha=alpha))/x^2
if (alpha<=1) {tis=NaN # infinite mean (tail=alpha=1/xi < 1)
} else if(delta<max(-1,1/tau)) {tis=NaN # kappa should be largen than max(-1,1/tau)
} else if(p<=q) {
uprim=u*qepd(1-p/q, gamma=coef$gamma, kappa=coef$kappa, tau=coef$tau)
Eup = try( integrate(ff_bis, lower=0, upper=1/uprim)$value , TRUE)
Eu = try( integrate(ff_bis, lower=0, upper=1/u)$value , TRUE)
if (inherits(Eup, "try-error") && inherits(Eup, "try-error")) tis=NaN
else tis=(p*uprim+q*Eup)/((1-q)*xq+q*(u+Eu))
} else if(p>q) {
Eu = try( integrate(ff_bis, lower=0, upper=1/u)$value , TRUE)
Ex = ((1-q)*xq+q*(u+Eu))
if (inherits(Eu, "try-error")) tis=NaN
else tis = 1-(1-p)*xp/Ex
}
return(list(index=tis,alpha=1/coef$gamma,coef=coef,share.index=p,share.pareto=q,threshold=u))
}
}
#' Convert income/wealth Data
#'
#' @param income a vector of data (income or wealth)
#' @param weights a vector of weight (same length as \code{income}
#' @return a dataframe with 4 columns, \code{y} the vector income (or wealth), \code{weights} the vector of weights, \code{Fw} the cumulated proportion of people (with weights) and \code{Fx} the cumulated proportion of people (without weights)
#' @examples
#' url_1 <- "https://github.com/freakonometrics/TopIncome/raw/master/data_csv/dataframe_yw_1.csv"
#' df <- read.table(url_1,sep=";",header=TRUE)
#' data_1 <- tidy_income(income = df$y, weights = df$w)
#' str(data_1)
tidy_income <- function(income, weights){
df=data.frame(w=weights, y=income)
df$w=df$w/sum(df$w)
df=df[order(df$y),]
Fw=cumsum(df$w)/(sum(df$w)+df$w[1])
n=length(df$y)
Fx=(1:n)/(n+1)
data = data.frame(y=df$y, weights=df$w, Fw=Fw, Fx=Fx)
return(data)
}
#' Pareto diagrams - Pareto 1, GPD and EPD
#'
#' @param data dataframe obtained from \code{tidy_income} function
#' @param p numeric, the probability level (default 0.01)
#' @param q numeric, the probability level to model a Pareto distribution (default 0.1)
#' @param viz logical \code{TRUE} to plot the estimates
#' @return a table with estimations of top share and a graph
#' @examples
#' url_1 <- "https://github.com/freakonometrics/TopIncome/raw/master/data_csv/dataframe_yw_1.csv"
#' df <- read.table(url_1,sep=";",header=TRUE)
#' data_1 <- tidy_income(income = df$y, weights = df$w)
#' \dontrun{Pareto_diagram(data_1)}
Pareto_diagram = function(data, p=.01, q=.1, viz=TRUE){
res1=TopShare(data, p=p, q=q, method="pareto1")
res2=TopShare(data, p=p, q=q, method="gpd")
res3=TopShare(data, p=p, q=q, method="epd", epd.direct= TRUE)
pot=data[data$y>0,] # Keep positive data
if(viz) par(mfrow=c(1,1), mar=c(4, 4, 4, 1)) # bottom, left, top, right
if(viz) plot(log(pot$y), log(1-pot$Fw), xlab="log(x)", ylab="log(1-F(x))", cex=.6, col="gray", xlim=PDxlim)
u=seq(log(res1$threshold), 30, length.out=500)
yhat.par1=ppareto1(exp(u),mu=res1$threshold,alpha=res1$coef$alpha)
yhat.par2=pgpd(exp(u),xi=res2$coef$par.ests["xi"],mu=res2$coef$threshold,beta=res2$coef$par.ests["beta"])
yhat.epd=pepd(exp(u)/res3$threshold,gamma=res3$coef$gamma,kappa=res3$coef$kappa,tau=res3$coef$tau)
if(viz){
lines(u,log(1-yhat.par1)+log(q), col="blue", lty=2, lwd=1.5)
lines(u,log(1-yhat.epd)+log(q), col="red", lty=1, lwd=1.5)
lines(u,log(1-yhat.par2)+log(q),col="green", lty=3, lwd=1.5)
legend("topright", legend=c("Pareto 1", "GPD", "EPD"), col=c("blue","green", "red"), lty=c(2,3,1))
}
res90=TopShare(data, p=p, q=.10, method="pareto1")
if(viz) abline(v=log(res90$threshold), col="lightgrey", lty=2) # percentile 90
if(viz) legend(log(res90$threshold)-top.x, top.y, legend=expression(italic('q')[90]), cex=.9, bty="n")
res95=TopShare(data, p=p, q=.05, method="pareto1")
if(viz) abline(v=log(res95$threshold), col="lightgrey", lty=2) # percentile 95
if(viz) legend(log(res95$threshold)-top.x, top.y, legend=expression(italic('q')[95]), cex=.9, bty="n")
res99=TopShare(data, p=p, q=.01, method="pareto1")
if(viz) abline(v=log(res99$threshold), col="lightgrey", lty=2) # percentile 99
legend(log(res99$threshold)-top.x, top.y, legend=expression(italic('q')[99]), cex=.9, bty="n")
}
#' Table of top shares (using three thresholds)
#'
#' @param data dataframe obtained from \code{tidy_income} function
#' @param p probability level (default 0.01)
#' @param q1 numeric, the probability level to model a Pareto distribution (default 0.1)
#' @param q2 numeric, the probability level to model a Pareto distribution (default 0.05)
#' @param q3 numeric, the probability level to model a Pareto distribution (default 0.01)
#' @examples
#' url_1 <- "https://github.com/freakonometrics/TopIncome/raw/master/data_csv/dataframe_yw_1.csv"
#' df <- read.table(url_1,sep=";",header=TRUE)
#' data_1 <- tidy_income(income = df$y, weights = df$w)
#' Table_Top_Share(data_1)$Mat_index
Table_Top_Share = function(data, p=.01, q1=.1 , q2=.05 , q3=.01, verbose=FALSE){
res90=TopShare(data, p=p, q=q1, method="pareto1")
res95=TopShare(data, p=p, q=q2, method="pareto1")
res99=TopShare(data, p=p, q=q3, method="pareto1")
pareto1.index=cbind(res90$index, res95$index, res99$index)
pareto1.alpha=cbind(res90$alpha, res95$alpha, res99$alpha)
res90=TopShare(data, p=p, q=q1, method="pareto2")
res95=TopShare(data, p=p, q=q2, method="pareto2")
res99=TopShare(data, p=p, q=q3, method="pareto2")
gpd.index=cbind(res90$index, res95$index, res99$index)
gpd.alpha=cbind(res90$alpha, res95$alpha, res99$alpha)
res90=TopShare(data, p=p, q=q1, method="epd")
res95=TopShare(data, p=p, q=q2, method="epd")
res99=TopShare(data, p=p, q=q3, method="epd")
epd.index=cbind(res90$index, res95$index, res99$index)
epd.alpha=cbind(res90$alpha, res95$alpha, res99$alpha)
cutoff=c(1-q1,1-q2,1-q3)
M1=rbind(cutoff,pareto1.index,gpd.index,epd.index)
colnames(M1)=c("index1","index2","index3")
M2=rbind(cutoff,pareto1.alpha,gpd.alpha,epd.alpha)
colnames(M2)=c("alpha1","alpha2","alpha3")
if(verbose){
cat("----- index ----------\n")
print(M1)
cat("----- alpha ----------\n")
print(M2)
cat("----- top share ------\n")
T=TopShare(data, p=p)
print(T)}
return(list(Mat_index=M1,Mat_alpha=M2,TopShare=T))}
#' Top Income plot
#'
#' @param data dataframe obtained from \code{tidy_income} function
#' @param p probability level (default 0.01)
#' @param thr numeric vector of probability levels to model a Pareto distribution (from 0.85 up to 0.999)
#' @param TSlim numeric 2-vector, range of y for the plot (default \code{NULL})
#' @param tail logical to plot the tail index (default \code{TRUE})
#' @return one or two graphs (depending on \code{tail==TRUE})
#' @examples
#' url_1 <- "https://github.com/freakonometrics/TopIncome/raw/master/data_csv/dataframe_yw_1.csv"
#' df <- read.table(url_1,sep=";",header=TRUE)
#' data_1 <- tidy_income(income = df$y, weights = df$w)
#' \dontrun{Top_Income(data_1)}
Top_Income = function(data, p=.01, thr=seq(.85,.999,by=.001), TSlim=NULL, tail = TRUE){
thr=round(thr,10)
tail=matrix(0,NROW(thr),7)
tis.index=matrix(0,NROW(thr),7)
tis.alpha=matrix(0,NROW(thr),7)
for(i in 1:NROW(thr)) {
res1=TopShare(data, p=p, q=1-thr[i], method="pareto1")
res2=TopShare(data, p=p, q=1-thr[i], method="gpd")
res3=TopShare(data, p=p, q=1-thr[i], method="epd", epd.direct=TRUE)
res4=TopShare(data, p=p, method="edf")
tis.index[i,1]=res1$threshold # threshold y0
tis.index[i,2]=res1$coef$k # k largest observations
tis.index[i,3]=thr[i] # quantile threshold
tis.index[i,4]=res1$index
tis.index[i,5]=res2$index
tis.index[i,6]=res3$index
tis.index[i,7]=res4$index
tis.alpha[i,1]=res2$threshold # threshold y0
tis.alpha[i,2]=res2$coef$k # k largest observations
tis.alpha[i,3]=thr[i] # quantile threshold
tis.alpha[i,4]=res1$alpha
tis.alpha[i,5]=res2$alpha
tis.alpha[i,6]=res3$alpha
tis.alpha[i,7]=0
}
if(tail){
plot(tis.alpha[,2],tis.alpha[,4], ylim=c(0,ysup), type="b", cex=.75, pch=3, main="MLE estimates of the tail index", xlab="k largest values", ylab="tail index (alpha)", col="blue")
lines(tis.alpha[,2],tis.alpha[,4], col="blue", type="l", cex=.75)
lines(tis.alpha[,2],tis.alpha[,5], col="green", type="p", cex=.75, pch=2)
lines(tis.alpha[,2],tis.alpha[,5], col="green", type="l", cex=.75)
lines(tis.alpha[,2],tis.alpha[,6], col="red", type="b", cex=.75, pch=1)
lines(tis.alpha[,2],tis.alpha[,6], col="red", type="l", cex=.75)
abline(v=tis.alpha[(tis.alpha[,3]==.90),2], col="lightgray", lty=2) # 10% top obs
abline(v=tis.alpha[(tis.alpha[,3]==.95),2], col="lightgray", lty=2) # 5% top obs
abline(v=tis.alpha[(tis.alpha[,3]==.99),2], col="lightgray", lty=2) # 1% top obs
legend("topright", legend=c("Pareto 1 (Hill estimator)","GPD", "EPD"), col=c("blue", "green", "red"), pch=c(3,2,1), lty=1)
legend(tis.alpha[(tis.alpha[,3]==.90),2]-top.xx,top.yy, legend=expression(italic('q')[90]), cex=.9, bty="n")
legend(tis.alpha[(tis.alpha[,3]==.95),2]-top.xx,top.yy, legend=expression(italic('q')[95]), cex=.9, bty="n")
legend(tis.alpha[(tis.alpha[,3]==.99),2]-top.xx,top.yy, legend=expression(italic('q')[99]), cex=.9, bty="n")
}
if(is.null(TSlim)) TSlim = c(0.1,0.4)
plot(tis.index[,2],tis.index[,4], ylim=TSlim, type="b", cex=.75, pch=3, main="Top 1% share", xlab="k largest values", ylab="share", col="blue")
lines(tis.index[,2],tis.index[,4], col="blue", type="l", cex=.75)
lines(tis.index[,2],tis.index[,5], col="green", type="p", cex=.75, pch=2)
lines(tis.index[,2],tis.index[,5], col="green", type="l", cex=.75)
lines(tis.index[,2],tis.index[,6], col="red", type="b", cex=.75, pch=1)
lines(tis.index[,2],tis.index[,6], col="red", type="l", cex=.75)
lines(tis.index[,2],tis.index[,7], col="gray", type="l", cex=.75)
abline(v=tis.index[(tis.index[,3]==.90),2], col="lightgray", lty=2) # 10% top obs
abline(v=tis.index[(tis.index[,3]==.95),2], col="lightgray", lty=2) # 5% top obs
abline(v=tis.index[(tis.index[,3]==.99),2], col="lightgray", lty=2) # 1% top obs
legend("topright", legend=c("Pareto 1","GPD", "EPD"), col=c("blue", "green", "red"), pch=c(3,2,1),lty=1)
legend(tis.index[(tis.index[,3]==.90),2]-top.xx,top.yy, legend=expression(italic('q')[90]), cex=.9, bty="n")
legend(tis.index[(tis.index[,3]==.95),2]-top.xx,top.yy, legend=expression(italic('q')[95]), cex=.9, bty="n")
legend(tis.index[(tis.index[,3]==.99),2]-top.xx,top.yy, legend=expression(italic('q')[99]), cex=.9, bty="n")
}
|
8361467cc00f44b37a5dcb2692711f8784d174b6
|
cf0cecd6a803305d88b306884d00e85f43fee484
|
/Exploratory_Data_Analysis/Week4/plot6.R
|
a35d73736b5fb048a867278c32d9e98fa05d24b6
|
[] |
no_license
|
viciliciv/datasciencecoursera
|
a0f74ec88d696e297f1ba1d43a5e37883691481a
|
3223fbb83575d64f548fe04bf41253d7514a8570
|
refs/heads/master
| 2021-07-25T23:31:08.685330
| 2017-11-09T17:40:10
| 2017-11-09T17:40:10
| 104,652,070
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,985
|
r
|
plot6.R
|
getwd()
setwd('Exp.Data')
list.files()
summ_pm<-readRDS('summarySCC_PM25.rds')
head(summ_pm)
library(dplyr)
library(ggplot2)
library(gridExtra)
str(summ_pm)
list.files()
source_class<-readRDS('Source_Classification_Code.rds')
head(source_class)
str(source_class)
test<-source_class%>%
select(EI.Sector)%>%
group_by(EI.Sector)%>%
unique()
combined<-merge(summ_pm, source_class, by = 'SCC')
#####
##6
num_six<-combined%>%
filter(grepl('Mobile', EI.Sector))%>%
filter(fips %in% c('24510', '06073'))%>%
mutate(Location = ifelse(fips == '24510', 'Baltimore City, MD', 'Los Angeles County'))%>%
select(year, Emissions, Location)%>%
group_by(year, Location)%>%
summarise(Total.Emissions = round(sum(Emissions)))%>%
ungroup()
num_six_plot<-ggplot(num_six, aes(year, Total.Emissions))+
geom_point(aes(color = Location))+
geom_line(aes(color = Location))+
labs(title = 'Emissions from Motor Vehicle Sources',
x = 'Year', y = 'PM2.5 (Tonnes)')
init_six<-num_six%>%
filter(year == '1999')%>%
select(Location, init_emissions = Total.Emissions)
ext_num_six<-merge(num_six, init_six, by = 'Location', all.x = TRUE)
ext_num_six<-ext_num_six%>%
mutate(Diff_Emissions = round((Total.Emissions-init_emissions)/init_emissions*100,2))
str(ext_num_six)
ext_num_six_plot<-ggplot(subset(ext_num_six, year == '2008'), aes(factor(year),Diff_Emissions, fill = Location))+
geom_bar(stat = 'identity', position = position_dodge())+
geom_label(aes(label = Diff_Emissions), show.legend = FALSE)+
labs(title = 'Percent Difference in Emissions between 1999 and 2008', x ='Year', y = 'PM2.5 Percent Difference')
png('plot6.png', 480, 480)
grid.arrange(num_six_plot, ext_num_six_plot)
dev.off()
##Although LA county reduced significantly more PM2.5, Baltimore showed the largest percentage difference in reducing PM2.5 emissions since 1999.
|
bde313ae02dcd4e70f810f3d9fcf15eaa89bbcc5
|
c57695227cb5a4f1385c07eee8f9c374b73b9607
|
/app/modules/help/global.R
|
92b1b6857530c5312179992820aea35c9db66b1c
|
[] |
no_license
|
doughnutnz/hist_bus_map
|
e66f387a1af74cc106d22167220eb2c115a07a56
|
0a1ab09fee03a7d1d649c233d729e72f86afa3bc
|
refs/heads/master
| 2020-04-09T03:48:16.908597
| 2018-12-01T23:46:14
| 2018-12-01T23:46:14
| 159,997,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
global.R
|
###########################################################
# Entry point of the HELP module
#
# Author: Doug Hunt
# Created: 2018-11-18
###########################################################
source("modules/help/server.R")
source("modules/help/ui.R")
source("modules/help/utils.R")
source("modules/help/help.R")
|
a160a85401f04cc6b9154047c3ae9d5e5d9a57ac
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8410_0/rinput.R
|
6455708f43961bfeb3a964aec5610f980f30f37d
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("8410_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8410_0_unrooted.txt")
|
8ceb3a5e2a9f8f1523396629638129c8e4ebe64f
|
98daab7e12e9f6e241e964d67f28f36342e52b6f
|
/R/nuclear.R
|
24fc92ef8d75893ac10190d5d723598305936de8
|
[] |
no_license
|
cran/LARisk
|
4bbbf0b57d58d2159caecde04afcd182728f33d6
|
6794c6c0c16cc4c886ed881c03b3fb16a38e8190
|
refs/heads/master
| 2022-02-24T01:09:56.318827
| 2022-02-07T00:20:08
| 2022-02-07T00:20:08
| 236,619,163
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
nuclear.R
|
#' Simulated data of organ radiation exposure dose
#'
#' \code{nuclear} is simulated dataset for acute exposure event. The scenario assumes that the people exposure the radiation at 2011.
#'
#' @format A data frame with 100 observation of 11 variables:
#' \describe{
#' \item{\code{ID}}{person ID.}
#' \item{\code{sex}}{gender}
#' \item{\code{birth}}{birth-year}
#' \item{\code{exposure}}{exposed year to radiation}
#' \item{\code{site}}{organ where exposed to radiation}
#' \item{\code{exposure_rate}}{expsoure rate}
#' \item{\code{dosedist}}{distribution of dose}
#' \item{\code{dose1}}{dose parameter}
#' \item{\code{dose2}}{dose parameter}
#' \item{\code{dose3}}{dose parameter}
#' \item{\code{distance}}{distance from the hyper}
#' }
#'
"nuclear"
|
d76f57a5f898600c2ba06273298f9a3fed0869e9
|
c996caaffba5ba880bbac0a1154a67b1b637c78c
|
/plot1.R
|
ec52368e42c9c9cc1ca84a2fca2b6883ab9cdde2
|
[] |
no_license
|
krl0s04/ExData_Plotting1
|
b4a40dd0aedd74e06723928547b2abe0cf1713f3
|
caff4813e072ace5328dad256909917c983c47ab
|
refs/heads/master
| 2020-12-30T21:54:20.229909
| 2016-08-29T06:32:41
| 2016-08-29T06:32:41
| 66,801,858
| 0
| 0
| null | 2016-08-29T01:44:43
| 2016-08-29T01:44:42
| null |
UTF-8
|
R
| false
| false
| 765
|
r
|
plot1.R
|
# Code for Plot 1
#Reading the txt file
file <- read.table("household_power_consumption.txt", sep=";", header=T,na.strings = "?", nrows= )
# Subsetting the data from the dates 2007-02-01 and 2007-02-02
top <- which(file$Date == "1/2/2007" & file$Time =="00:00:00")
bottom <- which(file$Date == "2/2/2007" & file$Time =="23:59:00")
finalList <- a[top:bottom,]
#Conversion of the Date and Time variables to Date/Time classes
finalList$dateTime <- as.POSIXlt( paste( finalList$Date , finalList$Time), format = "%d/%m/%Y %H:%M:%S" )
# Histogram of Global Active Power
library(datasets)
png(file="plot1.png",width = 480, height = 480)
hist(finalList$Global_active_power, main = "Global Active Power", col= "red", xlab = "Global Active Power (kilowatts)")
dev.off()
|
f74ea7704ec14d6955f3159a6b53cc602d73de0f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/jsmodule/examples/jsPropensityAddin.Rd.R
|
03b71d0fc623043ef5e4f8d29064facc2911f331
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
jsPropensityAddin.Rd.R
|
library(jsmodule)
### Name: jsPropensityAddin
### Title: jsPropensityAddin: Rstudio addin of jsPropensityGadget
### Aliases: jsPropensityAddin
### ** Examples
if(interactive()){
jsPropensityAddin()
}
|
0ce1661af64e947113ff88a291d1463d897dc5d7
|
06073723cadcbe0324c1290a727aa0839e50e1ce
|
/rscripts/estimateSA1HouseholdsUsingSA2.R
|
1a6b9bee10fba614b87aefd336fd2226771febbe
|
[] |
no_license
|
agentsoz/synthetic-population
|
0ea229d4b87875cd6516778becd0f299abe4b40b
|
89407d4fd3b37a89c8dc7781d4838e681f107599
|
refs/heads/master
| 2022-11-22T13:32:50.482313
| 2020-10-12T23:18:19
| 2020-10-12T23:18:19
| 112,975,556
| 10
| 2
| null | 2022-11-16T09:25:18
| 2017-12-04T00:21:40
|
Java
|
UTF-8
|
R
| false
| false
| 7,569
|
r
|
estimateSA1HouseholdsUsingSA2.R
|
library(tools)
library(stringr)
sa1_dist_data <- list()
LoadSA1HouseholdsInSA2 <- function(sa1_files) {
sa1_dist_data <- list()
for (i in 1:length(sa1_files)) {
inFile = sa1_files[i]
csvname = paste(file_path_sans_ext(basename(inFile)), ".csv", sep = "")
inputCsv = unz(inFile, csvname)
#This csv cannot be read as a normal csv for some reason. So it is processed manually. When we do read.table we get a one column data set where
#each row has a sequence of values seperated by commas. We have to divide them into columns to do further process.
txt <- read.table(inputCsv,
sep = "\n",
fill = F,
strip.white = T)
end_line = sa1_data_start_row + (floor(nrow(txt) / hh_types_count) *
hh_types_count) - 1
# get the SA1s
sa1s = unlist(strsplit(as.character(txt[sa1_row, 1]), ","))
sa1s = sa1s[sa1_start_col:length(sa1s)]
# get household type column names
hhtype_col_names = unlist(strsplit(as.character(txt[colname_row, 1]), ","))[1:3]
col_names <- c(hhtype_col_names, sa1s)
# convert the data part into a matrix by seperating each line by comma.
data = txt[c(sa1_data_start_row:end_line), 1]
data = unlist(strsplit(as.character(data), ","))
data_mat = matrix(data, ncol = length(col_names), byrow = T)
colnames(data_mat) <- col_names # finally assign column names
sa1_dist_data[[i]] <- data_mat
}
return(sa1_dist_data)
}
GetSA1HouseholdDistInSA2 <-
function(sa1_hh_dists,
sa2,
family_types_count,
hh_sizes_count,
sa2_code_map_file) {
for (data_mat in sa1_hh_dists) {
row_count_per_sa2 = family_types_count * hh_sizes_count
#find the chunk in the data matrix that is relevant to the SA2 we process
sa2_row_id = which(data_mat[, sa2_col] == sa2)
if (length(sa2_row_id) != 0) {
sa2_chunk <-
data_mat[c(sa2_row_id:(sa2_row_id + row_count_per_sa2 - 1)), ]
nop_titles = sa2_chunk[which(sa2_chunk[, hh_size_col] != ""), hh_size_col]
#SA2 codes and SA1 codes have following relationship. So if we know the SA2 5 digit code we can figure out its SA1s from a list
#Example: SA2 51041
# S/T SA2
# 5 1041
#Example: SA1 5104118
# S/T SA2 SA1
# 5 1041 18
code_map_csv = GetCsvInZip(sa2_code_map_file, m_sa2_codes_csv)
code_map = read.csv(code_map_csv)
sa2_5digcode = code_map[code_map$SA2_NAME_2016 == sa2, "SA2_5DIGITCODE_2016"]
sa1_prefix_pattern = paste("^",sa2_5digcode,sep="")
sa1s = colnames(sa2_chunk)[grepl(sa1_prefix_pattern, colnames(sa2_chunk))]
selected_cols = c(colnames(sa2_chunk)[1:3], sa1s) # prepending sa2 name, num of persons and family hh type columns
sa2_chunk <- sa2_chunk[,selected_cols]
sa2_chunk[, hh_size_col] <- rep(nop_titles, each = family_types_count)
sa2_chunk[, sa2_col] <- sa2
return(sa2_chunk)
}
}
}
EstimateSA1HouseholdsDistribution <-
function(sa2, sa2_hh_dist, sa1_hhs_dist) {
#Following code iterates on hh types distributing them among SA1s. i.e each row represent a hh type
rowcount = nrow(sa2_hh_dist)
lastcol = ncol(sa1_hhs_dist)
sa2_sa1_conflicts = FALSE
mismatching_hh_types = c()
#If at least one of the SA1s have households in them, sa1_hhs_dist must at least have 4 columns: SA2 name, hh size, family houshoeld type and one sa1 coloumn.
#So lastcol in the sa1_hh_dist must be >= 4. As sa1_start_col = 4 in config.R, following is true if there are sa1 households
if (sa1_start_col <= lastcol) {
#If this SA2's SA1 level distribution has any households we can approximate a suitable distribution.
#If none of the SA1s have households according to the distribution we don't know where to put them.
#So we skip such SA2s
value_cells <-sa1_hhs_dist[ ,sa1_start_col:lastcol,drop=FALSE] #get data cells by skipping row and col headers
class(value_cells) <- "numeric"
for (i in 1:rowcount) {
sa1hhs = value_cells[i,]
sa1hhsttl = sum(sa1hhs)
sa2hhttl = sa2_hh_dist[i, 4]
#Distribute SA2 Hhs among SA1s assuming SA2 data is always correct
if (sa2hhttl == 0) {
#If there are no hhs in SA2 in current row, then there must be no hhs in SA1.
adjustedSA1Hhs = (sa1hhs * 0)
} else if ((sa2hhttl - sa1hhsttl) > 0 & sum(sa1hhs) == 0) {
#There are extra hhs of current type in SA2, but none in the SA1s. FillAccording2Dist function randomly assigns items to specified vector if
#the vector sum is 0. Here we pass the SA1s that are known to have other household types though there are no household of current type.
#This way we don't assign households to SA1s covering parks and industrial areas.
non_empty_sa1s = which(colSums(value_cells) > 0) #Get the SA1s in that are not empty in whole SA2
adjustedSA1Hhs = sa1hhs #book keeping
#If there are SA1s that have other household types assign current households to those SA1s. If there are no households at all in any of the SA1s
# we have no option but to assign current households to random SA1s
if(length(non_empty_sa1s) > 0 ){
adjustedSA1Hhs[non_empty_sa1s] = FillAccording2Dist(sa1hhs[non_empty_sa1s], (sa2hhttl - sa1hhsttl))
}else{
adjustedSA1Hhs = FillAccording2Dist(sa1hhs,(sa2hhttl - sa1hhsttl))
}
sa2_sa1_conflicts = TRUE
mismatching_hh_types = c(mismatching_hh_types, unname(unlist(sa1_hhs_dist[i, c(2:3)])))
} else{
#Redistribute hhs among SA1 according to the current distribution. At the end of this, total hhs in SA1s match the total in SA2
adjustedSA1Hhs = FillAccording2Dist(sa1hhs, (sa2hhttl - sa1hhsttl))
}
sa1_hhs_dist[i, sa1_start_col:lastcol] = adjustedSA1Hhs
}
if (sa2_sa1_conflicts) {
flog.info(
"Some household types were represented in SA2 data but not in SA1 data. These households were assigned to randomly selected SA1s"
)
flog.info(mismatching_hh_types)
}
return(sa1_hhs_dist)
} else{
return(NULL)
}
}
if (FALSE) {
source("config.R")
test_files = "../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_Inner.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_Inner_East.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_Inner_South.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_North_East.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_North_West.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_Outer_East.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_South_East.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Melbourne_West.zip,
../data/melbourne/raw/SA1_households_dist_in_SA2s_2016_Mornington_Peninsula.zip"
all_sa1_hh_dists <-
LoadSA1HouseholdsInSA2(unlist(lapply(strsplit(
sub("\\n", "", test_files), ","
), trimws)))
sa1_hh_dist = GetSA1HouseholdDistInSA2(all_sa1_hh_dists, "Port Melbourne Industrial", 14, 8,"../data/melbourne/raw/1270055001_sa2_2016_aust_csv.zip")
}
|
1530ce3de8631f30f323999b3ed46b628ff18d01
|
9fe17bac4c1247602960cc370f80f04c2c257701
|
/plot2.R
|
fbea7f90f1220aeaa5735a4eb99cf63362de0ca9
|
[] |
no_license
|
ltubia/ExData_Plotting1
|
3acd5440828d1ea87b604102626334cc99fd303c
|
3ff285207d25bb7d7a60d674b711ca105b00e0b5
|
refs/heads/master
| 2021-01-17T22:47:00.878210
| 2015-08-08T20:14:47
| 2015-08-08T20:14:47
| 40,413,680
| 0
| 0
| null | 2015-08-08T19:09:46
| 2015-08-08T19:09:45
| null |
UTF-8
|
R
| false
| false
| 1,439
|
r
|
plot2.R
|
plot2<-function() {
library(sqldf)
## 1. Download file to temp file at local path
temp<-"exdata_Fdata_Fhousehold_power_consumption.zip" ## Get file from local path
if (!file.exists(temp))
{
fileurl<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#temp<-tempfile()
download.file(fileurl,paste0(getwd(),"/",temp), method="wget")
}
# read data filtering a priori so as to not load unnecesary dates
y<-read.csv.sql(unzip("exdata_Fdata_Fhousehold_power_consumption.zip", "household_power_consumption.txt"),sql="select * from file where Date IN('1/2/2007','2/2/2007')", eol = "\n", sep = ";", header=TRUE)
# convert dates
y<-transform(y, Date = as.Date(Date, format = "%d/%m/%Y"))
# calculate weekday
y$Wd<-weekdays(y$Date)
y$DateTime<-ymd_hms(paste(y$Date, y$Time))
# Draw graph
par(cex.main=0.9, cex=0.75)
plot(y$DateTime, y$Global_active_power, type = "l", main = "", ylab ="Global Active Power (kilowatts)", xlab = "")
dev.copy(png, file = "plot2.png", width=480, height=480) ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device!
par(cex.main=1, cex=1)
rm(y)
}
|
9eb313403e1a19f41820c4273694029b0ccc339c
|
868cfb1391ddfdf87242ff0d2fbe6bde326289ae
|
/Labs/Week of 3-26 Lab.R
|
5f1bb3658ab3f6c06ea0d11a989191a6941ee872
|
[] |
no_license
|
akatzuka/Intro-to-Data-Science-Practice
|
a9ee28d77803bc9fd920a79a7c98d62c17f33a3c
|
c5f430125706e1fc90038eb896d6ee14bfaa94d0
|
refs/heads/master
| 2020-04-27T09:49:13.532508
| 2019-03-06T22:21:01
| 2019-03-06T22:21:01
| 174,230,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,245
|
r
|
Week of 3-26 Lab.R
|
# read data
user="brun1992"
dat = read.csv("https://raw.githubusercontent.com/grbruns/cst383/master/College.csv")
# make college names into the row
rownames(dat) = dat[,1]
dat = dat[,-1]
# scale all features
dat[,2:18] = scale(dat[,2:18])
# feature data
fdat = dat[,c("Outstate", "F.Undergrad")]
# labels for the feature data
labels = dat[,"Private"]
# example to classify
x1 = data.frame(Outstate=0, F.Undergrad=0.5)
# kNN parameter k
k = 5
edist = function(x1,x2) sqrt(sum((x1-x2)^2))
dists = apply(fdat, 1, function(x1) edist(fdat,x1))
close_rows <- dists[order(dists, decreasing = TRUE)][1:k]
close_labels <-fdat[]
x = runif(20)
y=(order(abs(x-.5)))
z=y[c(1:3)]
y1 = x[z]
# slope and intercept of a line
m = 1.2
b = -1.4
# randomly generate n inputs from 0 to 10
n = 30
x = sort(runif(n, min=0, max=10))
# calculate the corresponding outputs, with added noise
y = m*x + b + rnorm(n, sd=0.5)
# plot them
plot(x, y, pch=20, col="grey40")
k = 3
x1 = runif(1,0,10)
z=(order(abs(x-x1)))[1:k]
y = y[z]
y1 = mean(y)
par(new = FALSE)
points(x1,y1, pch=17, col="firebrick")
par(new = TRUE)
############
data = read.csv("https://raw.githubusercontent.com/grbruns/cst383/master/machine.csv")
plot(data)
plot()
|
829adbe2495187d50e73ca0ed80b56e71f72b545
|
29e22b9486dad7ed60e49378eedea301f1e2b9fe
|
/R/topic-analysis.R
|
50e02fdc620f3ebb42649cb161c6f3915a2a0aac
|
[
"MIT"
] |
permissive
|
sdaume/topicsplorrr
|
c54f8a277d5316695c921474a280cf961cfd69a0
|
eb657391b435361617cc2992da33f6bf122dea19
|
refs/heads/master
| 2023-05-30T06:43:18.111841
| 2021-06-15T13:04:30
| 2021-06-15T13:04:30
| 339,341,529
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,312
|
r
|
topic-analysis.R
|
#' Document counts and mean topic shares of the three primary topics for each
#' document
#'
#' \code{primary_topics} summarizes for each topic the number of documents and
#' the respective mean topic share (gamma) where a topic is one of the three
#' primary topics in a document.
#'
#' @param topicsByDocDate a dataframe as returned by
#' \code{\link{topics_by_doc_date}}
#'
#' @param minGamma the minimum share of a topic per document to be considered
#' when summarizing primary topic information; topics with smaller shares per
#' individual document will be ignored when summarizing the document counts
#' and mean topic shares. (In an \code{\link[stm:stm]{stm topic model}} the
#' likelihood that a topic is generated from a topic is expressed by the value
#' \emph{gamma}.) The default is \code{0}, thus ensuring that three topics are
#' included for each document.
#'
#' @return a dataframe with 7 columns where: \describe{ \item{topic_id}{a topic
#' ID as provided as an input in \code{topicsByDocDate}}
#' \item{n_docs_1}{number of documents where \code{topic_id} has the largest
#' probability} \item{n_docs_2}{number of documents where \code{topic_id} has
#' the second largest probability} \item{n_docs_3}{number of documents where
#' \code{topic_id} has the third largest probability} \item{mean_gamma_1}{mean
#' probability of all documents in \code{n_docs_1}} \item{mean_gamma_2}{mean
#' probability of all documents in \code{n_docs_2}} \item{mean_gamma_3}{mean
#' probability of all documents in \code{n_docs_3}} }
#'
#' @export
#'
primary_topics <- function(topicsByDocDate, minGamma = 0) {
# the number of top N topics per document
# could potentially be an additional function argument
n_ranks <- 3
primary_topic_stats <- topicsByDocDate %>%
dplyr::filter(.data$gamma >= minGamma) %>%
dplyr::group_by(.data$document) %>%
dplyr::mutate(topic_doc_rank = rank(dplyr::desc(.data$gamma))) %>%
dplyr::ungroup() %>%
dplyr::filter(.data$topic_doc_rank <= n_ranks) %>%
dplyr::group_by(.data$topic_id, .data$topic_doc_rank) %>%
dplyr::summarise(mean_gamma = mean(.data$gamma),
n_docs = dplyr::n()) %>%
dplyr::ungroup() %>%
tidyr::pivot_wider(names_from = .data$topic_doc_rank,
values_from = c(.data$n_docs, .data$mean_gamma)) %>%
dplyr::arrange(-.data$n_docs_1) #%>%
#replace(is.na(.data), 0)
primary_topic_stats <- replace(primary_topic_stats,
is.na(primary_topic_stats), 0)
return(primary_topic_stats)
}
#' Mean topic likelihoods summary
#'
#' \code{topics_summary} summarizes the mean likelihood of topics across all
#' documents combined with a set of suitable labels. This is a convenience
#' function to create summary visualizations or interactive tables.
#'
#' @param topicsByDocDate a dataframe as returned by
#' \code{\link{topics_by_doc_date}}
#'
#' @param topicLabels a dataframe as returned by \code{\link{topics_terms_map}},
#' associating a \code{topic_id} with a suitable \code{topic_label}.
#'
#' @return @return a dataframe with term frequencies by chosen timebin, where:
#' \describe{ \item{topic_id}{the unique topic identifier assigned by a topic
#' model} \item{topic_label}{a character vector of representative labels for
#' \code{topic_id}} \item{mean_gamma}{the mean likelihood of \code{topic_id}
#' across all documents} }
#'
#' @export
#'
topics_summary <- function(topicsByDocDate, topicLabels) {
topic_shares_summary <- topicsByDocDate %>%
dplyr::mutate(topic_id = as.character(.data$topic_id)) %>% #remove
dplyr::group_by(.data$topic_id) %>%
dplyr::summarise(mean_gamma = mean(.data$gamma)) %>%
dplyr::ungroup() %>%
dplyr::left_join(topicLabels, by = "topic_id") %>%
dplyr::select(.data$topic_id, .data$topic_label, .data$mean_gamma) %>%
dplyr::arrange(-.data$mean_gamma)
return(topic_shares_summary)
}
#' Compute topic shares for a given time bin
#'
#' \code{topic_frequencies} summarizes the shares of topics in a chosen time
#' interval as per provided topic shares by document and date.
#'
#' A \code{stm} topic model provides for each document the likelihood
#' (\emph{gamma}) that it is generated from a specific topic; here we interprete
#' these as the share of a document attributed to this topic and then summarize
#' these shares per timebin to obtain the share of a topic across all documents
#' over time.
#'
#' The topic share or likelihood per document has to be above a threshold
#' specified by \code{minGamma}. A suitable threshold might consider the number
#' of topics and the average document size. An additional filtering option is
#' provided with \code{minTopicTimeBins}.
#'
#' Timebins for which no occurrence of a given topic is recorded are added with
#' an explicit value of zero, excluding however such empty timebins before the
#' first occurrence of a topic and after the last.
#'
#' @param topicsByDocDate a dataframe as returned by
#' \code{\link{topics_by_doc_date}}
#'
#' @param timeBinUnit a character sequence specifying the time period that
#' should be used as a bin unit when computing topic share frequencies. Valid
#' values are \code{"day", "week", "month", "quarter", "year"}, \code{"week"}
#' is used as a default. \strong{NOTE}, for the assignment of \code{week}s
#' Monday is considered as the first day of the week.
#'
#' @param minGamma the minimum share of a topic per document to be considered
#' when summarizing topic frequencies, topics with smaller shares per
#' individual document will be ignored when computing topic frequencies. (In
#' an \code{\link[stm:stm]{stm topic model}} the likelihood that a topic is
#' generated from a topic is expressed by the value \emph{gamma}.) The default
#' is \code{0.01}, but should be adjusted with view of the number of topics
#' and the average length of a document.
#'
#' @param minTopicTimeBins a double in the range \code{[0,1]} specifying the
#' minimum share of all unique timebins in which an occurrence of a topic
#' share of at least \code{minGamma} must have been recorded, i.e. a value of
#' \code{0.5} (the default) requires that an occurrence of a topic must have
#' been recorded in at least 50\% of all unique timebins covered by the
#' dataset; topics that do not meet this threshold will not be included in the
#' returned results.
#'
#' @return a dataframe with term frequencies by chosen timebin, where:
#' \describe{ \item{topic_id}{a topic ID as provided as an input in
#' \code{topicsByDocDate}} \item{timebin}{the floor date of a timebin; if
#' \code{timeBinUnit} was set to \code{week}, this date will always be a
#' Monday} \item{median_gamma}{the median of likelihoods of the topic with
#' \code{topic_id} in \code{timebin}} \item{mean_gamma}{the mean of
#' likelihoods of the topic with \code{topic_id} in \code{timebin}}
#' \item{topicshare}{the share of topic with \code{topic_id} relative to all
#' topic shares recorded and included in a given \code{timebin}.
#' \strong{NOTE:} strictly speaking these are the likelihoods that a document
#' is generated from a topic, which we here interpret as the share of a
#' document attributed to a topic.} \item{n_docs_topic}{the total number of
#' documents in a dataset in which a topic with \code{topic_id} occurs as
#' least with likelihood \code{minGamma}} \item{first_occur}{the exact date of
#' the first occurrence of a topic with \code{topic_id} across the whole time
#' range covered by \code{timebin}s} \item{latest_occur}{the exact date of the
#' latest occurrence of a topic with \code{topic_id} across the whole time
#' range covered by \code{timebin}s; note that this date can be larger than
#' the maximum \code{timebin}, as \code{timebin} specifies the floor date of a
#' time unit} \item{n_topic_timebins}{the number of unique \code{timebin}s in
#' a topic with \code{topic_id} occurs at least with likelihood
#' \code{minGamma}} }
#'
#' @export
#'
topic_frequencies <- function(topicsByDocDate, timeBinUnit = "week",
minGamma = 0.01, minTopicTimeBins = 0.5) {
# potential additional arguments
weekStart <- 1
# remove topic occurrences with low likelihood
topic_freqs <- topicsByDocDate %>%
dplyr::filter(.data$gamma >= minGamma)
# create timebins and summarise the topic shares and add some other stats
topic_freqs <- topic_freqs %>%
dplyr::mutate(timebin = lubridate::floor_date(.data$occur,
unit = timeBinUnit,
week_start = weekStart)) %>%
dplyr::group_by(.data$topic_id) %>%
dplyr::mutate(n_docs_topic = dplyr::n(),
first_occur = min(.data$occur),
latest_occur = max(.data$occur)) %>%
dplyr::ungroup() %>%
dplyr::group_by(.data$timebin) %>%
dplyr::mutate(sum_gamma = sum(.data$gamma),
n_docs_timebin = dplyr::n_distinct(.data$document)) %>%
dplyr::ungroup() %>%
dplyr::group_by(.data$timebin, .data$topic_id) %>%
dplyr::mutate(gamma_share = .data$gamma/.data$sum_gamma) %>%
dplyr::ungroup() %>%
dplyr::group_by(.data$timebin, .data$topic_id, .data$n_docs_topic) %>%
dplyr::summarize(median_gamma = stats::median(.data$gamma),
mean_gamma = mean(.data$gamma),
topicshare = sum(.data$gamma_share),
first_occur = min(.data$first_occur),
latest_occur = max(.data$latest_occur)) %>%
dplyr::ungroup() %>%
dplyr::group_by(.data$topic_id) %>%
dplyr::mutate(n_topic_timebins = dplyr::n_distinct(.data$timebin)) %>%
dplyr::ungroup()
# filter out topics with infrequent occurrence
if (minTopicTimeBins > 1) {
warning("minTopicTimeBins must be in (0,1]. ",
"Using minTopicTimeBins = 1 instead of ",
minTopicTimeBins)
minTopicBins <- length(unique(topic_freqs$timebin))
} else if (minTopicTimeBins == 0) {
warning("minTopicTimeBins must be in (0,1]. ",
"Ignoring minTopicTimeBins, all topics will be included in the result.")
minTopicBins <- 1
} else {
minTopicBins <- ceiling(minTopicTimeBins * length(unique(topic_freqs$timebin)))
}
topic_freqs <- dplyr::filter(topic_freqs, .data$n_topic_timebins >= minTopicBins)
# complement missing timebins per topic with explicit zero counts
# (important wrt to plotting and regression); filter out empty bins before the
# first occurrence of a topic, and after the latest
topic_freqs <- topic_freqs %>%
tidyr::complete(.data$topic_id,
timebin = seq.Date(min(.data$timebin),
max(.data$timebin),
by = timeBinUnit)) %>%
#tidyr::complete(tidyr::crossing(topic_id, timebin = seq.Date(min(timebin),
# max(timebin),
# by = timeBinUnit))) %>%
tidyr::replace_na(list(topicshare = 0,
mean_gamma = 0,
median_gamma = 0)) %>%
dplyr::group_by(.data$topic_id) %>%
dplyr::mutate(n_docs_topic = max(.data$n_docs_topic, na.rm = TRUE),
n_topic_timebins = max(.data$n_topic_timebins, na.rm = TRUE),
first_occur = min(.data$first_occur, na.rm = TRUE),
latest_occur = max(.data$latest_occur, na.rm = TRUE)) %>%
dplyr::arrange(.data$timebin) %>%
dplyr::mutate(topicshare_cumsum = cumsum(.data$topicshare)) %>%
dplyr::filter(.data$topicshare_cumsum > 0) %>%
dplyr::filter(!(.data$topicshare == 0 & .data$topicshare_cumsum == sum(.data$topicshare))) %>%
dplyr::ungroup() %>%
dplyr::select(-.data$topicshare_cumsum)
return (topic_freqs)
}
#' Select top topics by document counts or temporal trend metric
#'
#' \code{select_top_topics} allows to select a specified number of top topics
#' based on miscellaneous properties of the topic frequencies. This method is
#' typically used to select a topic frequency time series for plotting and
#' exploratory analysis. See the details of the function arguments for selection
#' options.
#'
#' @param topicFrequencies a dataframe of \emph{topic} frequencies as returned
#' by \code{\link{topic_frequencies}}
#'
#' @param topN the number of returned top topics meeting the selection criteria
#' in \code{selectBy}
#'
#' @param selectBy the selection approach which determines the metric by which
#' \code{topic_id}s will be sorted to select the \code{topN} terms. Currently,
#' the following options are supported: \describe{
#' \item{most_frequent}{\strong{the default}, select terms based on the total
#' number of documents in which the topic occurs (\strong{NOTE}, that the
#' document count depends on the minimum topic likelihood \code{minGamma} that
#' was specified when obtaining the topic frequencies.)}
#' \item{trending_up}{select topics with largest upwards trend; internally
#' this is measured by the slope of a simple linear regression fit to a
#' \code{topic_id}'s frequency series.} \item{trending_down}{select topics
#' with largest downward trend; internally this is measured by the slope of a
#' simple linear regression fit to a \code{topic_id}'s frequency series.}
#' \item{trending}{select topics with either largest upward or downward trend;
#' internally this is measured by the absolute value of the slope of a simple
#' linear regression fit to a \code{topic_id}s frequency series.}
#' \item{most_volatile}{select topics with the largest change throughout the
#' covered time period; internally this is measured by the residual standard
#' deviation of the linear model fit to a \code{topic_id}'s time frequency
#' series.} \item{topic_id}{select topics specified by \code{topic_id} in the
#' function argument \code{selectTopics}.}}
#'
#' @param selectTopics a vector of topic IDs by which the returned results
#' should be filtered; this option is only considered when the option
#' \emph{"topic_id"} is chosen for \code{selectBy}.
#'
#' @return a dataframe specifying topic metrics employed for selecting top
#' \code{topic}s, where: \describe{ \item{topic_id}{a unique topic identifier}
#' \item{n_doc_topics}{the total number of documents in a dataset in which a
#' topic with \code{topic_id} occurs} \item{slope}{the slope coefficient of a
#' linear model fit to this \code{topic_id}'s time frequency series}
#' \item{volatility}{the residual standard deviation of a linear model fit to
#' this \code{topic_id}'s time frequency series} \item{trend}{a categorisation
#' of the topic frequency trend} }
#'
#' @export
#'
select_top_topics <- function(topicFrequencies, topN = 25,
selectBy = "most_frequent",
selectTopics = NULL) {
# check selection options
validSelectByOptions <- c("most_frequent", "trending", "trending_up",
"trending_down", "most_volatile", "topic_id")
if (!(selectBy %in% validSelectByOptions)) {
stop("'", selectBy, "' is not a valid option for selectBy. ",
"Use one of: ", paste(validSelectByOptions, collapse = ", "))
}
# the number of topics are unlikely to be very large, but for optimal
# efficiency we split the selection process into multiple steps to reduce
# the number of topics for which we have to fit a regression
top_n_topics <- topicFrequencies %>%
dplyr::select(.data$topic_id, .data$n_docs_topic) %>%
dplyr::distinct()
# for efficiency we therefore handle "most_frequent" as a special case
if(selectBy == "most_frequent") {
top_n_topics <- top_n_topics %>%
dplyr::arrange(-.data$n_docs_topic) %>%
dplyr::slice(1:topN)
}
# for all other selection options and the final data we need the trend model
topic_trends_lm <- topicFrequencies %>%
dplyr::filter(.data$topic_id %in% top_n_topics$topic_id) %>%
.topic_trends()
# we select according to the chosen metric (specified in 'selectBy')
top_n_topics <- top_n_topics %>%
merge(topic_trends_lm, by = "topic_id")
if(selectBy == "most_volatile") {
top_n_topics <- dplyr::arrange(top_n_topics, -.data$volatility)
} else if(selectBy == "trending") {
top_n_topics <- dplyr::arrange(top_n_topics, -abs(.data$slope))
} else if(selectBy == "trending_up") {
top_n_topics <- dplyr::arrange(top_n_topics, -.data$slope)
} else if(selectBy == "trending_down") {
top_n_topics <- dplyr::arrange(top_n_topics, .data$slope)
} else if(selectBy == "topic_id") {
top_n_topics <- top_n_topics %>%
dplyr::filter(.data$topic_id %in% selectTopics)
}
top_n_topics <- dplyr::slice(top_n_topics, 1:topN)
return(top_n_topics)
}
#' Fit a linear regression to time frequency series of each unique topic
#'
#' \code{.topic_trends} takes a dataframe with a topic frequency time series as
#' returned by the \code{\link{topic_frequencies}} function and fits a linear
#' regression for each \code{topic} (identified by \code{topic_id}).
#'
#' The primary purpose of this function is to provide a simple approach to
#' classify \code{topic}s by a general trend, which can be used for
#' visualization, filtering and exploratory analysis. Internally, the slope and
#' intercept of the fitted linear model are use to categorize a basic trend for
#' the topic frequencies.
#'
#' @param topicFrequencies a dataframe of \code{topic} frequencies as returned
#' by \code{\link{topic_frequencies}}
#'
#' @param trendThreshold a double used to categorise trends, default is
#' \code{0.0005}; if the intercept of the linear model of topic frequencies
#' falls within a range of \code{+/-trendThreshold} of the mean topic share,
#' the trend is categorized as \emph{"constant"}
#'
#' @return a dataframe specifying trend metrics for each \code{topic}, where:
#' \describe{ \item{topic_id}{a topic identifier} \item{slope}{the slope
#' coefficient of the linear model fit to this \code{topic}'s time frequency
#' series} \item{volatility}{the residual standard deviation of the linear
#' model fit to this \code{topic}'s time frequency series, which is used as a
#' basic measure of volatility of topic frequencies} \item{trend}{a
#' categorisation of the topic frequency trend, negative slopes with an
#' intercept \code{> mean topic share + trendThreshold} are interpreted as
#' \emph{decreasing}, positive slopes with an intercept \code{< mean topic
#' share - trendThreshold} as \emph{increasing}, all others as
#' \emph{constant}} }
#'
#' @keywords internal
#'
.topic_trends <- function(topicFrequencies, trendThreshold = 0.0005) {
# index the timebin for fitting the lm; in order to get interpretable
# intercepts when categorizing trends
topicFrequencies <- topicFrequencies %>%
dplyr::mutate(binindex = dplyr::dense_rank(.data$timebin) - 1)
# linear model fit for each term
lm_topics <- lapply(split(topicFrequencies, topicFrequencies$topic_id),
stats::lm,
formula = topicshare ~ binindex)
topic_slope <- as.data.frame(t(sapply(lm_topics, stats::coefficients)))
#topic_slope[1] <- NULL
colnames(topic_slope) <- c("intercept", "slope")
# use residual standard deviation as a basic measure of volatility
topic_sigma <- data.frame(volatility = sapply(lm_topics, stats::sigma))
# merge and categorise according to a threshhold/band
topic_lm_trends <- merge(topic_slope, topic_sigma, by=0)
colnames(topic_lm_trends)[1] <- c("topic_id")
topic_lm_trends <- topicFrequencies %>%
dplyr::group_by(.data$topic_id) %>%
dplyr::summarize(mean_topicshare = mean(.data$topicshare)) %>%
dplyr::ungroup() %>%
merge(topic_lm_trends, by = "topic_id") %>%
dplyr::mutate(trend = dplyr::case_when(
.data$mean_topicshare - .data$intercept > trendThreshold ~ "increasing",
.data$mean_topicshare - .data$intercept < -trendThreshold ~ "decreasing",
TRUE ~ "constant")) %>%
dplyr::select(-.data$mean_topicshare)
return(topic_lm_trends)
}
|
2534c65bb22489cbd4a16d082cf136d278e81a7c
|
6fc32d08e05d2369f92039040f5b2f9c4f1acb44
|
/summary_function.R
|
1cdd2b4213d4b346a819b8c42daba88375d72c34
|
[] |
no_license
|
jpainter/otto
|
d108060a2008562d9cfa21f27dcf6b8fc535de73
|
35df95e1f68e02154a9d560fdac3f25419a9c5bc
|
refs/heads/master
| 2021-01-23T13:47:42.906554
| 2015-03-20T11:13:58
| 2015-03-20T11:13:58
| 32,534,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
summary_function.R
|
# summaryFunction
LogLoss <- function(actual, predicted, eps=0.00001) {
predicted <- pmin(pmax(predicted, eps), 1-eps)
-sum(actual*log(predicted))
}
|
255193ce54f737207b8b94186dedd654057d6f02
|
4a85a2c5cbbbe26b7966cf25c4206e61dca2de39
|
/alirezaJahandide-99422053/project3/script.r
|
d25789b96e170fed432808f9cae931fa10716c00
|
[] |
no_license
|
Alirezajahandide27/CS-SBU-DataMining-Msc-projects
|
4e65235ebdf9a7bbaf6b9cfe632a18c8e869f886
|
6f9794036d05dfa1c019bc59cff713e8dda7f7e6
|
refs/heads/main
| 2023-06-18T12:29:19.120625
| 2021-07-17T07:10:36
| 2021-07-17T07:10:36
| 386,585,203
| 0
| 0
| null | 2021-07-16T09:40:02
| 2021-07-16T09:40:01
| null |
UTF-8
|
R
| false
| false
| 163,858
|
r
|
script.r
|
<!DOCTYPE html>
<html lang="en" data-color-mode="auto" data-light-theme="light" data-dark-theme="dark">
<head>
<meta charset="utf-8">
<link rel="dns-prefetch" href="https://github.githubassets.com">
<link rel="dns-prefetch" href="https://avatars.githubusercontent.com">
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com">
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/">
<link crossorigin="anonymous" media="all" integrity="sha512-iwdBeEEuDZbd2aAEzZti+bkBYQ2UKC6VEAhVMLKq5cCJnyeWVpgVqtgd3scKeZ63wYQTUQegRZwFGKlWOyr5Ew==" rel="stylesheet" href="https://github.githubassets.com/assets/frameworks-8b074178412e0d96ddd9a004cd9b62f9.css" />
<link crossorigin="anonymous" media="all" integrity="sha512-jO7efoKVVWr0bC2+ExApdfiR6CJG9iyiOv0Nq6i/mCmMXkIoVYN3BmTFzQ6J909ZW029YAtHxEu4eHRDyWGbJQ==" rel="stylesheet" href="https://github.githubassets.com/assets/behaviors-8ceede7e8295556af46c2dbe13102975.css" />
<link crossorigin="anonymous" media="all" integrity="sha512-SOqRtMGw7KMarOhPDsnusrtAk8MOuURAPXHUAIm7tTS4gYzHbd8w7xTlpbu40HeG2mkN8/eUGpc4TlDT5TJsfg==" rel="stylesheet" href="https://github.githubassets.com/assets/github-48ea91b4c1b0eca31aace84f0ec9eeb2.css" />
<script crossorigin="anonymous" defer="defer" integrity="sha512-CzeY4A6TiG4fGZSWZU8FxmzFFmcQFoPpArF0hkH0/J/S7UL4eed/LKEXMQXfTwiG5yEJBI+9BdKG8KQJNbhcIQ==" type="application/javascript" src="https://github.githubassets.com/assets/environment-0b3798e0.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-czQZrI8Ar39Oil36WxqNfOBo7pZqlK1CUHWZ8CF9jMxyawHGQ+lKiPtd25OvoNHZF69LfWPpafuwqxcfiG/iYA==" type="application/javascript" src="https://github.githubassets.com/assets/chunk-frameworks-733419ac.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-US/IQhvG9ej4VfjEAZn++Hu2XUgXKUo/3YypnqjP1kjNCWzxGJyNw6JSFeeTcSI5KCDCi/iDdXbzi7i4TA47SQ==" type="application/javascript" src="https://github.githubassets.com/assets/chunk-vendor-512fc842.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Nof93xeMudma5NoKgMI6z0Fk9c1wmbaj6MOpEdryrLvrJOIFcqJ8zNtpX1tEZ3ZwMrMZ5uO+MWgYUUHQnft6HA==" type="application/javascript" src="https://github.githubassets.com/assets/behaviors-3687fddf.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-5tWKSr7mhAzSh4Sx5YRFgKftdGxKwHKnOGYw5DlxjHhkQVURYFU3Bk5IMOGMKuAiJTlC3OXYM3xzGcyjzuEFQQ==" type="application/javascript" data-module-id="./chunk-animate-on-scroll.js" data-src="https://github.githubassets.com/assets/chunk-animate-on-scroll-e6d58a4a.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-0MZorw3oXnKy5eeSwQ9xGrKU4hxQeCXxmyxhneIHNhDIqu8vWh8mHss9FlC75Xd/bPWxFDCvdOo57tnTR46nbA==" type="application/javascript" data-module-id="./chunk-codemirror.js" data-src="https://github.githubassets.com/assets/chunk-codemirror-d0c668af.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-M6W/sGLOuJXCIkw+doDl6zl7J9q2DmqdwftQCtyEiZM/UJNGRVQdyKwI/PAMxD12se/wCx3ZcyJs9nz0o0OSVw==" type="application/javascript" data-module-id="./chunk-color-modes.js" data-src="https://github.githubassets.com/assets/chunk-color-modes-33a5bfb0.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-71HZu1T5JWqRNF9wrm2NXZAqYVvzxZ8Dvor5U5l/LuEBbGCBX57Sny60Rj+qUZZAvEBGFlNsz179DEn2HFwgVA==" type="application/javascript" data-module-id="./chunk-confetti.js" data-src="https://github.githubassets.com/assets/chunk-confetti-ef51d9bb.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-P29U0lNmhUj353VrCWp6czdhNpMtF70xVKf4GBGFVKCoqGtxp0sywAM8/46+iC0kdFiRvM13EBvDnq6oyWRwiw==" type="application/javascript" data-module-id="./chunk-contributions-spider-graph.js" data-src="https://github.githubassets.com/assets/chunk-contributions-spider-graph-3f6f54d2.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-arflMFcVzVAYaP2n7m7gklPChWsVsCDtRPav2Cb6bqLeJf8pgbojWJ3EseKXILCIqfxl/v6arBduZ9SLmpMEZw==" type="application/javascript" data-module-id="./chunk-delayed-loading-element.js" data-src="https://github.githubassets.com/assets/chunk-delayed-loading-element-6ab7e530.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-6j/oSF+kbW+yetNPvI684VzAu9pzug6Vj2h+3u1LdCuRhR4jnuiHZfeQKls3nxcT/S3H+oIt7FtigE/aeoj+gg==" type="application/javascript" data-module-id="./chunk-drag-drop.js" data-src="https://github.githubassets.com/assets/chunk-drag-drop-ea3fe848.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-VSSd+Yzi2iMS+pibY6hD/WdypxAEdob5F2RMKxuKcAHS2EpFYJPeTXoVxt0NXg03tfj2dka2mEtHS+vjpYSaDw==" type="application/javascript" data-module-id="./chunk-edit-hook-secret-element.js" data-src="https://github.githubassets.com/assets/chunk-edit-hook-secret-element-55249df9.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-XObZgIojqwx94ekra728uVPTHs30O37w4+dNCDNUrZXRnGmFRcitdymWoSEm7ztcvhzboxHmXOSP2TeoPSfQ5Q==" type="application/javascript" data-module-id="./chunk-edit.js" data-src="https://github.githubassets.com/assets/chunk-edit-5ce6d980.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-aiqMIGGZGo8AQMjcoImKPMTsZVVRl6htCSY7BpRmpGPG/AF+Wq+P/Oj/dthWQOIk9cCNMPEas7O2zAR6oqn0tA==" type="application/javascript" data-module-id="./chunk-emoji-picker-element.js" data-src="https://github.githubassets.com/assets/chunk-emoji-picker-element-6a2a8c20.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-qqRgtYe+VBe9oQvKTYSA9uVb3qCKhEMl3sHdsnP8AbVRfumjSOugTCEN1YLmnniNBMXb77ty2wddblbKSaQE1Q==" type="application/javascript" data-module-id="./chunk-failbot.js" data-src="https://github.githubassets.com/assets/chunk-failbot-aaa460b5.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-YrRWJ3DBTEGQ3kU5vH0Btt+bjUcZHoTj66uIO7wFIfT1LoKJQ0Q2+UTn4rmeKn+PrnMAnQogCNC6Lka17tDncw==" type="application/javascript" data-module-id="./chunk-filter-input.js" data-src="https://github.githubassets.com/assets/chunk-filter-input-62b45627.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Z1wcyOFQHzyMSPqp5DLKrobr3DN2Q6Dz31cfPtw4b2vPs9PX0PrxyDXHpTbIlcZ9qT1M1BNAypHKKw8Lp6Yx/Q==" type="application/javascript" data-module-id="./chunk-insights-graph.js" data-src="https://github.githubassets.com/assets/chunk-insights-graph-675c1cc8.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-CqT7DFJm8e7yxvsDEb2AZLpdRHyFS9eqZpu6hvJ4Vkv6ybVThoa5tgTvyNQDZyBWy6V+kWHqxd5x3SkTpHHEGw==" type="application/javascript" data-module-id="./chunk-insights-query.js" data-src="https://github.githubassets.com/assets/chunk-insights-query-0aa4fb0c.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-lmosGyye+/xONUQs9SwGN/a9fQvUSiAFk5HrL8eLHjeuOx9DX9TW5ckRKFD+6FM54vutFf/mBmNFW/0R3KJEBw==" type="application/javascript" data-module-id="./chunk-invitations.js" data-src="https://github.githubassets.com/assets/chunk-invitations-966a2c1b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-4MxGQhsDODvZgLbu5arO6CapfnNvZ5fXMsZ47FiklUKRmHq4B3h8uTokSIWAOAxsvCMRrZr0DVZ0i0gm3RAnsg==" type="application/javascript" data-module-id="./chunk-jump-to.js" data-src="https://github.githubassets.com/assets/chunk-jump-to-e0cc4642.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-VtdawM/OSsu+d6v25ZY6UcQa/GGLAStSESjsqdEwx+ey88GNYGkQ24o+JFFo4lY+7wLMRf7aCrLxkA5SquBoNQ==" type="application/javascript" data-module-id="./chunk-launch-code-element.js" data-src="https://github.githubassets.com/assets/chunk-launch-code-element-56d75ac0.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-RduaLAviB2ygvRK/eX5iwzYO43ie7svrJ0rYJs06x7XqpRl/IK8PPBscBWM9Moo5Z86DK2iRLE2+aR7TJ5Uc2Q==" type="application/javascript" data-module-id="./chunk-metric-selection-element.js" data-src="https://github.githubassets.com/assets/chunk-metric-selection-element-45db9a2c.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-7hZ031ngiF36wGsfcoyyCWTqwYxjX+qeTLtCV7CJ+IO+wzkzCm1RoR3WzWczfWmwLNqr+Hu3kQOgkBaGn4ntWQ==" type="application/javascript" data-module-id="./chunk-notification-list-focus.js" data-src="https://github.githubassets.com/assets/chunk-notification-list-focus-ee1674df.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-ma0OOy3nj0c1cqBx0BkcmIFsLqcSZ+MIukQxyEFM/OWTzZpG+QMgOoWPAHZz43M6fyjAUG1jH6c/6LPiiKPCyw==" type="application/javascript" data-module-id="./chunk-profile-pins-element.js" data-src="https://github.githubassets.com/assets/chunk-profile-pins-element-99ad0e3b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-hgoSKLTlL8I3IWr/TLONCU+N4kdCtdrHCrrud4NKhgRlLrTw0XUPhqBaDdZUiFSzDQRw/nFQ1kw2VeTm0g9+lA==" type="application/javascript" data-module-id="./chunk-profile.js" data-src="https://github.githubassets.com/assets/chunk-profile-860a1228.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-dmP0pnRItCP7ydEXVipp98lz/HaQtHyG00kfd8lMS5AoLbDwGfqXPjj7Q0qLGpPc7lBkySNNHIeEPF7NblctEA==" type="application/javascript" data-module-id="./chunk-readme-toc-element.js" data-src="https://github.githubassets.com/assets/chunk-readme-toc-element-7663f4a6.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-/fwTpG2i+GCgHEZc/35F+pXdShv1RfJMxyixcTIxzxDdylOWVJvjIWoumYWEPj7gUqBdrWt4SFf989Szmxleaw==" type="application/javascript" data-module-id="./chunk-ref-selector.js" data-src="https://github.githubassets.com/assets/chunk-ref-selector-fdfc13a4.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-D/MxBjtRPjes6DvnYGi2dEH7AQEnLvSvTODabEkSo+1zP6SSEZpb8oF52kFWERA97t1L19fF/P3bn4pgIsMPuA==" type="application/javascript" data-module-id="./chunk-responsive-underlinenav.js" data-src="https://github.githubassets.com/assets/chunk-responsive-underlinenav-0ff33106.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-SWy36S28Js+/YzsvYgmp+IEdC0qtMcBf6sYhXTEcj1aFPCLPOTOnOKqzFiNyH2oNVDd+u5Qi8eqYINSIu28LFQ==" type="application/javascript" data-module-id="./chunk-runner-groups.js" data-src="https://github.githubassets.com/assets/chunk-runner-groups-496cb7e9.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-46wqItV5Pi6vzI9QkKDjwgwyI2luPiKlLp+jdWUle1wUAWzUh3BX3+/DmehNua4VT0ZvvcswOISMWcWLOXCOdw==" type="application/javascript" data-module-id="./chunk-series-table.js" data-src="https://github.githubassets.com/assets/chunk-series-table-e3ac2a22.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-tk76eoSLUqXSVZ8ANzPprrOImFIV1zQ/VBV+WzG8ZjZpVPH8cLkMH/ur5HJB1lxx9/yo+V2wjDF96t4qfUwZLA==" type="application/javascript" data-module-id="./chunk-severity-calculator-element.js" data-src="https://github.githubassets.com/assets/chunk-severity-calculator-element-b64efa7a.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-j7Pb1H+2Xt4YIKSrJLLXxl/NNkkpW//5PLTpu58JGD8pqRPODDjJKqjO6YPZd++BB4VJubHPjzvuMXhW/9jcqA==" type="application/javascript" data-module-id="./chunk-sortable-behavior.js" data-src="https://github.githubassets.com/assets/chunk-sortable-behavior-8fb3dbd4.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-nKa3UdA2O7Ve4Jn24gaB20yUfJvS7wlnd8Q8C+iWD8i2tXLgaKemDWkLeexeQdrs+an98FCl5fOiy0J+izn+tQ==" type="application/javascript" data-module-id="./chunk-three.module.js" data-src="https://github.githubassets.com/assets/chunk-three.module-9ca6b751.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-WK8VXw3lfUQ/VRW0zlgKPhcMUqH0uTnB/KzePUPdZhCm/HpxfXXHKTGvj5C0Oex7+zbIM2ECzULbtTCT4ug3yg==" type="application/javascript" data-module-id="./chunk-toast.js" data-src="https://github.githubassets.com/assets/chunk-toast-58af155f.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-1vSZvwpr106s8wjSNFNFGVmFT2E4YjI2N8k6JqiSb28GGYMkEJUhveotmvB00Z4bQZM61ZgvWcXax1U3M48gLQ==" type="application/javascript" data-module-id="./chunk-tweetsodium.js" data-src="https://github.githubassets.com/assets/chunk-tweetsodium-d6f499bf.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Bfdp/SnnmjBzjVCUmgPLoPD2rJY5zExYOF+BrA1f1p7Nu+XSiOTLynIGofdvypysG2EC6IHMd8ghEaBzDasaAw==" type="application/javascript" data-module-id="./chunk-unveil.js" data-src="https://github.githubassets.com/assets/chunk-unveil-05f769fd.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-UOFNW/xcxynplVfC8Y3fQdFFiasmugYUUHU4N90G8sqBZGL1yR37yjVakxV8/FV5deBALx9OQMBoiba/3OHGDA==" type="application/javascript" data-module-id="./chunk-user-status-submit.js" data-src="https://github.githubassets.com/assets/chunk-user-status-submit-50e14d5b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-cKu/+X7gT+WVH4sXKt0g3G77bfQfcgwurRObM+dt8XylPm9eEWI+/aWKhVab6VsYuvvuI5BTriKXhXfJwaSXdQ==" type="application/javascript" data-module-id="./chunk-webgl-warp.js" data-src="https://github.githubassets.com/assets/chunk-webgl-warp-70abbff9.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-9Ux7Idk4v6NfGWWacPgVOXymjG/0NapCoK352oWRQAb6yzpMuh4dfmo33HNbxQytH00P1bmOScD2Z3KZwJMS1Q==" type="application/javascript" src="https://github.githubassets.com/assets/repositories-f54c7b21.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-tSnUsdlbqbVl9w12DPULfAYz14KCDuCVpzc/b4hV54jZT+8LUKqBxG5mPEzNzLyhh1nKAeueWHYy3sJv0DLVaw==" type="application/javascript" src="https://github.githubassets.com/assets/diffs-b529d4b1.js"></script>
<meta name="viewport" content="width=device-width">
<title>CS-SBU-DataMining-Msc-projects/script.r at main · Alirezajahandide27/CS-SBU-DataMining-Msc-projects</title>
<meta name="description" content="Projects of DataMining Course for Master in Computer Science Department of Shahid Beheshti University - Alirezajahandide27/CS-SBU-DataMining-Msc-projects">
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<meta property="fb:app_id" content="1401488693436528">
<meta name="apple-itunes-app" content="app-id=1477376905" />
<meta name="twitter:image:src" content="https://opengraph.githubassets.com/49e0c60707bc71c51601256f9e35efff32d602bba2a4e94bba6df6523eeb9db0/Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta name="twitter:site" content="@github" /><meta name="twitter:card" content="summary_large_image" /><meta name="twitter:title" content="Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta name="twitter:description" content="Projects of DataMining Course for Master in Computer Science Department of Shahid Beheshti University - Alirezajahandide27/CS-SBU-DataMining-Msc-projects" />
<meta property="og:image" content="https://opengraph.githubassets.com/49e0c60707bc71c51601256f9e35efff32d602bba2a4e94bba6df6523eeb9db0/Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta property="og:image:alt" content="Projects of DataMining Course for Master in Computer Science Department of Shahid Beheshti University - Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta property="og:image:width" content="1200" /><meta property="og:image:height" content="600" /><meta property="og:site_name" content="GitHub" /><meta property="og:type" content="object" /><meta property="og:title" content="Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta property="og:url" content="https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta property="og:description" content="Projects of DataMining Course for Master in Computer Science Department of Shahid Beheshti University - Alirezajahandide27/CS-SBU-DataMining-Msc-projects" />
<link rel="assets" href="https://github.githubassets.com/">
<link rel="shared-web-socket" href="wss://alive.github.com/_sockets/u/87523315/ws?session=eyJ2IjoiVjMiLCJ1Ijo4NzUyMzMxNSwicyI6NzI0NDU0MDg5LCJjIjoyOTk5MTY3Njk1LCJ0IjoxNjI2NTAzOTIyfQ==--155b2914a02e1aa391e1a1d9720a6420b86ed4428bfc52e25e457e0a108c8843" data-refresh-url="/_alive" data-session-id="33f2d22c066f3f3de74db006150536fd3dea04701cb51d5abf93cd37e8a8d011">
<link rel="shared-web-socket-src" href="/socket-worker-3f088aa2.js">
<link rel="sudo-modal" href="/sessions/sudo_modal">
<meta name="request-id" content="F44A:AFEA:27B9DF9:2938AB7:60F27AC4" data-pjax-transient="true" /><meta name="html-safe-nonce" content="f35494f5100142271a061abc24e6bb51ae51284ac7156792db07c6cad8ed2c83" data-pjax-transient="true" /><meta name="visitor-payload" content="eyJyZWZlcnJlciI6Imh0dHBzOi8vZ2l0aHViLmNvbS9BbGlyZXphamFoYW5kaWRlMjcvQ1MtU0JVLURhdGFNaW5pbmctTXNjLXByb2plY3RzL3RyZWUvbWFpbi9WYWxhJTIwS2hvc3JhdmklMjAtJTIwOTk0MjIwNjgvcHJvamVjdCUyMDMiLCJyZXF1ZXN0X2lkIjoiRjQ0QTpBRkVBOjI3QjlERjk6MjkzOEFCNzo2MEYyN0FDNCIsInZpc2l0b3JfaWQiOiI0MTY3MDIwMTU3NDUxMzk5NjEyIiwicmVnaW9uX2VkZ2UiOiJmcmEiLCJyZWdpb25fcmVuZGVyIjoiaWFkIn0=" data-pjax-transient="true" /><meta name="visitor-hmac" content="6b6c4e5e3eb397a58566449a8863b3659348d65d6083b144e4b6704e3c0ac0a4" data-pjax-transient="true" />
<meta name="hovercard-subject-tag" content="repository:386585203" data-pjax-transient>
<meta name="github-keyboard-shortcuts" content="repository,source-code" data-pjax-transient="true" />
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="c1kuD-K2HIVF635lypcsWPoD4kilo5-jA_wBFyT4uMY">
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-site-verification" content="GXs5KoUUkNCoaAZn7wPN-t01Pywp9M3sEjnt_3_ZWPc">
<meta name="octolytics-host" content="collector.githubapp.com" /><meta name="octolytics-app-id" content="github" /><meta name="octolytics-event-url" content="https://collector.githubapp.com/github-external/browser_event" /><meta name="octolytics-actor-id" content="87523315" /><meta name="octolytics-actor-login" content="Alirezajahandide27" /><meta name="octolytics-actor-hash" content="8de5dec89d41e31b3261b8ec53f4a82f6b2cb4e0ac04246b4a7247d1cbfa5273" />
<meta name="analytics-location" content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" />
<meta name="optimizely-datafile" content="{"version": "4", "rollouts": [], "typedAudiences": [], "anonymizeIP": true, "projectId": "16737760170", "variables": [], "featureFlags": [], "experiments": [], "audiences": [{"conditions": "[\"or\", {\"match\": \"exact\", \"name\": \"$opt_dummy_attribute\", \"type\": \"custom_attribute\", \"value\": \"$opt_dummy_value\"}]", "id": "$opt_dummy_audience", "name": "Optimizely-Generated Audience for Backwards Compatibility"}], "groups": [], "attributes": [{"id": "16822470375", "key": "user_id"}, {"id": "17143601254", "key": "spammy"}, {"id": "18175660309", "key": "organization_plan"}, {"id": "18813001570", "key": "is_logged_in"}, {"id": "19073851829", "key": "geo"}, {"id": "20175462351", "key": "requestedCurrency"}], "botFiltering": false, "accountId": "16737760170", "events": [{"experimentIds": [], "id": "17911811441", "key": "hydro_click.dashboard.teacher_toolbox_cta"}, {"experimentIds": [], "id": "18124116703", "key": "submit.organizations.complete_sign_up"}, {"experimentIds": [], "id": "18145892387", "key": "no_metric.tracked_outside_of_optimizely"}, {"experimentIds": [], "id": "18178755568", "key": "click.org_onboarding_checklist.add_repo"}, {"experimentIds": [], "id": "18180553241", "key": "submit.repository_imports.create"}, {"experimentIds": [], "id": "18186103728", "key": "click.help.learn_more_about_repository_creation"}, {"experimentIds": [], "id": "18188530140", "key": "test_event.do_not_use_in_production"}, {"experimentIds": [], "id": "18191963644", "key": "click.empty_org_repo_cta.transfer_repository"}, {"experimentIds": [], "id": "18195612788", "key": "click.empty_org_repo_cta.import_repository"}, {"experimentIds": [], "id": "18210945499", "key": "click.org_onboarding_checklist.invite_members"}, {"experimentIds": [], "id": "18211063248", "key": "click.empty_org_repo_cta.create_repository"}, {"experimentIds": [], "id": "18215721889", "key": "click.org_onboarding_checklist.update_profile"}, {"experimentIds": [], "id": "18224360785", "key": "click.org_onboarding_checklist.dismiss"}, {"experimentIds": [], "id": "18234832286", "key": "submit.organization_activation.complete"}, {"experimentIds": [], "id": "18252392383", "key": "submit.org_repository.create"}, {"experimentIds": [], "id": "18257551537", "key": "submit.org_member_invitation.create"}, {"experimentIds": [], "id": "18259522260", "key": "submit.organization_profile.update"}, {"experimentIds": [], "id": "18564603625", "key": "view.classroom_select_organization"}, {"experimentIds": [], "id": "18568612016", "key": "click.classroom_sign_in_click"}, {"experimentIds": [], "id": "18572592540", "key": "view.classroom_name"}, {"experimentIds": [], "id": "18574203855", "key": "click.classroom_create_organization"}, {"experimentIds": [], "id": "18582053415", "key": "click.classroom_select_organization"}, {"experimentIds": [], "id": "18589463420", "key": "click.classroom_create_classroom"}, {"experimentIds": [], "id": "18591323364", "key": "click.classroom_create_first_classroom"}, {"experimentIds": [], "id": "18591652321", "key": "click.classroom_grant_access"}, {"experimentIds": [], "id": "18607131425", "key": "view.classroom_creation"}, {"experimentIds": [], "id": "18831680583", "key": "upgrade_account_plan"}, {"experimentIds": [], "id": "19064064515", "key": "click.signup"}, {"experimentIds": [], "id": "19075373687", "key": "click.view_account_billing_page"}, {"experimentIds": [], "id": "19077355841", "key": "click.dismiss_signup_prompt"}, {"experimentIds": [], "id": "19079713938", "key": "click.contact_sales"}, {"experimentIds": [], "id": "19120963070", "key": "click.compare_account_plans"}, {"experimentIds": [], "id": "19151690317", "key": "click.upgrade_account_cta"}, {"experimentIds": [], "id": "19424193129", "key": "click.open_account_switcher"}, {"experimentIds": [], "id": "19520330825", "key": "click.visit_account_profile"}, {"experimentIds": [], "id": "19540970635", "key": "click.switch_account_context"}, {"experimentIds": [], "id": "19730198868", "key": "submit.homepage_signup"}, {"experimentIds": [], "id": "19820830627", "key": "click.homepage_signup"}, {"experimentIds": [], "id": "19988571001", "key": "click.create_enterprise_trial"}, {"experimentIds": [], "id": "20036538294", "key": "click.create_organization_team"}, {"experimentIds": [], "id": "20040653299", "key": "click.input_enterprise_trial_form"}, {"experimentIds": [], "id": "20062030003", "key": "click.continue_with_team"}, {"experimentIds": [], "id": "20068947153", "key": "click.create_organization_free"}, {"experimentIds": [], "id": "20086636658", "key": "click.signup_continue.username"}, {"experimentIds": [], "id": "20091648988", "key": "click.signup_continue.create_account"}, {"experimentIds": [], "id": "20103637615", "key": "click.signup_continue.email"}, {"experimentIds": [], "id": "20111574253", "key": "click.signup_continue.password"}, {"experimentIds": [], "id": "20120044111", "key": "view.pricing_page"}, {"experimentIds": [], "id": "20152062109", "key": "submit.create_account"}, {"experimentIds": [], "id": "20165800992", "key": "submit.upgrade_payment_form"}, {"experimentIds": [], "id": "20171520319", "key": "submit.create_organization"}, {"experimentIds": [], "id": "20222645674", "key": "click.recommended_plan_in_signup.discuss_your_needs"}, {"experimentIds": [], "id": "20227443657", "key": "submit.verify_primary_user_email"}, {"experimentIds": [], "id": "20234607160", "key": "click.recommended_plan_in_signup.try_enterprise"}, {"experimentIds": [], "id": "20238175784", "key": "click.recommended_plan_in_signup.team"}, {"experimentIds": [], "id": "20239847212", "key": "click.recommended_plan_in_signup.continue_free"}, {"experimentIds": [], "id": "20251097193", "key": "recommended_plan"}], "revision": "709"}" />
<!-- To prevent page flashing, the optimizely JS needs to be loaded in the
<head> tag before the DOM renders -->
<script crossorigin="anonymous" defer="defer" integrity="sha512-+jU501Se8pk+19AWlNhSR/uznFeWGI9ndTB52CGeN8Fze/Srm+6H0FN6FCnvSdvVMtHwsV1NGq1sX5RvBwEGAg==" type="application/javascript" src="https://github.githubassets.com/assets/optimizely-fa3539d3.js"></script>
<meta name="hostname" content="github.com">
<meta name="user-login" content="Alirezajahandide27">
<meta name="expected-hostname" content="github.com">
<meta name="js-proxy-site-detection-payload" content="MDEwZWRlODk3NmZjZmFlMDk0MmEyODc2MjNiMGM0NDg5NGQ3YWQ0MDgxY2ViNDg5ODg2OTQwMmZjNjdiM2M3M3x7InJlbW90ZV9hZGRyZXNzIjoiODAuMjEwLjE3My4xOTMiLCJyZXF1ZXN0X2lkIjoiRjQ0QTpBRkVBOjI3QjlERjk6MjkzOEFCNzo2MEYyN0FDNCIsInRpbWVzdGFtcCI6MTYyNjUwMzkyMiwiaG9zdCI6ImdpdGh1Yi5jb20ifQ==">
<meta name="enabled-features" content="MARKETPLACE_PENDING_INSTALLATIONS,GITHUB_TOKEN_PERMISSION">
<meta http-equiv="x-pjax-version" content="b313a5dc8c3457c97f73cee5de0cbc1d076ea869e375a5547705df713432f3f6">
<meta name="go-import" content="github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects git https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects.git">
<meta name="octolytics-dimension-user_id" content="87523315" /><meta name="octolytics-dimension-user_login" content="Alirezajahandide27" /><meta name="octolytics-dimension-repository_id" content="386585203" /><meta name="octolytics-dimension-repository_nwo" content="Alirezajahandide27/CS-SBU-DataMining-Msc-projects" /><meta name="octolytics-dimension-repository_public" content="true" /><meta name="octolytics-dimension-repository_is_fork" content="true" /><meta name="octolytics-dimension-repository_parent_id" content="349919156" /><meta name="octolytics-dimension-repository_parent_nwo" content="alisharifi2000/CS-SBU-DataMining-Msc-projects" /><meta name="octolytics-dimension-repository_network_root_id" content="349919156" /><meta name="octolytics-dimension-repository_network_root_nwo" content="alisharifi2000/CS-SBU-DataMining-Msc-projects" />
<link rel="canonical" href="https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r" data-pjax-transient>
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<meta name="browser-optimizely-client-errors-url" content="https://api.github.com/_private/browser/optimizely_client/errors">
<link rel="mask-icon" href="https://github.githubassets.com/pinned-octocat.svg" color="#000000">
<link rel="alternate icon" class="js-site-favicon" type="image/png" href="https://github.githubassets.com/favicons/favicon.png">
<link rel="icon" class="js-site-favicon" type="image/svg+xml" href="https://github.githubassets.com/favicons/favicon.svg">
<meta name="theme-color" content="#1e2327">
<meta name="color-scheme" content="light dark" />
<link rel="manifest" href="/manifest.json" crossOrigin="use-credentials">
<meta name="enabled-homepage-translation-languages" content="">
</head>
<body class="logged-in env-production page-responsive page-blob" style="word-wrap: break-word;">
<div class="position-relative js-header-wrapper ">
<a href="#start-of-content" class="p-3 color-bg-info-inverse color-text-white show-on-focus js-skip-to-content">Skip to content</a>
<span data-view-component="true" class="progress-pjax-loader width-full js-pjax-loader-bar Progress position-fixed">
<span style="background-color: #79b8ff;width: 0%;" data-view-component="true" class="Progress-item progress-pjax-loader-bar"></span>
</span>
<header class="Header js-details-container Details px-3 px-md-4 px-lg-5 flex-wrap flex-md-nowrap" role="banner" >
<div class="Header-item mt-n1 mb-n1 d-none d-md-flex">
<a
class="Header-link "
href="https://github.com/"
data-hotkey="g d"
aria-label="Homepage "
data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Header","action":"go to dashboard","label":"icon:logo","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="585150d6988e713cf0350427db069bb2dbfe49725e156a50e03c351cd2a82548"
>
<svg class="octicon octicon-mark-github v-align-middle" height="32" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path></svg>
</a>
</div>
<div class="Header-item d-md-none">
<button class="Header-link btn-link js-details-target" type="button" aria-label="Toggle navigation" aria-expanded="false">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="24" width="24" class="octicon octicon-three-bars">
<path fill-rule="evenodd" d="M1 2.75A.75.75 0 011.75 2h12.5a.75.75 0 110 1.5H1.75A.75.75 0 011 2.75zm0 5A.75.75 0 011.75 7h12.5a.75.75 0 110 1.5H1.75A.75.75 0 011 7.75zM1.75 12a.75.75 0 100 1.5h12.5a.75.75 0 100-1.5H1.75z"></path>
</svg>
</button>
</div>
<div class="Header-item Header-item--full flex-column flex-md-row width-full flex-order-2 flex-md-order-none mr-0 mr-md-3 mt-3 mt-md-0 Details-content--hidden-not-important d-md-flex">
<div class="header-search flex-auto js-site-search position-relative flex-self-stretch flex-md-self-auto mb-3 mb-md-0 mr-0 mr-md-3 scoped-search site-scoped-search js-jump-to"
>
<div class="position-relative">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="js-site-search-form" role="search" aria-label="Site" data-scope-type="Repository" data-scope-id="386585203" data-scoped-search-url="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/search" data-owner-scoped-search-url="/users/Alirezajahandide27/search" data-unscoped-search-url="/search" action="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/search" accept-charset="UTF-8" method="get">
<label class="form-control input-sm header-search-wrapper p-0 js-chromeless-input-container header-search-wrapper-jump-to position-relative d-flex flex-justify-between flex-items-center">
<input type="text"
class="form-control input-sm header-search-input jump-to-field js-jump-to-field js-site-search-focus js-site-search-field is-clearable"
data-hotkey=s,/
name="q"
value=""
placeholder="Search or jump to…"
data-unscoped-placeholder="Search or jump to…"
data-scoped-placeholder="Search or jump to…"
autocapitalize="off"
role="combobox"
aria-haspopup="listbox"
aria-expanded="false"
aria-autocomplete="list"
aria-controls="jump-to-results"
aria-label="Search or jump to…"
data-jump-to-suggestions-path="/_graphql/GetSuggestedNavigationDestinations"
spellcheck="false"
autocomplete="off"
>
<input type="hidden" value="RDRosNR5CN3IopdUSbJi48WTnlMhst4nrclQ7/hq+q5L/MaCY3mOS8caSS+RLJXcZLtxXEy07btj1dlH5e3r5Q==" data-csrf="true" class="js-data-jump-to-suggestions-path-csrf" />
<input type="hidden" class="js-site-search-type-field" name="type" >
<img src="https://github.githubassets.com/images/search-key-slash.svg" alt="" class="mr-2 header-search-key-slash">
<div class="Box position-absolute overflow-hidden d-none jump-to-suggestions js-jump-to-suggestions-container">
<ul class="d-none js-jump-to-suggestions-template-container">
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-suggestion" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="" data-item-type="suggestion">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
</ul>
<ul class="d-none js-jump-to-no-results-template-container">
<li class="d-flex flex-justify-center flex-items-center f5 d-none js-jump-to-suggestion p-2">
<span class="color-text-secondary">No suggested jump to results</span>
</li>
</ul>
<ul id="jump-to-results" role="listbox" class="p-0 m-0 js-navigation-container jump-to-suggestions-results-container js-jump-to-suggestions-results-container">
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="" data-item-type="scoped_search">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-owner-scoped-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="" data-item-type="owner_scoped_search">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this user">
In this user
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
<li class="d-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-global-search d-none" role="option">
<a tabindex="-1" class="no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path js-jump-to-suggestion-path js-navigation-open p-2" href="" data-item-type="global_search">
<div class="jump-to-octicon js-jump-to-octicon flex-shrink-0 mr-2 text-center d-none">
<svg height="16" width="16" class="octicon octicon-repo flex-shrink-0 js-jump-to-octicon-repo d-none" title="Repository" aria-label="Repository" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path></svg>
<svg height="16" width="16" class="octicon octicon-project flex-shrink-0 js-jump-to-octicon-project d-none" title="Project" aria-label="Project" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<svg height="16" width="16" class="octicon octicon-search flex-shrink-0 js-jump-to-octicon-search d-none" title="Search" aria-label="Search" viewBox="0 0 16 16" version="1.1" role="img"><path fill-rule="evenodd" d="M11.5 7a4.499 4.499 0 11-8.998 0A4.499 4.499 0 0111.5 7zm-.82 4.74a6 6 0 111.06-1.06l3.04 3.04a.75.75 0 11-1.06 1.06l-3.04-3.04z"></path></svg>
</div>
<img class="avatar mr-2 flex-shrink-0 js-jump-to-suggestion-avatar d-none" alt="" aria-label="Team" src="" width="28" height="28">
<div class="jump-to-suggestion-name js-jump-to-suggestion-name flex-auto overflow-hidden text-left no-wrap css-truncate css-truncate-target">
</div>
<div class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none js-jump-to-badge-search">
<span class="js-jump-to-badge-search-text-default d-none" aria-label="in this repository">
In this repository
</span>
<span class="js-jump-to-badge-search-text-global d-none" aria-label="in all of GitHub">
All GitHub
</span>
<span aria-hidden="true" class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
<div aria-hidden="true" class="border rounded-1 flex-shrink-0 color-bg-tertiary px-1 color-text-tertiary ml-1 f6 d-none d-on-nav-focus js-jump-to-badge-jump">
Jump to
<span class="d-inline-block ml-1 v-align-middle">↵</span>
</div>
</a>
</li>
<li class="d-flex flex-justify-center flex-items-center p-0 f5 js-jump-to-suggestion">
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" viewBox="0 0 16 16" fill="none" data-view-component="true" width="32" height="32" class="m-3 anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</li>
</ul>
</div>
</label>
</form> </div>
</div>
<nav class="d-flex flex-column flex-md-row flex-self-stretch flex-md-self-auto" aria-label="Global">
<a class="Header-link py-md-3 d-block d-md-none py-2 border-top border-md-top-0 border-white-fade" data-ga-click="Header, click, Nav menu - item:dashboard:user" aria-label="Dashboard" href="/dashboard">
Dashboard
</a>
<a class="js-selected-navigation-item Header-link mt-md-n3 mb-md-n3 py-2 py-md-3 mr-0 mr-md-3 border-top border-md-top-0 border-white-fade" data-hotkey="g p" data-ga-click="Header, click, Nav menu - item:pulls context:user" aria-label="Pull requests you created" data-selected-links="/pulls /pulls/assigned /pulls/mentioned /pulls" href="/pulls">
Pull<span class="d-inline d-md-none d-lg-inline"> request</span>s
</a>
<a class="js-selected-navigation-item Header-link mt-md-n3 mb-md-n3 py-2 py-md-3 mr-0 mr-md-3 border-top border-md-top-0 border-white-fade" data-hotkey="g i" data-ga-click="Header, click, Nav menu - item:issues context:user" aria-label="Issues you created" data-selected-links="/issues /issues/assigned /issues/mentioned /issues" href="/issues">
Issues
</a>
<div class="d-flex position-relative">
<a class="js-selected-navigation-item Header-link flex-auto mt-md-n3 mb-md-n3 py-2 py-md-3 mr-0 mr-md-3 border-top border-md-top-0 border-white-fade" data-ga-click="Header, click, Nav menu - item:marketplace context:user" data-octo-click="marketplace_click" data-octo-dimensions="location:nav_bar" data-selected-links=" /marketplace" href="/marketplace">
Marketplace
</a> </div>
<a class="js-selected-navigation-item Header-link mt-md-n3 mb-md-n3 py-2 py-md-3 mr-0 mr-md-3 border-top border-md-top-0 border-white-fade" data-ga-click="Header, click, Nav menu - item:explore" data-selected-links="/explore /trending /trending/developers /integrations /integrations/feature/code /integrations/feature/collaborate /integrations/feature/ship showcases showcases_search showcases_landing /explore" href="/explore">
Explore
</a>
<a class="js-selected-navigation-item Header-link d-block d-md-none py-2 py-md-3 border-top border-md-top-0 border-white-fade" data-ga-click="Header, click, Nav menu - item:workspaces context:user" data-selected-links="/codespaces /codespaces" href="/codespaces">
Codespaces
</a>
<a class="js-selected-navigation-item Header-link d-block d-md-none py-2 py-md-3 border-top border-md-top-0 border-white-fade" data-ga-click="Header, click, Nav menu - item:Sponsors" data-hydro-click="{"event_type":"sponsors.button_click","payload":{"button":"HEADER_SPONSORS_DASHBOARD","sponsorable_login":"Alirezajahandide27","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="cb8345e7b0ca66d4b6e91c8295877de2ee8d1b9d26318877023b0eb20884883c" data-selected-links=" /sponsors/accounts" href="/sponsors/accounts">Sponsors</a>
<a class="Header-link d-block d-md-none mr-0 mr-md-3 py-2 py-md-3 border-top border-md-top-0 border-white-fade" href="/settings/profile">
Settings
</a>
<a class="Header-link d-block d-md-none mr-0 mr-md-3 py-2 py-md-3 border-top border-md-top-0 border-white-fade" href="/Alirezajahandide27">
<img class="avatar avatar-user" src="https://avatars.githubusercontent.com/u/87523315?s=40&v=4" width="20" height="20" alt="@Alirezajahandide27" />
Alirezajahandide27
</a>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form action="/logout" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="TPd7ySoDH0opU8OCwHw2oGa3+ie80ObN5q8EPFWBWXTpamID2qUIbUhwgH7afQ202q2gaa2xOKx2MxHA4Vm0XA==" />
<button
type="submit"
class="Header-link mr-0 mr-md-3 py-2 py-md-3 border-top border-md-top-0 border-white-fade d-md-none btn-link d-block width-full text-left"
style="padding-left: 2px;"
data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Header","action":"sign out","label":"icon:logout","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="17a57d720a6ab4964bc36e0b10a3b1eb986d004e0ca5de8e4cd130b4e19a776e"
>
<svg class="octicon octicon-sign-out v-align-middle" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M2 2.75C2 1.784 2.784 1 3.75 1h2.5a.75.75 0 010 1.5h-2.5a.25.25 0 00-.25.25v10.5c0 .138.112.25.25.25h2.5a.75.75 0 010 1.5h-2.5A1.75 1.75 0 012 13.25V2.75zm10.44 4.5H6.75a.75.75 0 000 1.5h5.69l-1.97 1.97a.75.75 0 101.06 1.06l3.25-3.25a.75.75 0 000-1.06l-3.25-3.25a.75.75 0 10-1.06 1.06l1.97 1.97z"></path></svg>
Sign out
</button>
</form></nav>
</div>
<div class="Header-item Header-item--full flex-justify-center d-md-none position-relative">
<a
class="Header-link "
href="https://github.com/"
data-hotkey="g d"
aria-label="Homepage "
data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Header","action":"go to dashboard","label":"icon:logo","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="585150d6988e713cf0350427db069bb2dbfe49725e156a50e03c351cd2a82548"
>
<svg class="octicon octicon-mark-github v-align-middle" height="32" viewBox="0 0 16 16" version="1.1" width="32" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path></svg>
</a>
</div>
<div class="Header-item mr-0 mr-md-3 flex-order-1 flex-md-order-none">
<notification-indicator
class="js-socket-channel"
data-test-selector="notifications-indicator"
data-channel="eyJjIjoibm90aWZpY2F0aW9uLWNoYW5nZWQ6ODc1MjMzMTUiLCJ0IjoxNjI2NTAzOTIyfQ==--2dd56015426c1b232495ed0c9eba3e4229be8292aab087b041ba2da950d8184d">
<a href="/notifications"
class="Header-link notification-indicator position-relative tooltipped tooltipped-sw"
aria-label="You have no unread notifications"
data-hotkey="g n"
data-ga-click="Header, go to notifications, icon:read"
data-target="notification-indicator.link">
<span class="mail-status " data-target="notification-indicator.modifier"></span>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-bell">
<path d="M8 16a2 2 0 001.985-1.75c.017-.137-.097-.25-.235-.25h-3.5c-.138 0-.252.113-.235.25A2 2 0 008 16z"></path><path fill-rule="evenodd" d="M8 1.5A3.5 3.5 0 004.5 5v2.947c0 .346-.102.683-.294.97l-1.703 2.556a.018.018 0 00-.003.01l.001.006c0 .002.002.004.004.006a.017.017 0 00.006.004l.007.001h10.964l.007-.001a.016.016 0 00.006-.004.016.016 0 00.004-.006l.001-.007a.017.017 0 00-.003-.01l-1.703-2.554a1.75 1.75 0 01-.294-.97V5A3.5 3.5 0 008 1.5zM3 5a5 5 0 0110 0v2.947c0 .05.015.098.042.139l1.703 2.555A1.518 1.518 0 0113.482 13H2.518a1.518 1.518 0 01-1.263-2.36l1.703-2.554A.25.25 0 003 7.947V5z"></path>
</svg>
</a>
</notification-indicator>
</div>
<div class="Header-item position-relative d-none d-md-flex">
<details class="details-overlay details-reset">
<summary
class="Header-link"
aria-label="Create new…"
data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Header","action":"create new","label":"icon:add","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="a986d56ba4e25e153af727d2ffa8285e8e9f09f7db1c19a979051709e78a8bf4"
>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-plus">
<path fill-rule="evenodd" d="M7.75 2a.75.75 0 01.75.75V7h4.25a.75.75 0 110 1.5H8.5v4.25a.75.75 0 11-1.5 0V8.5H2.75a.75.75 0 010-1.5H7V2.75A.75.75 0 017.75 2z"></path>
</svg> <span class="dropdown-caret"></span>
</summary>
<details-menu class="dropdown-menu dropdown-menu-sw">
<a role="menuitem" class="dropdown-item" href="/new" data-ga-click="Header, create new repository">
New repository
</a>
<a role="menuitem" class="dropdown-item" href="/new/import" data-ga-click="Header, import a repository">
Import repository
</a>
<a role="menuitem" class="dropdown-item" href="https://gist.github.com/" data-ga-click="Header, create new gist">
New gist
</a>
<a role="menuitem" class="dropdown-item" href="/organizations/new" data-ga-click="Header, create new organization">
New organization
</a>
</details-menu>
</details>
</div>
<div class="Header-item position-relative mr-0 d-none d-md-flex">
<details class="details-overlay details-reset js-feature-preview-indicator-container" data-feature-preview-indicator-src="/users/Alirezajahandide27/feature_preview/indicator_check">
<summary
class="Header-link"
aria-label="View profile and more"
data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Header","action":"show menu","label":"icon:avatar","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="e123ab01ff5c733c7e948f6dea631fa97ccc9a3d97a9b5fc406285ac8b51de3f"
>
<img src="https://avatars.githubusercontent.com/u/87523315?s=60&v=4" alt="@Alirezajahandide27" size="20" data-view-component="true" height="20" width="20" class="avatar-user avatar avatar-small"></img>
<span class="feature-preview-indicator js-feature-preview-indicator" style="top: 1px;" hidden></span>
<span class="dropdown-caret"></span>
</summary>
<details-menu class="dropdown-menu dropdown-menu-sw" style="width: 180px"
src="/users/87523315/menu" preload>
<include-fragment>
<p class="text-center mt-3" data-hide-on-error>
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" viewBox="0 0 16 16" fill="none" data-view-component="true" width="32" height="32" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</p>
<p class="ml-1 mb-2 mt-2 color-text-primary" data-show-on-error>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-alert">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
Sorry, something went wrong.
</p>
</include-fragment>
</details-menu>
</details>
</div>
</header>
</div>
<div id="start-of-content" class="show-on-focus"></div>
<div data-pjax-replace id="js-flash-container">
<template class="js-flash-template">
<div class="flash flash-full {{ className }}">
<div class=" px-2" >
<button class="flash-close js-flash-close" type="button" aria-label="Dismiss this message">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
<div>{{ message }}</div>
</div>
</div>
</template>
</div>
<include-fragment class="js-notification-shelf-include-fragment" data-base-src="https://github.com/notifications/beta/shelf"></include-fragment>
<div
class="application-main "
data-commit-hovercards-enabled
data-discussion-hovercards-enabled
data-issue-and-pr-hovercards-enabled
>
<div itemscope itemtype="http://schema.org/SoftwareSourceCode" class="">
<main id="js-repo-pjax-container" data-pjax-container >
<div class="hx_page-header-bg pt-3 hide-full-screen mb-5">
<div class="d-flex mb-3 px-3 px-md-4 px-lg-5">
<div class="flex-auto min-width-0 width-fit mr-3">
<h1 class=" d-flex flex-wrap flex-items-center break-word f3 text-normal">
<svg class="octicon octicon-repo-forked color-text-secondary mr-2" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M5 3.25a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm0 2.122a2.25 2.25 0 10-1.5 0v.878A2.25 2.25 0 005.75 8.5h1.5v2.128a2.251 2.251 0 101.5 0V8.5h1.5a2.25 2.25 0 002.25-2.25v-.878a2.25 2.25 0 10-1.5 0v.878a.75.75 0 01-.75.75h-4.5A.75.75 0 015 6.25v-.878zm3.75 7.378a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm3-8.75a.75.75 0 100-1.5.75.75 0 000 1.5z"></path></svg>
<span class="author flex-self-stretch" itemprop="author">
<a class="url fn" rel="author" data-hovercard-type="user" data-hovercard-url="/users/Alirezajahandide27/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/Alirezajahandide27">Alirezajahandide27</a>
</span>
<span class="mx-1 flex-self-stretch color-text-secondary">/</span>
<strong itemprop="name" class="mr-2 flex-self-stretch">
<a data-pjax="#js-repo-pjax-container" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects">CS-SBU-DataMining-Msc-projects</a>
</strong>
</h1>
<span class="text-small lh-condensed-ultra no-wrap mt-1" data-repository-hovercards-enabled>
forked from <a data-hovercard-type="repository" data-hovercard-url="/alisharifi2000/CS-SBU-DataMining-Msc-projects/hovercard" href="/alisharifi2000/CS-SBU-DataMining-Msc-projects">alisharifi2000/CS-SBU-DataMining-Msc-projects</a>
</span>
</div>
<ul class="pagehead-actions flex-shrink-0 d-none d-md-inline" style="padding: 2px 0;">
<li>
<notifications-list-subscription-form class="f5 position-relative d-flex">
<details
class="details-reset details-overlay f5 position-relative"
data-target="notifications-list-subscription-form.details"
data-action="toggle:notifications-list-subscription-form#detailsToggled"
>
<summary class="btn btn-sm rounded-right-0" data-hydro-click="{"event_type":"repository.click","payload":{"target":"WATCH_BUTTON","repository_id":386585203,"originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="3e6f67a40133bd9d4968a5b7dc43d705b7c2ee87b7c544e4b412a5fe0fa6648d" data-ga-click="Repository, click Watch settings, action:blob#show" aria-label="Notifications settings">
<span data-menu-button>
<span
hidden
data-target="notifications-list-subscription-form.unwatchButtonCopy"
>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-eye">
<path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path>
</svg>
Unwatch
</span>
<span
hidden
data-target="notifications-list-subscription-form.stopIgnoringButtonCopy"
>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-bell-slash">
<path fill-rule="evenodd" d="M8 1.5c-.997 0-1.895.416-2.534 1.086A.75.75 0 014.38 1.55 5 5 0 0113 5v2.373a.75.75 0 01-1.5 0V5A3.5 3.5 0 008 1.5zM4.182 4.31L1.19 2.143a.75.75 0 10-.88 1.214L3 5.305v2.642a.25.25 0 01-.042.139L1.255 10.64A1.518 1.518 0 002.518 13h11.108l1.184.857a.75.75 0 10.88-1.214l-1.375-.996a1.196 1.196 0 00-.013-.01L4.198 4.321a.733.733 0 00-.016-.011zm7.373 7.19L4.5 6.391v1.556c0 .346-.102.683-.294.97l-1.703 2.556a.018.018 0 00-.003.01.015.015 0 00.005.012.017.017 0 00.006.004l.007.001h9.037zM8 16a2 2 0 001.985-1.75c.017-.137-.097-.25-.235-.25h-3.5c-.138 0-.252.113-.235.25A2 2 0 008 16z"></path>
</svg>
Stop ignoring
</span>
<span
data-target="notifications-list-subscription-form.watchButtonCopy"
>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-eye">
<path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path>
</svg>
Watch
</span>
</span>
<span class="dropdown-caret"></span>
</summary>
<details-menu
class="SelectMenu "
role="menu"
data-target="notifications-list-subscription-form.menu"
>
<div class="SelectMenu-modal notifications-component-menu-modal">
<header class="SelectMenu-header">
<h3 class="SelectMenu-title">Notifications</h3>
<button class="SelectMenu-closeButton" type="button" aria-label="Close menu" data-action="click:notifications-list-subscription-form#closeMenu">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
</header>
<div class="SelectMenu-list">
<form data-target="notifications-list-subscription-form.form" data-action="submit:notifications-list-subscription-form#submitForm" action="/notifications/subscribe" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="sdr26RoKaGVNPuXnS+tTMZydXmnWFY4SP4DaJd+zmzNvSzr1Yl2gijTP6dVihrxaDqRgwmoKoWARXynauREGow==" />
<input type="hidden" name="repository_id" value="386585203">
<button
type="submit"
name="do"
value="included"
class="SelectMenu-item flex-items-start"
role="menuitemradio"
aria-checked="true"
data-targets="notifications-list-subscription-form.subscriptionButtons"
>
<span class="f5">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="f5 text-bold">
Participating and @mentions
</div>
<div class="text-small color-text-secondary text-normal pb-1">
Only receive notifications from this repository when participating or @mentioned.
</div>
</div>
</button>
<button
type="submit"
name="do"
value="subscribed"
class="SelectMenu-item flex-items-start"
role="menuitemradio"
aria-checked="false"
data-targets="notifications-list-subscription-form.subscriptionButtons"
>
<span class="f5">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="f5 text-bold">
All Activity
</div>
<div class="text-small color-text-secondary text-normal pb-1">
Notified of all notifications on this repository.
</div>
</div>
</button>
<button
type="submit"
name="do"
value="ignore"
class="SelectMenu-item flex-items-start"
role="menuitemradio"
aria-checked="false"
data-targets="notifications-list-subscription-form.subscriptionButtons"
>
<span class="f5">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="f5 text-bold">
Ignore
</div>
<div class="text-small color-text-secondary text-normal pb-1">
Never be notified.
</div>
</div>
</button>
</form>
<button
class="SelectMenu-item flex-items-start pr-3"
type="button"
role="menuitemradio"
data-target="notifications-list-subscription-form.customButton"
data-action="click:notifications-list-subscription-form#openCustomDialog"
aria-haspopup="true"
aria-checked="false"
>
<span class="f5">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="d-flex flex-items-start flex-justify-between">
<div class="f5 text-bold">Custom</div>
<div class="f5 pr-1">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-arrow-right">
<path fill-rule="evenodd" d="M8.22 2.97a.75.75 0 011.06 0l4.25 4.25a.75.75 0 010 1.06l-4.25 4.25a.75.75 0 01-1.06-1.06l2.97-2.97H3.75a.75.75 0 010-1.5h7.44L8.22 4.03a.75.75 0 010-1.06z"></path>
</svg>
</div>
</div>
<div class="text-small color-text-secondary text-normal pb-1">
Select events you want to be notified of in addition to participating and @mentions.
</div>
</div>
</button>
</div>
</div>
</details-menu>
<details-dialog class="notifications-component-dialog " data-target="notifications-list-subscription-form.customDialog" hidden>
<div class="SelectMenu-modal notifications-component-dialog-modal overflow-visible">
<form data-target="notifications-list-subscription-form.customform" data-action="submit:notifications-list-subscription-form#submitCustomForm" action="/notifications/subscribe" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="3j2Eg4IuoC848xDP2X/dWwX/IQL9F4e8Ey8U30slIUYArEif+nlowEECHP3wEjIwl8YfqUEIqM498OcgLYe81g==" />
<input type="hidden" name="repository_id" value="386585203">
<header class="d-sm-none SelectMenu-header pb-0 border-bottom-0 px-2 px-sm-3">
<h1 class="f3 SelectMenu-title d-inline-flex">
<button
class="color-bg-primary border-0 px-2 py-0 m-0 Link--secondary f5"
aria-label="Return to menu"
type="button"
data-action="click:notifications-list-subscription-form#closeCustomDialog"
>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-arrow-left">
<path fill-rule="evenodd" d="M7.78 12.53a.75.75 0 01-1.06 0L2.47 8.28a.75.75 0 010-1.06l4.25-4.25a.75.75 0 011.06 1.06L4.81 7h7.44a.75.75 0 010 1.5H4.81l2.97 2.97a.75.75 0 010 1.06z"></path>
</svg>
</button>
Custom
</h1>
</header>
<header class="d-none d-sm-flex flex-items-start pt-1">
<button
class="border-0 px-2 pt-1 m-0 Link--secondary f5"
style="background-color: transparent;"
aria-label="Return to menu"
type="button"
data-action="click:notifications-list-subscription-form#closeCustomDialog"
>
<svg style="position: relative; left: 2px; top: 1px" aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-arrow-left">
<path fill-rule="evenodd" d="M7.78 12.53a.75.75 0 01-1.06 0L2.47 8.28a.75.75 0 010-1.06l4.25-4.25a.75.75 0 011.06 1.06L4.81 7h7.44a.75.75 0 010 1.5H4.81l2.97 2.97a.75.75 0 010 1.06z"></path>
</svg>
</button>
<h1 class="pt-1 pr-4 pb-0 pl-0 f5 text-bold">
Custom
</h1>
</header>
<fieldset>
<legend>
<div class="text-small color-text-secondary pt-0 pr-3 pb-3 pl-6 pl-sm-5 border-bottom mb-3">
Select events you want to be notified of in addition to participating and @mentions.
</div>
</legend>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="Issue"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Issues
</label>
<span
class="tooltipped tooltipped-nw mr-2 p-1 float-right"
aria-label="Issues are not enabled for this repo">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-info color-icon-secondary">
<path fill-rule="evenodd" d="M8 1.5a6.5 6.5 0 100 13 6.5 6.5 0 000-13zM0 8a8 8 0 1116 0A8 8 0 010 8zm6.5-.25A.75.75 0 017.25 7h1a.75.75 0 01.75.75v2.75h.25a.75.75 0 010 1.5h-2a.75.75 0 010-1.5h.25v-2h-.25a.75.75 0 01-.75-.75zM8 6a1 1 0 100-2 1 1 0 000 2z"></path>
</svg>
</span>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="PullRequest"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Pull requests
</label>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="Release"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Releases
</label>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="Discussion"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Discussions
</label>
<span
class="tooltipped tooltipped-nw mr-2 p-1 float-right"
aria-label="Discussions are not enabled for this repo">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-info color-icon-secondary">
<path fill-rule="evenodd" d="M8 1.5a6.5 6.5 0 100 13 6.5 6.5 0 000-13zM0 8a8 8 0 1116 0A8 8 0 010 8zm6.5-.25A.75.75 0 017.25 7h1a.75.75 0 01.75.75v2.75h.25a.75.75 0 010 1.5h-2a.75.75 0 010-1.5h.25v-2h-.25a.75.75 0 01-.75-.75zM8 6a1 1 0 100-2 1 1 0 000 2z"></path>
</svg>
</span>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="SecurityAlert"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Security alerts
</label>
</div>
</fieldset>
<div class="pt-2 pb-3 px-3 d-flex flex-justify-start flex-row-reverse">
<button
type="submit"
name="do"
value="custom"
class="btn btn-sm btn-primary ml-2"
data-target="notifications-list-subscription-form.customSubmit"
disabled
>Apply</button>
<button data-action="click:notifications-list-subscription-form#resetForm" data-close-dialog="" type="button" data-view-component="true" class="btn-sm btn">
Cancel
</button>
</div>
</form> </div>
</details-dialog>
<div class="notifications-component-dialog-overlay"></div>
</details>
<a class="social-count"
href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/watchers"
aria-label="0 users are watching this repository"
data-target="notifications-list-subscription-form.socialCount"
>
0
</a>
</notifications-list-subscription-form>
</li>
<li>
<div class="d-block js-toggler-container js-social-container starring-container ">
<form class="starred js-social-form" action="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/unstar" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="3uNixS+hyoRDBgeJXyF8K53p8EYgqV1fwKi9NUlOuXzFlw/1CbUciRXBPIS6WKTNHg+Aac9eAzAngXJ3WmGauA==" />
<input type="hidden" name="context" value="repository">
<button type="submit" class="btn btn-sm btn-with-count js-toggler-target" aria-label="Unstar this repository" title="Unstar Alirezajahandide27/CS-SBU-DataMining-Msc-projects" data-hydro-click="{"event_type":"repository.click","payload":{"target":"UNSTAR_BUTTON","repository_id":386585203,"originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="0a1cee9da22954eb14cbd31a11e6bfa2ac103aedd8ba12b5331365991261c902" data-ga-click="Repository, click unstar button, action:blob#show; text:Unstar"> <svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-star-fill mr-1">
<path fill-rule="evenodd" d="M8 .25a.75.75 0 01.673.418l1.882 3.815 4.21.612a.75.75 0 01.416 1.279l-3.046 2.97.719 4.192a.75.75 0 01-1.088.791L8 12.347l-3.766 1.98a.75.75 0 01-1.088-.79l.72-4.194L.818 6.374a.75.75 0 01.416-1.28l4.21-.611L7.327.668A.75.75 0 018 .25z"></path>
</svg>
<span data-view-component="true">
Unstar
</span></button> <a class="social-count js-social-count" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/stargazers"
aria-label="0 users starred this repository">
0
</a>
</form>
<form class="unstarred js-social-form" action="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/star" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="dswbatTApHt3QEXhUseu0dMzCg/nuqewPPgFDzBVUmYjYt4gwr0FgSusFgtuX5TBkEUiH4l7H+oedh/xL8A1Gw==" />
<input type="hidden" name="context" value="repository">
<button type="submit" class="btn btn-sm btn-with-count js-toggler-target" aria-label="Unstar this repository" title="Star Alirezajahandide27/CS-SBU-DataMining-Msc-projects" data-hydro-click="{"event_type":"repository.click","payload":{"target":"STAR_BUTTON","repository_id":386585203,"originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="ebaf476cfe6d7b186068932327f78043ced52541a51385bac794144dee1ed907" data-ga-click="Repository, click star button, action:blob#show; text:Star"> <svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-star mr-1">
<path fill-rule="evenodd" d="M8 .25a.75.75 0 01.673.418l1.882 3.815 4.21.612a.75.75 0 01.416 1.279l-3.046 2.97.719 4.192a.75.75 0 01-1.088.791L8 12.347l-3.766 1.98a.75.75 0 01-1.088-.79l.72-4.194L.818 6.374a.75.75 0 01.416-1.28l4.21-.611L7.327.668A.75.75 0 018 .25zm0 2.445L6.615 5.5a.75.75 0 01-.564.41l-3.097.45 2.24 2.184a.75.75 0 01.216.664l-.528 3.084 2.769-1.456a.75.75 0 01.698 0l2.77 1.456-.53-3.084a.75.75 0 01.216-.664l2.24-2.183-3.096-.45a.75.75 0 01-.564-.41L8 2.694v.001z"></path>
</svg>
<span data-view-component="true">
Star
</span></button> <a class="social-count js-social-count" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/stargazers"
aria-label="0 users starred this repository">
0
</a>
</form> </div>
</li>
<li>
<span class="btn btn-sm btn-with-count disabled tooltipped tooltipped-sw" aria-label="Cannot fork because you own this repository and are not a member of any organizations.">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-repo-forked">
<path fill-rule="evenodd" d="M5 3.25a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm0 2.122a2.25 2.25 0 10-1.5 0v.878A2.25 2.25 0 005.75 8.5h1.5v2.128a2.251 2.251 0 101.5 0V8.5h1.5a2.25 2.25 0 002.25-2.25v-.878a2.25 2.25 0 10-1.5 0v.878a.75.75 0 01-.75.75h-4.5A.75.75 0 015 6.25v-.878zm3.75 7.378a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm3-8.75a.75.75 0 100-1.5.75.75 0 000 1.5z"></path>
</svg>
Fork
</span>
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/network/members" class="social-count"
aria-label="24 users forked this repository">
24
</a>
</li>
</ul>
</div>
<nav data-pjax="#js-repo-pjax-container" aria-label="Repository" data-view-component="true" class="js-repo-nav js-sidenav-container-pjax js-responsive-underlinenav overflow-hidden UnderlineNav px-3 px-md-4 px-lg-5">
<ul data-view-component="true" class="UnderlineNav-body list-style-none">
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects" data-tab-item="i0code-tab" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages repo_deployments /Alirezajahandide27/CS-SBU-DataMining-Msc-projects" data-hotkey="g c" data-ga-click="Repository, Navigation click, Code tab" data-pjax="#repo-content-pjax-container" aria-current="page" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item selected">
<svg class="octicon octicon-code UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M4.72 3.22a.75.75 0 011.06 1.06L2.06 8l3.72 3.72a.75.75 0 11-1.06 1.06L.47 8.53a.75.75 0 010-1.06l4.25-4.25zm6.56 0a.75.75 0 10-1.06 1.06L13.94 8l-3.72 3.72a.75.75 0 101.06 1.06l4.25-4.25a.75.75 0 000-1.06l-4.25-4.25z"></path></svg>
<span data-content="Code">Code</span>
<span title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulls" data-tab-item="i1pull-requests-tab" data-selected-links="repo_pulls checks /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulls" data-hotkey="g p" data-ga-click="Repository, Navigation click, Pull requests tab" data-pjax="#repo-content-pjax-container" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-git-pull-request UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.177 3.073L9.573.677A.25.25 0 0110 .854v4.792a.25.25 0 01-.427.177L7.177 3.427a.25.25 0 010-.354zM3.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122v5.256a2.251 2.251 0 11-1.5 0V5.372A2.25 2.25 0 011.5 3.25zM11 2.5h-1V4h1a1 1 0 011 1v5.628a2.251 2.251 0 101.5 0V5A2.5 2.5 0 0011 2.5zm1 10.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0zM3.75 12a.75.75 0 100 1.5.75.75 0 000-1.5z"></path></svg>
<span data-content="Pull requests">Pull requests</span>
<span title="0" hidden="hidden" data-view-component="true" class="Counter">0</span>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/actions" data-tab-item="i2actions-tab" data-selected-links="repo_actions /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/actions" data-hotkey="g a" data-ga-click="Repository, Navigation click, Actions tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-play UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zM6.379 5.227A.25.25 0 006 5.442v5.117a.25.25 0 00.379.214l4.264-2.559a.25.25 0 000-.428L6.379 5.227z"></path></svg>
<span data-content="Actions">Actions</span>
<span title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/projects" data-tab-item="i3projects-tab" data-selected-links="repo_projects new_repo_project repo_project /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/projects" data-hotkey="g b" data-ga-click="Repository, Navigation click, Projects tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-project UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.75 0A1.75 1.75 0 000 1.75v12.5C0 15.216.784 16 1.75 16h12.5A1.75 1.75 0 0016 14.25V1.75A1.75 1.75 0 0014.25 0H1.75zM1.5 1.75a.25.25 0 01.25-.25h12.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25V1.75zM11.75 3a.75.75 0 00-.75.75v7.5a.75.75 0 001.5 0v-7.5a.75.75 0 00-.75-.75zm-8.25.75a.75.75 0 011.5 0v5.5a.75.75 0 01-1.5 0v-5.5zM8 3a.75.75 0 00-.75.75v3.5a.75.75 0 001.5 0v-3.5A.75.75 0 008 3z"></path></svg>
<span data-content="Projects">Projects</span>
<span title="0" hidden="hidden" data-view-component="true" class="Counter">0</span>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/wiki" data-tab-item="i4wiki-tab" data-selected-links="repo_wiki /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/wiki" data-hotkey="g w" data-ga-click="Repository, Navigation click, Wikis tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-book UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M0 1.75A.75.75 0 01.75 1h4.253c1.227 0 2.317.59 3 1.501A3.744 3.744 0 0111.006 1h4.245a.75.75 0 01.75.75v10.5a.75.75 0 01-.75.75h-4.507a2.25 2.25 0 00-1.591.659l-.622.621a.75.75 0 01-1.06 0l-.622-.621A2.25 2.25 0 005.258 13H.75a.75.75 0 01-.75-.75V1.75zm8.755 3a2.25 2.25 0 012.25-2.25H14.5v9h-3.757c-.71 0-1.4.201-1.992.572l.004-7.322zm-1.504 7.324l.004-5.073-.002-2.253A2.25 2.25 0 005.003 2.5H1.5v9h3.757a3.75 3.75 0 011.994.574z"></path></svg>
<span data-content="Wiki">Wiki</span>
<span title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/security" data-tab-item="i5security-tab" data-selected-links="security overview alerts policy token_scanning code_scanning /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/security" data-hotkey="g s" data-ga-click="Repository, Navigation click, Security tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-shield UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.467.133a1.75 1.75 0 011.066 0l5.25 1.68A1.75 1.75 0 0115 3.48V7c0 1.566-.32 3.182-1.303 4.682-.983 1.498-2.585 2.813-5.032 3.855a1.7 1.7 0 01-1.33 0c-2.447-1.042-4.049-2.357-5.032-3.855C1.32 10.182 1 8.566 1 7V3.48a1.75 1.75 0 011.217-1.667l5.25-1.68zm.61 1.429a.25.25 0 00-.153 0l-5.25 1.68a.25.25 0 00-.174.238V7c0 1.358.275 2.666 1.057 3.86.784 1.194 2.121 2.34 4.366 3.297a.2.2 0 00.154 0c2.245-.956 3.582-2.104 4.366-3.298C13.225 9.666 13.5 8.36 13.5 7V3.48a.25.25 0 00-.174-.237l-5.25-1.68zM9 10.5a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.75a.75.75 0 10-1.5 0v3a.75.75 0 001.5 0v-3z"></path></svg>
<span data-content="Security">Security</span>
<include-fragment src="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/security/overall-count" accept="text/fragment+html"></include-fragment>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulse" data-tab-item="i6insights-tab" data-selected-links="repo_graphs repo_contributors dependency_graph dependabot_updates pulse people community /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulse" data-ga-click="Repository, Navigation click, Insights tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-graph UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 1.75a.75.75 0 00-1.5 0v12.5c0 .414.336.75.75.75h14.5a.75.75 0 000-1.5H1.5V1.75zm14.28 2.53a.75.75 0 00-1.06-1.06L10 7.94 7.53 5.47a.75.75 0 00-1.06 0L3.22 8.72a.75.75 0 001.06 1.06L7 7.06l2.47 2.47a.75.75 0 001.06 0l5.25-5.25z"></path></svg>
<span data-content="Insights">Insights</span>
<span title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-flex">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/settings" data-tab-item="i7settings-tab" data-selected-links="repo_settings repo_branch_settings hooks integration_installations repo_keys_settings issue_template_editor secrets_settings key_links_settings repo_actions_settings notifications repository_environments interaction_limits /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/settings" data-ga-click="Repository, Navigation click, Settings tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg class="octicon octicon-gear UnderlineNav-octicon d-none d-sm-inline" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.429 1.525a6.593 6.593 0 011.142 0c.036.003.108.036.137.146l.289 1.105c.147.56.55.967.997 1.189.174.086.341.183.501.29.417.278.97.423 1.53.27l1.102-.303c.11-.03.175.016.195.046.219.31.41.641.573.989.014.031.022.11-.059.19l-.815.806c-.411.406-.562.957-.53 1.456a4.588 4.588 0 010 .582c-.032.499.119 1.05.53 1.456l.815.806c.08.08.073.159.059.19a6.494 6.494 0 01-.573.99c-.02.029-.086.074-.195.045l-1.103-.303c-.559-.153-1.112-.008-1.529.27-.16.107-.327.204-.5.29-.449.222-.851.628-.998 1.189l-.289 1.105c-.029.11-.101.143-.137.146a6.613 6.613 0 01-1.142 0c-.036-.003-.108-.037-.137-.146l-.289-1.105c-.147-.56-.55-.967-.997-1.189a4.502 4.502 0 01-.501-.29c-.417-.278-.97-.423-1.53-.27l-1.102.303c-.11.03-.175-.016-.195-.046a6.492 6.492 0 01-.573-.989c-.014-.031-.022-.11.059-.19l.815-.806c.411-.406.562-.957.53-1.456a4.587 4.587 0 010-.582c.032-.499-.119-1.05-.53-1.456l-.815-.806c-.08-.08-.073-.159-.059-.19a6.44 6.44 0 01.573-.99c.02-.029.086-.075.195-.045l1.103.303c.559.153 1.112.008 1.529-.27.16-.107.327-.204.5-.29.449-.222.851-.628.998-1.189l.289-1.105c.029-.11.101-.143.137-.146zM8 0c-.236 0-.47.01-.701.03-.743.065-1.29.615-1.458 1.261l-.29 1.106c-.017.066-.078.158-.211.224a5.994 5.994 0 00-.668.386c-.123.082-.233.09-.3.071L3.27 2.776c-.644-.177-1.392.02-1.82.63a7.977 7.977 0 00-.704 1.217c-.315.675-.111 1.422.363 1.891l.815.806c.05.048.098.147.088.294a6.084 6.084 0 000 .772c.01.147-.038.246-.088.294l-.815.806c-.474.469-.678 1.216-.363 1.891.2.428.436.835.704 1.218.428.609 1.176.806 1.82.63l1.103-.303c.066-.019.176-.011.299.071.213.143.436.272.668.386.133.066.194.158.212.224l.289 1.106c.169.646.715 1.196 1.458 1.26a8.094 8.094 0 001.402 0c.743-.064 1.29-.614 1.458-1.26l.29-1.106c.017-.066.078-.158.211-.224a5.98 5.98 0 00.668-.386c.123-.082.233-.09.3-.071l1.102.302c.644.177 1.392-.02 1.82-.63.268-.382.505-.789.704-1.217.315-.675.111-1.422-.364-1.891l-.814-.806c-.05-.048-.098-.147-.088-.294a6.1 6.1 0 000-.772c-.01-.147.039-.246.088-.294l.814-.806c.475-.469.679-1.216.364-1.891a7.992 7.992 0 00-.704-1.218c-.428-.609-1.176-.806-1.82-.63l-1.103.303c-.066.019-.176.011-.299-.071a5.991 5.991 0 00-.668-.386c-.133-.066-.194-.158-.212-.224L10.16 1.29C9.99.645 9.444.095 8.701.031A8.094 8.094 0 008 0zm1.5 8a1.5 1.5 0 11-3 0 1.5 1.5 0 013 0zM11 8a3 3 0 11-6 0 3 3 0 016 0z"></path></svg>
<span data-content="Settings">Settings</span>
<span title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
</ul>
<div style="visibility:hidden;" data-view-component="true" class="UnderlineNav-actions js-responsive-underlinenav-overflow position-absolute pr-3 pr-md-4 pr-lg-5 right-0"> <details data-view-component="true" class="details-overlay details-reset position-relative">
<summary role="button" data-view-component="true"> <div class="UnderlineNav-item mr-0 border-0">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
<span class="sr-only">More</span>
</div>
</summary>
<div data-view-component="true"> <details-menu role="menu" data-view-component="true" class="dropdown-menu dropdown-menu-sw">
<ul>
<li data-menu-item="i0code-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item selected dropdown-item" aria-current="page" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages repo_deployments /Alirezajahandide27/CS-SBU-DataMining-Msc-projects" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects">
Code
</a> </li>
<li data-menu-item="i1pull-requests-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_pulls checks /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulls" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulls">
Pull requests
</a> </li>
<li data-menu-item="i2actions-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_actions /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/actions" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/actions">
Actions
</a> </li>
<li data-menu-item="i3projects-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_projects new_repo_project repo_project /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/projects" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/projects">
Projects
</a> </li>
<li data-menu-item="i4wiki-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_wiki /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/wiki" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/wiki">
Wiki
</a> </li>
<li data-menu-item="i5security-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="security overview alerts policy token_scanning code_scanning /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/security" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/security">
Security
</a> </li>
<li data-menu-item="i6insights-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_graphs repo_contributors dependency_graph dependabot_updates pulse people community /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulse" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/pulse">
Insights
</a> </li>
<li data-menu-item="i7settings-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_settings repo_branch_settings hooks integration_installations repo_keys_settings issue_template_editor secrets_settings key_links_settings repo_actions_settings notifications repository_environments interaction_limits /Alirezajahandide27/CS-SBU-DataMining-Msc-projects/settings" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/settings">
Settings
</a> </li>
</ul>
</details-menu></div>
</details></div>
</nav>
</div>
<div class="container-xl clearfix new-discussion-timeline px-3 px-md-4 px-lg-5">
<div id="repo-content-pjax-container" class="repository-content " >
<div>
<a class="d-none js-permalink-shortcut" data-hotkey="y" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/71b66c82d32535a9f9073faf84850521471c71b2/Vala%20Khosravi%20-%2099422068/project%203/script.r">Permalink</a>
<!-- blob contrib key: blob_contributors:v22:c4f8f36c7e3f90adb7d0fc0e126310dd355c8b04993b93a8f745da27092607da -->
<div class="d-flex flex-items-start flex-shrink-0 pb-3 flex-wrap flex-md-nowrap flex-justify-between flex-md-justify-start">
<div class="position-relative">
<details class="details-reset details-overlay mr-0 mb-0 " id="branch-select-menu">
<summary class="btn css-truncate"
data-hotkey="w"
title="Switch branches or tags">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-git-branch text-gray">
<path fill-rule="evenodd" d="M11.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122V6A2.5 2.5 0 0110 8.5H6a1 1 0 00-1 1v1.128a2.251 2.251 0 11-1.5 0V5.372a2.25 2.25 0 111.5 0v1.836A2.492 2.492 0 016 7h4a1 1 0 001-1v-.628A2.25 2.25 0 019.5 3.25zM4.25 12a.75.75 0 100 1.5.75.75 0 000-1.5zM3.5 3.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0z"></path>
</svg>
<span class="css-truncate-target" data-menu-button>main</span>
<span class="dropdown-caret"></span>
</summary>
<div class="SelectMenu">
<div class="SelectMenu-modal">
<header class="SelectMenu-header">
<span class="SelectMenu-title">Switch branches/tags</span>
<button class="SelectMenu-closeButton" type="button" data-toggle-for="branch-select-menu"><svg aria-label="Close menu" aria-hidden="false" role="img" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg></button>
</header>
<input-demux data-action="tab-container-change:input-demux#storeInput tab-container-changed:input-demux#updateInput">
<tab-container class="d-flex flex-column js-branches-tags-tabs" style="min-height: 0;">
<div class="SelectMenu-filter">
<input data-target="input-demux.source"
id="context-commitish-filter-field"
class="SelectMenu-input form-control"
aria-owns="ref-list-branches"
data-controls-ref-menu-id="ref-list-branches"
autofocus
autocomplete="off"
aria-label="Find or create a branch…"
placeholder="Find or create a branch…"
type="text"
>
</div>
<div class="SelectMenu-tabs" role="tablist" data-target="input-demux.control" >
<button class="SelectMenu-tab" type="button" role="tab" aria-selected="true">Branches</button>
<button class="SelectMenu-tab" type="button" role="tab">Tags</button>
</div>
<div role="tabpanel" id="ref-list-branches" data-filter-placeholder="Find or create a branch…" class="d-flex flex-column flex-auto overflow-auto" tabindex="">
<ref-selector
type="branch"
data-targets="input-demux.sinks"
data-action="
input-entered:ref-selector#inputEntered
tab-selected:ref-selector#tabSelected
focus-list:ref-selector#focusFirstListMember
"
query-endpoint="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/refs"
current-user-can-push
cache-key="v0:1626428408.914798"
current-committish="bWFpbg=="
default-branch="bWFpbg=="
name-with-owner="QWxpcmV6YWphaGFuZGlkZTI3L0NTLVNCVS1EYXRhTWluaW5nLU1zYy1wcm9qZWN0cw=="
>
<template data-target="ref-selector.fetchFailedTemplate">
<div class="SelectMenu-message" data-index="{{ index }}">Could not load branches</div>
</template>
<template data-target="ref-selector.noMatchTemplate">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form action="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/branches" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="MbpRzcSh10HLy84DOv0zXZckoBEhuCMR6JNaYU1+JKJ4WXZGO/lZj56GGKFE7CtlG5GOWK8obA0tADNbgwPcIA==" />
<input type="hidden" name="name" value="{{ refName }}">
<input type="hidden" name="branch" value="main">
<input type="hidden" name="path_binary" value="VmFsYSBLaG9zcmF2aSAtIDk5NDIyMDY4L3Byb2plY3QgMy9zY3JpcHQucg==">
<button class="SelectMenu-item break-word" type="submit" role="menuitem" data-index="{{ index }}">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-git-branch SelectMenu-icon flex-self-baseline">
<path fill-rule="evenodd" d="M11.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122V6A2.5 2.5 0 0110 8.5H6a1 1 0 00-1 1v1.128a2.251 2.251 0 11-1.5 0V5.372a2.25 2.25 0 111.5 0v1.836A2.492 2.492 0 016 7h4a1 1 0 001-1v-.628A2.25 2.25 0 019.5 3.25zM4.25 12a.75.75 0 100 1.5.75.75 0 000-1.5zM3.5 3.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0z"></path>
</svg>
<div>
<span class="text-bold">Create branch: {{ refName }}</span>
<span class="color-text-tertiary">from ‘main’</span>
</div>
</button>
</form></template>
<!-- TODO: this max-height is necessary or else the branch list won't scroll. why? -->
<div data-target="ref-selector.listContainer" role="menu" class="SelectMenu-list " style="max-height: 330px" data-pjax="#repo-content-pjax-container">
<div class="SelectMenu-loading pt-3 pb-0" aria-label="Menu is loading">
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" viewBox="0 0 16 16" fill="none" data-view-component="true" width="32" height="32" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</div>
</div>
<template data-target="ref-selector.itemTemplate">
<a href="https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/{{ urlEncodedRefName }}/Vala%20Khosravi%20-%2099422068/project%203/script.r" class="SelectMenu-item" role="menuitemradio" rel="nofollow" aria-checked="{{ isCurrent }}" data-index="{{ index }}">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
<span class="flex-1 css-truncate css-truncate-overflow {{ isFilteringClass }}">{{ refName }}</span>
<span hidden="{{ isNotDefault }}" class="Label Label--secondary flex-self-start">default</span>
</a>
</template>
<footer class="SelectMenu-footer"><a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/branches">View all branches</a></footer>
</ref-selector>
</div>
<div role="tabpanel" id="tags-menu" data-filter-placeholder="Find a tag" class="d-flex flex-column flex-auto overflow-auto" tabindex="" hidden>
<ref-selector
type="tag"
data-action="
input-entered:ref-selector#inputEntered
tab-selected:ref-selector#tabSelected
focus-list:ref-selector#focusFirstListMember
"
data-targets="input-demux.sinks"
query-endpoint="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/refs"
cache-key="v0:1626428408.914798"
current-committish="bWFpbg=="
default-branch="bWFpbg=="
name-with-owner="QWxpcmV6YWphaGFuZGlkZTI3L0NTLVNCVS1EYXRhTWluaW5nLU1zYy1wcm9qZWN0cw=="
>
<template data-target="ref-selector.fetchFailedTemplate">
<div class="SelectMenu-message" data-index="{{ index }}">Could not load tags</div>
</template>
<template data-target="ref-selector.noMatchTemplate">
<div class="SelectMenu-message" data-index="{{ index }}">Nothing to show</div>
</template>
<template data-target="ref-selector.itemTemplate">
<a href="https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/{{ urlEncodedRefName }}/Vala%20Khosravi%20-%2099422068/project%203/script.r" class="SelectMenu-item" role="menuitemradio" rel="nofollow" aria-checked="{{ isCurrent }}" data-index="{{ index }}">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
<span class="flex-1 css-truncate css-truncate-overflow {{ isFilteringClass }}">{{ refName }}</span>
<span hidden="{{ isNotDefault }}" class="Label Label--secondary flex-self-start">default</span>
</a>
</template>
<div data-target="ref-selector.listContainer" role="menu" class="SelectMenu-list" style="max-height: 330px" data-pjax="#repo-content-pjax-container">
<div class="SelectMenu-loading pt-3 pb-0" aria-label="Menu is loading">
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" viewBox="0 0 16 16" fill="none" data-view-component="true" width="32" height="32" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</div>
</div>
<footer class="SelectMenu-footer"><a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/tags">View all tags</a></footer>
</ref-selector>
</div>
</tab-container>
</input-demux>
</div>
</div>
</details>
</div>
<h2 id="blob-path" class="breadcrumb flex-auto flex-self-center min-width-0 text-normal mx-2 width-full width-md-auto flex-order-1 flex-md-order-none mt-3 mt-md-0">
<span class="js-repo-root text-bold"><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="#repo-content-pjax-container" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects"><span>CS-SBU-DataMining-Msc-projects</span></a></span></span><span class="separator">/</span><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="#repo-content-pjax-container" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/tree/main/Vala%20Khosravi%20-%2099422068"><span>Vala Khosravi - 99422068</span></a></span><span class="separator">/</span><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="#repo-content-pjax-container" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/tree/main/Vala%20Khosravi%20-%2099422068/project%203"><span>project 3</span></a></span><span class="separator">/</span><strong class="final-path">script.r</strong>
</h2>
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/find/main"
class="js-pjax-capture-input btn mr-2 d-none d-md-block"
data-pjax
data-hotkey="t">
Go to file
</a>
<details id="blob-more-options-details" data-view-component="true" class="details-overlay details-reset position-relative">
<summary role="button" data-view-component="true" class="btn">
<svg aria-label="More options" role="img" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
</summary>
<div data-view-component="true"> <ul class="dropdown-menu dropdown-menu-sw">
<li class="d-block d-md-none">
<a class="dropdown-item d-flex flex-items-baseline" data-hydro-click="{"event_type":"repository.click","payload":{"target":"FIND_FILE_BUTTON","repository_id":386585203,"originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="8c7613a6c223e18044e9ea96a75dbf7906530e19be9cf48f6f3e9374f009cdd1" data-ga-click="Repository, find file, location:repo overview" data-hotkey="t" data-pjax="true" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/find/main">
<span class="flex-auto">Go to file</span>
<span class="text-small color-text-secondary" aria-hidden="true">T</span>
</a> </li>
<li data-toggle-for="blob-more-options-details">
<button type="button" data-toggle-for="jumpto-line-details-dialog" class="btn-link dropdown-item">
<span class="d-flex flex-items-baseline">
<span class="flex-auto">Go to line</span>
<span class="text-small color-text-secondary" aria-hidden="true">L</span>
</span>
</button>
</li>
<li class="dropdown-divider" role="none"></li>
<li>
<clipboard-copy value="Vala Khosravi - 99422068/project 3/script.r" class="dropdown-item cursor-pointer" data-toggle-for="blob-more-options-details">
Copy path
</clipboard-copy>
</li>
<li>
<clipboard-copy value="https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/71b66c82d32535a9f9073faf84850521471c71b2/Vala%20Khosravi%20-%2099422068/project%203/script.r" class="dropdown-item cursor-pointer" data-toggle-for="blob-more-options-details" >
<span class="d-flex flex-items-baseline">
<span class="flex-auto">Copy permalink</span>
</span>
</clipboard-copy>
</li>
</ul>
</div>
</details> </div>
<div class="Box d-flex flex-column flex-shrink-0 mb-3">
<include-fragment src="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/contributors/main/Vala%20Khosravi%20-%2099422068/project%203/script.r" class="commit-loader">
<div class="Box-header Box-header--blue d-flex flex-items-center">
<div class="Skeleton avatar avatar-user flex-shrink-0 ml-n1 mr-n1 mt-n1 mb-n1" style="width:24px;height:24px;"></div>
<div class="Skeleton Skeleton--text col-5 ml-2"> </div>
</div>
<div class="Box-body d-flex flex-items-center" >
<div class="Skeleton Skeleton--text col-1"> </div>
<span class="color-text-danger h6 loader-error">Cannot retrieve contributors at this time</span>
</div>
</include-fragment> </div>
<div data-target="readme-toc.content" class="Box mt-3 position-relative
">
<div
class="Box-header py-2 pr-2 d-flex flex-shrink-0 flex-md-row flex-items-center"
>
<div class="text-mono f6 flex-auto pr-3 flex-order-2 flex-md-order-1">
56 lines (51 sloc)
<span class="file-info-divider"></span>
2.2 KB
</div>
<div class="d-flex py-1 py-md-0 flex-auto flex-order-1 flex-md-order-2 flex-sm-grow-0 flex-justify-between hide-sm hide-md">
<div class="BtnGroup">
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/raw/main/Vala%20Khosravi%20-%2099422068/project%203/script.r" id="raw-url" role="button" data-view-component="true" class="btn-sm btn BtnGroup-item">
Raw
</a>
<a href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blame/main/Vala%20Khosravi%20-%2099422068/project%203/script.r" data-hotkey="b" role="button" data-view-component="true" class="js-update-url-with-hash btn-sm btn BtnGroup-item">
Blame
</a>
</div>
<div>
<a class="btn-octicon tooltipped tooltipped-nw js-remove-unless-platform"
data-platforms="windows,mac"
href="https://desktop.github.com"
aria-label="Open this file in GitHub Desktop"
data-ga-click="Repository, open with desktop">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-device-desktop">
<path fill-rule="evenodd" d="M1.75 2.5h12.5a.25.25 0 01.25.25v7.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25v-7.5a.25.25 0 01.25-.25zM14.25 1H1.75A1.75 1.75 0 000 2.75v7.5C0 11.216.784 12 1.75 12h3.727c-.1 1.041-.52 1.872-1.292 2.757A.75.75 0 004.75 16h6.5a.75.75 0 00.565-1.243c-.772-.885-1.193-1.716-1.292-2.757h3.727A1.75 1.75 0 0016 10.25v-7.5A1.75 1.75 0 0014.25 1zM9.018 12H6.982a5.72 5.72 0 01-.765 2.5h3.566a5.72 5.72 0 01-.765-2.5z"></path>
</svg>
</a>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="inline-form js-update-url-with-hash" action="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/edit/main/Vala%20Khosravi%20-%2099422068/project%203/script.r" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="XWytN6gZvDGCkSA0g4Nq2CBXeylr614A+hJllRAMlmZR81St0zIx62DxJVy0o55deVGrIRq43lxfzfYRfAnV1w==" />
<button class="btn-octicon tooltipped tooltipped-nw" type="submit"
aria-label="Edit this file" data-hotkey="e" data-disable-with>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-pencil">
<path fill-rule="evenodd" d="M11.013 1.427a1.75 1.75 0 012.474 0l1.086 1.086a1.75 1.75 0 010 2.474l-8.61 8.61c-.21.21-.47.364-.756.445l-3.251.93a.75.75 0 01-.927-.928l.929-3.25a1.75 1.75 0 01.445-.758l8.61-8.61zm1.414 1.06a.25.25 0 00-.354 0L10.811 3.75l1.439 1.44 1.263-1.263a.25.25 0 000-.354l-1.086-1.086zM11.189 6.25L9.75 4.81l-6.286 6.287a.25.25 0 00-.064.108l-.558 1.953 1.953-.558a.249.249 0 00.108-.064l6.286-6.286z"></path>
</svg>
</button>
</form>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="inline-form" action="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/delete/main/Vala%20Khosravi%20-%2099422068/project%203/script.r" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="n0/lk7OkFd6WL/lS2rA/SKUR7DBB6R20bxs3iZ/DkU2WKIeHRj/kQieiARitfueI85Z5E2AXb7EK5OAugbE+/g==" />
<button class="btn-octicon btn-octicon-danger tooltipped tooltipped-nw" type="submit"
aria-label="Delete this file" data-disable-with>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-trash">
<path fill-rule="evenodd" d="M6.5 1.75a.25.25 0 01.25-.25h2.5a.25.25 0 01.25.25V3h-3V1.75zm4.5 0V3h2.25a.75.75 0 010 1.5H2.75a.75.75 0 010-1.5H5V1.75C5 .784 5.784 0 6.75 0h2.5C10.216 0 11 .784 11 1.75zM4.496 6.675a.75.75 0 10-1.492.15l.66 6.6A1.75 1.75 0 005.405 15h5.19c.9 0 1.652-.681 1.741-1.576l.66-6.6a.75.75 0 00-1.492-.149l-.66 6.6a.25.25 0 01-.249.225h-5.19a.25.25 0 01-.249-.225l-.66-6.6z"></path>
</svg>
</button>
</form> </div>
</div>
<div class="d-flex hide-lg hide-xl flex-order-2 flex-grow-0">
<details class="dropdown details-reset details-overlay d-inline-block">
<summary class="btn-octicon" aria-haspopup="true" aria-label="possible actions">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
</summary>
<ul class="dropdown-menu dropdown-menu-sw">
<li>
<a class="dropdown-item tooltipped tooltipped-nw js-remove-unless-platform"
data-platforms="windows,mac"
href="https://desktop.github.com"
data-ga-click="Repository, open with desktop">
Open with Desktop
</a>
</li>
<li>
<a class="dropdown-item" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/raw/main/Vala%20Khosravi%20-%2099422068/project%203/script.r">
View raw
</a>
</li>
<li>
<a class="dropdown-item" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blame/main/Vala%20Khosravi%20-%2099422068/project%203/script.r">
View blame
</a>
</li>
<li class="dropdown-divider" role="none"></li>
<li>
<a class="dropdown-item" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/edit/main/Vala%20Khosravi%20-%2099422068/project%203/script.r">Edit file</a>
</li>
<li>
<a class="dropdown-item menu-item-danger" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/delete/main/Vala%20Khosravi%20-%2099422068/project%203/script.r">Delete file</a>
</li>
</ul>
</details>
</div>
</div>
<div itemprop="text" class="Box-body p-0 blob-wrapper data type-r gist-border-0">
<table class="highlight tab-size js-file-line-container" data-tab-size="8" data-paste-markdown-skip>
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line">library(<span class="pl-smi">e1071</span>)</td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line">library(<span class="pl-smi">kernlab</span>)</td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line">library(<span class="pl-smi">ggplot2</span>)</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line">library(<span class="pl-smi">dplyr</span>)</td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line">library(<span class="pl-smi">rbin</span>)</td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line">setwd(<span class="pl-s"><span class="pl-pds">"</span>~/Documents/Education/Master/data<span class="pl-cce">\ </span>mining/exercises/3/<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">train_df</span> <span class="pl-k"><-</span> as.data.frame(read.csv(<span class="pl-s"><span class="pl-pds">"</span>train.csv<span class="pl-pds">"</span></span>))</td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">test_df</span> <span class="pl-k"><-</span> sample_n(<span class="pl-smi">train_df</span>, <span class="pl-c1">200</span>) </td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">kernels</span> <span class="pl-k"><-</span> c(<span class="pl-s"><span class="pl-pds">'</span>linear<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>polynomial<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>radial<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>sigmoid<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span> <span class="pl-k"><-</span> as.factor(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span>)</td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">test_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span> <span class="pl-k"><-</span> as.factor(<span class="pl-smi">test_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span>)</td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line">plot(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">ram</span>, <span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>, <span class="pl-v">col</span> <span class="pl-k">=</span> <span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span>)</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> ggplot(train_df, aes(x=battery_power, y=ram, color=as.factor(price_range))) + geom_point()</span></td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line"><span class="pl-k">for</span> (<span class="pl-smi">kernel</span> <span class="pl-k">in</span> <span class="pl-smi">kernels</span>) {</td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line"> <span class="pl-smi">smv_model</span> <span class="pl-k"><-</span> svm(<span class="pl-smi">price_range</span> <span class="pl-k">~</span> <span class="pl-smi">.</span> , <span class="pl-v">data</span> <span class="pl-k">=</span> <span class="pl-smi">train_df</span>, <span class="pl-v">kernel</span> <span class="pl-k">=</span> <span class="pl-smi">kernel</span>)</td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code blob-code-inner js-file-line"> <span class="pl-smi">pred</span> <span class="pl-k"><-</span> predict(<span class="pl-smi">smv_model</span>, <span class="pl-smi">test_df</span>)</td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code blob-code-inner js-file-line"> plot(<span class="pl-smi">smv_model</span>, <span class="pl-smi">test_df</span>, <span class="pl-smi">battery_power</span><span class="pl-k">~</span><span class="pl-smi">ram</span>, <span class="pl-v">main</span><span class="pl-k">=</span><span class="pl-smi">kernel</span>)</td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code blob-code-inner js-file-line"> title(<span class="pl-v">main</span><span class="pl-k">=</span> paste(<span class="pl-s"><span class="pl-pds">"</span> <span class="pl-pds">"</span></span>, <span class="pl-smi">kernel</span>, <span class="pl-v">sep</span><span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">"</span> <span class="pl-pds">"</span></span>))</td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code blob-code-inner js-file-line"> print(c(<span class="pl-smi">kernel</span>,table(<span class="pl-smi">pred</span>)))</td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">error</span> <span class="pl-k">=</span> <span class="pl-c1">0</span></td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> (<span class="pl-smi">i</span> <span class="pl-k">in</span> (<span class="pl-c1">1</span><span class="pl-k">:</span> nrow(<span class="pl-smi">test_df</span>))) {</td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> (<span class="pl-smi">test_df</span>[<span class="pl-smi">i</span>,]<span class="pl-k">$</span><span class="pl-smi">price_range</span> <span class="pl-k">!=</span> <span class="pl-smi">pred</span>[<span class="pl-smi">i</span>]) {</td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">error</span> <span class="pl-k">=</span> <span class="pl-smi">error</span> <span class="pl-k">+</span> <span class="pl-c1">1</span></td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code blob-code-inner js-file-line"> }</td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code blob-code-inner js-file-line"> }</td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code blob-code-inner js-file-line"> print(c(<span class="pl-s"><span class="pl-pds">'</span>error rate<span class="pl-pds">'</span></span>, <span class="pl-smi">error</span> <span class="pl-k">/</span> nrow(<span class="pl-smi">test_df</span>)))</td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code blob-code-inner js-file-line">}</td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">smv_model</span> <span class="pl-k"><-</span> svm(<span class="pl-smi">price_range</span> <span class="pl-k">~</span> <span class="pl-smi">.</span> , <span class="pl-v">data</span> <span class="pl-k">=</span> <span class="pl-smi">train_df</span>, <span class="pl-v">kernel</span> <span class="pl-k">=</span> <span class="pl-s"><span class="pl-pds">'</span>polynomial<span class="pl-pds">'</span></span>, <span class="pl-v">degree</span><span class="pl-k">=</span><span class="pl-c1">4</span>)</td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">pred</span> <span class="pl-k"><-</span> predict(<span class="pl-smi">smv_model</span>, <span class="pl-smi">test_df</span>)</td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code blob-code-inner js-file-line">plot(<span class="pl-smi">smv_model</span>, <span class="pl-smi">test_df</span>, <span class="pl-smi">battery_power</span><span class="pl-k">~</span><span class="pl-smi">ram</span>, <span class="pl-v">main</span><span class="pl-k">=</span><span class="pl-smi">kernel</span>)</td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code blob-code-inner js-file-line">title(<span class="pl-v">main</span><span class="pl-k">=</span> paste(<span class="pl-s"><span class="pl-pds">"</span> <span class="pl-pds">"</span></span>, <span class="pl-smi">kernel</span>, <span class="pl-v">sep</span><span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">"</span> <span class="pl-pds">"</span></span>))</td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code blob-code-inner js-file-line">print(c(<span class="pl-smi">kernel</span>,table(<span class="pl-smi">pred</span>)))</td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code blob-code-inner js-file-line"><span class="pl-v">error</span> <span class="pl-k">=</span> <span class="pl-c1">0</span></td>
</tr>
<tr>
<td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
<td id="LC35" class="blob-code blob-code-inner js-file-line"><span class="pl-k">for</span> (<span class="pl-smi">i</span> <span class="pl-k">in</span> (<span class="pl-c1">1</span><span class="pl-k">:</span> nrow(<span class="pl-smi">test_df</span>))) {</td>
</tr>
<tr>
<td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
<td id="LC36" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> (<span class="pl-smi">test_df</span>[<span class="pl-smi">i</span>,]<span class="pl-k">$</span><span class="pl-smi">price_range</span> <span class="pl-k">!=</span> <span class="pl-smi">pred</span>[<span class="pl-smi">i</span>]) {</td>
</tr>
<tr>
<td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
<td id="LC37" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">error</span> <span class="pl-k">=</span> <span class="pl-smi">error</span> <span class="pl-k">+</span> <span class="pl-c1">1</span></td>
</tr>
<tr>
<td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
<td id="LC38" class="blob-code blob-code-inner js-file-line"> }</td>
</tr>
<tr>
<td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
<td id="LC39" class="blob-code blob-code-inner js-file-line">}</td>
</tr>
<tr>
<td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
<td id="LC40" class="blob-code blob-code-inner js-file-line">print(c(<span class="pl-s"><span class="pl-pds">'</span>error rate<span class="pl-pds">'</span></span>, <span class="pl-smi">error</span> <span class="pl-k">/</span> nrow(<span class="pl-smi">test_df</span>)))</td>
</tr>
<tr>
<td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
<td id="LC41" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
<td id="LC42" class="blob-code blob-code-inner js-file-line">summary(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>)</td>
</tr>
<tr>
<td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
<td id="LC43" class="blob-code blob-code-inner js-file-line"><span class="pl-v">range1</span> <span class="pl-k">=</span> seq(<span class="pl-c1">500</span>, <span class="pl-c1">1500</span>, <span class="pl-v">by</span> <span class="pl-k">=</span> <span class="pl-c1">100</span>)</td>
</tr>
<tr>
<td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
<td id="LC44" class="blob-code blob-code-inner js-file-line">hist(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>, <span class="pl-v">breaks</span> <span class="pl-k">=</span> seq(min(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>), max(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>), <span class="pl-v">length.out</span> <span class="pl-k">=</span> <span class="pl-c1">11</span>))</td>
</tr>
<tr>
<td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
<td id="LC45" class="blob-code blob-code-inner js-file-line">hist(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>, <span class="pl-v">breaks</span> <span class="pl-k">=</span> seq(min(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>), max(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>), <span class="pl-v">length.out</span> <span class="pl-k">=</span> <span class="pl-c1">111</span>))</td>
</tr>
<tr>
<td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
<td id="LC46" class="blob-code blob-code-inner js-file-line">hist(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>, <span class="pl-v">breaks</span> <span class="pl-k">=</span> c(min(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>),<span class="pl-c1">600</span>,<span class="pl-c1">640</span>,<span class="pl-c1">670</span>,<span class="pl-c1">1100</span>,<span class="pl-c1">1250</span>,<span class="pl-c1">1300</span>, <span class="pl-c1">1410</span>, <span class="pl-c1">1900</span>,max(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">battery_power</span>)))</td>
</tr>
<tr>
<td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
<td id="LC47" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
<td id="LC48" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
<td id="LC49" class="blob-code blob-code-inner js-file-line">library(<span class="pl-smi">caret</span>)</td>
</tr>
<tr>
<td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
<td id="LC50" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span> <span class="pl-k"><-</span> as.factor(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">price_range</span>)</td>
</tr>
<tr>
<td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
<td id="LC51" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">blue</span> <span class="pl-k"><-</span> as.factor(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">blue</span>)</td>
</tr>
<tr>
<td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
<td id="LC52" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">touch_screen</span> <span class="pl-k"><-</span> as.factor(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">touch_screen</span>)</td>
</tr>
<tr>
<td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
<td id="LC53" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">three_g</span> <span class="pl-k"><-</span> as.factor(<span class="pl-smi">train_df</span><span class="pl-k">$</span><span class="pl-smi">three_g</span>)</td>
</tr>
<tr>
<td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
<td id="LC54" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">dummy</span> <span class="pl-k"><-</span> dummyVars(<span class="pl-s"><span class="pl-pds">"</span> ~ .<span class="pl-pds">"</span></span>, <span class="pl-v">data</span><span class="pl-k">=</span><span class="pl-smi">train_df</span>)</td>
</tr>
<tr>
<td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
<td id="LC55" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">newdata</span> <span class="pl-k"><-</span> <span class="pl-k">data.frame</span>(predict(<span class="pl-smi">dummy</span>, <span class="pl-v">newdata</span> <span class="pl-k">=</span> <span class="pl-smi">train_df</span>)) </td>
</tr>
<tr>
<td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
<td id="LC56" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
</table>
<details class="details-reset details-overlay BlobToolbar position-absolute js-file-line-actions dropdown d-none" aria-hidden="true">
<summary class="btn-octicon ml-0 px-2 p-0 color-bg-primary border color-border-tertiary rounded-1" aria-label="Inline file action toolbar">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
</summary>
<details-menu>
<ul class="BlobToolbar-dropdown dropdown-menu dropdown-menu-se mt-2" style="width:185px">
<li>
<clipboard-copy role="menuitem" class="dropdown-item" id="js-copy-lines" style="cursor:pointer;">
Copy lines
</clipboard-copy>
</li>
<li>
<clipboard-copy role="menuitem" class="dropdown-item" id="js-copy-permalink" style="cursor:pointer;">
Copy permalink
</clipboard-copy>
</li>
<li><a class="dropdown-item js-update-url-with-hash" id="js-view-git-blame" role="menuitem" href="/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blame/71b66c82d32535a9f9073faf84850521471c71b2/Vala%20Khosravi%20-%2099422068/project%203/script.r">View git blame</a></li>
</ul>
</details-menu>
</details>
</div>
</div>
<details class="details-reset details-overlay details-overlay-dark" id="jumpto-line-details-dialog">
<summary data-hotkey="l" aria-label="Jump to line"></summary>
<details-dialog class="Box Box--overlay d-flex flex-column anim-fade-in fast linejump" aria-label="Jump to line">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="js-jump-to-line-form Box-body d-flex" action="" accept-charset="UTF-8" method="get">
<input class="form-control flex-auto mr-3 linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
<button data-close-dialog="" type="submit" data-view-component="true" class="btn">
Go
</button>
</form> </details-dialog>
</details>
</div>
</div>
</div>
</main>
</div>
</div>
<div class="footer container-xl width-full p-responsive" role="contentinfo">
<div class="position-relative d-flex flex-row-reverse flex-lg-row flex-wrap flex-lg-nowrap flex-justify-center flex-lg-justify-between pt-6 pb-2 mt-6 f6 color-text-secondary border-top color-border-secondary ">
<ul class="list-style-none d-flex flex-wrap col-12 col-lg-5 flex-justify-center flex-lg-justify-between mb-2 mb-lg-0">
<li class="mr-3 mr-lg-0">© 2021 GitHub, Inc.</li>
<li class="mr-3 mr-lg-0"><a href="https://docs.github.com/en/github/site-policy/github-terms-of-service" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to terms","label":"text:terms","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="a70958990997c84e234c62898ecea8beb3d5237a3f9fa00949090b5cf768f032">Terms</a></li>
<li class="mr-3 mr-lg-0"><a href="https://docs.github.com/en/github/site-policy/github-privacy-statement" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to privacy","label":"text:privacy","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="ce666fc20aca37d66e2d6822bef3c935be5a2c8e6844a623f3710b406febbadd">Privacy</a></li>
<li class="mr-3 mr-lg-0"><a data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to security","label":"text:security","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="e68f165cdbabc80632b9109642c00be92a3226172fb3fda3966d0cc09f4df0d1" href="https://github.com/security">Security</a></li>
<li class="mr-3 mr-lg-0"><a href="https://www.githubstatus.com/" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to status","label":"text:status","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="290f6291cf04a56ff1fc16817ee0145aead58847893ef411da938cbd9d2ba13c">Status</a></li>
<li><a data-ga-click="Footer, go to help, text:Docs" href="https://docs.github.com">Docs</a></li>
</ul>
<a aria-label="Homepage" title="GitHub" class="footer-octicon d-none d-lg-block mx-lg-4" href="https://github.com">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="24" width="24" class="octicon octicon-mark-github">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path>
</svg>
</a>
<ul class="list-style-none d-flex flex-wrap col-12 col-lg-5 flex-justify-center flex-lg-justify-between mb-2 mb-lg-0">
<li class="mr-3 mr-lg-0"><a href="https://support.github.com" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to contact","label":"text:contact","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="e97d2f6da29a3bd33875011775ddf8c3f9b83137bc5be62bd066a3b9e354cfe3">Contact GitHub</a></li>
<li class="mr-3 mr-lg-0"><a href="https://github.com/pricing" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to Pricing","label":"text:Pricing","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="c050429879f3d75341b7902a0d123fb148b98149105a21780fbd1c0b02218386">Pricing</a></li>
<li class="mr-3 mr-lg-0"><a href="https://docs.github.com" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to api","label":"text:api","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="f40ac7fac262ba6c9fa652123f1ffb18bbb1cc81e6f57eb84b644c05147e94c8">API</a></li>
<li class="mr-3 mr-lg-0"><a href="https://services.github.com" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to training","label":"text:training","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="d0636ec355cf2a6aada4b3f7427b5595dc42a75b0f719e4cc7814d7407a5753a">Training</a></li>
<li class="mr-3 mr-lg-0"><a href="https://github.blog" data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Footer","action":"go to blog","label":"text:blog","originating_url":"https://github.com/Alirezajahandide27/CS-SBU-DataMining-Msc-projects/blob/main/Vala%20Khosravi%20-%2099422068/project%203/script.r","user_id":87523315}}" data-hydro-click-hmac="75c076261b34c6d2eb9c72f154d6275ae2595b31a5dafd488a6543570d580b40">Blog</a></li>
<li><a data-ga-click="Footer, go to about, text:about" href="https://github.com/about">About</a></li>
</ul>
</div>
<div class="d-flex flex-justify-center pb-6">
<span class="f6 color-text-tertiary"></span>
</div>
</div>
<div id="ajax-error-message" class="ajax-error-message flash flash-error" hidden>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-alert">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
<button type="button" class="flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
You can’t perform that action at this time.
</div>
<div class="js-stale-session-flash flash flash-warn flash-banner" hidden
>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-alert">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
<span class="js-stale-session-flash-signed-in" hidden>You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
<span class="js-stale-session-flash-signed-out" hidden>You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
</div>
<template id="site-details-dialog">
<details class="details-reset details-overlay details-overlay-dark lh-default color-text-primary hx_rsm" open>
<summary role="button" aria-label="Close dialog"></summary>
<details-dialog class="Box Box--overlay d-flex flex-column anim-fade-in fast hx_rsm-dialog hx_rsm-modal">
<button class="Box-btn-octicon m-0 btn-octicon position-absolute right-0 top-0" type="button" aria-label="Close dialog" data-close-dialog>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
<div class="octocat-spinner my-6 js-details-dialog-spinner"></div>
</details-dialog>
</details>
</template>
<div class="Popover js-hovercard-content position-absolute" style="display: none; outline: none;" tabindex="0">
<div class="Popover-message Popover-message--bottom-left Popover-message--large Box color-shadow-large" style="width:360px;">
</div>
</div>
<template id="snippet-clipboard-copy-button">
<div class="zeroclipboard-container position-absolute right-0 top-0">
<clipboard-copy aria-label="Copy" class="ClipboardButton btn js-clipboard-copy m-2 p-0 tooltipped-no-delay" data-copy-feedback="Copied!" data-tooltip-direction="w">
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-clippy js-clipboard-clippy-icon m-2">
<path fill-rule="evenodd" d="M5.75 1a.75.75 0 00-.75.75v3c0 .414.336.75.75.75h4.5a.75.75 0 00.75-.75v-3a.75.75 0 00-.75-.75h-4.5zm.75 3V2.5h3V4h-3zm-2.874-.467a.75.75 0 00-.752-1.298A1.75 1.75 0 002 3.75v9.5c0 .966.784 1.75 1.75 1.75h8.5A1.75 1.75 0 0014 13.25v-9.5a1.75 1.75 0 00-.874-1.515.75.75 0 10-.752 1.298.25.25 0 01.126.217v9.5a.25.25 0 01-.25.25h-8.5a.25.25 0 01-.25-.25v-9.5a.25.25 0 01.126-.217z"></path>
</svg>
<svg aria-hidden="true" viewBox="0 0 16 16" version="1.1" data-view-component="true" height="16" width="16" class="octicon octicon-check js-clipboard-check-icon color-text-success d-none m-2">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</clipboard-copy>
</div>
</template>
</body>
</html>
|
302b2d574de7ef93345843a6e4d641ede7333e21
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rknots/examples/Knot-class.Rd.R
|
279f9864a328cf583f88f14a65ec271554aa9dfb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 689
|
r
|
Knot-class.Rd.R
|
library(Rknots)
### Name: Knot-class
### Title: Class "Knot" - a container for knot and link coordinates and
### ends
### Aliases: Knot-class
### Keywords: class
### ** Examples
# create an object of class 'Knot' by using new
link <- makeExampleKnot( k = FALSE )
new('Knot', points3D = link$points3D, ends = link$ends)
#or by means of the constructor
newKnot(points3D = link$points3D, ends = link$ends)
#for knots, it is sufficient to specify the 3D coordinates
#ends are set by default to numeric(0)
knot <- makeExampleKnot( k = TRUE )
newKnot(points3D = knot)
#for creating an example, use makeExampleKnot.
#knot:
makeExampleKnot(k = TRUE)
#link:
makeExampleKnot(k = FALSE)
|
e9e9329a51d5790870e00061d5c7a2c822913b91
|
fb94c4587fc2f8f64ccb30801a5fc9410c721409
|
/R/get_cog.R
|
c5f040dac878810d2d4a7cdb08e64acb6b44f7a1
|
[] |
no_license
|
Yuki-Kanamori/ggvast
|
a0f637c3d5ea39f85bfc108035307fd4142985b2
|
c863b0194f98c2625bc3c8a374ae256d4100b560
|
refs/heads/master
| 2021-10-28T04:14:39.133330
| 2021-10-20T02:39:05
| 2021-10-20T02:39:05
| 232,312,515
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,880
|
r
|
get_cog.R
|
#' Calculate the lon/lat of COG
#'
#' get the lon/lat of COG from nominal data
#' @param data Data_Geostat
#'
#' @importFrom dplyr filter
#' @importFrom dplyr select
#' @importFrom plyr ddply
#' @importFrom plyr .
#' @import dplyr
#' @import magrittr
#'
#' @export
#'
get_cog = function(data){
#single-species
if(!("spp" %in% names(data))){
#cog_lon = sum_{year}{Catch_KG*Lon}/sum_{year}{Catch_KG}を計算
dg = DG %>% select(Catch_KG, Lon, Lat, Year) %>% mutate(cog_lon = Catch_KG*Lon, cog_lat = Catch_KG*Lat)
lon_nume = ddply(dg, .(Year), summarize, nume = sum(cog_lon))
lon_deno = ddply(dg, .(Year), summarize, deno = sum(Catch_KG))
cog_lon = merge(lon_nume, lon_deno, by = "Year") %>% mutate(lon = nume/deno) %>% select(Year, lon)
lat_nume = ddply(dg, .(Year), summarize, nume = sum(cog_lat))
lat_deno = ddply(dg, .(Year), summarize, deno = sum(Catch_KG))
cog_lat = merge(lat_nume, lat_deno, by = "Year") %>% mutate(lat = nume/deno) %>% select(Year, lat)
cog_nominal = merge(cog_lon, cog_lat, by = "Year") %>% mutate(spp = 1)
}
#multi-species
else{
#cog = sum_{year}{Catch_KG*Lon}/sum_{year}{Catch_KG}を計算
df = DG %>% select(Catch_KG, Lon, Lat, Year, spp) %>% mutate(cog_lon = Catch_KG*Lon, cog_lat = Catch_KG*Lat)
lon_nume = ddply(df, .(Year, spp), summarize, nume = sum(cog_lon))
lon_deno = ddply(df, .(Year, spp), summarize, deno = sum(Catch_KG))
cog_lon = merge(lon_nume, lon_deno, by = c("Year", "spp")) %>% mutate(lon = nume/deno) %>% select(Year, lon, spp)
lat_nume = ddply(df, .(Year, spp), summarize, nume = sum(cog_lat))
lat_deno = ddply(df, .(Year, spp), summarize, deno = sum(Catch_KG))
cog_lat = merge(lat_nume, lat_deno, by = c("Year", "spp")) %>% mutate(lat = nume/deno) %>% select(Year, lat, spp)
cog_nominal = merge(cog_lon, cog_lat, by = c("Year", "spp"))
}
}
|
4b1154bd50c271d2ac1f68210e28a20719e15882
|
9bdef83f28b070321ba27709d2c7ec028474b5c3
|
/R/networkOperations.R
|
7182dcc8d99c44743b39c1c43833606fe80dfa13
|
[] |
no_license
|
antagomir/scripts
|
8e39ce00521792aca1a8169bfda0fc744d78c285
|
c0833f15c9ae35b1fd8b215e050d51475862846f
|
refs/heads/master
| 2023-08-10T13:33:30.093782
| 2023-05-29T08:19:56
| 2023-05-29T08:19:56
| 7,307,443
| 10
| 15
| null | 2023-07-19T12:36:45
| 2012-12-24T13:17:03
|
HTML
|
UTF-8
|
R
| false
| false
| 935
|
r
|
networkOperations.R
|
printSIF <- function (mynet, sif.file, sep = " ") {
write("", file=sif.file, append = FALSE)
# Print network in SIF format
# NOTE: print only upper triangle of the matrix
# Reason: since the edges are not directed, we would have two links for each
# pair of linked nodes which makes the figure unclear and is redundant
# in cytoscape figures anyway
for (i in 1:nrow(mynet)) {
gid<-rownames(mynet)[[i]]
inds = which(as.logical(mynet[i,]))
# keep only upper triangle of the network matrix
inds = inds[inds>i]
neighgids<-colnames(mynet)[inds]
for (ng in neighgids) {
# 'link' is arbitrary user-chosen name
# of the interaction type.
# here we just have this one type.
write(paste(gid,"link",ng, sep = sep), file=sif.file, append = TRUE)
}
}
print(paste("Network printed into",sif.file))
}
|
cb1e4a11127e28b23ffb5a13250a9f3de756b7a0
|
08176ba2ad3c3365d3e23381a1d2681ffe8d2dea
|
/R/processTSS.R
|
5d52a598c64f4abcf196d9691bd7d0ce212300a0
|
[] |
no_license
|
rpolicastro/TSRchitect
|
0b3a9d5f4ab6789a01d1d07498c102364a75d5ff
|
c3eb7e83f32e66bb47365defa8b883156cd84e84
|
refs/heads/master
| 2020-03-22T06:32:06.072429
| 2018-07-03T19:27:25
| 2018-07-03T19:27:25
| 139,640,992
| 1
| 0
| null | 2018-07-03T21:59:33
| 2018-07-03T21:59:33
| null |
UTF-8
|
R
| false
| false
| 2,904
|
r
|
processTSS.R
|
#' @title \strong{processTSS}
#' @description \code{processTSS} calulates the number of observed reads
#' at a given TSS coordinate across an entire dataset.
#'
#' @param experimentName an S4 object of class \emph{tssObject} containing
#' information in slot \emph{@tssTagData}
#' @param n.cores the number of cores to be used for this job.
#' n.cores=1 means serial execution of function calls (numeric)
#' @param tssSet default is "all"; to select a single \emph{tssSet},
#' specify it (as character)
#' @param writeTable specifies whether the output should be written
#' to a file. (logical)
#'
#' @importFrom BiocParallel bplapply MulticoreParam
#'
#' @return Creates a list of \linkS4class{GenomicRanges} containing TSS
#' positions in slot \emph{tssTagData} of the returned \emph{tssObject}.
#'
#' @examples
#' load(system.file("extdata", "tssObjectExample.RData",
#' package="TSRchitect"))
#' tssObjectExample <- processTSS(experimentName=tssObjectExample, n.cores=1,
#' tssSet="all", writeTable=FALSE)
#'
#' @note Note that the \emph{tssSet} parameter must be of class
#' \emph{character}, even when selecting an individual dataset.
#' @note An example similar to the one provided can be found in
#' the vignette (/inst/doc/TSRchitect.Rmd).
#'
#' @export
#' @rdname processTSS-methods
setGeneric("processTSS",
function(experimentName, n.cores, tssSet, writeTable)
standardGeneric("processTSS")
)
#' @rdname processTSS-methods
setMethod("processTSS",
signature("tssObject", "numeric", "character", "logical"),
function(experimentName, n.cores=1, tssSet="all", writeTable=FALSE) {
message("... processTSS ...")
if (tssSet=="all") {
iend <- length(experimentName@tssTagData)
multicoreParam <- MulticoreParam(workers=n.cores)
FUN <- function(x) {
prcTSS(experimentName, tssSet=x,
writeTable=writeTable)
}
experimentName@tssCountData <- bplapply(1:iend, FUN)
}
else {
i <- as.numeric(tssSet)
if (i > length(experimentName@tssTagData)) {
stop("The value selected for tssSet exceeds ",
"the number of slots in tssTagData.")
}
experimentName@tssCountData[[i]] <- prcTSS(experimentName =
experimentName,
tssSet = i,
writeTable=writeTable)
}
message("-----------------------------------------------------\n")
message(" Done.\n")
return(experimentName)
}
)
|
215c225e32fe0bb05e22fd2006b6bcf4ba67d78a
|
94feabc4866e141c085380211abedb5d03905de4
|
/man/get_last_played.Rd
|
0134422ae62857698f22cd2851af37a2367482a6
|
[
"MIT"
] |
permissive
|
SamMorrissette/sc2api
|
35eba252c73b325ea4c5fd4a667149ccf98bef5a
|
9d67a4000fa95d5b63a177a6ca7c4f598e31b60a
|
refs/heads/master
| 2022-12-24T06:01:15.663238
| 2020-09-25T15:12:14
| 2020-09-25T15:12:14
| 295,058,245
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,887
|
rd
|
get_last_played.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extensions.R
\name{get_last_played}
\alias{get_last_played}
\title{Last Played Match}
\usage{
get_last_played(region_id, realm_id, profile_id, host_region = "us")
}
\arguments{
\item{region_id}{A numeric argument indicating the region of the profile.
\itemize{
\item 1 = US Region
\item 2 = EU Region
\item 3 = KR/TW Region
\item 5 = CN Region
}}
\item{realm_id}{A numeric argument indicating the realm of the profile. A realm is a subset
of the region.
\itemize{
\item{US Region}
\itemize{
\item{1 = US}
\item{2 = LatAm}
}
\item{EU Region}
\itemize{
\item{1 = Europe}
\item{2 = Russia}
}
\item{KR/TW Region}
\itemize{
\item{1 = Korea}
\item{2 = Taiwan}
}
}}
\item{profile_id}{A unique, numeric identifier for an individual's profile.}
\item{host_region}{The host region that the API call will be sent to. For most API calls, the same data will be
returned regardless of which region the request is sent to. Must be one of "us", "eu", "kr", "tw", "cn". For more
information on regionality, refer to
\href{https://develop.battle.net/documentation/guides/regionality-and-apis}{Regionality and APIs}.}
}
\description{
Get the time of the last played match in a player's match history.
}
\note{
Data is only available for season 28 and higher.
}
\examples{
\donttest{
# Get last played match for a particular profile
try(get_last_played(1, 4716773, host_region = "us"))
}
}
\references{
\itemize{
\item \href{https://develop.battle.net/documentation/starcraft-2/community-apis}{Blizzard Community API Documentation}
\item \href{https://develop.battle.net/documentation/guides/regionality-and-apis}{Regionality and APIs}
}
}
|
0ddc9d45be95df9c84dead67f4bb7f04cf47e82f
|
22d41ca3bcf1e15c33c67105a5cb5891bf5237e5
|
/server.R
|
f8aa76619f796f40c83521d99eb12304ce7953b7
|
[] |
no_license
|
Debs20/data-proj-syn
|
f0a467273d2f9ba0244554e8cc43ef9d04738f23
|
2d6dfd14f0fcc17bcda27cb6eda3bb4ca7c43989
|
refs/heads/master
| 2022-12-02T23:14:18.133974
| 2020-08-24T13:50:26
| 2020-08-24T13:50:26
| 290,160,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,964
|
r
|
server.R
|
server <- function(input, output) {
output$referrers <- renderPlot({
# filtered_data <- eventReactive(input$action,{
referrers %>%
mutate(fullReferrer = recode(fullReferrer, "m.facebook.com/" = "social media", "linkedin.com/" = "social media",
"linkedin.com/feed/" = "social media", "facebook.com/" = "social media", "lm.facebook.com/l.php" = "social media",
"lm.facebook.com" = "social media", "lnkd.in" = "social media", "I.instagram.com" = "social media",
"instagram.com/" = "social media", "l.facebook.com/" = "social media", "lm.facebook.com/" = "social media", "lnkd.in/" = "social media",
"l.instagram.com/" = "social media", "gstatic.com/atari/embeds/913211048dfa67f4be7864f4505a4b63/intermediate-frame-minified.html" =
"gstatic.com")) %>%
arrange(desc(sessions)) %>%
group_by(fullReferrer) %>%
summarise(sessions = sum(sessions)) %>%
top_n(5) %>%
ggplot() +
aes(x = reorder(fullReferrer, - desc(sessions)), y = sessions) +
geom_col(fill = "dark blue", col = "black") +
labs(x = "\nOriginating Platform\n",
y = "\nNumber of sessions",
title = "\nHow do people find the website",
subtitle = "(period from 2020-03-01 to 2020-07-31)\n\n") +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(size = 25, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 18, face = "italic", hjust = 0.5),
axis.title.x = element_text(size = 20),
axis.text = element_text(size = 15),
axis.title.y = element_text(size = 20))
})
output$social <- renderPlot({
social_network %>%
filter(socialNetwork != "(not set)") %>%
arrange(desc(sessions)) %>%
head(7) %>%
ggplot() +
aes(x = reorder(socialNetwork, - desc(sessions)), y = sessions) +
geom_col(fill = "dark blue", col = "black") +
labs(x = "\nSocial Network\n",
y = "\nNumber of sessions",
title = "\nTop social media networks proving traffic",
subtitle = "(period from 2020-03-01 to 2020-07-31)\n\n") +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(size = 25, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 18, face = "italic", hjust = 0.5),
axis.title.x = element_text(size = 20),
axis.text = element_text(size = 15),
axis.title.y = element_text(size = 20))
})
output$landing <- renderPlot({
landing_page %>%
mutate(landingPagePath = recode(landingPagePath, "/blog/best-podcast-coders-programmers/" = "blogs",
"/blog/7-celebrities-didnt-know-code/" = "blogs", "/blog/hire-developers-online-tests/" = "blogs",
"/blog/meet-the-graduates-who-quit-their-jobs-to-learn-how-to-code/" = "blogs",
"/blog/first-developer-job-advice/" = "blogs",
"/blog/best-podcasts-coders-programmers/" = "blogs", "/blog/learn-to-code-working/" = "blogs",
"/blog/meet-emma-from-primary-teacher-to-front-end-developer/" = "blogs",
"/" = "Homepage", "/blog/9-common-misconceptions-coding/" = "blogs",
"/blog/9-websites-start-coding-journey/" = "blogs")) %>%
arrange(desc(sessions)) %>%
group_by(landingPagePath) %>%
summarise(sessions = sum(sessions)) %>%
top_n(8) %>%
ggplot() +
aes(x = reorder(landingPagePath, - desc(sessions)), y = sessions) +
geom_col(fill = "dark blue", col = "black") +
labs(x = "Landing webpage\n",
y = "\nNumber of sessions",
title = "\nHow do users typically enter the website",
subtitle = "(period from 2020-03-01 to 2020-07-31)\n\n") +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(size = 25, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 18, face = "italic", hjust = 0.5),
axis.title.x = element_text(size = 20),
axis.text = element_text(size = 15),
axis.title.y = element_text(size = 20))
#What are the most popular landing pages
})
output$landing_goals17 <- renderPlot({
landing_goals %>%
mutate(landingPagePath = recode(landingPagePath, "/courses/professional-software-development/" = "/courses/PSD/",
"/courses/managing-growth-with-agile/?ct=t(short+courses+uk)&mc_cid=9e22d3be4a&mc_eid=[UNIQID]" =
"/courses/managaing-growth/", "/courses/thank-you-for-your-application/" = "/courses/thank-you/")) %>%
arrange(desc(goal17Completions)) %>%
head(9) %>%
ggplot() +
aes(x = reorder(landingPagePath, - desc(goal17Completions)), y = goal17Completions) +
geom_col(fill = "dark blue", col = "black") +
labs(x = "Landing webpage\n",
y = "\nNumber of completions",
title = "\nLanding page before completing goal 17 (PSD)",
subtitle = "(period from 2020-03-01 to 2020-07-31)\n\n") +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(size = 23, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 18, face = "italic", hjust = 0.5),
axis.title.x = element_text(size = 20),
axis.text = element_text(size = 15),
axis.title.y = element_text(size = 20))
})
output$landing_goals13 <- renderPlot({
landing_goals %>%
filter(landingPagePath != "/webinars/?fbclid=IwAR2h2oGSxo4ew1QYxMdZVttw6_-wO-pFBSYDUNrZPIJ8vlTJNHpz8m_dtrE") %>%
mutate(landingPagePath = recode(landingPagePath, "/courses/managing-data-business-insights/?ct=t(short+courses+uk_COPY_01)&mc_cid=40897d9e96&mc_eid=[UNIQID]"
= "/managing-data-business-inights", "/courses/professional-software-development/" = "/courses/PSD/",
"/funding/?fbclid=IwAR1VbUBdzStdwzxDMzxJcrAmSxt4056YbybfsxduNaJuDftsN-hWP4skxX0" = "/funding/",
"/courses/thank-you-data-analysis-application/" = "/courses/thank-you/")) %>%
arrange(desc(goal13Completions)) %>%
head(9) %>%
ggplot() +
aes(x = reorder(landingPagePath, - desc(goal13Completions)), y = goal13Completions) +
geom_col(fill = "dark blue", col = "black") +
labs(x = "Landing webpage\n",
y = "\nNumber of completions",
title = "\nLanding page before completing goal 13 (DA)",
subtitle = "(period from 2020-03-01 to 2020-07-31)\n\n") +
coord_flip() +
theme_bw() +
theme(plot.title = element_text(size = 23, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 18, face = "italic", hjust = 0.5),
axis.title.x = element_text(size = 20),
axis.text = element_text(size = 15),
axis.title.y = element_text(size = 20))
})
output$goals <- renderPlot({
colours <- c("Goal 17 (PSD)" = "blue", "Goal 13 (DA)" = "red")
goals %>%
ggplot(aes(x = month)) +
geom_line(aes(y = goal17Completions, colour = "Goal 17 (PSD)"), group = 1) +
geom_point(aes(y = goal17Completions, colour = "Goal 17 (PSD)")) +
geom_line(aes(y = goal13Completions, colour = "Goal 13 (DA)"), group = 1) +
geom_point(aes(y = goal13Completions, colour = "Goal 13 (DA)")) +
scale_colour_manual(values = colours) +
labs(x = "Month\n",
y = "\nNumber of Completions",
title = "\nNumber of Completions of DA and PSD signups",
subtitle = "(period from 2020-03-01 to 2020-07-31)\n\n") +
theme_bw() +
theme(plot.title = element_text(size = 30, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 18, face = "italic", hjust = 0.5),
axis.title.x = element_text(size = 20),
axis.text = element_text(size = 15),
axis.title.y = element_text(size = 20),
legend.title = element_text(size = 20),
legend.text = element_text(size = 15))
})
output$bulletgraph13 <- renderPlotly({
plot_ly(
type = "indicator",
mode = "number+gauge+delta",
value = 45,
domain = list(x = c(0, 1), y= c(0, 1)),
title = list(text = "<b>DA clicks"),
delta = list(reference = 31),
gauge = list(
shape = "bullet",
axis = list(range = list(NULL, 100)),
threshold = list(
line = list(color = "red", width = 2),
thickness = 0.75,
value = 31),
steps = list(
list(range = c(0, 75), color = "lightgray"),
list(range = c(75, 100), color = "gray"))),
height = 120, width = 500) %>%
layout(margin = list(l= 150, r= 10))
})
output$bulletgraph17 <- renderPlotly({
plot_ly(
type = "indicator",
mode = "number+gauge+delta",
value = 87,
domain = list(x = c(0, 1), y= c(0, 1)),
title = list(text = "<b>PSD clicks</b>"),
delta = list(reference = 40),
gauge = list(
shape = "bullet",
axis = list(range = list(NULL, 100)),
threshold = list(
line = list(color = "red", width = 2),
thickness = 0.75,
value = 40),
steps = list(
list(range = c(0, 75), color = "lightgray"),
list(range = c(75, 100), color = "gray"))),
height = 120, width = 500) %>%
layout(margin = list(l= 150, r= 10))
})
}
|
cc7878f7b5ca3ab5c926112753c670d24db4cf4c
|
bc8a6124ad09a4a5a8604fb3cb3fa3418fbdafba
|
/Food web idea/R files/Current scripts/SEM_misc_code.R
|
9f6e806dc53aad7bc4a8e907da18de3171a0a16d
|
[] |
no_license
|
nembrown/100-islands
|
9397175169a2f900fb04aaad7f2e10e36b137c71
|
4f7ef53a63be9ab77fa0bacaa42e3e7650495c24
|
refs/heads/master
| 2021-07-09T16:15:50.089839
| 2020-09-30T21:53:26
| 2020-09-30T21:53:26
| 194,326,381
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,618
|
r
|
SEM_misc_code.R
|
# Hierarchical model ------------------------------------------------------
#demo model
model <- '
level: 1
fw =~ y1 + y2 + y3
fw ~ x1 + x2 + x3
level: 2
fb =~ y1 + y2 + y3
fb ~ w1 + w2'
fit <- sem(model = model, data = Demo.twolevel, cluster = "cluster")
summary(fit)
semPaths(fit)
N15_model_hierarch_lvl<-'
#transect level (between) only transect level stuff
level: 1
c.log_fish_biomass_bym3_mean ~ c.log_MEAN_kparea2k + c.log_MEAN_egarea2k
c.log_bycatch_biomass_bym3_mean ~ c.log_MEAN_kparea2k + c.log_MEAN_egarea2k
pres_otter ~ c.log_fish_biomass_bym3_mean + c.log_bycatch_biomass_bym3_mean + c.slope_degrees + c.log_MEAN_kparea2k + c.log_MEAN_egarea2k
ravens ~ c.log_fish_biomass_bym3_mean + c.log_bycatch_biomass_bym3_mean
eagles ~ c.log_fish_biomass_bym3_mean + c.log_bycatch_biomass_bym3_mean
c.log_site_mean_by_tran ~ c.log_MEAN_kparea2k + c.log_MEAN_egarea2k + c.SLOPE_degrees + c.WAVE_EXPOSURE + beachy_substrate + c.slope_degrees
human_pres_trans ~ c.log_MEAN_kparea2k + c.log_MEAN_egarea2k + c.log_fish_biomass_bym3_mean + c.log_bycatch_biomass_bym3_mean + c.WAVE_EXPOSURE + c.SLOPE_degrees
marine_animal_biomass_shore_trans ~ eagles + ravens + pres_otter + human_pres_trans + c.log_fish_biomass_bym3_mean + c.log_bycatch_biomass_bym3_mean
c.d15n ~ a1*c.log_site_mean_by_tran + h1*human_pres_trans + o1*marine_animal_biomass_shore_trans + c.slope_degrees
### correlations not already accounted for in model
pres_marine_invert ~~ c.log_bycatch_biomass_bym3_mean
pres_fish ~~ c.log_fish_biomass_bym3_mean
#latent variables measurement models
human_pres_trans =~ c.distance_to_midden + c.distance_to_fish + c.cult_imp_plant_richness
marine_animal_biomass_shore_trans =~ pres_marine_invert + pres_fish
#island level (within)
level: 2
pres_otter ~ c.log_Area + c.PA_norml
ravens ~ c.log_Area + c.PA_norml
eagles ~ c.log_Area + c.PA_norml
c.log_site_mean_by_tran ~ c.log_Area + c.PA_norml
c.d15n ~ c.log_Bog_area
'
fit_simple_hierarch_imp <- semList(N15_model_hierarch_lvl, dataList=implist, cluster = "unq_isl")
summary(fit_simple_hierarch_imp )
#does not work. 0/20 imputed lists converged.
fit_simple_hierarch_mice <- semList(N15_model_hierarch, dataList=imputed_transect, cluster = "unq_isl")
summary(fit_simple_hierarch_mice ) #0/21 datasets converged.
warnings()
# Piecewise fitting -------------------------------------------------------
#Algal model only
N15_model_algae_only<-'#latent variables as responses
algae_biomass_shore <~ log_MEAN_kparea2k + log_MEAN_egarea2k + SLOPE + WAVE_EXPOSURE + beachy_substrate + slope
a1*algae_biomass_shore ~ d15n
#latent variables measurement models
algae_biomass_shore =~ log_site_mean_by_tran + seaweed_all'
fit_algae <- sem(N15_model_algae_only, data=master_transect)
summary(fit_algae, fit.measures=TRUE)
varTable(fit_algae)
# fit_algae_PML<-sem(N15_model_algae_only, data=master_transect,estimator = "PML",missing = "available.cases",std.lv=TRUE, fixed.x=FALSE, conditional.x=FALSE, test = "none")
# summary(fit_algae_PML, fit.measures=TRUE)
fit_algae_pairwise <- sem(N15_model_algae_only, data=master_transect,missing="pairwise", std.lv=TRUE)
summary(fit_algae_pairwise, fit.measures=TRUE)
#This runs, and gives SE estimates so that is good
#The errors have to do with the variance/covariance matrix - can run "ridge" to help with this
################# marine animal biomass
N15_model_animal<-'#latent variables as responses
log_fish_biomass_bym3_mean + fish_bycatch_biomass + ravens + WAVE_EXPOSURE ~ marine_animal_biomass_shore
log_MEAN_kparea2k + log_MEAN_egarea2k ~ log_fish_biomass_bym3_mean
log_MEAN_kparea2k + log_MEAN_egarea2k ~ fish_bycatch_biomass
#correlations
# log_fish_biomass_bym3_mean ~~ fish_bycatch_biomass
# log_MEAN_kparea2k ~~ log_MEAN_egarea2k
#latent variables measurement models
marine_animal_biomass_shore =~ d15n
'
fit_animal <- sem(N15_model_animal, data=master_transect, missing = "ML")
summary(fit_animal, fit.measures=TRUE)
### This model also runs although has some problems...
#it assumed marine nimal biomass IS 1:1 with d15n, because we have no other estimator
#same errors as above....
##### Human model
N15_model_human<-'#latent variables as responses
human_pres <~ log_fish_biomass_bym3_mean + fish_bycatch_biomass + WAVE_EXPOSURE + log_Area
log_MEAN_kparea2k + log_MEAN_egarea2k ~ log_fish_biomass_bym3_mean
log_MEAN_kparea2k + log_MEAN_egarea2k ~ fish_bycatch_biomass
#correlations
#log_fish_biomass_bym3_mean ~~ fish_bycatch_biomass
#latent variables measurement models
human_pres =~ midden_feature_sem + fish_feature_sem + cult_imp_plant_richness + d15n
'
fit_human <- sem(N15_model_human, data=master_transect, std.lv=TRUE, std.ov=TRUE, missing="pairwise")
summary(fit_human, standardize=T, rsq=T)
coef(fit_human, standardize=T )
lavaanify(N15_model_human)
standardizedsolution(fit_human)
?sem
#
# #getting composite scores from fixing first loading to 1
#
# comp_formula2 <- '
# human_pres <~ 0.16*log_fish_biomass_bym3_mean + -19*fish_bycatch_biomass + 0.40*WAVE_EXPOSURE + -0.40*log_Area
#
# human_pres ~ d15n
# '
#
# comp_model2 <- sem(comp_formula2, data=master_transect, fixed.x=F)
#
#
#
# cover_model <- lm(d15n ~ log_fish_biomass_bym3_mean + fish_bycatch_biomass + WAVE_EXPOSURE + log_Area ,master_transect)
#
# summary(cover_model)
#think about centering variables later - if there are no interactions (currently how it's described, it shouldn't be a problem)
# # center variables, calculate interaction terms, ignore byproducts
# center_colmeans <- function(x) {
# xcenter = colMeans(x)
# x - rep(xcenter, rep.int(nrow(x), ncol(x)))
# }
#
# colnames.to.centre<-c("log_fish_biomass_bym3_mean", "log_bycatch_biomass_bym3_mean", "MEAN_kparea2k", "MEAN_egarea2k",
# "SLOPE_degrees", "log_Area", "WAVE_EXPOSURE", "beachy_substrate", "slope_degrees", "midden_feature_sem",
# "fish_feature_sem", "cult_imp_plant_richness", "d15n", "log_distance_to_any_arch", "log_distance_to_midden",
# "log_distance_to_fish", "PA_norml", "log_site_mean_by_tran", "log_MEAN_kparea2k", "log_MEAN_egarea2k", "log_Bog_area")
# length(colnames.to.centre)
# colnames.to.centre[1]
# new2.implist <- within(implist,{
# c.log_fish_biomass_bym3_mean<-log_fish_biomass_bym3_mean - mean(log_fish_biomass_bym3_mean)
# c.log_bycatch_biomass_bym3_mean<-log_bycatch_biomass_bym3_mean - mean(log_bycatch_biomass_bym3_mean)
# c.MEAN_kparea2k<-MEAN_kparea2k - mean(MEAN_kparea2k)
# c.MEAN_egarea2k<-MEAN_egarea2k - mean(MEAN_egarea2k)
# c.SLOPE_degrees<-SLOPE_degrees - mean(SLOPE_degrees)
# c.log_fish_biomass_bym3_mean<-log_fish_biomass_bym3_mean - mean(log_fish_biomass_bym3_mean)
# c.log_fish_biomass_bym3_mean<-log_fish_biomass_bym3_mean - mean(log_fish_biomass_bym3_mean)
# c.log_fish_biomass_bym3_mean<-log_fish_biomass_bym3_mean - mean(log_fish_biomass_bym3_mean)
# #
#
#
# center_colmeans(implist[[1]])
#
# new2.implist <- within(implist,{
# M.SES <- mean(SES)
# M.CognAbility <- mean(CognAbility)
# C.SES <- SES - M.SES
# C.CognAbility <- CognAbility - M.CognAbility
# SES.CognAbility <- C.SES * C.CognAbility
# }, ignore=c("M.SES", "M.CognAbility"))
#
# mean(colnames.to.centre)
#
# implist[[1]]
# str(implist)
# hist(master_transec_sem_subset$d15n)
# hist(test.df$d15n)
#
# test.df<-as.data.frame(new2.implist[1])
# head(test.df)
#plot(imp)
#####
#############################
##code for showing only significant paths ... doesn't seem to work all that well?? Gets the order of variables wrong.
lavaan::parameterEstimates(fit.adj.mitml.veg) %>% dplyr::filter(!is.na(pvalue)) %>% arrange((pvalue)) %>% mutate_if("is.numeric","round",3) %>% dplyr::select(-ci.lower,-ci.upper,-z)
parameterEstimates(fit.adj.mitml.veg)
pvalue_cutoff <- 0.10
obj <- semPlot:::semPlotModel(fit.adj.mitml.veg)
# save a copy of the original, so we can compare it later and be sure we removed only what we intended to remove
original_Pars <- obj@Pars
check_Pars <- obj@Pars %>% dplyr::filter(!(edge %in% c("int","<->") | lhs == rhs)) # this is the list of paramater to sift thru
keep_Pars <- obj@Pars %>% dplyr::filter(edge %in% c("int","<->") | lhs == rhs) # this is the list of paramater to keep asis
test_against <- lavaan::parameterEstimates(fit.adj.mitml.veg) %>% dplyr::filter(pvalue < pvalue_cutoff)
test_against_rev <- test_against %>% rename(rhs2 = lhs, # for some reason, the rhs and lhs are reversed in the standardizedSolution() output, for some of the values
lhs = rhs) %>% # I'll have to reverse it myself, and test against both orders
rename(rhs = rhs2)
checked_Pars <-
check_Pars %>% semi_join(test_against, by = c("lhs", "rhs")) %>% bind_rows(
check_Pars %>% semi_join(test_against_rev, by = c("lhs", "rhs"))
)
obj@Pars <- keep_Pars %>% bind_rows(checked_Pars)
#let's verify by looking at the list of the edges we removed from the object
anti_join(original_Pars,obj@Pars)
#> Joining, by = c("label", "lhs", "edge", "rhs", "est", "std", "group", "fixed", "par")
#> label lhs edge rhs est std group fixed par
#> 1 gear ~> mpg 0.1582792 0.0218978 FALSE 2
#> 2 cyl ~> mpg -0.4956938 -0.1660012 FALSE 3
# great, let's plot
semPlot::semPaths(obj, "std",fade = F, residuals = F, intercepts=FALSE, nodeLabels = nodelab, layout=lay_names)
### how to figure out the order of the variables that the model sees
semPlot::semPaths(fit.adj.mitml.veg, "path",fade = F, residuals = F, intercepts=FALSE, label.cex=2, nCharNodes = 0, nodeLabels = 1:25)
semPlot::semPaths(fit.adj.mitml.veg, "path",fade = F, residuals = F, intercepts=FALSE, label.cex=2, nCharNodes = 0)
?semPlot::semPaths
semPaths(obj, what="std", intercepts=FALSE, residuals=TRUE,
groups=grps, layout=lay_alt, nCharNodes=0, layoutSplit=TRUE, reorder=TRUE,
exoVar = FALSE, pastel=TRUE, rainbowStart = 0.4, label.cex=2)
semPaths(fit.adj.mitml.veg, what="path", residuals=FALSE,
groups=grps, layout=lay_alt, nCharNodes=0, layoutSplit=TRUE, reorder=TRUE,
exoVar = FALSE, pastel=TRUE, rainbowStart = 0.4, label.cex=2, intercepts=FALSE)
####tidy SEM
graph_sem(model=fit.adj.amelia.veg, layout=lay_alt)
lay<-get_layout("", "", "", "", "", "", "c.d15n", "","", "", "",
"c.log_site_mean_by_tran", "", "", "marine_animal_biomass_shore", "", "", "", "human_pres", "", "","",
"", "", "", "pres_marine_invert", "pres_fish","", "", "","cult_imp_plant_prop", "c.distance_to_midden","c.distance_to_fish",
"","", "", "eagles","ravens" ,"pres_otter","","","","","",
"","","","c.log_fish_biomass_bym3_mean", "c.log_bycatch_biomass_bym3_mean","","","c.log_Bog_area","","","",
"c.log_MEAN_rockarea2000", "c.log_MEAN_kparea2k", "c.log_MEAN_egarea2k", "c.PA_norml", "c.SLOPE_degrees", "c.log_Area", "c.WAVE_EXPOSURE", "beachy_substrate", "c.slope_degrees","c.log_Dist_Near", "elevation_max", rows=6)
lay_alt<-get_layout("", "", "", "", "", "","","","","","","","","","","","","","","","","","","","","","","", "","","","","","", "c.d15n", "","", "", "","","", "","","","","", "","",
"c.log_site_mean_by_tran","","", "","","","","", "","", "pres_marine_invert","","","","","","", "pres_fish", "","", "","","","", "","","","","","","","human_pres", "", "","","","","","","","", "","","","","","","",
"","", "","","","", "eagles","","","","","","","ravens" ,"","","","","","","pres_otter","","","","","","","","cult_imp_plant_prop","","","", "c.distance_to_midden","","","","c.distance_to_fish","","","","","","","","","",
"","","","","","","","","c.log_fish_biomass_bym3_mean","","","","","","","","","","", "c.log_bycatch_biomass_bym3_mean","","","","","","","","","","","","","","","","","", "","","","","","","","","",
"c.log_MEAN_rockarea2000","","","", "","","c.log_MEAN_kparea2k","","","", "","","c.log_MEAN_egarea2k","","", "","","","","","","","","","","","","","c.SLOPE_degrees","c.WAVE_EXPOSURE", "beachy_substrate","","","","","","","", "c.slope_degrees","","c.PA_norml","c.log_Area", "c.log_Dist_Near", "elevation_max", "c.slope_isl","", "c.log_Bog_area", rows=5)
###lavaan plot
lavaanPlot(model = fit.adj.mitml.veg,
node_options = list(shape = "box", fontname = "Helvetica"), edge_options = list(color = "grey"), coefs = TRUE)
|
35eeace36118cdd9250f64030e5b441dada348cb
|
1bc432bfe35cf98ca3e073afee76dc2b0cd4b77f
|
/tests/testthat/test-mathematics.R
|
3859928fdaa6f9a9557bc2e8700f0c55a9f0f346
|
[
"MIT"
] |
permissive
|
jessesadler/debkeepr
|
36dfcb533d0d25eed78077145627b6dd0acc22cc
|
64688727177298f80bb7ab3824ac01e1e74635db
|
refs/heads/main
| 2023-04-07T19:32:49.529952
| 2023-03-27T15:35:55
| 2023-03-27T15:35:55
| 135,849,064
| 7
| 1
|
NOASSERTION
| 2022-05-23T21:26:07
| 2018-06-02T20:13:08
|
R
|
UTF-8
|
R
| false
| false
| 8,034
|
r
|
test-mathematics.R
|
## Test mathematical functions ##
# vec_math ----------------------------------------------------------------
test_that("vec_math has error message for unimplemented functions", {
expect_snapshot_error(prod(deb_lsd(1, 16, 9), deb_lsd(1, 16, 9)))
expect_snapshot_error(sin(deb_lsd(1, 16, 9)))
expect_snapshot_error(prod(deb_tetra(1, 16, 9, 3), deb_tetra(1, 16, 9, 3)))
})
# Math group --------------------------------------------------------------
test_that("sum and mean with deb_lsd work", {
expect_equal(sum(deb_lsd(1, 16, 9), deb_lsd(5, 6, 8)), deb_lsd(7, 3, 5))
expect_equal(mean(deb_lsd(c(1, 5), c(42, 30), c(13, 15), c(50, 16))),
deb_lsd(3, 36, 14, c(50, 16)))
# NA
expect_equal(sum(deb_lsd(c(1, NA), c(2, NA), c(3, NA))),
deb_lsd(NA, NA, NA))
expect_equal(sum(deb_lsd(c(1, NA), c(2, NA), c(3, NA)), na.rm = TRUE),
deb_lsd(1, 2, 3))
expect_equal(mean(deb_lsd(c(1, NA), c(2, NA), c(3, NA)), na.rm = TRUE),
deb_lsd(1, 2, 3))
# Mean only takes first object
expect_equal(mean(deb_lsd(1, 16, 9), deb_lsd(5, 6, 8)), deb_lsd(1, 16, 9))
# Error with different bases
expect_snapshot(error = TRUE,
sum(deb_lsd(1, 16, 9), deb_lsd(1, 16, 9, c(50, 16))))
})
test_that("sum and mean work with deb_decimal", {
# Data
x <- deb_decimal(c(36.8125, 112.3125, 72.375, 48.5625),
unit = "s", bases = c(20, 12, 4))
expect_equal(sum(deb_decimal(1.8375), deb_decimal(1.5)), deb_decimal(3.3375))
expect_equal(sum(deb_decimal(36.75, unit = "s"), deb_decimal(20, "s")),
deb_decimal(56.75, "s"))
expect_equal(sum(deb_decimal(c(1.8375, NA, 5.225, 3.2875, 1.1125)),
na.rm = TRUE),
deb_decimal(11.4625))
expect_equal(mean(deb_decimal(c(1.8375, NA, 5.225, 3.2875, 1.1125)),
na.rm = TRUE),
deb_decimal(2.865625))
# Different units work
expect_equal(sum(deb_decimal(1.8375), deb_decimal(36.75, unit = "s")),
deb_decimal(3.675))
# Tetra bases
expect_equal(sum(x), deb_decimal(270.0625, "s", c(20, 12, 4)))
expect_equal(sum(deb_decimal(1.840625, bases = c(20, 12, 4)),
deb_decimal(1767, unit = "f", bases = c(20, 12, 4))),
deb_decimal(3.68125, bases = c(20, 12, 4)))
# Mixed bases
expect_equal(sum(x, deb_decimal(1.8375)), deb_decimal(15.340625))
# Errors
expect_snapshot(error = TRUE,
sum(deb_decimal(1.8375), deb_decimal(1.8375, bases = c(50, 16))))
expect_snapshot(error = TRUE,
sum(deb_decimal(1.8375), deb_decimal(1.8375, bases = c(50, 16, 8))))
})
test_that("sum and mean with deb_tetra work", {
# Data
x <- deb_tetra(c(1, 5), c(10, 36), c(3, 8), c(4, 6),
bases = c(50, 16, 8))
expect_equal(sum(deb_tetra(1, 16, 9, 3), deb_tetra(5, 12, 3, 3)),
deb_tetra(7, 9, 1, 2))
expect_equal(sum(x), deb_tetra(6, 46, 12, 2, c(50, 16, 8)))
expect_equal(mean(x), deb_tetra(3, 23, 6, 1, c(50, 16, 8)))
# NA
expect_equal(sum(deb_tetra(c(1, NA), c(16, NA), c(9, NA), c(3, NA))),
deb_tetra(NA, NA, NA, NA))
expect_equal(sum(deb_tetra(c(1, NA), c(16, NA), c(9, NA), c(3, NA)),
na.rm = TRUE),
deb_tetra(1, 16, 9, 3))
expect_equal(mean(deb_tetra(c(1, NA), c(16, NA), c(9, NA), c(3, NA)),
na.rm = TRUE),
deb_tetra(1, 16, 9, 3))
# Error with different bases
expect_snapshot(sum(deb_tetra(1, 16, 9, 3), x), error = TRUE)
})
test_that("sum works with deb-style vectors and numeric", {
expect_equal(sum(deb_lsd(5, 6, 8), 1.8375), deb_lsd(7, 3, 5))
expect_equal(sum(deb_decimal(1.8375), 3.5), deb_decimal(5.3375))
expect_equal(sum(deb_tetra(1, 16, 9, 3), 4.5), deb_tetra(6, 6, 9, 3))
})
test_that("sum works between deb-style vectors", {
# Bases and data
lsd <- deb_lsd(1, 16, 9)
tetra <- deb_tetra(1, 16, 9, 3)
# lsd and lsd decimal -> lsd
expect_equal(sum(lsd, deb_decimal(1.8375)), deb_lsd(3, 13, 6))
# lsd and tetra decimal -> lsd
expect_equal(sum(lsd, deb_decimal(36.8125, "s", c(20, 12, 4))),
deb_lsd(3, 13, 6.75))
# lsd and tetra -> lsd
expect_equal(sum(lsd, tetra), deb_lsd(3, 13, 6.75))
# tetra and tetra decimal -> tetra
expect_equal(sum(tetra, deb_decimal(36.8125, "s", c(20, 12, 4))),
deb_tetra(3, 13, 7, 2))
# tetra and lsd decimal -> tetra
expect_equal(sum(tetra, deb_decimal(36.75, unit = "s")),
deb_tetra(3, 13, 6, 3))
# All three -> lsd
expect_equal(sum(lsd, deb_decimal(36.75, unit = "s"), tetra),
deb_lsd(5, 10, 3.75))
# Errors
expect_snapshot(error = TRUE,
sum(lsd, deb_decimal(1.8375, bases = c(50, 16))))
expect_snapshot(error = TRUE,
sum(lsd, deb_tetra(1, 16, 9, 3, c(50, 16, 8))))
})
test_that("cumulative functions work", {
lsd <- deb_lsd(c(2, 3, 1), c(2, 3, 1), c(2, 3, 1))
dec <- deb_decimal(c(2, 3, 1))
tetra <- deb_tetra(c(2, 3, 1), c(2, 3, 1), c(2, 3, 1), c(2, 3, 1))
# cumsum
expect_equal(cumsum(deb_lsd(rep(1, 5), rep(1, 5), rep(1, 5))),
deb_lsd(1:5, 1:5, 1:5))
expect_equal(cumsum(deb_decimal(c(1, 2, 3))), deb_decimal(c(1, 3, 6)))
expect_equal(cumsum(deb_tetra(rep(1, 3), rep(1, 3), rep(1, 3), rep(1, 3))),
deb_tetra(1:3, 1:3, 1:3, 1:3))
# cummin
expect_equal(cummin(lsd), c(lsd[[1]], lsd[[1]], lsd[[3]]))
expect_equal(cummin(dec), c(dec[[1]], dec[[1]], dec[[3]]))
expect_equal(cummin(tetra), c(tetra[[1]], tetra[[1]], tetra[[3]]))
# cummax
expect_equal(cummax(lsd), c(lsd[[1]], lsd[[2]], lsd[[2]]))
expect_equal(cummax(dec), c(dec[[1]], dec[[2]], dec[[2]]))
expect_equal(cummax(tetra), c(tetra[[1]], tetra[[2]], tetra[[2]]))
})
test_that("finite, infinite and NaN checks work with deb_lsd and deb_tetra", {
# Data
lsd <- deb_lsd(c(NA, 1), c(NA, 1), c(NA, 1))
tetra <- deb_tetra(c(NA, 1), c(NA, 1), c(NA, 1), c(NA, 1))
# lsd
expect_equal(is.finite(lsd), c(FALSE, TRUE))
expect_equal(is.infinite(lsd), c(FALSE, FALSE))
expect_equal(is.nan(lsd), c(FALSE, FALSE))
# tetra
expect_equal(is.finite(tetra), c(FALSE, TRUE))
expect_equal(is.infinite(tetra), c(FALSE, FALSE))
expect_equal(is.nan(tetra), c(FALSE, FALSE))
})
# Round family with deb_lsd -----------------------------------------------
test_that("round family works with deb_lsd and deb_tetra", {
# Data
lsd <- deb_lsd(5, 19, 11.8755)
tetra <- deb_tetra(5, 19, 11, 3.8755)
# round
expect_equal(round(lsd), deb_lsd(6, 0, 0))
expect_equal(round(-lsd), deb_lsd(-6, 0, 0))
expect_equal(round(deb_lsd(5, 49, 15.6, c(50, 16))),
deb_lsd(6, 0, 0, c(50, 16)))
expect_equal(round(lsd, 3), deb_lsd(5, 19, 11.876))
expect_equal(round(deb_lsd(2, 3.3, 2.2)),
round(deb_normalize(deb_lsd(2, 3.3, 2.2))))
expect_equal(round(tetra), deb_tetra(6, 0, 0, 0))
expect_equal(round(tetra, 2), deb_tetra(5, 19, 11, 3.88))
# signif
expect_equal(signif(lsd, 3), deb_lsd(5, 19, 11.9))
expect_equal(signif(tetra, 3), deb_tetra(5, 19, 11, 3.88))
# ceiling
expect_equal(ceiling(lsd), deb_lsd(6, 0, 0))
expect_equal(ceiling(-lsd), deb_lsd(-5, -19, -11))
expect_equal(ceiling(tetra), deb_tetra(6, 0, 0, 0))
expect_equal(ceiling(-tetra), deb_tetra(-5, -19, -11, -3))
# floor
expect_equal(floor(lsd), deb_lsd(5, 19, 11))
expect_equal(floor(-lsd), deb_lsd(-6, 0, 0))
expect_equal(floor(tetra), deb_tetra(5, 19, 11, 3))
expect_equal(floor(-tetra), deb_tetra(-6, 0, 0, 0))
# trunc
expect_equal(trunc(lsd), deb_lsd(5, 19, 11))
expect_equal(trunc(deb_lsd(2, 3.3, 2.2)),
trunc(deb_normalize(deb_lsd(2, 3.3, 2.2))))
expect_equal(trunc(tetra), deb_tetra(5, 19, 11, 3))
})
test_that("abs works with deb_lsd vectors", {
expect_equal(abs(deb_lsd(1, 0, 0)), deb_lsd(1, 0, 0))
expect_equal(abs(deb_lsd(-1, 0, 0)), deb_lsd(1, 0, 0))
expect_equal(abs(deb_tetra(1, 0, 0, 0)), deb_tetra(1, 0, 0, 0))
expect_equal(abs(deb_tetra(-1, 0, 0, 0)), deb_tetra(1, 0, 0, 0))
})
|
82fff63ea12986c4dce076caf25ec3cb09cfe518
|
e82499ae1306bb762d27b03145aa187bae183e92
|
/HW12_5.R
|
28621d1509b9ecbbad047ec117b612a5ece516b7
|
[] |
no_license
|
immmjack/R-Programming
|
0db720c53d78a05ca83ca94dec488acaba79a938
|
0682ee02082406cdf5247c27b442cd57f8b30e3f
|
refs/heads/master
| 2021-05-21T16:49:14.852113
| 2020-08-26T08:02:23
| 2020-08-26T08:02:23
| 252,723,569
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
HW12_5.R
|
# Set Up
n = 7
sample = 10000
svar = numeric(sample)
pvar = numeric(sample)
statistic = numeric(sample)
#Generate Random Samples
for (i in 1 : 10000) {
result = rnorm(7, 4, sqrt(6))
svar[i] = var(result)
statistic[i] = (n - 1) * svar[i] / 6
}
d = density(statistic)
plot(d)
curve(dchisq(x, df = 6), from = 0, to = 30, col = "red", add = T)
|
1f4a9cb1c4b084d37c901f0d8565e22507a1efc1
|
be14bddbcca022e640f67b2225e4cfabb785cac5
|
/Practical 4.R
|
e99a4984b8af609030f4af06fb5f306b7acb10af
|
[] |
no_license
|
SamuelKWLi/Practical_4
|
fe559636d29222c380368abd94071cdc66916b65
|
8807248b819ed845cf081a62badf39b534a63f36
|
refs/heads/master
| 2020-08-26T14:18:59.461980
| 2019-10-23T19:59:33
| 2019-10-23T19:59:33
| 217,038,400
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,194
|
r
|
Practical 4.R
|
### Using RStudio with Git ###
# There are three methods of using Git with RStudio.
# 1. Set up the GitHub repository, clone it to your Git then load it in RStudio — using Git GUI
# 2. Create a new RStudio project and link it to GitHub — new version control
# 3. If you have an existing RProject then you can link them manually — existing project
# All of these methods first requires you create a new repo or have an emty existing one on GitHub.
# Go onto GitHub online and create a new repository, giving it a name and ensuring it is set to public.
# While Github recommends you include a Readme file, it is vital the repo is empty for the third method discussed here so tend to avoid it.
## Method 1 - Using the Git GUI ##
# Using this method creates a local copy of your repo on your computer through the Git GUI.
# First get the URL for your repo from Github by clicking on your repo and using the 'clone or download' button to create the URL.
# The open the Git GUI and select the ‘Clone Existing Repository’ option.
# You can then paste the repo URL into the 'Source Location' box to link your online repo.
# Then browse for where you want to put this local clone. Beware that you need to create a new folder for this through the Git GUI by selecting an existing folder and adding the new folder name to the end of the address.
# To link this with a new R project, open R and create a new project.
# Select the 'Existing Directory' option and select the new local repo file you created.
## Method 2 - Create a new Version Control in RStudio ##
# This is a simler method but requires that you are starting a completely new project.
# First get the URL for your repo from Github by clicking on your repo and using the 'clone or download' button to create the URL.
# To link this with a new R project, open R and create a new project.
# Select the 'Version Control' option and select the 'Git' option.
# You can then paste the repo URL into the 'Source Location' box to link your online repo.
# Then browse for where you want to put this local clone. Beware that you need to create a new folder for this through the Git GUI by selecting an existing folder and adding the new folder name to the end of the address.
## Method 3 - Using an Existing Project ##
# This method assumes you have an existing project that you want to save.
# First go to 'Tools', ' Global Options', and 'Git/SVN'.
# Ensure the top tick box is ticked and that R knows where the 'git.exe' is located.
# Then go to the 'Tools', 'Project Options' and select 'Git' from the drop down box.
# This will produce a git window on the top right pane and a git button on the top bar that can be used to manipulate git.
# Then to connect this local Git repo with the online Github repo you created, you need to access the shell through 'Tools' and 'Shell'.
# type 'git remote -v' into the command line and it should return with your Github repository URL, if not you need to connect it.
# to connect it you must type 'git remote add origin *********' where the * symbols are the Github repo URL which you copy and paste in.
# Then check if RStudio is tracking your Origin/Master branch from Github by entering 'git branch -vv'.
# If the response does not have 'origin/master', it is not tracking your main Github repo branch so you must change it.
# To change it enter 'git push --set-upstream origin master' and check it again using the above command. The result should have 'origin/master' in it.
## Committing to Git ##
# When ever you save your files, you are saving a copy of it to your local work directory.
# Git allows you to commit your files, creating a save point and allowing you to keep track of changes you make through saves.
# Before commiting, files need to be staged, which is the checking of differences in the files from the last commit so it is ready to be commited again.
# A commit only saves these save points to your local repo and not the the online Github repo.
# For the Commit window, go to the Git button on the top bar and select 'Commit', or click the commit button int eh git tab on the top right pane.
# The top left pane of this new window will have a list of all the files in the local repo that has been changed since the last commit.
# First stage the files that you want to commit by ticking their check box, or clicking the stage button to select all of them.
# Then Write a commit note in the top right pane and click the commit button.
# You can see what changes have been made for the selected file on the bottom pane int eh commit window.
## Pushing to Github ###
# Commiting only saves your changes to the local repo. if you want to upload these changes to your online repo on Github, you must push it.
# To do this, ensure all of your desired files have been commited and press the push button (Green up arrow) on either the commit window or the Git tab on the top right pane.
# If the push button is greyed out, you must check if Rstudio is connected to Github as discussed above in Method 3.
## Pulling from Github ##
# You can also make changes to your files from the online Github repo, or have other people make changes to them.
# To take these changes and apply them to your own local files, you must pull them from Github.
# To pull, press the pull button (blue down arrow) from either the commit window or the Git tab in the top right pane.
# The changed files will be downloaded into your local repo and replace them, updating your project with these new changes.
### Fork a Repository ###
# A fork is a copy or offshoot of someone elses or your own master repository.
# These forks can be used as a base from which you build your own code or with which you use to edit and fix someone elses code.
# You can then submit a pull request to the original who can choose to accept it and have it incorporated into their original.
## Branches ##
# For your repositories, you can creat branches that isolate development away from your master repo and other branches. It is like a test environment.
# You could make edits to your master repo and save it as another branch, or you can save your master repo as a branch before editing and edit the new branch.
# Any commits made on this new branch are seperate from the master repo.
# To send the eddited branch to the original author and repo, send an new pull request using the associated button.
# Select the base repository you want to send the request to from the top of the new window
# A comparison of the two branches are found at the bottom.
# You can then send the pull request.
# If you recieve a pull request It will appear on your pull request tab.
# You can choose to merge, squash and merge or rebase and merge from the drop down menu.
# Squash and merge squashes all of the commits in the editted branch into a single commit in the master repo.
### RMarkdown ###
# RMarkdown is a format that allows for the display of code, explanations and results within the same document.
# It is a very versitiles format that can be converted to many other usefuls formats like html for webpages, word documents, PDFs, blogs, books.
# It can also work with a variety of different code like R, Python and others. It can be designated by typeing the code type into the {} at the top of a code block.
# You need the retculate package to run python software.
install.packages("reticulate")
library(reticulate)
# You can now create a new Markdown document.
# There are two types, RMarkdown and R Notebook. There is little difference so we can just use the RMarkdown format.
# The R Notebook format allows for a preview using the 'output: html_notebook' line in the header area that can be added to an RMarkdown format if you want. This function is replaced by the knit button on the top bar for RMarkdown.
# You can create a new RMarkdown document in R by going to 'File', 'New File' and 'RMarkdown.
# You can also change the format of an existing file using the drop down menu on the bottom right corner of the script pane.
# To run the whole document, press the run button on the top right of the script pane. It has a green bar on the right that shows how where the code is currently running and red if an error.
# Click the 'Knit' button to run the RMarkdown document in its entirety, and creates a copy of the result as the format that is identified by the header shown by ---, within witch YAML information is written to format the document.
# When using code block you can edit how data is treated using the cog button on the right. You can have the code be shown or just the output etc.
# The play symbol facing downwards runs all of the code upto the top of your selected code block.
# The play button runs your code block.
## HTML ##
# To run code in a HTML file, you need to insert a code block which can be found in the insert button on the top right of the script pane.
# When using code chunk you can edit how data is treated using the cog button on the right. You can have the code be shown or just the output etc.
# The play symbol facing downwards runs all of the code upto the top of your selected code chunk.
# The play button runs your code chunk.
# Make sure the YAML information in the header area is set to format the document into HTML when it is knitted.
## Flexdashboard ##
# A dashboard is a format for focussing on interactive visualisations with some complimentary text or code.
# Flexdashboard is a package that is used to knit the RMarkdown format into a dashboard.
install.packages("flexdashboard")
library(flexdashboard)
# Make sure the relevent Felxdashboard YAML information is placed in the header area at the top of the document.
# To add visualisation parameters like column width, name the thing you want to edit (column) and enter the parameter intide {}. These parameters will be applied to the section bellow it.
# Once the visualisation parameters are entered, it must be underlined with a long string of --------------- that is long enough to turn from black to blue. This should be placed below any visualisation paramters like those above to identify them as being such parameters.
# The visualisation parameters will apply to all of the next sections until it is meets another set of parameters and a line of -------------------.
# To denote a section , you must use ### with the section name written after, turning the ### blue.
# Sections apply until the next set of ### and name.
# Remember that most of this is just for the flexdashboard format.
# To add text, just type information normally outside of a code chunk.
# To add code, just use the same method as for HTML and using the 'insert' button.
# However, the flexdashboard format does not normally show code in its final result.
# One method to show code in the felxdashboard format, is to enter 'echo=TRUE' in the {} brackets at the top of each desired code chunck which determines the chuncks code type.
# Another method that causes all code in the whole document to be shown is to enter 'knitr::opts_chunk$set(echo=TRUE)' within the first code chunk of the document, not within the {}.
## Word Document ##
# To set the format to a word document, change the YAML in the header area into the necessary information.
# Some functions may be limited to HTML, resulting in an error if trying to format the RMarkdown file into a word document.
# To force the convertion anyway, enter 'always_allow_html: true' in the YAML information in the header area. Ensure it has no spaces in front of it and is line with the other YAML information.
# The Word document format may also not print visualisations of data.
### Adding References ###
# Mendeley is a free reference manager where you link pdfs of your articles and books.
# You can then enter the necessary data about the articles and books so it can create a reference in your desired format.
# To link this library of references to R and RMarkdown documents, you need to create a BibTex file.
# A bibtex file can store all of your references in a single file, and if syncing is enabled, will be updated as you change things in the Mendelay software.
# To create it go to the Mendelay software and select 'Tools' > 'Options' > 'BibTex'.
# From this window, you can choose to Escape LaTex special characters (does not check the fields which may cause error), enable BibTex syncing to keep it updated, and choose if the file is for your whole library or a single section.
# When you create the file, it must be in the folder where the R project file is so it can be seen by R.
# There are many optional structural parameters that can be added and controlled using the YAML information in the header area. These include adding and formating Tables of contents, numbered sections, bibliography etc.
# Make sure you include the bibliography information and the Biblex file in the YMAL information in the header area, otherwise the citations will not work as it cannot find your reference file.
# To cite a document in RMarkdown, you need to enter the citation key which is [@author(s)year;@author(s)year], and can be found within Mendeley.
# Another way of getting the citation key is to use the citr package.
install.packages("citr")
library(citr)
# This tool can be accessed by going to the 'Addins' button on the top bar and selecting citr.
# Select the desired documents and the citation keys will be retrieved, which can be inserted into the RMarkdown document.
# Make sure citations are not placed inside code chuncks and are only placed where normal text is.
# Referenced documents will have their full reference appear in a complete bibliography within the last section of the RMarkdown document. You can create an empty last section for this bibliography.
### Packrat ###
# Packrat is a useful tool that is used to store all of your loaded packages into a single folder within the project.
# This improves the sharing of projects as people can use your Packrat to get all of the packages you used and with the same versions, hopefully allowing them to run your code without issue.
# This can be accessed in R using 'Tools', 'Project Options' or by useing the Packrat button at the top of the packages tab in the bottom right pane.
|
6720e84c6684e19310a3f86bb2056db8c09ca707
|
47d916b22c5880a6f02cdefc8eb62ce63390564d
|
/myCBD/myFunctions/make_rank_CAUSE_chart.R
|
f8b2169e4ee9a4a6491f035f385bbd2bb7121b4f
|
[] |
no_license
|
sharygin/CACommunityBurden
|
44d0626d49bb43714813d4325c6faefe63bcc7e0
|
b94870c60decd83733b959339131c63f00439e16
|
refs/heads/master
| 2021-06-24T11:07:03.066910
| 2020-02-28T01:10:01
| 2020-02-28T01:10:01
| 123,367,400
| 0
| 0
| null | 2018-03-01T01:55:08
| 2018-03-01T01:55:08
| null |
UTF-8
|
R
| false
| false
| 3,733
|
r
|
make_rank_CAUSE_chart.R
|
rankCause <- function(myLHJ="Amador",myMeasure = "aRate",myYear=2017,mySex="Total",myLev="lev1",myN=10) {
myCex <- 1.6
myCol <- "blue" #mycol <- rep("blue",nrow(filtered.df))
bLwd <- 2
filtered.df <- filter(datCounty,county==myLHJ,year==myYear,sex==mySex,Level %in% myLev,CAUSE !=0)
filtered.df <- filtered.df[order( filtered.df[,myMeasure],na.last = FALSE),]
if (myMeasure=="mean.age"){
filtered.df <- filtered.df[order( filtered.df[,myMeasure],na.last = NA,decreasing=TRUE),]
}
Nrows.df <- nrow(filtered.df)
Nrows.to.display <- min(Nrows.df,myN)
filtered.df <- filtered.df[((Nrows.df-Nrows.to.display+1):Nrows.df),]
layout(matrix(c(1,1,1,2,3,4,5),1,7,byrow=TRUE))
par(mar=c(5,25,0,0),oma = c(3, 0, 3, 0))
t.plot <- barplot( filtered.df$Ndeaths,
xlab = "Deaths (n)",
col = myCol, horiz = TRUE, space = .3, cex.lab = myCex,
xlim = c(0,1.04*max(filtered.df$Ndeaths,na.rm=TRUE)))
grid(nx=NULL,ny=NA,lty=1)
t.plot <- barplot( filtered.df$Ndeaths,add=TRUE,
xlab = "Deaths (n)",
col = myCol, horiz = TRUE, space = .3, cex.lab = myCex,
xlim = c(0,1.04*max(filtered.df$Ndeaths,na.rm=TRUE)))
box(lwd=bLwd)
t.label <- fullCauseList[match(filtered.df$CAUSE,fullCauseList[,"LABEL"]),"nameOnly"]
wr.lap <- wrap.labels(t.label ,30)
axis(side=2,at=t.plot,labels=wr.lap,las=2,cex.axis=1.8)
par(mar=c(5,0,0,0))
barplot(filtered.df$YLLper,
xlab="YLL per 100K pop",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$YLLper,na.rm=T)))
grid(nx=NULL,ny=NA,lty=1)
barplot(filtered.df$YLLper,add=TRUE,
xlab="YLL per 100K pop",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$YLLper,na.rm=T)))
box(lwd=bLwd)
barplot(filtered.df$aRate,
xlab="Age-Adjusted Rate",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$aRate,na.rm=T)))
grid(nx=NULL,ny=NA,lty=1)
barplot(filtered.df$aRate,add=TRUE,
xlab="Age-Adjusted Rate",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$aRate,na.rm=T)))
box(lwd=bLwd)
barplot(filtered.df$mean.age,
xlab="Mean Age",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$mean.age,na.rm=T)))
grid(nx=NULL,ny=NA,lty=1)
barplot(filtered.df$mean.age, add=TRUE,
xlab="Mean Age",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$mean.age,na.rm=T)))
box(lwd=bLwd)
if (myLHJ != "CALIFORNIA") {
t.plot <- barplot((filtered.df$SMR),
xlab="Stnd. Mortaility Ratio",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$SMR,na.rm=T)))
grid(nx=NULL,ny=NA,lty=1)
t.plot <- barplot((filtered.df$SMR),add=TRUE,
xlab="Stnd. Mortaility Ratio",
col=myCol,horiz=TRUE,space=.3,cex.lab=myCex,
xlim=c(0,1.04*max(filtered.df$SMR,na.rm=T)))
box(lwd=bLwd)
abline(v=0.8,col="green")
abline(v=1,col="gray")
abline(v=1.2,col="red")
text(1,.1,"state rate",srt=90,col="black",cex=1.3,adj=c(0,.5))
}
sexLab <- ""
if (mySex != "Total") sexLab <- paste0(", among ",mySex,"s")
mtext(paste0("Measures by Cause in ",myYear," in ",myLHJ,sexLab),outer = TRUE,cex=1.6,line=1,font=2)
mtext(figureAttribution,side=1,outer = TRUE,line=2)
}
if(1==2){
myLHJ="Amador"
myMeasure = "aRate"
myYear=2017
mySex="Total"
myLev="lev1"
myN=10
}
|
c69be25be42ab6af3f1a8df009937e28fe59b715
|
b7b24fcfb5fdad84898a4cbadf22e96199d837b3
|
/man/BitwiseValue.Rd
|
72ce16d96032324b143e1a93982b35b1c7b791eb
|
[] |
no_license
|
omegahat/RAutoGenRunTime
|
4de1f4095cebf80489e2836f613c0e6fa9c1496c
|
efda3017fe1945f9420c094471a902b9a64e55db
|
refs/heads/master
| 2023-01-06T14:34:11.880956
| 2023-01-01T02:03:32
| 2023-01-01T02:03:32
| 4,004,743
| 2
| 2
| null | 2017-07-04T19:48:57
| 2012-04-12T13:07:57
|
R
|
UTF-8
|
R
| false
| false
| 2,349
|
rd
|
BitwiseValue.Rd
|
\name{BitwiseValue}
%\alias{SymbolicConstant-class}
%\alias{show,SymbolicConstant-method}
\alias{BitwiseValue}
%\alias{EnumerationValue}
%\alias{asEnumValue}
\alias{asBitwiseValue}
\alias{BitwiseValue-class}
%\alias{EnumerationValue-class}
%\alias{EnumValue-class}
%\alias{EnumDef-class}
%\alias{show,EnumDef-method}
\alias{[,EnumDef,ANY,ANY-method}
\alias{&}
\alias{|}
\alias{c}
\alias{&,BitwiseValue,BitwiseValue-method}
\alias{|,BitwiseValue,BitwiseValue-method}
\alias{c,BitwiseValue-method}
\alias{coerce,BitwiseValue,numeric-method}
\title{Create enumeration value}
\description{
These functions and related classes and methods are used to work
}
\usage{
BitwiseValue(val, name = names(val),
class = if (is(val, "BitwiseValue"))
class(val)
else "BitwiseValue",
asVector = TRUE, S3 = FALSE,
defValues = getEnumValues(class, name))
}
\arguments{
\item{val}{the value which is to be converted to the appropriate
enumerated class. This can be a numeric value or vector, or a
character vector giving the names of the symbolic constants within
the enumeration set.}
\item{name}{the symbolic name(s) of the value(s) in \code{val}. This
is the symbolic names from the enumeration set.}
\item{class}{the name of the R class corresponding to the specific
enumeration type. }
\item{asVector}{a logical value controlling the case where there is
more than one value in \code{val} and how we return it.
We can process each value separately, or we can process them as a
collection of values to be OR'ed together.
If this is \code{TRUE}, then we OR the values together.
Otherwise, we treat each value separtely and reutrn a list of these
processed values.
}
\item{S3}{currently ignored. a logical value. This is no longer used
as we use S4 classes for these enumeration types}
\item{defValues}{the named vector of symbolic name and value pairs
that define the enumeration collection. We try to find this
based on the name of the class of \code{val} or \code{class}
or from the names of the variables
in \code{val} if this is a string.
}
}
\value{
}
\references{
}
\author{
Duncan Temple Lang
}
\seealso{
}
\examples{
# See tests/bits.R
}
\keyword{programming}
\keyword{interface}
|
9dd9807f17dcaef0bbb902c17572b1424fecd40f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/samplingVarEst/R/VE_EB_HT_Mean_Hajek.r
|
36c523f1729451098f2b86184ab644253870784d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
VE_EB_HT_Mean_Hajek.r
|
VE.EB.HT.Mean.Hajek <- function(VecY.s, VecPk.s, MatPkl.s, VecAlpha.s = rep(1, times= length(VecPk.s)))
{
VE.EB.HT.Ratio(VecY.s, as.double(rep(1, times= length(VecY.s))), VecPk.s, MatPkl.s, VecAlpha.s)
}
|
da383772f4602c3f1e1bd666d6d20b54166e03bd
|
52e69e98f60491914cb0d09f49474c3849472ea5
|
/R/numbers.R
|
ae02e9bd954a1b04e3a152354cef30a45857fb36
|
[
"MIT"
] |
permissive
|
mhatrep/doseminer
|
ee3029a14f8f7f7f8763cd1902c987b75636ccdc
|
9ae88dd291128d089e10dd370679625489ddc320
|
refs/heads/master
| 2023-06-17T15:58:08.391125
| 2021-07-19T10:28:44
| 2021-07-19T10:28:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,487
|
r
|
numbers.R
|
#' Regular expression to match numbers in English
#'
#' A regex pattern to identify natural language English number phrases, such as
#' "one hundred and fifty" or "thirty-seven". Used internally by
#' \code{\link{replace_numbers}} to identify substrings to replace with their
#' decimal representation.
#'
#' This is a PCRE (Perl type) regular expression, so it must be used with
#' \code{perl = TRUE} in base R regex functions. The packages \code{stringr}
#' and \code{stringi} are based on the alternative ICU regular expression
#' engine, so they cannot use this pattern.
#'
#' @note
#' There is limited support for fractional expressions like "one half".
#' The original pattern did not support expressions like "a thousand", but
#' it has been adapted to offer (experimental) support for this.
#' Phrases like "million" or "thousand" with no prefix will \emph{not} match.
#'
#' @source \url{https://www.rexegg.com/regex-trick-numbers-in-english.html}
regex_numbers <- "(?x) # free-spacing mode
(?(DEFINE)
# Within this DEFINE block, we'll define many subroutines
# They build on each other like lego until we can define
# a 'big number'
(?<one_to_9>
# The basic regex:
# one|two|three|four|five|six|seven|eight|nine
# We'll use an optimized version:
# Option 1: four|eight|(?:fiv|(?:ni|o)n)e|t(?:wo|hree)|
# s(?:ix|even)
# Option 2:
(?:f(?:ive|our)|s(?:even|ix)|t(?:hree|wo)|(?:ni|o)ne|eight)
) # end one_to_9 definition
(?<ten_to_19>
# The basic regex:
# ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|
# eighteen|nineteen
# We'll use an optimized version:
# Option 1: twelve|(?:(?:elev|t)e|(?:fif|eigh|nine|(?:thi|fou)r|
# s(?:ix|even))tee)n
# Option 2:
(?:(?:(?:s(?:even|ix)|f(?:our|if)|nine)te|e(?:ighte|lev))en|
t(?:(?:hirte)?en|welve))
) # end ten_to_19 definition
(?<two_digit_prefix>
# The basic regex:
# twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety
# We'll use an optimized version:
# Option 1: (?:fif|six|eigh|nine|(?:tw|sev)en|(?:thi|fo)r)ty
# Option 2:
(?:s(?:even|ix)|t(?:hir|wen)|f(?:if|or)|eigh|nine)ty
) # end two_digit_prefix definition
(?<one_to_99>
(?&two_digit_prefix)(?:[- ](?&one_to_9))?|(?&ten_to_19)|
(?&one_to_9)
) # end one_to_99 definition
(?<one_to_999>
(?:(?&one_to_9)|a)[ ]hundred(?:[ ](?:and[ ])?(?&one_to_99))?|
(?&one_to_99)
) # end one_to_999 definition
(?<one_to_999_999>
(?:(?&one_to_999)|a)[ ]thousand(?:[ ](?&one_to_999))?|
(?&one_to_999)
) # end one_to_999_999 definition
(?<one_to_999_999_999>
(?:(?&one_to_999)|a)[ ]million(?:[ ](?&one_to_999_999))?|
(?&one_to_999_999)
) # end one_to_999_999_999 definition
(?<one_to_999_999_999_999>
(?:(?&one_to_999)|a)[ ]billion(?:[ ](?&one_to_999_999_999))?|
(?&one_to_999_999_999)
) # end one_to_999_999_999_999 definition
(?<one_to_999_999_999_999_999>
(?:(?&one_to_999)|a)[ ]trillion(?:[ ](?&one_to_999_999_999_999))?|
(?&one_to_999_999_999_999)
) # end one_to_999_999_999_999_999 definition
(?<bignumber>
zero|(?&one_to_999_999_999_999_999)
) # end bignumber definition
(?<zero_to_9>
(?&one_to_9)|zero
) # end zero to 9 definition
(?<decimals>
point(?:[ ](?&zero_to_9))+
) # end decimals definition
(?<fractions>
(?:a[ ]|(?&one_to_9)[ ])?(?:\\b(?:hal(?:f|ves)|thirds?|quarters?|fifths?))
) # end fractions definition
(?<mixed_fractions>
(?:(?:(?&bignumber)|\\d+)(?:[ ])(?:and|[&])[ ])?(?&fractions)
) # end mixed fraction definition
) # End DEFINE
####### The Regex Matching Starts Here ########
(?&mixed_fractions)|(?&bignumber)(?:[ ](?&decimals))?
### Other examples of groups we could match ###
#(?&bignumber)
# (?&one_to_99)
# (?&one_to_999)
# (?&one_to_999_999)
# (?&one_to_999_999_999)
# (?&one_to_999_999_999_999)
# (?&one_to_999_999_999_999_999)"
#' Replace English number phrases with their decimal representations
#'
#' Uses \code{\link{numb_replacements}} to match parts of a string corresponding
#' to numbers, then invokes \code{\link{words2number}} to convert these
#' substrings to numeric. The rest of the string (the non-number words) is
#' left intact.
#'
#' Works on non-negative integer numbers under one billion
#' (one thousand million). Does not support fractions or decimals (yet).
#'
#' @param string A character vector. Can contain numbers and other text
#'
#' @return A character vector the same length as \code{string}, with words
#' replaced by their decimal representations.
#'
#' @examples
#' replace_numbers('Two plus two equals four')
#' replace_numbers('one hundred thousand dollars!')
#' replace_numbers(c('A vector', 'containing numbers', 'like thirty seven'))
#'
#' @seealso
#' \code{\link{words2number}}, for use on cleaned text that does not contain
#' any non-number words
#'
#' @export
replace_numbers <- function(string) {
#string <- str_remove_all(string, '(?:\\band|&)[ ]?')
matches <- gregexpr(regex_numbers, string, perl = TRUE, ignore.case = TRUE)
regmatches(string, matches) <- lapply(regmatches(string, matches), words2number)
string
}
#' Dictionary of English names of numbers
#'
#' For internal use in \code{\link{words2number}}. When passed as a replacement
#' to a function like
#' \code{\link[stringr:str_replace]{str_replace_all}}, it turns the
#' string into an arithmetic expression that can be evaluated to give an integer
#' representation of the named number.
#'
#' Lifted from Ben Marwick's \code{words2number} package and converted into
#' a named vector (previously a chain of \code{\link{gsub}} calls).
#'
#' @examples
#' \dontrun{
#' stringr::str_replace_all('one hundred and forty-two', numb_replacements)
#' }
#'
#' @note
#' Does not yet fully support decimals, fractions or mixed fractions.
#' Some limited support for 'half' expressions, e.g. 'one and a half'.
#'
#' @source \url{https://github.com/benmarwick/words2number}
numb_replacements <-
c('-' = ' ',
'eleven(?:th)?' = '+11',
'twel(?:ve|fth)' = '+12',
'thirteen(?:th)?' = '+13',
'fourteen(?:th)?' = '+14',
'fifteen(?:th)?' = '+15',
'sixteen(?:th)?' = '+16',
'seventeen(?:th)?' = '+17',
'eighteen(?:th)?' = '+18',
'nineteen(?:th)?' = '+19',
'twent(?:y|ieth)' = '+20',
'thirt(?:y|ieth)' = '+30',
'fort(?:y|ieth)' = '+40',
'fift(?:y|ieth)' = '+50',
'sixt(?:y|ieth)' = '+60',
'sevent(?:y|ieth)' = '+70',
'eight(?:y|ieth)' = '+80',
'ninet(?:y|ieth)' = '+90',
'(?:a|one) hundred(?:th)?' = '+100',
'two hundred(?:th)?' = '+200',
'three hundred(?:th)?' = '+300',
'four hundred(?:th)?' = '+400',
'five hundred(?:th)?' = '+500',
'six hundred(?:th)?' = '+600',
'seven hundred(?:th)?' = '+700',
'eight hundred(?:th)?' = '+800',
'nine hundred(?:th)?' = '+900',
'(?:\\b(?:a|one) )?half' = '+0.5',
'one|first|\\ba\\b' = '+1',
'second|two' = '+2',
'th(?:ree|ird)' = '+3',
'four(?:th)?' = '+4',
'fi(?:ve|fth)' = '+5',
'six(?:th)?' = '+6',
'seven(?:th)?' = '+7',
'eighth?' = '+8',
'nin(?:e|th)' = '+9',
'millions?' = ')*(1000000)+(0',
'thousand(?:th)?' = ')*(1000)+(0',
'hundred(?:th)?' = '+100',
'ten(?:th)?' = '+10',
'and|&' = '',
' ' = '',
'^' = '(0',
'$' = ')',
'\\(0\\(' = '',
'\\+\\+' = '\\+\\(',
'\\)\\+\\)' = '\\)',
# Finally remove any residual non-number words.
# Otherwise the generated arithmetic expression will not evaluate.
'[[:alpha:]]+' = ''
)
#' Convert English names of numbers to their numerical values
#'
#' @source Originally adapted from the
#' \href{https://github.com/benmarwick/words2number}{\code{words2number}} package by
#' Ben Marwick.
#'
#' @param txt A character vector containing names of numbers (only).
#'
#' @return A named numeric vector of the same length as \code{phrase}.
#'
#' @examples
#' words2number('seven')
#' words2number('forty-two')
#' words2number(c('three', 'one', 'twenty two thousand'))
#'
#' @importFrom stringr str_replace_all
#'
#' @export
words2number <- function(txt) {
if (length(txt) < 1)
return(txt)
if (any(lengths(txt) > 1))
stop('words2number does not work on nested lists')
if (!is.character(txt[[1]]))
stop('words2number should only be passed character-vector inputs')
expression <- stringr::str_replace_all(tolower(txt), numb_replacements)
result <- vapply(expression, function(e) eval(parse(text = e)), FUN.VALUE = 0)
setNames(result, txt)
}
#' List of Latin medical and pharmaceutical abbreviations
#'
#' A named character vector. Names represent Latin terms and values the English
#' translations. Used for converting terms like "q4h" into "every 4 hours",
#' which can then be parsed into a dosage frequency/interval.
#'
#' Use with a function like \code{\link[stringr:str_replace]{str_replace_all}}
#' to translate a prescription from Latin to English (thence to numbers).
#'
#' @source
#' \url{https://en.wikipedia.org/wiki/List_of_abbreviations_used_in_medical_prescriptions}
#'
#' @examples
#' stringr::str_replace_all('Take two tablets q4h', latin_medical_terms)
#'
#' @export
latin_medical_terms <- c(
`dieb alt` = 'every 2 days',
`alt hs` = 'every 2 hours', ## ?
`noc(?:te?)?` = 'at night',
`mane` = 'in the morning',
q8h = 'every 8 hours',
q7h = 'every 7 hours',
q6h = 'every 6 hours',
q5h = 'every 5 hours',
q4h = 'every 4 hours',
q3h = 'every 3 hours',
q2h = 'every 2 hours',
q1h = 'every 1 hour',
qhs = 'at bedtime',
qqh = 'every 4 hours',
qh = 'every 1 hour',
hs = 'bedtime',
bt = 'bedtime',
qam = 'every morning',
qpm = 'every afternoon',
`qds?` = 'daily',
q1d = 'daily',
qid = '4 / day',
qwk = 'every week',
`bds?` = 'twice daily',
bid = 'twice daily',
bis = 'twice',
biw = 'twice weekly',
`tds?` = 'thrice daily',
tiw = 'thrice weekly',
tid = 'thrice daily',
`alt h(?:or)?` = 'every 2 hours',
`alt d(?:\\b|ieb)` = 'every 2 days',
eod = 'every 2 days',
qad = 'every 2 days',
qod = 'every 2 days',
opd = '1 / day',
sid = '1 / day',
`\\bam\\b` = 'morning',
`\\bpm\\b` = 'afternoon',
`\\bom\\b` = 'every morning',
`\\bon\\b` = 'every night',
`\\bod\\b` = 'every day',
`sos|(?:si opus )?sit|siop` = 'as needed',
qs = 'as needed',
`prn|pro re nata` = 'as needed',
mdu = 'as directed',
`asd(?:ir)?` = 'as directed'
)
|
5119423776130962687256fda715dcfdd2c2bee3
|
65abe9a7747cf2470d2607b52cd28a306dfd541a
|
/man/NCmisc-internal.Rd
|
be86ac3ec1268375a7293976d0b2fff7aa2fa9d1
|
[] |
no_license
|
cran/NCmisc
|
22c73010bf15ef8b08a231f875aee74f018c6506
|
cf149ef8aaf77e6b7e3013372b90e6d23fc2980b
|
refs/heads/master
| 2022-11-04T13:34:38.299963
| 2022-10-17T08:15:22
| 2022-10-17T08:15:22
| 17,681,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
rd
|
NCmisc-internal.Rd
|
\name{NCmisc-internal}
\alias{get.ext}
\alias{suck.bytes}
\alias{suck.mem}
\alias{suck.cpu}
\alias{suck.num.from.txt}
\alias{divide.top.txt}
\alias{make.top.tab}
\alias{make.divisor}
\alias{display.var}
\alias{cat.path}
\alias{dir.force.slash}
\alias{file.split.windows}
\alias{is.ch}
\alias{rmv.ext}
\alias{head2}
\alias{tools_read_repositories}
\alias{check.bio}
\alias{is.col.numeric}
\alias{i_numerify}
\alias{is.missing}
\alias{se.na}
\title{Internal NCmisc Functions}
\description{
Internal NCmisc functions
}
\details{
These are not recommended/needed to be called by the user
}
\keyword{ internal }
|
b5a854e0dd9c73f436e123aef6b7aa26eba61a34
|
035229c811d57d91a11e3d43bc4b71dc58222f3b
|
/myfunctions/plot_enrich_manual.R
|
aa0a486f103ea3ba664a7943639ab647838f260a
|
[] |
no_license
|
Limour-dev/scRNA
|
d40580541e7ceef1a4ff875628a1a619e2912db7
|
834ec5c0c4323b521c96ca86db973b170831f2da
|
refs/heads/master
| 2023-06-15T13:40:41.843619
| 2021-07-17T14:44:22
| 2021-07-17T14:44:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,093
|
r
|
plot_enrich_manual.R
|
plot_enrich <- function(myego, name, width = 9, height = 10, xrot = 0,
spot.size.function = function(x) {abs(x)/2}, mybg = rownames(pros13@scale.data), nterms = 15, key.sizes = c(1, 5, 10)){
grep_data <- function(myego){
my.bg <- data.frame(matrix(NA, nrow = 0, ncol = length(myego)));
colnames(my.bg) <- names(myego);
my.or <- my.bg;
for(i in seq(length(myego))){
ego <- myego[[i]];
ipatient <- names(myego);
print(ipatient);
go <- ego[order(ego$FDR), ];
bg.dat <- -log10(ego[1:nterms, 'FDR', drop = FALSE]);
OR.dat <- ego[1:min(nterms, nrow(go)), 'OR', drop = FALSE];
rownames(bg.dat) <- rownames(OR.dat) <- go$TermName[1:nrow(bg.dat)];
my.bg[rownames(bg.dat), ipatient] <- bg.dat[, 1];
my.or[rownames(OR.dat), ipatient] <- log2(OR.dat[, 1]);
}
return(list(bg = my.bg, or = my.or))
};
to.plot <- grep_data(myego);
my.bg <- to.plot[['bg']];
my.or <- to.plot[['or']];
spot.colour.function <- function(x) {
colours <- rep("white", length(x));
colours[sign(x) == -1] <- default.colours(2, palette.type = "dotmap")[1];
colours[sign(x) == 1] <- default.colours(2, palette.type = "dotmap")[2];
return(colours);
};
dot.key <- list(
# indicate which side of the plot the legend will appear
space = "right",
points = list(
cex = spot.size.function(key.sizes),
col = spot.colour.function(key.sizes),
pch = 19
),
# dot labels
text = list(
lab = as.character(key.sizes),
cex = 1,
adj = 1.5,
fontface = "bold"
),
title = expression(underline('log'[2]*'OR')),
x = 3,
y =7
);
create.dotmap(
file = generate.filename('dotmap_go', name, 'pdf'),
x = my.or,
xaxis.cex = 1,
yaxis.cex = 1.2,
left.padding = 0,
bottom.padding = 4,
# use specified spot size and colour functions
spot.size.function = spot.size.function,
spot.colour.function = spot.colour.function,
# create a legend matching the dot sizes
key = dot.key,
key.top = 1,
xaxis.lab = gsub('JD1800|SL', '', colnames(my.bg)),
yaxis.lab = rownames(my.or),
xaxis.rot = xrot,
pch = 21,
pch.border.col = 'transparent',
# add the background
bg.data = my.bg,
# add a colourkey
colourkey = TRUE,
colour.scheme = c("white", "black"),
total.colour = 5,
bg.alpha = 1,
at = c(0, -log10(0.05), 5, 10),
colourkey.labels.at = c(0, -log10(0.05), 10, 50),
colourkey.labels = c(1, expression(0.05), expression(10^-10), expression(''<=10^-50)),
width = width,
height = height,
na.spot.size = 3,
add.grid = TRUE,
col.lwd = 1,
style = 'Nature',
col.colour = 'black',
row.colour = 'black',
);
};
|
65771fa1388fbd8889b04863fca2085a52dc30e8
|
74a2562f0688a232ac01c86df5cccb98175cac48
|
/scripts/demo_shiny_app/data/consolidate_app_data.R
|
4e31d6094fe2591233af2da930e382bab1b8eb03
|
[] |
no_license
|
briancaffey/the-rat-hack
|
e5ba881af5ccc5e23ee0602ceb1d37f1cbce235a
|
2264e5e61cdbc970b2f30fb1662e5922e0cf2a7d
|
refs/heads/master
| 2021-08-23T12:27:26.448700
| 2017-12-04T22:33:23
| 2017-12-04T22:33:23
| 112,543,090
| 1
| 0
| null | 2017-11-30T00:19:25
| 2017-11-30T00:19:25
| null |
UTF-8
|
R
| false
| false
| 2,119
|
r
|
consolidate_app_data.R
|
library(tidyverse)
setwd(getSrcDirectory(function(x){}))
# Loads the three ANC, Ward, and Census Tract datasets and consolidates them for use in the app
#####
# ANC
load("demo_shiny_app_data_anc.RData")
summarized_data = summarized_data %>%
mutate(spatial_aggregation_unit = "anc") %>%
rename(spatial_aggregation_value = anc)
total_request_data = total_request_data %>%
mutate(spatial_aggregation_unit = "anc") %>%
rename(spatial_aggregation_value = anc)
app_data = list(
anc = list(summarized_data = summarized_data,
total_request_data = total_request_data,
spatial_polygon_data = adminUnit_data,
spatial_unit_column_name = "ANC_ID",
spatial_unit_name = "ANC")
)
#####
# Census Tract
load("demo_shiny_app_data_census_tract.RData")
summarized_data = summarized_data %>%
mutate(spatial_aggregation_unit = "census_tract") %>%
rename(spatial_aggregation_value = census_tract)
total_request_data = total_request_data %>%
mutate(spatial_aggregation_unit = "census_tract") %>%
rename(spatial_aggregation_value = census_tract)
app_data$census_tract = list(summarized_data = summarized_data,
total_request_data = total_request_data,
spatial_polygon_data = census_tract_data,
spatial_unit_column_name = "TRACT",
spatial_unit_name = "Census Tract")
#####
# Ward
load("demo_shiny_app_data_ward.RData")
summarized_data = summarized_data %>%
mutate(spatial_aggregation_unit = "ward") %>%
rename(spatial_aggregation_value = ward)
total_request_data = total_request_data %>%
mutate(spatial_aggregation_unit = "ward") %>%
rename(spatial_aggregation_value = ward)
app_data$ward = list(summarized_data = summarized_data,
total_request_data = total_request_data,
spatial_polygon_data = adminUnit_data,
spatial_unit_column_name = "WARD_ID",
spatial_unit_name = "Ward")
save(app_data, service_codes_and_descriptions, file = "app_data.RData")
|
aba2d74eba78df15cdb6f8c3615cba1ce88e5203
|
80badebbbe4bd0398cd19b7c36492f5ab0e5facf
|
/man/as.SpatialPolygons.PolygonsList.Rd
|
3dfef061e05d5fd1f5466c932ca99c777c3f6120
|
[] |
no_license
|
edzer/sp
|
12012caba5cc6cf5778dfabfc846f7bf85311f05
|
0e8312edc0a2164380592c61577fe6bc825d9cd9
|
refs/heads/main
| 2023-06-21T09:36:24.101762
| 2023-06-20T19:27:01
| 2023-06-20T19:27:01
| 48,277,606
| 139
| 44
| null | 2023-08-19T09:19:39
| 2015-12-19T10:23:36
|
R
|
UTF-8
|
R
| false
| false
| 3,292
|
rd
|
as.SpatialPolygons.PolygonsList.Rd
|
\name{as.SpatialPolygons.PolygonsList}
\alias{as.SpatialPolygons.PolygonsList}
%\alias{as.SpatialPolygons.Shapes}
%\alias{as.SpatialPolygons.map}
%\alias{as.SpatialPolygons.pal}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Making SpatialPolygons objects}
\description{
This function is used in making SpatialPolygons objects from other formats.
}
\usage{
as.SpatialPolygons.PolygonsList(Srl, proj4string=CRS(as.character(NA)))
%as.SpatialPolygons.Shapes(shapes, IDs, proj4string=CRS(as.character(NA)))
%as.SpatialPolygons.map(map, IDs, proj4string=CRS(as.character(NA)))
%as.SpatialPolygons.pal(arc, pal, IDs, dropPoly1=TRUE, proj4string=CRS(as.character(NA)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Srl}{A list of Polygons objects}
% \item{shapes}{The Shapes component of a Map object as read by \code{read.shape}}
% \item{IDs}{Unique character ID values for each output Polygons object; the input IDs can be an integer or character vector with duplicates, where the duplicates will be combined as a single output Polygons object}
\item{proj4string}{Object of class \code{"CRS"};
holding a valid proj4 string}
% \item{map}{Object returned by \code{map} containing polygon boundaries}
% \item{arc}{Object returned by \code{get.arcdata}}
% \item{pal}{Object returned by \code{get.paldata}}
% \item{dropPoly1}{Should the first polygon in the AVC or e00 data be dropped; the first polygon is typically the compound boundary of the whole dataset, and can be detected by looking at the relative lengths of the list components in the second component of pal, which are the numbers of arcs making up the boundary of each polygon}
}
\value{
The functions return a SpatialPolygons object
}
\author{ Roger Bivand }
\examples{
%\dontrun{
%library(maptools)
%nc.shp <- read.shape(system.file("shapes/sids.shp", package="maptools")[1])
%}
%data(ncshp)
%nc1 <- as.SpatialPolygons.Shapes(nc.shp$Shapes, as.character(nc.shp$att.data$FIPS))
%plot(nc1)
%invisible(text(coordinates(nc1), labels=getSpPPolygonsIDSlots(nc1), cex=0.6))
%\dontrun{
%library(maps)
%ncmap <- map("county", "north carolina", fill=TRUE, col="transparent", plot=FALSE)
%}
%data(ncmap)
%IDs <- sapply(strsplit(ncmap$names, "[,:]"), function(x) x[2])
%nc2 <- as.SpatialPolygons.map(ncmap, IDs)
%plot(nc2)
%invisible(text(coordinates(nc2), labels=getSpPPolygonsIDSlots(nc2), cex=0.6))
%\dontrun{
%library(RArcInfo)
%fl <- "http://www.census.gov/geo/cob/bdy/co/co90e00/co37_d90_e00.zip"
%download.file(fl, "co37_d90_e00.zip")
%e00 <- zip.file.extract("co37_d90.e00", "co37_d90_e00.zip")
%e00toavc(e00, "nc")
%arc <- get.arcdata(".", "nc")
%pal <- get.paldata(".", "nc")
%pat <- get.tabledata("info", "NC.PAT")
%}
%data(co37_d90_arc)
%data(co37_d90_pal)
%sapply(pal[[2]], function(x) length(x[[1]]))
%data(co37_d90_pat)
%IDs <- paste(pat$ST[-1], pat$CO[-1], sep="")
%nc3 <- as.SpatialPolygons.pal(arc, pal, IDs=IDs)
%plot(nc3)
%invisible(text(coordinates(nc3), labels=sapply(slot(nc3, "polygons"), function(i) slot(i, "ID")), cex=0.6))
grd <- GridTopology(c(1,1), c(1,1), c(10,10))
polys <- as.SpatialPolygons.GridTopology(grd)
plot(polys)
text(coordinates(polys), labels=sapply(slot(polys, "polygons"), function(i) slot(i, "ID")), cex=0.6)
}
\keyword{spatial}
|
2f4805f3c5fb2b5f74d3566dbee3c0e59ebc1ab2
|
f19ff97cd0882e267c3ce05f4cf9fcfaf6536acf
|
/BigBoost/R/testR.R
|
d0afad7049c8e111bee37c1f72728885c8b9f062
|
[] |
no_license
|
droyuki/lab-devops
|
0cc63191b5db553da96f696f3f0d8f19f8f832e7
|
357e50e8b5e0e1eb13e787d5d5217c0bc4174d9f
|
refs/heads/master
| 2016-09-06T11:06:00.561261
| 2016-01-19T08:56:31
| 2016-01-19T08:56:31
| 41,469,671
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 89
|
r
|
testR.R
|
cmdargs <- commandArgs()
write.table(cmdargs,file= "rLog.txt", sep = ",", col.names = NA)
|
b5434b4f43e78d3a9c99cd72d2c61fde4b91c30a
|
fa18ee2bcec08ba7dd950843cd3547e05eafcb16
|
/test_3.R
|
c51a2154431ecd261326dca3fc97b325f8968f52
|
[] |
no_license
|
luigidolcetti/barbieHistologist
|
6da4174defd6228e67ab22c2e226d5bee96cb018
|
4f4bbcd939257d05bac8ec785a99e30d2ca1db93
|
refs/heads/master
| 2023-07-19T01:09:14.843315
| 2021-06-14T16:50:08
| 2021-06-14T16:50:08
| 347,299,283
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,822
|
r
|
test_3.R
|
library(barbieHistologist)
CD45<-bh_defineMarker(markerName = 'cd45',
channelName = 'ch1',
patternFunction = '.pattern.random',
patternModifier = list(mean = 5, sd=5),
compartment = 'cytoplasm')
CD4<-bh_defineMarker(markerName = 'cd4',
channelName = 'ch2',
patternFunction = '.pattern.skin2',
patternModifier = list(mean = 2, sd=3,k=5,X0=0.2),
compartment = 'cytoplasm')
CD8<-bh_defineMarker(markerName = 'cd8',
channelName = 'ch3',
patternFunction = '.pattern.skin2',
patternModifier = list(mean = 4, sd=4,k=3,X0=0.3 ),
compartment = 'cytoplasm')
CD11c<-bh_defineMarker(markerName = 'cd11c',
channelName = 'ch4',
patternFunction = '.pattern.skin2',
patternModifier = list(mean = 4, sd=2,k=3,X0=0.7 ),
compartment = 'cytoplasm')
CD1000Low<-bh_defineMarker(markerName = 'cd1000',
channelName = 'ch5',
patternFunction = '.pattern.random',
patternModifier = list(mean = 2, sd=3),
compartment = 'cytoplasm')
CD1000high<-bh_defineMarker(markerName = 'cd1000',
channelName = 'ch5',
patternFunction = '.pattern.random',
patternModifier = list(mean = 8, sd=4),
compartment = 'cytoplasm')
DNA<-bh_defineMarker(markerName = 'dna',
channelName = 'ch6',
patternFunction = '.pattern.random',
patternModifier = list(mean = 10, sd=5),
compartment = 'nucleus')
cytop1<-bh_defineShape(majorAxis = c(3,0.5),
minorAxis = c(3,0.5),
roundness = c(1,0),
nArms = 8,
armExt = c(1,0.1),
armElbow = 2,
armSwing = 0,
armTrig = c(-2,0.1))
cytop2<-bh_defineShape(majorAxis = c(2,0.5),
minorAxis = c(2,0.5),
roundness = c(1,0),
nArms = 8,
armExt = c(3,0.1),
armElbow = 4,
armSwing = 2,
armTrig = c(-2,0.1))
nuc1<-bh_defineShape(majorAxis = c(1.5,0.1),
minorAxis = c(1.5,0.1),
roundness = c(1,0),
nArms = 8,
armExt = c(1,0),
armElbow = 2,
armSwing = 0,
armTrig = c(-1,0.1))
# SS1<-bh_create(cytop2)
# plot(SS1$outline)
# lapply(SS1$branch,plot,add=T)
# SS2<-bh_create(nuc1)
# plot(SS2$outline,add=T)
cell1<-bh_defineCell(name = 'CD4_lo',
cytoplasm = cytop1,
nucleus = nuc1,
organelle = NULL,
markers = list(CD45,CD4,DNA,CD1000Low))
cell2<-bh_defineCell(name = 'CD4_hi',
cytoplasm = cytop1,
nucleus = nuc1,
organelle = NULL,
markers = list(CD45,CD4,DNA,CD1000high))
cell3<-bh_defineCell(name = 'CD8_lo',
cytoplasm = cytop1,
nucleus = nuc1,
organelle = NULL,
markers = list(CD45,CD8,DNA,CD1000Low))
cell4<-bh_defineCell(name = 'CD8_hi',
cytoplasm = cytop1,
nucleus = nuc1,
organelle = NULL,
markers = list(CD45,CD8,DNA,CD1000high))
cell5<-bh_defineCell(name = 'dendr',
cytoplasm = cytop2,
nucleus = nuc1,
organelle = NULL,
markers = list(CD45,DNA,CD11c))
tissue1<-bh_defineTissue(coords = c(0,250,0,250),
resolution = 1,
bg = 0,
markers = list(CD45,CD4,CD8,CD11c,CD1000high,DNA))
TEMP_population<-bh_populate(cellPrototype = list(cell1,cell2,cell3,cell4,cell5),
proportion = c(0.2,0.2,0.2,0.2,0.2),
tissue = tissue1,
maxCloning = 3,
areaTresh = 0.25)
bh_savePopulation(TEMP_population,
file='/home/luigi/pop.R')
TEMP_population1<-bh_loadPopulation('/home/luigi/pop.R')
TEMP_pic<-bh_engrave(tissue = tissue1,
cells = TEMP_population$CD4_lo)
TEMP_pic<-bh_engrave(tissue = TEMP_pic,
cells = TEMP_population$CD4_hi)
TEMP_pic<-bh_engrave(tissue = TEMP_pic,
cells = TEMP_population$CD8_lo)
TEMP_pic<-bh_engrave(tissue = TEMP_pic,
cells = TEMP_population$CD8_hi)
TEMP_pic<-bh_engrave(tissue = TEMP_pic,
cells = TEMP_population$dendr)
par(mar=c(0,0,0,0))
raster::plot(TEMP_pic$x.ch5.ch5,col=gray.colors(n = 255,0,1))
plot(TEMP1['cell'],col=NA,border='red',add=T)
TEMP_mod<-TEMP_pic
raster::plot(TEMP_mod$x.ch1.ch1,col=gray.colors(n = 255,0,1),colNA='blue')
TEMP_mod$x.ch1.ch1<-bh_focal_modifier(TEMP_mod$x.ch1.ch1,wMatrix = matrix(1,5,5),fun=.modifier.multDiv,fun.param = list(quantity=5))
TEMP_mod$x.ch1.ch1<-bh_focal_modifier(TEMP_mod$x.ch1.ch1,wMatrix = matrix(1,5,5),fun = mean)
raster::plot(TEMP_mod$x.ch1.ch1,col=gray.colors(n = 255,0,1),colNA='blue')
raster::plot(TEMP_mod$x.ch2.ch2,col=gray.colors(n = 255,0,1))
TEMP_mod$x.ch2.ch2<-bh_focal_modifier(TEMP_mod$x.ch2.ch2,wMatrix = matrix(1,5,5),fun = mean)
TEMP_mod$x.ch2.ch2<-bh_focal_modifier(TEMP_mod$x.ch2.ch2,wMatrix = matrix(1,5,5),fun=.modifier.multDiv,fun.param = list(quantity=3))
raster::plot(TEMP_mod$x.ch2.ch2,col=gray.colors(n = 255,0,1))
raster::plot(TEMP_mod$x.ch3.ch3,col=gray.colors(n = 255,0,1))
TEMP_mod$x.ch3.ch3<-bh_focal_modifier(TEMP_mod$x.ch3.ch3,wMatrix = matrix(c(0,1,0,1,0,1,0,1,0),3,3),fun = mean)
raster::plot(TEMP_mod$x.ch3.ch3,col=gray.colors(n = 255,0,1))
raster::plot(TEMP_mod$x.ch6.ch6,col=gray.colors(n = 255,0,1))
TEMP_mod$x.ch6.ch6<-bh_focal_modifier(TEMP_mod$x.ch6.ch6,wMatrix = .matrix.gauss(TEMP_mod$x.ch2.ch2,1),fun = mean)
raster::plot(TEMP_mod$x.ch6.ch6,col=gray.colors(n = 255,0,1),colNA='blue')
raster::plot(TEMP_mod$x.ch4.ch4,col=gray.colors(n = 255,0,1))
TEMP_mod$x.ch4.ch4<-bh_focal_modifier(TEMP_mod$x.ch4.ch4,wMatrix = .matrix.gauss(TEMP_mod$x.ch2.ch2,1),fun = median)
raster::plot(TEMP_mod$x.ch4.ch4,col=gray.colors(n = 255,0,1))
raster::plot(TEMP_mod$x.ch5.ch5,col=gray.colors(n = 255,0,1))
TEMP_mod$x.ch5.ch5<-bh_focal_modifier(TEMP_mod$x.ch5.ch5,wMatrix = matrix(1,11,11),fun = .modifier.multDiv,fun.param = list(quantity=3))
TEMP_mod$x.ch5.ch5<-bh_focal_modifier(TEMP_mod$x.ch5.ch5,wMatrix = matrix(1,5,5),fun = median)
raster::plot(TEMP_mod$x.ch5.ch5,col=gray.colors(n = 255,0,1))
raster::plot(TEMP_mod,col=gray.colors(n = 255,0,1))
for (i in names(TEMP_mod)){
raster::writeRaster(TEMP_mod[[i]],
filename = file.path("C:/Users/k1343421/Documents/BH",paste0(i,'_training.tiff')),overwrite=T)
}
TEMP2<-bh_asXYZ(tissue = TEMP_mod)
colnames(TEMP2)<-c('X','Y','ch1-CD45(ch1)','ch2-CD4(ch2)','ch3-CD8(ch3)','ch4-CD11c(ch4)','ch5-CDx(ch5)','ch6-DNA(ch6)')
write.table(TEMP2,"C:/Users/k1343421/Documents/BH/BH_TEST.txt",
row.names = F,
quote = F,
sep='\t')
TEMP1<-bh_asSFC(cells = TEMP_population)
plot(TEMP1['cell'],col=NA)
sf::st_write(TEMP1,"C:/Users/k1343421/Documents/BH/ground_TEST.sqlite",)
|
7d28f48906e6866d294c65aed5d5a256a93b6092
|
49b8ff57b4184c137dde8ed358b3372f3020d9b0
|
/RStudioProjects/mbDiscoveryR/scripts/scripts_speed test.R
|
6804c2d6dfc912bd692e143ff3be2f753a3933c0
|
[] |
no_license
|
kelvinyangli/PhDProjects
|
c70bad5df7e4fd2b1803ceb80547dc9750162af8
|
db617e0dbb87e7d5ab7c5bfba2aec54ffa43208f
|
refs/heads/master
| 2022-06-30T23:36:29.251628
| 2019-09-08T07:14:42
| 2019-09-08T07:14:42
| 59,722,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,253
|
r
|
scripts_speed test.R
|
# speed test
setwd("realWorldModelWithTrueParameters")
model = "alarm"
cpts = read.dsc(paste0(model, "/cpts/", model, ".dsc"))
allNodes = names(cpts)
n = 500
# mmlcpt
datasets = list.files(paste0(model, "/data rds/"), pattern = paste0("_", n, "_"))
ii = 17
data = readRDS(paste0(model, "/data rds/", datasets[ii]))
dataInfo = getDataInfo(data)
mbList = list()
# compute mb of each node using standard forward selection
system.time(
for (i in 1:length(allNodes)) {
targetNode = allNodes[i]
mbList[[i]] = mbForwardSelection.fast(data, targetNode, mmlCPT.fast, dataInfo$arities, dataInfo$indexListPerNodePerValue)
} # end for i
)
# pcmb
datasets = list.files(paste0(model, "/data/"), pattern = paste0("_", n, "_"))
setwd("pcmb2/") # set wd to pcmb folder
file.copy(paste0("../", model, "/data/", datasets[ii]), paste0(model, ".data"), overwrite = TRUE) # copy data from "alarm data" to "pcmb" with new name "alarm.data"
system.time(system(paste0("kmb4 ", model, ".data ", n, " ", length(allNodes), " -1 1.0 1 1 0.01"), intern = TRUE))
file.remove("output.txt")
# iamb
system.time(system(paste0("kmb4 ", model, ".data ", n, " ", length(allNodes), " -1 1.0 1 0 0.01"), intern = TRUE))
file.remove("output.txt")
setwd("..")
|
cee076c19752689524bcc0c1d0e3cdfb5539af2e
|
e5b50b052d111753a7beb335911b6a98aca845d7
|
/inst/shiny-examples/gravitas_app/ui.R
|
426c207452c5cef3748de90ee3d2810cc2519d31
|
[] |
no_license
|
Sayani07/gravitas
|
a0d33639aa3329570ba02a67faa7f015a0b0b7e3
|
c1430843c5cd9bc547dc0f768cd38b979f4b5f6a
|
refs/heads/master
| 2022-06-28T06:36:04.146695
| 2022-06-14T01:25:28
| 2022-06-14T01:25:28
| 177,514,371
| 15
| 6
| null | 2021-12-03T03:14:16
| 2019-03-25T04:30:24
|
R
|
UTF-8
|
R
| false
| false
| 9,703
|
r
|
ui.R
|
source("global_shiny.R", local = TRUE)
# Input Data Tab
tabInput <- tabPanel(
"Data summary", fluidPage(
sidebarPanel(
# Input rda file
checkboxInput("default",
"Explore package with Victorian electricity demand
(Default)",
value = TRUE
),
fileInput("file", "Upload your own data file (tsibble as .Rda file)"),
wellPanel(helpText(HTML(
"Browse through the package
with the already loaded data set on",
"<a href=https://rdrr.io/cran/tsibble
data/man/vic_elec.html>Victorian
electricity demand</a>",
"or load your own dataset (tsibble) in a .Rda file
and have a glance of your data before
moving ahead with your exploratory journey.",
"<hr>",
"<b>Statistical Summary</b>: Provides the five point
summary of all the variables in your data.",
"<br>",
"<hr>",
"<b>Temporal structure</b>: Provides the temporal structure
of the data through a <i>tsibble</i> summary.
It consists of a <i>time index</i>, <i>key</i>
and other <i>measured variables</i>.
The print display gives information on data dimension,
time interval(within third brackets),
keys(if any) and the number of time-based units.",
"<br>",
"<br>",
"<i>Caution</i>: Re-confirm the time interval
of your data before proceeding with your analysis."
)))
),
mainPanel(
fluidRow(
column(6, h2("Statistical Summary"),
verbatimTextOutput("summary"),
style = "height:100px"),
column(6, h2("Temporal structure"),
verbatimTextOutput("str_data"),
style = "height:100px")
# h2("Data structure"), verbatimTextOutput("str_data"), style = "height:100px"),
)
)
)
)
# Create Harmony tab
tabcreate <- tabPanel(
"Harmony table", fluidPage(
sidebarPanel(
# Input csv file
selectInput("lowest_unit",
"lowest temporal unit",
gravitas:::lookup_table$units,
"hour"),
selectInput("highest_unit",
"highest temporal unit",
gravitas:::lookup_table$units,
"week"),
numericInput("facet_h",
"Maximum number of facets allowed",
value = 31, min = 1),
selectInput("filter_in",
"Any other temporal events like Public Holidays/Special Events/Weekends (logical/character vector)",
"<select>",
multiple = TRUE),
wellPanel(helpText(HTML(" Combinations of circular granularities
which promote the exploratory analysis through visualization are referred to as <b><i>harmonies</i></b>
and the ones which impede
the analysis are referred to as <b><i>clashes.</i></b> ",
"<br>", "<br>", "<br>",
"Have a look at the possible
harmonies given the lowest and highest temporal unit that you have chosen.",
"<br>", "<br>",
"Make sure the highest temporal
unit chosen is higher in temporal order than the lowest temporal unit.
Also lowest temporal unit should
at least be as large as the tsibble time </i></b> interval</i></b>.",
"<br>", "<br>", "<br>",
"Is there any Holidays/
Public Events that needs to be checked for Harmonies or Clashes?
Add the column which refers to them.
Make sure they are logical/categorical")))
),
mainPanel(
fluidRow(dataTableOutput("table"))
)
)
)
# Create Plot tab
tabplot <- tabPanel(
"Plot distribution across bivariate granularities", fluidPage(
sidebarPanel(
# Input csv file
selectInput(
"ycol",
"Which univariate time series to plot?",
"<select>"
),
selectInput(
"facet",
"Facet Variable",
"<select>"
),
selectInput(
"xcol",
"X-axis Variable",
"<select>"
),
checkboxInput("flip_axis",
"Flipped display",
value = FALSE
),
checkboxInput("flip_coord",
"Flipped coordinates",
value = FALSE
),
selectInput("plot_type", "Which distribution plot",
choices = c("boxplot",
"ridge",
"violin",
"lv",
"decile",
"quantile"),
selected = "boxplot"
),
textInput(
"vec1",
"Enter a probability vector (comma delimited)\n
only if quantile plot is chosen",
"0.1, 0.5, 0.9"
),
shinyalert::useShinyalert(), # Set up shinyalert
actionButton(
"preview",
"Check for warnings/messages"
),
# downloadButton('downloadData', 'Download Data'),
downloadButton("downloadPlot", "Download Plot"),
wellPanel(helpText(HTML(
"Explore the distribution of the time series
variables across bivariate granularities.
Choose the distribution plot that best satisfy
your contexual needs.",
"<br>",
"<br>",
"Have a look at the messages window
to see recommendations on how to improve
your choice of granularities.",
"<br>", "<br>",
"<br>", "Does the plot look interesting to you?
Go ahead and save it in your workspace."
)))
),
mainPanel(
# conditionalPanel(condition = "output.warnstat == 'Error'",
# verbatimTextOutput("warnmsg")),
fluidRow(plotOutput("plot1", width = "100%")),
htmlOutput("code")
# aceEditor(
# outputId = "ace",
# selectionId = "selection",
# value = "code",
# placeholder = "show nothing"
# # htmlOutput("warning_text"))
# #"Warning", verbatimTextOutput("warning"))
# )
# )
)
)
)
# Granularity table tab
tabgranularity <- tabPanel(
"Granularity Table", fluidPage(
# sidebarPanel(
# # Input csv file
# fileInput("file", "Data file (.csv format)",
# accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv")
# )),
mainPanel(fluidRow( # verbatimTextOutput("clash_txt"),
dataTableOutput("grantbl")
))
)
)
### Combined tabs
ui <- fluidPage(
theme = shinythemes::shinytheme("united"),
tabsetPanel(
type = "tabs",
tabInput,
tabcreate,
tabplot
# tabgranularity
)
)
#
#
# ui <- fluidPage(theme = shinythemes::shinytheme("superhero"),
# headerPanel(" Explore probability distributions for bivariate temporal granularities"),
#
# sidebarPanel(width = 3,
# #tags$style(".well {background-color:[red];}"),
# fileInput("file", "Data file (tsibble as .Rda file)"),
# selectInput("ycol", "Which univariate time series to plot?", "<select>"),
# selectInput("lgran", "lowest temporal unit", gravitas:::lookup_table$granularity, "hour"),
# selectInput("ugran", "highest temporal unit", gravitas:::lookup_table$granularity, "week"),
# selectInput("facet", "facet Variable", "<select>"), # "<needs update>"),
# # search_gran(vic_elec, "hour", "minute")
# selectInput("xcol", "X Variable", "<select>"),
# selectInput("plot_type", "Which distribution plot", choices = c("boxplot", "ridge", "violin", "lv", "density", "quantile"), selected = "boxplot")
# ),
#
# mainPanel(
# tabsetPanel(
# type = "tabs",
# tabPanel("Data",
# fixedRow(
# column(12, "Data summary", verbatimTextOutput("summary")),
# column(12, "Data structure",
# verbatimTextOutput("str_data"))
# )
# ),
# # h3("Raw Data"),
# # dataTableOutput("data")),
# #,verbatimTextOutput("devMessage3")
# # h3("Index of tsibble"),
# # textOutput("index"),
# # h3("Key of tsibble"),
# # textOutput("")),
# # # h4("Five point summary"),
# # # tableOutput("fivepointsummary")),
# # # h4("Raw data"),
# # # dataTableOutput("data")),
# # # fluidRow(
# # # column(2,
# # # tableOutput("summary")
# # # ),
#
# tabPanel("Harmony Table", tableOutput("table")),
# tabPanel("Plot", plotOutput("plot1")),
# tabPanel("Granularity Table", dataTableOutput("grantbl"))
#
# )
# )
# )
|
0a313a7b021b287116b5410f3c48a36ee56f4189
|
4a2dbba17ea7101c78c26f4f68a7aed5cb346ade
|
/inst/rstudio/templates/project/ressources/m2/enonces/exo6.R
|
fa143f5249c6a327ada0fa4cebfef8ba72ba829e
|
[] |
no_license
|
MTES-MCT/savoirfR
|
c90298524cf5aec68ccaf9fcee62942204a62e9f
|
75ec0630803d7fb63e9fec51f3dcaa2c5e46459e
|
refs/heads/master
| 2023-06-25T02:33:09.036157
| 2023-06-10T23:19:13
| 2023-06-10T23:19:13
| 230,885,420
| 10
| 3
| null | 2023-03-23T19:02:08
| 2019-12-30T09:17:27
|
R
|
UTF-8
|
R
| false
| false
| 1,730
|
r
|
exo6.R
|
# ---
# title: "Exercice 6 - module 2"
# ---
# Calculer à partir des tables fournies dans le fichier `majic.RData` issues des fichiers fonciers (cf. http://piece-jointe-carto.developpement-durable.gouv.fr/NAT004/DTerNP/html3/_ff_descriptif_tables_image.html#pnb10) un indicateur d'étalement urbain entre 2009 et 2014 à la commune et à l'EPCI sur la région Pays de la Loire (départements 44, 49, 53, 72 et 85), et catégoriser chaque territoire.
# Définitions :
# - artificialisation = dcnt07 + dcnt09 + dcnt10 + dcnt11 + dcnt12 + dcnt13
# - indicateur_etalement_simple = évolution de l'artificialisation / évolution de la population
# - indicateur_etalement_avance, indicateur catégoriel qui vaut :
# * 1 si la population progresse ou reste stable alors que l'artificialisation recule ;
# * 2a si la population et l'artificialisation progressent ou sont stables et l'étalement urbain est inférieur ou égal à 1 (ou pop stable) ;
# * 2b si la population et l'artificialisation reculent et l'indicateur d'étalement urbain est supéreur à 1 ;
# * 2c si la population recule et l'indicateur d'étalement est compris entre 0 et 1 (inclus) ;
# * 3 si la population progresse, l'artificialisation progresse plus vite que la population, tout en restant inférieure ou égale à 4,9 m² ;
# * 4 si la population progresse, l'artificialisation est supérieure à 4,9 m², elle progresse plus vite que la population mais au plus 2 fois plus vite ;
# * 5 si la population progresse, l'artificialisation progresse plus de 2 fois plus vite que la population et est supérieure à 4,9 m² ;
# * 6 si la population recule et l'indicateur d'étalement urbain est négatif.
|
10e9fa5edf397c73ffba428ddc0d55275478486e
|
d4601bcbdbc66c31ce1d39b76f0abb36aa48c67b
|
/shared_code/gviz_common.r
|
d1031c477e3557f26cbd74ac70b5a71c3d5f854f
|
[] |
no_license
|
zeitlingerlab/he_johnston_nbt_2014
|
93e37a04e77ad8a0792fe21b9f10fd7efd89bbae
|
d2b8480ec3e096cc6c22479e32d4d0828cc21542
|
refs/heads/master
| 2020-05-28T14:17:17.921275
| 2015-03-09T15:33:31
| 2015-03-09T15:33:31
| 25,653,941
| 9
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,591
|
r
|
gviz_common.r
|
gviz_plot_with_exo <- function(genome="dm3", region.gr,
sample.regular, sample.nexus, sample.exo,
title, motifs=GRanges(),
normalization_method="none",
zoom_to_nexus_scale=FALSE) {
r_chr <- as.character(seqnames(region.gr))
r_start <- start(region.gr)
r_end <- end(region.gr)
stopifnot(length(region.gr) == 1)
seqlevels(region.gr, force=TRUE) <- unique(as.character(seqnames(region.gr)))
if(genome == "dm3") {
grtrack <- GeneRegionTrack(range=TxDb.Dmelanogaster.UCSC.dm3.ensGene,
genome="dm3",
chromosome=r_chr,
name="Genes",
showId=TRUE,
geneSymbol=TRUE,
fill="gray60", collapseTranscripts=FALSE)
}
if(genome == "hg19") {
grtrack <- GeneRegionTrack(range=TxDb.Hsapiens.UCSC.hg19.knownGene,
genome="hg19",
chromosome=r_chr,
name="Genes",
showId=TRUE,
geneSymbol=TRUE,
fill="gray60", collapseTranscripts=FALSE)
}
if(normalization_method == "downsample") {
if(length(sample.nexus$gr) > length(sample.exo$gr)) {
sample.nexus$gr <- sample(sample.nexus$gr, length(sample.exo$gr))
sample.nexus$pos <- coverage(sample.nexus$gr[strand(sample.nexus$gr) == "+"])
sample.nexus$neg <- coverage(sample.nexus$gr[strand(sample.nexus$gr) == "-"])
} else {
sample.exo$gr <- sample(sample.exo$gr, length(sample.nexus$gr))
sample.exo$pos <- coverage(sample.exo$gr[strand(sample.exo$gr) == "+"])
sample.exo$neg <- coverage(sample.exo$gr[strand(sample.exo$gr) == "-"])
}
}
data.nexus <- matrix(c(as.numeric(sample.nexus$pos[[r_chr]][r_start:r_end]),
-1 * as.numeric(sample.nexus$neg[[r_chr]][r_start:r_end])), nrow=2, byrow=TRUE)
data.exo <- matrix(c(as.numeric(sample.exo$pos[[r_chr]][r_start:r_end]),
-1 * as.numeric(sample.exo$neg[[r_chr]][r_start:r_end])), nrow=2, byrow=TRUE)
if(normalization_method == "read_count") {
nexus.rc <- length(sample.nexus$gr)
exo.rc <- length(sample.exo$gr)
target.rc <- ceiling(mean(c(nexus.rc, exo.rc)))
data.nexus <- data.nexus / nexus.rc * target.rc
data.exo <- data.exo / exo.rc * target.rc
}
if(normalization_method == "downscale_exo") {
nexus.rc <- length(sample.nexus$gr)
exo.rc <- length(sample.exo$gr)
data.exo <- data.exo / exo.rc * nexus.rc
}
ylim.nexus <- max(abs(c(max(data.nexus), min(data.nexus))))
ylim.exo <- max(abs(c(max(data.exo), min(data.exo))))
if(zoom_to_nexus_scale) ylim.exo <- ylim.nexus
nexus_track <- DataTrack(data=data.nexus,
start=r_start:r_end, width=0, chromosome=r_chr,
genome=genome, name="ChIP-nexus",
groups=c("Positive strand", "Negative strand"),
type="histogram",
ylim=c(-ylim.nexus, ylim.nexus),
legend=TRUE,
col=c("darkblue", "red"), col.line=c("darkblue", "red"))
exo_track <- DataTrack(data=data.exo,
start=r_start:r_end, width=0, chromosome=r_chr,
genome=genome, name="ChIP-exo",
groups=c("Positive strand", "Negative strand"),
type="histogram",
ylim=c(-ylim.exo, ylim.exo),
legend=TRUE,
col=c("darkblue", "red"), col.line=c("darkblue", "red"))
regular.rle <- import(sample.regular, which=region.gr, as="RleList")
data.reg <- matrix(c(as.numeric(regular.rle[[r_chr]][r_start:r_end])), nrow=1, byrow=TRUE)
ylim.reg <- max(data.reg)
reg_track <- DataTrack(range=sample.regular,
genome=genome, name="ChIP-seq",
type="polygon",
ylim=c(0, ylim.reg),
col=c("#1C5425"),
fill.mountain=c("#1C5425", "#1C5425"))
gtrack <- GenomeAxisTrack()
tlist <- list(gtrack, reg_track, nexus_track, exo_track, grtrack)
tsizes <- c(0.1, 0.25, 1, 1, 0.1)
if(length(motifs) > 0) {
motifs <- motifs[seqnames(motifs) == r_chr]
motif_track <- AnnotationTrack(range=motifs, strand=rep("*", length(motifs)),
genome=genome, name="Motifs", showFeatureId=TRUE,
stacking="dense", fill="gray50", fontcolor="black", fontcolor.group="black", fontsize=5)
tlist <- c(tlist, list(motif_track))
tsizes <- c(tsizes, 0.1)
}
plotTracks(tlist,
sizes=tsizes,
chromosome=r_chr,
from=r_start,
to=r_end,
main=title,
cex.title=1.2, cex.axis=0.8, col.title="black", col.axis="black",
fontcolor.legend="black", cex.legend=1.1)
}
gviz_plot_no_exo <- function(genome="dm3", region.gr,
sample.regular, sample.nexus,
title, motifs=GRanges()) {
r_chr <- as.character(seqnames(region.gr))
r_start <- start(region.gr)
r_end <- end(region.gr)
stopifnot(length(region.gr) == 1)
seqlevels(region.gr, force=TRUE) <- unique(as.character(seqnames(region.gr)))
if(genome == "dm3") {
grtrack <- GeneRegionTrack(range=TxDb.Dmelanogaster.UCSC.dm3.ensGene,
genome="dm3",
chromosome=r_chr,
name="Genes",
showId=TRUE,
geneSymbol=TRUE,
fill="gray60", collapseTranscripts=FALSE)
}
if(genome == "hg19") {
grtrack <- GeneRegionTrack(range=TxDb.Hsapiens.UCSC.hg19.knownGene,
genome="hg19",
chromosome=r_chr,
name="Genes",
showId=TRUE,
geneSymbol=TRUE,
fill="gray60", collapseTranscripts=FALSE)
}
data.nexus <- matrix(c(as.numeric(sample.nexus$pos[[r_chr]][r_start:r_end]),
-1 * as.numeric(sample.nexus$neg[[r_chr]][r_start:r_end])), nrow=2, byrow=TRUE)
ylim.nexus <- max(abs(c(max(data.nexus), min(data.nexus))))
nexus_track <- DataTrack(data=data.nexus,
start=r_start:r_end, width=0, chromosome=r_chr,
genome=genome, name="ChIP-nexus",
groups=c("Positive strand", "Negative strand"),
type="histogram",
ylim=c(-ylim.nexus, ylim.nexus),
legend=TRUE,
col=c("darkblue", "red"), col.line=c("darkblue", "red"))
regular.rle <- import(sample.regular, which=region.gr, as="RleList")
data.reg <- matrix(c(as.numeric(regular.rle[[r_chr]][r_start:r_end])), nrow=1, byrow=TRUE)
ylim.reg <- max(data.reg)
reg_track <- DataTrack(range=sample.regular,
genome=genome, name="ChIP-seq",
type="polygon",
ylim=c(0, ylim.reg),
col=c("#1C5425"),
fill.mountain=c("#1C5425", "#1C5425"))
gtrack <- GenomeAxisTrack()
tlist <- list(gtrack, reg_track, nexus_track, grtrack)
tsizes <- c(0.1, 0.25, 1, 0.1)
if(length(motifs) > 0) {
motifs <- motifs[seqnames(motifs) == r_chr]
motif_track <- AnnotationTrack(range=motifs, strand=rep("*", length(motifs)),
genome=genome, name="Motifs", showFeatureId=TRUE,
stacking="dense", fill="gray50", fontcolor="black", fontcolor.group="black", fontsize=5)
tlist <- c(tlist, list(motif_track))
tsizes <- c(tsizes, 0.1)
}
plotTracks(tlist,
sizes=tsizes,
chromosome=r_chr,
from=r_start,
to=r_end,
main=title,
cex.title=1.2, cex.axis=0.8, col.title="black", col.axis="black",
fontcolor.legend="black", cex.legend=1.1)
}
|
2c27acb8d76942cd9ea72e1938a0e3f45ce696e8
|
c161c257900e34832678360a3df3d2319aad99b0
|
/man/subset.questionList.Rd
|
7d770131399e04fe462a2f31db2a501f7fa81cf1
|
[] |
no_license
|
louwerse/dutchparl
|
be7e87c42e4c7056baab01016905a96376d2676c
|
855bb72ca75408af1adc3969e99dd51a86c0b182
|
refs/heads/master
| 2021-09-19T15:43:34.099963
| 2021-08-10T15:19:10
| 2021-08-10T15:19:10
| 69,758,321
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,035
|
rd
|
subset.questionList.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrangling.R
\name{subset.questionList}
\alias{subset.questionList}
\title{Subset questionList object}
\usage{
\method{subset}{questionList}(x, df, subset, select, drop = FALSE, drop.levels = TRUE, ...)
}
\arguments{
\item{x}{A questionList object, most of the time the questions object from the Dutch Parliamentary Behaviour Dataset.}
\item{df}{The name of the data.frame in the questionList to filter on. Options include metaList, questionerList, responderList, and categoryList.}
\item{subset}{The subset command.}
\item{select}{Expression, indicating columns to select from data frame}
\item{drop}{passed on to [ indexing operator}
\item{drop.levels}{If true, superfluous levels in the data.frames will be removed.}
\item{...}{Other parameters (ignored)}
}
\value{
The subsetted questionList object.
}
\description{
Subset questionList object
}
\examples{
subset(examplequestions, examplequestions$metaList, dateQuestion > as.Date("2010-01-04"))
}
|
b6cdde45c1a9879efc531e2403d0dd41a705127c
|
c273f036c986533afe78b92555ee709206d3c7d2
|
/man/get_tz_offset.Rd
|
60045e39acfb7e1a2bb8f0ccc0b16f90263c9455
|
[] |
no_license
|
pssguy/stationaRy
|
2ee3cde95361d20f0291396279991f7d275acf97
|
541286656a85de5fef7c8cd32aca5c65d748a67b
|
refs/heads/master
| 2021-01-24T22:36:03.906978
| 2015-06-22T00:09:50
| 2015-06-22T00:09:50
| 37,831,150
| 0
| 0
| null | 2015-06-22T00:06:36
| 2015-06-22T00:06:36
| null |
UTF-8
|
R
| false
| false
| 725
|
rd
|
get_tz_offset.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_tz_offset.R
\name{get_tz_offset}
\alias{get_tz_offset}
\title{Get time zone offset from GMT}
\usage{
get_tz_offset(x, y, proj4str = "")
}
\arguments{
\item{x}{coordinate in the x direction.}
\item{y}{coordinate in the y direction.}
\item{proj4str}{an optional PROJ.4 string for the provided coordinates;
not providing a value assumes lat/lon coordinates.}
}
\description{
Obtain the time zone difference in hours between GMT and a
provided location.
}
\examples{
\dontrun{
# Get the time zone offset from UTC+00 at 125.50000
# degrees W and 49.20000 degrees N
get_tz_offset(x = -125.50000,
y = 49.20000)
#> [1] -8
}
}
|
2ef2f3eea59f8e70399bddb3efbf2f89c3596b54
|
4f53cd5d9c037ecc307dc763941f51aa70c17f88
|
/RFE.R
|
d33211003d59b84f486847c0d27b72c748187f8b
|
[
"MIT"
] |
permissive
|
chenyuqing/feature_selection_on_gene_mutation
|
ad37f67a4f3ae9b3a3af02488381dc8c4755505e
|
c1cc957016c1f792e8a6bebb28e7692bb9318ea0
|
refs/heads/master
| 2021-01-21T11:15:01.629802
| 2017-03-01T12:51:16
| 2017-03-01T12:51:16
| 83,537,044
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
RFE.R
|
## 1. set the path
setwd("D:/git-ware/feature_selection_on_gene_mutation")
getwd()
## 2. get the data
source("getdata.R")
all_data = getdata()
## 3. get the gene mutation data
nci60GeneMut = all_data[,-1]
## 4. Feature selection: rfe: recursive feature elimination
library(caret)
require(glmnet)
set.seed(2016)
# funciton : random forest
control <- rfeControl(functions = rfFuncs, method = "cv", number = 10)
ptm <- proc.time()
results <- rfe(nci60GeneMut, all_data[,1], sizes = c(1:55), rfeControl = control)
proc.time() - ptm
print(results)
str(predictors(results))
plot(results, type=c('g','o'))
## 5. save the data
m <- nci60GeneMut[,predictors(results)[1:55]]
all_data_rfe <- cbind(IC50 = all_data[,1], m)
write.csv(all_data_rfe, file = "selected_features/data_rfe.csv")
|
06c65ae5a1ef3b20853d1ec9896ad659a791543f
|
eb75d3d5fcaa8fd73c4e19938aa5209021a57f1e
|
/tests/testthat/test-function-read_relevant_years_radolan.R
|
7034307723a6d51f3940c84739fe527c619100cc
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.dwd
|
104fca00203addd0ad342091e96548dd4ce1fadf
|
22af4297f424c62c886eaeb02dd5104da6a6087e
|
refs/heads/master
| 2023-06-12T05:24:44.603834
| 2022-09-20T12:58:52
| 2022-09-20T12:58:52
| 172,280,879
| 1
| 0
|
MIT
| 2023-05-29T03:48:14
| 2019-02-24T00:53:15
|
R
|
UTF-8
|
R
| false
| false
| 451
|
r
|
test-function-read_relevant_years_radolan.R
|
test_that("read_relevant_years_radolan() works", {
f <- kwb.dwd:::read_relevant_years_radolan
expect_error(f())
expect_error(f(years = 1999L))
expect_error(f("/no/such/path", years = 2000L))
expect_message(result <- f(tempdir(), years = 2000L))
expect_null(result)
writeLines("this is a test", file.path(tempdir(), "0-test.gri"))
expect_error(result <- f(tempdir(), 2000L), "Cannot create a RasterLayer")
expect_null(result)
})
|
c486d3ee5113794fe135e8c834a95fb2d7debf78
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/multinomRob/examples/multinomRob.Rd.R
|
1e4cbf3bd5da90c290490d1d6253ca21c979bdf4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,138
|
r
|
multinomRob.Rd.R
|
library(multinomRob)
### Name: Robust Multinomial Regression
### Title: Multinomial Robust Estimation
### Aliases: multinomRob
### Keywords: robust models regression
### ** Examples
# make some multinomial data
x1 <- rnorm(50);
x2 <- rnorm(50);
p1 <- exp(x1)/(1+exp(x1)+exp(x2));
p2 <- exp(x2)/(1+exp(x1)+exp(x2));
p3 <- 1 - (p1 + p2);
y <- matrix(0, 50, 3);
for (i in 1:50) {
y[i,] <- rmultinomial(1000, c(p1[i], p2[i], p3[i]));
}
# perturb the first 5 observations
y[1:5,c(1,2,3)] <- y[1:5,c(3,1,2)];
y1 <- y[,1];
y2 <- y[,2];
y3 <- y[,3];
# put data into a dataframe
dtf <- data.frame(x1, x2, y1, y2, y3);
## Set parameters for Genoud
## Not run:
##D ## For production, use these kinds of parameters
##D zz.genoud.parms <- list( pop.size = 1000,
##D wait.generations = 10,
##D max.generations = 100,
##D scale.domains = 5,
##D print.level = 0
##D )
## End(Not run)
## For testing, we are setting the parmeters to run quickly. Don't use these for production
zz.genoud.parms <- list( pop.size = 10,
wait.generations = 1,
max.generations = 1,
scale.domains = 5,
print.level = 0
)
# estimate a model, with "y3" being the reference category
# true coefficient values are: (Intercept) = 0, x = 1
# impose an equality constraint
# equality constraint: coefficients of x1 and x2 are equal
mulrobE <- multinomRob(list(y1 ~ x1, y2 ~ x2, y3 ~ 0),
dtf,
equality = list(list(y1 ~ x1 + 0, y2 ~ x2 + 0)),
genoud.parms = zz.genoud.parms,
print.level = 3, iter=FALSE);
summary(mulrobE, weights=TRUE);
#Do only MLE estimation. The following model is NOT identified if we
#try to estimate the overdispersed MNL.
dtf <- data.frame(y1=c(1,1),y2=c(2,1),y3=c(1,2),x=c(0,1))
summary(multinomRob(list(y1 ~ 0, y2 ~ x, y3 ~ x), data=dtf, MLEonly=TRUE))
|
f895acc72e087001556c2d2c53a2666fdd6a6fb5
|
38fd819dc184332ed8d86121cfa34edb4ea615ba
|
/inst/appdir/app.R
|
94a0f16f0b78eadbc8d58121da9dd29189c61c47
|
[] |
no_license
|
mstroehle/tdmsviewer
|
aa3015705a2063c3a6dfc2bd01fc2570e4049605
|
c8e1bd21824592e574086b99da320a59d0f621a8
|
refs/heads/master
| 2020-06-04T10:24:24.262845
| 2018-03-23T15:41:08
| 2018-03-23T15:41:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
app.R
|
source('global.R')
ui = function() {
source('page/home.R', local = T)
source('page/saved.R', local = T)
source('page/help.R', local = T)
source('page/landmark_page.R', local = T)
fluidPage(
includeCSS('styles.css'),
headerPanel('tdmsviewer'),
wellPanel(style = 'background-color: #ffffff;',
tabsetPanel(id = 'inTabset',
tabPanel(style = 'margin: 20px;', id = 'home', 'Home', homeUI('home')),
tabPanel(style = 'margin: 20px;', id = 'saved', 'Saved EODs', savedUI('saved')),
tabPanel(style = 'margin: 20px;', id = 'landmarkpage', 'Landmarks', landmarkpageUI('landmarkpage')),
tabPanel(style = 'margin: 20px;', id = 'help', 'Help', helpUI('help'))
)
)
)
}
server = function(input, output, session) {
source('page/home.R', local = T)
source('page/saved.R', local = T)
source('page/help.R', local = T)
source('page/landmark_page.R', local = T)
extrainput = callModule(homeServer, 'home')
moreinput = callModule(savedServer, 'saved', extrainput)
callModule(landmarkpageServer, 'landmarkpage', moreinput)
callModule(helpServer, 'help')
observe({
reactiveValuesToList(input)
session$doBookmark()
})
onBookmarked(function(url) {
updateQueryString(url)
})
}
shinyApp(ui = ui, server = server, enableBookmarking = 'url')
|
fdced5f3c81d308060f7cd9f095d0420589563cd
|
21bd7a9d6341b53d6f622f8e6e9de6ce5f638de0
|
/Module 2/Set 2.R
|
6c863711a2e7c5870de2df7d9db30c563fedf233
|
[] |
no_license
|
meganschmidt23/DataAnalyticSummer2020
|
fb00dc5dceb4a4810148b306de21944373cf3827
|
6299255136a1d6d6a9a362242769a55ae3550bf7
|
refs/heads/master
| 2023-03-02T02:37:43.717382
| 2021-02-11T03:25:17
| 2021-02-11T03:25:17
| 268,985,625
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,282
|
r
|
Set 2.R
|
#1. Download the data file "Wholesale_customers_data.csv". The attributes of the data are explained in comments at the top.
wh=read.csv("Wholesale_customers_data.csv", comment.char="#")
wh$Region=factor(wh$Region)
wh$Channel=factor(wh$Channel)
#(a) Check if there are NA values and eliminate those rows
wh <- na.omit(wh)
#(b) Obtain summary statistics of sales in each category of merchandise.
summary(wh$Fresh)
summary(wh$Milk)
summary(wh$Grocery)
#(c) Make three different data frames containing sales data to each of the three different regions
wh_Region1=subset(wh, Region==1)
wh_Region2=subset(wh, Region==2)
wh_Region3=subset(wh, Region==3)
#2. Download and read in the "birthdays.csv" file. It contains birth days of 480,000+ people. Call the data frame bday.
#(a) What classes are the month and day columns of bd, as read in. Should they be changed? If so, change them appropriately.
#As read in, these attributes are numeric. They need to be set to factors.
bday$month=factor(bday$month)
bday$day=factor(bday$day)
#(b) Do a scatter plot of month on x axis and count on y axis. What do you observe about distribution of birth days? Are there any outliers ?
qplot(x=bday$month, y=bday$count, data=bday)
#3. Load the dplyr and gapminder packages (install these if they are not installed – you might as well install tidyverse that instals a whole bunch of packages)
#(a) Examine the gapminder dataset. How many observations and how many attributes are in the data set?
#(b) Sort the gapminder data in descending order of life expectancy using the arrange verb from dplyr
gapminder %>% arrange(desc(lifeExp)) %>% arrange(desc(year))
#(c) Use filter() to extract observations from just the year 1957, then use arrange to sort in descending order of population (pop).
gapminder %>% filter (year==1957) %>% arrange(desc(lifeExp))
#(d) Use mutate() to change the existing lifeExp column, by multiplying it by 12: 12*lifeExp.
gapminder %>% mutate(lifeExp=lifeExp*12)
#(e) Use mutate() to add a new column, called lifeExpMonths, calculated as 12*lifeExp.
gapminder %>% mutate (lifeExpMonths=lifeExp*12)
#(f) In one sequence of pipes on the gapminder dataset:
#• filter() for observations from the year 2007,
#• mutate() to create a column lifeExpMonths, calculated as 12*lifeExp, and
#• arrange() in descending order of that new column
#Save the data as data2007
data2007 <- gapminder %>% filter(year==2007) %>% mutate (lifeExpMonths=lifeExp*12) %>% arrange(desc(lifeExpMonths))
#(g) Use dplyr’s arrange and summarise functions to get a count of the countries in data2007 by continents.
data2007%>% group_by(continent) %>% summarise(count=n())
#(h) Use dplyr data wrangling functions to get counts by continents of top 50 countries with highest life expectancies. Answer will be as shown below
data2007Top50<- data2007 %>% mutate(rank=row_number()) %>% filter(rank <= 50) data2007Top50 %>% group_by(continent) %>% summarise(count=n())
#4. In the solar.csv data (available in Data folder of “Course Documents” on BlackBoard), answer the following, with appropriate R code.
# a. Load the data into an R data frame. The month and day columns in the data must be factors, so set them to be factors in the data frame.
solar<- read.csv("solar.csv")
# b. What are the column names of data ?
colnames(solar)
# c. How many NA’s are in each column?
sol<-as_tibble(solar)
sol %>% filter(is.na(Ozone)) %>% summarise(n())
sol %>% filter(is.na(Solar.R)) %>% summarise(n())
sol %>% filter(is.na(Wind)) %>% summarise(n()) sol %>% filter(is.na(Temp)) %>% summarise(n()) sol %>% filter(is.na(Month)) %>% summarise(n()) sol %>% filter(is.na(Day)) %>% summarise(n())
#d. How many months of data do you have?
sol %>% group_by(Month) %>% count()
# e. For each month of available data, what days registered maximum Temperature, Ozone and Wind?
sol %>% group_by(Month)%>% summarise(max(Temp))
# f. Eliminate all rows containing NA’s in the data frame. Rename the rows to have sequential numbers.
solar<- na.omit(solar)
row.names(solar)<- 1:nrow(solar)
# g. Create a new data frame in which Solar.R > 300 and Temp >10
subset(solar, Solar.R>300 & Temp>10)
|
f704b5198fdeea2c7059f824723c11c7c13396fd
|
d42ac737b65a962a12edf07c16bd0cc42f69801a
|
/GRIP Monushree.R
|
2435ce4cd7ba214274ff26f14eabadf0b57f8cdd
|
[] |
no_license
|
MonushreeGanguly/GRIP-Task-1
|
f9159858f411b500a38524f4c428ab5f81ab50e3
|
d5ae505e864979b1b7e747ea01c095eac8e2101c
|
refs/heads/main
| 2023-03-21T15:56:07.190004
| 2021-03-17T17:10:28
| 2021-03-17T17:10:28
| 348,750,058
| 0
| 0
| null | 2021-03-17T15:16:54
| 2021-03-17T14:59:28
| null |
UTF-8
|
R
| false
| false
| 1,681
|
r
|
GRIP Monushree.R
|
# GRIP: The Sparks Foundation
# Monushree Ganguly, Data Science & Business Analysis Intern
## Task 1: Prediction using Supervised ML
#IMPORTING DATA
info<-read.csv("C:/Users/debashishganguly/Downloads/student_scores.csv")
head(info,25)
# 1.scatterplot
plot(info$Hours,info$Scores)
#2.correlation
cor(info$Hours,info$Scores)
#Interpretation: 1 unit increase in the number of hours studied is leading
# to 0.97 unit increase in marks scored.
#3.Simple Linear Regression Model
r<-lm(Scores~Hours,data=info)
#4.Add regression line
abline(r)
# Interpretation: There is a strong uphill (positive) linear relationship
# between number of hours studied and marks scored.Deviations are very small,
# majority of the points on the plot lie on the line of best fit.
#5.Summary Stats
summary(r)
# Residuals shows how different the predictions were than the actual results.
coef(summary(r))
# Coefficients shows the actual predictor and the significance of each of
# those predictors.
# 1) The Y intercept i.e. how much cann be scored with 0 hours of study
# 2) slope/ coefficient of no. of hours 9.7758 shows positive relationship,
# i.e. increasing the no.of study hours by 1 unit would increase the
# score by 9.7758 marks.
#7. fitted values
r$fitted.values
plot(info$Hours,r$fitted.values)
#8.0 prediction command on R
predict(r)
## QUESTION:
# What will be predicted score if a student studies for 9.25 hrs/ day?
predict(r,list(Hours=9.25))
# We can find this manually also
2.4837+9.7758*9.25
# CONCLUSION: If a student studies for 9.25 hrs/ day, his/her predicted
# score will be 92.90985.
|
ff425b67468ea32f2aaf9b7a9479b9dc48a57db7
|
73fd86dfc13baed81bfd7405b855ace05365c607
|
/man/build.lut.Rd
|
9ed847272cd8673bcc74a5c6e18ed588a8ac9114
|
[] |
no_license
|
cran/ShapePattern
|
976fba2c343705d178e42cc73206f92104c1e83a
|
75fdec13af558c80d38d865ac3154ca3de6fb253
|
refs/heads/master
| 2023-09-04T02:55:22.055477
| 2023-08-22T07:20:09
| 2023-08-22T09:30:35
| 57,234,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,431
|
rd
|
build.lut.Rd
|
\name{build.lut}
\alias{build.lut}
\title{
Bias correction lookup table (LUT) builder based on simulations.
}
\description{
This function needs to be run only once, at the beginning of analysis, to produce a lookup table of spatial autocorrelation (rho) bias correction factors. Since this process takes a long time, due to the need for simulating numerous replicates (generally 50 or more) landscapes at each pair-wise combination of composition and configuration parameterization, we provide a lookup table with the package that is used by the \code{singlemap} function internally. Currently, the implementation is limited to 64x64 pixel maps; however, future developments will expand on this limitation and thus the build.lut function is provided for those that may wish to fast-track the construction of scaled lookup tables for analyses of landscapes of different size. Bias corrections are achieved by comparing Whittle estimates of spatial autocorrelation on binary versus continuous surfaces with identical parameterizations.
}
\usage{
build.lut(LEVEL = 6, REPSIM = 5, RAJZ = FALSE, CIM = "", ENV="data")
}
\arguments{
\item{LEVEL}{
A power (n) of base 2 that controls the size of the simulated maps used to process the bias correction surface: 2^n x 2^n cells.
}
\item{REPSIM}{
The number of simulation replicates at each combination of composition and configuration.
}
\item{RAJZ}{
A Boolean flag that when TRUE will draw the result.
}
\item{CIM}{
An option that permits the naming of image objects. This is provided for potential future expansion.
}
\item{ENV}{
The name of the environment where temporary objects are stored. The default is data.
}
}
\details{
This does not need to be run if you have downloaded and installed the \code{PatternClass} package and are using it to analyze 64x64 pixel image subsets as proposed. The \code{singlemap} function will call on the provided internal lookup table and thus save on substantial processing time. The current implementation provides a lookup table with bias correction values based on the mean of 50 replicate simulations for each pair-wise combination of composition and configuration parameterization. hus, the 9 levels of composition, 11 levels of spatial autocorrelation, and 50 replicates, means that 9*11*50 = 4950 landscapes are simulated and used to produce the bias correction lookup table that is provided with the package.
}
\value{
The result is a lookuptable (LUT) that is a 9x11 matrix of values used by singlemap().
}
\references{
Remmel, T.K. and M.-J. Fortin. 2013. Categorical class map patterns: characterization and comparison. Landscape Ecology. DOI: 10.1007/s/10980-013-9905-x.
Remmel, T.K. and M.-J. Fortin. What constitutes a significant difference in landscape pattern? (using R). 2016. In Gergel, S.E. and M.G. Turner. Learning landscape ecology: concepts and techniques for a sustainable world (2nd ed.). New York: Springer.
}
\author{
Tarmo K. Remmel
}
\note{
The current implementation is for 64x64 image subsets. Future developments will expand this extent limitation. However, our work has shown that this extent provides a reasonable compromise between statistical power and computing efficiency.
}
\seealso{
See Also \code{\link{singlemap}}
}
\examples{
\donttest{build.lut(LEVEL = 6, REPSIM = 5, RAJZ = FALSE, CIM = "", ENV="data")}
}
\keyword{ distribution }
|
5cc2b37b597c1526c6460697e257df5a9c4f1067
|
de173be8ea860b6c536bdfbe8276afcef597889c
|
/trade2.r
|
c2a39ee9a3cbf110da4500952855312b96f0020b
|
[] |
no_license
|
liuhui2015/my-r-code
|
949be44b0901b545bdcda8b00f063da3f4339f80
|
0c0f1eee9973ca5249c0f1d2e284c4fb2e7e9ded
|
refs/heads/master
| 2020-12-31T03:26:19.956108
| 2013-06-07T14:22:38
| 2013-06-07T14:22:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,594
|
r
|
trade2.r
|
#以T检验为基础的交易系统
#首先载入两个所需程序包
library(quantmod)
library(PerformanceAnalytics)
#从YAHOO得到上海综指数据
getSymbols('^SSEC',src='yahoo')
#取出收盘价
close=as.numeric(Cl(SSEC))
#计算差分
d=c(NA,diff(close))
#计算总时长
n=length(close)
#预先赋空值 mean.c为收盘价均值,mean.d为波动均值,sd为波动标准差,
#test为判断趋势的函数,即T 统计量,sig为仓位,up/down为上下界
mean.c=mean.d=sd=test=sig=up=down=max.c=min.c=numeric(n)
#以约20单位为一周期,循环计算各变量
for ( i in 20:(n-2)){
mean.d[i]=mean(d[(i-19):i],na.rm=T)#计算本周期波动均值
mean.c[i]=mean(close[(i-19):i])#计算本周期收盘均值
sd[i]=sd(d[(i-19):i],na.rm=T)#计算本周期标准差
test[i]=abs(mean.d[i]/(sd[i]/sqrt(20)))#计算判断指标,T 统计量
up[i]=mean.c[i]+2*sd[i]#计算突破上界
down[i]=mean.c[i]-2*sd[i]#计算突破下界
max.c[i]=min.c[i]=close[i]
#若为震荡市,出现突破则入场
if (test[i]<0.5&close[i+1]>up[i]) sig[i+2]=1
if (test[i]<0.5&close[i+1]<down[i]) sig[i+2]=-1
#若为趋势市,则入场
if (test[i]>0.8&mean.d[i]>0) sig[i+1]=1
if (test[i]>0.8&mean.d[i]<0) sig[i+1]=-1
#若收盘打到止损位,出场
if (sig[i]==1){
max.c[i]=max(max.c[i-1],close[i])
if (close[i+1]<max.c[i]-1.5*sd[i]) sig[i+2]=0
}
if (sig[i]==-1){
min.c[i]=min(min.c[i-1],close[i])
if (close[i+1]>min.c[i]+1.5*sd[i]) sig[i+2]=0
}
}
#计算收益率
ret=na.omit(ROC(type='discrete',close)*sig)
eq=cumprod(1+ret)
plot(eq,type='l')
|
b01cc31ff54ade6e46cc38690373c8d2d6d2b0e2
|
8e4a213138c40d7670315bb00c8df224ff68b616
|
/man/math_gender_iat.Rd
|
2e3bd1d77500c385b993a02cac03d3c7b9f6cd5e
|
[] |
no_license
|
gitrman/itns
|
432f0ad6448667e38a5f389c8261eb5d1f1bd0be
|
6d5f9b3818d7b73dc1d643c220f3f9e89f4646a5
|
refs/heads/master
| 2020-12-23T22:25:11.501834
| 2017-03-09T06:57:01
| 2017-03-09T06:57:01
| 58,635,851
| 4
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,326
|
rd
|
math_gender_iat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/math_gender_iat.R
\docType{data}
\name{math_gender_iat}
\alias{math_gender_iat}
\title{Two Independent Groups - Math Gender IAT}
\format{A data frame with 243 rows and 4 variables:
\describe{
\item{sessionid}{Respondent identifier}
\item{location}{The lab that collected the data (ithaca or sdsu)}
\item{gender}{\emph{male} or \emph{female}}
\item{iat_score}{The score on the IAT test, where higher scores represent stronger negative bias towards math and negative scores represent stronger negative bias towards art.}
}}
\source{
This is data is available online at \url{https://osf.io/wx7ck} and is from:
Klein, R. A., Ratliff, K. A., Vianello, M., Adams ., R. B., Bahnik, S., Bernstein, M. J., ... & Nosek, B. A. (2014).
Investigating Variation in Replicability. \emph{Social Psychology, 45}, 142-152. \url{http://doi.org/10.1027/1864-9335/a000178}
Data from participants that had to be excluded due to high error rates or slow responses has already been deleted.
The original study to investigate this effect is:
Nosek, B. a, Banaji, M. R., & Greenwald, A. G. (2002). Math = male, me = female, therefore math not = me.
\emph{Journal of Personality and Social Psychology, 83}, 44-59. \url{http://doi.org/10.1037/0022-3514.83.1.44}
}
\usage{
math_gender_iat
}
\description{
An example of data from a study with a two independent groups design used in Chapter 7 of the book
\emph{Introduction to the New Statistics}.
}
\details{
To what extent is gender related to implicit attitudes about bias?
To find out, Nosek and colleagues asked male and female students to complete an Implicit Association Test (IAT) that
measured how easily negative ideas could be connected to art or to mathematics. The data shown here records the participants'
gender and their IAT score. Positive scores indicate an easier time linking negative ideas with mathematics,
negative scores indicate an easier time linking positive ideas with mathematics.
The data is from 2 different labs (Ithaca and SDSU), both part of a large-scale collaboration in which the same
studies were run in multiple labs all over the world.
}
\references{
Cumming, G., & Calin-Jageman, R. (2017).
\emph{Introduction to the New Statistics}. New York; Routledge.
}
\keyword{datasets}
|
236459fd00e8b2021a97213a7016d41623209abc
|
e7ff273130f86ce97c548ad9d6f037cd022be695
|
/plog2.r
|
3811b24a7e53f951bdc3125f6eb370e27baf6c10
|
[] |
no_license
|
lexili/ExData-Assign-1
|
49db24a74164812702df81536d822cd3424cd503
|
f9d02bf1210a886c70d3e35c539b1b3c69faa596
|
refs/heads/master
| 2021-01-18T20:20:14.717696
| 2014-10-18T22:11:37
| 2014-10-18T22:11:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
plog2.r
|
# read data
data<-read.table("~/household_power_consumption.txt", header=TRUE,sep=";", colClasses= "character")
# subset the data
testdata <- subset(data,Date=="1/2/2007" | Date=="2/2/2007")
png("plot2.png", width=480, height=480)
plot(as.numeric(testdata$Global_active_power),xaxt = "n",type='l',xlab="",ylab="Global Active Power(kilowatts)")
axis(1,at=1, labels="Thu")
axis(1,at=1440, labels="Fri")
axis(1,at=2880, labels="Sat")
dev.off()
|
423d0bb41d7aa91b77cb31e950d5ee12297d9014
|
a77ad6a7d32478a3921a67ed339ee1d120853f37
|
/pystan_examples/stan_toy.R
|
a647654307ddd00c5d06095b08332fa24179c79c
|
[] |
no_license
|
yiulau/all_code
|
5cf7804c5a77d197f6a979d6fafa036f95d16659
|
4630ea6efc630616e6f767807ebbd64c82852726
|
refs/heads/master
| 2020-03-18T05:19:50.586402
| 2018-08-03T09:14:59
| 2018-08-03T09:14:59
| 134,337,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,188
|
r
|
stan_toy.R
|
library(rstan)
num_obs = 100
num_non_zeros = 20
y = rep(0,num_obs)
true_beta = rnorm(num_non_zeros) * 5
y[1:num_non_zeros] = true_beta
y = y + rnorm(num_obs)
print(true_beta)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/horseshoe_toy.stan"
model_hs = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/rhorseshoe_toy.stan"
model_rhs = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/linear_regression_horseshoe.stan"
model_lr_hs = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/linear_regression_rhorseshoe.stan"
model_lr_rhs = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/linear_regression_student_t.stan"
model_lr_student = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/logistic_regression_horseshoe.stan"
model_logit_horseshoe = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/logistic_regression_rhorseshoe.stan"
model_logit_rhorseshoe = stan_model(file=address)
address = "/home/yiulau/PycharmProjects/all_code/stan_code/logistic_regression_student_t.stan"
model_logit_student = stan_model(file=address)
options(mc.cores = parallel::detectCores())
data = list(y=y,N=num_obs)
o1 = sampling(model_hs,data=data,control=list("metric"="diag_e",adapt_delta = 0.9))
print(o1)
beta_summary <- summary(o1, pars = c("beta"))$summary
lp_summary = summary(o1,pars=c("lp__"))$summary
tau_summary = summary(o1,pars=c("tau"))$summary
print(beta_summary[1:non_zero_dim,])
print(true_beta[1:non_zero_dim])
print(tau_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o1, inc_warmup = TRUE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
####################################################################################################################################
o2 = sampling(model_hs,data=data,control=list("metric"="dense_e",adapt_delta = 0.99,max_treedepth=15))
beta_summary <- summary(o2, pars = c("beta"))$summary
lp_summary = summary(o2,pars=c("lp__"))$summary
tau_summary = summary(o2,pars=c("tau"))$summary
print(beta_summary)
print(tau_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o2, inc_warmup = TRUE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
#######################################################################################################################################
o3 = sampling(model_rhs,data=data,control=list("metric"="diag_e",adapt_delta = 0.9,max_treedepth=10))
beta_summary <- summary(o3, pars = c("beta"))$summary
lp_summary = summary(o3,pars=c("lp__"))$summary
tau_summary = summary(o3,pars=c("tau"))$summary
print(beta_summary)
print(tau_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o3, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
###################################################################################################################################
o4 = sampling(model_rhs,data=data,control=list("metric"="dense_e",adapt_delta = 0.9,max_treedepth=10))
beta_summary <- summary(o4, pars = c("beta"))$summary
lp_summary = summary(o4,pars=c("lp__"))$summary
tau_summary = summary(o4,pars=c("tau"))$summary
print(beta_summary)
print(tau_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o4, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
################################################################################################################################
num_obs = 400
non_zero_dim = 20
full_p = 100
X = matrix(rnorm(num_obs*full_p),nrow=num_obs,ncol=full_p)
library(Matrix)
rankMatrix(X)
true_beta = rnorm(full_p)
true_beta[1:non_zero_dim] = rnorm(non_zero_dim)*5
print(true_beta[1:non_zero_dim])
y = X%*%true_beta + rnorm(num_obs)
y = drop(y)
data = list(y=y,X=X,N=num_obs,K=full_p)
o5 = sampling(model_lr_hs,data=data,control=list("metric"="diag_e",adapt_delta = 0.9,max_treedepth=10))
beta_summary <- summary(o5, pars = c("beta"))$summary
lp_summary = summary(o5,pars=c("lp__"))$summary
tau_summary = summary(o5,pars=c("tau"))$summary
print(beta_summary)
print(tau_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o5, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
##################################################################################################################################
o6 = sampling(model_lr_rhs,data=data,control=list("metric"="diag_e",adapt_delta = 0.9,max_treedepth=15))
beta_summary <- summary(o6, pars = c("beta"))$summary
lp_summary = summary(o6,pars=c("lp__"))$summary
tau_summary = summary(o6,pars=c("tau"))$summary
c_summary <- summary(o6, pars = c("c"))$summary
print(beta_summary[1:non_zero_dim,])
print(true_beta[1:non_zero_dim])
print(tau_summary)
print(c_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o6, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
#########################################################################################################################################
library(glmnet)
lambda <- 10^seq(10, -2, length = 100)
lasso.mod = glmnet(X, y, alpha = 1, lambda = lambda)
bestlam <- lasso.mod$lambda.min
lasso.coef <- predict(lasso.mod, type = 'coefficients', s = bestlam)
out = glmnet(X, y, alpha = 1, lambda = 1)
print(coef(out))
##########################################################################################################################################
o7 = sampling(model_lr_student,data=data,control=list("metric"="diag_e",adapt_delta = 0.9,max_treedepth=15))
beta_summary <- summary(o7, pars = c("beta"))$summary
lp_summary = summary(o7,pars=c("lp__"))$summary
tau_summary = summary(o7,pars=c("tau"))$summary
c_summary <- summary(o7, pars = c("c"))$summary
print(beta_summary[1:non_zero_dim,])
print(true_beta[1:non_zero_dim])
print(tau_summary)
print(c_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o7, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
#############################################################################################################################
# logistic
n = 30
dim = 100
X = matrix(rnorm(n*dim),nrow=n,ncol=dim)
y = rep(0,n)
for(i in 1:n){
y[i] = rbinom(n=1,size=1,prob=0.5)
if(y[i]>0){
X[i,1:2] = rnorm(2)*0.5 + 1
}
else{
X[i,1:2] = rnorm(2)*0.5 -1
}
}
data = list(X=X,y=y,N=n,K=dim)
o8 = sampling(model_logit_horseshoe,data=data,control=list("metric"="diag_e",adapt_delta = 0.9,max_treedepth=15))
beta_summary <- summary(o8, pars = c("beta"))$summary
lp_summary = summary(o8,pars=c("lp__"))$summary
tau_summary = summary(o8,pars=c("tau"))$summary
c_summary <- summary(o8, pars = c("c"))$summary
print(beta_summary[1:non_zero_dim,])
print(true_beta[1:non_zero_dim])
print(tau_summary)
print(c_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o8, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
#######################################################################################################
o9 = sampling(model_logit_rhorseshoe,data=data,control=list("metric"="diag_e",adapt_delta = 0.99,max_treedepth=15))
beta_summary <- summary(o9, pars = c("beta"))$summary
lp_summary = summary(o9,pars=c("lp__"))$summary
tau_summary = summary(o9,pars=c("tau"))$summary
c_summary <- summary(o9, pars = c("c"))$summary
print(beta_summary[1:non_zero_dim,])
print(true_beta[1:non_zero_dim])
print(tau_summary)
print(c_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o9, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
####################################################################################################
o10 = sampling(model_logit_student,data=data,control=list("metric"="diag_e",adapt_delta = 0.9,max_treedepth=15))
beta_summary <- summary(o10, pars = c("beta"))$summary
lp_summary = summary(o10,pars=c("lp__"))$summary
tau_summary = summary(o10,pars=c("tau"))$summary
c_summary <- summary(o10, pars = c("c"))$summary
print(beta_summary[1:non_zero_dim,])
print(true_beta[1:non_zero_dim])
print(tau_summary)
print(c_summary)
print(lp_summary)
sampler_params <- get_sampler_params(o10, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
mean_accept_stat_by_chain <- sapply(sampler_params, function(x) mean(x[, "accept_stat__"]))
print(mean_accept_stat_by_chain)
print(sampler_params_chain1[1:900,"stepsize__"])
print(sampler_params_chain1[1:900,"n_leapfrog__"])
print(sampler_params_chain1[1000:1900,"n_leapfrog__"])
num_divergent = sapply(sampler_params, function(x) sum(x[, "divergent__"]))
print(num_divergent)
############################################################################################################
|
39fce5a0ff50e0397e177d7a5a62bd8f9367d03d
|
63d50cbf64469abd6d4729ba0266496ced3433cf
|
/vedha/string_function.r
|
3efc088e0797cd466971c99ef1c17b5b957d16b3
|
[] |
no_license
|
tactlabs/r-samples
|
a391a9a07022ecd66f29e04d15b3d7abeca7ea7c
|
a5d7985fe815a87b31e4eeee739bc2b7c600c9dc
|
refs/heads/master
| 2023-07-08T09:00:59.805757
| 2021-07-25T15:16:20
| 2021-07-25T15:16:20
| 381,659,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
string_function.r
|
# Created on
# Course work:
# @author: Kevin
# Source:
# https://techvidvan.com/tutorials/r-string-manipulation/
# nchar: Size of string
string <- "TactLabs"
nchar(string)
strvec <- c(string,"HI", "hey", "haHa")
nchar(strvec)
|
50069c7c2bf94d1823fa3300c0fbdc1ed609bcea
|
b2548f033cfe74ca0cb0c755c034cba1e1f6b509
|
/Fanduel Baseball Full Roster.R
|
b6c648a1948f2ebf215cefeace447658489cf35e
|
[] |
no_license
|
mbecker0813/fanduel_baseball
|
f2dd34e82bffc5e500adba86e19c3abafa518647
|
6aa3eb0c6b20d6be9b1cf77bd716587aca2bd10e
|
refs/heads/main
| 2022-12-30T08:31:57.788530
| 2020-10-20T18:42:23
| 2020-10-20T18:42:23
| 305,800,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,696
|
r
|
Fanduel Baseball Full Roster.R
|
print(paste('Start Time:',Sys.time()))
library(tidyverse)
library(baseballr)
library(tictoc)
tic('Run time')
# Dates for stats
start_date_p <- as.character(Sys.Date() - 30)
end_date <- as.character(Sys.Date() - 1)
start_date_b <- as.character(Sys.Date() - 15)
# Import FanDuel player list
players <- read.csv('baseball.csv')
players <- players %>% filter(Injury.Indicator == '') %>%
select(Id, Position, Name = Nickname, FPPG, Played, Salary, Game,
Team, Opponent, Probable.Pitcher, Batting.Order)
players$Value <- players$FPPG / (players$Salary / 1000)
# Rankings
# Join pitching ratings of opponents to offense players
p_stats <- daily_pitcher_bref(t1 = start_date_p, t2 = end_date)
p_stats$Name <- gsub('é','e',p_stats$Name)
d_rank <- players %>% filter(Position == 'P' & Probable.Pitcher == 'Yes') %>% arrange(FPPG)
d_rank <- left_join(d_rank, p_stats, by = 'Name')
d_rank$Rank <- ((d_rank$HR / d_rank$IP)*10) + d_rank$WHIP + d_rank$ERA +
(d_rank$H / d_rank$IP) + (d_rank$BB / d_rank$IP) - (d_rank$SO9 * .1)
d_rank <- d_rank[,c('Team.x', 'Rank')]
colnames(d_rank) <- c('Opponent','Matchup')
offense <- players %>% filter(Position != 'P')
offense <- left_join(offense, d_rank, by = 'Opponent')
# Join offense ratings of opponents to defense teams
b_stats <- daily_batter_bref(t1 = start_date_b, t2 = end_date)
b_stats$Name <- gsub('é','e',b_stats$Name)
o_rank <- players %>% filter(Position != 'P')
o_rank <- left_join(o_rank, b_stats, by = 'Name')
o_rank$FPPG <- ((o_rank$X1B * 3) + (o_rank$X2B * 6) + (o_rank$X3B * 9) + (o_rank$HR * 12) +
(o_rank$BB * 3) + (o_rank$HBP * 3) + (o_rank$R * 3.2) +
(o_rank$RBI * 3.5) + (o_rank$SB * 6)) / o_rank$G
o_rank <- o_rank[complete.cases(o_rank$bbref_id),]
offense <- merge(offense, o_rank, all.x = T, by = c('Id','Position','Name','FPPG'))
offense <- offense[,1:13]
colnames(offense) <- c('Id','Position','Name','FPPG','Played','Salary','Game','Team',
'Opponent','Probable.Pitcher','Batting.Order','Value','Matchup')
o_team_rank <- players %>% filter(Position != 'P' & FPPG >= mean(FPPG)) %>% group_by(Team) %>%
summarize(TeamFPPG = mean(FPPG)) %>% arrange(TeamFPPG)
o_team_rank$Rank <- nrow(o_team_rank):1
o_team_rank <- o_team_rank[,-2]
colnames(o_team_rank) <- c('Opponent','Matchup')
defense <- players %>% filter(Position == 'P')
defense <- left_join(defense, o_team_rank, by = 'Opponent')
players <- rbind(offense, defense)
pitchers <- players %>% filter(Probable.Pitcher == 'Yes' & Salary >= mean(Salary))
batters <- players %>% filter(Position != 'P'
#& Batting.Order != 0
)
batters$Position[batters$Position %in% c('C','1B')] <- 'C/1B'
cb1 <- batters %>% filter(batters$Position == 'C/1B')
b2 <- batters %>% filter(batters$Position == '2B')
b3 <- batters %>% filter(batters$Position == '3B')
ss <- batters %>% filter(batters$Position == 'SS')
of <- batters %>% filter(batters$Position == 'OF')
cb1 <- cb1 %>% filter(FPPG >= mean(FPPG) & Played >= mean(Played) & Salary >= mean(Salary))
b2 <- b2 %>% filter(FPPG >= mean(FPPG) & Played >= mean(Played) & Salary >= mean(Salary))
b3 <- b3 %>% filter(FPPG >= mean(FPPG) & Played >= mean(Played) & Salary >= mean(Salary))
ss <- ss %>% filter(FPPG >= mean(FPPG) & Played >= mean(Played) & Salary >= mean(Salary))
of <- of %>% filter(FPPG >= mean(FPPG) & Played >= mean(Played) & Salary >= mean(Salary))
lineup_comb <- data.frame(matrix(ncol = 14, nrow = 0))
colnames(lineup_comb) <- c('P','CB1','B2','B3','SS',
'OF1','OF2','OF3','UTIL','Salary','FPPG',
'Matchup','Value','Dup')
for (i in 1:50000) {
p1 <- pitchers[sample(nrow(pitchers),1), ]
p2 <- cb1[sample(nrow(cb1),1), ]
p3 <- b2[sample(nrow(b2),1), ]
p4 <- b3[sample(nrow(b3),1), ]
p5 <- ss[sample(nrow(ss),1), ]
p68 <- of[sample(nrow(of),3), ]
p9 <- batters[sample(nrow(batters),1), ]
lineup <- rbind(p1[1:13],p2[1:13],p3[1:13],p4[1:13],p5[1:13],p68[1:13],p9[1:13])
bat_stats <- rbind(p2,p3,p4,p5,p68,p9)
lineup_check <- lineup %>% group_by(Team) %>% mutate(Count = n())
if(max(lineup_check$Count) < 5){
new_row <- data.frame(P = paste(p1$Id,p1$Name,sep = ':'),
CB1 = paste(p2$Id,p2$Name,sep = ':'),
B2 = paste(p3$Id,p3$Name,sep = ':'),
B3 = paste(p4$Id,p4$Name,sep = ':'),
SS = paste(p5$Id,p5$Name,sep = ':'),
OF1 = paste(p68$Id[1],p68$Name[1],sep = ':'),
OF2 = paste(p68$Id[2],p68$Name[2],sep = ':'),
OF3 = paste(p68$Id[3],p68$Name[3],sep = ':'),
UTIL = paste(p9$Id,p9$Name,sep = ':'),
Salary = sum(lineup$Salary),
FPPG = sum(lineup$FPPG),
Matchup = sum(lineup$Matchup) * 0.75,
Value = sum(lineup$Value)/2)
new_row$Score <- as.numeric(new_row$FPPG) + as.numeric(new_row$Matchup) +
as.numeric(new_row$Value)
new_row[t(apply(new_row,1,duplicated))] <- NA
if(any(is.na(new_row))){
next } else {
if(new_row$Salary <= 35000){
lineup_comb <- rbind(lineup_comb,new_row)
}
}
}
if(i %in% seq(from = 5000, to = 45000, by = 5000)){
pct <- i / 500
print(paste(pct,'% Complete', sep = ''))
}
}
lineup_comb <- lineup_comb[order(-lineup_comb$Score),1:ncol(lineup_comb)]
lineup_comb <- lineup_comb %>% dplyr::distinct()
top50 <- lineup_comb[1:50,]
write.csv(top50, 'optimal_baseball_fanduel.csv')
rm(list = ls())
print(paste('End Time:',Sys.time()))
toc()
|
e80e4a2a1493e7cd8701190782fda2a4cfda8eea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pinnacle.API/examples/GetSpecialFixtures.Rd.R
|
753b13911767a7598e63fc6703bd30a20e3a707a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
GetSpecialFixtures.Rd.R
|
library(pinnacle.API)
### Name: GetSpecialFixtures
### Title: Get Special Fixtures
### Aliases: GetSpecialFixtures
### ** Examples
## No test:
SetCredentials("TESTAPI", "APITEST")
AcceptTermsAndConditions(accepted=TRUE)
GetSpecialFixtures()
## End(No test)
|
e3ebff3c10f29f19e7c9857b872b2b92d66d82ba
|
10cec89dd13ba113fbec9d74f7d27bf0f969c9a4
|
/run_analysis.R
|
045c9d550866c64d87c5414c19e996839e75c1d7
|
[] |
no_license
|
luvkhandelwal/Getting-and-Cleaning-Data-Course-Project
|
e8aac68521b7520f3934718212be70e9d435e123
|
f37b443021aedfb241ca0bffbbac68d3eee51477
|
refs/heads/master
| 2022-11-12T18:21:04.692255
| 2020-07-09T20:41:15
| 2020-07-09T20:41:15
| 278,038,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,548
|
r
|
run_analysis.R
|
##Initialization
rm(list = ls())
getwd()
library(dplyr)
library(tidyr)
library(data.table)
setwd("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset")
##Reading Test data files :X , y and subject
test_data<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\test\\X_test.txt")
ytest_data<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\test\\y_test.txt",col.names = "label")
stest_data<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\test\\subject_test.txt",col.names = "subject")
##Reading Train data files :X , y and subject
train_data<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\train\\X_train.txt")
ytrain_data<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\train\\y_train.txt",col.names = "label")
strain_data<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\train\\subject_train.txt",col.names = "subject")
##Column bindindg these files
ctest_data<-cbind(stest_data,test_data,ytest_data)
ctrain_data<-cbind(strain_data,train_data,ytrain_data)
##Check if same number of columns before appending
length(names(ctrain_data))==length(names(ctest_data))
##Appending two tables
completedata<-rbind(ctrain_data,ctest_data)
##Mean for each activity
data_mean<-apply(completedata[,2:562],2,mean)
data_std<-apply(completedata[,2:562], 2,sd)
#Reading the names of each activity
features<-read.table("C:\\Users\\luv\\Desktop\\Online Courses\\DATA SCIENCE\\Data Cleaning\\getdata_projectfiles_UCI HAR Dataset\\UCI HAR Dataset\\features.txt")
#Tidy column names and assigning them to the data set
feature<-as.vector(features$V2)
feature<-tolower(gsub("[()]","",feature))
setnames(completedata,old = c(paste('V',1:561,sep = "")),new = feature)
#Mean by subject and label
new_data<- aggregate(completedata[, 3:562],by=list(subject = completedata$subject,
label = completedata$label),mean)
head(new_data)
write.table(new_data, "tidy_luv.txt",row.names=F, col.names=F, quote=2)
|
a7adc0664679195c989552f8ed572c75fc83f955
|
c0f10d1576b08e98d70f773bd870332e3f68af05
|
/man/addbuff.Rd
|
deba6f5c2e40bd4ca4cdfde255cf67bcfd756859
|
[
"MIT"
] |
permissive
|
ropensci/terrainr
|
fcc0413d88ade11e8004b5f078df022b19f99a02
|
492011c13e15884093887c768d90b666367c98a7
|
refs/heads/main
| 2023-08-29T01:40:32.008322
| 2023-02-16T18:53:52
| 2023-02-16T18:53:52
| 293,846,648
| 46
| 4
|
NOASSERTION
| 2023-02-16T15:24:01
| 2020-09-08T15:10:39
|
R
|
UTF-8
|
R
| false
| true
| 3,246
|
rd
|
addbuff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_bbox_buffer.R
\name{addbuff}
\alias{addbuff}
\alias{add_bbox_buffer}
\alias{add_bbox_buffer.sf}
\alias{add_bbox_buffer.Raster}
\alias{add_bbox_buffer.SpatRaster}
\alias{set_bbox_side_length}
\alias{set_bbox_side_length.sf}
\alias{set_bbox_side_length.Raster}
\alias{set_bbox_side_length.SpatRaster}
\title{Add a uniform buffer around a bounding box for geographic coordinates}
\usage{
add_bbox_buffer(data, distance, distance_unit = "meters", error_crs = NULL)
\method{add_bbox_buffer}{sf}(data, distance, distance_unit = "meters", error_crs = NULL)
\method{add_bbox_buffer}{Raster}(data, distance, distance_unit = "meters", error_crs = NULL)
\method{add_bbox_buffer}{SpatRaster}(data, distance, distance_unit = "meters", error_crs = NULL)
set_bbox_side_length(
data,
distance,
distance_unit = "meters",
error_crs = NULL
)
\method{set_bbox_side_length}{sf}(
data,
distance,
distance_unit = "meters",
error_crs = NULL
)
\method{set_bbox_side_length}{Raster}(
data,
distance,
distance_unit = "meters",
error_crs = NULL
)
\method{set_bbox_side_length}{SpatRaster}(
data,
distance,
distance_unit = "meters",
error_crs = NULL
)
}
\arguments{
\item{data}{The original data to add a buffer around. Must be either an \code{sf}
or \code{SpatRaster} object.}
\item{distance}{The distance to add or to set side lengths equal to.}
\item{distance_unit}{The units of the distance to add to the buffer, passed
to \link[units:units]{units::as_units}.}
\item{error_crs}{Logical: Should this function error if \code{data} has no CRS?
If \code{TRUE}, function errors; if \code{FALSE}, function quietly assumes EPSG:4326.
If \code{NULL}, the default, function assumes EPSG:4326 with a warning.}
}
\value{
An \code{sfc} object (from \link[sf:st_as_sfc]{sf::st_as_sfc}).
}
\description{
\link{add_bbox_buffer} calculates the great circle distance both corners of
your bounding box are from the centroid and extends those by a set distance.
Due to using Haversine/great circle distance, latitude/longitude calculations
will not be exact.
\link{set_bbox_side_length} is a thin wrapper around \link{add_bbox_buffer} which sets
all sides of the bounding box to (approximately) a specified length.
Both of these functions are intended to be used with geographic coordinate
systems (data using longitude and latitude for position). For projected
coordinate systems, a more sane approach is to use \link[sf:geos_unary]{sf::st_buffer} to add a
buffer, or combine \link[sf:geos_unary]{sf::st_centroid} with the buffer to set a specific side
length.
}
\examples{
df <- data.frame(
lat = c(44.04905, 44.17609),
lng = c(-74.01188, -73.83493)
)
df_sf <- sf::st_as_sf(df, coords = c("lng", "lat"))
df_sf <- sf::st_set_crs(df_sf, 4326)
add_bbox_buffer(df_sf, 10)
df <- data.frame(
lat = c(44.04905, 44.17609),
lng = c(-74.01188, -73.83493)
)
df_sf <- sf::st_as_sf(df, coords = c("lng", "lat"))
df_sf <- sf::st_set_crs(df_sf, 4326)
set_bbox_side_length(df_sf, 4000)
}
\seealso{
Other utilities:
\code{\link{calc_haversine_distance}()},
\code{\link{deg_to_rad}()},
\code{\link{get_centroid}()},
\code{\link{rad_to_deg}()}
}
\concept{utilities}
|
d6a9672db99ab6e71ca2b4850c3b303de01722a7
|
5d9b83fbf87ecda43f5ad897f3022b1e22f5e452
|
/src/import_moh_weekly.R
|
1518096c9fa942fa38c0ac0347f147e040bbfc6f
|
[] |
no_license
|
roscoelai/dasr2020capstone
|
44fc93e7b0308c506f730beb5ce046c8ce892b38
|
10f3ee102f3d4f1a929329f57f02c894ba3cc7b4
|
refs/heads/master
| 2022-11-23T01:07:29.559385
| 2020-08-02T14:04:50
| 2020-08-02T14:04:50
| 276,071,316
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,276
|
r
|
import_moh_weekly.R
|
# import_moh_weekly.R
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
library(magrittr)
import_moh_weekly <- function(url_or_path = NULL) {
#' Weekly Infectious Diseases Bulletin
#'
#' @description
#' \href{https://www.moh.gov.sg/resources-statistics/infectious-disease-
#' statistics/2020/weekly-infectious-diseases-bulletin}{MOH Weekly Infectious
#' Disease Bulletin} from the Ministry of Health (MOH).
#'
#' \href{https://www.moh.gov.sg/docs/librariesprovider5/diseases-updates/
#' weekly-infectious-disease-bulletin-year-2020
#' d1092fcb484447bc96ef1722b16b0c08.xlsx}{Latest data as of 31 July 2020
#' (2012-W01 to 2020-W30)}.
#'
#' @param url_or_path The URL or file path of the .xlsx file.
#' @return Weekly infectious diseases bulletin (2012-W01 to 2020-W30).
# If no URL or path is specified, try to get the file directly from MOH.
if (is.null(url_or_path)) {
url_or_path = paste0(
"https://www.moh.gov.sg/docs/librariesprovider5/diseases-updates/",
"weekly-infectious-disease-bulletin-year-2020",
"d1092fcb484447bc96ef1722b16b0c08.xlsx"
)
}
# Check if the given path is a URL by trying to download to a temp file. If
# successful, return the temp file. If not, return the original path.
xlsx_file = tryCatch({
temp = tempfile(fileext = ".xlsx")
download.file(url_or_path, destfile = temp, mode = "wb")
temp
}, error = function(e) {
url_or_path
})
# Columns will be renamed to follow 2020
colnames_2020 = c(
"Campylobacter enterosis" = "Campylobacter enteritis",
"Campylobacterenterosis" = "Campylobacter enteritis",
"Campylobacteriosis" = "Campylobacter enteritis",
"Chikungunya Fever" = "Chikungunya",
"Dengue Haemorrhagic Fever" = "DHF",
"Dengue Fever" = "Dengue",
"Hand, Foot and Mouth Disease" = "HFMD",
"Hand, Foot Mouth Disease" = "HFMD",
"Nipah virus infection" = "Nipah",
"Viral Hepatitis A" = "Acute Viral Hepatitis A",
"Viral Hepatitis E" = "Acute Viral Hepatitis E",
"Zika Virus Infection" = "Zika",
"Zika virus infection" = "Zika"
)
xlsx_file %>%
readxl::excel_sheets() %>%
lapply(function(sheetname) {
df = readxl::read_xlsx(xlsx_file, sheetname, skip = 1)
# Date formats are different for 2020 (dmy instead of mdy)
if (sheetname == "2020") {
df$Start = lubridate::dmy(df$Start)
df$End = lubridate::dmy(df$End)
}
# Find and rename columns that need to be renamed, and rename them
mapper = na.omit(colnames_2020[names(df)])
dplyr::rename_with(df, ~mapper, names(mapper))
}) %>%
dplyr::bind_rows() %>%
dplyr::rename(Epiweek = `Epidemiology Wk`) %>%
dplyr::mutate(Epiyear = lubridate::epiyear(Start)) %>%
dplyr::select(Epiyear, everything()) %>%
dplyr::arrange(Start)
}
# Import START ----
# From MOH
bulletin <- import_moh_weekly()
# From online repo
bulletin <- import_moh_weekly(paste0(
"https://raw.githubusercontent.com/roscoelai/dasr2020capstone/master/",
"data/moh/weekly-infectious-disease-bulletin-year-2020.xlsx"
))
# From local
bulletin <- import_moh_weekly(
"../data/moh/weekly-infectious-disease-bulletin-year-2020.xlsx"
)
# Import END ----
|
1624ccab0e41c47a5622da7c2c130e648b6d1e68
|
2c19009a27ea3819a6b3da7744f67a2021e358c7
|
/tests/testthat/helper-run-insert-tests.R
|
11dbe981e4f9c92d3bd076c84506b3a0839262dd
|
[] |
no_license
|
GreedBlink/dbx
|
c4a7986e6e678b6cf18fe5ebf0b25fa419b883bd
|
b83311f509611eff1b60c68fb30e24f49ed618d9
|
refs/heads/master
| 2020-04-02T23:34:26.284604
| 2018-10-16T02:01:16
| 2018-10-16T02:01:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,352
|
r
|
helper-run-insert-tests.R
|
runInsertTests <- function(db, redshift=FALSE) {
test_that("insert works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT id, city FROM events ORDER BY id")
expect_equal(res, events)
})
test_that("empty insert works", {
dbxInsert(db, "events", data.frame())
expect(TRUE)
})
test_that("insert returning works", {
skip_if(!isPostgres(db) || redshift)
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
res <- dbxInsert(db, "events", events, returning=c("id", "city"))
expect_equal(res$id, c(1, 2))
expect_equal(res$city, events$city)
})
test_that("insert returning star works", {
skip_if(!isPostgres(db) || redshift)
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
res <- dbxInsert(db, "events", events, returning=c("*"))
expect_equal(res$id, c(1, 2))
expect_equal(res$city, events$city)
})
test_that("insert batch size works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events, batch_size=1)
res <- dbxSelect(db, "SELECT id, city FROM events")
expect_equal(res, events)
})
}
|
4e43d5321b56511c6f62cb0fe27d3fb9c30f677c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rbugs/examples/genDataFile.Rd.R
|
8021f2eb9d72449f83c4bff11238557c14eb434c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 284
|
r
|
genDataFile.Rd.R
|
library(rbugs)
### Name: genDataFile
### Title: Generating the Data File for BUGS
### Aliases: genDataFile
### Keywords: utilities
### ** Examples
dat <- list(a = runif(1), b=rnorm(2), c=matrix(rexp(4), 2, 2))
genDataFile(dat, "foo.txt")
file.show("foo.txt")
unlink("foo.txt")
|
0725f34eb53ac7bab8ec05188d70b5c2829d4099
|
e986e68ce4abb1b60af3aaa1869ffda8ee5cb882
|
/cachematrix.R
|
cb7a5f53358dda932205c43d6d6518c28e7f9ea6
|
[] |
no_license
|
limpica/ProgrammingAssignment2
|
7b8e9d3b8ab35e4ba2e3d42a7a22f27292b4c2d3
|
3f1ec566306cb9753ccb653f060d4a63290401fc
|
refs/heads/master
| 2021-01-15T12:54:01.146011
| 2016-07-20T04:09:45
| 2016-07-20T04:09:45
| 63,606,992
| 0
| 0
| null | 2016-07-18T13:51:04
| 2016-07-18T13:51:03
| null |
UTF-8
|
R
| false
| false
| 1,371
|
r
|
cachematrix.R
|
## The functions will solve the inverse of the input matrix. If the
## solution was calculated before, the same calculation will not be
## proceeded. Result from previous calculation will be returned.
## makeCacheMatrix() funcion gets the input matrix and stores the
## inverse matrix calculated by cacheSolve() function.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve() function will judge if the matrix has been solved before.
## If the matrix has been solved before, the function will return the
## results calculated before, rather than doing another round of
## calculation.
## If the matrix has never been solved, the function will solve the
## inverse matrix.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if (!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
01bc2e99753ac9ba00445ab402d4f0c25e4e444b
|
7aefb43bd178f805d1e2c34d110c0c7663883a06
|
/Package/R/bct_eval_RW.R
|
46dbe9f634fd6694179108e0c09ee88a0e1ae588
|
[] |
no_license
|
ntorbica/bct
|
82bcd91d53d64980853c0abb8cb8f80f74842d53
|
a0e52bb6483f629b3c7068b8ae46980567257b3a
|
refs/heads/master
| 2021-09-07T01:11:41.307548
| 2018-02-14T18:47:02
| 2018-02-14T18:47:02
| 112,538,898
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,473
|
r
|
bct_eval_RW.R
|
#' @export
#'
#' @title Basic evaluation of batch effect correction
#' @param peaks Peak data to evaluate. Samples have to be in rows (as produced
#' by outputted by the BCT call)
#' @param info Information table on peaks, containing information on sample grouping,
#' injection sequence and batch labels
#' @param arguments Input arguments from BCT produced object.
#' @param plot.pca Logical; should a PCA score plot be produced?
#' @param plot.rep Logical; should a repeatability histogram be produced?
#' @param plot.pdist Logical; should a p-value histogram be produced?
#' @param n Plot title
#' @description Rudimentary evaluation function computing Bhattacharyya distance and
#' plotting PCA the according score plots, computing repeatability between batches
#' and plotting histograms, as well as featurewise p-values for sample groups,
#' injection sequence and batch labels.
#' @return Returns a list object with the values specified in the arguments object.
bct.eval_RW <- function(peaks, info, arguments, plot.pca = F, plot.rep = F, plot.pdist = F,
n = '', adj = F, dens = F){
out <- list()
bct.msg('Plotting...', side = 'top')
if(arguments$PCA == arguments$Duplo){
layout(matrix(c(1,1,3,1,1,4,2,2,5), 3, 3, byrow = T))
par(mar = c(4,5,6.5,4))
bct.msg('PCA...', side = 'none')
pca <- evaluatePCA(peaks, info, plot = plot.pca, perBatch = F)
out[["Bhattacharyya Distance"]] <- pca
title(paste("Bhattacharyya Distance: ", round(pca, 3), sep = ''), cex.main = 2)
par(mar = c(6,6,5,6))
bct.msg('Repeatabilities...', side = 'none')
dup <- evaluateDuplos(peaks, info, plot = plot.rep, breaks = 20, xlim = c(0,1))
out[["Repeatabilities"]] <- dup
title('Feature Repeatability')
legend(x = 'topright', legend = paste('Mean Rep: ', round(mean(dup, na.rm = T), 3), sep = ''))
} else {
layout(matrix(c(1,1,2,1,1,3,1,1,4), 3, 3, byrow = T))
if(arguments$PCA == '1'){
par(mar = c(4,5,6.5,4))
bct.msg('PCA...', side = 'none')
pca <- evaluatePCA(peaks, info, plot = plot.pca, perBatch = F)
title(paste("Bhattacharyya Distance: ", round(pca, 3), sep = ''), cex.main = 2)
out[["Bhattacharyya Distance"]] <- pca
}
if(arguments$Duplo == '1'){
par(mar = c(6,6,5,6))
bct.msg('Repeatabilities...', side = 'none')
dup <- evaluateDuplos(peaks, info, plot = plot.rep, breaks = 20, xlim = c(0,1))
title('Feature Repeatability')
legend(x = 'topright', legend = paste('Mean Rep: ', round(mean(dup, na.rm = T), 3), sep = ''))
out[["Repeatabilities"]] <- dup
}
}
if(arguments$pdist == '1'){
par(mar = c(6,4,6,4))
bct.msg('P-values...', side = 'none')
pval.b <- bct.pval(peaks, info, PLOT = plot.pdist, t = '',
plotn = 'Batch', adj = adj, dens = dens)
pval.g <- bct.pval(peaks, info, PLOT = plot.pdist, t = '',
plotn = 'Group', adj = adj, dens = dens)
pval.s <- bct.pval(peaks, info, PLOT = plot.pdist, t = '',
plotn = 'SeqNr', adj = adj, dens = dens)
out[["P-value distributions"]] <- list("Batch" = pval.b,
"Group" = pval.g,
"Order" = pval.s)
} else {
pval <- NULL
}
par(oma = c(0,1,0,0))
mtext(n, side = 3, outer = T, line = -2.5, adj = c(0,-0.5), cex = 1.2)
bct.msg('Done!', side = 'bottom')
return(out)
}
|
8e15f5bf95c57e6d289e1560811be0c2841180f0
|
3cedc6ffc9bfc2abb1e282d14cd866f5d8d68b02
|
/code-hw3.R
|
c1c88a186d09078b829c6587d5aa2939e50933a9
|
[] |
no_license
|
pvarsh/math490
|
95844da63de12f1dd2a5f592f4ccd883d9d4b7d7
|
f26623dd75e138de2635accbf93dcf607ce10f7b
|
refs/heads/master
| 2021-01-15T11:43:51.646786
| 2014-05-25T22:55:49
| 2014-05-25T22:55:49
| 17,259,232
| 1
| 0
| null | 2014-03-06T19:19:41
| 2014-02-27T18:23:22
|
R
|
UTF-8
|
R
| false
| false
| 10,967
|
r
|
code-hw3.R
|
### Homework 3 Chapter 2-2
### Mirai Furukawa, Scott Thomas, Peter Varshavsky
### 2014/03/06
### Questions: 2.18, 2.21, 2.22, 2.30, 2.33, 2.38
#############################################################
#### Functions
G2.test=function(x)
{
total=sum(x)
rowsum=apply(x,1,sum)
colsum=apply(x,2,sum)
expected=(matrix(rowsum) %*% t(matrix(colsum))) / total
tmp=x*log(x/expected)
tmp[x==0]=0
G2=2*sum(tmp)
df=prod(dim(x)-1)
attr(G2,"P-value")=1-pchisq(G2,df)
return(G2)
}
procfreq=function(x, digits=4) # create a fuction similar to the proc freq is SAS
{
total=sum(x)
rowsum=apply(x,1,sum)
colsum=apply(x,2,sum)
prop=x/total
rowprop=sweep(x,1,rowsum,"/")
colprop=sweep(x,2,colsum,"/")
expected=(matrix(rowsum) %*% t(matrix(colsum))) / total
dimnames(expected)=dimnames(x)
resid=(x-expected)/sqrt(expected)
adj.resid=resid /sqrt((1-matrix(rowsum)/total) %*% t(1-matrix(colsum)/total))
df=prod(dim(x)-1)
X2=sum(resid^2)
attr(X2,"P-value")=1-pchisq(X2,df)
## Must be careful about zero freqencies. Want 0*log(0) = 0.
tmp=x*log(x/expected)
tmp[x==0]=0
G2=2*sum(tmp)
attr(G2,"P-value")=1-pchisq(G2,df)
list(sample.size=total, row.totals=rowsum,
col.totals=colsum,
overall.proportions=prop,
row.proportions=rowprop,
col.proportions=colprop,
expected.freqs=expected,
residuals=resid,
adjusted.residuals=adj.resid,
chi.square=X2,
likelihood.ratio.stat=G2,
df=df)}
odds.ratio = function(mat, conf.level = 0.95, noPrint = TRUE){
# matrix mat must contain counts of out comes:
# row1: treatment
# row2: control
# col1: success
# col2: failure
# 0 counts will result in error
oddsTreatment = mat[1,1] / mat[1,2]
oddsControl = mat[2,1] / mat[2,2]
oddsRatio = oddsTreatment / oddsControl
logOddsRatio = log(oddsRatio)
SE = sqrt((1/mat[1,1] + 1/mat[1,2] + 1/mat[2,1] + 1/mat[2,2]))
quantile = 1 - (1 - conf.level)/2
z = c(-qnorm(quantile, 0, 1), qnorm(quantile, 0, 1)) # two-tailed interval containing conf.level of standard normal distribution
confInt = exp(logOddsRatio + SE * z) # addition, multiplication and exp() are vectorized in R
out = list(oddsRatio = oddsRatio, confInt = confInt) # I'm using a list to return two different data types (numeric and numeric vector)
if (!noPrint){
print(out) # print can be suppressed by noPrint parameter
}
return(out)
}
standResid = function(mat){
# author: Peter
# parameter:
# mat: contingency table of observations
# returns:
# matrix of standardized residuals
# calls:
# marginals()
margMat = marginals(mat)
pMat = mat / sum(mat)
p.i.plus = rowSums(mat)/margMat$n
p.plus.j = colSums(mat)/margMat$n
print("testing")
resid = mat - margMat$exp * margMat$n
SE = sqrt(margMat$exp * margMat$n * ((1 - p.i.plus) %*% t(1 - p.plus.j)))
return(resid/SE)
}
marginals = function(mat){
# author: Peter
# returns a list with row marginal, column marginal, expected values of joint under independence
colMarg = colSums(mat)/sum(mat)
rowMarg = rowSums(mat)/sum(mat)
expMat = rowMarg %*% t(colMarg)
return(list(original = mat, row = rowMarg, col = colMarg, exp = expMat, n = sum(mat)))
}
fisherExact = function(mat){
# author: Peter
# One-sided (greater) Fisher's exact test for 2x2 contingency tables
#cat("\nmat: ")
#print(mat)
n = sum(mat)
#cat("\nn:",n)
observed = mat[1][1]
rs = rowSums(mat)
#cat("\nrs:", rs)
cs = colSums(mat)
p = 0
#cat("\nStarting for loop\n")
for (i in mat[1][1]:rs[1]){
#print(i)
p = p + choose(rs[1], i) * choose(rs[2], cs[1] - i) / choose(n, cs[1])
}
#return(choose(rowSums(mat)[1], mat[1,1]) * choose(rowSums(mat)[2], colSums(mat)[1] - mat[1][1]) / choose(sum(mat), colSums(mat)[1]))
#cat("\nP-value =",p, "\n")
names(p) = "P-Value"
return(p)
}
### End: functions
#############################################################
### Question 2.18
# a. Show how to obtain the estimated expected cell countof 35.8 for the first cell
# create matrix of expected counts
p2.18exp = matrix(c(35.8, 166.1, 88.1,
79.7, 370.0, 196.4,
52.5, 244.0, 129.5), byrow = T, nrow = 3)
p2.18obs = matrix(c(21, 159, 110,
53, 372, 221,
94, 249, 83), byrow = T, nrow = 3)
# divide each row by its sum
p2.18expR = p2.18exp / apply(p2.18exp, 1, sum) # expected rows
p2.18expC = t(t(p2.18exp) / apply(p2.18exp, 2, sum)) # expected columns
p2.18obsCalculated = p2.18expR * p2.18expC * sum(p2.18obs) # calculated expected values
cat("Columns divided by column sums:")
print(p2.18expC)
cat("Rows divided by row sums:")
print(p2.18expR)
cat("Under the null hypthesis Income and Happiness are independent multinomial variables with the following marginals:",
"\n\tIncome: ", p2.18expR[1,],
"\n\tHappiness: ", p2.18expC[,1], sep = " ")
cat("Multiplying the marginals and then multiplying the resulting matrix by total sample size", sum(p2.18obs), "we get the matrix of expected cell values")
print(p2.18obsCalculated)
# b. For testing independence, X^2 = 73.4. Report the degrees of freedom and the P-value and interpret.
cat("# b. For testing independence, X^2 = 73.4. Report the degrees of freedom and the P-value and interpret.
")
cat("For a 3x3 table there are 2*2 = 4 degrees of freedom.")
cat("P-value:", 1-pchisq(73.4, df = 2), "\nWe can reject the null hypothesis of independence")
# c. Interpret the standardized residuals in the corner cells having counts 21 and 83.
cat("# c. Interpret the standardized residuals in the corner cells having counts 21 and 83.
")
cat("Residuals are negative and greater than 2 in absolute value, which means there is likely
strong evidence against independence. The negative sign means that these cells are
underrepresented in the sample, that is the evidence suggests that there are fewer
unhappy rich or happy poor than the hypothesis of independence would imply.")
# d. Interpret the standardized residuals in the corner cells having counts 110 and 94.
cat("Interpret the standardized residuals in the corner cells having counts 110 and 94.")
cat("Residuals are positive and of high absolute value, again giving strong evidence against independence.
The positive values suggest that these categories are overrepresented in the sample, or
that there are more happy rich and unhappy poor than would be observed under independence.")
####################################################################################
### Question 2.21
#Original Table
p2.21 = matrix(c(60,81,75,75,87,86),
nrow=2,
byrow=TRUE,
dimnames = list(Gender=c("Men","Women"),Reason=c("A","B","C")))
#Table for Reason = A
p2.21abc = matrix(c(60, 100-60,
75, 100-75),
nrow=2,
byrow=TRUE,
dimnames = list(Gender=c("Men","Women"),
Reason=c("A is responsible","A not responsible")))
p2.21
p2.21abc
# Computing the residuals
standResid(p2.21abc)
####################################################################################
### Question 2.22
p2.22 = matrix(c(105, 8,
12, 2,
18, 19,
47, 52,
0, 13), byrow = T, nrow = 5,
dimnames = list("diagnosis" = c("schizophrenia",
"affective disorder",
"neurosis",
"personality disorder",
"special symptoms"),
"treatment" = c("drugs",
"no drugs")))
# a. Conduct a test of independence and interpret the P-value
print(p2.22)
chisq.test(p2.22)
# b. Obtain standardized residuals and interpret
standResid(p2.22)
# c. Partition chi-squared into three components to describe differences and similarities among the diagnoses by comparing
# c.i. the first two rows
standResid(p2.22[1:2, ])
chisq.test(p2.22[1:2, ])
# c.ii. the third and fourth rows
standResid(p2.22[3:4, ])
chisq.test(p2.22[3:4, ])
# c.iii. the last row to the first and second rows combined, and the third and fourth rows combined
p2.22iii.a = colSums(p2.22[c(1,2), ])
p2.22iii.a = rbind(p2.22iii.a, p2.22[5,])
standResid(p2.22iii.a)
p2.22iii.a = p2.22iii.a + 0.5 ### adding 0.5 for chi-square test
chisq.test(p2.22iii.a)
p2.22iii.b = colSums(p2.22[c(3,4), ])
p2.22iii.b = rbind(p2.22iii.b, p2.22[5,])
standResid(p2.22iii.b)
p2.22iii.b = p2.22iii.b + 0.5 ### adding 0.5 for chi-square test
chisq.test(p2.22iii.b)
####################################################################################
### Question 2.30
# Table 2.17 contains results of a study comparing
# radiation therapy with surgery in treating cancer of the larynx.
# Use Fisher's exact test to test H_0: theta = 1 against H_a: theta > 1.
# Interpret results
p2.30 = matrix(c(21, 2,
15, 3),
nrow=2,
byrow=TRUE,
dimnames = list(TreatmentType=c("Surgery","Radiation therapy"),
"Cancer Controlled" = c("Yes","No"))) #### PV: I renamed the variable name CaseControl
#fisher.test(p2.30,alternative="greater")
fisherExact(p2.30) #using Peter's function. Answer agrees with fisher.test()
####################################################################################
### Question 2.33
##### using 3-dimensional array following Alexandra's code
dp = c(19, 132, 11, 63, 0, 9, 6, 103)
dp = array(dp, dim = c(2,2,2))
dimnames(dp) = list(DeathPen = c("yes", "no"),
Defendant = c("white", "black"),
Victim = c("white", "black"))
dp_flat = ftable(dp, row.vars = c("Victim", "Defendant"), col.vars = "DeathPen")
dp_flat
#Partial table victim = white
p2.33w = matrix(c(19,132,11,52),
nrow=2,
byrow=TRUE,
dimnames = list(DefendantsRace=c("White","Black"),
Penalty=c("Yes","No")))
#partial table victim = black
p2.33b = matrix(c(0,9,6,97),
nrow=2,
byrow=TRUE,
dimnames = list(DefendantsRace=c("White","Black"),
Penalty=c("Yes","No")))
odds.ratio(p2.33w)
odds.ratio(p2.33b+0.5)
#Marginal Table
p2.33m = matrix(c(19,141,17,149),
nrow=2,
byrow=TRUE,
dimnames = list(DefendantsRace=c("White","Black"),
Penalty=c("Yes","No")))
odds.ratio(p2.33m)
####################################################################################
### Question 2.38
####################################################################################
|
d6dd9f91b9b9d7e83cab9325a9014d33c1686c1b
|
955eabc2b9f485f084d703b4f20a2a0d991f11f8
|
/man/squat_binom_overdisp_rand.Rd
|
80f511e7a058a499f3d7cb8631ac7aaefffd14f2
|
[
"Apache-2.0"
] |
permissive
|
Yichen-Si/squat
|
7e91b926560c4072b8e5bf8c72db5cfa8ba4470c
|
3da0ca4965e5be4e76d9401b193a4e1dfba71b0b
|
refs/heads/master
| 2022-11-29T21:17:06.309473
| 2020-07-20T00:13:58
| 2020-07-20T00:13:58
| 281,822,235
| 0
| 0
|
NOASSERTION
| 2020-07-23T01:40:55
| 2020-07-23T01:40:54
| null |
UTF-8
|
R
| false
| true
| 922
|
rd
|
squat_binom_overdisp_rand.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/squat.R
\name{squat_binom_overdisp_rand}
\alias{squat_binom_overdisp_rand}
\title{squat_binom_overdisp_rand : randomized binomial overdispersion test}
\usage{
squat_binom_overdisp_rand(xs, sizes, ps, ws, nrep = 1, pos.only = TRUE)
}
\arguments{
\item{xs}{A vector of non-negative counts representing observed data}
\item{sizes}{A vector of positive values representing total counts}
\item{ps}{A vector of binomial probabilities for each observed data}
\item{ws}{A vector of weights for each observed data}
\item{nrep}{Number of repetitions}
\item{pos.only}{Ignore zero observations}
}
\value{
A (nrep x 2) matrix, containing the following values in each column
* Z-score from randomzied exact meta-analysis
* -log10 p-value corresponding to the z-score
}
\description{
squat_binom_overdisp_rand : randomized binomial overdispersion test
}
|
bbd8973c582db8331bf8ba4b5dd23ce8e73c0931
|
1a4c6cdabc81e7f06353cfeec61160e5e4a8b5dd
|
/Project 1/piechart.R
|
774d547197c61a2a1a06fa371e6e5ce02a4fe99b
|
[] |
no_license
|
dhananjaymuddappa/rainman_assignments
|
d8a62ffe93f145939591b395547d319136e4b4c6
|
53c8d5d54b1230530099dbbc3c3515a21981ede4
|
refs/heads/master
| 2021-01-10T12:01:54.831940
| 2016-03-22T20:21:34
| 2016-03-22T20:21:34
| 54,506,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 530
|
r
|
piechart.R
|
piechart <- function(fileName, param)
{
data <- read.csv(fileName, header=T)
column_data <- count(data, param) #use count function to count the frequency of a given name
names <- column_data[1] #store names
frequency <- column_data[2] #store frequency of the names
frequency <- as.numeric (unlist(frequency)) #convert from list to numeric
names <- as.character(unlist(names))#convert from list to characters
pie(frequency,labels=names,radius=0.9, main="Pie Chart of Names")
#draw piechart using pie function
}
|
3f5efb9686221bf707fbcddd2486f35063c26f9f
|
28c3f73a6d70c2fed4b2d2011bd1d9416a293b0e
|
/R/bagging.wrapper.v2.R
|
b39f2d08fa05d232864528d08d0680204835b678
|
[] |
no_license
|
cdeterman/OmicsMarkeR
|
a8cbe69bc26f98db69b89c02949a3a4d0ab2d8a1
|
f9a0f3dfd067c0a0beb9ad421982ad86e63914cf
|
refs/heads/master
| 2021-01-10T20:58:11.111636
| 2017-01-10T15:14:14
| 2017-01-10T15:14:14
| 13,659,839
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,541
|
r
|
bagging.wrapper.v2.R
|
#' @title Bagging Wrapper for Ensemble Features Selection
#' @description Compiles results of ensemble feature selection
#' @param X A matrix containing numeric values of each feature
#' @param Y A factor vector containing group membership of samples
#' @param method A vector listing models to be fit
#' @param bags Number of bags to be run
#' @param f Number of features desired
#' @param aggregation.metric string indicating the type of ensemble aggregation.
#' Avialable options are \code{"CLA"} (Complete Linear),
#' \code{"EM"} (Ensemble Mean), \code{"ES"} (Ensemble Stability), and
#' \code{"EE"} (Ensemble Exponential)
#' @param k.folds Number of folds generated during cross-validation
#' @param repeats Number of times cross-validation repeated
#' @param res Optional - Resolution of model optimization grid
#' @param tuning.grid Optional list of grids containing parameters to optimize
#' for each algorithm. Default \code{"tuning.grid = NULL"} lets function
#' create grid determined by \code{"res"}
#' @param optimize Logical argument determining if each model should
#' be optimized. Default \code{"optimize = TRUE"}
#' @param optimize.resample Logical argument determining if each resample
#' should be re-optimized. Default \code{"optimize.resample = FALSE"} - Only
#' one optimization run, subsequent models use initially determined parameters
#' @param metric Criteria for model optimization. Available options are
#' \code{"Accuracy"} (Predication Accuracy), \code{"Kappa"} (Kappa Statistic),
#' and \code{"AUC-ROC"} (Area Under the Curve - Receiver Operator Curve)
#' @param model.features Logical argument if should have number of features
#' selected to be determined by the individual model runs.
#' Default \code{"model.features = FALSE"}
#' @param allowParallel Logical argument dictating if parallel processing
#' is allowed via foreach package. Default \code{allowParallel = FALSE}
#' @param verbose Logical argument if should output progress
#' @param theDots Optional arguments provided for specific models or user
#' defined parameters if \code{"optimize = FALSE"}.
#' @return \item{results}{List with the
#' following elements:}
#' @return \itemize{
#' \item{Methods: Vector of models fit to data}
#' \item{ensemble.results: List of length = length(method) containing
#' aggregated features}
#' \item{Number.bags: Number of bagging iterations}
#' \item{Agg.metric: Aggregation method applied}
#' \item{Number.features: Number of user-defined features}}
#' @return \item{bestTunes}{If \code{"optimize.resample = TRUE"}
#' then returns list of best parameters for each iteration}
#' @author Charles Determan Jr
#' @import DiscriMiner
#' @import randomForest
#' @import e1071
#' @import gbm
#' @import pamr
#' @import glmnet
# ' @export
bagging.wrapper <- function(X,
Y,
method,
bags,
f,
aggregation.metric,
k.folds,
repeats,
res,
tuning.grid,
optimize,
optimize.resample,
metric,
model.features,
allowParallel,
verbose,
theDots)
{
rownames(X) <- NULL
var.names <- colnames(X)
nr <- nrow(X)
nc <- ncol(X)
# number of groups
num.group = nlevels(Y)
# group levels
grp.levs <- levels(Y)
# leave out samples
# need to retain for SVM and PAM feature selection
trainVars.list <- vector("list", bags)
trainGroup.list <- vector("list", bags)
if(optimize == TRUE & optimize.resample == TRUE){
resample.tunes <- vector("list", bags)
names(resample.tunes) <- paste("Bag", 1:bags, sep=".")
}else{
resample.tunes <- NULL
}
###############
### Parallel Processing??
###############
for (i in 1:bags){
# bootstrapping (i.e. random sample with replacement)
boot=sample(nr,nr,replace=TRUE)
trainVars <- X[boot,]
trainGroup <- Y[boot]
trainVars.list[[i]] <- trainVars
trainGroup.list[[i]] <- trainGroup
trainData <- as.data.frame(trainVars)
trainData$.classes <- trainGroup
# duplicate rownames because of bagging, must reset to 1:nrow
rownames(trainData) <- NULL
## Run respective algorithm on bootstrapped subsamples
if(optimize == TRUE){
if(optimize.resample == TRUE){
# tune the methods
tuned.methods <-
optimize.model(trainVars = trainVars,
trainGroup = trainGroup,
method = method,
k.folds = k.folds,
repeats = repeats,
res = res,
grid = tuning.grid,
metric = metric,
allowParallel = allowParallel,
verbose = verbose,
theDots = theDots)
if(i == 1){
finalModel <- tuned.methods$finalModel
}else{
finalModel <- append(finalModel, tuned.methods$finalModel)
}
# store the best tune parameters for each iteration
names(tuned.methods$bestTune) = method
resample.tunes[[i]] <- tuned.methods$bestTune
# end of optimize.resample loop
}else{
if(i == 1){
tuned.methods <-
optimize.model(trainVars = trainVars,
trainGroup = trainGroup,
method = method,
k.folds = k.folds,
repeats = repeats,
res = res,
grid = tuning.grid,
metric = metric,
allowParallel = allowParallel,
verbose = verbose,
theDots = theDots)
finalModel <- tuned.methods$finalModel
names(tuned.methods$bestTune) <- method
}else{
# Fit remainder of resamples with initial best parameters
#if(i == 2){
tmp <- vector("list", length(method))
names(tmp) <- method
#}
for(d in seq(along = method)){
tmp[[d]] <-
training(data = trainData,
method = method[d],
tuneValue = tuned.methods$bestTune[[d]],
obsLevels = grp.levs,
theDots = theDots)$fit
}
finalModel <- append(finalModel, tmp)
}
} # end of single optimization loop
# end of optimizing loops
}else{
names(theDots) <- paste(".", names(theDots), sep="")
# sequester appropriate parameters
args.seq <- sequester(theDots, method)
# remove arguments used from theDots - also remove '.' from each
names(theDots) <- sub(".", "", names(theDots))
moreDots <- theDots[!names(theDots) %in% args.seq$pnames]
if(length(moreDots) == 0){
moreDots <- NULL
}
#moreDots <- theDots[-names(args.seq)]
finalModel <- vector("list", length(method))
for(q in seq(along = method)){
finalModel[[q]] <-
training(data = trainData,
method = method[q],
tuneValue = args.seq$parameters[[q]],
obsLevels = grp.levs,
theDots = moreDots)
}
# end of non-optimized model fitting
}
# end of bagging loop
}
# sort models together (e.g. first 5 are "plsda", next 5 "gbm", etc.)
method.names <-
unlist(
lapply(method,
FUN = function(x) paste(c(rep(x, bags)),
seq(bags), sep = "."))
)
names(finalModel) <- paste(method,
rep(seq(bags), each = length(method)),
sep = ".")
finalModel <- finalModel[match(method.names, names(finalModel))]
#names(finalModel) <- orig.method.names
# Create empty list for features identified by each chosen algorithm
features <- vector("list", length(method))
names(features) <- tolower(method)
for(j in seq(along = method)){
### Extract important features
# pam requires a special mydata argument
mydata <- vector("list", bags)
if(method[j] == "pam"){
for(t in 1:bags){
mydata[[t]] <-
list(x=t(trainVars.list[[t]]),
y=factor(trainGroup.list[[t]]),
geneid = as.character(colnames(trainVars.list[[t]])))
}
}else{
# svm requires training data for RFE
for(t in 1:bags){
mydata[[t]] <- trainVars.list[[t]]
}
}
if(j == 1){
start <- 1
end <- bags
}
if(method[j] == "svm" | method[j] == "pam" | method[j] == "glmnet"){
bt <- vector("list", bags)
for(l in seq(bags)){
if(optimize == TRUE){
if(optimize.resample == FALSE){
bt[[l]] <- tuned.methods$bestTune[[j]]
}else{
bt[[l]] <- tuned.methods$bestTune[[l]]
}
}
}
}else{
bt <- vector("list", bags)
}
if(method[j] == "plsda"){
cc <- vector("list", bags)
for(c in seq(bags)){
if(optimize == TRUE){
if(optimize.resample == FALSE){
cc[[c]] <- tuned.methods$bestTune[[j]]
}else{
cc[[c]] <- tuned.methods$bestTune[[c]]
}
}
}
}
finalModel.bag <- finalModel[start:end]
tmp <- vector("list", bags)
for(s in seq(bags)){
tmp[[s]] <- extract.features(
x = finalModel.bag[s],
dat = mydata[[s]],
grp = trainGroup.list[[s]],
# add in gbm best tune trees???
bestTune = bt[[s]],
model.features = FALSE,
method = method[j],
f = NULL,
#similarity.metric = similarity.metric,
comp.catch = cc)
}
if(method[j] == "glmnet"){
features[[j]] <- data.frame(
do.call("cbind", unlist(unlist(tmp, recursive = FALSE),
recursive = FALSE)))
}else{
features[[j]] <- do.call("cbind", unlist(tmp, recursive = FALSE))
if(class(features[[j]]) != "data.frame"){
features[[j]] <- data.frame(features[[j]])
}
}
rownames(features[[j]]) <- colnames(X)
start <- start + bags
end <- end + bags
}
### Ensemble Aggregation
#convert to numeric & set rownames
features.num <-
lapply(
features,
FUN = function(z){
sapply(z, FUN = function(x) as.numeric(as.character(x)))
})
features.num <- lapply(features.num, function(x) {
rownames(x) <- var.names
return(x)
})
# Generate summary lists of each algorithm
agg <- lapply(features.num,
FUN = function(x){
aggregation(efs = x, metric = aggregation.metric, f = f)
})
# Basic ensemble model parameters
ensemble.results <- list(Methods = method,
ensemble.results = agg,
Number.Bags = bags,
Agg.metric = aggregation.metric,
Number.features = f)
out <- list(results = ensemble.results,
bestTunes = resample.tunes)
out
}
|
a7c0b59b1ac69f06abbe09a82bf42016300ade07
|
1acd4c0ef1d925c4ef0a2c87aa93199b18ac1b4b
|
/phenmodels/wangengel_fakedata.R
|
308624ae251207b020aec34e38b5c72be5c5e0b4
|
[] |
no_license
|
sandyie/cloned-bcvin
|
3bccd695e7e6e15486d13cb85409714872d64153
|
90ef1700d8953f4298a8a7bfa1957fe4235fcc3d
|
refs/heads/master
| 2023-03-28T13:36:36.321884
| 2021-03-30T16:34:22
| 2021-03-30T16:34:22
| 303,593,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,136
|
r
|
wangengel_fakedata.R
|
## Trying to make fake data for Wang & Engel model ##
## Started 30 Oct 2018 ##
## By Lizzie ##
## housekeeping
rm(list=ls())
options(stringsAsFactors = FALSE)
nphendat <- 100
daysperyr <- 300
climdat <- matrix(data=rnorm(nphendat*daysperyr, 25, 10), daysperyr, nphendat)
Tmin <- 0
Tmax <- 40
Topt <- 30
# bbdate <- rnorm(nphendat, 100, 15) # not needed
Fstar <- 25
#' Wang and Engel model. This model computes the rate of phenological
#' development in response to temperature using Topt bounded by Tmin and Tmax
#' (below and above which there is no Temp action on the plant) in 3 parts
#'
#' Part 1- Alpha - based on fixed parameters
#' @param Tmin - Minimum temperature
#' @param Tmax - Maximum temperature
#' @param Topt - Optimum temperature
#'
Alphafx <- function(Tmin, Tmax, Topt){
Alpha <- log10(2)/(log10((Tmax-Tmin)/(Topt-Tmin)))
return(Alpha)
}
#' Part 2- WangEngelfx - computes temperature action across all days of the year
#' @param Tmin - Minimum temperature
#' @param Tmax - Maximum temperature
#' @param Topt - Optimum temperature
#' @param Alpha - alpha parameter
#' @param Tavg - daily average temperature
#' @return cTt - data frame with Temperature action on developement and its daily accumulated values -
#'
WangEngelfx <- function(Tmin, Tmax, Topt, Alpha, Tavg){
cTt<-array(NA, dim=c(length(Tavg),2))
colnames(cTt)<-c("Temp.action","Accum.Temp.action")
for(i in 1:length(Tavg)){
if (Tmin<Tavg[i] & Tavg[i]<Tmax){
cTt[i,1] <- (2*(Tavg[i]-Tmin)^Alpha*(Topt-Tmin)^Alpha-(Tavg[i]-Tmin)^(2*Alpha))/((Topt-Tmin)^(2*Alpha))
}
if (Tavg[i]<=Tmin | Tavg[i]>=Tmax){
cTt[i,1] <-0
}
}
cTt[,2]<-cumsum(cTt[,1])
return(cTt)
}
alphahere <- Alphafx(Tmin, Tmax, Topt)
# Example for one year ...
Tavg.oneyr <- rnorm(daysperyr, 25, 8)
test <- WangEngelfx(Tmin, Tmax, Topt, alphahere, Tavg.oneyr)
which.min(abs(test[,2] - Fstar))
# Example for nyears
fstar.days <- c()
for(i in c(1:ncol(climdat))){
wangengel.thisyr <- WangEngelfx(Tmin, Tmax, Topt, alphahere, climdat[,i])
fstar.days[i] <- which.min(abs(wangengel.thisyr[,2] - Fstar))
}
hist(fstar.days)
|
5c3428f9fbe316bb20f0899baa4bcd5d03c48d50
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/symmetry/man/symmetry_test.Rd
|
cf5744a4165fa7adf29b321ab4b30d9ab229c709
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,713
|
rd
|
symmetry_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/symmetry_test.R
\name{symmetry_test}
\alias{symmetry_test}
\alias{symmetry_test.default}
\alias{symmetry_test.lm}
\alias{symmetry_test.fGARCH}
\title{Perform symmetry tests}
\usage{
symmetry_test(x, ...)
\method{symmetry_test}{default}(x, stat, mu = NULL, bootstrap = TRUE,
B = 1000, boot_method = c("sign", "reflect"), trim = 0, k = 0,
...)
\method{symmetry_test}{lm}(x, stat, B = 1000, boot_method = c("sign",
"reflect"), k = 0, ...)
\method{symmetry_test}{fGARCH}(x, stat, B = 1000, burn = 0,
boot_method = c("sign", "reflect"), k = 0, approximate = FALSE,
...)
}
\arguments{
\item{x}{an object of class numeric, lm or fGARCH}
\item{...}{not used}
\item{stat}{a character vector indicating the test statistic to be used (see
\link[=TestStatistics]{Available Test Statistics})}
\item{mu}{the centre parameter around which to test symmetry}
\item{bootstrap}{a logical indicating whether to use bootstrap}
\item{B}{the number of bootstrap replications}
\item{boot_method}{the method of bootstrap sample generation (see Details)}
\item{trim}{the trim value used for estimating the centre (as used in "mean")}
\item{k}{the k parameter of the statistic, ignored if the test statistic
doesn't depend on a parameter (see \link[=TestStatistics]{Test Statistics})}
\item{burn}{the number of elements to remove from the beginning of the time
series for testing}
\item{approximate}{a logical indicating whether to use the faster approximate
bootstrap method (see Details)}
}
\value{
An object of class "htest" containing the results of the testing.
}
\description{
This is a generic function used to perform symmetry tests on numeric vectors
or objects of class lm (linear models) and objects of class fGARCH (GARCH
mdels fitted with the fGarch package).
}
\details{
The tests are performed using bootstrap procedures or using asymptotic
results, where applicable. Currently, two methods of generating a bootstrap
sample from the null distribution are available. The "sign" method generates
the bootstrap sample by multiplying the existing sample by -1 or 1 at random
(with equal probabilities), essentially randomizing the sign of the data,
giving a symmetric distribution. The "reflect" method reflects the sample
around zero and samples length(x) elements with replacement. In practice, it
has been shown that the "sign" method is almost always better, thus is the
default.
For numeric data, the tests can be performed around a known (parameter "mu")
or unknown centre. When the centre is known, the bootstrap method gives the
same results as a Monte Carlo simulation of the p value, for tests which are
distribution free. For unknown centre (when mu = NULL), bootstrap must be
used and the estimate of the centre used is the trimmed mean, with trim
parameter "trim". By default, the mean is taken (trim = 0).
For linear models, the tests are based on a bootstrap procedure as in
\insertCite{Allison}{symmetry} and are used to test the symmetry of the
residuals around zero.
For GARCH models (must be fitted with the 'fGarch' package), the tests are also
based on bootstrap and test for symmetry of the residuals around zero. An
approximation of the bootstrap procedure is available where the residuals are
treated as iid data, which is much faster and has been shown to give similar
results to the default bootstrap procedure (described in
\insertCite{Klar2012}{symmetry}).
For a comparison of the performance of various tests of symmetry around an
unknown centre, see \insertCite{UNKcentre}{symmetry}).
}
\examples{
set.seed(1)
# IID samples
x <- rnorm(50)
symmetry_test(x, "MOI", bootstrap = FALSE, k = 3, mu = 0)
symmetry_test(x, "MOI", bootstrap = TRUE, k = 3, mu = 0)
symmetry_test(x, "MOI", bootstrap = TRUE, k = 3)
x <- rsl(50, alpha = 1.5)
symmetry_test(x, "MOI", bootstrap = FALSE, k = 3, mu = 0)
symmetry_test(x, "MOI", bootstrap = TRUE, k = 3, mu = 0)
symmetry_test(x, "MOI", bootstrap = TRUE, k = 3)
# Linear models
lin_model <- lm(dist ~ speed, cars)
symmetry_test(lin_model, "B1")
# Garch models
library(fGarch)
specskew19 = fGarch::garchSpec(model = list(omega = 0.1,
alpha = 0.3,
beta = 0.3,
skew = 1.9),
cond.dist = "snorm")
x <- fGarch::garchSim(specskew19, n = 500)
g <- fGarch::garchFit(~garch(1,1), x, cond.dist = "QMLE",
include.mean = FALSE, trace = FALSE)
\donttest{symmetry_test(g, "CH", B=400, burn = 100)} # slower
\donttest{symmetry_test(g, "FM", B=400, burn = 100, approximate = TRUE)}
}
\references{
\insertAllCited{}
}
|
7abaa2480914504ff4c080b361f4f9e26fe69329
|
6272a453abab73843befb00e5f1429ea7296affa
|
/7ma clase.R
|
ee5cf2c8626f1ee248ece53357ef663361caab7a
|
[] |
no_license
|
lorenzora/cursoR_Untdf
|
aa4690fdc198be97c90ee8e10b7940c430c29cfd
|
6d50614fd6d9b9522c71451402fa56fa095a1fee
|
refs/heads/master
| 2021-01-13T09:06:52.986602
| 2016-09-26T15:48:59
| 2016-09-26T15:48:59
| 69,254,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,538
|
r
|
7ma clase.R
|
#Septima Clase#
#funciones#
dir.create("functions")
# seguimos con estructura de datos#
#Los gráficos creados con ggplot2 pueden guardarse con ggsave#
#ggsave("Mi_grafico_mas_reciente.pdf")#guarda el ultimo archivo en el formato pdf#
#También podemos especificar que gráfico guardar con el argumento plot Hay otras opciones, como ancho (width), alto (height), ppp (dpi).#
#Por otro lado, quizás queremos guardar varios gráficos en un solo documento.#
#Hay una forma más flexible, la función pdf crea un nuevo dipositivo donde guardar los gráficos.#
library(ggplot2)
pdf("Life_Exp_vs_time.pdf", width=12, height=4)
ggplot(data=gapminder, aes(x=year, y=lifeExp, colour=country)) +
geom_line()+
theme(legend.position= "bottom")
# ¡Acordarse de apagar el dispositivo!#
dev.off()
pdf("Mi_grafico.pdf", width = 12, height = 8, onefile = TRUE)
for(continent in levels(gapminder$continent)){
print(ggplot(data=gapminder[gapminder$continent == continent, ], aes(x=year, y=lifeExp, colour=country)) +
geom_line())
}
dev.off()
pdf("Life_Exp_vs_time2.pdf", width=12, height=12, onefile = TRUE) #cambie el titulo, pero no me dejaba poner el mismo nombre porque estaba abierto#
ggplot(data=gapminder, aes(x=year, y=lifeExp, colour=country)) +
geom_line() + facet_grid(continent ~ .) +
theme(legend.position = "bottom")
# You then have to make sure to turn off the pdf device!
dev.off()#siempre chequear que dice null device, sino seguir ejecutandolo#
?facet_grid
#ambién en algún momento vamos a querer guardar datos. Podemos usar la función write.table que es similar a read.table#
#Creemos un script para limpiar datos, y solo queremos los datos de Australia:#
dir.create("cleaned-data")
aust_subset <- gapminder[gapminder$country == "Australia",]#sustrato de datos de austraila#
write.table(aust_subset,
file="cleaned-data/gapminder-aus.csv",
sep=","
)
aust_subset #me incluyo las filas, para subsanarlo:#
write.table(aust_subset,
file="cleaned-data/gapminder-aus.csv",
sep=",", quote = FALSE, row.names = FALSE
)
head(aust_subset)
aust_subset <- gapminder[gapminder$country == "Australia",]
write.table(aust_subset,
file = "cleaned-data/gapminder-aus.csv",
sep = ",", quote = FALSE, row.names = FALSE
)
head(aust_subset)
subset_1990 = gapminder[1990 < gapminder$year,]
write.table(subset_1990,
file = "cleaned-data/gapminder-1990.csv",
sep = ",", quote = FALSE, row.names = FALSE
)
head(subset_1990)
|
406e43199128377fbdf21340f9701869a07e500c
|
52e460860c17991736b30e6319be40d632c6b537
|
/man/bam2counts.Rd
|
880269b661c1d979d406ba7b8fcd687d5817cfbc
|
[] |
no_license
|
yingstat/intePareto
|
bc973a197cfb55a50bc81fad81fe5f8f8c3910db
|
8a07886f0b6687cf195b6973604e7d6dbddb8bf8
|
refs/heads/master
| 2021-05-19T10:26:21.332880
| 2021-02-03T16:30:19
| 2021-02-03T16:30:19
| 251,651,442
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 979
|
rd
|
bam2counts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bam2counts.R
\name{bam2counts}
\alias{bam2counts}
\title{Compute the number of reads fall into specific genomic region}
\usage{
bam2counts(bamFile, region, fragLength = 180)
}
\arguments{
\item{bamFile}{Aligned bam file as input.}
\item{region}{The GRanges object defined by user to calculate the number of reads fall into
this specific region. For ChIP-Seq of histone modifications they are usually
promoter, enhancer and genebody regions.}
\item{fragLength}{Extend reads toward the 3'-end to the average DNA fragment size obtained
after DNA size selection}
}
\value{
a vector of numbers
}
\description{
\code{bam2counts} computes the number of reads fall into specific genomic
region such as promoter, enhancer, genebody
}
\examples{
data("promoter")
file.bam <- system.file("extdata", "SRR925640.bam", package = "intePareto")
bam2counts(bamFile = file.bam, region = promoter, fragLength = 180)
}
|
b5343eba56330fa674ed98beeb885bb67a5d4a36
|
12db0ea069010c87f033b1444787ef82499b345d
|
/man/pagecount.func.Rd
|
fd966d8d58c774e7d8265b67fe274a08158ba6d3
|
[] |
no_license
|
git05/saraminR
|
c0bba59226dcb7d1a938180fb6926fa7e9bfbd09
|
315342fc1b93ff84823508a0c967c70be2720c00
|
refs/heads/master
| 2021-09-04T18:46:20.741563
| 2018-01-21T07:51:12
| 2018-01-21T07:51:12
| 118,219,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 231
|
rd
|
pagecount.func.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getPage.R
\name{pagecount.func}
\alias{pagecount.func}
\title{get Keywords Pages}
\usage{
pagecount.func(url.frame)
}
\description{
get Keywords Pages
}
|
9d41100935c73213e84a5d896c766043e7abb253
|
f78cd948863b0d44fb817d10e28c52a885d425e5
|
/VientoyEnergiaPorHoras.R
|
fcc0d470ea0875c2885ffd9c7b7e4f11aab8a671
|
[] |
no_license
|
GuilleHM/TFM_GuillermoHuerta_MasterBigData_2019_UNIR
|
6044fdc94c32f01559b9ce1b64dac86c79b6172a
|
6126784ba90740eded8711bb22517463bc2eeb28
|
refs/heads/master
| 2020-07-10T17:02:08.893045
| 2019-09-15T11:08:40
| 2019-09-15T11:08:40
| 204,317,561
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 15,331
|
r
|
VientoyEnergiaPorHoras.R
|
## Este "script" sirve para calcular y crear un gráfico con la variación de la velocidad del
## viento según la hora para cada mes del año.
## Para ello se emplearán los archivos .csv provinientes de la conversión de los archivos .grib
## descargados desde la BBDD ERA-Iterim de la ECMWF para el periodo 1979 - 2018. Estos archivos proporcionan
## medidas a las 00, 06, 12 y 18h, por lo que el resto de valores los obtendremos por interpolación lineal.
## Con los valores relativos del viento según la hora y el resto de parámetros necesarios (calculados en varios
## "scripts" complementarios a éste), calculamos la energía media por hora generada por cada una de las cuatro
## instalaciones de aerogeneradores que estamos contemplando: dos modelos (i-700 e i-1000) y dos alturas (0m,
## o cero metros sobre la altura de instalación de la estación meteorológica FROOGIT -10metros- y 10m, o
## 10 metros por encima de la altura de la estación FROGGIT).
## Para que el "script" funcione, debemos establecer como directorio de trabajo aquél en el que
## se encuentran los archivos
# Cargamos las bibliotecas que nos harán falta
library(tidyverse) # Ecosistema Paquetes-r
library(lubridate) # Formateado de fechas
library(MASS) # Funciones estadísticas
library(stringr) # Modificación de cadenas
library(zoo)
library(viridis)
library(broom)
library(plotly)
library(knitr) # Dibujo de tablas
library(ggplot2)
library(fitdistrplus)
library(devtools)
library(ggpubr)
# Establecemos el diretrio de trabajo
setwd("C:\\Users\\GuilleHM\\TFM\\ERAInterim\\csv_files")
# Creamos un vector con el nombre de los meses
meses <- c("ENE", "FEB", "MAR", "ABR", "MAY", "JUN", "JUL", "AGO", "SEP", "OCT", "NOV", "DIC")
# Creamos la tabla donde guardaremos las medias para cada franja horaria y cada mes
df_VVhoraria_Meses <- data.frame(Mes=meses, H0=rep(0,12), H6=rep(0,12), H12=rep(0,12), H18=rep(0,12))
# Iteramos para los doce meses del año
for (i in 1:12) {
# Guardamos los nombres de los archivos para cada mes del año
files <- list.files(path=".", pattern=paste0("*", sprintf("%02d", i), ".csv", sep=""), full.names=TRUE, recursive=FALSE)
# Creamos un vector para cada franja horaria (correspondiente a cada mes)
H00 <- H06 <- H12 <- H18 <- c()
# Iteramos sobre todos los archivos (1 por año) para cada mes del año
for (j in 1:length(files)) {
# Creamos un vector para cada franja horaria (correspodiente a al mes del año en concreto sobre el que estmos iterando)
h00 <- h06 <- h12 <- h18 <- c()
# Creamos una tabla temporal donde guardamos los valores del archivo correspondiente
df_temp = read_csv(files[j], col_types=cols(), col_names = F)
colnames(df_temp) <- c("VV_alternate_u_v_componets_m/s")
# Nos aseguramos de que el archivo tiene el numero correcto de filas para no introducir cálculos erróneos
filas <- nrow(df_temp)
if (filas != 224 && filas != 232 && filas != 240 && filas != 248){next}
# Iteramos sobre la tabla (correpondiente a un mes de un año concreto) y agrupamos
# los valores por franja horaria en el vector correspondiente
columna <- 1
for (z in seq(from=1, to=filas-1, by=2)){
raiz_temp = sqrt((df_temp[[z,1]])^2 + (df_temp[[(z+1),1]])^2)
if (raiz_temp >= 40){
columna <- columna + 1
if (columna == 5){columna <- 1}
next
}
else{
if (columna == 1){h00 <- c(h00, raiz_temp)}
if (columna == 2){h06 <- c(h06, raiz_temp)}
if (columna == 3){h12 <- c(h12, raiz_temp)}
if (columna == 4){h18 <- c(h18, raiz_temp)}
}
columna <- columna + 1
if (columna == 5){columna <- 1}
}
# Guardamos la media para cada franja horaria para el mes en el año concreto sobre el que estamos iterando
H00 <- c(H00, mean(h00))
H06 <- c(H06, mean(h06))
H12 <- c(H12, mean(h12))
H18 <- c(H18, mean(h18))
}
# Calculamos y guardamos en la tabla final la media para cada franja horaria para cada mes del año
df_VVhoraria_Meses[i,2] <- round(mean(H00, na.rm = T),2)
df_VVhoraria_Meses[i,3] <- round(mean(H06, na.rm = T),2)
df_VVhoraria_Meses[i,4] <- round(mean(H12, na.rm = T),2)
df_VVhoraria_Meses[i,5] <- round(mean(H18, na.rm = T),2)
}
# Guardamos la tabla con los valores medios para cada franja en cada mes en un archivo .csv
write.csv(df_VVhoraria_Meses, file = "VelocidadMediaVientoFranjaHoraria.csv", row.names = FALSE)
# Calculamos la media de cada mes
medias_mensuales <- rowMeans(df_VVhoraria_Meses[,-1])
# Calculamos y sustituimos el valor en la tabla por el porcentaje (en tantos por ciento) de velocidad
# del viento sobre la media diaria, para cada franja horaria para cada mes del año
for (mes in 1:12) {
for (hora in 2:5) {
df_VVhoraria_Meses[mes, hora] <- (df_VVhoraria_Meses[mes, hora] / medias_mensuales[mes])*100
}
}
# Interpolamos linearmente para obterner los valores horarios de porcentaje de velocidad del viento
# Creamos primero la tabla
df_VVInterp_Meses <- data.frame(hora=c(0:23), Ene= rep(0,24), Feb= rep(0,24), Mar= rep(0,24), Abr= rep(0,24), May= rep(0,24),
Jun= rep(0,24), Jul= rep(0,24), Ago= rep(0,24), Sep= rep(0,24), Oct= rep(0,24), Nov= rep(0,24), Dic= rep(0,24))
# Iteramos sobre la tabla, interpolando linealmente los valores de porcentaje del viento
for (mes in 1:12) {
contador <- 1
H <- 1
for (hora in 1:24) {
if ((hora-1) == 0 | (hora-1) == 6 | (hora-1) == 12 | (hora-1) == 18 ){
df_VVInterp_Meses[hora, (mes+1)] <- df_VVhoraria_Meses[mes,(H+1)]
H <- H + 1
} else if ((hora-1) != 23){
if (H==5){
df_VVInterp_Meses[hora, (mes+1)] <- approx(c(0,6), c(df_VVhoraria_Meses[mes,5], df_VVhoraria_Meses[mes,(2)]), xout=contador)$y
}
else{
df_VVInterp_Meses[hora, (mes+1)] <- approx(c(0,6), c(df_VVhoraria_Meses[mes,H], df_VVhoraria_Meses[mes,(H+1)]), xout=contador)$y
}
contador <- contador + 1
if(contador==6){contador <- 1}
}
else{
df_VVInterp_Meses[hora, (mes+1)] <- approx(c(0,6), c(df_VVhoraria_Meses[mes,5], df_VVhoraria_Meses[mes,(2)]), xout=contador)$y
H <- 1
contador <- 1
}
}
}
# Guardamos la tabla con los valores de velocidad del viento proporcionales a la media diaria ,
# para cada franja horaria para cada mes en un archivo .csv
write.csv(df_VVInterp_Meses, file = "VariacionVelocidadVientoFranjaHoraria.csv", row.names = FALSE)
# Eliminamos la columna con los nombres de los meses
df_VVhoraria_Meses$Mes <- NULL
# Trasponemos la tabla
df_VVhoraria_Meses <- t(df_VVhoraria_Meses)
# Dibujamos un gráfico de barras agrupadas (4 franjas horarias por grupo) para cada mes del año
barplot(df_VVhoraria_Meses, col=c("#de7878", "#ded778", "#78de8e", "#47427d"),
main="Variación de la velocidad del viento según la franja horaria",
xlab= "dd", ylab= "% sobre la media diaria en cada mes del año", ylim=c(80,130),
legend.text=c("00h","06h", "12h", "18h"), beside = T)
## -------- Calculamos la energía teórica para cada hora --------------- ##
# Creamos las tablas donde guardaremos los valores estimados de energía horaria generada para cada modelo (2)
# y altura (2). En total trabajaremos con cuatro opciones
df_energia_hora_i700_0 <- df_VVInterp_Meses / 100
colnames(df_energia_hora_i700_0) <- c("Hora", "WT_ENE", "WT_FEB", "WT_MAR", "WT_ABR", "WT_MAY", "WT_JUN", "WT_JUL", "WT_AGO", "WT_SEP", "WT_OCT", "WT_NOV", "WT_DIC")
df_energia_hora_i700_0$Hora <- df_energia_hora_i700_0$Hora * 100
df_energia_hora_i700_10 <- df_energia_hora_i1000_0 <- df_energia_hora_i1000_10 <- df_energia_hora_i700_0
## Creamos las variables que necesitaremos para hacer los cálculos de energía generada
# Densidad del aire media para cada mes del año
DensAire <- c(1.247, 1.241, 1.233, 1.221, 1.208, 1.199, 1.190, 1.185, 1.195, 1.204, 1.227, 1.242)
# Área efectiva para cada modelo
a_i700 <- 2.83
a_i1000 <- 3.98
# Coeficiente de potencia para cada modelo (eliminamos el Cp correspondiente a 0 m/s para casar los valores
# con los contenedores de 1m/s de la tabla para el cálculo de la energía). Rango 1-21 m/s Velocidad Viento.
cp_i700 <- c(0.000000,0.000000,0.000000,35.989501,34.714282,29.324779,29.377437,26.992126,25.474763,23.321197,
20.875412,18.661223,16.773884,15.109295,13.137421,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000)
cp_i1000 <- c(0.000000,0.000000,0.000000,32.079530,37.956567,34.218165,29.926644,25.663624,22.531110,19.709663,
17.431242,15.445699,14.951535,14.216001,13.383024,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000)
# Factores de escala y forma (para cada mes del año) para cada una de las localizaciones (altura
# estación FROGGIT - 0m - y 10 metros por encima -10m-)
escala_0m <- c(3.04, 3.6, 4.18, 4.45, 4.48, 4.43, 4.08, 3.94, 3.71, 3.43, 3.11, 3.15)
escala_10m <- c(3.64, 4.32, 5.00, 5.32, 5.36, 5.30, 4.89, 4.71, 4.45, 4.1, 3.73, 3.77)
forma <- c(1.72, 1.89, 2.1, 2.43, 2.54, 2.66, 2.76, 2.55, 2.58, 2.15, 1.85, 1.71)
# Variable para el eje de abcisas de la función de densidad weibull
eje_x <- seq(1,21,1)
# Factores de relación entre los valores de potencia obtenidos desde los valores medidos y los obtenidos
# desde la función de densidad weibull
Rel_0m <- c(1.9786202, 1.713285, 1.7308071, 1.4868179, 1.4419836, 1.4082412, 1.3955166, 1.5183958, 1.4886731, 1.7619403, 1.8136051, 1.9555852)
Rel_10m <- c(1.10004409, 1.6992983, 1.6845774, 1.4648516, 1.4033297, 1.3666074, 1.3593554, 1.4782154, 1.4776561, 1.7172133, 1.8288339, 1.9423779)
## Iteramos sobre cada una de las cuatro tablas de energía para obterner los valores
## i_700_0m
for (i in 1:12) { # Iteramos para cada mes
for (j in 1:24) { # Iteramos para cada hora del dia
# Generamos la función de densidad weibull para estimar la energia estimada generada para cada hora
densidad_weibull<-tibble(VelViento = eje_x, Frecuencia = dweibull(x = eje_x,shape = forma[i],scale = escala_0m[i]))
# Creamos un vector temporal donde almacenaremos la energía para cada velocidad del viento
energia_temp <- c()
for (k in 1:21) { # Iteramos para las velocidades del viento de la función de densidad (21)
calulo_energia <- densidad_weibull$Frecuencia[k] * 0.5 * DensAire[i] * a_i700 * (densidad_weibull$VelViento[k])^3 * (cp_i700[k] / 100) * Rel_0m[i] * 0.001 # 0.001 para pasar a kWh
energia_temp <- c(energia_temp, calulo_energia)
}
df_energia_hora_i700_0[j,(i+1)] <- round(df_energia_hora_i700_0[j,(i+1)] * sum(energia_temp), 3)
}
}
## i_700_10m
for (i in 1:12) { # Iteramos para cada mes
for (j in 1:24) { # Iteramos para cada hora del dia
# Generamos la función de densidad weibull para estimar la energia estimada generada para cada hora
densidad_weibull<-tibble(VelViento = eje_x, Frecuencia = dweibull(x = eje_x,shape = forma[i],scale = escala_10m[i]))
# Creamos un vector temporal donde almacenaremos la energía para cada velocidad del viento
energia_temp <- c()
for (k in 1:21) { # Iteramos para las velocidades del viento de la función de densidad (21)
calulo_energia <- densidad_weibull$Frecuencia[k] * 0.5 * DensAire[i] * a_i700 * (densidad_weibull$VelViento[k])^3 * (cp_i700[k] / 100) * Rel_10m[i] * 0.001 # 0.001 para pasar a kWh
energia_temp <- c(energia_temp, calulo_energia)
}
df_energia_hora_i700_10[j,(i+1)] <- round(df_energia_hora_i700_10[j,(i+1)] * sum(energia_temp), 3)
}
}
## i_1000_0m
for (i in 1:12) { # Iteramos para cada mes
for (j in 1:24) { # Iteramos para cada hora del dia
# Generamos la función de densidad weibull para estimar la energia estimada generada para cada hora
densidad_weibull<-tibble(VelViento = eje_x, Frecuencia = dweibull(x = eje_x,shape = forma[i],scale = escala_0m[i]))
# Creamos un vector temporal donde almacenaremos la energía para cada velocidad del viento
energia_temp <- c()
for (k in 1:21) { # Iteramos para las velocidades del viento de la función de densidad (21)
calulo_energia <- densidad_weibull$Frecuencia[k] * 0.5 * DensAire[i] * a_i1000 * (densidad_weibull$VelViento[k])^3 * (cp_i1000[k] / 100) * Rel_0m[i] * 0.001 # 0.001 para pasar a kWh
energia_temp <- c(energia_temp, calulo_energia)
}
df_energia_hora_i1000_0[j,(i+1)] <- round(df_energia_hora_i1000_0[j,(i+1)] * sum(energia_temp), 3)
}
}
## i_1000_10m
for (i in 1:12) { # Iteramos para cada mes
for (j in 1:24) { # Iteramos para cada hora del dia
# Generamos la función de densidad weibull para estimar la energia estimada generada para cada hora
densidad_weibull<-tibble(VelViento = eje_x, Frecuencia = dweibull(x = eje_x,shape = forma[i],scale = escala_10m[i]))
# Creamos un vector temporal donde almacenaremos la energía para cada velocidad del viento
energia_temp <- c()
for (k in 1:21) { # Iteramos para las velocidades del viento de la función de densidad (21)
calulo_energia <- densidad_weibull$Frecuencia[k] * 0.5 * DensAire[i] * a_i1000 * (densidad_weibull$VelViento[k])^3 * (cp_i1000[k] / 100) * Rel_10m[i] * 0.001 # 0.001 para pasar a kWh
energia_temp <- c(energia_temp, calulo_energia)
}
df_energia_hora_i1000_10[j,(i+1)] <- round(df_energia_hora_i1000_10[j,(i+1)] * sum(energia_temp), 3)
}
}
# Obtenemos la energía media generada (kWh) cada día para cada mes del año y el total para cada configuración
a <- colSums(df_energia_hora_i700_0)
# 0.986 1.308 1.887 1.760 1.687 1.546 1.151 1.161 0.922 0.969 0.865 1.105
Total_a <- sum(a) * 30.42
#
b <- colSums(df_energia_hora_i700_10)
# 0.979 2.237 3.045 2.874 2.729 2.518 1.955 1.960 1.658 1.692 1.582 1.916
Total_b <- sum(b) * 30.42
#
c <- colSums(df_energia_hora_i1000_0)
# 1.422 1.886 2.721 2.566 2.466 2.271 1.687 1.691 1.334 1.401 1.243 1.588
Total_c <- sum(c) * 30.42
#
d <- colSums(df_energia_hora_i1000_10)
# 1.386 3.163 4.270 4.065 3.887 3.617 2.864 2.856 2.435 2.452 2.268 2.718
Total_c <- sum(c) * 30.42
#
# Guardamos las tabalas de energía en sendos archivos
write.csv(df_energia_hora_i700_0, file = "Energia_i700_0m.csv", row.names = FALSE)
write.csv(df_energia_hora_i700_10, file = "Energia_i700_10m.csv", row.names = FALSE)
write.csv(df_energia_hora_i1000_0, file = "Energia_i1000_0m.csv", row.names = FALSE)
write.csv(df_energia_hora_i1000_10, file = "Energia_i1000_10m.csv", row.names = FALSE)
## Para trabjar con un entorno mas limpio, eliminamos algunas variables cuando comenzamos con los cálculos de energía
## remove(hora, columna, contador, filas, files,H, H00, h00, H06, h06, H12, h12, H18, h18, i, j, medias_mensuales, mes, meses, raiz_temp, z, df_temp, df_VVhoraria_Meses, df_VVInterp_Meses)
## Eliminamos las variables del entorno
## rm(list=ls())
|
211946a9e2ae9f68c01306264991808d54e453ab
|
5b7e76d3f81983038ae567203cbbe57994a4d56c
|
/analysis/eQTL.pQTL.Mediation.ExampleScript.R
|
f699213615858ec067007a4dbdf3c894ecd97692
|
[
"MIT"
] |
permissive
|
fboehm/chick2016
|
12f15699babc3eaa2663c762b4d57495c0035504
|
9979224cb9bee2e11d9e87b6ad0e5036e3b2fe81
|
refs/heads/master
| 2020-05-19T10:27:41.253418
| 2019-05-05T03:40:02
| 2019-05-05T03:40:02
| 184,972,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,375
|
r
|
eQTL.pQTL.Mediation.ExampleScript.R
|
###################################################
# #
# eQTL/pQTL & Mediation Examples #
# Updated June 15, 2016 #
# Steve Munger #
# steven.munger@jax.org #
# #
###################################################
#### This script runs best in RStudio (especially the interactive plots) www.rstudio.com
#### Please forgive my clunky code. You can teach an old dog new tricks, but they won't necessarily be pretty tricks.
### First, install some R packages and our data
options(stringsAsFactors = F) #This will save headaches when working with data frames that contain character strings...
install.packages("devtools")
library(devtools)
install_github("dmgatti/DOQTL")
install_github("simecek/intermediate")
install_github("kbroman/qtlcharts")
library(DOQTL)
library(intermediate)
library(qtlcharts)
setwd("~/") #Set working directory
load("~/ChickMungeretal2016_DiversityOutbred.Rdata") ###Load in the dataset
# Data objects include:
#### RNA data
# expr.rna.192 <- 192 samples (rows) x 21,454 genes (columns) Liver RNA-seq expression data. Data was upper quartile normalized and transformed to rank normal scores.
# annotations.rna.192 <- Gene information for the 21,454 genes. 21,454 rows (genes) x 6 columns (annotations). Order of rows corresponds to order of columns in expr.rna.192.
# covariates.rna.192 <- Experimental variable information for the 192 Diversity Outbred samples.
# X.rna <- Covariate matrix for eQTL mapping (includes additive effects and interaction of sex, diet, and batch)
#### Protein data
# expr.protein.192 <- 192 samples (rows) x 8,050 proteins (columns) Liver protein abundance data. Data was quantile normalized and transformed to rank normal scores.
# annotations.protein.192 <- Protein information for 8,050 proteins. 8,050 rows (proteins) x 11 columns (annotations). Order of rows corresponds to order of columns in expr.protein.192.
# covariates.protein.192 <- Experimental variable information (pertinent to protein dataset) for the 192 Diversity Outbred samples.
# X.protein <- Covariate matrix for pQTL mapping (includes additive effects and interaction of sex and diet)
#### Common objects used in e/pQTL mapping and mediation analysis
# K.LOCO.192 <- Kinship Matrix for QTL mapping. Constructed using "Leave One Chromosome Out (LOCO)" method. List with 20 elements corresponding to the 19 autosomes and X chromosome.
# probs.192 <- 8-state founder genotypes at 64,000 imputed markers for each of 192 samples. 192x8x64000 array.
# markers.64k <- Location information for the 64,000 imputed markers.
# samples.192 <- List of 192 Diversity Outbred sample names.
### SCAN for eQTL and pQTL
## Example gene = Tmem68
##################################################################################################
## 1) Scan for eQTL
my.gene <- "Tmem68" ### Input your gene of interest
target.rna.index <- which(annotations.rna.192$Gene == my.gene) ### find the index number for your gene of interest
annotations.rna.192[target.rna.index,] ### Show information for the gene of interest
scanone.rna <- scanone(expr.rna.192, pheno.col=target.rna.index,K=K.LOCO.192, probs=probs.192, snps=markers.64K, addcovar=X.rna[,-1]) ## Perform the eQTL scan
plot(scanone.rna) ## plot the eQTL LOD scores
# A little function to find the SNP maximizing LOD score (autosomes only)
argmax.lod <- function(scanone.fit)
scanone.fit$lod$A$marker[which.max(scanone.fit$lod$A$lod)[1]]
# Let's plot the founder strain coefficients for the autosome with the peak LOD marker
argmax.snp.rna <- argmax.lod(scanone.rna)
coefplot(scanone.rna, chr=markers.64K[argmax.snp.rna,"chr"])
coefplot(scanone.rna,chr=4) # Run this if you want to manually input the chromosome to plot
###################################################################################################
###################################################################################################
## 2) Scan for pQTL
target.protein.index <- which(annotations.protein.192$Associated.Gene.Name == my.gene)
scanone.protein <- scanone(expr.protein.192, pheno.col=target.protein.index, K=K.LOCO.192,probs=probs.192, snps=markers.64K, addcovar=X.protein[,-1])
plot(scanone.protein)
#
# effect plot for autosome with max. LOD
argmax.snp.protein <- argmax.lod(scanone.protein)
coefplot(scanone.protein, chr=markers.64K[argmax.snp.protein,"chr"])
coefplot(scanone.protein, chr=13) # Run this if you want to manually input the chromosome to plot
###################################################################################################
###################################################################################################
# Mediation Scan
#Requires:
#### target - numeric vector with transcript/protein expression
#### mediator - matrix, each column is one transcript/protein's expression
#### annotation - data.frame with mediator annotation, must include columns "chr" and "pos"
#### qtl.geno - matrix, haplotype probabilities at the QTL we want to mediate
#### covar - additive covariates
#### method = c("ignore", "lod-diff", "double-lod-diff", "lod-ratio") ### we prefer "double-lod-diff"
## 3) Mediation Scan - Condition distant pQTL on protein intermediates
y <- expr.protein.192[,target.protein.index]
geno.argmax.protein <- probs.192[,-1,argmax.snp.protein]
# trim annotation, calculate middle point
annot.protein <- annotations.protein.192[,c("Ensembl.Protein.ID", "Ensembl.Gene.ID", "Associated.Gene.Name")]
annot.protein$Chr <- annotations.protein.192$Chromosome.Name
annot.protein$Pos <- (annotations.protein.192$Gene.Start..bp. + annotations.protein.192$Gene.End..bp.)/2
med <- mediation.scan(target=y, mediator=expr.protein.192, annotation=annot.protein,
covar=X.protein[,-1], qtl.geno=geno.argmax.protein,method="double-lod-diff")
kplot(med) #Interactive Plot - Hover over points to see gene symbols
plot(med) #Static plot
## 4) Mediation Scan - Condition distant pQTL on transcript intermediates
# trim annotation, calculate middle point
annot.rna <- annotations.rna.192[,c("EnsemblID", "Gene", "Chr")]
colnames(annot.rna) = c("Ensemble.Gene.ID","Associated.Gene.Name","Chr")
annot.rna$Pos <- (annotations.rna.192$Start.Mbp + annotations.rna.192$End.Mbp)/2
med <- mediation.scan(target=y, mediator=expr.rna.192, annotation=annot.rna,
covar=X.protein[,-1], qtl.geno=geno.argmax.protein)
kplot(med) #Interactive Plot - Hover over points to see gene symbols
plot(med) #Static plot
######################################################################################################
#### Other example proteins to scan
## Ndufaf1
## Mtr
## Cct7
## Glul
## Xrcc6
## Elp3
## Aven
## Klc4
###################################################################
### Optional code to set different covariates for RNA and Protein
# X.rna <- model.matrix(~Sex*Diet*Batch, covariates.rna.192)
# colnames(X.rna)[2] <- "sex" # DOQTL requirement
# X.protein <- model.matrix(~Sex*Diet, covariates.protein.192)
# colnames(X.protein)[2] <- "sex" # DOQTL requirement
###################################################################
|
4812d580372708ff815deb5763c25e431aff8b4a
|
d9212ebe9e3c2f615f4ab00c5312140e42b8f9fe
|
/R/globals.R
|
aec503c61c179fee1861fdb5b772d3c0ae859a03
|
[] |
no_license
|
cran/metaplus
|
b89e51b742682fd21529ba4aec63d5576ad02805
|
a849c33a3dd4795872949e6b250d511ade845b7a
|
refs/heads/master
| 2022-06-14T10:57:04.322727
| 2022-05-13T04:40:02
| 2022-05-13T04:40:02
| 23,156,103
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 151
|
r
|
globals.R
|
utils::globalVariables(c("newstart.meta","profilemix.fit0","p","profilenorm.start","profilenorm.fit0","profilet.fit0","isreg","grad","mymle.options"))
|
8d64206ea21df59ba7a4cf8c3949adb00051c69a
|
11fc7e9df7d1c468a9289001b0cc3bf1783ccd19
|
/Exercises/Chapter 3/q12.R
|
2eb7c71f715933adbfe99b8a526dab641d9bfa1d
|
[] |
no_license
|
IrfanZM/ISL
|
b30b91466732817989ff291da3d630d692a61183
|
4c57ba3d1ffb11fea878e326642d5d0f8bcb484c
|
refs/heads/master
| 2020-06-19T04:07:04.072730
| 2019-07-12T10:07:16
| 2019-07-12T10:07:16
| 196,556,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
q12.R
|
set.seed(1)
x <- rnorm(100)
y <- -sample(x,100)
plot(x,y)
|
2b74701146798b6985628eaf189a6de78b94861c
|
c109762169e1aa82f53ef9d6eb60e74994d041e9
|
/scripts/plot_ndw.R
|
298c1e6a7fc52449b4fb2f977c41ac2f33e2f1fb
|
[] |
no_license
|
hgfernan/r4prog
|
79bd22acdd7228711352a13971ca2d6b1fd1b4d5
|
84ec69b3c8323d43e3ac3923696943aa7a052729
|
refs/heads/master
| 2022-12-24T23:31:29.842564
| 2020-09-28T23:50:30
| 2020-09-28T23:50:30
| 299,457,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 747
|
r
|
plot_ndw.R
|
nweek <- 1
ndw <- 0
for (i in 1:length(work$new_deaths)) {
ndw[nweek] <- ndw[nweek] + work$new_deaths[i]
if ((i %% 7) == 0) {
ndw[nweek] <- ndw[nweek] / 7
nweek <- nweek + 1
ndw[nweek] <- 0
}
}
remainder <-length(work$new_deaths) %% 7
if (remainder != 0){
ndw[nweek] <- ndw[nweek] / remainder
}
week <- 1:length(ndw)
df_all <- data.frame(week, ndw)
df_nonzero <- subset(df_all, ndw > 0)
# dev.off()
# plot(week, ndw)
ndw_log = log(df_nonzero$ndw)
week_sq = df_nonzero$week * df_nonzero$week
df_ndw_log = data.frame(week = df_nonzero$week, week_sq, ndw_log)
ndw_log_lm = lm(ndw_log ~ week + week_sq, data = df_ndw_log)
# dev.off()
plot(df_ndw_log$week, df_ndw_log$ndw_log)
lines(df_ndw_log$week, ndw_log_lm$fitted.values)
|
c335a185671a0ff1a6c5a515bde2b609591623da
|
e129d729d4cb60e9311b658629c426a654e4beb1
|
/WavyFunction.R
|
6246e2271cd5a1f6c053498960c32c34a7aa73ab
|
[] |
no_license
|
chapinux/TestFunction_GA
|
536ffdee9772bd826e7a091d6c0b731682a5b4f3
|
e7199d5d0c82c3e833336e5409fe04f607db7924
|
refs/heads/master
| 2021-05-15T17:00:37.978336
| 2017-11-30T13:05:34
| 2017-11-30T13:05:34
| 107,564,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 905
|
r
|
WavyFunction.R
|
# Wavy function
#
#n is dimension of input space
# f(x) 1 - 1/ n sum for i ) 1 to n cos (k*x[i]) exp(-x[i]^2 / 2 )
# Where, in this exercise, k = 10. The number of local minima is kn and (k + 1)n for odd and even k respectively.
#
# Here, n represents the number of dimensions and x_i \in [-\pi, \pi] for i=1,2
library(plotly)
# lattice
x <- seq (-pi,pi, length=100)
y <- seq (-pi,pi, length=100)
#every x,y couple
x<- pi
y <- pi
points <- expand.grid(x,y)
z <- seq(0,0, length =100)
k<- 10
z <- 1 - 1/2 * ((cos(points$Var1 * k )* exp(-(points$Var1^2)/2)) + (cos(points$Var2 * k )* exp(-(points$Var2^2)/2)))
points <- cbind(points, z)
names(points) <- c("x", "y", "z")
#take z as a matrix for surface display with plotly
surf <- matrix(points$z, nrow = 100)
plot_ly( z=~surf, showlegend=FALSE, showscale=FALSE, colors='Spectral') %>%
add_surface( opacity= 0.9 )
dev.off()
|
853a3c91bb7172353de16811fa7cbf656df9e3aa
|
0766087bc0a2a407400a680039c9b55e4a3d5fa7
|
/man/sims_data.Rd
|
48132f3be411b1808dfa3c76aa2c9606e85fa0ab
|
[
"MIT"
] |
permissive
|
poissonconsulting/sims
|
e1b7f52b8ce836901f5b344744a9e3d3b6c4a90b
|
f58fd93f39dbed375a92de179163b748dda0fe9d
|
refs/heads/main
| 2022-06-28T16:48:32.481948
| 2022-06-17T22:14:34
| 2022-06-17T22:14:34
| 191,258,625
| 1
| 2
|
NOASSERTION
| 2022-06-17T21:51:24
| 2019-06-10T23:18:19
|
R
|
UTF-8
|
R
| false
| true
| 681
|
rd
|
sims_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{sims_data}
\alias{sims_data}
\title{Simulated Datasets}
\usage{
sims_data(path = ".")
}
\arguments{
\item{path}{A string of the path to the directory with the simulated data.}
}
\value{
An \code{\link[nlist:nlists]{nlist::nlists_object()}} of the simulated datasets.
}
\description{
Gets the simulated datasets as an \code{\link[nlist:nlists]{nlist::nlists_object()}}.
There is no guarantee that all the datasets will fit in memory.
}
\examples{
set.seed(10)
sims_simulate("a <- runif(1)",
nsims = 10L, path = tempdir(),
exists = NA, ask = FALSE
)
library(nlist)
sims_data(tempdir())
}
|
80fb9902a279ab4a91e4b0faf89a3710a23ebc61
|
0d43c45303c6b9b3ac79113c1b7fdc71a3a7e697
|
/tests/C_tests.R
|
6f419ff5f60f74cdd3604166f6a3395c107a7ca4
|
[] |
no_license
|
HappyLiPei/tea
|
b3f590bac389beb919809cda417250d66e351d0f
|
26d59f69592a0e31644f29707baa4f3c6dde7424
|
refs/heads/master
| 2021-04-03T05:09:17.597599
| 2015-05-23T14:48:23
| 2015-05-23T14:48:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31
|
r
|
C_tests.R
|
library(tea)
.C("tea_c_tests")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.