blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70d845a608cb734d80dd3586732122a470ee4ac
|
b61d15eb9c927bf95c8f9b9be2af9ce3cd60b901
|
/thurs_practice.R
|
05cc329b2e69c3fcb2c63450e4721eb7b2ac5901
|
[] |
no_license
|
alturkaa/soc133
|
4e636c2a1949f283bb8302d728cfb0f0c1fb7c42
|
25b25d41078dddfe491a15b92fd3ceb0663e911c
|
refs/heads/master
| 2022-02-09T06:29:51.419560
| 2019-08-01T18:54:18
| 2019-08-01T18:54:18
| 105,697,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,341
|
r
|
thurs_practice.R
|
# Practice worksheet: exploring a class survey with the tidyverse.
# Most prompts below are exercises left for the reader to complete;
# run lines interactively (Ctrl-Enter) as instructed.
library(tidyverse) # run just this line (ctrl-Enter)
# import the data file and assign it to an object called survey. Use the read_csv function.
# NOTE: assumes class_survey.csv is in the current working directory.
survey <- read_csv("class_survey.csv")
# What's the median population of the counties you all live in?
# (no code required) Why is there such a big difference between the mean and the median?
# How many of you discuss politics with your closest contacts almost all of the time or most of the time? Use both the table and count functions.
# Create a new variable that gives you the percent of people you all talk politics with who have a similar political affiliation. Use mutate. Be sure to overwrite the dataframe. I've started it for you.
# (starter scaffold: add the new-variable definition inside mutate())
survey <- survey %>%
mutate()
# Create a table that shows the percent of people who wrote to Congress by how interested they are in politics. Use group_by, then summarize.
# What is the mean political heterogeneity index (2000-2016) (variable name is phi_00_16) broken down by whether someone thinks other people are generally trustworthy? Use group_by and summarize.
# What is the mean political heterogeneity index (2000-2016) (variable name is phi_00_16) broken down by whether someone thinks voting is a duty or a choice (variable name is duty_choice)? Use group_by and summarize.
# plot a bar graph of political interest
# draw a density plot of the average commute of all the counties
# plot a two-way chart of political interest and contribution (variable name is contrib)
# draw a boxplot of the relationship between duty_choice and phi_00_16
# draw something that plots the relationship between income inequality (gini) and phi_00_16
# Bonus: What is the political heterogeneity index (2000-2016) of counties that Trump won in 2016? What is it for counties that Clinton won?
# Bonus: Create a new variable that will tell you whether a county became more heterogenous or homogenous between 1980-1996 and 2000-2016. Be sure to overwrite the original dataframe or, if you prefer, save it as a new name.
# How many counties became more homogenous? How many became more heterogenous?
# Bonus: draw a boxplot of anything you're interested in, with a title and good labels
# Bonus: draw a scatterplot (showing points and a line) of something you're interested in, with a title and good labels
|
534c6fcae7d9d8b4da54931d62b85c4b660b1879
|
d8fa81bdb4ff6cd0fae27dcc18be7f854595d7eb
|
/man/trait_match.Rd
|
8986bea0d335255acb699feb66978ace85a688d4
|
[] |
no_license
|
rogini98/traitmatch
|
64fe7239fa2325e260cd7b7fd377f522eb15d289
|
2d6cc34a23e1cce999b506ffafcb8c6d5455cd62
|
refs/heads/master
| 2022-03-13T17:07:05.028679
| 2016-02-22T10:56:16
| 2016-02-22T10:56:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 264
|
rd
|
trait_match.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trait_match.R
\docType{package}
\name{trait_match}
\alias{trait_match}
\alias{trait_match-package}
\title{trait_match}
\description{
Run models to analyze trait matching in networks.
}
|
4c151b6d34f935fd3720bf9a1e133b8d42017760
|
01b1302af51d339f7c8827a620c4a5fb26c890f1
|
/outcome_measurement/malaria/cod/prep_data.R
|
81de8a40efefd0e62f4cf10be2a1d13590fc973d
|
[] |
no_license
|
ihmeuw/gf
|
64ab90fb5a5c49694bde1596f4b20fcf107a76e3
|
29e0c530b86867d5edd85104f4fe7dcb1ed0f1ee
|
refs/heads/develop
| 2021-08-15T02:16:59.086173
| 2021-08-03T19:52:31
| 2021-08-03T19:52:31
| 109,062,373
| 3
| 6
| null | 2019-03-21T01:48:02
| 2017-10-31T23:17:16
|
R
|
ISO-8859-1
|
R
| false
| false
| 34,956
|
r
|
prep_data.R
|
# ----------------------------------------------
# function to prep the DRC PNLP data
# Prep one raw DRC PNLP data sheet into a standardized data.table.
#
# Args:
#   dataSheet: data.table holding one sheet of a PNLP excel workbook.
#   sheetname: short code of the sheet (e.g. "KIN", "BDD", "EQ"), used to apply
#              sheet-specific cleaning fixes.
#   index:     row index into the global `PNLP_files` table;
#              `PNLP_files$year[index]` gives the year this sheet covers.
#
# Returns: the cleaned data.table with standardized column names, a `year`
#   column, numeric `month`, a `date` column, total rows removed, and health
#   zone names normalized to full lowercase names.
#
# NOTE(review): depends on the global `PNLP_files` table being in scope, and on
#   `dataSheet` being a data.table (uses `:=` and `with=F`).
prep_data <- function(dataSheet, sheetname, index){
  # ----------------------------------------------
  # Column name sets for each layout variant of the raw sheets; which set
  # applies depends on the file's year (and, for 2010, the sheet).
  # ----------------------------------------------
  columnNames2016 <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                       "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                       "suspectedMalaria_under5", "suspectedMalaria_5andOlder", "suspectedMalaria_pregnantWomen",
                       "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                       "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                       "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                       "mildMalariaTreated_under5", "mildMalariaTreated_5andOlder", "mildMalariaTreated_pregnantWomen",
                       "severeMalariaTreated_under5", "severeMalariaTreated_5andOlder", "severeMalariaTreated_pregnantWomen",
                       "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                       "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                       "ANC_1st", "ANC_2nd", "ANC_3rd", "ANC_4th", "SP_1st", "SP_2nd","SP_3rd",
                       "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool", "VAR_0to11mos",
                       "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                       "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                       "ArtLum_received", "ArtLum_used",
                       "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                       "stockOut_qui_pill", "stockOut_qui_inj", "stockOut_ASAQ_inj", "stockOut_RDT", "stockOut_artLum",
                       "smearTest_completedUnder5", "smearTest_completed5andOlder", "smearTest_positiveUnder5", "smearTest_positive5andOlder",
                       "RDT_received", "RDT_completedUnder5", "RDT_completed5andOlder", "RDT_positiveUnder5", "RDT_positive5andOlder",
                       "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                       "reports_received", "reports_expected", "healthFacilities_total", "healthFacilities_numReported", "healthFacilities_numReportedWithinDeadline",
                       "hzTeam_supervisors_numPlanned", "hzTeam_supervisors_numActual", "hzTeam_employees_numPlanned", "hzTeam_employees_numActual",
                       "awarenessTrainings_numPlanned", "awarenessTrainings_numActual",
                       "SSC_fevers", "SSC_RDT_completed", "SSC_RDT_positive",
                       "SSC_ACT", "SSC_casesReferred", "SSC_casesCrossReferred")
  # 2015 layout: same as 2016 minus the hzTeam_employees_* columns
  columnNames2015 <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                       "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                       "suspectedMalaria_under5", "suspectedMalaria_5andOlder", "suspectedMalaria_pregnantWomen",
                       "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                       "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                       "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                       "mildMalariaTreated_under5", "mildMalariaTreated_5andOlder", "mildMalariaTreated_pregnantWomen",
                       "severeMalariaTreated_under5", "severeMalariaTreated_5andOlder", "severeMalariaTreated_pregnantWomen",
                       "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                       "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                       "ANC_1st", "ANC_2nd", "ANC_3rd", "ANC_4th", "SP_1st", "SP_2nd","SP_3rd",
                       "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool", "VAR_0to11mos",
                       "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                       "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                       "ArtLum_received", "ArtLum_used",
                       "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                       "stockOut_qui_pill", "stockOut_qui_inj", "stockOut_ASAQ_inj", "stockOut_RDT", "stockOut_artLum",
                       "smearTest_completedUnder5", "smearTest_completed5andOlder", "smearTest_positiveUnder5", "smearTest_positive5andOlder",
                       "RDT_received", "RDT_completedUnder5", "RDT_completed5andOlder", "RDT_positiveUnder5", "RDT_positive5andOlder",
                       "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                       "reports_received", "reports_expected", "healthFacilities_total", "healthFacilities_numReported", "healthFacilities_numReportedWithinDeadline",
                       "hzTeam_supervisors_numPlanned", "hzTeam_supervisors_numActual",
                       "awarenessTrainings_numPlanned", "awarenessTrainings_numActual",
                       "SSC_fevers", "SSC_RDT_completed", "SSC_RDT_positive",
                       "SSC_ACT", "SSC_casesReferred", "SSC_casesCrossReferred")
  # "Complete" layout (used for 2017, the else branch below): adds
  # presumedMalaria_*, peopleTested_*, and splits SSC_* by age group
  columnNamesComplete <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                           "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                           "suspectedMalaria_under5", "suspectedMalaria_5andOlder", "suspectedMalaria_pregnantWomen",
                           "presumedMalaria_under5", "presumedMalaria_5andOlder", "presumedMalaria_pregnantWomen",
                           "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                           "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                           "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                           "mildMalariaTreated_under5", "mildMalariaTreated_5andOlder", "mildMalariaTreated_pregnantWomen",
                           "severeMalariaTreated_under5", "severeMalariaTreated_5andOlder", "severeMalariaTreated_pregnantWomen",
                           "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                           "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                           "ANC_1st", "ANC_2nd", "ANC_3rd", "ANC_4th", "SP_1st", "SP_2nd","SP_3rd",
                           "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool", "VAR_0to11mos",
                           "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                           "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                           "ArtLum_received", "ArtLum_used",
                           "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                           "stockOut_qui_pill", "stockOut_qui_inj", "stockOut_ASAQ_inj", "stockOut_RDT", "stockOut_artLum",
                           "smearTest_completedUnder5", "smearTest_completed5andOlder", "smearTest_positiveUnder5", "smearTest_positive5andOlder",
                           "RDT_received", "RDT_completedUnder5", "RDT_completed5andOlder", "RDT_positiveUnder5", "RDT_positive5andOlder",
                           "peopleTested_under5", "peopleTested_5andOlder",
                           "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                           "reports_received", "reports_expected", "healthFacilities_total", "healthFacilities_numReported", "healthFacilities_numReportedWithinDeadline",
                           "hzTeam_supervisors_numPlanned", "hzTeam_supervisors_numActual", "hzTeam_employees_numPlanned", "hzTeam_employees_numActual",
                           "awarenessTrainings_numPlanned", "awarenessTrainings_numActual",
                           "SSC_fevers_under5", "SSC_fevers_5andOlder", "SSC_RDT_completedUnder5", "SSC_RDT_completed5andOlder", "SSC_RDT_positiveUnder5", "SSC_RDT_positive5andOlder",
                           "SSC_ACT_under5", "SSC_ACT_5andOlder", "SSC_casesReferred_under5", "SSC_casesReferred_5andOlder",
                           "SSC_casesCrossReferred_under5", "SSC_casesCrossReferred_5andOlder")
  columnNames2014 <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                       "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                       "suspectedMalaria_under5", "suspectedMalaria_5andOlder", "suspectedMalaria_pregnantWomen",
                       "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                       "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                       "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                       "mildMalariaTreated_under5", "mildMalariaTreated_5andOlder", "mildMalariaTreated_pregnantWomen",
                       "severeMalariaTreated_under5", "severeMalariaTreated_5andOlder", "severeMalariaTreated_pregnantWomen",
                       "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                       "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                       "ANC_1st", "SP_1st", "SP_2nd","SP_3rd",
                       "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool",
                       "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                       "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                       "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                       "stockOut_qui_pill", "stockOut_qui_inj", "stockOut_ASAQ_inj",
                       "smearTest_completed", "smearTest_positive", "thinSmearTest",
                       "RDT_received", "RDT_completed", "RDT_positive",
                       "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                       "reports_received", "reports_expected", "healthFacilities_total", "healthFacilities_numReported",
                       "hzTeam_supervisors_numPlanned", "hzTeam_supervisors_numActual",
                       "awarenessTrainings_numPlanned", "awarenessTrainings_numActual"
  )
  columnNames2011to2013 <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                             "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                             "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                             "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                             "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                             "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                             "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                             "ANC_1st", "SP_1st", "SP_2nd",
                             "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool",
                             "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                             "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                             "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                             "stockOut_qui_pill", "stockOut_qui_inj",
                             "smearTest_completed", "smearTest_positive",
                             "RDT_received", "RDT_completed", "RDT_positive",
                             "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                             "reports_received", "reports_expected", "healthFacilities_total", "healthFacilities_numReported"
  )
  columnNames2010 <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                       "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                       "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                       "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                       "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                       "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                       "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                       "ANC_1st", "SP_1st", "SP_2nd",
                       "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool",
                       "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                       "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                       "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                       "stockOut_qui_pill", "stockOut_qui_inj",
                       "smearTest_completed", "smearTest_positive",
                       "RDT_received", "RDT_completed", "RDT_positive",
                       "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                       "reports_received", "reports_expected", "healthFacilities_total", "healthFacilities_numReported")
  # 2010 KIN/KOR sheets lack the healthFacilities_* columns
  columnNames2010KINKOR <- c("province", "dps", "health_zone", "donor", "operational_support_partner", "population", "quarter", "month",
                             "totalCasesAllDiseases_under5", "totalCasesAllDiseases_5andOlder", "totalCasesAllDiseases_pregnantWomen",
                             "newCasesMalariaMild_under5", "newCasesMalariaMild_5andOlder", "newCasesMalariaMild_pregnantWomen",
                             "totalHospAllDiseases_under5", "totalHospAllDiseases_5andOlder", "totalHospAllDiseases_pregnantWomen",
                             "newCasesMalariaSevere_under5", "newCasesMalariaSevere_5andOlder", "newCasesMalariaSevere_pregnantWomen",
                             "totalDeathsAllDiseases_under5", "totalDeathsAllDiseases_5andOlder", "totalDeathsAllDiseases_pregnantWomen",
                             "malariaDeaths_under5", "malariaDeaths_5andOlder", "malariaDeaths_pregnantWomen",
                             "ANC_1st", "SP_1st", "SP_2nd",
                             "ITN_received", "ITN_distAtANC", "ITN_distAtPreschool",
                             "ASAQ_received_2to11mos", "ASAQ_received_1to5yrs", "ASAQ_received_6to13yrs", "ASAQ_received_14yrsAndOlder",
                             "ASAQ_used_2to11mos", "ASAQ_used_1to5yrs", "ASAQ_used_6to13yrs", "ASAQ_used_14yrsAndOlder", "ASAQ_used_total",
                             "stockOut_SP", "stockOut_ASAQ_2to11mos", "stockOut_ASAQ_1to5yrs", "stockOut_ASAQ_6to13yrs", "stockOut_ASAQ_14yrsAndOlder",
                             "stockOut_qui_pill", "stockOut_qui_inj",
                             "smearTest_completed", "smearTest_positive",
                             "RDT_received", "RDT_completed", "RDT_positive",
                             "PMA_ASAQ", "PMA_TPI", "PMA_ITN", "PMA_complete",
                             "reports_received", "reports_expected")
  # ----------------------------------------------
  # fix various issues in spelling/typos or extra columns in the data sheet:
  if ( PNLP_files$year[index] == 2015 & sheetname == "NK" ) {
    dataSheet <- dataSheet[ , -c("X__71", "X__72", "X__73") ]
  }
  if ( PNLP_files$year[index] == 2010 & sheetname == "BC" ) {
    dataSheet <- dataSheet[ , -c("X__62") ]
  }
  if ( PNLP_files$year[index] == 2010 ) {
    dataSheet <- dataSheet[ , -c("X__14") ]
  }
  if ( PNLP_files$year[index] == 2011 & sheetname != "OR" ) {
    dataSheet <- dataSheet[ , -c("X__14") ]
  }
  if ( PNLP_files$year[index] == 2011 & sheetname == "OR" ) {
    dataSheet <- dataSheet[ , -c("X__11", "X__15", "X__25", "X__39") ]
  }
  if ((PNLP_files$year[index] == 2014 | PNLP_files$year[index] == 2013 | PNLP_files$year[index] == 2012 | PNLP_files$year[index] == 2011)
      & sheetname == "BC") {
    # drop the trailing extra column
    dataSheet <- dataSheet[, -ncol(dataSheet), with=F ]
  }
  if ( PNLP_files$year[index] == 2016 & sheetname == "KIN") {
    dataSheet <- dataSheet[ , -c("X__72", "X__73", "X__74") ]
  }
  if (PNLP_files$year[index] == 2010 & sheetname == "BDD") {
    # drop all-NA columns, then the trailing extra column
    # BUG FIX: was `apply( dt , 2 , ...)` — `dt` is not defined in this
    # function; the all-NA check must run on dataSheet itself
    dataSheet <- dataSheet[ , !apply( dataSheet , 2 , function(x) all(is.na(x))), with=F ]
    dataSheet <- dataSheet[, -ncol(dataSheet), with=F ]
  }
  if ((PNLP_files$year[index] == 2011) & sheetname == "BDD") {
    # BUG FIX: same `dt` -> `dataSheet` fix as above
    dataSheet <- dataSheet[ , !apply( dataSheet , 2 , function(x) all(is.na(x))), with=F ]
    dataSheet <- dataSheet[, -ncol(dataSheet), with=F ]
    dataSheet <- dataSheet[, -ncol(dataSheet), with=F ]
  }
  # set column names, depending on differences in years and/or sheets
  if ( PNLP_files$year[index] == 2014 ) {
    columnNames <- columnNames2014
  } else if (PNLP_files$year[index] < 2014 & PNLP_files$year[index] != 2010) {
    columnNames <- columnNames2011to2013
  } else if (PNLP_files$year[index] == 2010 & sheetname != "KIN" & sheetname != "KOR" ) {
    columnNames <- columnNames2010
  } else if (PNLP_files$year[index] == 2016) {
    columnNames <- columnNames2016
  } else if (PNLP_files$year[index] == 2015) {
    columnNames <- columnNames2015
  } else if (PNLP_files$year[index] == 2010 & ( sheetname == "KIN" | sheetname == "KOR")) {
    columnNames <- columnNames2010KINKOR
  } else {
    # everything else (2017) uses the complete layout
    columnNames <- columnNamesComplete
  }
  names(dataSheet) <- columnNames
  # hand-fix specific mislabeled rows in the 2012 EQ sheet, identified by
  # their (unique) totalCasesAllDiseases values
  if (PNLP_files$year[index] == 2012 & sheetname == "EQ"){
    dataSheet <- dataSheet[(totalCasesAllDiseases_under5=="971" & totalCasesAllDiseases_5andOlder=="586" & totalCasesAllDiseases_pregnantWomen=="99"),
                           province:="Equateur"]
    dataSheet <- dataSheet[(totalCasesAllDiseases_under5=="971" & totalCasesAllDiseases_5andOlder=="586" & totalCasesAllDiseases_pregnantWomen=="99"),
                           dps:="Sud Uban"]
    dataSheet <- dataSheet[(totalCasesAllDiseases_under5=="971" & totalCasesAllDiseases_5andOlder=="586" & totalCasesAllDiseases_pregnantWomen=="99"),
                           health_zone:="Libenge"]
    dataSheet <- dataSheet[(totalCasesAllDiseases_under5=="971" & totalCasesAllDiseases_5andOlder=="586" & totalCasesAllDiseases_pregnantWomen=="99"),
                           month:= "Janvier"]
    dataSheet <- dataSheet[(totalCasesAllDiseases_under5=="977" & totalCasesAllDiseases_5andOlder=="816" & totalCasesAllDiseases_pregnantWomen=="242"),
                           month:= "Janvier"]
    dataSheet <- dataSheet[(totalCasesAllDiseases_under5=="977" & totalCasesAllDiseases_5andOlder=="816" & totalCasesAllDiseases_pregnantWomen=="242"),
                           health_zone:="Mawuya"]
    dataSheet <- dataSheet[(health_zone=="Mawuya" & !is.na(population)), health_zone:=NA]
    dataSheet <- dataSheet[!is.na(health_zone)]
  }
  # add a column for the "year" to keep track of this variable as we add dataSheets to this one
  dataSheet$year <- PNLP_files$year[index]
  # ----------------------------------------------
  # Get rid of rows you don't need- "subset"
  # delete rows where the month column is NA (totals rows or any trailing rows)
  dataSheet <- dataSheet[!is.na(month)]
  dataSheet <- dataSheet[!month==0]
  # clean "Province" column: some sheets have missing/"0" values that should
  # carry the sheet's province code
  if (sheetname == "BDD"){
    dataSheet <- dataSheet[province==0, province := sheetname]
    dataSheet <- dataSheet[is.na(province), province := sheetname]
  }
  if (sheetname == "KOR"){
    dataSheet <- dataSheet[province==0, province := "K.Or"]
    dataSheet <- dataSheet[is.na(province), province := "K.Or"]
  }
  if (sheetname == "SK"){
    dataSheet <- dataSheet[province==0, province := "SK"]
    dataSheet <- dataSheet[is.na(province), province := "SK"]
  }
  # delete rows whose first value is "PROVINCE"/"Province" - these were the
  # column headers in the original datasheet in excel.
  dataSheet <- dataSheet[!province %in% c('PROVINCE', 'Province')]
  # BDD 2016 sheet has total row in the middle of the data, the other sheets have it
  # in the last row of the sheet, sometimes in the first column, sometimes in the second;
  # sometimes as "Total" and sometimes "TOTAL"
  # BUG FIX: the original repeated the lowercase "total" filter on province
  # twice and never filtered lowercase "total" on dps; filter all three
  # case variants on both columns
  for (totalCol in c("province", "dps")) {
    dataSheet <- dataSheet[!grepl("TOTAL|Total|total", dataSheet[[totalCol]]), ]
  }
  # ----------------------------------------------
  # translate french month names (and observed misspellings) to numeric
  # strings, e.g. Janvier="01"; grepl() is used (rather than ==) so values
  # with trailing white space are also matched
  monthMap <- c(
    "Janvier"="01", "Février"="02", "Mars"="03", "Avril"="04",
    "Mai"="05", "Juin"="06", "Juillet"="07", "Août"="08",
    "Septembre"="09", "Octobre"="10", "Novembre"="11", "Décembre"="12",
    "janvier"="01", "février"="02", "mars"="03", "avril"="04",
    "mai"="05", "juin"="06", "juillet"="07", "août"="08",
    "septembre"="09", "octobre"="10", "novembre"="11", "décembre"="12",
    # accounting for spelling mistakes/typos/other variations
    "fevrier"="02", "Fevrier"="02", "JUIN"="06", "Aout"="08",
    "Septembr"="09", "Decembre"="12"
  )
  for (frMonth in names(monthMap)) {
    dataSheet[grepl(frMonth, month), month := monthMap[[frMonth]]]
  }
  # make string version of the date
  dataSheet[, stringdate:=paste('01', month, year, sep='/')]
  # combine year and month into one variable
  dataSheet[, date:=as.Date(stringdate, "%d/%m/%Y")]
  # make names of health zones consistent (change abbreviatons to full name in select cases)
  if (PNLP_files$year[index] == 2017 & sheetname == "KOR"){
    dataSheet <- dataSheet[(health_zone=="5" & month=="03" & totalCasesAllDiseases_under5=="3297"), health_zone:= "Kole"]
  }
  if (PNLP_files$year[index] == 2015 & sheetname == "EQ"){
    dataSheet <- dataSheet[(health_zone=="Libenge" & month=="01" & totalCasesAllDiseases_under5=="1375"), health_zone:= "Mawuya"]
  }
  if (PNLP_files$year[index] == 2017 & sheetname == "EQ"){
    dataSheet <- dataSheet[(health_zone=="Libenge" & month=="01" & totalCasesAllDiseases_under5=="1838"), health_zone:= "Mawuya"]
  }
  if (PNLP_files$year[index] == 2016 & sheetname == "EQ"){
    dataSheet <- dataSheet[(health_zone=="Libenge" & month=="01" & totalCasesAllDiseases_under5=="2213"), health_zone:= "Mawuya"]
  }
  if (PNLP_files$year[index] == 2014 & sheetname == "EQ"){
    dataSheet <- dataSheet[(health_zone=="Libenge" & month=="01" & totalCasesAllDiseases_under5=="754"), health_zone:= "Mawuya"]
  }
  if (PNLP_files$year[index] == 2013 & sheetname == "EQ"){
    dataSheet <- dataSheet[(health_zone=="Libenge" & month=="01" & totalCasesAllDiseases_under5=="1628"), health_zone:= "Mawuya"]
  }
  if ((PNLP_files$year[index] == 2014 | PNLP_files$year[index] == 2015 | PNLP_files$year[index] == 2016 | PNLP_files$year[index] == 2017) & sheetname == "KAT"){
    dataSheet <- dataSheet[health_zone=="Mutshat", health_zone:= "Mutshatsha"]
    dataSheet <- dataSheet[health_zone=="Malem Nk", health_zone:= "Malemba Nkulu"]
  }
  if (PNLP_files$year[index] == 2013 & sheetname == "BDD"){
    dataSheet <- dataSheet[health_zone=="Koshiba", health_zone:= "Koshibanda"]
  }
  if ((PNLP_files$year[index] == 2013 | PNLP_files$year[index] == 2012 )& sheetname == "KOR"){
    dataSheet <- dataSheet[health_zone=="Mbuji May", health_zone:= "Bimpemba"]
  }
  if ((PNLP_files$year[index] == 2011 | PNLP_files$year[index] == 2010)& sheetname == "BDD"){
    dataSheet <- dataSheet[health_zone=="KIKWITS", health_zone:= "Kikwit S"]
  }
  if ((PNLP_files$year[index] == 2010)& sheetname == "SK"){
    dataSheet <- dataSheet[!is.na(health_zone)]
  }
  # there are still some added rows that happen to have something in the month column but are missing data everywhere else
  dataSheet <- dataSheet[!is.na(province)]
  # lowercase everything, then map abbreviated/truncated/misspelled health
  # zone names to their full names in one vectorized recode.
  # (The original also renamed a dozen mixed-case variants before lowering;
  # each of those is covered by its lowercase entry below, so the recode is
  # equivalent.)
  dataSheet$health_zone <- tolower(dataSheet$health_zone)
  hzMap <- c(
    "omendjadi"="omondjadi", "kiroshe"="kirotshe", "boma man"="boma mangbetu",
    "mutshat"="mutshatsha", "mumbund"="mumbunda", "fungurum"="fungurume",
    "yasa"="yasa-bonga", "malem nk"="malemba nkulu", "kampem"="kampemba",
    "kamalond"="kamalondo", "pay"="pay kongila", "kafakumb"="kafakumba",
    "tshamile"="tshamilemba", "ntandem"="ntandembele", "masi"="masimanimba",
    "koshiba"="koshibanda", "djalo djek"="djalo djeka", "ludimbi l"="ludimbi lukula",
    "mwela l"="mwela lembwa", "bena le"="bena leka", "vanga ket"="vanga kete",
    "bomineng"="bominenge", "bogosenu"="bogosenusebea", "bwamand"="bwamanda",
    "banga lu"="banga lubaka", "bosomanz"="bosomanzi", "bosomond"="bosomondanda",
    "bonganda"="bongandanganda", "lilanga b"="lilanga bobanga", "mondomb"="mondombe",
    "tshitshim"="tshitshimbi", "basankus"="basankusu", "mobayi m"="mobayi mbongo",
    "kabond d"="kabond dianda", "kilela b"="kilela balanda", "ndjoko m"="ndjoko mpunda",
    "benatshia"="benatshiadi", "tshudi lo"="tshudi loto", "pania mut"="pania mutombo",
    "ndjoko mp"="ndjoko mpunda", "kalonda e"="kalonda est", "kata k"="kata kokombe",
    "lshi"="lubumbashi", "bdd"="bandundu", "kikwit n"="kikwit nord",
    "kikwit s"="kikwit sud", "kasongo l"="kasongo lunda", "popoka"="popokabaka",
    "kanda k"="kanda kanda", "muene d"="muene ditu", "wembo n"="wembo nyama",
    "bena dib"="bena dibele", "wamba l"="wamba luadi", "kabeya"="kabeya kamwanga",
    "mampoko"="lolanga mampoko", "mufunga"="mufunga sampwe", "wembo nyana"="wembo nyama",
    "kamonya"="kamonia", "kitangwa"="kitangua"
  )
  dataSheet[health_zone %in% names(hzMap), health_zone := unname(hzMap[health_zone])]
  # ----------------------------------------------
  # Return current data sheet
  return(dataSheet)
  # ----------------------------------------------
}
# ----------------------------------------------
# currentSheet[health_zone=="Mbuji May"| health_zone== "Bimpemba", c(1:9)]
|
6fc1e0f5d67ab29fa5aa731e0b08252e3eefad56
|
dda08ebff68da583ec11f861cf1d0e75293fd2c5
|
/R/seqtest.R
|
0888e54b5f2354695524bd100a0014cfca80fcbb
|
[] |
no_license
|
lnalborczyk/ESTER
|
314f65f1a52d925f475cff2b0b54cbbf85fc5e0a
|
eee73e59b3e62caa936d64d563b6fa9d69e593b7
|
refs/heads/master
| 2021-01-11T17:03:10.560357
| 2018-05-19T08:57:35
| 2018-05-19T08:57:35
| 69,504,922
| 1
| 2
| null | 2017-01-26T14:34:38
| 2016-09-28T21:23:41
|
R
|
UTF-8
|
R
| false
| false
| 9,307
|
r
|
seqtest.R
|
#' Sequential testing with evidence ratios
#'
#' Computes sequential evidence ratios, either based on the AIC, BIC, WAIC, or LOOIC.
#' Supported models currently include \code{lm}, \code{merMod}, or \code{brmsfit} models.
#' When data involve repeated measures (and so multiple lines per subject),
#' a column indicating the subject "id" should be provided to the \code{id} argument.
#' If nothing is passed to the \code{id} argument, \code{seqtest} will suppose
#' that there is only one observation (i.e., one line) per subject.
#'
#' @param ic Indicates whether to use the aic or the bic.
#' @param mod1 A model of class \code{lm} or \code{lmerMod}.
#' @param mod2 A model of class \code{lm} or \code{lmerMod} (of the same class of mod1).
#' @param nmin Minimum sample size from which start to compute sequential evidence ratios.
#' @param id If applicable (i.e., repeated measures), name of the "id" column of your
#' dataframe, in character string.
#' @param boundary The Evidence Ratio (or its reciprocal) at which
#' the run is stopped as well
#' @param blind If true, the function only returns a "continue or stop" message
#' @param nsims Number of permutation samples to evaluate (is ignored if blind = TRUE)
#'
#' @importFrom stats family formula lm update
#' @importFrom magrittr %>% set_names
#' @importFrom lme4 lmer glmer
#' @importFrom rlang f_lhs
#' @import ggplot2
#' @import dplyr
#' @import utils
#' @import brms
#'
#' @examples
#' \dontrun{
#' # A first simple example
#' data(mtcars)
#' mod1 <- lm(mpg ~ cyl, mtcars)
#' mod2 <- lm(mpg ~ cyl + disp, mtcars)
#' seqtest(ic = aic, mod1, mod2, nmin = 10)
#'
#' # Plotting the results
#' seqtest(ic = aic, mod1, mod2, nmin = 10) %>% plot
#'
#' # Example with 10 permutation samples
#' seqtest(ic = aic, mod1, mod2, nmin = 10, nsims = 10)
#'
#' # Example with blinding
#' seqtest(ic = aic, mod1, mod2, nmin = 10, boundary = 10, blind = TRUE)
#'
#' # Example with repeated measures
#' library(lme4)
#' data(sleepstudy)
#' mod1 <- lmer(Reaction ~ Days + (1|Subject), sleepstudy)
#' mod2 <- lmer(Reaction ~ Days + I(Days^2) + (1|Subject), sleepstudy)
#' seqtest(ic = aic, mod1, mod2, nmin = 10, id = "Subject", nsims = 10)
#'
#' # Example with brmsfit models
#' library(brms)
#' mod1 <- brm(Reaction ~ Days + (1|Subject), sleepstudy)
#' mod2 <- brm(Reaction ~ Days + I(Days^2) + (1|Subject), sleepstudy)
#' seqtest(ic = WAIC, mod1, mod2, nmin = 10, id = "Subject")
#' }
#'
#' @author Ladislas Nalborczyk <\email{ladislas.nalborczyk@@gmail.com}>
#'
#' @seealso \code{\link{ictab}}
#'
#' @export
seqtest <-
function(
ic = aic, mod1, mod2, nmin = 10, id = NULL, boundary = Inf,
blind = FALSE, nsims = NULL) {
# Sequential evidence ratios: refit mod1/mod2 on growing subsets of the data
# (one subject at a time, starting at nmin subjects) and record the evidence
# ratio ER = ic_wt(mod2) / ic_wt(mod1) as computed by ictab().
# --- input validation -------------------------------------------------------
if (!class(mod1) == class(mod2) ) {
stop("Error: mod1 and mod2 have to be of the same class")
}
if (nmin < 10) {
warning("nmin should usually be set above 10...")
}
# --- recover the data frame the models were fitted on, per model class ------
# NOTE(review): class(x) == "..." is brittle for objects carrying several
# classes; inherits() would be the safer test -- confirm before changing.
if (class(mod1) == "lm") {
data <- data.frame(eval(mod1$call[["data"]], envir = parent.frame() ) )
}
if (class(mod1) == "glmerMod" | class(mod1) == "lmerMod") {
data <- data.frame(eval(mod1@call$data, envir = parent.frame() ) )
}
if (class(mod1) == "brmsfit") {
data <- get(mod1$data.name)
}
# --- build a subject index column data$ppt ----------------------------------
if (is.null(id) == TRUE) {
# no id given: assume one row (observation) per subject
id <- deparse(f_lhs(formula(mod1) ) )
nobs <- 1
data$ppt <- rep(seq(1, length(data[, id]), 1), each = nobs)
} else {
# repeated measures: nobs is the maximum number of rows per subject;
# subjects with fewer rows are dropped below, with a warning
count <- data.frame(table(data[, id]) )
nobs <- max(count$Freq)
a <- as.vector(count$Var1[count$Freq < nobs])
data <- data[order(data[, id]), ]
data$ppt <- rep(seq(1, length(unique(data[, id]) ), 1), each = nobs)
if (length(a) > 0) {
for (i in 1:length(a) ) {
data <- data[!data[, id] == as.numeric(a[i]), ]
}
warning("Different numbers of observation by subject.
Subjects with less than max(nobs)
have been removed.")
}
}
# --- sequential refits on the observed (non-permuted) data ------------------
# startrow = first row belonging to subject number nmin; step by nobs rows
# so each iteration adds exactly one subject
startrow <- min(which(as.numeric(as.character(data$ppt) ) == nmin) )
endrow <- nrow(data)
for (i in seq(startrow, endrow, nobs) ) {
maxrow <- i - 1 + nobs
if ( (class(mod1) == "glmerMod") ) {
mod1 <- glmer(formula(mod1),
family = family(mod1)$family, data[1:maxrow, ])
mod2 <- glmer(formula(mod2),
family = family(mod2)$family, data[1:maxrow, ])
}
if ( (class(mod1) == "lmerMod") ) {
mod1 <- lmer(formula(mod1),
REML = FALSE, data[1:maxrow, ])
mod2 <- lmer(formula(mod2),
REML = FALSE, data[1:maxrow, ])
}
if ( (class(mod1) == "lm") ) {
mod1 <- lm(formula(mod1), data[1:maxrow, ])
mod2 <- lm(formula(mod2), data[1:maxrow, ])
}
if ( (class(mod1) == "brmsfit") ) {
mod1 <-
update(
mod1, newdata = data[1:maxrow, ],
recompile = FALSE, refresh = 0
)
mod2 <-
update(
mod2, newdata = data[1:maxrow, ],
recompile = FALSE, refresh = 0
)
}
# evidence ratio in favour of mod2 at the current sample size
tabtab <- ictab(list(mod1 = mod1, mod2 = mod2), ic)
temp_er <- data.frame(cbind(data$ppt[i],
tabtab$ic_wt[tabtab$modnames == "mod2"] /
tabtab$ic_wt[tabtab$modnames == "mod1"]) )
# NOTE(review): exists("er") also searches enclosing environments, so an
# object named "er" in the caller's/global environment would corrupt the
# first iteration -- exists(..., inherits = FALSE) would be safer; verify.
if (!exists("er") ) er <- temp_er else er <- rbind(er, temp_er)
rm(temp_er)
}
colnames(er) <- c("ppt", "ER")
# --- blind mode: report only whether the stopping boundary was crossed ------
# abs(log(ER)) >= log(boundary) covers both ER >= boundary and ER <= 1/boundary
if (blind == TRUE) {
if (tail(abs(log(er$ER) ), 1) >= log(boundary) ) {
return("stop the recruitment")
} else {
return("continue the recruitment")
}
}
# label the observed trajectory "er"; permutation runs get "er1", "er2", ...
erb <-
er %>%
mutate(ERi = rep("er", max(.$ppt) - nmin + 1) ) %>%
select_(~ERi, ~ppt, ~ER)
# --- optional permutation envelope: repeat the whole run on shuffled data ---
if (!is.null(nsims) ) {
for (i in 1:nsims) {
# shuffle rows, then restore the subject index so group sizes are kept
data_temp <- data[sample(nrow(data), replace = FALSE), ]
if (nobs > 1) {
data_temp <-
data_temp[order(factor(data_temp$ppt,
levels = unique(data_temp$ppt) ) ), ]
data_temp$ppt <- data$ppt
} else {
data_temp$ppt <- data$ppt
}
for (j in seq(startrow, endrow, nobs) ) {
maxrow <- j - 1 + nobs
if ( (class(mod1) == "glmerMod") ) {
mod1 <- glmer(formula(mod1),
family = family(mod1)$family, data_temp[1:maxrow, ])
mod2 <- glmer(formula(mod2),
family = family(mod2)$family, data_temp[1:maxrow, ])
}
if ( (class(mod1) == "lmerMod") ) {
mod1 <- lmer(formula(mod1),
REML = FALSE, data_temp[1:maxrow, ])
mod2 <- lmer(formula(mod2),
REML = FALSE, data_temp[1:maxrow, ])
}
if ( (class(mod1) == "lm") ) {
mod1 <- lm(formula(mod1), data_temp[1:maxrow, ])
mod2 <- lm(formula(mod2), data_temp[1:maxrow, ])
}
if ( (class(mod1) == "brmsfit") ) {
mod1 <-
update(
mod1, newdata = data_temp[1:maxrow, ],
recompile = FALSE, refresh = 0
)
mod2 <-
update(
mod2, newdata = data_temp[1:maxrow, ],
recompile = FALSE, refresh = 0
)
}
tabtab <- ictab(list(mod1 = mod1, mod2 = mod2), ic)
temp_temp_erb <-
data.frame(
cbind(data_temp$ppt[j],
tabtab$ic_wt[tabtab$modnames == "mod2"] /
tabtab$ic_wt[tabtab$modnames == "mod1"]) )
# NOTE(review): same exists()-with-inheritance caveat as for "er" above
if (!exists("temp_erb") ) {
temp_erb <- temp_temp_erb
} else {
temp_erb <- rbind(temp_erb, temp_temp_erb)
}
rm(temp_temp_erb)
}
# tag this permutation run as "er<i>" and append it to the output frame
temp_erb <-
temp_erb %>%
mutate(ERi = rep(paste0("er", i), nrow(.) ) ) %>%
select(3, 1, 2) %>%
set_names(c("ERi", "ppt", "ER") )
erb <- rbind(erb, temp_erb)
rm(temp_erb)
set.seed(NULL)
}
}
# returns a data frame with columns ERi ("er" = observed, "er<i>" =
# permutation runs), ppt (sample size) and ER; classed for plot.seqtest()
class(erb) <- c("seqtest", "data.frame")
return(erb)
}
#' @export
plot.seqtest <- function(x, ...) {
  # Plot method for seqtest objects: draws every ER trajectory (observed +
  # permutation runs) on a log-scaled y axis, with the observed trajectory
  # ("er") overplotted as a heavier line.
  # The more trajectories there are, the fainter/thinner each one is drawn.
  line_weight <- sqrt(0.75 / n_distinct(x$ERi))
  observed <- x[x$ERi == "er", ]
  p <- ggplot(x, aes_string(x = "ppt", y = "ER", group = "ERi"))
  p <- p + scale_y_log10()
  p <- p + geom_line(alpha = line_weight, size = line_weight)
  p <- p + geom_line(aes_string(x = "ppt", y = "ER", group = NULL),
                     data = observed, size = 0.75)
  p <- p + theme_bw(base_size = 12)
  p <- p + xlab("Sample size")
  p <- p + ylab(expression(Evidence~ ~Ratio~ ~ (ER[10])))
  p
}
|
012abc17641a9b6894df905fa1de9fc0faffa667
|
ca92f6290e4575f37fdb5e50543e108aa3f64378
|
/get_files.R
|
a3964cc5e9908bbad2064900a39bab2e6a370189
|
[] |
no_license
|
Kikzz/WorkWithDataAssignment
|
607ec4ab92c217eb35d1f1e8634fa5a00f558dbb
|
650f315ab1ad4ed61c9f2aa44bc0e3d02738ab43
|
refs/heads/master
| 2020-12-24T06:37:48.737633
| 2016-06-21T07:31:40
| 2016-06-21T07:31:40
| 60,773,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 506
|
r
|
get_files.R
|
## get_files.R
## 1. download the zip file as data.zip into the working directory
## 2. unzip data.zip into the working directory
## 3. remove the zip file from disk

## download files into working directory
# Fix: the original passed the abbreviated argument name `dest`, which only
# worked through R's partial argument matching; spell out `destfile`.
# NOTE(review): method = "curl" requires a curl binary on PATH -- confirm this
# is intended (omitting `method` lets R pick a working default).
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
              method = "curl", destfile = "data.zip")
## unzip the file. This creates a "UCI HAR Dataset" directory and unzips all files into it.
unzip(zipfile = "data.zip")
## delete data.zip from the hard drive
unlink("data.zip")
|
bb126155a23c6dce64ed669152387555a2173e65
|
c7183ba0271862bd10e9e857c7bba918f6d092b9
|
/draft.R
|
a7a0b9e59bcc25176e059429161d9a1a7377836b
|
[] |
no_license
|
weslwz/RepData_PeerAssessment1
|
37d2c1e903bb0e682417281e4ac891d8569f27d7
|
39550fe79959074a9f4ac99294412503f7fe0b8f
|
refs/heads/master
| 2020-12-25T23:10:13.309393
| 2014-09-15T03:19:33
| 2014-09-15T03:19:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 845
|
r
|
draft.R
|
library(knitr)
library(markdown)
# Render the course report: knit the Rmd to markdown, then convert to HTML
knit("PA1_template.Rmd")
markdownToHTML("PA1_template.md", "PA1_template.html")
# Activity data: one row per (date, interval) with a "steps" count
X <- read.csv("activity.csv")
library(plyr)
library(ggplot2)
# Per-day total / mean / median steps (NAs ignored within each day)
A <- ddply(X, "date", summarise, total_steps=sum(steps,na.rm=TRUE), mean_steps=mean(steps,na.rm=TRUE), median_steps=median(steps,na.rm=TRUE))
A$date <- as.Date(A$date)
print(A)
# Bar chart of total steps per day
p <- ggplot(A, aes(x=date,y=total_steps)) + geom_bar(stat="identity",fill="blue") + ggtitle("Steps taken per day") + xlab("Date") + ylab("Number of steps")
print(p)
# Average steps for each interval, across all days
B <- ddply(X, "interval", summarise, mean_steps=mean(steps,na.rm=TRUE))
print(B)
p <- ggplot(B, aes(x=interval,y=mean_steps)) + geom_line(colour="blue") + ggtitle("Average steps taken per interval") + xlab("Interval") + ylab("Average number of steps")
print(p)
# Interval with the highest average step count
ind <- which.max(B$mean_steps)
B$interval[ind]
|
9ae0810d97f3120556077d03c7c80f3e14485149
|
5bc5eb0d1ad3a60ce230cc563b2ec9431c4249ad
|
/0415_pdbs/ras/sd_of_net_community.r
|
a54cc438a7357ffd92b989ed369858e502582f9a
|
[] |
no_license
|
Hongyang449/2016
|
dd1c0a124e7f0414b089a73977417053a82cc55e
|
8bdd3d003a32e03555fe979055245845ed841a43
|
refs/heads/master
| 2021-01-13T10:06:42.247046
| 2016-12-21T21:04:08
| 2016-12-21T21:04:08
| 72,122,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,871
|
r
|
sd_of_net_community.r
|
## name: sd_of_net_community.r
## date: 10/31/2016
## Here I want to examine the sd (variance) of community edges within and between states (gtp vs gdp)
library(abind)
load("/Users/hyangl/project/ras/results/2016/0415_pdbs/ras/net_ras_cmap_noh.RData")
## structure of net_complete:
## list(nets_ras_gtp_vs_gdp_signif, community_cij_ras_gtp, community_cij_ras_gdp, p_community_cij_ras)
## noh 4.5; calculate the sum of sd of all community edges
# apply(..., 1:2, sd) takes the element-wise SD across the 3rd dimension of
# the $raw array; sum() collapses that SD matrix to a single number
sum(apply(net_complete[['4.5']][[2]]$raw, 1:2, sd))
# [1] 40.88062 # within gtp
sum(apply(net_complete[['4.5']][[3]]$raw, 1:2, sd))
# [1] 41.30724 # within gdp
# abind(..., along=3) pools the gtp and gdp arrays before taking the SD
sum(apply(abind(net_complete[['4.5']][[2]]$raw,net_complete[['4.5']][[3]]$raw,along=3), 1:2, sd))
# [1] 50.63161 # combine gtp and gdp - much larger
## noh 6; calculate the sum of sd of all community edges
sum(apply(net_complete[['6']][[2]]$raw, 1:2, sd))
# [1] 65.65264 # within gtp
sum(apply(net_complete[['6']][[3]]$raw, 1:2, sd))
# [1] 68.9406 # within gdp
sum(apply(abind(net_complete[['6']][[2]]$raw,net_complete[['6']][[3]]$raw,along=3), 1:2, sd))
# [1] 81.83473 # combine gtp and gdp - much larger
# plot
# Side-by-side network plots (gtp vs gdp) with edge widths weighted by the SD
# of each community edge; only the lower triangle is kept to avoid drawing
# each undirected edge twice
load("/Users/hyangl/project/ras/results/2016/info/layout_2d.RData")
layout(matrix(1:2, nrow=1))
sd_gtp <- apply(net_complete[['6']][[2]]$raw, 1:2, sd)
sd_gtp[upper.tri(sd_gtp)] <- 0; weight <- sd_gtp[sd_gtp != 0]
plot.cna(net_ras_cmap_noh[["6"]]$gtp, layout=layout_ras, w=weight,
vertex.label=NA, edge.label=NA, edge.color="gray")
mtext("gtp_noh_6", outer=F, line=-5)
sd_gdp <- apply(net_complete[['6']][[3]]$raw, 1:2, sd)
sd_gdp[upper.tri(sd_gdp)] <- 0; weight <- sd_gdp[sd_gdp != 0]
plot.cna(net_ras_cmap_noh[["6"]]$gdp, layout=layout_ras, w=weight,
vertex.label=NA, edge.label=NA, edge.color="gray")
mtext("gdp_noh_6", outer=F, line=-5)
mtext("sd_of_cmap_noh_6", outer=T, line=-3)
dev.copy2pdf(file="figures/sd_ras_cmap_noh_6.pdf")
|
291d3ede9c3dbf269331fb11f5d8220b1de50fcd
|
c3b03c926c7be473ce36928f96252535982f686c
|
/plot2.R
|
c3927af0266b9a851a48a3045e20535e411f424e
|
[] |
no_license
|
amicarelli/ExData_Plotting1
|
39195770b173dfef6e46e68ffd8e3fc94556b39a
|
f9a4e945be5b442c2f09bd367834649ae42a7c41
|
refs/heads/master
| 2021-01-18T12:09:53.659536
| 2016-03-22T02:35:34
| 2016-03-22T02:35:34
| 54,424,734
| 0
| 0
| null | 2016-03-21T21:37:47
| 2016-03-21T21:37:46
| null |
UTF-8
|
R
| false
| false
| 1,547
|
r
|
plot2.R
|
##
# Exploratory data analysis, plot 2.
# Reads the household power consumption data and plots Global Active Power
# over 2007-02-01/2007-02-02, both to screen and to plot2.png.
#
# Directory used for both the data and the generated plots
fileDir <- file.path(getwd(), "data", "HPC")
# Download and unpack the data only if it is not already present
if (!file.exists(fileDir)) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile = "HPC.zip")
  unzip("HPC.zip")
  file.rename("exdata-data-household_power_consumption", fileDir)
  file.remove("HPC.zip")
}
# Read the raw data; "?" marks missing values in this data set
hpc <- read.table(file.path(fileDir, "household_power_consumption.txt"),
                  header = TRUE, sep = ";", na.strings = "?")
# Convert the Date column from factor/character to a proper Date
hpc$Date <- as.Date(hpc$Date, format = "%d/%m/%Y")
# Keep only the two days of interest
hpc2 <- subset(hpc, Date == "2007-02-01" | Date == "2007-02-02")
# Combine date and time into a single timestamp column
hpc2$DateTime <- strptime(paste(hpc2$Date, hpc2$Time), format = "%Y-%m-%d %H:%M:%S")
#
# Plot 2 - render plot to screen and to file.
# The drawing code is factored into a helper so both renderings stay
# identical; the axis label typo "kilowats" is also fixed ("kilowatts").
#
drawPlot2 <- function(data) {
  with(data, plot(DateTime, Global_active_power, type = "l",
                  ylab = "Global Active Power (kilowatts)"))
}
par(mfcol = c(1, 1))
drawPlot2(hpc2)
png(filename = file.path(fileDir, "plot2.png"), width = 480, height = 480)
drawPlot2(hpc2)
dev.off()
|
1f5db3a2eeb84b6e2c54952187d2c0db0c2298d7
|
fe719dec5ad6f950c589874038ed00f15591952c
|
/Lalonde 3-ways.R
|
702bbfd09aca0708cd2b7349ce18a6823753f9bb
|
[] |
no_license
|
tuantvh/CS112---Causal-Inference
|
9f3ed6731935625e97cb4ffb71e684afd2a2fe55
|
1c6362c587e60ad556d835fe95e2a538d08405e9
|
refs/heads/master
| 2021-10-25T19:54:45.385703
| 2019-04-06T17:23:39
| 2019-04-06T17:23:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,663
|
r
|
Lalonde 3-ways.R
|
library("Matching")
library("stargazer")
library("ggplot2")
library(randomForest)
# Load the lalonde data set from the Matching package. data() attaches the
# "lalonde" object to the workspace as a side effect and returns only the
# data set's NAME, so the original `data <- data("lalonde")` merely stored
# the string "lalonde". The assignment is dropped (nothing below used `data`).
data("lalonde")
#MODELS
#PART1
# Split the sample by education: high school degree (nodegr == 0) vs none
Degree <- lalonde[lalonde$nodegr ==0,]
NoDegree <- lalonde[lalonde$nodegr ==1,]
# OLS regressions of 1978 earnings on treatment + covariates, per subgroup
fit.Degree <- lm(re78 ~ treat + age + black + hisp +married +re74 + re75, data = Degree)
fit.NoDegree <- lm(re78 ~ treat + age + black + hisp +married +re74 + re75, data = NoDegree)
CI.Degree <- confint(fit.Degree)
CI.NoDegree <- confint(fit.NoDegree)
#output image
stargazer(fit.Degree, fit.NoDegree,title="Table 1. Regression Results", type = "html",
align=TRUE, column.labels = c("Degree", "No Degree"), dep.var.labels= "Real Earnings in 1978",
covariate.labels=c("Treatment","Age",
"Black","Hispanic","Married","Real Earnings in 1974", "Real Earnings in 1975"),
omit.stat=c("LL","ser","f"), single.row= TRUE, out = "regression.htm")
#confidence intervals
stargazer(CI.Degree, type = "html", title="Degree", out ="CIDegree.htm")
stargazer(CI.NoDegree, type = "html", title="No Degree", out ="CINoDegree.htm")
#correlation matrix
correlation.matrix <- cor(Degree[,c("treat","age","black", "hisp", "married", "re74", "re75")])
stargazer(correlation.matrix, title="Correlation Matrix Degree", out ="corDegree.htm")
correlation.matrix2 <- cor(NoDegree[,c("treat","age","black", "hisp", "married", "re74", "re75")])
# Bug fix: the original passed correlation.matrix (the Degree matrix) here,
# so the "No Degree" table silently duplicated the Degree correlations.
stargazer(correlation.matrix2, title="Correlation Matrix No Degree", out ="corNoDegree.htm")
#PART2
#random forest
set.seed(1)
#DEGREE
# Random forest of earnings on treatment + covariates for the Degree subgroup
rf.Degree <- randomForest(re78 ~ treat + age + black+ hisp+ married + re74 + re75, data = Degree, mtry = 3, importance = TRUE)
importance(rf.Degree)
varImpPlot(rf.Degree)
#predicting the counterfactual
# Counterfactual: predict earnings for the treated units as if untreated
treated.Degree <- Degree[Degree$treat == 1,]
treated.Degree$treat = 0 #change the treated value for prediction
counterfactual.degree <- predict(rf.Degree, newdata = treated.Degree)
#average treatment effects
mean(treated.Degree$re78) - mean(counterfactual.degree)
#bootstrap confidence interval of treatment effects:
# NOTE(review): storage.effect is grown element-by-element; preallocating
# numeric(1000) would avoid repeated reallocation. Also, `sample <- ...`
# shadows base::sample with a data frame -- R still resolves the function
# call correctly, but renaming the variable would be clearer.
storage.effect <- NULL
for (i in 1:1000) {
sample.index <- sample(c(1:nrow(Degree)), nrow(Degree), replace= TRUE)
sample <- Degree[sample.index,]
rf.sample <- randomForest(re78 ~ treat + age + black+ hisp+ married + re74 + re75, data = sample, mtry = 3, importance = TRUE)
#predicting the counterfactual
treated.sample <- sample[sample$treat == 1,]
treated.sample$treat = 0 #change the treated value for prediction
counterfactual.sample <- predict(rf.sample, newdata = treated.sample)
#average treatment effects
storage.effect[i] <- mean(treated.sample$re78) - mean(counterfactual.sample)
}
qplot(storage.effect,
geom="histogram",
main = "Histogram for Estimated Treatment Effect (Degree)",
xlab = "Treatment Effect" )
# 95% bootstrap percentile interval
quantile(storage.effect, c(.025, .975))
#NODEGREE
# Same procedure for the No Degree subgroup
rf.NoDegree <- randomForest(re78 ~ treat + age + black+ hisp+ married + re74 + re75, data = NoDegree, mtry = 3, importance = TRUE)
importance(rf.NoDegree)
varImpPlot(rf.NoDegree)
#predicting the counterfactual
treated.NoDegree <- NoDegree[NoDegree$treat == 1,]
treated.NoDegree$treat = 0 #change the treated value for prediction
counterfactual.NoDegree <- predict(rf.NoDegree, newdata = treated.NoDegree)
#average treatment effects
mean(treated.NoDegree$re78) - mean(counterfactual.NoDegree)
#bootstrap confidence interval of treatment effects:
storage.effect2 <- NULL
for (i in 1:1000) {
sample.index <- sample(c(1:nrow(NoDegree)), nrow(NoDegree), replace= TRUE)
sample <- NoDegree[sample.index,]
rf.sample <- randomForest(re78 ~ treat + age + black+ hisp+ married + re74 + re75, data = sample, mtry = 3, importance = TRUE)
#predicting the counterfactual
treated.sample <- sample[sample$treat == 1,]
treated.sample$treat = 0 #change the treated value for prediction
counterfactual.sample <- predict(rf.sample, newdata = treated.sample)
#average treatment effects
storage.effect2[i] <- mean(treated.sample$re78) - mean(counterfactual.sample)
}
qplot(storage.effect2,
geom="histogram",
main = "Histogram for Estimated Treatment Effect (No Degree)",
xlab = "Treatment Effect" )
quantile(storage.effect2, c(.025, .975))
#PART 3:
#DEGREE
#assume the sharp null hypothesis of no treatment
# Permutation (randomization) test: observed difference in mean 1978 earnings
# between treated and control, compared against the distribution obtained by
# randomly re-assigning "treatment" labels 10,000 times.
test.statistic.degree <- mean(Degree[Degree$treat == 1, ]$re78) - mean(Degree[Degree$treat == 0,]$re78)
storage.vector <- NULL
for (i in 1:10000) {
FishTreat_id <- sample(seq_len(nrow(Degree)), nrow(Degree[Degree$treat==1,]))
FishTreat <- Degree[FishTreat_id,]
FishControl <- Degree[-FishTreat_id,]
storage.vector[i] <- mean(FishTreat$re78) - mean(FishControl$re78)
}
test.statistic.degree
# NOTE(review): `prob=` relies on partial matching of quantile()'s `probs`
# argument -- works, but spelling it out would be safer.
quantile(storage.vector, prob=c(0.975, 0.025))
# Null distribution with the observed statistic marked in red
plot(density(storage.vector))
abline(v = test.statistic.degree, lwd = 2, col = "red")
#NODEGREE
#assume the sharp null hypothesis of no treatment
# Same permutation test for the No Degree subgroup
test.statistic.NoDegree <- mean(NoDegree[NoDegree$treat == 1, ]$re78) - mean(NoDegree[NoDegree$treat == 0,]$re78)
storage.vector <- NULL
for (i in 1:10000) {
FishTreat_id <- sample(seq_len(nrow(NoDegree)), nrow(NoDegree[NoDegree$treat==1,]))
FishTreat <- NoDegree[FishTreat_id,]
FishControl <- NoDegree[-FishTreat_id,]
storage.vector[i] <- mean(FishTreat$re78) - mean(FishControl$re78)
}
test.statistic.NoDegree
quantile(storage.vector, prob=c(0.975, 0.025))
plot(density(storage.vector))
abline(v = test.statistic.NoDegree, lwd = 2, col = "red")
|
95798fb313bdb659b4c970702cff78f5b8595d6f
|
84a470b5c193e3854258729ad124673f6dfdbf1c
|
/R/makeContactMatrix.R
|
1e34f688fa5a7af0844af67ddc0be4adfeee9b2d
|
[] |
no_license
|
cdcepi/ACIP-SARS-CoV-2-Vaccine-Modeling
|
03dbd6ef0ef2785128968264614a7613a6570999
|
c0cdfbc06d49abfa7e4efed5dae64c114c34ec22
|
refs/heads/master
| 2023-03-07T13:06:21.763569
| 2021-02-23T20:50:17
| 2021-02-23T20:50:17
| 341,674,413
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,376
|
r
|
makeContactMatrix.R
|
#' @title Make contact matrix
#' @description Rebins an arbitrary contact matrix to a new group of population ranges. The default behavior is to use the UK POLYMOD data.
#' @param ages Vector of ages. Each element represents the upper range of an age range. The lowest bound is presumed to be zero.
#' If the oldest age does not reach the end of the population range, an additional element is added to span the full range.
#' The final age bracket cannot start after the final bracket of originalContactMatrixAges.
#' @param originalContactMatrix Contact matrix to serve as the basis for extrapolation. Defaults to UK POLYMOD.
#' This must be a square matrix.
#' @param originalContactMatrixAges Age ranges associated with originalContactMatrix. Defaults to UK population in 5-year bins, with
#' the final bracket starting at age 70. The format is a data frame with columns representing the age brackets and having two rows:
#' AgeStart and AgeEnd. The first column has ageStart of 0. The last column has ageEnd of NA.
#' @param originalPopulationFractions Vector of age fractions associated with originalContactMatrixAges.
#' Length must equal the dimension of originalContactMatrix. The vector will be normalized, so it can represent population fractions
#' or population in each age bin.
#' @return A contact matrix.
#' @author Jason Asher <jason.m.asher@gmail.com>
#' @export
makeContactMatrix <- function( ages, originalContactMatrix = flumodels_data$POLYMOD.matrix,
originalContactMatrixAges = flumodels_data$POLYMOD.age.ranges,
originalPopulationFractions = flumodels_data$population.fractions.US) {
# Re-bins originalContactMatrix onto the age brackets implied by `ages`:
# 1) convert rates to expected contact COUNTS using the population,
# 2) symmetrize, 3) aggregate counts into the new brackets via overlap
# fractions, 4) renormalize by the new brackets' populations.
# --- input validation -------------------------------------------------------
if (is.unsorted(ages))
stop("Ages must be increasing order")
if (sum(ages - round(ages)) != 0 | min(ages) < 1)
stop("Ages must be positive integers")
if (ages[length(ages)] > originalContactMatrixAges[1, length(originalContactMatrixAges)])
stop(paste("Final age range is older than the maximum age possible based upon originalContactMatrixAges:",
originalContactMatrixAges[1, length(originalContactMatrixAges)]))
# Should check here to see if the matrix is one that we've determined before. This will help speed things up considerably.
if(nrow(originalContactMatrix) != ncol(originalContactMatrix))
stop("originalContactMatrix is not a square matrix")
# length() of a data frame is its number of columns, i.e. number of brackets
if (nrow(originalContactMatrix) != length(originalContactMatrixAges) ||
length(originalContactMatrixAges) != length(originalPopulationFractions))
stop("Dimension mismatch between originalContactMatrix, originalContactMatrixAges, and originalPopulationFractions")
# New brackets as a 2-row (AgeStart/AgeEnd) data frame, one column each
newAgeRanges <- makeAgeRangesFromInputs(ages)
#Setting adjusts whether entered data is normalized by column (default) or row (assuming the original Mossong data has been transposed)
byColumn <- TRUE
#Calculate the matrix of expected number of contacts between single-year age groups
if (byColumn) {
matrixOfContacts <- t(apply(originalContactMatrix, 1, function(t){t*originalPopulationFractions})) #Multiply each column by the corresponding population
} else {
matrixOfContacts <- apply(originalContactMatrix, 2, function(t){t*originalPopulationFractions}) #Multiply each row by the corresponding population
}
#Symmetrize this matrix of contacts (to remove artifacts from study and to ensure consistency with the population)
symmetrizedMatrixOfContacts <- (matrixOfContacts + t(matrixOfContacts))/2
#Re-group the matrix of expected contacts
regroupedMatrixOfContacts <- matrix(0, nrow = ncol(newAgeRanges), ncol = ncol(newAgeRanges),
dimnames = list(names(newAgeRanges),names(newAgeRanges))) #Initialize with a zero matrix
# Each original cell contributes to each new cell in proportion to the
# overlap fraction of its row bracket times that of its column bracket.
# NOTE(review): this quadruple loop is O((new brackets)^2 * (old brackets)^2);
# fine for typical bracket counts but a candidate for vectorization.
for (newRowIndex in seq_along(newAgeRanges)) {
for (newColumnIndex in seq_along(newAgeRanges)){
for (rowIndex in seq_along(originalContactMatrixAges)) {
for (columnIndex in seq_along(originalContactMatrixAges))
regroupedMatrixOfContacts[newRowIndex, newColumnIndex] <-
regroupedMatrixOfContacts[newRowIndex, newColumnIndex] +
(symmetrizedMatrixOfContacts[rowIndex, columnIndex] *
getAgeRangeFraction(rowIndex, as.numeric(unlist(newAgeRanges[, newRowIndex])),
ages = originalContactMatrixAges) *
getAgeRangeFraction(columnIndex, as.numeric(unlist(newAgeRanges[, newColumnIndex])),
ages = originalContactMatrixAges))
}
}
}
#debugOutput(regroupedMatrixOfContacts)
#Re-normalize the new matrix of expected contacts
newPopulation <- apply(newAgeRanges, 2, function(x){getPopulationForAgeRange(ageRange = as.numeric(unlist(x)),
ages = originalContactMatrixAges,
population = originalPopulationFractions)})
if (byColumn) {
newContactMatrix <- t(apply(regroupedMatrixOfContacts, 1, function(t){t/newPopulation})) #Divide each column by the corresponding population
} else {
newContactMatrix <- apply(regroupedMatrixOfContacts, 2, function(t){t/newPopulation}) #Divide each row by the corresponding population
}
return(newContactMatrix)
}
# Returns the population that would be assigned to the given age range
# c(ageStart, ageEnd). `ages` is the 2-row bracket data frame (e.g. the
# POLYMOD age ranges); `population` holds one value per bracket.
# Assumes constant interpolation within brackets when subdividing ranges.
getPopulationForAgeRange <- function(ageRange, ages, population) {
  # Sum each bracket's contribution, weighted by its overlap fraction
  contributions <- vapply(
    seq_along(ages),
    function(idx) getAgeRangeFraction(idx, ageRange, ages) * as.numeric(population[idx]),
    numeric(1)
  )
  sum(contributions)
}
# Returns the fraction of the bracket at column `columnIndex` of `ages` that
# falls inside ageRange = c(ageStart, ageEnd).
# Warning: a bracket whose upper bound is NA cannot be subdivided -- the whole
# bracket (fraction 1) is returned whenever it overlaps the requested range.
getAgeRangeFraction <- function(columnIndex, ageRange, ages) {
  rangeLow <- ageRange[1]
  rangeHigh <- ageRange[2]
  bracketLow <- ages[1, columnIndex]
  bracketHigh <- ages[2, columnIndex]
  if (is.na(bracketHigh)) {
    # Open-ended bracket: all or nothing
    if (is.na(rangeHigh) || bracketLow <= rangeHigh) 1 else 0
  } else {
    # Overlap in whole years of age (inclusive bounds), clipped at zero
    overlapYears <- min(rangeHigh, bracketHigh, na.rm = TRUE) -
      max(rangeLow, bracketLow) + 1
    max(overlapYears, 0) / getAgeRangeLength(columnIndex, ages)
  }
}
# Number of single years of age covered by bracket `index` (inclusive bounds)
getAgeRangeLength <- function(index, ages) {
  ages[2, index] - ages[1, index] + 1
}
# Changes the age ranges provided into what the rebinning module prefers
makeAgeRangesFromInputs <- function(ages) {
newFrame <- data.frame("col1" = c(AgeStart = 0, AgeEnd = ages[1]))
names(newFrame) <- paste0("Age00to", ages[1])
if (length(ages) != 1) {
for (currentAge in seq.int(2, length(ages))) {
newFrame[, paste0("Age", ages[currentAge-1]+1, "to", ages[currentAge])] <-
c(AgeStart = ages[currentAge-1]+1, AgeEnd = ages[currentAge])
}
}
newFrame[, paste0("Age", ages[length(ages)]+1, "plus")] <-
c(AgeStart = ages[currentAge]+1, AgeEnd = NA)
return(newFrame)
}
|
47a17748d748fe128d6970f90fa87abd8be59167
|
2712ec1deafe0fa3a9864c786b207d9ed409539a
|
/R/33_stripSP.R
|
c80f5ef0b6d1017f874bfbc9fe18550bf52b3933
|
[] |
no_license
|
Epiconcept-Paris/GADMTools
|
de556df201c7c6ec4fff0b6cd1e78c02d3f6cbd3
|
9549ec2da551de579a17c1ac5c6d55122a7be660
|
refs/heads/master
| 2021-01-17T13:05:39.092878
| 2020-03-04T20:53:28
| 2020-03-04T20:53:28
| 42,582,951
| 9
| 2
| null | 2020-01-07T11:38:54
| 2015-09-16T11:24:35
|
R
|
UTF-8
|
R
| false
| false
| 2,002
|
r
|
33_stripSP.R
|
## ---------------------------------------------------------------------------
## Method : stripSP
## Return : a GADMWrapper ready to use with ggplot2
## ---------------------------------------------------------------------------
# Generic: dispatch stripSP on the class of `x`
stripSP <- function(x, level = NULL) {
  UseMethod("stripSP", x)
}
stripSP.gadm_sp <- function(x, level=NULL) {
# Converts a gadm_sp object into a fortified ("stripped") GADMWrapper usable
# with ggplot2. The region column used for fortify() is chosen from the
# requested administrative level.
.level <- x$level
if (is.null(level)) {
# default: use the object's own level; level 0 maps by country ISO code
if (x$level == 0) {
.name <-"ISO"
} else {
.name <- sprintf("NAME_%d", x$level)
}
} else {
# explicit level: fall back to the object's level when out of range
# NOTE(review): an explicit level = 0 yields "NAME_0" here, while the
# default path uses "ISO" for level 0 -- confirm whether this asymmetry
# is intentional before changing it.
if (level > x$level || level < 0) {
.name <- sprintf("NAME_%d", x$level)
} else {
.name <- sprintf("NAME_%d", level)
.level <- level
}
}
.map <- fortify(x$spdf, region=.name)
# ---- Create GADMWrapper object
structure(list("basename"=x$basefile,
"spdf"=.map,
"level"=.level,
"stripped" = TRUE),
class = "GADMWrapper")
}
## ===========================================================================
# Loads a previously stripped GADMWrapper written by gadm_saveStripped().
#
# name     - country identifier used when the file was written
# level    - administrative level of the stripped map
# basefile - directory prefix (expected to end with a path separator, as
#            elsewhere in this file)
# Returns the deserialized object, or stops with an informative error.
gadm_loadStripped <- function(name, level, basefile='./') {
  FILENAME <- sprintf("STRIP_%s_adm%d.rds", name, level)
  LOCAL_FILE <- sprintf("%s%s", basefile, FILENAME)
  print(LOCAL_FILE)
  # Fail with a clear message instead of the raw readRDS() error when the
  # file is missing (the original is.null() check never fired for missing
  # files because readRDS() errors first).
  if (!file.exists(LOCAL_FILE)) {
    stop(sprintf("Error: unable to read file '%s'!", LOCAL_FILE))
  }
  .map <- readRDS(LOCAL_FILE)
  if (is.null(.map)) {
    # A stored NULL is treated as a corrupt/empty save.
    stop("Error: unable to read file!")
  }
  .map
}
saveAsStripped <- function(x, fname, name = NULL, basefile = './') UseMethod("saveAsStripped")

# Strips the object first (unless it is already stripped), then persists it
# via gadm_saveStripped().
saveAsStripped.gadm_sp <- function(x, fname, name = NULL, basefile = './') {
  needsStrip <- (x$stripped == FALSE)
  toSave <- if (needsStrip) stripSP(x, name) else x
  gadm_saveStripped(toSave, fname, basefile)
}
# TRUE when a stripped RDS for the given country/level already exists under
# `basefile`.
strippedExists <- function(name, level, basefile = './') {
  target <- paste0(basefile, sprintf("STRIP_%s_adm%d.rds", name, level))
  file.exists(target)
}
# Serializes a stripped GADMWrapper to STRIP_<fname>_adm<level>.rds under
# `basefile`; always returns TRUE.
gadm_saveStripped <- function(x, fname, basefile = './') {
  target <- paste0(basefile, sprintf("STRIP_%s_adm%d.rds", fname, x$level))
  saveRDS(x, file = target)
  TRUE
}
|
c4770cc48888dc60dee7594bb552cb8644193bcc
|
519db64c2a30f4e52b787db1c1d11449f2c3bff8
|
/man/clust_remove.Rd
|
d840c4cbc1f5a2e6e501406293b16b77c3f9e013
|
[] |
no_license
|
juba/umapscan
|
dd71a4499ac6da989112e32eaa039df63be81eb9
|
180876adbc56c65d0a1b7fccaa7912ef823c46a3
|
refs/heads/master
| 2020-12-19T18:33:34.025036
| 2020-08-27T12:31:21
| 2020-08-27T12:31:21
| 235,815,547
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 883
|
rd
|
clust_remove.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{clust_remove}
\alias{clust_remove}
\title{Remove a cluster from an umapscan object}
\usage{
clust_remove(us, id, rm_root = FALSE)
}
\arguments{
\item{us}{umapscan object}
\item{id}{id of the cluster to remove}
\item{rm_root}{if TRUE, also remove the root cluster node. Otherwise, only remove
its children (should not be used directly, only for recursive call).}
}
\value{
An updated umapscan object.
}
\description{
If the cluster has children, they will be removed too.
}
\examples{
library(dplyr)
iris_num <- iris \%>\% select_if(is.numeric)
us <- new_umapscan(iris_num, n_neighbors = 25, min_dist = 0.1, seed = 1337)
us <- clust_compute(us, minPts = 3, eps = 0.5)
us <- clust_compute(us, parent = "3" ,minPts = 3, eps = 0.45)
clust_remove(us, "3_1")
us
clust_remove(us, "3")
us
}
|
b6c1effbb52022b3903c7c5c91ae7fb06ab001b6
|
496548cd172301f8ae2866d02008140d1ac56fd3
|
/R/f_indicator.R
|
d6bb237ea89475ac058d51d93615fa97edb70cbd
|
[
"MIT"
] |
permissive
|
kristian-bak/kb.modelling
|
da04a20f743f1c4db66023d25bf9c6e8859758f4
|
93509b8cc6e1c44f005db31d9f29d61c4ad64720
|
refs/heads/master
| 2023-04-04T21:45:53.106511
| 2021-04-13T18:25:32
| 2021-04-13T18:25:32
| 342,622,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,094
|
r
|
f_indicator.R
|
#' Lag function
#' @param x A vector to perform lag on
#' @param n number of elements to shift. Default is 1.
#' @export
#' @examples
#' f_lag(x = 1:10, n = 1)
#'
f_lag <- function(x, n = 1) {
  if (n == 0) {
    return(x)
  }
  # Drop the first n elements and pad the tail with NA, i.e. shift the
  # series n steps towards the front (a "lead" in time-series terms,
  # despite the function's name).
  c(x[-seq_len(n)], rep(NA, n))
}
#' This function calculates the slope of a moving average
#' @param data data.table. data should be a obj an outcome from f_indicators to ensure MA is present in data.
#' @param n Moving average based on n days
#' @return The slope of the moving average
#' @import data.table
#' @importFrom stats coef lm na.omit
f_slope <- function(data, n) {
  # Silence R CMD check NOTE for the data.table NSE column.
  id <- NULL
  data[, id := 1:.N]
  ma_var <- paste0("MA", n)
  subdata <- data[, c(ma_var, "id")]
  subdata <- na.omit(subdata)
  # Need a handful of points for a meaningful regression.
  if (nrow(subdata) < 5) {
    return(NA)
  }
  # Regress the moving-average column on the row index and return the slope.
  # Fix: the original used `lm(MA ~ id, subdata)`, but no column named `MA`
  # exists (the column is e.g. `MA5`), so lm() failed with "object 'MA' not
  # found". Build the formula from ma_var instead.
  m <- lm(stats::as.formula(paste(ma_var, "~ id")), subdata)
  as.numeric(coef(m)[2])
}
#' This function calculates the slope of a moving average based on n data points
#' @param data data.table. data should be a obj an outcome from f_indicators to ensure MA is present in data.
#' @param n number of days used to calculate the slope
#' @return data.table object with moving average slope for each day
#' @import data.table
f_ma_slope <- function(data, n = 10) {
  n_rows <- nrow(data)
  data$MA_slope <- rep(NA, n_rows)
  # Slide a window of n + 1 rows over the series; the first slope that can
  # be computed sits at row 2 * n.
  for (row_idx in (n + n):n_rows) {
    window_rows <- data[(row_idx - n):row_idx, ]
    data$MA_slope[row_idx] <- f_slope(data = window_rows, n = n)
    # Progress indicator, overwritten in place.
    cat("\r", row_idx, "of", n_rows)
    flush.console()
  }
  return(data$MA_slope)
}
#' This function calculates common indicators such as moving averages and RSI
#' @param data a data.table obtained from f_load
#' @param n number of days used to calculate the slope of moving average. Default is 10
#' @param m number of days used to calculate moving average. Default is 5
#' @return A data.table
#' @export
#' @import data.table
f_indicator <- function(data, n = 10, m = 5) {
  # NOTE(review): arguments `n` and `m` are never used in this body; all
  # moving-average windows below are hard-coded. Kept for interface
  # compatibility -- confirm whether they were meant to parameterize the
  # slope/MA windows.
  # Silence R CMD check NOTEs for data.table NSE columns.
  Change <- MACD <- NULL
  data <- data[!is.na(Change), ]
  ## OSCILLATORS
  ## Relative Strength Index
  data$RSI <- TTR::RSI(data$Close)
  ## Commodity Channel Index (20)
  data$CCI <- TTR::CCI(HLC = data[, c("High", "Low", "Close")], n = 20)
  ## Average Directional Index (14)
  df_adi <- TTR::ADX(HLC = data[, c("High", "Low", "Close")], n = 14)
  df_adi <- data.frame(df_adi)
  data$ADI <- df_adi$ADX
  ## Awesome Oscillator
  ## Momentum (10)
  data$momentum <- TTR::momentum(x = data$Close, n = 10)
  ## MACD Level (12, 26)
  # Fix: the original call passed `Fast = 12`, which is not a prefix of
  # TTR::MACD()'s `nFast` argument (R partial matching is prefix-based), so
  # it was silently absorbed by `...`; the default nFast = 12 happened to
  # apply anyway. Spell the argument out so a future change takes effect.
  MACD_res <- data.frame(TTR::MACD(data$Close, nFast = 12, nSlow = 26))
  data$MACD <- MACD_res$macd
  ## Stochastic RSI Fast (3, 3, 14, 14)
  ## Williams Percent Range (14)
  data$Williams_pct <- TTR::WPR(HLC = data[, c("High", "Low", "Close")], n = 14)
  ## Bull Bear Power
  ## Ultimate Oscillator (7, 14, 28)
  data$ultimateOscillator <- TTR::ultimateOscillator(HLC = data[, c("High", "Low", "Close")], n = c(7, 14, 28))
  ## MOVING AVERAGES
  ## Exponential Moving Average (5)
  data$EMA5 <- TTR::EMA(x = data$Close, n = 5)
  ## Simple Moving Average (5)
  data$MA5 <- TTR::SMA(x = data$Close, n = 5)
  ## Exponential Moving Average (10)
  data$EMA10 <- TTR::EMA(x = data$Close, n = 10)
  ## Simple Moving Average (10)
  data$MA10 <- TTR::SMA(x = data$Close, n = 10)
  ## Exponential Moving Average (20)
  data$EMA20 <- TTR::EMA(x = data$Close, n = 20)
  ## Simple Moving Average (20)
  data$MA20 <- TTR::SMA(x = data$Close, n = 20)
  ## Exponential Moving Average (30)
  data$EMA30 <- TTR::EMA(x = data$Close, n = 30)
  ## Simple Moving Average (30)
  data$MA30 <- TTR::SMA(x = data$Close, n = 30)
  ## Exponential Moving Average (50)
  data$EMA50 <- TTR::EMA(x = data$Close, n = 50)
  ## Simple Moving Average (50)
  data$MA50 <- TTR::SMA(x = data$Close, n = 50)
  ## Exponential Moving Average (100)
  data$EMA100 <- TTR::EMA(x = data$Close, n = 100)
  ## Simple Moving Average (100)
  data$MA100 <- TTR::SMA(x = data$Close, n = 100)
  ## Exponential Moving Average (200)
  data$EMA200 <- TTR::EMA(x = data$Close, n = 200)
  ## Simple Moving Average (200)
  data$MA200 <- TTR::SMA(x = data$Close, n = 200)
  ## Ichimoku Cloud Base Line (9, 26, 52, 26)
  ## Volume Weighted Moving Average (20)
  data$WMA <- TTR::WMA(x = data$Close, wts = data$Volume, n = 20)
  ## Hull Moving Average (9)
  data$HMA <- TTR::HMA(x = data$Close, n = 9)
  ## Signal
  data$Signal <- MACD_res$signal
  ## MACD Signal difference
  data$MACD_Signal_Diff <- data$MACD - data$Signal
  ## MA slopes
  data$MA5_slope <- f_ma_slope(data, n = 5)
  data$MA10_slope <- f_ma_slope(data, n = 10)
  data$MA20_slope <- f_ma_slope(data, n = 20)
  data$MA50_slope <- f_ma_slope(data, n = 50)
  data$MA100_slope <- f_ma_slope(data, n = 100)
  data$MA200_slope <- f_ma_slope(data, n = 200)
  ## Bollinger bands:
  bb <- TTR::BBands(HLC = data[, c("High", "Low", "Close")])
  data$lower_bb <- bb[, "dn"]
  data$upper_bb <- bb[, "up"]
  data$ma_bb <- bb[, "mavg"]
  data$pct_bb <- bb[, "pctB"]
  # Relative distances from close to the band edges, and the band width.
  data[, pct_close_lower_bb := (Close - lower_bb) / Close]
  data[, pct_upper_bb_close := (upper_bb - Close) / Close]
  data[, bb_dif := upper_bb - lower_bb]
  return(data)
}
|
41ba6cdbacbcf2efb0b9bcea6fc9e278934f876a
|
229bf9f5443566993bfd9ba16153c1ad0aada67f
|
/PaulOctopus/R/paul_pipeline/execDataScrap.R
|
15762c3bb3a82fce3645d7050e0571614e67d05b
|
[] |
no_license
|
GiulSposito/R-x
|
fc096199ca2efb483d164ba42b92a7a77281f39f
|
902aad081c5b7961983234f183ed1df4bf621e8b
|
refs/heads/master
| 2021-06-27T05:41:46.789209
| 2019-05-21T17:42:01
| 2019-05-21T17:42:01
| 115,006,654
| 2
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,466
|
r
|
execDataScrap.R
|
source("./PaulOctopus/R/eloratings/ern_scrap_domains.R")
source("./PaulOctopus/R/infra/bigQuery.R")
# Scrapes the basic domain tables, per-year match results and annual
# rankings from the EloRatings site and returns them as a named list.
scrapEloRatings <- function(scrap.years = 1930:2018){
  # basic domains
  elo.labels <- elo_scrapLabels()
  elo.teams  <- elo_scrapTeam()
  elo.tourn  <- elo_scrapTournaments()
  # match results, one scrape per year; possibly() turns scrape failures
  # into NULL so a single bad year does not abort the whole run
  scrap.years %>%
    map(possibly(elo_scrapResults, otherwise = NULL)) %>%
    bind_rows() -> elo.results
  # annual rankings (the last year is skipped -- presumably because its
  # ranking is not final yet; confirm against the site)
  scrap.years[1:(length(scrap.years)-1)] %>%
    map(possibly(elo_scrapRank, otherwise = NULL)) %>%
    set_names(scrap.years[1:(length(scrap.years)-1)]) %>%
    bind_rows(.id = "rank.year") %>%
    mutate(rank.year=as.integer(rank.year)) -> elo.rank
  # collects the tables and returns them as a list
  list(
    labels      = elo.labels,
    teams       = elo.teams,
    tournaments = elo.tourn,
    results     = elo.results,
    rank        = elo.rank
  ) %>% return()
}
# Saves a set of tables (in a named list) to BigQuery, one createTable()
# call per element, logging table name and dimensions as it goes.
# Returns the list of createTable() results.
saveTablesToBigQuery <- function(.tables){
  # seq_along() instead of 1:length(): the original `1:length(.tables)`
  # evaluates to c(1, 0) for an empty list and would index element 0.
  seq_along(.tables) %>%
    map(function(.idx, .tblist){
      tn <- names(.tblist)[.idx]
      tb <- .tblist[[.idx]]
      print(paste0("Saving ", tn, " [", nrow(tb), ",", ncol(tb), "] to BigQuery..."))
      createTable(tn, tb)
    }, .tblist=.tables)
}
# Scrapes the EloRatings tables for the given years, persists them to
# BigQuery as a side effect, and returns the scraped list to the caller
# (equivalent to the original magrittr tee pipeline).
importEloRatings <- function(scrap.years = 1930:2018){
  tables <- scrapEloRatings(scrap.years)
  saveTablesToBigQuery(tables)
  return(tables)
}
|
5c7464e8db0fb960034db3102c21736e964c2680
|
57a1c2838d0116241b256ec8af05319c0bdbc217
|
/src/common/GmicPlugin.r
|
08aa511381cec71f19ef90637ae4be75383d0235
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
ganego/gmic-8bf
|
7468398745a983b7b7f8055991117fc98b14f759
|
ee49ae507da60d648df582772163e059faa9f4f1
|
refs/heads/main
| 2023-03-03T00:55:06.461444
| 2021-02-11T18:08:01
| 2021-02-11T18:08:01
| 338,084,621
| 0
| 0
|
MIT
| 2021-02-11T16:29:17
| 2021-02-11T16:29:17
| null |
UTF-8
|
R
| false
| false
| 4,685
|
r
|
GmicPlugin.r
|
#include "PIDefines.h"
#ifdef __PIMac__
#include <Carbon.r>
#include "PIGeneral.r"
#include "PIUtilities.r"
#elif defined(__PIWin__)
#define Rez
#include "PIGeneral.h"
#endif
#include "PIActions.h"
/* PiPL (Plug-In Property List) for the main G'MIC-Qt filter plug-in.
   Declares the host-visible name/category, the per-platform entry point,
   the supported image modes, and the filter-case behavior table. */
resource 'PiPL' ( 16000, "GmicPlugin", purgeable )
{
	{
		Kind { Filter },
		Name { "G'MIC-Qt..." },
		Category { "GMIC" },
		Version { (latestFilterVersion << 16 ) | latestFilterSubVersion },
		/* Entry point symbol, selected per platform/architecture. */
		#ifdef __PIMac__
			#if (defined(__i386__))
				CodeMacIntel32 { "Gmic_Entry_Point" },
			#endif
			#if (defined(__ppc__))
				CodeMachOPowerPC { 0, 0, "Gmic_Entry_Point" },
			#endif
		#else
			#if defined(_WIN64)
				CodeWin64X86 { "Gmic_Entry_Point" },
			#else
				CodeWin32X86 { "Gmic_Entry_Point" },
			#endif
		#endif
		/* Grayscale and RGB only; all other color modes are declined. */
		SupportedModes
		{
			noBitmap, doesSupportGrayScale,
			noIndexedColor, doesSupportRGBColor,
			noCMYKColor, noHSLColor,
			noHSBColor, noMultichannel,
			noDuotone, noLABColor
		},
		EnableInfo { "in (PSHOP_ImageMode, RGBMode, GrayScaleMode, RGB48Mode, Gray16Mode)" },
		/* Limit large documents to 100,000 x 100,000 pixels. */
		PlugInMaxSize { 100000, 100000 },
		/* One entry per host filter case, in the order mandated by the SDK. */
		FilterCaseInfo
		{
			{
				/* Flat data, no selection */
				inStraightData, outStraightData,
				doNotWriteOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Flat data with selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Floating selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Editable transparency, no selection */
				inStraightData, outStraightData,
				doNotWriteOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Editable transparency, with selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Preserved transparency, no selection */
				inStraightData, outStraightData,
				doNotWriteOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Preserved transparency, with selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination
			}
		}
	}
};
/* PiPL for the companion input/output settings plug-in. Mirrors the main
   filter's properties but exposes a separate entry point symbol. */
resource 'PiPL' ( 16001, "GmicOutputSettingsPlugin", purgeable )
{
	{
		Kind { Filter },
		Name { "Input/Output Settings for G'MIC-Qt..." },
		Category { "GMIC" },
		Version { (latestFilterVersion << 16 ) | latestFilterSubVersion },
		/* Entry point symbol, selected per platform/architecture. */
		#ifdef __PIMac__
			#if (defined(__i386__))
				CodeMacIntel32 { "Gmic_IO_Settings_Entry_Point" },
			#endif
			#if (defined(__ppc__))
				CodeMachOPowerPC { 0, 0, "Gmic_IO_Settings_Entry_Point" },
			#endif
		#else
			#if defined(_WIN64)
				CodeWin64X86 { "Gmic_IO_Settings_Entry_Point" },
			#else
				CodeWin32X86 { "Gmic_IO_Settings_Entry_Point" },
			#endif
		#endif
		/* Grayscale and RGB only; all other color modes are declined. */
		SupportedModes
		{
			noBitmap, doesSupportGrayScale,
			noIndexedColor, doesSupportRGBColor,
			noCMYKColor, noHSLColor,
			noHSBColor, noMultichannel,
			noDuotone, noLABColor
		},
		EnableInfo { "in (PSHOP_ImageMode, RGBMode, GrayScaleMode, RGB48Mode, Gray16Mode)" },
		/* Limit large documents to 100,000 x 100,000 pixels. */
		PlugInMaxSize { 100000, 100000 },
		/* One entry per host filter case, in the order mandated by the SDK. */
		FilterCaseInfo
		{
			{
				/* Flat data, no selection */
				inStraightData, outStraightData,
				doNotWriteOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Flat data with selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Floating selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Editable transparency, no selection */
				inStraightData, outStraightData,
				doNotWriteOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Editable transparency, with selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Preserved transparency, no selection */
				inStraightData, outStraightData,
				doNotWriteOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination,
				/* Preserved transparency, with selection */
				inStraightData, outStraightData,
				writeOutsideSelection,
				filtersLayerMasks, worksWithBlankData,
				copySourceToDestination
			}
		}
	}
};
|
d2e4d77dbf5e7a42ae27dbf8935e9e21aac23eea
|
8f39806714430f0dd2918f825845496c5b9c0fe7
|
/man/cochrane.Rd
|
fab8e52baa522065b7c8d9731b623b26dca79872
|
[] |
no_license
|
cran/rmeta
|
85913d9ef8b412c929d456629a59a91f7dfd689b
|
a521f0a0f6c6e64f4a6bef63b2fcddc0c25d11c2
|
refs/heads/master
| 2021-05-15T01:39:56.734524
| 2018-03-20T11:01:32
| 2018-03-20T11:01:32
| 17,699,255
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,094
|
rd
|
cochrane.Rd
|
\name{cochrane}
\alias{cochrane}
\non_function{}
\title{Data for Cochrane Collaboration logo }
\usage{data(cochrane)}
\description{
Data from randomised trials before 1980 of corticosteroid therapy in premature
labour and its effect on neonatal death.
}
\format{
This data frame contains the following columns:
\describe{
\item{name}{Identifier for the study
}
\item{ev.trt}{
Number of deaths in the treated group
}
\item{n.trt}{
Number in the treated group
}
\item{ev.ctrl}{
Number of deaths in the control group
}
\item{n.ctrl}{
Number in the control group
}
}
}
\details{
This meta-analysis, if done, would likely have resulted in the treatment being
widely used a decade earlier than it was, saving many lives. The graph
is part of the logo of the Cochrane Collaboration, a group aiming to
perform systematic reviews of the entire clinical trial literature.
} \source{
\url{http://www.cochrane.org}
}
\examples{
data(cochrane)
steroid <- meta.MH(n.trt, n.ctrl, ev.trt, ev.ctrl,
names=name, data=cochrane)
plot(steroid, col=meta.colors("RoyalBlue"))
}
\keyword{datasets}
|
1655ec33c4f586f7a1e3ac7c6bf7ff9ccd8ef561
|
b909e63baac12cc309b143539ec712fa1a0f7fd0
|
/RNA-seq/scripts/full_test.R
|
197f9b18756881aeb252f1cc7cfa684774bd1ab0
|
[
"MIT"
] |
permissive
|
daugherty-lab/NINL
|
c764bd7de81820664f6b107d2f5fd822d5fd7c3a
|
7949c4879f89869abc0abe42ea5c76fa9423ec4d
|
refs/heads/master
| 2023-08-30T15:19:49.966918
| 2022-11-09T21:29:32
| 2022-11-09T21:29:32
| 503,169,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,687
|
r
|
full_test.R
|
# R script for DESeq2 processing of 1
# NOTE(review): setwd() in a script is fragile -- it assumes the script is
# launched from a specific directory; consider project-relative paths.
setwd('..')
#### installing and loading packages ####
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager", repos='http://cran.us.r-project.org')
knitr::opts_chunk$set(tidy=FALSE, cache=TRUE,
                      dev="png",
                      message=FALSE, error=FALSE, warning=TRUE)
options(scipen = 5) # scientific notation for plotting later
packages <- c("reshape", "ggplot2", "ggrepel", "RColorBrewer", "pheatmap", "data.table")
if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
  install.packages(setdiff(packages, rownames(installed.packages())), repos='http://cran.us.r-project.org')
}
install.packages("data.table", dependencies=TRUE)
BiocManager::install("DESeq2")
BiocManager::install("DEGreport")
BiocManager::install("GenomicFeatures")
library(reshape)
library(ggplot2)
library(ggrepel)
library(DEGreport)
library(RColorBrewer)
suppressPackageStartupMessages(library(DESeq2))
library(pheatmap)
suppressPackageStartupMessages(library('GenomicFeatures'))
library(tximport)
library(readr)
library(tximportData)
## ----config----
species <- 'hg38'
## ---- making a tx2gene for hg38 ------------
txdb <-makeTxDbFromGFF("Homo_sapiens.GRCh38.100.gff")
keytypes(txdb)
k <- keys(txdb, keytype = "GENEID")
df <- select(txdb, keys = k, columns = "TXNAME", keytype = "GENEID")
# tximport expects transcript-to-gene mapping as (TXNAME, GENEID).
tx2gene <- df[, 2:1]
head(tx2gene)
write.csv(tx2gene, paste0("out/tx/", species, "_tx2gene.csv"), row.names = FALSE)
## ----txiSetup------------------------------------------------------------
#library("tximport")
#library("readr")
library("tximportData")
# NOTE(review): the sample sheet path mixes system.file() with an absolute
# Dropbox path -- presumably machine-specific; verify before reuse.
dir <- system.file("/chrisRNA/quants/", package="tximportData")
samples <- read.table(file.path(dir, paste0("Volumes/MiniDrive/Dropbox/daugherty-lab/Helitrons/DESeq2/lists/", species, ".txt")), header=TRUE)
samples$Condition
rownames(samples) <- samples$Run
samples
## ----txiFiles------------------------------------------------------------
files <- file.path(paste0("/Volumes/MiniDrive/Dropbox/daugherty-lab/Helitrons/DESeq2/quants/", samples$Run, ".sf"))
files
# Fix: the column is `Run` (capital R, as used above); `samples$run`
# returned NULL and left `files` unnamed, so tximport columns lost their
# sample names.
names(files) <- samples$Run
tx2gene <- read_csv(file.path(paste0("/Volumes/MiniDrive/Dropbox/daugherty-lab/Helitrons/DESeq2/tx/", species, "_tx2gene.csv")))
## ----tximport, results="hide"--------------------------------------------
txi <- tximport(files, type="salmon", tx2gene=tx2gene, ignoreAfterBar = TRUE)
## ----txi2dds, results="hide"---------------------------------------------
library("DESeq2")
ddsTxi <- DESeqDataSetFromTximport(txi,
                                   colData = samples,
                                   design = ~ Condition)
dds <- ddsTxi
|
ac6c3fbd592a4ae434cbfed3caed58c648f1e1bb
|
42f2dba844a17a3b0f0bbc1007bbfe063419037b
|
/NNhuMG/R/phenoReduce.R
|
678e8c6b571bc4b1df6cd236898a115d943aaedc
|
[] |
no_license
|
steschlick/Boettcher_et_al_NN_2018
|
4569e577cc4d918ecce7d9f0e7a9a60fe3639af2
|
bc502b0168294e3645d1a1975b64ab313a1a9691
|
refs/heads/master
| 2020-04-02T22:00:26.959428
| 2019-01-26T14:59:28
| 2019-01-26T14:59:28
| 154,818,434
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,428
|
r
|
phenoReduce.R
|
#' @title Minimal Marker Cutoff Combinations
#' @description finds low-dimensional phenotypic representations of target subsets
#' @param con object returned by \code{\link{findCutoffs}}
#' @param target.num.marker desired least number of markers, Default: NA
#' @param b weighs recall and precision in F-score, Default: 0.5
#' @return \code{con} object with updated and added fields:
#' \describe{
#' \item{\code{cutoffs}}{list with updated cutoffs per marker element}
#' \item{\code{targets}}{named indices of subsets that phenotypically differ from at least one other subset}
#' \item{\code{reduc}}{\describe{
#' \item{\code{keep}}{logical vector indicating final marker-cutoff-combinations}
#' \item{\code{solutions}}{matrix containing all feasible solutions returned by \code{\link{lp}}}
#' \item{\code{fscore}}{matrix with computed fscores for all feasible solutions}
#' }}
#' }
#' @details The function solves an Integer Linear Programm (ILP) to find a set of at most 3 cutoffs and a minimal and/or required
#' number of markers that still allow to discriminate between subsets. In the likely case that multiple solutions are found,
#' these are used to gate the target phenotypes and compute F-scores. The optimal solution is selected based on the best average
#' accuracy among the subsets. If target.num.marker is specified, the ILP maximizes the sum of effective differences \eqn{Delta}
#' between the subsets and a minimal set as well as a solution with at least the required number of markers is returned. See
#' \code{\link{findCutoffs}} for examples. Note that currently no print, show, or summary functions are implemented.
#' @seealso
#' \code{\link{findCutoffs}}
#' \code{\link[lpSolve]{lp}}
#' @rdname phenoReduce
#' @export
#' @importFrom lpSolve lp
#' @importFrom utils head
phenoReduce <- function(con, target.num.marker = NA, b = 0.5) {
  # `required` flags whether the caller asked for a specific marker count
  # (the extra "required markers" ILP below only runs in that case).
  required <- !is.na(target.num.marker)
  # need to copy
  cutoff.comb <- con$reduc$cutoff.comb
  cutoff.comb.ind <- con$reduc$cutoff.comb.ind
  cutoff.comb.keep <- con$reduc$cutoff.comb.keep
  cutoff.comb.constraint <- con$reduc$cutoff.comb.constraint
  cutoff.comb.phenocode <- con$reduc$cutoff.comb.phenocode
  comb.subset <- con$subset
  # solve for minimal sets of markers, use negated ranks to select according to
  # separation
  keep.ord <- t(apply(-abs(con$delta.mat), 1, rank, ties.method = "min"))
  dimnames(keep.ord) <- dimnames(con$keep.mat)
  keep.comb.ord <- keep.ord[, cutoff.comb.ind]
  # TODO: if findCutoffs(find.all==FALSE) make sure that
  # all(apply(!!keep.solve, 1, any))
  # keep.comb.mat <- 1 * (con$keep.mat[ , cutoff.comb.ind] & cutoff.comb.keep)
  keep.comb.mat <- 1 * cutoff.comb.keep
  if (required) {
    # solve for required number of markers
    target.num.marker <- min(target.num.marker, ncol(con$delta.mat))
    cat(paste0("\ncomputing optimal combination for ", target.num.marker, " markers\n"))
    delta.sums <- abs(con$delta.mat)[, cutoff.comb.ind]
    delta.sums[!cutoff.comb.keep] <- 0
    colnames(delta.sums) <- colnames(cutoff.comb.constraint)
    delta.solve <- 1 * cutoff.comb.keep
    delta.num.cuts <- sapply(cutoff.comb, length)
    num.cuts <- 1:con$reduc$max.cuts
    delta.sol <- vector("list", length(num.cuts))
    # One ILP per allowed number of cutoffs; columns requiring more cutoffs
    # than `n` are zeroed out of the constraint matrix.
    for (n in num.cuts) {
      d.con <- rbind(cutoff.comb.constraint, delta.solve)
      d.con[, delta.num.cuts > n] <- 0
      d.obj <- 1 + max(colSums(delta.sums)) - colSums(delta.sums)
      # set target minimum number of markers
      d.rhs <- c(target.num.marker, rep(1, nrow(d.con) - 1))
      m.dir <- rep("<=", nrow(cutoff.comb.constraint) - 1)
      d.dir <- c(">=", m.dir, rep(">=", nrow(delta.solve)))
      print(d.sol <- lpSolve::lp("min", d.obj, d.con, d.dir, d.rhs, all.bin = TRUE,
        num.bin.solns = 100))
      delta.sol[[n]] <- matrix(utils::head(d.sol$solution, ncol(d.con) * d.sol$num.bin.solns),
        nrow = d.sol$num.bin.solns, byrow = TRUE)
    }
    delta.sol <- do.call("rbind", delta.sol)
    keep.comb.mat[, !apply(!(!delta.sol), 2, any)] <- 0
  }
  solutions.l <- vector("list", max(keep.ord))
  cat("\ncalculating minimal marker combinations, starting with best separating markers\n")
  # Iteratively widen the candidate set: at pass i only markers ranked <= i
  # by separation are allowed into the ILP.
  for (i in seq_along(solutions.l)) {
    keep.solve <- keep.comb.mat
    keep.solve[keep.comb.ord > i] <- 0
    f.con <- rbind(cutoff.comb.constraint, keep.solve)
    f.obj <- rep(1, ncol(f.con))
    # minimize objective function, i.e. minimal combo that satisfies constraints
    f.rhs <- c(1, rep(1, nrow(f.con) - 1))
    m.dir <- rep("<=", nrow(cutoff.comb.constraint) - 1)
    f.dir <- c(">=", m.dir, rep(">=", nrow(keep.solve)))
    cat("\n", i, "\n")
    # Please note, we need to restrict number of feasible solution for there will be
    # combinatorial explosion for many markers included in the model
    # encountered crashes when 100 < num.bin.solns < 200 with expected solutions >> 200
    print(sol <- lpSolve::lp("min", f.obj, f.con, f.dir, f.rhs, all.bin = TRUE, num.bin.solns = 100))
    solutions.l[[i]] <- matrix(utils::head(sol$solution, ncol(f.con) * sol$num.bin.solns),
      nrow = sol$num.bin.solns, byrow = TRUE)
  }
  # remove infeasible solutions
  feasible <- sapply(solutions.l, function(sol) any(!(!sol)))
  if (!any(feasible))
    stop("try to increase delta, max number of cutoffs, or select subsets manually")
  solutions.l <- solutions.l[feasible]
  # check also for max number of possible sols
  solutions.l <- solutions.l[sapply(solutions.l, function(sol) nrow(sol) < 100)]
  if (required) {
    solutions.l <- c(list(delta.sol), solutions.l)
  }
  # setup list to track (delta separation) ranks, number of solutions, number of
  # markers, additional combinations of markers/cuts will be appended, duplicates
  # removed
  sol.track <- sapply(seq_along(solutions.l), function(d.rank) {
    tmp <- cbind(d.rank, n.marker = apply(solutions.l[[d.rank]], 1, sum))
    tmp <- cbind(tmp, solution = seq_len(nrow(tmp)))
    tmp
  }, simplify = FALSE)
  # collapse solutions with equal number of markers (same within each)
  sol.colps <- sapply(sol.track, "[", 1, 2)
  sol.track <- lapply(unique(sol.colps), function(sc) {
    do.call("rbind", sol.track[sol.colps %in% sc])
  })
  solutions <- lapply(unique(sol.colps), function(sc) {
    do.call("rbind", solutions.l[sol.colps %in% sc])
  })
  # remove duplicates (caused by replicated solutions)
  sol.track <- mapply(function(sol.h, sols) {
    sol.h[!duplicated(sols), , drop = FALSE]
  }, sol.track, solutions, SIMPLIFY = FALSE)
  solutions <- lapply(solutions, function(sols) sols[!duplicated(sols), , drop = FALSE])
  # take resulting marker combinations and pick the one with best average f-score
  # among the subsets.
  sol.track <- do.call("rbind", sol.track)
  all.sols <- do.call("rbind", solutions)
  colnames(all.sols) <- colnames(cutoff.comb.constraint)
  # rename
  names(cutoff.comb) <- colnames(keep.solve)
  rownames(cutoff.comb.phenocode) <- names(comb.subset)
  # create matrix of true positives (sampled from raw data)
  tsne.trupos <- sapply(con$res$bin.idx[comb.subset], function(b) seq_len(nrow(con$comb$data)) %in%
    b)
  colnames(tsne.trupos) <- names(comb.subset)
  # exclude the exclusion (i.e. NOT-subset-gated cells/bins)
  tsne.trupos <- tsne.trupos[, !grepl("\\*\u00B7root", names(comb.subset))]
  cutoff.comb.phenocode <- cutoff.comb.phenocode[!grepl("\\*\u00B7root", names(comb.subset)),
    ]
  sol.phenotypes <- sapply(seq_len(nrow(all.sols)), function(l) {
    cutoff.comb.phenocode[, !(!all.sols[l, ]), drop=FALSE]
  }, simplify = FALSE)
  sol.cutoff.comb <- sapply(seq_len(nrow(all.sols)), function(l) {
    cutoff.comb[!(!all.sols[l, ])]
  }, simplify = FALSE)
  # compute f-measure for each gated phenotype/solution
  fms.sols <- sapply(seq_len(nrow(all.sols)), function(l) {
    phenocode <- cutoff.comb.phenocode[, !(!all.sols[l, ]), drop=FALSE]
    dat <- con$comb$data[, names(cutoff.comb[!(!all.sols[l, ])]), drop=FALSE]
    sapply(rownames(phenocode), function(s) {
      # AND the per-marker 1D gates (testCutoffs) into one combined gate.
      gated <- apply(sapply(colnames(dat), function(m) {
        testCutoffs(x = dat[, m], pheno = phenocode[s, m], cutoffs = cutoff.comb[!(!all.sols[l,
          ])][[m]])
      }), 1, all)
      fmeasure(pred = gated, true = tsne.trupos[, s], b = b)
    })
  })
  # w/o any gating
  fms.null <- apply(tsne.trupos, 2, function(tr) {
    fmeasure(pred = !logical(nrow(con$comb$data)), true = tr, b = b)
  })
  # max per subset
  fms.max <- apply(fms.sols, 1, max)
  fms.opt <- apply(fms.sols, 1, function(x) which(x == max(x)))
  fms.opt <- unique(unlist(fms.opt))
  # calculate f-measure only within a combo so we need a hierarchy here
  hierarchy.sol <- apply(all.sols, 1, function(x) {
    apply(all.sols, 1, function(y) {
      all(x[!(!x)] == y[!(!x)])
    })
  })
  if (!is.matrix(hierarchy.sol)) {
    hierarchy.sol <- as.matrix(hierarchy.sol)
  }
  # calculate row max for each family
  # fms.sols.n <- sweep(fms.sols, 1, fms.max, '/')
  fms.max.hierarchy <- apply(hierarchy.sol, 1, function(x) {
    apply(fms.sols[, x, drop = FALSE], 1, max)
  })
  colnames(fms.max.hierarchy) <- seq_len(ncol(fms.max.hierarchy))
  # pick the best average
  opt <- which.max(apply(fms.max.hierarchy, 2, mean))
  # find maxima for each subset in (possibly) different numbers of markers
  targets <- apply(fms.sols[, hierarchy.sol[opt, ], drop = FALSE], 1, which.max)
  # cbind(fms.null, hierarchy.max=fms.max.hierarchy[ , opt],
  # fms.subset.max=fms.max, fms.sols[, hierarchy.sol[opt, ], drop=FALSE])
  # the target marker-cutoff-combo
  cuts.target <- apply(!(!all.sols[hierarchy.sol[opt, ], , drop = FALSE]), 2, any)
  # check, this should never happen, for testthat
  if (required && !any(apply(delta.sol, 1, function(x) all(all.sols[hierarchy.sol[opt,
    ], , drop = FALSE][1, ] == x)))) {
    stop("check the ILP model!")
  }
  target.hierarchy <- sol.phenotypes[hierarchy.sol[opt, ]]
  target.phenos <- sol.phenotypes[which(hierarchy.sol[opt, ])[targets]]
  # extract the target phenotyp for each subset (the 'list' diagonal)
  target.phenos <- sapply(seq_along(target.phenos), function(t) target.phenos[[t]][t,
    , drop = FALSE], simplify = FALSE)
  target.phenos <- sapply(target.phenos, function(x) paste(ifelse(nzchar(x[1, ]),
    colnames(x), ""), x[1, ], sep = ""), simplify = FALSE)
  names(target.phenos) <- paste("m\u00B7", rownames(fms.sols), sep = "")
  # phenotypes for required number of markers or for any more than the minimal
  full.phenos <- unlist(apply(target.hierarchy[[1]], 1, function(x) list(paste(ifelse(nzchar(x),
    colnames(target.hierarchy[[1]]), ""), x, sep = ""))), recursive = FALSE)
  names(full.phenos) <- paste("f\u00B7", names(full.phenos), sep = "")
  target.phenos <- c(full.phenos, target.phenos)
  # target.phenos <- target.phenos[!duplicated(target.phenos)]
  # Write the selected cutoffs/targets back onto the con object (see the
  # roxygen block above for the documented fields).
  con$cutoffs <- cutoff.comb[cuts.target]
  con$targets <- target.phenos
  con$reduc$keep <- cuts.target
  con$reduc$solutions <- all.sols
  con$reduc$fscores <- fms.sols
  return(con)
}
## @title helper function
## @description simple 1D-gating
## @param x numeric vector of expression values
## @param pheno char-encoded phenotype of a given marker expression
## @param cutoffs numeric vector of cutoff-values
## @return logical vector
## @details gating without flowCore overhead. see also \code{\link{pheno}}
#' @keywords internal
testCutoffs <- function(x, pheno, cutoffs) {
  n <- length(cutoffs)  # cutoffs should be sorted already
  if (n < 1 || n > 3) {
    return(NULL)
  }
  # Empty phenotype code means "no constraint on this marker".
  if (pheno == "") {
    return(!logical(length(x)))
  }
  # A valid code is a run of "+" followed by a run of "-" with at most n
  # characters in total: k leading "+" raise the lower bound to cutoffs[k],
  # and j trailing "-" cap the upper bound at cutoffs[n - j + 1].
  # Anything else is an unknown code and yields NULL, as before.
  if (!grepl("^\\+*-*$", pheno)) {
    return(NULL)
  }
  code_chars <- strsplit(pheno, "")[[1]]
  n_plus <- sum(code_chars == "+")
  n_minus <- sum(code_chars == "-")
  if (n_plus + n_minus > n) {
    return(NULL)
  }
  gate <- !logical(length(x))
  if (n_plus > 0) {
    gate <- gate & (cutoffs[n_plus] < x)
  }
  if (n_minus > 0) {
    gate <- gate & (x <= cutoffs[n - n_minus + 1])
  }
  gate
}
## @title helper function
## @description computes the harmonic mean of precision and recall
## @param pred logical vector of predicted positives
## @param true logical vector of true positives
## @param b (beta-) parameter weighing precision vs. recall, Default: 1
## @return F-beta score
## @details b = 0.5 weighs recall lower than precision
#' @keywords internal
fmeasure <- function(pred, true, b = 1) {
  true_pos <- sum(pred & true)
  n_retrieved <- sum(pred)
  # Precision defaults to 0 when nothing was retrieved at all.
  precision <- if (n_retrieved != 0) true_pos / n_retrieved else 0
  recall <- true_pos / sum(true)
  # F-beta combines precision and recall; b < 1 weighs recall lower than
  # precision. Returns 0 when either component is 0.
  if (recall != 0 && precision != 0) {
    (1 + b^2) * precision * recall / (b^2 * precision + recall)
  } else {
    0
  }
}
|
5f589b7abab5fa8ae4702d101251d73a9196f3ad
|
59070575eb00f1a92b9f7d8027312f6c783d59b0
|
/code/prepare_data_elections.R
|
cb760a1b98d69e7d796d0eeb66f44399509c7b73
|
[] |
no_license
|
tobiasnowacki/uk-careers
|
e8becb49f89da0901ef0b195ddbe1afa14f7cdd5
|
277ee21208632e1b7d1d637533cdfc127b8b6fbe
|
refs/heads/main
| 2023-08-26T01:47:35.469116
| 2021-11-05T00:22:39
| 2021-11-05T00:22:39
| 424,672,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,418
|
r
|
prepare_data_elections.R
|
# Replication files for 'The Emergence of Party-Based Political Careers in the UK, 1801-1918'
# Cox & Nowacki (Journal of Politics, forthcoming)
# prepare_data_elections.R: Tidy data for election-level analysis
#
# Builds a contest-level panel of Conservative/Liberal seat shares
# (pre- and post-1832 sources combined) and writes it to
# output/mod_data/seat_shares.csv for the next pipeline step.

# -------------
# DEPENDENCIES
# -------------
# NOTE(review): import(), year(), make_date() etc. are presumably made
# available (rio/lubridate/tidyverse) via this sourced file -- confirm.
source("code/0_functions.R")

# ---------
# LOAD DATA
# ---------
d_orig <- import("data/eggers_spirling/elections.csv")
f <- import("data/eggers_spirling/election_returns.csv")
m <- import("data/eggers_spirling/mps.csv")
s <- import("data/eggers_spirling/services.csv")
of <- import("data/eggers_spirling/officeholdings.csv")
ofid <- import("data/eggers_spirling/offices.csv")
af <- import("data/aidt-franck/pre-reform.dta")
hss <- import("data/hss/hopt-v3.dta")
cdum <- import("data/eggers_spirling/constituencies.dta")

# ---------
# PREPARE DATA
# ---------
# Prepare Eggers-Spirling Data: Election-Level
# General elections only, 1832-1929. The December 1910 election is
# recoded to year 1911 so the two 1910 general elections stay distinct.
elecs <- d_orig %>%
  mutate(
    date = as.Date(date),
    year = year(date)
  ) %>%
  filter(date > as.Date("1832-01-01") & date < as.Date("1930-01-01")) %>%
  mutate(year = ifelse(date == as.Date("1910-12-03"), 1911, year)) %>%
  filter(by_election == 0)

# Get IDs
ge_dates <- unique(elecs$date)
ge_dates2 <- unique(d_orig$date[d_orig$by_election == 0])
ge_ids <- unique(elecs$election_id)

# Clean party labels: label sets mapped to Conservative ("C") and
# Liberal ("L") below.
con_labels <- c("C", "C (Ind C)", "C (Nat P)", "C (Nat P) (C)", "C*", "Ind L/Crf (LU)", "LU", "LU (C)", "LU (Nat P) (C)", "LU*", "U", "Co C")
lib_labels <- c("L", "Ind L (L)", "LU (L)", "Co L")

# How many non-standard party labels? (excludes the plain "C" and "LU"
# labels from the Conservative set, and plain "L" from the Liberal set)
trunc <- f %>%
  filter(party %in% con_labels[c(-1, -7)] |
    party %in% lib_labels[-1]) %>%
  left_join(elecs) %>%
  filter(year < 1912)
nrow(trunc)

# Prepare contest-level dataset: winners per election with two-party
# vote shares.
winners <- f %>%
  group_by(election_id) %>%
  mutate(party = case_when(
    party %in% con_labels ~ "C",
    party %in% lib_labels ~ "L",
    TRUE ~ "O"
  )) %>%
  mutate(
    v_share = votes / sum(votes),
    con_share = sum(v_share[party == "C"]),
    lib_share = sum(v_share[party == "L"])
  ) %>%
  ungroup() %>%
  filter(winner == 1 & election_id %in% ge_ids) %>%
  # NOTE(review): party was already collapsed to C/L/O above, so the
  # "LU" and "LU (C)" comparisons below can never be TRUE (harmless
  # dead code).
  mutate(
    Con = party == "C" | party == "LU" | party == "LU (C)",
    Lib = party == "L",
    Other = party != "C" & party != "L"
  ) %>%
  group_by(election_id, unopposed) %>%
  summarise(
    Con = sum(Con),
    con_votes = mean(con_share),
    Lib = sum(Lib),
    lib_votes = mean(lib_share),
    Other = sum(Other)
  ) %>%
  left_join(elecs) # add contest-level info

# Contest-level dataset: add lagged seat shares / contest indicators.
# con_sub substitutes the seat share where the vote share is 0/missing
# (e.g. unopposed contests).
# NOTE(review): the lag columns built here are dropped by the final
# select() and recomputed after binding with the pre-1832 data below.
shares <- winners %>%
  mutate(
    tpty = Con + Lib,
    Con = Con / tpty,
    Lib = Lib / tpty,
    tpty_v = con_votes + lib_votes,
    con_votes = con_votes / tpty_v,
    lib_votes = lib_votes / tpty_v,
    con_sub = ifelse(
      con_votes == 0 | is.na(con_votes) == TRUE,
      Con,
      con_votes
    )
  ) %>%
  group_by(constituency.id) %>%
  arrange(date) %>%
  mutate(
    Con_lg = lag(Con),
    Con_lg2 = lag(Con, 2),
    unopposed_lg = lag(unopposed),
    unopposed_lg2 = lag(unopposed, 2),
    con_votes_lg = lag(con_votes),
    con_sub_lg = lag(con_sub),
    con_sub_lg2 = lag(con_sub, 2),
    year = substr(date, 1, 4)
  ) %>%
  mutate(year = ifelse(date == as.Date("1910-12-03"), 1911, year)) %>%
  filter(year != "1831") %>%
  ungroup() %>%
  dplyr::select(election_id, constituency.id, constituency.name, date, unopposed, year, Con, Lib, total_seats, seats_up)

# Get constituency ID for pre-1832 constituencies
d_pre <- d_orig %>%
  mutate(
    date = as.Date(date),
    year = year(date)
  ) %>%
  filter(date < as.Date("1832-01-01"), by_election == 0) %>%
  dplyr::select(election_id, constituency.id, constituency.name, year)

# Clean HSS data (pre-1832) to get contest-level seat shares.
# Tories ("T") are treated as Conservatives, Whigs ("W") as Liberals;
# a contest is unopposed when candidates equal winners.
hss_shares <- hss %>%
  rowwise() %>%
  mutate(date = make_date(year, month, day)) %>%
  filter(bye == 0 & winner == 1) %>%
  group_by(election) %>%
  mutate(
    Con = party == "T",
    Lib = party == "W",
    unopposed = as.numeric(ncands == nwinners)
  ) %>%
  group_by(election, constituency, unopposed, year, date, nwinners) %>%
  summarise(
    Con = sum(Con),
    Lib = sum(Lib)
  ) %>%
  mutate(
    tpty = Con + Lib,
    Con = Con / tpty,
    Lib = Lib / tpty
  ) %>%
  ungroup() %>%
  group_by(constituency) %>%
  arrange(year) %>%
  mutate(
    Con_lg = lag(Con),
    Con_lg2 = lag(Con, 2),
    unopposed_lg = lag(unopposed),
    unopposed_lg2 = lag(unopposed, 2)
  ) %>%
  left_join(d_pre, by = c("constituency" = "constituency.name", "year")) %>%
  mutate(constituency.name = constituency, total_seats = nwinners, seats_up = nwinners) %>%
  ungroup() %>%
  dplyr::select(election_id, constituency.id, constituency.name, date, unopposed, year, Con, Lib, total_seats, seats_up)

# Bind pre-1832 and post-1832 together and recompute lags over the
# combined series (grouped by constituency name so the two eras link up).
mg_df <- rbind(hss_shares, shares) %>%
  # mutate_at(vars(con, lib), ~ replace_na(., 0)) %>%
  group_by(constituency.name) %>%
  arrange(year) %>%
  mutate(
    Con_lg = lag(Con),
    Con_lg2 = lag(Con, 2),
    unopposed_lg = lag(unopposed),
    unopposed_lg2 = lag(unopposed, 2),
    year_fac = factor(year)
  ) %>%
  distinct()

# prepare and merge in constituency dummies
cdum <- cdum %>%
  select(
    constituency_id, start_year, end_year, const_type,
    country, patron_sack, patron_gash, patron_hanham
  )

# add const dummies and recode patronal districts; the source used for
# "patronal" depends on the era (Sack pre-1832, Hanham later).
mg_df <- mg_df %>%
  left_join(cdum, by = c("constituency.id" = "constituency_id")) %>%
  mutate(patronal = case_when(
    year < 1832 & patron_sack == 1 ~ 1,
    year %in% 1832:1867 & const_type == "borough" & patron_sack == 1 & patron_hanham == 1 ~ 1,
    year %in% 1832:1867 & const_type == "county" & patron_hanham == 1 ~ 1,
    year > 1867 & patron_hanham == 1 ~ 1,
    TRUE ~ 0
  ))

# keep important variables
export <- mg_df %>%
  dplyr::select(election_id, constituency.id, constituency.name, date, year_fac, unopposed, year, Con, Lib, Con_lg, Con_lg2, unopposed_lg, unopposed_lg2, const_type, country, patronal, total_seats)

# Export for next steps
write.csv(export, "output/mod_data/seat_shares.csv")
|
e9ecab5c1072989c44a9c02c7658c7889418eade
|
b68a219ba34b6aa60497d73968517c8a31bbdc9a
|
/Household_power_consumption/plot3.R
|
375eab5a3d7ce8f7ba4873f2d615ab00654fe2bd
|
[] |
no_license
|
Ziconin/Data_Science
|
935ac665f11040f9a8c21c32936bfdf4f57069cc
|
418709716eb1998004c6d38533f814ac7a8638d0
|
refs/heads/master
| 2021-09-23T20:22:10.477323
| 2018-09-27T12:03:27
| 2018-09-27T12:03:27
| 95,986,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
plot3.R
|
# Read the household power-consumption file and keep the requested days.
#
# path: path to the raw semicolon/comma file readable by data.table::fread
#       ("?" marks missing values in the source data).
# dates: character dates (dd/mm/yyyy, as stored in the file) to keep.
# Returns a data.table with only the matching rows.
readFile <- function(path = "household_power_consumption.txt",
                     dates = c("1/2/2007", "2/2/2007")) {
  # require() only returns FALSE on failure; fail loudly instead.
  if (!requireNamespace("data.table", quietly = TRUE)) {
    stop("Package 'data.table' is required.", call. = FALSE)
  }
  power <- data.table::fread(path, na.strings = "?")
  power[power$Date %in% dates, ]
}
# Draw the three sub-metering series over time and save them as a PNG.
#
# out_file: path of the PNG to write (480x480, white background).
# Uses readFile() for the two-day data slice; the x axis is built from
# the Date and Time columns parsed as day-month-year timestamps.
plot3 <- function(out_file = "plot3.png") {
  if (!requireNamespace("lubridate", quietly = TRUE)) {
    stop("Package 'lubridate' is required.", call. = FALSE)
  }
  data <- readFile()
  times <- lubridate::dmy_hms(paste(data$Date, data$Time, sep = " "))
  png(out_file, width = 480, height = 480, bg = "white")
  # Guarantee the device is closed even if a plotting call errors,
  # so a failure does not leave a dangling graphics device.
  on.exit(dev.off(), add = TRUE)
  plot(times, data$Sub_metering_1,
       type = "n",
       ylab = "Energy sub metering",
       xlab = "")
  lines(times, data$Sub_metering_1, col = "black")
  lines(times, data$Sub_metering_2, col = "red")
  lines(times, data$Sub_metering_3, col = "blue")
  legend("topright",
         lty = c(1, 1, 1),
         col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         cex = 0.85)
  invisible(NULL)
}
|
ff959d047069ad5324832e27c27f8c129da45652
|
262680e41eb04d5b947fdb0cb3b08d43a568c41a
|
/rsiconfi/gastos_estaduais_SP_com_saude_2018.R
|
bd5f000ce8fd8926ec1a5621a0e4ec457cfb4d73
|
[] |
no_license
|
carolina-quiterio-ifood/APIs
|
b071279334c7a5991e9fd13d2a38552e3d09caf1
|
f595f40a5952555e23422847b4f726bcec42cbc5
|
refs/heads/master
| 2023-03-02T00:34:58.646927
| 2021-02-10T15:20:53
| 2021-02-10T15:20:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
gastos_estaduais_SP_com_saude_2018.R
|
# -*- coding: utf-8
# Abraji (https://www.abraji.org.br)
# Reinaldo Chaves (reinaldo@abraji.org.br)
# Example script showing how to use rsiconfi.
# Downloads state spending data for São Paulo from the SICONFI API.
#
#install.packages("devtools")
#devtools::install_github("tchiluanda/rsiconfi")

library(rsiconfi)
library(dplyr)
library(tidyr)

# Brazilian state (UF) codes:
# https://atendimento.tecnospeed.com.br/hc/pt-br/articles/360021494734-Tabela-de-C%C3%B3digo-de-UF-do-IBGE
# São Paulo is 35 - the "entity" argument.

# We look at Expenses by Function (annex I-E).
# get_account_dca() returns the possible account codes.
# Query year 2018, annex I-E and the code for SP.
# Full list of institution codes:
# https://siconfi.tesouro.gov.br/siconfi/pages/public/conteudo/conteudo.jsf?id=581 click "Tabela dos códigos de instituição utilizados no Siconfi - Código das Instituições"
df_conta_dca <- get_account_dca(2018, "I-E", c("35") )

## Fetch SP spending by function code.
# NOTE(review): the original comment labelled this "Saúde (11)" (health)
# but arg_cod_conta is "10" -- confirm which function code is intended.
# Also note the object is named ..._2019 but holds the 2018 query;
# 2019 data were not yet available in the API.
gasto_uf_sp_2019 <- get_dca(year = 2018,
                            annex = "I-E",
                            entity = "35",
                            arg_cod_conta = "10")

# With a vector of years we can download everything at once and compare
# across years afterwards.
gasto_uf_sp <- get_dca(year = c(2013, 2014, 2015, 2016, 2017, 2018),
                       annex = "I-E",
                       entity = "35",
                       arg_cod_conta = "10")
|
a264ce8abefa688a1cb576b822b9cfbb8f10bd60
|
7dbbe94a839b1c94a2d30b7d2042aa5eef121854
|
/zillow.R
|
4ef8b8bf8f6cd85118dc1d86e2145016c17871ab
|
[] |
no_license
|
mleegina/R-real-estate
|
05e62567aec0a80b0c8f925ef5035fe74254d3cc
|
3c8da344178aa1442e3393e3c456cef19b5f0ae1
|
refs/heads/master
| 2020-03-19T11:07:49.255578
| 2018-06-20T23:20:19
| 2018-06-20T23:20:19
| 136,433,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,940
|
r
|
zillow.R
|
library(ggplot2)
library(readr)
library(igraph)
library(dplyr)
library(tidyr)
library(plyr)
library(data.table)
library(ggmap)
library(maptools)
library(ggthemes)
library(rgeos)
library(broom)
library(gridExtra)
library(reshape2)
library(scales)
library(rgdal)
# Plot themes for ggplot and ggmap to be used throughout
plotTheme <- function(base_size = 12) {
  # Minimalist theme: all axis chrome removed, black text, grey facet
  # strips. (base_size is accepted for API symmetry but is not used.)
  blank <- element_blank()
  theme(
    axis.text = blank,
    axis.line = blank,
    axis.ticks = blank,
    axis.title = blank,
    panel.border = blank,
    panel.grid = blank,
    text = element_text(color = "black"),
    plot.title = element_text(size = 18, colour = "black"),
    plot.subtitle = element_text(face = "italic"),
    plot.caption = element_text(hjust = 0),
    panel.background = blank,
    strip.background = element_rect(fill = "grey80", color = "white"),
    strip.text = element_text(size = 12),
    plot.background = blank,
    legend.background = blank,
    legend.title = element_text(colour = "black", face = "italic"),
    legend.text = element_text(colour = "black", face = "italic")
  )
}
mapTheme <- function(base_size = 12) {
  # Map variant of plotTheme: keeps faint major grid lines but hides all
  # axis titles, text and ticks. (base_size is unused, kept for symmetry.)
  blank <- element_blank()
  theme(
    text = element_text(color = "black"),
    plot.title = element_text(size = 18, colour = "black"),
    plot.subtitle = element_text(face = "italic"),
    plot.caption = element_text(hjust = 0),
    axis.ticks = blank,
    panel.background = blank,
    panel.grid.major = element_line("grey80", size = 0.1),
    strip.text = element_text(size = 12),
    axis.title = blank,
    axis.text = blank,
    axis.title.x = blank,
    axis.title.y = blank,
    panel.grid.minor = blank,
    strip.background = element_rect(fill = "grey80", color = "white"),
    plot.background = blank,
    legend.background = blank,
    legend.title = element_text(colour = "black", face = "italic"),
    legend.text = element_text(colour = "black", face = "italic")
  )
}
########################### Graph 1 #############################
# Getting All Data pertaining to WA & convert to long format
price <- read.csv("price.csv", header = TRUE)
p1 <- na.omit(price)
p1.f <- filter(p1, State %in% c("WA"))
p1.wa <- gather(p1.f, year, price, Nov.10:Jan.17, factor_key = TRUE)

# Group price data by county and manipulate data.
# NOTE(review): group_by() without a following grouped verb has no
# effect on the base-R `$` operations below -- presumably a leftover.
p1.wa <- p1.wa %>%
  group_by(County)
p1.wa$Loc <- paste(p1.wa$County, p1.wa$State)

# Code to Geocode locations and save as new file
# (run once; results are cached in wageo.csv, read below)
# for(i in 1:nrow(p1.wa))
# {
#   result <- geocode(p1.wa$Loc[i], output = "latlona", source="google")
#   p1.wa$lon[i]<-as.numeric(result[1])
#   p1.wa$lat[i]<-as.numeric(result[2])
# }
#
# write.csv(p1.wa, "wageo.csv", row.names=FALSE)

wageo <- read.csv("wageo.csv", header = TRUE)
waCounty <- na.omit(wageo)

# Reduce data to get the areas with highest increase in price
# Sort Data by minimum price and group by county
wa.min <- waCounty %>%
  group_by(County) %>%
  slice(which.min(price))
# Sort data by max price and group by county
wa.max <- waCounty %>%
  group_by(County) %>%
  slice(which.max(price))
# Subtract min from max to get each county's price growth P
wa.growth <- within(merge(wa.min, wa.max, by="Loc"), {
  P <- price.y - price.x
})[, c("Loc", "P")]

# Had to Geocode again (cached in wageo2.csv)
# for(i in 1:nrow(wa.growth))
# {
#   result <- geocode(as.character(wa.growth$Loc[i]), output = "latlona", source="google",override_limit = TRUE)
#   wa.growth$long[i]<-as.numeric(result[1])
#   wa.growth$lat[i]<-as.numeric(result[2])
# }
#write.csv(wa.growth, "wageo2.csv", row.names=FALSE)

wageo2 <- read.csv("wageo2.csv", header = TRUE)
wac2 <- na.omit(wageo2)
# Removing the trailing " WA" from Location
wac2$Loc = substr(wac2$Loc, 1, nchar(as.character(wac2$Loc)) - 3)

# Create WA basic map
states <- map_data("state")
counties <- map_data("county")
wa_df <- subset(states, region == "washington")
wa_county <- subset(counties, region == "washington")

# Cleaning/Prepping data: align county key names across data frames
wc <- subset(wa_county, select = c(group, subregion))
wc <- unique(wc)
colnames(wc)[2] <- "County"
colnames(wac2)[1] <- "County"
wat <- subset(wac2, select = c(County, P))
colnames(wa_county)[6] <- "County"
wat$County <- tolower(wat$County)

# Joining the DF by "County".
# NOTE(review): `all = TRUE` is a base merge() argument; inner_join()
# silently ignores it -- confirm whether a full_join() was intended.
wa_final <- inner_join(wa_county, wat, by = c("County"), all = TRUE)
w2 <- na.omit(wa_final)

# Create the base map of WA
wa_base <- ggplot(data = wa_df, mapping = aes(x = long, y = lat, group = group)) +
  coord_fixed(1.3) +
  geom_polygon(color = "black", fill = "gray") + plotTheme()
# Add county lines
wa_base <- wa_base +
  geom_polygon(data = wa_county, fill = NA, color = "white") +
  geom_polygon(color = "black", fill = NA)

# Plotting the Graph, adding colors, labels
wamap <- wa_base +
  geom_polygon(data = w2, aes(fill = P), color = "white") +
  geom_polygon(color = "black", fill = NA) +
  scale_fill_gradient(low = "#a3fc25", high = "#ff2828",
                      space = "Lab", na.value = "grey50",
                      guide = "colourbar", trans = "log10",
                      labels = scales::dollar_format(prefix = "$"),
                      breaks = c(0, 30, 100, 300, 1000, 3000)) +
  labs(title="Rise in price of Rent", subtitle="WA by county (2010 - 2017)", fill = "Price") + plotTheme()
wamap

########################### Graph 2 #############################
# Read in data, convert to long format, manipulate data
ppsq <- read.csv("pricepersqft.csv", header = TRUE)
ppsq1 <- na.omit(ppsq)
# Take the average of the price after combining by county and state
pps <- aggregate(ppsq1[, 7:81], list(ppsq1$County, ppsq1$State), mean)
pps <- na.omit(pps)
pps <- gather(pps, year, price, November.2010:January.2017, factor_key = TRUE)
# Extract date in YYYY format
pps$year <- gsub("[^0-9]", "", pps$year)

# Create base map
states <- map_data("state")
counties <- map_data("county")
state <- ggplot(data = states, mapping = aes(x = long, y = lat, group = group)) +
  coord_fixed(1.3) +
  geom_polygon(color = "black", fill = "gray")
base <- state +
  geom_polygon(data = counties, fill = NA, color = "white") +
  geom_polygon(color = "black", fill = NA)

# Convert county keys to factor and average price per county/year
colnames(counties)[6] <- "Group.1"
counties$Group.1 <- as.factor(counties$Group.1)
pps$Group.1 <- tolower(pps$Group.1)
pps <- aggregate(pps[, 4], list(pps$Group.1, pps$year), mean)

# Make factor levels equal in order to join two df together
combined <- sort(union(levels(counties$Group.1), levels(pps$Group.1)))
n <- inner_join(mutate(counties, Group.1=factor(Group.1, levels=combined)),
                mutate(pps, Group.1=factor(Group.1, levels=combined)))
n$x <- as.numeric(n$x)

# For each year, print a graph (a top-level lapply auto-prints the
# resulting list of ggplots)
lapply(sort(unique(n$Group.2)), function(i) {
  base +
    geom_polygon(data = n[n$Group.2==i,], aes(fill = x), color = "white") +
    geom_polygon(color = "black", fill = NA) +
    scale_fill_gradient("Price/sqf", low = "#a3fc25", high = "#ff2828",
                        space = "Lab", na.value = "grey50",
                        guide = "colourbar", trans = "log10",
                        labels = scales::dollar_format(prefix = "$"),
                        breaks = c(0, .5, 1, 2, 3)) +
    labs(title="Average price per square foot by county ", subtitle=i, fill = "Price") + plotTheme()
})

########################### Graph 3 #############################
sfhomes <- read.csv("sfhomes.csv", header = TRUE, as.is = T)
sfhs <- na.omit(sfhomes)
sfhs$SaleYr <- as.factor(sfhs$SaleYr)

# Plot the neighborhoods in SF (shapefile in the "SF" directory)
nb <- readOGR("SF", "geo_export_6cb760e3-ca2c-47f6-9af2-01ec1009ce71")
plot(nb)
bbox <- nb@bbox

# Add some padding around the shapefile's bounding box
sf_bbox <- c(left = bbox[1, 1] - .01, bottom = bbox[2, 1] - .005,
             right = bbox[1, 2] + .01, top = bbox[2, 2] + .005)

# Download basemap (requires network access)
basemap <- get_stamenmap(
  bbox = sf_bbox,
  zoom = 13,
  maptype = "toner-lite")

ggmap(basemap) +
  geom_point(data = sfhs, aes(x = long, y = lat, color = SalePrice),
             size = 0.25, alpha = .6) +
  facet_wrap(~SaleYr, scales = "fixed", ncol = 4) +
  coord_map() +
  mapTheme() + theme(legend.position = c(.85, .25)) +
  scale_color_gradientn("Price",
                        colors = c("#e9ff00","#ffff00","#ffd400","#ffbf00","#ff9000","#ff6100","#ff0000"),
                        labels = scales::dollar_format(prefix = "$")) +
  labs(title="San Francisco home prices",
       subtitle="2009 - 2015")

########################### Graph 4 #############################
# Eviction Heat Map Data for SF
evict <- read.csv("Eviction_Notices.csv", header = TRUE, as.is = T)
sfrent <- read.csv("sfrent.csv", header = TRUE, as.is = T)
e1 <- na.omit(evict)
sfr <- na.omit(sfrent)

# Convert eviction-reason columns to numeric.
# NOTE(review): suppressWarnings() hides coercion NAs here; rows that
# fail to parse are then dropped by na.omit() below.
e1[,7:25] <- suppressWarnings(sapply(e1[,7:25,drop=FALSE], as.numeric))
e1 <- na.omit(e1)

# Separate "Location" col ("(lat, long)") into "Lat", "Long"
e1 <- e1 %>% extract("Location", c("Lat", "Long"), "\\(([^,]+), ([^)]+)\\)")
e1$freq <- rowSums(e1[,7:25])
e1
e1[,29:30] <- suppressWarnings(sapply(e1[,29:30,drop=FALSE], as.numeric))

# Plot the neighborhoods in SF
nb <- readOGR("SF", "geo_export_6cb760e3-ca2c-47f6-9af2-01ec1009ce71")
plot(nb)
bbox <- nb@bbox
# Add some padding
sf_bbox <- c(left = bbox[1, 1] - .01, bottom = bbox[2, 1] - .005,
             right = bbox[1, 2] + .01, top = bbox[2, 2] + .005)
# Download basemap
basemap <- get_stamenmap(
  bbox = sf_bbox,
  zoom = 13,
  maptype = "toner-lite")

# NOTE(review): the bare `e1`, `sapply(e1, class)` and `sfr` lines below
# only print to the console -- presumably debugging leftovers.
e1 <- data.frame(e1, stringsAsFactors=FALSE)
e1
sapply(e1, class)
sfr

ggmap(basemap) +
  stat_density2d(data = e1, aes(x = Long, y = Lat, fill = ..level.., alpha = ..level..),
                 size = 0.01, bins = 16, geom = 'polygon') +
  scale_fill_gradient(low = "green", high = "red") +
  facet_wrap(~File.Date) +
  theme(axis.title = element_blank(),
        axis.text = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        axis.ticks = element_blank()) +
  theme(legend.position="none") +
  labs(title="Heat Map of San Francisco Evictions",
       subtitle="1997 - 2015")
|
58e6cbb3038cc4ca98ce7f1c4f83911f1b1291c2
|
a19fc7dd26a2acdda9b2442fc490cf29928eade7
|
/multiSpeciesTeDistribution/Fig_1_ploter.R
|
891dca318fa9ab7917e37d1c595d4876d742f763
|
[] |
no_license
|
ReubenBuck/Domain_manuscript_scripts
|
96d2fed7042f5a0b6e7d79f10f80f20128b6e4e8
|
5c290e5334920e9dd70285cce242eefee3fd8be1
|
refs/heads/master
| 2020-04-16T19:30:03.535423
| 2016-10-23T23:08:53
| 2016-10-23T23:08:53
| 46,104,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,897
|
r
|
Fig_1_ploter.R
|
## lets do PCA analysis
# Per-species PCA of TE-family densities in genomic bins; this section
# loads the per-species bin-count tables and normalises them.
library(ggplot2)
library(ks)
library(gplots)
library(GenomicRanges)
require(ggbio)

setwd("~/Desktop/Dist_matrix_TE_div/")
# Presumably defines reuben.biplot() and colfunc() used further below.
source(file="~/Desktop/Domain_manuscript/Domain_manuscript_scripts/functions.R")

# Species / run settings
spec1 <- "Human"
spec2 <- "Horse"
spec3 <- "Bovine"
spec4 <- "Dog"
spec5 <- "Mouse"
ucsc1 <- "hg19"
rem.un <- "yes"        # drop unplaced/random/mito scaffolds later
no.xy <- F
keep.limit <- 1350000  # min known bases for a bin to be retained later
mb <- 1000000
# bin.size is set so that the indexing stays consistent across pipeline
bin.size = 500000

# trying out the unscaled method: feature-inclusion switches
keep.NGenes = "yes"
keep.NG4s = "no"
keep.NCpGI = "no"
keep.CpGBP = "no"
keep.GC = "no"
SCALE = "no"

# create objects into which i will store the binsizes
# make a loop here to get all the species' names
s1name <- paste("count_tables/",spec1, "_AllBinCounts.txt", sep = "")
s2name <- paste("count_tables/",spec2, "_AllBinCounts.txt", sep = "")
s3name <- paste("count_tables/",spec3, "_AllBinCounts.txt", sep = "")
s4name <- paste("count_tables/",spec4, "_AllBinCounts.txt", sep = "")
s5name <- paste("count_tables/",spec5, "_AllBinCounts.txt", sep = "")
s1 <- read.table(s1name, header = TRUE)
s2 <- read.table(s2name, header = TRUE)
s3 <- read.table(s3name, header = TRUE)
s4 <- read.table(s4name, header = TRUE)
s5 <- read.table(s5name, header = TRUE)
slist <- list(s1,s2,s3,s4,s5)

# Normalise each species' table:
# - drop bins with fewer known bases than bin.size
# - GC as a percentage of known bases
# - feature counts rescaled to a per-Mb rate over known bases
# - optionally drop feature columns / z-scale per the switches above
# - rename the first four columns and renumber binID 1..n
for(i in seq(along=slist)){
  count <- slist[[i]]
  count <- count[count$Known >= bin.size,]
  count$GC <- count$GC/count$Known*100
  count[,5:(length(count)-2)] <- ((count[,5:(length(count)-2)]/count$Known) * mb)
  if(keep.NGenes == "no"){count <- count[,!(colnames(count) == "NGenes")]}
  if(keep.NG4s == "no"){count <- count[,!(colnames(count) == "NG4s")]}
  if(keep.NCpGI == "no"){count <- count[,!(colnames(count) == "NCpGI")]}
  if(keep.CpGBP == "no"){count <- count[,!(colnames(count) == "CpGBP")]}
  if(keep.GC == "no"){count <- count[,!(colnames(count) == "GC")]}
  if(SCALE == "yes"){count[,5:(length(count)-1)] <- scale(count[,5:(length(count)-1)])}
  #count <- count[,!(colnames(count) == "Known")]
  colnames(count)[1:4] <- c("chr", "binID", "start", "end")
  count$binID <- 1:dim(count)[1]
  slist[[i]] <- count
}
# Per-species tables of (binID, Known) used below to filter out bins
# with too little assembled (known) sequence.
KnownS1 <- data.frame(slist[[1]]$binID, slist[[1]]$Known)
KnownS2 <- data.frame(slist[[2]]$binID, slist[[2]]$Known)
KnownS3 <- data.frame(slist[[3]]$binID, slist[[3]]$Known)
KnownS4 <- data.frame(slist[[4]]$binID, slist[[4]]$Known)
KnownS5 <- data.frame(slist[[5]]$binID, slist[[5]]$Known)

# Drop the Known column now that it has been recorded above.
dropKnown <- function(counts) counts[, !(colnames(counts) == "Known")]
s1 <- dropKnown(slist[[1]])
s2 <- dropKnown(slist[[2]])
s3 <- dropKnown(slist[[3]])
s4 <- dropKnown(slist[[4]])
s5 <- dropKnown(slist[[5]])

# Remove unplaced ("U"), random ("_") and mitochondrial ("M") scaffolds;
# this replaces five copy-pasted if-blocks in the original.
dropOddChr <- function(counts) {
  for (pat in c("U", "_", "M")) {
    hits <- grep(pat, counts$chr)
    if (length(hits) > 0) counts <- counts[-hits, ]
  }
  counts
}
if (rem.un == "yes") {
  s1 <- dropOddChr(s1)
  s2 <- dropOddChr(s2)
  s3 <- dropOddChr(s3)
  s4 <- dropOddChr(s4)
  s5 <- dropOddChr(s5)
}

human <- s1
horse <- s2
bovine <- s3
dog <- s4
mouse <- s5

# Bins retained: those with more than keep.limit known bases.
# BUGFIX: each species' bin IDs are now taken from its own Known table.
# Previously keep.horse/keep.bovine/keep.dog indexed KnownS1 (human) and
# keep.mouse indexed KnownS2 (horse) with another species' mask -- a
# copy-paste slip that yields NA bin IDs whenever bin counts differ
# between genomes.
keep.human <- KnownS1[KnownS1[,2] > keep.limit, 1]
keep.horse <- KnownS2[KnownS2[,2] > keep.limit, 1]
keep.bovine <- KnownS3[KnownS3[,2] > keep.limit, 1]
keep.dog <- KnownS4[KnownS4[,2] > keep.limit, 1]
keep.mouse <- KnownS5[KnownS5[,2] > keep.limit, 1]

human <- human[human$binID %in% keep.human, ]
horse <- horse[horse$binID %in% keep.horse, ]
bovine <- bovine[bovine$binID %in% keep.bovine, ]
dog <- dog[dog$binID %in% keep.dog, ]
mouse <- mouse[mouse$binID %in% keep.mouse, ]
# PCA per species: columns 1-4 are chr/binID/start/end, so only columns
# 5+ (the per-Mb feature densities) enter the PCA. Values are
# sqrt-transformed before centring/scaling -- presumably a
# variance-stabilising step for the count-derived densities.
pca.human <- prcomp(sqrt(human[,5:ncol(human)]), scale.=T, center=T)
pca.horse <- prcomp(sqrt(horse[,5:ncol(horse)]), scale.=T, center=T)
pca.bovine <- prcomp(sqrt(bovine[,5:ncol(bovine)]), scale.=T, center=T)
pca.dog <- prcomp(sqrt(dog[,5:ncol(dog)]), scale.=T, center=T)
pca.mouse <- prcomp(sqrt(mouse[,5:ncol(mouse)]), scale.=T, center=T)
### Arrow colours for the biplots, so the important loadings stand out.
# Per species: ancestral repeats (SINE2_MIR, LINE_L2) in blue, LINE_L1
# in red, the clade-specific SINE family in dark green (plus "ov"
# families in dark red for bovine -- labelled BovB in the legend below),
# and the gene count (NGenes) in purple. All remaining rows stay black
# and get smaller text / thinner arrows via y_cex_*.
ycols_human <- rep("black", nrow(pca.human$rotation))
ycols_human[rownames(pca.human$rotation) == "SINE2_MIR"] = "blue"
ycols_human[rownames(pca.human$rotation) == "LINE_L2"] = "blue"
ycols_human[rownames(pca.human$rotation) == "LINE_L1"] = "red"
ycols_human[grep("SINE1_7SL",rownames(pca.human$rotation))] = "darkgreen"
ycols_human[rownames(pca.human$rotation) == "NGenes"] = "purple"
y_cex_human = rep(.9, nrow(pca.human$rotation))
y_cex_human[ycols_human == "black"] =.7

ycols_horse <- rep("black", nrow(pca.horse$rotation))
ycols_horse[rownames(pca.horse$rotation) == "SINE2_MIR"] = "blue"
ycols_horse[rownames(pca.horse$rotation) == "LINE_L2"] = "blue"
ycols_horse[rownames(pca.horse$rotation) == "LINE_L1"] = "red"
ycols_horse[grep("ERE",rownames(pca.horse$rotation))] = "darkgreen"
ycols_horse[rownames(pca.horse$rotation) == "NGenes"] = "purple"
y_cex_horse = rep(.9, nrow(pca.horse$rotation))
y_cex_horse[ycols_horse == "black"] =.7

# Bovine: BOV families dark green; note the "ov" pattern below also
# rewrites any name containing "ov" (including "BOV" matches) dark red.
ycols_bovine <- rep("black", nrow(pca.bovine$rotation))
ycols_bovine[rownames(pca.bovine$rotation) == "SINE2_MIR"] = "blue"
ycols_bovine[rownames(pca.bovine$rotation) == "LINE_L2"] = "blue"
ycols_bovine[rownames(pca.bovine$rotation) == "LINE_L1"] = "red"
ycols_bovine[grep("BOV",rownames(pca.bovine$rotation))] = "darkgreen"
ycols_bovine[grep("ov",rownames(pca.bovine$rotation))] = "darkred"
ycols_bovine[rownames(pca.bovine$rotation) == "NGenes"] = "purple"
y_cex_bovine = rep(.9, nrow(pca.bovine$rotation))
y_cex_bovine[ycols_bovine == "black"] =.7

ycols_dog <- rep("black", nrow(pca.dog$rotation))
ycols_dog[rownames(pca.dog$rotation) == "SINE2_MIR"] = "blue"
ycols_dog[rownames(pca.dog$rotation) == "LINE_L2"] = "blue"
ycols_dog[rownames(pca.dog$rotation) == "LINE_L1"] = "red"
ycols_dog[grep("Can",rownames(pca.dog$rotation))] = "darkgreen"
ycols_dog[rownames(pca.dog$rotation) == "NGenes"] = "purple"
y_cex_dog = rep(.9, nrow(pca.dog$rotation))
y_cex_dog[ycols_dog == "black"] =.7

ycols_mouse <- rep("black", nrow(pca.mouse$rotation))
ycols_mouse[rownames(pca.mouse$rotation) == "SINE2_MIR"] = "blue"
ycols_mouse[rownames(pca.mouse$rotation) == "LINE_L2"] = "blue"
ycols_mouse[rownames(pca.mouse$rotation) == "LINE_L1"] = "red"
ycols_mouse[rownames(pca.mouse$rotation) == "NGenes"] = "purple"
ycols_mouse[grep("7SL",rownames(pca.mouse$rotation))] = "darkgreen"
y_cex_mouse = rep(.9, nrow(pca.mouse$rotation))
y_cex_mouse[ycols_mouse == "black"] =.7
# Fig. 1 biplots: one page per species. Some species' axes are
# sign-flipped so that equivalent loadings point the same way across
# panels (a PCA component's sign is arbitrary).
# NOTE(review): reuben.biplot() is not defined in this file; presumably
# it comes from the source()d functions.R -- confirm.
pdf(file = "~/Desktop/GSA_poster/plots_pca.pdf", onefile=T)
reuben.biplot(pca.human$x[,c(1,2)],
              pca.human$rotation[,c(1,2)],
              x.col= "grey",
              y.col=ycols_human,
              text.col=ycols_human,
              text.cex=y_cex_human,
              arrow.lwd=y_cex_human*2.5)
legend("bottomright",legend=spec1, bty="n", cex=1.5)

# green needs to go up: flip PC2 for horse
df.x <- pca.horse$x[,c(1,2)]
df.y <- pca.horse$rotation[,c(1,2)]
df.x[,2] = df.x[,2] * -1
df.y[,2] = df.y[,2] * -1
reuben.biplot(df.x[,c(1,2)],
              df.y[,c(1,2)],
              x.col= "grey",
              y.col=ycols_horse,
              text.col=ycols_horse,
              text.cex=y_cex_horse,
              arrow.lwd=y_cex_horse*2.5)
legend("bottomright",legend=spec2, bty="n", cex=1.5)

# blue needs to turn around: flip both PCs for bovine
df.x <- pca.bovine$x[,c(1,2)] * -1
df.y <- pca.bovine$rotation[,c(1,2)] * -1
reuben.biplot(df.x[,c(1,2)],
              df.y[,c(1,2)],
              x.col= "grey",
              y.col=ycols_bovine,
              text.col=ycols_bovine,
              text.cex=y_cex_bovine,
              arrow.lwd=y_cex_bovine*2.5)
legend("bottomright",legend=spec3, bty="n", cex=1.5)

reuben.biplot(pca.dog$x[,c(1,2)],
              pca.dog$rotation[,c(1,2)],
              x.col= "grey",
              y.col=ycols_dog,
              text.col=ycols_dog,
              text.cex=y_cex_dog,
              arrow.lwd=y_cex_dog*2.5)
legend("bottomright",legend=spec4, bty="n", cex=1.5)

# green needs to go up: flip both PCs for mouse
df.x <- pca.mouse$x[,c(1,2)]*-1
df.y <- pca.mouse$rotation[,c(1,2)] *-1
reuben.biplot(df.x[,c(1,2)],
              df.y[,c(1,2)],
              x.col= "grey",
              y.col=ycols_mouse,
              text.col=ycols_mouse,
              text.cex=y_cex_mouse,
              arrow.lwd=y_cex_mouse*2.5)
legend("bottomright",legend=spec5, bty="n", cex=1.5)
dev.off()

# Standalone colour legend for the poster, matching the arrow colours
# assigned above (darkred appears only in the bovine panel).
pdf(file = "~/Desktop/GSA_poster/legend.pdf")
plot(NA)
legend("bottomleft", c("Ancestral element", "Clade specific SINE", "Number of genes", "L1", "BovB" ),
       fill = c("blue", "darkgreen", "purple", "red", "darkred"),
       cex=2, box.col = "white")
dev.off()

##### so lets turn them the right way around and colour them in
# Transposed PCA for dog (features as observations), labelled by name.
pca.2 <- prcomp(t(scale(dog[,5:ncol(dog)])), scale.=TRUE)
plot(pca.2$x, cex = .5)
text(pca.2$x, labels=rownames(pca.2$x))

# Colour ramps for the heatmap keys below.
my_palette <- colorRampPalette(c("blue", "white", "red"))(n = 19)
my_p_val_palette <- colorRampPalette(c( "white", "black"))(n = 19)
pv <- seq(1,0, by = -.1)
pv.text <- rep("", length(pv))
pv.text[as.integer(quantile(1:(length(pv))))] <- as.character(pv[as.integer(quantile(1:(length(pv))))])

# Heatmaps drawn only to harvest their colour-key strips (p-value and
# Pearson's r); the matrices are synthetic gradients.
heatmap.2(matrix(seq(0,1, length.out = 100),nrow=10),scale="none", col = my_p_val_palette,dendrogram="n",keysize=2,
          # symkey = TRUE,
          breaks = seq(from = 0, to = 1, length = 20),
          trace = "none",
          # symbreaks = TRUE,
          density.info = "none",
          key.title = "",
          key.xlab = "P-value"
)
heatmap.2(matrix(seq(-1.01,1.01, length.out = 100),nrow=10),scale="none", col = my_palette,dendrogram="n",keysize=2,
          # symkey = TRUE,
          breaks = seq(from = -1, to = 1, length = 20),
          trace = "none",
          # symbreaks = TRUE,
          density.info = "none",
          key.title = "",
          key.xlab = "Pearson's r"
)

# Standalone colour keys for the poster.
# NOTE(review): colfunc() is not defined in this file; presumably it is
# provided by the source()d functions.R -- confirm.
pdf(file = "~/Desktop/GSA_poster/keys.pdf", onefile = T)
layout(matrix(1:3,ncol=3), width = c(1,1,1),height = c(1,1,1))
legend_image <- as.raster(my_palette[19:1])
plot(c(0,2),c(-1,1),type = 'n', axes = F,xlab = '', ylab = '', main = "Pearson's r", cex.main=2)
text(x=1.5, y = c(1,0.5,0,-0.5, -1), label = c(1,0.5,0,-0.5, -1), cex = 2)
rasterImage(legend_image, 0, -1, 1,1)
plot(1:20, 1:20, pch = 19, cex=2, col = colfunc(20))
plot(1:20, 1:20, pch = 19, cex=2, col = colfunc(20))
legend_image <- as.raster(my_p_val_palette[19:1])
plot(c(0,2),c(0,1),type = 'n', axes = F,xlab = '', ylab = '', main = "P_value", cex.main=2)
text(x=1.5, y = c(0,0.2,.4,.6,.8, 1), label = c(0,0.2,.4,.6,.8, 1), cex = 2)
rasterImage(legend_image, 0, 0, 1,1)
dev.off()

# I think this script has what I'm looking for
# hopefully for each of these species we have already converted them over.
|
17c00cfb209460292e7d98ad9bc60b442ffb83a1
|
8e341461a2e3e615dde7ca12703e9437803605ef
|
/analyses/scripts/FSI_sitesRisk.R
|
20192f113f0b854d6b90f13a7160fd4da0daeaed
|
[] |
no_license
|
cchambe12/freezingexperiment
|
2aa50840fe98ee434970759437cbe5d8dfb4946b
|
a1cf6210979088833706763d858dafa2f8312cae
|
refs/heads/master
| 2021-01-11T20:06:40.705078
| 2018-12-07T19:59:36
| 2018-12-07T19:59:36
| 79,467,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,942
|
r
|
FSI_sitesRisk.R
|
## 31 August 2017
# Evaluate false-spring risk (FSI) across four field sites from daily
# weather records, then find each site's last spring freeze date.
# The per-site pipeline was previously copy-pasted six times; it is now
# factored into helpers, with identical outputs and object names.

# Clear workspace
rm(list=ls()) # remove everything currently held in the R memory
options(stringsAsFactors=FALSE)
graphics.off()

# Load libraries
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)

# Set Working Directory
setwd("~/Documents/git/freezingexperiment/analyses/input")

d <- read.csv("weather_allsites.csv", header=TRUE)
d <- dplyr::rename(d, doy=Julian.Date)
d$year <- substr(d$Date, 7, 10)

# Subset one site's records (>= 1990, day-of-year <= 240) and flag
# potential false-spring days:
#   gdd   - daily growing degree-days above a 5 degC base
#   count - within-year cumulative GDD, lagged by one day
#   frz   - 1 when the daily minimum reaches -2.2 degC or below
#   fs    - TRUE on freeze days inside the vulnerable 200-300 GDD window
prep_site <- function(weather, site_code) {
  dat <- weather %>% filter(site == site_code) %>% filter(year >= 1990)
  dat <- dat %>% filter(doy <= 240)
  dat$gdd <- dat$AirTMax - 5 # Can be 0 here if want 0 degC as threshold
  dat$gdd <- ifelse(dat$gdd > 0, dat$gdd, 0)
  dat$count <- ave(
    dat$gdd, dat$year,
    FUN=function(x) cumsum(c(0, head(x, -1)))
  )
  dat$frz <- ifelse((dat$AirTMin <= -2.2), 1, 0)
  dat$fs <- ifelse((dat$count >= 200 & dat$frz == 1 & dat$count <= 300), TRUE, NA)
  dat
}

# One row per year in which a false spring (fs == TRUE) occurred.
fsi_years <- function(dat) {
  fsi <- dat %>% dplyr::select(year, fs)
  fsi <- fsi[!duplicated(fsi), ]
  na.omit(fsi)
}

# Latest freeze day (frz == 1) per year. The date is copied into a
# column named `col` so the station-specific suffixes (last/last1/last2)
# are preserved -- the Grant full_join()s below depend on the joined
# frames NOT sharing a `last` column.
last_freeze <- function(dat, col = "last") {
  last <- dat %>% filter(frz > 0)
  last <- last[order(last$doy, last$year), ]
  last <- last[!duplicated(last$year, fromLast=TRUE), ]
  last[[col]] <- last$Date
  last
}

### Harvard Forest #####
hf <- prep_site(d, "hf")
hf.fsi <- fsi_years(hf)

### White Mountains #####
wm <- prep_site(d, "bart")
wm.fsi <- fsi_years(wm)

### Grant ##### Need to find average because two stations...
gr <- prep_site(d, "berlin")
gr.fsi <- fsi_years(gr)

gr1 <- prep_site(d, "merr")
gr1.fsi <- fsi_years(gr1)

gr2 <- prep_site(d, "ct")
gr2.fsi <- fsi_years(gr2)

### Saint Hipp #####
sh <- prep_site(d, "sh")
sh.fsi <- fsi_years(sh)

############### Find Last Freeze Dates for each site.....
## Harvard Forest ##
hf.last <- last_freeze(hf)
#write.csv(hf.last, file="~/Documents/git/freezingexperiment/analyses/output/hf_fsi.csv", row.names = FALSE)

## White Mountains ##
wm.last <- last_freeze(wm)

## Grant(s)...
# 1:
gr.last <- last_freeze(gr)
# 2:
gr1.last <- last_freeze(gr1, col = "last1")
# 3:
gr2.last <- last_freeze(gr2, col = "last2")
gr.last <- full_join(gr.last, gr1.last)
gr.last <- full_join(gr.last, gr2.last)

## Saint Hipp ##
sh.last <- last_freeze(sh)
|
0836ca9866d09f79fccb431c112d07fda949aeff
|
5f1f5a4a640fc3e953b1f7899c7385c1f96b300a
|
/man/bmedian-methods.Rd
|
13c772694667c3de1e2e733532f6b9746ab8e7d6
|
[] |
no_license
|
katenambiar/pmpa
|
d947d2690d3146476b7aa3040958c6dbe302cc57
|
977b2b76e7446bd05be40cb1acc636dca445613f
|
refs/heads/master
| 2023-03-16T18:14:52.546654
| 2016-11-06T18:27:02
| 2016-11-06T18:27:02
| 4,173,902
| 0
| 1
| null | 2023-03-13T04:39:47
| 2012-04-29T11:22:25
|
R
|
UTF-8
|
R
| false
| false
| 507
|
rd
|
bmedian-methods.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/accessor-methods.R
\docType{methods}
\name{bmedian}
\alias{bmedian}
\alias{bmedian,MultiSet-method}
\title{Median Background Intensity Accessor}
\usage{
bmedian(x)
\S4method{bmedian}{MultiSet}(x)
}
\arguments{
\item{x}{MultiSet object}
}
\value{
matrix of median background intensities
}
\description{
Extracts the matrix of median background intensities (bMedian)
from a MultiSet object created by \link{readArrays}
}
|
57b7af6cccd8185ddf515117047111b1887646ab
|
5e830125d3a6d5e0b9309b990efe592c270724ca
|
/Production/Model/APM/man/set_param_num.Rd
|
05e224e6d154be673cca7b740a17cf40c54de18d
|
[
"CC0-1.0"
] |
permissive
|
ElderResearch/DAPM
|
0d8a0dd921c75a328a192419a0ad86073ca10e69
|
936e0d68298667a4e62998bc6d7f8f2cda819df0
|
refs/heads/master
| 2021-01-19T07:46:21.187143
| 2019-01-23T17:17:32
| 2019-01-23T17:17:32
| 87,570,226
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 453
|
rd
|
set_param_num.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{set_param_num}
\alias{set_param_num}
\title{Assign a parameter or optionally fill from NRDB}
\usage{
set_param_num(...)
}
\arguments{
\item{...}{Arguments to pass to \code{set_param}.}
}
\value{
A numeric.
}
\description{
Assign a parameter or optionally fill from NRDB
}
\examples{
\dontrun{
i <- set_param_num(param = "K", con = db_conn$conn, input = k)
}
}
|
9a3f1abcc6e503f42d04eae61f3da3828a53142b
|
afade7448d058e7c099268aaa7c41b2461e691bc
|
/Dia 1/voice-gender-master/Web/ui.R
|
95317f0f7f365171a4c9c31a6f7ba39d3df5be2a
|
[] |
no_license
|
JuanCorp/TallerML
|
6c7e487b6f7ec0c95a7e7dc53819dac1e52edb7b
|
a6277c36a8ac4f6d2247c3f1dab477b86e96f5dc
|
refs/heads/master
| 2021-01-19T12:40:22.899983
| 2017-11-23T11:37:48
| 2017-11-23T11:37:48
| 100,796,498
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,789
|
r
|
ui.R
|
## Including the required R packages.
#packages <- c('shiny', 'shinyjs')
#if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
# install.packages(setdiff(packages, rownames(installed.packages())))
#}
library(shiny)
library(shinyjs)
shinyUI(fluidPage(
conditionalPanel(condition='!output.json',
tags$head(tags$script(src = "script.js"),
tags$script(src = "google-analytics.js"),
tags$style(HTML("a { font-weight: bold; } .shiny-output-error-validation { color: red; } .shiny-progress .progress { background-color: #ff00ff; } .fa-info { margin: 0 0 0 10px; cursor: pointer; font-size: 15px; color: #808080; } .fa-headphones { margin: 0 5px 0 2px; } .average-pitch { font-size: 18px; } .detail-summary { font-size: 16px; } .detail-summary .detail-header { font-size: 18px; margin: 0 0 10px 0; } .detail-summary span { font-weight: bold; }"))
),
titlePanel('Cual es tu genero segun tu voz?'),
div(style='margin: 30px 0 0 0;'),
mainPanel(width = '100%',
useShinyjs(),
h4(id='main', 'Sube un archivo .WAV or escribe un url de ', a(href='http://vocaroo.com', target='_blank', 'vocaroo.com'), ' o ', a(href='http://clyp.it', target='_blank', 'clyp.it'), ' para detectar el genero.'),
div(style='margin: 20px 0 0 0;'),
inputPanel(
div(id='uploadDiv', class='', style='height: 120px; border-right: 1px solid #ccc;',
fileInput('file1', 'Subir Archivo WAV.', accept = c('audio/wav'), width = '100%')
),
div(id='urlDiv', class='',
strong('Url (vocaroo o clyp.it)'),
textInput('url', NULL, width = '100%'),
actionButton('btnUrl', 'Cargar Url', class='btn-primary', icon=icon('cloud'))
)
),
div(style='margin: 20px 0 0 0;'),
div(id='result', style='font-size: 22px;', htmlOutput('content')),
div(style='margin: 20px 0 0 0;'),
conditionalPanel(condition='output.content != null && output.content.indexOf("Please Enter") == -1',
tabsetPanel(id='graphs',
tabPanel('Frequency Graph', plotOutput("graph1", width=1000, height=500)),
tabPanel('Spectrogram', plotOutput("graph2", width=1000, height=500))
),
div(style='margin: 20px 0 0 0;')
),
h4('Truquitos'),
p('- La entonación y tonos son factores importantes en la clasificacion hombre/mujer.'),
p('- Voces clasificadas como masculinas suelen ser monotonas(tono bajo).'),
p('- Voces clasificadas como femeninas tienden a ser de tono alto y de cambios de frequencia alta.'),
p('- Voces clasificadas como femeninas suelen aumentar en tono al final de una oración.'),
div(style='margin: 20px 0 0 0;'),
span(style='font-style: italic;', 'Forkeado de https://github.com/primaryobjects/voice-gender')
))
))
|
6e592580f349c4f72a264bcdd2b63d26c6e41a63
|
5629ffd9a3aed0f20ba7100b0b51525e9d5d2c7c
|
/waves/waveHspVCyto.R
|
c212e6fafcfec439a62f81bb5e033645d85e1313
|
[] |
no_license
|
Danko-Lab/polymeraseWaves
|
d77434fe2d59842ed54699df7cd9bc698549f02f
|
7bcc9e925cb026dee7269fd1bcd785ef280f24f7
|
refs/heads/master
| 2021-08-17T03:17:45.486865
| 2020-04-21T13:15:07
| 2020-04-21T13:15:07
| 31,675,302
| 0
| 3
| null | 2020-04-21T04:09:56
| 2015-03-04T19:22:10
|
R
|
UTF-8
|
R
| false
| false
| 2,922
|
r
|
waveHspVCyto.R
|
##
## callWaves.R -- identify the position of the leading edge of the wave of Pol II as it moves
## across gene bodies.
##
require(groHMM)
require(bigWig)
source("../polymeraseWave.bw.R")
## BigWig files.
pth <- "/home/cgd24/storage/home/work/polymeraseWaves/data/bigWigs/"
wtNHSpl <- paste(pth,"WT_NHS_BRs_pl.bigWig",sep="")
wtNHSmn <- paste(pth,"WT_NHS_BRs_mn.bigWig",sep="")
wt144pl <- paste(pth,"WT_144sHS_BRs_pl.bigWig",sep="")
wt144mn <- paste(pth,"WT_144sHS_BRs_mn.bigWig",sep="")
wt12pl <- paste(pth,"WT_12HS_BRs_pl.bigWig",sep="")
wt12mn <- paste(pth,"WT_12HS_BRs_mn.bigWig",sep="")
koNHSpl <- paste(pth,"Hsf1KO_NHS_BRs_pl.bigWig",sep="")
koNHSmn <- paste(pth,"Hsf1KO_NHS_BRs_mn.bigWig",sep="")
ko144pl <- paste(pth,"Hsf1KO_144sHS_BRs_pl.bigWig",sep="")
ko144mn <- paste(pth,"Hsf1KO_144sHS_BRs_mn.bigWig",sep="")
ko12pl <- paste(pth,"Hsf1KO_12HS_BRs_pl.bigWig",sep="")
ko12mn <- paste(pth,"Hsf1KO_12HS_BRs_mn.bigWig",sep="")
## Bed files.
bedPth <- "/home/cgd24/storage/home/work/polymeraseWaves/data/beds/"
readBed <- function(filename, minSize) {
  # Read a BED-format table and keep only features wider than minSize bp.
  bed <- read.table(filename)
  feature.width <- bed[[3]] - bed[[2]]
  bed[feature.width > minSize, ]
}
cleanup <- function(f.d) {
  # Keep only waves that pass both minimum filters and show strong
  # (KL divergence > 1) support for the parametric model.
  passes <- f.d$minOfMax & f.d$minOfAvg & f.d$KLdivParametric > 1
  f.d[passes, ]
}
#################################
## Compare wildtype and HSF1ko
minSize=20000
Hsp_bed <- readBed(paste(bedPth, "HspGenes_mm10.bed", sep=""), minSize=minSize)[,c(1:3,6,4:5)]
Cyt_bed <- readBed(paste(bedPth, "CytoskeletonGenes_UpHC_WT-144sHS_unique.bed", sep=""), minSize=minSize)[,c(1:3,6,4:5)]
approx=5000
hsp_144 <- polymeraseWaveBW(wt144pl, wt144mn, wtNHSpl, wtNHSmn, Hsp_bed, TSmooth= 20, approxDist=approx, returnVal="simple", prefix="IMG/Hsp.")
cyt_144 <- polymeraseWaveBW(wt144pl, wt144mn, wtNHSpl, wtNHSmn, Cyt_bed, TSmooth= 20, approxDist=approx, returnVal="simple", prefix="IMG/Cyt.")
pdf("Hsf_Cyto.144s.pdf")
hist(cleanup(hsp_144)$Rate, breaks=seq(0,40000,3000))
hist(cleanup(cyt_144)$Rate, breaks=seq(0,40000,3000))
indx_hsp <- hsp_144$minOfMax & hsp_144$minOfAvg & hsp_144$KLdivParametric > 1
indx_cyt <- cyt_144$minOfMax & cyt_144$minOfAvg & cyt_144$KLdivParametric > 1
hsp.cdf <- ecdf(hsp_144$Rate[indx_hsp])
cyt.cdf <- ecdf(cyt_144$Rate[indx_cyt])
plot(hsp.cdf, col="dark red", xlim=c(0, 40000), ylim=c(0,1))
par(new=TRUE)
plot(cyt.cdf, col="black", xlim=c(0, 40000), ylim=c(0,1))
ks.test(hsp_144$Rate[indx_hsp], cyt_144$Rate[indx_cyt]) ## NOT significant.
boxplot(hsp_144$Rate[indx_hsp], cyt_144$Rate[indx_cyt], names=c("HSF1KO", "WT"))
wilcox.test(hsp_144$Rate[indx_hsp], cyt_144$Rate[indx_cyt])
dev.off()
## Write out data...
save.image("cyto_hsf.RData")
write.table(cbind(Hsp_bed, hsp_144), "HSP.144s.tsv", sep="\t", quote=FALSE, row.names=FALSE)
write.table(cbind(Cyt_bed, cyt_144), "CYT.144s.tsv", sep="\t", quote=FALSE, row.names=FALSE)
|
fa89816ad2d26516468d2d35f1bf66da7af46866
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2019/nonfatal_code/encephalitis/nonfatal_code/02b_parallel.R
|
9ca74d6b630015bf85aaf2a99a52d87e458637d5
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,179
|
r
|
02b_parallel.R
|
#####################################################################################################################################################################################
## Purpose: This sub-step template is for parallelized jobs submitted from main step code
## Author:
## Last updated: 2019/02/27
## Description: Parallelization of 02b_acute_survive
#####################################################################################################################################################################################
rm(list=ls())
# LOAD SETTINGS FROM MASTER CODE (NO NEED TO EDIT THIS SECTION) ----------------
# Load functions and packages
library(argparse)
library(data.table)
# Get arguments from parser
parser <- ArgumentParser()
parser$add_argument("--date", help = "timestamp of current run (i.e. 2014_01_17)", default = NULL, type = "character")
parser$add_argument("--step_num", help = "step number of this step (i.e. 01a)", default = NULL, type = "character")
parser$add_argument("--step_name", help = "name of current step (i.e. first_step_name)", default = NULL, type = "character")
parser$add_argument("--code_dir", help = "code directory", default = NULL, type = "character")
parser$add_argument("--in_dir", help = "directory for external inputs", default = NULL, type = "character")
parser$add_argument("--out_dir", help = "directory for this steps checks", default = NULL, type = "character")
parser$add_argument("--tmp_dir", help = "directory for this steps intermediate draw files", default = NULL, type = "character")
parser$add_argument("--root_j_dir", help = "base directory on J", default = NULL, type = "character")
parser$add_argument("--root_tmp_dir", help = "base directory on clustertmp", default = NULL, type = "character")
parser$add_argument("--ds", help = "specify decomp step", default = 'step1', type = "character")
args <- parser$parse_args()
print(args)
list2env(args, environment()); rm(args)
# Get location from parameter map
task_id <- as.integer(Sys.getenv("SGE_TASK_ID"))
parameters <- fread(file.path(code_dir, paste0(step_num, "_parameters.csv")))
location <- parameters[task_id, location_id]
# User specified options -------------------------------------------------------
# Source GBD 2019 shared functions
k <- # filepath
source(paste0(k, "get_draws.R"))
source(paste0(k, "get_demographics.R"))
# pull demographics from RDS created in model_custom
demographics <- readRDS(file.path(in_dir,"demographics_temp.rds"))
years <- demographics$year_id
sexes <- demographics$sex_id
ages <- demographics$age_group_id
functional <- "encephalitis"
parent_meid <- 1419
# Inputs -----------------------------------------------------------------------
pull_dir_02a <- file.path(root_tmp_dir, "02a_cfr_draws","03_outputs", "01_draws")
# Run job ----------------------------------------------------------------------
draws <- get_draws(gbd_id_type = "modelable_entity_id",
source = "epi",
gbd_id = parent_meid,
age_group_id = ages,
measure_id = 6,
location_id = location,
gbd_round_id = 6,
decomp_step = ds)
setDT(draws)
draws[, c("model_version_id", "measure_id"):= NULL]
for (y in years) {
for (s in sexes) {
survive <- readRDS(file.path(pull_dir_02a,paste0("dm-", functional, "-survive-", location, "_", y, "_", s, ".rds")))
draws.tmp <- draws[year_id == y & sex_id ==s]
merge.dt <- merge(draws.tmp, survive, by=c("year_id", "sex_id", "age_group_id", "location_id"))
# survival rate * incidence draws = survival rate of acute phase
merge.dt[, paste0("draw_", 0:999):= lapply(0:999, function(x){get(paste0("draw_",x)) * get(paste0("v_",x))})]
cols.remove <- paste0("v_", 0:999)
merge.dt[, (cols.remove):= NULL]
filename <- paste0("survive_", location, "_", y, "_", s, ".rds")
saveRDS(merge.dt, file.path(tmp_dir, "03_outputs", "01_draws", filename))
}
}
# CHECK FILES (NO NEED TO EDIT THIS SECTION) -----------------------------------
# Bug fix: file.create() has no `overwrite` argument; extra named arguments
# are swallowed into `...` and treated as additional file paths, so
# `overwrite=T` silently created a stray file literally named "TRUE".
# file.create() already truncates an existing file, so just drop the argument.
file.create(paste0(tmp_dir, "finished_loc", location, ".txt"))
|
4b57e2abb9fc85124e0eaf1751417611c4186a52
|
b051db434b8ec8e30ec4264181ba6bf86b539ce9
|
/man/plot-PeakList-missing-method.Rd
|
233e1bf2b6f19ce56ff3c552a31e19fe23b46862
|
[] |
no_license
|
lorenzgerber/tofsims
|
6484b58a532385bcbc906fe2921190d2221cc77f
|
cf0791d3324638d604dea5a111729b507f5b2192
|
refs/heads/master
| 2021-06-25T23:27:27.148518
| 2020-10-15T06:32:08
| 2020-10-15T06:32:08
| 73,091,887
| 1
| 1
| null | 2016-11-07T15:24:28
| 2016-11-07T15:24:28
| null |
UTF-8
|
R
| false
| true
| 1,133
|
rd
|
plot-PeakList-missing-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PeakList.R
\name{plot,PeakList,missing-method}
\alias{plot,PeakList,missing-method}
\title{Method \code{plot()} for \code{MassSpectra}}
\usage{
\S4method{plot}{PeakList,missing}(
x,
y,
...,
mzRange = c(0, 200),
plotDeriv = FALSE,
plotPeaks = TRUE,
plotWidths = TRUE
)
}
\arguments{
\item{x}{object of type PeakList}
\item{y}{missing}
\item{...}{further args}
\item{mzRange}{vector or length two, indicating the mz range to be plotted}
\item{plotDeriv}{boolean plot derivate if available}
\item{plotPeaks}{boolean plot peaks if available}
\item{plotWidths}{boolean plot peak widths if available}
}
\value{
plot spectra with peaks and peak widths
}
\description{
Method defining \code{plot()} for the \code{MassSpectra} class
plot has no generic by default
}
\details{
The output of this method is adapted for plotting mass spectra. Uncalibrated
data is plotted as xy plot while uncalibrated data is plotted as barplot. The parameter
\code{mzRange} allows choosing the plot range directly according to the mz number
(when calibrated).
}
|
feae1f9ca7b3f8ba7c896152ad454810f5a3428c
|
dd7768a45761e58f51551959f891333e0d791faf
|
/R/makefun.R
|
8a497abc169b3173e8ffc8875bb815c25757d15f
|
[] |
no_license
|
cran/gcl
|
3d4920a52e1356401d6e860bf1fa10fa939a3d0b
|
fdc14f4896ba1278f82c6af1fd1fa6032ae2f86d
|
refs/heads/master
| 2021-01-13T07:40:52.479132
| 2007-03-14T00:00:00
| 2007-03-14T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 709
|
r
|
makefun.R
|
# Build a new function from `.thefun` with some formals pre-filled.
#
# `.thealist` supplies default values for formals of `.thefun`; unnamed
# entries are matched, in order, to the *last* named formals. When
# `.thefun` ends in `...` and `.moveddd == 1`, that `...` is temporarily
# stripped so unnamed values fill the trailing named formals, then it is
# re-appended at the end. The `...` of makefun itself is forwarded to
# as.function() (e.g. to choose the enclosing environment).
`makefun` <-
function (.thefun, .thealist, .moveddd = 1, ...)
{
    frms <- formals(.thefun)
    frn <- names(frms)
    body <- body(.thefun)
    na <- length(frms)
    # Does the target function end in a `...` formal?
    ddd <- names(frms)[na] == "..."
    add.ddd <- FALSE
    if (ddd && .moveddd != 0) {
        # Drop the trailing `...` so positional matching below targets the
        # named formals; remember to re-append it afterwards.
        na <- na - 1
        frms <- frms[1:na]
        frn <- frn[1:na]
        add.ddd <- TRUE
    }
    if (.moveddd != 1)
        add.ddd <- FALSE
    if (is.null(names(.thealist))) {
        # Unnamed values are assigned to the last formals, in order.
        tmp <- rev(rev(names(frms))[1:min(length(.thealist),
            na)])
        names(.thealist)[1:length(tmp)] <- tmp
    }
    frms[names(.thealist)] <- .thealist
    if (add.ddd)
        frms <- c(frms, alist(... = ))
    return(as.function(c(frms, body), ...))
}
|
586035b3c8b352bbc17e5cbc371fecc105a17266
|
89569fd9c4195a5473c880445221c0f7bfe0b896
|
/man/summary.binomialbcp.Rd
|
faa629dc9ea99ecd99c63eeb8431281e4cb8ff99
|
[] |
no_license
|
svana1/binomialbcp
|
7509262864ec5c22c9ad541119fba62aa8ab1f39
|
8053e21a7111b24338d1499cca7e2579bae32b9b
|
refs/heads/master
| 2021-05-08T03:50:46.534528
| 2016-09-24T22:53:37
| 2016-09-24T22:53:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 522
|
rd
|
summary.binomialbcp.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/binomialbcp.R
\name{summary.binomialbcp}
\alias{summary.binomialbcp}
\title{Summarize a Binomial BCP sample}
\usage{
\method{summary}{binomialbcp}(object, ...)
}
\arguments{
\item{object}{a binomialbcp sample}
\item{...}{(unused)}
}
\value{
a dataframe containing the probability of a change point and estimated proportion for each time point.
}
\description{
Average over the MCMC samples to estimate changepoints and proportions.
}
|
f0fc57bce063b32a0d9cf7487722637ac0418f37
|
f6ad2309ef0d422f344f3bc6f760622a3b4a0305
|
/NLCD_script.R
|
5c9896554995d955af510fd4d768c1ba0161368f
|
[] |
no_license
|
schefferslab/connectivity_SEUSA
|
49962fea96e96be27bf82bf8c3d70cc6bbe2fcc3
|
6fe634153f03a30b62f6dab787494929f4bd83ed
|
refs/heads/master
| 2022-09-06T14:10:42.337395
| 2020-05-29T12:23:29
| 2020-05-29T12:23:29
| 268,536,885
| 0
| 2
| null | 2020-06-01T13:55:52
| 2020-06-01T13:55:51
| null |
UTF-8
|
R
| false
| false
| 8,779
|
r
|
NLCD_script.R
|
library(readr)
library(maptools)
library(sp)
library(raster)
library(rgdal)
library(maps)
library(ggplot2)
library(tidyverse)
library(smoothr)
library(devtools)
library(ENMTools)
library(spatstat)
library(rgeos)
library(lattice)
library(reshape2)
library(ggthemes)
library(viridis)
library(gridExtra)
library(rasterVis)
library(spatialEco)
library(dismo)
rm(list = ls())
setwd("C:/Users/jbaecher/Dropbox (UFL)/UF/Scheffers lab/Projects/Landscape_prioritization")
HSI <- read_rds("C:/Users/jbaecher/Dropbox (UFL)/UF/Scheffers lab/Projects/Landscape_prioritization/HSI_D_aur.rds")
raster <- read_rds("data/circuitscape/se_raster_stack.rds") %>%
crop(HSI) %>%
mask(HSI)
proj4string <- crs(raster)
plot(raster)
# write_rds(D_aur_spdf, "D_aur_spdf.rds")
# write_rds(D_aur_xy, "D_aur_xy.rds")
D_aur_spdf <- read_rds("D_aur_spdf.rds") # Load GBIF data (precleaned), spdf version
D_aur_xy <- read_rds("D_aur_xy.rds")
# Load df version
D_aur_coords_decade <- split(cbind(D_aur_xy[,c('lon','lat')]),
f = D_aur_xy$decade) # Split occurrence data by decade
names(D_aur_coords_decade[2:4]) <- names(D_aur_coords_decade[1])
############# NLCD processing #############
# Original values:
# 1: Open Water
# 2: Urban/Developed
# 3: Intentionally Left Blank
# 4: Intentionally Left Blank
# 5: Intentionally Left Blank
# 6: Mining
# 7: Barren
# 8: Deciduous Forest
# 9: Evergreen Forest
# 10: Mixed Forest
# 11: Grassland
# 12: Shrubland
# 13: Cultivated Cropland
# 14: Hay/Pasture
# 15: Herbaceous Wetland
# 16: Woody Wetland
# 17: Perennial Ice/Snow
# Reclassify rasters based on resistance to salamander movement
## First, create reclassification matrix
reclassify <- matrix(
c( 0, 1, 7, #Open water 7
1, 6, 6, #Developed 6
6, 7, 5, #Barren 5
7, 10, 1, #Forest 1
10, 12, 3, #Grassland/Shrubland 3
12, 14, 4, #Planted/cultivated cropland 4
14, 16, 2, #Wetlands 2
16, Inf, NA), #Ice/Snow (none in SE) NA
ncol=3, byrow=T)
# Create labels for attribute table manipulations during raster processing
nlcdclass <- c("Forest", "Wetlands", "Grass/Shrub/Herbaceous", "Planted/Cultivated", "Barren", "Developed", "Open Water")
classdf <- data.frame(classvalue1 = c(1,2,3,4,5,6,7), classnames1 = nlcdclass)
# Load in backcasted NLCD rasters to project, reclassify, crop, mask, and stack
## First, identify files
all_rasters = list.files(
path="C:/Users/jbaecher/Dropbox (UFL)/UF/Scheffers lab/Projects/Landscape_prioritization/data/NLCD/Historic",
pattern = "\\.tif$",
full.names = TRUE)
## Next, remove files outside study time period
sub_rasters <- all_rasters[c(23:55)]
sub_rasters_list <- list()
decades <- c("CONUS_Backcasting_y196",
"CONUS_Backcasting_y197",
"CONUS_Backcasting_y198",
"CONUS_Backcasting_y199")
decade_layers <- list()
decade_modes <- list()
# Big as muthafuckin' for loop to... load, reproject, reclassify, crop, mask, stack,and calculate modes of NLCD rasters from 1960 until 1992
for (i in 1:length(sub_rasters)){
sub_rasters_list[[i]] <- raster(sub_rasters[i]) %>% # Loading rasters in from file names
projectRaster(HSI) %>% # Reprojecting
crop(HSI) %>% # Croping to study extent
mask(HSI) %>% # Masking to study polygon
reclassify(reclassify) %>% # Reclassifying habitat types based on resistance value
ratify() # Reorganizing attributes table
rat <- levels(sub_rasters_list[[i]]) # Setting levels of raster values
rat$landcover <- nlcdclass # Assigning category names to attribute table
levels(sub_rasters_list[[i]]) <- rat # Saving attribute table in raster object
if(length(sub_rasters_list) == length(sub_rasters)){ # Testing if raster processing is complete
print(names(sub_rasters_list[[i]])) # If raster processing is complete, printing final raster name
sub_rasters_stack <- stack(sub_rasters_list) # If raster processing is complete, stacking list into a raster brick
decade_layers <- lapply( # Begin function to...
decades, function(x) # use a list of decades from study period...
which(grepl(tolower(x), # to find raster years
tolower(names(sub_rasters_stack))))) # and return those rasters to decadal groupings
decade_layers[-c(5,6)] # removing unnecessary elements from list
for (j in 1:length(decade_layers)){ # Begin for loop to...
decade_modes[[j]] <- modal(sub_rasters_stack[[ # calculate the mode of a raster brick...
decade_layers[[j]] # across layers...
]], # representing decade groupings from previous...
ties="random",freq=F) # setting ties to a random outcome
if(length(decade_modes) == length(decade_layers)){ # Testing if mode calculation is complete
print(names(decade_modes[[j]])) # If mode calculation is complete, printing final name of calculated mode layer
decade_stack <- stack(decade_modes) # If mode calculation is complete, stacking list of mode layers into a raster brick
names(decade_stack) <- c("NLCD_1960_1969","NLCD_1970_1979",
"NLCD_1980_1989","NLCD_1990_1992")
} else( # If mode calculation is incomplete...
print(names(decade_modes[[j]]))) # print progress...
}
} else( # If raster processing is incomplete...
print(names(sub_rasters_list[[i]]))) # print progress...
}
barplot(decade_modes[[1]],axes=F,col=plasma(7));axis(1,labels=nlcdclass,at=c(1:7))
################################################# Maxent #################################################
plot(decade_stack)
##
# args to pass to Maxent
args <- list(
c("-J", "-P", "-q", "-p", "-h", "replicates=3", "randomtestpoints=27", "betamultiplier=1",
"askoverwrite=false", "threads=6"),
c("-J", "-P", "-q", "-p", "-h", "replicates=3", "randomtestpoints=38", "betamultiplier=1",
"askoverwrite=false", "threads=6"),
c("-J", "-P", "-q", "-p", "-h", "replicates=3", "randomtestpoints=8", "betamultiplier=1",
"askoverwrite=false", "threads=6"),
c("-J", "-P", "-q", "-p", "-h", "replicates=3", "randomtestpoints=5", "betamultiplier=1",
"askoverwrite=false", "threads=6"))
D_aur_maxent_list <- list()
D_aur_preds_list <- list()
for(k in 1:nlayers(decade_stack)){
D_aur_maxent_list[[k]] <- maxent(x=stack(decade_stack[[k]],raster), # Run MaxEnt on each decade
p=coordinates(D_aur_coords_decade[[k]]), # Partition occurrence by decade
args=args[[k]]) # Pass decade-specific arguments to MaxEnt
if(length(D_aur_maxent_list) == nlayers(decade_stack)){ # Test if Maxent calculations are done
D_aur_preds_list[[k]] <- mean(predict(D_aur_maxent_list[[k]], # If complete, calculate predictions...
stack(decade_stack[[k]],raster))) # from decadal raster data...
} else( # If incomplete,
print(names(decade_modes[[k]]))) # print progress...
}
D_aur_preds_stack <- stack(D_aur_preds_list)
plot(D_aur_preds_stack, zlim=c(0,1))
response(D_aur_maxent_list[[1]], var="NLCD_1960_1969")
|
23e5c6177ffdc017cf752bc382db9043bc68e5aa
|
a43de4a62a781d7804406334443e8eab0f77a626
|
/rotinamod2.r
|
1b26c1678877bda0654837d7e7670b6fae1fb97f
|
[] |
no_license
|
Frank2857/Disserta-o
|
106b6aa112e5e0fadf27fbc5c2d1c2db2eb9e08a
|
9e0f0975e35004b0c05efae0c5d3c361770d6752
|
refs/heads/master
| 2020-11-27T04:26:54.095710
| 2020-02-19T23:22:32
| 2020-02-19T23:22:32
| 228,650,501
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,244
|
r
|
rotinamod2.r
|
############################################################################
###################### MODEL 2 - NON-INFORMATIVE PRIOR #####################
############################################################################
# Fit a Bayesian GEV model with a linear time trend in the location
# parameter via BUGS, using vague priors, then extract posterior draws.
x = as.character(dados$ANO[1:23]);x
y= dados$FEV[1:23];y
t0=2 #Time index of the possible start of the trend
n=length(y)
ncT=n-t0;ncT
############################################################################
# GEV WITH LINEAR TREND
# NOTE: everything inside cat() below is the BUGS model file written to
# disk verbatim — it is runtime text, not R code.
sink("Gev_TREND_LINEAR_NI.txt")
cat("
model {
# Verossimilhança
for (i in 1:n) {
y[i] ~ dgev(mi[i], sigma, eta)
mi[i] <- mu + beta*x[i]
}
# Prior
# dnorm(media, precisao)
mu ~ dnorm(0,0.0001)
sigma ~ dnorm(0,0.0001)
eta ~ dunif(-1.5, 1.5)
beta ~ dunif(-5.0, 5.0)
#Para Fazer preditiva para Hipóses para a beta H0=B<=B0
beta0<-0
probH0<-step(beta0-beta)# Atribui valor cero, quando 'beta0-beta' menor ou aigual a cero '0'
probH1<- 1-probH0 # Complementar de H0
# Para Fazer preditiva
ncT<-21
yp5 <- (mu + (beta*(ncT+5))) +((sigma/eta)*(pow(-log(1-1/5),-eta)-1))
yp10 <- (mu + (beta*(ncT+10))) +((sigma/eta)*(pow(-log(1-1/10),-eta)-1))
yp15 <- (mu + (beta*(ncT+15))) +((sigma/eta)*(pow(-log(1-1/15),-eta)-1))
yp20 <- (mu + (beta*(ncT+20))) +((sigma/eta)*(pow(-log(1-1/20),-eta)-1))
}
",fill=TRUE)
sink()
# Trend covariate: zero before t0, then 1..ncT.
trend=c(rep(0,t0),seq(1,ncT))
trend
dados_bug<- list(x=trend,y=y,n=length(y))
dados_bug
inits <- function(){ list(mu=30, sigma=10, beta=0.01, eta=0.01)}
params <- c("mu","sigma","eta","beta")
nc = 1 #Number of chains
ni = 200000 #Chain length
nb = 50000 #Number of burn-in iterations to discard
nt = 30 #Thinning interval
# Start the sampler
gev.bayes.trend_NL2 = bugs(data = dados_bug, inits = inits,
                           parameters =c(params,"probH0","probH1","yp5","yp10","yp15","yp20"),
                           model = "Gev_TREND_LINEAR_NI.txt",
                           n.thin = nt, n.chains = nc,
                           n.burnin = nb, n.iter = ni, codaPkg=FALSE, debug=T)
print(gev.bayes.trend_NL2, dig = 4)
post_gb_t_NL2<-as.mcmc(gev.bayes.trend_NL2$sims.matrix[,]) # save the output as an mcmc chain
HPDinterval(post_gb_t_NL2) # HPD interval
raftery.diag(post_gb_t_NL2)
geweke.diag(post_gb_t_NL2)
heidel.diag(post_gb_t_NL2)
par(mar=c(2,2,2,2))
plot(post_gb_t_NL2)
summary(post_gb_t_NL2 )
post_gb_t_NL2[1:20,] # printing the first values
#####################################################
# COMPUTING DECISION MEASURES
##################################################
# Posterior probabilities of H0 (beta <= 0) and H1 (beta > 0),
# averaged over the MCMC draws (columns 5 and 6 of the chain).
pH0=mean(post_gb_t_NL2[,5]);pH0
pH1=mean(post_gb_t_NL2[,6]);pH1
# Evidence
O_h1h0=pH1/pH0;O_h1h0 # Posterior odds in favor of H1
resumo2_NL2=print(gev.bayes.trend_NL2,dig=4) # save the summary of the mcmc chain
# Predictive
ypredl_t_NL2<-c(resumo2_NL2$mean$yp5,
                resumo2_NL2$mean$yp10,
                resumo2_NL2$mean$yp15,
                resumo2_NL2$mean$yp20) # save the posterior means of predicted y.
VPtNL2 = ypredl_t_NL2;VPtNL2
obs = c(34.6, 36.8, 36.8, 37.7)# Observed values
EpGevT_NL2= abs((obs-VPtNL2)/obs) # Prediction error
round(mean(EpGevT_NL2)*100,2) # Mean percentage prediction error.
|
527d6f31758a4a374a8936f20ea1825c989d69f4
|
6a645547fdcd7d8b5c4a865c0ee86c937e8b60c5
|
/new_effect_size.R
|
e83c23a1e7fef88e1072a78f128ba8c412e407ef
|
[] |
no_license
|
MegaPast2Future/megalinkers
|
286a82d4cb68366c5358f8570d7c05a6243401db
|
85756fa8c4ca653ad803d6c7d548033f6d720d5b
|
refs/heads/master
| 2022-12-14T22:33:12.373730
| 2020-09-04T09:17:48
| 2020-09-04T09:17:48
| 215,502,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,835
|
r
|
new_effect_size.R
|
# Rank-based effect size between two samples (optionally masked to a continent).
#
# x and y may be numeric vectors or Raster* layers; rasters are (optionally)
# masked to `area` using the global `continents` raster, then reduced to their
# non-NA cell values. Returns a one-row tibble with the Wilcoxon p-value, the
# rank-biserial correlation r = (P - N) / (P + N + E), the probability of
# superiority P(X > Y), the area label, and 10^mean(y - x) ("Fraction" —
# presumably x and y are on a log10 scale; TODO confirm with callers).
new_effect_size <- function(x, y, global = TRUE, area = 'World'){
  # any(grepl(...)) is safer than str_detect(class(x), ...): class() can
  # return a vector of length > 1, which errors inside if() in R >= 4.2.
  if (any(grepl("raster|Raster", class(x)))) {
    cont.name <- c('Asia', 'North America', 'Europe', 'Africa', 'South America', 'Oceania')
    if (!global) {
      cont.value <- which(cont.name == area)
      clipped.continent <- clamp(continents, lower = cont.value, upper = cont.value, useValues = FALSE) / cont.value
      x <- mask(x, clipped.continent)
      y <- mask(y, clipped.continent)
    }
    x <- na.omit(values(x))
    y <- na.omit(values(y))
  }

  # rank-sum test
  p.value <- wilcox.test(x, y)$p.value

  # Favorable / unfavorable / tied pairs. seq_along() is safe on empty x
  # (1:length(x) would yield c(1, 0)); vapply replaces purrr::map and
  # returns doubles, avoiding integer-overflow warnings on large counts.
  P <- sum(vapply(seq_along(x), function(i) sum(x[i] > y), numeric(1)))
  N <- sum(vapply(seq_along(x), function(i) sum(x[i] < y), numeric(1)))
  E <- sum(vapply(seq_along(x), function(i) sum(x[i] == y), numeric(1)))

  # Retained from the original for backward compatibility with integer inputs.
  Tot <- tryCatch(P + N + E,
                  warning = function(w) {
                    warning(w, "Coercing to numeric, results might be inaccurate")
                    as.numeric(P) + as.numeric(N) + as.numeric(E)
                  })

  r = (P - N) / (Tot)
  prob = P / Tot
  fraction = 10^mean(y - x)

  return(tibble(`p` = p.value,
                `r` = r,
                `P(X > Y)` = prob,
                Area = area,
                Fraction = fraction))
}
# Map probability-of-superiority values to qualitative magnitude labels
# (Cohen-style thresholds: >=0.92 Huge, >=0.80 Very large, >=0.71 Large,
# >=0.64 Medium, >=0.56 Small, otherwise "No difference").
#
# Vectorized via cut() instead of the original scalar loop; NA inputs now
# yield NA (the original if() chain errored on NA).
magnitude <- function(superiority){
  labels <- c("No difference", "Small", "Medium", "Large",
              "Very large", "Huge")
  # right = FALSE gives half-open intervals [a, b), matching the original
  # `>=` comparisons exactly at every threshold.
  bins <- cut(superiority,
              breaks = c(-Inf, 0.56, 0.64, 0.71, 0.80, 0.92, Inf),
              labels = labels,
              right = FALSE)
  as.character(bins)
}
|
ca662c7ea22f57f02f872e6402fa462a711261f0
|
59ea89f1162f8048d9f7f10f6e6a3a1567c56607
|
/rstudio/yc_all_pow_cue_effect.R
|
bd119660727fa3a68678baeadc412c2e7cf61b2d
|
[] |
no_license
|
elshafeh/own
|
a9b8199efb3511aa1b30b53755be9337d572b116
|
ef3c4e1a444b1231e3357c4b25b0ba1ba85267d6
|
refs/heads/master
| 2023-09-03T01:23:35.888318
| 2021-11-03T09:56:33
| 2021-11-03T09:56:33
| 314,668,569
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,768
|
r
|
yc_all_pow_cue_effect.R
|
# Analysis script: linear mixed-effects models (lme4) and bar plots of
# cue-locked power (POW) by cue position and frequency, for left/right
# hemisphere and two cue types ("5Neig" vs "broad").
# NOTE(review): relies on a hard-coded local data path; not portable.
library(car);library(ggplot2)
library(dae);library(nlme);library(effects);library(psych)
library(interplot);library(plyr);library(devtools)
library(ez);library(Rmisc);library(wesanderson);library(lme4);library(lsmeans)
library(plotly)
library(ggplot2)
library(ggpubr)
rm(list=ls())
# Colour palette used for the cue positions in all bar plots below.
cbPalette <- c( "#009E73","#E69F00","#999999")
# Input: long-format table with (at least) columns SUB, MOD, CHAN, HEMI, TIME,
# CUE_ORIG, CUE_POSITION, FREQ and POW (inferred from the subsets below).
ext1 <- "~/GoogleDrive/PhD/Fieldtripping/documents/4R/"
ext2 <- "yc_all_BroadandNeigh_alpha_cue_effect_minusevoked.txt"
pat <- read.table(paste0(ext1,ext2),header=T)
# Drop occipital and visual modalities and the 1000ms window; re-factor the
# affected columns after each subset so unused levels are removed.
pat <- pat[pat$MOD != "occ",] ;pat$CHAN <- factor(pat$CHAN) ; pat$MOD <- factor(pat$MOD) ;pat$HEMI <- factor(pat$HEMI)
pat <- pat[pat$MOD != "vis",] ;pat$CHAN <- factor(pat$CHAN) ; pat$MOD <- factor(pat$MOD) ;pat$HEMI <- factor(pat$HEMI)
pat <- pat[pat$TIME != "1000ms",] ;pat$TIME <- factor(pat$TIME)
#pat <- pat[pat$CHAN != "aud_L",] ;pat$CHAN <- factor(pat$CHAN) ; pat$MOD <- factor(pat$MOD) ;pat$HEMI <- factor(pat$HEMI)
# Model 1: statistically-based cues ("5Neig"). POW modelled with all
# interactions up to 3-way of HEMI, CUE_POSITION and FREQ, plus a random
# intercept per subject; Type-II ANOVA with F tests.
new_pat <- pat[pat$CUE_ORIG == "5Neig",] ; pat$CHAN <- factor(pat$CHAN)
model.pat <- lme4::lmer(POW ~ (HEMI+CUE_POSITION+FREQ)^3 + (1|SUB), data =new_pat)
model_anova <- Anova(model.pat,type=2,test.statistic=c("F"))
print(model_anova)
# Model 2: same model for broad-band cues ("broad").
new_pat <- pat[pat$CUE_ORIG == "broad",] ; pat$CHAN <- factor(pat$CHAN)
model.pat <- lme4::lmer(POW ~ (HEMI+CUE_POSITION+FREQ)^3 + (1|SUB), data =new_pat)
model_anova <- Anova(model.pat,type=2,test.statistic=c("F"))
print(model_anova)
# Plot 1/4: 5Neig cues, left hemisphere. summarySE (Rmisc) gives mean POW and
# its standard error per CUE_POSITION x FREQ cell, drawn as dodged bars +- SE.
sub_pat <- pat[pat$CUE_ORIG == "5Neig" & pat$HEMI == "L_Hemi",]
sub_pat$CHAN <- factor(sub_pat$CHAN) ; sub_pat$HEMI <- factor(sub_pat$HEMI)
tgc <- summarySE(sub_pat, measurevar="POW", groupvars=c("CUE_POSITION","FREQ"))
ggplot2::ggplot(tgc, aes(x=FREQ, y=POW, fill=CUE_POSITION)) +geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=POW-se, ymax=POW+se),width=.2,position=position_dodge(.9))+
  ylim(-0.2,0.2)+scale_fill_manual(values=cbPalette)+ggtitle("Stat Based Left Acx")
# Plot 2/4: 5Neig cues, right hemisphere.
sub_pat <- pat[pat$CUE_ORIG == "5Neig" & pat$HEMI == "R_Hemi",]
sub_pat$CHAN <- factor(sub_pat$CHAN) ; sub_pat$HEMI <- factor(sub_pat$HEMI)
tgc <- summarySE(sub_pat, measurevar="POW", groupvars=c("CUE_POSITION","FREQ"))
ggplot2::ggplot(tgc, aes(x=FREQ, y=POW, fill=CUE_POSITION)) +geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=POW-se, ymax=POW+se),width=.2,position=position_dodge(.9))+
  ylim(-0.2,0.2)+scale_fill_manual(values=cbPalette)+ggtitle("Stat Based Right Acx")
# Plot 3/4: broad cues, left hemisphere.
sub_pat <- pat[pat$CUE_ORIG == "broad" & pat$HEMI == "L_Hemi",]
sub_pat$CHAN <- factor(sub_pat$CHAN) ; sub_pat$HEMI <- factor(sub_pat$HEMI)
tgc <- summarySE(sub_pat, measurevar="POW", groupvars=c("CUE_POSITION","FREQ"))
ggplot2::ggplot(tgc, aes(x=FREQ, y=POW, fill=CUE_POSITION)) +geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=POW-se, ymax=POW+se),width=.2,position=position_dodge(.9))+
  ylim(-0.2,0.2)+scale_fill_manual(values=cbPalette)+ggtitle("Broad Based Left Acx")
# Plot 4/4: broad cues, right hemisphere.
sub_pat <- pat[pat$CUE_ORIG == "broad" & pat$HEMI == "R_Hemi",]
sub_pat$CHAN <- factor(sub_pat$CHAN); sub_pat$HEMI <- factor(sub_pat$HEMI)
tgc <- summarySE(sub_pat, measurevar="POW", groupvars=c("CUE_POSITION","FREQ"))
ggplot2::ggplot(tgc, aes(x=FREQ, y=POW, fill=CUE_POSITION)) +geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=POW-se, ymax=POW+se),width=.2,position=position_dodge(.9))+
  ylim(-0.2,0.2)+scale_fill_manual(values=cbPalette)+ggtitle("Broad Based Right Acx")
# ggarrange(p1, p2 , p3 , p4 , col = 2, nrow = 2,labels = c("Stat Based Left Acx", "Stat Based Right Acx","Broad Based Left Acx", "Broad Based Right Acx"))
|
380a952150d3b6c1da749944514d063112d0b249
|
add1ed8394da1dfd4ba1c81bcb9a1ad9c8a63808
|
/workshop_data.R
|
23906a37d63a020ff4d81e9aac6a8e9e510cfb44
|
[] |
no_license
|
TomKellyGenetics/Stats_Workshop_Osumi_Group
|
98a8e69f008aa86fcc4597d6fcbd3c15c9ad33d6
|
2e85947646f957004fd7fefc1e63627a09e7c226
|
refs/heads/master
| 2021-07-05T12:24:19.185604
| 2017-09-25T00:19:27
| 2017-09-25T00:19:27
| 104,685,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,347
|
r
|
workshop_data.R
|
# Teaching script: simulates datasets and produces the plots/CSV exercise
# files for a statistics workshop (t-tests, chi-squared/Fisher tests, ANOVA).
# NOTE(review): no set.seed(), so every run generates different data/answers.
setwd("C:/Users/Tom Kelly/Documents/Downloads/20170927_Stats_Workshop_Osumi_Group")
# A large "population" plus two samples with slightly different means.
data <- rnorm(1000000, 1, 1.25)
data1 <- rnorm(100, 1, 1.25)
data2 <- rnorm(50, 1.25, 1)
# Density of the population with mean and +-1,2 SD reference lines.
pdf("1_distribution.pdf", width=800/75, 600/75)
plot(density(data), main="distribution", ylab="frequency", xlab="value of data")
abline(v=mean(data))
abline(v=mean(data)-sd(data), col="grey")
abline(v=mean(data)-2*sd(data), col="grey")
abline(v=mean(data)+sd(data), col="grey")
abline(v=mean(data)+2*sd(data), col="grey")
dev.off()
pdf("2_boxplot.pdf", width=800/75, 600/75)
boxplot(data1, data2, col=c("red", "blue"), names=c("group A", "group B"), xlab="categorical group", ylab="continuous outcome", main="difference between groups?")
dev.off()
pdf("3_distribution_compare.pdf", width=800/75, 600/75)
plot(density(data2), main="distribution", ylab="frequency", xlab="value of data", col="blue")
lines(density(data1), main="distribution", ylab="frequency", xlab="value of data", col="red")
legend("topright", fill=c("red", "blue"), legend=c("group A", "group B"))
dev.off()
pdf("4_t_distribution.pdf", width=800/75, 600/75)
plot(density(data2), main="distribution", ylab="frequency", xlab="value of data (t)", col="blue")
lines(density(data1), main="distribution", ylab="frequency", xlab="value of data", col="red")
lines(density(data), main="distribution", ylab="frequency", xlab="value of data")
legend("topright", fill=c("red", "blue", "black"), legend=c("group A", "group B", "normal (z)"))
dev.off()
t.test(data1, data2) #p.val = 0.029, t=-2.2051
# NOTE(review): cbind recycles data2 (n=50) against data1 (n=100) here;
# confirm the exercise file is meant to contain recycled values.
dataset <- cbind(data1, data2)
colnames(dataset) <- c("placebo", "treatment")
write.csv(dataset, file="1_exercise_t_test.csv")
# Second pair of samples with near-identical means (non-significant example).
data3 <- rnorm(100, 0.95, 2.25)
data4 <- rnorm(50, 1.05, 2)
pdf("5_boxplot.pdf", width=800/75, 600/75)
boxplot(data3, data4, col=c("red", "blue"), names=c("group A", "group B"), xlab="categorical group", ylab="continuous outcome", main="difference between groups?")
dev.off()
pdf("6_distribution_compare.pdf", width=800/75, 600/75)
plot(density(data4), main="distribution", ylab="frequency", xlab="value of data", col="red")
lines(density(data3), main="distribution", ylab="frequency", xlab="value of data", col="blue")
legend("topright", fill=c("red", "blue"), legend=c("group A", "group B"))
dev.off()
t.test(data3, data4) #p.val = 0.3624, t=--0.91469
dataset <- cbind(data3, data4)
colnames(dataset) <- c("placebo", "treatment")
write.csv(dataset, file="1_example_t_test.csv")
# Categorical example 1: binomial draws mapped to frequency labels.
# NOTE(review): rbinom(, 4, ) yields values 0..4 but only four labels are
# supplied, so draws of 4 index past the vector and become NA -- confirm
# this is intentional.
example <- rbinom(100, 4, 0.5)
anime <- factor(c("never", "monthly", "weekly", "daily")[example+1], levels=c("never", "monthly", "weekly", "daily"))
table(anime)
sex <- factor(c("Male", "Female")[c(rep(1, 50), rep(2, 50))])
table(anime, sex)
fisher.test(table(anime, sex))
chisq.test(table(anime, sex)) #chi.sq = 0.80299, p.val = 0.8488
write.csv(table(anime, sex), file="2_example_chi_sq_test.csv")
# Categorical example 2: smoking status vs cancer diagnosis.
example <- rbinom(100, 2, 0.5)
smoking <- factor(c("never", "ex-smoker", "current smoker")[example+1], levels=c("never", "ex-smoker", "current smoker"))
table(smoking)
cancer_diagnosed <- factor(c("Positive", "Negative")[c(rep(1, 50), rep(2, 50))])
table(smoking, cancer_diagnosed)
fisher.test(table(smoking, cancer_diagnosed))
chisq.test(table(smoking, cancer_diagnosed)) #chi.sq = 0.80299, p.val = 0.8488
write.csv(table(smoking, cancer_diagnosed), file="2_exercise_chi_sq_test.csv")
# Categorical example 3: 2x2 allele-by-disease table for Fisher's exact test.
example <- rbinom(100, 1, 0.5)
risk_allele <- factor(c("A", "T")[example+1], levels=c("A", "T"))
table(risk_allele)
batten_disease <- factor(c("Case", "Control")[c(rep(1, 50), rep(2, 50))])
table(risk_allele, batten_disease)
fisher.test(table(risk_allele, batten_disease))
chisq.test(table(risk_allele, batten_disease)) #chi.sq = 0.80299, p.val = 0.8488
write.csv(table(risk_allele, batten_disease), file="3_exercise_fishers_test.csv")
# Linear models / ANOVA examples: additive then interaction models.
fit <- lm(data1~sex+risk_allele)
summary(fit)
anova(fit)
cholesterol <- c(rnorm(50, 110, 30), rnorm(50, 90, 35))
fit <- lm(cholesterol~sex+risk_allele)
summary(fit)
anova(fit)
fit <- lm(cholesterol~sex*risk_allele)
summary(fit)
anova(fit)
boxplot(cholesterol~sex)
t.test(cholesterol~sex)
boxplot(cholesterol~risk_allele)
t.test(cholesterol~risk_allele)
dataset <- cbind(cholesterol, sex, risk_allele)
write.csv(dataset, file="4_exercise_anova.csv")
|
7fc29666a31a21f73b6a7a41dfb644cc5e2737a2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/samon/examples/samonCombineIM.Rd.R
|
908561533799ff9302492751b4f0931a3fddd249
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
samonCombineIM.Rd.R
|
# Auto-extracted example script from the samonCombineIM help page of the
# `samon` package: loads two per-seed samonIM result objects shipped with
# the package and combines them into a single result.
library(samon)
### Name: samonCombineIM
### Title: Combines results from separate runs of the samonIM function
### Aliases: samonCombineIM
### ** Examples
# outputs from samonIM -- run on VAS1 with different seeds
data("V1Res001")
data("V1Res002")
# combine them
V1Results <- samonCombineIM( objlist = list(V1Res001,V1Res002) )
|
b499846e8b9157ea73ed107a2f7ca08650a952c8
|
737daba62560c93d39e0ddbec75d4d1e187b3239
|
/Arvore_clasificacao_tentativa.R
|
9b0ee09262806601f0ae325979834b499243cbce
|
[] |
no_license
|
Cicconella/No-Show-MAE5904
|
85f268333ca07fa6bac3fd636c2e6e6aad213b34
|
00247e22fa3a58407da397ee0ad2277930a0f646
|
refs/heads/master
| 2020-08-14T07:53:11.436926
| 2019-11-04T02:46:05
| 2019-11-04T02:46:05
| 215,126,640
| 0
| 0
| null | 2019-10-21T21:04:19
| 2019-10-14T19:18:48
|
R
|
UTF-8
|
R
| false
| false
| 4,237
|
r
|
Arvore_clasificacao_tentativa.R
|
rm(list=ls())
library(zoo)
library(rmarkdown)
library(knitr)
library(pROC)
library(rpart)
library(rpart.plot)
options(scipen = 999)
th <- read.table("noshowappointments/KaggleV2-May-2016.csv",
header=T, sep=",")
# recode the data
df <- data.frame(
# PATIENT PROPERTIES
patid = th$PatientId,
male = ifelse(th$Gender=="M",1,0),
age = th$Age,
burs = th$Scholarship,
hiper = th$Hipertension,
diabet = th$Diabetes,
alcohol = th$Alcoholism,
handcap = th$Handcap,
rayon = th$Neighbourhood,
# APPOINTMENT PROPERTIES
appid = th$AppointmentID,
daysc = as.Date(th$ScheduledDay),
wdaysc = strptime(th$ScheduledDay,"%Y-%m-%dT%H:%M:%SZ","GMT")$wday,
hoursc = strptime(th$ScheduledDay,"%Y-%m-%dT%H:%M:%SZ","GMT")$hour,
dayap = as.Date(th$AppointmentDay),
wdayap = strptime(th$AppointmentDay,"%Y-%m-%dT%H:%M:%SZ","GMT")$wday,
sms = th$SMS_received,
noshow = ifelse(th$No.show=="Yes",1,0)
)
dim(df)
#df <- df[ (df$age >= 0) & (df$age <= 97) & (df$dayap >= df$daysc),]
df <- df[ (df$age >= 0) & (df$dayap >= df$daysc),]
df$daybw <- as.numeric(df$dayap - df$daysc)
df$adult <- ifelse(df$age>=18,1,0)
df$solbor <- ifelse(df$rayon=="SOLON BORGES",1,0)
df$sandum <- ifelse(df$rayon=="SANTOS DUMONT",1,0)
df$sancla <- ifelse(df$rayon=="SANTA CLARA",1,0)
df$sancec <- ifelse(df$rayon=="SANTA CECÍLIA",1,0)
df$itarar <- ifelse(df$rayon=="ITARARÉ",1,0)
df$lourde <- ifelse(df$rayon=="DE LOURDES",1,0)
df$cabral <- ifelse(df$rayon=="DO CABRAL",1,0)
df$quadro <- ifelse(df$rayon=="DO QUADRO",1,0)
df$horto <- ifelse(df$rayon=="HORTO",1,0)
df$penha <- ifelse(df$rayon=="JARDIM DA PENHA",1,0)
df$jesus <- ifelse(df$rayon=="JESUS DE NAZARETH",1,0)
df$cypres <- ifelse(df$rayon=="MÁRIO CYPRESTE",1,0)
df$sanmar <- ifelse(df$rayon=="SANTA MARTHA",1,0)
df <- df[order(df$patid, df$dayap),]
df$prev <- rep(0,nrow(df))
df$noprev <- rep(0,nrow(df))
for(i in 2:nrow(df)){
if(df$patid[i] == df$patid[i-1]){
df$prev[i] <- df$prev[i-1]+1
df$noprev[i] <- df$noprev[i-1] + df$noshow[i-1]
}
}
df$noprevpct <- ifelse(is.nan(df$noprev/df$prev),0,df$noprev/df$prev)
df$mode_previous <- ifelse(df$noprevpct>=0.5, 1, 0)
df <- df[order(df$dayap),]
df$prior <- c(0,cumsum(df$noshow))[-nrow(df)]/c(1:nrow(df))
plot(df$prior, type="n")
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = "#e5e5e5")
grid(col="white", lwd=2)
df$waittime <- ifelse(df$daybw==0,0,ifelse(df$daybw<=7,1,2))
df$morning <- ifelse(df$hoursc>=6 & df$hoursc<=10, 1, 0)
df$age18 <- ifelse(df$age>=18,1,0)
df$age80 <- ifelse(df$age>=80,1,0)
df$age_decade <- floor(df$age/10)
#df <- df[order(df$dayap),]
trainsize <- round(nrow(df)*0.2)
dftrain <- df[1:trainsize,]
dftest <- df[(trainsize+1):(trainsize*2),]
table(dftrain$noshow)
dim(df)
zf <- rbind(df[df$noshow==0,][sample(nrow(df[df$noshow==0,]),nrow(df[df$noshow==1,])),], df[df$noshow==1,])
dim(zf)
table(zf$noshow)
dtree <- rpart(formula=noshow ~ age_decade +
burs + diabet + handcap + alcohol + hiper
,
data = zf,
method = "class",
maxdepth =3,
minsplit = 2,
minbucket = 1,
cp=-1
)
rpart.plot(dtree)
predicted_noshow <- ifelse(predict(dtree, newdata = df)[,2]>=0.5,1,0)
actual_noshow <- df$noshow
mytab <- table(abs(predicted_noshow - actual_noshow))
accuracy <- mytab[1]/(mytab[1] + mytab[2])
accuracy
roc(actual_noshow, predicted_noshow)
table(predicted_noshow, actual_noshow)[2,2] / (table(predicted_noshow, actual_noshow)[1,2] + table(predicted_noshow, actual_noshow)[2,2])
logit <- glm(formula =
noshow ~ age + age18 + age80 + male + wdaysc + wdayap +
burs + diabet + handcap + alcohol +
solbor + sandum + itarar + lourde + cabral + quadro + penha + jesus + sanmar +
sms + morning + waittime + mode_previous,
family = binomial(link="logit"), data = zf, )
summary(logit)
#
predicted_noshow <- ifelse(predict(logit, df,type = "response")>=0.5,1,0)
actual_noshow <- df$noshow
mytab <- table(abs(predicted_noshow - actual_noshow))
accuracy <- mytab[1]/(mytab[1] + mytab[2])
accuracy
roc(actual_noshow, predicted_noshow)
|
eb43340498f096cd8513f87203b15bf7a7e598e1
|
3208d5737d307dd4e625ea094028c57f48d13aff
|
/REE_actual_mining_choropleth_map/Leaflet_mines_top_projects_by_country.R
|
b0892ff39c8a50dd61e3ccd00fdddcc6a81e115c
|
[
"Apache-2.0"
] |
permissive
|
DistrictDataLabs/03-mineralytics
|
849b7fbbe96704c004564041edfece2a07729a1c
|
1371f1608684b985a2169d2c20ea78acc814ab5c
|
refs/heads/master
| 2021-01-13T06:38:46.863028
| 2015-11-12T03:55:02
| 2015-11-12T03:55:02
| 39,696,039
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,323
|
r
|
Leaflet_mines_top_projects_by_country.R
|
# Choropleth-map preparation: join per-country top-5 REE mining projects
# (from the `mines` data frame built elsewhere) onto Natural Earth country
# polygons, for later rendering with leaflet.
library(rgdal)
library(leaflet)
library(plyr)
library(dplyr)
## code adapted from http://adolfoalvarez.cl/code/users.html
##use the mines dataframe from script "Parsing_mining_data.R"
# We download the data for the map from naturalearthdata.com
url <- "http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/50m/cultural/ne_50m_admin_0_countries.zip"
folder <- getwd() #set a folder where to download and extract the data
file <- basename(url)
# download.file(url, file)
# unzip(file, exdir = folder)
#And read it with rgdal library
#change working directory to directory that contains ne_50m_admin_0_countries
world <- readOGR(dsn = folder,
                 layer = "ne_50m_admin_0_countries",
                 #encoding = "latin1", #you may need to use a different encoding
                 verbose = FALSE)
#renaming column 5
colnames(mines)[5] <- "MT_metric_tons"
#selecting relevant columns
mines2<-select(mines, Project, Country, MT_metric_tons)
mines2$MT_metric_tons <-as.numeric(mines2$MT_metric_tons)
#This function will extract the top 5 groups and return them as text to be included in the map
# x: vector of project labels; y: numeric vector of the same length
# (production in metric tons). Returns a single "<br>"-separated string
# listing the (up to) five largest y values as "label: value".
myfun <- function(x,y){
  a <- data.frame(x,y) %>%
    arrange(-y) %>%
    slice(1:5)
  return(paste0(a$x, ": ", a$y, collapse="<br>"))
}
#Now we group by country and apply the function
mines4 <- mines2 %>%
  group_by(Country) %>%
  summarise(top5=myfun(Project, MT_metric_tons),
            MT_metric_tons = sum(MT_metric_tons, na.rm=T)
  ) %>%
  ungroup() %>%
  mutate(Country=toupper(Country))
#We need to reconvert to data.frame to merge it with the SpatialPolygons data frame "world"
mines5 <- data.frame(mines4)
names(mines5) = c("country", "top5", "MT_metric_tons")
# Attach the per-country aggregates to the world polygons.
# NOTE(review): mines5$country holds upper-cased country *names*, while
# iso_a3 is a 3-letter ISO code -- confirm the join keys actually match.
world <- sp::merge(world, mines5,
                   by.x = "iso_a3",
                   by.y = "country",
                   sort = FALSE)
#Tiles coming from stamen.com
tiles <- "http://{s}.tile.stamen.com/toner-lite/{z}/{x}/{y}.png"
attribution <- 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Map data by <a href="http://www.naturalearthdata.com/">Natural Earth</a>.'
#######################################################################################################################
|
4c7fb2bbd4b56770e57a59f2458df4b535eaf75c
|
ec5104996880067a42771cec29dbb6f7051d6ad4
|
/R/computeChemicalPower.R
|
e06e67a4925db1a01e565167ab4680931af13239
|
[] |
no_license
|
MarcoKlH/afpt-r
|
f92e23a5605168e2ed9264193c53de9d9fdf63e5
|
f5850c125491c3d54fe9a2473f9f849b56956876
|
refs/heads/master
| 2021-03-27T18:22:37.416151
| 2020-03-18T15:32:16
| 2020-03-18T15:32:16
| 64,833,484
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 669
|
r
|
computeChemicalPower.R
|
# S3 generic (deprecated per the method warnings below): converts mechanical
# power to chemical power, dispatching on the class of `power.mech`.
computeChemicalPower <- function (power.mech,bird,...) UseMethod('computeChemicalPower')
computeChemicalPower.power.mechanical <- function (power.mech,bird,...) {
  # Deprecated S3 method kept for backward compatibility with the old
  # power.mechanical class: converts the `power` column via mech2chem()
  # and relabels the object as chemical power. Extra arguments in `...`
  # are forwarded to mech2chem().
  warning('computeChemicalPower has been made redundant... power.mechanical class has also been made redundant...')
  converted <- power.mech
  converted$power <- mech2chem(converted$power, bird, ...)
  class(converted) <- c('data.frame', 'power.chemical')
  converted
}
computeChemicalPower.numeric <- function (power.mech,bird,...) {
  # Deprecated S3 method for plain numeric input: warns, then simply
  # delegates to mech2chem() (the recommended replacement).
  warning('computeChemicalPower has been made redundant... please adjust calling function to use mech2chem() instead.')
  mech2chem(power.mech, bird, ...)
}
|
83e58d2dac5d34e097cf982167d23e02fd052f62
|
64ff5f128bcc6486a17c64c9757fb4fa0d9b15f7
|
/agriculture_yarsa_gaasc_2075-76/yarsa_regression.R
|
f4e2ffc46a5709d441bb8cd5135226f29c688892
|
[] |
no_license
|
DeependraD/expdean_data
|
433c329695fe7d3f8329ed7771a50b11c3ce24d2
|
ae5a8d73fa46af4f324d2b5083b059073171834f
|
refs/heads/master
| 2020-09-17T14:50:55.870433
| 2019-11-26T16:51:02
| 2019-11-26T16:51:02
| 224,097,658
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,496
|
r
|
yarsa_regression.R
|
# Regression analysis of yarsha harvesting income: fit an "economic" and a
# "socio-economic" linear model and export ANOVA tables, coefficient tables
# and fit statistics to ./outputs/.
# Assumes `yarsa` (the survey data frame) plus readr/broom/purrr/magrittr
# are already loaded by the calling script.
regressors_economic <- c("family_size"
                         ,"harvester_members_household"
                         ,"expenses"
                         ,"yarsha_income_2074"
                         ,"highest_price"
                         ,"average_price"
                         ,"lowest_price"
                         ,"method_curing"
                         ,"time_curing"
                         ,"perception"
                         ,"low_price"
                         ,"low_productivity"
                         ,"weak_coordination"
                         ,"packing"
                         ,"suitable_month"
                         ,"customer_preference"
                         ,"benefits"
                         ,"demerits"
                         ,"geophysical"
                         ,"price_variation")
regressors_social <- c("respondent_gender"
                       ,"respondent_age"
                       ,"respondent_education"
                       ,"started_harvesting"
                       ,"time_collection")
# Economic model: total income regressed on the economic predictors only.
yarsha_income_model_eco <- lm(reformulate(response = "total_income_2074",
                                          termlabels = regressors_economic),
                              data = yarsa)
# # economic model
# model anova
# NOTE(review): the trailing "" matches write_csv's `na` argument
# positionally (missing values written as empty strings); some later calls
# omit it -- confirm the inconsistency is intended.
yarsha_income_model_eco %>%
  anova() %>%
  broom::tidy() %>%
  write_csv("./outputs/anova_economic_model.csv", "")
# mode coefficients
yarsha_income_model_eco_sum <- summary(yarsha_income_model_eco)
yarsha_income_model_eco_sum %>%
  broom::tidy() %>%
  write_csv("./outputs/regression_economic_model.csv")
# Socio-economic model: economic plus social/demographic predictors.
yarsha_income_model_socioeco <- lm(reformulate(response = "total_income_2074",
                                               termlabels = c(regressors_economic,
                                                              regressors_social)),
                                   data = yarsa)
# # socio-economic model
# model anova
yarsha_income_model_socioeco %>%
  anova() %>%
  broom::tidy() %>%
  write_csv("./outputs/anova_socio_economic_model.csv", "")
# mode coefficients
yarsha_income_model_socioeco_sum <- summary(yarsha_income_model_socioeco)
yarsha_income_model_socioeco_sum %>%
  broom::tidy() %>%
  write_csv("./outputs/regression_socio_economic_model.csv")
# Export adjusted R^2 and F statistic for each model.
# NOTE(review): summary.lm objects have no "p-value" component, so that
# element extracts as NULL here -- verify this produces the intended CSV.
yarsha_income_model_eco %>%
  summary() %>%
  magrittr::extract(c("adj.r.squared", "fstatistic", "p-value")) %>%
  map(1) %>%
  as_tibble() %>%
  write_csv("./outputs/summary_stat_economic.csv", "")
yarsha_income_model_socioeco %>%
  summary() %>%
  magrittr::extract(c("adj.r.squared", "fstatistic")) %>%
  map(1) %>%
  as_tibble() %>%
  write_csv("./outputs/summary_stat_socio_economic.csv", "")
|
59ebf8927348d627e5dab0bc5266499a882aaacf
|
82b58caf997774e921c42e9df9ae301a0d403c0a
|
/man/ipm_analyse_l.Rd
|
afa161d9a93145ecaa92d1a1c8ae0c4b80fea06a
|
[] |
no_license
|
zywhy9/IPM
|
a290bbda1c517e4bd2d2492832bfdbad710eab21
|
76881ed3b8091ee7217b66bb0e74fd39ee7c229f
|
refs/heads/master
| 2022-04-12T21:14:12.813942
| 2020-04-10T14:35:23
| 2020-04-10T14:35:23
| 252,565,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,069
|
rd
|
ipm_analyse_l.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ipm_analyse_l.R
\name{ipm_analyse_l}
\alias{ipm_analyse_l}
\title{Analyse data using composite likelihood for a simulation study}
\usage{
ipm_analyse_l(
data,
Plot = FALSE,
priors = NULL,
maxtime = 5,
unit = "mins",
save = 20000L,
chain = 3
)
}
\arguments{
\item{data}{an nlists object, simulation dataset returned by ipm_sim_l.}
\item{Plot}{a flag, indicates whether to save the traceplot and the density plot for MCMC outputs.}
\item{priors}{a string of code to set the prior.}
\item{maxtime}{a scalar, specifying the maximum time to spend on analysis.}
\item{unit}{a character string specifying the units of time for \code{max.time}. See \code{difftime}.}
\item{save}{a scalar, the number of (potentially thinned) samples to save.}
\item{chain}{a scalar, the number of MCMC chains.}
}
\value{
a mcmcrs object, the MCMC outputs, and plots (If plot=TRUE).
}
\description{
Analyses the simulated data (as returned by \code{ipm_sim_l}) using the true joint likelihood model.
}
|
f23dfc7410ed065f8e904163f9d24f40dd113d41
|
7d67f9e92dbafa6e479ec12e4d78df44fba2adf8
|
/R/create_text.R
|
7091c43be3f5e84e6edb6a772e8ba92649ec4e57
|
[] |
no_license
|
ntyndall/translink.bot
|
854d2a0a709f6a74c10b4a153dd89553d082bcda
|
b543f1cf9d62f278135c66cfc2404572f9f7ae3a
|
refs/heads/master
| 2020-04-03T12:21:33.897027
| 2018-11-13T15:16:30
| 2018-11-13T15:16:30
| 155,249,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
create_text.R
|
#' @title Create Text
#'
#' @description Builds the rows of a Slack-message attachment describing the
#'   trains running from \code{startStation} to \code{stopStation}: a header
#'   row plus one row per service with departure/arrival times, delay status
#'   and a line colour looked up from the \code{stationcolors} Redis hash.
#'
#' @param allresults A list with \code{myresults} (the incoming services,
#'   including \code{time}, \code{Status}, \code{Minutes}, \code{name},
#'   \code{originname}, \code{origintiploc} and \code{tiploc}) and
#'   \code{callingpoints} (per-service calling points with \code{Name} and
#'   \code{etarr}).
#' @param dbr A Redis connection object providing \code{HMGET} (used to read
#'   line colours; falls back to black when no entry exists).
#' @param startStation Name of the departure station (header text only).
#' @param stopStation Name of the destination station; also used to select
#'   the matching calling point for each service's ETA.
#'
#' @return A \code{data.frame} with columns \code{pretext}, \code{fallback},
#'   \code{text} and \code{color}; the first row carries the header, each
#'   following row one service.
#'
#' @export
create_text <- function(allresults, dbr, startStation, stopStation) {
  # Get ETA's: for each service, pick the etarr of the calling point whose
  # Name equals the destination station.
  etas <- allresults$callingpoints %>%
    lapply(
      FUN = function(x) {
        x %>%
          `[[`("etarr") %>%
          `[`(x %>% `[[`("Name") %>% as.character %>% `==`(stopStation) %>% which) %>%
          as.character
      }
    ) %>%
    purrr::flatten_chr()
  incoming <- allresults$myresults
  incoming %<>% lapply(as.character)
  # Convert various timestamps to hh:mm
  etas %<>% translink.bot::conv_time()
  incoming$time %<>% translink.bot::conv_time()
  # Header text
  headTxt <- paste0("Trains from *", startStation, "* to * ", stopStation, "* (", incoming$originname[1], "/", incoming$name[1], " line)")
  # Start to accumulate information
  infoTxt <- paste0("Depart : *", incoming$time, "* // Arrive : *", etas, "*")
  # Check if any trains are delayed (anything whose Status is not "On time")
  delayedTr <- incoming$Status %>%
    `==`("On time") %>%
    `!`()
  # Convert for printing
  incoming$Status <- paste0(" [ ", incoming$Status, " ]")
  if (delayedTr %>% any) {
    # For delayed services show the delay in minutes; "?" when unknown.
    todelay <- incoming$Minutes[delayedTr]
    if ("" %>% `==`(todelay) %>% any) {
      todelay["" %>% `==`(todelay) %>% which] <- "?"
    }
    incoming$Status[delayedTr] <- paste0(" [ Delayed by ", todelay, " minutes ]")
  }
  # Get origin destination combo ("origintiploc:tiploc"), the key used in
  # the stationcolors hash.
  originDest <- incoming$origintiploc %>%
    paste0(":", incoming$tiploc)
  lineColors <- c()
  for (i in 1:(infoTxt %>% length)) {
    # Look up the line colour for this service; default to black when the
    # hash has no entry for the key.
    res <- "stationcolors" %>%
      dbr$HMGET(field = originDest[i]) %>%
      `[[`(1)
    lineColors %<>% c(if (res %>% is.null) "#000000" else res)
  }
  # Prepare the data structure
  actualTimes <- paste0(infoTxt, incoming$Status)
  # Data frame for slack message: header row first (NA fields where a value
  # only applies to service rows), then one row per service.
  return(
    data.frame(
      pretext = c(headTxt, NA %>% rep(actualTimes %>% length)),
      fallback = c(headTxt, paste0(" - ", actualTimes)),
      text = c(NA, actualTimes),
      color = c(NA, lineColors)
    )
  )
}
|
694a92350be6d967f4be2177afcb408a6d73076e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RcppMLPACK/vignettes/RcppMLPACK-intro.R
|
d84c71df9b69c4b57480cf743a1d32b9ae1347a1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
RcppMLPACK-intro.R
|
### R code from vignette source 'RcppMLPACK-intro.Rnw'
### Encoding: UTF-8
|
10c64d79ea801759779dcb366d53864e45fb5505
|
a8412d5d6fa04a7176e6331096b2197cb18b9ebf
|
/R/build_network_window.R
|
596501f603a6c455ca1d3579e277e1763761b878
|
[
"BSD-3-Clause"
] |
permissive
|
melinagallopin/appinetwork
|
fe66f7a752c05800a0ce3e5f334873a0a41f12d9
|
3790f8d3265d5ec5fc2ab924506c3eb49a924cbb
|
refs/heads/master
| 2021-06-04T05:28:38.648596
| 2020-11-05T17:20:29
| 2020-11-05T17:20:29
| 146,662,875
| 0
| 2
|
BSD-3-Clause
| 2020-11-05T17:20:31
| 2018-08-29T21:51:28
|
R
|
UTF-8
|
R
| false
| false
| 22,204
|
r
|
build_network_window.R
|
build_network = function(organism, db, ipl, th, method, degree, remove.sdi, remove.ul, remove.si, update, mainpath, f_pos) {
cat("\n\n>BUILD NETWORK")
# Parametres de la fonction
Os <- organism
os <- tolower(organism)
ex.type <- tolower(method)
inter.type <- tolower(degree)
r.i <- tolower(remove.sdi)
r.s.i <- tolower(remove.si)
r.u.l <- tolower(remove.ul)
IPL <- ipl
DB <- db
Th <- th
UpDate <- tolower(update)
# Lecture des fichiers input
selected.database <- load_data(db)
input.uniprotID <- read.table(file = ipl, header = FALSE, sep = '\t', stringsAsFactors = F)
Thesaurus <- read.table(file = th, header = TRUE, quote = "", sep = "\t", stringsAsFactors = F)
### Tous les parametres et fichiers fournis peuvent etre utilises ###
# Completer la liste d'ID a rechercher dans les bases : ajout des anciens ID et isoformes ID
listsupp <- c()
# Parcours de tout le fichier thesaurus pour trouver tous les elements de la liste input et ajouter tous les autres uniprot-ID associe a la meme proteine
for (i in 1:length(Thesaurus[,1])) {
if (Thesaurus[i,1] %in% input.uniprotID[,1]) {
oldNames <- Thesaurus[i,7]
oldNamesList <- unlist(strsplit(oldNames, ";"))
if (is.na(Thesaurus[i,7]) == FALSE) {
# Ajout des anciens uniprot-ID a la liste
for (j in 1:length(oldNamesList[])) {
listsupp <- rbind(listsupp, oldNamesList[j])
}
}
Nisoformes <- Thesaurus[i,8]
if (Nisoformes > 0) {
# Ajout des uniprot-ID des isoformes associes au meme gene
for (k in 1 : Nisoformes) {
newID <- paste(Thesaurus[i,1], k, sep = "-")
listsupp <- rbind(listsupp, paste(Thesaurus[i,1], k, sep = "-"))
}
}
}
}
# Apres avoir recupere tous les uniprot-ID associes a ceux de la liste input, les uniprot-ID de la liste input sont ajoutes
input <- unlist(input.uniprotID)
for (i in 1:length(input)) {
uniprotID <- input[i]
listsupp <- rbind(listsupp, uniprotID)
}
input.uniprotID <- listsupp
### La liste des uniprot-ID a rechercher dans les bases est complete ###
# Creation d'un dossier pour les fichiers resultats en sortie
organismdir <- gsub(" ", "-", Os)
organism.path <- paste(mainpath, organismdir, sep = '/')
dir.create(organism.path, showWarnings = FALSE)
network.path <- paste(organism.path, '/Network', sep = '')
dir.create(network.path, showWarnings = FALSE)
# Affichage des parametres selectionnes pour la recherche d'interactions
cat(paste('\n\n>Species selected :', os))
cat(paste('\n>Experimental type :', ex.type))
cat(paste('\n>Interaction type :', inter.type))
# Filtres des resultats sur l'organisme recherche et la methode souhaitee, dans les bases de donnees
selected.database2 <- selected.database[(length(grep(os, selected.database[,8], ignore.case=TRUE)) > 0 && length(grep(os, selected.database[,9], ignore.case = TRUE)) > 0),]
if (method == "genetic") {
selected.database3 <- selected.database2[(length(grep(ex.type, selected.database2[,10], ignore.case = TRUE)) > 0 || length(grep(ex.type, selected.database2[,10], ignore.case = TRUE)) > 0),]
}
else {
selected.database3 <- selected.database2[selected.database2[,10] == ex.type,]
}
# Verification de la presence d'interactions dans la base
if (dim(selected.database3)[1] == 0) {
cat("\n\nWARNING : No result found in database(s)\nProcesuss stop")
stop()
}
# Rassemblement de toutes les databases fournies
selected.database4 <- as.matrix(data.frame(selected.database3[,1:15], stringAsFactors = F))
### Les bases sont filtrees sur la recherche souhaitee ###
##########################
##########################
# test.selected.database4 = selected.database4[,c(4,5,1,3,2,6:12)]
# unredundant <- remove_redundants(test.selected.database4)
# Recherche des interactions directes pour les proteines de l'input list
cat('\n\n>Search for first degree interactions')
PPI.Direct <- recup_ppi(input.uniprotID, selected.database3)
PPI.Direct[!duplicated(PPI.Direct[,c(1:4,6,11:12)]),]
# Verification de la presence d'interactions
if (dim(PPI.Direct)[1] == 0) {
cat("\n\nWARNING : No interaction found\nProcesuss stop")
stop()
}
### Les interactions avec les proteines de l'input list sont extraites des bases ###
# S'il y a bien des interactions identifiees dans les bases
# Recherches du nombre d'article par interaction
# unredundant <- pubmed_id(PPI.Direct, os,1)
# PPI.Direct <- unredundant
# Si le reseau demande est de degre 2 on relance une recherche d'interactions
if (inter.type == "second-degree") {
# Recuperation des uniprot-ID de toutes les proteines du reseau de degre 1
Interactions.matrix <- PPI.Direct
# Recuperation des proteines d'interet et de leurs interactants directes
col1 <- as.matrix(Interactions.matrix[,1])
col2 <- as.matrix(Interactions.matrix[,2])
# Rassemblement des listes pour recuperer les uniprot-ID en une seule copie
listProt <- rbind(col1, col2)
listProt <- unique(listProt)
ListProt.UniprotID <- as.matrix(listProt)
# Seconde recherche d'interactions, pour le degre 2
cat('\n>Search for second degree interactions')
PPI.Direct2 <- recup_ppi(ListProt.UniprotID, selected.database3)
# Suppression des interactions de degre 2 avec les proteines qui n'ont qu'une seule interaction dans le reseau de degre 1 : si demande
if (r.i == 'yes') {
PPI.Indirect <- remove_ppi(ListProt.UniprotID, PPI.Direct2, input.uniprotID)
}
else {
PPI.Indirect <- PPI.Direct2
}
PPI.Indirect<-unique(PPI.Indirect)
PPI.Indirect[!duplicated(PPI.Indirect[,c(1:4,6,11:12)]),]
cat('\n>Search for PUBMED IDS for each interaction')
# Recherches du nombre d'article par interaction
# unredundant <- pubmed_id(PPI.Indirect, os,2)
# PPI.Indirect<-unredundant
#
# Rassemblement des resultats d'interactions de degre 1 et degre 2
network <- rbind(PPI.Direct[,c(3,5,4,1,2,6:12)], PPI.Indirect[,c(3,5,4,1,2,6:12)])
### Les interactions de degre 2 sont ajoutees au reseau ###
}
else {
# On ne garde que les resultats de la premiere recherche d'interactions si on souhaite un reseau de degre 1
network <- rbind(PPI.Direct[,c(3,5,4,1,2,6:12)])
}
# Verification de la presence de resultats
if (is.null(network) == T) {
cat("WARNING :\nThe network is null!!!\nProcessus stoped")
stop()
}
cat('\n')
# Premier trie des interactions du reseau en fonction des parametre de construction selectionnes
# Remove redundants
network<-DataBases(network)
# selected.database3 <- remove_redundants(network)
selected.database3 <- network
network <- as.matrix(selected.database3)
# Remove self-interactant
if (r.s.i == 'yes'){
cat('\n>Removing self-interactant ...\n')
network <- network[network[,4]!=network[,5],]
network <<- network
cat(' OK')
}
else {
cat('\n>Proteins which interact with itselves are kept')
}
cat('\n>Removing redundants ...\n')
network = network[,c(4,5,1,3,2,6:12)]
unredundant <- pubmed_id(network, os,2)
network <- unredundant
network = network[,c(3,5,4,1,2,6:13)]
### Le reseau est complet et un premier trie a ete effectue ###
# Correction automatique du reseau avec le thesaurus : on met l'uniprot-ID principal pour chaque proteine (a la place des anciens)
data <- as.matrix(network)
thesaurus <- read.table(file = th, header = TRUE, quote = "", sep = "\t" , stringsAsFactors = F)
# Visualisation de la progression des corrections
cat ( '\n>Autocorrection' )
cat ( '\n' )
pb1 <<- txtProgressBar(min = 0, max = dim(data)[1], style = 3)
nbpassage<<-0
nf<<-c()
apply(data,1,cherche_uniprotID,data=data,thesaurus=thesaurus)
not_founds<-nf
# Parcours de toutes les interactions du reseau
# for (i in 1:dim(data)[1]) {
# Recherche de l'uniprot ID de reference et association du bon nom de proteine et de gene
# Colonne A
# resultatA <- search_id(data[i,4], thesaurus)
# if (length(resultatA) == 3) {
# ID <- resultatA[1]
# Proteine <- resultatA[2]
# Gene <- resultatA[3]
# data[i,4] <- ID
# data[i,1] <- Proteine
# data[i,11] <- Gene
# }
# v1<-c(grep(data[i,11],data[,11]))
# v2<-c(grep(data[i,11],data[,12]))
# if((length(unique(c(data[v1,4],data[v2,5])))>1)||(length(resultatA) != 3))
# {
# On recupere les identifiants que le thesaurus ne sait pas remplacer tout seul
# not_found <- data[i,c(4,1,11)]
# not_founds <- rbind(not_founds, not_found)
# }
# Colonne B
# resultatB <- search_id(data[i,5], thesaurus)
# if (length(resultatB) == 3) {
# ID <- resultatB[1]
# Proteine <- resultatB[2]
# Gene <- resultatB[3]
#
# data[i,5] <- ID
# data[i,3] <- Proteine
# data[i,12] <- Gene
# }
# v3<-c(grep(data[i,12],data[,11]))
# v4<-c(grep(data[i,12],data[,12]))
# if((length(unique(c(data[v3,4],data[v4,5])))>1)||(length(resultatB) != 3))
# {
# On recupere les identifiants que le thesaurus ne sait pas remplacer tout seul
# not_found <- data[i,c(5,3,12)]
# not_founds <- rbind(not_founds, not_found)
# }
# setTxtProgressBar(pb1, i)
# }#end for
# Memorisation du nouveau reseau (correction avec le thesaurus)
network2 <- data
### Toutes les corrections automatiques sont faites sur le reseau ###
# On recupere les identifiants que le thesaurus n'a pas trouves
not_founds <<- unique(not_founds)
not_founds = unique(not_founds)
if (is.null(dim(not_founds)[1]) == F) {
cat(paste('\n\n>Search finished,', dim(as.matrix(not_founds))[1], 'IDs are not matched with thesaurus'))
colnames (not_founds) <- c('uID', 'proteinname', 'genename')
# Correction manuelle pour les identifiants que le thesaurus n'a pas trouves
NF <- not_founds
not.found.ID <- as.matrix(NF)
# Mise en place de la fenetre d'affichage des corrections
panelcorrection <- gwindow("Manual correction panel", parent = f_pos, visible = F, expand = T)
pc <- ggroup(container = panelcorrection, horizontal = F, use.scrollwindow = T)
pcsb <- ggroup(container = pc, horizontal = T, use.scrollwindow = F)
lg <- gvbox(container = pcsb)
pg <- gvbox(container = pcsb)
rg <- gvbox(container = pcsb)
fllg <- gformlayout(container = lg)
flpg <- gformlayout(container = pg)
flrg <- gformlayout(container = rg)
not.found.ID<-unique(not.found.ID)
if(length(not_founds) >= 50) {
print("Warning more than 50 proteins need manual correction. Correction panel skipped.")
finish(network, network2, r.s.i, r.u.l, UpDate, selected.database4, mainpath, network.path, Os, r.i, not_founds, ex.type, inter.type, os, IPL, Th, DB, organism.path)
# dispose(bpc)
}
else {
# Affichage des identifiants a modifier
for (i in 1:dim(not.found.ID)[1]) {
uniprotID <- gedit(initial.msg = 'New UniprotID', label = as.character(not.found.ID[i,1]), container = fllg)
PROTEINname <- gedit(initial.msg = 'New Protein name', label = as.character(not.found.ID[i,2]), container = flpg)
GENEname <- gedit(initial.msg = 'New Gene name', label = as.character(not.found.ID[i,3]), container = flrg)
}
visible(panelcorrection) <- T
# Informations sur la correction manuelle
Info <- '\"Correct Network\" : This button will save your manual corrections in the network and in updated databases.\n\n\"Ignore\" : This step will be ignored and the interactions with the uncorrected proteins will be conserved.'
bpc <- ggroup(container = pc); addSpring(bpc)
# 1 : Informations sur la correction manuelle
bouton1 <- gbutton("Info", handler = function(h,...) {
winfo <- gwindow("Info..", parent = f_pos)
g <- gvbox(container = winfo); g$set_borderwidth(10L)
glabel(Info, container = g)
gseparator(container = g)
bg <- ggroup(container = g); addSpring(bg)
gbutton("Return", container = bg, handler = function(...) dispose(winfo))
}, container = bpc)
# 2 : Ignorer la correction manuelle, conservation des identifiants
bouton2 <- gbutton('Ignore', handler = function(h,...) {
# Recuperation des resultats finaux et sauvegardes
finish(network, network2, r.s.i, r.u.l, UpDate, selected.database4, mainpath, network.path, Os, r.i, not_founds, ex.type, inter.type, os, IPL, Th, DB, organism.path)
dispose(bpc)
}, container = bpc)
# 3 : Correction manuelle du reseau
bouton3 <- gbutton("Correct network", handler = function(h,...) {
# Rassemblement des modifications a apporter
if(svalue(fllg)!=""){
cor.uniprotID <- cbind(names(svalue(fllg)), svalue(fllg))
}
else{
cor.uniprotID <- cbind(names(svalue(fllg)), names(svalue(fllg)))
}
if(svalue(flpg)!=""){
cor.proteinname <- cbind(names(svalue(flpg)), svalue(flpg))
}
else{
cor.proteinname <- cbind(names(svalue(flpg)), names(svalue(flpg)))
}
if(svalue(flrg)!=""){
cor.genename <- cbind(names(svalue(flrg)), svalue(flrg))
}
else{
cor.genename <- cbind(names(svalue(flrg)), names(svalue(flrg)))
}
cor.uniprotID <- data.frame(cor.uniprotID, row.names = NULL)
cor.proteinname <- data.frame(cor.proteinname, row.names = NULL)
cor.genename <- data.frame(cor.genename, row.names = NULL)
cor.manuel <- cbind(cor.uniprotID, cor.proteinname, cor.genename)
cor.manuel <- data.frame(cor.manuel, row.names = NULL)
#
#
colnames(cor.manuel) <- c('old_uid', 'corrected_uid', 'old_proteinname', 'corrected_proteinname', 'old_genename', 'corrected_genename')
# Corrections du reseau et ajout a la liste des corrections
for (j in 1:dim(network2)[1]) {
for (i in 1:dim(cor.manuel)[1]) {
# Correction proteine A
if (network2[j,4] == cor.manuel[i,1]) {
network2[j,4] <- gsub("\n", "", cor.manuel[[i,2]][1])
}
if (network2[j,1] == cor.manuel[i,4]) {
network2[j,1] <- gsub("\n", "", cor.manuel[[i,4]][1])
}
if (network2[j,11] == cor.manuel[i,6]) {
network2[j,11] <-gsub("\n", "", cor.manuel[[i,6]][1])
}
# Correction proteine B
if (network2[j,5] == cor.manuel[i,1]) {
network2[j,5] <- gsub("\n", "", cor.manuel[[i,2]][1])
}
if (network2[j,3] == cor.manuel[i,4]) {
network2[j,3] <- gsub("\n", "", cor.manuel[[i,4]][1])
}
if (network2[j,12] == cor.manuel[i,6]) {
network2[j,12] <- gsub("\n", "", cor.manuel[[i,6]][1])
}
}
}
# Recuperation des resultats finaux et sauvegardes
finish(network, network2, r.s.i, r.u.l, UpDate, selected.database4, mainpath, network.path, Os, r.i, not_founds, ex.type, inter.type, os, IPL, Th, DB, organism.path)
dispose(bpc)
}, container = bpc)
}
}
else if (is.null(not_founds)[1] == T) {
cat(paste('\n\n>Search finished, all IDs are matched with thesaurus '))
# Recuperation des resultats finaux et sauvegardes
finish(network, network2, r.s.i, r.u.l, UpDate, selected.database4, mainpath, network.path, Os, r.i, not_founds, ex.type, inter.type, os, IPL, Th, DB, organism.path)
}
else {
cat(paste("\n\n>Search finished, 1 IDs isn't matched with thesaurus "))
# Recuperation des resultats finaux et sauvegardes
finish(network, network2, r.s.i, r.u.l, UpDate, selected.database4, mainpath, network.path, Os, r.i, not_founds, ex.type, inter.type, os, IPL, Th, DB, organism.path)
}
setwd(mainpath)
visible(mainpanel) <<- T
}
# Open the "Build network" configuration window.
#
# Presents a form for choosing the organism, the experimental method, the
# interaction degree and the filtering options, plus file pickers for the
# database(s), the input protein list and the thesaurus. The "Build the
# network" button validates that all three files were chosen and then calls
# build_network() with the collected parameters; "Return" closes the window
# and restores the main panel.
#
# Args:
#   f_pos:     parent window, used to position this window and its dialogs.
#   mainpanel: main application panel; made visible again (via <<-, as in the
#              rest of this file) when the user leaves this window.
#   mainpath:  application working directory, forwarded to build_network().
build_network_window <- function(f_pos, mainpanel, mainpath) {
  db <- c()
  return.parameter <- c()
  ipl <- c()
  th <- c()
  panel_para <- gwindow("Build network : ", parent = f_pos, visible = TRUE)
  pp <- gvbox(container = panel_para)
  pp$set_borderwidth(10L)
  flyt <- gformlayout(container = pp, expand = TRUE)
  # Network-construction options
  gcombobox(c('Caenorhabditis elegans', 'Drosophila melanogaster', 'Escherichia coli', 'Homo sapiens', 'Mus musculus', 'Rattus norvegicus', 'Saccharomyces cerevisiae','Arabidopsis thaliana', 'Other'), label = "Organism", selected = 7, container = flyt)
  gradio(c("Physical", "Genetic"), selected = 1, horizontal = TRUE, label = "Experimental method", container = flyt)
  gradio(c("First-degree", "Second-degree"), selected = 1, horizontal = TRUE, label = "Interaction type", container = flyt)
  gradio(c("Yes", "No"), selected = 1, horizontal = TRUE, label = "Remove second degree unique links", container = flyt)
  gradio(c("Yes", "No"), selected = 2, horizontal = TRUE, label = "Remove all unique links", container = flyt)
  gradio(c("Yes", "No"), selected = 1, horizontal = TRUE, label = "Remove self-interactant", container = flyt)
  gradio(c("Yes", "No"), selected = 1, horizontal = TRUE, label = "Update databases if necessary", container = flyt)
  # Database selection row.
  # BUGFIX: the argument was misspelled 'horizontale', which ggroup silently
  # swallowed through '...'; the intended argument is 'horizontal'.
  chdb <- ggroup(container = pp, horizontal = TRUE)
  addSpring(chdb)
  bouton1 <- gbutton("Select database", container = chdb, handler = function(...) {
    db <<- gfile(text = "Select database", type = "open", multi = TRUE, container = chdb)
    if (is.null(db)) {
      gmessage('Selected database is null', icon = 'error')
    }
    if (!is.null(db)) {
      bouton1$set_value(paste(length(db), 'databases selected'))
      cat(paste0('\n>Database selected : ', db))
    }
  })
  # Selection of the file holding the names of the proteins of interest
  bouton2 <- gbutton("Select input list", container = chdb, handler = function(...) {
    ipl <<- gfile(text = "Select input list", type = "open", multi = TRUE, container = chdb)
    if (is.null(ipl)) {
      gmessage('Selected input list is null', icon = 'error')
    }
    if (!is.null(ipl)) {
      bouton2$set_value(paste(length(ipl), 'input list selected'))
      cat(paste0('\n>Inputlist selected : ', ipl))
    }
  })
  # Selection of the thesaurus file
  bouton3 <- gbutton("Select thesaurus", container = chdb, handler = function(...) {
    th <<- gfile(text = "Select a thesaurus", type = "open", multi = FALSE, container = chdb)
    if (is.null(th)) {
      gmessage('Selected thesaurus is null', icon = 'error')
    }
    if (!is.null(th)) {
      bouton3$set_value(paste(length(th), 'thesaurus selected'))
      cat(paste0('\n>Thesaurus selected : ', th))
    }
  })
  ppb <- ggroup(container = pp)
  addSpring(ppb)
  gbutton("Build the network", handler = function(h,...) {
    return.parameter <<- svalue(flyt)
    visible(panel_para) <- FALSE
    # Collect the selected search parameters
    organism <- as.character(return.parameter[1])
    method <- return.parameter[2]
    degree <- as.character(return.parameter[3])
    remove.sdi <- as.character(return.parameter[4])
    remove.ul <- as.character(return.parameter[5])
    remove.si <- as.character(return.parameter[6])
    update <- as.character(return.parameter[7])
    # All three files must have been selected before the construction starts
    if (!is.null(db) && !is.null(ipl) && !is.null(th)) {
      # For "Other", first ask the user for the organism name in a sub-window
      if (organism == "Other") {
        panelorganism <- gwindow("Organism description", parent = f_pos, visible = FALSE, expand = TRUE)
        pc <- ggroup(container = panelorganism, horizontal = FALSE, use.scrollwindow = TRUE)
        pcsb <- ggroup(container = pc, horizontal = FALSE, use.scrollwindow = FALSE)
        lg <- gvbox(container = pcsb)
        fllg <- gformlayout(container = lg)
        organismName <- gedit(initial.msg = 'Organism Name', label = "NAME", container = fllg)
        visible(panelorganism) <- TRUE
        bpc <- ggroup(container = pc); addSpring(bpc)
        # Confirm the custom organism name and start the construction
        bouton1 <- gbutton("OK", handler = function(h,...) {
          # Gather the organism name typed by the user
          org.name <- cbind(names(svalue(fllg)), svalue(fllg))
          org.name <- data.frame(org.name, row.names = NULL)
          org <- cbind(org.name)
          colnames(org) <- c('Organism_name')
          organism <- as.character(org[1,2])
          build_network(organism, db, ipl, th, method, degree, remove.sdi, remove.ul, remove.si, update, mainpath, f_pos)
          dispose(bpc)
        }, container = bpc)
      }
      else {
        build_network(organism, db, ipl, th, method, degree, remove.sdi, remove.ul, remove.si, update, mainpath, f_pos)
      }
      dispose(panel_para)
      dispose(ppb)
    }
    # Error message when one of the required files is missing
    else if (is.null(db)) {
      gmessage('Database selected is null', icon = 'error')
      dispose(ppb)
      visible(panel_para) <- FALSE
      visible(mainpanel) <<- TRUE
    }
    else if (is.null(ipl)) {
      gmessage('Input list selected is null', icon = 'error')
      dispose(ppb)
      visible(panel_para) <- FALSE
      visible(mainpanel) <<- TRUE
    }
    else if (is.null(th)) {
      gmessage('Thesaurus selected is null', icon = 'error')
      dispose(ppb)
      visible(panel_para) <- FALSE
      visible(mainpanel) <<- TRUE
    }
    else {
      gmessage('Error : Unable to start search', icon = 'error')
      dispose(ppb)
      visible(panel_para) <- FALSE
      visible(mainpanel) <<- TRUE
    }
  }, container = ppb)
  gbutton("Return", handler = function(h,...) {
    dispose(panel_para)
    visible(mainpanel) <<- TRUE
  }, container = ppb)
  visible(panel_para) <- TRUE
}
|
760a1a4de1bff6e766342b2345a1ea5111a64327
|
27e801915229fd513cbd12392ad4644138a30f87
|
/man/ninvwish.Rd
|
06c50767d4b64ea0e664cfa78960dcd36434dc6a
|
[] |
no_license
|
cran/norm
|
66426c95e0916c379b838f933421e922afe31473
|
4c778c26e12db884174ec427f9ca544a28a6adda
|
refs/heads/master
| 2023-06-26T05:11:59.632171
| 2023-06-18T22:20:02
| 2023-06-18T22:20:02
| 17,697,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,004
|
rd
|
ninvwish.Rd
|
\name{ninvwish}
\alias{ninvwish}
\title{
Random normal-inverted Wishart variate
}
\description{
Simulates a value from a normal-inverted Wishart distribution. This
function may be useful for obtaining starting values of the
parameters of a multivariate normal distribution for multiple
chains of data augmentation.
}
\usage{
ninvwish(s, params)
}
\arguments{
\item{s}{
summary list of an incomplete normal data matrix produced by the
function \code{prelim.norm}.
}
\item{params}{
list of parameters of a normal-inverted Wishart distribution. In
order, the elements of the list are: tau (a scalar), m (a scalar), mu0
(a vector of length ncol(x)), and lambdainv (a matrix of dimension
c(ncol(x),ncol(x))). When using this function to create starting
values for data augmentation, mu0 and lambdainv should be chosen in
relation to the data matrix after the columns have been centered and
scaled to have mean zero and variance one.
}}
\value{
a vector in packed storage representing the simulated normal-inverted
Wishart variate. This vector has the same form as parameter vectors
produced by functions such as \code{em.norm} and \code{da.norm}, and may be
used directly as a starting value for these functions. This vector can
also be put into a more understandable format by \code{getparam.norm}.
}
\section{WARNING}{
Before this function may be used, the random number generator seed
must be initialized with \code{rngseed} at least once in the current S
session.
}
\references{
See Section 5.4.2 of Schafer (1996).
}
\seealso{
\code{\link{rngseed}}, \code{\link{getparam.norm}}, \code{\link{em.norm}} and \code{\link{da.norm}}.
}
\examples{
data(mdata)
s <- prelim.norm(mdata) #do preliminary manipulations
params <- list(1,.5,rep(0,ncol(mdata)), .5*diag(rep(1,ncol(mdata)))) # gives widely dispersed values
rngseed(1234567)
start <- ninvwish(s,params) # draw a variate
thetahat <- em.norm(s,start=start) # run EM from this starting value
}
\keyword{multivariate}
% Converted by Sd2Rd version 0.3-3.
|
d00e7f8b3dfcd9c395e3da20fe5735e4ef38a3b1
|
3eaebe3906513c683843732bc72b9abeb66065b0
|
/man/ctrmean.Rd
|
426805a9f3e52033b4dd22134a7ed658ddd968cb
|
[] |
no_license
|
cran/depth
|
2049230a64a976ab80ed08c4393646302ba3112a
|
a4a789404eda3cf1c73b0e7124fc0a11c7b5a8e0
|
refs/heads/master
| 2021-01-10T21:36:30.107439
| 2019-11-21T10:22:54
| 2019-11-21T10:22:54
| 17,695,467
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,637
|
rd
|
ctrmean.Rd
|
\name{ctrmean}
\alias{ctrmean}
\title{Centroid trimmed mean}
\description{
Computes the centroid of a Tukey depth-based trimmed region.
}
\usage{
ctrmean(x ,alpha, eps = 1e-8, mustdith = FALSE, maxdith = 50,
dithfactor = 10 ,factor = .8)
}
\arguments{
\item{x}{Bivariate data as a matrix, data frame or list. If it is a matrix or data frame, then each row is viewed as one bivariate observation. If it is a list, both components must be numerical vectors of equal length (coordinates of observations).}
\item{alpha}{Outer trimming fraction (0 to 0.5). Observations whose depth is less than \code{alpha} are trimmed.}
\item{eps}{Error tolerance to control the calculation.}
\item{mustdith}{Logical. Should dithering be applied? Used when data set is not in general position or a numerical problem is encountered.}
\item{maxdith}{Positive integer. Maximum number of dithering steps.}
\item{dithfactor}{Scaling factor used for horizontal and vertical dithering.}
\item{factor}{Proportion (0 to 1) of outermost contours computed according to a version of the algorithm ISODEPTH of Rousseeuw and Ruts (1998); remaining contours are derived from an algorithm in Rousseeuw \emph{et al.} (1999). }
}
\details{Dimension 2 only. Centroid trimmed mean is defined to be the centroid
of a Tukey depth-based trimmed region relative to the uniform measure. Contours
are derived from algorithm ISODEPTH by Ruts and Rousseeuw (1996) or, more
exactly, revised versions of this algorithm which appear in Rousseeuw and Ruts
(1998) and Rousseeuw \emph{et al.} (1999). Argument \code{factor} determines
which version to use. If \eqn{n} is the number of observations, contours of
depth \eqn{\le } \code{factor} \eqn{n/2} are obtained from the 1998 version, while
the remaining contours are derived from the 1999 version.
When the data set is not in general position, dithering can be used in the sense that random noise is added to each component of each observation. Random noise takes the form \code{eps} times \code{dithfactor} times U for the horizontal component and \code{eps} times \code{dithfactor} times V for the vertical component, where U, V are independent uniform on [-.5, .5]. This is done in a number of consecutive steps applying independent U's and V's.}
\value{Centroid trimmed mean vector
}
\references{Masse, J.C. (2008), Multivariate Trimmed means based on the Tukey depth, \emph{J. Statist. Plann. Inference}, in press.
Ruts, I. and Rousseeuw, P.J. (1996), Computing depth contours of bivariate point clouds, \emph{Comput. Statist. Data Anal.}, \bold{23}. 153--168.
Rousseeuw, P.J. and Ruts, I. (1998), Constructing the bivariate
Tukey median, \emph{Stat. Sinica}, \bold{8}, 828--839.
Rousseeuw, P.J., Ruts, I., and Tukey, J.W. (1999), The Bagplot: A Bivariate Boxplot, \emph{The Am. Stat.}, \bold{53}, 382--387.}
\author{Jean-Claude Masse and Jean-Francois Plante, based on Fortran code by Ruts and Rousseeuw from University of Antwerp.}
\seealso{\code{\link{med}} for multivariate medians and \code{\link{trmean}} for classical-like depth-based trimmed means.}
\examples{
## exact centroid trimmed mean
set.seed(345)
xx <- matrix(rnorm(1000), nc = 2)
ctrmean(xx, .2)
## second example of an exact centroid trimmed mean
set.seed(159); library(MASS)
mu1 <- c(0,0); mu2 <- c(6,0); sigma <- matrix(c(1,0,0,1), nc = 2)
mixbivnorm <- rbind(mvrnorm(80, mu1 ,sigma), mvrnorm(20, mu2, sigma))
ctrmean(mixbivnorm, 0.3)
## dithering used for data set not in general position
data(starsCYG, package = "robustbase")
ctrmean(starsCYG, .1, mustdith = TRUE)
}
\keyword{multivariate}
\keyword{nonparametric}
\keyword{robust}
|
15fee7d8421c8dc208fd759f266e62d05f5e5bff
|
527fab53ceb3bb687e0628a56344a9e269dd2b8e
|
/pso.R
|
707737f5b84a5005f95975c3cfed6f6ab7710589
|
[] |
no_license
|
sylwesterf/scripts
|
c09aec1bd500be0f8e856b845c0f7523040825a9
|
9d1fd41e79e827c962cb2b5c79ba71652ae2c311
|
refs/heads/master
| 2021-05-03T12:37:08.108358
| 2019-02-10T18:07:51
| 2019-02-10T18:07:51
| 72,124,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,623
|
r
|
pso.R
|
# Particle Swarm Optimization (PSO) for minimizing an objective function.
#
# Args:
#   f         - objective function mapping a numeric vector of length d to a scalar
#   popSize   - number of particles
#   d         - number of decision variables
#   l.bound   - lower bound used when sampling the initial particle positions
#   u.bound   - upper bound (positions may leave [l.bound, u.bound] afterwards;
#               no clamping is applied)
#   w         - inertia weight; give a length-2 vector c(w.max, w.min) for a
#               linearly decreasing inertia, or a scalar for a constant one
#   c1        - cognitive (individual experience) learning factor
#   c2        - social (swarm communication) learning factor
#   maxIter   - maximum number of iterations
#   criterion - FALSE to disable early stopping, or c(TRUE, threshold) to stop
#               (MaxDistQuick) once the maximum Euclidean distance between the
#               current global best and the last 20% of recorded bests falls
#               below the threshold
#
# Returns a list: the optimum (x.opt, f.opt), the history of global bests
# (x.hist, f.hist), per-iteration timings (iter.time), total runtime (epoch),
# the final particle positions (particles), and - when the stopping criterion
# fires - the number of completed iterations (no.ite).
particleSwarm <- function(f, popSize=10, d=2, l.bound=0, u.bound=1, w, c1, c2, maxIter=100, criterion=FALSE)
{
  # container for the results
  result <- list(x.opt = numeric(d), f.opt = numeric(1),
                 x.hist = c(), f.hist = numeric(maxIter),
                 iter.time = c(), epoch = c(),
                 particles = matrix(numeric(), nrow=popSize, ncol=d))
  # pso start time
  starttime <- Sys.time()
  # initialize particle positions uniformly in [l.bound, u.bound]
  particle.matrix <- t(matrix(runif(popSize*d, l.bound, u.bound), nrow=d, ncol=popSize))
  # personal bests start at the initial positions
  pbest <- particle.matrix
  f.pbest <- apply(pbest, 1, f)
  # global best = best of the personal bests
  gbest <- pbest[which.min(f.pbest), 1:d]
  f.gbest <- min(f.pbest)
  result$x.hist <- gbest
  result$f.hist <- f.gbest
  # initial (random) velocity, then first update using the maximal inertia weight
  velocity <- matrix(runif(popSize*d), nrow=popSize, ncol=d)
  velocity <- max(w) * velocity +
    c1 * runif(1) * (pbest - particle.matrix) +
    c2 * runif(1) * (t(matrix(rep(gbest, popSize), d, popSize)) - particle.matrix)
  # new coordinates
  particle.matrix <- particle.matrix + velocity
  # first iteration done
  result$iter.time <- Sys.time() - starttime
  # iterate over generations
  for (i in 2:maxIter){
    # MaxDistQuick stopping criterion, checked only after 20% of the budget
    if (criterion[1] == TRUE){
      if (i/maxIter > 0.2){
        best.20 <- unname(tail(result$x.hist, floor(i*0.2)))
        # BUGFIX: the Euclidean distance is sqrt(rowSums(diff^2)); the previous
        # code computed sqrt(rowSums(diff))^2, which is wrong and yields NaN
        # whenever a row sum of the (unsquared) differences is negative.
        diffs <- best.20 - t(matrix(gbest, d, floor(i*0.2)))
        max.euclidean.dist <- max(sqrt(rowSums(diffs^2)))
        if (max.euclidean.dist < criterion[2]){
          result <- c(result, setNames((i-1), "no.ite"))
          break
        }
      }
    }
    # epoch timer
    start.iter <- Sys.time()
    # evaluate the objective at the new coordinates
    f.particle.matrix <- apply(particle.matrix, 1, f)
    # element-wise update of the personal bests
    pbest <- ifelse(matrix(rep(f.particle.matrix, d), nrow=popSize, ncol=d) <
                      matrix(rep(f.pbest, d), nrow=popSize, ncol=d), particle.matrix, pbest)
    f.pbest <- apply(pbest, 1, f)
    # update the global best
    gbest <- pbest[which.min(f.pbest), 1:d]
    f.gbest <- f(gbest)
    # append history
    result$x.hist <- rbind(result$x.hist, gbest)
    result$f.hist <- append(result$f.hist, f.gbest)
    # velocity update with linearly decreasing inertia weight
    velocity <- (max(w) - (max(w) - min(w)) * i/maxIter) * velocity +
      c1 * runif(1) * (pbest - particle.matrix) +
      c2 * runif(1) * (t(matrix(rep(gbest, popSize), d, popSize)) - particle.matrix)
    # new coordinates
    particle.matrix <- particle.matrix + velocity
    # iteration time
    result$iter.time <- append(result$iter.time, Sys.time() - start.iter)
  }
  # solution
  result$x.opt <- gbest
  result$f.opt <- f.gbest
  # total runtime
  result$epoch <- Sys.time() - starttime
  # end-state particle coordinates
  result$particles <- particle.matrix
  return(result)
}
|
c8e86e9d36c640beb2ea24362d3e84d797a535ca
|
4f3ba3b0cef1d704fb806deab97feec6fe794524
|
/tabs/server/project_specific/data_form/plant_tissue_culture_module.R
|
f96efdbe0b727b196f15e5c937b759a4f12e28bd
|
[] |
no_license
|
mkaranja/Banana-Tracker
|
fe72155f8593813c4a7c608898ae60ceadeb2d7a
|
3fbb615ac61ba48fa37f72c788217ae1d9e95e3c
|
refs/heads/master
| 2021-02-17T23:21:47.655521
| 2020-09-29T13:40:37
| 2020-09-29T13:40:37
| 245,135,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,922
|
r
|
plant_tissue_culture_module.R
|
# Source every sub-module script of the Plant Tissue Culture module into this
# server session.
tab_files <- list.files(path = "tabs/server/project_specific/data_form/plant_tissue_culture_module", full.names = T, recursive = T)
suppressMessages(lapply(tab_files, source))
# Open the Plant Tissue Culture modal (search + last-subculture tabs) when the
# module button is clicked. Disabled tabs are kept commented out below.
observeEvent(input$plant_tissue_culture_module, {
showModal(modalDialog(tags$h2(style="color:#800000;text-align:center;",paste("Plant Tissue Culture Module - ",input$project_selected)),
tabsetPanel(type = "pills",id = "PTC",
search_PTC,
updating_last_subculture#,
#new_PTC
#PTC_deployment,
#TMPL_UMPL
),
easyClose = F, size = "l"
))
})
## SEARCH PLANT TISSUE CULTURE -----------------------------------------------------------
# Populate the search selectors and the date-range picker from the current PTC
# records each time the module is opened.
observeEvent(input$plant_tissue_culture_module, {
updateSelectInput(session, "searchPTC_ID", "Identity", choices = c('', unique(PTC()$PTCIdentity)))
updateSelectInput(session, "searchPTC_VectorID", "Vector ID", choices = c('', unique(PTC()$VectorID1)))
updateDateRangeInput(session, "searchPTC_DateOfStarterCulture", "Date of Starter Culture",
min=min(PTC()$DateOfStarterCulture), max=max(PTC()$DateOfStarterCulture),
start=min(PTC()$DateOfStarterCulture), end=max(PTC()$DateOfStarterCulture))
})
# Clear: ask the user to confirm before wiping the search form.
observeEvent(input$searchPTC_ClearForm,{
confirmSweetAlert(
session = session,
inputId = "searchPTC_ClearForm_Confirm",
type = "",
title = "",
text = "Do you really want to clear the fields?",
btn_labels = c("Cancel", "Yes, Clear!"),
btn_colors = c("#D3D3D3", "#DD6B55")
)
})
# On confirmation, reset the form and hide all result tables and the culture
# sub-form.
observeEvent(input$searchPTC_ClearForm_Confirm, {
if(input$searchPTC_ClearForm_Confirm == TRUE){
reset("searchPTC_form")
shinyjs::hide("searchPTC_Table")
shinyjs::hide("searchPTCDeleted_Table")
shinyjs::hide("searchPTC_Culture_Form")
}
}, ignoreInit = TRUE)
# Search
# Look up the chosen vector in the vector inventory and copy its plant
# selection into the (read-only) "Plant Selection" field.
observeEvent(input$searchPTC_LoadData,{
dt <- vector_inventory() %>%
dplyr::filter(trimws(VectorID) == trimws(input$searchPTC_VectorID))
updateTextInput(session, "searchPTC_PlantSelection", "Plant Selection", value = as.character(dt$PlantSelection[1])) # update plantSelection
})
# Search Table: render the placeholder for either the deleted-records table or
# the live PTC table, depending on the "search deleted" checkbox.
output$searchPTC_Table_Output <- renderUI({
  table_id <- if (input$search_deletedPTC == TRUE) "searchPTCDeleted_Table" else "searchPTC_Table"
  rHandsontableOutput(table_id)
})
# PTC table
# Reactive: live PTC records filtered by the date range and, when set, by the
# PTC identity and the vector ID. Filters are applied progressively so they
# combine.
searchPTC_input <- reactive({
dt <- PTC()
df <- dt %>%
dplyr::filter(between(lubridate::ymd(DateOfStarterCulture), input$searchPTC_DateOfStarterCulture[1], input$searchPTC_DateOfStarterCulture[2]))
if(input$searchPTC_ID !=""){
df <- df[trimws(df$PTCIdentity)==trimws(input$searchPTC_ID),]
}
if(input$searchPTC_VectorID !=""){
df <- df[trimws(df$VectorID1)==trimws(input$searchPTC_VectorID),]
}
df
})
# Render the live PTC table when "Search" is pressed and the deleted-records
# checkbox is off.
observeEvent(input$searchPTC_Search,{
if(input$search_deletedPTC==FALSE){
output$searchPTC_Table <- renderRHandsontable({
rhandsontable(searchPTC_input(), selectCallback = T, readOnly = T, rowHeaders=F) %>%
hot_table(stretchH = "all")
})
}
})
# deleted PTC table
# Reactive: deleted PTC records filtered by the same search fields as the live
# table (date range, PTC identity, vector ID).
searchPTCDeleted_input <- reactive({
  dt <- deletedPTC()
  # Start from the date-range filter, then narrow progressively.
  df <- dt %>%
    dplyr::filter(between(lubridate::ymd(DateOfStarterCulture), input$searchPTC_DateOfStarterCulture[1], input$searchPTC_DateOfStarterCulture[2]))
  if (input$searchPTC_ID != "") {
    # BUGFIX: filter the already date-filtered df (was dt), so that the
    # identity filter combines with the date range, matching searchPTC_input.
    df <- df[trimws(df$PTCIdentity) == trimws(input$searchPTC_ID), ]
  }
  if (input$searchPTC_VectorID != "") {
    df <- df[trimws(df$VectorID1) == trimws(input$searchPTC_VectorID), ]
  }
  # BUGFIX: return df explicitly; previously the reactive returned NULL
  # whenever neither the identity nor the vector filter was set (the last
  # expression was an if-statement whose condition was FALSE).
  df
})
# Render the deleted-records table when "Search" is pressed and the
# deleted-records checkbox is on; warn when there is nothing to show.
observeEvent(input$searchPTC_Search,{
if(input$search_deletedPTC==TRUE){
if(!is.null(searchPTCDeleted_input())){
output$searchPTCDeleted_Table <- renderRHandsontable({
rhandsontable(searchPTCDeleted_input(), selectCallback = T, readOnly = T, rowHeaders=F) %>%
hot_table(stretchH = "all")
})
}else{
# NOTE(review): "Not data to display." looks like it should read "No data to
# display." — left unchanged here because it is a user-facing string.
showNotification("Not data to display.", type = "warning")
}
}
})
# Debug output: number of selected cells in the search table. Appears to be a
# leftover diagnostic — confirm before removing.
output$test10 <- renderPrint({
length(input$searchPTC_Table_select$select)
})
# culture
# Echo the row selected in the search table into a small read-only preview
# table used by the culture workflow.
observeEvent(input$searchPTC_Table_select$select,{
r <- isolate(input$searchPTC_Table_select$select$r)
# NOTE: 'c' shadows base::c inside this handler; it holds the selected row.
c <- searchPTC_input()[r,]
output$searchPTC_Culture_Table <- renderRHandsontable({
rhandsontable(c, selectCallback = T, readOnly = T, rowHeaders=F) %>%
hot_table(stretchH = "all")
})
})
# Build the "add culture" entry form for the PTC identity selected in the
# search table. Requires a selected row; otherwise warns the user.
observeEvent(input$searchPTC_Culture,{
r <- isolate(input$searchPTC_Table_select$select$r)
# NOTE: 'c' shadows base::c inside this handler; it holds the selected row.
c <- searchPTC_input()[r,]
id <- c$PTCIdentity
# NOTE(review): 'media' and 'culturedby' appear unused below (the form pulls
# its choices through loadData() instead) — confirm before removing.
media <- tbl(pool, "tblMedia") %>% collect()
culturedby <- tbl(pool, "tblCulturedBy") %>% collect()
if(length(r)>0){
# Render the culture form: read-only identity, mandatory culture fields,
# additive checkboxes and the save button.
output$searchPTC_Culture_Output <- renderUI({
div(id="searchPTC_Culture_Form",
column(7,
panel_div(class_type = "default",
content = tags$div(
column(6,
disabled(textInput("searchPTC_SelectedIdentity","Identity", value = id, width = "100%")),
numericInput("searchPTC_NumberOfCultures",labelMandatory("Number of Cultures"), min = 0, value = NULL, width = "100%"),
dateInput("searchPTC_DateOfCulture",labelMandatory("Date of Cultures"), width = "100%"),
selectInput("searchPTC_CulturedBy",labelMandatory("Cultured By"), choices = c('', loadData("tblCulturedBy")$CulturedBy), width = "100%"),
selectInput("searchPTC_MediaForCultures",labelMandatory("Media"), choices = c('', loadData("tblMedia")$Media), width = "100%"),
numericInput("searchPTC_LabBookNumberForCultures",labelMandatory("Lab Book Number"), min = 0, value = NULL, width = "100%"),
numericInput("searchPTC_PageNumberForCultures",labelMandatory("Page Number"), min = 0, value = NULL, width = "100%"),
textInput("searchPTC_Comments", "Comments", width = "100%")
),
column(6, br(), br(),br(),br(), br(),br(),
panel_div(class_type = "default",
content = tags$div(
tags$b("Additives"),
awesomeCheckboxGroup(inputId = "searchPTC_AdditivesForCultures", label = "", choices = c(loadData("tblAdditives")$Additives), selected = NULL, status = "info")
)), br(), br(),br(),br(), br(),br(), br(),br(),br(), br(),
actionBttn("searchPTC_Culture_Save","Save Culture", style = "fill", size = "xs", color = "primary")
)
)
)
))
})
} else {
shinyalert::shinyalert("", "Select at least one Identity in the table", type = "warning")
}
})
# Input IDs that must be filled in before a culture record can be saved.
searchPTC_Culture_MandatoryFields <-
c("searchPTC_SelectedIdentity","searchPTC_NumberOfCultures","searchPTC_CulturedBy",
"searchPTC_MediaForCultures", "searchPTC_LabBookNumberForCultures", "searchPTC_PageNumberForCultures")
# Enable the "Save Culture" button only when every mandatory field has a
# non-empty value.
observe({
# check if all mandatory fields have a value
mandatoryFilled <-
vapply(searchPTC_Culture_MandatoryFields,
function(x) {
!is.null(input[[x]]) && input[[x]] != ""
},
logical(1))
mandatoryFilled <- all(mandatoryFilled)
# enable/disable the submit button
shinyjs::toggleState(id = "searchPTC_Culture_Save", "Save Culture", condition = mandatoryFilled)
})
# Save
# Persist the culture record for the selected PTC identity into the
# project-specific cultures table, then reset/hide the search UI.
observeEvent(input$searchPTC_Culture_Save,{
  tb <- paste0(input$project_selected, "_tblCulturesPlantTissueCulture")
  # Collapse the (possibly multi-valued) additives checkbox group into one
  # string. BUGFIX: the previous ifelse() call silently kept only the FIRST
  # selected additive, because ifelse() returns one element per element of
  # its (scalar) test.
  additives <- input$searchPTC_AdditivesForCultures
  dt <- data.frame(
    PTCIdentity = input$searchPTC_SelectedIdentity,
    NumberOfCultures = input$searchPTC_NumberOfCultures,
    DateOfCulture = input$searchPTC_DateOfCulture,
    CulturedBy = input$searchPTC_CulturedBy,
    Comments = if (nzchar(input$searchPTC_Comments)) input$searchPTC_Comments else '',
    MediaForCultures = input$searchPTC_MediaForCultures,
    AdditivesForCultures = if (is.null(additives)) '' else paste(additives, collapse = ", "),
    LabBookNumberForCultures = input$searchPTC_LabBookNumberForCultures,
    PageNumberForCultures = input$searchPTC_PageNumberForCultures
  )
  # Best-effort write: a database error must not crash the session.
  try(expr = dbWriteTable(conn = pool, name = tb, value = dt, overwrite = F, append = T))
  #saveData(dt, tb)
  shinyalert("Success!", "Record Saved", type = "success")
  shinyjs::reset("searchPTC_form")
  shinyjs::hide("searchPTC_Culture_Form")
  shinyjs::hide("searchPTC_Table")
  shinyjs::hide("searchPTCDeleted_Table")
  shinyjs::hide("searchPTC_Culture_Table")
})
# delete selected
# Ask for confirmation before deleting the PTC record selected in the table.
observeEvent(input$searchPTC_DeleteSelected,{
confirmSweetAlert(
session = session,
inputId = "searchPTC_DeleteSelected_Confirm",
type = "warning",
title = "",
text = "Do you really want to DELETE the record?",
btn_labels = c("Cancel", "Yes, Delete!"),
btn_colors = c("#DD6B55", "#04B404")
)
})
# On confirmation: only admins may delete. The row is removed from the
# project table and archived to a deleted-records table.
observeEvent(input$searchPTC_DeleteSelected_Confirm,{
if(input$searchPTC_DeleteSelected_Confirm==TRUE){
r <- input$searchPTC_Table_select$select$r
# NOTE: 'c' shadows base::c; it holds the selected row.
c <- searchPTC_input()[r,]
id <- c$PTCIdentity
if(input$userName %in% isAdmin$UserName){
# NOTE(review): 'length(c>0)' is probably meant to be 'length(c) > 0'
# (it happens to be falsy only when no row is selected) — confirm.
if(length(c>0)){
tb <- paste0(input$project_selected,"_tblPlantTissueCulture")
# NOTE(review): tb2 is built but unused; the archive below writes to the
# global "tblDeletedPlantTissueCulture" table — confirm which is intended.
tb2 <- paste0(input$project_selected, "_tblDeletedPlantTissueCulture")
# Parameterized delete (sqlInterpolate guards against SQL injection).
sql <- paste("DELETE FROM",tb,"WHERE PTCIdentity = ?id;")
query <- sqlInterpolate(pool, sql, id = id)
dbExecute(pool, query)# delete
dbWriteTable(conn = pool, name = "tblDeletedPlantTissueCulture", value = c, overwrite = F, append = T)# save deleted record
reset("searchPTC_form")
shinyjs::hide("searchPTC_Table")
shinyjs::hide("searchPTCDeleted_Table")
shinyjs::hide("searchPTC_Culture_Form")
}
}else {
shinyalert("Oops!", "You don't have permissions to delete this record.", type = "error")
}
}
})
## UPDATING LAST SUBCULTURE
# Populate the subculture identity selector when the module opens.
observeEvent(input$plant_tissue_culture_module,{
updateSelectInput(session, "updating_last_subculture_PTCIdentity", "Plant Tissue Culture Identity", choices = c("",culturesPTC()$PTCIdentity))
})
# Load the first culture record matching the chosen identity into the form
# fields.
observeEvent(input$updating_last_subculture_PTCLoadData,{
dt <- culturesPTC() %>%
dplyr::filter(trimws(PTCIdentity) == trimws(input$updating_last_subculture_PTCIdentity))
updateNumericInput(session, "updating_last_subculture_PTCNumberOfCultures", "Number of Cultures", value = dt$NumberOfCultures[1])
updateDateInput(session, "updating_last_subculture_PTCDateOfCulture", "Date of Culture", value = dt$DateOfCulture[1])
updateSelectInput(session, 'updating_last_subculture_PTCCulturedBy', "Cultured By", choices = c(dt$CulturedBy), selected = dt$CulturedBy[1])
updateSelectInput(session, "updating_last_subculture_PTCMedia", "Media", choices = c(dt$Media), selected = dt$Media[1])
updateTextInput(session, "updating_last_subculture_PTCAdditives", "Additives", value = dt$Additives[1])
updateNumericInput(session, "updating_last_subculture_PTCLabBookNumber", "Lab Book Number", value = dt$LabBookNumber[1])
updateNumericInput(session, "updating_last_subculture_PTCPageNumber", "Page Number", value = dt$PageNumber[1])
updateTextAreaInput(session, "updating_last_subculture_PTCComments", "Comments", value = dt$Comments[1])
})
# Update
# Write the edited number of cultures and comments back to the project
# cultures table (parameterized via sqlInterpolate).
observeEvent(input$updating_last_subculture_PTCUpdate,{
id <- trimws(input$updating_last_subculture_PTCIdentity)
NumberOfCultures = input$updating_last_subculture_PTCNumberOfCultures
Comments = input$updating_last_subculture_PTCComments
tb <- paste0(input$project_selected, "_tblCulturesPlantTissueCulture")
sql <- paste("UPDATE ", tb, "SET NumberOfCultures = ?val1, Comments = ?val2 WHERE PTCIdentity = ?id1;")
query <- sqlInterpolate(pool, sql, val1 = NumberOfCultures, val2 = Comments, id1 = id)
dbExecute(pool, query)
shinyalert("Success!", "Record updated", type = "success")
reset("updating_last_subculture_PTCForm")
})
# Clear
# Ask for confirmation before clearing the update form; the user's answer
# arrives on input$updating_last_subculture_PTCClear_Confirm (handled below).
observeEvent(input$updating_last_subculture_PTCClear,{
confirmSweetAlert(
session = session,
inputId = "updating_last_subculture_PTCClear_Confirm",
type = "",
title = "",
text = "Do you really want to clear the fields?",
btn_labels = c("Cancel", "Yes, Clear!"),
btn_colors = c("#D3D3D3", "#DD6B55")
)
})
# Reset the form only if the user confirmed the sweet-alert dialog.
# ignoreInit prevents the handler from firing when the confirm input is
# first registered.
observeEvent(input$updating_last_subculture_PTCClear_Confirm, {
if(input$updating_last_subculture_PTCClear_Confirm == TRUE){
reset("updating_last_subculture_PTCForm")
}
}, ignoreInit = TRUE)
## NEW PLANT TISSUE CULTURE
# When the Plant Tissue Culture module is opened, (re)populate the three
# record-selector dropdowns from the PTC() reactive data set. The blank
# first choice ('') represents "no filter".
observeEvent(input$plant_tissue_culture_module, {
updateSelectInput(session, "new_PTC_TPCID", "TPC ID", choices = c('', unique(PTC()$TPCIdentity)))
updateSelectInput(session, "new_PTC_UPCID", "UPC ID", choices = c('', unique(PTC()$UPCIdentity)))
updateSelectInput(session, "new_PTC_ExplantID", "Explant ID", choices = c('', unique(PTC()$ExplantID)))
})
# Reactive returning the PTC() records filtered by whichever of the three
# ID selectors (TPC ID, UPC ID, Explant ID) are non-empty. Filters are
# cumulative: each non-empty selector narrows the result further.
#
# Returns: a data frame (same columns as PTC()) with the matching rows.
new_PTC_Input <- reactive({
  dt <- PTC()
  if (input$new_PTC_TPCID != "") {
    dt <- dt %>%
      dplyr::filter(TPCIdentity == input$new_PTC_TPCID)
  }
  if (input$new_PTC_UPCID != "") {
    # Bug fix: this branch previously filtered TPCIdentity against the
    # UPC ID input, which could never match; filter on UPCIdentity instead
    # (the column the "new_PTC_UPCID" selector is populated from).
    dt <- dt %>%
      dplyr::filter(UPCIdentity == input$new_PTC_UPCID)
  }
  if (input$new_PTC_ExplantID != "") {
    # Bug fix: likewise, filter on ExplantID rather than TPCIdentity.
    dt <- dt %>%
      dplyr::filter(ExplantID == input$new_PTC_ExplantID)
  }
  dt
})
## update fields on data loading
# Fill the new-PTC form from the (filtered) new_PTC_Input() record set.
# NOTE(review): the update calls pass whole column vectors as `value =`
# rather than a single element (cf. the [1] indexing used elsewhere in this
# file) — confirm new_PTC_Input() is expected to return exactly one row here.
observeEvent(input$new_PTC_ID_LoadData,{
#updateTextInput(session, "new_PTC_VectorID", "VectorID", value = new_PTC_Input()$VectorID1)
updateTextInput(session, "new_PTC_VectorID1_PromoterGene","Vector ID 1 Promoter-Gene", value = new_PTC_Input()$VectorID1)
updateTextInput(session, "new_PTC_VectorID2_PromoterGene","Vector ID 2 Promoter-Gene", value = new_PTC_Input()$VectorID2)
#updateTextInput(session, "new_PTC_VirusIndexed", "Virus Indexed")
updateTextInput(session, "new_PTC_PTCIdentity","PTC Identity", value = new_PTC_Input()$PTCIdentity)
updateTextInput(session, "new_PTC_IdentityType","Identity Type", value = new_PTC_Input()$IdentityType)
updateSelectInput(session, "new_PTC_Media", "Media", choices = c(new_PTC_Input()$Media))
updatePrettyCheckboxGroup(session, "new_PTC_Additives", "Additives", choices = new_PTC_Input()$Additives, animation = "jelly")
})
# update fields on New input
# When the user chooses the "New" record source, load lookup tables from the
# database and populate the cultivar / source / permit-type dropdowns.
# NOTE(review): the collect() queries below run on every change of
# input$new_PTC_source, even when the "New" branch is skipped — they could
# be moved inside the if() to avoid unnecessary database round trips.
observeEvent(input$new_PTC_source,{
tb <- input$project_selected
# Shared (non-project-specific) lookup tables.
cultivar <- tbl(pool,"tblCultivar") %>% collect()
source <- tbl(pool,"tblSource") %>% collect()
# virusIndexedBy <- tbl(pool, paste0(tb, "_tblVirusIndexedBy")) %>% collect()
permitType <- tbl(pool, "tblPermitType") %>% collect()
if(input$new_PTC_source == "New"){
updateSelectInput(session, "new_PTC_Cultivar", "Cultivar", choices = c('', cultivar$Cultivar))
updateSelectInput(session, "new_PTC_Source", "Source", choices = c('', source$Source))
# Virus-indexed-by defaults to the logged-in user.
updateSelectInput(session, "new_PTC_VirusIndexedBy", "Virus Indexed By", choices = input$username)
# Three pairs of permit-type selectors (A/B) share the same choice list.
updateSelectInput(session, "new_PTC_PermitType1A", "", choices = permitType$PermitType)
updateSelectInput(session, "new_PTC_PermitType1B", "", choices = permitType$PermitType)
updateSelectInput(session, "new_PTC_PermitType2A", "", choices = permitType$PermitType)
updateSelectInput(session, "new_PTC_PermitType2B", "", choices = permitType$PermitType)
updateSelectInput(session, "new_PTC_PermitType3A", "", choices = permitType$PermitType)
updateSelectInput(session, "new_PTC_PermitType3B", "", choices = permitType$PermitType)
}
})
## Load Data on PTC Identity Input
# TODO: stub — load-on-identity handler not implemented yet.
observeEvent(input$new_PTC_LoadData,{
})
## Update PTC Identity
# TODO: stub — update handler not implemented yet.
observeEvent(input$new_PTC_Update,{
})
## Save record
# TODO: stub — save handler not implemented yet.
observeEvent(input$new_PTC_SaveStarterCultureAndSubCulture,{
})
## Save Culture
#observeEvent(input$,{
#  tb <- IBBTV_tblCulturesPlantTissueCulture
#})
## Clear
# Reset all fields of the new-PTC form.
observeEvent(input$new_PTC_ClearTheForm,{
reset("new_PTC_form")
})
# DEPLOYMENT
# Populate the deployment-tab selectors when the PTC module is opened.
# NOTE(review): the last two calls target "new_PTC_UPCID" and
# "new_PTC_ExplantID", duplicating the NEW PLANT TISSUE CULTURE observer
# above — they may have been meant to target PTC_deployment_* inputs;
# confirm the intended input IDs against the UI definition.
observeEvent(input$plant_tissue_culture_module, {
updateSelectInput(session, "PTC_deployment_TPLIdentity","TPL Identity", choices = c('', unique(PTC()$TPCIdentity)))
updateSelectInput(session, "new_PTC_UPCID", "UPC ID", choices = c('', unique(PTC()$UPCIdentity)))
updateSelectInput(session, "new_PTC_ExplantID", "Explant ID", choices = c('', unique(PTC()$ExplantID)))
})
# TODO: stub — vector-data retrieval handler not implemented yet.
observeEvent(input$PTC_deployment_TPLGetVectorData,{
})
|
0492d7e465856f2a8c7dbf4f1e6c0772b48e1804
|
206ce8f05936e26267af547c39978ecc19bef0f8
|
/Code/S4_analysis_code.R
|
efb2ce092e5e7f8bcda3d434f1cc578bdac9edcc
|
[] |
no_license
|
philipclare/missing_data_simulation
|
28df0089cb388ceb767ff1b04309762298fa34c4
|
54bba7aae4702f1d1ee1c7e1e573211b50ad948f
|
refs/heads/master
| 2020-04-29T03:18:55.805749
| 2019-07-22T03:21:08
| 2019-07-22T03:21:08
| 175,804,444
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64,883
|
r
|
S4_analysis_code.R
|
# --- Batch setup -------------------------------------------------------------
# This script is run as an array job: the command-line args give the first
# and last replicate index to process in this batch, plus the project base
# path (trailing slash expected, cf. the paste0() calls below).
args <- commandArgs(trailingOnly = TRUE)
# args <- c(1,10,"C:/Users/z3312911/Cloudstor/PhD/Katana/missingsim/")
start <- as.numeric(args[1])
stop <- as.numeric(args[2])
# `stop` shadows base::stop as a variable; R's function-call lookup skips
# non-function bindings, so stop(...) calls still reach base::stop.
n <- stop-start+1
nimp <- 40
packages <- paste0("/home/z3312911/RPackages/")
.libPaths(packages)
# seeds.RData is expected to define `seeds`; one seed is selected per batch
# (index = stop/n) so batches are reproducible independently of each other.
# NOTE(review): the eval() wrapper suggests `seeds` may hold unevaluated
# expressions — confirm against the file that generates seeds.RData.
load(paste0(args[3],"seeds.RData"))
set.seed(eval(seeds[as.numeric(stop/n)]))
# Input data directories for the three simulation scenarios.
filepath1 <- paste0(args[3],"Data/S1/")
filepath2 <- paste0(args[3],"Data/S2/")
filepath3 <- paste0(args[3],"Data/S3/")
## Check required packages are installed, and if not, install them
libs <- c("nnls","SuperLearner","Matrix","foreach","glmnet","ranger","lme4","geepack","parallel",
"doParallel","mvtnorm","survival","TH.data","MASS","splines","boot","haven","ggplot2",
"multcomp","doBy","gam","future","stats","data.table","optimx","mitml","Amelia","mice","norm")
# NOTE(review): %in% compares against every cell of the installed.packages()
# matrix; rownames(installed.packages()) would be the precise check.
missing <- !libs %in% installed.packages()
if (any(missing)) {
install.packages(libs[missing],repos="https://cloud.r-project.org")
}
library("nnls")
library("SuperLearner")
library("Matrix")
library("foreach")
library("glmnet")
library("ranger")
library("lme4")
library("geepack")
library("parallel")
library("doParallel")
library("boot")
library("haven")
library("ggplot2")
library("multcomp")
library("doBy")
library("gam")
library("future")
library("stats")
library("data.table")
library("optimx")
library("plyr")
library("ltmle")
library("mitml")
library("mice")
library("norm")
library("Amelia")
library("clubSandwich")
## Define custom RF wrapper with slightly fewer trees (based on previous work, this is all that is required for this data)
# create.Learner registers SL.ranger_1 (ranger with 250 trees instead of
# the default) in the calling environment; it is referenced by name in the
# SuperLearner libraries defined inside the analysis functions below.
create.Learner("SL.ranger", params = list(num.trees = 250))
## Program to truncate inverse probability weights
## Used to reduce variability in weights, which can cause issues
# Clamp probabilities to [0.001, 0.999] so that the inverse-probability
# weights built from them are capped at 1000. The original nested-ifelse
# version contained a dead is.na() branch (ifelse already returns NA for an
# NA condition); pmin/pmax propagate NA the same way and are the idiomatic
# vectorized clamp.
#
# v: numeric vector of probabilities (may contain NA).
# Returns: numeric vector with values truncated to [0.001, 0.999]; NAs are
# passed through unchanged.
checkrange <- function(v) {
  pmin(pmax(v, 0.001), 0.999)
}
## Complete-case analysis syntax
# simcomp: run the full suite of estimators on the complete cases of one
# simulated data set.
#
# data: long-format simulated data with (at least) columns id, obs (wave),
#   baseline covariates oa/ob/oc, time-varying confounder l and its lag ll,
#   exposure a and its lag la, and outcome y — inferred from the model
#   formulas below.
#
# Returns a numeric vector of 18 values: (estimate, SE) pairs for, in order:
# GLM, random-intercept, GEE, GLM-IPTW, SL-IPTW, GLM-DR-IPTW, SL-DR-IPTW,
# GLM-TMLE, SL-TMLE. For the regression estimators the target is the sum of
# the `a` and `la` coefficients (effect of exposure at both waves); the TMLE
# estimators target the same contrast directly via abar = (1,1) vs (0,0).
simcomp <- function(data) {
# Complete-case restriction: drop any row with a missing value.
data<-data[complete.cases(data),]
# Define analytic models for outcome, propensities, etc
outmodel <- "y ~ a + la + l + ll + oa + ob + oc + obs"
outmodel2 <- "y ~ a + la + ll + oa + ob + oc + obs"
propa0model <- "la ~ ll + oa + ob + oc + obs"
propa1model <- "a ~ la + l + ll + oa + ob + oc + obs"
# qform/gform is essentially the same, but in the form required by the package ltmle
# NOTE(review): the y formula lists `la` twice ("a + la + l + la + ...");
# R collapses duplicate formula terms so this is harmless, but it looks
# like a typo for `ll` — confirm against the data-generating model.
qform <- c(l="Q.kplus1 ~ la + ll + obs + oa + ob + oc",
y="Q.kplus1 ~ a + la + l + la + oa + ob + oc + obs")
gform1 <- c(la="la ~ ll + oa + ob + oc + obs",
a="a ~ la + l + ll + oa + ob + oc + obs")
create.Learner("SL.ranger", params = list(num.trees = 250)) # same as in top-level code - replicated because sometimes goes wrong in parallel
# Define libraries to be used by SuperLearner
# Top set is set to be used in final analysis. Second set is used for testing.
# NOTE(review): the second pair of assignments overwrites the first, so the
# reduced "testing" libraries are the ones actually used — remove (or
# comment out) the second pair before final runs if that is unintended.
SLlib1 <- c("SL.glm","SL.glm.interaction","SL.gam","SL.ranger_1")
SLlib2 <- list(g=c("SL.glm","SL.glm.interaction","SL.gam","SL.ranger_1"),
Q=c("SL.glm","SL.glm.interaction","SL.gam"))
SLlib1 <- c("SL.glm","SL.glm.interaction")
SLlib2 <- list(g=c("SL.glm","SL.glm.interaction"),
Q=c("SL.glm","SL.glm.interaction"))
# GLM IPTW-based analysis
# Logistic propensity models for lagged exposure (la) and current exposure (a).
GLexp0 <- glm(formula=propa0model,data=data,family=binomial)
GLexp1 <- glm(formula=propa1model,data=data,family=binomial)
# Probability of the exposure actually received at each node; checkrange()
# truncates predictions to [0.001, 0.999] to avoid extreme weights.
GLexpp <- data.table(cbind(id=data$id,obs=data$obs,a=data$a,la=data$la,
propa=ifelse(data$a==1,checkrange(predict(GLexp1,type="response")),checkrange(1-predict(GLexp1,type="response"))),
propla=ifelse(data$la==1,checkrange(predict(GLexp0,type="response")),checkrange(1-predict(GLexp0,type="response")))))
# Joint exposure probability and its inverse as the treatment weight.
GLexpp$p <- GLexpp$propla*GLexpp$propa
GLexpp$GLwt <- 1/GLexpp$p
# GLM IPT and DR-IPT analysis
# IPTW uses the marginal model; DR-IPTW adds covariates (outmodel2).
GLiptw <- glm(y~a+la,data=merge(data,GLexpp[,c("id","obs","GLwt")]),weight=GLwt,family=gaussian)
GLdriptw <- glm(outmodel2,data=merge(data,GLexpp[,c("id","obs","GLwt")]),weight=GLwt,family=gaussian)
# SuperLearner IPTW-based analysis (same structure, SL-estimated propensities).
SLexp0 <- SuperLearner(Y=as.vector(data[,]$la),X=data[,c("obs","oa","ob","oc","ll")],id=data[,1],SL.library=SLlib1,family=binomial)
SLexp1 <- SuperLearner(Y=as.vector(data[,]$a),X=data[,c("obs","oa","ob","oc","ll","la","l")],id=data[,1],SL.library=SLlib1,family=binomial)
SLexpp <- data.table(cbind(id=data$id,obs=data$obs,a=data$a,la=data$la,
propa=ifelse(data$a==1,checkrange(predict(SLexp1)$pred),checkrange(1-predict(SLexp1)$pred)),
propla=ifelse(data$la==1,checkrange(predict(SLexp0)$pred),checkrange(1-predict(SLexp0)$pred))))
SLexpp$p <- SLexpp$propla*SLexpp$propa
SLexpp$SLwt <- 1/SLexpp$p
# SL IPT and DR-IPT analysis
SLiptw <- glm(y~a+la,data=merge(data,SLexpp[,c("id","obs","SLwt")]),weight=SLwt,family=gaussian)
SLdriptw <- glm(outmodel2,data=merge(data,SLexpp[,c("id","obs","SLwt")]),weight=SLwt,family=gaussian)
# GLM TMLE
# abar contrasts always-exposed (1,1) vs never-exposed (0,0) at both waves.
GLtmle <- ltmle(data[,c("obs","oa","ob","oc","ll","la","l","a","y")],
id=data[,1],
Anodes=c("la","a"),
Lnodes=c("l"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
estimate.time = FALSE)
# SuperLearner TMLE (same specification, SL for both g and Q).
SLtmle <- ltmle(data[,c("obs","oa","ob","oc","ll","la","l","a","y")],
id=data[,1],
Anodes=c("la","a"),
Lnodes=c("l"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
SL.library = SLlib2,
estimate.time = FALSE)
# Naive analysis
GLM <- glm(outmodel,data=data,family=gaussian)
RI <- lmer(paste0(outmodel,"+(1|id)"),data=data)
GEE <- geeglm(formula(outmodel),data=data,id=data$id,waves=data$obs,family=gaussian)
# Assemble the output vector. For each regression estimator the estimate is
# coef(a) + coef(la) (rows 2 and 3 of the coefficient table) and the SE is
# sqrt(var(a) + var(la) + 2*cov(a, la)). GLM-based fits use clubSandwich's
# CR3 cluster-robust covariance (clustered on id); the random-intercept and
# GEE fits use their own model-based / sandwich covariance.
c(coef(summary(GLM))[2,1]+coef(summary(GLM))[3,1],sqrt(vcovCR(GLM, cluster=data$id, type = "CR3")[2,2] + vcovCR(GLM, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(GLM, cluster=data$id, type = "CR3")[2,3]),
coef(summary(RI))[2,1]+coef(summary(RI))[3,1],sqrt(vcov(RI)[2,2] + vcov(RI)[3,3] + 2*vcov(RI)[2,3]),
coef(summary(GEE))[2,1]+coef(summary(GEE))[3,1],sqrt(summary(GEE)$cov.scaled[2,2] + summary(GEE)$cov.scaled[3,3] + 2*summary(GEE)$cov.scaled[2,3]),
coef(summary(GLiptw))[2,1]+coef(summary(GLiptw))[3,1],sqrt(vcovCR(GLiptw, cluster=data$id, type = "CR3")[2,2] + vcovCR(GLiptw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(GLiptw, cluster=data$id, type = "CR3")[2,3]),
coef(summary(SLiptw))[2,1]+coef(summary(SLiptw))[3,1],sqrt(vcovCR(SLiptw, cluster=data$id, type = "CR3")[2,2] + vcovCR(SLiptw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(SLiptw, cluster=data$id, type = "CR3")[2,3]),
coef(summary(GLdriptw))[2,1]+coef(summary(GLdriptw))[3,1],sqrt(vcovCR(GLdriptw, cluster=data$id, type = "CR3")[2,2] + vcovCR(GLdriptw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(GLdriptw, cluster=data$id, type = "CR3")[2,3]),
coef(summary(SLdriptw))[2,1]+coef(summary(SLdriptw))[3,1],sqrt(vcovCR(SLdriptw, cluster=data$id, type = "CR3")[2,2] + vcovCR(SLdriptw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(SLdriptw, cluster=data$id, type = "CR3")[2,3]),
summary(GLtmle)$effect.measures$ATE$estimate,summary(GLtmle)$effect.measures$ATE$std.dev,
summary(SLtmle)$effect.measures$ATE$estimate,summary(SLtmle)$effect.measures$ATE$std.dev)
}
## Missing Data analysis syntax
simmiss <- function(data,nimpute,scenario) {
if (scenario==1) {
# Define analytic models for outcome, propensities, etc
outmodel <- "y ~ a + la + l + ll + oa + ob + oc + obs"
outmodel2 <- "y ~ a + la + ll + oa + ob + oc + obs"
propa0model <- "la ~ ll + oa + ob + oc + obs"
propa1model <- "a ~ la + l + ll + oa + ob + oc + obs"
missmodel <- "c ~ a + la + l + ll + oa + ob + oc + obs"
# qform/gform is essentially the same, but in the form required by the package ltmle
qform <- c(l="Q.kplus1 ~ la + ll + obs + oa + ob + oc",
y="Q.kplus1 ~ a + la + l + la + oa + ob + oc + obs")
gform1 <- c(la="la ~ ll + oa + ob + oc + obs",
a="a ~ la + l + ll + oa + ob + oc + obs")
gform2 <- c(la="la ~ ll + oa + ob + oc + obs",
a="a ~ la + l + ll + oa + ob + oc + obs",
c="c ~ a + la + l + ll + oa + ob + oc + obs")
# Define imputation formulae for jomo impute
impfmlc <- y + a ~ la + l + ll + oa + ob + oc + obs
impfml <- y + a ~ l + ll + oa + ob + oc + obs + (1|id)
# Reshape data into wide format and drop all lagged variables except those at baseline
datawide <- data
datawide$id <- as.numeric(levels(datawide$id))[datawide$id]
datawide <- reshape(datawide[,c(-10)],
v.names=c("ll","la","l","a","y"),
idvar="id",
timevar="obs",
direction="wide")[,c(-10,-11,-15,-16,-20,-21)]
# Standard-FCS imputation via MICE
# Run imputation and save as list of data frames
impdatafcs <- complete(mice(datawide[,-1],
m=nimpute,
maxit=10),
action="all")
# Reshape back into long form for analysis, included re-creating lagged variables
impdatafcs <- lapply(impdatafcs, function (x) {
x <- cbind(id=seq.int(nrow(x)),x)
x$ll.2 <- x$l.1
x$ll.3 <- x$l.2
x$ll.4 <- x$l.3
x$la.2 <- x$a.1
x$la.3 <- x$a.2
x$la.4 <- x$a.3
x <- data.frame(x[,c(1:9,19,22,10:12,20,23,13:15,21,24,16:18)])
x <- reshape(x,varying=list(c(5,10,15,20),
c(6,11,16,21),
c(7,12,17,22),
c(8,13,18,23),
c(9,14,19,24)),
v.names=c("ll","la","l","a","y"),
times=c(1,2,3,4),
sep=".",
idvar="id",
timevar="obs",
direction="long")
x <- data.frame(x)
})
# JM imputation via MVN
# Run imputation and save as list of data frames
s <- prelim.norm(as.matrix(datawide[,-1]))
thetahat <- em.norm(s)
rngseed(269012)
theta <- da.norm(s, thetahat, steps=100, showits=TRUE)
ximp <- lapply(vector("list", length=nimpute),function (x) {data.frame(imp.norm(s,theta,as.matrix(datawide[,-1])))})
# Reshape back into long form for analysis, included re-creating lagged variables
impdatamvn <- lapply(ximp, function (x) {
x <- cbind(id=seq.int(nrow(x)),x)
x$ll.2 <- x$l.1
x$ll.3 <- x$l.2
x$ll.4 <- x$l.3
x$la.2 <- x$a.1
x$la.3 <- x$a.2
x$la.4 <- x$a.3
x <- data.frame(x[,c(1:9,19,22,10:12,20,23,13:15,21,24,16:18)])
x <- reshape(x,varying=list(c(5,10,15,20),
c(6,11,16,21),
c(7,12,17,22),
c(8,13,18,23),
c(9,14,19,24)),
v.names=c("ll","la","l","a","y"),
times=c(1,2,3,4),
sep=".",
idvar="id",
timevar="obs",
direction="long")
x <- data.frame(x)
})
# Random-intercept imputation via jomo
# Transform binary variables into factors (required for JOMO to run)
data$ll <- factor(data$ll)
data$la <- factor(data$la)
data$l <- factor(data$l)
data$a <- factor(data$a)
# Run imputation and save as list of data frames
impdatajomo <- mitmlComplete(jomoImpute(data=data.frame(data[,!(names(data) %in% "c")]),formula=impfml,m=nimpute,n.burn=100,n.iter=10),print="all")
data$ll <- as.numeric(levels(data$ll))[data$ll]
data$la <- as.numeric(levels(data$la))[data$la]
data$l <- as.numeric(levels(data$l))[data$l]
data$a <- as.numeric(levels(data$a))[data$a]
} else {
# Define analytic models for outcome, propensities, etc
outmodel <- "y ~ a + la + l + ll + oa + ob + oc + obs"
outmodel2 <- "y ~ a + la + ll + oa + ob + oc + obs"
propa0model <- "la ~ ll + oa + ob + oc + obs"
propa1model <- "a ~ la + l + ll + oa + ob + oc + obs"
missmodel <- "c ~ a + la + w + lw + oa + ob + oc + obs"
# qform/gform is essentially the same, but in the form required by the package ltmle
qform <- c(l="Q.kplus1 ~ la + ll + obs + oa + ob + oc",
y="Q.kplus1 ~ a + la + l + la + oa + ob + oc + obs")
gform1 <- c(la="la ~ ll + oa + ob + oc + obs",
a="a ~ la + l + ll + oa + ob + oc + obs")
gform2 <- c(la="la ~ ll + oa + ob + oc + obs",
a="a ~ la + l + ll + oa + ob + oc + obs",
c="c ~ a + la + w + lw + oa + ob + oc + obs")
# Define imputation formulae for jomo impute
impfmlc <- y + a ~ la + l + ll + w + lw + oa + ob + oc + obs
impfml <- y + a ~ l + ll + w + lw + oa + ob + oc + obs + (1|id)
# Reshape data into wide format and drop all lagged variables except those at baseline
datawide <- data
datawide$id <- as.numeric(levels(datawide$id))[datawide$id]
datawide <- reshape(datawide[,c(-12)],
v.names=c("ll","lw","la","l","w","a","y"),
idvar="id",
timevar="obs",
direction="wide")[,c(-12,-13,-14,-19,-20,-21,-26,-27,-28)]
# Standard-FCS imputation via MICE
# Run imputation and save as list of data frames
impdatafcs <- complete(mice(datawide[,-1],
m=nimpute,
maxit=10),
action="all")
# Reshape back into long form for analysis, included re-creating lagged variables
impdatafcs <- lapply(impdatafcs, function (x) {
x <- cbind(id=seq.int(nrow(x)),x)
x$ll.2 <- x$l.1
x$ll.3 <- x$l.2
x$ll.4 <- x$l.3
x$lw.2 <- x$w.1
x$lw.3 <- x$w.2
x$lw.4 <- x$w.3
x$la.2 <- x$a.1
x$la.3 <- x$a.2
x$la.4 <- x$a.3
x <- data.frame(x[,c(1:11,24,27,30,12:15,25,28,31,16:19,26,29,32,20:23)])
x <- reshape(x,varying=list(c(5,12,19,26),
c(6,13,20,27),
c(7,14,21,28),
c(8,15,22,29),
c(9,16,23,30),
c(10,17,24,31),
c(11,18,25,32)),
v.names=c("ll","lw","la","l","w","a","y"),
times=c(1,2,3,4),
sep=".",
idvar="id",
timevar="obs",
direction="long")
x <- data.frame(x)
})
# Random-intercept imputation via MVN
# Run imputation and save as list of data frames
s <- prelim.norm(as.matrix(datawide[,-1]))
thetahat <- em.norm(s)
rngseed(269012)
theta <- da.norm(s, thetahat, steps=100, showits=TRUE)
ximp <- lapply(vector("list", length=nimpute),function (x) {data.frame(imp.norm(s,theta,as.matrix(datawide[,-1])))})
# Reshape back into long form for analysis, included re-creating lagged variables
impdatamvn <- lapply(ximp, function (x) {
x <- cbind(id=seq.int(nrow(x)),x)
x$ll.2 <- x$l.1
x$ll.3 <- x$l.2
x$ll.4 <- x$l.3
x$lw.2 <- x$w.1
x$lw.3 <- x$w.2
x$lw.4 <- x$w.3
x$la.2 <- x$a.1
x$la.3 <- x$a.2
x$la.4 <- x$a.3
x <- data.frame(x[,c(1:11,24,27,30,12:15,25,28,31,16:19,26,29,32,20:23)])
x <- reshape(x,varying=list(c(5,12,19,26),
c(6,13,20,27),
c(7,14,21,28),
c(8,15,22,29),
c(9,16,23,30),
c(10,17,24,31),
c(11,18,25,32)),
v.names=c("ll","lw","la","l","w","a","y"),
times=c(1,2,3,4),
sep=".",
idvar="id",
timevar="obs",
direction="long")
x <- data.frame(x)
})
# Random-intercept imputation via jomo
# Transform binary variables into factors (required for JOMO to run)
data$ll <- factor(data$ll)
data$la <- factor(data$la)
data$lw <- factor(data$la)
data$l <- factor(data$l)
data$a <- factor(data$a)
data$w <- factor(data$a)
# Run imputation and save as list of data frames
impdatajomo <- mitmlComplete(jomoImpute(data=data.frame(data[,!(names(data) %in% "c")]),formula=impfml,m=nimpute,n.burn=100,n.iter=10),print="all")
data$ll <- as.numeric(levels(data$ll))[data$ll]
data$la <- as.numeric(levels(data$la))[data$la]
data$l <- as.numeric(levels(data$l))[data$l]
data$a <- as.numeric(levels(data$a))[data$a]
}
create.Learner("SL.ranger", params = list(num.trees = 250)) # same as in top-level code - replicated because sometimes goes wrong in parallel
# Define libraries to be used by SuperLearner
# Top set is set to be used in final analysis. Second set is used for testing.
SLlib1 <- c("SL.glm","SL.glm.interaction","SL.gam","SL.ranger_1")
SLlib2 <- list(g=c("SL.glm","SL.glm.interaction","SL.gam","SL.ranger_1"),
Q=c("SL.glm","SL.glm.interaction","SL.gam"))
SLlib1 <- c("SL.glm","SL.glm.interaction")
SLlib2 <- list(g=c("SL.glm","SL.glm.interaction"),
Q=c("SL.glm","SL.glm.interaction"))
# GLM IPTW-based analysis
GLexp0 <- glm(formula=propa0model,data=data,family=binomial)
GLexp1 <- glm(formula=propa1model,data=data,family=binomial)
GLexpc <- glm(formula=missmodel,data=data,family=binomial)
GLexpp <- data.table(cbind(id=data$id,obs=data$obs,a=data$a,la=data$la,
propa=ifelse(data$a==1,checkrange(predict(GLexp1,type="response")),checkrange(1-predict(GLexp1,type="response"))),
propla=ifelse(data$la==1,checkrange(predict(GLexp0,type="response")),checkrange(1-predict(GLexp0,type="response"))),
propc=checkrange(1-predict(GLexpc,type="response"))))
GLexpp$p <- GLexpp$propla*GLexpp$propa
GLexpp$pc <- GLexpp$propla*GLexpp$propa*GLexpp$propc
GLexpp$GLwt <- 1/GLexpp$p
GLexpp$GLwtc <- 1/GLexpp$pc
GLexpp$GLwc <- 1/GLexpp$propc
# IPC-weighted GLM IPT and DR-IPT analysis
GLiptcw <- glm(y~a+la,data=merge(data,GLexpp[,c("id","obs","GLwtc")]),weight=GLwtc,family=gaussian)
GLdriptcw <- glm(outmodel2,data=merge(data,GLexpp[,c("id","obs","GLwtc")]),weight=GLwtc,family=gaussian)
# SuperLearner IPTW-based analysis
SLexp0 <- SuperLearner(Y=as.vector(data[,]$la),X=data[,c("obs","oa","ob","oc","ll")],id=data[,1],SL.library=SLlib1,family=binomial)
SLexp1 <- SuperLearner(Y=as.vector(data[,]$a),X=data[,c("obs","oa","ob","oc","ll","la","l")],id=data[,1],SL.library=SLlib1,family=binomial)
SLexpc <- SuperLearner(Y=as.vector(data$c),X=data[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a") else c("obs","oa","ob","oc","lw","la","w","a")],id=data[,1],SL.library=SLlib1,family=binomial)
SLexpp <- data.table(cbind(id=data$id,obs=data$obs,a=data$a,la=data$la,
propa=ifelse(data$a==1,checkrange(predict(SLexp1)$pred),checkrange(1-predict(SLexp1)$pred)),
propla=ifelse(data$la==1,checkrange(predict(SLexp0)$pred),checkrange(1-predict(SLexp0)$pred)),
propc=as.vector(checkrange(1-predict(SLexpc)$pred))))
SLexpp$p <- SLexpp$propla*SLexpp$propa
SLexpp$pc <- SLexpp$propla*SLexpp$propa*SLexpp$propc
SLexpp$SLwt <- 1/SLexpp$p
SLexpp$SLwtc <- 1/SLexpp$pc
SLexpp$SLwc <- 1/SLexpp$propc
# IPC-weighted SL IPT and DR-IPT analysis
SLiptcw <- glm(y~a+la,data=merge(data,SLexpp[,c("id","obs","SLwtc")]),weight=SLwtc,family=gaussian)
SLdriptcw <- glm(outmodel2,data=merge(data,SLexpp[,c("id","obs","SLwtc")]),weight=SLwtc,family=gaussian)
data$c <- BinaryToCensoring(is.censored=data$c)
# IPC-weighted GLM TMLE
GLtmlec <- ltmle(data[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","c","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","c","y")],
id=data[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Cnodes="c",
Qform=qform,
gform=gform2,
abar=list(c(1,1),c(0,0)),
estimate.time = FALSE)
# IPC-weighted SuperLearner TMLE
SLtmlec <- ltmle(data[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","c","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","c","y")],
id=data[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Cnodes="c",
Qform=qform,
gform=gform2,
abar=list(c(1,1),c(0,0)),
SL.library = SLlib2,
estimate.time = FALSE)
# GLM IPC-weighted naive analysis
GLGLMc <- glm(outmodel,data=merge(data,GLexpp[,c("id","obs","GLwc")]),weight=GLwc,family=gaussian)
GLRIc <- lmer(paste0(outmodel,"+(1|id)"),data=merge(data,GLexpp[,c("id","obs","GLwc")]),weight=GLwc)
GLGEEc <- geeglm(formula(outmodel),data=merge(data,GLexpp[,c("id","obs","GLwc")]),id=data$id,waves=data$obs,weight=GLwc,family=gaussian)
# SL IPC-weighted naive analysis
SLGLMc <- glm(outmodel,data=merge(data,SLexpp[,c("id","obs","SLwc")]),weight=SLwc,family=gaussian)
SLRIc <- lmer(paste0(outmodel,"+(1|id)"),data=merge(data,SLexpp[,c("id","obs","SLwc")]),weight=SLwc)
SLGEEc <- geeglm(formula(outmodel),data=merge(data,SLexpp[,c("id","obs","SLwc")]),id=data$id,waves=data$obs,weight=SLwc,family=gaussian)
# FCS Multiple Imputation Analyses
# Imputed naive analysis
# GLM
GLMimpfcs <- lapply(impdatafcs, function (x) {glm(formula=formula(outmodel),data=x,family=gaussian)})
GLMimpcofcs <- matrix(unlist(lapply(GLMimpfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLMimpsefcs <- matrix(unlist(lapply(GLMimpfcs, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLMfcs <- matrix(unlist(mi.meld(q=GLMimpcofcs, se=GLMimpsefcs)),nrow=1,ncol=2)
# Random intercept
RIimpfcs <- lapply(impdatafcs, function (x) {lmer(paste0(outmodel,"+(1|id)"),data=x)})
RIimpcofcs <- matrix(unlist(lapply(RIimpfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
RIimpsefcs <- matrix(unlist(lapply(RIimpfcs, function (x) {sqrt(vcov(x)[2,2] + vcov(x)[3,3] + 2*vcov(x)[2,3])})),nrow=nimpute,ncol=1)
RIfcs <- matrix(unlist(mi.meld(q=RIimpcofcs, se=RIimpsefcs)),nrow=1,ncol=2)
# GEE
GEEimpfcs <- lapply(impdatafcs, function (x) {geeglm(formula(outmodel),data=x,id=x$id,waves=x$obs,family=gaussian)})
GEEimpcofcs <- matrix(unlist(lapply(GEEimpfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GEEimpsefcs <- matrix(unlist(lapply(GEEimpfcs, function (x) {sqrt(summary(x)$cov.scaled[2,2] + summary(x)$cov.scaled[3,3] + 2*summary(x)$cov.scaled[2,3])})),nrow=nimpute,ncol=1)
GEEfcs <- matrix(unlist(mi.meld(q=GEEimpcofcs, se=GEEimpsefcs)),nrow=1,ncol=2)
# GLM-based IPTW and DR-IPTW
GLiptwimpfcs <- lapply(impdatafcs, function (x) {
GLexp0 <- glm(formula=propa0model,data=x,family=binomial)
GLexp1 <- glm(formula=propa1model,data=x,family=binomial)
GLexpp <- data.table(cbind(id=x$id,obs=x$obs,a=x$a,la=x$la,
propa=ifelse(x$a==1,checkrange(predict(GLexp1,type="response")),checkrange(1-predict(GLexp1,type="response"))),
propla=ifelse(x$la==1,checkrange(predict(GLexp0,type="response")),checkrange(1-predict(GLexp0,type="response")))))
GLexpp$p <- GLexpp$propla*GLexpp$propa
GLexpp$GLwt <- 1/GLexpp$p
D <- merge(x,GLexpp[,c("id","obs","GLwt")])
D
})
GLiptwimpsumfcs <- lapply(GLiptwimpfcs, function (x) {glm(y~a+la,data=x,weight=GLwt,family=gaussian)})
GLiptwimpcofcs <- matrix(unlist(lapply(GLiptwimpsumfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLiptwimpsefcs <- matrix(unlist(lapply(GLiptwimpsumfcs, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLiptwfcs <- matrix(unlist(mi.meld(q=GLiptwimpcofcs, se=GLiptwimpsefcs)),nrow=1,ncol=2)
GLdriptwimpsumfcs <- lapply(GLiptwimpfcs, function (x) {glm(outmodel2,data=x,weight=GLwt,family=gaussian)})
GLdriptwimpcofcs <- matrix(unlist(lapply(GLdriptwimpsumfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLdriptwimpsefcs <- matrix(unlist(lapply(GLdriptwimpsumfcs, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLdriptwfcs <- matrix(unlist(mi.meld(q=GLdriptwimpcofcs, se=GLdriptwimpsefcs)),nrow=1,ncol=2)
# SuperLearner-based IPTW and DR-IPTW
SLiptwimpfcs <- lapply(impdatafcs, function (x) {
SLexp0 <- SuperLearner(Y=as.numeric(as.vector(x$la)),X=x[,c("obs","oa","ob","oc","ll")],id=x[,"id"],SL.library=SLlib1,family=binomial)
SLexp1 <- SuperLearner(Y=as.numeric(as.vector(x$a)),X=x[,c("obs","oa","ob","oc","ll","la","l")],id=x[,"id"],SL.library=SLlib1,family=binomial)
SLexpp <- data.table(cbind(id=x$id,obs=x$obs,a=x$a,la=x$la,
propa=ifelse(x$a==1,checkrange(predict(SLexp1)$pred),checkrange(1-predict(SLexp1)$pred)),
propla=ifelse(x$la==1,checkrange(predict(SLexp0)$pred),checkrange(1-predict(SLexp0)$pred))))
SLexpp$p <- SLexpp$propla*SLexpp$propa
SLexpp$SLwt <- 1/SLexpp$p
D <- merge(x,SLexpp[,c("id","obs","SLwt")])
D
})
SLiptwimpsumfcs <- lapply(SLiptwimpfcs, function (x) {glm(y~a+la,data=x,weight=SLwt,family=gaussian)})
SLiptwimpcofcs <- matrix(unlist(lapply(SLiptwimpsumfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
SLiptwimpsefcs <- matrix(unlist(lapply(SLiptwimpsumfcs, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
SLiptwfcs <- matrix(unlist(mi.meld(q=SLiptwimpcofcs, se=SLiptwimpsefcs)),nrow=1,ncol=2)
SLdriptwimpsumfcs <- lapply(SLiptwimpfcs, function (x) {glm(outmodel2,data=x,weight=SLwt,family=gaussian)})
SLdriptwimpcofcs <- matrix(unlist(lapply(SLdriptwimpsumfcs, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
SLdriptwimpsefcs <- matrix(unlist(lapply(SLdriptwimpsumfcs, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
SLdriptwfcs <- matrix(unlist(mi.meld(q=SLdriptwimpcofcs, se=SLdriptwimpsefcs)),nrow=1,ncol=2)
# GLM-based TMLE
GLtmleimpfcs <- lapply(impdatafcs, function (x) {
ltmle(x[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","y")],
id=x[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
estimate.time = FALSE)})
GLtmleimpsumfcs <- lapply(GLtmleimpfcs, function (x) {summary(x)})
GLtmleimpcofcs <- matrix(unlist(lapply(GLtmleimpsumfcs, function (x) {x$effect.measures$ATE$estimate})),nrow=nimpute,ncol=1)
GLtmleimpsefcs <- matrix(unlist(lapply(GLtmleimpsumfcs, function (x) {x$effect.measures$ATE$std.dev})),nrow=nimpute,ncol=1)
GLtmlefcs <- matrix(unlist(mi.meld(q=GLtmleimpcofcs, se=GLtmleimpsefcs)),nrow=1,ncol=2)
# SuperLearner based TMLE
SLtmleimpfcs <- lapply(impdatafcs, function (x) {
ltmle(x[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","y")],
id=x[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
SL.library = SLlib2,
estimate.time = FALSE)})
SLtmleimpsumfcs <- lapply(SLtmleimpfcs, function (x) {summary(x)})
SLtmleimpcofcs <- matrix(unlist(lapply(SLtmleimpsumfcs, function (x) {x$effect.measures$ATE$estimate})),nrow=nimpute,ncol=1)
SLtmleimpsefcs <- matrix(unlist(lapply(SLtmleimpsumfcs, function (x) {x$effect.measures$ATE$std.dev})),nrow=nimpute,ncol=1)
SLtmlefcs <- matrix(unlist(mi.meld(q=SLtmleimpcofcs, se=SLtmleimpsefcs)),nrow=1,ncol=2)
# MVN Multiple Imputation Analyses
# Imputed naive analysis
# GLM
GLMimpmvn <- lapply(impdatamvn, function (x) {glm(formula=formula(outmodel),data=x,family=gaussian)})
GLMimpcomvn <- matrix(unlist(lapply(GLMimpmvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLMimpsemvn <- matrix(unlist(lapply(GLMimpmvn, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLMmvn <- matrix(unlist(mi.meld(q=GLMimpcomvn, se=GLMimpsemvn)),nrow=1,ncol=2)
# Random intercept
RIimpmvn <- lapply(impdatamvn, function (x) {lmer(paste0(outmodel,"+(1|id)"),data=x)})
RIimpcomvn <- matrix(unlist(lapply(RIimpmvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
RIimpsemvn <- matrix(unlist(lapply(RIimpmvn, function (x) {sqrt(vcov(x)[2,2] + vcov(x)[3,3] + 2*vcov(x)[2,3])})),nrow=nimpute,ncol=1)
RImvn <- matrix(unlist(mi.meld(q=RIimpcomvn, se=RIimpsemvn)),nrow=1,ncol=2)
# GEE
GEEimpmvn <- lapply(impdatamvn, function (x) {geeglm(formula(outmodel),data=x,id=x$id,waves=x$obs,family=gaussian)})
GEEimpcomvn <- matrix(unlist(lapply(GEEimpmvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GEEimpsemvn <- matrix(unlist(lapply(GEEimpmvn, function (x) {sqrt(summary(x)$cov.scaled[2,2] + summary(x)$cov.scaled[3,3] + 2*summary(x)$cov.scaled[2,3])})),nrow=nimpute,ncol=1)
GEEmvn <- matrix(unlist(mi.meld(q=GEEimpcomvn, se=GEEimpsemvn)),nrow=1,ncol=2)
# GLM-based IPTW and DR-IPTW
GLiptwimpmvn <- lapply(impdatamvn, function (x) {
GLexp0 <- glm(formula=propa0model,data=x,family=binomial)
GLexp1 <- glm(formula=propa1model,data=x,family=binomial)
GLexpp <- data.table(cbind(id=x$id,obs=x$obs,a=x$a,la=x$la,
propa=ifelse(x$a==1,checkrange(predict(GLexp1,type="response")),checkrange(1-predict(GLexp1,type="response"))),
propla=ifelse(x$la==1,checkrange(predict(GLexp0,type="response")),checkrange(1-predict(GLexp0,type="response")))))
GLexpp$p <- GLexpp$propla*GLexpp$propa
GLexpp$GLwt <- 1/GLexpp$p
D <- merge(x,GLexpp[,c("id","obs","GLwt")])
D
})
GLiptwimpsummvn <- lapply(GLiptwimpmvn, function (x) {glm(y~a+la,data=x,weight=GLwt,family=gaussian)})
GLiptwimpcomvn <- matrix(unlist(lapply(GLiptwimpsummvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLiptwimpsemvn <- matrix(unlist(lapply(GLiptwimpsummvn, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLiptwmvn <- matrix(unlist(mi.meld(q=GLiptwimpcomvn, se=GLiptwimpsemvn)),nrow=1,ncol=2)
GLdriptwimpsummvn <- lapply(GLiptwimpmvn, function (x) {glm(outmodel2,data=x,weight=GLwt,family=gaussian)})
GLdriptwimpcomvn <- matrix(unlist(lapply(GLdriptwimpsummvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLdriptwimpsemvn <- matrix(unlist(lapply(GLdriptwimpsummvn, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLdriptwmvn <- matrix(unlist(mi.meld(q=GLdriptwimpcomvn, se=GLdriptwimpsemvn)),nrow=1,ncol=2)
# SuperLearner-based IPTW and DR-IPTW
SLiptwimpmvn <- lapply(impdatamvn, function (x) {
SLexp0 <- SuperLearner(Y=as.numeric(as.vector(x$la)),X=x[,c("obs","oa","ob","oc","ll")],id=x[,"id"],SL.library=SLlib1,family=binomial)
SLexp1 <- SuperLearner(Y=as.numeric(as.vector(x$a)),X=x[,c("obs","oa","ob","oc","ll","la","l")],id=x[,"id"],SL.library=SLlib1,family=binomial)
SLexpp <- data.table(cbind(id=x$id,obs=x$obs,a=x$a,la=x$la,
propa=ifelse(x$a==1,checkrange(predict(SLexp1)$pred),checkrange(1-predict(SLexp1)$pred)),
propla=ifelse(x$la==1,checkrange(predict(SLexp0)$pred),checkrange(1-predict(SLexp0)$pred))))
SLexpp$p <- SLexpp$propla*SLexpp$propa
SLexpp$SLwt <- 1/SLexpp$p
D <- merge(x,SLexpp[,c("id","obs","SLwt")])
D
})
SLiptwimpsummvn <- lapply(SLiptwimpmvn, function (x) {glm(y~a+la,data=x,weight=SLwt,family=gaussian)})
SLiptwimpcomvn <- matrix(unlist(lapply(SLiptwimpsummvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
SLiptwimpsemvn <- matrix(unlist(lapply(SLiptwimpsummvn, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
SLiptwmvn <- matrix(unlist(mi.meld(q=SLiptwimpcomvn, se=SLiptwimpsemvn)),nrow=1,ncol=2)
SLdriptwimpsummvn <- lapply(SLiptwimpmvn, function (x) {glm(outmodel2,data=x,weight=SLwt,family=gaussian)})
SLdriptwimpcomvn <- matrix(unlist(lapply(SLdriptwimpsummvn, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
SLdriptwimpsemvn <- matrix(unlist(lapply(SLdriptwimpsummvn, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
SLdriptwmvn <- matrix(unlist(mi.meld(q=SLdriptwimpcomvn, se=SLdriptwimpsemvn)),nrow=1,ncol=2)
# GLM-based TMLE
GLtmleimpmvn <- lapply(impdatamvn, function (x) {
ltmle(x[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","y")],
id=x[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
estimate.time = FALSE)})
GLtmleimpsummvn <- lapply(GLtmleimpmvn, function (x) {summary(x)})
GLtmleimpcomvn <- matrix(unlist(lapply(GLtmleimpsummvn, function (x) {x$effect.measures$ATE$estimate})),nrow=nimpute,ncol=1)
GLtmleimpsemvn <- matrix(unlist(lapply(GLtmleimpsummvn, function (x) {x$effect.measures$ATE$std.dev})),nrow=nimpute,ncol=1)
GLtmlemvn <- matrix(unlist(mi.meld(q=GLtmleimpcomvn, se=GLtmleimpsemvn)),nrow=1,ncol=2)
# SuperLearner-based TMLE
SLtmleimpmvn <- lapply(impdatamvn, function (x) {
ltmle(x[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","y")],
id=x[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
SL.library = SLlib2,
estimate.time = FALSE)})
SLtmleimpsummvn <- lapply(SLtmleimpmvn, function (x) {summary(x)})
SLtmleimpcomvn <- matrix(unlist(lapply(SLtmleimpsummvn, function (x) {x$effect.measures$ATE$estimate})),nrow=nimpute,ncol=1)
SLtmleimpsemvn <- matrix(unlist(lapply(SLtmleimpsummvn, function (x) {x$effect.measures$ATE$std.dev})),nrow=nimpute,ncol=1)
SLtmlemvn <- matrix(unlist(mi.meld(q=SLtmleimpcomvn, se=SLtmleimpsemvn)),nrow=1,ncol=2)
# JOMO Multiple Imputation Analyses
# Imputed naive analysis
#GLM
GLMimpjomo <- lapply(impdatajomo, function (x) {glm(formula=formula(outmodel),data=x,family=gaussian)})
GLMimpcojomo <- matrix(unlist(lapply(GLMimpjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLMimpsejomo <- matrix(unlist(lapply(GLMimpjomo, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLMjomo <- matrix(unlist(mi.meld(q=GLMimpcojomo, se=GLMimpsejomo)),nrow=1,ncol=2)
# Random intercept
RIimpjomo <- lapply(impdatajomo, function (x) {lmer(paste0(outmodel,"+(1|id)"),data=x)})
RIimpcojomo <- matrix(unlist(lapply(RIimpjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
RIimpsejomo <- matrix(unlist(lapply(RIimpjomo, function (x) {sqrt(vcov(x)[2,2] + vcov(x)[3,3] + 2*vcov(x)[2,3])})),nrow=nimpute,ncol=1)
RIjomo <- matrix(unlist(mi.meld(q=RIimpcojomo, se=RIimpsejomo)),nrow=1,ncol=2)
# GEE
GEEimpjomo <- lapply(impdatajomo, function (x) {geeglm(formula(outmodel),data=x,id=x$id,waves=x$obs,family=gaussian)})
GEEimpcojomo <- matrix(unlist(lapply(GEEimpjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GEEimpsejomo <- matrix(unlist(lapply(GEEimpjomo, function (x) {sqrt(summary(x)$cov.scaled[2,2] + summary(x)$cov.scaled[3,3] + 2*summary(x)$cov.scaled[2,3])})),nrow=nimpute,ncol=1)
GEEjomo <- matrix(unlist(mi.meld(q=GEEimpcojomo, se=GEEimpsejomo)),nrow=1,ncol=2)
# GLM-based IPTW and DR-IPTW
GLiptwimpjomo <- lapply(impdatajomo, function (x) {
GLexp0 <- glm(formula=propa0model,data=x,family=binomial)
GLexp1 <- glm(formula=propa1model,data=x,family=binomial)
GLexpp <- data.table(cbind(id=x$id,obs=x$obs,a=x$a,la=x$la,
propa=ifelse(x$a==1,checkrange(predict(GLexp1,type="response")),checkrange(1-predict(GLexp1,type="response"))),
propla=ifelse(x$la==1,checkrange(predict(GLexp0,type="response")),checkrange(1-predict(GLexp0,type="response")))))
GLexpp$p <- GLexpp$propla*GLexpp$propa
GLexpp$GLwt <- 1/GLexpp$p
D <- merge(x,GLexpp[,c("id","obs","GLwt")])
D
})
GLiptwimpsumjomo <- lapply(GLiptwimpjomo, function (x) {glm(y~a+la,data=x,weight=GLwt,family=gaussian)})
GLiptwimpcojomo <- matrix(unlist(lapply(GLiptwimpsumjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLiptwimpsejomo <- matrix(unlist(lapply(GLiptwimpsumjomo, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLiptwjomo <- matrix(unlist(mi.meld(q=GLiptwimpcojomo, se=GLiptwimpsejomo)),nrow=1,ncol=2)
GLdriptwimpsumjomo <- lapply(GLiptwimpjomo, function (x) {glm(outmodel2,data=x,weight=GLwt,family=gaussian)})
GLdriptwimpcojomo <- matrix(unlist(lapply(GLdriptwimpsumjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
GLdriptwimpsejomo <- matrix(unlist(lapply(GLdriptwimpsumjomo, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
GLdriptwjomo <- matrix(unlist(mi.meld(q=GLdriptwimpcojomo, se=GLdriptwimpsejomo)),nrow=1,ncol=2)
# SuperLearner-based IPTW and DR-IPTW
SLiptwimpjomo <- lapply(impdatajomo, function (x) {
SLexp0 <- SuperLearner(Y=as.numeric(levels(x$la))[x$la],X=x[,c("obs","oa","ob","oc","ll")],id=x[,"id"],SL.library=SLlib1,family=binomial)
SLexp1 <- SuperLearner(Y=as.numeric(levels(x$a))[x$a],X=x[,c("obs","oa","ob","oc","ll","la","l")],id=x[,"id"],SL.library=SLlib1,family=binomial)
SLexpp <- data.table(cbind(id=x$id,obs=x$obs,a=x$a,la=x$la,
propa=ifelse(x$a==1,checkrange(predict(SLexp1)$pred),checkrange(1-predict(SLexp1)$pred)),
propla=ifelse(x$la==1,checkrange(predict(SLexp0)$pred),checkrange(1-predict(SLexp0)$pred))))
SLexpp$p <- SLexpp$propla*SLexpp$propa
SLexpp$SLwt <- 1/SLexpp$p
D <- merge(x,SLexpp[,c("id","obs","SLwt")])
D
})
SLiptwimpsumjomo <- lapply(SLiptwimpjomo, function (x) {glm(y~a+la,data=x,weight=SLwt,family=gaussian)})
SLiptwimpcojomo <- matrix(unlist(lapply(SLiptwimpsumjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
SLiptwimpsejomo <- matrix(unlist(lapply(SLiptwimpsumjomo, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
SLiptwjomo <- matrix(unlist(mi.meld(q=SLiptwimpcojomo, se=SLiptwimpsejomo)),nrow=1,ncol=2)
SLdriptwimpsumjomo <- lapply(SLiptwimpjomo, function (x) {glm(outmodel2,data=x,weight=SLwt,family=gaussian)})
SLdriptwimpcojomo <- matrix(unlist(lapply(SLdriptwimpsumjomo, function (x) {coef(summary(x))[2,1]+coef(summary(x))[3,1]})),nrow=nimpute,ncol=1)
SLdriptwimpsejomo <- matrix(unlist(lapply(SLdriptwimpsumjomo, function (x) {sqrt(vcovCR(x, cluster=data$id, type = "CR3")[2,2] + vcovCR(x, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(x, cluster=data$id, type = "CR3")[2,3])})),nrow=nimpute,ncol=1)
SLdriptwjomo <- matrix(unlist(mi.meld(q=SLdriptwimpcojomo, se=SLdriptwimpsejomo)),nrow=1,ncol=2)
# GLM-based TMLE
GLtmleimpjomo <- lapply(impdatajomo, function (x) {
x$ll <- as.numeric(levels(x$ll))[x$ll]
x$la <- as.numeric(levels(x$la))[x$la]
x$l <- as.numeric(levels(x$l))[x$l]
x$a <- as.numeric(levels(x$a))[x$a]
ltmle(x[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","y")],
id=x[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
estimate.time = FALSE)})
GLtmleimpsumjomo <- lapply(GLtmleimpjomo, function (x) {summary(x)})
GLtmleimpcojomo <- matrix(unlist(lapply(GLtmleimpsumjomo, function (x) {x$effect.measures$ATE$estimate})),nrow=nimpute,ncol=1)
GLtmleimpsejomo <- matrix(unlist(lapply(GLtmleimpsumjomo, function (x) {x$effect.measures$ATE$std.dev})),nrow=nimpute,ncol=1)
GLtmlejomo <- matrix(unlist(mi.meld(q=GLtmleimpcojomo, se=GLtmleimpsejomo)),nrow=1,ncol=2)
# SuperLearner-based TMLE
SLtmleimpjomo <- lapply(impdatajomo, function (x) {
x$ll <- as.numeric(levels(x$ll))[x$ll]
x$la <- as.numeric(levels(x$la))[x$la]
x$l <- as.numeric(levels(x$l))[x$l]
x$a <- as.numeric(levels(x$a))[x$a]
ltmle(x[,if (scenario==1) c("obs","oa","ob","oc","ll","la","l","a","y") else c("obs","oa","ob","oc","ll","lw","la","l","w","a","y")],
id=x[,"id"],
Anodes=c("la","a"),
Lnodes=if (scenario==1) c("l") else c("l","w"),
Ynodes="y",
Qform=qform,
gform=gform1,
abar=list(c(1,1),c(0,0)),
SL.library = SLlib2,
estimate.time = FALSE)})
SLtmleimpsumjomo <- lapply(SLtmleimpjomo, function (x) {summary(x)})
SLtmleimpcojomo <- matrix(unlist(lapply(SLtmleimpsumjomo, function (x) {x$effect.measures$ATE$estimate})),nrow=nimpute,ncol=1)
SLtmleimpsejomo <- matrix(unlist(lapply(SLtmleimpsumjomo, function (x) {x$effect.measures$ATE$std.dev})),nrow=nimpute,ncol=1)
SLtmlejomo <- matrix(unlist(mi.meld(q=SLtmleimpcojomo, se=SLtmleimpsejomo)),nrow=1,ncol=2)
# Combine results into vector
c(coef(summary(GLGLMc))[2,1]+coef(summary(GLGLMc))[3,1],sqrt(vcovCR(GLGLMc, cluster=data$id, type = "CR3")[2,2] + vcovCR(GLGLMc, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(GLGLMc, cluster=data$id, type = "CR3")[2,3]),
coef(summary(GLRIc))[2,1]+coef(summary(GLRIc))[3,1],sqrt(vcov(GLRIc)[2,2] + vcov(GLRIc)[3,3] + 2*vcov(GLRIc)[2,3]),
coef(summary(GLGEEc))[2,1]+coef(summary(GLGEEc))[3,1],sqrt(summary(GLGEEc)$cov.scaled[2,2] + summary(GLGEEc)$cov.scaled[3,3] + 2*summary(GLGEEc)$cov.scaled[2,3]),
coef(summary(GLiptcw))[2,1]+coef(summary(GLiptcw))[3,1],sqrt(vcovCR(GLiptcw, cluster=data$id, type = "CR3")[2,2] + vcovCR(GLiptcw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(GLiptcw, cluster=data$id, type = "CR3")[2,3]),
coef(summary(GLdriptcw))[2,1]+coef(summary(GLdriptcw))[3,1],sqrt(vcovCR(GLdriptcw, cluster=data$id, type = "CR3")[2,2] + vcovCR(GLdriptcw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(GLdriptcw, cluster=data$id, type = "CR3")[2,3]),
summary(GLtmlec)$effect.measures$ATE$estimate,summary(GLtmlec)$effect.measures$ATE$std.dev,
coef(summary(SLGLMc))[2,1]+coef(summary(SLGLMc))[3,1],sqrt(vcovCR(SLGLMc, cluster=data$id, type = "CR3")[2,2] + vcovCR(SLGLMc, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(SLGLMc, cluster=data$id, type = "CR3")[2,3]),
coef(summary(SLRIc))[2,1]+coef(summary(SLRIc))[3,1],sqrt(vcov(SLRIc)[2,2] + vcov(SLRIc)[3,3] + 2*vcov(SLRIc)[2,3]),
coef(summary(SLGEEc))[2,1]+coef(summary(SLGEEc))[3,1],sqrt(summary(SLGEEc)$cov.scaled[2,2] + summary(SLGEEc)$cov.scaled[3,3] + 2*summary(SLGEEc)$cov.scaled[2,3]),
coef(summary(SLiptcw))[2,1]+coef(summary(SLiptcw))[3,1],sqrt(vcovCR(SLiptcw, cluster=data$id, type = "CR3")[2,2] + vcovCR(SLiptcw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(SLiptcw, cluster=data$id, type = "CR3")[2,3]),
coef(summary(SLdriptcw))[2,1]+coef(summary(SLdriptcw))[3,1],sqrt(vcovCR(SLdriptcw, cluster=data$id, type = "CR3")[2,2] + vcovCR(SLdriptcw, cluster=data$id, type = "CR3")[3,3] + 2*vcovCR(SLdriptcw, cluster=data$id, type = "CR3")[2,3]),
summary(SLtmlec)$effect.measures$ATE$estimate,summary(SLtmlec)$effect.measures$ATE$std.dev,
GLMfcs[1],GLMfcs[2],
RIfcs[1],RIfcs[2],
GEEfcs[1],GEEfcs[2],
GLiptwfcs[1],GLiptwfcs[2],
GLdriptwfcs[1],GLdriptwfcs[2],
GLtmlefcs[1],GLtmlefcs[2],
SLiptwfcs[1],SLiptwfcs[2],
SLdriptwfcs[1],SLdriptwfcs[2],
SLtmlefcs[1],SLtmlefcs[2],
GLMmvn[1],GLMmvn[2],
RImvn[1],RImvn[2],
GEEmvn[1],GEEmvn[2],
GLiptwmvn[1],GLiptwmvn[2],
GLdriptwmvn[1],GLdriptwmvn[2],
GLtmlemvn[1],GLtmlemvn[2],
SLiptwmvn[1],SLiptwmvn[2],
SLdriptwmvn[1],SLdriptwmvn[2],
SLtmlemvn[1],SLtmlemvn[2],
GLMjomo[1],GLMjomo[2],
RIjomo[1],RIjomo[2],
GEEjomo[1],GEEjomo[2],
GLiptwjomo[1],GLiptwjomo[2],
GLdriptwjomo[1],GLdriptwjomo[2],
GLtmlejomo[1],GLtmlejomo[2],
SLiptwjomo[1],SLiptwjomo[2],
SLdriptwjomo[1],SLdriptwjomo[2],
SLtmlejomo[1],SLtmlejomo[2])
}
# Column labels for the result matrices written below.  Every analysis method
# produces a pair of columns: "<method>jtco" (joint-effect coefficient, the
# sum of the a and la coefficients) and "<method>jtse" (its standard error).
# col.names1 labels the 18 complete-case columns; col.names2 appends the
# missing-data methods (IPCW and the multiple-imputation variants), built by
# prefixing the relevant complete-case labels.
col.names1 <- c(
  "GLMjtco", "GLMjtse",            # naive GLM with cluster-robust SEs
  "RIjtco", "RIjtse",              # random-intercept mixed model
  "GEEjtco", "GEEjtse",            # GEE
  "GLMiptwjtco", "GLMiptwjtse",    # GLM-based IPTW
  "SLiptwjtco", "SLiptwjtse",      # SuperLearner-based IPTW
  "GLMdriptwjtco", "GLMdriptwjtse",# GLM-based doubly-robust IPTW
  "SLdriptwjtco", "SLdriptwjtse",  # SuperLearner-based doubly-robust IPTW
  "GLMtmlejtco", "GLMtmlejtse",    # GLM-based TMLE
  "SLtmlejtco", "SLtmlejtse"       # SuperLearner-based TMLE
)
col.names2 <- c(
  col.names1,
  # GLM-based inverse-probability-of-censoring weighting: GLM/RI/GEE plus the
  # GLM flavours of IPTW, DR-IPTW and TMLE.
  paste0("GLMIPC_", col.names1[c(1:8, 11:12, 15:16)]),
  # SuperLearner-based IPCW: GLM/RI/GEE plus the SL flavours.
  paste0("SLIPC_", col.names1[c(1:6, 9:10, 13:14, 17:18)]),
  # Multiple imputation (FCS, MVN and JOMO) re-runs all 9 estimators.
  paste0("FCS_", col.names1),
  paste0("MVN_", col.names1),
  paste0("JOMO_", col.names1)
)
# ############################
# ## Scenario 1 ##
# ############################
#
# # Complete data - no missing data
# datacomp1 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data <- data.frame(data[,c(-11,-12,-13,-14,-15,-16)])
# data
# })
# rescomp1 <- matrix(unlist(lapply(datacomp1, function (x) {simcomp(x)})),nrow=n,ncol=18,byrow=TRUE)
# colnames(rescomp1) <- col.names1
# write_dta(data.frame(rescomp1),path=paste0(paste0(paste0(args[3],"Results/S1comp-"),start),".dta"))
# rm(datacomp1)
#
# # 25% Missing data scenario
# # MCAR - only complete case analysis run (because it is unbiased)
# datamcara1 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data$y <- ifelse(data$missmcarmod==1,NA,data$y)
# data <- data.frame(cbind(data[,1:9],c=data[,"missmcarmod"],y=data[,"y"]))
# data
# })
# resmcara1 <- matrix(unlist(lapply(datamcara1, function (x) {simcomp(x[,!(names(x) %in% "c")])})),nrow=n,ncol=18,byrow=TRUE)
# colnames(resmcara1) <- col.names1
# write_dta(data.frame(resmcara1),path=paste0(paste0(paste0(args[3],"Results/S1mod-mcar-"),start),".dta"))
# rm(datamcara1)
#
# # MAR A - Missingness only related to L
# datamara1 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data$y <- ifelse(data$missmarmod1==1,NA,data$y)
# data <- data.frame(cbind(data[,1:9],c=data[,"missmarmod1"],y=data[,"y"]))
# data
# })
# resmara1a <- matrix(unlist(lapply(datamara1, function (x) {simcomp(x[,!(names(x) %in% "c")])})),nrow=n,ncol=18,byrow=TRUE)
# resmara1b <- matrix(unlist(lapply(datamara1, function (x) {simmiss(x,nimpute=nimp,scenario=1)})),nrow=n,ncol=78,byrow=TRUE)
# resmara1 <- cbind(resmara1a,resmara1b)
# colnames(resmara1) <- col.names2
# write_dta(data.frame(resmara1),path=paste0(paste0(paste0(args[3],"Results/S1mod-mar1-"),start),".dta"))
# rm(datamara1)
#
# # MAR B - Missingness related to L and A
# datamarb1 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data$y <- ifelse(data$missmarmod2==1,NA,data$y)
# data <- data.frame(cbind(data[,1:9],c=data[,"missmarmod2"],y=data[,"y"]))
# data
# })
# resmarb1a <- matrix(unlist(lapply(datamarb1, function (x) {simcomp(x[,!(names(x) %in% "c")])})),nrow=n,ncol=18,byrow=TRUE)
# resmarb1b <- matrix(unlist(lapply(datamarb1, function (x) {simmiss(x,nimpute=nimp,scenario=1)})),nrow=n,ncol=78,byrow=TRUE)
# resmarb1 <- cbind(resmarb1a,resmarb1b)
# colnames(resmarb1) <- col.names2
# write_dta(data.frame(resmarb1),path=paste0(paste0(paste0(args[3],"Results/S1mod-mar2-"),start),".dta"))
# rm(datamarb1)
#
# # 50% Missing data scenario
# # MCAR - only complete case analysis run (because it is unbiased)
# datamcara2 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data$y <- ifelse(data$missmcarsev==1,NA,data$y)
# data <- data.frame(cbind(data[,1:9],c=data[,"missmcarsev"],y=data[,"y"]))
# data
# })
# resmcara2 <- matrix(unlist(lapply(datamcara2, function (x) {simcomp(x[,!(names(x) %in% "c")])})),nrow=n,ncol=18,byrow=TRUE)
# colnames(resmcara2) <- col.names1
# write_dta(data.frame(resmcara2),path=paste0(paste0(paste0(args[3],"Results/S1sev-mcar-"),start),".dta"))
# rm(datamcara2)
#
# # MAR A - Missingness only related to L
# datamara2 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data$y <- ifelse(data$missmarsev1==1,NA,data$y)
# data <- data.frame(cbind(data[,1:9],c=data[,"missmarsev1"],y=data[,"y"]))
# data
# })
# resmara2a <- matrix(unlist(lapply(datamara2, function (x) {simcomp(x[,!(names(x) %in% "c")])})),nrow=n,ncol=18,byrow=TRUE)
# resmara2b <- matrix(unlist(lapply(datamara2, function (x) {simmiss(x,nimpute=nimp,scenario=1)})),nrow=n,ncol=78,byrow=TRUE)
# resmara2 <- cbind(resmara2a,resmara2b)
# colnames(resmara2) <- col.names2
# write_dta(data.frame(resmara2),path=paste0(paste0(paste0(args[3],"Results/S1sev-mar1-"),start),".dta"))
# rm(datamara2)
#
# # MAR B - Missingness related to L and A
# datamarb2 <- lapply(paste0(filepath1,list.files(path=filepath1))[start:stop],function (x) {
# data <- data.frame(read_dta(x))
# data$id <- factor(data$id)
# data$y <- ifelse(data$missmarsev2==1,NA,data$y)
# data <- data.frame(cbind(data[,1:9],c=data[,"missmarsev2"],y=data[,"y"]))
# data
# })
# resmarb2a <- matrix(unlist(lapply(datamarb2, function (x) {simcomp(x[,!(names(x) %in% "c")])})),nrow=n,ncol=18,byrow=TRUE)
# resmarb2b <- matrix(unlist(lapply(datamarb2, function (x) {simmiss(x,nimpute=nimp,scenario=1)})),nrow=n,ncol=78,byrow=TRUE)
# resmarb2 <- cbind(resmarb2a,resmarb2b)
# colnames(resmarb2) <- col.names2
# write_dta(data.frame(resmarb2),path=paste0(paste0(paste0(args[3],"Results/S1sev-mar2-"),start),".dta"))
# rm(datamarb2)
############################
##   Scenarios 2 and 3    ##
############################
# The fourteen analysis blocks below all followed the same copy-pasted
# recipe (load datasets, optionally impose missingness on y, run the
# complete-case and/or missing-data analyses, label and write the results),
# so the shared steps are factored into small helpers.  Globals used
# throughout, defined earlier in this file: filepath2/filepath3, start,
# stop, n, nimp, args, col.names1, col.names2, simcomp(), simmiss(), and
# haven's read_dta()/write_dta().
# NOTE(review): the intermediate "<res>a"/"<res>b" matrices of the original
# code are no longer kept in the workspace; only the final labelled result
# matrices (rescomp2, resmcarb1, ...) survive, as before.

# Read simulated datasets [start:stop] from `filepath`, dropping the six
# missingness-indicator columns (13-18) so only the complete data remain.
load_complete_data <- function(filepath) {
  lapply(paste0(filepath, list.files(path = filepath))[start:stop], function(f) {
    d <- data.frame(read_dta(f))
    d$id <- factor(d$id)
    data.frame(d[, c(-13, -14, -15, -16, -17, -18)])
  })
}

# Read simulated datasets [start:stop] and impose the missingness mechanism
# named by `missvar`: y is set to NA wherever the indicator equals 1, and the
# indicator itself is kept as column "c" next to the first 11 data columns.
load_miss_data <- function(filepath, missvar) {
  lapply(paste0(filepath, list.files(path = filepath))[start:stop], function(f) {
    d <- data.frame(read_dta(f))
    d$id <- factor(d$id)
    d$y <- ifelse(d[[missvar]] == 1, NA, d$y)
    data.frame(cbind(d[, 1:11], c = d[, missvar], y = d[, "y"]))
  })
}

# Run the complete-case analyses on every dataset: one row of 18
# estimate/SE values per dataset.  The missingness indicator "c" (when
# present) is dropped before analysis, matching the original code.
run_simcomp <- function(datasets) {
  matrix(unlist(lapply(datasets, function(d) simcomp(d[, !(names(d) %in% "c")]))),
         nrow = n, ncol = 18, byrow = TRUE)
}

# Run the missing-data analyses (IPCW and multiple-imputation variants) on
# every dataset: one row of 78 estimate/SE values per dataset.
run_simmiss <- function(datasets, scen) {
  matrix(unlist(lapply(datasets, function(d) simmiss(d, nimpute = nimp, scenario = scen))),
         nrow = n, ncol = 78, byrow = TRUE)
}

# Write a labelled result matrix to <args[3]>Results/<tag>-<start>.dta.
write_results <- function(res, tag) {
  write_dta(data.frame(res), path = paste0(args[3], "Results/", tag, "-", start, ".dta"))
}

############################
##       Scenario 2       ##
############################
# Complete data - no missing data
rescomp2 <- run_simcomp(load_complete_data(filepath2))
colnames(rescomp2) <- col.names1
write_results(rescomp2, "S2comp")

# 25% Missing data scenario
# MCAR - only complete case analysis run (because it is unbiased)
datamcarb1 <- load_miss_data(filepath2, "missmcarmod")
resmcarb1 <- run_simcomp(datamcarb1)
colnames(resmcarb1) <- col.names1
write_results(resmcarb1, "S2mod-mcar")
rm(datamcarb1)

# MAR C - Missingness only related to W
datamarc1 <- load_miss_data(filepath2, "missmarmod1")
resmarc1 <- cbind(run_simcomp(datamarc1), run_simmiss(datamarc1, 2))
colnames(resmarc1) <- col.names2
write_results(resmarc1, "S2mod-mar1")
rm(datamarc1)

# MAR D - Missingness related to W and A
datamard1 <- load_miss_data(filepath2, "missmarmod2")
resmard1 <- cbind(run_simcomp(datamard1), run_simmiss(datamard1, 2))
colnames(resmard1) <- col.names2
write_results(resmard1, "S2mod-mar2")
rm(datamard1)

# 50% Missing data scenario
# MCAR - only complete case analysis run (because it is unbiased)
datamcarb2 <- load_miss_data(filepath2, "missmcarsev")
resmcarb2 <- run_simcomp(datamcarb2)
colnames(resmcarb2) <- col.names1
write_results(resmcarb2, "S2sev-mcar")
rm(datamcarb2)

# MAR C - Missingness only related to W
datamarc2 <- load_miss_data(filepath2, "missmarsev1")
resmarc2 <- cbind(run_simcomp(datamarc2), run_simmiss(datamarc2, 2))
colnames(resmarc2) <- col.names2
write_results(resmarc2, "S2sev-mar1")
rm(datamarc2)

# MAR D - Missingness related to W and A
datamard2 <- load_miss_data(filepath2, "missmarsev2")
resmard2 <- cbind(run_simcomp(datamard2), run_simmiss(datamard2, 2))
colnames(resmard2) <- col.names2
write_results(resmard2, "S2sev-mar2")
rm(datamard2)

############################
##       Scenario 3       ##
############################
# NOTE(review): the original code passed scenario = 2 to simmiss() for every
# Scenario 3 analysis; scenarios 2 and 3 share the same column layout
# (including the "w" covariate), so this looks intentional — confirm.

# Complete data - no missing data
rescomp3 <- run_simcomp(load_complete_data(filepath3))
colnames(rescomp3) <- col.names1
write_results(rescomp3, "S3comp")

# 25% Missing data scenario
# MCAR - only complete case analysis run (because it is unbiased)
datamcarc1 <- load_miss_data(filepath3, "missmcarmod")
resmcarc1 <- run_simcomp(datamcarc1)
colnames(resmcarc1) <- col.names1
write_results(resmcarc1, "S3mod-mcar")
rm(datamcarc1)

# MAR E - Missingness only related to W
datamare1 <- load_miss_data(filepath3, "missmarmod1")
resmare1 <- cbind(run_simcomp(datamare1), run_simmiss(datamare1, 2))
colnames(resmare1) <- col.names2
write_results(resmare1, "S3mod-mar1")
rm(datamare1)

# MAR F - Missingness related to W and A
datamarf1 <- load_miss_data(filepath3, "missmarmod2")
resmarf1 <- cbind(run_simcomp(datamarf1), run_simmiss(datamarf1, 2))
colnames(resmarf1) <- col.names2
write_results(resmarf1, "S3mod-mar2")
rm(datamarf1)

# 50% Missing data scenario
# MCAR - only complete case analysis run (because it is unbiased)
datamcarc2 <- load_miss_data(filepath3, "missmcarsev")
resmcarc2 <- run_simcomp(datamcarc2)
colnames(resmcarc2) <- col.names1
write_results(resmcarc2, "S3sev-mcar")
rm(datamcarc2)

# MAR E - Missingness only related to W
datamare2 <- load_miss_data(filepath3, "missmarsev1")
resmare2 <- cbind(run_simcomp(datamare2), run_simmiss(datamare2, 2))
colnames(resmare2) <- col.names2
write_results(resmare2, "S3sev-mar1")
rm(datamare2)

# MAR F - Missingness related to W and A
datamarf2 <- load_miss_data(filepath3, "missmarsev2")
resmarf2 <- cbind(run_simcomp(datamarf2), run_simmiss(datamarf2, 2))
colnames(resmarf2) <- col.names2
write_results(resmarf2, "S3sev-mar2")
rm(datamarf2)
|
fac4764d73f6026b1a7311d5deabbe43a54e2119
|
fe2b7a0831171064b569860b2a605e19300bbba5
|
/00_learn/r/assignment1/corr.R
|
4340dc2cace3fd611db6241ec8db6aeb699ecc16
|
[] |
no_license
|
calimaborges-drafts/the-archive
|
2a47bda99a1abc48c837f15ee2dcb071f838a5b4
|
37195d243e30a0cd5236445d2bf440e44bd448b2
|
refs/heads/master
| 2021-09-15T11:29:51.479772
| 2018-05-31T13:40:31
| 2018-05-31T13:40:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,218
|
r
|
corr.R
|
source("readfiles.R")
source("complete.R")

# Compute per-monitor correlations between sulfate and nitrate readings.
#
# 'directory' is a character vector of length 1 indicating the location of
# the CSV files.
# 'threshold' is a numeric vector of length 1 giving the number of
# completely observed observations (on all variables) a monitor must exceed
# before its correlation is computed; the default is 0.
#
# Returns an unnamed numeric vector of correlations, one per qualifying
# monitor, or an empty numeric vector when no monitor passes the threshold.
# NOTE: results are not rounded.
corr <- function(directory, threshold = 0) {
  working_data <- complete(directory)
  working_data <- subset(working_data, nobs > threshold)
  if (nrow(working_data) == 0) return(numeric())

  files <- readfiles(directory, working_data$id)
  # vapply instead of growing a vector with c() inside a loop (O(n^2) copies);
  # USE.NAMES = FALSE keeps the result an unnamed vector, as before.
  vapply(files, function(f) {
    obs <- read.csv(f)
    # Keep only rows where both pollutant measurements are present
    # (replaces the two sequential is.na() subsets of the original).
    ok <- complete.cases(obs[, c("sulfate", "nitrate")])
    cor(obs$nitrate[ok], obs$sulfate[ok])
  }, numeric(1), USE.NAMES = FALSE)
}
|
62b232c4c29cc45fa072d67ef91736ae13ced215
|
6de2eabe819ca2ba9596a22fa94a90be3e8dccd6
|
/tests/testthat/test-msn.R
|
371b413c8b44e23c0700519c4d203007adae0385
|
[] |
no_license
|
knausb/poppr
|
ba6f2a2f199e5ba26654bf9712737a0acd058b7d
|
e1c4cff46083a6735ee43b9b4f713db0e0fb058b
|
refs/heads/master
| 2020-12-28T23:16:42.816458
| 2016-10-07T18:04:23
| 2016-10-07T18:04:30
| 70,727,479
| 1
| 0
| null | 2016-10-12T18:08:57
| 2016-10-12T18:08:57
| null |
UTF-8
|
R
| false
| false
| 13,899
|
r
|
test-msn.R
|
options(warn = -1)

# Helper for the tests below: strip the "igraph" class from the graph
# component of an msn result and drop its 10th element, so two graphs can
# be compared as plain lists.
ucl <- function(x) {
  stripped <- unclass(x$graph)
  stripped[-10]
}
# Fixture: a tiny 4-individual, 2-locus, diploid genind object built by hand
# (adegenet "genind" class); each individual is its own population.
set.seed(9005)
gend <- new("genind"
, tab = structure(c(1L, 1L, 0L, 0L,
1L, 1L, 2L, 2L,
0L, 1L, 1L, 0L,
2L, 1L, 1L, 2L),
.Dim = c(4L, 4L),
.Dimnames = list(c("1", "2", "3", "4"),
c("loc-1.1", "loc-1.2", "loc-2.1", "loc-2.2")))
, loc.names = structure(c("loc-1", "loc-2"), .Names = c("loc-1", "loc-2"))
, loc.fac = structure(c(1L, 1L, 2L, 2L), .Label = c("loc-1", "loc-2"), class = "factor")
, loc.nall = structure(c(2L, 2L), .Names = c("loc-1", "loc-2"))
, all.names = structure(list(`loc-1` = structure(c("3", "4"), .Names = c("1", "2"
)), `loc-2` = structure(c("3", "4"), .Names = c("1", "2"))), .Names = c("loc-1",
"loc-2"))
, call = NULL
, ind.names = structure(c("", "", "", ""), .Names = c("1", "2", "3", "4"))
, pop = structure(1:4, .Label = c("1", "2", "3", "4"), class = "factor")
, pop.names = structure(c("1", "2", "3", "4"))
, ploidy = 2L
, type = "codom"
, other = NULL
)
# genclone version and precomputed Bruvo distance for the fixture.
gend_gc <- as.genclone(gend)
gend_bruvo <- bruvo.dist(gend, replen = c(1, 1))
set.seed(9005)
# Same data but with everyone collapsed into a single population.
gend_single <- gend
pop(gend_single) <- rep(1, 4)
# Expected minimum-spanning-network results (hand-serialized igraph objects
# plus population/color metadata) against which the tests below compare.
# "no_ties"/"ties": multi-population fixture, without/with tied edges kept.
no_ties <- structure(list(graph = structure(list(4, FALSE, c(3, 2, 3), c(0,
1, 2), c(1, 0, 2), c(0, 1, 2), c(0, 0, 0, 1, 3), c(0, 1, 2, 3,
3), list(c(1, 0, 1), structure(list(), .Names = character(0)),
structure(list(name = c("1", "2", "3", "4"), size = c(1L,
1L, 1L, 1L), shape = c("pie", "pie", "pie", "pie"), pie = list(
structure(1L, .Names = "1"), structure(1L, .Names = "2"),
structure(1L, .Names = "3"), structure(1L, .Names = "4")),
pie.color = list(structure("#4C00FFFF", .Names = "1"), structure("#00E5FFFF", .Names = "2"), structure("#00FF4DFF", .Names = "3"),
structure("#FFFF00FF", .Names = "4")), label = c("MLG.3", "MLG.4", "MLG.2",
"MLG.1")), .Names = c("name", "size", "shape", "pie",
"pie.color", "label")), structure(list(weight = c(0.125,
0.125, 0.125), color = c("#434343", "#434343", "#434343"),
width = c(8, 8, 8)), .Names = c("weight", "color", "width"
)))), class = "igraph"), populations = structure(c("1", "2",
"3", "4")), colors = structure(c("#4C00FFFF",
"#00E5FFFF", "#00FF4DFF", "#FFFF00FF"), .Names = c("1","2","3","4"))), .Names = c("graph",
"populations", "colors"))
# Single-population variant: plain vertex colors instead of pie charts.
no_ties_single <- structure(list(graph = structure(list(4, FALSE, c(3, 2, 3), c(0,
1, 2), c(1, 0, 2), c(0, 1, 2), c(0, 0, 0, 1, 3), c(0, 1, 2, 3,
3), list(c(1, 0, 1), structure(list(), .Names = character(0)),
structure(list(name = c("1", "2", "3", "4"), size = c(1L,
1L, 1L, 1L), color = c("#4C00FFFF", "#4C00FFFF", "#4C00FFFF",
"#4C00FFFF"), label = c("MLG.3", "MLG.4", "MLG.2", "MLG.1"
)), .Names = c("name", "size", "color", "label")), structure(list(
weight = c(0.125, 0.125, 0.125), color = c("#434343",
"#434343", "#434343"), width = c(8, 8, 8)), .Names = c("weight",
"color", "width")))), class = "igraph"), populations = "1",
colors = "#4C00FFFF"), .Names = c("graph", "populations",
"colors"))
# With include.ties = TRUE the network keeps one extra (tied) edge: 4 edges.
ties <- structure(list(graph = structure(list(4, FALSE, c(3, 2, 3, 1),
c(0, 1, 2, 0), c(3, 1, 0, 2), c(3, 0, 1, 2), c(0, 0, 1, 2,
4), c(0, 2, 3, 4, 4), list(c(1, 0, 1), structure(list(), .Names = character(0)),
structure(list(name = c("1", "2", "3", "4"), size = c(1L,
1L, 1L, 1L), shape = c("pie", "pie", "pie", "pie"), pie = list(
structure(1L, .Names = "1"), structure(1L, .Names = "2"),
structure(1L, .Names = "3"), structure(1L, .Names = "4")),
pie.color = list(structure("#4C00FFFF", .Names = "1"), structure("#00E5FFFF", .Names = "2"),
structure("#00FF4DFF", .Names = "3"), structure("#FFFF00FF", .Names = "4")),
label = c("MLG.3", "MLG.4", "MLG.2",
"MLG.1")), .Names = c("name", "size", "shape", "pie",
"pie.color", "label")), structure(list(weight = c(0.125,
0.125, 0.125, 0.125), color = c("#434343", "#434343",
"#434343", "#434343"), width = c(8, 8, 8, 8)), .Names = c("weight",
"color", "width")))), class = "igraph"), populations = structure(c("1",
"2", "3", "4")), colors = structure(c("#4C00FFFF",
"#00E5FFFF", "#00FF4DFF", "#FFFF00FF"), .Names = c("1","2","3","4"))), .Names = c("graph",
"populations", "colors"))
# Single-population variant of the tied-edge network.
ties_single <- structure(list(graph = structure(list(4, FALSE, c(3, 2, 3, 1),
c(0, 1, 2, 0), c(3, 1, 0, 2), c(3, 0, 1, 2), c(0, 0, 1, 2,
4), c(0, 2, 3, 4, 4), list(c(1, 0, 1), structure(list(), .Names = character(0)),
structure(list(name = c("1", "2", "3", "4"), size = c(1L,
1L, 1L, 1L), color = c("#4C00FFFF", "#4C00FFFF", "#4C00FFFF",
"#4C00FFFF"), label = c("MLG.3", "MLG.4", "MLG.2", "MLG.1"
)), .Names = c("name", "size", "color", "label")), structure(list(
weight = c(0.125, 0.125, 0.125, 0.125), color = c("#434343",
"#434343", "#434343", "#434343"), width = c(8, 8,
8, 8)), .Names = c("weight", "color", "width")))), class = "igraph"),
populations = "1", colors = "#4C00FFFF"), .Names = c("graph",
"populations", "colors"))
# The literals above were serialized under igraph < 1.0; convert them to the
# installed igraph's internal format when running against igraph >= 1.0.
if (packageVersion("igraph") >= package_version("1.0.0")){
no_ties$graph <- igraph::upgrade_graph(no_ties$graph)
no_ties_single$graph <- igraph::upgrade_graph(no_ties_single$graph)
ties$graph <- igraph::upgrade_graph(ties$graph)
ties_single$graph <- igraph::upgrade_graph(ties_single$graph)
}
# Restore default warning behavior (was silenced at the top of the file).
options(warn = 0)
context("Tied MSN edge tests")
# set.seed() precedes each msn call because tie-breaking among equal-weight
# edges is presumably random — TODO confirm against poppr internals.
test_that("bruvo.msn can properly account for tied edges", {
# Test Bruvo.msn
set.seed(9005)
expect_equal(ucl(bruvo.msn(gend, replen=c(1,1))), ucl(no_ties))
set.seed(9005)
expect_equal(ucl(bruvo.msn(gend, replen=c(1,1), include.ties = TRUE)), ucl(ties))
})
# poppr.msn with a precomputed Bruvo distance must match the bruvo.msn result.
test_that("poppr.msn can properly account for tied edges", {
# Test poppr.msn
set.seed(9005)
expect_equal(ucl(poppr.msn(gend, distmat=bruvo.dist(gend,replen=c(1,1)))), ucl(no_ties))
set.seed(9005)
expect_equal(ucl(poppr.msn(gend, distmat=bruvo.dist(gend,replen=c(1,1)), include.ties = TRUE)), ucl(ties))
})
test_that("bruvo.msn can work with single populations", {
# Test both for single populations sets
set.seed(9005)
expect_equal(ucl(bruvo.msn(gend_single, replen=c(1,1))), ucl(no_ties_single))
set.seed(9005)
expect_equal(ucl(bruvo.msn(gend_single, replen=c(1,1), include.ties = TRUE)), ucl(ties_single))
})
test_that("poppr.msn can work with single populations", {
gs.bruvo <- bruvo.dist(gend_single,replen=c(1,1))
set.seed(9005)
expect_equal(ucl(poppr.msn(gend_single, distmat = gs.bruvo)), ucl(no_ties_single))
set.seed(9005)
expect_equal(ucl(poppr.msn(gend_single, distmat = gs.bruvo, include.ties = TRUE)), ucl(ties_single))
})
context("MSN and collapsed MLG tests")
# Reference network built with an explicit collapse threshold of 0.15.
gmsnt <- bruvo.msn(gend, replen = c(1, 1), threshold = 0.15)
test_that("Minimum spanning networks also collapse MLGs", {
skip_on_cran()
gend <- as.genclone(gend)
gend_single <- as.genclone(gend_single)
# Adding the filter for testing
mlg.filter(gend, dist = bruvo.dist, replen = c(1, 1)) <- 0.15
mll(gend) <- "original"
# Threshold 0.15 collapses the 4 MLGs down to 2 vertices.
expect_equal(igraph::vcount(gmsnt$graph), 2)
pgmsnt <- poppr.msn(gend, distmat = gend_bruvo, threshold = 0.15)
mll(gend) <- "contracted"
gmsnot <- bruvo.msn(gend, replen = c(1, 1)) # no threshold supplied
gmsnone <- bruvo.msn(gend, replen = c(1, 1), threshold = 0.3)
expect_equal(igraph::vcount(gmsnone$graph), 1)
gmsnall <- bruvo.msn(gend, replen = c(1, 1), threshold = 0)
expect_equal(igraph::vcount(gmsnall$graph), 4)
# Thresholded/contracted networks from bruvo.msn and poppr.msn must agree
# on vertex pies, vertex names, and edge weights.
expect_identical(igraph::V(gmsnt$graph)$pie, igraph::V(pgmsnt$graph)$pie)
expect_identical(igraph::V(gmsnot$graph)$pie, igraph::V(pgmsnt$graph)$pie)
expect_identical(igraph::V(gmsnt$graph)$name, igraph::V(pgmsnt$graph)$name)
expect_identical(igraph::V(gmsnot$graph)$name, igraph::V(pgmsnt$graph)$name)
expect_identical(igraph::E(gmsnt$graph)$weight, igraph::E(pgmsnt$graph)$weight)
expect_identical(igraph::E(gmsnot$graph)$weight, igraph::E(pgmsnt$graph)$weight)
mll(gend) <- "original"
gmsn <- bruvo.msn(gend, replen = c(1, 1), showplot = FALSE)
expect_equal(igraph::vcount(gmsn$graph), 4)
pgmsn <- poppr.msn(gend, distmat = gend_bruvo, showplot = FALSE)
# Uncollapsed networks must likewise agree across both entry points.
expect_identical(igraph::V(gmsn$graph)$pie, igraph::V(pgmsn$graph)$pie)
expect_identical(igraph::V(gmsn$graph)$pie, igraph::V(gmsnall$graph)$pie)
expect_identical(igraph::V(gmsn$graph)$name, igraph::V(pgmsn$graph)$name)
expect_identical(igraph::V(gmsn$graph)$name, igraph::V(gmsnall$graph)$name)
expect_identical(igraph::E(gmsn$graph)$weight, igraph::E(pgmsn$graph)$weight)
expect_identical(igraph::E(gmsn$graph)$weight, igraph::E(gmsnall$graph)$weight)
})
test_that("Minimum spanning networks can collapse MLGs with single populations", {
skip_on_cran()
sgmsnt <- bruvo.msn(gend_single, replen = c(1, 1), threshold = 0.15)
psgmsnt <- poppr.msn(gend_single, distmat = gend_bruvo, threshold = 0.15)
expect_identical(igraph::V(sgmsnt$graph)$pie, igraph::V(psgmsnt$graph)$pie)
expect_identical(igraph::V(sgmsnt$graph)$name, igraph::V(psgmsnt$graph)$name)
expect_identical(igraph::E(sgmsnt$graph)$weight, igraph::E(psgmsnt$graph)$weight)
sgmsn <- bruvo.msn(gend_single, replen = c(1, 1), showplot = FALSE)
psgmsn <- poppr.msn(gend_single, distmat = gend_bruvo, showplot = FALSE)
expect_identical(igraph::V(sgmsn$graph)$pie, igraph::V(psgmsn$graph)$pie)
expect_identical(igraph::V(sgmsn$graph)$name, igraph::V(psgmsn$graph)$name)
expect_identical(igraph::E(sgmsn$graph)$weight, igraph::E(psgmsn$graph)$weight)
expect_equal(igraph::vcount(sgmsnt$graph), 2)
expect_equal(igraph::vcount(sgmsn$graph), 4)
# expect_output(..., NA) asserts that plotting produces no console output.
expect_output(plot_poppr_msn(gend, gmsnt, palette = "cm.colors"), NA)
expect_output(plot_poppr_msn(gend_single, sgmsnt, palette = "cm.colors"), NA)
})
test_that("Filtered minimum spanning networks retain original names", {
skip_on_cran()
# setup ----------------------------------------------------
# Five haploid samples laid out on a grid, plus one with a missing value
# ("new") and one a single mutation away from C ("mut").
grid_example <- matrix(c(1, 4,
1, 1,
5, 1,
9, 1,
9, 4),
ncol = 2,
byrow = TRUE)
rownames(grid_example) <- LETTERS[1:5]
colnames(grid_example) <- c("x", "y")
grid_new <- rbind(grid_example,
new = c(5, NA),
mut = c(5, 2)
)
x <- as.genclone(df2genind(grid_new, ploidy = 1))
indNames(x)
## [1] "A" "B" "C" "D" "E" "new" "mut"
# Euclidean distance on the raw allele table, ignoring population.
raw_dist <- function(x){
dist(genind2df(x, usepop = FALSE))
}
(xdis <- raw_dist(x))
# normal ---------------------------------------------------
set.seed(9001)
g1 <- poppr.msn(x, xdis, include.ties = TRUE, showplot = FALSE,
vertex.label.color = "firebrick", vertex.label.font = 2)
all_names <- igraph::V(g1$graph)$name
## [1] "A" "B" "C" "D" "E" "new" "mut"
# filtered ---------------------------------------------------
# Threshold 1 should merge "mut" into its neighbor, dropping only the
# last name while preserving the others.
set.seed(9001)
g1.1 <- poppr.msn(x, xdis, threshold = 1, include.ties = TRUE, showplot = FALSE,
vertex.label.color = "firebrick", vertex.label.font = 2)
cc_names <- igraph::V(g1.1$graph)$name
## [1] "A" "B" "C" "D" "E" "new"
expect_identical(cc_names, head(all_names, -1))
})
context("minimum spanning network subset populations")
# partial_clone is an example dataset shipped with poppr.
data("partial_clone")
pc <- as.genclone(partial_clone)
test_that("Minimum spanning networks can subset populations", {
bpc <- bruvo.dist(pc, replen = rep(1, 10))
# bruvo.msn and poppr.msn must agree for the full data and for sublists
# of two populations and of one population.
bmsn <- bruvo.msn(pc, replen = rep(1, 10), showplot = FALSE)
pmsn <- poppr.msn(pc, bpc, showplot = FALSE)
expect_identical(ucl(bmsn), ucl(pmsn))
bmsn12 <- bruvo.msn(pc, replen = rep(1, 10), sublist = 1:2, showplot = FALSE)
pmsn12 <- poppr.msn(pc, bpc, sublist = 1:2, showplot = FALSE)
expect_identical(ucl(bmsn12), ucl(pmsn12))
bmsn1 <- bruvo.msn(pc, replen = rep(1, 10), sublist = 1, showplot = FALSE)
pmsn1 <- poppr.msn(pc, bpc, sublist = 1, showplot = FALSE)
expect_identical(ucl(bmsn1), ucl(pmsn1))
})
context("custom MLLs and minimum spanning networks")
# Relabel MLGs with letters and merge "Q" into "M" to exercise custom
# multilocus-lineage labels.
mll.custom(pc) <- LETTERS[mll(pc)]
mll.levels(pc)[mll.levels(pc) == "Q"] <- "M"
mll(pc) <- "custom"
test_that("msn works with custom MLLs", {
skip_on_cran()
# expect_error(..., NA) asserts the expression raises no error.
expect_error(pcmsn <- bruvo.msn(pc, replen = rep(1, 10)), NA)
expect_equivalent(sort(unique(igraph::V(pcmsn$graph)$label)), sort(mll.levels(pc)))
expect_error(plot_poppr_msn(pc, pcmsn), NA)
expect_error(plot_poppr_msn(pc, pcmsn, mlg = TRUE), NA)
})
context("Minimum spanning network aesthetics")
test_that("vectors can be used to color graphs", {
skip_on_cran()
data(Aeut)
A.dist <- diss.dist(Aeut)
# Graph it.
A.msn <- poppr.msn(Aeut, A.dist, gadj=15, vertex.label=NA, showplot = FALSE)
# The same two colors supplied four different ways must all resolve to the
# same named palette (named by Aeut's two populations).
unpal <- c("black", "orange")
fpal <- function(x) unpal
npal <- setNames(unpal, c("Athena", "Mt. Vernon"))
xpal <- c(npal, JoMo = "awesome")
# Using palette without names
uname_pal <- plot_poppr_msn(Aeut, A.msn, palette = unpal)$colors
# Using palette with function
fun_pal <- plot_poppr_msn(Aeut, A.msn, palette = fpal)$colors
# Using palette with names
name_pal <- plot_poppr_msn(Aeut, A.msn, palette = npal[2:1])$colors
# Using palette with extra names
xname_pal <- plot_poppr_msn(Aeut, A.msn, palette = xpal)$colors
expect_identical(uname_pal, npal)
expect_identical(fun_pal, npal)
expect_identical(name_pal, npal)
expect_identical(xname_pal, npal)
})
|
fb4dc5d94025a9df1e23279e07c6fdc4cfeed90f
|
5947e79a3b1fff45aae0e14ea0f599b3c75198f2
|
/Model 1 Analysis_SzPt20_Final.R
|
ab3938901c4edef91fb09b4dd77f73c8d3882f6a
|
[] |
no_license
|
Fedzzy/Diameter-Class-Growth-and-Yield---Thesis
|
0d13123c3ead8f79e4c9a49635e824b374445e9c
|
c36890bf0f4723b454243099d031c5c63333ac4b
|
refs/heads/master
| 2020-04-06T16:16:03.048697
| 2018-11-14T22:10:56
| 2018-11-14T22:10:56
| 157,612,864
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,935
|
r
|
Model 1 Analysis_SzPt20_Final.R
|
#======================Regression Analysis of Different Variables - Model 1 - SzPt20=======================
# Fits the Model 1 (Cao 2004 form) nonlinear regression
#   stat ~ b0 * TPH^b1 * exp(b2/sqrt(DomHtM) + b3*Age)
# for each diameter statistic/percentile in the SzPt20 plot-level data,
# stores predictions back on SzPt20ParamDat, and computes RMSEs via the
# sourced RMSE() helper.
#Loading in Sourced Functions
source(file="Compiled Analysis Functions.R")
#Load in Plot Level Parameter Data.Rdata
load(file="Plot Level Parameter Data.RData")
#========================================Nonlinear Regression for AMD - Model 1
AMD_Mod1<-nls(AMDat~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))+b3*Age),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10, b3=.006))
summary(AMD_Mod1)
#Prediction
SzPt20ParamDat$AMD_Mod1<-predict(AMD_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$AMDat, SzPt20ParamDat$AMD_Mod1, 4)
#========================================Nonlinear Regression for QMD - Model 1
QMD_Mod1<-nls(QMDat~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))+b3*Age),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10, b3=.006))
summary(QMD_Mod1)
#Prediction
SzPt20ParamDat$QMD_Mod1<-predict(QMD_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$QMDat, SzPt20ParamDat$QMD_Mod1, 4)
#========================================Nonlinear Regression for D0 - Model 1
D0_Mod1<-nls(D0~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))+b3*Age),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10, b3=.006))
#Obtaining Summary of Results
summary(D0_Mod1) #RMSE was 1.899
#Prediction
SzPt20ParamDat$D0_Mod1<-predict(D0_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$D0, SzPt20ParamDat$D0_Mod1, 4)
#<><><><><><><><><><><><><><><><><><<><><><><><><><<Running alternative models for dbhMin><><><><><><><>
#Running alternative model 1 for dbhMin prediction (uses AMD instead of stand variables)
D0_E1<-nls(D0~b0*AMDat^b1*exp(b2*(1/Age)),
data=(SzPt20ParamDat),
start=list(b0=.035, b1=1.826, b2=.774))
#Obtaining summary of results
summary(D0_E1) #RMSE found to be 1.723
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#Running alternative model 2 for dbhMin prediction (uses QMD instead)
D0_E2<-nls(D0~b0*QMDat^b1*exp(b2*(1/Age)),
data=(SzPt20ParamDat),
start=list(b0=.035, b1=1.826, b2=.416))
#Obtaining summary of results
summary(D0_E2) #RMSE was found to be 1.849
#Alternative 1(AMD) showed to have the lowest RMSE of 1.723
#========================================Nonlinear Regression for D25 - Model 1
D25_Mod1<-nls(D25~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))+b3*Age),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10, b3=.006))
summary(D25_Mod1)
#Prediction
SzPt20ParamDat$D25_Mod1<-predict(D25_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$D25, SzPt20ParamDat$D25_Mod1, 4)
#========================================Nonlinear Regression for D50 - Model 1
D50_Mod1<-nls(D50~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))+b3*Age),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10, b3=.006))
summary(D50_Mod1)
#Prediction
SzPt20ParamDat$D50_Mod1<-predict(D50_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$D50, SzPt20ParamDat$D50_Mod1, 4)
#========================================Nonlinear Regression for D93 - Model 1
D93_Mod1<-nls(D93~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))+b3*Age),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10, b3=.006))
summary(D93_Mod1)
#Prediction
SzPt20ParamDat$D93_Mod1<-predict(D93_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$D93, SzPt20ParamDat$D93_Mod1, 4)
#========================================Nonlinear Regression for D95 - Model 1
# NOTE(review): unlike the others, the D95 model drops the b3*Age term —
# presumably intentional (Age was not significant?); confirm with the author.
D95_Mod1<-nls(D95~b0*(TPH^b1)*exp((b2/sqrt(DomHtM))),
data=(SzPt20ParamDat),
start=list(b0=105, b1=-0.2, b2=-10))
summary(D95_Mod1)
#Prediction
SzPt20ParamDat$D95_Mod1<-predict(D95_Mod1)
#Calculating RMSE
RMSE(SzPt20ParamDat, SzPt20ParamDat$D95, SzPt20ParamDat$D95_Mod1, 3)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
#========================================================================================
#Separating Plot Data for Further Analysis
#Get column subsets
PlotSz20<-SzPt20ParamDat[,c("MeasmtObs","Age","DomHtM","TPH","RS",
"AMD_Mod1", "QMD_Mod1","D0_Mod1","D25_Mod1","D50_Mod1","D93_Mod1",
"D95_Mod1")]
# NOTE(review): hard-coded absolute K:/ path makes this save non-portable.
save(PlotSz20,file="K:/csabatia/GraduateStudentWork/JoshBankston/Data Analysis/Distribution Recovery Analyses/Model 1_Cao2004/PlotLevelAnalysis20.RData")
|
3430c71528a1b9b0e7ceea18e32a01ce3a9ef998
|
ff83ee8611ebd3d0280ce1718869d5dca42d1403
|
/main.r
|
e7246c505aff0f7030832c1b5d39f2f172e89cfa
|
[
"MIT"
] |
permissive
|
Clpr/HealthInequality2018Dec
|
661b9b679a68138cfce273634768fbe0d3a11d08
|
d88d80c97e46f3e0b10c2c15e83eb0932957e69d
|
refs/heads/master
| 2020-04-13T08:45:59.170010
| 2019-04-11T14:07:13
| 2019-04-11T14:07:13
| 163,091,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,919
|
r
|
main.r
|
# <-- encoding UTF-8 -->
# Portal for the code of: Socioeconomic Status Inequality and Morbidity Rate Differences in China: Based on NHSS and CHARLS Data
# Author: Y. Jiang, H. Zheng, T. Zhao
# -------------------------------------------
## DOC STRING
#
# The script is the portal of all programs of our submtted paper;
# you may run this file, or follow the documentation, README, and comments to specific source files.
# Please refer to README.md for more information.
# and, all scripts & file I/O are encoded in UTF-8 (because Chinese charaters, in data and/or scripts, may be included & used)
#
# Tianhao Zhao (GitHub: Clpr)
# Dec 2018
# -------------------------------------------
## DEPENDENCY
#
# 1. readxl: IO of xlsx files
# 2. plm: panel data models
# 3. nlme: GLS estimation of linear models
# 4. car: VIFs, qqplots
# 5. ggplot2, plyr, maptools, mapproj, Cairo: map drawing
# 6. sqldf: api of firendly sql enqury
# 7. nortest: normality tests
# 8. purrr: std lib, to do function operations
# 9. openxlsx: easy xlsx I/O, no dependency on rJava
# 10. psych: easier APIs for PCA
# -------------------------------------------
# NOTE(review): rm(list = ls()) wipes the caller's workspace and the
# hard-coded absolute setwd() makes the script non-portable; consider an
# RStudio project / the `here` package instead.
rm(list=ls()) # clear
setwd("C:/Users/Lenovo/OneDrive/GitHubProj/HealthInequality2018Dec") # working directory
# -------------------------------------------
# Section 1: NHSS data processing & variable selection
# Progress message, then run script 1 (side effects documented below).
cat("\nScript 1: NHSS data processing & variable selection")
# MISSION:
#    1. NHSS data I/O
#    2. NHSS data processing, missing values
#    3. collinearity between income & edu: resid(income ~ edu) --> income
#    4. NHSS (control) variable selection I: through single-variable regressions
#    5. NHSS (control) variable selection II: PCA
#    6. VIFs of final specifications
source("./scripts/proc_1_NHSS.r")
# LEGACY IN WORKSPACE:
# 1. df_NHSSraw: raw NHSS dataset, consisting of all potential vars & useless vars; as backup
# 2. df_NHSS: processed NHSS dataset
# 3. li_Dat_NHSS: datasets for every specification
# 3. li_DatPlm_NHSS: panel-data for every specification
# 4. li_PCAres_NHSS: PCA results
# 5. li_PCAk_NHSS: numebr of components used in every specification
# 5. li_VIF_NHSS: VIF results of final specifications
# 6. li_Eq_NHSS: a list consisting of formula objects of final specifications
# 7. li_Xnames_NHSS: a list of namelists of independents of every specification
# 7. envNHSS: environment variables of NHSS dataset
# NOTE: pls go to ./output/proc_1_NHSS/ for output results (if applicable)
# ---------------------------------------------
# ---------------------------------------------
# Section 2: NHSS data, descriptive statistics (basic and advanced)
# Progress message, then run script 2 (depends on objects left by script 1).
cat("\nScript 2: NHSS data descriptive statistics")
# DEPENDENCY: using legacy of proc_1_NHSS.r
# MISSION:
#    1. NHSS data, general descriptive stats (mean, sd etc)
#    2. county-level (individual) Lorenz Curve & Gini coef of health outcomes
#    3. county-level (individual) Theil-I, Theil-II
#    4. county-level (individual) C.V., coefficient of variance
#    5. county-level (individual) Variance
#    6. NHSS data, the existence of the inequalities of health outcomes among areas in China (colored map)
#    7. NHSS data, the difference of (income & edu) among areas in china (colored map)
source("./scripts/proc_2_NHSS.r")
# LEGACY IN WORKSPACE:
# 1. li_Descript_NHSS: tables of the descriptive statistics of NHSS data (only income & edu, we do not do this on normalized+centered principle components!)
# 2. df_InequalIdx_NHSS: a table of different kinds of inequality indices of NHSS data
# 3. MapShapeCH: a dataset of Chinese GIS data; will be used later
# 4. func_DescrStat: a function to do descriptive statistics
# 5. func_MapProv: draws colored map based on Chinese GIS data; province level
# 6. func_SaveMap2PDF: easy output method of the figures created by func_MapProv()
# 7. LorenzCurve: easy function to compute Lorenz curve & Gini coef
# 8. Theil: easy function to compute Theil-I/II index
# 9. TrapeInt: easy function to compute trapezium integral
# NOTE: pls go to ./output/proc_2_NHSS/ for output results (applicable)
# ---------------------------------------------
# ---------------------------------------------
# Section 3: NHSS data, econometric analysis (pool, fix, random, Hausman)
# Progress message, then run script 3 (depends on objects left by script 1).
cat("\nScript 3: NHSS data, econometric analysis (pool, fix, random, Hausman)")
# DEPENDENCY: using legacy of proc_1_NHSS.r
#
# IMPORTANT NOTE: please refer to PanelAnalysisLogic.md under the ./docs directory to help understand how we designed this section !!! :)
#
# MISSION:
#    1. one-way fixed individual effect model & random individual effect model (FGLS estimators)
#    2. Hausman test & robust Hausman test (Wooldridge, 2010)
#    3. Residual QQplots & normality tests, one-way fixed individual effect model (only)
#    4. ROBUST: two-ways fixed effect model & Haussman test (to see if one-way & two-ways are consistent)
#    5. ROBUST: pooling, OLS
#    6. ROBUST: pooling, FGLS
#
#    7. ROBUST: add possible independent:
source("./scripts/proc_3_NHSS.r")
# LEGACY IN WORKSPACE:
# NOTE: pls go to ./output/proc_3_NHSS/ for output results (applicable)
# -------------------------------------------
# -------------------------------------------
# Section 4: CHARLS data processing & variable selection
# Progress message, then run script 4 (CHARLS processing; self-contained I/O).
cat("\nScript 4: CHARLS data processing & variable selection")
# MISSION:
#    1. CHARLS data I/O
#    2. CHARLS data processing, missing values
#    3. collinearity between income & edu: resid(income ~ edu) --> income
#    4. CHARLS (control) variable selection I: through single-variable regressions
#    5. CHARLS (control) variable selection II: AIC stepwise to FURTHER select control variables into final specifications
#    6. VIFs of final specifications
source("./scripts/proc_1_CHARLS.r")
# LEGACY IN WORKSPACE:
# 1. df_CHARLS_backup: raw CHARLS dataset, consisting of all potential vars & useless vars; as backup
# 2. df_CHARLS: processed CHARLS dataset
# 3. dfp_CHARLS: a panel-data-type dataframe (converted from df_NHSS, used in package plm)
# 4. df_FinalSpecif_CHARLS: a dataframe marking which variables in the final specifications
# 5. df_VIF_CHARLS: VIF results of final specifications
# 6. li_Eq_CHARLS: a list consisting of formula objects of final specifications
# 7. envCHARLS: environment variables of CHARLS dataset
# NOTE: pls go to ./output/proc_1_CHARLS/ for output results (if applicable)
# ---------------------------------------------
# ---------------------------------------------
# Section 5: CHARLS data, descriptive statistics (basic and advanced)
# Progress message, then run script 5 (depends on objects left by script 4).
cat("\nScript 5: CHARLS data, descriptive statistics (basic and advanced)")
# DEPENDENCY: using legacy of proc_1_NHSS.r
# MISSION:
#    1. CHARLS data, general descriptive stats (mean, sd etc)
#    2. county-level (individual) Lorenz Curve & Gini coef of health outcomes
#    3. county-level (individual) Theil-I, Theil-II
#    4. county-level (individual) C.V., coefficient of variance
#    5. county-level (individual) Variance
#    6. CHARLS data, the existence of the inequalities of health outcomes among areas in China (colored map)
#    7. CHARLS data, the difference of (income & edu) among areas in china (colored map)
source("./scripts/proc_2_CHARLS.r")
# LEGACY IN WORKSPACE:
# 1. df_Descript_CHARLS: a table of the descriptive statistics of NHSS data (final specification)
# 2. df_InequalIdx_CHARLS: a table of different kinds of inequality indices of NHSS data
# 3. MapShapeCH: a dataset of Chinese GIS data; will be used later
# 4. func_DescrStat: a function to do descriptive statistics
# 5. func_MapProv: draws colored map based on Chinese GIS data; province level
# 6. func_SaveMap2PDF: easy output method of the figures created by func_MapProv()
# 7. LorenzCurve: easy function to compute Lorenz curve & Gini coef
# 8. Theil: easy function to compute Theil-I/II index
# 9. TrapeInt: easy function to compute trapezium integral
# NOTE: pls go to ./output/proc_2_CHARLS/ for output results (applicable)
# ---------------------------------------------
# ---------------------------------------------
# Section 6: CHARLS data, econometric analysis (pool, fix, random, Hausman)
# Progress message, then run script 6 (depends on objects left by script 4).
# FIX: the message previously said "Script 3" (copy-paste from the NHSS
# section); this is the sixth script in the pipeline.
cat("\nScript 6: CHARLS data, econometric analysis (pool, fix, random, Hausman)")
# DEPENDENCY: using legacy of proc_1_CHARLS.r
#
# IMPORTANT NOTE: please refer to PanelAnalysisLogic.md under the ./docs directory to help understand how we designed this section !!! :)
#
# MISSION:
#    1. one-way fixed individual effect model & random individual effect model (FGLS estimators)
#    2. Hausman test & robust Hausman test (Wooldridge, 2010)
#    3. Residual QQplots & normality tests, one-way fixed individual effect model (only)
#    4. ROBUST: two-ways fixed effect model & Haussman test (to see if one-way & two-ways are consistent)
#    5. ROBUST: pooling, OLS
#    6. ROBUST: pooling, FGLS
#
#    7. ROBUST: add possible independent:
source("./scripts/proc_3_CHARLS.r")
# LEGACY IN WORKSPACE:
# NOTE: pls go to ./output/proc_3_CHARLS/ for output results (applicable)
# -------------------------------------------
|
98670902aa4b6d8e89d6157d35d4c6c049769548
|
af1f3fcbc176817ba9d452652d40e24967540833
|
/rd_regress.R
|
25e66c14b299f7955a866204332af65c11a8dc21
|
[] |
no_license
|
cjs-quant/rd-simulation
|
96e982fd844e261339b0cfdb67ce3f71787c7de9
|
300d89781fc67565f0ad53f79f0929a9b29571d7
|
refs/heads/main
| 2023-03-10T02:49:18.585471
| 2021-03-01T20:59:16
| 2021-03-01T20:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,304
|
r
|
rd_regress.R
|
rd_regress <- function(data, order, bw_l, bw_r, kernel, discontinuity) {
  # DESCRIPTION:
  #   takes RD data and computes an RD regression with the provided
  #   polynomial order and bandwidths. Returns betas, p-value of the
  #   discontinuity estimate, the design matrix, and a LaTeX-style
  #   regression-equation string.
  # ARGUMENTS:
  #   data:          input data — running variable in column `x` (col 1),
  #                  outcome in column `y` (col 2)
  #   order:         polynomial order of the RD regression (>= 1)
  #   bw_l:          left bandwidth (positive)
  #   bw_r:          right bandwidth (positive)
  #   kernel:        "triangular" or "uniform" kernel weights
  #   discontinuity: the true/selected discontinuity, reported alongside
  #                  the estimate

  # keep observations strictly within the bandwidths
  data <- data[which(data$x > -1 * bw_l & data$x < bw_r), ]

  # Kernel weights, preallocated to the number of retained rows.
  # (The original grew `w` from a scalar via logical indexing, which left
  # transient NA entries and made diag(w) misbehave for n == 1.)
  w_vec <- numeric(nrow(data))
  if (kernel == "triangular") {
    left <- data$x < 0                      # after filtering, x in (-bw_l, bw_r)
    w_vec[left] <- 1 - abs(data$x[left] / bw_l)
    w_vec[!left] <- 1 - abs(data$x[!left] / bw_r)
  } else if (kernel == "uniform") {
    w_vec[] <- 1
  } else {
    # previously an unknown kernel silently produced a degenerate 0x0
    # weight matrix; fail fast instead
    stop("kernel must be 'triangular' or 'uniform'", call. = FALSE)
  }
  # explicit dims so a single-row dataset still yields a 1x1 matrix
  w <- diag(w_vec, nrow(data), nrow(data))

  # dummy for over-cutoff (> 0)
  data$over_cutoff <- ifelse(data$x > 0, 1, 0)

  # create polynomial terms and their interactions with the cutoff dummy:
  # columns end up ordered over_cutoff, x_1, x_over_cutoff_1, ..., constant
  for (i in seq_len(order)) {
    data[paste(colnames(data[1]), toString(i), sep = "_")] <- data[1]^(i)
    data[paste(colnames(data[1]), "over_cutoff", toString(i), sep = "_")] <- data[1]^(i) * data$over_cutoff
  }

  # define matrices for weighted OLS
  y <- data$y
  data$y <- NULL
  data$x <- NULL
  x <- data
  x$constant <- 1
  x <- data.matrix(x)

  # weighted least squares: beta = (X'WX)^{-1} X'Wy
  beta <- solve(t(x) %*% w %*% x) %*% t(x) %*% w %*% y

  # residual variance estimate
  r <- y - x %*% beta
  sigma2_hat <- (t(r) %*% r) / (nrow(x) - ncol(x))

  # standard errors (homoskedastic, unweighted vcov — as in the original)
  vcov_beta_hat <- c(sigma2_hat) * solve(t(x) %*% x)
  se <- sqrt(diag(vcov_beta_hat))

  # t-statistics; p is the two-sided p-value of the discontinuity (beta[1])
  t <- beta / se
  p <- 2 * pt(abs(t), nrow(x) - 1, lower = FALSE)
  p <- p[1, 1]

  # collect estimates
  estimate <- cbind(discontinuity, beta)
  colnames(estimate) <- c("Discontinuity", "Beta")

  # create output table
  out_table <- cbind(discontinuity, beta[1], p, beta[nrow(beta)])
  colnames(out_table) <- c("True Discontinuity", "Estimated Discontinuity",
                           "P-Value", "Control Mean")

  # store coefficients for the regression-equation string.
  # FIX: the original indexed beta[2*i+1]/beta[2*i+2] here, which is off by
  # one relative to the column order above (it labeled the interaction term
  # as the X term, and for the last i pulled in the constant). The X^i term
  # is beta[2*i] and its interaction is beta[2*i+1].
  coeffs <- list()
  coeffs[1] <- paste(round(beta[nrow(beta)], 2))
  coeffs[2] <- paste(ifelse(round(beta[1], 2) >= 0, "+", ""), round(beta[1], 2), " \\times \\mathbb{D}_{X > 0}")
  for (i in seq_len(order)) {
    if (i == 1) {
      coeffs[2*i+1] <- paste(ifelse(round(beta[2*i], 2) >= 0, "+", ""), round(beta[2*i], 2), " \\times X")
      coeffs[2*i+2] <- paste(ifelse(round(beta[2*i+1], 2) >= 0, "+", ""), round(beta[2*i+1], 2), " \\times X \\times \\mathbb{D}_{X > 0}")
    } else {
      coeffs[2*i+1] <- paste(ifelse(round(beta[2*i], 2) >= 0, "+", ""), round(beta[2*i], 2), " \\times X^", i)
      coeffs[2*i+2] <- paste(ifelse(round(beta[2*i+1], 2) >= 0, "+", ""), round(beta[2*i+1], 2), " \\times X^", i, " \\times \\mathbb{D}_{X > 0}")
    }
  }
  # format regression equation (one string per term)
  equation <- paste0(coeffs)

  # report estimates with underlying data
  results <- list("estimate" = estimate, "data" = x, "equation" = equation, "out_table" = out_table)
  return(results)
}
|
7cd0341393916f4dfb083a3cb8f95cb339c51d5b
|
fd3a072634790d3b0f9ebe1f5466e7ab4d717690
|
/man/loglk.Rd
|
f669e2f730a5b39437b671c8d6305c487095db89
|
[] |
no_license
|
WeiAkaneDeng/SPAC2
|
e23de6f0b015550c0917b95741fcdb727bdf5734
|
9dd25a397e9a3647205419389db48364909b04d1
|
refs/heads/master
| 2022-01-27T09:01:01.571072
| 2022-01-14T23:17:06
| 2022-01-14T23:17:06
| 125,806,629
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 797
|
rd
|
loglk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PPCA_profile_log_likelihood.R
\name{loglk}
\alias{loglk}
\title{Profile log-likelihood of the PPCA model with sample eigenvalues.}
\usage{
loglk(lam, n, tau = 1e-05)
}
\arguments{
\item{lam}{a numerical vector of positive sample eigenvalues}
\item{n}{the full dimension}
\item{tau}{a tolerance threshold for the smallest eigenvalue; the default value is \code{1e-05}.}
}
\value{
profile log-likelihood of length \eqn{n-1}.
}
\description{
The function returns the profile log-likelihood
of the PPCA model for each possible choice of
\eqn{K (1, 2, \dots, n-1)} at their respective MLEs.
The maximum choice was set at \eqn{n-1} because when \eqn{K=n},
the profile log-likelihood is equal to that at \eqn{K=n-1}.
}
|
e7c8793a553d68d5019559dd931cb074bc8f5c22
|
a7f0b0a405fc7f1d4c9e4ebb754529f3877a20dd
|
/RcodeData/linpower.r
|
0afc512d78cbd9e161c2e8ef144b71e60b5f7f30
|
[] |
no_license
|
tmuntianu/advancedstatistics
|
4298c0d0f3a52675d85d67f3aac3b7b11ab5cdc1
|
3885b67da560702df4b6d05b240ada6fde2f727c
|
refs/heads/master
| 2020-12-22T19:03:58.858657
| 2020-01-29T04:08:34
| 2020-01-29T04:08:34
| 236,890,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
linpower.r
|
# Simulate and plot the power of the F-test for the linear hypothesis
# C %*% beta = a in a quadratic regression, comparing empirical power (Monte
# Carlo) against theoretical power (noncentral F), across a grid of
# noncentrality parameters lambda.
#   n:     number of observations
#   m:     number of regression coefficients (here 3: intercept, linear, quadratic)
#   k:     number of linear restrictions (rows of C)
#   NL:    number of lambda grid points
#   beta:  true coefficient vector under H0
#   sigma: error standard deviation
#   alpha: test size
#   nSim:  Monte Carlo replications per lambda
#   st:    RNG seed
# Side effects: seeds the RNG, modifies par(), and draws a plot.
linpower <-
function(n=10,m=3,k=2,NL=30,beta=c(1,.5,-.5),sigma=2,alpha=.05,nSim=10000,st=3)
{
set.seed(st)
# Restriction matrix C (k x m) and restricted value a = C %*% beta
C=matrix(c(0,1,1,0,-1,0),nrow=k);a=C%*%beta
# Design matrix: intercept, linear, quadratic terms
X=cbind(rep(1,n),1:n,(1:n)^2)
# Precompute (X'X)^{-1} and the hat-style projector for OLS
XX=t(X)%*%X;iXX=solve(XX);ix=iXX%*%t(X)
cx=C%*%iXX%*%t(C)
icx=solve(cx)
# Random direction d in which beta deviates from H0
d=runif(m,min=-1,max=1)
Cd=C%*%d
# Critical value of the F(k, n-m) test at level alpha
qff=qf(1-alpha,df1=k,df2=n-m)
Fobs=rep(NA,nSim)
lambda=powEMP=powT=seq(from=0,to=30,length=NL)
for(i in 1:NL)
{
# Scale nu so the implied noncentrality equals lambda[i]
nu=as.numeric(sigma*sqrt(lambda[i]/(t(Cd)%*%icx%*%Cd)))
beta.alt=beta+nu*d
for(isim in 1:nSim)
{
y=X%*%beta.alt+rnorm(n,sd=sigma)
# Unrestricted OLS estimate ...
bh=ix%*%y
# ... and the restricted estimate satisfying C %*% br = a
br=bh+iXX%*%t(C)%*%icx%*%(a-C%*%bh)
S0=sum((y-X%*%br)^2)
Smin=sum((y-X%*%bh)^2)
# F statistic: ((S0 - Smin)/k) / (Smin/(n-m))
Fobs[isim]=(S0-Smin)/k/Smin*(n-m)
}
# Empirical power: rejection rate; theoretical: noncentral-F tail
powEMP[i]=mean(Fobs>qff)
powT[i]=1-pf(qff,df1=k,df2=n-m,ncp=lambda[i])
}
# Plot theoretical power curve with empirical points and the alpha line
par(mfrow=c(1,1),mar=c(4,4,1,1))
plot(lambda,powT,type="l",ylim=c(0,1),xlab="",ylab="")
mtext(side=1,"l",font=5,cex=2,line=2.75)
mtext(side=2,"Power",cex=1.5,line=2.5)
segments(-1,alpha,lambda[NL],alpha,lty=2)
text(20,alpha+.03,paste("a =",alpha),cex=1.5,font=5)
points(lambda,powEMP)
}
|
b066dbddfaea319f920f236aa97872efdaab2404
|
4050c25b8aa1bd07808af59300bf8058c7890949
|
/Scripts/iii.LineageAnalyses/Use/OrderBiomeDiversity.R
|
302a15669a16edf198dcccc3f770a139a18b4c2c
|
[] |
no_license
|
KerkhoffLab/Bryophytes
|
6faf03b9f73be24eeff7017f092451e824ac15ca
|
9bb7a8e4c0fa5a9f16e4dbfc937b643da0a69ab4
|
refs/heads/master
| 2021-07-23T17:12:46.284440
| 2021-07-22T16:26:02
| 2021-07-22T16:26:02
| 193,754,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,860
|
r
|
OrderBiomeDiversity.R
|
#Plotting alpha diversity of BRYOPHYTE orders by biome and continent
#Adapted from BiomeDiversity.R
#Kathryn Dawdy and Hailey Napier, July 2020
# 0.0 FIRST ----------------------------------------------------------------
#Run DataProcessing.R, Continents.R, BiomeContinents.R, BiomeDiversity.R,
#ORange.R, OrdBiomeBP.R, TotalAlpha.R,
#then OrderBiomeDF.R - unless you can just load the DF (takes a while)
#then MossPlotData.R - or just load the DF
OrderBiomeDF <- readRDS("Data/OrderBiomeDF.rds")
OrderBiomeContDF <- readRDS("Data/OrderBiomeContDF.rds")
#Run OrderRichness.R
# 0.1 Load Packages --------------------------------------------------------
require(BIEN)
require(maps)
require(dplyr)
require(maptools)
require(raster)
require(sp)
require(rgdal)
require(mapdata)
require(mapproj)
require(wesanderson)
require(ggplot2)
require(rasterVis)
require(knitr)
require(latexpdf)
require(vegan)
require(gridExtra)
require(sf)
require(rgeos)
require(rworldmap)
require(filesstrings)
require(forcats)
require(tidyverse)
require(tmap)
# 0.2 Load data ------------------------------------------------------------
OrderBiomeDF <- readRDS("Data/OrderBiomeDF.rds")
OrdRichAbove100 <- readRDS("Data/OrdRichAbove100.rds")
OrdRich25to100 <- readRDS("Data/OrdRich25to100.rds")
OrdRich10to25 <- readRDS("Data/OrdRich10to25.rds")
OrdRichBelow10 <-readRDS("Data/OrdRichBelow10.rds")
OrderBiomeHemDF <- readRDS("Data/OrderBiomeHemDF.rds")
# 0.3 Colors ---------------------------------------------------------------
#From wes_palette() hex numbers on GitHub: karthik/wesanderson
#Color scheme for biomes (in order of BiomeNames (BiomeProcessing.R))
cols7 <- c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC", "#C7B19C",
"#798E87", "#C27D38")
#Colors used for plots (# corresponds to # of boxplots/length of data)
biome_cols_11 <- c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC", "#C7B19C", "#798E87",
"#C27D38")
biome_cols_22 <- c(biome_cols_11, biome_cols_11)
biome_cols_87 <- c(biome_cols_22, biome_cols_11,
"#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38",
biome_cols_22, biome_cols_22)
biome_cols_66 <- c(biome_cols_22, biome_cols_22, biome_cols_22)
biome_cols_166 <- c(biome_cols_22,
c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38"),
biome_cols_11,
c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC",
"#C27D38"),
c("#D8B70A", "#972D15", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC", "#C7B19C", "#798E87",
"#C27D38"),
c("#81A88D",
"#FDD262", "#D3DDDC"),
c("#D8B70A", "#972D15", "#81A88D",
"#446455", "#D3DDDC", "#C7B19C", "#798E87"),
c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38"),
"#81A88D",
c("#972D15", "#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38"),
c("#D8B70A",
"#446455", "#FDD262", "#D3DDDC"),
biome_cols_22,
c("#D8B70A", "#972D15", "#81A88D",
"#D3DDDC", "#C7B19C",
"#C27D38"),
c("#D8B70A",
"#446455", "#FDD262", "#D3DDDC", "#798E87",
"#C27D38"),
c("#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C"),
c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38"),
biome_cols_11,
c("#D8B70A",
"#C27D38"),
biome_cols_11)
biome_cols_29 <- c(biome_cols_11,
c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC", "#798E87",
"#C27D38"),
c("#972D15", "#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38"))
biome_cols_18 <- c(c("#D8B70A", "#972D15", "#A2A475", "#81A88D", "#02401B",
"#446455", "#FDD262", "#D3DDDC", "#798E87",
"#C27D38"),
c("#972D15", "#A2A475", "#81A88D", "#02401B",
"#FDD262", "#D3DDDC", "#C7B19C",
"#C27D38"))
# 1.0 BIOME RICHNESS BY ORDER ----------------------------------------------
#Hailey's function:
source("Functions/OrdBiomeBP.R")
###Run OrdBiomeBP.R
#Use OrdBiomeBP function for box, violin, or layered violin on box plot
#Enter any order; box, violin, or boxyviolin; cont = "Southern", "Northern", or "both"
OrdBiomeBP("Hypnales", "box")
OrdBiomeBP("Hypnales", "violin")
OrdBiomeBP("Hypnales", "boxyviolin")
OrdBiomeBP("Hypnales", "boxyviolin", cont="South America")
OrdBiomeBP("Hypnales", "boxyviolin", cont="North America")
OrdBiomeBP("Hypnales", "boxyviolin", cont="both")
# 2.0 RICHNESS FACET PLOTS in biomes by order ------------------------------
# 2.1 Facet of richness in biomes by all orders ----------------------------
FacetOrdBiomeRich <- ggplot(OrderBiomeDF,
aes(x=Biome, y=Alpha)) +
geom_boxplot(show.legend = FALSE) +
theme_minimal() +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=20),
axis.text.x = element_text(angle = 90, hjust = 1, size = 8))+
facet_wrap(~Order)
FacetOrdBiomeRich
png("Figures/AlphaOrderBiomeFacet.png", width = 1500, height = 1000, pointsize = 20)
FacetOrdBiomeRich
dev.off()
# 2.2 Subset order by maximum alpha diversity value ------------------------
###Run OrderRichness.R
# max α > 100
OrdRichAbove100 <- readRDS("Data/OrdRichAbove100.rds")
OrdRichAbove100
OBRAbove100DF <- subset(OrderBiomeDF,
OrderBiomeDF$Order=="Hypnales"|
OrderBiomeDF$Order=="Dicranales")
# max α 25-100
OrdRich25to100 <- readRDS("Data/OrdRich25to100.rds")
OrdRich25to100
OBR25to100DF <- subset(OrderBiomeDF,
OrderBiomeDF$Order=="Bartramiales"|
OrderBiomeDF$Order=="Bryales"|
OrderBiomeDF$Order=="Grimmiales"|
OrderBiomeDF$Order=="Hookeriales"|
OrderBiomeDF$Order=="Jungermanniales"|
OrderBiomeDF$Order=="Orthotrichales"|
OrderBiomeDF$Order=="Porellales"|
OrderBiomeDF$Order=="Pottiales")
# max α 10-25
OrdRich10to25 <- readRDS("Data/OrdRich10to25.rds")
OrdRich10to25
OBR10to25DF <- subset(OrderBiomeDF,
OrderBiomeDF$Order=="Funariales"|
OrderBiomeDF$Order=="Hedwigiales"|
OrderBiomeDF$Order=="Marchantiales"|
OrderBiomeDF$Order=="Metzgeriales"|
OrderBiomeDF$Order=="Polytrichales"|
OrderBiomeDF$Order=="Sphagnales")
# max α < 10
OrdRichBelow10 <-readRDS("Data/OrdRichBelow10.rds")
OrdRichBelow10
OBRBelow10DF <- subset(OrderBiomeDF,
OrderBiomeDF$Order!="Hypnales"&
OrderBiomeDF$Order!="Dicranales"&
OrderBiomeDF$Order!="Bartramiales"&
OrderBiomeDF$Order!="Bryales"&
OrderBiomeDF$Order!="Grimmiales"&
OrderBiomeDF$Order!="Hookeriales"&
OrderBiomeDF$Order!="Jungermanniales"&
OrderBiomeDF$Order!="Orthotrichales"&
OrderBiomeDF$Order!="Porellales"&
OrderBiomeDF$Order!="Pottiales"&
OrderBiomeDF$Order!="Funariales"&
OrderBiomeDF$Order!="Hedwigiales"&
OrderBiomeDF$Order!="Marchantiales"&
OrderBiomeDF$Order!="Metzgeriales"&
OrderBiomeDF$Order!="Polytrichales"&
OrderBiomeDF$Order!="Sphagnales")
# max α < 25
#We ended up not using the <25 grouping, but keeping this just in case...
#OrdRichBelow25
#OBRBelow25DF <- subset(OrderBiomeDF,
#OrderBiomeDF$Order!="Hypnales"&
#OrderBiomeDF$Order!="Dicranales"&
#OrderBiomeDF$Order!="Bartramiales"&
#OrderBiomeDF$Order!="Bryales"&
#OrderBiomeDF$Order!="Grimmiales"&
#OrderBiomeDF$Order!="Hookeriales"&
#OrderBiomeDF$Order!="Jungermanniales"&
#OrderBiomeDF$Order!="Orthotrichales"&
#OrderBiomeDF$Order!="Porellales"&
#OrderBiomeDF$Order!="Pottiales")
saveRDS(OBRAbove100DF, "Data/OBRAbove100DF.rds")
saveRDS(OBR25to100DF, "Data/OBR25to100DF.rds")
saveRDS(OBR10to25DF, "Data/OBR10to25DF.rds")
saveRDS(OBRBelow10DF, "Data/OBRBelow10DF.rds")
# 2.2.5 Subset for cells that don't have centers covered --------------------
# Same max-alpha groupings as section 2.2, but built from CleanOrderBiomeDF.
# Fixed(review): the original bound these subsets to the same OBR*DF names
# used in section 2.2 (silently clobbering them), while section 2.5 plots
# Clean*-prefixed data frames that were never created. The subsets are now
# bound to Clean*-prefixed names, matching both their .rds filenames and
# the plotting code in section 2.5.
# max α > 100
OrdRichAbove100 <- readRDS("Data/OrdRichAbove100.rds")
OrdRichAbove100
CleanOBRAbove100DF <- subset(CleanOrderBiomeDF,
                             CleanOrderBiomeDF$Order=="Hypnales"|
                               CleanOrderBiomeDF$Order=="Dicranales")
# max α 25-100
OrdRich25to100 <- readRDS("Data/OrdRich25to100.rds")
OrdRich25to100
CleanOBR25to100DF <- subset(CleanOrderBiomeDF,
                            CleanOrderBiomeDF$Order=="Bartramiales"|
                              CleanOrderBiomeDF$Order=="Bryales"|
                              CleanOrderBiomeDF$Order=="Grimmiales"|
                              CleanOrderBiomeDF$Order=="Hookeriales"|
                              CleanOrderBiomeDF$Order=="Jungermanniales"|
                              CleanOrderBiomeDF$Order=="Orthotrichales"|
                              CleanOrderBiomeDF$Order=="Porellales"|
                              CleanOrderBiomeDF$Order=="Pottiales")
# max α 10-25
OrdRich10to25 <- readRDS("Data/OrdRich10to25.rds")
OrdRich10to25
CleanOBR10to25DF <- subset(CleanOrderBiomeDF,
                           CleanOrderBiomeDF$Order=="Funariales"|
                             CleanOrderBiomeDF$Order=="Hedwigiales"|
                             CleanOrderBiomeDF$Order=="Marchantiales"|
                             CleanOrderBiomeDF$Order=="Metzgeriales"|
                             CleanOrderBiomeDF$Order=="Polytrichales"|
                             CleanOrderBiomeDF$Order=="Sphagnales")
# max α < 10: every order not already assigned to a grouping above.
# (Equivalent to the original chain of != comparisons joined with &.)
OrdRichBelow10 <-readRDS("Data/OrdRichBelow10.rds")
OrdRichBelow10
CleanOBRBelow10DF <- subset(CleanOrderBiomeDF,
                            !(CleanOrderBiomeDF$Order %in%
                                c("Hypnales", "Dicranales", "Bartramiales",
                                  "Bryales", "Grimmiales", "Hookeriales",
                                  "Jungermanniales", "Orthotrichales",
                                  "Porellales", "Pottiales", "Funariales",
                                  "Hedwigiales", "Marchantiales",
                                  "Metzgeriales", "Polytrichales",
                                  "Sphagnales")))
saveRDS(CleanOBRAbove100DF, "Data/CleanOBRAbove100DF.rds")
saveRDS(CleanOBR25to100DF, "Data/CleanOBR25to100DF.rds")
saveRDS(CleanOBR10to25DF, "Data/CleanOBR10to25DF.rds")
saveRDS(CleanOBRBelow10DF, "Data/CleanOBRBelow10DF.rds")
# 2.3 Load max richness value groupings ------------------------------------
OBRAbove100DF <- readRDS("Data/OBRAbove100DF.rds")
OBR25to100DF <- readRDS("Data/OBR25to100DF.rds")
OBR10to25DF <- readRDS("Data/OBR10to25DF.rds")
OBRBelow10DF <- readRDS("Data/OBRBelow10DF.rds")
# 2.4 Facets of richness in biomes by orders grouped by max α --------------
# 2.4.1 Max α >100 ---------------------------------------------------------
FacetOBRAbove100 <- ggplot(OBRAbove100DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_22, color="black",
outlier.size=1) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
facet_wrap(~Order
,
#ncol=1 #un-comment # of rows you want
ncol=2
)
FacetOBRAbove100
png("Figures/AlphaOrderBiomeAbove100.png", width = 1500, height = 1000, pointsize = 20)
FacetOBRAbove100
dev.off()
# 2.4.2 Max α 25-100 -------------------------------------------------------
FacetOBR25to100 <- ggplot(OBR25to100DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_87, color="black",
outlier.size=0.7) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
facet_wrap(~Order
,
#ncol=2 #un-comment # of rows you want
ncol=4
)
FacetOBR25to100
png("Figures/AlphaOrderBiome25to100.png", width = 1500, height = 1000, pointsize = 20)
FacetOBR25to100
dev.off()
# 2.4.3 Max α 10-25 --------------------------------------------------------
FacetOBR10to25 <- ggplot(OBR10to25DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome
)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_66, color="black",
outlier.size=0.7) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
facet_wrap(~Order
,
ncol=3 #un-comment # of rows you want
#ncol=2
)
FacetOBR10to25
png("Figures/AlphaOrderBiome10to25.png", width = 1500, height = 1000, pointsize = 20)
FacetOBR10to25
dev.off()
# 2.4.4 Max α <10 ----------------------------------------------------------
FacetOBRBelow10 <- ggplot(OBRBelow10DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_166, color="black",
outlier.size=0.7) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
#geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35, color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 90, hjust = 1, size = 8))+
facet_wrap(~Order
,
ncol=6 #un-comment # of rows you want
#ncol=5
)
FacetOBRBelow10
png("Figures/AlphaOrderBiomeBelow10.png", width = 1500, height = 1000, pointsize = 20)
FacetOBRBelow10
dev.off()
# 2.5 Plots with weighted cell biome count
# 2.5.1 Max α >100 ---------------------------------------------------------
CleanFacetOBRAbove100 <- ggplot(CleanOBRAbove100DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_22, color="black",
outlier.size=1) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
facet_wrap(~Order
,
#ncol=1 #un-comment # of rows you want
ncol=2
)
CleanFacetOBRAbove100
png("Figures/CleanAlphaOrderBiomeAbove100.png", width = 1500, height = 1000, pointsize = 20)
CleanFacetOBRAbove100
dev.off()
# 2.4.2 Max α 25-100 -------------------------------------------------------
CleanFacetOBR25to100 <- ggplot(CleanOBR25to100DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_87, color="black",
outlier.size=0.7) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
facet_wrap(~Order
,
#ncol=2 #un-comment # of rows you want
ncol=4
)
CleanFacetOBR25to100
png("Figures/CleanAlphaOrderBiome25to100.png", width = 1500, height = 1000, pointsize = 20)
CleanFacetOBR25to100
dev.off()
# 2.4.3 Max α 10-25 --------------------------------------------------------
CleanFacetOBR10to25 <- ggplot(CleanOBR10to25DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome
)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_66, color="black",
outlier.size=0.7) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
facet_wrap(~Order
,
ncol=3 #un-comment # of rows you want
#ncol=2
)
CleanFacetOBR10to25
png("Figures/CleanAlphaOrderBiome10to25.png", width = 1500, height = 1000, pointsize = 20)
CleanFacetOBR10to25
dev.off()
# 2.4.4 Max α <10 ----------------------------------------------------------
CleanFacetOBRBelow10 <- ggplot(CleanOBRBelow10DF,
aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
geom_boxplot(show.legend = FALSE, fill=biome_cols_166, color="black",
outlier.size=0.7) +
#theme_minimal() + #un-comment whichever theme you want
theme_gray() +
#theme_light() +
#theme_bw() +
#theme_dark() +
#theme_linedraw() +
#geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35, color="gray25") +
xlab("Biome") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=15),
axis.text.x = element_text(angle = 90, hjust = 1, size = 8))+
facet_wrap(~Order
,
ncol=6 #un-comment # of rows you want
#ncol=5
)
CleanFacetOBRBelow10
png("Figures/CleanAlphaOrderBiomeBelow10.png", width = 1500, height = 1000, pointsize = 20)
CleanFacetOBRBelow10
dev.off()
# 3.0 CONTINENT FACET PLOTS -------------------------------------------------
# Richness by biome, faceted by continent; biome_cols_18 supplies one fill
# per biome-continent box (18 combinations in OrderBiomeContDF).
FacetContBiomeRich <- ggplot(OrderBiomeContDF,
                             aes(x=Biome, y=Alpha, fill=Biome, color=Biome)) +
  geom_boxplot(show.legend = FALSE, fill=biome_cols_18, color="black") +
  guides(x = guide_axis(angle=30)) +
  theme_gray() +
  geom_violin(scale="count", show.legend=FALSE, fill="gray", alpha=0.35,
              color="gray25") +
  xlab("Biome") +
  ylab("Richness") +
  theme(axis.title.y = element_text(size=32),
        axis.title.x = element_text(size=32),
        axis.text.y = element_text(size=15),
        axis.text.x = element_text(angle = 30, hjust = 1, size = 8))+
  facet_wrap(~Cont)
# Fixed(review): the original printed CleanFacetContBiomeRich here, an
# object that is never defined anywhere in this script.
FacetContBiomeRich
png("Figures/AlphaBiomeContinents.png", width = 1500, height = 1000, pointsize = 20)
FacetContBiomeRich
dev.off()
# 4.0 Orders on x axis, facet of biomes boxplots ---------------------------
# Inverse of the section-2 plots: richness per ORDER on the x axis, one
# facet per biome, over the full OrderBiomeDF (all orders, all biomes).
FacetRichOrder <- ggplot(OrderBiomeDF,
aes(x=Order, y=Alpha)) +
geom_boxplot(show.legend = FALSE) +
theme_minimal() +
xlab("Order") +
ylab("Richness") +
theme(axis.title.y = element_text(size=32),
axis.title.x = element_text(size=32),
axis.text.y = element_text(size=20),
axis.text.x = element_text(angle = 90, hjust = 1, size = 6))+
facet_wrap(~Biome)
FacetRichOrder
# Render once on screen above, then again into the PNG device.
png("Figures/AlphaOrderBiomeSwapped.png", width = 1500, height = 1000, pointsize = 20)
FacetRichOrder
dev.off()
|
0f590f98766cd991cf3a4b4b41cb6ce5db8f3cb5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RSDA/examples/lynne2.Rd.R
|
90f08f78f295ffc03d6bf9a36ce6df0961ba1049
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
lynne2.Rd.R
|
# Auto-extracted example from the RSDA package help page for the lynne2
# dataset (symbolic interval data).
library(RSDA)
### Name: lynne2
### Title: Symbolic interval data example.
### Aliases: lynne2
### Keywords: datasets
### ** Examples
# Load the bundled dataset and render its symbolic data table.
data(lynne2)
display.sym.table(lynne2)
|
ac4a4904ac523f45f604ef13dbc5bb55852ad555
|
28240bc82c7ff1e5e2cf0dce945fd30b293d9f9c
|
/videogames.R
|
b16ae093538a5aaef560ee2933408784a7a55ec2
|
[] |
no_license
|
Siddhesh19991/video-game-analysis
|
fca57043e572ba89e63a73a15b40f70075e4ef86
|
7bf456520ca96ad24f9b18944b60bdb633d05c9f
|
refs/heads/master
| 2022-07-22T06:43:03.338064
| 2020-05-24T13:59:25
| 2020-05-24T13:59:25
| 266,552,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,863
|
r
|
videogames.R
|
# Exploratory analysis of the vgsales video-game sales dataset (1980-2020):
# penetration by platform/genre/publisher and yearly "best of" summaries.
vg <- read.csv("~/Desktop/vgsales.csv")
vg$Platform <- as.factor(vg$Platform)
vg <- vg[complete.cases(vg), ]   # drop rows with any missing value
vg$Genre <- as.factor(vg$Genre)
vg$Year <- as.numeric(vg$Year)
vg$Publisher <- as.factor(vg$Publisher)
# To check for repeated observations (distinct row count vs. total)
library(dplyr)
dim(distinct(vg))
dim(vg)
# EDA
library(ggplot2)
# From 1980-2020
which.max(table(vg$Publisher)) # EA has most games
plot(table(vg$Platform), las = 2) # DS, PS2 most platforms
plot(table(vg$Genre), las = 2) # ACTION games made more
# Correlations of each regional sales column with global sales
cor(vg$Global_Sales, vg$EU_Sales)
cor(vg$Global_Sales, vg$JP_Sales)
cor(vg$Global_Sales, vg$NA_Sales)
cor(vg$Global_Sales, vg$Other_Sales)
g <- ggplot(vg, aes(Platform)) + geom_bar(aes(color = Global_Sales))
# global sales is more in DS, PS2, Wii, Xbox360
a <- ggplot(vg, aes(Genre)) + geom_bar(aes(color = Global_Sales))
# action sales is more, followed by sports
plot(table(vg$Year))
# more game releases during the 2000s, 2007-8
# Top 10 publishers by number of titles
b <- sort(table(vg$Publisher), decreasing = TRUE)
plot(b[1:10], las = 2)
# Best genre every year (highest summed global sales per Year)
e <- group_by(vg, Year, Genre)
f <- summarise(e, sum(Global_Sales))   # f stays grouped by Year
# Fixed(review): the original called top(f, 1), which does not exist;
# dplyr's top_n() (used below for the same purpose) keeps the top row
# per remaining group, ranked by the last column.
j <- top_n(f, 1)
ggplot(data = j, aes(x = Year, y = j$`sum(Global_Sales)`, fill = Genre)) +
  geom_bar(stat = "identity")
# Best platform every year
h <- group_by(vg, Year, Platform)
i <- summarise(h, sum(Global_Sales))
ggplot(data = i, aes(x = Year, y = i$`sum(Global_Sales)`, fill = Platform)) +
  geom_line(stat = "identity", aes(color = Platform))
# Best game every year
k <- group_by(vg, Year)
m <- top_n(k, 1)
ggplot(m, aes(m$Global_Sales, m$Name, fill = Year)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        plot.title = element_text(hjust = .5))
# z<-group_by(vg,Genre)
# z<-summarise(z,count=n())
# z<-top_n(z,1)
# ggplot(z,aes(z$Genre,z$count))+geom_bar(stat ="identity")
|
ac6aa7afce6d2daf6baa6e8baf4a36002201790e
|
3aa093dba669d1f52f43f86d4a23b861ec36d327
|
/graphs/plots.r
|
e787bfdd6bf0ed28f0ff6d5f1cc6bd3a06a0212d
|
[] |
no_license
|
ramhiser/paper-rlda-comparison
|
905cccbd8e3bd9d985e672c0ce39d5db5569dcae
|
2840a6dde85caf5136c07e4227d622e4f34265b3
|
refs/heads/master
| 2016-09-10T16:44:31.387420
| 2011-07-26T16:03:19
| 2011-07-26T16:03:19
| 967,339
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,357
|
r
|
plots.r
|
# Expected / conditional error-rate plots for the Duin and Guo simulation
# configurations. Uses the pre-2012 ggplot2 opts() API, kept for
# compatibility with the rest of this (2011) codebase.
library('ggplot2')
load("duin.RData")
load("guo.RData")
# Split the summary tables by feature dimension q
duin_summary_q30 <- subset(duin_results$summary, q == 30)
duin_summary_q50 <- subset(duin_results$summary, q == 50)
duin_summary_q100 <- subset(duin_results$summary, q == 100)
guo_summary_q30 <- subset(guo_results$summary, q == 30)
guo_summary_q50 <- subset(guo_results$summary, q == 50)
guo_summary_q100 <- subset(guo_results$summary, q == 100)
# Prototype plot; the generalized version is eer_plots() below.
# Fixed(review): the original referenced an undefined plot_title here,
# which errored — a literal title is supplied instead.
p <- ggplot(duin_summary_q50, aes(x = n, y = error, group = method, color = method))
p <- p + geom_path(aes(color = method, linetype = method)) + ylim(c(0, .45))
p <- p + geom_point(aes(shape = method))
p <- p + xlab("Training Data Size") + ylab("Expected Error Rate") +
  opts(title = "Duin Simulation Configuration (q = 50)")
p + theme_set(theme_bw())

# Expected error rate vs. training size n_k, one line per method.
#   results    - summary data frame with columns n, error, method
#   plot_title - plot title
#   ymax       - upper y-axis limit
#   save, file - optionally ggsave() the plot to `file`
eer_plots <- function(results, plot_title = "TODO: Add plot title", ymax = 0.5, save = F, file) {
  p <- ggplot(results, aes(x = n, y = error, group = method, color = method))
  p <- p + geom_path(aes(color = method, linetype = method)) + ylim(c(0, ymax))
  p <- p + geom_point(aes(shape = method))
  p <- p + xlab(expression(n[k])) + ylab("Expected Error Rate") + opts(title = plot_title)
  p <- p + theme_set(theme_bw())
  if (save) ggsave(filename = file, plot = p)
  p
}

# Boxplots of conditional error rates across replications for one (n_k, q).
cer_boxplot <- function(results, nk = 100, q = 100, plot_title = "TODO: Add plot title", ymax = 0.5, save = F, file) {
  plot_title <- paste(plot_title, "(q = ", q, ", ", expression(n_k), " = ", nk, ")", sep = "")
  p <- ggplot(subset(results, n == nk), aes(x = method, y = error))
  p <- p + ylim(c(0, ymax)) + geom_boxplot(color = I("#3366FF"))
  p <- p + xlab("") + ylab("Conditional Error Rate") + opts(title = plot_title)
  p <- p + theme_set(theme_bw())
  if (save) ggsave(filename = file, plot = p)
  p
}

# Duin Plots
duin_q30 <- subset(duin_results$results, q == 30)
duin_q50 <- subset(duin_results$results, q == 50)
duin_q100 <- subset(duin_results$results, q == 100)
cer_boxplot(duin_q50, nk = 10, q = 50, plot_title = "Duin Simulation Configuration", ymax = 0.6, save = T, file = "duin-box-10.eps")
cer_boxplot(duin_q50, nk = 30, q = 50, plot_title = "Duin Simulation Configuration", ymax = 0.6, save = T, file = "duin-box-30.eps")
cer_boxplot(duin_q50, nk = 50, q = 50, plot_title = "Duin Simulation Configuration", ymax = 0.6, save = T, file = "duin-box-50.eps")
eer_plots(duin_summary_q30, "Duin Simulation Configuration (q = 30)", 0.45)
eer_plots(duin_summary_q50, "Duin Simulation Configuration(q = 50)", 0.45, save = T, file = "duin50.eps")
# Fixed(review): the original line ended with an unbalanced extra ")",
# which made the whole file unparseable.
eer_plots(duin_summary_q100, "Duin Simulation Configuration (q = 100)", 0.5)

# Guo Plots
guo_q30 <- subset(guo_results$results, q == 30)
guo_q50 <- subset(guo_results$results, q == 50)
guo_q100 <- subset(guo_results$results, q == 100)
cer_boxplot(guo_q100, nk = 10, q = 100, plot_title = "Guo Simulation Configuration", ymax = 0.6, save = T, file = "guo-box-10.eps")
cer_boxplot(guo_q100, nk = 50, q = 100, plot_title = "Guo Simulation Configuration", ymax = 0.6, save = T, file = "guo-box-50.eps")
cer_boxplot(guo_q100, nk = 70, q = 100, plot_title = "Guo Simulation Configuration", ymax = 0.6, save = T, file = "guo-box-70.eps")
eer_plots(guo_summary_q30, "Guo Simulation Configuration (q = 30)", 0.35)
eer_plots(guo_summary_q50, "Guo Simulation Configuration (q = 50)", 0.35)
eer_plots(guo_summary_q100, "Guo Simulation Configuration (q = 100)", 0.40, save = T, file = "guo100.eps")
|
2a76b95f5b2d87483c81221e8a2fee7b8a313343
|
fa0e8940e02fe2d60e7417ae45fbd061baec7703
|
/SCRIPTS/Cluster_DTW_embolse_events_filtrados.R
|
a02c4b5e0485b67fbe3c7ece09b42eaf9044bbed
|
[] |
no_license
|
CIAT-DAPA/aeps_Grupo_Santa_Maria_DM
|
dc903583db403567340a416894c6420d09d15288
|
c1adfcf7a87d98006274822daea74e7276abb73a
|
refs/heads/master
| 2020-04-12T12:26:10.310273
| 2018-12-19T21:18:49
| 2018-12-19T21:18:49
| 162,491,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,821
|
r
|
Cluster_DTW_embolse_events_filtrados.R
|
# Cluster analysis of bagging ("embolse") patterns
# Hugo Andres Dorado B
# 11-10-2018
library(ggplot2)
library(dtw)
library(dtwclust)
library(plyr)
library(zoo)
# NOTE(review): rm(list=ls()) wipes the caller's workspace; generally an
# anti-pattern for scripts, kept here unchanged.
rm(list=ls())
# Helper scripts define distDtwMV(), hirarCluster() and graphics_cluster()
source('SCRIPTS/funciones_cluster_temporal.R')
source('SCRIPTS/Cluster_DTW_embolse_FUN.R')
# Read and prepare data
embolse <- read.csv('DATOS/consolidado_embolse_removiendo_interv.csv')
embolse <- embolse[!is.na(embolse$Embolse),]
# One series per farm (finca) / year (anio) / plot (Lote) combination
embolse_lst <-
split( embolse , paste(embolse$finca,embolse$anio,embolse$Lote,sep='_') )
# Row counts per series, used to filter out short series
conteoFilas <- do.call(rbind,lapply(embolse_lst,dim))
summary(conteoFilas)
boxplot(conteoFilas[,1])
conteoFilas <- conteoFilas[order(conteoFilas[,1],decreasing = F),]
# Keep only series with more than 51 records
ct <- conteoFilas[conteoFilas[,1] > 51,]
emls_50_51 <- embolse_lst[row.names(ct)]
# Order each series chronologically by week (Semana)
emls_50_51 <- lapply(emls_50_51,
function(w){
w[order(w$Semana),]
})
time_series <- lapply( emls_50_51,function(x){x['Embolse']} )
# Smooth each series with a 4-point rolling mean before clustering
time_series <- lapply( time_series,function(x){
data.frame(Embolse = rollmean(x$Embolse ,4) )
}
)
time_series <- lapply(time_series,ts)
plot(time_series[[1]],las=2)
# time_series <- time_series[1:30]
# lapply(time_series,function(x){plot(x,type='l')})
##----------------------------------------------------------------------------##
# ------------------- Compute the DTW distance matrix --------------------------
distAllMatrix <- distDtwMV(time_series)
save(distAllMatrix,file=here::here('DATOS','distAllMatrix_media_movil_sin_interve.RDATA'))
#load(here::here('DATOS','distAllMatrix_media_movil_sin_interve.RDATA'))
# Cluster a (25 CLUSTER)
# hirarCluster() (sourced helper) returns one cluster label per series
hClustEvents <- hirarCluster(distAllMatrix)
dfResults <- data.frame(Nam_time_series = names(time_series),
clust = hClustEvents)
spl_time_series <- split(time_series,hClustEvents)
dirSave <- 'CLUSTER_25_SIN_INTER'
if(!dir.exists(dirSave)){dir.create(dirSave)}
write.csv(dfResults,paste(dirSave,'/',dirSave,'.csv',sep=''),row.names = F)
# Cluster sizes (conteo = number of series per cluster)
tablResults <- ddply(dfResults,~clust,summarise,conteo = length(clust))
write.csv(tablResults,paste(dirSave,'/conteo_',dirSave,'.csv',sep=''),row.names = F)
graphics_cluster(ts= time_series,ts_per_cluster=spl_time_series,dirSave,limites_fijos=F)
#------------------------------------------------------------------------------
#--------------- Identify the centroid of each cluster's curves ---------------
library(gtools)
# Stack each cluster's curves into one matrix (one row per curve);
# gtools::smartbind pads shorter curves with NA.
groupsTable <-
lapply(spl_time_series,
function(w){
ltx <- lapply(w,function(q){
g <- as.numeric(q[,1])
names(g) <- 1:length(q[,1])
g
})
do.call(smartbind,ltx)
}
)
# Pointwise median curve of each cluster
centroids <- lapply(groupsTable,function(w){
ts(data.frame(Embolse = apply(w,2,median,na.rm=T)))}
)
names(spl_time_series)
names(centroids)
# For each cluster, pick the observed series with the smallest DTW
# distance to the median curve — the cluster's representative pattern.
centroids_obs <- sapply( names(spl_time_series) ,function(w){
names(which.min(sapply(spl_time_series[[w]],function(x){dtw(centroids[[w]],x)$distance})))
}
)
centerPatterns <- lapply(time_series[centroids_obs],function(w){data.frame(week=1:length(w),Embolse = w[,1])})
names(centerPatterns) <- names(centroids_obs)
namCenPatt <- names(centerPatterns)
# Long data frame: one row per (cluster, week) of the representative curves
patterCluster <-
do.call(rbind,
lapply(seq(length(centerPatterns)),
function(g){dfa <- data.frame(Group=as.character(namCenPatt[g]),centerPatterns[[g]])
dfa$Group <- as.character(dfa$Group )
dfa$week <- as.numeric(dfa$week)
dfa$Embolse <- as.numeric(dfa$Embolse)
dfa
}
)
)
patterCluster$Group <- as.numeric(patterCluster$Group)
tablResults$clust
# Attach cluster sizes (conteo) and build "G<group>_C<count>" labels
mg_DS <- merge(patterCluster,tablResults,by.x= 'Group',by.y='clust',all.x = T ,all.y =F,sort=F)
mg_DS$Group_Count <- paste('G',mg_DS$Group,'_C',mg_DS$conteo,sep='')
# Keep only clusters with more than 6 member curves
mg_DS_1 <- mg_DS[mg_DS$conteo > 6,]
# g1: all clusters overlaid; g2: one facet per cluster; g3: only clusters
# with more than 6 members, overlaid
g1 <- ggplot(mg_DS,aes(x=week,y=Embolse))+geom_point(aes(colour=factor(Group_Count)),size = 0.3)+
geom_line(aes(colour=factor(Group_Count)),size = 0.3)+theme_bw()
g2 <- ggplot(mg_DS,aes(x=week,y=Embolse))+geom_point(aes(colour=factor(Group_Count)))+
geom_line(aes(colour=factor(Group_Count)))+facet_wrap(~Group_Count,scales = 'free')+theme_bw()
g3 <- ggplot(mg_DS_1,aes(x=week,y=Embolse))+geom_point(aes(colour=factor(Group_Count)),size = 0.3)+
geom_line(aes(colour=factor(Group_Count)),size = 0.3)+theme_bw()
ggsave('CLUSTER_25_SIN_INTER/ghrap_lin_sin_interve.png',g1)
ggsave('CLUSTER_25_SIN_INTER/ghra_wrap_sin_interve.png',g2)
ggsave('CLUSTER_25_SIN_INTER/ghrap_lin_mas_5_sin_interve.png',g3)
write.csv(mg_DS_1,'CLUSTER_25_SIN_INTER/datos_grafico_centro_cluster_sin_interve.csv')
# NOTE(review): the same data frame is also exported under CLUSTER_22_MA —
# confirm this duplicate export is intentional.
write.csv(mg_DS_1,'CLUSTER_22_MA/datos_grafico_centro_cluster_sin_interve.csv')
|
585a1544f75418ab8ffa91b16181fa8469123862
|
bb1010954d2d068c4c01a756c9d38f6c2cd1523a
|
/plot3.R
|
b49fadb126aaa57e7f678ea4eca4277a57709ad4
|
[] |
no_license
|
dse-arvind/Peer-graded-Assignment-Course-Project-2
|
68548509a5c82b67424085c3597815b5be9e90ea
|
74e1e32c3a25649138bd3f56d16d22feb0ee1217
|
refs/heads/master
| 2022-11-05T23:45:57.064556
| 2020-06-27T06:42:07
| 2020-06-27T06:42:07
| 275,313,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 307
|
r
|
plot3.R
|
# Plot 3: Baltimore City PM2.5 emissions, 1999-2008, one facet per source
# type, stacked-bar by year.
# NOTE(review): assumes ggplot2 is loaded and NEIdataBaltimore (NEI data
# subset to Baltimore City) was built earlier — neither appears in this
# file; confirm against the companion course-project scripts.
g<-ggplot(aes(x = year, y = Emissions, fill=type), data=NEIdataBaltimore)
g+geom_bar(stat="identity")+
facet_grid(.~type)+
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))+
guides(fill=FALSE)
|
0ed6884305fa42f6e3db3106a99a7de2e466f817
|
3008be2f3ad8c2dceaa18e997d1243772b1b1301
|
/Effect of Coupons grocery Retail/marketing_project.R
|
3514373970ecc49e60ad23121253ba6d464b3e2f
|
[] |
no_license
|
as75999/Projects
|
a1feb70b8e78ca9add47ecfcf5b758d6966ee7f1
|
f5b8e5891e0ceda2b8c6364d1e3ef2be516acaf3
|
refs/heads/master
| 2016-08-12T03:31:50.743275
| 2016-01-19T20:34:39
| 2016-01-19T20:34:39
| 49,965,388
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,910
|
r
|
marketing_project.R
|
# Grocery-retail coupon analysis: household penetration (number of
# distinct purchasing households) by commodity and by brand within each
# commodity, computed with SQL over the transaction data via sqldf.
library(sqldf)
#loading files
data=read.csv('D:/Fall/MA/Project/grocery_data.csv')
# getting the column names
colnames(data)
# Distinct households purchasing each commodity
#penetration by commodity
sqldf('SELECT commodity,count(distinct household) household from data group by 1')
# Distinct households per brand, one query per commodity, highest first
#Penetration by brand
#pasta
sqldf('SELECT brand,count(distinct household) household from data where commodity="pasta" group by 1 order by 2 desc')
#pancakes
sqldf('SELECT brand,count(distinct household) household from data where commodity="pancake mixes" group by 1 order by 2 desc')
#pasta sauce
sqldf('SELECT brand,count(distinct household) household from data where commodity="pasta sauce" group by 1 order by 2 desc')
#syrups
sqldf('SELECT brand,count(distinct household) household from data where commodity="syrups" group by 1 order by 2 desc')
#Penetration by combinations of brand of different complimentary commodity
##########################################################################
# Per (basket, brand): 1/0 indicator for each of the four commodities
# that the basket line belongs to.
Commodity_by_Trip=sqldf('
select
basket,
brand,
max(case when commodity = "pasta" then 1 else 0 end) as pasta,
max(case when commodity = "pasta sauce" then 1 else 0 end) as pasta_sauce,
max(case when commodity = "pancake mixes" then 1 else 0 end) as pancake_mixes,
max(case when commodity = "syrups" then 1 else 0 end) as syrups
from data
group by basket,brand
')
# Keep only rows involving pancake mixes or syrups.
Pancake_and_Syrup =sqldf('
select
basket,
pancake_mixes,
syrups,
brand
from Commodity_by_Trip
where pancake_mixes = 1 or syrups =1
')
# Baskets that contain BOTH pancake mixes and syrups (co-purchase trips).
Pancake_and_Syrup_1 =sqldf('
select
basket,
sum(pancake_mixes) pancake_mixes,
sum(syrups) syrups
from Pancake_and_Syrup
group by basket
having sum(pancake_mixes) > 0 and sum(syrups) > 0 ')
# Restrict the row-level table to those co-purchase baskets.
Pancake_and_Syrup_2=sqldf('
SELECT
a.*
FROM Pancake_and_Syrup a
join Pancake_and_Syrup_1 b
ON a.basket=b.basket
')
# Pancake-mix side of each co-purchase basket ...
Pancake =sqldf('
select basket, brand
from Pancake_and_Syrup_2
where pancake_mixes >= 1
order by basket
')
# ... and the syrup side.
Syrup =sqldf('
select basket, brand
from Pancake_and_Syrup_2
where syrups >=1
order by basket
')
# Cross the two sides within each basket to enumerate brand pairs
# bought together.
Pancake_and_Syrup_3 =sqldf('
select b.basket, a.brand as pancake_brand, b.brand as syrup_brand
from Pancake a, Syrup b
where a.basket = b.basket
order by b.basket, a.brand, b.brand
')
############## results for pan cakes and syrup
# Basket counts for each (pancake brand, syrup brand) combination.
sqldf('
select pancake_brand, syrup_brand, count(basket)
from Pancake_and_Syrup_3
group by pancake_brand, syrup_brand
')
##############
# Same co-purchase analysis as above, for pasta sauce and pasta.
pasta_sauce_and_pasta =sqldf('
select
basket,
pasta_sauce,
pasta,
brand
from Commodity_by_Trip
where pasta_sauce = 1 or pasta =1
')
# Baskets containing BOTH pasta sauce and pasta.
pasta_sauce_and_pasta_1 =sqldf('
select
basket,
sum(pasta_sauce) pasta_sauce,
sum(pasta) pasta
from pasta_sauce_and_pasta
group by basket
having sum(pasta_sauce) > 0 and sum(pasta) > 0 ')
# Restrict the row-level table to those co-purchase baskets.
pasta_sauce_and_pasta_2=sqldf('
SELECT
a.*
FROM pasta_sauce_and_pasta a
join pasta_sauce_and_pasta_1 b
ON a.basket=b.basket
')
# Pasta-sauce side of each co-purchase basket ...
pasta_sauce =sqldf('
select basket, brand
from pasta_sauce_and_pasta_2
where pasta_sauce >= 1
order by basket
')
# ... and the pasta side.
pasta =sqldf('
select basket, brand
from pasta_sauce_and_pasta_2
where pasta >=1
order by basket
')
# Enumerate (pasta-sauce brand, pasta brand) pairs within each basket.
pasta_sauce_and_pasta_3 =sqldf('
select b.basket, a.brand as pasta_sauce_brand, b.brand as pasta_brand
from pasta_sauce a, pasta b
where a.basket = b.basket
order by b.basket, a.brand, b.brand
')
############## results for pasta and pasta sauce
# Basket counts per brand pair, most frequent combination first.
sqldf('
select pasta_sauce_brand, pasta_brand, count(basket) num_basket
from pasta_sauce_and_pasta_3
group by pasta_sauce_brand, pasta_brand order by count(basket) desc
')
###############
################################################################################
## Brand loyalty index (BLI): for each household x commodity, a brand's
## share of the household's total units bought in that commodity.
# Units bought per household x commodity x brand (the numerator).
BL <- sqldf('
select
household,
commodity,
brand, sum(units) as quantity
from data
group by household, commodity, brand'
)
# Total units per household x commodity (the denominator).
BL1 <- sqldf('
select
household,
commodity,
sum(units) as total_quantity
from data
group by household, commodity')
# BUG FIX: SQLite "/" performs integer division when both operands are
# integers (and read.csv reads whole-number columns as integer), so the
# original a.quantity/b.total_quantity truncated every partial share to 0 --
# bli could only be 0 or 1. Multiplying by 1.0 forces real division.
BL2 <- sqldf('
select
a.household,
a.commodity,
a.brand,
a.quantity*1.0/b.total_quantity as bli
from BL a, BL1 b
where a.household = b.household and a.commodity = b.commodity
group by a.household, a.commodity, a.brand
')
###output for perceptual maps######
#Pasta
# Mean brand-loyalty index and household count per pasta brand,
# most-bought brands first.
pasta_bli=sqldf('select brand , avg(bli) pasta_bli,count(household) pasta_hd from BL2
where commodity="pasta" group by 1 order by count(household) desc ' )
#Patsa Sauce
pasta_sauce_bli=sqldf('select brand , avg(bli) pasta_Sauce_bli,count(household) pasta_Sauce_hd from BL2
where commodity="pasta sauce" group by 1 order by count(household) desc ' )
# Join pasta and pasta-sauce metrics for brands present in both categories.
pasta_past_sauce_bli=sqldf('select a.brand, a.pasta_bli, a.pasta_hd,b.pasta_Sauce_bli,
b.pasta_Sauce_hd from pasta_bli a ,pasta_sauce_bli b where
a.brand=b.brand')
View(pasta_past_sauce_bli)
#Pancakes
pancake_mixes_bli=sqldf('select brand , avg(bli) pancake_mixes_bli,count(household) pancake_mixes_hd from BL2
where commodity="pancake mixes" group by 1 order by count(household) desc ' )
#Syrup
syrups_bli=sqldf('select brand , avg(bli) syrups_bli,count(household) syrups_hd from BL2
where commodity="syrups" group by 1 order by count(household) desc ' )
# Join pancake-mix and syrup metrics for brands present in both categories.
pancake_syrups_bli=sqldf('select a.brand,a.pancake_mixes_bli, a.pancake_mixes_hd,b.syrups_bli,
b.syrups_hd from pancake_mixes_bli a ,syrups_bli b where
a.brand=b.brand')
View(pancake_syrups_bli)
##########################################################################
#first coupon usage
# First day each household bought a brand/commodity WITH a coupon.
coupon=sqldf('select brand,commodity,household,min(day) min_day from data where coupon =1 group by 1,2,3');
# First day each household bought the same brand/commodity WITHOUT a coupon.
non_coupon=sqldf('select brand,commodity,household,min(day) min_day from data where coupon =0 group by 1,2,3');
# Households whose first coupon purchase preceded their first non-coupon
# purchase -- i.e. the coupon plausibly drove brand adoption.
first_household=sqldf('select a.brand,a.commodity,count(distinct a.household) num_household FROM coupon a,non_coupon b where a.brand=b.brand and a.commodity=b.commodity
and a.household=b.household and a.min_day<b.min_day group by 1,2')
# Total households that ever used a coupon, per brand & commodity.
coupon_household=sqldf('select brand,commodity, count(distinct(household)) num_household FROM coupon group by 1,2');
#output to find for which branch coupon has been effective
# NOTE(review): this reassignment replaces the row-level `coupon` table
# above with a brand-level summary -- works, but easy to misread.
coupon =sqldf('select a.brand, a.commodity, a.num_household total_h, b.num_household from first_household a,coupon_household b
where a.brand=b.brand and a.commodity=b.commodity and b.num_household>a.num_household group by 1,2')
# Percentage of coupon-using households that were onboarded via a coupon.
coupon$household_onboarded=round((coupon$total_h/coupon$num_household)*100,1)
coupon_pasta=sqldf('select * from coupon where commodity="pasta"')
coupon_pasta_sauce=sqldf('select * from coupon where commodity="pasta sauce"')
coupon_pancake_mixes=sqldf('select * from coupon where commodity="pancake mixes"')
coupon_syrups=sqldf('select * from coupon where commodity="syrups"')
library(ggplot2)
# One styled bar chart of coupon-driven adoption per commodity; the plot
# object A is rebuilt (and overwritten) for each commodity.
A=ggplot(coupon_pasta,aes(x=brand,y=household_onboarded))+geom_bar(stat="identity", fill='red')+ ggtitle("Coupon Effect On Pasta Brand Adoption")
A=A + theme(plot.title=element_text(size=20,face='bold',color="dark gray"))
A=A +theme(axis.title.x=element_blank())
A=A +theme(axis.title.y=element_blank())
A=A+geom_text(aes(label=round(household_onboarded,3)),vjust=-0.2,color="dark blue",size=8)
A=A+theme(axis.text=element_text(size=16))
A
A=ggplot(coupon_pasta_sauce,aes(x=brand,y=household_onboarded))+geom_bar(stat="identity", fill='salmon2')+ ggtitle("Coupon Effect On Pasta Sauce Brand Adoption")
A=A + theme(plot.title=element_text(size=20,face='bold',color="dark gray"))
A=A +theme(axis.title.x=element_blank())
A=A +theme(axis.title.y=element_blank())
A=A+geom_text(aes(label=round(household_onboarded,3)),vjust=-0.2,color="dark blue",size=8)
A=A+theme(axis.text=element_text(size=16))
A
A=ggplot(coupon_pancake_mixes,aes(x=brand,y=household_onboarded))+geom_bar(stat="identity", fill='tan3')+ ggtitle("Coupon Effect On Pankcake Mixes Brand Adoption")
A=A + theme(plot.title=element_text(size=20,face='bold',color="dark gray"))
A=A +theme(axis.title.x=element_blank())
A=A +theme(axis.title.y=element_blank())
A=A+geom_text(aes(label=round(household_onboarded,3)),vjust=-0.2,color="dark blue",size=8)
A=A+theme(axis.text=element_text(size=16))
A
A=ggplot(coupon_syrups,aes(x=brand,y=household_onboarded))+geom_bar(stat="identity", fill='gold1')+ ggtitle("Coupon Effect On Syrup Brand Adoption")
A=A + theme(plot.title=element_text(size=20,face='bold',color="dark gray"))
A=A +theme(axis.title.x=element_blank())
A=A +theme(axis.title.y=element_blank())
A=A+geom_text(aes(label=round(household_onboarded,3)),vjust=-0.2,color="dark blue",size=8)
A=A+theme(axis.text=element_text(size=16))
A
# Simpler (axis-labelled) versions of the same three charts.
ggplot(coupon_pasta_sauce,aes(x=brand,y=household_onboarded))+
geom_bar(stat="identity", fill='salmon2')+xlab("Brand")+ ylab("% household onboarded using coupon")+ ggtitle("Pasta Sauce Brand adoption")
ggplot(coupon_pancake_mixes,aes(x=brand,y=household_onboarded))+
geom_bar(stat="identity", fill='tan3')+xlab("Brand")+ ylab("% household onboarded using coupon")+ ggtitle("Pancake Mixes Brand adoption")
ggplot(coupon_syrups,aes(x=brand,y=household_onboarded))+
geom_bar(stat="identity", fill='gold1')+xlab("Brand")+ ylab("% household onboarded using coupon")+ ggtitle("Syrups Brand adoption")
#####################################################################################
#Modelling price elasticity
# Weekly aggregates: unit sales, average price and coupon counts per commodity.
# NOTE(review): syrups_sales sums dollar_sales while the other three
# commodities sum units -- confirm mixing revenue and units is intentional.
# BUG FIX: the avg(...) price terms originally used "else 0", so every
# non-matching row contributed a zero and dragged each weekly average price
# toward 0. Dropping the else yields NULL for non-matching rows, which
# AVG() ignores, giving the true mean price of that commodity that week.
agg_data <- sqldf('
select
week,
sum(coupon) num_coupon,
sum(Case when commodity ="pasta" then units else 0 end) pasta_sales,
avg(Case when commodity ="pasta" then price end) pasta_price,
sum(Case when commodity ="pancake mixes" then units else 0 end) pancake_mixes_sales,
avg(Case when commodity ="pancake mixes" then price end) pancake_mixes_price,
sum(Case when commodity ="pasta sauce" then units else 0 end) pasta_sauce_sales,
avg(Case when commodity ="pasta sauce" then price end) pasta_sauce_price,
sum(Case when commodity ="syrups" then dollar_sales else 0 end) syrups_sales,
avg(Case when commodity ="syrups" then price end) syrups_price,
sum(Case when commodity ="pasta" then coupon else 0 end) pasta_coupon,
sum(Case when commodity ="pancake mixes" then coupon else 0 end) pancake_mixes_coupon,
sum(Case when commodity ="pasta sauce" then coupon else 0 end) pasta_sauce_coupon,
sum(Case when commodity ="syrups" then coupon else 0 end) syrups_coupon
from data
group by week
')
agg_data <- as.data.frame(agg_data)

# One-week lag of a series; week 1 (no prior week) gets 0. Replaces eight
# duplicated copy/paste blocks (the pasta lag was even computed twice).
lag_one <- function(x) c(0, x[-length(x)])

agg_data$pasta_sales_lag          <- lag_one(agg_data$pasta_sales)
agg_data$pancake_mixes_sales_lag  <- lag_one(agg_data$pancake_mixes_sales)
agg_data$pasta_sauce_sales_lag    <- lag_one(agg_data$pasta_sauce_sales)
agg_data$syrups_sales_lag         <- lag_one(agg_data$syrups_sales)
agg_data$pasta_coupon_lag         <- lag_one(agg_data$pasta_coupon)
agg_data$pancake_mixes_coupon_lag <- lag_one(agg_data$pancake_mixes_coupon)
agg_data$pasta_sauce_coupon_lag   <- lag_one(agg_data$pasta_sauce_coupon)
agg_data$syrups_coupon_lag        <- lag_one(agg_data$syrups_coupon)

colnames(agg_data)
str(agg_data)

## Log-log OLS models: own-price, cross-price, lagged sales and coupon terms.
model1 <- lm(log(pasta_sales)~week+log(pasta_price)+log(pasta_sauce_price)+log(pasta_sales_lag+1)+log(pasta_coupon+1), data=agg_data)
summary(model1)
model2 <- lm(log(pasta_sauce_sales)~week+log(pasta_sauce_price)+log(pasta_price)+log(pasta_sauce_sales_lag+1)+log(pasta_sauce_coupon+1), data=agg_data)
summary(model2)
model3 <- lm(log(pancake_mixes_sales)~log(pancake_mixes_price)+log(syrups_price)+log(pancake_mixes_sales_lag+1)+log(pancake_mixes_coupon+1), data=agg_data)
summary(model3)
model4 <- lm(log(syrups_sales)~log(pancake_mixes_price)+log(syrups_price)+log(syrups_sales_lag+1)+log(syrups_coupon+1), data=agg_data)
summary(model4)

####### instrumental-variable models (lagged sales instrument the price)
library(systemfit)
library(AER)
model5 <- ivreg(log(pasta_sales)~log(pasta_price)+log(pasta_sauce_price)+ log(pasta_coupon+1)|log(pasta_sales_lag+1)+log(pasta_sauce_price)+ log(pasta_coupon+1), data=agg_data)
summary(model5)
# BUG FIX: the original referenced a nonexistent column
# (pasta_sauce_sauce_price), which errors at fit time -- the cross-price
# regressor for pasta sauce is pasta_price, mirroring model2. It also
# clobbered model5; the sauce IV model is now stored as model6.
model6 <- ivreg(log(pasta_sauce_sales)~log(pasta_sauce_price)+log(pasta_price)+log(pasta_sauce_coupon+1)|log(pasta_price)+log(pasta_sauce_sales_lag+1)+log(pasta_sauce_coupon+1), data=agg_data)
summary(model6)
# BUG FIX: library(dlpyr) was a typo; the package is dplyr.
library(dplyr)
|
97138bb780a17510733b93d9cda1fdcaae4b1245
|
ef4eb23543224c14f4cae67190d1f82bd881a4a4
|
/IDESSA/BushEncroachment/AerialImages_MODIS/ProcessAerialImages/unsupervised.R
|
07c7486b8ab43d3300b189ca9b33ff7e8748c037
|
[] |
no_license
|
environmentalinformatics-marburg/magic
|
33ed410de55a1ba6ff943090207b99b1a852a3ef
|
b45cf66f0f9aa94c7f11e84d2c559040be0a1cfb
|
refs/heads/master
| 2022-05-27T06:40:23.443801
| 2022-05-05T12:55:28
| 2022-05-05T12:55:28
| 9,035,494
| 6
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,084
|
r
|
unsupervised.R
|
################################################################################
### UNSUPERVISED CLASSIFICATION FOR SOUTH AFRICA AERIAL IMAGES
# Script calculates predictor variables from aerial images and applies a
# k-means clustering algorithm.
################################################################################
# NOTE: the original rm(list=ls()) was removed -- wiping the caller's global
# environment is a side effect a script should not have; run the script in a
# fresh R session instead.
library(Rsenal)
library(rgdal)
library(cluster)
library(fuerHanna) # for fortran implementation of vvi, hsi, simpletexture
# BUG FIX: require() merely returns FALSE when a package is missing; use
# library() so a missing dependency fails immediately and loudly.
library(vegan)
################################################################################
### Do adjustments here
################################################################################
pathToSampleImages <- "/media/hanna/data/IDESSA_Bush/AERIALIMAGERY/samples/"
tmpdir <- "/media/hanna/data/IDESSA_Bush/AERIALIMAGERY/samples/tmpdir"
outdir <- "/media/hanna/data/IDESSA_Bush/AERIALIMAGERY/samples/clusterResults/"
fname <- list.files(pathToSampleImages,pattern=".tif$")[2] # file to be clustered
sizeOfImage <- 300 # in m
method <- "elbow" # or "cascadeKM": method to determine optimal nr of clusters
includeTexture <- FALSE
applyShadowMask <- FALSE #might make more sense to let shadow be a separate class
#if it can clearly be distinguished from other objects
################################################################################
### Load and crop
################################################################################
setwd(pathToSampleImages)
tmpdir <- paste0(tmpdir,"/001")
dir.create(tmpdir)
rasterOptions(tmpdir=tmpdir)
rgb_img <- brick(fname)
center <- c((extent(rgb_img)@xmin+extent(rgb_img)@xmax)/2,
(extent(rgb_img)@ymin+extent(rgb_img)@ymax)/2)
rgb_img <- crop(rgb_img,c(center[1]-sizeOfImage/2,center[1]+sizeOfImage/2,
center[2]-sizeOfImage/2,center[2]+sizeOfImage/2))
names(rgb_img) <- c("R","G","B")
################################################################################
### Calculate further variables
################################################################################
rgb_hsi <- hsi(red = raster(rgb_img, layer = 1),
green = raster(rgb_img, layer = 2),
blue = raster(rgb_img, layer = 3))
vvindex <- fuerHanna::vvi(red = raster(rgb_img, layer = 1),
green = raster(rgb_img, layer = 2),
blue = raster(rgb_img, layer = 3))
names(vvindex) <- "VVI"
result <- brick(c(rgb_img,rgb_hsi,vvindex))
if(includeTexture){
txt <- simpletexture(result,3)
result <- stack(result,txt)
}
result <- stack(result)
################################################################################
### Shadow detection
################################################################################
## shadow detection
if(applyShadowMask){
shadow <- rgbShadowMask(rgb_img)
# modal filter
# 3x3 modal (majority) filter smooths isolated pixels in the binary mask.
shadow <- focal(shadow, w = matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1), nc = 3),
fun = modal, na.rm = TRUE, pad = TRUE)
}
################################################################################
### Determine Number of clusters
################################################################################
image.df <- as.data.frame(result)
############################# Elbow method
if (method=="elbow"){
cluster.image <- list()
pExp <- c()
# Fit k-means for k = 1..10 and record the proportion of total variance
# explained by each k.
for (i in 1:10){
cluster.image[[i]] <- kmeans(na.omit(image.df), i, iter.max = 10,
                             nstart = 25)
pExp[i] <- 1- cluster.image[[i]]$tot.withinss / cluster.image[[i]]$totss
print (paste0(i," processed..."))
}
# Smallest k at which the spline-smoothed curve's second derivative is ~0,
# i.e. the "elbow" where adding clusters stops paying off.
pExpFunc <- splinefun(1:10,pExp)
optNrCluster <- min(which(round(pExpFunc(1:10,deriv=2),1)==0))
pdf(paste0(outdir,"/Nclust_elbow_",fname,".pdf"))
# BUG FIX: the y axis plots pExp, the proportion of variance explained --
# the original label claimed "Within groups sum of squares".
plot(1:10, pExp,
type="b", xlab="Number of Clusters",
ylab="Proportion of variance explained")
points(optNrCluster,pExp[optNrCluster],col="red",pch=16)
dev.off()
}
############################# cascadeKM method
if (method=="cascadeKM"){
fit <- cascadeKM(na.omit(image.df), 2, 8, iter = 50)
# Pick the partition size with the best Calinski criterion.
optNrCluster <- as.numeric(which.max(fit$results[2,]))
}
################################################################################
### Cluster image with optimal nr of clusters
################################################################################
# Final k-means fit with the number of clusters chosen above.
cluster.image <- kmeans(na.omit(image.df), optNrCluster, iter.max = 50,
nstart = 25)
### Create raster output from clusering
# Map cluster labels back onto the original pixel positions; pixels dropped
# by na.omit() are reinserted as NA.
image.df.factor <- rep(NA, length(image.df[,1]))
image.df.factor[!is.na(image.df[,1])] <- cluster.image$cluster
clustered <- raster(result)
clustered <- setValues(clustered, image.df.factor)
if(applyShadowMask){
# Mask out pixels flagged as shadow.
clustered[shadow==0] <- NA
}
################################################################################
### Save clustered image and clean tmpdir
################################################################################
writeRaster(clustered,paste0(outdir,"/clustered_",fname,".tif"),overwrite=TRUE)
unlink(tmpdir, recursive=TRUE)
|
a2bbbf6b0282e218e6cd65b4a6989be26675b1c5
|
e306d18bae6892b8439ab880913b269f2d94d9b3
|
/Heri Folder/rscript_plots.R
|
f93c704c3f7fe768d463ef2963affccd4962fac3
|
[] |
no_license
|
LizzieChung/Covid_19_Project
|
8aed83f9e89d13d5445a5e23075d3becc0bbd554
|
1e8af1068ef197f6acd170624fff0816520966f2
|
refs/heads/main
| 2023-05-29T22:09:59.121164
| 2021-06-17T13:08:04
| 2021-06-17T13:08:04
| 377,888,843
| 0
| 0
| null | 2021-06-17T16:05:14
| 2021-06-17T16:05:14
| null |
UTF-8
|
R
| false
| false
| 4,280
|
r
|
rscript_plots.R
|
library(tidycensus)
library(tidyverse)
library(tigris)
library(lubridate)
options(tigris_use_cache = TRUE)

# SECURITY FIX: the Census API key was hard-coded in the source (leaking the
# credential) and written to .Renviron via install = TRUE. Supply it through
# the CENSUS_API_KEY environment variable instead.
census_api_key(Sys.getenv("CENSUS_API_KEY"))

data("county_laea") # Dataset with county geometry for use when shifting Alaska and Hawaii
class(county_laea)

# fips_codes is a built-in dataset for smart state and county lookup.
# To access directly use: data(fips_codes)

# BUG FIX: removed a stray argument-less get_decennial() call -- geography
# and variables are required, so the bare call errors and aborts the script.

us_components <- get_estimates(geography = "state", product = "components")
unique(us_components$variable)

# County-level 2019 population estimates with geometry; shift_geometry()
# repositions Alaska, Hawaii and Puerto Rico under the lower 48.
us_pop <- get_estimates(geography = "county", product = "population", year = 2019, geometry = TRUE, resolution = "20m") %>%
  shift_geometry()
unique(us_pop$variable)

us_pop <- us_pop %>%
  filter(variable == "POP")

# Ordered population buckets, largest first (renamed from `order`, which
# shadowed base::order()).
pop_breaks <- c("10,000,000 +", "1,000,000 to 10,000,000", "100,000 to 1,000,000", "10,000 to 100,000", "10,000 and below")

us_pop <- us_pop %>%
  mutate(groups = case_when(
    value > 10000000 ~ "10,000,000 +",
    value > 1000000 ~ "1,000,000 to 10,000,000",
    value > 100000 ~ "100,000 to 1,000,000",
    value > 10000 ~ "10,000 to 100,000",
    value > 0 ~ "10,000 and below"
  )) %>%
  mutate(groups = factor(groups, levels = pop_breaks))

# State outlines drawn on top; GEOID 72 (Puerto Rico) excluded.
state_overlay <- states(
  cb = TRUE,
  resolution = "20m"
) %>%
  filter(GEOID != "72") %>%
  shift_geometry()

ggplot() +
  geom_sf(data = us_pop, aes(fill = groups, color = groups), size = 0.1) +
  geom_sf(data = state_overlay, fill = NA, color = "black", size = 0.1) +
  scale_fill_brewer(palette = "PuOr", direction = -1) +
  # MODERNIZATION: guide = FALSE is deprecated in ggplot2 >= 3.3.4;
  # use guide = "none".
  scale_color_brewer(palette = "PuOr", direction = -1, guide = "none") +
  coord_sf(datum = NA) +
  theme_minimal(base_family = "Roboto") +
  labs(title = "US Population",
       subtitle = "US Census Bureau 2019 Population Estimates",
       fill = "Population Grouping"
  )
# Attempt at Making a plot for cases per day
covid.dt <- read_csv("C:/Users/hlop5/Downloads/owid-covid-data.csv")
head(covid.dt)
dim(covid.dt)
str(covid.dt)
names(covid.dt)

# BUG FIX: the original selected the columns of interest and converted the
# date column, but THEN reassigned asia.covid.df from covid.dt, discarding
# both the column selection and the date conversion. Build the data frame in
# one pipeline: filter to Asia -> keep variables of interest -> parse date
# and derive month / weekday.
asia.covid.df <- covid.dt %>%
  filter(continent == "Asia") %>%
  select(iso_code, continent, location, date, new_deaths, total_deaths,
         new_cases, total_cases,
         icu_patients, hosp_patients,
         new_tests, total_tests,
         new_vaccinations, total_vaccinations, population) %>%
  mutate(
    date = as.Date(date, format = "%Y-%m-%d"),
    month = month(date, label = TRUE),
    wday = wday(date)
  )

#Creating a csv file to use for the shiny web app
write.csv(asia.covid.df, "~/HBSP/Covid_19_Project/Heri Folder/asia.covid.df", row.names = FALSE)
# Exploratory scraps kept below for reference (commented out in original).
#variable.order = c("hosp_patients", "icu_patients",
#"new_tests","new_vaccinations",
#"new_cases","new_deaths")
#unique(asia.covid.df$location)
#function(location){
# }
#CleanCOVIDdataUS =
# subset(COVIDdataUS, select = c(date, population,
# total_cases, people_vaccinated,
# total_deaths))
#asia.covid.df %>%
# filter(!is.na(total_cases) | !is.na(population) | is.na(total_vaccinations))
#CleanCOVIDdataUS[is.na(CleanCOVIDdataUS)] = 0
#CleanCOVIDdataUS$date = as.Date(CleanCOVIDdataUS$date, format = "%Y-%m-%d")
#USCOVID_Plot = ggplot(data = CleanCOVIDdataUS) +
# geom_line(mapping = aes(x = date, y = total_cases, color = "Total Cases")) +
# geom_line(mapping = aes(x = date, y = population, color = "Population")) +
# geom_line(mapping = aes(x = date, y = people_vaccinated, color = "Total Vaccinations")) +
# geom_line(mapping = aes(x = date, y = total_deaths, color = "Total Deaths")) +
# xlab("Date") +
# ylab("Number of People") +
# ggtitle("COVID Pandemic in the US") + theme_bw() + labs(color = "") + theme(legend.position = "top")
#USCOVID_Plot
#covid.mn.tbl %>%
# pivot_longer(c(pcr.cases:hosp.cases, deaths)) %>%
# mutate(name = factor(name, levels=variable.order)) %>%
# ggplot(aes(x=date, y=value, color=month))+
# geom_point()+
# labs(x="Date", y="Logarithmic scale")+
# facet_grid(.~name)+
# scale_y_log10()
|
48beb8e5a692020d874c188d61672089b2e0b7d7
|
aee33d9c208e5ee3be85d574ad1217cdde06ce68
|
/2. Модуль 2/2.3. Работа с Яндекс Директ API/attributions_report.R
|
fdfcec770907a36ed6497b62163ef0744f884e11
|
[] |
no_license
|
selesnow/r_for_marketing
|
7f02cf55a2357df3d5de9bb87943c3dbf8377142
|
8117f879b769f0b66031b86ed4e2b5fc2427449d
|
refs/heads/master
| 2022-06-21T12:20:51.236644
| 2022-05-28T09:31:22
| 2022-05-28T09:31:22
| 142,575,304
| 9
| 4
| null | null | null | null |
WINDOWS-1251
|
R
| false
| false
| 730
|
r
|
attributions_report.R
|
# Conversion report with attribution models for a fixed (static) date range.
# Pulls daily conversions for two goals from Yandex Direct under three
# attribution models: last click (LC), last significant click (LSC) and
# first click (FC).
attribution_report <- yadirGetReport(DateFrom = "2018-11-15",
DateTo = "2018-11-20",
FieldNames = c("Date",
"Conversions"),
Goals = c(27475434, 38234732),
AttributionModels = c("LC", "LSC", "FC"),
Login = "irina.netpeak",
TokenPath = "C:\\r_for_marketing_course\\tokens")
|
a772d280ce5048a93b735be333c841cd844972ca
|
dc612e7334832bf81fe1d9b17e8f779fe40f9ee2
|
/plot3.R
|
b55efa42b22e79c98ecfeb267bedea9f90f29aab
|
[] |
no_license
|
hestiles/ExData_Project2
|
7819a1ce8d3c6b325c573e2d6e678ed3984b5f18
|
1b647dd23b7683fee55a4f2eab6fcb8822b0a884
|
refs/heads/master
| 2020-05-07T10:37:48.116799
| 2019-04-10T17:57:55
| 2019-04-10T17:57:55
| 180,425,453
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
plot3.R
|
# Plot 3: Baltimore City PM2.5 emissions by year and source type.
# Skip the (large) download if the dataset is already unpacked.
if (!file.exists("summarySCC_PM25.rds")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", destfile="emissions.zip")
  unzip("emissions.zip")
}
NEI<-readRDS("summarySCC_PM25.rds")
# BUG FIX: the file inside the zip is "Source_Classification_Code.rds";
# the original ".r" extension made this readRDS call fail.
SCC<-readRDS("Source_Classification_Code.rds")
library(dplyr)
##Filter to Baltimore (fips 24510)
Bal<- filter(NEI, fips=="24510")
##sum by year & type
Bal3<-aggregate(Emissions~year+type,Bal, sum)
library(ggplot2)
##Create PNG file
png(file="plot3.png", width=480, height=480)
# print() ensures the plot renders even when the script is run via source().
print(qplot(year, Emissions, data=Bal3, facets=.~type)+geom_smooth())
dev.off()
|
4291268cc3d86699bbfb02774d2c73ae0e75d57d
|
5fe7b5cbb596152e3dc05bb570d885c3fb17236b
|
/Main Simulation/2DataGeneration.R
|
3347bf82001b95a0cf1e876970af8c7b0a074b9c
|
[] |
no_license
|
AndresFPA/DependSim
|
8151fec9415a8cc2ac9bd260f503163a646b3622
|
a1cd6f7445fbbf34a2c2a2621e45a7f721f28119
|
refs/heads/main
| 2023-06-01T22:54:50.019409
| 2021-06-07T11:41:20
| 2021-06-07T11:41:20
| 350,783,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
2DataGeneration.R
|
# Comparison of dependence tests.
# Generate a bivariate sample (x, y): x ~ N(0, 1) and y derived from x via
# one of several functional relationships with added noise.
#
# samp         - sample size (10, 20, 35, 50, 75, 100, 150, 500)
# noise        - amount of error introduced in the relationship (high, medium, low)
# relationship - one of "linear", "quadratic", "exponential", "sinwave",
#                "cross", "no_relationship"; dispatches to the same-named
#                helper function defined elsewhere in this project.
#
# Returns a data.frame with columns x and y.
DataGeneration <- function(samp, noise, relationship){
  x <- rnorm(samp)
  # switch() replaces the original if/else chain; an unrecognised value now
  # fails with an explicit message instead of "object 'y' not found".
  y <- switch(relationship,
              linear          = linear(x, noise),
              quadratic       = quadratic(x, noise),
              exponential     = exponential(x, noise),
              sinwave         = sinwave(x, noise),
              cross           = cross(x, noise),
              no_relationship = no_relationship(x, noise),
              stop("Unknown relationship: ", relationship, call. = FALSE))
  data.frame(x = x, y = y)
}
|
ae8d297c0ee35a9713fa039202d682ca5c1a483e
|
76e7b85a99b127e8f71b52c00dc2c9d3baa38676
|
/Advanced Programming in R /Lab Bonus/awesomebonus/R/airport_delays.R
|
88816e28a2fa0cbf59b64cc756585540f266a0d1
|
[
"MIT"
] |
permissive
|
aydinardalan/MSc_Statistics-and-Machine-Learning
|
44b65e4dd39d992de0bdb4cd1dcb88186d5d0371
|
0b171fecb481a713b7d61263129275c0aecdb2d7
|
refs/heads/master
| 2023-03-17T04:01:48.076902
| 2020-04-22T12:31:01
| 2020-04-22T12:31:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,812
|
r
|
airport_delays.R
|
#' Function for generating plot of mean arrival delays at different airports.
#'
#' \code{visualize_airport_delays} generates a map plot with color scale for mean arrival delay at different airports. Dataset used is the nycflights13.
#'
#'
#' @return \code{visualize_airport_delays} returns a plot.
#'
#' @examples
#' visualize_airport_delays()
#'
#' @references \url{https://www.rdocumentation.org/packages/nycflights13/versions/1.0.1}
#'
#' @importFrom stats na.omit
#' @importFrom rlang .data
#' @import maps
#' @import ggplot2
#' @import dplyr
#'
#' @export
#'
visualize_airport_delays <- function(){
  flights_data <- na.omit(nycflights13::flights)
  airport_data <- na.omit(nycflights13::airports)
  # Join data sets on destination airport code
  flights_data <- dplyr::rename(flights_data, "faa" = "dest")
  joined_data <- inner_join(flights_data, airport_data, by="faa") # Discard data mismatches using inner join
  # Create dataframe for plotting (different structure)
  plot_data <- data.frame(unique(joined_data[c('faa', 'name', 'lat', 'lon')]))
  # Group by airports, then get mean for each category of airport over arrival delay
  grouped_data <- group_by(joined_data, .data$faa)
  calculated_mean <- summarise(grouped_data, mean_delay = mean(.data$arr_delay))
  # Join mean to plot data.
  plot_data <- left_join(plot_data, calculated_mean, by = "faa")
  # Create map and plot using mean value as color scale.
  # MODERNIZATION: aes_string() is deprecated in ggplot2 >= 3.0; use aes()
  # with the rlang .data pronoun, which this package already imports.
  used_map <- map_data("usa")
  ggplot() +
    geom_polygon(data = used_map,
                 aes(x = .data$long, y = .data$lat, group = .data$group),
                 fill = "#636e72") +
    coord_fixed(1.3) +
    geom_point(data = plot_data,
               aes(x = .data$lon, y = .data$lat, color = .data$mean_delay),
               size = 5) +
    scale_colour_viridis_c()
}
|
a67eb4416727bf1d728d89b7d0b0540381876486
|
e5413540e82627c1ac6cabcc37ffa0e7a192b2f8
|
/plot3.R
|
f7e577b12a7a0b6de848600a7519275400b761e2
|
[] |
no_license
|
tlaytongoogle/ExData_Plotting1
|
c47441d6ed0bd7b6fe9dfa85a9989d018849540c
|
bb3d3de8796315cfa34f73fe8a61a50e6589f7c5
|
refs/heads/master
| 2021-01-21T09:47:06.457037
| 2015-06-05T17:57:33
| 2015-06-05T17:57:33
| 36,808,748
| 0
| 0
| null | 2015-06-03T14:23:50
| 2015-06-03T14:23:49
| null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
plot3.R
|
# Plot 3 of the electric power consumption assignment.
# Run in a working directory containing the UC Irvine dataset archive
# "exdata-data-household_power_consumption.zip"; produces plot3.png with
# the three energy sub-metering series for 2007-02-01 and 2007-02-02.
unzip("exdata-data-household_power_consumption.zip", "household_power_consumption.txt")

# Load only the Date, Time and three sub-metering columns; "?" marks NA.
power <- read.csv(
  "household_power_consumption.txt",
  sep = ";",
  colClasses = c(NA, NA, "NULL", "NULL", "NULL", "NULL", NA, NA, NA),
  na.strings = c("?"))

# Restrict to the two target days and build a POSIX timestamp axis.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power <- subset(power, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
timestamps <- strptime(paste(power$Date, power$Time), "%Y-%m-%d %H:%M:%S")

png(file = "plot3.png")
plot(
  timestamps,
  power$Sub_metering_1,
  type = "l",
  col = "black",
  main = NA,
  xlab = NA,
  ylab = "Energy sub metering")
lines(timestamps, power$Sub_metering_2, col = "red")
lines(timestamps, power$Sub_metering_3, col = "blue")
legend(
  "topright",
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  lty = c(1, 1, 1),
  col = c("black", "red", "blue"))
dev.off()
|
3d7a2600396455944a1e60a7c9bae44880fbe0a1
|
7de2bbabb8dcc0e203a9d24a92379d2e6dfb4c15
|
/R/pb.bay.R
|
8905991e57363e730a0635d35f95d4d9ebb4267e
|
[] |
no_license
|
cran/altmeta
|
5e87ebf847a968e8af4022775384b87d3229eb39
|
7d39be3ddeb63215a90cf6e42c903e94fb40f4bd
|
refs/heads/master
| 2022-09-18T11:48:51.398907
| 2022-08-29T06:30:09
| 2022-08-29T06:30:09
| 52,831,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,712
|
r
|
pb.bay.R
|
# pb.bay: Bayesian regression-based detection of publication bias
# ("small-study effects") for a meta-analysis of 2x2 tables, fitted with
# JAGS via rjags. Each study's effect y[i] is regressed on its model-based
# standard error se[i]; the slope `beta` is the bias parameter, and the
# function returns its posterior median and credible interval.
#
# Arguments (as used in this body):
#   y            observed effect sizes.
#   s2           accepted but not referenced in this function body.
#   sig.level    credible interval is the (sig.level/2, 1 - sig.level/2)
#                posterior quantiles of beta.
#   n00, n01, n10, n11  cell counts of the 2x2 tables; the model uses the
#                row totals n0 = n00 + n01 and n1 = n10 + n11.
#   het          "mul" -> residual SD is kappa*se[i] (multiplicative);
#                "add" -> residual variance is gamma^2 + se[i]^2 (additive).
#   sd.prior     "unif" -> SD parameters ~ Uniform(0, upp.het);
#                "hn"   -> SD parameters ~ half-normal with scale phi.
#   n.adapt, n.chains, n.burnin, n.iter, thin  MCMC settings.
#   coda         if TRUE, also return the posterior samples of beta.
#   traceplot    if TRUE, draw a trace plot of beta per chain.
# Returns a list with est.bay (posterior median of beta), ci.bay (credible
# interval), and optionally samps.bay (a data frame of per-chain samples).
pb.bay <- function(y, s2 = NA, sig.level = 0.1, n00, n01, n10, n11, het = "mul",
  sd.prior = "unif", n.adapt = 1000, n.chains = 3,
  n.burnin = 5000, n.iter = 10000, thin = 2, upp.het = 2,
  phi = 0.5, coda = FALSE, traceplot = FALSE){
  out <- NULL
  # Multiplicative heterogeneity: residual SD proportional to se[i].
  if(is.element("mul", het)){
    # Uniform priors on the SD parameters kappa and tau.
    if(is.element("unif", sd.prior)){
      # NOTE: modelstring takes an unused argument `o` and is always called
      # as modelstring(); it simply returns the JAGS model as a string.
      modelstring <- function(o){
        out <- "
model{
for(i in 1:N){
y[i] ~ dnorm(alpha + beta*se[i], prec[i])
prec[i] <- pow(kappa*se[i], -2)
se[i] <- sqrt(1/(n0[i]*p01[i]*(1-p01[i])) + 1/(n1[i]*p11[i]*(1-p11[i])))
n01[i] ~ dbin(p01[i], n0[i])
n11[i] ~ dbin(p11[i], n1[i])
logit(p01[i]) <- mu[i]
logit(p11[i]) <- mu[i] + delta[i]
delta[i] ~ dnorm(d, inv.tau2)
mu[i] ~ dnorm(0, 0.0001)
}
alpha ~ dnorm(0, 0.0001)
beta ~ dnorm(0, 0.0001)
kappa ~ dunif(0, upp.kappa)
d ~ dnorm(0, 0.0001)
inv.tau2 <- pow(tau, -2)
tau ~ dunif(0, upp.tau)
}"
        return(out)
      }
      jags.dat <- list(N = length(y), y = y,
        n0 = n00 + n01, n1 = n10 + n11, n01 = n01, n11 = n11,
        upp.kappa = upp.het, upp.tau = upp.het)
      # One initial-value list per chain, each with its own RNG seed so
      # chains are reproducibly different.
      inits <- NULL
      rng.seeds <- sample(1000000, n.chains)
      for(i in 1:n.chains){
        inits[[i]] <- list(alpha = rnorm(1), beta = rnorm(1), d = rnorm(1),
          kappa = runif(1, 0.1, 1.9), tau = runif(1, 0.1, 1.9),
          .RNG.name = "base::Wichmann-Hill", .RNG.seed = rng.seeds[i])
      }
    }
    # Half-normal priors (scale phi) on the SD parameters.
    if(is.element("hn", sd.prior)){
      modelstring <- function(o){
        out <- "
model{
for(i in 1:N){
y[i] ~ dnorm(alpha + beta*se[i], prec[i])
prec[i] <- pow(kappa*se[i], -2)
se[i] <- sqrt(1/(n0[i]*p01[i]*(1-p01[i])) + 1/(n1[i]*p11[i]*(1-p11[i])))
n01[i] ~ dbin(p01[i], n0[i])
n11[i] ~ dbin(p11[i], n1[i])
logit(p01[i]) <- mu[i]
logit(p11[i]) <- mu[i] + delta[i]
delta[i] ~ dnorm(d, inv.tau2)
mu[i] ~ dnorm(0, 0.0001)
}
alpha ~ dnorm(0, 0.0001)
beta ~ dnorm(0, 0.0001)
kappa ~ dnorm(0, inv.phi2)I(0,)
d ~ dnorm(0, 0.0001)
inv.tau2 <- pow(tau, -2)
tau ~ dnorm(0, inv.phi2)I(0,)
}"
        return(out)
      }
      jags.dat <- list(N = length(y), y = y,
        n0 = n00 + n01, n1 = n10 + n11, n01 = n01, n11 = n11,
        inv.phi2 = phi^(-2))
      inits <- NULL
      rng.seeds <- sample(1000000, n.chains)
      for(i in 1:n.chains){
        inits[[i]] <- list(alpha = rnorm(1), beta = rnorm(1), d = rnorm(1),
          kappa = runif(1, 0.1, 1.9), tau = runif(1, 0.1, 1.9),
          .RNG.name = "base::Wichmann-Hill", .RNG.seed = rng.seeds[i])
      }
    }
  }
  # Additive heterogeneity: residual variance is gamma^2 + se[i]^2.
  if(is.element("add", het)){
    if(is.element("unif", sd.prior)){
      modelstring <- function(o){
        out <- "
model{
for(i in 1:N){
y[i] ~ dnorm(alpha + beta*se[i], prec[i])
prec[i] <- 1/(pow(gamma, 2) + pow(se[i], 2))
se[i] <- sqrt(1/(n0[i]*p01[i]*(1-p01[i])) + 1/(n1[i]*p11[i]*(1-p11[i])))
n01[i] ~ dbin(p01[i], n0[i])
n11[i] ~ dbin(p11[i], n1[i])
logit(p01[i]) <- mu[i]
logit(p11[i]) <- mu[i] + delta[i]
delta[i] ~ dnorm(d, inv.tau2)
mu[i] ~ dnorm(0, 0.0001)
}
alpha ~ dnorm(0, 0.0001)
beta ~ dnorm(0, 0.0001)
gamma ~ dunif(0, upp.gamma)
d ~ dnorm(0, 0.0001)
inv.tau2 <- pow(tau, -2)
tau ~ dunif(0, upp.tau)
}"
        return(out)
      }
      jags.dat <- list(N = length(y), y = y,
        n0 = n00 + n01, n1 = n10 + n11, n01 = n01, n11 = n11,
        upp.gamma = upp.het, upp.tau = upp.het)
      inits <- NULL
      rng.seeds <- sample(1000000, n.chains)
      for(i in 1:n.chains){
        inits[[i]] <- list(alpha = rnorm(1), beta = rnorm(1), d = rnorm(1),
          gamma = runif(1, 0.1, 1.9), tau = runif(1, 0.1, 1.9),
          .RNG.name = "base::Wichmann-Hill", .RNG.seed = rng.seeds[i])
      }
    }
    if(is.element("hn", sd.prior)){
      modelstring <- function(o){
        out <- "
model{
for(i in 1:N){
y[i] ~ dnorm(alpha + beta*se[i], prec[i])
prec[i] <- 1/(pow(gamma, 2) + pow(se[i], 2))
se[i] <- sqrt(1/(n0[i]*p01[i]*(1-p01[i])) + 1/(n1[i]*p11[i]*(1-p11[i])))
n01[i] ~ dbin(p01[i], n0[i])
n11[i] ~ dbin(p11[i], n1[i])
logit(p01[i]) <- mu[i]
logit(p11[i]) <- mu[i] + delta[i]
delta[i] ~ dnorm(d, inv.tau2)
mu[i] ~ dnorm(0, 0.0001)
}
alpha ~ dnorm(0, 0.0001)
beta ~ dnorm(0, 0.0001)
gamma ~ dnorm(0, inv.phi2)I(0,)
d ~ dnorm(0, 0.0001)
inv.tau2 <- pow(tau, -2)
tau ~ dnorm(0, inv.phi2)I(0,)
}"
        return(out)
      }
      jags.dat <- list(N = length(y), y = y,
        n0 = n00 + n01, n1 = n10 + n11, n01 = n01, n11 = n11,
        inv.phi2 = phi^(-2))
      inits <- NULL
      rng.seeds <- sample(1000000, n.chains)
      for(i in 1:n.chains){
        inits[[i]] <- list(alpha = rnorm(1), beta = rnorm(1), d = rnorm(1),
          gamma = runif(1, 0.1, 1.9), tau = runif(1, 0.1, 1.9),
          .RNG.name = "base::Wichmann-Hill", .RNG.seed = rng.seeds[i])
      }
    }
  }
  # Compile the JAGS model, discard burn-in, then draw posterior samples of
  # the bias slope beta only.
  jags.m <- jags.model(file = textConnection(modelstring()),
    data = jags.dat, inits = inits, n.chains = n.chains, n.adapt = n.adapt)
  update(jags.m, n.iter = n.burnin)
  params <- c("beta")
  samps <- coda.samples(model = jags.m, variable.names = params,
    n.iter = n.iter, thin = thin)
  # Posterior median and the (sig.level/2, 1 - sig.level/2) interval,
  # pooled across all chains.
  quants <- quantile(unlist(samps),
    probs = c(sig.level/2, 1 - sig.level/2, 0.5))
  # Optional MCMC diagnostics: one trace plot of beta per chain.
  if(traceplot){
    par(mfrow = c(n.chains,1))
    for(i in 1:n.chains){
      temp <- as.vector(samps[[i]])
      tp <- plot(temp, type = "l", col = "red", ylab = paste("beta[", i, "]", sep = ""),
        xlab = "Iterations", main = paste("Chain",i))
    }
    print(tp)
  }
  out$est.bay <- as.numeric(quants[3])
  out$ci.bay <- quants[c(1, 2)]
  # Optionally return the raw samples, one column per chain.
  temp <- name <- NULL
  if(coda){
    for(i in 1:n.chains){
      temp <- cbind(temp, as.vector(samps[[i]]))
      name <- c(name, paste("beta[", i, "]", sep = ""))
    }
    beta <- data.frame(temp)
    names(beta) <- name
    out$samps.bay <- beta
  }
  return(out)
}
|
fb41e2a70b976c839e6e4c0161651d4bdaca63d4
|
e45c6f36a065b6a44e873a773428105de4d3758e
|
/bases/br_sp_seduc_fluxo_escolar/code/fluxo_municipio.R
|
651e86a020896957df2fde97a348d26750039086
|
[
"MIT"
] |
permissive
|
basedosdados/mais
|
080cef1de14376699ef65ba71297e40784410f12
|
2836c8cfad11c27191f7a8aca5ca26b94808c1da
|
refs/heads/master
| 2023-09-05T20:55:27.351309
| 2023-09-02T03:21:02
| 2023-09-02T03:21:02
| 294,702,369
| 376
| 98
|
MIT
| 2023-08-30T21:17:28
| 2020-09-11T13:26:45
|
SQL
|
UTF-8
|
R
| false
| false
| 4,790
|
r
|
fluxo_municipio.R
|
# Clean and standardize the SEDUC-SP "school flow by municipality" files
# (2011-2019), attach municipality IDs from the basedosdados directory on
# BigQuery, and export a single tidy CSV.
# Libraries
library(tidyverse)
library(rio)
library(DBI)
library(bigrquery)
library(reshape2)
# Working directory
setwd("~/Documentos/bdmais")
# Check where the yearly files share the same column layout
nomedasvar <- function(ano){
colnames(import(paste0("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_",ano,".csv"),
setclass = "tbl"))
}
lista_var1 <- map(2015:2019, nomedasvar) # same variables, BUT 2018 has a small quirk
# Standardized names for the 9 approval/failure/dropout proportion columns
vetor_nomes <- c("prop_aprovados_anos_inciais_ef","prop_reprovados_anos_iniciais_ef",
"prop_abandono_anos_iniciais_ef", "prop_aprovados_anos_finais_ef","prop_reprovados_anos_finais_ef",
"prop_abandono_anos_finais_ef", "prop_aprovados_em", "prop_reprovados_em",
"prop_abandono_em")
## Import the school flow data
# Reader for the 2015, 2016, 2017 and 2019 files (comma-separated)
abre_15_19 <- function(ano){
read.csv2(paste0("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_",ano,".csv"),
colClasses = rep("character", 13), sep = ",")%>%
set_names(c("ano", "diretoria", "municipio", "codigo_rede_ensino", vetor_nomes))
}
list15_19 <- map(c(2015,2016,2017,2019) , abre_15_19)
bases15_19 <- reduce(list15_19, bind_rows)
# Special case: 2018 (no sep argument, so read.csv2's default ";" is used)
base18<- read.csv2("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_2018.csv", colClasses = rep("character", 13))%>%
set_names(c("ano", "diretoria", "municipio", "codigo_rede_ensino", vetor_nomes))
# Special case: 2014 (drops CODMUN; different column order, no "diretoria")
base14 <- read.csv2("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_2014.csv", sep = ",",
colClasses = rep("character", 13))%>%
select(-CODMUN)%>%
set_names("ano", "codigo_rede_ensino", "municipio", vetor_nomes)
# Special case: 2013 (municipio before diretoria)
base13 <- read.csv2("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_2013.csv", sep = ",",
colClasses = rep("character", 13))%>%
set_names("ano", "municipio", "diretoria", "codigo_rede_ensino", vetor_nomes)
# Special case: 2012 (no year column; literal "NULL" strings become NA)
base12 <- read.csv2("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_2012.csv", sep = ",",
colClasses = rep("character", 13))%>%
select(-CODMUN, -MUN)%>%
set_names( "municipio", "codigo_rede_ensino", "rede_ensino", vetor_nomes)%>%
mutate(ano = "2012",
across(everything(), ~recode(.x, "NULL" = NA_character_)))
# Special case: 2011 (drops totals and columns ending in "A")
base11 <- read.csv2("bases_cruas/seduc/fluxo-escolar-municipio/Fluxo_Escolar_por_Municipio_2011.csv", sep = ",",
colClasses = rep("character", 13))%>%
select(-starts_with("TOT"), -ends_with('A'))%>%
set_names("ano", "municipio", vetor_nomes)%>%
mutate( across(everything(), ~recode(.x, "NULL" = NA_character_)))
# Stack all years into one data frame
lista <- list(base11,base12,base13,base14,base18, bases15_19)
basefinal <- reduce(lista, bind_rows)
# Fetch municipality names/IDs from the basedosdados directory on BigQuery
bq_auth(path = "chavebigquery/My Project 55562-2deeaad41d85.json")
id_projeto <- "double-voice-305816"
con <- dbConnect(
bigrquery::bigquery(),
billing = id_projeto,
project = "basedosdados"
)
query <- 'SELECT id_municipio, municipio, sigla_uf
FROM `basedosdados.br_bd_diretorios_brasil.municipio`'
diretorio_com_id <- dbGetQuery(con, query)
# Normalize municipality names (uppercase, strip accented characters) so
# they match the SEDUC spelling, and build a UF+name join key
diretorio_com_id_alt <- diretorio_com_id%>%
mutate(municipio = str_to_upper(municipio))%>%
mutate(municipio = str_replace_all(municipio, "Á", "A" ))%>%
mutate(municipio = str_replace_all(municipio, "É", "E" ))%>%
mutate(municipio = str_replace_all(municipio, "Í", "I" ))%>%
mutate(municipio = str_replace_all(municipio, "Ó", "O" ))%>%
mutate(municipio = str_replace_all(municipio, "Ú", "U" ))%>%
mutate(municipio = str_replace_all(municipio, "Â", "A" ))%>%
mutate(municipio = str_replace_all(municipio, "Ê", "E" ))%>%
mutate(municipio = str_replace_all(municipio, "Î", "I" ))%>%
mutate(municipio = str_replace_all(municipio, "Ô", "O" ))%>%
mutate(municipio = str_replace_all(municipio, "Û", "U" ))%>%
mutate(municipio = str_replace_all(municipio, "Ã", "A" ))%>%
mutate(municipio = str_replace_all(municipio, "Õ", "O" ))%>%
mutate(municipio = str_replace_all(municipio, "Ç", "C" ))%>%
mutate(chave = str_c(sigla_uf,municipio))%>%
select(chave, id_municipio)
# Join municipality IDs onto the combined dataset (all rows are SP)
basefinal_comid <- basefinal%>%
mutate(sigla_uf = "SP",
chave = str_c(sigla_uf,municipio),
rede = case_when(rede_ensino == "ESTADUAL" ~ "estadual"),)%>%
left_join(diretorio_com_id_alt, by = 'chave')%>%
select(-chave, -municipio, -rede_ensino, -codigo_rede_ensino)
# Export the final dataset
export(basefinal_comid, "bases_prontas/fluxoescolarmun.csv", na = "", quote = TRUE)
|
56c4b8f1889f278cfee9f5267dee0d0892088613
|
3c148d5fac76e8e32d284787873885e3b64fd38a
|
/R/carbonise.R
|
9009e6bce884f211134ea3de9af61ffb8bdc1056
|
[
"MIT"
] |
permissive
|
drdcarpenter/carbonhabitats
|
b9599e97817433b24fd91d3bf5a64629b2e88d51
|
a67f0989503d069fe90c6e6b82ae635ad5443d60
|
refs/heads/master
| 2022-12-13T06:59:45.663086
| 2020-09-11T14:06:04
| 2020-09-11T14:06:04
| 277,628,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,198
|
r
|
carbonise.R
|
#' @title carbonise
#' @description Calculate the carbon storage (above ground biomass and soil carbon) and sequestration for UK priority habitats.
#' @param x habitats sf dataframe
#' @param habitats name of the column containing priority habitats types
#' @return sf features with stored above ground carbon and sequestered carbon per year for each feature
#' @details The function calculates the amount of above ground carbon stored in each priority habitat type using data taken from two papers (see main package description for references).
#' It also calculates stored soil carbon using data from the Global Soil Organic Carbon map. The soil carbon data only covers the UK. You need to supply the name of the field with the UK priority habitat descriptions in.
#' The function can be slow if you have a lot of habitat features, as it summarises soil carbon data from a large raster.
#' The input dataset needs to be an sf features dataset.
#' The CRS for this package is EPSG:27700. You may need to transform your data for the function to work.
#' @examples
#' # NOT RUN
#' # c <- carbonise(x, habitats = "S41Habitat")
#' @seealso
#'  \code{\link[dplyr]{mutate_all}},\code{\link[dplyr]{mutate}},\code{\link[dplyr]{mutate-joins}}
#'  \code{\link[sf]{geos_measures}}
#' @rdname carbonise
#' @export
#' @importFrom dplyr mutate left_join across
#' @importFrom sf st_area
#' @importFrom exactextractr exact_extract
carbonise <- function(x, habitats){
  # convert factors to character for join
  x <- x %>% dplyr::mutate(dplyr::across(where(is.factor), as.character))
  # calculate feature areas in hectares (st_area gives m^2; / 10000 -> ha)
  x <- x %>% dplyr::mutate(Area = as.numeric(sf::st_area(x) / 10000))
  # join carbon data to habitats data
  # NOTE(review): left_join was replaced with merge below; merge drops rows
  # whose habitat has no match in `carbon`, whereas left_join kept them --
  # confirm which behavior is intended.
  # cx <- dplyr::left_join(x, carbon, by = c(habitats = "S41Habitat"))
  cx <- merge(x, carbon, by.x = habitats, by.y = "S41Habitat")
  # calculate stored C and sequestered C per feature
  cx$storedC <- as.numeric(cx$Area * cx$AGB)
  cx$seqC <- as.numeric(cx$Area * cx$Cseq)
  # calculate soil carbon per feature (mean of the soilcarbon raster over
  # each polygon)
  cx$soilC <- exactextractr::exact_extract(soilcarbon, cx, "mean")
  # calculate total C
  # NOTE(review): storedC (tonnes) is *multiplied* by soilC (a mean raster
  # value); the units look inconsistent -- an additive combination such as
  # storedC + soilC * Area may have been intended. Confirm before relying
  # on totalC.
  cx$totalC <- cx$storedC * cx$soilC
  return(cx)
}
|
b6234b0c37690f63178e760d1630f231f1ab0f63
|
56d70f2de8ff6e5edd00fb1e9ecd2ab5058e04c4
|
/NorDistSim.R
|
64861d700dc27b2c4b54152f22d5c920983d3a2b
|
[] |
no_license
|
mikeabd/Advanced-Stats-R-Source-Code
|
5d69cb8f670d17df59c26b098b7057741618d5cd
|
47f942ae8c59b91c3e74cef97bc25c58bb7bf968
|
refs/heads/master
| 2021-01-02T08:30:29.056664
| 2018-03-26T17:48:10
| 2018-03-26T17:48:10
| 99,014,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,219
|
r
|
NorDistSim.R
|
# Installing required packages
# NOTE(review): install.packages() runs on every execution of this script;
# consider guarding with requireNamespace() checks.
install.packages(c("dplyr", "ggplot2", "forecast", "lubridate", "ROCR"
                   , "rjson", "timeSeries", "xts", "highfrequency"
                   , "manipulate", "tseries", "bsts", "boot", "mixtools"))
# Loading Required Packages
library(dplyr)
library(ggplot2)
library(forecast)
library(lubridate)
library(ROCR)
library(rjson)
library(timeSeries)
library(xts)
library(highfrequency)
library(manipulate)
library(tseries)
library(bsts)
library(boot)
library(mixtools)
# Setting work directory
setwd("C:/Users/hosse/Desktop/ML/NormalDistSimulation")
paste("Work Directory is Set Up to", getwd())
# Set seed value for reproducible simulations
set.seed(20)
#######################################################
####### Bivariate Normal Distribution Simulation ######
#######################################################
# Number of random samples
N <- 200
# Bivariate normal parameters: correlation, means and SDs of each margin
rho <- -0.6
mu1 <- 1; s1 <- 2
mu2 <- 1; s2 <- 8
Mu <- c(mu1, mu2)
# 2x2 covariance matrix built from the SDs and correlation
Sigmas <- matrix(c(s1^2, s1*s2*rho, s2*s1*rho, s2^2), 2)
# Overlay a probability ellipse (via mixtools::ellipse) on the current plot,
# using the sample mean vector and covariance matrix of the bivariate data.
ellipse_bvn <- function(bvn, alpha, col){
  center <- apply(bvn, 2, mean)
  spread <- cov(bvn)
  ellipse(center, spread, alpha = alpha, col = col, npoints = 250,
          newplot = FALSE, draw = TRUE)
}
# Gibbs sampler for a bivariate normal distribution: alternately draws each
# coordinate from its conditional distribution given the current value of
# the other. The chain starts at (0, 0), which is stored as the first row.
# Returns an n x 2 matrix of draws.
gibbs <- function (n, mu1, s1, mu2, s2, rho)
{
  draws <- matrix(ncol = 2, nrow = n)
  cur_x <- 0
  cur_y <- 0
  draws[1, ] <- c(cur_x, cur_y)
  # Conditional SDs do not depend on the other coordinate, so hoist them.
  sd_x <- sqrt((1 - rho^2) * s1^2)
  sd_y <- sqrt((1 - rho^2) * s2^2)
  for (step in 2:n) {
    cur_x <- rnorm(1, mu1 + (s1 / s2) * rho * (cur_y - mu2), sd_x)
    cur_y <- rnorm(1, mu2 + (s2 / s1) * rho * (cur_x - mu1), sd_y)
    draws[step, ] <- c(cur_x, cur_y)
  }
  draws
}
# Draw N Gibbs samples from the bivariate normal defined above
bvn <- gibbs(N,mu1,s1,mu2,s2,rho)
colnames(bvn) <- c("X1","X2")
# Plotting Results: scatter of the samples with 50% and 95% ellipses
plot(bvn,xlab="X1", ylab="X2", main = " Bivariate Normal Distribution Simulation Plot "
     , col="black")
ellipse_bvn(bvn, 0.5, col = "red")
ellipse_bvn(bvn, 0.05, col = "blue")
###########################################################
####### Multi-variate Normal Distribution Simulation ######
###########################################################
install.packages("mvtnorm")
library(mvtnorm)
# Function to calculate mean and cov of a given data
# Simulation dimensions: sample size per variable and number of variables
sampleSize <- 1000
nbrVars <- 3
# Generate a sampleSize x nbrVars matrix of uniform integer draws from
# LB:UB and summarize it. Returns a named list with the raw data, a 1-row
# matrix of column means, the Pearson covariance matrix, and a 1-row matrix
# of column variances.
data_mat <- function (nbrVars, sampleSize, LB, UB)
{
  raw <- matrix(ncol = nbrVars, nrow = sampleSize)
  col_means <- matrix(ncol = nbrVars, nrow = 1)
  col_vars <- matrix(ncol = nbrVars, nrow = 1)
  for (j in seq_len(nbrVars)) {
    raw[, j] <- sample(LB:UB, sampleSize, replace = TRUE)
    col_means[, j] <- mean(raw[, j])
    col_vars[, j] <- var(raw[, j])
  }
  list(
    "Data" = raw,
    "Mean Matrix" = col_means,
    "Covariance Matrix" = cov(raw, method = "pearson"),
    "Variance" = col_vars
  )
}
# Simulate multivariate normal draws from the empirical mean and covariance
# of the generated data.
# Bug fix: the original call passed only the mean vector to rmvnorm(), so
# the simulation silently used mvtnorm's default identity covariance and the
# covariance matrix computed by data_mat() was never used.
sim_stats <- data_mat(nbrVars, sampleSize, 1, 10)
mvn <- rmvnorm(n = 1000,
               mean = as.vector(sim_stats[["Mean Matrix"]]),
               sigma = sim_stats[["Covariance Matrix"]])
# Pairwise scatter plots for every variable pair (i < j)
for(i in 1:(ncol(mvn)-1)){
  for(j in 1:ncol(mvn)){
    if(j > i){
      plot_main <- paste("Var", i, " and Var ", j, " Scatter Plot", sep="")
      plot(x = mvn[, i], y = mvn[, j], xlab= paste("X", i), ylab= paste("X", j)
           , main = plot_main , col= i*j)
    }
  }
}
|
ed6dc3c43d9d1aa150e4a2cc3119fa72f82107d1
|
f372c4ac0ee9d301b753dd9db774a3cf405cb15c
|
/plot4.R
|
136f93c7a6bc5d0ad7f27e84829f18c76176a3a5
|
[] |
no_license
|
perati/ExData_Plotting1
|
d8a0a84691a5848f8452d85975b7269c59455c40
|
ff5d8f1fb22ece463d86dcc80b96fea4c2315d3d
|
refs/heads/master
| 2021-01-22T16:10:34.859716
| 2015-10-09T15:26:49
| 2015-10-09T15:26:49
| 43,891,500
| 0
| 0
| null | 2015-10-08T13:48:24
| 2015-10-08T13:48:24
| null |
UTF-8
|
R
| false
| false
| 2,386
|
r
|
plot4.R
|
# Plot 4. Combination of 4 plots - Variables / Time
# Requires "household_power_consumption.txt" (UCI household power dataset)
# in the working directory; writes the result to plot4.png.
#read data stored in working directory ("?" marks missing values)
data<-read.table('household_power_consumption.txt',
                 header = TRUE, sep=";",
                 na.strings="?",
                 col.names = c("Date", "Time", "Global_active_power",
                               "Global_reactive_power", "Voltage", "Global_intensity",
                               "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
                 colClasses = c( "character", "character", "numeric",
                                 "numeric", "numeric", "numeric", "numeric",
                                 "numeric", "numeric"))
#convert Date and Time variables to date and time classes
#(Time becomes a full datetime by pasting the original date in front)
data$Time <- strptime(paste(data$Date,data$Time), '%d/%m/%Y %H:%M:%S')
data$Date <- as.Date(data$Date, '%d/%m/%Y')
#subset data to include the spesific dates only (2007-02-01 and 2007-02-02)
subdata<-subset(data, Date == '2007-02-01' | Date == '2007-02-02')
#open png device (default 480x480 made explicit)
png('plot4.png',width = 480, height = 480)
#set parameters. 2 rows and 2 columns
par(mfrow = c(2, 2))
# 4.1. Global Active Power / Time
#create plot / use line type / add x and y labels
with(subdata,{
  plot(Time,Global_active_power, type = 'l',
       xlab = '',ylab = 'Global Active Power')
})
# 4.2. Voltage / Time
#create plot / use line type / add x and y labels
with(subdata,{
  plot(Time,Voltage, type = 'l',
       xlab = 'datetime',ylab = 'Voltage')
})
# 4.3. Energy Sub Metering / Time
#create empty plot with type 'n' / replace x and y label
#add Sub Metering 1, 2, and 3 line points in the plot with different color
#add a legend with the respective plot data
with(subdata,{
  plot(Time,Sub_metering_1,type = 'n',xlab = '',ylab = 'Energy sub metering')
  points(Time,Sub_metering_1,type = 'l')
  points(Time,Sub_metering_2,type = 'l',col='red')
  points(Time,Sub_metering_3,type = 'l',col='blue')
  legend('topright', col=c('black','red','blue'), lty = c(1,1,1),
         legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'))
})
# 4.4. Global Reactive Power / Time
#create plot / use line type / add x and y labels
with(subdata,{
  plot(Time,Global_reactive_power, type = 'l',
       xlab = 'datetime',ylab = 'Global_reactive_power')
})
#close device
dev.off()
#the file is created in working directory
903c1d7a09dc8c04cd8f0defc137ef1613e830c9
|
c9aea1cc5af64391ad2287bb2647f2d24e66bc19
|
/R/serve.R
|
91a10c896d8fed1fb44a306956a44ccce841ce67
|
[] |
no_license
|
rstudio/blogdown
|
3b4ab7f040bbcb3e54b12616b522d6297774be65
|
6ab3a2be7a763473430bf750e07a5a15c8c7eeef
|
refs/heads/main
| 2023-07-07T07:39:16.369785
| 2023-06-25T01:50:03
| 2023-06-25T01:50:03
| 72,671,522
| 1,805
| 468
| null | 2023-01-03T14:42:32
| 2016-11-02T18:57:35
|
R
|
UTF-8
|
R
| false
| false
| 12,690
|
r
|
serve.R
|
#' Live preview a site
#'
#' The function \code{serve_site()} executes the server command of a static site
#' generator (e.g., \command{hugo server} or \command{jekyll server}) to start a
#' local web server, which watches for changes in the site, rebuilds the site if
#' necessary, and refreshes the web page automatically; \code{stop_server()}
#' stops the web server.
#'
#' By default, the server also watches for changes in R Markdown files, and
#' recompile them automatically if they are modified. This means they will be
#' automatically recompiled once you save them. If you do not like this
#' behavior, you may set \code{options(blogdown.knit.on_save = FALSE)} (ideally
#' in your \file{.Rprofile}). When this feature is disabled, you will have to
#' manually compile Rmd documents, e.g., by clicking the Knit button in RStudio.
#'
#' The site generator is defined by the global R option
#' \code{blogdown.generator}, with the default being \code{'hugo'}. You may use
#' other site generators including \code{jekyll} and \code{hexo}, e.g.,
#' \code{options(blogdown.generator = 'jekyll')}. You can define command-line
#' arguments to be passed to the server of the site generator via the global R
#' option \code{blogdown.X.server}, where \code{X} is \code{hugo},
#' \code{jekyll}, or \code{hexo}. The default for Hugo is
#' \code{options(blogdown.hugo.server = c('-D', '-F', '--navigateToChanged'))}
#' (see the documentation of Hugo server at
#' \url{https://gohugo.io/commands/hugo_server/} for the meaning of these
#' arguments).
#' @param ... Arguments passed to \code{servr::\link{server_config}()} (only
#' arguments \code{host}, \code{port}, \code{browser}, \code{daemon}, and
#' \code{interval} are supported).
#' @param .site_dir Directory to search for site configuration file. It defaults
#' to \code{getwd()}, and can also be specified via the global option
#' \code{blogdown.site_root}.
#' @note For the Hugo server, the argument \command{--navigateToChanged} is used
#' by default, which means when you edit and save a source file, Hugo will
#' automatically navigate the web browser to the page corresponding to this
#' source file (if the page exists). However, due to a Hugo bug
#' (\url{https://github.com/gohugoio/hugo/issues/3811}), this automatic
#' navigation may not always work for R Markdown posts, and you may have to
#' manually refresh your browser. It should work reliably for pure Markdown
#' posts, though.
#' @export
serve_site = function(..., .site_dir = NULL) {
  # Build the generator-specific server closure, then invoke it with the
  # user's arguments. Jekyll/Hexo need their base URL and publish dir read
  # from _config.yml; Hugo's serve_it() defaults handle that itself.
  g = generator()
  serve = if (g == 'hugo') {
    serve_it()
  } else if (g == 'jekyll') {
    serve_it(
      baseurl = get_config2('baseurl', ''),
      pdir = get_config2('destination', '_site')
    )
  } else if (g == 'hexo') {
    serve_it(
      baseurl = get_config2('root', ''),
      pdir = get_config2('public_dir', 'public')
    )
  } else {
    stop("Cannot recognize the site (only Hugo, Jekyll, and Hexo are supported)")
  }
  serve(..., .site_dir = .site_dir)
}
server_ready = function(url) {
  # for some reason, R cannot read localhost, but 127.0.0.1 works
  probe = sub('^http://localhost:', 'http://127.0.0.1:', url)
  # a successful read (even an empty body) means the server is up
  attempt = xfun::try_silent(suppressWarnings(readLines(probe)))
  !inherits(attempt, 'try-error')
}
# this function is primarily for users who click the Knit button in RStudio (the
# main purposes are to suppress a message that is not useful to Knit button
# users, and avoid rebuilding Rmd files because Knit button has done the job);
# normally you wouldn't need to call it by yourself
preview_site = function(..., startup = FALSE) {
  # when startup = FALSE, set knitting = TRUE permanently for this R session, so
  # that build_site() in serve_site() no longer automatically rebuilds Rmds on
  # save by default, and an Rmd has to be manually knitted
  if (startup) {
    # mark that we are in startup preview; cleared again when this call exits
    opts$set(preview = TRUE)
    on.exit(opts$set(preview = NULL), add = TRUE)
    # open some files initially if specified via the
    # 'blogdown.initial_files' option (a character vector or a function)
    init_files = get_option('blogdown.initial_files')
    if (is.function(init_files)) init_files = init_files()
    for (f in init_files) if (file_exists(f)) open_file(f)
  } else {
    opts$set(knitting = TRUE)
    # refresh the RStudio viewer after the server has been (re)started
    on.exit(refresh_viewer(), add = TRUE)
  }
  invisible(serve_site(...))
}
preview_mode = function() {
  # TRUE when either the startup-preview or the knitting flag is set
  flags = vapply(c('preview', 'knitting'), function(k) isTRUE(opts$get(k)), logical(1))
  any(flags)
}
# Factory that returns the actual serving function for a generator. The
# returned closure launches the generator's own server (hugo/jekyll/hexo),
# optionally in the background, and then watches the site for changed Rmd
# files, rebuilding them on save.
serve_it = function(pdir = publish_dir(), baseurl = site_base_dir()) {
  g = generator(); config = config_files(g)
  function(..., .site_dir = NULL) {
    root = site_root(config, .site_dir)
    # refuse to serve the same directory twice in one session; reopen the
    # last browser tab instead
    if (root %in% opts$get('served_dirs')) {
      if (preview_mode()) return()
      servr::browse_last()
      return(message(
        'The site has been served under the directory "', root, '". I have tried ',
        'to reopen it for you with servr::browse_last(). If you do want to ',
        'start a new server, you may stop existing servers with ',
        'blogdown::stop_server(), or restart R. Normally you should not need to ',
        'serve the same site multiple times in the same R session',
        if (is_rstudio()) c(
          ', otherwise you may run into issues like ',
          'https://github.com/rstudio/blogdown/issues/404'
        ), '.'
      ))
    }
    owd = setwd(root); on.exit(setwd(owd), add = TRUE)
    server = servr::server_config(..., baseurl = baseurl, hosturl = function(host) {
      if (g == 'hugo' && host == '127.0.0.1') 'localhost' else host
    })
    # launch the hugo/jekyll/hexo server
    cmd = if (g == 'hugo') find_hugo() else g
    host = server$host; port = server$port; intv = server$interval
    if (!servr:::port_available(port, host)) stop(
      'The port ', port, ' at ', host, ' is unavailable', call. = FALSE
    )
    # dispatch to jekyll_server_args()/hexo_server_args()/hugo_server_args()
    # by name
    args_fun = match.fun(paste0(g, '_server_args'))
    cmd_args = args_fun(host, port)
    if (g == 'hugo') {
      # RStudio Server uses a proxy like http://localhost:8787/p/56a946ed/ for
      # http://localhost:4321, so we must use relativeURLs = TRUE:
      # https://github.com/rstudio/blogdown/issues/124
      tweak_hugo_env(server = TRUE, relativeURLs = if (is_rstudio_server()) TRUE)
      if (length(list_rmds(pattern = bundle_regex('.R(md|markdown)$'))))
        create_shortcode('postref.html', 'blogdown/postref')
    }
    # run a function (if configured) before starting the server
    if (is.function(serve_first <- getOption('blogdown.server.first'))) serve_first()
    # call jekyll directly or use the bundler gem
    if (g == 'jekyll' && getOption('blogdown.jekyll.bundler', FALSE)) {
      cmd = 'bundle'; cmd_args = c('exec', g, cmd_args)
    }
    # if requested not to demonize the server, run it in the foreground process,
    # which will block the R session
    if (!server$daemon) return(system2(cmd, cmd_args))
    verbose = get_option('blogdown.server.verbose', FALSE)
    # start the background server via processx when available (pid wrapped
    # in I() to mark it), otherwise via xfun::bg_process()
    pid = if (is_psx <- getOption('blogdown.use.processx', xfun::loadable('processx'))) {
      proc = processx::process$new(
        cmd, cmd_args, cleanup_tree = TRUE,
        stdout = if (verbose && processx::is_valid_fd(1L)) '',
        stderr = if (verbose && processx::is_valid_fd(2L)) '' else '|'
      )
      I(proc$get_pid())
    } else {
      xfun::bg_process(cmd, cmd_args, verbose)
    }
    opts$append(pids = list(pid))
    message(
      'Launching the server via the command:\n  ',
      paste(c(cmd, cmd_args), collapse = ' ')
    )
    # poll once per second until the server responds or the timeout expires
    i = 0
    repeat {
      Sys.sleep(1)
      # for a process started with processx, check if it has died with an error
      if (is_psx && !proc$is_alive()) {
        err = tryCatch(
          paste(gsub('^Error: ', '', proc$read_error()), collapse = '\n'),
          error = function(e) ''
        )
        stop(if (err == '') {
          'Failed to serve the site; see if blogdown::build_site() gives more info.'
        } else err, call. = FALSE)
      }
      if (server_ready(server$url)) break
      if (i >= get_option('blogdown.server.timeout', 30)) {
        s = proc_kill(pid)  # if s == 0, the server must have been started successfully
        stop(if (s == 0) c(
          'Failed to launch the site preview in ', i, ' seconds. Try to give ',
          'it more time via the global option "blogdown.server.timeout", e.g., ',
          'options(blogdown.server.timeout = 600).'
        ) else c(
          'It took more than ', i, ' seconds to launch the server. An error might ',
          'have occurred with ', g, '. You may run blogdown::build_site() and see ',
          'if it gives more info.'
        ), call. = FALSE)
      }
      i = i + 1
    }
    server$browse()
    # server is correctly started so we record the directory served
    opts$append(served_dirs = root)
    Sys.setenv(BLOGDOWN_SERVING_DIR = root)
    message(
      'Launched the ', g, ' server in the background (process ID: ', pid, '). ',
      'To stop it, call blogdown::stop_server() or restart the R session.'
    )
    # delete the resources/ dir if it is empty
    if (g == 'hugo') del_empty_dir('resources')
    # whether to watch for changes in Rmd files?
    if (!get_option('blogdown.knit.on_save', TRUE)) return(invisible())
    # rebuild specific or changed Rmd files
    rebuild = function(files) {
      if (is.null(b <- get_option('blogdown.knit.on_save'))) {
        b = !isTRUE(opts$get('knitting'))
        if (!b) {
          options(blogdown.knit.on_save = b)
          message(
            'It seems you have clicked the Knit button in RStudio. If you prefer ',
            'knitting a document manually over letting blogdown automatically ',
            'knit it on save, you may set options(blogdown.knit.on_save = FALSE) ',
            'in your .Rprofile so blogdown will not knit documents automatically ',
            'again (I have just set this option for you for this R session). If ',
            'you prefer knitting on save, set this option to TRUE instead.'
          )
          files = b # just ignore changed Rmd files, i.e., don't build them
        }
      }
      xfun::in_dir(root, build_site(TRUE, run_hugo = FALSE, build_rmd = files))
    }
    # build Rmd files that are new and don't have corresponding output files
    rebuild(rmd_files <- filter_newfile(list_rmds()))
    watch = servr:::watch_dir('.', rmd_pattern, handler = function(files) {
      files = list_rmds(files = files)
      # ignore Rmd files in the public/ directory, in case users forgot to set
      # ignoreFiles in config.yaml and Rmd files would be copied to public/
      # (they should not be): https://github.com/rstudio/blogdown/issues/610
      i = if (g == 'hugo') !xfun::is_sub_path(files, rel_path(publish_dir())) else TRUE
      rmd_files <<- files[i]
    })
    unix = xfun::is_unix()
    # recurring task: when Rmd files changed, suspend the server process,
    # rebuild, resume, and refresh the viewer; reschedules itself via later
    watch_build = function() {
      # stop watching if stop_server() has cleared served_dirs
      if (is.null(opts$get('served_dirs'))) return(invisible())
      if (watch()) {
        if (is_psx) proc$suspend() else if (unix) tools::pskill(pid, tools::SIGSTOP)
        try(rebuild(rmd_files))
        if (is_psx) proc$resume() else if (unix) tools::pskill(pid, tools::SIGCONT)
        refresh_viewer()
      }
      if (get_option('blogdown.knit.on_save', TRUE)) later::later(watch_build, intv)
    }
    watch_build()
    return(invisible())
  }
}
jekyll_server_args = function(host, port) {
  # extra flags are user-configurable via the 'blogdown.jekyll.server' option
  extra = get_option(
    'blogdown.jekyll.server', c('--watch', '--incremental', '--livereload')
  )
  c('serve', '--port', port, '--host', host, extra)
}
hexo_server_args = function(host, port) {
  # extra flags come from the 'blogdown.hexo.server' option (default NULL)
  extra = get_option('blogdown.hexo.server')
  c('server', '-p', port, '-i', host, extra)
}
#' @export
#' @rdname serve_site
stop_server = function() {
  # Kill every background server process recorded by serve_it(), then clear
  # the bookkeeping state (pids, served_dirs, env var).
  ids = NULL # collect pids that we failed to kill
  quitting = isTRUE(opts$get('quitting'))
  for (i in opts$get('pids')) {
    # no need to kill a process started by processx when R is quitting
    if (quitting && inherits(i, 'AsIs')) next
    if (proc_kill(i, stdout = FALSE, stderr = FALSE) != 0) ids = c(ids, i)
  }
  if (length(ids)) warning(
    # bug fix: previously this pasted `i` (the last pid from the loop)
    # instead of the accumulated vector `ids` of pids that failed to die
    'Failed to kill the process(es): ', paste(ids, collapse = ' '),
    '. You may need to kill them manually.'
  ) else if (!quitting) message('The web server has been stopped.')
  set_envvar(c('BLOGDOWN_SERVING_DIR' = NA))
  opts$set(pids = NULL, served_dirs = NULL)
}
get_config2 = function(key, default) {
  # Read one top-level key from Jekyll/Hexo's _config.yml, falling back to
  # `default` when the key is absent (%n% is the package's NULL-default op).
  config = yaml_load_file('_config.yml')
  value = config[[key]]
  value %n% default
}
# refresh the viewer because hugo's livereload doesn't work on RStudio
# Server: https://github.com/rstudio/rstudio/issues/8096 (TODO: check if
# it's fixed in the future: https://github.com/gohugoio/hugo/pull/6698)
refresh_viewer = function() {
  # no-op outside RStudio Server (desktop RStudio has working livereload)
  if (!is_rstudio_server()) return()
  # give the server a moment to finish rebuilding before refreshing
  server_wait()
  rstudioapi::executeCommand('viewerRefresh')
}
server_wait = function() {
  # pause length is configurable via the 'blogdown.server.wait' option
  pause = get_option('blogdown.server.wait', 2)
  Sys.sleep(pause)
}
|
01b65a98a6cd0f4d744485853da948b46d190da1
|
83d35a0c687e56de320bbe025fe876df41ea3bf6
|
/R/findBAFvariance.R
|
ef6b3be5cc213627e4bbf7c34f8622d06e14a0b2
|
[] |
no_license
|
smgogarten/GWASTools
|
797f4cc0d90299195fea29ee1fc24c492267541a
|
720bfc6bede713dfcfbff1dd506f4c9f338caa9d
|
refs/heads/devel
| 2023-06-26T13:37:21.371466
| 2023-06-22T12:37:41
| 2023-06-22T12:37:41
| 100,623,140
| 11
| 8
| null | 2023-06-22T12:34:02
| 2017-08-17T16:18:11
|
R
|
UTF-8
|
R
| false
| false
| 3,143
|
r
|
findBAFvariance.R
|
# Flag scan/chromosome combinations whose windowed BAF standard deviation
# exceeds the population mean by more than `sd.threshold` standard deviations.
# Autosomes are compared against pooled "Mean"/"SD" statistics; the X
# chromosome against sex-specific ("Female"/"Male") statistics.
#
# Arguments:
#   sd.by.chrom.window      - list (one matrix per chromosome) of reference
#                             statistics; rows "SD"/"Mean" for autosomes,
#                             "Female SD"/"Female Mean"/"Male SD"/"Male Mean"
#                             for X; columns are windows (bins)
#   sd.by.scan.chrom.window - list (one matrix per chromosome) of per-scan
#                             values; rows are scans (rownames = scanIDs),
#                             columns are windows
#   sex                     - character vector of scan sexes ("F"/"M"),
#                             parallel to the scan rows
#   sd.threshold            - number of SDs above the mean required to flag
#
# Returns a character matrix with one row per flagged scan/chromosome and
# columns "scanID", "chromosome", "bin" (count of flagged windows), "sex".
findBAFvariance <- function(sd.by.chrom.window, sd.by.scan.chrom.window,
                            sex, sd.threshold)
{
  chrom.names <- names(sd.by.scan.chrom.window)
  res <- vector()
  for (chr.idx in seq_along(chrom.names)) {
    chrom <- chrom.names[chr.idx]
    scan.vals <- sd.by.scan.chrom.window[[chr.idx]]
    ref.stats <- sd.by.chrom.window[[chr.idx]]
    for (scan.idx in seq_len(nrow(scan.vals))) {
      vals <- scan.vals[scan.idx, ]
      scan.id <- rownames(scan.vals)[scan.idx]
      # pick reference mean/SD per window: pooled for autosomes,
      # sex-specific for X
      if (chrom == "X") {
        sd.f <- ref.stats["Female SD", ]
        mn.f <- ref.stats["Female Mean", ]
        sd.m <- ref.stats["Male SD", ]
        mn.m <- ref.stats["Male Mean", ]
        if (sex[scan.idx] == "F") {
          sdev <- sd.f
          mn <- mn.f
        } else {
          sdev <- sd.m
          mn <- mn.m
        }
      } else {
        sdev <- ref.stats["SD", ]
        mn <- ref.stats["Mean", ]
      }
      # a window is flagged when its value exceeds mean + threshold*SD;
      # windows with NA value or NA reference statistics never flag
      flagged <- !is.na(vals) & !is.na(sdev) & !is.na(mn) &
        vals > sd.threshold * sdev + mn
      n.flagged <- sum(flagged)
      if (n.flagged > 0) {
        res <- rbind(res, c(scan.id, chrom, n.flagged, sex[scan.idx]))
      }
    }
  }
  colnames(res) <- c("scanID", "chromosome", "bin", "sex")
  return(res)
}
|
584e41b78ca630181f7f946bdaace0c97068d1ce
|
ca00a4f40a7066ff97bcab825d42accf79c2c541
|
/man/myWaitForJobs.Rd
|
8f61aa6e483339ceb904fa470becddab5cac0b14
|
[] |
no_license
|
heiniglab/eQTLpipeline
|
63b6f62f70efd749024e080829a76afede5a9745
|
46aa63e5b76b2e420e843b6c85cdd3def11d5bf6
|
refs/heads/master
| 2023-08-20T19:06:33.463554
| 2021-11-02T16:11:23
| 2021-11-02T16:11:23
| 422,500,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 768
|
rd
|
myWaitForJobs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eqtl_lib.R
\name{myWaitForJobs}
\alias{myWaitForJobs}
\title{basic eQTL interface}
\usage{
myWaitForJobs(reg, waittime = 3, nretry = 100)
}
\arguments{
\item{reg}{BatchJobs registry}
\item{waittime}{time to wait before updating job status}
\item{nretry}{number of times to retry getting the job status before throwing an error}
}
\description{
Extension to the waitForJobs function of the BatchJobs package which shows
some strange behaviour when waiting for jobs (database locked)
so we need to make it extra failsafe.
}
\seealso{
Other eqtl functions: \code{\link{eqtl.min.p}},
\code{\link{eqtl.run}}, \code{\link{eqtl}},
\code{\link{trans.qtl}}
}
\concept{eqtl functions}
|
f739a97c903e09085eb6c1a01c645f7c34dae2d4
|
589bf3773cd27c8d5f022747a7344596e7bd8067
|
/man/logR.Rd
|
ddf11e4782199b306f7215e782629f9114d5fa2f
|
[] |
no_license
|
MikkoVihtakari/MarineDatabase
|
974b67991ee2636028f5060e93eae875bd79e800
|
73879eecbedb92e126ce469504b8b18f635ff952
|
refs/heads/master
| 2022-11-06T18:17:31.395171
| 2020-06-29T14:23:09
| 2020-06-29T14:23:09
| 112,598,897
| 1
| 1
| null | 2017-12-08T09:22:11
| 2017-11-30T10:36:22
|
R
|
UTF-8
|
R
| false
| true
| 5,235
|
rd
|
logR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logR.R
\name{logR}
\alias{logR}
\title{Calculate logarithmic response ratio from a long table}
\usage{
logR(
X,
response,
levels,
groups,
control,
ci.type = NULL,
ci.conf = 0.95,
base = "e",
paired_tests = FALSE,
unlog = FALSE,
all.data = FALSE,
signif = 2,
sqrt_transform = FALSE
)
}
\arguments{
\item{X}{data frame}
\item{response}{character vector specifying the names of the columns for which the response ratio should be calculated.}
\item{levels}{Name of the column that contains factor which should be used to separate response ratios.}
\item{groups}{character vector specifying the names of the columns, which should used as grouping factors.}
\item{control}{the name of the control/base factor level in \code{levels} column.}
\item{ci.type}{indicates the distribution to be used for confidence intervals. \code{'z'} refers to normal distribution and \code{'t'} to t distribution. The default (\code{NULL}) is to decide the distribution based on the lowest \eqn{\sqrt(n)*mean(response)/sd(response)} (from Hedges et al. 1999). If over 10\% of values are less than 3, t-distribution is used. Otherwise normal. Note that Hedges et al. (1999) adviced for using normal distribution and that the t-distribution is an experimental addition, but I have used it in publications. The CIs will be wider (i.e. less "significant" results) when t-distribution is used. Consider using \code{paired_tests} to confirm your CIs.}
\item{ci.conf}{the confidence level for the confidence interval. Defaults to 0.95 (95\%).}
\item{base}{either "e" (default), 2 or 10 defining the base for the log response ratio. While "e" (i.e. \code{ln}) is the most used variant (see Hedges et al. 1999), 2 and 10 based logarithms are easier to read. Experimental. DO NOT USE in publications.}
\item{paired_tests}{Logical indicating whether \link[stats]{wilcox.test} should be used to "confirm" the results indicated by the confidence intervals for the response ratios.}
\item{unlog}{logical indicating whether the output should be unlogged. Defaults to \code{FALSE}. Read the page 1152 under eq. 9 and what follows on the next page from Hedges et al. (1999) before you switch this to \code{TRUE} in publications. The unlogging is done by simply exponentiating for confidence intervals, while the response ratio (mean) is calculated as \eqn{\exp(LnR + var/2)} after Greenacre (2016). Currently untested for response ratios.}
\item{all.data}{logical indicating whether all data used in calculations should be returned instead of a concise table of relevant results. Defaults to \code{FALSE}.}
\item{signif}{number of significant digits in output. Defaults to 2. If \code{'all'}, output will not be rounded.}
\item{sqrt_transform}{Logical indicating whether values should be square root transformed prior calculation of means. This option makes the distributions more normally distributed, but might change the outcome. Highly experimental. DO NOT USE in publications.}
}
\value{
Returns a list where \code{$data} element contains the calculated response ratios for each \code{response} in a separate list named by the response's column name. \code{$info} contains information how the response ratios were calculated and, if \code{paired_tests = TRUE}, the \code{$tests} element gives \link[stats]{wilcox.test} results to "confirm" significance of the confidence intervals for the response ratios. Nonconforming tests are listed under \code{$nonconforming}.
}
\description{
Calculates logarithmic response ratio from a long table where each row represents one measurement.
}
\details{
The calculations are based on Hedges et al. (1999), with the exception that t-distribution is used to acquire confidence intervals instead of normal distribution. The difference is minimal for sample sizes > 20, but the confidence intervals will be a lot more conservative for small sample sizes leading to fewer false positives. Use \code{ci.type = "z"} to use normal distribution for CI estimation as described in the original source.
\strong{Note} that the function does not currently calculate dependent sample response ratios, as the pooled variance needs to be penalized by the correlation term for such analyses. See Lajeunesse (2011) and \href{https://stats.stackexchange.com/questions/141443/calculating-effect-size-lnr-variances-for-studies-with-different-study-designs}{CrossValidated} for further information.
The square root transformation routine is experimental and little tested, but seems to produce slightly less nonconforming test results against \link[stats]{wilcox.test} for non-normal data.
It is recommended to plot your raw values to confirm any results given by this function.
}
\references{
Hedges, L. V, Gurevitch, J., & Curtis, P.S. (1999) The meta-analysis of response ratios in experimental ecology. Ecology, 80, 1150–1156.
Lajeunesse, M. J. (2011). On the meta-analysis of response ratios for studies with correlated and multi-group designs. Ecology, 92, 2049-2055.
Greenacre, M., (2016). Data reporting and visualization in ecology. Polar Biology 39, 2189–2205. doi:10.1007/s00300-016-2047-2
}
\author{
Mikko Vihtakari
}
|
26a71bf75a2a98da8c4565aa459aaa0be26b67bd
|
a97ad6188fc5b0cb601158976f3799d640f3dc83
|
/03_plotting.R
|
710249ad8c4a32c2a025d56b6316cd282d9738f7
|
[] |
no_license
|
guidocor/R-for-the-lazy-psychologist
|
fe48c58fec8c9609fb28be23eb8444a96cecca67
|
dbece1778c26111301baa87f64d3a7a14fdf8e8e
|
refs/heads/master
| 2020-03-23T16:56:58.461197
| 2019-11-04T22:44:31
| 2019-11-04T22:44:31
| 141,834,918
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
03_plotting.R
|
# https://www.datacamp.com/courses/data-visualization-with-ggplot2-1
|
1f89da983614bab7c33c798d919e012ae846c5cc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mapproj/examples/map.grid.Rd.R
|
4cc4d135f202988f3a17d6367e8b9574a8444ed8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
r
|
map.grid.Rd.R
|
# Extracted example code for maps/mapproj: drawing latitude/longitude
# graticules (map.grid) on maps rendered in various projections.
library(mapproj)
### Name: map.grid
### Title: Draw a latitude/longitude grid on a projected map
### Aliases: map.grid
### Keywords: aplot

### ** Examples

library(maps)
# USA outline captured unprojected, replotted in an Albers projection,
# then overlaid with a graticule
m <- map("usa", plot=FALSE)
map("usa", project="albers", par=c(39, 45))
map.grid(m)

# get unprojected world limits
m <- map('world', plot=FALSE)
# center on NYC
map('world', proj='azequalarea', orient=c(41, -74, 0))
map.grid(m, col=2)
points(mapproject(list(y=41, x=-74)), col=3, pch="x", cex=2)
map('world', proj='orth', orient=c(41, -74, 0))
map.grid(m, col=2, nx=6, ny=5, label=FALSE, lty=2)
points(mapproject(list(y=41, x=-74)), col=3, pch="x", cex=2)

# center on Auckland
map('world', proj='orth', orient=c(-36.92, 174.6, 0))
map.grid(m, col=2, label=FALSE, lty=2)
points(mapproject(list(y=-36.92, x=174.6)), col=3, pch="x", cex=2)

m <- map('nz')
# center on Auckland
map('nz', proj='azequalarea', orient=c(-36.92, 174.6, 0))
points(mapproject(list(y=-36.92, x=174.6)), col=3, pch="x", cex=2)
map.grid(m, col=2)
|
3122c3c44818ebf954b6dc78c8ece36bc254c681
|
e5999ba1e3ee43d0ae195f8ff4518ff54f8ce96e
|
/C-score, dist.meu.R
|
253dc1834e8b90cd27b1fb1efbd2a4685547e8ef
|
[] |
no_license
|
csdambros/R-functions
|
55fea4358621c1eafe64f40390139027cfc2ba6e
|
f41ad4fd82056c7d0f8140b6cdb70683a8ec050c
|
refs/heads/master
| 2022-03-11T13:52:43.523166
| 2022-03-01T18:53:45
| 2022-03-01T18:53:45
| 9,036,691
| 2
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 795
|
r
|
C-score, dist.meu.R
|
# Function originally written by Cristian de Sales Dambros on 20/11/2009.
# Returns the C-score index (Roberts & Stone 1990) for the given
# presence/absence matrix (rows = sites, columns = species): for every
# species pair, the number of "checkerboard units" (S_i - Q)(S_j - Q) --
# where Q is the count of shared sites and S the per-species site totals --
# averaged over all R*(R-1)/2 pairs.
cscore <-
function (x)
{
  Nsites <- nrow(x)
  occ <- colSums(x)
  n.spp <- ncol(x)
  n.pairs <- (n.spp * (n.spp - 1)) / 2
  total <- 0
  for (i in seq_len(n.spp)) {
    for (j in seq_len(n.spp)) {
      # visit each unordered pair once (j > i)
      if (j <= i) next
      shared <- sum(x[, i] * x[, j])
      total <- total + ((occ[i] - shared) * (occ[j] - shared)) / n.pairs
    }
  }
  total
}
######################
# Sum of pairwise Pearson correlations between all species (columns),
# after dropping species recorded at every site (constant columns, for
# which the Pearson correlation is undefined).
dist.COR <-
function (x)
{
  x <- x[, colSums(x) != nrow(x)]
  n.spp <- ncol(x)
  total <- 0
  for (i in seq_len(n.spp)) {
    for (j in seq_len(n.spp)) {
      # visit each unordered pair once (j > i)
      if (j <= i) next
      total <- total + cor(x[, i], x[, j], method = "pearson")
    }
  }
  total
}
|
c58428e6d16f7c64758067b2d755a7d9a831d18f
|
6514cc40135346964e217ac6c67fae3bd139d9a0
|
/func.R
|
f48285976d9ddb3f64a8463a4b1ef1749b785847
|
[] |
no_license
|
asura349/publish
|
ca2a8e63da04f0a7156f7a0135b1fb82f05d95b2
|
201347cc34cb49d5fee4661b2cc0e7c33b65a456
|
refs/heads/master
| 2020-12-03T10:55:08.360907
| 2020-02-11T01:21:14
| 2020-02-11T01:21:14
| 231,289,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 886
|
r
|
func.R
|
# 1 - Model fit summary.
# Returns the standard set of lavaan fit indices for a fitted model object.
fit.summary <- function(fit)
{
  indices <- c("chisq", "df", "pvalue", "rmsea", "srmr", "CFI", "tli", "aic", "bic")
  return(fitMeasures(fit, indices))
}
# 2 - Modification indices sum.
# For each factor specification in `mod.list` (a list of character vectors:
# first element = factor name, remaining elements = item names), sum the
# modification indices in `mi` for the loading rows (op "=~") of each item.
# Returns a list of numeric vectors, one per factor.
mi.sum <- function(mi, mod.list)
{
  n.factors <- length(mod.list)
  out <- vector("list", n.factors)
  for (fac in seq_len(n.factors))
  {
    spec <- unlist(mod.list[fac])
    latent <- spec[1]
    items <- spec[-1]
    sums <- numeric(length(items))
    for (k in seq_along(items))
    {
      # rows of the MI table for this factor -> item loading
      rows <- subset(mi, lhs == latent & rhs == items[k] & op == "=~")
      sums[k] <- sum(rows$mi)
    }
    out[[fac]] <- sums
  }
  return(out)
}
|
c8a680416b176bd4d12bded1b592df2b27f5af92
|
5082121b040e484a304d3b11e2844bfb3f6c8409
|
/R/DubObs.R
|
449b1e642535bd335bd91649177c5b11f5f565ce
|
[] |
no_license
|
eosnas/AKaerial
|
65033cce6add058720edd7646bf12d771c9d2e22
|
91be73f4ed2f04e9c2b058aa372547e355ecde76
|
refs/heads/master
| 2020-03-23T00:37:36.200318
| 2018-07-13T17:38:36
| 2018-07-13T17:38:36
| 140,874,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,336
|
r
|
DubObs.R
|
# Match duplicate aerial observations between a front-seat and a rear-seat
# observer flying the same transects, producing mark-recapture style
# capture histories: "11" = seen by both (records agree on year, transect,
# species, and unit, with times within `time` units), "10" = front only,
# "01" = rear only.
#
# Arguments:
#   front.data    - data frame or csv path with front-observer records (optional)
#   rear.data     - data frame or csv path with rear-observer records (optional)
#   combined.data - data frame or csv path holding both observers (optional)
#   front.obs     - observer code for the front seat
#   rear.obs      - observer code for the rear seat
#   time          - maximum |ctime difference| for two records to match
#   open          - currently unused; kept for backward compatibility
#                   # TODO(review): confirm intended use of `open`
#
# Returns a list with `matches` (data frame of capture histories) and `f`
# (the filtered front-observer records with their `matched` flags).
DubMatch = function(front.data=NA, rear.data=NA, combined.data=NA, front.obs, rear.obs, time=6, open=5){

  # Resolve inputs: each source may be an in-memory data frame or a csv path.
  # inherits() replaces the original class(x) == "data.frame" comparison,
  # which fails for objects carrying multiple classes (e.g. tibbles).
  if(inherits(front.data, "data.frame")){
    f = front.data[front.data$obs == front.obs, ]
  }
  if(inherits(rear.data, "data.frame")){
    r = rear.data[rear.data$obs == rear.obs, ]
  }
  if(is.character(front.data)){
    data = read.csv(front.data, header = TRUE, stringsAsFactors = FALSE)
    f = data[data$obs == front.obs, ]
  }
  if(is.character(rear.data)){
    data = read.csv(rear.data, header = TRUE, stringsAsFactors = FALSE)
    r = data[data$obs == rear.obs, ]
  }
  if(is.character(combined.data)){
    data = read.csv(combined.data, header = TRUE, stringsAsFactors = FALSE)
    f = data[data$obs == front.obs, ]
    r = data[data$obs == rear.obs, ]
  }
  if(inherits(combined.data, "data.frame")){
    f = combined.data[combined.data$obs == front.obs, ]
    r = combined.data[combined.data$obs == rear.obs, ]
  }

  # track which records have been paired
  f$matched = 0
  r$matched = 0

  # restrict both observers to the transects they have in common
  f.tran = unique(f$tran)
  r.tran = unique(r$tran)
  common = as.character(f.tran[!is.na(match(f.tran, r.tran))])

  f = f[f$tran %in% common, ]
  r = r[r$tran %in% common, ]

  # empty data frame to populate with matches
  matches = data.frame(yr=integer(),
                       tran=character(),
                       ch=character(),
                       sppn=character(),
                       grp=integer(),
                       unit=character(),
                       front=character(),
                       rear=character(),
                       crew=character(), stringsAsFactors = FALSE
  )

  # seq_len() replaces 1:length(...), which iterated over c(1, 0) and
  # failed when no records survived the common-transect filter
  for (i in seq_len(nrow(f))) {
    for (j in seq_len(nrow(r))) {
      # candidate rear record must be unused and agree on all keys
      if(r$matched[j]==1) {next}
      if(r$yr[j]!=f$yr[i]) {next}
      if(r$tran[j]!=f$tran[i]) {next}
      if(r$sppn[j]!=f$sppn[i]) {next}
      if(r$unit[j]!=f$unit[i]) {next}

      if(abs(f$ctime[i]-r$ctime[j])<=time){
        # both observers saw it: "11", group size averaged across seats
        newline=data.frame(yr=f$yr[i],
                           tran=f$tran[i],
                           ch="11",
                           sppn=f$sppn[i],
                           grp=(f$grp[i]+r$grp[j])/2,
                           unit=f$unit[i],
                           front=f$obs[i],
                           rear=r$obs[j],
                           crew=paste(f$obs[i], r$obs[j], sep=""), stringsAsFactors = FALSE
        )
        matches=rbind(matches, newline)
        f$matched[i]=1
        r$matched[j]=1
        break
      }
    }

    if (f$matched[i]==0){
      # front-only detection: "10"
      newline=data.frame(yr=f$yr[i],
                         tran=f$tran[i],
                         ch="10",
                         sppn=f$sppn[i],
                         grp=f$grp[i],
                         unit=f$unit[i],
                         front=f$obs[i],
                         rear=r$obs[1],
                         crew=paste(f$obs[i], r$obs[1], sep=""), stringsAsFactors = FALSE
      )
      matches=rbind(matches, newline)
    }
  }

  # any rear records never paired are rear-only detections: "01"
  for (k in seq_len(nrow(r))){
    if(r$matched[k]==1) {next}
    newline=data.frame(yr=r$yr[k],
                       tran=r$tran[k],
                       ch="01",
                       sppn=r$sppn[k],
                       grp=r$grp[k],
                       unit=r$unit[k],
                       front=f$obs[1],
                       rear=r$obs[k],
                       crew=paste(f$obs[1], r$obs[k], sep=""), stringsAsFactors = FALSE
    )
    matches=rbind(matches, newline)
  }

  return(list(matches=matches, f=f) )
}
|
79f232c969be18a4e61e643d283f0aef320d4d45
|
533bf408ab134cb0ae829ebc0b65b0f28cfb9b4c
|
/man/GEVASummary-class.Rd
|
4e8982bec481c19ae0ef3284fe239a28a06f6271
|
[] |
no_license
|
sbcblab/geva
|
ff24b15c102efb6df73d7a6509aa5020d0c40756
|
e0ed0aa243e1ddde676b1b9284fbb67916a84e49
|
refs/heads/master
| 2023-03-19T08:45:59.394924
| 2021-03-10T00:18:59
| 2021-03-10T00:18:59
| 241,670,846
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,331
|
rd
|
GEVASummary-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/c_GEVASummary.R
\docType{class}
\name{GEVASummary-class}
\alias{GEVASummary-class}
\alias{show,GEVASummary-method}
\alias{plot,GEVASummary,missing-method}
\alias{inputdata,GEVASummary-method}
\alias{inputvalues,GEVASummary-method}
\alias{inputweights,GEVASummary,logical-method}
\alias{inputweights,GEVASummary,missing-method}
\alias{inputnames,GEVASummary-method}
\alias{featureTable,GEVASummary-method}
\alias{factors,GEVASummary-method}
\alias{factors<-,GEVASummary,factor-method}
\alias{factors<-,GEVASummary,character-method}
\alias{infolist,GEVASummary,missing-method}
\alias{infolist<-,GEVASummary,list-method}
\alias{quantiles,GEVASummary-method}
\alias{analysis.params,GEVASummary-method}
\alias{groupsets,GEVASummary-method}
\alias{groupsets<-,GEVASummary,TypedList-method}
\alias{groupsets<-,GEVASummary,GEVAGroupSet-method}
\alias{get.summary.method.GEVASummary}
\alias{get.variation.method.GEVASummary}
\alias{as.matrix.GEVASummary}
\alias{as.expression.GEVASummary}
\title{GEVA Summary-Variation Table}
\value{
A \code{\linkS4class{GEVASummary}} object
}
\description{
The \code{GEVASummary} class represents the calculation results for summary and variation from a \code{\linkS4class{GEVAInput}}.
This class inherits from \code{\link{SVTable}}.
}
\section{Slots}{
\describe{
\item{\code{sv}}{\verb{numeric matrix} composed by two columns: \code{S} (summary) and \code{V} (variation)
\cr (Inherited from \code{\linkS4class{SVTable}})}
\item{\code{inputdata}}{GEVAInput-class with the data input}
\item{\code{sv.method}}{Names of the statistical methods used to summarize data}
\item{\code{info}}{list with additional information}
}}
\section{Methods}{
(See also the inherited methods from \code{\linkS4class{SVTable}})\cr
\sspace\cr\strong{Conversion and coercion}
\describe{
\item{\code{as.expression(x, ginput, ...)}}{Gets the expression that reproduces this \code{GEVASummary} object, including function parameters used by \code{geva.summary}. The \code{ginput} argument is optional but can be specified to replace the internal \code{GEVAInput}}
\item{\code{as.matrix(x, ...)}}{Equivalent to \code{sv(x)}}
}
\sspace\cr\strong{Grouping}
\describe{
\item{\code{groupsets(object) <- value}}{Converts this instance to \code{\linkS4class{GEVAGroupedSummary}} and sets the list of \code{\linkS4class{GEVAGroupSet}} objects.
Can be used with \verb{$<name>} to specify the object name in the list.
If \code{value} is a \code{GEVAGroupSet}, inserts the element and sets the name based on the value call}
\item{\code{groupsets(object)}}{Gets the list of \code{\linkS4class{GEVAGroupSet}} objects attached to this instance. Only applicable for \code{\linkS4class{GEVAGroupedSummary}} objects}
}
\sspace\cr\strong{Plotting}
\describe{
\item{\code{plot(x, y, ...)}}{Draws a SV-plot. The horizontal axis is for \emph{summary} (S) and the vertical axis is for \emph{variation} (V)}
}
\sspace\cr\strong{Properties}
\describe{
\item{\code{analysis.params(gobject)}}{Returns a \code{list} of analysis parameters passed to \code{\link{geva.summarize}} to obtain this object}
\item{\code{get.summary.method(x)}}{Gets a \code{character} for the summarization method name}
\item{\code{get.variation.method(x)}}{Gets a \code{character} for the variation calculation method name}
}
\sspace\cr\strong{Sub-slot accessors}
\describe{
\item{\code{factors(object) <- value}}{Sets the value to the \code{factor} slot in the internal \code{\linkS4class{GEVAInput}}}
\item{\code{factors(object)}}{Gets the \code{factor} defined in the \code{factors} slot in the internal \code{\linkS4class{GEVAInput}}}
\item{\code{featureTable(object)}}{Gets the \code{data.frame} from the \code{ftable} slot in the internal \code{\linkS4class{GEVAInput}}}
\item{\code{infolist(object, field = NULL, ...)}}{Gets the \code{list} from the \code{info} slot.
\cr If \code{recursive} is \code{TRUE}, appends the contents from the \code{info} slot in the internal \code{\linkS4class{GEVAInput}}}
\item{\code{inputvalues(object)}}{Gets the \code{matrix} from the \code{values} slot in the internal \code{\linkS4class{GEVAInput}}}
\item{\code{inputweights(object, normalized)}}{Gets the \code{matrix} from the \code{weights} slot in the internal \code{\linkS4class{GEVAInput}}}
}
}
|
2b79a4a0d88e68a203482b6edd963dfb120062f6
|
ee0394a3a407f2176bd30b6a6361519cc773584f
|
/TimeSeries_Models.R
|
634c2deed4ed8704332b91e2c520b6e905c504c2
|
[] |
no_license
|
send2ankur/MS_Research
|
4e9c184861de3532cba4f098700eaaaefdc3dd13
|
3047cb168c8f69d99c860b52aba3324b425ed948
|
refs/heads/master
| 2021-01-05T06:08:35.524210
| 2020-02-17T14:46:14
| 2020-02-17T14:46:14
| 240,909,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,915
|
r
|
TimeSeries_Models.R
|
# ARIMAX (regression with ARIMA errors) models for hourly PM2.5 at two
# stations: Delhi (Punjabi Bagh) and Ghaziabad. For each station: scale the
# weather covariates, fit auto.arima on 2018-2019 training data, check the
# residuals for stationarity, then evaluate a 360-hour forecast against
# 2020 observations.
library(dplyr)
library(forecast)
library(tseries)
library(dataPreparation)

# Import rds objects for delhi and ghaziabad hourly files
delpb <- readRDS("df_delpb_hourly.rds")
gha <- readRDS("df_gha_hourly.rds")

###########################################
# Delhi
###########################################

############# Training

# Create time series object (hourly observations, frequency 24 = daily cycle)
train_ts <- delpb %>%
  filter(Timeline >= '2018-01-01 00:00:00' & Timeline <= '2019-12-31 23:00:00') %>%
  select (PM2.5) %>%
  dplyr::rename(pm25_0 = PM2.5) %>%
  ts(frequency = 24)

plot(decompose(train_ts, type="additive"))

# ACF PACF Plots
par(mfrow=c(1,2))
acf(train_ts)
pacf(train_ts)

# Ist order differencing
par(mfrow=c(1,2))
acf(diff(train_ts))
pacf(diff(train_ts))

# Prepare xreg: weather covariates, standardized with scales saved to disk
# so the identical transformation can be reapplied to the test period
xreg_to_scale <- delpb %>%
  filter(Timeline >= '2018-01-01 00:00:00' & Timeline <= '2019-12-31 23:00:00') %>%
  subset(select = c(precipIntensity,precipProbability,apparentTemperature,dewPoint,pressure,
                    windSpeed,windGust,cloudCover,uvIndex,visibility))

arimax_scales <- build_scales(xreg_to_scale, verbose = TRUE)
saveRDS(arimax_scales,"arimax_scales_delpb.rds")
arimax_scales <- readRDS("arimax_scales_delpb.rds")
xreg_scale <- fastScale(xreg_to_scale, scales = arimax_scales, verbose = TRUE)
xreg_train <- as.matrix(xreg_scale)

# Build ARIMAX
arimax_delpb <- forecast::auto.arima(train_ts,seasonal=TRUE,trace=TRUE,xreg=xreg_train,
                                     parallel=TRUE)
# Regression with ARIMA(3,1,1)(0,0,2)[24]

# Save rds object
saveRDS(arimax_delpb,"arimax_delpb.rds")

tsdiag(arimax_delpb)

plot(arimax_delpb$x, col="black")
lines(fitted(arimax_delpb), col="red")

#Again, let's check if the residual series is white noise
resi_auto_arima <- train_ts - fitted(arimax_delpb)
adf.test(resi_auto_arima,alternative = "stationary")
# Dickey-Fuller = -21.312, Lag order = 25, p-value = 0.01
kpss.test(resi_auto_arima)
# KPSS Level = 0.060765, Truncation lag parameter = 14, p-value = 0.1

############# Test

# BUG FIX: this data frame was previously assigned to `test`, while every
# later reference used `delpb_test`, which was undefined.
delpb_test <- delpb %>%
  filter(Timeline >= '2020-01-01 00:00:00')

# Prepare xreg using the scales learned on the training period
xregt_to_scale <- delpb %>%
  filter(Timeline >= '2020-01-01 00:00:00') %>%
  subset(select = c(precipIntensity,precipProbability,apparentTemperature,dewPoint,pressure,
                    windSpeed,windGust,cloudCover,uvIndex,visibility))
xregt_scale <- fastScale(xregt_to_scale, scales = arimax_scales, verbose = TRUE)
xreg_test <- as.matrix(xregt_scale)

# Model evaluation over the first 360 hours (15 days) of 2020
fcast_auto_arima <- forecast(arimax_delpb, xreg=xreg_test[c(1:360),], n.ahead = 360)
accuracy(fcast_auto_arima$fitted[1:360],delpb_test$PM2.5[1:360])
# ME RMSE MAE MPE MAPE
#Test set -93.16557 155.2201 123.8407 -110.1408 120.9653

arimax_delpb_eval <- data.frame(Timeline=as.POSIXct(delpb_test$Timeline[1:360]),
                                Forecast=fcast_auto_arima$fitted[1:360],
                                Actual=delpb_test$PM2.5[1:360])
# save rds object
# NOTE(review): absolute path here vs. relative path in the Ghaziabad
# section below -- consider unifying for portability
saveRDS(arimax_delpb_eval,"C:/Users/send2/Documents/Ankur/MSc/Temp/rdsObject/arimax_delpb_eval.rds")

###########################################
# Ghaziabad
###########################################

############# Training

# Create time series object (hourly observations, frequency 24 = daily cycle)
train_ts <- gha %>%
  filter(Timeline >= '2018-01-01 00:00:00' & Timeline <= '2019-12-31 23:00:00') %>%
  select (PM2.5) %>%
  dplyr::rename(pm25_0 = PM2.5) %>%
  ts(frequency = 24)

plot(decompose(train_ts, type="additive"))

# ACF PACF Plots
par(mfrow=c(1,2))
acf(train_ts)
pacf(train_ts)

# Ist order differencing
par(mfrow=c(1,2))
acf(diff(train_ts))
pacf(diff(train_ts))

# Prepare xreg: weather covariates scaled with station-specific scales
xreg_to_scale <- gha %>%
  filter(Timeline >= '2018-01-01 00:00:00' & Timeline <= '2019-12-31 23:00:00') %>%
  subset(select = c(precipIntensity,precipProbability,apparentTemperature,dewPoint,pressure,
                    windSpeed,windGust,cloudCover,uvIndex,visibility))

arimax_scales <- build_scales(xreg_to_scale, verbose = TRUE)
saveRDS(arimax_scales,"arimax_scales_gha.rds")
xreg_scale <- fastScale(xreg_to_scale, scales = arimax_scales, verbose = TRUE)
xreg_train <- as.matrix(xreg_scale)

# Build ARIMAX
arimax_gha <- forecast::auto.arima(train_ts,seasonal=TRUE,trace=TRUE,xreg=xreg_train)
# Best model: Regression with ARIMA(2,1,2)(1,0,0)[24]
saveRDS(arimax_gha,"arimax_gha.rds")

tsdiag(arimax_gha)

plot(arimax_gha$x, col="black")
lines(fitted(arimax_gha), col="red")

#Again, let's check if the residual series is white noise
resi_auto_arima <- train_ts - fitted(arimax_gha)
adf.test(resi_auto_arima,alternative = "stationary")
# Dickey-Fuller = -20.92, Lag order = 25, p-value = 0.01
kpss.test(resi_auto_arima)
# KPSS Level = 0.035704, Truncation lag parameter = 14, p-value = 0.1

############# Test

gha_test <- gha %>%
  filter(Timeline >= '2020-01-01 00:00:00')

# Prepare xreg using the scales learned on the training period
xregt_to_scale <- gha %>%
  filter(Timeline >= '2020-01-01 00:00:00') %>%
  subset(select = c(precipIntensity,precipProbability,apparentTemperature,dewPoint,pressure,
                    windSpeed,windGust,cloudCover,uvIndex,visibility))
xregt_scale <- fastScale(xregt_to_scale, scales = arimax_scales, verbose = TRUE)
xreg_test <- as.matrix(xregt_scale)

#Also, let's evaluate the model using MAPE
fcast_auto_arima <- forecast(arimax_gha, xreg=xreg_test[c(1:360),], n.ahead = 360)
accuracy(fcast_auto_arima$fitted[1:360],gha_test$PM2.5[1:360])
# ME RMSE MAE MPE MAPE
#Test set -95.699 158.0659 125.8172 -84.58171 94.25856

arimax_gha_eval <- data.frame(Timeline=as.POSIXct(gha_test$Timeline[1:360]),
                              Forecast=fcast_auto_arima$fitted[1:360],
                              Actual=gha_test$PM2.5[1:360])
saveRDS(arimax_gha_eval,"arimax_gha_eval.rds")
|
131919a5c21ae4e1d505bc616c236ef470c716e7
|
836a779c4e3405bb6ffe79117964b6b516cb3bc2
|
/man/errAAll.Rd
|
b47825942fc595a24f5dd11b08a1f442b7aa29b5
|
[] |
no_license
|
RajeswaranV/proportion
|
657fe69c4ad62c00ee95077d40683d4452d42c66
|
c0e0a60d43113004c0366c71a2b2dac36bf1fc86
|
refs/heads/master
| 2022-06-21T13:51:47.915249
| 2022-06-11T06:31:18
| 2022-06-11T06:31:18
| 46,557,957
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,582
|
rd
|
errAAll.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/511.Error-Failure_LimitBased_ADJ_All.R
\name{errAAll}
\alias{errAAll}
\title{Calculates error, long term power and pass/fail criteria using 6 adjusted methods (Wald, Wald-T, Likelihood, Score, Logit-Wald, ArcSine)}
\usage{
errAAll(n, alp, h, phi, f)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{h}{- Adding factor}
\item{phi}{- Null hypothesis value}
\item{f}{- Failure criterion}
}
\value{
A dataframe with
\item{delalp}{ Delta-alpha is the increase of the nominal error with respect to real error}
\item{theta}{ Long term power of the test}
\item{Fail_Pass}{Fail/pass based on the input f criterion}
\item{method}{Name of the method}
}
\description{
Calculates error, long term power and pass/fail criteria using 6 adjusted methods (Wald, Wald-T, Likelihood, Score, Logit-Wald, ArcSine)
}
\details{
Calculates error, long term power and pass/fail
criteria using 6 adjusted methods (Wald, Wald-T, Likelihood, Score, Logit-Wald, ArcSine)
}
\examples{
n=20; alp=0.05;h=2; phi=0.99; f=-2
errAAll(n,alp,h,phi,f)
}
\seealso{
Other Error for adjusted methods:
\code{\link{PloterrAAS}()},
\code{\link{PloterrAAll}()},
\code{\link{PloterrALR}()},
\code{\link{PloterrALT}()},
\code{\link{PloterrASC}()},
\code{\link{PloterrATW}()},
\code{\link{PloterrAWD}()},
\code{\link{errAAS}()},
\code{\link{errALR}()},
\code{\link{errALT}()},
\code{\link{errASC}()},
\code{\link{errATW}()},
\code{\link{errAWD}()}
}
\concept{Error for adjusted methods}
|
3e34aed9576e87f174d0cb49fa95d816276409d3
|
81b37b77651e5d858a6748b1c5d2e093a434efc3
|
/R/rip.R
|
955a6743297e10034183b63e0ceb7c479e8ba3c8
|
[
"MIT"
] |
permissive
|
gregrs-uk/polite
|
e8b5df084301ef0141945b1e9d12cebbd703fad4
|
0e48571f176d961a685673d722b6fed3d581ee56
|
refs/heads/master
| 2020-04-12T03:39:27.961876
| 2018-10-21T15:06:13
| 2018-10-21T15:06:13
| 162,273,203
| 0
| 0
| null | 2018-12-18T10:47:36
| 2018-12-18T10:47:36
| null |
UTF-8
|
R
| false
| false
| 1,635
|
r
|
rip.R
|
#' Polite file download
#'
#' @param bow host introduction object of class polite, session created by bow() or nod
#' @param new_filename optional new file name to use when saving the file
#' @param suffix optional characters added to file name
#' @param sep separator between file name and suffix. Default "__"
#' @param path path where file should be saved. Defaults to folder named "downloads" created in the working directory
#' @param overwrite if TRUE will overwrite file on disk
#' @param mode character. The mode with which to write the file. Useful values are "w", "wb" (binary), "a" (append) and "ab". Not used for methods "wget" and "curl".
#' @param ... other parameters passed to download.file
#'
#' @return Full path to file indicated by url saved on disk
#' @export
#'
#' @examples
#' \dontrun{
#' bow("www.mysite.com") %>%
#'   nod("file.txt") %>%
#'   rip()
#' }
#' @importFrom here here
#' @importFrom tools file_path_sans_ext file_ext
rip <- function(bow, new_filename=NULL, suffix=NULL, sep="__", path="downloads", overwrite=FALSE, mode="wb", ...){
  url <- bow$url
  # make sure the destination folder exists
  if(!dir.exists(here::here(path)))
    dir.create(here::here(path))
  # prepend the separator only when a suffix was actually supplied
  if(!is.null(suffix)) suffix <- paste0(sep, suffix)
  # default file name: original base name (+ optional suffix) with the
  # original extension. FIX: paste0() has no `sep` argument -- the stray
  # `sep=""` previously passed was silently concatenated into `...`
  # and has been removed.
  new_filename <- new_filename %||% paste0(tools::file_path_sans_ext(basename(url)), suffix, ".", tools::file_ext(basename(url)))
  # refuse to clobber an existing file unless overwrite = TRUE
  if(file.exists(here::here(path, new_filename)) && !overwrite){
    warning("File already exists", call. = FALSE)
    return(here::here(path, new_filename))
  }
  bow$download_file_ltd(url, here::here(path, new_filename), mode=mode, ...)
  return(here::here(path, new_filename))
}
|
b209b1bb7a0fd8c82b2fa59b99d7d9fe5f18f2da
|
ac2c8ae91a297d8e0f6527b771658645c92c4cbd
|
/day_4/corona/global.R
|
7bb8d8755efc55f53ab6da4a7adc77ce6c7f71b6
|
[] |
no_license
|
mohammad-alfaifi/shiny_workshop
|
9bb5de0c22cf1dfffb51d3b7b33ec4ab47c2d3b8
|
57457391e94693e958c0100af0b4504bd233f513
|
refs/heads/master
| 2021-05-19T03:59:05.037885
| 2020-04-29T10:24:03
| 2020-04-29T10:24:03
| 251,519,025
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,505
|
r
|
global.R
|
# Global setup for the corona Shiny dashboard: load packages, read the case
# data, and pre-compute the latest snapshot with a marker color per row.
library(tidyverse)
library(tidylog)
library(janitor)
library(shiny)
library(leaflet)
library(scales)
library(plotly)
library(knitr)
library(shinydashboard)

d <- read_csv("data/corona.csv")

# Most recent date only; color-code by quartile of confirmed_num:
# bottom quartile = green, middle quartiles = yellow/orange, top = red.
d_last <-
  d %>%
  filter(date == max(date)) %>%
  mutate(color = case_when(
    confirmed_num <= quantile(confirmed_num, .25) ~ "green",
    confirmed_num > quantile(confirmed_num, .25) & confirmed_num <= quantile(confirmed_num, .5)~ "yellow",
    confirmed_num > quantile(confirmed_num, .5) & confirmed_num <= quantile(confirmed_num, .75)~ "orange",
    TRUE ~"red"
  ))

# source('/cloud/project/helpers.R')
#
#
# # data files paths --------------------------------------------------------
#
#
# confirmed_url <-
#   "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
# deaths_url <-
#   "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
# recorved_url <-
#   "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
#
#
#
# # data cleaning -----------------------------------------------------------
#
# l_urls <- list(confirmed_url, deaths_url, recorved_url)
# c_titles <- c("confirmed_num", "deaths_num", "recovered_num")
#
# d <-
#   map2(l_urls, c_titles, clean_df5) %>%
#   reduce(left_join)
|
eaa698c45a08e92062cdaa626c08f75a2e11a00e
|
f387d8bac0ca340364a8f2f3767233c20797e2e4
|
/extraindo_dados_TWEETER.R
|
526d4d630c3d8243021d600e8d655297824696c2
|
[] |
no_license
|
WOLFurriell/twitterR
|
24ba737808bac151de8f24340a86ba006eb0a40b
|
f2317e282e62f9391b5c4e731381961a15886928
|
refs/heads/master
| 2020-04-15T14:10:29.591089
| 2019-01-08T22:40:32
| 2019-01-08T22:40:32
| 164,744,504
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,916
|
r
|
extraindo_dados_TWEETER.R
|
# Download candidate Twitter timelines (Marina Silva, Dilma, Aecio) with the
# twitteR package and cache each as an .Rds file for later text mining.
library(twitteR)
library(tm)
library(wordcloud)
library(RColorBrewer)
library(tm)
library(dplyr)
library(data.table)
library(WriteXLS)
rm(list = ls())
# NOTE(review): hard-coded Windows working directory -- consider relative paths
setwd("D:/TWITTER/Bancos_tweets")
#-----------------------------
# set the API credentials (placeholders -- replace with real keys)
api_key <- "xxxxxxxxxxxxxxxxxxxxxxxxxxx"
api_secret <- "xxxxxxxxxxxxxxxxxxxxxxxxxxx"
access_token <- "xxxxxxxxxxxxxxxxxxxxxxxxxxx"
access_token_secret <- "xxxxxxxxxxxxxxxxxxxxxxxxxxx"
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)

# Pull the most recent 3200 tweets, including retweets and replies
##----------------- MARINA
twsilva_marina<-userTimeline('silva_marina',n=3200,includeRts=T,excludeReplies=F)
silva_marina_df<-twListToDF(twsilva_marina)
# plain text of each tweet
silva_marina_df2<-sapply(twsilva_marina, function(x) x$getText())
saveRDS(silva_marina_df, file="twsilva_marina.Rds")
saveRDS(silva_marina_df2, file="twsilva_marina2.Rds")
##----------------- DILMA
twdilma<-userTimeline('dilmabr',n=3200,includeRts=T,excludeReplies=F)
dilma_df<-twListToDF(twdilma)
dilma_df2<-sapply(twdilma, function(x) x$getText())
saveRDS(dilma_df, file="twdilma.Rds")
saveRDS(dilma_df2, file="twdilma2.Rds")
##----------------- AECIO
twaecio<-userTimeline('AecioNeves',n=3200,includeRts=T,excludeReplies=F)
aecio_df<-twListToDF(twaecio)
aecio_df2<-sapply(twaecio, function(x) x$getText())
saveRDS(aecio_df, file="twaecio.Rds")
saveRDS(aecio_df2, file="twaecio2.Rds")

# pull tweets for specific date ranges:
# we can check the date of the last tweet exported by userTimeline and
# fetch the remainder this way
#twsilva_marina2<- searchTwitter('from:silva_marinaNeves', n =1500,lang = 'pt',
#                     since ='2014-07-06',until = '2014-10-11')

## extract the text of all tweets
#write.table(silva_marina_df,'D:/TWITTER/Bancos_tweets/twsilva_marina4.txt')
# to remove emojis, conversion to 'ASCII' can be used
|
40b3588cc575cd6348f22ad60f1d72001ba5723e
|
580eb05c82c46724defaa72508b2fcf44340a5de
|
/man/eel.test2.Rd
|
26480e878851a44d0d3413c36d3cc57e15aeb730
|
[] |
no_license
|
cran/Compositional
|
5bf22542a6242cc9c4ff66d5e081332e65f4fc76
|
608fa713f4c50933b580d1b3097bc81051777f37
|
refs/heads/master
| 2023-07-06T09:05:20.638372
| 2023-06-29T18:10:02
| 2023-06-29T18:10:02
| 51,369,033
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,436
|
rd
|
eel.test2.Rd
|
\name{Exponential empirical likelihood hypothesis testing for two mean vectors}
\alias{eel.test2}
\title{
Exponential empirical likelihood hypothesis testing for two mean vectors
}
\description{
Exponential empirical likelihood hypothesis testing for two mean vectors.
}
\usage{
eel.test2(y1, y2, tol = 1e-07, R = 0, graph = FALSE)
}
\arguments{
\item{y1}{
A matrix containing the Euclidean data of the first group.
}
\item{y2}{
A matrix containing the Euclidean data of the second group.
}
\item{tol}{
The tolerance level used to terminate the Newton-Raphson algorithm.
}
\item{R}{
If R is 0, the classical chi-square distribution is used, if R = 1,
the corrected chi-square distribution (James, 1954) is used and if R = 2,
the modified F distribution (Krishnamoorthy and Yanping, 2006) is used.
If R is greater than 3 bootstrap calibration is performed.
}
\item{graph}{
A boolean variable which is taken into consideration only when bootstrap
calibration is performed. IF TRUE the histogram of the bootstrap test
statistic values is plotted.
}
}
\details{
Exponential empirical likelihood is a non-parametric hypothesis testing
procedure for one sample. The generalization to two (or more samples) is via
searching for the mean vector that minimises the sum of the two test statistics.
}
\value{
A list including:
\item{test}{
The empirical likelihood test statistic value.
}
\item{modif.test}{
The modified test statistic, either via the chi-square or the F distribution.
}
\item{dof}{
The degrees of freedom of the chi-square or the F distribution.
}
\item{pvalue}{
The asymptotic or the bootstrap p-value.
}
\item{mu}{
The estimated common mean vector.
}
\item{runtime}{
The runtime of the bootstrap calibration.
}
}
\references{
Jing Bing-Yi and Andrew TA Wood (1996). Exponential empirical likelihood is
not Bartlett correctable. Annals of Statistics 24(1): 365-369.
G.S. James (1954). Tests of Linear Hypotheses in Univariate and Multivariate
Analysis when the Ratios of the Population Variances are Unknown.
Biometrika, 41(1/2): 19-43
Krishnamoorthy K. and Yanping Xia (2006). On Selecting Tests for Equality
of Two Normal Mean Vectors.
Multivariate Behavioral Research 41(4): 533-548.
Owen A. B. (2001). Empirical likelihood. Chapman and Hall/CRC Press.
Amaral G.J.A., Dryden I.L. and Wood A.T.A. (2007). Pivotal bootstrap methods
for k-sample problems in directional statistics and shape analysis.
Journal of the American Statistical Association 102(478): 695-707.
Preston S.P. and Wood A.T.A. (2010). Two-Sample Bootstrap Hypothesis Tests
for Three-Dimensional Labelled Landmark Data. Scandinavian Journal of
Statistics 37(4): 568-587.
Tsagris M., Preston S. and Wood A.T.A. (2017). Nonparametric hypothesis
testing for equality of means on the
simplex. Journal of Statistical Computation and Simulation, 87(2): 406-422.
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{el.test2}, \link{maovjames}, \link{maov}, \link{hotel2T2},
\link{james}, \link{comp.test}
}
}
\examples{
y1 = as.matrix(iris[1:25, 1:4])
y2 = as.matrix(iris[26:50, 1:4])
eel.test2(y1, y2)
}
\keyword{ Multivariate hypothesis testing }
\keyword{ non parametric test }
|
a04c33d4146d7683212e649976b92558c5040118
|
461ea396babcd50cf55f83ba7e9b4e50e2588807
|
/myFunctions/R/visualization.R
|
c49bb36020c1afd8148202e2b1c4331d4ab3f257
|
[] |
no_license
|
vikram-g/R-Functions
|
e9f7dca39b41fa3b2c5552dd0013b6f72fa480e9
|
aabb1ac6616b75ac23d4985af99036c8fb3c9a27
|
refs/heads/master
| 2020-03-27T00:33:00.735484
| 2018-08-22T03:27:38
| 2018-08-22T03:27:38
| 145,633,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,114
|
r
|
visualization.R
|
#'
#'Plot logit curve
#'
#'Plot the fitted logistic (logit) curve for a single predictor together with
#'a binned histogram of the actual target values: the dv == 0 bars hang up
#'from the bottom, the dv == 1 bars hang down from the top.
#'
#' @param df Dataframe containing the target as well as variable to plot
#' @param varName Name of the variable that has to be plotted
#' @param target Name of the binary (0/1) target variable
#' @return Logit plot as output (drawn via plot())
#' @export
#'
plot_logit_curve <- function(df,varName,target){
  # library() fails loudly if a package is missing; require() only warns
  # and would let the function continue into an obscure downstream error.
  library("ggplot2")
  library("dplyr")

  # Work on a two-column copy with fixed names: idv = predictor, dv = target.
  plt_dat <- df[,c(varName,target)]
  names(plt_dat) <- c("idv","dv")

  # Univariate logistic fit; keep link-scale predictions + SEs for the CI band.
  glm_model <- glm(dv ~ idv, data=plt_dat, family=binomial(link="logit"))
  plt_dat <- cbind(plt_dat,as.data.frame(predict(glm_model, newdata = plt_dat, type="link", se=TRUE)))
  plt_dat <- plt_dat[!is.na(plt_dat$idv),]

  # Bin each outcome class separately (30 equal-width bins over that class's
  # own range) and replace the bin label with its numeric midpoint.
  ones  <- .bin_idv_midpoints(plt_dat[plt_dat$dv == 1,])
  zeros <- .bin_idv_midpoints(plt_dat[plt_dat$dv == 0,])
  alls <- rbind(ones, zeros)

  # Per-bin share within each class; flipped (1 - pct) for dv == 1 so its
  # segments hang down from y = 1 at the top of the plot.
  h <- alls %>% group_by(dv, breaks) %>%
    summarise(n = n()) %>%
    mutate(pct = ifelse(dv==0, n/sum(n), 1 - n/sum(n)))

  # Calculate confidence intervals; qnorm(0.95 / 2 + 0.5) is the 90% z-value.
  std <- qnorm(0.95 / 2 + 0.5)
  plt_dat$ymin <- glm_model$family$linkinv(plt_dat$fit - std * plt_dat$se.fit)
  plt_dat$ymax <- glm_model$family$linkinv(plt_dat$fit + std * plt_dat$se.fit)
  plt_dat$fit <- glm_model$family$linkinv(plt_dat$fit) # Rescale to 0-1

  # Plot everything: class histograms, CI ribbon, fitted probability curve.
  logit_plot <- ggplot(plt_dat, aes(x=idv, y=dv)) +
    geom_segment(data=h, size=4, show.legend=FALSE,
                 aes(x=breaks, xend=breaks, y=dv, yend=pct, colour=factor(dv))) +
    geom_ribbon(data=plt_dat, fill = "blue", aes(y=fit, ymin=ymin, ymax=ymax), alpha=0.5) +
    scale_fill_discrete(name = target) +
    scale_y_continuous(labels = scales::percent) +
    geom_line(data=plt_dat, aes(y=fit), color = "red") +
    labs(x=varName, y=paste0(target)) +
    ggtitle(paste0(varName,' - Logit plot'))
  plot(logit_plot)
}

# Internal helper: cut d$idv into 30 equal-width bins and overwrite d$breaks
# with the numeric midpoint of each row's bin. Midpoints are parsed out of
# cut()'s interval labels (so rounding matches the displayed intervals),
# exactly as the previous duplicated inline code did - just deduplicated for
# the two outcome classes and vectorised instead of a row-wise sapply.
.bin_idv_midpoints <- function(d) {
  lab <- as.character(cut(d$idv, breaks=30, include.lowest=TRUE))
  lab <- gsub("\\[", "", gsub("\\]", "", gsub("\\(", "", lab)))
  parts <- strsplit(lab, ",")
  lo <- as.numeric(vapply(parts, `[`, character(1), 1L))
  hi <- as.numeric(vapply(parts, `[`, character(1), 2L))
  d$breaks <- (lo + hi) / 2
  d
}
|
3a0fcac3e71c590e4402ac7b02172d2e7e560964
|
d10a821c609e9afea8ef8c022a64e57a6c440184
|
/R/test_new_gam.R
|
43f96a604e369fadcba5db977d9bd7e52c1fd3a7
|
[] |
no_license
|
ToonVanDaele/trias-test
|
d8aef7d6c5ca2c1b6a6d19a8ac6be551768b08f1
|
04fe9e1d81bdb6b69cdad56b70d3f972ab5d5917
|
refs/heads/master
| 2020-05-28T02:06:58.706784
| 2019-12-06T16:57:20
| 2019-12-06T16:57:20
| 188,849,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,544
|
r
|
test_new_gam.R
|
### Test gam methods
### Exploratory comparison of GAM formulations (gaussian vs negative binomial,
### with/without class-observation correction and spatial smoothers) for one
### species occurrence time series.
library(tidyverse)
library(mgcv)
library(gratia)
# Load data (preprocessed cube data - presumably long per-cell series, short
# pooled series, cell coordinates and species names; TODO confirm)
df_s <- readRDS(file = "./data/df_s.RDS")
df_pp <- readRDS(file = "./data/df_pp.RDS")
df_xy <- readRDS(file = "./data/df_xy.RDS")
spec_namE1s <- readRDS(file = "./data/spec_namE1s.RDS")
# Select species (taxonKey) - only the last uncommented assignment takes effect
spn <- "1718308"
spn <- "1690429"
spn <- "2219863" # peak - completely gone after the class correction
# spn <- "2362868"
#spn <- "2439261" # large dataset (134000 records). "inner loop 1; can't correct step size
spn <- "2706056" # good time series without too many zeros
df_ss <- filter(df_s, taxonKey == spn)
df_sp <- filter(df_pp, taxonKey == spn)
df_ss$eea_cell_code <- as.factor(df_ss$eea_cell_code)
head(df_ss)
head(df_sp)
# Some minor problem with cobs < obs (should never be the case)
# should be solved at the preprocessing stage
# temp <- df_ss %>%
#   mutate(oops = ifelse(cobs < obs, TRUE, FALSE))
# summary(temp)
#
# dd <- temp %>%
#   filter(oops == TRUE) %>%
#   group_by(eea_cell_code) %>%
#   summarise(grid = n())
# Workaround: force class observations to be at least the species observations
df_ss <- df_ss %>%
  mutate(cobs = ifelse(cobs < obs, obs, cobs))
# Plot the obs/cobs series for a single grid cell
df_ss %>%
  filter(eea_cell_code == "1kmE13839N3112") %>%
  dplyr::select(year, obs, cobs) %>%
  gather(key = type, value = obs, - year) %>%
  ggplot(aes(x = year, y = obs, colour = type)) + geom_point() + geom_line()
######################################################
# GAM
# Model A - gaussian
mA <- gam(obs ~ s(year, bs = "tp"), data = df_ss, method = "REML")
summary(mA)
plot(mA)
temp <- predict(object = mA, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mA$coefficients[1])
df_mA <- df_ss
df_mA$fit <- temp$fit[,1] + intercept
df_mA$ucl <- temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96
df_mA$lcl <- temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96
summary(df_mA)
#appraise(mA)
# How does it fit?
df_mA %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
###############################################
# Model B - negative binomial
mB <- gam(obs ~ s(year, bs = "tp"), family = nb(),
data = df_ss, method = "REML")
summary(mB)
plot(mB)
#appraise(mB)
# How does it fit?
temp <- predict(object = mB, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mB$coefficients[1])
df_mB <- df_ss
df_mB$fit <- exp(temp$fit[,1] + intercept)
df_mB$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_mB$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_mB %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
# Model C - negative binomial - correct for class observations
# How does cobs look like?
df_temp <- df_ss %>%
group_by(year) %>%
summarise(yobs = sum(obs),
ycobs = sum(cobs))
ggplot(df_temp, aes(x = year, y = ycobs)) + geom_point() + geom_line()
ggplot(df_temp, aes(x = yobs, y = ycobs)) + geom_point()
# C1 -> class observations as smoother
mC1 <- gam(obs ~ s(year, bs = "tp") + s(cobs, k = 3), family = nb(),
data = df_ss, method = "REML")
summary(mC1)
plot(mC1)
appraise(mC1)
# How does it fit?
temp <- predict(object = mC1, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mC1$coefficients[1])
df_mC1 <- df_ss
df_mC1$fit <- exp(temp$fit[,1] + intercept)
df_mC1$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_mC1$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_mC1 %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4) +
ggtitle("long data - nb - class observation as smoother")
# C2 -> class observations as linear predictor
mC2 <- gam(obs ~ s(year, bs = "tp") + cobs, family = nb(),
data = df_ss, method = "REML")
summary(mC2)
plot(mC2)
appraise(mC2)
# How does it fit?
temp <- predict(object = mC2, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mC2$coefficients[1])
df_mC2 <- df_ss
df_mC2$fit <- exp(temp$fit[,1] + intercept)
df_mC2$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_mC2$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_mC2 %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
filter(!year == 1990) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
# C3 -> class observations as offset
mC3 <- gam(obs ~ offset(cobs) + s(year, bs = "tp"), family = nb(),
data = df_ss, method = "REML")
summary(mC3)
plot(mC3)
appraise(mC3)
# How does it fit?
temp <- predict(object = mC3, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mC3$coefficients[1])
df_mC3 <- df_ss
df_mC3$fit <- exp(temp$fit[,1] + intercept)
df_mC3$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_mC3$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_mC3 %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
# Model D - negative binomial - random effect cell ID
# This model doesn't converge in a reasonable time
# length(unique(df_ss$eea_cell_code))
#
# mD <- gam(obs ~ offset(cobs) + s(eea_cell_code, bs = "re") + s(year), family = nb(),
# data = df_ss, method = "REML")
#
# summary(mD)
# plot(mD)
# appraise(mD)
#
# # How does it fit?
# temp <- predict(object = mD, newdata = df_ss, type = "iterms",
# interval = "prediction", se.fit = TRUE)
# intercept <- unname(mD$coefficients[1])
# df_mD <- df_ss
# df_mD$fit <- exp(temp$fit[,1] + intercept)
# df_mD$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
# df_mD$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
#
# df_mD %>%
# group_by(year) %>%
# summarise(yobs = sum(obs),
# yfit = sum(fit),
# ylcl = sum(lcl),
# yucl = sum(ucl)) %>%
# ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
# geom_line(aes(y = yfit), colour = "red") +
# geom_ribbon(aes(ymax = yucl, ymin = ylcl),
# fill = grey(0.5),
# alpha = 0.4)
# Model E: with s(x,y) smoother
length(unique(df_ss$eea_cell_code))
df_ss <- df_ss %>% left_join(df_xy %>%
dplyr::select(eea_cell_code, x, y, natura2000),
by = "eea_cell_code")
df_ss$eea_cell_code <- as.factor(df_ss$eea_cell_code)
df_ss %>%
filter(obs > 0) %>%
group_by(eea_cell_code, year) %>%
summarise(nc = n(),
x = first(x),
y = first(y)) %>%
ggplot(aes(x = x, y = y, colour = nc)) + geom_point() + facet_wrap(~year)
# Model mE1 - s(x,y) smoother only
mE1 <- gam(obs ~ s(year) + s(x, y, bs = "gp", k = 100, m = c(3, 10000)), family = nb(),
data = df_ss, method = "REML")
summary(mE1)
plot(mE1)
appraise(mE1)
# How does it fit?
temp <- predict(object = mE1, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mE1$coefficients[1])
df_mE1 <- df_ss
df_mE1$fit <- exp(temp$fit[,1] + intercept)
df_mE1$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_mE1$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_mE1 %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4) +
ggtitle("long data - nb - s(s,y) only")
# Model E2 - with s(x,y) smoother and smoother for class observations
mE2 <- gam(obs ~ s(year) + s(cobs, k = 3) + s(x, y, bs = "gp", k = 100, m = c(3, 10)), family = nb(),
data = df_ss, method = "REML")
summary(mE2)
draw(mE2)
plot(mE2)
appraise(mE2)
# How does it fit?
temp <- predict(object = mE2, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mE2$coefficients[1])
df_mE2 <- df_ss
df_mE2$fit <- exp(temp$fit[,1] + intercept)
df_mE2$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_mE2$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_mE2 %>%
group_by(year) %>%
summarise(yobs = sum(obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = yobs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4) +
ggtitle("long data - nb - s(x,y) + s(cobs)")
######################################################################""
#Model F - presence absence
df_ss %>%
group_by(year) %>%
summarise(ypa_obs = sum(pa_obs)) %>%
ggplot(aes(x = year, y = ypa_obs)) + geom_point() + geom_line()
#
# Model F1 - year smoother only
mF1 <- gam(pa_obs ~ s(year), family = "binomial",
data = df_ss, method = "REML")
summary(mF1)
plot(mF1)
appraise(mF1)
# How does it fit?
temp <- predict(object = mF1, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mF1$coefficients[1])
df_mF1 <- df_ss
df_mF1$fit <- 1 / (1 + exp(-(temp$fit[,1] + intercept)))
df_mF1$ucl <- 1 / (1 + exp(-(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)))
df_mF1$lcl <- 1 / (1 + exp(-(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)))
df_mF1 %>%
group_by(year) %>%
summarise(y_pa_obs = sum(pa_obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = y_pa_obs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
#
# Model F2 - year smoother + s(x,y) smoother
mF2 <- gam(pa_obs ~ s(year) + s(x, y, bs = "gp", k = 100, m = c(3, 10000)), family = "binomial",
data = df_ss, method = "REML")
summary(mF2)
plot(mF2)
appraise(mF2)
# How does it fit?
temp <- predict(object = mF2, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mF2$coefficients[1])
df_mF2 <- df_ss
df_mF2$fit <- 1 / (1 + exp(-(temp$fit[,1] + intercept)))
df_mF2$ucl <- 1 / (1 + exp(-(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)))
df_mF2$lcl <- 1 / (1 + exp(-(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)))
df_mF2 %>%
group_by(year) %>%
summarise(y_pa_obs = sum(pa_obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = y_pa_obs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
# Model F3 - year smoother + s(x,y) smoother + pa_cobs smoother
mF3 <- gam(pa_obs ~ s(year) + s(cobs) + s(x, y, bs = "gp", k = 100, m = c(3, 10000)), family = "binomial",
data = df_ss, method = "REML")
summary(mF3)
plot(mF3)
appraise(mF3)
# How does it fit?
temp <- predict(object = mF3, newdata = df_ss, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(mF3$coefficients[1])
df_mF3 <- df_ss
df_mF3$fit <- 1 / (1 + exp(-(temp$fit[,1] + intercept)))
df_mF3$ucl <- 1 / (1 + exp(-(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)))
df_mF3$lcl <- 1 / (1 + exp(-(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)))
df_mF3 %>%
group_by(year) %>%
summarise(y_pa_obs = sum(pa_obs),
yfit = sum(fit),
ylcl = sum(lcl),
yucl = sum(ucl)) %>%
ggplot(aes(x = year, y = y_pa_obs)) + geom_point() + geom_line() +
geom_line(aes(y = yfit), colour = "red") +
geom_ribbon(aes(ymax = yucl, ymin = ylcl),
fill = grey(0.5),
alpha = 0.4)
######################################################
# GAM - simple time series
#
# Model SA - negative binomial
sA <- gam(obs ~ s(year, bs = "tp"), family = nb(),
data = df_sp, method = "REML")
summary(sA)
plot(sA)
appraise(sA)
# How does it fit?
temp <- predict(object = sA, newdata = df_sp, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(sA$coefficients[1])
df_sA <- df_sp
df_sA$fit <- exp(temp$fit[,1] + intercept)
df_sA$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_sA$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_sA %>%
group_by(year) %>%
ggplot(aes(x = year, y = obs)) + geom_point() + geom_line() +
geom_line(aes(y = fit), colour = "red") +
geom_ribbon(aes(ymax = ucl, ymin = lcl),
fill = grey(0.5),
alpha = 0.4) +
ggtitle("short data - nb")
# Model Sb - correct for class observations
sB <- gam(obs ~ s(year, bs = "tp") + s(cobs, bs = "tp"), family = nb(),
data = df_sp, method = "REML")
summary(sB)
plot(sB)
appraise(sB)
# How does it fit?
temp <- predict(object = sB, newdata = df_sp, type = "iterms",
interval = "prediction", se.fit = TRUE)
intercept <- unname(sB$coefficients[1])
df_sB <- df_sp
df_sB$fit <- exp(temp$fit[,1] + intercept)
df_sB$ucl <- exp(temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96)
df_sB$lcl <- exp(temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96)
df_sB %>%
group_by(year) %>%
ggplot(aes(x = year, y = obs)) + geom_point() + geom_line() +
geom_line(aes(y = fit), colour = "red") +
geom_ribbon(aes(ymax = ucl, ymin = lcl),
fill = grey(0.5),
alpha = 0.4)
## INLA - exploratory attempt to refit the year trend with INLA
library(INLA)
# NOTE(review): inla() does not understand mgcv's s() smoother syntax nor a
# method = "REML" argument; this likely needs an f(year, model = "rw2")-style
# formulation instead - TODO confirm before relying on this section.
inlaA <- inla(obs ~ s(year, bs = "tp"), data = df_ss, method = "REML")
summary(inlaA)  # fixed: was summary(inlamA), referencing a non-existent object
# NOTE(review): the lines below still inspect the earlier gaussian gam fit
# `mA` (copy-paste leftover from Model A?), not the INLA fit - TODO confirm.
plot(mA)
temp <- predict(object = mA, newdata = df_ss, type = "iterms",
                interval = "prediction", se.fit = TRUE)
intercept <- unname(mA$coefficients[1])
df_mA <- df_ss
df_mA$fit <- temp$fit[,1] + intercept
df_mA$ucl <- temp$fit[,1] + intercept + temp$se.fit[,1] * 1.96
df_mA$lcl <- temp$fit[,1] + intercept - temp$se.fit[,1] * 1.96
summary(df_mA)
#appraise(mA)
|
410f3b8df8f1a87369bdf413b24a6e5961b4983b
|
7a2e2f6e661540a4a918ed5275b68dd1a8188d8e
|
/data_prep/db_init.R
|
8c206078fae52ef6f6b3ca25b654fa61351363ae
|
[] |
no_license
|
RichardHHill/course_planner
|
a92881fafab24f5e95c4d7d5ce21083fe94c99d6
|
5d2fd51fe7ec581c0781b99bb94878b95b62e08d
|
refs/heads/master
| 2021-07-13T12:29:30.357835
| 2020-08-08T01:59:40
| 2020-08-08T01:59:40
| 189,785,402
| 0
| 0
| null | 2020-08-04T21:41:33
| 2019-06-01T23:11:32
|
R
|
UTF-8
|
R
| false
| false
| 1,575
|
r
|
db_init.R
|
# One-off schema initialisation for the course-planner Postgres database:
# creates the input_ids, semester_names, semester_courses and majors tables,
# then closes the connection.
library(dplyr)
library(RPostgres)
# Use the "default" section of the shiny app's config file for credentials.
Sys.setenv("R_CONFIG_ACTIVE" = "default")
app_config <- config::get(file = "shiny_app/config.yml")
conn <- tychobratools::db_connect(app_config$db)
# Teardown statements, kept commented out for re-initialising the schema;
# the drop order respects the foreign-key dependencies created below.
# DBI::dbExecute(conn, "DROP TABLE IF EXISTS semester_courses")
# DBI::dbExecute(conn, "DROP TABLE IF EXISTS majors")
# DBI::dbExecute(conn, "DROP TABLE IF EXISTS semester_names")
# DBI::dbExecute(conn, "DROP TABLE IF EXISTS input_ids")
# Root table: one row per saved schedule, keyed by a 36-char UUID string.
ids_table_query <- "CREATE TABLE input_ids (
uid VARCHAR(36) PRIMARY KEY,
time_created TIMESTAMPTZ NOT NULL DEFAULT NOW(),
time_modified TIMESTAMPTZ NOT NULL DEFAULT NOW(),
passkey TEXT,
name TEXT
);"
DBI::dbExecute(conn, ids_table_query)
# Semester labels per schedule; cascade-deleted with the parent schedule.
semester_names_query <- "CREATE TABLE semester_names (
semester_uid VARCHAR(36),
schedule_uid VARCHAR(36) REFERENCES input_ids (uid) ON DELETE CASCADE,
semester_name TEXT
);"
DBI::dbExecute(conn, semester_names_query)
# Courses assigned to a semester within a schedule.
# NOTE(review): REFERENCES semester_names (semester_uid) requires that column
# to be UNIQUE or PRIMARY KEY, which it is not above - Postgres will reject
# this statement as written; TODO confirm against a live database.
semester_courses_query <- "CREATE TABLE semester_courses (
schedule_uid VARCHAR(36) REFERENCES input_ids (uid) ON DELETE CASCADE,
semester_uid VARCHAR(36) REFERENCES semester_names (semester_uid) ON DELETE CASCADE,
course_code TEXT,
course_name TEXT
);"
DBI::dbExecute(conn, semester_courses_query)
# Major selections per schedule.
majors_query <- "CREATE TABLE majors (
schedule_uid VARCHAR(36) REFERENCES input_ids (uid) ON DELETE CASCADE,
code TEXT,
name TEXT,
major_name TEXT,
major_id TEXT
);"
DBI::dbExecute(conn, majors_query)
DBI::dbDisconnect(conn)
|
24cd3377a5103584bbfebfa6bff0d1fdd03df7a9
|
0a03b56e5c7f9aa8c50ba1bc8448adc0f3c5d5a9
|
/man/rename_region_code_column.Rd
|
7058435e876fd45f662d5bcbd637bb3e95e7acd7
|
[
"MIT"
] |
permissive
|
FelipeJColon/covidregionaldata
|
ae94fa51215d28c62e99fae607fd2ae6230e6ebb
|
5dd0c648d531263ae8677d3a36dd9f659d13a2f8
|
refs/heads/master
| 2023-02-13T18:24:14.755101
| 2021-01-08T17:17:37
| 2021-01-08T17:17:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 880
|
rd
|
rename_region_code_column.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_region_codes.R
\name{rename_region_code_column}
\alias{rename_region_code_column}
\title{Helper to rename the region code column in each dataset to the correct code type for each country (e.g. ISO-3166-2).}
\usage{
rename_region_code_column(data, country)
}
\arguments{
\item{data}{a data frame with a region_level_1_code column and optionally a region_level_2_code column}
\item{country}{a string with the country of interest}
}
\value{
a tibble with the column(s) renamed to a sensible name
}
\description{
The package relies on column name 'region_level_1_code' etc. during processing but this often isn't the most
sensible name for the column (e.g. iso-3166-2 makes more sense for US states). This simply renames the column as the final step in
processing before returning data to the user.
}
|
ff6ddf2aa9fa26409345b9438ff974c9084a5da3
|
d865df19aa3903d609005b695fcc0cda41de3773
|
/ch3_operctovol_20190102.R
|
c6336d8a499479fb5e4c61f0db301e8aa439edcd
|
[] |
no_license
|
willking2/barnacle-IPM
|
93fa9799cbae3a94f57a6d5c9cb562d940f45213
|
907f36044e245a8d7bd5a3dbaf4643dd8b78b9b3
|
refs/heads/master
| 2020-04-24T00:12:51.143192
| 2019-05-10T20:54:31
| 2019-05-10T20:54:31
| 171,559,521
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,329
|
r
|
ch3_operctovol_20190102.R
|
### ch 3 using expt 2.4.1 ch 2 data
### Will King
### comparing operculum length to volume for barnacles used in ch 2 mesocosm and field experiments. All barnacles pooled together and chosen randomly for measuring (all temps, meso/field, crowding, size pooled together)
### use this to do regression to predict vol from operc length, for ch 3 probability of reprod
### for operc-to-vol regression, only use data of volume 600mm3 or smaller, b/c that was the range used for the vol-to-prob-reproduction regression in ch 2, which you want to use in ch 3
# ---- load data and packages ----
setwd("~/PhD/PhD_projects/ch 3 IPM/analysis")
ov <- read.csv('expt2.4.1_operctovol_20180514.csv', header = T)
library(scales)
# ----- adjust data part 1 ----
# calculate volume
ov$vol <- ov$basal_diameter_mm^2 * ov$height_mm
# ----- explore: operc against various measures ----
plot(ov$vol ~ ov$operculum_length_mm)
abline(h = 600, lty = 2)
# ---- adjust data: limit to 600 mm3 or below ----
ov2 <- ov[ov$vol <= 600, ]
plot(ov2$vol ~ ov2$operculum_length_mm)
# ---- analyze: vol ~ operc length ----
m1 <- lm(vol ~ operculum_length_mm
, data = ov2
)
summary(m1)
# ---- plot: vol ~ operc length, for supp fig ----
pdf('plots/operctovol_ch3.pdf', width = 5.5, height = 5.5)
par(cex = 1.2, mar = c(4.5, 5, 0.5, 0.5))
# make blank graph
plot(vol ~ operculum_length_mm
, data = ov2
, pch = 2
, col = alpha('black', 0.55)
, axes = F
, xlab = ''
, ylab = ''
, xlim = c(0, 6)
, ylim = c(0, 600)
)
# add regression line
lines(seq(1.15, 5.75, by = 0.01)
, predict(m1
, newdata = data.frame(operculum_length_mm = seq(1.15, 5.75, by = 0.01) )
, type = 'response'
)
, lwd = 2
)
# axes and labels
axis(1
, tck = 0.02
, at = seq(0, 6, by = 1)
#, labels = seq(0.5, 6.5)
, pos = 0
)
axis(2
, las = 1
, tck = 0.02
, at = seq(0, 600, by = 100)
, pos = 0
)
axis(3
, tck = 0.02
, at = seq(0, 6, by = 1)
, labels = F
, pos = 600
)
axis(4
, tck = 0.02
, at = seq(0, 600, by = 100)
, labels = F
, pos = 6
)
mtext(expression(paste('Body volume, ', mm^3))
, side = 2, line = 2, cex = 1.2)
mtext('Operculum length, mm', side = 1, line = 2, cex = 1.2)
dev.off()
|
41b62c5b1546122e596714a9d548047d5a27f2bc
|
3e3cbe2c54db5ac4267d17764877d0e824e08be5
|
/man/trp.valueMatrix.Rd
|
f74119dd57eeac58851e1a783ad55d2a97f8c328
|
[] |
no_license
|
avilesd/productConfig
|
7444e5d04ce526654b8554a1d6af3adb71d7e8d5
|
516dd365c7f2c1733de2644a180a9098013a4125
|
refs/heads/master
| 2020-04-06T07:02:03.512297
| 2016-08-18T23:14:38
| 2016-08-18T23:14:38
| 37,199,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,094
|
rd
|
trp.valueMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TRPFunctions.R
\name{trp.valueMatrix}
\alias{trp.valueMatrix}
\title{Returns a Value Matrix using three reference points}
\usage{
trp.valueMatrix(dataset, userid = NULL, attr = NULL, rounds = NULL,
cost_ids = NULL, tri.refps = NULL, beta_f = 5, beta_l = 1.5,
beta_g = 1, beta_s = 3)
}
\arguments{
\item{dataset}{data.frame with the user generated data from a product
configurator. See \code{decisionMatrix} for specifications of the dataset.}
\item{userid}{a vector of integers that gives the information of which users
the matrix should be calculated. Vectorised.}
\item{attr}{attributes IDs, vector of integer numbers corresponding to the
attributes you desire to use; attr are assumed to be 1-indexed.}
\item{rounds}{integer vector or text option. Which steps of the configuration
process should be shown? Defaults are first and last step. Text options are
\code{all, first, last}.}
\item{cost_ids}{argument used to convert selected cost attributes into
benefit attributes. Integer vector.}
\item{tri.refps}{numeric matrix or vector - three numbers per attribute,
indicating the minimum requirements, status-quo and the goals for a user
(MR, SQ, G).}
\item{beta(s)}{numeric arguments representing the psychological impact of an
outcome equaling failer (_f), loss (_l), gain (_g) or success (_s). Default
values are taken from our reference paper \code{(5,1,1,3)}.}
}
\value{
a list of value matrices for each user.
}
\description{
This function is based on the value function of the tri-reference point (TRP)
theory. It first builds a decision matrix for each user and then applies the
TRP value function over each value using the three given reference points
(MR, SQ, G) and four other free parameters from the value function. See
references.
}
\details{
This function is an improvement over \code{\link{trpValueMatrix}} and
\code{\link{trpValueMatrix.oneAttr}} since it allows a matrix to be given through
\code{tri.refps}. The matrix should have three columns, first column is for
the minimum requirements, second for the status-quo, and third should be
for the Goal (MR, SQ, G). It should have as many rows as attributes, i.e.
one set of reference points for each attribute.
General: The value matrix has ncol = number of attributes you selected or
all(default) and nrow = number of rounds you selected or the first and
last(default) for all selected users.
\code{dataset} We assume the input data.frame has following columns usid =
User IDs, round = integers indicating which round the user is in (0-index
works best for 'round'), atid = integer column for referring the attribute
ID (1 indexed), selected = numeric value of the attribute for a specific,
given round, selectable = amount of options the user can chose at a given
round, with the current configuration.
\code{userid} is a necessary parameter, without it you'll get a warning.
Default is NULL.
\code{attr} Default calculates with all attributes. Attributes are
automatically read from provided dataset, it is important you always
provide the complete data so that the package functions properly. Moreover,
\code{userid} and \code{attr} will not be sorted and will appear in the
order you input them.
\code{rounds} Default calculates with first and last rounds (initial and
final product configuration). You can give a vector of arbitrarily chosen
rounds as well.
\code{cost_ids} Default assumes all your attributes are of benefit type,
that is a higher value in the attribute means the user is better off than
with a lower value. If one or more of the attributes in your data is of
cost type, e.g. price, so that lower is better then you should identify
this attributes as such, providing their id, they'll be converted to
benefit type (higher amount is better).
About reference points with cost_ids: For a cost attribute it should be
true, that a lower value is better for the user, this should also hold for
the three reference points. So contrary to normal/benefit attributes \code{
for cost attributes} reference points should follow that: \code{mr > sq >
g}.
Note: When converting a cost attribute to a benefit attribute its three
reference points change as well, enter the unconverted refps, the function
transforms them automatically when it detects a \code{cost_ids != NULL}.
But since for cost attributes, lower is better, unconverted they should
follow (G < SQ < MR).
}
\examples{
#Not runnable yet
trpValueMatrix(pc_config_data, 9:11, mr = 0.5, sq = 2, g = 4.7)
trpValueMatrix(my_data, userid = 11, attr = 1, cost_ids = 1, mr = 10, sq = 5, g =3) # Note that for cost attributes: MR > SQ > G
trpValueMatrix(keyboard_data, 60, rounds = "first", attr=1, mr = 0.5, sq = 1.8, g = 2.5, beta_f = 6)
trpValueMatrix(data1, 2) # Returns an error since no reference points entered (mr, sq, g)
}
\references{
[1]Wang, X. T.; Johnson, Joseph G. (2012) \emph{A tri-reference
point theory of decision making under risk. }Journal of Experimental
Psychology
}
|
6b2f93e7b4ebd7203c5cb23794389b7b7d116226
|
4c79590a7f0ca235979a1e663f865240fc7f0f2a
|
/Test1.R
|
7c0c6e369ac2536f4aa4259c3252a5bb52909f1c
|
[] |
no_license
|
ToniBarta/Clustering-data-machine-learning-in-R
|
4379cc5f5f79c09b24f1be8eb31c07c3128f692a
|
e8bc8dbd1cc38a365a1df769d69829a225230f78
|
refs/heads/master
| 2021-01-13T14:19:52.138770
| 2014-03-09T19:34:12
| 2014-03-09T19:34:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,972
|
r
|
Test1.R
|
library(RPostgreSQL)
library(hash)
# Open a connection to the local development database; `con` is reused by
# every dbGetQuery() call below.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname="momentus_development")
# Diagnostics only: list open connections, driver info, connection summary.
dbListConnections(drv)
# library(RPostgreSQL)
dbGetInfo(drv)
summary(con)
# rs <- dbSendQuery(con,"select songs_skipped from intra_weather_categories")
# fetch(rs,n=-1) ## return all elements
# Inspect the columns available on the intra_time_of_days table.
dbListFields(con,"intra_time_of_days")
# THIS DATA IS FOR INTRA_TIME_OF_DAYS
# One row per (user, time-of-day). The CASE maps each textual time-of-day
# label onto an ordinal 1..8 so it can be plotted and clustered numerically.
# "discoverability" = 1 - (new songs skipped / all songs skipped); rows with
# songs_skipped = 0 are excluded to avoid division by zero.
intra_times = dbGetQuery(con,"select distinct user_id,
case
when variable = 'early morning' then 1
when variable = 'morning' then 2
when variable = 'late morning' then 3
when variable = 'afternoon' then 4
when variable = 'late afternoon' then 5
when variable = 'evening' then 6
when variable = 'night' then 7
when variable = 'late night' then 8
end,
(1 - new_song_skipped*1.0/songs_skipped) AS discoverability from intra_time_of_days where songs_skipped != 0")
# dada = as.matrix(da)
# Quick look: discoverability (column 3) against the ordinal period (column 2).
plot(intra_times[3:2])
colnames(intra_times) <- c("user_id", "row_number", "discoverability")
# THIS DATA IS FOR INTRA_DAY_OF_WEEKS
# Same discoverability metric, keyed by weekday (1 = monday .. 7 = sunday).
intra_days = dbGetQuery(con,"select distinct user_id,
case
when variable = 'monday' then 1
when variable = 'tuesday' then 2
when variable = 'wednesday' then 3
when variable = 'thursday' then 4
when variable = 'friday' then 5
when variable = 'saturday' then 6
when variable = 'sunday' then 7
end,
(1 - new_song_skipped*1.0/songs_skipped) AS discoverability from intra_day_of_weeks where songs_skipped != 0")
# Scatter of discoverability vs. weekday index.
plot(intra_days[3:2])
colnames(intra_days) <- c("user_id", "row_number", "discoverability")
# THIS DATA IS FOR INTRA_MONTHS
# Same discoverability metric, keyed by calendar month (1 = january ..
# 12 = december).
intra_months = dbGetQuery(con,"select distinct user_id,
case
when variable = 'january' then 1
when variable = 'february' then 2
when variable = 'march' then 3
when variable = 'april' then 4
when variable = 'may' then 5
when variable = 'june' then 6
when variable = 'july' then 7
when variable = 'august' then 8
when variable = 'september' then 9
when variable = 'october' then 10
when variable = 'november' then 11
when variable = 'december' then 12
end,
(1 - new_song_skipped*1.0/songs_skipped) AS discoverability from intra_months where songs_skipped != 0")
# Scatter of discoverability vs. month index.
plot(intra_months[3:2])
colnames(intra_months) <- c("user_id", "row_number", "discoverability")
# THIS DATA IS FOR INTRA_WEATHER
# Weather categories have no natural order, so the subquery assigns each
# distinct category a row_number() (ordered by category name) to serve as
# its numeric id.
# NOTE(review): no colnames() call is needed here — the SELECT already
# aliases the columns as user_id / row_number / discoverability.
intra_weathers = dbGetQuery(con, "SELECT DISTINCT iw.user_id, w.row_number,
(1 - iw.new_song_skipped*1.0/iw.songs_skipped) AS discoverability
from intra_weather_categories iw, (SELECT row_number() OVER(ORDER BY category), category
FROM weathers GROUP BY category) w
WHERE w.category = iw.variable AND iw.songs_skipped != 0 ORDER BY iw.user_id, w.row_number")
plot(intra_weathers[3:2])
# THIS DATA IS FOR INTRA_LOCATION
# Cities are mapped to numeric ids the same way: row_number() over the
# distinct city names (ordered alphabetically) in the locations table.
intra_locations = dbGetQuery(con, "SELECT DISTINCT ic.user_id, c.row_number,
(1 - ic.new_song_skipped*1.0/ic.songs_skipped) AS discoverability
from intra_cities ic, (SELECT row_number() OVER(ORDER BY city), city
FROM locations GROUP BY city) c
WHERE c.city = ic.variable AND ic.songs_skipped != 0 ORDER BY ic.user_id, c.row_number")
plot(intra_locations[3:2])
# THIS DATA IS FOR TEMPERATURE
# Temperature buckets come from the weathers table; row_number() over the
# distinct temperature values supplies the numeric id joined against
# intra_temperatures.variable.
intra_temperatures = dbGetQuery(con, "SELECT DISTINCT iw.user_id, w.row_number,
(1 - iw.new_song_skipped*1.0/iw.songs_skipped) AS discoverability
from intra_temperatures iw, (SELECT row_number() OVER(ORDER BY temperature), temperature
FROM weathers GROUP BY temperature) w
WHERE w.temperature = iw.variable AND iw.songs_skipped != 0 ORDER BY iw.user_id, w.row_number")
plot(intra_temperatures[3:2])
# @@@@@@@@@@@@@ FUNCTION TO RETURN A 3d MATRIX BASED ON VARIABLE @@@@@@@@@@@@@@@@
# Pivot the long-format frame (user_id, row_number, discoverability) into a
# 3-D array indexed [variable id, slot, field], where field 1 holds user_id
# and field 2 the discoverability score. Slots fill left-to-right in the
# order rows appear after sorting by row_number; unused cells stay 0.
#
# Fixes in this version:
#  - The original wrote the final observation under row_number[i] (the
#    previous variable) instead of row_number[i+1], overwriting slot 1 of
#    the wrong variable whenever the last two rows differed.
#  - The original loop `1:(nrow - 1)` degenerated to c(1, 0) for a
#    single-row input; grouping by unique() handles any nrow >= 1.
#
# @param intra_matrix data.frame with columns user_id, row_number
#   (1-indexed variable id) and discoverability.
# @return numeric array of dim c(max(row_number), 1000, 2).
getInfoBasedOnVariables <- function(intra_matrix){
  ordered_intra <- intra_matrix[order(intra_matrix$row_number), ]
  intra_3dMatrix <- array(0, dim = c(max(ordered_intra$row_number), 1000, 2))
  # Group rows by variable id and drop each group into consecutive slots.
  for (rn in unique(ordered_intra$row_number)) {
    idx <- which(ordered_intra$row_number == rn)
    intra_3dMatrix[rn, seq_along(idx), 1] <- ordered_intra[["user_id"]][idx]
    intra_3dMatrix[rn, seq_along(idx), 2] <- ordered_intra[["discoverability"]][idx]
  }
  return(intra_3dMatrix)
}
# @@@@@@@@@@@@@@@@@@@@@@@@@@ END OF FUNCTION @@@@@@@@@@@@@@@@@@@@@@@@@@
# Build the [time-of-day x slot x field] matrix for the time data.
intra_times_3dMatrix = getInfoBasedOnVariables(intra_times)
# to get rid of the 0's from the matrixs
# numberMatrix <- intra_3dMatrix[ , rowSums(abs(intra_3dMatrix[, ,]))>0 & rowSums(abs(intra_3dMatrix[, ,]))>0, ]
# @@@@@@@@@@@@@ FUNCTION TO RETURN A 3d MATRIX BASED ON USERS @@@@@@@@@@@@@
# Pivot the long-format frame (user_id, row_number, discoverability) into a
# per-user 3-D array indexed [user slot, observation slot, field], where
# field 1 holds row_number and field 2 the discoverability score. As a
# deliberate side effect (kept from the original interface, which callers
# below rely on), the ids matching the user slots are written to the GLOBAL
# `userIdArray` via `<<-`.
#
# Fixes in this version:
#  - The original's `if (i != 1)` guard silently dropped the SECOND user
#    from userIdArray whenever the user id changed between rows 1 and 2.
#  - The original loop `1:(nrow - 1)` degenerated to c(1, 0) for a
#    single-row input; grouping by unique() handles any nrow >= 1.
#
# @param intra_matrix data.frame with columns user_id, row_number and
#   discoverability.
# @return numeric array of dim c(400, 20, 2); one row per distinct user,
#   in ascending user_id order (matching userIdArray).
getInfoBasedOnUsers <- function(intra_matrix){
  ordered_intra <- intra_matrix[order(intra_matrix$user_id), ]
  ids <- unique(ordered_intra$user_id)
  # TODO set up the actual array sizes (500 / 400x20 are assumed maxima).
  userIdArray <<- array(0, 500)
  userIdArray[seq_along(ids)] <<- ids
  user3DMatrix <- array(0, dim = c(400, 20, 2))
  # One matrix row per user; observations fill consecutive slots.
  for (k in seq_along(ids)) {
    idx <- which(ordered_intra$user_id == ids[k])
    user3DMatrix[k, seq_along(idx), 1] <- ordered_intra[["row_number"]][idx]
    user3DMatrix[k, seq_along(idx), 2] <- ordered_intra[["discoverability"]][idx]
  }
  return(user3DMatrix)
}
# @@@@@@@@@@@@@@@@@@@@@@@@@@ END OF FUNCTION @@@@@@@@@@@@@@@@@@@@@@@@@@
# Build a per-user matrix for every context dimension. getInfoBasedOnUsers()
# writes the matching user ids into the GLOBAL `userIdArray`, so each call
# is immediately followed by a snapshot of that global before the next call
# overwrites it.
intra_times_user_3dMatrix = getInfoBasedOnUsers(intra_times)
intra_times_userID = userIdArray
intra_days_user_3dMatrix = getInfoBasedOnUsers(intra_days)
intra_days_userID = userIdArray
intra_months_user_3dMatrix = getInfoBasedOnUsers(intra_months)
intra_months_userID = userIdArray
intra_weathers_user_3dMatrix = getInfoBasedOnUsers(intra_weathers)
intra_weathers_userID = userIdArray
intra_locations_user_3dMatrix = getInfoBasedOnUsers(intra_locations)
intra_locations_userID = userIdArray
intra_temperatures_user_3dMatrix = getInfoBasedOnUsers(intra_temperatures)
intra_temperatures_userID = userIdArray
# user3DMatrix[1, ,]
# numberMatrix <- user3DMatrix[ 1 , rowSums(abs(user3DMatrix[1 , ,]))>0 & rowSums(abs(user3DMatrix[1 , ,]))>0, ]
# plot(numberMatrix)
# Drop all-zero (unused) observation slots for a single user before plotting.
numberMatrix <- intra_times_user_3dMatrix[ 1 , rowSums(abs(intra_times_user_3dMatrix[1 , ,]))>0 & rowSums(abs(intra_times_user_3dMatrix[1 , ,]))>0, ]
numberMatrix <- intra_days_user_3dMatrix[ 3 , rowSums(abs(intra_days_user_3dMatrix[3 , ,]))>0 & rowSums(abs(intra_days_user_3dMatrix[3 , ,]))>0, ]
# Run k-means on columns 3 and 2 of the supplied frame (discoverability vs.
# category index), draw the points coloured by cluster membership with the
# centroids overlaid, and return the fitted kmeans object.
#
# @param intra_matrix data.frame whose 2nd/3rd columns are clustered.
# @param nr_clusters number of clusters (k).
# @param nr_iterations maximum k-means iterations (iter.max).
# @param nr_start number of random starts (nstart).
# @return the object returned by stats::kmeans().
kmeansClustering <- function(intra_matrix, nr_clusters, nr_iterations, nr_start){
  features <- intra_matrix[3:2]
  fit <- kmeans(features, nr_clusters, iter.max = nr_iterations, nstart = nr_start)
  #pdf('intra_temperature_cluster')
  # Scatter coloured by assigned cluster, centroids marked with stars.
  plot(features, col=fit$cluster)
  require(graphics)
  points(fit$centers, col = 1:15, pch = 8)
  #dev.off()
  return(fit)
}
# intra_times_cluster = kmeansClustering(intra_times, 24, 50, 50)
# intra_days_cluster = kmeansClustering(intra_days, 14, 50, 50)
# intra_months_cluster = kmeansClustering(intra_months, 30, 50, 50)
#
# intra_locations_cluster = kmeansClustering(intra_locations, 50, 50, 50)
#
# intra_weathers_cluster = kmeansClustering(intra_weathers, 10, 100, 100)
# intra_temperatures_cluster = kmeansClustering(intra_temperatures, 50, 50 ,50)
# GETTING the users that are in the same cluster
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.