blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d899c605f5e3c5b44a6ec5e199f126c7ed316e3 | b251f9b356673d08b21093b59690021dd8653d48 | /man/google.Rd | e9f1a19ecf63a314e53ca502d41ffd81c0b4501a | [] | no_license | GarrettMooney/moonmisc | c501728302e35908f888028f9be4522921b08be3 | 0dee0c4e5b3b55d93721a4c70501e7322d44cd15 | refs/heads/master | 2020-03-24T13:38:25.650579 | 2019-10-19T18:22:05 | 2019-10-19T18:22:05 | 142,748,283 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 192 | rd | google.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{google}
\alias{google}
\title{Search google}
\usage{
google(string)
}
\description{
Search google
}
|
490e7b8207c88a76ae553635c423053449af1085 | 6daeb33a35fd354502e1c23e977355295eef6f6c | /man/REMIND_FinalEnergy.Rd | 6a120f13e9cb45bcb551463b360e2db56f4e0f9d | [] | no_license | pik-piam/rmndt | 2642f3b2703b148f37bd942b3b96ae7a8a0bbbbc | f7b0704d78f2058c690885726247c703d9677277 | refs/heads/master | 2023-07-10T13:14:27.388585 | 2023-07-10T09:32:59 | 2023-07-10T09:32:59 | 243,305,595 | 0 | 3 | null | 2023-07-10T09:33:00 | 2020-02-26T16:07:29 | R | UTF-8 | R | false | true | 583 | rd | REMIND_FinalEnergy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{REMIND_FinalEnergy}
\alias{REMIND_FinalEnergy}
\title{A random REMIND FE trajectory.}
\format{
A data frame with 2011 rows and 6 columns:
\describe{
\item{year}{REMIND time step}
\item{region}{REMIND-12 region}
\item{se}{Secondary energy identifier}
\item{fe}{Final energy identifier}
\item{te}{Conversion technology identifier}
\item{value}{The data column, EJ/yr}
}
}
\usage{
REMIND_FinalEnergy
}
\description{
A random REMIND FE trajectory.
}
\keyword{datasets}
|
dee9642dda18cbc44a75febc45c8487f1c6cf654 | 7c2d2af394b7452ad10f17aacd18a148a3d8aaf4 | /04_newdata.R | 900b67760f4ee2740d7ecd768c07af25ca342a31 | [] | no_license | VictorNautica/tlhc | 3ef37d9c5c05056bb94d53b865aa9c264e5f0cd4 | 5f8e751d3cb4547783fe4c3f823528daf701a7d3 | refs/heads/master | 2022-06-23T20:24:37.522098 | 2020-05-12T11:36:29 | 2020-05-12T11:36:29 | 262,035,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,750 | r | 04_newdata.R | library(tidyverse)
library(readxl)
library(patchwork)
source("C:/Users/victor.yu/OneDrive - Midlands and Lancashire CSU/TLHC random/sql pull script.R")
#### Incidence ####
incidence <- read_csv("data/incidence_age_standardised_rates_20200506T135748.csv")
#### Codes
## Mansfield and Ashfield - 04E and Corby - 03V
## Blackburn with Darwen - 00Q and Liverpool - 99A
## Doncaster - 02X
## Hull - 03F
## Knowlsey - 01J and Halton - 01F
## Newcastle Gateshead - 13T
## North Kirklees - 03J
## Southampton - 10X
## Tameside Glossop - 01Y
## Thurrock - 07G and Luton - 06P
#### Tidy initial
ccg_codes <- c("Mansfield and Ashfield" = "04E",
"Corby" = "03V",
"Blackburn with Darwen" = "00Q",
"Blackpool" = "99A",
"Doncaster" = "02X",
"Hull" = "03F",
"Knowsley" = "01J",
"Halton" = "01F",
"Newcastle Gateshead" = "13T",
"North Kirklees" = "03J",
"Southampton" = "10X",
"Tameside and Glossop" = "01Y",
"Thurrock" = "07G",
"Luton" = "06P")
incidence_plot <- function(ccg_name) {
incidence <- incidence %>% filter(HealthGeographyCode %in% c(ccg_codes, 921))
if (ccg_name == "Mansfield and Ashfield with Corby") {
selected_ccg_code <- c("04E", "03E")
} else if (ccg_name == "Blackburn with Darwen with Blackpool") {
selected_ccg_code <- c("00Q", "99A")
} else if (ccg_name == "Knowsley with Halton") {
selected_ccg_code <- c("01J", "01F")
} else if (ccg_name == "Thurrock and Luton") {
selected_ccg_code <- c("07G", "06P")
} else {
selected_ccg_code <- ccg_codes[which(names(ccg_codes) == ccg_name)]
}
incidence <- incidence %>% mutate(
TLHC_CCG = case_when(
HealthGeographyCode %in% c(
ccg_codes[which(!ccg_codes %in% selected_ccg_code)]) ~ "Other TLHC",
HealthGeographyCode %in% selected_ccg_code ~ ccg_name,
HealthGeographyCode == 921 ~ "National",
TRUE ~ "Other"
)
)
incidence <- incidence %>% mutate_at(vars("TLHC_CCG"), as_factor)
incidence <- incidence %>% mutate_at(vars("TLHC_CCG"), fct_relevel, "Other TLHC", "National", ccg_name)
incidence <- incidence %>% arrange(TLHC_CCG)
incidence_list <- list("55-59" = NA,
"60-64" = NA,
"65-69" = NA,
"70-74" = NA)
incidence_list <- imap(
incidence_list, ~ {
incidence %>% filter(AgeGroup == .y) %>% ggplot(
aes(
Year,
AgeStandardisedRate,
group = HealthGeographyCode,
colour = TLHC_CCG,
alpha = TLHC_CCG
)) +
geom_line(size = 0.6) +
geom_point(size = 0.6) +
labs(y = "Standardised Rate\nper 100,000") +
scale_colour_manual(values = c("lightgrey", "#377eb8", "#e41a1c")) +
scale_alpha_manual(values = c(0.5, 0.75, 1)) +
theme(legend.position = "top",
legend.title = element_blank()) +
labs(title = .y)
}
)
incidence_list[["55-59"]] + incidence_list[["60-64"]] + incidence_list[["65-69"]] + incidence_list[["70-74"]] + plot_layout(guides = "collect") & theme(legend.position = "bottom")
}
#### Mortality 55-74x ####
mortality <- read_csv("data/mortality_age_standardised_rates_20200430T173647.csv")
mortality_plot <- function(ccg_name) {
mortality <- mortality %>% filter(HealthGeographyCode %in% c(ccg_codes, 921))
if (ccg_name == "Mansfield and Ashfield with Corby") {
selected_ccg_code <- c("04E", "03E")
} else if (ccg_name == "Blackburn with Darwen with Blackpool") {
selected_ccg_code <- c("00Q", "99A")
} else if (ccg_name == "Knowsley with Halton") {
selected_ccg_code <- c("01J", "01F")
} else if (ccg_name == "Thurrock and Luton") {
selected_ccg_code <- c("07G", "06P")
} else {
selected_ccg_code <- ccg_codes[which(names(ccg_codes) == ccg_name)]
}
mortality <- mortality %>% mutate(
TLHC_CCG = case_when(
HealthGeographyCode %in% c(
ccg_codes[which(!ccg_codes %in% selected_ccg_code)]) ~ "Other TLHC",
HealthGeographyCode %in% selected_ccg_code ~ ccg_name,
HealthGeographyCode == 921 ~ "National",
TRUE ~ "Other"
)
)
mortality <- mortality %>% mutate_at(vars("TLHC_CCG"), as_factor)
mortality <- mortality %>% mutate_at(vars("TLHC_CCG"), fct_relevel, "Other TLHC", "National", ccg_name)
mortality <- mortality %>% arrange(TLHC_CCG)
mortality_list <- list("55-59" = NA,
"60-64" = NA,
"65-69" = NA,
"70-74" = NA)
mortality_list <- imap(
mortality_list, ~ {
mortality %>% filter(AgeGroup == .y) %>% ggplot(
aes(
Year,
AgeStandardisedRate,
group = HealthGeographyCode,
colour = TLHC_CCG,
alpha = TLHC_CCG
)) +
geom_line(size = 0.6) +
geom_point(size = 0.6) +
labs(y = "Standardised Rate\nper 100,000") +
scale_colour_manual(values = c("lightgrey", "#377eb8", "#e41a1c")) +
scale_alpha_manual(values = c(0.5, 0.75, 1)) +
theme(legend.position = "top",
legend.title = element_blank()) +
labs(title = .y)
}
)
mortality_list[["55-59"]] + mortality_list[["60-64"]] + mortality_list[["65-69"]] + mortality_list[["70-74"]] + plot_layout(guides = "collect") & theme(legend.position = "bottom")
}
## NCRAS Valid Stage (All Cancers) Proportion of Tumours diagnosed as early syage
ncras_stage <- read_csv("data/NCRAS - Stage at Diagnosis by financial year 2011-Q4-2018-Q1.csv .csv")
relevant_ccgs_and_england <- c("England",
"NHS Mansfield and Ashfield CCG",
"NHS Corby CCG",
"NHS Blackburn with Darwen CCG",
"NHS Blackpool CCG",
"NHS Doncaster CCG",
"NHS Hull CCG",
"NHS Knowsley CCG",
"NHS Halton CCG",
"NHS Newcastle Gateshead CCG",
"NHS North Kirklees CCG",
"NHS Southampton CCG",
"NHS Tameside and Glossop CCG",
"NHS Thurrock CCG",
"NHS Luton CCG")
ncras_stage <- ncras_stage %>% filter(CCG %in% relevant_ccgs_and_england)
stage_function <- function(ccg_name) {
if (ccg_name == "Mansfield and Ashfield with Corby") {
selected_ccg <- c("NHS Mansfield and Ashfield CCG", "NHS Corby CCG ")
} else if (ccg_name == "Blackburn with Darwen with Blackpool") {
selected_ccg <- c("NHS Blackburn with Darwen CCG", "NHS Blackpool CCG")
} else if (ccg_name == "Knowsley with Halton") {
selected_ccg <- c("NHS Knowsley CCG", "NHS Halton CCG")
} else if (ccg_name == "Thurrock and Luton") {
selected_ccg <- c("NHS Thurrock CCG", "NHS Luton CCG")
} else {
selected_ccg <- paste("NHS", ccg_name, "CCG")
}
ncras_stage <- ncras_stage %>% mutate(
TLHC_CCG = case_when(
CCG %in% relevant_ccgs_and_england[which(!relevant_ccgs_and_england %in% c("England", selected_ccg))] ~ "Other TLHC",
CCG %in% selected_ccg ~ ccg_name,
CCG == "England" ~ "National",
TRUE ~ "Other"
)
)
ncras_stage <- ncras_stage %>% mutate_at(vars("TLHC_CCG"), as_factor)
ncras_stage <- ncras_stage %>% mutate_at(vars("TLHC_CCG"), fct_relevel, "Other TLHC", "National", ccg_name)
ncras_stage <- ncras_stage %>% arrange(TLHC_CCG)
ncras_stage %>% ggplot(aes(`Financial Year and Quarter`, `Quarterly Proportion (%)`, group = CCG, colour = TLHC_CCG, alpha = TLHC_CCG)) +
geom_point(size = 0.6) +
geom_line(size = 0.6) +
scale_x_discrete(guide = guide_axis(n.dodge = 2)) +
scale_colour_manual(values = c("lightgrey", "#377eb8", "#e41a1c")) +
scale_alpha_manual(values = c(0.5, 0.75, 1)) +
theme(legend.position = "bottom",
legend.title = element_blank())
}
## Valid Stage Lung Cancer at Decision to Treat
validstage_dtt <- pull_from_sql("CCG_OIS", "Record_Of_Lung_Cancer_Stage_At_Decision_To_Treat1")
validstage_dtt <- validstage_dtt %>% filter(Level %in% c("National", ccg_codes))
validstage_dtt_function <- function(ccg_name) {
if (ccg_name == "Mansfield and Ashfield with Corby") {
selected_ccg_code <- c("04E", "03E")
} else if (ccg_name == "Blackburn with Darwen with Blackpool") {
selected_ccg_code <- c("00Q", "99A")
} else if (ccg_name == "Knowsley with Halton") {
selected_ccg_code <- c("01J", "01F")
} else if (ccg_name == "Thurrock and Luton") {
selected_ccg_code <- c("07G", "06P")
} else {
selected_ccg_code <- ccg_codes[which(names(ccg_codes) == ccg_name)]
}
validstage_dtt <- validstage_dtt %>% mutate(
TLHC_CCG = case_when(
Level %in% c(
ccg_codes[which(!ccg_codes %in% selected_ccg_code)]) ~ "Other TLHC",
Level %in% selected_ccg_code ~ ccg_name,
Level == "National" ~ "National",
TRUE ~ "Other"
)
)
validstage_dtt <- validstage_dtt %>% mutate_at(vars("TLHC_CCG"), as_factor)
validstage_dtt <- validstage_dtt %>% mutate_at(vars("TLHC_CCG"), fct_relevel, "Other TLHC", "National", ccg_name)
validstage_dtt <- validstage_dtt %>% arrange(TLHC_CCG)
validstage_dtt %>% ggplot(aes(Reporting_Period, Indicator_Value, group = Level, colour = TLHC_CCG, alpha = TLHC_CCG)) +
geom_point(size = 0.6) +
geom_line(size = 0.6) +
scale_x_discrete(guide = guide_axis(n.dodge = 2)) +
scale_colour_manual(values = c("lightgrey", "#377eb8", "#e41a1c")) +
scale_alpha_manual(values = c(0.5, 0.75, 1)) +
theme(legend.position = "top",
legend.title = element_blank())
}
## 1 year survival
one_yr_survival <- read_excel("data/Data_Tables_IndexofCancerSurvival_2002_2017.xlsx",
sheet = "Data_Complete")
one_yr_survival <-
one_yr_survival %>% filter(
`Geography type` %in% c("country", "CCG"),
`Cancer site` == "Lung",
Sex == "Persons",
`Geography name` %in% relevant_ccgs_and_england,
`Years since diagnosis` == 1
)
survival_func <- function(ccg_name) {
if (ccg_name == "Mansfield and Ashfield with Corby") {
selected_ccg <- c("NHS Mansfield and Ashfield CCG", "NHS Corby CCG ")
} else if (ccg_name == "Blackburn with Darwen with Blackpool") {
selected_ccg <- c("NHS Blackburn with Darwen CCG", "NHS Blackpool CCG")
} else if (ccg_name == "Knowsley with Halton") {
selected_ccg <- c("NHS Knowsley CCG", "NHS Halton CCG")
} else if (ccg_name == "Thurrock and Luton") {
selected_ccg <- c("NHS Thurrock CCG", "NHS Luton CCG")
} else {
selected_ccg <- paste("NHS", ccg_name, "CCG")
}
one_yr_survival <- one_yr_survival %>% mutate(
TLHC_CCG = case_when(
`Geography name` %in% relevant_ccgs_and_england[which(!relevant_ccgs_and_england %in% c("England", selected_ccg))] ~ "Other TLHC",
`Geography name` %in% selected_ccg ~ ccg_name,
`Geography name` == "England" ~ "National",
TRUE ~ "Other"
)
)
one_yr_survival <- one_yr_survival %>% mutate_at(vars("TLHC_CCG"), as_factor)
one_yr_survival <- one_yr_survival %>% mutate_at(vars("TLHC_CCG"), fct_relevel, "Other TLHC", "National", ccg_name)
one_yr_survival <- one_yr_survival %>% arrange(TLHC_CCG)
one_yr_survival %>% ggplot(aes(`Diagnosis Year`, `Survival (%)`, group = `Geography code`, colour = TLHC_CCG, alpha = TLHC_CCG)) +
geom_point() +
geom_line() +
scale_x_continuous() +
scale_colour_manual(values = c("lightgrey", "#377eb8", "#e41a1c")) +
scale_alpha_manual(values = c(0.5, 0.75, 1)) +
theme(legend.position = "top",
legend.title = element_blank())
}
## sitrep waiting list data (commissioner level) ####
cancer_waiting_list_func <- function(schema, tablename, before, after, ccg_name, y_axis_label) {
df <- pull_from_sql(schema, tablename)
df <- df %>% select(Commissioner_Code, all_of(before), all_of(after), Effective_Snapshot_Date)
df <- bind_rows(
df,
df %>% group_by(Effective_Snapshot_Date) %>% summarise(
before = sum(!!sym(before)),
after = sum(!!sym(after))
) %>% mutate(Commissioner_Code = "England")
) %>% arrange(Effective_Snapshot_Date)
df <- df %>% filter(Commissioner_Code %in% c("England", ccg_codes)) %>% mutate(
pct_2ww = !!sym(before) / (!!sym(before) + !!sym(after))*100)
df <- df %>% filter(Commissioner_Code %in% c("England", ccg_codes))
if (ccg_name == "Mansfield and Ashfield with Corby") {
selected_ccg_code <- c("04E", "03E")
} else if (ccg_name == "Blackburn with Darwen with Blackpool") {
selected_ccg_code <- c("00Q", "99A")
} else if (ccg_name == "Knowsley with Halton") {
selected_ccg_code <- c("01J", "01F")
} else if (ccg_name == "Thurrock and Luton") {
selected_ccg_code <- c("07G", "06P")
} else {
selected_ccg_code <- ccg_codes[which(names(ccg_codes) == ccg_name)]
}
df <- df %>% mutate(
TLHC_CCG = case_when(
Commissioner_Code %in% c(
ccg_codes[which(!ccg_codes %in% selected_ccg_code)]) ~ "Other TLHC",
Commissioner_Code %in% selected_ccg_code ~ ccg_name,
Commissioner_Code == "England" ~ "National",
TRUE ~ "Other"
)
)
df <- df %>% mutate_at(vars("TLHC_CCG"), as_factor)
df <- df %>% mutate_at(vars("TLHC_CCG"), fct_relevel, "Other TLHC", "National", ccg_name)
df <- df %>% arrange(TLHC_CCG)
df <- df %>% mutate_at(vars("Effective_Snapshot_Date"),
function(x) as.POSIXlt(x, tz = "", format = "%Y-%m-%d") %>% zoo::as.yearmon())
# cancer2ww %>% ggplot(
# aes(
# Effective_Snapshot_Date,
# pct_2ww,
# group = Commissioner_Code,
# colour = TLHC_CCG,
# alpha = TLHC_CCG
# )) +
# geom_line(size = 0.6) +
# geom_point(size = 0.6) +
# labs(y = "% seen within 2 weeks (all cancers)") +
# scale_colour_manual(values = c("lightgrey", "#377eb8", "#e41a1c")) +
# scale_alpha_manual(values = c(0.5, 0.75, 1)) +
# theme(legend.position = "top",
# legend.title = element_blank())
qic_chart <- qicharts2::qic(
y = pct_2ww,
x = Effective_Snapshot_Date,
data = df %>% filter(TLHC_CCG == ccg_name),
chart = "i"
)
use <- qic_chart[["data"]]
plot <- use %>% ggplot(aes(x, y.sum)) +
geom_rect(
data = use[1, ],
aes(ymin = unique(use$lcl), ymax = unique(use$ucl)),
xmin = -Inf,
xmax = Inf,
fill = "cadetblue3",
linetype = 0,
alpha = 0.5
) +
geom_hline(yintercept = unique(use$ucl), colour = "cadetblue3") +
geom_hline(yintercept = unique(use$lcl), colour = "cadetblue3") +
geom_hline(yintercept = unique(use$cl), colour = "red", linetype = "dashed") +
geom_point(aes(colour = sigma.signal)) +
geom_line() +
annotate("text", label = "UCL", x = max(use$x), y = unique(use$ucl), hjust = -1.1) +
annotate("text", label = "LCL", x = max(use$x), y = unique(use$lcl), hjust = -1.25) +
annotate("text", label = "CL", x = max(use$x), y = unique(use$cl), hjust = -1.65) +
labs(y = y_axis_label,
x = "Month") +
theme(
axis.title.y = element_text(margin = margin(t = 0, r = 8, b = 0, l = 0)),
text = element_text(size = 14),
axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "none"
) +
scale_y_continuous(labels = function(x) paste0(x, "%")) +
coord_cartesian(clip = "off")
return(plot)
}
# cancer_waiting_list_func("Sitreps", "Cancer_WL_2_Week_Wait_Comm_Mthly1", "No_Of_Patients_Seen_Within_14_Days", "No_Of_Patients_Seen_After_14_Days", "Southampton", "% of Patients Seen\nWithin Two Weeks") ## example
cancer_waiting_list_func(
"Sitreps",
"Cancer_WL_2_Week_Wait_Comm_Mthly1",
"No_Of_Patients_Seen_Within_14_Days",
"No_Of_Patients_Seen_After_14_Days",
"Southampton",
"% of Patients Receiving First Treatment\nWithin 31 Days"
)
|
75e3bb70facb9f7a6e2305e76c247a0391f56de1 | ebf779e254a6eaf60b6a9fd20ceb1fdb392b6a63 | /man/rmse.Rd | a90e75c88752236bc06afbead00fcf482398d6bd | [] | no_license | alexpghayes/hayeslib | 10f89b6b31c7aec56812e53977f5c63caff157ea | b838b7fa8bf8779f5421a0ee33d3ca2d902409db | refs/heads/master | 2021-01-21T15:31:42.281161 | 2018-11-03T19:26:03 | 2018-11-03T19:26:03 | 91,849,756 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 364 | rd | rmse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{rmse}
\alias{rmse}
\title{Get the RMSE of a set of predictions}
\usage{
rmse(predicted, true)
}
\arguments{
\item{predicted}{Predicted values}
\item{true}{True values}
}
\value{
RMSE between true and predicted values
}
\description{
Get the RMSE of a set of predictions
}
|
cef220caa94da2f791b248d4b98d1fd95556a4c0 | 44c406b80aafd5c04e6619ca9ffc7e559d0294fe | /man/domain_values.Rd | 9645fa0763fdfbe805cf2b1f3cd980a5d5365fc1 | [
"MIT"
] | permissive | mps9506/rATTAINS | c18245e39c96db341c129202410014187b0261de | 3be587a5e941a327247770ff858b77c546d37fda | refs/heads/main | 2023-04-26T02:29:49.486927 | 2023-04-25T20:31:36 | 2023-04-25T20:31:36 | 233,949,403 | 3 | 1 | NOASSERTION | 2023-04-07T17:57:09 | 2020-01-14T22:35:26 | R | UTF-8 | R | false | true | 1,533 | rd | domain_values.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/domain_values.R
\name{domain_values}
\alias{domain_values}
\title{Download Domain Values}
\usage{
domain_values(domain_name = NULL, context = NULL, tidy = TRUE, ...)
}
\arguments{
\item{domain_name}{(character) Specified the domain name to obtain valid
parameter values for. Defaults to \code{NULL} which will a tibble with all
the domain names. To return the allowable parameter values for a given
domain, the domain should be specified here. optional}
\item{context}{(character) When specified, the service will return
domain_name values alongside the context. optional.}
\item{tidy}{(logical) \code{TRUE} (default) the function returns a tidied
tibble. \code{FALSE} the function returns the raw JSON string.}
\item{...}{list of curl options passed to \code{\link[crul:HttpClient]{crul::HttpClient()}}}
}
\value{
If \code{tidy = FALSE} the raw JSON string is returned, else the JSON
data is parsed and returned as a tibble.
}
\description{
Provides information on allowed parameter values in ATTAINS.
}
\note{
Data downloaded from the EPA webservice is automatically cached to
reduce uneccessary calls to the server.
}
\examples{
\dontrun{
## return a tibble with all domain names
domain_values()
## return allowable parameter values for a given domain name and context
domain_values(domain_name="UseName",context="TCEQMAIN")
## return the query as a JSON string instead
domain_values(domain_name="UseName",context="TCEQMAIN", tidy= FALSE)
}
}
|
2d07667d05711fb97ac03e2e77dc7f3b9ee52f6f | 8c12aa7c2d810e517613357e5e81a08e2f0c93f2 | /R/util.R | f7608d81a2a2fa2f5229bed0fedb5753a7542e92 | [] | no_license | sxinger/COVID-SOFA-SLLM-MLLM | 94a46fbf360b9e6b098032441228532a04aac215 | 135f068f4a618654cfa07ec24f187466d3de62a6 | refs/heads/master | 2023-02-24T18:28:10.679391 | 2021-01-31T22:14:05 | 2021-01-31T22:14:05 | 298,627,745 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,948 | r | util.R | ##---------------------------helper functions--------------------------------------##
## install (if needed) and require packages
require_libraries<-function(package_list){
#install missing packages
install_pkg<-as.data.frame(installed.packages())
new_packages<-package_list[!(package_list %in% install_pkg[which(install_pkg$LibPath==.libPaths()[1]),"Package"])]
if(length(new_packages)>0){
install.packages(new_packages,lib=.libPaths()[1],repos = "http://cran.us.r-project.org")
}
for (lib in package_list) {
library(lib, character.only=TRUE,lib.loc=.libPaths()[1])
cat("\n", lib, " loaded.", sep="")
}
}
connect_to_db<-function(DBMS_type,driver_type=c("OCI","JDBC"),config_file){
if(is.null(driver_type)){
stop("must specify type of database connection driver!")
}
if(DBMS_type=="Oracle"){
if(driver_type=="OCI"){
require_libraries("ROracle")
conn<-dbConnect(ROracle::Oracle(),
config_file$username,
config_file$password,
file.path(config_file$access,config_file$sid))
}else if(driver_type=="JDBC"){
require_libraries("RJDBC")
# make sure ojdbc6.jar is in the AKI_CDM folder
# Source: https://www.r-bloggers.com/connecting-r-to-an-oracle-database-with-rjdbc/
drv<-JDBC(driverClass="oracle.jdbc.OracleDriver",
classPath="./ojdbc6.jar")
url <- paste0("jdbc:oracle:thin:@", config_file$access,":",config_file$sid)
conn <- RJDBC::dbConnect(drv, url,
config_file$username,
config_file$password)
}else{
stop("The driver type is not currently supported!")
}
}else if(DBMS_type=="tSQL"){
require_libraries("RJDBC")
# make sure sqljdbc.jar is in the AKI_CDM folder
drv <- JDBC(driverClass="com.microsoft.sqlserver.jdbc.SQLServerDriver",
classPath="./sqljdbc.jar",
identifier.quote="`")
url <- paste0("jdbc:sqlserver:", config_file$access,
";DatabaseName=",config_file$cdm_db_name,
";username=",config_file$username,
";password=",config_file$password)
conn <- dbConnect(drv, url)
}else if(DBMS_type=="PostgreSQL"){
#not tested yet!
require_libraries("RPostgres")
server<-gsub("/","",str_extract(config_file$access,"//.*(/)"))
host<-gsub(":.*","",server)
port<-gsub(".*:","",server)
conn<-dbConnect(RPostgres::Postgres(),
host=host,
port=port,
dbname=config_file$cdm_db_name,
user=config_file$username,
password=config_file$password)
}else{
stop("the DBMS type is not currectly supported!")
}
attr(conn,"DBMS_type")<-DBMS_type
attr(conn,"driver_type")<-driver_type
return(conn)
}
chunk_load<-function(conn,dataset="",by_row=T,sample_by="",
chunk_size=1000,download_chunk=F,verb=T){
dat<-c()
i<-0
error<-FALSE
row_remain<-Inf
if(by_row){
while(!error&row_remain>0){
dat_add<-dbGetQuery(conn,
paste("select * from (",
"select m.*, rownum r from",dataset," m)",
"where r >= ",i+1,"and r < ",i+chunk_size))
#check remaining rows
row_remain<-nrow(dat_add)
#attach rows
if(download_chunk){
saveRDS(dat_add,file=paste0("./data/raw/",dataset,"_",i,".rda"))
}else{
dat %<>% bind_rows(dat_add)
}
#report progress
if(verb){
cat("row",i+1,"to","row",i+chunk_size,"loaded.\n")
}
#loop updates
i<-i+chunk_size
}
}else{
if(sample_by==""){
stop("Must specify the column name by which to cut dataset into chunks when by_row = F!")
}
pos<-3
for(i in seq(0,9,1)){
dat_add<-dbGetQuery(conn,
paste("select * from",dataset,
"where substr(lpad(",sample_by,",",pos*2,",0),",pos,",",pos,")=",i))
#attach rows
if(download_chunk){
saveRDS(dat_add,file=paste0("./data/raw/",dataset,"_",i,".rda"))
}else{
dat %<>% bind_rows(dat_add)
}
#report progress
if(verb){
cat(sample_by,"with",pos,"th digit equals to",i,"loaded.\n")
}
}
}
if(!download_chunk){
return(dat)
}
}
## parse Oracle sql lines
parse_sql<-function(file_path,...){
param_val<-list(...)
#read file
con<-file(file_path,"r")
#initialize string
sql_string <- ""
#intialize result holder
params_ind<-FALSE
tbl_out<-NULL
action<-NULL
while (TRUE){
#parse the first line
line <- readLines(con, n = 1)
#check for endings
if (length(line)==0) break
#collect overhead info
if(grepl("^(/\\*out)",line)){
#output table name
tbl_out<-trimws(gsub("(/\\*out\\:\\s)","",line),"both")
}else if(grepl("^(/\\*action)",line)){
#"write" or "query"(fetch) the output table
action<-trimws(gsub("(/\\*action\\:\\s)","",line),"both")
}else if(grepl("^(/\\*params)",line)){
params_ind<-TRUE
#breakdown global parameters
params<-gsub(",","",strsplit(trimws(gsub("(/\\*params\\:\\s)","",line),"both")," ")[[1]])
params_symbol<-params
#normalize the parameter names
params<-gsub("&&","",params)
}
#remove the first line
line<-gsub("\\t", " ", line)
#translate comment symbol '--'
if(grepl("--",line) == TRUE){
line <- paste(sub("--","/*",line),"*/")
}
#attach new line
if(!grepl("^(/\\*)",line)){
sql_string <- paste(sql_string, line)
}
}
close(con)
#update parameters as needed
if(params_ind){
#align param_val with params
params_miss<-params[!(params %in% names(param_val))]
for(j in seq_along(params_miss)){
param_val[params_miss[j]]<-list(NULL)
}
param_val<-param_val[which(names(param_val) %in% params)]
param_val<-param_val[order(names(param_val))]
params_symbol<-params_symbol[order(params)]
params<-params[order(params)]
#substitube params_symbol by param_val
for(i in seq_along(params)){
sql_string<-gsub(params_symbol[i],
ifelse(is.null(param_val[[i]])," ",
ifelse(params[i]=="db_link",
paste0("@",param_val[[i]]),
ifelse(params[i] %in% c("start_date","end_date"),
paste0("'",param_val[[i]],"'"),
param_val[[i]]))),
sql_string)
}
}
#clean up excessive "[ ]." or "[@" in tSQL when substitute value is NULL
sql_string<-gsub("\\[\\ ]\\.","",sql_string)
sql_string<-gsub("\\[@","[",sql_string)
out<-list(tbl_out=tbl_out,
action=action,
statement=sql_string)
return(out)
}
## execute single sql snippet
execute_single_sql<-function(conn,statement,write,table_name){
DBMS_type<-attr(conn,"DBMS_type")
driver_type<-attr(conn,"driver_type")
if(write){
#oracle and sql sever uses different connection driver and different functions are expected for sending queries
#dbSendQuery silently returns an S4 object after execution, which causes error in RJDBC connection (for sql server)
if(DBMS_type=="Oracle"){
if(!(driver_type %in% c("OCI","JDBC"))){
stop("Driver type not supported for ",DBMS_type,"!\n")
}else{
try_tbl<-try(dbGetQuery(conn,paste("select * from",table_name,"where 1=0")),silent=T)
if(is.null(attr(try_tbl,"condition"))){
if(driver_type=="OCI"){
dbSendQuery(conn,paste("drop table",table_name)) #in case there exists same table name
}else{
dbSendUpdate(conn,paste("drop table",table_name)) #in case there exists same table name
}
}
if(driver_type=="OCI"){
dbSendQuery(conn,statement)
}else{
dbSendUpdate(conn,statement)
}
}
}else if(DBMS_type=="tSQL"){
if(driver_type=="JDBC"){
try_tbl<-try(dbGetQuery(conn,paste("select * from",table_name,"where 1=0")),silent=T)
if(!grepl("(table or view does not exist)+",tolower(attr(try_tbl,"class")))){
dbSendUpdate(conn,paste("drop table",table_name)) #in case there exists same table name
}
dbSendUpdate(conn,statement)
}else{
stop("Driver type not supported for ",DBMS_type,"!\n")
}
}else{
stop("DBMS type not supported!")
}
}else{
dat<-dbGetQuery(conn,statement)
return(dat)
}
cat("create temporary table: ", table_name, ".\n")
}
## execute multiple sql snippets
#---statements have to be in correct logical order
execute_batch_sql<-function(conn,statements,verb,...){
for(i in seq_along(statements)){
sql<-parse_sql(file_path=statements[i],...)
execute_single_sql(conn,
statement=sql$statement,
write=(sql$action=="write"),
table_name=toupper(sql$tbl_out))
if(verb){
cat(statements[i],"has been executed and table",
toupper(sql$tbl_out),"was created.\n")
}
}
}
## clean up intermediate tables
drop_tbl<-function(conn,table_name){
DBMS_type<-attr(conn,"DBMS_type")
driver_type<-attr(conn,"driver_type")
if(DBMS_type=="Oracle"){
# purge is only required in Oracle for completely destroying temporary tables
drop_temp<-paste("drop table",table_name,"purge")
if(driver_type=="OCI"){
dbSendQuery(conn,drop_temp)
}else if(driver_type=="JDBC"){
dbSendUpdate(conn,drop_temp)
}else{
stop("Driver type not supported for ",DBMS_type,"!.\n")
}
}else if(DBMS_type=="tSQL"){
drop_temp<-paste("drop table",table_name)
if(driver_type=="JDBC"){
dbSendUpdate(conn,drop_temp)
}else{
stop("Driver type not supported for ",DBMS_type,"!.\n")
}
}else{
warning("DBMS type not supported!")
}
}
## print link for LOINC code search result
get_loinc_ref<-function(loinc){
#url to loinc.org
url<-paste0(paste0("https://loinc.org/",loinc))
#return the link
return(url)
}
## pring link for RXNORM codes search result
get_rxcui_nm<-function(rxcui){
#url link to REST API
rx_url<-paste0("https://rxnav.nlm.nih.gov/REST/rxcui/",rxcui,"/")
#get and parse html object
rxcui_obj <- getURL(url = rx_url)
rxcui_content<-htmlParse(rxcui_obj)
#extract name
rxcui_name<-xpathApply(rxcui_content, "//body//rxnormdata//idgroup//name", xmlValue)
if (length(rxcui_name)==0){
rxcui_name<-NA
}else{
rxcui_name<-unlist(rxcui_name)
}
return(rxcui_name)
}
get_ndc_nm<-function(ndc){
#url link to REST API
rx_url<-paste0("https://ndclist.com/?s=",ndc)
#get and parse html object
rx_obj<-getURL(url = rx_url)
if (rx_obj==""){
rx_name<-NA
}else{
#extract name
rx_content<-htmlParse(rx_obj)
rx_attr<-xpathApply(rx_content, "//tbody//td[@data-title]",xmlAttrs)
rx_name<-xpathApply(rx_content, "//tbody//td[@data-title]",xmlValue)[which(rx_attr=="Proprietary Name")]
rx_name<-unlist(rx_name)
if(length(rx_name) > 1){
rx_name<-rx_url
}
}
return(rx_name)
}
#ref: https://www.r-bloggers.com/web-scraping-google-urls/
google_code<-function(code,nlink=1){
code_type<-ifelse(gsub(":.*","",code)=="CH","CPT",
gsub(":.*","",code))
code<-gsub(".*:","",code)
#search on google
gu<-paste0("https://www.google.com/search?q=",code_type,":",code)
html<-getURL(gu)
#parse HTML into tree structure
doc<-htmlParse(html)
#extract url nodes using XPath. Originally I had used "//a[@href][@class='l']" until the google code change.
attrs<-xpathApply(doc, "//h3//a[@href]", xmlAttrs)
#extract urls
links<-sapply(attrs, function(x) x[[1]])
#only keep the secure links
links<-links[grepl("(https\\:)+",links)]
links<-gsub("(\\&sa=U).*$","",links)
links<-paste0("https://",gsub(".*(https://)","",links))
#free doc from memory
free(doc)
return(links[1])
}
## render report
render_report<-function(which_report="./report/AKI_CDM_EXT_VALID_p1_QA.Rmd",
DBMS_type,driver_type,remote_CDM=F,
start_date,end_date=as.character(Sys.Date())){
# to avoid <Error in unlockBinding("params", <environment>) : no binding for "params">
# a hack to trick r thinking it's in interactive environment --not work!
# unlockBinding('interactive',as.environment('package:base'))
# assign('interactive',function() TRUE,envir=as.environment('package:base'))
rmarkdown::render(input=which_report,
params=list(DBMS_type=DBMS_type,
driver_type=driver_type,
remote_CDM=remote_CDM,
start_date=start_date,
end_date=end_date),
output_dir="./output/",
knit_root_dir="../")
}
## convert long mastrix to wide sparse matrix
long_to_sparse_matrix<-function(df,id,variable,val,binary=FALSE){
if(binary){
x_sparse<-with(df,
sparseMatrix(i=as.numeric(as.factor(get(id))),
j=as.numeric(as.factor(get(variable))),
x=1,
dimnames=list(levels(as.factor(get(id))),
levels(as.factor(get(variable))))))
}else{
x_sparse<-with(df,
sparseMatrix(i=as.numeric(as.factor(get(id))),
j=as.numeric(as.factor(get(variable))),
x=ifelse(is.na(get(val)),1,as.numeric(get(val))),
dimnames=list(levels(as.factor(get(id))),
levels(as.factor(get(variable))))))
}
return(x_sparse)
}
# Univariate group-comparison summary for a mixed-type covariate matrix.
#
# Arguments:
#   id        - subject identifier vector (one entry per row of X)
#   grp       - group label vector (coerced to factor)
#   X         - covariate matrix/data.frame, one column per variable
#   data_type - character vector, one entry per column of X, each either
#               "num" (continuous) or "cat" (categorical)
#   pretty    - if TRUE, return a single human-readable summary table;
#               otherwise return list(out_num=..., out_cat=...)
#
# Continuous variables are compared across groups with one-way ANOVA;
# categorical variables with a chi-squared test (simulated p-value).
# Relies on dplyr/tidyr/purrr/broom being attached by the caller.
univar_analysis_mixed<-function(id,grp,X,data_type,pretty=F){
  # every column of X needs a declared type, or we cannot dispatch
  if(ncol(X)!=length(data_type)){
    stop("data types of X need to be specified")
  }
  #TODO: when there is only 1 category
  # --- continuous variables: per-group summaries + ANOVA p-value ---
  df_num<-data.frame(cbind(id,grp,X[,(data_type=="num"),drop=F]),stringsAsFactors=F) %>%
    gather(var,val,-grp,-id) %>%
    mutate(grp=as.factor(grp)) %>%
    mutate(val=as.numeric(val))
  out_num<-df_num %>%
    group_by(var,grp) %>%
    dplyr::summarise(n=length(unique(id)),
                     val_miss=sum(is.na(val)),
                     val_mean=mean(val,na.rm=T),
                     val_sd=sd(val,na.rm=T),
                     val_med=median(val,na.rm=T),
                     val_q1=quantile(val,0.25,na.rm=T),
                     val_q3=quantile(val,0.75,na.rm=T),
                     val_min=min(val,na.rm=T),
                     val_max=max(val,na.rm=T)) %>%
    ungroup %>%
    # attach one ANOVA p-value per variable (val ~ grp)
    left_join(df_num %>%
                nest(-var) %>%
                mutate(fit=map(data, ~ aov(val~grp,data=.x)),
                       tidied=map(fit,tidy)) %>%
                unnest(tidied) %>%
                filter(!is.na(p.value)) %>%
                select(var,p.value),
              by="var") %>%
    mutate(label=paste0(n,"; ",
                        # round(val_miss/n,2),"; ", #missing rate
                        round(val_mean,2),"(",round(val_sd,3),"); ",
                        val_med,"(",val_q1,",",val_q3,")"))
  # --- categorical variables: per-level counts/proportions + chi-squared ---
  df_cat<-data.frame(cbind(id,grp,X[,(data_type=="cat")]),stringsAsFactors=F) %>%
    gather(var,val,-grp,-id) %>%
    mutate(grp=as.factor(grp),val=as.factor(val))
  out_cat<-df_cat %>%
    group_by(grp) %>%
    dplyr::mutate(tot=length(unique(id))) %>%
    ungroup %>%
    group_by(var) %>%
    dplyr::mutate(val_miss=sum(is.na(val))) %>% # missing count per variable
    ungroup %>% filter(!is.na(val)) %>%
    group_by(var,grp,tot,val_miss,val) %>%
    dplyr::summarise(n=length(unique(id))) %>%
    ungroup %>%
    mutate(prop=round(n/tot,4)) %>%
    # chi-squared test of val x grp per variable; simulated p-value so
    # sparse tables do not error out
    left_join(df_cat %>%
                group_by(var) %>%
                dplyr::summarise(p.value=chisq.test(val,grp,simulate.p.value=T)$p.value) %>%
                ungroup,
              by="var") %>%
    mutate(label=paste0(n,"; ",
                        # round(val_miss/n,2),"; ", #missing rate
                        "(",prop*100,"%)"))
  # --- output ---
  if(pretty){
    # one wide table: group sizes first, then "mean(sd) [miss]" rows for
    # numeric variables and "n(%) [miss]" rows per categorical level
    out<-out_num %>%
      select(n,grp) %>% unique %>%
      gather(var,val,-grp) %>%
      mutate(val=as.character(val)) %>%
      spread(grp,val) %>%
      bind_rows(out_num %>%
                  mutate(label2=paste0(round(val_mean,1)," (",round(val_sd,1),")"," [",round(val_miss/n,2),"]")) %>%
                  dplyr::select(var,grp,p.value,label2) %>% spread(grp,label2)) %>%
      bind_rows(out_cat %>%
                  unite("var",c("var","val"),sep="=") %>%
                  mutate(label2=paste0(n," (",round(prop*100,1),"%)"," [",round(val_miss/tot,2),"]")) %>%
                  dplyr::select(var,grp,p.value,label2) %>% spread(grp,label2)) %>%
      mutate(p.value=round(p.value,4)) %>%
      separate("var",c("var","cat"),sep="=",extra="merge",fill="right") %>%
      mutate(cat=case_when(var=="n" ~ "",
                           is.na(cat) ~ "mean(sd) [miss]",
                           TRUE ~ paste0(cat,",n(%) [miss]")))
  }else{
    out<-list(out_num=out_num,
              out_cat=out_cat)
  }
  return(out)
}
# Performance summary for a binary classifier.
#
# Arguments:
#   pred             - predicted scores/probabilities
#   real             - observed 0/1 labels (same length as pred)
#   keep_all_cutoffs - if TRUE, also return the full per-cutoff table
#
# Returns a list with `perf_summ` (one row per overall measure: ROC AUC
# with CI bounds, PR AUCs, sens/spec/ppv/npv at the Euclidean-optimal ROC
# cutoff, prec/rec/F-score at the precision-recall balance point, plus
# cutoff-averaged metrics) and, optionally, `perf_at`.
# Relies on ROCR (performance), PRROC (pr.curve), pROC (ci.auc) and
# dplyr/tidyr being attached by the caller.
get_perf_summ<-function(pred,real,keep_all_cutoffs=F){
  # various performance objects from ROCR, one per metric (pair)
  pred_obj<-ROCR::prediction(pred,real)
  prc<-performance(pred_obj,"prec","rec")
  roc<-performance(pred_obj,"sens","spec")
  nppv<-performance(pred_obj,"ppv","npv")
  pcfall<-performance(pred_obj,"pcfall")
  acc<-performance(pred_obj,"acc")
  fscore<-performance(pred_obj,"f")
  mcc<-performance(pred_obj,"phi")
  # stitch the per-cutoff metrics into one table keyed by score cutoff
  perf_at<-data.frame(cutoff=prc@alpha.values[[1]],
                      prec=prc@y.values[[1]],
                      rec_sens=prc@x.values[[1]],
                      stringsAsFactors = F) %>%
    arrange(cutoff) %>%
    left_join(data.frame(cutoff=nppv@alpha.values[[1]],
                         ppv=nppv@y.values[[1]],
                         npv=nppv@x.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    dplyr::mutate(prec_rec_dist=abs(prec-rec_sens)) %>%
    left_join(data.frame(cutoff=fscore@x.values[[1]],
                         fscore=fscore@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=roc@alpha.values[[1]],
                         spec=roc@x.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    # Euclid_meas: distance to the ideal ROC corner (sens=1, 1-spec=0);
    # Youden_meas: sensitivity + specificity - 1
    dplyr::mutate(Euclid_meas=sqrt((1-rec_sens)^2+(0-(1-spec))^2),
                  Youden_meas=rec_sens+spec-1) %>%
    left_join(data.frame(cutoff=pcfall@x.values[[1]],
                         pcfall=pcfall@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=acc@x.values[[1]],
                         acc=acc@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=mcc@x.values[[1]],
                         mcc=mcc@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    # drop degenerate cutoffs where any of the core metrics is zero
    filter(prec > 0 & rec_sens > 0 & spec > 0) %>%
    group_by(cutoff) %>%
    dplyr::mutate(size=n()) %>%
    ungroup
  # --- overall performance summary ---
  # split predictions by true class for the precision-recall curve
  lab1<-pred[real==1]
  lab0<-pred[real==0]
  pr<-pr.curve(scores.class0 = lab1,
               scores.class1 = lab0,curve=F)
  # ROC AUC with 95% CI: roc_ci is (lower, estimate, upper)
  roc_ci<-pROC::ci.auc(real,pred)
  perf_summ<-data.frame(overall_meas=c("roauc_low",
                                       "roauc",
                                       "roauc_up",
                                       "opt_thresh",
                                       "opt_sens",
                                       "opt_spec",
                                       "opt_ppv",
                                       "opt_npv",
                                       "prauc1",
                                       "prauc2",
                                       "opt_prec",
                                       "opt_rec",
                                       "opt_fscore"),
                        meas_val=c(roc_ci[[1]],
                                   roc_ci[[2]],
                                   roc_ci[[3]],
                                   perf_at$cutoff[which.min(perf_at$Euclid_meas)],
                                   perf_at$rec_sens[which.min(perf_at$Euclid_meas)],
                                   perf_at$spec[which.min(perf_at$Euclid_meas)],
                                   perf_at$ppv[which.min(perf_at$Euclid_meas)],
                                   perf_at$npv[which.min(perf_at$Euclid_meas)],
                                   pr$auc.integral,
                                   pr$auc.davis.goadrich,
                                   perf_at$prec[which.min(perf_at$prec_rec_dist)],
                                   perf_at$rec_sens[which.min(perf_at$prec_rec_dist)],
                                   perf_at$fscore[which.min(perf_at$prec_rec_dist)]),
                        stringsAsFactors = F) %>%
    # append cutoff-averaged metrics as extra summary rows
    bind_rows(perf_at %>%
                dplyr::summarize(prec_m=mean(prec,na.rm=T),
                                 sens_m=mean(rec_sens,na.rm=T),
                                 spec_m=mean(spec,na.rm=T),
                                 ppv_m=mean(ppv,na.rm=T),
                                 npv_m=mean(npv,na.rm=T),
                                 acc_m=mean(acc,na.rm=T),
                                 fscore_m=mean(fscore,na.rm=T),
                                 mcc_m=mean(mcc,na.rm=T)) %>%
                gather(overall_meas,meas_val))
  out<-list(perf_summ=perf_summ)
  if(keep_all_cutoffs){
    out$perf_at<-perf_at
  }
  return(out)
}
# Calibration table: bin predictions into (up to) n_bin quantile bins and
# compare the mean predicted probability with the observed event rate.
#
# Arguments:
#   pred  - predicted probabilities
#   real  - observed 0/1 outcomes (same length as pred)
#   n_bin - target number of quantile bins; duplicate quantile breaks are
#           collapsed, so fewer bins may actually be returned
#
# Returns one row per bin: exposure count, bin bounds/midpoint, observed
# event count (y_agg), mean prediction (pred_p), observed rate (y_p) and
# a 95% interval. Relies on dplyr being attached by the caller.
get_calibr<-function(pred,real,n_bin=20){
  calib<-data.frame(pred=pred,
                    y=real) %>%
    arrange(pred) %>%
    # quantile-based bins; unique() guards against ties producing
    # duplicated break points (cut() would error otherwise)
    dplyr::mutate(pred_bin = cut(pred,
                                 breaks=unique(quantile(pred,0:(n_bin)/(n_bin))),
                                 include.lowest=T,
                                 labels=F)) %>%
    ungroup %>% group_by(pred_bin) %>%
    dplyr::summarize(expos=n(),
                     bin_lower=min(pred),
                     bin_upper=max(pred),
                     bin_mid=median(pred),
                     y_agg = sum(y),
                     pred_p = mean(pred)) %>%
    dplyr::mutate(y_p=y_agg/expos) %>%
    # NOTE(review): the interval is centred on pred_p but uses the binomial
    # SE of the observed rate y_p -- confirm this is intentional (a CI for
    # the observed rate would normally be centred on y_p).
    dplyr::mutate(binCI_lower = pmax(0,pred_p-1.96*sqrt(y_p*(1-y_p)/expos)),
                  binCI_upper = pred_p+1.96*sqrt(y_p*(1-y_p)/expos))
  return(calib)
}
10426a439a7fbc55eaff3316176c307979cb2f4f | 4b0626f4fa41179bdc61eb6ffccc4e487cc92c8b | /functions/read_county_data.R | 8d067b547f708eeb4658105cf4c3fd45f1bb3c31 | [] | no_license | awolff1110/thesis | 5a505c1e9bb45d47ff14b7eda335eeb370d2f6e8 | fc43ca4d9f84eebe28ba068d582e87f1c6ee0abc | refs/heads/master | 2020-03-11T12:10:58.268643 | 2018-04-21T22:22:18 | 2018-04-21T22:22:18 | 129,990,147 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,071 | r | read_county_data.R | ## read county data
## thesis
##
## andrew wolff
## 6 april 2018
## Parse one year of fixed-width county crime data.
##
## Arguments:
##   filename   - path to the raw file; it is read with sep = "|" so each
##                record line becomes a single character field (V1)
##   YEAR       - year label attached to every record
##   crime_list - list whose k-th element is a c(start, stop) pair of
##                character positions, in the order: state, (unused),
##                county, murder, rape, robbery, assault, burglary, all.
##                Element 2 is not referenced by this function.
##
## Returns a data frame with year, state and county codes (character) and
## numeric counts for each offence type.
read_county_data <- function(filename, YEAR, crime_list){
  # helper: slice one fixed-width field out of the raw record
  field <- function(rec, pos) substr(rec, pos[1], pos[2])
  crime_totals <-
    read.table(filename, sep = "|", colClasses = "character") %>%
    mutate(year = YEAR,
           state = field(V1, crime_list[[1]]),
           county1 = field(V1, crime_list[[3]]),
           # offence counts are sliced and converted to numeric in one
           # step (the original sliced first, then re-assigned each
           # column with as.numeric in a second batch of mutate clauses)
           murder = as.numeric(field(V1, crime_list[[4]])),
           rape = as.numeric(field(V1, crime_list[[5]])),
           robbery = as.numeric(field(V1, crime_list[[6]])),
           assault = as.numeric(field(V1, crime_list[[7]])),
           burglary = as.numeric(field(V1, crime_list[[8]])),
           all = as.numeric(field(V1, crime_list[[9]]))) %>%
    select(-V1)
  crime_totals
}
|
a89277bc41eaf2c013f702bd35802932e5703ffa | 1455df4c711d01ffb2f92a0141e541c8650068a7 | /man/golubMerge.Rd | 035e301c157fa7393340109a663e193922ad443f | [] | no_license | cran/CAMAN | f3b9528fdb3c9bdbb68493e1b76b97340b4ebbb9 | 1321cb8e6fcc69c38eb403b1a3882ba875414559 | refs/heads/master | 2023-04-13T23:43:11.142110 | 2023-04-10T22:50:12 | 2023-04-10T22:50:12 | 17,678,211 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,036 | rd | golubMerge.Rd | \name{golubMerge}
\alias{golubMerge}
\alias{golubMerge.exprs}
\alias{sample.labels}
\title{Data from the Golub et al (1999) Paper}
\usage{
data("golubMerge")
}
\description{
The data are from Golub et al. These are the combined training samples and test samples. There are 47 patients with acute lymphoblastic leukemia (ALL) and 25 patients with acute myeloid leukemia (AML). The samples were assayed using Affymetrix Hgu6800 chips and data on the expression of 7129 genes (Affymetrix probes) are available. The data were obtained from the Web site listed below and transformed slightly. Two objects are in the workspace when the data is loaded: \code{golubMerge.exprs} and \code{sample.labels}.
}
\format{
A matrix with 7129 rows (for the genes) and 72 columns (for the patients).
}
\note{The data also appear in the Bioconductor package \code{golubEsets} in a different format. See Schlattmann (2009) for details on how to handle this type of data.}
\examples{
\dontrun{
## microarray analysis example
data(golubMerge)
idxALL <- which(sample.labels== "ALL")
idxAML <- which(sample.labels == "AML")
pvals <- apply(golubMerge.exprs, 1, function(x){t.test(x[idxAML],x[idxALL])[[3]]})
zvals <- qnorm(1-pvals)
hist(zvals,100)
### Z-values are gaussian distributed, mix identifies a mixture of gaussians.
mix <- mixalg(obs=zvals, family="gaussian", startk=25)
hist(mix)
### get False discovery rate (Not-differential expressed genes are in component 1)
getFDR(mix, threshold=.4)
}
}
\source{This data is a variant of the data from the Bioconductor \code{golubEsets} package.}
\references{
Molecular Classification of Cancer: Class Discovery and Class Prediction by Gene Expression Monitoring, Science, 531-537, 1999, T. R. Golub and D. K. Slonim and P. Tamayo and C. Huard and M. Gaasenbeek and J. P. Mesirov and H. Coller and M.L. Loh and J. R. Downing and M. A. Caligiuri and C. D. Bloomfield and E. S. Lander
Schlattmann, P. (2009). \emph{Medical Applications of Finite Mixture Models.} Berlin: Springer.
}
\keyword{datasets}
|
c488db385ea89f86057b15e9fe21cac30c061751 | 74491189b5a4da2a690531146e518523953f3758 | /fbanalyze.r | cc7cefbc074a7501340a6d5a2bba204a5f4b95c4 | [] | no_license | aserlich/frenchBur | 1bb14bc0f9a367ef61465cff0aba7516aa21b4ca | 96de70d753dce0a5751c37308f6e0f2c5de9614f | refs/heads/master | 2020-05-17T00:33:37.282836 | 2012-08-27T05:00:04 | 2012-08-27T05:00:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 102 | r | fbanalyze.r | levs <- read.csv("/Volumes/Optibay-1TB/FrenchBur/2011/output/frenchBurOrgs20120806V2.csv", header = T) |
fcade72725d66bd76f45d80ba816cf66a44edb15 | 77c20c0ec64d281936d94bd87b5791fcbab705be | /R/make_model.R | dd7cea62d29ee898ae566da09cbba23bf7b01660 | [] | no_license | singmann/cmmc | 2130dcbc2ec3bd70b8cc5ad8fb4a2cfac30ca986 | f6a46a5f5cce72695d0a7c4679dd63b397d22aa8 | refs/heads/master | 2016-09-06T10:40:05.597265 | 2014-05-28T09:18:59 | 2014-05-28T09:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,156 | r | make_model.R |
# Build a CmmcMod model object from a model specification.
#
# Arguments:
#   model                - model specification text, parsed by
#                          make_model_list()/read_model()
#   restrictions         - parameter restrictions (currently ignored;
#                          handling is stubbed out below)
#   bounds               - list(lower_bound, upper_bound), one value per
#                          parameter; defaults to [0, 1] for every
#                          parameter
#   starting_values      - list(start_lower, start_upper) for random
#                          starts; defaults to [0.1, 0.9]
#   is_file, is_file_restrictions - flags reserved for file input
#                          (not used in this body)
#
# Builds an environment shared by the generated prediction/likelihood/
# gradient/Hessian closures and returns a new "CmmcMod" S4 object with
# the byte-compiled functions. Relies on sibling helpers predict_model,
# llk_model, make.llk.function/gradient/hessian and {gradient,hessian}_model.
make_model <- function(model, restrictions = NULL, bounds = NULL, starting_values = NULL, is_file = FALSE, is_file_restrictions = FALSE) {
  # read model:
  model_list <- make_model_list(model)
  # parameters that will be displayed in the output:
  parameters_show <- model_list[["parameter"]]
  # handle restrictions (not yet implemented):
  if (!is.null(restrictions)) {
    NULL # restriction handling here
    # make new model_list
  }
  ## local variables for easy programming:
  n_parameter <- length(model_list[["parameter"]])
  # number of response categories per item type (one model per category)
  n_item_type <- vapply(model_list[["model_list"]], length, 0)
  # shared environment that the generated closures read from
  model_environment <- new.env()
  #browser()
  #assign("model_list", model_list[["model_list"]], envir=model_environment)
  assign("unlist_model_list", unlist(model_list[["model_list"]]), envir=model_environment)
  assign("parameter", model_list[["parameter"]], envir=model_environment)
  assign("length_parameter", length(model_list[["parameter"]]), envir=model_environment)
  assign("n_item_type", n_item_type, envir=model_environment)
  # placeholder data: uniform probabilities within each item type
  assign("data", rep(1, sum(n_item_type))/rep(n_item_type, times = n_item_type), envir=model_environment)
  # expose each data point as its own variable (cmmc_data.1, cmmc_data.2, ...)
  for (d in seq_along(model_environment[["data"]])) assign(paste("cmmc_data.", d, sep = ""), model_environment[["data"]][d], envir = model_environment)
  #ls.str(envir = model_environment)
  # make functions (prediction, likelihood, ...); symbolic gradient and
  # Hessian may fail to derive (see ?D), in which case they are NULL and
  # only numerical versions remain available
  predict <- predict_model(model_environment)
  objective <- llk_model(model_environment)
  likelihood <- tryCatch(make.llk.function(model_list[["model_list"]]), error = function(e) {warning("likelihood function cannot be build, please report example."); NULL})
  assign("llk.gradient", tryCatch(make.llk.gradient(likelihood, model_list[["parameter"]]), error = function(e) {message("gradient function cannot be build (probably derivation failure, see ?D)\n  Only numerical gradient available."); NULL}), envir=model_environment)
  assign("llk.hessian", tryCatch(make.llk.hessian(likelihood, model_list[["parameter"]]), error = function(e) {message("Hessian function cannot be build (probably derivation failure, see ?D)\n  Only numerical Hessian available."); NULL}), envir=model_environment)
  gradient <- if (!is.null(model_environment[["llk.gradient"]])) gradient_model(model_environment) else NULL
  hessian <- if (!is.null(model_environment[["llk.hessian"]])) hessian_model(model_environment) else NULL
  # create default bounds / starting-value ranges when not supplied:
  if (is.null(bounds)) {
    bounds <- list(
      lower_bound = rep(0, n_parameter),
      upper_bound = rep(1, n_parameter)
    )
  }
  if (is.null(starting_values)) {
    starting_values <- list(
      start_lower = rep(0.1, n_parameter),
      start_upper = rep(0.9, n_parameter)
    )
  }
  # return CmmcMod object:
  # NOTE(review): when gradient/hessian are NULL (symbolic derivation
  # failed), compiler::cmpfun() is still called on them -- confirm that
  # cmpfun accepts NULL, otherwise this errors despite the graceful
  # message above.
  new("CmmcMod",
      predict = compiler::cmpfun(predict),
      objective = compiler::cmpfun(objective),
      gradient = compiler::cmpfun(gradient),
      hessian = compiler::cmpfun(hessian),
      model_environment = model_environment,
      model = model_list,
      bounds = c(bounds, starting_values),
      parameters_show = parameters_show,
      restrictions = NULL
  )}
# Build the `model` element of a CmmcMod: parse the model text and collect
# the sorted, de-duplicated set of parameter names appearing anywhere in
# the parsed expressions.
#
# Returns a list with components `parameter` (character vector of unique
# parameter/variable names) and `model_list` (the parsed model structure
# from read_model()).
make_model_list <- function(model) {
  model_list <- read_model(model)
  # sort(unique(x)) already yields unique values; the original wrapped
  # the result in a second, redundant unique().
  parameters <- sort(unique(unlist(lapply(unlist(model_list), all.vars))))
  c(parameter = list(parameters), model_list = list(model_list))
}
# read_model() reads a model (as text),
# splits the string into characters for each row,
# and then parses it into a list of code elements.
read_model <- function(model) {
whole <- strsplit(model, "[\n\r]")[[1]] # split character string into single lines.
whole <- gsub("#.*", "", whole) # remove comments
model <- vector("list", length(whole))
c2 <- 1
c3 <- 1
s.flag <- FALSE
for (c1 in 1:length(whole)) {
if (!(grepl("^[[:space:]]*$", whole[c1]))) { # if empty line, use next list
s.flag <- TRUE
model[[c2]][c3] <- parse(text = whole[c1])[1]
c3 <- c3 + 1
fin <- c2
}
else {
if (s.flag == TRUE) c2 <- c2 + 1
c3 <- 1
s.flag <- FALSE
}
}
model[1:fin]
} |
9d681fea5c6a1a793e6d57257da2ce86149b65ea | bec62bf79c3bd19b3fc9d97be84ac102aa0a606e | /scripts/ballgown.R | 7d01939b8e30b15c360c34d2f339c46b9c11821d | [] | no_license | svsuresh/tuxedo2_snakemake | b759c5e6b06db472ef2e6454665721eb359d8e64 | e2356317d5ee3c2b1c545bea23ba78ce790d3ec4 | refs/heads/master | 2020-03-19T11:17:09.837104 | 2018-01-31T11:18:26 | 2018-01-31T11:18:26 | 136,444,395 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,113 | r | ballgown.R | # for ballgown analysis
library(ballgown)
# for plot labeling
library(calibrate)
# for manipulating file names/strings
library(stringr)
# --- Build the ballgown object from StringTie output and explore it ---
# make the ballgown object (loads all measurement types for samples
# matching the "hcc" pattern):
bg = ballgown (dataDir="./results/stringtie/" ,samplePattern="hcc", meas='all')
## where did you merge from
bg@dirs
## when did you merge?
bg@mergedDate
## what did you import?
bg@meas
## ----getexpr: expression tables at transcript/exon/intron/gene level-----
transcript_fpkm = texpr(bg, 'FPKM')
transcript_cov = texpr(bg, 'cov')
whole_tx_table = texpr(bg, 'all')
exon_mcov = eexpr(bg, 'mcov')
junction_rcount = iexpr(bg)
whole_intron_table = iexpr(bg, 'all')
gene_expression = gexpr(bg)
## ----struct: genomic structure of assembled features-------
structure(bg)$exon
structure(bg)$intron
structure(bg)$trans
# feature-to-feature lookup tables
exon_transcript_table = indexes(bg)$e2t
transcript_gene_table = indexes(bg)$t2g
# View(head(transcript_gene_table))
# View(head(exon_transcript_table))
# how many exons are there?
length(rownames(exon_transcript_table))
# how many transcripts are there?
length(rownames(transcript_gene_table))
# how many genes are there?
length(unique(transcript_gene_table[,"g_id"])) #Unique Gene count
### transcript stats
## plot the distribution of transcript lengths
hist(whole_tx_table$length, breaks=50, xlab="Transcript length (bp)", main="Distribution of transcript lengths", col="steelblue")
# how many transcripts are there per gene? count the number of
# transcripts per gene and plot the distribution.
counts=table(transcript_gene_table[,"g_id"])
# View(counts)
## some interesting stats
# genes with one transcript
c_one = length(which(counts == 1))
# genes with more than one transcript
c_more_than_one = length(which(counts > 1))
# what is the maximum number of transcripts per gene
c_max = max(counts)
# plot above data
hist(counts, breaks=50, col="bisque4", xlab="Transcripts per gene", main="Distribution of transcript count per gene")
# add legend
legend_text = c(paste("Genes with one transcript =", c_one), paste("Genes with more than one transcript =", c_more_than_one), paste("Max transcripts for single gene = ", c_max))
legend("topright", legend_text)
## extract gene names and transcript names (columns 1 and 6 of the
## transcript table are the transcript id and name)
gene_names=data.frame(SYMBOL=unique(rownames(gene_expression)))
#View(gene_names)
t_names=unique(whole_tx_table[,c(1,6)])
#View(whole_tx_table)
## sample meta data: 2 normal replicates followed by 2 tumor replicates---
phenotype_table= data.frame(id=sampleNames(bg), group=rep(c("normal","tumor"), each=2))
pData(bg) =phenotype_table
phenotype_table
## --- Transcript structure plots and differential expression tests ---
## plotTranscripts for a gene, for a single sample
plotTranscripts(gene='NCF4', bg, samples='hcc1395_normal_rep1', meas='FPKM', colorby='transcript', main='transcripts from gene NCF4: hcc1395_normal_rep1, FPKM')
## plotTranscripts for a gene, across all four samples
plotTranscripts('NCF4', bg, samples=c('hcc1395_normal_rep1', 'hcc1395_normal_rep2', 'hcc1395_tumor_rep1', 'hcc1395_tumor_rep2'), meas='FPKM', colorby='transcript')
## plot per-group transcript means
plotMeans('NCF4', bg, groupvar='group', meas='FPKM', colorby='transcript')
### FPKM distributions with and without log transformation
par(mfrow=c(1,2))
boxplot(gene_expression, col=rainbow(4), las=2, ylab="log2(FPKM)", main="Distribution of FPKMs for all 6 samples")
boxplot(log2(gene_expression+1), col=rainbow(6), las=2, ylab="log2(FPKM)", main="log transformed distribution of FPKMs for all 6 samples")
# dev.off()
## differential transcript expression (tumor vs normal)
results_txns = stattest(bg, feature='transcript', getFC = T, covariate='group',meas='FPKM' )
# Extract transcript names (transcript id + name columns)
t.ids=whole_tx_table[,c(1,6)]
head(results_txns)
# merge transcript results with transcript names
results_txns_merged = merge(results_txns,t.ids,by.x=c("id"),by.y=c("t_id"))
head(results_txns_merged)
# Differential gene expression
results_genes = stattest(bg, feature="gene", covariate="group", getFC=TRUE, meas="FPKM")
#View(head(results_genes))
## histogram of differentially expressed genes
# Log2-transform fold changes and store them in a logfc column
results_genes[,"logfc"] = log2(results_genes[,"fc"])
# Filter results by significant q-value
sig=which(results_genes$qval<0.05)
#View(sig)
## View(results_genes[sig,])
# draw histogram of log fold changes among significant genes
hist(results_genes[sig,"logfc"], breaks=50, col="seagreen", xlab="log2(Fold change) Tumor vs Normal", main="Distribution of differential expression values")
# Add vertical cut offs at +/- 4-fold (log2 = 2)
abline(v=c(-2,2), col="black", lwd=2, lty=2)
# Add legend
legend("topright", "Fold change >2 and <-2", lwd=2, lty=2)
## --- Tumor vs normal scatter, volcano and density plots ---
## correlation plot between tumor and normal samples: average expression
## of normal samples vs average expression of tumor samples
# Convert the matrix to a data frame so new columns can be added
gene_expression=as.data.frame(gene_expression)
## View(gene_expression)
# create normal means column (samples 1-2)
gene_expression$normal=rowMeans(gene_expression[,c(1:2)])
# create tumor means column (samples 3-4)
gene_expression$tumor=rowMeans(gene_expression[,c(3:4)])
#write.table(gene_expression, "gene_expression.txt", sep="\t")
# to avoid log(0), add 1 before taking logs. FPKM values are not normalized
x=log2(gene_expression[,"normal"]+1)
y=log2(gene_expression[,"tumor"]+1)
plot(x=x, y=y, pch=1, cex=2, xlab="Normal FPKM (log2)", ylab="Tumor (log2)", main="Tumor vs Normal FPKMs")
abline(a=0, b=1)
# highlight genes significant by q-value
qsig=which(results_genes$qval<0.05)
xqsig=x[qsig]
yqsig=y[qsig]
points(x=xqsig, y=yqsig, col="green", pch=19, cex=2)
## highlight genes significant by fold change (|log2 fc| > 4, i.e. 16-fold)
fsig=which(abs(results_genes$logfc)>4)
xfsig=x[fsig]
yfsig=y[fsig]
points(x=xfsig, y=yfsig, col="red", pch=1, cex=2)
## legend
legend_text = c("Significant by Q value", "Significant by Fold change")
legend("topright", legend_text,bty="n",pch = c(19,19), col=c("green","red"))
# label the fold-change-significant genes (textxy from calibrate)
textxy(xfsig,yfsig, cex=0.8, labs=row.names(gene_expression[fsig,]))
# add red line through 0
abline(v=0, col="red", lwd=3)
# add red lines through +/- log2 fold change of 4
abline(v=c(4,-4), col="red", lwd=3)
abline(h=c(-4,4), col="red",lwd=3)
## volcano plot (q-value vs log fold change)
# filter by log fold change of 4 (16-fold)
fc_sig_results_genes=which(abs(results_genes$logfc)>4)
# Extract genes with a 16-fold change
fc_sig_results_genes_plot=results_genes[fc_sig_results_genes,]
# plot
plot(results_genes$logfc,results_genes$qval, col="steelblue", pch=1)
#abline: fold-change and q-value cutoffs
abline(v=c(2,-2), col="red", lwd=3)
abline(h=0.05, col="red",lwd=3)
# highlight the significant genes with color
points(fc_sig_results_genes_plot$logfc,fc_sig_results_genes_plot$qval, col="green", pch=16)
# label the significant genes
textxy(fc_sig_results_genes_plot$logfc,fc_sig_results_genes_plot$qval, labs=fc_sig_results_genes_plot$id, cex=1.2)
## density plot of differentially expressed genes next to the scatter plot
colors = colorRampPalette(c("white", "blue","red","green","yellow"))
par(mfrow=c(1,2))
plot(x,y, main="Scatter plot of DEGs")
smoothScatter(x,y, colramp = colors, main="Density plot of DEGs")
## --- Export significant genes, then heatmap and PCA ---
## write the results to a file
# Filter results_genes by p-value significance
sigpi = which(results_genes[,"pval"]<0.05)
# Extract p-significant genes into a separate object
sigp = results_genes[sigpi,]
## View(sigp)
# filter fold-change-significant genes (|log2 fc| >= 2, i.e. 4-fold)
# from the p-significant genes
sigde = which(abs(sigp[,"logfc"]) >= 2)
# extract those genes into a separate object
sig_tn_de = sigp[sigde,]
#View(sig_tn_de)
# Order by q value, then by decreasing absolute differential expression
o = order(sig_tn_de[,"qval"], -abs(sig_tn_de[,"logfc"]), decreasing=FALSE)
# write output to local disc with the desired columns
output = sig_tn_de[o,c("id","fc","pval","qval","logfc")]
write.table(output, file="./results/ballgown/SigDE.txt", sep="\t", row.names=FALSE, quote=FALSE)
#View(gene_expression)
## heatmap of the significant genes
# Extract gene expression values for the significant genes
dim(sig_tn_de)
#View(sig_tn_de)
dim(gene_expression)
# View(gene_expression)
length(sig_tn_de$id)
sig_gene_expression=gene_expression[rownames(gene_expression) %in% sig_tn_de$id,]
dim(sig_gene_expression)
#View(sig_gene_expression)
# remove the derived "normal"/"tumor" mean columns (columns 5 and 6)
sig_gene_expression=sig_gene_expression[,-c(5:6)]
phenotype_table
# for the pheatmap function, column names of the data and row names of
# the annotation (pData) must be identical
# change the row names
rownames(phenotype_table)=phenotype_table[,1]
# remove the id column
phenotype_table=subset(phenotype_table, select = -c(id) )
# change the colnames to match the sample names
colnames(sig_gene_expression)=row.names(phenotype_table)
# draw heatmap (rows scaled, correlation distance, complete linkage)
library(pheatmap)
pheatmap(as.matrix(sig_gene_expression), scale = "row", clustering_distance_rows = "correlation", clustering_method = "complete",annotation_col = phenotype_table , main="Significant genes",fontsize_col=14, fontsize_row = 6 ,color = c("green","red"))
## Draw PCA plot
# transpose the data (samples as rows) and compute principal components
pca_data=prcomp(t(sig_gene_expression))
# Calculate percentage of variance explained by each component:
# variance / total variance * 100, rounded to 1 decimal place
pca_data_perc=round(100*pca_data$sdev^2/sum(pca_data$sdev^2),1)
print(pca_data_perc)
# Extract the first two principal components into a data frame together
# with sample names and group information
df_pca_data = data.frame(PC1 = pca_data$x[,1], PC2 = pca_data$x[,2], sample = colnames(sig_gene_expression), condition = rep(c("Normal","Tumor"),each=2))
## View(df_pca_data)
## use ggplot package to draw
# color by sample
library(ggplot2)
library(ggrepel)
ggplot(df_pca_data, aes(PC1,PC2, color = sample))+
  geom_point(size=8)+
  labs(x=paste0("PC1 (",pca_data_perc[1],")"), y=paste0("PC2 (",pca_data_perc[2],")"))
# color by condition/group, with non-overlapping sample labels
ggplot(df_pca_data, aes(PC1,PC2, color = condition))+
  geom_point(size=8)+
  labs(x=paste0("PC1 (",pca_data_perc[1],")"), y=paste0("PC2 (",pca_data_perc[2],")"))+
  geom_text_repel(aes(label=sample),point.padding = 0.75)
## gene-wise PCA (genes as rows, no transpose)
temp_data=prcomp(sig_gene_expression)
temp_data_df=data.frame(x=temp_data$x[,1], y=temp_data$x[,2])
ggplot(temp_data_df, aes(x,y))+geom_point()+geom_text(label=rownames(temp_data_df))
# save the workspace
# save.image("bg_08012018.rda")
# load("~/example_data/practice_rnaseq_data/ballgown_scripts/bg_08012018.rda")
|
157e831f5705947bccef03a1d49ce20154d24221 | a2766cb940f8f44aa175dad867f4abe1c1b96483 | /scripts/mhplots.R | 64fada4afe5fded9b09b7cb389a18eddd899599f | [] | no_license | yilmazbah/rep-cookbook | 9cb0b3ab671b708dd5d7cd6e740a4735be84de67 | b3b0b0872ab8c2dd1394edbd8ee2794fdb0dbc1c | refs/heads/master | 2022-11-23T19:25:35.486482 | 2020-07-13T08:29:31 | 2020-07-13T08:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,609 | r | mhplots.R | ##########################
# MGWAS - Manhattan Plots
##########################
# Create MHplots for specific analysis (RNT, LOG, HB)
# Import required libraries
require(data.table) # For efficient data handling
library(qqman) # Faor Manhattan plots
library(ggbio)
require(biovizBase)
require(GenomicRanges)
require(dplyr)
library(gdata)
#Import data and create tables
rnt <- read.table("RNT.pvalues", header=FALSE)
colnames(rnt) <- c("CHR", "BP", "P", "TAXA")
rnt$SNP <- paste(rnt$CHR, rnt$BP, sep=":")
rnt$source <- c("rnt")
log <- read.table("LOG.pvalues", header=FALSE)
colnames(log) <- c("CHR", "BP", "P", "TAXA")
log$SNP <- paste(log$CHR, log$BP, sep=":")
hb <- read.table("HB.pvalues", header=FALSE)
colnames(hb) <- c("CHR", "BP", "P", "TAXA")
hb$SNP <- paste(hb$CHR, hb$BP, sep=":")
hb$source <- c("hb")
#Merge the three datasets for estimating x-axis sizes
all <- rbind(rnt, hb)
#Create object to plot
gwas <- all %>%
# Compute chromosome size
group_by(CHR) %>%
summarise(chr_len=max(BP)) %>%
# Calculate cumulative position of each chromosome
mutate(tot=cumsum(chr_len)-chr_len) %>%
select(-chr_len) %>%
# Add this info to the initial dataset
left_join(all, ., by=c("CHR"="CHR")) %>%
# Add a cumulative position of each SNP
arrange(CHR, BP) %>%
mutate( BPcum=BP+tot)
axisdf = gwas %>% group_by(CHR) %>% summarize(center=( max(BPcum) + min(BPcum) ) / 2 )
#Plot hits
ggplot(data=gwas) +
# Show all points
geom_point(data=subset(gwas, gwas$P < 0.000000000159), aes(x=BPcum, y=-log10(P), color=as.factor(TAXA)), alpha=1, size=1.8) +
scale_color_manual(values=col) +
geom_point(data=subset(gwas, gwas$P > 0.000000000159), aes(x=BPcum, y=-log10(P)), color="grey85", alpha=1, size=1.8) +
geom_point(data=subset(gwas, gwas$P > 0.000000000159 & gwas$CHR %in% seq(1,22,2)), aes(x=BPcum, y=-log10(P)), color="grey75", alpha=1, size=1.8) +
# custom X axis:
scale_x_continuous(expand = c(0, 0), label = axisdf$CHR, breaks= axisdf$center ) +
scale_y_continuous(expand = c(0, 0), limits=c(5,10), breaks=c(5,6,7,8,9,10)) + # remove space between plot area and x axis
geom_hline(yintercept=9.798603, linetype="dashed", size=1, color = "red") +
geom_hline(yintercept=7.60206, linetype="dashed", size=1, color = "black") +
#Axis legends
xlab("Chromosome") + ylab("-log10(P value)") +
# Custom the theme:
theme_bw() +
theme(
legend.position="none",
panel.border = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
axis.line = element_line(colour = "black", size = 1, linetype = "solid"),
axis.title.x = element_text(size=16, face="bold"),
axis.title.y = element_text(size=16, face="bold"),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=14)
)
dev.off()
# --- Meta-analysis Manhattan plot ---
# snps_metaanalysis.txt columns: taxon, method, chromosome, position, p-value
metaanalysis <- read.table("snps_metaanalysis.txt", header=TRUE)
metaanalysis$SNP <- paste(metaanalysis$chr, metaanalysis$pos, sep=':')
metaanalysis$SNPTAX <- paste(metaanalysis$SNP, metaanalysis$tax, sep=':')
colnames(metaanalysis) <- c("TAXA", "METHOD", "CHR", "BP", "P", "SNP", "SNPTAX")
metaanalysis$BPcum <- metaanalysis$SNPTAX
# NOTE(review): `ALL` is not defined in this script and `metaanalysisMP`
# is never used afterwards; the plot below also references columns
# (em_P_value, COLOR) that the read/rename above does not create --
# this section appears to depend on objects built elsewhere. Confirm
# before running standalone.
metaanalysisMP <- merge(metaanalysis, ALL, by="SNPTAX")
svg("METAANALYSIS.svg", width = 14, height = 4)
ggplot(data=metaanalysis) +
  # Show all points
  geom_point(aes(x=BPcum, y=-log10(em_P_value), color=COLOR), alpha=0.8, size=2) +
  # custom X axis (axisdf comes from the main-plot section above)
  scale_x_continuous(expand = c(0, 0), label = axisdf$CHR, breaks= axisdf$center ) +
  scale_y_continuous(expand = c(0, 0), limits=c(0,14), breaks=c(2,4,6,8,10,12,14)) + # remove space between plot area and x axis
  # significance thresholds: study-wide (black), genome-wide (gray),
  # suggestive (red)
  geom_hline(yintercept=9.798603, linetype="dashed", size=1, color = "black") +
  geom_hline(yintercept=7.60206, linetype="dashed", size=1, color = "gray") +
  geom_hline(yintercept=5, linetype="dashed", size=1, color = "red") +
  # Axis legends
  xlab("Chromosome") + ylab("-log10(P)") +
  # Custom the theme:
  theme_bw() +
  theme(
    legend.position="none",
    panel.border = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    panel.grid.major.y = element_blank(),
    panel.grid.minor.y = element_blank(),
    axis.line = element_line(colour = "black", size = 1, linetype = "solid"),
    axis.title.x = element_text(size=16, face="bold"),
    axis.title.y = element_text(size=16, face="bold"),
    axis.text.x = element_text(size=14),
    axis.text.y = element_text(size=14)
  )
dev.off()
b28d7c0dac65580cd3e243dc8321987b70371bc1 | 041f22b6419ced643846676cfdc956405a041078 | /tdfAuto/man/tdfAuto_update.Rd | f9e7c84f532d0f9dd17791de5f935155f2e48606 | [] | no_license | SimonLyons/TDF_Predict | e308d821ad7b77f9b94b010e7391f6293ce02474 | c30c1f32d36863ced0479fffdbc4b0e6408e6bc9 | refs/heads/master | 2021-01-09T06:36:19.712069 | 2017-12-04T19:32:02 | 2017-12-04T19:32:02 | 81,016,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 493 | rd | tdfAuto_update.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/13_tdfAuto_update_function.R
\name{tdfAuto_update}
\alias{tdfAuto_update}
\title{This function will update the documentation and install (locally) the latest
local changes to the tdfAuto package, using devtools.}
\usage{
tdfAuto_update()
}
\value{
Nothing to return
}
\description{
This function will update the documentation and install (locally) the latest
local changes to the tdfAuto package, using devtools.
}
|
a308229a55c9d5494bc596ef4c1d7d0e6638ee73 | 17b797c94f2fc22e16b2cbfb80bb50fc6e79ab18 | /man/dsIsAsync-ArmadilloConnection-method.Rd | 6e2ddc2443247cd2bc2a28d57464408389583544 | [] | no_license | sidohaakma/molgenis-r-datashield | 266cc86fcedb0412b849474c874d02422c7d49b9 | dd99d24daab1b3ca21ee6141f83bf0de555e6ee1 | refs/heads/master | 2022-05-01T10:36:10.779352 | 2021-10-24T20:53:37 | 2021-10-24T20:53:37 | 113,619,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,058 | rd | dsIsAsync-ArmadilloConnection-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ArmadilloConnection.R
\name{dsIsAsync,ArmadilloConnection-method}
\alias{dsIsAsync,ArmadilloConnection-method}
\title{Armadillo DataShield Service asynchronous support}
\usage{
\S4method{dsIsAsync}{ArmadilloConnection}(conn)
}
\arguments{
\item{conn}{\code{\link{ArmadilloConnection-class}} class object}
}
\value{
The named list of logicals detailing the asynchronicity support.
}
\description{
List of DataSHIELD operations on which Armadillo DataSHIELD Service supports
asynchronicity.
}
\details{
When a \code{\link{DSResult-class}} object is returned on aggregation or
assignment operation, the raw result can be accessed asynchronously,
allowing parallelization of DataSHIELD calls over multiple servers.
The returned named list of logicals will specify if asynchronicity is
supported for:
aggregation operation ('aggregate'),
table assignment operation ('assignTable'),
resource assignment operation ('assignResource')
and expression assignment operation ('assignExpr').
}
|
d0cd1ef1f8bf7518dae173f60d948501c8e38a89 | f38cb25f9b0ae6f1c65e8f612e50d91f3ecde28e | /man/PIT.Rd | d11065831334da40786cf58d2fd6613785e87cdd | [
"MIT"
] | permissive | jbrowell/ProbCast | 5a2894946b8e433a5d43828cba834f6533f9b102 | b2f2311c1f72fc55ef08dac655f469a3545aa234 | refs/heads/master | 2023-07-02T01:30:39.897498 | 2023-06-07T15:02:44 | 2023-06-07T15:02:44 | 143,147,931 | 27 | 8 | NOASSERTION | 2021-11-04T17:12:27 | 2018-08-01T11:38:57 | R | UTF-8 | R | false | true | 806 | rd | PIT.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PIT.R
\name{PIT}
\alias{PIT}
\title{Probability integral transform: S3 Generic Method}
\usage{
PIT(distdata, ...)
}
\arguments{
\item{distdata}{An object defining cumulative distributions functions. Currently supported: \code{MultiQR} and \code{PPD}.}
\item{...}{Additional arguments.}
}
\value{
The Probability integral transform (or its inverse) of
data through distributions specified by \code{distdata}.
}
\description{
Probability integral transform: S3 Generic Method
}
\details{
This is an S3 method, see spcific methods \code{\link{PIT.MultiQR}}
and \code{\link{PIT.PPD}} for details on functionality.
}
\author{
Jethro Browell, \email{jethro.browell@strath.ac.uk}; Ciaran Gilbert, \email{ciaran.gilbert@strath.ac.uk}
}
|
adc108d0e31b0a0060b0e0aaa958b0ea6ae877fb | dc0dfacaa2d82b87ea71a9e951ab2716d5459dd7 | /man/baseline.GMM.Rd | 5c1de96615cd61dcad1ac10b428a141655165696 | [] | no_license | navinlabcode/copykat | 7e797eaad48a5a98883024dc0ee2194f9d7010e0 | b795ff793522499f814f6ae282aad1aab790902f | refs/heads/master | 2023-09-05T13:42:47.124206 | 2022-09-23T17:43:44 | 2022-09-23T17:43:44 | 231,153,766 | 158 | 53 | null | 2021-03-05T22:20:36 | 2019-12-31T22:45:07 | R | UTF-8 | R | false | true | 861 | rd | baseline.GMM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baseline.GMM.R
\name{baseline.GMM}
\alias{baseline.GMM}
\title{pre-define a group of normal cells with GMM.}
\usage{
baseline.GMM(
CNA.mat,
max.normal = 5,
mu.cut = 0.05,
Nfraq.cut = 0.99,
RE.before = basa,
n.cores = 1
)
}
\arguments{
\item{CNA.mat}{smoothed data matrix; genes in rows; cell names in columns.}
\item{max.normal}{find the first number diploid cells to save efforts.}
\item{mu.cut}{diploid baseline cutoff.}
\item{Nfraq.cut}{minimal fractoins of genomes with CNAs.}
}
\value{
1) predefined diploid cell names; 2) clustering results; 3) inferred baseline.
}
\description{
pre-define a group of normal cells with GMM.
}
\examples{
test.gmm <- baseline.GMM(CNA.mat=smooth.com, max.normal=30, mu.cut=0.05, Nfraq.cut=0.99)
test.gmm.cells <- test.gmm$preN
}
|
f7b97d0cf08df37bd98eda416be159d9227a0a32 | 0caf9fb09bf6c7d1049ed6a17bcaab1f9be26605 | /R/segTraj_Mstep_simultanee.R | fa184ff2c75f8b026eecd18a0cba140aa98f1937 | [] | no_license | rpatin/segclust2d | 5d6dca2c92b26c1fe2ef1f703ac5eb03b34208dc | 43d9225e036f611a36fff19559ceb12b5661049a | refs/heads/master | 2022-09-15T00:17:21.799611 | 2022-09-06T12:46:32 | 2022-09-06T12:46:32 | 81,332,459 | 7 | 1 | null | 2021-09-20T09:33:21 | 2017-02-08T13:25:39 | R | UTF-8 | R | false | false | 1,880 | r | segTraj_Mstep_simultanee.R | # Mstep_simultanee
#' Mstep_simultanee computes the MLE within the EM framework
#'
#' M step of the EM algorithm: given the current posterior membership
#' probabilities, update the mixture parameters (per-cluster means,
#' standard deviations and mixing proportions) for a bivariate signal.
#'
#' @param x the bivariate signal (2 x n matrix, one row per dimension)
#' @param rupt the rupture dataframe/matrix (K x 2: first and last index of
#' each segment)
#' @param tau the K*P matrix containing posterior probabilities of membership
#' to clusters
#' @param phi the parameters of the mixture (unused on entry; kept for
#' call-signature compatibility -- the updated value is returned)
#' @param sameSigma TRUE if all segments share the same variance
#' @return phi the updated value of the parameters: list(mu, sigma, prop),
#' with clusters reordered by increasing mean of the first dimension
Mstep_simultanee <- function(x, rupt, tau, phi, sameSigma = TRUE) {
  K <- nrow(tau)
  P <- ncol(tau)
  # Per-segment sums of the signal (2 x K) and segment lengths.
  Yk <- apply(rupt, 1, FUN = function(y) rowSums(x[, y[1]:y[2]]))
  nk <- rupt[, 2] - rupt[, 1] + 1
  n <- sum(nk)
  # Posterior-weighted counts (1 x P) and cluster means (2 x P).
  np <- nk %*% tau
  m <- Yk %*% tau / rep(np, each = 2)
  s <- matrix(nrow = 2, ncol = P)
  # ssq[k, p] = sum of squared deviations of segment k from cluster p's mean
  # for dimension i.  NB: the original code called vapply() without the
  # mandatory FUN.VALUE argument, which errors at runtime; numeric(K) is the
  # shape returned by the inner apply() over the K segments.
  segment_ssq <- function(i) {
    vapply(seq_len(P), function(p) {
      apply(rupt, 1, FUN = function(y) sum((x[i, y[1]:y[2]] - m[i, p])^2))
    }, numeric(K))
  }
  if (!sameSigma) {
    # One standard deviation per cluster and dimension.
    for (i in 1:2) {
      s[i, ] <- colSums(tau * segment_ssq(i))
    }
    s <- sqrt(s / rep(np, each = 2))
  } else {
    # A single pooled standard deviation per dimension, shared by clusters.
    for (i in 1:2) {
      s[i, ] <- rep(sum(tau * segment_ssq(i)), P)
    }
    s <- sqrt(s / n)
  }
  # Mixing proportions, then sort clusters by the first-dimension mean so the
  # cluster labelling is deterministic.
  prop <- colSums(tau) / K
  b <- order(m[1, ])
  m <- m[, b]
  s <- s[, b]
  prop <- prop[b]
  phi <- list(mu = m, sigma = s, prop = prop)
  invisible(phi)
}
|
#' Normalize a date-time value to POSIXct in the system time zone.
#'
#' @param x Anything `as.POSIXlt()` accepts (character, Date, POSIXct, ...).
#' @return A POSIXct vector representing the same instants as `x`.
format_time <- function(x) {
  as.POSIXct(as.POSIXlt(x, tz = Sys.timezone()))
}
#' Coerce a value (including factors) to numeric via character.
#'
#' Going through `as.character()` avoids the factor trap where
#' `as.numeric(factor)` returns the underlying integer level codes.
#'
#' @param x Vector to coerce (factor, character, or numeric).
#' @return A numeric vector; non-numeric strings become `NA` with a warning.
as.Num <- function(x) {
  as.numeric(as.character(x))
}
#' Build a SQL query for recent records in {table}
#'
#' @param table Table name to query.
#' @param time_val Size of the look-back window (default 4).
#' @param units Window units understood by SQLite's DATETIME (default 'hours').
#' @param data_type Optional equality filter on the data_type column.
#' @param filesystem Optional equality filter on the filesystem column.
#' @return A SQL string selecting rows newer than now minus the window.
sql_recent = function(table, time_val=4, units='hours',
                      data_type=NULL, filesystem=NULL){
  query <- glue::glue(
    "SELECT * FROM {table} WHERE time > DATETIME('now', 'localtime', '-{time} {units}')",
    table = table, time = time_val, units = units
  )
  if (!is.null(data_type)) {
    query <- paste0(query, glue::glue(" AND data_type == '{d}'", d = data_type))
  }
  if (!is.null(filesystem)) {
    query <- paste0(query, glue::glue(" AND filesystem == '{f}'", f = filesystem))
  }
  query
}
#' Locate a server log file, preferring the /Volumes mount point.
#'
#' Looks for the file under the /Volumes mount first; if it is not present
#' there, returns the path under the /ebio VM directory (without checking
#' that it exists).
#'
#' @param log_file Log file name relative to the server directory.
#' @return Full path to the log file.
which_file <- function(log_file) {
  vol_path <- file.path('/Volumes/abt3_projects/databases/server/', log_file)
  vm_path <- file.path('/ebio/abt3_projects/databases/server/', log_file)
  if (file.exists(vol_path)) vol_path else vm_path
}
8b9eef3d11e4bce7f0e4e52589a8e3a17a2d8211 | 413545e22f64a63c0653dfde72a1178736534dc7 | /Code/MM-IRT-I.R | bb6d615bbc90e7e2d30c8adb158e8a493282d5c0 | [] | no_license | PrisonRodeo/MM-git | 9b7fc2a5a465276b325270af589696508cbbaf52 | a4275c93e4c475b858896e3324fe86f74904613e | refs/heads/master | 2021-05-14T01:59:29.166673 | 2018-05-12T14:09:47 | 2018-05-12T14:09:47 | 116,581,238 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,312 | r | MM-IRT-I.R | ###################################################
# Modern Measurement (Spring 2018)
#
# Item Response Models
#
########################################################
# Load packages (install as needed), set options:
library(RCurl)
library(lme4)
library(plm)
library(gtools)
library(plyr)
library(texreg)
library(statmod)
library(psych)
library(ltm)
# Working directory for data and figure output (machine-specific).
setwd("~/Dropbox (Personal)/Modern Measurement") # <-- change as necessary...
options(scipen = 6) # bias against scientific notation
options(digits = 2) # show fewer decimal places
#################################
# Simulation... True Rasch data:
# Simulate N responders on K binary items from a logistic IRT model with a
# common discrimination of 1 (i.e. Rasch / 1PL data), then fit a 1PL model.
N <- 1000
K <- 10
set.seed(7222009)
Rasch1Data <- sim.irt(nvar=K,n=N,low=-3,high=3,
                      a=1,c=0,d=NULL,mu=0,sd=1,
                      mod="logistic")
Rasch1<-rasch(Rasch1Data$items)
summary(Rasch1)
# Item characteristic curves for the fitted 1PL.
pdf("Notes and Slides/Sim1PLM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(Rasch1,main=" ")
dev.off()
# Next, with a discrimination parameter different from 1.0:
set.seed(7222009)
RaschAltData <- sim.irt(nvar=K,n=N,low=-3,high=3,
                        a=2,c=0,d=NULL,mu=0,sd=1,
                        mod="logistic")
RaschAlt<-rasch(RaschAltData$items)
summary(RaschAlt)
pdf("Notes and Slides/SimAltPLM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(RaschAlt,main=" ")
dev.off()
# Next, with different discrimination parameters:
# (item-specific a's drawn uniformly from [0.5, 3]; fit a 2PL with ltm)
set.seed(7222009)
Discrims <- runif(K,0.5,3)
Rasch2Data <- sim.irt(nvar=K,n=N,low=-3,high=3,
                      a=Discrims,c=0,d=NULL,mu=0,sd=1,
                      mod="logistic")
Rasch2<-ltm(Rasch2Data$items~z1)
summary(Rasch2)
pdf("Notes and Slides/Sim2PLM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(Rasch2,main=" ")
dev.off()
# Recovery plot: true vs. estimated discriminations, with a 45-degree line.
pdf("Notes and Slides/Sim2Discrims.pdf",6,5)
par(mar=c(4,4,2,2))
plot(Discrims,Rasch2$coefficients[1:K,2],pch="",
     xlab="Discrimination Parameters",xlim=c(0.5,3),
     ylab="Estimates",ylim=c(0.5,3))
text(Discrims,Rasch2$coefficients[1:K,2],
     labels=colnames(Rasch2Data$items))
abline(a=0,b=1)
dev.off()
# 3PLM with different "guessing parameters" for each
# item:
#
# Note: Boost the N...
N <- 10000
set.seed(7222009)
GuessThresh <- round(rbeta(K,1,8),digits=1)
Rasch3Data <- sim.irt(nvar=K,n=N,low=-3,high=3,
                      a=1,c=GuessThresh,d=NULL,mu=0,sd=1,
                      mod="logistic")
Rasch3 <- tpm(Rasch3Data$items)
summary(Rasch3)
pdf("Notes and Slides/Sim3PLM.pdf",6,5)
par(mar=c(4,4,2,2))
plot(Rasch3,main=" ")
dev.off()
# Side-by-side recovery plots: discriminations (true value 1.0, dashed line)
# and guessing parameters (true values in GuessThresh, 45-degree line).
pdf("Notes and Slides/Sim3GandDs.pdf",8,5)
par(mar=c(4,4,2,2))
par(mfrow=c(1,2))
plot(rep(1,times=K),Rasch3$coefficients[1:K,3],pch="",
     xlab="Discrimination Parameter = 1.0",xlim=c(0.5,1.5),
     ylab="Estimates",ylim=c(0.5,1.5))
text(rep(1,times=K),Rasch3$coefficients[1:K,3],
     labels=colnames(Rasch3Data$items))
abline(h=1,lty=2,lwd=2)
plot(GuessThresh,summary(Rasch3)$coefficients[1:K,1],pch="",
     xlab="Guessing Parameters",xlim=c(-0.1,0.5),
     ylab="Estimates",ylim=c(-0.1,0.5))
text(GuessThresh,summary(Rasch3)$coefficients[1:K,1],
     labels=colnames(Rasch3Data$items))
abline(a=0,b=1)
dev.off()
##########################
# SCOTUS voting example:
# Pull the SCOTUS voting matrix from GitHub; columns 2-10 (presumably the
# nine justices' binary votes -- confirm against the CSV) feed the IRT models.
url <- getURL("https://raw.githubusercontent.com/PrisonRodeo/MM-git/master/Data/SCOTUS-IRT.csv")
SCOTUS <- read.csv(text = url)
rm(url)
head(SCOTUS,10)
summary(SCOTUS)
# 1PLM:
OnePLM<-rasch(SCOTUS[c(2:10)])
summary(OnePLM)
coef(OnePLM, prob=TRUE, order=TRUE)
# Alternative model constraining alpha = 1.0:
# (the constraint fixes the (length(IRTData)+1)-th coefficient at 1)
IRTData <- SCOTUS[c(2:10)]
AltOnePLM<-rasch(IRTData, constraint=cbind(length(IRTData)+1,1))
summary(AltOnePLM)
# 2PLM:
TwoPLM<-ltm(IRTData ~ z1)
summary(TwoPLM)
# 2PLM Probabilities and testing: likelihood-ratio comparison vs. the 1PL
coef(TwoPLM, prob=TRUE, order=TRUE)
anova(OnePLM, TwoPLM)
# 3PLM:
ThreePLM<-tpm(IRTData)
summary(ThreePLM)
anova(TwoPLM, ThreePLM)
# Plots: item characteristic curves for each of the three fitted models.
pdf("Notes and Slides/1PLMIRFsR.pdf",6,5)
par(mar=c(4,4,2,2))
plot(OnePLM,lty=seq(1:9), lwd=3,
     zrange=c(-2.5,2.5),xlab="Liberalism",
     legend=TRUE,main="1PLM ICCs")
dev.off()
pdf("Notes and Slides/2PLMIRFsR.pdf",6,5)
par(mar=c(4,4,2,2))
plot(TwoPLM,lty=seq(1:9), lwd=3,
     zrange=c(-2.5,2.5),xlab="Liberalism",
     legend=TRUE,main="2PLM ICCs")
dev.off()
pdf("Notes and Slides/3PLMIRFsR.pdf",6,5)
par(mar=c(4,4,2,2))
plot(ThreePLM,lty=seq(1:9), lwd=3,
     zrange=c(-2.5,2.5),xlab="Liberalism",
     legend=TRUE,main="3PLM ICCs")
dev.off()
|
a7dd955f0ee5fcbb59a0ea68d6dd4f3b43fc522f | 375383d4df1a77494dd5a57ba4d8e75325c524a6 | /code/02_bonus_spocc.R | 8f42e81e61ed8843a9058eb524390c9531d1c9d3 | [] | no_license | ocean-tracking-network/dfo-bio-robis-exercise | ca29d0f660ef8c22ad480101295bb7b6f04e4379 | 5427a428f5a4af79f4bfa286c02c35462995b241 | refs/heads/master | 2023-05-06T15:05:24.285666 | 2021-05-14T11:56:44 | 2021-05-14T11:56:44 | 366,370,817 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,053 | r | 02_bonus_spocc.R | # BONUS CONTENT: spocc ----------------------------
# Using spocc, we can search OBIS, GBIF, ALA, and other data systems at once! -----
# ROpenSci split spocc into three packages,
# now it's spocc -> scrubr -> mapr to get all the way to a map from
# searching occurrences
# install.packages('spocc')
library(spocc)
# scrubr
# we won't use this today but can confirm we've installed it
# install.packages('scrubr')
library(scrubr)
# mapr
# install.packages('mapr')
library(mapr)
# Once you have these, it's literally as easy as:
#1: make a vector of species you care about
spp <- c('Danaus plexippus','Accipiter striatus','Pinus contorta')
#2: ask some or all of the biodiversity databases what records they have
dat <- occ(query = spp, from = 'gbif', has_coords = TRUE, limit = 100)
#3: Map your results:
mapr::map_leaflet(dat)
# 1: Try your own favourite query:
here_fishy_fishy <- c('salmonidae')
# 2: and ask OBIS Canada for results!
dat <- occ(query = here_fishy_fishy,
           from = 'obis', # just OBIS results
           obisopts=list(nodeid="7dfb2d90-9317-434d-8d4e-64adf324579a"), # just OBIS Canada
           has_coords = TRUE, # just entries we can geolocate
           limit=200) # just the first 200 pages (200,000 rows) of data
# 3: What do we see?
mapr::map_leaflet(dat)
# Which datasets are providing my data?
# NOTE(review): %>% is used below, but neither magrittr nor dplyr is attached
# in this script -- confirm one of the loaded packages re-exports the pipe.
dat$obis$data$salmonidae$dataset_id %>% unique()
# https://obis.org/dataset/[dataset_id]
# Visit the homepages for these datasets by appending their UUID to the OBIS dataset URL:
# e.g. https://obis.org/dataset/18e4fa5b-5f92-4e09-957b-b242003287e9
# A lot of the values in these Occurrences are DarwinCore terms
# coming as they do from DarwinCore Archives:
# https://dwc.tdwg.org/terms/
# can check for presence/absence records:
dat$obis$data$salmonidae$occurrenceStatus
# And there's very often information about who recorded the observations:
dat$obis$data$salmonidae$recordedBy %>% unique()
# These datasets are kind of sparse on important information
# - no dates, no recordedBy?
# |
e73df47e54fcd165dd1ee3c3ce4503ef9c6ad21d | 5eab6e8ffee6af224949d82db145d5fcda306360 | /scrapeMatchups.R | 4b764db13749b8c515f0d8de6ef81a550eed3f0f | [] | no_license | ericcolon/nba_projections | 826ddd7bcdd2aeda54e2a17fd8568de14c59a036 | 28ed92ec33913cad0cf18a3a3738f90e800a0105 | refs/heads/master | 2020-04-16T09:05:07.605757 | 2017-02-25T18:24:31 | 2017-02-25T18:24:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,147 | r | scrapeMatchups.R | library(rvest)
library(data.table)
scrapeMatchups <- function() {
matchups_url <- 'http://www.espn.com/nba/schedule'
# Download HTML data
matchups_html <- read_html(matchups_url)
# Find available schedule dates in h2 nodes
raw_dates <- matchups_html %>%
html_nodes("h2") %>%
html_text()
# Parse the available schedule dates and omit the unparseables
schedule_dates = na.omit(as.Date(raw_dates, '%a, %b %d'))
matchup_sections <- matchups_html %>%
html_nodes("table") %>%
html_table(header=FALSE, fill=TRUE)
# Number of schedule dates found should match the number of matchup sections
stopifnot(length(schedule_dates) == length(matchup_sections))
# Create an output data table
matchups <- data.table(date=as.Date(character()), home=character(), away=character())
for(i in 1:length(schedule_dates)) {
# Extract relevant data from first two columns, removing the header row
daily_matchups <- as.data.frame(matchup_sections[i])[-1,1:2]
# Remove "Eastern Conf" and "Western Conf"
# daily_matchups <- daily_matchups[!grep("Conf", paste(daily_matchups$X1, daily_matchups$X2)), c(1,2)]
daily_matchup_count <- nrow(daily_matchups)
if (daily_matchup_count == 0) { next; }
# Clean up team abbreviations
daily_matchups <- as.data.frame(as.list(apply(daily_matchups[,c(1,2)], 2, function(x) gsub(' \\w*$', '', x))))
# Change "Los Angeles" to "LA Lakers"
daily_matchups <- as.data.frame(as.list(apply(daily_matchups[,c(1,2)], 2, function(x) gsub('^Los Angeles$', 'LA Lakers', x))))
# Change "LA" to "LA Clippers"
daily_matchups <- as.data.frame(as.list(apply(daily_matchups[,c(1,2)], 2, function(x) gsub('^LA$', 'LA Clippers', x))))
# Convert back to a data.table
daily_matchups <- as.data.table(daily_matchups)
# Prepend a column with the current schedule date
daily_matchups <- cbind(schedule_dates[i], daily_matchups)
# Set dailyMatchup column names
colnames(daily_matchups) <- c('date', 'home', 'away')
matchups <- rbind(matchups, daily_matchups)
}
matchups
}
|
37b912f4c5baf47bd06cceef63f68a5dad0c23df | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.developer.tools/man/codebuild_import_source_credentials.Rd | a6032cc4f5724e0fdbff77842de94efd4ed2d496 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,587 | rd | codebuild_import_source_credentials.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codebuild_operations.R
\name{codebuild_import_source_credentials}
\alias{codebuild_import_source_credentials}
\title{Imports the source repository credentials for an CodeBuild project that
has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket
repository}
\usage{
codebuild_import_source_credentials(
username = NULL,
token,
serverType,
authType,
shouldOverwrite = NULL
)
}
\arguments{
\item{username}{The Bitbucket username when the \code{authType} is BASIC_AUTH. This parameter
is not valid for other types of source providers or connections.}
\item{token}{[required] For GitHub or GitHub Enterprise, this is the personal access token. For
Bitbucket, this is the app password.}
\item{serverType}{[required] The source provider used for this project.}
\item{authType}{[required] The type of authentication used to connect to a GitHub, GitHub
Enterprise, or Bitbucket repository. An OAUTH connection is not
supported by the API and must be created using the CodeBuild console.}
\item{shouldOverwrite}{Set to \code{false} to prevent overwriting the repository source credentials.
Set to \code{true} to overwrite the repository source credentials. The
default value is \code{true}.}
}
\description{
Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
See \url{https://www.paws-r-sdk.com/docs/codebuild_import_source_credentials/} for full documentation.
}
\keyword{internal}
|
da08403f81b3b210c46eacb9fb50dcbfae7422f7 | 8eaee73b4f4c29771cb56cfc630db58de40d849f | /man/print.gh_response.Rd | c2d5a80e37b24c7f442f8ed8e2b4dc477ae1da22 | [
"MIT"
] | permissive | krlmlr/gh | 3e4c9853adca072d7a66f2b82dff71c719464ce7 | 978cb6e7e934a384aac2c63a090cc0b9ffb6e057 | refs/heads/master | 2023-01-23T09:37:06.545490 | 2020-12-02T14:52:46 | 2020-12-02T14:52:46 | 158,952,244 | 0 | 0 | NOASSERTION | 2019-11-21T00:30:03 | 2018-11-24T16:05:31 | R | UTF-8 | R | false | true | 375 | rd | print.gh_response.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.R
\name{print.gh_response}
\alias{print.gh_response}
\title{Print the result of a GitHub API call}
\usage{
\method{print}{gh_response}(x, ...)
}
\arguments{
\item{x}{The result object.}
\item{...}{Ignored.}
}
\value{
The JSON result.
}
\description{
Print the result of a GitHub API call
}
|
7c8447513ebaa820828bc74e9d061d9f9cba3b44 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/oppr/tests/testthat/test_add_feature_weights.R | 2c25947dab0a518798d193e2c0cc9ca55d44ed93 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,739 | r | test_add_feature_weights.R | context("add_feature_weights")
# Weights supplied as an explicit numeric vector (one value per feature).
test_that("numeric(5)", {
  # load data
  data(sim_projects, sim_actions, sim_features)
  # create problem
  p <- problem(sim_projects, sim_actions, sim_features,
               "name", "success", "name", "cost", "name", FALSE) %>%
       add_feature_weights(seq_len(5))
  # calculate weights
  weights <- p$feature_weights()
  # run tests: weights come back as a numeric vector named by feature
  expect_is(weights, "numeric")
  expect_equal(weights, setNames(seq_len(5), sim_features$name))
})
# Weights supplied as the name of a column in the feature table.
test_that("character(1)", {
  # load data
  data(sim_projects, sim_actions, sim_features)
  # create problem
  p <- problem(sim_projects, sim_actions, sim_features,
               "name", "success", "name", "cost", "name", FALSE) %>%
       add_feature_weights("weight")
  # calculate weights
  weights <- p$feature_weights()
  # run tests: weights match the "weight" column, named by feature
  expect_is(weights, "numeric")
  expect_equal(weights, setNames(sim_features$weight, sim_features$name))
})
# Invalid weight specifications must raise an error rather than be accepted.
test_that("invalid arguments", {
  data(sim_projects, sim_actions, sim_features)
  p <- problem(sim_projects, sim_actions, sim_features,
               "name", "success", "name", "cost", "name", FALSE)
  ## single numeric values (wrong length / negative / missing)
  expect_error({
    add_feature_weights(p, 2)
  })
  expect_error({
    add_feature_weights(p, -1)
  })
  expect_error({
    add_feature_weights(p, NA_real_)
  })
  ## multiple numeric values (wrong length, or containing NA)
  expect_error({
    add_feature_weights(p, rep(0.1, nrow(sim_features) - 1))
  })
  expect_error({
    add_feature_weights(p, c(NA_real_, rep(0.1, nrow(sim_features) - 1)))
  })
  ## character values (missing, non-existent column, wrong length)
  expect_error({
    add_feature_weights(p, NA_character_)
  })
  expect_error({
    add_feature_weights(p, "a")
  })
  expect_error({
    add_feature_weights(p, c("weight", "weight"))
  })
})
|
086d44ae341e77a626c31cc9a3f847040f6359aa | 4c961b6c657e9a8e91a483ab48e1318094b40130 | /man/lip.Rd | 2590ac5703f488f727e05415e887f88b46f3dc98 | [] | no_license | cran/mdhglm | 5c220f06acb28428bd035dfb9097fb9b243ca504 | 689c0b38742cb75370cd0686d1b3717dd25c87c8 | refs/heads/master | 2020-04-06T21:26:22.271570 | 2018-10-25T07:00:19 | 2018-10-25T07:00:19 | 54,140,240 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,659 | rd | lip.Rd | \name{lip}
\alias{lip}
\docType{data}
\title{Scottish Lip Cancer Data}
\description{
Clayton and Kaldor (1987) analyzed observed and expected numbers
of lip cancer cases in the 56 administrative areas of Scotland
with a view to produce a map that would display regional variations
in cancer incidence and yet avoid the presentation of unstable rates for
the smaller areas. The expected numbers had been calculated allowing
for the different age distributions in the areas by using a fixed-effects
multiplicative model; these were regarded for the purpose of analysis as
offsets based on an external set of standard rates. Presumably the
spatial aggregation is due in large part to the effects of environmental
risk factors. Data were available on the percentage of the work force in
each area employed in agriculture, fishing, or forestry. This covariate
exhibits spatial aggregation paralleling that for lip cancer itself. Because
all three occupations involve outdoor work, exposure to sunlight, the
principal known risk factor for lip cancer, might be the explanation.
}
\usage{data("lip")}
\format{
A data frame with 56 observations on the following 4 variables.
\describe{
\item{\code{y}}{observed number of lip cancer}
\item{\code{n}}{expected number of lip cancer}
\item{\code{x}}{percentage of the work force in each area employed in agriculture, fishing, or forestry}
\item{\code{county}}{county number for 56 areas}
}
}
\references{
Clayton, D.G. and Kaldor, J. (1987). Empirical Bayes estimates of age-standardized
relative risks for use in disease mapping. Biometrics, 43, 671--681.
}
|
36386bbc30c430fd48b92fd4ecc01b12f67bb4b7 | 5bf20a947b107500910ce88c034b26f97ca19e51 | /bar.R | d8f30aeb43c05f67a39836a7923cc786314a15c8 | [] | no_license | kulashish/consensus-graphs | 4b202f0de520aab674e7658d3ff826e3ec561cac | c5daab2e585632930c74f9318b7ffd069cf917b1 | refs/heads/master | 2020-12-30T16:40:53.110843 | 2017-05-11T18:21:57 | 2017-05-11T18:21:57 | 91,010,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,342 | r | bar.R | library(ggplot2)
# Grouped horizontal bar chart of running time by batch size for three methods.
path.running <- "/Users/kulkashi/Documents/PhD/ijcai/graphs_data/running_time.txt"
# NOTE(review): `header = F` relies on F not being reassigned; FALSE is safer.
data <- read.delim(path.running, header = F, col.names = c('batch', 'pl', 'alu', 'ali'))
# Manual wide-to-long reshape: one (batch, time, type) block per method column.
data.pl <- data[, c(1,2)]
colnames(data.pl) <- c('batch', 'time')
data.pl$type <- 'Rand-R'
data.alu <- data[, c(1,3)]
colnames(data.alu) <- c('batch', 'time')
data.alu$type <- 'Unc-R'
data.ali <- data[, c(1,4)]
colnames(data.ali) <- c('batch', 'time')
data.ali$type <- 'Inf-R'
data <- rbind(data.pl, data.alu, data.ali)
# Fix legend and axis ordering via explicit factor levels.
data$type <- factor(data$type, levels = c('Rand-R', 'Unc-R', 'Inf-R'))
data$batch <- factor(data$batch, levels = c('2', '5', '10', '25', '50', '100'))
ggplot(data, aes(x=batch, y=time, fill=type)) +
  geom_bar(stat = "identity", position=position_dodge(), width = .5) +
  xlab("Batch size") +
  ylab("Running time") +
  coord_flip() +
  scale_fill_grey() +
  # NOTE(review): unit() comes from the grid package -- confirm it is in scope
  # (recent ggplot2 versions re-export it).
  theme(legend.title=element_blank(), legend.justification = c(1,1), legend.position = c(1,1),
        legend.background = element_rect(size = .2, colour = "black"),
        legend.key = element_rect(size = 5),
        legend.key.size = unit(2.2, 'lines'),
        panel.border = element_rect(color = "black", size = .2, fill = NA),
        legend.text = element_text(size = 40),
        axis.title = element_text(size = 40),
        axis.text = element_text(size = 40)
        )
b5b67742ecbac7e26611bf53a039b4097ba9a862 | 56b503ede1f0f91e64b88d4b189a51d7782834a0 | /R/model.R | 02e20509e29adb0717799115a46b6a18c02c11b3 | [] | no_license | kciomek/vfranking | c412edfbcb0b55365b375d00188e8a3c731eabb4 | 037ed5299f542819d7d21872568bff203f854da5 | refs/heads/master | 2021-07-08T23:11:23.951863 | 2016-11-17T20:45:17 | 2016-11-17T20:45:17 | 58,743,270 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,451 | r | model.R | #### HELPERS
#' Build one linear constraint row encoding a pairwise comparison between two
#' alternatives in terms of the model's utility variables.
#'
#' The row is u(referenceAlternative) - u(alternative) compared against the
#' right-hand side; for strong preference it is tightened either by a fixed
#' epsilon on the RHS or by the model's epsilon variable, and indifference
#' turns the inequality into an equality.
#'
#' @param alternative index of the first alternative
#' @param referenceAlternative index of the second (reference) alternative
#' @param model model structure (uses constraints, perfToModelVariables,
#'   epsilonIndex, minEpsilon)
#' @param type one of "weakPreference", "strongPreference", "indifference"
#' @return list(lhs, dir, rhs) describing a single constraint
buildPairwiseComparisonConstraint <- function(alternative, referenceAlternative, model, type) {
  stopifnot(type %in% c("weakPreference", "strongPreference", "indifference"))
  nrVariables <- ncol(model$constraints$lhs)
  # Coefficient vector of u(reference) minus u(alternative).
  coefficients <- ua(referenceAlternative, nrVariables, model$perfToModelVariables) -
    ua(alternative, nrVariables, model$perfToModelVariables)
  direction <- "<="
  rightHandSide <- 0
  if (type == "strongPreference") {
    if (is.null(model$epsilonIndex)) {
      # No epsilon variable in the model: shift the bound by a fixed epsilon.
      rightHandSide <- -model$minEpsilon
    } else {
      # Epsilon is a decision variable: add it to this row's coefficients.
      coefficients[model$epsilonIndex] <- 1
    }
  } else if (type == "indifference") {
    direction <- "=="
  }
  list(lhs = coefficients, dir = direction, rhs = rightHandSide)
}
#' Append new decision variables to a constraint set.
#'
#' Adds one all-zero coefficient column per new variable and records each
#' variable's type code at the end of `types`.  (NB: the function name keeps
#' the original "Varialbes" spelling for compatibility with existing callers.)
#'
#' @param constraints list holding at least `lhs` (coefficient matrix) and `types`
#' @param variables character vector of variable type codes (e.g. "C", "B")
#' @return the constraints list extended with the zero columns and new types
addVarialbesToModel <- function(constraints, variables) {
  for (i in seq_along(variables)) {
    constraints$lhs <- cbind(constraints$lhs, 0)
  }
  constraints$types <- c(constraints$types, variables)
  constraints
}
#' Stack any number of constraint lists into a single one.
#'
#' Each argument is either NULL (skipped) or a list with some of the fields
#' `lhs` (matrix rows), `dir`, `rhs` and `types`; the fields are concatenated
#' in argument order.
#'
#' @param ... constraint lists (NULL entries are ignored)
#' @return one list(lhs, dir, rhs, types) combining all inputs
combineConstraints <- function(...) {
  parts <- Filter(Negate(is.null), list(...))
  lhs <- c()
  dir <- c()
  rhs <- c()
  types <- c()
  for (part in parts) {
    lhs <- rbind(lhs, part$lhs)
    dir <- c(dir, part$dir)
    rhs <- c(rhs, part$rhs)
    types <- c(types, part$types)
  }
  list(lhs = lhs, dir = dir, rhs = rhs, types = types)
}
#' Drop rows from a constraint set.
#'
#' @param allConst list(lhs, dir, rhs, types) describing the constraints
#' @param constraintsToRemoveIndices integer indices of the rows to drop
#' @return the same structure without the selected rows; `types` (one entry
#'   per variable/column) is left untouched
removeConstraints <- function(allConst, constraintsToRemoveIndices) {
  # drop = FALSE keeps lhs a matrix even when only a single row remains;
  # the previous indexing silently collapsed it to a plain vector.
  return (list(lhs = allConst$lhs[-c(constraintsToRemoveIndices), , drop = FALSE],
               dir = allConst$dir[-c(constraintsToRemoveIndices)],
               rhs = allConst$rhs[-c(constraintsToRemoveIndices)],
               types = allConst$types))
}
#### BUILDING MODEL
#' @export
buildModel <- function(problem, minEpsilon = 1e-4) { # includeEpsilonAsVariable,
nrAlternatives <- nrow(problem$perf)
nrCriteria <- ncol(problem$perf)
# criterion value to alternative indices
criterionValues <- replicate(nrCriteria, list())
for (j in seq_len(nrCriteria)) {
for (i in seq_len(nrAlternatives)) {
value <- problem$perf[i, j]
found <- FALSE
for (k in seq_len(length(criterionValues[[j]]))) {
if (criterionValues[[j]][[k]]$value == value) { # todo: consider epsilon
found <- TRUE
criterionValues[[j]][[k]]$alternatives <- c(criterionValues[[j]][[k]]$alternatives, i)
}
}
if (!found) {
criterionValues[[j]][[length(criterionValues[[j]]) + 1]] <- list(
value=value,
alternatives=c(i)
)
}
}
if (length(criterionValues[[j]]) < 2) {
stop(paste("Criterion ", j, " is superfluous!"))
}
# sort criterion values
criterionValues[[j]] <- criterionValues[[j]][order(
sapply(criterionValues[[j]],
function(x) x$value, simplify=TRUE
), decreasing=FALSE)]
}
perfToModelVariables <- replicate(nrCriteria, replicate(nrAlternatives, list()))
firstChPointVariableIndex <- c(1)
chPoints <- c()
for (j in seq_len(nrCriteria)) {
numberOfCharacteristicPoints <- problem$characteristicPoints[j]
if (numberOfCharacteristicPoints == 0) {
numberOfCharacteristicPoints <- length(criterionValues[[j]])
}
if (j != nrCriteria) {
firstChPointVariableIndex[j + 1] <- firstChPointVariableIndex[j] + numberOfCharacteristicPoints - 1
}
chPoints[j] <- numberOfCharacteristicPoints
}
numberOfVariables <- firstChPointVariableIndex[length(firstChPointVariableIndex)] + chPoints[nrCriteria] - 2
for (j in seq_len(nrCriteria)) {
firstValue <- criterionValues[[j]][[1]]$value
lastValue <- criterionValues[[j]][[length(criterionValues[[j]])]]$value
direction <- problem$criteria[j]
if (problem$characteristicPoints[j] == 0) {
for (i in seq_len(nrAlternatives)) {
value <- problem$perf[i, j]
criterionValueIndex <- which(sapply(criterionValues[[j]], function(x){x$value == value}))
if (direction == "g" && criterionValueIndex > 1) {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j] + criterionValueIndex - 2, 1.0)
} else if (direction == "c" && criterionValueIndex < length(criterionValues[[j]])) {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j] + criterionValueIndex - 1, 1.0)
}
}
} else {
numberOfCharacteristicPoints <- problem$characteristicPoints[j]
intervalLength <- (lastValue - firstValue) / (numberOfCharacteristicPoints - 1);
coeff <- 1.0 / intervalLength;
for (i in seq_len(nrAlternatives)) {
value <- problem$perf[i, j]
if (direction == "g") {
if (value == lastValue) {
perfToModelVariables[[i, j]][[1]] <- c(firstChPointVariableIndex[j] + numberOfCharacteristicPoints - 2, 1.0)
} else if (value > firstValue) {
lowerChPointIndex <- floor((value - firstValue) * coeff)
if (lowerChPointIndex >= numberOfCharacteristicPoints - 1) {
stop("InternalError?: lowerChPointIndex >= numberOfCharacteristicPoints - 1: This should never happen.");
}
lowerValue = firstValue + intervalLength * lowerChPointIndex
upperValue = firstValue + intervalLength * (lowerChPointIndex + 1)
lowerCoeff <- 0.0
upperCoeff <- 0.0
if (value <= lowerValue) {
# comp accuracy
lowerCoeff = 1.0
upperCoeff = 0.0
} else if (value >= upperValue) {
# comp accuracy
lowerCoeff = 0.0
upperCoeff = 1.0
} else {
lowerCoeff = (lowerValue - value) / (upperValue - lowerValue) + 1.0
upperCoeff = (value - lowerValue) / (upperValue - lowerValue)
}
if (lowerChPointIndex > 0) {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j] + lowerChPointIndex - 1, lowerCoeff)
perfToModelVariables[[i, j]][[2]] = c(firstChPointVariableIndex[j] + lowerChPointIndex, upperCoeff)
} else {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j] + lowerChPointIndex, upperCoeff)
}
}
} else {
if (value == firstValue) {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j], 1.0)
} else if (value < lastValue) {
lowerChPointIndex <- floor((value - firstValue) * coeff)
if (lowerChPointIndex >= numberOfCharacteristicPoints - 1) {
stop("InternalError?: lowerChPointIndex >= numberOfCharacteristicPoints - 1: This should never happen.");
}
lowerValue = firstValue + intervalLength * lowerChPointIndex
upperValue = firstValue + intervalLength * (lowerChPointIndex + 1)
lowerCoeff <- 0.0
upperCoeff <- 0.0
if (value <= lowerValue) {
# comp accuracy
lowerCoeff = 1.0
upperCoeff = 0.0
} else if (value >= upperValue) {
# comp accuracy
lowerCoeff = 0.0
upperCoeff = 1.0
} else {
lowerCoeff = (upperValue - value) / (upperValue - lowerValue)
upperCoeff = (value - upperValue) / (upperValue - lowerValue) + 1.0
}
if (lowerChPointIndex < numberOfCharacteristicPoints - 2) {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j] + lowerChPointIndex, lowerCoeff)
perfToModelVariables[[i, j]][[2]] = c(firstChPointVariableIndex[j] + lowerChPointIndex + 1, upperCoeff)
} else {
perfToModelVariables[[i, j]][[1]] = c(firstChPointVariableIndex[j] + lowerChPointIndex, lowerCoeff)
}
}
}
}
}
}
# epsilon index
#epsilonIndex <- NULL
#if (includeEpsilonAsVariable) {
numberOfVariables <- numberOfVariables + 1
epsilonIndex <- numberOfVariables
#}
# constraints
# sum to 1
lhs <- rep(0, numberOfVariables)
for (j in seq_len(nrCriteria)) {
if (problem$criteria[j] == 'g')
lhs[firstChPointVariableIndex[j] + chPoints[j] - 2] <- 1
else
lhs[firstChPointVariableIndex[j]] <- 1
}
constraints <- list(lhs = lhs, dir = "==", rhs = 1)
# monotonicity of vf
for (j in seq_len(nrCriteria)) {
for (k in seq_len(chPoints[j] - 2)) {
lhs <- rep(0, numberOfVariables)
rhs <- 0
if (problem$criteria[j] == "g") {
lhs[firstChPointVariableIndex[j] + k - 1] <- 1
lhs[firstChPointVariableIndex[j] + k] <- -1
} else {
lhs[firstChPointVariableIndex[j] + k - 1] <- -1
lhs[firstChPointVariableIndex[j] + k] <- 1
}
if (problem$strictVF) {
#if (includeEpsilonAsVariable) {
lhs[epsilonIndex] <- 1
#} else {
# rhs <- -minEpsilon
#}
}
constraints <- combineConstraints(constraints,
list(lhs = lhs, dir = "<=", rhs = rhs))
}
lhs <- rep(0, numberOfVariables)
rhs <- 0
if (problem$criteria[j] == 'g')
lhs[firstChPointVariableIndex[j]] <- -1
else
lhs[firstChPointVariableIndex[j] + chPoints[j] - 2] <- -1
if (problem$strictVF) {
#if (includeEpsilonAsVariable) {
lhs[epsilonIndex] <- 1
#} else {
# rhs <- -minEpsilon
#}
}
constraints <- combineConstraints(constraints,
list(lhs = lhs, dir = "<=", rhs = rhs))
}
constraints$types <- rep("C", numberOfVariables)
# building model
model <- list(
constraints = constraints,
firstChPointVariableIndex = firstChPointVariableIndex,
epsilonIndex = epsilonIndex,
chPoints = chPoints,
perfToModelVariables = perfToModelVariables,
criterionValues = criterionValues,
criterionPreferenceDirection = problem$criteria,
prefInfoToConstraints = list(),
generalVF = problem$characteristicPoints == 0,
minEpsilon = minEpsilon
)
# preference information
# assignment examples
prefInfoIndex <- 1
if (is.matrix(problem$strongPreference)) {
for (k in seq_len(nrow(problem$strongPreference))) {
alternative <- problem$strongPreference[k, 1]
referenceAlternative <- problem$strongPreference[k, 2]
model$constraints <- combineConstraints(model$constraints,
buildPairwiseComparisonConstraint(alternative, referenceAlternative,
model, type = "strongPreference"))
model$prefInfoToConstraints[[prefInfoIndex]] <- nrow(model$constraints$lhs)
prefInfoIndex <- prefInfoIndex + 1
}
}
if (is.matrix(problem$weakPreference)) {
for (k in seq_len(nrow(problem$weakPreference))) {
alternative <- problem$weakPreference[k, 1]
referenceAlternative <- problem$weakPreference[k, 2]
model$constraints <- combineConstraints(model$constraints,
buildPairwiseComparisonConstraint(alternative, referenceAlternative,
model, type = "weakPreference"))
model$prefInfoToConstraints[[prefInfoIndex]] <- nrow(model$constraints$lhs)
prefInfoIndex <- prefInfoIndex + 1
}
}
if (is.matrix(problem$indifference)) {
for (k in seq_len(nrow(problem$indifference))) {
alternative <- problem$indifference[k, 1]
referenceAlternative <- problem$indifference[k, 2]
model$constraints <- combineConstraints(model$constraints,
buildPairwiseComparisonConstraint(alternative, referenceAlternative,
model, type = "indifference"))
model$prefInfoToConstraints[[prefInfoIndex]] <- nrow(model$constraints$lhs)
prefInfoIndex <- prefInfoIndex + 1
}
}
return (model)
}
# Build the utility-coefficient vector of one alternative over all model
# variables.
#   alternative          - row index of the alternative in perfToModelVariables
#   nrVariables          - total number of model variables (length of result)
#   perfToModelVariables - matrix of lists; cell [alternative, j] holds
#                          c(variableIndex, coefficient) pairs for criterion j
# Returns a numeric vector of length nrVariables with each coefficient placed
# at its variable index and zeros elsewhere.
ua <- function(alternative, nrVariables, perfToModelVariables) {
  coeffs <- rep(0, nrVariables)
  for (criterion in seq_len(ncol(perfToModelVariables))) {
    # Each cell contributes zero or more (index, coefficient) pairs.
    for (pair in perfToModelVariables[[alternative, criterion]]) {
      coeffs[pair[1]] <- pair[2]
    }
  }
  coeffs
}
# Remove the epsilon variable from the model's linear constraint system.
# The epsilon column is folded into the right-hand side at its minimum value
# (rhs - lhs[, eps] * minEpsilon), then dropped from the lhs matrix and the
# variable-type vector; epsilonIndex is cleared on the returned model.
eliminateEpsilon <- function(model) {
  stopifnot(!is.null(model$epsilonIndex))
  epsCol <- model$epsilonIndex
  cons <- model$constraints
  cons$rhs <- cons$rhs - cons$lhs[, epsCol] * model$minEpsilon
  cons$lhs <- cons$lhs[, -epsCol]
  cons$types <- cons$types[-epsCol]
  model$constraints <- cons
  model$epsilonIndex <- NULL
  model
}
|
b2793d0fbad2ce899998c5b6e74635d4c0175745 | 7b67ea4201dd7cbc090eba37e6002ef35930c372 | /Step_7th_PredictionAnalysis/AtlasLoading/1st_Sorted_2FCV/Step_4th_Prediction_ScatterPlot_2Fold_EFAccuracy.R | a4a2f7dd2cfa197ba3b4bbe94aa14b5fdf0bfd14 | [] | no_license | guoweiwuorgin/pncSingleFuncParcel | 8b4a09b7d8b6316c513cbf9a9e1b0b591600540b | d9cf211985c8b47ffd9fecc4fa0853487c40f604 | refs/heads/master | 2022-12-02T21:00:05.414808 | 2020-08-10T05:37:03 | 2020-08-10T05:37:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,105 | r | Step_4th_Prediction_ScatterPlot_2Fold_EFAccuracy.R |
# Scatter plot of actual vs. predicted executive-function (EF) accuracy for a
# 2-fold prediction analysis. Per-fold actual/predicted scores are read from
# MATLAB .mat files, linear models adjust for Age, Sex and Motion, and the
# visreg partial residuals of both folds are overlaid in a single ggplot
# figure, which is then written out as a TIFF.
library(R.matlab)
library(ggplot2)
library(visreg)
WorkingFolder <- '/data/jux/BBL/projects/pncSingleFuncParcel/Replication/Revision/PredictionAnalysis'
PredictionFolder <- paste0(WorkingFolder, '/AtlasLoading/2Fold_Sort_EFAccuracy');
# Actual (test) and predicted scores plus subject indices for each fold.
# The "+ 1" suggests Index is stored 0-based in the .mat files and is being
# converted to R's 1-based indexing -- TODO confirm against the pipeline.
Fold0 <- readMat(paste0(PredictionFolder, '/Fold_0_Score.mat'));
TestScore_Fold0 <- t(Fold0$Test.Score);
PredictScore_Fold0 <- as.numeric(t(Fold0$Predict.Score));
Index_Fold0 <- Fold0$Index + 1;
Fold1 <- readMat(paste0(PredictionFolder, '/Fold_1_Score.mat'));
TestScore_Fold1 <- t(Fold1$Test.Score);
PredictScore_Fold1 <- as.numeric(t(Fold1$Predict.Score));
Index_Fold1 <- Fold1$Index + 1;
# NOTE(review): the four range variables below are computed but never used
# anywhere later in this script.
Predict_Max <- max(c(PredictScore_Fold0, PredictScore_Fold1));
Predict_Min <- min(c(PredictScore_Fold0, PredictScore_Fold1));
Test_Max <- max(c(TestScore_Fold0, TestScore_Fold1));
Test_Min <- min(c(TestScore_Fold0, TestScore_Fold1));
# Behavioral covariates for all 693 subjects; subset to each fold by index.
Behavior <- readMat(paste0(WorkingFolder, '/Behavior_693.mat'));
# Fold 0
Behavior_Fold0 = data.frame(Age = as.numeric(Behavior$Age[Index_Fold0]));
Behavior_Fold0$Sex = as.numeric(Behavior$Sex[Index_Fold0]);
Behavior_Fold0$Motion = as.numeric(Behavior$Motion[Index_Fold0]);
Behavior_Fold0$F1_Exec_Comp_Res_Accuracy = as.numeric(Behavior$F1.Exec.Comp.Res.Accuracy[Index_Fold0]);
# Fold 1
Behavior_Fold1 = data.frame(Age = as.numeric(Behavior$Age[Index_Fold1]));
Behavior_Fold1$Sex = as.numeric(Behavior$Sex[Index_Fold1]);
Behavior_Fold1$Motion = as.numeric(Behavior$Motion[Index_Fold1]);
Behavior_Fold1$F1_Exec_Comp_Res_Accuracy = as.numeric(Behavior$F1.Exec.Comp.Res.Accuracy[Index_Fold1]);
# Grey (#7F7F7F) for fold 0, black for fold 1.
Color_Fold0 = '#7F7F7F';
Color_Fold1 = '#000000';
# Fold 1
# Regress predicted score on actual EF accuracy plus covariates, then pull the
# covariate-adjusted ("conditional") fit, its band, and the partial residuals
# out of the visreg object without plotting.
Energy_lm <- lm(PredictScore_Fold1 ~ F1_Exec_Comp_Res_Accuracy + Age + Sex + Motion, data = Behavior_Fold1);
plotdata <- visreg(Energy_lm, "F1_Exec_Comp_Res_Accuracy", type = "conditional", scale = "linear", plot = FALSE);
smooths_Fold1 <- data.frame(Variable = plotdata$meta$x,
                      x = plotdata$fit[[plotdata$meta$x]],
                      smooth = plotdata$fit$visregFit,
                      lower = plotdata$fit$visregLwr,
                      upper = plotdata$fit$visregUpr);
predicts_Fold1 <- data.frame(Variable = "dim1",
                       x = plotdata$res$F1_Exec_Comp_Res_Accuracy,
                       y = plotdata$res$visregRes)
# Fold-1 layers: points, fitted line, and lower/upper band as a ribbon.
Fig <- ggplotmlx <- NULL; Fig <- ggplot() +
  geom_point(data = predicts_Fold1, aes(x, y), colour = Color_Fold1, size = 2) +
  geom_line(data = smooths_Fold1, aes(x = x, y = smooth), colour = Color_Fold1, size = 1.5) +
  geom_ribbon(data = smooths_Fold1, aes(x = x, ymin = lower, ymax = upper), fill = Color_Fold1, alpha = 0.2)
# Fold 0
# Same model and extraction for fold 0; Energy_lm and plotdata are reused.
Energy_lm <- lm(PredictScore_Fold0 ~ F1_Exec_Comp_Res_Accuracy + Age + Sex + Motion, data = Behavior_Fold0);
plotdata <- visreg(Energy_lm, "F1_Exec_Comp_Res_Accuracy", type = "conditional", scale = "linear", plot = FALSE);
smooths <- data.frame(Variable = plotdata$meta$x,
                      x = plotdata$fit[[plotdata$meta$x]],
                      smooth = plotdata$fit$visregFit,
                      lower = plotdata$fit$visregLwr,
                      upper = plotdata$fit$visregUpr);
predicts <- data.frame(Variable = "dim1",
                       x = plotdata$res$F1_Exec_Comp_Res_Accuracy,
                       y = plotdata$res$visregRes)
# Overlay fold-0 layers (shape = 17 gives fold 0 a distinct point marker)
# and apply the shared theme, axis labels, and axis limits/breaks.
Fig <- Fig +
  geom_point(data = predicts, aes(x, y), colour = Color_Fold0, size = 2, shape = 17) +
  geom_line(data = smooths, aes(x = x, y = smooth), colour = Color_Fold0, size = 1.5) +
  geom_ribbon(data = smooths, aes(x = x, ymin = lower, ymax = upper), fill = Color_Fold0, alpha = 0.2) +
  theme_classic() + labs(x = "Actual EF Performance", y = "Predicted EF Performance") +
  theme(axis.text=element_text(size=30, color='black'), axis.title=element_text(size=30)) +
  scale_y_continuous(limits = c(-1.6, 1.9), breaks = c(-1.6, -0.8, 0, 0.8, 1.6)) +
  scale_x_continuous(limits = c(-3.3, 2.3), breaks = c(-3.2, -1.6, 0, 1.6))
Fig
# Save the combined figure at print resolution.
ggsave('/data/jux/BBL/projects/pncSingleFuncParcel/Replication/Revision/Figures/EFAccuracyPrediction_CorrACC.tiff', width = 17, height = 15, dpi = 600, units = "cm");
|
4e6f6a6e0404026a634539b1ff67d9f88e9c6908 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /RScelestial/R/RScelestial-package.R | c443bbef96575614c79f1a51eb7474ced19f6046 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 589 | r | RScelestial-package.R | #' RScelestial: An R wrapper for scelestial algorithm for single-cell lineage tree reconstruction
#' through an approximation algorithm based on the Steiner tree problem
#'
#' This package provides a wrapper for the scelestial algorithm, which is implemented in C++.
#' The package contains the function \code{scelestial} for running the algorithm and
#' \code{synthesis} for tumor simulation, which provides synthetic data.
#'
#' @name RScelestial
#'
#' @useDynLib RScelestial
#' @importFrom Rcpp evalCpp
#' @importFrom "utils" "read.table"
#' @docType package
#' @exportPattern "^[^._][[:alpha:]]*"
NULL
b2da4d5edaaec9c88d68941a5905f5e3cdabc065 | c3d1da39d7eeb2b96e2aa8bbbc2c9a3167e87f88 | /thesis_files/paper_example_data.R | 70bde5e8531a6a074369e33b1b5e2b8d4fe88a8f | [] | no_license | plofknaapje/gpowerr | 43b7397db818510cf5793edbd568e26e5fbc3788 | a82bbc228daf667da2ac4de28f3dd9c0f7889c19 | refs/heads/main | 2023-06-19T02:24:51.948753 | 2021-07-21T14:32:53 | 2021-07-21T14:32:53 | 335,255,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 179 | r | paper_example_data.R |
# Small example data set from the paper; used to exercise the package's
# gpower() and compare it against base R's prcomp() on the same data.
data <-
  data.frame(
    a = 1:6,
    # NOTE(review): `^` binds tighter than `:`, so `1:6 ^ 2` is 1:(6^2) = 1:36;
    # the 6-element columns below are then recycled to 36 rows. Confirm that
    # `(1:6)^2` was not intended here.
    b = 1:6 ^ 2,
    c = c(0,-1, 0.5, 0,-2,-3),
    d = c(1, 1, 2, 2, 3, 3),
    e = c(0, 1, 0, 1, 0, 1)
  )
gpower(data, 2, 0.1)  # package function; argument semantics not visible here -- see ?gpower
prcomp(data)          # baseline PCA for comparison
|
c0608c224a80c1ea90ad7d2add23e16e1580d0ac | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /SuperGauss/tests/testthat/test-NormalToeplitz-logdens.R | 0a6aaea9d201eb7b19bae8c9e82485b046d388a1 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 709 | r | test-NormalToeplitz-logdens.R | source("SuperGauss-testfunctions.R")
# Regression test: NormalToeplitz$logdens must agree with the reference
# implementation toep_ldens() (provided by SuperGauss-testfunctions.R, sourced
# at the top of this file) across random problem sizes, both ACF types, and
# 1-3 simulated observation vectors.
context("NormalToeplitz - Log-Density.")
nrep <- 10  # number of randomized repetitions of the whole test body
test_that("NormalToeplitz$logdens gives correct result.", {
  replicate(n = nrep, expr = {
    # Random series length, roughly 20 +/- 5 (always positive).
    N <- round(abs(rnorm(n = 1, mean = 20, sd = 5)))
    # All combinations of ACF type and number of observation columns.
    case.par <- expand.grid(type = c("fbm", "matern"),
                            n_obs = 1:3)
    ncase <- nrow(case.par)
    for(ii in 1:ncase){
      cp <- case.par[ii, ]
      type <- as.character(cp$type)
      acf <- test_acf_func(N, type)  # test autocorrelation of length N
      n_obs <- cp$n_obs
      X <- rnormtz(n = n_obs, acf = acf, fft = FALSE)  # simulated observations
      Nt <- NormalToeplitz$new(N = N)
      ld1 <- toep_ldens(t(X), acf)  # reference log-density
      ld2 <- Nt$logdens(X, acf)     # implementation under test
      expect_equal(ld1, ld2)
    }
  })
})
|
043b2b31c46fe20e2829ee35741e83019dcf5174 | 77502a8046a63d220c00088039483f2601d716cc | /man/MarginalLikelihoodFit.Rd | c00580c2b860daa5aa7d7898086707bd362dbcf4 | [] | no_license | cran/GLMMarp | d7679ea0d62f3294a33d39dd7d3ebe1e0f55d57c | 97f48c83eec0fa341fdff2aa48d9e5539bb3dff9 | refs/heads/master | 2021-01-19T10:34:45.108560 | 2009-09-28T00:00:00 | 2009-09-28T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,544 | rd | MarginalLikelihoodFit.Rd | \name{Marg.Like.Binary}
\alias{Marg.Like.Binary}
\title{Compute the Marginal Likelihood of the GLMM-AR(p) Model by Using the MCMC Output and Reduced Samples}
\description{
This function estimates the marginal likelihood used for model comparison via the Bayes factor. It is built into the \code{GLMMARp.Binary} function, but can also be used independently; its arguments are supplied in the same way as those of \code{GLMMARp.Binary}.
}
\usage{
Marg.Like.Binary(y,X1, Wi, St, Ai="NULL", Ft="NULL", Unit.Index,
Time.Index, timeint.add=FALSE,unitint.add=FALSE,
mcmc.output, reduced.mcmc, reduced.burnin, nlag,
beta0,B0, D0 , d0, E0, e0, tracking)
}
\arguments{
\item{y}{A vector of response variable, dichotomous (0/1).}
\item{X1}{A matrix of covariates with fixed effects.}
\item{Wi}{A matrix of covariates with subject-varying effects.}
\item{St}{A matrix of covariates with time-varying effects.}
\item{Ai}{A matrix of covariates explaining the subject-varying effects
with the same length of y (the values of time-invariant variables
have to be repeated over the same times of each subject,
for time-varying covariate in Ai, the function will automatically
use the within-subject mean). The default is "NULL", no group-level
covariates.}
\item{Ft}{matrix of covariates explaining the time-varying effects
with the same length of y (the function will automatically
use the within-time-period mean). The default is "NULL",
no group-level covariates.}
\item{Unit.Index}{A vector of subject index, i's. Note: the number of
observations of each unit should be larger than the lag order, nlag. Those
units which have fewer than or equal to nlag observations should be taken
out of the sample in order to use the function.}
\item{Time.Index}{A vector of time index, t's. Note: no missing observations
in the middle of the sample time periods of a unit are allowed. In other words,
unbalanced data structures are allowed, but no broken data structure.}
\item{timeint.add}{Should a time-specific intercept be added into the model?
It takes two values: TRUE or FALSE with default as FALSE.}
\item{unitint.add}{Should a subject-specific intercept be added into the model?
It takes two values: TRUE or FALSE with default as FALSE.}
\item{reduced.mcmc}{The number of iterations to return in the reduced mcmc simulations.}
\item{reduced.burnin}{The number of burn-in iterations for the sampler in the reduced
mcmc simulations.}
\item{mcmc.output}{The mcmc output from the full simulation of GLMM-AR(p) model.
The format has to be the same as in the GLMMARp.Binary() function.}
\item{beta0}{The prior mean of \eqn{\beta}{beta}. This should be a
vector with dimension equal to the number of fixed effect parameters
in the reduced form. Since the dimension is difficult for the user to
compute when the model contains multiple random coefficients and multiple
group-level predictors, the function will provide the correct dimension
in the error message if the dimension of beta0 is specified incorrectly,
and the user can respecify beta0 with this information and recall the
function. No default is provided.}
\item{B0}{The prior covariance matrix of \eqn{\beta}{beta}. This should be a
positive definite matrix with dimensions equal to the number of betas in
the reduced form of GLMM-AR(p). No default is provided.}
\item{d0, D0}{The degree of freedom and scale matrix of the Wishart prior
      on \eqn{b_i}, which is the subject-level residual.  D0 should
      not be too diffuse; otherwise it may take a long time for
      the chain to converge.  The recommended value is a*I, where a is
      between 1.67 and 10.  No default is provided.}
\item{e0, E0}{The degree of freedom and scale matrix of the Wishart prior
      on \eqn{c_t}, which is the time-level residual.  E0 should
      not be too diffuse; otherwise it may take a long time for
      the chain to converge.  The recommended value is a*I, where a is
      between 1.67 and 10.  No default is provided (priors have to
      be the same as those used in the full MCMC).}
\item{nlag}{A scalar of the lag order p, which should be an integer equal to
or greater than 1. In this version, the function does not support
the model with p=0, which can be estimated by using BUGs or JAGs.}
\item{tracking}{The tracking interval used in the simulation. Every "tracking"
      iterations, the function reports how many
      iterations in total have been done.}
}
\value{
A scalar which is the marginal likelihood (on a natural logarithm scale).
}
\examples{
\dontrun{
require(panel)
require(bayesSurv)
data(StateAR3)
data(StateAR2)
y <- StateFailure$failure
Unit <- StateFailure$country
Time <- StateFailure$year
Fixed <- cbind(StateFailure$poldemoc, StateFailure$bnkv123, StateFailure$bnkv117,
StateFailure$poldurab, StateFailure$faocalry, StateFailure$pwtcgdp,
StateFailure$macnac,StateFailure$macnciv, StateFailure$wdiinfmt,
StateFailure$ptsamnes, StateFailure$dis1, StateFailure$bnkv81,
StateFailure$change.demo)
UnitRandom <- cbind(log(StateFailure$pwtopen))
TimeRandom <- as.matrix(rep( 1, length(y)))
UnitPred <- cbind(StateFailure$macnac, StateFailure$poldemoc)
TimePred <- cbind(StateFailure$dis1)
TimePred <- "NULL"
marginalLikely <- Marg.Like.Binary(y=y,X1=Fixed, Wi=UnitRandom, St=TimeRandom,
Ai=UnitPred, Ft=TimePred, Unit.Index=Unit, Time.Index=Time,
timeint.add=0, unitint.add=1, mcmc.output=StateAR2, reduced.mcmc=100,
reduced.burnin=50, nlag=2, beta0=rep(0,15),
B0=diag(10, 15), D0=diag(1.67, 2) , d0=9, E0=6, e0=6,
tracking=100)
}
}
\concept{Bayes factor}
\concept{model comparison}
\seealso{\code{\link{GLMMARp.Binary}}}
\keyword{models}
|
60629a603f2222f8029b776facbd1d37eb786574 | 3b9f814cd611f6365506680253d9a24a2ff1ae51 | /14-Linerisasi-Data.R | a9c41f32f6bcc01b77a2635984951f5c7facb2c1 | [] | no_license | dewasemadi/Numerical-Computation-Code-With-R | 96c983f6664901e61b47fdf22b56a94add06aa85 | 5a1af6a4414d94fd47f867b1715bf403063aedac | refs/heads/master | 2023-06-04T12:24:54.474221 | 2021-06-16T09:33:31 | 2021-06-16T09:33:31 | 346,715,448 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | 14-Linerisasi-Data.R | x = c(-2,2,4,4,5,7,8,10,12,13)
# Nonlinear least-squares fit of the hyperbolic model y = d / (x + c).
# (x is defined on the preceding line of this script.)
y <- c(3,3,10,12,18,38,54,58,62,66)
nlsfit <- nls(y ~ d/(x+c), start = list(c = 1, d = 1))
summary(nlsfit)
plot(x, y, pch = 19, col = "blue")
# Fix: abline(nlsfit) would treat the first two nls coefficients (c, d) as an
# intercept/slope pair and draw a meaningless straight line. Draw the fitted
# curve through the data points instead, ordered by x.
ord <- order(x)
lines(x[ord], fitted(nlsfit)[ord])
92c17c50fde7519e15a0de8a63dc9d0973fd9260 | 58f4573bc3e9efbc14ff9ebbf089231c246cf066 | /demos/caseStudies/cts/cts1.R | 9375e14e2f8f47f8283221e33366a68f9771e2d9 | [] | no_license | Anathawa/mlxR | 1a4ec2f277076bd13525f0c1d912ede3d20cb1cc | 7e05119b78b47c8b19126de07c084e7d267c4baf | refs/heads/master | 2021-01-19T09:17:35.765267 | 2017-04-05T18:00:39 | 2017-04-05T18:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,510 | r | cts1.R | setwd(dirname(parent.frame(2)$ofile))
# Case study: dose titration with a PK model (and later a PK/PD model) using
# mlxR's simulx() plus the local titration() helper defined in titration.R
# (sourced below; not shown here). Each scenario simulates 20 individuals and
# arranges the concentration/effect and measurement plots side by side.
library(gridExtra)
source('titration.R')
#-------------------------------------
# One-compartment PK model with first-order absorption (ka, V, k), lognormal
# residual error on the observed concentration y1, and lognormal inter-
# individual variability on all three PK parameters.
pk.model <- inlineModel("
[LONGITUDINAL]
input = {ka, V, k, a1}
EQUATION:
Cc = pkmodel(ka, V, k)
DEFINITION:
y1 = {distribution=lognormal, prediction=Cc, sd=a1}
[INDIVIDUAL]
input={ka_pop, omega_ka, V_pop, omega_V, k_pop, omega_k}
DEFINITION:
ka = {distribution=lognormal, prediction=ka_pop, sd=omega_ka}
V = {distribution=lognormal, prediction=V_pop, sd=omega_V}
k = {distribution=lognormal, prediction=k_pop, sd=omega_k}
")
# Repeated 20 mg doses every 12 h up to t = 440 h; population parameters,
# simulation group of 20 individuals, output grids, and a fixed seed.
adm <- list(time=seq(0,to=440,by=12), amount=20)
ppk <- c(ka_pop=0.4, V_pop=10, k_pop=0.05,
         omega_ka=0.3, omega_V=0.5, omega_k=0.1, a1=0.05)
g <- list(size=20, level='individual')
Cc <- list(name='Cc', time=seq(0,to=440,by=1))
y1 <- list(name='y1', time=seq(4,to=440,by=24))
s <- list(seed=1234)
# Scenario 1: fixed dosing, no titration.
res1 <- simulx(model = pk.model,
               parameter = ppk,
               treatment = adm,
               group = g,
               output = list(Cc, y1),
               settings = s)
plot1 <- ggplotmlx(data=res1$Cc, aes(x=time, y=Cc, colour=id)) +
  geom_line(size=0.5) + xlab("time (hour)") + ylab("Cc (mg/l)") +
  theme(legend.position="none")
plot2 <- ggplotmlx(data=res1$y1, aes(x=time, y=y1, colour=id)) +
  geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured concentration (mg/l)") +
  theme(legend.position="none")
grid.arrange(plot1, plot2, ncol=2)
#--------------------------------------------------------
# Scenario 2: titration rule r1 -- from t = 124 h, when the measured
# concentration y1 exceeds 5, apply factor 0.75 (presumably scaling the dose
# down by 25%; see titration() in titration.R for the exact semantics).
r1 <- list(name='y1',
           time=seq(124,to=440,by=24),
           condition="y1>5",
           factor=0.75)
res2 <- titration(model = pk.model,
                  parameter = ppk,
                  treatment = adm,
                  output = list(Cc, y1),
                  rule = r1,
                  group = g,
                  settings = s)
plot3 <- ggplotmlx(data=res2$Cc, aes(x=time, y=Cc, colour=id)) +
  geom_line(size=0.5) + xlab("time (hour)") + ylab("Cc (mg/l)") +
  theme(legend.position="none") + geom_hline(yintercept=5)
plot4 <- ggplotmlx(data=res2$y1, aes(x=time, y=y1, colour=id)) +
  geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured concentration (mg/l)") +
  theme(legend.position="none") + geom_hline(yintercept=5)
grid.arrange(plot3, plot4, ncol=2)
#--------------------------------------------------------
# Scenario 3: add rule r2 -- when y1 falls below 3, apply factor 1.5, so the
# concentration is kept within the 3-5 target band (shown as hlines).
r2 <- list(name='y1',
           time=seq(124,to=440,by=24),
           condition="y1<3",
           factor=1.5)
res3 <- titration(model = pk.model,
                  parameter = ppk,
                  treatment = adm,
                  output = list(Cc, y1),
                  rule = list(r1,r2),
                  group = g,
                  settings = s)
plot5 <- ggplotmlx(data=res3$Cc, aes(x=time, y=Cc, colour=id)) +
  geom_line(size=0.5) + xlab("time (hour)") + ylab("Cc (mg/l)") +
  theme(legend.position="none") + geom_hline(yintercept=c(3,5))
plot6 <- ggplotmlx(data=res3$y1, aes(x=time, y=y1, colour=id)) +
  geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured concentration (mg/l)") +
  theme(legend.position="none") + geom_hline(yintercept=c(3,5))
grid.arrange(plot5, plot6, ncol=2)
#--------------------------------------------------------
# PK/PD model: same PK block plus an Emax effect E driven by concentration,
# with a normally distributed measured effect y2.
pkpd.model <- inlineModel("
[LONGITUDINAL]
input = {ka, V, k, Emax, EC50, a1, a2}
EQUATION:
Cc = pkmodel(ka, V, k)
E = Emax*Cc/(EC50+Cc)
DEFINITION:
y1 = {distribution=lognormal, prediction=Cc, sd=a1}
y2 = {distribution=normal, prediction=E, sd=a2}
[INDIVIDUAL]
input={ka_pop, omega_ka, V_pop, omega_V, k_pop, omega_k,
Emax_pop, omega_Emax, EC50_pop, omega_EC50}
DEFINITION:
ka = {distribution=lognormal, prediction=ka_pop, sd=omega_ka}
V = {distribution=lognormal, prediction=V_pop, sd=omega_V}
k = {distribution=lognormal, prediction=k_pop, sd=omega_k}
Emax = {distribution=lognormal, prediction=Emax_pop, sd=omega_Emax}
EC50 = {distribution=lognormal, prediction=EC50_pop, sd=omega_EC50}
")
# Additional PD population parameters and output grids for E and y2.
ppd <- c(Emax_pop=100, EC50_pop=3,
         omega_Emax=0.1, omega_EC50=0.2, a2=5)
E <- list(name='E', time=seq(0,to=440,by=1))
y2 <- list(name='y2', time=seq(16,to=440,by=36))
# Scenario 4: PK/PD simulation under fixed dosing.
res4 <- simulx(model = pkpd.model,
               parameter = c(ppk,ppd),
               treatment = adm,
               output = list(Cc, E, y1, y2),
               group = g,
               settings = s)
pl1=ggplotmlx(data=res4$Cc, aes(x=time, y=Cc, colour=id)) + geom_line(size=0.5) +
  xlab("time (hour)") + ylab("Cc (mg/l)") + theme(legend.position="none")
pl2=ggplotmlx(data=res4$E, aes(x=time, y=E, colour=id)) + geom_line(size=0.5) +
  xlab("time (hour)") + ylab("E") + theme(legend.position="none")
pl3=ggplotmlx(data=res4$y1, aes(x=time, y=y1, colour=id)) + geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured concentration (mg/l)") + theme(legend.position="none")
pl4=ggplotmlx(data=res4$y2, aes(x=time, y=y2, colour=id)) + geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured effect") + theme(legend.position="none")
grid.arrange(pl1, pl2, pl3, pl4, ncol=2)
#--------------------------------------------------------
# Scenario 5: titrate on both the concentration (r1) and the effect (r3 --
# factor 1.5 when the measured effect y2 drops below 40).
r3 <- list(name='y2',
           time=seq(88,to=440,by=36),
           condition="y2<40",
           factor=1.5)
res5 <- titration(model = pkpd.model,
                  parameter = c(ppk,ppd),
                  treatment = adm,
                  output = list(Cc, E, y1, y2),
                  rule = list(r1,r3),
                  group = g,
                  settings = s)
pl5=ggplotmlx(data=res5$Cc, aes(x=time, y=Cc, colour=id)) + geom_line(size=0.5) +
  xlab("time (hour)") + ylab("Cc (mg/l)") + theme(legend.position="none") +
  geom_hline(yintercept=5)
pl6=ggplotmlx(data=res5$E, aes(x=time, y=E, colour=id)) + geom_line(size=0.5) +
  xlab("time (hour)") + ylab("E") + theme(legend.position="none") +
  geom_hline(yintercept=40)
pl7=ggplotmlx(data=res5$y1, aes(x=time, y=y1, colour=id)) + geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured concentration (mg/l)") + theme(legend.position="none") +
  geom_hline(yintercept=5)
pl8=ggplotmlx(data=res5$y2, aes(x=time, y=y2, colour=id)) + geom_line(size=0.5) + geom_point(size=2) +
  xlab("time (hour)") + ylab("measured effect") + theme(legend.position="none") +
  geom_hline(yintercept=40)
grid.arrange(pl5, pl6, pl7, pl8, ncol=2)
|
e703b540c778227ecd3ea8bf62f789ebb0a825f7 | e6a89ab297f45849492dbd60171570c2677865cb | /ViewShed_R/compareTurbineData.R | 8cc86505a111aae37b2a2a5dcfd2f48ef0d949e0 | [] | no_license | DanOlner/viewshed | 8c3f863e09a6f13eeb81afb3d9cfae93e9aa32b5 | 130d3e5bc6552dd006497b8fdd55f58df8654ad8 | refs/heads/master | 2020-04-12T01:35:44.527131 | 2016-11-01T16:45:27 | 2016-11-01T16:45:27 | 50,677,179 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,222 | r | compareTurbineData.R | #comparing older versions of turbine data to what we ended up with
# Compare an older turbine data set (previous project, Stata .dta format) with
# the current final turbine CSV: export the old data for mapping elsewhere,
# then plot turbine status dates split by tip height and summarise wind-farm
# sizes. Input paths are hard-coded to local Windows locations.
library(dplyr)
library(tidyr)
library(pryr)
library(zoo)
library(ggplot2)
library(readstata13)
#load possible old turbine files...
oldTurbs <- read.dta13("C:/Users/SMI2/Dropbox/WindFarms/Data/turbinedata/turbine_data.dta")
#How does that compare on a map to what we currently have?
write.csv(oldTurbs, "data/oldTurbinesFromPrevProject.csv")
#Load new to compare dates
newTurbs <- read.csv("C:/Data/WindFarmViewShed/ViewshedPython/Data/turbinesFinal_reducedColumns_tipHeightsComplete.csv")
newTurbs$statusDateFormatted <- as.Date(newTurbs$statusDateFormatted)
#Graph based on height - are dates different?
# 0/1 flag for turbines with tip height above 100 m.
newTurbs$tipMoreThan100m <- 0
newTurbs$tipMoreThan100m[newTurbs$TipHeight >100] <- 1
# Yearly (binwidth = 365 days) counts of turbines over time, filled by the
# height flag; the density-plot alternative is kept commented out.
ggplot(newTurbs, aes(x=statusDateFormatted, fill=factor(tipMoreThan100m))) +
#   geom_density(alpha = 0.2)
  geom_area(alpha = 0.3, stat = "bin", position = "identity", colour="black", binwidth=365)
# Tip height against status date, one point per turbine.
ggplot(newTurbs, aes(x=statusDateFormatted, y =TipHeight)) +
  geom_point()
#~~~~~~~~~~~~~~~~~~~~~~~~
#Windfarms by size
# Turbine count per wind farm, sorted largest first.
windfarms <- newTurbs %>% group_by(nameMinusTurbine) %>%
  summarise(count = n())
windfarms <- windfarms %>% arrange(-count)
|
876a88e7d56b7955000367ce80af9dcf02b8dd37 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/taxize/examples/get_wormsid.Rd.R | 68823f0a3b8ba8d5a1c55944411e93decc05eb60 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,013 | r | get_wormsid.Rd.R | library(taxize)
### Name: get_wormsid
### Title: Get Worms ID for a taxon name
### Aliases: get_wormsid as.wormsid as.wormsid.wormsid as.wormsid.character
### as.wormsid.list as.wormsid.numeric as.wormsid.data.frame
### as.data.frame.wormsid get_wormsid_
### ** Examples
## Not run:
##D (x <- get_wormsid('Platanista gangetica'))
##D attributes(x)
##D attr(x, "match")
##D attr(x, "multiple_matches")
##D attr(x, "pattern_match")
##D attr(x, "uri")
##D
##D get_wormsid('Gadus morhua')
##D get_wormsid('Pomatomus saltatrix')
##D get_wormsid(c("Platanista gangetica", "Lichenopora neapolitana"))
##D
##D # by common name
##D get_wormsid("dolphin", 'common')
##D get_wormsid("clam", 'common')
##D
##D # specify rows to limit choices available
##D get_wormsid('Plat')
##D get_wormsid('Plat', rows=1)
##D get_wormsid('Plat', rows=1:2)
##D
##D # When not found
##D get_wormsid("howdy")
##D get_wormsid(c('Gadus morhua', "howdy"))
##D
##D # Convert a wormsid without class information to a wormsid class
##D # already a wormsid, returns the same
##D as.wormsid(get_wormsid('Gadus morhua'))
##D # same
##D as.wormsid(get_wormsid(c('Gadus morhua', 'Pomatomus saltatrix')))
##D # numeric
##D as.wormsid(126436)
##D # numeric vector, length > 1
##D as.wormsid(c(126436,151482))
##D # character
##D as.wormsid("126436")
##D # character vector, length > 1
##D as.wormsid(c("126436","151482"))
##D # list, either numeric or character
##D as.wormsid(list("126436","151482"))
##D ## dont check, much faster
##D as.wormsid("126436", check=FALSE)
##D as.wormsid(126436, check=FALSE)
##D as.wormsid(c("126436","151482"), check=FALSE)
##D as.wormsid(list("126436","151482"), check=FALSE)
##D
##D (out <- as.wormsid(c(126436,151482)))
##D data.frame(out)
##D as.wormsid( data.frame(out) )
##D
##D # Get all data back
##D get_wormsid_("Plat")
##D get_wormsid_("Plat", rows=1)
##D get_wormsid_("Plat", rows=1:2)
##D get_wormsid_("Plat", rows=1:75)
##D # get_wormsid_(c("asdfadfasd","Plat"), rows=1:5)
## End(Not run)
|
10702bf16f4a47d3a329585acc967cc56ec823b8 | d3ea666d4f5de858f161355e84e1a5307f62817b | /scripts/2019-12-03_radar.R | 6ffdd66d5dff3aae7fad2825e26b7aedec67069c | [] | no_license | ryanpeek/2019_mapping | 56709a1c9630e7986491d7fe68bae01c072c13a7 | 8ff136ec7b75c9224105a056de1dd9d42627bfdf | refs/heads/main | 2021-07-09T00:45:46.197391 | 2020-12-21T21:36:45 | 2020-12-21T21:36:45 | 220,285,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | 2019-12-03_radar.R | # cool radar thing, credit to hrbrmstr tweet:
# CODE: https://paste.sr.ht/~hrbrmstr/c63d38b7bdea4385e165940f451198e122c69fa4
# TWEET: https://twitter.com/hrbrmstr/status/1201975946015858697
library(rvest)
library(magick)
library(glue)
library(tidyverse)
# see here for list of radar sites: https://radar.weather.gov/ridge/
# then click on one and look at upper left side of screen for three letter abbrev:
# e.g., Sac: DAX, Hanford: HNX, WestCA: MUX
# Build an animated GIF of recent NWS radar frames for one station:
# download the county-outline overlay GIF, scrape the station's frame
# directory for image links, read each frame (failures become NULL via
# possibly() and are dropped by compact()), composite the frames over the
# county map on a black background, and animate the stack.
#   station: three-letter NWS radar site ID (see links above).
animate_radar <- function(station = "DAX") {
  county_url <- "https://radar.weather.gov/Overlays/County/Short/{station}_County_Short.gif"
  county <- image_read(glue(county_url))
  # Fault-tolerant reader: a bad/missing frame yields NULL instead of erroring.
  ir <- possibly(image_read, NULL)
  frames_dir_url <- "https://radar.weather.gov/ridge/RadarImg/N0R/{station}/"
  httr::GET(url = glue(frames_dir_url)) %>%
    httr::content() %>%
    html_nodes(xpath = glue(".//a[contains(@href, '{station}_')]")) %>%
    html_attr("href") %>%
    sprintf(glue("https://radar.weather.gov/ridge/RadarImg/N0R/{station}/%s"), .) %>%
    map(ir) %>%
    compact() %>%
    do.call(c, .) -> radar_imgs
  image_background(county, "black", flatten = TRUE) %>%
    image_composite(radar_imgs) %>%
    image_animate() -> gif
  gif
}
# Demo calls for a few California-area stations.
animate_radar("MUX")
animate_radar("DAX")
animate_radar("HNX")
animate_radar("ESX")
## library
# Same idea via the rradar package; `stations` lists available sites. Note the
# locally defined animate_radar() above still masks rradar's version here.
library(rradar)
library(tidyverse)
filter(stations, state == "California")
animate_radar("MUX")
|
92eed3679c88f80f2b912b8818234b8f5cee5379 | 8bd647382bfa4f1d2cb0891a46bd5b79dd1caae5 | /man/ConvertUnknownPEDData.Rd | f54fc8945f41b72d5441c0d20c379fd1a8d99715 | [] | no_license | cran/OriGen | 9d22df3937ecc1f3f91c453c8f85c4120ddfc48b | 2a0f82a5d143bde447ec0a1c3722dce527b10299 | refs/heads/master | 2021-01-10T13:14:55.085197 | 2016-01-16T09:10:00 | 2016-01-16T09:10:00 | 48,085,290 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,819 | rd | ConvertUnknownPEDData.Rd | \name{ConvertUnknownPEDData}
\alias{ConvertUnknownPEDData}
\title{
Plink PED file conversion for known and unknown data
}
\description{
This function converts two Plink PED/MAP files (one for the known samples and one with unknown locations) into the data format required for OriGen.
}
\usage{
ConvertUnknownPEDData(PlinkFileName,LocationFileName,PlinkUnknownFileName)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{PlinkFileName}{Base name of Plink PED file (i.e. without ".ped" or ".map") containing the individuals with known locations.}
\item{LocationFileName}{Space or tab delimited text file with Longitude and Latitude coordinates for each individual listed in the 4th and 5th columns respectively. Note that rows should correspond to the individuals in the Plink File. Also, this file should have a header row.}
\item{PlinkUnknownFileName}{Base name of Plink PED file (i.e. without ".ped" or ".map") containing the individuals with unknown locations.}
}
\value{
List with the following components:
\item{DataArray}{An array giving the number of major/minor SNPs (defined as the most frequently occurring in the dataset) grouped by sample sites for each SNP.  The dimension of this array is [2,SampleSites,NumberSNPs].}
\item{SampleCoordinates}{This is an array which gives the longitude and latitude of each of the found sample sites. The dimension of this array is [SampleSites,2], where the second dimension represents longitude and latitude respectively.}
\item{PlinkFileName}{This shows the inputted PlinkFileName with ".ped" attached.}
\item{LocationFile}{This shows the inputted LocationFileName.}
\item{SampleSites}{This shows the integer number of sample sites found.}
\item{NumberSNPs}{This shows the integer number of SNPs found.}
\item{UnknownPEDFile}{This shows the inputted PED file for the unknown individuals.}
\item{NumberUnknowns}{This is an integer value showing the number of unknowns found in the UnknownPEDFile.}
\item{UnknownData}{An array showing the unknown individuals genetic data. The dimension of this array is [NumberUnknowns,NumberSNPs].}
\item{Membership}{This is an integer valued vector showing the group number of each member of the inputted known group. The dimension of this array is [NumberKnown].}
\item{NumberKnown}{This is an integer value showing the number of known found in the PlinkFileName.}
}
\references{
Ranola J, Novembre J, Lange K (2014) Fast Spatial Ancestry via Flexible Allele Frequency Surfaces. Bioinformatics, in press.
}
\author{
John Michael Ranola, John Novembre, and Kenneth Lange
}
\seealso{
%\code{\link{ConvertPEDData}} for converting Plink PED files into a format appropriate for analysis,
%\code{\link{FitOriGenModel}} for fitting allele surfaces to the converted data,
%\code{\link{PlotAlleleFrequencySurface}} for a quick way to plot the resulting allele frequency surfaces from \code{FitOriGenModel},
\code{\link{ConvertUnknownPEDData}} for converting two Plink PED files (known and unknown)into a format appropriate for analysis,
\code{\link{FitOriGenModelFindUnknowns}} for fitting allele surfaces to the converted data and finding the locations of the given unknown individuals,
\code{\link{PlotUnknownHeatMap}} for a quick way to plot the resulting unknown heat map surfaces from \code{FitOriGenModelFindUnknowns}.
%\code{\link{FitAdmixedFindUnknowns}} for fitting allele surfaces to the converted data and finding the locations of the given unknown individuals who may be admixed,
%\code{\link{PlotAdmixedSurface}} for a quick way to plot the resulting admixture surfaces from \code{FitAdmixedFindUnknowns},
%\code{\link{RankSNPsLRT}} for reducing the number of SNPs using a likelihood ratio test criteria or informativeness for assignment,
%\code{\link{FindRhoParamterCrossValidation}} for choosing an appropriate Rho parameter by way of crossvalidation,
}
\examples{
#Note that Plink files "10SNPs.ped", "10SNPs.map" and also "Locations.txt"
#are included in the data folder of the OriGen package with ".txt" appended to the Plink files.
#Please remove ".txt" and navigate to the appropriate location
#before testing the following commands.
#Note that this was done to allow inclusion of the test data in the package.
\dontrun{trials3=ConvertUnknownPEDData("10SNPs","Locations.txt","10SNPs")}
\dontrun{str(trials3)}
MaxGridLength=30
RhoParameter=10
\dontrun{trials4=FitOriGenModelFindUnknowns(trials3$DataArray,trials3$SampleCoordinates,
trials3$UnknownData[1:2,],MaxGridLength,RhoParameter)}
\dontrun{PlotUnknownHeatMap(trials4,UnknownNumber=1,MaskWater=TRUE)}
}
\keyword{Conversion}% __ONLY ONE__ keyword per line
\keyword{Plink}% __ONLY ONE__ keyword per line
\keyword{Files}% __ONLY ONE__ keyword per line
\keyword{PED}% __ONLY ONE__ keyword per line
|
ee14c73679100f45f15866da08047a9bef818b83 | 1998826ea362c9f4fd31dfc011ac9489671647c8 | /Packages/ancom_for_R_4_and_above/ancom.R/R/shiny_ancomUI.r | 6a8289218395d128485950afb9415a8fb1e0d6c3 | [] | no_license | barakbri/CompositionalAnalysis_CodeBase | f0d930decd96b2f6796c3e0420b48b618996288f | 252e220510be4f94ee1b8310762f937fa1cbf62c | refs/heads/master | 2022-05-23T00:38:49.545762 | 2022-02-13T09:09:20 | 2022-02-13T09:09:20 | 175,478,030 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,753 | r | shiny_ancomUI.r | #'
#' ANCOM Shiny user interface
#'
#' Builds the \code{fluidPage} UI for the ANCOM app: a sidebar collecting the
#' data file, output path and analysis options (repeated measures,
#' multiple-testing correction, FDR / significance level, figure sizing), and
#' a main panel whose content is the \code{uiOutput("theTabset")} placeholder
#' filled in by the server.
#'
#' @rdname shiny_ancom
#'
#' @import shiny
#' @export
#'
shiny_ancomUI <- fluidPage(
  headerPanel("Analysis of Composition of Microbiomes (ANCOM) v1.1-3"),
  sidebarLayout(
    sidebarPanel(
      ##
      ## Data input
      ##
      fileInput('file1', 'Choose data file',
                accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv' )
      ),
      helpText("Accepted file formats: csv, txt"),
      textInput( 'file_out' , "Path of output file (without file name)" ),
      ##
      ## Main controls
      ##
      hr(),
      #helpText("Box edges flash red if selected value is out of bounds."),
      #hr(),
      ## Orientation of the uploaded table (subjects x OTUs, or transposed).
      selectInput(inputId="datafmt", width= '90%' ,
                  label = "Format of input dataset:",
                  choices = c("Subjects on rows, OTUs on columns" = "wide",
                              "OTUs on rows, Subjects on columns" = "tall")
      ),
      #checkboxInput(inputId = "wexact",
      #              label = "Use exact p-values",
      #              value=FALSE
      #),
      checkboxInput(inputId = "repeated",
                    label = "Repeated measures",
                    value=FALSE
      ),
      checkboxInput(inputId = "adjust",
                    label = "Correct for multiple testing",
                    value=FALSE
      ),
      ## Exactly one of the two thresholds below is visible, driven by the
      ## "adjust" checkbox: target FDR when correcting for multiple testing,
      ## a plain significance level otherwise.
      conditionalPanel(condition = "input.adjust == true ",
                       numericInput(inputId = "fdr",
                                    label = "Enter desired FDR (between 0 and 1):",
                                    min=0 , max=1 , value=0.05, step=0.005
                       )
      ),
      conditionalPanel(condition = "input.adjust == false ",
                       numericInput(inputId = "alpha",
                                    label = "Enter significance level (between 0 and 1):",
                                    min=0 , max=1 , value=0.05, step=0.005
                       )
      ),
      #numericInput(inputId = "ncores",
      #             label = "Number of cores (to run in parallel)",
      #             min=1 , max=1000 , value=1, step=1
      #),
      ## Optional figure width/height/columns controls, revealed by "fixPlot".
      checkboxInput(inputId = "fixPlot",
                    label = "Adjust figure:",
                    value=FALSE
      ),
      conditionalPanel(condition = "input.fixPlot == true ",
                       numericInput(inputId = "pltWidth",
                                    label = "Width of plot (%)",
                                    min=10 , max=100 , value=100, step=1
                       ),
                       numericInput(inputId = "pltHeight",
                                    label = "Height of plot (pixel)",
                                    min=100 , max=10000 , value=400, step=1
                       ),
                       sliderInput(inputId = "ncols",
                                   label = "Number of columns for the plot",
                                   min=1 , max=10 , value=3, step=1
                       )
      ),
      ##
      ## Action buttons
      ##
      hr(),
      actionButton(inputId = "compute1",
                   label = "Run ANCOM"
      ),
      actionButton(inputId = "compute2",
                   label = "Update Plot"
      ),
      helpText("'Update Plot' must be clicked before the plot will appear.")
    ),
    mainPanel(
      #uiOutput("plot.ui")
      #tabsetPanel(type = "tabs",
      #            tabPanel("Summary", verbatimTextOutput("summary") ),
      #            tabPanel("Boxplots of Abundances", plotOutput("plot") ),
      #            tabPanel("Plot B", uiOutput("plot.ui") ),
      #            tabPanel("Etc" , verbatimTextOutput("other") )
      #)
      ## Dynamic tabset rendered by the server.
      uiOutput("theTabset")
    )
  ))
|
004777183f4048427d483581a280b6cd19c065cb | 5a3678fe194828fcf302e10f75c44e816e3b15a4 | /man/computeChar_substr.Rd | e4b2d72caa7fca37e05ae3361b2919a5cbb2d020 | [] | no_license | cran/translateSPSS2R | ba9565c766e7fced37d8f7f81a9164ebbf1bdf4c | 36df20fbd07ce46105e694fc6afd42ec343b77f5 | refs/heads/master | 2020-04-04T12:48:24.233519 | 2015-06-23T00:00:00 | 2015-06-23T00:00:00 | 37,931,559 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 797 | rd | computeChar_substr.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/xpssComputeStrings.R
\name{computeChar_substr}
\alias{computeChar_substr}
\title{creates a substring}
\usage{
computeChar_substr (x,pos = NULL, length= NULL)
}
\arguments{
\item{x}{input character vector.}
\item{pos}{atomic numeric. Indicates the start of the substring.}
\item{length}{atomic numeric. Specifies the length of the substring.}
}
\value{
String. Returns a shortened string.
}
\description{
Helper Function for xpssCompute. R Implementation of the SPSS \code{CHAR.SUBSTR} Function.
}
\examples{
xpssCompute(x=fromXPSS, variables="V1", fun="computeChar_substr", pos = 2, length=3)
}
\author{
Bastian Wiessner
}
\seealso{
\code{\link{substr}}
}
\keyword{internal}
|
749e930d3242f3cc6247d6e626368f12efb46b9b | 2aa0a2d8f4e6ef9d2879334c08094d0a95941ff4 | /myshinyapp/server.R | 63020907208bc385b341adc913cf858a7607977b | [] | no_license | pennybear1009/myfirstshinyapp | b91798a7435a44fd0f93577ff881493c397a78a2 | cb6fabc7fa2c4bc4c6db264bc9abf75db1b0cd43 | refs/heads/master | 2021-01-10T05:33:07.533007 | 2016-01-27T07:46:20 | 2016-01-27T07:46:20 | 50,165,150 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 924 | r | server.R |
# Server logic for the Shiny app: renders a scatterplot matrix of the
# mtcars columns chosen through input$feature, with the absolute Pearson
# correlation printed in each upper panel (larger text = stronger
# correlation). More on building Shiny apps: http://shiny.rstudio.com
library(shiny)

shinyServer(function(input, output) {
  output$scatterplotmatrix <- renderPlot({
    # Upper-panel callback for pairs(): draws |cor(x, y)| centred in the
    # panel, scaling the label size with the strength of the correlation.
    cor_panel <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
      old_usr <- par("usr")
      on.exit(par(old_usr))
      par(usr = c(0, 1, 0, 1))
      r <- abs(cor(x, y, use = "complete.obs"))
      label <- format(c(r, 0.123456789), digits = digits)[1]
      label <- paste(prefix, label, sep = "")
      if (missing(cex.cor)) {
        cex.cor <- 0.8 / strwidth(label)
      }
      text(0.5, 0.5, label, cex = cex.cor * (1 + r) / 2)
    }
    pairs(mtcars[, input$feature], upper.panel = cor_panel)
  })
})
|
950df74accbc7bdff9efbb3359a25a8a25a62d69 | a24aa2f4f09551d54813cafa3e29645b672803d3 | /src/resources/plotTLODR.R | ce3e6ba11065f2eb05537902d87c33a9dd725457 | [
"BSD-3-Clause"
] | permissive | wangzhennan14/Anaquin | 59ecae7fcdb9be5e2f3020c4aa5a1918a4348ec3 | c69f27454ed7be42095261ba560583244c0ce281 | refs/heads/master | 2021-01-18T15:30:21.351242 | 2017-03-28T09:57:32 | 2017-03-28T09:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,708 | r | plotTLODR.R | unsigned char src_r_plotTLODR_R[] = {
0x23, 0x0a, 0x23, 0x20, 0x41, 0x6e, 0x61, 0x71, 0x75, 0x69, 0x6e, 0x20,
0x2d, 0x20, 0x53, 0x65, 0x71, 0x75, 0x69, 0x6e, 0x20, 0x73, 0x74, 0x61,
0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x20, 0x61, 0x6e, 0x61,
0x6c, 0x79, 0x73, 0x69, 0x73, 0x2e, 0x20, 0x56, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x20, 0x31, 0x2e, 0x30, 0x2e, 0x0a, 0x23, 0x0a, 0x23, 0x20,
0x54, 0x68, 0x69, 0x73, 0x20, 0x52, 0x20, 0x73, 0x63, 0x72, 0x69, 0x70,
0x74, 0x20, 0x77, 0x61, 0x73, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x65, 0x64, 0x20, 0x61, 0x74, 0x20, 0x25, 0x31, 0x25, 0x2e, 0x0a,
0x23, 0x0a, 0x23, 0x20, 0x20, 0x20, 0x20, 0x25, 0x32, 0x25, 0x0a, 0x23,
0x0a, 0x0a, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x28, 0x41, 0x6e,
0x61, 0x71, 0x75, 0x69, 0x6e, 0x29, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61,
0x20, 0x3c, 0x2d, 0x20, 0x72, 0x65, 0x61, 0x64, 0x2e, 0x63, 0x73, 0x76,
0x28, 0x27, 0x25, 0x33, 0x25, 0x2f, 0x25, 0x34, 0x25, 0x27, 0x2c, 0x20,
0x72, 0x6f, 0x77, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x31, 0x2c, 0x20,
0x73, 0x65, 0x70, 0x3d, 0x27, 0x5c, 0x74, 0x27, 0x29, 0x0a, 0x0a, 0x23,
0x20, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x20, 0x75, 0x6e, 0x64, 0x65,
0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x73, 0x65, 0x71, 0x75, 0x69,
0x6e, 0x73, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3c, 0x2d, 0x20, 0x64,
0x61, 0x74, 0x61, 0x5b, 0x21, 0x69, 0x73, 0x2e, 0x6e, 0x61, 0x28, 0x64,
0x61, 0x74, 0x61, 0x24, 0x4f, 0x62, 0x73, 0x4c, 0x46, 0x43, 0x29, 0x2c,
0x5d, 0x0a, 0x0a, 0x23, 0x20, 0x43, 0x68, 0x6f, 0x6f, 0x73, 0x65, 0x20,
0x79, 0x6f, 0x75, 0x72, 0x20, 0x46, 0x44, 0x52, 0x20, 0x72, 0x61, 0x74,
0x65, 0x0a, 0x46, 0x44, 0x52, 0x20, 0x3c, 0x2d, 0x20, 0x30, 0x2e, 0x31,
0x0a, 0x0a, 0x78, 0x6c, 0x61, 0x62, 0x20, 0x20, 0x3c, 0x2d, 0x20, 0x27,
0x41, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x20, 0x43, 0x6f, 0x75, 0x6e,
0x74, 0x73, 0x27, 0x0a, 0x79, 0x6c, 0x61, 0x62, 0x20, 0x20, 0x3c, 0x2d,
0x20, 0x27, 0x50, 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x27, 0x0a, 0x74,
0x69, 0x74, 0x6c, 0x65, 0x20, 0x3c, 0x2d, 0x20, 0x27, 0x4c, 0x4f, 0x44,
0x52, 0x20, 0x43, 0x75, 0x72, 0x76, 0x65, 0x73, 0x27, 0x0a, 0x0a, 0x23,
0x20, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x64, 0x20, 0x61, 0x62,
0x75, 0x6e, 0x64, 0x61, 0x6e, 0x63, 0x65, 0x0a, 0x6d, 0x65, 0x61, 0x73,
0x75, 0x72, 0x65, 0x64, 0x20, 0x3c, 0x2d, 0x20, 0x64, 0x61, 0x74, 0x61,
0x24, 0x4d, 0x65, 0x61, 0x6e, 0x0a, 0x0a, 0x23, 0x20, 0x45, 0x78, 0x70,
0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x6c, 0x6f, 0x67, 0x2d, 0x66, 0x6f,
0x6c, 0x64, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x0a, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x20, 0x3c, 0x2d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x24,
0x45, 0x78, 0x70, 0x4c, 0x46, 0x43, 0x0a, 0x0a, 0x23, 0x20, 0x4d, 0x65,
0x61, 0x73, 0x75, 0x72, 0x65, 0x64, 0x20, 0x70, 0x2d, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x0a, 0x70, 0x76, 0x61, 0x6c, 0x20, 0x3c, 0x2d, 0x20, 0x64,
0x61, 0x74, 0x61, 0x24, 0x50, 0x76, 0x61, 0x6c, 0x0a, 0x0a, 0x23, 0x20,
0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x64, 0x20, 0x71, 0x2d, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x0a, 0x71, 0x76, 0x61, 0x6c, 0x20, 0x3c, 0x2d,
0x20, 0x64, 0x61, 0x74, 0x61, 0x24, 0x51, 0x76, 0x61, 0x6c, 0x0a, 0x0a,
0x23, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x41, 0x6e, 0x61,
0x71, 0x75, 0x69, 0x6e, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x6f,
0x72, 0x20, 0x50, 0x6c, 0x6f, 0x74, 0x4c, 0x4f, 0x44, 0x52, 0x0a, 0x61,
0x6e, 0x61, 0x71, 0x75, 0x69, 0x6e, 0x20, 0x3c, 0x2d, 0x20, 0x41, 0x6e,
0x61, 0x71, 0x75, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x28, 0x61, 0x6e,
0x61, 0x6c, 0x79, 0x73, 0x69, 0x73, 0x3d, 0x27, 0x50, 0x6c, 0x6f, 0x74,
0x4c, 0x4f, 0x44, 0x52, 0x27, 0x2c, 0x20, 0x73, 0x65, 0x71, 0x73, 0x3d,
0x72, 0x6f, 0x77, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x28, 0x64, 0x61,
0x74, 0x61, 0x29, 0x2c, 0x20, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65,
0x64, 0x3d, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x64, 0x2c, 0x20,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x3d, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x2c,
0x20, 0x70, 0x76, 0x61, 0x6c, 0x3d, 0x70, 0x76, 0x61, 0x6c, 0x2c, 0x20,
0x71, 0x76, 0x61, 0x6c, 0x3d, 0x71, 0x76, 0x61, 0x6c, 0x29, 0x0a, 0x0a,
0x70, 0x6c, 0x6f, 0x74, 0x4c, 0x4f, 0x44, 0x52, 0x28, 0x61, 0x6e, 0x61,
0x71, 0x75, 0x69, 0x6e, 0x2c, 0x20, 0x78, 0x6c, 0x61, 0x62, 0x3d, 0x78,
0x6c, 0x61, 0x62, 0x2c, 0x20, 0x79, 0x6c, 0x61, 0x62, 0x3d, 0x79, 0x6c,
0x61, 0x62, 0x2c, 0x20, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x3d, 0x74, 0x69,
0x74, 0x6c, 0x65, 0x2c, 0x20, 0x46, 0x44, 0x52, 0x3d, 0x46, 0x44, 0x52,
0x2c, 0x20, 0x6c, 0x65, 0x67, 0x54, 0x69, 0x74, 0x6c, 0x65, 0x3d, 0x27,
0x4c, 0x46, 0x43, 0x27, 0x29, 0x0a
};
unsigned int src_r_plotTLODR_R_len = 750;
|
bf1540bc9fde7ab1bf9500716557f9ce1926568a | c6a27808c050fadf714aca7d7d5e548d2611a727 | /Uber.R | 5b643796184d666f1b13b295a2d5d1e37c7a5970 | [] | no_license | ee08b397/Uber-Data-Analysis-Challenge | 75fa053c32d5142460020146da2898857363da31 | 5a55810142a5bdb166e6c5518197d707c3e780b0 | refs/heads/master | 2021-01-16T19:31:59.626994 | 2015-10-02T21:59:31 | 2015-10-02T21:59:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,349 | r | Uber.R | ####### JSON dataset of client logins from an Uber city ######
# Loading the packages ----------------------------------------------------
# Install only what is missing: the original called install.packages()
# unconditionally on every run (and omitted xts, which is loaded below).
pkgs <- c("rjson", "lubridate", "quantmod", "forecast", "xts", "chron", "zoo")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(rjson)     # read the JSON login log
library(lubridate) # date/time helpers (ymd_hms, year, month, wday, ...)
library(quantmod)  # time-series analysis
library(forecast)  # time-series forecasting (auto.arima, forecast)
library(xts)       # extensible time-series classes
library(chron)     # chronological objects (as.chron)
library(zoo)       # ordered observations (zoo)

# Read the login timestamps; 'logins.json' must be in the working directory.
json_file <- "logins.json"
json_data <- fromJSON(paste(readLines(json_file), collapse = ""))

# One-column data frame: one row per login event.
logins.data.frame <- data.frame(json_data)
colnames(logins.data.frame) <- c("DateTime")
names(logins.data.frame)
dim(logins.data.frame)         # 22447 rows and 1 column
print(head(logins.data.frame)) # snapshot of the raw data frame
# DateTime
#1 2012-03-01T00:05:55+00:00
#2 2012-03-01T00:06:23+00:00
#3 2012-03-01T00:06:52+00:00
#4 2012-03-01T00:11:23+00:00
#5 2012-03-01T00:12:47+00:00
#6 2012-03-01T00:12:54+00:00
# Parse the ISO-8601 timestamp strings into POSIXct date-times (UTC).
logins.data.frame$DateTime <- ymd_hms(logins.data.frame$DateTime)
print(head(logins.data.frame))

range(logins.data.frame$DateTime)
# "2012-03-01 00:05:55 UTC" "2012-04-30 23:59:29 UTC"
# i.e. 1st March 2012 to 30th April 2012, i.e. 2 months of user logins data

# Extracting the calendar features from the login datetime
logins.data.frame$Year  <- year(logins.data.frame$DateTime)
logins.data.frame$Month <- month(logins.data.frame$DateTime)
logins.data.frame$Day   <- mday(logins.data.frame$DateTime)
logins.data.frame$Hour  <- hour(logins.data.frame$DateTime)
print(head(logins.data.frame))

# Unique values of each feature, spanning the (year, month, day, hour) grid
year.vector  <- unique(logins.data.frame$Year)
month.vector <- unique(logins.data.frame$Month)
day.vector   <- unique(logins.data.frame$Day)
hour.vector  <- unique(logins.data.frame$Hour)

# Count logins in every (year, month, day, hour) cell.
# Two fixes relative to the original loop:
#  * rows were grown with rbind() on every iteration (quadratic copying);
#    they are now collected in a pre-allocated list and bound once;
#  * the format string was passed to as.POSIXct() as its positional `tz`
#    argument; it is now passed as `format =`, with the timezone pinned to
#    UTC like the source data.
# NOTE(review): grid cells that are not real dates (e.g. 2012/4/31) still
# yield rows with an NA DateTime and 0 logins, exactly as before.
rows <- vector("list", length(year.vector) * length(month.vector) *
                 length(day.vector) * length(hour.vector))
cell <- 0L
for (i in year.vector)
{
  for (j in month.vector)
  {
    for (k in day.vector)
    {
      for (l in hour.vector)
      {
        # num of logins given year, month, day and hour of the day
        NumOfLogins <- nrow(logins.data.frame[logins.data.frame$Year == i
                                              & logins.data.frame$Month == j
                                              & logins.data.frame$Day == k
                                              & logins.data.frame$Hour == l,])
        stamp <- paste(paste(i, j, k, sep = "/"), l, sep = " ")
        dTime <- as.POSIXct(stamp, format = "%Y/%m/%d %H", tz = "UTC")
        day_of_week <- wday(dTime)
        day_of_week_Name <- factor(day_of_week,
                                   levels = c(1:7),
                                   labels = c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"))
        cell <- cell + 1L
        rows[[cell]] <- data.frame(DateTime = dTime, TotalLogins = NumOfLogins,
                                   Year = i, Month = j, Day = k, Hour = l,
                                   WeekDay = day_of_week,
                                   WeekDayName = day_of_week_Name)
      }
    }
  }
}
output.dataFrame <- do.call(rbind, rows)
print(head(output.dataFrame))
#              DateTime TotalLogins Year Month Day Hour WeekDay WeekDayName
#1  2012-03-01 00:00:00          31 2012     3   1    0       5    Thursday
#2  2012-03-01 01:00:00          18 2012     3   1    1       5    Thursday
########################## Total Number of Logins grouped per month #########################
# Sum the hourly counts within each calendar month (1..12).
tot.logins.month <- vapply(seq_len(12), function(m) {
  sum(output.dataFrame$TotalLogins[output.dataFrame$Month == m])
}, numeric(1))
print(tot.logins.month)
# [1]     0     0 10131 12316     0     0     0     0     0     0     0     0
plot(seq(1, 12), y = tot.logins.month,
     xlab = "Months in a year", ylab = "Number of logins",
     main = "Monthly distribution of Uber logins", type = "l", cex = 0.5, lwd = 2)

######################## Total Number of Logins grouped per Day of the Month #####################
# Sum the hourly counts for each day of the month (1..31).
tot.logins.day <- vapply(seq_len(31), function(d) {
  sum(output.dataFrame$TotalLogins[output.dataFrame$Day == d])
}, numeric(1))
print(tot.logins.day)
plot(seq(1, 31), y = tot.logins.day,
     xlab = "Days", ylab = "Number of logins",
     main = "Daily distribution of Uber logins", type = "l", cex = 0.5, lwd = 2)

######################## Total Number of Logins grouped per Days of Week ##########################
# Sum per weekday code; WeekDay was built upstream with 1 = Sunday ... 7 = Saturday.
tot.logins.week <- vapply(seq_len(7), function(w) {
  sum(output.dataFrame$TotalLogins[output.dataFrame$WeekDay == w])
}, numeric(1))
print(tot.logins.week)
# [1] 5173 2139 1861 2155 2857 3198 5064
plot(seq(1, 7), y = tot.logins.week,
     xlab = "Weekdays", ylab = "Number of logins",
     main = "Distribution of Uber logins by Days of Week", type = "l", cex = 0.5, lwd = 2)

################### Total Number of Logins grouped per hour of the day ##########################
# Sum per hour of day; element i of the result holds hour i - 1 (0..23).
tot.logins.hour <- vapply(0:23, function(h) {
  sum(output.dataFrame$TotalLogins[output.dataFrame$Hour == h])
}, numeric(1))
print(tot.logins.hour)
plot(seq(1, 24), y = tot.logins.hour,
     xlab = "Hours of the Day", ylab = "Number of logins",
     main = "Distribution of Uber logins by Hour of the Day", type = "l", cex = 0.5, lwd = 2)
######################### Time Series Analysis of Logins grouped per hour of the day #########################
# Build an hourly zoo series of login counts, indexed by chron date-times.
Hourly.ts.dataframe = output.dataFrame[c("DateTime", "TotalLogins")]
any(is.na(Hourly.ts.dataframe)) # False
Hourly.ts = zoo(Hourly.ts.dataframe[-1], as.chron(format(Hourly.ts.dataframe$DateTime)))
Hourly.ts[1:5]
#(03/01/12 00:00:00) (03/01/12 01:00:00) (03/01/12 02:00:00) (03/01/12 03:00:00) (03/01/12 04:00:00)
#                 31                  18                  37                  23                  14
# Autocorrelation diagnostics ahead of model selection.
acf(Hourly.ts)
pacf(Hourly.ts)
# Seasonal ARIMA chosen by AIC via stepwise search, non-seasonal orders up
# to (3, ., 3), seasonal orders up to (2, ., 2).
Hourly.auto.arima.fit = auto.arima(Hourly.ts, d = NA, D = NA, max.p = 3,max.q = 3,
                                   max.P = 2, max.Q = 2, max.order = 3, start.p = 2, start.q = 2,
                                   start.P = 1, start.Q = 1, stationary = FALSE, seasonal = TRUE,
                                   ic = c("aic"), stepwise = TRUE, trace = FALSE,
                                   approximation = FALSE, xreg = NULL,
                                   test = c("kpss", "adf", "pp"), seasonal.test = c("ocsb", "ch"),
                                   allowdrift = FALSE, lambda = NULL, parallel = FALSE, num.cores = NULL)
print(summary(Hourly.auto.arima.fit))
# Selected model (recorded run): ARIMA(1,1,0)(1,0,0)[24] with ar1 = -0.1866,
# sar1 = 0.263; AIC = 9827.73; training-set RMSE = 6.94, MAE = 4.99.
# One-day-ahead (24 h) forecast with a 90% prediction interval.
Hourly.forecast.OneDay = forecast(Hourly.auto.arima.fit, h = 24, level = c(90), fan = FALSE, xreg = NULL, bootstrap = FALSE)
# (Recorded point forecasts ranged roughly 10-16 logins/hour, with the 90%
# interval widening with the horizon -- see the forecast object for the
# full table.)
accuracy(Hourly.forecast.OneDay)
#                      ME     RMSE      MAE MPE MAPE      MASE         ACF1
#Training set -0.0103186 6.936712 4.992713 NaN  Inf 0.6196782 -0.001654817
plot(Hourly.forecast.OneDay, ylab = "Total Number of Logins",
     xlab = "Time", las = 1, lwd = 1.5)
# Longer horizons: two weeks and fifteen weeks ahead.
Hourly.forecast.short = forecast(Hourly.auto.arima.fit, h = 24*7*2, level = c(90), fan = FALSE, xreg = NULL, bootstrap = FALSE)
Hourly.forecast.long = forecast(Hourly.auto.arima.fit, h = 24*7*15, level = c(90), fan = FALSE, xreg = NULL, bootstrap = FALSE)
d7564f6c9277c631d537b3788a85ce8ca279ee64 | 29585dff702209dd446c0ab52ceea046c58e384e | /EGRET/R/processQWData.r | 1fcc8e82c2c1ad1ff71ba758677a6a5203d7317a | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,805 | r | processQWData.r | #' Processing of USGS NWIS Water Quality Data
#'
#' Processes Water Quality Portal data. This function looks at the detection
#' limit and detection conditions to determine whether a value is left
#' censored. Censored values are given the qualifier
#' "<". The dataframe is also converted from a long to wide format.
#'
#' @param data dataframe from Water Quality Portal
#' @param pCode logical if TRUE, assume data came from a pCode search, if FALSE, characteristic name.
#' @keywords data import USGS web service
#' @return data dataframe with first column dateTime, and at least one qualifier and value columns
#' (subsequent qualifier/value columns could follow depending on the number of parameter codes)
#' @export
#' @seealso \code{\link[dataRetrieval]{readWQPqw}}
#' @examples
#' \dontrun{
#' library(dataRetrieval)
#'
#' rawSample <- readWQPqw('USGS-01594440','', '', '')
#' rawSampleSelect <- processQWData(rawSample)
#'
#' rawWQP <- readWQPqw('21FLEECO_WQX-IMPRGR80','Phosphorus', '', '')
#' Sample2 <- processQWData(rawWQP)
#' }
processQWData <- function(data, pCode = TRUE){
  # Flag left-censored results from the detection-condition text.
  detectText <- data$ResultDetectionConditionText
  detectText <- toupper(detectText)
  qualifier <- rep("", length(detectText))
  qualifier[grep("NON-DETECT", detectText)] <- "<"
  qualifier[grep("NON DETECT", detectText)] <- "<"
  qualifier[grep("NOT DETECTED", detectText)] <- "<"
  qualifier[grep("DETECTED NOT QUANTIFIED", detectText)] <- "<"
  qualifier[grep("BELOW QUANTIFICATION LIMIT", detectText)] <- "<"
  # Also censor measured values below their reported detection limit.
  # The original used the scalar operator `&&` here, which compared only
  # the first elements (and is an error in R >= 4.3); the element-wise `&`
  # is required, and NA measurements must be excluded so the logical index
  # contains no NA (NA is not allowed in subscripted assignment).
  belowLimit <- !is.na(data$DetectionQuantitationLimitMeasure.MeasureValue) &
    !is.na(data$ResultMeasureValue) &
    data$ResultMeasureValue < data$DetectionQuantitationLimitMeasure.MeasureValue
  qualifier[belowLimit] <- "<"
  # Censored rows report the detection limit instead of the measured value.
  correctedData <- ifelse((nchar(qualifier) == 0), data$ResultMeasureValue, data$DetectionQuantitationLimitMeasure.MeasureValue)
  test <- data.frame(data$USGSPCode)
  test$dateTime <- data$ActivityStartDate
  originalLength <- nrow(test)
  test$qualifier <- qualifier
  test$value <- as.numeric(correctedData)
  # Rows without a sample date cannot be reshaped on dateTime; drop and warn.
  test <- test[!is.na(test$dateTime), ]
  newLength <- nrow(test)
  if (originalLength != newLength){
    numberRemoved <- originalLength - newLength
    warningMessage <- paste(numberRemoved, " rows removed because no date was specified", sep = "")
    warning(warningMessage)
  }
  # The column holding the parameter identifier depends on the search type.
  if (pCode){
    colnames(test) <- c("USGSPCode", "dateTime", "qualifier", "value")
    newTimeVar <- "USGSPCode"
  } else {
    colnames(test) <- c("CharacteristicName", "dateTime", "qualifier", "value")
    newTimeVar <- "CharacteristicName"
  }
  # Long -> wide: one qualifier/value column pair per parameter.
  data <- suppressWarnings(reshape(test, idvar = "dateTime", timevar = newTimeVar, direction = "wide"))
  data$dateTime <- format(data$dateTime, "%Y-%m-%d")
  data$dateTime <- as.Date(data$dateTime)
  return(data)
}
|
ba40b8da44ffb5adda58db31586750df8fbde0f9 | 7f27db9c3a8e1eeda456dc64f11338466c6a2a98 | /man/detect_delim.Rd | 875c91e707f21b9b5130baada2b1031c45aae22b | [
"MIT"
] | permissive | ropensci/EML | 4c228654f2fbcc5846121255dbb3dc19ba1c61df | b7871cca2b996a33aa1f534e8446f0730e706d4d | refs/heads/master | 2023-05-24T01:50:33.364460 | 2022-06-06T22:10:05 | 2022-06-06T22:10:05 | 10,894,022 | 75 | 43 | NOASSERTION | 2022-06-06T22:10:07 | 2013-06-23T23:20:03 | R | UTF-8 | R | false | true | 606 | rd | detect_delim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_physical.R
\name{detect_delim}
\alias{detect_delim}
\title{Automatically detect line delimiters in a text file}
\usage{
detect_delim(path, nchar = 1000)
}
\arguments{
\item{path}{(character) File to search for a delimiter}
\item{nchar}{(numeric) Maximum number of characters to read from disk when
searching}
}
\value{
(character) If found, the delimiter; if not, \\r\\n
}
\description{
This helper function was written expressly for \code{\link{set_physical}} to
be able to automate its \code{recordDelimiter} argument.
}
|
85f546aac00defb9e6114a55e74908b1c5ba0aa5 | 23f0a2b56a2921debc5c51f5c161817d48eb6a47 | /Scripts/Previos/DEMO_U3.R | 0f2fbd716a0f832f6e85ff6b0c56d8f9258de626 | [] | no_license | gefero/fund-prog-r | d0fe9a986a9da3d7560241255bc7609fe6aecf6f | adcd7887952b0144cf34b26d0d7cb2b5a844a42a | refs/heads/master | 2021-01-01T15:53:46.968739 | 2018-10-18T13:42:22 | 2018-10-18T13:42:22 | 97,727,779 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,362 | r | DEMO_U3.R | names(mtcars)
data <- mtcars
# Base-plot demo: speed vs stopping distance from the built-in `cars` data.
plot(cars$speed,cars$dist
     ,main="Velocidad por dist. recorrida"
     ,xlab="Velocidad",ylab="Distancia", type="p")
# Histograms of 1000 standard-normal draws: truncated to (-2, 2), then with
# fine (100) bins.
x<-rnorm(1000,0,1)
par(mfrow=c(1,1))
hist(x[(x>-2 & x<2)])
hist(x,breaks=100)
# Boxplots of fuel efficiency per cylinder count; box widths proportional
# to the share of cars in each group.
boxplot(mpg~cyl,data=mtcars, main="Car Milage Data"
        , xlab="No. Cylinders", ylab="Miles Per Gallon"
        , col=c("red","green","yellow")
        , width=prop.table(table(mtcars$cyl)))
counts <- table(mtcars$gear)
barplot(counts, main="Car Distribution",
        xlab="Number of Gears")
# Discretise horsepower into an ordered factor (BAJO/MEDIO/ALTO =
# low/medium/high).
data$hp2<-NA
data$hp2[data$hp<95]<-"BAJO"
data$hp2[data$hp>=95 & data$hp<150]<-"MEDIO"
data$hp2[data$hp>=150]<-"ALTO"
data$hp2<-factor(data$hp2, levels=c("BAJO","MEDIO","ALTO"), ordered=TRUE)
table(data$hp2,data$hp)
count <- table(data$hp2)
barplot(count)
# Column-wise proportions of engine type (vs) within each gear count.
counts <- prop.table(table(mtcars$vs, mtcars$gear),margin=2)
barplot(counts, main="Car Distribution by Gears and VS",
        xlab="Number of Gears", col=c("darkblue","red"),
        beside=FALSE)
# Weight vs mpg scatter with the mean point, mean reference lines, and the
# least-squares line drawn twice (hard-coded coefficients, then lm()).
plot(mtcars$wt, mtcars$mpg, main="Scatterplot Example",
     xlab="Car Weight ", ylab="Miles Per Gallon ")
points(mean(mtcars$wt),mean(mtcars$mpg), col="red")
abline(h=mean(mtcars$mpg))
abline(v=mean(mtcars$wt))
abline(a=37.285126, b=-5.344472)
abline(lm(mtcars$mpg~mtcars$wt), col="red")
lm(mtcars$mpg~mtcars$wt)
data$filtro1 <- data$cyl==8
# NOTE(review): `data$index` is never defined above, so subset is NULL and
# the model is fitted on all rows -- confirm which subset was intended
# (perhaps data$filtro1 created just above).
lm(mpg~wt, data=data, subset=data$index)
data[data$cyl=="8",]
#lines(lowess(mtcars$wt,mtcars$mpg), col="blue")
# Boston housing data: median home value vs % lower-status population; the
# relationship looks closer to linear on the log(lstat) scale.
library(MASS)
data(Boston)
plot(Boston$lstat, Boston$medv)
plot(log(Boston$lstat), Boston$medv)
model <- lm(medv ~ log(lstat), data = Boston)
names(model)
Boston$resid<-model$residuals
summary(model)
summary(model$residuals)
confint(model, level = 0.95)
# NOTE(review): `interval = "terms"` is inside the data.frame() call, so it
# becomes a data column rather than an argument to predict() -- confirm
# whether it was meant for predict().
x <-predict(model, data.frame(lstat = c(5, 10, 15), interval = "terms"))
# Draw a random train/test split: returns the row indices of a random
# sample containing proportion `p` of the rows of `df`.
#
# Args:
#   p:  fraction of rows to sample (0 < p <= 1).
#   df: data frame (or matrix) whose rows are sampled.
#
# Returns an integer vector of distinct row indices of length
# round(p * nrow(df)).
#
# Fix over the original: sampling from seq_len(n) instead of 1:nrow(df),
# which evaluates to c(1, 0) when df has zero rows.
sampleador <- function(p, df) {
  n <- nrow(df)
  sample(seq_len(n), round(p * n, 0))
}
# 30% test / 70% train split of Boston (note: Boston now carries the
# `resid` column added above).
ind<-sampleador(0.3, Boston)
train<-Boston[-ind,]
test<-Boston[ind,]
model <- lm(medv ~ log(lstat), data = Boston)
Boston[,-2]
pairs(Boston[,10:14])
# Successive model specifications; each assignment overwrites `model`.
model <- lm(medv ~ ., data = train)
model <- lm(medv ~ . - lstat + log(lstat), data = Boston)
summary(model)
# Final specification, fitted on the training rows only.
# NOTE(review): `~ .` includes the in-sample `resid` column created
# earlier, which is derived from the response -- confirm this leakage is
# intended.
model <- lm(medv ~ . - lstat + log(lstat) - tax - rad, data = train)
# Test and train mean squared error; `[,-14]` drops column 14 (medv).
preds<-predict(model, test[,-14])
mean((preds-test$medv)^2)
preds_train<-predict(model, train[,-14])
mean((preds_train-train$medv)^2)
|
e71f1848a89a762b7d87df60ad4baf6621acd3b3 | ef98a8ba6b5c52220fcd9bf2e8f88f2d3e524e42 | /climatology.R | 5e4aead34a98b23df866763491e917b6299697b8 | [] | no_license | AmierohAbrahams/Honours | ff6d14119816ca70198e23e7884e7c46f3bf18ec | fe1be5432a8819ad12eb4880c1f7741e5b54a737 | refs/heads/master | 2020-03-07T04:24:56.108512 | 2018-04-18T18:56:05 | 2018-04-18T18:56:05 | 120,503,714 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,439 | r | climatology.R | # Loading the packages ----------------------------------------------------
library(tidyverse)
library(ggpubr)
library(dplyr)
library(lubridate)
library(gridExtra)

# Loading SACTN data ------------------------------------------------------
# Daily SACTN temperature records; the .Rdata file provides SACTNdaily_v4.1.
load("~/Honours/R assignment/Honours.P/SACTNdaily_v4.1.Rdata")
temp <- as_tibble(SACTNdaily_v4.1)

# Temperature anomalies: subtract each site/source series' own mean.
temp.mean <- temp %>%
  group_by(site, src) %>%
  mutate(zeroed.temp = temp - mean(temp, na.rm = TRUE)) %>%
  ungroup()
temp.mean

# Calendar features; month is a factor ordered Jan..Dec so plots sort
# chronologically rather than alphabetically.
temp.mean$year <- format(temp.mean$date,"%Y")
temp.mean$month <- format(temp.mean$date,"%b")
temp.mean$month <- factor(temp.mean$month, levels = c("Jan","Feb","Mar","Apr",
                                                      "May","Jun","Jul","Aug",
                                                      "Sep","Oct","Nov","Dec"))

# Keep records from 2001-12-31 onwards (the boundary day itself included).
after2001 <- filter(temp.mean, date >= as.Date("2001-12-31"))

# Sites monitored by more than one source (src).
site_list <- after2001 %>%
  group_by(site) %>%
  summarise(count = length(unique(src))) %>%
  filter(count > 1) %>%
  ungroup()

# extract only locations with multiple time series
# For each such site, the overlap window shared by all of its sources:
# latest start date and earliest end date across sources.
site_dates <- after2001 %>%
  filter(site %in% site_list$site) %>%
  group_by(site, src) %>%
  mutate(start.date = min(date),
         end.date = max(date)) %>%
  group_by(site) %>%
  mutate(min.start.date = max(start.date),
         max.end.date = min(end.date)) %>%
  ungroup()

# Trim every series to its site's shared overlap window.
min_max_time <- site_dates %>%
  group_by(site, src) %>%
  filter(date >= min.start.date,
         date <= max.end.date) %>%
  ungroup()

# Re-check which sites still have more than one source after trimming.
site_list_final <- min_max_time %>%
  group_by(site) %>%
  summarise(count = length(unique(src))) %>%
  filter(count > 1) %>%
  ungroup()

final_time <- min_max_time %>%
  filter(site %in% site_list_final$site)

# Assemble one "climatology year" for five focus sites:
# Jan-Nov 2005 plus Dec 2004.
year <- final_time %>%
  mutate(month = month(date, label = TRUE, abbr = TRUE),
         year = year(date)) %>%
  filter(site %in% c("Ballito", "Hout Bay", "Mossel Bay", "Sodwana", "Knysna")) %>%
  filter(year == 2005 & month != "Dec")

year_bonus <- final_time %>%
  mutate(month = month(date, label = TRUE, abbr = TRUE),
         year = year(date)) %>%
  filter(site %in% c("Ballito", "Hout Bay", "Mossel Bay", "Sodwana", "Knysna")) %>%
  filter(year == 2004 & month == "Dec")

year_clean <- rbind(year, year_bonus)

# Climatology -------------------------------------------------------------

#climatologies- always use raw data
# Getting the climatology (using the daily data)- monthly
# Monthly summary statistics of the raw daily temperatures per site;
# range_temp is max - min within the month.
temp_monthly <- year_clean %>%
  mutate(date = lubridate::month(date, label = TRUE)) %>%
  group_by(site, date) %>%
  summarise(mean_temp = mean(temp, na.rm = TRUE),
            min_temp = min(temp, na.rm = TRUE),
            max_temp = max(temp, na.rm = TRUE),
            range_temp = range(temp, na.rm = TRUE)[2]-range(temp, na.rm = TRUE)[1],
            sd_temp = sd(temp, na.rm = TRUE)) %>%
  filter(date %in% c("Jan","Feb","Mar","Apr",
                     "May","Jun","Jul","Aug",
                     "Sep","Oct","Nov","Dec")) %>%
  ungroup()

# Annual climatology
# Same statistics over the whole assembled year; `date` becomes the
# constant label "Annual".
temp_annually <- year_clean %>%
  # mutate(date = lubridate::year(date)) %>%
  mutate(date = "Annual") %>%
  group_by(site, date) %>%
  summarise(mean_temp = mean(temp, na.rm = TRUE),
            min_temp = min(temp, na.rm = TRUE),
            max_temp = max(temp, na.rm = TRUE),
            range_temp = range(temp, na.rm = TRUE)[2]-range(temp, na.rm = TRUE)[1],
            sd_temp = sd(temp, na.rm = TRUE)) %>%
  ungroup() %>%
  group_by(site) %>%
  select(site, date, everything())
94d0fdc086541c417a3dd62f7ad551533471914f | 5decae4c58904e9a7f3cdb0cb5832a0e9e558281 | /tutorials/NIMBioS2014/presentations/rcpp_script.r | 0ab484832fa472ad8d704622eb659abf41988bea | [
"MIT"
] | permissive | abhik1368/RBigData.github.io | 73af2e201393341401370d502f509a572e1f3fd1 | f99ff558a0da25df19532024667bd5e24a7c1b7b | refs/heads/master | 2020-05-29T11:54:57.554854 | 2016-03-08T15:40:02 | 2016-03-08T15:40:02 | 56,902,370 | 0 | 1 | null | 2016-04-23T05:35:00 | 2016-04-23T05:35:00 | null | UTF-8 | R | false | false | 5,073 | r | rcpp_script.r | #----------------------------------------------------------------
# test function
#----------------------------------------------------------------
library(inline)
body <- "std::cout << \"It works\" << std::endl;"
test <- cxxfunction(signature(), body=body, plugin="Rcpp")
test()
#----------------------------------------------------------------
# Estimating pi
#----------------------------------------------------------------
# Plain-R Monte-Carlo estimate of pi.
#
# Draws n points uniformly in the unit square (two runif(1) calls per
# iteration, so the result is reproducible under set.seed()) and counts
# those inside the quarter circle; 4 * count / n estimates pi. Deliberately
# loop-based: it is the slow baseline for the benchmark below.
#
# Fix over the original: the loop ran over 1:n, which iterates twice
# (i = 1, 0) when n == 0; seq_len(n) yields an empty loop instead.
mcsim_pi_r <- function(n){
  hits <- 0L
  for (i in seq_len(n)){
    u <- runif(1)
    v <- runif(1)
    if (u^2 + v^2 <= 1)
      hits <- hits + 1
  }
  return( 4*hits/n )
}
# Vectorised Monte-Carlo pi: draw all 2n coordinates in one runif() call,
# arrange them as an n x 2 matrix of points, and count the points inside
# the quarter circle with a single rowSums() pass.
mcsim_pi_r_vectorized <- function(n){
  coords <- runif(n * 2)
  pts <- matrix(coords, ncol=2)
  inside <- rowSums(pts * pts) <= 1
  4 * sum(inside) / n
}
library(inline)
# Compiled Monte-Carlo pi estimator: the same algorithm as mcsim_pi_r()
# (n uniform points in the unit square, count those inside the quarter
# circle) with the loop in C++. It draws through R::runif, i.e. R's own
# RNG, so results are reproducible with set.seed().
# NOTE: the C++ source below is a string literal handed to the compiler;
# it is left byte-for-byte unchanged.
cxx_pi <- cxxfunction(signature(n_="int"), body='
int i, r = 0;
int n = Rcpp::as<int >(n_);
double u, v;
for (i=0; i<n; i++){
u = R::runif(0, 1);
v = R::runif(0, 1);
if (u*u + v*v <= 1)
r++;
}
return Rcpp::wrap( (double) 4.*r/n );
',plugin="Rcpp"
)
## Convenience wrapper so the benchmark can call the compiled estimator with
## a plain R number; as.integer() matches the C++ signature ("int").
mcsim_pi_r_rcpp <- function(n){
cxx_pi(as.integer(n))
}
library(rbenchmark)
## Time the three implementations on the same workload; the "relative" column
## reports elapsed time relative to the fastest entry.
n <- 50000
benchmark(R.loops = mcsim_pi_r(n),
R.vectorized = mcsim_pi_r_vectorized(n),
Rcpp = mcsim_pi_r_rcpp(n),
columns=c("test", "replications", "elapsed", "relative"))
#----------------------------------------------------------------
# Cosine similarity
#----------------------------------------------------------------
### cosine function from lsa package
### (kept byte-identical to the upstream lsa implementation for reference)
### Matrix x with y = NULL: returns the symmetric column-by-column
### cosine-similarity matrix, recursing on the two-vector case for each pair.
### Two vectors: returns their cosine. Anything else stops with an error.
cosine <- function (x, y = NULL){
if (is.matrix(x) && is.null(y)) {
co = array(0, c(ncol(x), ncol(x)))
f = colnames(x)
dimnames(co) = list(f, f)
for (i in 2:ncol(x)) {
for (j in 1:(i - 1)) {
# fill the lower triangle pairwise, mirrored below
co[i, j] = cosine(x[, i], x[, j])
}
}
co = co + t(co)
diag(co) = 1
return(as.matrix(co))
}
else if (is.vector(x) && is.vector(y)) {
return(crossprod(x, y)/sqrt(crossprod(x) * crossprod(y)))
}
else {
stop("argument mismatch. Either one matrix or two vectors needed as input.")
}
}
### Improved R solution
## Pure-R cosine similarity of the columns of x, avoiding the recursive lsa
## version: compute the Gram matrix once, normalise the upper triangle by the
## column norms, then mirror it and set the diagonal to 1.
cosine2 <- function(x) {
  gram <- crossprod(x)
  norms_sq <- diag(gram)
  n_cols <- length(norms_sq)
  sim <- matrix(0.0, n_cols, n_cols)
  for (col in 2L:n_cols) {
    rows <- seq_len(col - 1L)
    ## vectorised over the rows above the diagonal in this column
    sim[rows, col] <- gram[rows, col] / sqrt(norms_sq[rows] * norms_sq[col])
  }
  sim <- sim + t(sim)
  diag(sim) <- 1.0
  sim
}
### Rcpp
library(inline)
## C++ kernel: given the Gram matrix cp = t(x) %*% x and its diagonal dg,
## fill the full cosine-similarity matrix entry by entry (diagonal = 1).
fill_loop <- cxxfunction(
signature(cp_="matrix", dg_="numeric"),
body='
// Shallow copies
Rcpp::NumericMatrix cp(cp_);
Rcpp::NumericVector dg(dg_);
// Allocate return
Rcpp::NumericMatrix co(cp.nrow(), cp.ncol());
int i, j;
for (j=0; j<co.ncol(); j++){
for (i=0; i<co.nrow(); i++){
if (i == j)
co(i, j) = 1.0;
else
co(i, j) = cp(i, j) / std::sqrt(dg[i] * dg[j]);
}
}
return co;
',plugin="Rcpp"
)
## Cosine similarity of the columns of x, with the per-entry normalisation
## delegated to the compiled fill_loop kernel.
cosine_Rcpp <- function(x) {
  gram <- crossprod(x)
  fill_loop(gram, diag(gram))
}
### Rcpp improved
## Same kernel as fill_loop but avoids redundant work: writes the diagonal
## once, computes only one triangle, then mirrors it to the other.
fill_loop2 <- cxxfunction(
signature(cp_="matrix", dg_="numeric"),
body='
// Shallow copies
Rcpp::NumericMatrix cp(cp_);
Rcpp::NumericVector dg(dg_);
const unsigned int n = cp.nrow();
// Allocate return
Rcpp::NumericMatrix co(n, n);
int i, j;
// Fill diagonal
for (j=0; j<n; j++)
co(j, j) = 1.0;
// Fill lower triangle
for (j=0; j<n; j++){
for (i=0; i<j; i++)
co(i, j) = cp(i, j) / std::sqrt(dg[i] * dg[j]);
}
// Copy lower triangle to upper
for (j=0; j<n; j++){
for (i=j+1; i<n; i++)
co(i, j) = co(j, i);
}
return co;
',plugin="Rcpp"
)
## Cosine similarity of the columns of x using the triangular fill_loop2
## kernel, which computes each off-diagonal entry only once.
cosine_Rcpp2 <- function(x) {
  gram <- crossprod(x)
  fill_loop2(gram, diag(gram))
}
#----------------------------------------------------------------
# RcppArmadillo
#----------------------------------------------------------------
# Reference R implementation: outer product x %*% t(x) and inner product
# t(x) %*% x, returned as a named list (compared against the Armadillo g()).
f <- function(x) {
  list(outer = tcrossprod(x), inner = crossprod(x))
}
## C++ (RcppArmadillo) equivalent of f(): computes v * v^T and v^T * v and
## returns them as a named R list.
body <- '
arma::mat v = Rcpp::as<arma::mat>(vs);
arma::mat op = v * v.t();
arma::mat ip = v.t()*v;
return Rcpp::List::create(
Rcpp::Named("outer")=op, Rcpp::Named("inner") = ip);
'
library(inline)
g <- cxxfunction(signature(vs="matrix"), plugin="RcppArmadillo", body=body)
## sanity check: the R and Armadillo versions agree
x <- matrix(1:30, 10)
all.equal(f(x), g(x))
#----------------------------------------------------------------
# RcppGSL
#----------------------------------------------------------------
## GSL headers required by the snippet below
includes <- '
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_blas.h>
'
## Compute the Euclidean (BLAS dnrm2) norm of each column of sM via GSL
## column views; returns a numeric vector of length ncol(sM).
body <- '
RcppGSL::matrix<double> M = sM;
int k = M.ncol();
Rcpp::NumericVector n(k);
for (int j = 0; j < k; j++) {
RcppGSL::vector_view<double> colview = gsl_matrix_column(M, j);
n[j] = gsl_blas_dnrm2(colview);
}
M.free() ;
return n;
'
library(inline)
g <- cxxfunction(signature(sM="matrix"), plugin="RcppGSL", body=body, inc=includes)
|
b7921a021f11f8458b5998e7c2168d9f87137963 | 44246f5d70b8499544cfa1b20fed0ed011dba66e | /examples/ex_erspc/input/user_options.R | d457bdd594dd790ca4127809733e1dbee7ff5872 | [] | no_license | netterie/screentreat | 9e4c25a7d3169ebc401ae74008fea5d475cadfb8 | 348cd82391fa8e5b62aca38bba8366dbd64ae891 | refs/heads/master | 2021-01-01T17:05:34.283584 | 2015-12-12T20:34:50 | 2015-12-12T20:34:50 | 39,050,162 | 1 | 3 | null | 2015-12-12T20:14:42 | 2015-07-14T02:56:31 | R | UTF-8 | R | false | false | 2,006 | r | user_options.R | #############################################################################
## File Name: user_options.r
## File Purpose:
## Author: Leslie Mallinger
## Date: 3/20/2013
## Edited on:
## Additional Comments:
#############################################################################
## Configuration script: each assignment below defines a user option read by
## the screening/treatment simulation driver. Several options are small CSV
## tables encoded as single strings with embedded "\n" (header row first).
############################################################
# establish model type
############################################################
mtype = 'screening'
############################################################
# establish user options
############################################################
# simulation features
# nsim: number of simulation replicates; times: follow-up times as a
# comma-separated string; pop_size: simulated cohort size
nsim = 50
times = '5,10,50'
pop_size = 100
study_year = 2000
# population characteristics at incidence in absence of screening
input_method = 'covariate_proportions' # also 'individual_data'
if (input_method=='individual_data') {
userdat_file = 'input/input_data.csv'
create_pop_method = 'weighted_bootstrap' # also 'simple_bootstrap'
weighted_bootstrap_table = 'tx,prop\nCM,0.3\nRP,0.3\nRT,0.4'
} else if (input_method=='covariate_proportions') {
# covariate tables; stage codes L/R/D and treatment codes RP/RT/CM are the
# same categories used in the mortality covariate tables below
continuous_vars = 'varname,mean,sd,min,max\nage,65,7,55,74'
categorical_chars1 = 'stage,grade,prop\nL,low,0.2\nL,high,0.1\nR,low,0.15\nR,high,0.15\nD,low,0.05\nD,high,0.35'
categorical_chars2 = 'tx,prop\nRP,.2\nRT,.4\nCM,.4'
categorical_chars3 = 'male,prop\n0,0\n1,1'
categorical_chars4 = ''
categorical_chars5 = ''
} else stop('Input method must be either individual_data or covariate_proportions')
# population stage distribution in presence of screening
scr_stg_dist = 'order,stage,prop\n1,L,0.5\n2,R,0.3\n3,D,0.2'
# time to cancer death estimation features
# survival is specified as k-year survival ('ksurv') of 0.68 at k = 5 years;
# mort_covar1 gives stage-specific survival, mort_covar3 treatment HRs
mort_param = 'ksurv' # options: 'rate', 'median', 'mean', 'ksurv'
mort_k = 5
mort_value = 0.68
mort_covar1 = 'stage,stat\nL,0.86\nR,0.68\nD,0.32'
mort_covar2 = ''
mort_covar3 = 'tx,HR\nRP,0.65\nRT,0.9\nCM,1'
# time to other-cause death estimation features
ocd_HR = 1
8fe096cf7cdd88064bd41ca2ac4823e0661259b9 | 18d59963400b3e6b116c5ba12fb111cca0e6ff0c | /r-files/proc-obs-flow-for-pest-base-cal.R | b34e4d4400aebd3fa9910c5eeda8c25257adc713 | [
"Apache-2.0"
] | permissive | kbrannan/Big-Elk-Cadmus-HydCal-Updated-WDM | 10000608e9455e0c9b53a505a8dfff7788b8935e | 7fc4fe34667fda0d0e5bbabcd7126423f726bf54 | refs/heads/master | 2020-12-18T12:34:52.880254 | 2016-08-03T22:40:15 | 2016-08-03T22:40:15 | 55,079,166 | 0 | 0 | null | 2016-05-09T22:35:50 | 2016-03-30T16:30:58 | TeX | UTF-8 | R | false | false | 16,810 | r | proc-obs-flow-for-pest-base-cal.R | ## load packages
library(DVstats, quietly = TRUE) # USGS-HySep R version in DVstats
library(doBy, quietly = TRUE) # need doBy package to sums for annual, summer and winter
## Script purpose: derive flow-statistic observation groups and write them
## into a new PEST control file for the base hydrology calibration.
## primary path
chr.dir.prime <- "M:/Models/Bacteria/HSPF/Big-Elk-Cadmus-HydCal-Updated-WDM"
## storm dates path
chr.dir.stm.dates <- "M:/Models/Bacteria/HSPF/HydroCal201506/R_projs/Select_Storm_HydCal"
## pest control template file
chr.file.pest.tpl <- "control-tpl.pst"
## new pest control file name
chr.file.pest.new <- "control-cal-base.pst"
# get obs flow from Yaquina River gage
# (the sourced scripts create df.flow.obs and df.flow.est in the workspace)
source(file = "//deqhq1/tmdl/TMDL_WR/MidCoast/Models/Bacteria/HSPF/HydroCal201506/R_projs/Select_Storm_HydCal/devel/get-obs-flow-data.R")
# estimate flow for Big Elk Creek from Yaquina River Gage
source(file = "//deqhq1/tmdl/TMDL_WR/MidCoast/Models/Bacteria/HSPF/HydroCal201506/R_projs/Select_Storm_HydCal/devel/estimate-flow.R")
names(df.flow.est) <- c("date", "flow")
## clean up
rm(df.flow.obs)
## pest-hspf path
chr.dir.pest.hspf <- paste0(chr.dir.prime, "/pest-hspf-files")
## cal-base path
chr.dir.cal.base <- paste0(chr.dir.pest.hspf, "/cal-base")
## get simulation period
## using the uci file for the earlier calibration that used the updated
## simulation period
chr.uci <- scan(paste0(chr.dir.pest.hspf, "/bigelk.uci"), sep = "\n",
what = "character", quiet = TRUE)
## pull the lines containing START out of the uci, strip everything except
## the date fields, then parse the start/end dates to POSIXct
dt.sim.period <- as.POSIXct(
sapply(
strsplit(
gsub(" {1,}", ",",
gsub("(^ {1,})|( {1,}$)", "",
gsub("[^0-9/ ]|(00\\:00)|(24\\:00)", "",
chr.uci[grep("START", chr.uci)]))),
split = ","), cbind))
## clean up
rm(chr.uci)
## sub-set the flow data to the simulation period
df.flow.est <- df.flow.est[df.flow.est$date >= min(dt.sim.period) &
df.flow.est$date <= max(dt.sim.period), ]
## bacteria data path
chr.dir.bac.obs <- paste0(chr.dir.prime, "/ObsData")
## obs bacteria data file
chr.file.obs.bac <- "obs.RData"
## load obs bacteria data
## (loads obs.data into the workspace)
load(file = paste0(chr.dir.bac.obs, "/", chr.file.obs.bac))
## get all dates
chr.dates.bac.all <- strftime(obs.data$date, format = "%Y-%m-%d")
## remove duplicated dates
chr.dates.bac.unique <- unique(chr.dates.bac.all)
chr.dates.bac.unique <- chr.dates.bac.unique[order(chr.dates.bac.unique)]
## find rows in flow data.frame for dates of bacteria samples
lng.bac.flow.rows <- grep(pattern = paste0(chr.dates.bac.unique, collapse = "|"),
strftime(df.flow.est$date, format = "%Y-%m-%d"))
## clean up
rm(chr.dir.bac.obs, chr.file.obs.bac, chr.dates.bac.all, obs.data)
## flow data with the values on days when bacteria samples collected removed
## NOTE(review): presumably sample-day flows are held out so they stay
## independent of the hydrology calibration -- confirm
df.flow.est.rm <- df.flow.est[-1 * lng.bac.flow.rows, ]
## calculate observations for pest obs groups
##
## mlog - log10 of daily flow + 1E-04
## Note: 1E-04 added to protect against log10(0)
mlog <- log10(df.flow.est.rm$flow + 1E-04)
##
## mbaseind <- baseflow / total flow
##
## calculate model groups
## mbaseind, base flow index
## baseflow seperation using USGS-HySep R version in DVstats
## need continuous time-series to use hysep, so need to break flow
## data into multiple continuous time-series
## drainage area in sqr mi for Big Elk Creek at outlet, used in HySep
da.be <- 88.8
## setup data.frame to check how many days in the flow series to the previous
## bacteria sample date (bck) and to the next (fwd) for each bacteria sample date
## and a variable to indicate that sample date has more than 1 day of flow series
## bewteen previous (bck) or next (fwd), chk == 1 is yes and chk == 0 in no
df.chk <- data.frame(fwd = rep(-1000, length(lng.bac.flow.rows)),
bck = -1000, chk = 0)
## first sample date only has forward
df.chk$fwd[1] <- lng.bac.flow.rows[1] - 1
## loop to do the remeaining sample dates except the last
for(ii in 2:(length(lng.bac.flow.rows) - 1)) {
df.chk$fwd[ii] <- lng.bac.flow.rows[ii + 1] - lng.bac.flow.rows[ii]
df.chk$bck[ii] <- lng.bac.flow.rows[ii] - lng.bac.flow.rows[ii - 1]
}
## clean up
rm(ii)
## last sample date only has a backward, use the entire flow data set for this
## because the rows for the bacteria samples correspond to this data.frame
df.chk$bck[length(lng.bac.flow.rows)] <- length(df.flow.est$date) -
lng.bac.flow.rows[length(lng.bac.flow.rows)]
## identify sample dates that have more than one day of flow series
df.chk$chk[df.chk$fwd > 1 & df.chk$bck > 1] <- 1
## first only has fwd
if(df.chk$fwd[1] > 1) df.chk$chk[1] <- 1
## last only has bwd
if(df.chk$bck[length(lng.bac.flow.rows)] > 1) df.chk$chk[length(lng.bac.flow.rows)] <- 1
## setup data.frame for the bounds of the flow series segments used to estimate
## baseflow index (start/end/len/bfi filled in below; -1 = not calculated)
df.bnds <- cbind(start = -1,
end = -1,
bac.rows = lng.bac.flow.rows,
df.chk,
len = -1,
bfi = -1)
## first start = 1
if(df.bnds$chk[1] == 1) {
df.bnds$start[1] <- 1
df.bnds$end[1] <- lng.bac.flow.rows[1] - 1
}
## do remaining sample dates that have chk == 1
## each segment spans the gap between consecutive sample days
for(ii in 2:(length(df.bnds$bac.rows) - 1)) {
if(df.bnds$chk[ii] == 1) {
df.bnds$start[ii] <- df.bnds$bac.rows[ii - 1] + 1
df.bnds$end[ii] <- df.bnds$bac.rows[ii] - 1
}
}
## last end is the length of the flow series
if(df.bnds$chk[length(df.bnds$chk)] == 1) {
df.bnds$start[length(lng.bac.flow.rows)] <- lng.bac.flow.rows[length(lng.bac.flow.rows)] + 1
df.bnds$end[length(lng.bac.flow.rows)] <- length(df.flow.est$date)
}
## get the length of the segements
df.bnds$len <- df.bnds$end - df.bnds$start
## calculate baseflow idex for each segment
## (warnings silenced for the hysep calls and restored afterwards)
tmp.wn <- options("warn")[[1]]
options(warn = -1)
for(ii in 1:length(df.bnds$bfi)) {
if(df.bnds$chk[ii] == 1) {
tmp.seq <- seq.int(from = df.bnds$start[ii], to = df.bnds$end[ii])
## use try function becuase can get error from hysep if there is only
## one minima (i think this means a constant slope)
tmp.hysep88.8 <- try(hysep(Flow = df.flow.est$flow[tmp.seq],
Dates = as.Date(df.flow.est$date[tmp.seq]), da = da.be,
select = "sliding"), silent = TRUE)
if(class(tmp.hysep88.8)[1] == "baseflow") {
## only calculate baseflow index for error free hysep
## can use the sums of the baseflow and flow even though the units are cfs
## the conversion coefficients cancel out in the ratio
df.bnds$bfi[ii] <- sum(tmp.hysep88.8$BaseQ) / sum(tmp.hysep88.8$Flow)
}
## clean up
rm(tmp.seq, tmp.hysep88.8)
}
}
options(warn = tmp.wn)
rm(tmp.wn)
## get rows where baseflow calculated
## NOTE(review): the pattern "[^-1]" matches any printed value containing a
## character other than "-" or "1", so "-1" placeholders are skipped and
## fractional bfi values match; it works here, but is fragile -- confirm
lng.bfi <- grep("[^-1]", df.bnds$bfi)
## calculate baseflow in the same way as calculated from the observed data
## as a wieght average. The weight is the length of the segment
mbaseind <- sum(df.bnds$len[lng.bfi] * df.bnds$bfi[lng.bfi]) / sum(df.bnds$len[lng.bfi])
## clean up
rm(ii, df.chk,lng.bfi, df.bnds, da.be)
##
## mdiff - backward difference in daily flow
## calculate differences; this is a backward difference, so an NA is added at
## the beginning of the series (no previous day exists for row 1)
mdiff <- c(NA,diff(df.flow.est$flow, lag = 1, differences = 1))
## removing the flow diffs related to days bacteria samples taken
## Two diffs removed for each sample day: the one on the day of the sample and
## the one after the day of the sample. The diff function calculates the
## backward difference
lng.bac.flow.dif.rows <- c(lng.bac.flow.rows, lng.bac.flow.rows + 1)
## re-ordering the rows
lng.bac.flow.dif.rows <- lng.bac.flow.dif.rows[order(lng.bac.flow.dif.rows)]
## removing duplicates that occurred when samples collected on successive days
lng.bac.flow.dif.rows <- unique(
lng.bac.flow.dif.rows[order(lng.bac.flow.dif.rows)])
## get diffs for days that samples not collected
## this vector is the source for the differences in the flows to use in the
## PEST control file and to get from the model output
mdiff <- mdiff[-1 * lng.bac.flow.dif.rows]
## get rid of first element because a diff cannot be calculated for day 1
## NOTE(review): if flow row 1 is itself a sample day, its NA was already
## removed above and this drops a valid diff instead -- confirm
mdiff <- mdiff[-1]
## clean up
rm(lng.bac.flow.dif.rows)
## calculate the flow annual, winter and summer volumes
## convert stream flow from cu ft / sec to ac-ft / day for use in volumes
## (1 cu ft / sec) * (86400 sec / day) * (1 ac-ft / 43559.9 cu ft)
## NOTE(review): an acre-foot is 43560 cu ft; 43559.9 looks like a typo
## (effect is negligible) -- confirm before changing
df.vol <- cbind(df.flow.est.rm,
flow.ac.ft = 86400 * (1 / 43559.9) * df.flow.est.rm$flow)
## mvol_ann - annual volumes in ac-ft
## create factor for year
df.vol <- cbind(df.vol,
fac.ann = as.factor(
strftime(df.vol$date, format = "%Y")))
## create factor for month used in mvol_smr and mvol_wtr calculations
df.vol <- cbind(df.vol,
fac.mon = as.factor(
strftime(df.vol$date, format = "%b")))
## summer season, summer is Jun, Jul and Aug
lng.smr <- grep("Jun|Jul|Aug", df.vol$fac.mon)
## winter season
lng.wtr <- grep("Dec|Jan|Feb", df.vol$fac.mon)
## add season column
df.vol <- data.frame(df.vol, fac.season = "none", stringsAsFactors = FALSE)
## assign summer and winter values to season. leave spring and fall as none
df.vol$fac.season[lng.smr] <- "summer"
df.vol$fac.season[lng.wtr] <- "winter"
## convert season from character to factor
df.vol$fac.season <- as.factor(df.vol$fac.season)
## clean up
rm(lng.smr, lng.wtr)
## annual flow volume (one value per calendar year, via doBy::summaryBy)
mvol_ann <- as.numeric(
summaryBy(flow.ac.ft ~ fac.ann, data = df.vol, FUN = sum)[ ,2])
## season flow volume (year x season sums)
df.tmp <- summaryBy(flow.ac.ft ~ fac.ann + fac.season , data = df.vol, FUN = sum)
## mvol_smr - summer flow volumes
mvol_smr <- as.numeric(df.tmp[as.character(df.tmp$fac.season) == "summer",
"flow.ac.ft.sum"])
## mvol_wtr - winter flow volumes
mvol_wtr <- as.numeric(df.tmp[as.character(df.tmp$fac.season) == "winter",
"flow.ac.ft.sum"])
## clean up
rm(df.vol,df.tmp)
## mtime - % exceedance for flow, using 0.01%, 1%, 5%, 25%, 50%, 75%, 95%, 99%
## this is different than what Cadmus used in tsproc, which is the fraction
## of time the flow is above some value. tsproc is not used for these
## calculations; this R script is used instead
## percents used
tmp.per <- c(0.0001, 0.01, 0.05, 0.25, 0.50, 0.75, 0.95, 0.99)
## calculate mtime from quantiles (default type 7 interpolation)
mtime <- as.numeric(quantile(x = df.flow.est.rm$flow, probs = tmp.per))
## clean up
rm(tmp.per)
## storm information
## get storm dates from text file. Information in this file from
## Select_Storm_HydCal repo
## column 2 is the begin date of storm and column 8 is the end date of storm
df.strm.dates.raw <- read.delim(file = paste0(chr.dir.stm.dates, "/dates_stm.dat"),
header = FALSE, sep = " ",
stringsAsFactors = FALSE)[ , c(2, 8)]
## convert to POSIXct dates
df.strm.dates <- data.frame(apply(df.strm.dates.raw, MARGIN = 2, strptime,
format = "%m/%d/%Y"))
names(df.strm.dates) <- c("begin", "end")
## get dates on bacteria samples
df.bac.dates <- data.frame(date = df.flow.est$date[lng.bac.flow.rows])
## check if bacteria samples are within storms
## (keep starts TRUE; bac.date gets a placeholder date that is overwritten)
tmp.strm.dates <- cbind(df.strm.dates[, 1:2], keep = TRUE, bac.date = as.POSIXct("1967-07-02 00:00"))
## brute force not elegant
## drop (keep = FALSE) any storm containing a bacteria sample date
for(ii in 1:length(tmp.strm.dates$keep)) {
for(jj in 1:length(df.bac.dates$date)) {
if(as.numeric(df.bac.dates$date[jj]) >= as.numeric(df.strm.dates$begin[ii]) &
as.numeric(df.bac.dates$date[jj]) <= as.numeric(df.strm.dates$end[ii])) {
tmp.strm.dates$keep[ii] <- FALSE
tmp.strm.dates$bac.date[ii] <- as.Date(df.bac.dates$date[jj])
break
}
else tmp.strm.dates$bac.date[ii] <- NA
}
}
df.strm.dates.reduced <- data.frame(begin = tmp.strm.dates$begin[grep("TRUE",tmp.strm.dates$keep)],
end = tmp.strm.dates$end[grep("TRUE",tmp.strm.dates$keep)])
## clean up
rm(df.strm.dates.raw, ii, jj, tmp.strm.dates)
## storm durations in days
df.strm.dur <- floor(as.numeric(df.strm.dates.reduced$end - df.strm.dates.reduced$begin))
## mpeak - peak daily flow within each retained storm window
mpeak <- rep(-1, length(df.strm.dates.reduced$begin))
for(ii in 1:length(mpeak)) {
mpeak[ii] <- max(
df.flow.est.rm$flow[df.flow.est.rm$date >= df.strm.dates.reduced$begin[ii] &
df.flow.est.rm$date <= df.strm.dates.reduced$end[ii]])
}
rm(ii)
## mvol_stm in cu-ft for storm convert cu-ft/sec to cu-ft/day
## using 1 day = 86400 s
## NOTE(review): the daily-flow sum already spans all storm days, so also
## multiplying by df.strm.dur may double-count duration; confirm the
## intended formula (sum * 86400) before relying on mvol_stm
mvol_stm <- rep(-1, length(df.strm.dates.reduced$begin))
for(ii in 1:length(mvol_stm)) {
mvol_stm[ii] <- sum(
df.flow.est.rm$flow[df.flow.est.rm$date >= df.strm.dates.reduced$begin[ii] &
df.flow.est.rm$date <= df.strm.dates.reduced$end[ii]]) *
(df.strm.dur[ii] * 86400)
}
## clean up
rm(ii, df.strm.dur, df.strm.dates, df.strm.dates.reduced,
df.bac.dates)
##
## write pest control file
## get obs names (obs groups), assumes all/only obs variable names in workspace start with "m"
chr.obs.grp <- ls(pattern = "^m.*")
## number of observation groups
lng.num.obs.grp <- length(chr.obs.grp)
## get numbner of observations,
lng.num.obs <- sum(sapply(chr.obs.grp,
function(chr.name = NULL)
length(eval(as.name(chr.name)))))
## get the number of digits to use in format of obs names
lng.num.obs.dgt <- max(
nchar(
max(
sapply(chr.obs.grp, function(chr.name = NULL)
length(eval(as.name(chr.name)))))), 1)
## get max number of charcters for obs name
lng.max.nchar <- max(nchar(
sprintf(
paste0(chr.obs.grp, paste0("_%0", lng.num.obs.dgt, "i")), 0)))
## create string of obs for pest control file, make weight 1/value for obs
chr.obs.blk <- ""
chr.col.spc <- " "
## write obs data block
## each output line is: name  value  weight  group, with names like
## "mlog_0001" padded to a common width
for(ii in 1:length(chr.obs.grp)) {
tmp.grp <- chr.obs.grp[ii]
tmp.data <- eval(as.name(tmp.grp))
tmp.blk <- ""
##tmp.wt <- 1 / length(tmp.data) ## weight the group by the number of observations in the group
##tmp.wt <- lng.num.obs / length(tmp.data)
tmp.wt <- 1 ## back to weight 1 / obs value
for(jj in 1:length(tmp.data)) {
tmp.nme <- sprintf(paste0("%-",lng.max.nchar,"s"),sprintf(paste0(tmp.grp, paste0("_%0", lng.num.obs.dgt, "i")), jj))
tmp.val <- sprintf("%8.4E", tmp.data[jj])
if(tmp.data[jj] != 0) {
tmp.wtg <- sprintf("%8.4E", tmp.wt * abs(1/tmp.data[jj])) ## initial weight set to inverse of value
}
else {
tmp.wtg <- sprintf("%8.4E", tmp.wt * 1) ## weight set to 1 for values of 0
}
tmp.blk <- c(tmp.blk,
paste0(tmp.nme, chr.col.spc, tmp.val, chr.col.spc, tmp.wtg,
chr.col.spc, tmp.grp))
rm(tmp.nme, tmp.val, tmp.wtg)
}
tmp.blk <- tmp.blk[-1]
chr.obs.blk <- c(chr.obs.blk, tmp.blk)
rm(tmp.grp, tmp.data, tmp.wt, tmp.blk)
}
## get rid of first row becuase it is empty
chr.obs.blk <- chr.obs.blk[-1]
# get pest control file
chr.control <- scan(paste0(chr.dir.pest.hspf, "/", chr.file.pest.tpl), sep = "\n",
what = "character", quiet = TRUE)
## locate section header lines ("* ...") and pull out the lines of the
## existing observation-groups section
tmp.blk.hd <- grep("\\*", chr.control)
chr.obs.grp.names <-
chr.control[(tmp.blk.hd[grep("[Oo]bs.*[Gg]roups",
chr.control[tmp.blk.hd])] + 1):
(tmp.blk.hd[grep("[Oo]bs.*[Gg]roups",
chr.control[tmp.blk.hd]) + 1] - 1)]
## copy control file for updating
chr.control.new <- chr.control
## replace the number of obs and number of obs groups
## (control-file line 4 holds the counts; split on whitespace, patch
## fields 2 and 5, then re-pad to a fixed width)
tmp.ln.4 <- strsplit(gsub("( ){1,}", ",",gsub("^( ){1,}","", chr.control.new[4])),
split = ",")[[1]]
tmp.ln.4[2] <- as.character(lng.num.obs)
tmp.ln.4[5] <- as.character(lng.num.obs.grp)
chr.control.new[4] <- paste0(
sprintf(paste0("%", max(nchar(tmp.ln.4)) + 4, "s"), tmp.ln.4), collapse = "")
## insert new block of observations into the control
## (replaces everything between "* observation data" and the next section)
lng.obs.st <- grep("\\* observation data" ,chr.control.new)
lng.obs.ed <- lng.obs.st + min(grep("\\* " ,
chr.control.new[(lng.obs.st + 1):length(chr.control.new)]))
chr.control.new <- c(chr.control.new[1:lng.obs.st],
chr.obs.blk,
chr.control.new[lng.obs.ed:length(chr.control.new)])
## update obs group block
## insert new block of observation group names into the control
lng.obs.grp.st <- grep("\\* observation groups" ,chr.control.new)
lng.obs.grp.ed <- lng.obs.grp.st + min(grep("\\* " ,
chr.control.new[(lng.obs.grp.st + 1):length(chr.control.new)]))
chr.control.new <- c(chr.control.new[1:lng.obs.grp.st],
chr.obs.grp,
chr.control.new[lng.obs.grp.ed:length(chr.control.new)])
## write updated control file
write.table(chr.control.new, file = paste0(chr.dir.cal.base,"/", chr.file.pest.new),
row.names = FALSE, col.names = FALSE, quote = FALSE)
8b9736a1888f5d09626aadf03b7538859b8ec847 | 0c98d1475a0d7f5e809db021c6e0958999ce0354 | /4-cloudml/cloudml_2_train_flags.R | f102c95447854c7af23ed154c715f7f8b539b7a5 | [] | no_license | andrie/webinar_tensorflow_cloud | 7cde13074a8b0495dfc9703c6160bac19d5f9b1d | 765960fe83fa997ca8023ea7c91e49fdce3994e8 | refs/heads/master | 2020-03-17T09:07:58.737303 | 2018-05-15T05:16:09 | 2018-05-15T05:16:09 | 133,462,299 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 387 | r | cloudml_2_train_flags.R | # devtools::install_github("rstudio/cloudml")
# cloudml::gcloud_install()
# cloudml::gcloud_init()
# Then navigate to the CloudML console:
# https://console.cloud.google.com/mlengine/
## Interactive walkthrough: submit an MNIST CNN training script to Google
## Cloud ML Engine and inspect the resulting runs.
library(cloudml)
library(here)
# run from the folder containing the training script
setwd(here("4-cloudml"))
# submit the training job; collect = TRUE gathers the results when it completes
cloudml_train("mnist_cnn_cloudml.R", collect = TRUE)
job_status()
job_collect()
# browse and filter the collected training runs
view_runs()
ls_runs()
ls_runs(eval_acc > 0.97)
# restore the project root as the working directory
setwd(here())
|
e53ac320d45104e413bf5d20d6088f9a322081e0 | 5b5a18142a86e49a7deb2c349b484dadc335920a | /man/dot-contr.Rd | 151c8742fa2f2870845d5ff4b0a48057ee3f30f3 | [] | no_license | stm/imagefluency | 9067b79f3ad3d6c3e5c683761f89ef2e202cf0ee | d9e6d1e9bea92a20bd464ca3d1b71942cb7cc79e | refs/heads/master | 2023-04-18T05:55:46.240210 | 2022-09-29T17:33:07 | 2022-09-29T17:33:07 | 78,774,174 | 4 | 1 | null | 2021-11-03T15:00:19 | 2017-01-12T18:23:32 | R | UTF-8 | R | false | true | 348 | rd | dot-contr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/contrast.R
\name{.contr}
\alias{.contr}
\title{.contr}
\usage{
.contr(img)
}
\arguments{
\item{img}{A matrix of numeric values or integer values.}
}
\value{
a numeric value (RMS contrast)
}
\description{
Returns the RMS contrast of an image matrix.
}
\keyword{internal}
|
5aafba904622c5281ed1458650763ca049da58b5 | 129408919e4fcde9818bef047f6e9b2a74d23c8a | /man/build_hierarchy_label.Rd | cdab766cdf3249f1b513bc2cc1cb5c5cf3343fcc | [
"MIT"
] | permissive | mrc-ide/naomi | 93decfb73624de911f298aadcc0e0d02b8d7d5e5 | 94d34246144e4dfcb86161258faf213a7db03268 | refs/heads/master | 2023-06-14T06:37:36.343882 | 2023-05-05T11:08:33 | 2023-05-05T11:08:33 | 204,965,083 | 7 | 6 | NOASSERTION | 2023-09-12T12:54:48 | 2019-08-28T15:32:00 | R | UTF-8 | R | false | true | 522 | rd | build_hierarchy_label.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input-time-series.R
\name{build_hierarchy_label}
\alias{build_hierarchy_label}
\title{Return the translated label & description for a set of plot types}
\usage{
build_hierarchy_label(meta_areas)
}
\arguments{
\item{meta_areas}{Data frame containing the area hierarchy metadata.}
}
\value{
For each plot type the label and description as a list of lists
containing id, label and description
}
\description{
Return the translated label & description for a set of plot types
}
|
77b8c40a5578095f5e23c614598cb6a017ccca6c | 884f16907395307c828f0ea7cffbb1b19e2c2e5d | /man/optimize_auc.Rd | 3e493979b38d4a35812364b560cfeaabb6e7a2b2 | [
"MIT"
] | permissive | sachsmc/pseupersims | 7ace4683479cdee7b6cf239121afa1de7b889fea | 34dd053b74769bb4d777f0530818f643d3c4d1c5 | refs/heads/master | 2021-01-24T11:49:06.288041 | 2019-06-07T09:21:52 | 2019-06-07T09:21:52 | 123,104,219 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 499 | rd | optimize_auc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{optimize_auc}
\alias{optimize_auc}
\title{Optimize the time varying AUC}
\usage{
optimize_auc(Z, Y, pseu2)
}
\arguments{
\item{Z}{Matrix of predictions}
\item{Y}{Pseudo values for the cumulative incidence for the competing event of interest at a fixed time}
\item{pseu2}{Matrix of pseudo values for the cumulative incidences for all other competing events}
}
\description{
Optimize the time varying AUC
}
|
148600d4d231774afa0de781e9f2dabb01ea24a5 | 9759c35aaa298aa316beab7b6de52fb0df84bc7d | /R/print.MCAvariants.R | ed6c720e6dabab19b7729db350b232a217cca1ae | [] | no_license | cran/MCAvariants | 0af12457cdd752d6e7960a8d5e6530d13d5b327f | 1eb8bc31ee4b7356e8f97ed8780da51cd13168ae | refs/heads/master | 2021-07-06T07:45:58.915941 | 2021-05-24T16:30:05 | 2021-05-24T16:30:05 | 21,567,116 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,976 | r | print.MCAvariants.R | print.MCAvariants <-
function(x, printdims=2,...) {
## S3 print method for "MCAvariants" objects: writes a formatted summary of
## the analysis (coordinates, inertias, chi-square values) to the console.
## x: object produced by MCAvariants(); printdims: number of axes to show,
## capped at x$maxaxes. Relies on the package helper printwithaxes().
d <- min(printdims, x$maxaxes)
axnames <- character(length = d)
for (i in 1:d) {
axnames[i] <- paste("Axis",i)
}
cat("\n RESULTS for Variants of Multiple Correspondence
Analysis:\n",x$catype, "\n")
cat("\n BURT table \n")
print(x$BURT)
## NOTE(review): assumes x$Rprinccoord has at least 10 rows -- confirm
cat("\n Rows in principal coordinates: the first 10 \n")
printwithaxes(data.frame(x$Rprinccoord[ 1:10,1:d], row.names=x$rowlabels[1:10]), axnames)
cat("\n Columns in principal coordinates \n")
#browser()
printwithaxes(data.frame(x$Cprinccoord[ ,1:d], row.names=x$collabels), axnames)
## extra output printed only for the "omca" variant
if (x$catype == "omca"){
cat("\n Polynomial functions of each variable \n")
print(x$listBpoly)
## NOTE(review): clusterlabels is assigned but never used below -- confirm
clusterlabels <- paste("C",1:x$nmod[1],sep="")
cat("\n Linear Percentage of Clusters \n")
print(x$LinearPercentage)
cat("\n Polynomial Components of Total Inertia \n")
print(x$comp)
cat("\n p-values of Polynomial Components of Total Inertia \n")
print(x$componentpvalue1[1:(x$tmod - x$np)])
cat("Degree of Freedom of Polynomial Component", x$degreef
/(x$tmod - x$np), "\n")
}
cat("\n Inertia values of super-indicator and Burt table\n")
print(round(x$inertias,digits=3))
cat("\n Benzecri's Inertia values, percentages and cumulative
percentages \n")
print(round(x$inertiasAdjusted, digits = 3))
cat("Total Degree of Freedom", x$degreef, "\n")
cat("Total inertia of Super-Indicator table\n")
print(x$inertiaXsum)
cat("Total inertia of BURT table\n")
print(sum(x$inertiaBurtsum))
cat("Chi-square values of BURT Inertia \n")
## NOTE(review): here "dig" partially matches print()'s "digits" argument;
## round() itself rounds to whole numbers -- possibly round(..., 3) was
## intended. Confirm before changing.
print(round(x$inertias[,2]*x$rows),dig=3)
cat("Chi-square value of Total inertia of BURT\n")
print(round(sum(x$inertiaBurtsum)*x$rows),dig=3)
}
734d5dc6e876df1fa1bb40e3c854fbbe6fdcd187 | 9f3b906e11f6acd78afbe50eb0025eac02b9dc21 | /June2020/genelists.2.R | c539c3512aa027a3853273a3f9efa1f112a78b76 | [] | no_license | tiboloic/optiRVIS | 8cbb99daf381c79c0237dfc6d5cbea227ea62110 | f3ecbc42e53cdc84ec095dab14643964c7ce7cfc | refs/heads/master | 2023-06-09T15:24:07.305749 | 2021-07-02T06:11:12 | 2021-07-02T06:11:12 | 205,954,615 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,407 | r | genelists.2.R | # LT 11/06
# update with new score (HGNC)
# LT 7/05
# genelist analysis
# to avoid search/replace, alias the working data set under the old name
optiRVIS=dat
# add RVIS
rvis = read.delim('GenicIntolerance_v3_12Mar16.txt')
# keep only recommended exac default: filter= 0.05%
# (gene name column plus column 21, the 0.05%-filter RVIS percentile)
rvis = rvis[,c(1,21)]
names(rvis) = c('gene','RVIS')
# attach RVIS to the main table by gene symbol (NA where no match)
optiRVIS$RVIS = rvis[match(optiRVIS$gene, rvis$gene), 'RVIS']
## modified for gene groups
## Sort `data` by the increasing `score` column, then append cumulative-share
## columns named "pc<field>" for every field in `percent` plus a running row
## counter ("counter" / "pccounter"). Factor fields contribute the cumulative
## share of rows equal to "Y"; other fields the cumulative share of the
## column total.
orderpercent = function(data, score, percent) {
  ## rows in increasing score order
  ordered <- data[order(data[, score]), ]
  ordered$counter <- 1:nrow(ordered)
  ## cumulative fraction of a column's total
  cum_share <- function(v) cumsum(v) / sum(v)
  for (field in c(percent, 'counter')) {
    pc_name <- paste("pc", field, sep="")
    if (is.factor(ordered[, field]))
      ordered[, pc_name] <- cum_share(ordered[, field] == 'Y')
    else
      ordered[, pc_name] <- cum_share(ordered[, field])
  }
  ordered
}
## Trapezoidal area under a curve given interior points (x, y); the curve is
## anchored at (0, 0) and (1, 1) before integrating.
AUC <- function(x, y) {
  xs <- c(0, x, 1)
  ys <- c(0, y, 1)
  widths <- diff(xs)
  mid_heights <- (ys[-1] + ys[-length(ys)]) / 2
  sum(mid_heights * widths)
}
## For a gene-group indicator column `genegroup` (logical, or factor with
## "Y"/"N" levels -- see orderpercent) in the global optiRVIS table: plot the
## cumulative proportion of group genes captured when genes are ranked by
## each intolerance score, and return the named vector of AUCs.
## Depends on globals optiRVIS, orderpercent(), AUC(); draws on the active
## graphics device.
geneGroups = function(genegroup) {
cols = rainbow(6, start=0.3)
# to store AUC results
AUCs = c()
# name of the cumulative-percent column produced by orderpercent()
pcy=paste0('pc', genegroup)
# virLoF sets up the plot; the remaining scores are overlaid with lines()
counts = orderpercent(optiRVIS, "virlof_percentile", genegroup)
plot(counts$pccounter, counts[,pcy], type="l", xlab="Percentile",
ylab = paste0("Proportion of genes in ", genegroup), main="", lwd=2, col=cols[1])
# diagonal = expected curve for a random ranking
abline(b=1, a=0)
AUCs["virlof"] = AUC(counts$pccounter, counts[,pcy])
#gevir
counts = orderpercent(optiRVIS, "gevir_percentile", genegroup)
lines(counts$pccounter, counts[,pcy], lwd=2, col=cols[2])
AUCs["GeVIR"] = AUC(counts$pccounter, counts[,pcy])
# LOEUF
counts = orderpercent(optiRVIS, "oe_lof_upper", genegroup)
lines(counts$pccounter, counts[,pcy], lwd=2, col=cols[3])
AUCs["LOEUF"] = AUC(counts$pccounter, counts[,pcy])
# MOEUF
counts = orderpercent(optiRVIS, "oe_mis_upper", genegroup)
lines(counts$pccounter, counts[,pcy], lwd=2, col=cols[4])
AUCs["MOEUF"] = AUC(counts$pccounter, counts[,pcy])
#powerSFS
counts = orderpercent(optiRVIS, "pSFS_percentile", genegroup)
lines(counts$pccounter, counts[,pcy], lwd=2, col=cols[5])
AUCs["pSFS"] = AUC(counts$pccounter, counts[,pcy])
#RVIS
counts = orderpercent(optiRVIS, "RVIS", genegroup)
lines(counts$pccounter, counts[,pcy], lwd=2, col=cols[6])
AUCs["RVIS"] = AUC(counts$pccounter, counts[,pcy])
legend('bottomright', legend = c(paste0('virLOF AUC=', round(AUCs["virlof"],2)),
paste0('GeVIR AUC=', round(AUCs["GeVIR"],2)),
paste0('LOEUF AUC=', round(AUCs["LOEUF"],2)),
paste0('MOEUF AUC=', round(AUCs["MOEUF"],2)),
paste0('powerSFS AUC=', round(AUCs["pSFS"],2)),
paste0('RVIS AUC=', round(AUCs["RVIS"],2))),
fill=cols[1:6], bty='n')
AUCs
}
# Recovery curves for the autosomal-dominant and mouse-het-lethal gene sets
# already present as columns of `optiRVIS`.
geneGroups("ad_group")
geneGroups("mouse_het_lethal_group")
# Congenital heart disease genes: flag membership from the CHDgene table,
# then plot the recovery curve for that indicator column.
chdgenes = read.csv('chdgene_table.csv')
optiRVIS$chd_group = optiRVIS$gene %in% chdgenes$Gene
geneGroups("chd_group")
# add DDG@P disorders
ddg2p = read.csv('DDG2P_11_6_2020.csv')
optiRVIS$ddg = optiRVIS$gene %in% ddg2p$gene.symbol
geneGroups("ddg")
|
dbe4b60ae9a97a9a7e449c71aaa5c4f4c4d5f64c | 770c10ba913d3173500ab376854acf4e1795b8dd | /R-package/R/lmdb_dataset.R | 62ce5e75e94cc0b150706770884f2686f6fd61a2 | [
"Apache-2.0"
] | permissive | alipay/tensorflow_io | 35cbefe33a586ea4c681b7d0a81a16458debf90b | cf2e92470ea2e947f2da193a5eea2d837a7b2fc2 | refs/heads/master | 2023-07-07T06:38:24.141414 | 2019-05-15T01:11:07 | 2019-05-15T01:11:07 | 174,248,072 | 10 | 3 | Apache-2.0 | 2019-05-18T17:05:46 | 2019-03-07T01:25:28 | C++ | UTF-8 | R | false | false | 690 | r | lmdb_dataset.R | #' Create a `LMDBDataset`.
#'
#' This function allows a user to read data from a LMDB
#' file. A lmdb file consists of (key value) pairs sequentially.
#'
#' @param filenames A `tf.string` tensor containing one or more filenames.
#'
#' @examples \dontrun{
#' dataset <- sequence_file_dataset("testdata/data.mdb") %>%
#' dataset_repeat(1)
#'
#' sess <- tf$Session()
#' iterator <- make_iterator_one_shot(dataset)
#' next_batch <- iterator_get_next(iterator)
#'
#' until_out_of_range({
#' batch <- sess$run(next_batch)
#' print(batch)
#' })
#' }
#'
#' @export
lmdb_dataset <- function(filenames) {
  # Build the TensorFlow-IO LMDBDataset from `filenames` and wrap it so it
  # participates in the R tfdatasets pipeline API.
  as_tf_dataset(tfio_lib$lmdb$LMDBDataset(filenames = filenames))
}
|
9dd7016671ee9e7e9e6b241ffd33ddfe9df3249a | e37a5bb9c7469f00d410e8d7ddbcee53813865ee | /tests/testthat/test.bt.spread.R | fab4cd59c127ccdd3dd001796956be83b80b6f65 | [] | no_license | knightsay/backtest | 6b06de87f60da2e5ab2ff222087205a8f466c9bf | ca61f3e54a7d348ae59a415ade262f5ceef39fda | refs/heads/master | 2021-01-16T08:58:08.460804 | 2015-03-24T05:54:14 | 2015-03-24T05:54:14 | 30,329,078 | 2 | 0 | null | 2015-02-05T00:27:19 | 2015-02-05T00:27:18 | R | UTF-8 | R | false | false | 273 | r | test.bt.spread.R | context("Test for bt.spread")
library("backtest")
# Regression test: the unexported spread computation must reproduce the saved
# `truth` values. The fixture file supplies m, n, sd and truth (the commented
# save() call below shows how it was produced).
test_that("Test for bt.spread", {
load("bt.spread.test.RData")
## save(m, n, sd, truth, file = "bt.spread.test.RData", compress = TRUE)
# ::: reaches the internal .bt.spread; all.equal allows numeric tolerance
expect_true(all(mapply(all.equal, backtest:::.bt.spread(m, n, sd), truth)))
})
|
7052811a4b47fbb93d1cacbb5ebff396cf4e52a5 | d4a5506300645b47d43b38a59ace09e74ffeb1b8 | /functions_WT.R | 874aa4fcd540c14aab8b0d93b142e3ca84705c57 | [] | no_license | Speedcy/Analyse_ACV_eolienne | d511dd3f1c0781a31b0d39568a597ae3b808a9cd | 1d8b90c5cc4f24c2633ca6f42494bfd015f818ae | refs/heads/main | 2023-03-12T13:03:33.862966 | 2021-03-04T14:45:19 | 2021-03-04T14:45:19 | 342,656,113 | 0 | 0 | null | null | null | null | ISO-8859-2 | R | false | false | 8,065 | r | functions_WT.R | ##R script with the functions necessary to compute the GHG performances of a wind turbine
#Functions definition
# 1.FUNCTIONS TO MODEL THE ONSHORE WIND POWER ELECTRICITY
# PROCESS CHAIN
prod_invent_ws = function(M_part, share_part) {
  # Turn a percentage breakdown into absolute amounts: mass * share / 100.
  # `share_part` is expressed in percent; vector/matrix shares yield one
  # amount per entry (row/column names are preserved).
  M_part * share_part / 100
}
# Nacelle process vector (mass per material, from % shares)
p_nacelle<-prod_invent_ws(M_nacelle,share_nacelle)
# Rotor process vector
p_rotor<-prod_invent_ws(M_rotor,share_rotor)
# Foundation process vector
p_foundation<-prod_invent_ws(M_foundation,share_foundation)
# Tower process vector
p_tower<-prod_invent_ws(M_tower,share_tower)
# Maintenance process vector: fraction of the nacelle replaced over lifetime
p_maintenance=prod_invent_ws(M_nacelle,share_nacelle)*maintenance[1,1]*0.01
# Energy process vector (row names come from the variable names)
p_energy=rbind(Q_diesel,Q_electMedVolt)
colnames(p_energy)<-"WT1"
# Transport process vector, in tonne-kilometres (mass kg * km / 1000)
p_transport<-rbind(1,2,3)
p_transport[2,1]<-(M_nacelle+M_rotor)*distance_movingPart*share_transport_movingPart[1,1]/1000
p_transport[3,1]<-(M_tower+M_foundation)*distance_fixedPart*share_transport_fixedPart[1,1]/1000
p_transport[1,1]<-(M_nacelle+M_rotor)*distance_movingPart*share_transport_movingPart[2,1]/1000+(M_tower+M_foundation)*distance_fixedPart*share_transport_fixedPart[2,1]/1000
rownames(p_transport)<-c("train","lorry 28t","lorry 32t")
colnames(p_transport)<-"WT1"
# Assemble the full 18-entry process vector; each slot below sums the
# contributions of the turbine parts to one material/process flow.
p=rbind(rep(0,18))
# 1 Cast Iron
p[1]=p_nacelle[4,1]+p_rotor[3,1]+p_maintenance[4,1]
# 2 Chromium Steel
p[2]=p_nacelle[3,1]+p_rotor[1,1]+p_maintenance[3,1]
# 3 Reinforcing Steel
p[3]=p_foundation[2,1]
# 4 Steel low
p[4]=p_tower[1,1]+p_nacelle[2,1]+p_maintenance[2,1]
# 5 Aluminium
p[5]=p_rotor[4,1]+p_nacelle[7,1]+p_maintenance[7,1]
# 6 Copper
p[6]=p_nacelle[6,1]+p_maintenance[6,1]
# 7 Concrete (mass converted to volume via density_conc)
p[7]=p_foundation[1,1]/density_conc[1]
# 8 GPRF
p[8]=p_nacelle[1,1]+p_rotor[2,1]+p_maintenance[1,1]
# 9 Rail
p[9]=p_transport[1,1]
# 10 lorry 28
p[10]=p_transport[2,1]
# 11 lorry 32
p[11]=p_transport[3,1]
# 12 Sheet Rolling Aluminium
p[12]=p_rotor[4,1]+p_nacelle[7,1]+p_maintenance[7,1]
# 13 Sheet Rolling Chromium Steel
p[13]=p_nacelle[3,1]+p_rotor[1,1]+p_maintenance[3,1]
# 14 Sheet Rolling Steel
p[14]=p_tower[1,1]+p_nacelle[2,1]+p_maintenance[2,1]+p_nacelle[4,1]+p_rotor[3,1]+p_maintenance[4,1]
# 15 Wire drawing steel
p[15]=p_nacelle[6,1]+p_maintenance[6,1]
# 16 Lubricating oil
p[16]=p_nacelle[5,1]+p_maintenance[5,1]
# 17 Diesel
p[17]=p_energy[1,1]
# 18 Elec
p[18]=p_energy[2,1]
# Single-column matrix (18 rows) labelled for turbine "WT1"
p=data.frame(p)
p=t(as.matrix(p))
colnames(p)<-"WT1"
# 2.FUNCTIONS TO MODEL THE IMPACTS OF THE ONSHORE WIND POWER ELECTRICITY
impact_unitProcess = function(Q, E, CF_name) {
  # Impact of a set of unit processes: the quantity column `CF_name` of Q
  # (taken as a row vector) multiplied by the emission-factor columns of E
  # (everything after the first three columns, which are metadata).
  # Returns a 1-row matrix with one impact value per factor column of E.
  quantities <- t(Q[CF_name])
  factors <- E[, 4:ncol(E)]
  as.matrix(quantities) %*% as.matrix(factors)
}
# Life-cycle impact of the nacelle: material masses (from `share_nacelle`,
# which must carry rows named "GFRP", "steel", "inox", "cast_iron", "oil",
# "copper", "aluminium", in %) weighted by the per-kg impact factors.
# Shaping impacts are added where relevant: sheet rolling for steel, cast
# iron (rolled as steel), chromium steel and aluminium; wire drawing for
# copper.
impact_nacelle = function(M_nacelle, share_nacelle, I_GFRP, I_steel, I_sheetSteel, I_castIron, I_chromiumSteel, I_sheetChromium, I_oil, I_copper, I_wire, I_alu, I_sheetAlu){
p_nacelle = prod_invent_ws(M_nacelle, share_nacelle)
Imp_nacelle = p_nacelle["GFRP",1]*I_GFRP+
p_nacelle["steel",1]*(I_steel+I_sheetSteel)+
p_nacelle["inox",1]*(I_chromiumSteel+I_sheetChromium)+
p_nacelle["cast_iron",1]*(I_castIron+I_sheetSteel)+
p_nacelle["oil",1]*I_oil+
p_nacelle["copper",1]*(I_copper+I_wire)+
p_nacelle["aluminium",1]*(I_alu+I_sheetAlu)
return(Imp_nacelle)
}
# Life-cycle impact of the rotor: material masses (from `share_rotor`, which
# must carry rows named "inox", "GFRP", "aluminium", "cast_iron", in %)
# weighted by the per-kg impact factors, with sheet-rolling impacts added
# for chromium steel, aluminium and cast iron (rolled as steel).
impact_rotor = function(M_rotor, share_rotor, I_GFRP, I_sheetSteel, I_castIron, I_chromiumSteel, I_sheetChromium, I_alu, I_sheetAlu){
p_rotor = prod_invent_ws(M_rotor,share_rotor)
Imp_rotor = p_rotor["inox",1]*(I_chromiumSteel+I_sheetChromium)+
p_rotor["GFRP",1]*I_GFRP+
p_rotor["aluminium",1]*(I_alu+I_sheetAlu)+
p_rotor["cast_iron",1]*(I_castIron+I_sheetSteel)
return(Imp_rotor)
}
impact_tower = function(M_tower, share_tower, I_steel, I_sheetSteel) {
  # Life-cycle impact of the tower: its steel mass (the "steel" row of the
  # percentage share table applied to the total mass) times the steel
  # production + sheet-rolling impact factors.
  steel_mass <- M_tower * share_tower["steel", 1] / 100
  steel_mass * (I_steel + I_sheetSteel)
}
# Life-cycle impact of the foundation: `share_foundation` must carry rows
# named "concrete" and "reinforcing_steel" (in %). The concrete mass is
# divided by 2200 before applying I_concrete — presumably a kg/m3 density
# converting mass to volume; NOTE(review): confirm this matches the
# `density_conc` global used elsewhere in this file.
impact_foundation = function(M_foundation, share_foundation, I_reinfSteel, I_concrete){
p_foundation = prod_invent_ws(M_foundation, share_foundation)
Imp_foundation = p_foundation["concrete",1]*I_concrete/2200+
p_foundation["reinforcing_steel",1]*I_reinfSteel
return(Imp_foundation)
}
impact_maintenance = function(per_replaced_nacelle, Ip_nacelle) {
  # Maintenance impact: the nacelle's impact scaled by the percentage of the
  # nacelle replaced over the turbine's lifetime.
  Ip_nacelle * per_replaced_nacelle / 100
}
impact_energy = function(Q_diesel, Q_electMedVolt, I_diesel, I_electMedVolt) {
  # Impact of the energy consumed: diesel and medium-voltage electricity
  # quantities, each weighted by its impact factor. rbind() names the rows
  # after the argument symbols, so the lookups below resolve by name.
  consumption <- rbind(Q_diesel, Q_electMedVolt)
  colnames(consumption) <- "WT1"
  consumption["Q_diesel", 1] * I_diesel +
    consumption["Q_electMedVolt", 1] * I_electMedVolt
}
# Life-cycle impact of transporting the turbine parts. Moving parts
# (nacelle + rotor) and fixed parts (tower + foundation) are split between
# lorry and train according to the share tables; quantities are in
# tonne-kilometres (mass kg * distance km / 1000), then weighted by the
# per-tkm impact factor of each transport mode.
impact_transport = function(M_nacelle, M_rotor, M_tower, M_foundation, share_transport_movingPart, distance_movingPart, share_transport_fixedPart, distance_fixedPart, I_lorry28t, I_lorry32t, I_train){
# placeholder 3x1 vector: rows = train, lorry 28t, lorry 32t
p_transport<-rbind(1,2,3)
p_transport[2,1]<-(M_nacelle+M_rotor)*distance_movingPart*share_transport_movingPart[1,1]/1000
p_transport[3,1]<-(M_tower+M_foundation)*distance_fixedPart*share_transport_fixedPart[1,1]/1000
# train carries the second share of both the moving and the fixed parts
p_transport[1,1]<-(M_nacelle+M_rotor)*distance_movingPart*share_transport_movingPart[2,1]/1000+(M_tower+M_foundation)*distance_fixedPart*share_transport_fixedPart[2,1]/1000
rownames(p_transport)<-c("train","lorry 28t","lorry 32t")
colnames(p_transport)<-"WT1"
Imp_transport = p_transport["train",1]*I_train+
p_transport["lorry 28t",1]*I_lorry28t+
p_transport["lorry 32t",1]*I_lorry32t
return(Imp_transport)
}
# Total life-cycle impact of the wind turbine: unpacks the per-process
# impact factors from the QE vector (positions follow the 18-entry process
# vector built earlier in this file), computes each component's impact via
# the impact_* helpers, prints a per-component breakdown, and returns the
# grand total.
impact_WT = function(M_nacelle, share_nacelle, M_rotor, share_rotor, M_tower, share_tower, M_foundation, share_foundation, per_replaced_nacelle, share_transport_movingPart, distance_movingPart, share_transport_fixedPart, distance_fixedPart, Q_diesel, Q_electMedVolt, QE){
# QE indices match the process ordering: 1 cast iron, 2 chromium steel,
# 3 reinforcing steel, 4 steel, 5 aluminium, 6 copper, 7 concrete, 8 GFRP,
# 9 train, 10/11 lorries, 12-14 sheet rolling, 15 wire drawing, 16 oil,
# 17 diesel, 18 electricity.
I_GFRP = QE[8]
I_steel = QE[4]
I_sheetSteel = QE[14]
I_castIron = QE[1]
I_chromiumSteel = QE[2]
I_sheetChromium = QE[13]
I_oil = QE[16]
I_copper = QE[6]
I_wire = QE[15]
I_alu = QE[5]
I_sheetAlu = QE[12]
I_reinfSteel = QE[3]
I_concrete = QE[7]
I_lorry28t = QE[10]
I_lorry32t = QE[11]
I_train = QE[9]
I_diesel = QE[17]
I_electMedVolt = QE[18]
Ip_nacelle = impact_nacelle(M_nacelle, share_nacelle, I_GFRP, I_steel, I_sheetSteel, I_castIron, I_chromiumSteel, I_sheetChromium, I_oil, I_copper, I_wire, I_alu, I_sheetAlu)
Ip_rotor = impact_rotor(M_rotor, share_rotor, I_GFRP, I_sheetSteel, I_castIron, I_chromiumSteel, I_sheetChromium, I_alu, I_sheetAlu)
Ip_tower = impact_tower(M_tower, share_tower, I_steel, I_sheetSteel)
Ip_foundation = impact_foundation(M_foundation, share_foundation, I_reinfSteel, I_concrete)
Ip_maintenance = impact_maintenance(per_replaced_nacelle, Ip_nacelle)
Ip_transport = impact_transport(M_nacelle, M_rotor, M_tower, M_foundation, share_transport_movingPart, distance_movingPart, share_transport_fixedPart, distance_fixedPart, I_lorry28t, I_lorry32t, I_train)
Ip_energy = impact_energy(Q_diesel, Q_electMedVolt, I_diesel, I_electMedVolt)
# side effect: per-component breakdown printed to the console
print(c("Nacelle",Ip_nacelle))
print(c("Rotor",Ip_rotor))
print(c("Tower",Ip_tower))
print(c("Foundation",Ip_foundation))
print(c("Maintenance",Ip_maintenance))
print(c("Transport",Ip_transport))
print(c("Energy",Ip_energy))
return(Ip_nacelle+Ip_rotor+Ip_tower+Ip_foundation+Ip_maintenance+Ip_energy+Ip_transport)
}
# 3. FUNCTIONS TO CALCULATE ELECTRICITY PRODUCTION
# OVER THE TURBINE LIFETIME (TO BE CREATED BY THE STUDENTS)
elecProd = function(availability, load_factor, life_time, nom_power) {
  # Electricity produced over the turbine's lifetime: 8760 hours/year scaled
  # by availability, load factor, lifetime (years) and nominal power.
  8760 * availability * load_factor * life_time * nom_power
}
# 4. FUNCTIONS TO CALCULATE THE TURBINE DIMENSIONS
# (NOT NEEDED FOR OUR PROJECT)
calc_D_h = function(P) {
  # Empirical scaling rules giving rotor diameter D and hub height h from
  # the nominal power P. Returns c(D, h).
  rotor_diameter <- 1.5624 * P^0.522
  hub_height <- 0.0153 * P + 48.493
  c(rotor_diameter, hub_height)
}
# Empirical component masses from rotor diameter D and hub height h.
# Returns c(nacelle, rotor, tower, foundation, concrete volume,
# reinforcing steel).
# NOTE(review): `density_conc` is a free variable resolved from the global
# environment, and V_conc already divides by a hard-coded 2200 — confirm
# the two densities are meant to be the same value.
calc_weights = function(D, h){
M_nacelle = 10^(0.64)*D^(2.19)
M_rotor = 10^(0.3)*D^(2.22)
M_tower = 10^(1.7)*D^(1.82)
# concrete: 4500 kg per metre of hub height, converted to volume
V_conc = 4500*h/2200
M_reinfSteel = 280*h
M_foundation = V_conc*density_conc + M_reinfSteel
return(c(M_nacelle, M_rotor, M_tower, M_foundation, V_conc, M_reinfSteel))
}
|
c6fdd9f47c6010f65d9f956895289772723d82e0 | 1c1ac604314d3c8785a8f3d14f2df1afc7429ad3 | /R/correspond_rank.R | fafa5a48f93ae15bb7ed9ed0e227932da690fbcf | [
"MIT"
] | permissive | jokergoo/cola | 7abd6dfd0bb487ce601a045f021c0a61359486df | 8376b71ab216f69fd77b7af9f898048c5dfc6070 | refs/heads/master | 2023-06-07T08:34:15.370800 | 2023-06-06T07:45:08 | 2023-06-06T07:45:08 | 91,272,219 | 58 | 14 | null | 2018-03-01T09:50:37 | 2017-05-14T21:21:56 | R | UTF-8 | R | false | false | 6,597 | r | correspond_rank.R |
# == title
# Correspond two rankings
#
# == param
# -x1 A vector of scores calculated by one metric.
# -x2 A vector of scores calculated by another metric.
# -name1 Name of the first metric.
# -name2 Name of the second metric.
# -col1 Color for the first metric.
# -col2 Color for the second metric.
# -top_n Top n elements to show the correspondance.
# -transparency Transparency of the connecting lines.
# -pt_size Size of the points, must be a `grid::unit` object.
# -newpage Whether to plot in a new graphic page.
# -ratio Ratio of width of the left barplot, connection lines and right barplot. The three values will be scaled to a sum of 1.
#
# == details
# In ``x1`` and ``x2``, the i^th element in both vectors corresponds to the same object (e.g. same row if they are calculated from a matrix) but with different
# scores under different metrics.
#
# ``x1`` and ``x2`` are sorted in the left panel and right panel respectively. The top n elements
# under corresponding metric are highlighted by vertical colored lines in both panels.
# The left and right panels also shown as barplots of the scores in the two metrics.
# Between the left and right panels, there are lines connecting the same element (e.g. i^th element in ``x1`` and ``x2``)
# in the two ordered vectors so that you can see how a same element has two different ranks in the two metrics.
#
# Under the plot is a simple Venn diagram showing the overlaps of the top n elements
# by the two metrics.
#
# == value
# No value is returned.
#
# == seealso
# `correspond_between_rankings` draws for more than 2 sets of rankings.
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
# == examples
# require(matrixStats)
# mat = matrix(runif(1000), ncol = 10)
# x1 = rowSds(mat)
# x2 = rowMads(mat)
# correspond_between_two_rankings(x1, x2, name1 = "SD", name2 = "MAD", top_n = 20)
# See the roxygen block above for the full contract. Layout: a three-column
# grid viewport (left barplot for x1, connection lines, right barplot for
# x2), plus a simple two-rectangle Venn diagram of the top-n overlap in the
# bottom centimetre of the page.
correspond_between_two_rankings = function(x1, x2, name1, name2,
col1 = 2, col2 = 3, top_n = round(0.25*length(x1)), transparency = 0.9,
pt_size = unit(1, "mm"), newpage = TRUE, ratio = c(1, 1, 1)) {
if(newpage) {
grid.newpage()
}
if(length(x1) != length(x2)) {
stop("Length of `x1` and `x2` should be the same.")
}
# random tie-breaking so every element gets a distinct rank
r1 = rank(x1, ties.method = "random")
r2 = rank(x2, ties.method = "random")
if(missing(name1)) name1 = deparse(substitute(x1))
if(missing(name2)) name2 = deparse(substitute(x2))
n = length(x1)
# reserve space above the panels for the metric names, below for the Venn
text_height = grobHeight(textGrob("foo"))*2
pushViewport(viewport(layout = grid.layout(nrow = 1, ncol = 3, widths = unit(ratio, "null")),
width = unit(1, "npc") - unit(2, "mm"),
height = unit(1, "npc") - text_height - unit(1, "cm"), y = unit(1, "cm"), just = "bottom"))
# left panel: x1 bars drawn right-to-left, ordered by rank r1; points mark
# elements that are in the top n under the OTHER metric (x2)
max_x1 = max(x1)
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 1,
xscale = c(0, max_x1), yscale = c(0, n + 1)))
grid.segments(max_x1 - x1, r1, max_x1, r1, default.units = "native", gp = gpar(col = "#EFEFEF"))
l = r2 >= n - top_n
grid.points(max_x1 - x1[l], r1[l], default.units = "native", pch = 16, size = pt_size, gp = gpar(col = add_transparency(col2, 0.5)))
grid.text(name1, x = 1, y = unit(n + 1, "native") + unit(1, "mm"), default.units = "npc", just = c("right", "bottom"))
upViewport()
# right panel: mirror image for x2, highlighting x1's top n
max_x2 = max(x2)
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 3,
xscale = c(0, max_x2), yscale = c(0, n + 1)))
grid.segments(0, r2, x2, r2, default.units = "native", gp = gpar(col = "#EFEFEF"))
l = r1 >= n - top_n
grid.points(x2[l], r2[l], default.units = "native", pch = 16, size = pt_size, gp = gpar(col = add_transparency(col1, 0.5)))
grid.text(name2, x = 0, y = unit(n + 1, "native") + unit(1, "mm"), default.units = "native", just = c("left", "bottom"))
upViewport()
# middle panel: one line per element in either top-n set, joining its rank
# under x1 (left edge) to its rank under x2 (right edge)
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 2, xscale = c(0, 1), yscale = c(0, n + 1)))
l = r1 >= n - top_n | r2 >= n - top_n
# if(sum(!l)) grid.segments(0, r1[!l], 1, r2[!l], default.units = "native", gp = gpar(col = "#EEEEEE80"))
if(sum(l)) {
grid.segments(0, r1[l], 1, r2[l], default.units = "native", gp = gpar(col = add_transparency("#000000", transparency)))
# for(ind in which(l)) {
# grid.bezier(c(0, 1, 0, 1), c(r1[ind], r1[ind], r2[ind], r2[ind]), default.units = "native", gp = gpar(col = add_transparency("#000000", transparency)))
# }
}
# side rails: grey for the non-top region, thick coloured for the top n
grid.segments(c(0, 1), c(1, 1), c(0, 1), c(n - top_n, n - top_n), default.units = "native", gp = gpar(col = "#EEEEEE"))
grid.segments(c(0, 1), c(n - top_n, n - top_n), c(0, 1), c(n, n), default.units = "native", gp = gpar(lwd = 4, col = c(col1, col2)))
upViewport()
upViewport()
# add a venn diagram at the bottom
n_intersect = length(intersect(order(x1, decreasing = TRUE)[1:top_n], order(x2, decreasing = TRUE)[1:top_n]))
n_union = 2*top_n - n_intersect
grid.roundrect(x = unit(0.5 - n_intersect/2/top_n*0.4, "npc"), y = unit(0.4, "cm"), width = unit(0.4, "npc"),
height = unit(0.4, "cm"), gp = gpar(fill = add_transparency(col2, 0.5), col = NA), just = "left")
grid.roundrect(x = unit(0.5 + n_intersect/2/top_n*0.4, "npc"), y = unit(0.4, "cm"), width = unit(0.4, "npc"),
height = unit(0.4, "cm"), gp = gpar(fill = add_transparency(col1, 0.5), col = NA), just = "right")
grid.text(qq("top @{top_n}/@{length(x1)}"), x = unit(0.5, "npc"), y = unit(0.7, "cm"), just = "bottom", gp = gpar(fontsize = 8))
}
# == title
# Correspond between a list of rankings
#
# == param
# -lt A list of scores under different metrics.
# -top_n Top n elements to show the correspondance.
# -col A vector of colors for ``lt``.
# -... Pass to `correspond_between_two_rankings`.
#
# == details
# It makes plots for every pairwise comparison in ``lt``.
#
# == value
# No value is returned.
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
# == examples
# require(matrixStats)
# mat = matrix(runif(1000), ncol = 10)
# x1 = rowSds(mat)
# x2 = rowMads(mat)
# x3 = rowSds(mat)/rowMeans(mat)
# correspond_between_rankings(lt = list(SD = x1, MAD = x2, CV = x3),
# top_n = 20, col = c("red", "blue", "green"))
# See the roxygen block above. Lays out one correspond_between_two_rankings
# panel per unordered pair in `lt`, side by side in a single grid row.
correspond_between_rankings = function(lt, top_n = length(lt[[1]]),
col = cola_opt$color_set_1[1:length(lt)], ...) {
nm = names(lt)
n = length(lt)
# one panel per pairwise comparison
n_plots = n*(n-1)/2
if(length(col) == 1) {
col = rep(col, n)
}
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow = 1, ncol = n_plots)))
k = 0
for(i in seq_len(n-1)) {
for(j in (i+1):n) {
k = k + 1
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = k))
# 90% width leaves a small gutter between neighbouring panels
pushViewport(viewport(width = 0.9))
correspond_between_two_rankings(lt[[i]], lt[[j]], nm[i], nm[j], col[i], col[j], top_n, newpage = FALSE, ...)
upViewport()
upViewport()
}
}
upViewport()
}
|
4fc4bb115b8d31193b4d8ac4f1c42ec2ed888a6a | 72f2bbe0b5ee226f514c8e317a7c6f999d3938f6 | /man/corenlp.Rd | 7b418435b8117e18647875895770abc5f9546cc0 | [] | no_license | bethanyleap/SpeedReader | 4853f6fa2dda451b24b795e11063e843d8a0f9a2 | 8e3bcd38d03dabc3efa7f4f7981ce002aa79a5e8 | refs/heads/master | 2022-04-10T20:36:35.103520 | 2020-03-18T02:07:36 | 2020-03-18T02:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,686 | rd | corenlp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corenlp.R
\name{corenlp}
\alias{corenlp}
\title{Runs Stanford CoreNLP on a collection of documents}
\usage{
corenlp(documents = NULL, document_directory = NULL, file_list = NULL,
delete_intermediate_files = TRUE, syntactic_parsing = FALSE,
coreference_resolution = FALSE, additional_options = "",
return_raw_output = FALSE, version = "3.5.2", block = 1)
}
\arguments{
\item{documents}{An optional list of character vectors or a vector of strings, with one entry per dcument. These documents will be run through CoreNLP.}
\item{document_directory}{An optional directory path to a directory contianing only .txt files (one per document) to be run through CoreNLP. Cannot be supplied in addition to the 'documents' argument.}
\item{file_list}{An optional list of .txt files to be used if document_directory option is specified. Can be useful if the user only wants to process a subset of documents in the directory such as when the corpus is extremely large.}
\item{delete_intermediate_files}{Logical indicating whether intermediate files produced by CoreNLP should be deleted. Defaults to TRUE, but can be set to FALSE and the xml output of CoreNLP will be saved.}
\item{syntactic_parsing}{Logical indicating whether syntactic parsing should be included as an option. Defaults to FALSE. Caution, enabling this argument may greatly increase runtime. If TRUE, output will automatically be return in raw format.}
\item{coreference_resolution}{Logical indicating whether coreference resolution should be included as an option. Defaults to FALSE. Caution, enabling this argument may greatly increase runtime. If TRUE, output will automatically be return in raw format.}
\item{additional_options}{An optional string specifying additional options for CoreNLP. May cause unexpected behavior, use at your own risk!}
\item{return_raw_output}{Defaults to FALSE, if TRUE, then CoreNLP output is not parsed and raw list objects are returned.}
\item{version}{The version of Core-NLP to download. Defaults to '3.5.2'. Newer versions of CoreNLP will be made available at a later date.}
\item{block}{An internal file list identifier used by corenlp_blocked() to avoid collisions. Should not be set by the user.}
}
\value{
Returns a list of data.frame objects, one per document, where each row is a token observation (in order)
}
\description{
Runs Stanford CoreNLP on a collection of documents
}
\examples{
\dontrun{
directory <- system.file("extdata", package = "SpeedReader")[1]
Tokenized <- corenlp(
document_directory = directory,
syntactic_parsing = FALSE,
coreference_resolution =FALSE)
}
}
|
9dddb04ca4d2e48111d79e9a0c0e2a2efbb28a44 | 5a7f7ebee0e458863e1da9d2a0fcc93b600d1786 | /man/BasicObject.Rd | 0e53a73a29ae15e73eb13d036ce613cf0afa8a54 | [] | no_license | HenrikBengtsson/R.oo | 68071bacb43afe2a46201aea0350a3597ee19e6c | 4101a141b2fa49a43a10df99f56c180ba2c662e6 | refs/heads/master | 2023-01-06T23:48:54.872999 | 2022-06-12T18:04:23 | 2022-06-12T18:04:23 | 19,437,907 | 20 | 1 | null | 2018-05-02T04:51:57 | 2014-05-04T22:47:54 | R | UTF-8 | R | false | false | 2,552 | rd | BasicObject.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% BasicObject.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{BasicObject}
\docType{class}
\alias{BasicObject}
\title{A root class like Object but without references}
\description{
R.oo\cr
\bold{Class BasicObject}\cr
public class \bold{BasicObject}\cr
}
\usage{
BasicObject(core=NULL)
}
\arguments{
\item{core}{The core value of the object.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{$} \tab -\cr
\tab \code{$<-} \tab -\cr
\tab \code{.DollarNames} \tab -\cr
\tab \code{.subset2Internal} \tab -\cr
\tab \code{[[} \tab -\cr
\tab \code{[[<-} \tab -\cr
\tab \code{\link[R.oo:as.character.BasicObject]{as.character}} \tab Gets a character string representing the object.\cr
\tab \code{\link[R.oo:attach.BasicObject]{attach}} \tab Attach an BasicObject to the R search path.\cr
\tab \code{\link[R.oo:detach.BasicObject]{detach}} \tab Detach an BasicObject from the R search path.\cr
\tab \code{\link[R.oo:equals.BasicObject]{equals}} \tab Compares an object with another.\cr
\tab \code{\link[R.oo:extend.BasicObject]{extend}} \tab Extends another class.\cr
\tab \code{\link[R.oo:getFields.BasicObject]{getFields}} \tab Returns the field names of an BasicObject.\cr
\tab \code{\link[R.oo:getInstantiationTime.BasicObject]{getInstantiationTime}} \tab Gets the time when the object was instantiated.\cr
\tab \code{\link[R.oo:hasField.BasicObject]{hasField}} \tab Checks if a field exists or not.\cr
\tab \code{\link[R.oo:hashCode.BasicObject]{hashCode}} \tab Gets a hash code for the object.\cr
\tab \code{\link[R.oo:isReferable.BasicObject]{isReferable}} \tab Checks if the object is referable or not.\cr
\tab \code{\link[R.oo:newInstance.BasicObject]{newInstance}} \tab Creates a new instance of the same class as this object.\cr
\tab \code{\link[R.oo:objectSize.BasicObject]{objectSize}} \tab Gets the size of the BasicObject in bytes.\cr
\tab \code{\link[R.oo:print.BasicObject]{print}} \tab Prints an BasicObject.\cr
}
\bold{Methods inherited from logical}:\cr
Ops,nonStructure,vector-method, Ops,structure,vector-method, Ops,vector,nonStructure-method, Ops,vector,structure-method, as.data.frame, as.raster, coerce,ANY,logical-method
}
\author{Henrik Bengtsson}
\keyword{programming}
\keyword{methods}
\keyword{internal}
\keyword{classes}
|
5af99c071e1c5a8f768fd201ec1af1e84d37f6c7 | 898ffa72bddaef9fda961d435ddae885b05e1758 | /model/plot.R | 09c26c8ae4173a9a9813893fc248520a4d0216fa | [
"MIT"
] | permissive | ryanvmenezes/tiepredict | d0b5399258957d53abb26d91ce85d2c644a66c51 | ebe25db30ea4cb91ccff73ac2f2fa08ad96c3d1e | refs/heads/master | 2023-04-11T21:29:27.732287 | 2021-05-07T23:31:56 | 2021-05-07T23:31:56 | 125,787,072 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 340 | r | plot.R | library(here)
library(furrr)
library(tidyverse)
plan(multiprocess)
source(here('model', 'utils.R'))
versions = c(
# 'v1',
# 'v2.1',
'v2.2'
# 'v2.2.1'
)
for (this.version in versions) {
predictions = read.predictions(this.version)
plots = make.all.plots(predictions)
export.all.plots(plots, this.version)
}
|
973fa62f2186cbc3d9e3315f4ddbc4284915c979 | feb4ad6eb06602dd3372309abc2b75536bc5e049 | /Crime_More/crime-app/ui.R | 0286fc7295ad48d71755b9589dc6325f416da9b6 | [] | no_license | alichtner/SeattleCrime | 838374cdf76488487df03443ab0505e26265bcbf | ebed16c087ecc2aa364863419f9e875f74c4d823 | refs/heads/master | 2021-01-10T14:29:52.913103 | 2015-11-13T16:10:33 | 2015-11-13T16:10:33 | 45,291,521 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,302 | r | ui.R | library(shiny)
library(dplyr)
library(ggplot2)
library(lubridate)
# Shiny UI: a 4-column sidebar of inputs (crime type, precinct, date range,
# precinct reference map) next to an 8-column panel with three plot outputs
# (crimePlot on top, byHour and acrossCity side by side below).
shinyUI(fluidPage(
titlePanel("Seattle Crime Microscope"),
fluidRow(
column(4, h3("How is crime changing in Seattle?"),
p("This tool uses Seattle Police Incident Reports from 2010 to 2015.
In that time period there have been over 600,000 reported crimes across the city."),
p("",
a("Source Data",
href = "https://data.seattle.gov/Public-Safety/Seattle-Police-Department-911-Incident-Response/3k2p-39jp")),
# crimeChoices is defined outside this file (global.R / server scope)
selectInput("crime",
label = "Which crime?",
choices = crimeChoices,
selected = "Burglaries"),
selectInput("area",
label = "Select a Precinct: See map below",
choices = list("B","C","D","E","F","G","J","K","L","M","N","O","Q","R","S","U","W"
)),
dateInput('from',
label = 'Start Date: yyyy-mm-dd',
value = "2010-01-01"
),
dateInput('to',
label = 'End Date: yyyy-mm-dd',
value = "2015-01-01"
),
h3("Seattle Precincts"),
img(src = "precinctmap.png", height = 500, width = 350)
),
column(8,
fluidRow(
column(12,
plotOutput("crimePlot"),
fluidRow(
column(6,
plotOutput("byHour")),
column(6,
plotOutput("acrossCity"))
)
)
)
)
)
))
|
0f883cfc44468285fafbaf6c39f9632d6cb282dc | 2819005ee99256cd861b8cc6ac88b52ba8bbdcbb | /BRAnalyse.R | 2080805fbe904abfccb183978884e47847ca975d | [] | no_license | bbarres/BRAMU | a305624f3e47f59a0671e61d282ff1c3c910f708 | fe487cdddd8fde4d4ec437c932b8d4838c326e19 | refs/heads/master | 2020-05-19T12:43:55.520644 | 2019-01-21T01:15:09 | 2019-01-21T01:15:09 | 30,965,423 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 39,914 | r | BRAnalyse.R | ###############################################################################
###############################################################################
#Script for the analyses of the Microcyclus ulei populations in Mato Grosso
###############################################################################
###############################################################################
library(maptools)
library(maps)
library(mapdata)
library(RColorBrewer)
library(adegenet)
library(raster)
library(RColorBrewer)
library(rgdal)
library(combinat)
library(pegas)
library(gdistance)
library(vegan)
library(Geneland)
###############################################################################
#Loading the genetic data and preparing the datafile for other softwares
###############################################################################
#first of all, we load the genetic dataset
BRA<-read.table("data/BRAdata.txt",header=T,sep="\t")
#here is the structure of the datafile, for explanation of each columns,
#see ReadMe.txt file in DRYAD repository
head(BRA)
#a summary of the different variables
summary(BRA)
#number of individuals in each sampled populations
table(BRA$pop_ID)
sum(table(BRA$pop_ID)) #474 individuals
#two clone-corrected datasets are build on the complete dataset. The first one
#is a 'conservative' clone correction since we only keep on MLG per site
BRAcccons<-BRA[BRA$cc_cons==1,]
sum(table(BRAcccons$pop_ID)) #only 259 individuals left
#the second clone corrected dataset is less conservative, we only removed
#over-represented multicopies MLG (see Mat & Meth for details)
BRAcc<-BRA[BRA$cc==1,]
sum(table(BRAcc$pop_ID)) #only 338 individuals left
#### STRUCTURE file format
#a function for exporting the file to a "STRUCTURE" file format, this function
#is only working with the appropriate input datafile (ie a file format similar
#to "BRAdata.txt")
# Write a STRUCTURE-format file from a BRAdata-style data frame: keeps the
# ID/population columns (1-2) and the 14 locus columns (14-27), recodes
# missing genotypes ("0") as -9, renumbers the population labels, and writes
# the table to output/output.str (see the surrounding notes for the manual
# edits still needed before running STRUCTURE).
structexport<-function(fileinput) {
temp<-fileinput[,c(1:2,14:27)]
# STRUCTURE missing-data convention
temp[temp=="0"]<-"-9"
# remap factor levels of pop_ID to the numeric codes expected downstream
levels(temp$pop_ID)<-c(9,4,5,6,8,15,1,2,13,14,3,7,12,10,11)
write.table(temp,file="output/output.str",row.names=F)
}
#only a few edition to the output file are needed before running STRUCTURE
#on it (remove "", and the two first column headers)
structexport(BRA)
#change the name of the 'output.str' file before running the function again,
#or else the file will be overwrite
structexport(BRAcccons)
structexport(BRAcc)
#### TESS file format
#a function for exporting the file to a "TESS" file format, this function is
#only working with the appropriate input datafile (ie a file format similar
#to "BRAdata.txt")
# Write a TESS-format file from a BRAdata-style data frame: ID/population
# columns (1-2), jittered longitude/latitude (4-5) so no two individuals
# share coordinates, and the 14 locus columns (14-27); missing genotypes are
# recoded as -9 and population labels renumbered before writing
# output/output.tess.
tessexport<-function(fileinput) {
temp<-fileinput[,c(1:2,4:5,14:27)]
#we add some noise to geographic coordinates, so each individual has
#different coordinates
temp[,3]<-jitter(temp[,3])
temp[,4]<-jitter(temp[,4])
#missing data should be formatted with negative number
temp[temp=="0"]<-"-9"
# remap factor levels of pop_ID to the numeric codes expected downstream
levels(temp$pop_ID)<-c(9,4,5,6,8,15,1,2,13,14,3,7,12,10,11)
write.table(temp,file="output/output.tess",row.names=F)
}
tessexport(BRA)
#remove "" in the output datafile, then change the name of the 'output.tess'
#file before running the function again, or else the file will be overwrite
tessexport(BRAcccons)
tessexport(BRAcc)
#### GENELAND file format
#a function for exporting the file to "GENELAND" file format, this function is
#only working with the appropriate input datafile (ie a file format similar to
#"BRAdata.txt"), it is only necessary if you want to use the graphical user
#interface of Geneland ('Geneland.GUI()')
# Write GENELAND input files from a BRAdata-style data frame: coordinates
# (columns 4-5) are projected from WGS84 lon/lat to UTM zone 21 and written
# to output/outputgeo.geneland; the 14 locus columns (14-27), with "0"
# recoded as NA, go to output/outputgen.geneland. Uses sp/rgdal functions
# (SpatialPoints, CRS, spTransform) loaded at the top of the script.
genelandexport<-function(fileinput) {
tempgeo<-fileinput[,c(4,5)]
tempgeo<-SpatialPoints(tempgeo,proj4string=CRS("+proj=longlat +datum=WGS84"))
tempgeo<-spTransform(tempgeo, CRS("+proj=utm +zone=21 +datum=WGS84"))
tempgen<-fileinput[,c(14:27)]
#missing data should be formatted with "NA"
tempgen[tempgen=="0"]<-"NA"
write.table(tempgeo,file="output/outputgeo.geneland",
row.names=F,col.names=FALSE,quote=FALSE,sep=" ")
write.table(tempgen,file="output/outputgen.geneland",
row.names=F,col.names=FALSE,quote=FALSE,sep=" ")
}
genelandexport(BRA)
#Change the name of the 'outputgeo.geneland' and 'outputgen.geneland' files
#before running the function again, or else the file will be overwrite
genelandexport(BRAcccons)
genelandexport(BRAcc)
###############################################################################
#Identifying the best K for STRUCTURE run
###############################################################################
#Analyzes were performed using STRUCTURE2.3.4 software, with a model allowing
#admixture and correlation of allele frequencies. Each run consisted of a
#burn-in period of 100.000 iterations followed by 500.000 simulations. Fifteen
#repetitions of each run were performed for K ranging from 1 to 15. before
#importing the file, replace white space in the column header names with
#underscores, replace "?1" by "alpha", and remove double white spaces, or the
#import will fail or misbehave
resstr<-read.table(file="data/BRAstr.out", header=T,sep=" ",blank.lines.skip=T)
resccstr<-read.table(file="data/BRAccstr.out", header=T,sep=" ",
blank.lines.skip=T)
resccconsstr<-read.table(file="data/BRAccconsstr.out", header=T,sep=" ",
blank.lines.skip=T)
#new version of the file with 15 repetitions of each run
resstr<-read.table(file="data/BRAoutput.out", header=T,sep=" ",
blank.lines.skip=T)
resccstr<-read.table(file="data/BRAccoutput.out", header=T,sep=" ",
blank.lines.skip=T)
resccconsstr<-read.table(file="data/BRAccconsoutput.out", header=T,sep=" ",
blank.lines.skip=T)
#a function which computes the Evanno et al. delta K statistics from a table
#of STRUCTURE runs. 'str_out' must hold the Ln(P(D)) of each run in its 4th
#column, with runs ordered by increasing K; 'nb_K' is the number of different
#K considered, and 'nb_rep' is the number of repetitions of each K.
#Returns a data frame with, for each K, the mean/sd of Ln(P(D)), of
#L'(K)=L(K)-L(K-1), of |L''(K)|, and deltaK=mean(|L''(K)|)/sd(L(K))
chooseK<-function(str_out,nb_K,nb_rep) {
  datatable<-data.frame("K"=rep(1:nb_K,each=nb_rep),"Ln(Pd)"=str_out[,4])
  #first-order rate of change L'(K); undefined for the first K, hence the
  #leading NAs (numeric NA, not the string "NA" formerly coerced afterwards,
  #which triggered spurious coercion warnings)
  Lprim<-rep(NA_real_,nb_rep)
  for (i in (nb_rep+1):(nb_K*nb_rep)) {
    Lprim<-c(Lprim,str_out[i,4]-str_out[i-nb_rep,4])
  }
  datatable<-data.frame(datatable,Lprim)
  #second-order rate of change |L''(K)|; undefined for the first and last K
  Lsecond<-rep(NA_real_,nb_rep)
  for (i in ((2*nb_rep)+1):(nb_K*nb_rep)) {
    Lsecond<-c(Lsecond,abs(datatable[i,3]-datatable[i-nb_rep,3]))
  }
  Lsecond<-c(Lsecond,rep(NA_real_,nb_rep))
  datatable<-data.frame(datatable,Lsecond)
  #mean and standard deviation of each statistic across the repetitions of
  #each K (tapply groups on K, whose numeric values sort in 1..nb_K order;
  #this replaces the former six hand-written accumulation loops)
  meanL<-as.numeric(tapply(datatable[,2],datatable$K,mean))
  sdL<-as.numeric(tapply(datatable[,2],datatable$K,sd))
  meanLprime<-as.numeric(tapply(datatable[,3],datatable$K,mean))
  sdLprime<-as.numeric(tapply(datatable[,3],datatable$K,sd))
  meanLsecond<-as.numeric(tapply(datatable[,4],datatable$K,mean))
  sdLsecond<-as.numeric(tapply(datatable[,4],datatable$K,sd))
  #Evanno's delta K (NA for the first and last K, where L'' is undefined)
  deltaK<-meanLsecond/sdL
  reztable<-data.frame("K"=1:nb_K,meanL,sdL,meanLprime,sdLprime,
                       meanLsecond,sdLsecond,deltaK)
  return(reztable)
}
deltastr<-chooseK(resstr,15,15)
deltaccstr<-chooseK(resccstr,15,15)
deltaccconsstr<-chooseK(resccconsstr,15,15)
#a function to plot the variation of Delta K and Ln(P(X|K)) with K.
#'datadeltaK' is the output of the 'chooseK' function (column 8 = deltaK,
#column 2 = mean Ln(P(D))), 'nb_K' is the number of different K considered
#and 'titre' is the main title of the plot. Only the first nb_K-2 rows are
#drawn (the last two K are omitted). Draws on the current device as a side
#effect and restores the graphical parameters on exit.
#NOTE(review): the x-axis tick positions are hard-coded (seq(1,13,1)), which
#assumes nb_K == 15 -- TODO confirm before reusing with another nb_K
plotdeltaK<-function(datadeltaK,nb_K,titre){
  op<-par(pty="s")
  #first pass drawn in transparent ink only to set up the coordinate system
  plot(datadeltaK[1:(nb_K-2),8],type="b",pch=24,cex=2.5,lwd=4,lty=1,
       col="transparent",bg="white",bty="n",ann=F)
  par(new=TRUE)
  #actual delta K curve, scaled by the left axis
  plot(datadeltaK[1:(nb_K-2),8],type="b",pch=24,bty="n",xaxt="n",yaxt="n",
       ann=F,cex=2.5,lwd=4,lty=1)
  axis(side=1,at=seq(1,13,1),lwd=3,font.axis=2)
  axis(side=2,lwd=3,font.axis=2)
  title(ylab="Delta K",font.lab=2,cex.lab=1.5)
  par(new=TRUE)
  #mean Ln(P(X|K)) curve overlaid in grey, with its own scale on the right
  plot(datadeltaK[1:(nb_K-2),2],type="b",pch=22,cex=2.5,lwd=4,lty=2,
       col="grey50",bg="white",bty="n",xaxt="n",yaxt="n",ann=F)
  axis(side=4,lwd=3,font.axis=2,col="grey50")
  mtext("Ln(P(X|K))", side=4, line=4,font=2,cex=1,col="grey50")
  title(main=titre,xlab="K",font.lab=2,cex.lab=1.5,cex.main=2)
  par(op)
}
plotdeltaK(deltastr,15,"Complete dataset (n=474)")
plotdeltaK(deltaccstr,15,"Clone Corrected dataset (n=338)")
plotdeltaK(deltaccconsstr,15,"Conservative Clone Corrected dataset (n=259)")
#you can obtain the same figure as in the manuscript by exporting the plot to
#pdf format, with a width of 12 inches and an height of 11 inches
#You can also obtain a combined plot for the three different dataset
op<-par(mfrow=c(1,3))
plotdeltaK(deltastr,15,"Complete dataset (n=474)")
plotdeltaK(deltaccstr,15,"Clone Corrected dataset (n=338)")
plotdeltaK(deltaccconsstr,15,"Conservative Clone Corrected dataset (n=259)")
par(op)
#then export with a width of 32 inches and an height of 10 inches
###############################################################################
#Loading geographical data / informations / rasters / shapefiles
###############################################################################
#before importing the files, don't forget to set the correct working directory
#administrative limit and roads
municipioSH<-readShapePoly("mucipio_MT.shp",
proj4string=CRS("+proj=longlat +datum=WGS84"))
voies<-readShapeLines("voies.shp",
proj4string=CRS("+proj=longlat +datum=WGS84"))
#shapefile of the Hevea brasiliensis plantations identified by analysis of
#satellite images
parcPoly<-readShapePoly("parc_MT.shp",
proj4string=CRS("+proj=longlat +datum=WGS84"))
#in order to have the area of the different plantations in square meters and
#the distance in meters, we turn the coordinates of the object into planar
#coordinates format
parcPoly.utm <- spTransform(parcPoly, CRS("+proj=utm +zone=21 +datum=WGS84"))
#exploring the structure of the "polygons" object
str(parcPoly[1,])
parcPoly.utm[1,]@polygons
parcPoints<-readShapePoints("parc_point.shp",
proj4string=CRS("+proj=longlat +datum=WGS84"))
#in order to have the distance in meters, we turn the coordinates of the
#object into planar coordinates format
parcPoints.utm <- spTransform(parcPoints,
CRS("+proj=utm +zone=21 +datum=WGS84"))
sampPoints<-readShapePoints("prelevement.shp",
proj4string=CRS("+proj=longlat +datum=WGS84"))
#because there was no infection in 'Campo Verde' location, we remove this
#sampling point
sampPoints<-sampPoints[-c(2),]
levels(sampPoints@data$name_id)[levels(sampPoints@data$name_id)=="SOR1"]<-"SOR"
#in order to have the distance in meters, we turn the coordinates of the
#object into planar coordinates format
sampPoints.utm <- spTransform(sampPoints,
CRS("+proj=utm +zone=21 +datum=WGS84"))
#loading the altitudinal data, no need to clip the information, because it has
#been already clipped
alt<-raster("alt_Clip1.img")
#loading the raster of the land cover, no need to clip the information,
#because it has been already clipped
veget<-raster("couvert_veget1.img")
#simplification of the 'veget' raster to keep only 'veget' categories
#where Hevea brasiliensis can occur (40, 41 and 42, see
#http://postel.mediasfrance.org/fr/)
temp<-c(13,39,NA,43,210,NA,40,42,40)
temp <- matrix(temp, ncol=3, byrow=TRUE)
veget<-reclassify(veget,temp)
#there is no H. brasiliensis under 14°S approximatly,
temp<-c(extent(veget)@xmin,-14.3)
temp<-matrix(temp,ncol=2,byrow=T)
#determining the first cell of the grid we want to modify
extract(veget,temp,cellnumbers=TRUE)[1]
#We turn classification that don't fit our criteria in missing data
veget[extract(veget,temp,cellnumbers=TRUE)[1]:ncell(veget)]<-NA
#extraction of information from the different raster using the coordinates of
#sampling points
bioclim1<-raster("bio_1_Clip1.img")
bioclim2<-raster("bio_2_Clip1.img")
bioclim3<-raster("bio_3_Clip1.img")
bioclim4<-raster("bio_4_Clip1.img")
bioclim5<-raster("bio_5_Clip1.img")
bioclim6<-raster("bio_6_Clip1.img")
bioclim7<-raster("bio_7_Clip1.img")
bioclim8<-raster("bio_8_Clip1.img")
bioclim9<-raster("bio_9_Clip1.img")
bioclim10<-raster("bio_10_Clip1.img")
bioclim11<-raster("bio_11_Clip1.img")
bioclim12<-raster("bio_12_Clip1.img")
bioclim13<-raster("bio_13_Clip1.img")
bioclim14<-raster("bio_14_Clip1.img")
bioclim15<-raster("bio_15_Clip1.img")
bioclim16<-raster("bio_16_Clip1.img")
bioclim17<-raster("bio_17_Clip1.img")
bioclim18<-raster("bio_18_Clip1.img")
bioclim19<-raster("bio_19_Clip1.img")
site_table<-data.frame("site_ID"=sampPoints$name_id,
coordinates(sampPoints)[,1:2],
"Altitude"=extract(alt,
coordinates(sampPoints)[,1:2]),
"BioClim1"=extract(bioclim1,
coordinates(sampPoints)[,1:2])/10,
"BioClim2"=extract(bioclim2,
coordinates(sampPoints)[,1:2])/10,
"BioClim3"=extract(bioclim3,
coordinates(sampPoints)[,1:2]),
"BioClim4"=extract(bioclim4,
coordinates(sampPoints)[,1:2])/100,
"BioClim5"=extract(bioclim5,
coordinates(sampPoints)[,1:2])/10,
"BioClim6"=extract(bioclim6,
coordinates(sampPoints)[,1:2])/10,
"BioClim7"=extract(bioclim7,
coordinates(sampPoints)[,1:2])/10,
"BioClim8"=extract(bioclim8,
coordinates(sampPoints)[,1:2])/10,
"BioClim9"=extract(bioclim9,
coordinates(sampPoints)[,1:2])/10,
"BioClim10"=extract(bioclim10,
coordinates(sampPoints)[,1:2])/10,
"BioClim11"=extract(bioclim11,
coordinates(sampPoints)[,1:2])/10,
"BioClim12"=extract(bioclim12,
coordinates(sampPoints)[,1:2]),
"BioClim13"=extract(bioclim13,
coordinates(sampPoints)[,1:2]),
"BioClim14"=extract(bioclim14,
coordinates(sampPoints)[,1:2]),
"BioClim15"=extract(bioclim15,
coordinates(sampPoints)[,1:2]),
"BioClim16"=extract(bioclim16,
coordinates(sampPoints)[,1:2]),
"BioClim17"=extract(bioclim17,
coordinates(sampPoints)[,1:2]),
"BioClim18"=extract(bioclim18,
coordinates(sampPoints)[,1:2]),
"BioClim19"=extract(bioclim19,
coordinates(sampPoints)[,1:2]),
stringsAsFactors = FALSE)
colnames(site_table)<-c("site_ID","Longitude","Latitude","Altitude",
"Annual Mean Temperature",
"Mean Diurnal Range (Mean of monthly (max temp - min temp))",
"Isothermality",
"Temperature Seasonality (standard deviation *100)",
"Max Temperature of Warmest Month",
"Min Temperature of Coldest Month",
"Temperature Annual Range",
"Mean Temperature of Wettest Quarter",
"Mean Temperature of Driest Quarter",
"Mean Temperature of Warmest Quarter",
"Mean Temperature of Coldest Quarter",
"Annual Precipitation",
"Precipitation of Wettest Month",
"Precipitation of Driest Month",
"Precipitation Seasonality (Coefficient of Variation)",
"Precipitation of Wettest Quarter",
"Precipitation of Driest Quarter",
"Precipitation of Warmest Quarter",
"Precipitation of Coldest Quarter")
site_table$site_ID<-as.character(site_table$site_ID)
site_table[site_table$site_ID=="SOR1",1]<-c("SOR")
#we ordered this table to match the organization of the genetic table
site_table<-site_table[order(site_table$site_ID),]
site_table
###############################################################################
#some plotting examples of the files of geographical data sets
###############################################################################
plot(municipioSH)
plot(voies,col="red",add=TRUE)
plot(parcPoly,col="red")
#useful for illustration only; there may be a conflict with the
#zoom function of ape. Use detach(package:xx) to solve this.
raster::zoom(parcPoly,col="red")
plot(parcPoly,col="yellow",lwd=0.1)
plot(sampPoints,add=TRUE, col="black",bg="red",pch=21,cex=1.5)
text(coordinates(parcPoly),labels=round(plant_area), cex=0.2)
plot(alt)
#an example of a map that can be produced with the data loaded
op<-par(pty="s")
image(alt,col=brewer.pal(9,"Greys"))
image(veget,col="darkgreen",add=TRUE)
plot(municipioSH,add=TRUE)
plot(voies,add=TRUE,col="green")
#plot(parcPoly,add=TRUE, col="red") #not very usefull because area of
#plantation are too small for the considered scale
plot(parcPoints,add=TRUE, col="black",bg="blue",pch=21)
plot(sampPoints,add=TRUE, col="black",bg="red",pch=21)
par(op)
#you can export the file with a large size of file (like 50 by 50 inches)
#submap of South America
op<-par(pty="s")
image(alt,col=brewer.pal(9,"Greys"))
image(veget,col="darkgreen",add=TRUE)
#plot(municipioSH,add=TRUE)
plot(parcPoints,add=TRUE, col="black",bg="blue",pch=21)
plot(sampPoints,add=TRUE, col="black",bg="red",pch=21)
polygon(x=c(-53.39,-53.13,-53.13,-53.39,-53.39),
y=c(-13.29,-13.29,-13.03,-13.03,-13.29),col="white",
density=35,border="white",lwd=1.5)
box()
par(op)
#the following code is adapted from:
#http://www.stat.auckland.ac.nz/~paul/RGraphics/examples-map.R
#plotting an inset map of south america
maplocs <- map("worldHires", ylim=c(-60,12),xlim=c(-89,-35),
col="lightblue1",fill=TRUE,add=TRUE,plot=FALSE)
xrange <- range(maplocs$x, na.rm=TRUE)
yrange <- range(maplocs$y, na.rm=TRUE)
aspect <- abs(diff(yrange))/abs(diff(xrange))
# customised to 6.5 by 4.5 figure size
#overlay the inset on the current figure: "ew=TRUE" was a typo that par()
#ignores with a warning; new=TRUE (as in the second inset below) is required
#so that the following plot.new() does not start a fresh page
par(fig=c(0.93 - 0.15, 0.929, 0.72, 0.72 + 0.15*aspect), mar=rep(0, 4),
    new=TRUE, pty="s")
plot.new()
plot.window(xlim=xrange, ylim=yrange)
polygon(x=c(-88.5,-34.5,-34.5,-88.5,-88.5),y=c(-58,-58,15,15,-58),
col="lightblue1")
map("worldHires", ylim=c(-60,12),xlim=c(-89,-35), col="white",fill=T,add=TRUE)
polygon(x=c(-59.5,-52.5,-52.5,-59.5,-59.5),y=c(-17.5,-17.5,-10.5,-10.5,-17.5),
col="grey")
#plotting an inset map of a zoom on a part of Mato Grosso plantations
par(fig=c(0.93 - 0.15, 0.9298, 0.4, 0.4 + 0.15), mar=rep(0, 4),new=TRUE,
pty="s")
plot.new()
plot.window(xlim=c(-53.39,-53.13), ylim=c(-13.29,-13.03))
polygon(x=c(-53.39,-53.13,-53.13,-53.39,-53.39),
y=c(-13.29,-13.29,-13.03,-13.03,-13.29),col="white")
plot(parcPoly,col="red",xlim=c(-53.39,-53.13),ylim=c(-13.29,-13.03),
lwd="0.2",add=TRUE)
#export in pdf format with a size of 9.5*9.5 inches in order to obtain the
#same figure
map("world",regions=c("brazil","argentina","uruguay","paraguay","chile",
"bolivia","peru","ecuador","colombia","venezuela",
"guyana","suriname","french guiana","panama",
"costa rica","nicaragua"))
###############################################################################
#Distance matix, Minimum spanning tree network or least cost pathway analysis
###############################################################################
#first we built the combined table of coordinates of plantations and sampling
#sites
xy.utm<-rbind(coordinates(parcPoints.utm),coordinates(sampPoints.utm)[,c(1,2)])
row.names(xy.utm)[577:591]<-c("BBU","DEN","DAQ1","DAQ2","DAQ3","NHO","PEM1",
"PEM2","PGA1","PGA2","RND","ROS","SRC1","SRC2",
"SOR")
colnames(xy.utm)<-c("Longitude", "Latitude")
#pairwise euclidean distances between all plantations and sampling sites
matdist.utm<-vegdist(xy.utm,method="euclidean")
#identical computation to matdist.utm: reuse it instead of recomputing the
#full 591 x 591 distance matrix
mateucl.utm<-matdist.utm
#Euclidean distances beetween sites
mateucl.utm<-as.matrix(mateucl.utm)
dist_eucl<-mateucl.utm[577:591,577:591]
dist_eucl<-dist_eucl/1000 #we turn the distance in kilometers
#we ordered this table to match the organization of the genetic table
dist_eucl<-dist_eucl[order(colnames(dist_eucl)),order(colnames(dist_eucl))]
#Distances from the Minimum spanning tree network between Hevea brasiliensis
#plantation. We use this function to find the minimum spanning tree
MinSpaTre.utm<-spantree(matdist.utm)
#it is then possible to compute the distances between the nodes of the tree,
#following a tree path
coph.utm<-cophenetic(MinSpaTre.utm)
#we just want the distance between the sampling points
matcoph.utm<-as.matrix(coph.utm)
dist_MinSpanTree<-matcoph.utm[577:591,577:591]
dist_MinSpanTree<-dist_MinSpanTree/1000 #we turn the distance in kilometers
#we ordered this table to match the organization of the genetic table
dist_MinSpanTree<-dist_MinSpanTree[order(colnames(dist_MinSpanTree)),
order(colnames(dist_MinSpanTree))]
#Distances between sampling point using roads. This distances has been
#computed using google maps https://maps.google.fi/maps?hl=en&tab=wl in July
#2013 by copying and pasting coordinates of the plantation
dist_roads<-as.matrix(read.table("dist_roads.txt",header=TRUE))
#in order to plot the tree on the map, we built another tree with WGS84
#coordinates
xy<-rbind(coordinates(parcPoints),coordinates(sampPoints)[,c(1,2)])
row.names(xy)[577:591]<-c("BBU","DEN","DAQ1","DAQ2","DAQ3","NHO","PEM1","PEM2",
"PGA1","PGA2","RND","ROS","SRC1","SRC2","SOR")
colnames(xy)<-c("Longitude", "Latitude")
matdist<-vegdist(xy,method="euclidean")
#we use this function to find the minimum spanning tree
MinSpaTre<-spantree(matdist)
op<-par(pty="s")
image(alt,col=brewer.pal(9,"Greys"))
image(veget,col="darkgreen",add=T)
lines(MinSpaTre,xy,lwd=2)
plot(parcPoints,add=T, col="black",bg="blue",pch=21)
plot(parcPoly,add=T, col="yellow",lwd=0.1)
plot(sampPoints,add=T, col="black",bg="red",pch=21)
# #adding the ID of the sampled population
# scatterutil.eti(sampPoints$POINT_X,sampPoints$POINT_Y,
# as.character(sampPoints$name_id),0.5)
# #or a simplier way
# text(sampPoints,sampPoints$name_id)
box()
par(op)
###############################################################################
#Canonical Correspondence Analysis
###############################################################################
BRAt<-BRAcc
BRADE<-df2genind(BRAt[,14:27],ncode=3,ind.names=as.character(BRAt$sample_ID),
pop=BRAt$pop_ID,ploidy=1,NA.char=c("0"))
summary(BRADE)
BRADE@other$xy<-BRAt[,4:5]
BRADEpop<-genind2genpop(BRADE,process.other=T)
cca1<-cca(as.data.frame(BRADEpop$tab),site_table[,-c(1:3)],scann=F)
cca1
plot(cca1)
###############################################################################
#clusterisation with DAPC method using adegenet package
###############################################################################
#the analysis here are performed for the clone-corrected dataset, but the same
#analysis can be performed with the other dataset by replacing BRAcc by the
#complete (BRA) or the conservative clone-corrected dataset (BRAcccons) in the
#following line code, and then rerun other lines of code
BRAt<-BRAcc #name of the input file
#converting data to a genind format
BRADE<-df2genind(BRAt[,14:27],ncode=3,ind.names=as.character(BRAt$sample_ID),
pop=BRAt$pop_ID,NA.char=c("0"),ploidy=1)
BRADE@other$xy<-BRAt[,4:5]
#determination of the number of clusters
clustBRADE<- find.clusters(BRADE,max.n.clust=35)
#with 40 PCs, we lost nearly no information
clustBRADE<- find.clusters(BRADE,n.pca=40,max.n.clust=35) #chose 3 clusters
#which individuals in which clusters per population
table(pop(BRADE),clustBRADE$grp)
#DAPC by itself, first we try to optimized the number of principal component
#(PCs) to retain to perform the analysis
dapcBRADE<-dapc(BRADE,clustBRADE$grp,n.da=5,n.pca=100)
temp<-optim.a.score(dapcBRADE)
dapcBRADE<-dapc(BRADE,clustBRADE$grp,n.da=5,n.pca=30)
temp<-optim.a.score(dapcBRADE) #based on this result, we finaly chose 7 PCs
dapcBRADE<-dapc(BRADE,clustBRADE$grp,n.da=7,n.pca=7)
#STRUCTURE-like graphic
compoplot(dapcBRADE,lab=NA)
scatter(dapcBRADE,xax=1, yax=2)
BRADEpop<-genind2genpop(BRADE,process.other=T,missing="0")
image(alt,col=brewer.pal(9,"Greys"))
stars(table(pop(BRADE),dapcBRADE$assign),draw.segment=TRUE,
locations=BRADEpop@other$xy,
#locations=cbind(jitter(BRADEpop@other$xy$longitude,200),
# jitter(BRADEpop@other$xy$latitude,200)),
add=T,len=0.5)
###############################################################################
#clusterisation with GENELAND package
###############################################################################
MCMC(coordinates=BRAcc[,c(4,5)], geno.hap=BRAcc[,c(14:27)],varnpop=TRUE,
npopmax=15,spatial=TRUE,freq.model="Uncorrelated", nit=100000,
thinning=100, path.mcmc="c:/Users/bbarres/GENELAND/BRA/",
filter.null.alleles=FALSE,delta.coord=0.003,npopinit=2)
PostProcessChain(coordinates=BRAcc[,c(4,5)],
path.mcmc="c:/Users/bbarres/GENELAND/BRA/",
nxdom=100,nydom=100, burnin=200)
Plotnpop(path.mcmc="c:/Users/bbarres/GENELAND/BRA/",burnin=200)
PlotTessellation(coordinates=BRAcc[,c(4,5)],
path.mcmc="c:/Users/bbarres/GENELAND/BRA/")
PosteriorMode(coordinates=BRAcc[,c(4,5)],
path.mcmc="c:/Users/bbarres/GENELAND/BRA/")
###############################################################################
#Definition of functions to compute diversity indices
###############################################################################
#Allelic Richness computation by rarefaction (El Mousadik and Petit 1996)
#data: a dataset at the 'genind' format from the package 'adegenet'
#returns a list with the rarefaction sample size (the smallest number of
#genes sampled over all population x locus combinations) and the matrix of
#allelic richness per population (rows) and locus (columns)
AllRich<-function(data)
{
  #Conversion from 'genind' object to 'genpop' object (table of allele
  #counts per population)
  datapop<-genind2genpop(data, process.other=TRUE, other.action=mean,
                         quiet = TRUE)
  #First, determining the smaller number of allele across sampled population:
  #matpop/matloc map every cell of the count table to its population and
  #locus, so tapply() can sum the allele counts per population x locus
  matloc<-t(matrix(data=datapop@loc.fac,nrow=(dim(datapop@tab)[2]),
                   ncol=(dim(datapop@tab)[1])))
  matpop<-matrix(data=row.names(datapop@tab), nrow=(dim(datapop@tab)[1]),
                 ncol=(dim(datapop@tab)[2]))
  conf<-list(matpop, matloc)
  effN<-(tapply(datapop@tab, conf, sum))
  echMin<-min(effN)
  #Second, build of the matrix of total number of sampled allele (the per
  #population x locus totals expanded back to the shape of the count table)
  truc<-t(as.matrix(table(datapop@loc.fac)))
  x<-matrix(nrow=(dim(effN)[1]), ncol=(dim(effN)[2]), data=truc,byrow=TRUE)
  effTot<-matrix(rep(t(effN),t(x)), nrow=(dim(datapop@tab)[1]),
                 ncol=(dim(datapop@tab)[2]), byrow=TRUE)
  #Third, compute the matrix of Ar terms for each population/loci
  #combination (see El Mousadik and Petit 1996 for details).
  #base::choose() gives the same binomial coefficient as genetics::nCm()
  #and is vectorized, so the former cell-by-cell double loop is replaced by
  #a single whole-matrix expression
  CoMat<-1-(choose(effTot-datapop@tab,echMin)/choose(effTot,echMin))
  #Allelic richness in each population, for each LOCUS
  ArLOC<-(tapply(CoMat, conf, sum))
  rez<-list("Minimum Sampling Size"=echMin,"Allelic Richness Matrix"=ArLOC)
  return(rez)
}
BRAt<-BRAcc #name of the input file
#converting data to a genind format
BRADE<-df2genind(BRAt[,14:27],ncode=3,ind.names=as.character(BRAt$sample_ID),
pop=BRAt$pop_ID,NA.char=c("0"),ploidy=1)
BRADE@other$xy<-BRAt[,4:5]
AllRich(BRADE)[[2]]
Ar<-apply(AllRich(BRADE)[[2]],1,mean)
#Private Allelic Richness computation by rarefaction (Kalinowski 2004)
#data: a dataset at the 'genind' format from the package 'adegenet'
#returns a list with the rarefaction sample size and the matrix of private
#allelic richness per population (rows) and locus (columns)
PrivAllRich<-function(data)
{
  #Conversion from 'genind' object to 'genpop' object
  datapop<-genind2genpop(data, process.other=TRUE,other.action=mean,quiet=TRUE)
  #First, determining the smaller number of allele across sampled population
  matloc<-t(matrix(data=datapop@loc.fac,nrow=(dim(datapop@tab)[2]),
                   ncol=(dim(datapop@tab)[1])))
  matpop<-matrix(data=row.names(datapop@tab), nrow=(dim(datapop@tab)[1]),
                 ncol=(dim(datapop@tab)[2]))
  conf<-list(matpop, matloc)
  effN<-(tapply(datapop@tab, conf, sum))
  echMin<-min(effN)
  #Second, build of the matrix of total number of sampled allele
  truc<-t(as.matrix(table(datapop@loc.fac)))
  x<-matrix(nrow=(dim(effN)[1]), ncol=(dim(effN)[2]), data=truc,byrow=TRUE)
  effTot<-matrix(rep(t(effN),t(x)), nrow=(dim(datapop@tab)[1]),
                 ncol=(dim(datapop@tab)[2]), byrow=TRUE)
  #Third, Qijg: probability of NOT sampling allele j of locus g in a
  #rarefied draw of echMin genes from population i (see Kalinowski 2004).
  #base::choose() computes the same binomial coefficient as genetics::nCm()
  #and is vectorized, which removes the former cell-by-cell double loops
  CoMat2<-choose(effTot-datapop@tab,echMin)/choose(effTot,echMin)
  #Pijg: probability of sampling allele j at least once, i.e. 1 - Qijg
  CoMat<-1-CoMat2
  #fourth, product of Qijg over all the OTHER populations: probability that
  #allele j is missed everywhere else, so that it is private to population i
  CoMat3<-matrix(nrow=(dim(datapop@tab)[1]),ncol=(dim(datapop@tab)[2]))
  for (i in 1:(dim(datapop@tab)[1])) {
    #drop=FALSE keeps a one-row matrix when only one other population
    #remains, making the former as.matrix()/dim()==1 special case
    #unnecessary (and correct when the table has a single column)
    CoMat3[i,]<-apply(CoMat2[-i, , drop=FALSE],2,prod)
  }
  CoMat4<-CoMat*CoMat3
  #Private Allelic richness in each population, for each LOCUS
  PrivArLOC<-(tapply(CoMat4, conf, sum))
  rez<-list("Minimum Sampling Size"=echMin,
            "Private Allelic Richness Matrix"=PrivArLOC)
  return(rez)
}
PrivAllRich(BRADE)
PrivAr<-apply(PrivAllRich(BRADE)[[2]],1,mean)
#Nei (1987) gene diversity per population, averaged over loci
#data: a dataset at the 'genind' format
#returns a numeric vector of mean diversity, named after the populations
#NOTE(review): genind2loci() and H() look like pegas functions loaded by the
#script preamble -- confirm before running in a fresh session
HeterNei<-function(data)
{
#Conversion from 'genind' object to 'genpop' object (used here to get the
#number of populations and their names)
datapop<-genind2genpop(data, process.other=TRUE, other.action=mean,
                       quiet = TRUE)
#Heterozygosity (Nei 1987) in each population, for each LOCUS
HsLOC<-matrix(nrow=(dim(datapop@tab)[1]),
              ncol=(length(levels(datapop@loc.fac))), byrow=TRUE)
for (i in (1:(dim(datapop@tab)[1]))) {
#subset the genind object to the i-th population and convert it to the
#'loci' format, then compute the diversity of each locus from its allele
#counts
dataLOC<-genind2loci(data[data$pop==levels(data$pop)[i]])
ss<-summary(dataLOC)
HsLOC[i,]<-sapply(ss, function(x) H(x$allele))
}
#determining mean Heterozygosity across loci
Hs<-(apply(HsLOC,1,mean))
#NOTE(review): naming assumes the row order of datapop@tab matches
#levels(data$pop) -- TODO confirm for the adegenet version in use
attr(Hs,"names")<-row.names(datapop@tab)
return(Hs)
}
HetNei<-HeterNei(BRADE)
#A function is directly implemented in adegenet: 'Hs'. It gives slightly
#different results, even if they seem to be correlated. Investigation to be
#conducted, why results are different (maybe different treatment of missing
#data when computing allele frequencies)
Hs(BRADE)
###############################################################################
#Comparison of diversity between the two groups of populations
###############################################################################
endemic<-c("NHO","PGA2","PGA1","SOR","SRC2","SRC1")
invaded<-c("BBU","DEN","ROS","DAQ3","DAQ2","DAQ1","RND","PEM2","PEM1")
boxplot(Ar[endemic],Ar[invaded])
wilcox.test(Ar[endemic],Ar[invaded])
t.test(Ar[endemic],Ar[invaded])
boxplot(HetNei[endemic],HetNei[invaded])
wilcox.test(HetNei[endemic],HetNei[invaded])
boxplot(PrivAr[endemic],PrivAr[invaded])
wilcox.test(PrivAr[endemic],PrivAr[invaded])
t.test(PrivAr[endemic],PrivAr[invaded])
###############################################################################
#Comparison of diversity between regions
###############################################################################
#Resampling procedure to balance the different number of populations sampled
#in the two regions. The aim here is to compare gene diversity between the two
#groups of populations endemic vs invaded
#we want to compare the allelic richness (Ar), private allelic richness (PAr)
#and the genetic diversity (He) in the two different regions: "endemic" (close
#to the natural compartment) vs "invaded" (not in the direct vicinity of the
#natural compartment). Because there are less "endemic" populations sampled
#than "invaded" population (6 populations vs 10 populations), we are using a
#nested-resampling procedure: first we are sampling 6 populations (the minimum
#number of populations in the 2 different regions), and then we sampled 4
#individuals (the minimum number of individuals in a population) in each of the
#sampled populations. Then we compute Ar and He for each resampled datasets at
#the region level; this procedure was repeated 100 times. A one-way ANOVA
#followed by a post hoc Tuckey test was used to compare the levels of the
#three different indices of diversity (Gladieux et al 2010)
resampleDIV<-function(data,grp1,grp2,nbsim,nbpop=6,nbind=4)
#this function performs a balanced resampling of 'nbpop' populations in each
#defined group, and within each population it samples 'nbind' individuals to
#create a sub-dataset; then the function computes the allelic richness, the
#genetic diversity and the private allelic richness for each resampled
#dataset (AllRich, PrivAllRich and HeterNei defined above)
#data: a data frame of the same format as "BRA"
#grp1: first group of populations
#grp2: second group of populations
#nbsim: the number of resampling procedures
#nbpop: populations drawn per group (default 6, the minimum number of
#populations over the two regions); must not exceed the size of either group
#nbind: individuals drawn per population (default 4, the minimum population
#sample size)
{
  ArDistr<-c()
  PrivArDistr<-c()
  HeDistr<-c()
  #a list (not c(), which cannot accumulate the S4 'genind' objects) holding
  #each simulated dataset
  SimPop<-vector("list",nbsim)
  for (i in 1:nbsim) {
    #draw nbpop populations of the first group, then nbind individuals
    #within each of them
    popsamp<-sample(grp1, size=nbpop, replace=FALSE)
    BRAinvad<-data[data$pop_ID %in% (popsamp),]
    resamp<-c()
    for (j in 1:nbpop) {
      listind<-as.character(sample(BRAinvad[BRAinvad$pop_ID == popsamp[j],
                                            "sample_ID"],
                                   size=nbind, replace=FALSE))
      resamp<-rbind(resamp,BRAinvad[BRAinvad$sample_ID %in% (listind),])
    }
    #same resampling for the second group
    popsamp<-sample(grp2, size=nbpop, replace=FALSE)
    BRAendem<-data[data$pop_ID %in% (popsamp),]
    for (k in 1:nbpop) {
      listind<-as.character(sample(BRAendem[BRAendem$pop_ID == popsamp[k],
                                            "sample_ID"],
                                   size=nbind, replace=FALSE))
      resamp<-rbind(resamp,BRAendem[BRAendem$sample_ID %in% (listind),])
    }
    #pool the individuals of each group into a single "population"
    resamp$pop_ID<-as.character(resamp$pop_ID)
    resamp$pop_ID[1:(nbpop*nbind)]<-rep("BRAinvad",nbpop*nbind)
    resamp$pop_ID[(nbpop*nbind+1):(2*nbpop*nbind)]<-rep("BRAendem",
                                                        nbpop*nbind)
    resampADE<-df2genind(resamp[,14:27],ncode=3,
                         ind.names=as.character(resamp$sample_ID),
                         pop=resamp$pop_ID,NA.char=c("0"),ploidy=1)
    #diversity indices of the resampled dataset, one row per simulation
    ArDistr<-rbind(ArDistr,apply(AllRich(resampADE)[[2]],1,mean))
    PrivArDistr<-rbind(PrivArDistr,apply(PrivAllRich(resampADE)[[2]],1,mean))
    HeDistr<-rbind(HeDistr,HeterNei(resampADE))
    SimPop[[i]]<-resampADE
  }
  rez<-list("Ar distribution"=ArDistr,"PrivAr distribution"=PrivArDistr,
            "He distribution"=HeDistr,
            "Simulated Pop"=SimPop)
  return(rez)
}
endemic<-c("NHO","PGA2","PGA1","SOR","SRC2","SRC1")
invaded<-c("BBU","DEN","ROS","DAQ3","DAQ2","DAQ1","RND","PEM2","PEM1")
BRAt<-BRAcc #name of the input file
distri<-resampleDIV(BRAt,invaded,endemic,100)
#comparison between Allelic richness of endemic and invaded zones
boxplot(distri[[1]][,1], distri[[1]][,2],
main="Comparison of Allelic Richness",
names=c(colnames(distri[[1]])[1],colnames(distri[[1]])[2]))
wilcox.test(distri[[1]][,1], distri[[1]][,2],paired=TRUE)
t.test(distri[[1]][,1], distri[[1]][,2],paired=TRUE)
plot(density(distri[[1]][,1]))
lines(density(distri[[1]][,2]),col="red")
#comparison between Private Allelic richness of endemic and invaded zones
boxplot(distri[[2]][,1], distri[[2]][,2],
main="Comparison of Private Allelic Richness",
names=c(colnames(distri[[1]])[1],colnames(distri[[1]])[2]))
wilcox.test(distri[[2]][,1], distri[[2]][,2],paired=TRUE)
t.test(distri[[2]][,1], distri[[2]][,2],paired=TRUE)
plot(density(distri[[2]][,2]),col="red")
lines(density(distri[[2]][,1]))
#comparison between Nei diversity of endemic and invaded zones
boxplot(distri[[3]][,2], distri[[3]][,1],
main="Comparison of Nei Diversity",
names=c(colnames(distri[[3]])[2],colnames(distri[[3]])[1]))
wilcox.test(distri[[3]][,2], distri[[3]][,1],paired=TRUE)
t.test(distri[[3]][,2], distri[[3]][,1],paired=TRUE)
plot(density(distri[[3]][,2]))
lines(density(distri[[3]][,1]),col="red")
###############################################################################
#Connectivity index computation
###############################################################################
#first we built the combined table of coordinates of plantations and sampling
#sites
xy.utm<-rbind(coordinates(parcPoints.utm),coordinates(sampPoints.utm)[,c(1,2)])
row.names(xy.utm)[577:591]<-c("BBU","DEN","DAQ1","DAQ2","DAQ3","NHO","PEM1",
"PEM2","PGA1","PGA2","RND","ROS","SRC1","SRC2",
"SOR")
colnames(xy.utm)<-c("Longitude", "Latitude")
#pairwise distances between all plantations and sampling sites, converted
#from meters (UTM) to kilometers
matdistb.utm<-vegdist(xy.utm,method="euclidean",diag=TRUE, upper=TRUE)/1000
#recovering the area information for each identified plantations in square km
#(reason why we divided by 1000000)
plant_area<-(sapply(slot(parcPoly.utm, "polygons"), slot, "area"))/1000000
#we add the area of the sampling sites to the table of area since the surface
#on which the samples were collected was limited, we fixe this area to 1 ha
plant_area<-c(plant_area,rep(0.01,15))
#'alpha' parameter of Laine and Hanski 2006, here we put 1km (take care that
#the same unit is used in the Weidist matrix)
alpha<-1
racArea<-sqrt(plant_area) #conversion of area in square meter to the square
#root of the area in square km
#exponential dispersal kernel: the weight of a pair of patches decreases
#with their distance at rate alpha (per km)
Weidist<-as.matrix(matdistb.utm)
Weidist<-exp(-alpha*Weidist)
#each column i of 'connect' holds, for every source patch j, the
#contribution sqrt(area_j)*exp(-alpha*d_ij)
connect<-matrix(nrow=(dim(Weidist)[1]),ncol=(dim(Weidist)[2]))
dimnames(connect)<-dimnames(Weidist)
for (i in 1:length(racArea)) {
  connect[,i]<-Weidist[,i]*racArea
}
#zero the diagonal so that a patch does not contribute to its own
#connectivity
for (i in 1:length(racArea)) {
  connect[i,i]<-0
}
#column sums give the connectivity index of each patch (sum over all other
#patches of sqrt(area)*exp(-alpha*distance))
connect<-apply(connect,2,sum)
#keep 'connect' as a NAMED NUMERIC vector: the former
#cbind("PATCH_ID"=...,"connect"=...) coerced it to a character matrix,
#which dropped the names and broke the ordering and the plot below; the
#two-column table is built under its own name instead
connect_table<-data.frame("PATCH_ID"=attr(connect,"names"),
                          "connect"=as.numeric(connect))
#connectivity of the 15 sampling sites, sorted by site name so that the
#order matches the diversity indices (e.g. Ar)
connect[577:591][order(attr(connect[577:591],"names"))]
plot(Ar, connect[577:591][order(attr(connect[577:591],"names"))])
###############################################################################
#END
############################################################################### |
26e5fb38c7c8b818b38dabf28ec798e1d46e0345 | e41c5963ed4d777fa7123d64d09da007a17ec979 | /man/getsim.Rd | 46b786f44c10227b7a4c10cff6c5710318869662 | [] | no_license | jpgroup/enviGCMS | 58d89d55845aec5db2ec18d8949834b546495c41 | 1da3d3c09ce2b0bc2a74001271237a215ffd9a51 | refs/heads/master | 2021-01-12T11:14:19.206130 | 2017-05-18T13:55:41 | 2017-05-18T13:55:41 | 72,878,451 | 1 | 0 | null | 2016-11-04T19:21:49 | 2016-11-04T19:21:48 | null | UTF-8 | R | false | true | 342 | rd | getsim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{getsim}
\alias{getsim}
\title{output the similarity of two dataset}
\usage{
getsim(xset1, xset2)
}
\arguments{
\item{xset1}{the first dataset}
\item{xset2}{the second dateset}
}
\value{
similarity
}
\description{
output the similarity of two dataset
}
|
6454856ddf653a39dd750ca1131dc2c9e87c4e04 | 7fd5a2a8c3a2c3da494774547a293c3747176d98 | /researchcyclematrix_code/man/rcm_longest_with_hq.Rd | b2bd866f2459f1a479ccde922670c1129a7ba17e | [] | no_license | ellieallien/impact_R_user_training | 07e119d06ed6e206614cba84df3b4404cee17936 | b68432fdc340e88af473a3fff0f5b26a7b8e3cb5 | refs/heads/master | 2020-05-04T15:49:44.601198 | 2019-05-23T11:18:12 | 2019-05-23T11:18:12 | 179,257,736 | 0 | 1 | null | 2019-04-03T09:35:16 | 2019-04-03T09:35:16 | null | UTF-8 | R | false | true | 470 | rd | rcm_longest_with_hq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcm_status.R
\name{rcm_longest_with_hq}
\alias{rcm_longest_with_hq}
\title{is with field?}
\usage{
rcm_longest_with_hq(rcm, n = NULL, add.columns = c())
}
\arguments{
\item{rcm}{the research cycle matrix from rcm_download(raw=F)}
}
\value{
a subset of the RCM: only rows that are with HQ; only basic information columns. Sorted by added column "days.with.hq"
}
\description{
is with field?
}
|
2b63a3d844a3fe70fba5a4b32b58fe5ec7fa6002 | b200e741591d40ee314a6a2c9a185787eb5e39c0 | /Processamento de dados .R | 178b1e3dfbd430e6c784aba6472f4a72268b9740 | [] | no_license | allogamous/HOBOmicroData | 1473ca8387ac81c6222b05a044a5ce653b006c9d | 382a081acc4fc58ad1cc32dd2af63885fbbb44f7 | refs/heads/master | 2020-07-14T05:11:14.110058 | 2019-08-29T21:10:12 | 2019-08-29T21:10:12 | 205,246,665 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,971 | r | Processamento de dados .R | #'########################################################################################
#' #'
#' #'
#' Codes para processamento de dados meteorologicos a partir da HOBOWARE #'
#' Autor: Germano #'
#' Versao: 2.0 (Agosto de 2019, versao anterior de Abril de 2019) #'
#' #'
#' Dados oriundos do processamento dos files binarios: #'
#' Anhumas_end.dtf e usp_end.dtf #'
#' #'
#' #'
#'########################################################################################'
#---------------------- Source Codes ----------------------------------------------------
#' Funcao para leitura e processamento inicial de arquivos csv exportados do HOBOWARE
read.HOBO = function(file, #' nome do arquivo (ex: dados.csv)
sep =",", #' dados com colunas separadas por ,
skip = 1, #' pula a primeira linha que tem informacoes
row.names = 1, #' ignora a 1 coluna como rowname
pattern = "H", #' padrao para cortar a coluna de hora (pode ser h tb)
id.vars = NULL, #' nome das colunas de variaveis climaticas e de solo
path = NULL, #' diretorio onde estao os arquivos .csv
daynight = TRUE, #' TRUE ou FALSE se ira criar coluna separando dia da noite
save = FALSE, #' opcao que define se ira ou nao salvar o output
start = NULL, #' inicio da janela (ex: 02-03-2019, para 2 de marco de 2019)
end = NULL, #' fim da janela (idem ao start)
formatDay = "%d/%m/%y" # formato de entrada do dia
){
#' -------------------------------------------------------------------------------------
#' Pacotes necessarios
require(reshape2);
require(plyr);
#' -------------------------------------------------------------------------------------
#' Organizando diretorios de saida
if(is.null(path)){path=getwd()};
DF = read.csv(as.character(paste0(path,"/",file)),
sep=sep,skip=skip,row.names = row.names);
#' -------------------------------------------------------------------------------------
#' Organizando colunas de data e hora
DF = data.frame(colsplit(as.character(DF[,2]),pattern,c("Hora","Minutos")),DF[,-2]);
DF$Data = as.Date(as.POSIXlt(as.character(DF$Data), format=formatDay))
print("Converting date to year-month-day as R default")
#' -------------------------------------------------------------------------------------
#' Renomendo variaveis (caso seja necessario)
if(!is.null(id.vars)){
colnames(DF)[-c(1:3)] = var.id; # obs: estou ignorando as 3 colunas iniciais (data etc)
}
#' -------------------------------------------------------------------------------------
#' Criando coluna que separa dia de noite
if(isTRUE(daynight)){
print("daynight assigned as TRUE: separating day from night");
DF$DayTime = "day"
DF$DayTime[DF$Hora %in% c(seq(from=19, to=23,by=1),seq(from=0, to=6,by=1))] = "night"
}
#' -------------------------------------------------------------------------------------
#' Ajustando a janela de coleta de dados (vai ignorar caso nao tenha especificado)
if(!is.null(start)){
start = as.Date(as.POSIXlt(as.character(start), format="%d/%m/%y"))
DF = DF[DF$Data >=start,]
}
if(!is.null(end)){
end = as.Date(as.POSIXlt(as.character(end), format="%d/%m/%y"))
DF = DF[DF$Data <= end,]
}
if(isTRUE(save)){
output = as.character(paste0(gsub(x = file,pattern = ".csv",replacement = ""),"_processed.csv"))
write.csv(x = DF,file = output,row.names = F)
print(paste0("save assigned as TRUE: saving processed output file at ",dir))
}
#' -------------------------------------------------------------------------------------
#' Informacao sobre output
print(paste0("Returning a data.frame with ",nrow(DF)," rows and ",ncol(DF)," columns"))
return(DF)
#' -------------------------------------------------------------------------------------
}
# funcao para processamento inicial dos dados em escala diaria
process.HOBO = function(dataset,
target=FALSE, #' TRUE usa so os targets definidos na collect
var.id,
collect = c(6,9,12,15),#' horario das coletas para media diaria
nightConsider = TRUE #' considera a noite toda para coleta de dados (coloque FALSE)
){
dataset$Collect = NA
dataset$Collect[dataset$Hora %in% collect] = "TARGET"
if(isTRUE(nightConsider)){
dataset$Collect[dataset$DayTime %in% "night"] = "TARGET"}
t = melt(dataset,id.vars=colnames(dataset)[!names(dataset) %in% var.id]);
if(isTRUE(target)){t=t[t$Collect %in% "TARGET",]}
t = dlply(ddply(t,.(DayTime,Data,variable),summarise,
min.value = quantile(value,na.rm=T)[1],
quant25 = quantile(value,na.rm=T)[2],
median.value = median(value,na.rm=T),
quant75 = quantile(value,na.rm=T)[4],
max.value = quantile(value,na.rm=T)[5],
mean.value = mean(value,na.rm=T),
sd.value = sd(value,na.rm=T),
cv.value = round(sd.value/mean.value,4),
soma.value=sum(value,na.rm=T)),.(DayTime))
return(t)
}
# funcao para gerar arquivos para exportacao
weatherReady = function(dataset){
p2=dcast(dataset, DayTime+Data~variable, value.var = "soma.value")
head(p2)
p2 = p2[,c(1:3,6)]
p2=rbind(p2,data.frame(DayTime="all",ddply(p2,.(Data),summarise, PAR = sum(PAR),RAIN=sum(RAIN))))
names(p2)[-c(1:2)] = c("PAR_cum","RAIN_cum")
p3 = dcast(dataset, DayTime+Data~variable, value.var = "mean.value")
p3 = p3[,c(1:5,7:9)]
colnames(p3)[-c(1:2)] = c("PAR_mean","ST_mean","SWC_mean","T_mean","RH_mean","DEW_mean");
p3=rbind(p3,data.frame(DayTime="all",ddply(p3,.(Data),summarise,
PAR_mean = mean(PAR_mean,na.rm=T),
ST_mean = mean(ST_mean,na.rm=T),
SWC_mean = mean(SWC_mean,na.rm=T),
T_mean = mean(T_mean,na.rm=T),
RH_mean = mean(RH_mean,na.rm=T),
DEW_mean = mean(DEW_mean,na.rm=T))));
p3$SRad = p3$PAR_mean/(2.1*4.6*10) # convering to MJm2
p4 = dcast(dataset, DayTime+Data~variable, value.var = "min.value")
p4 = p4[,c(1,2,4:5,7:9)]
colnames(p4)[-c(1:2)] = c("ST_min","SWC_min","T_min","RH_min","DEW_min")
p4=rbind(p4,data.frame(DayTime="all",ddply(p4,.(Data),summarise,
ST_min = min(ST_min ,na.rm=T),
SWC_min = min(SWC_min ,na.rm=T),
T_min = min(T_min ,na.rm=T),
RH_min = min(RH_min ,na.rm=T),
DEW_min = min(DEW_min,na.rm=T))))
p5 = dcast(dataset, DayTime+Data~variable, value.var = "max.value")
p5 = p5[,c(1,2,4:5,7:9)]
colnames(p5)[-c(1:2)] = c("ST_max","SWC_max","T_max","RH_max","DEW_max")
p5=rbind(p5,data.frame(DayTime="all",ddply(p5,.(Data),summarise,
ST_max = max(ST_max ,na.rm=T),
SWC_max = max(SWC_max ,na.rm=T),
T_max = max(T_max ,na.rm=T),
RH_max = max(RH_max ,na.rm=T),
DEW_max = max(DEW_max,na.rm=T))))
final = merge(merge(merge(p2,p3,by=c("DayTime","Data")),
p4,by=c("DayTime","Data")),p5,by=c("DayTime","Data"))
rm(p2,p3,p4,p5)
return(dlply(final,.(DayTime),mutate,final))
}
#'---------------------------------------------------------------------------------------#'
#' Abaixo segue um passo-a-passo para processar os dados #'
#' Duvidas procure o autor ou atual responsavel #'
#'---------------------------------------------------------------------------------------#'
#' STEP BY STEP
#------- STEP 1: No Software HOBOWARE ----------------------------------------------------
#' 1.1 Abra o arquivo X_end.dft pelo software HOBOWARE (double-click no arquivo)
#' #'----------------------------------------------------------------
#' #' OBS:
#' dentro do HOBOWARE, confira em PREFERENCIAS > Geral como estao configuradas
#' as notacoes de hora (formato de hora ideal = 24h) e data (DMA - dia mes ano, ideal)
#' #'----------------------------------------------------------------
#' OBS2:
#' na hora de importar o arquivo pro HOBOWARE, certifique-se de que esta usando o
#' defasamento a partir de GMT -3. Isso evitara problemas de mudança de horario (de verao)
#' e ira padronizar a coleta dos dados com base no tempo solar (e nao do pais)
#' #'----------------------------------------------------------------
#' # OBS3
#' ainda em preferencias, veja se a opcao "separar uma coluna para data e
#' oura para hora" esta marcada. isso ira faciltar sua vida ;D
#' #'----------------------------------------------------------------
#' 1.2 Uma planilha com os dados em escala temporal (ex: a cada 1h, por dia) serao mostrados
#' 1.3 Confira se as datas estao corretas
#' 1.4 Para exportar, pressione CTRL E ou va em Ficheiro > Exportar dados da Tabela
#' e selecione quais variaveis quer exportar (default = todas)
#' 1.5 Um arquivo .csv (ou txt) sera gerado.
#' 1.6 No excel, organize a coluna de data e hora
#' 1.7 salve o arquivo para ser utilizado no passo 2
#'
# ------- STEP 2: Importacao do arquivo .csv----------------------------------------------
require(reshape2);
require(plyr);
dir = getwd() # saving current directory
#' PAR : radicacao fotossinteticamente ativa
#' STEMP: temperatura do solo
#' SWC : umidade do solo
#' RAIN : chuva
#' ATEMP: temperatura do ar
#' RH : umidade do ar
#' DEWP : temperatura no ponto de orvalho
#'
#' exemplos:
#'
## exemplo: o arquivo esta em formato diferente do esperado.
# o que fazer? so adaptar o script!
usp1 = read.HOBO(file ="usp_end_01.csv",path = dir,skip=0,
formatDay = "%m/%d/%y",pattern = "h",sep=",");
# mudei o formato da data
# botei skip = 0 (ou seja, nao pule nenhuma linha)
# mudei o tipo de pattern (default - H, botei h)
# veja que o arquivo ta dando erro em definir dia e noite
# isso porque no HOBOWARE nao foi configurado a hora para 24h (de 0h ate 23h)
# nesse ponto, nao tem como arrumar o problema! tem que voltar no HOBOWARE e
# e gerar novamente o .csv a partir do metadado binario
head(usp1)
# importa sem mudar nome de colunas
usp = read.HOBO(file ="usp_end_0.csv",path = dir);
# agora adicionando novos argumentos, importa e muda nome de coluna
# crie um vetor com o nome das colunas que voce quer trabalhar
# fique esperto pois as colunas serao organizadas com base na ordem dos canais
# nos quais os sensores foram plugados no HOBOWARE
var.id = c("PAR","STEMP","SWC","RAIN","ATEMP","RH","DEWP");
usp = read.HOBO(file ="usp_end_0.csv",id.vars = var.id,path=dir);
# com o argumento save=T, salva o output no seu diretorio
usp = read.HOBO(file ="usp_end_0.csv",id.vars = var.id,path=dir,save=T);
head(usp)
# teste o mesmo para o anhembi
var.id = c("PAR","SWC","STEMP","RAIN","ATEMP","RH","DEWP");
anh = read.HOBO(file ="Anhumas_end_0.csv",path = dir);
anh = read.HOBO(file ="Anhumas_end_0.csv",id.vars = var.id,path=dir,save=T);
head(anh);
# conseguimos tambem inserir um argumento que ja corta a planilha com base na janela de interesse
# para isso, basta usar os argumentos start e/ou end para definir as datas de inicio e fim
## no anhembi a estacao foi ligada no dia do plantio
var.id = c("PAR","STEMP","SWC","RAIN","ATEMP","RH","DEWP");
anh = read.HOBO(file ="Anhumas_end_0.csv",
start=NULL, # ja ta comecando no dia que eu quero
end = "17/06/2019",
id.vars = var.id,path=dir,save=T);
tail(anh)
## na usp eu restartei no dia do plantio (dia 22/02) e retiramos antes da colheita
var.id = c("PAR","STEMP","SWC","RAIN","ATEMP","RH","DEWP");
usp = read.HOBO(file ="usp_end_0.csv",
start=NULL, # ja ta comecando no dia que eu quero
id.vars = var.id,path=dir,save=T);
head(usp)
head(anh)
## OBS
# vejam alguns sensores, por exemplo, radiacao:
boxplot(usp$PAR[usp$DayTime %in% "night"])
# a noite a radiacao tem que ser 0
# em umidade, ha valroes negativos. Nao existe umidade negativa
boxplot(usp$SWC)
boxplot(usp$RAIN)
# chuveu mais de 30mm numa hora...estranho? sim, mas nesse caso nos lembramos do dia! Choveu memso!
# Pensando nesses aspectos, criei uma funcao para controle de qualidade.
# porem acho melhor cada um fazer "na mao" com base nos proprios criterios
# minhas sugestões:
# a) deixar PAR noturno = 0
usp$PAR[usp$DayTime %in% "night"] = 0
anh$PAR[anh$DayTime %in% "night"] = 0
# b) botar NA em quaisquer valores excessivos
summary(usp) # checando, ok
summary(anh) # checando, ok
# c) botar NA em valores de umidade do solo negativos (sao erros do sensor)
usp$SWC[usp$SWC < 0] = NA
anh$SWC[anh$SWC < 0] = NA
# ------- STEP 3: Processamento das informacoes ----------------------------------------------
#' os arquivos lidos estao na janela de tempo entre ativacao/desativacao do equipamento em campo
#' Portanto, precisamos cortar a janela de tempo que e do nosso interesse
#' Em seguida, verificar possiveis outliers
#' Por fim, gerar informacoes ambientais uteis
head(usp)
var.id = c("PAR","STEMP","SWC","RAIN","ATEMP","RH","DEWP");
# com target = F, faz as sumarizacoes sem pegar horas especificas do dia
usp2 = process.HOBO(dataset = usp,var.id = var.id,target = F)
usp2$day # medias diarias do periodo diurno
usp2$night # medias diarias do periodo noturno
usp2 = process.HOBO(dataset = usp,var.id = var.id,target = T)
usp2$day # dia usando apenas o collect (default = c(9,12,15))
usp2$night # noite usando a media de TODO o periodo noturno
# se colocar nightConsider = F, so considera os collect ou o periodo diurno
usp2 = process.HOBO(dataset = usp,var.id = var.id,target = T,nightConsider = F)
head(usp2$day)
var.id = c("PAR","SWC","STEMP","RAIN","ATEMP","RH","DEWP");
anh2 = process.HOBO(dataset = anh,var.id = var.id,target = T)
anh2$day # dia usando apenas o collect (default = c(9,12,15))
anh2$night # noite usando a media de TODO o periodo noturno
## os dados nesse formato sao muito uteis para estudos de envirotyping!!
# O passo 4 visara apenas formar um dataset para disponibilziacao e outras aplicacoes
# ------- STEP 4: Formacao Banco de dados ----------------------------------------------
#' ao formar um banco de dados meteorologicos, temos interesse em:
#' Temperatura Media diaria
#' Temperatura Maxima diaria
#' Temperatura Minima Diaria
var.id = c("PAR","SWC","STEMP","RAIN","ATEMP","RH","DEWP");
usp2 = process.HOBO(dataset = usp,var.id = var.id,target = T);
head(usp2)
usp2 = ldply(usp2) # transformando num dataset
head(usp2$day)
usp2 = weatherReady(dataset = usp2)
usp2$all # media do dia e da noite
usp2$day # media apenas do dia
usp2$night # media apenas do noite
save(lisdt=usp2, file="Weather-data-usp-2019.Rdata")
anh2 = process.HOBO(dataset = anh,var.id = var.id,target = T);
head(anh2)
anh2 = ldply(anh2) # transformando num dataset
head(anh2$day)
anh2 = weatherReady(dataset = anh2)
anh2$all # media do dia e da noite
anh2$day # media apenas do dia
anh2$night # media apenas do noite
save(lisdt=anh2, file="Weather-data-anh-2019.Rdata")
# opcoes de operation:
# opcoes de operation:
#' operation = "mean.value" retorna as medias diarias
#' operation = "soma.value" retorna somatorio dos dias
#' operation = "median.value" retorna as medianas do dia
#' operation = "max.value" retorna as maximas do dia
#' operation = "min.value" retorna as minimas do dia
#' operation = "quant25" retorna percentil 25 do dia
#' operation = "quant75" retorna percentil 75 do dia
#' operation = "full" retorna o default da funcao, ajustando retorno de acordo com os dados
p = weatherReady(dataset = l)
save()
|
159c8129f866101ef3b69a4387276b9da5e91eb2 | b3e28db152e3bf211f60039c682851fdc3b89bc3 | /Lab6th_JSG.R | 1bc408a348e708941344217b1c566deec3b8c104 | [] | no_license | joaquinsanchezgomez/Lab6th | c6d80cd8d8dc2ac35c54a67a4dc79da421da0b75 | a24d25afb1cc7df64785537bbf59b36755f39b74 | refs/heads/main | 2023-09-05T08:06:18.667779 | 2021-11-05T05:30:47 | 2021-11-05T05:30:47 | 424,834,707 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,318 | r | Lab6th_JSG.R |
#Lab 6
#Joaquin Sanchez-Gomez
#I decided to change some variables, to see if anxiety and perhaps sexual orientation
#are significant to determine if people decide to be vaccinated.
#Lab 6
model_logit1 <- glm(vaxx ~ EEDUC,
family = binomial, data = Household_Pulse_data)
#First Step
Household_Pulse_data$vaxx <- (Household_Pulse_data$RECVDVACC == "yes got vaxx")
is.na(Household_Pulse_data$vaxx) <- which(Household_Pulse_data$RECVDVACC == "NA")
#to confirm that we don't have NA
sum(is.na(Household_Pulse_data$RECVDVACC))
table(Household_Pulse_data$vaxx,Household_Pulse_data$EEDUC)
summary(Household_Pulse_data$vaxx)
summary(as.numeric(Household_Pulse_data$vaxx))
vaxx_factor <- as.factor(Household_Pulse_data$vaxx)
levels(vaxx_factor)
levels(vaxx_factor) <- c("no","yes")
glm(RECVDVACC ~ EEDUC,family = binomial)
pick_use1 <- (Household_Pulse_data$REGION == "South") # just for example!
dat_use1 <- subset(Household_Pulse_data, pick_use1)
# and to be finicky, might want to use this for factors after subsetting in case some get lost
dat_use1$RECVDVACC <- droplevels(dat_use1$RECVDVACC)
model_logitX <- glm(vaxx ~ TBIRTH_YEAR + EEDUC + ANXIOUS + RRACE + SEXUAL_ORIENTATION + GENID_DESCRIBE,
family = binomial, data = dat_use1)
summary(model_logitX)
|
a3b6248c9daffd841a78812d076699b0d67d8168 | 17fe01ee7912fc6279a6c40838bdc423bb2ae788 | /city2province.R | be8663c79d4d356df315f6ba4faef52a77ac3992 | [] | no_license | TansyZhang/offshore-playground | 4b21d942f5061c2ff0319f24af037629b2d9caa8 | 127ec856a95dae07d70cc0cafbf15ef69575e580 | refs/heads/master | 2021-01-15T19:34:43.394119 | 2016-05-30T14:29:13 | 2016-05-30T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 690 | r | city2province.R | library(stringr)
df.addr <- read.csv("AddressesCN.csv")
df.cityOnlyAddr <- df.addr[lapply(df.addr$city, str_length)>0 & lapply(df.addr$province, str_length)==0,]
df.province <- read.csv("city_province_cleaned.txt", header=F)
colnames(df.province) <- c("city", "provinces")
# Duplicate city cause the increase of 100 rows. approximately 2% of the totaly number of rows
df.cityAddr <- merge(df.cityOnlyAddr, df.province, by = "city")
df.cityAddr$province <- df.cityAddr$provinces
df.cityAddr <- df.cityAddr[, colnames(df.addr)]
df.addr <- rbind(df.addr[!df.addr$node_id %in% df.cityAddr$node_id,],df.cityAddr)
df.addr <- df.addr[order(df.addr$row),]
write.csv(df.addr, "AddressCNFinal.csv")
|
3093e37d0e2d73687e2cc5028621e2a800d8325b | afd4e81020a20387a4ccc0019573b565ca264aca | /plot3.R | eec8f16a33e32e21fc63a7ab8d3a5923b0c09e0f | [] | no_license | rahadityazuhdi/Exploratory-Data-Analysis-Course-Project-2-National-Emissions-Inventory-NEI-Databases-Analysis | 30b2f3fd2eec1b6ff2a384712daf4f8d823f1850 | 0ce4f72cd1a5e6eff3fa299c9536cc55bf4e67fa | refs/heads/master | 2022-11-25T15:02:30.770167 | 2020-08-04T11:22:19 | 2020-08-04T11:22:19 | 284,956,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 457 | r | plot3.R | NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(ggplot2)
png(filename = 'plot3.png')
plot <- (ggplot(TotalByYearandType_Baltimore, aes(x=year, y=Emissions, color = type))
+ geom_line() + ggtitle(expression('Total PM'[2.5]*' emissions in Baltimore City-MD in kilotons'))
+ xlab("year")
+ ylab(expression('total PM'[2.5]*' emission in kilotons')))
print(plot)
dev.off() |
1b7341c03de11152ffa662352333eabdcf3a3b36 | 0b9eb01e71cfb86e2ca287a8cf21c84e5f2025a6 | /Code/ImmRes_generic.R | 127e4abde79305870897ee1f1e1bf85997762217 | [] | no_license | chitrita/ImmuneResistance | ff319def9d81edd171ffd7110189a36060d72ae5 | ae977f3a7cdb695cd7b075176789439e866819a3 | refs/heads/master | 2020-04-10T22:52:09.682786 | 2018-07-26T15:25:47 | 2018-07-26T15:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,327 | r | ImmRes_generic.R | apply.ttest<-function(m,b,ranksum.flag = F){
if(ranksum.flag){
p<-t(apply(m,1,function(x) c(wilcox.test(x[b],x[!b],alternative = 'greater')$p.value,
wilcox.test(x[b],x[!b],alternative = 'less')$p.value)))
}else{
p<-t(apply(m,1,function(x) c(t.test(x[b],x[!b],alternative = 'greater')$p.value,
t.test(x[b],x[!b],alternative = 'less')$p.value)))
}
colnames(p)<-c('more','less')
p<-cbind(p,get.p.zscores(p))
colnames(p)[3]<-"zscores"
p<-cbind.data.frame(p,BH = p.adjust.mat(p[,1:2],method = "BH"))
return(p)
}
ttest.class<-function(v,b,alternative="two.sided"){
p<-t.test(v[b],v[!b],alternative = alternative)$p.value
return(p)
}
wilcox.class<-function(v,b,alternative="two.sided"){
p<-NA;v<-v[!is.na(b)];b<-b[!is.na(b)]
v1<-v[b];v2<-v[!b]
if(sum(!is.na(v1))==0|sum(!is.na(v2))==0){return(p)}
p<-wilcox.test(v1,v2,alternative = alternative)$p.value
return(p)
}
get.p.zscores<-function(p){
b<-p[,1]>0.5
b[is.na(b)]<-F
zscores<-(-log10(p[,1]))
zscores[b]<-log10(p[b,2])
# signficiant in p[,1] will be positive
# signficiant in p[,2] will be negative
return(zscores)
}
get.cor.zscores<-function(R,P){
p<-cbind(get.onesided.p.value(R,P),get.onesided.p.value(-R,P))
z<-get.p.zscores(p)
return(z)
}
add.onesided.p<-function(m){
m<-cbind.data.frame(m,
p.pos = get.onesided.p.value(m[,"R"],m[,"P"]),
p.neg = get.onesided.p.value(-m[,"R"],m[,"P"]))
m[,c("p.pos","p.neg")]<-p.adjust.mat(m[,c("p.pos","p.neg")],method = "BH")
return(m)
}
get.onesided.p.value <- function(c,p){
p[p==0] = min(p[p>0],na.rm = T)
p.one.side <- p
p.one.side[] <- NA
b<-c>0&!is.na(c)
p.one.side[b]=p[b]/2
b<-c<=0&!is.na(c)
p.one.side[b]=1-(p[b]/2)
return(p.one.side)
}
p.adjust.mat<-function(m,method = "BH"){
P<-apply(m,2,function(x) p.adjust(x,method = method))
return(P)
}
get.top.elements <- function (m,no.elm = 100,min.cf = Inf,main = ""){
q<-no.elm/nrow(m)
top.l<-lapply(colnames(m),function(x){
v <- m[,x]
cf <- min(quantile(x = v,probs = q,na.rm = T),min.cf)
b <- v<=cf
elm <- sort(rownames(m)[b])
return(elm)
})
if(main!=""){main<-paste0(main,".")}
names(top.l)<-paste0(main,colnames(m))
return(top.l)
}
list.2.mat<-function(l){
n1<-max(laply(l,length))
m<-t(laply(l,function(x) c(x,matrix(data = "",nrow = n1-length(x)+1))))
m<-m[1:n1,]
colnames(m)<-names(l)
return(m)
}
get.mat<-function(m.rows,m.cols,data = NA){
m<-matrix(data = data, nrow = length(m.rows),ncol = length(m.cols),
dimnames = list(m.rows,m.cols))
return(m)
}
get.empirical.p.value <-function (p){
if(is.matrix(p)){
p.emp<-apply(p,1,function(x) sum(x<=x[1],na.rm = T))
p.emp[is.na(p[,1])]<-NA
no.permut<-rowSums(!is.na(p))
p.emp <- p.emp/no.permut
}else{
p.rand<-p[-1]
p.rand<-p.rand[!is.na(p.rand)]
p.emp<-c(emp.p = mean(p.rand<=p[1]),
sd.dist = (p[1]-mean(p.rand))/sd(p.rand),
mean.rand = mean(p.rand),
sd.rand = sd(p.rand))
}
return(p.emp)
}
discretize<-function(v,n.cat){
q1<-quantile(v,seq(from = (1/n.cat),to = 1,by = (1/n.cat)))
u<-matrix(nrow = length(v))
for(i in 2:n.cat){
u[(v>=q1[i-1])&(v<q1[i])]<-i
}
return(u)
}
get.cor<-function(v1,v2 = NULL,method = 'spearman',
use = "pairwise.complete.obs",match.flag = F,
alternative = "two.sided"){
#Previous name: spearman.cor
if(is.null(v2)){v2<-v1}
if(!is.matrix(v1)){v1<-as.matrix(v1)}
if(!is.matrix(v2)){v2<-as.matrix(v2)}
if(is.null(colnames(v1))){colnames(v1)<-1:ncol(v1)}
if(is.null(colnames(v2))){colnames(v2)<-1:ncol(v2)}
if(match.flag){
results<-laply(colnames(v1),function(i){
c.i<-cor.test(v1[,i],v2[,i],method = method,use = use, alternative = alternative)
p <- c(c.i$estimate,c.i$p.value)
return(p)
})
colnames(results)<-c("R","P")
return(results)
}
m<-get.mat(m.rows = colnames(v1),m.cols = colnames(v2))
results<-list(cor = m, p = m)
for(i in 1:ncol(v1)){
f<-function(x){
c.i<-cor.test(v1[,i],x,method = method,use = use, alternative = alternative);
c(c.i$estimate,c.i$p.value)}
c.i <- apply(v2,2,f)
results$cor[i,] <- c.i[1,]
results$p[i,] <- c.i[2,]
}
if(ncol(v2)==1){
results<-cbind(results$cor,results$p)
colnames(results)<-c('R','P')
}
return(results)
}
pcor.mat<-function(v1,v2 = NULL,v3, method = 'spearman',
use = "pairwise.complete.obs",match.flag = F,
alternative = "two.sided"){
f1<-function(x,y,z){
c.i<-pcor.test(x = x,y = y,z = z,method = method)
p<-c(c.i$estimate,c.i$p.value)
return(p)
}
f2<-function(x,y,z){
p<-tryCatch(f1(x,y,z),error = function(err){return(c(NA,NA))})
return(p)
}
if(is.null(v2)){v2<-v1}
if(!is.matrix(v1)){v1<-as.matrix(v1)}
if(!is.matrix(v2)){v2<-as.matrix(v2)}
if(!is.matrix(v3)){v3<-as.matrix(v3)}
if(match.flag){
n=ncol(v1)
results<-matrix(data = NA,nrow = n,ncol = 2)
rownames(results)<-colnames(v1)
for(i in 1:ncol(v1)){
b<-!is.na(v1[,i])&!is.na(v2[,i])
v1i<-v1[b,i];v2i<-v2[b,i]
results[i,] <- f2(v1i,v2i,v3[b,])
}
return(results)
}
m<-matrix(nrow = ncol(v1),ncol = ncol(v2))
rownames(m)<-colnames(v1)
colnames(m)<-colnames(v2)
results<-list(cor = m, p = m)
for(i in 1:ncol(v1)){
f<-function(x){
b<-!is.na(v1[,i])&!is.na(x)
c.i<-f2(v1[b,i],x[b],v3[b,])
return(c.i)
}
c.i <- apply(v2,2,f)
results$cor[i,] <- c.i[1,]
results$p[i,] <- c.i[2,]
}
if(ncol(v2)==1){
results<-cbind(results$cor,results$p)
colnames(results)<-c('R','P')
}
return(results)
}
remove.ribo<-function(l){
if(is.list(l)){
l<-lapply(l,function(x) x[!startsWith(x,"RP")])
}else{
l<-l[!startsWith(l,"RP")]
}
return(l)
}
cor.plot<-function(x,y = NULL,main = '',ylab = '', xlab = '',regression.flag = F){
if(is.null(y)){
v<-colnames(x)
xlab<-v[1];ylab<-v[2]
y<-x[,2];x<-x[,1]
}
v<-get.cor(x,y)
main <- paste(main,"\nR =",format(v[1],di = 2),"P =",format(v[2],scientific = T,di = 2))
plot(x,y,main = main, xlab = xlab, ylab = ylab,cex=0.3)
b<-!is.na(x)&!is.na(y)
v<-lowess(x[b],y[b])
lines(v,col = "red")
if(!regression.flag){return()}
y.d<-y-v$y[match(x,v$x)]
y.sd<-sd(y.d,na.rm = T)
y.av<-mean(y.d,na.rm = T)
labels<-matrix(data = "Moderate",nrow = length(y))
labels[y.d>(y.av+y.sd)]<-"High"
labels[y.d<(y.av-y.sd)]<-"Low"
my.plot(x,y,labels = labels,main = main,xlab = xlab,ylab = ylab)
lines(v)
return(y.d)
}
center.matrix<-function(m,dim = 1,sd.flag = F){
if(dim == 1){
zscores<-sweep(m,1,rowMeans(m,na.rm = T),FUN = '-')
}else{
zscores<-sweep(m,2,colMeans(m,na.rm = T),FUN = '-')
}
if(sd.flag){
zscores<-sweep(zscores,dim,apply(m,dim,function(x) (sd(x,na.rm = T))),FUN = '/')
}
return(zscores)
}
get.top.cor<-function(m,no.elm = 100,min.cf = 0,idx = NULL, add.prefix =""){
m<-as.matrix(m)
m.pos<-(-m);m.neg<-m
colnames(m.pos)<-paste0(colnames(m.pos),".up")
colnames(m.neg)<-paste0(colnames(m.neg),".down")
v<-get.top.elements(cbind(m.pos,m.neg),
no.elm = no.elm,
min.cf = (-abs(min.cf)))
names(v)<-c(colnames(m.pos),colnames(m.neg))
if(!is.null(idx)){
v<-v[paste(idx,c("up","down"),sep = ".")]
}
names(v)<-paste0(add.prefix,names(v))
return(v)
}
discretize.mat.q<-function(X,q1 = 0.9){
qv<-t(apply(X,2,function(x) quantile(x,q1,na.rm = T)))
B<-sweep(X,2,qv,FUN = "-")>=0
return(B)
}
remove.redundant.dashs<-function(v){
v<-gsub("__","_",v,fixed = T)
v<-laply(v, function(x){
if(startsWith(x,"_")){
x<-substr(x,2,nchar(x))
}
if(endsWith(x,"_")){
x<-substr(x,1,nchar(x)-1)
}
return(x)
})
return(v)
}
set.list<-function (r,b,data.name = NULL){
rn<-lapply(r,set.field, b = b)
if(!is.null(data.name)){rn$data.name<-data.name}
return(rn)
}
set.field <- function (v,b){
d <- dim(v);d.b<-length(b)
if(!is.null(d)){
if(d[1]==d.b){v <- v[b,]}
if(d[2]==d.b){v <- v[,b]}
}else{if(length(v)==d.b){v <- v[b]}}
return(v)
}
cmp.multi.refs<-function(m,b,ref.groups,ranksum.flag = T){
# Previous name: ranksum.t.test.multi.refs
ref.groups.u<-unique(ref.groups[!b])
m.up<-get.mat(rownames(m),ref.groups.u)
m.down<-m.up
for(x in ref.groups.u){
b.ref<-is.element(ref.groups,x)
if(length(unique(b.ref[b|b.ref]))<2){next()}
m.r<-apply.ttest(m[,b|b.ref],b[b|b.ref],ranksum.flag = ranksum.flag)
m.up[,x]<-m.r[,1]
m.down[,x]<-m.r[,2]
rm(m.r)
}
results<-list()
results$up<-cbind.data.frame(max.p = rowMaxs(m.up,na.rm = T),
No.sup = rowSums(m.up<0.05,na.rm = T),m.up)
results$down<-cbind.data.frame(max.p = rowMaxs(m.down,na.rm = T),
No.sup = rowSums(m.down<0.05,na.rm = T),m.down)
return(results)
}
list.2.boolean.mat<-function(l,ids = NULL){
if(is.null(ids)){
ids<-sort(unique(unlist(l)))
}
B<-t(laply(l,function(x) is.element(ids,x)))
colnames(B)<-names(l)
rownames(B)<-ids
return(B)
}
convert.to.mice<-function(mouse.human,genes){
genes<-as.character(unique(mouse.human[is.element(mouse.human[,"human"],genes),"mouse"]))
return(genes)
}
GO.enrichment.lapply<-function(go.env,genes,sig,valuType = 1){
m<-t(laply(sig,function(x) GO.enrichment(go.env,genes,x)[,valuType]))
colnames(m)<-names(sig)
return(m)
}
GO.enrichment<-function(go.env,genes,selected.genes){
b<-is.element(genes,selected.genes)
p<-laply(go.env,function(x) get.hyper.p.value(is.element(genes,x),b))
rownames(p)<-names(go.env)
return(p)
}
get.hyper.p.value<-function(b1,b2,full.flag = T){
b.na<-is.na(b1)|is.na(b2)
b1<-b1[!b.na];b2<-b2[!b.na]
p <- max(1-phyper(sum(b1&b2)-1, sum(b1), sum(!b1), sum(b2)),1e-17)
if(!full.flag){return(p)}
ex <- sum(b2)*(sum(b1)/length(b2))
ob.vs.ex <- sum(b1&b2)/ex
v<-c(p,ob.vs.ex,sum(b1&b2),ex)
names(v)<-c('hyper.p.value','ob.vs.exp','ob','exp')
return(v)
}
get.hyper.p.value.mat<-function(B1,B2){
B1<-set.colnames(B1);B2<-set.colnames(B2)
P<-get.mat(colnames(B1),colnames(B2))
for(i in 1:ncol(B1)){
for(j in 1:ncol(B2)){
P[i,j] <- get.hyper.p.value(B1[,i],B2[,j],full.flag = F)
}
}
return(P)
}
get.residuals<-function(X,g){
set.seed(1234)
f<-function(y) {lm(y~.,data = as.data.frame(g))$residuals}
residuals<-t(apply(X,1,f))
return(residuals)
}
set.colnames<-function(m){
if(is.null(colnames(m))){colnames(m)<-1:ncol(m)}
return(m)
}
cox.mat<-function(m,r,X = NULL,filter.flag = T){
if(is.null(X)){
f<-function(x) {summary(coxph(r$survival ~ x))$coefficients[1,c("coef","Pr(>|z|)")]}
}else{
f<-function(x) {summary(coxph(r$survival ~., cbind.data.frame(x,X)))$coefficients[1,c("coef","Pr(>|z|)")]}
}
if (filter.flag){
b<-rowSums(m>0,na.rm = T)>(ncol(m)*0.2)
}else{
b<-rowSums(m>0,na.rm = T)>0
}
p<-matrix(nrow = nrow(m),ncol = 2,dimnames = list(rownames(m)))
p[b,]<-t(apply(m[b,],1,f))
p<-cbind(p,get.onesided.p.value(p[,1],p[,2]),
get.onesided.p.value(-p[,1],p[,2]))
p<-cbind(p,get.p.zscores(p[,3:4]))
colnames(p)<-c("coef","Pr(>|z|)","P.high.exp.worse","P.high.exp.better","Zscore")
return(p)
}
get.auc.mat<-function(P,y){
auc<-apply(P,2,function(x) get.auc(x,y))
return(auc)
}
get.auc<-function(p1,y1){
pr <- prediction(p1, y1)
auc <- performance(pr, measure = "auc")
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
# prf <- performance(pr, measure = "prec", x.measure = "rec")
# plot(prf,ylim = c(0,1))
# abline(h = mean(y1))
return(auc)
}
# Apply gsub() once per element of `pattern` (treated as a fixed string,
# not a regex), left to right, replacing each with `replacement`.
multi.gsub <- function(pattern, replacement = '', x) {
  # for-each iteration also handles an empty `pattern` vector, which the
  # original 1:length(pattern) loop turned into an error.
  for (p in pattern) {
    x <- gsub(pattern = p, replacement = replacement, x = x, fixed = TRUE)
  }
  x
}
# Format p-value(s) for display. A scalar becomes "P = 1e-03"; a vector
# becomes "P1 = ..., P2 = ...". Values with |p| > 1 are interpreted as
# -log10 p-values and converted back with 10^(-|p|).
format.pval.private <- function(p) {
  if (length(p) > 1) {
    # Format each element (vapply replaces the plyr::laply dependency),
    # strip the "P = " prefix, then number and join the pieces.
    parts <- vapply(p, format.pval.private, character(1))
    parts <- gsub("P = ", "", parts)
    parts <- paste0("P", seq_along(parts), " = ", parts)
    return(paste(parts, collapse = ", "))
  }
  if (abs(p) > 1) {
    p <- 10^(-abs(p))
  }
  # `digits` spelled out (the original used the partially-matched `di=`).
  paste("P =", format(p, scientific = TRUE, digits = 3))
}
# Box plot of `vals` split by `labels`, annotated with a two-group test
# p-value (and an AUC when t.test.flag is TRUE). The reference group is
# labels %in% ref.label versus everything else.
# Relies on the in-file helpers ttest.class / wilcox.class / get.auc.
# NOTE(review): `col` is only assigned for >= 2 label levels, and `auc`
# only when t.test.flag == TRUE, yet the title branch below
# (t.test.flag != "none") still pastes `auc` — calling with
# t.test.flag = FALSE looks like it would error; confirm intended usage.
# Returns the test p-value.
boxplot.test<-function(vals,labels,las = 1,main = "",cex = 1.1,ylab = NULL,alternative = "two.sided",
                       t.test.flag = T,dots.flag = F,legend.flag = T,ref.label = labels[1]){
  # Palette keyed on the number of distinct groups.
  no.labels<-length(unique(labels))
  if(no.labels==2){col = c("cadetblue","gray")}
  if(no.labels==3){col = c("cadetblue","gray70","brown4")}
  if(no.labels>3){col = c("cadetblue","lightblue","gray70","brown4","orange")}
  if(t.test.flag==T){
    p<-ttest.class(vals,is.element(labels,ref.label),alternative = alternative)
    # AUC is oriented so that the tested alternative scores high.
    if(alternative=="greater"){
      auc <- get.auc(vals,is.element(labels,ref.label))
    }else{
      auc <- get.auc(vals,!is.element(labels,ref.label))
    }
    auc<-round(auc,di = 2)
  }else{
    p<-wilcox.class(vals,is.element(labels,ref.label),alternative = alternative)
  }
  # main<-set.titles(gsub("_"," ",main))
  main<-gsub("_"," ",main)
  if(is.null(ylab)){ylab <- main}
  if(t.test.flag!="none"){
    main <- gsub("_"," ",paste0(main,"\n(t-test P = ",format(p,scientific = T,di = 2),
                                "\nAUC = ",auc,")"))
  }
  boxplot(vals~labels,las=las,cex.axis = 1,col = col,ylab = ylab,main = main,cex.main = cex,
          cex.lab = cex)
  # Legend shows each group with its sample size.
  l<-as.matrix(table(labels))
  l<-paste0(rownames(l)," (n = ",l,")")
  if(legend.flag){legend("topleft",legend = l,fill = col,cex = 0.8)}
  if(dots.flag){
    stripchart(vals~labels, vertical = TRUE,method = "jitter",
               add = TRUE, pch = 20, col = 'gray30')
  }
  return(p)
}
# Kaplan-Meier plot of a continuous score `v` split into High / Moderate /
# Low groups at the (1-qua) and qua quantiles, annotated with Cox-model and
# log-rank p-values. Needs the survival + rms packages (coxph, npsurv,
# survdiff, survplot) and the in-file helper get.onesided.p.value().
#
# r          object with a `survival` element — presumably a Surv object.
# v          numeric score per subject.
# X          optional covariates; if given, an adjusted Cox p-value (Pc)
#            is also reported in the title.
# direction  0 = two-sided p-values; otherwise its sign picks the
#            one-sided alternative.
# four.levels is accepted but not used in this implementation.
# Returns the High/Moderate/Low group labels G.
km.plot3<-function(r,v,main = '',X = NULL,qua = 0.2,xlim = NULL,direction = 0,
                   legend.flag = T,ylab = "Survival probability",four.levels = F){
  # Unadjusted Cox fit on the continuous score.
  M1<-summary(coxph(r$survival ~ v))$coefficients
  coxD<-M1[1,"coef"]
  cox.p<-M1[1,"Pr(>|z|)"]
  if(!is.null(X)){
    # Covariate-adjusted fit; the score stays the first coefficient.
    Mc<-summary(coxph(r$survival ~ cbind(v,X)))$coefficients
    cox.p.c<-Mc[1,"Pr(>|z|)"]
    coxD.c<-Mc[1,"coef"]
  }
  # Tertile-style grouping at the requested quantiles.
  b1<-v>=quantile(v,1-qua,na.rm = T);b2<-v<=quantile(v,qua,na.rm = T)
  G<-ifelse(b1,"High","Moderate")
  col<-c("red","blue","darkgreen")
  G[b2]<-"Low"
  km2<-npsurv(r$survival ~ G)
  sdf2<-survdiff(r$survival ~ G)
  # Log-rank chi-square p-value; the /3 divisor is kept as written —
  # NOTE(review): presumably a multiple-comparison adjustment; confirm.
  sdf2<-(1 - pchisq(sdf2$chisq, length(sdf2$n) - 1))/3
  l<-paste0(c("High","Low","Moderate")," (",km2$n,")")
  if(is.null(xlim)){
    survplot(km2,col = col,lty = c(1,1), xlab = 'Years',label.curves = F,ylab = ylab,n.risk = T)
  }else{
    survplot(km2,col = col,lty = c(1,1), xlab = 'Years',label.curves = F,xlim = c(0,xlim),ylab = ylab,n.risk = T)
  }
  if(legend.flag){
    # Reorder so the "Low" entry is listed last.
    legend("topright",fill = col[c(setdiff(1:length(col),2),2)],cex = 0.8,
           legend = l[c(setdiff(1:length(col),2),2)])
  }
  # Title assembly: one-sided p-values when direction != 0.
  if(!is.null(X)){
    if(direction==0){
      P<-c(cox.p,cox.p.c,sdf2)
    }else{
      P<-get.onesided.p.value(direction*c(coxD,coxD.c,coxD),c(cox.p,cox.p.c,sdf2))
    }
    P<-format(P,scientific = T,di = 2)
    main<-paste0(main,"\nP=",P[1],", Pc=",P[2],"\nlogrank=",P[3])
  }else{
    if(direction==0){
      P<-c(cox.p,sdf2)
    }else{
      P<-get.onesided.p.value(direction*c(coxD,coxD),c(cox.p,sdf2))
    }
    P<-format(P,scientific = T,di = 2)
    main<-paste0(main,"\nP=",P[1],", logrank=",P[2])
  }
  title(main,cex.main =1)
  return(G)
}
add.up.down.suffix <- function(v) {
  # Expand each name into itself plus its ".up" / ".down" variants,
  # preserving the original order of `v`.
  expanded <- lapply(v, function(nm) c(nm, paste0(nm, ".up"), paste0(nm, ".down")))
  unlist(expanded, use.names = FALSE)
}
# Regress each row of X on the design in `g` and return the residual matrix.
# NOTE: this re-defines the get.residuals() declared earlier in this file
# (identical apart from that version's set.seed call); being later in the
# file, this definition is the one in effect after sourcing.
get.residuals<-function(X,g){
  # One linear model per row of X; keep only the residuals.
  f<-function(y) {lm(y~.,data = as.data.frame(g))$residuals}
  residuals<-t(apply(X,1,f))
  return(residuals)
}
get.anova.p <- function(y, x, order.flag = F) {
  # One-way ANOVA p-value of y explained by x, after dropping infinite y
  # and missing values. `order.flag` is accepted for call compatibility
  # but is never used.
  drop.these <- is.infinite(y) | is.na(y) | is.na(x)
  y.clean <- y[!drop.these]
  x.clean <- x[!drop.these]
  fit <- aov(y.clean ~ x.clean)
  # Extract the overall F-test p-value from the flattened ANOVA table.
  unlist(summary(fit))['Pr(>F)1']
}
# Fit a two-component Gaussian mixture to `y` (mixtools::normalmixEM),
# plot the fitted densities, and derive a binary sample split by rank,
# sized by the mixing proportion of the lower-mean component.
# Returns the normalmixEM object augmented with $rank, $labels (logical)
# and $labelsF (pos/neg labels, "?"-suffixed for borderline points).
# Depends on mixtools and the in-file helper my.densityplot().
plot.bimodal.distribution<-function(y,density.flag = F,xlab = "",main = "",
                                    pos.label = "pos",neg.label = "neg"){
  # normalmixEM starts from random initial values; fix the seed for
  # reproducible fits.
  set.seed(1234)
  mixmdl = normalmixEM(y)
  main = paste(main,paste("loglikelihood =",round(mixmdl$loglik)),sep = "\n")
  plot(mixmdl,which=2,xlab2=xlab,main2 = main)
  lines(density(y), lty=2, lwd=2)
  mixmdl$rank<-rank(y)
  # Component with the smaller mean defines the "low" side of the split.
  idx1<-order(mixmdl$mu)[1]
  mixmdl$labels<-mixmdl$rank>round(mixmdl$lambda[idx1]*length(y))
  if(density.flag){
    my.densityplot(y,mixmdl$labels)
  }
  # NOTE(review): 1001 recorded log-likelihoods presumably means EM hit
  # its iteration cap (non-convergence); labels are then invalidated.
  if(length(mixmdl$all.loglik)==1001){
    mixmdl$labels[]<-NA
  }
  mixmdl$labelsF<-ifelse(mixmdl$labels,pos.label,neg.label)
  y.pos<-y[mixmdl$labels]
  y.neg<-y[!mixmdl$labels]
  # Mark points within one SD of the opposite side as uncertain ("?").
  mixmdl$labelsF[(y<(mean(y.pos)-sd(y.pos)))&mixmdl$labels]<-paste0(pos.label,"?")
  mixmdl$labelsF[(y>(mean(y.neg)+sd(y.neg)))&!mixmdl$labels]<-paste0(neg.label,"?")
  return(mixmdl)
}
# Scatter plot (e.g. a tSNE embedding) coloured by a label vector, with an
# optional legend, lowess trend line(s) and correlation annotation.
# If `y` is NULL, `x` must be a two-column matrix (column 1 = x, 2 = y).
# Relies on in-file helpers (add.n.of.samples, labels.2.colors, get.cor,
# get.strsplit) and on Hmisc::capitalize.
plot.extra <- function(x, y = NULL,labels,regression.flag = F,col.v = NULL,set.flag = F,cor.flag = F,
                       pch=16,cex=0.5,main="",ylab = "tSNE2",xlab = "tSNE1", cex.axis = 0.6,
                       add.N = F){
  # (a stray debug print(xlab) was removed here)
  main <- gsub(".", ": ", capitalize(main), fixed = T)
  if (add.N) { labels <- add.n.of.samples(labels) }
  if (set.flag) { par(mar = c(5.1, 4.1, 4.1, 12.1), xpd = TRUE) }
  if (is.null(col.v)) { col.v <- labels.2.colors(labels) }
  if (is.null(y)) { y <- x[, 2]; x <- x[, 1] }
  if (cor.flag) {
    xy.cor <- get.cor(y, x)
    main <- paste(main, "\nR =", format(xy.cor[1], di = 2), "P =", format(xy.cor[2], scientific = T, di = 2))
  }
  plot(x, y, col = col.v, pch = pch, cex = cex, main = main, ylab = ylab, xlab = xlab, cex.axis = cex.axis)
  labels <- gsub(" ", "_", labels)
  l <- (max(x, na.rm = T) - min(x, na.rm = T)) / 20   # legend x-offset
  if (length(unique(labels)) < 30) {
    if (length(pch) == length(labels)) {
      # One legend entry per unique label/colour/symbol combination.
      map <- unique(paste(labels, col.v, pch))
      legend.text <- get.strsplit(map, ' ', 1)
      # BUG FIX: the original indexed the character vector `map` as a
      # matrix and referenced an undefined object `m`; append the
      # per-label counts to the legend text instead.
      counts <- table(labels)
      legend.text <- paste0(legend.text, " (N = ", counts[match(legend.text, names(counts))], ")")
      legend(x = max(x, na.rm = T) + l, y = max(y, na.rm = T),
             legend = legend.text,
             col = get.strsplit(map, ' ', 2), inset = c(-0.5, 0),
             pch = as.integer(get.strsplit(map, ' ', 3)), lty = 1)
    } else {
      map <- unique(paste(labels, col.v, pch))
      legend(x = max(x, na.rm = T) + l, y = max(y, na.rm = T),
             legend = get.strsplit(map, ' ', 1),
             col = get.strsplit(map, ' ', 2),
             bty = 'n', lty = 0, # line style
             lwd = 2, cex = 0.7, pch = 0)
    }
  }
  if (regression.flag == 1) {
    # Single lowess trend through all complete points.
    b <- !is.na(x) & !is.na(y)
    v <- lowess(x[b], y[b]); lines(v)
  }
  if (regression.flag == 2) {
    # One lowess curve per label group.
    b <- !is.na(x) & !is.na(y)
    ulabels <- unique(labels)
    for (i in ulabels) {
      bi <- b & labels == i
      v <- lowess(x[bi], y[bi]); lines(v)
    }
  }
  return()
}
apply.plot.extra <- function(X, labels, main = NULL, xlab = "", ylab = "",
                             pch = 16, cex.axis = 0.6, set.flag = F) {
  # Draw one plot.extra() panel per column of `labels`; each panel's title
  # is the column name, optionally prefixed with `main`.
  laply(1:ncol(labels), function(i) {
    panel.title <- ifelse(is.null(main), colnames(labels)[i],
                          paste(main, colnames(labels)[i], sep = ":"))
    plot.extra(X, labels = labels[, i],
               main = panel.title,
               xlab = xlab, ylab = ylab, pch = pch, cex.axis = cex.axis,
               set.flag = set.flag)
    i
  })
}
add.n.of.samples <- function(l) {
  # Append " (N=count)" to every label, where count is how often that
  # label occurs in `l`.
  counts <- table(l)
  paste0(l, " (N=", counts[match(l, names(counts))], ")")
}
# Map a label vector to plotting colours.
# - number.flag = TRUE: return the integer rank of each label instead.
# - numeric labels with length > 10: continuous HSV colouring via
#   plotrix::color.scale (range padded by 1 on each side).
# - otherwise: categorical palette with black and red first.
# If `x` is supplied its values become the names of the returned vector.
labels.2.colors<-function(x.class,x = NULL,number.flag = F){
  palette("default")
  # Fixed categorical palette: black/red first, then the remaining base
  # palette plus extra named colours and a rainbow fallback.
  col.v<-c("black","red",setdiff(c(palette(),"cadetblue","gray","darkgreen","darkorange","darkviolet","gold3",
                                   "lightpink","deeppink2","deepskyblue",rainbow(20)),c("black","red")))
  # NOTE(review): no.classes is computed but never used below.
  no.classes<-length(unique(x.class))
  if(number.flag){
    col.v<-match(x.class,sort(unique(x.class)))
  }else{
    if(is.numeric(x.class[1])&&length(x.class)>10){
      col.v<-color.scale(x.class,c(0,10),0.8,0.8,color.spec="hsv",
                         xrange = c(min(x.class,na.rm = T)-1,max(x.class,na.rm = T)+1))
    }else{
      col.v<-col.v[match(x.class,sort(unique(x.class)))]
    }
  }
  if(!is.null(x)){names(col.v)<-x}
  return(col.v)
}
get.strsplit <- function(v, sep, idx) {
  # Split each element of `v` on the literal (non-regex) separator and
  # keep the token(s) at position `idx` (plyr::laply).
  tokens <- strsplit(as.character(v), split = sep, fixed = T)
  laply(tokens, function(parts) parts[idx])
}
# Two-group density plot (ggplot2). The grouping compared in the subtitle
# p-value is labels == labels[1] versus all other labels (Welch t-test,
# formatted by the in-file helper format.pval.private()); the densities
# themselves are drawn per distinct label.
# NOTE(review): the `caption` parameter is accepted but never used.
# Returns the ggplot object.
gg.densityplot<-function(x,labels,title="",subtitle="",xlab = "Scores",legend.name="",caption=""){
  theme_set(theme_classic())
  b<-labels==labels[1]
  p<-t.test(x[b],x[!b])$p.value
  subtitle<-paste0(subtitle," (",format.pval.private(p),")")
  # Column names follow the ggplot2 mpg example this was adapted from.
  mpg <- cbind.data.frame(cty = x,cyl = labels)
  g <- ggplot(mpg, aes(cty))
  g <- g + geom_density(aes(fill=factor(cyl)), alpha=0.5) +
    labs(title=title,
         x=xlab,
         subtitle=subtitle,
         fill=legend.name)
  # g <- g + theme_classic()
  return(g)
}
# Center a matrix by subtracting row means (dim = 1) or column means
# (any other dim value), ignoring NAs; optionally also divide by the
# per-row/column standard deviation (sd.flag = TRUE).
center.matrix <- function(m, dim = 1, sd.flag = FALSE) {
  centers <- if (dim == 1) rowMeans(m, na.rm = TRUE) else colMeans(m, na.rm = TRUE)
  zscores <- sweep(m, dim, centers, FUN = '-')
  if (sd.flag) {
    # SDs of the original matrix equal those of the centered one, so
    # computing them from `m` is equivalent.
    zscores <- sweep(zscores, dim, apply(m, dim, sd, na.rm = TRUE), FUN = '/')
  }
  zscores
}
average.mat.rows <- function(m, ids, f = colMeans) {
  # Collapse the rows of `m` that share an id by applying `f`
  # (default: column means) to each id-group (plyr::ddply).
  tagged <- cbind.data.frame(ids = ids, m)
  collapsed <- ddply(.data = tagged, .variables = "ids",
                     .fun = function(chunk) f(chunk[, -1]))
  rownames(collapsed) <- collapsed$ids
  collapsed$ids <- NULL
  collapsed
}
# Area under the ROC curve for scores p1 against binary truth y1 (ROCR).
# NOTE: re-defines the earlier get.auc() in this file (same computation,
# without that version's duplicated call); this later definition is the
# one in effect after sourcing.
get.auc<-function(p1,y1){
  pr <- prediction(p1, y1)
  auc <- performance(pr, measure = "auc")
  auc <- auc@y.values[[1]]
  return(auc)
}
|
ee359bf5fa6dc6031f5f0db57cfd48e8c0ed4bd9 | 9a4c3f85940d88aa1ef3da1ef30feb8c94847737 | /R/RANN-package.R | f25af723be85e3efe82eb69423e7483ee6de3c4a | [] | no_license | krlmlr/RANN1 | e68d4ff84e4fa0f9aa53dd9fe788adfd6e4ea922 | 8ed9fd6b11928a052082967ed1cbc9b8b2aa0fc3 | refs/heads/master | 2021-01-20T03:34:12.293174 | 2015-01-19T16:08:54 | 2015-01-19T16:08:54 | 29,477,067 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 202 | r | RANN-package.R | #' Wrapper for Arya and Mount's Approximate Nearest Neighbours (ANN) C++ library
#'
#' @name RANN1-package
#' @aliases RANN1
#' @seealso \code{\link{nn2}}
#' @docType package
#' @keywords package
NULL
|
aa146b6a2e16541e1d1bac287822954e7234d4b1 | ef40bd270d85d5eb252f0286f3e365b185fb08f4 | /man/frelevel.Rd | f32663104d030479056f63b56ed00b7b5914e574 | [] | no_license | dusadrian/admisc | b49e75300c3bf4d922d1d9ef2c065888c0f1576d | 44126a9d9c58dadd5f9b9c589fccfdcf3643e182 | refs/heads/master | 2023-07-19T18:57:56.391912 | 2023-07-16T09:31:30 | 2023-07-16T09:31:30 | 231,208,801 | 0 | 0 | null | 2023-06-07T07:55:07 | 2020-01-01T11:39:32 | R | UTF-8 | R | false | false | 828 | rd | frelevel.Rd | \name{frelevel}
\alias{frelevel}
\title{Modified \code{relevel()} function}
\description{
The base function \code{relevel()} accepts a single argument "ref", which
can only be a scalar and not a vector of values. \code{frelevel()} accepts
more (even all) levels and reorders them.
}
\usage{
frelevel(variable, levels)
}
\arguments{
\item{variable}{The categorical variable of interest}
\item{levels}{One or more levels of the factor, in the desired order}
}
\value{A factor of the same length as the initial one.}
\author{Adrian Dusa}
\seealso{\code{\link[stats]{relevel}}}
\examples{
words <- c("ini", "mini", "miny", "moe")
variable <- factor(words, levels = words)
# modify the order of the levels, keeping the order of the values
frelevel(variable, c("moe", "ini", "miny", "mini"))
}
\keyword{functions}
|
7bf3185ed043fb575a637d97435d4b32d046dc95 | 504fd5d21053981d468da010e673092d80744764 | /R for SQL.R | 5477a630a62a1a4104426b842a2cf5e0edf15923 | [] | no_license | OlgaShldrr/SQL_queries | 44860cfd9e894f341418a7325cedc9456c57dabc | ca3d3c0a3bfd19d223f2d9ff8b967defefe6ebc3 | refs/heads/master | 2020-12-04T02:47:57.235620 | 2020-01-03T13:29:21 | 2020-01-03T13:29:21 | 231,577,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,483 | r | R for SQL.R | library(DBI)
library(tidyverse)

# Pull the list of all variables -----
# NOTE: con_prod must stay open here because it is needed again below to
# write `full_section_a1`; the original script disconnected it immediately
# after this read and then wrote through the dead handle.
con_prod <- odbc::dbConnect(odbc::odbc(), Driver = "ODBC Driver 17 for SQL Server",
                            Server = Sys.getenv("SERVER_PRODUCTION"), Database = Sys.getenv("DB_PD"),
                            UID = Sys.getenv("UID"), PWD = Sys.getenv("PWD"))
table_id <- Id(schema = "surveys",
               name = "all_variables")
all_vars <- dbReadTable(con_prod, name = table_id)

# Section a1 -----
con_st <- odbc::dbConnect(odbc::odbc(), Driver = "ODBC Driver 17 for SQL Server",
                          Server = Sys.getenv("SERVER_PRODUCTION"), Database = Sys.getenv("DB_ST"),
                          UID = Sys.getenv("UID"), PWD = Sys.getenv("PWD"))
bsq <- dbReadTable(con_st, "bsq")
dbDisconnect(con_st)

# Join the BSQ answers to their variable short names and reshape wide.
section_a1 <- all_vars %>%
  filter(grepl(x = family_name, "BSQ-Business Unit Characteristics")) %>%
  select(variable_id, shortname) %>%
  right_join(bsq) %>%
  filter(!is.na(shortname)) %>%
  select(-remote_unit_id, -variable_id) %>%
  spread(key = shortname, value = value)
table_id <- Id(schema = "organization",
               name = "full_section_a1")
# Strip accents and non-portable characters before writing.
section_a1 %>%
  mutate_if(is.character,
            ~stringi::stri_trans_general(., "latin-ascii")) %>%
  mutate_if(is.character,
            ~ stringr::str_replace_all(., "[^[:alnum:][:blank:]?&/\\-]", "")) -> section_a1
dbWriteTable(con_prod, table_id, section_a1, append = TRUE, overwrite = FALSE,
             row.names = FALSE, encoding = "UTF-8")
dbDisconnect(con_prod)  # moved: disconnect only after the write above

# Section e1 -----
con_st <- odbc::dbConnect(odbc::odbc(), Driver = "ODBC Driver 17 for SQL Server",
                          Server = Sys.getenv("SERVER_PRODUCTION"), Database = Sys.getenv("DB_ST"),
                          UID = Sys.getenv("UID"), PWD = Sys.getenv("PWD"))
bsq <- dbReadTable(con_st, "bsq")
dbDisconnect(con_st)
con_prod <- odbc::dbConnect(odbc::odbc(), Driver = "ODBC Driver 17 for SQL Server",
                            Server = Sys.getenv("SERVER_PRODUCTION"), Database = Sys.getenv("DB_PD"),
                            UID = Sys.getenv("UID"), PWD = Sys.getenv("PWD"))
table_id <- Id(schema = "organization",
               name = "characteristics")
characteristics <- dbReadTable(con_prod, name = table_id)
# (con_prod stays open: it is used below to write section_e1; the original
# script disconnected here and then wrote through the dead handle.)
section_e1 <- all_vars %>%
  filter(grepl(x = family_name, "BSQ/SCDS - Shared Variables on BSQ Faculty/Staff Counts & Staff Comp & Demog Survey")) %>%
  select(variable_id, shortname) %>%
  right_join(bsq) %>%
  filter(!is.na(shortname)) %>%
  select(-remote_unit_id, -variable_id) %>%
  spread(key = shortname, value = value) %>%
  mutate(pf_unit_id = as.character(pf_unit_id)) %>%
  left_join(characteristics, by = c("pf_unit_id" = "pf_unitid")) %>%
  select(pf_unit_id, 171:177, everything(), -account_id)
table_id <- Id(schema = "organization",
               name = "section_e1")
section_e1 %>%
  mutate_if(is.character,
            ~stringi::stri_trans_general(., "latin-ascii")) %>%
  mutate_if(is.character,
            ~ stringr::str_replace_all(., "[^[:alnum:][:blank:]?&/\\-]", "")) -> section_e1
dbWriteTable(con_prod, table_id, section_e1, append = TRUE, overwrite = FALSE,
             row.names = FALSE, encoding = "UTF-8",
             field.types = c(deffacIP = "varchar(max)",
                             deffacPA = "varchar(max)",
                             deffacSA = "varchar(max)",
                             deffacSP = "varchar(max)"))
dbDisconnect(con_prod)  # moved: disconnect after the final write
|
734a93bc422feae3a0ced01e66145287555679a6 | 2ca1a0a0ad038cc82acc76eded9e0a2a3bccdfe3 | /cachematrix.R | 932572f0d762834bce6761f5ce1d25a092f1f00b | [] | no_license | libraguysgp/ProgrammingAssignment2 | 314c2acd71badfb386ae69d6f46494cbe73a64bc | ebf3d545b62cc6fac9d7b6f77558614b6ab8a3e0 | refs/heads/master | 2020-12-24T10:33:09.358813 | 2015-02-22T12:09:04 | 2015-02-22T12:09:04 | 30,877,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,547 | r | cachematrix.R | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based "cache matrix": stores a matrix together with a lazily
  # cached inverse. Replacing the matrix via set() invalidates the cache.
  cached.inverse <- NULL
  set <- function(y) {
    x <<- y
    cached.inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached.inverse <<- inverse
  getinverse <- function() cached.inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', reusing the cached
  ## inverse when one is already stored in the cache-matrix object.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
#Example usage:
#Use assignment2.R (test script) to simplify the tets process
#>source("cachematrix.R")
#>x = matrix(c(1,2,3,4), nrow=2, ncol=2)
#>m <- makeCacheMatrix(x)
#>m$get() # Returns original matrix
#[,1] [,2]
#[1,] 1 3
#[2,] 2 4
#>cacheSolve(m) # Computes, caches, and returns matrix inverse
#[,1] [,2]
#[1,] -2 1.5
#[2,] 1 -0.5
#>m$getinverse() # Retrieve matrix inverse
#[,1] [,2]
#[1,] -2 1.5
#[2,] 1 -0.5
#>cacheSolve(m) # Returns cached matrix inverse using previously computed matrix inverse getting cached data
#Getting cached data
#[,1] [,2]
#[1,] -2 1.5
#[2,] 1 -0.5
#>m$set(matrix(c(2,4,3,1), nrow=2, ncol=2)) # Modify existing matrix
#>m$get()
# [,1] [,2]
# [1,] 2 3
# [2,] 4 1
#Inversing the matrix in R
#>cacheSolve(m)
# [,1] [,2]
# [1,] -0.1 0.3
# [2,] 0.4 -0.2
#>m$getinverse() # Retrieve matrix inverse
# [,1] [,2]
# [1,] -0.1 0.3
# [2,] 0.4 -0.2
#>cacheSolve(m) # Returns cached matrix inverse using previously computed matrix inverse getting cached data
#Getting cached data
# [,1] [,2]
# [1,] -0.1 0.3
# [2,] 0.4 -0.2
|
7ed6bd3c68469c824334428e2a343f838e7e8a9a | 811703087cd53484d271f349178491e553be32fc | /docs/R_base/pacages/biomaRt.R | 3574c8159c77528e81def8e4ec007e4a73b1b42e | [
"Apache-2.0"
] | permissive | wan230114/BioNote | 35ea37d25d39c372ebcf93cc1875a5b081fccd81 | c5cfa684cad8aae8c90fc65f9932a802ff461b80 | refs/heads/master | 2023-01-18T18:56:27.025420 | 2023-01-12T18:40:40 | 2023-01-12T18:40:40 | 252,699,550 | 2 | 2 | null | 2020-04-04T05:04:39 | 2020-04-03T10:22:21 | null | UTF-8 | R | false | false | 594 | r | biomaRt.R | #!/usr/bin/env Rscript
library(biomaRt)
ensembl <- useMart("ensembl") # connect to a specified BioMart database
ensembl <- useDataset("hsapiens_gene_ensembl", mart = ensembl)
# use the hsapiens (human) dataset; alternatively set both in one call:
# ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
# Query gene coordinates and transcript lengths for three Ensembl gene IDs.
test <- getBM(
  attributes = c(
    "ensembl_gene_id", "start_position",
    "end_position", "ensembl_transcript_id",
    "transcript_length"
  ),
  filter = "ensembl_gene_id",
  values = c("ENSG00000288723", "ENSG00000288724", "ENSG00000288725"),
  mart = ensembl
)
test
|
5e55652e27f0924ff72d879acf010c4e8eb341e3 | 837d103e98a6216cdcdd87fe6378af1e68ce52fa | /man/fit_brms_npos_nneg_by_dose.Rd | 0a89ec545131f18f61d749002015a32091061481 | [] | no_license | momeara/MPStats | a64782d0d102d5ec5fcd97679d3616d59e6b0559 | 0ae07da4866a8a2e39e54bacad1e9501520e34e3 | refs/heads/master | 2022-07-26T03:35:00.158852 | 2022-07-13T20:51:51 | 2022-07-13T20:51:51 | 232,165,839 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 731 | rd | fit_brms_npos_nneg_by_dose.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_brms_hill_score_by_dose.R
\name{fit_brms_npos_nneg_by_dose}
\alias{fit_brms_npos_nneg_by_dose}
\title{brms model for the counts of positive and negative cells by compound dose}
\usage{
fit_brms_npos_nneg_by_dose(well_scores)
}
\arguments{
\item{well_scores}{tibble::tibble as output by read_well_scores}
}
\value{
list of brms::brmsfit objects one for each compound
}
\description{
Use the R brms package to fit dose-response data with a multiple Poisson model
}
\details{
W. Scott Comulada and Robert E. Weiss, On Models for Binomial Data with Random Numbers of Trials
Biometrics, 2007, 63(2): 610–617. doi:10.1111/j.1541-0420.2006.00722.x.
}
|
faced18c3dab8e61abd6416a1428446bd71edb2e | adb99c0ab8aa71dea797a75e87d6c08b86c0b2ae | /scripts/R/correr1.R | f27c7f433a153b74f9351fd501347b45226d6fe8 | [] | no_license | eastmallingresearch/Metabarcoding_pipeline | eb1ea98bd76586f3364b9435504737575d308321 | d68ea5ea759b88c79c4f154ffcf7cadd41e74094 | refs/heads/master | 2022-08-25T05:52:28.381825 | 2022-08-01T12:22:22 | 2022-08-01T12:22:22 | 99,205,781 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 337 | r | correr1.R | correr1 <- function(
  x,
  returnData=F
) {
  # Serial (lag) correlations: at lag k, the first n-k values of the
  # original series are paired with the values k steps later.
  # With returnData = TRUE, the paired values themselves are returned
  # (a list of two-column matrices, one per lag) instead of the
  # correlation vector.
  y <- x
  count<-1
  mycorr <- NULL
  # `x` shrinks by one element per pass, so each iteration is one lag
  # further; the loop stops once fewer than three values remain.
  while (length(x) >2) {
    if(returnData) {
      mycorr[[count]] <- cbind(y[1:(length(x)-1)],x[2:length(x)])
    } else {
      mycorr[count] <- cor(y[1:(length(x)-1)],x[2:length(x)],use="pairwise.complete.obs")
    }
    count <- count +1
    x<-x[-1]
  }
  return(mycorr)
}
|
c524ac9b13b1184f0c85505a63689090e2848c09 | 994ce442bffe1827f0885a96b12bb7bf4ec15f15 | /R/mixed_bridge_ordinal_none_serial.R | 1bccfca4530eae42a5d84ded290755d766571cee | [] | no_license | ozgurasarstat/mixed3 | 4aacd1ab3fd4fce402a5265f7d5e19956016ba39 | 3c4a0404efa18698b6a241afa86ec9ff76128c6a | refs/heads/master | 2020-03-23T03:07:46.631896 | 2020-01-04T11:42:38 | 2020-01-04T11:42:38 | 141,011,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,565 | r | mixed_bridge_ordinal_none_serial.R | mixed_bridge_ordinal_none_serial = "
functions{
//create cholesky decomposition of varcov matrix
matrix chol_cov(vector s_id,
vector t,
int mi,
real sigma2,
real delta2,
real kappa2){
matrix[mi, mi] mat2;
matrix[mi, mi] L;
for(i in 1:mi){
for(j in 1:mi){
if(s_id[i] == s_id[j]){
mat2[i, j] = (sigma2^2) * exp(-(fabs(t[i] - t[j]) * delta2)^kappa2);
}else{
mat2[i, j] = 0;
}
}
}
L = cholesky_decompose(mat2);
return L;
}
//inverse of the cdf of bridge
real inv_cdf_bridge(real x, real theta){
real out;
out = log( sin(theta * pi() * x) / sin(theta * pi() * (1 - x)) ) / theta;
return out;
}
}
data{
int<lower = 1> ntot; // total number of observations
vector[ntot] subj_id; // id column - second
vector[ntot] time; // time
int<lower = 0, upper = 5> y[ntot]; // responses
int<lower = 1> p; // number of columns of the x matrix
matrix[ntot, p] x; // desgin matrix for fixed effects
int<lower = 1> n_ec; // number of extended families
int<lower = 3> k; // number of categories for the ordinal variable
int ind_ec[n_ec, 2]; // matrix of indices for which observations belong to extended families
int nrepeat_ec[n_ec]; // number of observations that belong to extended families
real<lower = 0, upper = 2> kappa2;
}
transformed data{
matrix[ntot, p] xc;
vector[p] xmeans;
for(i in 1:p){
xmeans[i] = mean(x[, i]);
xc[, i] = x[, i] - xmeans[i];
}
}
parameters{
ordered[k - 1] alpha_c;
vector[p] beta;
vector[ntot] zstar;
real<lower = 0> sigma2;
real<lower = 0> delta2;
real<lower = 0> sd_v;
}
transformed parameters{
vector[ntot] z;
vector[ntot] v_vec;
real<lower = 0, upper = 1> phi_v;
phi_v = 1/sqrt(3*sd_v/(pi()^2) + 1);
for(i in 1:n_ec){
z[ind_ec[i, 1]:ind_ec[i, 2]] =
chol_cov(subj_id[ind_ec[i, 1]:ind_ec[i, 2]],
time[ind_ec[i, 1]:ind_ec[i, 2]],
nrepeat_ec[i],
sigma2,
delta2,
kappa2) *
zstar[ind_ec[i, 1]:ind_ec[i, 2]];
}
for(i in 1:ntot){
v_vec[i] = inv_cdf_bridge(Phi(z[i]), phi_v);
}
}
model{
vector[ntot] linpred = xc * beta + v_vec;
alpha_c ~ cauchy(0, 5);
beta ~ cauchy(0, 5);
sigma2 ~ cauchy(0, 5);
delta2 ~ cauchy(0, 3);
sd_v ~ cauchy(0, 5);
zstar ~ std_normal();
for(i in 1:ntot){
target += ordered_logistic_lpmf(y[i] | linpred[i], alpha_c);
}
}
generated quantities{
vector[k - 1] alpha = alpha_c + dot_product(xmeans, beta);
vector[k - 1] alphamarg = alpha * phi_v;
vector[p] betamarg = beta * phi_v;
real<lower = 0> sigmasq2 = sigma2^2;
}
"
|
5425155b06bf5e6efb89b64326f7b624fb81a5e9 | b2b5911175ab1b61bb3df44c616ce8c4cdaf882b | /process-carip-survey-data/carip.R | 059babbab2b3fe157423390d59fbc17aacc54da9 | [
"Apache-2.0"
] | permissive | bcgov/envreportbc-snippets | aaaa122b0dad157d8fea0c6e0f8b47436f37ea77 | 83b5205d8910778d802d24e68104a11abacf274b | refs/heads/master | 2023-02-22T10:17:39.153395 | 2023-02-16T18:47:50 | 2023-02-16T18:47:50 | 68,056,430 | 1 | 0 | Apache-2.0 | 2023-02-16T00:26:39 | 2016-09-12T23:29:14 | R | UTF-8 | R | false | false | 4,280 | r | carip.R | # Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
library(tidyverse)
## Use the csv file from Survey Monkey XLS export of 'All Individual Responses' - condensed
dir <- "process-carip-survey-data/data"
filename <- "2018 CARIP Climate ActionCarbon Neutral Progress Survey.csv"
file_path <- file.path(dir, filename)
data_year <- "2018"
## First row (header) contains the main questions
colnames_1 <- read_csv(file_path, col_names = FALSE, n_max = 1) %>%
  unlist()
## Second row contains the sub-questions/descriptions/options
colnames_2 <- read_csv(file_path, col_names = FALSE, n_max = 1, skip = 1) %>%
  unlist()
## Read in the data without the first two rows of header info
all_data <- read_csv(file_path, col_names = FALSE, skip = 2)
## Drop off the metadata questions (1-5) and create an empty integer column to store question numbers
q_labels_df <- bind_cols(question_text = colnames_1,
                         description = colnames_2) %>%
  slice(24:n()) %>%
  mutate(q_num = NA_integer_)
## Hacky loop to parse the data frame of questions and number the questions, starting at
## number 6 and incrementing by one at each non-NA question
format_q <- function(x) paste0("q", formatC(x, width = 4, flag = "0"))
q <- 6
q_labels_df$q_num[1] <- format_q(q)
for (r in seq_len(nrow(q_labels_df))[-1]) {
  if (!is.na(q_labels_df[r, "question_text"])) {
    q <- q + 1
  }
  q_labels_df$q_num[r] <- format_q(q)
}
## Reconstruct the questions and subquestions
## (sub_q numbers answer options within a question; "Other (please specify)"
##  options are tagged "other", and question text is filled down per group)
q_labels_df <- group_by(q_labels_df, q_num) %>%
  mutate(
    sub_q = if (n() > 1) {
      formatC(1:n(), width = 4, flag = "0")
    } else {
      NA_character_
    },
    sub_q = ifelse(description == "Other (please specify)", "other",
                   sub_q),
    question = ifelse(is.na(sub_q), q_num,
                      paste(q_num, sub_q, sep = "_")),
    question_text = ifelse(is.na(question_text), question_text[1],
                           question_text)
  )
## Extract the metadata (respondent information) columns from the full dataset
metadata <- all_data[, 1:23]
## Add metadata column names from headers, replace spaces with underscores
names(metadata) <- c(colnames_1[1:14], colnames_2[15:23])
names(metadata) <- gsub("\\s+", "_", names(metadata))
## Extract the question responses plus the Respondent ID, Local_Govt, and Member_RD column
data <- select(all_data, 1, 10, 11, X24:ncol(all_data)) %>%
  set_names(c(names(metadata)[1], "Local_Govt", "Member_RD",
              q_labels_df$question))
## gather the wide columns to long format, then join the question data to the responses.
data_long <- gather(data, key = "question", value = "response",
                    starts_with("q0")) %>%
  left_join(q_labels_df, by = "question") %>%
  mutate(q_num = as.integer(substr(q_num, 2, 5)),
         sub_q = ifelse(sub_q == "other", 0L, as.integer(sub_q))) %>%
  group_by(Respondent_ID, q_num) %>%
  mutate(
    question_type = case_when(
      grepl("other$", question) ~ "Multiple Choice - 'Other' (free text)",
      n() > 1 & nchar(description) > 1 ~ "Multiple Choice (multi-answer)",
      n() == 1 & description == "Response" ~ "Multiple Choice (single-answer)",
      n() == 1 & description == "Open-Ended Response" ~ "Open-ended (single-answer)",
      n() > 1 & grepl("^[1-9]$", description) & is.integer(sub_q) ~ "Open-ended (multi-answer)",
      TRUE ~ "Aaaargh"
    )
  ) %>%
  select(Respondent_ID, Local_Govt, Member_RD, question, q_num, sub_q,
         question_type, question_text, description, response) %>%
  arrange(Respondent_ID, question)
## Write the long-format responses and the respondent metadata to csv
write_csv(data_long,
          file.path(dir, paste0(data_year, "survey_monkey_data_long.csv")))
write_csv(metadata,
          file.path(dir, paste0(data_year, "survey_monkey_metadata.csv")))
|
cd39471b454d5f203ca0b9f9993adef0dd43405d | 987c78acdb6d8a3ab04f6e4c8e8d9f6caa9a1f8f | /wine/kernel.R | 8723fe1ce837b11b049b7cb5d4a699e36cb9987d | [] | no_license | mixify/kaggle | c2f9a2fd909e441d5c2daddc731762e2ce60e45f | 9e5c03e6013c4c71cad5b46d6a9da93f70cdb11f | refs/heads/master | 2020-05-05T10:59:02.914688 | 2019-05-13T06:30:16 | 2019-05-13T06:30:16 | 179,969,453 | 2 | 0 | null | null | null | null | UHC | R | false | false | 2,928 | r | kernel.R | #1번
# Read the red and white wine quality data sets (UCI, semicolon-separated).
red_data <- read.csv("winequality-red.csv", sep=";")
red_data
white_data <- read.csv("winequality-white.csv", sep=";")
white_data
red_data[!complete.cases(red_data),]# no missing values
white_data[!complete.cases(white_data),]# no missing values
opar <- par(mfrow = c(2,2))
plot(quality ~., data=red_data)
plot(quality ~., data=white_data)
# Question 2: stepwise model selection (both directions, forward, backward)
library(mlbench)
m_red <- lm(quality ~ ., data=red_data)
m_white <- lm(quality ~ ., data=white_data)
m_red_both <- step(m_red, direction="both")
null = lm(quality~1, data=red_data)
full = lm(quality~., data=red_data)
m_red_forward <- step(null, direction="forward",scope = list(lower=NULL, upper=full))
m_red_backward <- step(m_red, direction="backward")
m_white_both <- step(m_white, direction="both")
null = lm(quality~1, data=white_data)
full = lm(quality~., data=white_data)
m_white_forward <- step(null, direction="forward",scope = list(lower=NULL, upper=full))
m_white_backward <- step(m_white, direction="backward")
# Question 3: correlation structure of the 11 predictors
cor(red_data[,1:11])
symnum(cor(red_data[,1:11]))
# Question 4: model diagnostics and all-pairwise-interaction models
opar = par(mfrow=c(1,1))
plot(m_red_both)
plot(m_white_both)
red_inter <- lm(quality ~ (fixed.acidity+volatile.acidity+citric.acid+residual.sugar+chlorides+free.sulfur.dioxide+total.sulfur.dioxide+density+pH+sulphates+alcohol)^2, data=red_data)
red_inter
summary(m_red)
summary(red_inter)
white_inter <- lm(quality ~ (fixed.acidity+volatile.acidity+citric.acid+residual.sugar+chlorides+free.sulfur.dioxide+total.sulfur.dioxide+density+pH+sulphates+alcohol)^2, data=white_data)
white_inter
summary(m_white)
summary(white_inter)
# Question 5: outlier tests (car::outlierTest) and refits without the outliers
library(car)
outlierTest(m_red_both)
m_rm_out_red <- lm(formula(m_red_both), data=red_data[-833,])
outlierTest(m_white_both)
m_rm_out_white <- lm(formula(m_white_both), data=white_data[-c(4746,2782,3308,254,446),])
summary(m_red_both)
summary(m_rm_out_red)
summary(m_white_both)
summary(m_rm_out_white)
# Question 6: na_ave() below injects missing values and mean-imputes them
# Repeatedly (10x) knock out ~`percentage` of the rows' worth of cells at
# random positions in `data`, mean-impute the holes, refit
# lm(quality ~ .), and record each fit's R^2.
# Side effects (kept for compatibility with the script below): assigns the
# last fitted model to the global `m_na` and the vector of 10 R^2 values
# to the global `total_value`.
na_ave <- function(data, percentage)
{
  # Column means of the complete data, used to impute the injected holes.
  means <- colMeans(data)
  for (i in 1:10) {
    na_row <- sample(nrow(data), nrow(data) * percentage)
    na_col <- sample(ncol(data), nrow(data) * percentage, replace = TRUE)
    data[cbind(na_row, na_col)] <- NA
    for (j in seq_len(ncol(data))) {
      # BUG FIX: the original tested `is.na(na_red_data[, j])`, referencing
      # an undefined global (and wrong for the white-wine runs); impute the
      # NAs of *this* data set with its own column mean.
      data[is.na(data[, j]), j] <- means[j]
    }
    m_na <<- lm(quality ~ ., data)
    sum_data <- summary(m_na)
    if (i == 1)
      total_value <<- sum_data$r.squared
    else
      total_value <<- c(total_value, sum_data$r.squared)
  }
}
# Question 6 (cont.): effect of 1% / 5% / 10% injected missingness with
# mean imputation on model R^2, 10 repetitions each via na_ave(); the
# repetitions' R^2 values land in the global `total_value`.
#red_data
summary(m_red)
na_ave(red_data,0.01)
mean(total_value)
sd(total_value)
na_ave(red_data,0.05)
mean(total_value)
sd(total_value)
na_ave(red_data,0.1)
mean(total_value)
sd(total_value)
#white data
summary(m_white)
na_ave(white_data,0.01)
mean(total_value)
sd(total_value)
na_ave(white_data,0.05)
mean(total_value)
sd(total_value)
na_ave(white_data,0.1)
mean(total_value)
sd(total_value)
|
f36cf5a67833539526dfa4a3de6e845739f1ebc1 | 3493d9e324f8b63e22b81ad0c3e9b02b2aa55436 | /speciesByCountry.R | c5dd00c7da4fc5feb6893412c14daa88738d685f | [
"MIT"
] | permissive | WL-Biol185-ShinyProjects/endangered-species-trends | f3a58177617e885fb953b538cb5b3817d353dc20 | a940d43c69244941130cdff450bbd07df09ee8d5 | refs/heads/master | 2021-08-28T00:36:20.255779 | 2017-12-10T22:02:27 | 2017-12-10T22:02:27 | 105,674,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,078 | r | speciesByCountry.R | worldData <- read.table( "data/worldData.txt")
# UI definition for the species-status tab: a country picker in the
# sidebar plus the per-country plot the server renders as
# output$countryPlot.
countryPicker <- selectInput(
  inputId = "country",
  label = "Select a Country",
  choices = sort(unique(worldData$country))
)

statusTab <- tabPanel(
  "Species Status by Country",
  sidebarLayout(
    sidebarPanel(countryPicker),
    # Plot of the endangered species for the selected country
    mainPanel(plotOutput("countryPlot"))
  )
)
|
a3cf019ac5ac1ed86ed7d13eb5fb284fe978210d | 98692c9c8b74b475c2c2da41615e63e558f587e0 | /_shiny/bluetooth_tt/blue.R | ad8e282352a96e4f14b941c16c245c14a8e51004 | [] | no_license | cdmuhs/portalData | 4a4991c9642439848727fa88c48fb3a67866e788 | 738da597c5ad3f42517586e356c68de5e548e57f | refs/heads/master | 2021-08-07T21:19:20.081869 | 2015-10-09T20:52:44 | 2015-10-09T20:52:44 | 109,419,542 | 0 | 0 | null | 2017-11-03T16:41:12 | 2017-11-03T16:41:12 | null | UTF-8 | R | false | false | 4,378 | r | blue.R | library(lubridate)
library(shiny)
library(shinyapps)
library(ggvis)
library(knitr)
# NOTE(review): hard-coded author path -- this app only runs as-is from
# the original machine.
setwd("/Users/bblanc/OneDrive/_ODOT/_Portal/investigations/bluetooth/")
# One row per Bluetooth travel-time match for July 2015.
blue = read.csv("july2015.csv",stringsAsFactors = FALSE)
# Parse "m/d/Y h:MM AM/PM" start times, then bucket them into 1-hour bins.
blue$time = as.POSIXct(strptime(blue$starttime, format ="%m/%d/%Y %I:%M %p"))
blue$period = cut(blue$time,breaks="1 hour")
# Hour-of-day for each record, used by the app's hour filter.
# NOTE(review): `period` is the factor produced by cut(); confirm that
# lubridate::hour() parses its labels as intended.
blue$hour = hour(blue$period)
# Travel time in minutes (the source column is in seconds).
blue$tt_min = blue$traveltime/60
shinyApp(
ui = fluidPage(
titlePanel(title="Travel time histogram for July 2015 heading from Powell & 8th to Powell & 77th"),
sidebarLayout(
sidebarPanel(sliderInput("bin","Bin Width",min=0.5,max=5,value =1,step = 0.5),
sliderInput("hour","Hour of day",min=1,max=24,value=8, step = 1)),
mainPanel(
uiOutput("ggvis_ui"),ggvisOutput("ggvis")
)
)
),
server = function(input,output,session){
hplot = reactive({
hourSelect = input$hour
binSelect = input$bin
sub = subset(blue,blue$hour==hourSelect)
percentBins = quantile(sub$tt_min,probs = seq(0,1.0,0.05))
binNums = data.frame(matrix(nrow=length(percentBins),ncol=2))
colnames(binNums)=c("name","num")
binNums$name[1:20]= paste0(names(percentBins)[1:20],"-",names(percentBins)[2:21])
binNums$name[21]= "100%"
binNums$num=as.numeric(rownames(binNums))
for (i in 1:nrow(sub)){
obs = sub$tt_min[i]
if(obs==max(percentBins[length(percentBins)])){
binNames = "95%-100%"
}else if(obs ==min(percentBins[1])){
binNames ="0%-5%"
}
else{
lower = names(percentBins[percentBins == max(percentBins[obs >=percentBins])])
upper = names(percentBins[percentBins == min(percentBins[obs <percentBins])])
binNames = paste0(lower,"-",upper)
}
sub$pBin[i]= binNames
sub$pBinNum[i] = binNums$num[binNums$name == binNames]
}
sub$pBin=factor(sub$pBin)
tFunk = function(df){
paste0("Percentile: ",binNums$name[df$pBinNum])
}
colRamp = colorRampPalette(colors=c("white","red"))
h <- sub %>% ggvis(x = ~tt_min, fill=~pBinNum) %>%
scale_numeric("fill",label="Percentile Bin", domain=c(1,20),range =colRamp(20)[c(1,20)])%>% group_by(pBinNum) %>%
layer_histograms(width=binSelect) %>%
add_axis("x", title = "Travel Time (minutes)")%>%
add_axis("y", title = "Frequency") %>% add_tooltip(tFunk)
return(h)
})
hplot %>% bind_shiny("ggvis", "ggvis_ui")
}
)
###Applying ODOT ITS Bluetooth data filter
# Raw Bluetooth travel-time matches for each direction of travel.
east = read.csv("eastboundJuly2015.csv")
west = read.csv("westboundJuly2015.csv")
# Segment length between the two Bluetooth readers.
dist = 3.6 #miles
# Implied speed: miles over (traveltime seconds converted to hours).
east$speed_mph = dist/(east$traveltime/3600)
west$speed_mph = dist/(west$traveltime/3600)
# Clean Bluetooth travel-time observations for a segment of length `dist`
# (miles), following the ODOT ITS filtering approach:
#   * drop records slower than `min_speed_mph` (stopped / detoured devices),
#   * drop records faster than `max_speed_mph` (implausible matches),
#   * drop records above mean + 1.65 * sd of the raw travel times
#     (a one-sided outlier cut).
#
# Args:
#   data:          data frame with a `traveltime` column in seconds.
#   dist:          segment length in miles.
#   min_speed_mph: slowest plausible speed; default 1 mph, as before.
#   max_speed_mph: fastest plausible speed; default 35 + 15 = 50 mph
#                  (presumably posted speed plus a buffer), matching the
#                  original hard-coded value.
#
# Returns: the filtered data frame, with a `speed_mph` column added.
tt_filter <- function(data, dist, min_speed_mph = 1, max_speed_mph = 35 + 15) {
  data$speed_mph <- dist / (data$traveltime / 3600)
  # Travel-time bounds (seconds) implied by the speed limits.
  maxFilter <- floor((dist / min_speed_mph) * 3600)
  minFilter <- floor((dist / max_speed_mph) * 3600)
  # One-sided outlier threshold on the raw travel times.
  upper <- mean(data$traveltime) + 1.65 * sd(data$traveltime)
  # Explicit return (the original relied on the value of an assignment).
  subset(data,
         data$traveltime >= minFilter &
           data$traveltime <= maxFilter &
           data$traveltime <= upper)
}
require(dplyr)
setwd("~/_ODOT_Portal/investigations")
####Connect to Portal db
####Make sure you VPN into cecs network
db_cred = fromJSON(file="/Users/bblanc/OneDrive/_ODOT/_Portal/investigations/db_credentials.json")
con <- dbConnect(dbDriver("PostgreSQL"), host=db_cred$db_credentials$db_host, port= 5432, user=db_cred$db_credentials$db_user, password = db_cred$db_credentials$db_pass, dbname=db_cred$db_credentials$db_name)
db = src_postgres(dbname = db_cred$db_credentials$db_name,
host = db_cred$db_credentials$db_host,
port = 5432,
user = db_cred$db_credentials$db_user,
password = db_cred$db_credentials$db_pass,
options = "-c search_path=odot")
tbls = src_tbls(db)
ttseg_tbls = tbls[grep("ttseginventory",tbls)]
ttseg_tblRef = data.frame(matrix(nrow=length(ttseg_tbls),ncol=2))
colnames(ttseg_tblRef)=c("tbl_name","date")
ttseg_tblRef$tbl_name=ttseg_tbls
ttseg_tblRef$date = as.Date(paste0(substr(ttseg_tblRef$tbl_name,16,19),"-",
substr(ttseg_tblRef$tbl_name,20,21),"-",
substr(ttseg_tblRef$tbl_name,22,23)))
trav_tbls = tbls[grep("ttdcutraversals",tbls)]
test = tbl(db,"ttseginventory_20150615")
testdf = data_frame(tbl)
|
3867f4b236a6e32f1e09b9d951765116382a9733 | 8fc0e478378f300459792d043c2a36cfc8cbdb96 | /R/NewFunction.R | 693582524c81fa5479ba395176c9b2966dc6c9ff | [] | no_license | ajrgodfrey/BrailleR | 9be1b0e5b19ec51295e00e2d9231b41d8b2bd4c8 | 24c242623ea35ad8ae091b0f15db3dfc49a71ecc | refs/heads/master | 2023-07-10T05:59:31.409627 | 2023-07-09T07:42:17 | 2023-07-09T07:42:17 | 48,786,500 | 119 | 26 | null | 2023-07-09T07:42:19 | 2015-12-30T06:44:20 | HTML | UTF-8 | R | false | false | 1,057 | r | NewFunction.R |
NewFunction =
function(FunctionName, args = NULL, NArgs = 0) {
if (is.numeric(args[1])) {
NArgs = args[1]
args = NULL
}
if (is.null(args)) {
args = rep("", NArgs)
} else {
NArgs = length(args)
}
Filename = paste0(FunctionName, ".R")
cat(paste0(
"#' @rdname ", FunctionName, "\n#' @title
\n#' @aliases ",
FunctionName,
"
\n#' @description
\n#' @details
\n#' @return
\n#' @seealso
\n#' @author ",
getOption("BrailleR.Author"),
"
\n#' @references
\n#' @examples
\n#' @export ", FunctionName,
"
"), file = Filename, append = FALSE)
if (NArgs > 0) {
cat(paste("#' @param", args, "\n"), file = Filename, append = TRUE)
}
cat(paste0("\n", FunctionName, "= function(",
paste(args, collapse = ", "), "){
}
"), file = Filename,
append = TRUE)
.FileCreated(Filename, "in the current working directory.")
return(invisible(NULL))
}
|
a6934d19de6fb40e4026af90a4925006953d5c13 | 8eb59b62eea4f9f81c530152799b73febee12c9f | /man/check_impl_error.Rd | 2f605c494a061930df12f4ffee1658bcbe3e4aa4 | [] | no_license | realsmak88/SSMSE | 2514ddc8b5bce8c5b9fed47220f983140cacb1ac | e2d24b6c59ceb82b732eed7c6dca4a0e5d50f341 | refs/heads/main | 2023-06-24T15:53:21.396116 | 2021-06-09T00:16:18 | 2021-06-09T00:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 559 | rd | check_impl_error.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/impl_error.R
\name{check_impl_error}
\alias{check_impl_error}
\title{check that user input implementation error vectors are the correct length}
\usage{
check_impl_error(length_impl_error_seq, expected_length)
}
\arguments{
\item{length_impl_error_seq}{The length of the input implementation error sequence}
\item{expected_length}{the expected length of the implementation error sequence}
}
\description{
check that user input implementation error vectors are the correct length
}
|
6730e9bdfc01b83c36c0012971c5b6e012ed1943 | 5e6caa777731aca4d6bbc88fa92348401e33b0a6 | /R/weight_nIndivInterval.R | a3922d0ff4c18cae1c83d6d98607a4426d1a0bdb | [
"MIT"
] | permissive | metamelb-repliCATS/aggreCAT | e5c57d3645cb15d2bd6d2995992ad62a8878f7fb | 773617e4543d7287b0fca4a507ba4c94ee8f5e60 | refs/heads/master | 2023-05-22T19:51:20.949630 | 2023-03-31T05:21:39 | 2023-03-31T05:21:39 | 531,484,296 | 6 | 1 | NOASSERTION | 2023-03-31T05:03:05 | 2022-09-01T11:15:36 | R | UTF-8 | R | false | false | 2,173 | r | weight_nIndivInterval.R | #' @title
#' Weighting method: Individually scaled interval widths
#'
#' @description
#' Weighted by the rescaled interval width within individuals across claims.
#'
#' @details
#' \loadmathjax
#' This function is used inside [IntervalWAgg] for aggregation types `"IndIntWAgg"`,
#' `"IndIntAsymWAgg"` and `"KitchSinkWAgg"`. Interval width weights are rescaled
#' relative to an individual's interval widths across all claims.
#'
#' \mjdeqn{w\_nIndivInterval_{i,c} = \frac{1}{\frac{U_{i,c}-L_{i,c}}{\max\left(\{(U_{i,d}-L_{i,d}):d=1,...,C\}\right)}}}{ascii}
#'
#' @param expert_judgements A dataframe in the form of [data_ratings]
#'
#' @export
weight_nIndivInterval <- function(expert_judgements) {
  expert_judgements %>%
    tidyr::pivot_wider(names_from = element, values_from = value) %>%
    dplyr::group_by(user_name) %>%
    # widest interval each user gave across all of their claims; this is
    # the per-user denominator for the rescaling below
    dplyr::summarise(max_agg =
                       abs(max(three_point_upper - three_point_lower,
                               na.rm = TRUE))) %>%
    dplyr::full_join(expert_judgements, by = "user_name") %>%
    tidyr::pivot_wider(names_from = element, values_from = value) %>%
    # per-claim interval width; both numerator and denominator are
    # guarded against zero (replaced by machine epsilon) so the weight
    # never divides by zero
    dplyr::mutate(int_agg = abs(three_point_upper - three_point_lower),
                  max_agg = dplyr::if_else(max_agg == 0,
                                           .Machine$double.eps,
                                           max_agg),
                  int_agg = dplyr::if_else(int_agg == 0,
                                           .Machine$double.eps,
                                           int_agg),
                  # inverse of the re-scaled interval width
                  agg_weight = 1 / (int_agg / max_agg)) %>%
    tidyr::pivot_longer(
      c(three_point_upper,
        three_point_lower,
        three_point_best),
      names_to = "element",
      values_to = "value"
    ) %>%
    dplyr::select(-max_agg, -int_agg) %>%
    # one weighted row per claim: keep only the best estimate
    dplyr::filter(element == "three_point_best")
}
|
d72721b482cc85cd0d39b1ac7735c114d3dfbe7a | bade93cbfc1f25160dfbe9493bfa83f853326475 | /doc/mwc/doc/coherent/rel.notes/x/x1.1/thirdparty.r | 86a94081c0281af7c914e2bc123957f50a00a76f | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gspu/Coherent | c8a9b956b1126ffc34df3c874554ee2eb7194299 | 299bea1bb52a4dcc42a06eabd5b476fce77013ef | refs/heads/master | 2021-12-01T17:49:53.618512 | 2021-11-25T22:27:12 | 2021-11-25T22:27:12 | 214,182,273 | 26 | 6 | null | null | null | null | UTF-8 | R | false | false | 4,307 | r | thirdparty.r | .ds TI "RELEASE NOTES"
.ds TL "Additional Software"
.NH "Software for X Windows"
.PP
Once you have the X Window System up and running on your computer,
you are going to want to explore the wealth of software that is available.
Many of the most popular and useful X programs can be purchased from Mark
Williams Company and from other suppliers.
You can also download these packages for free from the Mark Williams
Bulletin Board, and from other sources on the Internet.
.PP
The following some of the software you can obtain for your \*(CO system.
.SH "The COHERENT Club"
.PP
The \*(CO Club offers great savings on third-party software.
For a one-time payment of $49.95,
you can get significant discounts from Mark Williams Company
on such third-party software products as WordPerfect,
other \*(CO products sold by Mark Williams, and discounts on future
enhancements and updates to \*(CO.
The order form at the back of this booklet shows some of the discounts
you can receive
.PP
To join, fill out the form at the end of these notes, or call Mark Williams
Company at 1-800-636-6700 or 1-708-291-6700.
.SH Xware
.PP
The X Window System has a wealth of software available for it.
To help make this software available to you under \*(CO,
Mark Williams Company offers
.BR Xware !
.PP
Each Xware package is a collection of carefully selected X programs.
Almost every program comes with both an executable binary and source
code, plus rewritten and formatted manual pages that you can read with
the \*(CO
.B man
command.
.PP
Six Xware packages are available to you:
.IP "\fBXware 1\fR: Window Managers"
This package offers the window managers \*(OL, from Sun Microsystems; and
.BR fvwm ,
a small, efficient window manager that looks like Motif.
Also included are several hundred fonts, to further improve the appearance
of your X environment.
.IP "\fBXware 2\fR: Games\fR"
This package is just for fun.
Battle deadly tanks, struggle with your computer for control of the world,
play Mah-Jongg and solitaire \(em all in the X Windows graphical
environment.
.IP "\fBXware 3\fR: Graphics\fR"
This package offers a wealth of graphics programs for X.
View GIF files, display animations, model and display the evolution of
an imaginary planet, plug entertaining screen-savers into your system \(em
and much more.
Also included are a generous selection of GIF images from Project Apollo
and from the newly repaired Hubble Space Telescope.
.IP "\fBXware 4\fR: Tools and Utilities\fR"
This package assembles a number of helpful utilities.
Included are a powerful spreadsheet, including matrix algebra functions
and graphing; an interactive file manager;
utilities for attaching notes to your screen; tools for displaying system
load, and for viewing manual pages interactively; and more.
.IP "\fBXware 5\fR: Development Tools\fR"
This package offers Tcl, the hot new development language; and Tk,
a large collection of widgets and function that you can use interactively.
Also included are
.BR wish ,
the interactive shell built from Tcl and Tk; and
.BR xf ,
an interactive tool with which you can easily build X front ends for your
programs.
You can also use Tcl with your non-X programs.
.IP "\fBXware 6\fR: Ghostscript\fR"
With this package, you can display your PostScript files and clip art
directly within an X window, without having to print them.
Also included are drivers for a variety of output devices, including
a number of printers.
.PP
To order an Xware package, use the order form at the back of this booklet,
or call Mark Williams Company.
.PP
.SH "Freeware and Shareware"
.PP
The University of Alaska's Internet site
.B raven
maintains an archive of public-domain and shareware software
for X that has been ported to \*(CO.
You can access this site via the Internet and download
software for the price of the connection.
.PP
To access
.BR raven ,
you must have access to a system that
connects to the Internet via TCP/IP.
When you log into this system, type:
.DM
ftp raven.alaska.edu
.DE
.PP
All files are under the directory
.BR /pub/coherent .
.PP
You can also download X software for free from the Mark Williams Bulletin
Board, or from other bulletin boards maintained around the world by
\*(CO users.
For details on how to contact these boards, see the release notes that
came with your copy of \*(CO.
|
656bc5d09c9808e735c94f66c9b19c7297c78649 | edab67428b28fa49470058fcefa7c18ecd13b59c | /man/match_dir_edges.Rd | 4488c8373972971473269c2915e5d148daf2e1e8 | [] | no_license | gekepals/pcabs | f8cae9bfc1eb4e56aece7dde44a3e544e7ef5758 | 37b5153e0b8275679150347f1d6a46ceb9de973c | refs/heads/master | 2020-05-30T06:27:42.945043 | 2019-06-14T12:36:30 | 2019-06-14T12:37:02 | 189,580,073 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,656 | rd | match_dir_edges.Rd | \name{match_dir_edges}
\alias{match_dir_edges}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{match_dir_edges
}
\description{
Function to check if variables in leftlist are in either absgroup1 or absgroup2.
This is necessary, otherwise there has been made a mistake in the given absgroups.
}
\usage{
match_dir_edges(leftlist, rightlist, absgroup1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{leftlist}{
%% ~~Describe \code{leftlist} here~~
}
\item{rightlist}{
%% ~~Describe \code{rightlist} here~~
}
\item{absgroup1}{
%% ~~Describe \code{absgroup1} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (leftlist, rightlist, absgroup1)
{
result <- "error"
group1 <- FALSE
group2 <- FALSE
for (i in leftlist) {
if (i \%in\% absgroup1) {
group1 <- TRUE
}
else {
group2 <- TRUE
}
}
if (group1 && group2) {
return(result)
}
if (group1) {
for (i in rightlist) {
if (i \%in\% absgroup1) {
group1 <- FALSE
}
else {
group2 <- TRUE
}
}
if (group1 && group2) {
cat("Nice! The abstraction matches with the pattern.")
result <- "left"
return(result)
}
}
else if (group2) {
for (i in rightlist) {
if (i \%in\% absgroup1) {
group1 <- TRUE
}
else {
group2 <- FALSE
}
}
if (group1 && group2) {
      cat("Nice! The abstraction matches with the pattern.")
result <- "right"
return(result)
}
else {
return(result)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
464076118a7a13bd5fc74bb5332645f31a8b85f5 | 3923fcfc18fa8b8a98aae15e89a1f8ec52f0938d | /tests/testthat.R | 65a02db01fe31054032ddc6ff793a1c548de4ee8 | [] | no_license | cran/tidysynth | fc346e3fd188d26a0bf46521342ed677e68cd96c | b6e990ee54079fe9edba6827fbf5e93376e3bc4b | refs/heads/master | 2023-05-26T16:29:21.881582 | 2023-05-21T06:10:05 | 2023-05-21T06:10:05 | 334,224,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(tidysynth)
test_check("tidysynth")
|
fbcba751baeeed2864b32ceffab90a4231ecddf8 | 4e367a32e74cbe87cca102e82eedf06b816fe235 | /man/htd.holdout.Rd | 75feb194a291b9767bf9aece301be2d6d4bf2df1 | [] | no_license | cran/HEMDAG | edb61a5f2682a7623219caf12d7cff5a0bc03837 | 3fb7efa2291779d5553898103f4ba9f85bf31e92 | refs/heads/master | 2021-07-23T10:38:59.150630 | 2021-02-12T14:00:06 | 2021-02-12T14:00:06 | 100,073,247 | 0 | 4 | null | null | null | null | UTF-8 | R | false | true | 1,555 | rd | htd.holdout.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/htd.dag.R
\name{htd.holdout}
\alias{htd.holdout}
\title{HTD-DAG holdout}
\usage{
htd.holdout(S, g, testIndex, norm = FALSE, norm.type = NULL)
}
\arguments{
\item{S}{a named flat scores matrix with examples on rows and classes on columns.}
\item{g}{a graph of class \code{graphNEL}. It represents the hierarchy of the classes.}
\item{testIndex}{a vector of integer numbers corresponding to the indexes of the elements (rows) of the scores matrix \code{S} to be used in the test set.}
\item{norm}{a boolean value. Should the flat score matrix be normalized? By default \code{norm=FALSE}.
If \code{norm=TRUE} the matrix \code{S} is normalized according to the normalization type selected in \code{norm.type}.}
\item{norm.type}{a string character. It can be one of the following values:
\enumerate{
\item \code{NULL} (def.): none normalization is applied (\code{norm=FALSE})
\item \code{maxnorm}: each score is divided for the maximum value of each class;
\item \code{qnorm}: quantile normalization. \pkg{preprocessCore} package is used;
}}
}
\value{
A matrix with the scores of the classes corrected according to the \code{HTD-DAG} algorithm. Rows of the matrix are shrunk to \code{testIndex}.
}
\description{
Correct the computed scores in a hierarchy according to the \code{HTD-DAG} algorithm applying a classical holdout procedure.
}
\examples{
data(graph);
data(scores);
data(test.index);
S.htd <- htd.holdout(S, g, testIndex=test.index, norm=FALSE, norm.type=NULL);
}
|
609afadb363f42bc834acbe7970a5486b404ede8 | 9e72f2d88e396432a7bdf217c8408f8a1fff02e8 | /190117_visulization_02.R | b9f9f49a9e15e2a5521a455c72e0b5a7a9d0de12 | [] | no_license | SeokHyeon-Hwang/R_data_analysis | 271cdc33b601d0cc61788e4a0fc1e51795daccbd | 61c4af51e1cac736a0290c6ac6c2dc2a927256f1 | refs/heads/master | 2021-07-11T23:02:26.650102 | 2019-03-05T04:52:33 | 2019-03-05T04:52:33 | 148,569,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,097 | r | 190117_visulization_02.R |
library(ggplot2)
library(MASS)
data('Cars93')
car<-Cars93
colnames(car)
head(car)
p<- ggplot(car, aes(x=Price, y=Horsepower))+
geom_jitter(aes(colour =DriveTrain))
p
## 타이틀
## ggtitle()
p1 = p+ggtitle('Jitter Plot for Cars93 Dataset')
p1+ theme(plot.title = element_text(size=15,
color='darkblue',
hjust=.5))
# x축, y축 레이블을 한글로 변경
p1 + labs(x='평균', y='최대마력')
# 파일
ggsave('axis_change.jpg', dpi=300)
# x축 범위를 설정
p1+labs(x='평균', y='최대마력') + lims(x=c(0,40))
p1 + labs(x='평균', y='최대마력')+ lims(x=c(0,60))
# 10, ..., 60
p1 + scale_x_continuous(breaks=c(10,20,30,40,50,60))
ggsave('axis_breaks.jpg', dpi=300)
# 10, 20, 30, ..., 60 앞에 $ 붙이기
p2 = p1+scale_x_continuous(breaks=c(10, 20,30,40,50,60),
labels=paste0('$', c(10,20,30,40,50,60)))
p2
# x축을 위로 올리기
p2 +scale_x_continuous(position='top')
# 범례 위치 바꾸기
p2 +theme(legend.position=c(.85, .2))
# 그래프에 글자, 도형, 선 넣기
p2 + annotate('text', x=52, y=100, label='Price Limit\nLine')+
annotate('rect', xmin=0, xmax=40, ymin=0, ymax=250, alpha=.2,
fill='skyblue')+
annotate('segment', x=60, xend=60, y=0, yend=300, colour='black', size=1)
# price와 horsepower 바꾸기
p2 + coord_flip()
# 흑백 표시
p2 + scale_color_grey()
# geom_jitter
library(ggplot2)
head(midwest)
head(midwest[, c('state', 'poptotal')])
ggplot(midwest, aes(state, percollege))+geom_point()+
geom_jitter()
ggplot(midwest, aes(state, percollege)) +
geom_jitter()
# 높이 넓어짐: height, 너비 넓어짐 : width
ggplot(midwest, aes(state, percollege))+
geom_jitter(height=.3)
ggplot(midwest, aes(state, percollege))+
geom_jitter(height=.1)
ggplot(midwest, aes(state, percollege))+
geom_jitter(height=.9)
# 색깔 바꾸기
ggplot(midwest, aes(state, percollege))+
geom_jitter(data=midwest, aes(color=inmetro==1))
# 모양 바꾸기
ggplot(midwest, aes(state, percollege))+
geom_jitter(data=midwest, aes(color=inmetro==1, shape=percprof>mean(percprof)))
# 02
library(ggplot2)
library(ggmap)
library(MASS)
data(Cars93)
car<-Cars93
str(car)
summary(car)
p1<-ggplot(data=car, aes(x=Manufacturer, y=Horsepower))
p1<-p1+geom_point()
p1
p1<-p1+ggtitle('Plot of Manufacturer vs Horsepower')
p1
# 03
rm(list=ls())
install.packages('data.table')
library(data.table)
library(ggplot2)
library(dplyr)
df<-read.csv('C:/Users/ktm/Downloads/train_1000m.csv')
str(df)
table(df$is_attributed)
# filter : raw, select: col
df_sel <- df %>% select(ip, app, device, os, channel, is_attributed)
head(df_sel, 20)
# 상위 10개 ip확인
ip_is_attr<-df_sel %>%
filter(!is.na(is_attributed)) %>%
group_by(ip) %>%
summarise(sum_is_attr1=sum(is_attributed), cnt=n())
ip_is_attr
#
top10 <- ip_is_attr %>%
arrange(desc(cnt)) %>%
head(10)
top10
top10$ip <- as.factor(top10$ip)
#
q<-ggplot(top10, aes(x=ip, y=cnt))
q+geom_boxplot()
p<-ggplot(top10, aes(x=reorder(ip, cnt), y=cnt), fill=ip)+
geom_bar(stat='identity')
pl1 = p+coord_flip()
pl1
p2=ggplot(top10, aes(x=reorder(ip, cnt), y=sum_is_attr1))+
geom_bar(stat='identity')
pl2 = p2+coord_flip()
pl2
install.packages('gridExtra')
library(gridExtra)
grid.arrange(pl1, pl2, ncol=2, nrow=1)
# 04
# Drawing a relationship graph (igraph demo: the vector lists edges
# pairwise, i.e. 1-2, 2-3, 2-4, 1-4, 5-5, 3-6, 3-3).
library(igraph)
g1<-graph(c(1,2,2,3,2,4,1,4,5,5,3,6,3,3))
plot(g1)
# Student (학생) / professor (교수) pairs from the clustering exercise.
df<-read.csv('C:/Users/ktm/Downloads//clustering.csv')
str(df)
summary(df)
graph<-data.frame(학생=df$학생, 교수=df$교수)
library(stringr)
# Convert the student -> professor edge list into a directed igraph graph.
df<-graph.data.frame(graph, directed=T)
is(df)
plot(df)
str(df)
# Names of the vertices
gubun1<-V(df)$name
gubun1
# Re-plot the visualisation with smaller elements.
plot(df, layout=layout.fruchterman.reingold,
     vertex.size=2, # node size
     edge.arrow.size=.05, # arrow size
     vertex.color='green', # node colour
     vertex.label=NA)
# Distinguish the two roles by colour: vertices whose name equals 'S'
# in red, all others in green.  A vectorised ifelse() replaces the
# original element-by-element c() growth inside a loop.
colors <- ifelse(gubun1 == 'S', 'red', 'green')
# BUG FIX: the original passed the literal string 'colors' to
# vertex.color, so the colour vector built above was never used and every
# vertex got the same colour.  Pass the vector itself instead.
plot(df, layout=layout.fruchterman.reingold,
     vertex.size=2, # node size
     edge.arrow.size =.05, # arrow size
     vertex.color = colors, # per-vertex colours built above
     vertex.label=NA)
# Size and colour every vertex by role in one vectorised pass:
# 'S' vertices become small light-green dots, everything else a larger
# dark-green dot.
role_is_s <- gubun1 == 'S'
sizes <- ifelse(role_is_s, 2, 4)
colors <- ifelse(role_is_s, 'green', 'darkgreen')
plot(df, layout=layout.fruchterman.reingold,
     vertex.size=sizes, # node size
     edge.arrow.size = .03, # arrow size
     vertex.color=colors, # node colour
     vertex.label=NA)
install.packages('network')
install.packages('sna')
install.packages('visNetwork')
install.packages('threejs')
install.packages('ndtv') |
c2853f66a713272e5d66ca281b589f174fb151dd | 117ed25278efddda7cf416d9999fbbcc0571d71a | /gganimateSoccer.R | f47a2e4eb131467d91a3e01ce79c51b89dcd082b | [] | no_license | TJMac93/gganimateSoccer | 909ac9bf57ae04379882886009f579db307972c7 | cd71feb8e90545bb1ae58b70a3bc6703c46b9344 | refs/heads/master | 2022-06-16T04:32:41.487506 | 2020-05-09T13:20:47 | 2020-05-09T13:20:47 | 262,569,331 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,379 | r | gganimateSoccer.R | # Metrtica data
# https://github.com/metrica-sports/sample-data
library(ggsoccer)
library(gganimate)
library(ggplot2)
library(av)
library(dplyr)
library(lubridate)
# Read in data
# Tracking Data
awayTrack <- read.csv("AwayTracking.csv", stringsAsFactors = F, skip = 1)
homeTrack <- read.csv("HomeTracking.csv", stringsAsFactors = F, skip = 1)
events <- read.csv("Event.csv", stringsAsFactors = F)
# goal at 91.56s, passage of play starts at 85.72 - found manually in excel
# Away tracking data
oneNilA <- subset(awayTrack, Time..s. > 85.72 & Time..s. < 92.36)
# Home tracking data
oneNilH <- subset(homeTrack, Time..s. > 85.72 & Time..s. < 92.36)
# All tracking data
oneNil <- dplyr::full_join(oneNilA, oneNilH)
# drop colums for players who are not on the pitch
# https://stackoverflow.com/a/34903938/10575353
subs <- oneNil[1,] == "NaN" # Returns boolean for if their coordinates are not a number
oneNil <- oneNil[, !subs, drop = FALSE] # in oneNil, for every row, if subs is FALSE, remove
# check event data for same time period
oneNilEvent <- subset(events, Start.Time..s. > 84 & End.Time..s. < 93)
# For the plot, we need players in a table like in this example
# https://www.rostrum.blog/2020/05/02/aguerooooo/
# Need to make columns for X, Y, Player Name, and Frame number
# Alternatively use one geom_point for each player to include in the animation
# This is what we will use
# Metrica values are between 0 and 1. We need between 0 and 100. Multiply co-ordinate values by 100 to get correct scale
oneNil[,4:49] <- oneNil[,4:49] * 100
# change time to get clock - will be used in animation
oneNil$clock <- floor(oneNil$Time..s.)
oneNil$clock <- seconds_to_period(oneNil$clock)
oneNil$clock <- paste0(as.character(minute(oneNil$clock)), ":", as.character(second(oneNil$clock)))
# We can see which players we need by looking at the event
unique(oneNilEvent$From)
unique(oneNilEvent$To)
# Player Numbers 1-14 are home, 15+ are away
# Players named in event are only those involved in passage of play leading to the goal
# Frame is apparently a computed variable, so gganimate doesn't like it; add a new frame variable for transitions.
# If you get an error that the column lengths are different, this is why.
# https://github.com/thomasp85/gganimate/issues/122
oneNil$frameCount <- c(1:length(oneNil$Period)) # - possibly not necessary, error still shows when using base animation,
# but animate() seems to work
# plot
plot <-
ggplot(oneNil) +
annotate_pitch(
colour = "white", # Pitch lines
fill = "#7fc47f" # Pitch colour
) +
theme_pitch() + # removes xy labels
coord_cartesian( # crop pitch to limits, works best inside coord_cartesian rather than
xlim = c(45, 103), # just using xlim and ylim, not sure why
ylim = c(-3, 103)
) +
geom_point( # add ball location data
aes(x = BallX, y = BallY),
colour = "black", fill = "white", pch = 21, size = 4
) +
# HOME players
# add player6 location data
geom_point(
aes(x = Player6X, y = Player6Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add player9 location data
geom_point(
aes(x = Player9X, y = Player9Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add player10 location data
geom_point(
aes(x = Player10X, y = Player10Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add title/subtitle/caption
labs(
title = "Home [1] - 0 Away",
subtitle = "Player9 Goal - 1'",
caption = "Made by @statnamara | Data source: Metrica"
) +
# Add clock to top left
geom_label(aes(x = 50,
y = 103,
label = clock),
size = 7) +
theme(title = element_text(face = "italic", size = 14),
panel.border = element_rect(colour = "black", fill=NA, size=1),
)
# shows static plot with all frames in one
plot
plot <- plot +
transition_states(
Frame, # variable used to change frame
state_length = 0.01, # duration of frame
transition_length = 1, # duration between frames
wrap = FALSE # restart, don't loop animation
)
animate(plot,
duration = 9,
fps = 30,
detail = 30,
width = 1000,
height = 700,
end_pause = 90,
# renderer = av_renderer() # for save as mp4
)
# Save animation
anim_save(filename = "goal.mp4", animation = last_animation()) # requires renderer line in animate function
anim_save(filename = "goal.gif", animation = last_animation())
# Let's add addtional players
# Player25 is Away GK so add him
# Player22 is marking Player9
# add some random players for depth
# Add away players
plot2 <-
ggplot(oneNil) +
annotate_pitch(
colour = "white", # Pitch lines
fill = "#7fc47f" # Pitch colour
) +
theme_pitch() + # removes xy labels
coord_cartesian( # crop pitch to limits, works best inside coord_cartesian rather than
xlim = c(45, 103), # just using xlim and ylim, not sure why
ylim = c(-3, 103)
) +
geom_point( # add ball location data
aes(x = BallX, y = BallY),
colour = "black", fill = "white", pch = 21, size = 4
) +
# HOME players
# add player6 location data
geom_point(
aes(x = Player6X, y = Player6Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add player9 location data
geom_point(
aes(x = Player9X, y = Player9Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add player10 location data
geom_point(
aes(x = Player10X, y = Player10Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add player7 location data
geom_point(
aes(x = Player7X, y = Player7Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# add player8 location data
geom_point(
aes(x = Player8X, y = Player8Y),
colour = "black", fill = "red", pch = 21, size = 4
) +
# AWAY players
# add player22 location data
geom_point(
aes(x = Player22X, y = Player22Y),
colour = "white", fill = "blue", pch = 21, size = 4
) +
# add player25 (GK) location data - give different colour shirt
geom_point(
aes(x = Player25X, y = Player25Y),
colour = "white", fill = "black", pch = 21, size = 4
) +
# add player16 location data
geom_point(
aes(x = Player16X, y = Player16Y),
colour = "white", fill = "blue", pch = 21, size = 4
) +
# add player18 location data
geom_point(
aes(x = Player18X, y = Player18Y),
colour = "white", fill = "blue", pch = 21, size = 4
) +
# add title/subtitle/caption
labs(
title = "Home [1] - 0 Away",
subtitle = "Player9 Goal - 1'",
caption = "Made by @statnamara | Source: Metrica"
) +
# Add clock to top left
geom_label(aes(x = 50,
y = 103,
label = clock),
size = 7) +
theme(title = element_text(face = "italic", size = 14),
panel.border = element_rect(colour = "black", fill=NA, size=1),
) +
transition_states(
Frame, # time-step variable
state_length = 0.01, # duration of frame
transition_length = 1, # duration between frames
wrap = FALSE # restart, don't loop
)
# Render the animation: 10 s at 30 fps, holding the final frame for 3 s
# (end_pause = 90 frames); detail = 30 interpolates extra frames between
# states for smoother motion.
# FIX: removed the dangling comma that was left behind when the renderer
# argument was commented out -- it passed an empty argument to animate(),
# which is fragile (errors for functions without defaults) and trips lintr.
animate(plot2,
        duration = 10,
        fps = 30,
        detail = 30,
        width = 1000,
        height = 700,
        end_pause = 90
        # renderer = av_renderer() # for save as mp4
        )
# Save animation.
# NOTE: the .mp4 save only works if animate() above was run with
# av_renderer() (see the commented argument); the default renderer
# produces a gif.
anim_save(filename = "goal2.mp4", animation = last_animation()) # requires renderer line in animate function
anim_save(filename = "goal2.gif", animation = last_animation())
|
9412bf8d2f9288acb59b5cb696dcb2479f3d2220 | 47ff67dc83cb318d684afb58296ba9f2c4ec4a07 | /homework/wenzel_tobias/10_24/ch03_arnold.R | 07fcefcfc37fecf7e01d8e453d6c6dbb3cc1770d | [] | no_license | Islamicate-DH/hw | 378f0e8738a8c6b7bb438bea540af6d5af4c602a | 4af2cc99b8f75a57f32254a488df4e519ea81f49 | refs/heads/master | 2021-01-11T02:11:19.477315 | 2020-05-27T16:12:13 | 2020-05-27T16:12:13 | 70,803,965 | 2 | 12 | null | 2020-05-27T15:30:46 | 2016-10-13T12:26:32 | JavaScript | UTF-8 | R | false | false | 10,244 | r | ch03_arnold.R | # Author: Tobias Wenzel
# Month/Year: 10/2016
# In course: Studying the islamicate Culture through Text Analysis
# Description: Code snippets and exercises of Chapter 3 in 'arnold_humanities'
# setwd("/home/tobias/Dokumente/islamicate2.0/arnold_humanities/ch03_05/")
# geodf <- read.csv("data/ch03/geodf.csv", as.is=TRUE)
# geodf[1:6,]
# tab <- table(geodf$county) # table country as col-title, counts the tracts
# tab[order(tab, decreasing=TRUE)]
# names(tab)[order(tab,decreasing=TRUE)][1:5] # 5 highest countries
# table(geodf$county,geodf$csa)
# ppPerHH <- geodf$population / geodf$households
# hist(ppPerHH)
# hist(ppPerHH[ppPerHH < 5])
# hist(ppPerHH[ppPerHH < 5], breaks=30)
# hist(ppPerHH[ppPerHH < 5], breaks=(13:47) / 10) # 1.3-4.7
#
# hist(ppPerHH[ppPerHH < 5], breaks=30,
# col="gray",
# xlab="People per Household",
# ylab="Count",main="Household Size by Census Tract - Oregon")
#
# meansOfCommute <- read.csv("data/ch03/meansOfCommute.csv",
# as.is=TRUE)
# meansOfCommute <- as.matrix(meansOfCommute)
# walkPerc<-meansOfCommute[,"walk"]/ meansOfCommute[,"total"]
# walkPerc = round(walkPerc * 100)
# quantile(walkPerc)
# carPerc <- meansOfCommute[,"car"] / meansOfCommute[,"total"]
# carPerc <- round(carPerc * 100)
# quantile(carPerc)
# quantile(walkPerc, prob=(0:10)/10)
# cent <- quantile(walkPerc,prob=seq(0,1,length.out=100),
# names=FALSE)
# coff <- quantile(carPerc, prob=0.10)
# lowCarUsageFlag <- (carPerc < coff)
# table(lowCarUsageFlag, geodf$csa)
#
# # BINS
#
# breakPoints <- quantile(carPerc, prob=seq(0,1,length.out=11),
# names=FALSE)
# bin <- cut(carPerc, breakPoints,labels=FALSE, include.lowest=TRUE)
# table(bin)
# table(bin, geodf$csa)
#
# bins <- cut(ppPerHH[ppPerHH < 5], breaks=seq(1.3,4.7,by=0.1),
# labels=FALSE, include.lowest=TRUE)
# table(bins)
# hist(carPerc, breaks=breakPoints)
#
# hhIncome <- read.csv("data/ch03/hhIncome.csv",as.is=TRUE,
# check.names=FALSE)
# hhIncome <- as.matrix(hhIncome)
# hhIncome[1:5,]
# oneRow <- hhIncome[1,-1]
# cumsum(oneRow)
# cumIncome <- matrix(0, ncol=ncol(hhIncome)-1, nrow=nrow(hhIncome))
# for (j in 1:nrow(hhIncome)) {
# cumIncome[j,] <- cumsum(hhIncome[j,-1]) / hhIncome[j,1]
# cumIncome[j,] <- round(cumIncome[j,] * 100)
# }
# colnames(cumIncome) <- colnames(hhIncome)[-1]
#
# # 3.7 Combining Plots
# par(mfrow=c(4,4))
# for(j in 1:16) {
# hist(hhIncome[,j+1] / hhIncome[,1],
# breaks=seq(0,0.7,by=0.05), ylim=c(0,600))
# }
#
# bands <- colnames(hhIncome)[-1]
# bandNames <- paste(bands[-length(bands)],"-",bands[-1], sep="")
# bandNames <- c(bandNames, "200k+")
#
# par(mfrow=c(4,4))
# par(mar=c(0,0,0,0))
# # some x are not counted...
# for(j in 1:16) {
# hist(cumIncome[,j], breaks=seq(0,1,length.out=20),axes=FALSE,
# main="",xlab="",ylab="", ylim=c(0,600), col="grey")
# box()
# text(x=0.33,y=500, label=paste("Income band:", bandNames[j]))
# }
# # 3.8 Aggregation
# csaSet <- unique(geodf$csa)
# popTotal <- rep(0, length(csaSet))
# names(popTotal) <- csaSet
#
# for (j in 1:nrow(geodf)) {
# index <- match(geodf$csa[j], csaSet)
# popTotal[index] <- popTotal[index] + geodf$population[j]
# }
# popTotal
#
# csaSet <- unique(geodf$csa)
# wahTotal <- rep(0, length(csaSet))
# wahTotal/ popTotal
# apply(meansOfCommute[1:10,-1],MARGIN=1,FUN=sum) # margin~rows
# apply(meansOfCommute,2,sum)
################ EXERCISE 26 ################
# Cross-tabulate sepal length (rounded to the nearest integer) by species.
sepal_int_tab <- table(round(iris$Sepal.Length), iris$Species)
sepal_int_tab
################ EXERCISE 27 ################
# Round to the nearest 0.5 by doubling, rounding, and halving.
# (round(x, 0.5) would not do this: the `digits` argument counts decimal
# places, it is not a step size.)
#table(round(iris$Sepal.Length,0.5),iris$Species)
sepal_half_tab <- table(round(iris$Sepal.Length * 2) / 2, iris$Species)
sepal_half_tab
################ EXERCISE 28 ################
# Histogram of sepal length with roughly 30 bins (a single number for
# `breaks` is only a suggestion to hist()).
hist(iris$Sepal.Length, breaks = 30)
################ EXERCISE 29 ################
# Same data on fixed half-unit bins from 1 to 8, with explicit labels.
hist(iris$Sepal.Length,
     breaks = seq(1, 8, by = 0.5),
     main = "The Iris Dataset",
     xlab = "Sepal Length",
     ylab = "Frequency")
################ EXERCISE 30 #################
# One sepal-length histogram per species, side by side on a shared
# half-unit bin grid so the three panels are directly comparable.
# FIX: use `<-` for assignment (not `=`) and replace the three copy-pasted
# hist() calls with a loop over the species levels.
manseq <- seq(1, 8, by = 0.5)
par(mfrow = c(1, 3))
for (sp in levels(iris$Species)) {
  hist(iris$Sepal.Length[iris$Species == sp],
       breaks = manseq,
       main = sp,
       xlab = "Sepal Length",
       ylab = "Freq.")
}
################ EXERCISE 31 #################
# Deciles of petal length.
petal_deciles <- quantile(iris$Petal.Length, prob = (0:10) / 10)
petal_deciles
################ EXERCISE 32 #################
# Flag flowers whose petal length falls below the 70th percentile and
# cross-tabulate that flag by species.
lowPetalLength <- quantile(iris$Petal.Length, probs = 0.7)
table(iris$Species, lowPetalLength > iris$Petal.Length)
# setosa and versicolor sit almost entirely below the 70th percentile,
# while virginica is almost always above it.
################ EXERCISE 33 #################
# Cut petal length into deciles and tabulate the bins against species.
cutpoints <- quantile(iris$Petal.Length,
                      prob = seq(0, 1, length.out = 11),
                      names = FALSE)
bin <- cut(iris$Petal.Length, cutpoints, labels = FALSE, include.lowest = TRUE)
table(bin, iris$Species)
################ EXERCISE 34 #################
# Decile bins for petal length and for petal area (length * width),
# cross-tabulated to show how strongly the two measures agree.
cutpoints <- quantile(iris$Petal.Length,
                      prob = seq(0, 1, length.out = 11),
                      names = FALSE)
length.bin <- cut(iris$Petal.Length, cutpoints,
                  labels = FALSE, include.lowest = TRUE)
petal.area <- iris$Petal.Length * iris$Petal.Width
cutpoints <- quantile(petal.area,
                      prob = seq(0, 1, length.out = 11),
                      names = FALSE)
area.bin <- cut(petal.area, cutpoints, labels = FALSE, include.lowest = TRUE)
table(area.bin, length.bin)
################ EXERCISE 35 #################
# Median petal length per species, computed one species at a time.
ans.v <- rep(0, 3)
ans.v[1] <- quantile(iris$Petal.Length[iris$Species == "setosa"], probs = 0.5)
ans.v[2] <- quantile(iris$Petal.Length[iris$Species == "versicolor"], probs = 0.5)
ans.v[3] <- quantile(iris$Petal.Length[iris$Species == "virginica"], probs = 0.5)
names(ans.v) <- c("setosa", "versicolor", "virginica")
################ EXERCISE 36 #################
# Same result, but looping over whatever species are present instead of
# hard-coding them.
# FIX: seq_len() instead of 1:n (safe for n == 0), take the names from
# `spec` itself rather than re-typing them, and drop the dead commented-out
# duplicate of the quantile call.
spec <- unique(iris$Species)
no.species <- length(spec)
ans.v <- rep(0, length = no.species)
for (i in seq_len(no.species)) {
  ans.v[i] <- quantile(iris$Petal.Length[iris$Species == spec[i]], probs = 0.5)
}
names(ans.v) <- as.character(spec)
################ EXERCISE 37 #################
# The same per-group median in a single call.
tapply(iris$Petal.Length, iris$Species, quantile, probs = 0.5)
# 38. As in a previous question, write a function which asks the user for a state
# abbreviation and returns the state name. However, this time, put the question
# in a for loop so the user can decode three straight state abbreviations.
for (i in seq_len(3)) {
  abbr <- readline("Enter a state abbreviation.")
  # Case-insensitive lookup in the built-in state tables.
  print(state.name[tolower(state.abb) == tolower(abbr)])
}
# 39. The command break immediately exits a for loop; it is often used inside
# of an if statement. Redo the previous question, but break out of the loop
# when a non-matching abbreviation is given. You can increase the number of
# iterations to something large (say, 100), as a user can always get out of the
# function by giving a non-abbreviation.
for (i in seq_len(100)) {
  abbr <- readline("Enter a state abbreviation.")
  matched <- state.name[tolower(state.abb) == tolower(abbr)]
  if (length(matched) > 0) {
    print(matched)
  } else {
    print("exit loop.")
    break
  }
}
# 40. Now, reverse the process so that the function returns when an abbreviation is
# found but asks again if it is not.
f.ex40 <- function() {
  abbr <- ""
  matched <- character(0)
  # Keep prompting until the abbreviation matches a known state.
  while (length(matched) == 0) {
    abbr <- readline("Enter a correct state abbreviation.")
    matched <- state.name[tolower(state.abb) == tolower(abbr)]
  }
  print(matched)
}
#f.ex40()
# 41. Using a for loop, print the sum 1 + 1/2 + 1/3 + 1/4 + · · · + 1/n for all n
# equal to 1 through 100.
# FIX: removed the dead `ans <- 0` (never read) and used seq_len() instead
# of 1:100.  Recomputing the full sum each iteration is O(n^2) but fine
# for n = 100, and it is what the exercise asks for.
for (n in seq_len(100)) {
  print(sum(1 / seq_len(n)))
}
# 42. Now calculate the sum for all 100 values of n using a single function call.
# cumsum() gives every partial sum in one vectorized pass.
cumsum(1 / (1:100))
# 43. Ask the user for their year of birth and print out the age they turned for every
# year between then and now.
year.of.birth <- as.numeric(readline("enter year of birth"))
# GENERALIZED: derive "now" from the system clock instead of hard-coding
# 2016, so the output stays current.  Note `:` binds tighter than `-`, so
# this evaluates as (year.of.birth:current.year) - year.of.birth, i.e. the
# ages 0, 1, 2, ...
current.year <- as.numeric(format(Sys.Date(), "%Y"))
year.of.birth:current.year - year.of.birth
# 44. The dataset InsectSprays shows the count of insects after applying one of
# six different insect repellents. Construct a two-row three-column grid of his-
# tograms, on the same scale, showing the number of insects from each spray.
# Do this using a for loop rather than coding each plot by hand.
# FIX: `sprays` was used here before being defined (it was only created in
# exercise 46 below, so this code errored when run top-to-bottom), and each
# histogram plotted the FULL count vector, drawing the same picture six
# times instead of one panel per spray.
par(mfrow = c(2, 3))
sprays <- levels(InsectSprays$spray)
for (i in seq_along(sprays)) {
  hist(InsectSprays$count[InsectSprays$spray == sprays[i]],
       main = sprays[i],
       xlab = "count",
       breaks = seq(0, 30, by = 5))
}
# 45. Repeat the same two by three plot, but now remove the margins, axes, and
# labels. Replace these by adding the spray identifier (a single letter) to the plot
# with the text command.
par(mfrow = c(2, 3))
par(mar = c(0, 0, 0, 0))
sprays <- levels(InsectSprays$spray)
for (i in seq_along(sprays)) {
  h <- hist(InsectSprays$count[InsectSprays$spray == sprays[i]],
            xlab = "", ylab = "", axes = FALSE, main = "",
            breaks = seq(0, 30, by = 5))
  # Label each panel near the top of its own y-range: the old hard-coded
  # y = 30 fell outside the plot region (and was clipped) once per-spray
  # counts are plotted.  Also spell `labels=` in full instead of relying
  # on partial matching of `label=`.
  text(25, max(h$counts) * 0.9, labels = sprays[i])
}
# 46. Calculate the median insect count for each spray.
quantile(InsectSprays$count, probs = 0.5)   # overall median, for reference
# FIX: iterate over the factor LEVELS (character), so cat() prints the
# spray letters A-F.  The original looped over unique(...) factor values,
# and cat() on a factor prints the underlying integer codes 1-6 -- exactly
# the problem the old inline comment complained about.
sprays <- levels(InsectSprays$spray)
for (i in seq_along(sprays)) {
  med <- quantile(InsectSprays$count[InsectSprays$spray == sprays[i]],
                  probs = 0.5)
  cat("Spray", sprays[i], ":", med, "\n", sep = " ")
}
# 47. Using the WorldPhones dataset, calculate the total number of phones used
# in each year using a for loop.
years <- nrow(WorldPhones)
total.numbers.year <- rep(0, years)
for (i in seq_len(years)) {
  total.numbers.year[i] <- sum(WorldPhones[i, ])
}
# 48. Calculate the total number of phones used in each year using a single apply function.
# MARGIN = 1 applies sum() over ROWS, i.e. one total per year.
# (The old "# first col" comment was wrong -- this is not a column sum.)
apply(WorldPhones, 1, sum)
# 49. Calculate the percentage of phones that were in Europe over the years in
# question.
total.phones <- apply(WorldPhones, 1, sum)
# Select the Europe column by name rather than by magic index 2.
(WorldPhones[, "Europe"] / total.phones) * 100
# 50. Convert the entire WorldPhones matrix to percentages; in other words, each
# row should sum to 100.
# total.phones has one entry per row and matrix/vector division recycles
# down columns, so element [i, j] is divided by total.phones[i] as intended.
round((WorldPhones / total.phones) * 100, 2)
|
eac77b4a32935d98fb6aebea0d3683f6e8fb3212 | 4173d9651667a117f636256ab22deb6d7793fbf4 | /epinow_extract.R | 1b75a530f84639cc76d96f7f604c46fe15114244 | [] | no_license | NicholasTanYuZhe/covid-19-epi | 9db61e425451b1ec12f5ca75e48d4f14168ee322 | 8067ab9ef794db457e9a30fe13db6444d94fd59e | refs/heads/main | 2023-07-09T14:26:07.667976 | 2021-08-02T14:09:29 | 2021-08-02T14:09:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,343 | r | epinow_extract.R | #rm(list=ls())
#load packages
library(EpiNow2)
library(tidyverse)
#pull data set of clusters
# Cluster-level daily case counts exported from the MOH/cluster dashboard.
# Every listed cluster column is forced to numeric and `date` is parsed as
# dd/mm/yyyy; columns NOT listed here fall back to readr's type guessing
# (e.g. `sibuga`, which is summed into `ins` below -- TODO confirm it is
# guessed as numeric).
# NOTE(review): absolute P:/ path -- this only runs on the author's machine.
clusters <- read_csv("P:/COVID-19/R0 Malaysia/data/cluster data/clusters_dashboard.csv",
                     col_types = cols(no = col_number(), bangau = col_number(),
                                      benteng = col_number(), benteng_pk = col_number(),
                                      date = col_date(format = "%d/%m/%Y"),
                                      dti = col_number(), import = col_number(),
                                      kepayan = col_number(), laut = col_number(),
                                      meru = col_number(), penjara_jawi = col_number(),
                                      penjara_reman = col_number(), penjara_sandakan = col_number(),
                                      penjara_tapah = col_number(), pts_tawau = col_number(),
                                      pulau = col_number(), rumah_merah = col_number(),
                                      sivagangga = col_number(), sungai = col_number(),
                                      tabligh = col_number(), tawar = col_number(),
                                      tembok = col_number(), gk_tawau=col_number(), matambai=col_number(),
                                      jalan_harapan=col_number(), bakti=col_number(), tembok_gajah=col_number(),
                                      kolam_air=col_number(), hala_mutiara=col_number(), pompod=col_number(),
                                      pagar_siput=col_number(), pagar_bentong=col_number(), tembok_mempaga=col_number(),
                                      telok_mas=col_number(), tropika=col_number(), tembok_kemus=col_number(),
                                      tembok_nenas=col_number(), teratai=col_number(),sungai_jelok=col_number(),
                                      tembok_renggam=col_number(), tembok_sgudang=col_number(), tembok_taiping=col_number(),
                                      bukit_besi=col_number(), pengkalan_chepa=col_number(), tembok_bendera=col_number(),
                                      jln_awang=col_number(), dti_persiaran_wawasan=col_number(), dti_machap_umboo=col_number(),
                                      tembok_muhasabah=col_number(), imigresen_semuja=col_number(), dti_juru=col_number(),
                                      dti_jln_duta=col_number(), bukit_chagar=col_number(), sri_aman=col_number(),
                                      padang_hijau=col_number(), choh2=col_number(), jln_salleh=col_number(),
                                      dti_lenggeng=col_number(), dti_sandakan=col_number(), pagar_siput2=col_number(),
                                      sri_lalang=col_number(), kg_selamat=col_number(), pagar_rimba=col_number(),
                                      meru_utama=col_number(), dti_tanah_merah=col_number(), dti_bukit_jalil2=col_number(),
                                      total_prison=col_number()))
#pull new data set
# National daily incidence series: new/total cases and deaths, recoveries,
# ICU occupancy and ventilator support, with the same dd/mm/yyyy dates.
df <- read_csv("P:/COVID-19/R0 Malaysia/data/incidence/covid-19_my.csv",
               col_types = cols(date = col_date(format = "%d/%m/%Y"),
                                new_cases = col_number(), new_deaths = col_number(),
                                total_cases = col_number(), total_deaths = col_number(),
                                recover = col_number(), total_recover = col_number(),
                                icu = col_number(), support = col_number()))
#lock down one set#NO MANIPULATION#
# Keep an untouched copy of the raw incidence series before any joins.
core_set <- df
#cut out the dates to join with cluster
# Row window sized off the system clock: `as.numeric(Sys.Date() - 18260)`
# is today's days-since-1970 count minus 18260, so the upper row index
# grows by one per elapsed day (see the author's TODOs).  Fragile -- a
# date-based filter would be safer.  Columns 1,3,4,7 are presumably
# date/new_cases/new_deaths/... by position; TODO confirm against the CSV.
new <- df[21:as.numeric(Sys.Date()-18260),c(1,3,4,7)] # TODO: if elapsed by a day +1 to the constant (contstant=18260) (+1 for each elapsed day)
# Sequential row id used as the join key; its length (today - 18280) must
# stay consistent with the row window above (21 + length - 1 rows).
# Note seq(1:n) is just 1:n here.
new$no <- seq(1:as.numeric(Sys.Date()-18280)) # TODO: if elapsed by a day +1 to the constant (contstant=18280) (+1 for each elapsed day)
new$date <- as.Date(new$date)
#merge the two sets
# Full join keeps days present in only one of the two sources.
df <- full_join(new, clusters, by=c("no","date"))
#create a special variable for all immigration and prison outbreaks
# Hand-maintained list of detention-centre / prison cluster columns.
# NOTE(review): plain `+` propagates NA, so a single missing cluster value
# makes `ins` (and hence `daily` below) NA for that day -- confirm the
# source data has no gaps.
df$ins <- df$dti+df$tembok+df$benteng_pk+df$penjara_reman+df$penjara_jawi+df$kepayan+df$penjara_tapah+
  df$rumah_merah+df$pts_tawau+df$penjara_sandakan+df$gk_tawau+df$matambai+df$jalan_harapan+df$bakti+df$sibuga+
  df$tembok_gajah + df$kolam_air + df$hala_mutiara + df$pompod + df$pagar_siput + df$pagar_bentong +
  df$tembok_mempaga + df$telok_mas +df$tropika + df$tembok_kemus + df$tembok_nenas +df$sungai_jelok +df$tembok_renggam +
  df$tembok_sgudang + df$tembok_taiping + df$bukit_besi + df$pengkalan_chepa + df$tembok_bendera +df$jln_awang +
  df$dti_persiaran_wawasan + df$dti_machap_umboo + df$tembok_muhasabah +df$imigresen_semuja + df$dti_juru +
  df$ dti_jln_duta + df$bukit_chagar +df$sri_aman + df$padang_hijau + df$choh2 + df$jln_salleh + df$dti_lenggeng +
  df$dti_sandakan + df$pagar_siput2 + df$sri_lalang + df$kg_selamat + df$pagar_rimba + df$meru_utama + df$dti_tanah_merah +
  df$dti_bukit_jalil2 +df$total_prison
##########################################################################################
#arbitrary cut of point for visualisation of clusters with at least 150 cases or more
##########################################################################################
#join the 2 sets
# Community incidence = national new cases minus institutional clusters
# and imported cases.
df$daily <- df$new_cases-df$ins-df$import#benteng ld has begun in an institution but quickly spiralled into community as well
#pick a small sample
# Three March-2020 windows (full month / first half / second half).
# NOTE(review): df1-df3 are not used further down in this script --
# presumably kept for ad-hoc inspection.
df1 <- df %>% filter(date>as.Date("2020-03-01")&date<as.Date("2020-03-30"))
df2 <- df %>% filter(date>as.Date("2020-03-01")&date<as.Date("2020-03-14"))
df3 <- df %>% filter(date>as.Date("2020-03-14")&date<as.Date("2020-03-30"))
##########################################################################################
#run a sample of epi now
##########################################################################################
#define parameters
# Reporting delay: lognormal with mean 2 days / sd 1 day (converted to the
# log scale), truncated at 10 days.
reporting_delay <- list(
  mean = convert_to_logmean(2, 1), mean_sd = 0.1,
  sd = convert_to_logsd(2, 1), sd_sd = 0.1,
  max = 10
)
# Literature-based generation time (Ganyani et al.) and incubation period
# (Lauer et al.) distributions shipped with EpiNow2.
generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani")
incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer")
# Rows 39:531 of the merged frame -- presumably the full community series;
# TODO confirm this hard-coded window still matches the data.
reported_cases <- data.frame(date=df$date[39:531], confirm=df$daily[39:531])
head(reported_cases)
#run for the first batch of cases
# Full Rt estimation (Stan, 4 cores) with an Rt prior of mean 2, sd 0.2.
# This is the slow step of the script.
estimates <- epinow(reported_cases = reported_cases,
                    generation_time = generation_time,
                    delays = delay_opts(incubation_period, reporting_delay),
                    rt = rt_opts(prior = list(mean = 2, sd = 0.2)),
                    stan = stan_opts(cores = 4))
plot(estimates)
#extracct from estimates
# Pull the Rt trajectory and the back-calculated reported-case estimates.
rt <- summary(estimates, type = "parameters", params = "R")
infections <- summary(estimates, output = "estimated_reported_cases")
#extrract as csv
# Written to the current working directory.
write.csv(rt, "rt_epinow.csv")
write.csv(infections, "infections_epinow.csv")
##########################################################################################
#test for last 30 days
##########################################################################################
#define parameters
# Identical distribution setup to the first run, repeated verbatim --
# candidate for factoring into a shared helper.
reporting_delay <- list(
  mean = convert_to_logmean(2, 1), mean_sd = 0.1,
  sd = convert_to_logsd(2, 1), sd_sd = 0.1,
  max = 10
)
generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani")
incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer")
# Rows 500:531 -- presumably roughly the last 30 days of the series;
# TODO confirm the hard-coded window.
reported_cases <- data.frame(date=df$date[500:531], confirm=df$daily[500:531])
head(reported_cases)
#run for the first batch of cases
estimates2 <- epinow(reported_cases = reported_cases,
                     generation_time = generation_time,
                     delays = delay_opts(incubation_period, reporting_delay),
                     rt = rt_opts(prior = list(mean = 2, sd = 0.2)),
                     stan = stan_opts(cores = 4))
plot(estimates2)
#extracct from estimates
rt2 <- summary(estimates2, type = "parameters", params = "R")
infections2 <- summary(estimates2, output = "estimated_reported_cases")
#extrract as csv
write.csv(rt2, "rt2_epinow.csv")
write.csv(infections2, "infections2_epinow.csv")
##########################################################################################
#test for last 100 days
##########################################################################################
#define parameters
# Same setup as the two runs above, repeated verbatim -- candidate for a
# shared helper.
reporting_delay <- list(
  mean = convert_to_logmean(2, 1), mean_sd = 0.1,
  sd = convert_to_logsd(2, 1), sd_sd = 0.1,
  max = 10
)
generation_time <- get_generation_time(disease = "SARS-CoV-2", source = "ganyani")
incubation_period <- get_incubation_period(disease = "SARS-CoV-2", source = "lauer")
# Rows 430:531 -- presumably roughly the last 100 days of the series;
# TODO confirm the hard-coded window.
reported_cases <- data.frame(date=df$date[430:531], confirm=df$daily[430:531])
head(reported_cases)
#run for the first batch of cases
estimates3 <- epinow(reported_cases = reported_cases,
                     generation_time = generation_time,
                     delays = delay_opts(incubation_period, reporting_delay),
                     rt = rt_opts(prior = list(mean = 2, sd = 0.2)),
                     stan = stan_opts(cores = 4))
plot(estimates3)
#extracct from estimates
rt3 <- summary(estimates3, type = "parameters", params = "R")
infections3 <- summary(estimates3, output = "estimated_reported_cases")
#extrract as csv
write.csv(rt3, "rt3_epinow.csv")
write.csv(infections3, "infections3_epinow.csv")
|
7976d67b6eb50f1e18b70f5676f3825b11fc38a2 | fdafa8162e1a9aeab71f4343226877307148400e | /proj2_plot3.R | 4569f026a57a9d207b1cb4b5cd4780132ccf3ff3 | [] | no_license | JDMaughan/ExDataPlotting2 | a1c6e13298d7f280bc96c893d0e118e729fe73f6 | 4980893527928b40e152d51980c31cffb5850aeb | refs/heads/master | 2018-01-08T04:05:42.752154 | 2015-12-27T05:57:31 | 2015-12-27T05:57:31 | 48,306,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,785 | r | proj2_plot3.R | ## Exploratory Data Analysis Course Project 2, plot 3
##
library(dplyr)
library(ggplot2)
NEIFile <- "./Data/summarySCC_PM25.rds"
SCCFile <- "./Data/Source_Classification_Code.rds"
## If either NEI File or SCC File aren't present, download and unzip the original data
if (!file.exists(NEIFile) || !file.exists(SCCFile)) {
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
                  destfile = "./Data/exdata-data-NEI_data.zip",
                  mode = "wb")
    ## NOTE(review): the .rds files are expected under ./Data (see NEIFile
    ## above) but are extracted into "." here -- confirm the zip's internal
    ## layout; otherwise exdir should probably be "./Data".
    unzip("./Data/exdata-data-NEI_data.zip", overwrite = TRUE, exdir = ".")
}
## Read the two files if they haven't been read into the environment yet.
## FIX: guard with exists() first -- the original `!nrow(NEI) == 6497651`
## check errored with "object 'NEI' not found" on a fresh session, which
## defeated the whole cache-in-environment idea.
if (!exists("NEI") || nrow(NEI) != 6497651) {
    NEI <- readRDS(NEIFile)
}
if (!exists("SCC") || nrow(SCC) != 11717) {
    SCC <- readRDS(SCCFile)
}
## Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
## which of these four sources have seen decreases in emissions from 1999–2008 for Baltimore City?
## Which have seen increases in emissions from 1999–2008?
## Use the ggplot2 plotting system to make a plot answer this question.
## fips "24510" is Baltimore City (per the question above).
BaltimoreCity <- filter(NEI, fips == "24510")
## Total PM2.5 emissions per source type per year.
balEmissionByType <- group_by(BaltimoreCity, type, year) %>%
    summarize(AnnualTotalForType = sum(Emissions))
## Faceted scatter with a fitted trend per source type.
## NOTE(review): `method = "lm"` and the bare `geom_smooth(fill=NA)` passed
## into qplot() rely on old qplot behaviour; newer ggplot2 versions warn
## about or ignore these -- verify against the ggplot2 version in use.
qplot(balEmissionByType$year, balEmissionByType$AnnualTotalForType, data=balEmissionByType,
      facets = . ~ type,
      geom = c("point", "smooth"), method = "lm", geom_smooth(fill=NA),
      margins = TRUE,
      ## ylim = c(0, 2500),
      xlab = "Year",
      ylab = "PM2.5 Emissions (in tons)",
      main = "Baltimore City Emissions by Type\n")
## Copy graphic device to a .png file, then close the copy device.
dev.copy(png, file = "P2_plot3.png", width = 800, height = 600)
dev.off()
|
b54e253b74b6fc46d9a6f590de6f6fe37bc1d8b3 | 499a284ec622dae5eadd417cee0bc9f25d859bee | /man/proposicoes_info.Rd | 43cca78af26e0e29dbad3043b73bdc8a12870505 | [
"MIT"
] | permissive | pedrodrocha/camaradeputadosapi | 84dba2943c8518218f8b01ce32befe148e15e08e | 0d460bfb607aa8175b6d91a7551f0f4cd6a421e2 | refs/heads/master | 2023-02-10T23:19:18.586110 | 2021-01-06T11:35:24 | 2021-01-06T11:35:24 | 324,643,824 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 873 | rd | proposicoes_info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proposicoes.R
\name{proposicoes_info}
\alias{proposicoes_info}
\title{Get information about a proposition of Brazilian House of Representatives}
\usage{
proposicoes_info(id)
}
\arguments{
\item{id}{A proposition unique identifier}
}
\value{
A tibble with information about a proposition
}
\description{
Get detailed information about a proposition presented at the Brazilian House of representatives
}
\examples{
a <- proposicoes_info(id = 19175)
}
\seealso{
Other proposicoes:
\code{\link{proposicoes_autores}()},
\code{\link{proposicoes_historico}()},
\code{\link{proposicoes_id}()},
\code{\link{proposicoes_referencias}()},
\code{\link{proposicoes_relacionadas}()},
\code{\link{proposicoes_temas}()},
\code{\link{proposicoes_votacoes}()},
\code{\link{proposicoes}()}
}
\concept{proposicoes}
|
b7aac64ddbb843f503dfe3221b6606754e60ab7c | 71e3b3ef7987639d35ff4a5e18be1e1038de3ce4 | /src/project/src/2014March/march2014DataCleaning.R | 0ec0154193f4185a70d335277af9617368afc6dd | [] | no_license | Edpearsonjr/howba | 046aa6bf619a9bf884d5b000d0f792b2afa1c99d | 0a472b74a0682fe2481388dd75a83437432dd364 | refs/heads/master | 2020-03-21T22:49:45.237914 | 2016-04-22T06:58:44 | 2016-04-22T06:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,801 | r | march2014DataCleaning.R | ###########################################################
# Loading the libraries
###########################################################
library("dplyr") # This helps in manipulating the data
library("data.table") # This helps to read the files faster
library("lubridate") # This helps in manipulating the dates and times
#########################################################
# Pre amble - if you want to remove the environment
# variables and other things
#########################################################
# NOTE(review): rm(list=ls()) only clears objects in the global environment
# (it does not detach packages, reset options(), or restore par()); clearing
# the workspace inside a script is generally discouraged — prefer restarting
# the R session for a truly clean state.
rm(list=ls())
#########################################################
#loading the data into a tbl_df
#########################################################
# Paths to the raw March-2014 NYC TLC trip records (yellow and green cabs).
dataMarchYellowFile <- "~/Abhinav/howba/app/src/project/data/2014-03/yellow_tripdata_2014-03.csv"
dataMarchGreenFile <- "~/Abhinav/howba/app/src/project/data/2014-03/green_tripdata_2014-03.csv"
# Read each CSV quickly with data.table::fread(), then wrap it as a tbl_df
# so the dplyr verbs below print and behave nicely.
dataMarchYellow <- tbl_df(fread(dataMarchYellowFile))
dataMarchGreen <- tbl_df(fread(dataMarchGreenFile))
# Column vocabularies for the two raw layouts (the green file carries extra
# ehail_fee/trip_type and two trailing dummy columns; the yellow file does not).
greenColumnNames <- c("vendor_id", "pickup_time", "dropoff_time", "store_and_forward_flag",
                      "rate_code_id", "pickup_longitude", "pickup_latitude",
                      "dropoff_longitude", "dropoff_latitude", "passenger_count",
                      "trip_distance", "fare_amount", "surcharge", "mta_tax",
                      "tip_amount", "toll_amount", "ehail_fee", "total_amount",
                      "payment_type", "trip_type", "dummy1", "dummy2")
yellowColumnNames <- c("vendor_id", "pickup_time", "dropoff_time", "passenger_count",
                       "trip_distance", "pickup_longitude", "pickup_latitude",
                       "rate_code_id", "store_and_forward_flag", "dropoff_longitude",
                       "dropoff_latitude", "payment_type", "fare_amount", "surcharge",
                       "mta_tax", "tip_amount", "toll_amount", "total_amount")
colnames(dataMarchGreen) <- greenColumnNames
colnames(dataMarchYellow) <- yellowColumnNames
#########################################################
# Pre processing the data to include
# Combine the yellow and green cab data
#########################################################
# The following are the column names in the final data frame
# vendor_id, pickup_time, dropoff_time, store_and_forward_flag, rate_code_id, pickup_longitude, pickup_latitude,
# dropoff_longitude, dropoff_latitude, passenger_count, trip_distance, fare_amount, surcharge, mta_tax, tip_amount
# toll_amount, total_amount, payment_type(cash or credit card and others),
# trip_type(this is for the green cab only), cab_type(yellow, green), year, month, medallion, hack_license
# Normalise the green-cab data to the common 24-column layout shared with the
# yellow-cab data (see the column list in the comments above).
# NOTE: the mutate(replace(...)) chains below are order-dependent — the first
# replace() that inserts a string coerces the numeric code column to character,
# and the later comparisons (e.g. payment_type==2) then rely on R's implicit
# character/numeric coercion. Do not reorder or merge these steps casually.
normalisedDataGreen <- dataMarchGreen %>%
  # drop green-only columns that have no counterpart in the yellow data
  select(-c(ehail_fee, dummy1, dummy2)) %>%
  # recode numeric vendor ids to the vendor abbreviations used elsewhere
  mutate(vendor_id=replace(vendor_id,vendor_id==1, "CMT")) %>%
  mutate(vendor_id=replace(vendor_id,vendor_id==2, "VTS")) %>%
  # recode numeric payment codes to the yellow-cab string codes
  # (CRD=credit card, CSH=cash, NOC=no charge, DIS=dispute, UNK=unknown)
  mutate(payment_type=replace(payment_type, payment_type==1, "CRD")) %>%
  mutate(payment_type=replace(payment_type, payment_type==2, "CSH")) %>%
  mutate(payment_type=replace(payment_type, payment_type==3, "NOC")) %>%
  mutate(payment_type=replace(payment_type, payment_type==4, "DIS")) %>%
  mutate(payment_type=replace(payment_type, payment_type==5, "UNK")) %>%
  # add bookkeeping columns; medallion/hack_license are unknown for green cabs
  mutate(cab_type="green", year=2014, month=3, "medallion"=NA, hack_license=NA) %>%
  # NOTE(review): tz="EST" is a fixed UTC-5 offset with no daylight saving;
  # NYC local time would be "America/New_York" — confirm this is intended.
  mutate(pickup_time=ymd_hms(pickup_time, tz="EST")) %>%
  mutate(dropoff_time=ymd_hms(dropoff_time, tz="EST")) %>%
  # convert categorical columns to factors
  mutate(store_and_forward_flag=as.factor(store_and_forward_flag)) %>%
  mutate(rate_code_id=as.factor(rate_code_id)) %>%
  mutate(payment_type=as.factor(payment_type)) %>%
  mutate(trip_type=as.factor(trip_type)) %>%
  mutate(cab_type=as.factor(cab_type)) %>%
  mutate(medallion=as.factor(medallion)) %>%
  mutate(hack_license=as.factor(hack_license)) %>%
  # put the columns into the shared order used by both cab types
  select(c(vendor_id, pickup_time, dropoff_time, store_and_forward_flag, rate_code_id, pickup_longitude, pickup_latitude, dropoff_longitude,
           dropoff_latitude, passenger_count, trip_distance, fare_amount, surcharge, mta_tax, tip_amount, toll_amount, total_amount, payment_type,
           trip_type, cab_type, year, month, medallion, hack_license))
# Normalise the yellow-cab data to the same 24-column layout; the yellow file
# already uses string payment codes ("CRD", "CSH", ...), so no recoding of
# vendor/payment codes is needed here.
normalisedDataYellow <- dataMarchYellow %>%
  # NOTE(review): tz="EST" is a fixed UTC-5 offset with no daylight saving;
  # NYC local time would be "America/New_York" — confirm this is intended.
  mutate(pickup_time=ymd_hms(pickup_time, tz="EST"), dropoff_time=ymd_hms(dropoff_time, tz="EST")) %>%
  # convert categorical columns to factors
  mutate(rate_code_id=as.factor(rate_code_id)) %>%
  mutate(store_and_forward_flag=as.factor(store_and_forward_flag)) %>%
  mutate(payment_type=as.factor(payment_type)) %>%
  # add bookkeeping columns; trip_type exists only in the green data, so it is NA here
  mutate(cab_type="yellow", year=2014, month=3, "medallion"=NA, hack_license=NA, trip_type=NA) %>%
  mutate(cab_type=as.factor(cab_type)) %>%
  mutate(medallion=as.factor(medallion)) %>%
  mutate(hack_license=as.factor(hack_license)) %>%
  mutate(trip_type=as.factor(trip_type)) %>%
  # put the columns into the shared order used by both cab types
  select(c(vendor_id, pickup_time, dropoff_time, store_and_forward_flag, rate_code_id, pickup_longitude, pickup_latitude, dropoff_longitude,
           dropoff_latitude, passenger_count, trip_distance, fare_amount, surcharge, mta_tax, tip_amount, toll_amount, total_amount, payment_type,
           trip_type, cab_type, year, month, medallion, hack_license))
# Stack yellow and green trips into one table, keep only credit-card trips
# (tips are recorded systematically only for card payments), and derive
# per-trip tip measures.
normalisedMarch <- rbind(normalisedDataYellow, normalisedDataGreen)
normalisedMarch <- normalisedMarch %>%
  filter(payment_type == "CRD") %>%
  select(-c(store_and_forward_flag, mta_tax, payment_type)) %>%
  mutate(vendor_id = as.factor(vendor_id)) %>%
  mutate(weekday = factor(weekdays(pickup_time)),
         duration = as.numeric(difftime(dropoff_time, pickup_time, units = "mins")), # trip duration in decimal minutes
         ratio_tip_total = (tip_amount / total_amount)*100, # A) tip as a percentage of the total fare
         ratio_tip_distance = tip_amount / trip_distance, # B) tip per mile
         ratio_tip_duration = tip_amount / duration) # C) tip per minute
# BUG FIX: the original saved to "normalisedMarch2015.RData" although the data
# are from March 2014 (see the input file paths above); the file name now
# matches the data it contains.
save(normalisedMarch, file = "normalisedMarch2014.RData")
898869a6780508bf7d5b6ae6b2add22d74054b1e | 62cfdb440c9f81b63514c9e545add414dc4d5f63 | /R/qat_save_lim_rule_static_2d.R | bc6effa5559b47740596fe3e95b55f5f3824e196 | [] | no_license | cran/qat | 7155052a40947f6e45ba216e8fd64a9da2926be4 | 92975a7e642997eac7b514210423eba2e099680c | refs/heads/master | 2020-04-15T16:53:45.041112 | 2016-07-24T01:26:59 | 2016-07-24T01:26:59 | 17,698,828 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,025 | r | qat_save_lim_rule_static_2d.R | qat_save_lim_rule_static_2d <-
function(resultlist_part, baseunit="") {
## functionality: save lim-static-rule
## Builds the standard qat "savelist" (method name, flag meanings, long
## names, fill value, units, dimensions, test parameters, picture names and
## the flagvector itself) from the result of a 2D static limit test.
## author: André Düsterhus
## date: 22.10.2012
## version: A0.2
## input: resultlist part from qat_analyse_lim_rule_static_2d, optional: baseunit
## output: savelist
## NOTE: baseunit is accepted for interface consistency with the other
## qat_save_* functions but is not used in this function's body.
method <- "lim_static"
## BUG FIX: corrected the typo "maxmimum" -> "maximum" in the flag meaning.
meanings <- list(flagvector = "Original value exceeds: 1 maximum value, -1 minimum value, 0 no value")
longname <- list(flagvector = "Flagvector of a LIM-static Test")
fillvalue <- -999
unit <- list(flagvector = "unitless")
## dimension placeholders for the two measurement axes of the 2D field
dimension <- list(dim=list(mes_vec1 = NaN, mes_vec2 = NaN))
## the static limits that were applied by the analysis step
parameter <- list(min_value=resultlist_part$result$min_value, max_value=resultlist_part$result$max_value)
picnames <- list(firstpic = "lim_static")
content <- list(flagvector=resultlist_part$result$flagvector)
savelist <- list(method = method, meanings = meanings, longname = longname, fillvalue = fillvalue, unit = unit, dimension = dimension, parameter = parameter, picnames=picnames, content = content)
return(savelist)
}
|
8229aaae2f568ae44876ecb4ee1387c621047ada | 0565096ff2788b5ab9ad78e331e447ef089937e8 | /assignments/el3220_2.R | 7edb2dd9433d06fa32b310e0873607953ba0e146 | [] | no_license | lobodemonte/real-estate-data-analytics | 9f55fff39d7f90171d3a1828921122bb92729cf0 | 4f5c9c25dbc5a2f76edcc2627e18d0dfad6f2163 | refs/heads/master | 2022-09-17T07:10:39.952435 | 2020-06-01T21:30:38 | 2020-06-01T21:30:38 | 267,337,106 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,251 | r | el3220_2.R | # In-Class Assignment 2:
# Submit to NYU Classes as: [NetID]_2.R.
library(quantmod)
library(stargazer)
library(fredr)
library(tidyr)
library(PASWR2)
library(MASS)
library(repmis)
library(latex2exp)
library(dplyr)
library(ggplot2)
library(tidyverse)
library(knitr)
library(fredr) # Another library to import data from FRED
# NOTE(review): fredr is already attached above, so this second library() call
# is redundant (though harmless).
# SECURITY NOTE(review): this FRED API key is hard-coded and committed to the
# script; a key published in source control should be revoked. Prefer reading
# it from an environment variable, e.g. fredr_set_key(Sys.getenv("FRED_API_KEY")).
fredr_set_key('30e6ecb242a73869e11cb35f6aa3afc3')
# 1. Re-run all code above to ensure that it works.
# 2. Do CAPM for IBM stock for a period of your choice and test the standard hypotheses from CAPM.
# --- Question 2: CAPM regression of IBM returns on NYSE composite returns ---
# Download adjusted prices for IBM and the NYSE composite (^NYA), 2000-2019.
# getSymbols() assigns the xts objects IBM and NYA into the global environment.
getSymbols(c("IBM","^NYA"), from="2000-01-01", to="2019-12-31")
# Idiom fixes relative to the original: `<-` instead of `=` for assignment,
# and the intermediate `names` variable (which shadowed base::names) inlined.
IBM <- IBM$IBM.Adjusted
NYX <- NYA$NYA.Adjusted
# Merge the two price series on date and label the columns.
data <- merge(as.zoo(IBM), as.zoo(NYX))
colnames(data) <- c("IBM", "NYX")
data.level <- as.xts(data)                    ## Levels
data.returns <- diff(log(data.level), lag=1)  ## Log returns
data.returns <- na.omit(data.returns)         ## Dump missing values
# Scatter of IBM returns against market returns with the fitted CAPM line.
plot.ts(data.returns$NYX, data.returns$IBM, pch=16, col="darkgreen",
        main="CAPM Data 2000-2019", xlab="Returns of NYX", ylab="Returns of IBM",
        xlim=c(-.1,.1), ylim=c(-.1,.1) )
abline(lm(data.returns$IBM ~ data.returns$NYX), col="red")
grid(lw = 2)
# Fit the market model and report 99% confidence intervals for alpha and beta.
capm.ols <- lm(data.returns$IBM ~ data.returns$NYX)
stargazer(capm.ols, type="text", title="CAPM Results", single.row=TRUE,
          ci=TRUE, ci.level=0.99)
# NULL HYPOTHESIS: alpha == 0 AND beta == 1; ALTERNATIVE: alpha != 0 OR beta != 1
# At the 99% level, alpha == 0 lies inside the confidence interval for the
# intercept, but beta == 1 lies outside the confidence interval for the slope.
# We therefore REJECT the null hypothesis.
# THUS, IBM shows no significant excess return (alpha ~ 0), and its beta below 1
# suggests it is less sensitive to market movements than the NYSE composite.
# 3. Import data on short-term and long-term U.S. Treasurys for a period of your choice.
# Examine the null hypothesis of no relationship between short-term and long-term U.S. Treasurys.
# --- Question 3: relationship between short- and long-term Treasury yields ---
# Daily 3-month (DGS3MO) and 10-year (DGS10) constant-maturity yields from FRED;
# drop_na() removes market holidays reported as NA.
threemonth <- drop_na(fredr(series_id = "DGS3MO", observation_start = as.Date("2000-01-01"), observation_end = as.Date("2020-01-01") ))
tenyear <- drop_na(fredr(series_id = "DGS10", observation_start = as.Date("2000-01-01"), observation_end = as.Date("2020-01-01") ))
# Daily arithmetic (percentage) changes of each yield series.
tenyear$change <- Delt(tenyear$value, type=c("arithmetic"))
threemonth$change <- Delt(threemonth$value, type=c("arithmetic"))
# Delt() leaves the first observation NA and produces Inf when the previous
# value is zero. Zero out every non-finite change in one pass: !is.finite()
# covers NA, NaN and +/-Inf, i.e. the original's separate is.na()/is.infinite()
# replacement steps combined.
tenyear$change[!is.finite(tenyear$change)] <- 0
threemonth$change[!is.finite(threemonth$change)] <- 0
# Scatter of long-rate changes against short-rate changes with a fitted line.
plot(threemonth$change, tenyear$change,
     xlab=TeX("3 Month Yield Changes"), ylab=TeX("10 Year Yields"),
     main="Daily Interest Rate Changes 2000-2019", pch=16, col='darkblue')
grid(lw = 2)
abline(lm(tenyear$change ~ threemonth$change), col='red')
# Regress 10-year changes on 3-month changes; report 99% confidence intervals.
rates.ols <- lm(tenyear$change ~ threemonth$change)
stargazer(rates.ols, type="text", title="Interest Rate Results", single.row=TRUE,
          ci=TRUE, ci.level=0.99)
# NULL HYPOTHESIS: a 100 bps change in 3-month T-bill rates is associated with
# a change of at least 50 bps in 10-year T-bond rates.
# At the 99% confidence level, however, the estimated slope does not reach
# that threshold, so we REJECT the null hypothesis.
# THUS, there appears to be little relation between daily changes in 3-month
# Treasury note rates and 10-year Treasury bond rates over 2000-2019.
# 4. Replicate the rent and vacancy results.
# Write a paragraph about whether rents drive vacancy or vacancy drives rents.
# Consider the DiPasquale-Wheaton 4-Quadrant Model when doing so.
# Are there ways that we could develop an experiment to examine the relationship?
# In my opinion, it is vacancies that determine rents, not the other way around (at least in this limited scenario)
# If you assume a fixed amount of homogeneous Real Estate, and a number of potential tenants that varies depending on economic performance
# In an economic downturn, a property owner seeking maximum asset utilization
# (and profit) would have to fight for a limited number of tenants, and there's
# a limited number of times you can redo the lobby before you start lowering
# your rents to steal tenants from others.
# In an economic upturn, there's more people looking to start businesses, more Tenants but the same fixed available Square Footage,
#, increasing the demand (and the rent) for a limited resource.
# For an experiment it would be helpful to model the relationship between Tenants and Property Owners on a computer simulation.
# In this simulation you can control for things like location premiums, asset depreciation.
# You can then model each tenant as having a rent budget (drawn from a normal
# distribution) and the goal of securing shelter at the lowest price.
# Owners would be modeled with the goal of maximizing their income from rents.
# On each run of the simulation you can change the amount of sq footage and/or the number of tenants (and their purchasing power)
#, and look at the mean rents at the end of the simulation(s)
# --- Question 4: NYC office rent index vs. vacancy rate ---
data <- read.csv('nyc.csv')
names <- c("geo", "date", "rent", "vacancy", "cap_rate", "gross_rent")
colnames(data) <- names
# BUG FIX: the original called plot(vacancy, rent, ...) and lm('rent ~ vacancy')
# with bare column names that are not in scope (the data frame is never
# attach()ed), so the script stopped with "object 'vacancy' not found".
# The columns are now referenced through the data frame / a data= argument.
plot(data$vacancy, data$rent, col="blue", main="NYC Office Rent Index v. Vacancy", pch=16,
     xlab="Vacancy", ylab="Rent Index (2008 $)", xlim=c(0, 20), ylim=c(20, 80))
# Fit the regression once and reuse it for the plot overlay and the table.
model <- lm(rent ~ vacancy, data = data)
abline(model, col="red")
grid(lw=2)
# Report the regression with 95% confidence intervals.
stargazer(model, type="text", title="What Causes What?", single.row=TRUE,
          ci=TRUE, ci.level=0.95)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.