blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5cb4c3cc31a57b9f0f94eee9f22e566719c68ef9 | 0d12e397fbc299874e75434323e14d5d8cad12fa | /analysis/03_meff_pl/quick_look_at_meffs.R | 96b8ddf0af6a982f7b5f5ce80597ef213cad13d1 | [
"MIT",
"CC0-1.0"
] | permissive | mrc-ide/global-lmic-meffs | 398e0b9be264b3b81be1adb4d2c3a0571a538858 | f3914a986fc0c649fbf011dcd9cd31ad4dce34e7 | refs/heads/master | 2023-05-28T18:24:32.650279 | 2020-07-23T18:11:32 | 2020-07-23T18:11:32 | 272,553,030 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,132 | r | quick_look_at_meffs.R | # package set up
library(globallmicmeffs)
# Snapshot date used for every data pull below.
date_0 <- "2020-07-04"
# Report metadata (columns id, country used below), one row per fitted report.
reports <- reports_4parameter_day(date_0)
# Pre-allocated list, one slot per report; filled by the loop that follows.
# NOTE(review): the name `get` shadows base::get within this script.
get <- vector("list", nrow(reports))
# BRT mobility predictions: a list keyed by country, each with columns C and date.
brt <- get_brt_predictions(date_0)
# For each report, load the fitted pMCMC output and build a data frame of
# replicate parameters with Rt at the replicate's start date (Rt0) and at
# date_0 (Rt_now).
for (i in seq_along(get)) {
  message(i)
  out <- readRDS(file.path(here::here(),
                           "analysis/data/raw_data/server_results/archive/lmic_reports_google_pmcmc/",
                           (reports$id[i]), "grid_out.rds"))
  df <- out$replicate_parameters
  # Mobility (brt) data are only available for some countries.
  if (nrow(brt[[reports$country[i]]]) > 0) {
    # Rt at each replicate's start date, using mobility on that date.
    df$Rt0 <- vapply(seq_along(df$start_date),
                     function(x) {
                       out$pmcmc_results$inputs$Rt_func(
                         brt[[reports$country[i]]]$C[match(df$start_date[x], brt[[reports$country[i]]]$date)],
                         df$R0[x],
                         Meff = df$Meff[x]
                       )
                     }, numeric(1))
    # Rt today, using mobility on date_0.
    df$Rt_now <- vapply(seq_along(df$start_date),
                        function(x) {
                          out$pmcmc_results$inputs$Rt_func(
                            brt[[reports$country[i]]]$C[match(as.Date(date_0), brt[[reports$country[i]]]$date)],
                            df$R0[x],
                            Meff = df$Meff[x]
                          )
                        }, numeric(1))
  } else {
    # No mobility data: Rt falls back to R0. Both branches must create the
    # same columns (the original wrote df$Rt_06_16 here, which would give the
    # two branches different columns and break do.call(rbind, get) later).
    df$Rt0 <- df$R0
    df$Rt_now <- df$R0
  }
  df$iso3c <- reports$country[[i]]
  # Date of the Meff change (post-lockdown date), stored per replicate.
  df$pld <- out$interventions$date_Meff_change
  get[[i]] <- df
}
# Stack the per-country replicate data frames into one long data frame.
for_will <- do.call(rbind, get)
library(tidyverse)
# World Bank income classification, matched in by country code.
wb <- get_brt_world_bank_classification(date_0)
for_will$continent <- countrycode::countrycode(for_will$iso3c, "iso3c", "continent")
for_will$income <- wb$income_group[match(for_will$iso3c, wb$country_code)]
# Order income groups from low to high for plotting.
for_will$income <- factor(as.character(for_will$income), levels = c("Low income", "Lower middle income", "Upper middle income", "High income"))
# ECDC death time series.
ecdc <- get_ecdc(date_0)
ecdc$iso3c <- ecdc$countryterritoryCode
# Countries passing the cumulative-death threshold.
# NOTE(review): the name iso_d_10 suggests a threshold of 10 but the filter
# uses >= 100; also sum(deaths) here lacks na.rm = TRUE while sum_d below
# uses it -- confirm which is intended.
iso_d_10 <- ecdc %>% group_by(iso3c) %>% summarise(sum_d = sum(deaths)) %>% filter(sum_d >= 100) %>% select(iso3c) %>% unlist %>% as.character()
# Cumulative deaths per country (NAs removed), matched into for_will.
sum_d <- ecdc %>% group_by(iso3c) %>% summarise(sum_d = sum(deaths, na.rm = TRUE))
for_will$sum_deaths <- sum_d$sum_d[match(for_will$iso3c, sum_d$iso3c)]
library(cowplot)
## RIDGES: country-level means (one point per country) by income group.
ratio <- for_will %>% filter(iso3c %in% iso_d_10) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl / Meff, y = income, fill = income)) +
  # NOTE(review): geom_hline(yintercept = 1) on a discrete y axis -- a
  # vertical reference geom_vline(xintercept = 1) may have been intended.
  ggridges::geom_density_ridges() + geom_hline(yintercept = 1)
meff <- for_will %>% filter(iso3c %in% iso_d_10) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff, y = income, fill = income)) +
  ggridges::geom_density_ridges() + geom_hline(yintercept = 1)
meff_pl <- for_will %>% filter(iso3c %in% iso_d_10) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl, y = income, fill = income)) +
  ggridges::geom_density_ridges() + geom_hline(yintercept = 1)
# Open a graphics window, then draw: legend on top, three panels underneath.
x11()
cowplot::plot_grid(cowplot::get_legend(ratio + theme(legend.position = "top")),
                   cowplot::plot_grid(ratio + theme(legend.position = "none"),
                                      meff + theme(legend.position = "none"),
                                      meff_pl + theme(legend.position = "none"), ncol = 3),
                   rel_heights = c(1, 10), ncol = 1)
## BOXES
# As the ridge plots above, but notched boxplots, and restricted further to
# countries whose Meff change date (pld) is more than 30 days before date_0.
ratio <- for_will %>% filter(iso3c %in% iso_d_10) %>%
  filter(as.Date(pld) < as.Date(date_0) - 30) %>%
  ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl / Meff, y = income, fill = income)) +
  # Reference at ratio = 1 (post-lockdown Meff equals Meff).
  geom_boxplot(notch = TRUE) + geom_vline(xintercept = 1)
meff <- for_will %>% filter(iso3c %in% iso_d_10) %>%
  filter(as.Date(pld) < as.Date(date_0) - 30) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff, y = income, fill = income)) +
  geom_boxplot(notch = TRUE) + geom_vline(xintercept = 3)
meff_pl <- for_will %>% filter(iso3c %in% iso_d_10) %>%
  filter(as.Date(pld) < as.Date(date_0) - 30) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl, y = income, fill = income)) +
  geom_boxplot(notch = TRUE) + geom_vline(xintercept = 3)
# Legend on top, the three panels side by side underneath.
cowplot::plot_grid(cowplot::get_legend(ratio + theme(legend.position = "top")),
                   cowplot::plot_grid(ratio + theme(legend.position = "none"),
                                      meff + theme(legend.position = "none"),
                                      meff_pl + theme(legend.position = "none"), ncol = 3),
                   rel_heights = c(1, 10), ncol = 1)
## POINTS
# Scatter version of the panels above: one point per country, sized by
# cumulative deaths and coloured by continent.
ratio <- for_will %>% filter(iso3c %in% iso_d_10) %>%
  filter(as.Date(pld) < as.Date(date_0) - 30) %>%
  ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl / Meff, y = income, color = continent, size = sum_deaths)) +
  # geom_point() takes neither `notch` nor `width` (those belong to
  # geom_boxplot()/jitter), and the original also left a dangling empty
  # `position =` argument; all three stray arguments are removed.
  geom_point() + geom_vline(xintercept = 1)
meff <- for_will %>% filter(iso3c %in% iso_d_10) %>%
  filter(as.Date(pld) < as.Date(date_0) - 30) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff, y = income, color = continent, size = sum_deaths)) +
  geom_point() + geom_vline(xintercept = 3)
meff_pl <- for_will %>% filter(iso3c %in% iso_d_10) %>%
  filter(as.Date(pld) < as.Date(date_0) - 30) %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl, y = income, color = continent, size = sum_deaths)) +
  geom_point() + geom_vline(xintercept = 3)
# Legend on top, the three panels side by side underneath.
cowplot::plot_grid(cowplot::get_legend(ratio + theme(legend.position = "top")),
                   cowplot::plot_grid(ratio + theme(legend.position = "none"),
                                      meff + theme(legend.position = "none"),
                                      meff_pl + theme(legend.position = "none"), ncol = 3),
                   rel_heights = c(1, 10), ncol = 1)
# Final ridge plot across all countries (no death-count filter), with one
# ridge per income x continent combination.
for_will %>% ungroup %>% group_by(continent, iso3c, income) %>% summarise_all(mean) %>%
  ggplot(aes(x = Meff_pl / Meff, y = interaction(income, continent), fill = income)) +
  ggridges::geom_density_ridges() + geom_hline(yintercept = 1)
|
22806f69c44a83affb122486dfbc92e492b4afbc | fd7ea836b91280f1dc05435a57e8d61a811430d0 | /Function_cluster.R | c351bb55fd6c8ed927325a46520561a709d2b1eb | [] | no_license | dmitry100/Functions-in-R | b5af50beb8ccbbba7b3ed64de331a1d667ce2d4f | c43108950415d3a0df15c8a2ee78788fd692c49f | refs/heads/main | 2023-01-09T00:56:49.376845 | 2020-11-05T21:03:22 | 2020-11-05T21:03:22 | 301,327,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 256 | r | Function_cluster.R | ###Функция кластеризации df
# Append a hierarchical-clustering assignment to a data frame.
#
# Computes pairwise distances between rows (dist() defaults: Euclidean),
# builds a dendrogram with hclust() (default: complete linkage), cuts it
# into `cluster_number` groups, and returns `test_data` with one extra
# factor column `cluster` holding each row's group membership.
smart_hclust <- function(test_data, cluster_number) {
  tree <- hclust(dist(test_data))
  memberships <- cutree(tree, cluster_number)
  test_data$cluster <- as.factor(memberships)
  test_data
}
|
1b5caa1e14a821b0ba893545f22eb1e36d622425 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/CytobankBridgeR/examples/fcs_files.Rd.R | 82796fa3136b677da5026f73c405a0ca01ffcb7d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 597 | r | fcs_files.Rd.R | library(CytobankBridgeR)
### Name: fcs_files.download_fcs_files_stable
### Title: FCS Files
### Aliases: fcs_files fcs_files.download_fcs_files_stable
### ** Examples
## No test:
# Download FCS files 1-3 from experiment 22 into the given directory.
# NOTE(review): this call needs an authenticated cyto_session, which the
# examples below only create afterwards -- authenticate first when running.
fcs_files.download_fcs_files_stable(cyto_session, 22, fcs_files=c(1,2,3),
                                    directory="/my/new/download/directory/")
## End(No test)
## No test:
# Authenticate via username/password
cyto_session <- authenticate(site="premium", username="cyril_cytometry", password="cytobank_rocks!")
# Authenticate via auth_token
cyto_session <- authenticate(site="premium", auth_token="my_secret_auth_token")
## End(No test)
|
919cc5614fdebc3d63082d1058a475c16539d638 | a3a622c7dd2eed2e41558d17682d17cfb3e58d24 | /R/BigSkate_forecasting_notes.R | e66e3dd143a8b199fe4dcae8d4046eb95b704eb7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | iantaylor-NOAA/BigSkate_Doc | 92399326664d70883c144c3f14c9f65ed5ce3a61 | 4c7144a4566bd2276dcf1759e36ddce5b0cb8d7e | refs/heads/master | 2021-07-10T03:55:08.723089 | 2020-07-31T18:17:36 | 2020-07-31T18:17:36 | 179,342,625 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,071 | r | BigSkate_forecasting_notes.R | #### values from Sigma_Examples.txt from Chantel
## Category 1 (baseline s = 0.5)
## P* 0.45 0.4 0.35 0.3 0.25
## 2021 0.935 0.873 0.813 0.754 0.696
## 2022 0.930 0.864 0.801 0.740 0.679
## 2023 0.926 0.856 0.790 0.725 0.662
## 2024 0.922 0.848 0.778 0.711 0.645
## 2025 0.917 0.840 0.767 0.697 0.629
## 2026 0.913 0.832 0.756 0.684 0.613
## 2027 0.909 0.824 0.745 0.670 0.598
## 2028 0.904 0.817 0.735 0.657 0.583
## 2029 0.900 0.809 0.724 0.645 0.568
## 2030 0.896 0.801 0.714 0.632 0.554
## Category 2 (baseline s = 1.0)
## P* 0.45 0.4 0.35 0.3 0.25
## 2021 0.874 0.762 0.661 0.569 0.484
## 2022 0.865 0.747 0.642 0.547 0.460
## 2023 0.857 0.733 0.624 0.526 0.438
## 2024 0.849 0.719 0.606 0.506 0.416
## 2025 0.841 0.706 0.589 0.486 0.396
## 2026 0.833 0.693 0.572 0.467 0.376
## 2027 0.826 0.680 0.556 0.449 0.358
## 2028 0.818 0.667 0.540 0.432 0.340
## 2029 0.810 0.654 0.524 0.415 0.323
## 2030 0.803 0.642 0.510 0.399 0.307
## Category 3 (sigma = 2)
## P* 0.45 0.4 0.35 0.3 0.25
## All Years 0.778 0.602 0.463 0.350 0.260
#### for forecast file:
## # Sigma values below based on Category 2 values with P* = 0.45
## #Yr Sigma
## 2019 1.0
## 2020 1.0
## 2021 0.874
## 2022 0.865
## 2023 0.857
## 2024 0.849
## 2025 0.841
## 2026 0.833
## 2027 0.826
## 2028 0.818
## 2029 0.810
## 2030 0.803
## -9999 0
# mean landings for the years 2014:2018:
# (bs82 is a model-output object defined elsewhere -- its $catch table with
# columns Obs, Yr, Fleet is read here; confirm it is loaded before running.)
mean(bs82$catch$Obs[bs82$catch$Yr %in% 2014:2018 & bs82$catch$Fleet == 1])
#[1] 258.4
mean(bs82$catch$Obs[bs82$catch$Yr %in% 2014:2018 & bs82$catch$Fleet == 4])
#[1] 54.76
# Copy the model object to use as the forecast baseline.
mod.fore <- bs82
# resulting fixed input catch for forecast file:
#_Yr Seas Fleet Catch(or_F)
## 2019 1 1 258.4
## 2019 1 4 54.76
## 2020 1 1 258.4
## 2020 1 4 54.76
|
0151c089195c579e6901664b71a28e8adee4bc8f | 3d258230b3d08f43930e459fbe440e29849361ea | /Project_141B/server.R | 54055396bdc57cd8615e96aacb6fb26d8ca3fa70 | [] | no_license | jhuwu/Final_Project | 7bc3f76fe894c83a8ee6b1337f6d9d687b9a9ce0 | 1dbd89493890820e438bf0ebc9bb1f7d5214ec63 | refs/heads/master | 2021-04-10T01:59:47.485628 | 2020-03-21T03:48:29 | 2020-03-21T03:48:29 | 248,901,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,286 | r | server.R | server <- function(input, output, session) {
##############################################################################
# Tab 1: air quality at UC Davis (fixed coordinates 38.54, -121.74).
# Build the AirVisual "nearest city" request URL.
url_davis <- paste("api.airvisual.com/v2/nearest_city?lat=", "38.54",
                   "&lon=", "-121.74",
                   "&key=", my_key,
                   sep = ""
)
# Fetch the response and parse the JSON body.
davis_data <- httpGET(url_davis)
davis_data <- fromJSON(davis_data)
# Current weather and pollution records as one-row data frames.
# NOTE(review): `whether` is presumably a typo for "weather".
whether <- as.data.frame(davis_data$data$current$weather)
pollution <- as.data.frame(davis_data$data$current$pollution)
# Render tab 1: overlay temperature (tp) and wind speed (ws) on a background
# image at fixed pixel positions using inline-styled HTML.
output$text1 <- renderText({
  paste0(
    "<div style='position:relative;'>",
    "<img src='pic.jpg'/>",
    "<div style='position:absolute; z-index:2; left:500px; top:3px;color:black;font-size:30px'>",
    whether$tp, "°</div>", "<div style='position:absolute; z-index:2; left:408px; top:157px;color:black;font-size:25px'>",
    whether$ws, "</div>", "</div>"
  )
})
##############################################################################
# Tab 2: look up the nearest city for user-entered latitude/longitude.
# data1() re-runs when the "go1" button is pressed; lat/lon are isolate()d so
# typing alone does not trigger a query.
data1 <- reactive({
  input$go1
  lat <- isolate(input$lat)
  lon <- isolate(input$lon)
  # Build the AirVisual nearest-city URL from the entered coordinates.
  url <- paste("api.airvisual.com/v2/nearest_city?lat=", lat,
               "&lon=", lon,
               "&key=", my_key,
               sep = ""
  )
  # Fetch and parse the JSON response.
  data <- httpGET(url)
  data <- fromJSON(data)
  # A successful response carries 5 fields in data$data; anything else is
  # treated as "no nearest city" for these coordinates.
  if (length(data$data) != 5) {
    output_data <- cbind("warning" = "no nearest city")
  } else {
    # Assemble one row: location identifiers, current weather, the entered
    # coordinates, and current pollution.
    output_data <- cbind(
      "Country" = unname(data$data$country),
      "State" = unname(data$data$state),
      "City" = unname(data$data$city),
      as.data.frame(data$data$current$weather),
      "long" = as.numeric(lon),
      "lat" = as.numeric(lat),
      as.data.frame(data$data$current$pollution)
    )
  }
  # Replace the API's short variable names with readable column names.
  output_data <- plyr::rename(output_data, c(
    tp = "Temperature", pr = "Pressure", hu = "Humidity", aqius = "AQI(US)"))
  output_data
})
################# output 1
# Table for tab 2: show only the identifying and renamed weather columns.
output$table <- renderTable({
  all_col_name <- colnames(data1())
  show_col <- all_col_name[which(all_col_name %in%
                                   c("Country", "State",
                                     "City", "Temperature", "Pressure", "Humidity", "AQI(US)"))]
  data1()[, show_col]
})
# Map for tab 2: marker at the entered coordinates with a weather popup.
# data1() of length 1 is the "no nearest city" case, so nothing is drawn.
output$plot1 <- renderLeaflet({
  plotdata <- data1()
  if (length(plotdata) != 1) {
    leaflet() %>%
      addTiles() %>% # Add default OpenStreetMap map tiles
      addMarkers(
        lng = plotdata$long, lat = plotdata$lat,
        popup = paste(
          "tp", plotdata$tp, "<br>",
          "pr:", plotdata$pr, "<br>",
          "hu:", plotdata$hu, "<br>",
          "ws:", plotdata$ws, "<br>",
          "wd:", plotdata$wd
        )
      ) %>%
      setView(lng = plotdata$long, lat = plotdata$lat, zoom = 10)
  }
})
##############################################################################
#
# Tab 3: fetch air quality for every city in the state chosen in the UI.
# Fetch one city's record (name, coordinates, current weather/pollution).
# Returns NULL when the request or parse fails (wrapped with purrr::safely).
get_city_data <- function(city_name) {
  url_city <- paste("api.airvisual.com/v2/city?city=", city_name,
                    "&state=", input$select_state, "&country=USA&key=", # input$select_state
                    my_key,
                    sep = ""
  )
  # Fetch and parse the JSON; the useful payload is city_data$data.
  try_get_city_data <- function(url) {
    city_data <- httpGET(url)
    city_data <- fromJSON(city_data)
    city_data_df <- city_data$data
  }
  # safely() returns list(result, error); keep only the result (NULL on error).
  get_city_data <- safely(try_get_city_data, NULL)(url_city)
  get_city_data <- get_city_data$result
  return(get_city_data)
}
# Reactive: download the data for all cities of the selected state.
download_show_data <- reactive({
  if (input$select_state != "") {
    # List all cities of the chosen state.
    url2 <- paste("api.airvisual.com/v2/cities?state=", input$select_state, # all_state_list,#input$select_state,
                  "&country=USA&key=", my_key,
                  sep = ""
    )
    # Fetch and parse the JSON city list.
    all_city_of_state <- httpGET(url2)
    all_city_of_state <- fromJSON(all_city_of_state)
    if (all_city_of_state$status == "success") {
      # Flatten the city names into a character vector.
      all_city_of_state <- unlist(all_city_of_state$data)
      # Progress notifications while the per-city downloads run.
      showNotification("start to download data from web")
      message("Dowloading City Data")
      ID2 <- showNotification("Dowloading City Data", duration = NULL)
      # Download each city, then bind one row per city: name + coordinates
      # + current weather + current pollution.
      get_data <- do.call(
        rbind,
        lapply(lapply(all_city_of_state, FUN = get_city_data),
               FUN = function(x) {
                 cbind(
                   data.frame(
                     "city_name" = x$city,
                     "lon" = x$location$coordinates[1],
                     "lat" = x$location$coordinates[2]
                   ),
                   x$current$weather,
                   x$current$pollution
                 )
               }
        )
      )
      # Tidy up: tibble with unique names, dropping the timestamp columns.
      data <- get_data %>%
        as_tibble(.name_repair = "unique") %>%
        select(-starts_with("ts"))
      removeNotification(ID2)
      showNotification("done !")
    } else {
      # NOTE(review): on failure `data` is never assigned in this branch,
      # so the rename below would error -- confirm intended behaviour.
      showNotification("Failed in get the city of state")
    }
    # Replace the API's short variable names with meaningful column names.
    data <- plyr::rename(data, c(
      tp = "temperature", pr = "pressure", hu = "humidity",
      ws = "wind speed", wd = "wind direction",
      ic = "icon code", aqius = "AQI(US)", mainus = "pollutant(US)",
      aqicn = "AQI(CN)", maincn = "pollutant(CN)"
    ))
    data
  }
})
#
#
#
#
# When new state data arrive, repopulate the variable pickers with every
# column except the identifiers (city name and coordinates).
observeEvent(download_show_data(), {
  # showNotification("update bottom")
  show_variable <- colnames(download_show_data())
  show_variable <- show_variable[-which(show_variable %in% c("lon", "city_name", "lat"))]
  # if (is.null(show_variable)) {stop("data is error")}
  updateCheckboxGroupInput(session, "show_num_variable", choices = show_variable)
  updateSelectInput(session, "show_num_variable_map", choices = show_variable)
  showNotification("update bottom")
})
#
#
# Reactive plot 1: grouped bar chart of the checked variables per city.
show_plot_1 <- reactive({
  req(input$show_num_variable)
  # Keep city name plus the chosen columns, melt to long form, and coerce
  # values to numeric for plotting.
  download_show_data() %>%
    dplyr::select(c("city_name", input$show_num_variable)) %>%
    reshape2::melt(id.vars = c("city_name")) %>%
    mutate(value = as.numeric(value)) %>%
    # Dodged columns, one group per variable; slanted x labels for city names.
    ggplot(aes(
      x = city_name, y = value,
      color = variable, group = variable, fill = variable
    )) +
    geom_col(position = "dodge") + # theme_ft_rc() + scale_fill_ft() + scale_color_ft() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
})
#
# Reactive plot 2: plotly choropleth-style point map of one chosen variable.
show_plot_2 <- reactive({
  req(input$show_num_variable_map)
  # Geo layout: continental US (Albers projection) with light styling.
  g <- list(
    scope = "usa",
    projection = list(type = "albers usa"),
    showland = TRUE,
    landcolor = toRGB("gray85"),
    subunitwidth = 1,
    countrywidth = 1,
    subunitcolor = toRGB("white"),
    countrycolor = toRGB("white")
  )
  # Keep identifiers plus the one selected column, renamed to "value".
  plot_on_map_data <- download_show_data() %>% dplyr::select(c("city_name", "lon", "lat", input$show_num_variable_map))
  # reshape2::melt(id.vars = c("city_name")) %>%
  colnames(plot_on_map_data) <- c("city_name", "lon", "lat", "value")
  plot_geo(plot_on_map_data,
           lat = ~lat, lon = ~lon,
           color = ~value
  ) %>%
    add_markers() %>%
    layout(title = "data on map", geo = g)
})
#
#
######################### output 2
# Final outputs for the third tab: raw table, bar chart, and map.
output$show_table <- renderTable({
  download_show_data()
})
output$plot_variable <- renderPlot({
  show_plot_1()
})
output$show_map <- renderPlotly({
  show_plot_2()
})
}
|
c26968a14d6e6bcac7cee7ce415b59dbb634f4a0 | 97e8719e1d9488eb984922c0b7650d72456ebccc | /R/Stats_phosphate_shallow.R | 0e26cb0315d4f67acdd98d1e99ad242ec21f98fa | [] | no_license | ShunHasegawa/FACE_Lysimeter | 86a4342b0fa57247db1fdb6ad1962247c6ddc218 | 8cc1d2131a151441449fed2f34313d3c7881bcc5 | refs/heads/master | 2021-01-01T06:50:28.556022 | 2015-03-13T13:52:34 | 2015-03-13T13:52:34 | 20,640,929 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,604 | r | Stats_phosphate_shallow.R | ## ----Stat_FACE_Lys_Phosphate_S_preCO2
###########
# Pre-CO2 #
###########
# Distribution of shallow-lysimeter phosphate before the CO2 treatment.
range(lys$po[lys$depth == "shallow" & lys$pre])
bxplts(value = "po", data = subsetD(lys, depth == "shallow" & pre))
# remove higher outlier
boxplot(lys$po[lys$depth == "shallow" & lys$pre])
PRmOl <- subset(lys, po < 0.4)
bxplts(value = "po", data = subsetD(PRmOl, depth == "shallow" & pre))
# power(1/3) transform seems slightly fine
# Compare different random-factor structures for the cube-root response.
# NOTE(review): these models are fitted on the full lys data rather than the
# outlier-removed PRmOl -- confirm this is intended.
m1 <- lme((po)^(1/3) ~ co2 * time, random = ~1|ring/plot, data = subsetD(lys, depth == "shallow" & pre))
m2 <- lme((po)^(1/3) ~ co2 * time, random = ~1|ring, data = subsetD(lys, depth == "shallow" & pre))
m3 <- lme((po)^(1/3) ~ co2 * time, random = ~1|id, data = subsetD(lys, depth == "shallow" & pre))
anova(m1, m2, m3)
# m3 is slightly better
# Compare autocorrelation structures for the chosen model.
atml <- atcr.cmpr(m3)
atml$models
# no need for correlation; keep the first (uncorrelated) model
Iml_S_pre <- atml[[1]]
# The initial model is:
Iml_S_pre$call
Anova(Iml_S_pre)
# model simplification
MdlSmpl(Iml_S_pre)
# co2:time and co2 are removed
Fml_S_pre <- MdlSmpl(Iml_S_pre)$model.reml
# The final model is:
Fml_S_pre$call
Anova(Fml_S_pre)
# model diagnosis: residual and quantile plots
plot(Fml_S_pre)
qqnorm(Fml_S_pre, ~ resid(.)|id)
qqnorm(residuals.lm(Fml_S_pre))
qqline(residuals.lm(Fml_S_pre))
## ----Stat_FACE_Lys_Phosphate_S_postCO2
############
# Post-CO2 #
############
# Distribution of shallow-lysimeter phosphate after CO2 started.
range(lys$po[lys$depth == "shallow" & lys$post])
bxplts(value = "po", ofst= 0.02, data = subsetD(lys, depth == "shallow" & post))
# remove the higher outlier
boxplot(lys$po[lys$depth == "shallow" & lys$post])
PRmOl <- subset(lys, po < 0.4)
bxplts(value = "po", ofst= 0.03, data = subsetD(PRmOl, depth == "shallow" & post))
# use box-cox transformed response (offset 0.03, power -1.2323)
# The initial model is
Iml_S_post <- lmer((po + .03)^(-1.2323) ~ co2 * time + (1|block) + (1|ring) + (1|id),
                   data = subsetD(PRmOl, depth == "shallow" & post), na.action = "na.omit")
Anova(Iml_S_post)
# The final model, after stepwise simplification
Fml_S_post <- stepLmer(Iml_S_post)
Anova(Fml_S_post)
# F-test version of the ANOVA table, kept for the summary below
AnvF_P_S_Post <- Anova(Fml_S_post, test.statistic = "F")
AnvF_P_S_Post
summary(Fml_S_post)
# model diagnosis
plot(Fml_S_post)
qqnorm(resid(Fml_S_post))
qqline(resid(Fml_S_post))
## ----Stat_FACE_Lys_Phosphate_S_preCO2_Smmry
# The initial model is:
Iml_S_pre$call
Anova(Iml_S_pre)
# The final model is :
Fml_S_pre$call
Anova(Fml_S_pre)
## ----Stat_FACE_Lys_Phosphate_S_postCO2_Smmry
# The initial model is:
Iml_S_post@call
Anova(Iml_S_post)
# The final model is :
Fml_S_post@call
# Chi-square tests
Anova(Fml_S_post)
# F tests
AnvF_P_S_Post
|
c9fc8c45b21dbddb3cccc639b3380132bdcd455c | 11436cc2c8745ea7ad5813d3066c5aef96ae6f28 | /man/integral.Rd | ad97f766f2a4de96177b1503c76e707e0b190f63 | [] | no_license | tankwin08/waveformlidar | 4cfe51298e8a601748bda38241d6c6372593705f | 5e689ccfb3f7ab1f2743a3a515923971e0735263 | refs/heads/master | 2021-05-16T14:06:25.194624 | 2020-09-23T02:05:20 | 2020-09-23T02:05:20 | 118,007,742 | 29 | 12 | null | null | null | null | UTF-8 | R | false | true | 2,512 | rd | integral.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integral.R
\name{integral}
\alias{integral}
\title{integral}
\usage{
integral(
y,
smooth = TRUE,
rescale = TRUE,
thres = 0.2,
width = 3,
tr = 1,
dis = 20
)
}
\arguments{
\item{y}{is the waveform intensities. If the record contains other information (non-intensity columns), remove it so that only intensities remain before running this function.}
\item{smooth}{is tell whether you want to smooth the waveform to reduce the effect of some obvious noise. Default is TRUE.}
\item{rescale}{is to determine whether you want to rescale the waveform intensity or not. Here we used the minimum intensity of each waveform to conduct rescaling.
Default is using rescaling.}
\item{thres}{is to determine if the detected peak is the real peak whose intensity should be higher than threshold*maximum intensity.
Default is 0.2.}
\item{width}{width of moving window.Default is 3, must be integer between 1 and n.This parameter ONLY work when the smooth is TRUE.}
\item{tr}{the temporal resolution of waveform.Default is 1 ns. Must be integer from 1 to n.}
\item{dis}{the distance from last echo (assumed ground) which determine the boundary between ground and vegetation.
Default is 20 ns which equals to 3 m (20*0.15*1).This means the ground part signals are from
the assumed ground location to 20 ns above or the signals of vegetation are from waveform begining to 3 m above ground.}
}
\value{
return the integral of waveform intensity for the ground, vegetation parts, the whole waveform above ground
and the integral ratio between vegetation and the whole waveform.
}
\description{
The function allows you to calculate the integral of intensity from ground part, vegetation part, sum of them
and ratio between vegation integral and total integral with user-defined vegetation and ground boundary.
}
\examples{
data(return)
x<-return[1,]
##if we kept everything as default, before you use this function, you need to know the
## temporal resolution of waveform, default is 1 ns.
##for one peak this generally does not make much sense, since usually only
##the ground is present in this case.
r1<-integral(x)
#if you didn't want to smooth,
r2<-integral(x,smooth=FALSE)
##you can also define the boundary between vegetation and ground by adjusting dis
#if we assign 15, it means vegetation starts at 2.25 (15*1*0.15) m above the last echo.
r3<-integral(x,dis=15)
# when it comes to the waveform with several peaks
xx<-return[182,]
rr1<-integral(xx)
}
|
58fd963ce12d07c382e89bafca4dd5f1af6c8543 | 1ad621175b0e0f96555bfeb861a532a33e6d8ba7 | /functions/save.raw.data.R | d7b02a17e9facd73b6a3b3a4700839d5f8440698 | [] | no_license | enpjp/VC55-weather | 7fe0ea4e51ef2d3ff17c0b8aa858b26f9ff5e297 | 3cf7216eae7c51bcf83630d4916a9f3728c2c923 | refs/heads/master | 2023-04-14T22:00:26.097020 | 2023-03-17T09:39:19 | 2023-03-17T09:39:19 | 450,628,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 559 | r | save.raw.data.R | #' Save data in datumTriple format
#'
#' @param data
#' @param path.to.save
#'
#' @return
#' @export save.raw.data
#'
save.raw.data <- function(data, file.to.save){
path.to.save <- fs::path("data-raw", file.to.save, ext = "rds")
triple <- data %>%
map_df(as.character) %>% # Convert to character
pivot_longer(!datumEntity, # datumEntity as key
names_to = "datumAttribute", # Attribute name
values_to = "datumValue" # Save value
) %>% drop_na() # Drop NAs
saveRDS(triple,path.to.save)
} |
4ae80b2a89895430f711d81ef7f76daaefddbdbb | da4e3b5d4b0279bc1f6ff6381e19d9091e760f58 | /man/summary.morphyPtr.Rd | ea8c40f681a92df8b437475f0995c3a8b2366c44 | [] | no_license | ms609/inapplicable | e12df7a629b16fbb5ffaef6790377d8e2df87a68 | f749ef53df198c9d2d954bded8987e10fffdd3f1 | refs/heads/master | 2021-01-10T06:48:09.364877 | 2018-01-24T08:51:38 | 2018-01-24T08:51:38 | 54,025,855 | 2 | 1 | null | 2017-11-04T07:04:09 | 2016-03-16T11:22:34 | C | UTF-8 | R | false | true | 513 | rd | summary.morphyPtr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/morphy_objects.R
\name{summary.morphyPtr}
\alias{summary.morphyPtr}
\title{Details the attributes of a morphy object}
\usage{
\method{summary}{morphyPtr}(object, ...)
}
\arguments{
\item{object}{A Morphy object}
\item{\dots}{Additional arguments passed to or from other methods.}
}
\value{
A list detailing the number of taxa, internal nodes, and characters and their weights.
}
\description{
Details the attributes of a morphy object
}
\author{
Martin R. Smith
}
|
e4690d0c24056fecbecba465bcec4a056596af4f | d2abf6cf847c276d43f0cfaf70c417e8e126f302 | /run_analysis.R | 76078611c2d794d3a412acb2a89199952b7d6de3 | [] | no_license | cahille/gettingAndCleaningDataCourseProject | 4b9cade77921ce34091dc67baabbe778a48b8f8a | 5d46db04c5801f778c189408a8d378f9691f468d | refs/heads/master | 2020-04-05T23:19:59.153051 | 2015-03-21T07:41:52 | 2015-03-21T07:41:52 | 32,623,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,046 | r | run_analysis.R | library(dplyr)
library(reshape2)

# slurp in the train data (sep="" splits on any whitespace)
xtrain <- read.table("train/X_train.txt", header=FALSE, sep="", stringsAsFactors=FALSE)
subjectTrain <- read.table("train/subject_train.txt", header=FALSE, sep="", stringsAsFactors=FALSE)
ytrain <- read.table("train/y_train.txt", header=FALSE, sep="", stringsAsFactors=FALSE)

# slurp in the test data (use <- consistently; the original mixed = and <-)
xtest <- read.table("test/X_test.txt", header=FALSE, sep="", stringsAsFactors=FALSE)
subjectTest <- read.table("test/subject_test.txt", header=FALSE, sep="", stringsAsFactors=FALSE)
ytest <- read.table("test/y_test.txt", header=FALSE, sep="", stringsAsFactors=FALSE)

# slurp in the features and the activity labels
features <- read.table("features.txt", header=FALSE, sep="", stringsAsFactors=FALSE)
labels <- read.table("activity_labels.txt", header=FALSE, sep="", stringsAsFactors=FALSE)

# Name the measurement columns FIRST, one name per column. (The original
# assigned the already filtered/deduplicated feature vector as names, whose
# length does not match the number of columns.)
names(xtest) <- features$V2
names(xtrain) <- features$V2

# keep the unique feature names, grab the mean/std ones, drop the freq ones
features <- unique(features$V2)
features <- grep("mean|std", features, ignore.case=TRUE, value=TRUE)
features <- features[!grepl("[fF]req", features)]

# merge the test and train data
x <- rbind(xtest, xtrain)
y <- rbind(ytest, ytrain)
subject <- rbind(subjectTest, subjectTrain)

# name the Subject column
names(subject) <- c('Subject')
# filter down to the selected features
data <- x[, features]
# add the subject column
data$Subject <- subject$Subject
# name the Activity column
names(y) <- c('Activity')
# add the Activity column
data$Activity <- y$Activity
# Look up the activity label for every row. Vectorized indexing replaces the
# original element-by-element loop (which also used the unsafe 1:length(x)
# pattern): labels$V2 is indexed directly by the numeric activity code.
data$ActivityLabeled <- labels$V2[data$Activity]
# Melt to long form keyed on Subject + ActivityLabeled, then cast back to
# wide, averaging every selected feature within each (activity, subject) pair.
meltData <- melt(data, id.vars=c("Subject", "ActivityLabeled"), measure.vars=features)
tidyData <- dcast(meltData, ActivityLabeled + Subject ~ variable, mean)
# write out the tidyData table
# NOTE(review): `row.name` relies on partial argument matching of
# write.table's `row.names`; spell it out in a future edit.
write.table(tidyData, file="tidyData.txt", row.name=FALSE)
# enjoy!
|
bf1a4c50ebf61caec5e0b0ac51b5ba28aadad684 | fded24639877669796deaa9e9b73b200a8c564e6 | /R/simulate-binary-phenotypes.R | 2e235671a6e877b3a4be635a9925f524df741792 | [] | no_license | AdamDugan/FUNGI | 8bc50b4daf301d54a254edba18856938eae5268c | ca75d2894085f85ccbc8b0be1cf885f21b76d212 | refs/heads/master | 2020-12-04T22:08:49.755290 | 2020-03-04T19:07:15 | 2020-03-04T19:07:15 | 231,916,779 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,041 | r | simulate-binary-phenotypes.R |
#############################################
## Items to Help Troubleshoot the Function ##
#############################################
# ## Load the 1000 Genomes data
# ped <- read.table(file = "olgas-files/ped.ped",
# sep = "\t")
# info <- read.table(file = "olgas-files/info.info",
# header = TRUE)
#
# ## Randomly select a starting position for the genetic region
# seq_start <- round( runif(n = 1, min = 0, max = (ncol(ped) - 500)) )
#
# ## Calculate the ending point for the genetic region
# seq_end <- (seq_start + 500 - 1)
#
# ## Extract the indexes for the selected SNPs
# variants_all = seq_start:seq_end
#
# ## Randomly select individuals to include
# geno <- ped[ sample(x = 1:nrow(ped),
# size = 300,
# replace = TRUE), ]
#
# ## Subset to the genetic region
# geno <- geno[, variants_all ]
#
# ## Define some inputs
# genotype_matrix <- as.matrix(geno)
# variants_maf <- info$maf[ variants_all ]
# variants_index <- variants_all
# proportions_true <- c(0.10, 0.10, 0.10)
# effects_range_true <- data.frame(min = c(0.005, 0.005, 0.005),
# max = c(0.025, 0.025, 0.025))
# effects_range_null <- c(0.000, 0.005)
# corr_matrix <- matrix(c(1, 0, 0,
# 0, 1, 0,
# 0, 0, 1),
# nrow = 3, ncol = 3, byrow = TRUE)
# dichotomize_probabilistically <- TRUE
#
# ## Remove R objects
# rm(seq_start, seq_end, variants_all, geno)
########################################
## A Function to Simulate Binary Data ##
########################################
#' Simulate correlated binary phenotypes from a genotype matrix.
#'
#' For each of length(proportions_true) phenotypes: a random subset of
#' variants is chosen to be causal, per-variant effect sizes (proportions of
#' variance explained) are drawn uniformly from the causal/null ranges and
#' transformed to allelic effects, correlated Gaussian noise is added via
#' MASS::mvrnorm(), and the resulting continuous scores are dichotomized.
#'
#' @param genotype_matrix Numeric matrix (individuals x variants) of genotypes.
#' @param variants_maf Minor-allele frequency of each variant (one per column).
#' @param variants_index Original index of each variant (bookkeeping only).
#' @param proportions_true Proportion of variants that are causal, one entry
#'   per phenotype; its length defines the number of phenotypes simulated.
#' @param effects_range_true data.frame with columns min/max giving, per
#'   phenotype, the uniform range for causal effect sizes.
#' @param effects_range_null Length-2 vector giving the uniform range for
#'   null (non-causal) effect sizes.
#' @param corr_matrix Correlation matrix for the phenotypes' error terms.
#' @param dichotomize_probabilistically If TRUE, each row is compared to its
#'   own uniform draw over the phenotype's observed range; if FALSE, a single
#'   uniform threshold is drawn per phenotype.
#' @return data.frame with the continuous phenotypes (X_Cont_1, ...) followed
#'   by the binary phenotypes (X1, ...).  The per-variant bookkeeping table
#'   is attached as attribute "simulation_details".
simulate_binary_phenotypes <- function(genotype_matrix,
                                       variants_maf,
                                       variants_index,
                                       proportions_true = c(0.10, 0.10, 0.10),
                                       effects_range_true = data.frame(min = c(0.005, 0.005, 0.005),
                                                                       max = c(0.025, 0.025, 0.025)),
                                       effects_range_null = c(0.000, 0.005),
                                       corr_matrix = matrix(c(1, 0, 0,
                                                              0, 1, 0,
                                                              0, 0, 1),
                                                            nrow = 3, ncol = 3, byrow = TRUE),
                                       dichotomize_probabilistically = TRUE){
  ## Fail fast if MASS is unavailable.  requireNamespace() (replacing the
  ## original library()/require() pair) avoids attaching MASS to the caller's
  ## search path; mvrnorm() is called through the MASS:: namespace below.
  if (!requireNamespace("MASS", quietly = TRUE)) {
    stop("Package 'MASS' is required by simulate_binary_phenotypes().",
         call. = FALSE)
  }

  ## Number of variants in the genetic region (one column per variant)
  n_variants <- ncol(genotype_matrix)

  ## Per-variant bookkeeping, returned as an attribute of the result
  simulation_details <- data.frame(Variant_MAF = variants_maf,
                                   Variant_Index = variants_index)

  ####################################
  ## Simulate Continuous Phenotypes ##
  ####################################
  ## How many variants are truly causal for each phenotype?
  n_variants_true <- round(n_variants*proportions_true)

  ## Randomly select the causal variants (one sorted column per phenotype).
  ## NOTE(review): apply() returns a matrix only because all entries of
  ## n_variants_true are equal under the default proportions; unequal counts
  ## would produce a list and break the steps below -- confirm before varying
  ## proportions_true.
  variants_true <- apply(X = matrix(n_variants_true),
                         MARGIN = 1,
                         FUN = function(x){
                           tmp <- sample(x = 1:n_variants, size = x)
                           return( tmp[ order(tmp) ] ) } )

  ## The complementary, non-causal variants for each phenotype
  variants_null <- apply(X = variants_true,
                         MARGIN = 2,
                         FUN = function(x){
                           tmp <- 1:n_variants
                           return( tmp[ !tmp %in% x ] ) } )

  ## Label each variant with the phenotype(s) for which it is causal
  ## (e.g. "Causal1Causal3"); an empty string means never causal.
  simulation_details$Variants_Binary <- ""
  for(i in 1:nrow(simulation_details)){
    for(k in 1:ncol(variants_true)){
      if( i %in% variants_true[, k] ){
        simulation_details$Variants_Binary[i] <- paste0(simulation_details$Variants_Binary[i], "Causal", k)
      }
    }
  }

  ## Draw causal effect sizes (proportion of variance explained) per phenotype
  effects_causal <- matrix(NA, ncol = length(proportions_true), nrow = max(n_variants_true))
  for(i in 1:ncol(effects_causal)){
    effects_causal[, i] <- runif(n = n_variants_true[i],
                                 min = effects_range_true$min[i],
                                 max = effects_range_true$max[i])
  }

  ## Draw null effect sizes per phenotype.
  ## NOTE(review): the row count uses min(n_variants_true) while each column
  ## is filled with (n_variants - n_variants_true[i]) draws; these agree only
  ## when all causal counts are equal (true for the defaults).
  effects_null <- matrix(NA, ncol = length(proportions_true), nrow = (n_variants - min(n_variants_true)) )
  for(i in 1:ncol(effects_null)){
    effects_null[, i] <- runif(n = (n_variants - n_variants_true[i]),
                               min = effects_range_null[1],
                               max = effects_range_null[2])
  }

  ## Interleave causal and null effects into one n_variants x phenotype matrix
  effects <- matrix(NA, nrow = n_variants, ncol = length(proportions_true))
  for(i in 1:ncol(effects)){
    effects[ variants_true[, i], i] <- effects_causal[, i]
    effects[ variants_null[, i], i] <- effects_null[, i]
  }

  ## Convert each proportion of variance explained into an allelic effect,
  ## beta = sqrt( (h2 / (1 - h2)) / (2*p*q) ), then flip the sign at random
  ## so effects can be either protective or deleterious.
  effects_transformed <- matrix(NA, ncol = ncol(effects), nrow = nrow(effects))
  for(i in 1:nrow(effects)){
    for(k in 1:ncol(effects)){
      ## Minor (q) and major (p) allele frequencies for this variant
      q <- variants_maf[i]
      p <- (1 - q)
      effects_transformed[i, k] <- sqrt( ( effects[i, k] / (1 - effects[i, k] ) ) / (2*p*q) )
      effects_transformed[i, k] <- (effects_transformed[i, k] * sample(x = c(-1, 1), size = 1))
    }
  }

  ## Correlated error terms, one column per phenotype
  errors <- MASS::mvrnorm(n = nrow(genotype_matrix),
                          mu = rep(0, length(proportions_true)),
                          Sigma = corr_matrix)

  ## Continuous phenotypes: genetic score plus correlated noise
  geno_matrix <- as.matrix(genotype_matrix)
  X <- (geno_matrix %*% effects_transformed) + errors

  ## Collect the continuous phenotypes as columns X_Cont_1, X_Cont_2, ...
  dat <- data.frame(X_Cont_1 = X[, 1],
                    stringsAsFactors = FALSE)
  for(i in 2:ncol(X)){
    dat[, paste0("X_Cont_", i) ] <- X[, i]
  }
  row.names(dat) <- NULL

  ## Dichotomize each continuous phenotype into a 0/1 column X1, X2, ...
  ## (the loop range is fixed before the new columns are appended).
  if( dichotomize_probabilistically ){
    ## Each row is compared against its own uniform draw over the phenotype's
    ## observed range, so case probability rises with the continuous value.
    for(k in 1:ncol(dat)){
      dat[, paste0("X", k) ] <- as.numeric( dat[, k] > runif(n = nrow(dat), min = min(dat[, k]), max = max(dat[, k])) )
    }
  } else {
    ## A single uniform threshold per phenotype: a hard cut at one random
    ## point within the observed range.
    for(k in 1:ncol(dat)){
      tmp <- runif(n = 1, min = min(dat[, k]), max = max(dat[, k]))
      dat[, paste0("X", k) ] <- as.numeric( dat[, k] > tmp )
    }
  }

  ## Optional diagnostics:
  ## boxplot(dat$X_Cont_1 ~ dat$X1)
  ## boxplot(dat$X_Cont_2 ~ dat$X2)
  ## boxplot(dat$X_Cont_3 ~ dat$X3)

  ## Expose the per-variant simulation bookkeeping without changing the
  ## returned data.frame's columns (previously computed but discarded).
  attr(dat, "simulation_details") <- simulation_details

  ## Return the simulated continuous + binary phenotypes
  return(dat)
}
e49c5b2b8e1a9fcc8b73d78c640d565eed559354 | 409c1e62254b8926387f337850acfa8e77ff5864 | /R/gpl341 validation.R | d7db0b97d08b998e6e4fb59d05c1fa4ddef68677 | [] | no_license | mjsduncan/sf | 4772861bc8c60f602359c0f315f48d966137cc22 | fb511aad59f668bc8178811d028f3afdbd38aaf2 | refs/heads/master | 2021-01-18T21:34:03.210834 | 2016-04-03T20:12:11 | 2016-04-03T20:12:11 | 20,629,118 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,620 | r | gpl341 validation.R | ### gpl341 validation by gds
# start with jpmEset.rdata: lapply(jpmEset, exprs): 15923 probe values per sample
load("~/GitHub/stevia/data/jpmEset.rdata")
rawEsets <- jpmEset[c("stromal", "r_hippo", "oculomotor", "spinal_cord")]
rm(jpmEset)
# apply impute::impute.knn k=10 nearest neihbors from max block size of 1500 genes, > 50% missing in block => column mean; $data is result
# apply PGSEA::aggregateExprs combining current probe SYMBOL annotations with "median": 10000 gene symbols per sample
# convert RNA expression levels to log2
# turn back to expression set with associated sample binary age variable: 0 = young, 1 = old
load("~/GitHub/stevia/data/symEsets.rdata")
gpl341Esets <- symEsets[c("stromal", "r_hippo", "oculomotor", "spinal_cord")]
rm(symEsets)
sapply(gpl341Esets, function(x) dim(exprs(x)))
# stromal r_hippo oculomotor spinal_cord
# [1,] 10000 10000 10000 10000
# [2,] 3 78 9 9
library("ggplot2")
# genemeta analysis
library("GeneMeta")
load("~/GitHub/stevia/data/GMresults.rdata")
gpl341.fdr <- gpl.fdr$GPL341
gpl341.zscore <- gpl.zscore$GPL341
rm(gpl.fdr, gpl.zscore)
geneplotter::histStack(as.data.frame(gpl341.zscore[,1:4]), breaks = 100)
geneplotter::histStack(as.data.frame(gpl341.zscore[,2:4]), breaks = 100)
psych::multi.hist(gpl341.zscore[, 2:5], dcol = c("black", "red"), main = "differential z-score density\nnormal fit in red")
gpl341.z2 <- zScores(gpl341Esets, lapply(gpl341Esets, function(x) x$age))
geneplotter::histStack(as.data.frame(gpl341.z2[,1:4]), breaks = 100)
geneplotter::histStack(as.data.frame(gpl341.z2[,2:4]), breaks = 100)
psych::multi.hist(gpl341.z2[, 2:5], dcol = c("black", "red"), main = "differential z-score density\nnormal fit in red")
# r_hippo controls only
gpl341e2 <- gpl341Esets
gpl341e2$r_hippo <- gpl341e2$r_hippo[, c(1:10, 30:39)]
gpl341e2.zscore <- zScores(gpl341e2, lapply(gpl341e2, function(x) x$age))
geneplotter::histStack(as.data.frame(gpl341e2.zscore[,1:4]), breaks = 100)
geneplotter::histStack(as.data.frame(gpl341e2.zscore[,2:4]), breaks = 100)
psych::multi.hist(gpl341e2.zscore[, 2:5], dcol = c("black", "red"), main = "differential z-score density\nnormal fit in red")
# run without $stromal
gpl341e2.z2 <- zScores(gpl341e2[2:4], lapply(gpl341e2[2:4], function(x) x$age))
geneplotter::histStack(as.data.frame(gpl341e2.z2[,1:3]), breaks = 100)
psych::multi.hist(gpl341e2.z2[, 1:4], dcol = c("black", "red"), main = "differential z-score density\nnormal fit in red")
qq_plot(gpl341e2.z2[, "Qvals"], length(grep("zSco_Ex",colnames(gpl341e2.z2))), "GPL341b")
# make lattice plot comparing data sets and their combination
library(XDE)
GPL341e2.pos <- symbolsInteresting(rankingStatistic = pnorm(na.omit(gpl341e2.z2[, "zSco"])), percentile = .95)
png(file = "C:/Users/user/Documents/GitHub/XDE/GPL341e2_pairs.png", width = 1000, height = 1000)
pairs(gpl341e2.z2[GPL341e2.pos$order, 1:4], pch = GPL341e2.pos$pch, col = GPL341e2.pos$col, bg = GPL341e2.pos$bg, upper.panel = NULL, cex = GPL341e2.pos$cex, main = "GPL341b")
dev.off()
GPL341e2.neg <- symbolsInteresting(rankingStatistic = pnorm(1 - na.omit(gpl341e2.z2[, "zSco"])), percentile = .95)
png(file = "C:/Users/user/Documents/GitHub/XDE/GPL341e2_pairsN.png", width = 1000, height = 1000)
pairs(gpl341e2.z2[GPL341e2.neg$order, 1:4], pch = GPL341e2.neg$pch, col = GPL341e2.neg$col, bg = GPL341e2.neg$bg, upper.panel = NULL, cex = GPL341e2.neg$cex, main = "GPL341b")
dev.off()
# save symEsets with $r_hippo controls only
symEsets$r_hippo <- symEsets$r_hippo[, c(1:10, 30:39)]
save(symEsets, file = "~/GitHub/stevia/data/symEsets.rdata")
|
723c63d2497fdeeda1ffc8ac0848f90ac62f3cca | ac5c69316d737ff9406291a687e9ccd40ea8bd41 | /R/keepNsignif.R | c1097a2552d91c875b8f9e32030ce38201be9112 | [] | no_license | dmattek/tca-package | 6850519a77046f708ba8b82e889bc42338c7d065 | f6a059b0cabb1ac8001c472bc895325bd1376e5f | refs/heads/master | 2021-04-09T16:13:08.768966 | 2019-12-06T13:17:27 | 2019-12-06T13:17:27 | 125,763,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 976 | r | keepNsignif.R | #' Keep a number of significant digits in a data.table
#'
#' @param dt Data in data.table format
#' @param digits Number of significant digits to keep
#'
#' @return Returns original data table with numeric fields trimmed to n significant digits.
#' @export
#' @import data.table
#'
#' @examples
signif_dt <- function(dt, digits) {
loc.dt = copy(dt)
loc.cols <- vapply(loc.dt, is.double, FUN.VALUE = logical(1))
loc.cols = names(loc.cols[loc.cols])
loc.dt[, (loc.cols) := signif(.SD, digits), .SDcols = loc.cols]
return(loc.dt)
}
#' Keep a number of significant digits in a data.frame.
#'
#' @param df Data in data.frame format
#' @param digits Number of significant digits to keep
#'
#' @return Returns original data frame with numeric fields trimmed to n significant digits.
#' @export
#'
#' @examples
#'
signif_df <- function(df, digits) {
nums <- vapply(df, is.numeric, FUN.VALUE = logical(1))
df[,nums] <- signif(df[,nums], digits = digits)
(df)
}
|
5831263afb27494b14b4f2fb04c7a3058b14943a | 05b9658b10a9fb7d646d5414ace5fe7e8777f88d | /man/gv_met_volt2vis.Rd | 9cd77ffb18540cce7842210f76120e382035ac7a | [] | no_license | yitping/gravityr | 1dd4e07cd28cd2a76b73a365aff792e5f14a6da6 | 7c4e2ac739287ab92b2ec929fc038c606cd651f3 | refs/heads/master | 2021-01-10T21:58:58.139989 | 2014-12-11T22:01:16 | 2014-12-11T22:01:16 | 39,164,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 504 | rd | gv_met_volt2vis.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{gv_met_volt2vis}
\alias{gv_met_volt2vis}
\title{Compute complex visibility for a time series of voltages}
\usage{
gv_met_volt2vis(volt, N = 4, abcd = 1:4, ka = c(0, 2, 2, 0))
}
\arguments{
\item{volt}{Vector of diode voltages}
\item{N}{Number of shifts per phase estimates (must be multiple of 4)}
\item{abcd}{Initial phase shift}
\item{ka}{Phase shift calibration}
}
\description{
Compute complex visibility for a time series of voltages
}
|
2650ab629d090ae77757ed2510ba341acb263942 | 05248307f2f03099c99b0009d0cc1631cdbc50e9 | /inst/doc/recoding.R | dcbb5911a22403a804a33486ba1f381808f804ab | [] | no_license | cran/mde | 272f2d9185acc5d1b909cee97de4ef60e8c26b0a | 693a40f62ae8c96e40671dfe9cbf00038ee8d955 | refs/heads/master | 2022-02-18T07:31:12.423189 | 2022-02-10T11:10:06 | 2022-02-10T11:10:06 | 244,352,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,682 | r | recoding.R | ## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(mde)
## -----------------------------------------------------------------------------
dummy_test <- data.frame(ID = c("A","B","B","A"),
values = c("n/a",NA,"Yes","No"))
# Convert n/a and no to NA
head(recode_as_na(dummy_test, value = c("n/a","No")))
## -----------------------------------------------------------------------------
another_dummy <- data.frame(ID = 1:5, Subject = 7:11,
Change = c("missing","n/a",2:4 ))
# Only change values at the column Change
head(recode_as_na(another_dummy, subset_cols = "Change", value = c("n/a","missing")))
## -----------------------------------------------------------------------------
# only change at columns that start with Solar
head(recode_as_na(airquality,value=190,pattern_type="starts_with",pattern="Solar"))
## -----------------------------------------------------------------------------
# recode at columns that start with O or S(case sensitive)
head(recode_as_na(airquality,value=c(67,118),pattern_type="starts_with",pattern="S|O"))
## -----------------------------------------------------------------------------
# use my own RegEx
head(recode_as_na(airquality,value=c(67,118),pattern_type="regex",pattern="(?i)^(s|o)"))
## -----------------------------------------------------------------------------
head(recode_as_na_if(airquality,sign="gt", percent_na=20))
## -----------------------------------------------------------------------------
partial_match <- data.frame(A=c("Hi","match_me","nope"), B=c(NA, "not_me","nah"))
recode_as_na_str(partial_match,"ends_with","ME", case_sensitive=FALSE)
## -----------------------------------------------------------------------------
head(recode_as_na_for(airquality,criteria="gt",value=25))
## -----------------------------------------------------------------------------
head(recode_as_na_for(airquality, value=40,subset_cols=c("Solar.R","Ozone"), criteria="gt"))
## -----------------------------------------------------------------------------
head(recode_na_as(airquality))
# use NaN
head(recode_na_as(airquality, value=NaN))
## -----------------------------------------------------------------------------
head(recode_na_as(airquality, value=0, subset_cols="Ozone"))
## -----------------------------------------------------------------------------
head(mde::recode_na_as(airquality, value=0, pattern_type="starts_with",pattern="Solar"))
## -----------------------------------------------------------------------------
head(column_based_recode(airquality, values_from = "Wind", values_to="Wind", pattern_type = "regex", pattern = "Solar|Ozone"))
## -----------------------------------------------------------------------------
head(custom_na_recode(airquality))
## -----------------------------------------------------------------------------
head(custom_na_recode(airquality,func="mean",across_columns=c("Solar.R","Ozone")))
## -----------------------------------------------------------------------------
# use lag for a backfill
head(custom_na_recode(airquality,func=dplyr::lead ))
## -----------------------------------------------------------------------------
some_data <- data.frame(ID=c("A1","A1","A1","A2","A2", "A2"),A=c(5,NA,0,8,3,4),B=c(10,0,0,NA,5,6),C=c(1,NA,NA,25,7,8))
head(custom_na_recode(some_data,func = "mean", grouping_cols = "ID"))
## -----------------------------------------------------------------------------
head(custom_na_recode(some_data,func = "mean", grouping_cols = "ID", across_columns = c("C", "A")))
## -----------------------------------------------------------------------------
some_data <- data.frame(ID=c("A1","A2","A3", "A4"),
A=c(5,NA,0,8), B=c(10,0,0,1),
C=c(1,NA,NA,25))
head(recode_na_if(some_data,grouping_col="ID", target_groups=c("A2","A3"),
replacement= 0))
## -----------------------------------------------------------------------------
head(drop_na_if(airquality, sign="gteq",percent_na = 24))
## -----------------------------------------------------------------------------
head(drop_na_if(airquality, percent_na = 24, keep_columns = "Ozone"))
## -----------------------------------------------------------------------------
head(drop_na_if(airquality, percent_na = 24))
## -----------------------------------------------------------------------------
grouped_drop <- structure(list(ID = c("A", "A", "B", "A", "B"),
Vals = c(4, NA, NA, NA, NA), Values = c(5, 6, 7, 8, NA)),
row.names = c(NA, -5L), class = "data.frame")
# Drop all columns for groups that meet a percent missingness of greater than or
# equal to 67
drop_na_if(grouped_drop,percent_na = 67,sign="gteq",
grouping_cols = "ID")
## -----------------------------------------------------------------------------
# Drop rows with at least two NAs
head(drop_row_if(airquality, sign="gteq", type="count" , value = 2))
## -----------------------------------------------------------------------------
# Drops 42 rows
head(drop_row_if(airquality, type="percent", value=16, sign="gteq",
as_percent=TRUE))
## -----------------------------------------------------------------------------
head(drop_na_at(airquality,pattern_type = "starts_with","O"))
## -----------------------------------------------------------------------------
test2 <- data.frame(ID= c("A","A","B","A","B"), Vals = c(4,rep(NA, 4)))
drop_all_na(test2, grouping_cols="ID")
## -----------------------------------------------------------------------------
test2 <- data.frame(ID= c("A","A","B","A","B"), Vals = rep(NA, 5))
head(drop_all_na(test2, grouping_cols = "ID"))
## -----------------------------------------------------------------------------
head(dict_recode(airquality, use_func="recode_na_as",
patterns = c("solar", "ozone"),
pattern_type="starts_with", values = c(520,42)))
## -----------------------------------------------------------------------------
head(recode_as_value(airquality, value=c(67,118),replacement=NA,
pattern_type="starts_with",pattern="S|O"))
|
659e5d16a96a2c5eba7804419854dd76f03f0ff2 | 77bcb519cb641bc6d82bd183fe2192473c4dd70d | /man/idbp.Rd | 8d90c72d0e41b3cbe304edfd48397b5938050026 | [] | no_license | Karlstefanrehn/biocro-copy | c958910fa8d6fa296c0093274e1a75bd93a5ad8b | 6b32cb8e83b9a0dc591dadede8cb755832baa3e2 | refs/heads/master | 2021-09-18T10:43:01.467245 | 2018-07-13T08:58:20 | 2018-07-13T08:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,264 | rd | idbp.Rd | \name{idbp}
\Rdversion{1.1}
\alias{idbp}
\title{ Initial Dry Biomass Partitioning Coefficients }
\description{ Atempts to guess good initial vales for dry biomass coefficients
that can be passed to \code{BioGro}, \code{OpBioGro}, \code{constrOpBioGro}, or \code{MCMCBioGro}.
It is very fragile.}
\usage{
idbp(data, phenoControl = list())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Should have at least five columns with: ThermalT, Stem, Leaf, Root, Rhizome and Grain.
}
\item{phenoControl}{
list that supplies mainly in this case the thrmal time periods that delimit the phenological stages,
} }
\details{
This function will not accept missing values. It can be quite
fragile and it is rather inflexible in what it expects in terms of data.
}
\value{
It returns a vector of length 25 suitable for \code{BioGro}, \code{OpBioGro}, \code{constrOpBioGro}, or \code{MCMCBioGro}.
}
\author{ Fernando E. Miguez}
\note{ It is highly recommended that the results of this function are tested with \code{\link{valid_dbp}}. }
\seealso{
\code{\link{valid_dbp}}
}
\examples{
## See ?OpBioGro
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utilities }
|
9d2a98236170f083c15288cfcc595acb1522c96d | 2e627e0abf7f01c48fddc9f7aaf46183574541df | /PBStools/man/restratify.Rd | f208ace599f9804d25fda14314b9de12af084428 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pbs-software/pbs-tools | 30b245fd4d3fb20d67ba243bc6614dc38bc03af7 | 2110992d3b760a2995aa7ce0c36fcf938a3d2f4e | refs/heads/master | 2023-07-20T04:24:53.315152 | 2023-07-06T17:33:01 | 2023-07-06T17:33:01 | 37,491,664 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,473 | rd | restratify.Rd | \name{restratify}
\alias{restratify}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
PJS -- Restratify Survey by Depth
}
\description{
Restratify survey by using depth intervals not originally used in survey.
}
\usage{
restratify(dat, strategy, dbegin, dend, renamevar)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{\code{data.frame} -- survey data object.}
\item{strategy}{\code{numeric} -- code that chooses depths to use for restratification.}
\item{dbegin}{\code{character} -- field name for beginning depth of tow.}
\item{dend}{\code{character} -- field name for ending depth of tow.}
\item{renamevar}{\code{character} -- name for new stratum field}
}
\details{
Restratification based on depths using the \code{strategy}:
\itemize{
\item \code{strategy=0} -- use the minimum of beginning and ending depths;
\item \code{strategy=1} -- use the maximum of beginning and ending depths;
\item \code{strategy=2} -- use beginning depth;
\item \code{strategy=3} -- use ending depth;
\item \code{strategy=4} -- use the mean of beginning and ending depths.
}
}
\value{
Survey data object with additional field with new depth strata.
}
\author{
\href{mailto:paul@starrfish.net}{Paul J. Starr}, Chief Groundfish Scientist\cr
Canadian Groundfish Research and Conservation Society (CGRCS), Nanaimo BC
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Institute of Ocean Sciences (IOS), Sidney BC\cr
Last modified \code{Rd: 2019-12-13}
}
\note{
PJS maintains code in statistical software called \href{https://www.stata.com/}{STATA}.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
In package \pkg{PBStools}:\cr
\code{\link[PBStools]{calcBiom}},
\code{\link[PBStools]{doSynoptic}},
\code{\link[PBStools]{getLabels}},
\code{\link[PBStools]{keepAtts}},
\code{\link[PBStools]{plotIndex}},
\code{\link[PBStools]{prepGFsurv}},
\code{\link[PBStools]{uniqtows}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
\keyword{manip}
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
\concept{indices}
\concept{PJS}
% Use only one concept per line.
|
be57400c93cabb6093a55f617d82ad89447014d9 | 9cc7423f4a94698df5173188b63c313a7df99b0e | /man/analyze.lavaan.Rd | 3f8501825b78efe3b1c1a786fd56b388701b912f | [
"MIT"
] | permissive | HugoNjb/psycho.R | 71a16406654b11007f0d2f84b8d36587c5c8caec | 601eef008ec463040c68bf72ac1ed8d4a8f7751f | refs/heads/master | 2020-03-27T01:24:23.389884 | 2018-07-19T13:08:53 | 2018-07-19T13:08:53 | 145,707,311 | 1 | 0 | null | 2018-08-22T12:39:27 | 2018-08-22T12:39:27 | null | UTF-8 | R | false | true | 758 | rd | analyze.lavaan.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze.lavaan.R
\name{analyze.lavaan}
\alias{analyze.lavaan}
\title{Analyze aov objects.}
\usage{
\method{analyze}{lavaan}(x, ...)
}
\arguments{
\item{x}{aov object.}
\item{...}{Arguments passed to or from other methods.}
}
\value{
output
}
\description{
Analyze aov objects.
}
\examples{
library(psycho)
library(lavaan)
model <- ' visual =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed =~ x7 + x8 + x9 '
x <- lavaan::cfa(model, data=HolzingerSwineford1939)
rez <- analyze(x)
print(rez)
}
\seealso{
https://www.researchgate.net/post/Whats_the_standard_of_fit_indices_in_SEM
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
785f2c4f153a306d21bcf3423c64aa142db81770 | 59fcd757df0e2465dd42e7abec6cd59ada95ddb9 | /tests/run/test-visreg-lm.R | 3c7d70ebade4c46932786b007da49f5c4128dd17 | [] | no_license | cran/visreg | 8a5ba3d35997696ba59764097372a73c4de1ab58 | c37fcabf32c69264d4a4846f1bfd260d4f9d83e8 | refs/heads/master | 2021-05-24T01:15:28.693724 | 2020-06-04T19:30:02 | 2020-06-04T19:30:02 | 17,700,805 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 85 | r | test-visreg-lm.R | library(visreg)
f <- system.file('tests', 'visreg-lm.R', package='visreg')
source(f)
|
116a6e2a1440963c236c3c496bf749c681727ba8 | 608adcf47ef5c776429dfe2e555c20c0ef54547a | /man/fun.load.Rd | 8159d05d933a0531f830d79df250d0cf253ceca8 | [] | no_license | cran/widals | b722ad1e1e0938998461d8fe83e8b76437cbc031 | c431b52c0455ad4568072220838b571bacc3b6ba | refs/heads/master | 2021-05-15T01:43:27.321897 | 2019-12-07T21:20:02 | 2019-12-07T21:20:02 | 17,700,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,696 | rd | fun.load.Rd | \name{fun.load}
\alias{fun.load}
\alias{fun.load.hals.a}
\alias{fun.load.hals.fill}
\alias{fun.load.widals.a}
\alias{fun.load.widals.fill}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Stochastic Search Helper Functions
}
\description{
Functions that assign values and functions needed by \code{\link{MSS.snow}}
}
\usage{
fun.load.hals.a()
fun.load.hals.fill()
fun.load.widals.a()
fun.load.widals.fill()
}
%- maybe also 'usage' for other objects documented here.
\details{
Please see \code{\link{MSS.snow}} and examples.
}
\value{
Nothing. The central role of these functions is the creation of four functions required by \code{\link{MSS.snow}}: \code{FUN.MH}, \code{FUN.GP}, \code{FUN.I}, and \code{FUN.EXIT}. These four functions are assigned to the Global Environment. This \code{\link{fun.load}} suite of functions also passes needed objects (out-of-scope) to \code{\link{snowfall}} threads if the global user-made variable \code{run.parallel} is set to \code{TRUE}.
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% ~~who you are~~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{MSS.snow}}
}
\examples{
### Here's an itty bitty example:
### we use stochastic search to find the minimum number in a vector
### GP isn't used here, and hence neither are p.ndx.ls nor f.d
### however, we still need to create them since MSS.snow requires their existence
\dontrun{
fun.load.simpleExample <- function() {
if( run.parallel ) {
sfExport("xx")
}
p.ndx.ls <- list( c(1) )
p.ndx.ls <<- p.ndx.ls
f.d <- list( dlog.norm )
f.d <<- f.d
FUN.MH <- function(jj, GP.mx, X) {
our.cost <- sample(xx, 1)
}
FUN.MH <<- FUN.MH
FUN.GP <- NULL
FUN.GP <<- FUN.GP
FUN.I <- function(envmh, X) {
cat( "Hello, I have found an even smaller number in xx ---> ", envmh$current.best, "\n" )
}
FUN.I <<- FUN.I
FUN.EXIT <- function(envmh, X) {
cat( "Done", "\n" )
}
FUN.EXIT <<- FUN.EXIT
}
xx <- 1:600
GP <- c(1)
run.parallel <- TRUE
sfInit(TRUE, 2)
MH.source <- fun.load.simpleExample
MH.source()
MSS.snow(MH.source, Inf, p.ndx.ls, f.d, matrix(1, nrow=28), 28, 7)
sfStop()
### Here's another itty bitty example:
### we use stochastic search to find the mean of a vector
### i.e., the argmin? of sum ( x - ? )^2
fun.load.simpleExample2 <- function() {
if( run.parallel ) {
sfExport("xx")
}
p.ndx.ls <- list( c(1) )
p.ndx.ls <<- p.ndx.ls
f.d <- list( unif.mh )
f.d <<- f.d
FUN.MH <- function(jj, GP.mx, X) {
our.cost <- sum( ( xx - GP.mx[jj, 1] )^2 )
return(our.cost)
}
FUN.MH <<- FUN.MH
FUN.GP <- NULL
FUN.GP <<- FUN.GP
FUN.I <- function(envmh, X) {
cat( "Improvement ---> ", envmh$current.best, " ---- " , envmh$GP, "\n" )
}
FUN.I <<- FUN.I
FUN.EXIT <- function(envmh, X) {
our.cost <- envmh$current.best
GP <- envmh$GP
cat( "Done", "\n" )
cat( envmh$GP, our.cost, "\n" )
}
FUN.EXIT <<- FUN.EXIT
}
##set.seed(99999)
xx <- rnorm(300, 5, 10)
GP <- c(1)
run.parallel <- TRUE
sfInit(TRUE, 2)
MH.source <- fun.load.simpleExample2
MH.source()
MSS.snow(MH.source, Inf, p.ndx.ls, f.d, matrix(1/10, nrow=140, ncol=length(GP)), 140, 14)
sfStop()
##### in fact:
mean(xx)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
2957702f818414d9316fcd9a4ae9533ef92ec567 | 1c84240eaf9e4469398e2b2347bc462684de7344 | /script.R | 9bd148dec378d1e0973cc4b637985f46699add84 | [
"CC0-1.0"
] | permissive | JaquesZanon/datradriven_cv | 4652327f0e904ff9611ec435d5feba9a4e514ce1 | 2ad4ba50a513ceb4dc3f13c3be9afc8a2fcfcb9c | refs/heads/main | 2023-03-21T14:24:54.301019 | 2021-03-15T17:30:25 | 2021-03-15T17:30:25 | 348,052,293 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,780 | r | script.R | devtools::install_github("nstrayer/datadrivencv")
devtools::install_github('mitchelloharawild/icons')
#library(googlesheets4)
#gs4_auth_configure(api_key = ".....")
#read_sheet("https://docs.google.com/spreadsheets/d/1uWsN_sXHwTmbgmNGUAy6FSFs4kEFJ_6li6SFR5UsutU/edit#gid=0")
#install.packages(googlesheets)
#library(googlesheets)
#gs_auth(new_user = TRUE)
#gs_ls()
#downloading the raw zip package
library(devtools)
install_local("C:/Users/jaque/Desktop/datadrivencv-master.zip")
library("datadrivencv")
datadrivencv::use_datadriven_cv(full_name = "My Name")
use_datadriven_cv(
full_name = "Jaques Everton Zanon",
data_location = "C:/Users/jaque/Desktop/Nova pasta/tabelas/",
source_location = getwd(),
output_dir = getwd())
use_datadriven_cv(
full_name = "Jaques Everton Zanon",
data_location = "https://docs.google.com/spreadsheets/d/1uWsN_sXHwTmbgmNGUAy6FSFs4kEFJ_6li6SFR5UsutU/edit#gid=0",
pdf_location = diretorio,
html_location = diretorio,
source_location = "https://github.com/nstrayer/cv")
use_datadriven_cv(
full_name = "Sarah Arcos",
data_location = "C:/Users/jaque/Desktop/Nova pasta/data/",
pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf",
html_location = "nickstrayer.me/datadrivencv/",
source_location = "https://github.com/nstrayer/datadrivencv",
which_files = "all",
output_dir = getwd(),
create_output_dir = FALSE,
use_network_logo = TRUE,
open_files = TRUE
)
use_datadriven_cv(
full_name = "Jaques E. Zanon",
data_location = "C:/Users/jaque/Desktop/Nova pasta/data/",
pdf_location = "https://github.com/nstrayer/cv/raw/master/strayer_cv.pdf",
html_location = "nickstrayer.me/datadrivencv/",
source_location = "https://github.com/nstrayer/datadrivencv",
which_files = "all",
output_dir = getwd(),
create_output_dir = FALSE,
use_network_logo = TRUE,
open_files = TRUE
)
use_csv_data_storage()
?use_csv_data_storage
temp_dir <- fs::dir_create(fs::path(tempdir(), "cv_w_csvs"))
datadrivencv::use_csv_data_storage(
folder_name = fs::path(temp_dir, "csv_data"),
create_output_dir = TRUE
)
list.files(fs::path(temp_dir, "csv_data"))
rmarkdown::render("cv.rmd",
params = list(pdf_mode = FALSE),
output_file = "cv.html")
# Knit the PDF version to temporary html location
tmp_html_cv_loc <- fs::file_temp(ext = ".html")
rmarkdown::render("cv.rmd",
params = list(pdf_mode = TRUE),
output_file = tmp_html_cv_loc)
# Convert to PDF using Pagedown
pagedown::chrome_print(input = tmp_html_cv_loc,
output = "cv.pdf")
|
829b5d47a8ee92289df70c84c6d827c0fc2d4bef | 3a8876cf29445f851168c865ace0292f9afecc02 | /man/badvalue_rm.Rd | 78590706a26835eb9b06e5436aec3481462aa9e2 | [] | no_license | sachserf/badval | 4b518379ca4e8eded5e1c085f490acc30da82e46 | 79e82b536d61451188f11cf170ea67d79f5c2714 | refs/heads/master | 2021-01-12T13:39:19.712943 | 2018-02-27T10:38:46 | 2018-02-27T10:38:46 | 70,050,829 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 865 | rd | badvalue_rm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/badvalue_rm.R
\name{badvalue_rm}
\alias{badvalue_rm}
\title{Remove bad values from data}
\usage{
badvalue_rm(data, badstring, badpattern, badindex = "BADVAL")
}
\arguments{
\item{data}{A data frame containing a column with an index of bad values.}
\item{badstring}{Character. Specify an exact string to clean only one
single column of your data frame.}
\item{badpattern}{Character. Specify a pattern of bad values to clean
multiple (but not all) columns of your data frame.}
\item{badindex}{Character. Name of a column of the data frame that serves as an
index for bad values.}
}
\value{
data frame
}
\description{
This function removes cells of a data frame that have bad
values.
}
\seealso{
\code{\link{badindex_add}}, \code{\link{badindex_rm}}
}
\author{
Frederik Sachser
}
|
f0157a8870a6cb1a9e3ce7b3524327c76e9e1f74 | 7e3ce11bc22c009a399a36490ed962836d6aaff6 | /examples/DonchianChannel-intelligent_trading/intelligent_trading_donchian_qtopen_3818_month.R | 035bd2fbabc81ff10a64d954c919b44b08834f62 | [] | no_license | tedddy/Learn_R | 2340a1f73e0ab4a7e07b5aa97181bc42e7acd22f | a04cd823fb382b5457e3b043ec346b1ee5ab1724 | refs/heads/master | 2021-01-17T13:33:36.685310 | 2016-10-17T23:26:46 | 2016-10-17T23:26:46 | 25,558,722 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,884 | r | intelligent_trading_donchian_qtopen_3818_month.R | # http://intelligenttradingtech.blogspot.hk/2010/03/modified-dochian-band-trend-follower.html
# Refer to quantmod-vignette.pdf
# http://www.quantmod.com/documentation/getSymbols.yahoo.html
# troubleshooting
library(quantmod)
library(TTR)
# Set up symbol, start date, and end date.
tckr <- "3818.hk"
# start <- "2011-09-26"
# end <- "2014-10-28"
# empty "end" means end is today
# Pull tckr index data from Yahoo! Finance
# env_hk_3818 <- new.env()
# data_hk_3818 <- getSymbols(tckr, from = Sys.Date()-1095, auto.assign = FALSE)
# search()
# generate this month's quote
data_hk_3818_month <- getSymbols(tckr, from = Sys.Date()-31, auto.assign = FALSE)
data_hk_3818_month[,6]
data_hk_3818_month.cl <- data_hk_3818_month[,6]
data_hk_3818_month.H <- data_hk_3818_month[,2]
data_hk_3818_month.L <- data_hk_3818_month[,3]
# Generate DonchianChannel object
# ?DonchianChannel
dc_month <- DonchianChannel(cbind(data_hk_3818_month.H,data_hk_3818_month.L),n=10)
###################################################
# Create the long (up) and short (dn) signals
sigup <-ifelse(data_hk_3818_month.cl > dc_month[,2],1,0)
sigdn <-ifelse(data_hk_3818_month.cl < dc_month[,2],-1,0)
# Lag signals to align with days in market,
# not days signals were generated
sigup <- lag(sigup,1) # Note k=1 implies a move *forward*
sigdn <- lag(sigdn,1) # Note k=1 implies a move *forward*
# Replace missing signals with no position
# (generally just at beginning of series)
sigup[is.na(sigup)] <- 0
sigdn[is.na(sigdn)] <- 0
# Combine both signals into one vector
sig <- sigup + sigdn
# Calculate Close-to-Close returns
?ROC
ret <- ROC(data_hk_3818_month[,6])
ret[1] <- 0
# Calculate equity curves
# ?cumprod
# cumprod(1:10)
eq_up <- cumprod(1+ret*sigup)
eq_dn <- cumprod(1+ret*sigdn)
eq_all <- cumprod(1+ret*sig)
|
6521211ea13943c6583fcb7422a1ac221e4f4706 | a4b67ea46787badabc054665407cb8b90f7e2819 | /man/getDataElementsOrgunits.Rd | a7fb88e8f13ed220fbadc4657cb850bb4b23bddd | [] | permissive | vikwato/datim-validation | 2745486588b70b23ee257385d1c8230ebfb8985d | f206c43ea7710917936c1627fa0da02ba5771832 | refs/heads/master | 2020-03-31T17:53:56.344120 | 2019-10-16T14:19:02 | 2019-10-16T14:19:02 | 152,438,101 | 0 | 0 | BSD-3-Clause | 2019-10-16T14:19:09 | 2018-10-10T14:33:42 | R | UTF-8 | R | false | true | 990 | rd | getDataElementsOrgunits.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkDataElementOrgunitValidity.R
\name{getDataElementsOrgunits}
\alias{getDataElementsOrgunits}
\title{getDataElementsOrgunits(data,organisationUnit,datasets)}
\usage{
getDataElementsOrgunits(organisationUnit = NA, datasets = NA)
}
\arguments{
\item{organisationUnit}{Organisation unit. Defaults to user organisation
unit if not supplied explicitly.}
\item{datasets}{Should be a character vector of data set UIDs. Alternatively,
if left missing, the user will be prompted.}
}
\value{
A named list of data frames, each consisting of two columns (des) representing
data elements and (ous) representing organisation unit UIDs
}
\description{
Returns a map of lists consisting of data elements and orgunits
for a dataset (or datasets) for a given organisationUnit
}
\examples{
\dontrun{
ds<-getCurrentMERDataSets(type="RESULTS")
de_ou_map<-getDataElementsOrgunits(organisationUnit = "f5RoebaDLMx",datasets=ds)
}
}
|
eec5b60d77e32f3a0fbd55bee8c0753098e4b4b7 | c126e18a6dd12595c986c3b57722fe692dfaa7c6 | /R/as_proj.R | ca23a3e5c37e81dff32b3c0c734d12cd1c401a0d | [
"MIT"
] | permissive | crowcanyon/localgrid | 85d420ad0b47faec17e0339c10c9d7c7dcfe6045 | cd1453b0d983062d5b756fc57049397b2b5a1ead | refs/heads/master | 2023-03-20T10:56:34.543862 | 2021-03-12T02:02:32 | 2021-03-12T02:02:32 | 346,803,761 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 576 | r | as_proj.R | #' Create CRS definition in Proj system
#'
#' This function creates a new Proj-style CRS definition from list of named
#' elements.
#'
#' @param x A list of named Proj elements
#'
#' @return A length-1 character string with the Proj-style CRS definition
#'
#' @export
#'
#' @examples
#' as_proj(
#' list(
#' proj = "omerc",
#' lat_0 = 37.385,
#' lonc = -108.51,
#' alpha = 0,
#' gamma = 0,
#' k_0 = 1,
#' x_0 = 500,
#' y_0 = 500
#' )
#' )
as_proj <-
function(x) {
x %>% {
paste0("+", names(.), "=", ., collapse = " ")
}
}
|
eda6a9a5f261666c9140727c93ad50a0e3dfc33c | e541579395d021c831e8e66d3c377d41c484d656 | /ExtremeGB_Full Code_Tariq.R | 3eaa6950dea9763b415e072a6cf46cbc1869b2de | [] | no_license | thaque2050/promotionprediction | f148e8310b8ced21cf332ffa160ee3d3c649ea91 | 089ca2c5f56ec0e44b88fae9501f47536914e9db | refs/heads/master | 2020-03-28T19:42:34.875583 | 2018-09-16T14:12:51 | 2018-09-16T14:12:51 | 149,002,030 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,729 | r | ExtremeGB_Full Code_Tariq.R | library(lattice)
library(ParamHelpers)
library(grid)
library(DMwR)
library(xgboost)
library(mlr)
library(dplyr)
library(tidyverse)
library(MlBayesOpt)
library(Matrix)
library(rBayesianOptimization)
train_data<-read.csv("train_LZdllcl.csv")
test_data<-read.csv("test_2umaH9m.csv")
#Summary of the data
summarizeColumns(train_data)
summarizeColumns(test_data)
#Description of variables
str(train_data)
str(test_data)
#Identify Missing Entires
sapply(train_data, function(x)sum(is.na(x)))
#Identify missing entries in test data
sapply(test_data, function(x)sum(is.na(x)))
#Replace missing values
train_data$previous_year_rating[is.na(train_data$previous_year_rating)]<-0
test_data$previous_year_rating[is.na(test_data$previous_year_rating)]<-0
#Convert previous year's rating, KPI's_met, and award won to categorical variable
#train_data$previous_year_rating<-factor(train_data$previous_year_rating,levels = c(1,2,3,4,5,99),labels = c(1,2,3,4,5,99),exclude = "NA")
#test_data$previous_year_rating<-factor(test_data$previous_year_rating,levels = c(1,2,3,4,5,99),labels = c(1,2,3,4,5,99),exclude = "NA")
#Correct some column names
colnames(train_data)[11]<-"KPI_Score"
colnames(train_data)[12]<-"award_won"
colnames(test_data)[11]<-"KPI_Score"
colnames(test_data)[12]<-"award_won"
#Check imbalance in data
table(train_data$is_promoted)
#Parameter Optimization for Xgboost
#Create dummary variables from the train_dataset
dummy_d<-model.matrix(~department+0,data=train_data)
dummy_r<-model.matrix(~region+0,data=train_data)
dummy_e<-model.matrix(~education+0,data=train_data)
dummy_g<-model.matrix(~gender+0,data=train_data)
dummy_rc<-model.matrix(~recruitment_channel+0,data=train_data)
new_train<-data.frame(train_data[,-c(1,2,3,4,5,6,14)],dummy_d,dummy_r,dummy_e,dummy_g,dummy_rc)
final_data<-cbind(new_train,train_data[,14])
colnames(final_data)[60]<-"is_promoted"
#Using Bayesian Optimization and creating a function
cv_folds <- KFold(final_data$is_promoted, nfolds = 5,stratified = TRUE, seed = 0)
# Objective function for Bayesian hyper-parameter optimisation: runs an
# xgboost cross-validation with the proposed hyper-parameters and reports
# the held-out error at the best boosting round.
# Relies on `train_matrix` and `cv_folds` from the enclosing environment.
xgb_cv_bayes <- function(eta, gamma, colsample_bytree, max_delta_step, lambda, alpha,
                         max_depth, min_child_weight, subsample) {
  # Assemble the booster configuration up front so the xgb.cv call stays readable.
  params <- list(
    booster = "gbtree",
    eta = eta,
    max_depth = max_depth,
    min_child_weight = min_child_weight,
    subsample = subsample,
    colsample_bytree = colsample_bytree,
    lambda = lambda,
    alpha = alpha,
    gamma = gamma,
    max_delta_step = max_delta_step,
    objective = "binary:logistic",
    eval_metric = "error"
  )
  cv <- xgb.cv(
    params = params,
    data = train_matrix,
    nrounds = 105,
    folds = cv_folds,
    prediction = TRUE,
    showsd = TRUE,
    early_stopping_rounds = 5,
    maximize = TRUE,
    verbose = 0
  )
  # BayesianOptimization expects a list with a Score plus the CV predictions.
  list(
    Score = cv$evaluation_log$test_error_mean[cv$best_iteration],
    Pred = cv$pred
  )
}
OPT_Res <- BayesianOptimization(xgb_cv_bayes,
bounds = list(max_depth = c(0L,50L),
min_child_weight = c(0,50),
subsample = c(0, 1.0),
eta=c(0,1.0),
colsample_bytree = c(0,1.0),
lambda = c(0,1.0),
alpha = c(0,1.0),
gamma=c(0,50),
max_delta_step=c(0,50)),
init_grid_dt = NULL, init_points = 10, n_iter = 60,
acq = "ucb", kappa = 2.576, eps = 0.0,verbose = TRUE)
#Using MLBayesOPt Package
res0 <- xgb_cv_opt(data = final_data,
label = is_promoted,
objectfun = "binary:logistic",
evalmetric = "error",
n_folds = 5,
classes = numberOfClasses,
acq = "ucb",
init_points = 10,
n_iter = 20)
#XGBoost Model Building
X_train<-as.matrix(new_train)
Y_train<-train_data[,14]
train_matrix<-xgb.DMatrix(data=X_train, label=Y_train)
numberOfClasses <- length(unique(train_data$is_promoted))
xgb_params <- list(booster="gbtree",
objective = "binary:logistic",
# eval_metric = "error",
# eta=0.35,
subsample=0.8269)
# max_depth=4,
# alpha=0.3583948,
# lambda=0.8668652,
# gamma=34.9535110,
# min_child_weight=16.1055437,
# max_delta_step = 42.8950288,
# colsample_bytree=0.9210)
bst<-xgboost(params = xgb_params,data=X_train,label =Y_train,nrounds = 105.3588)
xgb.importance(feature_names = colnames(X_train), bst) %>% xgb.plot.importance()
xgb.plot.tree(model = bst)
#Prediction using model
#training data preparation
dummy_dt<-model.matrix(~department+0,data=test_data)
dummy_rt<-model.matrix(~region+0,data=test_data)
dummy_et<-model.matrix(~education+0,data=test_data)
dummy_gt<-model.matrix(~gender+0,data=test_data)
dummy_rct<-model.matrix(~recruitment_channel+0,data=test_data)
new_test<-data.frame(test_data[,-c(1,2,3,4,5,6)],dummy_dt,dummy_rt,dummy_et,dummy_gt,dummy_rct)
X_test<-as.matrix(new_test)
test_predict<-predict(bst,X_test)
#Write in the submission file
pred<-0
for(i in 1:length(test_predict)){
if(test_predict[i]<.5){
pred[i]=0
}
else{
pred[i]=1
}
}
submission_data<-data.frame(cbind(test_data$employee_id,pred))
colnames(submission_data)<-c("employee_id","is_promoted")
write.table(submission_data,"submission_Tariq.csv",col.names = TRUE,sep = ",",row.names = FALSE)
#F1 Score Formula
# F1 score of binary predictions against 0/1 labels, thresholding the
# predicted probabilities at 0.5.
#
# Args:
#   preds:  numeric vector of predicted probabilities/scores.
#   dtrain: vector of true labels coded 0/1, same length as preds.
#
# Returns:
#   list(metric = "f1-score", value = <F1 in [0, 1]>).
#
# Fix vs. the original: the precision*recall form evaluated 0/0 and so
# returned NaN whenever no true positives occurred (e.g. no predictions
# above 0.5). The algebraically equivalent 2*TP / (2*TP + FP + FN) form is
# used instead, with F1 defined as 0 in the fully degenerate case.
f1score_eval <- function(preds, dtrain) {
  pred_pos <- preds >= 0.5
  e_TP <- sum(dtrain == 1 & pred_pos)
  e_FP <- sum(dtrain == 0 & pred_pos)
  e_FN <- sum(dtrain == 1 & !pred_pos)
  denom <- 2 * e_TP + e_FP + e_FN
  # denom == 0 means no actual or predicted positives at all
  e_f1 <- if (denom == 0) 0 else 2 * e_TP / denom
  return(list(metric = "f1-score", value = e_f1))
}
#Check F1 Score
train_predict<-predict(bst,X_train)
dtrain<-train_data[,14]
f1score_eval(train_predict,dtrain)
|
15d69ff69f3c350547a26be2d751ca4b977d7527 | dd0d26163c4a0498de5b25e4ee57c4ce70b2676d | /man/ecr.Rd | b7004b5285d26b42421fe6fe6119d51aec15a043 | [] | no_license | jakobbossek/ecr | a1f97be9b4cb3b2538becebb38c9a5085b8464c9 | f9954f5b1374cc70776f8b7e780f906e57ca50b7 | refs/heads/master | 2020-04-04T07:26:32.216427 | 2017-06-06T11:05:27 | 2017-06-06T11:05:27 | 17,904,690 | 13 | 5 | null | 2016-09-27T10:30:10 | 2014-03-19T13:15:56 | R | UTF-8 | R | false | true | 6,903 | rd | ecr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ecr.R
\name{ecr}
\alias{ecr}
\title{Interface to \pkg{ecr} similar to the \code{\link[stats]{optim}} function.}
\usage{
ecr(obj.fun, n.dim, lower = NULL, upper = NULL, n.bits, representation,
n.population, n.offspring, n.mating.pool = floor(n.population/2),
survival.strategy = "plus", n.elite = 0L, vectorized.evaluation = FALSE,
custom.constants = list(), logger = NULL,
monitor = setupConsoleMonitor(), max.iter = 100L, max.evals = NULL,
max.time = NULL, more.args = list(), initial.population = NULL,
parent.selector = getDefaultEvolutionaryOperators(representation,
"parent.selector"),
survival.selector = getDefaultEvolutionaryOperators(representation,
"survival.selector"),
generator = getDefaultEvolutionaryOperators(representation, "generator"),
mutator = getDefaultEvolutionaryOperators(representation, "mutator"),
recombinator = getDefaultEvolutionaryOperators(representation,
"recombinator"))
}
\arguments{
\item{obj.fun}{[\code{function}]\cr
The single-objective target function. Can be any R function which takes a
single vector as input and returns a scalar value describing the vectors
fitness.}
\item{n.dim}{[\code{integer(1)}]\cr
Dimension of the decision space.}
\item{lower}{[\code{numeric}]\cr
Vector of minimal values for each parameter of the decision space in case
of float or permutation encoding.}
\item{upper}{[\code{numeric}]\cr
Vector of maximal values for each parameter of the decision space in case
of float or permutation encoding.}
\item{n.bits}{[\code{integer(1)}]\cr
Number of bits to use for binary representation.}
\item{representation}{[\code{character(1)}]\cr
Genotype representation of the parameters. Available are \dQuote{binary},
\dQuote{float}, \dQuote{permutation} and \dQuote{custom}.}
\item{n.population}{[\code{integer(1)}]\cr
Number of individuals in the population.}
\item{n.offspring}{[\code{integer(1)}]\cr
Number of individuals generated in each generation.}
\item{n.mating.pool}{[\code{integer(1)}]\cr
Number of individuals which can potentially participate in the
generation of offspring.
Default is half of the population size.}
\item{survival.strategy}{[\code{character(1)}]\cr
Determines the survival strategy used by the EA. Possible are \dQuote{plus} for
a classical (mu + lambda) strategy and \dQuote{comma} for (mu, lambda).
Default is \dQuote{plus}.}
\item{n.elite}{[\code{integer(1)}]\cr
Number of fittest individuals of the current generation that shall be copied to the
next generation without changing. Keep in mind, that the algorithm
does not care about this option if the \code{survival.strategy} is set to 'plus'.
Default is 0.}
\item{vectorized.evaluation}{[\code{logical(1L)}]\cr
Is the fitness/objective function vectorized? I.e., does the fitness function accept
a list? This allows for faster execution or parallelization by hand.
If \code{TRUE} the following distinction on the type of the objective function is made:
\describe{
\item{Is \code{smoof_function}}{If the objective function is of type \code{smoof_function} from package \pkg{smoof}
and the smoof function is vectorized, the population - which is a list internally -
is reduced to a matrix and passed to the smoof function (vectorization in smoof
is allowed for continuous functions only).}
\item{Is not a \code{smoof_function}}{In this case the individuals of
the population are passed entirely as a list to the objective function.}
}
Default is \code{FALSE}.}
\item{custom.constants}{[\code{list}]\cr
Additional constants which should be available to all generators and operators.
Defaults to empty list.}
\item{logger}{[\code{function}]\cr
Monitoring object used to log stuff.
Default is \code{NULL} which means no logging at all.
See \code{\link{setupOptPathLoggingMonitor}} for ecr's build-in logger.}
\item{monitor}{[\code{function}]\cr
Monitoring function.
Default is \code{NULL}, i.e. no monitoring.}
\item{max.iter}{[\code{integer(1)}]\cr
Maximal number of iterations. Default is \code{100L}.}
\item{max.evals}{[\code{integer(1)}]\cr
Maximal number of fitness function evaluations. Default is \code{Inf}.}
\item{max.time}{[\code{integer(1)}]\cr
Time budget in seconds. Default is \code{Inf}.}
\item{more.args}{[\code{list}]\cr
Additional arguments passed to objective function.}
\item{initial.population}{[\code{list}]\cr
List of individuals which should be placed in the initial population.
The function will stop with an error message if the number of passed individuals
is larger than \code{control$n.population}. If the number of passed individuals
is lower than \code{control$n.population}, the population will be filled up
by individuals generated by the corresponding generator.
Default is \code{NULL}, i.e., the entire population is generated by the
population generator.}
\item{parent.selector}{[\code{ecr_selector}]\cr
Selection operator which implements a procedure to copy individuals from a
given population to the mating pool, i. e., allow them to become parents.}
\item{survival.selector}{[\code{ecr_selector}]\cr
Selection operator which implements a procedure to extract individuals from
a given set, which should survive and set up the next generation.}
\item{generator}{[\code{ecr_generator}]\cr
Generator operator of type \code{ecr_generator} for the generation of the initial
population.}
\item{mutator}{[\code{ecr_mutator}]\cr
Mutation operator of type \code{ecr_mutator}.}
\item{recombinator}{[\code{ecr_recombinator}]\cr
Recombination operator of type \code{ecr_recombinator}.}
}
\value{
[\code{\link{ecr_result}}]
}
\description{
The most flexible way to setup evolutionary algorithms with \pkg{ecr} is by
explicitely generating a task and a control object and passing both to
\code{\link{doTheEvolution}}. Although this approach is highly flexible
and very readable it requires quite a lot of code. However, in everyday
life R users frequently need to optimize a single-objective R function.
The \code{ecr} function thus provides a more R like interface for single
objective optimization similar to the interface of the \code{\link[stats]{optim}}
function.
}
\note{
This helper function is applicable for single-objective optimization based
on default encodings, i.e., binary, float and permutation, only.
If your function at hand has multiple objectives or you need special
encodings and operators you need to work with \code{\link{doTheEvolution}}
directly.
}
\examples{
fn = function(x) {
sum(x^2)
}
res = ecr(fn, n.dim = 2L, lower = c(-5, -5), upper = c(5, 5),
representation = "float", n.population = 20L, n.offspring = 10L, max.iter = 30L)
}
\seealso{
\code{\link{setupECRControl}} for building the control object,
\code{\link{makeOptimizationTask}} to define an optimization problem and
\code{\link{doTheEvolution}} for the main working horse of \pkg{ecr}.
}
\keyword{optimize}
|
2c2692d944f920afd751fef6da296639bc7264df | 933a029990944228d5e66724a4bf60cb6c62b13e | /tests/testthat/test-linreg.R | 2e0be24d423330c0e0e15126725c32f0a199905a | [] | no_license | cgrandin/tmbcompilationmwe | 237e23c27094bf5179bd30a0c81a6e3aa8d2fab2 | 8c96cc8f6ccdafcbdd873d784295ebedc34971d1 | refs/heads/master | 2022-11-19T05:48:46.887384 | 2020-07-16T09:11:33 | 2020-07-16T09:11:33 | 279,961,527 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 517 | r | test-linreg.R | context("Test linreg with TMB functionality")
# Regression test pinning the first row of run_linreg()'s return matrix to
# 20-decimal reference strings, for the default call and for argument 101.
# NOTE(review): run_linreg() is defined elsewhere in the package; the
# meaning of its argument (a seed?) cannot be confirmed from this file.
test_that("Linreg works", {
j <- run_linreg()
# format() with nsmall = 20 keeps full precision so tiny drifts are caught
expect_equal(format(j[1,], nsmall = 20, scientific = FALSE),
c("13.12724494489211046755",
"72.19984719690660313063",
"-0.00000019803342223771"))
j <- run_linreg(101)
expect_equal(format(j[1,], nsmall = 20, scientific = FALSE),
c(" 35.01395243288285286098",
"192.57673838085563033928",
" 0.00000054077407596467"))
})
dfddf97b2b8db07775883b42c872eed10ffadd44 | 1145da6e1c01508c5d59f52f285702002f92e2d8 | /Chart App/app.R | 85ab0203969c7cdb64afecdd47443ecf4bcb84e2 | [] | no_license | SSEngland/Shiny | 096bb3997aaace06ef8788e742978aa00198e5fb | 9a015ffe47a8c26fb612be5c7dfd9db1bb3f0f50 | refs/heads/master | 2020-03-22T23:39:22.185665 | 2018-07-17T14:55:08 | 2018-07-17T14:55:08 | 140,821,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,903 | r | app.R | #the file has to be saved as app.R in order to work, don't ask why!
#library(shiny)
#ui<-basicPage()
#server<-function(input,output){}
#shinyApp(ui=ui,server=server)
#these 4 lines above are the basic structure of shinyapp
#library(shiny)
# ui<-pageWithSidebar(
# titlePanel("Title"),
# sidebarPanel ("Sidebarpanel"),
# mainPanel ("Main Panel")
# )
#
#
# server<-function(input,output){}
#
# shinyApp(ui=ui,server=server)
#these scripts below made the apps resizable
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel"),
# mainPanel ("Main Panel")
# )
# )
# server<-function(input,output){}
#
# shinyApp(ui=ui,server=server)
#this app below with slider
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel",
# sliderInput(
# inputId = "bins",
# label = "Slider Label",
# min=1,
# max=30,
# value=15
# )
# ),
# mainPanel ("Main Panel")
# )
# )
# server<-function(input,output){}
#
# shinyApp(ui=ui,server=server)
#the app below creates dropdown list
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel",
# selectInput(
# inputId = "list",
# label = "List Label",
# choices = c("fair","good","very good"),
# selected = "good"
# )
# ),
# mainPanel ("Main Panel")
# )
# )
# server<-function(input,output){}
#
# shinyApp(ui=ui,server=server)
#the code below gives framework for plots
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel"),
# mainPanel ("Main Panel",
# tabsetPanel(
# tabPanel(title="1st Plot",plotOutput(outputId = "plot1")),
# tabPanel(title = "2nd Plot",plotOutput(outputId = "plot2"))
# )
# )
# )
# )
#
# server<-function(input,output){}
#
# shinyApp(ui=ui,server=server)
#this script below will plot 2 charts as different tabs
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel"),
# mainPanel ("Main Panel",
# tabsetPanel(
# tabPanel(title="1st Plot",plotOutput(outputId = "plot1")),
# tabPanel(title = "2nd Plot",plotOutput(outputId = "plot2"))
# )
# )
# )
# )
# #all the plots are included in the function
# server<-function(input,output){
# output$plot1<-renderPlot(( #these two open brackets are necessary
# ggplot(data=diamonds,aes(x=price))+geom_histogram()
# ))
# output$plot2<-renderPlot((
# ggplot(data=diamonds,aes(x=carat))+geom_histogram()
# ))
# }
#
# shinyApp(ui=ui,server=server)
# #this will plot chart with side bar and dropdown list
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel",
# sliderInput(
# inputId = 'bins',
# label = 'Slider label',
# min=1,
# max=30,
# value=15,
# ticks=FALSE
# ),
# selectInput(
# inputId = 'list',
# label="list label",
# choices=c("price","carat"),
# selected = "price"
# )),
# mainPanel ("Main Panel",
# tabsetPanel(
# tabPanel(title="1st Plot",plotOutput(outputId = "plot1")),
# tabPanel(title = "2nd Plot",plotOutput(outputId = "plot2")))
# )
# )
# )
#
# #all the plots are included in the function
# server<-function(input,output){
# output$plot1<-renderPlot(( #these two open brackets are necessary
# ggplot(data=diamonds,aes(x=price))+geom_histogram()
# ))
# output$plot2<-renderPlot((
# ggplot(data=diamonds,aes(x=carat))+geom_histogram()
# ))
# }
#
# shinyApp(ui=ui,server=server)
#this script below added slider
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel",
# sliderInput(
# inputId = 'bins',
# label = 'Slider label',
# min=1,
# max=30,
# value=15,
# ticks=FALSE
# ),
# selectInput(
# inputId = 'list',
# label="list label",
# choices=c("price","carat"),
# selected = "price"
# )),
# mainPanel ("Main Panel",
# tabsetPanel(
# tabPanel(title="1st Plot",plotOutput(outputId = "plot1")),
# tabPanel(title = "2nd Plot",plotOutput(outputId = "plot2")))
# )
# )
# )
#
# #all the plots are included in the function
# server<-function(input,output){
# output$plot1<-renderPlot(( #these two open brackets are necessary
# ggplot(data=diamonds,aes(x=price))+geom_histogram()+stat_bin(bins=input$bins)
# ))
# output$plot2<-renderPlot((
# ggplot(data=diamonds,aes(x=carat))+geom_histogram()+stat_bin(bins=input$bins)
# ))
# }
#
# shinyApp(ui=ui,server=server)
# this script plot charts with dropdown
# library(shiny)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel",
# sliderInput(
# inputId = 'bins',
# label = 'Slider label',
# min=1,
# max=30,
# value=15,
# ticks=FALSE
# ),
# selectInput(
# inputId = 'list',
# label="list label",
# choices=c("price","carat"),
# selected = "price"
# )),
# mainPanel ("Main Panel",
# tabsetPanel(
# tabPanel(title="1st Plot",plotOutput(outputId = "plot1"))
# )
# )
# )
# )
#
# #all the plots are included in the function
# server<-function(input,output){
# output$plot1<-renderPlot(( #these two open brackets are necessary
# ggplot(data=diamonds,aes_string(x=input$list))+geom_histogram()+stat_bin(bins=input$bins)
# ))#need to use aes_string this time to convert the variables from choices to strings
#
# }
#
# shinyApp(ui=ui,server=server)
#plot more choices
# library(shiny)
# library(dplyr)
#
# ui<-fluidPage(
# titlePanel("Title"),
# sidebarLayout(
# sidebarPanel("Sidebar Panel",
# sliderInput(
# inputId = 'bins',
# label = 'Slider label',
# min=1,
# max=30,
# value=15,
# ticks=FALSE
# ),
# selectInput(
# inputId = 'cut',
# label="list label",
# choices=c('Fair','Good','Very Good','Premium','Ideal','Any'),
# selected = 'Fair'
# )),
# mainPanel ("Main Panel",
# tabsetPanel(
# tabPanel(title="1st Plot",plotOutput(outputId = "plot1")),
# tabPanel(title="2nd Plot",plotOutput(outputId = "plot2"))
# )
# )
# )
# )
#
# #all the plots are included in the function
# server<-function(input,output){
# output$plot1<-renderPlot(( #these two open brackets are necessary
# ggplot(data=diamonds,aes(x=price))+geom_histogram()+stat_bin(bins=input$bins)
# ))#need to use aes_string this time to convert the variables from choices to strings
# output$plot2<-renderPlot(
# {selectedcut<-
# if(input$cut=='Any'){
# c('Fair','Good','Very Good','Premium','Ideal')
# }else{
# input$cut}
# #filter diamonds data by cut
# filter(diamonds, cut%in% selectedcut) %>%
# ggplot(aes(clarity,fill=color))+geom_bar()})
# }
#
# shinyApp(ui=ui,server=server)
# Active Shiny app (the commented-out blocks above are earlier tutorial
# iterations): a histogram of diamond prices whose bin count comes from the
# slider, plus a clarity-by-color bar chart filtered by the selected cut.
library(shiny)
library(dplyr)
library(ggplot2)
# UI: sidebar holds a bin-count slider (1-30) and a cut dropdown where
# 'Any' means all cuts; each plot gets its own tab in the main panel.
ui<-fluidPage(
titlePanel("Title"),
sidebarLayout(
sidebarPanel("Sidebar Panel",
sliderInput(
inputId = 'bins',
label = 'Slider label',
min=1,
max=30,
value=15,
ticks=FALSE
),
selectInput(
inputId = 'cut',
label="list label",
choices=c('Fair','Good','Very Good','Premium','Ideal','Any'),
selected = 'Fair'
)),
mainPanel ("Main Panel",
tabsetPanel(
tabPanel(title="1st Plot",plotOutput(outputId = "plot1")),
tabPanel(title="2nd Plot",plotOutput(outputId = "plot2"))
))))
# Server: renders both plots; `diamonds` is the example dataset from ggplot2.
server<-function(input,output){
# Plot 1: price histogram; bin count reacts to the slider input.
output$plot1<-renderPlot(( #these two open brackets are necessary
ggplot(data=diamonds,aes(x=price))+geom_histogram()+stat_bin(bins=input$bins)
)) # (aes_string would only be needed if the mapped column name came in
# as a string, e.g. from a selectInput -- here the column is fixed)
# Plot 2: bar chart of clarity filled by color, restricted to the chosen
# cut; 'Any' expands to all five cut levels.
output$plot2<-renderPlot(
{selectedcut<-
if(input$cut=='Any'){
c('Fair','Good','Very Good','Premium','Ideal')
}else{
input$cut}
#filter diamonds data by cut
filter(diamonds, cut%in% selectedcut) %>%
ggplot(aes(clarity,fill=color))+geom_bar()})
}
shinyApp(ui=ui,server=server)
#change the above with getDataset <- reactive so that you can return more choices/charts
|
1fa4045b9810790a5f26b5d9eee62a3e0c6fd3be | f557188d2bbe1f984fc9277d9e544befe65dffd2 | /Calculating Discharge.R | b7373efd6293343564e5374f4f33b77349f68d1f | [] | no_license | NehemiahDaGoat/Ecuador | 5a2b9b73a4a6263624743b2b7f7ea72f6dae7151 | 47acc1e9e8da489b4ea89b0bb03b1ba5b48e5296 | refs/heads/master | 2020-06-03T02:34:30.970601 | 2019-06-17T14:59:54 | 2019-06-17T14:59:54 | 191,398,384 | 0 | 1 | null | 2019-06-11T15:19:02 | 2019-06-11T15:19:02 | null | UTF-8 | R | false | false | 677 | r | Calculating Discharge.R | library(tidyverse)
#getwd(C:/Users/Nehemiah/Desktop)
#getwd(C:\Users\Nehemiah\Desktop)
# Set the working directory
setwd("C:/Users/Nehemiah/Documents/CarbonShed Lab/Data")
# Rename the Data
MMPast <-read.csv("C:/Users/Nehemiah/Documents/CarbonShed Lab/Data/Flow Data 6_4_19 - Marsh-McBernie Past-Injection Point")
MMPast <-read.csv("Flow Data 6_4_19 - Marsh-McBernie Past-Injection Point.csv")
# Separate and Name Columns for Area and Velocity
Area<-MMPast$Area..cm.2.
Velocity<-MMPast$Velocity..cm.s.
view(Area)
#Perform Trapezoidal Integration on Area and Velocity
install.packages("pracma")
library("pracma")
trp<-trapz(Area[1:8],Velocity[1:8])
trp
|
633ba4932ad588bfc82ee15f745b207266a3b41b | a697d073c563fb4ea027bd72833816f7ba35dbf6 | /man/cancer.Rd | 1398a96ff3b99567ba3b8148ae6c9cff81965cc7 | [] | no_license | gckc123/StatsNotebookServer | b3e68bcbe94ace0ac0e4b3d431141325acb5b2da | e8810ba64fb970d68c4eee7c74cec8879ee05428 | refs/heads/master | 2023-02-04T17:19:19.995879 | 2020-12-25T23:00:06 | 2020-12-25T23:00:06 | 289,435,432 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 995 | rd | cancer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cancer.R
\docType{data}
\name{cancer}
\alias{cancer}
\title{A simulated dataset based on the simulated cancer dataset from UCLA Institute for
Digital Research and Education}
\format{
A data frame with 5959 rows and 12 variables:
\describe{
\item{pain}{Pain level ranged from 1 to 9}
\item{remission}{1: Remitted; 0: Not remitted}
\item{Age}{Patient age}
\item{SmokingHx}{Smoking status: former smoker/ current smoker/ non-smoker}
\item{Sex}{Patient sex: male/ female}
\item{CancerStage}{Cancer Stage: I/ II/ III/ IV}
\item{LengthofStay}{Length of hospital stay}
\item{BMI}{Body Mass Index}
\item{DID}{Doctor ID}
\item{Experience}{Doctor's years of experience}
\item{HID}{Hospital ID}
\item{TumourSize}{Tumor Size}
}
}
\source{
\url{https://stats.idre.ucla.edu/r/dae/mixed-effects-logistic-regression/}
}
\usage{
cancer
}
\description{
Lung cancer data from 5959 patients
}
\keyword{datasets}
|
9cbe95b443be509389414b14644bd0420c54f29e | 46ead79262c5d0495cfac458c5e7aba10d556169 | /final.R | 4244843a8dec4101bcd18ba54a21a8ead46305ff | [] | no_license | lwshu/shiny-price-finder | af643edd75e45ec414d5a26b482388197c90dda2 | 1223ad15be49768a5c3746b668c88bc6ef5ddf1a | refs/heads/master | 2021-01-17T21:11:54.723364 | 2016-05-31T00:19:04 | 2016-05-31T00:19:04 | 60,044,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 315 | r | final.R | setwd("C:/Users/lshu0/Documents/shiny")
source("priceTable.R")
str_dt<-"2015-12-09"
end_dt<-"2015-12-26"
div_no<-26
item_no<-88732
store_no<-c(1333,3976,1594,2888,2432)
df<-priceTable(str_dt,end_dt,div_no,item_no,store_no)
runApp("C:/Users/lshu0/Documents/shiny")
xxx<-printPrice(str_dt,end_dt,div_no,store_no)
|
eec1916da80c78a5eec9f470a98205a410d1412a | 4798b648072b27e08e56235b90f11a3ba359755a | /man/gtf_to_position.Rd | abca6329f52dc0b1eb86f538b0c9ddad66b5326f | [] | no_license | CharleneZ95/infercnvPlus | 44620c5a70af77d829a29916907711b835157d02 | 1cf1fb427520c5d6cfb87b177c047433fa0b9f73 | refs/heads/master | 2021-05-19T17:43:08.225022 | 2020-04-04T08:59:27 | 2020-04-04T08:59:27 | 252,051,599 | 5 | 2 | null | null | null | null | UTF-8 | R | false | true | 473 | rd | gtf_to_position.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{gtf_to_position}
\alias{gtf_to_position}
\title{Generate a genomic positions file from a GTF file}
\usage{
gtf_to_position(gtf, out_file = "genomic_positions.txt", out_path = "./")
}
\arguments{
\item{gtf}{path to input gtf file.}
\item{out_file}{filename to save genomic positions.}
\item{out_path}{output directory.}
}
\value{
Return genomic positions.
}
\description{
Args:
}
|
d0c4a35da9fc163023a53746b76f65e53631365c | 326537a42f5a3f128fbb26fda2ababa3e2de2576 | /HEQTL/mailman/RHE_cleanV.R | fc1bab429c51de8eedf784c0ebd721846b597f08 | [] | no_license | gc5k/Notes | b3f01c7c89d63de565bd968c99234ab04da51273 | 981d4d6935a446f09fb788e12161288d9727faa6 | refs/heads/master | 2022-10-01T08:42:13.174374 | 2022-09-12T03:03:58 | 2022-09-12T03:03:58 | 38,083,965 | 2 | 2 | null | 2016-08-14T07:02:59 | 2015-06-26T01:34:57 | null | UTF-8 | R | false | false | 1,446 | r | RHE_cleanV.R |
n=500 #sample size
m=10000 #marker
h2=0.3 #heritability
SM=100 #simulation
BS=30 #randomization factor
b=rnorm(m, 0, sqrt(h2/m)) #effect
BEst=matrix(0, SM, 2)
H2=matrix(0, SM, 2)
H2V=matrix(0, SM, 2)
LK=0
TIMER=array(0, 2)
I=diag(1, n,n)
fq=runif(m, 0.1, 0.5)
x=matrix(0, n, m)
for(i in 1:m) {
x[,i]=rbinom(n, 2, fq[i])
}
sx=apply(x, 2, scale)
#Randomized HE
Tstart=proc.time()
#evaluate LB
for(i in 1:SM) {
y=sx%*%b+rnorm(n, 0, sqrt(1-h2))
Lb=0
for(j in 1:BS) {
z=matrix(rnorm(n), n, 1)
x1=t(sx)%*%z
x2=sx%*%x1
Lb=Lb+(t(x2)%*%x2)[1,1]
}
LK=Lb/(BS*m^2)
m2=matrix(0, 2, 2)
m2[1,1]=LK
m2[1,2]=n
m2[2,1]=n
m2[2,2]=n
wy=t(sx)%*%y
yVec=matrix(c(sum(wy^2)/m, t(y)%*%y), 2, 1)
B2=solve(m2)%*%yVec
BEst[i,]=B2
H2[i,2]=B2[1,1]/(B2[1,1]+B2[2,1])
yyT=y%*%t(y)
t1=t(y)%*%sx
t2=t1%*%t(sx)
yyK=y%*%t2/m
t3=(yyK-yyT)
t4=t3%*%t3
t5=sum(diag(t4))
H2V[i,1]=1/(LK-n) * sqrt(2*t5+1/(LK-n)*LK*B2[1,1]^2)
}
Tend=proc.time()
TIMER[1]=Tend[3]-Tstart[3]
#old he
Tstart1=proc.time()
for(i in 1:SM) {
y=sx%*%b+rnorm(n, 0, sqrt(1-h2))
K=sx %*% t(sx)/m
yM=y%*%t(y)
yc=yM[col(yM) < row(yM)]
Kc=K[col(K) < row(K)]
md=lm(yc~Kc)
H2[i, 1]=coefficients(md)[2]
}
Tend1=proc.time()
TIMER[2]=Tend1[3]-Tstart1[3]
layout(matrix(c(1,1,2,3), byrow=T, 2, 2))
barplot(TIMER,col=c("red", "blue"))
barplot(H2[,1],beside = T, col="red")
barplot(H2[,2],beside = T, col="blue")
abline(h=h2)
|
13deb2d72e2caced1a0430a41674d944a38a8fd6 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Introductory_Business_Statistics_by_Alexander_Holmes_Barbara_Illowsky__Susan_Dean/CH10/EX10.8/Ex10_8.R | 3f37b55dfa8a05f6ca0f6686676c9f384eb69bac | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 870 | r | Ex10_8.R | #page no : 434-435
x1=61.675
x2=61.704
psd1=10.17
psd2=9.55
n1=30
n2=30
mdiff=abs(x1-x2)
sd=sqrt((psd1^2/60)+(psd2^2/60))
t_stat=((mdiff/sd))
alpha=.05
t_critical=qnorm(1-alpha)
print(paste("t critical=",round(t_critical,2)))
p_value=pnorm(t_stat)
if(t_stat>t_critical)
{
print("Reject H0")
}else
{
print("Accept H0")
}
if(alpha>p_value)
{
print("Reject H0")
}else
{
print("Accept H0")
}
dfs <- n1+n2-2
x <- seq(-3,3,0.01)
y <- dt(x,dfs)
plot(x,y,type='l',lwd=3,col='blue',xlab='x')
abline(v=0)
abline(v=round(t_stat,2),lwd=2,col='green')
polygon(c(x[x>=abs(t_critical)],abs(t_critical) ),c(y[x>=abs(t_critical)],0),col="red")
text(t_stat,0,round(t_stat,2))
text(2.0,0.1,expression(alpha))
text(2.3,0.1,alpha)
text(-2.6,0.1,expression(alpha))
text(-2.0,0.1,alpha)
#The answer may slightly vary due to rounding off values. |
514a53fd09e37c600bab7274d97800c676b69d42 | 93085075f4b9a8418e524981ef51581750494ba2 | /run_analysis.R | 21057e4a01fdd41dad5222f765e57438009813df | [] | no_license | satipatthana/TidyData | 7e8ae94629a198067969d1109aa82655c9027dfb | 8a0a1ba9eaa1b2b863ee894cc56ffc04019d802b | refs/heads/master | 2021-01-10T04:03:31.758522 | 2016-03-06T06:22:06 | 2016-03-06T06:22:06 | 53,240,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,920 | r | run_analysis.R | library(dplyr)
library(data.table)
# Read Activity Labels from file "activity_labels.txt" into data frame "actlabel"
actlabel<-read.table("activity_labels.txt")
# Read 561 feature vector from "features.txt" into data frame "fnames"
fnames<-read.table("features.txt")
#Store 561 variable names in a list, "flist", for use in future to name columns
flist<-as.vector(fnames$V2)
# Read experimental subject information from TEST data, and give column name "Subject"
subtest<-read.table("subject_test.txt")
names(subtest)<-"Subject"
# Read experimental activity information from TEST data, and give column name "Activity"
ytest<-read.table("y_test.txt")
names(ytest)<-"Activity"
# Read 561 feature information from TEST data, and give names for 561 columns
xtest<-read.table("X_test.txt")
colnames(xtest)<-flist
# Select column names that relate to mean() and std(). meanFreq names excluded by regex..
# ..."mean[^a-zA-Z]()|std()". This is Step 2 of the course project.
# Performing this operation first to reduce the number of columns that need to be merged..
#...with training data. By doing this operation first, only 66 mean/std columns need...
#...to be merged rather than all 561 features. Repeated operation for TRAIN data in...
#...in subsequent lines of code.
xtest<-xtest[,grep("mean[^a-zA-Z]()|std()",colnames(xtest))]
# Create data frame testdf
testdf<-cbind(subtest,ytest,xtest)
# Convert values in Acitivty column from numbers to descriptive words (e.g., WALKING, LAYING etc.)
#....by matching with Activity Labels. This is Step 3 of the course project.
# Repeat operation below for TRAIN data.
testdf$Activity<-actlabel[,2][match(testdf$Activity,actlabel[,1])]
# Next 9 lines of code repeat process for TRAIN data
subtrain<-read.table("subject_train.txt")
names(subtrain)<-"Subject"
ytrain<-read.table("y_train.txt")
names(ytrain)<-"Activity"
xtrain<-read.table("X_train.txt")
colnames(xtrain)<-flist
xtrain<-xtrain[,grep("mean[^a-zA-Z]()|std()",colnames(xtrain))]
traindf<-cbind(ystrain,yttrain,xtrain)
traindf$Activity<-actlabel[,2][match(traindf$Activity,actlabel[,1])]
# Merge TEST and TRAIN data frames and arrange them by Subject id. This is Step 1 of the course project.
# Tidy data set contained in data frame "alldat"
alldat<-rbind(testdf,traindf)
alldat<-arrange(alldat,Subject)
# Calculate means by subject and activity for each of the 66 columns.
# This accomplished by first grouping "alldat" by Subject and Activity, followed by...
#...the summarise_each operation that calculates means for each of the 66 features.
# Data frame "ans" contains 180 rows (i.e. 30 subjects times 6 activites/subject)...
#...for each of the 66 features.
sdat<-group_by(alldat,Subject,Activity)
ans<-summarise_each(sdat,funs(mean))
write.table(ans,"TidyStep5.txt",row.names=FALSE)
|
012ab881a39e9b5501b5029d736b3acff26afe4d | 0c1c6b6be35d18e024efcc6059b3b22d89ce4965 | /getData.R | 883a06e47f798abb4d4aca2b0949ddfdf74a8e00 | [] | no_license | ronrest/ExData_Plotting1 | 1bab741343866875b14f7fde054a6c4e12498588 | f1d056dfacbcf47a752f071366cdf702b18a99cc | refs/heads/master | 2021-01-17T06:42:41.431522 | 2015-03-08T21:30:14 | 2015-03-08T21:30:14 | 31,803,106 | 0 | 0 | null | 2015-03-07T06:08:25 | 2015-03-07T06:08:25 | null | UTF-8 | R | false | false | 5,020 | r | getData.R |
getDataInDateRange <- function(datafile, startDate, endDate){
# ==========================================================================
# GET DATA IN RANGE
# ==========================================================================
# Gets sub-data from the data file that falls within a specified range of
# timestamps.
#
# Assumes that the data being imported is the "Individual household
# electric power consumption Data Set" from the UC Irvine Machine Learning
# Repository, stored as a plain text file, with columns delimeted with
# semicolons ";" and missing values represented by question marks "?"
#
# This function efficiently imports the desired subset of data by ONLY
# reading the relevant lines from the source file, instead of reading the
# whole file into memory, and then filtering. It calculates the relevant
# lines by first sampling the very first line of data, and calculating how
# many minutes must elapse from this initial measurement and the start of
# the desired range. It also calculates how many minutes there are in the
# desired range. The number of minutes determines the number of rows that
# must be skipped and read in the text file.
#
# Args:
# datafile: A string representing the filepath to the data file.
# startDate: A POSIXlt date-time object. Determines the start of the date
# range we are interested in
# endDate: A POSIXlt date-time object. Determines the end of the date
# range we are interested in
#
# Returns:
# A dataframe object with only the rows containing information for the
# range of datetimes specified.
# ==========================================================================
# --------------------------------------------------------------------------
# Setup
# --------------------------------------------------------------------------
# Coerce the columns in the data to the following types.
# NOTE: coercing to "date" type for the first column does not work as
# expected using this data file, perhaps because of the date format
# used. This column will be instead be imported as a string, and a
# new datetime column will be added later that combines the
# information from the date, and the time column.
colClasses <- c("character", "character", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric")
# --------------------------------------------------------------------------
# Initial File Inspection
# --------------------------------------------------------------------------
# Retreive just the 1st row of data, to get the column names, and the
# timestamp of the first measurement
df <- read.csv(dataFile, sep=";", nrows=1)
df.names <- names(df)
firstMeasurement <- strptime(with(df, paste(Date, Time)),
format="%d/%m/%Y %H:%M:%S")
# --------------------------------------------------------------------------
# Calculate Relevant Lines Numbers
# --------------------------------------------------------------------------
# Calculation is based on the number of minutes that have elapsed since the
# first measurement
firstLineOfData = 2 # Line 2, because line 1 in the text file is column
# names, not data
firstLine = as.integer(difftime(startDate, firstMeasurement,
units = "mins") + firstLineOfData)
last_line = as.integer(difftime(endDate, startDate,
units = "mins") + firstLine)
df.numrows <- last_line + 1 - firstLine
# --------------------------------------------------------------------------
# Read The Relevant Lines From File
# --------------------------------------------------------------------------
message("Reading subset of lines from : ", datafile)
message("First Line: ", firstLine)
message("First Line: ", last_line)
message("Number of rows: ", df.numrows)
df <- read.table(dataFile, sep=";", na.strings="?",
col.names=df.names, colClasses=colClasses,
skip=firstLine-1, nrows=df.numrows)
# --------------------------------------------------------------------------
# Tidy up, and return the dataframe
# --------------------------------------------------------------------------
# Create a timestamp column
df$Timestamp = strptime(with(df, paste(Date, Time)), format="%d/%m/%Y %H:%M:%S")
return(df)
}
|
82a743622e69b0daec9cfc0bbb76298b8e081669 | 8933501a79dd57d38bbf1967d62f1b07823832bd | /F_LibreData_transformation(update).R | 54cacfa04c6dd57fff21b75387c9c076b245c270 | [] | no_license | imeunsolee/libre-forWellCheck | 44951a6c8b43c0cf13280d97fcb62eadd0c62c2b | e1975ee2b131ef0387618d33cdca7c49ff3f03a5 | refs/heads/master | 2023-08-21T14:10:30.516551 | 2021-10-22T12:01:12 | 2021-10-22T12:01:12 | 395,664,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,146 | r | F_LibreData_transformation(update).R |
##########################################
## LibreData_transformation
## ---------------------------------------
## input
## . inFileName : 'NAME_glucose_YYYYMMDD.xlsx'
## . FinalDate : 리브레 종료일 YYYY-MM-DD 의 형태
## . mod : 1=1기간 분석 2 = 2기간 이상 분석
## .... ver 2.0 에서는 mod = 2 에서 최대 2개 기간만 출력하도록 제한해둠 ####
## ---------------------------------------
## ver 2.0 ( 210713 )
LibreData_transformation = function( inFileName, FinalDate ) {
errCode.sub = c()
### step0 =============================================================================##
## 데이터 불러오기
lines = readLines(inFileName,encoding='UTF-8')[-c(1:2)]
data = matrix(NA, nrow=(length(lines)-1),ncol=19)
for ( i in 2:length(lines) ) {
lines.tmp = strsplit(lines[[i]],split="\"")
if( grepl('FreeStyle LibreLink',lines.tmp[[1]][1]) ) {
if( length(lines.tmp[[1]])==1 ) {
line1 = unlist(strsplit(lines.tmp[[1]],split=','))
if( length(line1)==18 ) line1[19] = ''
} else {
line1 = c()
for ( j in 1:length(lines.tmp[[1]]) ) {
if ( j==2 ) {
line1.tmp = unlist(lines.tmp[[1]][j])
} else {
line1.tmp = unlist(strsplit(lines.tmp[[1]][j],split=','))
}
line1 = c(line1,line1.tmp)
}
if ( length(line1)==14 ) next
}
data[(i-1),] = line1
} else {
addtext = ifelse(is.na(unlist(lines.tmp[[1]][1])),'',unlist(lines.tmp[[1]][1]))
line1[14] = paste(line1[14],addtext,sep=' ')
if ( length(unlist(lines.tmp))<=1 ) {
next
}
line1 = c(line1,unlist(strsplit(lines.tmp[[1]][2],split=',')))
if ( length(line1)==18 ) line1[19] = ''
data[(i-2),] = line1
}
}
colnames(data) = unlist(strsplit(lines[[1]],split=','))
data = as.data.frame(data)
data = data[!is.na(data[,1]),]
data$dateandtime = as.POSIXct(data[,3],tz='GMT')
data$date = as.Date(data$dateandtime)
data$time = format(data$dateandtime,format='%H:%M:%S')
### step1 =============================================================================##
## 분석기간 분류
subNum = 0
End.s1 = T
Datelist = as.character(unique(as.Date(data[which(data[,4]%in%c(0,1)),]$date)))
date.tmp = c(as.Date(FinalDate,'%Y-%m-%d')-13, as.Date(FinalDate,'%Y-%m-%d'))
data$sub = NA
ndays = length(Datelist[Datelist>=date.tmp[1] & Datelist<=date.tmp[2]])
while ( 1 ) {
subNum = ( subNum+1 )
## part1 ##
while ( End.s1==T ) {
if ( ndays==14 ) {
data[which(data$date>=date.tmp[1] & data$date<=date.tmp[2]),]$sub = subNum
} else if ( ndays > 0 & ndays < 14 ) {
FinalDate = max(as.Date(data$date[intersect(which(data$date>=date.tmp[1] & data$date<=date.tmp[2]),which(data[,4]%in%c(0,1)))]))
# FinalDate = max(as.Date(data$date[which(data$date>=date.tmp[1] & data$date<=date.tmp[2])]))
date.tmp1 = c(as.Date(FinalDate,'%Y-%m-%d')-13, as.Date(FinalDate,'%Y-%m-%d'))
# ndays.tmp1 = length(unique(as.Date(data$date[intersect(which(data$date>=date.tmp1[1] & data$date<=date.tmp1[2]),which(data[,4]%in%c(0,1)))])))
ndays.tmp1 = length(Datelist[Datelist>=date.tmp1[1] & Datelist<=date.tmp1[2]])
FinalDate = min(as.Date(data$date[which(data$date>=date.tmp[1] & data$date<=date.tmp[2])]))+13
date.tmp2 = c(as.Date(FinalDate,'%Y-%m-%d')-13, as.Date(FinalDate,'%Y-%m-%d'))
# ndays.tmp2 = length(unique(as.Date(data$date[intersect(which(data$date>=date.tmp2[1] & data$date<=date.tmp2[2]),which(data[,4]%in%c(0,1)))])))
ndays.tmp2 = length(Datelist[Datelist>=date.tmp2[1] & Datelist<=date.tmp2[2]])
if ( ndays.tmp1 > ndays.tmp2 ) {
data[which(data$date>=date.tmp1[1] & data$date<=date.tmp1[2]),]$sub = subNum
# cat('[경고] 설정된 분석시작일을 포함한 이전 기간으로 분석기간이 변경되었습니다.\n')
# cat(paste(' --- 변경된 마지막 분석일:', date.tmp1[2],'\n'))
errCode.sub = c(errCode.sub, 'Warn_101')
} else if ( ndays.tmp1 <= ndays.tmp2 ) {
data[which(data$date>=date.tmp2[1] & data$date<=date.tmp2[2]),]$sub = subNum
# cat('[경고] 설정된 분석종료일을 포함한 이후 기간으로 분석기간이 변경되었습니다.\n')
# cat(paste(' --- 변경된 마지막 분석일:', date.tmp2[2],'\n'))
errCode.sub = c(errCode.sub, 'Warn_102')
} else {
break
}
} else if ( ndays==0 ) {
FinalDate = max(data[which(data[,4]%in%c(0,1)),]$date)
date.tmp = c(as.Date(FinalDate,'%Y-%m-%d')-13, as.Date(FinalDate,'%Y-%m-%d'))
data[which(data$date>=date.tmp[1] & data$date<=date.tmp[2]),]$sub = subNum
# cat('[경고] 설정된 분석종료일 이후의 기간으로 분석기간이 변경되었습니다.\n')
# cat(paste(' --- 변경된 마지막 분석일:', date.tmp[2],'\n'))
errCode.sub = c(errCode.sub, 'Warn_103')
} else {
# cat('[에러] 리브레 데이터가 없어 분석을 종료합니다.\n')
errCode.sub = c(errCode.sub, 'Errr_101')
break
}
End.s1 = F
}
# break ## 웰체크다이어트프로젝트용 ## / todo ( 확인해보기 )
## part2 ##
## 과거 데이터 자동 탐색 ###
Datelist = setdiff(as.character(Datelist), unique(as.character(data[!is.na(data$sub),]$date)))
if ( length(Datelist)==0 ) {
break
} else {
FinalDate = max(Datelist[Datelist<min(data[!is.na(data$sub),]$date,na.rm=T)],na.rm=T)
if ( length(Datelist)==0 ) {
break
} else if ( is.na(FinalDate) ) {
break
} else {
date.tmp = c(as.Date(FinalDate,'%Y-%m-%d')-13, as.Date(FinalDate,'%Y-%m-%d'))
date.max = NULL
End.s2 = T
}
}
while ( End.s2==T ) {
Datelist.tmp = Datelist[Datelist>=date.tmp[1] & Datelist<=date.tmp[2]]
ndays = length(Datelist.tmp)
if ( length(Datelist[Datelist>=date.max[1] & Datelist<=date.max[2]]) < ndays ) {
# max date 저장, max date보다 길면 update
date.max = date.tmp
if ( ndays==14 ) {
break
}
}
if ( range(Datelist)[1] > (date.max[1]-1) ) {
End.s2 = F
break
}
date.tmp = ( date.tmp - 1 )
}
date.tmp = date.max
ndays = length(Datelist[Datelist>=date.tmp[1] & Datelist<=date.tmp[2]])
if ( ndays<10 ) { # 7일 미만으로 바꿀까.. 고민
End.s1 = F
break
} else {
End.s1 = T
}
}
mod = ifelse( max(subNum)==1, 1 , 2)
### step2 =============================================================================##
## 기록유형 분류 (자동,스캐닝,식사 등)
### 기록유형 = 0 -> 자동 혈당값
data$log = ifelse(data[,4]==0,1,0)
## S2-1. 동일시점기록 요약
### 자동 혈당 ---
data_type0 = data[which(data[,4]==0),c(1:3,20:24,5)]
colnames(data_type0)[9]='glucose'
## 스캔 혈당 ---
data_type1 = data[which(data[,4]==1),c(1:3,20:24,6)]
colnames(data_type1)[9]='glucose'
## 음식 기록 ---
# 아침, 점심, 저녁 분류할 필요 있음
data_type5 = data[which(data[,4]==5),c(1:3,20:24,9)]
colnames(data_type5)[9]='event_eat'
## 운동 기록 ---
data_type6_1 = data[which(data[,4]==6),c(1:3,20:24,14)]
data_type6_1 = data_type6_1[which(data_type6_1[,9]=='운동'),]## 메모로 '운동'이라고 기록한 경우도 포함시켜야하지 않을까 ? ## data_type6_1[grepl('운동',data_type6_1[,9][[1]]),]
if(dim(data_type6_1)[1]!=0){
data_type6_1[,9]=1
}
colnames(data_type6_1)[9]='event_exercise'
## 모든 메모 ---
data_type6_2 = data[which(data[,4]==6),c(1:3,20:24,14)]
if(dim(data_type6_2[which(data_type6_2[,9]!=''),])[1]!=0){
data_type6_2 = data_type6_2[which(data_type6_2[,9]!=''),]
}
colnames(data_type6_2)[9]='memo'
## S2-2. merge
data_type1to5 = merge(data_type1[,-c(1:3)],data_type5[,-c(1:3)],by=c('dateandtime','date','time','sub','log'),all=T)
data_type6 = merge(data_type6_1[,-c(1:3)],data_type6_2[,-c(1:3)],by=c('dateandtime','date','time','sub','log'),all=T)
data_type1to6 = merge(data_type1to5,data_type6,by=c('dateandtime','date','time','sub','log'),all=T)
data_type0to6 = merge(data_type0[,-c(1:3)],data_type1to6,by=c('dateandtime','date','time','sub','log','glucose'),all=T)
AGPdata = data.frame(data_type0to6[!is.na(data_type0to6$dateandtime),])
AGPdata$glucose = as.numeric(as.vector(AGPdata$glucose))
### output =============================================================================##
# out.sub = 1 # ver1.0 #
if ( mod !=1 ) {
out.sub = c(1,2) # ver2.0 # //최대 2개 기간까지만 출력하도록 제한 ##
} else {
out.sub = 1
}
if ( any(AGPdata$sub==1,na.rm=T)==F ) {
# cat('[경고] 리브레 데이터 변환 완료하였으나, 분석기간에 해당되는 데이터가 없습니다.\n')
errCode.sub = c(errCode.sub, 'Warn_104')
}
return(list(AGPdata = AGPdata[which(AGPdata$sub%in%out.sub),], errCode.sub = errCode.sub, mod=mod))
} |
a3fe0edb40cb9676f9d1c23455d556f3289bd237 | 444b4fa85eb7bf53d3fe963e5b60b121928637af | /man/makeRegulatorGraph.Rd | 592f40de56a931e9a164c2d84f0f9fa596495707 | [] | no_license | nathaliepochet/amarettoTools | db37fc1f8eea5e3c6b92d63ffff3840fa3e8fa6d | aa541ca51c6f50670aaaa076b4bceef27186ac18 | refs/heads/master | 2020-07-01T05:50:56.586885 | 2018-08-09T16:05:55 | 2018-08-09T16:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,036 | rd | makeRegulatorGraph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doRegGraph.R
\name{makeRegulatorGraph}
\alias{makeRegulatorGraph}
\title{produce a graph that summarizes regulator-module relationships}
\usage{
makeRegulatorGraph(dataFolder = ".", modpatt = "^Module_")
}
\arguments{
\item{dataFolder}{character(1) specifying location of regulators
and gene_expression CSV files by module, with filenames of form
'Module_[n]_...' by default; other filename formats can be
specified by modpatt parameter. `_gene_expression.csv` and
`_regulators.csv` are the assumed suffixes.}
\item{modpatt}{character(1) regular expression used to pick out the CSV files}
}
\value{
instance of graph::graphNEL, mode 'directed'
}
\description{
produce a graph that summarizes regulator-module relationships
}
\note{
The returned graph has regulators, module names,
and module gene symbols as nodes. A module has edges to
all member genes, and a regulator has edges to all modules
that it regulates.
}
\examples{
data(regulGBM)
regulGBM
}
|
5e76ce5553042a17c017e302f3c44afbd7d42518 | 141cb000c5bd54fda357cefe87290a362aa008f2 | /R/caption.R | 59c54896f0361bc6685c0f9dd0a29348a2ede0e4 | [] | no_license | 2ndFloorStuff/reports | e632286f13ae90ca0af10721c9a80ae06a0e4fd8 | 98ca03c2e0344421cf2d9482495f8dd1448e3c0f | refs/heads/master | 2020-12-14T00:21:06.820047 | 2015-07-03T20:47:06 | 2015-07-03T20:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,932 | r | caption.R | #' Captions for Figures and Tables
#'
#' \code{caption} - Keep track of numbering for table and figure captions.
#'
#' @param type Either "figure" or "table" or accompanying numeric reference:
#' figure = 1, table = 2.
#' @param label A hyperlink reference label.
#' @param caption A caption to place below/above a figure/table.
#' @param style Additional style elements to be passed to the html \code{p} tag.
#' @param copy2clip logical. If \code{TRUE} attempts to copy the output to the
#' clipboard.
#' @param print logical. If \code{TRUE} \code{\link[base]{cat}} prints the
#' output to the console. If \code{FALSE} returns to the console.
#' @param spaces An integer value dictating the number of lines to skip after
#' the table caption header.
#' @return Returns a character vector of a caption.
#' @keywords caption
#' @rdname caption
#' @export
#' @examples
#' caption_figure("fig1", "A random figure.")
#' caption_table("tab1", "A table")
cap <- function(type = 1, label, caption, style="margin-bottom: 3em;",
copy2clip = interactive(), print = FALSE, spaces = 2) {
if (any(type %in% c(1, "figure"))) {
caption_figure(label = label, caption = caption, style = style,
copy2clip = copy2clip, print = print)
} else {
if (any(type %in% c(2, "table"))) {
caption_table(label = label, caption = caption,
style = style, copy2clip = copy2clip, print = print,
spaces = spaces)
} else {
stop("type must be \"figure\", \"table\", 1, or 2")
}
}
}
#' Captions for Figures and Tables
#'
#' \code{caption_figure} - Keep track of numbering for figure captions.
#'
#' @export
#' @rdname caption
caption_figure <- function(label, caption, style = "margin-bottom: 3em;",
copy2clip = interactive(), print = FALSE) {
err.out <- try(is.null(.fig), silent = TRUE)
if (class(err.out) == "try-error") .fig <- 0
.fig <<- .fig <- .fig + 1
style <- ifelse(is.null(style), "", sprintf("style=\"%s\"", style))
x <- sprintf("<p id=\"%s\" %s><em>Figure %s</em>: %s</p>", label, style,
.fig, caption)
if(copy2clip){
write_clip(x)
}
prin(x = x, print = print)
}
#' Captions for Figures and Tables
#'
#' \code{caption_table} - Keep track of numbering for table captions.
#'
#' @export
#' @rdname caption
caption_table <- function(label, caption, style = "margin-top: 4em;",
copy2clip = interactive(), print = FALSE, spaces = 2) {
err.out <- try(is.null(.tab), silent = TRUE)
if (class(err.out) == "try-error") .tab <- 0
.tab <<- .tab <- .tab + 1
spaces <- paste(rep("<br>", spaces), collapse = "")
style <- ifelse(is.null(style), "", sprintf("style=\"%s\"", style))
x <- sprintf("<p id=\"%s\" %s>Table %s%s<em>%s</em></p>", label,
style, .tab, spaces, caption)
if(copy2clip){
write_clip(x)
}
prin(x = x, print = print)
}
|
1328b9f1faadde0b0f3cff711b69ab2563a790f3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/spatstat/examples/as.linfun.Rd.R | 08ceb6b027a85d0137076dcb8c8ea032edfe60f6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 497 | r | as.linfun.Rd.R | library(spatstat)
### Name: as.linfun
### Title: Convert Data to a Function on a Linear Network
### Aliases: as.linfun as.linfun.linim as.linfun.lintess
### Keywords: spatial manip
### ** Examples
X <- runiflpp(2, simplenet)
Y <- runiflpp(5, simplenet)
# image on network
D <- density(Y, 0.1, verbose=FALSE)
f <- as.linfun(D)
f
f(X)
# tessellation on network
Z <- lineardirichlet(Y)
g <- as.linfun(Z)
g(X)
h <- as.linfun(Z, values = runif(5))
h(X)
|
98d60e4e7503041afa4636df98fe3191cf827bcb | 849e2bc163e16bb03800b4b1754086e8adcfecb6 | /Braga et al/boilerplate.R | cbbd578e23cdc99677bb6b992bc943f854f06348 | [] | no_license | merckey/HLCA | 46362226a8cc2ee80465d02b1164062ebb2f81d6 | 899dd282c36db5b09ded8f522fa3ba239fad1347 | refs/heads/master | 2023-06-13T01:43:54.477980 | 2021-07-06T18:36:31 | 2021-07-06T18:36:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,706 | r | boilerplate.R | if (!require("useful")) {
install.packages("useful", dependencies = TRUE)
library(useful)
}
if (!require("here")) {
install.packages("here", dependencies = TRUE)
library(here)
}
if (!require("Seurat")) {
install.packages("Seurat", dependencies = TRUE)
library(Seurat)
}
if (!require("dplyr")) {
install.packages("dplyr", dependencies = TRUE)
library(dplyr)
}
if (!require("Matrix")) {
install.packages("Matrix", dependencies = TRUE)
library(Matrix)
}
if (!require("RColorBrewer")) {
install.packages("RColorBrewer", dependencies = TRUE)
library(RColorBrewer)
}
my.cols <- rev(brewer.pal(11, "RdYlBu"))
process_tissue = function(tiss, scale){
tiss <- NormalizeData(object = tiss, scale.factor = scale)
tiss <- ScaleData(object = tiss)
tiss <- FindVariableGenes(object = tiss, do.plot = TRUE, x.high.cutoff = Inf, y.cutoff = 0.5)
tiss <- RunPCA(object = tiss, do.print = FALSE)
tiss <- ProjectPCA(object = tiss, do.print = FALSE)
}
load_tissue_droplet = function(dataset){
raw.data <- read.csv(file = here('datadropseq', paste(dataset, 'raw_counts.csv', sep = "_")), row.names = 1)
meta.data <- read.csv(file = here('metadata', paste(dataset, 'barcodes_cell_types.txt', sep="_")), row.names = 1, sep = "\t")
# Create the Seurat object with all the data
tiss <- CreateSeuratObject(raw.data = raw.data, project = 'Barga et al')
tiss <- AddMetaData(object = tiss, metadata = meta.data)
# Create metadata columns for annotations
tiss@meta.data[,'free_annotation'] <- NA
tiss <- FilterCells(object = tiss, subset.names = c("nGene", "nUMI"),
low.thresholds = c(100, 500))
tiss <- process_tissue(tiss, 1e4)
return(tiss)
} |
61411f64761c652d3328678b46cbe579b6becb9c | f6990f4746aca0291bc6970286ad6d46b9866a55 | /man/alpha.beta.sigma.Rd | 75256bac7976f9171e605c9b2c0cd3d67418691c | [] | no_license | cran/merror | 3c730db7c55bb309b055f14a23ce120647aacd3d | 9b606d2e3ed3b6f89df35c0a4e73a5b8b50584a2 | refs/heads/master | 2023-08-31T05:10:07.313831 | 2023-08-29T13:20:02 | 2023-08-29T14:30:47 | 17,697,402 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,738 | rd | alpha.beta.sigma.Rd | \name{alpha.beta.sigma}
\alias{alpha.beta.sigma}
\title{Build an alpha-beta-sigma Matrix for Use with the cplot Function}
\description{
Creates a \eqn{3 \times N} (no. of methods) \code{matrix} consisting of the estimated alphas, betas, and imprecision sigmas for use with the \code{cplot} function.
}
\usage{
alpha.beta.sigma(x)
}
\arguments{
\item{x}{A \eqn{k \times 3} \code{data.frame} with parameter estimates in the second column where \eqn{k} is the number of methods \eqn{m \times 3}. The estimates should be arranged with the estimated \eqn{m-1} betas first, followed by the m residual variances, the variance of the true values, the \eqn{m-1} alphas, the mean of the true values. The \code{omx} function returns the fitted model in \code{fit} from which parameter estimates can be retrieved. See the examples below.}
}
\details{
This is primarily a helper function used by the \code{omx} function.
}
\value{
A \eqn{3 \times N} \code{matrix} consisting of alphas on the first row, betas on the second row, followed by raw imprecision sigmas.
}
\seealso{
\code{\link{cplot}}, \code{\link{omx}}.
}
\examples{
\dontrun{
library(OpenMx)
library(merror)
data(pm2.5)
pm <- pm2.5
# OpenMx does not like periods in data column names
names(pm) <- c('ms_conc_1','ws_conc_1','ms_conc_2','ws_conc_2','frm')
# Fit model with FRM sampler as reference
omxfit <- omx(data=pm[,c(5,1:4)],bs.q=c(0.025,0.5,0.975),reps=100)
# Extract the estimates
alpha.beta.sigma(summary(omxfit$fit)$parameters[,c(1,5,6)])
# Make a calibration plot
cplot(pm[,c(5,1:4)],1,2,alpha.beta.sigma=
alpha.beta.sigma(summary(omxfit$fit)$parameters[,c(1,5,6)]))
# The easier way
cplot(pm[,c(5,1:4)],1,2,alpha.beta.sigma=omxfit$abs)
}
}
\keyword{ model } |
44abad9004c2c376f92eef334dfc7a7cb04fa870 | 1c49c1f9b7e1d664e512f3b2830c8baf5dd1b7d0 | /scritps/tema2/05-plots.R | 35c1f921cda9be97df92f93f31fff0c3a6de5227 | [] | no_license | fmcarrero/course-R | 98354f4c93433debb2342c87e7a918f6a7a62224 | 456116e7e375b75534eee1eb32c861021dad95ed | refs/heads/master | 2020-03-14T13:57:41.303257 | 2018-06-26T21:09:43 | 2018-06-26T21:09:43 | 131,643,585 | 0 | 0 | null | null | null | null | ISO-8859-10 | R | false | false | 1,616 | r | 05-plots.R | auto <- read.csv("../data/tema2/auto-mpg.csv")
# Recode the cylinder counts as a labelled factor so plots treat them as
# categories rather than numbers.
auto$cylinders <- factor(auto$cylinders,
                         levels = c(3, 4, 5, 6, 8),
                         labels = c("3cil", "4cil", "5cil", "6cil", "8cil"))
attach(auto)
head(cylinders)
# Frequency histogram of acceleration, one colour per bar.
hist(acceleration,
     col = rainbow(12),
     xlab = "Aceleracion",
     ylab = "Frecuencias",
     main = "Histograma de las aceleraciones",
     breaks = 12)
# Density-scaled histogram of mpg with a kernel density curve on top.
hist(mpg, breaks = 16, prob = TRUE)
lines(density(mpg))
# mpg against horsepower: empty canvas, fitted regression line, then the
# points coloured by cylinder category.
plot(mpg ~ horsepower, type = "n")
fit <- lm(mpg ~ horsepower)
abline(fit)
with(subset(auto, cylinders == "8cil"),
     points(horsepower, mpg, col = "red"))
with(subset(auto, cylinders == "6cil"),
     points(horsepower, mpg, col = "yellow"))
with(subset(auto, cylinders == "5cil"),
     points(horsepower, mpg, col = "green"))
with(subset(auto, cylinders == "4cil"),
     points(horsepower, mpg, col = "blue"))
with(subset(auto, cylinders == "3cil"),
     points(horsepower, mpg, col = "black"))
# Boxplots: overall mpg, mpg by model year, mpg by cylinders, then one box
# per column of the whole data frame.
boxplot(mpg, xlab = "Millas por Galeon")
boxplot(mpg ~ model_year, xlab = "Millas por Galeon (por aņo)")
boxplot(mpg ~ cylinders, xlab = "Consumo por numero de cilindros")
boxplot(auto)
# Single scatterplot, then a scatterplot matrix of the main numeric variables.
plot(mpg ~ horsepower)
pairs(~ mpg + displacement + horsepower + weight)
# Draw two plots side by side, then restore the previous par() settings.
saved_par <- par()
par(mfrow = c(1, 2))
with(auto, {
  plot(mpg ~ weight, main = " peso vs consumo")
  plot(mpg ~ acceleration, main = "acceleracion vs consumo")
})
par(saved_par)
|
0073f4a839315d03ebf2275b7aec61bbf212d9b6 | 8ef0b905e874e873d2ce00c1d1a68ea0ccdbeda8 | /plot2.R | 7c46626ebde9c50c67c61150be7c5e401187c5b6 | [] | no_license | TSestini/ExData_Plotting1 | d5d124a7c98ece980ce806a966ca80280c6b6634 | 2c0889658290708e3cfb4729bab5b0b00ec51aa3 | refs/heads/master | 2022-10-22T18:10:35.171283 | 2020-06-15T15:49:01 | 2020-06-15T15:49:01 | 272,323,679 | 0 | 0 | null | 2020-06-15T02:26:50 | 2020-06-15T02:26:49 | null | UTF-8 | R | false | false | 668 | r | plot2.R | #### Plot 2
# Plot 2: line chart of Global Active Power across 2007-02-01 and 2007-02-02,
# written to plot2.png (480x480).
power_raw <- read.table("household_power_consumption.txt", sep = ";",
                        nrows = 85000, header = TRUE)
# Keep only the two target days and build timestamps from Date + Time.
two_days <- power_raw[power_raw$Date == "1/2/2007" | power_raw$Date == "2/2/2007",]
stamp_text <- paste(two_days$Date, two_days$Time)
stamps <- strptime(stamp_text, format = "%d/%m/%Y %H:%M:%S")
# Global_active_power is assumed to have been read as a factor, so decode it
# to numeric through its levels.
gap <- as.numeric(levels(two_days$Global_active_power))[two_days$Global_active_power]
# Render the line chart into a PNG device.
getwd()
png("plot2.png", width = 480, height = 480)
plot(stamps, gap, type = "l", xlab = NA, ylab = "Global Active Power (kilowatts)")
dev.off()
|
8118f68bd07bfd1ba906a9709d74347cce00ee35 | 246f3eee3cec218e972f64a1424fc900d167eedb | /find_best_threshold.r | 7454aaf5b22de5d8d54a341b9736595183ac3ca0 | [] | no_license | jwhitehill/MultiEnrollmentProjectWithDustin | a37aa68cc177338d61cd60e72caa28fb752bb0b9 | 21e14a5c597710b3680730d35522a4f567abbb6a | refs/heads/master | 2021-01-18T23:49:20.689242 | 2016-10-03T20:07:37 | 2016-10-03T20:07:37 | 45,936,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 531 | r | find_best_threshold.r | library(glmnet)
library(doParallel)
registerDoParallel(4)
source("utility.r")
# Load the training data once per session; reuse `d` if it already exists.
if (!exists("d")) {
  d <- loadData("train.csv")
}
# Design matrix from the demographic predictors (no interaction terms).
X <- model.matrix(numCoursesAll ~ continent + LoE + ageRange + gender, d)
# Sweep binarization thresholds for numCoursesAll and report the
# cross-validated AUC of a cv.glmnet logistic model at each one.
for (cutoff in 1:30) {
  # Binarize the response: more than `cutoff` courses -> 1, else 0.
  y <- matrix((d[, c("numCoursesAll")] > cutoff) * 1)
  results <- cv.glmnet(X, y, nfolds = 3, family = "binomial", type.measure = "auc", parallel = TRUE)
  print("MLR no interactions")
  print(cutoff)
  print(results$cvm)
}
|
7d92760891f28e639e3897df8f2e42b761325bbc | ab84adde7680f319f025396f156adb7d1df66b65 | /data-raw/cow_igo.R | f56bc2425d8dda6a57820f534d0f9cede5e92754 | [] | no_license | Louis8102/peacesciencer | 9492d44c0749ef484611003e1c7518293a4d73c4 | cc6f9b6130779b451a1300a7b199bc87904fb5b2 | refs/heads/master | 2023-04-15T11:30:23.807519 | 2021-04-28T14:56:46 | 2021-04-28T14:56:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,335 | r | cow_igo.R | library(tidyverse)
library(qs)
# Dyadic COW IGO data, stored with qs because the CSV version is very large.
# Negative membership codes mean missing or that the IGO did not yet exist,
# so blank them out before counting.
dyadic_igo <- qs::qread("~/Dropbox/data/cow/igo/dyadic_formatv3.qs") %>%
  mutate_at(vars(AALCO:ncol(.)), ~ifelse(. < 0, NA, .))
# Count shared IGO memberships for each dyad-year.
igo_columns <- dyadic_igo %>%
  select(AALCO:ncol(.))
dyadic_igo$dyadigos <- rowSums(igo_columns[, 1:ncol(igo_columns)], na.rm = TRUE)
cow_igo_ndy <- dyadic_igo %>%
  select(ccode1, ccode2, year, dyadigos)
save(cow_igo_ndy, file = "data/cow_igo_ndy.rda")
# State-year file: blank negative codes, reshape to long, and flag each
# membership type (1 = full, 2 = associate, 3 = observer).
state_year_igo <- read_csv("~/Dropbox/data/cow/igo/state_year_formatv3.csv") %>%
  mutate_at(vars(AAAID:ncol(.)), ~ifelse(. < 0, NA, .)) %>%
  select(-state) %>%
  group_by(ccode, year) %>%
  gather(igo, member_code, AAAID:ncol(.)) %>%
  mutate(full = ifelse(member_code == 1, 1, 0),
         associate = ifelse(member_code == 2, 1, 0),
         observer = ifelse(member_code == 3, 1, 0),
         anytype = ifelse(member_code >= 1, 1, 0))
# Sum the flags per state-year to get membership counts of each type.
cow_igo_sy <- state_year_igo %>%
  summarize(sum_igo_full = sum(full, na.rm = T),
            sum_igo_associate = sum(associate, na.rm = T),
            sum_igo_observer = sum(observer, na.rm = T),
            sum_igo_anytype = sum(anytype, na.rm = T)) %>%
  ungroup()
save(cow_igo_sy, file = "data/cow_igo_sy.rda")
|
060bb907c01e62ef5260d5b54acb8144d238a487 | 786d9bb431757c75ca7cc2dff2bf5d95765cc494 | /man/getEOTpaper.Rd | 3cf76c820765281efab9f9b6a5b917c189906e01 | [
"MIT"
] | permissive | NNpackages/NNtable | 573e7c046485b967852f50e7ae5effdc8d92bc12 | 6543edba390afccd5a25c615fa5fa5633107fd5a | refs/heads/master | 2023-06-08T09:39:50.570322 | 2021-06-15T07:23:33 | 2021-06-15T07:23:33 | 376,915,843 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 803 | rd | getEOTpaper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEOTpaper.R
\name{getEOTpaper}
\alias{getEOTpaper}
\title{Get EOT paper size}
\usage{
getEOTpaper(
orientation = c("port", "land"),
font_size = 8,
figure_size = c("defeault", "full"),
type = c("table", "listing", "figure"),
format = "txt"
)
}
\arguments{
\item{orientation}{The orientation of the page}
\item{font_size}{the font size. Only applicable for figure}
\item{figure_size}{Should the size be default or full}
\item{type}{Should the output be character widths or figure width in cm.}
\item{format}{\code{character} the type of output, currently \code{"txt"} or \code{"flextable"}}
}
\value{
list with paper size
}
\description{
Get EOT paper size
}
\examples{
getEOTpaper("Port", 8, type = "Table")
}
|
475adb00cfe0b3ea64f8d2fa319f0f6846ed56be | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/9231_0/rinput.R | c177ce6cd0b03057097807419dad9dbf1a091bf2 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the tree for this gene family and convert it to an unrooted tree.
testtree <- read.tree("9231_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9231_0_unrooted.txt") |
09e265d9afd056629e326e59e98e5d2ec25e1794 | 71a9be645e2887158588276b57b44b6739a06147 | /R/constructLimit.R | 0bbc2f7b7a8e02c742c07c90b593c9f48a090912 | [] | no_license | gkovaig/pg13 | 87a1010b192cf203343e9387845c871a189177a8 | 56a85a32fef3f5fc599189a2c8ebc7f880a6e056 | refs/heads/master | 2022-12-02T14:29:47.604008 | 2020-08-16T23:35:20 | 2020-08-16T23:35:20 | 288,041,994 | 0 | 0 | null | 2020-08-16T23:34:05 | 2020-08-16T23:34:04 | null | UTF-8 | R | false | false | 482 | r | constructLimit.R | #' Construct LIMIT
#' @description This is a non-terminal render, meaning that the SQL component will not be terminated with a semicolon in order to construct complex SQL queries.
#' @import SqlRender
#' @param n rows to limit to
#' @return The rendered LIMIT clause as a character string.
#' @export
constructLimit <-
  function(n) {
    # Let system.file() resolve the template path directly instead of
    # hand-building it with paste0(); it returns "" when the file is
    # missing, which we surface as a clear error rather than letting
    # readSql fail on an empty path.
    template <- system.file("sql_constr", "limit.sql", package = "pg13")
    if (template == "") {
      stop("template 'sql_constr/limit.sql' not found in package 'pg13'", call. = FALSE)
    }
    SqlRender::render(SqlRender::readSql(template), n = n)
  }
|
023736d3aec27a12650ab1d8a29746aab125f9f5 | 79247212d97cd82281783a71912d1305ff1ad652 | /plot2.R | 26b0b3e3bda39c9350de111827acd357dc50c436 | [] | no_license | rl89/ExData_Plotting1 | 03bb1db157b07a1419aab8550f91d2f6234f3de2 | ef238fb5bf5dcaadd9bfea0f8d246e274e224240 | refs/heads/master | 2020-04-01T22:43:42.894588 | 2015-05-10T15:41:53 | 2015-05-10T15:41:53 | 35,340,437 | 0 | 0 | null | 2015-05-09T18:37:19 | 2015-05-09T18:37:18 | null | UTF-8 | R | false | false | 726 | r | plot2.R | plot2 <- function(){
data <- read.table("household_power_consumption.txt", sep = ";", header = T, skip = 66000, nrows = 3700)
names(data) <- c("Date", "Time", "Global_active_power",
"Global_reactive_power", "Voltage", "Global_intensity",
"Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
subset <- as.Date(data$Date, format="%d/%m/%Y")
data <- data[subset == "2007-02-01" | subset == "2007-02-02",]
x <- paste(data$Date, data$Time)
x <- strptime(x, format="%d/%m/%Y %H:%M:%S")
png("plot2.png")
plot(x, data$Global_active_power, type = "l",
main = "", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
} |
772078fca748ef237a17977bfe87ec04d82dc713 | cc3d854fcdd2a2e6744379f23468d42079430f16 | /man/m2_names.Rd | 13f87fc19560f1d8459839c087761a44024a40d4 | [
"MIT"
] | permissive | sevenpark/AvenueAPI-R | c38eb391bc47e1c5a56e1b40e5bae7a38f7b66fc | 31cadd394f3116ebdb8e482d1a2ca790b8694747 | refs/heads/master | 2021-07-03T16:12:22.280787 | 2017-08-21T16:39:03 | 2017-08-21T16:39:03 | 100,972,537 | 0 | 0 | MIT | 2020-11-19T18:46:51 | 2017-08-21T16:35:28 | R | UTF-8 | R | false | true | 353 | rd | m2_names.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_resources.R
\docType{data}
\name{m2_names}
\alias{m2_names}
\title{Valid Firm Names for the Merchant Intel2 Data Series}
\format{A character vector of length 571}
\usage{
m2_names
}
\description{
Valid Firm Names for the Merchant Intel2 Data Series
}
\keyword{datasets}
|
dc73d634ecdfd36ea551bcf3b174d91378c19f90 | c015d6f605f0820f10deabbcb9ef846ffbe110be | /Kras_CCLE_LungCancer.R | e70472d293dec83c08bb4f8f76c8446459b71adc | [] | no_license | smacneil1/Lung_Cancer_Project_KRAS | 0c230f4cb0109ad85eafa936314966e1b7cf1033 | ce9f438dc26129cf46a848c98d93233f03f17993 | refs/heads/master | 2021-01-23T03:05:20.598992 | 2015-08-01T23:50:08 | 2015-08-01T23:50:08 | 38,844,660 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,489 | r | Kras_CCLE_LungCancer.R |
##########Running validated 300 gene KRAS signatures on samples####
# Trains ASSIGN pathway-activity models from GFP/KRAS signature samples and
# predicts KRAS pathway activity in CCLE lung cell lines, with batch
# adjustment (ComBat) between the two expression sets.
library(ASSIGN)
library(sva)
# get this file
# 36 signature samples (GFP controls plus KRAS WT/QH/GV overexpression), log2 TPM.
gfp_kras<-read.table("~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/36_GFP_KRAS_TPMlog2.txt", sep='\t', header=1, row.names=1)
head(gfp_kras)
dim(gfp_kras)
# CCLE RNA-seq (log2 TPM); keep sample-UUID column names as-is (check.names = F).
CCLE<-read.table("~/Documents/ThesisWork/CCLE_Data/CCLE_RNASeq_TPMlog.txt",header=1, row.names=1, sep='\t',check.names = F)
# Pull the ten lung lines of interest by their CCLE sample UUIDs.
CCLE_lung<-subset(CCLE,select=c("5bea08d5-6a97-41fa-b33c-d8cb45b67051", "39c19460-909e-47df-892d-86638ddd5969", "ec0491f2-e5f8-4dd4-9ebe-197916a04d6d","b759a72e-d81b-45f1-bc66-06e3d0f2f600", "20d36af9-655c-45e7-8f32-9770dd0c53c5", "d77cb0f1-9831-4613-a29a-5796b2d3754f", "73045153-e0f8-43a6-ae22-f1ecd7ce775a", "c83c157a-080c-478b-8aeb-d3104b431af0", "a1e1283d-14e1-4ff3-9e0f-d9c468e78dc4", "e619fbdb-2cd6-46a4-8a18-7fd1e23a31cc"))
head(CCLE_lung)
colnames(CCLE_lung)
# Replace the UUIDs with readable cell-line names (same order as above).
colnames(CCLE_lung) = c("H838", "H1437", "HCC4006", "H1944", "SKLU-1", "H1650", "H460", "H1395", "H1355", "H2009")
head(CCLE_lung)
dim(CCLE_lung)
write.table(CCLE_lung,"~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/CCLE_Lung_DrugScreen_RNASeq_TPMlog.txt",sep='\t', col.names = NA,quote=F)
# Group sizes for pcaplot: presumably the four 9-sample signature groups plus
# the 10 cell lines -- TODO confirm against pcaplot's expected input.
sub=c(9,9,9,9,10)
# Keep genes with fewer than 85% zeros across the 36 signature samples.
expr_f <-gfp_kras[apply(gfp_kras[,1:36]==0,1,mean) < 0.85,]
#merge the PE cells with the signature
expr<-merge_drop(expr_f,CCLE_lung)
# NOTE(review): two pdf() devices are opened (here and below) but only one
# dev.off() is called, so the first device is likely left open -- confirm.
pdf("~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/kras_CCLE_lung_pca_plots.pdf")
pcaplot(expr,sub)
# Batch labels: 1 = signature samples, 2 = CCLE lung lines.
bat<-as.matrix(cbind(colnames(expr),c(rep(1,ncol(gfp_kras)),rep(2,ncol(CCLE_lung)))))
# Batch adjust
combat_expr<-ComBat(dat=expr, batch=bat[,2], mod=NULL, numCovs=NULL)
pdf("~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/kras_CCLE_lung_pca_plots_post.pdf")
pcaplot(combat_expr,sub)
dev.off()
#subset the diff KRAS signatures and the test data
c_kras_gfp<-subset(combat_expr,select=GFP.31:GFP.39)
c_kraswt<-subset(combat_expr,select=KRASWT.1:KRASWT.9)
c_krasqh<-subset(combat_expr,select=KRASQH.1:KRASQH.9)
c_krasgv<-subset(combat_expr,select=KRASGV.1:KRASGV.9)
# Everything after the 36 signature columns is the CCLE test set.
c_test<-combat_expr[,(ncol(gfp_kras)+1):ncol(combat_expr)]
colnames(c_test)
##getting the gene list###
# Each output.rda provides output.data$processed.data$diffGeneList for one signature.
# NOTE(review): this path says 'krasqv' but is stored as the QH gene list --
# confirm the directory naming.
load("~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/Kras_300s/krasqv_300_gene_list/adapB_single/output.rda")
krasqh_300_genelist<-output.data$processed.data$diffGeneList
head(krasqh_300_genelist)
load("~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/Kras_300s/krasgv_300_gene_list/adapB_single/output.rda")
krasgv_300_genelist<-output.data$processed.data$diffGeneList
head(krasgv_300_genelist)
load("~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/Kras_300s/kraswt_300_gene_list/adapB_single/output.rda")
kraswt_300_genelist<-output.data$processed.data$diffGeneList
head(kraswt_300_genelist)
basedir="~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/predictions"
dir.create(basedir)
# NOTE(review): the kraswt labels built here are overwritten by krasgv-named
# labels two lines down, so the kraswt ASSIGN run below uses labels named
# 'krasgv' -- confirm whether the list names matter to assign_easy_multi.
trainingLabel<-list(control=list(kraswt=1:9),kraswt=(10:18))
dir.create(paste(basedir,paste("kraswt",300,"gene_list", sep="_"),sep='/'))
trainingLabel<-list(control=list(krasgv=1:9),krasgv=(10:18))
assign_easy_multi(trainingData = cbind(c_kras_gfp,c_kraswt),test=c_test,trainingLabel1 = trainingLabel,geneList = kraswt_300_genelist,out_dir_base = paste(basedir,paste("kraswt",300,"gene_list", sep="_"),sep='/'),single = 1)
trainingLabel<-list(control=list(krasgv=1:9),krasgv=(10:18))
dir.create(paste(basedir,paste("krasgv",300,"gene_list", sep="_"),sep='/'))
trainingLabel<-list(control=list(krasgv=1:9),krasgv=(10:18))
assign_easy_multi(trainingData = cbind(c_kras_gfp,c_krasgv),test=c_test,trainingLabel1 = trainingLabel,geneList = krasgv_300_genelist,out_dir_base = paste(basedir,paste("krasgv",300,"gene_list", sep="_"),sep='/'),single = 1)
trainingLabel<-list(control=list(krasqh=1:9),krasqh=(10:18))
dir.create(paste(basedir,paste("krasqh",300,"gene_list", sep="_"),sep='/'))
assign_easy_multi(trainingData = cbind(c_kras_gfp,c_krasqh),test=c_test,trainingLabel1 = trainingLabel,geneList = krasqh_300_genelist,out_dir_base = paste(basedir,paste("krasqh",300,"gene_list", sep="_"),sep='/'),single = 1)
# Collect all per-signature prediction files and clean up the column names.
pe_preds<-gatherFile(basedir)
colnames(pe_preds)<-gsub(colnames(pe_preds),pattern = "/pathway_activity_testset.csv/V1",replacement = "")
colnames(pe_preds)<-gsub(colnames(pe_preds),pattern = "/pathway_activity_testset.csv",replacement = "")
rownames(pe_preds)
write.table(pe_preds,"~/Documents/ThesisWork/GitRepos/Lung_Cancer_Project_KRAS/CCLE_Lung_KRAS_Predictions",sep='\t', col.names = NA,quote=F)
# Not using
# Everything below keys sample names out of the rownames and merges phenotype
# data / drug-response correlations; kept as reference code.
for(i in 1:nrow(pe_preds)){
  pe_preds$new_names[i]<-strsplit(rownames(pe_preds)[i],"_")[[1]][2]
}
library(xlsx)
f=read.xlsx("~/Dropbox/bild_signatures/pe_preds/Copy of PE_Gray_DNA_RNA.xls",sheetName = "Sheet1")
pe_pts<- f[1:14,]
pe_pts[,1]<-toupper(pe_pts[,1])
order(rownames(pe_pts))
rownames(pe_pts)<-pe_pts$RNA
pe_pts<-pe_pts[order(rownames(pe_pts)),]
pe_preds<-pe_preds[order(pe_preds$new_names),]
pe_pred_phen<-merge(pe_preds,pe_pts,by.x=57,by.y=1)
dim(pe_pred_phen)
gsub("/SigProtein","",colnames(pe_pred_phen))
colnames(pe_pred_phen)
colnames(pe_pred_phen)[67]<-"PR.Status"
colnames(pe_pred_phen)[68]<-"HER2.Status"
cols<-gsub("/sigProtein","",c(colnames(single_pathway_best),colnames(multi_pathway_best)))
subset(pe_pred_phen,select=cols)#,"ER.Status","PR.Status","HER2.Status")]
################## for correlations example codes####
drugs<-read.delim("ICBP_drugs.txt", header=1, sep='\t',row.names=1)
icbp_drug<-merge_drop(data_icbp,drugs)
colnames(icbp_drug)
# Spearman correlation of each pathway prediction against each of 90 drugs.
cor_mat=p_mat=matrix(0,length(filenames_icbp_multi),90)
rownames(cor_mat)=rownames(p_mat)=colnames(icbp_drug)[1:length(filenames_icbp_multi)]
colnames(cor_mat)=colnames(p_mat)=colnames(icbp_drug)[(length(filenames_icbp_multi)+11):ncol(icbp_drug)]
for(i in 1:length(filenames_icbp_multi)){
  for(j in 1:90){
    temp=cor.test(icbp_drug[,i],icbp_drug[,(j+length(filenames_icbp_multi)+10)],use="pairwise",method="spearman")
    print(j)
    print(temp)
    cor_mat[i,j]=temp$estimate
    p_mat[i,j]=temp$p.value
  }
}
#write.table(cor_mat,"~/Dropbox/bild_signatures/kras/ICBP_single_cor_drug_mat_4_21.txt",col.names = NA,quote=F,sep='\t')
colnames(p_mat)=paste(colnames(p_mat),"p_value",sep="_")
write.table(p_mat,"~/Dropbox/bild_signatures/kras/ICBP_single_p_drug_mat_4_21.txt",col.names = NA,quote=F,sep='\t')
# Interleave correlations and p-values by sorting the combined column names.
cor_p_mat<-cbind(cor_mat,p_mat)
order(colnames(cor_p_mat))
cor_p_mat<-cor_p_mat[,order(colnames(cor_p_mat))]
write.table(cor_p_mat,"~/Dropbox/bild_signatures/kras/ICBP_single_cor_p_mat_6_18.txt",col.names = NA,quote=F,sep='\t')
|
a8715848ee5d9593696506de55ab60e905be8538 | bb164f34ab0b39465f195c95e31c49f1c34c0292 | /man/Elasticity.Rd | 8abf384d91c34669c19bc83c651cc6311dc88900 | [] | no_license | ashander/matrixmodels | d02538457e9ccd2bdb7446e73fde02637df0113d | 5ed079365074e26b35901486fcab5c952b58a7f3 | refs/heads/master | 2021-01-10T21:18:49.089832 | 2015-08-29T21:25:43 | 2015-08-29T21:25:43 | 4,160,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 462 | rd | Elasticity.Rd | \name{Elasticity}
\alias{Elasticity}
\title{Compute a matrix of sensitivities}
\usage{
Elasticity(A)
}
\arguments{
\item{A}{projection matrix for population model x(t+1) =
A x(t)}
}
\description{
Compute a matrix of sensitivities
}
\details{
The sensitivity is defined as the change in dominant
eigenvalue, i.e., the growth rate, with changes in an
entry in matrix A. See Caswell 2001.
}
\examples{
A = matrix(c(0,1,.5,0), nrow=2)
Elasticity(A)
}
|
7091b16ac7bfbc09c459d89de3a76f6f5eb05882 | cb44591f1d940bd1f08bd30991b769da23127ebf | /Data Cleaning &EDA.R | 6f972b8d6ad2dbe4e3decc9a8c6d99ad8b0a48b9 | [] | no_license | angelayuanyuan/PUBG-Finish-Placement-Prediction | cb5a0f9a5591cfc005bcc64682cd5c0b2c404cd5 | e3d7a353d82867b0c682b83e461368d2d8fab690 | refs/heads/master | 2020-03-31T09:52:00.209085 | 2018-10-09T01:49:13 | 2018-10-09T01:49:13 | 152,113,553 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,368 | r | Data Cleaning &EDA.R | library(data.table)
library(ggplot2)
library(dplyr)
###########################################
############## Read Data ##################
###########################################
# One-time conversion: read the Kaggle CSVs with data.table::fread and cache
# them as RDS for faster reloads.
# train.raw <- fread("../all/train.csv")
# test.raw <- fread("../all/test.csv")
# train <- saveRDS(train.raw,file = "train.rds")
# test <- saveRDS(test.raw,file = "test.rds")
train.raw <- readRDS("train.rds")
test.raw <- readRDS("test.rds")
# compare train and test sets (columns present in train but not test)
setdiff(names(train.raw), names(test.raw))
# variables' distinct values (one row per column, count of unique values)
var_value.tr <- data.frame(apply(train.raw,2, n_distinct),row.names = colnames(train.raw))
colnames(var_value.tr) <- "count"
var_value.te <- data.frame(apply(test.raw,2, n_distinct),row.names = colnames(test.raw))
colnames(var_value.te) <- "count"
###########################################
################## EDA ####################
###########################################
# 4357336 records, 1888732 teams, 47734 matches
# NOTE(review): arrange() on a grouped tibble sorts across groups unless
# .by_group = TRUE -- confirm the intended ordering here.
train.raw <- train.raw%>%
  group_by(matchId)%>%
  arrange(groupId)
# number of groups in matches with records
# (train.raw is grouped by matchId, so n() counts rows per match)
tr.match <- train.raw%>%
  summarise(n=n()) # makes sense to have minimum 1 group, maximum 100 groups on record
# group-level stats: players per team
tr.players <- train.raw%>%
  select(groupId,Id)%>%
  group_by(groupId)%>%
  summarise(n=n())
# inspect one oversized team
tr.group <- train.raw%>%
  filter(groupId=="1193309")
quantile(tr.players$n) # having more than 4 players in a team is weird (maximum 8 players is possible but nothing above)
# does this happen in the test set too?
te.players <- test.raw%>%
  select(groupId,Id)%>%
  group_by(groupId)%>%
  summarise(n=n())
quantile(te.players$n) # yes, then we'll keep these data for now
# response variable: final placement percentile
ggplot(train.raw)+
  geom_histogram(aes(winPlacePerc),fill="#9999CC")
# independent variables
# walk distance
ggplot(train.raw)+
  geom_histogram(aes(walkDistance),fill="#66CC99")
quantile(train.raw$walkDistance) # outliers
# ride distance
ggplot(train.raw)+
  geom_histogram(aes(rideDistance),fill="#66CC99")
quantile(train.raw$rideDistance) # outliers
# zoom in below 5000 to see the bulk of the distribution
ggplot(train.raw[train.raw$rideDistance<5000,])+
  geom_histogram(aes(rideDistance),fill="#66CC99")
sum(train.raw$rideDistance==0) # 3439985 records have zero ride distance (never used a car in the match)
# swim distance
ggplot(train.raw)+
  geom_histogram(aes(swimDistance),fill="#66CC99")
quantile(train.raw$swimDistance)
sum(train.raw$swimDistance==0) # 4076544 records have zero swim distance (am I the only one who likes to swim??)
# weapons acquired
ggplot(train.raw)+
  geom_histogram(aes(weaponsAcquired),fill="#CC6666")
quantile(train.raw$weaponsAcquired)
ggplot(train.raw[train.raw$weaponsAcquired<20,])+
  geom_histogram(aes(weaponsAcquired),fill="#CC6666")
median(train.raw$weaponsAcquired) # 3 weapons
# knocked downs
ggplot(train.raw)+
  geom_histogram(aes(DBNOs),fill="#CC6666")
quantile(train.raw$DBNOs,0.99999) # more than 30 knock downs is really unlikely to happen
ggplot(train.raw[train.raw$DBNOs<20,])+
  geom_histogram(aes(DBNOs),fill="#CC6666") # most players have zero knock downs
# kills
ggplot(train.raw)+
  geom_histogram(aes(kills),fill="#CC6666")
quantile(train.raw$kills,0.99999) # more than 30 kills is really unlikely to happen too
ggplot(train.raw[train.raw$kills<20,])+
  geom_histogram(aes(kills),fill="#CC6666") # most players have zero kills
# longest kill distance
ggplot(train.raw)+
  geom_histogram(aes(longestKill),fill="#CC6666")
quantile(train.raw$longestKill,0.99999)
ggplot(train.raw[train.raw$longestKill<300,])+
  geom_histogram(aes(longestKill),fill="#CC6666")
# head shot kills
ggplot(train.raw)+
  geom_histogram(aes(headshotKills),fill="#CC6666")
quantile(train.raw$headshotKills,0.99999)
ggplot(train.raw[train.raw$headshotKills<10,])+
  geom_histogram(aes(headshotKills),fill="#CC6666")
###########################################
############## Sample EDA #################
###########################################
# Jitter plots on a 50k random sample to keep rendering manageable.
# NOTE(review): alpha=0.2 inside aes() maps a constant as an aesthetic (and
# adds a legend); setting alpha outside aes() is usually what is intended.
set.seed(1234)
tr.sample <- sample_n(train.raw, 50000)
# kills versus rank
ggplot(tr.sample)+
  geom_jitter(aes(winPlacePerc,kills,alpha=0.2))
# assists versus rank
ggplot(tr.sample)+
  geom_jitter(aes(winPlacePerc,assists,alpha=0.2))
# head shots versus rank
ggplot(tr.sample)+
  geom_jitter(aes(winPlacePerc,headshotKills,alpha=0.2))
# weapons versus rank
ggplot(tr.sample)+
  geom_jitter(aes(winPlacePerc,weaponsAcquired,alpha=0.2))
|
31d9cf6d5c998a7c145338a3f398e5ce17bf56ac | 8b982bcdfa3a2f6b7897626f3849fc1536c220bf | /src/xgb_airbnb.R | a0a5c5dd2d28aeb1aa3333b90479fbb06d96d202 | [
"MIT"
] | permissive | syyunn/bays-dl-paper | 766c1dfe1bd5602c92743d0b06b90cc18bed00ad | 40191d0d900513c2568c299ff11e6dc27103a708 | refs/heads/master | 2020-12-09T13:42:00.656688 | 2018-04-02T20:21:26 | 2018-04-02T20:21:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,826 | r | xgb_airbnb.R | # This R script is based on indradenbakker's R script
# I customized eval_metric ndcg5 so that it is much easier to monitor ndcg value.
# load libraries
library(xgboost)
library(readr)
library(stringr)
library(caret)
library(car)
set.seed(1)
# load data
df_train = read_csv("../data/airbnb/train_users.csv")
df_test = read_csv("../data/airbnb/test_users.csv")
labels = df_train['country_destination']
df_train = df_train[-grep('country_destination', colnames(df_train))]
# Custom NDCG@5 evaluation metric for xgboost's multi:softprob output.
# `preds` arrives as one flat vector of class probabilities; `dtrain`
# supplies the true labels (0-based class indices).
ndcg5 <- function(preds, dtrain) {
  labels <- getinfo(dtrain,"label")
  num.class = 12
  # Reshape to one column per observation, one row per class.
  pred <- matrix(preds, nrow = num.class)
  # For each observation: its five most probable classes, best first,
  # converted to 0-based indices to match the labels.
  top <- t(apply(pred, 2, function(y) order(y)[num.class:(num.class-4)]-1))
  # Matrix-vs-vector comparison recycles `labels` down each column, so row i
  # flags where observation i's true label appears among its top 5.
  x <- ifelse(top==labels,1,0)
  # Discounted cumulative gain over a 0/1 relevance vector.
  dcg <- function(y) sum((2^y - 1)/log(2:(length(y)+1), base = 2))
  ndcg <- mean(apply(x,1,dcg))
  return(list(metric = "ndcg5", value = ndcg))
}
# combine train and test data so feature engineering is applied consistently
df_all = rbind(df_train,df_test)
# remove date_first_booking
df_all = df_all[-c(which(colnames(df_all) %in% c('date_first_booking')))]
# replace missing values with the sentinel -1
df_all[is.na(df_all)] <- -1
# split date_account_created in year, month and day
dac = as.data.frame(str_split_fixed(df_all$date_account_created, '-', 3))
df_all['dac_year'] = dac[,1]
df_all['dac_month'] = dac[,2]
df_all['dac_day'] = dac[,3]
df_all = df_all[,-c(which(colnames(df_all) %in% c('date_account_created')))]
# split timestamp_first_active in year, month and day
df_all[,'tfa_year'] = substring(as.character(df_all[,'timestamp_first_active']), 1, 4)
df_all['tfa_month'] = substring(as.character(df_all['timestamp_first_active']), 5, 6)
df_all['tfa_day'] = substring(as.character(df_all['timestamp_first_active']), 7, 8)
df_all = df_all[,-c(which(colnames(df_all) %in% c('timestamp_first_active')))]
# clean Age by collapsing implausible values to the missing marker
df_all[df_all$age < 14 | df_all$age > 100,'age'] <- -1
# one-hot-encoding features
ohe_feats = c('gender', 'signup_method', 'signup_flow', 'language', 'affiliate_channel', 'affiliate_provider', 'first_affiliate_tracked', 'signup_app', 'first_device_type', 'first_browser')
dummies <- dummyVars(~ gender + signup_method + signup_flow + language + affiliate_channel + affiliate_provider + first_affiliate_tracked + signup_app + first_device_type + first_browser, data = df_all)
df_all_ohe <- as.data.frame(predict(dummies, newdata = df_all))
df_all_combined <- cbind(df_all[,-c(which(colnames(df_all) %in% ohe_feats))],df_all_ohe)
# split train and test by the user ids
X = df_all_combined[df_all_combined$id %in% df_train$id,]
# map destination strings to the 0..11 class codes expected by xgboost
y <- recode(labels$country_destination,"'NDF'=0; 'US'=1; 'other'=2; 'FR'=3; 'CA'=4; 'GB'=5; 'ES'=6; 'IT'=7; 'PT'=8; 'NL'=9; 'DE'=10; 'AU'=11")
X_test = df_all_combined[df_all_combined$id %in% df_test$id,]
# train xgboost (column 1 is the id, so it is dropped from the feature matrix)
xgb <- xgboost(data = data.matrix(X[,-1]),
               label = y,
               eta = 0.1,
               max_depth = 6,
               nround=100,
               subsample = 0.7,
               colsample_bytree = 0.8,
               seed = 1,
               eval_metric = ndcg5,
               objective = "multi:softprob",
               num_class = 12,
               nthread = 3
)
# predict values in test set
y_pred <- predict(xgb, data.matrix(X_test[,-1]))
# extract the 5 classes with highest probabilities
predictions <- as.data.frame(matrix(y_pred, nrow=12))
rownames(predictions) <- c('NDF','US','other','FR','CA','GB','ES','IT','PT','NL','DE','AU')
# sort() is ascending, so [12:8] picks the top 5 classes, best first
predictions_top5 <- as.vector(apply(predictions, 2, function(x) names(sort(x)[12:8])))
# create submission: each test user id repeated once per predicted class
ids <- NULL
for (i in 1:NROW(X_test)) {
  idx <- X_test$id[i]
  ids <- append(ids, rep(idx,5))
}
submission <- NULL
submission$id <- ids
submission$country <- predictions_top5
# generate submission file
submission <- as.data.frame(submission)
write.csv(submission, "submission.csv", quote=FALSE, row.names = FALSE) |
c305b9bf7e478eedccacd2df700b9e2a5544536d | 29585dff702209dd446c0ab52ceea046c58e384e | /humanFormat/R/formatDuration.R | 6f574ca34b704eaadd329b2e619d685ec5ac0b94 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,184 | r | formatDuration.R | kNanosecond <- 1
# Duration unit sizes expressed in nanoseconds (kNanosecond, defined above,
# is the base unit: 1).
kMicrosecond <- kNanosecond * 1000
kMillisecond <- kMicrosecond * 1000
kSecond <- kMillisecond * 1000
kMinute <- kSecond * 60
kHour <- kMinute * 60
# Format a sub-minute duration given in nanoseconds, picking the largest unit
# (ns, us, ms, s) in which the value is at least 1. Vectorized; zero -> "0".
formatDurationSmall <- function(ns) {
	unit_labels <- c("ns", "us", "ms", "s")
	# Order of magnitude in steps of 1000; undefined (NA) for a zero duration.
	magnitude <- ifelse(ns == 0, NA, floor(log(ns, 1000)))
	unit <- ifelse(ns == 0, "", unit_labels[magnitude + 1])
	value <- ifelse(ns == 0, "0", sprintf("%g", ns / 1000^magnitude))
	paste0(value, unit)
}
# Format a duration of more than an hour as "<H>h<M>m<S>s", delegating the
# sub-hour remainder to the minute formatter.
formatDurationHour <- function(ns) {
	whole_hours <- ns / kHour
	leftover <- ns %% kHour
	sprintf("%.0fh%s", whole_hours, formatDurationMinute(leftover))
}
# Format a duration of a minute or more as "<M>m<S>s"; anything over an hour
# is handed off to the hour formatter. Vectorized via ifelse().
formatDurationMinute <- function(ns) {
	seconds_text <- format((ns %% kMinute) / kSecond, digits=9, scientific=F)
	minute_text <- sprintf("%.0fm%ss", ns / kMinute, seconds_text)
	# Keep ifelse() (not `if`) so mixed vectors format elementwise; its lazy
	# branch evaluation also means the hour branch only runs when some
	# element actually exceeds an hour, avoiding needless recursion.
	ifelse(ns > kHour, formatDurationHour(ns), minute_text)
}
# Human-format a duration given in nanoseconds (e.g. 1500 -> "1.5us"),
# prefixing negative inputs with "-". Vectorized.
formatDuration <- function(ns) {
	sign_mark <- ifelse(ns < 0, "-", "")
	magnitude <- abs(ns)
	formatted <- ifelse(magnitude > kMinute,
		formatDurationMinute(magnitude),
		formatDurationSmall(magnitude))
	paste0(sign_mark, formatted)
}
# formatDuration already takes nanoseconds, so expose it under that name too.
formatNanoseconds <- formatDuration
# Convenience wrappers: convert the given unit to nanoseconds, then format.
formatMicroseconds <- function(us) formatNanoseconds(us * kMicrosecond)
formatMilliseconds <- function(ms) formatNanoseconds(ms * kMillisecond)
formatSeconds <- function(s) formatNanoseconds(s * kSecond)
|
690effd213f99825dfcddf85813f8e13c6b7cc97 | a6dd945657d063735fce788ad2fe2659843bae33 | /Generating Families Functions/sim.simCurAgeVar.R | 66f78ebf5a26acad70546a6b3ba322bbbaf7955e | [] | no_license | theohuang/Gradient-boosting | bea103b232c0e1f45d55dbd3c6a514d026f5ff2c | 2abe54ea2b1cf9fbf4a68450f19595150e20c0ad | refs/heads/master | 2021-03-22T05:17:08.481646 | 2020-09-09T13:59:36 | 2020-09-09T13:59:36 | 120,679,021 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,216 | r | sim.simCurAgeVar.R | #' Simulate Current Ages for Branch
#'
#' Returns a numeric vector of current ages for the progeny of the
#' branch, plus any parents who have missing ages (parents who were simulated/not
#' blood relations of the main family, i.e. spouses or the grandparents). The
#' childrens' ages are simulated based on their mother's age.
#' @param nProg number of progeny in branch
#' @param mothAge mother's age, if known (optional)
#' @param fathAge father's age, if known (optional)
#' @details Does not ensure that the children have different ages from each other
#' (i.e., two siblings with the same ages are not necessarily twins).
#' If one of the parents' ages is missing, it will be simulated based on the other
#' parent's age. If both are missing, they will be assumed to be at the top of tree
#' (grandparents). The grandmother's age will be simulated from a Norm(100, 2)
#' distribution and the grandfather's age will be simulated based on her age.
#' @family simulations
sim.simCurAgeVar = function(nProg, mothAge, fathAge) {
  # Ages that have to be simulated for the parents themselves are collected
  # here; when both parents' ages are supplied this stays empty.
  ages <- numeric(0)
  if (missing(mothAge) && missing(fathAge)) {
    # Neither parent age is known: treat the couple as the top of the tree
    # (grandparents). Mother ~ Norm(100, 2); father centered on her age.
    mothAge <- round(rnorm(1, 100, 2))
    fathAge <- round(rnorm(1, mothAge, 2))
    ages <- c(fathAge, mothAge)
  } else if (missing(mothAge)) {
    # Only the mother is unknown: draw her age near the father's, floor of 15.
    mothAge <- max(15, round(rnorm(1, fathAge, 2)))
    ages <- mothAge
  } else if (missing(fathAge)) {
    # Only the father is unknown: draw his age near the mother's, floor of 15.
    fathAge <- max(15, round(rnorm(1, mothAge, 2)))
    ages <- fathAge
  }
  # Children are centered around (mother's age - 30) and capped at
  # mothAge - 15, so each child is at least 15 years younger than her.
  c(ages, pmin(mothAge - 15, round(rnorm(nProg, mothAge - 30, 5))))
}
92453265c017fc4613dbfc54328c23618e8aa921 | 9848b0e405a4a36f24d903ebe9dd8d505d14251d | /ts_trend.R | 8702f42259048379f8c5d54d18a33cc4f0e3be20 | [] | no_license | jsn-jin/economic-forecasting | b73e968923974711f065e71b99c262f8f686d8c0 | 4e04411537ff09c9f17f932213f5288ad3810afb | refs/heads/master | 2022-12-09T10:57:31.053283 | 2020-09-03T12:59:42 | 2020-09-03T12:59:42 | 272,431,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,533 | r | ts_trend.R |
#********************************************************************************
# Date: 08/17/2020
# Comment(s): R code example for fitting/forecasting a trend to ts data.
# Data File(s): labordata.dat
#********************************************************************************
# Variable Definitions
# male   = labor force male participation rate (response variable)
# female = labor force female participation rate (response variable)
# t      = time (monthly observations from 1948-1991)
#********************************************************************************
# Set your 'working directory' to the folder where all the data and respective
# codes are located.

# Clear all variables and prior sessions
rm(list = ls(all = TRUE))

# Load Libraries
library(forecast)
library(stats)
library(tis)

# NOTE: to add recession bands:
# Example: Presidents Approval Rating
# plot(presidents, type = 'n', ylab = "Presidents Approval Rating")
# nberShade()
# lines(presidents)

# Read in the data into a data file and attach names:
z <- read.table("labordata.dat")
names(z) <- c("male", "female", "total")
attach(z)

# Convert data to time series format:
male_ts <- ts(male, start = c(1948, 1), freq = 12)
# Fractional-year time index, one value per monthly observation
# (FIX: the original line was missing its closing parenthesis)
t <- seq(1948, 1991, length = length(male_ts))
ts.plot(male_ts)
# detach(z)

# ---------------------------- [1] TREND FITTING ----------------------------- #

# Linear Fit
# NOTE: quartz() opens a new plot window on macOS; use windows()/x11() elsewhere.
m1 <- lm(male_ts ~ t)
quartz()
par(mfrow = c(2, 1))
plot(male_ts, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', ylim = c(75, 84), xlim = c(1968, 1995))
# The next command adds the U.S. recession bands
nberShade()
lines(male_ts, lwd = 2, col = "skyblue3")
lines(t, m1$fit, col = "red3", lwd = 2)
plot(t, m1$res, ylab = "Residuals", type = 'l', xlab = "Time")
par(mfrow = c(1, 1))

## -------------[*] TIME DUMMY TESTING-------------- ##
# Fitting against tslm()'s built-in trend or an explicit time dummy
# produces the same fitted line as m1 (only the time scale differs).
mtest1 <- tslm(male_ts ~ trend)
mtest1
TIME <- seq_along(male_ts)  # time dummy (1, 2, ..., n)
mtest2 <- lm(male_ts ~ TIME)
mtest2
par(mfrow = c(1, 1))
plot(male_ts, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', ylim = c(75, 84), xlim = c(1968, 1995))
lines(t, m1$fit, col = "red3", lwd = 2)
# FIX: the fitted object is 'mtest1'; 'mtest' was never defined
lines(t, mtest1$fit, col = "purple", lwd = 2) # same line
lines(t, mtest2$fit, col = "green", lwd = 2)  # same line
## -------------------------------------------------- ##

# Quadratic Fit
m2 <- lm(male_ts ~ t + I(t^2))
quartz()
par(mfrow = c(2, 1))
plot(male_ts, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', ylim = c(75, 84), xlim = c(1968, 1995))
lines(t, m2$fit, col = "red3", lwd = 2)
plot(t, m2$res, ylab = "Residuals", type = 'l', xlab = "Time")
par(mfrow = c(2, 1))

# Log-Linear Fit
m3 <- lm(log(male_ts) ~ t)
quartz()
par(mfrow = c(2, 1))
plot(log(male_ts), ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', xlim = c(1968, 1995))
lines(t, m3$fit, col = "red3", lwd = 2)
plot(t, m3$res, ylab = "Residuals", type = 'l', xlab = "Time")

# Exponential Fit
ds <- data.frame(x = t, y = male_ts)
par(mfrow = c(2, 1))
plot(male_ts, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', ylim = c(75, 84), xlim = c(1968, 1995))
# lines(t, m1$fit, col = "green", lwd = 2)
# FIX: the model formula now uses the data-frame column 'x' (identical values
# to 't'), so predict() with newdata = list(x = ...) behaves as intended;
# the original formula referenced the global 't' and ignored newdata.
m4 <- nls(y ~ exp(a + b * x), data = ds, start = list(a = 0, b = 0))
lines(ds$x, predict(m4, list(x = ds$x)), col = "red3", lwd = 2)
plot(t, residuals(m4), ylab = "Residuals", type = 'l', xlab = "Time")
summary(m4)

# ----------------------- [2] MODEL SELECTION ------------------------ #
# Compare models using AIC and BIC (lower is better)
AIC(m1, m2, m3, m4)
BIC(m1, m2, m3, m4)

quartz()
plot(stl(male_ts, s.window = "periodic")) # Seasonal + Trend + Remainder (cycles)
# stl() is super powerful

# ----------------------- [3] TREND FORECASTING ------------------------ #
# Linear: forecast 1992-1999 with 95% confidence and prediction bands
tn <- data.frame(t = seq(1992, 1999))
pred <- predict(lm(male_ts ~ t), tn, se.fit = TRUE)
# plot(c(male_ts, pred$fit), type = 'l', xlim = c(1940, 2000))
pred.plim <- predict(lm(male_ts ~ t), tn, level = 0.95, interval = "prediction")
pred.clim <- predict(lm(male_ts ~ t), tn, level = 0.95, interval = "confidence")
plot(male_ts, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', ylim = c(70, 84), xlim = c(1968, 1999))
matplot(tn$t, cbind(pred.clim, pred.plim[, -1]), lty = c(1, 1, 1, 3, 3),
        type = "l", lwd = 2, ylab = "predicted y", xlab = "Time", add = TRUE)
# dev.print(device = postscript, "tsfig.eps", width = 7, height = 7, horizontal = FALSE)
# dev.off()

# Quadratic: same forecast exercise with the quadratic trend
pred <- predict(lm(male_ts ~ t + I(t^2)), tn, se.fit = TRUE)
# plot(c(male_ts, pred$fit), type = 'l', xlim = c(1940, 2000))
pred.plim <- predict(lm(male_ts ~ t + I(t^2)), tn, level = 0.95, interval = "prediction")
pred.clim <- predict(lm(male_ts ~ t + I(t^2)), tn, level = 0.95, interval = "confidence")
plot(male_ts, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 2,
     col = 'skyblue3', ylim = c(70, 84), xlim = c(1968, 1999))
matplot(tn$t, cbind(pred.clim, pred.plim[, -1]), lty = c(1, 1, 1, 3, 3),
        type = "l", lwd = 2, ylab = "predicted y", xlab = "Time", add = TRUE)

# ----------------------- [4] Holt-Winters Filter ------------------------ #
hwfit <- HoltWinters(male_ts)
quartz()
hwpred <- predict(hwfit, 60, prediction.interval = TRUE, level = 0.5)
plot(hwfit, hwpred, ylab = "Participation Rate (Male)", xlab = "Time",
     xlim = c(1948, 1999))
# lines(predict(hwfit, n.ahead = 60), lwd = 1.5, col = 'blue')
# plot(hwfit, ylab = "Participation Rate (Male)", xlab = "Time", lwd = 1, col = 'lack', xlim = c(1948, 1999))
# lines(predict(hwfit, n.ahead = 60), lwd = 1.5, col = 'blue')
|
33592117129e70d99746c822edfe67e31a1d8161 | 547df70472fc95f3b253e83672d4fc26b776e915 | /ignore_files/iNatskew/inat_skew.R | 8171bad8aa10c18755896a6be7c18fd1fdcaabb5 | [] | no_license | mbelitz/belitz_etal_phenometrics | ea6e5f3bf6a273e16d47e3fbd156c2fbf7f0a325 | 77deb08e47529aba8787c084810124e6788d01ae | refs/heads/master | 2020-09-06T03:41:16.093047 | 2020-05-22T12:37:59 | 2020-05-22T12:37:59 | 220,308,913 | 0 | 0 | null | 2020-01-07T16:41:04 | 2019-11-07T19:02:51 | R | UTF-8 | R | false | false | 7,484 | r | inat_skew.R | library(dplyr)
library(ggplot2)
# Compare Pollard-transect flight curves (survey counts by day of year) with
# iNaturalist observation dates for four butterfly species, first in Ohio
# (2017) and then in Illinois (2018). (dplyr is loaded earlier in the file.)

oh <- read.csv("iNatskew/pollard_flightcurves_OH.csv", stringsAsFactors = FALSE)
inat_test <- read.csv("iNatskew/iNat_testdata.csv", stringsAsFactors = FALSE)

# Per-species count columns from the Ohio survey table
scybele_oh <- oh %>%
  select(S.cybele, surveyN, DAY)
anumitor_oh <- oh %>%
  select(A.numitor, surveyN, DAY)
cpegala_oh <- oh %>%
  select(C.pegala, surveyN, DAY)
mcymela_oh <- oh %>%
  select(M.cymela, surveyN, DAY)

# iNaturalist 2017 records restricted to a one-degree cell in northern Ohio
scybele_inat_2017 <- inat_test %>%
  filter(scientific_name == "Speyeria cybele") %>%
  filter(year == 2017) %>%
  filter(latitude >= 41 & latitude <= 42) %>%
  filter(longitude >= -82 & longitude <= -81)
anumitor_inat_2017 <- inat_test %>%
  filter(scientific_name == "Ancyloxypha numitor") %>%
  filter(year == 2017) %>%
  filter(latitude >= 41 & latitude <= 42) %>%
  filter(longitude >= -82 & longitude <= -81)
mcymela_inat_2017 <- inat_test %>%
  filter(scientific_name == "Megisto cymela") %>%
  filter(year == 2017) %>%
  filter(latitude >= 41 & latitude <= 42) %>%
  filter(longitude >= -82 & longitude <= -81)
cpegala_inat_2017 <- inat_test %>%
  filter(scientific_name == "Cercyonis pegala") %>%
  filter(year == 2017) %>%
  filter(latitude >= 41 & latitude <= 42) %>%
  filter(longitude >= -82 & longitude <= -81)

# Speyeria cybele: draw 100 survey days weighted by observed abundance and
# compare the mean/sd day-of-year with the iNaturalist records
set.seed(70)
d <- sample_n(scybele_oh, size = 100, weight = S.cybele, replace = TRUE)
mean(d$DAY)
sd(d$DAY)
mean(scybele_inat_2017$day)
sd(scybele_inat_2017$day)

### Megisto cymela
set.seed(70)
mc <- sample_n(mcymela_oh, size = 100, weight = M.cymela, replace = TRUE)
mean(mc$DAY)
sd(mc$DAY)
mean(mcymela_inat_2017$day)
sd(mcymela_inat_2017$day)

# Ancyloxypha numitor
set.seed(70)
an <- sample_n(anumitor_oh, size = 100, weight = A.numitor, replace = TRUE)
mean(an$DAY)
sd(an$DAY)
mean(anumitor_inat_2017$day)
sd(anumitor_inat_2017$day)

# Cercyonis pegala
set.seed(70)
cp <- sample_n(cpegala_oh, size = 100, weight = C.pegala, replace = TRUE)
mean(cp$DAY)
sd(cp$DAY)
mean(cpegala_inat_2017$day)
sd(cpegala_inat_2017$day)

##### Plot ohio data: survey flight curve (purple) vs iNat density (black),
##### with vertical lines at the respective mean dates computed above
sc_oh_plot <- ggplot() +
  geom_line(scybele_oh, mapping = aes(x = DAY, y = S.cybele/nrow(scybele_oh)), color = "purple") +
  geom_density(scybele_inat_2017, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 196.39, color = "purple") +
  geom_vline(xintercept = 203.6, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Speyeria cybele") +
  theme_classic()
mc_oh_plot <- ggplot() +
  geom_line(mcymela_oh, mapping = aes(x = DAY, y = M.cymela/nrow(mcymela_oh)), color = "purple") +
  geom_density(mcymela_inat_2017, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 155.58, color = "purple") +
  geom_vline(xintercept = 158.59, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Megisto cymela") +
  theme_classic()
an_oh_plot <- ggplot() +
  geom_line(anumitor_oh, mapping = aes(x = DAY, y = A.numitor/nrow(anumitor_oh)), color = "purple") +
  geom_density(anumitor_inat_2017, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 210.99, color = "purple") +
  geom_vline(xintercept = 199.33, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Ancyloxypha numitor") +
  theme_classic()
cp_oh_plot <- ggplot() +
  geom_line(cpegala_oh, mapping = aes(x = DAY, y = C.pegala/nrow(cpegala_oh)), color = "purple") +
  geom_density(cpegala_inat_2017, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 197.67, color = "purple") +
  geom_vline(xintercept = 200.7, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Cercyonis pegala") +
  theme_classic()
cowplot::plot_grid(sc_oh_plot, mc_oh_plot, an_oh_plot, cp_oh_plot,
                   nrow = 2)
ggsave(filename = "iNatskew/Ohiotests.png")

## Now with the IL data
il <- read.csv("iNatskew/pollard_flightCurves_IL.csv", stringsAsFactors = FALSE)

scybele_il <- il %>%
  select(S.cybele, surveyN, DAY)
anumitor_il <- il %>%
  select(A.numitor, surveyN, DAY)
cpegala_il <- il %>%
  select(C.pegala, surveyN, DAY)
mcymela_il <- il %>%
  select(M.cymela, surveyN, DAY)

# iNaturalist 2018 records restricted to a one-degree cell near Chicago
scybele_inat_2018 <- inat_test %>%
  filter(scientific_name == "Speyeria cybele") %>%
  filter(year == 2018) %>%
  filter(latitude >= 41.5 & latitude <= 42.5) %>%
  filter(longitude >= -88.5 & longitude <= -87.5)
anumitor_inat_2018 <- inat_test %>%
  filter(scientific_name == "Ancyloxypha numitor") %>%
  filter(year == 2018) %>%
  filter(latitude >= 41.5 & latitude <= 42.5) %>%
  filter(longitude >= -88.5 & longitude <= -87.5)
mcymela_inat_2018 <- inat_test %>%
  filter(scientific_name == "Megisto cymela") %>%
  filter(year == 2018) %>%
  filter(latitude >= 41.5 & latitude <= 42.5) %>%
  filter(longitude >= -88.5 & longitude <= -87.5)
cpegala_inat_2018 <- inat_test %>%
  filter(scientific_name == "Cercyonis pegala") %>%
  filter(year == 2018) %>%
  filter(latitude >= 41.5 & latitude <= 42.5) %>%
  filter(longitude >= -88.5 & longitude <= -87.5)

set.seed(70)
d <- sample_n(scybele_il, size = 100, weight = S.cybele, replace = TRUE)
mean(d$DAY)
sd(d$DAY)
mean(scybele_inat_2018$day)
sd(scybele_inat_2018$day)

### Megisto cymela
set.seed(70)
mc <- sample_n(mcymela_il, size = 100, weight = M.cymela, replace = TRUE)
mean(mc$DAY)
sd(mc$DAY)
mean(mcymela_inat_2018$day)
sd(mcymela_inat_2018$day)

# Ancyloxypha numitor
set.seed(70)
# FIX: the original sampled from the undefined object 'anumitor';
# the Illinois table is 'anumitor_il'
an <- sample_n(anumitor_il, size = 100, weight = A.numitor, replace = TRUE)
mean(an$DAY)
sd(an$DAY)
mean(anumitor_inat_2018$day)
sd(anumitor_inat_2018$day)

# Cercyonis pegala
set.seed(70)
# FIX: 'cpegala' was undefined; the Illinois table is 'cpegala_il'
cp <- sample_n(cpegala_il, size = 100, weight = C.pegala, replace = TRUE)
mean(cp$DAY)
sd(cp$DAY)
mean(cpegala_inat_2018$day)
sd(cpegala_inat_2018$day)

##### Plot Illinois data
sc_il_plot <- ggplot() +
  geom_line(scybele_il, mapping = aes(x = DAY, y = S.cybele/nrow(scybele_il)), color = "purple") +
  geom_density(scybele_inat_2018, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 191.79, color = "purple") +
  geom_vline(xintercept = 190.33, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Speyeria cybele") +
  theme_classic()
mc_il_plot <- ggplot() +
  geom_line(mcymela_il, mapping = aes(x = DAY, y = M.cymela/nrow(mcymela_il)), color = "purple") +
  geom_density(mcymela_inat_2018, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 167.24, color = "purple") +
  geom_vline(xintercept = 171.85, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Megisto cymela") +
  theme_classic()
an_il_plot <- ggplot() +
  geom_line(anumitor_il, mapping = aes(x = DAY, y = A.numitor/nrow(anumitor_il)), color = "purple") +
  geom_density(anumitor_inat_2018, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 208.88, color = "purple") +
  geom_vline(xintercept = 206.67, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Ancyloxypha numitor") +
  theme_classic()
cp_il_plot <- ggplot() +
  geom_line(cpegala_il, mapping = aes(x = DAY, y = C.pegala/nrow(cpegala_il)), color = "purple") +
  geom_density(cpegala_inat_2018, mapping = aes(day)) +
  xlim(0, 300) +
  geom_vline(xintercept = 202.04, color = "purple") +
  geom_vline(xintercept = 208.33, color = "black") +
  labs(x = "DAY", y = "Relative Abundance", title = "Cercyonis pegala") +
  theme_classic()
cowplot::plot_grid(sc_il_plot, mc_il_plot, an_il_plot, cp_il_plot,
                   nrow = 2)
ggsave(filename = "iNatskew/Illinoistest.png")
|
e8e277630741db1b08c8b3884b8c2ecee20e4f70 | b072ec289aea8a1aa440cadf163860ad0e7ed448 | /man/QGlink.funcs.Rd | 958a378c578a968fab09f7eb77bf681974a3b271 | [] | no_license | hschielzeth/QGglmm | 2648b9e59b7e6d78940e3e3b22856b9d66a186e9 | 71f79ebf95be73859cc69357f22efeaacd62aee1 | refs/heads/master | 2020-04-06T04:18:29.023932 | 2015-04-09T16:46:21 | 2015-04-09T16:46:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,048 | rd | QGlink.funcs.Rd | \name{QGlink.funcs}
\alias{QGlink.funcs}
\title{List of functions according to a distribution and a link function}
\description{
Function yielding different functions (inverse-link, variance function, derivative of the inverse-link) according to a distribution and link function.
}
\usage{
QGlink.funcs(name, n.obs = NULL, theta = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name}{Name of the distribution.link couple. See \code{\link{QGparams}} for a complete list of model available. (character)}
\item{n.obs}{Optional parameter required for "binomN" distributions (number of "trials"). See \code{\link{QGparams}}. (numeric)}
\item{theta}{Optional parameter required for "negbin" distributions (dispersion parameter). See \code{\link{QGparams}}. (numeric)}
}
\details{
This function takes the name of a distribution.link couple and yields several important functions such as the inverse-link function and its derivative, as well as the "distribution variance function".
The inverse-link function is the inverse function of the link function. For example, if the link function is the natural logarithm (typically for a Poisson distribution), then the inverse-link function is the exponential.
The distribution variance function is a function yielding the variance of the distribution for a given latent trait. For a Poisson distribution, the variance is equal to the mean, hence the variance function is equal to the inverse-link function. For a binomial distribution, the variance is N*p(l)*(1-p(l)), where p is the inverse-link function.
For some distributions, such as "binomN" and "negbin", some extra-parameters are required.
}
\value{
This function yields a list of function:
\itemize{
\item{\code{inv.link}}{Inverse function of the link function. (function)}
\item{\code{var.func}}{Distribution variance function. (function)}
\item{\code{d.inv.link}}{Derivative of the inverse-link function. (function)}
}
}
\references{
\strong{REF!!}
}
\author{
Pierre de Villemereuil & Michael B. Morrissey
}
\seealso{
\code{\link{QGparams}}, \code{\link{QGpred}}, \code{\link{QGmean.obs}}
}
\examples{
##Getting the functions for a Poisson.log model
QGlink.funcs("Poisson.log")
#Note that because the variance is equal to the mean in a Poisson distribution
#and the derivative of exp is exp
#all functions are the same!
##Getting the functions for a binom1.probit model
QGlink.funcs("binom1.probit")
##The function QGparams automatically computes these functions
QGparams(mu=0,var.p=2,var.a=1,model="binom1.logit")
#Hence this is the same as using the custom.model argument with QGlink.funcs
QGparams(mu=0,var.p=2,var.a=1,custom.model=QGlink.funcs("binom1.logit"))
##We can create our own custom set of functions
#Let's create a custom model exactly identical to QGlink.funcs("binom1.logit")
custom=list(inv.link=function(x){plogis(x)},
var.func=function(x){plogis(x)*(1-plogis(x))},
d.inv.link=function(x){dlogis(x)})
QGparams(mu=0,var.p=2,var.a=1,custom.model=custom)
}
|
63d4fa457b0d71087e248eaa0a1115496ad52b33 | 6de2291ac367f454f5836887a2b359896a78c73f | /man/dodgr_flowmap.Rd | e8184f34abc44935882209beb53771f39de8e095 | [] | no_license | Robinlovelace/dodgr | 5245332da4c70fa6b03a9125e3cdad6a7003d4e2 | faf1923a0d8445f6ff541962cffd20f60f9a1b83 | refs/heads/master | 2023-07-06T00:46:43.415537 | 2017-12-15T18:29:25 | 2017-12-15T18:29:25 | 104,320,576 | 0 | 0 | null | 2017-09-21T08:15:52 | 2017-09-21T08:15:51 | null | UTF-8 | R | false | true | 735 | rd | dodgr_flowmap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flowmap.R
\name{dodgr_flowmap}
\alias{dodgr_flowmap}
\title{dodgr_flowmap}
\usage{
dodgr_flowmap(net, filename, bbox = NULL, linescale = 5)
}
\arguments{
\item{net}{A street network with a \code{flow} column obtained from
\code{dodgr_flows}}
\item{filename}{Name of \code{.png} file to write to}
\item{bbox}{If given, scale the map to this bbox, otherwise use entire extent
of \code{net}}
\item{linescale}{Maximal thickness of plotted lines}
}
\description{
Map the output of \link{dodgr_flows}
}
\note{
\code{net} should be first passed through \code{merge_directed_flows}
prior to plotting, otherwise lines for different directions will be overlaid.
}
|
bfd61eef22ff8b18a25b499d38b12a3dc2aa31c4 | a462a24ff937e151e8151f3a1bdc9c3714b12c0e | /2021JIFS/8.data_outliers.R | a674e5abbc5d746484ea30366be5affc53d68709 | [] | no_license | noeliarico/kemeny | b4cbcac57203237769252de2c50ce959aa4ca50e | 50819f8bf0d19fb29a0b5c6d2ee031e8a811497d | refs/heads/main | 2023-03-29T14:36:37.931286 | 2023-03-16T09:04:12 | 2023-03-16T09:04:12 | 330,797,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,213 | r | 8.data_outliers.R | quantiles <- enframe(quantile(res_me_8$exec_time), name = "perc") %>% mutate(n = 8) %>%
  # (continuation of the 'quantiles' pipe begun on the previous line)
  # stack the n = 9 and n = 10 quantile tables under the n = 8 one
  bind_rows(enframe(quantile(res_me_9$exec_time), name = "perc") %>% mutate(n = 9)) %>%
  bind_rows(enframe(quantile(res_me_10$exec_time), name = "perc") %>% mutate(n = 10)) %>%
  mutate(perc = as.factor(perc),
         n = as.factor(n))

# raw execution-time quartiles for each problem size
quartiles8 <- res_me_8 %>% pull(exec_time) %>% quantile()
quartiles9 <- res_me_9 %>% pull(exec_time) %>% quantile()
quartiles10 <- res_me_10 %>% pull(exec_time) %>% quantile()

# stats[5] of boxplot.stats() is the upper whisker; runs above it are outliers
boxplot.stats(res_me_8$exec_time)$stats[5]
outliers_8 <- res_me_8 %>% mutate(outlier = exec_time > boxplot.stats(res_me_8$exec_time)$stats[5])
outliers_9 <- res_me_9 %>% mutate(outlier = exec_time > boxplot.stats(res_me_9$exec_time)$stats[5])
outliers_10 <- res_me_10 %>% mutate(outlier = exec_time > boxplot.stats(res_me_10$exec_time)$stats[5])

# combined table: factors for n/m, drop 'method', and move the "no" level
# to the end of the factor (affects plotting/legend order)
outliers <- bind_rows(outliers_8, outliers_9, outliers_10) %>%
  mutate(n = as.factor(n),
         m = as.factor(m),
         outlier = factor(outlier, labels = c("no", "yes")),
         method = NULL,
         outlier = fct_relevel(outlier, "no", after = Inf)
  )

# share of outlier runs per n; 'nn' is the count column count() produces here
# because the grouping variable is itself named 'n'. 2800 is presumably the
# total number of runs per n -- TODO confirm against the experiment setup.
outliers %>% filter(outlier == "yes") %>% group_by(n) %>% count() %>% mutate(perc = nn/2800)
|
c2badc6cad4a08cfae6f0166561a1e3d87a3bcb6 | fe17a58bd22e306aa3b5028dd1261c9b72d3037f | /man/func.tilemap.Rd | 08d463c1c8e6c9cfa77b42278dc50fc9b9cd47a9 | [
"MIT"
] | permissive | mrmtshmp/ExploratoryDataAnalysis | 2742d5aded4d8628d712734ec40d1ab1fe2f2ab5 | 0d5ee51a43ebe537e84e446ff189ec3c390b4bf7 | refs/heads/master | 2021-07-08T22:15:20.506869 | 2020-10-05T08:33:36 | 2020-10-05T08:33:36 | 200,771,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,842 | rd | func.tilemap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tiling.R
\name{func.tilemap}
\alias{func.tilemap}
\title{Make heatmap-like tile plot. (wrapper of gplots::heatmap2)}
\usage{
func.tilemap(
fn.output.pdf = "Tile",
surfix.maintitle = "Group: ",
bin = FALSE,
n.breaks = 20,
col.bias = 2,
breaks = NULL,
row.adjust = 0,
count_table,
physi_table,
filt.Features = NULL,
unitHierarchy = c("Kingdom", "Phylum", "Class", "Order", "Family", "Genus",
"Species", "NoAgg"),
method.dist.Row = c("manhattan", "euclidean"),
method.hclust.Row = c("ward.D2"),
method.dist.Col = c("manhattan", "euclidean"),
method.hclust.Col = c("ward.D2")
)
}
\arguments{
\item{fn.output.pdf}{<character; output> File name of PDF output.}
\item{surfix.maintitle}{<character; output> Surfix of Main title in each page.}
\item{bin}{<numeric; processing>}
\item{n.breaks}{<numeric; processing; prefix =20>}
\item{col.bias}{<numeric; processing; prefix =2> Colour brewer setting}
\item{breaks}{<numeric>}
\item{row.adjust}{<numeric>}
\item{count_table}{<object; input data; mandatory> A data.frame with features as column and subjects as rows.}
\item{physi_table}{<object; input data> A data.frame with 2 column of a feature and subject ID. This ID must be identical with of count_table.}
\item{filt.Features}{<character strings; input data> Select column of count_table. If NULL, all features are selected.}
\item{unitHierarchy}{<for future update...> c('Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species', "NoAgg"),}
\item{method.dist.Row}{= c('manhattan','euclidean'),}
\item{method.hclust.Row}{= c('ward.D2'),}
\item{method.dist.Col}{= c('manhattan','euclidean'),}
\item{method.hclust.Col}{= c('ward.D2')}
}
\description{
Make heatmap-like tile plot. (wrapper of gplots::heatmap2)
}
|
1a4462a9c4908606d710332918077f8d20a6959d | ad84018684ec74390bb26e8f812b6eaa06966ea5 | /plot2.R | 4afe56b4d624253c716898bd626ea9f084bdf6ea | [] | no_license | dfursevich/ExData_Plotting1 | b9dfc99b7ec5164dac9e71d991ab3302ba2d3eab | c8166c621ea7a884b3a65620756438feec52b6f4 | refs/heads/master | 2021-01-20T22:45:54.985899 | 2015-09-09T17:29:11 | 2015-09-09T17:29:11 | 42,185,736 | 0 | 0 | null | 2015-09-09T15:03:00 | 2015-09-09T15:03:00 | null | UTF-8 | R | false | false | 459 | r | plot2.R | data <- read.csv("d:/coursera/Exploratory Data Analysis/data/household_power_consumption.txt", sep = ";", na.strings="?")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
data <- subset(data, Date >= as.Date("2007/02/01") & Date <= as.Date("2007/02/02"))
png(file = "plot2.png")
plot(data$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab = "", xaxt = "n")
axis(1, at = c(0, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
dev.off()
|
e160fc0f4e35d51b313331467318b7e2016effd6 | 5ad706fdaa7ec49564fe8f48f37aafd44ebed397 | /R/common.calculations.1.R | 43481f2adae911eaf339878c7d6a42c4c5296888 | [] | no_license | cran/pems.utils | 34d79305d0cbfdddc5f798e49381a264eee3d864 | 8763120b096dc7fe37482f47f0f6188931ab404a | refs/heads/master | 2021-07-07T11:48:43.028189 | 2021-04-25T05:40:02 | 2021-04-25T05:40:02 | 52,953,467 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,012 | r | common.calculations.1.R | ##########################
##########################
##common calculations
##########################
##########################
#kr
#description
##########################
#functions to do common calculates
#includes
##########################
#calcDistance
#calcSpeed
#calcAccel
#calcJerk
#inlcudes (removing)
##########################
#calcChecks
#calcPack
#currently using (exported)
##########################
#calcChecks
# uses checkOptions
#includes (using but not exported)
###########################
#pemsOutput
#to do
##########################
#
#comments
##########################
#template to think about
################################
#pemsOutput
################################
# pemsOutput: common back-end used by the calc... functions to return results.
# Depending on the requested output type it either returns the element itself
# ("input") or packs it into the supplied data as a pems/data.frame column.
# Args:
#   x          - the computed pems.element to return or pack
#   ...        - expected to carry 'output' and, when packing, 'data'
#   track.name - if TRUE and x carries a "name" attribute, that attribute is
#                used as the column name when packing; otherwise the caller's
#                symbol name for x is used
pemsOutput <- function(x, ..., track.name=TRUE){
  args <- list(...)
  # capture the caller's symbol name for x
  ln <- quo_name(enquo(x))
  # default output type is "input" (return the element unmodified)
  if(is.null(args$output)) args$output <- "input"
  if(args$output=="input") return(x)
  #############
  #next line not tracking fun.name at moment
  if(args$output %in% c("pems", "data.frame") && is.null(args$data))
    stop("no data to pack with")
  # coerce plain data to a pems object before inserting the new column
  if(!"pems" %in% class(args$data))
    args$data <- pems(args$data)
  #############
  #change track name when packing x
  #n.x <- quo_name(enquo(x))
  #############
  # column name: prefer the element's own "name" attribute when tracking
  n.x <- if(track.name && "name" %in% names(attributes(x)))
    attributes(x)$name else ln
  args$data[n.x, force=c("na.pad.target", "na.pad.insert")] <- x
  # unwrap back to a data.frame if that is what the caller asked for
  if(args$output=="data.frame") args$data <- pemsData(args$data)
  args$data
}
##########################
##########################
##calc...
##########################
##########################
#to update fun.name
# input through out
# my.output through out
# calcTemplate: skeleton documenting the standard shape of a calc... function:
#   1. capture the call and resolve ... settings via calcChecks;
#   2. pull each input with getPEMSElement, declaring the units it must
#      arrive in (and optionally a reference name for messages);
#   3. compute the result;
#   4. wrap it as a pems.element (name + units) and hand it to pemsOutput,
#      which returns it or packs it into 'data' per settings$output.
calcTemplate <- function(input=NULL, data = NULL,
                         ..., fun.name = "calcTemplate"){
  #setup
  this.call <- match.call()
  settings <- calcChecks(fun.name=fun.name, ..., data = data)
  #input (one per input)
  input <- getPEMSElement(!!enquo(input), data, units="units.i.want",
                          ref.name="what.i.call.it")
  #my assumptions
  #my operations
  my.output <- input
  #outputs
  my.output <- pems.element(my.output, name="my.output", units="output.units")
  pemsOutput(my.output, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
############################
#calcDistance
############################
#kr v.02 2018/06/15
# calcDistance: distance travelled per record (m) from speed (m/s) and time (s).
# The time step for the first record is unknown, so it is taken as the mean of
# the observed steps; later intervals are time[i] - time[i-1].
# Returns a pems.element (name "distance", units "m") or packs it into 'data',
# depending on the 'output' setting resolved by calcChecks.
calcDistance <- function(speed = NULL, time = NULL, data = NULL,
                         ..., fun.name = "calcDistance"){
  #setup
  this.call <- match.call()
  settings <- calcChecks(fun.name, ..., data = data)
  ################
  #think I can simplify setup
  #maybe merge with pemsin
  # so we don't rename data?
  ################
  # inputs are resolved/converted to the declared units
  speed <- getPEMSElement(!!enquo(speed), data, units="m/s")
  time <- getPEMSElement(!!enquo(time), data, units="s")
  #my assumption
  #first unit resolution is average of rest
  #rest are time.now - time.last
  temp <- diff(time)
  temp <- c(mean(temp, na.rm=TRUE), temp)
  #my calculation: distance per record = speed * time step
  distance <- speed * temp
  #my structure
  ################
  #look into this
  # makePEMSElement versus pems.element
  # also does it keep historical data types...
  ################
  distance <- pems.element(distance, name="distance", units="m")
  #make output
  pemsOutput(distance, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
#############################
##calcSpeed
#############################
#kr v.02 2018/06/17
# calcSpeed: speed per record (m/s) from per-record distance (m) and time (s).
# Mirrors calcDistance: the first time step is unknown and is approximated by
# the mean of the observed steps; later intervals are time[i] - time[i-1].
# Returns a pems.element (name "speed", units "m/s") or packs it into 'data'.
calcSpeed <- function(distance = NULL, time = NULL, data = NULL,
                      ..., fun.name = "calcSpeed"){
  #setup
  this.call <- match.call()
  settings <- calcChecks(fun.name, ..., data = data)
  #get inputs (resolved/converted to the declared units)
  distance <- getPEMSElement(!!enquo(distance), data, units="m")
  time <- getPEMSElement(!!enquo(time), data, units="s")
  #my assumption
  #first unit resolution is average of rest
  #rest are time.now - time.last
  temp <- diff(time)
  temp <- c(mean(temp, na.rm=TRUE), temp)
  #my calculation: speed = distance / time step
  speed <- distance / temp
  #my structure
  speed <- pems.element(speed, name="speed", units="m/s")
  #make output
  pemsOutput(speed, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
###############################
#calcAccel
###############################
#v0.2 kr 17/06/2018
# calcAccel: acceleration (m/s/s) from speed (m/s) and time (s).
# 'method' selects the finite-difference scheme (see list below); the default
# (2) is the backward difference, which leaves accel[1] = NA.
# Returns a pems.element (name "accel", units "m/s/s") or packs it into 'data'.
# calcAcceleration (below) is an alias.
calcAccel <- function(speed = NULL, time = NULL, data = NULL,
                      ..., method = 2, fun.name = "calcAccel"){
  #setup
  this.call <- match.call()
  settings <- calcChecks(fun.name, ..., data = data)
  #get inputs
  ################
  #think I can simplify setup
  #maybe merge with pemsin
  # so we don't rename data?
  ################
  speed <- getPEMSElement(!!enquo(speed), data, units="m/s")
  time <- getPEMSElement(!!enquo(time), data, units="s")
  #current 4 methods:
  #1 (speed[t+1]-speed[t]) / (time[t+1]-time[t])     -- forward difference
  #2 (speed[t]-speed[t-1]) / (time[t]-time[t-1])     -- backward difference
  #3 (speed[t+1]-speed[t-1]) / (time[t+1]-time[t-1]) -- central difference
  #4 average of methods 1 and 2
  #records with no neighbour on the required side come back NA
  ans <- diff(speed)/diff(time)
  if(method==1) accel <- c(ans, NA)
  if(method==2) accel <- c(NA, ans)
  if(method==3) accel <- c(NA, diff(speed,2)/diff(time,2),NA)
  #does method 4 need thinking about... If not 1Hz..?
  if(method==4) accel <- (c(ans, NA) + c(NA, ans))/2
  #my structure
  accel <- pems.element(accel, name="accel", units="m/s/s")
  #make output
  pemsOutput(accel, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
# alias kept for the longer, self-describing name
calcAcceleration <- calcAccel
############################
##calcJerk
############################
#kr v.02 2018/06/17
# calcJerk: rate of change of acceleration (m/s/s/s) from accel (m/s/s) and
# time (s), using the backward difference d.accel/d.time. The first record has
# no preceding interval, so jerk[1] is NA (the old comment claiming 0 was
# wrong). Returns a pems.element (name "jerk", units "m/s/s/s") or packs it
# into 'data', depending on the 'output' setting resolved by calcChecks.
calcJerk <- function(accel = NULL, time = NULL, data = NULL,
                     ...,fun.name = "calcJerk"){
  #setup
  this.call <- match.call()
  settings <- calcChecks(fun.name, ..., data = data)
  #get inputs (resolved/converted to the declared units)
  accel <- getPEMSElement(!!enquo(accel), data, units="m/s/s")
  time <- getPEMSElement(!!enquo(time), data, units="s")
  #backward differences; first element has no predecessor
  d.accel <- diff(accel)
  d.time <- diff(time)
  #my calculation
  jerk <- c(NA, d.accel / d.time)
  #my structure
  # (FIX: removed the manual attr(jerk, "name"/"units") calls that duplicated
  # the name/units passed to pems.element below -- this now matches how
  # calcDistance/calcSpeed/calcAccel build their results)
  jerk <- pems.element(jerk, name="jerk", units="m/s/s/s")
  #make output
  pemsOutput(jerk, output = settings$output, data = data,
             fun.name = fun.name, this.call = this.call)
}
#############################
#############################
##calcChecks, calcPack
#############################
#############################
#kr 23/01/2012 v 0.0.6
#front end management
#changed data = data to data = null
# calcChecks: shared front-end argument handling for the calc... functions.
# Validates the requested 'output' type and 'if.missing' policy (via
# checkOption, against the option sets declared by setUnits' formals) and
# returns them, with 'overwrite' and 'unit.conversions', as a settings list.
# "special" output resolves to the type of the supplied data: "input" when no
# data is given or its type is "other", otherwise the type isPEMS() records
# in its comment.
calcChecks <- function(fun.name = "calcChecks", ..., data = NULL,
                       if.missing = c("stop", "warning", "return"),
                       output = c("input", "data.frame", "pems", "special"),
                       unit.conversions = NULL, overwrite = FALSE){
  #output handling
  output <- checkOption(output[1], formals(setUnits)$output,
                        "output", "allowed outputs",
                        fun.name = fun.name)
  if(output == "special"){
    output <- if(is.null(data))
      "input" else if(comment(isPEMS(data)) == "other")
      "input" else comment(isPEMS(data))
  }
  #if.missing handling
  if.missing <- checkOption(if.missing[1], formals(setUnits)$if.missing,
                            "if.missing", "allowed if.missings",
                            fun.name = fun.name)
  list(output = output, if.missing = if.missing, overwrite = overwrite,
       unit.conversions = unit.conversions)
}
# calcPack: legacy back-end packer (listed under "includes (removing)" in the
# file header; pemsOutput above is the current mechanism). Delegates to
# checkOutput using the settings list produced by calcChecks. The commented
# section previously appended this.call to the pems object's history.
calcPack <- function(output = NULL, data = NULL, settings = NULL,
                     fun.name = "calcPack", this.call = NULL){
  #make output
  output <- checkOutput(input = output, data = data, if.missing = settings$if.missing,
                        fun.name = fun.name, output = settings$output,
                        overwrite = settings$overwrite)
  #removing history
  # if(isPEMS(output)){
  # old.class <- class(output)
  # class(output) <- "not.pems"
  # output$history[[length(output$history)+1]] <- this.call
  # class(output) <- old.class
  # }
  output
}
|
1a8993a78067be5bf48aa62a69f65c523b3c72aa | 9477c6d638c2918ca4b03eefe0cbe067b790ceaa | /R/HybridFinder.R | e29aaf40f1b0d53f7e9fbc794584aa71768fe303 | [] | no_license | cran/RHybridFinder | bbbfddab71db4890cd3d6c5ebf463731e6803542 | 3a89236619fc5f249a816ae80fd4ea35daf251df | refs/heads/master | 2023-07-07T14:29:51.392978 | 2021-08-17T15:30:24 | 2021-08-17T15:30:24 | 379,599,210 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,032 | r | HybridFinder.R | #' @title HybridFinder
#' @description This function takes in three mandatory inputs: (1) all denovo
#' candidates (2) database search results and (3) the corresponding proteome
#' fasta file. The function's role is to extract high confidence de novo peptides
#' and to search for their existence in the proteome, whether the entire peptide
#' sequence or its pair fragments (in one or two proteins).
#' @param denovo_candidates dataframe containing all denovo candidate peptides
#' @param db_search dataframe containing the database search peptides
#' @param proteome_db path to the proteome FASTA file
#' @param customALCcutoff the default is calculated based on the median ALC of the
#' assigned spectrum groups (spectrum groups that match in the database search
#' results and in the denovo sequencing results) where also the peptide sequence
#' matches, Default: NULL
#' @param with_parallel for faster results, this function also utilizes
#' parallel computing (please read more on parallel computing in order
#' to be sure that your computer does support this), Default: TRUE
#' @param customCores custom amount of cores strictly higher than 5, Default: 6
#' @param export_files a boolean parameter for exporting the dataframes into
#' files in the next parameter for the output directory, Default: FALSE,
#' Default: FALSE
#' @param export_dir the output directory for the results files if
#' export_files=TRUE, Default: NULL, Default: NULL
#' @return The output is a list of 3 dataframes containing:
#' \enumerate{
#' \item the HybridFinder output (dataframe) - the spectra that made
#' it to the end with their respective columns (ALC, m/z, RT, Fraction,
#' Scan) and a categorization column which denotes their potential splice
#' type (-cis, -trans) or whether they are linear (the entire sequence
#' was matched in proteins in the proteome database). Potential cis- &
#' trans-spliced peptide are peptides whose fragments were matched with
#' fragments within one protein, or two proteins, respectively.
#' \item character vector containing potentially hybrid peptides (cis-
#' and trans-)
#' \item list containing the reference proteome and the "fake" proteins
#' added at the end with a patterned naming convention (sp|denovo_HF_fake_protein)
#' made up of the concatenated potential hybrid peptides.}
#' @details This function is based on the published algorithm by Faridi et al.
#' (2018) for the identification and categorization of hybrid peptides. The function
#' described here adopts a slightly modified version of the algorithm for
#' computational efficiency. The function starts by extracting unassigned denovo
#' spectra where the Average Local Confidence (assigned by PEAKS software), is
#' equivalent to the ALC cutoff which is based on the median of the assigned spectra
#' (between denovo and database search). The sequences of all peptides are searched
#' against the reference proteome. If there is a hit then, then, the peptide sequence
#' within a spectrum group considered as being linear and each spectrum group is
#' is then filtered so as to keep the highest ALC-ranking spectra. Then, the rest
#' of the spectra (spectra that did not contain any sequence that had an entire
#' match in the proteome database) then undergo a "cutting" procedure where each
#' sequence yields n-2 sequences (with n being the length of the peptide. That is
#' if the peptide contains 9 amino acids i.e NTYASPRFK, then the sequence is cut
#' into a combination of 7 sequences of 2 fragment pairs each i.e fragment 1: NTY
#' and fragment 2: ASPRFK, etc).These are then searched in the proteome for hits
#' of both peptide fragments within a same protein, spectra in which sequences
#' have fragment pairs that match within a same protein, these are considerent
#' to be potentially cis-spliced. Potentially cis-spliced spectrum groups are then
#' filtered based on the highest ranking ALC. Spectrum groups not considered to be
#' potentially cis-spliced are further checked for potential trans-splicing. The
#' peptide sequences are cut again in the same fashion, however, this time peptide
#' fragment pairs are searched for matches in two proteins. Peptide sequences whose
#' fragment pairs match in 2 proteins are considered to be potentially trans-spliced.
#' The same filtering for the highest ranking ALC within each peptide spectrum group.
#' The remaining spectra that were neither assigned as linear nor potentially spliced
#' (neither cis- nor trans-) are then discarded. The result is a list of spectra
#' along with their categorizations (Linear, potentially cis- and potentially trans-)
#' Potentially cis- and trans-spliced peptides are then concatenated and then broken into
#' several "fake" proteins and added to the bottom of the reference proteome. The
#' point of this last step is to create a merged proteome (consisting of the reference
#' proteome and the hybrid proteome) which would be used for a second database
#' search. After the second database search the checknetmhcpan function or the
#' step2_wo_netMHCpan function can be used in order to obtain the final list
#' of potentially spliced peptides.
#' Article: Faridi P, Li C, Ramarathinam SH, Vivian JP, Illing PT, Mifsud NA,
#' Ayala R, Song J, Gearing LJ, Hertzog PJ, Ternette N, Rossjohn J, Croft NP,
#' Purcell AW. A subset of HLA-I peptides are not genomically templated: Evidence
#' for cis- and trans-spliced peptide ligands. Sci Immunol. 2018 Oct 12;3(28):eaar3947.
#' <doi: 10.1126/sciimmunol.aar3947>. PMID: 30315122.
#' @examples
#' \dontrun{
#' hybridFinderResult_list <- HybridFinder(denovo_candidates, db_search,
#' proteome, export_files = TRUE, export_dir = output_dir)
#' hybridFinderResult_list <- HybridFinder(denovo_candidates, db_search,
#' proteome)
#' hybridFinderResult_list <- HybridFinder(denovo_candidates, db_search,
#' proteome, export_files = FALSE)
#' }
#' @seealso
#' \code{\link[seqinr]{read.fasta}},\code{\link[seqinr]{s2c}}
#' @rdname HybridFinder
#' @export
#' @importFrom seqinr read.fasta s2c
HybridFinder<- function(denovo_candidates, db_search, proteome_db,
                        customALCcutoff=NULL, with_parallel= TRUE, customCores=6,
                        export_files = FALSE, export_dir = NULL){
  # keep only the FASTA file name (text after the last "/"); recorded later
  # in the 'proteome_database_used' provenance column of the output table
  proteome_path <- gsub(".*(/)","", proteome_db)
  # extract high-confidence, unassigned de novo peptides (ALC cutoff derived
  # from assigned spectra unless customALCcutoff is supplied)
  input_for_HF <- prepare_input_for_HF(denovo_candidates, db_search, customALCcutoff)
  # create an extra id column (denovo id + sequence + ALC + running index) so
  # each candidate row stays uniquely traceable through the search steps
  input_for_HF$extraid <- paste0(input_for_HF$denovo_id,"-+xx", input_for_HF$Peptide,
                                 "--xx", input_for_HF$ALC...., "_x",
                                 seq(1, nrow(input_for_HF)))
  # import proteome database
  proteome_db <- seqinr::read.fasta(proteome_db, as.string = TRUE, seqtype = "AA")
  # switch all "I" in the proteome to "L" since de novo sequencing does not
  # distinguish between them (isobaric residues)
  proteome_db <- lapply(proteome_db, function(z) {gsub("I", "L", z)} )
  # search for linear peptides (entire sequence found in one protein)
  message('Step01: Search for linear peptides...')
  linear_peptides <- search_for_linear_peptides(input_for_HF, proteome_db,
                                                with_parallel, customCores)
  not_linear_peptides <- linear_peptides[[1]]
  final_df_linear_peptides <- linear_peptides[[2]]
  # search for cis-spliced peptides (both fragments within one protein)
  message('Step02: Search for cis-spliced peptides...')
  cis_spliced_peptides <- search_for_cis_spliced_peptides(not_linear_peptides ,
                                                          proteome_db,
                                                          with_parallel,
                                                          customCores)
  not_cis_peptides <- cis_spliced_peptides[[1]]
  final_df_cis_peptides <- cis_spliced_peptides[[2]]
  # search for trans-spliced peptides (fragment pairs from two proteins)
  message('Step03: Search for trans-spliced peptides...')
  trans_spliced_peptides <- search_for_trans_spliced_peptides(not_cis_peptides,
                                                              proteome_db,
                                                              with_parallel,
                                                              customCores)
  final_df_trans_peptides <- trans_spliced_peptides
  # compile all categorized peptides into one table, dropping empty rows
  final_df_all_peptides <- rbind(final_df_linear_peptides, final_df_cis_peptides,
                                 final_df_trans_peptides )
  final_df_all_peptides <- final_df_all_peptides[
    which(!is.na(final_df_all_peptides$Peptide)),]
  #final_df_all_peptides<- final_df_all_peptides[,-2]
  # split denovo_id ("Fraction-Scan-m/z-RT") back into its four fields
  metadata_denovo<- as.data.frame(matrix(unlist(strsplit(final_df_all_peptides$denovo_id, "-")),
                                         nrow=nrow(final_df_all_peptides),
                                         ncol=4, byrow=TRUE), stringsAsFactors = F)
  metadata_denovo[,3] <- as.numeric(metadata_denovo[,3])
  metadata_denovo[,4] <- as.numeric(metadata_denovo[,4])
  colnames(metadata_denovo) <- c("Fraction", "Scan", "m/z", "RT")
  # remove unnecessary columns, then prepend the spectral metadata
  cols_to_delete <- grep("Fragment|spliceType|denovo_id", colnames(final_df_all_peptides))
  final_df_all_peptides <- final_df_all_peptides[, -cols_to_delete]
  final_df_all_peptides<- cbind(metadata_denovo, final_df_all_peptides)
  # keep only 9-12 amino acid peptides
  final_df_all_peptides <- final_df_all_peptides[nchar(final_df_all_peptides$Peptide) > 8,]
  final_df_all_peptides <- final_df_all_peptides[nchar(final_df_all_peptides$Peptide) < 13,]
  # keep only spliced (non-linear) peptides, deduplicated by sequence
  only_spliced <- final_df_all_peptides[final_df_all_peptides$Type!= "Linear",
                                        c(grep("^Peptide$",
                                               colnames(final_df_all_peptides)),
                                          grep("^Length$",
                                               colnames(final_df_all_peptides)))]
  only_spliced <- only_spliced[!duplicated(only_spliced$Peptide),]
  # concatenate the spliced candidates into "fake" proteins for the second
  # database search (sp|denovo_HF_fake_protein entries)
  hybrid_proteome <- make_fake_HF_proteins(only_spliced)
  # append the fake proteins to the reference proteome
  hybrid_concat <- list(proteome_db, hybrid_proteome)
  hybrid_concat <- do.call(c, hybrid_concat)
  # keep only the peptide sequences for the returned character vector
  only_spliced <- only_spliced$Peptide
  # record which proteome FASTA was used; rename the category column
  final_df_all_peptides$proteome_database_used <- proteome_path
  colnames(final_df_all_peptides)[grep("Type", colnames(final_df_all_peptides))] <- "Potential_spliceType"
  # return list of: all peptides with categorization, the spliced peptide
  # sequences, and the merged (reference + hybrid) proteome
  results_list<- list(final_df_all_peptides, only_spliced, hybrid_concat)
  # optionally export the three results into files under export_dir
  if (export_files == TRUE && !is.null(export_dir)) {
    if (dir.exists(export_dir)){
      export_HybridFinder_results(results_list, export_dir)
    }
  }
  #create the final output
  return (results_list)
}
|
a3fdd4780935f5c7ef921ca866b842f1f5a37587 | 5e3750b203161b7d0218ebe6ef1f6fdbe2fc490a | /Expression/GTEx/01_Filters_And_Pipeline/01_FiltrerIndividus.r | a0233168f0fb85586b20e12110f7e0db95c94912 | [] | no_license | HussinLab/adcy9_cetp_Gamache_2021 | a58e9ecf50be883db88e2665b1396b6a97054455 | 7c5e00be1ae9e986010dc532491de153d64158d6 | refs/heads/main | 2023-08-07T08:08:29.905295 | 2021-10-28T21:53:46 | 2021-10-28T21:53:46 | 368,565,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,862 | r | 01_FiltrerIndividus.r | library('reshape2')
# Info extracted from information file from GTEx, two columns : SAMPID et SMMPPD/SMTSD
# Coverage by ID
info=read.table('../../../Phenotype/Phenotype_GTEx/Data/SpecificPhenotype/SMMPPD.txt', fill=TRUE, header=TRUE)
# Association ID-tissue
tissu=read.table('../../../Phenotype/Phenotype_GTEx/Data/SpecificPhenotype/SMTSD.txt', fill=TRUE, header=TRUE)
all=merge(info,tissu,by='SAMPID')
a=colsplit(string=tissu[,1],pattern='-',names=c('A','B','C','D','E'))
all$ID=a$B
# Filter on coverage and associated to a tissue
higher10millions=all$SMMPPD>10000000 & !is.na(all$SMMPPD)
pass=all[higher10millions,]
allTissu=unique(pass$SMTSD)
write.table(allTissu,'AllTissues.txt',row.names=FALSE,col.names=FALSE)
keep=c()
# For individual who are duplicate, keep the one with the highest coverage
for (i in allTissu) {
print(i)
thisTissu=pass$SMTSD==i
oneID=unique(pass[thisTissu,'ID'])
subKeep=c()
for (j in oneID) {
thisID=pass$ID==j
duplicatedInThisTissu=thisTissu & thisID
if(sum(duplicatedInThisTissu)>1){
m=(which.max(pass[duplicatedInThisTissu,2]))
p=pass[duplicatedInThisTissu,]
m=p[m,1]
keep=c(keep,as.character(m))
subKeep=c(subKeep,as.character(m))
}else{
keep=c(keep,as.character(pass[duplicatedInThisTissu,1]))
subKeep=c(subKeep,as.character(pass[duplicatedInThisTissu,1]))
}
}
passDup=pass$SAMPID %in% subKeep
pass2=pass[passDup,]
idOnly=as.data.frame(as.character(pass2[,1]))
write.table(idOnly,paste(c('../Data/Covariables/ID.PassFilter10Millions.MaxDuplicated.',i,'.txt'),collapse=""),quote=FALSE,row.names=FALSE,col.names=FALSE)
}
passDup=pass$SAMPID %in% keep
pass2=pass[passDup,]
idOnly=as.data.frame(as.character(pass2[,1]))
# Write ID who pass
write.table(idOnly,'../Data/Covariables/ID.PassFilter10Millions.MaxDuplicated.txt',quote=FALSE,row.names=FALSE,col.names=FALSE)
|
2541854629126fca7aee17a67ef8dd13b48a254d | d8b3a5ac9330f7f83619f596186fe9d60577d35d | /LIB_RHESSys_modelBehavior7.R | 9221681b7ef2f041770a4e45e74a33732f4f4548 | [
"MIT"
] | permissive | laurencelin/R-coded-scripts-for-RHESSys-calibration | acbd31840adbe7cc06d8f806d4851133d3b3f091 | add43866727b54e502aa18b226624dac5dd6ea3f | refs/heads/master | 2021-07-14T18:31:02.834975 | 2021-02-25T17:23:00 | 2021-02-25T17:23:00 | 97,491,430 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,491 | r | LIB_RHESSys_modelBehavior7.R |
arg=commandArgs(T)
# modelBehavior: summarise RHESSys daily basin output into (a) annual fluxes,
# storage changes and a water-balance closure check, and (b) weekly flux /
# precipitation correlations used as behavioural diagnostics.
# Args:
#   rhessys_         matrix of daily basin output; columns are addressed by
#                    fixed index (see inline comments: 19=flow, 35=rain, ...)
#   dailytimeSeries_ data.frame aligned row-for-row with rhessys_, providing
#                    $year, $month, $day and $yy_woy (year + week-of-year)
# Returns: list(AnnualTable = per-year matrix, AnnualList = named means/cors).
modelBehavior = function( rhessys_, dailytimeSeries_ ){
	mostcol = dim(rhessys_)[2]
	# ... assume rhessys_ is a matrix and already "matched"
	# ... rhessys_ balance -- pull the daily series out by column index
	brhessys_DayFlow =rhessys_[,19]#flow
	brhessys_DayRain =rhessys_[,35]#rain
	brhessys_DayET =rhessys_[,14]+rhessys_[,16]#et
	brhessys_DayInf = rhessys_[,43] #ifelse(43<=mostcol,rhessys_[,43],rep(0,nrow(rhessys_)))# recharge (infiltration) <------ ifelse
	brhessys_DayUnsatDrain =rhessys_[,12]# unsat drain
	brhessys_DayCap =rhessys_[,13]# cap
	brhessys_DayRZ =rhessys_[,9]#rz
	brhessys_DayUnsat =rhessys_[,10]#unsat
	brhessys_DaySatdef =rhessys_[,8]#satdef
	brhessys_DaySatz =rhessys_[,7]#satz
	brhessys_DayCanopy =rhessys_[,27]# canopy store
	brhessys_DayLitter =rhessys_[,26]# litter store
	brhessys_DayGWS =rhessys_[,23]# groundwater store
	brhessys_DayDets =rhessys_[,24]# detention store
	brhessys_DayPSN =rhessys_[,20]# psn
	brhessys_DaySatArea =rhessys_[,25]# sat area
	brhessys_DayReturn =rhessys_[,18]# return
	brhessys_DayBaseflow =rhessys_[,17]# baseflow (sub-surface)
	brhessys_DayGWq =rhessys_[,22]# gwq
	### ----------------------------------------- model behavorial assessment
	# ... annual flux + balance (sums of daily values per $year grouping)
	brhessys_YearFlow = tapply(brhessys_DayFlow, dailytimeSeries_$year, sum)
	brhessys_YearRain = tapply(brhessys_DayRain, dailytimeSeries_$year, sum)
	brhessys_YearET = tapply(brhessys_DayET, dailytimeSeries_$year, sum)
	# row index of the first day of each year group: Oct 1 if that year group
	# contains one, otherwise its first row
	# NOTE(review): 'return <- ...' assigns to a local variable named
	# "return"; it works because the assignment's value is the closure's last
	# expression, but a plain final expression would be clearer.
	wyFirstDate = tapply(1:dim(dailytimeSeries_)[1], dailytimeSeries_$year, function(x){
		cond = dailytimeSeries_$month[x]==10 & dailytimeSeries_$day[x]==1;
		return <- ifelse(sum(cond)>0, x[cond], x[1])
	})
	# row index of the last day of each year group: Sep 30 if present,
	# otherwise its last row
	wyLastDate = tapply(1:dim(dailytimeSeries_)[1], dailytimeSeries_$year, function(x){
		cond = dailytimeSeries_$month[x]==9 & dailytimeSeries_$day[x]==30;
		return <- ifelse(sum(cond)>0, x[cond], x[length(x)])
	})
	# storage changes over each year (end state minus start state)
	brhessys_YearRZ = brhessys_DayRZ[wyLastDate] - brhessys_DayRZ[wyFirstDate]
	brhessys_YearUnsat = brhessys_DayUnsat[wyLastDate] - brhessys_DayUnsat[wyFirstDate]
	brhessys_YearSatdef = brhessys_DaySatdef[wyLastDate] - brhessys_DaySatdef[wyFirstDate]
	brhessys_YearCanopy = brhessys_DayCanopy[wyLastDate] - brhessys_DayCanopy[wyFirstDate]
	brhessys_YearLitter = brhessys_DayLitter[wyLastDate] - brhessys_DayLitter[wyFirstDate]
	brhessys_YearGWS = brhessys_DayGWS[wyLastDate] - brhessys_DayGWS[wyFirstDate]
	brhessys_YearDets = brhessys_DayDets[wyLastDate] - brhessys_DayDets[wyFirstDate]
	# water-balance residual: rain - outputs - storage changes (satdef deficit
	# change enters with opposite sign).
	# NOTE(review): brhessys_YearBalance is computed but not used below
	# (annualTable's "rhessys_Balance" column is (ET+Flow)/Rain) -- confirm.
	brhessys_YearBalance = brhessys_YearRain-brhessys_YearET-brhessys_YearFlow-brhessys_YearRZ-brhessys_YearUnsat+brhessys_YearSatdef-brhessys_YearCanopy-brhessys_YearLitter-brhessys_YearGWS-brhessys_YearDets
	brhessys_YearInf = tapply(brhessys_DayInf, dailytimeSeries_$year, sum)
	brhessys_YearUnsatDrain = tapply(brhessys_DayUnsatDrain, dailytimeSeries_$year, sum)
	brhessys_YearCap = tapply(brhessys_DayCap, dailytimeSeries_$year, sum)
	brhessys_YearGWq = tapply(brhessys_DayGWq, dailytimeSeries_$year, sum)
	brhessys_YearPSN = tapply(brhessys_DayPSN, dailytimeSeries_$year, sum)
	brhessys_YearSatz = tapply(brhessys_DaySatz, dailytimeSeries_$year, mean)
	# ... weekly flux (grouped by year+week-of-year key $yy_woy)
	brhessys_WeekPrecip = tapply(brhessys_DayRain, dailytimeSeries_$yy_woy, sum)
	brhessys_WeekStreamflow = tapply(brhessys_DayFlow, dailytimeSeries_$yy_woy, sum)
	brhessys_WeekReturn = tapply(brhessys_DayReturn, dailytimeSeries_$yy_woy, sum)
	brhessys_WeekGWq = tapply(brhessys_DayGWq, dailytimeSeries_$yy_woy, sum)
	brhessys_WeekBaseflow = brhessys_WeekStreamflow-brhessys_WeekReturn-brhessys_WeekGWq #subsurface
	brhessys_WeekSatArea = tapply(brhessys_DaySatArea, dailytimeSeries_$yy_woy, mean)
	brhessys_WeekSatz = tapply(brhessys_DaySatz, dailytimeSeries_$yy_woy, mean)
	brhessys_WeekUnsat = tapply(brhessys_DayUnsat, dailytimeSeries_$yy_woy, mean)
	# per-year weekly correlations between flow components and precipitation.
	# NOTE(review): the min/median/max/sd SatArea entries below use the FULL
	# period daily series (no [x] subset), unlike the weekly stats above,
	# so those four values repeat identically for every year -- confirm.
	hhresult = simplify2array(tapply(1:dim(dailytimeSeries_)[1], dailytimeSeries_$year, function(x){
		brhessys_WeekPrecip = tapply(brhessys_DayRain[x], dailytimeSeries_$yy_woy[x], sum)
		brhessys_WeekStreamflow = tapply(brhessys_DayFlow[x], dailytimeSeries_$yy_woy[x], sum)
		brhessys_WeekReturn = tapply(brhessys_DayReturn[x], dailytimeSeries_$yy_woy[x], sum)
		brhessys_WeekGWq = tapply(brhessys_DayGWq[x], dailytimeSeries_$yy_woy[x], sum)
		brhessys_WeekBaseflow = brhessys_WeekStreamflow-brhessys_WeekReturn-brhessys_WeekGWq #subsurface
		brhessys_WeekSatArea = tapply(brhessys_DaySatArea[x], dailytimeSeries_$yy_woy[x], mean)
		brhessys_WeekSatz = tapply(brhessys_DaySatz[x], dailytimeSeries_$yy_woy[x], mean)
		brhessys_WeekUnsat = tapply(brhessys_DayUnsat[x], dailytimeSeries_$yy_woy[x], mean)
		return <- c(
			cor(brhessys_WeekReturn/brhessys_WeekStreamflow, brhessys_WeekPrecip),
			cor(brhessys_WeekBaseflow/brhessys_WeekStreamflow, brhessys_WeekPrecip),
			cor(brhessys_WeekGWq/brhessys_WeekStreamflow, brhessys_WeekPrecip),
			cor(brhessys_WeekUnsat, brhessys_WeekPrecip),
			cor(brhessys_WeekSatz, brhessys_WeekPrecip),
			cor(brhessys_WeekSatArea, brhessys_WeekPrecip),
			min(brhessys_DaySatArea),
			median(brhessys_DaySatArea),
			max(brhessys_DaySatArea),
			sd(brhessys_DaySatArea)/mean(brhessys_DaySatArea)
		)
	})) ## years by columns; variable by rows
	# per-year summary matrix (one row per year group)
	annualTable=cbind(
		tapply(dailytimeSeries_$year,INDEX=dailytimeSeries_$year,mean),
		brhessys_YearRain,
		brhessys_YearET,
		brhessys_YearFlow,
		brhessys_YearCanopy,
		brhessys_YearLitter,
		brhessys_YearDets,
		brhessys_YearRZ,
		brhessys_YearUnsat,
		brhessys_YearSatdef,
		brhessys_YearGWS,
		(brhessys_YearET+ brhessys_YearFlow)/brhessys_YearRain, # value 1 is good
		##----------------------
		brhessys_YearInf,
		brhessys_YearUnsatDrain,
		brhessys_YearCap,
		brhessys_YearGWq,
		brhessys_YearPSN,
		brhessys_YearSatz,
		##----------------------
		t(hhresult)
		##----------------------
	);
	colnames(annualTable)=c(
		"year","rain","rhessys_ET","rhessys_Flow","rhessys_Canopy","rhessys_Litter","rhessys_Detention","rhessys_RZ","rhessys_Unsat","rhessys_Satdef","rhessys_GW","rhessys_Balance",
		"annualInf","annualUnsatDrain","annualCap","annualGWq","annualPSN","annualSatz",
		"returnflow_precip","subflow_precip","gwq_precip","unsat_precip","satz_precip","satArea_precip","minSatArea","medianSatArea","maxSatArea","SDSatArea"
	)
	# scalar behavioural summary (means over years; whole-period correlations)
	annualList=c(
		mean((brhessys_YearET+ brhessys_YearFlow)/brhessys_YearRain),
		mean(brhessys_YearInf),
		mean(brhessys_YearUnsatDrain),
		mean(brhessys_YearCap),
		mean(brhessys_YearSatz),
		mean(brhessys_YearPSN),
		mean(brhessys_YearFlow/brhessys_YearRain),
		mean(brhessys_YearET/brhessys_YearRain),
		cor(brhessys_YearET,brhessys_YearRain),
		cor(brhessys_YearFlow,brhessys_YearRain),
		mean(brhessys_YearGWq/brhessys_YearFlow),
		cor(brhessys_WeekReturn/brhessys_WeekStreamflow, brhessys_WeekPrecip),
		cor(brhessys_WeekBaseflow/brhessys_WeekStreamflow, brhessys_WeekPrecip),
		cor(brhessys_WeekGWq/brhessys_WeekStreamflow, brhessys_WeekPrecip),
		cor(brhessys_WeekUnsat, brhessys_WeekPrecip),
		cor(brhessys_WeekSatz, brhessys_WeekPrecip),
		cor(brhessys_WeekSatArea, brhessys_WeekPrecip),
		mean(brhessys_DaySatArea),
		sd(brhessys_DaySatArea)/mean(brhessys_DaySatArea)
	)
	names(annualList)=c('ETFpcp','inf','unsatDrain','cap','satz','PSN','runoffRatio','ETratio','ETpcpCor','FlowpcpCor','GWQratio','returnPcp','subflowPcp','GWpcp','UnsatPcp','satzPcp','satAreaPcp','satArea','satAreaSD')
	return<-list(
		AnnualTable=annualTable,
		AnnualList=annualList
	)
	# add summary
}#end of function
|
bcf88e2adad47c13ee3923061537340417b0be3e | 735a990bd9dd38815d5b5c0337518c5ae6997036 | /cachematrix.R | 7a5ccb4b278e6e7646b1250f1603f393b9bb5cf3 | [] | no_license | Yussifm1/ProgrammingAssignment2 | c69bf4a5c8c04add3b3976ed47f16157f52ffff7 | 926236bdecbb21affa29481223e40d458dd31afc | refs/heads/master | 2021-07-01T09:49:58.630256 | 2020-12-25T18:20:17 | 2020-12-25T18:20:17 | 207,393,041 | 0 | 0 | null | 2019-09-09T19:55:57 | 2019-09-09T19:55:56 | null | UTF-8 | R | false | false | 984 | r | cachematrix.R | ## The function creates a special "matrix" object that cache its inverse.
## In the (1) function,we create a special"matrix" object which cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
in <- NULL
set <- function(y){ x <<-y
in <- NULL
}
get <- function()x
setInverse<- function(inverse)
in<<- inverse
getInverse <- function()in
list(set = set, get = get,
setInverse = setInverse, getInverse = getInverse)
}
## (2)function computes the inverse of the special "matrix" returned by makeCacheMatrix above
cacheSolve <- function(x, ...) {
in <- x$getInverse()
if (!is .null(in)){
message ("cached data")
return(in)
}
data <-x$get()
in <- solve(data)
x$setInverse(in)
in
}
|
ae6591e108d8ed38b2b83379694964d75f0a8019 | 3c37e20c65d3918fede609648fccc7e997527c47 | /man/crcrec.Rd | f409cfb2e18db141927056d9a80cdf3ca94edbfc | [] | no_license | cran/Rwave | 67cb1e7b8e234c1f49b1ac4cbadef024a04afc21 | 72a5fc18fdb0c4ae4cf6aa9985617268585ffae5 | refs/heads/master | 2022-11-04T20:48:09.750339 | 2022-10-21T22:17:49 | 2022-10-21T22:17:49 | 17,713,902 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,776 | rd | crcrec.Rd | \name{crcrec}
\alias{crcrec}
\title{
Crazy Climbers Reconstruction by Penalization
}
\description{
Reconstructs a real valued signal from the output of \code{\link{crc}}
(wavelet case) by minimizing an appropriate quadratic form.
}
\usage{
crcrec(siginput, inputwt, beemap, noct, nvoice, compr, minnbnodes=2,
w0=2 * pi, bstep=5, ptile=0.01, epsilon=0, fast=FALSE, para=5, real=FALSE,
plot=2)
}
\arguments{
\item{siginput}{
original signal.
}
\item{inputwt}{
wavelet transform.
}
\item{beemap}{
occupation measure, output of \code{\link{crc}}.
}
\item{noct}{
number of octaves.
}
\item{nvoice}{
number of voices per octave.
}
\item{compr}{
compression rate for sampling the ridges.
}
\item{minnbnodes}{
minimal number of points per ridge.
}
\item{w0}{
center frequency of the wavelet.
}
\item{bstep}{
size (in the time direction) of the steps for chaining.
}
\item{ptile}{
relative threshold of occupation measure.
}
\item{epsilon}{
constant in front of the smoothness term in penalty function.
}
\item{fast}{
if set to TRUE, uses trapezoidal rule to evaluate $Q_2$.
}
\item{para}{
scale parameter for extrapolating the ridges.
}
\item{real}{
if set to TRUE, uses only real constraints.
}
\item{plot}{
1: displays signal,components,and
reconstruction one after another. 2: displays
signal, components and reconstruction.
}}
\value{
Returns a structure containing the following elements:
\item{rec}{
reconstructed signal.
}
\item{ordered}{
image of the ridges (with different colors).
}
\item{comp}{
2D array containing the signals reconstructed from ridges.
}
}
\details{
  When ptile is high, boundary effects may appear.
para controls extrapolation of the ridge.
}
%\references{}
\seealso{
\code{\link{crc}}, \code{\link{cfamily}}, \code{\link{scrcrec}}.
}
\keyword{ts}
|
0ac74a8e4f8a7df46c6c4c9439f269f56e23c8d1 | aa04e8849d6b68749448784433d8bb59817e0a4d | /R/accesskey.R | 26d4b281b005bf24da72f8801cb5ed9ba40f2ca4 | [] | no_license | amhamm/aws.iam | c9471ecbb18829de3ab0a18316be9f1a76b896ab | b041db09620c3da9f301bfbc249e8d76d19989ce | refs/heads/master | 2022-07-01T04:36:23.494109 | 2020-05-11T04:54:42 | 2020-05-11T04:54:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,731 | r | accesskey.R | #' @rdname keys
#' @title Manage Access Keys/Credentials
#' @description Retrieve, create, update, and delete IAM access keys
#' @param key A character string specifying an access key or an object of class \dQuote{iam_key}.
#' @param user Optionally, a character string specifying a user name or an object of class \dQuote{iam_user}. This will be retrieved by default from the \dQuote{UserName} list entry in \code{key}, if available; otherwise the user is assumed to be the user whose credentials are being used to execute the request.
#' @param status A character string specifying either \dQuote{Active} or \dQuote{Inactive} to status the key status to.
#' @template n
#' @template marker
#' @template dots
#' @return \code{create_key} returns an object of class \dQuote{iam_key}. \code{update_key} and \code{delete_key} return a logical \code{TRUE} (if successful) or an error. \code{list_keys} returns a list of IAM key objects.
#' @examples
#' \dontrun{
#' # list access keys
#' list_keys()
#'
#' # create a user key
#' u <- create_user("example-user")
#' str(k <- create_key(u))
#'
#' # toggle key status to inactive
#' update_key(k, u, "Inactive")
#' list_keys(u)
#'
#' # cleanup
#' delete_key(k)
#' delete_user(u)
#' }
#' @seealso \code{\link{create_user}}
#' @export
# Create a new IAM access key for a user. The resolved user name is echoed
# onto the returned key object so later update/delete calls can reuse it.
create_key <- function(user, ...) {
    query <- list(Action = "CreateAccessKey")
    # resolve the user argument (string or iam_user object) to a plain name
    username <- get_username(user)
    query$UserName <- username
    response <- iamHTTP(query = query, ...)
    key <- response[["CreateAccessKeyResponse"]][["CreateAccessKeyResult"]][["AccessKey"]]
    key[["UserName"]] <- username
    class(key) <- "iam_key"
    key
}
#' @rdname keys
#' @export
# Toggle an access key between "Active" and "Inactive".
# The user name is taken from the 'user' argument when given, otherwise from
# the name stored on the key object (if any); otherwise IAM infers the
# caller's own identity.
update_key <- function(key, user, status, ...) {
    query <- list(Action = "UpdateAccessKey", AccessKeyId = get_keyid(key))
    allowed_status <- c("Active", "Inactive")
    if (!status %in% allowed_status) {
        stop("'status' must be one of: ", paste0(allowed_status, collapse = ", "))
    }
    query$Status <- status
    if (!missing(user)) {
        query[["UserName"]] <- get_username(user)
    } else if (inherits(key, "iam_key") && !is.null(key[["UserName"]])) {
        query[["UserName"]] <- key[["UserName"]]
    }
    response <- iamHTTP(query = query, ...)
    # on success, collapse the raw response to a simple TRUE
    if (inherits(response, "aws_error")) response else TRUE
}
#' @rdname keys
#' @export
# Delete an IAM access key. An explicit 'user' argument wins; otherwise the
# user name stored on the key object is used, when present.
delete_key <- function(key, user, ...) {
    query <- list(Action = "DeleteAccessKey", AccessKeyId = get_keyid(key))
    if (!missing(user)) {
        query[["UserName"]] <- get_username(user)
    } else if (inherits(key, "iam_key") && !is.null(key[["UserName"]])) {
        query[["UserName"]] <- key[["UserName"]]
    }
    response <- iamHTTP(query = query, ...)
    # on success, collapse the raw response to a simple TRUE
    if (inherits(response, "aws_error")) response else TRUE
}
#' @rdname keys
#' @export
# List IAM access keys, optionally for a specific user, with pagination via
# 'marker' and a page size 'n' (1-1000). Each element of the returned list
# is an "iam_key" object; the pagination marker (if any) is attached as the
# "marker" attribute of the returned list.
#
# Bug fix: the original read the marker from the lapply() result AFTER
# overwriting 'out', so attr(out, "marker") was always NULL; the marker is
# now read from the raw response before 'out' is replaced.
list_keys <- function(user, n, marker, ...) {
    query <- list(Action = "ListAccessKeys")
    if (!missing(user)) {
        user <- get_username(user)
        query[["UserName"]] <- user
    } else {
        # no user given: keys of the requesting credentials; echoed name is NULL
        user <- NULL
    }
    if (!missing(marker)) {
        query[["Marker"]] <- marker
    }
    if (!missing(n)) {
        if (!n %in% 1:1e3) {
            stop("'n' must be in 1:1000")
        }
        query[["MaxItems"]] <- n
    }
    out <- iamHTTP(query = query, ...)
    if (!inherits(out, "aws_error")) {
        # unwrap the result payload once; both the metadata and the marker
        # live under ListAccessKeysResult
        result <- out[["ListAccessKeysResponse"]][["ListAccessKeysResult"]]
        check_truncation(result[["IsTruncated"]])
        keys <- lapply(result[["AccessKeyMetadata"]], function(x) {
            x[["UserName"]] <- user
            class(x) <- "iam_key"
            x
        })
        # read the marker from the raw result BEFORE replacing 'out'
        attr(keys, "marker") <- result[["Marker"]]
        out <- keys
    }
    out
}
|
62ab4ed6644b3eb25957fdac9251ab181fc9826c | a0cb7f726846348a79f2912c148dd148c08b0db6 | /R/Batch_Score.R | bec2716c78770ffc940ea9e476eaa87f8f5110d9 | [] | no_license | paparker/SHAPE | 9de0b036b4e4f659404bd1dc0039da5d8587edf3 | 1936cdbd26ed6deb9bd104aad81c4ee6f157a9c2 | refs/heads/main | 2023-03-17T15:08:48.677421 | 2021-03-11T16:48:25 | 2021-03-11T16:48:25 | 346,103,974 | 6 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,788 | r | Batch_Score.R | library(readr)
library(ggplot2)
library(dplyr)
### This script will batch score soil SOC data using the SHAPE procedure
## The input data should be changed to generate your own scores
dataIn <- read_csv('Data/dataIn.csv') ## Read in your own data with same format
## Columns should be:
# MATC: (numeric) mean annual temp. in degrees Celsius
# MAPmm: (numeric) mean annual precipitation in milimeters
# Texture: (character/text) Texture category...one of (T1, T2, T3, T4, T5)
# Suborder: (character/text) Suborder category...one of (S2, S3, S4, S5)
# Note that S1 is not part of the initial SHAPE modelling approach...to be done separately
# SOC: (numeric) soil organic carbon as a percentage (e.g. 3% -> 3)
##########################################
##### Leave the remaining code as is #####
##########################################
## Read in fitted model object (posterior draws of Beta, group effects, variances)
mod <- read_rds('Data/logitMod.rds')
## Lookups: transform SOC if the model was fit on the logit scale, and map
## each row's (Texture, Suborder) pair to its group-effect index
if(mod$Transform=="logit"){ soc <- qlogis(dataIn$SOC/100)} else soc <- dataIn$SOC
grp <- dataIn %>% left_join(mod$Groups, by=c("Texture", "Suborder")) %>% select(ID) %>% unlist()
xint <- mod$GroupXmat[grp,]
## Make Predictions
## Note that this creates a posterior distribution of predictions...one for each iteration of the model fit
xt <- cbind(xint, dataIn$MATC, dataIn$MAPmm)
mu <- mod$Beta%*%t(xt)
sig <- sqrt(mod$Variance[,grp])
## Construct output file: posterior mean and 95% interval of the score
tmp <- pnorm(soc, mean=t(mu), sd=t(sig)) ## This is the CDF evaluated for input data
outTab <- data.frame("Score_2.5%"=apply(tmp, 1, quantile, probs=0.025),
                     "Score_Mean"=apply(tmp, 1, mean),
                     "Score_97.5%"=apply(tmp, 1, quantile, probs=0.975), check.names = F)
dataOut <- cbind(dataIn, outTab)
write_csv(dataOut, 'Data/Scored_Data.csv')
|
218df6ed1a4a8d56b9f3bd20b445f8f4404ddef0 | 7a062dd29d3392ef15043d35ad09a67c205904f5 | /man/dMVhyper.Rd | 24ef43e5c91c97f3d940d67296a43a971959deac | [] | no_license | benaug/Mbmisscap | 68b0bb83b52f8ef64b45a63a65f8eab8cf4e03a6 | 3340ff0752d71c9d3b64470f57692cc8c55fcae9 | refs/heads/master | 2021-01-10T14:08:44.503350 | 2018-10-08T16:53:55 | 2018-10-08T16:53:55 | 51,013,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 320 | rd | dMVhyper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MVHyper.R
\name{dMVhyper}
\alias{dMVhyper}
\title{PDF for the multivariate hypergeometric distribution}
\usage{
dMVhyper(k, K)
}
\arguments{
\item{k}{adf}
\item{K}{adf}
}
\description{
PDF for the multivariate hypergeometric distribution
}
|
b3bcd2c8a4ded4702a19b479c0c0e008e5179dc6 | 64ba2b77091c05be531ba828ec309686f9988f3b | /R/Competing risks analysis/all_data_analysis_RC.R | 65806aea247f3348082f67bf8ab6439db0feefdd | [
"MIT"
] | permissive | robmarkcole/Useful-python | 0f718ea5f200a646b807047c5ca83081b4705f6d | 3be6169e35dc66d3e8af07d440a6895ed2455c1c | refs/heads/master | 2023-07-20T08:30:52.332756 | 2023-07-16T07:52:07 | 2023-07-16T07:52:07 | 107,223,253 | 75 | 35 | MIT | 2023-07-16T07:52:08 | 2017-10-17T05:45:32 | Jupyter Notebook | UTF-8 | R | false | false | 1,182 | r | all_data_analysis_RC.R | setwd("~/Documents/Github/Useful-python-for-medical-physics/R/Competing risks analysis")
# Competing-risks (cumulative incidence) analysis. Exactly ONE of the
# alternative input files below should be uncommented at a time.
# All 211 patients
#all_data_RC <- read.csv("~/Documents/Github/Useful-python-for-medical-physics/R/Competing risks analysis/all_data_RC.csv")
# Drop 8 PB patients
#all_data_RC <- read.csv("~/Documents/Github/Useful-python-for-medical-physics/R/Competing risks analysis/all_data_RC_drop_8PB.csv")
# Drop 32 PB patients
#all_data_RC <- read.csv("~/Documents/Github/Useful-python-for-medical-physics/R/Competing risks analysis/all_data_RC_drop_32PB.csv")
# include 3 equal competing events (cat 2)
# all_data_RC <- read.csv("~/Documents/Github/Useful-python-for-medical-physics/R/Competing risks analysis/all_data_RC_with_competing.csv")
# include significantly more competing events to PB
all_data_RC <- read.csv("~/Documents/Github/Useful-python-for-medical-physics/R/Competing risks analysis/all_data_RC_with_competing2.csv")
# NOTE(review): attach() puts columns on the search path; fine for this
# one-off script but fragile if column names shadow existing objects.
attach(all_data_RC)
# recode group indicator 0/1 as labelled factor (CCC vs PB)
dis=factor(dis, levels= c(0,1), labels =c("CCC", "PB"))
table(dis, status) # see summary of data (events per group/status code)
tapply(ftime, list(dis, status), mean)  # mean follow-up time per cell
source ("CumIncidence.R") # import the CumIncidence() helper
# cumulative incidence by group; status code 0 treated as censored
fit=CumIncidence (ftime, status, dis, cencode = 0, xlab="Months")
92386910134dc05c3e0d1914af1bf0e1e0fa83e1 | 1ed12913fb9b98db702389871ea761c46fdee116 | /man/z[.td.data.frame.Rd | 9915ec9927f6a053ec2542c5bee3f9ccabd7293d | [] | no_license | ghuiber/teradataR | 9053adf62e0151b320da4f9ca840d056adcdcad2 | d097a9484f8cf53803f1ba26181970042bd146bb | refs/heads/master | 2021-01-22T01:33:57.288324 | 2014-09-12T20:43:55 | 2014-09-12T20:43:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 929 | rd | z[.td.data.frame.Rd | \name{[.td.data.frame}
\alias{[.td.data.frame}
\title{
Extract Teradata Data Frame
}
\description{
Allow manipulation and extraction of a td data frame.
}
\usage{
\method{[}{td.data.frame}(x, i, j)
}
\arguments{
\item{x}{
td data frame.
}
\item{i}{
element to extract
}
\item{j}{
element to extract
}
}
\details{
This overloaded function operates almost exactly like a data frame extract. It
can be used to reduce the number of columns by using the index values. One
noticeable difference is that a td data frame has no actual data in the client.
This means that row operations don't extract certain rows like a data frame object.
You must use td.sample if you wish to alter table sizes.
}
\value{
td data frame of new column dimensions.
}
\examples{
\dontrun{
tdf[3] #returns td data frame with just column 3
tdf["age"] #returns td data frame with column age
}
}
|
139d8f94d2c0262e8dab0d778432a70a80756687 | 244579496176fcdb898338e227a3a7818c18ef30 | /test2.R | cc1da6395435d03434e14e19e34d280081799f3d | [] | no_license | youknowwhatmynameis/Statistics2 | 93b816762494cbe00375b24a4561db1be8dcbf62 | 25e33c76302d81fab74e69d2e83460f3dc7b6a52 | refs/heads/master | 2023-04-15T07:55:40.952668 | 2021-04-29T09:47:42 | 2021-04-29T09:47:42 | 362,769,273 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 565 | r | test2.R | Centrala
# Exploratory summary of the `Centrala` data set (presumably call counts at
# a switchboard; column `Liczba` = count — TODO confirm against the data).
print(Centrala)
# frequency table of the observed counts
liczebnosc <- table(Centrala)
liczebnosc
# relative frequencies (empirical probabilities)
procent <- prop.table(table(Centrala))
procent
# counts and proportions side by side
tabela <- cbind(liczebnosc,procent)
tabela
# empirical distribution plotted as counts
barplot(liczebnosc,
        xlab = "Liczba zgłoszeń",
        ylab = "Liczebność",
        main = "Rozkład empiryczny",
        col = 1:6)
# empirical distribution plotted as probabilities
barplot(procent,
        xlab = "Liczba zgłoszeń",
        ylab = "Prawdopodobieństwo",
        main = "Rozkład empiryczny",
        col = 1:6)
# location, spread and coefficient of variation (in percent)
mean(Centrala$Liczba)
median(Centrala$Liczba)
sd(Centrala$Liczba)
sd(Centrala$Liczba)/mean(Centrala$Liczba)*100
461fa048ab3f77a52430d88acee64bd2d1ef4ec6 | cb93cf0799e3eedca6f9e720e09bb60e0f77ff10 | /R/Arguments.EXTRAS.R | df92825b465850f1b09f733c68c194d9c7f3e80f | [] | no_license | HenrikBengtsson/R.filesets | 254c37b4546e8280b9972d06840b918e12e0b4e9 | 17181ae1c84dbf7bad1214d37e6f133ed2deeba4 | refs/heads/master | 2023-01-08T23:58:09.708417 | 2022-07-21T09:52:18 | 2022-07-21T09:52:18 | 20,844,863 | 3 | 1 | null | 2018-04-03T22:12:45 | 2014-06-15T00:25:31 | R | UTF-8 | R | false | false | 1,729 | r | Arguments.EXTRAS.R | #########################################################################/**
# @set "class=Arguments"
# @RdocMethod getTags
#
# @title "Gets and validates tags"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{@character @vector of tags.}
# \item{na.rm}{If @TRUE, empty ("missing") tags are dropped.}
# \item{collapse}{A @character string specifying how the tags should
# be concatenated into a single string.
# If @NULL, they are not concatenated.}
# }
#
# \value{
# Returns a @character string or
# @character @vector (iff \code{collapse} is @NULL).
# }
#
# @author
#
# \seealso{
# For more information see \code{\link[R.utils]{Arguments}}.
# }
#*/#########################################################################
setMethodS3("getTags", "Arguments", function(static, ..., na.rm=TRUE, collapse=",") {
  # Collects the tags passed via '...', splits comma-separated strings into
  # individual tags, trims whitespace, drops missing/empty tags (when
  # na.rm=TRUE), and optionally collapses the result into a single string.
  # Returns NULL when no tags remain.
  args <- list(...)
  ## Drop empty elements
  keep <- (unlist(lapply(args, FUN=length), use.names=FALSE) > 0)
  args <- args[keep]
  ## Nothing to do?
  if (length(args) == 0) return(NULL)
  ## Drop missing elements?
  # NOTE(review): is.na() is applied element-wise, so an argument that is a
  # vector of length > 1 contributes more than one logical and 'keep' no
  # longer lines up with 'args'. Scalar NA arguments are handled correctly;
  # verify multi-element arguments behave as intended.
  if (na.rm) {
    keep <- !unlist(lapply(args, FUN=is.na), use.names=FALSE)
    args <- args[keep]
  }
  # Generate tags: paste all arguments into one comma-separated string
  # (sep and collapse are both "," so vector arguments and multiple
  # arguments flatten uniformly), then split on commas again.
  tags <- do.call(paste, args=c(args, sep=",", collapse=","))
  tags <- Arguments$getCharacters(tags)
  tags <- strsplit(tags, split=",", fixed=TRUE)
  tags <- unlist(tags)
  tags <- trim(tags)
  # Drop missing tags?
  if (na.rm) {
    tags <- tags[!is.na(tags)]
  }
  # Drop empty tags
  tags <- tags[nchar(tags, type="chars") > 0L]
  # Nothing to do?
  if (length(tags) == 0) return(NULL)
  # Collapse into one string unless the caller asked for a vector
  # (collapse=NULL)?
  if (!is.null(collapse)) {
    tags <- paste(tags, collapse=collapse)
  }
  tags
}, static=TRUE, protected=TRUE)
|
2ab2134d22613c553f3cc94f1c0e14fbc9227af5 | a87fbb4d8286a50ea6d36f1432b1b27c5f96085e | /vignettes/unemployed/src/unemployed.R | 9d8b77eda5edbdffe450111c28f8f946f492947d | [] | no_license | terourou/small-area-estimation | 938908a28a0d87853368f11e4be51ad0b7e9eabf | 935796305459a7d348134d5578a32f18768402cb | refs/heads/master | 2023-07-03T08:19:35.748766 | 2021-08-11T04:58:00 | 2021-08-11T04:58:24 | 319,798,808 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 270 | r | unemployed.R |
library(dembase)
library(dplyr)
## for simplicity, taking only a single year
unemployed <- readRDS("out/raw.rds") %>%
filter(time == 2019) %>%
dtabs(unemployed ~ age + sex + district) %>%
Counts()
saveRDS(unemployed,
file = "out/unemployed.rds")
|
a34b8eb7cc54199fa834df8cebf110d060da7119 | 528de138fdcdc544b8420fbe87b00077abbc560a | /01_Introduction_to Statistics_Using_R/week01.R | be913f86595544e6a3560077c67e436cf99707b9 | [] | no_license | Romanism/R | 95ff5c3bdc5e40eb94a91f73d1d18dfbbb17db44 | 24d28085091d0da54f09db425dc7bd8cb22b7f57 | refs/heads/master | 2020-03-22T14:24:55.107301 | 2018-07-17T04:19:33 | 2018-07-17T04:19:33 | 140,177,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,827 | r | week01.R | # 1.3 R 들어가기
# 1.3.1 Installing R
# (1) go to the site called CRAN (Comprehensive R Archive Networks) http://www.r-project.org/
# (2) execute "Download R"
# (3) choose Korea http://cran.nexr.com/
# (4) click "Download for R Windows"
# (5) click "base"
# (6) click "Download R 3.3.1 for Windows"
# 1.3.2 How to use R
# A. Points to note
# (1) case sensitive (upper/lower case matters)
# (2) commands are separated by ; or a new line
# (3) comments can be put anywhere starting with #
# (4) a command is continued onto the next line when it ends with +
# B. Built-in utilities
# (1) help, example, demo
help(solve) # help page
example(solve) # run the examples
demo(persp) # demonstration
# (2) data : built-in data sets can be loaded by name
women # (height, weight, n=15)
stackloss # (Air.Flow, Water.Temp, Acid.Conc., stack.loss, n=21)
faithful # (eruptions, waiting, n=272)
sleep # (extra, group, n=20)
# (3) library : a variety of libraries are provided
# lattice : lattice graphics
# MASS : Modern Applied Statistics using S-Plus
# mgcv : generalized additive models
# nlme : mixed effects models
# nnet : neural networks and multinomial log-linear models - spatial : spatial statistics
# survival : survival analysis
library(help=survival)
# (4) Packages : collections of libraries
# Tools - install packages - install a package
# 2.1 Vectors and assignment
x <- c(10.4, 5.6, 3.1, 6.4, 21.7) # method 1
assign('x', c(10.4, 5.6, 3.1, 6.4, 21.7)) # method 2
c(10.4, 5.6, 3.1, 6.4, 21.7) -> x # method 3
1/x # elementwise reciprocal (x itself is unchanged)
y <- c(x, 0, x); y
# 2.2 Vector arithmetic
15/7 # real
15 %/% 7 # integer part
15 %% 7 # remainder part
sum((x-mean(x))^2)/(length(x)-1) # variance formula
var(x) # variance
sqrt(-17) # NaN (Not a Number), with a warning
# 2.3 Generating regular sequences
s1 <- c(1:30) ; s1 # integers from 1 to 30
s2 <- seq(-5, 5, by = .2); s2 # from -5 to 5 in steps of 0.2
s3 <- seq(length = 51, from = -5, by = .2); s3 # 51 values starting at -5, step 0.2
s4 <- rep(x, times = 5); s4 # repeat x five times (1,2,3, 1,2,3, ...)
s5 <- rep(x, each = 5); s5 # repeat each element of x five times (1,1, 2,2, 3,3)
# Built-in R data sets used in this course
# 1. faithful
# Description: data collected at the Old Faithful geyser in Yellowstone
# National Park (USA); 2 variables, 272 observations
# Variables: eruptions (eruption duration, minutes), waiting (time until
# the next eruption, minutes)
# 2. Stackloss
# Description: ammonia oxidation rates measured under varying operating
# conditions of a chemical process; 4 variables, 21 observations
# Variables: Air.Flow (air flow), Water.Temp (water temperature),
# Acid.Conc. (acid concentration), stack.loss (ammonia oxidation rate)
6a75f6f21b1d82bb93fbd1d5b1d5e57b77c791aa | d1bfc0292990e3d54023a3c85ae3a51af1a17660 | /test/rule_unset.r | d7a853c0439a0e183913738f24b78e42b9760800 | [
"Apache-2.0"
] | permissive | sara-nl/irods-msi-persistent-id | 8cc460ffa505444bd29537a32428a0efe82f564f | 3f8f8f06ebd0b55967486291417d14c5a3d0bd4a | refs/heads/master | 2020-04-12T03:33:46.291516 | 2020-04-11T11:49:20 | 2020-04-11T11:49:20 | 162,270,474 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 179 | r | rule_unset.r | myRule {
  # Remove the persistent-identifier metadata for *path and print the result.
  msiPidUnset(*path, *key, *result);
  writeLine("stdout", "*result");
}
# Rule inputs: the logical path whose PID is unset; *key and *mvalue are
# left empty here (required by the microservice signature).
INPUT *path="/tempZone/home/rods/example.txt", *key="", *mvalue=""
OUTPUT ruleExecOut
|
645ee3bb136209eb5950462d184d27fb341d1264 | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /A_github/sources/authors/100/funHDDC/funHDDC-internal.R | a436a48062e5573c2f993bce3ce4c7117adbe064 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,413 | r | funHDDC-internal.R | .bic <-
function(loglik,prms,n,T){
K = prms$k;p = prms$p;d = prms$d;model = prms$model;
tau_bar=1/K*sum(d*(p-(d+1)/2))
if (model=='AkjBkQkDk'){comp = K*p + K -1 + K *tau_bar + K*sum(d) + K}
if (model=='AkjBQkDk'){comp = K*p + K -1 + K *tau_bar + K*sum(d) + 1}
if (model=='AkBkQkDk'){comp = K*p + K -1 + K *tau_bar + K + K }
if (model=='AkBQkDk'){comp = K*p + K -1 + K *tau_bar + K + 1}
if (model=='ABkQkDk'){comp = K*p + K -1 + K *tau_bar + 1 + K}
if (model=='ABQkDk'){comp = K*p + K -1 + K *tau_bar + 2}
# aic = 2 * loglik - 2* comp
# Z = matrix(0,n,K)
# for (i in 1:n) Z[i,which.max(T[i,])] = 1
# icl = 2 * loglik - comp * log(n) - sum(log(T[Z==1]))
aic = loglik - comp
bic = loglik - comp/2 * log(n)
T[T<1e-6] = 1e-6
icl = loglik - comp/2 * log(n) - sum(T*log(T)) # ICL criterion
list(aic=aic,bic=bic,icl=icl)
}
.diago <-
function(v){
  # Build a diagonal matrix from the vector v. The length-1 case must be
  # special-cased: diag(5) would construct a 5x5 identity matrix, so a
  # scalar is returned unchanged.
  if (length(v) == 1) v else diag(v)
}
.estep <-
function(prms,fd){
  # E step of the funHDDC EM algorithm: computes the posterior probability
  # that each curve belongs to each group under the current parameters.
  #
  # Args:
  #   prms: parameter list as produced by .mstep (model, k, p, a, b, d,
  #         prop, m, Q).
  #   fd:   functional data object; t(fd$coefs) are the observations.
  #
  # Returns an n x k matrix T of posterior group probabilities.
  ## Initialization
  X = t(fd$coefs)
  model = prms$model; k = prms$k; p = prms$p;
  a = prms$a; b = prms$b; d = prms$d; prop = prms$prop;
  m = prms$m; Q = prms$Q
  A = matrix(NA,nrow(X),k)
  T = matrix(NA,nrow(X),k)
  ## Cost function computing
  for(i in 1:k){
    # Projection of test data in the eigenspace Ei
    Qi = as.matrix(Q[i,,1:d[i]])
    # Pa: centered data projected onto the group's d[i]-dimensional
    # subspace; Pb: the residual component orthogonal to that subspace.
    Pa = (as.matrix(X - .repmat(m[i,],nrow(X),1)) %*% Qi) %*% t(Qi)
    Pb = Pa + as.matrix(.repmat(m[i,],nrow(X),1) - X)
    # Compute cost function A_i(x). The two branches differ in whether
    # the subspace variances are per-dimension (Akj* models: a[1:d[i],i])
    # or shared within a group (other models: a[i]).
    if (model=='AkjBkQkDk' | model=='AkjBQkDk'){
      ai = a[1:d[i],i];
      A[,i] = t(diag(Pa %*% Qi %*% .diago(1/ai) %*% t(Qi) %*% t(Pa))
          + (1/b[i] * rowSums(Pb^2)) + sum(log(ai))
          + (p-d[i]) * log(b[i]) - 2 * log(prop[i]) + p * log(2*pi));
    }
    if (model=='AkBkQkDk' | model=='AkBQkDk' | model=='ABkQkDk' | model=='ABQkDk'){
      A[,i] = t(1/a[i] * rowSums(Pa^2) + (1/b[i] * rowSums(Pb^2)) + d[i] * log(a[i])
          + (p-d[i]) * log(b[i]) - 2 * log(prop[i]) + p * log(2*pi));
    }
  }
  ## Posterior probabilities, derived from pairwise differences of the
  ## cost functions so the common normalizing constant cancels.
  for (i in 1:k) {T[,i] = 1 / rowSums(exp(0.5*(t(.repmat(A[,i],k,1))-A)))}
  T
}
.loglikelihood <-
function(prms,fd){
  # Observed-data log-likelihood of the funHDDC model: same per-group
  # cost functions as .estep, combined across groups with a log-sum-exp.
  #
  # Args:
  #   prms: parameter list as produced by .mstep.
  #   fd:   functional data object; t(fd$coefs) are the observations.
  #
  # Returns the scalar log-likelihood (last assignment is the function's
  # return value).
  X = t(fd$coefs)
  ## Initialization
  model = prms$model; k = prms$k; p = prms$p;
  a = prms$a; b = prms$b; d = prms$d; prop = prms$prop;
  m = prms$m; Q = prms$Q
  A = matrix(NA,nrow(X),k)
  ## Cost function computing
  for(i in 1:k){
    # Projection of test data in the eigenspace Ei
    Qi = as.matrix(Q[i,,1:d[i]])
    Pa = (as.matrix(X - .repmat(m[i,],nrow(X),1)) %*% Qi) %*% t(Qi)
    Pb = Pa + as.matrix(.repmat(m[i,],nrow(X),1) - X)
    # Compute cost function A_i(x); branches as in .estep (per-dimension
    # vs shared subspace variances).
    if (model=='AkjBkQkDk' | model=='AkjBQkDk'){
      ai = a[1:d[i],i];
      A[,i] = t(diag(Pa %*% Qi %*% .diago(1/ai) %*% t(Qi) %*% t(Pa))
          + (1/b[i] * rowSums(Pb^2)) + sum(log(ai))
          + (p-d[i]) * log(b[i]) - 2 * log(prop[i]) + p * log(2*pi));
    }
    if (model=='AkBkQkDk' | model=='AkBQkDk' | model=='ABkQkDk' | model=='ABQkDk'){
      A[,i] = t(1/a[i] * rowSums(Pa^2) + (1/b[i] * rowSums(Pb^2)) + d[i] * log(a[i])
          + (p-d[i]) * log(b[i]) - 2 * log(prop[i]) + p * log(2*pi));
    }
  }
  # Convert the cost to per-group log-densities ...
  A = -1/2 * A
  # ... and sum log(sum_i exp(A_i)) over observations using the
  # log-sum-exp trick (subtracting the row maximum) for stability.
  l = sum(log(rowSums(exp(A-apply(A,1,max))))+apply(A,1,max))
}
.mstep <-
function(fd,T,thd,model){
  # M step of the funHDDC EM algorithm: given posterior membership
  # probabilities, re-estimates mixture proportions, group means, a
  # weighted functional PCA per group, the intrinsic dimension of each
  # group (Cattell scree test) and the variance parameters of the
  # requested parsimonious covariance model.
  #
  # Args:
  #   fd:    functional data object; t(fd$coefs) are the observations.
  #   T:     N x k matrix of posterior group membership probabilities.
  #   thd:   threshold of the Cattell scree test used to pick d[i].
  #   model: one of 'AkjBkQkDk', 'AkjBQkDk', 'AkBkQkDk', 'AkBQkDk',
  #          'ABkQkDk', 'ABQkDk'.
  #
  # Returns a list with components model, k, p, a, b, m, prop, d, Q and
  # fpcaobj (the per-group weighted PCA objects).
  X = t(fd$coefs)
  # NOTE(review): cls is computed but never used below.
  cls = max.col(T)
  ## Initialization
  k = ncol(T); N <- nrow(X); p <- ncol(X)
  n = matrix(0,1,k); d = matrix(0,1,k); prop = matrix(0,1,k)
  m = matrix(0,k,p); Tr = matrix(0,1,k); L = matrix(0,k,p)
  V = matrix(0,p,(k*p)); Q = array(NA,c(k,p,p))
  fpcaobj = list()
  ## Compute intrinsic dim and eigen-decomposition
  for (i in 1:k){
    # fdi = fd
    # Class parameters: effective group size, mixing proportion and
    # probability-weighted mean of the basis coefficients.
    n[i] = sum(T[,i])
    prop[i] = n[i] / N
    m[i,] = colSums(t(.repmat(T[,i],p,1)) * X) / n[i]
    # XX = as.matrix(X - .repmat(m[i,],N,1))
    # Si = t(T[,i] * XX) %*% XX / n[i]
    # Eigen-decomposition via a functional PCA weighted by T[,i].
    dc = .mypca.fd(fd,T[,i],nharm=p)
    # NOTE(review): dc$val relies on partial matching of the list
    # element dc$values returned by .mypca.fd.
    L[i,] = dc$val
    L[i,L[i,]<0] = 0
    # barplot(dc$val); Sys.sleep(1)
    Q[i,,] = dc$U
    Tr[i] = sum(L[i,]);
    # Find intrinsic dimensions using the scree test of Cattell: d[i] is
    # the first j after which every eigenvalue drop falls below
    # thd * (largest drop); defaults to p-1 when no such j exists.
    dc$val[dc$val<0] = 0
    sc = abs(diff(dc$val))
    d[i] = p-1
    for (j in 1:(p-2)){
      if (prod(sc[(j+1):length(sc)] < thd * max(sc))){
        d[i] = j
        break
      }
    }
    fpcaobj[[i]] = dc
  }
  ## Computing model parameters: a holds the subspace variances (one per
  ## dimension and group for AkjBkQkDk, otherwise one per group), b the
  ## noise variance outside the subspace; models with a common B share b
  ## across groups and AB* models share a as well.
  if (model=='AkjBkQkDk'){a = matrix(0,p,k)} else {a = matrix(0,1,k)}
  b = matrix(0,1,k)
  if (model=='AkjBkQkDk' | model=='AkjBQkDk'){
    for (i in 1:k){
      a[1:d[i],i] = L[i,1:d[i]]
      b[i] = (Tr[i] - sum(L[i,1:d[i]])) / (p-d[i])
    }
    if (model=='AkjBQkDk') b[1:k] = mean(b)
  }
  if (model=='AkBkQkDk' | model=='AkBQkDk' | model=='ABkQkDk' | model=='ABQkDk'){
    for (i in 1:k){
      a[i] = sum(L[i,1:d[i]]) / d[i]
      b[i] = (Tr[i] - sum(L[i,1:d[i]])) / (p-d[i])
    }
    if (model=='AkBQkDk' | model=='ABQkDk') b[1:k] = mean(b)
    if (model=='ABkQkDk' | model=='ABQkDk') a[1:k] = mean(a)
  }
  ## Returning model parameters (the assignment is also the function's
  ## return value).
  prms <- list(model=model,k=k,p=p,a=a,b=b,m=m,prop=prop,d=d,Q=Q,fpcaobj=fpcaobj)
}
.mypca.fd <-
function(fdobj, Ti, nharm = 2, harmfdPar=fdPar(fdobj), centerfns = TRUE){
  # Carry out a functional PCA with regularization.
  # Adapted from fda::pca.fd; the extra argument Ti supplies one weight
  # per curve (the posterior group probabilities in .mstep) so the mean
  # and covariance are observation-weighted.
  # Arguments:
  # FDOBJ ... Functional data object
  # TI ...... Vector of per-curve weights
  # NHARM ... Number of principal components or harmonics to be kept
  # HARMFDPAR ... Functional parameter object for the harmonics
  # CENTERFNS ... If TRUE, the mean function is first subtracted from each function
  #
  # Returns: An object PCAFD of class "pca.fd" with these named entries:
  # harmonics ... A functional data object for the harmonics or eigenfunctions
  # values ... The complete set of eigenvalues
  # scores ... A matrix of scores on the principal components or harmonics
  # U ... eigenvectors mapped back to the coefficient metric
  #       (t(Lmat) %*% eigvecc); consumed by .mstep as Q[i,,] = dc$U
  # varprop ... A vector giving the proportion of variance explained
  # by each eigenfunction
  # meanfd ... A functional data object giving the mean function
  # Check FDOBJ
  if (!(inherits(fdobj, "fd"))) stop(
    "Argument FD not a functional data object.")
  # compute mean function and center if required
  # browser()
  meanfd <- mean.fd(fdobj)
  # if (centerfns) fdobj <- center.fd(fdobj)
  if (centerfns){
    # Ti-weighted mean of the basis coefficients, subtracted row-wise.
    coefmean <- apply(t(as.matrix(Ti) %*% matrix(1,1,nrow(fdobj$coefs))) * fdobj$coefs, 1, sum) / sum(Ti)
    fdobj$coefs <- sweep(fdobj$coefs, 1, coefmean)
    meanfd$coefs = as.matrix(data.frame(mean=coefmean))
  }
  # get coefficient matrix and its dimensions
  coef <- fdobj$coefs
  coefd <- dim(coef)
  ndim <- length(coefd)
  nrep <- coefd[2]
  coefnames <- dimnames(coef)
  if (nrep < 2) stop("PCA not possible without replications.")
  basisobj <- fdobj$basis
  nbasis <- basisobj$nbasis
  type <- basisobj$type
  # set up HARMBASIS
  # currently this is required to be BASISOBJ
  harmbasis <- basisobj
  # set up LFDOBJ and LAMBDA
  Lfdobj <- harmfdPar$Lfd
  lambda <- harmfdPar$lambda
  # compute CTEMP whose cross product is needed
  ctemp <- coef
  # set up cross product and penalty matrices
  # Cmat <- crossprod(t(ctemp))/nrep
  # NOTE(review): Ti has one entry per curve (length nrep) but is
  # recycled element-wise over the nbasis x nrep matrix ctemp here;
  # compare with ctemp %*% diag(Ti) %*% t(ctemp) and verify that this is
  # the intended per-curve weighting.
  Cmat = (Ti * ctemp) %*% t(ctemp) / nrep
  Jmat <- eval.penalty(basisobj, 0)
  if(lambda > 0) {
    Kmat <- eval.penalty(basisobj, Lfdobj)
    Wmat <- Jmat + lambda * Kmat
  } else { Wmat <- Jmat }
  # symmetrize to guard against numerical asymmetry
  Wmat <- (Wmat + t(Wmat))/2
  # compute the Choleski factor of Wmat
  Lmat <- chol(Wmat)
  Lmat.inv <- solve(Lmat)
  # set up matrix for eigenanalysis
  if(lambda > 0) { Cmat <- t(Lmat.inv) %*% Jmat %*% Cmat %*% Jmat %*% Lmat.inv }
  else { Cmat <- Lmat %*% Cmat %*% t(Lmat) }
  # eigenalysis
  Cmat <- (Cmat + t(Cmat))/2
  result <- eigen(Cmat)
  eigvalc <- result$values
  eigvecc <- as.matrix(result$vectors[, 1:nharm])
  # fix the sign convention: each eigenvector gets a non-negative
  # coefficient sum
  sumvecc <- apply(eigvecc, 2, sum)
  eigvecc[,sumvecc < 0] <- - eigvecc[, sumvecc < 0]
  varprop <- eigvalc[1:nharm]/sum(eigvalc)
  harmcoef <- Lmat.inv %*% eigvecc
  U = t(Lmat) %*% eigvecc
  harmscr <- t(ctemp) %*% U
  harmnames <- rep("", nharm)
  for(i in 1:nharm)
    harmnames[i] <- paste("PC", i, sep = "")
  harmnames <- list(coefnames[[1]], harmnames,"values")
  harmfd <- fd(harmcoef, basisobj, harmnames)
  pcafd <- list(harmonics=harmfd,values=eigvalc,scores=harmscr,U=U,varprop=varprop,meanfd=meanfd)
  class(pcafd) <- "pca.fd"
  return(pcafd)
}
.Random.seed <-
c(403L, 10L, 168271670L, 10116960L, 276637091L, -1050734911L,
-1366514000L, -1987247666L, -1897335495L, -856102581L, -1654072582L,
908942332L, 2095651903L, -914324203L, 458944044L, -1611926782L,
644272093L, 957729495L, 426132926L, 505184248L, -2002043941L,
4109881L, -133947512L, -1475879114L, 1138498929L, 1187903395L,
1507245810L, -1081121212L, 956025095L, 1206096093L, 937833940L,
-915707654L, -1440346043L, 1433090207L, -286498266L, 961459888L,
1960074067L, 2079831665L, -43385952L, -1435702466L, -2027457975L,
2066884347L, 865769162L, 683305004L, -1116000209L, -1484762907L,
-1911473764L, 785236978L, -828719507L, 1094331303L, -1731127858L,
1810563880L, -1844934357L, -2092461239L, -311773096L, -107504378L,
2038659297L, -1943605133L, -722121502L, -1598317484L, -546021033L,
279445837L, -418114332L, 1514694858L, -1545648843L, -821318769L,
1712353430L, 1833529408L, 6215939L, 1537717537L, -166636016L,
673203694L, 1636088217L, 1634465323L, 130514714L, 253961692L,
-9188385L, 69237685L, 784868684L, -1980771422L, 927079165L, -582736905L,
-1455203042L, 1378099160L, -923520709L, 491366105L, 42556008L,
-721802346L, -942780655L, 2118826179L, -2114860846L, 505408868L,
2033791143L, 410265085L, 1721956340L, -621444902L, -1065725403L,
671207231L, 673610950L, 1028763984L, -864657037L, -107132015L,
1277050304L, 1302755230L, -1112290967L, 677415323L, -657089366L,
1101335244L, 670552975L, -753702971L, -330896516L, 507753426L,
1567826765L, 329949703L, 825319918L, -204800824L, -1832758325L,
998864425L, 642153592L, 1235663270L, -548791359L, -230811565L,
303648514L, -686138828L, 29962167L, -1167904851L, 1311640772L,
402440426L, 8050517L, -735098641L, -2073985290L, 1315473952L,
-1778966557L, 1037292673L, -496524560L, -846993010L, -1808251143L,
-1339190133L, 2038545594L, -170190916L, 395494655L, 2003241045L,
293763948L, 1485132738L, 1007359005L, -737524585L, 2085687806L,
695038392L, 1372306843L, 644563961L, -432723128L, -1827854730L,
-322805839L, -1864957469L, 1427277490L, -896009468L, -808957881L,
-2018910435L, -158153324L, 125667642L, 379671941L, 1089191519L,
-1799747354L, -1971156112L, -1528763245L, -904234703L, 1481334752L,
47768702L, -1712183543L, -357594821L, 184495370L, -2084240148L,
-1129770641L, 1033590437L, -1803220900L, 352518194L, 614061869L,
2091922919L, -231692018L, 2099302120L, -1127956245L, 1271644553L,
-1152753384L, -2029890490L, -1729962847L, -2146360525L, -1451545950L,
-1501142636L, 2013958167L, 441755661L, 1759288612L, -935892726L,
1731854453L, -1659581489L, 240082134L, 914140288L, 1016099523L,
-1188062111L, 752557264L, 1707879982L, -710299687L, 1306578923L,
12854874L, 533225500L, 229530911L, 1486642549L, -2045178868L,
1754697186L, 476475069L, 577300791L, 1596632542L, 159097624L,
1598646907L, 942802969L, 1524154536L, -247143850L, -1256149295L,
1373683331L, 1191908626L, -695066460L, 464371815L, -1086918211L,
-148710604L, 1343893658L, -524822811L, 1413529727L, -49333754L,
-1988893296L, 1666537523L, -543797216L, 1655197884L, -1246410160L,
-1851033630L, 9100904L, -695529908L, 1640382716L, -2093611022L,
502129920L, 65753972L, -1016459256L, -85585478L, -1322078768L,
-604703764L, -542921836L, 874556370L, -1428289696L, 65085772L,
298086688L, 1428496258L, 428656360L, -2047526676L, -738781636L,
1534841714L, -1353378192L, -982277084L, -178913480L, 1123055770L,
-1829953840L, -1866326852L, 686885524L, 1118261890L, 768975040L,
399529276L, -1484081008L, 1199715874L, 93303304L, -1561543412L,
2004374172L, 642467346L, -812299904L, 1603148308L, 897044008L,
-1060310726L, 1768782800L, 138870380L, 984437556L, 272503442L,
296166752L, 1236213068L, 259740640L, -2045972574L, 1004011432L,
-1327216756L, -287126852L, -308489358L, 1428941040L, -1208941500L,
251151704L, 1914847290L, 295490416L, 1567732796L, 743676692L,
-5904958L, -591176480L, 1463025532L, -974072112L, 870361762L,
302580776L, 215134476L, -648832068L, 739384626L, -127580224L,
677353012L, 1100850120L, 25574586L, -1995358320L, -1805319700L,
-1452477356L, -675804526L, 858237728L, 369863116L, 763057120L,
758664898L, -1483024856L, -1804005716L, 1776565948L, 1381007858L,
132198704L, 1970776228L, -201647432L, 1705908058L, -1319219824L,
-230285956L, 219040020L, 8291650L, -568511744L, 1390453692L,
1422979920L, -1431823070L, -121239224L, 112172044L, 666989660L,
1631323346L, 1045427200L, 934266708L, -565167640L, -1112742918L,
1478312656L, 50990380L, -171621132L, 1300468626L, -1741712288L,
-1687758580L, 1044632288L, 1943248226L, -985467224L, 493518412L,
1347310460L, -1151566030L, -1585690000L, -1763995324L, -1842018344L,
-1339255686L, -1659049424L, 1790867132L, -1317664940L, -245098942L,
314529696L, 1615258812L, 160944592L, 1608511842L, 1021759464L,
-1426926644L, 47445756L, -1333734414L, -1101697920L, -1114527500L,
-1606180728L, 1602441530L, -127956144L, 930173036L, -347640684L,
-409135790L, -197010720L, 1137188044L, -222788448L, 1119470978L,
215577832L, -823008020L, 774457404L, -590762894L, -1125618576L,
-799747548L, 381580856L, -425786854L, 1354050768L, 493792828L,
-822187884L, 334836866L, 1793971136L, -2124666564L, 1278840464L,
-712117214L, -1965359096L, 2114716812L, 640482332L, -2108190574L,
-1839940480L, -1167225836L, 641984296L, 1576767546L, 330930384L,
548839532L, -429566284L, 1189264402L, 70710240L, 1713849292L,
1905699296L, 141522978L, 541297960L, -1900199668L, 378053180L,
-1932908302L, 112307440L, -899793084L, -437887912L, 1536114234L,
-2005190416L, -1570273604L, -1870626284L, -1190132542L, -1305634208L,
1657864572L, -1417749296L, -2054418398L, -1668672344L, 103608716L,
1253134780L, 1881787442L, -355496128L, 1146636980L, -605921976L,
1058838202L, 660212496L, 204216556L, -43529132L, 84816658L, -1497920096L,
1237657548L, -865928352L, 1890553026L, -1452412376L, -352625236L,
1500410684L, 1002651506L, -1812271312L, -1291891676L, -469298632L,
-289466662L, 499114896L, 2133991676L, 636430740L, 1145760578L,
1670656512L, -2081131460L, -1728279984L, -798330029L, -2054628748L,
-17531902L, 1106210455L, 1076287585L, 60552358L, 1420551332L,
608413013L, 1803390463L, -1256463208L, -1975193162L, 1045547587L,
-992809083L, 1219767330L, -2028601584L, -1913095495L, -682168469L,
-123941284L, 2087629706L, -865653889L, 285791945L, 172891998L,
-266779636L, -1337515043L, -187119897L, 147171088L, -1970178514L,
-812307237L, -2015919683L, 1354604330L, 1302939592L, -1420788495L,
2075718243L, -1682401692L, -802464366L, -187827609L, 1017287217L,
1209712502L, 869573780L, -2109433179L, -1538336849L, -1567382424L,
810108998L, 2140634611L, 262650197L, -812790990L, 1870114560L,
-764920855L, 1133889115L, -618440532L, 542592698L, 2117028975L,
1767852537L, 383904910L, -1390033572L, 1182954957L, 1142470967L,
1577001920L, 1202330782L, -731119253L, 1715402989L, -1328946854L,
1062763032L, 1565624193L, -301972173L, -429577388L, -966902430L,
-1437278729L, 1724995137L, -1979166010L, -437681148L, -1052437579L,
-1278530401L, -802559752L, 277356758L, -779220637L, -2010946267L,
1406100162L, -2005626960L, 1954116569L, 792780107L, -1186243652L,
1217692650L, -1433082849L, -551050583L, 240150078L, 2072484652L,
-442772099L, 1311202247L, -30004560L, -1123641138L, -1615736325L,
-1340964259L, 731020682L, 981468584L, -1696412911L, -204191229L,
-1785324284L, 1959049522L, 1908942727L, -1073769007L, -1928853674L,
1623217460L, -1271481275L, 1041966223L, -843697656L, -460329434L,
1067914963L, 171853877L, -1031739502L, -325085344L, -360262967L,
-1881328133L, 1668620748L, 1407322202L, -1213549233L, -1259553767L,
-1452342738L, 75382844L, 1290626477L, -470854057L, 1806804896L,
-70843138L, 1295996875L, -1621466675L, -2145372934L, -1648519368L,
766018785L, 619915155L, -195740748L, 1525272770L, -976124713L,
893006113L, 776689894L, 4066788L, 14821653L, -464543425L, 892431576L,
-2037766922L, -920871165L, -1795729339L, -964167582L, -1194758192L,
1555215609L, -1830699477L, -493180516L, -2057913910L, -1140917569L,
1967016969L, 1125494046L, 1840320716L, 1941075101L, -502462169L,
1081275216L, 1608887790L, 2065259163L, 1076776189L, -736275478L,
-1940798328L, -1266959823L, 1274686627L, 801509540L, 1990295890L,
953564327L, -694847247L, -1192218570L, 1673547988L, 1800119397L,
-1659314577L, -1083292785L)
.repmat <-
function(v,n,p){
  # Tile the row vector v into an n x (length(v)*p) matrix (a small
  # analogue of Matlab's repmat): v is repeated n times vertically and
  # p times horizontally.
  #
  # Fixes: removed a leftover debugging cat('!') that polluted the
  # console on every call with p > 1, replaced T with TRUE, and made the
  # replication count explicit (the old rep(v, n) relied on silent
  # recycling inside matrix() to reach n * length(v) * p values).
  if (p == 1){
    M = cbind(rep(1, n)) %*% v
  } else {
    M = matrix(rep(v, n * p), n, length(v) * p, byrow = TRUE)
  }
  M
}
b2815a3a0939a6cb1967e02c90b99e0e5953fab7 | 7960bc31c107627359e5da7ed7b7cf98d5b69f75 | /man/mcmc_rhat.Rd | cd9a650fac487c1ad182a3d0c3ed3a8349b22a84 | [] | no_license | rpruim/rjpBayes | 51eed1055968b3b130e8c2442b30b4fed4073644 | 89b03c89b3c78b3012e28a1539d036e21873ca68 | refs/heads/master | 2020-03-21T01:14:26.633318 | 2018-06-19T22:00:28 | 2018-06-19T22:00:28 | 137,931,576 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 970 | rd | mcmc_rhat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rhat.R
\name{mcmc_rhat}
\alias{mcmc_rhat}
\alias{mcmc_rhat.default}
\alias{mcmc_rhat.brmsfit}
\title{Create Rhat plot}
\usage{
mcmc_rhat(object, ..., size = NULL)
\method{mcmc_rhat}{default}(object, ..., size = NULL)
\method{mcmc_rhat}{brmsfit}(object, pars = NULL, ..., size = NULL)
}
\arguments{
\item{object}{A model fitted with \code{brms::brm()}.}
\item{...}{Arguments passed to individual methods.}
\item{pars}{An optional character vector of parameter names.
For \code{nuts_params} these will be NUTS sampler parameter
names rather than model parameters. If pars is omitted
all parameters are included.}
}
\description{
\code{mcmc_rhat()} is a generic. The default method simply calls \code{\link[bayesplot:mcmc_rhat]{bayesplot::mcmc_rhat()}}.
An additional method is provided for brmsfit objects so we can avoid first extracting
the R-hat values before creating the plot.
}
|
b122328ca02aa7a67a7381d419b2d694ca98ba46 | 198cc8a8b8c678de3fa6e56f85e4441f10517f0b | /Scripts/random effect models plots.R | f33cb6bbc81bb321bfc88df28552e98015edc0d4 | [] | no_license | morgan-sparks/CnGV-CoGV-Meta-Analysis | 1c5f6dc3440e4ef316deedbc1755b907367d2fb3 | 2a065e49c96021076af7cd6f0d6ce22728a07a9b | refs/heads/main | 2023-04-07T17:41:48.359189 | 2022-10-19T01:36:39 | 2022-10-19T01:36:39 | 398,073,550 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,502 | r | random effect models plots.R | ################################################################################################
# Morgan Sparks, sparks35@purdue.edu, December 2021
#
# Script to make random effects plots for manuscript. It uses the tidybayes package to gather
# draws to plot posterior distributions for model coefficients.
################################################################################################
library(tidybayes); library(ggridges); library(tidyverse); library(metafor); library(brms); library(forcats)
#### setwd
################################################################################################
# plots for CnGV
# Load the fitted countergradient (CnGV) brms model (two random effects:
# paper, and trait nested within paper).
mod_2randeff <- readRDS("~/CnGV-CoGV-Meta-analysis/Data/model_output/mod_norm_logtrans_trait_2randeff_student_sp_allES.rds")
get_variables(mod_2randeff)
### see for example https://github.com/mvuorre/brmstools
# verify there are no divergent transitions
# NOTE(review): list-indexing a brmsfit with ["divergent__"] does not look
# like the documented way to inspect divergences (nuts_params() is) —
# confirm this check does what was intended.
mod_2randeff["divergent__"]
########################
# workflow for paper number plot
# posterior summary, back-transformed off the log scale
# NOTE(review): posterior_samples() is deprecated in recent brms releases
# in favour of as_draws_df().
posteriors <- exp(posterior_samples(mod_2randeff))
round(posterior_summary(posteriors[1:5]), 2)
# pull out posterior distributions for paper variable + b_Intercept
out_r <- spread_draws(mod_2randeff, r_paper_number[paper_number, term], b_Intercept) %>%
  mutate(b_Intercept = exp(r_paper_number))
out_r$paper_number <- as.character(out_r$paper_number)
# pull out b_Intercept and save it as average
out_f <- spread_draws(mod_2randeff, b_Intercept) %>%
  mutate(paper_number = "Average")
out_f$b_Intercept <- exp(out_f$b_Intercept)
# bind the former two together
out_all <- bind_rows(out_r, out_f) %>%
  ungroup() %>% # Ensure that Average effect is on the bottom of the forest plot
  mutate(paper_number = fct_relevel(paper_number, "Average"))
# calculate the mean quantile interval
# http://mjskay.github.io/tidybayes/articles/tidybayes.html#point-summaries-and-intervals-with-the-point_interval-functions-medianmeanmode_qihdi
out_all_sum <- group_by(out_all, paper_number) %>%
  mean_qi(b_Intercept)
reorder_object <- c("Average", 1:65)
## forest plot for papers
# Build the plot as an object and save it with an explicit ggsave() call:
# chaining `+ ggsave(...)` onto a ggplot only worked by accident (ggsave()'s
# default plot is last_plot()) and errors in current ggplot2.
paper_forest <- ggplot(data = out_all_sum, aes(b_Intercept, factor(paper_number, levels = reorder_object))) +
  geom_density_ridges(data = out_all, rel_min_height = 0.01, col = NA, scale = 1, fill = "dodgerblue", alpha = 0.75) +
  geom_pointintervalh(size = 1) +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_text(
    data = mutate_if(out_all_sum, is.numeric, round, 2),
    # Use glue package to combine strings
    aes(label = glue::glue("{b_Intercept} [{.lower}, {.upper}]"), x = Inf),
    hjust = "inward") +
  labs(x = "Intercept", y = "Paper Number") +
  xlim(0, 15) +
  theme_classic()
paper_forest
ggsave("~/Dropbox/PhD Work/Critical Review/Work for Publication/Supplementary Materials/int_mod_paper_randeff_forestplot.pdf",
       plot = paper_forest, width = 6, height = 8, units = "in", dpi = 300)
########################
# workflow for trait nested in paper number plot
out_r <- spread_draws(mod_2randeff, `r_paper_number:Trait`[paper.number.trait, term], b_Intercept) %>%
  mutate(b_Intercept = exp(`r_paper_number:Trait`))
out_r$paper.number.trait <- as.character(out_r$paper.number.trait)
# pull out b_Intercept and save it as average
out_f <- spread_draws(mod_2randeff, b_Intercept) %>%
  mutate(paper.number.trait = "Average")
out_f$b_Intercept <- exp(out_f$b_Intercept)
# bind the former two together
out_all <- bind_rows(out_r, out_f) %>%
  ungroup()
out_all$paper.number.trait <- as.factor(out_all$paper.number.trait)
out_all_sum <- group_by(out_all, paper.number.trait) %>%
  mean_qi(b_Intercept)
## forest plot for trait nested in paper
# Build the plot as an object and save it with an explicit ggsave() call:
# `+ ggsave(...)` at the end of a ggplot chain is an error in current ggplot2.
trait_forest <- ggplot(data = out_all_sum, aes(b_Intercept, paper.number.trait)) +
  geom_density_ridges(data = out_all, rel_min_height = 0.01, col = NA, scale = 1, fill = "dodgerblue", alpha = 0.75) +
  geom_pointintervalh(size = 0.5) +
  geom_text(
    data = mutate_if(out_all_sum, is.numeric, round, 2),
    # Use glue package to combine strings
    aes(label = glue::glue("{b_Intercept} [{.lower}, {.upper}]"), x = Inf),
    hjust = "inward", size = 2) +
  labs(x = "Intercept", y = "Trait Nested in Paper Number") +
  xlim(0, 15) +
  theme_classic(base_size = 8)
trait_forest
ggsave("~/Dropbox/PhD Work/Critical Review/Work for Publication/Supplementary Materials/int_mod_paperXtrait_randeff_forestplot.pdf",
       plot = trait_forest, width = 6, height = 8, units = "in", dpi = 300)
################################################################################################
### make with cogradient
# Load the fitted cogradient (CoGV) brms model with the same two random effects.
mod_2randeff_co <- readRDS("~/CnGV-CoGV-Meta-Analysis/Data/model_output/mod_norm_logtrans_trait_2randeff_student_co_sp_allES.rds")
get_variables(mod_2randeff_co)
### check for divergent transitions
# NOTE(review): as above, nuts_params() is the documented way to inspect
# divergences — confirm this list-indexing check does what was intended.
mod_2randeff_co["divergent__"]
########################
# workflow for paper number plot
#### posterior summary (back-transformed off the log scale)
posteriors_co <- exp(posterior_samples(mod_2randeff_co))
round(posterior_summary(posteriors_co[1:5]), 2)
####
out_r_co <- spread_draws(mod_2randeff_co, r_paper_number[paper_number, term], b_Intercept) %>%
  mutate(b_Intercept = exp(r_paper_number))
out_r_co$paper_number <- as.character(out_r_co$paper_number)
out_f_co <- spread_draws(mod_2randeff_co, b_Intercept) %>%
  mutate(paper_number = "Average")
out_f_co$b_Intercept <- exp(out_f_co$b_Intercept)
out_all_co <- bind_rows(out_r_co, out_f_co) %>%
  ungroup() %>% # Ensure that Average effect is on the bottom of the forest plot
  mutate(paper_number = fct_relevel(paper_number, "Average"))
out_all_sum_co <- group_by(out_all_co, paper_number) %>%
  mean_qi(b_Intercept)
reorder_object_co <- c("Average", 1:15)
# forest plot for paper
# Build the plot as an object and save it with an explicit ggsave() call:
# `+ ggsave(...)` at the end of a ggplot chain is an error in current ggplot2.
co_paper_forest <- ggplot(data = out_all_sum_co, aes(b_Intercept, factor(paper_number, levels = reorder_object_co))) +
  geom_density_ridges(data = out_all_co, rel_min_height = 0.01, col = NA, scale = 1, fill = "dodgerblue", alpha = 0.75) +
  geom_pointintervalh(size = 1) +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_text(
    data = mutate_if(out_all_sum_co, is.numeric, round, 2),
    # Use glue package to combine strings
    aes(label = glue::glue("{b_Intercept} [{.lower}, {.upper}]"), x = Inf),
    hjust = "inward") +
  labs(x = "Intercept", y = "Paper Number") +
  xlim(0, 15) +
  theme_classic()
co_paper_forest
ggsave("~/Dropbox/PhD Work/Critical Review/Work for Publication/Supplementary Materials/int_mod_co_paper_randeff_forestplot.pdf",
       plot = co_paper_forest, width = 8, height = 6, units = "in", dpi = 300)
########################
# workflow for trait nested in paper number plot
out_r_co <- spread_draws(mod_2randeff_co, `r_paper_number:Trait`[paper.number.trait, term], b_Intercept) %>%
mutate(b_Intercept = exp(`r_paper_number:Trait`))
out_r_co$paper.number.trait <- as.character(out_r_co$paper.number.trait)
out_f_co <- spread_draws(mod_2randeff_co, b_Intercept) %>%
mutate(paper.number.trait = "Average")
out_f_co$b_Intercept <- exp(out_f_co$b_Intercept)
out_all_co <- bind_rows(out_r_co, out_f_co) %>%
ungroup()
out_all_sum_co <- group_by(out_all_co, paper.number.trait) %>%
mean_qi(b_Intercept)
# forest plot for trait nested in paper
ggplot(data = out_all_sum_co, aes(b_Intercept, paper.number.trait))+
geom_density_ridges(data = out_all_co, rel_min_height = 0.01, col = NA, scale = 1, fill = "dodgerblue", alpha = 0.75) +
geom_pointintervalh( size = 1) +
geom_vline(xintercept = 0, linetype = "dashed") +
geom_text(
data = mutate_if(out_all_sum_co, is.numeric, round, 2),
# Use glue package to combine strings
aes(label = glue::glue("{b_Intercept} [{.lower}, {.upper}]"), x = Inf),
hjust = "inward") +
labs(x = "Intercept", y = "Trait nested in Paper Number") +
xlim(0,15) +
theme_classic() +
ggsave("~/Dropbox/PhD Work/Critical Review/Work for Publication/Supplementary Materials/int_mod_co_paperXtrait_randeff_forestplot.pdf",
width = 4, height = 6, units = "in", dpi = 300)
################################################################################################
### Distribution plot for cngv and cogv together
out_cn <- spread_draws(mod_2randeff, b_Intercept) %>%
mutate(adaptation= "Countergradient")
out_cn$b_Intercept <- exp(out_cn$b_Intercept)
out_co <- spread_draws(mod_2randeff_co, b_Intercept) %>%
mutate(adaptation = "Cogradient")
out_co$b_Intercept <- exp(out_co$b_Intercept)
out_both <- bind_rows(out_cn, out_co)
out_both_sum <- bind_rows(out_cn, out_co) %>%
group_by(adaptation) %>%
mean_qi(b_Intercept)
out_both_sum_med <- bind_rows(out_cn, out_co) %>%
group_by(adaptation) %>%
median_qi(b_Intercept)
## plot for fig 2 in manuscript
fig2 <- ggplot() +
# geom_density_ridges(rel_min_height = 0.01, col = NA,
# scale = 1, fill = "dodgerblue", alpha = 0.75) +
# geom_pointintervalh(data = out_both_sum, size = 4) +
geom_vline(xintercept = 1.05, linetype = "dashed", color = "darkorchid4", size = 0.75) +
geom_vline(xintercept = 2.13, linetype = "dashed", color = "darkgreen", size = 0.75) +
geom_dots(data = out_both, aes(x = b_Intercept, y = adaptation, color = adaptation)) +
scale_color_manual(values=c("darkgreen", "darkorchid4")) +
geom_pointinterval(data = out_both_sum, aes(x= b_Intercept, y = adaptation, xmin = .lower, xmax = .upper), size = 4)+
geom_point(data = out_both_sum_med, aes(x =b_Intercept, y =adaptation), shape = 18, size = 4, color = "darkgrey") +
geom_text(
data = mutate_if(out_both_sum, is.numeric, round, 2),
# Use glue package to combine strings
aes(label = glue::glue("{b_Intercept} [{.lower}, {.upper}]"), x = Inf, y = adaptation),
hjust = "inward", nudge_y = 0.33) +
xlim(0,8) +
labs(x = "Effect size", y = NULL) +
theme_classic(base_size = 16) +
theme(legend.position = "none",
axis.text.y = element_text(angle=90, hjust = 0.5),
panel.background = element_rect(fill = "white"),
plot.margin = margin(1, 1, 1, 1, "cm"),
plot.background = element_rect(fill = "white"))
ggsave("~/Dropbox/PhD Work/Critical Review/Work for Publication/Tables:Figures/Fig. 2.pdf", fig2,
width = 5, height = 5, units = "in", dpi = 600)
# full version for supplementary materials
fig2_all <-ggplot() +
# geom_density_ridges(rel_min_height = 0.01, col = NA,
# scale = 1, fill = "dodgerblue", alpha = 0.75) +
# geom_pointintervalh(data = out_both_sum, size = 4) +
geom_vline(xintercept = 1.05, linetype = "dashed", color = "darkorchid4", size = 0.75) +
geom_vline(xintercept = 2.13, linetype = "dashed", color = "darkgreen", size = 0.75) +
geom_dots(data = out_both, aes(x = b_Intercept, y = adaptation, color = adaptation)) +
scale_color_manual(values=c("darkgreen", "darkorchid4")) +
geom_pointinterval(data = out_both_sum, aes(x= b_Intercept, y = adaptation, xmin = .lower, xmax = .upper), size = 4)+
geom_point(data = out_both_sum_med, aes(x =b_Intercept, y =adaptation), shape = 18, size = 4, color = "darkgrey") +
geom_text(
data = mutate_if(out_both_sum, is.numeric, round, 2),
# Use glue package to combine strings
aes(label = glue::glue("{b_Intercept} [{.lower}, {.upper}]"), x = Inf, y = adaptation),
hjust = "inward", nudge_y = 0.33) +
labs(x = "Effect size", y = NULL) +
theme_classic(base_size = 16) +
theme(legend.position = "none",
axis.text.y = element_text(angle=90, hjust = 0.5))
ggsave("~/Dropbox/PhD Work/Critical Review/Work for Publication/Supplementary Materials/Fig_2_all_results.png", fig2_all,
width = 6, height = 4, units = "in", dpi = 300)
### coefficients
|
01858fd5cd28134a51607862e62cb0a3feb404e4 | 3f1046972d8ed8f5aaf622d1c6be383201386cfd | /R/imports.R | c358270a6f57cc6d09788cbc68b753a540ae5b2a | [
"MIT"
] | permissive | W529/spatialwarnings | 0e1e81f6bc32edc73330ca9c7fe94d05961613b4 | defa6bf462d0f6906b32298c123bd7f531a9650c | refs/heads/master | 2020-05-22T05:13:06.449867 | 2019-01-20T16:21:04 | 2019-01-20T16:21:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | imports.R | #
# These tags are here to explicitely show what is imported
#
#' @import stats
#' @import utils
# We need to pretend to import moments and poweRlaw as they
# are used in the package build process. They are not
# used in the final package tree though.
# We import here two harmless functions so that CRAN
# checks do note produce NOTEs (on certain archs only).
# See: https://groups.google.com/forum/#!topic/rdevtools/qT6cJt6DLJ0
#' @importFrom moments moment
#' @importFrom poweRlaw get_n
|
8874e58427334691c08babf93db6031017e162a9 | 62801439c226c0f72bec07c52b557039cb8aece6 | /man/Cluster_Medoids.Rd | 04b1f3594de63c54ce7d2a4b287c722f2a06c1d1 | [] | no_license | mlampros/ClusterR | ec790ef2150754e79f4cef79369892f7b65cd03f | 4942f0d985a3c461852e4fc9efcb70e5ea3c9959 | refs/heads/master | 2023-06-11T06:22:59.245954 | 2023-05-14T05:37:22 | 2023-05-14T05:37:22 | 68,004,249 | 82 | 27 | null | 2022-12-12T11:56:44 | 2016-09-12T11:46:14 | R | UTF-8 | R | false | true | 2,989 | rd | Cluster_Medoids.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering_functions.R
\name{Cluster_Medoids}
\alias{Cluster_Medoids}
\title{Partitioning around medoids}
\usage{
Cluster_Medoids(
data,
clusters,
distance_metric = "euclidean",
minkowski_p = 1,
threads = 1,
swap_phase = TRUE,
fuzzy = FALSE,
verbose = FALSE,
seed = 1
)
}
\arguments{
\item{data}{matrix or data frame. The data parameter can be also a dissimilarity matrix, where the main diagonal equals 0.0 and the number of rows equals the number of columns}
\item{clusters}{the number of clusters}
\item{distance_metric}{a string specifying the distance method. One of, \emph{euclidean}, \emph{manhattan}, \emph{chebyshev}, \emph{canberra}, \emph{braycurtis}, \emph{pearson_correlation}, \emph{simple_matching_coefficient}, \emph{minkowski}, \emph{hamming}, \emph{jaccard_coefficient}, \emph{Rao_coefficient}, \emph{mahalanobis}, \emph{cosine}}
\item{minkowski_p}{a numeric value specifying the minkowski parameter in case that distance_metric = "minkowski"}
\item{threads}{an integer specifying the number of cores to run in parallel}
\item{swap_phase}{either TRUE or FALSE. If TRUE then both phases ('build' and 'swap') will take place. The 'swap_phase' is considered more computationally intensive.}
\item{fuzzy}{either TRUE or FALSE. If TRUE, then probabilities for each cluster will be returned based on the distance between observations and medoids}
\item{verbose}{either TRUE or FALSE, indicating whether progress is printed during clustering}
\item{seed}{`r lifecycle::badge("deprecated")` `seed` (integer value for random number generator (RNG)) is no longer supported and will be removed in version 1.4.0}
}
\value{
a list with the following attributes: medoids, medoid_indices, best_dissimilarity, dissimilarity_matrix, clusters, fuzzy_probs (if fuzzy = TRUE), silhouette_matrix, clustering_stats
}
\description{
Partitioning around medoids
}
\details{
Due to the fact that I didn't have access to the book 'Finding Groups in Data, Kaufman and Rousseeuw, 1990' (which includes the exact algorithm) I implemented the 'Cluster_Medoids' function based on the paper 'Clustering in an Object-Oriented Environment' (see 'References').
Therefore, the 'Cluster_Medoids' function is an approximate implementation and not an exact one. Furthermore, in comparison to k-means clustering, the function 'Cluster_Medoids' is more robust, because it minimizes the sum of unsquared dissimilarities. Moreover, it doesn't need initial guesses for the cluster centers.
}
\examples{
data(dietary_survey_IBS)
dat = dietary_survey_IBS[, -ncol(dietary_survey_IBS)]
dat = center_scale(dat)
cm = Cluster_Medoids(dat, clusters = 3, distance_metric = 'euclidean', swap_phase = TRUE)
}
\references{
Anja Struyf, Mia Hubert, Peter J. Rousseeuw, (Feb. 1997), Clustering in an Object-Oriented Environment, Journal of Statistical Software, Vol 1, Issue 4
}
\author{
Lampros Mouselimis
}
|
9655908a7bb2df3ff52c74257b17607d8e547e17 | 3853c7c2aec2afda0e14bf57c76aae4408ee74d2 | /man/getWordCloud.Rd | 964d9f27369fd9e92bd0f6ef832d0dc792422b70 | [] | no_license | lenamax2355/shinyr | 9342a439046517f4dd836f0b38dcc41b2d3dd6e0 | 5f1194b9ca6f39a2446aed166c31b68196bec108 | refs/heads/master | 2023-06-09T01:53:26.610183 | 2021-06-23T04:38:34 | 2021-06-23T04:38:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 431 | rd | getWordCloud.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/word_cloud.R
\name{getWordCloud}
\alias{getWordCloud}
\title{Get Word Cloud}
\usage{
getWordCloud(d)
}
\arguments{
\item{d}{table of word's frequency}
}
\value{
Word cloud plot
}
\description{
Get word cloud for given table of words' frequencies
}
\details{
getWordCloud
}
\examples{
\donttest{
x <- getFeqTable("Hello! R is Great")
getWordCloud(x)
}
}
|
2d360fa2d83afcac0b7b450951c06173e16ae0f2 | 6b83338df3d7e354a68e629e071e24883a495d6f | /man/mcri.palettes.Rd | 6a252050cc1335bf67a6c7ae60292d116c3eb60f | [] | no_license | lazappi/mcriPalettes | 123d901dc74802379cfef160f07633940e00817d | 4992c8aae27b87d77da07f6234dc4e5127d81b06 | refs/heads/master | 2021-01-16T21:40:01.246452 | 2017-08-14T04:21:20 | 2017-08-14T04:21:20 | 60,232,858 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 370 | rd | mcri.palettes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/palettes.R
\docType{data}
\name{mcri.palettes}
\alias{mcri.palettes}
\title{Complete list of MCRI palettes}
\format{An object of class \code{list} of length 13.}
\usage{
mcri.palettes
}
\description{
Use \code{\link{mcriPalette}} to construct palettes of desired length.
}
\keyword{datasets}
|
19bc2a3c190de8c23a1676371d9bef40776ff8c8 | 5b2f016f1298c790224d83c1e17a425640fc777d | /monod/analysis/code/R20151001.Group.Specificity.Index.R | 4eb09b47706f88dafba48b074b759a1c760ffe14 | [] | no_license | Shicheng-Guo/methylation2020 | b77017a1fc3629fe126bf4adbb8f21f3cc9738a0 | 90273b1120316864477dfcf71d0a5a273f279ef9 | refs/heads/master | 2023-01-15T20:07:53.853771 | 2020-02-28T03:48:13 | 2020-02-28T03:48:13 | 243,668,721 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 15,933 | r | R20151001.Group.Specificity.Index.R | ########################################################################################
### Title: Group Specificity Index (GSI) for Genome-wide Methylation Haplotype dataset
### Author: Shicheng Guo, Ph.D. Email: Shicheng.Guo@hotmail.com
### updata time: 9/1/2015
########################################################################################
library("impute")
RawNARemove<-function(data,missratio=0.3){
threshold<-(missratio)*dim(data)[2]
NaRaw<-which(apply(data,1,function(x) sum(is.na(x))>threshold))
zero<-which(apply(data,1,function(x) all(x==0))==T)
NaRAW<-c(NaRaw,zero)
if(length(NaRAW)>0){
dat<-data[-NaRAW,]
}else{
dat<-data;
}
dat
}
bedwithgap<-function(bed,gap){
bed<-as.matrix(bed)
bed[,2]=as.numeric(bed[,2])-gap
bed[,3]=as.numeric(bed[,3])+gap
bed<-data.frame(bed)
bed
}
Rbedtools<-function(functionstring="intersectBed",bed1,bed2,opt.string=""){
#create temp files
a.file=tempfile()
b.file=tempfile()
out =tempfile()
options(scipen =99) # not to use scientific notation when writing out
#write bed formatted dataframes to tempfile
write.table(bed1,file=a.file,quote=F,sep="\t",col.names=F,row.names=F)
write.table(bed2,file=b.file,quote=F,sep="\t",col.names=F,row.names=F)
# create the command string and call the command using system()
command=paste(functionstring,"-a",a.file,"-b",b.file,opt.string,">",out,sep=" ")
cat(command,"\n")
try(system(command))
res=read.table(out,header=F)
unlink(a.file);unlink(b.file);unlink(out)
return(res)
}
cor2bed<-function(cor){
a<-unlist(lapply(strsplit(as.character(cor),split=c(":")),function(x) strsplit(x,"-")))
bed<-matrix(a,ncol=3,byrow=T)
return(data.frame(bed))
}
bed2cor<-function(bed){
cor<-apply(bed,1,function(x){paste(unlist(strsplit(x,"\t"))[1],":",unlist(strsplit(x,"\t"))[2],"-",unlist(strsplit(x,"\t"))[3],sep="")})
cor<-gsub("[ ]","",cor)
return(cor)
}
#########################################################################################
setwd("/home/shg047/monod/oct/data")
file1<-read.table("WGBS_methHap_load_matrix_Oct2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
file1<-data.matrix(file1[,13:56]) # remove H1 and WBC, cancer
colnames(file1)
colnames(file1)<-gsub("_","-",colnames(file1))
colname2<-unlist(lapply(colnames(file1),function(x) unlist(strsplit(x,"[.]"))[1]))
saminfo2<-read.table("/home/shg047/monod/phase2/newsaminfo.txt",head=T,sep="\t",as.is=T)
saminfo2<-saminfo2[na.omit(match(colname2,saminfo2[,1])),]
colnames(file1)<-saminfo2[,2]
colnames(file1)
f2<-RawNARemove(file1,missratio=0.4)
file1<-impute.knn(f2)$data
group=names(table(colnames(file1)))
index=colnames(file1)
gsi<-c()
gmaxgroup<-c()
for(i in 1:nrow(file1)){
gsit<-0
gmax<-names(which.max(tapply(as.numeric(file1[i,]),index,mean)))
for(j in 1:length(group)){
tmp<-(1-10^(mean(file1[i,][which(index==group[j])]))/10^(mean(file1[i,][which(index==gmax)])))/(length(group)-1)
gsit<-gsit+tmp
}
gmaxgroup<-c(gmaxgroup,gmax)
gsi<-c(gsi,gsit)
print(c(gmax,gsit))
}
rlt=data.frame(region=rownames(file1),group=gmaxgroup,GSI=gsi)
write.table(rlt,file="Table.GSI.WGBS.Remove.H1.WBC.rlt.txt",col.names=T,row.names=F,quote=F,sep="\t")
# each take top 5 tissue-specific methylation regions.
data<-read.table(file="Table.GSI.WGBS.Remove.H1.WBC.rlt.txt",head=T,sep="\t",as.is=T)
# heat only have 3 high GSI regions
sum(table(subset(data,GSI>0.5)[,2]))
head(data)
tissue<-names(table(data[,2]))
tissue<-sort(c("Brain","Heart","muscle","Vessel","Spleen","Kidney","Ovary","Esophagus","Thymus","Lung","Liver","Pancreas","Stomach","Gastric","Intestine","Colon","Bladder"))
choose<-c()
for(i in 1:length(tissue)){
tmp<-subset(data,group==tissue[i])
tmp<-tmp[order(tmp[,3],decreasing=T),]
if(nrow(tmp)>5){
choose<-c(choose,tmp[1:80,1])
}else{
choose<-c(choose,tmp[,1])
}
choose
}
tmp2<-file1[match(choose,rownames(file1)),]
tmp2<-tmp2[,unlist(lapply(tissue,function(x) grep(x,colnames(tmp2))))]
write.table(tmp2,file="high.gsi.tissue.matrix.txt",sep='\t',quote=F,col.names=NA,row.names=T)
xx<-order(rowSums(tmp2),decreasing=T)[1:50]
yy<-which(unlist(apply(tmp2,1,function(x) max(x)<0.4)))
zz<-which(unlist(apply(tmp2,1,function(x) sum(x>0.5)>0.65*length(x))))
tmp2<-tmp2[-as.vector(c(xx,yy,zz)),]
tmp2[tmp2<0.4]<-0
library("grDevices")
library("gplots")
filename=paste("Figure-20-7-2-1-2-1",60,"pdf",sep=".")
pdf(filename)
col=colorRampPalette(c("yellow", "blue"))(20)
heatmap.2(tmp2,col=col,trace="none",density.info="none",Colv=F,Rowv=F,key=T,keysize=1,cexCol=0.8,labRow=NA)
dev.off()
# tissues signatures
names(table(data[,2]))
lung.signature<-subset(data,GSI>0.5 & group=="Lung") # 0.52 for bspp
colon.signature<-subset(data,GSI>0.55 & group=="Colon")
pancrease.signature<-subset(data,GSI>0.68 & group=="Pancreas")
nrow(lung.signature)
nrow(colon.signature)
nrow(pancrease.signature)
# prediction section
file1<-read.table("RRBS_methHap_load_matrix_Oct2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
colnames(file1)
f2<-RawNARemove(file1,missratio=0.4)
file1<-impute.knn(data.matrix(f2))$data
# remove solid tissue
samplename1=sapply(strsplit(colnames(file1),"[.]"),function(x) unlist(x)[1]) # get sample name
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1]) # get sample id
remove=c(samplename2[grep("6-T",samplename2)],samplename2[grep("PC-T",samplename2)],samplename2[grep("CTT-",samplename2)],samplename2[grep("7-T",samplename2)])
file1<-file1[,-match(remove,samplename2)]
samplename1=sapply(strsplit(colnames(file1),"[.]"),function(x) unlist(x)[1])
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1])
new<-read.table("/home/shg047/monod/phase2/saminfo.txt",sep="\t",as.is=T)
cor1<-match(samplename2,new[,3])
lab1<-new[cor1,4]
groupname=lab1
matrix=file1
samplename2<-gsub("6-P","CC-P",samplename2)
samplename2<-gsub("7-P","LC-P",samplename2)
samplename2<-gsub("6-T","CC-T",samplename2)
samplename2<-gsub("7-T","LC-T",samplename2)
samplename2<-gsub("frozen","Frozen",samplename2)
samplename2<-gsub("-100ng","",samplename2)
samplename2<-gsub("-5ng","",samplename2)
samplename2<-gsub("CTT","CC-T",samplename2)
colnames(matrix)=samplename2
# random Forest
x<-t(matrix)
y<-as.factor(sapply(colnames(matrix),function(x) substr(x,1,2)))
fit<-randomForest(scale(x),y,importance=T)
top<-order(fit$importance[,6],decreasing=T)[1:500]
fit<-randomForest(scale(x)[,top],y,importance=T)
fit
bed1<-cor2bed(rownames(matrix))
bed2<-cor2bed(lung.signature[,1])
bed3<-cor2bed(colon.signature[,1])
bed4<-cor2bed(pancrease.signature[,1])
rlt.lung<-Rbedtools(functionstring="intersectBed",bed1=bed1,bed2=bed2,opt.string="-wa -u")
rlt.colon<-Rbedtools(functionstring="intersectBed",bed1=bed1,bed2=bed3,opt.string="-wa -u")
rlt.pancrease<-Rbedtools(functionstring="intersectBed",bed1=bed1,bed2=bed4,opt.string="-wa -u")
cor.lung<-bed2cor(rlt.lung)
cor.colon<-bed2cor(rlt.colon)
cor.pancrease<-bed2cor(rlt.pancrease)
x<-y<-c()
z1<-z2<-z3<-z4<-z5<-c()
for(mhl in seq(0,0.3,by=0.01)){
for(ratio in seq(0,0.2,by=0.01)){
# assess the prediction performance to cancer orign
data.prediction.lung<-file1[match(cor.lung,rownames(file1)),] # collect lung data
data.prediction.lung<-file1[match(cor.lung,rownames(file1)),c(grep("7-P",colnames(file1)))] # collect lung data
data.prediction.colon<-file1[match(cor.colon,rownames(file1)),c(grep("6-P",colnames(file1)))] # collect colon data
data.prediction.pancrease<-file1[match(cor.pancrease,rownames(file1)),c(grep("PC-P",colnames(file1)))] # collect pancrease data
x1<-apply(data.prediction.lung,2,function(x) sum(x>mhl)/length(x))
x2<-apply(data.prediction.colon,2,function(x) sum(x>mhl)/length(x))
x3<-apply(data.prediction.pancrease,2,function(x) sum(x>mhl)/length(x))
# assess the prediction performance with Normal plasma as control
data.prediction.lung<-file1[match(cor.lung,rownames(file1)),grep("NC-P",colnames(file1))]
data.prediction.colon<-file1[match(cor.colon,rownames(file1)),grep("NC-P",colnames(file1))]
data.prediction.pancrease<-file1[match(cor.pancrease,rownames(file1)),grep("NC-P",colnames(file1))]
y1<-apply(data.prediction.lung,2,function(x) sum(x>mhl)/length(x))
y2<-apply(data.prediction.colon,2,function(x) sum(x>mhl)/length(x))
y3<-apply(data.prediction.pancrease,2,function(x) sum(x>mhl)/length(x))
print(c(x1,x2,x3,y1,y2,y3))
x<-c(x,mhl)
y<-c(y,ratio)
z1<-c(z1,sum(x1>ratio)/length(x1)) # accuracy for lung
z2<-c(z2,sum(x2>ratio)/length(x2)) # accuracy for colon
z3<-c(z3,sum(x3>ratio)/length(x3)) # accuracy for pancrease
z4<-c(z4,(sum(y1>ratio)+sum(y2>ratio)+sum(y3>ratio))/(3*length(y1))) # accuracy for lung
z5<-c(z5,z1+z2+z3-z4)
}
}
rlt1<-data.frame(x,y,z1,z2,z3,z4,z5)
write.table(rlt1,file="gsi.prediction.paramter.txt",quote=F,sep="\t")
### BSPP
file2<-read.table("/home/sguo/monod/phase2/150209_BSPP_mld_blocks_stringent_mhl_matrix.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
colnames(file2)
samplename1=sapply(strsplit(colnames(file2),"[.]"),function(x) unlist(x)[1])
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1])
remove=c("6-T-3","6-T-4","7-T-2",paste("NC-P-",19:24,sep=""),"PC-P-10","6-P-6",paste("PC-P-",c(2,3,6,9),sep=""))
remove
file2<-file2[,-match(remove,samplename2)]
head(file2)
samplename1=sapply(strsplit(colnames(file2),"[.]"),function(x) unlist(x)[1])
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1])
new<-read.table("saminfo.txt",sep="\t",as.is=T)
cor1<-match(samplename2,new[,3])
lab1<-new[cor1,4]
groupname=lab1
matrix=file2
samplename2<-gsub("6-P","CC-P",samplename2)
samplename2<-gsub("7-P","LC-P",samplename2)
samplename2<-gsub("6-T","CC-T",samplename2)
samplename2<-gsub("7-T","LC-T",samplename2)
samplename2<-gsub("frozen","Frozen",samplename2)
samplename2<-gsub("-100ng","",samplename2)
samplename2<-gsub("-5ng","",samplename2)
samplename2<-gsub("CTT","CC-T",samplename2)
colnames(matrix)=samplename2
bed1<-cor2bed(rownames(matrix))
bed2<-cor2bed(lung.signature[,1])
bed3<-cor2bed(colon.signature[,1])
bed4<-cor2bed(pancrease.signature[,1])
rlt.lung<-Rbedtools(functionstring="intersectBed",bed1=bed1,bed2=bed2,opt.string="-wa -u")
rlt.colon<-Rbedtools(functionstring="intersectBed",bed1=bed1,bed2=bed3,opt.string="-wa -u")
rlt.pancrease<-Rbedtools(functionstring="intersectBed",bed1=bed1,bed2=bed4,opt.string="-wa -u")
cor.lung<-bed2cor(rlt.lung)
cor.colon<-bed2cor(rlt.colon)
cor.pancrease<-bed2cor(rlt.pancrease)
choose.prediction<-unique(c(cor.lung,cor.colon,cor.pancrease))
data.predition<-file2[match(choose.prediction,rownames(file2)),]
write.table(data.predition,file="BSPP.subset.WGBS.TSI.Predition.txt",sep="\t",col.names=NA,row.names=T,quote=F) # send to desktop
prediction.data.plasma<-data.matrix(data.predition[,c(grep("6P",colnames(data.predition)),grep("7P",colnames(data.predition)),grep("NC",colnames(data.predition)))])
data.prediction.lung<-file1[match(cor.lung,rownames(file2)),c(grep("7P",colnames(file2)))]
data.prediction.colon<-file1[match(cor.colon,rownames(file2)),c(grep("6P",colnames(file2)))]
x1<-apply(data.prediction.lung,2,function(x) sum(x>0)/length(x))
x2<-apply(data.prediction.colon,2,function(x) sum(x>0/length(x)))
dim(data.prediction.lung)
dim(data.prediction.colon)
data.prediction.lung.normal.plasma<-file2[match(cor.lung,rownames(file2)),c(grep("NC",colnames(file2)))]
data.prediction.colon.normal.plasma<-file2[match(cor.colon,rownames(file2)),c(grep("NC",colnames(file2)))]
x1<-apply(data.prediction.lung.normal.plasma,2,function(x) sum(x>0)/length(x))
x2<-apply(data.prediction.colon.normal.plasma,2,function(x) sum(x>0)/length(x))
choose.prediction<-unique(c(cor.lung,cor.colon,cor.pancrease))
data.predition<-file1[match(choose.prediction,rownames(file1)),]
write.table(data.predition,file="RRBS.subset.WGBS.TSI.Predition.txt",sep="\t",col.names=NA,row.names=T,quote=F) # send to desktop
prediction.data.plasma<-data.matrix(data.predition[,c(grep("6-P",colnames(data.predition)),grep("7-P",colnames(data.predition)),grep("PC-P",colnames(data.predition)))])
# take normal plasma as the control
prediction.data.plasma<-data.matrix(data.predition[,grep("NC-P",colnames(data.predition))])
# GSI estimation to prediction.plasma.data
colnames(prediction.data.plasma)<-c(rep("7-P",9),rep("6-P",10),rep("PC-P",5))
apply(prediction.data.plasma,2,function(x) sum(x>0)/length(x))
group=names(table(colnames(prediction.data.plasma)))
index=colnames(prediction.data.plasma)
gsi<-c()
gmaxgroup<-c()
for(i in 1:nrow(prediction.data.plasma)){
gsit<-0
gmax<-names(which.max(tapply(as.numeric(prediction.data.plasma[i,]),index,mean)))
for(j in 1:length(group)){
tmp<-(1-10^(mean(prediction.data.plasma[i,][which(index==group[j])]))/10^(mean(prediction.data.plasma[i,][which(index==gmax)])))/(length(group)-1)
gsit<-gsit+tmp
}
gmaxgroup<-c(gmaxgroup,gmax)
gsi<-c(gsi,gsit)
print(c(gmax,gsit))
}
rlt=data.frame(region=rownames(prediction.data.plasma),group=gmaxgroup,GSI=gsi)
new.rlt<-rlt[order(rlt[,3],decreasing=T)[1:20],]
prediction.data.plasma.new<-prediction.data.plasma[match(new.rlt[,1],rownames(prediction.data.plasma)),]
library("grDevices")
library("gplots")
col=colorRampPalette(c("white", "red"))(20)
pdf("Figure.RRBS.subset.WGBS.TSI.Predition.pdf")
prediction.data.plasma.new<-prediction.data.plasma.new[,order(colnames(prediction.data.plasma.new))]
heatmap.2(data.matrix(prediction.data.plasma.new),col=greenred(20),trace="none",density.info="none",Colv=F,Rowv=F,key=T,keysize=1,cexCol=0.7,labRow=NA)
dev.off()
x<-c(29,31,36,43,48,47,40,35,29,28)
names(x)<-c("-4*up","-3*up","-2*up","-1*up","Domain","-1*down","-2*down","-3*down","-4*down")
barplot(x,col="blue",ylim=c(0,50))
# Seq-Cap
file1<-read.table("/home/shg047/monod/oct/data/WGBS_SeqCap_methHap_load_matrix_Oct2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
colnames(file1)
f2<-RawNARemove(file1,missratio=0.35)
file1<-impute.knn(data.matrix(f2))$data
# remove solid tissue
samplename1=sapply(strsplit(colnames(file1),"[.]"),function(x) unlist(x)[1]) # get sample name
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1]) # get sample id
remove=c(samplename2[grep("6-T",samplename2)],samplename2[grep("PC-T",samplename2)],samplename2[grep("CTT-",samplename2)],samplename2[grep("7-T",samplename2)])
if(length(remove)>0){
file1<-file1[,-match(remove,samplename2)]
}else{
file1<-file1
}
colnames(file1)
samplename1=sapply(strsplit(colnames(file1),"[.]"),function(x) unlist(x)[1])
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1])
samplename2
samplename2<-gsub("6P","CC-P",samplename2)
samplename2<-gsub("7P","LC-P",samplename2)
samplename2<-gsub("6T","CC-T",samplename2)
samplename2<-gsub("7T","LC-T",samplename2)
samplename2<-gsub("frozen","Frozen",samplename2)
samplename2<-gsub("-100ng","",samplename2)
samplename2<-gsub("-5ng","",samplename2)
samplename2<-gsub("CTT","CC-T",samplename2)
samplename2<-gsub("PCP","PC-P",samplename2)
samplename2<-gsub("PCT","PC-T",samplename2)
samplename2<-gsub("NC-","NC-P-",samplename2)
samplename2
colnames(file1)=samplename2
colnames(file1)
x<-t(file1)
y<-as.factor(sapply(colnames(file1),function(x) substr(x,1,2)))
library("randomForest")
fit<-randomForest(scale(x),y,importance=T)
top<-order(fit$importance[,6],decreasing=T)[1:150]
fit<-randomForest(scale(x)[,top],y,importance=T)
fit
######## supplementary code
|
aac1b4dfa2ac401a297010a0827e0fb90a967745 | 04cdef0025feee4392113e5c331eadccae75da61 | /man/compare_summarise_daily.Rd | 0ab21fc7d66f9d8092e85e7c077224a11dab3aaf | [] | no_license | NotetoNote/animaltracker | 683e01b9c9e20c3726e612a1477d2924f1df8cef | d3e050d40e13ab8160610bdbeb07e5015ecc26cb | refs/heads/master | 2020-12-13T09:33:03.058637 | 2019-12-22T00:54:26 | 2019-12-22T00:54:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,242 | rd | compare_summarise_daily.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_analysis.R
\name{compare_summarise_daily}
\alias{compare_summarise_daily}
\title{Compares two animal datasets and calculates daily summary statistics by GPS
GPS, date, lat, long, course, distance, rate, elevation column names should match.}
\usage{
compare_summarise_daily(correct, candidate, out)
}
\arguments{
\item{correct}{reference df}
\item{candidate}{df to be compared to the reference}
\item{out}{desired file name of .csv output summary}
}
\value{
summary df
}
\description{
Compares two animal datasets and calculates daily summary statistics by GPS
GPS, date, lat, long, course, distance, rate, elevation column names should match.
}
\examples{
# Compare and summarise unfiltered demo cows to filtered, grouped by both Date and GPS
\donttest{
\dontrun{
## Get elevation data for unfiltered demo
unfiltered_elev <- lookup_elevation_aws(demo_unfiltered, zoom=1,
get_slope=FALSE, get_aspect=FALSE)
## Get elevation data for filtered demo
filtered_elev <- lookup_elevation_aws(demo_filtered, zoom=1, get_slope=FALSE, get_aspect=FALSE)
## Compare and summarise
compare_summarise_daily(unfiltered_elev, filtered_elev, "ex_compare_daily.csv")
}
}
}
|
4b9deb829e761f0ecccd626d1268ca1baf097ced | 6cb4fbdd76a338d95f8348ef1351ec124aebc39f | /R/type-data-frame.R | e5723e13d27dd0125414b01b962e41fc051b5ee1 | [] | no_license | kevinykuo/vctrs | f0224bd1b011535935a8bc8d198da181d090b6ef | 2992d4737d35ff4dbd6c15b895fc4c2dc6c71066 | refs/heads/master | 2020-03-25T23:16:53.434722 | 2018-08-10T00:25:01 | 2018-08-10T00:25:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,545 | r | type-data-frame.R | # TODO: consider names and export so that they can be used by
# data frame subclasses in other packages
data_frame <- function(...) {
cols <- tibble::set_tidy_names(list(...))
if (length(cols) > 0) {
n <- length(cols[[1]])
} else {
n <- 0L
}
new_data_frame(cols, n = n)
}
new_data_frame <- function(x, n, subclass = NULL) {
n <- as.integer(n)
structure(
x,
class = c(subclass, "data.frame"),
row.names = .set_row_names(n)
)
}
new_tibble <- function(x, n) {
new_data_frame(x, n, subclass = c("tbl_df", "tbl"))
}
df_col_type2 <- function(x, y) {
common <- intersect(names(x), names(y))
only_x <- setdiff(names(x), names(y))
only_y <- setdiff(names(y), names(x))
# Find types
if (length(common) > 0) {
common_types <- map2(x[common], y[common], vec_type2)
} else {
common_types <- list()
}
only_x_types <- map(x[only_x], vec_subset, 0L)
only_y_types <- map(y[only_y], vec_subset, 0L)
# Combine and restore order
out <- c(common_types, only_x_types, only_y_types)
out[c(names(x), setdiff(names(y), names(x)))]
}
df_col_cast <- function(x, to) {
# Coerce common columns
common <- intersect(names(x), names(to))
x[common] <- map2(x[common], to[common], vec_cast)
# Add new columns
from_type <- setdiff(names(to), names(x))
x[from_type] <- map(to[from_type], vec_na, n = vec_length(x))
# Warn about dropped columns
dropped <- setdiff(names(x), names(to))
if (length(dropped) > 0 ) {
warn_cast_lossy_dataframe(x, to, dropped)
}
x[names(to)]
}
|
0e8ef403b4c44f18b0791acf99bf1cbc962bf8d8 | bc1fb36519974a949925fd243bbd51a8dafa9128 | /hw2redoo.R | fc1011fce088b704e566a6db25b60bcfb0153310 | [] | no_license | ramenkup/370HW2 | bb88563bff31c7c2b3e6572be2b3359e75b89b68 | d3bbb7eb1917d739a4ed31dacc40a968ce17e3b4 | refs/heads/master | 2021-01-10T05:00:46.642942 | 2016-01-13T22:56:28 | 2016-01-13T22:56:28 | 49,603,487 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,171 | r | hw2redoo.R |
#---------------------PART 1-------------------
#1
# Population variance of a numeric vector: rescales the sample variance
# returned by var() (denominator n - 1) to the population denominator n.
popvar <- function(x) {
  n <- length(x)
  var(x) * (n - 1) / n
}
# Split mpg by number of cylinders and compute the pooled (size-weighted)
# within-group population variance across the three cylinder groups.
c4 <- mtcars$mpg[which(mtcars$cyl==4)]
c6 <- mtcars$mpg[which(mtcars$cyl==6)]
c8 <- mtcars$mpg[which(mtcars$cyl==8)]
car_count<- length(c4)+length(c6)+ length(c8)
resi<-((popvar(c4)*length(c4))+(popvar(c6)*length(c6))+(popvar(c8)*length(c8)))/(car_count)
# residual variance of mpg after accounting for cylinder grouping
# Percentage of the variance in `total` explained by a three-level grouping.
#
# g1, g2, g3: the response values for each of the three groups.
# count:      total number of observations (length(g1) + length(g2) + length(g3)).
# total:      full response vector; defaults to mtcars$mpg, which was
#             hard-coded in the original, so existing calls are unchanged.
#
# Returns: explained variance as a percentage of the total population variance.
resi_func <- function(g1, g2, g3, count, total = mtcars$mpg) {
  # Pooled within-group (residual) population variance, weighted by group size.
  resid <- (popvar(g1) * length(g1) +
            popvar(g2) * length(g2) +
            popvar(g3) * length(g3)) / count
  total_var <- popvar(total)
  # Explained = total variance minus residual, expressed as a percentage.
  (total_var - resid) / total_var * 100
}
# total variance minus residual == amount explained by cylinder (as a percent)
((popvar(mtcars$mpg)-resi)/popvar(mtcars$mpg))*100
#(73.25%)
resi_func(c4,c6,c8,car_count)
#(73.25%)
#2 -- repeat the same analysis with gear count as the grouping variable
g3 <- mtcars$mpg[which(mtcars$gear==3)]
g4 <- mtcars$mpg[which(mtcars$gear==4)]
g5 <- mtcars$mpg[which(mtcars$gear==5)]
car_count<- length(g3)+length(g4)+length(g5)
gr_Resi<- ((popvar(g3) *length(g3))+(popvar(g4)*length(g4))+(popvar(g5)*length(g5)))/(car_count)
((popvar(mtcars$mpg)-gr_Resi)/popvar(mtcars$mpg))*100
#42.92%
resi_func(g3,g4,g5,car_count)
#(42.92%)
#---------------------PART 2-------------------
#3
# Within-group sum of squares: sum of squared deviations of `group` from its
# own mean. Vectorized replacement for the original element-wise loop, whose
# `1:length(group)` indexing would have produced NA for an empty vector;
# an empty group now contributes 0 to the total.
SS_within <- function(group) {
  if (length(group) == 0) {
    return(0)
  }
  sum((group - mean(group))^2)
}
# Total within-group sum of squares for the cylinder grouping, and the
# grand mean of mpg across all cars.
cyl_within<-SS_within(c4)+SS_within(c6)+SS_within(c8)
cyl_within
#[1] 301.2626
GM<- mean(mtcars$mpg)
GM
#[1] 20.09062
# Between-group sum-of-squares contribution of one group: group size times
# the squared deviation of the group mean from the grand mean.
SS_between <- function(group, grandmean) {
  deviation <- mean(group) - grandmean
  length(group) * deviation^2
}
# Total between-group sum of squares for the cylinder grouping.
cyl_between<-SS_between(c4,GM)+SS_between(c6,GM)+SS_between(c8,GM)
cyl_between
#Between: [1] 824.7844
# Total sum of squares from its within- and between-group components.
SS_total <- function(within, between) {
  within + between
}
# Total sum of squares computed directly from the response vector:
# sample variance times its degrees of freedom (n - 1).
SS_totz <- function(y) {
  var(y) * (length(y) - 1)
}
# Three equivalent ways of getting the total SS -- all agree at 1126.047.
SS_totz(mtcars$mpg)
#1126.047
cyl_total<- cyl_between+cyl_within
cyl_total
#[1] 1126.047
SS_total(cyl_within,cyl_between)
#[1] 1126.047
# Degrees of freedom for the one-way ANOVA on cylinders.
J<-3#number of groups
N<-length(mtcars$mpg)
DF_Between<-J-1 #J = number of groups (i.e. 3 cylinder groups)
DF_Within<-N-J
# Mean square between groups: the between-group sum of squares divided by
# its degrees of freedom. `df` defaults to the script-level global
# DF_Between (the original behavior), but can now be passed explicitly so
# the function does not silently depend on global state.
MS_between <- function(group, df = DF_Between) {
  group / df
}
# Mean square between cylinder groups (uses global DF_Between).
cyl_MS_Between<-MS_between(cyl_between)
#[1] 412.3923
# Mean square within groups: the within-group sum of squares divided by its
# degrees of freedom. `df` defaults to the script-level global DF_Within
# (the original behavior), but can now be passed explicitly so the function
# does not silently depend on global state.
MS_within <- function(within, df = DF_Within) {
  within / df
}
# Mean square within cylinder groups (uses global DF_Within).
cyl_MS_Within<-MS_within(cyl_within)
#[1] 10.38837
# F statistic: ratio of between-group to within-group mean squares.
F_value <- function(between, within) {
  between / within
}
# Hand-computed F statistic for the cylinder grouping, then a cross-check
# against R's built-in aov(). Note the aov() call treats cyl as numeric
# (1 df), so its F value differs from the hand-computed grouped version.
cyl_F<-F_value(cyl_MS_Between,cyl_MS_Within)
cyl_F
#[1] 39.69752
aovcyl = aov(mpg~cyl, data=mtcars)
summary(aovcyl)
#            Df Sum Sq Mean Sq F value   Pr(>F)
#cyl          1  817.7   817.7   79.56 6.11e-10 ***
#Residuals   30  308.3    10.3
#4 -- same ANOVA decomposition for the gear grouping
gr_Within<- SS_within(g3)+SS_within(g4)+SS_within(g5)
gr_Within
#[1] 642.804
gr_Between<-SS_between(g3,GM)+SS_between(g4,GM)+SS_between(g5,GM)
gr_Between
#[1] 483.2432
gr_Total<-SS_total(gr_Within,gr_Between)
gr_Total
#[1] 1126.047
gr_MS_Between<-MS_between(gr_Between)
gr_MS_Between
#[1] 241.6216
gr_MS_Within<-MS_within(gr_Within)
gr_MS_Within
#[1] 22.16566
gr_F<-F_value(gr_MS_Between,gr_MS_Within)
gr_F
#[1] 10.90072
aovmpg = aov(mpg~gear, data=mtcars)
summary(aovmpg)
#            Df Sum Sq Mean Sq F value  Pr(>F)
#gear         1  259.7  259.75   8.995  0.0054 **
#Residuals   30  866.3   28.88
#Cylinder explains more of the variance in MPG (F=39.69752) compared to that of the gears (F=10.90072). There is more variance between cylinders than variance between gears. The high variance between cylinders creates a high F score. This is the opposite for gears.
#5 -- one-way ANOVA of chick weight by diet, computed by hand and
# cross-checked against aov(). Here Diet is a factor, so the degrees of
# freedom (4 groups) match between the two approaches.
J=4
N<-length(ChickWeight$weight)
DF_Between=J-1 #J = number of groups (i.e. 4 chicken-diet groups)
DF_Within=N-J
d1 <- ChickWeight$weight[which(ChickWeight$Diet==1)]
d2 <- ChickWeight$weight[which(ChickWeight$Diet==2)]
d3 <- ChickWeight$weight[which(ChickWeight$Diet==3)]
d4 <- ChickWeight$weight[which(ChickWeight$Diet==4)]
ChickWeight
diet_Within<-SS_within(d1)+SS_within(d2)+SS_within(d3)+SS_within(d4)
diet_Within
#[1] 2758693
chick_GM<- mean(ChickWeight$weight)
chick_GM
#[1] 121.8183
diet_Between<-SS_between(d1,chick_GM)+SS_between(d2,chick_GM)+SS_between(d3,chick_GM)+SS_between(d4,chick_GM)
diet_Between
#[1] 155862.7
diet_Total<-SS_total(diet_Within,diet_Between)
diet_Total
#[1] 2914556
diet_MS_Between<-MS_between(diet_Between)
diet_MS_Between
#[1] 51954.22
diet_MS_Within<-MS_within(diet_Within)
diet_MS_Within
#[1] 4806.086
diet_F<-F_value(diet_MS_Between, diet_MS_Within)
diet_F
#[1] 10.81009
chickenaov = aov(ChickWeight$weight~ChickWeight$Diet, data=ChickWeight)
summary(chickenaov)
boxplot(weight~Diet, data = ChickWeight, xlab="Diet", ylab = "Weight" )
#                  Df  Sum Sq Mean Sq F value   Pr(>F)
#ChickWeight$Diet   3  155863   51954   10.81 6.43e-07 ***
#Residuals        574 2758693    4806
# A lower F score means more overlap and low variance (F=10.81), and subsequently, the effect of the variable is low. The F score is 10.81, a low F score similar to our gear F score. Gears had less between-group variance than the cylinders. There is a lot of overlap across all diets around the same weight (somewhere around 75-125). Diet does not have a significant effect on weight per this reading.
|
1a6fb406480eee438d1798d32b4836f5cba2e429 | ecb6bfe5e4db873400a52bbf675ebf6048237cd1 | /paper_figures/fig5.R | cfcdcbf9eb78158ba7a6547dc8c5a8a4810dbc88 | [] | no_license | ilariac92/soccer_ha_covid | cf3cf695261df2215eaf98134d9d97772ebfa315 | bcf645633e56d45581d4e7201faf7b3d3f5e879b | refs/heads/master | 2023-02-07T07:46:41.263170 | 2021-01-01T02:00:08 | 2021-01-01T02:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,072 | r | fig5.R | library(tidyverse)
library(patchwork)
library(grid)
library(gridExtra)
library(here)
source(here('helpers.R'))
# Per-league metadata (aliases used to locate the posterior .rds files).
league_info <- read_csv(here("league_info.csv"))

### Read in Posterior HA Draws
# For each league, load the goals and yellow-card home-advantage posteriors,
# summarize each as a posterior mean (pre- and post-Covid), classify the
# league into one of four increase/decrease clusters, and record the
# magnitude of the change (Euclidean distance in (goals, yc) space).
# NOTE(review): a yellow-card HA "Increase" is labeled when yc_ha_pre >
# yc_ha_post -- presumably because the yellow-card coefficient direction is
# inverted relative to goals; confirm against the model parameterization.
posterior_means <-
  map_dfr(league_info$alias, ~{
    league_ <- gsub("\\s", "_", tolower(.x))
    goals_posterior <- try(suppressWarnings(read_rds(here(glue('posteriors/bvp_goals_no_corr/{league_}.rds')))))
    yc_posterior <- try(suppressWarnings(read_rds(here(glue('posteriors/bvp_yc_lambda3/{league_}.rds')))))
    tibble('league' = ifelse(.x == 'English League Championship', 'English Championship', .x),
           'goals_ha_pre' = mean(goals_posterior$home_field_pre),
           'goals_ha_post' = mean(goals_posterior$home_field_post),
           'yc_ha_pre' = mean(yc_posterior$home_field_pre),
           'yc_ha_post' = mean(yc_posterior$home_field_post)
    ) %>%
      mutate('cluster' = case_when(
        goals_ha_post > goals_ha_pre & yc_ha_pre > yc_ha_post ~ "Goals HA Increase | Yellow Cards HA Increase",
        goals_ha_post < goals_ha_pre & yc_ha_pre > yc_ha_post ~ "Goals HA Decrease | Yellow Cards HA Increase",
        goals_ha_post > goals_ha_pre & yc_ha_pre < yc_ha_post ~ "Goals HA Increase | Yellow Cards HA Decrease",
        goals_ha_post < goals_ha_pre & yc_ha_pre < yc_ha_post ~ "Goals HA Decrease | Yellow Cards HA Decrease")) %>%
      mutate('magnitude' = sqrt((goals_ha_post-goals_ha_pre)^2 + (yc_ha_post - yc_ha_pre)^2))
  })
# Reshape the wide pre/post columns into long form: one row per league per
# period ('pre'/'post' in ha_type), with goals and yc as separate columns.
df_means <-
  posterior_means %>%
  rename_at(vars(contains('ha')), ~gsub('_ha', '', .x)) %>%
  pivot_longer(cols = matches('goals|yc'),
               names_sep = '_',
               names_to = c('stat', 'ha_type'),
               values_to = 'value') %>%
  pivot_wider(names_from = stat,
              values_from = value)
# Shared ggplot theme for all three panel plots (large text sizes because the
# final figure is saved at 30 x 22 inches).
theme_set(theme_bw() +
            theme(plot.title = element_text(size = 40, vjust = 3, hjust = 0.5),
                  strip.text = element_text(size = 28),
                  axis.text = element_text(size = 22),
                  plot.margin = unit(c(2,0.5,0.5,0.5), "cm")
            ))
# Panel 1: leagues in the "goals down / yellow cards down" cluster, faceted
# by league (sorted by magnitude of change). Grey points show all leagues as
# background context; the colored arrow runs from the pre-Covid to the
# post-Covid posterior mean. Axis-direction arrows are drawn with annotate().
p1 <-
  df_means %>%
  filter(cluster == 'Goals HA Decrease | Yellow Cards HA Decrease') %>%
  ggplot(aes(x = yc, y = goals)) +
  facet_wrap(~fct_reorder(league, desc(magnitude)), ncol = 4) +
  geom_vline(xintercept = 0, lty = 2) +
  geom_hline(yintercept = 0, lty = 2) +
  geom_point(data = select(df_means, -league), alpha = 0.2, size = 3) +
  geom_point(color = "#F8766D", size = 10, alpha = 0.4) +
  geom_segment(data = posterior_means %>% filter(cluster == 'Goals HA Decrease | Yellow Cards HA Decrease'),
               color = "#F8766D",
               aes(x = yc_ha_pre,
                   xend = yc_ha_post,
                   y = goals_ha_pre,
                   yend = goals_ha_post),
               arrow = arrow(length = unit(0.7, "cm")), lwd = 4, show.legend = F) +
  scale_x_continuous(limits = c(-0.65, 0.2)) +
  scale_y_continuous(limits = c(-0.4, 0.5)) +
  annotate('text', x = -0.555, y = 0.17, label = 'Decreasing HA\n(Goals)', size = 7, angle = 90) +
  annotate('segment', x = -0.625, y = 0.475, xend = -0.625, yend = -0.16, arrow = arrow(length = unit(0.4, 'cm')), lwd = 1.5) +
  annotate('text', x = -0.275, y = -0.305, label = 'Decreasing HA (Yellow Cards)', size = 7) +
  annotate('segment', x = -0.635, y = -0.375, xend = 0.1, yend = -0.375, arrow = arrow(length = unit(0.4, 'cm')), lwd = 1.5) +
  labs(x = '',
       y = '',
       title = 'Yellow Cards HA Decrease + Goals HA Decrease',
       color = '',
       fill = '')
# Panel 2: "yellow cards down / goals up" cluster. Same layout as p1 but a
# different highlight color; the caption here serves as the title for the
# bottom-left cell of the patchwork layout (see the design string below).
p2 <-
  df_means %>%
  filter(cluster == 'Goals HA Increase | Yellow Cards HA Decrease') %>%
  ggplot(aes(x = yc, y = goals)) +
  facet_wrap(~fct_reorder(league, desc(magnitude)), ncol = 4) +
  geom_vline(xintercept = 0, lty = 2) +
  geom_hline(yintercept = 0, lty = 2) +
  geom_point(data = select(df_means, -league), alpha = 0.2, size = 3) +
  geom_point(color = "#C77CFF", size = 10, alpha = 0.4) +
  geom_segment(data = posterior_means %>% filter(cluster == 'Goals HA Increase | Yellow Cards HA Decrease'),
               color = "#C77CFF",
               aes(x = yc_ha_pre,
                   xend = yc_ha_post,
                   y = goals_ha_pre,
                   yend = goals_ha_post),
               arrow = arrow(length = unit(0.7, "cm")), lwd = 4, show.legend = F) +
  scale_x_continuous(limits = c(-0.65, 0.2)) +
  scale_y_continuous(limits = c(-0.4, 0.5)) +
  annotate('text', x = -0.555, y = 0.17, label = 'Decreasing HA\n(Goals)', size = 7, angle = 90) +
  annotate('segment', x = -0.625, y = 0.475, xend = -0.625, yend = -0.15, arrow = arrow(length = unit(0.4, 'cm')), lwd = 1.5) +
  annotate('text', x = -0.275, y = -0.305, label = 'Decreasing HA (Yellow Cards)', size = 7) +
  annotate('segment', x = -0.635, y = -0.375, xend = 0.1, yend = -0.375, arrow = arrow(length = unit(0.4, 'cm')), lwd = 1.5) +
  labs(x = '',
       y = '',
       title = 'Yellow Cards HA Decrease + Goals HA Increase',
       caption = 'Yellow Cards HA Increase + Goals HA Increase',
       color = '',
       fill = '') +
  theme(plot.caption = element_text(hjust = 0.5, size = 40, vjust = -8))
# Panel 3: "both HA increase" cluster. Title is intentionally blank -- the
# heading for this panel is supplied by p2's caption in the combined layout.
p3 <-
  df_means %>%
  filter(cluster == 'Goals HA Increase | Yellow Cards HA Increase') %>%
  ggplot(aes(x = yc, y = goals)) +
  facet_wrap(~fct_reorder(league, desc(magnitude)), ncol = 4) +
  geom_vline(xintercept = 0, lty = 2) +
  geom_hline(yintercept = 0, lty = 2) +
  geom_point(data = select(df_means, -league), alpha = 0.2, size = 3) +
  geom_point(color = "#00BFC4", size = 10, alpha = 0.4) +
  geom_segment(data = posterior_means %>% filter(cluster == 'Goals HA Increase | Yellow Cards HA Increase'),
               color = "#00BFC4",
               aes(x = yc_ha_pre,
                   xend = yc_ha_post,
                   y = goals_ha_pre,
                   yend = goals_ha_post),
               arrow = arrow(length = unit(0.7, "cm")), lwd = 4, show.legend = F) +
  scale_x_continuous(limits = c(-0.65, 0.2)) +
  scale_y_continuous(limits = c(-0.4, 0.5)) +
  annotate('text', x = -0.555, y = 0.17, label = 'Decreasing HA\n(Goals)', size = 7, angle = 90) +
  annotate('segment', x = -0.625, y = 0.475, xend = -0.625, yend = -0.15, arrow = arrow(length = unit(0.4, 'cm')), lwd = 1.5) +
  annotate('text', x = -0.275, y = -0.305, label = 'Decreasing HA (Yellow Cards)', size = 7) +
  annotate('segment', x = -0.635, y = -0.375, xend = 0.1, yend = -0.375, arrow = arrow(length = unit(0.4, 'cm')), lwd = 1.5) +
  labs(x = '',
       y = '',
       title = '',
       color = '',
       fill = '')
# patchwork layout: p1 spans the top three rows, p3 the fourth, p2 the
# bottom-left cell ('#' leaves the bottom-right empty).
design <-"
11
11
11
33
2#"
# Combine the three panels, add the overall title/subtitle, and save.
p1 + p3 + p2 + plot_layout(design = design) +
  plot_annotation(title = 'Change in Home Advantages:\nGoals and Yellow Cards',
                  subtitle = 'Arrows reflect Pre-Covid to Post-Covid posterior means in HA',
                  theme = theme(plot.title = element_text(size = 72, hjust = 0.5, face = 'bold'),
                                plot.subtitle = element_text(size = 48, hjust = 0.5, face = 'bold')))
ggsave('figures/figure5.png', height = 30, width = 22)
|
bb64879cb792d97c35dadfb98b5507aa4afefdf6 | bd9ba219102f6fa6bbae66b08cdcb28ed50df3ad | /20190916_조국.R | 5dc508c5313e6dab1d9b10195b404542994d5527 | [] | no_license | i-am-chan/2019_2_BigData | 955df5cef2d713603056d3801c5e5d4dee394b10 | 227c70029f9b9d9af6686a8071ded614a2e8cf79 | refs/heads/master | 2020-07-16T08:51:36.694585 | 2019-10-09T12:05:35 | 2019-10-09T12:05:35 | 205,758,212 | 0 | 0 | null | null | null | null | UHC | R | false | false | 635 | r | 20190916_조국.R | setwd('R_Practice_Examples_1')
# Build a word cloud of nouns from a Korean text file ('조국.txt') using
# KoNLP for noun extraction and wordcloud2 for rendering. Assumes the working
# directory was set to 'R_Practice_Examples_1' earlier in the script.
library(KoNLP)
library(wordcloud2)
jo <- readLines('조국.txt')
# readLines('조국.txt', encoding='utf-8'??) -> look this up later.
# I just saved the file as ANSI instead.
jo.list <- sapply(jo, extractNoun, USE.NAMES=F)
View(jo.list)
jo.vector <- unlist(jo.list)
# Keep only tokens between 2 and 10 characters long.
jo.vector <- Filter(function(x) {10 >= nchar(x) & nchar(x) >= 2}, jo.vector)
View(jo.list)
# Patterns/words to strip out: digits, periods, spaces, quotes, and the
# subject's name ("조국") and title ("교수" = professor) themselves.
v = c('\\d+', '\\.', ' ', "\\'", "", "조국", "교수")
for (ch in v) {
  jo.vector <- gsub(ch, '', jo.vector)
}
# Round-trip through a file, tabulate word frequencies, and draw the cloud.
write(unlist(jo.vector), '조국_2.txt')
jo.table <- read.table('조국_2.txt')
wordcount <- table(jo.table)
wordcloud2(wordcount)
6e7e11a0c84868e83440f0d6813d2b4fd9543b8c | 6420e078e825209d927cbef53f5f68800f65df67 | /man/makeDisplay.Rd | aa2467dbb158749bc4b0bd9a0240724ce41e0d45 | [
"BSD-3-Clause"
] | permissive | hafen/trelliscope | 0cb32f638923f6eaefd25bb184a2a5fdb0db377d | 97858083fb18c245006d216bec802097dbcaab07 | refs/heads/master | 2020-04-05T15:52:47.649484 | 2017-09-20T14:57:40 | 2017-09-20T14:57:40 | 20,783,488 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 7,142 | rd | makeDisplay.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeDisplay.R
\name{makeDisplay}
\alias{makeDisplay}
\title{Create a Trelliscope Display}
\usage{
makeDisplay(data, name, group = "common", desc = "", mdDesc = NULL,
height = 500, width = 500, panelFn = NULL, lims = list(x = "free", y =
"free", prepanelFn = NULL), cogFn = NULL, state = NULL,
preRender = FALSE, thumbIndex = 1, cogConn = dfCogConn(),
output = NULL, conn = getOption("vdbConn"), verbose = TRUE,
keySig = NULL, params = NULL, packages = NULL, control = NULL,
detectGlobals = TRUE)
}
\arguments{
\item{data}{data of class "ddo" or "ddf" (see \code{\link{ddo}}, \code{\link{ddf}})}
\item{name}{the name of the display (no special characters, spaces are converted to underscores)}
\item{group}{the group the display belongs to, where displays are organized into groups (no special characters, spaces are
converted to underscores). Defaults to "common"}
\item{desc}{a description of the display (used in the viewer)}
\item{mdDesc}{an optional longer-form description of the display and data, which can be text or can be a path to a markdown file or file with html snippets. The description will appear in the "Display Information" panel in the Trelliscope viewer.}
\item{height}{reference dimensions (in pixels) for each panel (panels will be resized based on available space in the viewer)}
\item{width}{reference dimensions (in pixels) for each panel (panels will be resized based on available space in the viewer)}
\item{panelFn}{a function that produces a plot and takes one argument, which will be the current split of the data being passed to it. It is recommended that you first test \code{panelFn} on a single key-value pair using \code{panelFn(data[[1]][[2]])}. This function must return either an object of class "ggplot", "trellis", or return "NULL" (for base plot commands)}
\item{lims}{either an object of class "trsLims" as obtained from \code{\link{setLims}} or a list with elements x, y, and prepanelFn, that specify how to apply \code{\link{prepanel}} and \code{\link{setLims}}}
\item{cogFn}{a function that returns a named list, where each element of the list is a cognostic feature (with length 1). This list must be coerceable to a 1-row data frame. The function should take one argument, which will be the current split of the data being passed to it. Useful to test with \code{cogFn(divExample(dat))}}
\item{state}{if specified, this tells the viewer the default parameter settings (such as layout, sorting, filtering, etc.) to use when the display is viewed (see \code{\link{validateState}} for details)}
\item{preRender}{should the panels be pre-rendered and stored (\code{TRUE}), or rendered on-the-fly (\code{FALSE}, default)? Default is recommended unless rendering is very expensive. See Details.}
\item{thumbIndex}{the index value to use for creating the thumbnail}
\item{cogConn}{a connection to store the cognostics data. By default, this is \code{\link{dfCogConn}()}.}
\item{output}{how to store the panels and metadata for the display (unnecessary to specify in most cases -- see details)}
\item{conn}{VDB connection info, typically stored in options("vdbConn") at the beginning of a session, and not necessary to specify here if a valid "vdbConn" object exists}
\item{verbose}{print status messages?}
\item{keySig}{a user-defined key signature (string - see details)}
\item{params}{a named list of objects external to the input data that are needed in the distributed computing (most should be taken care of automatically such that this is rarely necessary to specify)}
\item{packages}{a vector of R package names that contain functions used in \code{panelFn} or \code{cogFn} (most should be taken care of automatically such that this is rarely necessary to specify)}
\item{control}{parameters specifying how the backend should handle things (most-likely parameters to \code{rhwatch} in RHIPE) - see \code{\link[datadr]{rhipeControl}} and \code{\link[datadr]{localDiskControl}}}
\item{detectGlobals}{if TRUE params are automatically detected (packages are always auto-detected)}
}
\description{
Create a trelliscope display and add it to a visualization database (VDB)
}
\details{
Many of the parameters are optional or have defaults. For several examples, see the documentation at deltarho.org: \url{http://deltarho.org/docs-trelliscope}
Panels by default are not pre-rendered. Instead, this function creates a display object and computes and stores the cognostics. Panels are then rendered on the fly by the DeltaRho backend and pushed to the Trelliscope viewer as html with the panel images embedded in the html. If a user would like to pre-render the images for every subset (using \code{preRender = TRUE}), then by default the image files for the panels will be stored to a local disk connection (see \code{\link{localDiskConn}}) inside the VDB directory, organized in subdirectories by group and name of the display. Optionally, the user can specify the \code{output} parameter to be any valid "kvConnection" object, as long as it is one that persists on disk (e.g. \code{\link{hdfsConn}}).
\code{keySig} does not generally need to be specified. It is useful to specify when creating multiple displays that you would like to be treated as related displays, so that you can view them side by side. Two displays are determined to be related when their key signatures, typically computed as a md5 hash of the complete collection of keys, match. Sometimes two displays will have data where the keys match for a significant portion of subsets, but not all. Manually specifying the same \code{keySig} for each can ensure that they will be treated as related displays.
}
\examples{
\dontrun{
library(ggplot2)
vdbConn(tempfile(), autoYes = TRUE)
# divide housing data by county
byCounty <- divide(housingData::housing, by = c("county", "state"))
xlim <- as.Date(c("2008-01-31", "2016-01-31"))
# plot list price vs. time for each county
makeDisplay(byCounty, name = "county_time",
panelFn = function(x)
ggplot(x, aes(time, medListPriceSqft)) +
geom_point() + xlim(xlim))
# divide housing data by state
byState <- divide(housingData::housing, by = "state")
# create a "displayHref" cognostic that links to the by county display
# filtered down to all counties in the current state
cogFn <- function(x) {
state <- stateSpec(
name = "county_time",
sort = list(county = "asc"),
layout = list(nrow = 2, ncol = 4),
filter = list(state = list(select = getSplitVar(x, "state"))))
list(countyPlots = cogDisplayHref(state = state, defLabel = TRUE))
}
# plot distribution of list price vs. time for each state
makeDisplay(byState, name = "state_time_CI",
panelFn = function(x)
ggplot(x, aes(time, medListPriceSqft)) +
stat_summary(fun.data = "mean_cl_boot") + xlim(xlim),
cogFn = cogFn)
# open up the state display
# try clicking on the link for "countyPlots"
# the by county display will be loaded filtered to the state
view("state_time_CI")
}
}
\seealso{
\code{\link{prepanel}}, \code{\link{setLims}}, \code{\link{divide}}
}
|
4b5bdc2a8686755983135b27698e766237accdb5 | bb660ae1c6194d054248ca508475493ee264a6ae | /man/get_coords.Rd | 11b573bd9cca81b6beccdf3579a4581db97ba48b | [
"MIT"
] | permissive | hrvg/RiverML | bdfdd6d47f2bb7a7a26c255fa4bf268b11f59c43 | 22b3223f31f310e00313096b7f1fb3a9ab267990 | refs/heads/master | 2023-04-10T05:02:17.788230 | 2020-10-08T16:13:21 | 2020-10-08T16:13:21 | 288,584,365 | 0 | 0 | NOASSERTION | 2020-12-04T01:19:50 | 2020-08-18T23:18:50 | R | UTF-8 | R | false | true | 458 | rd | get_coords.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_data_loading.R
\name{get_coords}
\alias{get_coords}
\title{Retrieve the coordinates of the observations}
\usage{
get_coords(region)
}
\arguments{
\item{region}{\code{character}, identifier of the region of study}
}
\value{
the coordinates of the labelled points for a given \code{region}
}
\description{
Retrieve the coordinates of the observations
}
\keyword{ml-data-loading}
|
5474712e6974e228e7514e755fe1ec6b216ed648 | 9e6c6d3ea78d408a6746fcdeca6ff0d3a8a3308c | /man/complementarycolor.Rd | 6fb0b64de5d68120c45b0e922035c73a224ec1fe | [] | no_license | stineb/rbeni | 36f28d38f58301d2af24255e9d63fe5ac6809ebe | 2f9d26d0a286c550cb90ee9d30a1f2b6c3b112f6 | refs/heads/master | 2023-02-18T22:18:52.856980 | 2023-02-16T17:29:09 | 2023-02-16T17:29:09 | 167,402,490 | 3 | 6 | null | 2020-09-25T09:35:32 | 2019-01-24T16:49:15 | R | UTF-8 | R | false | true | 852 | rd | complementarycolor.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LSD.color.R
\name{complementarycolor}
\alias{complementarycolor}
\alias{LSD.complementarycolor}
\title{Complement R colors}
\usage{
complementarycolor(cols, alpha = NULL)
}
\arguments{
\item{cols}{a character vector containing R built-in colors.}
\item{alpha}{alpha value: a two-digit integer between 01 and 99 controlling color opacity, i.e. the degree of partial or full transparency (by default no alpha value is applied).}
}
\value{
\code{complementarycolor} returns a vector containing R built-in colors in hexadecimal representation.
}
\description{
Convert R built-in colors to their color complement
}
\examples{
complementarycolor(c("red","green","blue"))
}
\seealso{
\code{\link{disco}}, \code{\link{colorpalette}}, \code{\link{demotour}}
}
\author{
Bjoern Schwalb
}
\keyword{color}
|
bf1bd731cc63ce5b7f355798a338c7c0827f2b26 | f84139438cc48d29c45d14596638d5027ca640af | /R/cci.R | 21a12a7ec2dddc47653677beb037c1a4d848540b | [] | no_license | cran/currentSurvival | 15c99ed83f3d31b6e9efab253f1cb5147ceb64a4 | 076bfc7cc68a49acedd51befa0bcab02a165e674 | refs/heads/master | 2022-06-05T23:53:27.567904 | 2022-05-12T06:20:02 | 2022-05-12T06:20:02 | 17,695,347 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,191 | r | cci.R |
cci <- function(data, maxx = NULL, com.est = TRUE, conf.int = FALSE, conf.int.level = NULL, no.iter = NULL, points = NULL, fig = TRUE, strat = FALSE, pvals = FALSE, pval.test = NULL)
{
#### check input parameters:
## check input parameter com.est:
if (!is.logical(com.est)) {
stop(paste("","Invalid logical parameter 'com.est'! Its value must be set to","TRUE or FALSE. The default value is TRUE.",sep="\n"))
}
## check input parameter conf.int:
if (!is.logical(conf.int)) {
stop(paste("","Invalid logical parameter 'conf.int'! Its value must be set to","TRUE or FALSE. The default value is FALSE.",sep="\n"))
}
## check input parameter conf.int.level:
if (conf.int) {
if (is.null(conf.int.level)) {
conf.int.level <- 0.95
} else {
if (!is.numeric(conf.int.level) || conf.int.level<0.9 || conf.int.level>0.99) {
stop(paste("","Invalid numerical parameter 'conf.int.level'! Its value must be","in the range 0.9-0.99. The default value is 0.95.",sep="\n"))
}
}
} else {
if (!is.null(conf.int.level)) {
if (!is.numeric(conf.int.level) || conf.int.level<0.9 || conf.int.level>0.99) {
stop(paste("","Invalid numerical parameter 'conf.int.level'! Its value must be","in the range 0.9-0.99. The default value is 0.95. However, if","you want to calculate confidence intervals, you must also set","'conf.int' to TRUE. If not, do not specify 'conf.int.level'.",sep="\n"))
} else {
stop(paste("","Parameter 'conf.int' is missing or set to FALSE! If you want to","calculate confidence intervals, you must set 'conf.int' to TRUE.","If not, do not specify 'conf.int.level'.",sep="\n"))
}
}
}
## check input parameter no.iter:
if (conf.int) {
if (is.null(no.iter)) {
no.iter <- 100
} else {
if (!is.numeric(no.iter) || no.iter<10 || no.iter>10000) {
stop(paste("","Invalid numerical parameter 'no.iter'! Its value must be in the","range 10-10000. The default value is 100.",sep="\n"))
}
}
} else {
if (!is.null(no.iter)) {
if (!is.numeric(no.iter) || no.iter<10 || no.iter>10000) {
stop(paste("","Invalid numerical parameter 'no.iter'! Its value must be in the","range 10-10000. The default value is 100. However, if you want","to calculate confidence intervals, you must also set 'conf.int'","to TRUE. If not, do not specify 'no.iter'.",sep="\n"))
} else {
stop(paste("","Parameter 'conf.int' is missing or set to FALSE! If you want to","calculate confidence intervals, you must set 'conf.int' to TRUE.","If not, do not specify 'no.iter'.",sep="\n"))
}
}
}
## check input parameter fig:
if (!is.logical(fig)) {
stop(paste("","Invalid logical parameter 'fig'! Its value must be set to TRUE","or FALSE. The default value is TRUE.",sep="\n"))
}
## check input logical parameter strat:
if (!is.logical(strat)) {
stop(paste("","Invalid logical parameter 'strat'! Its value must be set to TRUE","or FALSE. The default value is FALSE.",sep="\n"))
}
## check input parameter pvals:
if (!is.logical(pvals)) {
if (!strat || !conf.int) {
stop(paste("","Invalid logical parameter 'pvals'! Its value must be set to TRUE","or FALSE. The default value is FALSE. If you want to calculate","p-values for the stratified CCI estimates, you must also set","'strat' and 'conf.int' to TRUE. If not, do not specify 'pvals'.",sep="\n"))
} else {
stop(paste("","Invalid logical parameter 'pvals'! Its value must be set to TRUE","or FALSE. The default value is FALSE.",sep="\n"))
}
}
if (pvals && !strat && !conf.int) {
stop(paste("","Parameters 'strat' and 'conf.int' are missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'strat' and 'conf.int' to TRUE.","If not, do not specify 'pvals'.",sep="\n"))
}
if (pvals && !strat) {
stop(paste("","Parameter 'strat' is missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'strat' to TRUE. If not, do not","specify 'pvals'.",sep="\n"))
}
if (pvals && !conf.int) {
stop(paste("","Parameter 'conf.int' is missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'conf.int' to TRUE because the","computation of p-values is based on the estimation of","confidence intervals. If not, do not specify 'pvals'.",sep="\n"))
}
## check input parameter pval.test:
if (!is.null(pval.test)) {
check <- switch(pval.test,
naive = 1,
log = 1,
loglog = 1 )
if (is.null(check)) {
if (!pvals || !strat || !conf.int) {
stop(paste("","Invalid string parameter 'pval.test'! Its value must be set","to 'naive', 'log' or 'loglog'. The default value is 'loglog'.","If you want to calculate p-values for the stratified CCI","estimates, you must also set 'pvals', 'strat' and 'conf.int'","to TRUE. If not, do not specify 'pvals.test'.",sep="\n"))
} else {
stop(paste("","Invalid string parameter 'pval.test'! Its value must be set","to 'naive', 'log' or 'loglog'. The default value is 'loglog'.",sep="\n"))
}
} else {
if (!pvals && !strat && !conf.int) {
stop(paste("","Parameters 'pvals', 'strat' and 'conf.int' are missing or set","to FALSE! If you want to calculate p-values for the stratified","CCI estimates, you must set 'pvals', 'strat' and 'conf.int'","to TRUE. If not, do not specify 'pvals.test'.",sep="\n"))
}
if (!pvals && !strat) {
stop(paste("","Parameters 'pvals' and 'strat' are missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'pvals' and 'strat' to TRUE. If not,","do not specify 'pvals.test'.",sep="\n"))
}
if (!pvals && !conf.int) {
stop(paste("","Parameters 'pvals' and 'conf.int' are missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'pvals' and 'conf.int' to TRUE. If not,","do not specify 'pvals.test'.",sep="\n"))
}
if (!strat && !conf.int) {
stop(paste("","Parameters 'strat' and 'conf.int' are missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'strat' and 'conf.int' to TRUE. If not,","do not specify 'pvals.test'.",sep="\n"))
}
if (!pvals) {
stop(paste("","Parameter 'pvals' is missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'pvals' to TRUE. If not, do not","specify 'pvals.test'.",sep="\n"))
}
if (!strat) {
stop(paste("","Parameter 'strat' is missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'strat' to TRUE. If not, do not","specify 'pvals.test'.",sep="\n"))
}
if (!conf.int) {
stop(paste("","Parameter 'conf.int' is missing or set to FALSE!","If you want to calculate p-values for the stratified CCI","estimates, you must set 'conf.int' to TRUE because the","computation of p-values is based on the estimation of","confidence intervals. If not, do not specify 'pvals.test'.",sep="\n"))
}
}
} else {
if (pvals) {
pval.test <- "loglog"
}
}
#### data pre-processing:
if (strat) {
stratf <- data[,ncol(data)] # separate the stratification factor
data <- data[,-ncol(data)] # remove the stratification factor from the data matrix
# check whether the data contain only numeric values:
isfac <- array(0,ncol(data)) # allocation of a vector for the identification of factor variables among the data columns
for (i in 1:ncol(data)) {
isfac[i] <- is.factor(data[,i])
}
if (sum(isfac)>0) {
stop(paste("","Invalid input data! Data contain string variables (apart from","the stratification factor in which string values are allowed).",sep="\n"))
}
} else {
# check whether the data contain only numeric values:
isfac <- array(0,ncol(data)) # allocation of a vector for the identification of factor variables among the data columns
for (i in 1:ncol(data)) {
isfac[i] <- is.factor(data[,i])
}
if (sum(isfac)>0) {
stop(paste("","Invalid input data! Data contain string variables.","If the last column of the data matrix is a stratification factor,","in which string values are allowed, and you want to calculate the","stratified CCI estimates, you must set 'strat' to TRUE. If not,","all columns in the data matrix must be numeric variables of type","Integer.",sep="\n"))
}
}
### other data controls:
# allocate a vector for error messages regarding input data:
# (problems are accumulated and reported together at the end, rather than
# stopping at the first one)
error.messages <- NULL
# check whether the data matrix does not contain a column with only NA values:
# (such columns are dropped with a warning rather than raising an error)
if (sum(colSums(is.na(data))==nrow(data))>0) {
if (sum(colSums(is.na(data))==nrow(data))==1) {
warning(paste(paste("Column no.",c(1:ncol(data))[colSums(is.na(data))==nrow(data)],"was excluded from the data matrix as it contains"),"only NA values.",sep="\n"))
} else {
warning(paste(paste("Columns no.",paste(c(1:ncol(data))[colSums(is.na(data))==nrow(data)],collapse=", ")," were excluded from the data matrix"),"as they contain only NA values.",sep="\n"))
}
data <- data[,(colSums(is.na(data))<nrow(data))]
}
# check whether the times to events and the follow-up time are integers:
# (every column except the last -- the censoring indicator -- holds day
# counts; the .Machine$double.eps tolerance makes the integrality test safe
# against floating-point representation error)
if (sum(!(abs(data[,-ncol(data)]-round(data[,-ncol(data)]))<.Machine$double.eps),na.rm=TRUE)>0) {
error.messages <- c(error.messages,"All times to events as well as follow-up times must be in days","(i.e. integer values)!")
}
# check whether the times to events and the follow-up time contain only values higher than 0:
if (sum(data[,-ncol(data)]<=0,na.rm=TRUE)>0) {
error.messages <- c(error.messages,"All times to events as well as follow-up times must be higher than 0!")
}
# check if there are no NA values in the censoring indicator:
# patients with a missing censoring status are dropped with a warning; note
# that `stratf` is subset first, while `data` still has its original number
# of rows, so the same mask keeps the two objects aligned
if(sum(is.na(data[,ncol(data)]))>0) {
warning(paste("","NA values were detected in the censoring indicator.",paste("Number of patients excluded from the analysis due to NAs:",sum(is.na(data[,ncol(data)]))),sep="\n"))
if (strat) {
stratf <- stratf[!is.na(data[,ncol(data)])]
}
data <- data[!is.na(data[,ncol(data)]),]
}
# check if there are no NA values in the follow-up time:
# (same drop-with-warning policy and ordering as for the censoring indicator)
if(sum(is.na(data[,ncol(data)-1]))>0) {
warning(paste("","NA values were detected in the follow-up time.",paste("Number of patients excluded from the analysis due to NAs:",sum(is.na(data[,ncol(data)-1]))),sep="\n"))
if (strat) {
stratf <- stratf[!is.na(data[,ncol(data)-1])]
}
data <- data[!is.na(data[,ncol(data)-1]),]
}
# allocate another vector for error messages regarding input data:
# (kept separate from `error.messages` so that the final message can append a
# hint about the expected column layout only for this second group of checks)
error.messages2 <- NULL
# check whether the event times are in ascending order:
E <- data[,1:(ncol(data)-2)]
# fill missing event times with the patient's follow-up time so that diff()
# below never compares against NA in the middle of a row
for (i in 1:nrow(E)) {
E[i,is.na(E[i,])] <- data[i,ncol(data)-1]
}
Ediff <- t(diff(t(E))) # compute a matrix of differences between the subsequent pairs of data columns
# re-mask positions that were NA in the original data, so the imputed
# follow-up values are not themselves validated
Ediff[is.na(data[,2:(ncol(data)-2)])] <- NA
if (sum(Ediff<=0,na.rm=TRUE)>0) {
error.messages2 <- c(error.messages2,"The event times must be ascending and not equal for each patient!") # flag any non-positive difference, i.e. event times must be strictly increasing
}
rm(list=c("E","Ediff"))
# check whether the censoring indicator has only values 0 and 1:
# (NA values in this column were already removed above, so the two sums must
# account for every remaining row)
if ((sum(data[,ncol(data)]==0) + sum(data[,ncol(data)]==1)) != nrow(data)) {
error.messages2 <- c(error.messages2,"Invalid censoring indicator; it must be 0 (censored) or 1 (dead)","for each patient!")
}
# check whether the follow-up time is higher than the preceding times to events:
fup <- array(0,nrow(data))
for (i in 1:nrow(data)) {
if (sum(!is.na(data[i,1:(ncol(data)-2)]))>0) {
fup[i] <- (max(data[i,1:(ncol(data)-2)],na.rm=TRUE)>data[i,ncol(data)-1]) # flags a violation; NOTE(review): this test accepts follow-up EQUAL to the last event time, while the error text below demands strictly "higher than" -- confirm which is intended
}
}
if (sum(fup)>0) {
error.messages2 <- c(error.messages2,"Invalid follow-up time; it must be higher than all event times","for each patient!")
}
# check the number of levels for stratification:
# (between 2 and 8 distinct strata are supported)
if (strat) {
if (length(levels(as.factor(stratf)))<2) {
error.messages2 <- c(error.messages2,"Stratification factor must have at least 2 levels!")
} else {
if (length(levels(as.factor(stratf)))>8) {
error.messages2 <- c(error.messages2,"Stratification factor must have no more than 8 levels!")
}
}
}
# check if there are no NA values in the stratification factor:
# patients with a missing stratum are dropped with a warning; `data` is
# subset before `stratf` is, so the same pre-filter mask applies to both
if (strat) {
if(sum(is.na(stratf))>0) {
warning(paste("","NA values were detected in the stratification factor.",paste("Number of patients excluded from the analysis due to NAs:",sum(is.na(stratf))),sep="\n"))
data <- data[!is.na(stratf),]
stratf <- stratf[!is.na(stratf)]
}
}
# create final error message:
# all accumulated problems are reported at once; the hint about the expected
# column layout is added only when the layout-sensitive checks (the
# error.messages2 group) found problems
if (strat) {
if (!is.null(error.messages)) {
if (!is.null(error.messages2)) {
stop(paste("","Following problem(s) were found in the data matrix:",paste(error.messages,collapse="\n"),paste(error.messages2,collapse="\n"),"As the parameter 'strat' is set to TRUE, check whether the last","three data columns contain follow-up times, censoring indicators,","and stratification levels, respectively.",sep="\n"))
} else {
stop(paste("","Following problem(s) were found in the data matrix:",paste(error.messages,collapse="\n"),sep="\n"))
}
} else {
if (!is.null(error.messages2)) {
stop(paste("","Following problem(s) were found in the data matrix:",paste(error.messages2,collapse="\n"),"As the parameter 'strat' is set to TRUE, check whether the last","three data columns contain follow-up times, censoring indicators,","and stratification levels, respectively.",sep="\n"))
}
}
} else {
if (!is.null(error.messages)) {
if (!is.null(error.messages2)) {
stop(paste("","Following problem(s) were found in the data matrix:",paste(error.messages,collapse="\n"),paste(error.messages2,collapse="\n"),"As the parameter 'strat' is missing or set to FALSE, check","whether the last two data columns contain follow-up times and","censoring indicators, respectively.",sep="\n"))
} else {
stop(paste("","Following problem(s) were found in the data matrix:",paste(error.messages,collapse="\n"),sep="\n"))
}
} else {
if (!is.null(error.messages2)) {
stop(paste("","Following problem(s) were found in the data matrix:",paste(error.messages2,collapse="\n"),"As the parameter 'strat' is missing or set to FALSE, check","whether the last two data columns contain follow-up times and","censoring indicators, respectively.",sep="\n"))
}
}
}
### check input parameters maxx and points:
# check input parameter maxx and convert it to days:
# NOTE(review): a user-supplied `maxx` is interpreted in YEARS (range 1 ..
# max follow-up/365) and converted to days via floor(maxx*365); the NULL
# default is already in days (maximum follow-up time) -- mixed units by design
LastContact <- data[,ncol(data)-1]
if (is.null(maxx)) {
maxx <- max(LastContact)
} else {
if (!is.numeric(maxx) || maxx<1 || maxx>(max(LastContact)/365)) {
stop(paste("","Invalid numerical parameter 'maxx'! It must be in range from","1 year to maximum value in the column of follow-up times. The","default value is a maximum value in the column of follow-up times.",sep="\n"))
} else {
maxx <- floor(maxx*365)
}
}
# check input parameter points:
# (evaluation time points in months; the default is every 12 months up to maxx)
if (is.null(points)) {
points <- seq(12,floor(maxx/(365/12)),12)
} else {
if (!is.vector(points,mode="numeric") || sum(points<0)>0 || sum(points>floor(maxx/(365/12)))>0) {
stop(paste("","Invalid numerical vector 'points'! Its values must be in range","from 0 months to maximum value in the column of follow-up times.","The default is a vector of 0, 12, 24, ..., floor(maxx/(365/12))","months.",sep="\n"))
} else {
points <- sort(points)
if (sum(points==0)>0) {
# after sort() any 0 is first; only a single leading 0 is removed here
points <- points[-1] # if 0 is included in the time points, it is removed because it will be added later automatically
}
}
}
### call functions cci.strat or cci.nostrat:
# dispatch to the stratified or the plain estimator with the validated inputs
if (strat) {
cci <- cci.strat(data, stratf, maxx, com.est, conf.int, conf.int.level, no.iter, points, fig, pvals, pval.test)
} else {
cci <- cci.nostrat(data, maxx, com.est, conf.int, conf.int.level, no.iter, points, fig)
}
}
# (end of function; the value of the final assignment -- the cci result -- is
# what the function returns, invisibly, since the last expression is an
# assignment)
|
e938f99de2f6c50f23e224e62fd71bf149423291 | 87cd6f59e44ce81375400cc88d55e1f23736a4cd | /R functions/funct_tabulation.R | ce5353c1d546aa7e3affae7b6d10bb424cfe121b | [] | no_license | basiliomp/CISsurveys | 7ba6d7da6bfc91b62ab0494fb2d9425655ea5463 | 64e89ef382fdf6d1d8dfa2c34bbe5819c7073e7a | refs/heads/master | 2020-03-22T08:17:20.844259 | 2018-11-21T16:49:42 | 2018-11-21T16:49:42 | 139,757,432 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 4,739 | r | funct_tabulation.R | # write_tab_header funct --------
# New function for creating a meaningful title for voting recall tables
write_tab_header <- function(x, file, header) {
  # Emit the descriptive header line first (this overwrites any existing
  # file), then append the table below it as a semicolon-separated,
  # comma-decimal CSV with row names and a blank corner cell (col.names = NA).
  cat(header, "\n", file = file)
  write.table(
    x = x, file = file, sep = ";", dec = ",", na = "",
    col.names = NA, row.names = TRUE, append = TRUE
  )
}
# Tabular functions ---------------
# Consider adding a second argument to the function below so `x` is specified explicitly?
#Function for tabulating answers from reported vote on general elections
# Tabulate reported vote in general elections (recent recall vs. previous
# general-election recall) and write three CSVs: absolute counts, row
# percentages and column percentages, named after the survey token.
#
# Args:
#   RECUERDO  recent vote recall (table rows).
#   RVGENAGR  previous general-election recall (table columns).
#   weight    optional survey::svydesign object; when supplied, a
#             design-weighted table is computed with svytable(). Default NA
#             means unweighted.
#
# NOTE(review): in the weighted branch the formula symbols are resolved
# inside the design object, not from this function's arguments -- confirm the
# design contains RECUERDO and RVGENAGR columns. `general` and `x` are
# globals defined by the calling script.
generaltab <- function(RECUERDO, RVGENAGR, weight = NA) {
  # Bug fix: the original `if (!is.na(weight))` is not a valid scalar
  # condition when `weight` is a svydesign object (is.na() then returns a
  # vector; an error since R 4.2). Treat NULL and a single NA as "unweighted".
  use_weights <- !is.null(weight) && !(length(weight) == 1L && is.na(weight))
  if (use_weights) {
    tab_gen <- svytable(~RECUERDO + RVGENAGR, design = weight)
  } else {
    tab_gen <- table(RECUERDO, RVGENAGR)
  }
  # Transfer table from the previous general election, absolute counts.
  write_tab_header(x = round((tab_gen)), file = paste(general[x, "Token"], "GEN_abs.csv", sep = "_"),
                   header = "Voto reciente (filas) y en anteriores elecciones generales (columnas) en número absolutos")
  # Transfer percentages by row (margin = 1).
  write_tab_header(x = round(prop.table(tab_gen, margin = 1), digits = 4) * 100,
                   file = paste(general[x, "Token"], "GEN_perc_fila.csv", sep = "_"),
                   header = "Voto reciente (filas) y en anteriores elecciones generales (columnas) en % por fila")
  # Transfer percentages by column (margin = 2).
  write_tab_header(x = round(prop.table(tab_gen, margin = 2), digits = 4) * 100,
                   file = paste(general[x, "Token"], "GEN_perc_colu.csv", sep = "_"),
                   header = "Voto reciente (filas) y en anteriores elecciones generales (columnas) en % por columna")
  invisible(NULL)
}
#Function for tabulating answers from reported vote on regional elections
# Tabulate reported vote in regional (autonomous-community) elections
# (recent recall vs. previous regional recall) and write three CSVs:
# absolute counts, row percentages and column percentages.
#
# Args:
#   RECUERDO  recent vote recall (table rows).
#   RVAUTAGR  previous regional-election recall (table columns).
#   weight    optional survey::svydesign object; when supplied, a
#             design-weighted table is computed with svytable(). Default NA
#             means unweighted.
#
# NOTE(review): the weighted formula resolves its symbols inside the design
# object, not from these arguments; `general` and `x` are globals.
autonotab <- function(RECUERDO, RVAUTAGR, weight = NA) {
  # Bug fix: `if (!is.na(weight))` fails for list-like svydesign objects
  # (non-scalar condition, an error since R 4.2). NULL or a single NA means
  # "unweighted"; anything else is taken as a survey design.
  use_weights <- !is.null(weight) && !(length(weight) == 1L && is.na(weight))
  if (use_weights) {
    tab_auto <- svytable(~RECUERDO + RVAUTAGR, design = weight)
  } else {
    tab_auto <- table(RECUERDO, RVAUTAGR)
  }
  # Transfer table from the previous regional election, absolute counts.
  write_tab_header(x = round(tab_auto), file = paste(general[x, "Token"], "AUTO_abs.csv", sep = "_"),
                   header = "Voto reciente (filas) y en anteriores elecciones autonómicas (columnas) en números absolutos")
  # Transfer percentages by row (margin = 1).
  write_tab_header(x = round(prop.table(tab_auto, margin = 1), digits = 4) * 100,
                   file = paste(general[x, "Token"], "AUTO_perc_fila.csv", sep = "_"),
                   header = "Voto reciente (filas) y en anteriores elecciones autonómicas (columnas) en % por fila")
  # Transfer percentages by column (margin = 2).
  write_tab_header(x = round(prop.table(tab_auto, margin = 2), digits = 4) * 100,
                   file = paste(general[x, "Token"], "AUTO_perc_colu.csv", sep = "_"),
                   header = "Voto reciente (filas) y en anteriores elecciones autonómicas (columnas) en % por columnas")
  invisible(NULL)
}
#Function for tabulating answers from voting intention from pre election surveys
# Tabulate voting intention from pre-election surveys against the recall of
# the previous regional election, writing three CSVs: absolute counts, row
# percentages and column percentages.
#
# Args:
#   RVAUTAGR  previous regional-election recall (first table dimension).
#   INTVAGR   stated voting intention (second table dimension).
#   weight    optional survey::svydesign object; when supplied, a
#             design-weighted table is computed with svytable(). Default NA
#             means unweighted.
#
# NOTE(review): the CSV headers describe voting intention as the rows, but
# the table is built with RVAUTAGR as the first (row) dimension -- confirm
# whether the table orientation or the header text is the intended one.
# `general` and `x` are globals defined by the calling script.
intentab <- function(RVAUTAGR, INTVAGR, weight = NA) {
  # Bug fix: `if (!is.na(weight))` is a non-scalar condition when `weight`
  # is a svydesign object (an error since R 4.2). NULL or a single NA means
  # "unweighted".
  use_weights <- !is.null(weight) && !(length(weight) == 1L && is.na(weight))
  if (use_weights) {
    tab_inten <- svytable(~RVAUTAGR + INTVAGR, design = weight)
  } else {
    tab_inten <- table(RVAUTAGR, INTVAGR)
  }
  # Intention vs. previous regional recall, absolute counts.
  write_tab_header(x = round(tab_inten), file = paste(general[x, "Token"], "INTEN_abs.csv", sep = "_"),
                   header = "Intención de voto (filas) y recuerdo de voto anteriores elecciones autonómicas (columnas) en números absolutos")
  # Percentages by row (margin = 1). (Fixed a typo in the user-facing header:
  # "Intencón" -> "Intención".)
  write_tab_header(x = round(prop.table(tab_inten, margin = 1), digits = 4) * 100,
                   file = paste(general[x, "Token"], "INTEN_perc_fila.csv", sep = "_"),
                   header = "Intención de voto (filas) y recuerdo de voto en anteriores elecciones autonómicas (columnas) en % por fila")
  # Percentages by column (margin = 2).
  write_tab_header(x = round(prop.table(tab_inten, margin = 2), digits = 4) * 100,
                   file = paste(general[x, "Token"], "INTEN_perc_colu.csv", sep = "_"),
                   header = "Intención de voto (filas) y recuerdo de voto en anteriores elecciones autonómicas (columnas) en % por columnas")
  invisible(NULL)
}
|
65993e33244db2c898259f30b745d2279f178599 | ac9046de49de25efd9ad0545ca514a4209d2c7f3 | /man/coef.vblogitfit.Rd | b20c68f4f5a7fb0794dae9c06ad472e7de2ca489 | [] | no_license | antiphon/vblogistic | 029979ee1bd826b6f6a7c2b27e2b476d937243a8 | 439eb1c6602389b0267038aa5a424557c381b22f | refs/heads/master | 2021-07-31T06:02:13.617684 | 2021-07-27T07:51:53 | 2021-07-27T07:51:53 | 24,378,925 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 288 | rd | coef.vblogitfit.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/methods.R
\name{coef.vblogitfit}
\alias{coef.vblogitfit}
\title{Extract Coefficients from a vblogit Fit}
\usage{
\method{coef}{vblogitfit}(x, ...)
}
\arguments{
\item{x}{object from vblogit}
\item{...}{ignored.}
}
\description{
Extract the estimated regression coefficients from a \code{vblogit} model fit.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.