blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bef205a271e56ef3f52fdd16b70153bc325e50ae | bbe35176b05f886a37b2b9f79fd23d59feba5cd4 | /man/tri2cor.Rd | 8692302fea6cd2e6708790d808eb5f6b07689728 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wjschne/WJSmisc | 5f91a5195c550db66ee7fe51e0bab90c0f4e64ed | 85d58415775ff5f004d101ad2478e4c2b02fe8ba | refs/heads/master | 2023-06-08T21:13:48.444893 | 2023-05-31T14:12:31 | 2023-05-31T14:12:31 | 211,534,399 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 446 | rd | tri2cor.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{tri2cor}
\alias{tri2cor}
\title{Create square correlation matrix from lower triangle}
\usage{
tri2cor(x, variable_names = NULL)
}
\arguments{
\item{x}{vector of correlations}
\item{variable_names}{a vector of variable names}
}
\value{
square matrix
}
\description{
Create square correlation matrix from lower triangle
}
\examples{
tri2cor(c(.2,.3,.4))
}
|
4df8c20d44de9f21400d16005255d02757371f6e | fac56cba38e4919be45519605837165d85835fff | /man/country_codes.Rd | b74319dbbb5ecd90088e7b5aa59af986ece70a23 | [] | no_license | mmparker/to1check | 28cb87273ce41e442edc674fd70d4b842052afdd | 808d1bc447760c618af7747d9cee1206ecb43333 | refs/heads/master | 2016-08-06T07:19:13.231958 | 2014-05-29T15:43:52 | 2014-05-29T15:43:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 493 | rd | country_codes.Rd | \docType{data}
\name{country_codes}
\alias{country_codes}
\title{Countries by name and their ISO 3166-1 alpha-3 codes.}
\source{
\url{http://en.wikipedia.org/wiki/ISO_3166-1_alpha-3}
}
\description{
A dataset containing the names and three-letter ISO country
codes of 249 countries, dependent territories, and special
areas of geographical interest.
}
\details{
\itemize{ \item name. Name of country/area. \item code ISO
3166-1 alpha-3 code for the country/area. ... }
}
\keyword{datasets}
|
89f14093b5b775289f77a29bed8a1aa7a6141177 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/palettetown/examples/ichooseyou.Rd.R | d1a1aedaf9abb31c83d9d1504b27577b9ebb1ddc | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 228 | r | ichooseyou.Rd.R | library(palettetown)
### Name: ichooseyou
### Title: Get a pokemon palette. Alias for pokepal.
### Aliases: ichooseyou
### ** Examples
library(magrittr)
pal <- 'Hoothoot' %>% ichooseyou
pal2 <- 'Pichu' %>% ichooseyou(6)
|
b33d208de376bc9b863408c3b57a89795a52f966 | 0841838ba8723e94b37a1514409a5a9767cbf181 | /MESA_project/code/haplotype_inference/MESA_clean_lab_key.R | 7adc6eea8f3d5a3af5cbe5ef6a9028b5d58f6978 | [] | no_license | kelseysumner/taylorlab | cfa2358b5c552e7853b111de12940983d081de6a | 8801f5d32b7f81f2a66b3efd763cc18c5d35f42b | refs/heads/master | 2021-08-07T03:55:06.004801 | 2021-06-20T21:29:08 | 2021-06-20T21:29:08 | 150,612,627 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 16,390 | r | MESA_clean_lab_key.R | # ------------------------------------ #
# MESA Clean Lab Key #
# October 10, 2018 #
# K. Sumner #
# ------------------------------------ #
#### ----- load the necessary libraries ----- ####
library(tidyverse)
#### ----- read in all data sets ----- ####
# read in the data set of the lab key Wendy created 10/6/2018
lab_key = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Meta Data/MESA inventory matched to labid Oct 6 minimum.csv")
#### --------------- CLEAN LAB KEY FOR MESA ------------------- ####
# remove variables that are not needed
lab_key$sid <- NULL
lab_key$studyid_case_controldata <- NULL
lab_key$case_control_childdata <- NULL
lab_key$mem_rdt_idindata <- NULL
lab_key$`_merge` <- NULL
# check the levels of the RDT results and recode the variable
table(lab_key$mem_rdt_results, useNA = "always")
lab_key$mem_rdt_results[lab_key$mem_rdt_results == "POSITIVE"] = "positive"
lab_key$mem_rdt_results = as.factor(lab_key$mem_rdt_results)
# rename labid variable names
names(lab_key)[names(lab_key) == "labid edited for matching"] <- 'labid_new'
names(lab_key)[names(lab_key) == "labid_from_inventory"] <- 'labid_old_socialinventory'
names(lab_key)[names(lab_key) == "labid_original"] <- 'labid_old_labinventory'
# check remaining variables
table(lab_key$interview_date,useNA = "always")
table(lab_key$studyid_case_control_new,useNA = "always")
table(lab_key$mem_rdt_results,useNA = "always")
# labid_old_socialinventory
table(lab_key$labid_old_socialinventory,useNA = "always")
lab_key$labid_old_socialinventory[lab_key$labid_old_socialinventory == "_"] = NA
length(which(is.na(lab_key$labid_old_socialinventory) == T))
socialinventory = rep(NA,nrow(lab_key))
for (m in 1:nrow(lab_key)){
if ("-" %in% strsplit(lab_key$labid_old_socialinventory[m],"")[[1]]){
socialinventory[m] = m
} else {
socialinventory[m] = NA
}
}
length(na.omit(socialinventory))
# labid_old_labinventory
table(lab_key$labid_old_labinventory,useNA = "always")
length(which(is.na(lab_key$labid_old_labinventory) == T))
labinventory = rep(NA,nrow(lab_key))
for (m in 1:nrow(lab_key)){
if ("-" %in% strsplit(lab_key$labid_old_labinventory[m],"")[[1]]){
labinventory[m] = m
} else {
labinventory[m] = NA
}
}
length(na.omit(labinventory))
# labid_new
table(lab_key$labid_new,useNA = "always")
length(which(is.na(lab_key$labid_new) == T))
new = rep(NA,nrow(lab_key))
for (m in 1:nrow(lab_key)){
if ("-" %in% strsplit(lab_key$labid_new[m],"")[[1]]){
new[m] = m
} else {
new[m] = NA
}
}
length(na.omit(new))
table(lab_key$gdnaplate,useNA = "always")
table(lab_key$gdnacolumn,useNA = "always")
table(lab_key$gdnarow,useNA = "always")
table(lab_key$dbsbox,useNA = "always")
table(lab_key$dbsbag,useNA = "always")
# create a function for something not being in the list
"%ni%" <- Negate("%in%")
# clean labid_new variable
# first find all "-"
changed_labid = rep(NA,nrow(lab_key))
for (i in 1:nrow(lab_key)){
if("-" %in% strsplit(lab_key$labid_new[i],"")[[1]]){
splitup = strsplit(lab_key$labid_new[i],"-")
changed_labid[i] = paste0(splitup[[1]][1],"_",splitup[[1]][2])
} else {
changed_labid[i] = lab_key$labid_new[i]
}
}
lab_key$labid_new = changed_labid
# then add leading 0s and make all uppercase
cleanlab_id = rep(NA,nrow(lab_key))
for (k in 1:nrow(lab_key)){
if (is.na(lab_key$labid_new[k]) == T){
cleanlab_id[k] = NA
} else if ("_" %ni% strsplit(lab_key$labid_new[k],"")[[1]]) {
if (nchar(lab_key$labid_new[k]) == 4){
cleanlab_id[k] = paste0(lab_key$labid_new[k])
}
if (nchar(lab_key$labid_new[k]) == 3){
cleanlab_id[k] = paste0("0",lab_key$labid_new[k])
}
if (nchar(lab_key$labid_new[k]) == 2){
cleanlab_id[k] = paste0("00",lab_key$labid_new[k])
}
if (nchar(lab_key$labid_new[k]) == 1){
cleanlab_id[k] = paste0("000",lab_key$labid_new[k])
}
} else {
part_mesa_id = strsplit(lab_key$labid_new[k],"_")[[1]][1]
if (nchar(part_mesa_id) == 4){
cleanlab_id[k] = paste0(lab_key$labid_new[k])
}
if (nchar(part_mesa_id) == 3){
cleanlab_id[k] = paste0("0",lab_key$labid_new[k])
}
if (nchar(part_mesa_id) == 2){
cleanlab_id[k] = paste0("00",lab_key$labid_new[k])
}
if (nchar(part_mesa_id) == 1){
cleanlab_id[k] = paste0("000",lab_key$labid_new[k])
}
}
}
# check the output
length(which(is.na(cleanlab_id) == T)) # added 9 missing so something going on
check_df = data.frame(lab_key$labid_new,cleanlab_id)
check_df = check_df[-which(is.na(check_df$cleanlab_id) == F),]
# add the cleaned variable to the data set
lab_key$labid_new = toupper(cleanlab_id)
# some of these variables were coded differently than usual so will keep their coding in the new variable
lab_key$labid_new[282] = "04884_4"
lab_key$labid_new[5191] = "0381A"
lab_key$labid_new[5192] = "0381B"
lab_key$labid_new[5193] = "0381C"
lab_key$labid_new[5194] = "0381D"
lab_key$labid_new[5200] = "0386A"
lab_key$labid_new[5201] = "0386B"
lab_key$labid_new[5202] = "0386C"
lab_key$labid_new[5239] = "04884_B"
# check the output
length(which(is.na(lab_key$labid_new) == T)) # 0 missing so correct now
# clean labid_old_socialinventory variable
# doesn't have any "-"
# so just add leading 0s and make all uppercase
cleanlab_id = rep(NA,nrow(lab_key))
for (k in 1:nrow(lab_key)){
if (is.na(lab_key$labid_old_socialinventory[k]) == T){
cleanlab_id[k] = NA
} else if ("_" %ni% strsplit(lab_key$labid_old_socialinventory[k],"")[[1]]) {
if (nchar(lab_key$labid_old_socialinventory[k]) == 4){
cleanlab_id[k] = paste0(lab_key$labid_old_socialinventory[k])
}
if (nchar(lab_key$labid_old_socialinventory[k]) == 3){
cleanlab_id[k] = paste0("0",lab_key$labid_old_socialinventory[k])
}
if (nchar(lab_key$labid_old_socialinventory[k]) == 2){
cleanlab_id[k] = paste0("00",lab_key$labid_old_socialinventory[k])
}
if (nchar(lab_key$labid_old_socialinventory[k]) == 1){
cleanlab_id[k] = paste0("000",lab_key$labid_old_socialinventory[k])
}
} else {
part_mesa_id = strsplit(lab_key$labid_old_socialinventory[k],"_")[[1]][1]
if (nchar(part_mesa_id) == 4){
cleanlab_id[k] = paste0(lab_key$labid_old_socialinventory[k])
}
if (nchar(part_mesa_id) == 3){
cleanlab_id[k] = paste0("0",lab_key$labid_old_socialinventory[k])
}
if (nchar(part_mesa_id) == 2){
cleanlab_id[k] = paste0("00",lab_key$labid_old_socialinventory[k])
}
if (nchar(part_mesa_id) == 1){
cleanlab_id[k] = paste0("000",lab_key$labid_old_socialinventory[k])
}
}
}
# check the output
length(which(is.na(cleanlab_id) == T)) # 274 missing like there was originally
check_df = data.frame(lab_key$labid_old_socialinventory,cleanlab_id)
check_df = check_df[-which(is.na(check_df$cleanlab_id) == F),]
# add the cleaned variable to the data set
lab_key$labid_old_socialinventory = toupper(cleanlab_id)
# clean labid_old_labinventory variable
# first find all "-"
changed_labid = rep(NA,nrow(lab_key))
for (i in 1:nrow(lab_key)){
if("-" %in% strsplit(lab_key$labid_old_labinventory[i],"")[[1]]){
splitup = strsplit(lab_key$labid_old_labinventory[i],"-")
changed_labid[i] = paste0(splitup[[1]][1],"_",splitup[[1]][2])
} else {
changed_labid[i] = lab_key$labid_new[i]
}
}
lab_key$labid_old_labinventory = changed_labid
# then add leading 0s and make all uppercase
cleanlab_id = rep(NA,nrow(lab_key))
for (k in 1:nrow(lab_key)){
if (is.na(lab_key$labid_old_labinventory[k]) == T){
cleanlab_id[k] = NA
} else if ("_" %ni% strsplit(lab_key$labid_old_labinventory[k],"")[[1]]) {
if (nchar(lab_key$labid_old_labinventory[k]) == 4){
cleanlab_id[k] = paste0(lab_key$labid_old_labinventory[k])
}
if (nchar(lab_key$labid_old_labinventory[k]) == 3){
cleanlab_id[k] = paste0("0",lab_key$labid_old_labinventory[k])
}
if (nchar(lab_key$labid_old_labinventory[k]) == 2){
cleanlab_id[k] = paste0("00",lab_key$labid_old_labinventory[k])
}
if (nchar(lab_key$labid_old_labinventory[k]) == 1){
cleanlab_id[k] = paste0("000",lab_key$labid_old_labinventory[k])
}
} else {
part_mesa_id = strsplit(lab_key$labid_old_labinventory[k],"_")[[1]][1]
if (nchar(part_mesa_id) == 4){
cleanlab_id[k] = paste0(lab_key$labid_old_labinventory[k])
}
if (nchar(part_mesa_id) == 3){
cleanlab_id[k] = paste0("0",lab_key$labid_old_labinventory[k])
}
if (nchar(part_mesa_id) == 2){
cleanlab_id[k] = paste0("00",lab_key$labid_old_labinventory[k])
}
if (nchar(part_mesa_id) == 1){
cleanlab_id[k] = paste0("000",lab_key$labid_old_labinventory[k])
}
}
}
# check the output
length(which(is.na(cleanlab_id) == T)) # 9 missing so something is going on
check_df = data.frame(lab_key$labid_old_labinventory,cleanlab_id)
check_df = check_df[-which(is.na(check_df$cleanlab_id) == F),]
# add the cleaned variable to the data set
lab_key$labid_old_labinventory = toupper(cleanlab_id)
# some of these variables were coded differently than usual so will keep their coding in the new variable
lab_key$labid_old_labinventory[282] = "04884_4"
lab_key$labid_old_labinventory[5191] = "0381A"
lab_key$labid_old_labinventory[5192] = "0381B"
lab_key$labid_old_labinventory[5193] = "0381C"
lab_key$labid_old_labinventory[5194] = "0381D"
lab_key$labid_old_labinventory[5200] = "0386A"
lab_key$labid_old_labinventory[5201] = "0386B"
lab_key$labid_old_labinventory[5202] = "0386C"
lab_key$labid_old_labinventory[5239] = "04884_B"
# check the output
length(which(is.na(lab_key$labid_old_labinventory) == T)) # 0 missing so correct now
# double check remaining variables
table(lab_key$interview_date,useNA = "always")
table(lab_key$studyid_case_control_new,useNA = "always")
table(lab_key$mem_rdt_results,useNA = "always")
table(lab_key$labid_old_socialinventory,useNA = "always")
table(lab_key$labid_old_labinventory,useNA = "always")
table(lab_key$labid_new,useNA = "always")
table(lab_key$gdnaplate,useNA = "always")
table(lab_key$gdnacolumn,useNA = "always")
table(lab_key$gdnarow,useNA = "always")
table(lab_key$dbsbox,useNA = "always")
table(lab_key$dbsbag,useNA = "always")
# output the new file
write_csv(lab_key,"clean_lab_key.csv")
#### --------------- EXPORT RDT+ SAMPLES THAT STILL NEED TO BE SEQUENCED ------------------- ####
# read in the inventory of the 514 RDT+ samples originally sent out for sequencing (in spring 2018)
# will just pull these numbers from the database already matched to AMA samples
original_rdt_pos = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/23AUG2018 AMA MESA Update/AMA_sample_summary.csv")
# create a function for something not being in the list
"%ni%" <- Negate("%in%")
# for AMA
# look at the data set
table(original_rdt_pos$lab_mesa_id, useNA = "always")
# recode the "none1" and "none2" labids to NA
original_rdt_pos$lab_mesa_id[original_rdt_pos$lab_mesa_id == "none1"] = NA
original_rdt_pos$lab_mesa_id[original_rdt_pos$lab_mesa_id == "none2"] = NA
# check for "-" in labids
length(which(is.na(original_rdt_pos$lab_mesa_id) == T))
mesaid = rep(NA,nrow(original_rdt_pos))
for (m in 1:nrow(original_rdt_pos)){
if ("-" %in% strsplit(original_rdt_pos$lab_mesa_id[m],"")[[1]]){
mesaid[m] = m
} else {
mesaid[m] = NA
}
}
length(na.omit(mesaid))
# now clean the labid variable anda rename it to "labid_old
cleanlab_id = rep(NA,nrow(original_rdt_pos))
for (k in 1:nrow(original_rdt_pos)){
if (is.na(original_rdt_pos$lab_mesa_id[k]) == T){
cleanlab_id[k] = NA
} else if ("_" %ni% strsplit(original_rdt_pos$lab_mesa_id[k],"")[[1]]) {
if (nchar(original_rdt_pos$lab_mesa_id[k]) == 4){
cleanlab_id[k] = paste0(original_rdt_pos$lab_mesa_id[k])
}
if (nchar(original_rdt_pos$lab_mesa_id[k]) == 3){
cleanlab_id[k] = paste0("0",original_rdt_pos$lab_mesa_id[k])
}
if (nchar(original_rdt_pos$lab_mesa_id[k]) == 2){
cleanlab_id[k] = paste0("00",original_rdt_pos$lab_mesa_id[k])
}
if (nchar(original_rdt_pos$lab_mesa_id[k]) == 1){
cleanlab_id[k] = paste0("000",original_rdt_pos$lab_mesa_id[k])
}
} else {
part_mesa_id = strsplit(original_rdt_pos$lab_mesa_id[k],"_")[[1]][1]
if (nchar(part_mesa_id) == 4){
cleanlab_id[k] = paste0(original_rdt_pos$lab_mesa_id[k])
}
if (nchar(part_mesa_id) == 3){
cleanlab_id[k] = paste0("0",original_rdt_pos$lab_mesa_id[k])
}
if (nchar(part_mesa_id) == 2){
cleanlab_id[k] = paste0("00",original_rdt_pos$lab_mesa_id[k])
}
if (nchar(part_mesa_id) == 1){
cleanlab_id[k] = paste0("000",original_rdt_pos$lab_mesa_id[k])
}
}
}
original_rdt_pos$labid_old_labinventory = toupper(cleanlab_id)
# change controls to NA
original_rdt_pos$labid_old_labinventory[original_rdt_pos$labid_old_labinventory == "03D7"] = NA
# should now have 5 NAs
length(which(is.na(original_rdt_pos$labid_old_labinventory) == T)) # 5 NAs so looks good
# now look at how many of the labid_old_labinventory values in the lab_key match the labid_old_labinventory values in the original_rdt_pos
length(which(original_rdt_pos$labid_old_labinventory %in% lab_key$labid_old_labinventory)) # 509 match
# check with the lab_key new_labid coding
length(which(original_rdt_pos$labid_old_labinventory %in% lab_key$labid_new)) # 509 match
# check with the lab_key labid_old_socialinventory
length(which(original_rdt_pos$labid_old_labinventory %in% lab_key$labid_old_socialinventory)) # 514 match
# look at differences in those that are unique and those that are duplicated in the original_rdt_pos dataset
length(unique(original_rdt_pos$labid_old_labinventory)) # 510 unique -> probably saying NA is unique so is the 509 samples present plus 5 NA
length(which(is.na(original_rdt_pos$labid_old_labinventory) == T)) # 5 missing (controls + 2 coded "none1" & "none2")
count_table = table(original_rdt_pos$labid_old_labinventory, useNA = "always")
dups_table = count_table[which(count_table > 1)] # only NAs are duplicates
# no duplicates in the original_rdt_pos data set - yay!
# look at differences in those that are unique and those that are duplicated in lab_key$labid_old_labinventory
length(unique(lab_key$labid_old_labinventory)) # 5359/5379 -> looks like good be some duplicates
length(which(is.na(lab_key$labid_old_labinventory) == T)) # 0 missing
count_table = table(lab_key$labid_old_labinventory, useNA = "always")
dups_table = count_table[which(count_table > 1)] # 20 duplicates
# look at differences in those that are unique and those that are duplicated in lab_key$labid_old_socialinventory
length(unique(lab_key$labid_old_socialinventory)) # 5089/5379 -> looks like good be some duplicates
length(which(is.na(lab_key$labid_old_socialinventory) == T)) # 274 missing
count_table = table(lab_key$labid_old_socialinventory, useNA = "always")
dups_table = count_table[which(count_table > 1)] # 16 duplicates
# look at differences in those that are unique and those that are duplicated in lab_key$labid_new
length(unique(lab_key$labid_new)) # 5359/5379 -> looks like good be some duplicates
length(which(is.na(lab_key$labid_new) == T)) # 0 missing
count_table = table(lab_key$labid_new, useNA = "always")
dups_table = count_table[which(count_table > 1)] # 20 duplicates
# pull out the indices that match and don't match
matched_indices = original_rdt_pos[which(original_rdt_pos$labid_old_labinventory %in% lab_key$labid_old_labinventory),] # 509/514 matched
unmatched_indices = original_rdt_pos[which(!(original_rdt_pos$labid_old_labinventory %in% lab_key$labid_old_labinventory)),] # 5/514 unmatched - 3 were controls, 2 didn't have an associated mesaID
# now pull out the old_labid values from the lab_key to make sure those are reprocessed
unsequenced_lab_key = lab_key[which(!(lab_key$labid_old_labinventory %in% original_rdt_pos$labid_old_labinventory)),]
# subset this lab key to those that are rdt_positive
rdtpos_unsequenced_lab_key = unsequenced_lab_key[which(unsequenced_lab_key$mem_rdt_results == "positive"),]
# note 337_2, 337_3, 375_3 are in the sequenced data but are also duplicates in the labid_old_labinventory in the lab_key
# write out this data set
write_csv(rdtpos_unsequenced_lab_key, "rdtpos_unsequenced_lab_key.csv")
|
6bf617e944f4b42fe82c4f6578348dfd1a861e9e | 1d9f5371e86b802c620b4c0adfef7bc5053c319a | /_drake.R | 5c6d2abb5be5dc91b1dbd17d1e794830d0efb3be | [
"MIT"
] | permissive | pat-s/2019-feature-selection | 340c44f527a3210511b0a8ebbf7b80dae55ca6c3 | 63468571dfe8774250e26a418c3034e6da28db38 | refs/heads/main | 2021-12-29T16:41:40.582468 | 2021-12-18T17:11:09 | 2021-12-18T17:11:09 | 119,857,222 | 26 | 12 | NOASSERTION | 2021-12-06T19:34:07 | 2018-02-01T15:49:49 | R | UTF-8 | R | false | false | 2,487 | r | _drake.R | # load R packages --------------------------------------------------------------
source("code/099-packages.R")
# create log directory
fs::dir_create("log")
library("drake")
library("magrittr")
library("conflicted")
conflict_prefer("target", "drake", quiet = TRUE)
conflict_prefer("pull", "dplyr", quiet = TRUE)
conflict_prefer("filter", "dplyr", quiet = TRUE)
suppressMessages(library("R.utils"))
Sys.setenv(DISPLAY = ":99")
options(
# set this to "slurm" if you have access to a Slurm cluster
clustermq.scheduler = "multicore",
clustermq.template = "~/papers/2019-feature-selection/slurm_clustermq.tmpl"
)
# load mlr extra learner -------------------------------------------------------
source("https://raw.githubusercontent.com/mlr-org/mlr-extralearner/master/R/RLearner_regr_ranger_mtry_pow.R") # nolint
# source functions -------------------------------------------------------------
R.utils::sourceDirectory("R")
R.utils::sourceDirectory("code")
# Combine all plans ------------------------------------------------------------
plan_paper <- bind_plans(
download_data_plan,
hyperspectral_processing_plan,
data_preprocessing_plan,
tasks_plan,
filter_eda_plan,
param_sets_plan,
learners_plan,
filter_wrapper_plan,
resampling_plan,
tune_ctrl_plan,
tune_wrapper_plan,
benchmark_plan,
bm_aggregated_plan,
feature_imp_plan,
reports_plan_paper
)
# paper ------------------------------------------------------------------------
# config for long running tasks
drake_config(plan_paper,
targets = c(
# replace this with the target name that should be computed
# c("eda_wfr")
"benchmark_no_models"
),
verbose = 1,
lazy_load = "eager",
packages = NULL,
log_make = "log/drake-BM.log",
caching = "main",
template = list(
log_file = "log/worker-BM%a.log", n_cpus = 4,
memory = 3500, job_name = "paper2-BM",
partition = "all"
),
prework = list(
quote(load_packages()),
# seed for parallel tasks
quote(set.seed(1, "L'Ecuyer-CMRG")),
# intra-target parallelization (when running CV with mlr)
quote(parallelStart(
mode = "multicore",
cpus = 4,
level = "mlr.resample",
# level = "mlr.selectFeatures", # for mlr feature importance calculation
mc.cleanup = TRUE,
mc.preschedule = FALSE
))
),
garbage_collection = TRUE,
jobs = 1,
parallelism = "clustermq",
keep_going = FALSE, recover = FALSE, lock_envir = FALSE, lock_cache = FALSE
)
|
a8846d907d34579d9e9999f2833ec3d812b68e2e | e35cb922fc6761880140eed88c9af4ed84e43ecd | /man/fun_enrich.Rd | f166eadb2790bf2da18ba9aa4554fb45d2706a10 | [
"MIT"
] | permissive | galanisl/FunEnrich | bff4e21eea9a863a82485847ca0690029fdafab2 | df9329156613cc4f21902919e7fd6c5b58a6821e | refs/heads/master | 2021-01-20T02:54:50.813701 | 2017-07-31T07:16:34 | 2017-07-31T07:16:34 | 89,466,750 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,789 | rd | fun_enrich.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func_enrich.R
\name{fun_enrich}
\alias{fun_enrich}
\title{Perform BP, CC, MF and REACTOME enrichment analyses}
\usage{
fun_enrich(gene.list, background, id.type = "ENTREZID", benjamini = F)
}
\arguments{
\item{gene.list}{character; A vector with the genes of interest.}
\item{background}{character; A vector with the background list of genes.}
\item{id.type}{character; One of ENTREZID (default), SYMBOL or UNIPROT
accession. This is the ID type of \code{gene.list} and \code{background}.}
\item{benjamini}{logical; Whether to include Benjamini-Hochberg adjusted
p-values or not.}
}
\value{
A list with four data frames, one per enrichment analysis:
\item{bp}{Contains the Biological Process \code{go.id}s annotating the genes
of interest, together with their \code{term} description, p-values
\code{pval} and Benjamini-Hochberg adjusted p-values \code{bh} if requested.
In the latter case, the data frame is sorted by corrected p-value.}
\item{cc}{Same as \code{bp} but for Cellular Compartment.}
\item{mf}{Same as \code{bp} but for Molecular Function.}
\item{reactome}{Same as the rest but the first column is \code{react.id}
instead of \code{go.id}.}
}
\description{
Given a list of genes of interest and a reference background, performs a
Fisher's test for the over-representation of Gene Ontology terms and REACTOME
pathways in the former.
}
\examples{
# Use the included lists of disease genes and genes associated with metabolic
# disorders as background and genes of interest, respectively
analysis <- fun_enrich(gene.list = metabolic, background = disease.genes,
id.type = "ENTREZID", benjamini = TRUE)
}
\author{
Gregorio Alanis-Lobato \email{galanisl@uni-mainz.de}
}
|
e8f345efe8c1bf0cc028859c2e4e24d01925f5aa | 992a7655ab00edb8c9416b7ea57927b86f187394 | /Codes-tous-projets/GS_Codes-graphiques_Exemples.r | 6e552691629d34da872fad7c3001c8e5e0a7c480 | [] | no_license | lauratboyer/cw-ginger-soproner | f57de9e1c75e398bbc8d6518695f496949e3e95c | 6c0039ba5d3b77c0106b4220798729a2b3d0ae21 | refs/heads/master | 2021-01-18T22:38:40.383647 | 2016-05-01T23:38:49 | 2016-05-01T23:38:49 | 7,609,161 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 87 | r | GS_Codes-graphiques_Exemples.r | fig.2var(var1="Campagne", var2="Geomorpho", typ.fig="point")
fig.2var(typ.fig="point")
|
d04b0ff068dad170b6c0ebc5fd52e2f869351592 | 59a08c32ad43ed83b39c5572aa898d26cce75f0b | /[19-2] Statistical-Computing/sc_assignment_04.R | 170d2ad769bbb8dfddfafa507d5887a997d796f0 | [] | no_license | yourmean/Statistics | bf2d1c85a732e3e0137c7a7c62ece4477de6daa1 | 914df3ae0fb7fac67fe9bee387002df504d29ebb | refs/heads/main | 2023-03-15T01:45:05.307720 | 2021-03-20T12:54:21 | 2021-03-20T12:54:21 | 332,705,631 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,316 | r | sc_assignment_04.R | # Q1 ------------------------------------
# 승산합동법 함수에서 U[0,1] 따르는 난수 100개 생성
random_uni = function(n, seed){
for(i in 1:(n-1)){
seed[i+1] = ((16807*seed[i]) %% 2147483647)
}
return(seed/2147483647)
}
ran_seed = random_uni(n = 100, seed = 2020)
ran_seed
## [1] 9.406358e-07 1.580927e-02 7.063320e-01 3.227508e-01 4.732665e-01
## [6] 1.898196e-01 2.975564e-01 3.012475e-02 3.067281e-01 1.787398e-01
## [11] 7.964980e-02 6.741965e-01 2.210716e-01 5.499991e-01 8.356633e-01
## [16] 9.925028e-01 9.943905e-01 7.217558e-01 5.503361e-01 4.996145e-01
## [21] 2.131979e-02 3.216684e-01 2.808790e-01 7.328186e-01 4.817194e-01
## [26] 2.571188e-01 3.961307e-01 7.690793e-01 9.163038e-01 3.187008e-01
## [31] 4.050008e-01 8.480776e-01 6.400402e-01 1.550472e-01 8.783177e-01
## [36] 8.854069e-01 3.313678e-02 9.299071e-01 9.491816e-01 8.955188e-01
## [41] 9.841149e-01 1.854891e-02 7.515782e-01 7.740096e-01 7.787867e-01
## [46] 6.752100e-02 8.254836e-01 9.021306e-01 1.083997e-01 8.741329e-01
## [51] 5.510394e-01 3.194513e-01 1.816531e-02 3.043560e-01 3.118174e-01
## [56] 7.152582e-01 3.445448e-01 7.651137e-01 2.656988e-01 6.003653e-01
## [61] 3.399416e-01 3.992465e-01 1.362474e-01 9.097666e-01 4.465411e-01
## [66] 1.706628e-02 8.329037e-01 6.119046e-01 2.804477e-01 4.837849e-01
## [71] 9.731319e-01 4.283315e-01 9.671390e-01 7.052366e-01 9.112328e-01
## [76] 8.973526e-02 1.805664e-01 7.795067e-01 1.696471e-01 2.588096e-01
## [81] 8.131844e-01 1.909465e-01 2.380587e-01 5.275657e-02 6.797084e-01
## [86] 8.592061e-01 6.777041e-01 1.735189e-01 3.322369e-01 9.049156e-01
## [91] 9.158716e-01 5.365444e-02 7.700988e-01 5.080103e-02 8.128785e-01
## [96] 4.943671e-02 8.827514e-01 4.031565e-01 8.514917e-01 2.088993e-02
# (a) 1. 카이제곱 적합도 검정
k = 11
N = 100
range = seq(0, 1, length = k) # [0, 1]을 10등분
n = as.numeric(table(cut(ran_seed, range))) # 구간별로 난수의 갯수 세기
W = ((k-1)/N) * sum((n-(N/(k-1)))^2)
pchisq(W, df = k-2, lower.tail = FALSE) # p-value
U[0, 1]
ui
## [1] 0.1372824
# (a) 2. 콜모고로프-스미르노프 적합도 검정(by ks.test)
u = runif(100)
ks.test(ran_seed, u)
##
## Two-sample Kolmogorov-Smirnov test
##
## data: ran_seed and u
## D = 0.12, p-value = 0.4676
## alternative hypothesis: two-sided
# p-value > 0.05
# (b) 독립성 검정: 런 검정
library(snpar)
runs.test(ran_seed)
##
## Approximate runs rest
##
## data: ran_seed
## Runs = 53, p-value = 0.6877
## alternative hypothesis: two.sided
# p-value > 0.05
# Q2 ------------------------------------
set.seed(10)
N = 1000 # 반복 수
p = 6
Y_sample = sample(c(0, 1), N * p, prob = c(0.8, 0.2), replace = T)
Y = matrix(Y_sample, nrow = N, ncol = p)
mean(rowSums(Y) >= 1) # 경험적 확률
## [1] 0.746
1-0.8^6 # 이론적 확률
## [1] 0.737856
# Q3 ------------------------------------
Buffon = function(n, lofneedle, distance)
{
set.seed(2020)
lofneedle = lofneedle / 2
distance = distance / 2
r1 = runif(n)
r2 = runif(n)
prob = mean(r1*distance < lofneedle*sin(r2*pi))
return(prob)
}
# 경험적 확률
N = c(10, 50, 100, 1000, 5000)
est_p = unlist(lapply(N, Buffon, lofneedle = 15, distance = 20))
# 이론적 확률
theo_p = 2 * 15 / (3.14 * 20)
theo_p
## [1] 0.477707
plot(N, abs(est_p-theo_p), type = 'b')
|
43f5d86ca3e5d001fe0107acf2fed2971d3acd9c | 51a9040d886b1b6894e0786aeb60a1469aeb34c1 | /theory/R_script_plus_data/Confronti ML.r | 95e9bd76d9a73d4ce3bd640a8d1de87c76076c59 | [
"MIT"
] | permissive | franec94/R-Script-Analyses | a566f453a0089f44fb7faf8de5bba9c8dbb440d2 | a9c875759f63b63ceeeb7d44b98092dce075a7e1 | refs/heads/master | 2022-11-14T22:46:38.727969 | 2020-07-11T10:19:01 | 2020-07-11T10:19:01 | 276,652,456 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,207 | r | Confronti ML.r | #### #### #### #### #### #### #### #### #### #### ####
####
#### Confronti tra Machine Learning approaches
#### https://machinelearningmastery.com/compare-the-performance-of-machine-learning-algorithms-in-r/
#### #### #### #### #### #### #### #### #### #### ####
rm(list=ls())
# libraries
library(mlbench)
library(caret)
# load the dataset
data(PimaIndiansDiabetes)
#### Pima Indians Diabetes Database
# Variables:
# pregnant Number of times pregnant
# glucose Plasma glucose concentration (glucose tolerance test)
# Data dictionary (continued) for the PimaIndiansDiabetes data set (mlbench):
# pressure  Diastolic blood pressure (mm Hg)
# triceps   Triceps skin fold thickness (mm)
# insulin   2-Hour serum insulin (mu U/ml)
# mass      Body mass index (weight in kg/(height in m)^2)
# pedigree  Diabetes pedigree function
# age       Age (years)
# diabetes  Class variable (test for diabetes)
# Descriptive statistics and pairwise scatterplots (column 9 is the class)
summary(PimaIndiansDiabetes)
plot(PimaIndiansDiabetes[,-9])
dim(PimaIndiansDiabetes)
# Clean the data set: drop rows where any of columns 2,3,4,7,8 is zero,
# since 0 codes a missing measurement for those variables.
# NOTE(review): insulin and mass (columns 5-6) are not screened -- confirm
# these are the intended "zero means missing" columns.
for(i in c(2,3,4,7,8))
{
W = PimaIndiansDiabetes[,i]!=0
PimaIndiansDiabetes = PimaIndiansDiabetes[W,]
}
dim(PimaIndiansDiabetes)
plot(PimaIndiansDiabetes[,-9])
# Boxplots of each predictor by diabetes status (colors 2:3 = red/green)
par(mfrow=c(1,4))
boxplot(pregnant ~diabetes, data=PimaIndiansDiabetes, col=2:3)
boxplot(glucose ~diabetes, data=PimaIndiansDiabetes, col=2:3)
boxplot(pressure ~diabetes, data=PimaIndiansDiabetes, col=2:3)
boxplot(triceps ~diabetes, data=PimaIndiansDiabetes, col=2:3)
par(mfrow=c(1,4))
boxplot(insulin ~diabetes, data=PimaIndiansDiabetes, col=2:3)
boxplot(mass ~diabetes, data=PimaIndiansDiabetes, col=2:3)
boxplot(pedigree ~diabetes, data=PimaIndiansDiabetes, col=2:3)
boxplot(age ~diabetes, data=PimaIndiansDiabetes, col=2:3)
# Set up repeated 10-fold cross-validation (3 repeats) for caret::train
control <- trainControl(method="repeatedcv", number=10, repeats=3)
# CART (classification tree)
set.seed(7)
fit.cart <- train(diabetes~., data=PimaIndiansDiabetes, method="rpart", trControl=control)
# LDA (linear discriminant analysis)
set.seed(7)
fit.lda <- train(diabetes~., data=PimaIndiansDiabetes, method="lda", trControl=control)
# SVM with a radial kernel
set.seed(7)
fit.svm <- train(diabetes~., data=PimaIndiansDiabetes, method="svmRadial", trControl=control)
# kNN
set.seed(7)
fit.knn <- train(diabetes~., data=PimaIndiansDiabetes, method="knn", trControl=control)
# Random Forest
set.seed(7)
fit.rf <- train(diabetes~., data=PimaIndiansDiabetes, method="rf", trControl=control)
# collect resamples from the five fitted models so they can be compared
# on the same cross-validation folds
results <- resamples(list(CART=fit.cart, LDA=fit.lda, SVM=fit.svm, KNN=fit.knn, RF=fit.rf))
# summarize the resampled accuracy/kappa of each model
summary(results)
# box and whisker plots to compare models
scales <- list(x=list(relation="free"), y=list(relation="free"))
bwplot(results, scales=scales)
#bwplot(results)
# density plots of accuracy
scales <- list(x=list(relation="free"), y=list(relation="free"))
densityplot(results, scales=scales, pch = "|")
# dot plots of accuracy
scales <- list(x=list(relation="free"), y=list(relation="free"))
dotplot(results, scales=scales)
# parallel plots to compare models
parallelplot(results)
# pair-wise scatterplots of predictions to compare models
splom(results)
# xyplot plots to compare models
xyplot(results, models=c("LDA", "SVM"))
# difference in model predictions
diffs <- diff(results)
# ?diff.resamples
# summarize p-values for pair-wise comparisons between models
summary(diffs)
c9dd5383f9b2be61c39a857ab4ca0c3983699626 | 6feb3311bf37803cf677985384487d00825394f4 | /plot4.R | fe039d1373c4616ac002c169e604c9c04aa6fb0a | [] | no_license | PaulaRoberts/ExData_Plotting1 | 0346ee50ddf08263f12d400b1863c8e5b29c16a9 | e393213759554fe5edcc1340a5b4d813ac85fa96 | refs/heads/master | 2021-01-24T03:57:36.274801 | 2015-03-07T23:06:34 | 2015-03-07T23:06:34 | 31,651,981 | 0 | 0 | null | 2015-03-04T10:46:55 | 2015-03-04T10:46:55 | null | UTF-8 | R | false | false | 2,055 | r | plot4.R | # This script creates the fourth plot for Exploratory
# Data Analysis, Assignment 1
# This script displays 4 plots in a 2x2 layout and saves them to plot4.png.

# Download and extract the data file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Only install 'downloader' when it is missing, instead of reinstalling on
# every run of the script.
if (!requireNamespace("downloader", quietly = TRUE)) {
  install.packages("downloader")
}
library(downloader)
download(url, dest="exdata_data_household_power_comsumption.zip", mode="wb")
unzip("exdata_data_household_power_comsumption.zip", exdir="./")
fileName <- "household_power_consumption.txt"

# Read in the data ("?" marks missing values) and subset the two target dates
dataSet <- read.table(fileName, header=TRUE, na.strings="?", sep=";")
subDataSet <- dataSet[(dataSet$Date=="1/2/2007" | dataSet$Date=="2/2/2007" ),]
# Combine Date and Time into a single date-time column for the x axes
subDataSet$DateTime<-strptime(paste(subDataSet$Date,subDataSet$Time),"%d/%m/%Y %H:%M:%S")

# Plot the data: 2x2 grid written to a PNG device
png("plot4.png")
par(mfrow = c(2,2))
par(mar=c(4,5,4,0.5),
    oma=c(0,0,0,0))

# Top-left: global active power over time
plot(subDataSet$DateTime, subDataSet$Global_active_power,
     type = "n",
     xlab = "",
     ylab = "Global Active Power")
lines(subDataSet$DateTime, subDataSet$Global_active_power, type = "l")

# Top-right: voltage over time
plot(subDataSet$DateTime, subDataSet$Voltage,
     type = "n",
     xlab = "datetime",
     ylab = "Voltage")
lines(subDataSet$DateTime, subDataSet$Voltage, type = "l")

# Bottom-left: the three sub-metering series on a shared scale
xrange <- range(subDataSet$DateTime)
yrange <- range(subDataSet$Sub_metering_1,subDataSet$Sub_metering_2,subDataSet$Sub_metering_3)
plot(xrange, yrange,
     type ="n",
     xlab = "",
     ylab = "Energy sub metering")
lines(subDataSet$DateTime,subDataSet$Sub_metering_1, col = "black")
lines(subDataSet$DateTime,subDataSet$Sub_metering_2, col = "red")
lines(subDataSet$DateTime,subDataSet$Sub_metering_3, col = "blue")
# BUG FIX: the legend colors must match the drawn lines.  Sub_metering_2 is
# drawn in red and Sub_metering_3 in blue; the original legend listed
# c("black", "blue", "red"), swapping the last two series.
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=c(1,1,1),
       col=c("black", "red", "blue"))

# Bottom-right: global reactive power over time
plot(subDataSet$DateTime, subDataSet$Global_reactive_power,
     type = "n",
     xlab = "datetime",
     ylab = "Global_reactive_power")
lines(subDataSet$DateTime, subDataSet$Global_reactive_power, type = "l")
dev.off()
96205b318d960d7900677763c92e94e2ce519d87 | fce7e69b92cef1138e434fd5bac9223d2d8b7055 | /Module09/Conte_ModuleHW9.r | 45a791a52db0921e47bba986aaca94ae16f9d09f | [] | no_license | contej/Statistics_for_Bioinformatics | 958ee8d717dcee5674f3d1770ce5122c68bc76b3 | 8ec73214349b72d8f63fcbb3b6b1f731d0ce2b45 | refs/heads/master | 2021-01-23T01:12:33.938462 | 2017-05-30T17:32:18 | 2017-05-30T17:32:18 | 92,858,560 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,964 | r | Conte_ModuleHW9.r | rm(list=ls()) #clears environment
cat("\014") #clears the console in RStudio
#Problem 1
data(golub,package="multtest") #Load golub data from multtest package
#The row number of the GRO2 gene.
grep("GRO2 GRO2 oncogene",golub.gnames[,2])
#The row number of the GRO3 gene.
grep("GRO3 GRO3 oncogene",golub.gnames[,2])
#(a) Find the correlation between the expression values of these two genes.
x <- golub[2714,] #GRO2 gene.
y <- golub[2715,] #GRO3 gene.
cor(x,y) #calculate correlation
plot(x, y)
#(b) Find the parametric 90% confident interval for the correlation with cor.test().
# conf.level=0.9 requests a 90% confidence interval instead of the default 95%.
# ?cor.test
cor.test(x, y, conf.level = 0.9) # also tests H0: true correlation = 0
#(c) Find the bootstrap 90% confident interval for the correlation.
nboot <- 2000 # We will resample 2000 times
boot.cor <- matrix(0, nrow = nboot, ncol = 1) # holds the resampled statistics
# Data set with x and y in two columns.  BUG FIX: renamed from 'data' (which
# shadowed base::data); also, the wrapped comment words "pairs" and "data" in
# the original loop had lost their '#' and were evaluated as bare expressions.
xy.pairs <- cbind(x, y)
for (i in seq_len(nboot)) {
  # resample the (x, y) pairs with replacement
  dat.star <- xy.pairs[sample(seq_len(nrow(xy.pairs)), replace = TRUE), ]
  boot.cor[i, ] <- cor(dat.star[, 1], dat.star[, 2]) # correlation on resampled data
}
quantile(boot.cor[,1],c(0.05,0.95)) #Find quantiles for resampled statistics
#(d) Test the null hypothesis that correlation = 0.64
n <- length(x)       # sample size n = number of pairs
T.obs <- 0.64        # hypothesised correlation value
n.perm <- 2000       # We will permute 2000 times
T.perm <- rep(NA, n.perm) # A vector to save the permuted statistics
for (i in seq_len(n.perm)) {
  x.perm <- sample(x, n, replace = FALSE) # permute x only, breaking the pairing
  T.perm[i] <- cor(x.perm, y)             # permuted statistic is the correlation
}
mean(abs(T.perm) > abs(T.obs)) # p-value
#problem 2
#On the Golub et al. (1999) data set, we consider the correlation between the Zyxin
#gene expression values and each of the genes in the data set.
rm(list=ls()) #clears environment
data(golub, package="multtest")
Zyxin <- (golub[2124,]) # row 2124 holds the Zyxin gene expression values
n <- 3051 # number of genes (rows) in golub -- nrow(golub) would be safer
t.perm <- rep(NA, n) # vector of per-gene correlations with Zyxin
for (i in 1:n){
y.perm = golub[i,]
t.perm[i] = cor(Zyxin, y.perm) # correlation of gene i with Zyxin
                               # (not a permutation, despite the variable names)
}
#(a) How many of the genes have correlation values less than negative 0.5?
#(Those genes are highly negatively correlated with Zyxin gene).
sum(t.perm<(-0.5))
#(b) Find the gene names for the top five genes that are most negatively
#correlated with Zyxin gene.
o <- order(t.perm,decreasing=FALSE)
golub.gnames[o[1:5],2]
#(c) Using the t-test, how many genes are negatively correlated with the Zyxin gene?
# NOTE(review): cor.test() here is two-sided, so the FDR-significant set also
# contains positively correlated genes -- confirm intended for this count.
p.values <- apply(golub, 1, function(x) cor.test(Zyxin,x)$p.value)
p.fdr <-p.adjust(p=p.values, method="fdr") # adjust for multiple testing (FDR)
sum(p.fdr<0.05)
#problem 3
#On the Golub et al. (1999) data set, regress the expression values for the GRO3
#GRO3 oncogene on the expression values of the GRO2 GRO2 oncogene.
rm(list=ls()) #clears environment
data(golub,package="multtest") #Load golub data from multtest package
GRO2_d3 <- golub[2714,] #GRO2 gene.
GRO3_x6 <- golub[2715,] #GRO3 gene.
#(a) Is there a statistically significant linear relationship between the two genes'
#expression?
cor.test(GRO2_d3,GRO3_x6) #test if true correlation=0
#(b)Test if the slope parameter is less than 0.5 at the alpha = 0.05 level.
reg.fit<-lm(GRO3_x6 ~ GRO2_d3) #Regression GRO3_x6 = b0 + b1*GRO2_d3
reg.fit #Results of the regression fit
#b0 = -0.8426 and b1 = 0.3582
summary(reg.fit) #summary of regression results
#90% two-sided CIs correspond to one-sided tests at the 0.05 level
confint(reg.fit, level=0.9) #Show 90% 2-sided CIs from regression fit
#(c)Find an 80% prediction interval for the GRO3 GRO3 oncogene expression when
#GRO2 GRO2 oncogene is not expressed (zero expression value).
predict(reg.fit, newdata=data.frame(GRO2_d3=0), interval="prediction", level=0.8)
#(d) Check the regression model assumptions.
shapiro.test(residuals(reg.fit)) #normality test on residuals
plot(reg.fit,which=1) #residuals vs fitted (constant variance / linearity)
plot(reg.fit,which=2) #normal Q-Q plot of residuals
#problem 4
#For this problem, work with the data set stackloss that comes with R.
#You can get help on the data set with ?stackloss command.
rm(list=ls()) #clears environment
?stackloss
#(a) Regress stack.loss on the other three variables.
#What is the fitted regression equation?
lin.reg<-lm(stack.loss~Air.Flow+Water.Temp+Acid.Conc., data=stackloss) #multiple regression of stack.loss on 3 variables
summary(lin.reg) #summary of regression results (coefficients give the fitted equation)
#(c) Find a 90% confidence interval and 90% prediction interval for stack.loss when Air.Flow=60, Water.Temp=20 and Acid.Conc.=90.
#confidence interval (for the mean response at these covariate values)
predict(lin.reg, newdata=data.frame(Air.Flow=60, Water.Temp=20, Acid.Conc.=90), interval="confidence", level=0.9)
#prediction interval (for a single new observation; wider than the CI)
predict(lin.reg, newdata=data.frame(Air.Flow=60, Water.Temp=20, Acid.Conc.=90), interval="prediction", level=0.9)
|
044cabf0ad98717588723c66094b3d07a58bc47d | 29585dff702209dd446c0ab52ceea046c58e384e | /SimHaz/R/method1.R | ccd2782fad1566a1b138bc52c608d606cd3b4ee5 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,901 | r | method1.R | ### method 1
simulWeib <- function(N, duration, lambda, rho, beta, rateC,exp.prop, min.futime)
{
# covariate --> N Bernoulli trials
expose<-rbinom(n=N,size=1,prob=exp.prop)
# Weibull latent event times
v <- runif(n=N)
Tlat <- (- log(v) / (lambda * exp(expose * beta)))^(1 / rho)
# censoring times
C <- rexp(n=N, rate=rateC)
C=pmin(C,rep(duration,length(C)))
# follow-up times and event indicators
time <- pmin(Tlat, C)
status <- as.numeric(Tlat <= C)
start = rep(0,length(time)) #all start at 0
if(min.futime==0){
return(data.frame(id=1:N,start=start,stop=time,status=status,x=expose))
}
else{
return(data.frame(id=1:N,start=start,stop=time,status=status,x=expose)[which(time>min.futime),])
}
}
# Generate a time-dependent-exposure dataset (method 1) in counting-process
# (start, stop] format, suitable for coxph(Surv(start, stop, status) ~ x).
#
# prop.fullexp:       proportion of exposed subjects kept fully exposed
#                     (default 0; values in [0, 1))
# maxrelexptime:      maximum exposure onset time relative to each subject's
#                     follow-up (default 1; values in (0, 1])
# min.postexp.futime: minimum post-exposure follow-up time; exposed subjects
#                     with less post-exposure follow-up are dropped
#' @export
tdSim.method1<-function(N, duration=24,lambda, rho=1, beta, rateC,exp.prop,
                        prop.fullexp=0,maxrelexptime=1,min.futime=0, min.postexp.futime=0){
  # start from a fixed-exposure dataset produced by simulWeib()
  data <- simulWeib(N, duration,lambda, rho, beta, rateC,exp.prop,min.futime)
  if(prop.fullexp==0){
    # all exposed subjects receive a time-dependent exposure onset
    data_tdexposed<-data[data$x==1,]
  }
  else{
    # otherwise only a random (1 - prop.fullexp) fraction of the exposed
    # subjects switch exposure mid-follow-up
    id_tdexposed<-sample(x = data[data$x==1,]$id,size = round(nrow(data[data$x==1,])*(1-prop.fullexp)))
    data_tdexposed<-data[data$id %in% id_tdexposed,]
  }
  # draw each subject's exposure onset uniformly on (0, stop * maxrelexptime)
  data_tdexposed$t_exposed<-runif(nrow(data_tdexposed),0,data_tdexposed$stop*maxrelexptime)
  if(min.postexp.futime>0){
    if(sum(data_tdexposed$stop-data_tdexposed$t_exposed>min.postexp.futime) == 0){
      # NOTE(review): this only warns; the subset below then yields zero
      # exposed rows and the merges may misbehave -- confirm intended
      print('no exposure left')
    }
    # drop exposed subjects with insufficient post-exposure follow-up
    data_tdexposed <- data_tdexposed[data_tdexposed$stop-data_tdexposed$t_exposed>min.postexp.futime,]
  }
  # Split each exposed subject into two counting-process rows:
  # new_data1 covers (0, t_exposed] unexposed with no event;
  # new_data2 covers (t_exposed, stop] exposed, carrying the original status.
  new_data1<-data_tdexposed
  new_data2<-data_tdexposed
  new_data1$id<-data_tdexposed$id
  new_data1$start<-data_tdexposed$start
  new_data1$stop<-data_tdexposed$t_exposed
  new_data1$status<-0
  new_data1$x<-0
  new_data2$id<-data_tdexposed$id
  new_data2$start<-data_tdexposed$t_exposed
  new_data2$stop<-data_tdexposed$stop
  new_data2$x<-1
  new_data2$status<-data_tdexposed$status
  # stack the two row sets and drop any incomplete rows
  merged_tdexposed<-subset(na.omit(merge(new_data1,new_data2,all.x=TRUE,all.y=TRUE)))
  # remove the helper column so columns match the unexposed subjects
  merged_tdexposed$t_exposed<-NULL
  # recombine with the never-exposed subjects (one row each)
  full_data<-merge(merged_tdexposed,data[data$x==0,],all.x=TRUE,all.y=TRUE)
  return(full_data)
}
# Run nSim simulation replicates and summarise the power of a Cox model to
# detect the exposure effect.
#
# nSim:      number of simulation replicates
# type:      "fixed" for a time-invariant exposure (simulWeib); anything else
#            uses the time-dependent generator (tdSim.method1)
# scenario:  label written into the output row
# output.fn: CSV file the one-row summary is appended to
# simu.plot: when TRUE, one example dataset is drawn and plotted first
#
# Returns (and writes to output.fn) a one-row data.frame with the input
# settings and averaged results; "pow" is the proportion of replicates whose
# exposure effect is significant at alpha = 0.05.
#' @export
getpower.method1<-function(nSim, N,duration=24,med.TTE.Control=24,rho=1,med.TimeToCensor=14,beta,exp.prop,type,scenario,
                           prop.fullexp=0,maxrelexptime=1,min.futime=0,min.postexp.futime=0,output.fn,simu.plot=FALSE)
{
  # convert median event / censoring times into exponential-type rates
  lambda=log(2)/med.TTE.Control
  rateC=log(2)/med.TimeToCensor
  #numsim=500
  # one row of results per replicate
  res=matrix(0,nSim,10)
  colnames(res)=c("N.eff","N.effexp.p","betahat","HR","signif","events",
                  "events_c","events_exp","medsurvt_c","medsurvt_exp")
  alpha=.05 # significance level used for the power calculation
  if(simu.plot){
    # draw one example dataset with the same seed used by the simulation
    set.seed(999)
    if(type == "fixed"){
      dat <- simulWeib(N=N, duration=duration,lambda=lambda, rho=rho, beta=beta, rateC=rateC,
                       exp.prop=exp.prop,min.futime=min.futime)
    }
    else{
      dat <- tdSim.method1(N=N, duration=duration,lambda=lambda, rho=rho, beta=beta, rateC=rateC,
                           exp.prop=exp.prop,prop.fullexp=prop.fullexp,maxrelexptime=maxrelexptime,
                           min.futime=min.futime,min.postexp.futime=min.postexp.futime)
    }
    plot_simuData(dat)
  }
  set.seed(999)
  for(k in 1:nSim)
  {
    # simulate one dataset per replicate
    if(type == "fixed"){
      dat <- simulWeib(N=N, duration=duration,lambda=lambda, rho=rho, beta=beta, rateC=rateC,
                       exp.prop=exp.prop,min.futime=min.futime)
    }
    else{
      dat <- tdSim.method1(N=N, duration=duration,lambda=lambda, rho=rho, beta=beta, rateC=rateC,
                           exp.prop=exp.prop,prop.fullexp=prop.fullexp,maxrelexptime=maxrelexptime,
                           min.futime=min.futime,min.postexp.futime=min.postexp.futime)
    }
    # Cox model and Kaplan-Meier fit by exposure group
    fit <- coxph(Surv(start,stop, status) ~ factor(x), data=dat)
    sfit <- survfit(Surv(start,stop, status) ~ factor(x), data=dat)
    res[k,"N.eff"] <- length(unique(dat$id))
    # NOTE(review): for time-dependent data sum(dat$x) counts exposed ROWS,
    # not exposed subjects -- confirm intended
    res[k,"N.effexp.p"] <- sum(dat$x)/length(unique(dat$id))
    res[k,"betahat"] <- summary(fit)$coef[,"coef"]
    res[k,"HR"] <- summary(fit)$coef[,"exp(coef)"]
    res[k,"signif"] <- ifelse(summary(fit)$coef[,"Pr(>|z|)"]<alpha,1,0)
    res[k,"events"] <- sum(dat$status)
    res[k,"events_c"] <- summary(sfit)$table[1,'events']
    res[k,"events_exp"] <- summary(sfit)$table[2,'events']
    res[k,"medsurvt_c"] <- summary(sfit)$table[1,'median']
    res[k,"medsurvt_exp"] <- summary(sfit)$table[2,'median']
  }
  # assemble a single summary row: inputs (i_*) plus averaged outputs
  df=data.frame(i_scenario=scenario,
                i_type=type,
                i_N=N,
                i_min.futime=min.futime,
                i_min.postexp.futime=min.postexp.futime,
                i_exp.prop=exp.prop,
                i_lambda=lambda,
                i_rho=rho,
                i_rateC=rateC,
                i_beta=beta,
                N_eff=mean(res[,"N.eff"]),
                N_effexp_p=mean(res[,"N.effexp.p"]),
                bhat=mean(res[,"betahat"]),
                HR=mean(res[,"HR"]),
                d=mean(res[,"events"]),
                d_c=mean(res[,"events_c"]),
                d_exp=mean(res[,"events_exp"]),
                mst_c=mean(na.omit(res[,"medsurvt_c"])),
                mst_exp=mean(na.omit(res[,"medsurvt_exp"])),
                pow=mean(res[,"signif"])
  )
  # append to the output file, writing the header only on first creation
  if(file.exists(output.fn)){
    write.table(df,file=output.fn,row.names=FALSE,col.names=FALSE,append=TRUE,sep=",")
  }
  else{
    write.table(df,file=output.fn,row.names=FALSE,col.names=TRUE,sep=",")
  }
  return(df)
}
|
0c53abff30b7cda523ee648dadc6b398b10407bf | 5726af6f671d6cc37b5d9e8e2b1e5fac946e357b | /Michael/Scratch.R | 38419477f7025f1e9b059cf7c7c792d2d205f979 | [] | no_license | michael-huber2772/STAT5650_Final_Project | 4f2ca34371e0b4a9fe4212df0e12ac75fd5ee477 | 870280e06b9f49af1979005567695723f5a9c49d | refs/heads/master | 2022-06-20T20:15:09.243538 | 2020-05-10T16:53:11 | 2020-05-10T16:53:11 | 256,347,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,013 | r | Scratch.R | library(ggplot2)
library(gridExtra)
library(MASS)
library(vioplot)
library(verification)
library(caret)
library(rpart)
library(e1071) # SVM
library(gbm) # GBM
library(klaR)
library(dplyr)
hotel = read.csv("../Data/hotel_bookings.csv")
hotel1 = subset(na.omit(hotel), select = -c(country, agent, company, reservation_status, reservation_status_date))
# Cohen's kappa for a 2x2 confusion table, with its large-sample standard
# error (asymptotic variance formula).
# x: a 2x2 contingency table or matrix of counts.
# Returns c(kappa estimate, standard error).
kappa <- function(x) {
  n <- sum(x)
  p <- x / n                          # cell proportions
  row.marg <- rowSums(p)              # row marginals
  col.marg <- colSums(p)              # column marginals
  p.obs <- p[1, 1] + p[2, 2]          # observed agreement
  p.exp <- sum(row.marg * col.marg)   # chance-expected agreement
  kap <- (p.obs - p.exp) / (1 - p.exp)
  # components of the asymptotic variance
  diag.part <- sum(diag(p) * ((1 - p.exp) - (1 - p.obs) * (row.marg + col.marg))^2)
  off.part <- p[1, 2] * (col.marg[1] + row.marg[2])^2 +
    p[2, 1] * (col.marg[2] + row.marg[1])^2
  bias.part <- (p.obs * p.exp - 2 * p.exp + p.obs)^2
  vhat <- (diag.part + off.part * (1 - p.obs)^2 - bias.part) / (n * (1 - p.exp)^4)
  c(kap, sqrt(vhat))
}
# Summarise binary classification performance.  Builds a confusion table from
# truth (0/1) and predicted probabilities, then reports percent correctly
# classified, specificity, sensitivity, Cohen's kappa (via kappa() above) and
# the AUC (verification::roc.area).  Returns a two-column character matrix of
# labels and values.
class.sum <- function(truth, predicted) {
  # the tiny offset makes 0.5 round up, dichotomising probabilities at 0.5
  xtab <- table(truth, round(predicted + 0.000001))
  pct.correct <- round(100 * sum(diag(xtab)) / sum(xtab), 2)
  specificity <- round(100 * xtab[1, 1] / sum(xtab[1, ]), 2)
  sensitivity <- round(100 * xtab[2, 2] / sum(xtab[2, ]), 2)
  kappa.stat <- round(kappa(xtab)[1], 4)
  auc <- round(roc.area(truth, predicted)$A, 4)
  cbind(c("Percent Correctly Classified = ", "Specificity = ",
          "Sensitivity = ", "Kappa =", "AUC= "),
        c(pct.correct, specificity, sensitivity, kappa.stat, auc))
}
# Fit a fully-grown classification tree (cp = 0) to inspect the
# complexity-parameter curve.
set.seed(5341)
hotel1.rpart <- rpart(is_canceled ~ ., data = hotel1, method = "class",
                      control = rpart.control(cp = 0.0, minsplit = 2))
plotcp(hotel1.rpart)
#### 5 splits ####
# Prune to the cp value read off the plotcp() curve (about 5 splits).
hotel1.rpart5 <- rpart(is_canceled ~ ., data = hotel1, method = "class",
                       control = rpart.control(cp = 1.2981e-02, minsplit = 2))
plot(hotel1.rpart5, margin = 0.1)
text(hotel1.rpart5, use.n = TRUE)
#### CROSS VALIDATION ACCURACY FOR 5 SPLITS ####
set.seed(5341)
# assign each row to one of 10 folds, in random order
xvs <- rep(1:10, length = nrow(hotel1))
xvs <- sample(xvs)
# BUG FIX: the original preallocated rep(0, length(nrow(hotel1))), i.e. a
# length-1 vector (length() of a scalar is 1); it only worked because indexed
# assignment silently grows the vector.  Preallocate the full length.
hotel1.rpart5.xval <- rep(0, nrow(hotel1))
for (i in 1:10) {
  train <- hotel1[xvs != i, ]
  test <- hotel1[xvs == i, ]
  # NOTE(review): this refits with cp = 0.0 (a full tree) although the section
  # header says "5 splits"; use cp = 1.2981e-02 here if the pruned tree is the
  # one to cross-validate -- confirm intended
  rp <- rpart(is_canceled ~ ., method = "class", data = train,
              control = rpart.control(cp = 0.0, minsplit = 2))
  hotel1.rpart5.xval[xvs == i] <- predict(rp, test, type = "prob")[, 2]
}
table(hotel1$is_canceled, round(hotel1.rpart5.xval))
class.sum(hotel1$is_canceled, hotel1.rpart5.xval)
##################################################################################################
# Support Vector Machines
##################################################################################################
# Drop the 'hotel' column, then draw normal Q-Q plots for each numeric
# predictor (factor columns are left commented out since qqnorm needs
# numeric input).  mfrow=c(13,2) lays out up to 26 panels.
Hotel = names(hotel) %in% c("hotel")
HotelDataSansHotel= hotel[!Hotel]
par(mar=c(.5,.5,.5,.5))
par(mfrow=c(13,2))
qqnorm(HotelDataSansHotel$lead_time, main="lead_time")
qqnorm(HotelDataSansHotel$arrival_date_year, main="arrival_date_year")
#qqnorm(HotelDataSansHotel$arrival_date_month, main="lead_time")
qqnorm(HotelDataSansHotel$arrival_date_week_number, main="arrival_date_week_number")
qqnorm(HotelDataSansHotel$arrival_date_day_of_month, main="arrival_date_day_of_month")
qqnorm(HotelDataSansHotel$stays_in_weekend_nights, main="stays_in_weekend_nights")
qqnorm(HotelDataSansHotel$stays_in_week_nights, main="stays_in_week_nights")
qqnorm(HotelDataSansHotel$adults, main="adults")
qqnorm(HotelDataSansHotel$children, main="children")
qqnorm(HotelDataSansHotel$babies, main="babies")
#qqnorm(HotelDataSansHotel$meal)
#qqnorm(HotelDataSansHotel$market_segment)
#qqnorm(HotelDataSansHotel$distribution_channel)
qqnorm(HotelDataSansHotel$is_repeated_guest, main="is_repeated_guest")
qqnorm(HotelDataSansHotel$previous_cancellations, main="previous_cancellations")
qqnorm(HotelDataSansHotel$previous_bookings_not_canceled, main="previous_bookings_not_canceled")
#qqnorm(HotelDataSansHotel$reserved_room_type)
#qqnorm(HotelDataSansHotel$assigned_room_type)
qqnorm(HotelDataSansHotel$booking_changes, main="booking_changes")
#qqnorm(HotelDataSansHotel$deposit_type)
qqnorm(HotelDataSansHotel$days_in_waiting_list, main="days_in_waiting_list")
#qqnorm(HotelDataSansHotel$customer_type)
qqnorm(HotelDataSansHotel$adr, main="adr")
qqnorm(HotelDataSansHotel$required_car_parking_spaces, main="required_car_parking_spacese")
qqnorm(HotelDataSansHotel$total_of_special_requests, main="total_of_special_requests")
#subset to include only binomial and numeric variables
nonNumericVariables = names(hotel) %in% c("hotel", "arrival_date_month", "meal", "country", "market_segment", "distribution_channel", "reserved_room_type", "assigned_room_type", "deposit_type", "agent", "company", "customer_type", "reservation_status", "reservation_status_date")
HotelDataSubset=hotel[!nonNumericVariables]
colnames(HotelDataSubset)
library(tidyr) # drop_na()
# remove rows with any missing values
HotelDataNumericSansNAN = drop_na(HotelDataSubset)
# full logistic regression of cancellation on all remaining predictors
HotelDataNumeric.lr = glm(is_canceled~ . ,family=binomial,data=HotelDataNumericSansNAN)
#confusion matrix and accuracy (Dr. Cutler's way)
table(HotelDataNumericSansNAN$is_canceled,round(predict(HotelDataNumeric.lr,type="response")))
class.sum(HotelDataNumericSansNAN$is_canceled,predict(HotelDataNumeric.lr,type="response"))
# NOTE(review): this refit is identical to the model fitted above
HotelDataNumeric.lr = glm(is_canceled~ . ,family=binomial,data=HotelDataNumericSansNAN)
# stepwise (both-direction) AIC model selection
step.model=stepAIC(HotelDataNumeric.lr, direction = "both", trace = FALSE)
summary(step.model)
step.model
# apparent (resubstitution) accuracy of the stepwise model
class.sum(HotelDataNumericSansNAN$is_canceled,predict(step.model,type="response"))
# 10-fold cross-validated predictions for the stepwise-selected logistic model
HotelDataNumeric.lr.xval=rep(0,nrow(HotelDataNumericSansNAN))
xvs=rep(1:10,length=nrow(HotelDataNumericSansNAN))
xvs=sample(xvs)
for(i in 1:10){
  train=HotelDataNumericSansNAN[xvs!=i,]
  test=HotelDataNumericSansNAN[xvs==i,]
  glub=glm(is_canceled~ lead_time + arrival_date_year + arrival_date_week_number +
             stays_in_weekend_nights + adults + children + babies + is_repeated_guest +
             previous_cancellations + previous_bookings_not_canceled +
             booking_changes + days_in_waiting_list + adr + required_car_parking_spaces +
             total_of_special_requests ,family=binomial,data=train)
  HotelDataNumeric.lr.xval[xvs==i]=predict(glub,test,type="response")
}
# BUG FIX: the original referenced 'HotelDataNumericSansNAN.lr.xval', a name
# that is never defined (the vector filled above is 'HotelDataNumeric.lr.xval'),
# so these two lines errored at runtime.
table(HotelDataNumericSansNAN$is_canceled,round(HotelDataNumeric.lr.xval))
class.sum(HotelDataNumericSansNAN$is_canceled,HotelDataNumeric.lr.xval)
|
486cbe1f43e3f8716703125284e46811f8793283 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/OpenRepGrid/examples/indexPvaff.Rd.R | 7e845ec04f2aad079d57c8b3af4e7e3e2ca53c44 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 281 | r | indexPvaff.Rd.R | library(OpenRepGrid)
### Name: indexPvaff
### Title: Percentage of Variance Accounted for by the First Factor (PVAFF)
### Aliases: indexPvaff
### ** Examples
indexPvaff(bell2010)
indexPvaff(feixas2004)
# save results to object
p <- indexPvaff(bell2010)
p
|
075c8d13700db375ad6ad8e5c8613d9ba3796435 | 2ba22f489011cfb61d6727ab522bf3904f78eefc | /man/simil_groups_cata.Rd | d86e78b2181e14ead1259529576cb3237d24ddd8 | [] | no_license | cran/ClustBlock | 847297472d9cc6f05bad33b23fd78a48938dead7 | eed656e469929805b6c72f465912c965fe9f580f | refs/heads/master | 2023-07-07T05:26:15.786420 | 2023-06-29T17:00:02 | 2023-06-29T17:00:02 | 174,553,356 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,811 | rd | simil_groups_cata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simil_groups_cata.R
\name{simil_groups_cata}
\alias{simil_groups_cata}
\title{Testing the difference in perception between two predetermined groups of subjects in a CATA experiment}
\usage{
simil_groups_cata(Data, groups, one=1, two=2, nperm=50, Graph=TRUE,
alpha= 0.05, printl=FALSE)
}
\arguments{
\item{Data}{data frame or matrix. Correspond to all the blocks of variables merged horizontally}
\item{groups}{categorical vector. The groups of each subject . The length must be the number of subjects.}
\item{one}{string. Name of the group 1 in groups vector.}
\item{two}{string. Name of the group 2 in groups vector.}
\item{nperm}{numerical. How many permutations are required? Default: 50}
\item{Graph}{logical. Should the CATATIS graph of each group be plotted? Default: TRUE}
\item{alpha}{numerical between 0 and 1. What is the threshold of the test? Default: 0.05}
\item{printl}{logical. Print the number of remaining permutations during the algorithm? Default: FALSE}
}
\value{
a list with:
\itemize{
\item decision: the decision of the test
\item pval: pvalue of the test
}
}
\description{
Test adapted to CATA data to determine whether two predetermined groups of subjects have a different perception or not. For example, men and women.
}
\examples{
data(straw)
groups=sample(1:2, 114, replace=TRUE)
simil_groups_cata(straw, groups, one=1, two=2)
}
\references{
Llobell, F., Giacalone, D., Jaeger, S.R. & Qannari, E. M. (2021). CATA data: Are there differences in perception? JSM conference.\cr
Llobell, F., Giacalone, D., Jaeger, S.R. & Qannari, E. M. (2021). CATA data: Are there differences in perception? AgroStat conference.
}
\keyword{CATA}
|
dbb9e4e85f50466851735af5614b2143d1ad9f8b | d07d30d3544f6b59be3bc81b29298b9edcddff1b | /save_my_file.R | 9385a366f84b960848fe3f52fa2fc4bc2d6b376a | [] | no_license | brouwern/FUNCTIONS | 9569a3521abfe32bdef06d02ed86b4f661d97c2e | 010a8c46efc22d5f01aa9ce65e73953b0d2a9c58 | refs/heads/master | 2021-01-01T16:36:43.262233 | 2017-07-28T17:12:38 | 2017-07-28T17:12:38 | 97,870,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 706 | r | save_my_file.R | save.my.file <- function(object.,
abs.root. = abs.root.,
rel.dir. = rel.dir.,
file.name.,
extension){
full.name <- paste(abs.root.,
rel.dir.,
file.name.,
sep = "/")
full.name <- paste(full.name,
extension,
sep = ".")
full.name <- gsub("\\.\\.",".",full.name)
print(full.name)
if(extension == ".csv" | extension == "csv"){
write.csv(object., file = full.name, row.names = F)
}
if(extension == ".Rdata" | extension == "Rdata"){
save(object., file = full.name)
}
} |
adbcd98b91204596830d667e368832da677c16cb | b2eeb5e69ce34680a1aeb6259a549cf0d8153978 | /R/CEUDataRd.R | 8fc7dfccd013bb8afd3e3d9cb2dd32204fd7d255 | [] | no_license | SFUStatgen/LDheatmap | abb3444304c6185287436fc482ae5b45c06f7cfc | 510c60056c371127c4a2fc63c0d1b032717bde62 | refs/heads/master | 2023-03-09T17:57:04.521184 | 2023-02-24T01:15:32 | 2023-02-24T01:15:32 | 131,069,485 | 11 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,353 | r | CEUDataRd.R | #' @name CEUSNP
#' @aliases CEUDist
#' CEUSNP
#' CEUDist
#' hapmapCEU
#' @docType data
#' @title Example data set for LDheatmap
#' @description CEUSNP: Genotypes on 15 SNPs for 60 people
#'
#' CEUDist: Physical map positions of the 15 SNPs in CEUSNP
#'
#' @usage data(CEUSNP); data(CEUDist)
#' @format CEUSNP: A data frame of SNP genotypes.
#' Each row represents an individual.
#' Each column represents a SNP.
#'
#' CEUDist: A vector of integers giving the SNP physical map locations
#' (base-pair positions) on the chromosome.
#' @details Data on SNPs with minor allele frequency greater
#' than 5\% from a 9kb region of chromosome 7 (base positions 126273659
#' through 126282556 from release 7 of the International HapMap Project).
#' Genotypes from 30 parent-offspring trios (both
#' parents, one offspring) were obtained.
#' The 30 trios are taken from the so-called CEPH families, a set of
#' multi-generational families from Utah with ancestry from northern and
#' western Europe. From this set of 90 people, the 60 parents were extracted.
#' @source International HapMap Project \url{ftp://ftp.ncbi.nlm.nih.gov/hapmap/}
#' @references The International HapMap Consortium. A haplotype map of
#' the human genome. Nature 437, 1299-1320. 2005.
#' @examples data(CEUData)
#' @keywords datasets
NULL
e79b32c7cc37b36b5c761f1f38eb24664ceefbde | 7035407d841fc5c60459f96d63d30cdda1322f67 | /Zero Inflated Poisson model for distribution for 5m 2014-4-21.R | 2081bc07fbb287543de20e7273135dbdaf377d2b | [] | no_license | wangbinzjcc/DeadStandingTrees | cafd634b7e193df5fdea57ad97b6b1f5a2f17adb | f3d95f7e93927ce5c05fea5012588a1a92feb142 | refs/heads/master | 2021-01-02T08:52:48.246776 | 2014-04-22T18:40:25 | 2014-04-22T18:40:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,376 | r | Zero Inflated Poisson model for distribution for 5m 2014-4-21.R | #################################################################
# zero inflated model for distribution wangbinzjcc 2014-4-21
#################################################################
#
# rm(list=ls())
#############################
setwd("F:\\DataW\\DeadStandingTrees")
dir()
##
#dea.5 <- read.csv("DeadData-5m-abun.csv")
dea.5 <- read.csv('DeadData-5m-abun-mrt2014-4-22.csv')
head(dea.5)
#
XX <- dea.5
for(i in 1:dim(XX)[1]){
x0 <- XX[i, 'x']
y0 <- XX[i, 'y']
logi.0 <- abs(XX$x-x0)<=1 & abs(XX$y-y0)<=1
logi.0[i] <- FALSE
XX[i,"mean.neigh"] <- round(mean(XX[logi.0,'Abu']), 2)
}
dea.5 <- XX
Abu.5 <- dea.5$Abu
hist(Abu.5)
yy=Abu.5
###########################################################
topo.5 <- read.csv("topograCal-5m.csv" )
head(topo.5)
t5.p3 <- topo.5[, c('meanelev', 'slope', 'convex')]
t5.p3.cs <- apply(t5.p3, 2, CenterScale, Binary="centerscale")
t5.p3.cs <- as.matrix(t5.p3.cs)
t5.p3.poly3 <- poly(t5.p3.cs, degree=3,raw =T )
t5.p3.poly3 <- as.data.frame(t5.p3.poly3[,c('1.0.0','2.0.0','3.0.0','0.1.0','0.2.0','0.3.0','0.0.1','0.0.2','0.0.3')])
names(t5.p3.poly3) <- paste(rep(names(t5.p3),each=3), rep(1:3,times=3), sep='')
t5.asp <- data.frame(cos.asp = cos(topo.5$aspect*pi/180),
sin.asp = sin(topo.5$aspect*pi/180))
X.t5.cs <- cbind(t5.p3.poly3, t5.asp)
head(X.t5.cs)
###############################################################
#
###############################################################
require(LaplacesDemon)
# ###############################################################
# # testing data ~~~~~~~~ (synthetic-data sanity check, kept commented)
# # ####################################
# set.seed(1)
# N=100
# X1 <- matrix(runif(N*12),N,12) ; X1[,1] <- 1
#
# beta.zero <- c(-2,runif(4, -2, 2))
# beta.one <- c(1, runif(11, -1, 1))
# #
# lamb.log.one <- X1[,1:12] %*% beta.one
# lamb.one <- exp(lamb.log.one)
# mean(lamb.one)
# hist(lamb.one)
# #
# prob.zero.logit <- X1[,1:5] %*% beta.zero
# prob.ze <- invlogit(prob.zero.logit)
# y <- ifelse(test= prob.ze>0.5, yes=0, no=lamb.one)
# yy <- rpois(N,lambda=y)
# z <- ifelse(test= y>0, yes=1, no=0)
# #
# hist(yy)
# #
# #############################################################
# PGF <- function(Data) return(c(rnorm(17,0,1)))
# MyData <- Data <- list( parm.names=names(c(beta.zero=rep(0,5), beta.one=rep(0,12))),
#                         y=yy,X=X1,
#                         PGF=PGF,mon.names=c("Deviance") )
#####################################################################
# Real data: response is quadrat abundance, covariates are the topographic
# design matrix built above.
yy=dea.5$Abu
# NOTE(review): Auto (mean neighbour abundance) is computed but never placed
# in MyData$X or used by Model() -- confirm whether the autocovariate was
# meant to enter the model.
Auto=dea.5$mean.neigh
xx=apply(X.t5.cs,2,as.numeric)
head(xx)
mode(xx)
#
# Parameter-Generating Function: 13 count-model coefficients (intercept + 12)
# plus 4 zero-inflation coefficients, drawn from standard normals.
PGF <- function(Data) return(c(rnorm(1+12,0,1), rnorm(4,0,1)))
#
MyData <- Data <- list( parm.names=names(c(auto=0, beta.one=rep(0,12), beta.zero=rep(0,4))),
                        y=yy,X=xx,
                        PGF=PGF,mon.names=c("loglikelihood") )
parm <- PGF(Data)
#
###### Model for the zero-inflated Poisson distribution ~~~~~~~~~~~~~~~~#####
# LaplacesDemon model function: takes a parameter vector and the data list
# and returns the log-posterior plus monitored quantities.
Model <- function(parm, Data){
  # parameter
  # NOTE(review): 'auto' enters only the prior below, never the likelihood --
  # the autocovariate term appears unused; confirm intended
  auto <- parm[1]
  beta.one <- parm[2:13]     # Poisson (count) regression coefficients
  beta.zero <- parm[13+ 1:4] # zero-inflation (logit) coefficients
  #
  # log-linear predictor for the Poisson mean (intercept + 11 covariates)
  lamb.log.one <- cbind(alpha=1,Data$X) %*% beta.one
  lamb.one <- exp(lamb.log.one)
  #
  # zero-inflation uses only the linear terms of the three topographic covariates
  nam0 <- c('meanelev1','slope1','convex1')
  prob.zero.logit <- cbind(alpha=1,Data$X[,nam0]) %*% beta.zero
  prob.zero <- invlogit(prob.zero.logit)
  #
  # hard threshold: quadrats with P(zero) > 0.5 get mean 0, others lamb.one.
  # NOTE(review): dpois(y, lambda=0) is 0 for y > 0, so any positive count in
  # a "zero" quadrat makes LL = -Inf -- confirm this hard mixture is intended
  # rather than the usual smooth zero-inflated mixture.
  y.lamb <- ifelse(test= prob.zero>0.5, yes=0, no=lamb.one)
  #
  ### # priors distribution
  beta.prior <- sum(dnorm(c(auto,beta.one,beta.zero), 0, 5,log=T))
  lamb.one.prior <- sum(dgamma(lamb.one, 3, log=T))
  prob.zero.logit.prior <- sum(dnorm(prob.zero.logit, 0, 2, log=T))
  #
  # log-likelihood: Bernoulli part for the zero indicator + Poisson part
  LL <- sum(dbern(x=as.numeric(Data$y==0), prob=prob.zero , log=T),
            dpois(x=Data$y, lambda=y.lamb, log=T))
  # log-Posterior
  LP <- LL + beta.prior + lamb.one.prior + prob.zero.logit.prior
  #
  # NOTE(review): Monitor returns -2*LL (a deviance) although mon.names in
  # MyData labels it "loglikelihood" -- confirm which is intended
  Modelout <- list(LP=LP, Dev=-2*LL,
                   Monitor=c(-2*LL),
                   yhat=rpois(length(Data$y),lambda=y.lamb),
                   parm=parm)
  return(Modelout)
}
#
Initial.Values <- parm <- GIV(Model, MyData, n=100000, PGF=TRUE)
#
Initial.Values <- as.initial.values(Fit6)
#
Iterations <- 200000
Status <- 2000
Thinning <- 10
# ########################## Adaptive Metropolis ##########################
# Fit2 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
# Covar=Fit2$Covar,, Iterations=Iterations, Status=Status, Thinning=Thinning,
# Algorithm="AMWG", Specs=list(Periodicity=128))
################# Componentwise Hit-And-Run Metropolis ##################
Fit6 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=Iterations, Status=Status, Thinning=Thinning,
Algorithm="CHARM", Specs=NULL)
dput(summary(Fit6),'Fit6')
#dget('Fit6')
#
# ########## Componentwise Hit-And-Run (Adaptive) Metropolis #############
# Fit7 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
# Covar=NULL, Iterations=Iterations, Status=Status, Thinning=Thinning,
# Algorithm="CHARM", Specs=list(alpha.star=0.44))
#
# ##################### Robust Adaptive Metropolis #######################
# Fit18 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
# Covar=Fit18$Covar, Iterations=Iterations, Status=Status, Thinning=Thinning,
# Algorithm="RAM", Specs=list(20))
#
# ##########################################################################
#
Juxt <- Juxtapose(list(Fit2,Fit6,Fit7,Fit18 )); Juxt
plot(Juxt, Style="ISM")
#
Consort(Fit2)
Consort(Fit6)
Consort(Fit7)
Consort(Fit18)
Fit <- Fit6
Consort(Fit)
plot(BMK.Diagnostic(Fit, batches=10))
PosteriorChecks(Fit)
caterpillar.plot(Fit, Parms="beta")
BurnIn <- Fit$Rec.BurnIn.Thinned
plot(Fit, BurnIn, MyData, PDF=FALSE)
Pred <- predict(Fit, Model, MyData)
#
summary(Pred, Discrep="Chi-Square")
summary(Pred, Discrep="Kurtosis")
plot(Pred, Style="Covariates", Data=MyData)
plot(Pred, Style="Density", Rows=1:9)
plot(Pred, Style="ECDF")
plot(Pred, Style="Fitted")
plot(Pred, Style="Jarque-Bera")
plot(Pred, Style="Predictive Quantiles")
plot(Pred, Style="Residual Density")
plot(Pred, Style="Residuals")
Levene.Test(Pred)
Importance(Fit, Model, MyData, Discrep="Chi-Square")
############################################################################
# |
c8d420bca5a1ceb4f1f16d3b617a7babd2fb5459 | 46025f750d612169f2e335133f44e0deb243127a | /Test Functions.R | 82bae488da73e0ebba1fd5292219252080970a00 | [] | no_license | jamesijw23/spades_game | c78e848aef2e5c434d2e0ba0f87470c1444ea100 | e51902a827b42aebf9f160f2fdb985612a59f81e | refs/heads/master | 2020-12-20T20:04:55.209026 | 2020-01-25T16:50:06 | 2020-01-25T16:50:06 | 236,196,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,059 | r | Test Functions.R | #####
## Finds out whose turn it is based on who
## won last
#####
determine_turn = function(turn1){
turn2 = turn1+1;
turn3 = turn1+2;
turn4 = turn1+3;
if (turn2 > 4 ) {turn2=turn2-4;}
if (turn3 > 4) {turn3=turn3-4;}
if (turn4 > 4) {turn4=turn4-4;}
return(list(turn2=turn2,turn3=turn3,turn4=turn4))
}
#####
## Choose Suite(DONE)
#####
suite_fun=function(s){
if(s<1/3) {suite=1}
else if(s>=1/3 && s<=2/3) {suite=2}
else {suite=3}
return(suite)
}
suite_fun_ws=function(s){
if(s<=0.25) {
suite = 1
} else if(s > 0.25 & s <= 0.50) {
suite = 2
} else if (s > 0.50 & s<=0.75) {
suite = 3
} else {
suite = 4
}
return(suite)
}
#####
## find_suit_card Function(DONE)
## Finds the right cards based on suit
#####
find_suit_cards=function(chos_suit,player){
## 2) Gather the cards that is in the suite played
if(chos_suit==1){
tmp1 = as.matrix(player[player > 0 & player <=13,])
} else if(chos_suit==2){
tmp1 = as.matrix(player[player > 13 & player <=26,])
} else if(chos_suit==3){
tmp1 = as.matrix(player[player > 26 & player <=39,])
} else{
tmp1 = as.matrix(player[player > 39 & player <=52,])
}
return(tmp1)
}
test_func = function(X){
## 1) Always make card a character
X=as.character(X)
## 2) Split card into chucks of characters
tmp=strsplit(X,"")
## 3) Check if Card is valid
if(length(tmp[[1]])>1 & length(tmp[[1]]) < 4 ){
if(length(tmp[[1]])==3){
tmp1 = tmp[[1]][1:2];
tmp2=tmp[[1]][3];
tmp1=paste(tmp1[1],tmp1[2],sep="")
} else if (length(tmp[[1]])==2) {
tmp1=tmp[[1]][1];tmp2=tmp[[1]][2]
}
} else {
tmp1= (-9998) ## Not Appropriate Card
}
## 4) Finding Card's Number
if(tmp1==10){
card_number = 10
}else if (tmp1=='J') {
card_number = 11
} else if (tmp1=='Q') {
card_number = 12
} else if (tmp1=='K'){
card_number = 13
} else if (tmp1=='A') {
card_number =14
} else if (as.numeric(tmp1)<10 & as.numeric(tmp1)>1){
card_number = as.numeric(tmp1)
} else{
card_number = (-9997) ## Not an appropriate card number
}
card_number=(as.numeric(noquote(card_number))-1);
### 5) Finding Card's suit
if(tmp2=='c'){
suit_number=0
} else if(tmp2=='d'){
suit_number=13
} else if(tmp2=='h'){
suit_number=26
} else if(tmp2=='s'){
suit_number=39
} else{
suit_number=NA
}
## 6) Find total card value
card_total_number =suit_number+card_number
return(card_total_number)
}
cards_test = c(seq(2,10),'J','Q','K','A')
d_clubs <- paste(cards_test,'c',sep="")
d_diamonds <- paste(cards_test,'d',sep="")
d_hearts <- paste(cards_test,'h',sep="")
d_spades <- paste(cards_test,'s',sep="")
all_cards = rbind(as.matrix(d_clubs),
as.matrix(d_diamonds),
as.matrix(d_hearts),
as.matrix(d_spades))
plot(apply(all_cards,1,test_func))
abline(a=0,b=1)
|
f1a8e86d36d88fcbf16f06e97af98e3936d17ac3 | 0ffa92734e4571b7c73a09056c056eb443bcf317 | /man/multiverse.plot.Rd | 0ed3b04e98e7f200388a54c1cae92859a00a0b8e | [] | no_license | cran/splithalf | e37ade35d0d6394625efcda27523c5934a44af46 | 68800b85ffdd0c2c99b971a557d50e57c3807564 | refs/heads/master | 2022-08-28T17:37:51.085813 | 2022-08-11T13:30:02 | 2022-08-11T13:30:02 | 87,544,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,450 | rd | multiverse.plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_multiverse.R, R/threshold.R
\name{multiverse.plot}
\alias{multiverse.plot}
\alias{threshold}
\title{Visualising reliability multiverses}
\usage{
multiverse.plot(
multiverse,
title = "",
vline = "none",
heights = c(4, 5),
SE = FALSE
)
threshold(multiverse, threshold, use = "estimate", dir = "above")
}
\arguments{
\item{multiverse}{multiverse object}
\item{title}{string add a title to the plot? default is ""}
\item{vline}{add a vertical line to the plot, e.g. use .5 for the median reliability estimate}
\item{heights}{must be a vector of length 2, relative heights of plot panels. Defaults to c(4,5)}
\item{SE}{logical includes an additional panel to plot the standard errors of the scores. Note: the heights parameter must be a vector of length 3, e.g. c(2,2,3). Defaults to FALSE}
\item{threshold}{threshold to look for, e.g. 0.7}
\item{use}{set to check the reliability "estimates", or the "upper" or "lower" CIs}
\item{dir}{look "above" or "below" the 'use' at the set threshold}
}
\value{
Returns a visualization of a multiverse object
}
\description{
This function allows the user to plot the output from splithalf_multiverse or testretest_multiverse. The plot includes an upper panel with all reliability estimates (and CIs) and a lower panel that indicates the data processing specifications corresponding to that reliability estimate.
The (unofficial) function version name is "This function will make you a master in bird law"
This function examines the output from splithalf_multiverse or testretest_multiverse to extract the proportions of estimates above or below a set threshold (can be the estimate or the upper or lower CI estimates).
The (unofficial) function version name is "This function will get you up to here with it"
}
\examples{
\dontrun{
## see online documentation for examples
https://github.com/sdparsons/splithalf
## also see https://psyarxiv.com/y6tcz
## example simulated data
n_participants = 60 ## sample size
n_trials = 80
n_blocks = 2
sim_data <- data.frame(participant_number = rep(1:n_participants,
each = n_blocks * n_trials),
trial_number = rep(1:n_trials,
times = n_blocks * n_participants),
block_name = rep(c("A","B"),
each = n_trials,
length.out = n_participants * n_trials * n_blocks),
trial_type = rep(c("congruent","incongruent"),
length.out = n_participants * n_trials * n_blocks),
RT = rnorm(n_participants * n_trials * n_blocks,
500,
200),
ACC = 1)
## specify several data processing decisions
specifications <- list(RT_min = c(0, 100, 200),
RT_max = c(1000, 2000),
averaging_method = c("mean", "median"))
## run splithalf, and save the output
difference <- splithalf(data = sim_data,
outcome = "RT",
score = "difference",
conditionlist = c("A"),
halftype = "random",
permutations = 5000,
var.RT = "RT",
var.condition = "block_name",
var.participant = "participant_number",
var.compare = "trial_type",
var.ACC = "ACC",
compare1 = "congruent",
compare2 = "incongruent",
average = "mean")
## run splithalf.multiverse to perform the multiverse of data processing
## and reliability estimation
multiverse <- splithalf.multiverse(input = difference,
specifications = specifications)
## can be plot with:
multiverse.plot(multiverse = multiverse,
title = "README multiverse")
}
\dontrun{
## see online documentation for examples
https://github.com/sdparsons/splithalf
## also see https://psyarxiv.com/y6tcz
## example simulated data
n_participants = 60 ## sample size
n_trials = 80
n_blocks = 2
sim_data <- data.frame(participant_number = rep(1:n_participants,
each = n_blocks * n_trials),
trial_number = rep(1:n_trials,
times = n_blocks * n_participants),
block_name = rep(c("A","B"),
each = n_trials,
length.out = n_participants * n_trials * n_blocks),
trial_type = rep(c("congruent","incongruent"),
length.out = n_participants * n_trials * n_blocks),
RT = rnorm(n_participants * n_trials * n_blocks,
500,
200),
ACC = 1)
## specify several data processing decisions
specifications <- list(RT_min = c(0, 100, 200),
RT_max = c(1000, 2000),
averaging_method = c("mean", "median"))
## run splithalf, and save the output
difference <- splithalf(data = sim_data,
outcome = "RT",
score = "difference",
conditionlist = c("A"),
halftype = "random",
permutations = 5000,
var.RT = "RT",
var.condition = "block_name",
var.participant = "participant_number",
var.compare = "trial_type",
var.ACC = "ACC",
compare1 = "congruent",
compare2 = "incongruent",
average = "mean")
## run splithalf.multiverse to perform the multiverse of data processing
## and reliability estimation
multiverse <- splithalf.multiverse(input = difference,
specifications = specifications)
## the threshold function can be used to return the number of estimates
## above or below a certain threshold
threshold(multiverse = multiverse,
threshold = 0.7,
use = "estimate",
dir = "above")
}
}
|
5616a0ecb041eb9f1c2779e2cdf823d56a260391 | 257ffc3438528729b62bc3e7abc24eea2be6193e | /man/write.xts.Rd | 415d40f08e243767dc862ba8a08c53d069ef6fb5 | [
"MIT"
] | permissive | SHUD-System/rSHUD | 91e1ae7f077cf5efa52575a32ed4e692ed8034b9 | 1915a9cf2b241a1368b9768251b2f140454bd94e | refs/heads/master | 2023-07-06T11:07:18.335307 | 2023-07-01T15:08:11 | 2023-07-01T15:08:11 | 224,737,854 | 6 | 0 | null | null | null | null | UTF-8 | R | false | true | 371 | rd | write.xts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeInput.R
\name{write.xts}
\alias{write.xts}
\title{Write xts data out into file
\code{write.xts}}
\usage{
write.xts(x, file, append = F)
}
\arguments{
\item{x}{xts data}
\item{file}{file name}
\item{append}{whether append}
}
\description{
Write xts data out into file
\code{write.xts}
}
|
594ebdd499e49e046f2a6add1450c907c9832175 | 02dbb451925f35d1abe781360968596eaa59bcbd | /tests/testthat/test-lazy.counter.R | 6d8eaea9f48515372e37c41b761d5e99a5a1810a | [] | no_license | nutterb/lazyWeave | ad68c57edd703e83124fa7267314de1902050c47 | fe7ba16ff61dabcace079a9399050b2610224bac | refs/heads/master | 2021-06-06T04:26:31.480092 | 2018-01-25T00:19:56 | 2018-01-25T00:19:56 | 4,657,725 | 1 | 0 | null | 2015-07-27T00:59:17 | 2012-06-14T00:34:01 | R | UTF-8 | R | false | false | 25 | r | test-lazy.counter.R | context("lazy.counter")
|
c2845b90bfa397fcbb55abd3f0d49ee1b52bcea9 | 2ae68221fbe1fb2d8f9f9e2e5925bb6e4adb1aea | /plot4.R | 7a8d44879cf50e91bde4a0e06fea568bd948b95d | [] | no_license | iggypro/ExData_Plotting1 | f5c037a72b35b8fa0e807e85dd5064d7a042be38 | fe18005665dfdc66c0afe9311ad2970e52a1a81b | refs/heads/master | 2021-01-18T11:34:23.592137 | 2014-06-06T16:53:39 | 2014-06-06T16:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,486 | r | plot4.R | ## Exploratory Data Analysis - Course Project 1
## Plot4 script - June 6th, 2014
## Reading data from "Electric Power Consumption" source
readData <- function(path, startDate="01/02/2007", endDate="02/02/2007")
{
skipRows <- as.numeric(
difftime(as.Date(startDate,format="%d/%m/%Y"),
as.Date("17/12/2006",format="%d/%m/%Y"),
units="days"),
units="days")*1440 + 396 + 1
readRows <- as.numeric(
difftime(as.Date(endDate,format="%d/%m/%Y"),
as.Date(startDate,format="%d/%m/%Y"),
units="days"),
units="days")*1440 + 1440
return(read.table(path,sep=";",nrows=readRows,skip=skipRows,na.strings="?"))
}
## Creating a plot similar to
## https://github.com/iggypro/ExData_Plotting1/blob/master/figure/unnamed-chunk-5.png
createPlot4 <- function(path, startDate="01/02/2007", endDate="02/02/2007")
{
if(!file.exists(path)) return ("Data file doesn't exist")
data <- readData(path,startDate,endDate)
png("plot4.png")
##create 2x2 layout
par(mfrow=c(2,2))
##create plot 1/4
plot(c(1:length(data$V2)),data$V3,xlab="",ylab="Global Active Power",type="l",axes=F)
axis(side=1,at=seq(0,length(data$V2),by=1440),labels=c("Thu","Fri","Sat"))
axis(side=2,at=seq(0,6,by=2))
box()
##create plot 2/4
plot(c(1:length(data$V2)),data$V5,xlab="datetime",ylab="Voltage",type="l",axes=F)
axis(side=1,at=seq(0,length(data$V2),by=1440),labels=c("Thu","Fri","Sat"))
axis(side=2,at=seq(234,246,by=2))
box()
##create plot 3/4
plot(c(1:length(data$V2)),data$V7,xlab="",ylab="Energy sub metering",type="l",axes=F,
ylim=c(0,max(data$V7)))
par(new=T)
plot(c(1:length(data$V2)),data$V8,xlab="",ylab="",type="l",axes=F,
col="red",ylim=c(0,max(data$V7)))
par(new=T)
plot(c(1:length(data$V2)),data$V9,xlab="",ylab="",type="l",axes=F,
col="blue",ylim=c(0,max(data$V7)))
axis(side=1,at=seq(0,length(data$V2),by=1440),labels=c("Thu","Fri","Sat"))
axis(side=2,at=seq(0,30,by=10))
box()
legend(x="topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1),col=c("black","red","blue"),bty="n",cex=1)
##create plot 4/4
plot(c(1:length(data$V2)),data$V4,xlab="datetime",ylab="Global_reactive_power",type="l",axes=F)
axis(side=1,at=seq(0,length(data$V2),by=1440),labels=c("Thu","Fri","Sat"))
axis(side=2,at=seq(0.0,0.5,by=0.1))
box()
dev.off()
return("Plot4 has been saved to plot4.png in your working directory")
}
|
e18f4b556375c260382cf64d0cf609fca1063e2f | eb84df48022e055e3df06ec26ffb65e106bc4c9e | /R/corsig.R | 6c2d4dd3bfaab27e596c3f0d69e94a2f039701ea | [] | no_license | MarvinLawPhD/datasummariser | 7367b7a91a8ac47dd1b6b95bd2fdf7f0883f26da | 1391c9f1fb239de47ee079584564ce7718247cd0 | refs/heads/master | 2022-12-01T07:31:13.848704 | 2020-08-11T00:16:31 | 2020-08-11T00:16:31 | 285,495,081 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,106 | r | corsig.R | #' corsig
#'
#' This function follows stats::cor results to provide correlation results and
#' significance based on p-values in a table
#'
#' @param .data a dataframe or matrix with numeric values
#' @param decimals the number of decimals that are shown in the correlations
#' @param method methods used based on stats::cor methods
#' @param numbered whether to number the row and column names to condense results
#' @return a dataframe with correlations and significance based on p-values
#' @export
#'
#' @examples
#' irisnum <- iris %>%
#' select_if(is.numeric)
#' corsig(irisnum, decimals = 3, method = "kendall", numbered = T)
#' corsig(irisnum, decimals = 2, method = "pearson", numbered = F)
#'
#'
#'
corsig <- function(.data, decimals = 2, method = "pearson", numbered = T) {
d <- .data %>%
cor(method = method, use = "pairwise.complete.obs") %>%
round(decimals)
dd <- d %>%
as.data.frame() %>%
rownames_to_column() %>%
gather(key = key, val = val, -rowname)
vec <- c()
for (i in 1:nrow(dd)) {
vec <- c(vec, stats::cor.test(.data[, dd$rowname[i]], .data[, dd$key[i]])$p.value)
}
dd$pvalue <- vec
cort <- dd %>%
mutate(
sign = ifelse(pvalue < .001, "***",
ifelse(pvalue < .01, "**",
ifelse(pvalue < .05, "*",
""
)
)
),
val = paste0(str_replace(format(val, nsmall = 2), "0.", "."), sign)
) %>%
select(-pvalue, -sign) %>%
spread(key, val) %>%
mutate(rowname = factor(rowname, levels = rownames(d))) %>%
arrange(rowname) %>%
select(rowname, rownames(d))
colnames(.data) %in% cort$rowname
cort[upper.tri(cort, diag = FALSE)] <- ""
diag(cort[-1]) <- NA
if (numbered) {
rownames(cort) <- c(paste0(1:nrow(cort), ". ", cort$rowname))
cort <- cort %>% select(-rowname)
colnames(cort) <- 1:(ncol(cort))
} else {
rownames(cort) <- cort$rowname
cort <- cort %>% select(-rowname)
}
message("Correlations produced using ", method, ".")
return(cort)
}
|
3f4905c8bb2cfa44f17253c520788b2a60ebc36a | 73fe2ad06a6bf7e9738b3c5857ec7ab5c0e8e2e2 | /tests/testthat/test_packages/r_err_1/R/foo.R | cecdd3eb198abca346b8189d2dee23722acd5221 | [] | no_license | cran/potools | d0f371639ee442d900260b0efacea76dbbdecb67 | b42203fd72955c8337b7d8a78a4e60e6482c50f9 | refs/heads/master | 2023-06-22T01:19:11.394525 | 2021-07-12T06:00:02 | 2021-07-12T06:00:02 | 381,753,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 83 | r | foo.R | f1 <- function(x) {
# notranslate start
base::warning("I warned you!")
x+1
}
|
8ec265c133a76a46cd007145a564f4f8df8e2277 | 58da9138afdd901b4b26622132489a0f4298f32c | /Code/Functions/summarize_impacts.R | 490edbe1b88ddb0a4a65b25bc4c0d8434da9371d | [] | no_license | david-beauchesne/FoodWeb-MultiStressors | cc4f11d773d9a0ef054fd6fff46f72c29f7a431d | 0a8782ac10505d8932e4a3cd92b4b2f2b1304410 | refs/heads/master | 2023-04-18T20:45:59.773816 | 2021-06-22T17:20:54 | 2021-06-22T17:20:54 | 188,241,374 | 0 | 0 | null | 2021-04-13T01:14:04 | 2019-05-23T13:38:21 | TeX | UTF-8 | R | false | false | 6,981 | r | summarize_impacts.R | summarize_impacts <- function(ParamInit, ParamImpact, equilibrium, stability, motif) {
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Initial conditions (pre-stressor)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Number of parameters in model
nParam <- length(ParamInit)
# Names of parameters in model
nmParam <- names(ParamInit)
# List of initial parameters
lsInit <- split(as.matrix(ParamInit), nmParam)
# Initial abundances
InitAbundances <- do.call(equilibrium, lsInit)
# Initial stability
InitStability <- do.call(stability, lsInit)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Impacted conditions (post-stressor)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Number of impacted parameters
nImpact <- length(ParamImpact)
# Names of impacted parameters
nmImpact <- names(ParamImpact)
# Pathways of effect
pathways <- list()
for(i in 1:nImpact) pathways <- c(pathways, combn(nmImpact, i, simplify = F))
# Number of integrative pathways of effect (K)
nK <- length(pathways)
# Names of integrative pathways of effect
nmK <- unlist(lapply(pathways, paste, collapse = '-'))
# Number of parameters in pathways of effect (unitary pathways of effect; k)
k <- unlist(lapply(pathways, length))
# Impacted abundances and stability
ImpactAbundances <- data.frame(Pathways = nmK, nParam = k, Motif = motif,
x = numeric(nK),
y = numeric(nK),
z = numeric(nK),
stringsAsFactors = F, row.names = NULL)
ImpactStability <- data.frame(Pathways = nmK, nParam = k, Motif = motif,
InitStability = InitStability,
ImpactStability = numeric(nK),
stringsAsFactors = F)
for(i in 1:nK) {
# Initial parameters
param <- ParamInit
# Replace impacted parameters
param[, pathways[[i]]] <- ParamImpact[, pathways[[i]]]
# List of parameters
lsImpact <- split(as.matrix(param), nmParam)
# Impacted abundances
ImpactAbundances[i,c('x','y','z')] <- do.call(equilibrium, lsImpact)
# Impacted stability
ImpactStability[i, 'ImpactStability'] <- do.call(stability, lsImpact)
}
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Abundance data
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
library(dplyr)
library(magrittr)
# Gather data
ImpactAbundances <- gather(ImpactAbundances, 'Species', 'ImpactAbundances',
-Pathways, -nParam, -Motif)
# Add unique position
ImpactAbundances$Position <- paste0(ImpactAbundances$Motif,
ImpactAbundances$Species)
# Add initial abundances
uid <- match(ImpactAbundances$Species, c('x','y','z'))
ImpactAbundances$InitAbundances <- InitAbundances[uid]
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Trophic sensitivity
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# > We define a species ($m$) trophic sensitivity ($S_{m,K}$) as the net
# > impact -- *i.e.* the pre- and post-stressors variation in abundance --
# > resulting from an integrative pathway of effect $K$:
# >
# > $$S_{m,K} = \frac{a_{m,K} - a_m}{a_m}$$
# >
# > where $a_m$ and $a_{m,K}$ are the pre- and post-stressors abundances of
# > species $m$, respectively.
sensitivity <- function(init, impact) (impact - init) / init
ImpactAbundances$Sensitivity <- sensitivity(ImpactAbundances$InitAbundances,
ImpactAbundances$ImpactAbundances)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Trophic amplification
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# > We define a species ($m$) trophic amplification ($A_{m,K}$) as the
# > difference between its trophic sensitivity to an integrative pathway of
# > effect ($K$) and the sum of its trophic sensitivities to the unitary
# > pathways of effect forming $K$ ($k \in K$):
# >
# > $$A_{m, K} = \sum_{k \in K} S_{m, k} - \frac{1}{|K|} S_{m, K}$$
# >
# > where $|K|$ is the number of unitary pathways of effect $k$ forming the
# > integrative pathway of effect $K$.
amplification <- function(SK, Sk, n) {
if (n == 1) {
NA
} else {
int <- (1/n)*SK
sum(int-Sk)
}
}
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Variance
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# > Comparing the effective and expected impacts of a unitary pathway of
# > effect -- *i.e.* the average impact of an integerative pathways of effect
# > -- provides a measure of variance associated to trophic sensitivity to an
# > integrative pathway of effect ($K$):
# >
# > $$V_{m, K} = \sum_{k \in K} \left(S_{m, k} - \frac{1}{|K|} S_{m, K} \right)^2$$
variance <- function(SK, Sk, n) {
if (n == 1) {
NA
} else {
int <- (1/n)*SK
sum((int-Sk)^2)
}
}
# Trophic amplification & variance
ImpactAbundances$Amplification <- NA
ImpactAbundances$Variance <- NA
for(i in 1:nrow(ImpactAbundances)) {
# S_{m,K}
SK <- ImpactAbundances$Sensitivity[i]
# |K|
n <- ImpactAbundances$nParam[i]
# Sk
pos <- ImpactAbundances$Species[i]
k <- unlist(stringr::str_split(ImpactAbundances$Pathways,'-')[i])
uid <- ImpactAbundances$Pathways %in% k & ImpactAbundances$Species == pos
Sk <- ImpactAbundances$Sensitivity[uid]
# Trophic amplification
ImpactAbundances$Amplification[i] <- amplification(SK, Sk, n)
# Variance
ImpactAbundances$Variance[i] <- variance(SK, Sk, n)
}
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Remove abundance data
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
ImpactAbundances <- select(ImpactAbundances, -InitAbundances, -ImpactAbundances)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Stability data
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Calculate a percent variation in stability for now
StabilityVariation <- function(S1, S2) (S2-S1)/S1
ImpactStability$StabilityVariation <- StabilityVariation(ImpactStability$InitStability,
ImpactStability$ImpactStability)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Remove stability data
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# ImpactStability <- select(ImpactStability, -InitStability, -ImpactStability)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# Return impact summary
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
return(list(ImpactAbundances, ImpactStability))
}
|
21caf305eee08218177e1d3f8c3705a0a9f7e806 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GEint/examples/GE_bias_old.Rd.R | a6bbd01e849418c1b15a663ccf75af87243bf0de | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 380 | r | GE_bias_old.Rd.R | library(GEint)
### Name: GE_bias_old
### Title: GE_bias_old.R
### Aliases: GE_bias_old
### ** Examples
solutions <- GE_bias_normal_squaredmis_old( beta_list=as.list(runif(n=6, min=0, max=1)),
rho_list=as.list(rep(0.3,6)), prob_G=0.3)
GE_bias_old(beta_list=solutions$beta_list, solutions$cov_list, solutions$cov_mat_list,
solutions$mu_list, solutions$HOM_list)
|
f25a40402c5eb60bd214cdc6ba7edbedccc68af5 | d89833cb72d19896cbd0865cdc1e419e55d09872 | /tests/testthat/test-seedframe.R | feb3f214e58f8a0e613f24056a71626b6779e4dd | [
"MIT"
] | permissive | mskilab-org/loosends | b9a8c62db6ea9925ad7b91e204289f29cdf17975 | 45a378ae332ebcb6db75a6d9d3e3f3f214300b32 | refs/heads/main | 2023-08-17T09:15:37.251009 | 2023-08-17T01:22:18 | 2023-08-17T01:22:18 | 338,084,700 | 1 | 0 | MIT | 2023-08-17T01:22:34 | 2021-02-11T16:29:36 | R | UTF-8 | R | false | false | 3,936 | r | test-seedframe.R | library(loosends)
inv.bp.gr.fn = system.file("extdata", "tests", "new_caller_1", "inv.bp.gr.rds", package = "loosends")
inv.reads.dt.fn = system.file("extdata", "tests", "new_caller_1", "inv.reads.rds", package = "loosends")
inv.bp.gr = readRDS(inv.bp.gr.fn)
inv.reads.dt = readRDS(inv.reads.dt.fn)
test_that(desc = "check seed frame preparation for an FBI", code = {
suppressWarnings(
expr = {
le.dt = prep_loose_ends(li = inv.bp.gr, id = "2527")
prepped.reads.dt = prep_loose_reads(li = le.dt, loose.reads.dt = inv.reads.dt)
seed.rds = grab_seed_frame(prepped.reads.dt,
gr.flipstrand(inv.bp.gr) + 1e3,
seq.field = "reading.frame")
## check that there is one seed read per qname pair
expect_true(all(seed.rds[, sum(seed), by = qname]$V1 <= 1))
## check that seed reads have the correct strand
expect_true(all(seed.rds[(seed), as.character(strand)] == strand(gr.flipstrand(inv.bp.gr))))
## check that seed reads are not reverse complemented, but non-seed reads are
expect_true(all(seed.rds[(seed), reading.frame == seed.frame]))
expect_true(all(seed.rds[(!seed), reading.frame != seed.frame]))
})
})
dup.bp.gr.fn = system.file("extdata", "tests", "new_caller_1", "dup.bp.gr.rds", package = "loosends")
dup.reads.dt.fn = system.file("extdata", "tests", "new_caller_1", "dup.reads.rds", package = "loosends")
dup.bp.gr = readRDS(dup.bp.gr.fn)
dup.reads.dt = readRDS(dup.reads.dt.fn)
test_that(desc = "check seed frame preparation for a DUP", code = {
suppressWarnings(
expr = {
le.dt = prep_loose_ends(li = dup.bp.gr, id = "1109")
prepped.reads.dt = prep_loose_reads(li = le.dt, loose.reads.dt = dup.reads.dt)
seed.rds = grab_seed_frame(prepped.reads.dt,
gr.flipstrand(dup.bp.gr) + 1e3,
seq.field = "reading.frame")
## check that there is one seed read per qname pair
expect_true(all(seed.rds[, sum(seed), by = qname]$V1 <= 1))
## check that seed reads have the correct strand
expect_true(all(seed.rds[(seed), as.character(strand)] == strand(gr.flipstrand(dup.bp.gr))))
## check that seed reads are not reverse complemented, but non-seed reads are
expect_true(all(seed.rds[(seed), reading.frame == seed.frame]))
expect_true(all(seed.rds[(!seed), reading.frame != seed.frame]))
})
})
tra.bp.gr.fn = system.file("extdata", "tests", "new_caller_1", "tra.bp.gr.rds", package = "loosends")
tra.reads.dt.fn = system.file("extdata", "tests", "new_caller_1", "tra.reads.rds", package = "loosends")
tra.bp.gr = readRDS(tra.bp.gr.fn)
tra.reads.dt = readRDS(tra.reads.dt.fn)
test_that(desc = "check seed frame preparation for a TRA", code = {
suppressWarnings(
expr = {
le.dt = prep_loose_ends(li = tra.bp.gr, id = "387")
prepped.reads.dt = prep_loose_reads(li = le.dt, loose.reads.dt = tra.reads.dt)
seed.rds = grab_seed_frame(prepped.reads.dt,
gr.flipstrand(tra.bp.gr) + 1e3,
seq.field = "reading.frame")
## check that there is one seed read per qname pair
expect_true(all(seed.rds[, sum(seed), by = qname]$V1 <= 1))
## check that seed reads have the correct strand
expect_true(all(seed.rds[(seed), as.character(strand)] == strand(gr.flipstrand(tra.bp.gr))))
## check that seed reads are not reverse complemented, but non-seed reads are
expect_true(all(seed.rds[(seed), reading.frame == seed.frame]))
expect_true(all(seed.rds[(!seed), reading.frame != seed.frame]))
})
})
|
5333527ee18c17f6c1427c24e96e544a995912be | 86f84028c88e9a7fb9aa616fb6f263b4ecfce2ac | /R/api_logout.R | d71fb4f8cb3940a64c5eac2cb0350e0e3a93fea8 | [] | no_license | cran/RobinHood | e9a2d1e95927bba10ada8aac423f0b397f966304 | af813c73bd9daea5c030a03e0abf99577b7d8be2 | refs/heads/master | 2023-01-27T22:28:15.564583 | 2023-01-06T18:10:06 | 2023-01-06T18:10:06 | 168,972,005 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 655 | r | api_logout.R | #' RobinHood API: Logout
#'
#' Backend function called by logout(). Sends a logout call and disables your oauth2 token.
#'
#' @param RH object of class RobinHood
#' @import httr magrittr
#' @export
api_logout <- function(RH) {
detail <- paste("?client_id=",
RH$api_request.client_id,
"&token=",
RH$tokens.refresh_token,
sep = "")
# URL
url <- paste(RobinHood::api_endpoints("revoke_token"), detail, sep = "")
dta <- httr::POST(url)
httr::stop_for_status(dta)
dta <- dta %>%
content(type = "json") %>%
rawToChar()
return(dta)
}
|
a6b14098c1f4ee53af99c1c1f7e98ec56acfc674 | 4aa737125e5f9d2163f83362564bcbf863a26058 | /man/plotNcluster.Rd | 3529316d9110c61065ce29178f39e3a9f50406f3 | [] | no_license | cran/DoTC | 3199acd8c40d661290d0c19f1761472f9081f407 | 98c2b179e0a482e71b4f861a7fc3bdb26763a496 | refs/heads/master | 2021-04-05T23:31:00.525552 | 2016-06-17T21:20:50 | 2016-06-17T21:20:50 | 61,398,492 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 788 | rd | plotNcluster.Rd | \name{plotNcluster}
\alias{plotNcluster}
\title{Plot the Cluster Solution Across Varying Fuzziness Parameter}
\description{Plot the cluster solution, ie. the number of clusters, as a step function across varying fuzziness parameter \code{m}.}
\usage{
plotNcluster(fkm, ...)
}
\arguments{
\item{fkm}{Result for one single fuzziness parameter \code{m} as calculated by \code{\link{wrapFKM}}.}
\item{...}{Additional attributes to \code{plot}.}
}
\value{A step plot of the numbers of clusters (y axis) across different values for the fuzziness parameter m (x axis). The largest values of m conditional on a fixed numbers of clusters are highlighted using grey, dashed lines and bullet points. }
\author{Holger Sennhenn-Reulen}
\examples{
\dontrun{plotNcluster(fkm, ...)}
} |
34c4cbee5f9ecca68daafc3853e480af30875bc7 | a7b2eaba9bb75f212a433bbfb7af9fa8a801cd6f | /Conflict measures/Codes/Conflict_measures_wave1.R | 48d442cbb2dcbee1df58cdc99456b2093993131a | [] | no_license | bavuwadah/Conflict-Project | 59a0bedc296fc67fb0b519a9dca7edd8721f6475 | 5e188fb0b6321841be69cc7b42536e5daa6b5db2 | refs/heads/master | 2020-08-08T06:37:15.510962 | 2019-10-08T21:48:32 | 2019-10-08T21:48:32 | 213,759,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,373 | r | Conflict_measures_wave1.R | ###################################
#
# Conflict measures for wave 1
#
###################################
library(dplyr)
library(lubridate)
library(purrr)
#
# LOADING DATA -----
#
setwd("C:/Users/avuwa/Dropbox (UFL)/Nigeria ag livestock conflict")
## Events dataset
conflict_event <- read.csv("Data/ACLED/Nigeria/data/1900-01-01-2019-04-16-Nigeria.csv")
conflict_event_wave1$event.date <- as.Date(conflict_event_wave1$event_date, format="%d %B %Y")
conflict_event_wave1 <- conflict_event %>% filter(event.date >= "2009-08-01" & event.date <= "2011-04-30")
geodata.hh10 <- read.csv("Data/LSMS/hh10/data/Geodata/nga_householdgeovariables_y1.csv")
## Distance matrix
### load distance matrix
load("C:/Users/avuwa/Dropbox (UFL)/Nigeria ag livestock conflict/Benjamin workspace/Conflict Distance/hh10_dist_matrix.RData")
dist_matrix_wave1 <- hh10_dist_matrix %>% select(event_id_no_cnty_2372:event_id_no_cnty_3120,
event_id_no_cnty_13045, event_id_no_cnty_13046)
######################################################################
#
# Household-level conflict measures within a given radius
#
# The original script repeated the same ~60 lines four times (5, 20, 50
# and 100 km). The logic is factored into one helper driven by a loop
# over the radii; the objects created and the .RData files written keep
# exactly the same names and contents as the original script's output.
# The helper also drops the original inner loop over matrix columns,
# which recomputed an identical fatalities row ncol() times per
# household (pure wasted work; results are unchanged).
#
######################################################################
#' Compute conflict measures for every household within `radius_km`.
#'
#' @param radius_km  search radius around each household, in km.
#' @param dist_matrix household x event distance matrix (km); each column
#'   name encodes the ACLED event id as its 5th "_"-separated token.
#' @param events      ACLED events table (needs `event_id_no_cnty` and
#'   `fatalities` columns).
#' @param geodata     household geovariables; column 6 holds the household id
#'   (as in the original script's `geodata.hh10[,6]`).
#' @return data.frame with household id, distance-weighted severity,
#'   distance-weighted event count, total events and total fatalities;
#'   measure columns are suffixed "_<radius>km".
compute_conflict_measures <- function(radius_km, dist_matrix, events, geodata) {
  n_hh <- nrow(dist_matrix)
  ## 1. For each household, keep the events within the radius together with
  ##    inverse-squared-distance weights (closer events weigh more).
  HH_vector <- vector("list", n_hh)
  for (h in seq_len(n_hh)) {
    hh <- c(dist_matrix[h, ])
    within <- names(which(hh <= radius_km))
    distance <- unname(unlist(hh[within]))
    ## event id = 5th "_"-separated token of the column name
    id_events <- vapply(strsplit(within, split = "_"),
                        function(x) x[5], character(1))
    HH_vector[[h]] <- data.frame(idEvents = as.numeric(id_events),
                                 Distances = 1 / distance^2)
  }
  ## 2. Fatalities per selected event, zero-padded to a common width so all
  ##    households fit one matrix (rows = households). NAs from failed
  ##    event-id matches are kept, as in the original script.
  max_events <- max(vapply(HH_vector, nrow, integer(1)))
  nFatalities <- matrix(0, nrow = n_hh, ncol = max_events)
  for (e in seq_len(n_hh)) {
    fa <- events[match(HH_vector[[e]][, 1], events$event_id_no_cnty),
                 "fatalities"]
    if (length(fa) > 0) nFatalities[e, seq_along(fa)] <- fa
  }
  ## 3. Severity = sum of fatalities weighted by 1/distance^2.
  S <- numeric(n_hh)
  for (n in seq_len(n_hh)) {
    w <- HH_vector[[n]]$Distances
    S[n] <- sum(w * nFatalities[n, seq_along(w)], na.rm = TRUE)
  }
  ## 4. Assemble the per-household summary, then name the measure columns
  ##    with the radius suffix (e.g. "severity_conflict_5km").
  out <- data.frame(
    hhid = geodata[, 6],
    severity = S,
    dist_weighted_events = vapply(HH_vector,
                                  function(x) sum(x$Distances), numeric(1)),
    total_events = vapply(HH_vector, nrow, integer(1)),
    total_fatalities = rowSums(nFatalities)
  )
  names(out) <- c("hhid",
                  paste0(c("severity_conflict_", "dist_weighted_events_",
                           "total_events_", "total_fatalities_"),
                         radius_km, "km"))
  out
}
## Run for every radius. This creates wave1_conflict_measure_<r>km in the
## global environment and saves it to wave1_conflict_measure_<r>km.RData,
## exactly as the four hand-copied sections of the original script did.
for (radius in c(5, 20, 50, 100)) {
  cat("Radius:", radius, "km -", base::date(), "\n")
  obj_name <- paste0("wave1_conflict_measure_", radius, "km")
  assign(obj_name,
         compute_conflict_measures(radius, dist_matrix_wave1,
                                   conflict_event_wave1, geodata.hh10))
  save(list = obj_name, file = paste0(obj_name, ".RData"))
}
|
461779af45938d4c51de2da5b4627330227dedad | fe37eb48de72c5c9f5babf29646f282c92e05bcb | /fn.varpart.in.R | d565d2f215f65d53b548ff21ce5c20c4aa2bc21e | [] | no_license | takayukiyunoki/spatialIBM | 3cd6dee9632a8a1ef628dc749f8ca272bde53f8c | bbcc06329ddc92cd22b1ff81dda6ce7959b56cbb | refs/heads/main | 2023-07-22T22:53:31.419056 | 2021-09-03T17:46:29 | 2021-09-03T17:46:29 | 307,781,650 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 654 | r | fn.varpart.in.R | # Function to calculating the summary statics of variation partitioning for inicial conditions
# Summary statistics of variation partitioning for the initial conditions:
# the number of groups plus the permutation-test significance of the spatial
# (PCNM) and environmental RDA models fitted to the Hellinger-transformed
# location-by-species table.
fn.varpart.in <- function(PCNM.pos, E.trans, pool, pool.t0) {
  result <- data.frame(matrix(, nrow = 1, ncol = 0))
  result$n.groups.in <- length(unique(pool.t0$groups))
  # Build the location-by-species abundance table (drop the locations column)
  # and Hellinger-transform it.
  pool$species <- pool.t0$species
  abundance <- dcast(pool, locations ~ species, length)[, -1]
  abundance_hel <- decostand(abundance, "hellinger")
  # Significance of the spatial (PCNM) component.
  result$space.sign <- anova.cca(rda(abundance_hel, PCNM.pos))$Pr[1]
  # Significance of the environmental component.
  result$env.sign <- anova.cca(rda(abundance_hel, E.trans))$Pr[1]
  result
}
|
a5a0f7afc51eaffc6ea05e288aae4f047bbe4b61 | 48d0740e681ee0198933ecbaa7aa86f386e5e216 | /Training Scripts/Project/ui.r | 7cd8ac16af7ac308b195ec7a4c43ffdb93a5a9ed | [] | no_license | Barthurmun/Data_Analysis_R | a3e7ce7876410f1a7d7e3085f5db80c101b16edb | 5f412fbac167cd9a3d6b1c11618b3c274bb2de67 | refs/heads/master | 2022-11-30T22:20:51.378529 | 2020-08-15T21:54:22 | 2020-08-15T21:54:22 | 286,578,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,284 | r | ui.r | shinyUI(
navbarPage("Amazon fires",
           # ---- "Dynamic" tab: user-driven chart (totals, per-month or per-year) ----
           tabPanel("Dynamic",
                    tags$img(src="brazil.png", style = 'position: absolute; opacity:0.2; width: 100%'),
           sidebarPanel(
             # Chart-type selector; the conditional panels below react to it.
             selectInput("variable", "Please select the chart type:",
                         list("Total summary" = "state",
                              "By Month" = "month",
                              "By Year" = "year")),
             # Year slider, shown only when "By Year" is selected.
             conditionalPanel(condition = "input.variable == 'year'",
                              sliderInput("year_int", "Please select Year:",
                                          min=1998, max=2017, value=2010, step=1, sep="")),
             # Month picker, shown only when "By Month" is selected.
             conditionalPanel(condition = "input.variable == 'month'",
                              selectInput("month_str", "Please select month:",
                                          list("January" = "January",
                                               "February" = "February",
                                               "March" = "March",
                                               "April" = "April",
                                               "May" = "May",
                                               "June" = "June",
                                               "July" = "July",
                                               "August" = "August",
                                               "September" = "September",
                                               "October" = "October",
                                               "November" = "November",
                                               "December" = "December"))),
             # Output area for the reactive plot rendered by the server ("myPlot").
             mainPanel(
               plotOutput("myPlot", width="auto")
             )),
           ),
           # ---- "Static" tab: pre-built summary charts ----
           tabPanel("Static",
                    tags$img(src="brazil.png", style = 'position: absolute; opacity:0.20; width: 100%'),
                    sidebarPanel(
                      selectInput("type", "Please select the chart type:",
                                  list("Years" = "year",
                                       "States" = "state",
                                       "Months" = "month",
                                       "Histogram" = "hist",
                                       "Regression" = "regr")),
                    ),
                    mainPanel(
                      plotOutput("staticPlot")
                    )
           )
)
)
|
30ac8add7391ecaea07957528a398cfc52106dd8 | 8c1ca8e54e57d1b07c5466fb10f200c4a9d62d59 | /R/flux.calib.R | cb24eff659bf625826621c09f75836cc3ca1caaf | [] | no_license | cran/flux | 61651b39bbf1973f5e983cba69ddaae4c20250ab | ecd586b6c04fdff98a9e1a94112aad35e439bde3 | refs/heads/master | 2022-07-13T10:14:57.646989 | 2022-06-26T05:58:11 | 2022-06-26T05:58:11 | 17,696,089 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,886 | r | flux.calib.R | flux.calib <-
function(dat, columns, calib, format = "%Y-%m-%d %H:%M:%S", window = 3, buffer = 1000, n.cg = 4, rl.backup = 20, attach = FALSE){
	# Derive per-table range limits from calibration gas measurements taken in
	# a time window around each concentration measurement.
	#
	# dat:       list with a $tables element (one concentration table per
	#            measurement; column 1 = datetime, column 2 = concentration).
	# columns:   columns to extract from each table and from calib.
	# calib:     calibration gas measurements (column 1 = datetime).
	# format:    datetime format for parsing timestamps in dat and calib.
	# window:    time window (hours) around the measurement date.
	# buffer:    maximum distance between measured and calibration
	#            concentrations for a calibration gas to be included.
	# n.cg:      number of calibration gases (cluster count).
	# rl.backup: fallback range limit when too few calibration measurements.
	# attach:    if TRUE, attach the limits to dat$tables (as CO2.rl /
	#            CH4.rl / N2O.rl columns); otherwise return the limits vector.
	# defining the function which does the work
	# NOTE: flux.cal reads rl.backup from the enclosing environment via
	# lexical scoping; it is not one of its formal arguments.
	flux.cal <-
	function(conz.dat, calib, format = "%Y-%m-%d %H:%M:%S", window=3, buffer = 1000, n.cg = 4){
		# do not allow NA measurements in conz.dat
		conz.dat <- conz.dat[!is.na(conz.dat[,2]),]
		# extract date
		m.date <- strptime(conz.dat[1,1], format = format)
		dts.cal <- strptime(calib[,1], format = format)
		# drop calibration rows whose timestamp failed to parse
		na.omit <- !is.na(dts.cal)
		dts.cal <- dts.cal[na.omit]
		calib <- calib[na.omit,]
		# extract calibration gas measurements according to the date of the measurement
		# of the ghg and a window width window (hours) around it
		# first make seconds window because seconds are the primary unit for datetime objects
		window <- window*60*60/2
		calib <- calib[(dts.cal >= (m.date-(window-60))) & (dts.cal <= (m.date+window)), 2]
		# omit 0 concentrations
		calib <- calib[calib>1]
		# check whether enough concentrations are in calib
		if(length(calib) <= n.cg){
			# too few measurements: fall back to the caller-supplied backup limit
			range.lim <- rl.backup
			warning("Not enough calibration gas measurements", call. = FALSE)
		}
		else{
			# create an index from the grouping of the calibration gas measurements (via clustering)
			cin <- cutree(hclust(dist(calib)), n.cg)
			# omit calibration gas concentrations that are too far away from measured concentrations
			# to provide helpful ranges
			sel <- rowSums(as.matrix(dist(c(range(conz.dat[,2]), calib)))[-c(1:2),1:2] < buffer) > 0
			# calculate range limits (standard deviation of the calibration gas
			# measurements) per calibration gas
			range.lims <- as.vector(by(data.frame(calib, sel), cin, function(x) with(x, sd(calib[sel]))))
			# take only the good ones (non that are more than 500 apart)
			#tmp <- as.matrix(dist(c(range(conz.dat[,2]),range.lims)))[-c(1:2),1:2]
			#which <- apply(tmp,1,function(x) sum(x>500)>0)
			# calculate average range limits across all included calibration gases
			range.lim <- mean(range.lims, na.rm=TRUE)
		}
		return(range.lim)
	}
	# actually do the work
	# extract the needed columns from calib
	# NOTE(review): calib is subset to `columns` here AND again inside the
	# sapply call below (calib[,columns]). With column *names* the second
	# subset is redundant but harmless; with numeric indices it would select
	# different columns the second time -- confirm intended usage.
	calib <- calib[,columns]
	ghg.lim <- sapply(dat$tables, function(x) flux.cal(x[,columns], calib[,columns], format = format, window = window, buffer=buffer, n.cg=n.cg))
	# if NA's result fall back to rl.backup
	ghg.lim[is.na(ghg.lim)] <- ifelse(!is.null(rl.backup), rl.backup, min(ghg.lim, na.rm=TRUE))
	if(attach){
		# attach one range-limit column per gas found in `columns`
		if(length(grep("CO2", columns))!=0){
			for(i in c(1:length(dat$tables))){
				dat$tables[[i]]$CO2.rl <- ghg.lim[i]
			}
		}
		if(length(grep("CH4", columns))!=0){
			for(i in c(1:length(dat$tables))){
				dat$tables[[i]]$CH4.rl <- ghg.lim[i]
			}
		}
		if(length(grep("N2O", columns))!=0){
			for(i in c(1:length(dat$tables))){
				dat$tables[[i]]$N2O.rl <- ghg.lim[i]
			}
		}
	}
	else{
		# no attach requested: return the bare vector of range limits
		dat <- ghg.lim
	}
	return(dat)
}
8dc5443c703b7e7afaf87d1dc7c35be6a1f74949 | 24b4a968fc610819ff6c2a2932491da663bd9d59 | /src/pre-processing data and graphics.R | aa001e5d63d249ee1c50a3ad6030d88a4e33c6e2 | [] | no_license | raquelaoki/ProjectSpring2018 | cdd846ae7941835591e884f8182353531735ff21 | 181c80ef3270e6ff036eab555ea4d605c6b76c7c | refs/heads/master | 2022-02-23T22:34:30.886218 | 2019-10-11T20:18:04 | 2019-10-11T20:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,461 | r | pre-processing data and graphics.R | #-------#-------#-------#-------#-------#-------#-------#-------#-------#-------#
#2018/07/05
#Raquel Aoki
#-------#-------#-------#-------#-------#-------#-------#-------#-------#-------#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#Script to get genes from gistic output
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#---- WORK DIRECTORY
# NOTE(review): clearing the workspace inside a script is discouraged; kept
# for compatibility with the original interactive workflow.
rm(list = ls(all = TRUE))
#setwd("C:\\Users\\raoki\\Documents\\Project 1")
setwd('C:\\Users\\raque\\Google Drive\\SFU\\CMPT884 - Course Project and presentation')
#---- PACKAGES
#source("https://bioconductor.org/biocLite.R")
#biocLite("biomaRt")
library(biomaRt)
#---- GET GENES POSITION
# Query Ensembl for the position of every gene on chromosomes 1-22, X and Y.
ensembl <- useEnsembl(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
cn <- c("1","2","3","4","5","6","7","8","9","10","11","12",
        "13","14","15","16","17","18","19","20","21","22","X","Y")
genes <- getBM(attributes=c('ensembl_gene_id','gene_biotype','hgnc_symbol',
                            'chromosome_name','start_position','end_position'),
               filters = 'chromosome_name', values = cn, mart = ensembl)
# BUG FIX: recode the sex chromosomes as 23 in the chromosome column ONLY.
# The original `genes[genes=='X' | genes=='Y'] = 23` replaced 'X'/'Y' cells
# in EVERY column, so e.g. a gene symbol equal to 'X' or 'Y' would have been
# clobbered as well.
genes$chromosome_name[genes$chromosome_name %in% c('X', 'Y')] <- 23
# Keep protein-coding genes with a non-empty HGNC symbol.
genes <- subset(genes, gene_biotype == 'protein_coding')
genes <- subset(genes, !is.na(hgnc_symbol) & hgnc_symbol != '')
rownames(genes) <- NULL
names(genes)[c(5, 6)] <- c('start', 'end')
dim(genes)
head(genes)
#---- LOAD GISTIC OUTPUT
sg <- read.table("scores_gistic.txt", sep = '\t', header = TRUE)
names(sg) <- c('type','chromosome_name','start','end','log10qvalue','g_score',
               'average_amplitude','frequency')
# The paper's analysis uses amplifications only.
sg <- subset(sg, type == 'Amp')
#---- LOOKING IF IN THE GENE POSITION THERE IS AN AMPLIFICATION
#I dont need all genes, only the ones with amplification areas
#Paper work only with amplifications
# For each gene, average the g-scores of the overlapping GISTIC amplification
# segments, weighted by segment frequency; genes with no overlap get NA.
genes$g_score = c()
for(i in 1:dim(genes)[1]){
  # Segments on the same chromosome whose span contains the gene's start OR
  # end position.
  # NOTE(review): this test misses segments that lie ENTIRELY inside the gene
  # (segment start > gene start AND segment end < gene end) -- confirm whether
  # such segments should also contribute.
  g_score = subset(sg, (sg$start<genes$start[i] & genes$start[i]<sg$end & genes$chromosome_name[i]==sg$chromosome_name) |
                     (sg$start<genes$end[i] & genes$end[i]<sg$end & genes$chromosome_name[i]==sg$chromosome_name))#$g_score
  if(dim(g_score)[1]!=0){
    # Frequency-weighted mean g-score across the overlapping segments.
    genes$g_score[i] = sum(g_score$g_score*g_score$frequency)/sum(g_score$frequency)
  }else{
    genes$g_score[i] = NA
  }
}
# Quick sanity checks: score distribution and counts with/without a score.
summary(genes$g_score)
dim(subset(genes, is.na(g_score)))
dim(subset(genes, !is.na(g_score)))
write.table(genes, "g_score_by_gene.txt",sep=';',row.names = F)
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
# join g-score and features
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
# Left-join the per-gene g-scores with the per-gene feature table on the
# gene symbol (every gene with a g-score is kept).
gsd = read.table('g_score_by_gene.txt',sep=';',header=T)
features = read.table('features_new_sirna.csv',sep=',',header=T)
# Drop duplicated gene identifiers so the merge is one-to-one.
gsd = subset(gsd, !duplicated(hgnc_symbol))
features = subset(features, !duplicated(gene))
data = merge(gsd,features,by.x='hgnc_symbol',by.y='gene',all.x=T)
# Sanity checks on row counts before/after the merge.
dim(gsd)
dim(features)
dim(data)
names(data)[1] = 'gene'
write.table(data, 'data_gscore_features_bygene.csv', row.names = F, sep=';')
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
# graphic presentation and report
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#
#Features plot
# Histograms of the three gene-level features (mutsig, expression, siRNA),
# stacked in one column.
features = read.csv('features_new_sirna.csv', sep=',')
#features2 = read.csv('features_normalized.csv',sep=',')
head(features)
par(mfrow=c(3,1))
par(mar= c(3.1, 3.1, 1.1, 1.1))
hist(features$mutsig,col='#2a3990',main='Mutsig',xlab='',ylab='',cex.main=1.5)
hist(features$avg_expr,col='#2a3990',main='Expression',xlab='',ylab='',cex.main=1.5)
hist(features$sirna,col='#2a3990',main='SiRNA',xlab='',ylab='',cex.main=1.5)
#chain
# MCMC chain files: "c*" = complete model, "s*" = simplified model.
# The first 1600 iterations are discarded as burn-in (see abline(v=1600)).
cchain1= read.csv('complete_chain1_T.csv',sep=';', header = F)
cchain2= read.csv('complete_chain2_T.csv',sep=';', header = F)
cchainT= read.csv('complete_matrix_T.csv',sep=';', header = F)
cchainT = cchainT[-c(1:1600),]
schain2= read.csv('simplified_chain2_T.csv',sep=';', header = F)
schainT= read.csv('simplified_matrix_T.csv',sep=';', header = F)
#complete model
# Trace plots for the complete model: w0-w3, then mu, then sigma; the dashed
# vertical line marks the burn-in cutoff at iteration 1600.
par(mfrow=c(1,3))
w_min_max = c(min(cchain1), max(cchain1)*1.5)
names(cchain1) = c('w0','w1','w2','w3')
plot(cchain1$w0,type='l', lwd =2,main='',ylim=w_min_max,ylab='Wi value',
     cex.axis=1.5,cex.main=1.5,cex.lab=1.5,xlab='iterations')
points(cchain1$w1,type='l', lwd =2,main='',ylim=w_min_max,col='darkblue')
points(cchain1$w2,type='l', lwd =2,main='',ylim=w_min_max,col='darkgreen')
points(cchain1$w3,type='l', lwd =2,main='',ylim=w_min_max,col='darkred')
abline(v=1600,lty=2)
legend('topright',lwd = c(3,3,3,3),col=c('black','darkblue','darkgreen','darkred'),
       legend = c('w0','w1','w2','w3'),bty='n', cex=1.5,ncol=2)
mu_min_max = c(min(cchain2[,c(1,2)]), 1.15*max(cchain2[,c(1,2)]))
names(cchain2) = c('mu0','mu1','var0','var1')
plot(cchain2$mu1,type='l', lwd =2,main='',ylim=mu_min_max,xlab='iterations', col = 'darkred',
     ylab=expression(paste(mu,' value',sep='')),cex.axis=1.5,cex.main=1.5,cex.lab=1.5)
points(cchain2$mu0,type='l', lwd =2,main='w',ylim=mu_min_max,col='darkblue')
abline(v=1600,lty=2)
legend('topright',lwd = c(3,3),col=c('darkred','darkblue'), legend = c('Driver','Passenger'),
       bty='n', cex=1.5)
var_min_max = c(min(cchain2[,c(3,4)]), 1.15*max(cchain2[,c(3,4)]))
plot(cchain2$var1,type='l', lwd =2,main='',ylim=var_min_max,xlab='iterations', col = 'darkred',
     ylab=expression(paste(sigma,' value',sep='')),cex.axis=1.5,cex.main=1.5,cex.lab=1.5)
points(cchain2$var0,type='l', lwd =2,main='w',ylim=var_min_max,col='darkblue')
abline(v=1600,lty=2)
legend('topright',lwd = c(3,3),col=c('darkred','darkblue'), legend = c('Driver','Passenger'),
       bty='n', cex=1.5)
#simplified model
# Same trace plots for the simplified model (w0-w3 and mu only).
par(mfrow=c(2,1))
par(mar=c(4,4.2,1,1))
w_min_max = c(min(schain2[,c(1:4)]), max(schain2[,c(1:4)])*2)
names(schain2) = c('w0','w1','w2','w3','mu0','mu1')
plot(schain2$w0,type='l', lwd =2,main='',ylim=w_min_max,ylab='Wi value',
     cex.axis=1.5,cex.main=1.5,cex.lab=1.5,xlab='iterations')
points(schain2$w1,type='l', lwd =2,main='',ylim=w_min_max,col='darkblue')
points(schain2$w2,type='l', lwd =2,main='',ylim=w_min_max,col='darkgreen')
points(schain2$w3,type='l', lwd =2,main='',ylim=w_min_max,col='darkred')
abline(v=1600,lty=2)
legend('topright',lwd = c(3,3,3,3),col=c('black','darkblue','darkgreen','darkred'),
       legend = c('w0','w1','w2','w3'),bty='n', cex=1.5,ncol=2)
par(mar=c(4,4.1,1,1))
mu_min_max = c(min(schain2[,c(5,6)]), 1.15*max(schain2[,c(5,6)]))
plot(schain2$mu1,type='l', lwd =2,main='',ylim=mu_min_max,xlab='iterations', col = 'darkred',
     ylab=expression(paste(mu,' value',sep='')),cex.axis=1.5,cex.main=1.5,cex.lab=1.5)
points(schain2$mu0,type='l', lwd =2,main='w',ylim=mu_min_max,col='darkblue')
abline(v=1600,lty=2)
legend('topright',lwd = c(3,3),col=c('darkred','darkblue'), legend = c('Driver','Passenger'),
       bty='n', cex=1.5)
#--------- Normal Mix
# Illustration of the two-component normal mixture over g-scores
# (driver vs passenger genes).
par(mar=c(1,1,1,1))
plot( density(rnorm(1000000,0.08,0.015)),main='', col='red',lwd=4, ylab='',xlab='',axes=F, xlim=c(0,0.15))
points(density(rnorm(1000000,0.04,0.02)) ,main='', col= 'blue', lwd=4,type='l')
text(0.08,-0.5,'G Score')
legend('topright',lwd=c(4,4),col=c('red','blue'),legend=c('Driver Genes','Passenger Genes'),bty='n')
|
97d154608787ae2ee257e9d2d933b5278411325a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Sleuth2/examples/ex1512.Rd.R | b42844670d4f1c5701d2318055026129a948b74c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | ex1512.Rd.R | library(Sleuth2)
### Name: ex1512
### Title: Melanoma and Sunspot Activity-An Observational Study
### Aliases: ex1512
### Keywords: datasets
### ** Examples
# Display the structure of the ex1512 data set (melanoma vs sunspot activity).
str(ex1512)
|
0380871d13396e551180c227750b8a4fdb6da05c | 1eb83aaffb3c50b51fbeb2275f90ed68fec385bb | /R/draws.R | f42042ec99443f706f67ad9386756065f40a18fd | [
"MIT"
] | permissive | dkc88/logitr | 77668c66f29e933cad1515789045359c973bc641 | 73c90f5a4461add80f820bbd1fd6ea2ed4240d4a | refs/heads/master | 2023-05-24T07:10:11.744989 | 2021-06-15T22:48:52 | 2021-06-15T22:48:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,743 | r | draws.R | # ============================================================================
# Functions for taking draws for mixed logit models
# ============================================================================
# Returns shifted normal draws for each parameter
makeBetaDraws <- function(pars, parSetup, numDraws, standardDraws) {
  # Scale-and-shift: beta = mu + sigma * z for every draw (row) and
  # parameter (column).
  betas <- getMuMat(pars, parSetup, numDraws) +
    standardDraws * getSigmaMat(pars, parSetup, numDraws)
  # Log-normally distributed parameters are modelled on the log scale, so
  # exponentiate their columns back.
  lnIDs <- getLogNormParIDs(parSetup)
  if (length(lnIDs) > 0) {
    betas[, lnIDs] <- exp(betas[, lnIDs])
  }
  betas
}
# Matrix of mean (mu) parameters: one identical row per draw.
#
# pars:     full parameter vector; the first length(parSetup) entries are
#           the means.
# parSetup: per-parameter setup vector; only its length/order is used here.
# numDraws: number of rows (draws) to produce.
getMuMat <- function(pars, parSetup, numDraws) {
  pars_mu <- as.numeric(pars[seq_along(parSetup)])
  # byrow = TRUE (never the reassignable `T`); matrix() recycles pars_mu
  # once per row, so every row equals the mean vector.
  matrix(pars_mu, nrow = numDraws, ncol = length(pars_mu), byrow = TRUE)
}
# Matrix of standard-deviation (sigma) parameters: one identical row per draw.
#
# Fixed (non-random) parameters keep sigma = 0; the sigma values for random
# parameters are the entries of `pars` after the first length(parSetup) means.
# NOTE(review): assumes at least one random parameter; with none, the
# assignment below would receive a malformed index range (same as the
# original code) -- confirm callers guarantee this.
getSigmaMat <- function(pars, parSetup, numDraws) {
  numPars <- length(parSetup)
  pars_sigma <- rep(0, numPars)
  randParIDs <- getRandParIDs(parSetup)
  pars_sigma[randParIDs] <- as.numeric(pars[(numPars + 1):length(pars)])
  # byrow = TRUE (never the reassignable `T`): every row is the sigma vector.
  matrix(pars_sigma, nrow = numDraws, ncol = numPars, byrow = TRUE)
}
# Standard normal draws (numDraws x number of parameters) generated from a
# Halton quasi-random sequence, with the columns of fixed (non-random)
# parameters zeroed out.
getStandardDraws <- function(parSetup, numDraws) {
  draws <- as.matrix(
    randtoolbox::halton(numDraws, length(parSetup), normal = TRUE)
  )
  # Fixed parameters take no draws: force their columns to zero.
  fixedIDs <- getFixedParIDs(parSetup)
  draws[, fixedIDs] <- rep(0, numDraws)
  draws
}
# Draws from the multivariate normal sampling distribution of the model
# coefficients (mean = model$coef, covariance = model$covariance), returned
# as a data frame whose columns are named after the coefficients.
getUncertaintyDraws <- function(model, numDraws) {
  coefDraws <- data.frame(MASS::mvrnorm(numDraws, model$coef, model$covariance))
  names(coefDraws) <- names(model$coef)
  coefDraws
}
|
d81a14545e3110301b949fe89c8236d7a6e312ef | 33e9d4855433b8942914e94b312333d357c18918 | /man/rkafka.closeProducer.Rd | 5d22e817456afbc5c5d6b2c17b83d496a664c35c | [
"Apache-2.0"
] | permissive | jurbanhost/rkafka | 8c370c20523bb9999afb73730f7a243bdd3afd66 | 27d4e31bdc2f6004087c7466062f2c939f01e36b | refs/heads/master | 2020-05-03T01:30:53.973396 | 2019-03-29T05:47:48 | 2019-03-29T05:47:48 | 178,339,214 | 0 | 0 | Apache-2.0 | 2019-03-29T05:37:10 | 2019-03-29T05:37:07 | R | UTF-8 | R | false | false | 719 | rd | rkafka.closeProducer.Rd | \name{rkafka.closeProducer}
\alias{rkafka.closeProducer}
%\alias{producer}
\title{
KAFKA producer shutdown
}
\description{
This function closes the KAFKA producer
}
\usage{
rkafka.closeProducer(producer)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{producer}{
Producer which is to be terminated
Required:Mandatory
Type:Producer
}
}
\value{
Returns no value; called for its side effect of shutting down the KAFKA producer
}
\author{
Shruti Gupta
}
\examples{
\dontrun{
producer1=rkafka.createProducer("127.0.0.1:9092")
rkafka.closeProducer(producer1)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kafka }
\keyword{ ~producer }% __ONLY ONE__ keyword per line
\keyword{ ~close } |
d1b7d70f03620c62c45869ce6c8f46e23496bea8 | 499fb8e6fc432f7af4092ca97689c8e53f14fffa | /man/x2_itemf.Rd | bb8077d9f550ede8428d1d771f10dba92a2fe0d6 | [] | no_license | SICSresearch/LatentREGpp | e2c7229997ad23b96b6cf6adf03b5ee008cb4254 | 7a9ad36af27a039e16ea9ac69ef3333af0690a40 | refs/heads/master | 2020-12-25T18:07:38.933864 | 2017-03-06T03:20:40 | 2017-03-06T03:20:40 | 66,164,535 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 577 | rd | x2_itemf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/goodness_of_fit.R
\name{x2_itemf}
\alias{x2_itemf}
\title{Statistical x2.}
\usage{
x2_itemf(zetas, patterns, G, FUN)
}
\arguments{
\item{zetas}{matrix of estimations of the parameters of the items (alphas, d's, guessing).}
\item{patterns}{list with Patterns, frequencies and traits.}
\item{G}{the number of groups, by default 10}
\item{FUN}{It is the function with which the expected probability, by default median
is calculated in each group.}
}
\description{
Calculates the statistical x2.
}
|
995a9c822d9630823b66c8fd13e786347074f700 | e2443f13b2b5b592dd3d5de22f2d08ad63aba4f7 | /app/ui.R | 5e5672aa2b9ea04b6864ed5c0e8f162e2860befd | [
"MIT"
] | permissive | smartinsightsfromdata/maddata | 0129805171f48bf0f96e3754beb97dd4b2e58128 | d343b4fc2402026cd539cac2f9d54ddd81b727d9 | refs/heads/master | 2021-01-17T05:08:16.935168 | 2014-10-23T17:30:04 | 2014-10-23T17:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,404 | r | ui.R | require(shiny)
require(rCharts)
# MADtraffic UI: a classic page-with-sidebar layout. The sidebar exposes two
# variable selectors (inputs "x" and "y"); the main panel shows a leaflet map
# plus several chart tabs rendered by the server.
# NOTE(review): `require()` is used for package loading here; `library()`
# would fail loudly if a package is missing -- confirm which is intended.
shinyUI(pageWithSidebar(
  headerPanel("MADtraffic"),
  sidebarPanel(
    "MADtraffic",
    # X / Y axis variable pickers consumed by the server's chart outputs.
    selectInput(inputId = "x",
                label = "Choose X",
                choices = c('SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth'),
                selected = "SepalLength"),
    selectInput(inputId = "y",
                label = "Choose Y",
                choices = c('SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth'),
                selected = "SepalWidth")
  ),
  mainPanel(
    tabsetPanel(
      # Map of traffic / air-quality measurement points (leaflet).
      # tabPanel("Puntos de medida del tráfico y calidad del aire", mapOutput('map_container')),
      tabPanel("Puntos de medida del tráfico y calidad del aire", chartOutput("map_container", 'leaflet')),
      # Test tabs, one per rCharts backend.
      # NOTE(review): the four tabs share the title "Prueba"; distinct titles
      # would make the UI less ambiguous.
      tabPanel("Prueba", chartOutput("series_container1", 'morris')),
      tabPanel("Prueba", chartOutput("series_container2", 'nvd3')),
      tabPanel("Prueba", chartOutput("series_container3", 'polycharts')),
      tabPanel("Prueba", chartOutput("series_container4", 'xcharts'))
    )
  )
))
#
# shinyUI(bootstrapPage(
#   # tags$link(href='style.css', rel='stylesheet'),
#   # tags$script(src='app.js'),
#   # includeHTML('www/credits.html'),
#   # selectInput('network', '', sort(names(networks)), 'citibikenyc'),
#   chartOutput('map_container', 'leaflet')
# ))
|
4b8355c1a5c702b7bfaf7b31284fe76d19d4c680 | c902d3bb683c8d473272192732eed2f05ae2d2cc | /R/unibisect.R | 819c5a35b7d83ccc1b0fcf058646088bf9c1b6be | [] | no_license | dkahle/kumerical | 43210f99d5e4e2d733d2c9533bde28986912cf84 | bb85c1eb777845ecddd1baf780a371528b4afa73 | refs/heads/master | 2020-03-21T03:55:20.270695 | 2018-06-22T01:17:36 | 2018-06-22T01:17:36 | 138,081,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,785 | r | unibisect.R | #' Univariate bisection method
#'
#' Find the root of a function between two points a and b using the
#' bisection method.
#'
#' @param f function
#' @param a lower bound
#' @param b upper bound
#' @param tol tolerance, defaults to 10*.Machine$double.eps
#' @param maxit maximum number of iterations
#' @param bisections number of bisection steps for fast unibisect
#' @return a list
#' @name unibisect
#' @examples
#'
#' f <- function(x) x^2 - 2
#' a <- 0; b <- 2
#' unibisect(f, a, b)
#' simple_unibisect(f, a, b)
#' fast_unibisect(f, a, b)
#'
#' (out <- unibisect(f, a, b))
#'
#' curve(f(x), col = "red", from = a, to = b)
#' with(out$evals, points(x, fx))
#' out$n_evals # = number of function calls
#' plot(1:out$n_evals, out$evals$fx)
#'
#'
#'
#'
#' f <- sin
#' a <- .1; b <- 2*pi - .2
#' (out <- unibisect(f, a, b))
#'
#' curve(f(x), col = "red", from = a, to = b)
#' with(out$evals, points(x, fx))
#' out$n_evals # = number of function calls
#' plot(1:out$n_evals, out$evals$fx)
#'
#'
#'
#'
#' f <- function(x) (x-.24) * (x - .51) * (x - .76)
#' a <- 0; b <- 1
#' (out <- unibisect(f, a, b))
#'
#' curve(f(x), col = "red", from = a, to = b)
#' with(out$evals, points(x, fx))
#' out$n_evals # = number of function calls
#' plot(1:out$n_evals, out$evals$fx)
#'
#'
#'
#' f <- function(x) pbeta(x, 90, 10) - .5
#' a <- 0; b <- 1
#' (out <- fast_unibisect(f, 0, 1))
#'
#' curve(f(x), col = "red", from = 0, to = 1)
#' with(out$evals, points(x, fx))
#'
#' library(ggplot2)
#' ggplot(out$evals, aes(x, fx, color = method)) +
#' stat_function(fun = f, color = "black") +
#' geom_point()
#' @export
#' @rdname unibisect
unibisect <- function(f, a, b, tol = 10*.Machine$double.eps, maxit = 100L) {
  # Bisection root finder that also logs every (x, f(x)) evaluation.
  # Preallocate the evaluation log.
  x <- fx <- numeric(maxit)
  # Check the left endpoint; return immediately if it is already a root.
  x[1] <- a
  fa <- fx[1] <- f(a)
  if (abs(fa) <= tol) {
    return(list(
      root = a, f.root = fa,
      evals = data_frame(x = x[1], fx = fx[1]),
      n_evals = 1
    ))
  }
  # Check the right endpoint; the bracket must straddle a sign change.
  x[2] <- b
  fb <- fx[2] <- f(b)
  if (sign(fa) == sign(fb)) stop("f(a) and f(b) must have opposite signs.")
  if (abs(fb) <= tol) {
    return(list(
      root = b, f.root = fb,
      evals = data_frame(x = x[1:2], fx = fx[1:2]),
      n_evals = 2
    ))
  }
  # First midpoint.  Use <= tol so the test matches the loop's break
  # condition below (the original used a strict <, inconsistently).
  c <- x[3] <- (a + b)/2
  fc <- fx[3] <- f(c)
  if (abs(fc) <= tol) {
    return(list(
      root = c, f.root = fc,
      evals = data_frame(x = x[1:3], fx = fx[1:3]),
      n_evals = 3
    ))
  }
  # Main bisection loop.  Three evaluations are already spent.  The guard
  # keeps 4:maxit from counting backwards (and n_evals from being left
  # undefined) when maxit < 4.
  n_evals <- 3
  if (maxit >= 4) {
    for (k in 4:maxit) {
      # Keep the subinterval on which f changes sign.
      if (sign(fa) == sign(fc)) {
        a <- c; fa <- fc
      } else {
        b <- c; fb <- fc
      }
      c <- x[k] <- (a + b)/2
      fc <- fx[k] <- f(c)
      n_evals <- k
      if (abs(fc) <= tol) break
    }
  }
  if (abs(fc) > tol) warning("tolerance not achieved.")
  list(
    root = c, f.root = fc,
    evals = data_frame(x = x[1:n_evals], fx = fx[1:n_evals]),
    n_evals = n_evals
  )
}
#' @export
#' @rdname unibisect
fast_unibisect <- function(f, a, b, tol = 10*.Machine$double.eps, bisections = 8L, maxit = 100L) {
  # Hybrid root finder: a fixed number of bisection steps to shrink the
  # bracket, followed by secant iterations for fast local convergence.
  # If no iterations would remain for the secant stage, fall back to
  # plain bisection for the whole budget.
  if (bisections >= maxit) return(unibisect(f, a, b, tol, maxit))
  # Preallocate the evaluation log (x values and f(x) values).
  x <- fx <- numeric(maxit)
  # Left endpoint: return immediately if it is already a root.
  x[1] <- a
  fa <- fx[1] <- f(a)
  if (abs(fa) <= tol) {
    return(list(
      root = a, f.root = fa,
      evals = data_frame(x = x[1], fx = fx[1], method = "bisection"),
      n_evals = 1
    ))
  }
  # Right endpoint; the bracket must straddle a sign change.
  x[2] <- b
  fb <- fx[2] <- f(b)
  if (sign(fa) == sign(fb)) stop("f(a) and f(b) must have opposite signs.")
  if (abs(fb) <= tol) {
    return(list(
      root = b, f.root = fb,
      evals = data_frame(x = x[1:2], fx = fx[1:2], method = "bisection"),
      n_evals = 2
    ))
  }
  # First midpoint (<= tol for consistency with the break tests below).
  c <- x[3] <- (a + b)/2
  fc <- fx[3] <- f(c)
  if (abs(fc) <= tol) {
    return(list(
      root = c, f.root = fc,
      evals = data_frame(x = x[1:3], fx = fx[1:3], method = "bisection"),
      n_evals = 3
    ))
  }
  # Bisection stage.  Guarded so bisections < 4 cannot make 4:bisections
  # count backwards or leave n_evals undefined.
  n_evals <- 3
  if (bisections >= 4) {
    for (k in 4:bisections) {
      if (sign(fa) == sign(fc)) {
        a <- c; fa <- fc
      } else {
        b <- c; fb <- fc
      }
      c <- x[k] <- (a + b)/2
      fc <- fx[k] <- f(c)
      n_evals <- k
      if (abs(fc) <= tol) break
    }
  }
  # Return if bisection alone reached the tolerance.  This must use <=,
  # matching the break above: with the original strict <, a bisection step
  # landing exactly on the tolerance broke out of the loop, fell through
  # to the secant stage, and read x/fx slots that were never filled.
  if (abs(fc) <= tol) {
    return(list(
      root = c, f.root = fc,
      evals = data_frame(x = x[1:n_evals], fx = fx[1:n_evals], method = "bisection"),
      n_evals = n_evals
    ))
  }
  # Secant stage: iterate on the last two logged evaluations.
  for (k in (bisections + 1):maxit) {
    x[k] <- x[k - 1] - fx[k - 1] / ((fx[k - 1] - fx[k - 2])/(x[k - 1] - x[k - 2]))
    fx[k] <- f(x[k])
    n_evals <- k
    if (abs(fx[k]) <= tol) break
  }
  if (abs(fx[k]) > tol) warning("tolerance not achieved.")
  list(
    root = x[k], f.root = fx[k],
    evals = data_frame(
      x = x[1:n_evals],
      fx = fx[1:n_evals],
      method = c(rep("bisection", bisections), rep("secant", n_evals - bisections))
    ),
    n_evals = n_evals
  )
}
#' @export
#' @rdname unibisect
simple_unibisect <- function(f, a, b, tol = 10*.Machine$double.eps, maxit = 100L) {
  # Minimal bisection root finder: returns only the root and its residual,
  # without the evaluation bookkeeping of unibisect().
  lo <- a
  hi <- b
  # Endpoint checks: either endpoint may already be a root.
  f_lo <- f(lo)
  if (abs(f_lo) <= tol) {
    return(list(root = lo, f.root = f_lo))
  }
  f_hi <- f(hi)
  if (sign(f_lo) == sign(f_hi)) stop("f(a) and f(b) must have opposite signs.")
  if (abs(f_hi) <= tol) {
    return(list(root = hi, f.root = f_hi))
  }
  # First midpoint; return early if it already hits the tolerance.
  mid <- (lo + hi)/2
  f_mid <- f(mid)
  if (abs(f_mid) < tol) {
    return(list(root = mid, f.root = f_mid))
  }
  # Bisect until the residual is within tolerance or the evaluation
  # budget (maxit, counting the three evaluations above) is spent.
  for (step in 4:maxit) {
    if (sign(f_lo) == sign(f_mid)) {
      lo <- mid
      f_lo <- f_mid
    } else {
      hi <- mid
      f_hi <- f_mid
    }
    mid <- (lo + hi)/2
    f_mid <- f(mid)
    if (abs(f_mid) <= tol) break
  }
  if (abs(f_mid) > tol) warning("tolerance not achieved.")
  list(root = mid, f.root = f_mid)
}
|
329635eac24f3c20566cd0e47a5500bf4c06851d | de97146cdd1331a2fe66108baebb1c38a3f43f3a | /example/02.Enrich/SGA/G5-VS-S5.down.R | 29ac0621d457703adf45baad4201899235d0cd7a | [
"MIT"
] | permissive | pengchy/EACO | 4824ee4e39bb4212fb790977a29c3d00df32f2d7 | 4f0e903f14186d260b6a1f2c15cec7178ab2b329 | refs/heads/master | 2020-06-04T03:27:13.112012 | 2015-08-07T02:04:22 | 2015-08-07T02:04:22 | 40,104,538 | 1 | 1 | null | 2015-08-03T05:16:51 | 2015-08-03T04:10:59 | null | UTF-8 | R | false | false | 1,220 | r | G5-VS-S5.down.R |
# Apparently generated batch script: SGA gene-set enrichment for the
# down-regulated genes of the G5-VS-S5 contrast, using the EACO
# EnrichPipeline helpers.  Timestamps bracket the run; q('no') exits
# without saving the workspace.
date()
source("/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/bin/EnrichSGA.R")
source("/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/bin/sort.data.frame.R")
source("/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/bin/Fisher.Chi.test.R")
# IDs of the differentially expressed genes to test for enrichment.
supplyID <- scan("/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/example/02.Enrich/00.gstat/G5-VS-S5.down",what="character",sep="\n")
# "NullFile" looks like a template placeholder for an optional universe-ID
# file; as generated it is literally "NullFile", so identical() is always
# TRUE and the first branch runs.
if(identical("NullFile","NullFile")){
res.dt <- EnrichSGA(gmt="/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/example/01.prep/Kappa/gSets.gmt.filt.gmt.filtkappa.gmt",supplyID=supplyID,p.adjust.methods="fdr",
test.method="FisherChiSquare",enrichFile="/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/example/02.Enrich//SGA///G5-VS-S5.down.difsga")
}else{
# With a universe file, restrict the enrichment background to those IDs.
univerID <- scan("NullFile",what="character",sep="\n")
res.dt <- EnrichSGA(gmt="/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/example/01.prep/Kappa/gSets.gmt.filt.gmt.filtkappa.gmt",supplyID=supplyID,univerID=univerID,p.adjust.methods="fdr",
test.method="FisherChiSquare",enrichFile="/panfs/home/kang/yangpc/bin/EnrichPipeline/EACO_r20150201/example/02.Enrich//SGA///G5-VS-S5.down.difsga")
}
date()
q('no')
|
45127d2281cfc04be7e018876141695aabffc691 | e0348679a3661bb43f6e08c6218a7b8802db8544 | /2_Script_Leitura_Base_git.R | 3362e2ccc3381e7970e216d1ebbc74219dee610c | [] | no_license | crfaccin/INMET | 1ebad662e73b18b1626cae6e02b4cefc55c42ac2 | 0cf403aafa00459ed97ca92880f8b3b2e0ae3dc1 | refs/heads/master | 2022-02-16T05:08:56.735284 | 2019-09-03T03:39:51 | 2019-09-03T03:39:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,078 | r | 2_Script_Leitura_Base_git.R | library(rvest)
library(tidyverse)
library(httr)
library(XML)
# Lista de estações do INMET
estacoes <- data.table::fread("Estacoes")
#lista_estacoes<-read.csv("estacoes.csv", header = TRUE, sep=";", skip=1)[,3]
# Função que extrai os dados do INMET a partir de um link
# O link deve ser do site do INMET
ler_inmet <- function(Site, Tratado = TRUE) {
  # Scrape one station series from the INMET (Brazilian National Institute
  # of Meteorology) research portal.
  #
  # Site:    URL of the "gera_serie_txt.php" page for the desired station
  #          and date range.
  # Tratado: kept for interface compatibility; currently unused.
  #
  # Returns a data frame of the station's records with station name,
  # latitude, longitude and altitude columns appended.
  url <- "http://www.inmet.gov.br/projetos/rede/pesquisa/inicio.php"
  # Start the session on the portal's login page.
  pgsession <- html_session(url)
  # Grab the login form (first form on the page).
  pgform <- html_form(pgsession)[[1]]
  # Fill in the credential fields (the portal accepts these values).
  filled_form <- set_values(pgform,
                            "mCod" = "a",
                            "mSenha" = "b")
  # Press the "enter" button to authenticate the session.
  teste <- submit_form(pgsession, filled_form, 'btnProcesso')
  # BUG FIX: the original called jump_to(pgsession, paste0(site)), reading
  # the global variable `site` and silently ignoring the Site argument.
  memberlist <- jump_to(pgsession, Site)
  # Extract the <pre> text payload from the returned page.
  page <- read_html(memberlist)
  dadosPag <- as.character(html_text(html_node(page,'pre')))
  # The payload is a text file in 4 parts separated by dashed lines:
  #   parts 1 and 3: nothing of interest
  #   part 2: station name, latitude and longitude
  #   part 4: the time-series records
  dadosPag2 <- strsplit(dadosPag, "\n")
  dadosPag3 <- strsplit(dadosPag,"\n--------------------\n")
  # Parse the time-series block (semicolon-separated with a header row).
  dados <- read.table(text = dadosPag3[[1]][4], sep=";", header = TRUE)
  # Station metadata parsed from fixed line positions of part 2.
  # (Unused intermediate subsets dados12/dados0 from the original removed.)
  estacao <- strsplit(dadosPag2[[1]][4], ": |\\(")[[1]][2]
  lat <- strsplit(dadosPag2[[1]][5], ": ")[[1]][2]
  long <- strsplit(dadosPag2[[1]][6], ": ")[[1]][2]
  alt <- strsplit(dadosPag2[[1]][7], ": ")[[1]][2]
  dados <- cbind(dados, estacao, lat, long, alt)
  gc()
  return(dados)
}
# Example: full series for station 82989, 1961-2016, with the selected
# variables encoded in the mAtributos query flags.
site<-("http://www.inmet.gov.br/projetos/rede/pesquisa/gera_serie_txt.php?&mRelEstacao=82989&btnProcesso=serie&mRelDtInicio=01/01/1961&mRelDtFim=01/01/2016&mAtributos=,,1,1,,,,,,1,1,,1,1,1,1,")
dados<-ler_inmet(site)
|
ba4820fbd5fc0f2c1df07502f00e2e1fa564450b | 9e64931578c45424e26d9296abaa35b76d2e4d9f | /investment/data_prep/sample_4_flowermeasure.R | 95d1671c4dc38bb79cbb99abab12a7e1472bd97e | [] | no_license | dbuona/proterant | 246aee53781c184ecb9b00156928e677240b3298 | e7dbcd0b0e0153bed2a0b87b266111afd9a9fb51 | refs/heads/master | 2023-06-12T14:27:58.639503 | 2023-06-06T18:32:30 | 2023-06-06T18:32:30 | 80,126,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | sample_4_flowermeasure.R | ### Explore the prunus data
rm(list=ls())
options(stringsAsFactors = FALSE)
graphics.off()
library(stringr)
library("ncdf4")
library(raster)
library(ggplot2)
library("brms")
library(dplyr)
library(purrr)
library(tidyr)
# Subsample herbarium records at full flowering for flower measurement.
# NOTE(review): setwd() makes the script machine-specific; consider
# relative paths or here::here().
setwd("~/Documents/git/proterant/investment/input")
d<-read.csv("midwest_round1Dec11.csv")
# Keep only records scored at full flowering (BBCH stage 65).
d.measure<-dplyr::filter(d,bbch.f==65)
table(d.measure$specificEpithet)
nested_prun <- d.measure %>% 
  group_by(specificEpithet) %>% # prep for work by Species
  nest() %>% # --> one row per Species
  ungroup() %>% 
  # Target sample size per species (30, or all records for rarer species).
  # NOTE(review): hard-coded in the species order printed by table() above;
  # confirm it stays in sync if the input data changes.
  mutate(n = c(30,30,30,8,30,30,30,6,30,30,30,7,
               30))
print(nested_prun)
# Draw n records per species (sample_n applied to each nested data frame).
sampled_pruny <- nested_prun%>%
  mutate(samp = map2(data, n, sample_n))
# Drop the full nested data and expand the sampled rows back out.
sampled_pruny<-sampled_pruny %>% 
  dplyr::select(-data) %>% 
  unnest(samp)
# Keep only the identifying columns and write out the measurement sample.
sampled_pruny2<-dplyr::select(sampled_pruny,specificEpithet,id,references)
write.csv(sampled_pruny2,"flowermeasures.csv", row.names = FALSE)
|
ad2eb94c0d617bd8bfa4f26357e11247acd64d12 | 1aa181d71afabca430b05437149e1a45ac35d5d5 | /testdata/create_testdata.R | ba05c997dc045a043870fc2e55f1fc092614ff69 | [] | no_license | maxplanck-ie/DGE-viz | fa49a354797cf44b71ff1acbc810fdcb1a787050 | b2660a325629496fcafa857c0a58f51bf4719157 | refs/heads/master | 2023-03-03T07:41:56.213459 | 2021-02-15T12:56:53 | 2021-02-15T12:56:53 | 155,186,623 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,208 | r | create_testdata.R | source('../helpers/helpers.R')
# Build small DGE test tables from the Bioconductor `pasilla` RNA-seq
# counts, running both DESeq2 and edgeR and writing each result as TSV.
config0 = parse_yaml('../config.yaml')
# BiocManager::install("pasilla", lib = '/rstudio/rauer/Rlib_DGE-viz_3.6.0')
# BiocManager::install("DESeq2", lib = '/rstudio/rauer/Rlib_DGE-viz_3.6.0')
# BiocManager::install("edgeR", lib = '/rstudio/rauer/Rlib_DGE-viz_3.6.0')
# Shared library path from the app config; all packages load from there.
lib.dir0 = config0['lib.path']
library(DESeq2, lib.loc = lib.dir0)
library(pasilla, lib.loc = lib.dir0)
library(dplyr, lib.loc = lib.dir0)
library(tibble, lib.loc = lib.dir0)
library(readr, lib.loc = lib.dir0)
library(edgeR, lib.loc = lib.dir0)
# Raw count matrix and sample annotation shipped with the pasilla package.
pasCts <- system.file("extdata",
                      "pasilla_gene_counts.tsv",
                      package="pasilla", mustWork=TRUE)
pasAnno <- system.file("extdata",
                       "pasilla_sample_annotation.csv",
                       package="pasilla", mustWork=TRUE)
cts <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
coldata <- read.csv(pasAnno, row.names=1)
coldata <- coldata[,c("condition","type")]
coldata$condition <- factor(coldata$condition)
coldata$type <- factor(coldata$type)
# Annotation sample names carry an "fb" suffix that the count columns
# lack; strip it so the two tables line up.
rownames(coldata) <- sub("fb", "", rownames(coldata))
all(rownames(coldata) %in% colnames(cts))
# Reorder count columns to match the annotation rows.
cts <- cts[, rownames(coldata)]
all(rownames(coldata) == colnames(cts))
# DESeq2
dds <- DESeqDataSetFromMatrix(countData = cts,
                              colData = coldata,
                              design = ~ condition)
featureData <- data.frame(gene=rownames(cts))
mcols(dds) <- DataFrame(mcols(dds), featureData)
mcols(dds)
dds = DESeq(dds)
# Plain Wald-test results ...
tab0 = results(dds) %>% as.data.frame() %>% rownames_to_column()
write_tsv(tab0,'./Pasilla_testdata.DESeq2.tsv')
# ... and shrunken log2 fold changes for the same contrast.
tab1 = lfcShrink(dds,coef="condition_untreated_vs_treated", type="normal") %>% as.data.frame() %>% rownames_to_column()
write_tsv(tab1,'./Pasilla_testdata.DESeq2_lfcShrink.tsv')
# edgeR
# NOTE(review): the edgeR model groups by `type`, while the DESeq2 design
# above used `condition` -- confirm this difference is intentional.
y <- DGEList(counts=cts,group=as.factor(coldata$type))
keep <- filterByExpr(y)
y <- y[keep,,keep.lib.sizes=FALSE]
y <- calcNormFactors(y)
design <- model.matrix(~as.factor(coldata$type))
y <- estimateDisp(y,design)
fit <- glmQLFit(y,design)
qlf <- glmQLFTest(fit,coef=2)
tab2 = topTags(qlf, n = Inf) %>% as.data.frame %>% rownames_to_column()
write_tsv(tab2,'./Pasilla_testdata.edgeR.tsv')
|
c6e0eb3bab0ea3ddbdb733b4cac2884ce2edecf3 | 42d2f0089120491588b857fbffd6a811ade40fd6 | /R/utils.r | aa5b0620249fc9cf8352cce9909f911fd15741c0 | [] | no_license | assaforon/bootkappas | 1429cea310b93fa71188d357b419c5281e21545d | cf5a6be79fe81794f8610d6137abdc6b94b16cb4 | refs/heads/master | 2021-01-15T11:58:23.564635 | 2017-09-15T22:33:05 | 2017-09-15T22:33:05 | 99,637,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 482 | r | utils.r | #'
#
#
#' Generate bootstrap or permutation resamples of a vector of IDs.
#'
#' @param orig vector of observation IDs to resample.
#' @param B number of resamples to generate (columns of the output).
#' @param strata optional vector, the same length as \code{orig}, of stratum
#'   labels; when supplied, resampling is done within each stratum.
#' @param boots if TRUE, sample with replacement (bootstrap); if FALSE,
#'   sample without replacement (permutation).
#' @return a length(orig) x B matrix; row i holds the draws for original
#'   position i.  When \code{strata} is given, each returned ID is the
#'   original ID prefixed by its stratum label ("<stratum> <id>").
genperms <- function(orig, B, strata = NULL, boots = TRUE)
{
  if (!is.null(strata)) {
    if (length(strata) != length(orig)) stop("Length mismatch.")
    # Prefix each ID with its stratum label so identical IDs in different
    # strata cannot alias one another.
    orig <- paste(strata, orig)
    # The same IDs rearranged into contiguous blocks, one block per stratum.
    resort <- unlist(split(orig, strata), use.names = FALSE)
    # Resample within each stratum, then map the block-ordered draws back to
    # the original observation order.  lapply/unlist (rather than sapply) is
    # required here: with unequal stratum sizes sapply returned a list, and
    # the subsequent flat indexing silently selected whole groups.
    return(replicate(B, {
      drawn <- unlist(
        lapply(split(orig, strata),
               function(g) g[sample.int(length(g), replace = boots)]),
        use.names = FALSE
      )
      drawn[match(orig, resort)]
    }))
  }
  # Unstratified case: resample the whole ID vector.  Indexing through
  # sample.int avoids sample()'s "1:n" surprise for a length-one numeric.
  replicate(B, orig[sample.int(length(orig), replace = boots)])
}
4682a7670d4fae0620410376cb4e30827cfc5c84 | e2c990cc4e5f9f1d3f1eca68a237da8c78d6125a | /tests/testthat/test_ArraySchema.R | 5b3632ce23fa791352866fa2342c3629fbd73333 | [] | no_license | wrathematics/TileDB-R | 2e32727f3054bc303c4ed69796d453ceaf8ac34e | cfd461d92ce3b6a33e8c6946284c26a499084ae0 | refs/heads/master | 2020-03-15T08:01:40.849145 | 2018-05-03T19:52:45 | 2018-05-03T19:55:46 | 132,042,246 | 0 | 0 | null | 2018-05-03T19:46:30 | 2018-05-03T19:46:29 | null | UTF-8 | R | false | false | 2,840 | r | test_ArraySchema.R | library(tiledb)
context("tiledb::ArraySchema")
# Smoke test: a schema can be constructed from a one-dimensional domain
# and a single default attribute.
test_that("tiledb::ArraySchema default constructor works", {
  ctx <- tiledb::Ctx()
  d1 <- tiledb::Dim(ctx, domain=c(1L, 100L))
  dom <- tiledb::Domain(ctx, c(d1))
  a1 <- tiledb::Attr(ctx)
  sch <- tiledb::ArraySchema(ctx, dom, c(a1))
  expect_is(sch, "ArraySchema")
})
# Checks the defaults produced by the plain ArraySchema constructor:
# domain/dimension/attribute round-trips, COL_MAJOR ordering, dense layout.
# (Fixes the "arugment" typo in the original test description.)
test_that("tiledb::ArraySchema default constructor argument values are correct", {
  ctx <- tiledb::Ctx()
  d1 <- tiledb::Dim(ctx, domain = c(1L, 100L))
  d2 <- tiledb::Dim(ctx, domain = c(1L, 100L))
  dom <- tiledb::Domain(ctx, c(d1, d2))
  a1 <- tiledb::Attr(ctx)
  sch <- tiledb::ArraySchema(ctx, dom, c(a1))
  # test domain
  expect_is(tiledb::domain(sch), "Domain")
  # test dimensions
  ds <- tiledb::dimensions(sch)
  expect_equal(length(ds), 2)
  expect_is(ds[[1]], "Dim")
  expect_is(ds[[2]], "Dim")
  # test attrs
  as <- tiledb::attrs(sch)
  expect_equal(length(as), 1)
  expect_is(as[[1]], "Attr")
  # test that default R schema is COL_MAJOR
  expect_equal(tiledb::cell_order(sch), "COL_MAJOR")
  expect_equal(tiledb::tile_order(sch), "COL_MAJOR")
  # test that the default R schema is dense
  expect_false(is.sparse(sch))
})
# Checks that every explicitly supplied constructor argument (orders,
# compressors, sparsity) round-trips through the schema accessors.
# (Fixes the "arugment" typo in the original test description.)
test_that("tiledb::ArraySchema full constructor argument values are correct", {
  ctx <- tiledb::Ctx()
  d1 <- tiledb::Dim(ctx, domain = c(1L, 100L))
  d2 <- tiledb::Dim(ctx, domain = c(1L, 100L))
  d3 <- tiledb::Dim(ctx, domain = c(1L, 100L))
  dom <- tiledb::Domain(ctx, c(d1, d2, d3))
  a1 <- tiledb::Attr(ctx, "attribute1", type = "FLOAT64")
  a2 <- tiledb::Attr(ctx, "attribute2", type = "INT32")
  sch <- tiledb::ArraySchema(ctx, dom, c(a1, a2),
                             cell_order = "ROW_MAJOR",
                             tile_order = "ROW_MAJOR",
                             coords_compressor = tiledb::Compressor("GZIP", 10),
                             offsets_compressor = tiledb::Compressor("ZSTD", 5),
                             sparse = TRUE)
  # test domain
  expect_is(tiledb::domain(sch), "Domain")
  # test dimensions
  ds <- tiledb::dimensions(sch)
  expect_equal(length(ds), 3)
  expect_is(ds[[1]], "Dim")
  expect_is(ds[[2]], "Dim")
  expect_is(ds[[3]], "Dim")
  # test attrs
  as <- tiledb::attrs(sch)
  expect_equal(length(as), 2)
  expect_equal(names(as), c("attribute1", "attribute2"))
  expect_is(as[[1]], "Attr")
  expect_is(as[[2]], "Attr")
  expect_equal(tiledb::cell_order(sch), "ROW_MAJOR")
  expect_equal(tiledb::tile_order(sch), "ROW_MAJOR")
  compr <- tiledb::compressor(sch)
  expect_equal(tiledb::compressor_type(compr[["coords"]]), "GZIP")
  expect_equal(tiledb::compressor_level(compr[["coords"]]), 10)
  expect_equal(tiledb::compressor_type(compr[["offsets"]]), "ZSTD")
  expect_equal(tiledb::compressor_level(compr[["offsets"]]), 5)
  expect_true(is.sparse(sch))
})
09eef3b5c30d8958828bff37e223550e60131c83 | 1c5e993681ab00b3eb698d05f84e1daf2b4723e9 | /man/sampleBxy.Rd | 19955dd7da885b8450a9af6cb77bd56640ab7c16 | [] | no_license | cran/EDISON | d4e9ecb9187ec33efa2276d00ca5b37a487159e1 | 9d3d3660629fe0ee9fa81697452c47369485fa3a | refs/heads/master | 2021-01-21T21:55:02.171225 | 2016-03-30T21:04:12 | 2016-03-30T21:04:12 | 17,678,892 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 777 | rd | sampleBxy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampleBxy.R
\name{sampleBxy}
\alias{sampleBxy}
\title{Sample regression coefficients.}
\usage{
sampleBxy(xi, y, Sig2, delta2)
}
\arguments{
\item{xi}{Response data.}
\item{y}{Target data.}
\item{Sig2}{Sigma squared.}
\item{delta2}{Signal-to-noise hyperparameter.}
}
\value{
The regression parameters.
}
\description{
This function samples the regression coefficients given the current state of
the MCMC simulation.
}
\author{
Sophie Lebre
}
\references{
For details of the regression model, see:
Dondelinger et al. (2012), "Non-homogeneous dynamic Bayesian networks with
Bayesian regularization for inferring gene regulatory networks with
gradually time-varying structure", Machine Learning.
}
|
0dc2979b472b21a6d8d435d459f942ea9de6918b | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/cognitoidentity_list_tags_for_resource.Rd | ac6bb2c7aa38738027e1207a1c801a20fe2a549a | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 997 | rd | cognitoidentity_list_tags_for_resource.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentity_operations.R
\name{cognitoidentity_list_tags_for_resource}
\alias{cognitoidentity_list_tags_for_resource}
\title{Lists the tags that are assigned to an Amazon Cognito identity pool}
\usage{
cognitoidentity_list_tags_for_resource(ResourceArn)
}
\arguments{
\item{ResourceArn}{[required] The Amazon Resource Name (ARN) of the identity pool that the tags are
assigned to.}
}
\value{
A list with the following syntax:\preformatted{list(
Tags = list(
"string"
)
)
}
}
\description{
Lists the tags that are assigned to an Amazon Cognito identity pool.
A tag is a label that you can apply to identity pools to categorize and
manage them in different ways, such as by purpose, owner, environment,
or other criteria.
You can use this action up to 10 times per second, per account.
}
\section{Request syntax}{
\preformatted{svc$list_tags_for_resource(
ResourceArn = "string"
)
}
}
\keyword{internal}
|
3678f3bd5d2788997658af2b490d505eba7f73f4 | 10c2dbb072e022a11a61f3a86d7f327cae85aa6e | /man/vep_region.Rd | 7c8684c8e58d1d5ab1a634ba3684a08c7c0c9f09 | [] | no_license | dwinter/rensembl | 3dda8f10637de87d1663b4a4771baddc2322c8f4 | 2225df9968dfb1ad8bc3c9c6d1cc8c950cd65453 | refs/heads/master | 2020-05-30T05:44:51.784053 | 2016-10-25T21:20:21 | 2016-10-25T21:22:56 | 29,898,025 | 2 | 3 | null | 2015-10-12T18:13:29 | 2015-01-27T04:55:32 | R | UTF-8 | R | false | true | 370 | rd | vep_region.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vep.r
\name{vep_region}
\alias{vep_region}
\title{Fetch variant consequences from regions}
\usage{
vep_region(region, species = "human", format = "json")
}
\description{
Fetch variant consequences from regions
}
\examples{
vep_region(c("21 26960070 rs116645811 G A", "21 26965148 - G A"))
}
|
c463e72e38d53ba9cb5167a3c8b7b36a06fb115a | 4a5266a00d51e309496527eea724a2133988d23a | /scripts/plots.R | 63b837cba5ec7fbfef38d599b6f769921d7757cb | [
"MIT"
] | permissive | clint-leach/optimal-design | b33522c2fc87fc4246d89cbff527509394b1e637 | 81e237dd365a135bb46f32bf39d92dd7c5104de1 | refs/heads/master | 2023-04-12T04:47:18.346349 | 2021-08-09T17:49:42 | 2021-08-09T17:49:42 | 277,606,855 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,684 | r | plots.R | library(magrittr)
library(plyr)
library(ggplot2)
library(patchwork)
library(reshape2)
# Reading in output from julia code
# `data`: one row per candidate sampling location, shuffled, with a 10x10
# lat/lon grid index attached and the response y coerced to numeric.
data <- readRDS("output/data.rds") %>%
  arrange(sample(1:100, replace = FALSE)) %>%
  mutate(lat = rep(1:10, 10),
         lon = rep(1:10, each = 10),
         y = as.numeric(y))
# Posterior summaries (median and 95% interval of p) per location, joined
# back onto the design data.
post <- readRDS("output/chain.rds") %>%
  melt(varnames = c("loc", "iter"), value.name = "p") %>%
  ddply(.(loc), summarise,
        med = median(p),
        lower = quantile(p, 0.025),
        upper = quantile(p, 0.975)) %>%
  join(data)
# Location minimizing the design criterion (the optimal next sample).
xopt <- data$x[which.min(data$score)]
dopt <- data$loc[which.min(data$score)]
# Plots ========================================================================
# Initial fit
post %>%
  ggplot(aes(x, med)) +
  geom_line() +
  # BUG FIX: geom_ribbon's aesthetics are ymin/ymax; the original passed
  # min/max, which ggplot2 does not recognize, so no credible band drew.
  geom_ribbon(aes(ymin = lower, ymax = upper), alpha = 0.2) +
  geom_point(data = subset(data, K1 == 1), aes(x, as.numeric(y), shape = factor(y)), size = 2) +
  scale_shape_manual(values = c(1, 19), guide = FALSE) +
  geom_vline(xintercept = xopt, linetype = 2) +
  theme_classic() +
  ylab("p") +
  ggtitle("a") -> fig1a
# Initial sampling locations
data %>%
  ggplot(aes(lon, lat, fill = x)) +
  geom_raster() +
  scale_fill_distiller(type = "div", palette = "BrBG") +
  geom_point(aes(lon, lat, shape = factor(y)), data = subset(data, K1 == 1), size = 3) +
  scale_shape_manual("y", values = c(1, 19)) +
  theme_classic() +
  scale_y_continuous("northing", expand = c(0, 0)) +
  scale_x_continuous("easting", expand = c(0, 0)) +
  theme(axis.text = element_blank()) +
  ggtitle("b") -> fig1b
# Scores by x
data %>%
  ggplot(aes(x, score)) +
  geom_line(alpha = 0.5) +
  geom_point(aes(color = score)) +
  scale_color_viridis_c(option = "B", guide = FALSE) +
  geom_vline(xintercept = xopt, linetype = 2) +
  geom_rug(data = subset(data, K1 == 1), aes(x = x), inherit.aes = FALSE) +
  ylab("design criterion") +
  theme_classic() +
  ggtitle("c") -> fig1c
# Map of scores
data %>%
  ggplot(aes(lon, lat, fill = score)) +
  geom_raster() +
  scale_fill_viridis_c("design\ncriterion", option = "B") +
  geom_point(aes(lon, lat, shape = factor(y)), data = subset(data, K1 == 1), size = 3, inherit.aes = FALSE) +
  scale_shape_manual(values = c(1, 19), guide = FALSE) +
  geom_point(aes(lon, lat), data = subset(data, loc == dopt), color = "white", shape = 4, size = 3) +
  theme_classic() +
  scale_y_continuous("northing", expand = c(0, 0)) +
  scale_x_continuous("easting", expand = c(0, 0)) +
  theme(axis.text = element_blank()) +
  ggtitle("d") -> fig1d
# Assemble the 2x2 panel figure and write it to disk.
pdf(file = "output/fig1.pdf", width = 7, height = 6)
fig1a + fig1b + fig1c + fig1d + plot_layout(ncol = 2)
dev.off()
|
ed1afbf396bdd92447019851c132a815a4bbe360 | 43418fa45f03b4c68c9e6f6fcefdec462105f6ee | /man/eV2L.Rd | 925fb5fd9a26de499d2c9e796e01182353cc2a65 | [] | no_license | cran/dielectric | 9e66a9271b93042cb0d31de23be8f496663ee687 | 6c0e1164e5ba3c1da87a9b014c701d1236cbb0de | refs/heads/master | 2020-04-06T03:31:31.574027 | 2012-03-04T00:00:00 | 2012-03-04T00:00:00 | 17,695,498 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 266 | rd | eV2L.Rd | \name{eV2L}
\alias{eV2L}
\title{eV2L}
\usage{
eV2L(energy)
}
\arguments{
\item{energy}{energy in eV}
}
\description{
Unit conversions
}
\details{
Unit conversions
}
\seealso{
Other conversion: \code{\link{L2eV}}, \code{\link{L2w}},
\code{\link{t2eV}}
}
|
a848a501e9d4844dfe3947be591f6d1bd5e4cd59 | 83bfc2ffa4b4e28c1c6ea877c204931980a3e99d | /reports/PCB_resampling/PCB_total_glasso_graph.R | a3ac3d66f001d68695c7b3f3b95b917d9d4e123b | [] | no_license | wal615/prime_project | 0d555626292a713d94700e565363681e2e2e514e | 8a85b47ecbcaf4419ca33588fd607019226bf3ca | refs/heads/master | 2022-07-04T20:58:33.789355 | 2020-05-05T20:13:16 | 2020-05-05T20:13:16 | 111,431,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,822 | r | PCB_total_glasso_graph.R | library(data.table)
library(tidyverse)
library(ggforce)
setwd("~/dev/projects/Chen_environmental_study/")
# Violin/box plots of estimated total effects from the PCB resampling
# simulations, for each combination of main/interaction effect type
# (fixed vs random) and estimator (glasso vs svd).
### fixed fixed
# lasso
load(file = "./result/PCB_resampling/simulation_result_list_fixed_fixed_total_glasso")
table_fixed_fixed <- rbindlist(result_list_fixed_fixed_total_glasso)
# Keep rows with a non-zero true total effect; drop bookkeeping columns.
total_glasso <- table_fixed_fixed[true_total != 0, -c(2,4,6)]
# Paginated facets: 3 pages of 5x3 panels over (main_fixed, inter_fixed, pro, rho).
plot_PCB_fixed_fixed_total_glasso <- as.list(numeric(3))
for (i in (1:3)){
  plot_PCB_fixed_fixed_total_glasso[[i]] <- tidyr::gather(total_glasso, ends_with("total"), key = "method", value = "value") %>%
    ggplot(., aes(x = method, y = value, fill = method)) +
    geom_violin(alpha = 0.2) +
    geom_boxplot(alpha = 0.7) +
    facet_wrap_paginate(facets = vars(main_fixed, inter_fixed, pro, rho),
                        ncol = 5 , nrow = 3, scales = "free", labeller = "label_both", page = i)+
    ggtitle("Total effect with glasso under main fixed and inter fixed") +
    theme(plot.title = element_text(hjust = 0.5))
}
# svd
load(file = "./result/PCB_resampling/simulation_result_list_fixed_fixed_total_svd")
table_fixed_fixed <- rbindlist(result_list_fixed_fixed_total_svd)
main <- table_fixed_fixed[true_total != 0, -c(2,4,6)]
plot_PCB_fixed_fixed_total_svd <- tidyr::gather(main, ends_with("total"), key = "method", value = "value") %>%
  ggplot(., aes(x = method, y = value, fill = method)) +
  geom_violin(alpha = 0.2) +
  geom_boxplot(alpha = 0.7) +
  facet_wrap(facets = vars(main_fixed, inter_fixed, pro), ncol =3 , scales = "free", labeller = "label_both")+
  # FIX: original title read "with svd fixed main fixed and inter fixed".
  ggtitle("Total effect with svd under main fixed and inter fixed") +
  theme(plot.title = element_text(hjust = 0.5))
### fixed random
# lasso
load(file = "./result/PCB_resampling/simulation_result_list_fixed_random_total_glasso")
table_fixed_random <- rbindlist(result_list_fixed_random_total_glasso)
total_glasso <- table_fixed_random[true_total != 0, -c(2,4,6)]
plot_PCB_fixed_random_total_glasso <- as.list(numeric(3))
for (i in (1:3)){
  plot_PCB_fixed_random_total_glasso[[i]] <- tidyr::gather(total_glasso, ends_with("total"), key = "method", value = "value") %>%
    ggplot(., aes(x = method, y = value, fill = method)) +
    geom_violin(alpha = 0.2) +
    geom_boxplot(alpha = 0.7) +
    facet_wrap_paginate(facets = vars(main_fixed, inter_fixed, pro, rho),
                        ncol = 5 , nrow = 3, scales = "free", labeller = "label_both", page = i)+
    ggtitle("Total effect with glasso under main fixed and inter random") +
    theme(plot.title = element_text(hjust = 0.5))
}
# svd
load(file = "./result/PCB_resampling/simulation_result_list_fixed_random_total_svd")
table_fixed_random <- rbindlist(result_list_fixed_random_total_svd)
main <- table_fixed_random[true_total != 0, -c(2,4,6)]
plot_PCB_fixed_random_total_svd <- tidyr::gather(main, ends_with("total"), key = "method", value = "value") %>%
  ggplot(., aes(x = method, y = value, fill = method)) +
  geom_violin(alpha = 0.2) +
  geom_boxplot(alpha = 0.7) +
  facet_wrap(facets = vars(main_fixed, inter_fixed, pro), ncol =3 , scales = "free", labeller = "label_both")+
  ggtitle("Total effect with svd under main fixed and inter random") +
  theme(plot.title = element_text(hjust = 0.5))
### random random
# lasso
load(file = "./result/PCB_resampling/simulation_result_list_random_random_total_glasso")
table_random_random <- rbindlist(result_list_random_random_total_glasso)
total_glasso <- table_random_random[true_total != 0, -c(2,4,6)]
plot_PCB_random_random_total_glasso <- as.list(numeric(3))
for (i in (1:3)){
  plot_PCB_random_random_total_glasso[[i]] <- tidyr::gather(total_glasso, ends_with("total"), key = "method", value = "value") %>%
    ggplot(., aes(x = method, y = value, fill = method)) +
    geom_violin(alpha = 0.2) +
    geom_boxplot(alpha = 0.7) +
    facet_wrap_paginate(facets = vars(main_fixed, inter_fixed, pro, rho),
                        ncol = 5 , nrow = 3, scales = "free", labeller = "label_both", page = i)+
    # FIX: original copy-pasted title said "main fixed and inter fixed".
    ggtitle("Total effect with glasso under main random and inter random") +
    theme(plot.title = element_text(hjust = 0.5))
}
#svd
load(file = "./result/PCB_resampling/simulation_result_list_random_random_total_svd")
table_random_random <- rbindlist(result_list_random_random_total_svd)
main <- table_random_random[true_total != 0, -c(2,4,6)]
plot_PCB_random_random_total_svd <- tidyr::gather(main, ends_with("total"), key = "method", value = "value") %>%
  ggplot(., aes(x = method, y = value, fill = method)) +
  geom_violin(alpha = 0.2) +
  geom_boxplot(alpha = 0.7) +
  facet_wrap(facets = vars(main_fixed, inter_fixed, pro), ncol =3 , scales = "free", labeller = "label_both")+
  # FIX: original copy-pasted title said "main fixed and inter random".
  ggtitle("Total effect with svd under main random and inter random") +
  theme(plot.title = element_text(hjust = 0.5))
|
576a96b13fd8d1e83cc3452bc9e7864b9aefb6e2 | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /dataCamp/openCourses/dataAnalysisAndStatisticalInference/4_foundationsForInferenceSamplingDistributions/5_question2.R | f839565215004b672a8713b8234c535615627b65 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | R | false | false | 1,004 | r | 5_question2.R | # Question 2
#######################################################################################################################
#
# If we're interested in estimating the average living area of homes in Ames using the sample,
# our best single guess is the sample mean: mean(samp1).
#
# Depending on which 50 homes you selected, your estimate could be a bit above or a bit below the true
# population mean of approximately 1,500 square feet. In general, though, the sample mean turns out to be a pretty
# good estimate of the average living area, and we were able to get it by sampling less than 3% of the population.
#
# Suppose we took two more samples, one of size 100 and one of size 1000.
# Which would you think would provide a more accurate estimate of the population mean?
#
#######################################################################################################################
1 Sample size of 50
2 Sample size of 100
3 Sample size of 1000
Answer - 3 Sample size of 1000
|
0185c9957218f08ccfc423618b84819da50c323a | 02649b1fb964689f49e87c4c5ac906f17cd73760 | /man/GenerateSeriesToCutoff.Rd | 594bf570431d5e990be84140a46a707d33870d46 | [] | no_license | grieman/grieman | 02e4058e679d17610e6a28004fa044883c9390f3 | 6acf5cd5be335ad340fda40a01bf5a3480fcdced | refs/heads/master | 2021-01-18T17:54:44.693810 | 2017-07-31T20:30:54 | 2017-07-31T20:30:54 | 86,822,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 828 | rd | GenerateSeriesToCutoff.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenerateSeriesToCutoff.R
\name{GenerateSeriesToCutoff}
\alias{GenerateSeriesToCutoff}
\title{GenerateSeriesToCutoff}
\usage{
GenerateSeriesToCutoff(N, mu, sigma, cutoff_point, starting_point)
}
\arguments{
\item{N}{Maximum length of series}
\item{mu}{mean}
\item{sigma}{variance}
\item{cutoff_point}{threshold at which to cut series}
\item{starting_point}{first point in generated series}
}
\value{
A vector of points
}
\description{
This function generates a random series with a set trend, and will cut the series as soon as it crosses a certain threshold. The series, p, is generated as follows: p[i - 1] * exp(rnorm(1, mu, sigma))
}
\examples{
GenerateSeriesToCutoff(100, .01, .005, 200, 100)
GenerateSeriesToCutoff(100, .1, .001, 100, 50)
}
|
742525a17c45c28bc37e6f8665df486d0acd3665 | 1bcbbf7a623d2605e4951096c6e6c78204b02dac | /orderbookVPIN.R | b38ee244393e8cc321cfdab04c0038df18d60cb5 | [] | no_license | qg0/rltrading | 5153bdf80680df3b32778de0cfb745ef2840be08 | f78045ec021bdc257e856dc926039276d97e47a5 | refs/heads/master | 2021-06-14T08:43:25.942764 | 2017-02-24T17:53:36 | 2017-02-24T17:53:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,553 | r | orderbookVPIN.R | ##VPIN
#install.packages('fasttime',repos='http://www.rforge.net/', type="source")
#install.packages("data.table")
require(data.table);
require(fasttime);
require(plyr)
# Now we have an xts data frame called 'stock' with a DateTime index and...
# two columns: Price and Volume
# Vbucket=Number of volume buckets in an average volume day (Vbucket=50)
# Volume-Synchronized Probability of Informed Trading (VPIN).
#
# stock:   an xts object with a DateTime index and two columns,
#          'Price' and 'Volume'.
# Vbucket: number of volume buckets in an average volume day.
# period:  time-bar resolution used to pre-aggregate the ticks
#          (passed to xts::endpoints).
#
# Returns an xts series with a single column 'VPIN'.
VPIN=function(stock,Vbucket=50, period="seconds") {
  # One-lag price change per tick.
  stock$dP1=diff(stock[,'Price'],lag=1,diff=1,na.pad=TRUE)
  # Aggregate price changes and volume into time bars of `period`.
  ends=endpoints(stock,period)
  timeDF=period.apply(stock[,'dP1'],INDEX=ends,FUN=sum)
  timeDF$Volume=period.apply(stock[,'Volume'],INDEX=ends,FUN=sum)
  # Vbar = mean hourly volume divided by the bucket count, i.e. the
  # target volume per bucket.
  Vbar=mean(period.apply(timeDF[,'Volume'],INDEX=endpoints(timeDF,'hours'),
                         FUN=sum))/Vbucket
  # Fraction of a bucket contributed by each bar, and its running total.
  timeDF$Vfrac=timeDF[,'Volume']/Vbar
  timeDF$CumVfrac=cumsum(timeDF[,'Vfrac'])
  # Split bars that straddle a bucket boundary: `Next` is the share of a
  # bar's price change that belongs to the following bucket.
  timeDF$Next=(timeDF[,'CumVfrac']-floor(timeDF[,'CumVfrac']))/timeDF[,'Vfrac']
  timeDF[timeDF[,'Next']<1,'Next']=0
  timeDF$Previous=lag(timeDF[,'dP1'])*lag(timeDF[,'Next'])
  # dP2: bucket-aligned price change (remaining share of this bar plus
  # the carried-over share of the previous bar).
  timeDF$dP2=(1-timeDF[,'Next'])*timeDF[,'dP1'] + timeDF[,'Previous']
  # Bucket id for each bar; the diff/shift keeps each bucket's last bar.
  timeDF$Vtick=floor(timeDF[,'CumVfrac'])
  timeDF[,'Vtick']=timeDF[,'Vtick']-diff(timeDF[,'Vtick']); timeDF[1,'Vtick']=0
  # Collapse to one row per volume bucket (last bar per Vtick).
  timeDF=as.data.frame(timeDF); timeDF[,'DateTime']=row.names(timeDF)
  timeDF=ddply(as.data.frame(timeDF),.(Vtick),last)
  timeDF=as.xts(timeDF[,c('Volume','dP2','Vtick')],
                order.by=fastPOSIXct(timeDF$DateTime,tz='GMT'))
  timeDF[1,'dP2']=0
  # Rolling volatility of the bucket price changes over Vbucket buckets.
  timeDF$sigma=rollapply(timeDF[,'dP2'],Vbucket,FUN=sd,fill=NA)
  timeDF$sigma=na.fill(timeDF$sigma,"extend")
  # Split bucket volume into buy/sell via pnorm(dP2/sigma);
  # OI is the absolute buy/sell order imbalance per bucket.
  timeDF$Vbuy=Vbar*pnorm(timeDF[,'dP2']/timeDF[,'sigma'])
  timeDF$Vsell=Vbar-timeDF[,'Vbuy']
  timeDF$OI=abs(timeDF[,'Vsell']-timeDF[,'Vbuy'])
  # VPIN = rolling sum of imbalances over Vbucket buckets, normalised
  # by the total volume in those buckets.
  timeDF$VPIN=rollapply(timeDF[,'OI'],Vbucket,sum)/(Vbar*Vbucket)
  timeDF=timeDF[,c('VPIN')]
  return(timeDF)
}
# --- Analysis script: compute VPIN on a saved tick/order book file ----
# NOTE(review): setwd() with a hard-coded Windows path makes this
# non-portable; consider file.path()/relative paths.
fname<-"g:/TRADE/Data/research/"
setwd(fname)
fname<-c("tickorderbookSI07072015.RData")
load(fname)
# If the columns were read in as factors, convert them back to numeric.
if(!is.numeric(df[,2]))
for(i in 2 :ncol(df))
df[,i]<-as.numeric(levels(df[,i]))[df[,i]]
#Discrete data
# Build an xts series indexed by datetime; keep price/volume columns
# and rename them to the names VPIN() expects.
dfts<-xts(x=df[,-1],order.by=df$datetime, unique=FALSE)
dfts<-dfts[!is.na(index(dfts))]
dfts<-dfts[, c("price", "volume")]
colnames(dfts)<-c("Price", "Volume")
# Compute VPIN with 50 volume buckets and merge it back onto the series.
out=VPIN(dfts,50)
vv<-merge(dfts, out)
vv$VPIN<-na.fill(vv$VPIN,"extend")
# Down-sample to one observation per second, then plot absolute price
# changes against VPIN.
dfsec<-endpoints(dfts, "seconds")
vvsec<-vv[dfsec,]
vvsec$delta<-diff(vvsec$Price)
qplot(x=VPIN, y=abs(delta), data=vvsec, color=VPIN)
|
1ae709db3f8b1f5097751d7592a51d0b50086841 | f497fa661eac1e041e8516b3f07ba9c753a4a421 | /cachematrix.R | 6a5a9cfe23718e42d1a05e4a06b9835905ef5b0c | [] | no_license | brianddatascience/ProgrammingAssignment2 | 9ad16a4e1fa86a2487f0fe2b1ee4faca102f0c21 | d1734c7f7e30f40017b2013aa7fda369b84d1f0e | refs/heads/master | 2021-01-18T07:56:19.875871 | 2016-09-03T22:54:39 | 2016-09-03T22:54:39 | 67,312,352 | 0 | 0 | null | 2016-09-03T21:23:53 | 2016-09-03T21:23:52 | null | UTF-8 | R | false | false | 1,322 | r | cachematrix.R | ## These two functions implement a special version of
## inverting a non-singuler matrix in such a way as to
## cache the computation so that once performed, if the value is
## requested again, it will be returned having to recompute it.
## The function makeCacheMatrix takes the matrix we wish to invert
## and converts it to a list that appends additional functions
## to it which enable the caching.
# Wrap a matrix `x` in a list of accessors that also carry a cache for
# its inverse. The cache starts empty (NULL) and is invalidated whenever
# a new matrix is stored with set().
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL  # new matrix, old inverse no longer valid
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    inv_cache <<- solve
  }
  getsolve <- function() {
    inv_cache
  }
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## The function cacheSolve takes the special matix created by
## makeCacheMatrix and ckeck to see if the inverse already
## exists. If so, it returns the cached value (and notifies you).
## Else it computes the inverse.
##
# Return the inverse of the special matrix object `x` created by
# makeCacheMatrix(). On a cache hit the stored inverse is returned
# (with a message); otherwise it is computed with solve(), stored back
# into the object, and returned. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
76aad3b745383f9002627dd85e45eddd194543a5 | 43c419ef955c3cefc82642f6289e37948df582f1 | /tests/testthat/test-detect_entities.R | 18db2b5e5e2b1960fe32fc13ead67a7ee0099388 | [] | no_license | cloudyr/aws.comprehend | c3a50c8d20d74d673acdb849bc6271b6ba7e139b | d48c95542c28f268d67929c8ea828437a7a9d72b | refs/heads/master | 2023-06-07T22:16:02.674457 | 2020-03-18T14:58:34 | 2020-03-18T14:58:34 | 124,287,441 | 12 | 5 | null | 2023-05-31T17:07:53 | 2018-03-07T19:59:01 | R | UTF-8 | R | false | false | 1,098 | r | test-detect_entities.R | context("detect_entities")
body = get_request_body()
test_that("detect_entities works on single string", {
output <- with_mock(
comprehendHTTP = mock_comprehendHTTP,
detect_entities(text = body$single$Text,
language = body$single$LanguageCode)
)
expected <- read.table(sep="\t", text="
Index BeginOffset EndOffset Score Text Type
0 0 10 0.9999857 Jeff Bezos PERSON
0 23 26 0.6394255 CEO PERSON", header=TRUE, stringsAsFactors=FALSE)
expect_similar(output, expected)
})
test_that("detect_entities works on character vector", {
output <- with_mock(
comprehendHTTP = mock_comprehendHTTP,
detect_entities(text = body$batch$TextList,
language = body$batch$LanguageCode)
)
expected <- read.table(sep="\t", text="
Index BeginOffset EndOffset Score Text Type
0 0 10 0.9999857 Jeff Bezos PERSON
0 23 26 0.6394255 CEO PERSON
2 0 3 0.9972390 AWS ORGANIZATION
2 13 21 0.5615919 numerous QUANTITY", header=TRUE, stringsAsFactors=FALSE)
attr(expected, "ErrorList") <- list()
expect_similar(output, expected)
})
|
69ed2e478661444ae8e2c187fc9e5cc181a9ef6a | 0ef1a314914e88740dbe33b248a552a57c0b261d | /MBQhsi/R/DEgd.R | 986bb09530e227e7ffec148f6c9c2bd0fcb3cbea | [] | no_license | rocrat/MBQ_Package | 845d25faed797835d916ed646496f26f78254521 | b8c4f978fce36cfd3deb5cb2604372b00bf68e15 | refs/heads/master | 2021-01-21T13:25:37.952739 | 2016-05-17T19:13:40 | 2016-05-17T19:13:40 | 53,088,771 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 464 | r | DEgd.R | #' @title Dr. Dave Ellis Grass Diversity
#'
#' @description Calculates the partial HSI given the grass diversity.
#'
#' @param x Grass Diversity measured as the total number of both annual and perennial grass species on a given home range throughout the year.
#'
#' @return Returns the relative HSI value
#'
#' @usage GD.Ellis(x)
#' @export
#' @name GD.Ellis
#' @author Dominic LaRoche
#'
#'
GD.Ellis <- function(x){
s <- pgamma(x, 22.5, rate = 1)
return(s)
}
|
d4e222f1fb32c44bfab5f7743f8932564d18c0ad | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rtweet/examples/tweets_with_users.Rd.R | f6cd2f34464e6ce0caf01fb32686742d8d7de09d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 590 | r | tweets_with_users.Rd.R | library(rtweet)
### Name: tweets_with_users
### Title: Parsing data into tweets/users data tibbles
### Aliases: tweets_with_users users_with_tweets
### ** Examples
## Not run:
##D ## search with parse = FALSE
##D rt <- search_tweets("rstats", n = 500, parse = FALSE)
##D
##D ## parse to tweets data tibble with users data attribute object
##D tweets_with_users(rt)
##D
##D ## search with parse = FALSE
##D usr <- search_users("rstats", n = 300, parse = FALSE)
##D
##D ## parse to users data tibble with users data attribute object
##D users_with_tweets(usr)
##D
## End(Not run)
|
f2bc86717513e41b0e12f437aa8d7c38b953e5b2 | 9667a66174faccda6a378d03ebd2cccb46c199ae | /mapper1.r | d85fc0aefbb3e37189c00e883b7d282c16f6037d | [] | no_license | regstrtn/hadoop | ae72fa491d9b5ffa655622593ae649950cd2292f | 20ef146285d805a3c9ee09456031cd1c1161f8f9 | refs/heads/master | 2021-06-09T16:39:09.438672 | 2016-12-13T09:07:37 | 2016-12-13T09:07:37 | 72,678,189 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,092 | r | mapper1.r | #!/usr/bin/env Rscript
# wc_mapper.r
#args = commandArgs(trailingOnly = T)
args = c(10)
input = file("stdin", "r")
df = data.frame(title = character(), id = character(), numtags = numeric(), stringsAsFactors =F)
# Return 1 if any element of `fields` is NA or an empty string, else 0.
#
# Fixes the 1:length(fields) footgun in the original: for a zero-length
# input the loop ran with i = 1 and i = 0, evaluated fields[1] (NA) and
# wrongly reported a missing value. An empty vector now returns 0.
# The check is also vectorized instead of an element-wise loop; NA
# elements dominate the elementwise OR, so `fields == ""` yielding NA
# for them is harmless.
anyna = function(fields) {
  if (any(is.na(fields) | fields == "")) {
    return(1)
  }
  return(0)
}
# Hadoop-streaming mapper loop: read stdin one CSV line at a time and
# emit tab-separated (title, songid, numtags) for songs with more than
# args[1] tags.
while(length(line <- readLines(input, n = 1, warn = F))) {
  # Input record layout: artistlist, taglist, title, songid.
  fields = unlist(strsplit(line, ",", fixed = T))
  if(anyna(fields)) next # skip records with missing or empty fields
  artistlist = fields[1]
  taglist = fields[2]
  title = fields[3]
  songid = fields[4]
  # Tags and artists are ';'-separated within their field.
  tags = unlist(strsplit(taglist, ";", fixed = T))
  artists = unlist(strsplit(artistlist, ";", fixed = T))
  numtags = length(tags)
  numartists = length(artists)
  # Emit only songs whose tag count exceeds the threshold args[1].
  if(numtags>args[1]) cat(title, "\t", songid, "\t", numtags, "\n")
}
# newrow = data.frame(title = title, id = songid, numtags = numtags, stringsAsFactors = F)
# df = rbind(df, newrow)
#df = df[order(-df$numtags),]
#for(i in 1:nrow(df)) {
# if(df$numtags[i]>args[1]) cat(df$title[i],"\t", df$id[i],"\t", df$numtags[i],"\n")
#}
|
b69fd3a820ca6d665bf9cf7ff5df42a6f30b4aaf | 83bce7e127191950acd99126225b513a5f63a5f4 | /data_prep.R | 0ca2865cb226795c8cb5cb77f525b3b7b365cab2 | [] | no_license | Yi5117/Stat-517-Statistical-Learning-and-Predictive-Modeling | 62e25dac484573c9f9008a6cc9160b0d94123176 | ac4d2b026c34dfa1a981fd2eaee2b5ce1e120628 | refs/heads/master | 2020-04-05T06:03:21.943530 | 2018-12-11T20:19:59 | 2018-12-11T20:19:59 | 156,623,722 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 994 | r | data_prep.R | library(dplyr)
library(reshape2)
# Load the training data from data/train.csv and return it as a tbl_df.
# Columns (in order): Store (factor), Dept (factor), Date (Date),
# Weekly_Sales (numeric), IsHoliday (logical).
#
# Fix: the original body ended with an assignment, so the tibble was
# returned *invisibly*; it is now the last visible expression.
read.train <- function() {
  # Column classes for Store, Dept, Date, Weekly_Sales, IsHoliday.
  cls <- c('factor', 'factor', 'Date', 'numeric', 'logical')
  train <- read.csv(file = 'data/train.csv', colClasses = cls)
  tbl_df(train)
}
# Load the test data from data/test.csv and return it as a tbl_df.
# Columns (in order): Store (factor), Dept (factor), Date (Date),
# IsHoliday (logical).
#
# Fix: the original body ended with an assignment, so the tibble was
# returned *invisibly*; it is now the last visible expression.
read.test <- function() {
  # Column classes for Store, Dept, Date, IsHoliday.
  cls <- c('factor', 'factor', 'Date', 'logical')
  test <- read.csv(file = 'data/test.csv', colClasses = cls)
  tbl_df(test)
}
# Reshape the train data into a wide table of weekly sales per store:
# one row per Date, one column per Store. This is preparation required
# for time series clustering.
# Input:  train dataset with multiple rows x 4 column variables.
# Output: tbl_df of 143 weekly sales observations x 45 stores.
reshape.by.stores <- function(train) {
  wide <- dcast(
    train,
    formula = Date ~ Store,
    value.var = "Weekly_Sales",
    fun.aggregate = sum
  )
  tbl_df(wide)
}
|
96197269d404afa32d1573aecd5284c619ffcf3b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/CoxBoost/examples/estimPVal.Rd.R | 2efd5173b0b937c6d17e7ba4d598cd4cce40bcb7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,056 | r | estimPVal.Rd.R | library(CoxBoost)
### Name: estimPVal
### Title: Estimate p-values for a model fitted by CoxBoost
### Aliases: estimPVal
### Keywords: models regression survial
### ** Examples
## Not run:
##D # Generate some survival data with 10 informative covariates
##D n <- 200; p <- 100
##D beta <- c(rep(1,10),rep(0,p-10))
##D x <- matrix(rnorm(n*p),n,p)
##D real.time <- -(log(runif(n)))/(10*exp(drop(x %*% beta)))
##D cens.time <- rexp(n,rate=1/10)
##D status <- ifelse(real.time <= cens.time,1,0)
##D obs.time <- ifelse(real.time <= cens.time,real.time,cens.time)
##D
##D # Fit a Cox proportional hazards model by CoxBoost
##D
##D cbfit <- CoxBoost(time=obs.time,status=status,x=x,stepno=100,
##D penalty=100)
##D
##D # estimate p-values
##D
##D p1 <- estimPVal(cbfit,x,permute.n=10)
##D
##D # get a second vector of estimates for checking how large
##D # random variation is
##D
##D p2 <- estimPVal(cbfit,x,permute.n=10)
##D
##D plot(p1,p2,xlim=c(0,1),ylim=c(0,1),xlab="permute 1",ylab="permute 2")
## End(Not run)
|
59ad543f78da1666323fcbbfbac1d95bcb0600f6 | 636aafe5cef2627f72b317675574ba69ad57337e | /groupe-006/R/stat.R | cce765aad07ff6007b9223bdf71901a84c79b775 | [
"MIT"
] | permissive | RemiFELIN/Projet-R-Gun-violence-data | 1062536e72a0b21de29b2188752381ee19f03559 | cd7b88249810aede8350d0e82c97c0bac744f3bf | refs/heads/master | 2022-03-26T02:40:02.082944 | 2020-01-07T14:13:16 | 2020-01-07T14:13:16 | 222,500,687 | 0 | 2 | null | null | null | null | ISO-8859-1 | R | false | false | 4,902 | r | stat.R | # STATISTIQUES ET TESTS D'HYPOTHESES
#
#
# import libraries
library(knitr)
library(dplyr)
library(readr)
library(ggplot2)
library(tibble)
library(stringr)
library(gridExtra)
library(scales)
library(lubridate)
library(ggrepel)
library(leaflet)
library(rgdal)
library(tibble)
library(purrr)
library(splitstackshape)
library(PerformanceAnalytics)
library(tidyr)
library(corrplot)
library(lubridate)
#Choose "dataset_df1.csv" : you can generate it with "treatment_data.R" on 'PRE' folder
df1 = read.csv(file.choose(), header=TRUE,stringsAsFactors = FALSE, na.strings=c("NA", ""))
#Prenons les datas df1 avec les colonnes utiles
data <- subset(df1, select = c(n_killed, n_injured, state, participant_age, date))
#et prenons les datas recueillis en 2018 (01-01-2018 -> 31-12-2018)
data_2018 <- subset(data, data$date >= as.Date('2018-01-01') & data$date <= as.Date('2018-03-31'))
#Pour la période suivante : 01-01-2013 -> 31-12-2017
data_With_Periode <- subset(data, data$date >= as.Date('2013-01-01') & data$date <= as.Date('2017-12-31'))
#Etudions plus en détail ce jeu de données
summary(data_With_Periode)
#a voir si on peut améliorer cette partie
#Considérons l'échantillon suivant : data_2018
summary(data_2018)
# Si nous comparons les résultats, nous remarquons les choses suivantes :
# H0 NB MORT : Le nombre de morts obtenus en 2018 a permis de modifier la moyenne de mort
# HO NB BLESSE : Le nombre de blesses obtenus en 2018 a permis de modifier la moyenne de blesses
# HO AGE PARTICIPANT : L'age des participants releves en 2018 a permis de modifier la moyenne d'age
#
# Pour les variables relatives aux nombres de mort, blesses et l'age des protagonistes : nous effectuerons
# un test bilatérale afin de valider ou non H0
#
# Etablissons nos fonctions au préalable, elles faciliterons le traitement et ce sera plus lisible
# Absolute one-sample test statistic: |xbar - mu| / (sigma / sqrt(n)).
# NOTE(review): callers are expected to pass a standard deviation as
# `sigma` (the denominator is the standard error of the mean).
test <- function(mu, n, xbar, sigma) {
  standard_error <- sigma / sqrt(n)
  abs(xbar - mu) / standard_error
}
# Upper-tail Student-t critical value for significance level `alpha`
# with `ddl` degrees of freedom ("ddl" = degres de liberte).
p_value <- function(alpha, ddl) {
  qt(alpha, df = ddl, lower.tail = FALSE)
}
# Sample standard deviation of `data`.
#
# Fix: the original returned var(data) (the variance) even though its
# result is passed as `sigma` to test(), whose statistic
# |xbar - mu| / (sigma / sqrt(n)) requires a standard deviation.
sigma_value <- function(data) {
  return(sd(data))
}
###################
#                 #
# START OF TESTS  #
#                 #
###################
# Quantities shared by the three hypothesis tests below.
# Sample size of the 2018 sample.
# NOTE(review): length() of a data.frame is its number of COLUMNS;
# nrow(data_2018) is probably what was intended here -- to confirm.
n <- length(data_2018)
# Two-sided critical value (Student t) at alpha = 5% with n-1 df.
tPVAL <- p_value(0.05, n-1)
#############################################################
#
## H0: number of deaths
#
# Sample statistics: mu from the 2018 sample, Xbar from the 2013-2017
# reference period, sigma from the 2018 sample.
mu_NbMort <- mean(data_2018$n_killed)
Xbar_NbMort <- mean(data_With_Periode$n_killed)
sigma_NbMort <- sigma_value(data_2018$n_killed)
# Compute the test statistic from the values above.
t_NbMort <- test(mu_NbMort, n, Xbar_NbMort, sigma_NbMort)
# Compare against the acceptance interval [-tPVAL, tPVAL] and report.
if(t_NbMort >= -tPVAL && t_NbMort <= tPVAL) {
  cat("pour alpha = 5%, on a t_nbMort (", t_NbMort ,") appartenant à l'intervalle [",
      -tPVAL , ";", tPVAL, "]\n-> Ainsi, on ne peut pas refuser H0\n")
} else {
  cat("pour alpha = 5%, on a t_nbMort (", t_NbMort ,") n'appartenant pas à l'intervalle [",
      -tPVAL , ";", tPVAL, "]\n-> Ainsi,on rejette H0\n")
}
#############################################################
#
## H0: number of injured
#
# Same procedure as above, applied to the n_injured column.
mu_NbBlesses <- mean(data_2018$n_injured)
Xbar_NbBlesses <- mean(data_With_Periode$n_injured)
sigma_NbBlesses <- sigma_value(data_2018$n_injured)
# Compute the test statistic from the values above.
t_NbBlesses <- test(mu_NbBlesses, n, Xbar_NbBlesses, sigma_NbBlesses)
# Compare against the acceptance interval and report.
if(t_NbBlesses >= -tPVAL && t_NbBlesses <= tPVAL) {
  cat("pour alpha = 5%, on a t_nbMort (", t_NbBlesses ,") appartenant à l'intervalle [",
      -tPVAL , ";", tPVAL, "]\n-> Ainsi, on ne peut pas refuser H0\n")
} else {
  cat("pour alpha = 5%, on a t_nbMort (", t_NbBlesses ,") n'appartenant pas à l'intervalle [",
      -tPVAL , ";", tPVAL, "]\n-> Ainsi,on rejette H0\n")
}
#############################################################
#
## H0: participants' age
#
# Same procedure, applied to participant_age.
mu_Age <- mean(data_2018$participant_age)
Xbar_Age <- 29.63 # mean(data_With_Periode$participant_age) fails -- value hard-coded for now; TODO revisit
# Temporary: reference-period mean entered by hand (see note above).
sigma_Age <- sigma_value(data_2018$participant_age)
# Compute the test statistic from the values above.
t_Age <- test(mu_Age, n, Xbar_Age, sigma_Age)
# Compare against the acceptance interval and report.
if(t_Age >= -tPVAL && t_Age <= tPVAL) {
  cat("pour alpha = 5%, on a t_nbMort (", t_Age ,") appartenant à l'intervalle [",
      -tPVAL , ";", tPVAL, "]\n-> Ainsi, on ne peut pas refuser H0\n")
} else {
  cat("pour alpha = 5%, on a t_nbMort (", t_Age ,") n'appartenant pas à l'intervalle [",
      -tPVAL , ";", tPVAL, "]\n-> Ainsi,on rejette H0\n")
}
#############################################################
8d3974854fefe914367cc030d71257355429b858 | 72b398fdaa9301a8876e7f6095f3572eb412e5b9 | /analysis/names_special_characters.R | fbc709cf75cc1a0daf7f654fa4ff4d04970f988d | [] | no_license | mariariveraaraya/PhD_render | 37c9a9a4d9e1a453d07a356e7f352f2ffe194d80 | 168834f1d446f9b027ce54244f86c3462ba1c4ef | refs/heads/master | 2023-08-29T12:15:02.113638 | 2021-10-04T05:30:31 | 2021-10-04T05:30:31 | 413,006,724 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 788 | r | names_special_characters.R | # reproducible data
plates <- data.frame(WatexCl = rnorm(100), ConcuM = rnorm(100), Depth = rnorm(100))
# alter the default plot margins so the
# superscript in the y-axis label is completely displayed
par(mar=c(5,5,4,2))
# draw the plot
plot(WatexCl ~ ConcuM, data = plates,
col = as.numeric(1),
pch = as.numeric(Depth),
xlab = bquote("Concentration Cl ("*mu~"moles/g dry wt)"),
ylab = bquote("Average Conc of S- on plates ("~mu~"Moles/cm"^"2"*")"))
alpha = rnorm(1e3)
hist(alpha,cex.main=2,cex.axis=1.2,cex.lab=1.2,main=NULL )
title <- list( bquote( paste( "Histogram of " , hat(mu) ) ) ,
bquote( paste( "Bootstrap samples, Allianz" ) ) )
mtext(do.call(expression, title ),side=3, line = c(1,-1) , cex = 2 )
w <- readline()
a=3+2
log(-20)
|
e8f658b1152e80361c86b18156b7950f51d65d45 | 0aadcf7d61193d1a2405370cbad3764f565cdf3e | /man/get_worldclim.Rd | 02f469616c07d4a42b113daed36a1b06c3ff154a | [] | no_license | rsh249/vegdistmod | da662a75579ab32b7b462aa2e1547ae9b8caac61 | cc4e5f0c31fa1ef7d53e8127f0a57001c1b04403 | refs/heads/master | 2021-01-17T10:01:22.940779 | 2019-01-28T16:45:12 | 2019-01-28T16:45:12 | 59,592,035 | 4 | 0 | null | 2016-10-19T19:21:41 | 2016-05-24T16:58:17 | R | UTF-8 | R | false | true | 1,002 | rd | get_worldclim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_climate_data.R
\name{get_worldclim}
\alias{get_worldclim}
\title{Download WorldClim data for modern or paleo models.}
\usage{
get_worldclim(period = "cur", model = "", version = "1_4",
varset = "bio", res = 2.5)
}
\arguments{
\item{period}{A string. Either 'cur', 'midholo', or 'lgm'.}
\item{model}{For paleo models. Which to use (i.e., 'ccsm4'). See http://worldclim.org/paleo-climate1 for options.}
\item{version}{Either '1_4', or '2.0'}
\item{varset}{Either 'bio', 'tmean', 'tmin', 'tmax', or 'prec'.}
\item{res}{What spatial resolution? '10', '2.5' arcmin, or '30' seconds. (options are '10', '2.5', '30').}
}
\description{
This function requests climate raster objects from www.worldclim.org,
downloads the files, reads them into R and disposes of the original files.
}
\examples{
\dontrun{
#get 10 minute mean monthly temperature grids.
abies <- get_worldclim(period='cur', varset = 'tmean', res=10);
}
}
|
b05682bb1df95f5ce267d65ee08cd13802e2ecf8 | 6464efbccd76256c3fb97fa4e50efb5d480b7c8c | /cran/paws.end.user.computing/man/workdocs_deactivate_user.Rd | f32a87b61da0af0031183d71c4dc943874fe8985 | [
"Apache-2.0"
] | permissive | johnnytommy/paws | 019b410ad8d4218199eb7349eb1844864bd45119 | a371a5f2207b534cf60735e693c809bd33ce3ccf | refs/heads/master | 2020-09-14T23:09:23.848860 | 2020-04-06T21:49:17 | 2020-04-06T21:49:17 | 223,286,996 | 1 | 0 | NOASSERTION | 2019-11-22T00:29:10 | 2019-11-21T23:56:19 | null | UTF-8 | R | false | true | 797 | rd | workdocs_deactivate_user.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workdocs_operations.R
\name{workdocs_deactivate_user}
\alias{workdocs_deactivate_user}
\title{Deactivates the specified user, which revokes the user's access to
Amazon WorkDocs}
\usage{
workdocs_deactivate_user(UserId, AuthenticationToken)
}
\arguments{
\item{UserId}{[required] The ID of the user.}
\item{AuthenticationToken}{Amazon WorkDocs authentication token. Do not set this field when using
administrative API actions, as in accessing the API using AWS
credentials.}
}
\description{
Deactivates the specified user, which revokes the user\'s access to
Amazon WorkDocs.
}
\section{Request syntax}{
\preformatted{svc$deactivate_user(
UserId = "string",
AuthenticationToken = "string"
)
}
}
\keyword{internal}
|
88fbe2aa4790b667ad8921201cb6f2b871010417 | 1ad0d4313389f1c4dd6abd6dbf4b3d78b9944e25 | /scripts/AG_2_5_PPI_network.R | a124747ce6e1717a0f31a7d736e4f5d3f7903918 | [] | no_license | ag1805x/EDTOX | ce996813d7630740f0ed5b08919f405081b074b7 | 2107df6d815a69f7a52e750ea994a09fb15700a2 | refs/heads/master | 2023-01-31T01:04:58.143025 | 2022-10-18T06:04:58 | 2022-10-18T06:04:58 | 548,797,477 | 0 | 0 | null | 2022-10-10T07:41:08 | 2022-10-10T07:41:07 | null | UTF-8 | R | false | false | 1,202 | r | AG_2_5_PPI_network.R | # Updates:
# (1) Updated STRIGdb versio from 10 to 11.5
# (2) Due to major update in STRINGdb, only combined scores are now available and the threshold also need to be changed
library(STRINGdb)
library(igraph)
library(org.Hs.eg.db)
combscore<-0.85 #based on pareto
# string_db<-STRINGdb$new(version="10",species=9606)
string_db<-STRINGdb$new(version="11.5",species=9606)
entz_id<-data.frame(entz_id=keys(org.Hs.egGENENAME),stringsAsFactors = F)
string_ids<-string_db$map(my_data_frame=entz_id,my_data_frame_id_col_names='entz_id',removeUnmappedRows=T)
string_inter<-string_db$get_interactions(string_ids$STRING_id)
# string_inter<-string_inter[,-grep('coexpression',names(string_inter))]
# sel_inter<-1-(string_inter[,3:(ncol(string_inter)-1)]/1000)
# string_inter$combined_score<-1-Reduce('*',sel_inter)
edge_list<-string_inter[string_inter$combined_score>=combscore,1:2]
edge_list<-merge(edge_list,string_ids,by.x='from',by.y='STRING_id')
edge_list<-merge(edge_list,string_ids,by.x='to',by.y='STRING_id')[,3:4]
colnames(edge_list)<-c('Node.1','Node.2')
gr<-graph_from_edgelist(as.matrix(edge_list),directed=F)
save(gr,file='outputData/network/ppi_network.RData')
|
40cd9723f01639eb3cc418deb0eb2fc60e8290c7 | 0982fb077cd59436811077b505d4377f7c3a3311 | /R/view_dr_surv.r | fcad4528523342537c6471e6003ae221cfa6a43f | [] | no_license | teazrq/orthoDr | c87a851477b84693b81905423cc7db32659dd795 | b009267a5b0124008711f556f1642f9fb880a5f2 | refs/heads/master | 2023-07-06T23:52:36.724019 | 2022-07-19T21:14:17 | 2022-07-19T21:14:17 | 106,500,687 | 7 | 8 | null | 2023-06-22T04:16:27 | 2017-10-11T03:24:50 | C++ | UTF-8 | R | false | false | 4,309 | r | view_dr_surv.r | #' 2D or 2D view of survival data on reduced dimension
#'
#' Produce 2D or 3D plots of right censored survival data based on a given
#' dimension reduction space
#'
#' @param x A `matrix` or `data.frame` for features (continuous only).
#' The algorithm will not scale the columns to unit variance
#' @param y A `vector` of observed time
#' @param censor A `vector` of censoring indicator
#' @param B The dimension reduction subspace, can only be 1 dimensional
#' @param bw A Kernel bandwidth (3D plot only) for approximating the
#' survival function, default is the Silverman's formula
#' @param FUN A scaling function applied to the time points `y`.
#' Default is `"log"`.
#' @param type `2D` or `3D` plot
#' @param legend.add Should legend be added (2D plot only)
#' @param xlab x axis label
#' @param ylab y axis label
#' @param zlab z axis label
#'
#' @return
#'
#' An `rgl` object that is rendered.
#'
#' @export
#'
#' @importFrom grDevices rainbow
#' @importFrom graphics legend par plot plot.new
#' @importFrom rgl axis3d mtext3d box3d surface3d
#' @importFrom plot3D mesh
#'
#' @references
#' Sun, Q., Zhu, R., Wang, T., & Zeng, D. (2019). Counting process-based dimension reduction methods for censored outcomes. Biometrika, 106(1), 181-196.
#' DOI: \doi{10.1093/biomet/asy064}
#'
#' @examples
#' # generate some survival data
#' N <- 100
#' P <- 4
#' dataX <- matrix(rnorm(N * P), N, P)
#' Y <- exp(-1 + dataX[, 1] + rnorm(N))
#' Censor <- rbinom(N, 1, 0.8)
#'
#' orthoDr.fit <- orthoDr_surv(dataX, Y, Censor, ndr = 1, method = "dm")
#' view_dr_surv(dataX, Y, Censor, orthoDr.fit$B)
view_dr_surv <- function(x, y, censor, B = NULL, bw = NULL, FUN = "log", type = "2D", legend.add = TRUE, xlab = "Reduced Direction", ylab = "Time", zlab = "Survival") {
  # --- input validation ------------------------------------------------
  if (!is.matrix(x)) stop("x must be a matrix")
  if (!is.numeric(x)) stop("x must be numerical")
  if (nrow(x) != length(y) | nrow(x) != length(censor)) stop("Number of observations do not match")
  if (is.null(B)) stop("B must be given")
  if (length(B) != ncol(x)) stop("Dimension of B does not match x")
  # Transform the observed times (default: log scale).
  FUN <- match.fun(FUN)
  y2 <- FUN(y)
  # Project the covariates onto the 1-dimensional reduction direction B.
  bx <- x %*% B
  if (type == "2D") {
    # Scatter of projected covariate vs transformed time: failures as
    # blue filled dots (pch 19), censored observations as red crosses (pch 3).
    plot.new()
    par(mar = c(4, 4.2, 2, 2))
    plot(bx, y2, col = ifelse(censor == 1, "blue", "red"), pch = ifelse(censor == 1, 19, 3), xlab = xlab, ylab = ylab, cex.lab = 1.5)
    if (legend.add) legend("topright", c("failure", "censored"), pch = c(19, 3), col = c("blue", "red"), cex = 1.5)
  }
  if (type == "3D") {
    # Kernel bandwidth for the locally-weighted survival estimates;
    # defaults to silverman() scaled by sd(bx), also used as fallback
    # when a non-numeric bw is supplied.
    if (is.null(bw)) {
      bw <- silverman(1, length(bx)) * sd(bx)
    }
    if (!is.numeric(bw)) {
      warning("bw must be a number")
      bw <- silverman(1, length(bx)) * sd(bx)
    }
    # Evaluation grids: all observed (transformed) times plus a regular
    # time sequence, and a regular grid along the projected direction.
    timegrid <- sort(unique(c(y2, seq(0, max(y2), length.out = 100))))
    xgrid <- seq(min(bx), max(bx), length.out = 100)
    # S[i, j] = estimated survival at xgrid[i], timegrid[j].
    S <- matrix(NA, length(xgrid), length(timegrid))
    for (i in 1:nrow(S))
    {
      # Gaussian kernel weights centered at xgrid[i]; survfit(~ 1) with
      # these weights yields a locally-weighted survival curve.
      dif <- xgrid[i] - bx
      k <- exp(-0.5 * (dif / bw)^2)
      fit <- survfit(Surv(y2, censor) ~ 1, weights = k)
      S[i, ] <- summary(fit, times = timegrid)$surv
    }
    # Build the 3D surface mesh, colored by survival probability.
    M <- mesh(xgrid, timegrid)
    colorlut <- rainbow(102, start = 0.1)
    # Rescale both axes so the surface renders with comparable extents.
    yscale <- max(y2) - min(y2)
    xscale <- max(bx) - min(bx)
    surface3d(M$x / xscale, M$y / yscale * 1.5, S, col = colorlut[S * 100 + 1], alpha = 0.9, theta = 50, phi = 20, labels = c("x", "y", "z"))
    box3d(expand = 1.1, draw_front = FALSE)
    # Axis ticks map the rescaled coordinates back to original units.
    axis3d(
      edge = "x-+", at = seq(min(bx), max(bx), length.out = 6) / xscale,
      labels = round(seq(min(bx), max(bx), length.out = 6), 2),
      tick = TRUE, line = 0, nticks = 5, cex = 1.5, adj = c(0, 0.75)
    )
    axis3d(
      edge = "y+", at = seq(0, max(y2) - min(y2), length.out = 6) / yscale,
      labels = round(seq(0, max(y2) - min(y2), length.out = 6), 2),
      tick = TRUE, line = 0, nticks = 6, cex = 1.5, adj = c(0, -0.25)
    )
    axis3d(
      edge = "z+", at = seq(0, 1, 0.2), labels = seq(0, 1, 0.2),
      tick = TRUE, line = 1, nticks = 5, cex = 1.5, adj = 0
    )
    # Axis titles.
    mtext3d(text = xlab, edge = "x-+", line = 2, cex = 1.5)
    mtext3d(text = ylab, edge = "y+-", line = 1.5, cex = 1.5)
    mtext3d(text = zlab, edge = "z+", line = 2, cex = 1.5)
  }
}
|
96df89515670168a4ceb7b11f54492e20b475490 | 6464c0bf7dd71c0b46e31585ce37c1861d2e1b25 | /Animation.R | f3733e5bf654149037d5c4274b07d14e913e019f | [] | no_license | quantumesx/TadroSimulation | 950fa4cb5b746eec951a7922526dcded54aaf82b | ec5fbf88c082c72de6e2c6d6d32ef810d4e62774 | refs/heads/master | 2021-06-18T15:31:39.911431 | 2017-06-27T15:37:04 | 2017-06-27T15:37:04 | 94,559,011 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,156 | r | Animation.R | ### Animation
results
animate.tadro(results)
# Render one PNG frame per simulation iteration into the working
# directory (to be stitched into an animation externally).
#
# Relies on globals: n.iteration, pool, LShed.mark, RShed.mark, light, Id.
#
# Fixes:
#  * the ggplot object was assigned but never print()ed; ggplot objects
#    are not auto-printed inside functions, so every PNG came out blank;
#  * frame names now use sprintf("%04d...") instead of an if chain
#    (identical names, e.g. "0001plot.png", for i in 1..999);
#  * local renamed from `plot` (which shadowed base::plot) to `p`.
animate.tadro <- function(results){
  for (i in seq_len(n.iteration)) {
    # Zero-padded name so frames sort correctly, e.g. "0042plot.png".
    name <- sprintf("%04dplot.png", i)
    # Open the PNG device for this frame.
    png(name)
    p <- ggplot(results[i, ], aes(x = tadro.x, y = tadro.y)) +
      geom_point() +
      geom_segment(aes(xend = head.x, yend = head.y),
                   arrow = arrow(length = unit(0.1, "cm"), ends = "last", type = "closed"), color = "green") +
      geom_path(data = pool, aes(x = xx, y = yy)) +
      geom_segment(data = LShed.mark, aes(x = x1, y = y1, xend = x2, yend = y2)) +
      geom_segment(data = RShed.mark, aes(x = x1, y = y1, xend = x2, yend = y2)) +
      geom_path(data = light, aes(x = xx, y = yy), color = "orange") +
      labs(title = paste("Network ID:", Id, sep = " "),
           subtitle = paste("iteration =", i, sep = "")) +
      theme_void()
    print(p)  # explicitly draw the plot onto the open device
    dev.off()
  }
}
### Animation
results
# Script version of animate.tadro(): writes one PNG frame per iteration.
# Fix: inside a top-level `for` loop, ggplot objects are NOT auto-printed,
# so the original produced blank PNGs; the plot is now print()ed
# explicitly. Frame names use sprintf zero-padding (same names as the
# original if chain for i in 1..999).
for (i in seq_len(n.iteration)) {
  # Zero-padded frame name (e.g. "0042plot.png") so files sort correctly.
  name <- sprintf("%04dplot.png", i)
  png(name)
  print(
    ggplot(results[i, ], aes(x = tadro.x, y = tadro.y)) +
      geom_point() +
      geom_segment(aes(xend = head.x, yend = head.y),
                   arrow = arrow(length = unit(0.1, "cm"), ends = "last", type = "closed"), color = "green") +
      geom_path(data = pool, aes(x = xx, y = yy)) +
      geom_segment(data = LShed.mark, aes(x = x1, y = y1, xend = x2, yend = y2)) +
      geom_segment(data = RShed.mark, aes(x = x1, y = y1, xend = x2, yend = y2)) +
      geom_path(data = light, aes(x = xx, y = yy), color = "orange") +
      labs(title = paste("Network ID:", Id, sep = " "),
           subtitle = paste("iteration =", i, sep = "")) +
      theme_void()
  )
  dev.off()
}
2c7f3dffb0ecaefb77ad7e9fcf562b99fa99f813 | 5a739c45535c97844af5dfc126be6954e7747890 | /tests/testthat.R | 079b9aeaf5bd3107f13ea7e7c1446b3139d4f1af | [] | no_license | cran/colorplaner | d0ed6c8c910c78801bd57a35ad67ee63b207b1f6 | bcd3ce49ef8b4a778efd15854c815f9517069f34 | refs/heads/master | 2020-12-25T22:47:26.090640 | 2016-11-01T11:07:29 | 2016-11-01T11:07:29 | 68,783,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 88 | r | testthat.R | library(testthat)
library(ggplot2)
library(colorplaner)
test_check("colorplaner")
|
c1d05ee9f7f0bec7feebe15d72af42d8ee67ef82 | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/sulfentrazone.R | 54d1b9cefb3f3d4d43511a7ad6d515da10f19464 | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 234 | r | sulfentrazone.R | library("knitr")
library("rgl")
# Alternative manual pipeline (kept for reference, currently disabled):
#knit("sulfentrazone.Rmd")
#markdownToHTML('sulfentrazone.md', 'sulfentrazone.html', options=c("use_xhml"))
#system("pandoc -s sulfentrazone.html -o sulfentrazone.pdf")
# Render the R Markdown source straight to HTML in a single step
knit2html('sulfentrazone.Rmd')
|
9cb93b6c15c78b21240cb3ab8f1144d8712197d0 | c5707b03ca4c5015fdd48aa98a87c1d038b0ed7f | /mc/man/neo4j.query.mc.Rd | 4b806bdae9eee3c1f517d0b4304650cc8790f4f4 | [] | no_license | nhtuong/mc-r | e145ab2a130c90ab57ea0ce7fc10e2f740f39e9a | 50e523c1ce2d8ec323f7cb3a58e2d8edd8dc211d | refs/heads/master | 2020-05-17T16:05:53.766057 | 2015-01-07T11:44:01 | 2015-01-07T11:44:01 | 33,657,174 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 277 | rd | neo4j.query.mc.Rd | \docType{methods}
\name{neo4j.query.mc}
\alias{neo4j.query.mc}
\title{Executing a query in the Cypher language}
\usage{
neo4j.query.mc(querystring)
}
\arguments{
\item{querystring}{Character string containing the Cypher query to execute.}
}
\description{
Executes a query written in the Cypher language.
}
\author{
Hoai Tuong Nguyen
}
|
41a57794d47f2c1aeb5db19b82bf59ac8045444e | 7567e449b26cc1c25817bc4490c615761ca74344 | /SamplingScript.R | 0cf7ab59e608eec303aa453e98d21b88b3e079ab | [] | no_license | mhandreae/ChangePoint | b33646a20459cc8bfa654a8620a0fd41caae9ab3 | b7c47e82097ca0b07ea87282765922cf97534540 | refs/heads/master | 2021-01-10T06:30:51.437407 | 2016-03-11T15:08:35 | 2016-03-11T15:08:35 | 53,002,742 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | SamplingScript.R | ## Sampling model 6, irregular times with random intercept
# Fit change-point model 7 (irregular times with random intercept) by MCMC.
# Requires a previously compiled stan model object and the prepared data,
# both loaded from .Rdata files below.
require(rstan)
require(rstanarm)
# NOTE(review): setwd() to a user-specific path makes this script
# non-portable; downstream load()/save() calls use paths relative to it.
setwd("~/Projects/ChangePoint")
# Loads the compiled stanfit object 'MarginalChangePoint7'
load(file="Fit/MarginalChangePoint7.Rdata")
# Loads the data object 'continuous_data7' passed to stan() below
load(file = "Data/continuous_data7.Rdata")
# with the updated stan file
# Re-use the already-compiled model (fit=) to sample:
# 4 chains x 1000 iterations, run in parallel on 4 cores.
fit7 <- stan(fit=MarginalChangePoint7, iter= 1000, chains = 4,
data= continuous_data7, cores = 4)
# Persist the fitted object for later analysis
save(fit7, file="Fit/fit7.Rdata")
|
43bfc845aa43599a15ca58832fb3e6d4f67b1ffc | 51c61fe400ce7226a52ff66c629fd97cb919fb1a | /R/get_grid.R | 35c33fe1a334b08171840addb7b622fc55f39258 | [
"MIT"
] | permissive | henckr/maidrr | 306d0fe29884d935ae3ced23622eb2366c54f6f9 | 262d90e94b91fbaa289c590335e33ae43e26c50b | refs/heads/master | 2023-07-25T01:54:39.947147 | 2023-07-17T14:35:44 | 2023-07-17T14:35:44 | 252,255,074 | 7 | 6 | NOASSERTION | 2023-07-17T14:35:46 | 2020-04-01T18:26:13 | R | UTF-8 | R | false | false | 1,069 | r | get_grid.R | #' Get feature grid
#'
#' Get the grid values for features based on the observed values in the data.
#'
#' @param var Character string or vector giving the names of the features.
#' @param data Data frame containing the original training data.
#' @return Tidy data frame (i.e., a "tibble" object). The columns contain the grid
#' values for features \code{var} based on the observed values in \code{data}.
#' @examples
#' \dontrun{
#' data('mtpl_be')
#' 'ageph' %>% get_grid(data = mtpl_be)
#' 'coverage' %>% get_grid(data = mtpl_be)
#' c('ageph', 'coverage') %>% get_grid(data = mtpl_be)
#' tidyr::expand_grid('ageph' %>% get_grid(data = mtpl_be),
#' 'coverage' %>% get_grid(data = mtpl_be))
#' }
#' @export
get_grid <- function(var, data) {
  # Validate that every requested feature is a column of the supplied data
  missing_vars <- var[! var %in% names(data)]
  if (length(missing_vars) > 0) {
    stop(paste0('The following variable(s) could not be found in the supplied data: ',
                paste(missing_vars, collapse = ' ')))
  }
  # Distinct, sorted combinations of the observed feature values, as a tibble
  var_syms <- rlang::syms(var)
  data %>%
    dplyr::select(!!!var_syms) %>%
    dplyr::distinct() %>%
    dplyr::arrange(!!!var_syms) %>%
    tibble::as_tibble()
}
|
de7ad396b9814dea14804214ade8eca5d8eabd5f | 5d0bc9fa9c48a468d115e9930f5eac66a0764789 | /inst/snippets/Figure2.6.R | 13e5913cc50b22cc0383d13d4302ed1f097c0ba3 | [] | no_license | rpruim/ISIwithR | a48aac902c9a25b857d2fd9c81cb2fc0eb0e848e | 7703172a2d854516348267c87319ace046508eef | refs/heads/master | 2020-04-15T20:36:55.171770 | 2015-05-21T09:20:21 | 2015-05-21T09:20:21 | 21,158,247 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 141 | r | Figure2.6.R | head(TimePopulation, 3)
# Summary statistics (min/quartiles/mean/sd/n) of the population estimates
favstats(~ estimate, data = TimePopulation)
# Frequency histogram of the estimates, split into roughly 20 bins
histogram(~ estimate, data = TimePopulation, type = "count", nint = 20)
|
907142048c0c06b4bf78b1af70ab37ecd0a998e1 | d07ad97624709efdb6a65e758eb98972ac12ef8c | /exercises/exc_03_05.R | 49cddce52a0c88100a5bf782efc00f235ecd317c | [
"MIT"
] | permissive | jminnier/RBootcamp | 17e3118cb820788c2b9628ed4a7b5e33ffe9342c | b359a92641527e55dbfee18fe418a8eebdb2eec4 | refs/heads/master | 2020-06-14T08:56:56.091966 | 2019-07-08T18:58:03 | 2019-07-08T18:58:03 | 194,964,808 | 1 | 0 | MIT | 2019-07-08T18:58:04 | 2019-07-03T02:23:52 | CSS | UTF-8 | R | false | false | 241 | r | exc_03_05.R | library(dplyr)
# Load the pre-saved biopics data set shipped with the course materials
biopics <- readRDS("data/biopics.rds")
#add your comparison to the end of this filter statement
# (exercise goal: keep only post-1980 films whose subject is a criminal)
crimeFilms <- filter(biopics, year_release > 1980 &
type_of_subject == "Criminal")
#show number of rows in crimeFilms
|
3a2abcc274954baf45e213179fee360e2f336a86 | 35c9e4d193140845a8f649ba00fa548bfe4c9f7e | /tests/testthat/test_demos.R | 27f68630cf4e786ea5caedb817386e5ac1f7110e | [] | no_license | jhellewell14/ICDMM | 0cd2dd98d052d9c6fa64036629b064e334bc8eae | 5653caa0da1076c4afc935f07d633120eba95dcb | refs/heads/master | 2022-10-30T07:10:17.350290 | 2020-06-22T09:55:42 | 2020-06-22T09:55:42 | 274,096,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,102 | r | test_demos.R | context("test-demos_run.R")
source("eqm_soln_varying_nets.R")
# Smoke test: the high-level run_model() wrapper runs from equilibrium and
# returns the expected plot/data structure.
test_that("model_run demo runs", {
set.seed(1234)
# define input parameters
init_age <- c(0,0.25,0.5,0.75,1,1.25,1.5,1.75,2,3.5,5,
7.5,10,15,20,30,40,50,60,70,80)
init_EIR <- 10
admin_str <- NULL
time_period <- 30*1
prop_treated <- 0.4
# run the model
model_run <- run_model(age=init_age, EIR=init_EIR, ft = prop_treated,
admin2 = admin_str, time = time_period)
# objects out correct
expect_equal(c("plot", "dat"), names(model_run))
expect_true(is.list(model_run$dat))
expect_true(all(class(model_run$plot) == c("gg", "ggplot")))
# time handled right: t = 0..30 yields 31 time points
expect_equal(length(model_run$dat$t), 31)
# equilibrium init check indirectly (though this could be risky)
# NOTE(review): 0.2287737 is a regression value captured from a known-good
# run, not an analytically derived quantity.
expect_equal(model_run$dat$prev[10]-model_run$dat$inc[15], 0.2287737)
})
# Smoke test for the lower-level create_r_model() workflow: build the odin
# model by hand, run it, and check the same regression value as above.
test_that("create_r_model demo runs", {
# define input parameters
init_age <- c(0,0.25,0.5,0.75,1,1.25,1.5,1.75,2,3.5,5,
7.5,10,15,20,30,40,50,60,70,80)
init_EIR <- 10
time_period <- 30*1
prop_treated <- 0.4
# creates the odin model
wh <- hanojoel:::create_r_model(odin_model_path = system.file("extdata/odin_model.R",
package = "hanojoel"),
het_brackets = 5,
age = init_age,
init_EIR = init_EIR,
init_ft = prop_treated,
country = NULL,
admin2 = NULL)
# generates model functions with initial state data
mod <- wh$generator(user= wh$state, use_dde = TRUE)
# Runs the model
mod_run <- mod$run(t = 1:time_period)
out <- mod$transform_variables(mod_run)
# Same regression value as the run_model() smoke test
expect_equal(out$prev[10]-out$inc[15], 0.2287737)
})
# Consistency test: the manual create_r_model()/run workflow must reproduce
# the prevalence trajectory returned by the run_model() wrapper.
test_that("compare model outputs", {
# define input parameters
init_age <- c(0,0.25,0.5,0.75,1,1.25,1.5,1.75,2,3.5,5,
7.5,10,15,20,30,40,50,60,70,80)
init_EIR <- 10
time_period <- 30*1
prop_treated <- 0.4
# creates the odin model
wh <- hanojoel:::create_r_model(odin_model_path = system.file("extdata/odin_model.R",
package = "hanojoel"),
num_int = 1,
het_brackets = 5,
age = init_age,
init_EIR = init_EIR,
init_ft = prop_treated,
country = NULL,
admin2 = NULL)
# generates model functions with initial state data
mod <- wh$generator(user= wh$state, use_dde = TRUE)
# Runs the model; t = 1..(time_period+1) to match run_model()'s output length
mod_run <- mod$run(t = 1:(time_period+1))
out <- mod$transform_variables(mod_run)
model_run <- run_model(age=init_age, EIR=init_EIR, ft = prop_treated,
admin2 = NULL, time = time_period)
# Both workflows must agree on prevalence (tight tolerance)
expect_equal(model_run$dat$prev, out$prev, tolerance= 1e-8)
expect_equal(out$prev[10]-out$inc[15], 0.2287737)
})
# Consistency test: three equivalent ways of specifying ITN coverage
# (a single coverage switched on at a time; a time/coverage vector with a
# 50/50 population split; the same vector with a 70/30 split) must give the
# same prevalence and incidence trajectories.
test_that("compare varying itns and not", {
# define input parameters
init_age <- c(0,0.25,0.5,0.75,1,1.25,1.5,1.75,2,3.5,5,
7.5,10,15,20,30,40,50,60,70,80)
init_EIR <- 10
time_period <- 30
# Specify coverage as a coverage after a time
wh <- hanojoel:::create_r_model(odin_model_path = system.file("extdata/odin_model.R",
package = "hanojoel"),
het_brackets = 5,
age = init_age,
init_EIR = init_EIR,
itn_cov = 0.3,
ITN_IRS_on = 20,
num_int = 2,
country = NULL,
admin2 = NULL)
mod <- wh$generator(user= wh$state, use_dde = TRUE)
mod_run <- mod$run(t = 1:(time_period))
out <- mod$transform_variables(mod_run)
# Specify coverage as a vector
wh2 <- hanojoel:::create_r_model(odin_model_path = system.file("extdata/odin_model_itn.R",package = "hanojoel"),
het_brackets = 5,
age = init_age,
init_EIR = init_EIR,
num_int = 2,
t_vector = c(-25, 20),
itn_vector = c(0, 0.3),
ITN_IRS_on = 20,
pop_split = c(0.5, 0.5),
country = NULL,
admin2 = NULL)
# Helper sourced from eqm_soln_varying_nets.R at the top of this file
wh2 <- edit_equilibrium_varying_nets(wh=wh2)
mod2 <- wh2$generator(user= wh2$state, use_dde = TRUE)
mod_run2 <- mod2$run(t = 1:(time_period))
out2 <- mod2$transform_variables(mod_run2)
# Specify coverage as old coverage vector
wh3 <- hanojoel:::create_r_model(odin_model_path = system.file("extdata/odin_model_itn.R",package = "hanojoel"),
het_brackets = 5,
age = init_age,
init_EIR = init_EIR,
num_int = 2,
t_vector = c(-25, 20),
itn_vector = c(0, 0.3),
ITN_IRS_on = 20,
pop_split = c(0.7, 0.3),
country = NULL,
admin2 = NULL)
wh3 <- edit_equilibrium_varying_nets(wh=wh3)
mod3 <- wh3$generator(user= wh3$state, use_dde = TRUE)
mod_run3 <- mod3$run(t = 1:(time_period))
out3 <- mod3$transform_variables(mod_run3)
# All three parameterisations must agree (looser tolerance: ODE solver noise)
expect_equal(out$prev, out2$prev, tolerance=1e-5)
expect_equal(out$prev, out3$prev, tolerance=1e-5)
expect_equal(out$inc, out2$inc, tolerance=1e-5)
expect_equal(out$inc, out3$inc, tolerance=1e-5)
})
|
0460de1dc37942932ed2129402b5738f0b73cb5b | d53e84c25a103426600dfac806b9cfc110f29ebb | /recommend.r | bc3c0747ef8cdfa95ffb6f63cd0b30a781696fb3 | [] | no_license | rvazquezv/Titanic | 42a608c6e4556ffb197be9dfaf3867ad9026c3d6 | 8b66e7721c7e8dffbe716d2b7ffe1cb6dcec1d3f | refs/heads/main | 2023-06-03T22:23:45.737013 | 2021-06-21T10:52:32 | 2021-06-21T10:52:32 | 372,794,499 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,481 | r | recommend.r | ############################################################################################################################################################
## Recommendation Systems questions
############################################################################################################################################################
library(tidyverse)
library(lubridate)
library(dslabs)
data("movielens")
#### Q1
ex1<-movielens %>% filter(!is.na(rating))%>%group_by(movieId,year)%>%summarize(n=n(),year = as.character(first(year)))
qplot(year,n,data=ex1,geom = "boxplot") + coord_trans(y = "sqrt") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
v<-ex1%>%group_by(year)%>%summarize(m=median(n))
v$year[which.max(v$m)]
#### Q2
ex2<-movielens %>% filter(year>=1993)
ex2$timestamp<-as.Date(as.POSIXct(ex2$timestamp,tz = "UTC",origin = "1970-01-01"),format ="%Y-%m-%d")
## The Shawshank Redemption
idx<-grep("Shawshank",ex2$title)
ex2$movieId[idx]
movielens %>% filter(movieId==318)%>%summarize(m=mean(rating))
## "Forrest Gump"
a<-ex2 %>% filter(title=="Forrest Gump")
a<-a%>% mutate(R_year=year(timestamp))
a2<-a%>%group_by(R_year)%>%summarize(n=n())
sum(a2$n)/(2018-min(a$year))
movielens %>%
filter(year >= 1993) %>%
group_by(movieId) %>%
summarize(n = n(), years = 2018 - first(year),
title = title[1],
rating = mean(rating)) %>%
mutate(rate = n/years) %>%
top_n(25, rate) %>%
arrange(desc(rate))
#### Q3
a<-movielens %>%
filter(year >= 1993) %>%
group_by(movieId,year) %>%
summarize(n = n(), years = 2018 - first(year),
title = title[1],
rating = mean(rating)) %>%
mutate(rate = n/years)
a%>% ggplot(aes(rate, rating)) +
geom_point() +
geom_smooth()
#### Q5
movielens <- mutate(movielens, date = as_datetime(timestamp))
###as.Date(as.POSIXct(ex2$timestamp,tz = "UTC",origin = "1970-01-01"),format ="%Y-%m-%d")
#### Q6
movielens <- mutate(movielens,week=round_date(date,unit="week"))
movielens%>% group_by(week)%>%summarize(m=mean(rating))%>% ggplot(aes(week, m)) +
geom_point() +
geom_smooth()
#### Q8
gui<-movielens%>% group_by(genres)%>%summarize(n=n(),avg=mean(rating),se=sd(rating)/sqrt(n()))%>%filter(n>1000)
gui%>%ggplot(aes(x = genres, y = avg, ymin = avg - 2*se, ymax = avg + 2*se)) +
geom_point() +
geom_errorbar() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
gui$genres[which.min(gui$avg)]
|
0a92d850b14501e37fd5fbd8fe9ed4c74abe2c70 | 70ef716830bc1496cd75a1d19bc130d950e9dc26 | /Hi-C/plotHeatmap.R | 82b250215aeb5fcd0228bac496a296799fd31a71 | [] | no_license | shenlab423/2CLC-Project-Code | ec56427d764cfa040d0e22b1ed02286821aa772a | 1ba667520a1c170b59ac7fb119dfbd380e4ed4c7 | refs/heads/main | 2023-08-27T11:42:48.402982 | 2021-10-22T10:37:24 | 2021-10-22T10:37:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 642 | r | plotHeatmap.R |
tclc_enhancer <- read.table("2CLC.compartment.txt",sep=" ")
library(gplots)
rampCol1 <- colorRampPalette(c("blue","white","red"))(100)
pdf("2CP-2CLC.pdf",width=6,height=6)
heatmap.2(as.matrix(log2(tclc_enhancer)),trace = "none",col = rampCol1,Rowv = F,Colv = F,density.info = "none",breaks = seq(-1,1,0.02))
dev.off()
tclc_enhancer <- read.table("ESC.compartment.txt",sep=" ")
library(gplots)
rampCol1 <- colorRampPalette(c("blue","white","red"))(100)
pdf("2CN-ESC.pdf",width=6,height=6)
heatmap.2(as.matrix(log2(tclc_enhancer)),trace = "none",col = rampCol1,Rowv = F,Colv = F,density.info = "none",breaks = seq(-1,1,0.02))
dev.off()
|
6eb81513546abe792077ab11723cadd0ac9380ab | 16d96ea48eba2cad07ac33a2e9c5630ba1b65e62 | /MyFacebook/analisis_comments.R | e7a3bdece7479e8608632befea10814d971caec8 | [] | no_license | nerudista/DataViz | 0986dd1f93b609569b087c031fd9b4e39e7546b3 | 41078583efff58f9331b1635ed055a69fe7205e1 | refs/heads/master | 2021-07-01T00:29:27.198820 | 2020-12-22T04:24:21 | 2020-12-22T04:24:21 | 228,298,420 | 6 | 2 | null | 2020-12-22T04:24:23 | 2019-12-16T03:53:53 | HTML | UTF-8 | R | false | false | 12,030 | r | analisis_comments.R | pacman::p_load(tidyverse,
tidytext,
tm, #para los stopwords
quanteda, #para el text mining
quanteda.corpora,
syuzhet, #para el nrc_sentiment
ggthemes,
wordcloud,
reshape2, #para el acast
topicmodels #para el LDA
)
#Cargar la fuente desde mi Windows
windowsFonts(`Lato` = windowsFont("Lato"))
# Crear theme
# Inspirado en el styleguide del Urban Institute
# https://urbaninstitute.github.io/graphics-styleguide/
theme_fb <- theme(
#Esto pone blanco el titulo , los ejes, etc
plot.background = element_rect(fill = '#FFFFFF', colour = '#FFFFFF'),
#Esto pone blanco el panel de la gráfica, es decir, sobre lo que van los bubbles
panel.background = element_rect(fill="#FFFFFF",color="#FFFFFF"),
panel.grid.minor = element_blank(),
panel.grid.major.y = element_line(color="#DEDDDD"),
panel.grid.major.x = element_blank(),
text = element_text(#color = "#1db954",
family="Lato"),
# limpiar la gráfica
axis.line = element_line(colour="#FFFFFF"),
#axis.title=element_blank(),
axis.text=element_text(family="Lato",
size=12),
axis.ticks=element_blank(),
axis.title.x = element_text( margin=margin(10,0,0,0),
family="Lato",
size=12,
color="#0D1F2D"
),
axis.title.y = element_text( margin=margin(0,15,0,0),
family="Lato",
size=12,
color="#0D1F2D"
),
# ajustar titulos y notas
plot.title = element_text(family="Lato",
size=18,
margin=margin(0,0,15,0),
hjust = 0, #align left
color="#0D1F2D"),
plot.subtitle = element_text(size=14,
family="Lato",
hjust = 0, #align left
margin=margin(0,0,25,0),
color="#0D1F2D"),
plot.caption = element_text(
color="#0D1F2D",
family="Lato",
size=11,
hjust = 0 #align left
),
legend.position = "none",
#para los titulos del facet_wrap
strip.text.x = element_text(size=12, face="bold"),
complete=FALSE
)
my_caption <- expression(paste(bold("Fuente:"), " Datos proporcionados por Facebook para el usuario ", bold("nerudista")))
######
data_comments <- read_csv("./Datos/misComentarios.csv")
data_posts <- read_csv("./Datos/misPosts.csv")
data_comments <- data_comments %>% mutate(tipo = "Comentario")
data_posts <- data_posts%>% mutate(tipo = "Post")
data <- rbind(data_comments, data_posts)
# cargar stopwords en español
stop_words <- tm::stopwords(kind="es")
my_stop_words <- c("si","p","d","así","tan","!","¡","=","$","esposa",
"de","que","a","with","to","ps","made","nocroprc")
final_stop_words <- c(stop_words,my_stop_words)
################# QUANTEDA ###########################
#creo un corpus con QUANTEDA con los comentarios
corp_comments <- quanteda::corpus(data$Comment)
head(docvars(corp_comments))
summary(corp_comments, n=5)
#To extract texts from a corpus, we use an extractor, called texts().
texts(corp_comments)[7]
# voy a tokenizar cada post
tok_comments <- quanteda::tokens(corp_comments,
remove_numbers = TRUE,
remove_punct = TRUE) %>%
tokens_remove(pattern = final_stop_words,
valuetype = 'fixed')
# ahora voy a crear una matrix dfm
dfmat_comments <- quanteda::dfm(tok_comments,remove_punct = TRUE)
#otra matrix dfm
dfmat_comments_2 <- quanteda::dfm(corp_comments, groups="tipo")
#veo frecuencias
com_stat_freq <- quanteda::textstat_frequency(dfmat_comments, n=80)
#creo grafica
set.seed(132)
quanteda::textplot_wordcloud(dfmat_comments,
color = rev(RColorBrewer::brewer.pal(10, "RdBu")),
max_words = 100,
random_order =FALSE)
################# TIDYTEXT ###########################
#Voy a limpiar un poco la base para estandarizarla
data.limpia <- data %>%
mutate(Comment = iconv (Comment,"UTF-8", "ASCII//TRANSLIT")) %>% # QUITA ACENTOS Y CARAC. ESPECIALES
mutate(Comment = tolower(Comment)) %>%
mutate(Comment = str_squish(Comment)) %>%
mutate(Comment = str_remove_all(Comment,"[[:punct:]]") ) %>%
mutate(Comment = str_remove_all(Comment,"[[:digit:]]") )
#Ahora a tokenizar por palabra
tidy.tokens.word <- data.limpia %>%
tidytext::unnest_tokens(word,Comment)
#Ahora a tokenizar por oracion
#Da casi lo mismo que la data.limpia.
#Es decir, casi nnca repetí oraciones.
tidy.tokens.sentence <- data.limpia %>%
unnest_tokens(sentence, Comment, token = "sentences")
#Ahora por n-gramas
tidy.tokens.ngram <- data.limpia %>%
unnest_tokens(ngram, Comment, token = "ngrams", n = 2)
cuenta.ngramas <- tidy.tokens.ngram %>%
count(ngram) %>%
arrange(-n) %>%
#dplyr::filter(!ngram %in% tm::stopwords(kind="es")) %>%
dplyr::filter(!ngram %in% final_stop_words) %>%
dplyr::filter(nchar(ngram)> 0)
png("graficas/wordcloud_tm.png", width = 1000, height = 1000, res=200)
#Visualizar ngramas
cuenta.ngramas %>%
with(wordcloud::wordcloud(ngram,
n,
max.words = 40,
random.order = FALSE,
colors = rev(brewer.pal(5,"Paired"))))
dev.off()
# ANALISIS DE SENTIMIENTOS CON AFINN (ESPAÑOL)
afinn.esp <- read_csv("./datos/lexico_afinn.en.es.csv",
locale=locale(encoding = "LATIN1"))
fb.affin.esp <- tidy.tokens.word %>%
filter(!word %in% final_stop_words) %>%
inner_join(afinn.esp,
by = c("word" = "Palabra" )) %>%
distinct(word, .keep_all = TRUE)
#graficar
fb.affin.esp %>%
group_by(tipo) %>%
summarise( neto = sum(Puntuacion)) %>%
ggplot( aes(x=tipo,
y=neto,
fill = tipo))+
geom_col()+
geom_text( aes(x=tipo,y=neto,label = neto),
nudge_y = 8,
family="Lato",
fontface="bold",
size=4.5
)+
labs(title="Sentimiento por Tipo de Publicación",
subtitle = "Calificación Obtenida Usando AFFIN",
caption = my_caption,
y= "",
x="")+
scale_fill_manual(values = c("#0a4c6a","#cfe8f3"))+
theme_fb
#otro wordlcoud pero ahora por comparacio´n
#graficar
png("graficas/comparacion_cloud.png", width = 1000, height = 1000, res=200)
affin.count <- fb.affin.esp %>%
mutate(sentimiento = dplyr::case_when(
Puntuacion < 0 ~ "Negativo",
Puntuacion > 0 ~ "Positivo"
)) %>%
count(tipo,sentimiento,word) %>%
arrange(-n) %>%
reshape2::acast(word ~ sentimiento, fill = 0, value.var = "n") %>%
wordcloud::comparison.cloud(colors = c("#db2b27", "#12719e"),
random.order = FALSE,
scale=c(1.5,.5),
title.size = 2,
max.words = 400)
dev.off()
# Sentiment analysis con syuzhet
# Aplico NRC a mis posts y comments
fb_sentimientos_nrc <- syuzhet::get_nrc_sentiment(data$Comment , language = "spanish")
df_fb_sentimientos_nrc <- fb_sentimientos_nrc %>%
dplyr::summarise_all(funs(sum)) %>%
rowid_to_column("id") %>%
pivot_longer(-id, names_to = "sentimiento", values_to = "count")
## grafica con sentimientos
df_fb_sentimientos_nrc %>%
filter(!sentimiento %in% c('positive','negative')) %>%
ggplot( ) +
geom_col(aes(x= reorder(sentimiento,count),
y= count,
fill = sentimiento))+
scale_fill_brewer(palette="Blues")+
coord_flip()+
labs(title ="")
theme_clean()+
theme(legend.position = "none")
# treemap
df_fb_sentimientos_nrc %>%
filter(!sentimiento %in% c('positive','negative')) %>%
treemap(
index="sentimiento",
vSize="count",
type="index",
fontsize.labels=c(12),
fontsize.title = 18,
palette = "Blues",
title="Sentimientos en Post y Comments",
fontfamily.title = "Lato",
fontfamily.labels = "Lato",
border.col = "#191414"
)+
theme_fb
## grafica con positivo y negativo
BarPositiveNegative <- df_fb_sentimientos_nrc %>%
filter(sentimiento %in% c('positive','negative')) %>%
ggplot( ) +
geom_col(aes(x= reorder(sentimiento,count),
y= count,
fill = sentimiento))+
labs(title = "Clasificación de Palabras Por Sentimiento ",
caption = my_caption ,
y = "Número de Palabras",
x= "")+
scale_fill_manual(values=c("#db2b27", "#12719e"))+
scale_x_discrete(labels=c("Negativas","Positivas")) +
theme_fb;BarPositiveNegative
ggsave("./graficas/BarPositiveNegative.png", BarPositiveNegative, width = 6, height = 9)
################## TF-IDF
# Term frequency - inerse document frequency
#Necesito que ya esté tokenizado los post y los comments
# voy a usar el tidy.tokens.word
#para que los DF no me de exponentes en en los resultados.
options(scipen=99)
tfidf <- tidy.tokens.word %>%
filter(!word %in% final_stop_words) %>%
count(word,tipo) %>%
tidytext::bind_tf_idf(word,tipo,n)
# A graficar tf-idf
#primero creo los labels para los titulos del facet_wrap
labels <- c(comment = "Comentario", post = "Post")
tfidf %>%
group_by(tipo) %>%
arrange(-tf_idf) %>%
top_n(5) %>%
#ungroup() %>%
ggplot(aes( x = reorder(word,tf_idf),
y = tf_idf,
fill = tipo))+
geom_col(show.legend = FALSE)+
scale_fill_manual(values = c("#0a4c6a","#cfe8f3"))+
facet_wrap( ~ tipo,scales = "free",
labeller =labeller(tipo=labels))+
coord_flip()+
labs(title="Palabras más Representativas por Tipo de Publicación",
subtitle = "Ranking obtenido por TF_IDF",
caption = my_caption,
y= "",
x="")+
theme_fb
#LDA con topic models
# TRATA DE ENCONTRAR AQUELLAS PALABRAS QUE DEFINEN UNA CATEGORIA
# cada observación es un documento
#
# Crear un corpus
# lee el vector y lo convierte en un corpus
corpus.fb <- tm::Corpus(VectorSource(data$Comment))
corpus.fb <- tm::tm_map(corpus.fb, removeWords, stopwords("es"))
corpus.fb <- tm::tm_map(corpus.fb, removePunctuation)
corpus.fb <- tm::tm_map(corpus.fb, removeNumbers)
dtm.fb <- tm::DocumentTermMatrix(corpus.fb)
inspect(dtm.fb)
rowTotals <- apply(dtm.fb , 1, sum)
dtm.fb <- dtm.fb[rowTotals>0,]
bd.lda <- topicmodels::LDA(dtm.fb,k=4,control=list(seed=1234))
bd.topics <- tidytext::tidy(bd.lda, matrix="beta") #Prob por topico por palabra
bd.docs <- tidytext::tidy(bd.lda, matrix="gamma") %>% #Prob por topico por documento
pivot_wider(names_from = topic, values_from = gamma)
top_terminos <- bd.topics %>%
group_by(topic) %>%
top_n(10, beta) %>%
ungroup %>%
arrange(topic, -beta)
top_terminos %>%
mutate(term = reorder_within(term,beta,topic)) %>%
ggplot(aes(term,
beta,
fill=factor(topic)))+
geom_col()+
facet_wrap(~ factor(topic), scales = "free")+
coord_flip()+
scale_y_continuous(labels = scales::percent_format())+
scale_fill_manual(values = c("#1696d2","#ec008b","#fdbf11","#5c5859"))+
scale_x_reordered()+ # necesita el mutate de arriba. Quita el __1, __2 que éste pone.
labs(title = "Probabilidad de palabras por tópico",
caption = my_caption ,
y = "Porcentaje LDA ",
x= "Palabra")+
#scale_fill_manual(values=c("#db2b27", "#55b748","fdbf11","898F9C"))+
theme_fb
#4267B2 azul
#898F9C gris
#000000 negro
|
ddbf60e9448640cfd79b8b8481969bd273f6a4c6 | dff2b8e81e7be006a30f748e99ae7bea29287813 | /week4/q2.R | 0a3bbe1a9d22c1a69bab296bd341cdfb6008cf0e | [
"Apache-2.0"
] | permissive | jigyasu10/mining-massive-datasets-1 | 900172eec0181ea64ed6abac3e9b87d957d949ee | 2887452d1f073c8ada5417faa08d5b0a6fbc0533 | refs/heads/master | 2021-01-17T10:39:57.533316 | 2014-11-23T14:00:54 | 2014-11-23T14:00:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,083 | r | q2.R |
# Week 4, question 2: print the pairwise cosine-distance matrix between the
# three "documents" A, B and C for several boost weights alpha applied to
# the last feature column.
#
# alphas: numeric vector of boost weights; the default reproduces the four
#         cases of the original exercise (0, 0.5, 1, 2), so week4_q2() with
#         no arguments prints exactly what the original code printed.
#
# Relies on cos.sim() (defined below in this file), which returns the cosine
# DISTANCE between two rows of a matrix.
week4_q2 <- function(alphas = c(0, 0.5, 1, 2)) {
  for (a in alphas) {
    # Feature matrix: one row per document; the last column is scaled by alpha
    X <- rbind(c(1, 0, 1, 0, 1, 2*a),
               c(1, 1, 0, 0, 1, 6*a),
               c(0, 1, 0, 1, 0, 2*a))
    print(X)
    n <- nrow(X)
    # All ordered row-index pairs (i, j) for the n x n distance matrix
    cmb <- expand.grid(i = seq_len(n), j = seq_len(n))
    C <- matrix(apply(cmb, 1, cos.sim, X), n, n)
    rownames(C) <- c("A", "B", "C")
    colnames(C) <- c("A", "B", "C")
    # paste("alpha =", 0) prints the same string as the original
    # hard-coded print("alpha = 0"), etc.
    print(paste("alpha =", a))
    print(C)
  }
}
# http://en.wikipedia.org/wiki/Cosine_similarity#Angular_similarity
# cos.sim <- function(ix, X)
# {
# A = X[ix[1],]
# B = X[ix[2],]
# return (1 - ((1 /(sum(A*B)/sqrt(sum(A^2)*sum(B^2)))) / pi))
# }
# cos.sim <- function(ix, X)
# {
# A = X[ix[1],]
# B = X[ix[2],]
# return (1 - ((2*(1 /(sum(A*B)/sqrt(sum(A^2)*sum(B^2))))) / pi))
# }
# Cosine distance between two rows of X, selected by the index pair ix.
# ix: length-2 vector of row indices; X: numeric matrix of row vectors.
# Returns 1 - cos(angle): 0 for identical direction, up to 2 for opposite.
cos.sim <- function(ix, X) {
  u <- X[ix[1], ]
  v <- X[ix[2], ]
  1 - sum(u * v) / sqrt(sum(u^2) * sum(v^2))
}
# cos.sim <- function(ix, X)
# {
# A = X[ix[1],]
# B = X[ix[2],]
# return( sum(A*B)/sqrt(sum(A^2)*sum(B^2)) )
# }
# main()
# NOTE(review): the lsa package also provides cosine(); it is attached here
# but this script uses its own cos.sim() defined above.
library("lsa")
# Run the exercise and print the distance matrices for all alpha values
week4_q2()
|
6a7309c1fe9a92c1908ca31943881afbe9189a3b | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/FDRreg/man/FDRreg-package.Rd | 2a673a93e275c7aa0866203a8211eaefd83f34e8 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,406 | rd | FDRreg-package.Rd | \name{FDRreg-package}
\alias{FDRreg-package}
\docType{package}
\title{
False discovery rate regression
}
\description{
Tools for FDR problems, including false discovery rate regression. Fits models whereby the local false discovery rate may depend upon covariates, either via a linear or additive logistic regression model.
}
\details{
\tabular{ll}{
Package: \tab FDRreg\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-02-25\cr
License: \tab GPL (>=3) \cr
}
The workhorse function is FDRreg(z,X, ...), where z is an observed vector of z statistics, and X is a matrix of covariates. Do not add a column of ones to X to get an intercept term; the function does that for you, just like R's base lm() and glm() functions.
}
\author{
Author: James G. Scott, with contributions from Rob Kass and Jesse Windle.
Maintainer: James G. Scott <james.scott@mccombs.utexas.edu>
}
\references{
False discovery rate regression: application to neural synchrony detection in primary visual cortex. James G. Scott, Ryan C. Kelly, Matthew A. Smith, Pengcheng Zhou, and Robert E. Kass. arXiv:1307.3495 [stat.ME].
}
\keyword{ False discovery rates }
\examples{
library(FDRreg)
# Simulated data
P = 2
N = 10000
betatrue = c(-3.5,rep(1/sqrt(P), P))
X = matrix(rnorm(N*P), N,P)
psi = crossprod(t(cbind(1,X)), betatrue)
wsuccess = 1/{1+exp(-psi)}
# Some theta's are signals, most are noise
gammatrue = rbinom(N,1,wsuccess)
table(gammatrue)
# Density of signals
thetatrue = rnorm(N,3,0.5)
thetatrue[gammatrue==0] = 0
z = rnorm(N, thetatrue, 1)
hist(z, 100, prob=TRUE, col='lightblue', border=NA)
curve(dnorm(x,0,1), add=TRUE, n=1001)
\dontrun{
# Fit the model
fdr1 <- FDRreg(z, covars=X, nmc=2500, nburn=100, nmids=120, nulltype='theoretical')
# Show the empirical-Bayes estimate of the mixture density
# and the findings at a specific FDR level
Q = 0.1
plotFDR(fdr1, Q=Q, showfz=TRUE)
# Posterior distribution of the intercept
hist(fdr1$betasave[,1], 20)
# Compare actual versus estimated prior probabilities of being a signal
plot(wsuccess, fdr1$priorprob)
# Covariate effects
plot(X[,1], log(fdr1$priorprob/{1-fdr1$priorprob}), ylab='Logit of prior probability')
plot(X[,2], log(fdr1$priorprob/{1-fdr1$priorprob}), ylab='Logit of prior probability')
# Local FDR
plot(z, fdr1$localfdr, ylab='Local false-discovery rate')
# Extract findings at level FDR = Q
myfindings = which(fdr1$FDR <= Q)
}
}
|
ac8d0ac96d7219f8a9c5c1771892fbe5d0ce23f7 | 92554d4f4a1238ad4dec9c7ac51c7ccd32b8b50c | /man/test_junc_gr.Rd | c19d5c59143851de55fdda21f7224bacbd4a15f8 | [] | no_license | jmw86069/splicejam | a1fbe599ac8ae286a66569db84bb69bbf5674ab7 | 04d7c2d08461a6dfe7eb1890434afe9c6e9e9fd8 | refs/heads/master | 2022-12-11T06:53:14.337405 | 2022-11-28T19:41:02 | 2022-11-28T19:41:02 | 139,864,732 | 20 | 5 | null | null | null | null | UTF-8 | R | false | true | 1,935 | rd | test_junc_gr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jambio-data.R
\docType{data}
\name{test_junc_gr}
\alias{test_junc_gr}
\title{Sample junction data GRangesList}
\format{
GRangesList where each GRangesList item represents
a distinct biological sample. Each GRanges item represents
one splice junction, whose score represents the abundance
of splice junction reads observed. The start of each splice
junction should be one base after the end of the corresponding
exon, using 1-based coordinates. Therefore, an exon spanning
1-10 covers 10 bases, the corresponding junction would
begin at position 11. Similarly, if the connected exon
begins at position 100, the junction would end at position
99.
}
\usage{
test_junc_gr
}
\description{
Sample junction data GRangesList
}
\details{
This dataset contains RNA-seq splice junction data
stored as a GRangesList.
}
\examples{
# The code below is used to create the junction test data
suppressPackageStartupMessages(library(GenomicRanges));
suppressPackageStartupMessages(library(ggplot2));
test_junc_gr <- GRanges(seqnames=rep("chr1", 5),
ranges=IRanges::IRanges(
start=c(200, 200, 400, 400, 750),
end=c(299, 499, 499, 899, 899)),
strand=rep("+", 5),
score=c(200, 50, 120, 80, 170),
sample_id=rep("sample_A", 5));
names(test_junc_gr) <- jamba::makeNames(
rep("junc", length(test_junc_gr)),
suffix="");
test_junc_gr;
# To plot junctions, use grl2df(..., shape="junction")
junc_df <- grl2df(test_junc_gr, shape="junction")
gg1 <- ggplot(junc_df, aes(x=x, y=y, group=id, fill=gr_name)) +
ggforce::geom_diagonal_wide(alpha=0.7) +
colorjam::theme_jam() +
colorjam::scale_fill_jam()
print(gg1);
}
\seealso{
Other splicejam data:
\code{\link{test_cov_gr}},
\code{\link{test_cov_wide_gr}},
\code{\link{test_exon_gr}},
\code{\link{test_exon_wide_gr}},
\code{\link{test_junc_wide_gr}}
}
\concept{splicejam data}
\keyword{datasets}
|
b4884885c1927198443b36d58f695329fabc7ecd | 3083e64fbf83e3375e268ed1e079d857dc3f508f | /R/eventPlot.R | b537ab3b3ac945d1f18fb2ae7590514462ad5964 | [
"MIT"
] | permissive | KWB-R/kwb.event | 25b4b8518e2d52d6c39dcbee0fc8c4227eba6c1d | 7fe54ea62fa3b88992098b59af4c562104c9c427 | refs/heads/master | 2023-04-13T01:02:20.711538 | 2022-06-08T16:51:50 | 2022-06-08T16:51:50 | 136,062,520 | 0 | 1 | MIT | 2022-06-08T16:51:51 | 2018-06-04T17:50:35 | R | UTF-8 | R | false | false | 11,413 | r | eventPlot.R | # plotMergedEventInfoForValidation ---------------------------------------------
#' Plot merged Event Info for Validation
#'
#' For each row of \code{mergedEvents}, draws one Gantt-style page with three
#' bands: the reference event (bottom, y = 1..2), its partner event(s)
#' (middle, y = 3..4, in red) and the resulting merged event (top, y = 5..6),
#' so that the result of the event merging can be checked visually.
#'
#' @param mergedEvents data frame containing information about merged events,
#'   i.e. containing columns \code{tBeg.event1}, \code{tEnd.event1},
#'   \code{tBeg.event2first}, \code{tEnd.event2last}, \code{tBeg.merged},
#'   \code{tEnd.merged}
#' @export
plotMergedEventInfoForValidation <- function(mergedEvents)
{
  # Columns holding all begin/end timestamps of one merged-event record
  beginEndColumns <- c(
    "tBeg.event1", "tEnd.event1", "tBeg.event2first", "tEnd.event2last",
    "tBeg.merged", "tEnd.merged"
  )
  # One plot page per merged event
  for (i in seq_len(nrow(mergedEvents))) {
    mergedEvent <- mergedEvents[i, ]
    # Common x range covering all begin/end timestamps of this record
    xlim <- range(
      kwb.datetime::hsToPosix(as.vector(as.matrix(
        mergedEvent[, beginEndColumns]
      ))),
      na.rm = TRUE
    )
    # Band 1 (default y1 = 1): reference event; this call opens a new plot
    ganttPlotEvents(
      kwb.utils::hsRenameColumns(mergedEvent, list(
        tBeg.event1 = "tBeg", tEnd.event1 = "tEnd"
      )),
      title = "reference event", xlim = xlim, ylim = c(1, 7)
    )
    # Dashed vertical line at the end of the reference event
    graphics::abline(v = mergedEvent$tEnd.event1, lty = 2)
    graphics::title(paste(
      "Reference event", i, "/", nrow(mergedEvents), ":",
      mergedEvent$tBeg.event1, "-", mergedEvent$tEnd.event1
    ))
    # Band 2 (y1 = 3): the partner event(s), added in red to the same plot
    ganttPlotEvents(
      kwb.utils::hsRenameColumns(mergedEvent, list(
        tBeg.event2first = "tBeg", tEnd.event2last = "tEnd"
      )),
      title = "partner event(s)", add = TRUE, y1 = 3, col = "red"
    )
    # Red dashed vertical lines at the partner events' outer boundaries
    A <- list(col = "red", lty = 2)
    kwb.utils::callWith(graphics::abline, A, v = mergedEvent$tBeg.event2first)
    kwb.utils::callWith(graphics::abline, A, v = mergedEvent$tEnd.event2last)
    # Band 3 (y1 = 5): the merged event
    ganttPlotEvents(
      kwb.utils::hsRenameColumns(mergedEvent, list(
        tBeg.merged = "tBeg", tEnd.merged = "tEnd"
      )),
      title = "merged event",
      add = TRUE,
      y1 = 5
    )
  }
}
# plotEventInfo ----------------------------------------------------------------
#' Plot Event Info
#'
#' Visualises which events of several event lists run in parallel: one band
#' per event list, with boxes shaded alternately by event number and gaps
#' where a list has no event (NA).
#'
#' @param eventInfo as returned by \emph{getParallelEventsInfo}, with first
#'   columns (timestamps) removed
#' @export
plotEventInfo <- function(eventInfo)
{
  n.lists <- ncol(eventInfo)
  n.events <- nrow(eventInfo)
  # Empty canvas: event number on the x axis, event list number on the y axis
  graphics::plot(
    NA, NA, ylim = c(1, n.lists + 1), xlim = c(1, n.events + 1),
    ylab = "event list number", xlab = "event number"
  )
  for (row in seq_len(n.events)) {
    # Event lists that have no event in this row
    missing <- which(is.na(eventInfo[row, ]))
    x.pos <- seq_len(n.lists)
    y.pos <- rep(row, n.lists)
    # Alternate between the first two palette colours by event number
    shading <- (eventInfo[row, ] %% 2) + 1
    # Suppress boxes where the event number is NA
    x.pos[missing] <- NA
    y.pos[missing] <- NA
    graphics::rect(y.pos, x.pos, y.pos + 1, x.pos + 1, col = shading, border = NA)
  }
}
# plotEventProperty1VersusEventProperty2 ---------------------------------------
#' Plot Event Property 1 versus Event Property 2
#'
#' Scatter plot of one event property against another, with each point
#' labelled by its event number.
#'
#' @param events data frame with at least two columns named as given in
#'   \emph{propertyName1} and \emph{propertyName2}
#' @param propertyName1 name of property to appear on the x-axis
#' @param propertyName2 name of property to appear on the y-axis
#' @param eventNumbers vector of event numbers used for labelling. Default:
#'   column \emph{eventNumber} of \emph{events}
#' @param xlab default: \code{propertyName1}
#' @param ylab default: \code{propertyName2}
#' @param cex character expansion factor for the point labels drawn with
#'   \code{\link[graphics]{text}}
#' @param \dots further arguments passed to \code{\link[graphics]{plot}}
#' @export
plotEventProperty1VersusEventProperty2 <- function(
  events, propertyName1, propertyName2, eventNumbers = events$eventNumber,
  xlab = propertyName1, ylab = propertyName2, cex = 0.7, ...
)
{
  kwb.utils::checkForMissingColumns(events, c(propertyName1, propertyName2))
  x <- events[[propertyName1]]
  y <- events[[propertyName2]]
  # Refuse to plot (with a warning) if either property is completely NA
  if (all(is.na(x))) {
    .propertyWarning(propertyName1)
  } else if (all(is.na(y))) {
    .propertyWarning(propertyName2)
  } else {
    graphics::plot(x, y, pch = 16, cex = 0.5, xlab = xlab, ylab = ylab, ...)
    graphics::grid()
    # Offset the labels by 0.25 cm so they do not sit on top of the points
    delta <- kwb.plot::cmToUserWidthAndHeight(0.25)
    graphics::text(
      x + delta$width, y + delta$height, labels = eventNumbers, cex = cex
    )
  }
}
# .propertyWarning -------------------------------------------------------------
# Warn that the given event property is NA for all events and that plotting
# is therefore skipped.
.propertyWarning <- function(propertyName)
{
  msg <- sprintf("Event property \"%s\" is NA for all events", propertyName)
  warning(msg, " -> I do not plot!")
}
# ganttPlotEventLists ----------------------------------------------------------
#' Gantt Plot of Event Lists
#'
#' Plot event lists, one above the other. The bottom band shows the "merged"
#' event list (created with \code{mergeAllEvents} if not already contained in
#' \code{eventLists}); each original event list gets its own band above it.
#'
#' @param eventLists list of data frames containing events (containing columns
#'   \emph{tBeg}, \emph{tEnd}, as returned by \code{\link{hsEvents}})
#' @param margin.top top margin as a fraction of the total plot height
#' @param time.format passed to \code{\link[kwb.plot]{addTimeAxis}}
#' @param n.xticks passed to \code{\link[kwb.plot]{addTimeAxis}}
#' @param showLabels passed to \code{\link{ganttPlotEvents}}
#' @param \dots further arguments passed to ganttPlotEvents
#' @return the two timestamps used as x limits of the time axis
#' @export
ganttPlotEventLists <- function(
  eventLists, margin.top = 0.8, time.format = NULL, n.xticks = 10,
  showLabels = TRUE, ...
)
{
  time.format <- kwb.utils::defaultIfNULL(time.format, "%d.%m.")
  elementNames <- names(eventLists)
  # Ensure there is a "merged" element combining all event lists
  eventLists$merged <- kwb.utils::defaultIfNULL(
    eventLists$merged, mergeAllEvents(eventLists)
  )
  n <- length(eventLists)
  # One unit of height per event list plus margin.top between neighbouring ones
  ylim <- c(0, n + (n-1) * margin.top)
  bandheight <- 0.5 * margin.top / diff(ylim) # for labels
  y1 <- 0
  # Alternate label heights so that neighbouring labels do not overlap
  yLabel <- y1 + rep(c(-0.3, -0.1), length.out = nrow(eventLists$merged))
  # start with plotting the merged events (since ganttPlotEvents does not have
  # an xlim parameter)
  ganttPlotEvents(
    eventLists$merged, y1 = y1, ylim = ylim, yLabel = yLabel,
    showLabels = FALSE, title = "merged", ...
  )
  if (showLabels) {
    # Label positions: centre of each merged event's time interval
    x <- 0.5 * (
      as.numeric(eventLists$merged$tBeg) +
        as.numeric(eventLists$merged$tEnd)
    )
    kwb.plot::addLabels(
      x = x, y0 = y1 + 1, col.line="grey", labels = rownames(eventLists$merged),
      bandheight = bandheight
    )
  }
  # Add each original event list as its own band above the merged band
  for (elementName in setdiff(elementNames, "merged")) {
    events <- eventLists[[elementName]]
    y1 <- y1 + 1 + margin.top
    # NOTE(review): yLabel is recomputed here but not passed on to
    # ganttPlotEvents below (labels are drawn via addLabels instead) --
    # presumably a leftover; confirm before removing
    yLabel <- y1 + rep(c(-0.3,-0.1), length.out=nrow(events))
    ganttPlotEvents(
      events, add = TRUE, y1 = y1, showLabels = FALSE, title = elementName, ...
    )
    if (showLabels) {
      x <- (as.numeric(events$tBeg) + as.numeric(events$tEnd)) / 2
      kwb.plot::addLabels(
        x = x, y0 = y1 + 1, col.line="grey", labels = rownames(events),
        bandheight = bandheight
      )
    }
  }
  # Draw the common time axis over the full range of all event lists
  xlim <- getXLimFromEventLists(eventLists)
  timestamps <- xlim
  kwb.plot::addTimeAxis(timestamps, n = n.xticks, time.format = time.format)
  timestamps
}
# ganttPlotEvents --------------------------------------------------------------
#' Gantt-like Diagram to plot Event's Time Extension
#'
#' @param events event list as retrieved by \code{\link{hsEvents}}. Required
#'   columns: \emph{tBeg} (begin of event) and \emph{tEnd} (end of event), both
#'   of class POSIXt
#' @param add if TRUE, the event boxes are added to the current plot, otherwise
#'   a new plot is generated
#' @param y1 lower coordinates of the event boxes
#' @param y2 upper coordinates of the event boxes
#' @param xlim x limits. If NULL (default) the limits will be chosen so that all
#'   events fit into the plot
#' @param ylim y limits
#' @param col colour of shading lines
#' @param density density of shading lines
#' @param showLabels if TRUE, the event boxes are labelled with the row names of
#'   the events
#' @param eventLabels labels to be given to the events. Default:
#'   rownames(\emph{events})
#' @param yLabel y-position of labels, if labels are to be shown
#' @param type one of c("rectangle", "vertical")
#' @param title title to be plotted left of event rectangles
#' @param leftMargin left margin (where title is printed) as fraction of the
#'   range of the total time interval spanned by the events
#' @param xlab x axis label
#' @param cex character expansion factor
#' @param indicate indices of events to be indicated in a different color
#'   (indicationColour)
#' @param indicationColour colour to be used for indication, default: "red"
#' @param bandheight passed to \code{addLabels}
#' @param alternating passed to \code{addLabels}
#' @param adj passed to \code{text} plotting the event labels
#' @param \dots further arguments passed to rect or segments
#' @export
ganttPlotEvents <- function(
  events, add = FALSE, y1 = 1, y2 = y1 + 1, xlim = NULL,
  ylim = c(min(y1), max(y2)), col = "black", density = 5, showLabels = TRUE,
  eventLabels = rownames(events), yLabel = (y1 + y2) / 2, type = "rectangle",
  title = "", leftMargin = 0.2, xlab = "Time", cex = 0.8, indicate = NULL,
  indicationColour = "red", bandheight = 0.1, alternating = FALSE, adj = 0.5,
  ...
)
{
  if (! add) {
    # NOTE(review): the automatic x range uses the first tBeg and the last
    # tEnd, which assumes events are ordered by time -- confirm for unsorted
    # input
    x1 <- utils::head(events$tBeg, 1)
    if (is.null(xlim)) {
      x2 <- utils::tail(events$tEnd, 1)
      xrange <- diff(as.integer(range(x1, x2)))
      # Reserve leftMargin * time range on the left for the title text
      xlim <- c(x1 - leftMargin*xrange, x2)
    }
    # Empty plot without axes; boxes are drawn below, the time axis is left to
    # the caller
    graphics::plot(
      x = x1, y = rep(NA, length.out=length(x1)), xlim = xlim, ylim = ylim,
      type = "n", xlab = xlab, ylab = "", xaxt = "n", yaxt = "n"
    )
  }
  if (! is.null(indicate)) {
    # Recycle the colours over all events and highlight the indicated ones
    col <- rep(col, length.out = nrow(events))
    col[indicate] <- indicationColour
  }
  if (type == "rectangle") {
    # One shaded box per event, spanning its time interval
    graphics::rect(events$tBeg, y1, events$tEnd, y2, col=col, density=density, ...)
  } else if (type == "vertical") {
    # Only vertical line segments at event begin and end
    graphics::segments(events$tBeg, y1, events$tBeg, y2, col = col, ...)
    graphics::segments(events$tEnd, y1, events$tEnd, y2, col = col, ...)
  } else {
    stop("Unsupported type: ", type)
  }
  if (showLabels) {
    # Label each event at the centre of its time interval
    x <- rowMeans(data.frame(as.numeric(events$tBeg), as.numeric(events$tEnd)))
    kwb.plot::addLabels(
      x = x, labels = eventLabels, y0 = yLabel, bandheight = bandheight,
      col.line = NA, alternating = alternating, adj = adj
    )
  }
  # Print the title slightly right of the left plot border, vertically centred
  # within the event band
  userCoordinates <- kwb.plot::getPlotRegionSizeInUserCoords()
  x <- userCoordinates$left + 0.01 * userCoordinates$width
  graphics::text(x, (y1 + y2) / 2, labels = title, adj = 0, cex = cex)
}
# .hsShowEvents ----------------------------------------------------------------
# Draw a simple overview plot of the given events: one full-height box per
# event over a common time axis. Internal helper (not exported).
#
# evts:     event data frame with POSIXt columns tBeg and tEnd
# sigWidth: signal width in seconds; boxes are widened by one signal width to
#           the left and rounding happens in steps of 10 * sigWidth
# nTicks:   approximate number of time axis ticks
.hsShowEvents <- function(evts, sigWidth = 60, nTicks = 25)
{
  n <- nrow(evts)
  # Overall time range: begin of first and end of last event (assumes the
  # events are ordered by time)
  tmin <- evts$tBeg[1]
  tmax <- evts$tEnd[n]
  # Round the limits to multiples of 10 * sigWidth seconds (down/up)
  tmin <- kwb.datetime::roundTime(tmin, 10 * sigWidth, 1)
  tmax <- kwb.datetime::roundTime(tmax, 10 * sigWidth, 0)
  timediff <- (as.integer(tmax) - as.integer(tmin))
  # Tick distance so that about nTicks labels fit into the range
  tstep <- kwb.datetime::roundTime(timediff / nTicks, 10 * sigWidth, 0)
  # NOTE(review): cat() prints POSIXct values numerically (seconds since the
  # epoch); format() would give readable timestamps -- confirm intent
  cat("tmin:", tmin, "tmax:", tmax, "tstep:", tstep, "\n")
  # NOTE(review): par() is changed without restoring the old settings via
  # on.exit()
  graphics::par(mar = c(10, 5, 5, 5))
  graphics::plot(
    NA, NA, xlim = c(tmin, tmax), ylim = c(0, 1), axes = FALSE, xlab = NA,
    ylab = NA
  )
  # One full-height box per event, widened by one signal width to the left
  graphics::rect(evts$tBeg - sigWidth, 0, evts$tEnd, 1)
  positions <- seq(tmin, tmax, by = tstep)
  # Rotated, formatted timestamps on the x axis
  graphics::axis(
    1, at = positions, labels = format(positions, "%Y-%m-%d %H:%M"), las = 3
  )
}
|
65f379fde0737239958bc5492b0002a00adb85b0 | 9423d6bc590daebcad6c8227d3d452c48a1b2c94 | /iTOL/Generating_.tre/CALeDNAformatter.R | 82b848f0b085692ea1235c0f1d815ad4dd7122d4 | [] | no_license | SamuelLRapp/CALeDNA-NPS-AIS | 3dca81202e27d774f4a4a983e6f9583a4c03d689 | 38bcec9b757757e4f004bdc5fb18b31c3d266e87 | refs/heads/main | 2023-06-05T18:45:16.100468 | 2021-06-17T21:15:39 | 2021-06-17T21:15:39 | 334,285,397 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,402 | r | CALeDNAformatter.R | #### take in google sheet/data frame and bring them all into 1 column
# Build CALeDNA-formatted taxonomy strings (columns 7..1 of the sheet, i.e.
# domain down to species, joined with ";") from a Google-Sheet CSV export
# chosen interactively by the user.
X <- file.choose()
# Sheet with domain info filled in manually, because it was missing on taxize:
# https://docs.google.com/spreadsheets/d/1czLS8ZBG-2ykZzEPpOLvWCmkyVJF2SnatQhcJlz3aFA/edit?usp=sharing
GoogleSheetData_domain <- read.csv(file = X)
num_rows <- nrow(GoogleSheetData_domain)
CALeDNAformat <- as.data.frame(matrix(nrow = num_rows, ncol = 10))
# paste() is vectorized over rows, so the former `for (i in 1:num_rows)` loop
# is unnecessary (and this also behaves correctly for an empty sheet, where
# 1:num_rows would have iterated over c(1, 0)).
CALeDNAformat[, 1] <- paste(
  GoogleSheetData_domain[, 7], GoogleSheetData_domain[, 6],
  GoogleSheetData_domain[, 5], GoogleSheetData_domain[, 4],
  GoogleSheetData_domain[, 3], GoogleSheetData_domain[, 2],
  GoogleSheetData_domain[, 1],
  sep = ";"
)
# Export: one formatted taxonomy string per line
write.table(CALeDNAformat[,1], file = "CALeDNAformatT.txt", row.names= FALSE, quote=FALSE, col.names=FALSE)
# To create a comma-separated .txt file of a single row
CleanDataframe <- GoogleSheetData_domain # make a df copy to manipulate
# Remove rows that have NA in the `speciesgenus` column
CleanDataframe <- CleanDataframe[!is.na(CleanDataframe$speciesgenus), ]
# Reduce to the unique values of the first column (a character vector)
CleanDataframe <- unique(CleanDataframe[, 1])
Family_comma_sep_list.txt <- CleanDataframe
|
59c405b726cf1c5197716cf0802b51ce239dcd8a | 1b141d6887525dd038885603ba0525a4799fb297 | /man/rename_columns.Rd | 2bce091581c3ac71792df2d5e65839932f0453b5 | [
"MIT"
] | permissive | mjkarlsen/traumaR | c7b032ad24f5511d891348cf40b764e14a4d784b | dd52deec08282e8955c5fea6ad1fb7b2a80e0a9f | refs/heads/master | 2022-09-17T04:17:13.452037 | 2020-06-06T18:47:08 | 2020-06-06T18:47:08 | 260,229,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 359 | rd | rename_columns.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02 - Column Renaming.R
\name{rename_columns}
\alias{rename_columns}
\title{Renaming PTOS Data}
\usage{
rename_columns(.data)
}
\arguments{
\item{.data}{A dataframe of PTOS data}
}
\value{
It returns translations of the codes into human-friendly values.
}
\description{
Renaming PTOS Data
}
|
8df4856517d3264c3f8a6ca2e045218cd529d546 | ff8f992ac991c5adb2532702d73aa1ff0675fcee | /rached.R | 52cbe290a69c5488c3613c152c154989826d3146 | [] | no_license | zzj/rached | ae8262a0e9ba16faa0e40dff36e114984366208b | fe849bccc5a0ba9ab2847b3101ec0ac341aa5baa | refs/heads/master | 2016-09-05T23:33:30.046633 | 2013-01-16T16:15:01 | 2013-01-16T16:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,250 | r | rached.R |
# Directory where cached results are stored as .rds files.
rached.folder <- "temp.folder/"

# Create the cache directory if it does not exist yet.
rached.initialize <- function() {
  if (!file.exists(rached.folder)) {
    dir.create(rached.folder)
  }
}

# Remove all cached files from the cache directory.
rached.clear <- function() {
  # paste0() is the idiomatic equivalent of paste(..., sep = "")
  unlink(paste0(rached.folder, "*"))
}

rached.initialize()
# Make a cache key safe to use as a file name by replacing every "/" with ".".
rached.legalize.filename <- function(k) {
  chartr("/", ".", k)
}
# Build the cache key "<name>:<version>:<param names>:<param values>" from the
# cache name, version and the wrapped function's arguments.
rached.key <- function(name, version, md5, ...) {
  extra <- list(...)
  name.part <- paste(names(extra), collapse = "-")
  value.part <- paste(extra, collapse = "-")
  paste(name, version, name.part, value.part, sep = ":")
}
# Full path of the .rds cache file for the given key components.
rached.file <- function(name, version, md5, ...) {
  legal <- rached.legalize.filename(rached.key(name, version, md5, ...))
  paste(rached.folder, "/", legal, ".rds", sep = "")
}
# Wrap `func` so that its result for a given argument set is computed once and
# served from an .rds cache file on subsequent calls.
#
# func:    function whose results should be cached
# name:    cache name, used in the cache file name
# version: cache version; when > 0, the cache file of `version - 1` is removed
#          after a fresh result has been stored
# md5:     forwarded to rached.file()
rached.memoise <- function(func, name, version = 0, md5 = FALSE) {
  new.func <- function(...) {
    data.file <- rached.file(name, version, md5, ...)
    if (!file.exists(data.file)) {
      ret <- func(...)
      saveRDS(ret, data.file)
      ## remove cached data of the previous version
      if (version > 0) {
        old.data.file <- rached.file(name, version - 1, md5, ...)
        # BUG FIX: the original called file.exist(), which does not exist in
        # base R; the correct function is file.exists()
        if (file.exists(old.data.file)) {
          unlink(old.data.file)
        }
      }
    } else {
      ret <- readRDS(data.file)
    }
    ret
  }
  new.func
}
|
1cd040e8ed36d7c85f54abc69f27812f1e9e60d6 | a0c2f62ff97e3c65a2db9a31b6fb2396fc49d832 | /R/igbTrack.R | 93ae3172f3418d506f3c37ec4dad9e3983cbd664 | [] | no_license | gmbecker/rigbLite | 602272a345d67b173f8376b2a1f694a963a5639f | b478f5323eaf839b0bd750b22ad536a52ecb98bb | refs/heads/master | 2021-01-10T00:53:40.174665 | 2012-05-30T16:39:47 | 2012-05-30T16:39:47 | 3,603,919 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,015 | r | igbTrack.R | #
#
#This code was automatically generated. Do not edit.
#
#
# S4 class describing an IGB (Integrated Genome Browser) track via four slots:
# genome, loadmode, refresh flag and the URI of the data source.
# The accessor blocks below all follow one generated pattern per slot: create
# the generic if it does not exist yet, then register the igbTrack method.
# NOTE(review): is.function( "genome" ) etc. test a character STRING, which is
# always FALSE, so the get(..., mode = "function") branch of every guard is
# dead code and the plain standardGeneric() definition is always used --
# presumably the generator meant to test an existing object; confirm against
# the generator template.
# NOTE(review): for the setters, the generic is created with formals
# (object, ..., value) while some method definitions use (x, value); verify
# that setMethod() accepts this formal mismatch at load time.
setClass( Class = "igbTrack" , representation = representation( genome = "character" , loadmode = "character" , refresh = "logical" , uri = "character"))
# Getter for the `genome` slot
if (!isGeneric( "genome" )) {
if (is.function( "genome" ))
fun <- get( "genome" , mode = "function")
else
fun <- function( x ) standardGeneric( "genome" )
setGeneric( "genome" , fun)
}
setMethod( f = "genome" , definition = function(x)
{
x@genome
} , signature = c( x = "igbTrack"))
# Setter for the `genome` slot
if (!isGeneric( "genome<-" )) {
if (is.function( "genome<-" ))
fun <- get( "genome<-" , mode = "function")
else
fun <- function( object , ... , value ) standardGeneric( "genome<-" )
setGeneric( "genome<-" , fun)
}
setMethod( f = "genome<-" , definition = function(x , value)
{
x@genome = value
x
} , signature = c( x = "igbTrack"))
# Getter for the `loadmode` slot
if (!isGeneric( "loadmode" )) {
if (is.function( "loadmode" ))
fun <- get( "loadmode" , mode = "function")
else
fun <- function( object , ... ) standardGeneric( "loadmode" )
setGeneric( "loadmode" , fun)
}
setMethod( f = "loadmode" , definition = function(object , ...)
{
object@loadmode
} , signature = c( object = "igbTrack"))
# Setter for the `loadmode` slot
if (!isGeneric( "loadmode<-" )) {
if (is.function( "loadmode<-" ))
fun <- get( "loadmode<-" , mode = "function")
else
fun <- function( object , ... , value ) standardGeneric( "loadmode<-" )
setGeneric( "loadmode<-" , fun)
}
setMethod( f = "loadmode<-" , definition = function(object , ... , value)
{
object@loadmode = value
object
} , signature = c( object = "igbTrack"))
# Getter for the `refresh` slot
if (!isGeneric( "refresh" )) {
if (is.function( "refresh" ))
fun <- get( "refresh" , mode = "function")
else
fun <- function( object , ... ) standardGeneric( "refresh" )
setGeneric( "refresh" , fun)
}
setMethod( f = "refresh" , definition = function(object , ...)
{
object@refresh
} , signature = c( object = "igbTrack"))
# Setter for the `refresh` slot
if (!isGeneric( "refresh<-" )) {
if (is.function( "refresh<-" ))
fun <- get( "refresh<-" , mode = "function")
else
fun <- function( object , ... , value ) standardGeneric( "refresh<-" )
setGeneric( "refresh<-" , fun)
}
setMethod( f = "refresh<-" , definition = function(object , ... , value)
{
object@refresh = value
object
} , signature = c( object = "igbTrack"))
# Getter for the `uri` slot
if (!isGeneric( "uri" )) {
if (is.function( "uri" ))
fun <- get( "uri" , mode = "function")
else
fun <- function( x , ... ) standardGeneric( "uri" )
setGeneric( "uri" , fun)
}
setMethod( f = "uri" , definition = function(x , ...)
{
x@uri
} , signature = c( x = "igbTrack"))
# Setter for the `uri` slot
if (!isGeneric( "uri<-" )) {
if (is.function( "uri<-" ))
fun <- get( "uri<-" , mode = "function")
else
fun <- function( object , ... , value ) standardGeneric( "uri<-" )
setGeneric( "uri<-" , fun)
}
setMethod( f = "uri<-" , definition = function(object , ... , value)
{
object@uri = value
object
} , signature = c( object = "igbTrack"))
|
99d0cffdeff3ad97b29eefc99a9eeb00b734413d | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /VUROCS/man/clar.Rd | 580b7dbf97d4e08b0ef2bc22dc9e0f3c62de6180 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 819 | rd | clar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clar.R
\name{clar}
\alias{clar}
\title{Cumulative LGD Accuracy Ratio}
\usage{
clar(y, hx)
}
\arguments{
\item{y}{a vector of realized values.}
\item{hx}{a vector of predicted values.}
}
\value{
The function returns the CLAR for a vector of realized categories \code{y} and a vector of predicted categories \code{hx}.
}
\description{
Calculates for a vector of realized categories \code{y} and a vector of predicted categories \code{hx} the cumulative LGD accuracy ratio (CLAR) according to Ozdemir and Miu 2009.
}
\examples{
clar(rep(1:5,each=3),c(3,3,3,rep(2:5,each=3)))
}
\references{
Ozdemir, B., Miu, P., 2009. Basel II Implementation. A Guide to Developing and Validating a Compliant Internal Risk Rating System. McGraw-Hill, USA.
}
|
b5d86cf1935600a8a0101c9d22c9ecc7fce67b22 | da15f54df71d9d46e07e841a106b7d9fe8f60795 | /man/pssm.Rd | c1b2276731682d13ed1ebda298bd904f87aea81d | [] | no_license | tlfvincent/LOGICOIL | 6ad972df5c8ccb98fbaa865250ad895e072dae5b | ef3a02267a0586510343247bc5bda60c13019d46 | refs/heads/master | 2016-09-05T09:51:37.613331 | 2014-04-13T04:24:50 | 2014-04-13T04:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,893 | rd | pssm.Rd | \name{pssm}
\alias{pssm}
\docType{data}
\title{Profile scoring matrix derived from the LOGICOIL training set.}
\description{
This data set contains the Position Specific Scoring Matrix (PSSM) used as the LOGICOIL training set. This dataset was compiled from the CC+ coiled-coil database. This PSSM is used by LOGICOIL to predict the oligomeric state of coiled-coil sequences. More details on the training set can be found in the reference below.
}
\usage{data(pssm)}
\format{
A data frame with 20295 observations on the following 18 variables.
\describe{
\item{\code{amino}}{a factor with levels \code{A} \code{C} \code{D} \code{E} \code{F} \code{G} \code{H} \code{I} \code{K} \code{L} \code{M} \code{N} \code{P} \code{Q} \code{R} \code{S} \code{T} \code{V} \code{W} \code{X} \code{Y}}
\item{\code{register}}{a factor with levels \code{a} \code{b} \code{c} \code{d} \code{e} \code{f} \code{g}}
\item{\code{ab}}{a factor with levels \code{0} \code{AE} \code{IR} \code{NL}}
\item{\code{bc}}{a factor with levels \code{0} \code{ES}}
\item{\code{cd}}{a factor with levels \code{0} \code{LT} \code{QN}}
\item{\code{de}}{a factor with levels \code{0} \code{EL} \code{HD} \code{ID} \code{IK} \code{IL} \code{KD} \code{LK} \code{LQ} \code{NT} \code{SE} \code{TD} \code{TN} \code{TT}}
\item{\code{ef}}{a factor with levels \code{0} \code{FG} \code{IT} \code{QG} \code{TT} \code{YK}}
\item{\code{fg}}{a factor with levels \code{0} \code{KE} \code{KN} \code{LM} \code{QI} \code{RH} \code{RS} \code{SK} \code{TN} \code{TV}}
\item{\code{ga}}{a factor with levels \code{0} \code{EN} \code{KV} \code{NK} \code{RV} \code{VA} \code{VI} \code{YL}}
\item{\code{ad}}{a factor with levels \code{0} \code{AF} \code{AT} \code{IL} \code{LI} \code{LK} \code{LS} \code{NL} \code{RL} \code{VL}}
\item{\code{be}}{a factor with levels \code{0} \code{AE} \code{KE} \code{QN} \code{RQ}}
\item{\code{cf}}{a factor with levels \code{0} \code{SA}}
\item{\code{dg}}{a factor with levels \code{0} \code{EH} \code{LE}}
\item{\code{ea}}{a factor with levels \code{0} \code{IV} \code{YA}}
\item{\code{ae}}{a factor with levels \code{0} \code{KI} \code{KQ} \code{LE} \code{LT} \code{NR} \code{YF}}
\item{\code{bf}}{a factor with levels \code{0} \code{AK} \code{ED} \code{IH} \code{VT}}
\item{\code{cg}}{a factor with levels \code{0} \code{DR} \code{EH} \code{EI} \code{HE} \code{MA} \code{QE} \code{VT}}
\item{\code{da}}{a factor with levels \code{0} \code{II} \code{IL} \code{IS} \code{KI} \code{LK} \code{LN} \code{LR} \code{LV} \code{NI} \code{VI} \code{YA} \code{YT}}
}
}
\source{DOI: 10.1093/bioinformatics/bts648}
\references{
Thomas L. Vincent, Peter J. Green and Dek N. Woolfson. "LOGICOIL-multi-state prediction of coiled-coil oligomeric state", 29(1), pp69-76, Bioinformatics, (2013).
}
\examples{
data(pssm)
head(pssm, 10)
}
\keyword{datasets}
|
4337c64ad3efc747a5241141b2bd8657af0444a1 | dedf1e4fc2c6b7bc13144ef8c6a947397ded70c2 | /R/convert_data.R | d87e2c51efb840c7fa87453b7c5d8c14135fb2eb | [] | no_license | bips-hb/survnet | b345f04596471f63a1a19627e7ba0219206c9467 | 99ac8a5a0071dd770195b0c8dceb7eea01fc080f | refs/heads/master | 2021-06-05T18:45:24.047076 | 2021-06-03T12:55:23 | 2021-06-03T12:55:23 | 147,163,906 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,285 | r | convert_data.R |
#' Create binary response matrix for survival data
#'
#' Creates matrix with at-risk and event information. Format: (S_1, ..., S_K,
#' E_1, ..., E_K). Dimensions: obs X 2*causes*time.
#'
#' @param time Survival time
#' @param status Censoring indicator: 0 for censored observations, positive values for events.
#' @param breaks Right interval limits for discrete survival time.
#' @param num_causes Number of competing risks.
#'
#' @return Binary response matrix.
#' @export
convert_surv_cens <- function(time, status, breaks, num_causes) {
  n <- length(time)
  num_breaks <- length(breaks)

  # At-risk (S) and event (E) indicators: obs x intervals x causes
  S <- array(0, dim = c(n, num_breaks, num_causes))
  E <- array(0, dim = c(n, num_breaks, num_causes))

  warn <- FALSE
  # seq_len() (not 1:n) so that n == 0 skips the loop instead of iterating
  # over c(1, 0)
  for (i in seq_len(n)) {
    # Intervals that the observation survives completely
    idx <- time[i] > breaks
    S[i, which(idx), ] <- 1
    if (any(!idx)) {
      # Interval containing the event/censoring time: at risk, not survived
      S[i, min(which(!idx)), ] <- 0
      # Mark the event for the observed cause (status 0 = censored, no event)
      if (status[i] > 0) {
        E[i, min(which(!idx)), status[i]] <- 1
      }
    } else {
      # Event time beyond the right-most interval limit; warn below and treat
      # the observation as censored
      warn <- TRUE
    }
  }
  if (warn) {
    warning("One or more event times larger than right-most interval limit, setting to censored.")
  }

  # Reshape to (S_1, ..., S_K, E_1, ..., E_K): obs x (2 * causes * intervals).
  # matrix(..., nrow, ncol) instead of plain S[, , i]: array slicing drops
  # dimensions for n == 1, which previously collapsed the single-observation
  # result into the wrong shape.
  S <- do.call(cbind, lapply(seq_len(num_causes), function(i) {
    matrix(S[, , i], nrow = n, ncol = num_breaks)
  }))
  E <- do.call(cbind, lapply(seq_len(num_causes), function(i) {
    matrix(E[, , i], nrow = n, ncol = num_breaks)
  }))
  cbind(S, E)
}
|
7ea940faeac8911cacbc2f9ac5fc30ea7ed5b8a0 | 06464f8bb8aa0a555cbe5ecce40400dbcb5964e4 | /R/deprecated/economic-index.R | 8db4e1cefd0348451b10f695147c5bb059f516a2 | [
"Apache-2.0"
] | permissive | bcgov/bc-econ-status-indices | 36476d58abb2218a37c32aa95a8c06f7e229e475 | b7ba948db2eadbfdeb27b6461308dac53124d20c | refs/heads/master | 2021-06-10T12:35:22.604824 | 2021-05-03T19:03:44 | 2021-05-03T19:03:44 | 179,336,900 | 3 | 0 | Apache-2.0 | 2020-04-15T00:40:54 | 2019-04-03T17:20:39 | R | UTF-8 | R | false | false | 2,497 | r | economic-index.R | # Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
## Source setup and function scripts
if (!exists(".setup_sourced")) source(here::here("R/setup.R"))
#-------------------------------------------------------------------------------
# read in synthetic data
# NOTE(review): setwd() returns the PREVIOUS working directory, and `working`
# is immediately overwritten by file.path() below -- presumably only the side
# effect of changing the directory is wanted here; confirm and simplify
working <- setwd("~/bc-econ-status-indices/input-data")
working <- file.path("~/bc-econ-status-indices/input-data")
list.files(working)
# tax: individual tax records; dip: synthetic DIP study records
tax <- fread("1_IND.csv")
dip <- read.csv("synthetic-dip-data.csv")
# Urban quintile
# cleanup the tax data: keep urban geography level 61 and assign income
# quintiles (UQs) from median total income
clean_taxdata_u <- tax %>%
  filter(`level|of|geo` == 61) %>%
  select(`level|of|geo`, `total|income|median|total`, `year`, `postal|area`) %>%
  mutate(UQs = ntile(`total|income|median|total`, 5)) %>%
  mutate(year = as.numeric(`year`)) %>%
  mutate(geo = `level|of|geo`) %>%
  mutate(pc = as.factor(`postal|area`)) %>%
  select(UQs, year, geo, pc)
# cleanup the dip data: derive year from date, align key column types
clean_dipdata_u <- dip %>%
  mutate(date = as.Date(date)) %>%
  mutate(year = as.numeric(format(date, "%Y"))) %>%
  mutate(pc = as.factor(pc)) %>%
  select(studyid, pc, year, geo)
# integrate the two datasets
integrate_dipdata_u <- inner_join(clean_taxdata_u, clean_dipdata_u, by = c("geo", "year", "pc"))
#-------------------------------------------------------------------------------
# Rural Quintile
# cleanup the tax data: same as above but geography level 9 and RQs quintiles
clean_taxdata_r <- tax %>%
  filter(`level|of|geo` == 9) %>%
  select(`level|of|geo`, `total|income|median|total`, `year`, `postal|area`) %>%
  mutate(RQs = ntile(`total|income|median|total`, 5)) %>%
  mutate(year = as.numeric(`year`)) %>%
  mutate(geo = `level|of|geo`) %>%
  mutate(pc = as.factor(`postal|area`)) %>%
  select(RQs, year, geo, pc)
# cleanup the dip data
# NOTE(review): identical to clean_dipdata_u above -- could be computed once
clean_dipdata_r <- dip %>%
  mutate(date = as.Date(date)) %>%
  mutate(year = as.numeric(format(date, "%Y"))) %>%
  mutate(pc = as.factor(pc)) %>%
  select(studyid, pc, year, geo)
integrate_dipdata_r <- inner_join(clean_taxdata_r, clean_dipdata_r, by = c("geo", "year", "pc"))
|
68017dac952d265db5059a825ade4ac780f6027c | 7dc4161819947091b1d15533e7d3fba4f4497885 | /First Semester/2013_MWeekday.R | 6edb164f32346f042a76c6f64df9dbec7ebe1844 | [] | no_license | StanleyLin-TW/IM-Project | 40cf622cefa0cc7a02e98aa3f2035107b43da440 | c8ce2e9cb0ae9cbd46bfa0ec57e09a7ef562eefd | refs/heads/master | 2020-04-08T04:21:31.015276 | 2019-01-09T07:34:29 | 2019-01-09T07:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 401 | r | 2013_MWeekday.R | library(ggplot2)
# Generate the data: 2013 male weekday frequencies, one value per weekday
A <- read.table("/Users/StanleyLIn/Desktop/專題研究/2013Mfreq_wd")
print(A)
# Strip the column names of the single-column table
names(A) <- NULL
print(A)
# gp = weekday index (1 = Mon .. 7 = Sun)
df <- data.frame(gp = 1:7, y = A)
# Set aesthetic attributes and assign geometric objects
# NOTE(review): aes(y = A) resolves `A` from the global environment (a data
# frame), not a column of `df` -- presumably aes(y = y) was intended; verify
# that the plot renders as expected
ggplot(df, aes(x = gp, y = A))+geom_line(color="#CAA661")+ scale_x_discrete(limits=c("Mon","Tue","Wed","Thu","Fri","Sat","Sun"))+ggtitle("2013_male_Weekdays", subtitle = NULL)
|
d8a8687bfd04712b40dc48c1f29750b04f85b12a | 6a10a4fa507a99f20302af79377215218b1633d2 | /scripts/xgboost2.R | 2ec2711a142bf1803f74701fe52a2f8c9372fa0f | [] | no_license | sahilthapar/dataminingproject | 341bb9107a92bd9ed44fd7686ff23c84579fa4af | fec41687e8a18182df87f60e5731c8ccdf000c3b | refs/heads/master | 2021-01-18T20:19:45.358979 | 2017-04-27T15:33:03 | 2017-04-27T15:33:03 | 86,957,566 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,285 | r | xgboost2.R | library(readr)
# Load the raw train/test/store data with explicit column type strings.
train.raw <- read_csv('./data/train.csv', col_types="ncDnncccc")
test.raw <- read_csv('./data/test.csv', col_types="nncDcccc")
store.raw <- read_csv('./data/store.csv', col_types='nccnnncnnc', na='')
# Convert the categorical columns of each table to factors.
for (col in c("DayOfWeek", "Open", "Promo", "StateHoliday", "SchoolHoliday")) {
  train.raw[[col]] <- as.factor(train.raw[[col]])
}
for (col in c("StoreType", "Assortment", "Promo2")) {
  store.raw[[col]] <- as.factor(store.raw[[col]])
}
# dplyr for data manipulation, lubridate for date parts.
# Use library() rather than require(): a missing package should fail loudly
# here instead of surfacing later as "could not find function" errors
# (require() only returns FALSE with a warning).
library(dplyr)
library(lubridate)
# Join datasets by `Store` column.
train.full <- left_join(train.raw, store.raw, by = 'Store')
# Keep only open stores with positive sales, derive date parts, and add the
# log-sales target (the model is trained on the log scale; see rmpse below).
train.full <- train.full %>%
  filter(Sales > 0) %>%
  filter(Open == 1) %>%
  mutate(Day = lubridate::day(Date)) %>%
  mutate(Month = lubridate::month(Date)) %>%
  mutate(Year = lubridate::year(Date)) %>%
  mutate(LogSales = log(Sales))
# Drop columns not used as features (Date is replaced by Day/Month/Year;
# Sales is replaced by LogSales; Customers is unavailable at predict time).
train.full$Date <- NULL
train.full$Promo2SinceWeek <- NULL
train.full$PromoInterval <- NULL
train.full$Sales <- NULL
train.full$Customers <- NULL
# Remove columns below due to many NAs.
train.full$CompetitionOpenSinceYear <- NULL
train.full$CompetitionOpenSinceMonth <- NULL
train.full$Promo2SinceYear <- NULL
# Impute missing CompetitionDistance with the column mean.
meanCompetitionDistance <- mean(train.full$CompetitionDistance, na.rm = TRUE)
train.full[is.na(train.full$CompetitionDistance), c("CompetitionDistance")] <- meanCompetitionDistance
# library() (not require()) so missing dependencies fail immediately.
library(Matrix)
# One-hot encode predictors into a sparse model matrix (no intercept term);
# LogSales on the left-hand side is excluded and used as the label below.
train.full.sparse <- sparse.model.matrix(LogSales~.-1, data=train.full)
library(xgboost)
dtrain <- xgb.DMatrix(
  data=train.full.sparse,
  label=train.full$LogSales)

# Custom evaluation metric: Root Mean Square Percentage Error (RMSPE).
# Labels and predictions are on the log scale, so both are exponentiated
# back to raw sales before computing the relative error.
#
# preds:  numeric vector of model predictions (log scale)
# dtrain: xgb.DMatrix holding the true labels (log scale)
# Returns a list(metric, value) as required by xgboost's feval interface.
rmpse <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  elab <- exp(as.numeric(labels))
  epreds <- exp(as.numeric(preds))
  err <- sqrt(mean((epreds / elab - 1)^2))
  list(metric = "RMPSE", value = err)
}
# Booster hyper-parameters for the gradient-boosted tree model.
param <- list(
  objective="reg:linear",
  booster="gbtree",
  eta=0.8, # Control the learning rate
  max.depth=8, # Maximum depth of the tree
  subsample=0.7, # subsample ratio of the training instance
  colsample_bytree=0.7 # subsample ratio of columns when constructing each tree
)
# 3-fold cross-validation using the custom RMPSE metric; stops early if the
# validation score worsens for 30 consecutive rounds.
history <- xgb.cv(
  data=dtrain,
  params = param,
  early_stopping_rounds=30, # training with a validation set will stop if the performance keeps getting worse consecutively for k rounds
  nthread=4, # number of CPU threads
  nround=30, # number of trees
  verbose=0, # do not show partial info
  nfold=3, # number of CV folds
  feval=rmpse, # custom evaluation metric
  maximize=FALSE # the lower the evaluation score the better
)
# Pull the per-iteration train/test RMPSE means from the CV evaluation log.
x<- history$evaluation_log$iter
y1<- history$evaluation_log$train_RMPSE_mean
y2<- history$evaluation_log$test_RMPSE_mean
# NOTE(review): xgb.plot.tree expects a trained xgb.Booster model; `history`
# is an xgb.cv result, so this call looks wrong — confirm whether a model
# fitted with xgb.train/xgboost was meant to be plotted here.
xgb.plot.tree(feature_names = NULL, model = history, n_first_tree = NULL,
              plot_width = NULL, plot_height = NULL)
# Learning curves: training and test RMPSE versus number of boosting rounds.
ggplot() + geom_line(aes(x=x,y=y1), color = "blue", size=1) +
  labs(x="No. of trees", y="RMPSE", title = "Training RMPSE with 3 fold CV")
ggplot() + geom_line(aes(x=x,y=y2), color = "red", size=1)+
  labs(x="No. of trees", y="RMPSE", title = "Test RMPSE with 3 fold CV")
|
89701c633f7a22818da73da5d9f2055f731ddeea | 1a8b54238141f92403b9306e49c7c24964705247 | /man/gym_type.Rd | 72ca408a2a9bf52358992b6dba8edf14216827e4 | [
"MIT"
] | permissive | EmanuelHark12/pkmnR | f790f0f3473859ea686b2d4d85ef4549bf5c6d68 | 7d3a8fc2fa55009b349741801314a1242e6932af | refs/heads/master | 2023-05-04T08:19:08.288495 | 2021-05-29T01:32:12 | 2021-05-29T01:32:12 | 351,294,000 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 534 | rd | gym_type.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poke_type.R
\name{gym_type}
\alias{gym_type}
\title{gym_type}
\usage{
gym_type(tipo)
}
\arguments{
\item{tipo}{the pokemon type.}
}
\value{
The data frame of gym leaders of a given type.

Pokémon and Pokémon character names are trademarks of Nintendo. The data were collected from https://www.serebii.net/.
}
\description{
This function filters all the gym leaders of a given type from the data
collected by the function poke_all_leaders.
}
|
f69fd1eced39564eea8ed9293479ce1526472b1c | ffc36327dbb1de374d190a702f9a677e27663afa | /R/osler_package_table.R | 504d91d7670c8ecb08ae071606c5fe9acd782ca2 | [] | no_license | muschellij2/oslerInstall | 0d8e07bb26c853cba118c9cc9f7658509db2d99d | b6e9570a55fdf04999ab9cff48aa3877643539ce | refs/heads/master | 2021-01-15T10:57:39.233621 | 2017-08-07T17:22:01 | 2017-08-07T17:22:01 | 99,600,597 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,205 | r | osler_package_table.R | #' @title OSLER Package Table
#' @description Returns the table of OSLER packages
#' @return \code{data.frame} of packages with commit IDs
#' @param path Path to the table of package
#' @param long Should the data be "long" (with respect to stable/current)
#' @export
#'
#' @note Package information is obtained from
#' \url{"https://oslerinhealth.org/oslerPackages"}
#'
#' @importFrom stats reshape
#' @examples
#' osler_package_table()
osler_package_table = function(
path = "https://oslerinhealth.org/oslerPackages",
long = FALSE
) {
#############################
## grab list of current OSLER packages
#############################
args = list(file = path,
stringsAsFactors = FALSE, header = TRUE,
na.strings = "")
suppressWarnings({
tab = try( {
do.call("read.csv", args)
} , silent = TRUE)
})
if (inherits(tab, "try-error")) {
args$file = gsub("^https", "http", args$file)
tab = do.call("read.csv", args)
}
colnames(tab) = c("repo",
"version.stable",
"osler_version.stable",
"commit_id.stable",
"version.current",
"osler_version.current",
"commit_id.current")
tab$v = package_version(tab$version.stable)
ss = split(tab, tab$repo)
ss = lapply(ss, function(x) {
x = x[ order(x$v, decreasing = TRUE), ]
x = x[1,,drop = FALSE]
x$v = NULL
x
})
tab = do.call("rbind", ss)
tab = as.data.frame(tab, stringsAsFactors = FALSE)
rownames(tab) = NULL
if (long) {
cn = colnames(tab)
varying = cn[ cn != "repo"]
tab = reshape(data = tab, direction = "long", idvar = "repo", varying = varying,
times = c("current", "stable"), timevar = "release")
rownames(tab) = NULL
}
return(tab)
}
#' @title OSLER Packages
#' @description Returns the vector of OSLER packages
#' @return \code{vector} of unique package names available on OSLER
#' @param ... Arguments passed to \code{\link{osler_package_table}}
#'
#' @export
#'
#' @examples
#' osler_packages()
osler_packages = function(...) {
  pkgs = osler_package_table(...)$repo
  return(unique(pkgs))
}
|
dbee2b01769d4d231c53a4617b8a2fe7548514cf | 29d87698c80e23cad4d31dafad48fee6a4e899fb | /R/geodetic.R | 6e7fb99f3e5802a10d2a3635a481acb8572f17e9 | [] | no_license | fostergeotech/Vs30_NZ | 56459df71b8d0148bf89cfe548a78b5f707c69bf | 2760af63199f48ed326e370ccfd9ec8a78891aa2 | refs/heads/master | 2020-04-10T15:42:52.496050 | 2020-01-16T22:39:57 | 2020-01-16T22:39:57 | 161,119,831 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,048 | r | geodetic.R | # geodetic.R
# Just geodetic conversions. For applying to objects of type sp*
# (spatialPointsDataFrame, spatialPolygons, etc.)
# Note to self - NZ coordinate systems described here
# http://gis.stackexchange.com/questions/20389/converting-nzmg-or-nztm-to-latitude-longitude-for-use-with-r-map-library
library(sp)
# Reproject any sp* object (SpatialPointsDataFrame, SpatialPolygons, ...) to
# New Zealand Map Grid (NZMG, on the NZGD49 datum).
#
# inp: an sp* object with a defined CRS.
# Returns the same object transformed to the NZMG coordinate system.
convert2NZMG <- function(inp) {
  # Name the argument in full: the original `CRS =` only worked through R's
  # partial matching against spTransform's `CRSobj` parameter.
  NZMGdata <- spTransform(inp, CRSobj = crsNZMG())
  return(NZMGdata)
}
# NZGD49 is the datum underlying NZMG, so the conversion is identical.
convert2NZGD49 <- convert2NZMG
# Reproject any sp* object to NZ Transverse Mercator 2000 (NZGD2000).
# Returns the transformed object.
convert2NZGD00 <- function(inp) {
  spTransform(inp, crsNZGD00())
}
# Reproject any sp* object to geographic WGS84 (lon/lat).
# Returns the transformed object.
convert2WGS84 <- function(inp) {
  spTransform(inp, crsWGS84())
}
# CRS for New Zealand Map Grid (NZMG), defined on the NZGD49 datum.
# The +towgs84 term carries the 7-parameter transformation to WGS84.
# doCheckCRSArgs = FALSE skips sp's argument validation of the proj string.
crsNZMG <- function(){
  return(CRS(paste0(
    "+proj=nzmg +lat_0=-41 +lon_0=173 +x_0=2510000 +y_0=6023150 ",
    # The "+datum=nzgd49" tag was deliberately removed from the line below:
    # with it present, the over() call in vspr.R misbehaved.
    # "+datum=nzgd49 +units=m +no_defs +ellps=intl ",
    "+units=m +no_defs +ellps=intl ",
    "+towgs84=59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993"),
    doCheckCRSArgs = FALSE))
}
# NZGD49 shares the NZMG definition above.
crsNZGD49 <- function() {
  return(crsNZMG())
}
# CRS for NZ Transverse Mercator 2000 (NZGD2000), also known as EPSG:2193.
# Proj string taken from:
# http://www.linz.govt.nz/data/linz-data-service/guides-and-documentation/using-lds-xyz-services-in-leaflet
crsNZGD00 <- function() {
  return(CRS(paste0("+proj=tmerc +lat_0=0 +lon_0=173 +k=0.9996 ",
                    "+x_0=1600000 +y_0=10000000 +ellps=GRS80 ",
                    "+towgs84=0,0,0,0,0,0,0 +units=m +no_defs")))
}
# A note on the parameter strings for WGS84
# from http://lists.maptools.org/pipermail/proj/2009-February/004446.html
# Instead of using
# +proj=latlong +ellps=WGS84 +towgs84=0,0,0
# you can use
# +proj=latlong +datum=WGS84
# which is equivalent.
# Geographic (lon/lat) CRS on the WGS84 datum and ellipsoid.
# doCheckCRSArgs = FALSE skips sp's argument validation of the proj string.
crsWGS84 <- function() {
  CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs", doCheckCRSArgs = FALSE)
}
b4563b400e75333829ed12e410318c75c1fa12e3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/XVector/examples/XVector-class.Rd.R | 37e33d800ada854ccb93417f8a9a3727b527318c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,824 | r | XVector-class.Rd.R | library(XVector)
# Example script demonstrating XRaw and XInteger objects, extracted from the
# XVector package help page "XVector-class".
### Name: XVector-class
### Title: XVector objects
### Aliases: class:XVector XVector-class XVector length,XVector-method
###   bindROWS,XVector-method subseq subseq<- subseq,XVector-method
###   subseq<-,XVector-method as.numeric,XVector-method show,XVector-method
###   ==,XVector,XVector-method class:XRaw XRaw-class XRaw
###   coerce,raw,XRaw-method coerce,raw,XVector-method
###   coerce,numeric,XRaw-method as.raw,XRaw-method as.integer,XRaw-method
###   as.vector,XRaw-method class:XInteger XInteger-class XInteger
###   coerce,numeric,XInteger-method coerce,integer,XVector-method
###   as.integer,XInteger-method as.vector,XInteger-method class:XDouble
###   XDouble-class XDouble XNumeric coerce,numeric,XDouble-method
###   coerce,numeric,XVector-method as.numeric,XDouble-method
###   as.vector,XDouble-method show,XDouble-method
### Keywords: methods classes

### ** Examples

## ---------------------------------------------------------------------
## A. XRaw OBJECTS
## ---------------------------------------------------------------------

x1 <- XRaw(4) # values are not initialized
x1
# Coerce a numeric vector to XRaw.
x2 <- as(c(255, 255, 199), "XRaw")
x2
y <- c(x1, x2, NULL, x1) # NULLs are ignored
y
subseq(y, start=-4)
# Replacement form: write x2 into the selected subsequence of y.
subseq(y, start=-4) <- x2
y

## ---------------------------------------------------------------------
## B. XInteger OBJECTS
## ---------------------------------------------------------------------

x3 <- XInteger(12, val=c(-1:10))
x3
length(x3)

## Subsetting
x4 <- XInteger(99999, val=sample(99, 99999, replace=TRUE) - 50)
x4
# subseq() with various combinations of start/end/width arguments.
subseq(x4, start=10)
subseq(x4, start=-10)
subseq(x4, start=-20, end=-10)
subseq(x4, start=10, width=5)
subseq(x4, end=10, width=5)
subseq(x4, end=10, width=0)
# Reverse-order subsetting.
x3[length(x3):1]
x3[length(x3):1, drop=FALSE]
|
2c10b5abbe25ba57a88489119602003122bfd2a7 | ff1e9b73b54de979d7615de579a3783eab0c6677 | /r/two-fer/two-fer.R | e78b1f4aad84f175acb9cf23e570f0ac18983379 | [] | no_license | jdm79/exercism | f40119aff56f70c6889162e6951fca25bef44bb6 | d6dd3650fa9c606988890aa4767e2ffe1299b6a9 | refs/heads/master | 2023-01-13T20:51:13.289018 | 2019-08-01T15:35:16 | 2019-08-01T15:35:16 | 165,097,791 | 0 | 0 | null | 2023-01-04T20:57:13 | 2019-01-10T16:57:02 | Swift | UTF-8 | R | false | false | 80 | r | two-fer.R | two_fer <- function(input="you") {
sprintf("One for %s, one for me.",input)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.