blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
874e1b1593c203b8d55c08f7820ca54d6e240692 | 2764167b5743be62adadc491ec7dfde210e0703d | /R/linesGEOmapXY.R | 84bdb752f90a00de48db40b642315398677e43d8 | [] | no_license | cran/GEOmap | 528a4cbe293211d324405037eb280b415e65f62e | 0149894022496cee8237868b0bb693d00ef01e41 | refs/heads/master | 2023-08-18T14:47:52.021469 | 2023-08-13T12:40:21 | 2023-08-13T13:30:31 | 17,713,753 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 324 | r | linesGEOmapXY.R | linesGEOmapXY<-function(lat=0, lon=0, PROJ=NULL, PMAT=NULL, ... )
{
# Draw connected line segments at the given geographic coordinates on the
# current plot.
#
# lat, lon : numeric vectors of geographic coordinates.
# PROJ     : projection definition handed to GLOB.XY() to convert lat/lon
#            into planar x/y coordinates.
# PMAT     : optional perspective matrix; when non-NULL the planar points
#            are mapped into the 3-D plotting space with trans3d() at z = 0.
# ...      : further graphical parameters forwarded to lines().
#
# Redundant given the PMAT=NULL default, but preserved for callers that
# rely on missing() semantics.
if(missing(PMAT)) { PMAT=NULL }
pxy = GLOB.XY(lat, lon, PROJ)
if(!is.null(PMAT))
{
# Project the 2-D points onto the perspective plot in the z = 0 plane.
tem1 = trans3d(pxy$x, pxy$y, rep(0, length(pxy$y)) , PMAT)
}
else
{
tem1 =pxy
}
lines(tem1, ...)
}
|
ad180e011635588840afcdeae73ff1360b69230f | 4ea866e3c680c7b4b00ec834e51f46416bf3c421 | /atac_peak_annotation.r | f12ea015d385fa13e76f0ca7f56639b7e43343de | [] | no_license | rywet56/GenomicsPeakAnnotation | d92d9f1f22a90fdf2a98f43433ddb720852da536 | 253c103b43d677ab5f9c6a60828c592816c5dccd | refs/heads/master | 2023-07-03T07:20:58.575118 | 2021-08-04T09:32:41 | 2021-08-04T09:32:41 | 380,727,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,670 | r | atac_peak_annotation.r | library(GenomicRanges)
library(annotate)
library(org.At.tair.db)
############################################################
# Process GTF file and Add introns to GTF file
############################################################
# # Get GTF file
# ##############
# # Read in the raw gtf file and process it so that the columns make sense
# path <- "/Users/manuel/Downloads/Araport11_GTF_genes_transposons.Mar202021.gtf"
# gtf_df <- read.table(file = path)
# gtf_df$V9 <- NULL; gtf_df$V11 <- NULL; gtf_df$V12 <- NULL; gtf_df$V14 <- NULL
# col_names <- c("chr", "src", "feature", "start", "end", "score", "strand", "readingframe", "transcript_id", "gene_id")
# colnames(gtf_df) <- col_names
# ##############
#
# # Add introns to GTF DataFrame
# ##############
# gtf_introns_df <- rbind(gtf_df, intron_df)
# path <- '/Volumes/work_storg/xiaocai_project/domain_atac_seq/gtf_introns.csv'
# write.csv(x = gtf_introns_df, file = path)
# ##############
############################################################
# All functions for annotation
############################################################
find_tair <- function(tair_id, mtx){
  # Locate a gene (or genes) inside the annotated columns of `mtx`.
  #
  # tair_id : TAIR id(s) to search for.
  # mtx     : matrix/data frame whose "tair" columns hold gene annotations.
  #
  # Returns a named list mapping each matching column name to the row
  # indices where `tair_id` occurs, or NULL when no column contains it.
  candidate_cols <- grep("tair", colnames(mtx), value = TRUE)
  hits <- NULL
  for (col in candidate_cols) {
    row_idx <- which(mtx[, col] %in% tair_id)
    if (length(row_idx) > 0) {
      hits[[col]] <- row_idx
    }
  }
  return(hits)
}
get_introns <- function(gtf_df){
  # Derive intron records from the exon records of each transcript.
  #
  # For every transcript in `gtf_df`, each pair of consecutive exons defines
  # one intron. The intron row copies all columns of that transcript's first
  # exon row, with `feature` set to "intron" and coordinates spanning the gap
  # between exon i and exon i + 1.
  #
  # Coordinates keep the convention used throughout this file: intron start =
  # end of exon i, intron end = start of exon i + 1 (i.e. they include the
  # flanking exon boundary bases). Exons are assumed to appear in genomic
  # order within each transcript -- TODO confirm against the GTF source.
  #
  # BUG FIX: the previous implementation built each intron row via
  # as.character(d[1, ]), coercing every column (including start/end) to
  # character; after rbind()-ing onto `gtf_df` the start/end arithmetic in
  # get_Intron() would then fail. Rows are now one-row data frames so all
  # column types are preserved.
  intron_df <- NULL
  for (transcript in unique(gtf_df$transcript_id)) {
    exons <- gtf_df[gtf_df$transcript_id == transcript & gtf_df$feature == 'exon', ]
    no_introns <- nrow(exons) - 1
    if (nrow(exons) > 0 & no_introns > 0) {
      for (i in seq_len(no_introns)) {
        # Template row: first exon of the transcript, then patch the
        # feature label and the coordinates.
        intron_row <- exons[1, ]
        intron_row$feature <- 'intron'
        intron_row$start <- exons[i, "end"]
        intron_row$end <- exons[i + 1, "start"]
        intron_df <- rbind(intron_df, intron_row)
      }
    }
  }
  if (is.null(intron_df)) {
    # Robustness fix: no multi-exon transcript -> return an empty data frame
    # with the input's columns instead of erroring on rownames(NULL).
    intron_df <- gtf_df[0, ]
  } else {
    # Preserve the original row-name scheme: continue numbering after the
    # last row of the input GTF.
    rownames(intron_df) <- (nrow(gtf_df) + 1):(nrow(gtf_df) + nrow(intron_df))
  }
  return(intron_df)
}
get_genes <- function(gtf_df){
  # Extract gene and transposable-element-gene records from a GTF data frame.
  # Keeps only the coordinate/identity columns and appends SUMMIT, the
  # interval midpoint used downstream for nearest-region assignment.
  is_gene <- gtf_df$feature == 'gene' | gtf_df$feature == 'transposable_element_gene'
  gene_df <- gtf_df[is_gene, c('chr', 'start', 'end', 'strand', 'gene_id')]
  gene_df[['SUMMIT']] <- (gene_df[['start']] + gene_df[['end']]) / 2
  return(gene_df)
}
get_promoters <- function(gtf_df, prom_region=c(-1000, 200)){
  # Build promoter windows around each gene's transcription start site (TSS).
  #
  # prom_region gives the window relative to the TSS in transcription
  # direction, e.g. c(-1000, 200) = 1 kb upstream to 200 bp downstream.
  # On "+" strands the TSS is the gene start; on "-" strands it is the gene
  # end and the window is mirrored.
  genes <- gtf_df[gtf_df$feature == 'gene', ]
  on_plus <- genes$strand == "+"
  tss <- ifelse(on_plus, genes$start, genes$end)
  prom_start <- ifelse(on_plus, tss + prom_region[1], tss - prom_region[2])
  prom_end <- ifelse(on_plus, tss + prom_region[2], tss - prom_region[1])
  prom_df <- data.frame(tss = tss, start = prom_start, end = prom_end,
                        strand = genes$strand, chr = genes$chr, tair = genes$gene_id)
  # Midpoint of the promoter window, used for nearest-region assignment.
  prom_df[['SUMMIT']] <- (prom_df$start + prom_df$end) / 2
  return(prom_df)
}
get_promoter_downstream <- function(gtf_df, prom_region=c(0, 1000)){
  # Build windows downstream of each gene's transcription end site (TES).
  #
  # prom_region is relative to the TES in transcription direction, e.g.
  # c(0, 1000) = the first kilobase past the gene end. On "+" strands the
  # TES is the gene end; on "-" strands it is the gene start and the window
  # is mirrored.
  genes <- gtf_df[gtf_df$feature == 'gene', ] # maybe replace with get_genes() and include transposable genes
  on_plus <- genes$strand == "+"
  tes <- ifelse(on_plus, genes$end, genes$start)
  win_start <- ifelse(on_plus, tes + prom_region[1], tes - prom_region[2])
  win_end <- ifelse(on_plus, tes + prom_region[2], tes - prom_region[1])
  prom_df <- data.frame(tes = tes, start = win_start, end = win_end,
                        strand = genes$strand, chr = genes$chr, tair = genes$gene_id)
  # Midpoint of the window, used for nearest-region assignment.
  prom_df[['SUMMIT']] <- (prom_df$start + prom_df$end) / 2
  return(prom_df)
}
# Extract all rows of `gtf_df` whose `feature` column equals `feature_name`,
# keeping only the coordinate/identity columns and adding SUMMIT (the
# interval midpoint used downstream for nearest-region assignment).
# Shared implementation for the get_*() accessors below, which previously
# repeated this exact body nine times.
get_feature_df <- function(gtf_df, feature_name){
  feature_df <- gtf_df[gtf_df$feature == feature_name, c('chr', 'start', 'end', 'strand', 'gene_id')]
  feature_df[, 'SUMMIT'] <- (feature_df$start + feature_df$end) / 2
  return(feature_df)
}
# Each accessor keeps its original name and signature; only the GTF feature
# string differs.
get_5UTR <- function(gtf_df) get_feature_df(gtf_df, "five_prime_UTR")
get_3UTR <- function(gtf_df) get_feature_df(gtf_df, "three_prime_UTR")
get_CDS <- function(gtf_df) get_feature_df(gtf_df, "CDS")
get_Exon <- function(gtf_df) get_feature_df(gtf_df, "exon")
get_Intron <- function(gtf_df) get_feature_df(gtf_df, "intron")
get_lncRNA <- function(gtf_df) get_feature_df(gtf_df, "lnc_RNA")
get_ncRNA <- function(gtf_df) get_feature_df(gtf_df, "nc_RNA")
get_snoRNA <- function(gtf_df) get_feature_df(gtf_df, "snoRNA")
get_miRNA <- function(gtf_df) get_feature_df(gtf_df, "miRNA")
# Overlap peaks with a set of genomic regions and annotate the hits.
#
# Appends to `peaks_df`:
#   * a logical column named after `genomic_feature` (TRUE = the peak
#     overlaps at least one region),
#   * for promoter features: the target gene id (<feature>_tair) plus a
#     strand-aware distance of the peak summit to the gene TSS/TES,
#   * for 'next_gene': the closest gene id and a summit-to-summit distance,
#   * otherwise (when add_tair=TRUE and regions carry gene_id): <feature>_tair.
#
# Rows of the returned data frame are reordered (non-hits first, then hits),
# and a peak overlapping several regions yields several output rows unless
# unique_assignment keeps only the region whose summit is closest.
get_overlaps <- function(peaks_df, regions_df, genomic_feature, summit=TRUE,
                         unique_assignment=TRUE, next_gene_range = c(-2000, 2000),
                         add_tair=TRUE, add_target_index=FALSE){
  # peaks_df <- peaks_df
  # regions_df <- regions_df
  # genomic_feature <- "prom"
  # calculate peak summit
  SUMMIT <- (peaks_df$start + peaks_df$end)/2
  peaks_df[,'SUMMIT'] <- SUMMIT
  SUMMIT <- (regions_df$start + regions_df$end)/2
  regions_df[,'SUMMIT'] <- SUMMIT
  # use peak summit to calculate overlap
  if(summit){
    # Remember the real peak boundaries; they are restored before returning.
    peaks_df$start_save <- peaks_df$start
    peaks_df$end_save <- peaks_df$end
    peaks_df$start <- peaks_df$SUMMIT
    peaks_df$end <- peaks_df$SUMMIT
  }
  if(genomic_feature == 'next_gene'){
    # span region around peak summit
    peaks_df$start <- peaks_df$SUMMIT + next_gene_range[1]
    peaks_df$end <- peaks_df$SUMMIT + next_gene_range[2]
  }
  # transform to GR object
  peaks_gr <- GenomicRanges::makeGRangesFromDataFrame(peaks_df)
  regions_gr <- GenomicRanges::makeGRangesFromDataFrame(regions_df)
  # NOTE(review): this rounding happens AFTER the GRanges objects above were
  # built, so it only affects the start/end values reported in the output,
  # not the overlap computation itself -- confirm that this is intended.
  peaks_df$start <- round(peaks_df$start, 0)
  peaks_df$end <- round(peaks_df$end, 0)
  # regions_gr[1:5,]
  # find overlaps of peaks with TSS regions
  res <- IRanges::findOverlaps(query = peaks_gr, subject = regions_gr)
  res <- data.frame(from=from(res), to=to(res))
  # find unique assignment based on distance of peak summit and region summit
  # res is a df which maps regions in peak_df to regions in regions_df
  # one region in peak_df can be mapped to more than one region in regions_df
  # this may make sense if one region is a promoter for two genes (regions in regions_df)
  # in this case the annotated peaks_df will contain one more row
  if(unique_assignment){
    # find distance to middle of peak
    ps <- peaks_df[res$from, 'SUMMIT']
    rs <- regions_df[res$to, 'SUMMIT']
    res[,'distance'] <- abs(ps-rs)
    # keep peak summit that is closest to region summit if there are more than one query-target pairs
    choosen_region <- NULL
    for(i in unique(res$from)){
      reg <- res[res$from == i, ]
      min_row_number <- which.min(reg$distance)
      min_row_name <- rownames(reg)[min_row_number]
      choosen_region <- c(choosen_region, min_row_name)
    }
    # subset overlaps
    res <- res[choosen_region, ]
  }
  # get regions for which no hits were found
  rows_hit <- unique(res$from); rows_hit <- 1:nrow(peaks_df) %in% rows_hit
  # get part of peaks_df for which no hits were found
  pd_nohits <- peaks_df[!rows_hit,]
  pd_nohits[, genomic_feature] <- rep(FALSE, nrow(pd_nohits))
  # get part of peaks_df for which hits were found
  pd_hits <- peaks_df[res$from,] # res$from could be replaced by rows_hit
  pd_hits[, genomic_feature] <- rep(TRUE, nrow(pd_hits))
  if(add_target_index){
    print("Adding row index of target...")
    # add row index of regions_df that was connected to peak in peaks_df
    pd_hits[, "hit_row_idx"] <- res$to
    pd_nohits[, "hit_row_idx"] <- -1
  }
  # if feature is promoter, add the genes that regions are promoter for
  if(genomic_feature %in% c("promoter_1kb", "promoter_2kb", "promoter_3kb")){
    gene_type <- paste0(genomic_feature, "_tair")
    pd_nohits[, gene_type] <- rep("", nrow(pd_nohits))
    pd_hits[, gene_type] <- regions_df[res$to, "tair"]
    # add distance to TSS of gene
    dist_name <- paste0('dist_tss_', genomic_feature)
    rd <- regions_df[res$to, ]
    # Encode strand as +/-1 so the summit-to-TSS distance below is signed
    # relative to the direction of transcription.
    strand <- rd[, "strand"]
    strand <- replace(strand, strand=="+", 1)
    strand <- replace(strand, strand=="-", -1)
    strand <- as.numeric(strand)
    pd_nohits[, dist_name] <- rep(NA, nrow(pd_nohits))
    pd_hits[strand==1, dist_name] <- pd_hits[strand==1, 'SUMMIT'] - rd[strand==1, "tss"]
    pd_hits[strand==-1, dist_name] <- rd[strand==-1, "tss"] - pd_hits[strand==-1, 'SUMMIT']
  }
  else if(genomic_feature == "promoter_1kb_downstream"){
    gene_type <- paste0(genomic_feature, "_tair")
    pd_nohits[, gene_type] <- rep("", nrow(pd_nohits))
    pd_hits[, gene_type] <- regions_df[res$to, "tair"]
    # add distance to TES of gene
    dist_name <- paste0('dist_tes_', genomic_feature)
    rd <- regions_df[res$to, ]
    # Same +/-1 strand encoding as in the promoter branch above.
    strand <- rd[, "strand"]
    strand <- replace(strand, strand=="+", 1)
    strand <- replace(strand, strand=="-", -1)
    strand <- as.numeric(strand)
    pd_nohits[, dist_name] <- rep(NA, nrow(pd_nohits))
    pd_hits[strand==1, dist_name] <- pd_hits[strand==1, 'SUMMIT'] - rd[strand==1, "tes"]
    pd_hits[strand==-1, dist_name] <- rd[strand==-1, "tes"] - pd_hits[strand==-1, 'SUMMIT']
  }
  # add gene name if next_gene is requested
  else if(genomic_feature == 'next_gene'){
    # add next closest gene
    pd_nohits[, 'next_gene_tair'] <- rep("", nrow(pd_nohits))
    pd_hits[, 'next_gene_tair'] <- regions_df[res$to, 'gene_id']
    # add distance to TSS of next closest gene
    dist_name <- paste0('dist_summit_', genomic_feature)
    rd <- regions_df[res$to, ]
    strand <- rd[, "strand"]
    strand <- replace(strand, strand=="+", 1)
    strand <- replace(strand, strand=="-", -1)
    strand <- as.numeric(strand)
    pd_nohits[, dist_name] <- rep(NA, nrow(pd_nohits))
    pd_hits[strand==1, dist_name] <- pd_hits[strand==1, 'SUMMIT'] - rd[strand==1, 'SUMMIT']
    pd_hits[strand==-1, dist_name] <- rd[strand==-1, 'SUMMIT'] - pd_hits[strand==-1, 'SUMMIT']
  }
  else {
    if(add_tair & "gene_id" %in% colnames(regions_df)){
      gene_type <- paste0(genomic_feature, "_tair")
      pd_nohits[, gene_type] <- rep("", nrow(pd_nohits))
      pd_hits[, gene_type] <- regions_df[res$to, "gene_id"]
    }
  }
  # combine separated DataFrames
  peaks_df <- as.data.frame(rbind(pd_nohits, pd_hits))
  # put back original start and end of peak if summit was used
  if(summit){
    peaks_df$start <- peaks_df$start_save
    peaks_df$end <- peaks_df$end_save
    peaks_df$start_save <- NULL
    peaks_df$end_save <- NULL
  }
  return(peaks_df)
}
# Dispatch a single genomic feature to its region-extraction function and
# run the peak/region overlap for it.
#
# Behaviour per feature class:
#   * 'next_gene'  -> gene bodies, summit-based search window, forced unique
#                     assignment (required by the distance logic downstream).
#   * promoter_*   -> promoter/downstream windows built from the matching
#                     prom_*_region argument; uniqueness controlled by
#                     unique_promoter_assignment.
#   * anything else -> plain feature extraction with get_overlaps() defaults.
annotate_peaks_internal <- function(peaks_df, gtf_df, genomic_feature, next_gene_range=c(-2000, 2000),
                                    prom_1kb_region=c(-1000, 0), prom_2kb_region=c(-2000, -1000), prom_3kb_region=c(-3000, -2000),
                                    prom_dowstream_region=c(0, 1000), unique_promoter_assignment=TRUE){
  if (genomic_feature == 'next_gene') {
    # Force summit-based overlap; the next-gene window logic in
    # get_overlaps() does not work otherwise.
    return(get_overlaps(peaks_df = peaks_df, regions_df = get_genes(gtf_df = gtf_df),
                        genomic_feature = genomic_feature, next_gene_range = next_gene_range,
                        summit = TRUE, unique_assignment = TRUE))
  }
  promoter_windows <- list(promoter_1kb = prom_1kb_region,
                           promoter_2kb = prom_2kb_region,
                           promoter_3kb = prom_3kb_region,
                           promoter_1kb_downstream = prom_dowstream_region)
  if (genomic_feature %in% names(promoter_windows)) {
    builder <- if (genomic_feature == 'promoter_1kb_downstream') get_promoter_downstream else get_promoters
    regions_df <- builder(gtf_df = gtf_df, prom_region = promoter_windows[[genomic_feature]])
    return(get_overlaps(peaks_df = peaks_df, regions_df = regions_df,
                        genomic_feature = genomic_feature,
                        unique_assignment = unique_promoter_assignment, summit = TRUE))
  }
  extractor <- switch(genomic_feature,
                      genic = get_genes,
                      UTR5 = get_5UTR,
                      UTR3 = get_3UTR,
                      CDS = get_CDS,
                      Exon = get_Exon,
                      Intron = get_Intron,
                      lncRNA = get_lncRNA,
                      ncRNA = get_ncRNA,
                      snoRNA = get_snoRNA,
                      miRNA = get_miRNA)
  # An unknown feature name leaves `extractor` NULL and errors here, matching
  # the old behaviour of falling through every branch without assigning.
  get_overlaps(peaks_df = peaks_df, regions_df = extractor(gtf_df = gtf_df),
               genomic_feature = genomic_feature)
}
# Annotate peaks with genomic-feature membership.
#
# For every feature in `annotation_priority` a logical column is appended to
# `peaks_df` (TRUE when the peak overlaps the feature), together with the
# associated TAIR gene id and, when translate_tair=TRUE, a gene-symbol
# column. A final `annotation` column resolves multi-feature peaks using the
# order of `annotation_priority` (earlier entries win).
#
# BUG FIX: the default next_gene_range used to be c(2000, 2000), which spans
# an empty window (start == end == summit + 2000). It is now the symmetric
# +/-2 kb window used by annotate_peaks_internal() and get_overlaps().
#
# NOTE(review): `summit` and `unique_assignment` are accepted but never used
# in this body -- confirm whether they were meant to be forwarded to
# get_overlaps().
# NOTE(review): the 'intergenic' classification reads the next_gene_tair /
# next_gene_symbol columns, so it only works when next_gene = TRUE.
annotate_peaks <- function(peaks_df, gtf_df, summit=TRUE, unique_assignment=TRUE, annotation_priority=NULL,
                           next_gene=TRUE, next_gene_range = c(-2000, 2000),
                           prom_1kb_region=c(-1000, 0), prom_2kb_region=c(-2000, -1000), prom_3kb_region=c(-3000, -2000),
                           prom_dowstream_region=c(0, 1000), translate_tair=TRUE,
                           unique_promoter_assignment=FALSE){
  # set annotation_priority if user does not provide input
  if(is.null(annotation_priority)){
    annotation_priority <- c('promoter_1kb', 'promoter_2kb', 'promoter_3kb', 'promoter_1kb_downstream',
                             'UTR5', 'Intron', 'Exon', 'UTR3', 'CDS',
                             'intergenic', 'lncRNA', 'snoRNA', 'miRNA', 'genic')
  }
  if('intergenic' %in% annotation_priority){
    # remove the feature intergenic since there is no function finding it and keeping it leads to error
    # it will be added at the end
    index <- which(annotation_priority == 'intergenic')
    an_pr <- annotation_priority[-index]
  }else{
    an_pr <- annotation_priority
  }
  # go through all features and test whether peaks belong to any of those features
  message("Annotating peaks with selected genomic features ...")
  for(feature in an_pr){
    message(paste0("|-- ", feature))
    peaks_df <- annotate_peaks_internal(peaks_df = peaks_df, gtf_df = gtf_df,
                                        genomic_feature = feature,
                                        prom_1kb_region=prom_1kb_region, prom_2kb_region=prom_2kb_region, prom_3kb_region=prom_3kb_region,
                                        prom_dowstream_region=prom_dowstream_region,
                                        unique_promoter_assignment = unique_promoter_assignment)
    if(translate_tair){
      # translate tair IDs into gene symbols for this feature's column
      gene_type_tair <- paste0(feature, "_tair")
      gene_type_symbol <- paste0(feature, "_symb")
      peaks_df[, gene_type_symbol] <- tair_to_symbol(peaks_df[, gene_type_tair])
    }
  }
  if(next_gene){
    message("|-- next_gene")
    peaks_df <- annotate_peaks_internal(peaks_df = peaks_df, gtf_df = gtf_df, genomic_feature = 'next_gene',
                                        next_gene_range = next_gene_range)
    if(translate_tair){
      # translate tair IDs of closest genes if user requested finding of closest genes to peaks
      peaks_df$next_gene_symbol <- tair_to_symbol(peaks_df$next_gene_tair)
    }
  }
  # classify all regions that have not been annotated as 'intergenic' (if the user wants to classify intergenic regions)
  if('intergenic' %in% annotation_priority){
    message("|-- intergenic")
    intergenic_regions <- rowSums(peaks_df[, an_pr]) == 0
    peaks_df[, 'intergenic'] <- intergenic_regions
    # adding ID of closest gene
    peaks_df[, 'intergenic_tair'] <- rep("", nrow(peaks_df))
    peaks_df[, 'intergenic_symbol'] <- rep("", nrow(peaks_df))
    if(sum(peaks_df$intergenic) > 0){ # only attempt to add tair and symbol if there are intergenic regions
      peaks_df[intergenic_regions, 'intergenic_tair'] <- peaks_df[intergenic_regions, 'next_gene_tair']
      if(translate_tair){
        peaks_df[intergenic_regions, 'intergenic_symbol'] <- peaks_df[intergenic_regions, 'next_gene_symbol']
      }
    }
  }
  # find final annotation for each feature
  message("|-- Final Annotation")
  # Iterate in reverse so earlier (higher-priority) features overwrite later
  # ones when a peak belongs to several categories.
  annotation_priority_rev <- base::rev(annotation_priority)
  final_annotation <- rep("", nrow(peaks_df))
  for(feature in annotation_priority_rev){
    final_annotation[peaks_df[, feature]] <- feature
  }
  peaks_df[,'annotation'] <- final_annotation
  message("... Genomic Feature Annotation Complete")
  return(peaks_df)
}
# Translate TAIR locus identifiers to upper-case gene symbols using the
# org.At.tair.db annotation database.
#
# genes : character vector of TAIR ids.
# Returns an unnamed character vector of the same length as `genes`; ids
# without a known symbol map to "".
tair_to_symbol <- function(genes){
  # Keep the empty case explicit so the return type is always character(0).
  if (length(genes) == 0) {
    return(character(0))
  }
  # vapply with a preallocated result replaces the previous grow-by-c()
  # loop, which copied the accumulator on every iteration.
  symbols <- vapply(genes, function(gene) {
    k <- annotate::getSYMBOL(gene, data = 'org.At.tair.db')
    # getSYMBOL may return a vector; keep the last entry, as the original
    # implementation did.
    symb <- as.character(k[length(k)])
    if (is.na(symb)) '' else symb
  }, character(1), USE.NAMES = FALSE)
  toupper(symbols)
}
|
08a041e5bdc8cc229b3bcbad5b8e30a5869b40f7 | 163994117d94800e0723b64d51e3e2f9f1616ff7 | /R/makeZ.R | 240f70487e7c7fb04a4b701ffa8112ca2ff61806 | [
"MIT"
] | permissive | harvard-P01/medBSA | 04b4e99c741b56d8fcea72be1eaa3d87fa6e34b7 | 0d8c4f9c5c2622fb474033a9844f673c499dc6dc | refs/heads/master | 2021-01-19T20:34:24.572765 | 2015-10-12T18:29:44 | 2015-10-12T18:29:44 | 46,285,644 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,191 | r | makeZ.R | #' Generate Z variables (confounders) for simulation
#'
#' This function generates a dataset of size ss with the Z parameters located
#' in the params[[Z]] list. The Z's are created successively, so the Z1 depends
#' on nothing, while Z2 can depend on Z1, and so on. The dimension of Z is
#' determined by Zp. When Zp > 1, params[["Z"]] should be list of lists containing
#' parameter vectors.
#'
#' @return A vector (if Zp = 1) or matrix (Zp > 1) of Z variables
#' @seealso genVar
#'
#' @examples
#' ##Variables that need to exist in environment
#' ss = 10
#' Zp = 2
#' fam = list(Z = list(binomial(link="logit"), binomial(link="logit")))
#' params = list(Z = list( c(0), c(0,1) ) )
#' ##Make a Z matrix
#' makeZ()
#'
makeZ <- function(){
  # Build the confounder matrix Z column by column. Each new Z_j is drawn by
  # genVar() conditional on an intercept plus all previously generated Z's,
  # so Z2 may depend on Z1, Z3 on (Z1, Z2), and so on.
  #
  # Reads from the calling environment: ss (sample size), Zp (number of
  # confounders), fam$Z (list of families), params$Z (list of parameter
  # vectors), and optionally Znames for column names.
  design <- matrix(1, nrow = ss, ncol = 1)
  for (j in seq_len(Zp)) {
    z_col <- genVar(fam$Z[[j]], design, params$Z[[j]])
    design <- cbind(design, z_col)
  }
  # Drop the intercept column; what remains are the Z's.
  Z <- design[, -1]
  if (Zp == 1) {
    Z <- as.matrix(Z, ncol = 1, nrow = ss)
  }
  colnames(Z) <- if (exists("Znames")) Znames else paste0("Z", 1:Zp)
  return(Z)
}
|
a867ebc6001037e44651f19c1fcee7e69fbb3671 | 3a04468a8440fe5d8d2c8665334b654a63f1af74 | /3_r_DataStructures.R | 55ae1ea6b5a0158d74170cdafcbde9da12274d23 | [] | no_license | sukesh-reddy/rTutorials | b6c6e76538d8b66c897fb61a6d50c4ba497cf000 | cd6fd4e87d1c311361b13ef0c24379d45bb71384 | refs/heads/master | 2021-05-17T19:02:53.523434 | 2020-04-12T07:58:25 | 2020-04-12T07:58:25 | 250,930,313 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,040 | r | 3_r_DataStructures.R | # ##################################################
# # R - Data Structures
# # Vectors, matrices, arrays, data frames , list
# ##################################################
#
# ########## Vector ##############
# a <- c(1,2,3,45)
# a <- c(1:5)
#
# #acessing elements
# a[1]
# a[c(1,3)]
#
# # matrices ############
# a <- matrix(1:4,nrow = 2, ncol=2,byrow = T)
#
# # Acessing elements
# a[1,]
# a[,2]
# a[1,2]
#
# ############ array ##############
# a <- array(1:27,dim = c(3,3,3))
#
# #Accessing elements
# a[1,1,1]
#
# ########## data frame#############
# num <- 1:3
# char <- c('q','w','r')
# logi <- c(T,F,T)
#
# df <- data.frame(num,char,logi)
#
# #inbuilt dataframe
# mtcars
#
# ----------------------------------------- function used on top of a data frame
# head(mtcars,4) - to see first several rows
# tail(mtcars,3) - to see last several rows
# str(mtcars) - return structure of a data set
# mtcars[1:2,1:4]
# summary(mtcars) - it gives descriptive stats
# mtcars$mpg - access a particular
# mtcars[,-3] - drops a column
# mtcars[,-c(1,2)] - drops a column
# subset(mtcars,hp>50) - subseeting with condition
# mtcars[mtcars$hp>50,] - subsetting with conition
# df1 <- mtcars[1:20,]
# df2 <- mtcars[21;31,]
# rbind(d1,df2) - combines two data frames with same number of coulmns and result will increase in row length(
# cbind() - combines a data frame with column wise, result will increase in column length
#
# ########### factor ############
# # in a data frame a character vector is automatically cnverted into factors and number of levels will define by that
# mtcars$cyl <- as.factor(mtcars$cyl)
#
# gender_vector <- as.factor(c('Male',"Female",'Male','Male','male','female','male','Female','female'))
# levels(gender_vector) <- c('Male','Female','male','female')
#
#
# #################### List #########################
# vect <- 1:3
# my_mat <- matrix(1:9, nrow = 3, ncol = 3)
# my_df <- mtcars[1:3,1:3]
# my_list <- list(vect,my_mat,my_df)
# my_list
# names(my_list) <- c('vect','matric','dataframe')
|
cd2d889e16b45005b0b3b5b72093932577946120 | 2b7066f30b070930a3e325f9a2f3abbf774da88c | /man/print.roll.Rd | 2eea8849cfb0138cee87a1e9b168cf0129f0d224 | [] | no_license | clagett/dieroller | cc1e896f648af366669bbf31fa26c263fb12e40a | 21f0c2af629d09753262637e77a2668675b89d09 | refs/heads/master | 2020-03-14T01:30:20.126938 | 2018-04-28T06:15:30 | 2018-04-28T06:15:30 | 131,378,845 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 235 | rd | print.roll.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rolls.R
\name{print.roll}
\alias{print.roll}
\title{print.roll}
\usage{
\method{print}{roll}(x, ...)
}
\description{
Print method for objects of class \code{roll}.
}
|
93121289e1a6707aff79673666da769428165092 | 05c8ef8706b84a0c4abd5886cf98b246495fed36 | /TP Final/codigo.R | 91bd0fb89ab6a86d13c30d6e883443e470d5dd6b | [] | no_license | Tomasfdel/Trabajos-DM | dbc53ff4bd544b5f09da2b7ed9209c552a33855e | bbf40027d7853aa0f187d7453d5c216be027982a | refs/heads/main | 2023-02-27T17:02:19.241945 | 2021-01-31T07:15:31 | 2021-01-31T07:15:31 | 334,008,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,590 | r | codigo.R | #~ CARGA DE DATOS
dataset = read.csv("nasa.csv")
#~ DATA PRE-PROCESSING
#~ Drop columns whose information is already present in another unit of
#~ measurement. Column indices below refer to positions AFTER each previous
#~ drop, so the order of these statements matters.
dataset = dataset[,-(6:11)]
dataset = dataset[,-(9:10)]
dataset = dataset[,-c(9, 11, 12)]
#~ Drop constant-valued columns.
dataset = dataset[,-c(10, 28)]
#~ Drop the Estimated Minimum Diameter.
dataset = dataset[,-4]
#~ Drop the Orbit Determination Date.
dataset = dataset[,-10]
#~ Drop the Close Approach Date.
dataset = dataset[,-5]
#~ Drop names and IDs.
dataset = dataset[,-(1:2)]
#~ Scale the 21 predictor columns (zero mean, unit variance).
dataset[,1:21] = scale(dataset[,1:21])
#~ Convert the class label from booleans to integers.
#~ NOTE(review): the comparisons against 1 and 2 further down assume the
#~ label was read as a two-level factor (read.csv with stringsAsFactors=TRUE,
#~ the default before R 4.0), so as.integer() yields 1/2 -- confirm under
#~ the R version in use.
dataset[,22] = as.integer(dataset[,22])
#~ RELEVANT-VARIABLE ANALYSIS
source("Util/Filtro.R")
filter_result = non.parametric.filter(dataset[,1:21], dataset[, 22])
#~ Keep the eight most relevant fields according to the filter, plus the
#~ class label (column 22).
dataset = dataset[,c(filter_result$ordered.features.list[1:8], 22)]
#~ CLUSTER SEARCH
#~ Compute the GAP statistic and the suggested number of clusters.
source("Util/Clustering.R")
gapResult = gapStatistic(dataset[,-9], 1, 10, 500)
gapResult$suggestedClusterAmount
#~ Run K-Means (column 9 is the class label, excluded from clustering).
kmeansResult = kmeans(dataset[,-9],cent=4)
#~ Per-cluster distribution of non-hazardous (1) and hazardous (2) asteroids.
table(kmeansResult$cluster[dataset[,9] == 1])
table(kmeansResult$cluster[dataset[,9] == 2])
#~ CLASSIFICATION
source("Util/Clasificacion.R")
runRandomForest()
|
908dd9607399891401876bd29c1b3aed6f79305b | 9c173a9342104e6779fea737e4ebe3b5e066098a | /7 - Regression Models/Quiz 2/Question 7.R | 720425a6d1c046f115bb38d21aef5db19c1fccb6 | [] | no_license | xush65/Data-Scientist-MOOC | 639fa7ad291ae4607eb361099d74f7a36bd78322 | f91e8f71ce63d82242edfd4c36520211261a01d3 | refs/heads/master | 2021-05-30T09:42:05.456437 | 2014-12-17T10:41:49 | 2014-12-17T10:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 520 | r | Question 7.R | data(mtcars)
# Consider again the mtcars data set and a linear regression model with mpg as predicted by weight (in 1,000 lbs).
# A "short" ton is defined as 2,000 lbs.
# Construct a 95% confidence interval for the expected change in mpg per 1 short ton increase in weight.
# Give the lower endpoint.
# To get the change per 1 short ton (2,000 lbs), express weight in short tons by halving it.
wt <- (1/2) * mtcars$wt
mpg <- mtcars$mpg
fit <- lm(mpg ~ wt)
sumCoef <- summary(fit)$coefficients
# BUG FIX: a two-sided 95% CI needs the 97.5th percentile of the t
# distribution (2.5% in each tail); qt(.95, ...) would produce a 90% CI.
sumCoef[2,1] + c(-1, 1) * qt(.975, df = fit$df) * sumCoef[2, 2]
|
3d145138534c89e053aa22e94193e8ec2882b140 | 5db2cd0763446283caeaaf5a619be1685bfc3268 | /scripts/div_functions.R | 00313ce4b6658e687cf63694e6d04e2e8e38011e | [] | no_license | MoBiodiv/div_gradient | e8eb6c54d46476ee476222e05fa6708c368371ca | 1f754653e78b66b9457ddaa373fdfd20b9f847b5 | refs/heads/master | 2020-12-23T00:17:21.150961 | 2016-08-08T09:05:41 | 2016-08-08T09:05:41 | 38,508,209 | 0 | 2 | null | 2016-07-12T13:30:47 | 2015-07-03T19:45:47 | R | UTF-8 | R | false | false | 2,787 | r | div_functions.R |
stems2comm <- function(spids, stem_coords, plot_size, domain,
                       abu=NULL, rm_absent_sp=TRUE)
{
  ## Convert a mapped stem dataset into a community data matrix.
  ## Output:
  ## A list with three elements:
  ##   params: data.frame with grain (pixel area), extent (domain area) and
  ##           the number of plots
  ##   comm:   community matrix, one row per grid pixel, one column per species
  ##   spat:   lower-left (x, y) corner of each pixel, rows matching comm
  ## Arguments:
  ## spids: species identities; assumed to be integer codes 1..S because the
  ##   table(c(spids, 1:S)) counting trick below relies on it -- TODO confirm
  ##   before passing string ids.
  ## stem_coords : two column matrix (x,y) specifying the spatial coordinates of each stem
  ## plot_size : the x and the y size of each sub-plot.
  ## domain : specifies the spatial domain of the area: (xmin, xmax, ymin, ymax)
  ## abu: abundance associated with each record, if NULL then it is set to 1
  ##   individual per record
  ## rm_absent_sp: boolean that defaults to TRUE to remove species that no
  ##   longer occur in the site x species matrix after subsetting (i.e. when
  ##   'domain' is smaller than the area the coordinates cover)
  xdiff <- abs(domain[2] - domain[1])
  ydiff <- abs(domain[4] - domain[3])
  xlength <- plot_size[1]
  ylength <- plot_size[2]
  n_plots <- (xdiff / xlength) * (ydiff / ylength)
  # Tolerant integrality check: the old `n_plots != round(n_plots)` test
  # could reject valid grids because of floating point error in the division.
  if (!isTRUE(all.equal(n_plots, round(n_plots))))
    stop('number of plots that study site can be devided into is not an integer please chose a new plot_size')
  n_plots <- round(n_plots)
  S <- length(unique(spids))
  comm <- matrix(NA, nrow = n_plots, ncol = S)
  spat <- matrix(NA, nrow = n_plots, ncol = 2)
  xbreaks <- seq(domain[1], domain[2], xlength)
  ybreaks <- seq(domain[3], domain[4], ylength)
  nx <- length(xbreaks) - 1
  ny <- length(ybreaks) - 1
  irow <- 1
  for (i in seq_len(nx)) {
    # Intervals are half-open [lo, hi) except the last, which is closed so
    # stems sitting exactly on the domain's upper edge are kept.
    # BUG FIX: the previous code widened the last break by 0.01 *inside* the
    # loops, so the bump was applied repeatedly (once per pass), silently
    # growing the last interval beyond the domain.
    x_in <- xbreaks[i] <= stem_coords[ , 1] &
      (if (i == nx) stem_coords[ , 1] <= xbreaks[i + 1]
       else stem_coords[ , 1] < xbreaks[i + 1])
    for (j in seq_len(ny)) {
      spat[irow, ] <- c(xbreaks[i], ybreaks[j])
      y_in <- ybreaks[j] <= stem_coords[ , 2] &
        (if (j == ny) stem_coords[ , 2] <= ybreaks[j + 1]
         else stem_coords[ , 2] < ybreaks[j + 1])
      in_plot <- x_in & y_in
      if (is.null(abu)) {
        # Appending 1:S guarantees every species appears in the table; the
        # padding count of one per species is subtracted again.
        comm[irow, ] <- as.integer(table(c(spids[in_plot], 1:S)) - 1)
      } else {
        # Expand each record to `abu` individuals before counting.
        comm[irow, ] <- as.integer(table(c(unlist(mapply(
          rep, spids[in_plot], abu[in_plot])), 1:S)) - 1)
      }
      irow <- irow + 1
    }
  }
  if (rm_absent_sp) {
    # Drop species columns with zero total abundance across all pixels.
    cols_to_rm <- which(apply(comm, 2, function(x) all(x == 0)))
    if (length(cols_to_rm) > 0)
      comm <- comm[ , -cols_to_rm]
  }
  params <- data.frame(grain = prod(plot_size), extent = xdiff * ydiff,
                       n_plots)
  out <- list(params = params, comm = comm, spat = spat)
  return(out)
}
|
d9bcf9876b27a5866bcbbd04a769ab6a8f3d06ba | 0eb22bf570e6a3f079b4e561568627c4f1fa815b | /man/equacoes.Rd | 77b7050d682684e16630ffc711a48a88be8bb4dc | [
"MIT"
] | permissive | lfpdroubi/appraiseR | 7e6795d3287dfeb8bcc50e38d33ae6c7bafdf5ca | 8ed726beb34c7e16005abda21582ebb5e46c5169 | refs/heads/master | 2022-05-26T20:43:45.722451 | 2022-03-19T16:00:20 | 2022-03-19T16:00:20 | 211,920,843 | 3 | 1 | NOASSERTION | 2020-10-15T11:49:17 | 2019-09-30T17:54:13 | R | UTF-8 | R | false | true | 903 | rd | equacoes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{equacoes}
\alias{equacoes}
\title{Regression equation}
\usage{
equacoes(
object,
type = c("reg", "est"),
inline = TRUE,
func,
accuracy = 100,
f = round,
errorTerm = TRUE
)
}
\arguments{
\item{object}{object of class \code{lm}}
\item{type}{the equation type required: regression (reg) or estimation (est).}
\item{inline}{the equation mode. TRUE for inline equations or FALSE for
displayed mode.}
\item{func}{transformation applied to dependent variable.}
\item{accuracy}{number to round to; for POSIXct objects, a number of seconds}
\item{f}{rounding function: floor, ceiling or round}
}
\description{
Givens a \link{lm} object, returns its regression equation
}
\examples{
dados <- st_drop_geometry(centro_2015)
fit <- lm(log(valor) ~ ., dados)
equacoes(fit)
equacoes(fit, accuracy = 1)
}
|
99f750124110007c8e01eb2921474d0d0ea1cca8 | ea7f5f5fd4d9be8ba9e772f6cd087513861dd61b | /tests/testthat/tests.R | f43864c1fb7301a99e73fcd54d27385dab66a4da | [] | no_license | blauwers/rEarthQuakes | 38e3d31efb7280ea1c24296d4e9f40cf69231299 | 36775dd00d7fd0048e42515b863530ce46ff9d1f | refs/heads/master | 2021-07-08T00:44:20.177836 | 2017-10-05T14:23:53 | 2017-10-05T14:23:53 | 105,855,990 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,216 | r | tests.R | context("Test every function in the package")
# Test fixture: the NOAA significant-earthquakes file, downloaded when this
# test file is sourced.
# NOTE(review): this requires network access at test time and re-downloads on
# every run — consider caching a local fixture with skip_if_offline().
data <- readr::read_delim("https://www.ngdc.noaa.gov/nndc/struts/results?type_0=Exact&query_0=$ID&t=101650&s=13&d=189&dfn=signif.txt", delim = "\t")
# eq_clean_data is supposed to return a data frame
test_that("eq_clean_data returns data frame", {
  expect_is(eq_clean_data(data), "data.frame")
})
# Date column is supposed to be added and of type Date
test_that("eq_clean_data$DATE is Date type", {
  expect_is(eq_clean_data(data)$DATE, "Date")
})
# Latitude and Longitude are supposed to be numeric
test_that("eq_clean_data returns numeric coordinates", {
  expect_is(eq_clean_data(data)$LATITUDE, "numeric")
  expect_is(eq_clean_data(data)$LONGITUDE, "numeric")
})
# eq_location_clean returns a data frame
test_that("eq_location_clean returns a data frame", {
  expect_is(eq_location_clean(data), "data.frame")
})
# geom_timeline returns a ggplot object
test_that("geom_timeline returns ggplot object", {
  myplot <- data %>% eq_clean_data() %>%
    dplyr::filter(COUNTRY %in% c("USA", "CHINA"), YEAR > 2010) %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE,
                                 y = COUNTRY,
                                 color = as.numeric(TOTAL_DEATHS),
                                 size = as.numeric(EQ_PRIMARY)
    )) +
    geom_timeline()
  expect_is(myplot, "ggplot")
})
# geom_timeline_label returns a ggplot object
test_that("geom_timeline_label returns ggplot object", {
  myplot <- data %>% eq_clean_data() %>%
    dplyr::filter(COUNTRY %in% c("USA", "CHINA"), YEAR > 2010) %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE,
                                 y = COUNTRY,
                                 color = as.numeric(TOTAL_DEATHS),
                                 size = as.numeric(EQ_PRIMARY)
    )) +
    geom_timeline_label(aes(label = LOCATION_NAME))
  expect_is(myplot, "ggplot")
})
# theme_timeline returns a ggplot object
test_that("theme_timeline returns ggplot object", {
  myplot <- data %>% eq_clean_data() %>%
    dplyr::filter(COUNTRY %in% c("USA", "CHINA"), YEAR > 2010) %>%
    ggplot2::ggplot(ggplot2::aes(x = DATE,
                                 y = COUNTRY,
                                 color = as.numeric(TOTAL_DEATHS),
                                 size = as.numeric(EQ_PRIMARY)
    )) +
    theme_timeline()
  expect_is(myplot, "ggplot")
})
# eq_map returns a leaflet object (htmlwidget), not a ggplot
test_that("eq_map returns leaflet object", {
  mymap <- data %>%
    eq_clean_data() %>%
    dplyr::filter(COUNTRY == "USA" & lubridate::year(DATE) >= 2010) %>%
    dplyr::mutate(popup_text = eq_create_label(.)) %>%
    eq_map(annotation = "popup_text")
  expect_is(mymap, "leaflet")
})
# eq_create_label returns a character vector
test_that("eq_create_label returns character vector", {
  expect_is(eq_create_label(data), "character")
})
|
f4969cee09277b4f28eb50cbf7cc98c3662de3fa | 7e2550b3c6c8e0c05c9a2f0c530a28255ff5be2b | /MSDABridgeCoursework/Week_5_Final/R_Scratchpad.R | 5183e6fa670f37e431aa8b2714194cc23630c336 | [] | no_license | ChristopheHunt/MSDA---Coursework | 48ed46f521bc16f398162cbd37832e60fdb7d30a | 2cf45ed9f39398c44150d0de30f0733a175db959 | refs/heads/master | 2021-01-17T01:05:24.376315 | 2018-02-18T13:30:28 | 2018-02-18T13:30:28 | 51,212,211 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 294 | r | R_Scratchpad.R | library(babynames)
# Exploratory scratchpad for the babynames dataset (library(babynames) is
# loaded on the first line of this file).
x <- data.frame(babynames)
# Keep post-2010 names whose proportion exceeds ~2.4 per 100,000 births
x.sub1 <- subset(x, year > 2010 & prop > 2.366e-05)
x.sub1
hist(x.sub1$prop)
boxplot(x.sub1$n)
plot(x = x.sub1$n, y = x.sub1$prop)
str(x.sub1)
summary(x)
head(x)
hist(x$prop)
# Plot types to try (previously bare names, which print the function object
# or error rather than draw anything):
# histogram
# boxplot
# scatterplot
table(x.sub1$name)
summary(x.sub1)  # fixed: `summary(x.sub1$)` was a parse error
|
32d3777f24adef0128d10d858f77e9c8f45f71ff | e1a6c389bf0b04ada8edcc387543b9b4bcc17697 | /PA_BotsWithIndiv.R | 5bae27ed704127caa0a02a7d2efde49962a87469 | [] | no_license | kurtawirth/dissertation | 2953672d1180ebfb31a393fd1848ad8db75d4b6b | a2c8213bf3a3788a363b9291c4354ba404820a64 | refs/heads/master | 2022-11-17T16:16:17.712291 | 2020-07-19T20:55:50 | 2020-07-19T20:55:50 | 280,940,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,247 | r | PA_BotsWithIndiv.R | library(tidyverse)
library(jsonlite)
library(sentimentr)
# Daily Twitter sentiment pipeline: load one day's tweets (JSON), score
# sentence-level sentiment, then average per user and across users for the day.
#Change name of file as needed
file = "D:/Users/Kurt/Documents/Research/Dissertation/Data/tweets/01172020/economy01172020.json"
#Import file as a dataframe, limit to first 2000 tweets
current_day = fromJSON(file) %>% as.data.frame()
current_day = current_day[1:2000,]
#Select only the handles and the text of each tweet
# NOTE(review): assumes columns 5 and 6 are tweets.screen_name / tweets.text —
# confirm against the scraper's JSON layout.
handles_text = current_day %>% select(5, 6)
#Identify separate sentences in each tweet
tweet_text_sentences = get_sentences(handles_text$tweets.text)
#Score the sentiment of each sentence and then average them together to form a single sentiment score for each tweet
current_day_sentiment_scores = sentiment_by(tweet_text_sentences) %>%
  mutate(tweets.screen_name = handles_text$tweets.screen_name) %>%
  select(-element_id)
#Reorder columns for readability
current_day_sentiment_scores = current_day_sentiment_scores[,c(4,3,2,1)]
#Calculate sentiment for the day's individuals
sentiment_by_individual = current_day_sentiment_scores %>% group_by(tweets.screen_name) %>%
  summarise(individual_sentiment = mean(ave_sentiment))
# Day-level score: mean of the per-user means (each user weighted equally)
current_day_individual_sentiment = mean(sentiment_by_individual$individual_sentiment)
current_day_individual_sentiment
|
540eb50ef34f734d1e9b848d2715d0fd8a0fff65 | 794a02c3d58e497bcf6d6aad8531ab2d3cdbb3e0 | /apps/China_census/controllers/bak/house.R | 5888e1d7646c713d01526e07895c1a9b2e329a67 | [] | no_license | GL-Li/shiny-server | b154ef57afd0ebc75f186def9188184fde567abf | 8b9a4c953d8666da126df7ee712c9d31b3067a9f | refs/heads/master | 2020-12-10T01:33:11.364204 | 2020-01-13T02:04:50 | 2020-01-13T02:04:50 | 233,470,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,932 | r | house.R | # click on a map to get distribution ==========================================
# Server-side Shiny code: expects `input`, `output`, `session`, the polygon
# data `map`, the precomputed `houseDistr*` objects and the ggplot theme
# `myThemeAxis` to be in scope (defined elsewhere in the app).
# get reactive variables from the input
roomDistr <- reactive(get(paste0("houseDistr", input$chooseWhere))$roomDistr )
plotRoomDistr <- reactive(get(paste0("houseDistr", input$chooseWhere))$plotRoomDistr)
roomPopularMap <- reactive(get(paste0("houseDistr", input$chooseWhere))$roomPopularMap)
where <- reactive(tolower(input$chooseWhere))
# start up plot of popular room numbers on China map
output$map <- renderPlot(
  height = function() {
    0.8 * session$clientData$output_map_width
  },
  expr = {
    ggplot(data = roomPopularMap(), aes(x = long, y = lat)) +
      geom_polygon(aes(group = group, fill = factor(popular)), color = "black") +
      ggtitle(paste0("most popular: ", where())) +
      scale_fill_manual(values = c("1" = "#5858FA", "2" = "#58FAF4",
                                   "3" = "#58FA58", "4" = "#F4FA58")) +
      theme(legend.position = c(0.92, 0.2),
            legend.text=element_text(size=11),
            legend.title=element_blank()) +
      myThemeAxis +
      ylim(c(15, 55))
  })
# start up plot of distribution, cannot fix coor ratio as not Cartisan
output$distr <- renderPlot(
  height = function() {
    0.8 * session$clientData$output_distr_width
  },
  expr = {
    ggplot(plotRoomDistr(), aes(num_rooms, percent)) +
      geom_line(aes(group = Chinese), alpha = 0.3) +
      geom_point(aes(group = Chinese), alpha = 0.3) +
      ggtitle(paste0("distribution: all provinces -- ", where())) +
      xlab ("Number of Rooms") +
      ylab("Percent of Household (%)") +
      theme(axis.text.x = element_text(angle = 18, hjust = 1)) +
      myThemeAxis
  })
# update when clicked
observeEvent(input$clickMap, {
  # force x and y NOT reactive, otherwise related variables disappear when
  # unclicked, input$clickMap$x and input$clickMap$y are always reactive
  x <- input$clickMap$x
  # (`=` assignment; equivalent to `<-` here)
  y = input$clickMap$y
  clicked_prov <- which_province(map, x, y, "province")
  # these two are two close to seperate, but we only need one
  if (clicked_prov == "Hong Kong") clicked_prov <- "Guangdong"
  # back to room data, no data for Taiwan
  if (clicked_prov != "Taiwan") {
    room_prov <- as.numeric(roomDistr()[roomDistr()$province == clicked_prov, 2:12])
  } else {
    # placeholder: total household count 1, zero in every room bucket
    room_prov <- c(1,rep(0, 10))
    # names(roomProv) <- names(rooms[3:12])
  }
  room_DF <- data.frame(num_room = factor(names(roomDistr()[3:12]), ordered = TRUE,
                                          levels = names(roomDistr()[3:12])),
                        household = room_prov[2:11] / room_prov[1] )
  # for geom_label( )
  if (clicked_prov != "") {
    labelDf <- data.frame(long = x, lat = y, text = clicked_prov)
  } else {
    labelDf <- data.frame(long = x, lat = y, text = NA)
  }
  output$map <- renderPlot(
    height = function() {
      0.8 * session$clientData$output_map_width
    },
    expr = {
      ggplot(data = roomPopularMap(), aes(x = long, y = lat)) +
        geom_polygon(aes(group = group, fill = factor(popular)), color = "black") +
        ggtitle(paste0("most popular: ", where())) +
        scale_fill_manual(values = c("1" = "#5858FA", "2" = "#58FAF4",
                                     "3" = "#58FA58", "4" = "#F4FA58")) +
        theme(legend.position = c(0.92, 0.2),
              legend.text=element_text(size=11),
              legend.title=element_blank()) +
        myThemeAxis +
        ylim(c(15, 55)) +
        # geom_polygon(data = map[map$province == clicked_prov,],
        #              aes(long, lat, group = group), fill = "orange") +
        geom_path(data = map[map$province == clicked_prov,],
                  aes(long, lat, group = group), color = "orange", size = 1) +
        geom_label(data = labelDf, aes(long, lat, label = text),
                   alpha = 0.5, na.rm = TRUE, color = "red")
      # annotate("text", x = x, y = y, label = clicked_prov, color = "red")
    })
  selected_prov <- plotRoomDistr()[plotRoomDistr()$province == clicked_prov,]
  output$distr <- renderPlot(
    height = function() {
      0.8 * session$clientData$output_distr_width
    },
    expr = {
      ggplot(plotRoomDistr(), aes(num_rooms, percent)) +
        geom_line(aes(group = province), alpha = 0.3) +
        geom_point(aes(group = province), alpha = 0.3) +
        ggtitle(paste0("distribution: ", clicked_prov, " -- ", where())) +
        # theme(plot.title = element_text(color = "red")) +
        xlab ("Number of Rooms") +
        ylab("Percent of Household (%)") +
        geom_line(data = selected_prov, aes(num_rooms, percent, group = province), color = "red") +
        geom_point(data = selected_prov, aes(num_rooms, percent, group = province), color = "red") +
        theme(axis.text.x = element_text(angle = 18, hjust = 1)) +
        myThemeAxis
    })
})
# average living area per head of all, city, town, and villiage ================
# Choropleth of living area per head; hovering highlights a province and
# annotates its value (rendered with plotmath via parse = TRUE).
# map_title <- reactive(input$chooseWhere)
houseMapWhere <- reactive(get(paste0("houseMap", input$chooseWhere)))
output$avgLivingArea <- renderPlot(
  height = function() {
    0.8 * session$clientData$output_avgLivingArea_width
  },
  expr = {
    ggplot(houseMapWhere(), aes(long, lat)) +
      geom_polygon(aes(group = group, fill = area_per_head), color = "black") +
      scale_fill_continuous(limit = c(18, 50), low = "white", high = "red") +
      ggtitle(where()) +
      theme(legend.position = c(0.905, 0.25),
            legend.text=element_text(size=11),
            legend.title=element_blank()) +
      myThemeAxis +
      ylim(c(15, 55))
  })
# mouse hover over
observeEvent(input$hoverAvgLivingArea, {
  # force x and y NOT reactive, otherwise related variables disappear when
  # unclicked, input$clickMap$x and input$clickMap$y are always reactive
  x <- input$hoverAvgLivingArea$x
  y = input$hoverAvgLivingArea$y
  hoveredProv <- which_province(map, x, y, "province")
  # these two are two close to seperate, but we only need one
  if (hoveredProv == "Hong Kong") hoveredProv <- "Guangdong"
  annotateNumber <- unique(houseMapWhere()$area_per_head[houseMapWhere()$province == hoveredProv])[1]
  if (hoveredProv != "") {
    # build a plotmath expression, e.g. "Guangdong:~32~m^2"
    annotateText <- paste0(gsub(" ", "~", hoveredProv), ":~", annotateNumber, "~m^2")
    if (hoveredProv == "Taiwan") {
      annotateText <- "Taiwan:~no~data"
    }
  } else {
    # prepare for na.rm = TRUE in geom_label()
    annotateText <- NA
  }
  print(annotateText)
  # create data frame for geom_label
  labelDf <- data.frame(long = x, lat = y, text = annotateText)
  output$avgLivingArea <- renderPlot(
    height = function() {
      0.8 * session$clientData$output_avgLivingArea_width
    },
    expr = {
      ggplot(houseMapWhere(), aes(long, lat)) +
        geom_polygon(aes(group = group, fill = area_per_head), color = "black") +
        scale_fill_continuous(limit = c(18, 50), low = "white", high = "red") +
        ggtitle(where()) +
        theme(legend.position = c(0.905, 0.25),
              legend.text=element_text(size=11),
              legend.title=element_blank()) +
        myThemeAxis +
        ylim(c(15, 55)) +
        geom_path(data = map[map$province == hoveredProv,],
                  aes(long, lat, group = group), color = "yellow", size = 1) +
        geom_label(data = labelDf, aes(long, lat, label = text),
                   alpha = 0.7, vjust = 2, na.rm = TRUE, parse = TRUE)
      # annotate("text", x = x, y = y, label = annotateText, color = "black")
    })
})
# dependence of building area per person on education =========================
# Static base plot (built once outside the reactive context); clicking a point
# re-renders it with a label showing the household count behind that point.
plotHouseEdu <- ggplot(houseEduCombined, aes(education, area_per_person)) +
  geom_line(aes(group = where, color = where), alpha = 0.6) +
  geom_point(aes(size = number_of_household, color = where), alpha = 0.6) +
  guides(color = "legend", size = FALSE) +
  scale_size_area(max_size = 20) +
  expand_limits(y = c(25.1, 43.2)) +
  # do not use limits = c(26, 43) below. click returns nothing out of the range
  scale_y_continuous(breaks = seq(26, 43, 2)) +
  scale_color_manual(values = c("city" = "blue", "town" = "green",
                                "villiage" = "red")) +
  theme(legend.position = c(0.9, 0.15),
        legend.text=element_text(size=11),
        legend.title=element_blank()) +
  theme(axis.title = element_text(size = 16),
        axis.text.y = element_text(color = "black", size = 12),
        axis.text.x = element_text(angle = 18, hjust = 1,
                                   color = "black", size = 12)) +
  xlab("Final Education") +
  ylab(expression(Area~per~Person~(m^2))) +
  # labs(list(x = "Final Education", y = expression("Area per Person (m^2)"))) +
  ggtitle("dependence on education: city, town, and villiage")
#startup plot
output$areaPerPerson <- renderPlot(
  height = function() {
    0.8 * session$clientData$output_areaPerPerson_width
  },
  expr = plotHouseEdu
)
# when clicked
observeEvent(input$clickAreaPerPerson, {
  # x is the (rounded) education-category index on the discrete axis
  x <- round(input$clickAreaPerPerson$x)
  y <- input$clickAreaPerPerson$y
  # from x select the average area of city, town and villiage
  # NOTE(review): assumes houseEduCombined has 7 education levels per `where`
  # group, stacked city/town/villiage — confirm against the data prep code.
  ctv <- houseEduCombined[c(x, x + 7, x + 14), 5]
  # find the one closest to click point
  idx <- which.min(abs(y - ctv))
  # ignore if too far from the closest one
  if (abs(y - ctv)[idx] > 0.5) {
    labelDf <- data.frame(long = x, lat = y,
                          text = NA)
  } else {
    householdNumber <- round(houseEduCombined[x + 7 * (idx - 1), 2] / 1e6,2)
    where <- c("city ", "town ", "villiage ")[idx]
    labelDf <- data.frame(long = x, lat = y,
                          text = paste0(where, "\n", householdNumber, " M"))
  }
  # make data frame for geom_label()
  # labelDf <- data.frame(long = x, lat = y,
  #                       text = paste0(where, "\n", householdNumber))
  output$areaPerPerson <- renderPlot(
    height = function() {
      0.8 * session$clientData$output_areaPerPerson_width
    },
    expr = {
      plotHouseEdu +
        geom_label(data = labelDf, aes(long, lat, label = text),
                   alpha = 0.5, vjust = 1, na.rm = TRUE)
      # annotate("text", x = x, y = y,
      #          label = paste0(where, "\n", householdNumber))
    })
})
747f8a428cf4d7abe4fab845225213184584972d | 258efad8aa80325ce0e860690672dfb61d6d0756 | /Src/loadPackages.R | 4de8841a818b7782f6fe3f88c26b142ff5597a83 | [] | no_license | janniklas93/OptimizeParameters | c0b1c10f72553ed80cd93829927ae3beddf8b454 | c31255ec64329c69a7cf9f0ea6d3af9188dca1fa | refs/heads/master | 2021-01-20T22:10:44.026733 | 2016-07-26T09:33:58 | 2016-07-26T09:33:58 | 60,241,380 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 596 | r | loadPackages.R | suppressMessages(library(foreach))
# Attach every package required by the analysis pipeline, silencing the
# start-up chatter. (foreach is attached on the first line of this file.)
invisible(lapply(
  c("doParallel", "parallel", "doMC", "stringr",
    "hgu133plus2.db", "hgu133a.db", "affy", "simpleaffy", "affyPLM",
    "affycoretools", "affyQCReport", "annaffy", "limma"),
  function(pkg) suppressMessages(library(pkg, character.only = TRUE))
))
#suppressMessages(source("Src/GSEA.1.0.R", verbose = TRUE, max.deparse.length = 9999))
7522dc73f12463d63a22e60f9ea8263d4f6427f9 | 11b21336733fee090e1490f193624aa9b23f962b | /man/LRT_multiple_groups.Rd | a316246c4ff5dc42ffc05d6906e4e636db41bf46 | [] | no_license | satabdisaha1288/scBT | 4f39d4c3d132e76b8e51a7960f5f0b1f9403b4c4 | 39c2db4d80ceb1f3087929881e243d576d32e571 | refs/heads/main | 2023-08-14T11:14:53.981821 | 2021-09-15T18:39:57 | 2021-09-15T18:39:57 | 336,386,409 | 2 | 0 | null | 2021-09-15T18:39:58 | 2021-02-05T20:52:00 | R | UTF-8 | R | false | true | 547 | rd | LRT_multiple_groups.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new_LRT_multipleGroups.R
\name{LRT_multiple_groups}
\alias{LRT_multiple_groups}
\title{INSERT DESCRIPTION HERE}
\usage{
LRT_multiple_groups(data.list)
}
\arguments{
\item{data.list}{a list combining, for each of the K groups, the vector of
positive observations (of length equal to the sum of the corresponding
zero/one indicator vector) together with the zero/one vectors indicating
whether each observation is expressed in that group}
}
\value{
INSERT RETURN DESCR HERE
}
\description{
INSERT DESCRIPTION HERE
}
\author{
}
|
d875fdafdd8e22cbb0ea30a410f276126c330cac | bd7975600f74c964f0b7ed226daed0eb687921e3 | /week2/complete.R | 6fe8195babb12457044eeef87c3c08e8390349d7 | [] | no_license | vinod-desireddy/Coursera-Rprogramming | f6d6f0029c0f85c80f010c1065247af17a65af69 | 6376e4181233d472586452bd75ad61e0fc81935b | refs/heads/master | 2022-04-23T02:10:38.777717 | 2020-04-28T10:41:21 | 2020-04-28T10:41:21 | 259,593,419 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 937 | r | complete.R | # Function that reads a directory full of files and reports
# the number of completely observed cases in each data file.
# The function should return a data frame where the first column is
# the name of the file and the second column is the number of complete cases
source('readdata.R')
#loading dataf function from readdata.R to read the desired csv file
# from the required directory
complete <- function(directory = 'specdata', id = 1:332){
  # Returns a data frame with one row per requested monitor:
  #   id   - the monitor number
  #   nobs - the number of completely observed (no NA) rows in that
  #          monitor's data file
  # Files are loaded with dataf() from readdata.R (sourced above).
  nobs <- numeric(length(id))
  a <- 1
  for (i in id) {
    # Load each file once (previously it was read twice per monitor) and
    # count the rows with no missing values.
    monitor_data <- dataf(directory, id = i)
    nobs[a] <- sum(complete.cases(monitor_data))
    a <- a + 1
  }
  cc <- data.frame(id = id, nobs = nobs)
  cc
}
5758dac667f5dca57ee2ef6519e0e0881bbe2df4 | 9bdef4e24ce5ba2309a4d5e678430e019b204597 | /R/bb.R | e9191681b4a6524f321a262c4f26dc78a2336d47 | [] | no_license | mikebesso/two.laws.quant.indicators | 30f51557f1185fa0adcc511a904ac77fcfe9dfcb | ec22a6b605d5fd55b6028717c2ceeb705e194c77 | refs/heads/master | 2021-09-02T01:11:59.339061 | 2017-12-29T15:09:25 | 2017-12-29T15:09:25 | 115,731,767 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 780 | r | bb.R | #' @include attributes.R
#' @include averages.R
NULL
#' @export
CalculateBBands <- function(hlc, n = 20, sd = 2, maType = CalculateSMA){
  # Compute Bollinger Bands for a High/Low/Close series and wrap them as an
  # indicator data frame via InitializeIndicator().
  #
  # Args:
  #   hlc:    an object accepted by TTR::BBands (HLC matrix / data frame).
  #   n:      moving-average window length.
  #   sd:     number of standard deviations for the upper/lower bands.
  #   maType: moving-average calculator recorded with the indicator.
  #           NOTE(review): not forwarded to TTR::BBands (TTR expects its own
  #           maType specification), so it does not affect the computation.
  #
  # Bug fix: `sd` was previously not passed through to TTR::BBands, so the
  # bands were always computed with TTR's default of 2 regardless of the
  # caller's `sd` argument.
  BB <- dplyr::as_data_frame(TTR::BBands(hlc, n = n, sd = sd))
  Indicator <- InitializeIndicator(
    name = "BBands",
    parameters = list(n = n, sd = sd),
    BB.Lower = BB$dn,
    BB.Upper = BB$up,
    BB.Mid = BB$mavg,
    BB.Percent = BB$pctB
  )
  return(Indicator)
}
#' @export
AddBBands <- function(dataSet, n = 20, sd = 2, maType = CalculateSMA){
  # Append Bollinger Band indicator columns, computed from the High/Low/Close
  # columns of `dataSet`, and carry the indicator's metadata attribute over
  # to the combined data set.
  hlc <- dplyr::select(dataSet, dplyr::one_of("High", "Low", "Close"))
  bands <- CalculateBBands(hlc, n = n, maType = maType, sd = sd)

  combined <- dplyr::bind_cols(dataSet, bands)

  # The indicator stores its metadata under an attribute named by its own
  # "Name" attribute; copy that across so downstream code can find it.
  meta_key <- attr(bands, "Name")
  attr(combined, meta_key) <- attr(bands, meta_key)
  return(combined)
}
|
29bb07b5d7f188674ed9a1f0c3258a41e34012cf | 96689fad580675151d46b6219dca9f322f8539ac | /19oct.R | 6a98e196a16d8d806628175d72d7f6e6b831e84d | [] | no_license | dency14/analytics | 0cfe277db9f9f1a86759e4342cf68db3b0ef0b00 | 55b7cba1bdadd612194ee89ed5ba829989ad8b10 | refs/heads/master | 2020-08-02T18:20:30.472761 | 2019-12-01T08:41:48 | 2019-12-01T08:41:48 | 211,462,621 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,781 | r | 19oct.R | library('dplyr')
# Classroom scratch log: dplyr verbs, apply family, reshape2::melt, missing
# values and base plotting (library('dplyr') is loaded on this file's first
# line).
head(mtcars)
mtcars%>%filter(gear==2& mpg>20)
mtcars%>%arrange(hp)
mtcars%>%top_n(1,hp)
mtcars%>%top_n(-1,hp)
mtcars%>%top_n(-3,hp)
mtcars%>%top_n(4,hp)
mtcars%>%filter(gear==3)%>%top_n(3,hp)
mtcars%>%group_by(gear)%>% summarise(total=n())
mtcars%>%group_by()
mtcars%>%group_by(gear)%>% summarise(total=n(), dency= mean(mpg) , rish=max(hp),geeta=median(wt))
mtcars%>% arrange(am,mpg)
mtcars%>% group_by(am,mpg)%>% top_n(1,wt)
mtcars%>% sample_n(2)
mtcars%>% sample_frac(.2)
# trunc() drops the fractional part of the random draws
(data=trunc(rnorm(24,mean=60,sd=10)))
(m1=matrix(data,nrow=6))
colSums(m1)
rowSums(m1)
apply(X=m1,MARGIN = 2,FUN = sum)
apply(X=m1,MARGIN = 2,FUN = sd)
apply(X=m1,MARGIN = 2,FUN = max)
?lapply
lapply(X=m1,FUN = sum)
A=1:5;B=4:9;C=5:10
list1=list(A,B,C)
list1
lapply(X=list1, FUN = mean)
sapply(X=list1, FUN = mean)
library(reshape2)
library(xlsx)
(rollno=1:5)
(gender=sample(c('m','f'),size = 5,replace = T))
sub1=trunc(rnorm(5,60,10))
sub1
sub2=trunc(rnorm(5,65,5))
sub2
sub3=trunc(rnorm(5,70,3))
sub3
# NOTE(review): `students` is referenced here before it is defined on the
# next line — this line errors in a fresh session.
students
students=data.frame(rollno,sub1,sub2,sub3,gender)
students
# NOTE(review): interactive file picker; result `f` is never used below.
f=file.choose()
longdata=melt(students,id.vars = c('rollno','gender'),variable.name = 'subject',value.name='marks')
longdata
#missing values
df= data.frame(A=c(2,NA,9,NA),B=c(4,NA,55,60),C=c(3,4,11,44))
df
is.na(df)
any(is.na(df))
sum(is.na(df))
summary(df)
complete.cases(df)
df[complete.cases(df),]
df=na.omit(df)
df
(df_titanic=read.csv(file = "TitanicDataMissingValues.csv"))
dim(df_titanic)
(list.na=colnames(df_titanic))
View(mtcars)
(cylinders=table(mtcars$cyl))
(barplot(height=table(mtcars$wt)))
(barplot(height=table(mtcars$am)))
?xlab
(barplot(height=table(mtcars$cyl),col=1:7,horiz = F))
# NOTE(review): `d` is not defined anywhere in this script; this call errors.
polygon(d,col="red")
install.packages("sm")
# NOTE(review): this passes the function `table` itself, not a table object.
barplot(height = table)
0067fedf11bafdc8fc77a23e59ec8d62dcd52733 | 976eec1405c3477698f2bca0ddebb350b074b2b8 | /plot6.R | e25096a36c4daf7e7a52e585bb19e27e203d0298 | [] | no_license | bigdatascience/EDAProject2 | c5c4fcb6d60edf804095507f087ed65e413595c7 | 4a3e6ae35cf34c0a6e9540fd1ba2d0cb9b528e5d | refs/heads/master | 2020-04-27T23:17:57.275256 | 2014-05-25T23:11:55 | 2014-05-25T23:11:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,265 | r | plot6.R | unzip(".exdata_data_NEI_data.zip", exdir="./")
# Load the NEI emissions data and the source-classification table
# (the zip is extracted by the unzip() call on this file's first line).
NEI=readRDS("summarySCC_PM25.rds")
SCC=readRDS("Source_Classification_Code.rds")
#Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in Los Angeles County, California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
# Motor vehicles are identified as "On-Road" sectors in the SCC table
SCCMotor=SCC[grepl("On-Road", SCC$EI.Sector, ignore.case=FALSE),]
NEIBalCity=NEI[which(NEI$fips=="24510"),]
NEILA=NEI[which(NEI$fips=="06037"),]
NEIMotorBalCity=subset(NEIBalCity, SCC %in% SCCMotor$SCC)
NEIMotorBalCityTotal=aggregate(NEIMotorBalCity$Emissions ~ NEIMotorBalCity$year,FUN=sum)
NEIMotorLA=subset(NEILA, SCC %in% SCCMotor$SCC)
NEIMotorLATotal=aggregate(NEIMotorLA$Emissions ~ NEIMotorLA$year,FUN=sum)
# Combine both cities into one table: Year, Baltimore total, LA total
NEIMotorCombined=NEIMotorBalCityTotal
NEIMotorCombined[,3]=NEIMotorLATotal[2]
colnames(NEIMotorCombined)=c("Year","TotalEmissionsBC","TotalEmissionsLA")
par(mfrow=c(1,2))
plot(NEIMotorCombined$Year,NEIMotorCombined$TotalEmissionsBC,type="l",xlab="Year",ylab="Total Emissions in tons",main="Baltimore City")
plot(NEIMotorCombined$Year,NEIMotorCombined$TotalEmissionsLA,type="l",xlab="Year",ylab="Total Emissions in tons",main="Los Angeles")
# Copy the on-screen device to a PNG file, then close that device
dev.copy(png, file="Plot6.png")
dev.off()
|
69da886d7930cc539ee7930f5fb85def538252ae | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkIconThemeAppendSearchPath.Rd | afd6f0a9884fdf86bbd236f457e1f46bc1deb7b3 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 479 | rd | gtkIconThemeAppendSearchPath.Rd | \alias{gtkIconThemeAppendSearchPath}
\name{gtkIconThemeAppendSearchPath}
\title{gtkIconThemeAppendSearchPath}
\description{Appends a directory to the search path.
See \code{\link{gtkIconThemeSetSearchPath}}.}
\usage{gtkIconThemeAppendSearchPath(object, path)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkIconTheme}}}
\item{\verb{path}}{directory name to append to the icon path}
}
\details{Since 2.4}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
d6f0bb0b4b857a4b24342d7bc6eacc90f3f86135 | 7e2132998f818a4c4395b1bda40eae67d3ba45d8 | /week_05/day3/app.R | d8fa6acfc373d62f3ca4960d081882f210de2b23 | [] | no_license | jwwood2020/codeclan_homework_johnwood | eed4df47389e6ef5717c05800ba58c07662218a3 | 14e75c34b26ddc0108a94c901c3c11742dc7970f | refs/heads/main | 2023-06-02T08:29:17.681213 | 2021-06-16T19:52:03 | 2021-06-16T19:52:03 | 352,730,089 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,113 | r | app.R | library(shiny)
library(tidyverse)
library(CodeClanData)
library(shinythemes)
# UI: two tabs — radio-button inputs (season + medal type) and the plot.
# (library(shiny) is loaded on this file's first line.)
ui <- fluidPage(
  theme = shinytheme("flatly"),
  titlePanel(tags$h1("Five Country Medal Comparison")),
  tabsetPanel(
    tabPanel( "Choose your inputs",
             fluidRow(
               column(4,
                      radioButtons(
                        inputId = "season",
                        label = "Summer or Winter Olympics?",
                        choices = c(
                          "Summer",
                          "Winter"
                        )
                      ),
               ),
               column(8,
                      radioButtons(
                        inputId = "medal",
                        label = "Medal type",
                        choices = c(
                          "Gold",
                          "Silver",
                          "Bronze"
                        )
                      )
               ),
             ),
    ),
    tabPanel(
      "Medals",
      plotOutput("medal_plot")
    )
  )
)
# Server: filter CodeClanData::olympics_overall_medals down to five teams and
# the selected season/medal, then draw a bar chart coloured by medal type.
server <- function(input, output) {
  output$medal_plot <- renderPlot({
    olympics_overall_medals %>%
      filter(team %in% c(
        "United States",
        "Soviet Union",
        "Germany",
        "Italy",
        "Great Britain"
      )
      ) %>%
      filter(medal == input$medal) %>%
      filter(season == input$season) %>%
      ggplot() +
      aes(
        x = team,
        y = count,
        fill = count
      ) +
      # a single fill colour chosen from the selected medal type
      geom_col(fill = case_when(
        input$medal == "Gold" ~ "gold2",
        input$medal == "Silver" ~ "grey50",
        input$medal == "Bronze" ~ "orange4"
      )
      )
  })
}
shinyApp(
  ui = ui,
  server = server
)
|
03750625290408c4705be6fa396693ef892858b3 | ed049e9bec00ceb3d8cc5386b8a42ef92efdb9cf | /R/auto_style.R | df6394a2b32e61efaf02b21968592bc719ce7b69 | [] | no_license | harryprince/xltabr | 1477bb1ba45c1bdbafa459ad9b209f99298a61cf | 1ed1c309112bdab3a1d718934034ec2ba79e1572 | refs/heads/master | 2020-03-27T10:44:44.314314 | 2017-11-28T10:39:28 | 2017-11-28T10:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,656 | r | auto_style.R | # Functions in this file are used to attempt to auto-detect styles from the information in the table
#' Use the data type of the columns to choose an automatic Excel format for body cells
#'
#' This function reads styling from the styles defined [here](https://github.com/moj-analytical-services/xltabr/blob/master/inst/extdata/number_format_defaults.csv)
#' @param tab a table object
#' @param overrides a list containing any manual overrides where the user wants to provide their own style name
#' @export
#' @examples
#' body_data <- readRDS(system.file("extdata", "test_number_types.rds", package="xltabr"))
#' tab <- initialise()
#' tab <- add_body(tab, body_data)
#' tab <- auto_style_number_formatting(tab)
auto_style_number_formatting <- function(tab, overrides = list()) {
  # Adds a number-format style to tab$body$meta_col_ for every body column.
  # The style is looked up from the column's class (via the packaged
  # number_format_defaults.csv), unless the caller supplies an explicit style
  # name for that column in `overrides`.
  col_classes <- sapply(tab$body$body_df_to_write, class)

  # Build a class -> style_name lookup from the packaged defaults file
  path <- get_num_format_path()
  lookup_df <- utils::read.csv(path, stringsAsFactors = FALSE)
  lookup <- lookup_df$style_name
  names(lookup) <- lookup_df$class

  # Iterate through body columns applying lookup or override if exists
  body_cols <- names(tab$body$body_df_to_write)
  for (this_col_name in body_cols) {
    if (this_col_name %in% names(overrides)) {
      this_style <- overrides[[this_col_name]]
    } else {
      # A column can have several classes (e.g. POSIXct/POSIXt); use the first
      this_style <- lookup[col_classes[[this_col_name]][1]]
      if (is.na(this_style)) {
        stop(paste0("Trying to autoformat column of class ", col_classes[[this_col_name]], " but no style defined in number_format_defaults.csv"))
      }
    }
    if (is_null_or_blank(tab$body$meta_col_[this_col_name])) {
      tab$body$meta_col_[this_col_name] <- this_style
    } else {
      # Bug fix: previously this appended lookup[this_col_name] (the class
      # lookup indexed by *column name*, which is always NA); append the
      # resolved style instead.
      tab$body$meta_col_[this_col_name] <- paste(tab$body$meta_col_[this_col_name], this_style, sep = "|")
    }
  }
  tab
}
# Uses presence of '(all)' in the leftmost columns to detect the summary level
# of the row, and then writes this style information to tab$body$body_df
auto_style_body_rows <- function(tab, indent = FALSE, keyword = "(all)") {
  # Style body rows by summary level. Falls back to auto-detection when the
  # caller has not named any left header columns.
  no_headers_given <- is.null(tab$body$left_header_colnames)
  if (no_headers_given) {
    tab <- auto_detect_left_headers(tab)
  }

  # Still nothing to work with: there are no header columns to style
  if (is.null(tab$body$left_header_colnames)) {
    message("No left header columns detected or provided")
    return(tab)
  }

  # Assign the 'summary level' (title_1 is most prominent, title_2 next, etc.)
  auto_detect_body_title_level(tab, keyword)
}
#' Uses the presence of '(all)' in the leftmost columns of data to detect that these
#' columns are really left headers rather than body columns
#'
#' Populates tab$body$left_header_colnames automatically
#' @param tab a tab object
#' @param keyword The keyword to use to detect summarisation. Uses '(all)' by default because this is what reshape2::dcast uses
#' @export
#' @examples
#' crosstab <- read.csv(system.file("extdata", "example_crosstab.csv", package="xltabr"))
#' tab <- initialise()
#' tab <- add_body(tab, crosstab)
#' tab <- auto_detect_left_headers(tab)
auto_detect_left_headers <- function(tab, keyword = "(all)") {
  # Populates tab$body$left_header_colnames: the leftmost run of character
  # columns, truncated at the rightmost such column that contains `keyword`.
  # Returns the (possibly modified) tab.
  col_classes <- sapply(tab$body$body_df_to_write, class)

  # No character columns at all -> nothing can be a left header
  if (!("character" %in% col_classes)) {
    tab$body$left_header_colnames <- NULL
    return(tab)
  }

  # Index of the rightmost column in the leading run of character columns.
  # Bug fix: when *every* column is character, which(...) is empty and
  # min(...) - 1 was Inf, making `Inf:1` below error; treat that case as
  # "the run extends to the last column".
  non_char <- which(col_classes != "character")
  if (length(non_char) == 0) {
    rightmost_character <- length(col_classes)
  } else {
    rightmost_character <- min(non_char) - 1
  }

  # First column is not character -> no leading run of character columns
  if (rightmost_character == 0) {
    tab$body$left_header_colnames <- NULL
    return(tab)
  }

  # Scan right-to-left for the first character column containing the keyword;
  # that column is the last left header.
  found <- FALSE
  for (col in rightmost_character:1) {
    this_col <- tab$body$body_df_to_write[, col]
    if (any(keyword == this_col)) {
      found <- TRUE
      break
    }
  }

  # TODO: this is not robust when multiple columns have the same name
  if (found) {
    left_col_names <- names(tab$body$body_df_to_write)[1:col]
  } else {
    left_col_names <- NULL
  }
  tab$body$left_header_colnames <- left_col_names
  tab
}
# For title level
# Note we assume there are a maximum of 5 title levels
get_inv_title_count_title <- function(left_headers_df,
                                      keyword,
                                      allcount_to_level_translate = NULL) {
  # Map the number of keyword (e.g. "(all)") cells in each row of the left
  # headers to a title level. Rows with more keyword cells are more
  # summarised and therefore get a more prominent (lower-numbered) title:
  #
  # +-------+-------+-------+-----------+-------------+
  # | col1  | col2  | col3  | all_count | title_level |
  # +-------+-------+-------+-----------+-------------+
  # | (all) | (all) | (all) | 3         | title_3     |
  # | (all) | (all) | -     | 2         | title_4     |
  # | (all) | -     | -     | 1         | title_5     |
  # | -     | -     | -     | 0         | (none)      |
  # +-------+-------+-------+-----------+-------------+
  #
  # The default translation assumes a maximum of 5 title levels.
  if (is.null(allcount_to_level_translate)) {
    allcount_to_level_translate <- c("0" = NA, "1" = 5, "2" = 4, "3" = 3,
                                     "4" = 2, "5" = 1)
  }

  keyword_count <- rowSums(left_headers_df == keyword)
  allcount_to_level_translate[as.character(keyword_count)]
}
# Translate per-row keyword counts into indent levels.
# Rows with fewer keyword matches are more detailed and get deeper indents;
# the most-summarised rows (maximum keyword count) get NA, i.e. no indent.
get_inv_title_count_indent <- function(left_headers_df, keyword) {
  keyword_count <- rowSums(left_headers_df == keyword)
  indent_level <- max(keyword_count) - keyword_count
  # A zero here means "most summarised row": suppress the indent entirely
  indent_level[indent_level == 0] <- NA
  indent_level
}
#' Autodetect the 'title level' of each row in the cross tabulation
#'
#' Title 1 is the most prominent style, title 2 the next, and so on.
#' The count of the summarisation keyword (by default '(all)', as produced by
#' reshape2::dcast) in each row's left headers is translated into a title
#' level via `allcount_to_level_translate`.
#' @param tab a tab object
#' @param keyword The keyword to use to detect summarisation. Uses '(all)' by default because this is what reshape2::dcast uses
#' @param allcount_to_level_translate A named vector that provides a lookup - by default c("0" = NA, "1" = 5, "2" = 4, "3" = 3, "4" = 2, "5" = 1), which says that e.g. allcount 1 results in title_5 etc
#' @export
#' @examples
#' crosstab <- read.csv(system.file("extdata", "example_crosstab.csv", package="xltabr"))
#' tab <- initialise()
#' tab <- add_body(tab, crosstab, left_header_colnames = c("drive", "age"))
#' tab <- auto_detect_body_title_level(tab)
auto_detect_body_title_level <- function(tab, keyword = "(all)", allcount_to_level_translate = NULL) {
  # No left headers means there is nothing to classify
  if (is.null(tab$body$left_header_colnames)) {
    return(tab)
  }
  left_headers_df <- tab$body$body_df_to_write[tab$body$left_header_colnames]
  title_levels <- get_inv_title_count_title(
    left_headers_df,
    keyword = keyword,
    allcount_to_level_translate = allcount_to_level_translate
  )
  has_title <- not_na(title_levels)
  style_suffix <- paste0("title_", title_levels[has_title])
  # Append the detected title style to both per-row meta columns
  for (meta_name in c("meta_row_", "meta_left_header_row_")) {
    tab$body$body_df[has_title, meta_name] <-
      paste(tab$body$body_df[[meta_name]][has_title], style_suffix, sep = "|")
  }
  tab
}
#' Consolidate the header columns into one, taking the rightmost value and applying indent
#'
#' e.g. a | b | (all) -> b
#' e.g. (all) | (all) | (all) -> Grand Total
#'
#' @param tab a tab object
#' @param keyword The keyword to use to detect summarisation. Uses '(all)' by default because this is what reshape2::dcast uses
#' @param total_text The text to use for the grand total (a row where all the left headers are '(all)'. Defaults to Grand Total.
#' @param left_header_colname The column name of left header column, which is now a single column.
#'
#' @export
#' @examples
#' crosstab <- read.csv(system.file("extdata", "example_crosstab.csv", package="xltabr"))
#' tab <- initialise()
#' tab <- add_body(tab, crosstab, left_header_colnames = c("drive", "age"))
#' tab <- auto_style_indent(tab)
auto_style_indent <- function(tab, keyword = "(all)", total_text = NULL, left_header_colname = " ") {
  # Collapses the left-header columns of tab$body into a single column:
  # each row keeps its rightmost value that is not `keyword`, and rows made
  # up entirely of `keyword` become `total_text`.  Indent styles matching
  # each row's level of summarisation are appended to meta_left_header_row_,
  # and meta_col_ / top_headers are shrunk to match the single new column.
  if (is.null(total_text)) {
    total_text = "Grand Total"
  }
  tab$misc$coalesce_left_header_colname = left_header_colname
  # Nothing to do if left headers were never identified
  if (is.null(tab$body$left_header_colnames )) {
    message("You've called auto_style_indent, but there are no left_header_colnames to work with")
    return(tab)
  }
  left_headers_df <- tab$body$body_df_to_write[tab$body$left_header_colnames]
  orig_left_header_colnames <- tab$body$left_header_colnames
  # count '(all)'
  to_count <- (left_headers_df == keyword)
  all_count <- rowSums(to_count)
  # paste together all left headers (delimiter chosen to be unlikely in data)
  concat <- do.call(paste, c(left_headers_df, sep = "=|="))
  # Split concatenated string into elements, and find last element that's not (all)
  elems <- strsplit(concat, "=\\|=", perl = TRUE)
  last_elem <- lapply(elems, function(x) {
    x <- x[x != keyword]
    if (length(x) == 0) {
      # Every cell was the keyword: this is the grand-total row
      x <- total_text
    }
    utils::tail(x,1)
  })
  new_left_headers <- unlist(last_elem)
  # Remove original left_header_columns and replace with new single column
  cols <- !(names(tab$body$body_df_to_write) %in% tab$body$left_header_colnames)
  tab$body$body_df_to_write <- tab$body$body_df_to_write[cols]
  tab$body$body_df_to_write <- cbind(new_left_headers = new_left_headers, tab$body$body_df_to_write, stringsAsFactors = FALSE)
  names(tab$body$body_df_to_write)[1] <- left_header_colname
  tab$body$left_header_colnames <- c(left_header_colname)
  # Now need to fix meta_left_header_row_ to include indents
  # Set meta_left_header_row_ to include relevant indents (indent_1, indent_2, ...)
  all_count_inv <- get_inv_title_count_indent(left_headers_df, keyword = keyword)
  col <- tab$body$body_df$meta_left_header_row_
  rows_indices_to_change <- not_na(all_count_inv)
  concat <- all_count_inv[rows_indices_to_change]
  concat <- paste0("indent_", concat)
  tab$body$body_df[rows_indices_to_change,"meta_left_header_row_"] <- paste(col[rows_indices_to_change], concat,sep = "|")
  # Update body$meta_col_: keep the first entry for the new coalesced
  # column, then the meta entries of the surviving (non-left-header) columns
  tab$body$meta_col_ <- c(left_header_colname = tab$body$meta_col_[[1]], tab$body$meta_col_[cols])
  names(tab$body$meta_col_)[1] <- left_header_colname
  # Finally, if tab$top_headers has too many cols, remove the extra cols, removing from 2:n
  if (not_null(tab$top_headers$top_headers_list)) {
    len_th_cols <- length(tab$top_headers$top_headers_list[[1]])
    if (len_th_cols > length(colnames(tab$body$body_df_to_write))) {
      cols_to_delete <- 2:length(orig_left_header_colnames)
      cols_to_retain <- !(1:len_th_cols %in% cols_to_delete)
      tab$top_headers$top_headers_col_style_names <- tab$top_headers$top_headers_col_style_names[cols_to_retain]
      # NOTE(review): `length(x)` here visits only the LAST top-header row;
      # if every row was meant to be trimmed this should probably be
      # seq_along(tab$top_headers$top_headers_list) -- confirm intent.
      for (r in length(tab$top_headers$top_headers_list)) {
        tab$top_headers$top_headers_list[[r]] <- tab$top_headers$top_headers_list[[r]] [cols_to_retain]
        tab$top_headers$top_headers_list[[r]][1] <- left_header_colname
      }
    }
  }
  tab
}
# Append a right-border style to the meta entry of the rightmost
# left-header column, so a vertical rule separates headers from the body.
add_left_header_vertical_border <- function(tab, stylename = "right_border") {
  border_col <- length(tab$body$left_header_colnames)
  existing_style <- tab$body$meta_col_[border_col]
  tab$body$meta_col_[border_col] <- paste(existing_style, stylename, sep = "|")
  tab
}
|
886ccd50c11e31879499099fed519ec73db7e44a | 589fbea2ca497d0d2e4c56f323cc3034bc5513c4 | /2020-08-25-chopped.R | 4b94683bc4999065804a65e9b9eb0d6b5100d5d8 | [] | no_license | aekendig/TidyTuesday | e6b64398e826d64533ffb83e9d8f97816596ba6b | 67d0ba97c1b1b1d09902a46a161cff38f7d507f5 | refs/heads/master | 2021-02-13T14:16:40.501075 | 2020-10-07T12:19:47 | 2020-10-07T12:19:47 | 244,703,230 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,342 | r | 2020-08-25-chopped.R | #### set-up ####
# description
# https://github.com/rfordatascience/tidytuesday/blob/master/data/2020/2020-08-25/readme.md
# clear environment
rm(list = ls())
# load packages
library(tidyverse)
# install.packages("NutrienTrackeR")
library(NutrienTrackeR)
# import data
chopped <- readr::read_tsv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-08-25/chopped.tsv')
data(food_composition_data)
#### judge scores ####
# Long format: one row per (episode, judge) pair, restricted to rated
# episodes.  The case_when() collapses misspelled judge names so each
# judge aggregates under a single spelling.
judges <- chopped %>%
  filter(!is.na(episode_rating)) %>%
  select(episode_rating, judge1, judge2, judge3) %>%
  pivot_longer(cols = starts_with("judge"), names_to = "judge_number", values_to = "judge") %>%
  mutate(judge = case_when(judge == "Aarón Sanchez" ~ "Aarón Sánchez",
                           judge %in% c("Amanda Freita", "Amanda Frietag") ~ "Amanda Freitag",
                           judge == "Chris Santo" ~ "Chris Santos",
                           judge == "Geoffrey Zacharian" ~ "Geoffrey Zakarian",
                           judge == "Jody William" ~ "Jody Williams",
                           judge == "Missy Robbin" ~ "Missy Robbins",
                           judge == "Maneet Chauhaun" ~ "Maneet Chauhan",
                           TRUE ~ judge))
# Mean episode rating and number of rated episodes per judge
judge_rating <- judges %>%
  group_by(judge) %>%
  summarise(rating = mean(episode_rating),
            episodes = n()) %>%
  ungroup()
# visualize
# Bar chart of mean rating per judge (from the precomputed summary),
# shaded by how many episodes each judge appears in.
# NOTE(review): with coord_flip(), ggplot flips the axis labels along with
# the axes, so ylab("Judge") labels the rating axis -- the xlab/ylab calls
# here look swapped; confirm against the rendered plot.
ggplot(judge_rating, aes(x = reorder(judge, rating), y = rating, fill = episodes)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  ylab("Judge") +
  xlab("Episode rating") +
  theme(axis.text.y = element_text(size = 6))
# Same idea, but letting stat_summary() compute the per-judge means on the fly
ggplot(judges, aes(x = reorder(judge, episode_rating), y = episode_rating)) +
  stat_summary(fun = "mean", geom = "bar") +
  coord_flip() +
  ylab("Judge") +
  xlab("Episode rating") +
  theme(axis.text.y = element_text(size = 6))
#### edit data ####
# test: separate ingredients
app_test <- str_split(chopped$appetizer, ", ") %>% unlist()
# separate ingredients:
# select columns
# separate appetizer ingredients into 4 columns
# make dataset long
# remove NA's (fewer than 4 ingredients)
appetizers <- chopped %>%
  select(series_episode, appetizer) %>%
  separate(appetizer, c("a1", "a2", "a3", "a4"), sep = ", ") %>%
  pivot_longer(cols = starts_with("a"), names_to = "ingredient", values_to = "food") %>%
  filter(!is.na(food))
# extract food composition data (first database = USDA) and keep a few
# nutrient columns alongside the food identifiers
usda_foods <- food_composition_data[[1]] %>%
  unlist() %>%
  as_tibble() %>%
  rename(water = 'Water (g)',
         energy = 'Energy (kcal)',
         sugar = 'Sugars, total (g)') %>%
  select(food_id:food_group, water, energy, sugar)
# test: food name function
# NOTE: the next five lines are interactive exploration -- they print to
# the console and their results are not used downstream.
appetizers$food[10]
findFoodName(keywords = c("grapefruit"), food_database = "USDA")
name_test <- findFoodName(keywords = str_split(appetizers$food[10], " ") %>% unlist(), food_database = "USDA")
str(name_test)
names(name_test)
# not able to match ingredients with this food list
# do foods repeat?
appetizers %>%
  group_by(food) %>%
  summarise(episodes = length(series_episode)) %>%
  filter(episodes > 1)
# yes
# separate ingredients for entree and dessert
entrees <- chopped %>%
  select(series_episode, entree) %>%
  separate(entree, c("a1", "a2", "a3", "a4", "a5", "a6"), sep = ", ") %>%
  pivot_longer(cols = starts_with("a"), names_to = "ingredient", values_to = "food") %>%
  filter(!is.na(food))
desserts <- chopped %>%
  select(series_episode, dessert) %>%
  separate(dessert, c("a1", "a2", "a3", "a4", "a5"), sep = ", ") %>%
  pivot_longer(cols = starts_with("a"), names_to = "ingredient", values_to = "food") %>%
  filter(!is.na(food))
# combine ingredients across meal types:
# one row per (episode, ingredient), tagged with the course it appeared in
ingredients <- appetizers %>%
  select(-ingredient) %>%
  mutate(meal_type = "appetizer") %>%
  full_join(entrees %>%
              select(-ingredient) %>%
              mutate(meal_type = "entree")) %>%
  full_join(desserts %>%
              select(-ingredient) %>%
              mutate(meal_type = "dessert"))
# summarize number of repeats:
# total_repeats = how often an ingredient appears overall;
# uses = how often it appears within each course
repeats <- ingredients %>%
  group_by(food) %>%
  mutate(total_repeats = n()) %>%
  group_by(food, total_repeats, meal_type) %>%
  summarise(uses = n()) %>%
  ungroup()
# order food by total repeats
# save the order
# BUG FIX: `ties.method` was previously supplied as a second argument to
# mutate() -- which silently created a column literally named ties.method
# holding "random" -- instead of being passed to rank().  It now goes to
# rank() itself, as intended.
repeat_levels <- repeats %>%
  select(food, total_repeats) %>%
  unique() %>%
  mutate(repeat_order = rank(total_repeats, ties.method = "random"))
# subset for highest repeats (> 10 total uses), attach the ordering, and
# turn food / meal_type into factors so the plot sorts and stacks sensibly
repeats_high <- repeats %>%
  filter(total_repeats > 10) %>%
  left_join(repeat_levels) %>%
  mutate(food = fct_reorder(food, repeat_order),
         meal_type = fct_relevel(meal_type, "appetizer", "entree"))
#### visualize ####
# Stacked bar chart: uses per course for each frequently-repeated
# ingredient (repeats_high), ordered by total repeats
ggplot(repeats_high, aes(x = food, y = uses, fill = meal_type)) +
  geom_bar(stat = "identity") +
  scale_fill_viridis_d(name = "Course") +
  xlab("Ingredient") +
  ylab("Uses") +
  ggtitle("Frequently Used Chopped Ingredients") +
  theme_bw() +
  theme(axis.text.x = element_text(color = "black", angle = 45, hjust = 1, size = 12),
        axis.text.y = element_text(color = "black", size = 12),
        axis.title = element_text(size = 14),
        plot.title = element_text(size = 16, hjust = 0.5),
        panel.background = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 12))
|
a5ed8d077402da9c89b1584e183d48aa332d78eb | f6123e538f3a530126c4673be2c4679e63f505bd | /R/start.valgrid.R | 1ae2e66a054010938e74528f91d0e1020ded7df4 | [] | no_license | cran/pencopula | 8fe1124a1447f28b05a72648b4934c87336f75af | dc7411113e6a15bc9c15ebebb17e3348fb2cb30b | refs/heads/master | 2021-01-01T17:17:14.765551 | 2018-08-31T16:40:06 | 2018-08-31T16:40:06 | 17,698,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,897 | r | start.valgrid.R | start.valgrid <- function(penden.env) {
p <- get("p",penden.env)
X.knots <- matrix(NA,get("DD",penden.env),p)
tilde.Psi.d.knots.start.r <- array(NA, dim=c(dim(X.knots)[1],get("ddb",penden.env),p))
for(j in 1:p) X.knots[,j] <- get("knots.t",penden.env)[get("Index.basis.D",penden.env)[,j]]
env.extend <- list()
length.cond <- get("ddb",penden.env)
for(j in 1:p) {
name <- c(paste("y",j,sep=""))
env.extend[[noquote(name)]] <- seq(0,1,length=length.cond)
}
assign("X.knots.g.all",expand.grid(env.extend),penden.env)
if(get("adapt.grid",penden.env)) grid.points(penden.env)
if(get("adapt.grid",penden.env)) tilde.Psi.d.knots.start.g <- array(NA, dim=c(dim(get("X.knots.g",penden.env))[1],get("ddb",penden.env),p))
tilde.Psi.d.knots.start.g.all <- array(NA, dim=c(dim(get("X.knots.g.all",penden.env))[1],get("ddb",penden.env),p))
assign("X.knots",X.knots,penden.env)
for (j in 1:p)
{
tilde.Psi.d.knots.start.r[,,j] <- hierarch.bs(X.knots[,j], d = get("d",penden.env), plot.bsp = FALSE,typ=3,penden.env,int=FALSE)$B.tilde
if(get("adapt.grid",penden.env)) tilde.Psi.d.knots.start.g[,,j] <- hierarch.bs(get("X.knots.g",penden.env)[,j], d = get("d",penden.env), plot.bsp =FALSE,typ=3,penden.env,int=FALSE)$B.tilde
tilde.Psi.d.knots.start.g.all[,,j] <- hierarch.bs(get("X.knots.g.all",penden.env)[,j], d = get("d",penden.env), plot.bsp =FALSE,typ=3,penden.env,int=FALSE)$B.tilde
}
assign("tilde.PSI.d.D.knots.start.r",tilde.Psi.d.knots.start.r[,get("Index.basis.D",penden.env)[,1],1],penden.env)
if(get("adapt.grid",penden.env)) assign("tilde.PSI.d.D.knots.start.g",tilde.Psi.d.knots.start.g[,get("Index.basis.D",penden.env)[,1],1],penden.env)
assign("tilde.PSI.d.D.knots.start.g.all",tilde.Psi.d.knots.start.g.all[,get("Index.basis.D",penden.env)[,1],1],penden.env)
for (j in 2:p)
{
assign("tilde.PSI.d.D.knots.start.r",get("tilde.PSI.d.D.knots.start.r",penden.env) * tilde.Psi.d.knots.start.r[,get("Index.basis.D",penden.env)[,j],j],penden.env)
if(get("adapt.grid",penden.env)) assign("tilde.PSI.d.D.knots.start.g",get("tilde.PSI.d.D.knots.start.g",penden.env) * tilde.Psi.d.knots.start.g[,get("Index.basis.D",penden.env)[,j],j],penden.env)
assign("tilde.PSI.d.D.knots.start.g.all",get("tilde.PSI.d.D.knots.start.g.all",penden.env) * tilde.Psi.d.knots.start.g.all[,get("Index.basis.D",penden.env)[,j],j],penden.env)
}
if(get("base",penden.env)=="B-spline") assign("ck.val",matrix(solve(get("tilde.PSI.d.D.knots.start.r",penden.env),rep(1,get("DD",penden.env)))),penden.env)
if(get("base",penden.env)=="Bernstein") assign("ck.val",matrix(rep(1/get("DD",penden.env)),get("DD",penden.env)),penden.env)
#if((get("base",penden.env)=="Bernstein") & (p>2)) assign("ck.val",matrix(solve(get("tilde.PSI.d.D.knots.start.r",penden.env),rep(1,get("DD",penden.env)))),penden.env)
}
|
cf5e62ce560c9683382bc16c8a477df763e4d459 | 52680e152f5c2c4a81d340eafc7bd9ee2d2a8e70 | /1_data_prep.R | 01284101bb9ce9863c3f0e8889cb6693b2763ff8 | [] | no_license | AdamCSmithCWS/CWS_National_Harvest_Survey | 31acfa47b3d066bbfe77fdbcc01dbdb4247a7d8f | e9db5fcb2a64064d52b775ed20b12c333699efbe | refs/heads/main | 2023-07-27T00:18:30.561752 | 2023-07-15T11:07:36 | 2023-07-15T11:07:36 | 340,784,801 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,610 | r | 1_data_prep.R |
####### the following commented out lines ~ 400 lines
## represent the add-hoc processes required to load all the
## historical harvest survey data
## the line :load("full data prep updated harvest model.RData")
## on about line-400 will load everything created below
###
# species changes for final harvest survey estimates ----------------------
### recode all 1720 as 1722 (large race Canada Geese)
### recode all 1699 and 1690 as 1692 (white-phase snow goose)
### recode all 1691 as 1693 (blue-phase snow goose)
### Drop Black Brant - 1740
### drop Eurasian Green-winged Teal - 1380
### consider splitting eastern and western Harlequin ducks
# 1550 = western, 1551 = eastern?
### consider splitting eastern and western Barrow's Goldeneye
# 1520 = western, 1521 = eastern?
# Survey years to process: 1976 through the most recent season (Y)
Y <- 2022
years <- 1976:Y
names(years) <- paste(years)
# NOTE(review): home.fold1 is assigned twice; only the second (M: drive)
# path takes effect.  Keep whichever matches the machine running this.
home.fold1 <- "C:/Users/smithac/OneDrive - EC-EC/Harvest Survey A146/"
home.fold1 <- "M:/My Documents/Harvest Survey A146/"
# C:\Users\smithac\OneDrive - EC-EC\Harvest Survey A146
home.fold <- getwd()
# setwd(home.fold)
library(foreign)
#library(runjags)
library(rjags)
library(tidyverse)
# Path to the local SAS installation used by foreign::read.ssd()
sashome <- "C:\\Program Files\\SASHome\\SASFoundation\\9.4"
provs = c("AB","BC","SK","MB","ON","PQ","NS","PE","NB","NF","NT","YT")#,"NU") #All prov
#ignoring territories above
# Species lookup tables: English names and AOU code reconciliation
sps <- read.csv(paste(home.fold,"/data/Bird_names_2023.csv", sep = ""))
aou_rec <- read.csv("data/Reconcile_AOU_codes_2023.csv")
species <- unique(sps[which(sps$group %in% c("duck","goose","murre")),"specieslevelenglish"])
species <- species[-which(species == "Hybrid Mallard/Northern Pintail")]
gnames <- unique(sps[which(sps$group == "goose"),"specieslevelenglish"])
dnames <- unique(sps[which(sps$group == "duck"),"specieslevelenglish"])
dnames <- dnames[-which(dnames == "Hybrid Mallard/Northern Pintail")]
#
#
#
goose <- TRUE ### change to false, if no goose species included (simplifies the PEF file extraction)
murre <- T #change to fales if no murre species should be included
zone <- T ### change to false, if provincial summaries are desired
# #
# #
# ############## extracting harvest survey data for all years
#
# Province/zone and caste lookup tables
provzone = read.csv("data/province and zone table.csv",stringsAsFactors = F)
casteslist = read.csv("data/caste table.csv",stringsAsFactors = F)
#
#
# #
# #
# Per-year containers for the harvest questionnaires (harvw) and the duck,
# goose, and murre calendars (cald / calg / calm), indexed by year name
harvw <- list()
length(harvw) <- length(years)
names(harvw) <- as.character(years)
cald = harvw
calg = cald
calm = cald
# Columns retained from the species-composition (parts) files
cls = c("PRHUNT",
        "ZOHUNT",
        "AOU",
        "MONH",
        "DAYH",
        "BAGE",
        "BSEX",
        "PAGE",
        "SAMPLE",
        "PERMIT",
        "YEAR",
        #"FOLYEAR",
        #"JDLWA",
        "YRHUNT",
        #"JDHUN",
        "WEEK")
#
# Read every year's SAS data sets (requires a local SAS install) and
# accumulate: harvest questionnaires, calendars, permit sales, population
# sizes, and the species-composition (parts) files.
for (y in years){
  dir.yr <- paste0(home.fold1,y)
  fil.yr <- paste0("harv",substring(y,3,4),"w")
  harvw[[as.character(y)]] <- read.ssd(libname = dir.yr,
                                       sectionnames = fil.yr,
                                       sascmd = file.path(sashome, "sas.exe"))
  fil.yr <- paste0("dcal",substring(y,3,4))
  cald[[as.character(y)]] <- read.ssd(libname = dir.yr,
                                      sectionnames = fil.yr,
                                      sascmd = file.path(sashome, "sas.exe"))
  fil.yr <- paste0("gcal",substring(y,3,4))
  calg[[as.character(y)]] <- read.ssd(libname = dir.yr,
                                      sectionnames = fil.yr,
                                      sascmd = file.path(sashome, "sas.exe"))
  # Murre calendars only exist from 2013 onward
  if(y > 2012){
    fil.yr <- paste0("mcal",substring(y,3,4))
    calm[[as.character(y)]] <- read.ssd(libname = dir.yr,
                                        sectionnames = fil.yr,
                                        sascmd = file.path(sashome, "sas.exe"))
  }
  fil.yr = paste0("persal",substring(y,3,4))
  tmpp <- read.ssd(libname = paste0(home.fold1,"/PermitSales"),
                   sectionnames = fil.yr,
                   sascmd = file.path(sashome, "sas.exe"))
  # Permit-sale years are stored as 2 digits; expand to 4 digits
  if(any(tmpp$YEAR > 50,na.rm = T) ){
    tmpp$YEAR = tmpp$YEAR+1900
  }else{
    tmpp$YEAR = tmpp$YEAR+2000
  }
  fil.yr = paste0("popsiz",substring(y,3,4))
  tmppop <- read.ssd(libname = paste0(home.fold1,"/PopulationSize"),
                     sectionnames = fil.yr,
                     sascmd = file.path(sashome, "sas.exe"))
  ### if desired to swap BAGE for PAGE (geese), then scsYY is required, instead of scsYYe
  ### but then additional changes are needed to align with old data
  fil.yr <- paste0("scs",substring(y,3,4),"e")
  tmp <- read.ssd(libname = dir.yr,
                  sectionnames = fil.yr,
                  sascmd = file.path(sashome, "sas.exe"))
  # fil.yr <- paste0("scs",substring(y,3,4))
  # tmp2 <- read.ssd(libname = dir.yr,
  #                 sectionnames = fil.yr,
  #                 sascmd = file.path(sashome, "sas.exe"))
  #
  # tmp2u <- unique(tmp2[,c("PRHUNT","ZOHUNT","AOU","MONH","DAYH","BAGE","BSEX","PAGE","PERMIT")])
  # Fill blank hunting locations from the sale location.
  # NOTE(review): the second line re-evaluates which(PRHUNT == "") AFTER
  # PRHUNT has been filled from PRSALE, so ZOHUNT is only filled for rows
  # where PRSALE was itself blank -- confirm this is the intended order.
  tmp[which(tmp$PRHUNT == ""),"PRHUNT"] <- tmp[which(tmp$PRHUNT == ""),"PRSALE"]
  tmp[which(tmp$PRHUNT == ""),"ZOHUNT"] <- tmp[which(tmp$PRHUNT == ""),"ZOSALE"]
  ## fixing a handful of years in which column names varied and years were recorded as 2 digits
  if(c("YHUN") %in% names(tmp)){
    names(tmp)[which(names(tmp) == "YHUN")] <- "YRHUNT"
  }
  if(any(tmp$YEAR < min(years),na.rm = T)){
    tmp[which(tmp$YEAR < min(years)),"YEAR"] <- tmp[which(tmp$YEAR < min(years)),"YEAR"]+1900
  }
  if(any(tmp$YRHUNT < min(years),na.rm = T)){
    tmp[which(tmp$YRHUNT < min(years)),"YRHUNT"] <- tmp[which(tmp$YRHUNT < min(years)),"YRHUNT"]+1900
  }
  # if(any(tmp$JDHUN < 1000)){
  #   tmp[which(tmp$JDHUN < 1000),"JDHUN"] <- tmp[which(tmp$JDHUN < 1000),"JDHUN"]+((y-1900)*10)
  # }
  # NOTE(review): `miscls == "PAGE"` is a scalar comparison; it warns (and
  # in recent R errors) if more than one column from cls is missing.
  miscls = cls[-which(cls %in% names(tmp))]
  if(length(miscls) > 0){
    if(miscls == "PAGE"){
      tmp$PAGE <- ""
    }
  }
  tmp = tmp[,cls]
  if(y == years[1]) {
    outscse <- tmp
    perms = tmpp
    popsiz = tmppop
  }else{
    outscse <- rbind(outscse,tmp)
    perms = rbind(perms,tmpp)
    popsiz = rbind(popsiz,tmppop)
  }
  #
  print(y)
}#y
#save.image(file = "data/stored_SAS_download.RData")
# Cache the raw downloads so the slow SAS import need not be repeated
saveRDS(outscse,paste0("data/outscse",Y,".rds"))
saveRDS(perms,paste0("data/perms",Y,".rds"))
saveRDS(popsiz,paste0("data/popsiz",Y,".rds"))
saveRDS(cald,paste0("data/cald",Y,".rds"))
saveRDS(harvw,paste0("data/harvw",Y,".rds"))
saveRDS(calg,paste0("data/calg",Y,".rds"))
saveRDS(calm,paste0("data/calm",Y,".rds"))
#if(any(calm[[as.character(2019)]]$YEAR != 2019)){stop("ERROR murre calendar info for 2019 is wrong")}
# Force every calendar's YEAR column to match the year it was filed under,
# reporting any year that needed correcting
for(y in years){
  if(any(cald[[as.character(y)]]$YEAR != y)){print(paste("Warning duck calendar info for",y," had to be corrected == ",unique(cald[[as.character(y)]]$YEAR)))
    cald[[as.character(y)]]$YEAR <- y}
  if(any(calm[[as.character(y)]]$YEAR != y)){print(paste("Warning murre calendar info for",y," had to be corrected == ",unique(calm[[as.character(y)]]$YEAR)))
    calm[[as.character(y)]]$YEAR <- y}
  if(any(calg[[as.character(y)]]$YEAR != y)){print(paste("Warning goose calendar info for",y," had to be corrected == ",unique(calg[[as.character(y)]]$YEAR)))
    calg[[as.character(y)]]$YEAR <- y}
}
save(list = c("calg","cald","calm"),
     file = "data/calendars.RData")
# Fix species AOU values incl Eiders, CANG, SNGO, etc. --------------------------------------
# Recode historical AOU codes to their current equivalents using the
# lookup table aou_rec (AOUoriginal -> AOU)
for(i in 1:nrow(aou_rec)){
  c1 = aou_rec[i,"AOUoriginal"]
  c2 = aou_rec[i,"AOU"]
  outscse[which(outscse$AOU == c1),"AOU"] <- c2 #cod for COEI changed in ~1990
}
outscse <- outscse[which(!is.na(outscse$AOU)),]
#fixing historical data with -weeks
tof <- which(outscse$WEEK < 1) #small % of parts have negative weeks because the dates indicate hunting in August (range from -5 to -1)
outscse[tof,"MONH"] <- 9 #this works because all tof have MONH == 8, it's just hunters getting the month wrong
tof2 = which(outscse$MONH == 8) #few remaining parts with harvest month = august
outscse[tof2,"MONH"] <- 9 #this works because all tof have MONH == 8, it's just hunters getting the month wrong
first_day <- "09-01" ### No hunting in August, so all week definitions begin on September 1
# setting all weeks based on days since Sept 1 ----------------------------
outscse$date = as.Date(paste(outscse$YRHUNT,
                             outscse$MONH,
                             outscse$DAYH,sep = "-"),
                       format = "%Y-%m-%d")
# Recompute WEEK as the 1-based week index since September 1 of the survey year
for(y in years){
  min_day_y <- as.Date(paste(y,first_day,sep = "-"),format = "%Y-%m-%d")
  wy = which(outscse$YEAR == y)
  outscse[wy,"WEEK"] = as.integer(ceiling((outscse[wy,"date"]-(min_day_y-1))/7))
}
# outscse$WKdif = outscse$WEEK - outscse$WEEK2
#
#
tof <- which(outscse$WEEK < 1) #small % of parts have negative weeks because the dates indicate hunting in August (range from -5 to -1)
if(length(tof) > 0){stop("some parts have non-positive weeks = hunting pre September 1")}
#### replace BAGE with PAGE
outscse[which(outscse$PAGE != ""),"BAGE"] <- outscse[which(outscse$PAGE != ""),"PAGE"]
# outscse[which(outscse$BAGE %in% c("2")),"BAGE"] <- "I"
# outscse[which(outscse$BAGE %in% c("3")),"BAGE"] <- "U"
# outscse[which(outscse$BAGE %in% c("")),"BAGE"] <- "U"
# Standardise age codes: A = adult, I = immature, U = unknown
outscse[which(outscse$BAGE %in% c("1","S","T")),"BAGE"] <- "A"
outscse[which(outscse$BAGE %in% c("2")),"BAGE"] <- "I"
outscse[which(outscse$BAGE %in% c("3")),"BAGE"] <- "U"
outscse[which(outscse$BAGE %in% c("")),"BAGE"] <- "U"
# Standardise sex codes: M / F / U
outscse[which(outscse$BSEX %in% c("1")),"BSEX"] <- "M"
outscse[which(outscse$BSEX %in% c("2")),"BSEX"] <- "F"
outscse[which(outscse$BSEX %in% c("3")),"BSEX"] <- "U"
outscse[which(outscse$BSEX %in% c("")),"BSEX"] <- "U"
#outscse$BAGE = factor(outscse$BAGE)
#round(prop.table(table(outscse$BAGE,outscse$AOU),2),2)
#outscse$BSEX = factor(outscse$BSEX)
#round(prop.table(table(outscse$BSEX,outscse$AOU),2),2)
# Columns retained from the harvest questionnaires for the hunter-level
# kill table (cls is re-used below when building allkill)
cls = c("PERMIT",
        "CASTE",
        "YEAR",
        "SELYEAR",
        "PRHUNT",
        "ZOHUNT",
        "LATD",
        "LOND",
        "TODUK",
        "TOGOK",
        "COOTK",
        "WOODK",
        "SNIPK",
        "DOVEK",
        "PIGEK",
        "CRANK",
        "RAILK",
        "MURRK",
        "RNDMURK",
        "DAYWF",
        "DAYOT",
        "DAYM",
        "PRHUNTG",
        "ZOHUNTG",
        "LATG",
        "LONG",
        "PRHUNTM",
        "ZOHUNTM",
        "LATM",
        "LONM",
        "SUCCWF",
        "SUTODU",
        "SUTOGO",
        "SUCCOT",
        "SUCCM",
        "ACTIVEOT",
        "ACTIVE",
        "ACTIVEWF",
        "ACTIVEM",
        "POTNTL",
        "PRSALE",
        "ZOSALE",
        "PRSAMP",
        "ZOSAMP")
# Assemble the hunter-level kill data across all years.
# Each year's harvest table is restricted to the analysis columns in `cls`,
# then all years are bound in a single pass: binding once avoids the
# quadratic cost of growing allkill with bind_rows inside the loop.
allkill <- bind_rows(
  lapply(years, function(y) {
    tmp1 <- harvw[[as.character(y)]]
    tmp1[, which(names(tmp1) %in% cls)]
  })
)
# Drop the unused caste codes (permits with these castes appear in all years)
trem = which(allkill$CASTE %in% c("C","F","H"))
if(length(trem)>0){
  allkill = allkill[-trem,]
}### removing the unused castes; there are permits that have this caste designation across all years
# Keep only potential hunters (POTNTL == "Y")
tkp = which(allkill$POTNTL == "Y")
# if(length(tkp)>0){
allkill = allkill[tkp,]
#}### removing the hunters sampled from last year's permit file who indicated they didn't buy a permit this year
### and are therefore not potential hunters
trem = which(allkill$PERMIT == 0)
if(length(trem)>0){
  allkill = allkill[-trem,]
}### removes a single permit from 1985 with no permit number
# Unique permit id across years: permit number + selection year + survey year
allkill$uniperm = allkill$PERMIT + allkill$SELYEAR*1000000 + allkill$YEAR*10000000000
dupuni = allkill$uniperm[duplicated(allkill$uniperm)]
## there are no duplicates.
# dupdf = allkill[which(allkill$uniperm %in% dupuni),]
# dupdf = dupdf[order(dupdf$uniperm),]
# Backfill missing goose-hunt locations from the duck-hunt location
wmigoo <- which(allkill$PRHUNTG == "")
allkill$PRHUNTG = as.character(allkill$PRHUNTG)
allkill[wmigoo,"PRHUNTG"] <- as.character(allkill[wmigoo,"PRHUNT"])
allkill[wmigoo,"ZOHUNTG"] <- allkill[wmigoo,"ZOHUNT"]
# Success flags derived from positive kills (ducks, then geese)
wsud = which(allkill$TODUK > 0)
allkill$SUTODU <- "N"
allkill[wsud,"SUTODU"] <- "Y"
wsud = which(allkill$TOGOK > 0)
allkill$SUTOGO <- "N"
allkill[wsud,"SUTOGO"] <- "Y"
tkeepP = which(allkill$PRSAMP %in% provs) #keeps only permits sampled in primary provinces. drops NU
allkill = allkill[tkeepP,]
# Interactive sanity check -- result is printed, not stored
nrow(allkill) == length(unique(allkill$uniperm))
# 1-based year index and an ordered caste factor for the models
allkill$year = allkill$YEAR-(min(allkill$YEAR)-1)
allkill$caste = factor(allkill$CASTE,
                       ordered = T,
                       levels = c("D","B","A","E"))
######## sampling population sizes
popsiz_s = merge(popsiz,provzone[,c("prov","provn")],by.x = "PRSAMP",by.y = "provn",all.x = T)
popsiz_s = unique(popsiz_s)
#### total number of permits in each year
popsiz_perm = merge(perms,provzone[,c("prov","provn")],by.x = "PRSALE",by.y = "provn",all.x = T)
popsiz_perm = unique(popsiz_perm)
### total number of permits by zone and year
z_pops <- popsiz_perm %>%
  select(-PRSALE) %>%
  rename(PRSAMP = prov,ZOSAMP = ZOSALE) %>%
  group_by(PRSAMP,ZOSAMP,YEAR) %>%
  summarise(TOTSALE = sum(TOTSALE))
# popsiz_perm$yr = str_sub(popsiz_perm$YEAR,start = 3,end = 4)
# tmp = left_join(popsiz_perm,popsiz_s[,c("zone","caste","TOTPERM","yr","prov")])
# correcting the age and sex indicators -----------------------------------
save(list = c("allkill",
              "outscse",
              "z_pops",
              "popsiz_s",
              "popsiz_perm",
              "sps"),
     file = "data/allkill.RData")
# exporting the parts data as a readable csv file -------------------------
# Human-readable column names plus species names joined on AOU code
parts_out <- outscse[,c("PRHUNT","ZOHUNT","AOU","YRHUNT","MONH","DAYH","BAGE","BSEX","WEEK")]
names(parts_out) <- c("Province of hunt",
                      "Zone of hunt",
                      "AOU",
                      "Year",
                      "Month",
                      "Day",
                      "Age",
                      "Sex",
                      "Week of season")
parts_out <- left_join(parts_out,sps,by = "AOU")
write.csv(parts_out,paste0("GoogleDrive/All_raw_parts_data_",Y,".csv"))
# tmp <- parts_out %>% filter(specieslevelenglish == "Mallard")
# write.csv(tmp,paste0("output/Mallard_parts_all_years_through_",Y,".csv"))
######################
#define periods across all years
zones <- 1:3
pers <- 1:20
##### identify periods based on weeks with at least prop_period-% of the parts across all years
prop_period <- 0.05
# One row per (province, zone, period); start/end weeks filled in below
period.duck <- expand.grid(pr = provs,zo = zones,period = pers,stringsAsFactors = F)
period.duck[,"startweek"] <- NA
period.duck[,"endweek"] <- NA
period.goose <- period.duck
period.murre <- period.duck
# Greedy aggregation, walking weeks left to right: a week holding more
# than prop_period of the parts forms its own period; lighter weeks are
# accumulated until their combined share exceeds prop_period.  Trailing
# weeks that never reach the threshold are folded into the final period.
for(pr in provs){
  pzones <- unique(outscse[which(outscse$PRHUNT == pr),"ZOHUNT"])
  if(anyNA(pzones)){pzones <- pzones[-which(is.na(pzones))]}
  for(z in pzones){
    tmp <- outscse[which(outscse$PRHUNT == pr & outscse$ZOHUNT == z & outscse$AOU %in% sps[which(sps$group == "duck"),"AOU"]),]
    testm <- table(tmp[,c("AOU")],tmp[,c("WEEK")])
    wsums <- colSums(testm)
    # wprops = share of all duck parts (all years pooled) falling in each week
    wprops <- wsums/sum(wsums)
    per1 <- 1
    per2 <- NA
    p = 1
    mw <- F      # mw = currently accumulating a multi-week period
    q <- 0       # q = number of weeks accumulated so far
    for(w in 1:length(wprops)){
      if(mw){
        if(sum(wprops[per1[p]:(per1[p]+q)]) > prop_period){
          q <- 0
          per2[p] <- w
          p <- p+1
          mw <- F
        }else{
          q <- q+1
        }
      }else{
        if(wprops[w] > prop_period){
          per1[p] <- w
          per2[p] <- w
          p <- p+1
        }else{
          per1[p] <- w
          mw <- T
          q <- q+1
        }
      }
    }#w
    if(length(per1) > length(per2)){ #if TRUE it means that the end of the final period did not include 5% of parts
      per1 <- per1[-length(per1)]
      per2[p-1] <- length(wprops)}
    # Write the detected start/end weeks into the period lookup table
    for(j in 1:length(per1)){
      rs <- which(period.duck$pr == pr & period.duck$zo == z & period.duck$period == j)
      period.duck[rs,"startweek"] <- per1[j]
      period.duck[rs,"endweek"] <- per2[j]
    }
  }#z
}#pr
period.duck <- period.duck[-which(is.na(period.duck$startweek)),]
period.duck <- period.duck[order(period.duck$pr,period.duck$zo),]
write.csv(period.duck,"data/period.duck.csv",row.names = F)
##### goose periods
# Same greedy period-detection as for ducks, restricted to goose AOU codes
for(pr in provs){
  pzones <- unique(outscse[which(outscse$PRHUNT == pr),"ZOHUNT"])
  if(anyNA(pzones)){pzones <- pzones[-which(is.na(pzones))]}
  for(z in pzones){
    tmp <- outscse[which(outscse$PRHUNT == pr & outscse$ZOHUNT == z & outscse$AOU %in% sps[which(sps$group == "goose"),"AOU"]),]
    testm <- table(tmp[,c("AOU")],tmp[,c("WEEK")])
    wsums <- colSums(testm)
    wprops <- wsums/sum(wsums)
    ##### identify periods based on weeks with at least 5% of the parts across all years
    per1 <- 1
    per2 <- NA
    p = 1
    mw <- F
    q <- 0
    for(w in 1:length(wprops)){
      if(mw){
        if(sum(wprops[per1[p]:(per1[p]+q)]) > prop_period){
          q <- 0
          per2[p] <- w
          p <- p+1
          mw <- F
        }else{
          q <- q+1
        }
      }else{
        if(wprops[w] > prop_period){
          per1[p] <- w
          per2[p] <- w
          p <- p+1
        }else{
          per1[p] <- w
          mw <- T
          q <- q+1
        }
      }
    }#w
    # Close the final period at the last week if it never hit the threshold
    if(length(per1) > length(per2)){per1 <- per1[-length(per1)]
    per2[p-1] <- length(wprops)}
    for(j in 1:length(per1)){
      rs <- which(period.goose$pr == pr & period.goose$zo == z & period.goose$period == j)
      period.goose[rs,"startweek"] <- per1[j]
      period.goose[rs,"endweek"] <- per2[j]
    }
  }#z
}#pr
period.goose <- period.goose[-which(is.na(period.goose$startweek)),]
period.goose <- period.goose[order(period.goose$pr,period.goose$zo),]
write.csv(period.goose,"data/period.goose.csv",row.names = F)
##### murre periods
# Same greedy period-detection as for ducks/geese, restricted to murres in
# Newfoundland.  The weekly table is padded with one dummy row per week so
# every week from 1 to max appears as a column.
for (pr in "NF") {
  pzones <- unique(outscse[which(outscse$PRHUNT == pr), "ZOHUNT"])
  if (anyNA(pzones)) { pzones <- pzones[-which(is.na(pzones))] }
  for (z in pzones) {
    tmp <- outscse[which(outscse$PRHUNT == pr & outscse$ZOHUNT == z & outscse$AOU %in% sps[which(sps$group == "murre"), "AOU"]), ]
    wkblank <- data.frame(WEEK = as.integer(1:max(tmp$WEEK)),
                          AOU_bl = 300)
    tmp <- merge(tmp, wkblank, by = c("WEEK"), all.y = TRUE)
    testm <- table(tmp[, c("AOU")], tmp[, c("WEEK")])
    wsums <- colSums(testm)
    wprops <- wsums / sum(wsums)
    # BUG FIX: this sanity check used to be a braceless `if` whose body was
    # the `per1 <- 1` initialisation below, so per1 was only reset when the
    # week columns were NOT consecutive (and otherwise kept a stale value
    # left over from the goose loop).  Warn explicitly and always initialise.
    if (length(unique(names(testm[1, ]))) != max(as.integer(names(testm[1, ])))) {
      warning("Week columns are not consecutive from 1 for ", pr, " zone ", z)
    }
    ##### identify periods based on weeks with at least 5% of the parts across all years
    per1 <- 1
    per2 <- NA
    p <- 1
    mw <- FALSE
    q <- 0
    for (w in seq_along(wprops)) {
      if (mw) {
        if (sum(wprops[per1[p]:(per1[p] + q)]) > prop_period) {
          q <- 0
          per2[p] <- w
          p <- p + 1
          mw <- FALSE
        } else {
          q <- q + 1
        }
      } else {
        if (wprops[w] > prop_period) {
          per1[p] <- w
          per2[p] <- w
          p <- p + 1
        } else {
          per1[p] <- w
          mw <- TRUE
          q <- q + 1
        }
      }
    }#w
    # Close the final period at the last week if it never hit the threshold
    if (length(per1) > length(per2)) {
      per1 <- per1[-length(per1)]
      per2[p - 1] <- length(wprops)
    }
    for (j in seq_along(per1)) {
      rs <- which(period.murre$pr == pr & period.murre$zo == z & period.murre$period == j)
      period.murre[rs, "startweek"] <- per1[j]
      period.murre[rs, "endweek"] <- per2[j]
    }
  }#z
}#pr
period.murre <- period.murre[-which(is.na(period.murre$startweek)), ]
period.murre <- period.murre[order(period.murre$pr, period.murre$zo), ]
write.csv(period.murre, "data/period.murre.csv", row.names = F)
#save.image(file = paste0("data/parts and harvest survey info",Y,".RData"))
#
|
20d91b66fa436ef2cd8c64563c4fa0e4e1b31dee | 64e50bac8ef7b07d679118e12fe2501089e89dfb | /man/y.Rd | a9252e33e06e0993f74f5892d608b55b2faa0498 | [] | no_license | atomczik/linmod | c633f4596ef52b6a7c081baef71d35f958087a97 | f9a18c56293a0918e67bbbef9d97987db4fb358d | refs/heads/master | 2021-01-22T11:20:42.501134 | 2015-01-07T20:53:26 | 2015-01-07T20:53:26 | 28,930,426 | 0 | 1 | null | 2015-01-07T20:53:27 | 2015-01-07T19:36:53 | R | UTF-8 | R | false | false | 279 | rd | y.Rd | \name{y}
\alias{y}
\docType{data}
\title{
ds}
\description{
sdf}
\usage{data(y)}
\format{
The format is:
num [1:144] 7 7.4 9.5 7.2 7.3 7.6 8.1 8.2 8.3 8.5 ...
}
\details{
sdf}
\source{
sf}
\references{
sf}
\examples{
data(y)
## maybe str(y) ; plot(y) ...
}
\keyword{datasets}
|
6fb342f828f2099802c5cecc68fc409f401c01bc | a35729a973c1e27bec65cca5abb480d014ba491e | /2019-06-24_Plumber/entrypoint.R | 3a19ffc92c20949a41c9bffe7edc9d2b6b79eaa5 | [] | no_license | EdwinChirre/encuentros | 4f34bf0e63e931de46182425ab8d110f99325432 | d2235a3388bafc1af07789682fa88b8e2bbc3ae8 | refs/heads/master | 2022-04-17T11:37:08.286382 | 2019-07-27T17:12:28 | 2019-07-27T17:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 68 | r | entrypoint.R | library(plumber)
# Build a Plumber router from the annotated API definition file.
p <- plumb("plomeriaAvance.R")
# Start serving the API locally on port 8888 (blocks until interrupted).
p$run(port = 8888)
|
cb2a8d07cf16637d17e8abe2e1d162091d463c30 | 6b9a6ce9762d57565365484262b6804b65a0ac8a | /codes/1_quality_check/indexcov_read_depth_postprocessing.R | 4b5a649408de97ca4e570d24270e9db53d197ce9 | [
"MIT"
] | permissive | phansol/gremlin | 9f75ae9feed6fbad1979bc37342f6178d5139ecd | 3c178143f9181d5b3c7bf6f0d74d1346eabb1426 | refs/heads/main | 2023-04-07T19:55:21.847037 | 2022-12-12T09:00:25 | 2022-12-12T09:00:25 | 330,940,350 | 5 | 1 | null | 2021-07-14T03:35:39 | 2021-01-19T10:17:20 | Python | UTF-8 | R | false | false | 4,534 | r | indexcov_read_depth_postprocessing.R | #' variable coverage flag
#' args1 = tumour.indexcov.bed.gz
#' args2 = normal.indexcov.bed.gz
#' args3 = reference version (19 or 38)
#' args4 = reference.fasta.fai
suppressMessages(library(tidyverse))
suppressMessages(library(GWASTools))
suppressMessages(library(rpart))
options(warn = -1)  # NOTE(review): globally silences all warnings for the run
# Command-line interface (see the #' header above):
#   args[1] tumour indexcov bed.gz, args[2] normal indexcov bed.gz,
#   args[3] reference build ('19' or '38'), args[4] reference .fasta.fai
args <- commandArgs(trailingOnly = T)
message('# estimating the variance of read depths')
# inputs
tcov <- read_tsv(args[1], col_types = cols(`#chrom` = 'c'))
ncov <- read_tsv(args[2], col_types = cols(`#chrom` = 'c'))
# Pick the GWASTools centromere table matching the reference build.
# NOTE(review): any other args[3] value leaves `centromere` undefined and
# the script fails later inside cov_trim() -- confirm intended inputs.
if (args[3] == '19'){
  data(centromeres.hg19)
  centromere <- centromeres.hg19
} else if (args[3] == '38'){
  data(centromeres.hg38)
  centromere <- centromeres.hg38
}
# .fai index: chromosome name and size (columns x/y/z are unused here).
fai <- read_tsv(args[4], col_names = c('chr', 'size', 'x', 'y', 'z'), col_types = cols())
# Restrict an indexcov bin table to clean autosomal/chrX bins.
#
# Args:
#   cov: tibble with columns `#chrom`, start, end (plus a depth column).
#
# Uses from the enclosing script environment:
#   centromere: per-chromosome centromere coordinates (GWASTools)
#   fai:        chromosome sizes from the reference .fai
#
# Returns: cov with centromeric, telomeric and (for acrocentric
#   chromosomes 13/14/15/21/22) pre-centromere bins removed.
cov_trim <- function(cov){
  # match 'chr' prefix -- normalise the LOCAL copies of centromere/fai so
  # their chromosome names use the same naming style as `cov`
  if (grepl('chr', cov$`#chrom`[1])){
    centromere$chrom <- centromere$chrom %>% gsub('chr', '', .) %>% paste0('chr', .)
    fai$chr <- fai$chr %>% gsub('chr', '', .) %>% paste0('chr', .)
  } else {
    centromere$chrom <- centromere$chrom %>% gsub('chr', '', .)
    fai$chr <- fai$chr %>% gsub('chr', '', .)
  }
  # primary chromosomes
  cov <- cov %>% filter(`#chrom` %in% c(c(1:22, 'X'), paste0('chr', c(1:22, 'X'))))
  # exclude centomeric & telomeric regions
  # NOTE(review): indexing centromere/fai by i assumes their first 23 rows
  # are chr1..chr22, chrX in that order -- confirm for both builds.
  for (i in 1:23){
    cov <- cov %>%
      filter(!(`#chrom` == centromere$chrom[i] &
                 ((start > centromere$left.base[i] & start < centromere$right.base[i]) | (end > centromere$left.base[i] & end < centromere$right.base[i]))))
    cov <- cov %>%
      filter(!(`#chrom` == fai$chr[i] & ((end > fai$size[i] - 100000) | (start < 100000))))
    # acrocentric or telocentric chromosomes (chr13, 14, 15, 21, 22)
    if (i %in% c(13, 14, 15, 21, 22)){
      cov <- cov %>%
        filter(!(`#chrom` == centromere$chrom[i] & end < centromere$right.base[i]))
    }
  }
  cov
}
tcov <- cov_trim(tcov)
ncov <- cov_trim(ncov)
# depth ratio
colnames(tcov)[4] <- 'tumor'
colnames(ncov)[4] <- 'normal'
cov <- merge(tcov, ncov, by = c('#chrom', 'start', 'end')) %>% as.data.frame()
# tumour/normal depth ratio scaled x2 as a diploid copy-number estimate
cov$cn_estimate <- cov$tumor/cov$normal*2
# numeric chromosome codes (X -> 23) for ordering and per-chromosome loops
cov$`#chrom` <- cov$`#chrom` %>% gsub('chr', '', .) %>% gsub('X', 23, .) %>% as.numeric()
cov <- cov[order(cov$`#chrom`, cov$start), ]
cov <- cov[complete.cases(cov), ]
# remove extreme cn cases
# keep the central 99% of copy-number estimates
m <- quantile(cov$cn_estimate, 0.005)
M <- quantile(cov$cn_estimate, 0.995)
cov <- cov %>% filter(cn_estimate >= m & cn_estimate <= M)
# cnv detection using recursive partitioning
cov$index <- 1:nrow(cov)
cov$cn_smoothened <- 0
# sample label for the plot title, wrapped every 25 characters
sample <- basename(args[1]) %>% gsub('.tmp-indexcov.bed.gz', '', .)
sample <- ifelse(nchar(sample) <= 25, sample,
                 substring(sample, seq(1, nchar(sample)-1, 25), unique(c(seq(25, nchar(sample), 25), nchar(sample)))) %>% paste0(., collapse = '\n'))
# 8x3 grid of per-chromosome panels; the first cell shows the sample name.
png(dirname(args[1]) %>% gsub('tmp$', 'depth_ratio.png', .), height = 2970, width = 2100)
par(mfrow = c(8, 3), mar = c(8, 8, 4, 4), mgp = c(6, 2, 0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
text(0.5, 0.5, sample, cex = 5)
for (i in 1:23){
  cov_i <- cov %>% filter(`#chrom` == i)
  # piecewise-constant fit of the depth ratio along the chromosome
  tree <- rpart(cn_estimate ~ start, data = cov_i, minsplit = 1, minbucket = 60) # minbucket = 60 corressponds to 1Mb
  cov_i$cn_smoothened <- predict(tree, data.frame(start = cov_i$start))
  # write the smoothened values back into the full table via saved row indices
  cov$cn_smoothened[cov_i$index] <- cov_i$cn_smoothened
  plot(cov_i$start, cov_i$cn_estimate, ylim = c(max(min(cov$cn_estimate) -0.2, 0), max(cov$cn_estimate) + 0.2), cex.axis = 2, cex.lab = 3,
       xlab = paste0('chromosome ', i %>% gsub(23, 'X', .)), ylab = 'Read depth ratio x 2')
  lines(cov_i$start, cov_i$cn_smoothened, col = 'red', lwd = 5)
}
p <- dev.off()
message(paste0('# see ', dirname(args[1]) %>% gsub('tmp$', 'depth_ratio.png', .)))
# variance of read depths (kb bin)
# Mean squared deviation of the per-bin depth ratio from its rpart fit:
# a quality score for how noisy the read depths are.
v <- sum((cov$cn_smoothened - cov$cn_estimate)^2)/nrow(cov)
# A single 0.05 threshold decides the marker-file suffix; the shared
# write/message logic is deduplicated from the original two branches.
qc_label <- if (v > 0.05) 'depth.fail' else 'depth.pass'
qc_file <- dirname(args[1]) %>% gsub('tmp$', qc_label, .)
writeLines(paste0('# variance of read depths: ', v), qc_file)
message(paste0('# See ', qc_file, ' for the estimate of variance of read depths'))
if (v > 0.05) {
  message(paste0('# WARNING: ', dirname(args[1]) %>% gsub('.tmp$', '', .), ' is suspected of highly variable read depths'))
}
message('# Done')
|
573df4651142bf5b89159196a68a8c7343d662a1 | 311dbdc75af75f439fcdd3eddcd0ad0d71fdf977 | /R/p005.R | 3a0e52e2c29b36964733ffaa67cb791473c30343 | [] | no_license | EGrandgi/projectEuler | bc76db45983de94c0d39f49a3b6d7a24fab29f12 | 8f3df9857ff83762155e13130ec93a93d44b366b | refs/heads/master | 2021-06-16T04:06:36.526142 | 2021-05-29T07:44:37 | 2021-05-29T07:44:37 | 192,505,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | p005.R | ### 0005
# Test whether n is evenly divisible by every integer in 1..until.
#
# Args:
#   n:     numeric scalar to test.
#   until: upper end of the divisor range.
#
# Returns: TRUE when n %% i == 0 for every i in 1:until, else FALSE.
#   For until < 1 the divisor set is empty, so TRUE is returned vacuously
#   (the original while-loop left `answer` undefined and errored there).
is.mdiv <- function(n, until) {
    # Vectorized test replaces the original while-loop and its
    # `i <- until + 1` break hack; seq_len() is safe for until == 0.
    all(n %% seq_len(until) == 0)
}
# Smallest positive number evenly divisible by every integer in 1..until.
#
# Args:
#   until: upper end of the divisor range.
#
# Returns: lcm(1, 2, ..., until); 1 when until < 1.
#
# The original brute-force search (increment n until is.mdiv(n, until))
# needs ~233 million outer iterations for until = 20; folding the running
# least common multiple over 1..until is effectively instant.
smallest_mdiv <- function(until) {
    gcd <- function(a, b) if (b == 0) a else gcd(b, a %% b)
    # acc * (k / gcd(acc, k)) == lcm(acc, k); dividing first limits growth
    Reduce(function(acc, k) acc * (k / gcd(acc, k)), seq_len(until), 1)
}
# Project Euler problem 5: smallest number divisible by all of 1..20.
smallest_mdiv(20)
|
e92af8075c2d9b97a09315349f542c0fbd32f31b | 8f67330a4700bc888d13dd92c62220e20e6b6828 | /R/index.G2.r | 3f707fb22fea946ca22eb72fad382b29abfa0676 | [] | no_license | cran/clusterSim | cfcc584e368fa9f6d1d86887b0f61cc2f3676b32 | 54764dbd65330165e71fc1af2fadf75942915769 | refs/heads/master | 2023-06-25T15:51:42.061284 | 2023-06-10T17:30:02 | 2023-06-10T17:30:02 | 17,695,119 | 2 | 6 | null | 2015-12-05T19:09:43 | 2014-03-13T04:16:30 | R | UTF-8 | R | false | false | 1,126 | r | index.G2.r | index.G2 <- function(d,cl){
cn <- max(cl)
n <- length(cl)
dmat <- as.matrix(d)
diameter <- average.distance <- median.distance <- separation <-
average.toother <-
cluster.size <- within.dist <- between.dist <- numeric(0)
separation.matrix <- matrix(0,ncol=cn,nrow=cn)
di <- list()
for (i in 1:cn){
cluster.size[i] <- sum(cl==i)
#print(i)
#print(cl==i)
#print(dmat[cl==i,cl==i])
di <- as.dist(dmat[cl==i,cl==i])
within.dist <- c(within.dist,di)
#diameter[i] <- max(di)
average.distance[i] <- mean(di)
median.distance[i] <- median(di)
bv <- numeric(0)
for (j in 1:cn){
if (j!=i){
sij <- dmat[cl==i,cl==j]
bv <- c(bv,sij)
if (i<j){
separation.matrix[i,j] <- separation.matrix[j,i] <- min(sij)
between.dist <- c(between.dist,sij)
}
}
}
}
nwithin<-length(within.dist)
nbetween<-length(between.dist)
.C(C_fng2,as.double(within.dist),as.integer(nwithin),as.double(between.dist),as.integer(nbetween),wynik=double(2),PACKAGE="clusterSim")$wynik[1]
}
|
03082fd491fb109f7446b304492ad3a9e16d645d | 6074b4746c7a409a452734bf8dc2f5226cd02dec | /tests/testthat.R | e01043cb0c8436cd08ac3c356a752bc0334119a6 | [
"MIT"
] | permissive | rds64/mortgage | 7128f739c0f0ec78ac3190f98a7cd0e5f386f1af | a1e7a53703fa7e05e6cc8fa2ad6dfe737c2b53e5 | refs/heads/master | 2021-05-03T20:21:25.227657 | 2018-04-13T07:01:24 | 2018-04-13T07:01:24 | 120,428,823 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 59 | r | testthat.R | library(testthat)
library(mortgage)
test_check('mortgage') |
b8c380d20301ffe1280cb4716ec127275a365d2e | 4644ea0db62a692648be6d6f9eaf4868fb4b779e | /Analyses/LineageData/BayesDetInference.R | 13f87c57284114673109f7fd3313a5500748eec5 | [] | no_license | lwlss/discstoch | 883f4b4f546f0f31c33a3037f3a0cac1cdd1153d | be11f0caf3f0db00d9ff6c910dbc41e20e3476c7 | refs/heads/master | 2021-01-18T06:07:19.429111 | 2017-04-05T06:27:49 | 2017-04-05T06:27:49 | 54,037,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,604 | r | BayesDetInference.R | # Bayesian Deterministic Parameter Inference
library(detstocgrowth)
library(data.table)
# Load the single-lineage growth-curve tables for one of three studies.
#
# Args:
#   x: data set name; one of "Lawless", "Levy" or "Ziv".
#
# Returns: a list with elements "area", "data" and "times" (each read with
#   data.table::fread from fixed paths under ~).  The 3rd column of "data"
#   is the lineage identifier (Lawless: strain_parentcolony;
#   Levy: replicate; Ziv: colony).
#
# An unknown name now raises an error via stop(); the original merely
# print()ed a note and returned NULL, deferring the failure to the first
# `x$area` access in the caller.
dataset <- function(x){
  if (x == "Lawless"){
    # DataSet1: Lawless
    area=fread("~/Lawless_area_shortTC.txt",header=FALSE)
    times=fread("~/Lawless_time_shortTC.txt",header=FALSE)
    data=fread("~/Lawless_data_shortTC.txt",header=FALSE) #3rd column (Identifier) => strain_parentcolony
    return(list("area"=area,"data"=data,"times"=times))
  } else if (x == "Levy"){
    # DataSet2: Levy
    area=fread("~/Levy_area_filtered1.txt",header=FALSE)
    times=fread("~/Levy_times_filtered1.txt",header=FALSE)
    data=fread("~/Levy_data_filtered1.txt",header=FALSE) #3rd column (Identifier) => replicate
    return(list("area"=area,"data"=data,"times"=times))
  } else if (x == "Ziv"){
    # DataSet3: Ziv
    area=fread("~/Ziv_area_filtered1.txt",header=FALSE)
    times=fread("~/Ziv_times_filtered1.txt",header=FALSE)
    data=fread("~/Ziv_data_filtered1.txt",header=FALSE) #3rd column (Identifier) => colony
    return(list("area"=area,"data"=data,"times"=times))
  } else {
    stop("Not a valid dataset: ", x)
  }
}
# Choosing a data set
datsetname="Lawless"
# Extracting data for the growth curve(s) on which to do inference
# NOTE: `gc` is a row index here and shadows base::gc() in this script.
gc=252
x=dataset(datsetname)
area=x$area[gc,]    # cell areas for the selected growth curve
times=x$times[gc,]  # matching observation times
data=x$data[gc,]    # identifier row for the curve (see dataset())
# Write output to PDF file
pdf(height = 16, width = 16, file = paste(datsetname,"_Exponential_Model_Output.pdf",sep=""))
# NOTE(review): BayesDet's positional arguments are undocumented here;
# 1000000 is presumably the MCMC iteration count -- confirm against the
# detstocgrowth package documentation.
bayesinf=BayesDet(area,times,2,1000000,1000,2)
dev.off()
# Write first MCMC chain to file
write.table(bayesinf$Samples[[1]],"GC252DetBayesExp.txt",col.names=TRUE,row.names=FALSE)
|
60c9f1611089ab55c4a848a0c71b294d5b464887 | 657ed851c5f4fb4adb52d0975cf89ef98b201723 | /man/mmLogLik.Rd | 3317b52ddc5f66d60739a6a81002b2d27dfdcb2f | [] | no_license | ShojiTaniguchi/timsync | 0e6861798fb8bac84e3d1a33ae01f80862b13c11 | 6868b03d5dfa22faa37aff358d3da3dc7703712e | refs/heads/master | 2022-12-28T07:40:53.434929 | 2020-10-09T06:31:10 | 2020-10-09T06:31:10 | 218,429,284 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,143 | rd | mmLogLik.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mmLogLik.R
\name{mmLogLik}
\alias{mmLogLik}
\title{Function to calculate maximum likelihood}
\usage{
mmLogLik(rho, omega, dat, y, f, r, grp, w, b, Ve, Vu, method)
}
\arguments{
\item{rho}{parameter for serial correlation}
\item{omega}{parameter for synchronization}
\item{dat}{data for the analysis}
\item{y}{column number of the dependent variable}
\item{f}{column number of the fixed effects}
\item{r}{column number of the random effects}
\item{grp}{column number of the group}
\item{w}{weight vector for the random effects variance}
\item{Ve}{Variance of the residuals}
\item{Vu}{Variance of the random effects}
\item{method}{"ML" or "REML"}
}
\description{
The mmLogLik function calculates the maximum likelihood values
under the estimated parameters.
The appropriate usage is to calculate the ML under the REML parameters
estimated by the timixCov or timix function.
}
\references{
Hamazaki, K., & Iwata, H. (2020).
RAINBOW: Haplotype-based genome-wide association study
using a novel SNP-set method. PLoS computational biology, 16(2), e1007663.
}
|
f2069a619315b508e6172632ea6e62c0f727b5d3 | 3f41c3a5792e04b8e8bb9ecb3a437fc2ca3c1457 | /tests/testthat/test-gsqerr_spd.R | 1ec859a50e4e9a9950bd2565ba9d181fa52b7f58 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | zhangzjjjjjj/MGLMRiem | e1ebc648f1b87834a4968d97780b80ef3fb96852 | 6ab59ff4160c6a1ce36385a31e81ecb1a4429e3c | refs/heads/master | 2023-03-17T21:04:34.044303 | 2020-03-18T20:17:31 | 2020-03-18T20:17:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 267 | r | test-gsqerr_spd.R | test_that("gsqerr_spd works", {
Y = array(0, dim=c(3,3,5))
for(i in 1:5) {
Y[,,i] <- randspd_FAST(3)
}
Y_hat = array(0, dim=c(3,3,5))
for(i in 1:5) {
Y_hat[,,i] <- randspd_FAST(3)
}
gsqerr_spd(X = Y, X_hat = Y_hat)
expect_equal(F, F)
})
|
8579da4416072f674ab024ed986844005db85ccc | d6031ae0f6a6960a2e676e469d2bd5e3c9477768 | /wordcloud/Rshiny/app.R | 8bc20b03b96787681bcc48d23d5a9ef3b9c63ca2 | [] | no_license | ayoung-shin/mulcam | e3fa3904afa9c96c1331dacdf3938c3437a879ed | 9c23989781d600706152542fe776d91c38a87beb | refs/heads/master | 2021-03-07T17:24:13.452643 | 2020-06-22T08:53:05 | 2020-06-22T08:53:05 | 246,282,551 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,190 | r | app.R | library(shiny)
library(ggplot2)
library(memoise)
library(tm)
library(stringr)
library(rJava)
library(SnowballC)
library(wordcloud)
# library(KoNLP)
source("news/code_for_shiny.R")
# Define UI for application that draws a histogram
# Single-page UI: a sidebar with broadcaster/year pickers plus an action
# button, and a main panel showing the rendered word cloud.
ui <- fluidPage(
    # Application title
    titlePanel("Wordcloud of the year by Each Broadcaster"),
    # Sidebar with a slider and selection inputs
    sidebarLayout(
        sidebarPanel(
            # input$broadcast: which broadcaster's corpus to analyse
            radioButtons("broadcast", "Choose a broadcaster",
                         choices = list("SBS", "JTBC", "YTN")),
            # input$year: which year of articles to analyse
            selectInput("year", "Choose a year",
                        choices = c("2015", "2016", "2017", "2018", "2019", "2020")),
            # input$update: press to (re)compute the word cloud
            actionButton("update", "Change"),
        ),
        # Show a plot of the generated distribution
        mainPanel(
            plotOutput("plot", width = "100%", height = "650px")
        )
    )
)
# Define server logic required to draw a histogram
# Server logic: compute the analysis result for the selected broadcaster
# and year, then render it as a word cloud.
server <- function(input, output) {
    # Reactive input holding the analysis result (used below as a named
    # frequency vector -- presumably word frequencies from getAnalResult)
    terms = reactive({
        # re-run only when the "Change" (update) action button is pressed
        input$update
        # isolate() prevents broadcaster/year changes from triggering a
        # recompute on their own
        isolate({
            withProgress({
                setProgress(message = "Processing corpus...")
                getAnalResult(tolower(input$broadcast), input$year)
            })
        })
    })
    # Make the wordcloud drawing predictable during a session
    wordcloud_rep = repeatable(wordcloud)
    output$plot <- renderPlot({
        v = terms()
        wordcloud_rep(
            names(v),
            v,
            # font-size range between the most and least frequent words
            scale = c(5, 0.2),
            # proportion of words drawn rotated 90 degrees (none here)
            rot.per = 0,
            # only show words with frequency >= 2
            min.freq = 2,
            # TRUE: random order; FALSE: most frequent words in the centre
            random.order = F,
            # TRUE: random colours; FALSE: colours assigned by frequency
            random.color = F,
            colors = brewer.pal(10, "Paired")
        )
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
5688545b57d37fcd3c1a1660f5be3a1df0b7a785 | a0033092d925f26e4c164055fe6234649c01902e | /earth-observation/synthetic-aperture-radar/007-fpc-scores/code/nc-convert-spatiotemporal.R | eaa1163a93994425b3d73410e06a8385ed130a43 | [] | no_license | paradisepilot/study | af440e945c75360cc5905d92d7980486a1c9ba33 | 27af595c8ec5bd4ef9ae7ef0cf8a552721b160c7 | refs/heads/master | 2022-12-11T09:45:32.057660 | 2022-12-09T00:13:06 | 2022-12-09T00:13:06 | 11,211,137 | 0 | 0 | null | 2021-09-05T20:02:22 | 2013-07-06T01:11:39 | TeX | UTF-8 | R | false | false | 7,390 | r | nc-convert-spatiotemporal.R |
# Convert a multi-band spatiotemporal NetCDF file into yearly NetCDF files
# plus a small CSV manifest, using the manifest as a cache.
#
# Args:
#   ncdf4.file.input: path to the input NetCDF file (only opened when the
#       manifest does not exist yet).
#   date.reference: origin Date used for the integer time axis.
#   CSV.preprocessed: path of the manifest CSV (year -> yearly .nc file).
#
# Returns: data.frame manifest, either re-read from CSV.preprocessed or
#   produced by nc_convert.spatiotemporal_inner() and written to it.
nc_convert.spatiotemporal <- function(
    ncdf4.file.input = NULL,
    date.reference = as.Date("1970-01-01", tz = "UTC"),
    CSV.preprocessed = "data-preprocessed-table.csv"
    ) {
    thisFunctionName <- "nc_convert.spatiotemporal";
    cat("\n### ~~~~~~~~~~~~~~~~~~~~ ###");
    cat(paste0("\n",thisFunctionName,"() starts.\n\n"));
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    if ( file.exists(CSV.preprocessed) ) {
        # (stale code from an earlier RDS-based cache, kept for reference)
        # cat(paste0("\n# ",ncdf4.output," already exists; loading this file ...\n"));
        # list.arrays <- readRDS(file = ncdf4.output);
        # cat(paste0("\n# Loading complete: ",ncdf4.output,"\n"));
        cat(paste0("\n# ",CSV.preprocessed," already exists; no conversion will be performed ...\n"));
        DF.preprocessed <- read.csv(CSV.preprocessed);
    } else {
        ncdf4.object.input <- ncdf4::nc_open(ncdf4.file.input);
        cat("\n# names(ncdf4.object.input[['var']])\n");
        print( names(ncdf4.object.input[['var']]) );
        DF.preprocessed <- nc_convert.spatiotemporal_inner(
            ncdf4.object.input = ncdf4.object.input,
            date.reference = date.reference
            );
        # close the input handle and free memory before writing the manifest
        ncdf4::nc_close(ncdf4.object.input);
        remove(list = c('ncdf4.object.input'));
        gc();
        write.csv(
            x = DF.preprocessed,
            file = CSV.preprocessed,
            row.names = FALSE
            );
    }
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    cat(paste0("\n",thisFunctionName,"() quits."));
    cat("\n### ~~~~~~~~~~~~~~~~~~~~ ###\n");
    return( DF.preprocessed );
    }
##################################################
# Worker for nc_convert.spatiotemporal(): split a NetCDF file that stores
# one 2-D band per (variable, date) into one NetCDF file per year with an
# explicit time dimension (time x lat x lon per variable).
#
# Args:
#   ncdf4.object.input: open ncdf4 object; band names are expected to embed
#       a date suffix like "12Aug2019" and optional "_mst"/"_slvNN" tags.
#   date.reference: origin Date; time is stored as integer days since it.
#
# Returns: data.frame with one row per year (columns: year, nc_file), where
#   nc_file is the "data-preprocessed-<year>.nc" file written to disk.
nc_convert.spatiotemporal_inner <- function(
    ncdf4.object.input = NULL,
    date.reference = NULL
    ) {
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    band.names <- names(ncdf4.object.input[['var']]);
    ### variable names = band names with the date suffix and mst/slv tags removed
    var.names <- band.names;
    var.names <- unique(gsub(x = var.names, pattern = "_{1,}[0-9]{1,2}[A-Za-z]{3}[0-9]{4}", replacement = ""));
    var.names <- unique(gsub(x = var.names, pattern = "_{1,}(mst|slv[0-9]{1,})", replacement = ""));
    cat("\nvar.names\n");
    print( var.names );
    ### lookup table: date suffix -> Date, integer day offset and year
    date.suffixes <- unique(stringr::str_extract(string = band.names, pattern = "[0-9]{1,2}[A-Za-z]{3}[0-9]{4}"));
    DF.dates <- data.frame(
        date.suffix = date.suffixes,
        date = as.Date(x = date.suffixes, format = "%d%B%Y", tz = "UTC")
        );
    remove(list = c("date.suffixes"));
    DF.dates <- DF.dates[order(DF.dates[,'date']),c('date.suffix','date')];
    DF.dates[,'date.integer'] <- as.integer(DF.dates[,'date'] - date.reference);
    DF.dates[,'year'] <- as.integer(lubridate::year(DF.dates[,'date']));
    rownames(DF.dates) <- seq(1,nrow(DF.dates));
    cat("\nDF.dates\n");
    print( DF.dates );
    ### one output NetCDF file per calendar year
    DF.preprocessed <- data.frame();
    years <- unique(DF.dates[,'year']);
    for ( temp.year in years ) {
        ncdf4.file.output <- paste0("data-preprocessed-",temp.year,".nc");
        DF.temp.preprocessed <- data.frame(
            year = as.integer(temp.year),
            nc_file = ncdf4.file.output
            );
        DF.preprocessed <- rbind(DF.preprocessed,DF.temp.preprocessed);
        ### dates belonging to this year only
        DF.temp.dates <- DF.dates[DF.dates[,'year'] == temp.year,];
        rownames(DF.temp.dates) <- seq(1,nrow(DF.temp.dates));
        ### time dimension (days since date.reference) shared by all variables
        dimension.time <- ncdf4::ncdim_def(
            name = "time",
            units = paste("days since",date.reference,"UTC"),
            vals = DF.temp.dates[,'date.integer']
            );
        list.vars <- list();
        for ( var.name in var.names ) {
            list.vars[[var.name]] <- ncdf4::ncvar_def(
                name = var.name,
                # units copied from the FIRST input band -- assumes all
                # bands share the same units (TODO confirm)
                units = ncdf4.object.input[['var']][[1]][['units']],
                dim = list(
                    time = dimension.time,
                    lat = ncdf4.object.input[['dim']][['lat']],
                    lon = ncdf4.object.input[['dim']][['lon']]
                    )
                );
            }
        remove(list = c("dimension.time"));
        ### create the yearly output file
        ncdf4.object.output <- ncdf4::nc_create(
            filename = ncdf4.file.output,
            vars = list.vars
            );
        remove(list = c("list.vars"));
        gc();
        ### copy over a whitelist of global attributes, when present
        global.attributes <- ncdf4::ncatt_get(nc = ncdf4.object.input, varid = 0);
        retained.attributes <- c('Conventions','TileSize','title');
        for ( retained.attribute in retained.attributes ) {
            if ( retained.attribute %in% names(global.attributes) ) {
                ncdf4::ncatt_put(
                    nc = ncdf4.object.output,
                    varid = 0,
                    attname = retained.attribute,
                    attval = global.attributes[[retained.attribute]],
                    prec = NA,
                    verbose = FALSE,
                    definemode = FALSE
                    );
                }
            }
        remove(list = c("global.attributes","retained.attributes"));
        gc();
        ### write each (date, variable) band into its time slice
        n.lats <- ncdf4.object.input[['dim']][['lat']][['len']];
        n.lons <- ncdf4.object.input[['dim']][['lon']][['len']];
        for ( date.index in base::seq(1,nrow(DF.temp.dates)) ) {
            date.suffix <- DF.temp.dates[date.index,'date.suffix'];
            for ( var.name in var.names ) {
                band.name <- base::grep(x = band.names, pattern = base::paste0(var.name,".+",date.suffix), value = TRUE);
                DF.band <- ncdf4::ncvar_get(nc = ncdf4.object.input, varid = band.name);
                # transpose bands that come back (lon, lat) instead of (lat, lon)
                if ( all(dim(DF.band) == c(n.lats,n.lons)) ) {
                    ncdf4::ncvar_put(
                        nc = ncdf4.object.output,
                        varid = var.name,
                        vals = DF.band,
                        start = c(date.index,1,1),
                        count = c(1,n.lats,n.lons)
                        );
                } else {
                    ncdf4::ncvar_put(
                        nc = ncdf4.object.output,
                        varid = var.name,
                        vals = base::t(DF.band),
                        start = c(date.index,1,1),
                        count = c(1,n.lats,n.lons)
                        );
                }
                cat("\n(",date.suffix,",",var.name,",",band.name,"): dim(DF.band) =",base::paste(dim(DF.band),collapse = " x "),"\n");
                remove(list = c('DF.band'));
                }
            }
        ### finalise this year's file and release memory
        ncdf4::nc_close(ncdf4.object.output);
        remove(list = c('DF.temp.dates','ncdf4.file.output','ncdf4.object.output','n.lats','n.lons'));
        gc();
        } # for ( temp.year in years )
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    remove(list = c('DF.dates'));
    gc();
    return( DF.preprocessed );
    }
|
ec73c9af648b347dd991e8c10642d41a950bb36d | 71b2c44b282dee6ead9f4e1132aae6372e9f3788 | /R/zzz.R | d0e5188104063882dc452da3f9b10bcdeaebb677 | [
"Python-2.0"
] | permissive | vishalbelsare/RGF | ce7e5e13c861d3e9234270740246e69803424140 | 44699666138b22cb9c085fcc86921919c066e0c7 | refs/heads/master | 2021-04-26T23:21:17.726680 | 2021-03-09T15:34:28 | 2021-03-09T15:34:28 | 123,978,365 | 0 | 0 | null | 2021-04-04T16:13:33 | 2018-03-05T21:01:43 | R | UTF-8 | R | false | false | 474 | r | zzz.R | # temporary startup message beginning from version 1.0.3 [ SEE : http://r-pkgs.had.co.nz/r.html#r-differences ]
# .onAttach hook: runs when the package is attached with library()/require()
# and shows a one-time renaming notice via packageStartupMessage() (so the
# user can still suppress it with suppressPackageStartupMessages()).
#
# Args (required by the hook signature, both unused here):
#   libname: library path the package was loaded from
#   pkgname: name of the attached package
.onAttach <- function(libname, pkgname) {
  packageStartupMessage("Beginning from version 1.0.3 the 'dgCMatrix_2scipy_sparse' function was renamed to 'TO_scipy_sparse' and now accepts either a 'dgCMatrix' or a 'dgRMatrix' as input. The appropriate format for the 'RGF' package in case of sparse matrices is the 'dgCMatrix' format (scipy.sparse.csc_matrix)")
}
|
20893caddb1936acccc7b6948a9bae2b8c998f11 | 06d4e50804f75f80f8cb57707b84176f387a0810 | /R/BinaryCNNParasiteCells.R | ff08543a63c9590db66fe2fd84a7c68e8c34e2e7 | [
"MIT"
] | permissive | StatsGary/LondonRComputerVision | d12faa70649fbf3bee7748835eb82439c9b51a12 | 346703dbbaac4841d6b303dc84a8f9e6a9570d3f | refs/heads/main | 2023-01-24T18:23:26.237177 | 2020-11-25T16:27:39 | 2020-11-25T16:27:39 | 315,700,134 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,946 | r | BinaryCNNParasiteCells.R | ###########################################################################
###########################################################################
### ###
### Script name: BinaryCNNParasiteCells.R ###
### Author: Gary Hutson ###
### Date Created: 23/10/2020 ###
### Description: CNN of malaria parasite cells classification ###
### Copyright: All reprints and usage should be cited ###
### ###
###########################################################################
###########################################################################
# (Removed leftover Git merge-conflict markers "<<<<<<<"/"======="/">>>>>>>"
# that had been committed here; they made this file unparseable by R.)
# -----------------------------------------------------------
# Load in libraries #
# -----------------------------------------------------------
library(tensorflow)
library(keras)
library(plyr)
library(dplyr)
library(ggplot2)
library(magrittr)
library(tidyr)
library(sys)
library(caret)
library(magick)
library(fs)
library(abind)
library(imager)
library(purrr)
# -----------------------------------------------------------
# Create Function #
# -----------------------------------------------------------
# Display an image from disk and return it for further inspection.
#
# Args:
#   path: single character string giving an image file path.
#
# Returns: (invisibly) the loaded imager image.  The original returned the
#   value of plot(), so the later `dim(parasite_image)` calls in this
#   script depended on imager's plot method; returning the image
#   explicitly makes those calls reliable.
#
# Stops with an error when `path` is not a single character string.
show_image <- function(path){
  if (!is.character(path) || length(path) != 1){
    stop("The file path must be a single character string")
  }
  image <- imager::load.image(path)
  plot(image)
  invisible(image)
}
# Summarise the geometry of an imager image.
#
# Args:
#   img: an imager image object.
#
# Returns: named list with img_width, img_height, img_depth and
#   img_colour_channels (spectrum, e.g. 3 for RGB).
get_image_info <- function(img){
  list(
    img_width           = imager::width(img),
    img_height          = imager::height(img),
    img_depth           = imager::depth(img),
    img_colour_channels = imager::spectrum(img)
  )
}
# -----------------------------------------------------------
# Set directories of images #
# -----------------------------------------------------------
#setwd("C:/Users/GaryHutson/Desktop/NHS R Community - Lightening Talk 2020")
dataset_dir <- "Data/cell_images/"
train_dir <- paste0(dataset_dir, "/train/")
test_dir <- paste0(dataset_dir, "/test/")
# List files with magick and create an animation of all the parasite cells in training
train_parasite <- dir_ls(path=paste0(train_dir, "parasite"),
                         glob = "*.png")
train_uninfected <- dir_ls(path=paste0(train_dir, "uninfected"),
                           glob = "*.png")
# View an indexed image from the uninfected and parasitsed bunch
parasite_image <- show_image(train_parasite[2])
uninfected_image <- show_image(train_uninfected[2])
# Check the dimensions of the images - these potentially need to be scaled in Keras before
# learning on the images can commence
# NOTE(review): show_image() returns the value of plot(); whether dim()
# below sees the image depends on imager's plot method -- confirm.
dim(parasite_image)
dim(uninfected_image)
# This shows the image width x height x depth x colour channels i.e. rgb - this will need to be
# set in Keras
# Loop through all images to get the image info
# -----------------------------------------------------------
#                 Create image animations                    #
# -----------------------------------------------------------
# Animate the first 100 images of each class into a GIF at 0.5 fps;
# system.time() only reports how long the conversion takes.
system.time(train_parasite[1:100] %>%
              map(image_read) %>%
              image_join() %>%
              image_scale("300x300") %>%
              image_animate(fps = .5) %>%
              image_write("Data/Parasite_Cells.gif"))
system.time(train_uninfected[1:100] %>%
              map(image_read) %>%
              image_join() %>%
              image_scale("300x300") %>%
              image_animate(fps = .5) %>%
              image_write("Data/Uninfected_Cells.gif"))
# -----------------------------------------------------------
# Build Baseline Model #
# -----------------------------------------------------------
# Network input geometry: 130x130 images with 3 colour channels (RGB).
image_shape <- c(130,130,3)
print(image_shape)
#Build Keras Baseline Model
# Conv/max-pool stack followed by flatten and a single sigmoid unit for
# binary (parasitised vs uninfected) classification.
# NOTE(review): layer_dropout(0.5) sits AFTER the sigmoid output layer;
# dropout is normally placed before the dense layers -- confirm intent.
# NOTE(review): the third conv layer has no explicit activation (linear).
model <- keras_model_sequential() %>%
  layer_conv_2d(filters=32, kernel_size=c(3,3), activation = "relu",
                input_shape = image_shape) %>%
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_conv_2d(filters=64, kernel_size = c(3,3),
                input_shape = image_shape, activation="relu") %>%
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_conv_2d(filters=64, kernel_size = c(3,3)) %>%
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_conv_2d(filters=32, kernel_size=c(3,3), activation = "relu",
                input_shape = image_shape) %>%
  layer_max_pooling_2d(pool_size = c(2,2)) %>%
  layer_flatten() %>%
  layer_dense(1, activation = "sigmoid") %>%
  layer_dropout(0.5)
# -----------------------------------------------------------
# Compile baseline model #
# -----------------------------------------------------------
# Compile the baseline model for binary classification, tracking accuracy.
model %>%
  compile(
    loss='binary_crossentropy',
    optimizer=optimizer_rmsprop(),
    metrics = c("acc")
  )
print(model) # This will print the model structure
# Plain generators: only rescale pixel values from [0, 255] to [0, 1].
train_datagen <- image_data_generator(rescale = 1/255)
test_datagen <- image_data_generator(rescale=1/255)
batch_size <- 16
# Stream labelled images from the class subfolders, resized to the
# spatial part of image_shape (130x130).
train_generator <- flow_images_from_directory(
  train_dir,
  train_datagen,
  target_size = c(image_shape[1:2]),
  batch_size = batch_size,
  class_mode = "binary"
)
test_generator <- flow_images_from_directory(
  test_dir,
  test_datagen,
  target_size = c(image_shape[1:2]),
  batch_size = batch_size,
  class_mode = "binary"
)
#batch <- generator_next(train_generator)
#print(batch)
# Train for 50 epochs.  NOTE(review): test_generator doubles as the
# validation set, so "validation" metrics here are test-set metrics.
history <- model %>% fit_generator(
  train_generator,
  steps_per_epoch = 150,
  epochs = 50,
  validation_data = test_generator,
  validation_steps = 75
)
model %>% save_model_hdf5("Data/parasite_cells_classification.h5")
# -----------------------------------------------------------
# Image Augmentation to improve model #
# -----------------------------------------------------------
# Training-time augmentation: random rotations, shifts, shears, zooms and
# horizontal flips, plus rescaling pixel values to [0, 1]. Augmentation
# effectively enlarges the training set and combats overfitting.
image_gen <- image_data_generator(rotation_range = 40,
                                  width_shift_range = 0.1,
                                  height_shift_range = 0.1,
                                  shear_range = 0.1,
                                  zoom_range = 0.8, #Zoom is the key addition
                                  horizontal_flip = T,
                                  fill_mode = 'nearest',
                                  rescale = 1/255)
# FIX: removed a stray interactive `help("image_data_generator")` call that
# opened the help pager every time this script was sourced.
# Validation/test images are only rescaled -- never augmented.
test_datagen <- image_data_generator(rescale=1/255)
# This normalises the pixel scales so our DL can work with the images
# -----------------------------------------------------------
# Create Augmented Model #
# -----------------------------------------------------------
# Create a new, deeper model for the augmented data: four conv/pool stages
# (32-64-128-128 filters) followed by a 512-unit dense layer and a single
# sigmoid output unit.
model <- keras_model_sequential() %>%
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), activation = "relu",
                input_shape = image_shape) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 128, kernel_size = c(3, 3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dense(512, activation = "relu") %>%
  # BUG FIX: dropout was previously applied AFTER the sigmoid output layer,
  # randomly zeroing the network's predictions during training. It now
  # regularises the 512-unit dense layer instead.
  layer_dropout(0.5) %>%
  layer_dense(1, activation = "sigmoid")

# NOTE(review): `lr` is the legacy argument name (newer keras uses
# `learning_rate`); kept for compatibility with the keras version in use.
model %>% compile(
  loss = "binary_crossentropy",
  optimizer = optimizer_rmsprop(lr=1e-4),
  metrics = c("acc")
)

# Get model summary
summary(model)
# -----------------------------------------------------------
# Create augmented versions of images #
# -----------------------------------------------------------
# Stream AUGMENTED training images (batch of batch_size * 2 = 32).
train_gen_augment <- flow_images_from_directory(
  train_dir,
  image_gen,
  target_size = c(image_shape[1:2]),
  batch_size = batch_size * 2,
  class_mode = "binary"
)
# Test images use the rescale-only generator (no augmentation).
test_gen_augment <- flow_images_from_directory(
  test_dir,
  test_datagen,
  target_size = c(image_shape[1:2]),
  batch_size = batch_size * 2,
  class_mode = "binary"
)
# Batch size is 24,960 and batch size selected is 32 so there is 780 images per batch
# NOTE(review): the counts in the comment above do not follow from this code
# alone -- verify against the actual dataset size.
# Train with early stopping: abort once validation loss has not improved for
# 5 consecutive epochs.
history_augment <- model %>%
  fit_generator(
    train_gen_augment,
    steps_per_epoch = 100,
    epochs = 50,
    validation_data = test_gen_augment,
    validation_steps = as.integer(100 / 2),
    callbacks = callback_early_stopping(monitor = "val_loss",
                                        patience=5)
  )
# -----------------------------------------------------------
# Save best fitting model
# -----------------------------------------------------------
# NOTE(review): confirm the history object actually exposes $metrics$acc in
# the keras version in use before relying on this summary.
summary(history_augment$metrics$acc)
model %>% save_model_hdf5("Data/parasite_cells_classification_augmented.h5")
# -----------------------------------------------------------
# Load model to replace the need to train model #
# -----------------------------------------------------------
# Reload the trained model from disk so the (slow) training steps above can
# be skipped on subsequent runs.
model <- load_model_hdf5("Data/parasite_cells_classification_augmented.h5")
# -----------------------------------------------------------
# Predicting and preprocessing test image for prediction
# -----------------------------------------------------------
# Make a prediction with our model
pred_img <- train_parasite[100] #Selects the index as a prediction of a new parasitic image
img_new <- image_load(pred_img, target_size = c(image_shape[1:2]))
pred_img <- image_to_array(img_new)
img_tensor <- array_reshape(pred_img, c(1, image_shape)) # Reshape the array to have a 1 and the image shape
img_tensor <- img_tensor / 255 #Scale the image between 0 and 1
plot(as.raster(img_tensor[1,,,])) #Select the prediction from the tensor array
# Predict image class from model
predval <- predict(model, img_tensor)
# NOTE(review): keras::predict_classes() was removed in newer keras/TensorFlow
# releases; if this errors, derive the class from `predval > 0.5` instead.
pred <- keras::predict_classes(model, img_tensor) %>%
  as.data.frame() %>%
  dplyr::mutate(Class=case_when(
    V1 == 0 ~ "Parasite Class",
    TRUE ~ "Uninfected"
  )) %>%
  dplyr::rename(ClassPred=V1) %>%
  cbind(predval)
print(pred)
# Due to keras from directory sorting by alphabetical order
# Parasite will be give label 0 and Uninfected 1, so we switch these
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
# EOF #
# -----------------------------------------------------------
# -----------------------------------------------------------
# -----------------------------------------------------------
|
f43b2170f8c499c38472980566154bbc18a984b9 | 4ceba5f9ca0867a34b2998eba05c9133fb7e6ad7 | /R Programming/r_program7.R | 0eb20a1d08056d7a5b3f412be9c75465169cce00 | [] | no_license | shashidhargr111/R-Program | e6a777e388f06e2d24bbd6f911d62f1a09f58aaf | 74b5ebd744c9d66ae945a3de5763ec9d8cf347d8 | refs/heads/master | 2020-04-15T12:54:00.983969 | 2019-01-08T16:50:32 | 2019-01-08T16:50:32 | 164,691,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 110 | r | r_program7.R | m<- matrix(-10:9,nrow=4,ncol=5)
print(m)
print("this returns a list")
print(lapply (1:3,function(x) x^2)) |
633dd8b57b29cf0b6f4cbe3958ac41163494b46f | f252c7af74b27070ec28fc6120a72273fce2c39b | /Log/Sep2014.R | 7f20216fc34c0087a04c58583065e553d79a7af8 | [] | no_license | sadapple/Research | bf142ff98a30c28713281aed6a870e19045cb11b | a07b5894c0c6be7e5d980ef79730fd8442046a15 | refs/heads/master | 2016-09-05T11:36:56.473431 | 2016-02-01T02:22:41 | 2016-02-01T02:22:41 | 28,351,519 | 0 | 0 | null | 2016-01-29T21:25:15 | 2014-12-22T18:05:05 | R | UTF-8 | R | false | false | 4,010 | r | Sep2014.R | ## 9.28
x <- sample( c( "dog", "cat", "hamster", "goldfish"), size = 1000, prob=c( 0.25, 0.25, 0.25, 0.25 ) ,replace=TRUE )
table(x)
## search patterns of all the objects in R
ls( pattern="a." , all.names=TRUE)
## 9.25
## Precisions in R
options(digits=6)
a=rnorm(1000,59,3)
b=rnorm(1000,152,4)
a
b
c=round(a,3)
d=round(b,3)
c
d
## save R objects
# Snapshot every workspace object (including dot-prefixed ones) to .RData.
save(list = ls(all = TRUE), file = ".RData")
## Using scan() to read data
# FIX: the original line used typographic quotes around the separator
# (sep=Unicode right-quotes), which R cannot parse, so sourcing this file
# failed outright. Rewritten with ASCII quotes and commented out because
# "fileName" is only a placeholder.
# data <- t(matrix(scan("fileName", sep = ","), 5, 364))
## filter missing values
x[!is.na(x)]
## some other useful functions (kept as a reference list)
# FIX: these were bare no-argument calls (stack(), merge(), ...) that all
# error when the file is sourced; commented out so the log stays runnable.
# stack(); unstack(); merge(); aggregate(); unique()
## rug() show density on side of the plot
x <- rnorm(50)
plot(sort(x))
rug(x, side = 2)
#### before 9.25
## Find the index of all different values in a vector
# Return, for each distinct value of v, the positions where it occurs.
# BUG FIX: the original looped over 1:length(x) (34 iterations) instead of
# over the distinct values, so it produced 31 spurious empty integer(0)
# entries alongside the 3 meaningful ones.
positions_of_values <- function(v) {
  vals <- sort(unique(v))
  setNames(lapply(vals, function(val) which(v == val)), vals)
}
x <- c(2,2,2,2,3,3,3,2,1,2,3,2,2,2,1,1,3,2,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1)
positions_of_values(x)
##### Scotland Referendum via Bayes
# Three polls: columns are Yes %, No %, and sample size n.
polls = NULL
polls <- data.frame( rbind(
  Opinium = c(43, 47, 1156),
  Survation = c(44, 48, 1000),
  ICM = c(41, 45, 1175)
))
# Convert the percentage columns to proportions.
polls[, 1:2] <- polls[, 1:2]/100
# Append a fourth row: sample-size-weighted mean of Yes/No plus the pooled n.
wtd.polls <- rbind(polls, c(apply(polls[,1:2],2, weighted.mean, polls[,3]), sum(polls[,3])))
names(wtd.polls) <- c("Yes","No","n")
# Undecided/other share = whatever Yes and No leave unaccounted for.
wtd.polls$Others = 1-wtd.polls$Yes-wtd.polls$No
# Row 5: reallocate the undecideds to Yes/No proportionally to their current
# shares, giving the adjusted "final" split (Others becomes 0).
wtd.polls[5,] <- data.frame(wtd.polls[4,1:2] + wtd.polls[4,1:2]/ sum(wtd.polls[4,1:2])*wtd.polls[4,4],n=wtd.polls[4,3],Others=0)
##################################################
## With more than two categories (Yes, No, Uncertain) a Beta prior no longer
## suffices, so draws are taken from a Dirichlet posterior instead.
##################################################
# Monte Carlo estimate from the Dirichlet posterior for poll row j of x.
# Arguments:
#   x      - poll table (defaults to the global wtd.polls built above)
#   j      - which row of x to use
#   export - 1: return P(Yes < No); anything else: return the raw draws
# NOTE(review): requires the gtools package; x[j,'Yes'] + x[j,'No'] must not
# exceed 1 or the third Dirichlet parameter goes negative.
prob2win = function(x=wtd.polls, j=5, export=1){
  p=gtools::rdirichlet(100000, x[j,'n']*c(x[j,'Yes'],x[j,'No'], 1-x[j,'Yes']-x[j,'No'])+1
  )
  if(export==1){
    mean(p[,1] < p[,2])
  } else {
    return(p)
  }
}
# Outer parentheses make the assignment also print its value.
(No.win.probs = prob2win(j=5,export=1) )
## Draw the raw posterior samples (export != 1 returns the 100000 x 3 matrix).
p = prob2win(j=5,export=2)
## Append column 4 = No share minus Yes share for each posterior draw.
p = cbind(p, p[,2]-p[,1])
## Histogram of the simulated No-Yes differences, with a reference line at 0.
hist(p[,4], col="gray", nclass=50, main="Histogram of the Yes/No Differences", xlab="Yes/No Difference")
abline(v=0, col=c('red'), lwd=2, lty=c(1))
## Fit Beta distributions to the Yes and No marginals. The start values were
## chosen by trial and error (fitdistr needs a reasonable starting point).
# NOTE(review): fitdistr() lives in MASS; this works here only because
# MCMCpack attaches MASS as a dependency -- consider library(MASS) explicitly.
library(MCMCpack)
fit1 = fitdistr(p[,1], "beta",
                start=list(shape1=3,shape2=3))
fit2 = fitdistr(p[,2], "beta",
                start=list(shape1=3,shape2=3))
library(png)
# NOTE(review): file.choose() is interactive, and assigning to 'png' shadows
# the grDevices::png device function for the rest of the session.
png <- readPNG(file.choose())
# Use the current plot's user coordinates so the image fills the plot box.
# NOTE(review): par()$usr is only meaningful if a plot already exists.
lim <- par()
rasterImage(png, lim$usr[1], lim$usr[3], lim$usr[2], lim$usr[4])
grid()
## Overlay the fitted Beta densities for Yes (green) and No (magenta), plus
## dashed medians and a legend.
curve(dbeta(x,fit1$estimate[1],fit1$estimate[2]), ylim=c(0,50), xlim=c(.40,.60), col='NA', lty=1, lwd=3, ylab="Density", xlab=expression("Posterior "*pi(theta)*""), main="Distribution of the Yes/No Preference Referendum 2014", sub=paste("Probability that NO wins: ", round(No.win.probs,3) ) ) ## yes 1
curve(dbeta(x,fit1$estimate[1],fit1$estimate[2]), xlim=c(.43,.52), col='green', lty=1, add=TRUE, lwd=8) ## yes 1
curve(dbeta(x,fit2$estimate[1],fit2$estimate[2]), add=TRUE, col='magenta', xlim=c(.49,.56), lty=1, lwd=8) ## No 2
abline(v=c(median(p[,1]), median(p[,2])), col=c('green','magenta'), lwd=2, lty=c(2,2))
legend("topright",c("Yes","No"), lwd=2, col=c('green','magenta'), lty=c(1,1))
|
5d100ff5750cdde47830011cb1204bc0d42b2aaa | aa993b5bdd4b3e230a0fe912ca8d038f27c4eef7 | /cachematrix.R | 468560bc43b083fd3e9cc3d51a8986c2b992f61b | [] | no_license | SpaceActuary/ProgrammingAssignment2 | 43e2ace1ad56989289c47f3056b71a4311da10cf | 5a8c1a221030e29b3e732d59b4894068aa928c97 | refs/heads/master | 2021-01-21T07:26:07.173303 | 2015-05-24T20:09:39 | 2015-05-24T20:09:39 | 36,189,865 | 0 | 0 | null | 2015-05-24T19:46:59 | 2015-05-24T19:46:58 | null | UTF-8 | R | false | false | 1,243 | r | cachematrix.R | ## Solving for the inverse of a matrix can be computationally
## intensive, so these functions cache (save) the results of
## already solved matrices in case you need to solve them again.
## To use these functions, first create a CacheMatrix from your
## original matrix 'm':
##
## > m2 <- makeCacheMatrix(m)
##
## Then, solve the matrix using the cacheSolve function:
##
## > cacheSolve(m2)
## makeCacheMatrix creates a list of 4 functions
## to solve the matrix and save the results
# Wrap a matrix in a caching container: a list of four closures that share
# the matrix `x` and its (lazily computed) inverse via their enclosing
# environment. Replacing the matrix with set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    # Store the new matrix and drop any stale cached inverse.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve checks to see if the matrix 'x' has already
## been solved. if so, it returns the previous result.
## if not, it solves the matrix and saves the result for later.
# Return the inverse of the matrix held in the cache object `x` (as built by
# makeCacheMatrix). On a cache hit a message is emitted and the stored
# inverse is returned; on a miss the inverse is computed with solve(),
# stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoise, return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
5e98cfa1b6c7f7546f761bb33296a63f9c03be10 | c02f51ef0601063f2a8785bebb79036d2fde98fa | /EDA_prj2/plot2.R | 384d89bfb2e9564692971ed61e37af2528b889b0 | [] | no_license | eyedvabny/coursera-ds-main | 0d5284c0e1175de288d749b8c1270e7db158629b | d93f518bf5af8ead1fc69e6cf2d3105e785388da | refs/heads/master | 2016-09-06T14:15:30.076467 | 2014-10-25T21:34:28 | 2014-10-25T21:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | plot2.R | # Load in the data
NEI <- readRDS("FNEI_data/summarySCC_PM25.rds")
# Aggregate the pollution by year
agg_emiss <- aggregate(Emissions~year,NEI[NEI$fips=="24510",],sum)
with(agg_emiss,{
png(filename="plot2.png", height=480, width=480)
plot(year,Emissions,
type='l',
main='Total Emissions in Baltimore',
ylab='Emissions (tons)',
xlab='Year')
dev.off()
}) |
66600fd979baa17aa75a37488a0a19e01dc43de0 | e756bcd2b578b74c238dbca53ef88e2d026bd121 | /man/h_common.Rd | 2be67f2ec4bf9f8bb16255e26414b2c15a2e29f9 | [] | no_license | HYDFKI7/htsr | d95abbdbafde547b726733524176bd08e55ccdee | d56e8ea98c99f0158b0465d279ae805728ec5adb | refs/heads/master | 2023-02-02T23:56:09.548157 | 2020-12-16T07:50:02 | 2020-12-16T07:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 627 | rd | h_common.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h_common.R
\name{h_common}
\alias{h_common}
\title{Extract 2 (or more) time-series on their common period}
\usage{
h_common(files)
}
\arguments{
\item{files}{List of file names to process.}
}
\value{
hts files resulting of the operation; their names are composed as:
co_<original filename>
}
\description{
The function extracts the data of 2 (or more) hts time-series
for the common date/time records (with a precision of one second).
}
\examples{
\dontrun{
f <- h_common(files = c("foo1.hts","foo2.hts"))
}
}
\author{
P. Chevallier - Oct 2017-Dec 2019
}
|
d55540a6d9dca36219a04d3512c0f4b46d8637ae | fb2df9e50233feb186f1318d18745d4d5775bc5c | /R/plot-yield.R | b7fecb4de15feef51af3cc58e4481aa718b4a8f4 | [
"MIT"
] | permissive | poissonconsulting/ypr | 2c6fc7292027b6a15e2c7e3e87253a4d11dd4ca4 | 708f12e548716ff5cf4875af1b6d49980525a6fc | refs/heads/main | 2023-04-06T18:33:16.694687 | 2023-03-30T20:20:39 | 2023-03-30T20:20:39 | 130,767,486 | 6 | 0 | NOASSERTION | 2022-08-25T21:22:47 | 2018-04-23T23:15:39 | R | UTF-8 | R | false | false | 5,547 | r | plot-yield.R | #' Plot Yield by Capture
#'
#' Plots the 'Yield', 'Age', 'Length', 'Weight', 'Effort', or 'YPUE' by the
#' **annual interval** capture/exploitation probability.
#'
#' @inheritParams params
#' @inheritParams ypr_plot_schedule
#' @return A ggplot2 object.
#' @family populations
#' @family yield
#' @family plot
#' @examples
#' \dontrun{
#' ypr_plot_yield(ypr_populations(
#' Rk = c(2.5, 4.6),
#' Llo = c(0, 60)
#' ),
#' plot_values = FALSE
#' ) +
#' ggplot2::facet_wrap(~Llo) +
#' ggplot2::aes_string(group = "Rk", color = "Rk") +
#' ggplot2::scale_color_manual(values = c("black", "blue"))
#'
#' ypr_plot_yield(ypr_populations(Rk = c(2.5, 4.6), Llo = c(0, 60))) +
#' ggplot2::facet_grid(Rk ~ Llo)
#' }
#'
#' ypr_plot_yield(ypr_population())
#' @export
#'
# S3 generic: dispatch on class(object); methods for ypr_populations and a
# default (which accepts ypr_population / ypr_ecotypes) are defined below.
ypr_plot_yield <- function(object, ...) {
  UseMethod("ypr_plot_yield")
}
#' @describeIn ypr_plot_yield Plot Yield by Capture
#' @export
# Default method: plot the chosen response (y) against the annual interval
# capture (pi) or exploitation (u) probability for a single population.
ypr_plot_yield.default <- function(object,
                                   y = "Yield",
                                   pi = seq(0, 1, length.out = 100),
                                   Ly = 0,
                                   harvest = TRUE,
                                   biomass = FALSE,
                                   u = harvest,
                                   plot_values = TRUE,
                                   ...) {
  # Validate inputs up front; soft dependencies are only suggested packages.
  chkor_vld(vld_is(object, "ypr_population"), vld_is(object, "ypr_ecotypes"))
  if (!requireNamespace("ggplot2")) err("Package 'ggplot2' must be installed.")
  if (!requireNamespace("scales")) err("Package 'scales' must be installed.")
  chk_number(Ly)
  chk_gte(Ly)
  chk_flag(biomass)
  chk_flag(harvest)
  chk_string(y)
  chk_subset(y, c("Yield", "Age", "Length", "Weight", "Effort", "YPUE"))
  chk_flag(u)
  # Response values across the grid of capture probabilities (one row per pi).
  data <- ypr_tabulate_yields(
    object,
    pi = pi,
    Ly = Ly,
    harvest = harvest,
    biomass = biomass
  )
  # Actual/optimal yield point(s) used for the dotted reference cross-hairs.
  data2 <- ypr_tabulate_yield(
    object = object,
    Ly = Ly,
    harvest = harvest,
    biomass = biomass
  )
  # Yield per unit effort is derived rather than tabulated.
  data$YPUE <- data$Yield / data$Effort
  data2$YPUE <- data2$Yield / data2$Effort
  # Build an L-shaped reference path through each tabulated yield point:
  # data1 zeroes the response columns (drop to the x axis) and data3 zeroes
  # the probability columns (drop to the y axis).
  data1 <- data2
  data3 <- data2
  data1[c("Yield", "Age", "Length", "Weight", "Effort", "YPUE")] <- 0
  data3[c("pi", "u")] <- 0
  data2 <- rbind(data1, data2, data3, stringsAsFactors = FALSE)
  # u selects whether the x axis shows exploitation or capture probability.
  xlab <- if (u) "Exploitation Probability (%)" else "Capture Probability (%)"
  x <- if (u) "u" else "pi"
  # Adding a list of layers (or NULL) to a ggplot is valid, so the dotted
  # reference layers are appended conditionally on plot_values.
  ggplot2::ggplot(data = data, ggplot2::aes_string(x = x, y = y)) +
    (
      if (plot_values) {
        list(
          ggplot2::geom_path(
            data = data2,
            ggplot2::aes_string(
              group = "Type",
              color = "Type"
            ),
            linetype = "dotted"
          ),
          ggplot2::scale_color_manual(values = c("red", "blue"))
        )
      } else {
        NULL
      }) +
    ggplot2::geom_line() +
    ggplot2::expand_limits(x = 0) +
    ggplot2::scale_x_continuous(xlab, labels = scales::percent) +
    NULL
}
#' @describeIn ypr_plot_yield Plot Yield by Capture
#' @export
# Method for a collection of populations: same plot as the default method,
# but population-distinguishing parameters are converted to labelled factors
# so the result can be faceted (see the examples on the generic).
ypr_plot_yield.ypr_populations <- function(object,
                                           y = "Yield",
                                           pi = seq(0, 1, length.out = 100),
                                           Ly = 0,
                                           harvest = TRUE,
                                           biomass = FALSE,
                                           u = harvest,
                                           plot_values = TRUE,
                                           ...) {
  if (!requireNamespace("ggplot2")) err("Package 'ggplot2' must be installed.")
  if (!requireNamespace("scales")) err("Package 'scales' must be installed.")
  # CONSISTENCY FIX: validate Ly, harvest and biomass as the default method
  # does; previously only y and u were checked here, so bad arguments passed
  # straight through to the tabulation functions.
  chk_number(Ly)
  chk_gte(Ly)
  chk_flag(biomass)
  chk_flag(harvest)
  chk_string(y)
  chk_subset(y, c("Yield", "Age", "Length", "Weight", "Effort", "YPUE"))
  chk_flag(u)
  # Response values across the grid of capture probabilities, per population.
  data <- ypr_tabulate_yields(object,
    pi = pi, Ly = Ly, harvest = harvest,
    biomass = biomass
  )
  # Actual/optimal yield points for the dotted reference cross-hairs.
  data2 <- ypr_tabulate_yield(
    object = object,
    Ly = Ly,
    harvest = harvest,
    biomass = biomass
  )
  data$YPUE <- data$Yield / data$Effort
  data2$YPUE <- data2$Yield / data2$Effort
  # Turn every varying population parameter (other than pi) into a factor of
  # "name: value" labels, levelled in ascending value order, so facet strips
  # are informative and ordered.
  parameters <- setdiff(intersect(colnames(data), parameters()), "pi")
  for (parameter in parameters) {
    data[[parameter]] <- factor(
      paste0(parameter, ": ", data[[parameter]]),
      levels = unique(paste0(parameter, ": ", sort(data[[parameter]])))
    )
    data2[[parameter]] <- factor(
      paste0(parameter, ": ", data2[[parameter]]),
      levels = unique(paste0(parameter, ": ", sort(data2[[parameter]])))
    )
  }
  # L-shaped reference path through each yield point (see default method).
  data1 <- data2
  data3 <- data2
  data1[c("Yield", "Age", "Length", "Weight", "Effort", "YPUE")] <- 0
  data3[c("pi", "u")] <- 0
  data2 <- rbind(data1, data2, data3, stringsAsFactors = FALSE)
  xlab <- if (u) "Exploitation Probability (%)" else "Capture Probability (%)"
  x <- if (u) "u" else "pi"
  ggplot2::ggplot(data = data, ggplot2::aes_string(x = x, y = y)) +
    (
      if (plot_values) {
        list(
          ggplot2::geom_path(
            data = data2,
            ggplot2::aes_string(
              group = "Type",
              color = "Type"
            ),
            linetype = "dotted"
          ),
          ggplot2::scale_color_manual(values = c("red", "blue"))
        )
      } else {
        NULL
      }) +
    ggplot2::geom_line() +
    ggplot2::expand_limits(x = 0) +
    ggplot2::scale_x_continuous(xlab, labels = scales::percent) +
    NULL
}
fb981ccd727b6fe7c075d523c70618067d9af2ac | 78a2baf744b2513d1366a2d51000c2ffea08d794 | /man/calcContinuityFromRank.Rd | 8f3d598661e5f7411e3765799f615b9ae078a542 | [
"MIT"
] | permissive | csoneson/dreval | 3b6fbe6ae8fda66435d4ec763b85d4968dcf97e3 | 0b4d11e08badd7651ae05e1a274d8ab3bf3f33eb | refs/heads/master | 2023-07-08T03:35:43.242884 | 2023-06-25T05:45:30 | 2023-06-25T05:45:30 | 206,408,507 | 7 | 0 | NOASSERTION | 2020-02-13T11:50:22 | 2019-09-04T20:33:35 | R | UTF-8 | R | false | true | 1,838 | rd | calcContinuityFromRank.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/continuity.R
\name{calcContinuityFromRank}
\alias{calcContinuityFromRank}
\title{Calculate continuity based on sample rankings}
\usage{
calcContinuityFromRank(rankReference, rankLowDim, kTM)
}
\arguments{
\item{rankReference}{N x N matrix, each row/column corresponding to one
sample. The value of entry (i, j) represents the position of sample i in
the ranking of all samples with respect to their distance from sample j,
based on the reference (high-dimensional) observed values. The sample
itself has rank 0.}
\item{rankLowDim}{N x N matrix, each row/column corresponding to one sample.
The value of entry (i, j) represents the position of sample i in the
ranking of all samples with respect to their distance from sample j, based
on the low-dimensional representation. The sample itself has rank 0.}
\item{kTM}{The number of nearest neighbors.}
}
\value{
The continuity value.
}
\description{
The continuity was proposed by Venna and Kaski, as a local quality measure of
a low-dimensional representation. The metric focuses on the preservation of
local neighborhoods, and compares the neighborhoods of points in the
low-dimensional representation to those in the reference data. Hence, the
continuity measure indicates to which degree we can trust that the points
closest to a given sample in the reference data set are placed close to the
sample also in the low-dimensional representation. The \code{kTM} parameter
defines the size of the neighborhoods to consider.
}
\references{
Venna J., Kaski S. (2001). Neighborhood preservation in nonlinear
projection methods: An experimental study. In Dorffner G., Bischof H.,
Hornik K., editors, Proceedings of ICANN 2001, pp 485–491. Springer,
Berlin.
}
\author{
Charlotte Soneson
}
\keyword{internal}
|
90d0db168d88fb1a017c4ec7c8efd38d155fe841 | 0abf16c147a819cf5fd9bb4ce380cf4f2222bb8d | /Data_Science - Johns Hopkins University/003_Getting_and_Cleaning_Data/Project/run_analysis.R | 3db0312e364e712e84cb934f88f0f95327e67cfa | [] | no_license | bhunkeler/DataScienceCoursera | 6c6c17f5808cd6a8e882f7558ca32e70b9b39b30 | 4ae8c176acbb5b2d78ff08379a856c4afefea8f8 | refs/heads/master | 2022-05-01T14:26:22.738900 | 2022-03-11T16:56:07 | 2022-03-11T16:56:07 | 43,755,669 | 52 | 120 | null | 2022-03-24T19:07:28 | 2015-10-06T14:23:57 | Jupyter Notebook | UTF-8 | R | false | false | 5,038 | r | run_analysis.R | # ========================================================================================================
# Description: run_analysis.R - Getting and Cleaning Data - John Hopkins University
#
# This R script called run_analysis.R does the following:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each
# measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
#
# Data Resource: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# Data Description: http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Authhor: Bruno Hunkeler
# Date: xx.12.2015
#
# ========================================================================================================
# ========================================================================================================
# Load Libraries
# ========================================================================================================
library(reshape2)
library(data.table)
# download end extract ZipFile if not already downloaded
# Download and extract the UCI HAR dataset once; skip if already unpacked.
if(!file.exists("UCI HAR Dataset")){
  dataURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  # BUG FIX: 'zipFile' was referenced but never defined, so download.file()
  # failed with "object 'zipFile' not found" on a fresh checkout.
  zipFile <- "UCI_HAR_Dataset.zip"
  # mode = "wb" keeps the binary zip intact on Windows.
  download.file(dataURL, zipFile, mode = "wb")
  unzip(zipFile, files = NULL, list = FALSE, overwrite = TRUE, junkpaths = FALSE, exdir = ".", unzip = "internal", setTimes = FALSE)
}
# Load the test split: feature matrix (X), activity labels (y), subject ids.
test.x <- read.table("./UCI HAR Dataset/test/X_test.txt")
test.y <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject.test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# Load the training split in the same layout.
train.x <- read.table("./UCI HAR Dataset/train/X_train.txt")
train.y <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject.train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# ====================================================================
# Part 1 - Merge the training and test sets (row-wise) into one set.
# ====================================================================
merged.x <- rbind(test.x, train.x)
merged.y <- rbind(test.y, train.y)
merged.subject <- rbind(subject.test, subject.train)
# Name the 561 feature columns from features.txt (second column holds names).
features.names <- read.table("./UCI HAR Dataset/features.txt")
features.names <- features.names$V2
colnames(merged.x) <- features.names
# ====================================================================
# Part 2 - Keep only mean and standard-deviation measurements.
# NOTE(review): the pattern "mean|std" also matches meanFreq() columns --
# confirm that is intended.
# ====================================================================
merged.subset <- merged.x[ , grep("mean|std", colnames(merged.x))]
# ====================================================================
# Part 3 - Attach descriptive activity names (WALKING, SITTING, ...).
# ====================================================================
activity.labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
merged.y$activity <- activity.labels[merged.y$V1, 2]
# ====================================================================
# Part 4 - Give the label columns descriptive variable names.
# ====================================================================
names(merged.y) <- c("ActivityID", "ActivityLabel")
names(merged.subject) <- "Subject"
# ====================================================================
# Part 5 - Tidy data set: the mean of every variable for each
# subject/activity combination, written to tidy_data.txt.
# ====================================================================
# Bind id columns and features, melt to long form, then cast back wide
# taking the mean per Subject x ActivityLabel cell.
merged.all <- cbind(merged.subject, merged.y, merged.x)
labels.all <- c("Subject", "ActivityID", "ActivityLabel")
data.labels = setdiff(colnames(merged.all), labels.all)
melted.data = melt(merged.all, id = labels.all, measure.vars = data.labels, na.rm=TRUE)
tidy.data = dcast(melted.data, Subject + ActivityLabel ~ variable, mean)
write.table(tidy.data, file = "./tidy_data.txt", row.names = FALSE)
# Sentinel marking the end of the script (no functional purpose).
ThisIsTheEnd <- "ThisIsTheEnd"
|
025dbdb09ab544be4184b477118171c27a4d9da6 | 961a69f174505f34a29820d2a0e1882b91fa6de6 | /get_experiments.R | 70844f44eafbc7a6b2d9f14f2fff6d4ab6730efb | [] | no_license | meuleman/roadmap_state_calls | ba2217cf1ca4edc0334c544f768d7886a4399214 | 9636773b340fde40ced75cb624500169961e8679 | refs/heads/master | 2021-01-19T01:40:21.811731 | 2016-11-09T18:12:53 | 2016-11-09T18:12:53 | 47,841,922 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,941 | r | get_experiments.R | library(jsonlite)
library(httr)
library(gplots)
################################################################################################################
consumer_key = "QBJZ2HWO";
consumer_secret = "gb2prbadp2vuwucd";
secret <- RCurl::base64(paste(consumer_key, consumer_secret, sep=":"));
################################################################################################################
### This is where we obtain most of the metadata
URLs <- list("released_hg19" = "type=experiment&status=released&assembly=hg19",
"released_GRCh38" = "type=experiment&status=released&assembly=GRCh38",
# "preliminary" = "type=Experiment&status=preliminary&replicates.library.biosample.donor.organism.scientific_name=Homo+sapiens",
# "proposed" = "type=Experiment&status=proposed&replicates.library.biosample.donor.organism.scientific_name=Homo+sapiens",
# "submitted" = "type=Experiment&status=submitted&replicates.library.biosample.donor.organism.scientific_name=Homo+sapiens",
"all" = "type=experiment&replicates.library.biosample.donor.organism.scientific_name=Homo%20sapiens");
for (i in 1:length(URLs)) {
url <- paste("https://www.encodeproject.org/search/?", URLs[[i]], "&format=json&limit=all", sep="");
nam <- names(URLs)[i];
print(nam);
if (length(grep(paste("^experiments_", nam, ".RData$", sep=""), dir())) == 0) {
call <- GET(url, config(c("Authorization" = paste("Basic", secret))))
obj <- fromJSON(rawToChar(call$content))
# Parse some of the data and save.
terms <- obj$facets[[2]]
assays <- obj$facets[3,"terms"][[1]]
dates <- obj$facets[22,"terms"][[1]]
filetypes <- obj$facets[15,"terms"][[1]];
experiments <- obj[["@graph"]]
save(assays, dates, filetypes, experiments, terms, file=paste("experiments_", nam, ".RData", sep=""))
}
}
|
4a7e95788fd39f5184eb9506382f9f0551085d65 | 4f886387a6c0c86b39d799c0270bfc8eabf11e8c | /RData/Lecture/07-1.R | f909d16829044da74fff8786b80d31970f04e3fb | [] | no_license | rumen-scholar/kosmo41_KimCheolEon | 38d3cbdd7576784440c95b6291656e11eb20915e | 3ea53334f6b8178c8f85c8bc5bf23d58429bb3a0 | refs/heads/master | 2020-03-21T04:41:23.634173 | 2018-12-20T09:19:06 | 2018-12-20T09:19:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,307 | r | 07-1.R | ## 결측치 정제하기 ##
#결측치 찾기
#결측치가 포함된 데이터 프레임 생성
df <- data.frame(sex = c("M", "F", NA, "M", "F"),
score = c(5, 4, 3, 4, NA))
df
#결측치 확인
is.na(df)
#결측치 빈도 출력
table(is.na(df))
table(is.na(df$sex))
table(is.na(df$score))
#평균산출 - 결측치때문에 에러가 발생(NA)
mean(df$score)
sum(df$score)
#---------------------------------------------------#
#결측치 제거
#dplyr 패키지 로드
library(dplyr)
#score 가 NA인 데이터만 출력
df %>% filter(is.na(score))
#score 결측치를 제거
df_nomiss <- df %>% filter(!is.na(score))
#score 평균 산출
mean(df_nomiss$score)
#score, sex 결측치 제거
df_nomiss <- df %>% filter(!is.na(score) & !is.na(sex))
df_nomiss
#모든 변수에 결측치 없는 데이터 추출
# - 결측치가 하나라도 있으면 제거된다.
# - 간편하지만 분석에 필요한 행까지 손실될 수 있다. (단점)
#위와 같은 방식. 한번에. 물론 단점도 동일함.
df_nomiss2 <- na.omit(df)
df_nomiss2
##------------------------------------------------------------------##
#함수의 결측치 제외 기능 이용하기
mean(df$score) # X
mean(df$score, na.rm = T)
sum(df$score, na.rm = T)
exam <- read.csv("../Doit_R-master/Data/df_csv_exam2_re.csv", fileEncoding = "UTF-8")
#결측치 삽입
exam[c(3,8,15), "math"] <- NA
exam
#math 평균 산출
exam %>% summarise(mean_math = mean(math))
#math 결측치 제외하고 평균 산출 (계산해보니, 결측치는 아예 인원에서도 제외됨. 20명에 결측치 3명 제외 17명으로 평균)
exam %>% summarise(mean_math = mean(math, na.rm = T))
exam %>% summarise(mean_math = mean(math, na.rm = T),
sum_math = sum(math, na.rm = T),
median_math = median(math, na.rm = T))
## 결측치 대체법 ##
## - 결측치를 제거하는 대신 다른 값을 채워넣는 방법
#결측치를 제외하고 math 평균 산출
exam %>% summarise(mean_math = mean(math, na.rm = T))
#math 가 NA면 평균값으로 결측치를 대체
exam$math <- ifelse(is.na(exam$math), exam %>% summarise(mean_math = mean(math, na.rm = T)), exam$math)
#결측치 빈도표 생성
table(is.na(exam$math))
exam
#math 평균 산출
mean(exam$math)
|
dbb51b1794acb2a7584c4f6612cb9063d8105a86 | f842ff215f44fdb06446475280abb0a2e2cedcb6 | /cars_script.R | 7ae755c0e5ac7d82566a0647d69049d033887d72 | [] | no_license | nelsonrsjr/hw1-cars | 3eb1a0c07736376fd9a0312c107c521eae875f33 | c514f34d8cd268d5fac03865f52446db01b8373c | refs/heads/main | 2023-02-18T18:43:52.157764 | 2021-01-20T09:01:56 | 2021-01-20T09:01:56 | 330,103,209 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 563 | r | cars_script.R | # cars <- read.csv( "cars.csv")
cars # built in dataset - comes with R
speed <- cars$speed # save speed as a vector
dist <- cars$dist
plot( speed, dist, cex=2) # plot x,y
speed.lm <- lm (dist ~ speed) # save regression line
abline( speed.lm, col="red" ) # plot regression line - you now have the best fit line plotted with the points
par(mfrow=c(2,2)) # this sets the plot environment to do 4 plots, 2 by 2
plot(speed.lm) # the default plot method for lm - crazy huh?
write.csv(cars, "cars.csv") # save data to csv file in working directory
|
55456b8ce2c3cd6aeed8ce7de398115250ea3f3a | 3ffe61be1846789242fba8c3bb71663b2053d074 | /practice_chap02(R의 데이터구조의 이해)/ex04.R | eee80f89f6506ec91d1bd1a84a8d3b61e31be7ee | [] | no_license | harrycjy1/R-bootcamp | 244cefae2b1785ce3b31acf57753447febf2f871 | 2db273a53188dd1fd825b35f8a3bdb3ba308fb0e | refs/heads/master | 2020-04-02T21:07:22.552300 | 2018-10-26T06:48:57 | 2018-10-26T06:48:57 | 154,788,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 775 | r | ex04.R | #리스트(list)는 다차원이며 벡터의 각 원소들이 이름을 갖고, 각 원소들은
#서로 다른 데이터 유형으로 구성되어 있다. 벡터는 모든 요소들이 모두 같은
#데이터 타입이지만, 리시트는 요소들이 여러 데이터 타입을 가지는 데이터 구조가
#가능하다.
#list()
#패키지 : base
#사용법 : list(... )
#설명 : 리스트 생성
#반환값 : 리스트
#매개변수
# ...: 객체들(가능하면 이름이 주어짐)
#list생성
#"신은혁"을 표현하는 리스트이다.
x <- list("신은혁","20081223",11,c("4학년 3반","태권도"))
x
class(x)
y <- list(성명 = "신은혁", 출생 = "20081223", 나이 = 11, 반특기 = c("4학년3반","태권도"))
y
View(y)
|
a2830ff440d4eaa0fc7763f208a3f4ffbfcfcc7e | 0a5dc6db4c2c5e2377d9f09238449eb1e7534303 | /R/csv.to.bvh.R | 054a76ee6519cd9714f842a95e084dce6f86e048 | [] | no_license | Vohrt/RMoCap | 87b13ba5726478b54ae14018318265a4a5821e26 | 76a81d8976161f25a5e97dae9c7dc2ec097c307e | refs/heads/master | 2022-12-10T01:28:46.384522 | 2020-09-11T02:56:30 | 2020-09-11T02:56:30 | 294,393,675 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,587 | r | csv.to.bvh.R | #' This function recalculates direct to hierarchical kinematic model.
#'
#' Procedure implements iterative algebraic procedure with additional initial optimization, that is required to align root body joints.
#' Optimization is done using simplex method. The rotation order in hierarchical model is automatically set to ZYX, even if input.skeleton has different order.
#'
#' @param input.skeleton object of mocap class that defines hierarchical kinematic model.
#' @param df.to.save data frame with column names compatible with input.skeleton. Data that is used for calculation has to be placed in columns with names ending .Dx, .Dy and .Dz.
#' @param plot.me if TRUE plot steps of skeleton aligning of frame with index frame.id. Default value is plot.me = FALSE.
#' @param frame.id if frame.id > 0 and plot.me = TRUE plot steps of skeleton aligning of frame with index frame.id. Default value is frame.id = -1.
#' @param debug.messages print additional messages informing about calculation progress.
#'
#' @return object of class mocap.
#'
#' @examples
#' data("header.mocap")
#' data("heian.yondan")
#'
#' input.skeleton <- header.mocap
#'
#' df.to.save <- heian.yondan[1:300,]
#' first.frame <- df.to.bvh(input.skeleton, df.to.save, plot.me = FALSE, debug.messages = TRUE)
#' write.bvh(first.frame, "e:\\bvh in r\\gotowy_kod\\output\\heian.yondan.frames300.bvh")
#'
#' plot(first.frame$skeleton$Joints[[1]]$Rxyz[,1], type = "l", col = "black", xlab = "sample", ylab = "angle (degrees)")
#' lines(first.frame$skeleton$Joints[[1]]$Rxyz[,2], type = "l", col = "red")
#' lines(first.frame$skeleton$Joints[[1]]$Rxyz[,3], type = "l", col = "blue")
#' legend("bottomright", legend=c("X axis rotation", "Y axis rotation", "Z axis rotation"), col=c("black", "red", "blue"), lty = 1)
#' title("Hips rotation data")
#'
#' plot(df.to.save[,2], ylab = "Displacement [cm]", xlab = "Time [10^-2 sec]", pch = 1)
#' for (a in 1:ncol(df.to.save))
#' {
#' df.to.save[,a] <- jitter(df.to.save[,a], factor = 500)
#' }
#' points(df.to.save[,2],col="red", pch = 2)
#' legend("bottomright", legend=c("Original", "Jitter"), col=c("black", "red"), pch = c(1,2))
#' title("Example channel of MoCap data")
#'
#' first.frame <- df.to.bvh(input.skeleton, df.to.save, plot.me = FALSE, debug.messages = TRUE)
#'
#' #plot rotation data
#' plot(first.frame$skeleton$Joints[[1]]$Rxyz[,1], type = "l", col = "black", xlab = "sample", ylab = "angle (degrees)")
#' lines(first.frame$skeleton$Joints[[1]]$Rxyz[,2], type = "l", col = "red")
#' lines(first.frame$skeleton$Joints[[1]]$Rxyz[,3], type = "l", col = "blue")
#' legend("bottomright", legend=c("X axis rotation", "Y axis rotation", "Z axis rotation"), col=c("black", "red", "blue"), lty = 1)
#' title("Hips rotation data")
#'
#' write.bvh(first.frame, "e:\\bvh in r\\gotowy_kod\\output\\jitter.heian.yondan.frames300.bvh")
#'
#' df.to.save <- heian.yondan[1000:1001,]
#' foo <- df.to.bvh(input.skeleton, df.to.save, plot.me = TRUE, debug.messages = FALSE, frame.id = 1)
df.to.bvh <-function(input.skeleton, df.to.save, plot.me = FALSE, frame.id = -1, debug.messages = FALSE)
{
for (a in 1:length(input.skeleton$skeleton$Joints))
{
input.skeleton$skeleton$Joints[[a]]$Order <- c(3,2,1)
}
if (frame.id != -1 && frame.id > nrow(df.to.save))
{
frame.id <- nrow(df.to.save)
}
first.frame <- generate.first.frame(input.skeleton, nrow(df.to.save))
for (index in 1:nrow(df.to.save))
{
exclusion.vector <- c()
list.to.visit <- list()
find.child <- function(skeleton, parent.id = -1, exclusion.vector = c())
{
allchildren <- c()
for (a in 1:length(skeleton$Joints))
{
if (skeleton$Joints[[a]]$Parent == parent.id)
{
if (!(a %in% exclusion.vector))
{
allchildren <- c(allchildren, a)
}
}
}
return (allchildren)
}
#parent - different processing
#find all parent joints, works only if there are at least two children
skeleton <- input.skeleton$skeleton
parent.id <- find.child(skeleton)
first.frame$skeleton$Joints[[parent.id]]$RawDxyz[index,1] <- df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dx", sep = "")] - first.frame$skeleton$Joints[[parent.id]]$Offset[1]
first.frame$skeleton$Joints[[parent.id]]$RawDxyz[index,2] <- df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dy", sep = "")] - first.frame$skeleton$Joints[[parent.id]]$Offset[2]
first.frame$skeleton$Joints[[parent.id]]$RawDxyz[index,3] <- df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dz", sep = "")] - first.frame$skeleton$Joints[[parent.id]]$Offset[3]
first.frame <- generate.single.frame(first.frame, index)
exclusion.vector <- c(exclusion.vector, parent.id)
if (plot.me && frame.id == index)
{
plot(first.frame, my.color = rgb(runif(5),runif(5),runif(5)), frame = index, spheres = FALSE, alpha = (10/(length(skeleton$Joints)+2)))
}
#find all children of root joint
ChildId <- find.child(skeleton, parent.id, exclusion.vector)
#they are excluded from further finding
exclusion.vector <- c(exclusion.vector, ChildId)
for (a in 1:length(ChildId))
{
list.to.visit[[length(list.to.visit) + 1]] <- ChildId[a]
}
SecondChildId <- ChildId[2]
ChildId <- ChildId[1]
parent.dxyz <- c(df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dx", sep = "")],
df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dy", sep = "")],
df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dz", sep = "")])
child.dxyz <- c(df.to.save[index,paste(skeleton$Joints[[ChildId]]$Name, ".Dx", sep = "")],
df.to.save[index,paste(skeleton$Joints[[ChildId]]$Name, ".Dy", sep = "")],
df.to.save[index,paste(skeleton$Joints[[ChildId]]$Name, ".Dz", sep = "")])
map <- child.dxyz - parent.dxyz
tomapowac <- first.frame$skeleton$Joints[[ChildId]]$Offset
mapu <- vector.to.unit(map)
tomapowacu <- vector.to.unit(tomapowac)
Rx2y = rotation.matrix.between.vectors(tomapowacu, mapu)
if (anyNA(Rx2y))
{
Rx2y <- matrix(c(1,0,0,0,1,0,0,0,1),nrow = 3, ncol = 3)
}
library(RSpincalc)
ea <- DCM2EA(Rx2y, 'zyx',ignoreAllChk = TRUE) * 180 / pi
eeaa <- ea * (pi / 180)
q.lewe.biodro <- EA2Q(eeaa, 'zyx')
Q2DCM(q.lewe.biodro)
vectQrot(q.lewe.biodro, tomapowacu)
input.skeleton$skeleton$Joints[[1]]$Trans[[1]]
c(ea[3],ea[2],ea[1])
input.skeleton$skeleton$Joints[[1]]$Rxyz[1,]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,1] <- ea[3]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,2] <- ea[2]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,3] <- ea[1]
first.frame <- generate.single.frame(first.frame, index)
axis <- first.frame$skeleton$Joints[[ChildId]]$Dxyz[index,] - first.frame$skeleton$Joints[[parent.id]]$Dxyz[index,]
axisu <- vector.to.unit(axis)
#########################################
library('subplex')
euc.dist <- function(x1, x2) sqrt(sum((x1 - x2) ^ 2))
optimizeangle <- function(x)
{
q.prawe.biodro <- myEV2Q(axisu, x)
q.oba.biodra <- q.prawe.biodro %Q*% q.lewe.biodro
if (anyNA(q.oba.biodra)) {
ea.oba.biodra <- matrix(c(0, 0, 0), nrow = 1,
ncol = 3)
}
else
{
ea.oba.biodra <- Q2EA(q.oba.biodra, 'zyx', ignoreAllChk = FALSE) * 180 / pi
}
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,1] <- ea.oba.biodra[3]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,2] <- ea.oba.biodra[2]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,3] <- ea.oba.biodra[1]
first.frame <- generate.single.frame(first.frame, index = index)
v11 <- c(df.to.save[index,paste(skeleton$Joints[[SecondChildId]]$Name, ".Dx", sep = "")],
df.to.save[index,paste(skeleton$Joints[[SecondChildId]]$Name, ".Dy", sep = "")],
df.to.save[index,paste(skeleton$Joints[[SecondChildId]]$Name, ".Dz", sep = "")])
v2 <- first.frame$skeleton$Joints[[SecondChildId]]$Dxyz[index,]
return (euc.dist(v11, v2))
}
response <- subplex(par=c(0),fn=optimizeangle)
q.prawe.biodro <- myEV2Q(axisu, response$par)
q.oba.biodra <- q.prawe.biodro %Q*% q.lewe.biodro
if (anyNA(q.oba.biodra)) {
ea.oba.biodra <- matrix(c(0, 0, 0), nrow = 1,
ncol = 3)
}
else
{
ea.oba.biodra <- Q2EA(q.oba.biodra, 'zyx', ignoreAllChk = FALSE) * 180 / pi
}
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,1] <- ea.oba.biodra[3]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,2] <- ea.oba.biodra[2]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,3] <- ea.oba.biodra[1]
first.frame <- generate.single.frame(first.frame, index)
if (plot.me && frame.id == index)
{
plot(first.frame, my.color = rgb(runif(5),runif(5),runif(5)), frame = index, append = TRUE, spheres = FALSE, alpha = (11/(length(skeleton$Joints)+2)))
}
while (length(list.to.visit) > 0)
{
parent.id <- list.to.visit[[1]]
parent.id <- list.to.visit[[1]]
list.to.visit <- list.to.visit[-1]
ChildId <- find.child(skeleton, parent.id, exclusion.vector)
if (length(ChildId) < 1)
{
next
}
exclusion.vector <- c(exclusion.vector, ChildId)
for (a in 1:length(ChildId))
{
list.to.visit[[length(list.to.visit) + 1]] <- ChildId[a]
}
ChildId <- ChildId[1]
parent.dxyz <- c(df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dx", sep = "")],
df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dy", sep = "")],
df.to.save[index,paste(skeleton$Joints[[parent.id]]$Name, ".Dz", sep = "")])
child.dxyz <- c(df.to.save[index,paste(skeleton$Joints[[ChildId]]$Name, ".Dx", sep = "")],
df.to.save[index,paste(skeleton$Joints[[ChildId]]$Name, ".Dy", sep = "")],
df.to.save[index,paste(skeleton$Joints[[ChildId]]$Name, ".Dz", sep = "")])
map <- child.dxyz - parent.dxyz
rp <- solve(a=first.frame$skeleton$Joints[[skeleton$Joints[[parent.id]]$Parent]]$Trans[[index]][1:3,1:3])
map <- as.vector(rp %*% map)
tomapowac <- first.frame$skeleton$Joints[[ChildId]]$Offset
mapu <- vector.to.unit(map)
tomapowacu <- vector.to.unit(tomapowac)
Rx2y = rotation.matrix.between.vectors(tomapowacu, mapu)
if (anyNA(Rx2y))
{
Rx2y <- matrix(c(1,0,0,0,1,0,0,0,1),nrow = 3, ncol = 3)
}
tomapowacu %*% Rx2y
library(RSpincalc)
ea <- DCM2EA(Rx2y, 'zyx',ignoreAllChk = TRUE) * 180 / pi
eeaa <- ea * (pi / 180)
q.lewe.biodro <- EA2Q(eeaa, 'zyx')
Q2DCM(q.lewe.biodro)
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,1] <- ea[3]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,2] <- ea[2]
first.frame$skeleton$Joints[[parent.id]]$Rxyz[index,3] <- ea[1]
first.frame <- generate.single.frame(first.frame, index)
if (plot.me && frame.id == index)
{
plot(first.frame, my.color = rgb(runif(5),runif(5),runif(5)), frame = index, append = TRUE, spheres = FALSE, alpha = ((11+index)/(length(skeleton$Joints)+2)))
}
}
if (debug.messages)
{
message(paste("Processed frame index:",index))
}
}
return (first.frame)
}
|
427c275371ab4e7d9bb47d72228ead9c7fcbd7ed | 93fb493d8b1115c8a261bd3b08fa83755f3a5569 | /run_analysis.R | 5d42069300030cf571d583c85954cbefef86c673 | [] | no_license | sunilvarun/Tidy-Data | ce58d4d98d96afebd8d72cb8462c97f8f1db149f | a1867fb69ce36d62f0f7f5a581d74611118cdf79 | refs/heads/master | 2021-09-01T11:40:28.750786 | 2017-12-26T18:58:08 | 2017-12-26T18:58:08 | 115,442,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,503 | r | run_analysis.R | library(dplyr)
library(reshape2)
setwd("/Users/varun/Documents/Coursera R/data/Assignment 3/UCI HAR Dataset")
## download the test & train data into data frames
subject_test <- read.csv("./test/subject_test.txt", header = FALSE)
x_test <- read.csv("./test/X_test.txt", header = FALSE, sep = "")
y_test <- read.csv("./test/y_test.txt", header = FALSE)
subject_train <- read.csv("./train/subject_train.txt", header = FALSE)
x_train <- read.csv("./train/X_train.txt", header = FALSE, sep = "")
y_train <- read.csv("./train/y_train.txt", header = FALSE)
## Bind the rows
subject <- rbind(subject_test, subject_train)
x <- rbind(x_test, x_train)
y <- rbind(y_test, y_train)
## Bind the columns
merged_data <- cbind (subject, y, x)
## free up space by removing unwanted variables
rm(subject_test, subject_train, x_test, x_train, y_test, y_train, subject, x, y)
## read variable names (column names) from features.txt
variable_names <- read.csv("features.txt", header = FALSE, sep = "")
## Use grep to select column names with "mean" OR "std"
## Perform operations to account for subject & activity columns
relevant_columns <- grep("mean|std", variable_names[,2])
relevant_columns <- relevant_columns + 2
relevant_columns <- append(relevant_columns, 1:2, after = 0)
## Subset the merge_data to select only those columns that contain mean & std
filtered_data <- subset(merged_data, select = relevant_columns)
## Replace activity numbers with activity labels
activity_labels <- read.csv("activity_labels.txt", header = FALSE, sep = "")
filtered_data[, 2] <- activity_labels[, 2][match(unlist(filtered_data[, 2]), activity_labels[, 1])]
## Give appropriate column names
column_names <- grep("mean|std", variable_names[,2], value = TRUE)
column_names <- gsub("-", ".", column_names, fixed = TRUE)
column_names <- gsub("()", "", column_names, fixed = TRUE)
column_names <- gsub("f", "freq", column_names, fixed = TRUE)
column_names <- gsub("t", "time", column_names, fixed = TRUE)
column_names <- append(column_names, c("subjectID", "Activityname"), after = 0)
## Add column names to data frame
colnames(filtered_data) <- column_names
# Create tidy data
filtered_data <- melt(filtered_data, id=c("subjectID","Activityname"))
tidy_data <- dcast(filtered_data, subjectID+Activityname ~ variable, mean)
# Write back value in CSV
write.table(tidy_data, "tidy_data.txt", row.names=FALSE)
# Remove all variables
rm(activity_labels, filtered_data, merged_data, variable_names, column_names, relevant_columns) |
09f50309ad380e383b65f7371b8b27bf46743c2c | 681c1e16950a0ecf69f0b39e5322292d5fc0f603 | /man/daubenmire_surveys.Rd | c0609454887933ddf2a3cca6b8997bb626fee751 | [] | no_license | ISU-Monarchs/HabitatRestoration | 2ad6e5caee18de6a1fee9d93f53746005a371c7d | 0c5f1af16fdf0967d36dd45c7d7cd93775ef051f | refs/heads/master | 2022-08-29T18:06:08.452585 | 2022-08-09T19:56:48 | 2022-08-09T19:56:48 | 67,713,936 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,017 | rd | daubenmire_surveys.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/daubenmire.R
\docType{data}
\name{daubenmire_surveys}
\alias{daubenmire_surveys}
\title{Daubenmire surveys}
\format{A data.frame with the following columns:
\itemize{
\item date: Date, survey date
\item round: character, round number for the survey: 1, 2, or 3
\item observer: character, the individual/group who did the survey
\item transectID: character, unique identified for the transect
}}
\usage{
daubenmire_surveys
}
\description{
Dates and observers for Daubenmire surveys
}
\details{
This data.frame was used primarily to fill in any missing data in the
Daubenmire data sets. As of 2020-05-15, there was no missing data and thus
this is just duplicated information for what can be found in the other
Daubenmire data.frames:
\code{\link{cover}}, \code{\link{milkweed}}, \code{\link{litter}}, \code{\link{daubenmire_raw}}
}
\seealso{
\code{\link{cover}}, \code{\link{milkweed}}, \code{\link{litter}}
}
\keyword{datasets}
|
6b114b8ba062132bdd70814db207ce9359120703 | a995faa3db24385cf3afb7ed2f67830483d29b48 | /bin/WGSData.R | 8328c22a26da99fc04346677185edfa0d6c5d436 | [] | no_license | allaway/dermalNF | 72bc7c75b887b5129b1ebbe4585647ca349ae939 | 2d2c61c53ae8896bcfa7da7cc00f93f5925e4418 | refs/heads/master | 2020-03-21T16:42:58.458317 | 2018-06-26T20:18:42 | 2018-06-26T20:18:42 | 138,788,787 | 0 | 0 | null | 2018-06-26T20:20:07 | 2018-06-26T20:20:07 | null | UTF-8 | R | false | false | 20,926 | r | WGSData.R | ##General set of commands to analyze and process WGS data
##
library(synapser)
synLogin()
library(data.table)
require(dplyr)
require(reshape2)
require(pheatmap)
##read in all cancer gene mutations
#cancer.gene.muts<-read.table(synGet('syn5611520')@filePath,header=T,sep='\t')
if(!exists('all.gene.muts'))
all.gene.muts<-read.table(synGet('syn5839666')$path,header=T,sep='\t')
if(!exists('cancer.gene.muts')){
cancer.genes=unique(read.table('../../data/Census_allTue Jan 19 18-58-56 2016.csv',sep=',',header=T,quote='"')$Gene.Symbol)
cancer.gene.muts<-subset(all.gene.muts,Hugo_Symbol%in%cancer.genes)
}
# cancer.gene.muts<-read.table(synGet('syn5611520')@filePath,header=T,sep='\t')
#all.gene.muts<-read.table(synGet('syn5713423')@filePath,header=T,sep='\t')
#' Draw a gene-by-sample heatmap of mutation counts.
#'
#' @param mut.tab data.frame of mutations with Hugo_Symbol, Sample_ID and
#'   Protein_Change columns
#' @param title plot title passed to pheatmap
#' @param fname output file name passed to pheatmap
#' @param minSamples a gene is kept only if mutated in MORE than this many samples
doPatientHeatmap<-function(mut.tab,title,fname,minSamples=1){
  ## Gene-by-sample matrix of mutation counts (absent combinations are NA).
  count.mat <- mut.tab %>%
    group_by(Hugo_Symbol, Sample_ID) %>%
    summarize(Count = n()) %>%
    acast(Hugo_Symbol ~ Sample_ID, value.var = 'Count')

  ## Per-gene number of distinct protein changes, used as a row annotation.
  per.gene <- mut.tab %>%
    group_by(Hugo_Symbol) %>%
    summarize(Variants = n_distinct(Protein_Change))
  variant.counts <- setNames(per.gene$Variants, per.gene$Hugo_Symbol)

  ## Drop genes mutated in too few samples (strictly more than minSamples).
  mutated.samples <- function(cnt) sum(cnt > 0, na.rm = TRUE)
  count.mat <- count.mat[which(apply(count.mat, 1, mutated.samples) > minSamples), ]

  ## Sort rows and columns by how many samples/genes carry a mutation.
  row.order <- order(apply(count.mat, 1, mutated.samples))
  col.order <- order(apply(count.mat, 2, mutated.samples))
  count.mat <- count.mat[row.order, col.order]

  ## Log-scale the counts so a handful of hot spots does not wash out the rest.
  pheatmap(log10(1 + count.mat),
           cellwidth = 10, cellheight = 10,
           cluster_rows = FALSE, cluster_cols = FALSE,
           main = title, filename = fname,
           annotation_row = data.frame(Variants = variant.counts))
}
#'Define function to plot gene mutations across patients in a heatmap
#'@param mutTable - table of mutations in cBio format
#'@param minSamples - minimum number of samples required to include a gene
#'@param notIncluded - character vector of Mutation_Type values to exclude
#'@param prefix - optional file-name prefix for the output pngs
#'@return NULL (invisibly); called for its side effect of writing two heatmaps
#'
panPatientPlots<-function(mutTable=all.gene.muts,minSamples=2,notIncluded=c(),prefix=''){
  ##first remove excluded mutation types.
  ## Logical indexing instead of -which(): with -which(), an empty match
  ## (-integer(0)) would silently drop EVERY row of the table.
  if(length(notIncluded)>0){
    excluded <- mutTable$Mutation_Type %in% notIncluded
    print(paste("Removing",sum(excluded),'mutations that are',paste(notIncluded,collapse='or')))
    mutTable <- mutTable[!excluded,]
  }
  ## One code path for both mutation classes; the original duplicated this
  ## logic (and left a dead somatic title/fname computation in the germline
  ## branch that was immediately overwritten).
  plotOneStatus <- function(status){
    muts <- subset(mutTable, Mutation_Status==status)
    ## keep only genes seen in more than minSamples distinct samples
    sample.count <- muts %>% group_by(Hugo_Symbol) %>% summarize(Samples=n_distinct(Sample_ID))
    genes.to.plot <- as.character(sample.count$Hugo_Symbol[which(sample.count$Samples>minSamples)])
    muts <- muts[which(as.character(muts$Hugo_Symbol)%in%genes.to.plot),]
    lab <- if (status=='Somatic') 'somatic' else 'germline'
    title <- paste('Number of',lab,'mutations in\n genes',
                   ifelse(length(notIncluded)>0,paste('(not',paste(notIncluded,collapse=','),')'),''),
                   'that occur in at least',minSamples,'samples')
    fname <- paste(prefix,paste(lab,'Muts_not',sep=''),paste(notIncluded,collapse='_'),'minSamples',minSamples,sep='_')
    doPatientHeatmap(muts,title,paste(fname,'png',sep='.'))
  }
  #get somatic mutants, then plot
  plotOneStatus('Somatic')
  #then get germline
  plotOneStatus('Germline')
  invisible(NULL)
}
#' Get all MAF files that were calculated using VCF2MAF
#' @param mut.type can be 'all' (default),'somatic' or 'germline'
#' @return data.frame describing the MAF file entities on Synapse, restricted
#'   to the requested mutation class; NULL (with a message) on a bad mut.type
getMAFs<-function(mut.type='all'){
  ## Pull the full MAF-file manifest from the Synapse table.
  manifest <- synTableQuery("SELECT * FROM syn12555329 where parentId='syn5522808'") %>% as.data.frame()
  choice <- tolower(mut.type)
  if (choice == 'all') {
    return(manifest)
  }
  if (choice == 'somatic') {
    ## tumor+blood rows hold the tumor/normal (somatic) calls
    return(manifest[which(manifest$tissue == 'primary tumor,blood'), ])
  }
  if (choice == 'germline') {
    ## blood-only rows hold the germline calls
    return(manifest[which(manifest$tissue == 'blood'), ])
  }
  print("mut.type must be either 'all','somatic',or 'germline'")
  return(NULL)
}
#'getMutationSummary opens all maf files and loads them into a list
#'@param allmafs - data.frame describing the MAF file entities (as returned by
#'  getMAFs()); the 'id' column holds the Synapse ids to download
#'@return named list of data.frames, one per MAF file and keyed by Synapse id;
#'  every column is read as character
getMutationSummary<-function(allmafs=getMAFs('all')){
    ## lapply rather than sapply: sapply can silently collapse a set of
    ## same-shaped data.frames into a matrix, and we always want a list here.
    allMuts <- lapply(allmafs$id, function(synid){
        res <- synGet(synid)
        read.table(gzfile(res$path), sep='\t', header=TRUE, colClasses = "character")
    })
    names(allMuts) <- allmafs$id
    allMuts
}
#summary(tab$Consequence)
#library(parallel)
#'Takes original MAF files and filters by predicted 'impact' and/or patient. Will include both
#'Somatic and Germline mutations, because they are both in the original MAF files for each tumor.
#'Writes one filtered .maf per input file and stores it back to Synapse (syn5578958).
#'@param som.mafs MAFs to be analyzed - the somatic files include both the somatic and germline
#'@param impact can be "HIGH",'MODERATE' or 'LOW'; NA keeps every row
#'@param patient optional patient number; when given, only that patient's files are processed
#'@return list with one element per processed file (the filtered mutation table)
#'
#'NOTE(review): this function mixes the legacy synapseClient S4 API
#'(res@filePath, res@annotations, File(), annotations<-, executed<-) with
#'synapser, which is what the rest of the file loads -- confirm it still runs
#'against the current client.
#'NOTE(review): when fname already exists the write branch is skipped but
#'`vars` is still returned below, so it is undefined (or stale) in that case.
storeSomMutationFiles<-function(som.mafs=getMAFs('somatic'),impact='HIGH',patient=NA){
    ## Restrict to one patient; patient ids look like 'CT0<patient>' -- TODO confirm
    if(!is.na(patient))
        som.mafs=som.mafs[grep(paste('0',patient,sep=''),som.mafs$entity.patientId),]
  allMuts<-lapply(som.mafs$entity.id,function(x){
   res<-synGet(x)
  #  if(res@annotations$tissueType=='PBMC')
  #    return(NULL)
    fp=res@filePath
    ## Output name encodes patient, tissue and impact level.
    fname=paste('patient',gsub('CT0+','',res@annotations$patientId),'tissue',res@annotations$tissueID,impact,'impact_somaticMutations.maf',sep='_')
    if(! file.exists(fname)){
        ## Decompress and load the MAF via zcat + fread (assumes a unix shell).
        tab<-as.data.frame(fread(paste('zcat',fp)))
        #dont filt by consequence
        #vars<-tab[which(tab$Consequence%in%mutClasses),]
        if(!is.na(impact))
            vars<-tab[which(tab$IMPACT==impact),]
        else
            vars<-tab
        ## Progress marker: number of NF1 mutations surviving the filter.
        print(length(which(vars$Hugo_Symbol=='NF1')))#summary(vars$Hugo_Symbol))
        write.table(vars,file=fname,row.names=F,quote=F)
    }
    ## Store the filtered file back to Synapse with provenance + annotations.
    sf=File(fname,parentId='syn5578958')
    annotations(sf)<-res@annotations
    executed(sf)<-'https://raw.githubusercontent.com/Sage-Bionetworks/dermalNF/master/bin/WGSData.R'
    synStore(sf)
    return (vars)
  })#,mc.cores=4)
  return(allMuts)
}
#'getAllMutData reads a set of mutation MAF tables and splits each one into
#'somatic vs germline calls based on the tumor/normal/reference alleles
#'@param allsoms data.frame of MAF file entities (see getMAFs()); files are
#'  downloaded from Synapse via the 'id' column
#'@param filt character vector of FILTER values to keep (e.g. 'PASS'); when
#'  empty, or when no rows match, every row is kept
#'@return named list (one element per file, keyed by Synapse id); each element
#'  is a list of two data.frames, 'Somatic' and 'Germline'
getAllMutData<-function(allsoms=getMAFs('all'),filt=c()){
    ##first read in all files; every column is read as character so the
    ##allele comparisons below are plain string comparisons
    allmuts<-lapply(allsoms$id,function(x) {
        print(paste('read in',x))
        read.table(synGet(x)$path,sep=' ',header=T,quote='"', colClasses = "character")
    })
    names(allmuts)<-allsoms$id
    ##now split out somatic or germline.
    ##Local assignment here: the original used <<-, which also clobbered a
    ##global `som.germ` even though the value is returned anyway.
    som.germ<-lapply(allmuts,function(x){
        keep=which(as.character(x$FILTER)%in%filt)
        if(length(keep)>0){
            print(paste('Keeping',length(keep),'out of',nrow(x),'because they are',paste(filt,collapse=',')))
            x=x[keep,]
        }
        ## Germline: the matched-normal genotype differs from the reference.
        ## Vectorized comparisons replace the original row-wise apply().
        is.germ <- x$Match_Norm_Seq_Allele1 != x$Reference_Allele |
                   x$Reference_Allele != x$Match_Norm_Seq_Allele2
        ## Somatic: the tumor genotype differs from the matched normal.
        is.som <- x$Tumor_Seq_Allele1 != x$Match_Norm_Seq_Allele1 |
                  x$Tumor_Seq_Allele2 != x$Match_Norm_Seq_Allele2
        ## which() drops NAs, so rows with missing alleles fall out of both.
        list(Somatic=x[which(is.som),],Germline=x[which(is.germ),])
    })
    return(som.germ)
}
#'getMutationStatsForGene obtains all mutations for a particular gene of interest across all patients
#'@param gene is the gene symbol in question
#'@param impact is a list of which mutations to include, defaults to all ('HIGH','MODERATE' and 'LOW');
#'  ignored when the gene is already present in the precomputed `all.gene.muts` global
#'@param doPlot if set to TRUE, writes several pdf bar charts about where/when the mutations occur
#'@param filt character vector of FILTER values passed through to getAllMutData()
#'@param som.germ - the MAF file tables separated by whether or not the mutation is somatic or germline;
#'  when NULL they are recomputed from Synapse
#'@param redo when TRUE, recompute from the raw MAF files even if the gene is not in `all.gene.muts`
#'@return data.frame of per-mutation records (one row per mutation per sample),
#'  NULL when no mutations are found, or an empty data.frame when redo=FALSE
#'  and the gene is absent from the precomputed table
#'
#'NOTE(review): side effects -- writes <gene>*mutations.tsv files (cBioPortal
#'format) and, with doPlot=TRUE, several pdfs in the working directory.
#'NOTE(review): if a non-NULL som.germ is supplied together with redo=TRUE,
#'`allsoms` is never defined and the loop below will error -- confirm callers
#'always let som.germ be recomputed in that path.
getMutationStatsForGene<-function(gene='NF1',impact=c('HIGH','MODERATE','LOW'),doPlot=FALSE,filt=c(),som.germ=NULL,redo=FALSE){
    ##first check to see if we have the gene in the precomputed global table;
    ##if so, reuse it and skip the expensive Synapse recomputation entirely
  if(gene%in%all.gene.muts$Hugo_Symbol && !redo){
    print(paste('Found gene',gene,' already processed, will analyze mutations of all impact (impact argument ignored'))
    df=subset(all.gene.muts,Hugo_Symbol==gene)
  }else{
    print(paste('No evidence of',gene,'in mutation data'))
    if(!redo){
      print('Set redo=TRUE to double check')
      return(data.frame())
    }
    ## Recompute from the per-sample MAF files stored under syn5578958.
    if(is.null(som.germ)){
      allsoms<-synTableQuery("SELECT * FROM syn12555329 where parentId='syn5578958'") %>% as.data.frame()
      print(paste('Selecting from',nrow(allsoms),'mutation files'))
      ## keep only files whose name mentions one of the requested impact levels
      allsoms=allsoms[unlist(sapply(impact,grep,allsoms$name)),]
      print(paste("Found",nrow(allsoms),'with',paste(impact,collapse=' or '),'impact'))
      som.germ=getAllMutData(allsoms,filt=filt)
    #df<-apply(allsoms,1,function(x){
    }
    ## Accumulators for one value per mutation record; grown inside the loop
    ## and assembled into a data.frame at the end.
    classes=c()
    pats<-c()
    tissue=c()
    pos=c()
    ppos=c()
    mutType=c()
    t_depth=c()
    mut_chrom=c()
    mut_start=c()
    mut_end=c()
    ref_al=c()
    var_al=c()
    sid=c()
    exac=c()
    gmaf=c()
    i <- 1
    for(i in 1:nrow(allsoms)){
      x=allsoms[i,]
      ## File names encode metadata as underscore-separated fields:
      ## presumably patient_<id>_tissue_<id>_... -- TODO confirm against the files
      arr=unlist(strsplit(x[['name']],split='_'))
      mv=som.germ[[x[['id']]]]
      for(mt in c("Somatic","Germline")){
        mvl=mv[[mt]]
        idx=c()
        ## try() guards against tables lacking a Hugo_Symbol column;
        ## on failure idx stays empty and the file is skipped
        try(idx<-which(mvl[,'Hugo_Symbol']==gene))
        if(length(idx)>0){
          classes=c(classes,as.character(mvl[idx,'Variant_Classification']))
          pos=c(pos,as.character(mvl[idx,'HGVSc']))
          ## strip the leading 'p.' from the protein change notation
          ppos=c(ppos,gsub('p.','',as.character(mvl[idx,'HGVSp_Short']),fixed=T))
          t_depth=c(t_depth,as.numeric(as.character(mvl[idx,'t_depth'])))
          ## somatic ids keep patient+tissue fields; germline only patient
          if(mt=='Somatic')
            sid=c(sid,rep(paste(arr[1:4],collapse='_'),length(idx)))
          else
            sid=c(sid,rep(paste(arr[1:2],collapse='_'),length(idx)))
          mut_chrom=c(mut_chrom,as.character(mvl[idx,'Chromosome']))
          mut_start=c(mut_start,mvl[idx,'Start_Position'])
          mut_end=c(mut_end,mvl[idx,'End_Position'])
          ra=as.character(mvl[idx,'Reference_Allele'])
          ref_al=c(ref_al,ra)
          ## variant allele = whichever tumor allele differs from the first
          var_al=c(var_al,apply(mvl[idx,grep('_Allele',colnames(mvl))[1:3]],1,function(y){ if(y[1]!=y[2]) return(y[2]) else return(y[3])}))
          exac=c(exac,mvl[idx,'ExAC_AF'])
          gmaf=c(gmaf,mvl[idx,'GMAF'])
          pats=c(pats,rep(arr[2],length(idx)))
          tissue=c(tissue,rep(arr[4],length(idx)))
          mutType=c(mutType,rep(mt,length(idx)))
        }
      }
    }
    if(length(pats)==0)
      return(NULL)
    df=data.frame(Hugo_Symbol=rep(gene,length(mutType)), Protein_Change=ppos,
                  Sample_ID=sid,
                  Mutation_Status=mutType,Chromosome=mut_chrom,
                  Start_Position=mut_start,End_Position=mut_end,
                  Reference_Allele=ref_al,Variant_Allele=var_al,
                  Mutation_Type=classes,TumorDepth=t_depth,
                  Position=pos,Tissue=tissue,Patient=pats,ExAC_AF=exac,
                  GMAF=gmaf)
    ##the mindf files are visible via cbioportal.
    ##(drops the TumorDepth, Tissue and Patient columns, i.e. 11, 13, 14)
    mindf=unique(df[,-c(11,13,14)])
    write.table(mindf,file=paste(gene,paste(impact,collapse='_'),'mutations.tsv',sep=''),quote=FALSE,sep='\t',row.names=F)
    #somatic only
    if(length(which(mutType=='Somatic'))>0){
      red.df<-subset(mindf,Mutation_Status=="Somatic")
      write.table(red.df,file=paste(gene,paste(impact,collapse='_'),'SOMATICmutations.tsv',sep=''),quote=FALSE,sep='\t',row.names=F)
    }
    #germline only
    if(length(which(mutType=='Germline'))>0){
      red.df<-subset(mindf,Mutation_Status=='Germline')
      write.table(red.df,file=paste(gene,paste(impact,collapse='_'),'GERMLINEmutations.tsv',sep=''),quote=FALSE,sep='\t',row.names=F)
    }
    ## Compress long runs of N in the HGVSc string (e.g. 'NNNN' -> 'N4') so
    ## plot legends stay readable.
    df$Position=as.character(df$Position)
    mns=grep("NN+",df$Position)
    df$Position[mns]=unlist(sapply(df$Position[mns],function(x) {
      ml=attr(regexpr("N+",x),'match.length')
      gsub("N+",paste("N",ml,sep=''),x)
    }))
    ## Abbreviate one specific long insertion sequence for the same reason.
    ins=grep("GGTTACTCTGTTTGATTCTCGGC",df$Position)
    df$Position[ins]<-unlist(sapply(df$Position[ins],function(x) gsub("GGTTACTCTGTTTGATTCTCGGC","GGT...",x)))
    #    df$Sample_ID=as.numeric(as.character(df$Sample_ID))
    ## Flatten the impact vector for use in the plot file names below.
    impact=paste(impact,collapse='_or_')
  }
  if(doPlot){
    require(ggplot2)
    ## Mutation counts per sample, somatic vs germline side by side.
    pdf(paste('numberOf',impact,'impact',gene,'MutationsPerPatient.pdf',sep=''))
    p<-ggplot(df)+geom_bar(aes(Sample_ID,fill=Mutation_Status),position='dodge')
    p<-p+ggtitle(paste('Number of',impact,'impact mutations in',gene))
    print(p)
    dev.off()
    ##now do class of mutation
    pdf(paste('typeOf',impact,'impact',gene,'GermlineMutationsPerPatient.pdf',sep=''))
    p<-ggplot(unique(subset(df,Mutation_Status=='Germline')))+geom_bar(aes(Sample_ID,fill=Mutation_Type),position='dodge')
    p<-p+ggtitle(paste('Type of Germline mutations in',gene))
    print(p)
    dev.off()
    pdf(paste('typeOf',impact,'impact',gene,'SomaticMutationsPerPatient.pdf',sep=''))
    p<-ggplot(subset(df,Mutation_Status=='Somatic'))+geom_bar(aes(Sample_ID,fill=Mutation_Type),position='dodge')
    p<-p+ggtitle(paste('Type of',impact,'impact Somatic mutations in',gene))
    print(p)
    dev.off()
    ##now try to classify the position of the mutation for each gene
    ##now do class of mutation
    pdf(paste('locationOf',impact,'impact',gene,'SomaticMutationsPerPatient.pdf',sep=''))
    p=ggplot(subset(df,Mutation_Status=='Somatic'))+geom_bar(aes(Sample_ID,fill=Protein_Change),position='dodge')+ggtitle(paste(gene,'Mutation Position'))
    print(p)
    dev.off()
    pdf(paste('locationOf',impact,'impact',gene,'GermlineMutationsPerPatient.pdf',sep=''))
    p=ggplot(subset(df,Mutation_Status=='Germline'))+geom_bar(aes(Sample_ID,fill=Protein_Change),position='dodge')+ggtitle(paste(gene,'Mutation Position'))
    print(p)
    dev.off()
    ##frequency of hits
    pdf(paste('frequencyOf',impact,'impact',gene,'SomaticMutationsPerPatient.pdf',sep=''))
    p=ggplot(subset(df,Mutation_Status=='Somatic'))+geom_bar(aes(Protein_Change,fill=Sample_ID))+ggtitle(paste(gene,'Mutation Position'))+theme(axis.text.x=element_text(angle = -90, hjust = 0))
    print(p)
    dev.off()
    pdf(paste('frequencyOf',impact,'impact',gene,'GermlineMutationsPerPatient.pdf',sep=''))
    p=ggplot(subset(df,Mutation_Status=='Germline'))+geom_bar(aes(Protein_Change,fill=Sample_ID))+ggtitle(paste(gene,'Mutation Position'))+theme(axis.text.x=element_text(angle = -90, hjust = 0))
    print(p)
    dev.off()
  }
  return(df)
}
#'Create a heatmap of mutation positions scored by PHRED values
#'(the PHRED scores are supplied separately and joined by genomic position)
#'@param df data.frame of mutations as returned by getMutationStatsForGene();
#'  must have Start_Position, Protein_Change, Mutation_Type and Sample_ID columns
#'@param fname suffix for the four output image files
#'@param phredscore data.frame with Pos and PHRED columns to join against
#'@param cutoff minimum mean PHRED score for a position to appear in the
#'  'Over<cutoff>' heatmaps
#'@param samp.vars optional data.frame of per-sample annotations for pheatmap
#'@return character vector of the four file-name prefixes that were written
#'
#'NOTE(review): samples are split somatic-vs-germline by whether 'tissue'
#'appears in the sample id (see getMutationStatsForGene's id construction) --
#'confirm that convention still holds for the inputs used here.
heatmapWithPhredScore<-function(df,fname,phredscore,cutoff=10,samp.vars=NA){
  require(reshape2)
  require(dplyr)
  require(pheatmap)
  ##first map each mutation to its PHRED score by genomic start position
  mut.idx<-match(df$Start_Position,phredscore$Pos)
  na.vals<-which(is.na(mut.idx))
  ## For unmatched positions, retry one base earlier -- presumably to handle
  ## indel coordinate offsets between the two sources; TODO confirm.
  mut.idx[na.vals]<-sapply(na.vals,function(x){
    match(df[x,'Start_Position']-1,phredscore$Pos)
  })
  df$PHRED<-phredscore$PHRED[mut.idx]
 # print(head(df))
  #df$Consequence<-nf1.deets$Consequence[mut.idx]
  ##build a column annotation: one Mutation_Type per distinct protein change
  ##(the first occurrence of each change determines its type)
  changes<-as.character(df$Protein_Change)
  types<-df$Mutation_Type[match(unique(changes),as.character(df$Protein_Change))]
  ttypes<-data.frame(Consequence=types)
  rownames(ttypes)<-unique(changes)
 # print(head(ttypes))
#  print(head(types))
  ## sample-by-protein-change matrix of PHRED scores (0 where no mutation)
  pmat = acast(df,Sample_ID~Protein_Change,value.var='PHRED',fill=0)
  ## order columns by mean nonzero score, rows by total score
  col.ords=order(apply(pmat,2,function(x) mean(x[which(x>0)])))
  pmat<-pmat[order(rowSums(pmat)),col.ords]
  ## columns whose mean nonzero score exceeds the cutoff (for the filtered plots)
  col.cuts=which(apply(pmat,2,function(x) mean(x[which(x>0)])>cutoff))
  ## somatic sample ids contain 'tissue'; everything else is treated as germline
  soms=grep('tissue',rownames(pmat))
  #print(head(pmat))
  ## four heatmaps: somatic/germline, each unfiltered and cutoff-filtered
  pheatmap(pmat[soms,],annotation_col = ttypes,annotation_row=samp.vars,
           cellheight=10,cellwidth=10,cluster_rows=F,cluster_cols=F,
          # legend_labels=as.character(types),legend_breaks=c(type.nums)-0.5,
           filename=paste('positionScoreSomatic',fname,sep=''))
  pheatmap(pmat[-soms,],annotation_col = ttypes,annotation_row=samp.vars,
           cluster_rows=F,cluster_cols=F,
           cellheight=10,cellwidth=10,
           filename=paste('positionScoreGermline',fname,sep=''))
  pheatmap(pmat[soms,col.cuts],annotation_col = ttypes,annotation_row=samp.vars,
           cellheight=10,cellwidth=10,cluster_rows=F,cluster_cols=F,
           # legend_labels=as.character(types),legend_breaks=c(type.nums)-0.5,
           filename=paste('positionScoreOver',cutoff,'Somatic',fname,sep=''))
  pheatmap(pmat[-soms,col.cuts],annotation_col = ttypes,annotation_row=samp.vars,
           cluster_rows=F,cluster_cols=F,
           cellheight=10,cellwidth=10,
           filename=paste('positionScoreOver',cutoff,'Germline',fname,sep=''))
  fs=paste(c('positionScoreGermline','positionScoreSomatic',
             paste('positionScoreOver',cutoff,'Somatic',sep=''),
             paste('positionScoreOver',cutoff,'Germline',sep='')),fname,sep='')
  return(fs)
}
#'Create heatmaps of mutation counts and mutation types for a single-gene
#'data frame
#'@param df is a data frame containing a single gene derived from
#'getMutationStatsForGene; must contain Sample_ID, Protein_Change and
#'Mutation_Type columns
#'@param fname is name of output file (suffix appended to each heatmap file)
heatmapFromMutDf<-function(df=getMutationStatsForGene(gene='NF1'),fname='NF1mutations.png'){
require(reshape2)
require(dplyr)
require(pheatmap)
##first start with matrix of counts: samples x protein changes
countmat=df %>% group_by(Sample_ID,Protein_Change) %>% summarize(Count=n()) %>% acast(Sample_ID~Protein_Change,value.var='Count',fill=0)
##column annotation: mutation type of the first occurrence of each protein change
mut_ann=sapply(colnames(countmat),function(x) as.character(df[match(x,as.character(df$Protein_Change)),'Mutation_Type']))
##NOTE(review): nrow()/ncol() of a matrix are never NA, so these two
##reordering branches never execute; possibly `!is.na(...)` was intended
##(heatmapWithPhredScore sorts unconditionally) -- confirm before changing
if(is.na(nrow(countmat)))
countmat<-countmat[order(rowSums(countmat)),]
if(is.na(ncol(countmat)))
countmat<-countmat[,order(colSums(countmat))]
print(dim(countmat))
##now separate out countmap by germline and somatic
##(samples whose name contains 'tissue' go in the "Somatic" file)
# par(mfrow=c(2,1))
soms=grep('tissue',rownames(countmat))
if(length(soms)>0){
try(pheatmap(countmat[soms,],cluster_rows=F,cluster_cols=F,cellheight=10,cellwidth=10,annotation_col=data.frame(MutationClass=mut_ann),filename=paste('positionSomaticCount',fname,sep='')))
try(pheatmap(countmat[-soms,],cluster_rows=F,cluster_cols=F,cellheight=10,cellwidth=10,annotation_col=data.frame(MutationClass=mut_ann),filename=paste('positionGermlineCount',fname,sep='')))
}else{
try(pheatmap(countmat,cluster_rows=F,cluster_cols=F,cellheight=10,cellwidth=10,annotation_col=data.frame(MutationClass=mut_ann),filename=paste('positionGermlineCount',fname,sep='')))
}
##then separate out by tumor type: encode each mutation type as an integer
##so cell color in the second set of heatmaps represents the type
types=unique(df$Mutation_Type)
type.nums=seq(1,length(types))
tdf=data.frame(TumorType=types,Numeric=type.nums)
df$Numeric_Mut=tdf$Numeric[match(df$Mutation_Type,tdf$TumorType)]
typemat= acast(df,Sample_ID~Protein_Change,value.var='Numeric_Mut',fill=0)
##sort rows by total and columns by mean non-zero code
typemat<-typemat[order(rowSums(typemat)),order(apply(typemat,2,function(x) mean(x[which(x>0)])))]
soms=grep('tissue',rownames(typemat))
if(length(soms)>0){
pheatmap(typemat[soms,],color = c('white',rainbow(length(types))),
cellheight=10,cellwidth=10,cluster_rows=F,cluster_cols=F,
legend_labels=as.character(types),legend_breaks=c(type.nums)-0.5,
filename=paste('positionTypeSomatic',fname,sep=''))
pheatmap(typemat[-soms,],color = c('white',rainbow(length(types))),
cluster_rows=F,cluster_cols=F,
cellheight=10,cellwidth=10,
legend_labels=as.character(types),legend_breaks=c(type.nums)-0.5,filename=paste('positionTypeGermline',fname,sep=''))
}else{
try(pheatmap(typemat,color = c('white',rainbow(length(types))),
cluster_rows=F,cluster_cols=F,
cellheight=10,cellwidth=10,
legend_labels=as.character(types),legend_breaks=c(type.nums)-0.5,filename=paste('positionTypeGermline',fname,sep='')))
}
}
|
a8db3b8ca36a3fb2051ad4a84e88261d88fbaab5 | 27d8238cb9b758903417c5151fa8ace7db71ad15 | /kingCounty_Price.R | 180131a5c1c78d6c96d043edaafd80068ee084b8 | [] | no_license | ritika14695/king-county-house-price-prediction | dd19fbaf1c054e1c5a67a53b8e4db77994f3e2ef | 5af49b264eed6b6e8a60c24e9cdb3a6d727a2e1a | refs/heads/main | 2023-03-24T19:28:56.512353 | 2021-03-01T00:00:10 | 2021-03-01T00:00:10 | 343,242,748 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,231 | r | kingCounty_Price.R | #calling the required libraries and packages
library(lattice)
library(move)
library(moments)
library(pacman)
library(readxl)
library(dplyr)
library(ggplot2)
library(ggiraph)
library(ggiraphExtra)
library(plyr)
library(caret)
library(corrplot)
library(GGally)
library(ggcorrplot)
library(corrr)
#Fetching the raw data-file
data_raw <- read.csv("C:/Users/91773/Desktop/MSc Data Analytics/DMML/Project/New folder/House_Price/kc_house_dataset.csv")
#Data Exploration
str(data_raw)
dim(data_raw)
head(data_raw)
class(data_raw)
glimpse(data_raw)
data_clean <- data_raw
#Obtaining the Summary Statistics of the dataset
summary(data_clean)
#Performing data cleaning operations. Changing the date format to yymmdd format
#Selecting the sub-string of date column from 1:8
data_clean$date <- substr(data_clean$date, 1, 8)
data_clean$date <- as.numeric(as.character(data_clean$date))
head(data_clean)
#Checking for any NA values
length(which(is.na(data_clean)))
#Removing ID column since it does not add value to our visualization
data_clean$id <- NULL
par(mfrow=c(2,3))
boxplot(data_clean$bedrooms, main = "Bedroom Plot")
boxplot(data_clean$bathrooms, main = "Bathroom Plot")
boxplot(data_clean$floors, main = "Floors Plot")
boxplot(data_clean$sqft_living, main = "Living SqFt Plot")
boxplot(data_clean$sqft_above, main = "Above SqFt Plot")
#Removing Outliers
med <- quantile(data_clean$bedrooms, 0.5)
out1 <- quantile(data_clean$bedrooms, 0.95)
out2 <- quantile(data_clean$bedrooms, 0.05)
data_clean$bedrooms[data_clean$bedrooms > out1 ] <- med
data_clean$bedrooms[data_clean$bedrooms < out2 ] <- med
med <- quantile(data_clean$bathrooms, 0.5)
out1 <- quantile(data_clean$bathrooms, 0.95)
out2 <- quantile(data_clean$bathrooms, 0.05)
data_clean$bathrooms[data_clean$bathrooms > out1 ] <- med
data_clean$bathrooms[data_clean$bathrooms < out2 ] <- med
med <- quantile(data_clean$sqft_living, 0.5)
out1 <- quantile(data_clean$sqft_living, 0.95)
out2 <- quantile(data_clean$sqft_living, 0.05)
data_clean$sqft_living[data_clean$sqft_living > out1 ] <- med
data_clean$sqft_living[data_clean$sqft_living < out2 ] <- med
med <- quantile(data_clean$sqft_above, 0.5)
out1 <- quantile(data_clean$sqft_above, 0.95)
data_clean$sqft_above[data_clean$sqft_above > out1 ] <- med
par(mfrow=c(2,2))
boxplot(data_clean$bedrooms, main = "Bedroom Plot")
boxplot(data_clean$bathrooms, main = "Bathroom Plot")
boxplot(data_clean$sqft_living, main = "Living SqFt Plot")
boxplot(data_clean$sqft_above, main = "Above SqFt Plot")
#Checking for skewness and adjusting those that add value to the prediction
#(moments::skewness over the first 19 columns)
apply(data_clean[,1:19], 2, skewness, na.rm =TRUE)
# Log-transform the right-skewed variables so they are closer to normal
data_clean$price <- log(data_clean$price)
data_clean$sqft_lot <- log(data_clean$sqft_lot)
data_clean$sqft_lot15 <- log(data_clean$sqft_lot15)
#Data Visualization: scatter plots of (log) price against two predictors
ggplot(data = data_clean, aes(x = sqft_living, y = price)) + geom_point() +ggtitle("Prices of houses according to Square feet")
ggplot(data = data_clean, aes(x = bathrooms, y = price)) + geom_point() +ggtitle("Prices of houses according to Bathrooms")
#finding correlation between all columns and plotting the matrix
CorrelationResults = cor(data_clean)
corrplot(CorrelationResults)
#Separating data in train and test sets (75% / 25% random split, seeded
#for reproducibility)
set.seed(1234)
samp <- sample(nrow(data_clean),0.75*nrow(data_clean))
train <- data_clean[samp,]
test <- data_clean[-samp,]
#Applying linear regression model on all variables to check significance of each variable
model <- lm(data = train, price ~ .)
summary(model)
#predicting prices for the full model on the hold-out set
pred_lm<-predict(model, newdata = test, type = 'response')
#finding RMSE(root mean square error) less the value more better the model
#and R2 to check how much variance the model explains (caret::RMSE / caret::R2)
RMSE(pred_lm,test$price)
R2(pred_lm,test$price)
#forward selection method
#NOTE(review): step() with direction='forward' starting FROM the full model
#has no terms left to add, so this is a no-op; forward selection normally
#starts from an intercept-only model with a `scope` -- confirm intent.
frwd_model<-step(model,direction = 'forward')
Null_to_full<-lm(price ~ bedrooms + bathrooms + sqft_living + sqft_lot + floors +
waterfront + view + condition + grade + sqft_above + sqft_basement +
yr_built + yr_renovated + zipcode + lat + long + sqft_living15 +
sqft_lot15, data=train)
summary(Null_to_full)
#backward selection method: drop terms from the full model by AIC
bckd_model<-step(model,direction = 'backward')
# Refit the backward-selected formula explicitly (drops sqft_above/sqft_basement)
reduced_model<-lm(price ~ bedrooms + bathrooms + sqft_living + sqft_lot + floors +
waterfront + view + condition + grade + yr_built + yr_renovated +
zipcode + lat + long + sqft_living15 + sqft_lot15, data=train)
summary(reduced_model)
#plotting the reduced model diagnostics to check normality and homoscedasticity
par(mfrow=c(2,2))
plot(reduced_model)
# Predict prices on the hold-out set with the backward-selected model
pred_log_prob <- predict(reduced_model, newdata = test, type = 'response')

# RMSE (root mean square error; lower is better) and R2 (share of variance
# explained) on the test set
RMSE(pred_log_prob, test$price)
R2(pred_log_prob, test$price)

# Export predicted vs. actual (log) prices.
# Bug fix: the original called write.csv(df1, ...) but `df1` is never
# defined anywhere in this script; the frame built on the previous line
# is `df2`, so that is what gets written.
df2 <- data.frame(pred_log_prob, test$price)
write.csv(df2, file = 'C:/Users/91773/Desktop/MSc Data Analytics/DMML/Project/New Folder/House_Price/house_price_reduced_2.csv'
          , row.names = FALSE)
|
f7d92e371f4cce83873ee47109ac1938534da085 | 85d7b807dd75162918e7e4215f23781252004bc9 | /Forest_Fire_ Neural Networks.R | 21c412a729f94636eedce1d2d6c8fe18b3a63b69 | [] | no_license | Leepsa/Data-Science_R | a5d8ae883c1caa4ac0dd398922cdc9a74df78dd7 | 0a8d7d4dcb038814b57be3675e8d9ab41e9b5215 | refs/heads/master | 2023-04-05T19:41:05.450870 | 2021-05-08T17:00:54 | 2021-05-08T17:00:54 | 290,532,910 | 0 | 0 | null | 2021-05-08T17:00:55 | 2020-08-26T15:22:16 | HTML | UTF-8 | R | false | false | 5,831 | r | Forest_Fire_ Neural Networks.R |
### PREDICT THE BURNED AREA OF FOREST FIRES WITH NEURAL NETWORKS ####
#Load the data
install.packages("readr")
library(readr)
forest_fires <- read.csv("C:\\Users\\KIIT\\Desktop\\assignment2021\\Data Science Assignment_LeepsaMahankud\\Neural Networks\\forestfires.csv")
## Convert catagorical column state of data set into numerical
forest_fires$month <- as.numeric(factor(forest_fires$month))
forest_fires$day <- as.numeric(factor(forest_fires$day))
forest_fires$size_category <- as.numeric(factor(forest_fires$size_category))
### EDA
# Cheking the correlation
cor(forest_fires)
## Summary of all continuous features
summary(forest_fires$month)
summary(forest_fires$day)
summary(forest_fires$FFMC)
summary(forest_fires$DMC)
summary(forest_fires$DC)
summary(forest_fires$ISI)
summary(forest_fires$temp)
summary(forest_fires$RH)
summary(forest_fires$wind)
summary(forest_fires$rain)
summary(forest_fires$area)
summary(forest_fires$dayfri)
summary(forest_fires$daymon)
summary(forest_fires$daysat)
summary(forest_fires$daysun)
summary(forest_fires$daythu)
summary(forest_fires$daytue)
summary(forest_fires$daywed)
summary(forest_fires$monthapr)
summary(forest_fires$monthaug)
summary(forest_fires$monthdec)
summary(forest_fires$monthfeb)
summary(forest_fires$monthjan)
summary(forest_fires$monthjul)
summary(forest_fires$monthjun)
summary(forest_fires$monthmar)
summary(forest_fires$monthmay)
summary(forest_fires$monthnov)
summary(forest_fires$monthoct)
summary(forest_fires$monthsep)
summary(forest_fires$size_category)
# Set the environment for automated EDA
install.packages("DataExplorer")
library(DataExplorer)
install.packages("tidyverse")
library(tidyverse)

## Visualisation of all features & report generation of them
plot_intro(forest_fires)
plot_missing(forest_fires)

# Histograms, raw and log-transformed
DataExplorer::plot_histogram(forest_fires)
DataExplorer::plot_histogram(log(forest_fires))

# Density plot
plot_density(forest_fires)

# Correlation between features
plot_correlation(forest_fires, cor_args = list( 'use' = 'complete.obs'))

# Box plots and scatter plots of each feature against all other features.
# One loop per plot family replaces the original 33 copy-pasted calls with
# identical arguments.
eda_features <- c("month", "day", "FFMC", "DMC", "DC", "ISI",
                  "temp", "RH", "wind", "rain", "area")
for (feat in eda_features) plot_boxplot(forest_fires, by = feat, ncol = 2)
for (feat in eda_features) plot_boxplot(log(forest_fires), by = feat, ncol = 2)
for (feat in eda_features) plot_scatterplot(forest_fires, by = feat)

# Visualization HTML report generation in web browser of all features
create_report(forest_fires)
# Checking for duplicate rows in the data
install.packages("dplyr")
library(dplyr)
sum(duplicated(forest_fires))
# Remove the duplicated rows from the data
forest_fires1 <- distinct(forest_fires)
# Checking for NA values (0 means the data is complete)
sum(is.na(forest_fires1))
# Custom min-max normalization: linearly rescale a numeric vector to [0, 1].
#
# x     - numeric vector to rescale
# na.rm - if TRUE, NA values are ignored when computing the range (they stay
#         NA in the output). The default FALSE preserves the original
#         behavior, where any NA makes the whole result NA.
#
# Note: a constant vector (max == min) still yields NaN (0/0), as before.
normalize <- function(x, na.rm = FALSE) {
  rng <- range(x, na.rm = na.rm)
  (x - rng[1]) / (rng[2] - rng[1])
}
#Apply normalization to entire data frame, except column 31 (size_category,
#the target), which is re-attached untouched below
norm_forest_fires <- as.data.frame(lapply(forest_fires1[-31], normalize))
summary(norm_forest_fires)
norm_forest_fires1 <- cbind(norm_forest_fires,forest_fires1[31])
#Create training and test data (75% / 25% random split, seeded)
set.seed(26)
library(caTools)
# NOTE(review): the name `sample` shadows base::sample for the rest of the
# session; a name like `train_idx` would be safer.
sample <- sample.int(n = nrow(norm_forest_fires1), size = floor(.75*nrow(norm_forest_fires1)), replace = F)
forest_fire_train <- norm_forest_fires1[sample, ]
forest_fire_test <- norm_forest_fires1[-sample, ]
##Training a model on the data ----
#Train the neuralnet model
install.packages("neuralnet")
library(neuralnet)
#Simple ANN with only a single hidden neuron, logistic activation
forest_fire_model <- neuralnet(size_category~.,data=forest_fire_train,
act.fct="logistic")
#Visualize the network topology
plot(forest_fire_model)
##Evaluating model performance ----
#Obtain model results on the test set (target column 31 excluded)
model_results <- compute(forest_fire_model, forest_fire_test[,-31])
#Obtain predicted size category
predicted_burn_area <- model_results$net.result
#Examine the correlation between predicted and actual values
cor(predicted_burn_area,forest_fire_test$size_category)
#Checking the accuracy of model: RMSE over the test set
rmse=(sum((forest_fire_test$size_category-model_results$net.result)^2)/nrow(forest_fire_test))^0.5
# NOTE(review): this is 1 - RMSE, not the coefficient of determination
# (R^2 = 1 - SSE/SST); the name `Rsquare` is misleading.
Rsquare <- c(1-rmse)
Rsquare
|
59ff90135906af83ab50c746e56bd89cd5db097c | 577db27827dba26dbffc7e7444ac1d93dbd2d526 | /Proyectos R/BONUS - El Baul de Codigo R/3. Explorar Datos/Mas Graficos/2-Multivaraite/correlation_plot.R | e3f7d5467f7c41ac3d108152c26c3d3e27b63cfb | [] | no_license | adanpalma/analizatusdatos | daea5d39bd836f4ca4668f282eb5dd8928339a81 | 83f1494f07a86a13b16acce608d5cdd694a94d86 | refs/heads/master | 2023-04-17T23:31:42.640384 | 2021-04-28T03:36:16 | 2021-04-28T03:36:16 | 348,168,732 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 202 | r | correlation_plot.R | # Correlation Plot
# load library
library(corrplot)
# load the data
data(iris)
# calculate correlations
correlations <- cor(iris[,1:4])
# create correlation plot
corrplot(correlations, method="circle") |
d09e57b616212125b89735d2e7a65db992425764 | 3be034fa5c3465684fb67e48a8c24e55a71c4b9d | /cachematrix.R | a53e2c8c1fd9dcfae37e4adb906806e5f3f4e97e | [] | no_license | NDavis72516/ProgrammingAssignment2 | 75bc755b4b2d43c60fbbdc87a2cc4f08e45bab61 | 0a28216d6ca7a61e6151a1209f6d031f7f89bf6a | refs/heads/master | 2020-04-07T09:03:48.226396 | 2018-11-19T16:15:40 | 2018-11-19T16:15:40 | 158,238,712 | 0 | 0 | null | 2018-11-19T14:38:18 | 2018-11-19T14:38:18 | null | UTF-8 | R | false | false | 1,108 | r | cachematrix.R | ## The following makeCacheMatrix function creates a special matrix
## object that can cache its inverse. It will return a list
## containing functions that will be used as input in the
## cacheSolve function.
makeCacheMatrix <- function(x = matrix()) {
invmat <- NULL
##Set the Matrix
set <- function(y){
x <<- y
invmat <<- NULL
}
##Get the Matrix
get <- function()x
##Set the Inverse
setinvmat <- function(inverse) invmat <<- inverse
##Get the Inverse
getinvmat <- function() invmat
list(set=set, get=get, setinvmat=setinvmat, getinvmat=getinvmat)
}
## Compute (or retrieve) the inverse of the special "matrix" produced by
## makeCacheMatrix(). If an inverse is already cached and the matrix has
## not changed, the cached value is returned; otherwise the inverse is
## computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinvmat()
  # Fast path: reuse the cached inverse when available
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: compute, cache, and return the inverse
  inv <- solve(x$get(), ...)
  x$setinvmat(inv)
  inv
}
|
b95b2ddbba626898d5e0569017abc962b8cb8699 | 8a1b860d3eb1095d2f7e360059a21ffff4cd3217 | /tests/testthat/test-tabyl.R | 5bb73e431fc7e293cb2fb02083ad400915f86885 | [
"MIT"
] | permissive | sfirke/janitor | 3ad0dafb644ba704035be2bee0cc5ce4dce4bbb7 | df24347c9aa72093d06a5e30a11e67eae59412a6 | refs/heads/main | 2023-08-21T17:10:59.686361 | 2023-08-19T17:33:30 | 2023-08-19T17:33:30 | 56,102,322 | 1,355 | 152 | NOASSERTION | 2023-09-10T15:26:33 | 2016-04-12T22:29:40 | R | UTF-8 | R | false | false | 15,868 | r | test-tabyl.R | # Tests for data.frame renaming function
cyl_tbl <- tabyl(mtcars$cyl)
test_that("counts are accurate", {
expect_equal(cyl_tbl$`mtcars$cyl`, c(4, 6, 8))
expect_equal(cyl_tbl$n, c(11, 7, 14))
})
test_that("percentages are accurate", {
expect_equal(cyl_tbl$percent, c(11 / 32, 7 / 32, 14 / 32))
})
# Character input, with and without NA.
# When NA values are present, tabyl adds a valid_percent column that
# excludes NA from the denominator.
test_df <- data.frame(grp = c("a", "b", "b", "c"), stringsAsFactors = FALSE)
test_df_na <- data.frame(grp = c("a", "b", "b", "c", NA), stringsAsFactors = FALSE)
test_res <- tabyl(test_df$grp)
test_res_na <- tabyl(test_df_na$grp)
test_that("names are right", {
expect_equal(names(cyl_tbl), c("mtcars$cyl", "n", "percent"))
expect_equal(names(test_res_na), c("test_df_na$grp", "n", "percent", "valid_percent"))
})
test_that("named vectors are handled properly", { # issue 144
x <- c(a = "x", b = "y", c = "z")
expect_equal(names(tabyl(x))[1], "x")
})
test_that("NAs handled correctly", {
expect_equal(test_res_na$percent, c(0.2, 0.4, 0.2, 0.2))
expect_equal(test_res_na$valid_percent, c(0.25, 0.5, 0.25, NA))
})
test_that("show_NA = FALSE parameter works, incl. with piped input", {
resss <- test_res
names(resss)[1] <- "test_df_na$grp"
names(attr(resss, "core"))[1] <- "test_df_na$grp"
expect_equal(
resss,
tabyl(test_df_na$grp, show_na = FALSE)
)
names(attr(resss, "core"))[1] <- "grp"
names(resss)[1] <- "grp" # for this next instance, col name changes
expect_equal(
resss,
test_df_na %>% tabyl(grp, show_na = FALSE)
)
})
test_that("ordering of result by factor levels is preserved for factors", {
expect_equal(tabyl(factor(c("x", "y", "z"), levels = c("y", "z", "x")))[[1]], factor(c("y", "z", "x"), levels = c("y", "z", "x")))
})
# missing factor levels shown, with and without NA
fac <- iris[["Species"]][70:80] # to get versicolor, not the first alphabetically
fac_na <- fac
fac_na[1:2] <- NA
test_that("missing factor levels are displayed without NA values", {
expect_equal(tabyl(fac)[[1]], factor(c("setosa", "versicolor", "virginica"), levels = c("setosa", "versicolor", "virginica")))
expect_equal(tabyl(fac)[[2]], c(0, 11, 0))
expect_equal(tabyl(fac)[[3]], c(0, 1, 0))
})
test_that("missing factor levels are displayed with NA values", {
expect_equal(tabyl(fac_na)[[1]], factor(c("setosa", "versicolor", "virginica", NA), levels = c("setosa", "versicolor", "virginica")))
expect_equal(tabyl(fac_na)[[2]], c(0, 9, 0, 2))
expect_equal(tabyl(fac_na)[[3]], c(0, 9 / 11, 0, 2 / 11))
expect_equal(tabyl(fac_na)[[4]], c(0, 1, 0, NA))
})
# piping
test_that("piping in a data.frame works", {
x <- tabyl(mtcars$cyl)
names(x)[1] <- "cyl"
names(attr(x, "core"))[1] <- "cyl"
expect_equal(
x,
mtcars %>% tabyl(cyl)
)
})
test_that("column1 stays its original data type per #168, in both resulting tabyl and core", {
# test character, logical, numeric, factor X both values for show_missing_levels; confirm class in core and in main result
# do those 8 tests in a loop?
loop_df <- data.frame(
a = c(TRUE, FALSE, TRUE),
b = c("x", "y", "y"),
c = c(1, 1, 2), stringsAsFactors = FALSE
)
for (i in c("logical", "numeric", "character")) {
for (j in c(TRUE, FALSE)) {
loop_df_temp <- loop_df
class(loop_df_temp$a) <- i
loop_tab <- loop_df_temp %>% tabyl(a, b, c, show_missing_levels = j)
expect_equal(class(loop_tab[[1]]$a), class(loop_df_temp$a))
expect_equal(class(attr(loop_tab[[1]], "core")$a), class(loop_df_temp$a)) # check core class
}
}
loop_df$a <- factor(c("hi", "lo", "hi"))
for (j in c(TRUE, FALSE)) {
loop_df_temp <- loop_df
loop_tab <- loop_df_temp %>% tabyl(a, b, c, show_missing_levels = j)
expect_equal(class(loop_tab[[1]]$a), class(loop_df_temp$a))
expect_equal(levels(loop_tab[[1]]$a), levels(loop_df_temp$a))
expect_equal(class(attr(loop_tab[[1]], "core")$a), class(loop_df_temp$a)) # check core class and levels
expect_equal(levels(attr(loop_tab[[1]], "core")$a), levels(loop_df_temp$a))
}
})
# bad inputs: tabyl should reject vector types it cannot tabulate
test_that("failure occurs when passed unsupported types", {
expect_error(tabyl(matrix(1:10, nrow = 5)), "input must be a vector of type logical, numeric, character, list, or factor")
expect_error(tabyl(complex(10)), "input must be a vector of type logical, numeric, character, list, or factor")
})
test_that("bad input variable name is preserved", {
expect_equal(
mtcars %>% dplyr::mutate(`bad name` = cyl) %>% tabyl(`bad name`) %>% names() %>% .[[1]],
"bad name"
)
k <- mtcars %>% dplyr::mutate(`bad name` = cyl)
expect_equal(
tabyl(k$`bad name`) %>% names() %>% .[[1]],
"k$`bad name`"
)
})
test_that("input variable names 'percent' and 'n' are handled", {
a <- mtcars %>% tabyl(mpg)
expect_equal(
a %>% tabyl(percent),
as_tabyl(
data.frame(
percent = c(1 / 32, 2 / 32),
n = c(18, 7),
percent_percent = c(18 / 25, 7 / 25)
),
1
)
)
expect_equal(
a %>% tabyl(n),
as_tabyl(
data.frame(
n = 1:2,
n_n = c(18, 7),
percent = c(18 / 25, 7 / 25)
),
1
)
)
})
test_that("bizarre combination of %>%, quotes, and spaces in names is handled", {
dat <- data.frame(
`The candidate(s) applied directly to my school` = c("a", "b", "a", "b"),
check.names = FALSE,
stringsAsFactors = FALSE
)
expect_equal(
tabyl(dat$`The candidate(s) applied directly to my school` %>% gsub("hi", "there", .)) %>%
names() %>%
.[1],
"dat$`The candidate(s) applied directly to my school` %>% gsub(\"hi\", \"there\", .)"
)
})
test_that("grouped data.frame inputs are handled (#125)", {
expect_equal(
mtcars %>% dplyr::group_by(cyl) %>% tabyl(carb, gear),
mtcars %>% tabyl(carb, gear)
)
})
test_that("if called on non-existent vector, returns useful error message", {
expect_error(tabyl(mtcars$moose), "object mtcars\\$moose not found")
expect_error(tabyl(moose), "object 'moose' not found")
expect_error(mtcars %>% tabyl(moose))
})
test_that("if called on data.frame with no or irregular columns specified, returns informative error message", {
expect_error(tabyl(mtcars), "if calling on a data.frame, specify unquoted column names(s) to tabulate. Did you mean to call tabyl() on a vector?",
fixed = TRUE
)
expect_error(tabyl(mtcars, var2 = am),
"please specify var1 OR var1 & var2 OR var1 & var2 & var3",
fixed = TRUE
)
})
test_that("fails if called on a non-data.frame list", { # it's not meant to do this and result will likely be garbage, so fail
L <- list(a = 1, b = "rstats")
expect_error(tabyl(L),
"tabyl() is meant to be called on vectors and data.frames; convert non-data.frame lists to one of these types",
fixed = TRUE
)
})
# showing missing factor levels
test_that("show_missing_levels parameter works", {
z <- structure(
list(
a = structure(1, .Label = c("hi", "lo"), class = "factor"),
b = structure(2, .Label = c("big", "small"), class = "factor"),
new = structure(1, .Label = c("lvl1", "lvl2"), class = "factor")
),
row.names = c(NA, -1L), class = c("tbl_df", "tbl", "data.frame"),
.Names = c("a", "b", "new")
)
expect_equal(
z %>% tabyl(a, b, new, show_missing_levels = TRUE),
list(lvl1 = data.frame(
a = c("hi", "lo"),
big = c(0, 0),
small = c(1, 0),
stringsAsFactors = TRUE
) %>% as_tabyl(2, "a", "b"))
)
expect_equal(
z %>% tabyl(a, b, new, show_missing_levels = FALSE) %>% .[[1]],
data.frame(
a = factor("hi", levels = c("hi", "lo")),
small = c(1)
) %>% as_tabyl(2, "a", "b")
)
# Works with numerics
expect_equal(
mtcars %>% tabyl(cyl, am),
data.frame(
cyl = c(4, 6, 8),
`0` = c(3, 4, 12),
`1` = c(8, 3, 2),
check.names = FALSE
) %>% as_tabyl(2, "cyl", "am")
)
})
# NA handling - position and removal
# Putting this outside the following test block for later re-use
x <- data.frame(
a = c(1, 2, 2, 2, 1, 1, 1, NA, NA, 1),
b = c(rep("up", 4), rep("down", 4), NA, NA),
c = 10,
d = c(NA, 10:2),
stringsAsFactors = FALSE
)
test_that("NA levels get moved to the last column in the data.frame, are suppressed properly", {
y <- tabyl(x, a, b) %>%
untabyl()
expect_equal(
y,
data.frame(
a = c(1, 2, NA),
down = c(3, 0, 1),
up = c(1, 3, 0),
NA_ = c(1, 0, 1)
)
)
expect_equal(
tabyl(x, a, b, show_na = FALSE) %>%
untabyl(),
data.frame(
a = c(1, 2),
down = c(3, 0),
up = c(1, 3)
)
)
# one-way suppression
expect_equal(
tabyl(x$a, show_na = FALSE) %>%
untabyl(),
data.frame(
`x$a` = 1:2,
n = c(5, 3),
percent = c(0.625, 0.375),
check.names = FALSE
)
)
# NA level is shown in 3 way split
y <- x %>% tabyl(c, a, b, show_missing_levels = FALSE)
expect_equal(length(y), 3)
expect_equal(names(y), c("down", "up", "NA_"))
expect_equal(
y[["NA_"]], # column c remains numeric
x %>%
dplyr::filter(is.na(b)) %>%
tabyl(c, a)
)
y_with_missing <- x %>% tabyl(c, a, b, show_missing_levels = TRUE)
expect_equal(length(y_with_missing), 3)
expect_equal(names(y_with_missing), c("down", "up", "NA_"))
expect_equal(
y_with_missing[["NA_"]] %>% untabyl(), # column c remains numeric
data.frame(c = 10, `1` = 1, `2` = 0, NA_ = 1, check.names = FALSE)
)
# If no NA in 3rd variable, it doesn't appear in split list
expect_equal(length(dplyr::starwars %>%
dplyr::filter(species == "Human") %>%
tabyl(eye_color, skin_color, gender, show_missing_levels = TRUE)), 2)
# The starwars data set changed in dplyr v 1.0.0 so have two blocks of tests:
if (packageVersion("dplyr") > package_version("0.8.5")) {
# If there is NA, it does appear in split list
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_missing_levels = TRUE)), 3)
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_missing_levels = FALSE)), 3)
# NA level in the list gets suppressed if show_na = FALSE. Should have one less level if NA is suppressed.
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_na = TRUE)), 3)
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_na = FALSE)), 2)
} else {
# If there is NA, it does appear in split list
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_missing_levels = TRUE)), 5)
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_missing_levels = FALSE)), 5)
# NA level in the list gets suppressed if show_na = FALSE. Should have one less level if NA is suppressed.
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_na = TRUE)), 5)
expect_equal(length(dplyr::starwars %>%
tabyl(eye_color, skin_color, gender, show_na = FALSE)), 4)
}
})
test_that("zero-row and fully-NA inputs are handled", {
zero_vec <- character(0)
expect_equal(nrow(tabyl(zero_vec)), 0)
expect_equal(names(tabyl(zero_vec)), c("zero_vec", "n", "percent"))
zero_df <- data.frame(a = character(0), b = character(0))
expect_message(
expect_equal(nrow(tabyl(zero_df, a, b)), 0)
)
expect_message(
expect_equal(names(tabyl(zero_df, a, b)), "a"),
"No records to count so returning a zero-row tabyl"
)
all_na_df <- data.frame(a = c(NA, NA), b = c(NA_character_, NA_character_))
expect_message(
expect_equal(tabyl(all_na_df, a, b, show_na = FALSE) %>% nrow(), 0)
)
expect_message(
expect_equal(tabyl(all_na_df, a, b, show_na = FALSE) %>% names(), "a"),
"No records to count so returning a zero-row tabyl"
)
})
test_that("print.tabyl prints without row numbers", {
expect_equal(
mtcars %>% tabyl(am, cyl) %>% capture.output(),
c(" am 4 6 8", " 0 3 4 12", " 1 8 3 2")
)
})
test_that("the dplyr warning suggesting forcats::fct_explicit_na that is generated by a tabyl of a factor with NA values is caught ", {
# leaving this in as I'd want to know if it ever gets loud again, but the warning seems to be gone in
# dplyr 1.0.0 and I have removed the withCallingHandlers({}) code in tabyl() that this was testing
expect_silent(
tabyl(factor(c("a", "b", NA)))
)
xx <- data.frame(
a = factor(c("a", "b", NA)),
b = 1:3
)
expect_silent(xx %>%
tabyl(a, b))
})
test_that("3-way tabyl with 3rd var factor is listed in right order, #250", {
z <- mtcars
z$cyl <- factor(z$cyl, levels = c(4, 8, 6))
expect_equal(names(tabyl(z, am, gear, cyl)), c("4", "8", "6"))
z$cyl[32] <- NA
expect_equal(names(tabyl(z, am, gear, cyl)), c("4", "8", "6", "NA_"))
expect_equal(names(tabyl(z, am, gear, cyl, show_na = FALSE)), c("4", "8", "6"))
z <- z %>% dplyr::filter(!cyl %in% "4")
expect_equal(names(tabyl(z, am, gear, cyl)), c("8", "6", "NA_"))
})
test_that("tabyl works with ordered 1st variable, #386", {
mt_ordered <- mtcars
mt_ordered$cyl <- ordered(mt_ordered$cyl, levels = c("4", "8", "6"))
ordered_3way <- mt_ordered %>%
tabyl(cyl, gear, am)
expect_equal(class(ordered_3way[[1]]$cyl), c("ordered", "factor")) # 1st col in resulting tabyl
expect_equal(class(attr(ordered_3way[[1]], "core")$cyl), c("ordered", "factor")) # 1st col in tabyl core
})
test_that("factor ordering of columns is correct in 2-way tabyl", {
two_factors <- data.frame(
x = factor(c("big", "small", "medium", "small"),
levels = c("small", "medium", "big")
),
y = factor(c("hi", "hi", "hi", "lo"),
levels = c("lo", "hi")
)
)
expect_equal(
two_factors %>%
tabyl(x, y) %>%
names(),
c("x", "lo", "hi")
)
})
test_that("empty strings converted to _emptystring", {
mt_empty <- mtcars
mt_empty$cyl[1:2] <- c("", NA_character_)
expect_equal(
mt_empty %>%
tabyl(am, cyl) %>%
names(),
c("am", "4", "6", "8", "emptystring_", "NA_")
)
})
test_that("3way tabyls with factors in cols 1-2 are arranged correctly, #379", {
dat_3wayfactors <- data.frame(
gender = c("f", "m", "m", "f", "m"),
age_group = factor(
c("18-35", "46-55", "46-55", "36-45", ">55"),
levels = c("18-35", "36-45", "46-55", ">55")
),
bmi_group = factor(
c("18.5 - 25", "25 - 30", "18.5 - 25", ">30", "<18.5"),
levels = c("<18.5", "18.5 - 25", "25 - 30", ">30")
),
stringsAsFactors = TRUE
)
tabyl_3wf <- dat_3wayfactors %>%
tabyl(bmi_group, age_group, gender, show_missing_levels = FALSE)
expect_equal(names(tabyl_3wf$m), c("bmi_group", "46-55", ">55"))
expect_equal(
tabyl_3wf$f[[1]],
factor(
c("18.5 - 25", ">30"),
levels = c("<18.5", "18.5 - 25", "25 - 30", ">30")
)
)
})
test_that("tabyl errors informatively called like tabyl(mtcars$cyl, mtcars$gear), #377", {
expect_error(
tabyl(mtcars$cyl, mtcars$am),
regexp = "Did you try to call tabyl on two vectors"
)
has_logicals <- data.frame(x = 1:2, y = c(TRUE, FALSE))
expect_error(
tabyl(has_logicals$x, has_logicals$y),
regexp = "Did you try to call tabyl on two vectors"
)
expect_type(
has_logicals %>%
tabyl(x, y),
"list"
)
})
test_that("2-way tabyl with numeric column names is sorted numerically", {
df <- data.frame(var1 = c(1:11), var2 = c(NA, 10:1))
expect_equal(colnames(df %>% tabyl(var1, var2)), c("var1", 1:10, "NA_"))
})
test_that("3-way tabyl with numeric names is sorted numerically", {
expect_equal(
names(mtcars %>% tabyl(gear, cyl, hp)),
as.character(sort(unique(mtcars$hp)))
)
# Check putting NA last - data.frame "x" is created way above
expect_equal(
names(x %>% tabyl(a, c, d)),
c(2:10, "NA_")
)
})
|
05f9371d9e15719c19afdf89a3484efe0919f617 | 5f10cd06234643712e76bd93f93fcabfd675c5c6 | /server.R | 3316d9711db62306c112bac3fe336a6ef4400c75 | [] | no_license | Deleetdk/tail_effects | 0cd1e3850eef35965d84bfc7e2a7a99813c259a8 | eaebbc4c61eb4988c49da850f821d0913666fb0f | refs/heads/master | 2021-01-10T18:00:38.560913 | 2020-06-12T23:42:16 | 2020-06-12T23:42:16 | 36,995,656 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,326 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
shinyServer(function(input, output) {
  # Reactive 1000-point grid of trait values spanning both group distributions
  # (and the threshold), with each group's normal density scaled by its size.
  reac_data = reactive({
    #make dataframe
    d = data.frame(matrix(ncol = 3, nrow = 1000))
    colnames(d) = c("A_den", "B_den", "x")
    #find lower x limit
    lowest.group.mean = min(input$mean_A, input$mean_B)
    # NOTE(review): which.lowest / which.highest are computed but never used.
    which.lowest = which.min(c(input$mean_A, input$mean_B))
    # The SD paired with the lower mean: if A's mean is higher, B is lower.
    if (input$mean_A > input$mean_B) {
      lowest.group.SD = input$sd_B
    } else {
      lowest.group.SD = input$sd_A
    }
    # Extend the plot 3 SDs below the lower group's mean, or to the threshold
    # if the threshold is lower still.
    plot.lower.limit = min(lowest.group.mean - (lowest.group.SD * 3), input$threshold)
    #find upper x limit
    highest.group.mean = max(input$mean_A, input$mean_B)
    which.highest = which.max(c(input$mean_A, input$mean_B))
    if (input$mean_A > input$mean_B) {
      highest.group.SD = input$sd_A
    } else {
      highest.group.SD = input$sd_B
    }
    plot.upper.limit = max(highest.group.mean + (highest.group.SD * 3), input$threshold)
    #calculate x steps
    d$x = seq(plot.lower.limit, plot.upper.limit, length.out = 1000)
    #calculate densities
    # Densities are multiplied by group size so the curves reflect population
    # counts rather than unit-area probability densities.
    d$A.den = dnorm(d$x, mean = input$mean_A, sd = input$sd_A) * input$size_A
    d$B.den = dnorm(d$x, mean = input$mean_B, sd = input$sd_B) * input$size_B
    #return
    return(d)
  })
  # Density curves for both groups (blue = A, red = B) with a dashed vertical
  # line at the selection threshold.
  output$plot <- renderPlot({
    #plot
    w = 1.5
    ggplot(reac_data(), aes(x = x)) +
      geom_line(aes(x = x, y = A.den), color = "blue", size = w) +
      geom_line(aes(x = x, y = B.den), color = "red", size = w) +
      geom_vline(xintercept = input$threshold, linetype = "dashed", size = w, alpha = .7) +
      xlab("Trait level") + ylab("Density")
  })
  # 4x4 summary table: tail percentages, blue/red ratios, subgroup means, and
  # the blue share of the population on either side of the threshold.
  output$table = renderDataTable({
    t = data.frame(matrix(nrow = 4, ncol = 4))
    colnames(t) = c("Blue Group", "Red group", "Ratio blue/red", "Percent blue")
    rownames(t) = c("Percent above threshold",
                    "Percent below threshold",
                    "Mean of population above threshold",
                    "Mean of population below threshold")
    ## insert values
    #percent above
    t[1, 1] = (1 - pnorm(input$threshold, mean = input$mean_A, sd = input$sd_A)) * 100
    t[1, 2] = (1 - pnorm(input$threshold, mean = input$mean_B, sd = input$sd_B)) * 100
    #percent below
    t[2, 1] = pnorm(input$threshold, mean = input$mean_A, sd = input$sd_A) * 100
    t[2, 2] = pnorm(input$threshold, mean = input$mean_B, sd = input$sd_B) * 100
    #ratio
    # Ratios weight each group's tail percentage by its population size.
    t[1, 3] = (t[1, 1] * input$size_A) / (t[1, 2] * input$size_B)
    t[2, 3] = (t[2, 1] * input$size_A) / (t[2, 2] * input$size_B)
    ## means of subgroups
    d = reac_data() #fetch data
    d.above = d[d$x > input$threshold, ]
    d.below = d[d$x < input$threshold, ]
    #above
    # Subgroup means approximated as density-weighted means over the x grid.
    t[3, 1] = weighted.mean(d.above$x, d.above$A.den)
    t[3, 2] = weighted.mean(d.above$x, d.above$B.den)
    #below
    t[4, 1] = weighted.mean(d.below$x, d.below$A.den)
    t[4, 2] = weighted.mean(d.below$x, d.below$B.den)
    #Percent blue
    t[1, 4] = (t[1, 1] * input$size_A / (t[1, 1] * input$size_A + t[1, 2] * input$size_B)) * 100
    t[2, 4] = (t[2, 1] * input$size_A / (t[2, 1] * input$size_A + t[2, 2] * input$size_B)) * 100
    #render and return
    # dom = 't' shows only the table body (no search box or pagination).
    t = DT::datatable(t, options = list(dom = 't'))
    return(t)
  })
})
|
f74dcce1050d7cf226a01fb69203ff9366906445 | d2d512343113d39f49ae889bc5e7faf15af07c97 | /ui.R | a2b5714104f218b5060b6382f3d302a58db966a7 | [
"MIT"
] | permissive | NickSalkowski/HALICON_2015_Reporting_Presentation | 142c316996f10630d14eff00f4e0a6960358e707 | 4488debb11828d8e75bab0e164977b2b99398721 | refs/heads/master | 2016-08-12T15:47:49.793871 | 2015-10-21T17:27:04 | 2015-10-21T17:27:04 | 44,686,833 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 284 | r | ui.R | library(shiny)
# Classic pageWithSidebar layout: a species picker in the sidebar, and a title
# plus two summary plots (rendered by the matching server.R) in the main panel.
shinyUI(pageWithSidebar(
  headerPanel('Iris Summary Plots'),
  sidebarPanel(
    # Choices come straight from the iris dataset's Species factor levels.
    selectInput('species', 'Species',
                unique(as.character(iris$Species)))),
  mainPanel(
    htmlOutput("spec_title"),
    plotOutput('plot1'),
    plotOutput('plot2')
  )
)) |
6e588f8d726e248eeafbbc49043fa10212ae80c0 | 12c5748b0121523f75ac29d40c4980d5805347b5 | /Heatmaps_plots/example_heatmap.R | 5804913b2c6ce3442f28b95a442fe2758067f3e2 | [] | no_license | giuliagrisot/shared_scripts | d9cbd3c4056b10d66c6473d9cafce116186e510e | 440e4f1829948c408df9eb9d799835af86c515a7 | refs/heads/main | 2023-06-24T18:51:55.579777 | 2021-07-02T13:07:33 | 2021-07-02T13:07:33 | 382,304,081 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,101 | r | example_heatmap.R | # suppose you have a dataset of texts, split by word, with ids for words, sentences, line, page, position in line, where values are aggregated by item (what i was plotting here is the "perspective count").
library(readr)
mini_dataset <- read_csv("mini_dataset.csv")
# you can create a heatmap as follows (One text at the time, here Text 1)
mini_dataset %>%
filter(Text== 1) %>%
ggplot() +
geom_tile(aes(x=Position,
y=Line_on_page,
fill = Persp_count)) +
scale_fill_viridis_c(limits=c(1, 5), option="inferno") +
geom_text(aes(x=Position,
y=Line_on_page,
label=word, colour = as.factor(Persp_count)),
size = 4, check_overlap = T, family = "sans") +
scale_color_manual(values = cols, guide = F) +
# scale_color_brewer(palette = "Greys", guide = F) +
# scale_color_viridis_c(direction = -1, guide = F) +
scale_y_reverse() +
facet_grid(Page ~ Modified) +
theme(
# strip.text = element_text(size=7, face="bold.italic"),
axis.title.x = element_blank(), axis.title.y = element_blank())
|
c9d05eb43ca450b47ac8fa045562167ae6f11ef2 | 4f264890932ec5679cb38803520a6fd4da3f5e19 | /R/coxgrad.R | 6157be6a418265cf53f1a080f000b209201a0416 | [] | no_license | nfultz/glmnet-mirror | 9287ab325b381149c92f7a16cbb5a1afc30b11a6 | 813aae116e0adea471c9dd8124ebf7ef1a45ca23 | refs/heads/master | 2020-09-08T10:03:45.940440 | 2020-05-17T20:37:10 | 2020-05-17T20:37:10 | 221,103,049 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,317 | r | coxgrad.R | #' compute gradient for cox model
#'
#' Compute the gradient of the partial likelihood at a particular fit
#'
#' Compute a gradient vector at the fitted vector for the log partial likelihood.
#' This is like a residual vector, and useful for manual screening of predictors for \code{glmnet}
#' in applications where \code{p} is very large (as in GWAS). Uses the Breslow approach to ties
#'
#' @aliases coxgrad
#' @param f fit vector
#' @param time time vector (can have ties)
#' @param d death/censoring indicator 1/0
#' @param w observation weights (default equal)
#' @param eps (default 0.00001) Breaks ties between death and censoring by making death times \code{eps} earlier
#' @return a single gradient vector the same length as \code{f}
#' @author Trevor Hastie\cr Maintainer: Trevor Hastie <hastie@@stanford.edu>
#' @seealso \code{coxnet.deviance}
#' @keywords Cox model
#'
#' @export coxgrad
coxgrad=function(f,time,d,w,eps=0.00001){
### f is fitted function from glmnet at a particular lambda
### time is death or censoring time
### d is death indicator; d=0 means censored, d=1 means death
### w is a weight vector of non-negative weights, which will be normalized to sum to 1
if(missing(w))w=rep(1,length(f))
w=w/sum(w)
f=scale(f,TRUE,FALSE)#center f so exponents are not too large
time=time-d*eps#break ties between death times and non death times, leaving tied death times tied
o=order(time)
ef=exp(f)[o]
time=time[o]
d=d[o]
w=w[o]
rskden=rev(cumsum(rev(ef*w))) ##reverse order inside;last guy is in all the risk sets
### See if there are dups in death times
dups=fid(time[d==1],seq(length(d))[d==1])
dd=d
ww=w
### next code replaces each sequence of tied death indicators by a new
### sequence where only the first is a 1 and the rest are zero. This
### makes the accounting in the following step work properly we also
### sums the weights in each of the tied death sets, and assign that
### weight to the first
if(!is.null(ties<-dups$index_ties)){
dd[unlist(ties)]=0
dd[dups$index_first]=1
wsum=sapply(ties,function(i,w)sum(w[i]),ww)
tie1=sapply(ties,function(i)i[1])
ww[tie1]=wsum
}
### Get counts over risk sets at each death time
rskcount=cumsum(dd)#this says how many of the risk sets each observation is in; 0 is none
### We now form partial sums of the 1/den just at the risk sets
rskdeninv=cumsum((ww/rskden)[dd==1])
### pad with a zero, so we can index it
rskdeninv=c(0,rskdeninv)
### compute gradient for each obs
grad=(d-rskdeninv[rskcount+1]*ef)*w
grad[o]=grad
grad
}
# Locate tied death times in a sorted vector.
#
# x      : sorted vector of death times (may contain runs of equal values)
# index  : the original indices corresponding to x
#
# Returns a list with:
#   index_first : index of the first member of every death-time set, in order
#   index_ties  : for sets of size >= 2, the full index vectors of each set;
#                 NULL when x has no duplicates at all
fid <- function(x, index) {
  dup_flags <- duplicated(x)
  # Fast path: every death time is unique, so there are no tie sets.
  if (!any(dup_flags)) {
    return(list(index_first = index, index_ties = NULL))
  }
  first_of_set <- !dup_flags
  unique_times <- x[first_of_set]
  # Map each element of x to the id of its death-time set.
  set_id <- match(x, unique_times)
  members_by_set <- split(index, set_id)
  set_sizes <- vapply(members_by_set, length, integer(1))
  list(
    index_first = index[first_of_set],
    index_ties  = members_by_set[set_sizes > 1]
  )
}
a22d63bad87e9bf8ac683f53f6c720b4e50fb72f | 9f56f2c528db5f15b6cb2709a9521b5845632ccb | /oldversions/20131215erccdashboard/R/meltExpDat.R | a92ab9a1b062e7136feb4ce0f543a6a005502196 | [] | no_license | munrosa/one-gene-two-gene | a44af2d82de33441bb43ed831363f42211cd06b1 | 3b7de253afcea5cf5ad8f38dc972c09cce2771ed | refs/heads/master | 2021-01-01T19:42:34.144808 | 2014-01-27T05:31:24 | 2014-01-27T05:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,216 | r | meltExpDat.R | meltExpDat <- function(expDat, cnt, designMat){
sampleInfo <- expDat$sampleInfo
libeSize <- expDat$sampleLibeSums
datNames <- colnames(designMat)[-1]
sample1 <- expDat$sampleNames[1]
sample2 <- expDat$sampleNames[2]
datCols = cnt[-c(1)]
libAdjust = sweep(datCols, 2, libeSize,"/")
sampleLibeDataNorm = cbind(cnt[c(1)],libAdjust)
myDataERCC = sampleLibeDataNorm
expressDat = myDataERCC[-c(1)]
sampleNameList = c(sample1,sample2)
expressDatAandBERCCs = merge(expDat$idColsAdj[c(1,4)],myDataERCC)
expressDatAandBERCCs$Feature <- as.factor(as.character(
expressDatAandBERCCs$Feature))
expressDatAandBERCCs$Ratio <- as.factor(as.character(
expressDatAandBERCCs$Ratio))
expressDat_l <- melt(expressDatAandBERCCs)
colAdd <- colsplit(expressDat_l$variable,pattern="_",names=datNames)
colAdd = as.data.frame(lapply(colAdd,as.factor))
expressDat_l<- expressDat_l[,-c(3)]
colnames(expressDat_l)[3]<-"NormCounts"
expressDat_l <- cbind(expressDat_l, colAdd)
#expressDat_l$Sample <- as.factor(as.character(expressDat_l$Sample))
#expressDat_l$Replicate <- as.factor(as.character(expressDat_l$Replicate))
expDat$expressDat_l <- expressDat_l
return(expDat)
} |
2920f612b24e8b44fe560ea540b064c5f65862c2 | 9a816ba959c4880e78165fadd8cc23c8e7409f18 | /tests/testthat.R | 1a5521b9f440d684359d24c8ee7b85f0ea889753 | [
"MIT"
] | permissive | augustobrusaca/KHQ | bdee00c014688d9367d6a8c88300b4ee1d43af72 | 744ae68f9f46bd00067468f5bf4edaf2d64353d5 | refs/heads/master | 2023-07-01T13:38:37.358782 | 2021-08-09T01:31:21 | 2021-08-09T01:31:21 | 384,135,327 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50 | r | testthat.R | library(testthat)
# Standard testthat entry point: load the package under test and run every
# test file under tests/testthat/.
library(KHQ)
test_check("KHQ")
|
33ab1fb65eee5804d9fe45152e98be946734c583 | c3a7c2a227dd4f01a198eed13af163b34a237986 | /App.R | 58d3a1890c1ecd01dc8b478a9fbdcdb745db2ec6 | [] | no_license | akramRedjdal/ProjetUE4 | 126e1b087ae96f9b019e7fe241d57c63766e136c | 989dc432200505f6a304c2d954523ba39b673513 | refs/heads/master | 2020-04-15T03:53:42.201705 | 2019-01-10T10:25:53 | 2019-01-10T10:25:53 | 164,364,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,924 | r | App.R | library(data.table)
library(spdep)
library(sp)
library(stringr)
library(rgdal)
library(DCluster)
library(epitools)
library(sf)
library(leaflet)
library(dplyr)
library(stats)
library(tidyverse)
library(geojsonio)
library(shape)
library(shiny)
library(ggplot2)
library(plotly)
library(DT)
library(shinydashboard)
library(shinythemes)
library(graphics)
library(knitr)
library(kableExtra)
# Render any tabular object as a compact centred HTML table with striped /
# hover Bootstrap styling (small font, left-aligned, natural width).
tableau <- function(df) {
  # format() centres the cells and inserts thin-space thousands separators.
  formatted <- format(as.data.frame(df),
                      width = NULL, justify = "centre", big.mark = " ")
  html_table <- kable(formatted, format = "html")
  kable_styling(
    html_table,
    bootstrap_options = c("striped", "hover"),
    full_width = FALSE,
    font_size = 11,
    position = "left"
  )
}
# DATA IMPORT
polluants <- read_delim("data/etablissement.csv" , "," , escape_double = FALSE , trim_ws = TRUE)
localisationParEtablissement <- read_delim("data/registre-francais-des-emission-polluantes-etablissements.csv" , ";" , escape_double = FALSE , trim_ws = TRUE)
radon <- read_delim("data/radon.csv" , ";" , escape_double = FALSE , trim_ws = TRUE)
# Build the base dataset: establishments present in the polluters file,
# joined to their registry record (location, APE/Eprtr codes, ...).
EtablissementsPolluants <- polluants %>% dplyr::select( nom ) %>%
  setnames(old=c("nom"), new=c("Nom Etablissement")) %>%
  merge(localisationParEtablissement,by="Nom Etablissement")
# Split the "lng,lat" coordinate string into two numeric columns.
EtablissementsPolluants <- setDT(EtablissementsPolluants)[, paste0("type", 1:2) := tstrsplit(coordonnees, ",", type.convert = TRUE, fixed = TRUE)] %>%
  setnames(old=c("type1","type2"), new=c("lng","lat"))
EtablissementsPolluants <- EtablissementsPolluants %>% filter(!is.na(EtablissementsPolluants$lat))
# Rename columns by position for easier downstream reference.
colnames(EtablissementsPolluants)[12] <- "Libelle_APE"
colnames(EtablissementsPolluants)[1] <- "Nom_etablissement"
colnames(EtablissementsPolluants)[3] <- "Num_Siret"
colnames(EtablissementsPolluants)[11] <- "CODE_APE"
colnames(EtablissementsPolluants)[13] <- "CODE_Eprtr"
# Normalize the departement and commune columns (strip punctuation, lower
# case) so the merge can key on both and avoid spurious NAs.
EtablissementsPolluants$Departement <- gsub("[[:punct:]]", "", as.character(EtablissementsPolluants$Departement))
radon$nom_dept <- gsub("[[:punct:]]", "", as.character(radon$nom_dept))
EtablissementsPolluants$Departement <- tolower(EtablissementsPolluants$Departement)
radon$nom_dept <- tolower(radon$nom_dept)
EtablissementsPolluants <- EtablissementsPolluants %>% filter(!is.na(Departement))%>% group_by(Departement)
radon <- radon %>% filter(!is.na(nom_dept)) %>% group_by(nom_dept)
EtablissementsPolluants$Commune <- gsub("[[:punct:]]", "", as.character(EtablissementsPolluants$Commune))
radon$nom_comm <- gsub("[[:punct:]]", "", as.character(radon$nom_comm))
EtablissementsPolluants$Commune <- tolower(EtablissementsPolluants$Commune)
radon$nom_comm <- tolower(radon$nom_comm)
EtablissementsPolluants <- EtablissementsPolluants %>% filter(!is.na(Commune))%>% group_by(Commune)
radon <- radon %>% filter(!is.na(nom_comm)) %>% group_by(nom_comm)
# Keep every establishment (all.y = TRUE), attaching radon info where the
# (commune, departement) pair matches.
EtablissementRadon <- merge(radon,EtablissementsPolluants,by.x=c("nom_comm","nom_dept"),by.y=c("Commune","Departement"),all.x=F,all.y=T)
EtablissementRadon <- EtablissementRadon %>% dplyr::filter(!is.na(EtablissementRadon$lng))
# Build a SpatialPointsDataFrame from the coordinates.
coordinates(EtablissementRadon) <- ~ lng + lat
# Swap the coordinate columns to (lat, lng) order -- presumably for the
# leaflet calls below, which read this matrix directly. TODO confirm.
EtablissementRadon@coords <- EtablissementRadon@coords[,c(2,1)]
# Course-provided data (population counts and disease events per departement).
effectif_france <- read_delim("data/effectif.france.csv", ";", escape_double = FALSE, trim_ws = TRUE)
evenements <- read_delim("data/evenements.csv", ";", escape_double = FALSE, trim_ws = TRUE)
effectif_departement <- read_delim("data/effectif.departement.csv", ";", escape_double = FALSE, trim_ws = TRUE)
# Directly age-standardized incidence rate per 100,000 for each departement
# column (France as the standard population).
# NOTE(review): growing a vector with append() in a loop is O(n^2); a
# vapply() over the column names would be cleaner.
ratio.vector <- vector()
for (i in colnames(evenements[,-1])) {
  ratio.vector <- append(ratio.vector, round(ageadjust.direct(count = evenements[,i],
                                                              pop = effectif_departement[,i],
                                                              stdpop = effectif_france[,2])["adj.rate"] *10^5, 2))
}
ratioEvenement <- tibble(dep = colnames(evenements[,-1]),
                         ratio = ratio.vector )
# Normalize departement names the same way as the other tables.
ratioEvenement$dep <- gsub("[[:punct:]]", "", as.character(ratioEvenement$dep))
ratioEvenement$dep <- tolower(ratioEvenement$dep )
radon <- radon %>% group_by(nom_dept)
# NOTE(review): all.x/all.y are merge() arguments, not left_join() ones --
# they are silently swallowed by ... here.
ratioRadon <- left_join(ratioEvenement,radon,by=c("dep"="nom_dept"),all.x=T,all.y=F)
rm(effectif_france)
rm(evenements)
rm(effectif_departement)
# Ratio normalization (NOTE(review): redundant -- dep was already cleaned above).
ratioEvenement$dep <- gsub("[[:punct:]]", "", as.character(ratioEvenement$dep))
ratioEvenement$dep <- tolower(ratioEvenement$dep )
# Merge the SpatialPolygons file with the ratio AND radon data.
departements <- geojsonio::geojson_read("data/departements.geojson", what = "sp")
departements$nom <- gsub("[[:punct:]]", "", as.character(departements$nom ))
departements$nom <- tolower(departements$nom )
# match() takes the FIRST matching row per departement -- for the commune-level
# radon table this keeps an arbitrary commune's row. TODO confirm intended.
departements@data = data.frame(departements@data, ratioEvenement[match(departements@data$nom, ratioEvenement$dep),])
departements@data = data.frame(departements@data, radon[match(departements@data$nom, radon$nom_dept),])
# NOTE(review): matching on classe_potentiel attaches the first ratioRadon row
# with the same radon class, not the establishment's own departement ratio.
EtablissementRadon@data= data.frame(EtablissementRadon@data, ratioRadon[match(EtablissementRadon@data$classe_potentiel, ratioRadon$classe_potentiel),])
# Back to upper case for display in the shiny UI.
EtablissementsPolluants$Departement <- toupper(EtablissementsPolluants$Departement)
EtablissementsPolluants$Commune <- toupper(EtablissementsPolluants$Commune)
## Map: radon potential class (1-3) per departement.
pal <-colorFactor(c("gray","yellow","red"), domain = c(1,2,3) , na.color ="white" , alpha = TRUE )
carteRadonDepartement <- leaflet(departements) %>% addProviderTiles(providers$Esri.WorldStreetMap) %>%
  setView(lat = 47.256, lng = 2.35, zoom = 6) %>%   # centred on metropolitan France
  addPolygons( fillColor = ~pal(classe_potentiel),
               weight = 2,
               opacity = 1,
               color = "white",
               dashArray = "3",
               fillOpacity = 0.7) %>% addLegend(pal = pal,
                                                values = ~classe_potentiel,
                                                opacity = 0.7,
                                                title = "Potentiel Radon",
                                                position = "topright")
# Map: age-standardized incidence ratio of disease X per departement.
bins2 <- c(0,1, 2,3,4,5,6,7,8,Inf)
pal2 <- colorBin("Blues", domain = departements$ratio, bins = bins2)
carteMaladieDepartement <- leaflet(departements) %>% addProviderTiles(providers$Esri.WorldStreetMap) %>%
  setView(lat = 47.256, lng = 2.35, zoom = 6) %>%
  addPolygons( fillColor = ~pal2(ratio),
               weight = 2,
               opacity = 1,
               color = "white",
               dashArray = "3",
               fillOpacity = 0.7) %>% addLegend(pal = pal2,
                                                values = ~ratio,
                                                opacity = 0.7,
                                                title = "Ratio incidence maladie X",
                                                position = "bottomright")
# Map of France with clustered markers for every establishment.
# NOTE(review): `col` is defined but never used (pal/pal2 are used instead).
col <- colorFactor(c("gray","yellow","red"), domain = c(1,2,3) , na.color ="white" , alpha = TRUE )
carteEtablissements <- leaflet(departements) %>%
  addProviderTiles(providers$Esri.WorldStreetMap) %>%setView(lat = 47.256, lng = 2.35, zoom = 6) %>%
  addMarkers(data = EtablissementRadon@coords,
             clusterOptions = markerClusterOptions() ,
             label = EtablissementRadon@data$Nom_etablissement ,
             popup = str_c("<br/>Commune :<br/>",EtablissementRadon@data$nom_comm ,"<br/> num siret :<br/> " ,
                           EtablissementRadon@data$Num_Siret, "<br/> APE : <br/>" ,
                           EtablissementRadon@data$Libelle_APE ) )
# Interactive horizontal bar chart: establishments of the chosen departement,
# one bar per APE label, stacked by commune, limited to the first `ligne` rows.
#
# @param EtablissementsPolluants data frame with Departement, Commune,
#   Libelle_APE and Nom_etablissement columns.
# @param choice departement name to filter on.
# @param ligne maximum number of rows to plot.
# @return a plotly htmlwidget.
histDepartement <- function(EtablissementsPolluants, choice, ligne) {
  dat <- EtablissementsPolluants %>%
    filter(Departement == choice) %>%
    group_by(Libelle_APE) %>%
    filter(!is.na(Libelle_APE))
  # head() instead of dat[1:ligne, ]: the latter pads with NA rows when fewer
  # than `ligne` rows match, producing spurious NA bars and legend entries.
  dat2 <- head(dat, ligne)
  p <- ggplot(data = dat2, aes(x = dat2$Libelle_APE, y = length(Nom_etablissement) / ligne)) +
    geom_bar(aes(fill = Commune), stat = "identity", position = position_stack(reverse = TRUE)) +
    coord_flip() +
    theme(legend.position = "bottom") +
    xlab("Libellé APE") + ylab("Nombre Etablissements ") + ggtitle(choice)
  ggplotly(p)
}
# Same chart as histDepartement, but one bar per establishment name.
#
# @param EtablissementsPolluants data frame of establishments.
# @param choice departement name to filter on.
# @param ligne maximum number of rows to plot.
# @return a plotly htmlwidget.
histDepartementNom <- function(EtablissementsPolluants, choice, ligne) {
  dat <- EtablissementsPolluants %>%
    filter(Departement == choice) %>%
    group_by(Nom_etablissement) %>%
    filter(!is.na(Nom_etablissement))
  # head() avoids the NA-row padding that dat[1:ligne, ] produces when fewer
  # than `ligne` rows match.
  dat2 <- head(dat, ligne)
  p <- ggplot(data = dat2, aes(x = dat2$Nom_etablissement, y = length(Nom_etablissement) / ligne)) +
    geom_bar(aes(fill = Commune), stat = "identity", position = position_stack(reverse = TRUE)) +
    coord_flip() +
    theme(legend.position = "bottom") +
    xlab("Nom Etablissement") + ylab("Nombre Etablissements ") + ggtitle(choice)
  ggplotly(p)
}
# Same chart as histDepartement, but one bar per Eprtr code.
#
# @param EtablissementsPolluants data frame of establishments.
# @param choice departement name to filter on.
# @param ligne maximum number of rows to plot.
# @return a plotly htmlwidget.
histDepartementCodeEprtr <- function(EtablissementsPolluants, choice, ligne) {
  dat <- EtablissementsPolluants %>%
    filter(Departement == choice) %>%
    group_by(CODE_Eprtr) %>%
    filter(!is.na(CODE_Eprtr))
  # head() avoids the NA-row padding that dat[1:ligne, ] produces when fewer
  # than `ligne` rows match.
  dat2 <- head(dat, ligne)
  p <- ggplot(data = dat2, aes(x = dat2$CODE_Eprtr, y = length(Nom_etablissement) / ligne)) +
    geom_bar(aes(fill = Commune), stat = "identity", position = position_stack(reverse = TRUE)) +
    coord_flip() +
    theme(legend.position = "bottom") +
    xlab("Code Eprtr ") + ylab("Nombre Etablissements ") + ggtitle(choice)
  ggplotly(p)
}
# Shiny app: interactive map tab (establishments + optional disease-incidence
# and radon-potential choropleth overlays, controlled by the opacity sliders
# and the "points" radio buttons) and an analysis tab (bar chart + data table
# per departement).
shinyApp <- shinyApp(
  ui <- fluidPage(
    theme = shinytheme("slate"),
    sidebarPanel(
      selectInput("axis","Choix X :" , choices = c("Libelle APE","Nom Etablissement","Code Eprtr"),selected = "Libelle APE"),
      selectInput("Departement", "Departement :",
                  choices = EtablissementsPolluants$Departement , selected = ""),
      checkboxInput("table","montrer le tableau de données",value = FALSE),
      sliderInput(inputId ="size", label="Nombre De Lignes", value = 15,
                  min = 5, max = 150, step = 5, animate =T , width = '400px' ),
      sliderInput(inputId ="opacity", label="Opacitée Incidence de la maladie X", value = 0,
                  min = 0, max = 2, step = 0.5, animate =T , width = '400px' ),
      sliderInput(inputId ="opacityRadon", label="Opacitée Potentiel Radon Par Region", value = 0,
                  min = 0, max = 2, step = 0.5, animate =T , width = '400px' ),
      radioButtons(inputId="points", label="Ploter des points",
                   choices = c("Incidence Maladie Par Commune",
                               "Potentiel Radon par Commune",
                               "Pas de Points"),
                   selected = "Pas de Points",
                   width = "400px" )
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Carte Interactive",box( leafletOutput("map", width = "100%", height = "700px"),title = "Carte Interavtive pour les EtablissementsPolluants,PotentielRadon,IncidenceMaladieX",width = "100%", height = "100%")),
        tabPanel("Analyse de Donnees",plotlyOutput("Plot"),
                 DT::dataTableOutput("tableau"))
      ))),
  server <- function(input, output) {
    # Data table of the selected departement's establishments; only rendered
    # when the checkbox is ticked.
    output$tableau <- DT::renderDataTable({
      if (input$table){
        DT::datatable(data = EtablissementsPolluants %>% filter(Departement == input$Departement) %>% dplyr::select(Nom_etablissement,
                                                                                                                    Libelle_APE,
                                                                                                                    CODE_APE ,
                                                                                                                    `Libelle Eprtr`,Departement,Commune),
                      options = list(pageLength=10),rownames = F)
      }})
    # Dispatch to the matching histogram builder based on the chosen x-axis.
    output$Plot <- renderPlotly({ if(input$axis=="Libelle APE"){
      histDepartement(EtablissementsPolluants,input$Departement,input$size)}
      else if (input$axis=="Nom Etablissement"){ histDepartementNom(EtablissementsPolluants,input$Departement,input$size)}
      else if (input$axis=="Code Eprtr"){histDepartementCodeEprtr(EtablissementsPolluants,input$Departement,input$size)
      }})
    # Base marker map plus both choropleth overlays; the three branches differ
    # only in whether/how per-establishment circles are coloured.
    # NOTE(review): the three branches are near-identical and could share a
    # helper that appends the polygon layers.
    output$map <- renderLeaflet({
      if(input$points=="Incidence Maladie Par Commune"){
        # Circles coloured by the disease-incidence ratio.
        carteEtablissements %>%
          addPolygons( fillColor = ~pal2(ratio),
                       weight = 2,
                       opacity = 1,
                       color = "white",
                       dashArray = "3",
                       fillOpacity = input$opacity)%>% addLegend(pal = pal2,
                                                                 values = ~ratio,
                                                                 opacity = 0.7,
                                                                 title = "Ratio incidence maladie X",
                                                                 position = "bottomright") %>%
          addPolygons( fillColor = ~pal(classe_potentiel),
                       weight = 2,
                       opacity = 1,
                       color = "White",
                       dashArray = "3",
                       fillOpacity = input$opacityRadon) %>%
          addLegend(pal = pal,
                    values = ~classe_potentiel,
                    opacity = 0.7,
                    title = "Potentiel Radon",
                    position = "topright") %>%
          addCircles(data = EtablissementRadon@coords,
                     color= pal2(EtablissementRadon$ratio) ,
                     label = EtablissementRadon$nom_dept,
                     popup = str_c("<br/>Ratio :<br/>",EtablissementRadon@data$ratio,
                                   "<br/>Commune :<br/>",EtablissementRadon@data$nom_comm ) )
      }
      else if(input$points=="Potentiel Radon par Commune"){
        # Circles coloured by the commune's radon potential class.
        carteEtablissements %>% addPolygons( fillColor = ~pal2(ratio),
                                             weight = 2,
                                             opacity = 1,
                                             color = "white",
                                             dashArray = "3",
                                             fillOpacity = input$opacity)%>% addLegend(pal = pal2,
                                                                                       values = ~ratio,
                                                                                       opacity = 0.7,
                                                                                       title = "Ratio incidence maladie X",
                                                                                       position = "bottomright") %>%
          addPolygons( fillColor = ~pal(classe_potentiel),
                       weight = 2,
                       opacity = 1,
                       color = "White",
                       dashArray = "3",
                       fillOpacity = input$opacityRadon) %>%
          addLegend(pal = pal,
                    values = ~classe_potentiel,
                    opacity = 0.7,
                    title = "Potentiel Radon",
                    position = "topright") %>%
          addCircles(data = EtablissementRadon@coords,
                     color= pal(EtablissementRadon@data$classe_potentiel) ,
                     label = EtablissementRadon$nom_dept,
                     popup = str_c("<br/>Ratio :<br/>",EtablissementRadon@data$classe_potentiel,
                                   "<br/>Commune :<br/>",EtablissementRadon@data$nom_comm ) )
      }
      else if(input$points=="Pas de Points"){carteEtablissements %>%
          # No circles: only the two choropleth overlays on the marker map.
          addPolygons( fillColor = ~pal2(ratio),
                       weight = 2,
                       opacity = 1,
                       color = "white",
                       dashArray = "3",
                       fillOpacity = input$opacity)%>% addLegend(pal = pal2,
                                                                 values = ~ratio,
                                                                 opacity = input$opacity,
                                                                 title = "Ratio incidence maladie X",
                                                                 position = "bottomright") %>%
          addPolygons( fillColor = ~pal(classe_potentiel),
                       weight = 2,
                       opacity = 1,
                       color = "White",
                       dashArray = "3",
                       fillOpacity = input$opacityRadon) %>%
          addLegend(pal = pal,
                    values = ~classe_potentiel,
                    opacity = 0.6,
                    title = "Potentiel Radon",
                    position = "topright")
      }
    })
  },
  options = list(width = "1400", height = "1000")
)
# Neighbourhood structure for spatial autocorrelation of the incidence ratio.
departements@data$ratio <- replace(departements@data$ratio, is.na(departements@data$ratio), 0)
# Queen vs rook contiguity neighbours (voisinR is unused below).
voisinQ=poly2nb(departements)
voisinR=poly2nb(departements, queen=FALSE)
coords = coordinates(departements)
# Row-standardised contiguity weights matrix (queen).
matrice_conguiteQ = nb2listw(voisinQ,zero.policy=TRUE)
# Permutation (Monte-Carlo) test of Moran's I.
bootstrap=moran.mc(departements$ratio,listw=matrice_conguiteQ, nsim=999)
# Spatial correlogram up to lag 8.
# NOTE(review): `cor` shadows base::cor for the rest of the session.
cor=sp.correlogram(voisinQ, departements$ratio, order=8, method="I", style="W",zero.policy=TRUE )
# Stone test input: one representative coordinate per departement, joined to
# the observed incidence ratio and the radon class.
Coordonees <- EtablissementsPolluants %>% dplyr::select(Departement,lng,lat)
# Keep the first row per departement. The previous
# `Coordonees[-which(duplicated(...)), ]` form drops EVERY row when there are
# no duplicates (negative indexing with an empty vector selects zero rows);
# !duplicated() is safe in both cases and identical otherwise.
Coordonees <- Coordonees[!duplicated(Coordonees$Departement), ]
Coordonees$Departement <- gsub("[[:punct:]]", "", as.character(Coordonees$Departement))
Coordonees$Departement <- tolower(Coordonees$Departement)
stoneTest <- merge(ratioEvenement,Coordonees,by.x="dep" ,by.y="Departement")
ratioRadon <- ratioRadon %>% select(dep,ratio,classe_potentiel)
# NOTE(review): five names are assigned here; confirm the merge really yields
# five columns (dep + ratio + lng + lat looks like four).
colnames(stoneTest) <- c("dep","Observed","commune","x","y")
stoneTest <- left_join(stoneTest,ratioRadon)
stoneTest <- stoneTest %>% filter(!is.na(stoneTest$ratio))
stoneTest <- stoneTest %>% filter(!is.na(stoneTest$classe_potentiel))
# NOTE(review): classe_potentiel cancels out, so Expected == ratio; kept as
# written pending confirmation of the intended expected-count formula.
stoneTest<-cbind(stoneTest, Expected=stoneTest$classe_potentiel*stoneTest$ratio/stoneTest$classe_potentiel)
stoneTest <- stoneTest %>% dplyr::select(dep,Observed,Expected,x,y)
|
ef009bd6c24d99960869caec8179e9d0aa8e2130 | 592ebb93208dceb623ff376db8f4ebb98d2ea309 | /R/get-sablefish.R | 247809e523fe930bef6c23ecb2ca1e25fd1a5fca | [] | no_license | pbs-assess/gfdata | 7264351cf87086e4dfd9c1ff4d3b51fa75c2c89d | 2087664e77fa907c5bd9f3f6e18defd319b32712 | refs/heads/master | 2023-08-05T12:48:57.392354 | 2023-07-14T00:04:39 | 2023-07-14T00:04:39 | 179,116,653 | 1 | 1 | null | 2023-07-27T23:33:38 | 2019-04-02T16:23:22 | R | UTF-8 | R | false | false | 608 | r | get-sablefish.R | #' Extract sablefish survey data from GFBIOSQL
#'
#' @export
#' @rdname get_data
get_sablefish_surveys <- function() {
.q <- read_sql("get-sablefish-survey-data.sql")
# length_type <- get_spp_sample_length_type(225)
# search_flag <- "-- insert length type here"
# i <- grep(search_flag, .q)
# .q[i] <- paste0("CAST(ROUND(", length_type, "/ 10, 1) AS DECIMAL(8,1)) AS LENGTH,")
.d <- run_sql("GFBioSQL", .q)
names(.d) <- tolower(names(.d))
.d$species_common_name <- tolower(.d$species_common_name)
.d$species_science_name <- tolower(.d$species_science_name)
add_version(as_tibble(.d))
}
|
6c11243022fb8cd3ae2f2805d1956ee2e78fced9 | 0ef52d5c0f4e9860d22957b77623ef78334fe445 | /2021/wk_30_droughts/drought_script.R | 72e1c8ea9c4a9eb302d711ec3ebc5343c150b810 | [] | no_license | LauraCole2445/TidyTuesday | 469ee9bcb6c693b5d8da78137c9bd790c24127bb | 356cb52549b96550efd1f1db06788c42a9fc952d | refs/heads/master | 2023-07-06T02:42:22.196336 | 2021-08-11T18:18:37 | 2021-08-11T18:18:37 | 389,582,970 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,818 | r | drought_script.R | #load in libraries
library(dplyr)
library(tidyr)
library(janitor)
library(lubridate)
library(countrycode)
library(maps)
library(stringr)
library(gganimate)
library(ragg)
library(systemfonts)
library(textshaping)
library(showtext)
library(RColorBrewer)
library(extrafont)
extrafont::loadfonts(device = "win")
library(ggplot2)
library(magick)
#load in data
drought_raw<-read.csv("2021\\wk_30_droughts\\input_data.csv")
state_dict <- "https://bit.ly/2ToSrFv"
state_dict <- read.csv(state_dict)
#create dataset for map
# For each (county fips, week) keep the single worst-populated drought class:
# clean the comma-formatted population counts, pivot the d0-d4/none columns
# long, then take the class holding the largest population.
drought<-drought_raw%>%
  clean_names()%>%
  filter(state!="PR")%>%
  mutate(valid_start=ymd(valid_start),
         none=as.numeric(as.character(gsub(",","",none))),
         d0=as.numeric(as.character(gsub(",","",d0))),
         d1=as.numeric(as.character(gsub(",","",d1))),
         d2=as.numeric(as.character(gsub(",","",d2))),
         d3=as.numeric(as.character(gsub(",","",d3))),
         d4=as.numeric(as.character(gsub(",","",d4))),
         county=as.character(county))%>%
  pivot_longer(!c(map_date,fips,county,state,valid_start,valid_end,statistic_format_id),
               names_to="drought_level",
               values_to = "total_population")%>%
  select(-county,-state,-valid_end,-statistic_format_id)%>%
  group_by(map_date,fips,valid_start)%>%
  mutate(max_cat=max(total_population))%>%
  # metric = the drought class with the largest population in this county/week,
  # recoded to display labels and ordered factor levels.
  mutate(metric = drought_level[which(total_population == max(total_population))],
         metric=case_when(metric=="none"~"None",
                          metric=="d0"~"Abnormally Dry",
                          metric=="d1"~"Moderate",
                          metric=="d2"~"Severe",
                          metric=="d3"~"Extreme",
                          metric=="d4"~"Exceptional"),
         metric = factor(metric, levels = c("None","Abnormally Dry","Moderate","Severe","Extreme","Exceptional")))%>%
  select(fips,map_date,valid_start,metric)%>%
  distinct()
# Duplicate Bennett County's rows under Shannon / Oglala Lakota's fips so the
# renamed county still gets a value on the map.
drought<-drought%>%
  filter(fips==46007)%>%
  mutate(fips=46113)%>%
  rbind(drought)
#shannon fips = 46113
#bennet fips = 46007
# Attach fips codes to the maps-package county polygons so the drought data
# can be joined onto the map coordinates.
data("county.fips")
county.fips <-county.fips %>%
  mutate(region = word(polyname, 1, sep = ","),
         subregion = word(polyname, 2, sep = ",")) %>%
  mutate(subregion = word(subregion, 1, sep = ":")) %>%
  mutate(fips = str_pad(as.character(fips), side = "left", width = 5, "0"))
map_data<-map_data("county")%>%
  left_join(county.fips)%>%
  mutate(fips=as.numeric(fips))%>%
  left_join(drought)
#create dataset for time series
# National weekly percentage of population in each drought class.
# NOTE(review): this reassigns `drought`, clobbering the county-level map
# dataset built above (map_data has already been joined, so it is safe).
drought<-drought_raw%>%
  clean_names()%>%
  filter(state!="PR")%>%
  mutate(valid_start=ymd(valid_start),
         none=as.numeric(as.character(gsub(",","",none))),
         d0=as.numeric(as.character(gsub(",","",d0))),
         d1=as.numeric(as.character(gsub(",","",d1))),
         d2=as.numeric(as.character(gsub(",","",d2))),
         d3=as.numeric(as.character(gsub(",","",d3))),
         d4=as.numeric(as.character(gsub(",","",d4))),
         county=as.character(county))%>%
  select(valid_start,none,d0,d1,d2,d3,d4)%>%
  pivot_longer(!valid_start,
               names_to="drought_level",
               values_to = "population")%>%
  group_by(valid_start)%>%
  # Share of that week's total population in each class.
  mutate(total_population=sum(population),
         pop_per=100*population/total_population)%>%
  mutate(drought_level=case_when(drought_level=="none"~"None",
                                 drought_level=="d0"~"Abnormally Dry",
                                 drought_level=="d1"~"Moderate",
                                 drought_level=="d2"~"Severe",
                                 drought_level=="d3"~"Extreme",
                                 drought_level=="d4"~"Exceptional"),
         # rank is computed but unused downstream.
         rank=case_when(drought_level=="None"~1,
                        drought_level=="Abnormally Dry"~2,
                        drought_level=="Moderate"~3,
                        drought_level=="Severe"~4,
                        drought_level=="Extreme"~5,
                        drought_level=="Exceptional"~6))%>%
  # Reversed level order so the worst class stacks at the bottom of the area chart.
  mutate(drought_level = factor(drought_level, levels = c("Exceptional","Extreme","Severe","Moderate","Abnormally Dry","None")))%>%
  group_by(valid_start,drought_level)%>%
  summarise(pop_per=sum(pop_per))
# Shared styling constants for both plots.
font<-"Ink Free"
text_colour<-"black"
font_size=20
# Number of animation frames = number of most-recent weeks to show.
n_frames<-100
# Animated county choropleth of the dominant drought class, one frame per week.
map_plot<-map_data%>%
  filter(valid_start>=unique(map_data$valid_start)[n_frames])%>%
  ggplot(aes(long, lat, group = group)) +
  geom_polygon(aes(fill=metric)) +
  borders("state")+
  coord_map() +
  scale_fill_brewer(palette = "YlOrRd")+
  theme_minimal()+
  theme(
    plot.caption=element_text(family = font,color = text_colour,size = 14),
    text = element_text(family = font,color = text_colour, size = font_size),
    panel.grid = element_blank(),
    axis.text.x = element_blank(),
    axis.ticks.x = element_blank(),
    axis.text.y= element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    legend.title=element_blank(),
    legend.text=element_text(colour=text_colour,size=16)
  )+
  xlab("")+
  ylab("")+
  labs(title = 'Drought levels in the US from August 2019 to August 2021')+
  transition_manual(valid_start)
# Animated stacked-area chart of the national share of population in each
# drought class (worst classes at the bottom), over the same final n_frames
# weeks as the map; "None" is omitted so the stack shows drought only.
time_series_plot<-drought%>%
  filter(valid_start>=unique(drought$valid_start)[length(unique(drought$valid_start))-n_frames][1])%>%
  filter(drought_level!="None")%>%
  # year/month are computed but not used in the plot below.
  mutate(year=year(valid_start),
         month=month(valid_start))%>%
  ggplot(aes(x=valid_start, y=pop_per, fill=drought_level)) +
  geom_area()+
  scale_x_date(date_labels = "%b %y",
               date_breaks = "3 months")+
  # direction = -1 keeps colours aligned with the map's class ordering.
  scale_fill_brewer(palette = "YlOrRd",direction=-1)+
  theme(
    #legend
    legend.title=element_blank(),
    legend.text=element_text(colour=text_colour,size=14),
    legend.position = "none",
    #title and caption
    plot.caption=element_text(family = font,color = text_colour,size = 16),
    #text
    text = element_text(family = font,color = text_colour, size = font_size),
    #panel
    panel.grid.major = element_line(colour="grey"),
    plot.background = element_rect(fill = "white"),
    panel.background = element_rect(fill = "white", colour = "white"),
    #axes
    axis.text.x = element_text(family = font,color = text_colour,size = 14),
    axis.ticks.x = element_blank(),
    axis.text.y = element_text(family = font,color = text_colour,size = 14)
  )+
  xlab("")+
  ylab("")+
  labs(title = 'Percentage of US population experiencing drought levels of at least Abnormally Dry',
       caption = "Data from the US Drought Monitor website | Laura Cole | #TidyTuesday")+
  transition_reveal(valid_start)
map_animation<-animate(map_plot,
duration=n_frames/20,
fps=20,
height=675,
width=1200,
units="px",
res=90,
device='ragg_png',
end_pause=round(n_frames/10,0),
renderer = magick_renderer())
time_series_animation<-animate(time_series_plot,
duration=n_frames/20,
fps=20,
height=250,
width=1200,
units="px",
res=90,
device='ragg_png',
end_pause=round(n_frames/10,0),
renderer = magick_renderer())
final_gif <- image_append(c(map_animation[1], time_series_animation[1]),stack=TRUE)
for(i in 2:n_frames){
combined <- image_append(c(map_animation[i], time_series_animation[i]),stack=TRUE)
final_gif <- c(final_gif, combined)
}
rm(map_animation,time_series_animation,combined)
image_write_gif(
image = final_gif,
loop = 0,
path = here::here("2021\\wk_30_droughts\\final_animation.gif"))
|
fcb81153e4c8adfb86d33d4fa0ccfdf46a187b97 | b8249fe50c83fb2e2137093126bda162d46dde31 | /R/VectorTable.R | d2c4c857ef111bd6c9d2c904b14e2129a7a36ab7 | [] | no_license | danielle0730/domstat-cleanup | 3a608e36ad0c23a710324e1b2d40c4280484794f | 41ea80f141cac4d644e8de33ded099f7278a7d30 | refs/heads/master | 2020-04-01T11:18:24.353518 | 2018-10-26T18:35:28 | 2018-10-26T18:35:28 | 152,792,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 663 | r | VectorTable.R | #' @export
# table of checked entires in vector variable ("check all that apply")
# returns count and frequencies per option
VectorTable <- function(variables) {
n.records <- dim(variables)[1]
n.options <- dim(variables)[2]
output <- data.frame(rep("", n.options), rep("", n.options))
for (i in 1:n.options) {
output[i, 1] <- unlist(strsplit(colnames(variables)[i], " \\| "))[2]
count <- sum(variables[, i] == "CHECKED")
percentage <- Fixed(100 * (count / n.records))
output[i, 2] <- paste(count, " (", percentage, "%)", sep = "")
}
colnames(output) <- c("COL.2", "COL.3")
rownames(output) <- NULL
return(output)
} |
c2886a83ae604f643679bb9392572c0c526f0fb8 | 546e1d0c58a3983dd80908ac6edc8d375e992c64 | /R/zzz.R | 3b352cfc43e78d65ce599942a5c4c7ad590a05a8 | [] | no_license | cran/yacca | 58ba74e5dd755ec955c798d325516e2f1dc12ccb | a633c8343f36fc729d02b24f21f07eb31df14692 | refs/heads/master | 2022-08-29T16:48:14.156036 | 2022-03-07T06:40:02 | 2022-03-07T06:40:02 | 17,700,951 | 1 | 3 | null | 2022-08-06T08:48:45 | 2014-03-13T06:48:18 | R | UTF-8 | R | false | false | 947 | r | zzz.R | ######################################################################
#
# zzz.R
#
# copyright (c) 2018, Carter T. Butts <buttsc@uci.edu>
# Last Modified 2/25/18
# Licensed under the GNU General Public License version 3
#
# Part of the R/yacca package; based on the zzz.R files from sna
# and other statnet packages (all hail).
#
# .onAttach is run when the package is loaded with library(yacca)
#
######################################################################
.onAttach <- function(libname, pkgname){
temp<-packageDescription("yacca")
msg<-paste(temp$Package,": ",temp$Title,"\n",
"Version ",temp$Version,
" created on ",
temp$Date,".\n", sep="")
msg<-paste(msg,"copyright (c) 2008, Carter T. Butts, University of California-Irvine\n",sep="")
msg<-paste(msg,'For citation information, type citation("yacca").\n')
msg<-paste(msg,'Type help("yacca-package") to get started.\n')
packageStartupMessage(msg)
}
|
a4a9508f6f289fad031082ff8e3f5e2636f4aab1 | 5781cc9568b34d8149ce2a2ea51e4fbaa7336ef8 | /plot4.R | 6c2588b8d85d2963393486f42e938a5f0556a241 | [] | no_license | YuliyaBoozer/ExData_Plotting1 | fbd86b91b12312ac4b1abd6f20180c06fcf86d18 | d86c72d0e7df3ea4b10fbb2dec74eef0254afb38 | refs/heads/master | 2021-01-22T16:37:37.697015 | 2015-10-08T05:15:07 | 2015-10-08T05:15:07 | 43,795,805 | 0 | 0 | null | 2015-10-07T04:53:11 | 2015-10-07T04:53:10 | null | UTF-8 | R | false | false | 1,392 | r | plot4.R | #install package if does not exist
if("sqldf" %in% rownames(installed.packages()) == FALSE)
{
install.packages("sqldf")
}
#load subset of needed data
library(sqldf)
dataSet <- read.csv.sql("household_power_consumption.txt",sql = 'select * from file where Date in("1/2/2007","2/2/2007") ', sep = ";")
closeAllConnections()
#Set proper data types
dataSet$Date <- as.Date(dataSet$Date, format="%d/%m/%Y")
DateTime <- paste(as.Date(dataSet$Date), dataSet$Time)
dataSet$DateTime <- as.POSIXct(DateTime)
# Make Plot 4
par(mfrow = c(2,2), mar = c(4,5,2,1), oma = c(3,1,0,0), cex = 0.7)
plot(dataSet$Global_active_power~dataSet$DateTime, type="l", ylab="Global Active Power", xlab="")
plot(dataSet$Voltage~dataSet$DateTime, type="l", ylab="Voltage", xlab="datetime")
with (dataSet, plot(DateTime, Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = ""))
with(dataSet, lines(DateTime, Sub_metering_1, col = "Black"))
with(dataSet, lines(DateTime, Sub_metering_2, col = "Red"))
with(dataSet, lines(DateTime, Sub_metering_3, col = "Blue"))
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=1, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(dataSet$Global_reactive_power~dataSet$DateTime, type="l", ylab="Global_reactive_power", xlab="datetime")
#Save the file
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() |
a9c5a96de79f8d1addd7a7b9eba89fa82bb11374 | 9e6a6b1538766b29eb580c7094b989f4b3b3806d | /run_analysis.R | 92f5b0132ae69f121630dd39470c0a19349c34ea | [] | no_license | seanolondon/GettingAndCleaningDataCourseProject | 017406948bca99df57dc375b7fdb8aa75f57f123 | 7d71f502a065d1aa8a9b5c54426fea381f28931d | refs/heads/master | 2021-01-20T01:10:55.342093 | 2017-04-24T11:43:00 | 2017-04-24T11:43:00 | 89,230,520 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,241 | r | run_analysis.R | ## See README for how to use script
## See CodeBook describing variables
library(dplyr)
#setwd
setwd("~/coursera/CleaningDataCourseProject/UCI HAR Dataset")
##read in all data tables
#feature names (rows) (x)
featurenames <- read.table('features.txt')
#Links the class labels with their activity name.
actlab <- read.table('activity_labels.txt')
#Training set. (features)
xtrain <- read.table('train/X_train.txt')
#Training labels. (activity)
ytrain <- read.table('train/y_train.txt')
#Test set. (features)
xtest <- read.table('test/X_test.txt')
#Test labels. (activity)
ytest <- read.table('test/y_test.txt')
#train subjects
subjtrain <- read.table('train/subject_train.txt')
#test subjects
subjtest <- read.table('test/subject_test.txt')
#merge test and train tables
subject <- rbind(subjtrain, subjtest)
activity <- rbind(ytrain,ytest)
features <- rbind(xtrain,xtest)
#check layout of tables
str(subject)
str(activity)
str(features)
#apply headers to columns, subject and activity have one column,
names(subject) <- c("subject")
names(activity) <- c("activity")
names(features) <- featurenames[,2]
#merge subject and activity columns
df <- cbind(subject, activity)
#check 2 columns, 10299 rows
dim(df)
#merge features with subject and activity
df <- cbind(features, df)
#check 563 columns, 10299 rows, with column headers, and values in rows
str(df)
#identify meaurements on mean and standard deviation(std)
#measurements come from the features, and mean std identified from featurenames
meanstdfeatures <- featurenames$V2[grep('mean|std|Mean', featurenames$V2)]
#subset the mean, std features, subject and activity
dfSub <- subset(df, select = (c(as.character(meanstdfeatures), "subject", "activity")))
#change the activity numeric to values to text values. Read labels from file.
#head function used to see change
head(dfSub$activity, n = 50)
activitynames <- read.table('activity_labels.txt')
dfSub$activity <- factor(dfSub$activity, levels = activitynames$V1, labels = activitynames$V2)
head(dfSub$activity, n = 50)
#use features-info.txt to find column naming conventions
#change these abbreviations/shortenings to better ones
#t = Time
#Acc = Accelerometer
#Gyro = Gyroscope
#f = Frequency
#Mag = Magnitude
#angle = AngularVelocity
names(dfSub)
names(dfSub) <- gsub("^t", "Time", names(dfSub))
names(dfSub) <- gsub("Acc", "Accelerometer", names(dfSub))
names(dfSub) <- gsub("Gyro", "Gyroscope", names(dfSub))
names(dfSub) <- gsub("Mag", "Magnitude", names(dfSub))
names(dfSub) <- gsub("^angle", "AngularVelocity", names(dfSub))
names(dfSub) <- gsub("^f", "Frequency", names(dfSub))
names(dfSub) <- gsub("BodyBody", "Body", names(dfSub))
names(dfSub) <- gsub('tBody', 'TimeBody', names(dfSub))
names(dfSub)
#aggregate the the mean of each variable by subject and activity type
#this is a separate data set.
df2 <- aggregate(. ~ subject + activity, data = dfSub, FUN = mean)
head(df2)
#make tidy by ordering subject number then activity type
df2 <- df2[order(df2$subject, df2$activity)]
#review order to confirm tidy
head(df2[,1:4], n = 20)
tail(df2[,1:4], n = 20)
#write the tidy data set
setwd("~/coursera/CleaningDataCourseProject/final")
write.table(df2, file = "tidydataset.txt", row.name=FALSE)
|
7ea9c21fa3b753063e95a24327d126852651cae2 | a7b6f2da032fb4113ac494a8830aa570295ad2f7 | /R/ncbi_snp_summary.R | c7d20903f495d685fb03feb45adcc7339da9e936 | [
"MIT"
] | permissive | gyd1990/rsnps | ad8760f2d6a5a284fe2054fde6408c478d81a785 | 898bee69459790bc8667729fa922841f0d36dcd0 | refs/heads/master | 2020-03-25T22:34:49.855029 | 2018-04-11T20:58:47 | 2018-04-11T20:58:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,735 | r | ncbi_snp_summary.R | #' Query NCBI's dbSNP for summary information on a set of SNPs
#'
#' @export
#' @param x A vector of SNPs (with or without 'rs' prefix)
#' @param ... Curl options passed on to [crul::HttpClient]
#' @seealso [ncbi_snp_query2()]
#' @examples \dontrun{
#' # use with 'rs' or without it
#' ncbi_snp_summary("rs420358")
#' ncbi_snp_summary("420358")
#'
#' # you can pass > 1
#' x <- c("rs332", "rs420358", "rs1837253", "rs1209415715", "rs111068718")
#' ncbi_snp_summary(x)
#'
#' ncbi_snp_summary("rs420358")
#' ncbi_snp_summary("rs332") # warning, merged into new one
#' ncbi_snp_summary("rs121909001")
#' ncbi_snp_summary("rs1837253")
#' ncbi_snp_summary("rs1209415715") # no data available
#' ncbi_snp_summary("rs111068718") # chromosomal information may be unmapped
#' }
ncbi_snp_summary <- function(x, ...) {
stopifnot(inherits(x, "character"))
x <- gsub("^rs", "", x)
args <- list(db = 'snp', retmode = 'flt', rettype = 'flt',
id = paste(x, collapse = ","))
url <- "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi"
cli <- crul::HttpClient$new(url = url, opts = list(...))
res <- cli$get(query = args)
res$raise_for_status()
tmp <- res$parse("UTF-8")
xml <- xml2::read_xml(tmp)
docsums <- xml2::xml_find_all(xml, "//DocSum")
dats <- lapply(docsums, function(w) {
items <- xml2::xml_find_all(w, "Item")
unlist(lapply(items, make_named_list), FALSE)
})
dats <- lapply(dats, function(z) {
gn <- stract_(z$docsum, "GENE=[A-Za-z0-9]+:[0-9]+,?([A-Za-z0-9]+:[0-9]+)?")
gn <- sub("GENE=", "", gn)
z$gene2 <- gn %||% NA_character_
z
})
rbl(dats)
}
make_named_list <- function(x) {
as.list(stats::setNames(xml2::xml_text(x),
tolower(xml2::xml_attr(x, "Name"))))
}
|
8411859c4b83a4d2d949e5bfa9e79e1e508b3ae7 | fe59f5f5edb013868bbc8c129be765b880329973 | /Butterflies.R | 0c0a44862d974501d382ca5d696ec7bfa2df2c32 | [] | no_license | ginalamka/ABM_Class | 4e82c19915bb55440e4a3f77678224ff075e922c | 4bd15197f547b2da2bf6c02dc841deaaeb4473c5 | refs/heads/main | 2023-09-03T19:12:31.127194 | 2021-10-14T17:59:00 | 2021-10-14T17:59:00 | 399,605,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,070 | r | Butterflies.R | #in ABM class 8.24.2021
#the premise is that there are butterflies that go up in elevation
setwd("C:/Users/HP/Box/New Computer/Auburn/Data/ABM_Class")
directory = getwd()
outdir = paste(directory, "/output/", sep = "")
source(paste(directory, "/source/FunctionSourcer.R", sep = ''))
#parameters
elevation.V = c(0,400) #peak elevation min and max
landscape.V = 150 #number of patches on each side, total patch number = landscape*landscape
nindv.V = 50 #number of individuals to simulate
nsteps.V = 500 #number of steps an individual can take
move.V = 0.8 #decimal likelihood of individual moving to highest neighbor patch (R&G call this q)
reps = 1 #number of times the simmulation is replicated. can increase this for more simulations (5, 10, 100 times)
parameters = expand.grid(elevation.V, landscape.V, nindv.V, nsteps.V, move.V)
colnames(parameters) = c("elevation", "landscape", "nindv", "nsteps", "move")
parameters = parameters[parameters$elevation!=0,]
for(p in 1:nrow(parameters)){
elevation = c(0, parameters$elevation[p])
landscape = parameters$landscape[p]
nindv = parameters$nindv[p]
nsteps = parameters$nsteps[p]
move = parameters$move[p]
for(r in 1:reps){
#another way to paste, put at top of script, not bottom
#**MAKE SURE dev.off() is still there at the end!!!***
#pdf(paste(directory, "/Output/butterflypath_", r, ".pdf", sep=""), width=8, height=8)
#notice the _, r, .pdf syntax makes it so you dont overwrite each figure
#init landscape
land = LandscapeInit(elevation)
image(land)
#init individuals on landscape
pop = NewPop(nindv, landscape)
points(pop[,1]/150, pop[,2]/150, pch=19, cex=0.5)
dev.copy(png, "../Output/indvlocation.png")
dev.off()
#pop = rbind(pop,NewPop(nindv,landscape)) #this will add the different NewPops together
#plot(-100,-100, xlim=c(0,150), ylim=c(0,150)) #this puts the points on its own figure (note 0-150 axes)
#points(pop[,1], pop[,2], pch=19, cex=0.5) #puts points on own fig
#allow individuals to move within landscape
paths = NULL
for(i in 1:nindv){
indv = pop[i,,drop = FALSE]
#chart movement
movepath = Move(nindv, land, move, nsteps, elevation)
#plot movement
lines(movepath[seq(1,length(movepath), 2)]/150, movepath[seq(2,length(movepath), 2)]/150, lwd = 2)
#record path in single object for all indv
paths = rbind(paths, movepath) #save cooridantes into "paths"
}
rownames(paths) = seq(1, nindv, 1)
#extract needed output from simulation
#for this project, we will not be doing any stats, but export something (like a figure) to see how the model worked
dev.copy(png, "../Output/butterflypaths.png")
dev.off()
#to write a table with the x, y coordinates use the code below
#write.table(paths, paste(directory, "/Output/buttpath_", r, ".csv", sep=""), sep="/t", col.names=F, row.names=F)
}
}
|
cec57535a3d34206352e2701b36d7cc35e9d5e59 | 2e5ef9f2ef34cf60013c9e362037d49b610c7048 | /SFEgarchest/SFEgarchest.r | af5e931941d9e8a3761b27dcccc0ddb2eaa983e0 | [] | no_license | SHIccc/SFM2-SS16-ToDo | 67afa1d7410cdabe76cfc6bcfdbf32b65dd5b441 | 8d3c9e1de44c6da6e3c57658ff8d5b5fe653210e | refs/heads/master | 2021-01-21T05:48:56.047740 | 2016-06-17T10:14:30 | 2016-06-17T10:14:30 | 64,660,369 | 1 | 0 | null | 2016-08-01T11:07:20 | 2016-08-01T11:07:18 | R | UTF-8 | R | false | false | 4,009 | r | SFEgarchest.r | # ---------------------------------------------------------------------
# Book: SFE3
# ---------------------------------------------------------------------
# Quantlet: SFEgarchest
# ---------------------------------------------------------------------
# Description: SFEgarchest reads the date, DAX index values, stock
# prices of 20 largest companies at Frankfurt Stock
# Exchange (FSE), FTSE 100 index values and stock prices
# of 20 largest companies at London Stock Exchange (LSE)
# and estimates various GARCH models for the DAX and
# FTSE 100 daily return procesess from 1998 to 2007
# ---------------------------------------------------------------------
# Usage: -
# ---------------------------------------------------------------------
# Inputs: none
# ---------------------------------------------------------------------
# Output: P - matrix of estimated coefficients
# T - matrix of t-statistics of estimated coefficients
# ---------------------------------------------------------------------
# Example:
# ---------------------------------------------------------------------
# Author: Matlab: Andrija Mihoci 20091019
# R: Awdesch Melzer 20121008
# ---------------------------------------------------------------------
# close windows, clear history
rm(list=ls(all=TRUE))
graphics.off()
install.packages("tseries")
library(tseries)
install.packages("fGarch")
library(fGarch)
install.packages("rugarch")
library(rugarch)
# Read data for FSE and LSE
DS = read.table("FSE_LSE.dat");
D = DS[,1] # date
S = DS[,2:43] # S(t)
s = log(S)
end = length(D) # log(S(t))
r = s[2:end,] - s[1:(end-1),] # r(t)
n = length(r) # sample size
t = c(1:n) # time index, t
# Parameter estimation of various GARCH models
# (1) AR(1)-GARCH(1,1)
#DAX.AR1GARCH11 = garchFit(~ arma(1,0) + garch(1,1), r[,1], trace=T)
DAX.AR1GARCH11 = garchFit(~ arma(1,0) + garch(1,1), r[,1], trace=F)
#FTSE.AR1GARCH11 = garchFit(~ arma(1,0) + garch(1,1), r[,22], trace=T)
FTSE.AR1GARCH11 = garchFit(~ arma(1,0) + garch(1,1), r[,22], trace=F)
# (2) AR(1)-TGARCH(1,1)
DAX.AR1TGARCH11 = garchFit(~arma(1,0)+aparch(1,1), data = r[,1], delta = 2, include.delta = FALSE,leverage=TRUE)
FTSE.AR1TGARCH11 = garchFit(~arma(1,0)+aparch(1,1), data = r[,22], delta = 2, include.delta = FALSE,leverage=TRUE)
# (3) AR(1)-EGARCH(1,1)
ctrl = list(RHO = 1,DELTA = 1e-8,MAJIT = 100,MINIT = 650,TOL = 1e-6)
spec = ugarchspec(variance.model = list(model = "eGARCH", garchOrder = c(1,1)), mean.model = list(armaOrder = c(1,0), include.mean = TRUE), distribution.model = "std")
DAX.AR1EGARCH11 = ugarchfit(data = r[,1], spec = spec, solver = "solnp", solver.control = ctrl)
FTSE.AR1EGARCH11 = ugarchfit(data = r[,22], spec = spec, solver = "solnp", solver.control = ctrl)
# Summary of parameter estimates (P), standard errors (E), t-statistics (T) and p-values (Pvalues)
P = matrix(0,7,6)
P[,1]=c(DAX.AR1GARCH11@fit$matcoef[,1],0,0)
P[,2]=c(FTSE.AR1GARCH11@fit$matcoef[,1],0,0)
P[,3]=c(DAX.AR1TGARCH11@fit$matcoef[,1],0)
P[,4]=c(FTSE.AR1TGARCH11@fit$matcoef[,1],0)
P[,5]=c(DAX.AR1EGARCH11@fit$matcoef[,1])
P[,6]=c(FTSE.AR1EGARCH11@fit$matcoef[,1])
E = matrix(0,7,6)
E[,1]=c(DAX.AR1GARCH11@fit$matcoef[,2],0,0)
E[,2]=c(FTSE.AR1GARCH11@fit$matcoef[,2],0,0)
E[,3]=c(DAX.AR1TGARCH11@fit$matcoef[,2],0)
E[,4]=c(FTSE.AR1TGARCH11@fit$matcoef[,2],0)
E[,5]=c(DAX.AR1EGARCH11@fit$matcoef[,2])
E[,6]=c(FTSE.AR1EGARCH11@fit$matcoef[,2])
T = P/E
Pvalues = matrix(0,7,6)
Pvalues[,1]=c(DAX.AR1GARCH11@fit$matcoef[,4],0,0)
Pvalues[,2]=c(FTSE.AR1GARCH11@fit$matcoef[,4],0,0)
Pvalues[,3]=c(DAX.AR1TGARCH11@fit$matcoef[,4],0)
Pvalues[,4]=c(FTSE.AR1TGARCH11@fit$matcoef[,4],0)
Pvalues[,5]=c(DAX.AR1EGARCH11@fit$matcoef[,4])
Pvalues[,6]=c(FTSE.AR1EGARCH11@fit$matcoef[,4])
P
E
T
Pvalues |
deb2d897a985992666e3d656f023ec846d26553b | 209af0d72fc59bf161725e157ec71706c7d13a7c | /Code/2021_0709_CMP_modelstructure_stability.R | 6cbcee174a5a1e57f89c6943ec817277a822fd3f | [] | no_license | QiuBingCheng/XAI-Practice | 71bd2294e7531c75a4e00dfcd98a9eca81556bca | 36d8e7cf1537240f1f63f33266dcf1f64ba4069f | refs/heads/main | 2023-08-22T01:45:21.997671 | 2021-10-15T07:15:35 | 2021-10-15T07:15:35 | 417,400,140 | 0 | 0 | null | null | null | null | BIG5 | R | false | false | 22,633 | r | 2021_0709_CMP_modelstructure_stability.R | ##################################################################################################
############################### flatten 192 feature###############################################
library(tensorflow)
library(keras)
library(caret)
library(data.table)
library(dplyr)
library(ggplot2)
library(readr)
library(fs)
library(abind)
library(tidyverse)
library(magrittr)
library(factoextra)
library(ggpubr)
library(ggforce)
library(amap)
library(e1071)
## Origin: load the preprocessed A123 train/test inputs and targets.
## All four objects were serialized with saveRDS() by the preprocessing step;
## presumably xtrain/xtest are 316 x 19 x 1 arrays (matching the model input
## shape below) and trainy/testy are the scalar targets -- TODO confirm.
data_dir <- "D:/AmberChu/Handover/Data/CMP/Preprocess/final_set"
xtrain <- readRDS(file.path(data_dir, "A123_xtrain.rds"))
xtest  <- readRDS(file.path(data_dir, "A123_xtest.rds"))
trainy <- readRDS(file.path(data_dir, "A123_trainy.rds"))
testy  <- readRDS(file.path(data_dir, "A123_testy.rds"))
#### normal CAE(192) -----------------------------------------------------------
## Plain convolutional autoencoder: the 316 x 19 x 1 input is compressed by
## three conv + pooling stages to a flat 192-dim code (12 * 1 * 16) and then
## reconstructed back to the input shape by the mirrored decoder.
## encoderA123 (average pooling; the A/B456 variant kept below uses max
## pooling instead -- swap the two when fitting those data sets)
enc_input = layer_input(shape = c(316, 19, 1), name = "input")
enc_output = enc_input %>%
  layer_conv_2d(64, kernel_size = c(3, 3), padding = "same", name = "encoder1") %>%
  layer_activation_leaky_relu(name = "leak1") %>%
  layer_average_pooling_2d(c(3, 3), padding = "same", name = "max_pool1") %>%
  layer_conv_2d(32, kernel_size = c(3, 3), padding = "same", name = "encoder2") %>%
  layer_activation_leaky_relu(name = "leak2") %>%
  layer_average_pooling_2d(c(3, 3), padding = "same", name = "max_pool2") %>%
  layer_conv_2d(16, kernel_size = c(3, 3), padding = "same", name = "encoder3") %>%
  layer_activation_leaky_relu(name = "leak3") %>%
  layer_average_pooling_2d(c(3, 3), padding = "same", name = "max_pool3") %>%
  layer_flatten(name = "flatten")
encoder <- keras_model(enc_input, enc_output)
summary(encoder)
## encoder AB456 (max-pooling variant, kept for reference)
# enc_input = layer_input(shape = c(316, 19, 1),name="input")
# enc_output = enc_input %>%
#   layer_conv_2d(64,kernel_size=c(3,3), padding="same",name="encoder1") %>%
#   layer_activation_leaky_relu(name="leak1")%>%
#   layer_max_pooling_2d(c(3,3), padding="same",name="max_pool1")%>%
#   layer_conv_2d(32,kernel_size = c(3,3),padding="same",name="encoder2")%>%
#   layer_activation_leaky_relu(name="leak2")%>%
#   layer_max_pooling_2d(c(3,3),padding="same",name="max_pool2")%>%
#   layer_conv_2d(16,kernel_size = c(3,3),padding="same",name="encoder3")%>%
#   layer_activation_leaky_relu(name="leak3")%>%
#   layer_max_pooling_2d(c(3,3),padding="same",name="max_pool3")%>%
#   layer_flatten()
# encoder <- keras_model(enc_input,enc_output)
# summary(encoder)
## decoder: mirrors the encoder (192 -> 12x1x16 -> upsample back to 316x19x1,
## sigmoid output named "autoencoder")
decoder <- encoder$output %>%
  layer_reshape(c(12, 1, 16), name = "reshape") %>%
  layer_conv_2d(16, kernel_size = c(3, 3), padding = "same", name = "decoder1") %>%
  layer_activation_leaky_relu(name = "leak4") %>%
  layer_upsampling_2d(c(3, 3), name = "up_samp1") %>%
  layer_conv_2d(32, kernel_size = c(3, 3), padding = "same", name = "decoder2") %>%
  layer_activation_leaky_relu(name = "leak5") %>%
  layer_upsampling_2d(c(3, 3), name = "up_samp2") %>%
  layer_conv_2d(64, kernel_size = c(3, 3), padding = "valid", name = "decoder3") %>%
  layer_activation_leaky_relu(name = "leak6") %>%
  layer_upsampling_2d(c(3, 3), name = "up_samp3") %>%
  layer_conv_2d(1, kernel_size = c(3, 3), activation = "sigmoid", padding = "valid", name = "autoencoder")
autoencoder <- keras_model(encoder$input, decoder)
summary(autoencoder)
## checkpoint after every epoch and shrink the learning rate when the
## validation loss plateaus (reduction factor may still need tuning)
callbacks = list(
  callback_model_checkpoint("checkpoints.h5"),
  callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.1))
autoencoder %>% compile(optimizer = "RMSprop", loss = "mse")
## autoencoder fit: the input is its own target.
## NOTE(review): the argument is spelled `callbacks` here; the original used
## `callback=`, which relied on partial argument matching.
history <- autoencoder %>% fit(x = xtrain, y = xtrain, validation_split = 0.1,
                               batch_size = 10, epochs = 200, callbacks = callbacks)
## predict_loss & reconstruct_loss (one-off export of epoch-200 losses, kept
## for reference)
# history_df <- as.data.frame(history)
# train_loss <-data.frame(t(history_df %>%
#                             filter(metric=="loss" & data=="training" & epoch==200)%>%
#                             select(value)))
# colnames(train_loss)<-"autoencoder_loss"
#
# test_loss <-data.frame(t(history_df %>%
#                            filter(metric=="loss" & data=="validation" & epoch==200)%>%
#                            select(value)))
# colnames(test_loss)<-c("autoencoder_loss")
#
# saveRDS(train_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_trainloss_normalCAE.rds")
# saveRDS(test_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_testloss_normalCAE.rds")
# save_model_hdf5(autoencoder,"C:/Users/User/Desktop/2021_0605/model/B456_normalCAE.h5")
# stability: score the reconstruction MSE on the held-out set and persist both
# the score and the fitted model.
# NOTE(review): the original read an old loss file into `test_history` and
# immediately overwrote it with evaluate(); those dead lines are commented out
# below, matching the CNN / CAE+ANN sections.
# test_history <- read_rds("C:/Users/User/Desktop/2021_0605/model/Results_loss/A456_testloss_normalCAE.rds")
# test_history <- data.frame(t(test_history))
test_history <- autoencoder %>% evaluate(x = xtest, y = xtest)
test_history <- data.frame(test_history)
saveRDS(test_history, "C:/Users/User/Desktop/2021_0709/CMP_data/Stability/A456_testloss_normalCAE1.rds")
save_model_hdf5(autoencoder, "C:/Users/User/Desktop/2021_0709/CMP_data/Stability/A456_normalCAE_1.h5")
###### CNN(192)-------------------------------------------------------------------------
## Supervised CNN baseline: the same 192-dim conv encoder feeds a dense
## regression head (96 -> 48 -> 24 -> 1); no reconstruction branch.
## encoder
## A123 variant (average pooling):
enc_input = layer_input(shape = c(316, 19, 1), name = "input")
enc_output = enc_input %>%
  layer_conv_2d(64, kernel_size = c(3, 3), padding = "same", name = "encoder1") %>%
  layer_activation_leaky_relu(name = "leak1") %>%
  layer_average_pooling_2d(c(3, 3), padding = "same", name = "max_pool1") %>%
  layer_conv_2d(32, kernel_size = c(3, 3), padding = "same", name = "encoder2") %>%
  layer_activation_leaky_relu(name = "leak2") %>%
  layer_average_pooling_2d(c(3, 3), padding = "same", name = "max_pool2") %>%
  layer_conv_2d(16, kernel_size = c(3, 3), padding = "same", name = "encoder3") %>%
  layer_activation_leaky_relu(name = "leak3") %>%
  layer_average_pooling_2d(c(3, 3), padding = "same", name = "max_pool3") %>%
  layer_flatten(name = "flatten")
encoder <- keras_model(enc_input, enc_output)
summary(encoder)
## AB456 variant (max pooling), kept for reference:
# enc_input = layer_input(shape = c(316, 19, 1),name="input")
# enc_output = enc_input %>%
#   layer_conv_2d(64,kernel_size=c(3,3), padding="same",name="encoder1") %>%
#   layer_activation_leaky_relu(name="leak1")%>%
#   layer_max_pooling_2d(c(3,3), padding="same",name="max_pool1")%>%
#   layer_conv_2d(32,kernel_size = c(3,3),padding="same",name="encoder2")%>%
#   layer_activation_leaky_relu(name="leak2")%>%
#   layer_max_pooling_2d(c(3,3),padding="same",name="max_pool2")%>%
#   layer_conv_2d(16,kernel_size = c(3,3),padding="same",name="encoder3")%>%
#   layer_activation_leaky_relu(name="leak3")%>%
#   layer_max_pooling_2d(c(3,3),padding="same",name="max_pool3")%>%
#   layer_flatten(name="flatten")
# encoder <- keras_model(enc_input,enc_output)
# summary(encoder)
## regression head: three leaky-ReLU dense layers down to one linear output
pred <- encoder$output %>%
  layer_dense(units = 96, name = "dec_class1") %>%
  layer_activation_leaky_relu(name = "leak4") %>%
  layer_dense(units = 48, name = "dec_class2") %>%
  layer_activation_leaky_relu(name = "leak5") %>%
  layer_dense(units = 24, name = "dec_class3") %>%
  layer_activation_leaky_relu(name = "leak6") %>%
  layer_dense(units = 1, name = "predict")
## NOTE(review): the original built the identical model twice (pred_model and
## cnn_model) and defined `callbacks` twice; the unused duplicates are removed.
cnn_model <- keras_model(encoder$input, pred)
summary(cnn_model)
## checkpoint every epoch; shrink the learning rate when the validation loss
## plateaus (reduction factor may still need tuning)
callbacks = list(
  callback_model_checkpoint("checkpoints.h5"),
  callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.1))
cnn_model %>% compile(optimizer = "RMSprop", loss = "mse")
## supervised fit against the scalar targets (`callbacks=` spelled out so it
## no longer relies on partial argument matching)
history <- cnn_model %>% fit(x = xtrain, y = trainy,
                             validation_split = 0.1, batch_size = 10,
                             epochs = 200, callbacks = callbacks)
## predict_loss & reconstruct_loss (one-off export of epoch-200 losses, kept
## for reference)
# history_df <- as.data.frame(history)
# train_loss <-data.frame(t(history_df %>%
#                             filter(metric=="loss" & data=="training" & epoch==200)%>%
#                             select(value)))
# colnames(train_loss)<-"predict_loss"
#
# test_loss <-data.frame(t(history_df %>%
#                            filter(metric=="loss" & data=="validation" & epoch==200)%>%
#                            select(value)))
# colnames(test_loss)<-c("predict_loss")
#
#
# saveRDS(train_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_trainloss_CNN.rds")
# saveRDS(test_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_testloss_CNN.rds")
# save_model_hdf5(cnn_model,"C:/Users/User/Desktop/2021_0605/model/B456_CNN.h5")
### 0711 stability --------------------------------
## Evaluate the prediction MSE on the held-out set and persist the score and
## the fitted model for the stability study.
# test_history <- read_rds("C:/Users/User/Desktop/2021_0605/model/Results_loss/A123_testloss_CNN.rds")
# test_history <- data.frame(t(test_history))
test_history <- cnn_model %>% evaluate(x = xtest, y = testy)
test_history <- data.frame(test_history)
print(test_history)
saveRDS(test_history, "C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456_testloss_CNN2.rds")
save_model_hdf5(cnn_model, "C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456_CNN_2.h5")
###### CAE+ANN(192)-------------------------------------------------------------------------
## Joint model: one conv encoder feeding both a dense regression head and a
## conv decoder; trained with two MSE losses (prediction + reconstruction),
## matched to the outputs by name ("predict" and "autoencoder").
## encoder
## encoder A123 variant (average pooling), kept for reference:
# enc_input = layer_input(shape = c(316, 19, 1),name="input")
# enc_output = enc_input %>%
#   layer_conv_2d(64,kernel_size=c(3,3), padding="same",name="encoder1") %>%
#   layer_activation_leaky_relu(name="leak1")%>%
#   layer_average_pooling_2d(c(3,3), padding="same",name="max_pool1")%>%
#   layer_conv_2d(32,kernel_size = c(3,3),padding="same",name="encoder2")%>%
#   layer_activation_leaky_relu(name="leak2")%>%
#   layer_average_pooling_2d(c(3,3),padding="same",name="max_pool2")%>%
#   layer_conv_2d(16,kernel_size = c(3,3),padding="same",name="encoder3")%>%
#   layer_activation_leaky_relu(name="leak3")%>%
#   layer_average_pooling_2d(c(3,3),padding="same",name="max_pool3")%>%
#   layer_flatten(name="flatten")
# encoder <- keras_model(enc_input,enc_output)
# summary(encoder)
## encoder AB456 (max pooling); 316x19x1 -> flat 192-dim code (12 * 1 * 16)
enc_input = layer_input(shape = c(316, 19, 1),name="input")
enc_output = enc_input %>%
  layer_conv_2d(64,kernel_size=c(3,3), padding="same",name="encoder1") %>%
  layer_activation_leaky_relu(name="leak1")%>%
  layer_max_pooling_2d(c(3,3), padding="same",name="max_pool1")%>%
  layer_conv_2d(32,kernel_size = c(3,3),padding="same",name="encoder2")%>%
  layer_activation_leaky_relu(name="leak2")%>%
  layer_max_pooling_2d(c(3,3),padding="same",name="max_pool2")%>%
  layer_conv_2d(16,kernel_size = c(3,3),padding="same",name="encoder3")%>%
  layer_activation_leaky_relu(name="leak3")%>%
  layer_max_pooling_2d(c(3,3),padding="same",name="max_pool3")%>%
  layer_flatten(name="flatten")
encoder <- keras_model(enc_input,enc_output)
summary(encoder)
## predictor: dense head 96 -> 48 -> 24 -> 1, linear output named "predict"
pred <- encoder$output%>%
  layer_dense(units=96,name="dec_class1")%>%
  layer_activation_leaky_relu(name="leak4")%>%
  layer_dense(units=48,name="dec_class2")%>%
  layer_activation_leaky_relu(name="leak5")%>%
  layer_dense(units=24,name="dec_class3")%>%
  layer_activation_leaky_relu(name="leak6")%>%
  layer_dense(units=1,name="predict")
pred_model <- keras_model(encoder$input,pred)
summary(pred_model)
## decoder: dense back to 192, reshape to 12x1x16, then upsample/conv back to
## the 316x19x1 input shape with a sigmoid output named "autoencoder"
decoder <- encoder$output %>%
  layer_dense(units = 192,name="dec_fully")%>%
  layer_activation_leaky_relu(name="leak7")%>%
  layer_reshape(c(12,1,16),name="reshape")%>%
  layer_conv_2d(16, kernel_size=c(3,3), padding="same",name="decoder1") %>%
  layer_activation_leaky_relu(name="leak8")%>%
  layer_upsampling_2d(c(3,3),name="up_samp1")%>%
  layer_conv_2d(32, kernel_size=c(3,3), padding="same",name="decoder2") %>%
  layer_activation_leaky_relu(name="leak9")%>%
  layer_upsampling_2d(c(3,3),name="up_samp2")%>%
  layer_conv_2d(64, kernel_size=c(3,3), padding="valid",name="decoder3") %>%
  layer_activation_leaky_relu(name="leak10")%>%
  layer_upsampling_2d(c(3,3),name="up_samp3")%>%
  layer_conv_2d(1, kernel_size=c(3,3), activation="sigmoid",padding="valid",name="autoencoder")
autoencoder <- keras_model(encoder$input,decoder)
summary(autoencoder)
## full model (model): one input, two outputs (prediction + reconstruction)
model <- keras_model(inputs=enc_input,outputs=c(pred,decoder))
summary(model)
## checkpoint every epoch; reduce the learning rate on val-loss plateaus
## (the reduction factor may need further tuning!)
callbacks = list(
  callback_model_checkpoint("checkpoints.h5"), callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.1))
model%>% compile(optimizer="RMSprop", loss=list(predict="mse",autoencoder="mse"))
## NOTE(review): `callback=` relies on partial matching of fit()'s `callbacks`
## argument -- confirm the callbacks are actually being applied.
history <- model %>% fit(x= xtrain, y= list(predict=trainy,autoencoder=xtrain),
                         validation_split=0.1,batch_size=10,epochs=200,callback=callbacks)
## predict_loss & reconstruct_loss (one-off export of epoch-200 losses, kept
## for reference)
# history_df <- as.data.frame(history)
# train_loss <-data.frame(t(history_df %>%
#                             filter(metric!="loss" & data=="training" & epoch==200)%>%
#                             select(value)))
# colnames(train_loss)<-c("predict_loss","autoencoder_loss")
#
# test_loss <-data.frame(t(history_df %>%
#                            filter(metric!="loss" & data=="validation" & epoch==200)%>%
#                            select(value)))
# colnames(test_loss)<-c("predict_loss","autoencoder_loss")
# saveRDS(train_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/A456_trainloss_CAE_ANN.rds")
# saveRDS(test_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/A456_testloss_CAE_ANN.rds")
# save_model_hdf5(model,"C:/Users/User/Desktop/2021_0605/model/A456_CAE_ANN.h5")
### 0711 stability: evaluate both losses on the held-out set and persist the
### scores and the fitted model for the stability study.
# test_history <- readRDS("C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_testloss_CAE_ANN.rds")
# test_history <- data.frame(t(test_history))
test_history <- model %>% evaluate(x=xtest,y=list("predict"=testy,"autoencoder"=xtest))
test_history <- data.frame(test_history)
print(test_history)
saveRDS(test_history,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456_testloss_CAE_ANN10.rds")
save_model_hdf5(model,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456_CAE_ANN_10.h5")
#########################
###### CAE+ SVR (192)-------------------------------------------------------------------------
#encoder A123
# enc_input = layer_input(shape = c(316, 19, 1),name="input")
# enc_output = enc_input %>%
# layer_conv_2d(64,kernel_size=c(3,3), padding="same",name="encoder1") %>%
# layer_activation_leaky_relu(name="leak1")%>%
# layer_average_pooling_2d(c(3,3), padding="same",name="max_pool1")%>%
# layer_conv_2d(32,kernel_size = c(3,3),padding="same",name="encoder2")%>%
# layer_activation_leaky_relu(name="leak2")%>%
# layer_average_pooling_2d(c(3,3),padding="same",name="max_pool2")%>%
# layer_conv_2d(16,kernel_size = c(3,3),padding="same",name="encoder3")%>%
# layer_activation_leaky_relu(name="leak3")%>%
# layer_average_pooling_2d(c(3,3),padding="same",name="max_pool3")%>%
# layer_flatten(name="flatten")
# encoder <- keras_model(enc_input,enc_output)
# summary(encoder)
# ## encoder A456/B456
## Encoder for the A456 / B456 convolutional autoencoder.
## Input: one 316 x 19 single-channel "image" per sample.
## Three (conv -> leaky ReLU -> 3x3 max-pool) stages shrink the feature map
## while the channel count steps 64 -> 32 -> 16; the final flatten produces
## the latent vector that both the prediction head and the decoder below
## consume (`pred <- encoder$output ...`, `decoder <- encoder$output ...`).
## NOTE(review): the commented-out A123 variant above differs only in using
## average pooling instead of max pooling -- presumably dataset-specific;
## confirm before reusing for another dataset.
enc_input = layer_input(shape = c(316, 19, 1),name="input")
enc_output = enc_input %>%
layer_conv_2d(64,kernel_size=c(3,3), padding="same",name="encoder1") %>%
layer_activation_leaky_relu(name="leak1")%>%
layer_max_pooling_2d(c(3,3), padding="same",name="max_pool1")%>%
layer_conv_2d(32,kernel_size = c(3,3),padding="same",name="encoder2")%>%
layer_activation_leaky_relu(name="leak2")%>%
layer_max_pooling_2d(c(3,3),padding="same",name="max_pool2")%>%
layer_conv_2d(16,kernel_size = c(3,3),padding="same",name="encoder3")%>%
layer_activation_leaky_relu(name="leak3")%>%
layer_max_pooling_2d(c(3,3),padding="same",name="max_pool3")%>%
layer_flatten(name="flatten")
## Stand-alone encoder model. Layer names matter: get_layer() is used later
## (e.g. get_layer(model, "predict")) to slice sub-models out of the graph,
## so do not rename layers casually.
encoder <- keras_model(enc_input,enc_output)
summary(encoder)
## classifier
pred <- encoder$output%>%
layer_dense(units=96,name="dec_pred")%>%
layer_activation_leaky_relu(name="leak4")%>%
layer_dense(units=1,name="predict")
pred_model <- keras_model(encoder$input,pred)
summary(pred_model)
## decoder
decoder <- encoder$output %>%
layer_dense(units = 192,name="dec_fully")%>%
layer_activation_leaky_relu(name="leak5")%>%
layer_reshape(c(12,1,16),name="reshape")%>%
layer_conv_2d(16, kernel_size=c(3,3), padding="same",name="decoder1") %>%
layer_activation_leaky_relu(name="leak6")%>%
layer_upsampling_2d(c(3,3),name="up_samp1")%>%
layer_conv_2d(32, kernel_size=c(3,3), padding="same",name="decoder2") %>%
layer_activation_leaky_relu(name="leak7")%>%
layer_upsampling_2d(c(3,3),name="up_samp2")%>%
layer_conv_2d(64, kernel_size=c(3,3), padding="valid",name="decoder3") %>%
layer_activation_leaky_relu(name="leak8")%>%
layer_upsampling_2d(c(3,3),name="up_samp3")%>%
layer_conv_2d(1, kernel_size=c(3,3), activation="sigmoid",padding="valid",name="autoencoder")
autoencoder <- keras_model(encoder$input,decoder)
summary(autoencoder)
## full model (model)
model <- keras_model(inputs=enc_input,outputs=c(pred,decoder))
summary(model)
# Declare loss function
# = max(0, abs(target - predicted) + epsilon)
# 1/2 margin width parameter = epsilon
callbacks = list(
callback_model_checkpoint("checkpoints.h5"), callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.1))
### custom loss:
## Epsilon-insensitive (SVR-style) loss: mean(max(0, |y_pred - y_true| - eps)).
## Errors smaller than eps = 0.5 incur no penalty; per the comment above,
## eps is the half margin-width parameter.
## NOTE(review): "eplison" is a typo for "epsilon", kept as-is because the
## constant is captured by name inside svr_loss below.
eplison <- tf$constant(0.5)
# Margin term in loss
svr_loss <- function(y_true,y_pred)
{
# max(0, |pred - true| - eps), averaged over the batch
tf$reduce_mean(tf$maximum(0.,tf$subtract(tf$abs(tf$subtract(y_pred,y_true)),eplison)))
}
## Attach the name keras' R wrapper uses for custom objects -- presumably so
## the loss can be serialised/deserialised with the saved model; confirm.
attr(svr_loss, "py_function_name") <- "svr_loss"
callbacks = list(
callback_model_checkpoint("checkpoints.h5"), callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.1)) ### 再調整!!
model%>% compile(optimizer="RMSprop", loss=list(predict=svr_loss,autoencoder="mse"))
# history <- model %>% fit(x= xtrain, y= list(predict=trainy,autoencoder=xtrain),
# validation_data=list(x=xtest,y=list(predict=testy,autoencoder=xtest)),batch_size=10,epochs=200,callback=callbacks)
history <- model %>% fit(x= xtrain, y= list(predict=trainy,autoencoder=xtrain),
validation_split=0.1,batch_size=10,epochs=200,callback=callbacks)
################
# verify predict mse:
# layer_name<-"predict"
# encoder <- keras_model(inputs=model$input,outputs=get_layer(model,layer_name)$output)
# summary(encoder)
# train_pred = encoder%>% predict(xtrain) # 570 12
# test_pred = encoder %>% predict(xtest) # 245 12
# dim(train_pred)
# pred1 <- data.frame(trainy,train_pred)
# colnames(pred1)<-c("actual","pred")
# pred1<- pred1%>%
# mutate(SSE=(actual-pred)^2)
# MSE_train<- mean(pred1$SSE) #19.98398
#
# pred2 <- data.frame(testy,test_pred)
# colnames(pred2)<-c("actual","pred")
# pred2<- pred2%>%
# mutate(SSE=(actual-pred)^2)
# MSE_test <- mean(pred2$SSE) #25.92496
#
# ## predict_loss & reconstruct_loss
# history_df <- as.data.frame(history)
# train_loss <-data.frame(t(history_df %>%
# filter(metric!="loss" & data=="training" & epoch==200)%>%
# select(value)))
# train_loss <- cbind(MSE_train,train_loss$X2,train_loss$X1)
# colnames(train_loss)<-c("predict_loss","autoencoder_loss","eplison_loss")
#
# test_loss <-data.frame(t(history_df %>%
# filter(metric!="loss" & data=="validation" & epoch==200)%>%
# select(value)))
#
# test_loss <- cbind(MSE_test,test_loss$X2,test_loss$X1)
# colnames(test_loss)<-c("predict_loss","autoencoder_loss","eplison_loss")
# saveRDS(train_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_trainloss_CAE_SVR.rds")
# saveRDS(test_loss,"C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_testloss_CAE_SVR.rds")
# save_model_hdf5(model,"C:/Users/User/Desktop/2021_0605/model/B456_CAE_SVR.h5")
### 0711 stability:
# test_history <- readRDS("C:/Users/User/Desktop/2021_0605/model/Results_loss/B456_testloss_CAE_SVR.rds")
# test_history <- data.frame(t(test_history))
layer_name<-"predict"
encoder <- keras_model(inputs=model$input,outputs=get_layer(model,layer_name)$output)
summary(encoder)
test_pred = encoder %>% predict(xtest) # 245 12
pred2 <- data.frame(testy,test_pred)
colnames(pred2)<-c("actual","pred")
pred2<- pred2%>%
mutate(SSE=(actual-pred)^2)
MSE_test <- mean(pred2$SSE) #25.92496
test_history <- model %>% evaluate(x=xtest,y=list("predict"=testy,"autoencoder"=xtest))
test_history <- data.frame(test_history)
test_history[2,]<-MSE_test
print(test_history)
saveRDS(test_history,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456_testloss_CAE_SVR10.rds")
save_model_hdf5(model,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456_CAE_SVR_10.h5")
#########################################################################################
#########################################################################################
######## 2021_0712 model stability ----------------------------
ANN <- list()
SVR <- list()
CAE <- list()
CNN <- list()
for(i in 1:10)
{
if(i!=1)
{
tmp <-read_rds(file=paste0("D:/AmberChu/Handover/Output_result/CMP/2021_0709_finalresults/Stability/A123_testloss_CAE_ANN",i,".rds"))
ANN[[i]]<- data.frame(tmp[2:3,])
tmp2 <-read_rds(file=paste0("D:/AmberChu/Handover/Output_result/CMP/2021_0709_finalresults/Stability/A123_testloss_CAE_SVR",i,".rds"))
SVR[[i]]<- data.frame(tmp2[2:3,])
}
else
{
ANN[[i]] <-read_rds(file=paste0("D:/AmberChu/Handover/Output_result/CMP/2021_0709_finalresults/Stability/A123_testloss_CAE_ANN",i,".rds"))
tmp3 <-read_rds(file=paste0("D:/AmberChu/Handover/Output_result/CMP/2021_0709_finalresults/Stability/A123_testloss_CAE_SVR",i,".rds"))
SVR[[i]]<- data.frame(tmp3[1:2,])
}
CAE[[i]] <-read_rds(file=paste0("D:/AmberChu/Handover/Output_result/CMP/2021_0709_finalresults/Stability/A123_testloss_normalCAE",i,".rds"))
CNN[[i]] <-read_rds(file=paste0("D:/AmberChu/Handover/Output_result/CMP/2021_0709_finalresults/Stability/A123_testloss_CNN",i,".rds"))
}
ann <- list()
svr <- list()
cae <- list()
cnn <- list()
for(i in 1:10)
{
tmp <- t(data.table(ANN[[i]]))
ann <- rbind(ann,tmp)
tmp2 <- t(data.table(SVR[[i]]))
svr <- rbind(svr,tmp2)
tmp3 <- t(data.table(CAE[[i]]))
cae <- rbind(cae,tmp3)
tmp4 <- t(data.table(CNN[[i]]))
cnn <- rbind(cnn,tmp4)
}
pred <- cbind(unlist(ann[,1]),unlist(svr[,1]),unlist(cnn[,1]))
rec <- cbind(unlist(ann[,2]),unlist(svr[,2]),unlist(cae[,1]))
colnames(pred)<-c("CAE_ANN","CAE_SVR","CNN")
colnames(rec)<-c("CAE_ANN","CAE_SVR","CAE")
pred_set <-cbind(data.frame(colMeans(pred)),data.frame(apply(pred,2,var)))
rec_set <- cbind(data.frame(colMeans(rec)),data.frame(apply(rec,2,var)))
colnames(pred_set)<-c("avg_predMSE","var_predMSE")
colnames(rec_set)<-c("avg_recMSE","var_recMSE")
write.csv(pred_set,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456pred_set.csv")
write.csv(rec_set,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456rec_set.csv")
write.csv(pred,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456exp_pred.csv")
write.csv(rec,"C:/Users/User/Desktop/2021_0709/CMP_data/Stability/B456exp_rec.csv")
|
7d57d2031c5c46d2aac228c63ecc3c9c5c04d1a6 | fbc4e8b54e4f139729865b320ac6fdd7528f39ab | /R/build-prospects-table.R | cbb819a4b0375c4d76fd9adfcfd542fa0b757fec | [] | no_license | anthonyshook/nhldata | b93694aa83b75877d3f0facaf530be935f45da8a | 5b96e7891d5764c9225e3981cc073547ff4726a5 | refs/heads/master | 2023-03-04T13:08:14.581260 | 2021-02-20T16:57:53 | 2021-02-20T16:57:53 | 287,140,063 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,014 | r | build-prospects-table.R | #' Build prospects table
#'
#' @param conn A DBI connection to the target database - defaults to creating
#'   a new one via \code{connect_to_db()}. A connection that this function
#'   opens for itself is disconnected automatically on exit; a connection
#'   supplied by the caller is left open.
#'
#' @description The internals here are hard-coded, as this is not meant to be
#' something used outside of the current process, at the moment.
#' In the future, perhaps something config-based could be created
#' This is meant to create the table brand new, not update it. If the table already
#' exists, it will be re-written by running this function.
#'
#' @return Invisibly, \code{TRUE}.
#'
#' @export
#'
build_prospects_table <- function(conn = connect_to_db()) {
  # Record up front whether we own the connection. The previous check,
  # `substitute(conn) == 'connect_to_db()'`, compared the deparsed calling
  # expression against a string literal, which silently stops matching if the
  # default is ever reformatted; `missing(conn)` expresses the actual intent
  # ("did we open this connection ourselves?") directly and robustly.
  owns_conn <- missing(conn)
  on.exit(
    if (owns_conn) {
      DBI::dbDisconnect(conn)
    },
    add = TRUE
  )
  # Fetch the prospect data
  prospies <- fetch_prospect_info()
  # Write out to tables, based on conn; overwrite = TRUE makes this a full
  # rebuild rather than an incremental update (see @description).
  DBI::dbWriteTable(conn = conn, name = "current_prospects", prospies,
                    overwrite = TRUE, row.names = FALSE)
  # Add a unique index on the prospect id so lookups stay fast and duplicate
  # prospect rows are rejected.
  statement <- "CREATE UNIQUE INDEX prospectIndex ON current_prospects(prospectid);"
  DBI::dbExecute(conn, statement)
  return(invisible(TRUE))
}
|
9343b5425ebcc19b0bb5c6429e2e007ff5f8cb17 | 6855ac1106597ae48483e129fda6510354efa2bd | /not_included/croatia_scrap.R | bb8265e6f229ef36488184551cb37e9d2f4acfe4 | [
"MIT"
] | permissive | rOpenGov/iotables | ad73aae57b410396995635d1c432744c06db32db | 91cfdbc1d29ac6fe606d3a0deecdb4c90e7016b9 | refs/heads/master | 2022-10-02T13:03:54.563374 | 2022-09-24T11:47:20 | 2022-09-24T11:47:20 | 108,267,715 | 19 | 8 | NOASSERTION | 2021-12-17T15:09:35 | 2017-10-25T12:35:47 | R | UTF-8 | R | false | false | 1,714 | r | croatia_scrap.R |
library ( dplyr ) ; library (tidyr) ; library ( iotables)
voc <- readRDS("not_included/t_rows2.rdf")
doc = XML::xmlParse("not_included/t_rows2.rdf")
t_rows_vocabulary <- readxl::read_excel(path = "not_included/t_rows_vocabulary.xlsx")
names (t_rows_vocabulary ) <- gsub( ":", "_", names (t_rows_vocabulary ))
t_rows_vocabulary$ns5:label
croatia_temp_file <- paste0(tempdir(),
"\\croatia.xlsx" )
?download.file
download.file(url = "https://dzs.gov.hr/Hrv_Eng/publication/2015/12-01-04_01_2015.xlsx",
destfile = croatia_temp_file,
mode = 'wb' )
t_rows_hr_1700 <- readxl::read_excel(
path = croatia_temp_file,
sheet = 3,
range = "a430:b512",
col_names = TRUE)
##all fine
t_rows_hr_1700 <- t_rows_hr_1700 %>%
dplyr::rename( t_rows2 = Code) %>%
left_join (., t_rows_data)
io <- readxl::read_excel(
path = croatia_temp_file,
sheet = 3,
range = "C521:CF521",
col_names = FALSE
) # P1 (stray text was fused onto this closing paren in the original paste)
dzs_2010_1700
# t_rows are identical  (stray prose note; commented out so the file parses)
a<-iotables::t_rows_data
View (t_rows_data)
#1800 - Symmetric input-output table for domestic production (product x product)
t_rows_hr_1800 <- readxl::read_excel(
path = croatia_temp_file,
sheet = 3,
range = "a521:b598",
col_names = TRUE)
t_rows_hr_1800 <- t_rows_hr_1800 %>%
dplyr::rename( t_rows2 = Code) %>%
left_join (., t_rows_data)
#DP6A = Use of imported products, cif
#1900 - Symmetric input-output table for imports (product x product)
t_rows_hr_1900 <- readxl::read_excel(
path = croatia_temp_file,
sheet = 3,
range = "a604:b670",
col_names = TRUE)
t_rows_hr_1900 <- t_rows_hr_1900 %>%
dplyr::rename( t_rows2 = Code) %>%
left_join (., t_rows_data)
|
1d0b22a41c4c40ea97bf7ed64d5806ec5880ce8d | b94deca3de381159d70533825fac36171e4eee74 | /Q1.R | 70d70dc20477895f29b7b792f05713b940e1931f | [] | no_license | un-sweet/hw2 | 9a4542e210d76ecdca2390fbbcdc229f124280cc | d6fa1e559ec5ec3e728d68c99e71c09078904baf | refs/heads/main | 2023-03-27T23:32:13.334987 | 2021-03-26T06:29:56 | 2021-03-26T06:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,086 | r | Q1.R | #####################################
####### R Help Page (共 40 分) #######
#####################################
# 1. (20 分) 請先閱讀 `seq()` 的 help page,
# 再使用此函數產生一個由 `101` 至 `1`、彼此間隔 2 的等差數列,
# 亦即 `101, 99, 97, ..., 3, 1`
# Modify the code below
seq()
# should print out:
#> [1] 101 99 97 95 93 91 ... 3 1
# 2. (20 分) 請先閱讀 `sum()` 與 `mean()` 的 help page,
# 再使用這些函數計算前一題等差數列的 **總和** 及 **算術平均數**
# (hint: 你可能需要使用 **變數** 暫存 `seq()` 產生的等差數列)
# Write your code here
# should print out:
#> [1] 2601
#> [1] 51
################################
#### 整個 R Script 的執行結果 ####
###############################
#> Rscript Q1.R
#> [1] 101 99 97 95 93 91 89 87 85 83 81 79 77 75 73 71 69 67 65
#> [20] 63 61 59 57 55 53 51 49 47 45 43 41 39 37 35 33 31 29 27
#> [39] 25 23 21 19 17 15 13 11 9 7 5 3 1
#> [1] 2601
#> [1] 51
|
8e8719837b2dfec62bca888b80a7a8d16c1c1afe | ee49d256837b4b77048b8c4ad775ad7f789dac9b | /webscraping_osebx.R | 6f1fb496c8165d99bc517916da10bc6e585df1d6 | [] | no_license | ruialv/webscraping | 10f375975b3e915b9b3fa17a0f47711835d1ee81 | f2da0bb36552bc9b67d026094273958b4b856dad | refs/heads/main | 2023-08-22T08:40:39.373836 | 2021-10-13T08:51:05 | 2021-10-13T08:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,620 | r | webscraping_osebx.R | library(dplyr)
library(RSelenium)
library(stringr)
library(rvest)
list.files()
library(utils)
library(lubridate)
remDr <- remoteDriver(port = 6990L,
remoteServerAddr = "localhost", browserName = "chrome")
{
links_month <- list()
links_month[1] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-01-01&toDate=2020-01-31&market=&messageTitle="
links_month[2] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-02-01&toDate=2020-02-29&market=&messageTitle="
links_month[3] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-03-01&toDate=2020-03-31&market=&messageTitle="
links_month[4] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-04-01&toDate=2020-04-30&market=&messageTitle="
links_month[5] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-05-01&toDate=2020-05-31&market=&messageTitle="
links_month[6] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-06-01&toDate=2020-06-30&market=&messageTitle="
links_month[7] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-07-01&toDate=2020-07-31&market=&messageTitle="
links_month[8] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-08-01&toDate=2020-08-31&market=&messageTitle="
links_month[9] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-09-01&toDate=2020-09-30&market=&messageTitle="
links_month[10] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-10-01&toDate=2020-10-31&market=&messageTitle="
links_month[11] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-11-01&toDate=2020-11-30&market=&messageTitle="
links_month[12] <- "https://newsweb.oslobors.no/search?category=1001&issuer=&fromDate=2020-12-01&toDate=2020-12-31&market=&messageTitle="
}
remDr$open()
for (i in 1:length(links_month)) {
remDr$navigate(links_month[[i]]) ##Navigates to month i
Sys.sleep(2) ##sleep while page loads
remDr$screenshot(display = TRUE)
html <- list()
for (i in 1:500) {
html[[i]] <-paste('//*[@id="root"]/div/main/table/tbody/tr[', paste(i, "]/td[4]/a", sep=""), sep ="") ##xpath's to find number of pages
}
html_mtry <- list()
for (i in 1:500) {
html_mtry[[i]] <- try(new_click <- remDr$findElement(using = "xpath", html[[i]]), silent = TRUE) ##Testing which pages that work
}
try_error_html <- list()
for (i in 1:500) {
try_error_html[[i]] <- class(html_mtry[[i]]) == "try-error" ##Classifies true/false whether it works
}
df_2 <- cbind(html, html_mtry, try_error_html)
df_2 <- as.data.frame(df_2)
df_2 <- df_2 %>% filter(try_error_html == "FALSE") ##Only pages marked with FALSE works
linker_mtry <- data.frame(matrix(NA, nrow = length(seq(1:nrow(df_2))), ncol = length(seq(1:5)))) ##Dataframe to extract information to
colnames(linker_mtry) <- c("click_link", "link_head", "text_head", "link_attach", "text_attach") ##Change column names
linker_mtry$click_link <- df_2$html ##defining working links into click_link
for (i in 1:nrow(df_2)) {
linker_mtry$link_head[[i]] <- paste0('//*[@id="root"]/div/main/table/tbody/tr[', paste0(i, ']/td[4]/a/div/span/span[1]')) ##Makes new paste for text of link
linker_mtry$text_head[[i]] <- tolower(unlist(remDr$findElement(using = "xpath", linker_mtry$link_head[[i]])$getElementText())) ##Extract link text
linker_mtry$link_attach[[i]] <- paste0('//*[@id="root"]/div/main/table/tbody/tr[', paste0(i, ']/td[6]')) ##Makes new paste for number of attachments
linker_mtry$text_attach[[i]] <- tolower(unlist(remDr$findElement(using = "xpath", linker_mtry$link_attach[[i]])$getElementText())) ##Extract number of attachments
}
linker_mtry <- linker_mtry %>% filter(str_detect(text_head, "annual report | årsrapport | annual | årsrekneskap")) %>% ##detect wheter it is an annual report or not
mutate(text_attach = as.numeric(text_attach)) %>% filter(text_attach > 0 & text_attach < 4) ##Define attachment as numeric, and number of attachments is 1-3
for(i in 1:nrow(linker_mtry)) {
click <- remDr$findElement(using = "xpath", linker_mtry$click_link[[i]]) ##Defines next click
remDr$screenshot(display = TRUE)
remDr$mouseMoveToLocation(webElement = click) ##Marks page to navigate to
click$clickElement() ##Moves to page i (company page)
Sys.sleep(2)
remDr$screenshot(display = TRUE) ### ABOVE LINES NAVIGATES TO THE PAGE
#### PROGRAM DOWNLOADS/SCRAPE DATA FROM THIS
instrument <- data.frame(matrix(NA, nrow = length(seq(1:(linker_mtry$text_attach[[i]]*2))), ncol = length(seq(1:2)))) ##Makes df based on number of attachments (files)
colnames(instrument) <- c("mtry", "test_attachment") ##adds column names
instru_8 <- list() ##Makes two lists to paste xpath links into
instru_9 <- list()
for (j in 1:linker_mtry$text_attach[[i]]) {
instru_8[[j]] <- paste('//*[@id="root"]/div/main/div[2]/div[2]/div[1]/div[8]/ul/li[', paste(j, "]/a", sep=""), sep="") ##Xpath for div[8]
}
for (j in 1:linker_mtry$text_attach[[i]]) {
instru_9[[j]] <- paste('//*[@id="root"]/div/main/div[2]/div[2]/div[1]/div[9]/ul/li[', paste(j, "]/a", sep=""), sep="") ##Xpath for div[9]
}
instru_comb <- append(instru_8, instru_9)
instrument$mtry <- instru_comb
if(linker_mtry$text_attach[[i]] == 1) { ##If 1 attachment
instrument_8 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[1]]), silent = TRUE) ##Tests whether attachment is in div[8] or div[9]
instrument_9 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[2]]), silent = TRUE)
instrument$test_attachment[[1]] <- class(instrument_8) == "try-error" ##Classifies which one that works
instrument$test_attachment[[2]] <- class(instrument_9) == "try-error"
Sys.sleep(1)
instrument <- instrument %>% filter(test_attachment == "FALSE")
for (k in 1:nrow(instrument)) { ##Loops through all attachments
remDr$screenshot(display = TRUE)
new_click <- remDr$findElement(using = "xpath", instrument$mtry[[k]])
remDr$mouseMoveToLocation(webElement = new_click)
new_click$clickElement()
remDr$screenshot(display = TRUE)
Sys.sleep(2) }
remDr$goBack()
} else if(linker_mtry$text_attach[[i]] == 2) { ##If 2 attachments
instrument_8_1 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[1]]), silent = TRUE)
instrument_8_2 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[2]]), silent = TRUE)
instrument_9_1 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[3]]), silent = TRUE)
instrument_9_2 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[4]]), silent = TRUE)
instrument$test_attachment[[1]] <- class(instrument_8_1) == "try-error"
instrument$test_attachment[[2]] <- class(instrument_8_2) == "try-error"
instrument$test_attachment[[3]] <- class(instrument_9_1) == "try-error"
instrument$test_attachment[[4]] <- class(instrument_9_2) == "try-error"
instrument <- instrument %>% filter(test_attachment == "FALSE")
Sys.sleep(1)
for (k in 1:nrow(instrument)) { ##Loops through all attachments
remDr$screenshot(display = TRUE)
new_click <- remDr$findElement(using = "xpath", instrument$mtry[[k]])
remDr$mouseMoveToLocation(webElement = new_click)
new_click$clickElement()
remDr$screenshot(display = TRUE)
Sys.sleep(2) }
remDr$goBack()
} else if(linker_mtry$text_attach[[i]] == 3) { ##If 3 attachments
instrument_8_1 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[1]]), silent = TRUE)
instrument_8_2 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[2]]), silent = TRUE)
instrument_8_3 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[3]]), silent = TRUE)
instrument_9_1 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[4]]), silent = TRUE)
instrument_9_2 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[5]]), silent = TRUE)
instrument_9_3 <-try(new_click <- remDr$findElement(using = "xpath", instrument$mtry[[6]]), silent = TRUE)
Sys.sleep(2)
instrument$test_attachment[[1]] <- class(instrument_8_1) == "try-error"
instrument$test_attachment[[2]] <- class(instrument_8_2) == "try-error"
instrument$test_attachment[[3]] <- class(instrument_8_3) == "try-error"
instrument$test_attachment[[4]] <- class(instrument_9_1) == "try-error"
instrument$test_attachment[[5]] <- class(instrument_9_2) == "try-error"
instrument$test_attachment[[6]] <- class(instrument_9_3) == "try-error"
Sys.sleep(1)
instrument <- instrument %>% filter(test_attachment == "FALSE")
for (k in 1:nrow(instrument)) { ##Loops through all attachments
remDr$screenshot(display = TRUE)
new_click <- remDr$findElement(using = "xpath", instrument$mtry[[k]])
remDr$mouseMoveToLocation(webElement = new_click)
new_click$clickElement()
remDr$screenshot(display = TRUE)
Sys.sleep(2)
}
remDr$goBack() ## #GOES BACK TO INITIAL PAGE
}
Sys.sleep(1)
remDr$screenshot(display = TRUE)
}
}
|
b0324b184a53d5904b8376bf1d1881bd0e0da4f5 | 6bb58ee76ac5293b1c356078f3df2547dcef9b63 | /cachematrix.R | e5502960541a2bc8e725dfc9a69a194f838917bc | [] | no_license | marystreet/coursework | e5af2e3d93170c8fcb8518d7246fa9aa5c18ad7c | f3f5956f0b8b948bb35c93a2580e95214ddce0b6 | refs/heads/master | 2021-01-01T06:05:16.475417 | 2015-05-22T16:43:53 | 2015-05-22T16:43:53 | 35,449,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,092 | r | cachematrix.R | ## makeCacheMatix creates a special "vector",
## Construct a cache-aware matrix wrapper: a list of four closures sharing a
## common environment that stores a matrix together with a memoised copy of
## its inverse.
##   set(y)          - replace the stored matrix and drop any cached inverse
##   get()           - return the stored matrix
##   setinv(inverse) - stash a computed inverse in the cache
##   getinv()        - return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  set <- function(y) {
    x <<- y
    # The stored matrix changed, so any previously cached inverse is stale.
    cached.inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached.inverse <<- inverse
  }
  getinv <- function() {
    cached.inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If an inverse is already cached it is returned directly (after emitting
## the "getting cached data" message); otherwise it is computed with solve(),
## stored back into the cache, and returned. Extra arguments are forwarded
## to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
1316d13cb78e439760ed691457807e290a00aee4 | ed823b6da656fb94805c7ff74dfd7b921c5624c9 | /man/plot_heatmap_scExp.Rd | 10c4e2574c95a293653bb966807739002341c9cc | [] | no_license | vallotlab/ChromSCape | cbde454c903445706e75b27aade45a7a68db5986 | 382eac1015cd7f67e448124faf5a917f4c973aa1 | refs/heads/master | 2023-03-15T20:18:37.915065 | 2023-03-13T16:46:50 | 2023-03-13T16:46:50 | 191,729,569 | 11 | 5 | null | 2019-07-03T13:06:05 | 2019-06-13T09:10:39 | R | UTF-8 | R | false | true | 1,051 | rd | plot_heatmap_scExp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{plot_heatmap_scExp}
\alias{plot_heatmap_scExp}
\title{Plot cell correlation heatmap with annotations}
\usage{
plot_heatmap_scExp(
scExp,
name_hc = "hc_cor",
corColors = (grDevices::colorRampPalette(c("royalblue", "white", "indianred1")))(256),
color_by = NULL,
downsample = 1000,
hc_linkage = "ward.D"
)
}
\arguments{
\item{scExp}{A SingleCellExperiment Object}
\item{name_hc}{Name of the hclust contained in the SingleCellExperiment
object}
\item{corColors}{A palette of colors for the heatmap}
\item{color_by}{Which features to add as additional bands on top of plot}
\item{downsample}{Number of cells to downsample}
\item{hc_linkage}{A linkage method for hierarchical clustering. See
\link[stats]{cor}. ('ward.D')}
}
\value{
A heatmap of cell to cell correlation, grouping cells by hierarchical
clustering.
}
\description{
Plot cell correlation heatmap with annotations
}
\examples{
data("scExp")
plot_heatmap_scExp(scExp)
}
|
62c30ffb2fb9e097b604db27cf7b675a36d920b2 | 6121da376efe804fc8d9a5b33731c7c35f6d5fc0 | /R_basics/text_while.R | a3a49c5ee5333252da84ca2303fc294d89c85330 | [] | no_license | Gitus-Maximus/Skills | 4e67b5cdc19d695aef0ab1f768d9ab5c2a9591ac | 1ba6bd63de18afe2ca698430aaa4b5bd5434351b | refs/heads/main | 2023-04-30T18:35:31.654718 | 2021-05-22T11:56:06 | 2021-05-22T11:56:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 114 | r | text_while.R | text = readline("Insert a sentence: ")
# Print the characters of `text` (read via readline() on the line above)
# one per line, in REVERSE order: last character first.
x = nchar(text)
while (x > 0) {
# substr(text, x, x) extracts the single character at position x.
print(substr(text, x, x))
x = x - 1
}
|
5a2cc217889598a99f3dcecba3497221aed81193 | 0e22254566e88a834241cda1bde831316a66ef52 | /Sandbox/CaseStudy2-EXPLORING.R | 469c60adf96912d9f74b03c2c290428a154aaf44 | [] | no_license | jlubich/DDS-CASE-STUDY-2 | 58dcdccbb885d8ca71dcb6fc1041a14a1712fc75 | 6d3e8cd058af569c2565ccc0d94a28a2b598b6f2 | refs/heads/master | 2021-08-23T07:27:01.800244 | 2017-12-04T04:16:42 | 2017-12-04T04:16:42 | 112,124,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,789 | r | CaseStudy2-EXPLORING.R | library(dplyr)
detach("package:plyr", unload=TRUE)
library(sqldf)
library(reshape2)
#load required packages
library(ggplot2)
library(tabplot)
library(tidyr)
# import data set
data(diamonds)
# make the plot
tableplot(diamonds)
?
tableplot(TalentData[1:15], sortCol = 2)
plot.ts(TalentData[1:10])
## Set the WD to where the R file is located
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
TalentData <- read.csv("casestudy2-data.csv")
str(TalentData)
summary(TalentData)
summarise_all(TalentData)
sqldf("SELECT CASE WHEN Attrition = 'Yes' THEN 1 ELSE 0 END AttritionCount, TotalWorkingYears, TrainingTimesLastYear, WorkLifeBalance FROM TalentData ")
sqldf("SELECT ROUND(DailyRate / 100, 0) * 100, COUNT(*) FROM TalentData GROUP BY DailyRate ORDER BY COUNT(*) DESC")
mtcars
head(melt(TalentData))
EmployeeCount
## Establish the mean attrition rate and 95% confidence interval for it
values <- data.frame(value = c("a", "a", "a", "a", "a",
"b", "b", "b",
"c", "c", "c", "c"))
nr.of.appearances <- aggregate(x = values,
by = list(unique.values = values$value),
FUN = length)
aggregate(
x = TalentData,
by = as.list(TalentData[c("Age")]),
FUN = mean)
# do not run the syntax
aggregate(x = d.f,
by = by.list,
FUN = function(s.d.f){y <- s.d.f; return(y)}
assets <- data.frame(
asset.class = c("equity", "equity","equity","option","option","option","bond", "bond"),
rating = c("AAA", "A", "A", "AAA", "BB", "BB", "AAA", "A"),
counterparty.a = c(runif(3), rnorm(5)),
counterparty.b = c(runif(3), rnorm(5)),
counterparty.c = c(runif(3), rnorm(5))
)
m <- matrix(data=cbind(rnorm(30, 0), rnorm(30, 2), rnorm(30, 5)), nrow=30, ncol=3)
apply(m, 2, function(x) length(x[x<0]))
TalentData$AttritionCount <- ifelse(TalentData$Attrition=="Yes",1,0)
aggregate(cbind(c("AttritionCount", "EmployeeCount")), data = TalentData, sum)
AllNumerics <- select_if(TalentData, is.numeric)
select_if(TalentData,is.factor)
names(select_if(TalentData,is.factor))
#> [1] "Attrition" "BusinessTravel" "Department" "EducationField" "Gender" "JobRole"
#> [7] "MaritalStatus" "Over18" "OverTime"
TalentData %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
TalentData %>%
group_by(MaritalStatus) %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
TalentData %>%
group_by(BusinessTravel) %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
TalentData %>%
group_by(Department) %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
TalentData %>%
group_by("Department") %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
GroupByField <- TalentData[,1]
# Summarise attrition for one column of `input.table`, selected by position.
#
#   input.table     - data frame containing numeric AttritionCount and
#                     EmployeeCount columns (see TalentData above).
#   group.by.column - index of the column whose values define the groups.
#
# Returns one row per distinct value of the chosen column, sorted by it:
#   CategoryVar     - the grouping value
#   AttritionRate   - leavers / headcount within the group
#   AttritionCount  - number of leavers within the group
#   AttritionEffect - leaver count weighted by the group's attrition rate
fun.AttritionRate <- function(input.table, group.by.column) {
  by.values <- input.table[, group.by.column]
  grouped <- group_by(input.table, CategoryVar = by.values)
  summarised <- summarise(
    grouped,
    AttritionRate = sum(AttritionCount) / sum(EmployeeCount),
    AttritionCount = sum(AttritionCount),
    AttritionEffect = sum(AttritionCount) * sum(AttritionCount) / sum(EmployeeCount)
  )
  arrange(summarised, CategoryVar)
}
fun.AttritionRate(TalentData, TalentData[,2])
fun.AttritionRate(TalentData, 3)
fun.AttritionRate(TalentData, 4)
fun.AttritionRate(TalentData, 5)
class(5)
Attrition.AllColumns <- data.frame()
Attrition.List = list()
for (i in 1:5) {
# ... make some data
dat <- data.frame(x = rnorm(10), y = runif(10))
dat$i <- i # maybe you want to keep track of which iteration produced it?
datalist[[i]] <- dat # add it to your list
}
big_data = do.call(rbind, datalist)
# or big_data <- dplyr::bind_rows(datalist)
# or big_data <- data.table::rbindlist(datalist)
names(TalentData)
CategoricalColumns <- c(1,3,5, 37:43)
Attrition.List = list()
for(i in CategoricalColumns){
print(i)
# print(class(as.numeric(i[[1]])))
SummaryColumn <- names(TalentData)[i]
SummaryResults <- fun.AttritionRate(TalentData, i)
SummaryResults$ColumnName <- SummaryColumn
Attrition.List[[i]] <- SummaryResults
print(SummaryColumn)
print(SummaryResults)
#fun.AttritionRate(TalentData, TalentData[,i])
}
big_data = do.call(rbind, Attrition.List)
big_data$observation <- 1:nrow(big_data)
#big_data$CategoryVar <- factor(big_data$CategoryVar, levels = big_data$observation)
d$Team2 <- factor(d$Team1, as.character(d$Team1))
ggplot(data = melt(select_if(TalentData, is.numeric)), mapping = aes(x = value)) +
geom_histogram(bins = 10) + facet_wrap(~variable, scales = 'free')
ggplot(data = SummaryResults, mapping = aes(x = CategoryVar, y = AttritionRate)) +
geom_bar(stat="identity") + facet_wrap(~ColumnName, scales = 'free')
ggplot(
data = big_data,
mapping = aes(
x = reorder(big_data$CategoryVar, order(big_data$observation, decreasing=FALSE)),
y = AttritionRate
)
) +
geom_col() + facet_wrap(~ColumnName, scales = 'free_x')
ggplot(data=Final.df, aes(x=reorder(CategoryVar, order(big_data$observation, decreasing=FALSE)),y=(FG_Percent*100),fill=Position)) +
+ scale_x_discrete(limits= big_data$CategoryVar)
Yejur Kunwar: ggplot(aes(x = p.name, y = colleagues), data = top) + geom_bar(fill = "dark blue", stat = "identity") + scale_x_discrete(limits= top$p.name)
dim(TalentData)
ungroup(TalentData)
names(TalentData)
dat_y<-(dat[,c(2:1130)])
dat_x<-(dat[,c(1)])
models <- list()
#
for(i in names(dat_y)){
y <- dat_y[i]
model[[i]] = lm( y~dat_x )
}
TalentData %>%
group_by(TalentData[,2]) %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
TalentData %>%
group_by(Education) %>%
summarise(AttritionRate = sum(AttritionCount)/sum(EmployeeCount)) %>%
arrange(AttritionRate)
tapply(TalentData$AttritionCount,TalentData$BusinessTravel, sum)
by(TalentData,TalentData$BusinessTravel, fun.AttritionRate)
aggregate(TalentData,TalentData$BusinessTravel, fun.AttritionRate)
names(select_if(TalentData,is.factor))
TalentData %>%
group_by(MaritalStatus) %>%
mutate(AttritionRate = sum(AttritionCount)/sum(EmployeeCount))
%>%
arrange(AttritionRate)
TalentData
library(dplyr)
tbl_df(TalentData) %>%
mutate(AtCnt = ifelse(Attrition == "Yes", 1, 0)) %>%
group_by(Age) %>%
summarise_each(AttritionRate = sum(AtCnt)/sum(EmployeeCount))
tbl_df(TalentData) %>%
mutate(AtCnt = ifelse(Attrition == "Yes", 1, 0)) %>%
group_by(Age) %>%
summarise(AttritionRate = sum(AtCnt)/sum(EmployeeCount))
fun.AttritionRate <- function(input.table) {
input.table <- as.data.frame(input.table)
sum(input.table$AttritionCount) / sum(input.table$EmployeeCount)
}
tbl_df(TalentData) %>%
mutate(AtCnt = ifelse(Attrition == "Yes", 1, 0)) %>%
group_by(Age) %>%
summarise_each(fun.AttritionRate)
summarise_all(TalentData, funs(fun.AttritionRate))
gather(TalentData, )
ggplot(data = melt(select_if(TalentData, is.factor)), mapping = aes(x = value)) +
geom_histogram(bins = 10) + facet_wrap(~variable, scales = 'free')
ggplot(data = melt(select_if(TalentData, is.factor)), mapping = aes(x = value)) +
geom_histogram(bins = 10) + facet_wrap(~variable, scales = 'free')
select_if(TalentData, is.factor)
exposures <- ?aggregate(
x = assets[c("counterparty.a", "counterparty.b", "counterparty.c")],
by = assets[c("asset.class", "rating")],
FUN = function(market.values){
sum(pmax(market.values, 0))
}
)
min(5:1, pi) #-> one number
pmin(5:1, pi) #-> 5 numbers
library(MASS)
categories <- data.frame(category = c("a", "a", "a", "a", "a",
"b", "b", "b", "b", "b",
"c", "c", "c", "c"))
observations <- data.frame(observation = c(rnorm(5, mean = 3, sd = 0.2),
rnorm(5, mean = -2, sd = 0.4),
rnorm(4, mean = 0, sd = 1)))
distr.estimate <- aggregate(x = observations,
by = categories,
FUN = function(observations){
fitdistr(observations, densfun = "normal")$estimate
})
## Compute the averages for the variables in 'state.x77', grouped
## according to the region (Northeast, South, North Central, West) that
## each state belongs to.
aggregate(state.x77, list(Region = state.region), mean)
## Compute the averages according to region and the occurrence of more
## than 130 days of frost.
aggregate(state.x77,
list(Region = state.region,
Cold = state.x77[,"Frost"] > 130),
mean)
## (Note that no state in 'South' is THAT cold.)
## example with character variables and NAs
testDF <- data.frame(v1 = c(1,3,5,7,8,3,5,NA,4,5,7,9),
v2 = c(11,33,55,77,88,33,55,NA,44,55,77,99) )
by1 <- c("red", "blue", 1, 2, NA, "big", 1, 2, "red", 1, NA, 12)
by2 <- c("wet", "dry", 99, 95, NA, "damp", 95, 99, "red", 99, NA, NA)
aggregate(x = testDF, by = list(by1, by2), FUN = "mean")
# and if you want to treat NAs as a group
fby1 <- factor(by1, exclude = "")
fby2 <- factor(by2, exclude = "")
aggregate(x = testDF, by = list(fby1, fby2), FUN = "mean")
## Formulas, one ~ one, one ~ many, many ~ one, and many ~ many:
aggregate(weight ~ feed, data = chickwts, mean)
aggregate(breaks ~ wool + tension, data = warpbreaks, mean)
aggregate(cbind(Ozone, Temp) ~ Month, data = airquality, mean)
aggregate(cbind(ncases, ncontrols) ~ alcgp + tobgp, data = esoph, sum)
## Dot notation:
aggregate(. ~ Species, data = iris, mean)
aggregate(len ~ ., data = ToothGrowth, mean)
## Often followed by xtabs():
ag <- aggregate(len ~ ., data = ToothGrowth, mean)
xtabs(len ~ ., data = ag)
## Compute the average annual approval ratings for American presidents.
aggregate(presidents, nfrequency = 1, FUN = mean)
## Give the summer less weight.
aggregate(presidents, nfrequency = 1,
FUN = weighted.mean, w = c(1, 1, 0.5, 1))
set.seed(1234)
total_bill <- rnorm(50, 25, 3)
tip <- 0.15 * total_bill + rnorm(50, 0, 1)
sex <- rbinom(50, 1, 0.5)
smoker <- rbinom(50, 1, 0.3)
day <- ceiling(runif(50, 0,7))
time <- ceiling(runif(50, 0,3))
size <- 1 + rpois(50, 2)
my.data <- as.data.frame(cbind(total_bill, tip, sex, smoker, day, time, size))
my.data
my.table <- table(my.data$smoker)
my.prop <- prop.table(my.table)
cbind(my.table, my.prop)
prop.table(TalentData[,13:15],1)
library(data.table)
seed(123)
dt = data.table(x1 = rep(letters[1:2], 6),
x2 = rep(letters[3:5], 4),
x3 = rep(letters[5:8], 3),
y = rnorm(12))
dt = dt[sample(.N)]
df = as.data.frame(dt)
# split consistency with data.frame: `x, f, drop`
all.equal(
split(dt, list(dt$x1, dt$x2)),
lapply(split(df, list(df$x1, df$x2)), setDT)
)
# nested list using `flatten` arguments
split(dt, by=c("x1", "x2"))
split(dt, by=c("x1", "x2"), flatten=FALSE)
# dealing with factors
fdt = dt[, c(lapply(.SD, as.factor), list(y=y)), .SDcols=x1:x3]
fdf = as.data.frame(fdt)
sdf = split(fdf, list(fdf$x1, fdf$x2))
all.equal(
split(fdt, by=c("x1", "x2"), sorted=TRUE),
lapply(sdf[sort(names(sdf))], setDT)
)
# factors having unused levels, drop FALSE, TRUE
fdt = dt[, .(x1 = as.factor(c(as.character(x1), "c"))[-13L],
x2 = as.factor(c("a", as.character(x2)))[-1L],
x3 = as.factor(c("a", as.character(x3), "z"))[c(-1L,-14L)],
y = y)]
fdf = as.data.frame(fdt)
sdf = split(fdf, list(fdf$x1, fdf$x2))
all.equal(
split(fdt, by=c("x1", "x2"), sorted=TRUE),
lapply(sdf[sort(names(sdf))], setDT)
)
sdf = split(fdf, list(fdf$x1, fdf$x2), drop=TRUE)
all.equal(
split(fdt, by=c("x1", "x2"), sorted=TRUE, drop=TRUE),
lapply(sdf[sort(names(sdf))], setDT)
)
x$bins <- cut(x$rank, breaks=c(0,4,10,15), labels=c("1-4","5-10","10-15"))
Jeff: split(das, cut2(das$wt, g=3))
Jeff: x$bins <- cut(x$rank, breaks=c(0,4,10,15), labels=c("1-4","5-10","10-15"))
x$bins <- cut(x$rank, breaks=c(0,4,10,15), labels=c("1-4","5-10","10-15"))
Z <- stats::rnorm(10000)
table(cut(Z, breaks = -6:6))
sum(table(cut(Z, breaks = -6:6, labels = FALSE)))
sum(graphics::hist(Z, breaks = -6:6, plot = FALSE)$counts)
cut(rep(1,5), 4) #-- dummy
tx0 <- c(9, 4, 6, 5, 3, 10, 5, 3, 5)
x <- rep(0:8, tx0)
stopifnot(table(x) == tx0)
table( cut(x, b = 8))
table( cut(x, breaks = 3*(-2:5)))
table( cut(x, breaks = 3*(-2:5), right = FALSE))
##--- some values OUTSIDE the breaks :
table(cx <- cut(x, breaks = 2*(0:4)))
table(cxl <- cut(x, breaks = 2*(0:4), right = FALSE))
which(is.na(cx)); x[is.na(cx)] #-- the first 9 values 0
which(is.na(cxl)); x[is.na(cxl)] #-- the last 5 values 8
## Label construction:
y <- stats::rnorm(100)
table(cut(y, breaks = pi/3*(-3:3)))
table(cut(y, breaks = pi/3*(-3:3), dig.lab = 4))
table(cut(y, breaks = 1*(-3:3), dig.lab = 4))
# extra digits don't "harm" here
table(cut(y, breaks = 1*(-3:3), right = FALSE))
#- the same, since no exact INT!
## sometimes the default dig.lab is not enough to be avoid confusion:
aaa <- c(1,2,3,4,5,2,3,4,5,6,7)
cut(aaa, 3)
cut(aaa, 3, dig.lab = 4, ordered = TRUE)
## one way to extract the breakpoints
labs <- levels(cut(aaa, 3))
cbind(lower = as.numeric( sub("\\((.+),.*", "\\1", labs) ),
upper = as.numeric( sub("[^,]*,([^]]*)\\]", "\\1", labs) ))
|
60b9b2488a20e6dca2a870de0731e05e056ead90 | e4890aefa3127e117bf73c26c346dc234f69d822 | /R/WAQI-package.R | bef108703933505f4b08bd8a62fb73adc3125549 | [
"MIT"
] | permissive | fdetsch/claiR | 3a89bb98f7db03f1a87dedceed0f216f727346a8 | d46629c38012cdcb3d874562714ce785b8afda05 | refs/heads/master | 2021-08-01T06:07:37.719161 | 2021-07-23T08:16:35 | 2021-07-23T08:16:35 | 252,429,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,163 | r | WAQI-package.R | #' Assessment of Air Cleanliness using Open Data
#'
#' Access real-time data through the World Air Quality Index (WAQI) project's
#' API.
#'
#' @name claiR-package
#' @docType package
#' @title Assessment of Air Cleanliness using Open Data
#' @author Florian Detsch
#' \cr
#' \cr
#' \emph{Maintainer:} Florian Detsch \email{fdetsch@@web.de}
#'
#' @import parallel rgeos sf
#' @importFrom devtools install_github
#' @importFrom fst read_fst write_fst
#' @importFrom jsonlite fromJSON
#' @importFrom Orcs list2df
#' @importFrom plyr rbind.fill
#' @importFrom R.utils withTimeout
#' @importFrom RCurl getURLContent
#' @importFrom stats na.omit
#'
#' @keywords package
#'
NULL
#'
#' @docType data
#' @name WAQIstations
#' @title WAQI Stations in Germany
#' @description World Air Quality Index (WAQI) project monitoring stations
#' located in Germany.
#' @details Data available through the Air Quality Programmatic API (see Source).
#' @format \code{sf data.frame} with 210 features and 2 fields covering a
#' spatial extent from 6.094444, 47.515447, 14.638056, 53.638190 (xmin, ymin,
#' xmax, ymax; EPSG:4326).
#' @source
#' \url{http://aqicn.org/api/de/}
#'
NULL
|
13331fbfe58e34c59e09881771344789789d6786 | 82084a4774fc65ac42c450e1fff12c12358df856 | /man/C22.Rd | 653ee91a410d5244acba8e6baca1c41d13c54402 | [] | no_license | davan690/RBNZ | 3d1367cf247e5c0ffb48b93fab0a9266a329e8c7 | 0ef36330f05236cc7f3c10f7eda46eab89e96e26 | refs/heads/master | 2023-07-18T05:11:05.115326 | 2021-08-27T13:48:07 | 2021-08-27T13:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 263 | rd | C22.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seriesDescriptions.R
\name{C22}
\alias{C22}
\title{Household Balance Sheet}
\description{
See \url{https://www.rbnz.govt.nz/statistics/c22} for a full description.
}
\keyword{internal}
|
7a17a465ffc5457e2960aa82cf70abe6ff309997 | 73129c7bf5a84f9a841f11de695f5fabea3d8073 | /man/genomeHits.Rd | f0a3d9bf7ef14b3586693476a4db4bbaaba87510 | [
"MIT"
] | permissive | nriddiford/svBreaks | 34af47eca66f686be858480791a3160c5053ef4d | 5e128468a41e4c206a225e0d81f2752bb6225f35 | refs/heads/master | 2021-12-29T11:22:06.796312 | 2021-12-19T14:51:55 | 2021-12-19T14:51:55 | 102,706,907 | 3 | 1 | null | 2019-03-18T11:11:17 | 2017-09-07T07:41:14 | R | UTF-8 | R | false | true | 237 | rd | genomeHits.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{genomeHits}
\alias{genomeHits}
\title{genomeHits}
\usage{
genomeHits(bp_data)
}
\description{
Plot distribution of breakpoints across the genome
}
|
19eeb0cd0700edc2019b8d1d670182230693bc59 | ab707c6c4426530354e9dc2c2d02af03a1d7257a | /R/utilityFunctions.R | 9851aabc10a8a03b87d2daac6aba3a2077793d60 | [] | no_license | plantarum/virtualspecies | 84fb54e1d29cf89ab9d84a2744d9da4d214bd208 | 36c079b3c853b77df46573ca46adff5abd253d56 | refs/heads/master | 2022-03-20T10:49:43.963146 | 2019-12-12T12:32:05 | 2019-12-12T12:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,376 | r | utilityFunctions.R | # Functions ".randomCellsLonLat" , "..randomCellsLonLat2" and ".randomPoints"
# have been duplicated from package dismo
# (author RJ Hijmans) for modifications.
# We duplicated them here because we needed a change in randomPoints
# function, since we use it in a manner that was not initially intended.
.randomCellsLonLat <- function(r, n) {
# sampling cells with weights based on their acutal area, to avoid sampling
# too many cells when going towards the poles
# as suggested by Jane Elith
y <- yFromRow(r, 1:nrow(r)) # get y coordinates for all rows
dx <- pointDistance(cbind(0, y), cbind(xres(r), y), longlat=TRUE) # size of each cell in longitude directoin
dx <- dx / max(dx) # standardize to 1
row <- sample.int(nrow(r), n, replace = TRUE, prob = dx) # sample from rows using weights
col <- sample.int(ncol(r), n, replace = TRUE) # sample from cols, not using weights
rowcol <- unique(cbind(row, col))
maxrow <- pmax(1, round(dx * ncol(r)))
cellsel <- matrix(nrow=0, ncol=2)
for (i in unique(rowcol[,1])) {
a <- subset(rowcol, rowcol[,1]==i)
if (nrow(a) > maxrow[i]) { a <- a[1:maxrow[i]] }
cellsel <- rbind(cellsel, a)
}
cells <- cellFromRowCol(r, cellsel[,1], cellsel[,2])
return(cells)
}
..randomCellsLonLat2 <- function(r, n) {
# only cells that are not NA
cells <- which(! is.na(getValues(r)) )
# which rows are that?
rows <- rowFromCell(r, cells)
# what is the latitude?
y <- yFromRow(r, rows)
# what is the 'width' of a cell?
dx <- pointDistance(cbind(0, y), cbind(xres(r), y), longlat=TRUE)
cells <- sample(cells, n, prob=dx)
return(cells)
}
.randomPoints <- function(mask, n, p, ext=NULL, extf=1.1, excludep=TRUE, prob=FALSE,
cellnumbers=FALSE, tryf=3, warn=2, lonlatCorrection=TRUE,
replaceCells = FALSE) {
if (nlayers(mask) > 1) {
mask <- raster(mask, 1)
}
tryf <- max(round(tryf[1]), 1)
if (missing(p)) {
excludep <- FALSE
} else {
if (inherits(p, 'SpatialPoints')) {
p <- sp::coordinates(p)
}
}
if (inherits(ext, 'character')) {
if (! ext %in% c('points')) {
stop("if ext is a character variable it should be 'points'")
} else if (missing(p)) {
warning("if p is missing, 'ext=points' is meaningless")
ext <- extent(mask)
} else {
ext <- extent(min(p[,1]), max(p[,1]), min(p[,2]), max(p[,2]))
}
}
if (! is.null(ext)) {
ext <- extent(ext)
ext <- ext * extf
ext <- intersect(ext, extent(mask))
mask2 <- crop(raster(mask), ext)
} else {
mask2 <- raster(mask)
}
nn <- n * tryf
nn <- max(nn, 10)
if (prob) {
stopifnot(hasValues(mask))
cells <- crop(mask, mask2)
cells <- try( stats::na.omit(cbind(1:ncell(cells), getValues(cells))))
if (inherits(cells, 'try-error')) {
stop("the raster is too large to be used with 'prob=TRUE'")
}
prob <- cells[,2]
cells <- cells[,1]
if (couldBeLonLat(mask)) {
rows <- rowFromCell(mask2, cells)
y <- yFromRow(mask2, rows)
dx <- pointDistance(cbind(0, y), cbind(xres(mask2), y), longlat=TRUE)
prob <- prob * dx
}
cells <- sample(cells, nn, prob=prob, replace = replaceCells)
xy <- xyFromCell(mask2, cells)
cells <- cellFromXY(mask, xy)
} else if (canProcessInMemory(mask2)) {
cells <- crop(mask, mask2)
if (hasValues(cells)) {
cells <- which(! is.na(getValues(cells)) )
} else {
cells <- 1:ncell(cells)
}
nn <- min(length(cells), nn)
if (lonlatCorrection & couldBeLonLat(mask)) {
# which rows are that?
rows <- rowFromCell(mask2, cells)
# what is the latitude?
y <- yFromRow(mask2, rows)
# what is the 'width' of a cell?
dx <- pointDistance(cbind(0, y), cbind(xres(mask2), y), longlat=TRUE)
cells <- sample(cells, nn, prob=dx, replace = replaceCells)
} else {
cells <- sample(cells, nn, replace = replaceCells)
}
xy <- xyFromCell(mask2, cells)
cells <- cellFromXY(mask, xy)
} else {
nn <- min(ncell(mask2), nn)
if (couldBeLonLat(mask2)) {
cells <- .randomCellsLonLat(mask2, nn)
} else {
if (nn >= ncell(mask2)) {
cells <- 1:ncell(mask2)
} else {
cells <- sampleInt(ncell(mask2), nn, replace = replaceCells)
}
}
xy <- xyFromCell(mask2, cells)
cells <- cellFromXY(mask, xy)
if (hasValues(mask)) {
vals <- cbind(cells, extract(mask, cells))
cells <- stats::na.omit(vals)[,1]
}
}
if (excludep) {
pcells <- cellFromXY(mask, p)
cells <- cells[ ! (cells %in% pcells) ]
}
if (length(cells) > n) {
cells <- sample(cells, n)
} else if (length(cells) < n) {
frac <- length(cells) / n
if (frac < 0.1) {
stop("generated random points = ", frac," times requested number; Use a higher value for tryf" )
}
if (frac < 0.5 & warn==1) {
warning("generated random points = ", frac," times requested number; Use a higher value for tryf" )
} else if (warn > 1) {
warning("generated random points = ", frac," times requested number")
}
}
if (cellnumbers) {
return(cells)
} else {
return(xyFromCell(mask, cells))
}
}
|
c36f8a81858ee52933dad7cc9df3ebedd530de16 | 271c28a105c28cf433f7fedd8562d23116038124 | /man/classify-as.data.frame.Rd | c5e62d4d86aa1d25d3608c900a9ac55c0b215f09 | [] | no_license | hejibo/gazetools | f368ef4500d38c884ce66a268a7135a173c41288 | eb7875b2c7e43fe74d66e07ed339bc3061c97a03 | refs/heads/master | 2021-01-17T06:59:11.809112 | 2013-05-21T13:57:43 | 2013-05-21T13:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 252 | rd | classify-as.data.frame.Rd | \name{classify.as.data.frame}
\alias{as.data.frame,classify,missing,missing-method}
\alias{classify.as.data.frame}
\title{Coerce object of class \code{classify} to a Data Frame}
\description{
Coerce object of class \code{classify} to a Data Frame
}
|
6538f36788f04690a40a46348deb619382ccc756 | 9a4d33b3aa79c9684867c8e038659573932cb429 | /R/knitr_utils.R | 85e9a001c01cbadff58e556cb06a341192732f28 | [
"MIT"
] | permissive | joshloyal/STAT542 | 795fd29cdfff4628b28dc4b72739a86bbfc647bf | 4659eb828047f6fbaca386e687cd67ed0c988b16 | refs/heads/master | 2021-07-09T05:16:33.612467 | 2017-10-07T00:45:44 | 2017-10-07T00:45:44 | 102,968,018 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 164 | r | knitr_utils.R | library(knitr)
read_package_chunks <- function(path = '.') {
pkg_files <- list.files(path = path, full.names = TRUE)
invisible(sapply(pkg_files, read_chunk))
} |
90b8612141b1537902fc244dc7a52d9ba4367e18 | 81eef7196d7378b8d9b38205373f7412acdb1c54 | /code/run_scale_up_scenarios_actuals.R | 83652acd0f8376e69f47fc6bf0e3d80341242a12 | [
"MIT"
] | permissive | yangyijane/covid_vaccination_disparities | a1e35e283cce4fe9a68dc2ff91625d77c258f4a9 | 03a5de37639114eb9049fe592c0f4bb0fe30d3a7 | refs/heads/main | 2023-08-23T14:36:53.906133 | 2021-10-19T17:07:04 | 2021-10-19T17:07:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,433 | r | run_scale_up_scenarios_actuals.R | rm(list = ls())
library(data.table)
library(ggplot2)
library(DBI)
library(RSQLite)
library(lubridate)
setwd("~/COVID/vaccination_analysis/covid_vaccination_disparities_PPML/")
regions <- fread("https://raw.githubusercontent.com/cphalpert/census-regions/master/us%20census%20bureau%20regions%20and%20divisions.csv")
regions <- regions[, geo_id:=tolower(`State Code`)]
## Census Tract Population Age-Race Structure
df <- fread("data/census_tract_final_long.csv")
df <- df[, c("tract", "county", "state_name"):=tstrsplit(x = NAME, split = ", ")]
fips_codes <- as.data.table(tidycensus::fips_codes)
df <- merge(df, fips_codes, by = c("county", "state_name"), all.x=T)
df <- df[county=="Doña Ana County", state:="NM"]
df <- df[county=="Doña Ana County", state_code:="35"]
df <- df[county=="Doña Ana County", county_code:="013"]
df <- df[county=="LaSalle Parish", state:="LA"]
df <- df[county=="LaSalle Parish", state_code:="22"]
df <- df[county=="LaSalle Parish", county_code:="059"]
df <- df[county=="Petersburg Borough", state:="AK"]
df <- df[county=="Petersburg Borough", state_code:="02"]
df <- df[county=="Petersburg Borough", county_code:="195"]
# Clean race/ethnicity
df <- df[race_grp_full == "Multiracial", race_grp_full:="Other"]
df <- df[race_grp_full == "Native Hawaiian or Pacific Islander", race_grp_full:="Native Hawaiian or Other Pacific Islander"]
df <- df[race_grp_full=="Latino", race_grp_full:="Hispanic"]
setnames(df, "race_grp_full", "race_grp")
## For this, using ages 18+
df <- df[`Age Start`>=18]
df <- df[, state_pop:=sum(estimate, na.rm=T), by = c("state_name", "race_grp")]
## Merge in SVI
files <- list.files("data/svi_data/")
svi <- NULL
for (f in files) {
temp <- fread(paste0("data/svi_data/", f))
temp <- temp[,.(ST_ABBR, FIPS, RPL_THEMES)]
svi <- rbind(svi, temp, fill = T)
}
df <- merge(df, svi, by.x = c("GEOID"), by.y = "FIPS") # This drops 219 census tracts with 0 population (and no SVI)
## Fill missing SVI with mean of the state
df <- df[RPL_THEMES>=0 & RPL_THEMES<=1, mean_svi_state:=sum(RPL_THEMES*estimate)/sum(estimate), by = c("state")]
df <- df[, mean_svi_state:=mean(mean_svi_state, na.rm=T), by = "state"]
df <- df[RPL_THEMES<0 | RPL_THEMES>1, RPL_THEMES:=mean_svi_state] # Replace missing SVI with state mean SVI
df <- df[, estimate:=sum(estimate, na.rm=T), by = c("GEOID", "state_name", "race_grp")]
df <- unique(df[,.(GEOID, race_grp, county, state_name, state_code, estimate, state, county_code, RPL_THEMES, ST_ABBR, state_pop)])
df <- merge(df, regions[,.(`State Code`, Region, Division)], by.y = "State Code", by.x="state")
# Access
state <- fread("prepped/effective_demand_weights_state.csv")
df <- merge(df, state[,.(race_grp, state_name, effective_demand)], by = c("race_grp", "state_name"), all.x=T)
division <- fread("prepped/effective_demand_weights.csv")
setnames(division, "weight_actual_division", "weight_actual")
df <- merge(df, division[,.(race_grp, Division, weight_actual)], by = c("race_grp", "Division"), all.x=T)
df <- df[is.na(effective_demand), effective_demand:=weight_actual]
df <- df[is.na(effective_demand), effective_demand:=1] # Other doesn't have weight, assign proportional to population
df <- df[, weight_actual:=NULL]
setnames(df, "effective_demand", "weight_actual")
# Vaccination rates
pops <- unique(df[,.(state_name, race_grp, state_pop)])
setnames(pops, "state_pop", "state_pop_race")
vax_stats <- fread("./data/COVID-19_Vaccinations_in_the_United_States_Jurisdiction.csv") #https://data.cdc.gov/Vaccinations/COVID-19-Vaccinations-in-the-United-States-Jurisdi/unsk-b7fc
vax_stats <- vax_stats[, Date:=mdy(Date)]
vax_stats <- vax_stats[Date=="2021-04-01"]
setnames(vax_stats, "Location", "state")
fips <- as.data.table(tidycensus::fips_codes)
fips <- unique(fips[,.(state_name, state, state_code)])
vax_stats <- merge(vax_stats, fips, by = c("state"))
vax_stats <- vax_stats[,.(Date, state_name, Administered_Dose1_Recip_18PlusPop_Pct)]
vax_stats <- vax_stats[, lapply(.SD, as.numeric), by = c("state_name", "Date")]
vax_stats <- merge(vax_stats, state, by = c("state_name"))
vax_stats <- vax_stats[is.na(value_adj), missing:=1]
vax_stats <- vax_stats[!is.na(value_adj), missing:=0]
vax_stats <- vax_stats[is.na(value_adj), pct_missing:=pop_share_agg]
vax_stats <- vax_stats[, pct_missing:=sum(pct_missing, na.rm=T), by = c("state_name", "Date")]
vax_stats <- merge(vax_stats, pops, by = c("state_name", "race_grp"))
vax_stats <- vax_stats[, value_adj:=value_adj*(1-pct_missing)]
vax_stats <- vax_stats[is.na(value_adj), value_adj:=pop_share_agg]
vax_stats <- vax_stats[, value_adj:=value_adj/sum(value_adj, na.rm=T), by = c("state_name", "Date")]
vax_stats <- vax_stats[, pop_12_allrace:=sum(state_pop_race, na.rm=T), by = c("state_name", "Date")]
vax_stats <- vax_stats[, doses_adj:=(Administered_Dose1_Recip_18PlusPop_Pct/100)*pop_12_allrace]
vax_stats <- vax_stats[, race_doses:=doses_adj*value_adj]
## Deal with 100+ round 1
vax_stats <- vax_stats[race_doses>state_pop_race, add_doses:=race_doses-state_pop_race]
vax_stats <- vax_stats[, add_doses:=sum(add_doses, na.rm=T), by = c("state_name", "Date")]
vax_stats <- vax_stats[race_doses>state_pop_race, exceeded:=1]
vax_stats <- vax_stats[race_doses>state_pop_race, race_doses:=state_pop_race]
vax_stats <- vax_stats[is.na(exceeded), second_pct:=value_adj/sum(value_adj), by = c("state_name", "Date")]
vax_stats <- vax_stats[is.na(exceeded), race_doses:=race_doses+(second_pct*add_doses)]
vax_stats <- vax_stats[, vaccinated:=(race_doses/state_pop_race)*100]
df <- merge(df, vax_stats, by = c("state_name", "race_grp"))
df <- df[, vaccinated:=estimate*(vaccinated/100)]
# Vaccination rate (one week average)
vax_history <- fread("./data/COVID-19_Vaccinations_in_the_United_States_Jurisdiction.csv") #https://data.cdc.gov/Vaccinations/COVID-19-Vaccinations-in-the-United-States-Jurisdi/unsk-b7fc
vax_history <- vax_history[, Date:=mdy(Date)]
setnames(vax_history, "Location", "state")
fips <- as.data.table(tidycensus::fips_codes)
fips <- unique(fips[,.(state_name, state, state_code)])
vax_history <- merge(vax_history, fips, by = c("state"))
pops_all <- copy(pops)
pops_all <- pops_all[, tot_pop:=sum(state_pop_race, na.rm=T), by = c("state_name")]
pops_all <- unique(pops_all[,.(state_name, tot_pop)])
vax_history <- merge(vax_history, pops_all, by = c("state_name"))
vax_history <- vax_history[, Administered_Dose1_Recip:=(Administered_Dose1_Recip_18PlusPop_Pct/100)*tot_pop]
vax_history <- vax_history[,.(Date, state_name, Administered_Dose1_Recip)]
## FIX NON-MONTONICITY
for (i in as.list(seq.Date(as.Date(min(vax_history$Date)), as.Date(max(vax_history$Date)) , "days"))) {
print(ymd(i))
vax_history <- vax_history[, current_temp:=NULL]
vax_history <- vax_history[Date == ymd(i), current_temp:=Administered_Dose1_Recip]
vax_history <- vax_history[, current_temp:=mean(current_temp, na.rm=T), by = "state_name"]
vax_history <- vax_history[Date < ymd(i) & Administered_Dose1_Recip > current_temp, Administered_Dose1_Recip:=current_temp]
}
## COMPUTE INCIDENT
vax_history <- vax_history[, l1:=shift(Administered_Dose1_Recip, n = 1, type = "lead"), by = c("state_name")]
vax_history <- vax_history[, incident:=as.numeric(Administered_Dose1_Recip)-as.numeric(l1)]
vax_history <- vax_history[, smooth:=frollmean(incident, 7, align = "center"), by = c("state_name")]
# vax_history <- vax_history[Date=="2021-04-01"]
real_backup <- copy(df)
## Start here for scenarios
df <- copy(real_backup)
geo <- "GEOID"
sim_data <- copy(df)
########################
#### SCENARIO FLAGS ####
########################
## Status Quo
## Equalized Uptake
## Equalized Uptake and Geographic Targeting
scenario_label <- "Status Quo"
# scenario_label <- "Equalized Uptake"
# scenario_label <- "Equalized Uptake and Geographic Targeting"
print(paste0("Running ", scenario_label))
## Compute tract allocation
### Percent of states' population in each census tract
sim_data <- sim_data[, geo_pop:=sum(estimate, na.rm=T), by = c(geo, "state_name")]
sim_data <- sim_data[, pop_elig:=sum(estimate, na.rm=T), by = "state_name"]
sim_data <- sim_data[, geo_alloc:=geo_pop/pop_elig]
if (scenario_label%in%c("Status Quo")) {
sim_data <- sim_data[, weighted_demand:=(estimate-vaccinated)*weight_actual] ### Demand scalar is the size of the unvaccinated population multiplied by the relative uptake rate
sim_data <- sim_data[, race_age_pct:=weighted_demand/sum(weighted_demand, na.rm=T), by = c(geo, "state_name")] ### Compute share of vaccinations in each tract by race
}
if (scenario_label%in%c("Equalized Uptake", "Equalized Uptake and Geographic Targeting")) {
sim_data <- sim_data[, weighted_demand_sq:=(estimate-vaccinated)*weight_actual] # For scenarios, we need to know observed relative uptake to set "best rate"
sim_data <- sim_data[, race_age_pct_sq:=weighted_demand_sq/sum(weighted_demand_sq, na.rm=T), by = c(geo, "state_name")]
sim_data <- sim_data[, weighted_demand:=(estimate-vaccinated)*1] # But actual scale-up further assumes there is no differential uptake rates
sim_data <- sim_data[, race_age_pct:=weighted_demand/sum(weighted_demand, na.rm=T), by = c(geo, "state_name")]
}
sim_data <- sim_data[, state_race_elig:=sum(estimate, na.rm=T), by = c("state_name", "race_grp")]
backup <- copy(sim_data)
out <- NULL
for (s in unique(sim_data$state_name)) {
print(s)
sim_data <- backup[state_name==s]
counter <- 1
for (i in as.list(seq.Date(as.Date("04/01/2021", format="%m/%d/%Y"),as.Date("09/01/2021", format="%m/%d/%Y"), "days"))) {
if (scenario_label%in%c("Equalized Uptake", "Equalized Uptake and Geographic Targeting")) {
if (i > ymd("2021-07-15")) {
supply <- vax_history$smooth[vax_history$state_name==s & vax_history$Date==ymd("2021-07-15")] # To make long-term projections, assume supply is maintained at current rates
} else {
supply <- vax_history$smooth[vax_history$state_name==s & vax_history$Date==i]
}
sim_data <- sim_data[, daily_vax:=supply*geo_alloc*race_age_pct_sq] # Compute number vaccinated under observed uptake rates
sim_data <- sim_data[is.na(daily_vax), daily_vax:=0]
best_rate <- copy(sim_data)
best_rate <- best_rate[, tot_daily_vax:=sum(daily_vax, na.rm=T), by = "race_grp"] # Compute vaccination rate under observed
best_rate <- best_rate[, vax_rate:=tot_daily_vax/state_race_elig]
best_rate <- unique(best_rate[,.(race_grp, state_race_elig, pop_elig, vax_rate)])
best_vax_rate <- max(best_rate$vax_rate[best_rate$state_race_elig>200000 & best_rate$race_grp%in%c("Asian", "Black", "Hispanic", "White")]) # Scenarios assume best vaccination rate among four racial/ethnic groups with at least 200K population
new_supply <- best_vax_rate*best_rate$pop_elig[1]
if (scenario_label == "Equalized Uptake and Geographic Targeting" & counter <= 42) { # Applies for first six weeks
sim_data <- sim_data[RPL_THEMES>=.75, daily_vax:=2*new_supply*geo_alloc*race_age_pct] # For geographic targeting, assume doubled vaccination rate in most disadvantaged quartile
sim_data <- sim_data[RPL_THEMES<.75 | is.na(RPL_THEMES), daily_vax:=new_supply*geo_alloc*race_age_pct]
} else {
sim_data <- sim_data[, daily_vax:=new_supply*geo_alloc*race_age_pct]
}
sim_data <- sim_data[is.na(daily_vax), daily_vax:=0]
} else {
if (i >= ymd("2021-07-15")) {
supply <- vax_history$smooth[vax_history$state_name==s & vax_history$Date==ymd("2021-07-15")]
} else {
supply <- vax_history$smooth[vax_history$state_name==s & vax_history$Date==i]
}
sim_data <- sim_data[, daily_vax:=supply*geo_alloc*race_age_pct]
sim_data <- sim_data[is.na(daily_vax), daily_vax:=0]
}
sim_data <- sim_data[, vaccinated:=ifelse(vaccinated+daily_vax<=estimate*(.95), vaccinated+daily_vax, estimate*(.95))]
temp <- copy(sim_data)
temp <- temp[, daily_vax:=sum(daily_vax, na.rm=T), by = c("race_grp", "state_name")]
temp <- temp[, vaccinated:=sum(vaccinated, na.rm=T), by = c("race_grp", "state_name")]
temp <- unique(temp[,.(race_grp, daily_vax, vaccinated, state_name, state_race_elig, state_pop, pop_elig)])
temp <- temp[, day:=i]
out <- rbind(out, temp, fill = T)
counter <- counter+1
}
}
out <- out[, scenario:=paste0(scenario_label)]
write.csv(out, paste0("results/", scenario_label, ".csv"), na = "", row.names = F)
|
048aa4cb869d86a89b276d5698d12efbd21d95f5 | 76438f4642a21d133067516c794f2cdaafc30ea6 | /hicdcplus/R/diff_inter.R | fd43adb4f128cb450136f73b68dae1558bdb5858 | [] | no_license | 1092-bioinformatics/finalproject-109753144 | dafc7d296a1a720008a2911f9cbd375d04bc8f16 | f32683dd0f5976898b3b7bf5f55fc63d549e585f | refs/heads/master | 2023-06-11T03:52:25.274818 | 2021-06-23T13:45:48 | 2021-06-23T13:45:48 | 378,655,136 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,345 | r | diff_inter.R | library('HiCDCPlus')
# Example HiC-DC+ workflow: call significant Hi-C interactions in four example
# .hic datasets (chr21/chr22, uniform 50 kb bins), then test for differential
# interactions between the NSD2 and TKO conditions.
# All intermediate and final files are written under this directory.
outdir <- '.'

# Step 1: pre-compute bin features for hg38 with the GATC restriction
# signature; writes <outdir>/hg38_50kb_GATC_bintolen.txt.gz used below.
construct_features(output_path = paste0(outdir, "/hg38_50kb_GATC"),
                   gen = "Hsapiens", gen_ver = "hg38",
                   sig = "GATC", bin_type = "Bins-uniform",
                   binsize = 50000,
                   chrs = c("chr21", "chr22"))

# Step 2: for each example .hic file, add counts to the feature bins, run
# HiC-DC+ significance estimation, and collect significant bin pairs.
hicfile_paths <- c(
  system.file("extdata", "GSE131651_NSD2_LOW_arima_example.hic", package = "HiCDCPlus"),
  system.file("extdata", "GSE131651_NSD2_HIGH_arima_example.hic", package = "HiCDCPlus"),
  system.file("extdata", "GSE131651_TKOCTCF_new_example.hic", package = "HiCDCPlus"),
  system.file("extdata", "GSE131651_NTKOCTCF_new_example.hic", package = "HiCDCPlus"))
indexfile <- data.frame()
for (hicfile_path in hicfile_paths) {
  # Per-dataset output file: same base name as the .hic input with a .txt.gz
  # extension. fixed = TRUE matches '.hic' literally (the original pattern was
  # an unanchored regex where '.' matched any character); basename() replaces
  # the hand-rolled directory-stripping gsub.
  output_path <- paste0(outdir, '/', basename(gsub('.hic', '.txt.gz', hicfile_path, fixed = TRUE)))
  # Generate a gi_list instance from the precomputed bin features.
  gi_list <- generate_bintolen_gi_list(bintolen_path = paste0(outdir, "/hg38_50kb_GATC_bintolen.txt.gz"), gen = "Hsapiens", gen_ver = "hg38")
  gi_list <- add_hic_counts(gi_list, hic_path = hicfile_path)
  # Expand 1D features for modeling.
  gi_list <- expand_1D_features(gi_list)
  # Run HiC-DC+; seeded for reproducibility (ssize = 0.1 -- see ?HiCDCPlus
  # for the sampling semantics of this argument).
  set.seed(1010)
  gi_list <- HiCDCPlus(gi_list, ssize = 0.1)
  # Accumulate the union of significant (qvalue <= 0.05) bin pairs over all
  # chromosomes and datasets. seq_along() is safe for empty lists.
  for (i in seq_along(gi_list)) {
    indexfile <- unique(rbind(indexfile, as.data.frame(gi_list[[i]][gi_list[[i]]$qvalue <= 0.05])[c('seqnames1', 'start1', 'start2')]))
  }
  # Write per-dataset results to a text file.
  gi_list_write(gi_list, fname = output_path)
}

# Step 3: save the index file --- union of significant interactions at 50 kb.
colnames(indexfile) <- c('chr', 'startI', 'startJ')
data.table::fwrite(indexfile, paste0(outdir, '/GSE131651_analysis_indices.txt.gz'), sep = '\t', row.names = FALSE, quote = FALSE)

# Step 4: differential analysis using modified DESeq2 (see ?hicdcdiff),
# restricted to the significant interactions collected above.
hicdcdiff(input_paths = list(NSD2 = c(paste0(outdir, '/GSE131651_NSD2_LOW_arima_example.txt.gz'),
                                      paste0(outdir, '/GSE131651_NSD2_HIGH_arima_example.txt.gz')),
                             TKO = c(paste0(outdir, '/GSE131651_TKOCTCF_new_example.txt.gz'),
                                     paste0(outdir, '/GSE131651_NTKOCTCF_new_example.txt.gz'))),
          filter_file = paste0(outdir, '/GSE131651_analysis_indices.txt.gz'),
          output_path = paste0(outdir, '/diff_analysis_example/'),
          fitType = 'mean',
          binsize = 50000,
          diagnostics = TRUE)
|
b1b3ee48b1c454d13d461ea95e55860f00c08ae7 | 21b4ee3f8f6d4b31b7b553e821358ead779313d8 | /R/length.R | cf733bf354db7e83b661bbc2a3681c3459511fa2 | [] | no_license | cran/simFrame | 232ac52dfb4e79445c8c3da6183f3a5e541bce43 | 9da160cc2897919de7c0fac56ca9b6f7f0035d36 | refs/heads/master | 2021-10-28T08:54:47.060719 | 2021-10-14T10:10:02 | 2021-10-14T10:10:02 | 17,699,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 617 | r | length.R | # ----------------------
# Author: Andreas Alfons
#         KU Leuven
# ----------------------
# 'length()' methods for the simulation control/setup S4 classes: each method
# reports how many settings (or samples) an object encodes by delegating to
# the corresponding accessor.
## sample control: number of samples, as reported by getK()
setMethod("length", "VirtualSampleControl", function(x) getK(x))
## sample setup: one element per stored index vector
setMethod("length", "SampleSetup", function(x) length(getIndices(x)))
## contamination control: one element per contamination level epsilon
setMethod("length", "VirtualContControl", function(x) length(getEpsilon(x)))
## NA control: the NA rates may be given as a vector or a matrix; getLength()
## (defined below) maps a vector to its length and a matrix to its row count
setMethod("length", "VirtualNAControl", function(x) getLength(getNArate(x)))
# Helper for the 'VirtualNAControl' length method above: a specification
# given as a numeric vector contributes one setting per element, a matrix
# one setting per row; any other class yields NA.  is() performs an
# S4-aware class check.
getLength <- function(x) {
    if (is(x, "numeric")) return(length(x))
    if (is(x, "matrix")) return(nrow(x))
    NA
}
|
eeed3666196ddacef657a73f1449d79059658797 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/cognitoidentityprovider_create_user_pool.Rd | 4f627e7c4e302d6b22252fcd50f60dda8e8bd8bc | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 11,790 | rd | cognitoidentityprovider_create_user_pool.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_create_user_pool}
\alias{cognitoidentityprovider_create_user_pool}
\title{Creates a new Amazon Cognito user pool and sets the password policy for
the pool}
\usage{
cognitoidentityprovider_create_user_pool(PoolName, Policies,
LambdaConfig, AutoVerifiedAttributes, AliasAttributes,
UsernameAttributes, SmsVerificationMessage, EmailVerificationMessage,
EmailVerificationSubject, VerificationMessageTemplate,
SmsAuthenticationMessage, MfaConfiguration, DeviceConfiguration,
EmailConfiguration, SmsConfiguration, UserPoolTags,
AdminCreateUserConfig, Schema, UserPoolAddOns, UsernameConfiguration,
AccountRecoverySetting)
}
\arguments{
\item{PoolName}{[required] A string used to name the user pool.}
\item{Policies}{The policies associated with the new user pool.}
\item{LambdaConfig}{The Lambda trigger configuration information for the new user pool.
In a push model, event sources (such as Amazon S3 and custom
applications) need permission to invoke a function. So you will need to
make an extra call to add permission for these event sources to invoke
your Lambda function.
For more information on using the Lambda API to add permission, see
\href{https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html}{AddPermission}
.
For adding permission using the AWS CLI, see
\href{https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html}{add-permission}
.}
\item{AutoVerifiedAttributes}{The attributes to be auto-verified. Possible values: \strong{email},
\strong{phone_number}.}
\item{AliasAttributes}{Attributes supported as an alias for this user pool. Possible values:
\strong{phone_number}, \strong{email}, or \strong{preferred_username}.}
\item{UsernameAttributes}{Specifies whether email addresses or phone numbers can be specified as
usernames when a user signs up.}
\item{SmsVerificationMessage}{A string representing the SMS verification message.}
\item{EmailVerificationMessage}{A string representing the email verification message.
EmailVerificationMessage is allowed only if
\href{https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount}{EmailSendingAccount}
is DEVELOPER.}
\item{EmailVerificationSubject}{A string representing the email verification subject.
EmailVerificationSubject is allowed only if
\href{https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount}{EmailSendingAccount}
is DEVELOPER.}
\item{VerificationMessageTemplate}{The template for the verification message that the user sees when the
app requests permission to access the user's information.}
\item{SmsAuthenticationMessage}{A string representing the SMS authentication message.}
\item{MfaConfiguration}{Specifies MFA configuration details.}
\item{DeviceConfiguration}{The device configuration.}
\item{EmailConfiguration}{The email configuration.}
\item{SmsConfiguration}{The SMS configuration.}
\item{UserPoolTags}{The tag keys and values to assign to the user pool. A tag is a label
that you can use to categorize and manage user pools in different ways,
such as by purpose, owner, environment, or other criteria.}
\item{AdminCreateUserConfig}{The configuration for
\code{\link[=cognitoidentityprovider_admin_create_user]{admin_create_user}}
requests.}
\item{Schema}{An array of schema attributes for the new user pool. These attributes
can be standard or custom attributes.}
\item{UserPoolAddOns}{Used to enable advanced security risk detection. Set the key
\code{AdvancedSecurityMode} to the value "AUDIT".}
\item{UsernameConfiguration}{You can choose to set case sensitivity on the username input for the
selected sign-in option. For example, when this is set to \code{False}, users
will be able to sign in using either "username" or "Username". This
configuration is immutable once it has been set. For more information,
see
\href{https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html}{UsernameConfigurationType}.}
\item{AccountRecoverySetting}{Use this setting to define which verified available method a user can
use to recover their password when they call
\code{\link[=cognitoidentityprovider_forgot_password]{forgot_password}}. It allows
you to define a preferred method when a user has more than one method
available. With this setting, SMS does not qualify for a valid password
recovery mechanism if the user also has SMS MFA enabled. In the absence
of this setting, Cognito uses the legacy behavior to determine the
recovery method where SMS is preferred over email.}
}
\value{
A list with the following syntax:\preformatted{list(
UserPool = list(
Id = "string",
Name = "string",
Policies = list(
PasswordPolicy = list(
MinimumLength = 123,
RequireUppercase = TRUE|FALSE,
RequireLowercase = TRUE|FALSE,
RequireNumbers = TRUE|FALSE,
RequireSymbols = TRUE|FALSE,
TemporaryPasswordValidityDays = 123
)
),
LambdaConfig = list(
PreSignUp = "string",
CustomMessage = "string",
PostConfirmation = "string",
PreAuthentication = "string",
PostAuthentication = "string",
DefineAuthChallenge = "string",
CreateAuthChallenge = "string",
VerifyAuthChallengeResponse = "string",
PreTokenGeneration = "string",
UserMigration = "string",
CustomSMSSender = list(
LambdaVersion = "V1_0",
LambdaArn = "string"
),
CustomEmailSender = list(
LambdaVersion = "V1_0",
LambdaArn = "string"
),
KMSKeyID = "string"
),
Status = "Enabled"|"Disabled",
LastModifiedDate = as.POSIXct(
"2015-01-01"
),
CreationDate = as.POSIXct(
"2015-01-01"
),
SchemaAttributes = list(
list(
Name = "string",
AttributeDataType = "String"|"Number"|"DateTime"|"Boolean",
DeveloperOnlyAttribute = TRUE|FALSE,
Mutable = TRUE|FALSE,
Required = TRUE|FALSE,
NumberAttributeConstraints = list(
MinValue = "string",
MaxValue = "string"
),
StringAttributeConstraints = list(
MinLength = "string",
MaxLength = "string"
)
)
),
AutoVerifiedAttributes = list(
"phone_number"|"email"
),
AliasAttributes = list(
"phone_number"|"email"|"preferred_username"
),
UsernameAttributes = list(
"phone_number"|"email"
),
SmsVerificationMessage = "string",
EmailVerificationMessage = "string",
EmailVerificationSubject = "string",
VerificationMessageTemplate = list(
SmsMessage = "string",
EmailMessage = "string",
EmailSubject = "string",
EmailMessageByLink = "string",
EmailSubjectByLink = "string",
DefaultEmailOption = "CONFIRM_WITH_LINK"|"CONFIRM_WITH_CODE"
),
SmsAuthenticationMessage = "string",
MfaConfiguration = "OFF"|"ON"|"OPTIONAL",
DeviceConfiguration = list(
ChallengeRequiredOnNewDevice = TRUE|FALSE,
DeviceOnlyRememberedOnUserPrompt = TRUE|FALSE
),
EstimatedNumberOfUsers = 123,
EmailConfiguration = list(
SourceArn = "string",
ReplyToEmailAddress = "string",
EmailSendingAccount = "COGNITO_DEFAULT"|"DEVELOPER",
From = "string",
ConfigurationSet = "string"
),
SmsConfiguration = list(
SnsCallerArn = "string",
ExternalId = "string"
),
UserPoolTags = list(
"string"
),
SmsConfigurationFailure = "string",
EmailConfigurationFailure = "string",
Domain = "string",
CustomDomain = "string",
AdminCreateUserConfig = list(
AllowAdminCreateUserOnly = TRUE|FALSE,
UnusedAccountValidityDays = 123,
InviteMessageTemplate = list(
SMSMessage = "string",
EmailMessage = "string",
EmailSubject = "string"
)
),
UserPoolAddOns = list(
AdvancedSecurityMode = "OFF"|"AUDIT"|"ENFORCED"
),
UsernameConfiguration = list(
CaseSensitive = TRUE|FALSE
),
Arn = "string",
AccountRecoverySetting = list(
RecoveryMechanisms = list(
list(
Priority = 123,
Name = "verified_email"|"verified_phone_number"|"admin_only"
)
)
)
)
)
}
}
\description{
Creates a new Amazon Cognito user pool and sets the password policy for
the pool.
}
\section{Request syntax}{
\preformatted{svc$create_user_pool(
PoolName = "string",
Policies = list(
PasswordPolicy = list(
MinimumLength = 123,
RequireUppercase = TRUE|FALSE,
RequireLowercase = TRUE|FALSE,
RequireNumbers = TRUE|FALSE,
RequireSymbols = TRUE|FALSE,
TemporaryPasswordValidityDays = 123
)
),
LambdaConfig = list(
PreSignUp = "string",
CustomMessage = "string",
PostConfirmation = "string",
PreAuthentication = "string",
PostAuthentication = "string",
DefineAuthChallenge = "string",
CreateAuthChallenge = "string",
VerifyAuthChallengeResponse = "string",
PreTokenGeneration = "string",
UserMigration = "string",
CustomSMSSender = list(
LambdaVersion = "V1_0",
LambdaArn = "string"
),
CustomEmailSender = list(
LambdaVersion = "V1_0",
LambdaArn = "string"
),
KMSKeyID = "string"
),
AutoVerifiedAttributes = list(
"phone_number"|"email"
),
AliasAttributes = list(
"phone_number"|"email"|"preferred_username"
),
UsernameAttributes = list(
"phone_number"|"email"
),
SmsVerificationMessage = "string",
EmailVerificationMessage = "string",
EmailVerificationSubject = "string",
VerificationMessageTemplate = list(
SmsMessage = "string",
EmailMessage = "string",
EmailSubject = "string",
EmailMessageByLink = "string",
EmailSubjectByLink = "string",
DefaultEmailOption = "CONFIRM_WITH_LINK"|"CONFIRM_WITH_CODE"
),
SmsAuthenticationMessage = "string",
MfaConfiguration = "OFF"|"ON"|"OPTIONAL",
DeviceConfiguration = list(
ChallengeRequiredOnNewDevice = TRUE|FALSE,
DeviceOnlyRememberedOnUserPrompt = TRUE|FALSE
),
EmailConfiguration = list(
SourceArn = "string",
ReplyToEmailAddress = "string",
EmailSendingAccount = "COGNITO_DEFAULT"|"DEVELOPER",
From = "string",
ConfigurationSet = "string"
),
SmsConfiguration = list(
SnsCallerArn = "string",
ExternalId = "string"
),
UserPoolTags = list(
"string"
),
AdminCreateUserConfig = list(
AllowAdminCreateUserOnly = TRUE|FALSE,
UnusedAccountValidityDays = 123,
InviteMessageTemplate = list(
SMSMessage = "string",
EmailMessage = "string",
EmailSubject = "string"
)
),
Schema = list(
list(
Name = "string",
AttributeDataType = "String"|"Number"|"DateTime"|"Boolean",
DeveloperOnlyAttribute = TRUE|FALSE,
Mutable = TRUE|FALSE,
Required = TRUE|FALSE,
NumberAttributeConstraints = list(
MinValue = "string",
MaxValue = "string"
),
StringAttributeConstraints = list(
MinLength = "string",
MaxLength = "string"
)
)
),
UserPoolAddOns = list(
AdvancedSecurityMode = "OFF"|"AUDIT"|"ENFORCED"
),
UsernameConfiguration = list(
CaseSensitive = TRUE|FALSE
),
AccountRecoverySetting = list(
RecoveryMechanisms = list(
list(
Priority = 123,
Name = "verified_email"|"verified_phone_number"|"admin_only"
)
)
)
)
}
}
\keyword{internal}
|
4a8889f83160274de01decfc7189807bbd9c4981 | 27366236f9d5bbfd1e69ad24f717ae94590bb8f3 | /R/ContactPotentialProfiling_FragmentLibrary.R | ddbc63d11719fbd7a306b35ff7afa0473decc840 | [
"MIT"
] | permissive | yanmc/Repitope | 95cef04dc6d56247be014c9440c95f5d08b9a4f4 | f7d86bfee1483ed93598eb67f6fa425169c76bff | refs/heads/master | 2020-06-03T20:02:04.253837 | 2019-05-18T16:40:54 | 2019-05-18T16:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,253 | r | ContactPotentialProfiling_FragmentLibrary.R | #' Generate sequence fragment libraries.
#'
#' \code{CPP_FragmentLibrary} fragments a set of TCR CDR3B sequences with
#' sliding windows of the requested sizes and, for every combination of
#' window size and random seed, draws three resampled fragment libraries
#' ("Deduplicated", "Weighted", "Mock") of \code{fragDepth} fragments each.
#'
#' @param tcrSequenceSet A set of TCR CDR3B amino acid sequences.
#' @param fragLenSet A set of sliding window sizes.
#' @param fragDepth The depth of fragments used to compute repertoire-wide TCR-peptide contact potentials.
#' @param seedSet A set of random seeds.
#' @return A \code{data.table} with \code{fragDepth} rows and one column of
#'   fragment strings per library, named \code{<Library>_<fragLen>_<seed>}.
#' @export
#' @rdname ContactPotentialProfiling_FragmentLibrary
#' @name ContactPotentialProfiling_FragmentLibrary
CPP_FragmentLibrary <- function(tcrSequenceSet, fragLenSet=3:8, fragDepth=100000, seedSet=1:5){
# Generate fragment libraries from the entire sequence dataset provided.
# For a given window size: slide a window over every sequence (in parallel),
# keep only full-length fragments, add the reversed fragments, and tabulate
# fragment counts and relative frequencies.
fragmentLibrary_default <- function(tcrSequenceSet, fragLen){
cat("Fragment length = ", fragLen, "\n", sep="")
maxLen <- max(nchar(tcrSequenceSet), na.rm=T)
cl <- parallel::makeCluster(parallel::detectCores(logical=F))
parallel::clusterExport(cl, varlist=c("tcrSequenceSet", "fragLen", "maxLen"), envir=environment())
# One window start position per iteration; substrings cut short by the end
# of a sequence are shorter than fragLen and are dropped just below.
f <- unlist(pbapply::pblapply(seq(1, maxLen-fragLen+1),
function(i){stringr::str_sub(tcrSequenceSet, i, i+fragLen-1)}, cl=cl))
f <- f[nchar(f)==fragLen]
parallel::stopCluster(cl)
# Include the reverse of every fragment as well.
f <- c(f, stringi::stri_reverse(f))
dt.fr <- data.table::as.data.table(table(f))
colnames(dt.fr) <- c("Fragment", "Count")
dt.fr[,Freq:=Count/length(f)]
return(dt.fr)
}
cat("Fragmenting...\n", sep="")
fragLibList <- lapply(fragLenSet, function(l){fragmentLibrary_default(tcrSequenceSet, l)})
names(fragLibList) <- fragLenSet
# Format the libraries for contact potential profiling analysis.
# For one (fragLen, seed) combination, draw three libraries of fragDepth
# fragments each:
#   Deduplicated -- uniform sampling over the distinct observed fragments,
#   Weighted     -- sampling proportional to observed fragment frequency,
#   Mock         -- random strings over Biostrings::AA_STANDARD.
format_FragLib <- function(fragLen, seed=12345){
FragLib <- fragLibList[[as.character(fragLen)]]
set.seed(seed); FragSet.Dedup <- sample(FragLib$Fragment, size=fragDepth, replace=T)
set.seed(seed); FragSet.Weighted <- sample(FragLib$Fragment, size=fragDepth, replace=T, prob=FragLib$Freq)
set.seed(seed); FragSet.Mock <- sapply(1:fragDepth, function(i){paste0(sample(Biostrings::AA_STANDARD, size=fragLen, replace=T), collapse="")})
FragDF <- data.table::data.table("ID"=1:fragDepth, "FragLen"=fragLen, "Seed"=seed, "Deduplicated"=FragSet.Dedup, "Weighted"=FragSet.Weighted, "Mock"=FragSet.Mock)
return(FragDF)
}
cat("Formatting...\n", sep="")
# All (fragLen, seed) combinations, formatted in parallel.
paramGrid <- data.table::CJ("FragLen"=fragLenSet, "Seed"=seedSet)
cl <- parallel::makeCluster(parallel::detectCores(logical=F))
parallel::clusterExport(cl, varlist=c("format_FragLib", "fragLibList", "paramGrid"), envir=environment())
fragLibDT <- pbapply::pblapply(1:nrow(paramGrid), function(i){
format_FragLib(paramGrid$"FragLen"[i], paramGrid$"Seed"[i])
}, cl=cl) %>%
data.table::rbindlist()
parallel::stopCluster(cl)
gc();gc()
# Reshape long (one row per drawn fragment) so that each
# (Library, FragLen, Seed) combination can become its own column below.
fragLibDT <- data.table::melt.data.table(
fragLibDT,
id.vars=c("ID", "FragLen", "Seed"),
measure.vars=c("Deduplicated", "Weighted", "Mock"),
variable.name="Library", value.name="Fragment"
)
gc();gc()
# Encode FragLen and Seed into the library name, then widen: one column per
# library, fragDepth rows of fragments.
fragLibDT[,"Library":=paste0(Library, "_", FragLen, "_", Seed)][,FragLen:=NULL][,Seed:=NULL]
fragLibDT <- data.table::dcast.data.table(fragLibDT, ID~Library, value.var="Fragment")
fragLibDT[,"ID":=NULL]
gc();gc()
return(fragLibDT)
}
|
6fa04b7b6b2d56809147c4a528fdba14140c78ee | 1dfc525df1fc820262b29595df0058d2ef9d48af | /Probabilistic-Record-Linkage/Probabilistic-Record-Linkage.R | a50ea08401cc67d3e8bb93ea1462a32fd8b780c6 | [] | no_license | ykkim123/Data_Science | a2ac6e07cb09866363fda73aac469e33a99a6993 | b67434e0f7f9e8bed7a6de19dd2fa7c6df1c7b16 | refs/heads/master | 2021-01-09T15:08:06.895961 | 2020-09-04T05:15:47 | 2020-09-04T05:15:47 | 242,348,961 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,588 | r | Probabilistic-Record-Linkage.R | #############################
######### Probabilistic Record Linkage
#############################
# Fits the Fellegi-Sunter record-linkage model by EM and scores every
# candidate record pair with the match likelihood ratio R.
# Load data
# NOTE(review): PPRL does not appear to be used anywhere below -- confirm
# this dependency is needed.
library(PPRL)
# Raw file is tab-separated and header-less; keep the first five columns,
# rewrite them with named headers, and re-read the cleaned file.
data = read.csv('testdata.csv', header=F, sep='\t')
write.table(data[1:5], file='testdata_new.csv', sep=',', row.names=F, col.names=c('id','gender','year','month','date'))
data = read.csv('testdata_new.csv', header=T, sep=',')
# Split the records: rows 1,3,5,7 into data1 and rows 2,4,6,8 into data2,
# then build the full cross product of candidate pairs (columns suffixed
# ".x" come from data1, ".y" from data2).
data1 = data[c(1,3,5,7),]
data2 = data[c(2,4,6,8),]
data.cp = merge(x=data1, y=data2, by=NULL)
data.cp
# attach() exposes the pair columns (gender.x, gender.y, ...) as variables
# for the loops below.
attach(data.cp)
# Estimate the model parameters by EM
## Calculate gamma: gamma[[i]] is the binary agreement vector for pair i
## over the K = 4 compared fields (gender, year, month, date); an entry is
## 1 when the field agrees between the two records and 0 otherwise.
N = nrow(data.cp)
gamma = list()
# NOTE(review): 'temp' is assigned but never used in this loop.
for (i in 1:N){
temp = c()
if (gender.x[i]==gender.y[i]){
g=1}
else {
g=0}
if (year.x[i]==year.y[i]){
y=1}
else {
y=0}
if (month.x[i]==month.y[i]){
m=1}
else {
m=0}
if (date.x[i]==date.y[i]){
d=1}
else {
d=0}
gamma[[i]] = c(g,y,m,d)
}
## Initialization
## m[k] = P(field k agrees | pair is a match), u[k] = P(field k agrees |
## pair is a non-match), p = prior match probability.  eta stacks all
## parameters so convergence can be checked on the whole vector; eta.list
## records the parameter trajectory across iterations.
K=4
m = rep(0.7,K)
u = rep(0.3,K)
p=0.5
eta = c(m,u,p)
eta.list = eta
## Implement EM: alternate E and M steps until the summed squared change in
## the parameter vector falls below 1e-7.
repeat{
eta.prev = eta
# E-step: g[i] is the posterior probability that pair i is a true match
# given the current parameters (Bayes' rule over the match/non-match
# mixture, with fields treated as independent within each component).
g = c()
for (i in 1:N){
m.temp = 1
u.temp = 1
for (k in 1:K){
m.temp = m.temp * (m[k]^(gamma[[i]][k])) *((1-m[k])^(1-gamma[[i]][k]))
u.temp = u.temp * (u[k]^(gamma[[i]][k])) *((1-u[k])^(1-gamma[[i]][k]))}
g.i = (p*m.temp)/(p*m.temp + (1-p)*u.temp)
g = append(g, g.i)}
# M-step
## Update m: posterior-weighted agreement rate among matches.
m = c()
for (k in 1:K){
num.temp = 0
for (i in 1:N){
num.i = g[i] * gamma[[i]][k]
num.temp = num.temp + num.i}
m.k = num.temp/sum(g)
m = append(m, m.k)}
## Update u: posterior-weighted agreement rate among non-matches.
u = c()
for (k in 1:K){
num.temp = 0
for (i in 1:N){
num.i = (1-g[i]) * gamma[[i]][k]
num.temp = num.temp + num.i}
u.k = num.temp/sum(1-g)
u = append(u, u.k)}
## Update p: mean posterior match probability over all pairs.
p = c()
p.temp = sum(g)/N
p = append(p, p.temp)
# Convergence criterion: stop once the summed squared parameter change
# drops below 1e-7; eta.list keeps one row per iteration.
eta = c(m,u,p)
eta.list = rbind(eta.list,eta)
diff = (eta.prev-eta)^2
if (sum(diff)<1e-7)
break}
# Calculate R: the likelihood ratio for each pair,
# R_i = P(gamma_i | match) / P(gamma_i | non-match); large values favor
# classifying the pair as a match.
R = c()
for (i in 1:N){
num = 1
denom = 1
for (k in 1:K){
if (gamma[[i]][k]==1){
p.num.temp = m[k]
p.denom.temp = u[k]}
else{
p.num.temp = 1-m[k]
p.denom.temp = 1-u[k]}
num = num * p.num.temp
denom = denom * p.denom.temp}
R_i = num/denom
R = append(R, R_i)}
R
# NOTE(review): the declared matches (pairs 12 and 16 of the cross product)
# are hard-coded -- presumably chosen by inspecting R above; confirm that a
# threshold on R would select the same pairs.
matching = rep(0,N)
matching[c(12,16)] = c(1,1)
# Matching result: candidate pairs side by side with the 0/1 match flag.
data.matching = cbind(data.cp, matching)
data.matching
|
5e369432ae9fa65457db1e06b0b9efcb5695ed76 | 261ca2741bfadfe74685caf9dc9c34d606d0630f | /functions/raw_data.R | 9b19117430481407bd4e380a397221fe3ba434ec | [] | no_license | michbur/AmpGram-analysis | a743be25e69eaeb205cdac4673aac4c7d285472d | 9a76d4c90a74622ec5ad9dc8b3d908799ac7745a | refs/heads/master | 2022-06-16T07:03:40.763515 | 2020-05-11T17:18:16 | 2020-05-11T17:18:16 | 227,064,156 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,100 | r | raw_data.R | #' Reads in the raw AMP sequences for analysis
#'
#' Reads in AMP sequences from the dbAMP database and extracts those
#' with experimental evidence for antimicrobial activity. Returns a list
#' of two lists:
#' \itemize{
#' \item{standard}{Sequences comprised of only standard amino acids}
#' \item{non_standard}{Sequences containing nonstandard amino acids}
#' }
#' Side effects: prints the sequence count at several pipeline stages.
#' Reads \code{./data/dbamp_df.csv}; depends on \code{paste_to_five()} and
#' \code{purify()} from this file and on dplyr/magrittr being attached.
read_raw_data <- function()
read.csv("./data/dbamp_df.csv") %>%
# Assign ids of the form dbAMP_00001, zero-padded in row order.
mutate(id = 1L:nrow(.)) %>%
mutate(id = sapply(id, paste_to_five)) %>%
mutate(id = paste0("dbAMP_", id)) %T>% {
print(paste0("Number of sequences: ", nrow(.)))
} %>%
# Keep only records with experimental evidence of antimicrobial activity.
filter(Experimental.Evidence == "YES") %T>% {
print(paste0("Number of sequences: ", nrow(.)))
} %>% {
# Named character vector: names are ids, values are the sequences.
setNames(as.character(.[["Sequence"]]), .[["id"]])
} %>%
# Split each sequence into a character vector of single residues.
strsplit("") %>%
# Partition into standard / non-standard amino-acid sequences.
purify() %T>% {
print(paste0("Number of sequences with standard AA: ", length(.[["standard"]])))
print(paste0("Number of sequences with non-standard AA: ", length(.[["non_standard"]])))
}
#' Identifies sequences containing nonstandard amino acids.
#'
#' Checks every sequence for residues outside the standard amino acid
#' alphabet (as defined by \code{biogram}) and partitions the input
#' accordingly.
#' @param sequences Named list of sequences, each a character vector of
#' single-letter amino acid codes.
#' @return A list with two elements: \code{standard} (sequences built only
#' from standard amino acids) and \code{non_standard} (sequences with at
#' least one non-standard residue). Together they contain every input
#' sequence.
purify <- function(sequences) {
  alphabet <- toupper(biogram:::return_elements(seq_type = "prot"))
  uses_standard_only <- vapply(
    sequences,
    function(s) all(s %in% alphabet),
    logical(1)
  )
  list(
    standard = sequences[uses_standard_only],
    non_standard = sequences[!uses_standard_only]
  )
}
#' Zero-pad an identifier to at least five characters (used for dbAMP ids,
#' e.g. 7 -> "00007").
#'
#' Inputs already five characters or longer are returned unchanged; the
#' previous \code{rep("0", 5 - nchar(x))} construction errored on such
#' inputs because \code{rep()} rejects a negative \code{times} argument.
paste_to_five <- function(x) {
  n_pad <- max(5L - nchar(x), 0L)
  paste0(strrep("0", n_pad), x)
}
|
5e1dc16b4a0273fdb0c7f6131f74c2fcd4be78cb | e943294fe503d00513175402cc3f81089618e409 | /man/scientificLabels.Rd | 0ee77eea61e99f3047d5f8f7cf0ec86cdef6fe03 | [
"MIT"
] | permissive | sfeuerriegel/ResearchGroupTools | 395aa520315003b7626b7a5707a03df963059d3e | 96c0238f7261d471d522628be941a32aa9fc042a | refs/heads/master | 2020-04-24T01:22:59.561316 | 2017-02-03T17:21:50 | 2017-02-03T17:21:50 | 66,145,324 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 733 | rd | scientificLabels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{scientificLabels}
\alias{scientificLabels}
\title{ggplot labels with exponential notation}
\source{
Brian Diggs \url{https://groups.google.com/forum/#!topic/ggplot2/a_xhMoQyxZ4}
}
\usage{
scientificLabels(l)
}
\arguments{
\item{l}{Labels}
}
\value{
Labels in the desired format
}
\description{
Allows adjusting how numbers are displayed on both axes of ggplot2 plots. The format is such that the exponent is nicely set.
}
\examples{
library(ggplot2)
df <- data.frame(x=rnorm(100), y=rnorm(100))
ggplot(df, aes(x=x, y=y)) +
geom_point() +
scale_x_continuous(labels=scientificLabels) +
scale_y_continuous(labels=scientificLabels)
}
|
981c8b39cefff53cdb075b7356f884297ef29330 | 8e5c16034bee9a17ca7f9318a92b864f51ba58d1 | /RandomScale/man/fitadmb.Rd | 928c8d09167b68e6cd222cb2811dbbda9444c979 | [] | no_license | jlaake/RandomScale | 0532dd9fca561d99a8b6fc9a2d089bf15f61968a | 96e212ccd3f3e024d283a509c9b5db8dd5b67b10 | refs/heads/master | 2021-01-18T22:41:52.669034 | 2016-05-24T22:48:54 | 2016-05-24T22:48:54 | 59,616,794 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,710 | rd | fitadmb.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{fitadmb}
\alias{fitadmb}
\title{Random Scale Detection Function Fitting with admb}
\usage{
fitadmb(x, w = Inf, formula = ~1, beta = NULL, sigma = 0,
likelihood = "f2", extra.args = "-gh 10", verbose = TRUE, nsteps = 8,
keep = FALSE, debug = FALSE)
}
\arguments{
\item{x}{vector of distances or dataframe containing observed distances (distance) and other covariates}
\item{w}{half-width of strip; if infinite w routine sets w to 2*max(x)}
\item{formula}{formula for scale function}
\item{beta}{starting values for beta}
\item{sigma}{starting value for log sigma}
\item{likelihood}{character string "g","f1","f2","fixed"; see likelihoods.pdf in the package directory}
\item{extra.args}{for admb run}
\item{verbose}{for compile and run}
\item{nsteps}{adromb integration argument; default 8.}
\item{keep}{if TRUE, uses existing tpl and exe}
\item{debug}{if TRUE output parameter and -lnl values during iterations}
}
\description{
Fits a half-normal detection function with random scale for line
transect sampling data. Uses one of four likelihoods: g, f1, f2, or fixed
}
\details{
Beta[1] and bounds for beta[1] are set based on the measured distance scale and
they are adjusted internally by scaling distances such that max(x)=1.
Likewise the likelihood that is shown when debug=T is for scaled distances
but the reported value for the output is adjusted back to the original scale
}
\examples{
# random effect example in paper
dev.new()
par(mfrow=c(1,3))
set.seed(123)
# simulate data
x=simdata(n=500,w=Inf,beta=2,beta_eps=-.5)
# fit data with g likelihood eq(6) using R code
results_random=fitdata(x,w=Inf)
plotfit(x,w=max(x),results_random$model$par,nclass=30,
main="R code\\neq 6 likelihood",adjust=FALSE)
# fit data with g likelihood eq (6) using ADMB
glike=fitadmb(x,w=Inf,likelihood="g",verbose=FALSE)
plotfit(x,w=Inf, glike$coefficients[1:2],nclass=30,
main="ADMB code\\neq 6 likelihood",adjust=FALSE)
# fit data with f likelihood eq (9) using ADMB
f2like=fitadmb(x,w=Inf,likelihood="f2",verbose=FALSE)
plotfit(x,w=Inf,f2like$coefficients[1:2],nclass=30,
main="ADMB code\\neq 9 likelihood")
#results in table 1
# R code
sprintf("\%7.2f",-results_random$model$val)
sprintf("\%5.3f",results_random$model$par[1])
sprintf("\%5.3f",results_random$model$par[2])
# Admb code g likelihood
sprintf("\%7.2f",glike$loglik)
sprintf("\%5.3f",glike$coeflist[[1]])
sprintf("\%5.3f",glike$coeflist[[2]])
# Admb code with f likelihood (f2)
sprintf("\%7.2f",f2like$loglik)
sprintf("\%5.3f",f2like$coeflist[[1]])
sprintf("\%5.3f",f2like$coeflist[[1]]-exp(2*f2like$coeflist[[2]]))
sprintf("\%5.3f",f2like$coeflist[[2]])
# mixed efffect example in paper
dev.new()
par(mfrow=c(1,2))
# simulate data
x1=simdata(n=2000,w=50,beta_eps=-.5,beta=2,
fixed=FALSE,reject=TRUE)
x2=simdata(n=1000,w=50,beta_eps=-.5,beta=1,
fixed=FALSE,reject=TRUE)
df=data.frame(covariate=c(rep(0,length(x1)),
rep(1,length(x2))),distance=c(x1,x2))
# fit data with covariate
fwlike=fitadmb(df,w=50,formula=~covariate,
likelihood="f2",verbose=FALSE)
param=fwlike$coefficients[1:3]
# plot and get estimates of abundance in covered area and its std error
Nhatwcov=plotfit(df$distance,w=50,par=param,nclass=30,
dm=model.matrix(~covariate,df),
main="With covariate")
Nhatwcov.se=compute_Nhat.se(par=param,fwlike$vcov[1:3,1:3],
x=df,w=50,dm=model.matrix(~covariate,df))
# fit data without covariate
flike=fitadmb(df,w=50,formula=~1,
likelihood="f2",verbose=FALSE)
param=flike$coefficients[1:2]
# plot and get estimates of abundance in covered area and its std error
Nhatwocov=plotfit(df$distance,w=50,par=param,nclass=30,
main="Without covariate")
Nhatwocov.se=compute_Nhat.se(par=param,flike$vcov[1:2,1:2],
x=df,w=50,dm=model.matrix(~1,df))
# The code to show delta AIC, abundance and std errors and sigma estimates is
round(-2*flike$loglik+2*2-(-2*fwlike$loglik+2*3),2)
round(Nhatwcov,0)
round(Nhatwcov.se,1)
round(Nhatwocov,0)
round(Nhatwocov.se,1)
round(exp(fwlike$coefficients[3]),2)
round(exp(flike$coefficients[2]),2)
# plots in figure 3 and results in paper
dev.new()
par(mfrow=c(2,2))
param=fwlike$coefficients[1:3]
Nhatwcov0=plotfit(df$distance[df$covariate==0],w=50,par=param,
nclass=30,dm=model.matrix(~covariate,df[df$covariate==0,]),
main="Model with covariate\\ncovariate value=0")
Nhatwcov0.se=compute_Nhat.se(par=param,fwlike$vcov[1:3,1:3],
x=df[df$covariate==0,],w=50,
dm=model.matrix(~covariate,df[df$covariate==0,]))
Nhatwcov1=plotfit(df$distance[df$covariate==1],w=50,par=param,
nclass=30,dm=model.matrix(~covariate,df[df$covariate==1,]),
main="Model with covariate\\ncovariate value=1")
Nhatwcov1.se=compute_Nhat.se(par=param,fwlike$vcov[1:3,1:3],
x=df[df$covariate==1,],w=50,
dm=model.matrix(~covariate,df[df$covariate==1,]))
param=flike$coefficients[1:2]
Nhatwocov0=plotfit(df$distance[df$covariate==0],w=50,par=param,
nclass=30, main="Model without covariate\\ncovariate value=0")
Nhatwocov0.se=compute_Nhat.se(par=param,flike$vcov[1:2,1:2],
x=df[df$covariate==0,],w=50,
dm=model.matrix(~1,df[df$covariate==0,]))
Nhatwocov1=plotfit(df$distance[df$covariate==1],w=50,par=param,
nclass=30,main="Model without covariate\\ncovariate value=1")
Nhatwocov1.se=compute_Nhat.se(par=param,flike$vcov[1:2,1:2],
x=df[df$covariate==1,],w=50,
dm=model.matrix(~1,df[df$covariate==1,]))
round(Nhatwcov0,0)
round(Nhatwcov0.se,1)
round(Nhatwcov1,1)
round(Nhatwcov1.se,1)
round(Nhatwocov0,0)
round(Nhatwocov0.se,1)
round(Nhatwocov1,1)
round(Nhatwocov1.se,1)
}
\author{
Jeff Laake
}
|
b5a03ae2c34c5a915fa50ff2f79cac171e205036 | 51250726e0ce12a81f75572be193d0b6742554cf | /man/p_search_library.Rd | 0944ba33877e8d24c512fa51d02d68e135ae66db | [] | no_license | dpastoor/pacman | 6ead1b9913e7d2a6b018fc2e6390fd2d86ff4673 | 3b4c2c7f47f2d7faf7563f7b76a92953da47f884 | refs/heads/master | 2021-01-23T22:01:25.517006 | 2014-11-04T00:00:09 | 2014-11-04T00:00:09 | 26,233,734 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 930 | rd | p_search_library.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{p_search_library}
\alias{p_search_library}
\alias{p_sl}
\title{Partial Matching Package Search}
\usage{
p_search_library(begins.with = NULL, contains = NULL)
p_sl(begins.with = NULL, contains = NULL)
}
\arguments{
\item{begins.with}{A character string to search for packages starting with
the letter(s).}
\item{contains}{A character string to search for packages containing the
letter(s).}
}
\description{
Search library packages using partial matching. Search for packages by
partial matching letter(s) or by any letter(s) contained within the package's
name. Useful for those times when you can't remember that package name but
you know ``it starts with...''
}
\examples{
\dontrun{
p_search_library(begins.with = "ma")
p_search_library(begins.with = "r", contains = "ar")
p_search_library(contains = "att")
}
}
\keyword{library}
\keyword{package}
\keyword{search}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.