blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58d62f4f08719380b291c661e9b29987d7afdef3
|
f8495690bcd97ccbde1e8fe4e43d2b00a90ff7cb
|
/reddit_survey.R
|
a7c0f57fe501286e9c6cfa9fa030211df1558614
|
[] |
no_license
|
BarnetteME1/Learning_R
|
de221661716d7e50d126be5ced06287a68132308
|
1a0671aa0b05ba4c4a20e75c7a94ecb62168ba19
|
refs/heads/master
| 2016-09-01T08:22:10.342651
| 2016-04-03T19:25:36
| 2016-04-03T19:25:36
| 53,447,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
reddit_survey.R
|
# Exploratory look at the Udacity reddit survey dataset: inspect structure,
# then relabel and order the age.range factor for plotting.
setwd('~/R')
reddit <- read.csv('reddit.csv')
# Quick structural inspection of the raw data.
str(reddit)
dim(reddit)
table(reddit$employment.status)
summary(reddit)
levels(reddit$age.range)
library(ggplot2)
# Bar chart of age ranges with the original (alphabetical) level order.
qplot(data = reddit, x = age.range)
# Rename the factor levels in place.
# NOTE(review): this assigns new names positionally to the existing levels.
# The replacement vector has no '25-34' entry and uses '18-25' -- if the raw
# data's levels differ from these labels, categories get silently mislabelled;
# confirm against levels(reddit$age.range) printed above.
levels(reddit$age.range) <- c('Under 18', '18-25', '35-44', '45-54', '55-64',
'65 or Above', 'NA')
qplot(data = reddit, x = age.range)
reddit$age.range
# Teacher's approach: convert to an ordered factor so plots and comparisons
# respect the natural age ordering. Levels must exactly match the data's
# values or mismatches become NA.
reddit$age.range <- ordered(reddit$age.range, levels = c('Under 18', '18-25',
'35-44', '45-54', '55-64',
'65 or Above', 'NA'))
|
efdfe083e3fad546024d724f8e60c9d53911ed2c
|
3050675a24f529b8e795bdc81bd84149c0b41476
|
/files/install-packages.R
|
c71644220937d8d3f4b1a06f7129598a44585e2b
|
[
"CC-BY-4.0"
] |
permissive
|
coadunate/ansible-role-qiime
|
5d36b44398a1f4b730afbbe7bae987b0cc9cb648
|
816bf57ca2741332fccc2938ff72d37cd7bc72c3
|
refs/heads/master
| 2021-01-23T05:39:04.257400
| 2018-01-25T21:16:53
| 2018-01-25T21:16:53
| 92,977,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 212
|
r
|
install-packages.R
|
# Install the CRAN packages required by this role from a US CRAN mirror.
install.packages(c('ape', 'biom', 'optparse', 'RColorBrewer', 'randomForest', 'vegan'), repos = "http://cran.us.r-project.org")
# Install Bioconductor packages via the legacy biocLite installer.
# NOTE(review): biocLite was retired with Bioconductor 3.8 (R 3.5);
# BiocManager::install() is the current equivalent -- confirm the target
# R/Bioconductor version before modernizing.
source('http://bioconductor.org/biocLite.R')
biocLite(c('DESeq2', 'metagenomeSeq'))
|
b5a2d723a855ac35c1cae5df2076886c8a47df2e
|
0169e2d76b415c5ce11d290ff543f37fd9fec70f
|
/SMBKC/code/map_new.R
|
61a98856a0a8a789a1c5da4bcfd0bb65ef847e68
|
[] |
no_license
|
commfish/BSAI_crab_assessments
|
91765b2aeb96cdb3861e614c9fc1dbf811d77f5c
|
4ed906b77c25ba6c1bc38c1586f91115693fbf28
|
refs/heads/master
| 2023-08-17T03:16:13.808993
| 2023-08-16T01:06:06
| 2023-08-16T01:06:06
| 197,654,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,786
|
r
|
map_new.R
|
# katie.palof@alaska.gov 9-5-19 / 4-22-22/ 8-25-22
# map creation for SMBKC trawl survey samples from recent years
# WIP
# https://geocompr.robinlovelace.net/adv-map.html
# load -------
library(PBSmapping)
library(tidyverse)
library(mapproj)
data("nepacLLhigh")
cur_yr = "2022"
folder = "smbkc_22f"
### data ---
# haul data from st.matt area NOAA trawl survey
# AKFIN data - crab data - EBS trawl survey - CPUE by Haul Downloads - "bkc st.matt's"
#C:\Users\kjpalof\Documents\BSAI_crab_assessments\SMBKC\data\trawl_survey\ebs-crab-cpue-69323-stmatt\2021
# Read the per-haul CPUE export for the current year.
smbkc_haul_cpue <- data.frame(read.csv(paste0(here::here(), "/SMBKC/data/trawl_survey/ebs-crab-cpue-69323-stmatt/",
                                              cur_yr, "/ebs-crab-cpue-69323-stmatt-6.csv"),
                              header = TRUE, as.is = TRUE))
# Keep recent survey years and mature males (>= 90 mm).
smbkc_haul_cpue %>%
  filter(SURVEY_YEAR >= 2013, SIZE_GROUP == "MALE_GE90") -> m.df
glimpse(m.df)
# Reduce to the columns needed for plotting.
m.df %>%
  dplyr::select(lat = MID_LATITUDE, long = MID_LONGITUDE, year = SURVEY_YEAR, catch = CRAB_NUM) -> df.out
# Alaska coastline polygons from PBSmapping.
nepacLLhigh %>%
  dplyr::select(group = PID, POS = POS, long = X, lat = Y) -> ak
# Map of catch by station, faceted by survey year.
# Fix: the y-axis previously read Latitude degrees 'W'; latitude is degrees N.
ggplot() +
  geom_polygon(data = ak, aes(long, lat, group = group), fill = 8, color = 'black') +
  theme(panel.background = element_rect(fill = 'white')) +
  xlab(expression(paste(Longitude^o, ~'W'))) +
  ylab(expression(paste(Latitude^o, ~'N'))) +
  coord_map(xlim = c(-176, -170.5), ylim = c(58.5, 61.5)) +
  geom_point(data = df.out, aes(long, lat, size = catch), color = "darkorange2") +
  facet_wrap(~year, dir = "v") +
  scale_size_area() +
  #FNGr::theme_sleek()+
  theme(axis.text.x = element_text(angle = 45, hjust = 1.0))
ggsave(paste0(here::here("SMBKC/", folder, "/doc/safe_figure/CrabN_Station.png")), dpi = 300,
       width = 8, height = 8, units = "in")
|
981105ac7b7e7db913e27e7821b6b473750f9f29
|
eda7552fae1cbb9a51050484b90f6ffeb5fc670a
|
/scripts/alluvial_plots_v3.R
|
fe3688a30e726ed7c6150c80dcf385ac9506ecb1
|
[] |
no_license
|
mcjmigdal/ConnectOR
|
d5c70cdaebf9c70d68e046997f9d3cdcaa953119
|
78671742f0e187ff538183cc6a2663058a8787ec
|
refs/heads/master
| 2022-11-21T03:34:17.625976
| 2020-03-26T12:08:26
| 2020-03-26T12:08:26
| 278,582,987
| 0
| 0
| null | 2020-07-10T08:39:12
| 2020-07-10T08:39:11
| null |
UTF-8
|
R
| false
| false
| 4,026
|
r
|
alluvial_plots_v3.R
|
library(dplyr)
library(ggplot2)
library(ggrepel)
library(ggalluvial)
library(grid)
library(ggplotify)
library(colorspace)
library(gridExtra)
#library(ggpubr)
#Packages for synteny plots
#install.packages("devtools")
#devtools::install_github("kassambara/ggpubr")
#install.packages("RIdeogram")
#install.packages("rsvg")
#library(RIdeogram)
#library(rsvg)
##################################################################################
# Fixed color palette keyed by orthology class name; (h.c.)/(l.c.) denote
# high/low confidence. Used by scale_fill_manual() in both plot functions.
cols = c("lncRNA"="springgreen4",
"protein_coding"="steelblue4",
"pseudogene" = "hotpink3",
"lncRNA(h.c.)" = "springgreen4",
"lncRNA(l.c.)" = "palegreen3",
"pc(h.c.)" = "steelblue4",
"pc(l.c.)" = "steelblue3",
"pseudogene(l.c.)" = "hotpink3",
"none" = "grey")
# Shared minimal theme: centered titles, no grid, no y axis, large x labels.
# Added to every plot before saving.
q <- theme_minimal() +
theme(plot.margin = margin(0, 0, 0, 0, "cm"),
plot.title = element_text(size = 25, hjust = 0.5),
plot.subtitle = element_text(size = 15, hjust = 0.5),
panel.grid = element_blank(),
axis.ticks.y = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
axis.title.x = element_blank(),
axis.text.x = element_text(size = 15, color = "black"))
counts_table <- function(sp1, sp2, path = "./classification/") {
  ## Read the sp1 -> sp2 classification file and return the pairwise class
  ## counts in long (lodes) form, ready for ggalluvial.
  classification_file <- paste0(path, sp1, "to", sp2, ".classification")
  raw <- read.csv(classification_file, sep = "\t", header = FALSE)
  # Only columns 2 and 3 are used (presumably the sp1 and sp2 class
  # labels, named V2/V3 -- confirm against the file format).
  class_pairs <- raw[, c(2, 3)]
  tallied <- class_pairs %>%
    group_by_all() %>%
    summarise(COUNT = n()) %>%
    as.data.frame()
  # Color flows by the destination (V3) class.
  tallied$id <- tallied$V3
  to_lodes_form(tallied, key = "class", axes = 1:2)
}
alluvial_plot <- function(sp1, sp2, path = "./classification/") {
  ## Alluvial plot of orthology class transitions between sp1 and sp2.
  ## Returns a ggplot object styled with the shared theme `q`.
  flows <- counts_table(sp1, sp2, path)
  base_plot <- ggplot(data = flows,
                      aes(x = class,
                          y = COUNT,
                          stratum = stratum,
                          alluvium = alluvium,
                          label = stratum))
  styled <- base_plot +
    geom_flow(aes(fill = stratum), width = 1/20, alpha = 0.6) +
    geom_stratum(aes(fill = stratum), width = 1/20, size = 0.5) +
    scale_fill_manual(values = cols) +
    # Repelled labels keep stratum names readable on narrow strata.
    geom_label_repel(aes(fill = NULL),
                     stat = "stratum",
                     size = 5,
                     direction = "y",
                     nudge_x = 0.05,
                     hjust = -0.05,
                     point.padding = 0,
                     segment.size = 0.8,
                     show.legend = FALSE) +
    scale_x_discrete(labels = c(sp1, sp2)) +
    guides(fill = "none")
  styled + q + ggtitle("Orthology predictions", paste0(sp1, " to ", sp2))
}
bar_plot <- function(sp1, sp2, path = "./classification/") {
  ## Stacked percentage bar of sp1 lncRNA fates, companion to the alluvial
  ## plot. The white title/subtitle keep the panel heights aligned with it.
  flows <- counts_table(sp1, sp2, path)
  lnc_rows <- flows[flows$stratum == "lncRNA" & flows$class == "V2", ]
  # Convert raw counts to percentages of the lncRNA total.
  lnc_rows$COUNT <- round(lnc_rows$COUNT * 100 / sum(lnc_rows$COUNT), digits = 2)
  stacked <- ggplot(data = lnc_rows,
                    aes(x = class, y = COUNT, label = paste0(COUNT, "%"), fill = id)) +
    geom_bar(aes(fill = id),
             position = "stack",
             stat = "identity",
             width = 1) +
    scale_fill_manual(values = cols) +  #c("springgreen4", "palegreen3", "steelblue4", "steelblue3", "grey50", "grey80")) +
    geom_text(position = position_stack(vjust = 0.5)) +
    guides(fill = "none") +
    scale_x_discrete(labels = "lncRNA")
  invisible_titles <- theme(plot.title = element_text(colour = "white"),
                            plot.subtitle = element_text(colour = "white"))
  stacked + q + ggtitle("1", "2") + invisible_titles
}
##################################################################################
# Entry point: Rscript alluvial_plots_v3.R <species1> <species2>
args = commandArgs(trailingOnly=TRUE)
sp1=args[1]
sp2=args[2]
all <- alluvial_plot(sp1, sp2, path = "./classification/")
bar <- bar_plot(sp1, sp2, path = "./classification/")
# Bar chart on the left (1/10 width), alluvial on the right.
plot <- grid.arrange(bar, all, ncol=2, widths=c(1, 9), nrow = 1)
plot_name <- paste0("./plots/", sp1, "to", sp2, ".plot", ".pdf")
ggsave(plot_name, plot, width=6, height=4, units="in", scale=3)
|
e2faf44e4d5efb1eb682636d276ed6d1a47f154f
|
850898c179e63adf03e07ec066046e3eba524aee
|
/reduce_colors/reduce_image_colors.R
|
ea0d597290699b5a60cc67257be425ac2174d35c
|
[
"MIT"
] |
permissive
|
zettsu-t/cPlusPlusFriend
|
c658810a7392b71bbcd0fbf6e73fa106e227c0d0
|
8eefb1c18e1b57b1b7ca906027f08500f9fbefcc
|
refs/heads/master
| 2023-08-28T09:29:02.669194
| 2023-08-27T04:43:24
| 2023-08-27T04:43:24
| 81,944,943
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,206
|
r
|
reduce_image_colors.R
|
## Reduce the number of colors in an image
##
## How to launch
## from command line (note that --args is required):
## Rscript reduce_image_colors.R --args -i dir/input.png -o outdir -t text-on-images
## on Rstudio:
## arg_set <- c('-i', 'dir/input.png', '-o', 'outdir', '-t', 'text-on-images')
## open reduce_image_colors.R, set the working directory to the current one, and hit Ctrl-Alt-R
## This script reads dir/input.png, reduces its colors and writes reduced-color images in outdir/
## See reduce_image_colors() to know details of options.
library(plyr)
library(dplyr)
library(purrrlyr)
library(stringr)
library(tibble)
library(bit64)
library(car)
library(grDevices)
library(imager)
library(optparse)
library(rgl)
## Color-model identifiers accepted by the -m/--color_model option and
## dispatched on throughout this script.
g_color_rgb <- 'rgb'
g_color_hsv <- 'hsv'
g_color_yuv <- 'yuv'
get_position <- function(width_height, offst_pos) {
  ## Resolve a label offset to an absolute pixel coordinate: a positive
  ## offset counts from the left/top edge, a non-positive offset counts
  ## back from the image width/height (right/bottom anchored).
  if (offst_pos <= 0) width_height + offst_pos else offst_pos
}
add_label_to_original_image <- function(img, width, height, labels) {
  ## Stamp the word 'original' and the credit text onto a copy of img.
  ## Offsets in `labels` are resolved via get_position(), so negative
  ## values anchor to the right/bottom edges.
  x_orig <- get_position(width_height = width, offst_pos = labels$original_x)
  y_orig <- get_position(width_height = height, offst_pos = labels$original_y)
  x_name <- get_position(width_height = width, offst_pos = labels$name_x)
  y_name <- get_position(width_height = height, offst_pos = labels$name_y)
  tagged <- imager::draw_text(im = img, x = x_orig, y = y_orig,
                              text = 'original', color = labels$color,
                              opacity = labels$opacity, fsize = labels$fsize)
  imager::draw_text(im = tagged, x = x_name, y = y_name, text = labels$text,
                    color = labels$color, opacity = labels$opacity,
                    fsize = labels$fsize)
}
## Load a PNG, convert it to the requested color model, and return a list:
##   df        -- one row per pixel, channels c1/c2/c3 (color space depends
##                on color_model)
##   img       -- original cimg (RGB, alpha removed)
##   img_label -- labelled copy of img, or NA when labels is NULL
##   width, height, n_colors (distinct colors at 1024 levels/channel)
read_image <- function(in_png_filename, color_model, labels) {
## Remove alpha channels
start_time <- proc.time()
img <- imager::rm.alpha(imager::load.image(in_png_filename))
dim_img <- dim(img)
width <- dim_img[1]
height <- dim_img[2]
print(paste('Read', in_png_filename))
print(proc.time() - start_time)
## Value levels per pixel
## Channels are quantized to 1024 levels when counting distinct colors;
## the powers below pack (c1, c2, c3) into one integer64 key per pixel.
n_value_level <- 1024
n_value_level_2 <- n_value_level * n_value_level
n_value_level_3 <- n_value_level_2 * n_value_level
## Counts RGB color values
start_time <- proc.time()
## Convert to the working color space (RGB input is used as-is).
source_img <- if (color_model == g_color_rgb) {
img
} else if (color_model == g_color_hsv) {
imager::RGBtoHSV(img)
} else if (color_model == g_color_yuv) {
imager::RGBtoYUV(img)
}
## Flatten to (pixels x channels); channels become columns c1..c3.
img_array <- array(data=source_img, dim=c(width * height, dim_img[3] * dim_img[4]))
df <- tibble::as_tibble(img_array) %>% dplyr::rename(c1=V1, c2=V2, c3=V3)
## Pack channels into a single key to count distinct colors cheaply.
df$color <- as.integer64(df$c1 * n_value_level_3) + as.integer64(df$c2 * n_value_level_2) + as.integer64(df$c3 * n_value_level)
n_colors <- NROW(unique(df$color))
print(paste(n_colors, 'colors found'))
print(proc.time() - start_time)
img_label <- NA
if (!is.null(labels)) {
print(paste('Draw label', in_png_filename))
img_label <- add_label_to_original_image(img=img, width=width, height=height, labels=labels)
print(proc.time() - start_time)
}
## The packed key column is dropped; downstream clustering only needs c1..c3.
list(df=df %>% select(-c('color')), img=img, img_label=img_label, width=width, height=height, n_colors=n_colors)
}
add_label_to_new_image <- function(img, width, height, labels, text) {
  ## Stamp the color-count caption (`text`) onto a reduced-color image,
  ## positioned by the colors_x/colors_y offsets in `labels`.
  pos_x <- get_position(width_height = width, offst_pos = labels$colors_x)
  pos_y <- get_position(width_height = height, offst_pos = labels$colors_y)
  imager::draw_text(im = img, x = pos_x, y = pos_y, text = text,
                    color = labels$color, opacity = labels$opacity,
                    fsize = labels$fsize)
}
cluster_colors <- function(df, n_colors) {
  ## Quantize the pixel table `df` (columns c1..c3) into n_colors clusters
  ## with k-means, then replace every pixel by its cluster centroid.
  ## Returns list(color_map = centroid table, new_image_df = per-pixel rows).
  t0 <- proc.time()
  ## Reduces colors by the k-means algorithm
  ## Try other clustering and dimension-reducing methods!
  km_fit <- kmeans(x = df, centers = n_colors, iter.max = 100000)
  print('k-means')
  print(proc.time() - t0)
  ## Maps pixels to cluster numbers
  t0 <- proc.time()
  pixel_clusters <- as_tibble(km_fit$cluster)  # one row per pixel, col 'value'
  centroids <- as_tibble(km_fit$centers)
  centroids$value <- 1:NROW(centroids)
  ## Faster than by_row()
  remapped <- inner_join(pixel_clusters, centroids, by = 'value')
  print(paste0('Make a ', n_colors, '-colors map'))
  print(proc.time() - t0)
  list(color_map = centroids, new_image_df = remapped)
}
reduce_colors <- function(img_data, labels, n_colors, color_model, show_images, out_png_filenames) {
  ## Quantize the image in img_data down to n_colors colors, write the
  ## reduced image plus labelled and side-by-side variants to PNG, and
  ## optionally show interactive 2D/3D previews.
  ##
  ## img_data          : list returned by read_image()
  ## labels            : label placement/style list (see reduce_image_colors)
  ## n_colors          : target number of colors (k for k-means)
  ## color_model       : g_color_rgb, g_color_hsv or g_color_yuv
  ## show_images       : when TRUE, plot results and open a 3D color scatter
  ## out_png_filenames : list of output paths (post, post_label, joint_*)
  ## Returns a list of the generated cimg objects.
  img_reduced <- cluster_colors(df=img_data$df, n_colors=n_colors)
  ## Saves the new RGB image
  start_time <- proc.time()
  new_img <- imager::as.cimg(obj=c(img_reduced$new_image_df$c1, img_reduced$new_image_df$c2, img_reduced$new_image_df$c3),
                             x=img_data$width, y=img_data$height, cc=3)
  ## Convert back to RGB if clustering ran in another color space.
  new_img <- if (color_model == g_color_rgb) {
    new_img
  } else if (color_model == g_color_hsv) {
    imager::HSVtoRGB(new_img)
  } else if (color_model == g_color_yuv) {
    imager::YUVtoRGB(new_img)
  }
  plot(new_img)
  out_filename <- out_png_filenames$post
  imager::save.image(im=new_img, file=out_filename)
  ## Saves the new image with a label
  text <- paste0(stringr::str_to_upper(color_model), ' ', n_colors, ' colors')
  new_img_label <- add_label_to_new_image(img=new_img, width=img_data$width, height=img_data$height, labels=labels, text=text)
  out_filename <- out_png_filenames$post_label
  imager::save.image(im=new_img_label, file=out_filename)
  ## Combines the original and new images (unlabelled then labelled) and saves them
  joint_horizontal_img <- imager::imappend(imlist=list(img_data$img, new_img), axis='x')
  out_filename <- out_png_filenames$joint_horizontal
  imager::save.image(im=joint_horizontal_img, file=out_filename)
  joint_vertical_img <- imager::imappend(imlist=list(img_data$img, new_img), axis='y')
  out_filename <- out_png_filenames$joint_vertical
  imager::save.image(im=joint_vertical_img, file=out_filename)
  joint_horizontal_img_label <- imager::imappend(imlist=list(img_data$img_label, new_img_label), axis='x')
  out_filename <- out_png_filenames$joint_horizontal_label
  imager::save.image(im=joint_horizontal_img_label, file=out_filename)
  joint_vertical_img_label <- imager::imappend(imlist=list(img_data$img_label, new_img_label), axis='y')
  out_filename <- out_png_filenames$joint_vertical_label
  imager::save.image(im=joint_vertical_img_label, file=out_filename)
  print(paste0('Joint images and save PNG file', color_model))
  print(proc.time() - start_time)
  if (isTRUE(show_images)) {
    plot(new_img_label)
    surface_colors <- c()
    labels_3d <- list(x='', y='', z='')
    if (color_model == g_color_rgb) {
      surface_colors <- grDevices::rgb(img_reduced$color_map$c1, img_reduced$color_map$c2, img_reduced$color_map$c3)
      labels_3d <- list(x='Red', y='Green', z='Blue')
    } else if (color_model == g_color_hsv) {
      ## imager stores hue in degrees; grDevices::hsv() expects [0, 1].
      surface_colors <- grDevices::hsv(img_reduced$color_map$c1 / 360.0, img_reduced$color_map$c2, img_reduced$color_map$c3)
      labels_3d <- list(x='Hue', y='Saturation', z='Value')
    } else if (color_model == g_color_yuv) {
      ## BT.601 YUV -> RGB conversion for the scatter-point colors.
      ## Fix: the green-channel U coefficient was 0.244, a typo for the
      ## standard 0.344 (G = Y - 0.344*U - 0.714*V).
      surface_colors <- grDevices::rgb(pmax(0.0, pmin(1.0, (img_reduced$color_map$c1 + 1.402 * img_reduced$color_map$c3))),
                                       pmax(0.0, pmin(1.0, (img_reduced$color_map$c1 - 0.344 * img_reduced$color_map$c2 - 0.714 * img_reduced$color_map$c3))),
                                       pmax(0.0, pmin(1.0, (img_reduced$color_map$c1 + 1.772 * img_reduced$color_map$c2))))
      labels_3d <- list(x='Y', y='U', z='V')
    }
    ## NOTE(review): rgl::par3d() normally takes named parameters; confirm
    ## this unnamed vector does what was intended.
    rgl::par3d(c(10, 10, 240, 240))
    car::scatter3d(x=img_reduced$color_map$c1, y=img_reduced$color_map$c2, z=img_reduced$color_map$c3,
                   xlab=labels_3d$x, ylab=labels_3d$y, zlab=labels_3d$z,
                   axis.col=rep('black', 3), surface.col=surface_colors,
                   surface=FALSE, sphere.size=2, groups=factor(1:NROW(img_reduced$color_map)),
                   ellipsoid.alpha=0.03)
    invisible(readline(prompt='Press [enter] to continue'))
  }
  list(new_img=new_img, new_img_label=new_img_label,
       joint_horizontal_img=joint_horizontal_img, joint_horizontal_img_label=joint_horizontal_img_label,
       joint_vertical_img=joint_vertical_img, joint_vertical_img_label=joint_vertical_img_label)
}
## Run the full reduce pipeline once per entry of n_color_set against a
## single source image. Returns list(img_data, images); `images` is NA when
## no_reduced_images is TRUE (images not kept in memory).
reduce_n_color_set <- function(original_filename, labels, n_color_set, color_model, show_images, out_dirname, no_reduced_images) {
img_data <- read_image(in_png_filename=original_filename, color_model=color_model, labels=labels)
images <- lapply(n_color_set, function(n_colors) {
## e.g. input.png -> color_256_input_rgb (extension stripped, model appended)
file_basename <- paste0('color_', n_colors, '_', basename(original_filename)) %>%
stringr::str_replace_all('(.*?)\\.[^.]*$', paste0('\\1', '_', color_model))
joint_horizontal_prefix <- 'joint_horizontal'
joint_vertical_prefix <- 'joint_vertical'
no_label_suffix <- '.png'
label_suffix <- '_label.png'
out_png_filenames <- list(post=file.path(out_dirname, paste0(file_basename, no_label_suffix)),
post_label=file.path(out_dirname, paste0(file_basename, label_suffix)),
joint_horizontal=file.path(out_dirname, paste0(joint_horizontal_prefix, file_basename, no_label_suffix)),
joint_horizontal_label=file.path(out_dirname, paste0(joint_horizontal_prefix, file_basename, label_suffix)),
joint_vertical=file.path(out_dirname, paste0(joint_vertical_prefix, file_basename, no_label_suffix)),
joint_vertical_label=file.path(out_dirname, paste0(joint_vertical_prefix, file_basename, label_suffix)))
images <- reduce_colors(img_data=img_data, labels=labels, n_colors=n_colors, color_model=color_model,
show_images=show_images, out_png_filenames=out_png_filenames)
## Checks whether this script reduced the number of colors in the input image
## (re-reading the written PNG; only exact for the RGB model, since other
## models round-trip through a color-space conversion)
verify_data <- read_image(in_png_filename=out_png_filenames$post, color_model=g_color_rgb, labels=NULL)
if (g_color_rgb == color_model) {
assertthat::assert_that(n_colors == verify_data$n_colors)
}
if (no_reduced_images == TRUE) {
NA
} else {
images
}
})
if (no_reduced_images == TRUE) {
list(img_data=img_data, images=NA)
} else {
list(img_data=img_data, images=images)
}
}
## Parse command-line options, prepare the output directory and label
## layout, and run reduce_n_color_set() for every count in n_color_set.
## Returns list(original, images, color_model, out_dirname).
reduce_image_colors <- function(args, n_color_set) {
parser <- OptionParser()
parser <- optparse::add_option(parser, c('-i', '--input'), type='character', action='store',
dest='input_filename', default='incoming/input.png', help='Input original image file')
parser <- optparse::add_option(parser, c('-o', '--outdir'), type='character', action='store',
dest='out_dirname', default='out', help='Output directory')
## The text is drawn on copies of the input images to show its credit or byline.
parser <- optparse::add_option(parser, c('-t', '--text'), type='character', action='store',
dest='text', default='by whom', help='Text on the original image')
parser <- optparse::add_option(parser, c('-x', '--no_reduced_images'), action='store_true',
dest='no_reduced_images', default=FALSE, help='Do not keep generated images on memory')
parser <- optparse::add_option(parser, c('-s', '--show_images'), action='store_true',
dest='show_images', default=FALSE, help='Show interactive 2D and 3D images')
parser <- optparse::add_option(parser, c('-m', '--color_model'), type='character', action='store',
dest='color_model', default=g_color_rgb, help='Color model')
opts <- optparse::parse_args(parser, args=args)
original_filename <- opts$input_filename
out_dirname <- opts$out_dirname
text <- opts$text
## Create the output directory when it does not exist yet.
if ((stringr::str_length(out_dirname) > 0) && (dir.exists(out_dirname) == FALSE)) {
if (dir.create(out_dirname) == FALSE) {
stop(paste0('Failed in creating ', out_dirname, '/'))
}
}
## Do not write to /
if (stringr::str_length(out_dirname) == 0) {
out_dirname <- '.'
}
## Adjust the width of characters
## (rough 9px-per-character estimate to right-align the credit text)
name_x <- -(12 + stringr::str_length(text) * 9)
## Negative offsets anchor to the right/bottom edges (see get_position()).
labels <- list(original_x=-80, original_y=2,
name_x=name_x, name_y=-20, text=text,
colors_x=2, colors_y=2, color='white', opacity=1, fsize=6)
result <- reduce_n_color_set(original_filename=original_filename, labels=labels,
n_color_set=n_color_set, color_model=opts$color_model, show_images=opts$show_images,
out_dirname=out_dirname, no_reduced_images=opts$no_reduced_images)
list(original=result$img_data, images=result$images, color_model=opts$color_model, out_dirname=out_dirname)
}
## Collect the script's arguments: everything after '--args' when run via
## Rscript, or a user-defined global `arg_set` when run inside RStudio.
get_args <- function() {
## Set args in Rstudio
## NOTE(review): when a global `arg_set` exists, the local assignment below
## is skipped and the final `arg_set` resolves to the global via lexical
## scoping -- intentional, but fragile; confirm before refactoring.
if (exists('arg_set') == FALSE) {
arg_set <- c()
}
command_args <- commandArgs(trailingOnly=FALSE)
if ((NROW(command_args) > 0) && (command_args[1] != 'RStudio')) {
## Exclude --args
## (usage requires a literal '--args' token; tail(..., -1) drops it)
arg_set <- tail(commandArgs(trailingOnly=TRUE), -1)
}
print("This script takes args")
print(arg_set)
arg_set
}
write_8_level_images <- function() {
  ## Generate reduced-color images and, when all eight color counts were
  ## produced, montage them (pairs -> rows -> grids) into summary PNGs in
  ## the output directory.
  ## Returns the result list from reduce_image_colors(), or NA when images
  ## were not kept in memory (-x/--no_reduced_images).
  ## n_color_set <- c(2, 4, 8, 16, 32, 64, 128, 256)
  n_color_set <- c(256)
  result <- reduce_image_colors(args=get_args(), n_color_set=n_color_set)
  ## result$images is NA when --no_reduced_images was given, otherwise a
  ## list with one entry per color count. Fix: `is.na()` on a multi-element
  ## list yields a vector, which errors inside if() in R >= 4.2 -- test the
  ## type instead.
  if (!is.list(result$images)) {
    return(NA)
  }
  color_model <- result$color_model
  out_dirname <- result$out_dirname
  ## Do not write to /
  if (stringr::str_length(out_dirname) == 0) {
    out_dirname <- '.'
  }
  ## The montage below hard-codes exactly eight reduced images. Fix: skip it
  ## when fewer counts were requested (e.g. the default c(256)), which
  ## previously failed with a subscript-out-of-bounds error.
  if (length(result$images) < 8) {
    return(result)
  }
  img_upper_1 <- imager::imappend(imlist=list(result$images[1][[1]]$new_img_label, result$images[2][[1]]$new_img_label), axis='x')
  img_lower_1 <- imager::imappend(imlist=list(result$images[3][[1]]$new_img_label, result$images[4][[1]]$new_img_label), axis='x')
  img_upper_2 <- imager::imappend(imlist=list(result$images[5][[1]]$new_img_label, result$images[6][[1]]$new_img_label), axis='x')
  img_lower_2 <- imager::imappend(imlist=list(result$images[7][[1]]$new_img_label, result$images[8][[1]]$new_img_label), axis='x')
  img_wide_1 <- imager::imappend(imlist=list(img_upper_1, img_lower_1), axis='x')
  img_square_1 <- imager::imappend(imlist=list(img_upper_1, img_lower_1), axis='y')
  img_wide_2 <- imager::imappend(imlist=list(img_upper_2, img_lower_2), axis='x')
  img_square_2 <- imager::imappend(imlist=list(img_upper_2, img_lower_2), axis='y')
  img_wide_12 <- imager::imappend(imlist=list(img_wide_1, img_wide_2), axis='x')
  img_all <- imager::imappend(imlist=list(img_wide_12, result$original$img_label), axis='x')
  ## Append the color model and extension to a montage base name.
  make_png_filename <- function(filename) {
    paste0(filename, color_model, '.png')
  }
  imager::save.image(im=img_wide_1, file=file.path(out_dirname, make_png_filename('img_wide_1')))
  imager::save.image(im=img_wide_2, file=file.path(out_dirname, make_png_filename('img_wide_2')))
  imager::save.image(im=img_square_1, file=file.path(out_dirname, make_png_filename('img_square_1')))
  imager::save.image(im=img_square_2, file=file.path(out_dirname, make_png_filename('img_square_2')))
  imager::save.image(im=img_wide_12, file=file.path(out_dirname, make_png_filename('img_wide_12')))
  imager::save.image(im=img_all, file=file.path(out_dirname, make_png_filename('img_all')))
  result
}
result <- write_8_level_images()
|
847e06ec0681f93b9dd841a0ab36b40c12552319
|
29792357241afdd2d527c5f17c990f78e5e3e69f
|
/plot2.R
|
010524dadc68df80fde886b449940ed826ea3b2a
|
[] |
no_license
|
wanggith/ExData_Plotting1
|
c535e2a68b1d0586a385208f26591de270d9113c
|
71e8be1492365750ad01c39f48579e986140002e
|
refs/heads/master
| 2021-01-15T05:45:47.488619
| 2016-01-10T22:09:11
| 2016-01-10T22:09:11
| 49,376,982
| 0
| 0
| null | 2016-01-10T17:20:40
| 2016-01-10T17:20:40
| null |
UTF-8
|
R
| false
| false
| 545
|
r
|
plot2.R
|
# Plot 2: Global Active Power over 2007-02-01/02 from the UCI household
# power consumption data. '?' marks missing values in the raw file.
data <- read.table("./household_power_consumption.txt", header=TRUE, sep=";", colClasses=c("character", "character", rep("numeric",7)), na="?")
# Keep only the two target dates (d/m/yyyy in the raw file).
data_sub <- data[data$Date %in% c("1/2/2007","2/2/2007"), ]
datetime <- strptime(paste(data_sub$Date, data_sub$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
global_active_power <- data_sub$Global_active_power
# Write the plot next to the script. Fix: the original hard-coded Windows
# path ("C:\Users\...") used unescaped backslashes -- "\U" without hex
# digits is a parse error in R, so the script could not run at all.
png(filename="plot2.png", width=480, height=480, units="px")
plot(datetime, global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
5510b5dc5ec737b3b6026faabc0242c2877b1dbe
|
98a0bd2de4836b813642df0faf5f0b5bd31f7617
|
/man/starInstallation.Rd
|
d364911e365219310240aaac940c640ade275ee7
|
[] |
no_license
|
inambioinfo/chimera
|
7bf3834f72464e546b83f52704354acbc9c329bc
|
17e0580ccd842a57f519fd968bc9df3d9ec29a0f
|
refs/heads/master
| 2021-06-25T06:56:13.520654
| 2017-04-24T19:50:57
| 2017-04-24T19:50:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
rd
|
starInstallation.Rd
|
\name{starInstallation}
\alias{starInstallation}
\title{A function to download STAR}
\description{A function allowing the download and installation of STAR (Dobin et al. Bioinformatics 2012) in chimera package folder. The function also creates soft links in the user bin folder to allow the call of the above mentioned program.
}
\usage{starInstallation(binDir, os=c("unix","mac"))}
\arguments{
\item{binDir}{The user bin folder}
\item{os}{The supported operating systems}
}
\author{Raffaele A Calogero}
\examples{
#starInstallation(binDir="/somewhere/inyourpc/bin", os="mac")
}
\keyword{utilities}
|
ed234de3fee0d3dfe8ed52a667c0f46c601c84c5
|
266efa63779ac8ad5dbada8943fe1d6de8cb0cfe
|
/Shiny.R
|
ede887ffad2609fc8fbe6bec5208f4323927eda8
|
[
"MIT"
] |
permissive
|
evocativebloom8/m14-shiny
|
1420d38a2621ae1d0e4e98f199ec7afa5adb2541
|
422888fd6d3939136e438d35a84688f022fb68dc
|
refs/heads/master
| 2020-07-27T19:51:06.644558
| 2016-11-16T04:20:00
| 2016-11-16T04:20:00
| 73,426,443
| 0
| 0
| null | 2016-11-10T22:33:54
| 2016-11-10T22:33:54
| null |
UTF-8
|
R
| false
| false
| 467
|
r
|
Shiny.R
|
# NOTE(review): this file contains BOTH a shinyUI() and a shinyServer()
# definition -- the "# server.R" marker suggests ui.R and server.R were
# concatenated. As a single file this would need shinyApp(ui, server);
# confirm whether the two halves should live in separate files.
library(shiny)
# Define UI for an application that has a title
shinyUI(
# Specify a fluidPage layout (most common)
fluidPage(
# Create a title in your fluidPage
titlePanel("Hello Shiny!")
)
)
# server.R
library(shiny)
shinyServer(function(input, output) {
# Create a histogram property of the output
output$histogram <- renderPlot({
# Use shiny's renderPlot function to render a plot
# (100 standard-normal draws, so the plot changes on each render)
x <- rnorm(100)
return(hist(x))
})
})
|
51178beb0292e194eae1d7fa5b54e2ef22fcad2c
|
c6fca8eec6da3dc340ab6dd8b471d02c45acf022
|
/power_expansions.R
|
568c0c59329b4b5fe54d969467595e2dea991a3c
|
[] |
no_license
|
lhf28/Signals-from-the-past
|
7d40a78e440c10146a4bde49c8a54707eeebb430
|
3c848334c96f00b238e0a00c0de0b6cb15257393
|
refs/heads/main
| 2023-06-19T04:24:38.491875
| 2021-07-21T13:17:00
| 2021-07-21T13:17:00
| 368,926,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,413
|
r
|
power_expansions.R
|
########################################################### SETUP ##################################################################
# Power comparison plots for expansion scenarios: Tajima's D vs nucleotide
# diversity under neutral / extreme / mild expansion, at selection
# coefficients s = 0.04 and s = 0.06, across six time intervals.
setwd("~/Desktop/DissertationR")
library(coala)
activate_msms(jar = "~/Desktop/DissertationR/msms/lib/msms.jar", priority = 500, download = FALSE)
list_simulators()
########################################################### EXPANSION POWER DIFFERENCES ##################################################################
### NEUTRAL POWERS
# Each .RData file provides an FPRs* list; element [[i]][1] is the power for
# time interval i (5 kya ... 200 kya).
load("Tajneutralpwr1.RData")
load("Tajneutralpwr2.RData")
load("nuc_divneutralpwr1.RData")
load("nuc_divneutralpwr2.RData")
### EXTREME
load("TajDExExpanpwr1.RData")
load("TajDExExpanpwr2.RData")
load("nuc_divExExpanpwr1.RData")
load("nuc_divExExpanpwr2.RData")
### MILD
load("TajDMExpanpwr1.RData")
load("TajDMExpanpwr2.RData")
load("nuc_divMExpanpwr1.RData")
load("nuc_divMExpanpwr2.RData")
### EXTREME EXPANSION POWERS
### s = 0.04
# Files are re-loaded before each extraction because the FPRs* object names
# collide between files (e.g. FPRs2 exists in several of them).
load("Tajneutralpwr1.RData")
coeff1taj <- c(FPRs2[[1]][1],FPRs2[[2]][1],FPRs2[[3]][1],FPRs2[[4]][1],FPRs2[[5]][1],FPRs2[[6]][1])
load("nuc_divneutralpwr1.RData")
coeff1nucdiv <- c(FPRs6[[1]][1],FPRs6[[2]][1],FPRs6[[3]][1],FPRs6[[4]][1],FPRs6[[5]][1],FPRs6[[6]][1])
load("TajDExExpanpwr1.RData")
coeff2taj <- c(FPRs1[[1]][1],FPRs1[[2]][1],FPRs1[[3]][1],FPRs1[[4]][1],FPRs1[[5]][1],FPRs1[[6]][1])
load("nuc_divExExpanpwr1.RData")
coeff2nucdiv <- c(FPRs1[[1]][1],FPRs1[[2]][1],FPRs1[[3]][1],FPRs1[[4]][1],FPRs1[[5]][1],FPRs1[[6]][1])
# NOTE(review): ylim = c(0.0:1.0) evaluates to c(0, 1) -- it works, but
# c(0, 1) would say so directly.
plot(coeff1taj, type = "o", lty = 2, lwd = 2, col = "pink", ylim = c(0.0:1.0), axes = FALSE, ann = FALSE)
axis(1, at=1:6, lab = c("5 kya", "10 kya", "20 kya", "50 kya", "100 kya", "200 kya"))
axis(2, tick = , las = 1, at = c(0.0,0.2,0.4,0.6,0.8,1.0))
box()
# Solid lines = expansion scenario, dashed = neutral; pink = Tajima's D,
# light blue = nucleotide diversity.
lines(coeff2taj, type = "o", pch = 21, lty = "solid", lwd = 2, col = "pink")
lines(coeff1nucdiv, type = "o", pch = 21, lty = 2, lwd = 2, col = "lightblue")
lines(coeff2nucdiv, type = "o", pch = 21, lty = "solid", lwd = 2, col = "lightblue")
title(main = "s = 0.04")
title(xlab = "Time interval")
title(ylab = "Power")
legend("bottomright", cex = 0.9, c("Neutral Tajima's D", "Neutral nucleotide diversity (π)","Extreme expansion Tajima's D", "Extreme expansion nucleotide diversity (π)"), col = c("pink", "lightblue", "pink", "lightblue"), pch = 21, lty = c(2,2,1,1))
### s = 0.06
load("Tajneutralpwr2.RData")
coeff3taj <- c(FPRs3[[1]][1],FPRs3[[2]][1],FPRs3[[3]][1],FPRs3[[4]][1],FPRs3[[5]][1],FPRs3[[6]][1])
load("nuc_divneutralpwr2.RData")
coeff3nucdiv <- c(FPRs7[[1]][1],FPRs7[[2]][1],FPRs7[[3]][1],FPRs7[[4]][1],FPRs7[[5]][1],FPRs7[[6]][1])
load("TajDExExpanpwr2.RData")
coeff4taj <- c(FPRs2[[1]][1],FPRs2[[2]][1],FPRs2[[3]][1],FPRs2[[4]][1],FPRs2[[5]][1],FPRs2[[6]][1])
load("nuc_divExExpanpwr2.RData")
coeff4nucdiv <- c(FPRs2[[1]][1],FPRs2[[2]][1],FPRs2[[3]][1],FPRs2[[4]][1],FPRs2[[5]][1],FPRs2[[6]][1])
plot(coeff3taj, type = "o", lty = 2, lwd = 2, col = "pink", ylim = c(0.0:1.0), axes = FALSE, ann = FALSE)
axis(1, at=1:6, lab = c("5 kya", "10 kya", "20 kya", "50 kya", "100 kya", "200 kya"))
axis(2, tick = , las = 1, at = c(0.0,0.2,0.4,0.6,0.8,1.0))
box()
lines(coeff4taj, type = "o", pch = 21, lty = "solid", lwd = 2, col = "pink")
lines(coeff3nucdiv, type = "o", pch = 21, lty = 2, lwd = 2, col = "lightblue")
lines(coeff4nucdiv, type = "o", pch = 21, lty = "solid", lwd = 2, col = "lightblue")
title(main = "s = 0.06")
title(xlab = "Time interval")
title(ylab = "Power")
legend("bottomright", cex = 0.9, c("Neutral Tajima's D", "Neutral nucleotide diversity (π)","Extreme expansion Tajima's D", "Extreme expansion nucleotide diversity (π)"), col = c("pink", "lightblue", "pink", "lightblue"), pch = 21, lty = c(2,2,1,1))
### MILD EXPANSION POWERS
# (section header previously said "MILD BOTTLENECK"; the data loaded below
# is the mild-expansion set, matching this file's topic)
### s = 0.04
load("Tajneutralpwr1.RData")
coeff1taj <- c(FPRs2[[1]][1],FPRs2[[2]][1],FPRs2[[3]][1],FPRs2[[4]][1],FPRs2[[5]][1],FPRs2[[6]][1])
load("nuc_divneutralpwr1.RData")
coeff1nucdiv <- c(FPRs6[[1]][1],FPRs6[[2]][1],FPRs6[[3]][1],FPRs6[[4]][1],FPRs6[[5]][1],FPRs6[[6]][1])
load("TajDMExpanpwr1.RData")
coeff2taj <- c(FPRs3[[1]][1],FPRs3[[2]][1],FPRs3[[3]][1],FPRs3[[4]][1],FPRs3[[5]][1],FPRs3[[6]][1])
load("nuc_divMExpanpwr1.RData")
coeff2nucdiv <- c(FPRs3[[1]][1],FPRs3[[2]][1],FPRs3[[3]][1],FPRs3[[4]][1],FPRs3[[5]][1],FPRs3[[6]][1])
plot(coeff1taj, type = "o", lty = 2, lwd = 2, col = "pink", ylim = c(0.0:1.0), axes = FALSE, ann = FALSE)
axis(1, at=1:6, lab = c("5 kya", "10 kya", "20 kya", "50 kya", "100 kya", "200 kya"))
axis(2, tick = , las = 1, at = c(0.0,0.2,0.4,0.6,0.8,1.0))
box()
lines(coeff2taj, type = "o", pch = 21, lty = "solid", lwd = 2, col = "pink")
lines(coeff1nucdiv, type = "o", pch = 21, lty = 2, lwd = 2, col = "lightblue")
lines(coeff2nucdiv, type = "o", pch = 21, lty = "solid", lwd = 2, col = "lightblue")
title(main = "s = 0.04")
title(xlab = "Time interval")
title(ylab = "Power")
# NOTE(review): this section plots the MILD expansion data, but the legend
# text below still says "Extreme expansion" -- likely a copy-paste slip;
# confirm and update the legend strings if so.
legend("bottomright", cex = 0.9, c("Neutral Tajima's D", "Neutral nucleotide diversity (π)","Extreme expansion Tajima's D", "Extreme expansion nucleotide diversity (π)"), col = c("pink", "lightblue", "pink", "lightblue"), pch = 21, lty = c(2,2,1,1))
### s = 0.06
load("Tajneutralpwr2.RData")
coeff3taj <- c(FPRs3[[1]][1],FPRs3[[2]][1],FPRs3[[3]][1],FPRs3[[4]][1],FPRs3[[5]][1],FPRs3[[6]][1])
load("nuc_divneutralpwr2.RData")
coeff3nucdiv <- c(FPRs7[[1]][1],FPRs7[[2]][1],FPRs7[[3]][1],FPRs7[[4]][1],FPRs7[[5]][1],FPRs7[[6]][1])
load("TajDMExpanpwr2.RData")
coeff4taj <- c(FPRs4[[1]][1],FPRs4[[2]][1],FPRs4[[3]][1],FPRs4[[4]][1],FPRs4[[5]][1],FPRs4[[6]][1])
load("nuc_divMExpanpwr2.RData")
coeff4nucdiv <- c(FPRs4[[1]][1],FPRs4[[2]][1],FPRs4[[3]][1],FPRs4[[4]][1],FPRs4[[5]][1],FPRs4[[6]][1])
plot(coeff3taj, type = "o", lty = 2, lwd = 2, col = "pink", ylim = c(0.0:1.0), axes = FALSE, ann = FALSE)
axis(1, at=1:6, lab = c("5 kya", "10 kya", "20 kya", "50 kya", "100 kya", "200 kya"))
axis(2, tick = , las = 1, at = c(0.0,0.2,0.4,0.6,0.8,1.0))
box()
lines(coeff4taj, type = "o", pch = 21, lty = "solid", lwd = 2, col = "pink")
lines(coeff3nucdiv, type = "o", pch = 21, lty = 2, lwd = 2, col = "lightblue")
lines(coeff4nucdiv, type = "o", pch = 21, lty = "solid", lwd = 2, col = "lightblue")
title(main = "s = 0.06")
title(xlab = "Time interval")
title(ylab = "Power")
legend("bottomright", cex = 0.9, c("Neutral Tajima's D", "Neutral nucleotide diversity (π)","Extreme expansion Tajima's D", "Extreme expansion nucleotide diversity (π)"), col = c("pink", "lightblue", "pink", "lightblue"), pch = 21, lty = c(2,2,1,1))
|
d465eaadea2b47178e940a48f94f174e52104f69
|
c8b9dd433edc3ae10dfea91ec7e3c50123085ca5
|
/app.R
|
80514324fb3f46c46adf0cda06f1a019abfa50b9
|
[] |
no_license
|
brunocosta88/EvolucaoBandaLargaBrasil_2007-2020
|
06e21b7d2ff38f324cf967001878b928d8bce317
|
f5793967383e84af0db2b78c0983ca717f19a170
|
refs/heads/master
| 2023-01-22T04:47:03.667380
| 2020-12-03T23:02:02
| 2020-12-03T23:02:02
| 318,315,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,248
|
r
|
app.R
|
# More info:
# https://github.com/jcheng5/googleCharts
# Install:
# devtools::install_github("jcheng5/googleCharts")
library(googleCharts)
library(shiny)
library(dplyr)
# Download the broadband-access dataset (RDS) to a local temp file and load it.
url <- "https://github.com/brunocosta88/EvolucaoBandaLargaBrasil_2007-2020/raw/master/acessosBandaLarga.rds"
destfile <- "./temp.rds"
download.file(url,destfile)
acessos <- readRDS("temp.rds")
# Coerce grouping columns to factors and year to numeric for filtering/animation.
acessos$area <- as.factor(acessos$area)
acessos$Regiao <- as.factor(acessos$Regiao)
acessos$UF <- as.factor(acessos$UF)
acessos$Ano <- as.numeric(acessos$Ano)
#View(acessos)
#print(unique(acessos$Faixa.de.Velocidade))
# Use global max/min for axes so the view window stays
# constant as the user moves between years
xlim2 <- list(min = 0.55,
max = 0.86)
# NOTE(review): max(200) is just 200 -- presumably a hard cap on the density
# axis rather than a data-driven maximum; confirm intent.
ylim2 <- list(min = min(acessos$Densidade),
max = max(200))
ylim <- list(min = min(acessos$Densidade),
max = 200)
xlim <- list(min = min(log(acessos$Acessos)),
max = 25)
# Technology and speed-band labels used by the UI controls.
tecnologias = c("Cable Modem","xDSL","HFC","Fibra", "Outras")
faixas <- c("Baixa (ate 2Mbps)", "Media (2Mbps a 34Mbps)", "Alta (>34Mbps)")
# Shiny UI: a navbar with three tabs -- a state-level view, a
# technology-level view, and an "about" page. Both chart tabs pair a Google
# bubble chart with checkbox filters and an animated year slider.
ui <-
  navbarPage(
    "Projeto VISemap2020",
    # Tab 1: broadband-access density vs. municipal HDI, filtered by
    # region / area / state, animated over the years 2007-2020.
    tabPanel("Por Estado",
      fluidPage(
        titlePanel(
          "Relacao entre Densidade de Acessos de Banda Larga e IDHM dos Municipios Brasileiros"
        ),
        sidebarLayout(
          sidebarPanel(
            checkboxGroupInput(
              "reg",
              "Selecione a Regiao",
              choices = unique(acessos$Regiao),
              selected = unique(acessos$Regiao)
            ),
            checkboxGroupInput(
              "area",
              "Selecione a Area",
              choices = unique(acessos$area),
              selected = unique(acessos$area)
            ),
            # State checkboxes are rendered server-side so the pre-selected
            # states can follow the chosen regions (see output$lista_UF).
            uiOutput("lista_UF")
          ),
          mainPanel(
            # This line loads the Google Charts JS library
            googleChartsInit(),
            # Use the Google webfont "Source Sans Pro"
            tags$link(
              href = paste0(
                "http://fonts.googleapis.com/css?",
                "family=Source+Sans+Pro:300,600,300italic"
              ),
              rel = "stylesheet",
              type = "text/css"
            ),
            tags$style(type = "text/css",
                       "body {font-family: 'Source Sans Pro'}"),
            #h2("Google Charts demo"),
            h2("Densidade de Acessos Telecom Brasil"),
            googleBubbleChart(
              "chart",
              width = "100%",
              height = "475px",
              # Set the default options for this chart; they can be
              # overridden in server.R on a per-update basis. See
              # https://developers.google.com/chart/interactive/docs/gallery/bubblechart
              # for option documentation.
              options = list(
                fontName = "Source Sans Pro",
                fontSize = 13,
                # Set axis labels and ranges
                hAxis = list(title = "IDHM",
                             viewWindow = xlim2),
                vAxis = list(title = "Densidade de Acessos Banda Larga",
                             viewWindow = ylim2),
                sizeAxis = list(maxValue = max(acessos$Populacao),
                                minValue = min(acessos$Populacao)),
                # The default padding is a little too spaced out
                chartArea = list(
                  top = 50,
                  left = 75,
                  height = "75%",
                  width = "75%"
                ),
                # Allow pan/zoom
                explorer = list(),
                # Set bubble visual props
                bubble = list(
                  opacity = 0.4,
                  stroke = "none",
                  # Hide bubble label
                  textStyle = list(color = "none")
                ),
                # Set fonts
                titleTextStyle = list(fontSize = 16),
                tooltip = list(textStyle = list(fontSize = 12))
              )
            ),
            # Year slider driving the animation of tab 1.
            fluidRow(shiny::column(
              4,
              offset = 4,
              sliderInput(
                "ano",
                "Ano",
                min = 2007,
                max = 2020,
                value = min(acessos$Ano),
                animate = TRUE
              )
            ))
          )
        )
      )),
    # Tab 2: density vs. log of total accesses, filtered by region, area,
    # speed band and access technology.
    tabPanel("Por Tecnologia",
      fluidPage(
        titlePanel(
          "Relacao entre Densidade de Acessos de Banda Larga e IDHM dos Municipios Brasileiros"
        ),
        sidebarLayout(
          sidebarPanel(
            checkboxGroupInput(
              "reg2",
              "Selecione a Regiao",
              choices = unique(acessos$Regiao),
              selected = unique(acessos$Regiao)
            ),
            checkboxGroupInput(
              "area2",
              "Selecione a Area",
              choices = unique(acessos$area),
              selected = unique(acessos$area)
            ),
            checkboxGroupInput(
              "faixa",
              "Selecione a Faixa de Velocidade",
              choices = faixas,
              selected = faixas
            ),
            checkboxGroupInput(
              "tecn",
              "Selecione a Tecnologia",
              choices = tecnologias,
              selected = tecnologias
            )
          ),
          mainPanel(
            # Second bubble chart ("chart2"); googleChartsInit() was already
            # called in the first tab, so only the chart widget is needed.
            googleBubbleChart(
              "chart2",
              width = "100%",
              height = "475px",
              # Set the default options for this chart; they can be
              # overridden in server.R on a per-update basis. See
              # https://developers.google.com/chart/interactive/docs/gallery/bubblechart
              # for option documentation.
              options = list(
                fontName = "Source Sans Pro",
                fontSize = 13,
                # Set axis labels and ranges
                hAxis = list(title = "Log Acessos de Banda Larga",
                             viewWindow = xlim),
                vAxis = list(title = "Densidade de Acessos de Banda Larga",
                             viewWindow = ylim),
                sizeAxis = list(maxValue = max(acessos$Populacao),
                                minValue = min(acessos$Populacao)),
                # The default padding is a little too spaced out
                chartArea = list(
                  top = 50,
                  left = 75,
                  height = "75%",
                  width = "75%"
                ),
                # Allow pan/zoom
                explorer = list(),
                # Set bubble visual props
                bubble = list(
                  opacity = 0.4,
                  stroke = "none",
                  # Hide bubble label
                  textStyle = list(color = "none")
                ),
                # Set fonts
                titleTextStyle = list(fontSize = 16),
                tooltip = list(textStyle = list(fontSize = 12))
              )
            ),
            # Year slider driving the animation of tab 2.
            fluidRow(shiny::column(
              4,
              offset = 4,
              sliderInput(
                "ano2",
                "Ano",
                min = 2007,
                max = 2020,
                value = min(acessos$Ano),
                animate = TRUE
              )
            ))
          )
        )
      )),
    # Tab 3: static "about" page with author / course information.
    tabPanel("Sobre",
      fluidPage(
        titlePanel(
          "Projeto de VisEmap2020, Bruno M. Costa"
        ),
        p("Projeto do curso de Visualizacao da Informacao, da Escola de Matematica Aplicada da FGV no segundo semestre de 2020"),
        p("Aluno: Bruno Martins Costa, email:", a("brunocosta88@gmail.com")),
        p("Professor(a): Asla Medeiros e Sa")
      )
    )
  )
# Shiny server. Rebuilds the data objects locally (mirroring the top-level
# script) so the function is self-contained, then wires three reactives:
# AnoData (tab 1 data), TecData (tab 2 data) and the region-dependent state
# checkboxes (output$lista_UF).
server <- function(input, output, session) {
  library(googleCharts)
  library(shiny)
  library(dplyr)

  # Download and prepare the data set (same steps as the UI script).
  url <- "https://github.com/brunocosta88/EvolucaoBandaLargaBrasil_2007-2020/raw/master/acessosBandaLarga.rds"
  destfile <- "./temp.rds"
  download.file(url, destfile)
  acessos <- readRDS("temp.rds")
  acessos$area <- as.factor(acessos$area)
  acessos$Regiao <- as.factor(acessos$Regiao)
  acessos$UF <- as.factor(acessos$UF)
  acessos$Ano <- as.numeric(acessos$Ano)
  tecnologias <- c("Cable Modem", "xDSL", "HFC", "Fibra", "Outras")
  faixas <- c("Baixa (ate 2Mbps)", "Media (2Mbps a 34Mbps)", "Alta (>34Mbps)")

  # Provide explicit colors for regions, so they don't get recoded when the
  # different series happen to be ordered differently from year to year.
  # http://andrewgelman.com/2014/09/11/mysterious-shiny-things/
  defaultColors <- c("#3366cc", "#dc3912", "#ff9900", "#109618",
                     "#990099", "#0099c6", "#dd4477")
  series <- structure(
    lapply(defaultColors, function(color) list(color = color)),
    names = levels(acessos$Regiao)
  )

  # Tab 1 data: one bubble per municipality for the selected year, columns
  # in the order the Google Bubble Chart expects (name, x, y, color, size),
  # sorted by region so series colors stay stable.
  AnoData <- reactive({
    acessos %>%
      filter(Ano == input$ano,
             UF %in% input$estados,
             area %in% input$area) %>%
      group_by(Municipio, IDHM, Densidade, Regiao, Populacao) %>%
      summarise(Acessos = sum(Acessos)) %>%
      select(Municipio, IDHM, Densidade, Regiao, Populacao) %>%
      arrange(Regiao)
  })

  output$chart <- reactive({
    # Return the data and options for the tab 1 chart.
    list(
      data = googleDataTable(AnoData()),
      options = list(
        title = sprintf("IDHM versus Densidade, %s", input$ano),
        series = series
      )
    )
  })

  output$chart2 <- reactive({
    # Return the data and options for the tab 2 chart.
    list(
      data = googleDataTable(TecData()),
      options = list(
        title = sprintf("Densidade de Acessos versus Acessos Telecom, %s",
                        input$ano2),
        series = series
      )
    )
  })

  # Tab 2 data: bubbles filtered by year, speed band, technology, area and
  # region; x is the log of the summed access count.
  TecData <- reactive({
    # "Outras" stands for every technology not listed explicitly, so expand
    # it to the remaining levels found in the data.
    if ("Outras" %in% input$tecn) {
      tecn <- c(unique(acessos$Tecnologia[!(acessos$Tecnologia %in% tecnologias)]),
                input$tecn)
    } else {
      tecn <- input$tecn
    }
    # BUG FIX: input$faixa comes from a checkboxGroupInput and can hold
    # zero, one or several bands; the previous `if (input$faixa == ...)`
    # chain only handled a single selection (and errors on R >= 4.2 when
    # more than one box is ticked). Accumulate the matching speed levels
    # for every selected band instead.
    vel <- unique(acessos$Faixa.de.Velocidade)
    faixa.sel <- character(0)
    if ("Baixa (ate 2Mbps)" %in% input$faixa) {
      faixa.sel <- c(faixa.sel, vel[c(1, 2, 4, 5)])
    }
    if ("Media (2Mbps a 34Mbps)" %in% input$faixa) {
      faixa.sel <- c(faixa.sel, vel[c(3, 6, 7)])
    }
    if ("Alta (>34Mbps)" %in% input$faixa) {
      faixa.sel <- c(faixa.sel, vel[8])
    }
    acessos %>%
      filter(
        Ano == input$ano2,
        Faixa.de.Velocidade %in% faixa.sel,
        Tecnologia %in% tecn,
        area %in% input$area2,
        Regiao %in% input$reg2
      ) %>%
      group_by(Municipio, Densidade, Regiao, Populacao) %>%
      summarise(Acessos = sum(Acessos)) %>%
      mutate(log.Acessos = log(Acessos)) %>%
      select(Municipio, log.Acessos, Densidade, Regiao, Populacao) %>%
      arrange(Regiao)
  })

  # Server-rendered state checkboxes: all states offered, with only those
  # belonging to the selected regions pre-selected.
  output$lista_UF <- renderUI({
    # BUG FIX: input$reg is a vector of selected regions; `Regiao ==
    # input$reg` recycled element-wise and silently dropped rows, so use
    # %in% instead.
    data_sub <- subset(acessos, Regiao %in% input$reg)
    checkboxGroupInput(
      "estados",
      "Selecione os Estados",
      choices = unique(acessos$UF),
      selected = unique(data_sub$UF)
    )
  })
}
shinyApp(ui = ui, server = server)
|
5b6e2cfb78d049c7eca07f0ca13c10b5bf93c580
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleadmindirectoryv1.auto/man/directory.customers.update.Rd
|
86af9ff4d0a6f1d986d441a5f5ac3277835b8911
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,016
|
rd
|
directory.customers.update.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_functions.R
\name{directory.customers.update}
\alias{directory.customers.update}
\title{Updates a customer.}
\usage{
directory.customers.update(Customer, customerKey)
}
\arguments{
\item{Customer}{The \link{Customer} object to pass to this method}
\item{customerKey}{Id of the customer to be updated}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/admin.directory.customer
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/admin.directory.customer"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/admin-sdk/directory/}{Google Documentation}
Other Customer functions: \code{\link{Customer}},
\code{\link{directory.customers.patch}}
}
|
13188b7c97b45b22bae21e306799b05e38d479ea
|
7a7d01f06ec8ddf5dabab431ecef063955353ae1
|
/misc/future_topics.R
|
7bc05c6a67dedec470244871b16d7a04674dff87
|
[] |
no_license
|
rsoren/r_training_beira2017
|
314c190787821942e53e380b4d5521196f3eaa8b
|
6e51e32c2ac3e07e24afcbcec5821a46a960b3ac
|
refs/heads/master
| 2021-01-01T04:16:17.877155
| 2017-07-28T11:48:36
| 2017-07-28T11:48:36
| 97,157,463
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,148
|
r
|
future_topics.R
|
#
# future_topics.R
#
# Reed Sorensen
# July 2017
#
# Outline of topics for future R training sessions; the only executable
# code is the challenge vector at the bottom.
#
# LISTS
# list, as.list
# str
# index by [], [[]] and $
# -- Challenge:
# 1. Create a list called 'my_list' that contains
# an element with 1, 2, 3 and another element with "a", "b", "c"
# 2. Have R give you the contents of the second element
# The output should look like: "a" "b" "c"
#
# DATA FRAMES
# data.frame, head, tail
# index data frames with $ (like a list) and [] (like a matrix)
# -- for creating new variables
# subset rows and columns with the subset() function
# 'dplyr' package -- more on this later
# SUMMARIZING DATA
# str, class
# table, summary, mean, sd
# READING, WRITING, and CHANGING FILES
# write.csv, read.csv
# saveRDS, readRDS
# 'foreign' package for other file types
# -- example with Access database
# help(files)
# OTHERS
# lm, plot, abline,
# INSPECTING THE OBJECTS and the ENVIRONMENT
# ls, getwd, setwd, list.files
#####
# CHALLENGES
#-- Insert the number 168 between 12 and 13
# using ONLY operations on 'x' (don't just rewrite the whole vector)
# (hint: think about append() or indexed concatenation)
x <- c(88, 5, 12, 13)
|
919dcd6cd94da91f8d0b0e27f683a5e100727f7e
|
fcdbdb4dcbde7d0c37cca433de2ea317214ea6ae
|
/man/ZIPPCApn.Rd
|
a6cc2cd4c219638e0158b162c462832e580ec8dc
|
[] |
no_license
|
YanyZeng/mbDenoise
|
c0566ca038ef2ad6eda8948e3f50370d3934b614
|
3200490f601b2b24898914d90fdbea503780d818
|
refs/heads/main
| 2022-07-27T13:55:55.308756
| 2021-09-26T10:47:25
| 2021-09-26T10:47:25
| 380,777,633
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,963
|
rd
|
ZIPPCApn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ZIPPCApn.R
\name{ZIPPCApn}
\alias{ZIPPCApn}
\title{ZIPPCApn}
\usage{
ZIPPCApn(
X,
V = NULL,
family = "negative.binomial",
n.factors = 2,
rank = FALSE,
trace = FALSE,
maxit = 100,
parallel = TRUE
)
}
\arguments{
\item{X}{matrix of observations.}
\item{V}{vector of the sample covariate.}
\item{family}{distribution of models. Two options are "poisson" and "negative.binomial". Defaults to "negative.binomial".}
\item{n.factors}{the rank or number of factors, after dimensional reduction. Defaults to 2.}
\item{rank}{logical, if TRUE, the rank or number of factors, is chosen from 1 to 5 by HIC (hybrid information criterion). Defaults to FALSE.}
\item{trace}{logical, defaults to \code{FALSE}. if \code{TRUE} each current iteration step information will be printed.}
\item{maxit}{maximum number of iterations within \code{optim} and \code{constrOptim} function, defaults to 100.}
\item{parallel}{logical, if TRUE, use parallel toolbox to accelerate.}
}
\value{
\item{VLB }{ variational lower bound of log likelihood}
\item{lvs}{list of latent variables
\itemize{
\item{pi }{ the probabilities of excess zeros}
\item{factor_scores }{ coordinates or factor scores in low-dimensional subspace}
\item{factor_scores2 }{ coordinates or factor scores in low-dimensional subspace with default rank 2, which is suitable for visualization.}
}}
\item{params}{list of model parameters
\itemize{
\item{factor_coefs_j }{ coefficients of latent variables factor scores, or factor loadings}
\item{factor_coefs_0 }{ taxon-specific intercepts}
\item{alpha }{ sample-specific coefficient that adjusts for the sequencing depth}
\item{dispersion }{ taxon-specific over-dispersion parameter for negative binomial distribution}
\item{gamma }{ coefficients of the sample covariate}
\item{tuo }{ taxon-specific parameter of zero-inflation probability}
\item{c }{ sample-specific parameter of zero-inflation probability}
}}
\item{Q }{ the underlying composition of microbiome data}
\item{muz }{ the denoised counts of microbiome data}
\item{hic}{ the number of the rank selection, chosen by HIC type information criterion}
}
\description{
Microbiome data denoising framework (mbDenoise) with zero-inflated probabilistic PCA with Poisson (ZIPPCA-Poi) and negative-binomial model (ZIPPCA-NB),
which can be used for downstream statistical analysis including ordination, compositional
normalization, differential abundance analysis, etc. mbDenoise with ZIPPCA-NB model is recommended for empirical data analysis.
}
\examples{
n.n = 60
n.w = 100
n.factors = 2
set.seed(1)
si <- diag(n.factors)
me <- c(0,0)
f <- matrix(0,nrow = n.n, ncol = n.factors)
for(i in 1:n.n){
f[i,] <- rnorm(n.factors, mean = 0, sd = 1)
}
betaj <- matrix(0,nrow = n.w, ncol = n.factors)
for(j in 1:n.w){
betaj[j,] <- runif(n.factors,-3,3)
}
alpha <- runif(n.n,-5,5)
beta0 <- rep(0,n.w)
g <- rep(0,n.w*0.5*0.5)
gamma <- c(g,-g,rep(0,(n.w-n.w*0.5)))
X_cov<- c(rep(1,n.n/2),rep(0,n.n/2))
ll <- f \%*\% t(betaj) +matrix(alpha,n.n,n.w)+matrix(beta0,n.n,n.w,byrow=TRUE)
exp_mat <- exp(ll)
eta_mat <- matrix(0.25,n.n,n.w,byrow=TRUE)
z <- matrix(0,n.n,n.w,byrow = TRUE)
for(i in 1:n.n){
z[i,] <- rbinom(n.w, size=1, prob=eta_mat[i,])
}
sum <- rowSums((1-z)*exp_mat)
Qn_z <- (1-z)*exp_mat/sum
sum <- rowSums(exp_mat)
Qn <- exp_mat/sum
X <- matrix(0,n.n,n.w,byrow = TRUE)
for(i in 1:n.n){
for(j in 1:n.w){
X[i,j] <- rnbinom(n=1,size=10,mu=exp_mat[i,j])
}
}
X[z==1]=0
zerorow <- which(rowSums(X)==0)
if(length(zerorow) >0 ){
X <- X[-zerorow,];X_cov<-X_cov[-zerorow];f <- f[-zerorow,];
Qn <- Qn[-zerorow,];Qn_z <- Qn_z[-zerorow,];
}
zerocol <- which(colSums(X)==0)
if(length(zerocol) >0 ){
X <- X[,-zerocol];betaj <- t(t(betaj)[,-zerocol]);
Qn <- Qn[,-zerocol];Qn_z <- Qn_z[,-zerocol];
}
re_zinb_cov <- ZIPPCApn(X,X_cov)
re_zinb <- ZIPPCApn(X)
}
|
b2f3e9f83cb0c9423cc0356c7e9200740a1cb081
|
39f8eec5c6c7210208675f48835d0a2254947fd5
|
/man/MDScols.Rd
|
4a7d725fee32b17c6e543cf6d36cd9532603abc7
|
[
"MIT"
] |
permissive
|
peterjuv/myHelpers
|
5b4586433ae729f8e8b816b00b74841cbeb26d8e
|
cf976325abad166158428c89b4521149440580df
|
refs/heads/master
| 2023-05-23T04:08:40.473975
| 2021-06-10T08:04:52
| 2021-06-10T08:04:52
| 292,005,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,353
|
rd
|
MDScols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myHelpers.R
\name{MDScols}
\alias{MDScols}
\alias{MASS_MDScols}
\alias{plotIsoMDS}
\title{MDS by columns and optional plot using Minkowski metrics}
\usage{
MDScols(
data,
scale = FALSE,
center = FALSE,
FUN = "isoMDS",
p = 2,
selection = "pairwise",
top = 500,
k = 2,
maxit = 50,
trace = TRUE,
tol = 1e-04,
plot = FALSE,
labels = names(data),
col = NULL,
cex = 1,
main = NULL,
cex.main = 1,
xlab = "Coordinate 1",
ylab = "Coordinate 2",
...
)
MASS_MDScols(
data,
scale = FALSE,
center = FALSE,
method = "euclidean",
FUN = "isoMDS",
p = 2,
k = 2,
maxit = 50,
trace = TRUE,
tol = 0.001,
plot = FALSE,
labels = names(data),
col = NULL,
cex = 1,
main = NULL,
cex.main = 1,
xlab = "Coordinate 1",
ylab = "Coordinate 2",
...
)
plotIsoMDS(FUN = "isoMDS", plot = TRUE, selection = NULL, ...)
}
\arguments{
\item{data}{Matrix-like object to MDS (and plot) distances between columns}
\item{scale}{Logical scale data; standardize together with center}
\item{center}{Logical center data; standardize together with scale}
\item{FUN}{MDS function from MASS, default "isoMDS", alternative "sammon"}
\item{p}{Power of the Minkowski distance, passed to distance calculation \code{dist(...)} and \code{isoMDS(...)}}
\item{selection}{Character "pairwise" or "common" for selection of rows or NULL for using all rows; default "pairwise"}
\item{top}{Integer number of rows for distance calculation, default 500}
\item{k}{Desired dimension for the solution, passed to \code{cmdscale(...)} through \code{FUN}}
\item{maxit}{Max number of iterations, passed to \code{isoMDS(maxit)} or \code{sammon(niter = maxit)},}
\item{trace}{Print trace, passed to \code{FUN()},}
\item{tol}{Tolerance, passed to \code{FUN()},}
\item{plot}{Logical, plot using R, default FALSE}
\item{labels}{Character vector of alternative column names, default \code{names(data)}}
\item{col}{Colors of labels}
\item{cex}{Size of labels}
\item{main}{String or TRUE to generate title generated automatically; default NULL}
\item{cex.main}{Size of title}
\item{xlab}{a label for the x axis, defaults to a description of \code{x}.}
\item{ylab}{a label for the y axis, defaults to a description of \code{y}.}
\item{...}{other \link[graphics]{graphical parameters} (see \code{\link[graphics]{par}} and
section \sQuote{Details} below).}
\item{method}{Distance metrics, passed to \code{dist(...)}, must be one of "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski"}
}
\value{
A k-column vector of the fitted configuration from \code{FUN()}
A k-column vector of the fitted configuration from \code{FUN()}
}
\description{
Parametric (\code{stats::cmdscale()}) and non-parametric MDS (\code{MASS::isoMDS()} or \code{MASS::sammon()})
Data may be scales and/or centered before distance calculation.
Distances are calculated between columns on a (possibly subset) of rows.
Similar to limma::plotMDS in terms of subsetting rows, but it allows for all rows for distance calculation, while limma uses only top=XX genes
Uses MASS::isoMDS or MASS::sammon
Similar to limma::plotMDS, except that it uses all parameters for distance calculation,
while limma uses only top=XX genes
}
\section{TODO}{
add parameter dim.plot
}
|
fa90387fab4d7c1b105062f029256473b0c5dfa7
|
da5d0b3125b53246b84ef8f6250db0f932d74eb8
|
/DataScience/Rscripts/d0315_Ex.R
|
0d972bfea1da41263ee5f0b92cf1300c4812b3a4
|
[] |
no_license
|
HyeonGyuChi/2019_1st_Semester
|
fd373496a4970fdb5566fb33f48b28facad9a377
|
4d7bcf575634088110ade43951a49752ca3475f6
|
refs/heads/master
| 2020-05-07T17:36:32.285358
| 2020-03-19T13:33:28
| 2020-03-19T13:33:28
| 180,732,477
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,376
|
r
|
d0315_Ex.R
|
# Exercise (2) == Lab #2: array practice
# 20155342 Ji Hyeon-gyu
# 3) @@@ Solution to exercise (2) @@@
# 1) Even/odd number handling
# Create odd and even vectors between 1 and 10
(odd = seq(1,10,2))
(even = seq(2,10,2))
# Combine the odd and even vectors
(total = c(odd, even) )
# sorting
(stotal = sort(total))
# Remove the even numbers from the stotal vector
(newEven = setdiff(stotal, even))
# 2) BMI for five people
height = c(1.6,1.7,1.8,1.76,1.85)
weight = c(67,78,46,78,50)
print("몸무게")  # string means "weight" (kept in Korean for the output)
mean(weight) # mean
sd(weight) # standard deviation
max(weight) # maximum
min(weight) # minimum
median(weight) # median
print("키")  # string means "height" (kept in Korean for the output)
mean(height) # mean
sd(height) # standard deviation
max(height) # maximum
min(height) # minimum
median(height) # median
(BMI = weight / (height^2)) # compute BMI
# BMI of 23 or above == "normal" (original author's note; TODO confirm --
# 23+ is usually classified as overweight under Asian-Pacific cutoffs)
(BMI > 23)
# @@@ Report #2: matrix practice == exercise (3) @@@
BMI = matrix(data = NA,nrow = 5, ncol = 2)
names = c("hong", "kim", "park", "jin", "han")
inform = c("height", "weight")
# Set the matrix dimnames (rows: person names, columns: height/weight)
rownames(BMI) = names
colnames(BMI) = inform
# Fill the matrix with the data
BMI[,1] = c(1.6,1.7,1.8,1.76,1.85)
BMI[,2] = c(67,78,46,78,50)
# Mean, sd, max, min and median of height and weight (column-wise)
(avg = apply(BMI, 2, mean))
(sd = apply(BMI, 2, sd))
(max = apply(BMI, 2, max))
(min = apply(BMI, 2, min))
(median = apply(BMI, 2, median))
|
8c0fd7e7b9cb42e5f9792bdd8267bf25eb885fe9
|
6c6334d3d716da34aae8079f7f673c2324ddf480
|
/tests/testthat/test-function-info_to_text.R
|
31608c553f3c92ee07c750bc56ccadb594a55108
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.code
|
94f80f51b2977cd0c0fda094f3c7796e1cea95cf
|
bc81324403e3881124fa2230c023807eba26e32d
|
refs/heads/master
| 2023-08-17T07:40:18.766253
| 2023-07-15T05:50:50
| 2023-07-15T05:50:50
| 140,209,624
| 0
| 0
|
MIT
| 2023-08-06T22:33:32
| 2018-07-08T23:23:47
|
R
|
UTF-8
|
R
| false
| false
| 394
|
r
|
test-function-info_to_text.R
|
#
# This test file has been generated by kwb.test::create_test_files()
# launched by user hauke on 2021-11-27 17:51:46.
# You are strongly encouraged to modify the dummy functions
# so that real cases are tested. You should then delete this comment.
#
# Smoke test: calling the internal helper with no arguments must error.
test_that("info_to_text() works", {
  expect_error(
    kwb.code:::info_to_text()
    # argument "info" is missing (no default value)
  )
})
|
5d203b1bc49ceabda8cf9f58bf67eea5cdaaf47c
|
e8d3a7368b14680795236b836b97b63488b68544
|
/terraform/provisioners/libraries.R
|
a8ce3df7e34bf6ac8b863de45355d6c7ecb18ee2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Quantanalyst/insight_newvision_ci_pipeline
|
de1bf4f12ef9f155008470751cb334765eedb0c1
|
92b4adddfe62c4c37cc4e30bd695c67b835dbebc
|
refs/heads/master
| 2022-11-19T22:02:17.882518
| 2020-07-06T20:54:06
| 2020-07-06T20:54:06
| 268,702,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
libraries.R
|
# Install additional R libraries on the provisioned host.
# FIX: the previous checks used `"pkg" %in% installed.packages()`, which
# matches against the WHOLE matrix (versions, paths, ...), not just package
# names; match against rownames(installed.packages()) instead.
pkgs_needed <- c("RJDBC", "data.table")
pkgs_installed <- rownames(installed.packages())
for (pkg in pkgs_needed) {
  if (!pkg %in% pkgs_installed) install.packages(pkg)
}
# Previously provisioned packages, kept for reference:
# "testthat", "dplyr", "reshape2", "stringr"
|
e7cc0bd1d4915451bcaa0c926f8485f545b6568a
|
aa9735132f52b22bc6768ef7bec8e5e034a85b47
|
/R/plotDirectClass.R
|
1a4c88ff713429532d4c275c73f26382828599da
|
[] |
no_license
|
cran/SPreFuGED
|
6e8ed93276cc96c878b0e8535cf687ee17aad2f3
|
a0146399644093188b3e62fbc53dff3ae6693dc1
|
refs/heads/master
| 2021-01-09T20:41:09.447515
| 2016-07-29T12:35:58
| 2016-07-29T12:35:58
| 64,473,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
plotDirectClass.R
|
# Box plot of per-classifier accuracy on the test sets.
#
# Args:
#   restDirectClass: matrix or data frame with one column per classification
#     function and one row per test-set repetition; column names are used as
#     classifier labels. Values are presumably error rates, since accuracy
#     is computed as 1 - value -- confirm with the caller.
#
# Returns: a lattice "trellis" object (printed automatically at top level).
plotDirectClass <- function(restDirectClass) {
  restDirectClass <- as.data.frame(restDirectClass)
  # Label vector: each column name repeated once per row, matching the
  # column-major stacking of the values below.
  classFxn <- rep(colnames(restDirectClass), each = nrow(restDirectClass))
  # FIX: the original concatenated columns 1..10 by hand, silently assuming
  # exactly 10 classifiers; unlist() stacks any number of columns.
  acc <- unlist(restDirectClass, use.names = FALSE)
  obsAcc <- data.frame(classFxn, acc)
  obsAcc$trAcc <- 1 - obsAcc$acc
  obsAcc$classifier <- as.numeric(as.factor(obsAcc$classFxn))
  class_name <- sort(unique(as.character(obsAcc$classFxn)))
  # TRUE/FALSE spelled out instead of the reassignable T/F shorthands.
  bwplot(trAcc ~ classifier, data = obsAcc, grid = TRUE, pch = 20, cex = 1.5,
         horizontal = FALSE, xlim = class_name, xlab = "Classifier",
         ylab = "Accuracy",
         main = "Accuracy of each classification function on test sets")
}
|
18a89b570a4f3618309dad0d7043cf2bd84e1037
|
342edb52f539db557b03a36dd1163a755f2c2d81
|
/Linear_Regression/AssignmentOne.R
|
f5bef1e6fa7d9e8f59fc009a9e043f9fe8a0dc0b
|
[] |
no_license
|
Akmystery/Data_Science_R
|
0ef90dd79c6d5fc6380dd8c9fb6d9253c43d6090
|
34ad0662e1c607bc3c3838553d912541dce07c58
|
refs/heads/master
| 2020-07-06T17:37:31.847012
| 2019-08-19T03:43:21
| 2019-08-19T03:43:21
| 203,092,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,083
|
r
|
AssignmentOne.R
|
# Exploratory analysis and linear-regression modelling of building energy
# efficiency: fits separate models for HeatingLoad and CoolingLoad, cleans
# influential points via Cook's distance, and predicts on a holdout file.

# Load and inspect the data set.
energyData <- read.csv("assign1_EnergyData.csv", header = TRUE)
str(energyData)
dim(energyData)
names(energyData)
head(energyData)
tail(energyData)
summary(energyData)
#histogram
hist(energyData$Compactness,col = 'lightgreen') #numeric
hist(energyData$SurfaceArea,col = 'lightgreen') #numeric
hist(energyData$WallArea,col = 'lightgreen') #numeric
hist(energyData$RoofArea,col = 'lightgreen') #numeric There is a big gap
hist(energyData$Height,col = 'lightgreen') #category
hist(energyData$Orientation,col = 'lightgreen') #category
hist(energyData$GlazingArea,col = 'lightgreen') #category
hist(energyData$GAreaDist,col = 'lightgreen') #category
hist(energyData$HeatingLoad,col = 'lightgreen') #target
hist(energyData$CoolingLoad,col = 'lightgreen') #target
# Bar plots of the categorical-looking variables (converted to factors).
orientation <- as.factor(energyData$Orientation)
barplot(table(orientation), col = 'lightgreen')
gArea <- as.factor(energyData$GlazingArea)
barplot(table(gArea), col = 'lightgreen')
gAreaDist <- as.factor(energyData$GAreaDist)
barplot(table(gAreaDist), col = 'lightgreen')
height <- as.factor(energyData$Height)
barplot(table(height), col = 'lightgreen')
#boxplot
boxplot(energyData$Compactness, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$SurfaceArea, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$WallArea, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$RoofArea, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$HeatingLoad, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$CoolingLoad, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$Height, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$Orientation, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$GlazingArea, horizontal = TRUE, col = 'steelblue')
boxplot(energyData$GAreaDist, horizontal = TRUE, col = 'steelblue')
#correlation
cor(energyData)
library(corrplot)
corrplot.mixed(cor(energyData))
pairs(energyData, pch = 19, col = "blue")
#There is a non-linear relationship between compactness and surface area
#Start Fitting for heating load
# Stepwise manual model building: start from all predictors (minus the other
# target), then drop/add terms based on the summaries.
lmHeatFit1 <- lm(HeatingLoad ~ (.- CoolingLoad), data = energyData)
summary(lmHeatFit1)
lmHeatFit2 <- update(lmHeatFit1, ~ . - RoofArea, data = energyData)
summary(lmHeatFit2)
lmHeatFit3 <- update(lmHeatFit2, ~ . - Orientation, data = energyData)
summary(lmHeatFit3)
lmHeatFit4 <- update(lmHeatFit3, ~ . + SurfaceArea:Compactness, data = energyData)
summary(lmHeatFit4)
lmHeatFit5 <- update(lmHeatFit4, ~ . + I(Compactness^2), data = energyData)
summary(lmHeatFit5)
lmHeatFit6 <- update(lmHeatFit5, ~ . - WallArea, data = energyData)
summary(lmHeatFit6)
#Line is not that flat
# NOTE(review): fit 5 (not 6) is carried forward from here -- presumably a
# deliberate choice after inspecting fit 6; confirm with the author.
summary(lmHeatFit5)
plot(lmHeatFit5)
# Drop high-influence rows (Cook's distance above the 4/n rule of thumb),
# then refit the chosen formula on the cleaned data.
cd <- cooks.distance(lmHeatFit5)
energyData.clean <- energyData[abs(cd) < 4/nrow(energyData), ] #cleaning the outlier? That's it
nrow(energyData.clean)
formula(lmHeatFit5)
lmHeatFit <- lm(formula(lmHeatFit5), data = energyData.clean)
summary(lmHeatFit)
plot(lmHeatFit)
#Start Fitting for Cooling Load
lmCoolFit1 <- lm(CoolingLoad ~ (.- HeatingLoad), data = energyData)
summary(lmCoolFit1)
lmCoolFit2 <- update(lmCoolFit1, ~ . - RoofArea, data = energyData)
summary(lmCoolFit2)
lmCoolFit3 <- update(lmCoolFit2, ~ . - (Orientation + GAreaDist), data = energyData)
summary(lmCoolFit3)
lmCoolFit4 <- update(lmCoolFit3, ~ . + Compactness:SurfaceArea, data = energyData)
summary(lmCoolFit4)
lmCoolFit5 <- update(lmCoolFit4, ~ . + I(SurfaceArea^2), data = energyData)
summary(lmCoolFit5)
lmCoolFit6 <- update(lmCoolFit5, ~ . + I(Compactness^3), data = energyData)
summary(lmCoolFit6)
summary(lmCoolFit6)
plot(lmCoolFit6)
# Same Cook's-distance cleanup for the cooling model.
cd <- cooks.distance(lmCoolFit6)
energyCoolData.clean <- energyData[abs(cd) < 4/nrow(energyData), ] #cleaning the outlier? That's it
# NOTE(review): this prints the HEATING cleaned row count; probably meant
# nrow(energyCoolData.clean) -- confirm.
nrow(energyData.clean)
formula(lmCoolFit6)
lmCoolFit <- lm(formula(lmCoolFit6), data = energyCoolData.clean)
summary(lmCoolFit)
plot(lmCoolFit)
#Prediction
newData <- read.csv("assign1_EnergyPred.csv", header = TRUE)
Heatprediction <- predict(lmHeatFit, newdata = newData)
Heatprediction
CoolPrediction <- predict(lmCoolFit, newdata = newData)
|
f5dd256ad08a083cec99f7b3028d81de00ca4df2
|
3acc2f91e116ea0e5ea148dd1711e71b4d8d2c22
|
/man/InteractionHelpers.Rd
|
2b93d8f99953f789828811a63833a317c3dc3a44
|
[] |
no_license
|
LTLA/fugi
|
ecceeb4f0d2cd8f122481283549396f1ba554587
|
d2f33b7d9c8deb468effc5d2fbe6b757837a7c93
|
refs/heads/master
| 2020-05-15T15:08:55.334600
| 2019-06-22T18:45:35
| 2019-06-22T18:45:35
| 182,362,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,344
|
rd
|
InteractionHelpers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/helpers.R
\docType{methods}
\name{is.pp}
\alias{is.pp}
\alias{is.pd}
\alias{is.pt}
\alias{is.dd}
\alias{is.dt}
\alias{is.tt}
\alias{isInteractionType}
\alias{is.trans}
\alias{is.cis}
\alias{InteractionHelpers}
\alias{is.pp,GenomicInteractions-method}
\alias{is.pd,GenomicInteractions-method}
\alias{is.pt,GenomicInteractions-method}
\alias{is.dd,GenomicInteractions-method}
\alias{is.dt,GenomicInteractions-method}
\alias{is.tt,GenomicInteractions-method}
\alias{isInteractionType,GenomicInteractions-method}
\alias{is.trans,GenomicInteractions-method}
\alias{is.cis,GenomicInteractions-method}
\title{Interaction type helper functions}
\usage{
is.pp(GIObject)
is.pd(GIObject)
is.pt(GIObject)
is.dd(GIObject)
is.dt(GIObject)
is.tt(GIObject)
isInteractionType(GIObject, x, y)
is.trans(GIObject)
is.cis(GIObject)
\S4method{is.pp}{GenomicInteractions}(GIObject)
\S4method{is.pd}{GenomicInteractions}(GIObject)
\S4method{is.pt}{GenomicInteractions}(GIObject)
\S4method{is.dd}{GenomicInteractions}(GIObject)
\S4method{is.dt}{GenomicInteractions}(GIObject)
\S4method{is.tt}{GenomicInteractions}(GIObject)
\S4method{isInteractionType}{GenomicInteractions}(GIObject, x, y)
\S4method{is.trans}{GenomicInteractions}(GIObject)
\S4method{is.cis}{GenomicInteractions}(GIObject)
}
\arguments{
\item{GIObject}{A \linkS4class{GenomicInteractions} object.}
\item{x, y}{Names of annotated node classes}
}
\value{
A logical vector indicating whether each entry of \code{GIObject} is of the specified type.
}
\description{
Functions to classify interactions within \linkS4class{GenomicInteractions} objects.
}
\details{
\code{isInteractionType} identifies all interactions that occur between the annotated node classes \code{x} and \code{y}.
\code{is.trans} and \code{is.cis} select trans-chromosomal and intra-chromosomal interactions, respectively.
The other functions are convenience wrappers for identifying interactions between regions of common annotations,
namely promoter \code{p}, distal \code{d} or terminator \code{t} regions.
}
\examples{
data(hic_example_data)
hic_example_data <- updateObject(hic_example_data)
table(is.cis(hic_example_data))
sum(interactionCounts(hic_example_data))
}
\author{
Malcolm Perry, Elizabeth Ing-Simmons
}
|
427332b6500584525012edc104aa8cee19c3adf0
|
1a47d66ed545c8d93953eb932d51d90c9cbf26a8
|
/R/99_country_list.R
|
2eeaf741791de03a2797f0d46aaf185514de6cba
|
[] |
no_license
|
agbarnett/pollies
|
b8a731832d4bbe30abd8bf94895f0ba572f84d1b
|
325d272c41e17b8f8084946511dd03c02d29b2bc
|
refs/heads/master
| 2022-05-01T13:02:49.965562
| 2022-03-20T01:38:47
| 2022-03-20T01:38:47
| 189,006,425
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
99_country_list.R
|
# 99_country_list.R
# Central definition of the country lists used by the other analysis
# scripts (sourced, not run directly).
# April 2021
## Japan was dropped in December 2019.

# All countries in the study.
countries <- c(
  "Austria", "Australia", "Canada", "Italy", "France", "Germany",
  "Netherlands", "NZ", "Switzerland", "UK", "USA"
)

# Countries with imputed life-table data.
countries.imputed <- c("Austria", "Canada", "Germany", "NZ", "USA", "UK")

# Countries with political-party data.
# Not available for: France (too much missing), Italy.
countries_with_politics <- c(
  "Austria", "Australia", "Canada", "Germany", "Netherlands", "NZ",
  "Switzerland", "USA", "UK"
)
|
441c1ab07b1eccf1d9f0b875ead87548d9bdad87
|
9b1127c1fa497e344018a603cde732980d0cc725
|
/best.R
|
7ff62befba0ad30367353abc7a790116c8427435
|
[] |
no_license
|
jbluesmith/ProgrammingAssignment3
|
5a3db4f5b1756bdc05904223879d4135b5a9e937
|
6f9a1237501fbcf4f10fcef6d475db43cb81c82f
|
refs/heads/master
| 2016-09-10T21:27:29.508940
| 2015-03-02T17:31:15
| 2015-03-02T17:31:15
| 31,553,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,711
|
r
|
best.R
|
# Find the hospital with the lowest 30-day mortality for a given outcome.
#
# Args:
#   state: state abbreviation, validated against column 7 ("State") of
#     outcome-of-care-measures.csv.
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#
# Returns: name of the hospital in `state` with the lowest 30-day mortality
# rate for `outcome`; non-numeric rates (e.g. "Not Available") are ignored.
# Stops with "invalid state" / "invalid outcome" on bad input.
best <- function(state, outcome) {
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  if (!state %in% data[, 7]) {
    stop("invalid state")
  }
  # Map each supported outcome to its mortality-rate column:
  #   column 11: 30-day mortality rate for heart attack
  #   column 17: 30-day mortality rate for heart failure
  #   column 23: 30-day mortality rate for pneumonia
  # (column 2 holds the hospital name)
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_cols)) {
    stop("invalid outcome")
  }
  .lowest_rate_hospital(data[data$State == state, ], outcome_cols[[outcome]])
}

# Helper: name of the hospital with the smallest rate in column `rate_col`.
# BUG FIX: the original kept the rate column as character (colClasses =
# "character") and ran which.min() on it, which fails for character input;
# rates are now converted to numeric first ("Not Available" becomes NA and
# is skipped by which.min()). This also removes the copy-pasted per-outcome
# branches and the unused `outcomes` vectors.
.lowest_rate_hospital <- function(by_state, rate_col) {
  rates <- suppressWarnings(as.numeric(by_state[[rate_col]]))
  as.character(by_state$Hospital.Name[which.min(rates)])
}
|
a41a6251b633db009fd0f703c702b558c854fa49
|
edc63123efa668eb1c0b6aab769c26d9ea59210d
|
/man/make_deseq.Rd
|
2dd44066ddcb19695512cce21d8964a7fd23c326
|
[] |
no_license
|
seedpcseed/phylodeseqr
|
3637463608675335fe93ef71745d1050fe9ffc52
|
89fe1981b06ace25adcbfbdc361674c7deb79846
|
refs/heads/master
| 2020-09-19T21:19:23.067925
| 2016-08-22T21:05:55
| 2016-08-22T21:05:55
| 66,208,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 717
|
rd
|
make_deseq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_deseq.R
\name{make_deseq}
\alias{make_deseq}
\title{Make a DESeq S4 object using a user design and a phyloseq S4 object}
\usage{
make_deseq(phy, design, design2 = "none")
}
\description{
Make a DESeq S4 object using a user design and a phyloseq S4 object
@param phy A phyloseq S4 object
@param design A string
@param design2 A string
@return A DESeq S4 object
@export
@examples
make.deseq(phy0,"Treatment+Time+Treatment:Time","Time")
}
|
9e1726c2a51fc162d07ebe17074e0c019a7dc00f
|
acfcc96992e838be8baab6fe3fc848165c21ad96
|
/plot4.R
|
e310b623363c87788b99b2aab3f8bc780190b2b5
|
[] |
no_license
|
Nagateja/ExData_Plotting1
|
9fc9d1e10386f7b12463f59675aed17eb142e8ec
|
41a761bad847ce85c694defbb3e7a7b88a6dc80d
|
refs/heads/master
| 2021-01-17T23:38:21.856115
| 2015-12-11T21:39:36
| 2015-12-11T21:39:36
| 47,843,881
| 0
| 0
| null | 2015-12-11T18:24:35
| 2015-12-11T18:24:35
| null |
UTF-8
|
R
| false
| false
| 1,664
|
r
|
plot4.R
|
## This script creates plot4.png: a 2x2 panel of time-series plots from the
## UCI "Individual household electric power consumption" data set,
## restricted to the two days 2007-02-01 and 2007-02-02.

## Reading the data in using read.table; fields are ';'-separated and the
## literal string "NA" marks missing values.
eda1 <- read.table("exdata-data-household_power_consumption/household_power_consumption.txt", sep = ";", na.strings = "NA", header = TRUE, stringsAsFactors = FALSE)

## Subsetting the two required days (dates are stored as d/m/yyyy strings).
reqdata <- subset(eda1, Date %in% c("1/2/2007", "2/2/2007"))

library(dplyr)

## Adding a combined Date_Time column and parsing it to POSIXlt.
reqdata <- mutate(reqdata, Date_Time = paste(Date, Time, sep = " "))
reqdata$Date_Time <- strptime(reqdata$Date_Time, "%d/%m/%Y %H:%M:%S")

## Changing the class of the character measurement columns (3-9) to numeric
## in a single vectorized assignment instead of one statement per column.
reqdata[, 3:9] <- lapply(reqdata[, 3:9], as.numeric)

## Opening png graphic device, plotting the four panels and closing the device.
png("plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
with(reqdata, {
  plot(Date_Time, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
  plot(Date_Time, Voltage, col = "black", type = "l", xlab = "datetime", ylab = "Voltage")
  plot(Date_Time, Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
  lines(Date_Time, Sub_metering_2, col = "red")
  lines(Date_Time, Sub_metering_3, col = "blue")
  legend("topright", bty = "n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1, lwd = 1.5)
  plot(Date_Time, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
dev.off()
|
b9c6ff17d0d682897455bd8851849e9363c13cd8
|
9c56e4ef9d59492e76f2f8d2b3516733852e5237
|
/tests/testthat/test-calculate_complexities.R
|
3e2f87a737524152598a9445b998e87ef11ac2c1
|
[
"MIT"
] |
permissive
|
openplantpathology/hagis
|
e12029b5dbec2b379196e1995d65076685551087
|
1f9e8d17bb6e33865da8e13e735e54c0214c9c9f
|
refs/heads/main
| 2023-08-30T19:36:54.325248
| 2023-06-06T10:08:12
| 2023-06-06T10:08:12
| 164,751,172
| 4
| 2
|
NOASSERTION
| 2023-06-06T10:08:14
| 2019-01-08T23:38:30
|
R
|
UTF-8
|
R
| false
| false
| 4,074
|
r
|
test-calculate_complexities.R
|
# test calculate complexities --------------------------------------------------
# testthat suite for hagis::calculate_complexities() and its S3 methods
# (summary, print, pander).

# Shared fixture: run calculate_complexities() once on the bundled
# P_sojae_survey data set; every test below inspects this one object.
data(P_sojae_survey)
complexities <- calculate_complexities(
  x = P_sojae_survey,
  cutoff = 60,
  control = "susceptible",
  sample = "Isolate",
  gene = "Rps",
  perc_susc = "perc.susc"
)

test_that("calculate_complexities() works properly", {
  expect_s3_class(complexities, "hagis.complexities")
  expect_length(complexities, 2)
  # NOTE(review): "indvidual_complexities" matches the (misspelled) element
  # name produced by the package; do not "correct" the spelling here.
  expect_named(complexities,
               c("grouped_complexities",
                 "indvidual_complexities"))
  # test summary.hagis.complexities: mean, sd and se of the complexities
  expect_length(summary(complexities), 3)
  expect_equal(summary(complexities)[[1]], 8.714286,
               tolerance = 1e-3)
  expect_equal(summary(complexities)[[2]], 2.003568,
               tolerance = 1e-3)
  expect_equal(summary(complexities)[[3]], 0.4372144,
               tolerance = 1e-3)
  expect_named(summary(complexities), c("mean", "sd", "se"))
})

# Each call below supplies one missing or wrongly-typed argument; all are
# expected to fail input validation with the same error message.
test_that("calculate_complexities() stops if lacking all params", {
  # x is a string, not a data set
  expect_error(
    calculate_complexities(
      x = "y",
      cutoff = 60,
      control = "susceptible",
      sample = "Isolate",
      gene = "Rps",
      perc_susc = "perc.susc"
    ),
    regexp = "You have failed to provide all necessary inputs"
  )
  # cutoff is non-numeric
  expect_error(
    calculate_complexities(
      x = P_sojae_survey,
      cutoff = "sixty",
      control = "susceptible",
      sample = "Isolate",
      gene = "Rps",
      perc_susc = "perc.susc"
    ),
    regexp = "You have failed to provide all necessary inputs"
  )
  # control is missing
  expect_error(
    calculate_complexities(
      x = P_sojae_survey,
      cutoff = 60,
      control = NULL,
      sample = "Isolate",
      gene = "Rps",
      perc_susc = "perc.susc"
    ),
    regexp = "You have failed to provide all necessary inputs"
  )
  # sample is missing
  expect_error(
    calculate_complexities(
      x = P_sojae_survey,
      cutoff = 60,
      control = "susceptible",
      sample = NULL,
      gene = "Rps",
      perc_susc = "perc.susc"
    ),
    regexp = "You have failed to provide all necessary inputs"
  )
  # gene is missing
  expect_error(
    calculate_complexities(
      x = P_sojae_survey,
      cutoff = 60,
      control = "susceptible",
      sample = "isolate",
      gene = NULL,
      perc_susc = "perc.susc"
    ),
    regexp = "You have failed to provide all necessary inputs"
  )
  # perc_susc is numeric, not a column name
  expect_error(
    calculate_complexities(
      x = P_sojae_survey,
      cutoff = 60,
      control = "susceptible",
      sample = "isolate",
      gene = "Rps",
      perc_susc = 60
    ),
    regexp = "You have failed to provide all necessary inputs"
  )
})

# The print/pander tests below capture console output and compare it line by
# line, so the expected strings (including trailing spaces) must stay exact.
test_that("print.summary.complexities() returns a proper summary", {
  x <- capture.output(summary(complexities))
  expect_type(x, "character")
  expect_equal(x[[2]], "Mean of Complexities")
  expect_equal(x[[3]], "8.714286 ")
  expect_equal(x[[5]], "Standard Deviation of Complexities")
  expect_equal(x[[6]], "2.003568 ")
  expect_equal(x[[8]], "Standard Error of Complexities")
  expect_equal(x[[9]], "0.4372144")
})

test_that("print.hagis.complexities() returns a proper summary", {
  x <- capture.output(print(complexities))
  expect_type(x, "character")
  expect_equal(x[[2]], "Grouped Complexities")
  expect_equal(x[[3]], "    complexity frequency distribution")
  # Only the tail of the long printed table is checked.
  expect_equal(tail(x),
               c(
                 "18:         18        10",
                 "19:         19        11",
                 "20:         20        11",
                 "21:         21        13",
                 "    sample N_samp",
                 ""
               ))
})

test_that("pander.summary.complexities returns a properly formatted table",
          {x <- capture.output(pander(summary(complexities)))
          expect_type(x, "character")
          expect_equal(x[[1]], "")
          expect_equal(x[[2]], "------------------------")
          expect_equal(x[[3]], " Mean     SD      SE    ")
          expect_equal(x[[4]], "------- ------- --------")
          expect_equal(x[[5]], " 8.714   2.004   0.4372 ")
          expect_equal(x[[6]], "------------------------")
})
|
a71579a3ba4630aa3d3891ca52ff717618f9d237
|
cd8408d90328e53c6a8d7b21521e5a29e1d1d889
|
/shinyapp/global.R
|
0856e8d6da3eeb6fc431be6ca256e3ffba6c5944
|
[] |
no_license
|
amerus/BenchmarkingTensorflow
|
8e46d740be872625706fc48104cab4aa0dcc5d29
|
addda049aa44cb5bee7d217136a4fc543f619dd6
|
refs/heads/master
| 2020-05-09T14:41:17.463744
| 2019-05-10T20:31:01
| 2019-05-10T20:31:01
| 181,203,770
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 816
|
r
|
global.R
|
# Shiny dashboard globals: attach libraries, load the pre-computed benchmark
# metrics, and build the vectors/tables that back the UI controls.
library(shinydashboard)
library(tidyverse)
library(ggplot2)
library(DT)
library(naturalsort)
library(codeModules)

# Pre-computed benchmark metrics saved as an RDS file.
ALL <- readRDS('./data/ALL.RDS')

# Values for the "seconds" slider: the distinct seconds present in the data,
# coerced to a plain numeric vector.
sliderSeconds <- as.numeric(unlist(unique(select(as.data.frame(ALL), seconds))))

# Choices for the execution (Processor / Graphics Card) dropdown.
dropDownTags <- unique(select(group_by(ALL, execution), execution))
colnames(dropDownTags) <- "Trained on:"

# Choices for the hardware dropdown.
selHard <- unique(select(group_by(ALL, Hardware), Hardware))
colnames(selHard) <- "Affected Hardware:"

# Waldo images available for selection, in natural sort order.
selWaldo <- naturalsort(list.files(path = './waldo/images', pattern = '.jpg'))
|
e2021194cec30d09a87570c0c66899da1ba40068
|
6fc1a75f0017bf73598075164a4a2c0bbfc2c521
|
/man/argument.Rd
|
232ccb3c98cfb48beaa71f373124282336a40383
|
[] |
no_license
|
nsgrantham/scriptr-deprecated
|
88a3e1bab25d99de33a1166984dbaad90e201ad3
|
7d9b42e12c85b8d81911c2b45f00b3cb2dd4ea44
|
refs/heads/master
| 2021-09-07T08:25:05.211619
| 2018-02-20T06:40:17
| 2018-02-20T06:40:17
| 103,743,481
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 479
|
rd
|
argument.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scriptr.R
\name{argument}
\alias{argument}
\title{Argument}
\usage{
argument(scp, name, type = NULL, nargs = 1, help = "")
}
\arguments{
\item{scp}{Script object}
\item{name}{Name of argument}
\item{type}{String of data type, scriptr::interval, or scriptr::choice}
\item{nargs}{Number of arguments (Inf for unlimited)}
\item{help}{Description of argument for help page}
}
\description{
Argument
}
|
405728ee01d796eeec0b98c1956537fe2be8d23a
|
3165136f79bb0154b8af62dc71a112ffc80ae787
|
/funs/ub.wn.ign.R
|
6474f4ae3a7e7a035d0d3e909a7813b0c68a91be
|
[] |
no_license
|
yuliasidi/wilson_newcombe
|
0dce8043477417799fe098f1a99645fb12f5e9d0
|
0b9b065bdb01b35f48088df787d0e3b6a15bda02
|
refs/heads/master
| 2021-06-23T00:14:48.715457
| 2019-10-21T01:28:03
| 2019-10-21T01:28:03
| 192,383,341
| 0
| 0
| null | 2019-10-21T01:28:05
| 2019-06-17T16:37:50
|
R
|
UTF-8
|
R
| false
| false
| 255
|
r
|
ub.wn.ign.R
|
#' Upper bound of a Wilson-Newcombe-type interval (ignoring missingness)
#'
#' Computes the upper confidence limit for a proportion estimate, with the
#' usual Wilson z^2/n adjustment inflated by the missing-data ratio `rn`
#' (setting `rn = 0` recovers the plain Wilson upper bound).
#'
#' @param z    Standard-normal quantile for the desired confidence level.
#' @param qhat Observed proportion estimate.
#' @param nobs Number of observations.
#' @param rn   Ratio term scaling the extra variance inflation.
#' @return Numeric upper confidence bound (vectorised over its arguments).
ub.wn.ign <- function(z, qhat, nobs, rn){
  # The inflation term z^2/n * (1 + rn) appears in every factor of the
  # original expression; compute it (and the interval centre) once instead
  # of seven times.
  adj <- z^2/nobs + z^2*rn/nobs
  center <- (2*qhat + adj) / (2*(1 + adj))
  # Algebraically identical to the expanded form:
  # center + sqrt(center^2 - qhat^2 / (1 + adj))
  center + sqrt(center^2 - qhat^2/(1 + adj))
}
|
11c195c9380abdf7350542c0cd719c635772eaa4
|
db2cc56460b8054e4ade9b0b618cb3d9593e0632
|
/genMBSliceTimes.R
|
f6c1f5d0f69c530b6a0a062ea829e0759a61d5ef
|
[] |
no_license
|
LabNeuroCogDevel/mMRDA_analysis
|
9a9845635c4e34212a9ac0de26ccedf2c82ef435
|
2fb8321a553851a80c47fd7921fd64b85c8842aa
|
refs/heads/master
| 2020-04-08T12:10:31.167549
| 2014-04-03T16:09:29
| 2014-04-03T16:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,716
|
r
|
genMBSliceTimes.R
|
## Script developed by MH 2014; adapted 20140319 (WF).
## Writes approximate slice acquisition times for multiband (MB) fMRI data
## to mMRDA_MBTimings.1D (one comma-separated line, in seconds).
##
## Background (Tae Kim, email 2/26): each slice took 83 ms; 12 slices per TR
## acquired interleaved in the order 9, 5, 1, 10, 6, 2, 11, 7, 3, 12, 8, 4,
## with 5x acceleration, so the pattern repeats for every 12 slices.
## Tae (Dec 2013) advised reading the actual slice times from the DICOM
## headers; those indicate, by time: 2, 4, 6, 7, 10, 12, 1, 3, 5, 7, 9, 11.
tr <- 1.0

## Earlier hand-coded orderings, kept for reference:
#baseTiming <- c(9, 5, 1, 10, 6, 2, 11, 7, 3, 12, 8, 4) #shift to zero-based timing
#baseTiming <- c(2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 11) #interleaved ascending, even first
## Header times are not exactly even (appear rounded to 2.5 ms resolution):
#fromHeaderTimes <- c(500, 0, 585, 82.5, 667.5, 167.5, 752.5, 250, 835, 335, 920, 417.5)/1000 #bottom to top, in seconds
# sort(fromHeaderTimes,index.return=T)$ix
#   2 4 6 8 10 12 1 3 5 7 9 11
# i.e. interleaved, odd first.

## Slice times (seconds) averaged over one run from the DICOM headers:
##   bash: for f in *; do dicom_hdr -slice_times $f; done > stfor-epi13
##   R:    a <- read.table('stfor-epi13')[-c(1:5)]
##         signif(unname(colMeans(a)), 6)
## Intervals should be 100 ms, but timing jitters by +/- 2.5 ms.
fromHeaderTimes <- c(0, 802.953, 100.366, 903.351, 200.733, 1003.71, 301.121, 1104.07,401.487, 1204.47, 501.853, 1304.82, 602.231, 1405.17, 702.586)/1000

nsimultaneous <- 3 # number of slices excited at once (acceleration)
nslices <- nsimultaneous * length(fromHeaderTimes)

## Simultaneously-excited slices share the same acquisition time, so the full
## per-slice vector is simply the header times repeated nsimultaneous times.
timing <- rep(fromHeaderTimes, times = nsimultaneous)

sink("mMRDA_MBTimings.1D")
cat(paste(timing, collapse = ","), "\n")
sink()
|
7b57fb612f2d608d109db2d6d52b5b7ad9c311ea
|
48e31e278423d7910dbf50d21afce0cf5e5b32be
|
/kaggle_titanic_code.R
|
b58af96b9243eda6af191571965897629dcf6b9e
|
[] |
no_license
|
mukeshviru/kaggle-titanic1
|
e2a82ab069f824128a0df5fa4b307d2fdc1179fe
|
5e2f9a93a79a8cea2c5ba01f17f3750994f6f0c5
|
refs/heads/master
| 2020-04-12T10:51:20.231407
| 2018-12-27T19:38:36
| 2018-12-27T19:38:36
| 162,442,566
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,626
|
r
|
kaggle_titanic_code.R
|
# Kaggle Titanic: clean the train/test data, fit a random forest on the
# training set, and write the test-set predictions to gender_submission.csv.

# Load libraries
library(randomForest)

# Load the train dataset; empty strings are treated as missing.
train_data <- read.csv("train.csv", stringsAsFactors = FALSE, na.strings = c("", "NA"))
View(train_data)
str(train_data)

# Load the test dataset
test_data <- read.csv("test.csv", stringsAsFactors = FALSE, na.strings = c("", "NA"))
View(test_data)
str(test_data)

################# Data Cleaning and Understanding ##################

# Count NA values per column (type-stable vapply instead of sapply).
vapply(train_data, function(col) sum(is.na(col)), integer(1))
## Train: 177 NA rows in Age, 2 in Embarked, 687 in Cabin.
vapply(test_data, function(col) sum(is.na(col)), integer(1))
## Test: 86 NA rows in Age, 1 in Fare, 327 in Cabin.

## Age/Embarked are used for prediction, so rows with NA cannot be dropped;
## impute with the column mean (Age, Fare) or the most frequent level (Embarked).
train_data$Age[which(is.na(train_data$Age))] <- round(mean(train_data$Age, na.rm = TRUE), digits = 1)
test_data$Age[which(is.na(test_data$Age))] <- round(mean(test_data$Age, na.rm = TRUE), digits = 1)
train_data$Embarked[which(is.na(train_data$Embarked))] <- "S"
test_data$Fare[which(is.na(test_data$Fare))] <- round(mean(test_data$Fare, na.rm = TRUE), digits = 1)

# Parch value 9 appears only in the test data; recode it to 6 so the factor
# levels match those seen during training.
summary(train_data$Parch)
summary(test_data$Parch)
test_data$Parch[which(test_data$Parch == 9)] <- 6

# Convert columns to numeric or factor according to their role.
numericcols <- c('PassengerId', 'Age')
factorcols_train <- c('Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'Survived')
factorcols_test <- c('Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked')
for (col in numericcols) train_data[[col]] <- as.numeric(train_data[[col]])
for (col in factorcols_train) train_data[[col]] <- as.factor(train_data[[col]])
for (col in numericcols) test_data[[col]] <- as.numeric(test_data[[col]])
for (col in factorcols_test) test_data[[col]] <- as.factor(test_data[[col]])

# Build the random forest (seed fixed for reproducibility).
set.seed(71)
data.rf <- randomForest(Survived ~ Age+Pclass+Sex+SibSp+Parch+Embarked, data=train_data, proximity=FALSE,
                        ntree=500, do.trace=TRUE)
data.rf

# Predict on the test set and write the submission file.
testPred <- predict(data.rf, test_data)
submission <- data.frame(PassengerID=test_data$PassengerId, Survived=testPred)
head(submission)
write.csv(submission, file = "gender_submission.csv", row.names = FALSE)
View(submission)
|
9dc11622f3ec918dabf252a2cdc5af4d81589402
|
a73ca7a675e371d1af66e030fd18003e4821064d
|
/truth.R
|
9d3950bf4703e8edd011398589e25b9c82376586
|
[] |
no_license
|
weiyaw/multi-curves
|
f5a80d28a91eb24bde367149a01e83e1b95a8e9a
|
5feebf27e7b03d4f7e1e23d8280b18b318eb2db8
|
refs/heads/master
| 2020-03-10T21:45:44.085011
| 2019-08-19T15:01:50
| 2019-08-19T15:01:50
| 129,602,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,474
|
r
|
truth.R
|
# generate some parameters
#
# NOTE(review): analysis script, not a package. It relies on local files under
# ~/Dropbox/master/algo and on helpers defined in the sourced scripts
# (get_diff_mat, get_design_bs, get_pls, plot_spline, combine_fm, ...).
library(tidyverse)
library(gridExtra)
library(bayesplot)
library(doMC) # for parallel loops
registerDoMC(4) # use 4 threads
source("~/Dropbox/master/algo/subor.R")

# Knot sequence for the spline basis.
knt <- seq(0, 10, 2)

# Simulated observations plus the true subject/population curves.
simdata <- read_csv("~/Dropbox/master/algo/data/simdata.csv")
plotdata <- read_csv("~/Dropbox/master/algo/data/simdata-curve.csv")
plotdata_pop <- read_csv("~/Dropbox/master/algo/data/simdata-popcurve.csv")
plotdata_ridge <- read_csv("~/Dropbox/master/algo/data/simdata-ridge.csv")
plotdata_lmm <- read_csv("~/Dropbox/master/algo/data/simdata-lmm.csv")

## compare generated data from either ridge or LMM
ridge_curve <- ggplot() +
  geom_line(aes(plot_x, plot_y, col = sub), plotdata_ridge) +
  geom_line(aes(plot_x, plot_y), plotdata_pop) +
  labs(x = 'x', y = 'y') +
  ylim(0, 35) +
  theme_bw() + theme(legend.position="none")
lmm_curve <- ggplot() +
  geom_line(aes(plot_x, plot_y, col = sub), plotdata_lmm) +
  geom_line(aes(plot_x, plot_y), plotdata_pop) +
  labs(x = 'x', y = 'y') +
  ylim(0, 35) +
  theme_bw() + theme(legend.position="none")
ridge_vs_lmm <- grid.arrange(ridge_curve, lmm_curve, ncol = 2)
## ggsave("~/Dropbox/master/thesis/images/ridge-vs-lmm.pdf", ridge_vs_lmm,
##        width = 10, height = 6)

## generate the truth (dotted) -- a list of ggplot layers reused by later plots
true_curve <- list(sub = geom_line(aes(plot_x, plot_y, col = sub), plotdata, lty = 2,
                                   alpha = 0.9),
                   pop = geom_line(aes(plot_x, plot_y), plotdata_pop, lty = 2),
                   theme_bw(),
                   theme(legend.position="none"),
                   labs(x = 'x', y = 'y'))

## design matrices (B-spline LMM model)
# Clear everything except the data, knots and truth layers before model setup.
rm(list = setdiff(ls(), c("simdata", "knt", "true_curve")))
source("~/Dropbox/master/algo/subor.R")
deg <- 1
K <- length(knt) - 2 # inner knots
n_bsf <- K + deg + 1 # number of b-spline basis functions
D <- get_diff_mat(n_bsf, deg + 1) # difference matrix
type <- "bs" # "bs", "bs-ridge" or "tpf"

## design matrices, Gmat and Hmat
des_info <- get_design_bs(simdata$x, K, deg) # bs
Gmat <- cbind(-1/sqrt(n_bsf), poly(1:n_bsf, deg = deg, raw = FALSE)) # bs unraw
Hmat <- crossprod(D, solve(tcrossprod(D))) # bs
Bmat <- des_info$design %*% cbind(Gmat, Hmat) # bs
Kmat <- cbind(matrix(0, K, deg + 1), diag(K)) # bs
rm(list = c("K", "n_bsf", "D"))
# --- Gibbs sampler, version 1 -------------------------------------------------
source("~/Dropbox/master/algo/main-ridge.R")
# Run 4 parallel chains; L'Ecuyer-CMRG makes the parallel RNG streams
# reproducible under %dopar%.
set.seed(100, kind = "L'Ecuyer-CMRG")
fm1_ls <- foreach(i = 1:4) %dopar% {
  # Initialise the population coefficients at the penalised LS fit plus noise.
  init <- list(pop = get_pls(simdata$y, Bmat, Kmat) + rnorm(NCOL(Bmat), sd = 100))
  fm <- bayes_ridge_sub(simdata$y, simdata$sub, Bmat, Kmat, deg + 1, 1000, 2000,
                        init = init)
  fm$basis <- list(type = 'bs_hier', knots = des_info$knots, degree = deg, # bs
                   trans_mat = cbind(Gmat, Hmat))
  fm$data <- simdata %>% mutate(grp_sub = sub, grp_pop = NA, sub = NULL)
  fm
}
# Restore the default serial RNG after the parallel section.
RNGkind("Mersenne-Twister")
source("~/Dropbox/master/algo/subor.R")

# Collect spline-plot layers from every chain and overlay them.
g1ls <- list()
for (fm in fm1_ls) {
  g1ls <- c(g1ls, plot_spline(fm, shade = TRUE))
}
g1curve_all <- ggplot() + g1ls[grep("pop|sub|data", names(g1ls))] + theme_bw() +
  theme(legend.position="none")
## ggsave("~/Dropbox/master/thesis/images/truth-gibbs-all.pdf", g1curve_all,
##        width = 5, height = 6)
source("~/Dropbox/master/algo/diagnostic.R")

## visualise the truth curve and add it to g1curve
fm1 <- do.call(combine_fm, fm1_ls)
g1curve_true <- ggplot() + true_curve + plot_spline(fm1, shade = FALSE)
## regression curve
## ggsave("~/Dropbox/master/thesis/images/truth-gibbs-true.pdf", g1curve_true,
##        width = 5, height = 6)

## diagnostic
library(xtable)
source("~/Dropbox/master/algo/diagnostic.R")
flat1 <- do.call(flatten_chains, fm1_ls)
long1 <- summary_matrix_flats(flat1)
# NOTE(review): short1 is overwritten immediately; only the second filter
# (theta[1] / delta[1,1]) survives.
short1 <- long1 %>% filter(Rhat > 1.01 | n_eff < 500)
short1 <- long1 %>% filter(grepl("theta\\[1\\]|delta\\[1,1\\]", Parameter))
## print(xtable(short1), include.rownames=FALSE, tabular.environment = "tabular")

## diagnostic plots
g1combo_theta1 <- mcmc_combo(flat1, pars = "theta[1]", c("dens_overlay", "trace"))
g1combo_delta11 <- mcmc_combo(flat1, pars = "delta[1,1]", c("dens_overlay", "trace"))
g1combo_sum11 <- as.tibble(flat1[, , "theta[1]"] + flat1[, , "delta[1,1]"]) %>%
  gather(Chain, "theta[1] + delta[1,1]") %>%
  separate(Chain, c(NA, "Chain"), " ") %>%
  mcmc_combo(c("dens_overlay", "trace"))
## ggsave("~/Dropbox/master/thesis/images/truth-gibbs-combo-theta1.pdf", g1combo_theta1,
##        width = 10, height = 3)
## ggsave("~/Dropbox/master/thesis/images/truth-gibbs-combo-delta11.pdf", g1combo_delta11,
##        width = 10, height = 3)
## ggsave("~/Dropbox/master/thesis/images/truth-gibbs-combo-sum11.pdf", g1combo_sum11,
##        width = 10, height = 3)
## VERSION 2 OF GIBBS
# Same pipeline as above but using bayes_ridge_sub_v2 and a basis description
# chosen by the `type` switch ("bs", "bs-ridge" or "tpf").
source("~/Dropbox/master/algo/main-ridge.R")
set.seed(101, kind = "L'Ecuyer-CMRG")
fm1v2_ls <- foreach(i = 1:4) %dopar% {
  init <- list(pop = get_pls(simdata$y, Bmat, Kmat) + rnorm(NCOL(Bmat), sd = 100))
  fm <- bayes_ridge_sub_v2(simdata$y, simdata$sub, Bmat, Kmat, deg + 1, 1000, 2000,
                           init = init)
  if (type == "tpf") {
    fm$basis <- list(type = 'tpf', knots = des_info$knots, degree = deg) # tpf
  } else if (type == "bs") {
    fm$basis <- list(type = 'bs_hier', knots = des_info$knots, degree = deg, # bs
                     trans_mat = cbind(Gmat, Hmat))
  } else if (type == "bs-ridge") {
    fm$basis <- list(type = 'bs', knots = des_info$knots, degree = deg) # bs-ridge
  }
  fm$data <- simdata %>% mutate(grp_sub = sub, grp_pop = NA, sub = NULL)
  fm
}
RNGkind("Mersenne-Twister")
source("~/Dropbox/master/algo/subor.R")

# Overlay the fitted curves from all four chains.
g1v2ls <- list()
for (fm in fm1v2_ls) {
  g1v2ls <- c(g1v2ls, plot_spline(fm, shade = TRUE, silent = TRUE))
}
g1v2curve_all <- ggplot() + g1v2ls[grep("pop|sub|data", names(g1v2ls))] + theme_bw() +
  theme(legend.position="none")
## ggsave("~/Dropbox/master/thesis/images/truth-gibbsv2-all.pdf", g1v2curve_all,
##        width = 5, height = 6)
source("~/Dropbox/master/algo/diagnostic.R")

## visualise the truth curve and add it to g1v2curve
fm1v2 <- do.call(combine_fm, fm1v2_ls)
g1v2curve_true <- ggplot() + true_curve + plot_spline(fm1v2, shade = FALSE, silent = TRUE)
## regression curve
## ggsave("~/Dropbox/master/thesis/images/truth-gibbsv2-true.pdf", g1v2curve_true,
##        width = 5, height = 6)

## diagnostic
source("~/Dropbox/master/algo/diagnostic.R")
flat1v2 <- do.call(flatten_chains, fm1v2_ls)
long1v2 <- summary_matrix_flats(flat1v2)
## print(xtable(long1v2), include.rownames=FALSE, tabular.environment = "longtable")
# NOTE(review): short1v2 is overwritten immediately, as in the v1 section.
short1v2 <- long1v2 %>% filter(Rhat > 1.01 | n_eff < 500)
short1v2 <- long1v2 %>% filter(grepl("theta\\[1\\]|delta\\[1,1\\]", Parameter))
## print(xtable(short1v2), include.rownames=FALSE, tabular.environment = "tabular")

## diagnostic plots
g1v2combo_theta1 <- mcmc_combo(flat1v2, pars = "theta[1]", c("dens_overlay", "trace"))
g1v2combo_delta11 <- mcmc_combo(flat1v2, pars = "delta[1,1]", c("dens_overlay", "trace"))
g1v2combo_sum11 <- as.tibble(flat1v2[, , "theta[1]"] + flat1v2[, , "delta[1,1]"]) %>%
  gather(Chain, "theta[1] + delta[1,1]") %>%
  separate(Chain, c(NA, "Chain"), " ") %>%
  mcmc_combo(c("dens_overlay", "trace"))
## ggsave("~/Dropbox/master/thesis/images/truth-gibbsv2-combo-theta1.pdf",
##        g1v2combo_theta1, width = 10, height = 3)
## ggsave("~/Dropbox/master/thesis/images/truth-gibbsv2-combo-delta11.pdf",
##        g1v2combo_delta11, width = 10, height = 3)
## ggsave("~/Dropbox/master/thesis/images/truth-gibbsv2-combo-sum11.pdf",
##        g1v2combo_sum11, width = 10, height = 3)

## TRY DIFFERENT MODELS, change the model parameters at the beginning and rerun v2
## bs-ridge
## ggsave("~/Dropbox/master/thesis/images/truth-bsridge-combo-theta1.pdf",
##        g1v2combo_theta1, width = 10, height = 3.5)
## ggsave("~/Dropbox/master/thesis/images/truth-bsridge-combo-delta11.pdf",
##        g1v2combo_delta11, width = 10, height = 3.5)
## ggsave("~/Dropbox/master/thesis/images/truth-bsridge-combo-sum11.pdf",
##        g1v2combo_sum11, width = 10, height = 3.5)
## tpf
## ggsave("~/Dropbox/master/thesis/images/truth-tpf-combo-theta1.pdf",
##        g1v2combo_theta1, width = 10, height = 3.5)
## ggsave("~/Dropbox/master/thesis/images/truth-tpf-combo-delta11.pdf",
##        g1v2combo_delta11, width = 10, height = 3.5)
## ggsave("~/Dropbox/master/thesis/images/truth-tpf-combo-sum11.pdf",
##        g1v2combo_sum11, width = 10, height = 3.5)
## WITH MONOTONICITY CONSTRAINT
## WITH MONOTONICITY CONSTRAINT
## TRY B-SPLINE (LINEAR AND QUADRATIC) AND TPF (QUADRATIC)
## TRY B-SPLINE (LINEAR AND QUADRATIC) AND TPF (QUADRATIC)
# This section restarts the analysis from scratch (it re-loads libraries and
# data) and fits the constrained (monotone-increasing) sampler.

# generate some parameters
library(tidyverse)
library(gridExtra)
library(bayesplot)
library(doMC) # for parallel loops
registerDoMC(4) # use 4 threads
source("~/Dropbox/master/algo/subor.R")
knt <- seq(0, 10, 2)
simdata <- read_csv("~/Dropbox/master/algo/data/simdata.csv")
plotdata <- read_csv("~/Dropbox/master/algo/data/simdata-curve.csv")
plotdata_pop <- read_csv("~/Dropbox/master/algo/data/simdata-popcurve.csv")

## generate the truth (dotted)
true_curve <- list(sub = geom_line(aes(plot_x, plot_y, col = sub), plotdata, lty = 2,
                                   alpha = 0.9),
                   pop = geom_line(aes(plot_x, plot_y), plotdata_pop, lty = 2),
                   theme_bw(),
                   theme(legend.position="none"),
                   labs(x = 'x', y = 'y'))

## design matrices (tpf of B-spline LMM model)
rm(list = setdiff(ls(), c("simdata", "knt", "true_curve")))
source("~/Dropbox/master/algo/subor.R")
deg <- 1
K <- length(knt) - 2 # inner knots
n_bsf <- K + deg + 1 # number of b-spline basis functions
D <- get_diff_mat(n_bsf, deg + 1) # difference matrix
type <- "bs" # "bs" or "tpf"

## design matrices, Gmat and Hmat
# Amat / lower encode the linear inequality constraints (monotone increasing).
if (type == "tpf") {
  des_info <- get_design_tpf(simdata$x, K, deg) # tpf
  Bmat <- des_info$design # tpf
  Kmat <- cbind(matrix(0, K, deg + 1), diag(K)) # tpf
  Amat <- get_constmat_tpf(des_info$knots, "increasing", deg)
  lower <- rep(0, NROW(Amat))
} else if (type == "bs") {
  des_info <- get_design_bs(simdata$x, K, deg) # bs
  Gmat <- cbind(-1/sqrt(n_bsf), poly(1:n_bsf, deg = deg, raw = FALSE)) # bs unraw
  Hmat <- crossprod(D, solve(tcrossprod(D))) # bs
  Bmat <- des_info$design %*% cbind(Gmat, Hmat) # bs
  Kmat <- cbind(matrix(0, K, deg + 1), diag(K)) # bs
  Amat <- get_constmat_bs(NCOL(Bmat), "increasing") %*% cbind(Gmat, Hmat)
  lower <- rep(0, NROW(Amat))
}
rm(list = c("K", "n_bsf", "D"))

# Frequentist LMM fit used only to derive precision hyperparameters (prec)
# for the constrained Gibbs sampler below.
library(nlme)
pop <- rep(1, length(simdata$sub))
Xmat <- unname(Bmat[ , 1:(deg + 1)])
Zmat <- unname(Bmat[, -(1:(deg + 1))])
fit <- lme(y ~ Xmat - 1, simdata, list(pop = pdIdent(~Zmat - 1),
                                       sub = pdBlocked(list(pdSymm(~Xmat - 1),
                                                            pdIdent(~Zmat - 1)))))
prec <- lapply(as.matrix(fit$modelStruct$reStruct), function(x) x * fit$sigma^2)
prec$pop <- 1 / diag(prec$pop)[[1]]
prec$sub1 <- unname(solve(prec$sub[1:(deg + 1), 1:(deg + 1)]))
prec$sub2 <- 1 / diag(prec$sub)[[deg + 2]]
prec$sub <- NULL

# Constrained sampler: initial population coefficients are drawn from a
# truncated normal satisfying Amat %*% pop >= lower.
source("~/Dropbox/master/algo/main-ridge.R")
set.seed(103, kind = "L'Ecuyer-CMRG")
fm1cv2_ls <- foreach(i = 1:4) %dopar% {
  init <- list(pop = c(tnorm::rmvtnorm(1, mean = get_pls(simdata$y, Bmat, Kmat),
                                       initial = c(1, 1, rep(0, deg + 3)),
                                       F = Amat, g = -1 * lower)))
  fm <- bayes_ridge_cons_sub_v2(simdata$y, simdata$sub, Bmat, Kmat, deg + 1,
                                Amat, 1000, 2000, init, prec = prec)
  if (type == "tpf") {
    fm$basis <- list(type = 'tpf', knots = des_info$knots, degree = deg) # tpf
  } else if (type == "bs") {
    fm$basis <- list(type = 'bs_hier', knots = des_info$knots, degree = deg, # bs
                     trans_mat = cbind(Gmat, Hmat))
  }
  fm$data <- simdata %>% mutate(grp_sub = sub, grp_pop = NA, sub = NULL)
  fm
}
RNGkind("Mersenne-Twister")
source("~/Dropbox/master/algo/subor.R")

g1cv2ls <- list()
for (fm in fm1cv2_ls) {
  g1cv2ls <- c(g1cv2ls, plot_spline(fm, shade = TRUE, silent = TRUE))
}
g1cv2curve_all <- ggplot() + g1cv2ls[grep("pop|sub|data", names(g1cv2ls))] + theme_bw() +
  theme(legend.position="none")
g1cv2curve_all
## ggsave("~/Dropbox/master/thesis/images/truth-gibbscv2-all-bslin.pdf",
##        g1cv2curve_all, width = 5, height = 6)
## ggsave("~/Dropbox/master/thesis/images/truth-gibbscv2-all-bsquad.pdf",
##        g1cv2curve_all, width = 5, height = 6)
## ggsave("~/Dropbox/master/thesis/images/truth-gibbscv2-all-tpfquad.pdf",
##        g1cv2curve_all, width = 5, height = 6)
source("~/Dropbox/master/algo/diagnostic.R")

## visualise the truth curve and add it to g1cv2curve
fm1cv2 <- do.call(combine_fm, fm1cv2_ls)
g1cv2curve_true <- ggplot() + true_curve +
  plot_spline(fm1cv2, shade = FALSE, silent = TRUE)
g1cv2curve_true
## regression curve
## ggsave("~/Dropbox/master/thesis/images/truth-gibbscv2-true-bslin.pdf",
##        g1cv2curve_true, width = 5, height = 6)

## diagnostic
source("~/Dropbox/master/algo/diagnostic.R")
flat1cv2 <- do.call(flatten_chains, fm1cv2_ls)
long1cv2 <- summary_matrix_flats(flat1cv2)
print(xtable(long1cv2), include.rownames=FALSE, tabular.environment = "longtable")
short1cv2 <- long1cv2 %>% filter(Rhat > 1.01 | n_eff < 500)
short1cv2 <- long1cv2 %>% filter(grepl("theta\\[1\\]|delta\\[1,1\\]", Parameter))
## print(xtable(short1cv2), include.rownames=FALSE, tabular.environment = "tabular")

## diagnostic plots
g1cv2combo_theta1 <- mcmc_combo(flat1cv2, pars = "theta[1]", c("dens_overlay", "trace"))
g1cv2combo_delta11 <- mcmc_combo(flat1cv2, pars = "delta[1,1]", c("dens_overlay", "trace"))
source("~/Dropbox/master/algo/diagnostic.R")
# NOTE(review): flat1cv2 is rebuilt here from the combined fit and truncated to
# the first 43 parameters, overwriting the per-chain array used above.
flat1cv2 <- do.call(flatten_chains, list(fm1cv2))[, , 1:43, drop = FALSE]
tail(summary_matrix_flats(flat1cv2), n = 10)
mcmc_combo(flat1cv2[, , 1:43], pars = "theta[1]", c("dens", "trace"))
|
ad66cc1f0d753ad0516da8238fb574051141eacc
|
b28e19cbd6a9945d8bbeaeecaac7cad2c48ea87e
|
/scraper/rap_eng/texts/Wiz Khalifa/B.A.R
|
5a5b8a1c0f22f5b14c299fa7909a662e135f9fda
|
[] |
no_license
|
clarnomargurite591/rapper_ml
|
1a0a8d491a546bc0a1d6d051f2ffcdf2cf17d54f
|
f094af3b0af67e8c5e7c65df2390be8f541ba857
|
refs/heads/main
| 2023-03-18T06:56:49.555809
| 2020-12-20T16:21:45
| 2020-12-20T16:21:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,139
|
r
|
B.A.R
|
[Intro]
Fuck hoes everywhere we go
Taylor Gang, paper planes
Uh, they loving what I say
Tell her keep count
What you other niggas speak 'bout
[Verse 1]
Lamborghini dreams
Beach house wishes
Pour bottles of champagne
For my beach house bitches
It ain't new to me
That money, boy, I been 'bout
Throwing hundreds on the floor
I tell her keep count
Nigga
They in love with what I say
'Cause I
Really live the life you other niggas speak 'bout
Got my cameraman
He down to do a movie for me
Couple niggas 'round
That's down to do the shooting for me
I'm still riding with my main bitch
She rolling a joint
Something old school playing
She love me
We fucking
We in the fly-free zone
When some niggas will captain-save-it
I let her shop 'til she drop dead
Sleeping in her crib
Wake up to decent pot plant
Jordan shorts and a pair of Polo socks, blazing
With your bitch
You wanna lift, smoke this
[Hook]
I'm glad to be here, I been waiting
So long...
I finally found me a cloud to
Float on...
And I'mma float on...
And I don't have much
But I take all I got
And that's what I give
What I get in return
Is the money I earn
And the life I live
I'm so gone
As I burn after rolling
And float on...
[Verse 2]
Don't talk numbers
I hire people to speak for me
If you love her
Then hide your bitch so you keep shorty
Ever fly private?
So much diamonds in my chain
Hella sky mileage
I fell asleep on a plane
And never woke up
And now I'm living a dream
Suckas hate hard
Hoes treat me like I'm a king
They wanna live comfortably
Sipping on champagne
Real niggas fuck with me
So drama is not a thang
I gave my momma the old shit
Told her anything that come through the door
To open the whole clip
I'm with your bitch smoking
Let her keep the mid, I'mma roll this potent
Hotel so close to the water
You can even hear the ocean
Them bitches can't breathe
Beware them niggas with tattoo sleeves
Plus weed
(Gang)
[Hook]
I'm glad to be here, I been waiting
So long...
I finally found me a cloud to
Float on...
And I'mma float on...
And I don't have much
But I take all I got
And that's what I give
What I get in return
Is the money I earn
And the life I live
I'm so gone
As I burn after rolling
And float on...
[Verse 3]
Wanna smoke 'cause they know that I keep flavors
Tell me how them other niggas lame
And she love the cool crowd, so she fucking with the Taylors
Wear All-Stars and smoke papers
IPhone with no ringtones
Vibrate or on plane mode
Palm trees, and bomb pre-rolled
The weed burning, but the money just fold
While I'm looking at you niggas face
Light another L, and pull the liquor out the case
Niggas try and fail, see me, now they wanna hate
Fly another plane, a different city, 'nother state
My cash change the forecast
As a teen was half-baked before class
Now I smoke joints with others niggas' hoes
And this shit you burn after you roll
Fool
[Hook]
I'm glad to be here, I been waiting
So long...
I finally found me a cloud to
Float on...
And I'mma float on...
And I don't have much
But I take all I got
And that's what I give
What I get in return
Is the money I earn
And the life I live
I'm so gone
As I burn after rolling
And float on...
|
f3f7d192d13844475c9885a5fa732959980c6a6f
|
ddf318aa7903e2de192170948b1bac837c8b3abb
|
/R/GEDI_Utils_l2b.R
|
07eaaeaf5feafa00694bf2bafeb07b37fb4c6d80
|
[] |
no_license
|
JohMast/GEDI_Yucatan
|
2ba9b9bea7b51427260ebcd00eef4800479baafe
|
e54fa5b44606641c5f69cc5c6ffb474511fce797
|
refs/heads/main
| 2023-02-17T05:28:23.137686
| 2021-01-08T08:31:39
| 2021-01-08T08:31:39
| 327,345,859
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,576
|
r
|
GEDI_Utils_l2b.R
|
######################################################################################
#
# Utility functions for the processing of GEDI L2B Data.
# Most of these are related to extracting and visualising data:
#
# - make_transsect_plot() : Plots ALS elevation vs GEDI elevation in a profile along a transect.
# - get_clip_distance_along_straight_line() : Utility which calculates
# intersections between a line and object
#   - extract_GEDI_attribute_by_bbox() : Extracts parameters (layers)
#                                   for every point from an L2B hdf file.
#   - extract_GEDI_points() : Extracts parameters (layers) for every point from
#                                   an L2B hdf file. (deprecated, use extract_GEDI_attribute_by_bbox)
#   - extract_GEDI_points_wrapper() : Wrapper for the above which additionally
#                                   subsets by bbox (deprecated, use extract_GEDI_attribute_by_bbox)
# - query_GEDI_download_links() : Query GEDI download links from the LP DAAC Server.
#
# Project Contributors:
# Žiga Kokalj, Research Centre of the Slovenian Academy of Sciences and Arts
# Johannes Mast, University of Würzburg
#
# This research was partly funded by the
# Slovenian Research Agency core funding No. P2-0406.
######################################################################################
library(rhdf5)
library(rgdal)
library(tidyverse)
#' make_transsect_plot
#' @description Creates a specific type of transect plot GEDI Points along a ALS-DEM points transect with a different number of points.
#' Only recommended if the two transects are in reality overlapping. Little divergences are inconsequential but larger ones lead to
#' misleading plots. This is very specific to the data formats used within this project and may well not be useful outside of its scope.
#' @param transects_path Path to the shapefile containing the ALS-DEM points
#' @param elev_points Path to a points shapefile containing the GEDI Points
#' @param ALS_csv_path Path to a csv file which lists for certain distances from the transect-start the relevant DEM elevation.
#' @param plot_title Optional plot title
#' @param plot_legpos legend position
#' @param plot_rectcol color of the rectangle which indicates presence of ruins.
#' @param remove_outliers remove outliers before plotting?
#' @param remove_lowquality remove points marked as low quality before plotting?
#' @param plot_alscol Color of ALS-DEM points
#' @param plot_gedicol Color of GEDI-Points
#' @param linepath path to a line shapefile which should approximate the transect. Used for calculating intersections with ruins.
#' @param ruinspath Path to polygon shapefile of ruins which may intersect the transect-line.
#' @return A list with [[1]] The transect without elev_lastbin [[2]] The dataframe used for creating the plot [[3]] the transect plot with elev_lastbin
# Implementation notes (review): builds two aligned elevation profiles along a
# transect -- GEDI footprint elevations vs. ALS-DEM samples -- and overlays the
# along-transect extent of ruins as shaded rectangles.
make_transsect_plot <- function(transects_path,
                                elev_points,
                                ALS_csv_path,
                                plot_title = "Transect",
                                plot_legpos = c(0.5, 0.15),
                                plot_rectcol="green",
                                remove_outliers = T,
                                remove_lowquality = T,
                                plot_alscol="orange",
                                plot_gedicol="dodgerblue2",
                                linepath=NULL,
                                ruinspath=NULL){
  #First, read in points. Calculate the distances starting from the southernmost
  ts_points_T1 <- readOGR(transects_path)
  # NOTE(review): ordering is by coords[,1] (the x/easting column), i.e. the
  # westernmost point first, although the comment above says "southernmost";
  # for a roughly N-S transect these differ -- confirm intended orientation.
  ts_points_T1_reordered <- ts_points_T1[order(ts_points_T1@coords[,1]),]
  # Full pairwise distance matrix between the ordered transect points
  ts_dists_T1 <- dist(ts_points_T1_reordered@coords) %>%
    as.matrix()
  #We take the diagonal of the matrix nudged by 1 (so we always get the distance to the next neighbor)
  ts_cumdists_T1 <- c(0,cumsum(diag(ts_dists_T1[,-1])))
  # GEDI footprints: 12 m buffers around the transect points, in WGS84
  ts_footprints_T1 <- readOGR(transects_path) %>%
    spTransform( CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ")) %>%
    buffer(width=12,dissolve=F)
  ts_footprints_T1_reordered <- ts_footprints_T1[order(ts_points_T1@coords[,1]),]
  ts_footprints_T1_reordered$cumdists <- ts_cumdists_T1
  #extract the elevation from the elev_points
  # (columns 1-3 of elev_points are assumed to be: elevation lowest mode,
  #  elevation of the last bin, and the L2A quality flag -- TODO confirm)
  ts_footprints_T1_reordered$elev <- over(ts_footprints_T1_reordered,elev_points)[,1] %>% deframe()
  ts_footprints_T1_reordered$elevlastbin <- over(ts_footprints_T1_reordered,elev_points)[,2] %>% deframe()
  ts_footprints_T1_reordered$l2aqual <- over(ts_footprints_T1_reordered,elev_points)[,3] %>% deframe()
  #Make one DF for GEDi
  GEDI_cumdists <- ts_footprints_T1_reordered$cumdists
  GEDI_elevs <- ts_footprints_T1_reordered$elev
  GEDI_elevslastbin <- ts_footprints_T1_reordered$elevlastbin
  GEDI_quality <- as.character(ts_footprints_T1_reordered$l2aqual)
  GEDI_df <- data.frame(GEDI_cumdists,GEDI_elevs,GEDI_elevslastbin,GEDI_quality)
  GEDI_df$source <- "GEDI"
  names(GEDI_df) <- c("cumdists","elev","elev_lastbin","quality","source")
  #Make one DF for ALS (csv: column 1 = distance along transect, column 2 = elevation)
  ALS_cumdists <- read.csv2(ALS_csv_path)[,1]
  ALS_elevs <- read.csv2(ALS_csv_path)[,2]
  ALS_elevslastbin <- rep(NA,length(ALS_elevs))
  ALS_df <- data.frame(ALS_cumdists,ALS_elevs,ALS_elevslastbin)
  ALS_df$quality <- NA
  ALS_df$source <- "ALS"
  names(ALS_df) <- c("cumdists","elev","elev_lastbin","quality","source")
  combined_df_T1 <- rbind(GEDI_df,ALS_df)
  #Find outliers. Optionally remove outliers and points of low quality in gedi.
  combined_df_T1$outliers <- is_outlier(combined_df_T1$elev)
  combined_df_filtered <- combined_df_T1 %>%
    {if(remove_outliers) filter(.,!outliers) else .} %>%
    {if(remove_lowquality) filter(.,is.na(as.numeric(as.character(quality))) | as.logical(as.numeric(as.character(quality)))) else .}
  # get distances to plot: V1/V2 = start/end of each ruin along the transect,
  # V3/V4 = vertical extent of the shaded rectangle (full elevation range)
  ruindists <- get_clip_distance_along_straight_line(linepath,ruinspath) %>% as.data.frame()
  ruindists$V3 <- min(combined_df_filtered$elev)
  ruindists$V4 <- max(combined_df_filtered$elev)
  # pivot elevation column
  # NOTE(review): the result of this pivot_longer() is discarded (not assigned),
  # so the statement has no effect -- either assign it or remove it.
  combined_df_filtered %>% pivot_longer(cols=starts_with("elev"))
  transect_T1_ol <-combined_df_filtered %>%
    ggplot(aes(x=cumdists,y = elev,group=source,color=source,))+
    geom_point(aes(shape=quality,size=2),show.legend=!remove_lowquality)+
    geom_line(size=0.3)+
    geom_rect(data=ruindists, inherit.aes=FALSE,
              aes(xmin=V1,xmax=V2,ymin=V3,ymax=V4), fill = plot_rectcol,
              alpha=0.23,show.legend = F)+
    theme_bw()+
    xlab("distance along transect [m]")+
    ylab("elevation lowest mode [m]")+
    scale_color_manual(values = c(plot_alscol,plot_gedicol))+
    scale_shape_manual(breaks = c("0","1"),values = c(23,16))+
    ggtitle(plot_title)+
    theme(legend.position = plot_legpos)+
    guides(color=guide_legend(title="data \nsource"))+
    guides(shape=guide_legend(title="GEDI L2A \nquality flag"))+
    guides(size=FALSE)+
    guides(fill=FALSE)+
    theme(legend.direction = "horizontal", legend.box = "horizontal")+
    theme(legend.key = element_rect(fill = "white", colour = "black"))
  # Variant of the plot with the last-bin elevation added as a dashed line
  transect_T1_ol_with_lastbin <- transect_T1_ol+geom_line(aes(x=cumdists,y=elev_lastbin),linetype=2)
  return(list(transect_T1_ol,combined_df_filtered,transect_T1_ol_with_lastbin))
}
#' get_clip_distance_along_straight_line
#' @description Calculates intersections between ruins and a straight line.
#' @param linepath path to a line shapefile which should approximate the transect. Used for calculating intersections with ruins.
#' @param ruinspath Path to polygon shapefile of ruins which may intersect the transect-line.
#' @return A two column matrix containing coordinates (along the line) which signify the outer boundaries of the intersected ruins.
get_clip_distance_along_straight_line <- function(linepath,ruinspath){
  line <- readOGR(linepath)
  # buffer(width = 0) repairs invalid ring geometries before intersecting
  ruins <- readOGR(ruinspath) %>% buffer(width=0.0,dissolve=FALSE)
  # Intersect the line with the ruin polygons: one line segment per crossing
  intlines <- rgeos::gIntersection(line,ruins)
  segments <- intlines@lines[[1]]@Lines
  # For every segment keep only its first and last coordinate (the entry and
  # exit point of the line into/out of the ruin polygon).
  # seq_along() + preallocation replaces the original 1:length() grow-in-loop.
  intcoords <- vector("list", length(segments))
  for(i in seq_along(segments)){
    coordints <- coordinates(intlines)[[1]][i][[1]]
    intcoords[[i]] <- rbind(coordints[1,],coordints[nrow(coordints),])
  }
  intcoords <- do.call(rbind.data.frame,intcoords)
  # Distance of every entry/exit point from the line's start vertex
  # (assumed to be the south-western end -- TODO confirm for each input file)
  line_start_sw <- coordinates(line)[[1]][[1]][1,]
  # Sorted distances paired up row-wise: column 1 = ruin start, column 2 = ruin end
  intdists <- pointDistance(intcoords,line_start_sw,lonlat = FALSE) %>%
    sort() %>%
    matrix(ncol=2,byrow=TRUE)
  return(intdists)
}
#' extract_GEDI_attribute_by_bbox
#' @description From a number of GEDI L2B .hdf files, extract the desired fields within a certain lat-long bounding box.
#' @param desired_parameter_pattern The parameter to extract
#' @param lat_name The field containing the desired lat to filter by. Default is "lat_lowestmode"
#' @param lon_name The field containing the desired lon to filter by. Default is "lon_lowestmode"
#' @param paths path(s) to the GEDI hdf file(s).
#' @param min_long Lower bound applied to lon_name to subset the points
#' @param max_long Upper bound applied to lon_name to subset the points
#' @param min_lat Lower bound applied to lat_name to subset the points
#' @param max_lat Upper bound applied to lat_name to subset the points
#' @return A data.frame of the requested attribute for all footprints inside the bounding box, rows bound across beams and files.
extract_GEDI_attribute_by_bbox <- function(paths, desired_parameter_pattern,min_long,max_long,min_lat,max_lat,lat_name="lat_lowestmode",lon_name="lon_lowestmode") {
  internal_extract <- function(path){
    #Get the internal structure of the file
    l2b_structure <- h5ls(path, recursive = T)
    #Read the group names
    l2b_groups <- unique(l2b_structure$name)
    #Read the beam designations
    l2b_beams <- l2b_groups[grepl("BEAM", l2b_groups)]
    #one field for lat and one field for lon are temporarily required
    desired_parameter_pattern <- c(lat_name,lon_name,desired_parameter_pattern)
    #Extract and bind together the desired parameters for the desired beams
    df <- lapply(
      l2b_beams,
      FUN = function(current_beam)
      {
        #subset the structure, selecting only the ones for this beam
        beam_parameters <-
          l2b_structure[grepl(current_beam, l2b_structure$group), ]
        #Get a list of all the matching parameters for this beam
        parameter_matches <-
          lapply(
            desired_parameter_pattern,
            FUN = function(y) {
              beam_parameters[beam_parameters$name %in% y,]
            }
          ) %>% do.call(rbind, .)
        #Merge the first two fields to get a list of paths to read in with h5read
        parameter_matches_m <-
          paste0(parameter_matches$group, "/", parameter_matches$name)
        #Read out the lats and the lons and use them to create an index vector
        lats <- h5read(path,parameter_matches_m[1])
        lons <- h5read(path,parameter_matches_m[2])
        # BUGFIX: the original compared against undefined variables
        # min_lon/max_lon; the function arguments are min_long/max_long.
        aoi_subset <- lons > min_long & lons <= max_long & lats > min_lat & lats <= max_lat
        #remove the lats and lons from the list of parameters we want, they are no longer necessary
        parameter_matches_m <- parameter_matches_m[-c(1,2)]
        parameter_matches <- parameter_matches[-c(1,2),]
        #Read in all the desired arrays
        attribute_fields <- lapply(
          parameter_matches_m,
          FUN = function(x) {
            t <- h5read(file = path,name = x)
            if(length(dim(t))==1){t <- t[aoi_subset]} #If t is a vector, index directly
            if(length(dim(t))==2){t <- t[,aoi_subset] %>% t()} #if t is a matrix, index by column and transpose it
            return(t)
          }
        )
        #Convert and bind them into a dataframe for this beam
        names(attribute_fields) <- parameter_matches$name
        beam_df = do.call(cbind, attribute_fields) %>% as.data.frame()
        # NOTE(review): column names are derived from the FIRST requested
        # parameter only (element 3 after lat/lon were prepended); with more
        # than one requested parameter the names will mislead -- confirm usage.
        names(beam_df) <- paste0(desired_parameter_pattern[3], 1:ncol(beam_df))
        return(beam_df)
      }
    ) %>% do.call(rbind, .)
    return(df)
  }
  # Explicit, visible return (the original ended on an assignment,
  # which returns its value invisibly).
  out_df <- lapply(paths,FUN = internal_extract)%>% do.call(rbind, .)
  return(out_df)
}
#' extract_GEDI_points
#' @description Reads a L2B GEDI hdf file and extracts only the desired parameters,
#' one data.frame row per footprint, across all beams.
#' @param path Path to a L2B GEDI hdf file.
#' @param desired_parameter_pattern Character vector of parameter names to extract.
#' @return A data.frame with one column per requested field plus a Beam column.
extract_GEDI_points <- function(path, desired_parameter_pattern) {
  # Inspect the internal layout of the HDF5 file
  file_structure <- h5ls(path, recursive = TRUE)
  # Beam groups are the entries whose name contains "BEAM"
  all_names <- unique(file_structure$name)
  beam_names <- all_names[grepl("BEAM", all_names)]
  # Read the requested parameters for a single beam into a data.frame
  read_one_beam <- function(beam) {
    # Restrict the structure listing to this beam's group
    beam_entries <- file_structure[grepl(beam, file_structure$group), ]
    # Keep only entries whose name exactly matches a requested parameter
    matched <- lapply(
      desired_parameter_pattern,
      function(pattern) {
        beam_entries[beam_entries$name %in% pattern, ]
      }
    ) %>% do.call(rbind, .)
    # Assemble the full HDF5 dataset paths for h5read
    dataset_paths <- paste0(matched$group, "/", matched$name)
    # Read each dataset as one column
    cols <- lapply(dataset_paths, function(p) h5read(path, p))
    names(cols) <- matched$name
    beam_df <- do.call(cbind, cols) %>% as.data.frame()
    # Tag every row with the beam it came from
    beam_df$Beam <- beam
    beam_df
  }
  # Stack all beams into a single data.frame
  lapply(beam_names, read_one_beam) %>% do.call(rbind, .)
}
# Flag elements of x lying outside Tukey's fences, i.e. more than
# 1.5 * IQR below the first quartile or above the third quartile.
# Adapted from https://stackoverflow.com/a/43659981
# NA elements yield NA; NAs are ignored when computing the quartiles and the
# IQR (the original passed na.rm only to quantile(), so a single NA in x
# turned EVERY result into NA).
is_outlier <- function(x) {
  q1 <- quantile(x, 0.25, na.rm = TRUE)
  q3 <- quantile(x, 0.75, na.rm = TRUE)
  fence <- 1.5 * IQR(x, na.rm = TRUE)
  x < q1 - fence | x > q3 + fence
}
#' extract_GEDI_points_wrapper
#' @param l2b_path path to a l2b GEDI hdf file
#' @param params a vector of strings which are used as patterns to grab the desired parameters
#' @param min_lon which is applied to lon_lowestmode to subset the points
#' @param max_lon which is applied to lon_lowestmode to subset the points
#' @param min_lat which is applied to lat_lowestmode to subset the points
#' @param max_lat which is applied to lat_lowestmode to subset the points
#' @param set_NA_dem Optionally set DEM nodata value (-999999) to NA?
#' @return a SpatialPointsDataFrame of the requested fields subset by location,
#'   or the scalar 0 when no footprints fall inside the bounding box.
#' @export
extract_GEDI_points_wrapper <-
  function(l2b_path,
           params = c("gfit"),
           min_lon,
           max_lon,
           min_lat,
           max_lat,
           set_NA_dem = FALSE) {
    #Extract the required params from the L2B file
    df <- extract_GEDI_points(l2b_path, params)
    # Optionally set DEM nodata value (-999999) to NA
    if(set_NA_dem){
      df$digital_elevation_model[df$digital_elevation_model==-999999] <- NA
    }
    # Subset by longitude and latitude
    df_processed <- df %>%
      filter(between(lon_lowestmode, min_lon, max_lon)) %>%
      filter(between(lat_lowestmode, min_lat, max_lat))
    # NOTE(review): sentinel return -- 0 is not a SpatialPointsDataFrame;
    # callers must check for it explicitly. Kept for backward compatibility.
    if(nrow(df_processed)==0){return(0)}
    # Promote the data.frame to a SpatialPointsDataFrame in WGS84
    coordinates(df_processed) <- ~lon_lowestmode+lat_lowestmode
    df_processed@proj4string <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ")
    # Record provenance: source file name plus the bounding box used.
    # basename() replaces the original strsplit()-by-"/" idiom.
    # (The trailing "3" reproduces the original naming scheme -- presumably a
    # version suffix; confirm before changing.)
    out_name <- basename(l2b_path) %>%
      paste0("_extracted_points_",
             min_lon,"_",max_lon,"_",min_lat,"_",max_lat,"3")
    df_processed$src_file <- out_name
    return(df_processed)
  }
library(httr)
library(jsonlite)
library(magrittr)
library(assertthat)
#' query_GEDI_download_links
#' @description Finds available GEDI download links from the LP DAAC gedifinder service.
#' @param max_lat Upper latitude of the query
#' @param min_lon Lower longitude of the query
#' @param min_lat Lower latitude of the query
#' @param max_lon Upper longitude of the query
#' @param product_type One of "GEDI01_B", "GEDI02_A", "GEDI02_B". Desired product type.
#' @return a vector of download links.
query_GEDI_download_links <- function(max_lat, min_lon, min_lat, max_lon, product_type = "GEDI02_A") {
  assert_that(product_type %in% c("GEDI01_B", "GEDI02_A", "GEDI02_B"))
  # BUGFIX: interpolate the requested product into the query URL -- the
  # original hard-coded "GEDI02_B" and silently ignored product_type.
  # bbox order follows the gedifinder convention: ul_lat,ul_lon,lr_lat,lr_lon.
  link <- paste0("https://lpdaacsvc.cr.usgs.gov/services/gedifinder?product=",
                 product_type,
                 "&version=001&bbox=",max_lat,",",min_lon,",",min_lat,",",max_lon,",","&output=json")
  response <- GET(link)
  if(response$status_code==200){
    print("Successfully requested files!")
  }else{
    print(paste("Could not request files. Status Code:", response$status_code))
  }
  # Parse the JSON payload and flatten the data field into a character vector
  response_content <- rawToChar(response$content) %>%
    fromJSON()
  download_links <- response_content$data %>% unlist() %>% c()
  return(download_links)
}
|
2f5337a5b763933739c61dc30f36c86d524c7b5f
|
9c48287a818f1c2d3c596a5f9591b2d3a3bd68b6
|
/man/print.igEdge.Rd
|
a8584f1f2c6b1bb277eb4fa0da580527f8355062
|
[] |
no_license
|
cran/integr
|
f158f19273de3ba0e62ae43449741e1e1661bc8d
|
03130a5d2f779cad5bec854379f04faebd31dfd7
|
refs/heads/master
| 2020-12-22T00:32:03.660478
| 2019-05-24T10:50:03
| 2019-05-24T10:50:03
| 236,615,394
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 431
|
rd
|
print.igEdge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interactionGraphs.R
\name{print.igEdge}
\alias{print.igEdge}
\title{Print generic method for Interaction Graph Edges (S3 class)}
\usage{
\method{print}{igEdge}(edge)
}
\arguments{
\item{edge}{An \code{(igEdge)} object}
}
\value{
Print \code{(igEdge)} object
}
\description{
Print generic method for Interaction Graph Edges (S3 class)
}
|
896384afd669b5dd8db5fd4d6e3c8968252f0a3e
|
efcd73d82923061a933661202415da88f6f0975a
|
/R/eps_ind_dist.R
|
61d55e4faff5290fb55a369e285df71cbfc111ff
|
[] |
no_license
|
SimoneHermann/rexpar
|
624b0d30bd3fde12a5e908bd90057dc6d260459a
|
3883b9f8aa1685c28979c621d427ae3080a1cd8e
|
refs/heads/master
| 2021-01-21T15:33:59.129522
| 2015-06-22T12:03:59
| 2015-06-22T12:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
eps_ind_dist.R
|
# Move v1 a fraction eps of the way toward the midpoint between its second-
# and third-nearest rows of Mat (distances measured with Ele_Norm).
eps_ind_dist <- function(v1, Mat, eps) {
  # Distance from v1 to every row of Mat
  dists <- apply(Mat, 1, Ele_Norm, center = v1)
  ranked <- sort(dists)
  # Rows at the 2nd and 3rd smallest distance (ties select multiple rows,
  # exactly as in the original comparison-based selection)
  second_nearest <- Mat[dists == ranked[2], ]
  third_nearest <- Mat[dists == ranked[3], ]
  midpoint <- (second_nearest + third_nearest) / 2
  v1 + eps * (midpoint - v1)
}
|
9ec93d7a1dc93ffb819665e9fdb4c3772dfc7edf
|
56a0f9cb2b2d0765eec9740697b434516e2f4d04
|
/R_code_classification.r
|
936c6e3fef14531bebf7da9262d212e68da02b2b
|
[] |
no_license
|
SerenaBarberis/Telerilevamento_2021
|
688a1fd3ad120aa936a1dbc0685180d28355a2a0
|
b76dd1aaef6852b95d6a96088edee3e76a22a4af
|
refs/heads/main
| 2023-07-02T04:01:02.497632
| 2021-08-05T17:18:07
| 2021-08-05T17:18:07
| 348,290,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,766
|
r
|
R_code_classification.r
|
#R_code_classification.r
# Unsupervised classification of solar imagery and a Grand Canyon scene.
# NOTE(review): hard-coded Windows working directory; adjust per machine.
setwd('C:/Users/Sery/Desktop/lab')
library(raster)
library(RStoolbox)
# brick(): reads a multi-band image file and creates a RasterBrick with RGB bands
so <- brick("Solar_Orbiter_s_first_views_of_the_Sun_pillars.jpg")
# plotRGB(): displays the RasterBrick ('lin' = linear stretch of the values)
plotRGB(so, 1,2,3, stretch='lin')
# unsuperClass() (RStoolbox package): unsupervised classification of the pixels into the chosen number of classes (here 3)
soc <- unsuperClass(so, nClasses=3) # creates a classification model plus a map; plot the map component by accessing it with $
cl <- colorRampPalette(c('yellow','red','black'))(100)
plot(soc$map, col=cl) # set.seed() fixes which pixels are sampled; otherwise the image can vary between runs because different pixels are drawn
# unsupervised classification with 20 classes
set.seed(42)
soe <- unsuperClass(so, nClasses=20)
plot(soe$map, col=cl)
# download an image from https://www.esa.int/ESA_Multimedia/Missions/Solar_Orbiter/(result_type)/images and run the unsupervised classification
sun <- brick('sun.png')
sunc <-unsuperClass(sun, nClasses=3)
plot(sunc$map)
# with 20 classes
sunc <-unsuperClass(sun, nClasses=20)
plot(sunc$map)
#Gran Canyon
#https://earthobservatory.nasa.gov/images/80948/exploring-the-grand-canyon
#When John Wesley Powell led an expedition down the Colorado River and through the Grand Canyon in 1869, he was confronted with a daunting landscape. At its highest point, the serpentine gorge plunged 1,829 meters (6,000 feet) from rim to river bottom, making it one of the deepest canyons in the United States. In just 6 million years, water had carved through rock layers that collectively represented more than 2 billion years of geological history, nearly half of the time Earth has existed.
setwd('C:/Users/Sery/Desktop/lab')
library(raster)
library(RStoolbox)
# the file is an RGB image, so brick() is used
gc <- brick('dolansprings_oli_2013088_canyon_lrg.jpg')
# NOTE(review): the argument 'stretc' only works via partial matching of 'stretch'
plotRGB(gc, r=1, g=2, b=3, stretc='lin')
plotRGB(gc, r=1, g=2, b=3, stretc='hist') # stronger stretch of the values, to show as much colour variation as possible (the full range of the RGB bands)
# image classification: each pixel's distance to a multispectral centre is measured and pixels are grouped with the nearest centre
# unsuperClass() (inside the RStoolbox package): classifies the image pixels into the chosen number of classes
# the result is a model of the image holding both the map and the class information; to plot only the map, access it with $
gcc2 <- unsuperClass(gc, nClasses=2)
plot(gcc2$map)
# with 4 classes
gcc4 <- unsuperClass(gc, nClasses=4)
plot(gcc4$map)
50188b7363508d1dd7475f6d381974ae5b097492
|
d37afdcab3673f594bd9534b5134e805b588eb4f
|
/02-walking.R
|
80f7a19194d18a2ee04c7b2a50e851d38e840f30
|
[
"MIT"
] |
permissive
|
dougmet/cloudml
|
774b9eec0d59c505ad2ee66dc520df598d81734f
|
0488a83661232d3c0d59a9c5f5bef93fda6d958e
|
refs/heads/master
| 2020-05-02T09:39:16.080459
| 2019-04-09T08:31:40
| 2019-04-09T08:31:40
| 177,877,344
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
02-walking.R
|
library(here)
library(cloudml)
# Need to be in right directory to submit job
# The here() function anchors us to the project root
setwd(here("02-walking"))
# Submit a training job (default machine, then a GPU machine)
cloudml_train("walking.R")
cloudml_train("walking.R", master_type = "standard_gpu")
# Collect the artifacts of a specific finished job into runs/
job_collect("cloudml_2019_03_26_214758262")
view_run()
# Load in the hdf5 model that we saved
# NOTE(review): the artifact is named iris.hdf5 although the job ran
# walking.R -- confirm the saved model's filename.
model <- load_model_hdf5("runs/cloudml_2019_03_26_214758262/iris.hdf5")
# it's a normal model!
model
|
bfab66f5fa7591469df4589f3085dcdb2f8ddef6
|
fd372fc8e9887c560700636ca4f8af60625f3cd8
|
/wordscount.R
|
8942931d3ef51e56b17cefae6eb48c75b7605861
|
[] |
no_license
|
simoncarrignon/CHASM
|
dd8438fc3598ebf94e2f49e68ae4e4508240ea09
|
7c9b690da9adb36bba06690576c5cfb05d6f24cd
|
refs/heads/master
| 2021-01-08T11:07:12.195430
| 2020-04-08T06:21:45
| 2020-04-08T06:21:45
| 242,012,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,873
|
r
|
wordscount.R
|
library(tidyverse)
library(tidytext)
library(rtweet)
# Extract the words used in a timeline of tweets: retweets are excluded,
# URLs/entities are stripped, stop words and purely non-alphabetic tokens
# are dropped. Returns the "word" column of the tokenised result.
getAllWords <- function(timeline, replace_reg = NULL, unnest_reg = NULL) {
    # Default patterns: scrub URLs/entities/"RT", then tokenise on anything
    # that is not a word/hashtag/mention character
    if (is.null(replace_reg)) {
        replace_reg <- "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https"
    }
    if (is.null(unnest_reg)) {
        unnest_reg <- "([^A-Za-z_\\d#@']|'(?![A-Za-z_\\d#@]))"
    }
    # Drop retweets and remove the matched patterns from the text
    cleaned <- timeline %>%
        filter(!str_detect(text, "^RT")) %>%
        mutate(text = str_replace_all(text, replace_reg, ""))
    kept_cols <- cleaned[, c("text", "created_at")]
    # Tokenise into words, then filter stop words and non-alphabetic tokens
    tokens <- kept_cols %>%
        unnest_tokens(word, text, token = "regex", pattern = unnest_reg) %>%
        filter(!word %in% stop_words$word, str_detect(word, "[a-z]"))
    tokens[, "word"]
}
# Relative frequency of each word of `wordspace` among the words of
# `listwords`. Accepts a data.frame/tibble with a "word" column, a plain
# list with a "word" element, or a bare character vector.
# Returns a table of proportions aligned to `wordspace` (unused words get 0).
getWordsCounts <- function(listwords, wordspace) {
  # BUGFIX: the original used `listwords[, "word"]` whenever dim() was NULL,
  # which errors for every dimensionless input (vectors and plain lists).
  if (!is.null(dim(listwords))) {
    words <- listwords[["word"]]   # data.frame / tibble (rtweet output)
  } else if (is.list(listwords)) {
    words <- listwords[["word"]]   # plain list carrying a "word" element
  } else {
    words <- listwords             # already a bare character vector
  }
  # Count against the full vocabulary so absent words appear with count 0,
  # then normalise to relative frequencies
  counts <- table(factor(words, levels = wordspace))
  return(counts / sum(counts))
}
# --- IRA dataset: cluster accounts by their word-frequency profiles ---------
allfiles=list.files("ira_data/",full.names=T) #get all filenames
alldf=lapply(allfiles,read.csv)#import all files as dataframes (may take time)
allTL=lapply(alldf,getAllWords) # extract all words for each data frame and store them in a list
names(allTL)=basename(allfiles) #give each element of our list the name of its file
names(allTL)=sapply(names(allTL),substring,first=1,last=10) #shorten the name
allwords=unlist(allTL) #generate a unique list of all words used by all twitter accounts in our database
vocab=unique(allwords)
countedword=lapply(allTL,getWordsCounts,wordspace=vocab) #now we count for each twitter account which words they use in their own vocabulary and at which frequency, and which words they don't use
names(countedword)=paste(names(allTL),lapply(countedword,function(n)paste0(names(sort(n,decreasing=T)[1:5]),collapse=" "))) #here we add the top five words used by each account so they can be seen on the graph
wordscounts=table(allwords) #count how many times each of these words has been used
mostused=sort(wordscounts,decreasing=T) #rank the words by their usage
mostused=mostused[mostused>20] #take the words that have been used more than 20 times
limitedmatrix=sapply(countedword,"[",names(mostused)) #we restrict our analysis to those words
# Hierarchical clustering of accounts by word-frequency distance
plot(hclust(dist(t(limitedmatrix))),cex=.8)
#redo with known people (live timelines fetched via the Twitter API)
users <- c("simoncarrignon","holyhologram","LeCuisinotron","damianjruck","ralexbentley","mcotsar","duransamson","xilrian","heuredelasieste","damiengrapton","svalver","ricard_sole","brigan_raman","acerbialberto")
alltl=lapply(users,get_timelines,n=3000)
alltl_words=lapply(alltl,getAllWords)
names(alltl_words)=users
allwords=unlist(alltl_words)
vocab=unique(allwords)
countedword=sapply(alltl_words,getWordsCounts,wordspace=vocab)
plot(hclust(dist(t(countedword))),cex=.8)
|
09db40d7839f36d375c5d2c0f7a72c4d327529f9
|
9eb7c16ab805bec439323c2c5fc9ee62cc197aa4
|
/HierarchicalGOF/man/run.chain.2pl.list.Rd
|
f9d1fff0407797d3f16729fea09bd4325cfeb100
|
[] |
no_license
|
sadanapr/HierarchicalGOF
|
51377da28eda8758148f719cd1dcdecaf18b7947
|
ac01f0dd3a5ec73e96833af1db258885ef271248
|
refs/heads/master
| 2023-08-11T18:21:50.351626
| 2018-01-26T22:28:12
| 2018-01-26T22:28:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 951
|
rd
|
run.chain.2pl.list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/N_mixture_functions.R
\name{run.chain.2pl.list}
\alias{run.chain.2pl.list}
\title{Title Wrapper for parallel processing}
\usage{
run.chain.2pl.list(models, Y.list, X, W, n.iter, checkpoint, thin, name.l,
starting.values)
}
\arguments{
\item{models}{Vector of models to be run}
\item{Y.list}{List of n*J matrices of n sites and J repeat visits to each site}
\item{X}{Covariate matrix for abundance}
\item{W}{Covariate matrix for detection probability}
\item{n.iter}{Number of MCMC iterations}
\item{checkpoint}{The number of MCMC iterations that are run between each save of a file containing the MCMC output}
\item{thin}{The thinning rate}
\item{name.l}{List of character name of file output that is saved after every checkpoint}
\item{starting.values}{A list of MCMC starting values}
}
\value{
MCMC output
}
\description{
Title Wrapper for parallel processing
}
|
1578fbfe0dd3df4951705eab9d978728793293bc
|
a61104488f204a969a825fae8aa292ba53267ceb
|
/R/cell_tissue.R
|
e7c77a6e16818ef312df61be8b9befce109b5bb1
|
[
"MIT"
] |
permissive
|
sigven/oncoEnrichR
|
2dbfdca6d49d4b40862942d2997611f841d9c80c
|
3a42581a7fdf90ff33d955b0b8135f71217412ec
|
refs/heads/master
| 2023-08-17T00:01:36.046133
| 2023-08-16T10:10:05
| 2023-08-16T10:10:05
| 223,118,510
| 45
| 9
|
MIT
| 2023-08-16T09:49:02
| 2019-11-21T07:53:42
|
R
|
UTF-8
|
R
| false
| false
| 13,085
|
r
|
cell_tissue.R
|
# Classify query genes by HPA/GTEx expression-specificity category at either
# "tissue" or "single_cell" resolution, and collect their per-tissue/cell-type
# expression values. Returns a list with:
#   'category_df' -- specificity-category percentages for the target gene set
#                    next to the background of all HPA proteins
#   'exp_dist_df' -- long-format log2(nTPM + 1) expression values per query
#                    gene and tissue/cell type
gene_tissue_cell_spec_cat <-
  function(qgenes = NULL,
           q_id_type = "symbol",
           resolution = "tissue",
           genedb = NULL,
           hpa_enrichment_db_df = NULL,
           hpa_expr_db_df = NULL) {
    lgr::lgr$appenders$console$set_layout(
      lgr::LayoutFormat$new(timestamp_fmt = "%Y-%m-%d %T"))
    # Validate required inputs up front
    stopifnot(!is.null(genedb))
    stopifnot(!is.null(qgenes))
    stopifnot(!is.null(hpa_enrichment_db_df))
    stopifnot(!is.null(hpa_expr_db_df))
    stopifnot(is.data.frame(hpa_expr_db_df) &
                length(colnames(hpa_expr_db_df)) > 35 &
                typeof(hpa_expr_db_df) == "list")
    validate_db_df(genedb, dbtype = "genedb")
    stopifnot(resolution == "tissue" | resolution == "single_cell")
    stopifnot(q_id_type == "symbol" | q_id_type == "entrezgene")
    # Map the query identifiers (symbols or Entrez IDs) onto the gene database
    query_genes_df <- data.frame('symbol' = qgenes, stringsAsFactors = F)
    if (q_id_type == 'entrezgene') {
      stopifnot(is.integer(qgenes))
      query_genes_df <-
        data.frame('entrezgene' = qgenes, stringsAsFactors = F) |>
        dplyr::inner_join(genedb, by = "entrezgene",
                          relationship = "many-to-many") |>
        dplyr::distinct()
    } else {
      stopifnot(is.character(qgenes))
      query_genes_df <- query_genes_df |>
        dplyr::inner_join(genedb, by = "symbol",
                          relationship = "many-to-many") |>
        dplyr::distinct()
    }
    # Resolution-dependent labels and the closed set of specificity categories
    etype <- "tissue"
    edb <- "GTEx"
    specificity_categories <-
      c('Group enriched',
        'Low tissue specificity',
        'Not detected',
        'Mixed',
        'Tissue enhanced',
        'Tissue enriched')
    # NOTE(review): 'source' is assigned here (shadowing base::source) but
    # never read later in this function -- appears to be dead code.
    source = 'tissue'
    if (resolution != "tissue") {
      etype <- "cell type"
      edb <- "HPA"
      validate_db_df(hpa_enrichment_db_df,
                     dbtype = "enrichment_db_hpa_singlecell")
      specificity_categories <-
        c('Group enriched',
          'Low cell type specificity',
          'Not detected',
          'Mixed',
          'Cell type enhanced',
          'Cell type enriched')
      source = 'single_cell'
    } else {
      validate_db_df(hpa_enrichment_db_df,
                     dbtype = "enrichment_db_hpa_tissue")
    }
    lgr::lgr$info(
      glue::glue(
        "{edb}: Retrieving {etype} specificity category ",
        "of target genes")
    )
    # Attach specificity categories from the enrichment DB to the query genes
    specificity_groups_target <- as.data.frame(
      hpa_enrichment_db_df |>
        dplyr::inner_join(
          dplyr::select(query_genes_df,
                        c("symbol",
                          "ensembl_gene_id",
                          "name")),
          by = "ensembl_gene_id", relationship = "many-to-many")
    )
    # Build an HTML link per gene pointing to GTEx or the Protein Atlas
    if (resolution == "tissue") {
      # NOTE(review): the href below embeds a stray comma right after the
      # gene ID ("...{ensembl_gene_id},'") -- presumably unintended; confirm
      # against the GTEx portal URL scheme.
      specificity_groups_target <-
        specificity_groups_target |>
        dplyr::mutate(
          genename = glue::glue(
            "<a href='https://gtexportal.org/home/gene/",
            "{.data$ensembl_gene_id},' target='_blank'>",
            "{.data$name}</a>")
        )
    } else {
      specificity_groups_target <-
        specificity_groups_target |>
        dplyr::mutate(
          genename = glue::glue(
            "<a href='https://www.proteinatlas.org/",
            "{.data$ensembl_gene_id}-{.data$symbol}",
            "/celltype' target='_blank'>{.data$name}</a>")
        )
    }
    # Summarise: per-category counts and percentages for the target set
    specificity_groups_target <- as.data.frame(
      specificity_groups_target |>
        dplyr::group_by(.data$category) |>
        dplyr::summarise(n = dplyr::n(), .groups = "drop") |>
        dplyr::mutate(tot = sum(.data$n)) |>
        dplyr::mutate(pct = round((.data$n / .data$tot) * 100, digits = 2)) |>
        dplyr::mutate(
          group = paste0("Target set (n = ", .data$tot,")"
          )
        )
    )
    tot <- unique(specificity_groups_target$tot)
    # Pad missing specificity categories with zero rows so downstream plots
    # always show the full category axis.
    # NOTE(review): this label formats tot with formatC() (thousands
    # separator), while the label built above does not -- for tot >= 1000 the
    # two group labels differ and rows split into separate groups; verify.
    for (n in specificity_categories) {
      df <- data.frame(
        'category' = n,
        'pct' = 0,
        'n' = 0,
        'tot' = 0,
        group = paste0(
          "Target set (n = ",
          formatC(tot, format="f",
                  big.mark = ",", digits=0),")"))
      if (nrow(dplyr::inner_join(
        df,
        specificity_groups_target,
        by = "category", relationship = "many-to-many")) == 0) {
        specificity_groups_target <-
          specificity_groups_target |>
          dplyr::bind_rows(df)
      }
    }
    # Same per-category summary over the full HPA background
    specificity_groups_all <- as.data.frame(
      hpa_enrichment_db_df |>
        dplyr::group_by(.data$category) |>
        dplyr::summarise(n = dplyr::n(), .groups = "drop") |>
        dplyr::mutate(tot = sum(.data$n)) |>
        dplyr::mutate(pct = round((.data$n / .data$tot) * 100, digits = 2)) |>
        dplyr::mutate(group =
                        paste0("All HPA proteins (n = ",
                               formatC(.data$tot, format="f",
                                       big.mark = ",",
                                       digits=0),")"
                        )
        )
    )
    category_df <- dplyr::bind_rows(
      specificity_groups_all,
      specificity_groups_target
    )
    # Expression matrix rows are keyed by Ensembl gene ID in the rownames
    exp_dist_df <- data.frame()
    exp_dist_df <- hpa_expr_db_df
    exp_dist_df$ensembl_gene_id <-
      rownames(exp_dist_df)
    # Pivot wide expression columns to long format and keep only query genes;
    # the two branches differ only in the name of the pivoted key column.
    if (resolution == "tissue") {
      exp_dist_df <- as.data.frame(
        exp_dist_df |>
          tidyr::pivot_longer(cols = !tidyr::starts_with("ensembl"),
                              names_to = "tissue",
                              values_to = "nTPM") |>
          dplyr::inner_join(
            dplyr::select(
              query_genes_df,
              c("symbol",
                "ensembl_gene_id")),
            by = "ensembl_gene_id", relationship = "many-to-many") |>
          dplyr::mutate(exp = round(log2(.data$nTPM + 1), digits = 3)) |>
          dplyr::mutate(exp_measure = "log2(nTPM + 1)")
      )
    } else {
      exp_dist_df <- as.data.frame(
        exp_dist_df |>
          tidyr::pivot_longer(cols = !tidyr::starts_with("ensembl"),
                              names_to = "cell_type",
                              values_to = "nTPM") |>
          dplyr::inner_join(
            dplyr::select(
              query_genes_df,
              c("symbol",
                "ensembl_gene_id")),
            by = "ensembl_gene_id", relationship = "many-to-many") |>
          dplyr::mutate(exp = round(log2(.data$nTPM + 1), digits = 3)) |>
          dplyr::mutate(exp_measure = "log2(nTPM + 1)")
      )
    }
    return(list('category_df' = category_df,
                'exp_dist_df' = exp_dist_df))
  }
# Test a set of query genes (Entrez IDs) for enrichment of specific tissues
# ("tissue" resolution, GTEx-derived) or cell types ("single_cell", HPA)
# using TissueEnrich against an optional custom background. Returns a list:
#   'per_gene' -- specificity category and tissue/cell type per query gene
#   'per_type' -- TissueEnrich enrichment statistics per tissue/cell type
gene_tissue_cell_enrichment <-
  function(qgenes_entrez = NULL,
           background_entrez = NULL,
           genedb = NULL,
           hpa_enrichment_db_df = NULL,
           hpa_enrichment_db_SE = NULL,
           resolution = "tissue") {
    lgr::lgr$appenders$console$set_layout(
      lgr::LayoutFormat$new(timestamp_fmt = "%Y-%m-%d %T"))
    # Validate required inputs up front
    stopifnot(!is.null(genedb))
    stopifnot(!is.null(hpa_enrichment_db_df))
    stopifnot(!is.null(hpa_enrichment_db_SE))
    stopifnot(!is.null(qgenes_entrez) &
                is.integer(qgenes_entrez))
    stopifnot(resolution == "tissue" | resolution == "single_cell")
    validate_db_df(genedb, dbtype = "genedb")
    #if (!("GSEABase" %in% (.packages(all.available = T)))) {
    #  suppressPackageStartupMessages(library(GSEABase))
    #}
    # Resolution-dependent labels and enrichment-DB validation
    etype <- "tissues"
    edb <- "GTEx"
    if (resolution != "tissue") {
      etype <- "cell types"
      edb <- "HPA"
      validate_db_df(hpa_enrichment_db_df,
                     dbtype = "enrichment_db_hpa_singlecell")
    } else {
      validate_db_df(hpa_enrichment_db_df,
                     dbtype = "enrichment_db_hpa_tissue")
    }
    lgr::lgr$info(
      glue::glue(
        "{edb}: Estimating enrichment of {etype}",
        " in target set with TissueEnrich"))
    # Map the query Entrez IDs onto the gene database
    df <- data.frame('entrezgene' = as.integer(qgenes_entrez),
                     stringsAsFactors = F) |>
      dplyr::inner_join(
        dplyr::select(genedb,
                      c("entrezgene",
                        "ensembl_gene_id",
                        "symbol",
                        "name",
                        "cancer_max_rank")),
        by = "entrezgene", relationship = "many-to-many")
    # Build an HTML link per gene pointing to GTEx or the Protein Atlas
    if (resolution == "tissue") {
      df <- df |>
        dplyr::mutate(
          genename = glue::glue(
            "<a href='https://gtexportal.org/home/gene/",
            "{.data$ensembl_gene_id}' target='_blank'>",
            "{.data$name}</a>")
        )
    } else {
      df <- df |>
        dplyr::mutate(
          genename = glue::glue(
            "<a href='https://www.proteinatlas.org/",
            "{.data$ensembl_gene_id}-{.data$symbol}",
            "/celltype' target='_blank'>{.data$name}</a>")
        )
    }
    # Query genes restricted to those present in the enrichment background
    bg <- hpa_enrichment_db_df
    q <- bg |>
      dplyr::select("ensembl_gene_id") |>
      dplyr::inner_join(
        df, by = "ensembl_gene_id", relationship = "many-to-many") |>
      dplyr::distinct()
    query_ensembl <- q$ensembl_gene_id
    # Per-gene table: the specificity annotation of every query gene
    specificities_per_gene <- bg |>
      dplyr::filter(!is.na(.data$ensembl_gene_id)) |>
      dplyr::inner_join(
        df, by = "ensembl_gene_id", relationship = "many-to-many")
    if (nrow(specificities_per_gene) > 0) {
      # Keep the display columns; the branches differ only in the
      # tissue vs. cell_type column name
      if (resolution == "tissue") {
        specificities_per_gene <- specificities_per_gene |>
          dplyr::select(c("symbol",
                          "genename",
                          "category",
                          "tissue",
                          "cancer_max_rank")) |>
          dplyr::arrange(.data$category,
                         dplyr::desc(.data$cancer_max_rank))
      } else {
        specificities_per_gene <- specificities_per_gene |>
          dplyr::select(c("symbol",
                          "genename",
                          "category",
                          "cell_type",
                          "cancer_max_rank")) |>
          dplyr::arrange(.data$category,
                         dplyr::desc(.data$cancer_max_rank))
      }
    }
    # Background gene set: all enrichment-DB genes, optionally restricted to
    # a user-supplied background (always re-augmented with the query genes)
    background_ensembl <- bg$ensembl_gene_id
    if (!is.null(background_entrez)) {
      df <-
        data.frame('entrezgene' = as.integer(background_entrez),
                   stringsAsFactors = F) |>
        dplyr::inner_join(
          dplyr::select(
            genedb, c("ensembl_gene_id", "entrezgene")),
          by = "entrezgene", relationship = "many-to-many"
        )
      bg <- bg |>
        dplyr::inner_join(
          df, by = "ensembl_gene_id", relationship = "many-to-many") |>
        dplyr::distinct()
      background_ensembl <- bg$ensembl_gene_id
      background_ensembl <- unique(
        c(background_ensembl, query_ensembl)
      )
    }
    # Wrap query and background as GSEABase GeneSets for TissueEnrich
    gene_id_TE <- GSEABase::ENSEMBLIdentifier()
    gs_query <- GSEABase::GeneSet(
      geneIds = query_ensembl,
      organism = "Homo Sapiens",
      geneIdType = gene_id_TE)
    gs_background <- GSEABase::GeneSet(
      geneIds = background_ensembl,
      organism = "Homo Sapiens",
      geneIdType = gene_id_TE)
    te_output <- NULL
    ## get pre-defined tissue-specificities (SummarisedExperiement) - created with
    ## tissueEnrich::teGeneRetrieval on expression data sets
    se <- hpa_enrichment_db_SE
    #se <- oeDB$tissuecelldb[[resolution]][['te_SE']]
    ## perform tissue enrichment analysis for query dataset (gs_query),
    ## using gs_background as the background dataset, and the annotated
    ## tissue/cell-type-specific genes in se as basis for detection
    ## of enriched tissues/cell types
    suppressMessages(
      te_output <- TissueEnrich::teEnrichmentCustom(
        inputGenes = gs_query,
        tissueSpecificGenes = se,
        backgroundGenes = gs_background)
    )
    # Reshape TissueEnrich's SummarizedExperiment output (element 1 of the
    # returned list) into a plain data.frame of enrichment statistics
    enrichment_df <- data.frame()
    if (!is.null(te_output)) {
      if (!is.null(te_output[[1]])) {
        se_enrich_output <- te_output[[1]]
        enrichment_df <- stats::setNames(
          data.frame(SummarizedExperiment::assay(se_enrich_output),
                     row.names = SummarizedExperiment::rowData(se_enrich_output)[,1]),
          SummarizedExperiment::colData(se_enrich_output)[,1])
        enrichment_df$Tissue <- rownames(enrichment_df)
        rownames(enrichment_df) <- NULL
        # Drop the aggregate "All" row and normalise the column names
        enrichment_df <- enrichment_df |>
          dplyr::filter(.data$Tissue != "All") |>
          dplyr::rename(fold_change = "fold.change",
                        tissue = "Tissue",
                        tissue_specific_genes = "Tissue.Specific.Genes",
                        log10_pvalue = "Log10PValue") |>
          dplyr::select(c("tissue",
                          "tissue_specific_genes",
                          "fold_change",
                          "log10_pvalue")) |>
          dplyr::arrange(dplyr::desc(.data$fold_change))
        # At single-cell resolution the "tissue" columns are cell types
        if (resolution == "single_cell") {
          enrichment_df <- enrichment_df |>
            dplyr::rename(cell_type = "tissue",
                          celltype_specific_genes =
                            "tissue_specific_genes")
        }
      }
    }
    return(list('per_gene' = specificities_per_gene,
                'per_type' = enrichment_df))
  }
|
71d2552507206443dfd342488acb09d885e55f09
|
5e094991872cd54c38fa10f8527030df89af6438
|
/R/ordmove.R
|
b01546b71a6a71dd2930d269fd3dbf0740d54669
|
[] |
no_license
|
lshtm-gis/PHSM_SI_package
|
39b0c34f0f2190c39f20a5f745eee53e89352462
|
43c41f6da721e68748777bbcf9689aca4d1b8413
|
refs/heads/main
| 2023-03-31T05:19:38.416560
| 2021-04-09T07:14:45
| 2021-04-09T07:14:45
| 353,692,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,288
|
r
|
ordmove.R
|
#' Compute the Ordinal Scope Scale for Movements
#'
#' Reads the output of the \code{ordgather} function and computes the
#' ordinal scope scale for indicators 4.5.1, 4.5.2, 4.5.3 and 4.5.4
#' (movement restrictions). The scale is built by a sequence of
#' \code{ifelse} reassignments, so later rules overwrite earlier ones.
#'
#' @param x Data frame produced by a previous run of \code{ordgather};
#'   must contain the columns \code{who_code}, \code{enforcement},
#'   \code{targeted}, \code{measure_stage} and \code{ord}.
#'
#' @return \code{x} with its \code{ord} column updated to the computed
#'   ordinal scale for the Movements category.
#'
#' @export
#' @examples
#' \dontrun{
#' ordmove(x)
#' }
ordmove <- function(x){
  #
  # Default of 0 for all movement indicators 4.5.1, 4.5.2, 4.5.3, 4.5.4.
  #
  x$ord <- ifelse(x$who_code %in% c('4.5.1','4.5.3','4.5.2','4.5.4'), 0,x$ord)
  #
  # Level 1: any movement indicator with enforcement == 'recommended'.
  # (& binds tighter than |, so this is (A & rec) | (B & rec).)
  #
  x$ord <- ifelse(x$who_code == '4.5.2' & x$enforcement == 'recommended' |
                    x$who_code %in% c('4.5.1','4.5.3','4.5.4') & x$enforcement == 'recommended',
                  1,x$ord)
  #
  # Level 2: 4.5.1/4.5.3/4.5.4 with enforcement required or monitored.
  #
  x$ord <- ifelse(x$who_code %in% c('4.5.1','4.5.3','4.5.4') &
                    x$enforcement %in% c('required','monitored'),
                  2,x$ord)
  #
  # Level 3: 4.5.2, required/monitored, target is exactly 'partial curfew'
  # and not one of the heavier restriction labels.
  #
  x$ord <- ifelse(x$who_code == '4.5.2' &
                    x$enforcement %in% c('required','monitored') &
                    tolower(x$targeted) == 'partial curfew' &
                    !tolower(x$targeted) %in% c('stay at home','weekend curfew','full curfew'),
                  3,x$ord)
  #
  # Level 4: 4.5.2, required/monitored, target mentions stay-at-home or
  # full curfew but not partial curfew.
  #
  x$ord <- ifelse(x$who_code == '4.5.2' &
                    x$enforcement %in% c('required','monitored') &
                    !grepl(paste(c('partial curfew'),collapse = "|"),tolower(x$targeted)) &
                    grepl(paste(c('stay at home','full curfew'),collapse = "|"),tolower(x$targeted)),
                  4,x$ord)
  #
  # Level 5: weekend curfew, (partial curfew + stay at home), full curfew,
  # or any mention of partial curfew.
  # NOTE(review): because & binds tighter than |, the final
  # (full curfew | partial curfew) disjunct applies REGARDLESS of
  # who_code/enforcement — confirm this is intended; if not, the whole
  # target condition needs parentheses.
  #
  x$ord <- ifelse(x$who_code == '4.5.2' &
                    x$enforcement %in% c('required','monitored') &
                    (gsub(",.*$", "", x$targeted) == 'weekend curfew' | (gsub(",.*$", "", x$targeted) == 'partial curfew' & grepl(paste(c('stay at home'),collapse = "|"),tolower(x$targeted)))) |
                    (gsub(",.*$", "", x$targeted) == 'full curfew' | grepl(paste(c('partial curfew'),collapse = "|"),tolower(x$targeted))), 5 ,x$ord)
  #
  # Ratchet: for new/extended/modified measures the score must not go down.
  # NOTE(review): relies on dplyr::lag (base stats::lag would not shift a
  # plain vector) and assumes rows are ordered by entity and time — verify
  # both at the call site.
  x$ord <- ifelse(x$who_code %in% c('4.5.1','4.5.3','4.5.2','4.5.4') &
                    x$measure_stage %in% c('new','modification','extension') &
                    x$ord < lag(x$ord),lag(x$ord),x$ord)
  #
  # The lag comparison yields NA for the first row; those NAs become 1.
  # (replace_na comes from tidyr — must be attached by the caller.)
  x <- replace_na(x, list(ord = 1))
  #
  #
  return(x)
}
|
0bd7a13c8a3079adbc0c20ce12aa506b458c73bf
|
691cf0cdb5d1d26f1a5d11e913c4a4dc8574ff0e
|
/Assignments/Assignment 8 code.R
|
d8ecb91e8269949a5acaa0de2581aae2273af3af
|
[] |
no_license
|
bharwood-data/IST-707
|
96253d23e22deb4e32efd099ae8f63aeeaa8128e
|
91382d1b6798146bdc4d42b7219c351fefcfac65
|
refs/heads/master
| 2022-04-22T11:23:34.277047
| 2020-04-15T20:29:07
| 2020-04-15T20:29:07
| 256,027,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,535
|
r
|
Assignment 8 code.R
|
# libraries
library(network)
library(wordcloud)
library(tm)
library(slam)
library(quanteda)
library(SnowballC)
library(arules)
library(proxy)
library(stringr)
library(textmineR)
library(igraph)
library(lsa)
library(tidyr)
library(gofastr)
library(mclust)
library(cluster)
library(stringi)
library(proxy)
library(Matrix)
library(tidytext)
library(plyr)
library(ggplot2)
library(factoextra)
library(wordcloud)
library(e1071)
library(ROCR)
# ---- Setup & data preparation ----
# prep
set.seed(121804)
# NOTE(review): hard-coded absolute path; setwd() in a script breaks
# portability — prefer relative paths / an RStudio project.
setwd("E:/Documents/IST 707")
# load the deception/sentiment review data
data <- read.csv("data/deception_data_converted_final1.csv")
# drop rows 83-84 (presumably malformed records — confirm against raw file)
data <- data[-(83:84),]
lie <- data$lie
sent <- data$sentiment
# drop the two label columns, then collapse the remaining text columns
# into a single "review" column (tidyr::unite)
data <- data[,-(1:2)]
data <- unite(data, "review")
head(data, n=5)
lie_df <- data.frame(lie, data)
# build a tm corpus, one document per review
dfCorpus <- Corpus(VectorSource(t(data)))
tm::inspect(dfCorpus)
getTransformations()
nreviews <- length(t(data))
# term-frequency bounds for the document-term matrix
(minTermFreq <- nreviews * 0.0001)
(maxTermFreq <- nreviews /2)
(STOPS <-stopwords('english'))
reviews_dtm <- DocumentTermMatrix(dfCorpus,
                                  control = list(
                                    stopwords = TRUE,
                                    #wordLengths=c(4, 10),
                                    removePunctuation = TRUE,
                                    removeNumbers = TRUE,
                                    tolower=TRUE,
                                    #stemming = F,
                                    #stemWords=TRUE,
                                    remove_separators = TRUE,
                                    #stem=TRUE,
                                    stopwords("english"),
                                    bounds = list(global = c(minTermFreq, maxTermFreq))
                                  ))
tm::inspect(reviews_dtm)
WordFreq <- colSums(as.matrix(reviews_dtm))
head(WordFreq)
length(WordFreq)
Row_Sum_Per_Review <- rowSums(as.matrix(reviews_dtm))
reviews_M <- as.matrix(reviews_dtm)
# normalise each document's counts to relative term frequencies;
# apply over rows returns documents as columns, hence the transpose below
reviews_m1 <- apply(reviews_M, 1, function(i) i/sum(i))
reviews_norm <- t(reviews_m1)
reviews_df <- as.data.frame(reviews_M)
wordcloud(colnames(reviews_norm), reviews_norm, max.words = 1000)
m <- reviews_M
m1 <- reviews_norm
# modelling frame: lie label + normalised term frequencies
M <- data.frame(lie, m1)
# Compute per-class precision, recall and F1 from a confusion matrix.
#
# Args:
#   pred_table: confusion matrix / contingency table with PREDICTED classes
#     in rows and ACTUAL classes in columns, i.e. as produced everywhere in
#     this script by table(pred, actual).
#   labels: character vector of class names, one per row/column of
#     pred_table; used as row names of the result.
#
# Returns:
#   A numeric matrix with one row per class (named after `labels`) and
#   columns "precision", "recall" and "f1". Entries are NaN where a
#   denominator is zero.
#
# BUGFIX: the original divided the diagonal by COLUMN sums for precision and
# ROW sums for recall. With rows = predictions, predicted-positive totals are
# the ROW sums, so precision and recall were swapped. Also vectorised
# (no more growing vectors inside a loop).
model_metrics <- function(pred_table, labels)
{
  idx <- seq_along(labels)
  # true positives are the diagonal entries
  tp <- as.numeric(pred_table[cbind(idx, idx)])
  # precision = TP / row total (everything predicted as class i)
  precision <- tp / as.numeric(rowSums(pred_table))[idx]
  # recall = TP / column total (everything actually in class i)
  recall <- tp / as.numeric(colSums(pred_table))[idx]
  # F1 = harmonic mean of precision and recall
  f1 <- 2 * precision * recall / (precision + recall)
  out <- cbind(precision, recall, f1)
  rownames(out) <- labels
  out
}
# ---- Lie-detection models: Naive Bayes + tuned polynomial SVM on three
# ---- independent 80/20 random splits of M.
# NOTE(review): `plot(...) + abline(...)` below is not ggplot; both
# base-graphics calls draw as side effects and `+` operates on their
# (invisible NULL) return values. Harmless, but the `+` is spurious.
# NOTE(review): nrow(M)*0.8 may be non-integer; confirm the truncation
# that sample() applies gives the intended split size.
sampleSize <- nrow(M)*0.8

#Naive Bayes
# Model 1: split, fit NB, tabulate/plot ROC, report accuracy + metrics
samples1 <- sample(1:nrow(M),sampleSize, replace=FALSE)
LNBtrain1 <- M[samples1,]
LNBtest1 <- M[-samples1,]
LNB1 <- naiveBayes(lie ~ ., data=LNBtrain1)
LNB1_predict <- predict(LNB1, LNBtest1)
table(LNB1_predict, LNBtest1$lie)
Lpred1 <- prediction(as.numeric(LNB1_predict), as.numeric(LNBtest1$lie))
Lperf1 <- performance(Lpred1, measure="tpr", x.measure="fpr")
LNBplot1 <- (plot(Lperf1, main= "ROC curve for Naive Bayes", col="blue", lwd=3)+ abline(coef=c(0,1)))
LNB1_pred_table <- table(LNB1_predict, LNBtest1$lie)
print(knitr::kable(LNB1_pred_table))
LNB1_acc <- (sum(diag(LNB1_pred_table)/sum(LNB1_pred_table)))
print(paste0("Naive Bayes Accuracy: ", LNB1_acc))
model_metrics(LNB1_pred_table,c("Lie","Truth"))
# tune SVM hyperparameters over a small grid, then refit with the best set
Ltune1 <- tune.svm(x=LNBtrain1[,-1], y=LNBtrain1[,1], type="C-classification", kernel = "polynomial", degree=2, cost=10^(1:3), gamma=c(0.1,1,10), coef0=c(0.1,1,10))
(Lgamma1 <- Ltune1$best.parameters$gamma)
(Lcoef01 <- Ltune1$best.parameters$coef0)
(Lcost1 <- Ltune1$best.parameters$cost)
LSVM1 <- svm(lie ~ ., data=LNBtrain1, kernel="polynomial", degree=2, gamma=Lgamma1, coef0=Lcoef01, cost=Lcost1, scale=FALSE)
LSVM_pred1<- predict(LSVM1, LNBtest1[,-1], type="class")
LSVMpred1 <- prediction(as.numeric(LSVM_pred1), as.numeric(LNBtest1$lie))
LSVMperf1 <- performance(LSVMpred1, measure="tpr", x.measure="fpr")
plot(LSVMperf1, main ="ROC curve for SVM", col="purple", lwd=3)+abline(coef=c(0,1))
(LSVM1_pred_table <- table(LSVM_pred1, LNBtest1$lie))
LSVM1_acc <- (sum(diag(LSVM1_pred_table)/sum(LSVM1_pred_table)))
print(paste0("SVM accuracy: ", LSVM1_acc))
model_metrics(LSVM1_pred_table, c("Lie","Truth"))
# Model 2: same pipeline on a fresh random split
samples2 <- sample(1:nrow(M),sampleSize, replace=FALSE)
LNBtrain2 <- M[samples2,]
LNBtest2 <- M[-samples2,]
LNB2 <- naiveBayes(lie ~ ., data=LNBtrain2)
LNB2_predict <- predict(LNB2, LNBtest2)
Lpred2 <- prediction(as.numeric(LNB2_predict), as.numeric(LNBtest2$lie))
Lperf2 <- performance(Lpred2, measure="tpr", x.measure="fpr")
LNBplot2 <- (plot(Lperf2, main= "ROC curve for Naive Bayes", col="blue", lwd=3)+ abline(coef=c(0,1)))
LNB2_pred_table <- table(LNB2_predict, LNBtest2$lie)
print(knitr::kable(LNB2_pred_table))
LNB2_acc <- (sum(diag(LNB2_pred_table)/sum(LNB2_pred_table)))
print(paste0("Naive Bayes Accuracy: ", LNB2_acc))
model_metrics(LNB2_pred_table,c("Lie","Truth"))
Ltune2 <- tune.svm(x=LNBtrain2[,-1], y=LNBtrain2[,1], type="C-classification", kernel = "polynomial", degree=2, cost=10^(1:3), gamma=c(0.1,1,10), coef0=c(0.1,1,10))
Lgamma2 <- Ltune2$best.parameters$gamma
Lcoef02 <- Ltune2$best.parameters$coef0
Lcost2 <- Ltune2$best.parameters$cost
LSVM2 <- svm(lie ~ ., data=LNBtrain2, kernel="polynomial", degree=2, gamma=Lgamma2, coef0=Lcoef02, cost=Lcost2, scale=FALSE)
LSVM_pred2<- predict(LSVM2, LNBtest2[,-1], type="class")
LSVMpred2 <- prediction(as.numeric(LSVM_pred2), as.numeric(LNBtest2$lie))
LSVMperf2 <- performance(LSVMpred2, measure="tpr", x.measure="fpr")
plot(LSVMperf2, main ="ROC curve for SVM", col="purple", lwd=3)+abline(coef=c(0,1))
(LSVM2_pred_table <- table(LSVM_pred2, LNBtest2$lie))
LSVM2_acc <- (sum(diag(LSVM2_pred_table)/sum(LSVM2_pred_table)))
print(paste0("SVM accuracy: ", LSVM2_acc))
model_metrics(LSVM2_pred_table, c("Lie","Truth"))
# Model 3: same pipeline on a third random split
samples3 <- sample(1:nrow(M),sampleSize, replace=FALSE)
LNBtrain3 <- M[samples3,]
LNBtest3 <- M[-samples3,]
LNB3 <- naiveBayes(lie ~ ., data=LNBtrain3)
LNB3_predict <- predict(LNB3, LNBtest3)
Lpred3 <- prediction(as.numeric(LNB3_predict), as.numeric(LNBtest3$lie))
Lperf3 <- performance(Lpred3, measure="tpr", x.measure="fpr")
plot(Lperf3, main= "ROC curve for Naive Bayes", col="blue", lwd=3)+ abline(coef=c(0,1))
LNB3_pred_table <- table(LNB3_predict, LNBtest3$lie)
print(knitr::kable(LNB3_pred_table))
LNB3_acc <- (sum(diag(LNB3_pred_table)/sum(LNB3_pred_table)))
print(paste0("Naive Bayes Accuracy: ", LNB3_acc))
model_metrics(LNB3_pred_table,c("Lie","Truth"))
Ltune3 <- tune.svm(x=LNBtrain3[,-1], y=LNBtrain3[,1], type="C-classification", kernel = "polynomial", degree=2, cost=10^(1:3), gamma=c(0.1,1,10), coef0=c(0.1,1,10))
Lgamma3 <- Ltune3$best.parameters$gamma
Lcoef03 <- Ltune3$best.parameters$coef0
Lcost3 <- Ltune3$best.parameters$cost
LSVM3 <- svm(lie ~ ., data=LNBtrain3, kernel="polynomial", degree=2, gamma=Lgamma3, coef0=Lcoef03, cost=Lcost3, scale=FALSE)
LSVM_pred3<- predict(LSVM3, LNBtest3[,-1], type="class")
LSVMpred3 <- prediction(as.numeric(LSVM_pred3), as.numeric(LNBtest3$lie))
LSVMperf3 <- performance(LSVMpred3, measure="tpr", x.measure="fpr")
plot(LSVMperf3, main ="ROC curve for SVM", col="purple", lwd=3)+abline(coef=c(0,1))
(LSVM3_pred_table <- table(LSVM_pred3, LNBtest3$lie))
LSVM3_acc <- (sum(diag(LSVM3_pred_table)/sum(LSVM3_pred_table)))
print(paste0("SVM accuracy: ", LSVM3_acc))
model_metrics(LSVM3_pred_table, c("Lie","Truth"))
## Sentiment, using same sample sets
# ---- Sentiment models: identical NB + tuned-SVM pipeline as for `lie`,
# ---- reusing samples1/2/3 so both targets see the same splits.
# NOTE(review): `plot(...) + abline(...)` is base graphics, not ggplot;
# the `+` is spurious (both calls draw as side effects).
M1 <- data.frame(sent, m1)
# Model 1
SNBtrain1 <- M1[samples1,]
SNBtest1 <- M1[-samples1,]
SNB1 <- naiveBayes(sent ~ ., data=SNBtrain1)
SNB1_predict <- predict(SNB1, SNBtest1)
Spred1 <- prediction(as.numeric(SNB1_predict), as.numeric(SNBtest1$sent))
Sperf1 <- performance(Spred1, measure="tpr", x.measure="fpr")
plot(Sperf1, main= "ROC curve for Naive Bayes", col="blue", lwd=3)+ abline(coef=c(0,1))
SNB1_pred_table <- table(SNB1_predict, SNBtest1$sent)
print(knitr::kable(SNB1_pred_table))
SNB1_acc <- (sum(diag(SNB1_pred_table)/sum(SNB1_pred_table)))
print(paste0("Naive Bayes Accuracy: ", SNB1_acc))
model_metrics(SNB1_pred_table,c("Negative","Positive"))
# tune SVM hyperparameters, then refit with the best set
Stune1 <- tune.svm(x=SNBtrain1[,-1], y=SNBtrain1[,1], type="C-classification", kernel = "polynomial", degree=2, cost=10^(1:3), gamma=c(0.1,1,10), coef0=c(0.1,1,10))
Sgamma1 <- Stune1$best.parameters$gamma
Scoef01 <- Stune1$best.parameters$coef0
Scost1 <- Stune1$best.parameters$cost
SSVM1 <- svm(sent ~ ., data=SNBtrain1, kernel="polynomial", degree=2, gamma=Sgamma1, coef0=Scoef01, cost=Scost1, scale=FALSE)
SSVM_pred1<- predict(SSVM1, SNBtest1[,-1], type="class")
SSVMpred1 <- prediction(as.numeric(SSVM_pred1), as.numeric(SNBtest1$sent))
SSVMperf1 <- performance(SSVMpred1, measure="tpr", x.measure="fpr")
plot(SSVMperf1, main ="ROC curve for SVM", col="purple", lwd=3)+abline(coef=c(0,1))
(SSVM1_pred_table <- table(SSVM_pred1, SNBtest1$sent))
SSVM1_acc <- (sum(diag(SSVM1_pred_table)/sum(SSVM1_pred_table)))
print(paste0("SVM accuracy: ", SSVM1_acc))
model_metrics(SSVM1_pred_table, c("Negative","Positive"))
#Model 2
SNBtrain2 <- M1[samples2,]
SNBtest2 <- M1[-samples2,]
SNB2 <- naiveBayes(sent ~ ., data=SNBtrain2)
SNB2_predict <- predict(SNB2, SNBtest2)
Spred2 <- prediction(as.numeric(SNB2_predict), as.numeric(SNBtest2$sent))
Sperf2 <- performance(Spred2, measure="tpr", x.measure="fpr")
plot(Sperf2, main= "ROC curve for Naive Bayes", col="blue", lwd=3)+ abline(coef=c(0,1))
SNB2_pred_table <- table(SNB2_predict, SNBtest2$sent)
print(knitr::kable(SNB2_pred_table))
SNB2_acc <- (sum(diag(SNB2_pred_table)/sum(SNB2_pred_table)))
print(paste0("Naive Bayes Accuracy: ", SNB2_acc))
model_metrics(SNB2_pred_table,c("Negative","Positive"))
Stune2 <- tune.svm(x=SNBtrain2[,-1], y=SNBtrain2[,1], type="C-classification", kernel = "polynomial", degree=2, cost=10^(1:3), gamma=c(0.1,1,10), coef0=c(0.1,1,10))
Sgamma2 <- Stune2$best.parameters$gamma
Scoef02 <- Stune2$best.parameters$coef0
Scost2 <- Stune2$best.parameters$cost
SSVM2 <- svm(sent ~ ., data=SNBtrain2, kernel="polynomial", degree=2, gamma=Sgamma2, coef0=Scoef02, cost=Scost2, scale=FALSE)
SSVM_pred2<- predict(SSVM2, SNBtest2[,-1], type="class")
SSVMpred2 <- prediction(as.numeric(SSVM_pred2), as.numeric(SNBtest2$sent))
SSVMperf2 <- performance(SSVMpred2, measure="tpr", x.measure="fpr")
plot(SSVMperf2, main ="ROC curve for SVM", col="purple", lwd=3)+abline(coef=c(0,1))
(SSVM2_pred_table <- table(SSVM_pred2, SNBtest2$sent))
SSVM2_acc <- (sum(diag(SSVM2_pred_table)/sum(SSVM2_pred_table)))
print(paste0("SVM accuracy: ", SSVM2_acc))
model_metrics(SSVM2_pred_table, c("Negative","Positive"))
#Model 3
SNBtrain3 <- M1[samples3,]
SNBtest3 <- M1[-samples3,]
SNB3 <- naiveBayes(sent ~ ., data=SNBtrain3)
SNB3_predict <- predict(SNB3, SNBtest3)
Spred3 <- prediction(as.numeric(SNB3_predict), as.numeric(SNBtest3$sent))
Sperf3 <- performance(Spred3, measure="tpr", x.measure="fpr")
plot(Sperf3, main= "ROC curve for Naive Bayes", col="blue", lwd=3) + abline(coef=c(0,1))
SNB3_pred_table <- table(SNB3_predict, SNBtest3$sent)
print(knitr::kable(SNB3_pred_table))
SNB3_acc <- (sum(diag(SNB3_pred_table)/sum(SNB3_pred_table)))
print(paste0("Naive Bayes Accuracy: ", SNB3_acc))
model_metrics(SNB3_pred_table,c("Negative","Positive"))
Stune3 <- tune.svm(x=SNBtrain3[,-1], y=SNBtrain3[,1], type="C-classification", kernel = "polynomial", degree=2, cost=10^(1:3), gamma=c(0.1,1,10), coef0=c(0.1,1,10))
Sgamma3 <- Stune3$best.parameters$gamma
Scoef03 <- Stune3$best.parameters$coef0
Scost3 <- Stune3$best.parameters$cost
SSVM3 <- svm(sent ~ ., data=SNBtrain3, kernel="polynomial", degree=2, gamma=Sgamma3, coef0=Scoef03, cost=Scost3, scale=FALSE)
SSVM_pred3<- predict(SSVM3, SNBtest3[,-1], type="class")
SSVMpred3 <- prediction(as.numeric(SSVM_pred3), as.numeric(SNBtest3$sent))
SSVMperf3 <- performance(SSVMpred3, measure="tpr", x.measure="fpr")
plot(SSVMperf3, main ="ROC curve for SVM", col="purple", lwd=3)+abline(coef=c(0,1))
(SSVM3_pred_table <- table(SSVM_pred3, SNBtest3$sent))
SSVM3_acc <- (sum(diag(SSVM3_pred_table)/sum(SSVM3_pred_table)))
print(paste0("SVM accuracy: ", SSVM3_acc))
model_metrics(SSVM3_pred_table, c("Negative","Positive"))
|
e00523f37604563adf81265589605d8af7546fd3
|
ef84851bd06ab41faa62190f6c8464809605cbb9
|
/functions/taxa.turnover.models.R
|
8052c6ff28c75ee30ac182185fd62b5677de4f6e
|
[] |
no_license
|
TimothyStaples/novelty-cenozoic-microplankton
|
f306c22161c7fdaf840c1662f67178a91c92748a
|
0a062c18a6e661d1d0a4af750186a9e42448470a
|
refs/heads/master
| 2022-12-23T11:58:01.238855
| 2020-09-15T23:36:37
| 2020-09-15T23:36:37
| 288,867,070
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,460
|
r
|
taxa.turnover.models.R
|
# Fit binomial demographic-turnover models per taxon group.
#
# Args:
#   novel.list: list of per-taxon datasets, each with elements $sites,
#     $ssmats, $novel and $data. NOTE(review): assumed to be ordered
#     (nano, foram, radio, diatom) — confirm against the caller.
#   test.edge.effects: if TRUE, run turnover.edge.effect() diagnostics
#     before fitting (its result is printed/side-effecting only).
#   ext.cutoff: if not NA, drop bins closer than this to the series end
#     for extinction-based responses (ext, emig).
#   orig.cutoff: if not NA, drop bins closer than this to the series start
#     for origination-based responses (orig, immig).
#
# Returns:
#   A list of model-list objects, one per demographic response
#   (ext, orig, emig, immig, loss, gain). As a side effect, a coefficient
#   summary CSV is written to ./outputs/ for each response.
#
# Fixes vs. original: seq_along() instead of 1:length() / hard-coded 1:6
# (safe on empty input, stays in sync with success.vars), and the
# `trail.var` local-name typo.
taxa.turnover.models <- function(novel.list,
                                 test.edge.effects = FALSE,
                                 ext.cutoff = NA,
                                 orig.cutoff = NA){

  # taxa labels, matched by position to novel.list
  taxa.labels <- c("nano", "foram", "radio", "diatom")

  # calculate bin-to-bin turnover for each taxon, across all its sites
  turnover.list <- lapply(seq_along(novel.list), function(x.n){
    print(x.n)  # progress
    x <- novel.list[[x.n]]
    taxa <- taxa.labels[x.n]

    do.call("rbind", lapply(seq_along(x$sites), function(n){
      temp <- local.extorig(ssmat = x$ssmats[[n]],
                            novel = x$novel[[n]],
                            site.name = x$sites[n],
                            raw.data = x$data)
      temp$taxa <- taxa
      temp
    }))
  })
  turnover.df <- do.call("rbind", turnover.list)

  # response (success) columns, their trial-count columns, and the
  # edge-effect covariate used by the model function (NA = none)
  success.vars <- c("ext", "orig", "emig", "immig", "loss", "gain")
  trial.vars <- c("ext.rich", "orig.rich", "ext.rich", "orig.rich", "ext.rich", "orig.rich")
  edge.vars <- c("n.from.end", "n.from.start", NA, NA, NA, NA)

  if(test.edge.effects){
    # extinction/origination likely suffer time-series edge effects;
    # this diagnoses where the quadratic edge effects balance out
    turnover.edge.effect(turnover.df)
  }

  demographic.models <- lapply(seq_along(success.vars), function(n){
    sub.turnover <- turnover.df
    print(n)  # progress
    success.var <- success.vars[n]
    trial.var <- trial.vars[n]
    edge.var <- edge.vars[n]

    # trim bins near the series end for extinction-based responses
    if(n %in% c(1,3) && !is.na(ext.cutoff)){
      sub.turnover <- turnover.df[turnover.df$n.from.end >= ext.cutoff,]
    }
    # trim bins near the series start for origination-based responses
    if(n %in% c(2,4) && !is.na(orig.cutoff)){
      sub.turnover <- turnover.df[turnover.df$n.from.start >= orig.cutoff,]
    }

    model.list <- causes.cons.models.binom.edge(success.var = success.var,
                                                trial.var = trial.var,
                                                edge.var = edge.var,
                                                turnover.df = sub.turnover)

    # fixed-effect coefficients plus the two random-effect variances
    coefs <- as.data.frame(summary(model.list$model)$coefficients)
    coefs$taxa.rand <- summary(model.list$model)$varcor$taxa[1,1]
    coefs$site.rand <- summary(model.list$model)$varcor$`site:taxa`[1,1]

    write.csv(coefs,
              date.wrap(paste0("./outputs/", success.var,
                               "demographic model summary"),
                        ".csv"))
    model.list
  })
  demographic.models
}
|
9a571631b8de7c19402adb69baaab72daeb5e84f
|
12efb36fec3c3a1296dc20f7c7e988668a4f9b7a
|
/R/export.relex.experiment.R
|
4320b995508cfe878720db9a51452683bc67ecff
|
[] |
no_license
|
drmjc/relex
|
d4ec694f6185657a6e11990730267bdb24fc7527
|
2d03dcbfa968e207fe4762efe78bc74c6dba47cf
|
refs/heads/master
| 2020-05-02T02:45:49.397218
| 2019-03-26T04:12:54
| 2019-03-26T04:12:54
| 177,711,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,254
|
r
|
export.relex.experiment.R
|
#' Export a Relex experiment to tab delimited files.
#'
#' Export the peptide, protein and normalised protein ratios from a Relex
#' experiment as tab delimited files.
#'
#' @param exp A list of RelEx experiments
#' @param dir The path to the directory where you'd like the files to be made.
#' @param prefix The file name's prefix. You can leave this as \dQuote{}, or
#'   name it something useful that will separate these files that will be made
#'   from others that you may wish to make.
#' @return Invisibly, the character vector of file paths written. As a side
#'   effect, 2 or 3 tab delimited files are created, corresponding to the
#'   peptides, proteins and (if present) normalised protein ratios.
#' @author Mark Cowley
#' @seealso \code{\link{write.delim}}
#' @keywords IO file
#' @examples
#' \dontrun{
#' export.relex.experiment(exp, dir, prefix)
#' export.relex.experiment(exp, "/path/to/output", "")
#' export.relex.experiment(exp, "/path/to/output", "MUTvsWT")
#' }
#' @export
export.relex.experiment <- function(exp, dir="./", prefix="") {
  basenames <- c("peptides.csv", "proteins.csv", "proteins.norm.csv")
  # BUGFIX: with the documented default prefix="" the old
  # paste(prefix, ..., sep=".") produced hidden dot-files such as
  # ".peptides.csv"; only prepend the prefix when it is non-empty.
  if (nzchar(prefix)) {
    basenames <- paste(prefix, basenames, sep=".")
  }
  f <- file.path(dir, basenames)
  write.delim(exp$data.peptides, f[1])
  write.delim(exp$data.proteins, f[2])
  # the normalised protein table is optional
  if( "data.proteins.norm" %in% names(exp) )
    write.delim(exp$data.proteins.norm, f[3])
  invisible(f)
}
|
4f7ef9630782166ddc1d9ac7429a792565c60a88
|
0f64ac5e3d3cf43124dcb4917a4154829e7bb535
|
/scripts/analyse_DKM-FRS5_VG.R
|
cbf4d1a791e0b642a39246c352549a52a0558f1f
|
[] |
no_license
|
wactbprot/r4vl
|
8e1d6b920dfd91d22a01c8e270d8810f02cea27c
|
a34b1fa9951926796186189202750c71e7883f8d
|
refs/heads/master
| 2016-09-11T02:22:39.828280
| 2014-10-07T14:37:55
| 2014-10-07T14:37:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
analyse_DKM-FRS5_VG.R
|
## run with rproc
# Recompute the DKM-FRS5 pressure-balance comparison, but only when the
# document actually carries calibration data.
# NOTE(review): the functions below (refreshAnalysis, getOutIndex, dkm.*,
# frs5.*, calEn) and the objects `cdb`/`doc` are defined elsewhere in the
# project; the call order presumably matters (pressures before
# uncertainties) — confirm before reordering.
if(length(doc$Calibration) > 0){
  doc <- refreshAnalysis(cdb,doc)   # refresh analysis state from the database
  doc <- getOutIndex(doc)           # determine output/outlier indices
  doc <- dkm.calPdkm(doc)           # DKM pressure calculation
  doc <- frs5.calPfrs5(doc)         # FRS5 pressure calculation
  doc <- dkm.uncertPdkm(doc)        # DKM uncertainty
  doc <- frs5.uncertPfrs5(doc)      # FRS5 uncertainty
  doc <- calEn(doc)                 # En value (comparison measure) — TODO confirm semantics
}
|
b5f2fb80164ffd223be94f60fe8b4a0bd686b035
|
b46248dcee9ce1affb3a436e9090eb65dc5406ad
|
/script/script_raw/winterWheat/BIC/7_WW_BIC.R
|
ecaff1082dfdf669904c7fce65998196d375bbd5
|
[] |
no_license
|
MikyPiky/Project1
|
43561e6e0c1f75e070f0c66c2817d8bd4c7e13c7
|
3053e5a45731b65eb3306ad33875bcbf253cf879
|
refs/heads/master
| 2021-01-23T12:38:56.332927
| 2017-06-06T08:52:56
| 2017-06-06T08:52:56
| 93,317,514
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,631
|
r
|
7_WW_BIC.R
|
################################
#### Winter Wheat in July ####
################################
'
######################
## File Discription ##
The purpose of this script is to estimate the impact of weather fluctuations in the month mentionend above on yearly crop yield.
This is done by the following the steps:
- Create data frame with Winterwheat as dependent and variables of the month above as independent variables
- Create stepwise function which is based on drought categories of german drought monitor
- Remove comIds with less than 7 observations to avoid leveage issues
- Remove log trend of indepedent variable
- Delete outliers which appear to be measurement error
- Use BIC to choose the degrees of the polynomial and to compare various model configurations
- Loop through polynomials configuration of each model; highest possible polynomial is of degree 3
- Compare models graphically
- Explore Models
- Model with lowest BIC in general: Tavg, Prec, SMI
- Model with lowest BIC of standard configuration: Tavg, Prec, SMI
- Model with lowest BIC with SMI: Tavg, Prec, SMI
- Correct Standard Errors with either Driscoll Kray or Cameron et al /Thompson estimator
The --vcovHC– function estimates three heteroskedasticity-consistent covariance estimators:
• "white1" - for general heteroskedasticity but no serial correlation. Recommended for random effects.
• "white2" - is "white1" restricted to a common variance within groups. Recommended for random effects.
• "arellano" - both heteroskedasticity and serial correlation. Recommended for fixed effects.
The following options apply*:
• HC0 - heteroskedasticity consistent. The default.
• HC1,HC2, HC3 – Recommended for small samples. HC3 gives less weight to influential
observations.
• HC4 - small samples with influential observations
• HAC - heteroskedasticity and autocorrelation consistent (type ?vcovHAC for more
details)
Solution for serial correlation: Cluster by groups.
Solution for cross sectional correlation: Cluster by time
Ich arbeitet vorerst mir Driscoll Kraay und weighting von 1 (maxlag=0). Die Ergebnisse sollten solide sein, da Cameron/Thompson ähnlich ist
## Input ##
- aus 4km_tmax: Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_ww.csv (komplete data.frame)
## Output ##
- Yield_Covariates_WW_Jul.csv (auf July reduzierter Data.Frame)
- Export Data frame for use in BIC_Graphic: file="./data/data_raw/BIC/BIC_WW_Jul.csv")
- Export Data Frame of Fixed Effects to be used in Script FixedEffects_Graphic:
"./figures/figures_exploratory/FixedEffects/Winterwheat/..."
'
###################
## Load Packages ##
library(plm)
library(boot)
library(gtools)
library(lme4)
library(lmtest)
library(car)
library(sp)
library(rgdal)
library(raster)
library(rasterVis)
library(maptools)
library(reshape)
library(stringr)
library(classInt)
library(RColorBrewer)
library(stargazer)
library(ggplot2)
####################################################################################################################################################################
####################################################################################################################################################################
#################################################################################################################
#### Create data frame with Winterwheat as dependent and variables of the month above as independent variables ####
#################################################################################################################

## Read in the large winter-wheat data frame (comment said "Maize"; data are winter wheat) ##
Yield_Covariates <- read.csv("~/Documents/projects/correlation/data/data_processed/Yield_SMI_Prec_Tavg_Pet_Dem_Por_Tmin_Tmax_nodemean_nozscore_ww.csv")
# drop the row-index column written by a previous write.csv
Yield_Covariates$X <- NULL

## For publication worth regression output need to change data names ##
'Get rid of variables which are not necessary: other months and other not needed variables'
names(Yield_Covariates)
# NOTE(review): `names` shadows base::names here, and "*_Jul" is treated
# as a regex (matching any "_Jul"), not a glob — works, but fragile.
names <- names(Yield_Covariates)
names_Jul <- grep(c("*_Jul"), names)
names_Jul
Yield_Covariates_Jul <- Yield_Covariates[,names_Jul]
names(Yield_Covariates_Jul)
dim(Yield_Covariates_Jul)

## Keep only the first four July columns: SMI, Prec, Tavg and Pet
names(Yield_Covariates_Jul)
Yield_Covariates_Jul <- Yield_Covariates_Jul[,c(1:4)]

## Establish first part of data frame: time and spatial reference plus Winterwheat ##
# NOTE(review): column positions c(2,1,3:5,7) are hard-coded — verify
# against the CSV layout; comId and year must end up in this order.
names(Yield_Covariates[,c(2,1,3:5,7)])
Yield_Covariates_SM <- Yield_Covariates[,c(2,1,3:5,7)] # careful: comId and year must be in the right order
names(Yield_Covariates_SM)
head(Yield_Covariates_SM)
Yield_Covariates_WW_Jul <- cbind(Yield_Covariates_SM, Yield_Covariates_Jul)
names(Yield_Covariates_WW_Jul)
names(Yield_Covariates_WW_Jul) <- c( "comId" , "year","com","stateId","state","Winterwheat","SMI", "Prec","Tavg", "Pet")
names(Yield_Covariates_WW_Jul)
#########################################
#### Create stepwise function of SMI ####
#########################################
' Drought Monitor Spezification '
# Bin the soil-moisture index into the 7 German Drought Monitor classes.
# NOTE(review): the stray empty argument after `breaks` is matched as a
# missing positional argument (falls through to its default) — harmless,
# but should be removed for clarity.
Yield_Covariates_WW_Jul$SMI_GDM <- cut(Yield_Covariates_WW_Jul$SMI, breaks = c(0, 0.1, 0.2, 0.3, 0.7, 0.8, 0.9, 1), ,
                               labels = c("severe drought","moderate drought","abnormal dry", "normal","abnormal wet" ,"abundant wet", "severe wet"))

#############
## Na-omit ##
# inspect how many missing values there are, then drop incomplete rows
sum(is.na(Yield_Covariates_WW_Jul) )
dim(Yield_Covariates_WW_Jul)
Yield_Covariates_WW_Jul_nna <- na.omit(Yield_Covariates_WW_Jul)
dim(Yield_Covariates_WW_Jul_nna)

## Check for NAs
any(is.na(Yield_Covariates_WW_Jul_nna))

## Reset Rownames
rownames(Yield_Covariates_WW_Jul_nna) <- NULL

## Continue working with the NA-free data frame under the original name ##
Yield_Covariates_WW_Jul <- Yield_Covariates_WW_Jul_nna
#########################################################################
## Remove comIds with less than 7 observations to avoid leverage issues ##
#########################################################################

## Inspect how many comIds have fewer than 7 observations
sum(table(Yield_Covariates_WW_Jul$comId) < 7)

## Derive the short-series comIds directly from the data instead of
## hard-coding them (the original listed 21 ids by hand, which silently
## goes stale whenever the input data change).
obs_per_comId <- table(Yield_Covariates_WW_Jul$comId)
low_obs_comIds <- as.numeric(names(obs_per_comId)[obs_per_comId < 7])

temp <- Yield_Covariates_WW_Jul
for (id in low_obs_comIds) {
  # show the rows about to be dropped, then drop them
  print(Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == id, ])
  temp <- temp[temp$comId != id, ]
}

## Number of deleted rows (negative first element = rows removed)
dim(temp) - dim(Yield_Covariates_WW_Jul)

## Further use old name for data.frame
Yield_Covariates_WW_Jul <- temp

#####################################################################
## Post-deletion housekeeping: drop NAs, reset row names, re-index ##
## for plm, and make the id columns factors.                       ##
Yield_Covariates_WW_Jul <- na.omit(Yield_Covariates_WW_Jul)
rownames(Yield_Covariates_WW_Jul) <- NULL
Yield_Covariates_WW_Jul <- plm.data(Yield_Covariates_WW_Jul, index=c("comId", "year"))
Yield_Covariates_WW_Jul[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Jul[,c("comId","stateId")], factor )
#################################################
#### Remove log trend of dependent variable (header said "independent") ####
#################################################
'Fit log of yield on log of time and use the residuals of that for yields'
logtrend <- lm(log(Winterwheat) ~ log(as.integer(year)), data= Yield_Covariates_WW_Jul)

##########################
## Issue with Outliers ###
##########################
# standard lm diagnostic plots (residuals, QQ, leverage)
par(mfrow = c(2,2))
plot(logtrend)

## Inspect candidate outlier rows ##
# NOTE(review): row indices are hard-coded from a previous diagnostic run —
# they go stale if the data or filtering above change.
Yield_Covariates_WW_Jul[c(3382, 3442, 3454, 2574,3451,3511),]

## Look at the other observations of the outliers' com(munities) #
Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == "12060",] #2003
Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == "12065",] #2003
Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == "12066",] #2003
Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == "9276",] # 1999: no apparent reason why these data should be deleted
Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == "12060",] # 2003
Yield_Covariates_WW_Jul[Yield_Covariates_WW_Jul$comId == "12071",] # 2000

## Interpretation ##
# (Translation of the German note below: unlike for silage maize, no
# observations are removed here for outliers/leverage, since they are
# probably not measurement errors.)
' Im Gegensatz zu SoliMoais nehme ich hier keine Beobachtungen wegen Outlier und Leverage raus, da es wohl keine Messfehler sind.'
Yield_Covariates_WW_Jul <- na.omit(Yield_Covariates_WW_Jul)
rownames(Yield_Covariates_WW_Jul) <- NULL

#################################################
#### Remove log trend of dependent variable ####
# refit after the (no-op) outlier step above, store residuals as the
# detrended yield used by all models below
logtrend <- lm(log(Winterwheat) ~ log(as.integer(year)), data= Yield_Covariates_WW_Jul)
summary(logtrend)
Yield_Covariates_WW_Jul$Winterwheat_logtrend <- resid(logtrend)

#######################################
## Prepare dataframe for plm package ##
'Change Indexing so that it can be used in plm package'
Yield_Covariates_WW_Jul <- plm.data(Yield_Covariates_WW_Jul, index=c("comId", "year"))
str(Yield_Covariates_WW_Jul)

## Transform comId and stateId to factor ##
Yield_Covariates_WW_Jul[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Jul[,c("comId","stateId")], factor )
lapply(Yield_Covariates_WW_Jul, class)
###############################################
##### Save Yield_Covariates_WW_Julober extern ####
write.csv(Yield_Covariates_WW_Jul, file="./data/data_raw/Yield_Covariates_WW_Jul.csv")
#######################################################
#### BIC to choose the degrees of the polynomials ####
#######################################################
## create a matrix which contains all possible degree combinations ##
## gtools::permutations of degrees 1..3 for the two polynomial terms,
## giving a 9 x 2 matrix; row r supplies degree[r, 1] (Prec) and
## degree[r, 2] (Tavg or Pet) to the formulas below.
degree <- permutations(n=3,r=2,v=c(1:3),repeats.allowed=T)
degree
################################################
## Formulas for Model Variations to be tested ##
## NOTE: the formulas reference the loop index `r` lazily; glm() resolves
## `r` in the formula's environment (the global environment) at fit time,
## so `r` must be set before a formula is used.
## with SMI
formula_Jul_WW_detrendlog_SMIPrecTavg <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Jul_WW_detrendlog_SMIPrecPet <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Jul_WW_detrendlog_SMIPrec <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Jul_WW_detrendlog_SMIPet <- Winterwheat_logtrend ~ poly(Pet, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Jul_WW_detrendlog_SMITavg <- Winterwheat_logtrend ~ poly(Tavg, degree[r, 2], raw = T) +
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
formula_Jul_WW_detrendlog_SMI <- Winterwheat_logtrend ~
dummy(SMI_GDM,c("severe drought","moderate drought","abnormal dry", "abnormal wet", "abundant wet","severe wet")) + dummy(comId)
## no SMI
formula_Jul_WW_detrendlog_PrecTavg <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Tavg, degree[r, 2], raw = T) + dummy(comId)
formula_Jul_WW_detrendlog_PrecPet <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + poly(Pet, degree[r, 2], raw = T) + dummy(comId)
formula_Jul_WW_detrendlog_Prec <- Winterwheat_logtrend ~ poly(Prec, degree[r, 1], raw = T) + dummy(comId)
formula_Jul_WW_detrendlog_Pet <- Winterwheat_logtrend ~ poly(Pet, degree[r, 2], raw = T) + dummy(comId)
formula_Jul_WW_detrendlog_Tavg <- Winterwheat_logtrend ~ poly(Tavg, degree[r, 2], raw = T) + dummy(comId)
#################################################################################################
# Loop through the container list to cover all permutations of posssible degree of freedoms of ##
# of the polynomials of the variables ##
#################################################################################################
##################################################
## Loop through various variable configurations ##
## The ten original copy-pasted loops are consolidated into one helper.
## NOTE: the model formulas reference the loop index `r` lazily (inside
## poly(..., degree[r, .])), and glm() resolves `r` in the formula's
## environment (the global environment). The helper therefore assigns the
## index into that environment before each fit, exactly reproducing the
## original behaviour -- including the fact that formulas without
## polynomial terms (e.g. the pure SMI model) are refitted 9 times and
## yield 9 identical BIC values (these duplicates are removed further down).
compute_BIC_per_degree <- function(fml, data, n = 9) {
  out <- rep(0, n)
  for (i in seq_len(n)) {
    # make the current degree-permutation row visible to the formula
    assign("r", i, envir = environment(fml))
    out[i] <- BIC(glm(formula = fml, data = data))
  }
  out
}
BIC_SMIPrecTavg <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_SMIPrecTavg, Yield_Covariates_WW_Jul)
BIC_SMIPrecPet  <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_SMIPrecPet,  Yield_Covariates_WW_Jul)
BIC_SMIPrec     <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_SMIPrec,     Yield_Covariates_WW_Jul)
BIC_SMIPet      <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_SMIPet,      Yield_Covariates_WW_Jul)
BIC_SMITavg     <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_SMITavg,     Yield_Covariates_WW_Jul)
BIC_SMI         <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_SMI,         Yield_Covariates_WW_Jul)
BIC_PrecTavg    <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_PrecTavg,    Yield_Covariates_WW_Jul)
BIC_PrecPet     <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_PrecPet,     Yield_Covariates_WW_Jul)
BIC_Prec        <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_Prec,        Yield_Covariates_WW_Jul)
BIC_Pet         <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_Pet,         Yield_Covariates_WW_Jul)
BIC_Tavg        <- compute_BIC_per_degree(formula_Jul_WW_detrendlog_Tavg,        Yield_Covariates_WW_Jul)
## Compare BIC values ##
## (concatenation order must match the label list built below)
BIC <- c(BIC_SMIPrecTavg, BIC_SMIPrecPet, BIC_SMIPrec, BIC_SMIPet, BIC_SMITavg,
         BIC_SMI, BIC_Prec, BIC_Tavg, BIC_Pet, BIC_PrecTavg, BIC_PrecPet)
BIC
par(mfrow=c(1,1))
plot(BIC)
###########################
## Plot BIC with ggplot2 ##
###########################
##############################################
## Create Dataframe for plotting in ggplot2 ##
## Repeat each model-configuration label 9 times (one per degree
## permutation). Vectorised with rep(..., each = 9) instead of growing
## vectors with append() inside a loop; this also stops shadowing
## base::list with a character vector.
config_labels <- c("01_SMIPrecTavg", "02_SMIPrecPet", "03_SMIPrec", "04_SMIPet",
                   "05_SMITavg", "06_SMI", "07_Prec", "08_Tavg", "09_Pet",
                   "10_PrecTavg", "11_PrecPet")
model <- rep(config_labels, each = 9)
model_index <- as.numeric(rep(seq_along(config_labels), each = 9))
###################################
## Combine data in on data.frame ##
BIC <- as.data.frame(BIC)
model <- as.data.frame(model)
model_index <- as.data.frame(model_index)
index <- 1:99
month <- rep("July", 99)
BIC_Jul <- cbind(BIC, model, model_index, index, month)
#######################
## Delete Duplicates ##
## Models that ignore one (or both) of the two polynomial degrees produce
## identical BIC values for several degree permutations; list the repeats.
which(duplicated(BIC_Jul$BIC))
## Hand-picked duplicate row indices, kept from the original analysis.
dup_rows <- c(20,21,23,24,26,27,31,32,33,34,35,36,40,41,42,43,44,45,47,48,49,
              50,51,52,53,54,56,57,59,60,62,63,67,68,69,70,71,72,76,77,78,79,
              80,81)
length(dup_rows)
## Show the rows being dropped, then drop them in one vectorised subset
## instead of shrinking the data frame row-by-row inside a loop.
print(BIC_Jul[BIC_Jul$index %in% dup_rows, ])
temp <- BIC_Jul[!BIC_Jul$index %in% dup_rows, ]
dim(BIC_Jul)
dim(temp)
################################
## Correct created data.frame ##
rownames(temp) <- NULL
BIC_Jul <- temp
lapply(BIC_Jul, class)
############################
## Plot data with ggplot2 ##
## Scatter of BIC per model configuration; the second call facets by month
## (only "July" here, so the facet mainly adds the strip label).
g <- ggplot(BIC_Jul,aes(y=BIC, x=index))
g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark()
g + geom_point(aes(color=model)) + labs(title="BIC of various model configurations", x="") + theme(plot.title=element_text(size=15, face="bold")) + theme_dark() +
facet_wrap( ~ month)
BIC_Jul
## Export Data frame for use in BIC_Grafic
BIC_WW_Jul <- BIC_Jul
class(BIC_WW_Jul)
write.csv(BIC_WW_Jul, file="./data/data_raw/BIC/BIC_WW_Jul.csv")
################################################################
################################### Explore Models #############
################################################################
###################
## Load Data Set ##
## (Optional re-entry point: reload the data set written above.)
# Yield_Covariates_WW_Jul <- read.csv( file="./data/data_raw/Yield_Covariates_WW_Jul.csv")
# names(Yield_Covariates_WW_Jul)
# Yield_Covariates_WW_Jul$X <- NULL
#######################################
## Prepare dataframe for plm package ##
'Change Indexing so that it can be used in plm package'
Yield_Covariates_WW_Jul <- plm.data(Yield_Covariates_WW_Jul, index=c("comId", "year"))
## Transform comId and stateId to factor ##
Yield_Covariates_WW_Jul[,c("comId","stateId")] <- lapply(Yield_Covariates_WW_Jul[,c("comId","stateId")], factor )
str(Yield_Covariates_WW_Jul)
#################################
###############################
## Results with smallest BIC ##
###############################
plot(BIC_SMIPrecTavg)
which.min(BIC_SMIPrecTavg)
## Fix the degree-permutation row used by the formulas: they read the
## global `r` lazily, so r = 3 selects degree[3, ] for the polynomial
## orders of Prec and Tavg in the best specification.
r = 3
best_formula <- formula_Jul_WW_detrendlog_SMIPrecTavg
###################
## GLM Ergebniss ##
## Pooled GLM fit of the best (lowest-BIC) specification; county fixed
## effects enter explicitly via dummy(comId).
glm.fit_WW_BEST_Jul <- glm(formula = best_formula, data = Yield_Covariates_WW_Jul)
summary(glm.fit_WW_BEST_Jul)
'AIC: -7368.9'
####################
## PLM Ergebnisse ##
## Same specification as a fixed-effects ("within") panel model; the
## explicit comId dummies are removed because plm absorbs them.
plm.fit_WW_BEST_Jul <- plm(formula = update(best_formula, .~. - dummy(comId)), data = Yield_Covariates_WW_Jul, effect="individual", model=("within"), index = c("comId","year"))
summary(plm.fit_WW_BEST_Jul)
'Adj. R-Squared: 0.11031'
## Extract the estimated county fixed effects and export them.
fixef <- fixef(plm.fit_WW_BEST_Jul)
fixef <- as.data.frame(as.matrix(fixef))
head(fixef)
fixef <- cbind(rownames(fixef), fixef)
rownames(fixef) <- NULL
names(fixef) <- c("comId", "FE")
fixef
write.csv(fixef, "./figures/figures_exploratory/FixedEffects/Winterwheat/plm.fit_WW_BEST_Jul_FE.csv")
##################
## LM Ergebniss ##
## Plain OLS fit of the same formula (numerically equivalent to the GLM).
lm.fit_WW_BEST_Jul <-lm(formula = best_formula, data = Yield_Covariates_WW_Jul)
summary(lm.fit_WW_BEST_Jul)
'Adjusted R-squared: 0.6792'
################################################
## Assessing Influence (Leverage*discrepancy) ##
## Cook's distance cutoff via the common 4/(n - k - 1) rule of thumb.
cutoff_Jul <- 4/((nrow(Yield_Covariates_WW_Jul)-length(lm.fit_WW_BEST_Jul$coefficients)-1))
cutoff_Jul
## BUG FIX: the argument was misspelled `cook_Jul.levels`; plot.lm expects
## `cook.levels`, so the cutoff was silently swallowed by `...` and the
## default contour levels were drawn instead.
plot(lm.fit_WW_BEST_Jul, which=4, cook.levels=cutoff_Jul)
cook_Jul <- cooks.distance(lm.fit_WW_BEST_Jul)
nrow(Yield_Covariates_WW_Jul[cook_Jul > cutoff_Jul,]) # 189
## Influential observations tabulated by year ...
year_cooks_Jul <- table(Yield_Covariates_WW_Jul$year[cook_Jul > cutoff_Jul ])
year_cooks_Jul
'1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010
8 9 2 7 64 47 3 6 22 8 7 6
'
## ... and by county (20 most frequent).
com_cooks_Jul <- sort(table(Yield_Covariates_WW_Jul$com[cook_Jul > cutoff_Jul ] ) )
tail(com_cooks_Jul,20)
'
Rheingau-Taunus-Kreis Schwabach Schwandorf, Landkreis Schweinfurt
2 2 2 2
Schweinfurt, Landkreis S\xf6mmerda, Kreis Teltow-Fl\xe4ming, Landkreis Weiden i.d.OPf.
2 2 2 2
Weimar, krsfr. Stadt Dahme-Spreewald, Landkreis Duisburg, Kreisfreie Stadt Ha\xdfberge, Landkreis
2 3 3 3
Magdeburg, Kreisfreie Stadt Nordsachsen, Landkreis Oberhavel, Landkreis Olpe, Kreis
3 3 3 3
Potsdam-Mittelmark, Landkreis Rheinisch-Bergischer Kreis Spree-Nei\xdfe, Landkreis Oberspreewald-Lausitz, Landkreis
3 3 4 5 '
########################
## Heteroskedasdicity ##
## Breusch-Pagan tests; H0 = homoskedastic errors.
bptest(glm.fit_WW_BEST_Jul) # Breusch-Pagan test of heteroskedasticity in the errors; null: homoskedasticity.
bptest(plm.fit_WW_BEST_Jul)
' In beiden Fällen kann die Null widerlegt werden. Es gibt also heteroskedasdicity '
## (German note above: the null is rejected in both cases, i.e. there is
## heteroskedasticity.)
## Koenker's version of the BP test: robust modification for non-normal errors.
bptest(plm.fit_WW_BEST_Jul, studentize = TRUE)
'Auch hier kann die Null widerlegt werden. Need to use robust covariance variance matrix to correct standard errors'
######################################
## Tests for serial autocorrelation ##
## Wooldridge test (pwartest) and Breusch-Godfrey test (pbgtest).
pwartest(plm.fit_WW_BEST_Jul)
pbgtest(plm.fit_WW_BEST_Jul)
'
both, H_1 of serial autocorrelation cannot be rejected
'
#################################
## Correct the Standard Errors ##
#################################
## Correct Standard Errors used in table ##
coeftest(plm.fit_WW_BEST_Jul)
## Robust covariance matrix estimators a la White ##
# coeftest(plm.fit_WW_BEST_Jul,vcov=vcovHC(plm.fit_WW_BEST_Jul,method = "arellano", type = "HC0"))
## Arellano/HC0 clustered by county (robust to serial correlation) ...
cov0_WW_BEST_Jul <- vcovHC(plm.fit_WW_BEST_Jul,method = "arellano", type = "HC0", cluster="group")
Wh.se_serial_WW_BEST_Jul <- sqrt(diag(cov0_WW_BEST_Jul))
## ... and clustered by year (robust to cross-sectional dependence).
cov0.1_WW_BEST_Jul <- vcovHC(plm.fit_WW_BEST_Jul,method = "arellano", type = "HC0", cluster="time")
Wh.se_cross_WW_BEST_Jul <- sqrt(diag(cov0.1_WW_BEST_Jul))
#
# ## Beck Katz:
# # coeftest(plm.fit_WW_BEST_Jul, vcov = function(x) vcovBK(plm.fit_WW_BEST_Jul,method = "arellano", type = "HC0"))
# cov1 <- vcovBK(plm.fit_WW_BEST_Jul,method = "arellano", type = "HC0", cluster="time")
# BK.se <- sqrt(diag(cov1))
# ## Driscoll Kraay ##
# summary(plm.fit_WW_BEST_Jul)
## Driscoll-Kraay SEs (robust to cross-sectional and serial dependence).
coeftest(plm.fit_WW_BEST_Jul, vcov=function(x) vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0"))
cov2_WW_BEST_Jul <- vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0")
DK.se_WW_BEST_Jul <- sqrt(diag(cov2_WW_BEST_Jul))
#
# cov2.1_WW_BEST_Jul <- vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0", maxlag=1)
# DK2.1.se_WW_BEST_Jul <- sqrt(diag(cov2.1_WW_BEST_Jul))
# cov2.2_WW_BEST_Jul <- vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0", maxlag=2)
# DK2.2.se_WW_BEST_Jul <- sqrt(diag(cov2.2_WW_BEST_Jul))
#
# cov2.3_WW_BEST_Jul <- vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0", maxlag=3)
# DK2.3.se_WW_BEST_Jul <- sqrt(diag(cov2.3_WW_BEST_Jul))
#
# cov2.4_WW_BEST_Jul <- vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0", maxlag=4)
# DK2.4.se_WW_BEST_Jul <- sqrt(diag(cov2.4_WW_BEST_Jul))
#
## Driscoll-Kraay with a maximum lag of 5 years.
cov2.5_WW_BEST_Jul <- vcovSCC(plm.fit_WW_BEST_Jul,method = "arellano",type = "HC0", maxlag=5)
DK2.5.se_WW_BEST_Jul <- sqrt(diag(cov2.5_WW_BEST_Jul))
## Cameron et al / Thompson : double-clustering estimator ##
# coeftest(plm.fit_WW_BEST_Jul, vcovDC(plm.fit_WW_BEST_Jul, method = "arellano", type = "HC0"))
cov3_WW_BEST_Jul <- vcovDC(plm.fit_WW_BEST_Jul, method = "arellano", type = "HC0")
CT.se_WW_BEST_Jul <- sqrt(diag(cov3_WW_BEST_Jul))
'Our estimator is qualitatively similar to the ones presented in White and Domowitz (1984), for
time series data, and Conley (1999), for spatial data. '
## Generate Table with Output ##
## Same fitted model six times; each column reports a different SE estimator.
se <- list(NULL, Wh.se_cross_WW_BEST_Jul, Wh.se_serial_WW_BEST_Jul, DK.se_WW_BEST_Jul, DK2.5.se_WW_BEST_Jul, CT.se_WW_BEST_Jul)
labels1 <-c("NULL","WhiteCross","WhiteSerial", "DriscollKraay", "DriscollKray2.5","CameronThompson")
stargazer(plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul,plm.fit_WW_BEST_Jul,
se = se,
dep.var.caption = "Model with smallest BIC - July",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Jul_best.txt"
)
#########################################################
## Results with smallest BIC of Standard Configuration ##
#########################################################
## Caveat: for winter wheat the standard configuration is always best ##
## Intentional duplication: the best, best-standard and best-SMI
## specifications coincide for winter wheat, so the same fitted model is
## written to three output files to ease later processing.
' Deswegen produziere ich keine anderen Output, dennoch schreibe ich die anderen Tables, da
das eventiuell spätere Arbeiten erleichtert.'
## Generate Table with Output ##
se <- list(NULL, Wh.se_cross_WW_BEST_Jul, Wh.se_serial_WW_BEST_Jul, DK.se_WW_BEST_Jul, DK2.5.se_WW_BEST_Jul, CT.se_WW_BEST_Jul)
labels1 <-c("NULL","WhiteCross","WhiteSerial", "DriscollKraay", "DriscollKray2.5","CameronThompson")
stargazer(plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul,plm.fit_WW_BEST_Jul,
se = se,
dep.var.caption = "Model with smallest BIC - July",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Jul_bestStandard.txt"
)
########################################
## Results with smallest BIC with SMI ##
########################################
## Generate Table with Output ##
se <- list(NULL, Wh.se_cross_WW_BEST_Jul, Wh.se_serial_WW_BEST_Jul, DK.se_WW_BEST_Jul, DK2.5.se_WW_BEST_Jul, CT.se_WW_BEST_Jul)
labels1 <-c("NULL","WhiteCross","WhiteSerial", "DriscollKraay", "DriscollKray2.5","CameronThompson")
stargazer(plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul, plm.fit_WW_BEST_Jul,plm.fit_WW_BEST_Jul,
se = se,
dep.var.caption = "Model with smallest BIC - July",
dep.var.labels = "log(Winterwheat)",
style="default",
model.numbers = FALSE,
column.labels = labels1,
type="text", out="./figures/figures_exploratory/BIC/Winterwheat/WW_Jul_bestSM.txt"
)
|
3d0cc397284e95c104ad07ac399b5520a52b2039
|
2f74b6fa3057fcb98ad562247ea055ea63446146
|
/man/reg_gam.Rd
|
40168d224d5b13faca8a4753aa388f53b8367bdb
|
[] |
no_license
|
strayMat/warpDE
|
977e0f0b2d99d3ef1e7bdef9e2cad1a3ff6d8275
|
92e50beba7c54581173925aeff14ab02233980b5
|
refs/heads/master
| 2021-01-01T16:38:04.340919
| 2017-12-07T13:41:45
| 2017-12-07T13:41:45
| 97,879,353
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,476
|
rd
|
reg_gam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression_plots.R
\name{reg_gam}
\alias{reg_gam}
\title{Plot one gene raw data and its regressions}
\usage{
reg_gam(data, gene, reg.f = "loess", span = 0.75, s.df = 4,
regression = T, null.model = T, npred = F, sd.show = F,
legend.show = F)
}
\arguments{
\item{data}{a \code{warpDEDataSet} with results to be plotted.}
\item{gene}{character, a gene of interest.}
\item{reg.f}{a function to perform regression, either "ns" for natural splines, "loess" or "s" (default is "loess").}
\item{span}{numeric, a smoothing parameter for the regression function (default is 0.75, see \code{gam::lo} for details).}
\item{s.df}{numeric, a smoothing parameter for the natural-splines ("ns") regression (default is 4, see \code{splines::ns} for details about regularization).}
\item{regression}{logical, if the loess regression is to be computed and plotted or not (default is TRUE).}
\item{null.model}{logical, if the null model is to be computed and plotted or not (default is TRUE).}
\item{npred}{logical, if the unshared part of the data is to be plotted or not(default is FALSE).}
\item{legend.show}{logical, if the legend is wanted (default is FALSE).}
}
\value{
returns \itemize{\item{\code{pl},the visualization of the data}
\item{\code{regs}, the regression objects for both lineages and the null model.}}
}
\description{
Tools for visualizing gene signals for one given gene with the gam framework.
}
|
d48aa2730807e8fa011c51933c3764fef40f5778
|
2cf96544b40099506217ddd7c3088cdc769fa796
|
/man/get_sentences.Rd
|
6ea55327d4ee1ec2a790e82c8f1fc430272c8e3a
|
[] |
no_license
|
cran/sentimentr
|
0e3e9b7097670f6deb3c194c69e0429ce8f182f8
|
42a00f5813ba2ddefe1499ec053c7d9fe0b4c311
|
refs/heads/master
| 2021-10-27T21:24:05.432619
| 2021-10-12T07:30:02
| 2021-10-12T07:30:02
| 66,079,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,001
|
rd
|
get_sentences.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_sentences.R
\name{get_sentences}
\alias{get_sentences}
\title{Get Sentences}
\usage{
get_sentences(x, ...)
}
\arguments{
\item{x}{A character vector, \code{sentiment}, or \code{sentiment_by} object.}
\item{\ldots}{Other arguments passed to \code{\link[textshape]{split_sentence}}.}
}
\value{
Returns a list of vectors of sentences.
}
\description{
\code{get_sentences} - Get sentences from a character vector, \code{sentiment}, or
\code{sentiment_by} object.
}
\examples{
dat <- data.frame(
w = c('Person 1', 'Person 2'),
x = c(paste0(
"Mr. Brown comes! He says hello. i give him coffee. i will ",
"go at 5 p. m. eastern time. Or somewhere in between!go there"
), "One more thought for the road! I am going now. Good day."),
y = state.name[c(32, 38)],
z = c(.456, .124),
stringsAsFactors = FALSE
)
get_sentences(dat$x)
get_sentences(dat)
}
|
4cb8efe213d1bd343fcccd7517f21a3f3488cabc
|
c17b385466d7a618f748d8768c63c09fc55a4d58
|
/man/cleansubject.Rd
|
5aebe9cc621115a0977f9123393a0ce8082bff42
|
[] |
no_license
|
johnsonnei/Faculty
|
3155c11950ad846317719f69dba6f6511776c836
|
061b773a21c32312df247e0d2922d2ef8c9bd2e7
|
refs/heads/master
| 2021-01-10T09:57:50.748116
| 2016-02-08T13:04:07
| 2016-02-08T13:04:07
| 51,293,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
rd
|
cleansubject.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cleansubject.R
\name{cleansubject}
\alias{cleansubject}
\title{Cleaning the Data 3.0 - By Department}
\usage{
cleansubject(df)
}
\arguments{
\item{df}{The name of the data frame, Faculty (from cleanname)}
}
\value{
A new dataframe (unassigned) that now has department of the Faculty member, cleaning out unnecessary symbols and terminology that had previously existed in the data.
}
\description{
Cleans the data for the Department the faculty is a member of.
}
|
65b29490564ac0b81ee703f7e6e2855cc7e15733
|
1552c44b53a9a071532792cc611ce76d43795453
|
/Inteligencia_Artificial/One_row.r
|
50df70489902934dc326a81ff9f680d1737a4097
|
[] |
no_license
|
robl-25/Faculdade
|
8ce16cee93f5948d33d45714de66578d189163f4
|
0801f5748d8d7d79314699b2e35258e402a55bd1
|
refs/heads/master
| 2021-01-10T05:16:45.842442
| 2017-11-09T02:26:26
| 2017-11-09T02:26:26
| 45,509,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,633
|
r
|
One_row.r
|
rm(list = ls())  # NOTE(review): clearing the whole workspace in a script is
                 # hostile to interactive users; kept for compatibility.

# CSV file with the play-tennis toy data set; the last column is assumed to
# be the class label -- TODO confirm against the actual file.
tabela = read.csv("~/Downloads/play_tennis.csv", header=T)

# For every attribute column, cross-tabulate its values against the class
# column (the last one).
# BUG FIX: the original read `table(valores, tabela,[, ncol(tabela)])` --
# the stray comma before `[` was a syntax error that prevented the whole
# file from parsing.
# NOTE(review): apply() coerces the data frame to a character matrix; that
# is harmless here because table() works on character values.
contagens = apply(tabela[,1:(ncol(tabela)-1)], 2, function(valores)
  table(valores, tabela[, ncol(tabela)]))

# For the chosen column, report per attribute value the majority class and
# the count of the minority class (its "error"), one-rule style.
idx_coluna = 1
erros_valores = apply(contagens[[idx_coluna]], 1, function(cont){
  maior = which.max(cont)
  menor = which.min(cont)
  data.frame(valor=names(cont[maior]), erro=cont[menor])
})

# ---------------------------------------------------------------------------
# The block below was "commented out" with Python-style triple quotes (""").
# R has no triple-quoted strings: `"""` parses as an empty string followed by
# the start of a new string literal, which swallowed code up to the next
# quote character and broke parsing. The dead code is preserved as real
# comments instead.
# ---------------------------------------------------------------------------
# dimensao = dim(tabela)
# coluna = length(tabela)-1
# sum_parametros = 0
# for(i in 1:(length(tabela)-1)){
#   dimensao = dim(table(tabela[i]))
#   if(sum_parametros < dimensao){
#     sum_parametros = dimensao
#   }
# }
# linha = sum_parametros
# classes = names(table(tabela[5]))
# res = matrix(runif(linha*coluna, Inf, Inf), ncol = coluna, byrow=TRUE)
# calculo = function(tabela, classes){
#   for(i in 1:(length(tabela)-1)){
#     sum_parametros = table(tabela[i])
#     name_parametros = names(sum_parametros)
#     tam_p = length(name_parametros)
#     tam_c = length(classes)
#     res_parcial = matrix(runif(tam_p*tam_c, 0, 0), ncol=tam_p, byrow=TRUE)
#     for(j in 1:tam_p){
#       n = sum_parametros[name_parametros[j]]
#       for(z in 1:tam_c){
#         at = subset(tabela, Temperature=="Sunny" & Play.Tennis=="No",
#                     select=c(Temperature, Play.Tennis))
#         num = dim(at)
#         res_parcial[1,1] = num[1]/n
#       }
#     }
#     for(k in 1:length(res_parcial[1,])){
#       in_max = which.max(res_parcial[k,])
#       res[k,i] = classes[in_max]
#     }
#   }
#   res
# }
# res = calculo(tabela, classes)
|
ce6c69475c6c1737e6c3d075ad7c2fc056d2db67
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RMKL/man/benchmark.data.Rd
|
17d8e65b44f97e4cafe2c3783c40433b73a0eef8
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 526
|
rd
|
benchmark.data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/benchmark.data.R
\docType{data}
\name{benchmark.data}
\alias{benchmark.data}
\title{Benchmark.data.}
\format{A list of dataframes with 3 columns and 200 samples, where the x and y
are generated from 2 multivariate distributions. The mean of the two groups vary,
to allow for different amount overlap for the groups.}
\usage{
benchmark.data
}
\description{
Datasets two groups, labels -1 and 1, with varying amounts of overlap.
}
\keyword{datasets}
|
c2c427cc2cbd18f5693892fc0da56132a5105f12
|
e10191a3c8906bf4512ab16731e632185f84513b
|
/cachematrix.R
|
4357699732aa0c93359c171a4511d7c10d045e3e
|
[] |
no_license
|
suhitag93/ProgrammingAssignment2
|
9f34817935a5be3501d83cf8f50903e31304ac49
|
fe82beb69f4050cf0c6899c31f3599ca033f1639
|
refs/heads/master
| 2021-01-21T23:33:17.663501
| 2015-06-20T18:04:23
| 2015-06-20T18:04:23
| 37,609,705
| 0
| 0
| null | 2015-06-17T17:23:53
| 2015-06-17T17:23:53
| null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a cache-aware wrapper around a matrix. Returns a
## list of accessor closures sharing one environment:
##   set(y)     -- replace the stored matrix and invalidate the cached inverse
##   get()      -- return the stored matrix
##   set_inv(i) -- store a computed inverse in the cache
##   get_inv()  -- return the cached inverse (NULL if not yet computed)
##
## FIXES vs. original:
##  * get_mat() returned the undefined name `mat` (runtime error) -- now `x`.
##  * set_mat() prompted interactively via readline() and assigned only a
##    local `x`, so nothing was ever stored -- it now takes the new matrix as
##    an argument and writes to the shared environment with `<<-`.
##  * set_inv() assigned a local `inv`, so the cache was never written -- it
##    now stores the supplied inverse with `<<-`.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set_mat <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get_mat <- function() x
  set_inv <- function(i) inv <<- i
  get_inv <- function() inv
  list(set = set_mat, get = get_mat, set_inv = set_inv, get_inv = get_inv)
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix,
## computing it at most once; later calls return the cached value.
##
## FIXES vs. original: it called the non-existent list entries x$get_mat()
## and x$set_mat(), and tested the matrix (not the cached inverse) for NULL,
## so the cache was never consulted nor filled. This version checks
## get_inv() first and stores the freshly computed inverse via set_inv().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$get_inv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  m <- x$get()
  inv <- solve(m, ...)
  x$set_inv(inv)
  inv
}
|
72b36d2dd8064c3656b1d99926c5fe02117fa93c
|
e206a00f45f9cb6e00438b61195bb5754c71ab3f
|
/R/model.R
|
f765754e724ddb9fa0610f953fd4fbf409aa41b7
|
[
"MIT"
] |
permissive
|
knifecake/forrelgui
|
e3602fa5ef8eac7332fd24b6572953dfa24ce51f
|
7f15ab0dc231500ef88e98e7e53d0e16e4f0444c
|
refs/heads/master
| 2023-03-15T08:22:23.418347
| 2021-03-18T12:04:02
| 2021-03-18T12:04:02
| 298,838,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,934
|
r
|
model.R
|
# Construct the application's empty state: a named list with one slot per
# piece of session data, every slot initialised to NULL.
#
# Slots:
#   claim_ped       -- pedtools::ped object (or list of them): claim pedigree
#   true_ped        -- pedtools::ped object (or list of them): true pedigree
#   available       -- character IDs marked available for genotyping
#                      (passed as `ids` to forrel::exclusionPower)
#   database        -- allele frequency db, format of
#                      pedtools::getFreqDatabase(x, format = 'list')
#   settings        -- list of global settings
#   marker_settings -- data frame with three columns, one row per marker
#   genotypes       -- allele matrix, format of pedtools::getAlleles
#   result          -- the result of a computation
empty_model <- function() {
  slots <- c("claim_ped", "true_ped", "available", "database",
             "settings", "marker_settings", "genotypes", "result")
  setNames(vector("list", length(slots)), slots)
}
# Replace the "true" pedigree and prune the availability list so it only
# contains IDs valid for the new pedigree. Invisibly returns NULL.
# NOTE(review): `model` appears to be shared mutable state (presumably a
# shiny reactiveValues) used by all helpers in this file, and isTruthy()
# comes from shiny -- confirm.
set_true_ped <- function(x) {
  if (isTruthy(x)) {
    model$true_ped <- x
    clean_available()
  }
  NULL
}
# Replace the claim pedigree, then re-apply the frequency database (locus
# attributes) and any stored genotypes so the new ped carries the same
# marker data as before.
set_claim_ped <- function(x) {
  if (!isTruthy(x)) return(NULL)
  model$claim_ped <- x
  clean_available()
  # apply locus attributes
  apply_locus_attributes()
  # apply genotypes
  apply_genotypes()
}
# Attach the allele frequency database to the claim pedigree as marker
# locus attributes. No-op unless both the pedigree and the database exist.
apply_locus_attributes <- function() {
  if (!isTruthy(model$claim_ped) || !isTruthy(model$database)) return(NULL)
  # One locusAttributes entry per marker: allele labels + frequencies.
  la <- lapply(names(model$database), function(marker_name) {
    list(
      alleles = names(model$database[[marker_name]]),
      afreq = as.numeric(model$database[[marker_name]]),
      name = marker_name
      # TODO: add mutation model and rate
    )
  })
  names(la) <- NULL
  model$claim_ped <- pedtools::setMarkers(model$claim_ped, locusAttributes = la)
  clean_available()
  NULL
}
# Copy the stored allele matrix onto the claim pedigree; when no genotypes
# are stored, wipe all alleles instead (setAlleles with alleles = 0).
apply_genotypes <- function() {
  if (!isTruthy(model$claim_ped)) return(NULL)
  if (!isTruthy(model$genotypes)) {
    # remove genotypes
    model$claim_ped <- pedtools::setAlleles(model$claim_ped, alleles = 0)
  } else {
    model$claim_ped <- pedtools::setAlleles(model$claim_ped, alleles = model$genotypes)
  }
}
# Return the frequency database, either converted to "ladder" (wide) format
# via list_to_ladder() or as the raw named list; NULL when none is loaded.
get_database <- function(mode = 'ladder') {
  if (!isTruthy(model$database)) return(NULL)
  if (mode == 'ladder')
    list_to_ladder(model$database)
  else
    model$database
}
# Store a new frequency database and propagate it: re-apply the locus
# attributes to the claim ped and reset the per-marker settings table.
set_database <- function(db) {
  if (!isTruthy(db)) return(NULL)
  model$database <- db
  apply_locus_attributes()
  init_marker_settings()
  NULL
}
# IDs of claim-pedigree members that already carry genotype data.
get_genotyped_labels <- function() {
  get_genotyped_ids(model$claim_ped)
}
# Stored allele matrix as a data frame (original column names kept),
# or NULL when no genotypes are loaded.
get_genotypes <- function() {
  if (!isTruthy(model$genotypes)) return(NULL);
  data.frame(model$genotypes,
             row.names = rownames(model$genotypes),
             stringsAsFactors = FALSE,
             check.names = FALSE)
}
# Store a new allele matrix, push it onto the claim pedigree, and drop the
# now-genotyped IDs from the "available for genotyping" list.
set_genotypes <- function(genotypes) {
  if (!isTruthy(genotypes)) return(NULL)
  model$genotypes <- genotypes
  apply_genotypes()
  # remove genotyped IDs from available list
  model$available <- setdiff(model$available, get_genotyped_labels())
}
# Discard all stored genotypes and clear the alleles on the claim ped.
remove_genotypes <- function() {
  model$genotypes <- NULL
  apply_genotypes()
}
# Character vector of IDs currently marked available for genotyping.
get_available <- function() {
  model$available
}
# Overwrite the availability list (no validation beyond truthiness).
set_available <- function(available) {
  if (!isTruthy(available)) return(NULL);
  model$available <- available
  NULL
}
# (Re)build the per-marker settings table: one row per marker in the
# database, with defaults "Use in calculation?" = TRUE, Mutations = "Auto".
init_marker_settings <- function() {
  markers <- names(get_database(mode = 'list'))
  model$marker_settings <- data.frame(t(sapply(markers, function(m) list(
    'Use in calculation?' = TRUE,
    'Mutations' = 'Auto'
  ))), check.names = FALSE)
  NULL
}
# Settings table, lazily initialised on first access.
get_marker_settings <- function() {
  if (!isTruthy(model$marker_settings)) init_marker_settings()
  model$marker_settings
}
# Either patch a single cell (when both `marker` and `name` are given) or
# replace the whole settings table.
set_marker_settings <- function(settings, marker = NULL, name = NULL) {
  # apply just one setting to one marker
  if (!is.null(marker) && !is.null(name)) {
    model$marker_settings[marker, name] <- settings
    return(NULL);
  }
  if (!isTruthy(settings)) return(NULL);
  model$marker_settings <- settings
  NULL
}
# Set the Mutations column of every marker to `value`.
# NOTE(review): rep(value) is a one-element vector that R recycles across
# the column -- works, but a plain `value` would be clearer.
set_all_mutations <- function(value) {
  if (!isTruthy(value)) return();
  model$marker_settings$Mutations <- rep(value)
}
# Drop from the availability list any ID that is no longer a valid
# candidate (see get_candidate_available_ids).
clean_available <- function() {
  model$available <- intersect(get_available(), get_candidate_available_ids())
}
# Global settings list, lazily initialised with defaults
# (exactMaxL = Inf, nsim = 1000, seed = NULL).
get_settings <- function() {
  if (!isTruthy(model$settings)) {
    model$settings = list(exactMaxL = Inf, nsim = 1000, seed = NULL)
  }
  model$settings
}
# Update any subset of the global settings; values are coerced to numeric.
# Falsy arguments leave the corresponding setting untouched.
set_settings <- function(exactMaxL = NULL, nsim = NULL, seed = NULL) {
  # make sure settings are initialized
  get_settings()
  if (isTruthy(exactMaxL)) {
    model$settings$exactMaxL <- as.numeric(exactMaxL)
  }
  if (isTruthy(nsim)) {
    model$settings$nsim <- as.numeric(nsim)
  }
  if (isTruthy(seed)) {
    model$settings$seed <- as.numeric(seed)
  }
}
#' Determines whether the exclusion power can be calculated
#'
#' @return TRUE if all the ingredients for calculating EP are present, otherwise
#'   returns an error message
#'
#' NOTE(review): on failure this returns a character message, not FALSE, so
#' callers must compare with isTRUE() rather than treat the result as logical.
can_calculate_ep <- function() {
  if (!isTruthy(model$claim_ped))
    return('Missing claim pedigree')
  if (!isTruthy(model$true_ped))
    return('Missing true pedigree')
  if (!isTruthy(model$available) || length(model$available) == 0)
    return('No individuals were marked as available for genotyping')
  if (!isTruthy(model$database))
    return('No allele frequency database provided')
  return(TRUE)
}
#' Calculate IDs that could be available for genotyping
#'
#' An ID can be defined as available for genotyping if: 1. It is present in both
#' the claim pedigree and the true pedigree, and 2. it has not been already
#' genotyped.
#'
#' @return a character vector of IDs that the user may choose to define as
#'   available for genotyping
#'
get_candidate_available_ids <- function() {
  # labels present in both pedigrees, minus the already-genotyped ones
  setdiff(
    intersect(custom_ped_labels(model$claim_ped), custom_ped_labels(model$true_ped)),
    get_genotyped_labels()
  )
}
# Names (row names) of the markers whose first settings column
# ("Use in calculation?") is TRUE.
get_selected_markers <- function() {
  ms = get_marker_settings()
  rownames(ms[ms[, 1] == TRUE, ])
}
|
eb6c5e7fd6940baa1d737b6380b7e2a326289d4b
|
87b55c6c2fea04aafd73ba793a251c836ec8eba1
|
/lib/summary.df.R
|
0397e8e7149b646c25d2b02dd04f1e895312b039
|
[] |
no_license
|
irishlouis/DIA_TEETH_POC
|
38660dea4f2eeed74bd168ff61a238e4a34bb9a4
|
5837e5b21a7c22ce8b42a1031e879bd0475ae04b
|
refs/heads/master
| 2021-01-21T13:57:32.336977
| 2016-06-01T13:51:29
| 2016-06-01T13:51:29
| 53,400,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 816
|
r
|
summary.df.R
|
#' summary.df
#'
#' Per-minute summary statistics of the `vector.mag` signal, column-bound
#' with smoothed peak metrics from `peak.func`.
#'
#' NOTE(review): despite the dot, this is not intended as an S3 `summary`
#' method for class "df"; consider renaming (e.g. summarise_df) to avoid
#' accidental S3 dispatch.
#'
#' @param df data.frame to summarise; assumed to contain `time_minute` and
#'   `vector.mag` columns -- TODO confirm schema
#' @param freq frequency device recording at (forwarded to peak.func)
#' @param k window size of smoothing (forwarded to peak.func; default 10)
#'
#' @return data.frame with one row per (time_minute, variable) holding min,
#'   quartiles, median, mean and max, plus the peak.func columns
#'
summary.df <- function(df, freq, k=10){
  test <- select(df, time_minute, vector.mag) %>%
    melt(id.vars = "time_minute")
  times <- unique(df$time_minute)
  test.summary <- test %>%
    group_by(time_minute, variable) %>%
    summarise(min = min(value),
              Qu1 = quantile(value, .25),
              Median = median(value),
              Mean = mean(value),
              Qu2 = quantile(value, .75),
              Max = max(value))
  # One peak.func result per minute, row-bound, Timestamp column dropped.
  tmp <- do.call(rbind, lapply(seq_along(times), function(t) peak.func(times[t], df, freq, k)))
  test.summary <- cbind(test.summary, tmp %>% select(-Timestamp))
  return(test.summary)
}
|
5fc6c63c18d38b26fa3ad1607be58cfbff531bde
|
04d12d9c20048ca86da8d05fc72a65b8f13da571
|
/predict_logistic_regression.R
|
ab9855cd6aa3c14c292e0f8edc49140185036345
|
[] |
no_license
|
yashparekh/risk-prediction
|
6712804fd8729a2b3c464f8cecbb264327cb16b2
|
2593d84fcc2a9ccaa2207686a5cea6b23e5711b1
|
refs/heads/master
| 2021-04-15T09:27:33.775949
| 2018-03-25T20:13:27
| 2018-03-25T20:13:27
| 126,735,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,511
|
r
|
predict_logistic_regression.R
|
#part1 -- load data and make a seeded 70/30 train/test split
cars<-read.csv('car_data.csv')
set.seed(71923)
train_insts<-sample(nrow(cars),0.7*nrow(cars))
cars_train<-cars[train_insts,]
cars_test<-cars[-train_insts,]
#part2 -- exploratory plots/tables of predictors vs. the IsBadBuy label
# NOTE(review): attach() puts the training columns on the search path for
# the formulas below; detach() is never called, which can shadow names later.
attach(cars_train)
boxplot(VehOdo~IsBadBuy,ylab='VehOdo',xlab='IsBadBuy')
boxplot(VehicleAge~IsBadBuy,ylab='VehicleAge',xlab='IsBadBuy')
make_table<-table(IsBadBuy,Make)
make_table
# row-normalised: distribution of Make within each IsBadBuy class
make_table/rowSums(make_table)
#part3---linear regression model
# Linear probability model: OLS on the 0/1 IsBadBuy label.
car_lin<-lm(IsBadBuy~Auction+Size+Color+Make+MMRAcquisitionAuctionAveragePrice+MMRAcquisitionRetailAveragePrice+VehicleAge+VehOdo+WheelType)
summary(car_lin)
lin_preds_train <- predict(car_lin,newdata=cars_train)
lin_preds_test <- predict(car_lin,newdata=cars_test)
# Average error (bias) and root-mean-squared error of a prediction vector.
# Returns c(bias, rmse).
measure_perf <- function(predicted,actual){
  err <- predicted - actual
  bias <- mean(err)
  rmse <- sqrt(mean(err^2))
  c(bias, rmse)
}
# In-sample and out-of-sample bias/RMSE for the linear probability model
measure_perf(lin_preds_train, cars_train$IsBadBuy)
measure_perf(lin_preds_test, cars_test$IsBadBuy)
# Confusion matrix for scored predictions: classify at `cutoff`, then
# cross-tabulate as table(actual, predicted).
# FIX: the original assigned the table to a local named after the function
# itself and returned it only via the (invisible) value of `<-`; return the
# table explicitly instead.
confusion_matrix <- function(predicted, actual, cutoff){
  classifications <- ifelse(predicted > cutoff, 1, 0)
  table(actual, classifications)
}
# Accuracy, sensitivity (TPR) and specificity (TNR) from a 2x2 confusion
# matrix laid out as table(actual, predicted) with classes ordered 0, 1.
# Returns c(accuracy, tpr, tnr).
model_performance <- function(confusion_matrix){
  true_neg  <- confusion_matrix[1,1]
  false_pos <- confusion_matrix[1,2]
  false_neg <- confusion_matrix[2,1]
  true_pos  <- confusion_matrix[2,2]
  total <- true_pos + true_neg + false_pos + false_neg
  accuracy    <- (true_pos + true_neg) / total
  sensitivity <- true_pos / (true_pos + false_neg)
  specificity <- true_neg / (true_neg + false_pos)
  c(accuracy, sensitivity, specificity)
}
# Evaluate the linear model as a classifier at cutoff 0.5
lin_matrix <- confusion_matrix(lin_preds_test, cars_test$IsBadBuy,.5)
lin_matrix
lin_metrics <- model_performance(lin_matrix)
lin_metrics

#part4---logistic regression model
car_log<-glm(IsBadBuy~Auction+Size+Color+Make+MMRAcquisitionAuctionAveragePrice+MMRAcquisitionRetailAveragePrice+VehicleAge+VehOdo+WheelType,data=cars_train,family='binomial')
summary(car_log)

# Predicted probabilities (type='response') on train and test sets
log_preds_train <- predict(car_log,newdata=cars_train,type='response')
log_preds_test <- predict(car_log,newdata=cars_test,type='response')
measure_perf(log_preds_train, cars_train$IsBadBuy)
measure_perf(log_preds_test, cars_test$IsBadBuy)
log_matrix <- confusion_matrix(log_preds_test, cars_test$IsBadBuy,.5)
log_matrix
log_metrics <- model_performance(log_matrix)
log_metrics

# Score a single hypothetical vehicle.
# BUG FIX: the original wrote VehOdo=10,000 -- the comma split it into two
# arguments (VehOdo=10 plus an unnamed 0); the intended odometer is 10000.
pred_car <- data.frame(Auction='MANHEIM', VehicleAge=1, Make='NISSAN', Color='RED',WheelType='NULL', VehOdo=10000, Size='COMPACT', MMRAcquisitionAuctionAveragePrice=8000, MMRAcquisitionRetailAveragePrice=10000)
p<- predict(car_log,newdata = pred_car, type='response')
|
34fc4e15e67ef454682405231d779a0da10820fe
|
ba2845eadc8880147e906ab727d322d875226efa
|
/Analyses/soilmoisture/savestan.R
|
8824c04aefe5aaa8a69f53ed9540050483469f77
|
[] |
no_license
|
AileneKane/radcliffe
|
80e52e7260195a237646e499bf4e3dad4af55330
|
182cd194814e46785d38230027610ea9a499b7e8
|
refs/heads/master
| 2023-04-27T19:55:13.285880
| 2023-04-19T15:15:02
| 2023-04-19T15:15:02
| 49,010,639
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
savestan.R
|
# Utility function which saves all stanfit or shinystan objects in the working
# memory to a .RData file with today's date. Optionally add a suffix to
# describe this set of models.
#
# suffix: optional string appended (after a space) to the output file name.
savestan <- function(suffix=NULL) {
  obj.names <- ls(envir = .GlobalEnv)
  # FIX: the original compared class(get(x)) == "stanfit" via sapply, which
  # misbehaves when an object's class attribute has length > 1, and scanned
  # the environment twice. inherits() handles multi-class objects and a
  # single vapply pass guarantees a logical vector.
  is.stan <- vapply(obj.names,
                    function(x) inherits(get(x, envir = .GlobalEnv),
                                         c("stanfit", "shinystan")),
                    logical(1))
  # Leading "" yields a space before the suffix (or "" when suffix is NULL).
  suffix <- paste("", suffix, collapse = "")
  save(file = paste("Stan Output ", Sys.Date(), suffix, ".RData", sep = ""),
       list = obj.names[is.stan])
}
|
5b93af50dc5c360167267d2eaa89b5f5f58eef88
|
0ecc38c2cc3d5061f4668ca151475703ac8b5ee6
|
/r/02.loadConvertCleanMergeData.R
|
008e3ceb96e6f5b23f4d3803854f0f6d3af80b64
|
[] |
no_license
|
jsorbo/r-exposome
|
b9fca734550bfe6636a15542271c16fc9802b2b1
|
9d665cd12b49eaaec10045e6fe3cb08275cc4882
|
refs/heads/master
| 2021-01-22T20:26:49.117486
| 2020-10-16T02:33:46
| 2020-10-16T02:33:46
| 85,323,080
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,740
|
r
|
02.loadConvertCleanMergeData.R
|
################################################################################
# 2. load data, rename columns, clean data, merge data frames

# Import from csv
independent <- read.csv(file="..\\exposome-data\\independent.csv", header=TRUE)
dependentQuintiles <- read.csv(file="..\\exposome-data\\dependentQuintiles.csv", header=TRUE)

# Rename cryptic census column codes to descriptive names.
# (rename.column is defined elsewhere in this project.)
independent <- rename.column(independent, "AGE030200D", "populationApril2000")
independent <- rename.column(independent, "AGE040200D", "populationJuly2000")
independent <- rename.column(independent, "AGE050200D", "populationMedianAgeApril2000")
independent <- rename.column(independent, "BNK010200D", "bankOfficesJune2000")
independent <- rename.column(independent, "BNK050200D", "bankDepositsJune2000")
independent <- rename.column(independent, "CLF030200D", "unemployment")
independent <- rename.column(independent, "CLF040200D", "unemploymentRate")
independent <- rename.column(independent, "CRM110200D", "violentCrimes")
independent <- rename.column(independent, "EDU635200D", "educationHighSchoolOrAboveRate")
independent <- rename.column(independent, "HEA010200D", "insuranceOrMedicare")
independent <- rename.column(independent, "HEA070200D", "medicare")
independent <- rename.column(independent, "HSD150200D", "householdsMaleNoWife")
independent <- rename.column(independent, "HSD170200D", "householdsFemaleNoHusband")
independent <- rename.column(independent, "HSG455200D", "ownerOccupiedHomesHouseholderBlackOrAfricanAmerican")
independent <- rename.column(independent, "HSG460200D", "ownerOccupiedHomesHouseholderHispanicOrLatino")
independent <- rename.column(independent, "HSG495200D", "sampleMedianHousingUnitValue")
independent <- rename.column(independent, "HSG680200D", "renterOccupiedHousingUnits")
independent <- rename.column(independent, "HSG695200D", "renterOccupiedHomesHouseholderBlackOrAfricanAmerican")
independent <- rename.column(independent, "HSG700200D", "renterOccupiedHomesHouseholderHispanicOrLatino")
independent <- rename.column(independent, "INC110199D", "medianHouseholdIncome1999")
independent <- rename.column(independent, "INC415199D", "meanHouseholdEarnings")
independent <- rename.column(independent, "INC420200D", "householdsWithSocialSecurityIncome")
independent <- rename.column(independent, "INC910199D", "perCapitaIncome")
independent <- rename.column(independent, "IPE010200D", "medianHouseholdIncome2000")
independent <- rename.column(independent, "IPE120200D", "peopleInPovertyRate")
independent <- rename.column(independent, "LND110200D", "landArea")
independent <- rename.column(independent, "PIN020200D", "perCapitaPersonalIncome")
independent <- rename.column(independent, "POP060200D", "populationPerSquareMile")
independent <- rename.column(independent, "POP110200D", "urbanPopulationSample")
independent <- rename.column(independent, "POP150200D", "malePopulationCompleteCount")
independent <- rename.column(independent, "POP160200D", "femalePopulationCompleteCount")
independent <- rename.column(independent, "POP220200D", "populationOfOneRaceWhiteAloneCompleteCount")
independent <- rename.column(independent, "POP250200D", "populationOfOneRaceBlackOrAfricanAmericanAloneCompleteCount")
independent <- rename.column(independent, "PVY020199D", "populationBelowPovertyLevel")
independent <- rename.column(independent, "SPR010200D", "socialSecurityBenefitRecipients")
independent <- rename.column(independent, "SPR410200D", "supplementalSecurityIncomeRecipients")

# Source: ArcGIS REST Services Directory
# https://gis.ers.usda.gov/arcgis/rest/services/fa_restaurants/MapServer/layers
independent <- rename.column(independent, "FFRPTH07", "fastFoodRestaurantsPer1000")
independent <- rename.column(independent, "FSRPTH07", "fullServiceRestaurantsPer1000")

# Convert to numeric.
# REFACTOR: the original repeated
#   independent["col"] <- lapply(independent["col"], as.numeric)
# once per column (~60 near-identical lines); converting the whole named set
# in one lapply() call is equivalent and removes the duplication.
# NOTE(review): as.numeric() on a factor column yields the integer level
# codes, not the printed values -- this mirrors the original behaviour;
# confirm these CSV columns arrive as character/numeric, not factor.
numericColumns <- c(
  "medianHouseholdIncome2000", "peopleInPovertyRate", "insuranceOrMedicare",
  "violentCrimes", "perCapitaPersonalIncome", "educationHighSchoolOrAboveRate",
  "populationPerSquareMile", "urbanPopulationSample",
  "malePopulationCompleteCount", "femalePopulationCompleteCount",
  "populationOfOneRaceWhiteAloneCompleteCount",
  "populationOfOneRaceBlackOrAfricanAmericanAloneCompleteCount",
  "renterOccupiedHousingUnits",
  "renterOccupiedHomesHouseholderBlackOrAfricanAmerican",
  "renterOccupiedHomesHouseholderHispanicOrLatino",
  "householdsMaleNoWife", "householdsFemaleNoHusband",
  "socialSecurityBenefitRecipients", "supplementalSecurityIncomeRecipients",
  "landArea", "unemploymentRate", "bankOfficesJune2000",
  "AvgFineParticulateMatterµgm", "AvgDailyPrecipitationmm",
  "AvgDayLandSurfaceTemperatureF", "AvgDailyMaxAirTemperatureF",
  "AvgDailyMinAirTemperatureF", "AvgDailyMaxHeatIndexF",
  "populationApril2000", "populationJuly2000",
  "b_1996", "b_1997", "b_1998", "b_1999", "b_2000",
  "averagesmoke1996to2000", "populationMedianAgeApril2000",
  "number2004diabetes", "percent2004diabetes",
  "ageadjustedpercent2004diabetes",
  "ownerOccupiedHomesHouseholderBlackOrAfricanAmerican",
  "ownerOccupiedHomesHouseholderHispanicOrLatino",
  "sampleMedianHousingUnitValue", "perCapitaIncome",
  "numberobesityprevalence2004", "percentobesity2004",
  "ageadjustedpercentobesity2004",
  "numberleisuretimephysicalinactivityprevalence2004",
  "percentleisuretimephysicalinactivityprevalence2004",
  "ageadjustedpercentleisuretimephysicalinactivityprevalence2004",
  "HHNV1MI", "fastFoodRestaurantsPer1000", "fullServiceRestaurantsPer1000",
  "PH_FRUVEG", "PH_SNACKS", "PH_SODA", "PH_MEAT", "PH_FATS", "PH_PREPFOOD",
  "medicare")
independent[numericColumns] <- lapply(independent[numericColumns], as.numeric)

# Remove rows with any NA
independent = independent[complete.cases(independent),]

# convert the outcome rate to numeric
dependentQuintiles["ageadjustedrate"] <- lapply(dependentQuintiles["ageadjustedrate"], as.numeric)

# Remove ' County' substring from County field
dependentQuintiles["County"] <- lapply(dependentQuintiles["County"], gsub, pattern = " County", replacement = "", fixed = TRUE)

# Join independent with dependent quintiles.
# NOTE(review): gsub(" County", "", "County") is just the constant "County",
# so this is equivalent to by.y="County" -- presumably a leftover from an
# earlier version; kept as-is.
merged <- merge(x=independent, y=dependentQuintiles, by.x="Areaname", by.y=gsub(" County", "", "County"))

# Remove na, null
merged = merged[complete.cases(merged),]
merged <- merged[merged$Quintiles!="#NULL!",]

# Binarize the outcome: top quintile ("5") vs the rest.
merged$cvd <- sapply(merged$Quintiles, function(x) ifelse(x == "5", "1", "0"))
merged$cvd <- sapply(merged$cvd, as.factor)
|
49e78891ab0af6d2c9bc316b9978c6780bd51697
|
ad07245f317e5bbd83668a97d9212dc9bd2d2df7
|
/man/zscore.Rd
|
db7b1660a21d3dddd1093d0b3abbac976f441fde
|
[] |
no_license
|
lin-jennifer/polstat
|
895022dd114eafe80c11852bfc78edaa5a7093f5
|
33e73d8486b7c06e98af1b7d8c8c97a439d250d8
|
refs/heads/main
| 2023-01-21T15:00:26.850008
| 2020-12-01T15:59:48
| 2020-12-01T15:59:48
| 313,154,514
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
zscore.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zscore.R
\name{zscore}
\alias{zscore}
\title{z-score Calculations}
\usage{
zscore(x, mean, sd)
}
\arguments{
\item{x}{the observation}
\item{mean}{mean of interest -- can be sample or population depending on zscore interest}
\item{sd}{standard deviation or standard error, depending on context}
}
\description{
Computes the z-score (standard score) of an observation: the number of
standard deviations (or standard errors) by which it differs from the mean.
}
\examples{
zscore(10, 15, 2)
}
|
9dafe0e88e12b67baf5aa57238050a04ce11eb6d
|
74cbb0028395fc41e172dffe63970acf19f53361
|
/R/colors.R
|
666731852ba69ecc016bdd28210c0c5df7bafddf
|
[
"MIT"
] |
permissive
|
fionahevans/Ragronomy
|
004cdc59f7fa9e5b89be7dbc25f797dad8c4eb09
|
1b5420b15796f59499f8c33fc3457ee1f0ebe0ac
|
refs/heads/master
| 2021-09-28T09:21:55.266071
| 2018-09-07T05:20:14
| 2018-09-07T05:20:14
| 112,438,858
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,475
|
r
|
colors.R
|
#' Makes colors for plotting
#'
#' Maps the values of x onto a color palette, stretched to cover the range
#' of x. NA values map to white.
#'
#' @param x Input data.
#' @param col Color map (default is rainbow(100)).
#' @param range Range of x values to limit color to (not required).
#'
#' @author Fiona Evans
#'
#' @return Returns a vector of colors, one per element of x.
#' @export
color.of <- function (x, col = rainbow(100), range = NULL)
{
  if (is.null(range))
    range <- range(x, na.rm = T)
  # Prepend white so index 1 (reserved for NA) is a neutral color.
  this.col <- c("#FFFFFF", col)
  n <- length(this.col)
  # Rescale x to palette indices 2..n, clamp out-of-range values, map NA to 1.
  indx <- round(my.rescale(x, 2, n, mina = range[1], maxa = range[2]))
  indx <- replace.gt(indx, n, n)
  indx <- replace.lt(indx, 2, 2)
  indx <- na.replace(indx, 1)
  this.col[indx]
}
#' Rescale a vector.
#'
#' Linearly rescale a vector so that the interval [mina, maxa] maps onto
#' [minb, maxb].
#'
#' @param a Input vector.
#' @param minb Lower bound of the output range.
#' @param maxb Upper bound of the output range.
#' @param mina Input value mapped to minb (defaults to min(a)).
#' @param maxa Input value mapped to maxb (defaults to max(a)).
#'
#' @keywords manip
#' @export
#' @examples
#' a <- c(2:8)
#' my.rescale(a, 1, 10)
#' my.rescale(a, 1, 10, minb=1, maxb=10)
my.rescale <- function(a, minb, maxb, mina=NULL, maxa=NULL) {
  # Fall back to the observed range when endpoints are not supplied.
  if (is.null(mina)) {
    mina <- min(a, na.rm = TRUE)
  }
  if (is.null(maxa)) {
    maxa <- max(a, na.rm = TRUE)
  }
  fraction <- (a - mina) / (maxa - mina)
  minb + (maxb - minb) * fraction
}
# Clamp from above: values of x strictly greater than val1 become val2.
replace.gt <- function (x, val1, val2) {
  too.big <- x > val1
  x[too.big] <- val2
  x
}
# Clamp from below: values of x strictly less than val1 become val2.
replace.lt <- function (x, val1, val2) {
  too.small <- x < val1
  x[too.small] <- val2
  x
}
# Replace the NA entries of vector x with val.
na.replace <- function(x, val) {
  missing.pos <- is.na(x)
  x[missing.pos] <- val
  x
}
#' Older version from package 'fields' (less of the middle colour present).
#'
#' Color interpolation between three colors to output a color vector.
#'
#' @param n Length of output vector.
#' @param start Color.
#' @param end Color.
#' @param middle Color.
#'
#' @keywords color
#' @export
two.colors.old <- function (n = 256, start = "darkgreen", end = "red", middle = "white")
{
  # Split n into the two ramp halves. floor() keeps both counts integral
  # for odd n (the original used n/2 directly, producing fractional matrix
  # dimensions and a malformed result when n is odd).
  n1 <- floor(n/2)
  n2 <- n - n1
  col2 <- col2rgb(end)
  col1 <- col2rgb(start)
  mid.col <- col2rgb(middle)
  # Mixing weights: e1 fades start -> middle, e2 fades middle -> end.
  e1 <- seq(1, 0, , n1)
  e2 <- seq(0, 1, , n2)
  # BUG FIX: the second half previously used nrow = n1; it must be n2
  # (identical when n is even, wrong when n is odd).
  temp <- rbind(e1 * matrix(col1, nrow = n1, ncol = 3, byrow = TRUE) +
      (1 - e1) * matrix(mid.col, nrow = n1, ncol = 3, byrow = TRUE),
      e2 * matrix(col2, nrow = n2, ncol = 3, byrow = TRUE) +
      (1 - e2) * matrix(mid.col, nrow = n2, ncol = 3, byrow = TRUE))
  temp <- temp/256
  rgb(temp[, 1], temp[, 2], temp[, 3])
}
|
044ed973dedb882f740bef19b6ff7646da8e3538
|
f10238b9bee304a4d035b4063923ba3b04a31b5d
|
/logspec/ranges.R
|
03feb6d10a121271680bdb8d5a181c8bee1cd012
|
[] |
no_license
|
ClimateImpactLab/hierarchical-estimation
|
9a6787e4388135ef25c29ac322cc2a686e175a58
|
267141f8dede7b5be11c6d15f58589c8e6a742e1
|
refs/heads/master
| 2021-01-18T20:16:43.242558
| 2018-09-12T09:43:24
| 2018-09-12T09:43:24
| 86,951,455
| 0
| 0
| null | 2018-09-12T09:44:06
| 2017-04-02T00:18:25
|
R
|
UTF-8
|
R
| false
| false
| 12,127
|
r
|
ranges.R
|
## This library supports logspec.R, and logspec.R must be loaded.
## Calculate the log likelihood, computing ADM1 sigmas from residuals
## Log likelihood where the per-region (ADM1) sigmas are estimated from the
## residuals of the current prediction rather than supplied by the caller.
## IDIOM FIX: the original grew `sigmas` with c() inside a loop; vapply
## preallocates and guarantees a numeric vector, same region order.
calc.likeli.nosigma <- function(dmxxs, dmyy, zzs, kls, mm, betas, gammas, weights, prior) {
    dmyy.exp <- calc.expected.demeaned(dmxxs, zzs, kls, mm, betas, gammas)
    ## One sigma per region: SD of the residuals within that region.
    sigmas <- vapply(unique(mm), function(jj) {
        included <- mm == jj
        sd(dmyy.exp[included] - dmyy[included])
    }, numeric(1))
    calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, mm, betas, gammas, sigmas, weights, prior)
}
## Log likelihood with caller-supplied per-region sigmas. Thin alias for
## calc.likeli.demeaned, kept for symmetry with calc.likeli.nosigma.
calc.likeli.withsigma <- function(dmxxs, dmyy, zzs, kls, mm, betas, gammas, sigmas, weights, prior) {
    calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, mm, betas, gammas, sigmas, weights, prior)
}
## Build a likelihood closure over the stacked (betas, gammas) vector;
## sigmas are re-estimated from residuals at every evaluation
## (via calc.likeli.nosigma).
make.methast.betagamma.likeli <- function(K, L, dmxxs, dmyy, zzs, kls, mm, weights, prior) {
    function(param) {
        ## Stack order: first K entries are betas, remaining max(kls) are gammas.
        beta <- param[1:K]
        gamma <- param[(K+1):(K+max(kls))]
        calc.likeli.nosigma(dmxxs, dmyy, zzs, kls, mm, beta, gamma, weights, prior)
    }
}
## Build a likelihood closure over the stacked (betas, gammas) vector with
## sigmas held fixed (supplied by the caller rather than re-estimated).
make.methast.betagamma.sigma.likeli <- function(K, L, dmxxs, dmyy, zzs, kls, mm, sigmas, weights, prior) {
    function(param) {
        ## Stack order: first K entries are betas, remaining max(kls) are gammas.
        betas <- param[1:K]
        gammas <- param[(K+1):(K+max(kls))]
        calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, mm, betas, gammas, sigmas, weights, prior)
    }
}
## Build a likelihood closure over the gammas only: betas are profiled out
## via get.betas at each evaluation, with sigmas held fixed.
make.methast.gamma.sigma.likeli <- function(K, L, dmxxs, dmyy, zzs, kls, mm, sigmas, weights, prior, get.betas) {
    function(gammas) {
        ## NOTE(review): this reads `dmxxs.orig`, which is not a parameter of
        ## this factory -- it resolves to a variable in an enclosing/global
        ## environment. Possibly a typo for the `dmxxs` argument; confirm
        ## against the callers in logspec.R before changing.
        betas <- get.betas(K, L, gammas, dmyy, dmxxs.orig, zzs, kls, mm, weights)
        calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, mm, betas, gammas, sigmas, weights, prior)
    }
}
## Joint Metropolis-Hastings over (betas, gammas), with sigmas re-estimated
## from residuals at each step. Returns the chains split back into beta and
## gamma columns plus the best draw's likelihood and index.
methast.betagamma <- function(K, L, dmxxs, dmyy, zzs, kls, mm, iter, beta0, gamma0, betaerr, gammaerr, weights=1, prior=noninformative.prior) {
    likelifunc <- make.methast.betagamma.likeli(K, L, dmxxs, dmyy, zzs, kls, mm, weights, prior)
    result <- methast(iter, c(beta0, gamma0), c(betaerr, gammaerr), likelifunc)
    list(betas=result$params[, 1:K], gammas=result$params[, (K+1):(K+max(kls))], best.likeli=likelifunc(result$params[result$best.index,]), best.index=result$best.index)
}
## Joint Metropolis-Hastings over (betas, gammas) with fixed sigmas.
## When there are no gamma parameters (max(kls) == 0) an empty gamma matrix
## is returned instead of slicing with an invalid column range.
methast.betagamma.sigma <- function(K, L, dmxxs, dmyy, zzs, kls, mm, iter, beta0, gamma0, sigmas, betaerr, gammaerr, weights=1, prior=noninformative.prior) {
    likelifunc <- make.methast.betagamma.sigma.likeli(K, L, dmxxs, dmyy, zzs, kls, mm, sigmas, weights, prior)
    result <- methast(iter, c(beta0, gamma0), c(betaerr, gammaerr), likelifunc)
    if (max(kls) > 0)
        list(betas=result$params[, 1:K], gammas=result$params[, (K+1):(K+max(kls))], best.likeli=likelifunc(result$params[result$best.index,]), best.index=result$best.index)
    else
        list(betas=result$params[, 1:K], gammas=matrix(NA, nrow(result$params), 0), best.likeli=likelifunc(result$params[result$best.index,]), best.index=result$best.index)
}
## Metropolis-Hastings over the gammas only; betas are profiled out through
## get.betas. With no gamma parameters (max(kls) == 0) there is nothing to
## sample, so the profiled betas are returned directly.
methast.gamma.sigma <- function(K, L, dmxxs, dmyy, zzs, kls, mm, iter, get.betas, gamma0, sigmas, gammaerr, weights=1, prior=noninformative.prior) {
    likelifunc <- make.methast.gamma.sigma.likeli(K, L, dmxxs, dmyy, zzs, kls, mm, sigmas, weights, prior, get.betas)

    if (max(kls) == 0) { # Nothing to do
        ## BUG FIX: the original referenced an undefined `gammas` here;
        ## the initial-value argument is named `gamma0`.
        betas <- get.betas(K, L, gamma0, dmyy, dmxxs, zzs, kls, mm, weights)
        return(list(betas=betas, gammas=gamma0, best.likeli=likelifunc(gamma0), best.index=NA))
    }

    result <- methast(iter, gamma0, gammaerr, likelifunc)
    ## The profiled betas at the best gamma draw, replicated across iterations.
    betas0 <- get.betas(K, L, result$params[result$best.index, ], dmyy, dmxxs, zzs, kls, mm, weights)
    betas <- t(matrix(betas0, length(betas0), iter))
    list(betas=betas, gammas=result$params, best.likeli=likelifunc(result$params[result$best.index,]), best.index=result$best.index)
}
## Use Metropolis-Hastings with N seeds
## Multi-start joint MH over (betas, gammas); returns pooled chains plus the
## single best (beta, gamma) draw found across all seeds.
repeated.methast.betagamma <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, beta0, gamma0, betaerr, gammaerr, weights=1, prior=noninformative.prior) {
    result <- repeated.methast(seeds, iter, warmup,
                               c(beta0, gamma0), c(betaerr, gammaerr),
                               make.methast.betagamma.likeli(K, L, dmxxs, dmyy, zzs, kls, adm1, weights, prior))
    list(betas=result$params[, 1:K], gammas=result$params[, (K+1):(K+max(kls))], best.beta=result$best.param[1:K], best.gamma=result$best.param[(K+1):(K+max(kls))])
}
## Single Metropolis-Hastings with automatic tuning
## Tune proposal standard errors one parameter at a time: run a 1-D MH chain
## on each beta (all other parameters held at their initial values) and use
## the sample SD of the draws as that parameter's proposal error; then do
## the same for the gammas via repeated.methast.each.gammaonly.
repeated.methast.each <- function(K, L, dmxxs, dmyy, zzs, kls, mm, iter, warmup, seeds, beta0, gamma0, sigmas, weights=1, prior=noninformative.prior, verbose=F) {
    betaerr <- c()
    for (kk in 1:length(beta0)) {
        print(c("Beta", kk))  # progress indicator
        result <- repeated.methast(seeds, iter, warmup, beta0[kk], 1,
                                   function(beta) {
                                       ## Perturb only coordinate kk of beta0.
                                       beta2 = beta0
                                       beta2[kk] <- beta
                                       calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, mm,
                                                            beta2, gamma0, sigmas, weights, prior)
                                   }, verbose=verbose)
        betaerr <- c(betaerr, sd(result$params))
    }

    gammaerr <- repeated.methast.each.gammaonly(K, L, dmxxs, dmyy, zzs, kls, mm, iter, warmup, seeds, beta0, gamma0, sigmas, weights, prior, verbose)

    list(betaerr=betaerr, gammaerr=gammaerr)
}
## Tune proposal errors for the gammas only: 1-D MH chain per gamma with all
## other parameters fixed; returns the vector of per-gamma sample SDs.
repeated.methast.each.gammaonly <- function(K, L, dmxxs, dmyy, zzs, kls, mm, iter, warmup, seeds, beta0, gamma0, sigmas, weights=1, prior=noninformative.prior, verbose=F) {
    gammaerr <- c()
    for (kl in 1:length(gamma0)) {
        print(c("Gamma", kl))  # progress indicator
        result <- repeated.methast(seeds, iter, warmup, gamma0[kl], 1,
                                   function(gamma) {
                                       ## Perturb only coordinate kl of gamma0.
                                       gamma2 = gamma0
                                       gamma2[kl] <- gamma
                                       calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, mm,
                                                            beta0, gamma2, sigmas, weights, prior)
                                   }, verbose=verbose)
        gammaerr <- c(gammaerr, sd(result$params))
    }
    gammaerr
}
## Full VCV of (betas, gammas): numerically invert the Hessian of the
## negative log likelihood at the supplied point, with sigmas held fixed.
## solve() raises an error when the Hessian is singular; callers catch it.
calc.vcv.ols <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights, prior=noninformative.prior) {
    objective <- function(params) {
        ## Stack order: betas first, then gammas.
        betas <- params[1:K]
        gammas <- params[(K+1):(K+max(kls))]
        -calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights, prior)
    }

    params <- c(betas, gammas)
    hessian <- optimHess(params, objective)
    solve(hessian)
}
## Scalar variance for a single beta (index bk): inverse of the 1-D Hessian
## of the negative log likelihood, all other parameters held fixed.
calc.vcv.ols.betasingle <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, bk, sigmas, weights, prior=noninformative.prior) {
    neg.loglik <- function(value) {
        betas[bk] <- value
        -calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights, prior)
    }
    1 / optimHess(betas[bk], neg.loglik)
}
## Scalar variance for a single gamma (index gk): inverse of the 1-D Hessian
## of the negative log likelihood, all other parameters held fixed.
calc.vcv.ols.gammasingle <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, gk, sigmas, weights, prior=noninformative.prior) {
    neg.loglik <- function(value) {
        gammas[gk] <- value
        -calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights, prior)
    }
    1 / optimHess(gammas[gk], neg.loglik)
}
## VCV of the gammas alone; betas are profiled out via get.betas at each
## objective evaluation (stacked.betas is defined elsewhere in logspec).
calc.vcv.ols.gammaonly <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, gammas, sigmas, weights, prior=noninformative.prior, get.betas=stacked.betas) {
    objective <- function(gammas) {
        betas <- get.betas(K, L, gammas, dmyy, dmxxs, zzs, kls, adm1, weights)
        -calc.likeli.demeaned(dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights, prior)
    }

    hessian <- optimHess(gammas, objective)
    solve(hessian)
}
## Bayesian VCV estimate: tune per-parameter proposal errors, then sample
## (betas, gammas) jointly and take the sample covariance of the chains.
## BUG FIX: the original passed the literal weights=1 to both helpers,
## silently discarding a caller-supplied `weights` argument.
calc.vcv.methast <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, betas, gammas, sigmas, weights=1, prior=noninformative.prior) {
    result <- repeated.methast.each(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, betas, gammas, sigmas, weights=weights, prior=prior)
    vcv.bayes(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, betas, gammas, result$betaerr, result$gammaerr, weights=weights)
}
## Sample covariance of the pooled (beta, gamma) MH chains.
## BUG FIX: the original body (a) referenced an undefined `iter` (the
## parameter is `iters`) and (b) called repeated.methast, whose signature is
## (seeds, iter, warmup, param0, err, likelifunc) and which returns $params
## -- the argument list and the $betas/$gammas access both match
## repeated.methast.betagamma instead.
vcv.bayes <- function(K, L, dmxxs, dmyy, zzs, kls, adm1, iters, warmup, seeds, beta0, gamma0, betaerr, gammaerr, weights=1) {
    result <- repeated.methast.betagamma(K, L, dmxxs, dmyy, zzs, kls, adm1, iters, warmup, seeds, beta0, gamma0, betaerr, gammaerr, weights=weights)
    cov(cbind(result$betas, result$gammas))
}
## Effective standard deviations implied by the empirical 95% interval:
## width of each column's [2.5%, 97.5%] quantile range divided by the width
## of the same interval for a standard normal.
ses.tails <- function(params) {
    observed.width <- apply(params, 2, function(draws) {
        diff(quantile(draws, probs = c(.025, .975)))
    })
    normal.width <- qnorm(.975) - qnorm(.025)
    observed.width / normal.width
}
## Conservative standard errors: parameter-wise maximum of three estimates
## -- OLS (from the VCV diagonal), the posterior sample SD, and the SD
## implied by the posterior 95% tails.
serr.conservative <- function(vcv.ols, params) {
    ols.se <- sqrt(abs(diag(vcv.ols))) # abs(): diagonal can go slightly negative
    bayes.se <- apply(params, 2, sd)
    tails.se <- ses.tails(params)
    pmax(ols.se, bayes.se, tails.se)
}
## Top-level VCV estimation for (betas, gammas).
## Strategy: try the OLS/Hessian VCV; repair unreasonable diagonal entries
## with single-parameter Hessians; then refine with multi-start MH sampling
## and take conservative standard errors.
## NOTE(review): list2env(check.arguments(...)) and list2env(demean.yxs(...))
## inject K, L, dmxxs, dmyy, ... into this function's environment (helpers
## defined elsewhere in logspec). The inner calls pass the literal
## weights=1, ignoring `weights` except in demeaning -- confirm intent.
estimate.vcv <- function(betas, gammas, sigmas, yy, xxs, zzs, kls, adm1, factors, iter=600, warmup=100, seeds=4, use.ols=T, weights=1, prior=noninformative.prior) {
    list2env(check.arguments(yy, xxs, zzs, kls, adm1, factors), environment())
    list2env(demean.yxs(yy, xxs, factors, weights), environment())

    if (use.ols) {
        ## Hessian inversion can fail (singular); fall back to MH tuning.
        vcv.start <- tryCatch({
            calc.vcv.ols(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights, prior=prior)
        }, error=function(e) {
            print(e)
            NULL
        })
        if (is.null(vcv.start))
            use.ols <- F
        else {
            ## Make sure that all on diag are reasonable
            for (bk in 1:length(betas)) {
                update <- calc.vcv.ols.betasingle(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, bk, sigmas, weights, prior=prior)
                vcv.start[bk, bk] <- max(vcv.start[bk, bk], update)
            }
            for (gk in 1:length(gammas)) {
                update <- calc.vcv.ols.gammasingle(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, gk, sigmas, weights, prior=prior)
                vcv.start[K+gk, K+gk] <- max(vcv.start[K+gk, K+gk], update)
            }
        }
    }

    if (use.ols) {
        se.start <- sqrt(abs(diag(vcv.start))) # Can get small negative values
    } else {
        ## No usable Hessian: tune proposal SEs coordinate-by-coordinate.
        result.each <- repeated.methast.each(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, betas, gammas, sigmas, weights=1, prior=prior)
        se.start <- c(result.each$betaerr, result.each$gammaerr)
    }

    ## Sample jointly using the tuned proposal errors.
    result <- repeated.methast.betagamma(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, betas, gammas, se.start[1:K], se.start[(K+1):(K+max(kls))], weights=1, prior=prior)
    if (!use.ols)
        vcv.start <- cov(cbind(result$betas, result$gammas))

    ## Conservative SEs: max of OLS, posterior SD, and tail-implied SD.
    serr <- serr.conservative(vcv.start, cbind(result$betas, result$gammas))
    if (sum(serr != se.start) == 0 && use.ols)
        list(betas=result$best.beta, gammas=result$best.gamma, vcv=vcv.start, se=se.start)
    else
        ## Rescale the sampled correlation matrix by the conservative SEs.
        list(betas=result$best.beta, gammas=result$best.gamma, vcv=diag(serr) %*% cor(cbind(result$betas, result$gammas)) %*% diag(serr), se=serr)
}
## Standard errors only (no full VCV): use the OLS/Hessian diagonal when it
## can be computed, otherwise fall back to per-parameter MH tuning.
## NOTE(review): check.arguments / demean.yxs inject K, L, dmxxs, dmyy, ...
## via list2env (helpers defined elsewhere); the inner calls pass the
## literal weights=1 -- confirm whether intentional.
estimate.se <- function(betas, gammas, sigmas, yy, xxs, zzs, kls, adm1, factors, iter=600, warmup=100, seeds=4, use.ols=T, weights=1, prior=noninformative.prior) {
    list2env(check.arguments(yy, xxs, zzs, kls, adm1, factors), environment())
    list2env(demean.yxs(yy, xxs, factors, weights), environment())

    if (use.ols) {
        ## Hessian inversion can fail (singular); fall back to MH tuning.
        vcv.start <- tryCatch({
            calc.vcv.ols(K, L, dmxxs, dmyy, zzs, kls, adm1, betas, gammas, sigmas, weights=1, prior=prior)
        }, error=function(e) {
            NULL
        })
        if (is.null(vcv.start))
            use.ols <- F
    }

    if (use.ols) {
        return(sqrt(abs(diag(vcv.start)))) # Can get small negative values
    } else {
        result.each <- repeated.methast.each(K, L, dmxxs, dmyy, zzs, kls, adm1, iter, warmup, seeds, betas, gammas, sigmas, weights=1, prior=prior)
        return(c(result.each$betaerr, result.each$gammaerr))
    }
}
|
53a2fbd34302e7e974017c039d49f53eb6ef6180
|
13c60c86f5eaf15b0a5286cf28f4785c433a05d2
|
/inst/manage/server.R
|
90abf8b1108974675b25d75ed6dd490c0c11179a
|
[] |
no_license
|
dondealban/marxanui
|
8cfe95e93d37e42af0f35e06f3527eefb14316c8
|
8eb2a2f8c2f712b10c42c215283aa6a31d52aa8e
|
refs/heads/master
| 2021-06-06T21:46:25.401672
| 2016-07-01T04:20:06
| 2016-07-01T04:20:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,931
|
r
|
server.R
|
# marxan.io
library(shiny)
require(sp)
require(maptools)
require(PBSmapping)
require(foreign)
require(sqldf)
require(vegan)
require(labdsv)
require(xtable)
library(foreach)
library(rhandsontable)
library(iptools)
library(png)
library(rjson)
# which platform are we running on?
# Sets the globals fWindows, fLinux, fMac and f64bit (via <<-) from
# .Platform$pkgType and, per OS, the architecture probes below.
# FIX: use the TRUE constant instead of the reassignable T variable.
detect_platform <- function()
{
  sPkgType <- .Platform$pkgType
  fWindows <<- (sPkgType == "win.binary")
  f64bit <<- TRUE
  fLinux <<- (sPkgType == "source")
  fMac <<- !(fWindows|fLinux)
  if (fWindows) { f64bit <<- (Sys.getenv("R_ARCH") == "/x64") }
  if (fLinux) { f64bit <<- (.Machine$sizeof.pointer == 8) }
}
detect_platform()

# Register the parallel backend: doParallel on Windows, doMC elsewhere.
# NOTE(review): iCores is not defined in this file -- presumably set by a
# sourced configuration script; confirm before running on non-Windows hosts.
if (fWindows)
{
  library(doParallel)
} else {
  library(doMC)
  registerDoMC(iCores) # the number of CPU cores
}
shinyServer(function(input, output, session, clientData) {
observe({
sUserIP <<- as.character(input$ipid)
cat(paste0("sUserIP ",sUserIP,"\n"))
})
observe({
sFingerprint <<- as.character(input$fingerprint)
cat(paste0("sFingerprint ",sFingerprint,"\n"))
})
values = list()
setHot = function(x) values[["hot"]] <<- x
#source(paste0(sShinySourcePath,"/server_pre_marxan.R"), local = TRUE)
marxanui_start("manage")
autoInvalidate <- reactiveTimer(2000,session=session)
observe({
autoInvalidate()
list_dirs <- c(list.dirs(sMarxanHome,full.names = TRUE),
list.dirs(sMarZoneHome,full.names = TRUE))
# we detect if there are new folders in the users directory, indicating a new database import
CurrentImportTime <- max(file.info(list_dirs)$ctime)
if (!(CurrentImportTime == ImportTime))
{
# user has imported a new dataset
cat(paste0("new dataset detected","\n"))
ImportTime <<- CurrentImportTime
# update the list of datasets to include the new one(s)
updateSelectInput(session, "database",
choices = c(list.dirs(sMarxanHome),list.dirs(sMarZoneHome)),
selected = sSelectDb)
# trigger a refresh of the UI
irefreshtable <<- irefreshtable + 1
updateNumericInput(session, "refreshtable", value = irefreshtable)
updateNumericInput(session,"areyousure",value=0)
}
})
observe({
# render the user interface
source(paste0(sAppDir,"/render_ui.R"), local = TRUE)
})
session$onSessionEnded(function() {
stopApp()
})
observe({
if (!is.null(input$publicdb))
{
# select this database from the list of databases
sSelectPublicDb <<- input$publicdb
cat(paste0("sSelectPublicDb ",sSelectPublicDb,"\n"))
y <- strsplit(sSelectPublicDb,"/")
sSelectPublicUser <<- y[[1]][1]
sSelectPublicType <<- y[[1]][2]
sSelectPublicDatabase <<- y[[1]][3]
updateNumericInput(session,"copypublicdata",value=0)
}
})
observe({
  # React to the "database" selector: remember the chosen dataset and
  # resolve its directory under either the Marxan or MarZone home.
  if (!is.null(input$database))
  {
    # select this database from the list of databases
    sSelectDb <<- input$database
    cat(paste0("sSelectDb ",sSelectDb,"\n"))
    sPrevious <- sMarxanDir
    # assume it is a Marxan dataset first ...
    sMarxanDir <<- paste0(sMarxanHome,"/",sSelectDb)
    sZipWD <<- paste0(sMarxanHome)
    fMarxan <<- TRUE
    fMarZone <<- FALSE
    # ... and fall back to MarZone when the directory is not under the
    # Marxan home
    if (!file.exists(sMarxanDir))
    {
      sMarxanDir <<- paste0(sMarZoneHome,"/",sSelectDb)
      sZipWD <<- paste0(sMarZoneHome)
      fMarxan <<- FALSE
      fMarZone <<- TRUE
    }
    cat(paste0("sMarxanDir ",sMarxanDir,"\n"))
    AppendLogFile(sLogFile,paste0("sSelectDb ",sSelectDb))
    AppendLogFile(sLogFile,paste0("sMarxanDir ",sMarxanDir))
    # only react further when the selection actually changed
    if (sPrevious != sMarxanDir)
    {
      if (sSelectDb != "")
      {
        #ChangeDatabase("marxan")
        # update the relevant UI components
        # trigger a refresh of the marxan UI
        # trigger a refresh of the cluster
        #irefreshcluster <<- irefreshcluster + 1
        #updateNumericInput(session, "refreshcluster", value = irefreshcluster)
        # hide any pending delete confirmation for the new selection
        updateNumericInput(session,"areyousure",value=0)
      }
    }
  }
})
observe({
  # Remember whether exports should be converted to Windows (CRLF) line
  # endings; read by the download handler below.
  fWindowsEOLN <<- input$windowseoln
})
# Serve the selected dataset as a downloadable zip archive. A cleaned copy
# is staged in a per-user temp directory (dropping per-core working files
# and the BLM/SPF/Targ parameter-sweep outputs), optionally converted to
# Windows line endings, and zipped.
output$downloadData <- downloadHandler(
  filename = function()
  {
    paste0(sSelectDb, '.zip')
  },
  content = function(file) {
    withProgress(message="Generating export",value=0,
    {
      # remove existing zip file
      sZipFile <- paste0(sAppHome,"/",sSelectDb,".zip")
      if (file.exists(sZipFile))
      {
        file.remove(sZipFile)
      }
      # create temp directory
      sTempDir <- paste0(sShinyTempPath,"/",sUserName)
      dir.create(sTempDir)
      # copy files to temp directory
      # (shell copy; any stale copy of this dataset is removed first)
      system(paste0("rm -rf ",sTempDir,"/",sSelectDb))
      system(paste0("cp -rf ",sMarxanDir," ",sTempDir))
      # keep core1's csv/txt results at the dataset root before the per-core
      # directories are deleted
      system(paste0("cp -f ",sTempDir,"/",sSelectDb,"/core1/*.csv ",sTempDir,"/",sSelectDb))
      system(paste0("cp -f ",sTempDir,"/",sSelectDb,"/core1/*.txt ",sTempDir,"/",sSelectDb))
      # remove unnecessary files
      system(paste0("rm -rf ",sTempDir,"/",sSelectDb,"/core*"))
      system(paste0("rm ",sTempDir,"/",sSelectDb,"/BLM.csv"))
      system(paste0("rm ",sTempDir,"/",sSelectDb,"/SPF.csv"))
      system(paste0("rm ",sTempDir,"/",sSelectDb,"/Targ.csv"))
      system(paste0("rm ",sTempDir,"/",sSelectDb,"/input/specBLM*.dat"))
      system(paste0("rm ",sTempDir,"/",sSelectDb,"/input/specSPF*.dat"))
      system(paste0("rm ",sTempDir,"/",sSelectDb,"/input/specTarg*.dat"))
      # drop the numbered (1..10) parameter-sweep output files
      for (i in 1:10)
      {
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output",i,"*.csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output",i,"*.dat"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/outputBLM",i,"*.csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/outputBLM",i,"*.dat"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/outputSPF",i,"*.csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/outputSPF",i,"*.dat"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/outputTarg",i,"*.csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/outputTarg",i,"*.dat"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output_BLMsummary",i,".csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output_SPFsummary",i,".csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output_Targsummary",i,".csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output_BLMsummary.csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output_SPFsummary.csv"))
        system(paste0("rm ",sTempDir,"/",sSelectDb,"/output/output_Targsummary.csv"))
      }
      # convert windows eoln
      if (fWindowsEOLN)
      {
        system(paste0("unix2dos ",sTempDir,"/",sSelectDb,"/input.dat"))
        system(paste0("unix2dos ",sTempDir,"/",sSelectDb,"/*.txt"))
        system(paste0("unix2dos ",sTempDir,"/",sSelectDb,"/*.csv"))
        system(paste0("unix2dos ",sTempDir,"/",sSelectDb,"/input/*"))
        system(paste0("unix2dos ",sTempDir,"/",sSelectDb,"/output/*.csv"))
        system(paste0("unix2dos ",sTempDir,"/",sSelectDb,"/output/*.dat"))
      }
      # zip from inside the temp dir so archive paths are relative;
      # the previous working directory is restored afterwards
      sWD <- getwd()
      setwd(sTempDir)
      # create new zip file
      system(paste0("zip -r ",sZipFile," ",sSelectDb))
      setwd(sWD)
    })
    file.copy(sZipFile,file)
  }
)
observe({
  # "Delete" button: show the are-you-sure confirmation panel.
  clicks <- input$deletedb
  if (!is.null(clicks) && clicks > 0)
  {
    # user has pressed delete
    cat(paste0("delete clicked ",clicks,"\n"))
    updateNumericInput(session,"areyousure",value=1)
  }
})
observe({
  # "Cancel" on the delete confirmation: hide the confirmation panel.
  clicks <- input$cancelDelete
  if (!is.null(clicks) && clicks > 0)
  {
    updateNumericInput(session,"areyousure",value=0)
  }
})
observe({
  # "Rename" button: open the rename panel pre-filled with the current
  # dataset name. isolate() keeps this observer from re-running when the
  # selected dataset changes.
  clicks <- input$renameData
  if (!is.null(clicks) && clicks > 0)
  {
    cat("rename clicked\n")
    updateNumericInput(session,"renamemydata",value=1)
    updateTextInput(session,"renameName",value=isolate(sSelectDb))
  }
})
observe({
  # click accept rename
  # Rename the selected dataset directory, refusing when the target name
  # already exists, then refresh the dataset list and "My data" grid.
  if (!is.null(input$acceptName))
  {
    if (input$acceptName > 0)
    {
      # does the new name already exist?
      if (fMarxan)
      {
        sNewNameDb <- paste0(sMarxanHome,"/",sRenameName)
      } else {
        sNewNameDb <- paste0(sMarZoneHome,"/",sRenameName)
      }
      if (file.exists(sNewNameDb))
      {
        # can't rename. name already exists
        # (withProgress here is only used to flash a message for 5 seconds)
        withProgress(message=paste0("Can't rename. New name ",sRenameName," already exists"),value=0,min=0,max=20, { Sys.sleep(5) })
      } else {
        # rename the dataset
        file.rename(sMarxanDir,sNewNameDb)
        sOldName <- sSelectDb
        sSelectDb <<- sRenameName
        # re-derive the dataset directory and zip working-dir globals
        if (fMarxan)
        {
          sMarxanDir <<- paste0(sMarxanHome,"/",sSelectDb)
          sZipWD <<- paste0(sMarxanHome)
        } else {
          sMarxanDir <<- paste0(sMarZoneHome,"/",sSelectDb)
          sZipWD <<- paste0(sMarZoneHome)
        }
        # trigger a refresh of the "My data" table & "database"
        irefreshtable <<- irefreshtable + 1
        updateNumericInput(session, "refreshtable", value = irefreshtable)
        updateSelectInput(session, "database",
                          choices = c(list.dirs(sMarxanHome),list.dirs(sMarZoneHome)),
                          selected = sSelectDb)
        withProgress(message=paste0("Dataset ",sOldName," renamed to ",sSelectDb),value=0,min=0,max=20, { Sys.sleep(5) })
        updateNumericInput(session,"renamemydata",value=0)
      }
    }
  }
})
observe({
  # "Cancel" on the rename panel: close it without renaming anything.
  clicks <- input$cancelName
  if (!is.null(clicks) && clicks > 0)
  {
    updateNumericInput(session,"renamemydata",value=0)
  }
})
observe({
  # Track the rename text box so the accept-rename handler can read it.
  sRenameName <<- input$renameName
})
observe({
  # Delete confirmation accepted: erase the dataset directory, then
  # refresh the dataset list and UI and select the first remaining one.
  if (!is.null(input$yesimsure))
  {
    if (input$yesimsure > 0)
    {
      # user has pressed yesimsure
      cat("yesimsure clicked\n")
      cat(paste0("deleting ",sMarxanDir,"\n"))
      # erase the database
      system(paste0("rm -rf ",sMarxanDir))
      # touch the home dirs -- presumably so the import-polling observer's
      # max ctime reflects this change; TODO confirm
      system(paste0("touch ",sMarxanHome))
      system(paste0("touch ",sMarZoneHome))
      # refresh dataset list
      list_dirs <- c(list.dirs(sMarxanHome,full.names = TRUE),
                     list.dirs(sMarZoneHome,full.names = TRUE))
      cat(paste0("new dataset detected","\n"))
      ImportTime <<-max(file.info(list_dirs)$ctime)
      a_choices <- c(list.dirs(sMarxanHome),list.dirs(sMarZoneHome))
      # update the list of datasets to include the new one(s)
      updateSelectInput(session, "database",
                        choices = a_choices,
                        selected = a_choices[1])
      # trigger a refresh of the UI
      irefreshtable <<- irefreshtable + 1
      updateNumericInput(session, "refreshtable", value = irefreshtable)
      updateNumericInput(session,"areyousure",value=0)
      # display a message to user for 5 seconds
      withProgress(message=paste0("Deleted ",sSelectDb),value=0,min=0,max=20, { Sys.sleep(5) })
    }
  }
})
# Summary grid of the user's Marxan and MarZone datasets: one row per
# dataset with counts of planning units, features, polygons, etc.
# Re-rendered whenever input$removeOk or input$refreshtable changes.
output$mydatatable <- renderTable({
  input$removeOk
  input$refreshtable
  # parse the marxan and marzone databases, listing them in the grid
  col_names <- c("name","type","used","planning_units","features","polygons","leaflet","zones","costs","created","last_run")
  list_dirs_mx <- list.dirs(sMarxanHome,full.names=FALSE)
  list_dirs_mz <- list.dirs(sMarZoneHome,full.names=FALSE)
  # Fix: the original used 1:length(...) (which yields c(1, 0) when a home
  # directory is empty) and only created the_table inside the first Marxan
  # iteration, so zero Marxan datasets broke the render. Start from an
  # empty matrix and use seq_along() so any combination of counts is safe.
  the_table <- matrix(nrow=0, ncol=length(col_names))
  for (i in seq_along(list_dirs_mx))
  {
    # read stats for this database
    sMarxanDir <- paste0(sMarxanHome,"/",list_dirs_mx[i],"/")
    sName <- list_dirs_mx[i]
    sType <- "marxan"
    sPuRdataFile <- paste0(sMarxanDir,"/pulayer/pulayer.Rdata")
    sCreated <- as.character(file.info(sPuRdataFile)$ctime)
    # a dataset counts as "used" once a run has produced a summary file
    sSumFile <- paste0(sMarxanDir,"/output/output_sum.csv")
    fUsed <- file.exists(sSumFile)
    if (fUsed)
    {
      sLastUsed <- as.character(file.info(sSumFile)$ctime)
    } else {
      sLastUsed <- ""
    }
    pudat <- read.csv(paste0(sMarxanDir,"/input/pu.dat"),stringsAsFactors=FALSE)
    sPlanningUnits <- nrow(pudat)
    specdat <- read.csv(paste0(sMarxanDir,"/input/spec.dat"),stringsAsFactors=FALSE)
    sFeatures <- nrow(specdat)
    putable <- read.dbf(paste0(sMarxanDir,"/pulayer/pulayer.dbf"))
    sPolygons <- nrow(putable)
    # zones/costs only apply to MarZone datasets
    sZones <- ""
    sCosts <- ""
    fLeaflet <- file.exists(paste0(sMarxanDir,"/pulayer/leaflet.Rdata"))
    a_row <- c(sName,sType,as.character(fUsed),sPlanningUnits,sFeatures,sPolygons,as.character(fLeaflet),sZones,sCosts,sCreated,sLastUsed)
    the_table <- rbind(the_table,a_row)
  }
  for (i in seq_along(list_dirs_mz))
  {
    # read stats for this database
    sMarxanDir <- paste0(sMarZoneHome,"/",list_dirs_mz[i],"/")
    sName <- list_dirs_mz[i]
    sType <- "marzone"
    sCreated <- as.character(file.info(sMarxanDir)$ctime)
    sSumFile <- paste0(sMarxanDir,"/output/output_sum.csv")
    fUsed <- file.exists(sSumFile)
    if (fUsed)
    {
      sLastUsed <- as.character(file.info(sSumFile)$ctime)
    } else {
      sLastUsed <- ""
    }
    pudat <- read.csv(paste0(sMarxanDir,"/input/pu.dat"),stringsAsFactors=FALSE)
    sPlanningUnits <- nrow(pudat)
    specdat <- read.csv(paste0(sMarxanDir,"/input/spec.dat"),stringsAsFactors=FALSE)
    sFeatures <- nrow(specdat)
    putable <- read.dbf(paste0(sMarxanDir,"/pulayer/pulayer.dbf"))
    sPolygons <- nrow(putable)
    zonesdat <- read.csv(paste0(sMarxanDir,"/input/zones.dat"),stringsAsFactors=FALSE)
    sZones <- nrow(zonesdat)
    costsdat <- read.csv(paste0(sMarxanDir,"/input/costs.dat"),stringsAsFactors=FALSE)
    sCosts <- nrow(costsdat)
    fLeaflet <- file.exists(paste0(sMarxanDir,"/pulayer/leaflet.Rdata"))
    a_row <- c(sName,sType,as.character(fUsed),sPlanningUnits,sFeatures,sPolygons,as.character(fLeaflet),sZones,sCosts,sCreated,sLastUsed)
    the_table <- rbind(the_table,a_row)
  }
  colnames(the_table) <- col_names
  rownames(the_table) <- rep("",nrow(the_table))
  return(the_table)
})
observe({
  # Record the client's IP address and derive its geolocation and
  # hostname (freegeoip() and ip_to_hostname() are helpers defined
  # elsewhere in the app).
  sUserIP <<- as.character(input$ipid)
  UserGeoIP <<- freegeoip(sUserIP)
  Hostname <- ip_to_hostname(sUserIP)
  sUserHostname <<- Hostname[[1]]
})
observe({
  # User has logged in. Record details about the HTTP session.
  query <- parseQueryString(session$clientData$url_search)
  sText <- paste0("fingerprint: ", input$fingerprint,"\n",
                  "ip: ", sUserIP,"\n",
                  "userhostname: ",sUserHostname,"\n",
                  "protocol: ", session$clientData$url_protocol, "\n",
                  "hostname: ", session$clientData$url_hostname, "\n",
                  "pathname: ", session$clientData$url_pathname, "\n",
                  "port: ", session$clientData$url_port, "\n",
                  "search: ", session$clientData$url_search, "\n",
                  "queries: ",paste(names(query), query, sep = "=", collapse=", "),"\n")
  # write to the session log file and echo to the console
  AppendLogFile(sLogFile,sText)
  cat(paste0(sText,"\n"))
})
})
|
bc64979e465de9dadf0a50827732d426a9c7f8f2
|
fc1d61d27acdd923dd91f78e6633ba62caa31c82
|
/man/DCF-devel-package.Rd
|
fb14f4450b64ce2f868c22ac4f42ce14147bec79
|
[] |
no_license
|
dtkaplan/DCF-devel
|
7cbba8f25403ae8cb6c2d86d5a3034073e813058
|
cda59571dfeb3b4832705d83d4a1fa71d2f969af
|
refs/heads/master
| 2021-01-21T13:08:42.353997
| 2014-07-03T21:40:16
| 2014-07-03T21:40:16
| 19,037,348
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
rd
|
DCF-devel-package.Rd
|
\name{DCFdevel-package}
\alias{DCFdevel-package}
\alias{DCFdevel}
\docType{package}
\title{
Testing organization of Materials for the DCF course
}
\description{
This is a package to let me try out different ideas for organizing the DCF course
}
\details{
\tabular{ll}{
Package: \tab DCFdevel\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-04-22\cr
License: \tab Do not use. All rights reserved.\cr
}
}
\author{
Daniel Kaplan
Maintainer: Daniel Kaplan <dtkaplan@gmail.com>
}
\references{
% Background references to be added.
}
\keyword{ package }
\seealso{
}
\examples{
}
|
ed254b654745e5e3013fe4f565abddb01c0ce238
|
c9ee3e253e94cfe599f7ebefa3cab108772a50a3
|
/carcomp021721.R
|
ffbe92d7f5fd2289417e8423d0ebf140b7d6c5bd
|
[] |
no_license
|
RAFrancais/UsedCarComparisons
|
7b277ab2a4577ba5cade41187068c599d20551c4
|
90d3410258314e9ebc04aaf715b9290f381ee7e4
|
refs/heads/master
| 2023-04-24T12:30:40.486047
| 2021-05-12T09:01:46
| 2021-05-12T09:01:46
| 366,651,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 826
|
r
|
carcomp021721.R
|
# Combine used-car price snapshots (2021-02-17) from three sources
# (at_* / cars_* / tc_* tables) into one data frame per model (xB, Avalon,
# tC) and save each combined table to the working directory.
#
# Improvement: the source directory was hard-coded nine times; hoist it
# into one variable and build paths with file.path().
data_dir <- "D:/RLearning/usedcarcomp"
# Each .Rda file defines an object with the same name as the file stem.
for (tbl in c("at_xbprice", "cars_xbprice", "tc_xbprice",
              "at_avalonprice", "cars_avalonprice", "tc_avalonprice",
              "at_tcprice", "cars_tcprice", "tc_tcprice")) {
  load(file.path(data_dir, paste0(tbl, "_20210217.Rda")))
}
# Stack the three sources per model.
xb0217 <- rbind(at_xbprice, cars_xbprice, tc_xbprice)
avalon0217 <- rbind(at_avalonprice, cars_avalonprice, tc_avalonprice)
tc0217 <- rbind(at_tcprice, cars_tcprice, tc_tcprice)
save(xb0217, file = "xb021721.Rda")
save(tc0217, file = "tc021721.Rda")
save(avalon0217, file = "avalon021721.Rda")
|
1872a9e8bda5bc3349e322fb20f1310b7318a443
|
28e604609e6d97c95f1856d80b6ed4eb2fadefe0
|
/man/neighbours.Rd
|
c6e40f80fe40e58dbc6f1b921315899dfb26e2ca
|
[] |
no_license
|
beerda/hexmatrix
|
f75d4c4ad819c8a7d9049d475d92a5fda5ec7485
|
1ae37a739fb5803932d3e71a9ff972b4342ea809
|
refs/heads/master
| 2021-06-21T05:32:09.752989
| 2021-04-28T10:32:09
| 2021-04-28T10:32:09
| 210,574,230
| 0
| 0
| null | 2020-11-04T08:17:30
| 2019-09-24T10:20:51
|
R
|
UTF-8
|
R
| false
| true
| 848
|
rd
|
neighbours.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neighbours.R
\name{neighbours}
\alias{neighbours}
\title{Returns an array that represents values of neighbours
in the clock-wise order starting from top-left neighbour}
\usage{
neighbours(m, self = FALSE)
}
\arguments{
\item{m}{a matrix or array to compute the neighbours from}
\item{self}{whether to add the 7-th direction, which is a copy of \code{m}}
}
\value{
If \code{m} is a matrix, the returned value is an array with third
dimension representing the neighbour direction (1 to 6, or 7 if \code{self} is
\code{TRUE}). If \code{m} is a 3D array, then the result is a 4D array with
the fourth dimension representing the direction.
}
\description{
Returns an array that represents values of neighbours
in the clock-wise order starting from top-left neighbour
}
|
9232f5ddd71051e8965428008acb666b93e228c0
|
12587953f560adfc3c1be32a2c9775d223389c6d
|
/plot3.R
|
3b30d591e684d3613b586cc5016cc255d87c499e
|
[] |
no_license
|
DrShashiPonraja/ExData_Plotting1
|
3a2bcb275dc19fac06fb74ab61b0c366c9ee97c7
|
2ad7d6d7b4dc11fa6ffcb4ee1cb972af37b680cc
|
refs/heads/master
| 2021-05-08T14:47:58.791401
| 2018-02-04T06:01:45
| 2018-02-04T06:01:45
| 120,099,649
| 0
| 0
| null | 2018-02-03T14:44:25
| 2018-02-03T14:44:24
| null |
UTF-8
|
R
| false
| false
| 421
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series on one set of axes,
# then copied to ./figure/plot3.png.
source('getData.R')
power_data <- getData()
# Draw an empty frame first so all three series share the same axes.
plot(power_data$DateTimeFormat, power_data$Sub_metering_1, type = "n",
     xlab = "", ylab = "Energy sub metering")
# One line per sub-metering channel, in the conventional colours.
channel_colours <- c(Sub_metering_1 = "black",
                     Sub_metering_2 = "red",
                     Sub_metering_3 = "blue")
for (channel in names(channel_colours)) {
  lines(power_data$DateTimeFormat, power_data[[channel]], type = 'l',
        col = channel_colours[[channel]])
}
# Copy the screen device to a PNG file and close that device.
dev.copy(png, file = './figure/plot3.png')
dev.off()
|
120eb797ebc7961175980e917acd1e7584645113
|
b8e545dd1ff413af4d787567159f6b5419170070
|
/setSeedsForClassifications.R
|
974ceb47ce58f476a4d1b5a51e076e8ce71098a1
|
[] |
no_license
|
Wytz/DiseaseTrajectories
|
3849bc9dd7c407eb58c1fb2f6e3fbe50b5bb810b
|
58034f94d011fb92ab9a00eeb2abf7ca1c24799f
|
refs/heads/master
| 2020-07-11T22:59:58.592695
| 2019-11-07T10:18:38
| 2019-11-07T10:18:38
| 204,661,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,067
|
r
|
setSeedsForClassifications.R
|
# Set the seeds which are used to randomize in Caret, so the per-fold AUCs
# are paired across models and a paired T-test between the AUCs is valid.
#
# Fixes relative to the original script:
#  * File paths: list.files() returns bare file names, so they must be
#    joined to `directory` with file.path(); the original
#    paste0(directory, file) produced "Jensen feature setsFeature table...".
#  * `seeds`: caret::trainControl() requires a list of length B + 1
#    (B = number of resamples), each element an integer vector with one
#    seed per tuning combination -- not a plain numeric vector.
#directory = "Akker feature sets"
directory = "Jensen feature sets"
require(caret)
require(ranger)
require(doMC)
require(data.table)
registerDoMC(cores = 3)
# Select the seeds: 10-fold CV repeated 10 times = 100 resamples, plus a
# final seed for the last model fit. One tuning combination, so each
# element is a length-1 vector.
n_resamples <- 100
SampledSeeds <- lapply(seq_len(n_resamples), function(i) sample(c(1:10000), size = 1))
SampledSeeds[[n_resamples + 1]] <- sample(c(1:10000), size = 1)
# Create the TrainControl
trc = trainControl(method = "repeatedcv", number = 10, repeats = 10, savePredictions = T,
                   allowParallel = T, summaryFunction = twoClassSummary, classProbs = TRUE,
                   seeds = SampledSeeds)
feature_sets = list.files(directory, pattern = "Feature table for ")
removalColumns = c("ICPC1", "ICPC2", "CUI_A", "DiseaseA", "dpsA", "CUI_B", "DiseaseB", "dpsB", "goldstandard")
for(j in seq_along(feature_sets)){
  f = as.data.frame(fread(file.path(directory, feature_sets[j])))
  gs = factor(f$goldstandard, levels = c("VALID", "INVALID"))
  # Drop identifier/label columns so only predictor features remain.
  # (The original -which(...) form selects zero columns when nothing
  # matches; the logical complement is safe either way.)
  f = f[, !(colnames(f) %in% removalColumns), drop = FALSE]
  # Single mtry/min.node.size/splitrule combination, scored by ROC AUC.
  model = train(y = gs, x = f, method = "ranger", metric = "ROC",
                tuneGrid = data.frame(mtry = round(sqrt(ncol(f))),
                                      min.node.size = 1,
                                      splitrule = "gini"),
                save.memory = T,
                trControl = trc)
  print(model)
  # Keep each fitted model as m1, m2, ... for the paired tests below.
  assign(paste0("m", j), model)
}
print("Between Split directed and Split Undirected")
t.test(m4$resample$ROC, m6$resample$ROC, paired = T, conf.level = 0.95)
print("Between Split Mixed and Split Undirected")
t.test(m5$resample$ROC, m6$resample$ROC, paired = T, conf.level = 0.95)
print("Between Split Mixed and Split Directed")
t.test(m4$resample$ROC, m5$resample$ROC, paired = T, conf.level = 0.95)
print("Between Directed metapaths and Undirected metapaths")
t.test(m1$resample$ROC, m3$resample$ROC, paired = T, conf.level = 0.95)
print("Between Mixed metapaths and Undirected metapaths")
t.test(m2$resample$ROC, m3$resample$ROC, paired = T, conf.level = 0.95)
print("Between Directed metapaths and Mixed metapaths")
t.test(m2$resample$ROC, m1$resample$ROC, paired = T, conf.level = 0.95)
|
36b351721904d5573e9ceb0cfa738a4188d5d9c4
|
7af0de4a6767812f392bd69a2298f45550f8abb5
|
/neuralnet_diags.R
|
a5be68fd323e1d0d1e1d064db4b922a8db5d6360
|
[] |
no_license
|
SudhakaranP/Statistical_Learning_Basics
|
615077494c15c9ae8f28cd3e856eee7b8cd03678
|
40162b9831bdc165da5af926cc2c7ba8a9fe674f
|
refs/heads/master
| 2021-06-14T18:56:40.890625
| 2016-12-14T02:04:34
| 2016-12-14T02:04:34
| 105,226,455
| 0
| 1
| null | 2017-09-29T03:37:24
| 2017-09-29T03:37:23
| null |
UTF-8
|
R
| false
| false
| 7,521
|
r
|
neuralnet_diags.R
|
# action like a lekprofile
# Profile-based and weight-based diagnostics for a fitted neuralnet model.
#
# Args:
#   nn: a model fitted with neuralnet::neuralnet(), single response only.
#
# Returns a list with:
#   preds      - predictions while one input varies over its range and the
#                other inputs are held at each of their quantiles
#   var.imp.p  - variable importance derived from those profiles
#   var.imp.w  - importance from accumulating weights through all layers
#   var.imp.f  - importance from the input-layer weights only
#   plus the matrices/values used to build them and the layer sizes.
neuralnet.diagnostics <- function(nn) {
  # response and predictor names parsed from the model formula text
  outs <- unlist(strsplit(
    as.character(
      formula(nn))[2]
    , split = " + "
    , fixed = TRUE))
  nouts <- length(outs)
  if(nouts != 1) stop("Function can only handle single response variable")
  ins <- unlist(strsplit(
    as.character(
      formula(nn))[3]
    , split = " + "
    , fixed = TRUE))
  nins <- length(ins)
  # quantiles and ranges for each input.var
  ins.range <- sapply(nn$data[, ins], range)
  ins.quant <- sapply(nn$data[, ins], quantile)
  # matrix of all the quantiles for all input.vars
  # (5 quantile levels x 10 evaluation points = 50 rows)
  ins.mat <- ins.quant[rep(1:5, each = 10), ]
  # empty df to hold the predictions
  preds <- list()
  input.vals <- list()
  preds$quantiles <- rownames(ins.mat)
  preds$quantiles <- factor(preds$quantiles, levels = c("0%", "25%", "50%", "75%", "100%"), ordered = TRUE)
  for (var in ins) {
    # one input.var at a time
    # temporarily replace the static quantiles with a incremental range
    ins.val <- seq(ins.range[1, var], ins.range[2, var], length.out = 10)
    ins.mat[, var] <- ins.val
    input.vals[[var]] <- ins.val
    # empty vector to hold the predictions
    pred <- numeric(50)
    for (i in 1:5) {
      pred[1:10 + (i - 1) * 10] <- neuralnet::compute(nn
                                                      , ins.mat[1:10 + (i - 1) * 10, ])$net.result
    }
    # capture
    preds[[var]] <- pred
    # reset
    ins.mat <- ins.quant[rep(1:5, each = 10), ]
  }
  preds <- as.data.frame(preds)
  input.vals <- as.data.frame(input.vals)
  # variable importance from profile
  var.imp.p <- matrix(nrow = 3, ncol = nins, dimnames = list(NULL, ins))
  # spread (max - min) of the predictions within each quantile level
  pred.ranges <- as.data.frame(
    lapply(preds[, -1]
           , function(x) {
             tapply(x
                    , preds$quantiles
                    , function(y) {
                      range(y)[2] - range(y)[1]
                    }
             )
           }
    )
  )
  for (var in ins) {
    var.imp.p[, var] <- c(pred.ranges[[var]]["0%"]
                          , pred.ranges[[var]]["100%"]
                          , max(pred.ranges[[var]]))
  }
  dimnames(var.imp.p) <- list(
    c("vars.quant.0", "vars.quant.100", "max.effect")
    , NULL)
  var.imp.p <- data.frame(input.var = factor(ins, levels = ins[order(var.imp.p["max.effect", ])])
                          , t(var.imp.p))
  # variable importance from weights
  wts <- nn$weights[[1]]
  layers <- numeric(0)
  # input layer = rows of wts element 1, minus 1
  # (the first values is the bias value)
  layers[1] <- dim(wts[[1]])[1] - 1
  seq.layers <- seq_along(wts)
  for (i in seq.layers) {
    # other layers are the number of columns of each wts element
    layers[i + 1] <- dim(wts[[i]])[2]
  }
  # extract the biases and weights
  bias <- list()
  ptrons <- list()
  for (i in seq.layers) {
    bias[[i]] <- wts[[i]][1, ]
    ptrons[[i]] <- wts[[i]][-1, ]
  }
  # calculate the influence by weights accumulation
  # NOTE(review): the accumulation is seeded with runif(1), so var.imp.w
  # varies between calls -- confirm this is intended
  var.imp.w <- runif(1)
  for (i in rev(seq.layers)) {
    var.imp.w <- var.imp.w - bias[[i]]
    if (layers[i + 1] == 1) {
      var.imp.w <- var.imp.w * ptrons[[i]]
    } else {
      # sign according to input layer only
      var.imp.w <- ptrons[[i]] * rep(var.imp.w, each = layers[i])
      var.imp.w <- apply(var.imp.w, 1, sum)
      if(i == 1) {
        var.imp.w <- abs(var.imp.w) * sign(apply(ptrons[[i]], 1, sum))
      }
    }
  }
  var.imp.w <- data.frame(input.var = factor(ins, levels = ins[order(abs(var.imp.w))])
                          , effect = var.imp.w
                          , sgn = factor(sign(var.imp.w), labels = c("negative", "positive")))
  # calculate the influence by weights of only the input layer
  var.imp.f <- apply(ptrons[[1]], 1, sum)
  var.imp.f <- data.frame(input.var = factor(ins, levels = ins[order(abs(var.imp.f))])
                          , effect = var.imp.f
                          , sgn = factor(sign(var.imp.f), labels = c("negative", "positive")))
  return(list(preds = preds
              , var.imp.p = var.imp.p
              , var.imp.w = var.imp.w
              , var.imp.f = var.imp.f
              , compute.matrix = ins.mat
              , input.values = input.vals
              , layers = layers
              , ptrons = ptrons
              , bias = bias))
}
library(lattice)
library(tidyr)
# Lattice profile plot of the predictions from neuralnet.diagnostics():
# the network response as one predictor varies while the others are held
# at their quantiles.
#
# Args:
#   nn.diag: list returned by neuralnet.diagnostics()
#   var:     optional single predictor name; when missing, all predictors
#            are shown as separate panels
#   ...:     further arguments passed to lattice::xyplot()
#
# Relies on MyLatticeScale / MyLatticeStrip / MyLatticeTheme existing in
# the calling environment.
nn.profile.plot <- function(nn.diag, var = NULL, ...) {
  if (missing(var)) {
    # modifying the list creates a function-local copy; the global
    # MyLatticeScale is left untouched
    MyLatticeScale$x <- list(relation = "free")
    number.of.levels <- length(unique(dimnames(nn.diag$compute.matrix[,])[[1]]))
    input.vars <- matrix(nrow = 0, ncol = nn.diag$layers[1])
    # replicate the evaluated input values once per quantile level so
    # they line up with the rows of preds
    for (i in 1:number.of.levels) {
      input.vars <- rbind(input.vars, nn.diag$input.values)
    }
    preds <- gather(nn.diag$preds, input.var, effect, - quantiles)
    preds$input <- gather(input.vars, input.var, input)[, -1]
    xlab.title <- "Predictor values"
    main.title <- "Profile Plot of changing each predictor
    while holding other predictors at quantiles"
    fmla <- as.formula("effect ~ input | input.var")
  } else {
    # single-variable version: validate the name against the prediction df
    n <- names(nn.diag$preds)
    n <- n[n != "quantiles"]
    if (!(var %in% n)) {stop("Variable selected does not exist.")}
    preds <- nn.diag$preds
    preds$input <- nn.diag$input.values[[var]]
    xlab.title <- var
    main.title <- paste("Profile Plot of changing"
                        , var, "\nwhile holding other predictors at quantiles")
    fmla <- as.formula(paste(var, "~ input"))
  }
  # one line per quantile level
  xyplot(fmla
         , group = quantiles
         , data = preds
         , type = "l"
         , xlab = xlab.title
         , ylab = "Predicted value (scaled)"
         , main = main.title
         , sub = paste("Hidden layers"
                       , paste(nn.diag$layers[-c(1, length(nn.diag$layers))]
                               , collapse = ", "))
         , scales = MyLatticeScale
         , strip = MyLatticeStrip
         , par.settings = MyLatticeTheme
         , auto.key = list(columns = 3
                           , points = FALSE
                           , lines = TRUE)
         , ...)
}
# Barchart of profile-based variable importance (var.imp.p from
# neuralnet.diagnostics): effect ranges at the extreme quantiles plus the
# maximum effect, per input variable. Extra arguments go to barchart().
nn.varimp.p.plot <- function(nn.diag, ...) {
  barchart(max.effect+vars.quant.0+vars.quant.100~input.var
           , data = nn.diag$var.imp.p
           , ylab = "Range of effect of changing each input
           while holding the others constant"
           , main = "Variable Importance Plot from profile"
           , sub = paste("Hidden layers"
                         , paste(nn.diag$layers[-c(1, length(nn.diag$layers))]
                                 , collapse = ", "))
           , scales = MyLatticeScale
           , strip = MyLatticeStrip
           , par.settings = MyLatticeTheme
           , auto.key = list(columns = 3)
           , ...)
}
# Dotplot of weight-based variable importance from neuralnet.diagnostics().
#
# Args:
#   nn.diag: list returned by neuralnet.diagnostics()
#   weight:  "w" = importance accumulated through all layers (var.imp.w),
#            "f" = input-layer weights only (var.imp.f)
#   ...:     further arguments passed to lattice::dotplot()
#
# Fix: the original left `d` undefined for any other `weight` value,
# producing an obscure "object 'd' not found" error; validate up front
# with match.arg so bad input fails with a clear message.
nn.varimp.w.plot <- function(nn.diag, weight = "w", ...) {
  weight <- match.arg(weight, c("w", "f"))
  if (weight == "w") {
    d <- nn.diag$var.imp.w
    main.title <- "Variable Importance Plot from weights accumulation"
  } else {
    d <- nn.diag$var.imp.f
    main.title <- "Variable Importance Plot from input weights"
  }
  # magnitude on the y axis; sign shown via the grouping colour
  dotplot(abs(effect)~input.var
          , groups = sgn
          , data = d
          , ylab = "Result of matrix multiplication of weights"
          , main = main.title
          , sub = paste("Hidden layers"
                        , paste(nn.diag$layers[-c(1, length(nn.diag$layers))]
                                , collapse = ", "))
          , scales = MyLatticeScale
          , strip = MyLatticeStrip
          , par.settings = MyLatticeTheme
          , auto.key = list(columns = 2)
          , ...)
}
|
454f3c02acfa80def2e8dc3b7b5a5427d7e1755e
|
cea7b5b1a105534c57ddbfb9382553758f94ab0f
|
/Lectures/Week 6/DFA_with_the_MARSS_pkg.R
|
27bf0e9fbdcd42fc135f7c98530c42589f4eaa30
|
[] |
no_license
|
YanVT/atsa2017
|
973ed4dc7fe812d0794421c516225da668cf6477
|
60313ca2e5a3aae47de68e1adb4f32c9c5cef4e0
|
refs/heads/master
| 2023-08-31T08:47:02.631251
| 2019-05-09T02:46:04
| 2019-05-09T02:46:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,572
|
r
|
DFA_with_the_MARSS_pkg.R
|
# Here is the code from Chap 9 of the MARSS User's Guide
# Dynamic factor analysis (DFA)
###################################################
### code chunk number 2: Cs01_read.in.data
###################################################
# Fix: the script calls data() and MARSS() from the MARSS package but
# never attached it, so it failed at the first data() call.
library(MARSS)
# load the data (there are 3 datasets contained here)
data(lakeWAplankton)
# we want lakeWAplanktonTrans, which has been transformed
# so the 0s are replaced with NAs and the data z-scored
dat = lakeWAplanktonTrans
# use only the 10 years from 1980-1989
plankdat = dat[dat[,"Year"]>=1980 & dat[,"Year"]<1990,]
# create vector of phytoplankton group names
phytoplankton = c("Cryptomonas", "Diatoms", "Greens",
                  "Unicells", "Other.algae")
# get only the phytoplankton
dat.spp.1980 = plankdat[,phytoplankton]
###################################################
### code chunk number 3: Cs02_transpose.data
###################################################
# transpose data so time goes across columns
dat.spp.1980 = t(dat.spp.1980)
# get number of time series
N.ts = dim(dat.spp.1980)[1]
# get length of time series
TT = dim(dat.spp.1980)[2]
###################################################
### code chunk number 4: Cs03_z.score
###################################################
# z-score each time series (subtract its mean, divide by its sd) so the
# DFA trends are comparable across taxa
Sigma = sqrt(apply(dat.spp.1980, 1, var, na.rm=TRUE))
y.bar = apply(dat.spp.1980, 1, mean, na.rm=TRUE)
dat.z = (dat.spp.1980 - y.bar) * (1/Sigma)
rownames(dat.z) = rownames(dat.spp.1980)
###################################################
### code chunk number 5: Cs04_plotdata
###################################################
# plot the five z-scored phytoplankton series, one panel each
spp = rownames(dat.spp.1980)
par(mfcol=c(3,2), mar=c(3,4,1.5,0.5), oma=c(0.4,1,1,1))
for(i in spp){
  plot(dat.z[i,],xlab="",ylab="Abundance index", bty="L", xaxt="n", pch=16, col="blue", type="b")
  axis(1,12*(0:dim(dat.spp.1980)[2])+1,1980+0:dim(dat.spp.1980)[2])
  title(i)
}
###################################################
### code chunk number 6: Cs05_set.up.Z
###################################################
# factor loadings matrix for m = 3 trends; the upper triangle is fixed
# at 0 for identifiability
Z.vals = list(
  "z11", 0 , 0 ,
  "z21","z22", 0 ,
  "z31","z32","z33",
  "z41","z42","z43",
  "z51","z52","z53")
Z = matrix(Z.vals, nrow=N.ts, ncol=3, byrow=TRUE)
###################################################
### code chunk number 7: Cs06_print.Z
###################################################
print(Z)
###################################################
### code chunk number 8: Cs07_set.up.QR
###################################################
# process variance Q and trend evolution B fixed at identity (standard DFA)
Q = B = diag(1,3)
###################################################
### code chunk number 9: Cs08_set.up
###################################################
# diagonal observation-error matrix, spelled out in long form
R.vals = list(
  "r11",0,0,0,0,
  0,"r22",0,0,0,
  0,0,"r33",0,0,
  0,0,0,"r44",0,
  0,0,0,0,"r55")
R = matrix(R.vals, nrow=N.ts, ncol=N.ts, byrow=TRUE)
###################################################
### code chunk number 10: Cs09_print
###################################################
print(R)
###################################################
### code chunk number 11: Cs10_set.up.R.short
###################################################
# equivalent text short-hand for the R matrix above
R = "diagonal and unequal"
###################################################
### code chunk number 12: Cs11_set.up.U
###################################################
# no drift or offsets (data are z-scored); the text form "zero" on the
# last line supersedes the matrix versions shown above it
x0 = U = matrix(0, nrow=3, ncol=1)
A = matrix(0, nrow=6, ncol=1)
x0 = U = A = "zero"
###################################################
### code chunk number 13: Cs12_set.up.x0
###################################################
# initial state variance
V0 = diag(5,3)
###################################################
### code chunk number 14: Cs13_define.model.list
###################################################
dfa.model = list(Z=Z, A="zero", R=R, B=B, U=U, Q=Q, x0=x0, V0=V0)
cntl.list = list(maxit=50)
###################################################
### code chunk number 15: Cs14_fit.data
###################################################
# fit the 3-trend DFA model
kemz.3 = MARSS(dat.z, model=dfa.model, control=cntl.list)
###################################################
### code chunk number 18: Cs15_plotfits
###################################################
# overlay model fits (Z %*% states + A) on the z-scored data
fit = kemz.3
spp = rownames(dat.z)
par(mfcol=c(3,2), mar=c(3,4,1.5,0.5), oma=c(0.4,1,1,1))
for(i in 1:length(spp)){
  plot(dat.z[i,],xlab="",ylab="abundance index",bty="L", xaxt="n", ylim=c(-4,3), pch=16, col="blue")
  axis(1,12*(0:dim(dat.z)[2])+1,1980+0:dim(dat.z)[2])
  par.mat=coef(fit,type="matrix")
  lines(as.vector(par.mat$Z[i,,drop=FALSE]%*%fit$states+par.mat$A[i,]), lwd=2)
  title(spp[i])
}
###################################################
### code chunk number 20: Cs16_set.up.two.trends.echo
###################################################
# refit with m = 2 trends using the short-form DFA interface
model.list = list(m=2, R="diagonal and unequal")
kemz.2 = MARSS(dat.spp.1980, model=model.list,
               z.score=TRUE, form="dfa", control=cntl.list)
###################################################
### code chunk number 21: Cs17_compare.mods.2n3
###################################################
# compare the 2- and 3-trend models by AICc (smaller is better)
print(cbind(model=c("3 trends", "2 trends"),
            AICc=round(c(kemz.3$AICc, kemz.2$AICc))),
      quote=FALSE)
###################################################
### code chunk number 23: Cs18_set.up.many.trends.echo (eval = FALSE)
###################################################
## # set new control params
## cntl.list = list(minit=200, maxit=5000, allow.degen=FALSE)
## # set up forms of R matrices
## levels.R = c("diagonal and equal",
## "diagonal and unequal",
## "equalvarcov",
## "unconstrained")
## model.data = data.frame()
## # fit lots of models & store results
## # NOTE: this will take a long time to run!
## for(R in levels.R) {
## for(m in 1:(N.ts-1)) {
## dfa.model = list(A="zero", R=R, m=m)
## kemz = MARSS(dat.z, model=dfa.model, control=cntl.list,
## form="dfa", z.score=TRUE)
## model.data = rbind(model.data,
## data.frame(R=R,
## m=m,
## logLik=kemz$logLik,
## K=kemz$num.params,
## AICc=kemz$AICc,
## stringsAsFactors=FALSE))
## assign(paste("kemz", m, R, sep="."), kemz)
## } # end m loop
## } # end R loop
###################################################
### code chunk number 24: Cs19_makemodeltable
###################################################
# calculate delta-AICc
model.data$delta.AICc = model.data$AICc - min(model.data$AICc)
# calculate Akaike weights
wt = exp(-0.5*model.data$delta.AICc)
model.data$Ak.wt = wt/sum(wt)
# sort results
model.tbl = model.data[order(model.data$AICc),-4]
# drop AICc from table
# calculate cumulative wts
model.tbl$Ak.wt.cum = cumsum(model.tbl$Ak.wt)
model.tbl = model.tbl[,-4]
###################################################
### code chunk number 26: Cs20_getbestmodel
###################################################
# get the "best" model
best.model = model.tbl[1,]
fitname = paste("kemz",best.model$m,best.model$R,sep=".")
best.fit = get(fitname)
###################################################
### code chunk number 27: Cs21_varimax
###################################################
# get the inverse of the rotation matrix
H.inv = varimax(coef(best.fit, type="matrix")$Z)$rotmat
###################################################
### code chunk number 28: Cs22_rotations
###################################################
# rotate factor loadings
Z.rot = coef(best.fit, type="matrix")$Z %*% H.inv
# rotate trends
trends.rot = solve(H.inv) %*% best.fit$states
###################################################
### code chunk number 29: Cs23_plotfacloadings
###################################################
spp = rownames(dat.z)
minZ = 0.05
ylims = c(-1.1*max(abs(Z.rot)), 1.1*max(abs(Z.rot)))
par(mfrow=c(ceiling(dim(trends.rot)[1]/2),2), mar=c(3,4,1.5,0.5), oma=c(0.4,1,1,1))
for(i in 1:best.model$m) {
plot(c(1:N.ts)[abs(Z.rot[,i])>minZ], as.vector(Z.rot[abs(Z.rot[,i])>minZ,i]),
type="h", lwd=2, xlab="", ylab="", xaxt="n", ylim=ylims, xlim=c(0,N.ts+1))
for(j in 1:N.ts) {
if(Z.rot[j,i] > minZ) {text(j, -0.05, spp[j], srt=90, adj=1, cex=0.9)}
if(Z.rot[j,i] < -minZ) {text(j, 0.05, spp[j], srt=90, adj=0, cex=0.9)}
abline(h=0, lwd=1, col="gray")
} # end j loop
mtext(paste("Factor loadings on trend",i,sep=" "),side=3,line=.5)
} # end i loop
###################################################
### code chunk number 30: Cs24_plottrends
###################################################
# get ts of trends
ts.trends = t(trends.rot)
par(mfrow=c(ceiling(dim(ts.trends)[2]/2),2), mar=c(3,4,1.5,0.5), oma=c(0.4,1,1,1))
# loop over each trend
for(i in 1:dim(ts.trends)[2]) {
# set up plot area
plot(ts.trends[,i],
ylim=c(-1.1,1.1)*max(abs(ts.trends)),
type="n", lwd=2, bty="L",
xlab="", ylab="", xaxt="n", yaxt="n")
# draw zero-line
abline(h=0, col="gray")
# plot trend line
par(new=TRUE)
plot(ts.trends[,i],
ylim=c(-1.1,1.1)*max(abs(ts.trends)),
type="l", lwd=2, bty="L",
xlab="", ylab="", xaxt="n")
# add panel labels
mtext(paste("Trend",i,sep=" "), side=3, line=0.5)
axis(1,12*(0:dim(dat.spp.1980)[2])+1,1980+0:dim(dat.spp.1980)[2])
} # end i loop (trends)
###################################################
### code chunk number 31: Cs25_plotbestfits
###################################################
# Model fits for the best DFA model: fitted value = Z %*% states + A.
par.mat=coef(best.fit, type="matrix")
fit.b = par.mat$Z %*% best.fit$states + matrix(par.mat$A, nrow=N.ts, ncol=TT)
spp = rownames(dat.z)
par(mfcol=c(3,2), mar=c(3,4,1.5,0.5), oma=c(0.4,1,1,1))
for(i in 1:length(spp)){
  # data (points) overlaid with model fit (line), one panel per species
  plot(dat.z[i,],xlab="",ylab="abundance index",bty="L", xaxt="n", ylim=c(-4,3), pch=16, col="blue")
  axis(1,12*(0:dim(dat.z)[2])+1,1980+0:dim(dat.z)[2])
  lines(fit.b[i,], lwd=2)
  title(spp[i])
}
###################################################
### code chunk number 32: Cs26_set-up-covar
###################################################
# Covariate series (temperature, total phosphorus) as 1 x T row matrices,
# the orientation MARSS expects for covariates.
temp = t(plankdat[,"Temp",drop=FALSE])
TP = t(plankdat[,"TP",drop=FALSE])
###################################################
### code chunk number 34: Cs27_fit.covar.echo (eval = FALSE)
###################################################
## model.list=list(m=2, R="unconstrained")
## kemz.temp = MARSS(dat.spp.1980, model=model.list, z.score=TRUE,
## form="dfa", control=cntl.list, covariates=temp)
## kemz.TP = MARSS(dat.spp.1980, model=model.list, z.score=TRUE,
## form="dfa", control=cntl.list, covariates=TP)
## kemz.both = MARSS(dat.spp.1980, model=model.list, z.score=TRUE,
## form="dfa", control=cntl.list, covariates=rbind(temp,TP))
###################################################
### code chunk number 36: Cs28_covar.AICs
###################################################
# AICc comparison of the no-covariate model against the covariate fits.
# NOTE(review): kemz.temp/kemz.TP/kemz.both come from the eval=FALSE chunk
# above (or a cached .rda) -- they must exist in the workspace here.
print(cbind(model=c("no covars", "Temp", "TP", "Temp & TP"),
AICc=round(c(best.fit$AICc, kemz.temp$AICc, kemz.TP$AICc,
kemz.both$AICc))), quote=FALSE)
###################################################
### code chunk number 37: Cs29_plotbestcovarfits
###################################################
# Same fit-vs-data panels as chunk 31, but for the temperature-covariate model.
par.mat=coef(kemz.temp, type="matrix")
fit.b = par.mat$Z %*% kemz.temp$states + matrix(par.mat$A, nrow=N.ts, ncol=TT)
spp = rownames(dat.z)
par(mfcol=c(3,2), mar=c(3,4,1.5,0.5), oma=c(0.4,1,1,1))
for(i in 1:length(spp)){
  plot(dat.z[i,],xlab="",ylab="abundance index",bty="L", xaxt="n", ylim=c(-4,3), pch=16, col="blue")
  axis(1,12*(0:dim(dat.z)[2])+1,1980+0:dim(dat.z)[2])
  lines(fit.b[i,], lwd=2)
  title(spp[i])
}
|
711e3177a5969875069cfb3f2d9acfee9bfb1cc4
|
8cc220a74dff794980b29352a5eb451bd2044d6f
|
/weather_dash/app.R
|
0a9476b70d0d5478fc0f560589886c9f65352fc7
|
[] |
no_license
|
austincauthon/weather_dash
|
4dadaeb68b215725cc91087d5f00a83ba7505adb
|
a74e7fc382e257b02f2b446e1450e652d0492bcc
|
refs/heads/master
| 2020-12-07T15:23:39.345039
| 2017-07-07T02:07:54
| 2017-07-07T02:07:54
| 95,505,645
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,589
|
r
|
app.R
|
# --- Weather dashboard: setup ------------------------------------------------
# NOTE(review): rm(list=ls()) wipes the current workspace; tolerable only
# because this file runs as a standalone shiny app, but should be removed.
rm(list=ls())
library(darksky) # powered by dark sky
library(data.table)
library(ggplot2)
library(gridExtra)
library(magrittr)
library(lubridate)
library(scales)
library(shiny)
library(zipcode)
# Project plotting helpers (weekly_graph, hourly_graph, windspeed_graph, ...)
source("R/weather_functions.R")
# Sets the Dark Sky API key; absolute Windows path, so this app only runs
# on the author's machine as-is.
source("C:/Users/set-darksky-api.R") # need key from darksky.net to work
# zip -> city/state/lat/lon lookup table, as a data.table for fast subsetting
data(zipcode)
setDT(zipcode)
# Shiny App ---------------------------------------------------------------
# Page layout: zip input with current conditions on top, hourly and weekly
# forecast graphs in the middle, wind speed (click-sensitive) and wind
# bearing panels plus a small value readout at the bottom.
ui <- fluidPage(
  fluidRow(
    column(6,
           inputPanel(textInput(inputId = 'zip', label = 'Enter a Zipcode', value = '66210'),
                      fluidPage(
                        fluidRow(uiOutput(outputId = 'citystate')),
                        fluidRow(uiOutput(outputId = 'conditions')))
           )
    )
  ),
  fluidRow(
    column(6,
           plotOutput(outputId = "hourly_graph")),
    column(6,
           plotOutput(outputId = "weekly_graph"))
  ),
  fluidRow(
    column(6,
           fluidPage(
             fluidRow(),
             # clicks on this plot drive click_time() in the server
             fluidRow(plotOutput(outputId = "windspeed_graph", click = 'plot_click'))
           )),
    column(3,
           fluidPage(
             fluidRow(),
             fluidRow(plotOutput(outputId = 'windbearing_graph'))
           )),
    column(3,
           verbatimTextOutput(outputId = 'table_info'))
  )
)
# Shiny server: every output is driven by the single `zip` text input plus
# clicks on the windspeed plot.
server <- function(input, output) {

  # Download a forecast for the lat/lon of the entered zip code.
  # NOTE(review): when the zip is unknown this reactive returns the value of
  # print('not yet') (the string itself); downstream outputs re-check the
  # zipcode lookup before calling it, so the bogus value is never used.
  zip_weather_data <- reactive({
    if(length(zipcode[zip == input$zip]$city) > 0){
      print('getting new forecast...')
      get_current_forecast(latitude = as.character(zipcode[zip == input$zip,]$latitude),
                           longitude = as.character(zipcode[zip == input$zip,]$longitude))
    } else {
      print('not yet')
    }
  })

  # "City, ST" header markup for the matched zip.
  city_state <- reactive({
    my_city <- zipcode[zip == input$zip,]$city
    my_state <- zipcode[zip== input$zip,]$state
    paste0("<H4><b>",my_city, ", ", my_state,"</H4></b>")
  })

  # One-line current-conditions sentence, e.g. "It is cloudy and 72°F".
  conditions <- reactive({
    paste0("<H5><b>It is ", tolower(zip_weather_data()$currently$summary), " and ",
           round(zip_weather_data()$currently$temperature), "°F</H5>")
  })

  # Hour selected by clicking the windspeed graph; defaults to the first
  # forecast hour before any click. The clicked x value is an epoch-seconds
  # number, converted back to POSIXct and rounded to the hour.
  click_time <- reactive({
    print(input$plot_click$x)
    if(is.null(input$plot_click$x)) {
      min(zip_weather_data()$hourly$time)
    } else {
      as.POSIXct(input$plot_click$x, origin ='1970-01-01 00:00.00 UTC') %>% round(units = "hours")
    }
    #print(input$plot_click)
    #as.POSIXct(input$plot_click$x)
  })

  # Each output guards on a successful zip lookup so nothing renders (and no
  # API call result is consumed) while the zip is invalid/incomplete.
  output$weekly_graph <- renderPlot({
    if(length(zipcode[zip == input$zip]$city) > 0){
      weekly_graph(zip_weather_data()$daily)
    }
  })
  output$hourly_graph <- renderPlot({
    if(length(zipcode[zip == input$zip]$city) > 0) {
      hourly_graph(zip_weather_data()$hourly)
    }
  })
  output$conditions <- renderText({
    if(length(zipcode[zip == input$zip]$city) > 0) {
      HTML(conditions())
    }
  })
  output$citystate <- renderText({
    if(length(zipcode[zip == input$zip]$city) > 0) {
      HTML(city_state())
    } else {
      HTML('<H4>Zip code not found...</H4>')
    }
  })
  output$windspeed_graph <- renderPlot({
    if(length(zipcode[zip == input$zip]$city) > 0) {
      windspeed_graph(zip_weather_data()$hourly)
    }
  })
  output$windbearing_graph <- renderPlot({
    if(length(zipcode[zip == input$zip]$city) > 0) {
      windbearing_graph(zip_weather_data()$hourly, click_time())
    }
  })
  # temperature at the clicked hour (raw print; hourly is a data.table)
  output$table_info <- renderPrint(zip_weather_data()$hourly[time==click_time()]$temperature)
}
shinyApp(ui = ui, server = server)
|
55bf2ad70292b2f972967a2a26bcc5e24d1b7726
|
86422f71fb0db244ea0c49909563a9420c584128
|
/R/facebook.R
|
82adc5c5cb075c98a80afa53f4b0f5a4a8a835b8
|
[] |
no_license
|
Decision-Stats/s15_codes
|
0d69fb9e95faabf35d41fd327ed490968cf2dbac
|
673a8078163a2eddd02fe418744a71542326c2e0
|
refs/heads/master
| 2021-01-19T06:58:29.421813
| 2015-07-10T17:16:21
| 2015-07-10T17:16:21
| 38,714,623
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
facebook.R
|
# Retrieve basic profile and friend information via the Facebook Graph API.
# Use the following site to generate the access token:
# https://developers.facebook.com/tools/explorer
library(Rfacebook)
library(Rook)
library(RCurl)
require(rjson)
# SECURITY(review): a live OAuth access token is hard-coded and committed to
# version control. It should be revoked and loaded from an environment
# variable or a non-versioned file instead. Left byte-identical here because
# this is a documentation-only edit.
access_tk="CAACEdEose0cBAIs9NDtrWnwYFtbAU38nNwKvk4BMmvg8rqnYVyDFigBflqaiHEIjdCMTbUCgtrZAZCkh9ZAM1RXFjKSJw7uJtn2F6mY13BOBrsiTWOxRMAKuQfQCHNX71310TXZBUDZBXieg955rAdDlkEy8pE9QJ6hQGcUyyi1AXCy7ZCDga1Evd5Wf9eBaYpg0RdGBaBwUOpQJEGiRrU"
# Own profile, then friend list and friends' public details.
my_account=getUsers("me",access_tk)
my_account
my_friends=getFriends(access_tk,simplify=F)
my_friends_account=getUsers(my_friends$id, token=access_tk, private_info=TRUE)
# Gender breakdown across friends.
table(my_friends_account$gender)
|
409d39396d29cf3860c9ed4925b42bc3d4d92351
|
eb8e225361c5a640d23207735e88d87aa8477533
|
/man/Mstep.hh.MSAR.with.constraints.Rd
|
c4a98e8718c8ba6912ff86b10ce4a82f40fb7112
|
[] |
no_license
|
cran/NHMSAR
|
6ec23ee69d3398005f85e64293b32fe5f71b5375
|
6fbbfdd21700d5b6b95322844abdc93129cddc22
|
refs/heads/master
| 2022-03-03T06:41:29.892008
| 2022-02-09T06:40:11
| 2022-02-09T06:40:11
| 27,052,794
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,616
|
rd
|
Mstep.hh.MSAR.with.constraints.Rd
|
\name{Mstep.hh.MSAR.with.constraints}
\alias{Mstep.hh.MSAR.with.constraints}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
M step of the EM algorithm for fitting homogeneous multivariate Markov switching auto-regressive models with constraints on VAR models.}
\description{
M step of the EM algorithm for fitting homogeneous multivariate Markov switching auto-regressive models with constraints on VAR models, called in fit.MSAR. Maximum likelihood is used. Matrices A and sigma are diagonal by blocks.}
\usage{
Mstep.hh.MSAR.with.constraints(data, theta, FB, K, d.y)
}
\arguments{
\item{data}{
array of univariate or multivariate series with dimension T x N.samples x d.
T: number of time steps of each sample, N.samples: number of realisations of the same stationary process, d: dimension.
}
\item{theta}{
model's parameter; object of class MSAR. See also init.theta.MSAR.
}
\item{FB}{
Forward-Backward results, obtained by calling Estep.MSAR function}
\item{K}{
number of sites. For instance, if one considers wind at k locations, K=k. Or more generally number of independent groups of components. }
\item{d.y}{
dimension in each sites. For instance, if one considers only wind intensity than d.y = 1; but, if one considers cartesian components of wind, then d.y =2.}
}
\value{
\item{A0}{intercepts}
\item{A}{AR coefficients}
\item{sigma}{variance of innovation}
\item{prior}{prior probabilities}
\item{transmat}{transition matrix}
}
%\references{}
\author{
Valerie Monbet, valerie.monbet@univ-rennes1.fr
}
\seealso{
Mstep.hh.MSAR, fit.MSAR, Mstep.hh.SCAD.MSAR
}
|
a8f017b0fa6f61fa7703a8eb1b3b9b25feb9e159
|
91d9b5e6c26b41b9e0dd369aa4984462a629b38e
|
/man/reorder_factor.Rd
|
7faaab4f943cc7b1dbd569cdde80d38dbd2ccf7b
|
[] |
no_license
|
jeffbone/First-Package
|
d3b421952bb709f2c7aa3cdc91ac82475880bbaf
|
d5888b58b07e2f66a7f8aa27956e2f458fc12c69
|
refs/heads/master
| 2021-01-10T09:02:21.907372
| 2015-12-01T17:41:51
| 2015-12-01T17:41:51
| 46,591,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 372
|
rd
|
reorder_factor.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/reorder.R
\name{reorder_factor}
\alias{reorder_factor}
\title{Reorder the levels of a factor by frequency}
\usage{
reorder_factor(x)
}
\arguments{
\item{x}{factor}
}
\value{
factor
}
\description{
Reorder the levels of a factor by frequency
}
\examples{
reorder_factor(iris$Species)
}
|
ffd30f7f19db216190126a2cc325ff16b30e1dce
|
1df7034a9bafd862b03994f97b4ead5445488a12
|
/examples/ex01DistanceVNearness.R
|
edaabc4aee875e04d64dee3237dda9a0aa938a81
|
[] |
no_license
|
neurodata/U-Rerf
|
c28d76baacecbf8f8be4cce220294ade68575eeb
|
f3bf3d91892eae7817a6106caab094b620b964f3
|
refs/heads/master
| 2021-05-14T08:37:27.760419
| 2019-02-12T17:09:07
| 2019-02-12T17:09:07
| 116,304,664
| 2
| 8
| null | 2018-12-03T21:52:37
| 2018-01-04T20:31:15
|
R
|
UTF-8
|
R
| false
| false
| 2,372
|
r
|
ex01DistanceVNearness.R
|
# Experiment: compare true Euclidean distance against the urerf similarity
# ("nearness") on a synthetic 10-D dataset.
source('../rfr_us.R')
library(ggplot2)
# number of trees for forest
numtrees <- 100
# number of dimensions in dataset
m <- 10
# number of samples in dataset
sizeD <- 1000
# the 'k' of k nearest neighbors
depth=8
k = 3
# NOTE(review): `k` is defined but never used below -- confirm before removing.
# create a sizeD by m synthetic dataset
# (sorting all m*sizeD uniforms and filling column-wise yields monotone
#  columns -- the "10-D Line" referenced in the plot titles)
X <- matrix(sort(runif(m*sizeD)), nrow=sizeD, ncol=m)
AkNN <- matrix(0, nrow=sizeD, ncol=sizeD)
# find the actual euclidean distance between all samples of the synthetic dataset
for(z in 1:sizeD){
AkNN[z,] <- sqrt(rowSums(sweep(X,2,X[z,])^2))
}
# create a similarity matrix using urerf
sM <- urerf(X, numtrees, depth=depth, mtry=5)
# keep only informative entries: 0 = never co-leafed, 1 = self-similarity
nnzPts <- which(sM$similarityMatrix != 0 & sM$similarityMatrix != 1)
#create output
ssd <- data.frame(Distance = AkNN[nnzPts], Nearness = sM$similarityMatrix[nnzPts])
png(file="results/ex01_10dLineDistVNearAll.png")
p <- ggplot(aes(x = Nearness, y = Distance), data = ssd) + geom_point() + labs(title="Distance vs Similarity of All Points to All Other Points\n10-D Line, n=1000, depth=8, trees=100\n(0 Similarity Omitted)")
p <- p + scale_y_log10()
p <- p + xlab("Similarity") + ylab("Euclidean Distance")
print(p)
dev.off()
# Second figure: distance vs similarity restricted to samples 499-501,
# coloured by which of the three samples each comparison belongs to.
png(file="results/ex01_10dLineDistVNear3Subset.png")
nnzPts <- which(sM$similarityMatrix[,499:501] != 0 & sM$similarityMatrix[,499:501] != 1)
ssd <- data.frame(Distance = AkNN[,499:501][nnzPts], Nearness = sM$similarityMatrix[,499:501][nnzPts])
# labels follow column-major order of the 3-column subset
groupLabels <- c(rep("499",sizeD), rep("500", sizeD), rep("501",sizeD ))[nnzPts]
ssd[["Sample"]] <- groupLabels
# BUGFIX: title typo "Distancech" -> "Distance"
p <- ggplot(aes(x = Nearness, y = Distance, color = Sample), data = ssd) + geom_point()+ labs(title="Distance vs Similarity of points 499-501 to all other points\n10-D Line, n=1000, depth=8, trees=100\nThree Samples (0 Similarity omitted)")+ geom_jitter()
p <- p + scale_y_log10()
print(p)
dev.off()
# Third figure: distance vs similarity for a single sample (row 500) against
# all other points, keeping zero-similarity pairs (only self-similarity == 1
# is dropped).
png(file="results/ex01_10dLineDistVNear1Subset.png")
nnzPts <- which(sM$similarityMatrix[,500] != 1)
ssd <- data.frame(Distance = AkNN[,500][nnzPts], Nearness = sM$similarityMatrix[,500][nnzPts])
groupLabels <- (rep("500", sizeD))[nnzPts]
ssd[["Sample"]] <- groupLabels
# BUGFIX: title said "Three Samples" (copy-paste from the previous figure);
# this panel shows exactly one sample.
p<-ggplot(aes(x = Nearness, y = Distance), data = ssd) + geom_point()+ labs(title="Distance vs Similarity of One Observation to All Others\n10-D Line, n=1000, depth=8, trees=100\nOne Sample (0 Similarity Retained)")+ geom_jitter()
p <- p + scale_y_log10()
print(p)
dev.off()
|
51827abd57ed22a4d30d1bf54b4f35a205031ed4
|
9ae55589b58f9393c342a353b99da4c9955497e1
|
/man/str_2_list.Rd
|
eca636b54d0d34f4dc4ce7eca4f7a0f1a668db13
|
[] |
no_license
|
nrkoehler/xyzfuns
|
25cf0b076122037cef6720a48b7d36bd1afe14ae
|
f57e8e759eba16e7fdf3bb216b6121b26077c60c
|
refs/heads/master
| 2022-02-05T20:34:03.438697
| 2022-01-12T16:17:11
| 2022-01-12T16:17:11
| 223,168,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 255
|
rd
|
str_2_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{str_2_list}
\alias{str_2_list}
\title{{Character string to bullet list}}
\usage{
str_2_list(str)
}
\description{
{Convert a character string to a bullet list}
}
|
e27078b12c1c13f565f94206e071d1ece6172034
|
e6549edacf38351730ca91ead2456d50ba20f1cd
|
/man/logabs.rd
|
cc7212ad0ad8ba539bf072c00203418824d7e7fa
|
[] |
no_license
|
cran/wavethresh
|
96f92574f59f62f77b9b5fe5c318e27011de585c
|
433dac8d2b5f3bf806530a29b5fe022fd2fe9087
|
refs/heads/master
| 2022-11-29T22:37:39.292801
| 2022-11-16T14:20:02
| 2022-11-16T14:20:02
| 17,700,852
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 587
|
rd
|
logabs.rd
|
\name{logabs}
\alias{logabs}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Take the logarithm of the absolute value of the argument
}
\description{
Take the log of the absolute value of the argument
}
\usage{
logabs(x)
}
\arguments{
\item{x}{A number
}
}
\details{
Description says all
}
\value{
The logarithm of the absolute value of the argument (e.g. logabs(3)
returns log(3) = 1.098612, as in the example below)
}
\author{
G P Nason
}
\seealso{\code{\link{image.wd}}, \code{\link{image.wst}}
}
\examples{
logabs(3)
# [1] 1.098612
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{math}
|
425386ab1803598e8b7ae20e683ff65922cbebc2
|
8b4ada2a81d8f1ece2355197d37001d87b55f12c
|
/man/kumexp.Rd
|
939a0b70c72eec98923a8a4de4418f5491febb94
|
[] |
no_license
|
cran/VaRES
|
f7685d92072b9646c47a62ed554dde0222e2ada9
|
1a17f2389450e7f67319a9f62e21cbb03bcb5d24
|
refs/heads/master
| 2023-05-06T00:09:29.334906
| 2023-04-21T23:42:37
| 2023-04-21T23:42:37
| 17,694,053
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,529
|
rd
|
kumexp.Rd
|
\name{kumexp}
\alias{dkumexp}
\alias{pkumexp}
\alias{varkumexp}
\alias{eskumexp}
\title{Kumaraswamy exponential distribution}
\description{Computes the pdf, cdf, value at risk and expected shortfall for the Kumaraswamy exponential distribution due to Cordeiro and de Castro (2011) given by
\deqn{\begin{array}{ll}
&\displaystyle
f (x) = a b \lambda \exp (-\lambda x) \left[ 1 - \exp (-\lambda x) \right]^{a - 1}
\left\{ 1 - \left[ 1 - \exp (-\lambda x) \right]^a \right\}^{b - 1},
\\
&\displaystyle
F (x) = 1 - \left\{ 1 - \left[ 1 - \exp (-\lambda x) \right]^a \right\}^b,
\\
&\displaystyle
{\rm VaR}_p (X) = -\frac {1}{\lambda} \log \left\{ 1 - \left[ 1 - (1 - p)^{1 / b} \right]^{1 / a} \right\},
\\
&\displaystyle
{\rm ES}_p (X) = -\frac {1}{p \lambda} \int_0^p \log \left\{ 1 - \left[ 1 - (1 - v)^{1 / b} \right]^{1 / a} \right\} dv
\end{array}}
for \eqn{x > 0}, \eqn{0 < p < 1}, \eqn{a > 0}, the first shape parameter, \eqn{b > 0}, the second shape parameter, and \eqn{\lambda > 0}, the scale parameter.}
\usage{
dkumexp(x, lambda=1, a=1, b=1, log=FALSE)
pkumexp(x, lambda=1, a=1, b=1, log.p=FALSE, lower.tail=TRUE)
varkumexp(p, lambda=1, a=1, b=1, log.p=FALSE, lower.tail=TRUE)
eskumexp(p, lambda=1, a=1, b=1)
}
\arguments{
\item{x}{scalar or vector of values at which the pdf or cdf needs to be computed}
\item{p}{scalar or vector of values at which the value at risk or expected shortfall needs to be computed}
\item{lambda}{the value of the scale parameter, must be positive, the default is 1}
\item{a}{the value of the first shape parameter, must be positive, the default is 1}
\item{b}{the value of the second shape parameter, must be positive, the default is 1}
\item{log}{if TRUE then log(pdf) are returned}
\item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)}
\item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p}
}
\value{An object of the same length as \code{x}, giving the pdf or cdf values computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.}
\references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}}
\author{Saralees Nadarajah}
\examples{x=runif(10,min=0,max=1)
dkumexp(x)
pkumexp(x)
varkumexp(x)
eskumexp(x)}
|
6f73928f3ddf97afef37ba1d75c50e66961d67fb
|
90b1d0ac1ce238ebb70998aec1d31adfa2eed463
|
/plot1.R
|
00e2a30867a4edb70bbcef11f67174736ce7fe1f
|
[] |
no_license
|
dlmaas/ExData_Plotting1
|
c827aebcf49479a5b634fc0d9919b3c352fc2280
|
d736b4a72b1ab869ab5dd4aa2d6e62e565e7be68
|
refs/heads/master
| 2021-01-18T00:22:32.028137
| 2016-07-17T20:29:28
| 2016-07-17T20:29:28
| 63,515,152
| 0
| 0
| null | 2016-07-17T04:45:06
| 2016-07-17T04:45:05
| null |
UTF-8
|
R
| false
| false
| 1,440
|
r
|
plot1.R
|
# plot1.R -- Exploratory Data Analysis, plot 1.
# Downloads the UCI "Individual household electric power consumption"
# dataset, keeps only the observations for 1 Feb 2007 and 2 Feb 2007, and
# writes a histogram of Global Active Power to plot1.png.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",destfile="dataset.zip")
#
# The zip file is downloaded and unpacked into the working directory.
#
unzip(zipfile="dataset.zip")
#
# Select only the rows for the two target dates. Dates are d/m/yyyy, so both
# days match the pattern below (the character class also contains a literal
# comma, which is harmless since no line starts with ",").
#
full_file<- file("household_power_consumption.txt")
sub_set_full_file <- read.table(
    text = grep("^[1,2]/2/2007", readLines(full_file), value = TRUE),
    col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power",
                  "Voltage", "Global_intensity", "Sub_metering_1",
                  "Sub_metering_2", "Sub_metering_3"),
    sep = ";",
    # BUGFIX: the grep()-filtered text contains data rows only (the file's
    # header line does not start with a date), so header must be FALSE;
    # header = TRUE silently consumed the first observation as column names.
    header = FALSE)
# BUGFIX: destroy the connection object; without close() it stays registered
# in R's connection table for the rest of the session.
close(full_file)
#
# Open the graphics file device to create the required png.
#
png(filename = "plot1.png",
    width = 480, height = 480, units = "px", pointsize = 12,
    bg = "white", res = NA, family = "", restoreConsole = TRUE,
    type = c("windows", "cairo", "cairo-png"))
#
# Plot 1 is generated using hist().
#
hist(sub_set_full_file$Global_active_power, col = "red",
     main = paste("Global Active Power"),
     xlab = "Global Active Power (kilowatts)")
#
# Close the device; capturing the return value prevents "null device 1"
# from being printed.
#
garbage <- dev.off()
|
fbcf9df21741461d51570a7f9c57daaa8d29122d
|
27a4b648ab2dfb3c4279e2c20f5acd37f933e6c9
|
/tests/testthat.R
|
35f808afdecc97dfbcfa01c65330ba423294e2e8
|
[
"MIT"
] |
permissive
|
Sung-Huan/PerseusR
|
2a8ece1a9c90bc5f3349a94dd3e06c82f8170f9f
|
bb36d046bc3925ca9dd0320ba66f2ab71c142448
|
refs/heads/master
| 2020-03-30T09:14:57.371581
| 2020-02-20T10:56:16
| 2020-02-20T10:56:16
| 151,067,743
| 0
| 0
|
NOASSERTION
| 2018-10-01T09:56:50
| 2018-10-01T09:56:50
| null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat entry point: discover and run every unit test under
# tests/testthat/ for the PerseusR package.
library(testthat)
library(PerseusR)
test_check("PerseusR")
|
9f77a7d82d6e3450b683ba2e9e536e1fcabb64fb
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/pkgmaker/R/CLI.R
|
49eaac200e4a3fb8242c2d749d2e4ea321c7d42c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,184
|
r
|
CLI.R
|
# Command Line Interface utils
#
# Author: Renaud Gaujoux
# Created: May 9, 2013
###############################################################################
# Wrap `f` so that everything it prints, messages, or emits as a package
# startup notice is swallowed; its return value is passed through untouched.
# With verbose = TRUE the function is returned unwrapped.
.silenceF <- function(f, verbose = FALSE){
	if( verbose ){
		return(f)
	}
	function(...){
		result <- NULL
		capture.output(
			suppressPackageStartupMessages(
				suppressMessages(result <- f(...))
			)
		)
		result
	}
}
# library() without the usual attach chatter
qlibrary <- .silenceF(library, verbose = FALSE)
# Structured status message: indents by `indent` spaces and prefixes an item
# marker ('*', '-' or '>') chosen from the indent level unless `item` is
# supplied explicitly. With the default appendLF = FALSE no newline is
# emitted, so callers terminate the line themselves.
smessage <- function(..., indent = 0L, item = NULL, appendLF = FALSE){
	if( is.null(item) ){ # choose item from indent
		# markers for indent levels 0..5; indents > 5 yield NA -- presumably
		# never used that deep (TODO confirm)
		.item <- c('*', '*', '-', '-', '>', '>')
		item <- .item[indent+1]
	}
	indent <- if( indent ) paste0(rep(' ', indent), collapse='') else ''
	if( nzchar(item) ) item <- paste0(item, ' ')
	message(indent, item, ..., appendLF = appendLF)
}
# Path of the script currently executed via `Rscript`, recovered from the
# "--file=" entry of commandArgs(). Returns '' when R was not started from a
# script file. full = TRUE returns the path as given; FALSE (default) just
# the basename.
CLIfile <- function(full = FALSE){
	pattern <- "--file=(.*)"
	if( !length(f <- grep(pattern, commandArgs(), value = TRUE)) ) ''
	else{
		pf <- gsub(pattern, "\\1", f)
		if( full ) pf
		else basename(pf)
	}
}
#' Enhanced Command Line Argument Parser
#'
#' Extends the capabilities of package \pkg{argparse}, e.g., in defining sub commands.
#'
#' @param prog program name
#' @param description program description
#' @param ... extra arguments passed to \code{\link[argparse]{ArgumentParser}}.
#' @param epilog epilog messages to display at the end of the man pages
#' @param show.defaults logical that indicates if default argument values should
#' be displayed.
#'
#' @export
CLIArgumentParser <- function(prog = CLIfile(), description = '', ..., epilog = '', show.defaults = TRUE){
	# load argparse
	suppressMessages( library(argparse, quietly = TRUE) )
	# argparse feeds these strings to python: strip embedded newlines
	.flag_newlines <- function(x){
		gsub("\n", "", x)
	}
	# placeholder injected into the epilog; print_help() later substitutes it
	# with the formatted sub-command listing
	.special <- '__@@##@@__'
	epilog <- paste0(.special, epilog)
	p <- ArgumentParser(prog = prog, description = .flag_newlines(description), ..., epilog = .flag_newlines(epilog))
	# change argument formatter if required
	# (patches the generated python code in place to use the
	#  ArgumentDefaultsHelpFormatter)
	if( show.defaults ){
		i <- grep("argparse\\.ArgumentParser", p$python_code)
		inst <- p$python_code[i]
		p$python_code[i] <- paste0(substr(inst, 1, nchar(inst)-1), ', formatter_class=argparse.ArgumentDefaultsHelpFormatter)')
	}
	# wrap in a proto object so methods/fields can be attached dynamically
	p <- proto(p)
	# add add_command function
	p$command_loc <- .special
	p$prog <- prog
	p$exec <- if( nchar(exec_path <- CLIfile(full = TRUE)) ) normalizePath(CLIfile(full = TRUE)) else ''
	p$command <- list()
	p$command_idefault <- 0L
	# name of the default sub-command, or '' if none was flagged
	p$command_default <- function(.){
		if( .$command_idefault ) names(.$command)[.$command_idefault]
		else ''
	}
	# add a (sub-)command
	p$add_command <- function(., command, help='', ..., default = FALSE){
		# add command argument if necessary
		if( !length(.$command) ){
			.$.super$add_argument('command', help = paste0(.$prog, ' command to run'))
		}
		# store command
		.$command[command] <- help
		# store command as default
		if( default ) .$command_idefault <- length(.$command)
	}
	# same as the argparse method, but with newline-sanitized help text
	p$add_argument <- function(., ..., help = ''){
		.flag_newlines <- function(x){
			gsub("\n", "", x)
		}
		help <- .flag_newlines(help)
		.$.super$add_argument(..., help = help)
	}
	# overload print_usage
	p$print_usage <- function(.){
		.$.super$print_usage()
		if( length(.$command) ){
			cat("\n Use --help for listing all available commands\n")
		}
	}
	#
	# overload print_help to add command descriptions
	p$print_help <- function(.){
		# get formatted help
		h <- paste(capture.output(.$.super$print_help()), collapse="\n")
		# # fix new lines if necessary
		# nl <- strsplit(h, "##NL##")[[1]]
		# if( length(nl) > 1L ){
		# indent <- nchar(gsub("^([ ]+).*", "\\1", tail(strsplit(nl[1], "\n")[[1L]], 1L)))
		# i <- 2:length(nl)
		# print(sprintf(paste0("%", indent, 's'), ''))
		# nl[i] <- paste0(sprintf(paste0("%", indent, 's'), ''), nl[i])
		# h <- paste0(nl, collapse="\n")
		# }
		cmds <- ''
		if( length(.$command) ){
			# format command help: name column padded to the longest name,
			# description wrapped and indented to line up underneath
			lm <- max(nchar(names(.$command)))
			fmt <- paste0(" %-", lm, "s")
			cmds <- strwrap(.$command, indent = 4, exdent = 2 + lm + 4, width = 80, simplify = FALSE)
			cmds <- sapply(cmds, paste, collapse = "\n")
			cmds <- paste0(sprintf(fmt, names(.$command)), cmds)
			cmds <- paste0('Commands:\n', paste(cmds, collapse = "\n"))
		}
		# splice the command listing where the placeholder was planted in the epilog
		h <- gsub(.$command_loc, cmds, h, fixed = TRUE)
		cat(h, sep="\n")
	}
	#
	# add function call_string: reconstruct the invoking command line
	p$call_string <- function(., args = commandArgs(TRUE)){
		paste(.$prog, paste0(args, collapse = ' '))
	}
	# remember where CLI_* handler functions should be looked up later
	e <- parent.frame()
	p$locenvir <- parent.env(e)
	# command parser: dispatch to the matching CLI_<command> handler
	p$parse_cmd <- function(., ...){
		# print(ls(.$locenvir))
		pkgmaker::parseCMD(., ..., envir = .$locenvir)
	}
	p
}
# combine argument parsers
# Appends every `parser.add_argument` instruction found in p2's generated
# python code to p1, effectively merging p2's arguments into p1.
.combineParser <- function(p1, p2){
	if( length(i <- grep("^parser\\.add_argument", p2$python_code)) ){
		p1$.that$python_code <- c(p1$python_code, p2$python_code[i])
	}
	p1
}
# Build a predicate over the parsed-argument list `ARGS`: it reports whether
# a value is present (non-NULL) and is a non-empty string under name `x`.
.hasArgument <- function(ARGS){
	function(x) {
		value <- ARGS[[x]]
		length(value) && nzchar(value)
	}
}
# Emit a message on R's message stream (stderr) and, when `extfile` is
# given, mirror the exact same text into that file (appending).
# appendLF controls the trailing newline in both destinations.
logMessage <- function(..., appendLF = TRUE, extfile = NULL){
	# optionally mirror into an external log file
	if( !is.null(extfile) ){
		eol <- if (appendLF) "\n" else NULL
		cat(..., eol, sep = '', file = extfile, append = TRUE)
	}
	# always emit on the standard message stream
	message(..., appendLF = appendLF)
}
#' \code{parseCMD} parse command line arguments for sub-commands,
#' and dispatch to the associated function.
#'
#' @param parser parser object as returned by \code{CLIArgumentParser}.
#' @param ARGS command line argument to parse, as a named list or a character string.
#' @param debug logical that indicates if debugging information should be printed.
#' @param envir environment in which the sub-command handler functions
#' (named \code{CLI_<command>}) are looked for.
#'
#' @export
#' @rdname CLIArgumentParser
parseCMD <- function(parser, ARGS = commandArgs(TRUE), debug = FALSE, envir = parent.frame()){
	# a single string is split on spaces -- convenience for interactive use
	if( isString(ARGS) == 1L ){ # used in dev/debugging
		ARGS <- strsplit(ARGS, ' ')[[1]]
	}
	# fix quotes to avoid python JSON parsing error
	ARGS <- gsub("'", "\"", ARGS)
	library(pkgmaker, quietly = TRUE)
	# define command line arguments
	prog <- parser$prog
	# check validity of command
	# shows usage/help in trivial calls
	if( !length(ARGS) ){
		# no arguments at all: just show usage
		parser$print_usage()
		return( invisible(parser) )
	}else if( !grepl("^-", ARGS[1L]) ){ # first argument is the command
		command <- ARGS[1L]
		if( !command %in% names(parser$command) ){
			stop("unknown ", prog," command '", command, "'\n"
				, " Available commands: ", paste0(names(parser$command), collapse = ', ')
				#, paste(capture.output(parser$print_usage()), collapse = "\n")
			)
		}
	}else if( any(ARGS %in% c('-h', '--help')) ){
		parser$print_help()
		return( invisible(parser) )
	}else{
		# default command if any
		# (options were passed without a command name: prepend the default)
		if( nzchar(parser$command_default()) )
			ARGS <- c(parser$command_default(), ARGS)
		else{
			stop("Missing command:\n "
				, paste(capture.output(parser$print_usage()), collapse = "\n")
				, "\n Available command(s): ", str_out(names(parser$command), Inf, quote=FALSE)
				, call. = FALSE)
		}
	}
	# get command-specific parser
	# each sub-command is handled by a function CLI_<command> that returns
	# its own parser when called with ARGS = NULL
	command <- ARGS[1L]
	cmd_funame <- paste0('CLI_', command)
	if( !exists(cmd_funame, envir, inherits = TRUE) ){
		# if( is.null(cmd_fun <- getFunction(cmd_funame, mustFind = FALSE)) ){
		stop("Could not execute ", prog , " command ", command, ": did not find CLI entry point '", cmd_funame, "'")
	}
	cmd_fun <- get(cmd_funame, envir, inherits = TRUE)
	cmd_parser <- cmd_fun(ARGS=NULL)
	ARGS <- ARGS[-1L]
	if( !length(ARGS) ){
		# show command line
		cmd_parser$print_usage()
		invisible(cmd_parser)
	}else if( any(ARGS %in% c('-h', '--help')) ){
		cmd_parser$print_help()
		return( invisible(cmd_parser) )
	}else{
		# parse command arguments
		args <- cmd_parser$parse_args(ARGS)
		# log call and parsed arguments
		if( debug ){
			message('Call: ', parser$call_string(ARGS))
			message('Parsed arguments:')
			str(args)
		}
		#
		# call command handler
		cmd_fun(ARGS = args)
	}
}
#' Package Specific Command Line Interface
#'
#' Locates a package's CLI definition script (\code{scripts/CLI.R} inside the
#' installed package, or \code{altfile}), sources it, and invokes its main
#' entry point \code{CLI()} with the parsed command line arguments.
#'
#' @param package package name
#' @param altfile alternative file that defines the main CLI entry point.
#' That is a function named \code{CLI}, which takes the list of parsed command line
#' arguments as its first argument.
#' @param local logical that indicates if the main CLI function should be
#' defined and evaluated in a local environment, or in the user's Global
#' environment.
#' @param ARGS list of parsed arguments passed to the main CLI function.
#' @param ... extra arguments passed to the package's CLI function.
#'
#' @export
packageCLI <- function(package, altfile = NULL, local = TRUE, ARGS = commandArgs(TRUE), ...){
	# resolve the CLI script: the package-installed one wins; otherwise fall
	# back to altfile (one of the two must be provided)
	master_cli <- if( !is.null(package) ) system.file('scripts', 'CLI.R', package = package)
	else if( is.null(altfile) ){
		stop('Could not load CLI definition: argument `package` or `altfile` is required')
	}
	if( !length(master_cli) || !nzchar(master_cli) ){
		master_cli <- altfile
	}
	# load CLI: with local = TRUE the script is evaluated in this function's
	# frame, otherwise in the global environment
	source(master_cli, keep.source = TRUE, chdir = TRUE, local = local)
	# BUGFIX: look CLI() up in the same scope it was sourced into. The
	# previous hard-coded `inherits = FALSE` only matched the local = TRUE
	# case and spuriously errored when local = FALSE, even though the get()
	# below (inherits = TRUE) would have found the function.
	if( !exists('CLI', inherits = !local) ){
		stop("Could not start command line interface for package '", package, "': main entry point function CLI() not found.")
	}
	CLI <- get('CLI', inherits = !local)
	# run CLI
	CLI(ARGS, ...)
}
|
a71b0cf1a449dc07c4efb814b19bdce3cc50943c
|
21290234281322dec6fdb3e7cf7d55629e65daea
|
/data/code-r/KD_TREE.R
|
0304421bedc71485702be0e4c9f59e6a802b0ca7
|
[] |
no_license
|
chasememeda/POI
|
b55aa1e7557ee0fc4494d7f15e48131707ce3760
|
cbfd234e779c347ac6591bb5cb85f430e51fbe24
|
refs/heads/master
| 2020-04-28T10:48:10.345790
| 2015-06-22T13:38:13
| 2015-06-22T13:38:13
| 37,855,983
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,520
|
r
|
KD_TREE.R
|
# Connect to the POI database and load per-category counts.
# SECURITY(review): database credentials are hard-coded and committed;
# move them to a config file or environment variables.
library(RODBC);
db <- odbcConnect("poi",uid="root",pwd="root");
#info <- odbcQuery(db,"select ID, NAME, X_COORD AS X, Y_COORD AS Y , CATEGORY AS CAT from poi_info");
# categories ordered by descending POI count; drives the per-category loop below
cats <- sqlQuery(db,"select count(ID) as total, category from poi_info group by category ORDER BY total DESC");
#################################
#Functions
#################################
# Build a kd-tree over all POIs of one category and persist its edges
# (father -> child, with the split axis) into the poi_kdt table.
# NOTE(review): SQL statements are built via paste(); `cat` comes from the
# database itself, so injection risk is low, but parameterized queries
# would be safer.
runTree<- function( cat ){
	kdt <- sqlQuery(db,paste("select ID, X_COORD AS X, Y_COORD AS Y from poi_info WHERE CATEGORY = ",cat));
	#initialize the tree
	# father = 0 denotes the (virtual) root's parent
	father <- 0;
	#####################################
	# kd-tree generation function
	#####################################
	# Recursively split `data` (columns: ID, X, Y) on the axis of larger
	# variance, inserting the median point as a node under `father`.
	runKdTree <- function(db,data,category,father){
		if( dim(data)[1] == 1 ){
			#leaf node
			child <- as.character(data[1,1]);
			# NOTE(review): `tree` is never initialized anywhere in this file;
			# this append() only works if a `tree` object already exists in the
			# global environment -- TODO confirm/remove.
			tree<-append(tree,c(father,child));
			sql<-paste("INSERT INTO poi_kdt(cat,father,child) VALUES(",cat,",'",father,"','",child,"');",sep="")
			sqlQuery(db,sql);
			#print(sql);
		}else{
			#data <-kdt;
			# pick the split axis: column 2 (X) or 3 (Y), whichever varies more
			varx <- var(data[,2]);
			vary <- var(data[,3]);
			cur <- 3;
			#print(c(varx,vary));
			#father <- 0;
			if(varx > vary){
				#start from x
				cur <- 2;
			}
			vector <- data[,cur];
			med <- median(x=vector);
			if( length(vector)%%2 != 0 ){
				node <- which(vector==med)[1]
			}else{
				# even count: median is interpolated, so take the closest
				# actual observation instead
				node <- which.min(abs(vector-med))[1];
				med <- vector[node];
			}
			child <- as.character(data[node,1]);
			sql<-paste("INSERT INTO poi_kdt(cat,father,child,axis) VALUES(",cat,",'",father,"','",child,"',", (cur - 1),");",sep="")
			sqlQuery(db,sql);
			#print(sql);
			# partition the remaining points around the median node
			data1 <- data[which(vector<med),]
			data2 <- which(vector>=med);
			#in case of duplicate median value, the others are assigned to the second child
			data2 <- data2[-which(data2==node)];
			data2 <- data[data2,]
			if(!is.null(dim(data1))){
				if(dim(data1)[1]>0){
					#recursion
					runKdTree(db,data1,category,child);
				}
			}
			if(!is.null(dim(data2))){
				if(dim(data2)[1]>0){
					#recursion
					runKdTree(db,data2,category,child);
				}
			}
		}
	}
	# one transaction per category for speed/atomicity
	odbcSetAutoCommit(db, autoCommit = FALSE)
	runKdTree(db,kdt,cat,father);
	odbcEndTran(db, commit = TRUE)
}
# Per-row driver: `cat` is one row of `cats` coerced to character by apply(),
# so cat[1] is the POI count and cat[2] the category id.
run<-function(cat){
	print(paste("total",cat[1]))
	runTree(cat[2]);
	print(paste("end",cat[2]))
}
##################################
#Scripts
##################################
# Rebuild the kd-tree table from scratch for every category.
sqlQuery(db,"truncate poi_kdt;");
apply(cats,1,run)
|
dfe722dfeea1595f282741c04fb18901fd8ada51
|
5220d6bcfd84e4a5486253bad9b43ce9e7bd3d86
|
/R/RDFBonesQuery-package.r
|
21818cce6abc7e6bb063b20bcebc11082f600a9d
|
[] |
no_license
|
RDFBones/RDFBonesQuery
|
e545e841911f5757a96a6b15a9877bc69df34059
|
fd1dace72742ca558445064f08831e295556f23e
|
refs/heads/master
| 2021-01-22T19:25:52.978543
| 2017-03-28T13:32:47
| 2017-03-28T13:32:47
| 85,200,569
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,755
|
r
|
RDFBonesQuery-package.r
|
#' Provides predefined queries of RDFBones SPARQL endpoint
#'
#' Provides predefined queries of RDFBones SPARQL endpoint
#'
#' \tabular{ll}{
#' Package: \tab RDFBonesQuery\cr
#' Type: \tab Package\cr
#' Version: \tab 0.0.1\cr
#' Date: \tab 2017-03-16\cr
#' License: \tab GPL\cr
#' LazyLoad: \tab yes\cr
#' }
#'
#' @name RDFBonesQuery-package
#' @aliases RDFBonesQuery RDFBonesQuery-package
#' @docType package
#' @author Stefan Schlager \email{stefan.schlager@@uniklinik-freiburg.de}
#'
#' Maintainer: Stefan Schlager \email{stefan.schlager@@uniklinik-freiburg.de}
#' @note
#' (Review note: the notes and references below appear to have been copied
#' from the 'Morpho' package documentation and likely do not apply to
#' RDFBonesQuery -- confirm and replace with package-specific references.)
#'
#' The pdf-version of Morpho-help can be obtained from CRAN on \url{https://cran.r-project.org/package=Morpho}
#'
#' For more advanced operations on triangular surface meshes, check out my package Rvcg: \url{https://cran.r-project.org/package=Rvcg} or the code repository on github \url{https://github.com/zarquon42b/Rvcg}
#'
#'
#'
#'
#' @references Schlager S. 2013. Soft-tissue reconstruction of the human nose:
#' population differences and sexual dimorphism. PhD thesis,
#' \enc{Universitätsbibliothek}{Universitaetsbibliothek} Freiburg. URL:
#' \url{http://www.freidok.uni-freiburg.de/volltexte/9181/}.
#' @encoding utf8
#' @keywords package
#' @import SPARQL graphics grDevices methods stats utils XML RCurl bitops
NULL
#' Landmarks and a triangular mesh
#'
#' Landmarks on the osseous human nose and a triangular mesh representing this
#' structure. (Review note: this title and description appear to be copied
#' from the 'Morpho' package and do not match the RDFBones prefix variables
#' documented in the format section below -- confirm and update.)
#'
#'
#' @name RDFBonesVariables
#' @aliases RDFBonesPrefix skull_0144_ch_fe.mesh
#' @docType data
#' @format \code{RDFBonesPrefix}: character vector containing namespace prefix definitions.
#'
#' \code{RDFBonesPrefixString}: character specifying all NS-Prefixes for SPARQL queries.
#' @keywords datasets
NULL
|
887199d41aeaa6feb5ae6ce93dd8179c426d2c49
|
e796addadd71d250546a9a592d119b52efb5f447
|
/man/combine_terms.Rd
|
ca59f33b1717c3ee3eba74efed28b31a3dd6f39c
|
[] |
no_license
|
DanielReedOcean/ACCASPR
|
cab6be70a0167911170e9c1b2860844567aa90ff
|
56267e22a3348645af831e445c79f8653cbb4062
|
refs/heads/master
| 2021-04-30T12:51:39.013388
| 2019-08-20T14:26:29
| 2019-08-20T14:26:29
| 121,279,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 327
|
rd
|
combine_terms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_models.R
\name{combine_terms}
\alias{combine_terms}
\title{Create right-hand side of GAM formula by combining terms}
\usage{
combine_terms(vars, kval)
}
\description{
Create right-hand side of GAM formula by combining terms
}
\keyword{internal}
|
fb49ede16e7b1ca64706a7763a7ac229adbadc9d
|
b59d136b1fdb4d3adf4f8984bece7b746d1d36cb
|
/cachematrix.R
|
893757b0f7560bd2edea5d3aeacf9123f5d4ac37
|
[] |
no_license
|
acmars/ProgrammingAssignment2
|
c2b80a0450c9a400eca8280bb84c5ca4d3947505
|
d75cffb5d65252ddb25bbc4b936c2c3d86d24254
|
refs/heads/master
| 2021-01-09T07:36:25.982104
| 2014-09-18T00:58:26
| 2014-09-18T00:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,327
|
r
|
cachematrix.R
|
##---cacheMatrix.R---
## This script contains two functions: makeCacheMatrix and cacheSolve
## makeCacheMatrix is designed to create a matrix that can cache the
## inverse of a matrix, using the solve function.
## cacheSolve is designed to compute the inverse of the matrix created
## in the makeCacheMatrix function, using the cached solution if it
## had already been calculated by the previous function to save time.
## makeCacheMatrix --- A function that creates a matrix to cache the inverse of
## that matrix.
# Construct a matrix wrapper object that can cache its inverse.
#
# Returns a list of accessor closures:
#   set(y)        -- replace the stored matrix and drop any cached inverse
#   get()         -- return the stored matrix
#   setmatrix(solve) -- store a computed inverse in the cache
#   getmatrix()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # Invalidate the cache whenever the underlying matrix changes.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    cached_inverse <<- solve
  }
  getmatrix <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}
## cacheSolve --- A function to return the inverse of a matrix, either by
## calculating it, or returning the previously cached inverse.
# Return the inverse of the special "cache matrix" object `x` created by
# makeCacheMatrix().
#
# If the inverse has already been computed it is returned from the cache
# (with a message); otherwise it is computed with solve(), stored in the
# cache for future calls, and returned.
#
# x   : object returned by makeCacheMatrix()
# ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  m <- x$getmatrix()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  ## Return a matrix that is the inverse of 'x'
  data <- x$get()
  m <- solve(data, ...)
  # Bug fix: the original called x$setmatrix() with no argument, which errors
  # when the lazy argument is forced, so the cache was never populated --
  # and the function did not return the computed inverse.
  x$setmatrix(m)
  m
}
|
5b0b575bc5bdcf096e7b24b68372e3baced173df
|
d5331ec752b979e7c0b7edb81536875f6400a97f
|
/inst/shiny/server.R
|
863551146b8401bc973716deb7cfc4cc697cd6e8
|
[] |
no_license
|
AuHau/ISRaD
|
81ef5005951ad1dfb0bd72d34d2aea0ebd257f9b
|
4e69adfdfadd521d932898a0b01f436bd2328e8e
|
refs/heads/master
| 2020-04-18T01:23:27.646220
| 2019-04-04T16:25:29
| 2019-04-04T16:25:29
| 167,116,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,053
|
r
|
server.R
|
library(ggplot2)
library(dplyr)
# Shiny server for browsing the ISRaD soil-carbon database.
# Flattens all ISRaD tables into one data frame, then renders a single
# user-configurable plot (histogram when no y-variable is chosen, scatter
# otherwise) with optional threshold-based row/column facets.
shinyServer(function(input, output, session) {
# Allow file uploads up to 30 MB.
options(shiny.maxRequestSize=30*1024^2)
# Preset selector: overwrite the individual variable inputs with one of two
# canned plot configurations.
observe({
x <- input$presets
if (x==1) {
updateSelectInput(session, "y_var", selected = "lyr_bot")
updateSelectInput(session, "x_var", selected = "lyr_bd_tot")
updateSelectInput(session, "size_var", selected = "NULL")
updateSelectInput(session, "col_var", selected = "pro_MAT")
updateSelectInput(session, "col_facet_var", selected = "pro_MAP")
updateSelectInput(session, "row_facet_var", selected = "pro_MAT")
}
if (x==2) {
updateSelectInput(session, "y_var", selected = "frc_c_tot")
updateSelectInput(session, "x_var", selected = "frc_14c")
updateSelectInput(session, "size_var", selected = "NULL")
updateSelectInput(session, "col_var", selected = "frc_property")
updateSelectInput(session, "col_facet_var", selected = "NULL")
updateSelectInput(session, "row_facet_var", selected = "NULL")
}
})
# Flatten the ISRaD_data list of tables: coerce every column to character so
# the tables can be full_join-ed, then let type.convert() restore the types.
soilcarbon_database<-ISRaD_data
soilcarbon_database <- lapply(soilcarbon_database, function(x) x %>% mutate_all(as.character))
soilcarbon_database<-soilcarbon_database %>%
Reduce(function(dtf1,dtf2) full_join(dtf1,dtf2), .)
soilcarbon_database[]<-lapply(soilcarbon_database, type.convert)
output$plot <- renderPlot({
# Collect the currently selected plotting variables; the UI sends the literal
# string "NULL" for "not selected", and those entries are dropped below.
variables<-list(
y_var=input$y_var,
x_var=input$x_var,
size_var=input$size_var,
col_var=input$col_var,
col_facet_var=input$col_facet_var,
row_facet_var=input$row_facet_var
)
variables[which(variables=="NULL")]<-NULL
# uncomment below for troubleshooting
# variables<-list(y_var="lyr_top",
# x_var="lyr_bot",
# size_var="NULL",
# col_var="NULL",
# col_facet_var="NULL",
# row_facet_var="NULL")
# variables[which(variables=="NULL")]<-NULL
# Keep only rows that are complete for the selected columns.
plot_data<-na.omit(data.frame(soilcarbon_database[,unlist(variables)]))
colnames(plot_data)<-unique(variables)
plot_data$facet_cut<-""
plot_data$facet_cut2<-""
col_facet_thresh<-NA
row_facet_thresh<-NA
# Column facets: split the data at the user-supplied numeric threshold on the
# chosen facet variable and label each side "< thresh" / "> thresh".
if ((input$col_facet_thresh != "" & !is.null(variables$col_facet_var))){
col_facet_thresh<-as.numeric(input$col_facet_thresh)
plot_data$facet_cut<-plot_data[,variables$col_facet_var] < col_facet_thresh
plot_data$facet_cut[which(plot_data$facet_cut)]<-paste(variables$col_facet_var, " < ", col_facet_thresh)
plot_data$facet_cut[which(plot_data$facet_cut==F)]<-paste(variables$col_facet_var, " > ", col_facet_thresh)
}
# Row facets: same thresholding scheme on the row facet variable.
if ((input$row_facet_thresh != "" & !is.null(variables$row_facet_var))){
row_facet_thresh<-as.numeric(input$row_facet_thresh)
plot_data$facet_cut2<-plot_data[,variables$row_facet_var] < row_facet_thresh
plot_data$facet_cut2[which(plot_data$facet_cut2)]<-paste(variables$row_facet_var, " < ", row_facet_thresh)
plot_data$facet_cut2[which(plot_data$facet_cut2==F)]<-paste(variables$row_facet_var, " > ", row_facet_thresh)
}
# No y-variable selected: draw a faceted histogram of x.
if(is.null(variables$y_var)){
p<-ggplot(plot_data, aes_string(x=variables$x_var))+
geom_histogram(bins = 30)+
facet_grid(facet_cut2~facet_cut)+
theme_light(base_size = 16)+
theme(strip.background = element_blank())+
theme(strip.text = element_text(colour = 'black'))
p
}else{
# Otherwise draw a faceted scatter plot, optionally sized by size_var.
p<-ggplot(plot_data, aes_string(x=variables$x_var, y=variables$y_var, col=variables$col_var))+
facet_grid(facet_cut2~facet_cut)+
theme_light(base_size = 16)+
theme(strip.background = element_blank())+
theme(strip.text = element_text(colour = 'black'))
# Depth variables are plotted with depth increasing downwards.
# NOTE(review): this checks "layer_top"/"layer_bot" but the selectors above
# use "lyr_bot"-style names -- confirm the intended variable names.
if(variables$y_var=="layer_top" | variables$y_var=="layer_bot"){
p<-p+scale_y_reverse()
}
# NOTE(review): the list element set above is `size_var`; `variables$size`
# only finds it via $ partial matching -- works, but confirm intent.
if(is.null(variables$size)){
p<-p+geom_point(alpha=input$alpha, size=2)
}else
p<-p+geom_point(alpha=input$alpha, aes_string(size=variables$size_var))+ scale_size_continuous(range = c(1, 10))
}
print(p)
})
})
|
d7a73e2c8b724252e6b3cd980013fc4c24c12a43
|
430fbb7da6ed247c9f0d43e18301608344c37800
|
/man/temperature_curve.Rd
|
7539ea2482e679bbbf969bc42d669737dc09bcc7
|
[
"MIT"
] |
permissive
|
nielsjdewinter/ShellChron
|
5158a6b4a8325c5d9bd828a35d9329465eaea0bf
|
ac2ecdddd0cd1616e60b3711f6b2cbca56abc411
|
refs/heads/main
| 2023-04-10T10:00:53.741679
| 2022-08-18T13:57:00
| 2022-08-18T13:57:00
| 303,992,660
| 3
| 2
|
MIT
| 2022-08-15T07:37:48
| 2020-10-14T11:22:55
|
R
|
UTF-8
|
R
| false
| true
| 1,218
|
rd
|
temperature_curve.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temperature_curve.r
\name{temperature_curve}
\alias{temperature_curve}
\title{Function that creates a sinusoidal Sea Surface Temperature (SST) curve
from a list of parameters}
\usage{
temperature_curve(T_par, years = 1, t_int = 1)
}
\arguments{
\item{T_par}{List of four parameters describing (in order) amplitude
(\code{T_amp}; in degrees C), period (\code{T_per}; in days), phase
(\code{T_pha} in day of the year) and average temperature (\code{T_av};
in degrees C)}
\item{years}{Length of the preferred sinusoid in number of years (defaults
to 1)}
\item{t_int}{Time interval of sinusoidal record (in days)}
}
\value{
A matrix containing columns for time (in days) and SST (in degrees C)
}
\description{
Takes the specified parameters for amplitude, period, phase and average value
as well as the number of years specified and the time interval. It then
creates a sinusoid based on the boundary conditions. Used as intermediate
step during iterative modeling.
}
\examples{
# Set parameters
T_amp <- 20
T_per <- 365
T_pha <- 150
T_av <- 15
T_par <- c(T_amp, T_per, T_pha, T_av)
SST <- temperature_curve(T_par, 1, 1) # Run the function
}
|
1138a7b28b99278666002a6808c6c1a648394c2f
|
04479a0945363db519854f8aeb02d864690fc47a
|
/R/load_data_ss.R
|
5ebb6c29223afe970c58add28d5bba2ed22db13e
|
[] |
no_license
|
nissandjac/PacifichakeMSE
|
f398521fd096f0e55b5634ebfe08232b8d9839ef
|
cf967413fa052c1a843bfaf6f1fc1542afd994ce
|
refs/heads/master
| 2022-03-08T14:45:01.953621
| 2022-03-01T09:21:06
| 2022-03-01T09:21:06
| 124,948,437
| 6
| 8
| null | 2020-05-01T06:34:23
| 2018-03-12T20:37:25
|
HTML
|
UTF-8
|
R
| false
| false
| 7,037
|
r
|
load_data_ss.R
|
## Load the hake data
# year and age input
#' Set up a TMB data list from a SS object
#'
#' @param mod # SS3 object
#' @param sum_zero # Force the recruitment deviations to sum to zero?
#'
#' @return Returns a list for TMB
#' @export
#'
#' @examples
load_data_ss <- function(mod,
sum_zero = 0){
# mod: SS3 report object (as read by r4ss); sum_zero: flag on whether 'main'
# recruitment deviations should sum to zero (passed through to the TMB list).
years <- mod$startyr:mod$endyr
tEnd <- length(years)
age <- 0:mod$accuage
# Fishing mortality for the modelled years.
F0 <- mod$catch$F[mod$catch$Yr >=years[1] & mod$catch$Yr <(max(years)+1)]
nage <- length(age)
# Flat natural-mortality selectivity across ages.
msel <- rep(1,nage)
# Maturity
mat <- as.numeric(mod$ageselex[mod$ageselex$Factor == 'Fecund' & mod$ageselex$Yr == 1963,paste(age)])
# weight at age
# Fleet codes in wtatage: -2 = spawning biomass, 1 = fishery catch,
# 2 = survey, -1 = mid-year population.
wage_ss <- mod$wtatage
wage_ssb <- wage_ss[wage_ss$Fleet == -2,paste(age)]
wage_catch <- wage_ss[wage_ss$Fleet == 1 & wage_ss$Yr > (years[1]-1) & wage_ss$Yr < years[tEnd]+1, paste(age)]
wage_survey <- wage_ss[wage_ss$Fleet == 2 & wage_ss$Yr > (years[1]-1) & wage_ss$Yr < years[tEnd]+1, paste(age)]
wage_mid <- wage_ss[wage_ss$Fleet == -1 & wage_ss$Yr > (years[1]-1) & wage_ss$Yr < years[tEnd]+1, paste(age)]
catch <- mod$catch$ret_bio[mod$catch$Yr> (years[1]-1) & (mod$catch$Yr < years[tEnd]+1)]
# Survey abundance
# Placeholder value 1 in non-survey years; observations filled in where the
# CPUE index is flagged as used.
survey <- rep(1, tEnd)
sidx <- mod$cpue$Yr[mod$cpue$Use == 1]
survey[which(years %in% sidx)] <- mod$cpue$Obs[mod$cpue$Use == 1]
ss.error <- rep(1,tEnd)
ss.error[which(years %in% sidx)] <- (1+mod$cpue$SE[mod$cpue$Use == 1])-mod$cpue$SE[2] # Dunno why this calc is necessary
# Age comps
# Survey age compositions, rearranged into an age x year matrix; years with
# no survey are filled with -1 and flagged off.
survey.ss <- mod$agedbase[mod$agedbase$Fleet == 2,]
age_survey_ss <- data.frame(year = survey.ss$Yr, age = survey.ss$Bin, obs = survey.ss$Obs,
N = survey.ss$N)
age_survey.tmp <- matrix(-1,length(mod$agebins), tEnd)
syears <- unique(survey.ss$Yr)
survey_x <- rep(-2, tEnd) #
survey_flag <- rep(-1, tEnd)
ss.survey <- rep(0, tEnd)
for(i in 1:tEnd){
if(years[i] %in% syears){
tmp <- age_survey_ss[age_survey_ss$year == years[i],]
age_survey.tmp[,i] <- tmp$obs
ss.survey[i] <- mean(tmp$N)
survey_x[i]<- 2
survey_flag[i] <- 1
}else{
age_survey.tmp[,i] <- -1
}
}
###### Order the catch age comps
# Same rearrangement for the fishery (fleet 1) age compositions.
catch.ss <- mod$agedbase[mod$agedbase$Fleet == 1,]
age_catch_ss <- data.frame(year = catch.ss$Yr, age = catch.ss$Bin, obs = catch.ss$Obs,
N = catch.ss$N)
age_catch.tmp <- matrix(-1,length(mod$agebins), tEnd)
cyears <- unique(catch.ss$Yr)
cflag <- rep(-1, tEnd)
ss.catch <- rep(0, tEnd)
for(i in 1:tEnd){
if(years[i] %in% cyears){
tmp <- age_catch_ss[age_catch_ss$year == years[i],]
age_catch.tmp[,i] <- tmp$obs
ss.catch[i] <- mean(tmp$N)
cflag[i] <- 1
}else{
age_catch.tmp[,i] <- -1
}
}
# Recruitment-bias-adjustment ramp (per-year adjustment factor b).
b <- matrix(NA, tEnd)
# Parameters
yb_1 <- as.numeric(mod$breakpoints_for_bias_adjustment_ramp[1]) #_last_early_yr_nobias_adj_in_MPD
yb_2 <- as.numeric(mod$breakpoints_for_bias_adjustment_ramp[2]) #_first_yr_fullbias_adj_in_MPD
yb_3 <- as.numeric(mod$breakpoints_for_bias_adjustment_ramp[3]) #_last_yr_fullbias_adj_in_MPD
yb_4 <- as.numeric(mod$breakpoints_for_bias_adjustment_ramp[4]) #_first_recent_yr_nobias_adj_in_MPD
b_max <- as.numeric(mod$breakpoints_for_bias_adjustment_ramp[5]) #_max_bias_adj_in_MPD
#b[1] <- 0
# Piecewise-linear ramp: 0 before yb_1, rising to b_max between yb_1 and
# yb_2, flat at b_max until yb_3, falling back to 0 by yb_4.
for(j in 1:length(years)){
if (years[j] <= yb_1){
b[j] = 0}
if(years[j] > yb_1 & years[j]< yb_2){
b[j] = b_max*((years[j]-yb_1)/(yb_2-yb_1));
}
if(years[j] >= yb_2 & years[j] <= yb_3){
b[j] = b_max}
if(years[j] > yb_3 & years[j] < yb_4){
b[j] = b_max*(1-(yb_3-years[j])/(yb_4-yb_3))
}
if(years[j] >= yb_4){
b[j] = 0
}
}
### h prior distribution ###
# Beta prior for steepness h, re-parameterised to the (hmin, hmax) interval.
hmin <- 0.2
hmax <- 1
hprior <- 0.777
hsd <- 0.117
mu <- (hprior-hmin)/(hmax-hmin)
tau <- ((hprior-hmin)*(hmax-hprior))/hsd^2-1
Bprior= tau*mu
Aprior = tau*(1-mu)
Pconst <- 1e-6
hrange <- seq(0.2,1, length.out = 100)
# Prior likelihood evaluated over the h range (not returned; presumably kept
# for inspection -- TODO confirm it can be dropped).
Prior_Like = (1.0-Bprior)*log(Pconst+hrange-hmin) +
(1.0-Aprior)*log(Pconst+hmax-hrange)-
(1.0-Bprior)*log(Pconst+hprior-hmin) -
(1.0-Aprior)*log(Pconst+hmax-hprior)
### selyear
# First year of time-varying selectivity, plus per-year flags marking the
# years in which selectivity changes.
sel.tmp <- mod$SelAgeAdj$Yr[mod$SelAgeAdj$`Change?` == 1 & mod$SelAgeAdj$Yr>years[1]][1]
flag_sel <- rep(0,length(years))
flag_sel[years %in% unique(mod$SelAgeAdj$Yr[mod$SelAgeAdj$`Change?` == 1 & mod$SelAgeAdj$Yr>years[1]])] <- 1
# Assemble the TMB input list.
df <-list( #### Parameters #####
wage_ssb = t(wage_ssb),
wage_catch = t(wage_catch),
wage_survey = t(wage_survey),
wage_mid = t(wage_mid),
# Input parameters
Msel = msel,
Matsel= mat,
nage = nage,
age = age,
year_sel = length(sel.tmp:years[length(years)]), # Years to model time varying sel
selYear = which(sel.tmp == years),
flag_sel = flag_sel,
tEnd = length(years), # The extra year is to initialize
logQ = mod$parameters$Value[mod$parameters$Label == "LnQ_base_Acoustic_Survey(2)"], # Analytical solution
# Selectivity
Smin = 1,
Smin_survey = 2,
Smax = 6,
Smax_survey = 6,
# survey
survey = survey, # Make sure the survey has the same length as the catch time series
survey_err = ss.error, # Make sure the survey has the same length as the catch time series
survey_x = survey_x, # Is there a survey in that year?
ss_survey = ss.survey, # sample sizes of ages
flag_survey =survey_flag, # Are there age comps that year
age_survey = age_survey.tmp,
age_maxage = max(mod$agebins), # Max age for age comps
# Catch
Catchobs = catch, # Convert to kg
ss_catch = ss.catch, # Age comp sample sizes
flag_catch =cflag, # Are the age comps from the catch?
age_catch = age_catch.tmp,
# variance parameters
logSDcatch = log(mod$catch$se[1]),
logSDR = log(mod$parameters$Value[mod$parameters$Label == 'SR_sigmaR']), # Fixed in stock assessment ,
F0 = F0,
#logphi_survey = log(0.91),
sigma_psel = mod$parameters$Value[mod$parameters$Label == "AgeSel_P3_Fishery(1)_dev_se"],
smul = 0.5,
sum_zero = sum_zero,
years = years,
logphi_survey = mod$parameters$Value[mod$parameters$Label == "ln(EffN_mult)_2"],
Bprior= tau*mu,
Aprior = tau*(1-mu),
b = b#,
# ageerr = as.matrix(age_err[,2:22])
)
return(df)
}
|
8e272197e9dce77f880fb7ebdb900de2f7cceea6
|
057cb37817ffeec47fffecdabb59fc1f060884e8
|
/old/experiment_real_data_missing_data_1/import.data.R
|
a22a2c41a46b16815040d37e60aff0852a09666f
|
[] |
no_license
|
BIMIB-DISCo/MST
|
7e1a89e430ed0c16f42a9caecafb0e1f426189fc
|
d1db82d70c6df9c19ab86153e3b7e696d1f7588b
|
refs/heads/master
| 2021-01-13T06:56:36.494870
| 2018-12-09T22:22:08
| 2018-12-09T22:22:08
| 54,653,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,527
|
r
|
import.data.R
|
# load the required R packages
library(mi)
library(TRONCO)
# set the seed
set.seed(12345)
# structure to save all the results
data_paper1 = list()
# read the data and format them
# Alteration matrix: rows are identified by the first column; the recoding
# below maps "-" to NA (missing) and 2 to 1 (presence of alteration).
data_1 = read.table(file=paste0(getwd(),"/alterations.txt"),header=TRUE,sep="\t",check.names=FALSE,stringsAsFactors=FALSE)
rownames(data_1) = data_1[,1]
data_1 = data_1[,-1]
for (i in 1:nrow(data_1)) {
for(j in 1:ncol(data_1)) {
if(data_1[i,j]=="-") {
data_1[i,j] = NA
}
else if(data_1[i,j]==2) {
data_1[i,j] = 1
}
}
}
origina.data.paper1 = data_1
# impute the missing data 100 times
dataset_imputed = mi(missing_data.frame(origina.data.paper1))
dataset_imputed = complete(dataset_imputed,m=100)
# save the imputed datasets
# One experiment per imputation; each entry is a list holding the imputed
# 0/1 matrix plus epos/eneg error rates (both 0 here).
data_imputations = list()
dataset.missing.data = matrix(list(), 1, 100)
colnames(dataset.missing.data) = paste("Experiment", 1:length(dataset_imputed))
rownames(dataset.missing.data) = "58"
for(i in 1:length(dataset_imputed)) {
curr_imputation = dataset_imputed[[i]]
# Keep only the original columns (mi appends bookkeeping columns) and
# coerce everything back to a numeric matrix with the original dimnames.
curr_imputation = as.matrix(curr_imputation[,1:ncol(origina.data.paper1)])
curr_imputation = matrix(mapply(as.numeric,curr_imputation),nrow=nrow(origina.data.paper1),ncol=ncol(origina.data.paper1))
colnames(curr_imputation) = colnames(origina.data.paper1)
rownames(curr_imputation) = rownames(origina.data.paper1)
exp = list()
exp$dataset = curr_imputation
exp$epos = 0
exp$eneg = 0
exp = list("1" = exp)
dataset.missing.data[[1,i]] = exp
}
save(dataset.missing.data, file = "RData/dataset.missing.data.RData")
save(origina.data.paper1, file = "RData/original.data.paper1.RData")
# create scite dataset
# SCITE encoding: 0 = absent, 1 = mutated, 3 = missing value.
scite = matrix(0, ncol = ncol(origina.data.paper1), nrow = nrow(origina.data.paper1))
colnames(scite) = colnames(origina.data.paper1)
rownames(scite) = rownames(origina.data.paper1)
for (i in 1:nrow(origina.data.paper1)) {
for (j in 1:ncol(origina.data.paper1)) {
if (is.na(origina.data.paper1[[i,j]])) {
scite[[i,j]] = 3
} else if (origina.data.paper1[[i,j]] == 1) {
scite[[i,j]] = 1
}
}
}
save(scite, file='RData/scite.RData')
# Wrap the SCITE matrix in the 1x1 experiment structure expected by
# create.scite.input() and emit the SCITE input files.
scite.dataset = NULL
scite.dataset$dataset = scite
scite.dataset$epos = 0
scite.dataset$eneg = 0
scite.input = matrix(list(), ncol = 1, nrow = 1)
colnames(scite.input) = 'Experiment 1'
rownames(scite.input) = '58'
scite.input[[1,1]] = list(scite.dataset)
source('../generate.scite.input.R')
create.scite.input(scite.input, 'single', 'missing', 0)
|
a9c80a3d8e486e63640113a50f530a73909193d0
|
867426f0d9725bbcc73a41f22695c920df36bdc7
|
/error_calculator.R
|
64cf5be7a082a4816e4ebc3f02244cbb0d407f26
|
[] |
no_license
|
kasungayan/favorita-grocery-sales-forecasting
|
e3626ceef3d3739aea8af8b02f5d08badc78a566
|
1ba4a2419aaed65a67dfb07904a9030af36787ce
|
refs/heads/master
| 2023-04-17T11:48:59.383550
| 2021-05-03T14:45:45
| 2021-05-03T14:45:45
| 275,045,527
| 0
| 0
| null | 2021-05-03T14:44:41
| 2020-06-26T00:58:26
|
R
|
UTF-8
|
R
| false
| false
| 1,849
|
r
|
error_calculator.R
|
# Favorita-grocery-sales-forecasting.
# Error calculation script: computes NWRMSLE (normalized weighted root mean
# squared logarithmic error) and mean SMAPE over 528 forecast series.
# Kasun Bandara, June 2020
# Define the generated forecasts and actual values file name.
# NOTE(review): these are placeholders to be replaced with real file paths
# before running.
generated_forecast <- "GENERATED_FORECAST_FILE_NAME"
actual_forecast <- "ACTUA_FILE_NAME"
df_forecast <-
read.csv(generated_forecast, sep = ",", header = FALSE)
# Hard-coded to the 528 series of this experiment.
df_forecast <- df_forecast[1:528, ]
# Define the actual test file name.
df_actual <- read.csv(actual_forecast, sep = ",", header = FALSE)
# Get the perishable information.
# Perishable items receive weight 1.25 in NWRMSLE, others 1.00.
df_perishable <-
read.csv("Sales_test_final.txt", sep = ",", header = TRUE)
actual_df <- matrix(NA, ncol = 28, nrow = 528)
forecast_df <- matrix(NA, ncol = 28, nrow = 528)
weightError = list()
weights = list()
# Per-series squared log errors and weights over the 28-day horizon.
for (idr in 1:528) {
actual_predictions <- as.numeric(df_actual[idr,])
model_predictions <- as.numeric(df_forecast[idr,])
forecast_df[idr, ] <- model_predictions
actual_df[idr, ] <- actual_predictions
perishable <- as.numeric(df_perishable$perishable)[idr]
if (perishable == 1) {
weight <- 1.25
} else{
weight <- 1.00
}
# log1p difference between forecast and actual.
error <-
log(model_predictions + 1, base = exp(1)) - log(actual_predictions + 1, base = exp(1))
weighted_error <- weight * (error ^ 2)
weightError[[idr]] <- sum(weighted_error)
weights[[idr]] <- sum(rep(weight, 28))
}
# NWRMSLE = sqrt(sum of weighted squared log errors / sum of weights).
NWRMSLE_error <- sqrt(Reduce("+", weightError) / Reduce("+", weights))
# Printing NWRMSLE_error
print(NWRMSLE_error)
# SMAPE with an epsilon-stabilised denominator (floored at 0.5 + epsilon).
epsilon = 0.1
sum = NULL
comparator = data.frame(matrix((0.5 + epsilon),
nrow = nrow(df_actual),
ncol = ncol(df_actual)
))
sum = pmax(comparator, (abs(df_forecast) + abs(df_actual) + epsilon))
time_series_wise_SMAPE <- 2 * abs(df_forecast - df_actual) / (sum)
SMAPEPerSeries <- rowMeans(time_series_wise_SMAPE, na.rm = TRUE)
# Printing SMAPE_error
print(mean(SMAPEPerSeries))
|
d14965711e7eec4f25449bc52a0edc10563789c7
|
3a42630716521b58a20d5a9445fd3eb1007188aa
|
/man/color-interpolation-presentationAttribute.Rd
|
361d42ac018aa605b485d175b1402aaa3e74bbae
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
mslegrand/svgR
|
2a8addde6b1348db34dee3e5145af976008bf8f0
|
e781c9c0929a0892e4bc6e23e7194fb252833e8c
|
refs/heads/master
| 2020-05-22T01:22:16.991851
| 2020-01-18T03:16:30
| 2020-01-18T03:16:30
| 28,827,655
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,812
|
rd
|
color-interpolation-presentationAttribute.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_PresAttrPages.R
\name{color-interpolation-presentationAttribute}
\alias{color-interpolation-presentationAttribute}
\title{color.interpolation}
\description{
Governs the optimization strategy for color-interpolation.
}
\section{Available Attribute Values}{
\describe{
\item{\emph{'auto'}}{Specifies an auto selection of the color interpolation.}
\item{\emph{'inherit'}}{Specifies to inherit the method of color interpolation from the parents.}
\item{\emph{'linearRGB'}}{Specifies to use a linearized RGB color space for color interpolation}
\item{\emph{'sRGB'}}{Specifies to use sRGB color space for color interpolation}
}
}
\section{Used by the Elements}{
\describe{
\item{\emph{Animation Elements}}{\code{\link[=animate]{animate}}, \code{\link[=animateColor]{animateColor}}}
\item{\emph{Graphics Referencing Elements}}{\code{\link[=image]{image}}, \code{\link[=use]{use}}}
\item{\emph{Non-structural Container Elements}}{\code{\link[=a]{a}}, \code{\link[=glyph]{glyph}}, \code{\link[=marker]{marker}}, \code{\link[=mask]{mask}}, \code{\link[=missing-glyph]{missing.glyph}}, \code{\link[=pattern]{pattern}}, \code{\link[=switch]{switch}}}
\item{\emph{Shape Elements}}{\code{\link[=circle]{circle}}, \code{\link[=ellipse]{ellipse}}, \code{\link[=line]{line}}, \code{\link[=path]{path}}, \code{\link[=polygon]{polygon}}, \code{\link[=polyline]{polyline}}, \code{\link[=rect]{rect}}}
\item{\emph{Structural Container Elements}}{\code{\link[=defs]{defs}}, \code{\link[=g]{g}}, \code{\link[=svg]{svg}}, \code{\link[=symbol]{symbol}}}
\item{\emph{Text Content Elements}}{\code{\link[=text]{text}}}
}
}
\keyword{internal}
|
b39b2804ff8f21cd2b7a8a85afa5a2cbaa6d2d2a
|
2cd54a4365c128d94c120a204aaccf68c3607b49
|
/tests/testthat/test-random_subsampling.R
|
14a05ed4d4d6e4a9446291eada3b61d0afd3f916
|
[
"MIT"
] |
permissive
|
tikunov/AlpsNMR
|
952a9e47a93cbdc22d7f11b4cb1640edd736a5c7
|
748d140d94f65b93cb67fd34753cc1ef9e450445
|
refs/heads/master
| 2021-01-13T17:35:29.827357
| 2020-02-23T02:35:30
| 2020-02-23T02:35:30
| 242,443,517
| 0
| 0
|
NOASSERTION
| 2020-02-23T02:27:15
| 2020-02-23T02:27:15
| null |
UTF-8
|
R
| false
| false
| 473
|
r
|
test-random_subsampling.R
|
context("test-random_subsampling")
# Smoke tests for random_subsampling(): each call should return a list of
# subsampling splits.  Covers keep_together grouping (presumably keeps
# samples of the same subject on the same side of a split -- the
# expectations below only check the return type), the NULL default, and a
# test_size that does not divide the sample count evenly.
test_that("random_subsampling", {
subject_id <- c("Alice", "Bob", "Alice", "Bob")
# Mirrors the experimental design; currently unused by the expectations.
replicate <- c(1, 1, 2, 2)
rnd <- random_subsampling(1:4, iterations = 2, test_size = 0.25, keep_together = subject_id)
rnd2 <- random_subsampling(1:4, iterations = 2, test_size = 0.25, keep_together = NULL)
rnd3 <- random_subsampling(1:3, iterations = 2, test_size = 0.3)
expect_true(is.list(rnd))
expect_true(is.list(rnd2))
expect_true(is.list(rnd3))
})
|
09c80199809e3a26c5d9658689fe6c518bf8ff75
|
c972b8f8b15c9aafa0f0a2dd0e407c9ca08b8a54
|
/R/hello.R
|
ade3905a72b437b194f32464cbe0a1358d7bf896
|
[] |
no_license
|
nathania/dummy
|
4f2dbf0829986e5f6b951945cbc13ec78ee09595
|
f1c62b5049521eb6eee02be4231f200c0f116814
|
refs/heads/master
| 2021-01-01T05:14:47.194611
| 2016-04-20T15:18:54
| 2016-04-20T15:18:54
| 56,693,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
hello.R
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
# Namaste
# Print the classic greeting, implementing the behavior documented in the
# header comments above ("prints 'Hello, world!'"); the original body was
# empty and did nothing.
#
# Returns NULL invisibly so the function is used purely for its side effect.
hello <- function() {
  cat("Hello, world!\n")
  invisible(NULL)
}
zzz
yyy
xxx
=======
#bla bla bla
# Ashmit added a line
# natty added this line
# Freddie Mercury
|
b27478683d4ade9c7a95283b9667f0a138d177ec
|
0d0cac5c14fad2728187b708c666c304ca249648
|
/Multi_ sets.r
|
c20a743af9b2ba179c02e0ccc283098e651e444c
|
[] |
no_license
|
bfuhs/R_disasters
|
feb6d69c2ece36b5cf058d784d35d01ae0026026
|
e71fc10d5f4318ba0c9e07a9ac949117c6179adc
|
refs/heads/master
| 2016-09-05T21:55:42.596117
| 2013-07-09T20:07:22
| 2013-07-09T20:07:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,008
|
r
|
Multi_ sets.r
|
##### Multi_sets.r
#
# Brendon Fuhs
# updated 2-11-13
#
# NON-DEFAULT PACKAGES REQUIRED
# ggplot2, (xtable?)
#
# FUNCTIONS
#
# makeStatsTable <- function(x, categories)
# # makes a table of descriptive stats for a single dataset split up by category
# makeFitsTable <- function(x, categories, analysis, modelNames=NULL)
# createDiffsByFactor <- (absolute times, categories)
# # Creates frequencies from times
#
##### OLD/UNFINISHED/DELETE
# plotDensitiesByFactor?
# powerFits?
# something to create multiple diffsets and durationsets?
#
#library(xtable)
library(ggplot2)
# addCategory: build a combined table by applying `tableFun` once per entry of
# `categories2` and cbind-ing the results.
# NOTE(review): `x[categories2=category]` passes `categories2` as a *named
# argument* to `[` rather than computing the logical mask
# `categories2 == category`; this almost certainly should be `==`.  Also,
# c() of the data subset with `categories1` coerces types before do.call and
# splits every element into its own argument -- this helper looks unfinished;
# verify before use.
addCategory <- function(x, tableFun, categories1, categories2, ...){
dataTable <- NULL
for (category in categories2){
#dataTable <- cbind(dataTable, tableFun(x[categories2=category], categories1, ...))
dataTable <- cbind(dataTable, do.call(tableFun, as.list(c( x[categories2=category], categories1, as.list(...)))))
}
return (dataTable)
}
# Use xtable on this
### Should I include a "total" or "everything" column? I probably should.
# Build a table of descriptive statistics (via getStats) for `x`, with one
# column per level of `categories`.
makeStatsTable <- function(x, categories) {
  categories <- as.factor(categories)
  category_levels <- levels(categories)
  # One column of getStats() output per category level.
  stats_for_level <- function(lvl, x) {
    getStats(x[categories == lvl])
  }
  stats_table <- sapply(category_levels, stats_for_level, x = x)
  colnames(stats_table) <- category_levels
  stats_table
}
# can use with hazardAnalysis
# analysis is a choice of function from Stats_stuff.r
#### 2-10 added "=hazardAnalysis"
# makeFitsTable: run `analysis` (default hazardAnalysis) on `x` within each
# category, after dropping categories with fewer than `maxLength` non-NA
# values; returns one column of fit info per surviving category.
# NOTE(review): `modelNames` is accepted but unused, and the body still
# contains debugging print() calls.
makeFitsTable <- function(x, categories, analysis=hazardAnalysis, maxLength=30, modelNames=NULL){
## modelNames is unused
categories <- as.factor(categories)
newCategories <- categories
# Drop under-populated categories, removing their data from x as well.
for (level in levels(categories)){ # Do I need to worry about NAs here?
if (length(newCategories[newCategories==level & !is.na(x)]) < maxLength){
x <- x[newCategories != level]
newCategories = factor(newCategories[newCategories!=level])
print(paste( level, " category omitted due to not enough values"))
}
}
categories <- newCategories
print(levels(categories))##################
print("HEY!") #####################
# Run the chosen analysis on one category's data, or NULL if it is empty.
getFits <- function(category, x, analysis)
{
print(category) #######
print(length(x[categories == category & !is.na(x)] )) #######
print(class(x[categories == category])) #######
print(head(x[categories == category & !is.na(x)] )) #######
if (length(x[categories == category])==0){ #######
print("GOLLEE!") #######
return(NULL) ###########
} ######################## ALL THE NA'S ARE AT THE END???
return ( analysis(x[categories == category], as.character(category)) )
}
fitsInfoTable <- sapply(levels(categories), getFits, x=x, analysis=analysis)
colnames(fitsInfoTable) <- levels(categories)
#fitsInfoTable <- cbind(fitsInfoTable, "All data" = analysis(x, "all data"))
return (fitsInfoTable)
}
# createDiffsByFactor (absolute times, categories)
# * Does not have to be sorted by times
# createDiffsByFactor: compute inter-event times (via createDiffs) separately
# within each category, then return the diffs re-ordered to match the input
# order of `times`.  `times` does not have to be sorted.
# NOTE(review): `length(categories==level) > 0` is the length of the logical
# vector, so it is TRUE whenever `categories` is non-empty;
# `sum(categories==level) > 0` was probably intended.
createDiffsByFactor <- function(times, categories){
categories <- as.factor(categories)
# Original positions are remembered through names so the per-category sort
# can be undone at the end.  Breaks for length(times) <= 1 (author's note).
names(times) <- 1:length(times) # breaks if times is length 1 or less
sortVector <- diffs <- rep(NA, length(times))
for (level in levels(categories)){
if (length(categories==level) > 0 ) {
theseTimes <- sort(times[categories==level], na.last = TRUE)
diffs[categories==level] <- createDiffs(theseTimes)
sortVector[categories==level] <- names(theseTimes)
}
}
#print(length(sortVector))###################### (debugging)
#print(length(diffs))########################### yup these are the same
#print(sortVector[is.na(sortVector)])## SHOULD THRE BE NA's in sortVector?
# Undo the per-category sort: restore the original input ordering.
diffs <- diffs[order(as.numeric(sortVector))] ###
return (diffs)
}
###########################
### OLD OLD OLD #######
####################
# Older combined fits/stats table builder, kept for reference (see the
# OLD/UNFINISHED/DELETE section header above); superseded by
# makeStatsTable()/makeFitsTable().
makeFitsTableOLD <- function(allDataVec, categories){
if (length(allDataVec)!=length(categories)){
print ("data and category vectors are not the same length")
return (NULL)
}
# create list/vector of subsets using factor
subsets <- split(allDataVec, factor(unlist(categories)))
# function to generate info I'm looking for
# Calls Stats_stuff.r
# Analyze one subset: fit models via hazardAnalysis() and collect parameter
# estimates, chi-square statistics, and descriptive stats; NA if too small.
analyzeSubset <- function(subset){
if (length(subset[!is.na(subset)])<5){ #### I need to exclude 0's here too
return (NA)
}
subsetInfo <- hazardAnalysis(subset, "nullname") # I should as.character(subset)
modelFits <- subsetInfo$fitList
modelChisqStats <- subsetInfo$chiSquareList
pars <- list()
chis <- list()
for (dist in modelFits){
pars <- c(pars, list(dist$estimate))
}
for (chisqStat in modelChisqStats){
chis <- c(chis, chisqStat)
# chis <- c(chis, list(chisqStat$statistic)) # or maybe I should get p-value?
}
names(chis) <- names(pars) <- names(modelFits)
N <- length(subset)
statstcs <- getStats(subset)
return (list("N"=N, "parameters"=pars, "chiSquareStats"=chis, "stats"=statstcs ) )
}
# Create matrix of info by applying above function
subsetsInfo <- sapply(subsets, analyzeSubset)
names(subsetsInfo) <- names(subsets)
# Show the table
View(subsetsInfo) ###### This is the second worst way to do it
return (subsetsInfo)
}
# Plot the empirical distribution (via plotEmpirical) of `x` for each level
# of `categories`, one plot per level.  The commented-out ggplot2 code is an
# abandoned attempt at a single overlaid density plot.
plotDensitiesByFactor <- function(x, categories){
categories <- as.factor(categories)
##thisPlot <- ggplot()
##for (level in levels(categories)){
## thisPlot <- thisPlot + geom_density(x[categories==level])
##
##}
#df <- data.frame(cat=categories, dat=x)
#plt <- ggplot(df, aes(x=dat, fill=cat)) + geom_density(alpha=.3)
#plt <- plt + scale_y_log10(limits = c(1,20)) + scale_x_log10(limits = c(1,100))
#plt
for (level in levels(categories)){
plotEmpirical(x[categories==level], level)
}
}
# Fit a power law (via fitPower(), defined elsewhere) to each subset of `x`
# split by category level, mirroring plotDensitiesByFactor() above.
#
# Bug fix: the original iterated over levels(categories) without coercing to
# a factor first. For a plain character or numeric vector, levels() returns
# NULL, so the loop silently did nothing. Coerce exactly as the sibling
# function plotDensitiesByFactor() does.
powerFits <- function(x, categories){
  categories <- as.factor(categories)
  for (level in levels(categories)){
    fitPower(x[categories==level], level)
  }
}
|
dc216326441530949ec72ab36da6a37873be4a45
|
dc5568c5988381e9d46ac9779e0ff7bb72630301
|
/tests/testthat.R
|
d85eb10df8106db6733c26ef504a8164d009dfcf
|
[] |
no_license
|
SoftFx/TTWebClient-R
|
f6a0f756df7ff139f03fb31f7fa94a3471402c17
|
a2538a19721828b8e439b1dca1611dcefde31633
|
refs/heads/master
| 2022-10-23T03:40:43.340643
| 2022-10-11T14:19:03
| 2022-10-11T14:19:03
| 35,541,118
| 0
| 1
| null | 2018-12-21T16:15:44
| 2015-05-13T09:50:34
|
HTML
|
UTF-8
|
R
| false
| false
| 68
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test under
# tests/testthat/ for the RTTWebClient package (invoked by R CMD check).
library(testthat)
library(RTTWebClient)
test_check("RTTWebClient")
|
b726d49ab837d43c77572eef2ed22c47f7e29ed0
|
24e58764de7e1ecc1efdd5eee25800d63ab96d26
|
/GEO837_class_18_climate_plot_code_updated_20181031.R
|
bf6e885fd81f1e7afc37fe4e9698cab2c5078eda
|
[] |
no_license
|
dpoursanidis/GEO837
|
73df21d639b3ec94fcb96a964632eced9d0d7c0d
|
418b82887195dbaa22971b2aafb971b11287e0c8
|
refs/heads/master
| 2020-08-27T23:13:37.570992
| 2018-11-07T13:59:53
| 2018-11-07T13:59:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,141
|
r
|
GEO837_class_18_climate_plot_code_updated_20181031.R
|
#### GEO 837 - Remote Sensing of the Biosphere ####
#### Author: Kyla M. Dahlin, MSU Geography ########
#### making climate plots from worldclim data #####
# note - in RStudio go to 'Tools > Global Options' to change background color,
# font size, and 'soft-wrap R source files' (in 'Code')
# load packages to do these analyses
# note if this is new to you you'll need to first run
#install.packages(c("raster", "rgdal", "maptools"))
library(raster)
library(rgdal)
library(maptools)
# set your working directory
setwd("YOUR WORKING DIRECTORY")
# what day is it? use this to add to output file names to keep track of when you
# made something
today <- "20181029"
# because our data is stored elsewhere, let's also give that a name
# Kyla downloaded this data from worldclim.org, it is commonly used (though not
# perfect data for modern data climate)
# the reference for this data is Fick et al 2017, which is in the 'extra'
# readings section on D2L
data.path <- "WORLDCLIM2 DATA LOCATION"
# read in a file just to check it out
in.tavg <- raster(paste0(data.path, "wc2.0_10m_tavg/wc2.0_10m_tavg_06.tif"))
# take a look
plot(in.tavg)
# but what we really want is to get ALL of the data just for one location
# (the kml file of your path/row)
# note that this is assuming you have a kmz file (zipped kml) outlining the
# boundary of your study area, and it is located in your working directory
# first unzip the KMZ to a kml (this creates a file in your working directory,
# called doc.kml which I don't love...)
kml.unzipped <- unzip("carrizo_box_kml.kmz",
files = "doc.kml",
unzip = "internal")
# this is UPDATED!!! turns out the other function is old, this is better.
# everything else will still run the same, ish.
in.kml <- readOGR(kml.unzipped)
# look at the output, which is now a 'spatialPolygonsDataFrame'
in.kml
# convert to a geographic extent object - NOTE: for us we know we have a kml
# with a single polygon in it, so getting the extent is more or less the same
# as looking at a rectangle. If you read in a kml with multiple polygons, this
# would give you the outer extent of ALL Of them.
in.kml.ext <- extent(in.kml)
# look at it to see format
in.kml.ext
# you can plot it on your temperature map (if you still have it open), too
plot(in.kml.ext, add = TRUE)
# now extract TAVG for your location
TAVG.loc <- extract(in.tavg, in.kml.ext, method = 'simple', fun = mean,
na.rm = TRUE)
# did that give us a single, reasonable value?
TAVG.loc
# OK, but what we really want is tmin, tmean, tavg, and precip for each month
# so we need to do some more work!
# first we need a dataframe to put our outputs into
out.clim <- matrix(NA, nrow = 12, ncol = 5)
out.clim <- as.data.frame(out.clim)
# what does it look like?
out.clim
# let's give it some names
names(out.clim) <- c("month", "TMIN", "TAVG", "TMAX", "PRECIP")
# and let's assign number months to the first column
out.clim[,1] <- 1:12
# now what we want to do is open each raster file, extract the data for our kml
# polygon, and write it into the proper place in our table
# to do that we'll need to use a 'for-loop', but first we should get all of
# our filenames and paths sorted out
months <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11",
"12")
TMIN.files <- paste0("wc2.0_10m_tmin_", months, ".tif")
TAVG.files <- paste0("wc2.0_10m_tavg_", months, ".tif")
TMAX.files <- paste0("wc2.0_10m_tmax_", months, ".tif")
PRECIP.files <- paste0("wc2.0_10m_prec_", months, ".tif")
# Extract the study-area mean of each monthly worldclim layer into out.clim.
# The original loop repeated four near-identical raster/extract stanzas (one
# per climate variable); that duplication is factored into a small helper.
extract_monthly_mean <- function(subdir, fname) {
  # Read one monthly worldclim GeoTIFF from `data.path` and return the mean
  # value over the study-area extent (in.kml.ext, defined above).
  r <- raster(paste0(data.path, subdir, fname))
  extract(r, in.kml.ext, method = 'simple', fun = mean, na.rm = TRUE)
}

for (m in 1:12) {
  out.clim$TMIN[m]   <- extract_monthly_mean("wc2.0_10m_tmin/", TMIN.files[m])
  out.clim$TAVG[m]   <- extract_monthly_mean("wc2.0_10m_tavg/", TAVG.files[m])
  out.clim$TMAX[m]   <- extract_monthly_mean("wc2.0_10m_tmax/", TMAX.files[m])
  out.clim$PRECIP[m] <- extract_monthly_mean("wc2.0_10m_prec/", PRECIP.files[m])
  # Progress indicator so long extractions show the current month.
  print(m)
}
# check and make sure you actually got values
out.clim
### NOW TO MAKE A PLOT! GRAYSCALE FIRST
# first get max and min values for temperature
max.temp <- max(out.clim$TMAX)
min.temp <- min(out.clim$TMIN)
dummy.temp <- c(rep(min.temp, 6), rep(max.temp, 6))
title = "Climate of the Carrizo Plain, California (1970-2000 avg)"
months.letters <- c("J","F","M","A","M","J","J","A","S","O","N","D")
# make a plot!
tiff(paste0("carrizo_climate_plot_", today, ".tif"),
units = "in",
width = 6.5,
height = 4.5,
res = 200)
par(mar = c(4,4,4,4))
barplot(c(rep(0,6),
rep(max(out.clim$PRECIP, na.rm = T) + 5, 6)),
col = NA,
border = NA,
xaxt = "n",
yaxt = "n",
axes = F)
barplot(out.clim$PRECIP,
pch = 16,
col = "gray50",
border = "gray50",
yaxt = "n",
add = T,
axes = F,
names.arg = months.letters,
xlab = "Month")
axis(side=4, at = pretty(range(out.clim$PRECIP, na.rm = T)))
mtext("Precipitation (mm)", side = 4, line = 2.3, cex = 0.8)
par(new = T)
plot(1:12, dummy.temp, type = "n", xaxt = "n",
xlab = NA, ylab = NA, main = title)
mtext("Temperature (C)", side = 2, line = 2.3, cex = 0.8)
lines(out.clim$TMIN, col = "gray80", lty = 1, lwd = 1)
lines(out.clim$TMAX, col = "gray80", lty = 1, lwd = 1)
lines(out.clim$TAVG, col = "black", lty = 1, lwd = 2)
dev.off()
# I'm leaving as an open exercise changing the colors to something more fun
# than grayscale. Check out colorbrewer2.org for good ideas.
###############################################################################
#################### BASIC RASTER MANIPULATION ################################
###############################################################################
# now let's read in and stack the temperature rasters to look at patterns.
# first read in the january tavg data as a single raster
tavg.stack <- raster(paste0(data.path,
"wc2.0_10m_tavg/",
TAVG.files[1]))
# take a look at the data in this single layer geotiff
tavg.stack
# one thing to check is where are the NA values
plot(is.na(tavg.stack))
# this will return a map where 1 = NA values, 0 = non-NA values
# then write a for-loop to read in each subsequent tavg layer and stack it on
# to the previous one.
for (m in 2:12) {
in.tavg <- raster(paste0(data.path,
"wc2.0_10m_tavg/",
TAVG.files[m]))
tavg.stack <- stack(tavg.stack, in.tavg)
print(m)
}
# now look at the data stack
tavg.stack
# now we can do spatial and non-spatial calculations
# we can calculate the mean just by using the normal 'mean' command... this
# doesn't work for fancier functions (like standard deviation)
annual.mean.temp <- mean(tavg.stack, na.rm = TRUE)
# take a look at the map
plot(annual.mean.temp)
# we can also calculate summary stats for each layer
monthly.means <- cellStats(tavg.stack, "mean", na.rm = TRUE)
monthly.means
# and calculate the global mean
global.mean <- mean(monthly.means)
# but you can also do 'raster math' to look at more interesting things
# like calculate the range of temperatures at each pixel
temp.range <- max(tavg.stack, na.rm = TRUE) - min(tavg.stack, na.rm = TRUE)
plot(temp.range)
# and THE BEST PART ABOUT R (in Kyla's opinion) is that you can write any
# function you can think of, and apply it to a raster stack using the 'calc'
# function!!
# easy one = calculate the standard deviation of average temperture at each
# pixel using the standard 'sd' function (this is really slow, so may skip
# this if running short on time - surprise! it calculates the sd)
temp.sd <- calc(tavg.stack, fun = sd, na.rm = TRUE)
plot(temp.sd)
# now to try this out we're going to write a function to find around which
# months the biggest change in temperature occurs from the next month, then
# apply that to the whole timeseries
# first we write a function to do what we want (note: hardest part of this is
# figuring out where it may go wrong. this is often in exceptional cases like
# a string of all NA values)
# Given a cyclic monthly series `x`, return the (first) index i at which the
# absolute change |x[i] - x[i+1]| is largest. The series wraps around, so the
# last element is compared against the first (December -> January).
#
# Returns NA for an all-NA input (e.g. ocean pixels when applied over a
# raster stack with calc()) and -- robustness fix vs. the original -- for an
# empty input, which previously fell through the all-NA guard and produced
# max(numeric(0)) == -Inf with a warning.
#
# `na.rm` is accepted for compatibility with calc(); NAs in `x` are always
# skipped when locating the maximum.
biggest.change <- function(x, na.rm = TRUE) {
  n <- length(x)
  # Guard: nothing to compare on empty or all-NA input.
  if (n == 0 || sum(is.na(x)) == n) {
    return(NA)
  }
  # The series shifted left by one position, with wrap-around.
  x.next <- c(x[-1], x[1])
  # Absolute month-to-month change; NA wherever either neighbour is NA.
  abs.diff <- abs(x - x.next)
  max.diff <- max(abs.diff, na.rm = TRUE)
  # which() skips NA comparisons; take the first index in case of ties.
  which(abs.diff == max.diff)[1]
}
# now to see if it works, try it on a test vector
test <- c(2,3,1,4,5,2,8,11,NA,9,1,7)
# expected: 10 -- the largest month-to-month jump is |9 - 1| = 8, at index 10
biggest.change(test)
# now try on a string of NAs to see what happens (trouble shooting!)
test.na <- c(NA, NA, NA, NA, NA)
# if this runs and returns an NA without an error, we win!
biggest.change(test.na)
# if it worked, proceed to try on the whole dataset!
# (calc() applies the function pixel-wise across the 12-layer monthly stack)
big.change.month <- calc(tavg.stack, fun = biggest.change, na.rm = TRUE)
# and take a look
plot(big.change.month)
# to see just a single month, you can just plot it (like this is all the places
# where big change month is July)
plot(big.change.month == 7)
# the power of R is being able to write ANY FUNCTION YOU WANT and then be able
# to do it on a grid. this is do-able in other programs, but not as simple.
|
c46a5315ed8e62563042b2d0481bfbb1f967f14f
|
8fcd363c8dd5cb712cd8ed88a37f5138dd4cf079
|
/man/ggGroup.Rd
|
d32d3e497da14bcc14f97dd0166bb1d82ed20f31
|
[
"MIT"
] |
permissive
|
GreenleafLab/ArchR
|
2d4fd5b4febf21d0d0315922fc1690ef16a6a2a0
|
c61b0645d1482f80dcc24e25fbd915128c1b2500
|
refs/heads/master
| 2023-09-04T05:04:35.202961
| 2023-05-17T12:47:27
| 2023-05-17T12:47:27
| 216,123,064
| 313
| 132
|
MIT
| 2023-09-01T16:14:59
| 2019-10-18T23:35:41
|
R
|
UTF-8
|
R
| false
| true
| 2,101
|
rd
|
ggGroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GgplotUtils.R
\name{ggGroup}
\alias{ggGroup}
\title{A ggplot-based ridge/violin plot wrapper function}
\usage{
ggGroup(
x = NULL,
y = NULL,
xlabel = NULL,
ylabel = NULL,
groupOrder = NULL,
groupSort = FALSE,
size = 1,
baseSize = 10,
ridgeScale = 1,
ratioYX = NULL,
alpha = 1,
title = "",
pal = paletteDiscrete(values = x, set = "stallion"),
addBoxPlot = TRUE,
plotAs = "ridges",
...
)
}
\arguments{
\item{x}{A character vector containing the categorical x-axis values for each y-axis value.}
\item{y}{A numeric vector containing the y-axis values for each point.}
\item{xlabel}{The label to plot for the x-axis.}
\item{ylabel}{The label to plot for the y-axis.}
\item{groupOrder}{A character vector indicating a custom order for plotting x-axis categorical values. Should contain all possible
values of \code{x} in the desired order.}
\item{groupSort}{A boolean indicating whether to sort groups based on the average value of the group.}
\item{size}{The line width for boxplot/summary lines.}
\item{baseSize}{The base font size (in points) to use in the plot.}
\item{ridgeScale}{A numeric indicating the relative size for each ridge in the ridgeplot.}
\item{ratioYX}{The aspect ratio of the x and y axes on the plot.}
\item{alpha}{A number indicating the transparency to use for each point. See \code{ggplot2} for more details.}
\item{title}{The title of the plot.}
\item{pal}{A named custom palette (see \code{paletteDiscrete()} and \code{ArchRPalettes}) for discrete coloring.}
\item{addBoxPlot}{A boolean indicating whether to add a boxplot to the plot if \code{plotAs="violin"}.}
\item{plotAs}{A string indicating how the groups should be plotted. Acceptable values are "ridges" (for a \code{ggridges}-style plot) or "violin" (for a violin plot).}
\item{...}{Additional parameters to pass to \code{ggplot2} for plotting.}
}
\description{
This function is a wrapper around ggplot geom_density_ridges or geom_violin to allow for plotting group distribution plots in ArchR.
}
|
8e8a82a052617c5c7046cd947665d57f09f07540
|
e5c066999a2d2cc87565e54cdc4c1a03db11ac4d
|
/trees_randomForest_bagging.R
|
7eaf5060eb586d43d5f9a3c7a712a6c40990b2b2
|
[] |
no_license
|
jpwyckoff/Trees
|
e745d9a67a15f65b10447d202737943f13951fb7
|
0780bf427be28b784599762ecd4668b47c5c66f4
|
refs/heads/master
| 2021-08-31T22:42:18.955542
| 2017-12-23T06:29:05
| 2017-12-23T06:29:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,217
|
r
|
trees_randomForest_bagging.R
|
##Fitting regression tree
library(MASS)
library(ISLR)
library(tree)
attach(Boston)
names(Boston)
set.seed(1)
#create training set
train = sample(1:nrow(Boston), nrow(Boston)/2)
tree.boston = tree(medv~.,Boston, subset=train)
summary(tree.boston)
#---in summary you will see: ----
#variables actually used in tree construction:
#Number of terminal nodes:
#Residual mean deviance: === in a regression tree deviance is the sume of squared errors for the tree
#Distribution of residuals:
#plot the tree
plot(tree.boston)
#add values to the tree
text(tree.boston, pretty=0)
#cross validation of the tree to see what tree is selected
cv.boston=cv.tree(tree.boston)
plot(cv.boston$size, cv.boston$dev, type = 'b')
## If you want to prune the tree
prune.boston=prune.tree(tree.boston,best=5)
plot(prune.boston)
text(prune.boston, pretty=0)
#pick your tree (use cv or pruned), below is for an unpruned tree
yhat=predict(tree.boston, newdata=Boston[-train,])
boston.test=Boston[-train, "medv"]
plot(yhat, boston.test)
abline(0,1)
mean((yhat-boston.test)^2) #gives you the MSE associated with the regression tree
####bagging and random forest
library(randomForest)
set.seed(1)
bag.boston=randomForest(medv~.,data=Boston, subset=train, mtry=13, importance=TRUE)
bag.boston #bagging is simply a special case of a random forest with m=p
#mtry=13 indicates that all 13 predictors should be considered for each split of the tree
#see how well this bagged model perform on the test set?
yhat.bag=predict(bag.boston, newdata=Boston[-train,])
plot(yhat.bag, boston.test)
abline(0,1)
#get the test set MSE
mean((yhat.bag-boston.test)^2)
# change the number of trees grown with the ntree argument
bag.boston=randomForest(medv~.,data=Boston, subset = train, mtry=13,ntree=25)
yhat.bag=predict(bag.boston, newdata=Boston[-train,])
mean((yhat.bag-boston.test)^2)
###Random Forest
#is the same as above but we use a smaller mtry = #
#by default randomForest() uses p/3 variables for regression trees
set.seed(1)
rf.boston=randomForest(medv~.,data=Boston,subset=train,mtry=6,importance=T)
yhat.rf=predict(rf.boston,newdata=Boston[-train,])
#get test set MSE
mean((yhat.rf-boston.test)^2)
#use importance function to view the importance of each variable
importance(rf.boston)
#plot the importance
varImpPlot(rf.boston)
##Boosting
library(gbm)
set.seed(1)
boost.boston=gbm(medv~.,data=Boston[train,], distribution="gaussian",n.trees=5000,interaction.depth=4)
#use summary function to produce relative influence plot and outputs of relative influence statistics
summary(boost.boston)
#produce partial dependence plots for the influencial variables
## these plots illustrate the marginal effect of the selected variables on the response after integrating out the other variables
par(mfrow=c(1,2))
plot(boost.boston, i="rm")
plot(boost.boston, i="lstat")
#now use the boosted model to predict Y on the test set
yhat.boost=predict(boost.boston,newdata=Boston[-train,],n.trees=5000)
#get test MSE
mean((yhat.boost-boston.test)^2)
###references
#creds to this book
#Introduction to Applied Statistical Learning with Applications in R by James, Witten, Hastie & Tibshirani
#http://www-bcf.usc.edu/~gareth/ISL/ISLR%20First%20Printing.pdf
|
b09c4f1208658fc4ff2d67c42934770c176c971c
|
f255ef3c7452a307bbaaf95a092e4279aa5f366e
|
/test_data/visibility_analysis_within.R
|
a7148e11aa6b79b6e9bcff3c2dfb8c6b0133cfd3
|
[] |
no_license
|
bbuchsbaum/eyesim
|
a1a61068f53a16925566deb81e03fa5943686f0e
|
4d4f48ef0b1812d5200b86d7216f8d03792c2435
|
refs/heads/master
| 2023-05-11T00:43:30.086058
| 2023-05-08T15:02:35
| 2023-05-08T15:02:35
| 86,451,769
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,601
|
r
|
visibility_analysis_within.R
|
library(dplyr)
library(tibble)
library(tidyr)
## load study data
pcstudy <- as_tibble(read.csv("~/Dropbox/New_pc_behavioural_data/study_fixations.csv")) %>%
filter(Image != "." & !(Subject %in% c(28,32, 109))) %>% droplevels()
pcstudy$ImageNumber <- as.integer(as.character(pcstudy$ImageNumber))
## create table for each study trial (Subject/Image)
study_tab <- eye_table("FixX", "FixY", duration="FixDuration", onset="FixStartTime",
groupvar=c("Image", "Subject", "Block"), data=pcstudy,
clip_bounds=c(112, (112+800), 684, 84),
vars=c("ImageVersion",
"ImageSet", "Block", "Image", "ImageNumber"))
## load test data
pctest <- as_tibble(read.csv("~/Dropbox/New_pc_behavioural_data/test_fixations.csv")) %>%
mutate(fix_onset=FixOffset) %>%
filter(Image != "." & !(Subject %in% c(28,32, 109))) %>% droplevels()
## create table for each test trial
test_tab <- eye_table("FixX", "FixY", duration="FixDuration", onset="FixOffset",
groupvar=c("Image", "Subject"), data=pctest,
clip_bounds=c(112, (112+800), 684, 84),
vars=c("ImageVersion", "Saliency", "Accuracy",
"ImageSet", "Trial", "Duration", "ImageNumber", "ImageRepetition"))
## construct heatmaps for the study phase, averaged within subjects
study_dens <- density_by(study_tab, groups=c("ImageNumber", "Subject"), xbounds=c(0,800), ybounds=c(0,600), outdim=c(80,60),
duration_weighted=TRUE, sigma=60)
#study_dens_subj_avg <- density_by(study_tab, groups=c("Subject"), xbounds=c(0,800), ybounds=c(0,600), outdim=c(80,60),
# duration_weighted=TRUE, sigma=80)
study_dens_avg <- Reduce("+", lapply(study_dens$density, function(x) x$z))/length(study_dens)
study_dens_avg <- study_dens_avg/sum(study_dens_avg)
study_dens_sqw <- study_dens_avg^(1/2)
study_dens_sqw <- study_dens_sqw/sum(study_dens_sqw)
#sigma <- .1
#weights <- exp(-study_dens_avg^2/(2 * sigma^2))
saliency <- study_dens %>% rowwise() %>% do({
zdens <- .$density$z/sum(.$density$z)
zsqw <- .$density$z^(1/2)
zsqw <- zsqw / sum(zsqw)
zrank <- rank(.$density$z)
zrank <- zrank/sum(zrank)
gg <- expand.grid(x=1:80, y=1:60)
tibble(Subject=.$Subject, Image=.$Image, zdens=list(zdens), zrank=list(matrix(zrank, 80,60)), zsqw=list(zsqw))
})
#write.table(saliency, "~/Dropbox/Jordana_experiments/Jordana_saliency_study/saliency_grid.txt", row.names=FALSE)
library(imager)
maskset <- lapply(levels(pctest$Image), function(im) {
fname <- paste0("~/Dropbox/Jordana_experiments/Jordana_saliency_study/images/Mat_", gsub("jpeg", "jpeg.rds", im))
if (file.exists(fname)) {
print("got it")
readRDS(fname)
} else {
print(paste("no ", im))
NULL
}
})
names(maskset) <- levels(pctest$Image)
## Per-trial visibility/salience scores.
## For each test trial, map fixations onto the 80x60 study-phase saliency
## grid and compute salience at visible vs. invisible fixation locations,
## each baseline-corrected by the sqrt-weighted grand-average density
## (study_dens_sqw, computed above).
##
## Fixes vs. the original:
##  * removed a leftover `browser()` debugging breakpoint that halted
##    non-interactive runs whenever a trial had more than 4 fixations;
##  * removed the dead local `im` (a filename was built but never used --
##    the mask lookup is keyed on .$Image directly).
sal_out <- test_tab %>% rowwise() %>% do({
  print(as.character(.$Image))  # progress indicator, one line per trial
  # Per-subject, per-image normalized study-phase density for this trial.
  sal <- saliency$zdens[[which(saliency$Image == .$ImageNumber & saliency$Subject == .$Subject)]]
  fix <- .$fixgroup
  # Convert fixation pixel coordinates to grid cells (10 px per cell),
  # clamping indices to a minimum of 1.
  # NOTE(review): the upper bounds (80, 60) are not clamped -- assumes
  # fixations never fall outside the image; TODO confirm.
  fm <- round(cbind(fix$x, fix$y)/10)
  fm[,1] <- ifelse(fm[,1] < 1, 1, fm[,1])
  fm[,2] <- ifelse(fm[,2] < 1, 1, fm[,2])
  if (.$Saliency == 100) {
    # Fully intact image: every fixated location counts as visible.
    mvals <- rep(1, nrow(fm))
  } else {
    # Visibility mask for this partially revealed image (1 = visible).
    mask <- maskset[[as.character(.$Image)]]
    mvals <- mask[fm]
  }
  ## the salience of the visible items
  vis <- ifelse(mvals, sal[fm], NA)
  ## the salience of the invisible items
  novis <- ifelse(mvals == 0, sal[fm], NA)
  ## the total salience
  tot <- sal[fm]
  # Baseline (grand-average) salience at the same grid cells.
  bvis <- study_dens_sqw[fm]
  # Indicator columns: fixation on a visible / invisible cell.
  pvis <- ifelse(mvals, 1, 0)
  pnovis <- ifelse(mvals == 0, 1, 0)
  ret <- data.frame(vis=vis-bvis, novis=novis-bvis, totvis=tot-bvis, pvis=pvis, pnovis=pnovis)
  as_tibble(cbind(ret, .))
}) %>% ungroup()
sal_out <- gather(sal_out, key=measure, value=sim, vis, novis, totvis, pvis, pnovis)
#sal_out %>% group_by(Saliency, Duration) %>% summarize(vis=mean(vis), novis=mean(novis), totvis=mean(totvis))
library(mgcv)
library(ggplot2)
gam.1 <- gam(totvis ~ s(fixgroup.onset), data=sal_out)
ggplot(aes(fixgroup.onset, sim, linetype=measure), data=subset(sal_out, measure %in% c("vis", "novis"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE)) + facet_wrap(~ Match)
ggplot(aes(fixgroup.onset, sim, colour=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis") )) + facet_wrap(Saliency ~ Match, ncol=5) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=8, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis") )) + facet_wrap(~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=8, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=factor(Duration)), data=subset(sal_out, measure %in% c("totvis") )) + facet_wrap(~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=8, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=factor(Saliency)), data=subset(sal_out, measure %in% c("totvis") )) + facet_wrap(~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=8, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis") & Saliency < 40 & Duration < 700)) + facet_wrap(~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=6, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=Match), data=subset(sal_out, measure %in% c("totvis"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis"))) + facet_wrap( ~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=factor(Saliency)), data=subset(sal_out, measure %in% c("totvis"))) + facet_wrap(~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis", "tot_other"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis", "tot_other"))) +
facet_wrap(~ Match, nrow=2) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis", "tot_other"))) +
facet_wrap(Match ~ Duration, nrow=2) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=factor(Duration), linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("vis"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE)) + facet_wrap(~ Match)
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis"))) +
facet_wrap( ~ Match) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=Match), data=subset(sal_out, measure %in% c("novis", "novis_other") & Duration == 250)) + facet_wrap( ~ factor(Accuracy)) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=Match), data=subset(sal_out, measure %in% c("novis", "novis_other") & Saliency < 40)) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=12, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Match)), data=subset(sal_out, measure %in% c("novis"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=12, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure, linetype=factor(Accuracy)), data=subset(sal_out, measure %in% c("totvis", "tot_other"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=12, fx=TRUE))
ggplot(aes(fixgroup.onset, sim, colour=measure), data=subset(sal_out, measure %in% c("totvis", "tot_other"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=12, fx=TRUE)) + facet_wrap(Duration ~ Saliency, nrow=3)
ggplot(aes(fixgroup.onset, sim, colour=measure), data=subset(sal_out, measure %in% c("novis", "novis_other",
"totvis", "totvis_other",
"vis", "vis_other"))) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE)) + facet_wrap(Match ~ Duration, nrow=2)
ggplot(aes(fixgroup.onset, totvis, colour=factor(Accuracy)), data=sal_out) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, vis, colour=factor(Saliency)), data=sal_out) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
ggplot(aes(fixgroup.onset, vis, colour=factor(Duration), linetype=Match), data=sal_out) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=7, fx=TRUE))
ggplot(aes(fixgroup.onset, vis, linetype=factor(Match)), data=sal_out) +
geom_smooth(se=FALSE, method=gam, formula = y ~ s(x, k=10, fx=TRUE))
|
bced2746750af00b8b195a5a81ea6fb641bde80c
|
6beef7a871c10d3baf74d87520ae53dbd52cf450
|
/R/SequenceData-coverage.R
|
a83528b53f8b7c1e686792d89c2d33f35417897c
|
[] |
no_license
|
FelixErnst/RNAmodR
|
d66ed5cb83b300b3d9e24f8310f46bb2f9b734ee
|
114a9f8f781a896205e573c3a87f437978dfe03f
|
refs/heads/master
| 2021-11-22T11:37:10.254735
| 2021-08-25T19:23:14
| 2021-08-25T19:23:14
| 53,844,127
| 2
| 0
| null | 2020-04-29T10:15:57
| 2016-03-14T09:40:42
|
R
|
UTF-8
|
R
| false
| false
| 7,681
|
r
|
SequenceData-coverage.R
|
#' @include RNAmodR.R
#' @include SequenceData-class.R
NULL
#' @name CoverageSequenceData-class
#' @aliases CoverageSequenceData CoverageSequenceDataFrame
#'
#' @title CoverageSequenceData
#'
#' @description
#' \code{CoverageSequenceData} implements
#' \code{\link[=SequenceData-class]{SequenceData}} to contain and aggregate the
#' coverage of reads per position along the transcripts.
#'
#' \code{CoverageSequenceData} contains one column per data file named using the
#' following naming convention \code{coverage.condition.replicate}.
#'
#' \code{aggregate} calculates the mean and sd for samples in the \code{control}
#' and \code{treated} condition separatly.
#'
#' @param bamfiles,annotation,seqinfo,grl,sequences,param,args,... See
#' \code{\link[=SequenceData-class]{SequenceData}}
#' @param x a \code{CoverageSequenceData}
#' @param name For \code{getDataTrack}: a valid transcript name. Must be a name
#' of \code{ranges(x)}
#' @param condition For \code{\link{aggregate}}: condition for which the data
#' should be aggregated.
#' @param df,ranges,sequence,replicate inputs for creating a
#' \code{SequenceDataFrame}. See
#' \code{\link[=SequenceDataFrame-class]{SequenceDataFrame}}.
#'
#' @return a \code{CoverageSequenceData} object
#'
#' @examples
#' # Construction of a CoverageSequenceData objectobject
#' library(RNAmodR.Data)
#' library(rtracklayer)
#' annotation <- GFF3File(RNAmodR.Data.example.man.gff3())
#' sequences <- RNAmodR.Data.example.man.fasta()
#' files <- c(treated = RNAmodR.Data.example.wt.1())
#' csd <- CoverageSequenceData(files, annotation = annotation,
#' sequences = sequences)
NULL
#' @rdname CoverageSequenceData-class
#' @export
setClass(Class = "CoverageSequenceDataFrame",
contains = "SequenceDFrame")
#' @rdname CoverageSequenceData-class
#' @export
CoverageSequenceDataFrame <- function(df, ranges, sequence, replicate,
condition, bamfiles, seqinfo){
.SequenceDataFrame("Coverage",df, ranges, sequence, replicate, condition,
bamfiles, seqinfo)
}
#' @rdname CoverageSequenceData-class
#' @export
setClass(Class = "CoverageSequenceData",
contains = "SequenceData",
slots = c(unlistData = "CoverageSequenceDataFrame"),
prototype = list(unlistData = CoverageSequenceDataFrame(),
unlistType = "CoverageSequenceDataFrame",
minQuality = 5L,
dataDescription = "Coverage data"))
#' @rdname CoverageSequenceData-class
#' @export
CoverageSequenceData <- function(bamfiles, annotation, sequences, seqinfo, ...){
.new_SequenceData("Coverage", bamfiles = bamfiles, annotation = annotation,
sequences = sequences, seqinfo = seqinfo, ...)
}
setSequenceDataCoercions("Coverage")
# CoverageSequenceData ---------------------------------------------------------
#' @importFrom GenomeInfoDb seqlevels
.process_coverage_data <- function(coverage, grl){
coverage <- coverage[seqlevels(grl)]
coverage <- as(coverage,"IntegerList")
# subset per transcript
seqs <- .seqs_rl_strand(grl, force_continous = TRUE)
seqs_list <- split(seqs,unlist(unique(GenomicRanges::seqnames(grl))))
coverage <- Map(
function(sn,s){
relist(coverage[[sn]][unlist(s)],s)
},
names(seqs_list),
seqs_list)
partitioning <- IRanges::PartitioningByWidth(
unlist(unname(lapply(coverage,lengths))))
coverage <- relist(unlist(lapply(coverage,unlist),
use.names=FALSE),
partitioning)
coverage <- coverage[names(grl)]
coverage
}
#' @importFrom GenomicAlignments coverage
.get_coverage_from_GA <- function(data, grl){
# get data per chromosome
coverage <- GenomicAlignments::coverage(data)
.process_coverage_data(coverage, grl)
}
#' @importFrom GenomicAlignments coverage
.get_position_data_of_transcript_coverage <- function(bamFile, grl, param,
args = list()){
data <- .load_bam_alignment_data(bamFile, param, args)
.get_coverage_from_GA(data, grl)
}
#' @rdname CoverageSequenceData-class
#' @export
setMethod("getData",
signature = c(x = "CoverageSequenceData",
bamfiles = "BamFileList",
grl = "GRangesList",
sequences = "XStringSet",
param = "ScanBamParam"),
definition = function(x, bamfiles, grl, sequences, param, args){
data <- lapply(bamfiles,
FUN = .get_position_data_of_transcript_coverage,
grl = grl,
param = param,
args = args)
names(data) <- rep("coverage",length(data))
data
})
# aggregation ------------------------------------------------------------------
#' @rdname CoverageSequenceData-class
#' @export
setMethod("aggregateData",
signature = c(x = "CoverageSequenceData"),
function(x, condition = c("Both","Treated","Control")){
condition <- tolower(match.arg(condition))
.aggregate_list_data_mean_sd(x, condition)
}
)
# data visualization -----------------------------------------------------------
RNAMODR_PLOT_SEQ_COVERAGE_NAMES <- c("means" = "mean(coverage)")
# Strip the per-condition standard-deviation columns ("sds.*") from the
# unlisted element metadata, keeping only the mean-coverage columns for
# plotting.
#
# Fix: the original pattern "sds." was interpreted as a regex, where "."
# matches any character (so e.g. a column named "sdsX" would also be
# dropped). Use fixed = TRUE to match the literal "sds." prefix.
.clean_mcols_coverage <- function(seqdata){
  d <- mcols(seqdata@unlistData)
  d <- d[,!grepl("sds.",colnames(d),fixed=TRUE),drop=FALSE]
  mcols(seqdata@unlistData) <- d
  seqdata
}
#' @rdname CoverageSequenceData-class
# Build Gviz DataTrack objects (histogram of mean coverage) for the entry
# `name`, one track per experimental condition present in `x`. Extra `...`
# arguments are forwarded to Gviz::displayPars on each track.
setMethod(
  f = "getDataTrack",
  signature = signature(x = "CoverageSequenceData"),
  definition = function(x, name, ...) {
    args <- list(...)
    # DataTrack for sequence data
    seqdata <- .get_data_for_visualization(x, name)
    # clean meta data columns (drop sd columns, keep means)
    seqdata <- .clean_mcols_coverage(seqdata)
    seqdata <- unlist(seqdata)
    # NOTE(review): conditions() is a project generic; presumably returns the
    # per-sample condition labels ("control"/"treated") — confirm.
    conditions <- unique(conditions(x))
    if("control" %in% conditions){
      d <- seqdata[,grepl("control",colnames(mcols(seqdata)))]
      # NOTE(review): "." in this gsub pattern is a regex wildcard; harmless if
      # column names are exactly "*.control", but not a literal match — confirm.
      colnames(mcols(d)) <- gsub(".control","",colnames(mcols(d)))
      dt.control <- Gviz::DataTrack(range = d,
                                    group = factor("means"),
                                    name = paste0(RNAMODR_PLOT_SEQ_COVERAGE_NAMES["means"],
                                                  "\ncontrol"),
                                    type = "histogram")
      # White title background / black text so the track blends with the plot.
      Gviz::displayPars(dt.control)$background.title <- "#FFFFFF"
      Gviz::displayPars(dt.control)$fontcolor.title <- "#000000"
      Gviz::displayPars(dt.control)$col.axis <- "#000000"
      Gviz::displayPars(dt.control) <- args
      track <- list("Coverage" = dt.control)
    }
    if("treated" %in% conditions){
      d <- seqdata[,grepl("treated",colnames(mcols(seqdata)))]
      colnames(mcols(d)) <- gsub(".treated","",colnames(mcols(d)))
      dt.treated <- Gviz::DataTrack(range = d,
                                    group = factor("means"),
                                    name = paste0(RNAMODR_PLOT_SEQ_COVERAGE_NAMES["means"],
                                                  "\ntreated"),
                                    type = "histogram")
      Gviz::displayPars(dt.treated)$background.title <- "#FFFFFF"
      Gviz::displayPars(dt.treated)$fontcolor.title <- "#000000"
      Gviz::displayPars(dt.treated)$col.axis <- "#000000"
      Gviz::displayPars(dt.treated) <- args
      track <- list("Coverage" = dt.treated)
    }
    # When both conditions are present, return both tracks. Note the list
    # deliberately reuses the name "Coverage" for both elements.
    if(length(conditions) == 2L){
      track <- list("Coverage" = dt.control,
                    "Coverage" = dt.treated)
    }
    track
  }
)
|
0e33e9e2b1a2bfbcd457976c54dc3782c95faa72
|
16304becc4c42c9a07591d038526ce543bfc3700
|
/man/validate.covariance.model.Rd
|
e99912ab08bdc011cc72ea86231ff61c5aca65a4
|
[] |
no_license
|
cran/saemix
|
2884fd4df50006001b6b0b828092d4a05b3accf1
|
5aadc67a897200bb74187459e46c51a3e7b6268c
|
refs/heads/master
| 2023-07-08T07:45:00.826030
| 2023-06-27T09:10:02
| 2023-06-27T09:10:02
| 17,699,444
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,360
|
rd
|
validate.covariance.model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SaemixModel.R
\name{validate.covariance.model}
\alias{validate.covariance.model}
\title{Validate the structure of the covariance model}
\usage{
validate.covariance.model(x, verbose = TRUE)
}
\arguments{
\item{x}{a matrix}

\item{verbose}{a boolean indicating whether warnings should be output if x is not a valid covariance model}
}
\value{
a boolean, TRUE if x is an acceptable structure and FALSE if not. Messages will be output to describe why x isn't a valid covariance model if the argument verbose is TRUE.
}
\description{
Check that a matrix corresponds to a structure defining a covariance model for a non-linear mixed effect model.
Such a matrix should be composed of only 0s and 1s, with at least one element set to 1, and should be square and symmetrical.
1s on the diagonal indicate that the corresponding parameter has interindividual variability and that its variance will be estimated.
1s as off-diagonal elements indicate that a covariance between the two corresponding parameters will be estimated.
}
\examples{
covarmodel<-diag(c(1,1,0))
validate.covariance.model(covarmodel) # should return TRUE
}
\seealso{
\code{SaemixModel}
}
\author{
Emmanuelle Comets \href{mailto:emmanuelle.comets@inserm.fr}{emmanuelle.comets@inserm.fr}, Belhal Karimi
}
\keyword{models}
|
d07b80dc92dc4019bfcac5310400eca55722eb17
|
6e4b1d5db4f6d5a42a324adbae883177e4ab0f23
|
/v105pn.R
|
2909ebf8fecce8e1e726423efecdc952ece03ce5
|
[] |
no_license
|
alexanderm10/druckenbrod_ecological_time_series
|
b9c749fbc24e67bb7430729fa83b11376bd4d23f
|
0033476196b6fc7e19a3b809a3475e54f2d0dc53
|
refs/heads/master
| 2020-06-04T08:36:48.936310
| 2019-06-14T16:41:15
| 2019-06-14T16:41:15
| 191,947,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,879
|
r
|
v105pn.R
|
# v105pn.R
# This function extracts a single tree-ring time series from
# an input RWL file and places it in a vector for time series analysis.
# The function power transforms and removes the mean to create transformed
# residuals. The function then detrends with an iterative neg. exponential
# fit, or if that does not fit or fails to find a solution, then a linear
# regression with either a positive or negative slope is fit to the data.
# Using the maximum entropy model solution otherwise known as the Burg
# method, the autoregressive model that is the best fit for the series is
# determined. Using the best fit model, the function searches for
# autoregressive outliers iteratively. These outliers may either be pulse
# events (1 yr) or CSTs (> minimum no. of yrs). After the first pass,
# the outliers are removed and the series is reconstituted. The best ar
# order is then redetermined and the function searches for additional
# outliers. The # of iterations is set by the user (8 should be enough).
# This version uses a power transformation to minimize
# the heteroscedastic nature of my time series. 'fig' is a flag that
# specifies whether you want a figure (=1) or not (=0). Missing years are
# set to the average of neighboring rings. The central limit theorem
# is used to search the residuals for trend outliers. This version also
# uses Dave Meko's biweight mean code and currently runs with a window of
# 9 to 30 yrs. Estimated values for missing rings are removed in the
# output series. This version uses a modified Hugershoff curve with a
# potentially nonzero asymptote to detrend + and - disturbance events.
# It also returns the transformed standardized series.
#
# Updated input (9/25/2014): User defined working directory (direct)
# and input file (fileN).
#
# Function written Sep 10, 2002.
# Function last revised Jun 6, 2014.
# Function converted to R Sep 25, 2014
# --- Script configuration: arguments passed to v105pn() below. ---
# Test core (previous test values kept for reference):
#core <- "TJO180"
#core <- "TJ190A"
#core <- "CGR03b"
core <- 29
fig <- 0
iter <- 8
#fileN <- "Bishop_Example.txt"
#fileN <- "CGR_all_5b_changed.rwl"
fileN <- "EasternMA_Presto_QUSP_Final.rwl"
# NOTE(review): hard-coded absolute, user-specific path — must be edited per
# machine; the helper scripts sourced by v105pn() must also live here.
direct <- "/Users/tessamandra/OneDrive - Harvard University/NortheastRegionalDisturbanceProject/Sorted regional data/Eastern MA/Presto"
v105pn = function(direct,fileN,core,fig,iter){
  ## Extract one tree-ring series from an RWL file, power transform it,
  ## remove the age trend (negative exponential, or linear fallback), fit a
  ## Burg/maximum-entropy AR model, and iteratively detect and remove pulse
  ## (1 yr) and CST (multi-year) outliers.
  ##
  ## direct - working directory holding the RWL file and helper scripts
  ## fileN  - RWL input file name
  ## core   - column index (or name) of the series within the RWL file
  ## fig    - 1 to draw diagnostic figures (uses windows(); Windows-only), 0 to suppress
  ## iter   - maximum number of outlier-removal iterations (8 recommended)
  ##
  ## Returns list(YEARS, transformed, detrended, St, Str, Dtr, Atr, age, outs):
  ## standardized/re-expressed series, age-trend info and outlier summary.
  # Load packages
  library(R.matlab)
  library(dplR)
  library(MASS)
  library(robustbase)
  library(minpack.lm)
  # Set working directory
  setwd(direct)
  # Load sub-functions
  source("bisqmean_dan.R")
  source("ar_order.R")
  source("outlier_clt.R")
  source("backcast.R")
  # Load tree-ring data (returns var *rings*)
  # Unneccesary output suppressed by 'capture.output' & assigned to *dummy.capt*
  # NOTE: Global env variables with same name will mask "attached" variables, if they exist
  dummy.capt <- capture.output(rings <- read.rwl(fileN,format="auto"))
  col_header <- names(rings)
  # Find pointer to start and end of series
  ind <- which(rings[,core]>0,arr.ind=TRUE)
  sos <- ind[1]
  eos <- tail(ind,1)
  # Assign years (as global variable) and raw widths to respective vectors.
  YEARS <<- as.numeric(rownames(rings[sos:eos,]))
  raw <- rings[sos:eos,core]
  print(paste(c('Core: ',core),collapse=""))
  nyrs <- length(YEARS)
  print(paste(c('Total no. of measured years: ',toString(nyrs)),collapse=""))
  print(paste(c('First year is ',toString(YEARS[1])),collapse=""))
  print(paste(c('Last year is ',toString(YEARS[nyrs])),collapse=""))
  # Estimate missing ring widths using mean of neighboring rings
  mss <- array(0,length(raw))
  if(any(raw==0)){
    m1 <- which(raw==0,arr.ind=TRUE)
    print(paste(c('Missing rings at years ',toString(YEARS[m1])),collapse=""))
    for(nm in 1:length(m1)){
      # Nearest measured ring before and after the gap; average the two.
      ind <- which(raw[1:m1[nm]]>0,arr.ind=TRUE)
      prior <- mean(raw[tail(ind,1)])
      ind <- which(raw[m1[nm]:length(raw)]>0,arr.ind=TRUE)
      subs <- mean(raw[ind[1]+m1[nm]-1])
      mss[m1[nm]] <- mean(c(prior,subs))
    }
    raw <- raw+mss # Replace missing values with imputed values
    #print(mss)
  }
  # Power transformation: estimate the optimal power from the slope of
  # log(|first difference|) vs log(ring width).
  fdiff <- array(0,c(length(YEARS),2))
  for(x in 1:((length(YEARS)-1))){ # Calculate 1st differences
    fdiff[x,1] <- raw[x+1]
    fdiff[x,2] <- abs(raw[x+1]-raw[x])
  }
  s <- 1
  for(q in 1:(length(YEARS)-1)){
    if(fdiff[q,1]!=0 & fdiff[q,2]!=0){
      if(s==1){
        nz_fdiff <- fdiff[q,1:2] # non-zero ring widths
      }else{
        nz_fdiff <- rbind(nz_fdiff,fdiff[q,1:2]) # non-zero ring widths
      }
      s=s+1
    }
  }
  log_fdiff <- log(nz_fdiff)
  Y <- log_fdiff[,2]
  X <- log_fdiff[,1]
  bb <- lm(Y~X)
  optimal_line <- predict(bb)
  optimal_pwr <- 1-bb$coefficients[[2]]
  print(paste(c('Optimal Power = ',toString(optimal_pwr)),collapse=""))
  if(optimal_pwr <= 0.05){
    transformed <- log10(raw)
    tzero <- log10(0.001)
    print('Series was log10 transformed')
  }else if(optimal_pwr>1){
    optimal_pwr <- 1
    transformed <- raw^optimal_pwr # Don't need matrix operator in R for power function
    tzero <- 0.001^optimal_pwr
    print('Series was power transformed with power = 1')
  }else{
    transformed <- raw^optimal_pwr
    print(paste(c('Series was power transformed with power = ',toString(optimal_pwr)),collapse=""))
    tzero <- 0.001^optimal_pwr
  }
  transm <- mean(transformed)
  # print(paste(c('tzero = ',toString(tzero)),collapse=""))
  # tresids <- transformed-transm # transformed residuals cannot be used with
  # iterative age detrending since half of residuals are negative.
  options(warn=-1)
  # Nonlinear detrending option.
  # Function nls employs nonlinear least squares data fitting by the
  # Gauss-Newton Method.
  # Function nlsLM employs nonlinear least squares data fitting by the
  # Levenberg-Marquardt Method.
  crashed <- rep(0,nyrs)
  wlngth <- rep(0,nyrs)
  trendtype <- 0 # Neg exp = 1, neg linear reg = 2, or pos linear reg = 3
  minyr <- 30 # minimum number of yrs to fit to nlinfit
  if(minyr>nyrs){
    print('Insufficient # of years to fit minimum nonlinear age trend.')
  }
  b <- array(0,c(nyrs,3))
  mse <- array(NA,nyrs)
  wExist <- array(NA,(nyrs-minyr+1))
  # Try a negative-exponential fit on every window length from minyr..nyrs.
  for(i in minyr:nyrs){
    last.warning <- NULL
    beta <- c(0.5,0.1,1)
    xyrs <- c(1:i) # set years from 1 to length of series
    Y <- transformed[1:i]
    X <- xyrs[1:i]
    # Stops code from crashing because of problems fitting exp curve
    nlin.mod <- try({
      nlin.mod <- nls(Y~beta1*exp(-beta2*X)+beta3,start=list(beta1=0.5,beta2=0.1,beta3=1))
      #nlin.mod <- nlsLM(Y~beta1*exp(-beta2*X)+beta3,start=list(beta1=0.5,beta2=0.1,beta3=1))
    },silent=TRUE)
    # Check for error/warning message
    wExist[i] <- is(nlin.mod,"try-error")
    if(is(nlin.mod,"try-error")){
      crashed[i] <- 2
      #print('Exponential fit function failed, reverting to linear regression.')
    }else{
      b[i,1:3] <- summary(nlin.mod)$coefficients[1:3] # Model coefficients
      mse[i] <- mean((predict(nlin.mod)-Y)^2,na.rm=TRUE) # Manually calculate MSE from residuals
      crashed[i]=1
      #print(paste(c('Variance of the error term: ',toString(mse[i])),collapse=""))
    }
  }
  options(warn=1)
  #print(mse)
  # Dissallow curve to be concave up and make sure nlinfit
  # converges by making b(2) sufficiently large.
  # constant b(3) must be >=0 in original mm
  i_c <- which(crashed==1 & b[,1]>=0 & b[,2]>0.001 & b[,3]>=tzero & wExist==FALSE,arr.ind=TRUE) # & b[,2]<0.5)
  # NOTE(review): condition below is length(i_c > 0), i.e. length(i_c); a
  # non-zero count is truthy so this behaves like length(i_c) > 0 — but it
  # would read more clearly written that way. Confirm before changing.
  if(length(i_c>0)){
    mmse <- min(mse[i_c])
    imse <- which.min(mse[i_c])
  }else{
    mmse <- 0
    i_c <- 0
    imse <- 1
  }
  if(fig==1){ # fig==1 if you want a figure as output
    windows()
    #par(mfrow=c(3,1))
    plot(x=YEARS,y=raw,col="black",lwd=2,lty=1,type="l",ylab="Ring width (mm)",xlab="Year")
    fig1atext <- paste(c("Optimal power = ",substring(toString(optimal_pwr),1,6)),collapse="")
    mtext(fig1atext,side=3,adj=0.5,line=-1.5)
    title(main=paste(c("CST Intervention Detection on Core ",core),collapse=""))
  }
  if(i_c[imse]>0){
    # Negative-exponential age trend accepted.
    print(paste(c('Lowest error from fit = ',substring(toString(mmse),1,6)),collapse=""))
    print(paste(c('Best age trend fit from years ',toString(YEARS[1]),' to ',toString(YEARS[i_c[imse]])),collapse=""))
    print(paste(c('Best fit extends for ',toString(i_c[imse]),' years'),collapse=""))
    best <- b[i_c[imse],]
    trendtype <- 1
    y_exp <- best[1]*exp(-best[2]*xyrs)+best[3]
    detrended <- transformed-y_exp
    print('Initial Age Detrending')
    print(paste(c('Y = ',substring(toString(best[1]),1,6),'*exp(-',substring(toString(best[2]),1,6),'*x)+',
                  substring(toString(best[3]),1,6)),collapse=""))
    if(fig==1){ # fig==1 if you want a figure as output
      windows()
      #par(mfrow=c(3,2))
      par(mar=c(0,0,0,0),oma=c(5,5,5,5))
      plot(x=YEARS,y=transformed,col="black",lwd=2,lty=1,type="l",ylab="",xlab="",xlim=c(min(YEARS),max(YEARS)))
      lines(YEARS,y_exp)
      mtext("Year",side=1,line=2.5,adj=0.5)
      mtext("Transformed Width",side=2,line=2.5,adj=0.5)
      fig1btext <- paste(c("Y = ",substring(toString(best[1]),1,6),"*exp(-",substring(toString(best[2]),1,6),
                           "*x)+",substring(toString(best[3]),1,6)),collapse="")
      mtext(fig1btext,side=3,adj=0.5,line=-1.5)
      par(new=TRUE)
      plot(YEARS[i_c],mse[i_c],axes=FALSE,xlim=c(min(YEARS),max(YEARS)),type="l",xlab="",ylab="",col="red")
      axis(4)
      mtext("Error Term Variance",side=4,line=2.5,adj=0.5)
      windows()
      plot(YEARS,detrended,type="l",lwd=2,xlab="Year",ylab="Detrended Width")
    }
  }else{
    # Linear fallback: fit a straight line when the exponential is rejected.
    trendtype=2;
    xyrs <- c(1:nyrs)
    # Linear detrending option used if neg. exponential curve dissallowed.
    mod1.lm <- lm(transformed~xyrs)
    b <- mod1.lm$coefficients
    sum.lm <- summary(mod1.lm)
    stats <- c(sum.lm$r.squared,sum.lm$fstatistic,sum.lm$coefficients[[8]],sum.lm$sigma)
    if(b[2]>=0){
      trendtype <- 3
    }
    y_lin <- b[2]*xyrs + b[1]
    detrended <- transformed - y_lin
    print('Initial Age Detrending')
    print(paste(c('Y = ',substring(toString(b[2]),1,6),' * X + ',substring(toString(b[1]),1,6)),collapse=""))
    if(fig==1){ # fig=1 if you want a figure as output
      windows()
      #par(mfrow=c(3,2))
      #par(mar=c(0,0,0,0),oma=c(5,5,5,5))
      plot(x=YEARS,y=transformed,col="black",lwd=2,lty=1,type="l",ylab="",xlab="",xlim=c(min(YEARS),max(YEARS)),
           main="Transformed Series")
      lines(YEARS,y_lin)
      mtext("Year",side=1,line=2.5,adj=0.5)
      mtext("Transformed Width",side=2,line=2.5,adj=0.5)
      fig1btext <- paste(c("Y = ",substring(toString(b[2]),1,6),"* X +",substring(toString(b[1]),1,6)),collapse="")
      mtext(fig1btext,side=3,adj=0.5,line=-1.5)
      windows()
      plot(YEARS,detrended,type="l",lwd=2,xlab="Year",ylab="Detrended Width",main="Detrended Series")
    }
  }
  # Output age detrending info
  age <- c(as.character(col_header[core]),trendtype,YEARS[i_c[imse]])
  # Plot a histogram of the data to investigate its skew.
  if(fig==1){
    windows()
    hist(detrended,xlab="Detrended Width",main="Histogram of Detrended Ring Widths")
    box()
  }
  # Initialize arrays.
  next_iter <- 1 #Switch to determine whether next iteration is needed
  St <- detrended # St will be the iterated series (standardized)
  Atr<- array(NA,length(raw)) # Age trend re-expressed in raw units
  rline <- array(NA,length(raw)) # Just the slope of the intervention
  tline <- array(NA,c(length(raw),iter)) # Slope and constant of the intervention
  outs <- array(0,c(iter,5))
  for(q in 1:iter){ # Iterate AR model 'iter' times to remove all outliers
    if(next_iter==1){
      bckcasted <- 0
      ar_estimate <- 0
      residuals <- 0
      area_t <- 0
      iter_i <- St # Initial values of series for ith iteration.
      print(' ')
      print(paste(c('Statistics for AR model iteration ',toString(q),':'),collapse=""))
      # Calculate best AR model order and return in the following order:
      # residuals (white noise) and ar model estimates
      ar.mod <- ar_order(St)
      ar_white <- ar.mod[[1]]
      ar_model <- ar.mod[[2]]
      # Use new coefficients to prewhiten ORIGINAL series without
      # downweighted originals.
      # Backcast for pth order years of AR model.
      # bckcasted <- backcast(detrended)
      bckcasted <- backcast(St) #I think this is correct here.
      # NOTE(review): ORDER and PARAM are not defined in this function;
      # presumably set as globals (<<-) by ar_order()/backcast() — confirm.
      for(g in ORDER:length(bckcasted)){ # g = observation year
        ar <- 0 # ar model estimate for order i, year g
        for(k in 1:ORDER){ # kth parameter of order ORDER
          if((g-ORDER)>0){ # ensure obs yr > model order
            ar <- PARAM[k]*(bckcasted[g-k])+ar
          }
        }
        if((g-ORDER)>0){ # calculate model estimate and residuals
          if(detrended[g-ORDER]==0){ # Set missing rings to ar estimate value
            print(paste(c('Missing ring at year: ',toString(YEARS[g-ORDER])),collapse=""))
            bckcasted[g] <- ar
          }
          ar_estimate[g-ORDER] <- ar
          residuals[g-ORDER] <- bckcasted[g]-ar
        }
      }
      if(fig==1){ # fig=1 if you want a figure as output
        windows()
        plot(YEARS,St,type="l",lwd=2,main=paste(c(core," iteration ",toString(q)),collapse=""),
             ylab="Transformed Width",xlab="Year",ylim=c(min(St)*0.9,max(St)*1.1))
        lines(YEARS,ar_estimate)
        legend("topright",legend=c("Standardized Series","Autoregressive Estimate"),lwd=c(2,1),bty="n")
      }
      # Find release outliers
      outlier.out <- outlier_clt(residuals,fig)
      downres <- outlier.out[[1]]
      mres <- outlier.out[[2]]
      otype <- outlier.out[[3]]
      f <- which(downres!=0,arr.ind=TRUE)
      if(otype==1 & length(f)!=0){ # Pulse Outlier Detected
        St[f] <- ar_estimate[f]
      }else if(otype>1 & length(f)>1){ # Trend Outlier Detected
        w <- c(1:length(f))
        slope <- lm(St[f]~w)
        print(paste(c('Constant and slope = ',substring(toString(slope$coefficients[[1]]),1,6),", ",
                      substring(toString(slope$coefficients[[2]]),1,6)),collapse=""))
        # Fit Hugershoff curve to remainder of series
        lngthw <- c(min(f):length(St))
        lngthwf <- c((max(f)+1):length(St))
        lngthn <- c(1:length(lngthw))
        lngthn <- lngthn # NOTE(review): no-op; left over from development?
        opts <- nls.control(maxiter=1000)
        X <- lngthn
        Y <- St[lngthw]
        nlin.mod <- try({
          #nlin.mod <- nls(Y~(beta1*(X^beta2)*exp(-beta3*X))+beta4,
          #                start=list(beta1=0.1,beta2=0.5,beta3=0.1,beta4=-0.1),control=opts)
          nlin.mod <- nlsLM(Y~(beta1*(X^beta2)*exp(-beta3*X))+beta4,
                            start=list(beta1=0.1,beta2=0.5,beta3=0.1,beta4=-0.1),control=opts)
          #targets <- data.frame(Y,X)
          #model.hug <- function(beta1,beta2,beta3,beta4){
          #  (beta1*(targets$X^beta2)*exp(-beta3*targets$X))+beta4
          #}
          #par <- list(beta1=0.1,beta2=0.5,beta3=0.1,beta4=-0.1)
          #var <- list(x="Y",mean="predicted",sd=1)
          #par_lo <- list(beta1=-25,beta2=-25,beta3=-25,beta4=-25)
          #par_hi <- list(beta1=25,beta2=25,beta3=25,beta4=25)
          #nlin.mod <- anneal(model=model.hug,
          #                   par=par,
          #                   var=var,source_data=targets,max_iter=1000,pdf=dnorm,dep_var="Y",
          #                   par_lo=par_lo,
          #                   par_hi=par_hi)
        },silent=TRUE)
        #print(nlin.mod)
        if(!is(nlin.mod,"try-error")){
          bw <- summary(nlin.mod)$coefficients[1:4]
          #bw <- unlist(nlin.mod$best_pars)
          print(paste(c('Hugershoff Parameters: ',toString(bw)),collapse=""))
          ar_est <- ar_estimate[f[1]]
          rline[lngthw] <- -bw[1]*(lngthn^bw[2])*exp(-bw[3]*lngthn)-bw[4]
        }
        # If nlinfit returns error, then try again with diffent initial parameters.
        if(is(nlin.mod,"try-error")){
          rline[lngthw] <- 0
          print('Default initial parameters for Hugershoff curve failed')
          print('Fitting alternate, robust initial parameters [.1 .5 .1 .1]')
          inMat <- data.frame(Y,X)
          #stop("Adjust model in code to run robust bisquare model...")
          nlin.mod <- try({
            nlin.mod <- nlrob(Y~(beta1*(X^beta2)*exp(-beta3*X))+beta4,data=inMat,
                              start=list(beta1=0.1,beta2=0.5,beta3=0.1,beta4=-0.1),
                              psi=psi.bisquare,control=opts)
          },silent=TRUE)
          print(nlin.mod)
          # NOTE(review): bw is never reassigned from this robust fit — the
          # values printed/used below are stale (or undefined on the first
          # trend outlier). Likely intended: bw <- coef(nlin.mod). Confirm.
          if(!is(nlin.mod,"try-error")){
            print(paste(c('Hugershoff Parameters: ',toString(bw)),collapse=""))
            ar_est <- ar_estimate[f[1]]
            rline[lngthw] <- -bw[1]*(lngthn^bw[2])*exp(-bw[3]*lngthn)-bw[4]
          }
        }
        # If nlinfit returns error, then end outlier iterations and quit.
        if(is(nlin.mod,"try-error")){
          rline[lngthw] <- 0
          print('Unable to fit Hugershoff curve')
          ar_est <- 0
          next_iter <- 0
          outs[q,1:5] <- rep(0,5)
        }
        if(f[1]>1){
          St[lngthw] <- rline[lngthw]+St[lngthw]+ar_est
          tline[lngthw,q] <- -rline[lngthw]
        }else if(f[1]==1){ # If trend occurs in 1st yr of series
          St[lngthw] <- rline[lngthw]+St[lngthw]
          tline[lngthw,q] <- -rline[lngthw]
        }
        #print(c(YEARS[min(f)],YEARS[max(f)],slope$coefficients[[1]],slope$coefficients[[2]],otype))
        outs[q,1:5] <- c(YEARS[min(f)],YEARS[max(f)],slope$coefficients[[1]],slope$coefficients[[2]],otype)
      }
      if(length(f)==0){ # Determine whether any outliers...
        next_iter <- 0 # were detected on this iteration
      }
      if(q==iter & length(f)>0){
        print('Need to run additional iterations to resolve series!')
      }
      if(fig==1){ # fig=1 if you want a figure as output
        ymin=min(c(min(iter_i),min(St)))*1.1
        ymax=max(c(max(iter_i),max(St)))*1.1
        if(length(f)>0){ # Draw detrended regression line
          if(min(f)>1){
            windows()
            plot(c(YEARS[min(f)],YEARS[max(f)]),c(ar_est,ar_est),type="l",lwd=2,lty=2,ylab="Transformed Width",
                 xlab="Year",col="grey60",ylim=c(ymin,ymax))
            # lines(YEARS[f],St[f],lwd=2,lty=2,col="grey60")
          }else{
            windows()
            plot(c(YEARS[1],YEARS[max(f)]),rep(0,2),type="l",lwd=2,lty=2,ylab="Transformed Width",xlab="Year",
                 ylim=c(ymin,ymax),col="grey60")
          }
        }else{ # Draw same line, but set to first year of series
          windows()
          plot(YEARS,rep(0,length(YEARS)),type="l",lwd=2,lty=2,col="grey60",ylab="Transformed Width",xlab="Year",
               ylim=c(ymin,ymax))
        }
        lines(YEARS,iter_i,lwd=2)
        lines(YEARS,St)
        lines(YEARS,tline[,q],lwd=2,col="grey60")
        legend("topleft",legend=c("AR Estimate","Initial Series before detrend","Standardized Series","Hugershoff Fit"),
               lty=c(2,1,1,1),lwd=c(2,2,1,2),col=c("grey60","black","black","grey60"))
        windows()
        plot(YEARS,residuals,type="l",xlab="Year",ylab="Residuals",ylim=c(min(residuals)*1.1,max(residuals*1.1)),
             main=paste(c("Iteration ",toString(q),": AR Residuals and running residual mean for ",
                          toString(length(f))," years"),collapse=""))
        abline(h=0)
        lines(YEARS,mres,col="grey60",lwd=2)
      }
    }
  }
  if(fig==1){ # fig=1 if you want a figure as output
    # Shows final iterated series in transformed units
    windows()
    #transDt <- detrended - St # transformed outlier series
    # print(paste(c('transDtarea = ',toString(sum(transDt[f]))),collapse=""))
    # plot(YEARS,detrended,type="l")
    # lines(YEARS,transDt,lwd=2)
    plot(YEARS,detrended,type="l",lwd=2,main="Outlier Series",ylab="Transformed Width",xlab="Year",
         ylim=c(min(St),max(detrended)))
    lines(YEARS,St,lwd=2,col="grey60");
    legend("topleft",legend=c('Age-detrended series','Standardized series'),col=c("black","grey60"),
           lwd=c(2,2),bty="n")
    # windows()
    # plot(YEARS,St,lwd=2,main="Standardized Series",xlab="Years",ylab="Transformed Width")
  }
  # Shows final iterated series in original units (mm presumably)
  # St <- St+St_pos
  # Undo the age detrend and the power/log transform to re-express the
  # standardized series (Str) and age trend (Atr) in raw units.
  if(trendtype==1){ # negative exponential trend
    Stt <- y_exp+St # Size trend & first detrending
    # Stt <- Stt+transm # Add transformed mean back to series
    if(optimal_pwr<=0.05){
      Str <- 10^(Stt) # Size trend in original (raw) units
      Atr <- 10^(y_exp) # Age trend in original (raw) units
    }else{
      Stt[Stt<=0] <- 0 # Set neg values to zero
      Str <- (Stt)^(1/optimal_pwr)
      Atr <- (y_exp)^(1/optimal_pwr)
    }
  }else if(trendtype==2 | trendtype==3){ # linear regression trend
    Stt <- y_lin+St # Size trend & first detrending
    # Stt <- Stt+transm # Add transformed mean back to series
    if(optimal_pwr<=0.05){
      Str <- 10^(Stt) # Size trend in original (raw) units
      Atr <- 10^(y_lin) # Age trend in original (raw) units
    }else{
      Stt[Stt<=0] <- 0 # Set neg values to zero
      Str <- (Stt)^(1/optimal_pwr)
      Atr <- (y_lin)^(1/optimal_pwr)
    }
  }else{
    print('Error in trend type designation')
  }
  # outs[,6] <- mean(prline,na.rm=TRUE) # mean prior to intervention removal
  # outs[,7] <- mean(arline,na.rm=TRUE) # mean after intervention removal
  raw[mss>0] <- NA # Remove estimated values of missing rings
  Str[mss>0] <- NA # Remove estimated values of missing rings
  Dtr <- raw-Str # Remove estimated values of missing rings
  if(fig==1){ # fig=1 if you want a figure as output
    windows()
    plot(YEARS,Dtr,type="l",col="grey60",lwd=2,lty=2,xlab="Year",ylab="Ring Width (mm)",ylim=c(min(Dtr),max(raw)))
    lines(YEARS,Str,lwd=2,lty=2)
    lines(YEARS,raw,lwd=2)
    #lines(YEARS,Atr)
    legend("topleft",legend=c('Disturbance index','Standardized series','Original series'),
           col=c("grey60","black","black"),lty=c(2,2,1),lwd=c(2,2,2),bty="n")
  }
  return(list(YEARS,transformed,detrended,St,Str,Dtr,Atr,age,outs))
} # End of function
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.