blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c24cda1c87a77191b759292fb18bf91969e7988f
|
b8fa00b408af080b5e25363c7fdde05e5d869be1
|
/Task5_0867117/Analytics5_7.r
|
a7899d252a2f5b1454d4da05ca01b5f7887d8ef5
|
[] |
no_license
|
anurag199/r-studio
|
bc89f0c18a8d44164cb4ede8df79321ea965bc77
|
e42909505fbe709f476081be97f89cc945a2745d
|
refs/heads/master
| 2020-04-26T23:04:25.061780
| 2019-03-05T06:56:26
| 2019-03-05T06:56:26
| 173,891,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 132
|
r
|
Analytics5_7.r
|
# Build a document-term matrix from the pre-cleaned e-mail corpus.
# `cleanemail_corpus` is created by an earlier script of this task
# (presumably an already cleaned tm corpus -- TODO confirm upstream script).
library(tm)
library(SnowballC)
# creating dtm for cleanemail_corpus
email_dtm <- DocumentTermMatrix(cleanemail_corpus)
# Auto-print the DTM summary (docs, terms, sparsity) at top level.
email_dtm
|
ebbe9de9274a0502ae3006ad979299f5fa6de5e4
|
5693d3497d8d4e61f5ffa0becca905a1074de569
|
/PRA/18-08-2020/ArimaXpm2.5.R
|
483cbc887b6278894ab257e2ae726918c622083f
|
[] |
no_license
|
souviksamanta95/R_Personal
|
65a6272253792839c3b44cfab3cf934227609ae8
|
56597d51b620a587e097c8b669d014a807481f39
|
refs/heads/master
| 2023-02-16T11:10:15.966371
| 2021-01-20T11:12:20
| 2021-01-20T11:12:20
| 260,390,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,147
|
r
|
ArimaXpm2.5.R
|
## Time Series ARIMA and MSARIMA Models in R: Box Jenkin's Methodology
## Fits a seasonal ARIMAX model to hourly pm2.5 with weather covariates on
## the first 4000 rows, forecasts the 4000-row hold-out, and computes MAPE.
rm(list=ls())
cat("\014")
# Only close a graphics device if one is actually open (bare dev.off()
# errors in a fresh session).
if (dev.cur() > 1) dev.off()
install.packages("tseries")
install.packages("forecast")
install.packages("plm")
install.packages("Formula")
install.packages("tcltk")
install.packages("uroot")
install.packages("pdR")
install.packages("stats")
install.packages("TSA")
install.packages("readxl")
install.packages("aTSA")
install.packages("rms")
# Consistency fix: usdm is loaded below (provides vif()) but was never
# installed alongside the other packages.
install.packages("usdm")
library(tseries)
library(forecast)
library(plm)
library(Formula)
library (usdm)
library(tcltk)
library(uroot)
library(pdR)
library(stats)
library(TSA)
library (aTSA)
library (rms)
options (scipen=99999)
# Interactive file pick: expects a CSV with at least 8000 rows and a pm2.5
# column plus the weather covariates used below.
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
# Estimation sample (rows 1-4000) and hold-out sample (rows 4001-8000).
df_nohold<- df[1:4000,]; View(df_nohold);
df_hold<-df[4001:8000,]; View (df_hold);
#sum(is.null(df))
#sum(is.na(df))
#y1 <- ts(data.matrix(df$pm2.5), frequency=24)
#y1
# BUGFIX: the original referenced an undefined object `dt`; the data frame
# is called `df` throughout this script.
tsdisplay(df$pm2.5,lag = 50)
# Schwert rule-of-thumb lag order (computed for reference only; the
# adf.test calls below fix k=12 explicitly).
k<- trunc((length(df$pm2.5)-1)^(1/3))
k
adf.test((df$pm2.5), alternative="stationary", k=12)
#Trend Differencing
tsdisplay(diff(df$pm2.5,1),lag=50)
# BUGFIX: parentheses corrected so the -1 applies to the length (matching
# the rule above); the original subtracted 1 from each differenced value.
k <- trunc((length(diff(df$pm2.5,1))-1)^(1/3))
k
adf.test(diff((df$pm2.5),1), alternative="stationary", k=12)
#Seasonal Differencing
tsdisplay(diff(df$pm2.5,24),lag=24)
# Covariate matrices: 6 weather regressors for estimation and hold-out
# samples (both 4000 rows).
covariate_xmatrix <- matrix(c(df_nohold$Iws, df_nohold$DEWP, df_nohold$TEMP, df_nohold$PRES, df_nohold$Is, df_nohold$Ir),nrow=4000,ncol=6)
covariate_xmatrix_hold <-matrix(c(df_hold$Iws, df_hold$DEWP, df_hold$TEMP, df_hold$PRES, df_hold$Is, df_hold$Ir), nrow=4000,ncol=6)
covariate_xmatrix_hold
#Iws: Cumulated wind speed (m/s)
#DEWP: Dew Point
#TEMP: Temperature
#PRES: Pressure (hPa)
#Is: Cumulated hours of snow
#Ir: Cumulated hours of rain
##Multicollinearity Check amongst independent variables
vif(df_hold[c(7,8,9,11)])
##Estimation
# ARIMA(1,0,0) with seasonal (0,1,1)[24] and exogenous weather regressors.
fit_arimax<-Arima(df_nohold$pm2.5, order=c(1,0,0),seasonal = list(order = c(0,1,1), period = 24),xreg=covariate_xmatrix,method = "ML")
summary(fit_arimax)
windows()
tsdiag(fit_arimax)
#Forecast
f_hold<-forecast(fit_arimax,xreg = covariate_xmatrix_hold)
f_hold
plot(f_hold)
##MAPE
# BUGFIX: the original used undefined `f`; the forecast object is `f_hold`
# and its point forecasts live in $mean.
f_hold_MAPE<-((abs(df_hold$pm2.5-f_hold$mean)/df_hold$pm2.5))*100
mean(f_hold_MAPE)
|
24af2af275e54fca1e43a21879d33e3796907880
|
b4d28b7cb7b25f05687a027e24d70b3c1116408b
|
/Resources/AlertDlogUtils.r
|
eade5c3d6c06d6c623364e5c6831ba1c443f2a2a
|
[] |
no_license
|
fruitsamples/Bitblitz
|
af494e92834306c0e5002da03da390fa4ff748b8
|
4708515a9fe06052a42df257025729a35cdf0ca3
|
refs/heads/master
| 2021-01-10T11:47:18.098116
| 2015-11-25T21:22:37
| 2015-11-25T21:22:37
| 46,887,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
AlertDlogUtils.r
|
/*======================================================================================*/
/* File: AlertDlogUtils.r */
/* */
/* By: George Delaney */
/* Mac CPU Software Quality */
/* Date: 5/14/90 */
/* */
/* Contents: */
/* All resource declarations needed to support the AlertDlogUtils library file. */
/*======================================================================================*/
#include "Types.r"
/*--------------------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------------------*/
/* Alerts */
/*--------------------------------------------------------------------------------------*/
/* Generic text string alert box */
/* ALRT 5000: bounds {top,left,bottom,right} = {46,106,146,421}, item list DITL 5000.   */
/* The four identical lines are the alert's four stage responses (stages 1-4): default  */
/* item OK, alert box visible, play sound1 at every stage.                              */
resource 'ALRT' (5000) {{46, 106, 146, 421},5000,{
OK, visible, sound1,
OK, visible, sound1,
OK, visible, sound1,
OK, visible, sound1}};
/* DITL 5000: item 1 = enabled OK button; item 2 = static text "^0", where ^0 is        */
/* replaced with the caller-supplied string (presumably via ParamText -- confirm).      */
resource 'DITL' (5000) {{
{ 75, 128, 95, 188}, Button {enabled,"OK"},
{ 5, 4, 71, 311}, StaticText {disabled,"^0"}
}};
|
e7bc7f154526ada715f009fb8d95e724dfe762d1
|
303ee8c30e03e6bf734e69e1e00f43fefaf3bda4
|
/heatmap.R
|
7244d0e9c069152651d9d2092ec6e8b804ff06c8
|
[] |
no_license
|
zt2730/Rplot
|
d2d57c331283d309dd8ae1d41425874ee432e291
|
a4979f63029b26912c43eb4d631e04c489ca7328
|
refs/heads/master
| 2021-01-01T03:33:35.002731
| 2016-05-24T21:37:27
| 2016-05-24T21:37:27
| 59,609,059
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 681
|
r
|
heatmap.R
|
# Heat map of the eurodist inter-city distance matrix, with row/column
# side bars colour-coded by the country each city belongs to.
x <- as.matrix(eurodist)
# Country of each of the 21 cities in eurodist, in matrix order.
countries <- c("Greece", "Spain", "Belgium", "France", "France",
"Germany", "Denmark", "Switzerland", "Gibraltar", "Germany",
"Holland", "Portugal", "France", "Spain", "France", "Italy",
"Germany", "France", "Italy", "Sweden", "Austria")
f.countries <- factor(countries)
# Grey-scale palette, one shade per country level.
# BUGFIX (dead code): the original first built a heat.colors() palette and
# country.color vector, then immediately overwrote both with the grey-scale
# versions below; the unused first pair is removed (behavior unchanged).
# NOTE(review): seq(0, nlevels(...)) yields nlevels+1 values, so the last
# grey (pure white) is never indexed -- possibly intended; left unchanged.
group.colors <- gray(seq(0, nlevels(f.countries))/nlevels(f.countries))
country.color <- group.colors[as.integer(f.countries)]
heatmap(x,ColSideColors = country.color,
RowSideColors = country.color
)
|
2377d908f6c82be4c896dfc70099bd0d05b4414a
|
ab1985a8774796e33ff3aea3bab6ecf45b700101
|
/R/coordinate_conv.R
|
072c5619b01a2747f805fa740a5715e2631e23b7
|
[] |
no_license
|
dindiarto/coordinate_converter
|
161dc47639ce45d08087de94a8a360fbcfded673
|
26bea7490343a1522b3601caa58eec35aa7ed872
|
refs/heads/master
| 2020-07-16T14:57:38.842452
| 2019-09-02T08:35:46
| 2019-09-02T08:35:46
| 205,810,609
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,570
|
r
|
coordinate_conv.R
|
library(measurements)
library(stringr)
library(magrittr)
library(dplyr)
library(readr)
# Geographic coordinates converter
#
#' DMS to decimal degree
#'
#' Converts latitude/longitude columns written as degree-minute-second text
#' with a trailing cardinal letter (e.g. "7°30'15\"S") into signed decimal
#' degrees; N/E map to +, S/W map to -.
#'
#' @param df df is a dataframe with latitude and longitude columns in gms format
#' @param lat latitude in gms format
#' @param lon longitude in gms format
#'
#' @return `df` with numeric `Latitude`/`Longitude` columns in decimal
#'   degrees; the temporary hemisphere columns are dropped.
#' @export
#'
#' @examples
# NOTE(review): `lon = lon, lat = lat` are self-referential defaults; calls
# work only because the arguments are evaluated lazily inside mutate()'s
# data mask, i.e. callers must pass bare column names. Consider {{ }}
# embracing (tidy evaluation) for a robust interface -- confirm callers.
dms_to_dec_deg <- function(df, lon=lon,lat=lat) {
# Strip spaces, then turn degree/minute/second symbols into single spaces
# ("deg min sec" is the format conv_unit() expects).
remove_separators <- function(x) {
stringr::str_remove_all(x, " ") %>%
stringr::str_replace_all('[°\'\"]', " ")
}
# "deg min sec" text -> decimal degrees (character result from conv_unit).
dms_dd<-function(x3) {
measurements::conv_unit(x3, from = 'deg_min_sec', to = 'dec_deg')
}
# Drop the final character (the cardinal letter N/S/E/W).
remove_cardinal<-function(x4){
gsub('.{1}$' , "", x4)
}
df %>%
dplyr::mutate(Latitude = lat, Longitude = lon) %>%
mutate_at(vars(Latitude, Longitude), remove_separators) %>% #remove whitespaces,'," ymbols
mutate(Hem_Lat = str_sub(Latitude, start = -1),Hem_Lon = str_sub(Longitude, start = -1)) %>% # extract the latest character ~ N/S and E/W
mutate(Hem_Lat = case_when(Hem_Lat == "N" ~ 1 , Hem_Lat == "S" ~ -1),Hem_Lon = case_when(Hem_Lon == "E" ~ 1 , Hem_Lon == "W" ~ -1)) %>% # determine N/S and E/W and convert it to 1 or -1
mutate_at(vars(Latitude, Longitude), remove_cardinal)%>% #remove latest character ~ N/S and E/W
mutate_at(vars(Latitude, Longitude), dms_dd) %>% # convert dms to dec deg
mutate( Latitude = as.numeric(Latitude) * Hem_Lat, Longitude = as.numeric(Longitude) * Hem_Lon) %>% # assign N/S and E/W to dec deg format
dplyr::select(-Hem_Lat,-Hem_Lon)
}
|
a0ddc23ea1df8678eb297f77a5ca8841f636cabe
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/Lagoon/999_laptop/51_analysis_plots/three_in_one/0_AW_Precip_3_in_1.R
|
c6230f2c28a3fc0b293620a277cbdb664107da12
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 9,135
|
r
|
0_AW_Precip_3_in_1.R
|
# Builds "3-in-1" precipitation figures per cluster -- annual cumulative
# precipitation, unbiased percentage differences vs. historical, and rain
# fraction -- for RCP 4.5 and RCP 8.5, for both calendar-year and
# water-year aggregations, saving combined and individual PNG panels.
# NOTE(review): helper functions used below (remove_observed,
# remove_current_timeP, convert_5_numeric_clusts_to_alphabet,
# find_quantiles, ann_wtrYr_chunk_cum_box_cluster_x, annual_fraction)
# are presumably defined in the two source()d core files -- confirm.
rm(list=ls())
library(lubridate)
library(ggpubr)
library(purrr)
library(tidyverse)
library(data.table)
library(dplyr)
library(ggplot2)
# NOTE(review): "digit" is a typo (no such option); harmless because the
# correct call follows immediately.
options(digit=9)
options(digits=9)
source_path_1 = "/Users/hn/Documents/GitHub/Ag/Lagoon/core_lagoon.R"
source_path_2 = "/Users/hn/Documents/GitHub/Ag/Lagoon/core_plot_lagoon.R"
source(source_path_1)
source(source_path_2)
############################################################################
base <- "/Users/hn/Desktop/Desktop/Ag/check_point/lagoon/"
data_base <- paste0(base, "rain_snow_fractions/")
# Input file stems and matching time-period tags: index 1 = calendar year,
# index 2 = water year.
AV_fileNs <- c("annual_fracs", "wtr_yr_fracs")
timeP_ty_middN <- c("ann", "wtr_yr")
timeP_ty <- 1
##########################################
#
# unbias diff data directory
#
diff_dir <- paste0(base, "precip/02_med_diff_med_no_bias/")
for (timeP_ty in 1:2){ # annual or wtr_yr?
###############################################################
# set up title stuff
#
if (timeP_ty_middN[timeP_ty]== "ann"){
title_time <- "calendar year"
} else if (timeP_ty_middN[timeP_ty]== "wtr_yr"){
title_time <- "water year"
}
AV_y_lab <- "cum. precip. (mm)"
AV_title <- paste0("ann. precip.", " (", title_time, ")")
AV_tg_col <- "annual_cum_precip"
###############################################################
##################################################################################
# Load annual values and unbiased median-difference data for this period type.
AVs <- readRDS(paste0(data_base, AV_fileNs[timeP_ty], ".rds")) %>% data.table()
unbias_diff <- readRDS(paste0(diff_dir, "detail_med_diff_med_",
timeP_ty_middN[timeP_ty], "_precip.rds")) %>%
data.table()
AVs <- subset(AVs, select = c("location", "cluster", "year", "time_period",
"model", "emission",
"annual_cum_precip", "rain_fraction", "snow_fraction"))
# Keep modelled future data only, with alphabetic cluster labels.
AVs <- remove_observed(AVs)
AVs <- remove_current_timeP(AVs) # remove 2006-2025
AVs <- convert_5_numeric_clusts_to_alphabet(data_tb = AVs)
unbias_diff <- remove_observed(unbias_diff)
unbias_diff <- remove_current_timeP(unbias_diff) # remove 2006-2025
unbias_diff <- convert_5_numeric_clusts_to_alphabet(data_tb=unbias_diff)
# Split both tables by emission scenario; free the full tables immediately.
AVs_45 <- AVs %>% filter(emission=="RCP 4.5") %>% data.table()
AVs_85 <- AVs %>% filter(emission=="RCP 8.5") %>% data.table(); rm(AVs)
unbias_diff_45 <- unbias_diff %>% filter(emission=="RCP 4.5") %>% data.table()
unbias_diff_85 <- unbias_diff %>% filter(emission=="RCP 8.5") %>% data.table()
rm(unbias_diff)
##################################
#####
##### AVs plots
# y-axis limits come from quantiles so outliers don't flatten the boxes.
quans_85 <- find_quantiles(AVs_85, tgt_col= AV_tg_col, time_type="annual")
quans_45 <- find_quantiles(AVs_45, tgt_col= AV_tg_col, time_type="annual")
AV_box_85 <- ann_wtrYr_chunk_cum_box_cluster_x(dt = AVs_85, y_lab = AV_y_lab,
tgt_col = AV_tg_col) +
ggtitle(AV_title) +
coord_cartesian(ylim = c(quans_85[1], quans_85[2]))
AV_box_45 <- ann_wtrYr_chunk_cum_box_cluster_x(dt = AVs_45, y_lab = AV_y_lab,
tgt_col = AV_tg_col) +
ggtitle(AV_title) +
coord_cartesian(ylim=c(quans_45[1], quans_45[2]))
###################################
#####
##### fraction plots
#####
###################################
box_title <- paste0("fraction of precip. fell as rain", " (", title_time, ")")
# Fractions are stored in [0, 1]; scale the quantile limits to percent.
quans_85 <- 100 * find_quantiles(AVs_85, tgt_col= "rain_fraction", time_type="annual")
quans_45 <- 100 * find_quantiles(AVs_45, tgt_col= "rain_fraction", time_type="annual")
rain_frac_85 <- annual_fraction(data_tb = AVs_85,
y_lab = "rain fraction (%)",
tgt_col="rain_fraction") +
ggtitle(box_title) +
coord_cartesian(ylim = c(max(-2, quans_85[1]), min(quans_85[2], 110)))
######
rain_frac_45 <- annual_fraction(data_tb = AVs_45,
y_lab = "rain fraction (%)",
tgt_col="rain_fraction") +
ggtitle(box_title) +
coord_cartesian(ylim = c(max(-2, quans_45[1]), min(quans_45[2], 110)))
##################################################################################
###################################
#####
##### difference plots
#####
###################################
box_title <- "percentage differences between future time periods and historical"
quans_85 <- find_quantiles(unbias_diff_85, tgt_col= "perc_diff", time_type="annual")
quans_45 <- find_quantiles(unbias_diff_45, tgt_col= "perc_diff", time_type="annual")
# NOTE(review): box_subtitle is never defined in this script; unless one of
# the source()d core files defines it, these two calls error -- confirm.
unbias_perc_diff_85 <- ann_wtrYr_chunk_cum_box_cluster_x(dt = unbias_diff_85,
y_lab = "differences (%)",
tgt_col = "perc_diff",
ttl = box_title,
subttl = box_subtitle) +
ggtitle(box_title) +
coord_cartesian(ylim = c(quans_85[1], quans_85[2]))
unbias_perc_diff_45 <- ann_wtrYr_chunk_cum_box_cluster_x(dt = unbias_diff_45,
y_lab = "differences (%)",
tgt_col = "perc_diff",
ttl = box_title,
subttl = box_subtitle) +
ggtitle(box_title) +
coord_cartesian(ylim = c(quans_45[1], quans_45[2]))
###################################
#####
##### save plots
#####
###################################
#
# 3 in 1
#
plot_dir <- paste0(data_base, "narrowed_rain_snow_fractions/",
timeP_ty_middN[timeP_ty], "/3_in_1/")
if (dir.exists(plot_dir) == F) {dir.create(path = plot_dir, recursive = T)}
print (plot_dir)
# Stack the three panels vertically with a shared bottom legend.
rain_45 <- ggarrange(plotlist = list(AV_box_45,
unbias_perc_diff_45,
rain_frac_45),
ncol = 1, nrow = 3,
common.legend = TRUE, legend="bottom")
rain_85 <- ggarrange(plotlist = list(AV_box_85,
unbias_perc_diff_85,
rain_frac_85),
ncol = 1, nrow = 3,
common.legend = TRUE, legend="bottom")
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_45.png"),
plot = rain_45, width=5.5, height=5, units = "in",
dpi=400, device = "png", path = plot_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_85.png"),
plot = rain_85, width=5.5, height=5, units = "in",
dpi=400, device = "png", path = plot_dir)
#################################
#
# just plots
#
#
# just rain fractions
#
just_frac_dir <- paste0(data_base, "narrowed_rain_snow_fractions/",
timeP_ty_middN[timeP_ty], "/just_frac/")
if (dir.exists(just_frac_dir) == F) {
dir.create(path = just_frac_dir, recursive = T)}
print (just_frac_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_45.png"),
plot = rain_frac_45, width=5.5, height=1.5, units = "in",
dpi=400, device = "png", path = just_frac_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_85.png"),
plot = rain_frac_85, width=5.5, height=1.5, units = "in",
dpi=400, device = "png", path = just_frac_dir)
#
# just rain AVs
#
just_AVs_dir <- paste0(data_base, "narrowed_rain_snow_fractions/",
timeP_ty_middN[timeP_ty], "/just_AVs/")
if (dir.exists(just_AVs_dir) == F) {
dir.create(path = just_AVs_dir, recursive = T)}
print (just_AVs_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_45.png"),
plot = AV_box_45, width=5.5, height=1.5, units = "in",
dpi=400, device = "png", path = just_AVs_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_85.png"),
plot = AV_box_85, width=5.5, height=1.5, units = "in",
dpi=400, device = "png", path = just_AVs_dir)
#
# just unbiass diff
#
just_diff_dir <- paste0(data_base, "narrowed_rain_snow_fractions/",
timeP_ty_middN[timeP_ty], "/just_unbiass_diff/")
if (dir.exists(just_diff_dir) == F) {
dir.create(path = just_diff_dir, recursive = T)}
print (just_diff_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_45.png"),
plot = unbias_perc_diff_45, width=5.5, height=1.5, units = "in",
dpi=400, device = "png", path = just_diff_dir)
ggsave(filename = paste0(timeP_ty_middN[timeP_ty], "_rain_85.png"),
plot = unbias_perc_diff_85, width=5.5, height=1.5, units = "in",
dpi=400, device = "png", path = just_diff_dir)
}
|
d38bcc79cb5d8c641300479cf6cf76040466dbff
|
107f84efa479feb92f1a0b8ab2ff2193ff2d38ba
|
/man/timer.Rd
|
cd0db1b54a00520794ab71640de9c7837f98a2b3
|
[] |
no_license
|
data-steve/holstr
|
f7cc7a01a0686fa37c522c344d83d53adb786261
|
5a3176d7f213ffee1897a7d11ec38044f2e7af44
|
refs/heads/master
| 2021-07-16T14:32:54.556193
| 2016-09-16T19:17:22
| 2016-09-16T19:17:22
| 57,908,552
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 871
|
rd
|
timer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timer.R
\name{tic}
\alias{tic}
\alias{toc}
\title{Crude Timing}
\usage{
tic(pos = 1, envir = as.environment(pos))
toc(pos = 1, envir = as.environment(pos))
}
\arguments{
\item{pos}{Where to do the assignment. By default, assigns into the global
environment.}
\item{envir}{The \code{\link[base]{environment}} to use.}
}
\value{
\code{tic} makes a reference to the current time in the global
environment. \code{toc} compares elapsed time and returns the difference.
}
\description{
Crude timings based on \code{\link[base]{Sys.time}}. \pkg{microbenchmark}
provides a more accurate result.
}
\note{
The \pkg{data.table} package formerly had these functions.
}
\examples{
tic();toc()
tic()
Sys.sleep(3)
toc()
}
\seealso{
\code{\link[base]{assign}},
\code{\link[base]{get}}
}
\keyword{timer}
|
87e9c31b200eabe9e96500866b9ad0646be832a4
|
18ff0dc93ac08d5e6a7e3f774d659012092a9a8b
|
/render_site.R
|
e372f84455e97b1ff4bfa7cc1990a222a8405112
|
[] |
no_license
|
jpowerj/tad-workshop
|
b492aa726c9ef064e8fb0c7bdd323acc41f7b659
|
30f900a217f76d6c167b747b5d36e8d8c2413b02
|
refs/heads/master
| 2020-04-23T05:18:49.342473
| 2019-02-20T22:41:42
| 2019-02-20T22:41:42
| 150,912,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
render_site.R
|
# Used to be cool. Now it just runs render_site()
# Renders the workshop website; rmarkdown::render_site() with no arguments
# builds the site defined by the current working directory's _site.yml.
library(rmarkdown)
# Delete tad_old
#file.remove("../../git_io/tad_old/")
# Rename the old version tad_old
#file.rename("../../git_io/tad/","../../git_io/tad_old/")
render_site()
|
766fc46ae28d868ffba418aa374186d3f0025a0d
|
e95c8b8e2b5aa04d24030818efe28cc92dc0937e
|
/R/covyw.R
|
7790522e56fbf8abd3f5c77adfc0f48b272f4326
|
[] |
no_license
|
cran/touchard
|
42ad60a56f704c6fed7a221ed644987a5642eaeb
|
4c7b4f62f616c75052ce7296ce32e4281ab663d4
|
refs/heads/master
| 2020-03-27T04:05:37.158323
| 2019-05-31T11:40:03
| 2019-05-31T11:40:03
| 145,911,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
r
|
covyw.R
|
### E[ Y x W ] and COV(Y,W), W=log(Y+1)
#
# cov_yw(lambda, delta, N) returns COV(Y, W) = E[YW] - E[Y]*E[W] where
# W = log(Y + 1). E[YW] is computed per lambda from a series whose terms
# are accumulated until they drop below machine precision, then divided by
# the normalising constant tau(). tau(), mu() and kapa() are defined
# elsewhere in this package.
cov_yw <- function(lambda, delta, N = 100) {
  # E[Y * W] for a single lambda value.
  eyw_one <- function(lam, delta, N) {
    tol <- sqrt(.Machine$double.eps)
    # First series term; later terms follow by the recurrence below.
    terms <- lam * 2^delta * log(2)
    k <- 1
    while (terms[k] > tol) {
      nxt <- (lam / k) * (log(k + 2) / log(k + 1)) * ((k + 2) / (k + 1))^delta * terms[k]
      terms <- c(terms, nxt)
      k <- k + 1
    }
    sum(terms) / tau(lam, delta, N)
  }
  # Vectorised over lambda, matching the original sapply() behaviour.
  EYW <- vapply(lambda, eyw_one, numeric(1), delta = delta, N = N)
  EYW - mu(lambda, delta, N = N) * kapa(lambda, delta, N = N)
}
|
ecedb512a776c3114eea26e9ca091385e0add719
|
c4c1c6cf56b94961369e1f8f665580b09aa38b8e
|
/ScrapingWebsitesData.R
|
92fd2e43d06145edde00b4e103f5876d6e5c03bc
|
[] |
no_license
|
tadevosyana/Home
|
5561b79b11a8fa18c646140d9ef5b146cb19b57e
|
0105fb38d96889ea26099061f3097c0b996f3247
|
refs/heads/main
| 2023-06-18T16:10:31.055972
| 2021-07-15T14:06:46
| 2021-07-15T14:06:46
| 386,302,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,258
|
r
|
ScrapingWebsitesData.R
|
#Cleans everything (any previously datasets, functions created,..)
rm(list = ls())
pkgs <- c('lubridate', "rvest", "readr","sjmisc","dplyr","stringr","qdapRegex","gsubfn","tidyverse", "httr", "jsonlite","readxl","yesno")
for (i in pkgs) {
if (!i %in% installed.packages()) {
install.packages(i)
}
library(i, character.only = TRUE)
}
folder<-dirname(rstudioapi::getSourceEditorContext()$path)
if (str_contains(folder,'/WebScraping')){
folder<-dirname(folder)
} else {
folder<-dirname(dirname(folder)) # move two dirs above HIM
#condition if executed from CntrImport.R, move it one more time up
if(str_contains(folder,"/HIM")){
folder<-dirname(folder)
}
}
folder<-paste0(folder,'/OutWebsitesData')
if(!dir.exists(paste0(folder))){
dir.create(paste0(folder)) #create folder for raw files
}
functions_folder<-paste0(dirname(rstudioapi::getSourceEditorContext()$path),'/DataCountries')
#source other functions
functions <- list.files(functions_folder, pattern = ".R$", recursive = TRUE)
for (i in 1:length(functions)) {
source(paste0(functions_folder,'/',functions[i]))
}
#Temporary on hold
data_yesterday<-read.csv(paste0(dirname(folder),'/',format(Sys.Date()-1,'%y%m%d'),'_qry_COVID_running_cases_country_date.CSV')) %>%
mutate(DateReport1=as.Date(parse_date_time(DateReport1, c("dmy","ymd","mdy")))) %>%
filter(DateReport1==Sys.Date()-1) %>%
mutate(ADM0NAME=str_to_title(ADM0NAME))
#Switch true= verified countries, false= all countries
exp_ready = TRUE
if(exp_ready == TRUE){
#Important to keep same spelling as in function name (one country = one word)
listcountries<-c('Georgia',
'Russia',
'Serbia',
'Slovakia',
'Kyrgyzstan',
'Ukraine',
'France',
'Denmark',
'Moldova',
'Romania',
'Portugal',
'North Macedonia',
'Croatia',
'Faroe',
'Gibraltar'
)
}else{
#Important to keep same spelling as in function name (one country = one word)
listcountries<-gsub('\\.R','',functions)
}
#General function that gets data but handles errors if needed
getdata <- function(ctr) {
out <- tryCatch(
{ print(paste('Getting data for ',ctr))
suppressWarnings(get(paste0('Data_',ctr))())
},
error=function(cond) {
message(paste("Could not read data for ",ctr,'. Please contact Celine or Roman and take data manually'))
return(data.frame(ADM0NAME=ctr,
TotalCases=NA,
NewCases=NA,
TotalDeaths=NA,
NewDeaths=NA,
NewTests=NA,
TotalTests=NA,
Hosp_occ=NA,
ICU_occ=NA))
},
finally={
print('Over')
}
)
return(out)
}
web_scrap<-data.frame()
for (ctry in listcountries){
web_scrap_<-getdata(ctry)
web_scrap<-bind_rows(web_scrap,web_scrap_)
}
#read reference Cntry_web_ref.csv
cntry_ref <- read.csv(paste0(dirname(rstudioapi::getSourceEditorContext()$path),'/Cntry_web_ref.csv'))
cntry_ref <- cntry_ref %>%
mutate(country=str_to_title(ADM0NAME))
#%>% mutate(country=if_else(country=="Czech Republic","CzechRepublic",country))
web_scrap_epi<- cntry_ref %>%
left_join(web_scrap,by=c('country'='ADM0NAME')) %>%
mutate(DateReport1=Sys.Date()) %>%
mutate(ImportFlag=if_else(!is.na(TotalCases) | !is.na(TotalDeaths) ,3,1))%>%
mutate(CasesTotal=if_else(!is.na(TotalCases),as.character(TotalCases),"")) %>%
mutate(DeathsTotal=if_else(!is.na(TotalDeaths),as.character(TotalDeaths),""))%>%
select(DateReport1,country,CasesTotal,DeathsTotal,ImportFlag,NewCases,NewDeaths)
cntry_ref_severity <- read.csv(paste0(dirname(rstudioapi::getSourceEditorContext()$path),'/Cntry_web_ref_severity.csv'),fileEncoding="UTF-8-BOM")
cntry_ref_severity <- cntry_ref_severity %>%
mutate(country=str_to_title(ADM0NAME))
web_scrap_severity<- cntry_ref_severity %>%
left_join(web_scrap,by=c('country'='ADM0NAME')) %>%
mutate(DateReport1=Sys.Date()) %>%
select(country,DateReport1,NewTests,Hosp_occ,ICU_occ) %>%
replace(is.na(.), '')
ComparingData<-function(ctr,Variable){
data_yesterday_ctr<-data_yesterday %>%
filter(ADM0NAME==ctr)
data_today_ctr<-web_scrap %>%
filter(ADM0NAME==ctr)
Variable<-all_of(Variable)
Variable_Today<-(data_today_ctr %>% select(Variable))[1,1]
Variable_Yesterday<-(data_yesterday_ctr %>% select(Variable))[1,1]
if(!is.na(Variable_Today)){
if(Variable_Today==Variable_Yesterday)
{message(paste0(ctr,': ',Variable,' is equal to yesterday. Website probably not yet updated. Please update manually later'))}
if(Variable=='TotalDeaths' | Variable=='TotalCases'){
if(Variable_Today<Variable_Yesterday)
{message(paste0(ctr,': ',Variable,' is lower than yesterday. Not normal. Please investigate'))}
}
}
}
QuickValidation<-function(ctr){
TotalCasesToday<-(web_scrap %>% filter(ADM0NAME==ctr) %>% select(TotalCases))[1,1]
NewCasesToday<-(web_scrap %>%
filter(ADM0NAME==ctr) %>% select(NewCases))[1,1]
TotalCasesYesterday<-(data_yesterday %>% filter(ADM0NAME==ctr) %>% select(TotalCases))[1,1]
if(!is.na(NewCasesToday) & !is.na(TotalCasesToday) & (TotalCasesToday!=TotalCasesYesterday)){
if(TotalCasesToday!=TotalCasesYesterday+NewCasesToday){
message(paste0(ctr,': Total Cases (',TotalCasesToday,') for today is not equal to Total Cases Yesterday (',
TotalCasesYesterday,') + New Cases Today (',NewCasesToday,'). Please investigate')) }
}
TotalDeathsToday<-(web_scrap %>% filter(ADM0NAME==ctr) %>% select(TotalDeaths))[1,1]
NewDeathsToday<-(web_scrap %>%
filter(ADM0NAME==ctr) %>% select(NewDeaths))[1,1]
TotalDeathsYesterday<-(data_yesterday %>% filter(ADM0NAME==ctr) %>% select(TotalDeaths))[1,1]
if(!is.na(NewDeathsToday)&!is.na(TotalDeathsToday) & (TotalDeathsToday!=TotalDeathsYesterday)){
if(TotalDeathsToday!=TotalDeathsYesterday+NewDeathsToday){
message(paste0(ctr,': Total Deaths (',TotalDeathsToday,') for today is not equal to Total Deaths Yesterday (',
TotalDeathsYesterday,') + New Deaths Today (',NewDeathsToday,'). Please investigate'))
}
}
}
# Normalise the scraped country labels to the canonical ADM0 names used in
# the reference data, then run the day-over-day sanity checks per country.
country_names <- str_replace_all(
  listcountries,
  c("Russia" = "Russian Federation",
    "CzechRepublic" = "Czech Republic",
    "Faroe" = "Faroe Islands"))
check_vars <- c("NewCases", "TotalCases", "NewDeaths", "TotalDeaths")
for (country in country_names) {
  for (variable in check_vars) {
    ComparingData(country, variable)
  }
  QuickValidation(country)
}
# Persist today's scrape, date-stamped, next to the previous extracts.
stamp <- format(Sys.time(), "%Y-%m-%d")
write.csv(web_scrap_epi,
          paste0(folder, '/WebScraping_Epi_', stamp, '.csv'),
          row.names = FALSE)
write.csv(web_scrap_severity,
          paste0(folder, '/WebScraping_Severity_', stamp, '.csv'),
          row.names = FALSE)
|
a03d8e8d1bb24f62ab5a063e67d85ef915c29612
|
e823078b011f333dcd28cd43b2e523d963558359
|
/001_ingest.R
|
cab417add76672b77ecd064a0a9f7eb80079d493
|
[] |
no_license
|
EvanT33/baseball
|
e254178c54c22ebbab2e41ad3e1082b822e1270e
|
4d046799fb6701fb2e579aff3d10cc6a2d934c63
|
refs/heads/master
| 2020-03-27T05:18:56.786069
| 2018-08-23T21:38:26
| 2018-08-23T21:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,466
|
r
|
001_ingest.R
|
#--------------------------
# This script produces play-by-play data for a particular year and team. Later we want it to ingest data for
# all teams and all years iteratively.
# Download this folder: https://www.retrosheet.org/events/2017eve.zip
# in command prompt, navigate to the 2017eve folder you downloaded using "cd" etc., and type:
# C:\Users\ethompson\Desktop\baseball\2017eve>bevent -y 2017 2017ANA.eva >2017.txt
# for example, for 2017 LAA play-by-play data
# then you can run this script
#--------------------------
# ingest data - currently only ingesting 2017 LA Angels home games
# (2017ANA.txt is the bevent output; header.csv supplies the column names)
ANA2017 <- as.data.frame(read.csv("2017eve/2017ANA.txt", stringsAsFactors=TRUE, header = FALSE))
header <- as.data.frame(read.csv("2017eve/header.csv", stringsAsFactors=FALSE, header = FALSE))
# add header
names(ANA2017) <- header[,1]
# create runs variable which is sum of home and visitor teams' scores at each plate appearance
ANA2017$runs <- with(ANA2017, vis_score + home_score)
# create key for each half-inning of the season
ANA2017$half_inning <- with(ANA2017, paste(game_id, inning, batting_team))
# total runs scored during half-inning
# (a destination code > 3 means the runner/batter crossed home plate)
ANA2017$runs_scored <- with(ANA2017, (batter_dest > 3) +
(runner_on_1st_dest > 3) +
(runner_on_2nd_dest > 3) +
(runner_on_3rd_dest > 3))
runs_scored_inning <- aggregate(ANA2017$runs_scored, list(half_inning = ANA2017$half_inning), sum)
# total runs scored at start of inning ("[", 1 picks each half-inning's first row)
runs_scored_start <- aggregate(ANA2017$runs, list(half_inning = ANA2017$half_inning), "[", 1)
# create max of runs in the half inning (starting runs plus runs scored)
# NOTE: `max` here shadows base::max for the rest of the script
max <- data.frame(half_inning = runs_scored_start$half_inning)
max$x <- runs_scored_inning$x + runs_scored_start$x
ANA2017 <- merge(ANA2017, max)
n <- ncol(ANA2017)
names(ANA2017)[n] <- "max_runs"
# runs scored in remainder of inning
ANA2017$runs_roi <- with(ANA2017, max_runs - runs)
# binary variable to indicate whether base is occupied or not
runner1 <- ifelse(as.character(ANA2017[ , "first_runner"]) == "", 0, 1)
runner2 <- ifelse(as.character(ANA2017[ , "second_runner"]) == "", 0, 1)
runner3 <- ifelse(as.character(ANA2017[ , "third_runner"]) == "", 0, 1)
# get.state() function creates something like "010 1": a runner on second
# base only with one out; "111" would be bases loaded
get.state <- function(runner1, runner2, runner3, outs){
runners <- paste(runner1, runner2, runner3, sep = "")
paste(runners, outs)
}
ANA2017$state <- get.state(runner1, runner2, runner3, ANA2017$outs)
# number of runners on base and number of outs after the play
# (derived from the destination codes of the batter and each runner)
n_runner1 <- with(ANA2017, as.numeric(runner_on_1st_dest == 1 | batter_dest== 1))
n_runner2 <- with(ANA2017, as.numeric(runner_on_1st_dest == 2 | runner_on_2nd_dest == 2 | batter_dest== 2))
n_runner3 <- with(ANA2017, as.numeric(runner_on_1st_dest == 3 | runner_on_2nd_dest == 3 | runner_on_3rd_dest == 3 | batter_dest== 3))
n_outs <- with(ANA2017, outs + outs_on_play)
ANA2017$new_state <- get.state(n_runner1, n_runner2, n_runner3, n_outs)
# filter for plays where the base-out state changed or runs scored on the play
ANA2017 <- subset(ANA2017, (state != new_state) | (runs_scored > 0))
library(plyr)
# Keep only completed half-innings (exactly 3 outs recorded): walk-off and
# shortened innings would bias the run-expectancy estimates.
data_outs <- ddply(ANA2017, .(half_inning), summarise, outs_inning = sum(outs_on_play))
ANA2017 <- merge(ANA2017, data_outs)
ANA2017c <- subset(ANA2017, outs_inning == 3)
# BUGFIX: `batter_event_flag = TRUE` was a *named argument* that subset()
# silently absorbed via `...`, so no filtering happened at all. `==` actually
# restricts the data to batter events (read.csv parses the all-T/F column as
# logical, so comparing against TRUE works).
ANA2017c <- subset(ANA2017c, batter_event_flag == TRUE)
library(car)
# Collapse every 3-out state into the single absorbing state "3".
ANA2017c$new_state <- recode(ANA2017c$new_state,
"c('000 3', '100 3', '010 3', '001 3',
'110 3', '101 3', '011 3', '111 3') = '3'")
# expected runs scored in the remainder of the inning (runs expectancy),
# averaged over every play observed from each base-out state
runs <- with(ANA2017c, aggregate(runs_roi, list(state), mean))
# display in a matrix, but first sort runs dataframe by number of outs
# (character position 5 of a state label like "110 2" is the out count)
runs$outs <- substr(runs$Group.1, 5, 5)
runs <- runs[order(runs$outs), ]
# 8 base configurations x 3 out counts; column-major fill matches
# aggregate()'s alphabetical state order within each out count
runs_out <- matrix(round(runs$x, 2), 8, 3)
# dimnames for run expectancy matrix
dimnames(runs_out)[[2]] <- c("0 outs", "1 out", "2 outs")
dimnames(runs_out)[[1]] <- c("000", "001", "010", "011", "100", "101", "110", "111")
# print run expectancy matrix
runs_out
# go back to dplyr (instead of plyr) to be safe: both export summarise()
library(dplyr)
# compute transition probabilities
# T.matrix: raw transition counts between base-out states;
# P.matrix: the same rows normalised to probabilities.
T.matrix <- with(ANA2017c, table(state, new_state))
P.matrix <- prop.table(T.matrix, 1)
# append the absorbing 3-out state: once reached, it transitions to itself
P.matrix <- rbind(P.matrix, c(rep(0, 24), 1))
# (not required) examples of transition probabilities:
# the probability of moving from the "000 0" state to the
# "000 0" state is .04. In other words, the probability
# of a HR was 4%.
P1 <- round(P.matrix["000 0", ], 3)
data.frame(Prob = P1[P1 > 0])
# probability of inning ending is 60.9% when you have runner
# on second with 2 outs.
P2 <- round(P.matrix["010 2", ], 3)
data.frame(Prob = P2[P2 > 0])
# simulating the Markov chain. idea is that runs scored on a play equal:
# (state B runners on base + state B outs + 1) - (state A runners on base + state A outs)
# i.e. the batter plus everyone on base must end up on base, out, or across
# the plate.
# Sum the digits of a state label such as "110 1" (runners + outs); the
# space yields an NA from as.numeric(), dropped via na.rm.
count_runners_outs <- function(s)
  sum(as.numeric(strsplit(s, "")[[1]]), na.rm = TRUE)
runners_outs <- sapply(dimnames(T.matrix)[[1]], count_runners_outs)[c(-25, -26, -27, -28, -29, -30)]
# R[a, b] = runs scored on a transition from state a to state b
R <- outer(runners_outs + 1, runners_outs, FUN = "-")
dimnames(R)[[1]] <- dimnames(T.matrix)[[1]][-25]
dimnames(R)[[2]] <- dimnames(T.matrix)[[1]][-25]
# column 25 = absorbing 3-out state: no runs score on reaching it
R <- cbind(R, rep(0, 24))
# Walk the chain from `start` until the absorbing state (index 25) and
# return the total runs scored. `P` may hold raw transition COUNTS as well
# as probabilities, because sample() normalises `prob` internally.
simulate_half_inning <- function(P, R, start = 1){
  s <- start
  runs <- 0
  while(s < 25){
    s.new <- sample(1:25, 1, prob = P[s, ])
    runs <- runs + R[s, s.new]
    s <- s.new
  }
  runs
}
set.seed(555)
# simulate from 000 0
runs_simulation <- replicate(500, simulate_half_inning(T.matrix, R, start = 1))
# Fraction of simulated innings producing 5+ runs.
# BUGFIX: the old `sum(runs_simulation[runs_simulation >= 5]) / 500` summed
# the RUNS of those innings, not their count, so it did not match the stated
# "% of innings" interpretation.
mean(runs_simulation >= 5)
# simulate half-innings with different starting states; runs_j() returns the
# mean simulated runs until the inning ends, starting from state index j
# (the Markov-chain analogue of the empirical run-expectancy matrix above)
runs_j <- function(j){
mean(replicate(500, simulate_half_inning(T.matrix, R, j)))
}
runs_expectancy <- sapply(1:24, runs_j)
# reshape the 24 state values into the familiar 8x3 (bases x outs) layout
runs_expectancy <- t(round(matrix(runs_expectancy, 3, 8), 2))
dimnames(runs_expectancy)[[2]] <- c("0 outs", "1 out", "2 outs")
dimnames(runs_expectancy)[[1]] <- c("000", "001", "010", "011", "100", "101", "110", "111")
# runs expectancy matrix after Markov simulation is similar
# to runs expectancy matrix built from historical data
runs_expectancy
|
d9452fe3770a9a5b6d7d708c03841d0928944daa
|
0a60c49b4647d110b63f9ac38219f1ca826be340
|
/tests/testthat/test-compat-cli.R
|
7a097e068e76180ea91a56cdf7871be08ef22394
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
pkq/rlang
|
2c7653d2575b4af006d26aadc894ede59de340cd
|
b501cf763097f796f90982dead5e646bdd9dd3f0
|
refs/heads/master
| 2021-08-06T11:03:50.944095
| 2021-06-08T15:33:59
| 2021-06-08T15:33:59
| 135,239,116
| 0
| 0
| null | 2018-05-29T03:58:27
| 2018-05-29T03:58:27
| null |
UTF-8
|
R
| false
| false
| 674
|
r
|
test-compat-cli.R
|
# cli is an optional (suggested) dependency for these compatibility tests
skip_if_not_installed("cli")
# Snapshot the output of every style_*() helper under both plain and ANSI
# cli configurations. The snapshot records this code block verbatim, so no
# comments are added inside expect_snapshot().
cli::test_that_cli(configs = c("plain", "ansi"), "can style strings with cli", {
expect_snapshot({
style_emph("foo")
style_strong("foo")
style_code("foo")
style_q("foo")
style_pkg("foo")
style_fn("foo")
style_arg("foo")
style_kbd("foo")
style_key("foo")
style_file("foo")
style_path("foo")
style_email("foo")
style_url("foo")
style_var("foo")
style_envvar("foo")
style_field("foo")
style_cls("foo")
style_cls(c("foo", "bar"))
})
})
# Literal `{` / `}` in styled strings must survive cli's glue-style
# interpolation rather than being treated as interpolation delimiters.
cli::test_that_cli(configs = "plain", "styled strings may contain `{` syntax", {
expect_equal(style_emph("{foo {}"), "_{foo {}_")
})
|
6a4dfc575aed9c8b9700f09934d79e55070d1003
|
44b083e16634d27e18553082786c46749493cf4d
|
/R/panoids.R
|
66a2705e4ad5e5d4f5ef2934e0bcbdf530167996
|
[] |
no_license
|
DrRoad/streetview-1
|
069755458b17238c0aaf0d21de9c53c792797228
|
94af03ac8acd81b817897336240cab0239ce91c5
|
refs/heads/master
| 2020-04-08T06:32:35.540083
| 2017-08-18T16:31:15
| 2017-08-18T16:31:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,669
|
r
|
panoids.R
|
#' \code{streetview} package
#'
#' Tools for retrieving and plotting Google Street View panorama ids
#' (panoids).
#'
#' See the package README for setup and usage examples.
#'
#' @docType package
#' @name streetview
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
#' @title get_panoids
#'
#' @description Retrieve the cached list of panoids, or query the Google
#'   Street View metadata API for addresses that are not cached yet. When
#'   \code{query.addresses} is \code{NULL} the previously cleaned cache is
#'   returned as-is; otherwise the new addresses are scraped, cleaned and
#'   the cache is refreshed.
#' @param path.root location of current dropbox root Example: ~/Dropbox/pkg.data/
#' @param query.addresses character vector of locations to find nearest panoids Example: 11804 Ridge Pkwy, Broomfield, CO 80021
#' @param n.echo Number of queries between progress reports / incremental saves
#' @keywords streetview google panoids
#' @export
#' @importFrom pkg.data.paths paths
#' @importFrom lubridate ymd year quarter
#' @importFrom data.table data.table rbindlist
#' @import utils
get_panoids <- function(path.root = NULL, query.addresses = NULL, n.echo = 10){
  pano_id <- NULL  # quiet R CMD check (data.table non-standard evaluation)
  # Initialize api and paths
  api.key <- api.keys::import.key(str.api.name = 'google')
  get.panoids.paths <- pkg.data.paths::paths(path.root = path.root, str.pkg.name = 'streetview')
  panoids.raw.path <- paste0(get.panoids.paths$pkg.root[[1]], '/raw/panoids.rdata')
  panoids.clean.path <- paste0(get.panoids.paths$pkg.root[[1]], '/clean/panoids.rdata')
  # BUGFIX: the two branches were inverted -- passing addresses used to
  # return the cached data untouched while passing NULL tried to scrape an
  # empty list.
  if (is.null(query.addresses)){
    # No new addresses requested: just return the cleaned cache.
    load(panoids.clean.path)
  } else {
    # Resume from the raw cache when present so finished queries are not repeated.
    if(file.exists(panoids.raw.path)){
      load(panoids.raw.path)
    } else {
      panoids <- data.table::data.table(address = as.character())
    }
    # Subset to only include new addresses
    query.addresses <- query.addresses[!(query.addresses %in% panoids$address)]
    cat('Processing', length(query.addresses), 'addresses')
    i <- 0
    tic <- Sys.time()
    # BUGFIX: a hard-coded `n.echo <- 100` used to silently override the
    # function argument; the caller-supplied value is now honoured.
    n.queries <- length(query.addresses)
    for (query.address in query.addresses){
      # Build the Street View metadata endpoint URL for this address
      api <- list()
      api[[1]] <- 'https://maps.googleapis.com/maps/api/streetview/metadata?size=600x300&location='
      api[[2]] <- paste0(URLencode(query.address), '&')
      api[[3]] <- paste0('key=', api.key)
      api.url <- paste0(unlist(api), collapse = '')
      # try() keeps a single failed lookup from aborting the whole run
      panorama <- try(unlist(rjson::fromJSON(file=api.url)))
      panoid <- try(data.table::data.table(address = query.address, t(panorama)))
      panoids <- try(data.table::rbindlist(list(panoids, panoid), use.names = TRUE, fill=TRUE))
      i <- i + 1
      # Periodic progress report + incremental save so a crash loses little work
      if (i %% n.echo == 0){
        toc <- Sys.time()
        time.elapsed <- as.numeric(difftime(toc, tic, units='days'))
        days.remain <- (n.queries-i)/n.echo * time.elapsed
        cat(i, 'of', n.queries, '\n', 'Days remaining: ', days.remain, '\n')
        print(panoid)
        save(panoids, file=panoids.raw.path)
        tic <- Sys.time()
      }
    }
    # Collapse to unique panoids and drop panoids with no results
    data.table::setkey(panoids, pano_id)
    panoids <- unique(panoids, by=c('pano_id', 'date'))
    # Remove rows containing any NA value
    na.rows <- unlist(lapply(names(panoids), function(x) which(is.na(panoids[, x, with=FALSE]))))
    panoids <- panoids[!(seq(1,nrow(panoids)) %in% na.rows )]
    # Derive date parts from the "YYYY-MM" snapshot date returned by the API
    panoids$dt.date <- lubridate::ymd(paste0(panoids$date, '-01'))
    panoids$dt.year <- lubridate::year(panoids$dt.date)
    panoids$dt.quarter <- lubridate::quarter(panoids$dt.date)
    panoids$address <- NULL
    panoids$copyright <- NULL
    panoids$date <- NULL
    save(panoids, file=panoids.clean.path)
  }
  return(panoids)
}
#' @title plot_panoids
#'
#' @description Load the cleaned panoid cache and plot panorama locations
#'   coloured by snapshot date (year and quarter), plus a histogram of
#'   snapshots per year.
#' @param path.root location of current dropbox root Example: ~/Dropbox/pkg.data/
#' @param l.extent optional bounding box: list with lng.min, lng.max, lat.min, lat.max
#' @return A list of three ggplot objects: \code{year}, \code{quarter}, \code{hist}
#' @keywords streetview google panoids plot
#' @export
#' @importFrom pkg.data.paths paths
#' @importFrom ggthemes theme_map theme_tufte
#' @importFrom ggplot2 ggplot geom_point scale_color_brewer geom_bar scale_fill_brewer aes
plot_panoids <- function(path.root = NULL, l.extent= NULL){
# quiet R CMD check notes about non-standard-evaluation column names
dt.quarter <- NULL; dt.year <- NULL; location.lat <- NULL; location.lng <- NULL; pano_id <- NULL;
get.panoids.paths <- pkg.data.paths::paths(path.root = path.root, str.pkg.name = 'streetview')
panoids.clean.path <- paste0(get.panoids.paths$pkg.root[[1]], '/clean/panoids.rdata')
# load() brings the cached `panoids` data.table into scope
load(panoids.clean.path)
# optionally clip to the requested bounding box
if (!is.null(l.extent)) panoids <- panoids[location.lat > l.extent$lat.min &
location.lat < l.extent$lat.max &
location.lng > l.extent$lng.min &
location.lng < l.extent$lng.max]
# scatter of panorama locations coloured by snapshot year
l.plot <- list()
l.plot$year <- ggplot2::ggplot(panoids, ggplot2::aes(location.lng, location.lat, colour=as.factor(dt.year))) +
ggplot2::geom_point(size=0.4, alpha=0.75) +
ggplot2::scale_color_brewer(palette = "Spectral", guide = "legend", name=NULL) +
ggthemes::theme_map(base_size = 12)
# same scatter coloured by quarter within the year
l.plot$quarter <- ggplot2::ggplot(panoids, ggplot2::aes(location.lng, location.lat,
colour=as.factor(dt.quarter))) +
ggplot2::geom_point(size=0.4, alpha=0.75) +
ggplot2::scale_color_brewer(palette = "RdYlBu", guide = "legend", name='Quarter', direction=-1) +
ggthemes::theme_map(base_size = 12)
# snapshot counts per year
l.plot$hist <- ggplot2::ggplot(panoids, ggplot2::aes(x=as.factor(dt.year), fill=as.factor(dt.year))) +
ggplot2::geom_bar() +
ggplot2::scale_fill_brewer(palette = 'Spectral', guide = FALSE) +
ggplot2::xlab(label= NULL) +
ggplot2::ylab(label= NULL) +
ggthemes::theme_tufte()
return(l.plot)
}
|
634ec282834bac4625f82d49586191ae2ba1682b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/event/examples/hgextval.Rd.R
|
2bfae7a6392a28479e349d3808dc5c79a2617ea8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 187
|
r
|
hgextval.Rd.R
|
# Worked example extracted from the event package's hgextval Rd file.
library(event)
### Name: hgextval
### Title: Log Hazard Function for an Extreme Value Process
### Aliases: hgextval
### Keywords: distribution
### ** Examples
# Arguments are positional; see ?hgextval for their exact meaning.
hgextval(1, 2, 1, 2)
|
a09a0908a607559c469d166e02585f4d2831be89
|
8468b7047206beb226a84ad7d3a9b67612c9b7e0
|
/0.4.LST_ViewAngle_data.processing.R
|
32c09d1120774c68914a87427dd82d1564b8ef29
|
[
"Apache-2.0"
] |
permissive
|
GeoscienceAustralia/lst-gde
|
6038b19aac99f204a11f184b12b5e7e33fd6c9c5
|
bc94df556128f6abebb8a52bf219083d6057254a
|
refs/heads/master
| 2021-05-15T12:02:05.073656
| 2017-10-23T07:46:23
| 2017-10-23T07:46:23
| 106,478,479
| 0
| 2
| null | 2017-11-14T11:44:35
| 2017-10-10T22:31:40
|
R
|
UTF-8
|
R
| false
| false
| 1,624
|
r
|
0.4.LST_ViewAngle_data.processing.R
|
# Extract MODIS 1 km LST view-angle values at the 250 m grid points of
# interest and write one flat-binary (.flt) file per date.
# NOTE(review): this is a standalone script -- it clears the workspace and
# changes the working directory on purpose; do not source() it from a session
# you care about.
rm(list=ls())
setwd('C:/Local Data/')
# Date range driving which files exist
X <- read.csv('C:/Local Data/Code/CB_Date_Range.csv')
Date <- strptime(X[,1],'%Y-%m-%d')
CURRENT <- as.POSIXlt(Date)
CURRENT.YMD <- format(CURRENT,'%Y%m%d')
#---------------------------------
#--- Define Area of Interest -----
#---------------------------------
### 250 m grid coordinates of interest
ROI_latlongs <- read.csv('C:/Local Data/Code/ROI_BAWAP_CentreCoords_250m.csv')
lats <- ROI_latlongs$latitude
lons <- ROI_latlongs$longitude
### MODIS 1 km dataset dimensions
MODIS.NROWS <- 3726
MODIS.NCOLS <- 4790
### boundary latitude and longitudes of the MODIS region
# see http://remote-sensing.nci.org.au/u39/public/html/modis/lpdaac-mosaics-cmar/templates/
MODIS.LLATS <- seq(-10.,by=-0.009394,length.out=MODIS.NROWS+1)
MODIS.LLONS <- seq(110.,by= 0.009394,length.out=MODIS.NCOLS+1)
### row-major pixel indices of the ROI points within the MODIS grid
MODIS.ppp <- (findInterval(-lats,-MODIS.LLATS) - 1)*MODIS.NCOLS + findInterval(lons,MODIS.LLONS)
# Every second .envi file is taken to hold the view-angle band (assumed
# alternating layout -- TODO confirm against the archive's naming scheme).
fl <- list.files('M:/PhD/ViewAngle',pattern='.envi',full.names=TRUE)
# renamed: the original variable `seq` shadowed base::seq
odd.idx <- seq(1, length(fl), by = 2)
fl <- fl[odd.idx]
#---------------------------------
### loop [temporal/date] function:
for (k in seq_along(fl)) {  # seq_along() is safe even when no files match
  # read the full 1 km grid as signed bytes, keep only the ROI pixels
  phi <- readBin(fl[k],'integer',size=1,n=3726*4790)[MODIS.ppp]
  phi[phi<0] <- NA # convert no data values to nulls
  phi <- phi-65 # apply offset
  print(range(phi))
  ### write ROI view angles as 4-byte floats; date token taken from the
  ### fixed character positions of the file name (path-layout dependent)
  ofn <- paste('C:/Local Data/F95/LST/MODIS_LST_1km_ViewAngle/',substr(fl[k],26,33),'_MODIS_LST_1km_ViewAngle.flt',sep='')
  writeBin(as.vector(phi),ofn,size=4)
}
|
b422a135f0da2814f72741ba63ac695f24486e0f
|
c23c8ded85d46acfed25cadba5738380dc1d7ee7
|
/static/expr_med.r
|
0d9d3b93b13ab2032844247a836d15953dabe354
|
[
"MIT"
] |
permissive
|
raysinensis/tcgaAPP
|
c6a67b2abd2423cd45b01c7dff4841e8865a4941
|
96efc39a6d1e049579ff2765ba6450ca2ad886e0
|
refs/heads/master
| 2020-12-31T00:10:52.145391
| 2017-06-26T21:30:16
| 2017-06-26T21:30:16
| 86,538,455
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,745
|
r
|
expr_med.r
|
## Adjust p-values for multiple comparisons (Benjamini-Hochberg) on every
## "*output.csv" produced by the survival analysis, appending the FDRs as
## column V7 and writing the result to adj.csv.
filepath <- paste0(getwd(), '/static/OUT')
setwd(filepath)  # NOTE: changes the working directory for the rest of the script
mainpath <- filepath
csvfiles <- list.files(path = mainpath, pattern = "output.csv")
cancerlist <- c()
for (singlecsv in csvfiles) {
  fullfilename <- paste0(mainpath, "/", singlecsv)
  generesult <- read.csv(file = fullfilename, header = FALSE, sep = ",")
  # V4 holds the raw p-values; BH-adjust and store as fixed-format strings in V7
  pvalues <- generesult$V4
  fdrs <- p.adjust(pvalues, method = "BH", n = length(pvalues))
  generesult$V7 <- formatC(fdrs, digits = 5)
  # NOTE(review): each iteration overwrites "adj.csv", so only the LAST input
  # file's results survive the loop. Dead per-file output-name variables
  # (oldfilename/newfilename) were removed; confirm the single-file output is
  # intentional before running on more than one output file.
  write.table(generesult, "adj.csv", append = FALSE, col.names = FALSE, row.names = FALSE, sep = ",")
  # V2 holds the cancer-type codes used to drive the Firebrowse queries below
  cancerlist <- (generesult$V2)
}
# For each cancer type from the adjusted results, query the Firebrowse API
# for clinical records and mRNA-seq expression of the gene of interest, then
# tabulate normal-vs-tumor expression summaries (medians and quartiles).
diff.Exp.Genes = as.character(generesult$V1[1])
# df  : one row per cancer type  (median expression levels)
# df2 : two rows per cancer type (five-number summaries; normal + tumor)
df=data.frame(matrix(NA, nrow = length(cancerlist), ncol = 4))
df2=data.frame(matrix(NA, nrow = length(cancerlist)*2, ncol = 8))
i=1
for (cancer in cancerlist){
cancer.Type=as.character(cancer)
library(FirebrowseR)
cohorts = Metadata.Cohorts(format = "csv")
##looking for cancer type
cancertype.Pats = Samples.Clinical(cohort = cancer.Type, format="tsv")
##dim(cancertype.Pats)
##pulling all patients (paged API: keep requesting until a short page arrives)
all.Received = F
page.Counter = 1
page.size = 150
cancertype.Pats = list()
while(all.Received == F){
cancertype.Pats[[page.Counter]] = Samples.Clinical(format = "csv",
cohort = cancer.Type,
page_size = page.size,
page = page.Counter)
if(page.Counter > 1)
colnames(cancertype.Pats[[page.Counter]]) = colnames(cancertype.Pats[[page.Counter-1]])
if(nrow(cancertype.Pats[[page.Counter]]) < page.size){
all.Received = T
} else{
page.Counter = page.Counter + 1
}
}
cancertype.Pats = do.call(rbind, cancertype.Pats)
##dim(cancertype.Pats)
##pulling gene expression info (same paging pattern as above)
all.Found = F
page.Counter = 1
mRNA.Exp = list()
page.Size = 2000 # using a bigger page size is faster
while(all.Found == F){
mRNA.Exp[[page.Counter]] = Samples.mRNASeq(format = "tsv",
gene = diff.Exp.Genes,
cohort = cancer.Type,
page_size = page.Size,
page = page.Counter)
if(nrow(mRNA.Exp[[page.Counter]]) < page.Size){
all.Found = T
} else {
page.Counter = page.Counter + 1
}
}
mRNA.Exp = do.call(rbind, mRNA.Exp)
dim(mRNA.Exp)
##normal.Tissue.Pats = which(mRNA.Exp$sample_type[1] == "N")
##cancer.Tissue.Pats = which(mRNA.Exp$sample_type[1] == "T")
##patient.Barcodes = mRNA.Exp$tcga_participant_barcode[normal.Tissue.Pats]
##mRNA.Exp = mRNA.Exp[which(mRNA.Exp$tcga_participant_barcode %in% patient.Barcodes &
##                          mRNA.Exp$sample_type %in% c("NT", "TP")), ]
# sample types starting "N" are treated as normal tissue, "T" as tumor
normal_med=median(mRNA.Exp$expression_log2[substr(mRNA.Exp$sample_type,start=1,stop=1)=="N"])
cancer_med=median(mRNA.Exp$expression_log2[substr(mRNA.Exp$sample_type,start=1,stop=1)=="T"])
# no normal samples for this cohort -> record 0 rather than NA
if (is.na(normal_med)){
df$"X1"[i]=0
} else {
df$"X1"[i]=normal_med}
df$"X2"[i]=cancer_med
df$"X3"[i]=generesult$"V5"[i]
df$"X4"[i]=cancer.Type
# five-number summaries for the boxplot-style table (df2)
normalq=quantile(mRNA.Exp$expression_log2[substr(mRNA.Exp$sample_type,start=1,stop=1)=="N"],names=FALSE)
cancerq=quantile(mRNA.Exp$expression_log2[substr(mRNA.Exp$sample_type,start=1,stop=1)=="T"],names=FALSE)
# row j = normal tissue, row k = tumor tissue for cancer type i
j=(2*i-1)
k=j+1
df2$"X1"[j]=normalq[1]
df2$"X2"[j]=normalq[2]
df2$"X3"[j]=normalq[3]
df2$"X4"[j]=normalq[4]
df2$"X5"[j]=normalq[5]
df2$"X1"[k]=cancerq[1]
df2$"X2"[k]=cancerq[2]
df2$"X3"[k]=cancerq[3]
df2$"X4"[k]=cancerq[4]
df2$"X5"[k]=cancerq[5]
df2$"X6"[k]=generesult$"V5"[i]
#df2$"X6"[2*i-1]=generesult$"V5"[i]
df2$"X7"[k]=cancer.Type
df2$"X7"[j]=cancer.Type
df2$"X8"[k]='tumor'
df2$"X8"[j]='normal'
i=i+1
}
# label and persist the summary tables; paths are relative to static/OUT
# (the working directory set by the preceding section)
widthcal=300*length(cancerlist)
colnames(df)=c("normal_median","tumor_median","surv_cutoff","tumor_type")
write.table(df,"level.csv",append=F, col.names = T, row.names = F, sep=",")
colnames(df2)=c("min","25%","median","75%","max","surv_cutoff","tumor_type","condition")
write.table(df2,"levels.csv",append=F, col.names = T, row.names = F, sep=",")
#library(ggplot2)
#library(reshape)
#df2 <- melt(df, id.vars = "tumor_type")
#colnames(df2)=c("tumor_type","condition","RSEM_log2")
#p<-ggplot(data=df2, aes(x=tumor_type, y=RSEM_log2, fill=condition)) +
#  geom_bar(stat="identity", position=position_dodge())+
#  geom_text(aes(label=sprintf("%0.2f",RSEM_log2)), vjust=1.5, color="black",
#            position = position_dodge(1), size=2.5)+
#  scale_fill_brewer(palette="Paired")+
#  theme_classic(base_size = 10, base_family = "Helvetica")
#png("RSEM.png",width = widthcal, height = 600, res = 200)
#p
#dev.off()
|
7209cf34381df301c54732e0380a05d7a03e4a99
|
91ef1638c42ad7e3584b56ed32970b12c66b6c14
|
/cachematrix.R
|
8192614142877efcb4c2f85b6002cf339d09a649
|
[] |
no_license
|
tradewind/ProgrammingAssignment2
|
2055a0a784d931807907032cb9aba153d9fcee11
|
5a319317d5831dbeec6abb7cc56a41c94e43d570
|
refs/heads/master
| 2020-12-24T23:54:11.652204
| 2015-02-19T15:55:13
| 2015-02-19T15:55:13
| 31,003,348
| 0
| 0
| null | 2015-02-19T05:57:10
| 2015-02-19T05:57:10
| null |
UTF-8
|
R
| false
| false
| 1,093
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix takes a matrix as argument and has the following functions,
## get(): return the matrix
## set(): reset the matrix and nullify the previously cached inverse
## getInverse(): return the inverse of the matrix
## setInverse(): set the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv_x <- NULL
set <- function(y) {
x <<- y
inv_x <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv_x <<- inverse
getInverse <- function() inv_x
list (set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it at most once: later calls return the
## cached value.
##
## x   : object created by makeCacheMatrix (list with get/getInverse/setInverse)
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv_x <- x$getInverse()
  if (!is.null(inv_x)) {
    message("getting cached inverse matrix")  # typo "chached" fixed
    return(inv_x)
  }
  data <- x$get()  # renamed: `matrix` shadowed base::matrix
  ## BUGFIX: `...` was accepted but never forwarded to solve()
  inv_x <- solve(data, ...)
  x$setInverse(inv_x)
  inv_x
}
|
b70d450589c58e615c2b1d8ca0ed4081ae5dc6b5
|
fa4b331d6804c877eb62fc9566c3a652bccd08f1
|
/man/ConnectionAttributes.Rd
|
c2521d4d37c02fd98392d39aca802b50854f16f1
|
[
"MIT"
] |
permissive
|
r-dbi/odbc
|
0091c72371abfe95f6d2e5ea940ab06c134e2063
|
56eef6949b4c63468015cd533bd6539f952877cd
|
refs/heads/main
| 2023-08-31T15:19:29.556401
| 2023-08-04T00:49:58
| 2023-08-04T00:49:58
| 63,273,973
| 252
| 98
|
NOASSERTION
| 2023-09-04T18:48:42
| 2016-07-13T19:32:07
|
C++
|
UTF-8
|
R
| false
| true
| 833
|
rd
|
ConnectionAttributes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Connection.R
\docType{data}
\name{SUPPORTED_CONNECTION_ATTRIBUTES}
\alias{SUPPORTED_CONNECTION_ATTRIBUTES}
\alias{ConnectionAttributes}
\title{Supported Connection Attributes}
\description{
These (pre) connection attributes are supported and can be passed as
part of the \code{dbConnect} call in the named list \code{attributes} parameter:
}
\details{
\itemize{
\item \code{azure_token}: This should be a string scalar; in particular Azure Active
Directory authentication token. Only for use with Microsoft SQL Server and
with limited support away from the OEM Microsoft driver.
}
}
\examples{
\dontrun{
conn <- dbConnect(
odbc::odbc(),
dsn = "my_azure_mssql_db",
Encrypt = "yes",
  attributes = list("azure_token" = .token)
)
}
}
\keyword{datasets}
|
ab85683657dc803737d7463fef14271b6849d102
|
9671527e351255e4b2a412d5c57118e369d82a68
|
/man/geom_rocci.Rd
|
3bae2d3767838e3cbdb22192ccf39d787c418269
|
[
"MIT"
] |
permissive
|
sachsmc/plotROC
|
1e80902e2f588698d3c620c654e9851c57e7848a
|
c8664ed9ba3677f5008a4b47b1e310e733ea2ea7
|
refs/heads/master
| 2022-06-20T17:32:29.866861
| 2022-05-27T09:34:16
| 2022-05-27T09:34:16
| 24,857,963
| 83
| 13
|
NOASSERTION
| 2020-03-16T15:18:49
| 2014-10-06T18:12:40
|
HTML
|
UTF-8
|
R
| false
| true
| 4,976
|
rd
|
geom_rocci.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_rocci.R
\docType{data}
\name{geom_rocci}
\alias{geom_rocci}
\alias{GeomRocci}
\title{Confidence regions for the ROC curve}
\format{
An object of class \code{GeomRocci} (inherits from \code{Geom}, \code{ggproto}, \code{gg}) of length 6.
}
\usage{
geom_rocci(
mapping = NULL,
data = NULL,
stat = "rocci",
ci.at = NULL,
sig.level = 0.05,
na.rm = TRUE,
alpha.box = 0.3,
labels = TRUE,
labelsize = 3.88,
labelround = 1,
position = "identity",
show.legend = NA,
inherit.aes = TRUE,
...
)
GeomRocci
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{stat}{Use to override the default connection between
\code{geom_rocci} and \code{stat_rocci}.}
\item{ci.at}{Vector of values in the range of the biomarker where confidence regions will be displayed}
\item{sig.level}{Significance level for the confidence regions}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{alpha.box}{Alpha level for the confidence regions}
\item{labels}{If TRUE, adds text labels for the cutoffs where the confidence regions are displayed}
\item{labelsize}{Size of cutoff text labels}
\item{labelround}{Integer, number of significant digits to round cutoff labels}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.}
\item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
}
\description{
Display rectangular confidence regions for the empirical ROC curve.
}
\section{Aesthetics}{
\code{geom_rocci} understands the following aesthetics (required aesthetics
are in bold). \code{stat_rocci} automatically maps the estimates to the required aesthetics:
\itemize{
\item \strong{\code{x}} The FPF estimate
\item \strong{\code{y}} The TPF estimate
\item \strong{\code{xmin}} Lower confidence limit for the FPF
\item \strong{\code{xmax}} Upper confidence limit for the FPF
\item \strong{\code{ymin}} Lower confidence limit for the TPF
\item \strong{\code{ymax}} Upper confidence limit for the TPF
\item \code{alpha}
\item \code{color}
\item \code{fill}
\item \code{linetype}
\item \code{size}
}
}
\examples{
D.ex <- rbinom(50, 1, .5)
rocdata <- data.frame(D = c(D.ex, D.ex),
M = c(rnorm(50, mean = D.ex, sd = .4), rnorm(50, mean = D.ex, sd = 1)),
Z = c(rep("A", 50), rep("B", 50)))
ggplot(rocdata, aes(m = M, d = D)) + geom_roc() + geom_rocci()
ggplot(rocdata, aes(m = M, d = D, color = Z)) + geom_roc() + geom_rocci()
ggplot(rocdata, aes(m = M, d = D, color = Z)) + geom_roc() + geom_rocci(sig.level = .01)
ggplot(rocdata, aes(m = M, d = D)) + geom_roc(n.cuts = 0) +
geom_rocci(ci.at = quantile(rocdata$M, c(.1, .25, .5, .75, .9)))
ggplot(rocdata, aes(m = M, d = D, color = Z)) + geom_roc() + geom_rocci(linetype = 1)
}
\seealso{
See \code{\link{geom_roc}} for the empirical ROC curve, \code{\link{style_roc}} for
adding guidelines and labels, and \code{\link{direct_label}} for adding direct labels to the
curves. Also \link{export_interactive_roc} for creating interactive ROC curve plots for use in a web browser.
}
\keyword{datasets}
|
344e03b65194cea506f14ac08179433fd608f7b5
|
09e20b6464db79866dd68eb5eed1fadef97e8b35
|
/man/balance_errors.Rd
|
c1ad5da49a14cd9f5c77c5f52266499064ed5c50
|
[] |
no_license
|
tdienlin/td
|
8f18604d8b990ec59263c8c83c6dd32ba88f18cd
|
4e970efb92dd28367d7326a780322bf57baabca3
|
refs/heads/master
| 2021-08-17T21:20:33.222171
| 2021-06-09T15:25:14
| 2021-06-09T15:25:14
| 129,587,660
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 299
|
rd
|
balance_errors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/balance_errors.R
\name{balance_errors}
\alias{balance_errors}
\title{Balance Alpha and Beta Error}
\usage{
balance_errors(sesoi, n, one_tailed)
}
\description{
Find alpha-value for which 1-power and alpha are balanced.
}
|
da2a897166fe7c1d5ca8e22a123461c1a6f1c29f
|
21cd74bf56e9b101dc885e45cb05c219b8a5d211
|
/Exercise11 - [unsupervised learning] - K mean clustering.R
|
06ee4f4b707a913cbb3ad4f7042dcdbe21615b59
|
[] |
no_license
|
ariannalangwang/R-Exercises
|
242aeea98ec38adb1b0641d7278ffcb192e39b24
|
f1b8f79e9118d40487c3fe9a6a0e87337c01aaf2
|
refs/heads/master
| 2021-09-13T18:17:59.909694
| 2018-05-02T19:38:03
| 2018-05-02T19:38:03
| 106,767,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,537
|
r
|
Exercise11 - [unsupervised learning] - K mean clustering.R
|
####################################################
#### Unsupervised Learning - K Means Clustering ####
####################################################
# k-means is an unsupervised algorithm: no labels are provided, and the
# method groups observations purely by similarity. After choosing k it
# alternates two steps:
#   1. assign each point to the cluster with the nearest centroid;
#   2. recompute each cluster's centroid;
# stopping once the within-cluster variation (sum of euclidean distances
# from points to their centroid) can no longer be reduced.

####
## Get The Data
####
library(datasets)
head(iris)

####
## Exploratory Data Analysis (EDA)
####
library(tidyverse)
# Petal measurements already separate the species rather well visually.
# (colour can be mapped directly inside aes() instead of in geom_point())
ggplot(iris, aes(Petal.Length, Petal.Width, color = Species)) + geom_point()

####
## Build the Model
####
# Fit k-means on the four numeric columns only -- the Species labels are
# deliberately withheld (this is unsupervised learning).
help(kmeans)
# Signature: kmeans(x, centers, iter.max = 10, nstart = 1,
#                   algorithm = c("Hartigan-Wong", "Lloyd", "Forgy", "MacQueen"),
#                   trace = FALSE)
# We happen to know there are three species, hence k = 3.
iris_km <- kmeans(iris[, 1:4], 3, nstart = 20)
iris_km

# Components available on the fitted object:
names(iris_km)
# "cluster" "centers" "totss" "withinss" "tot.withinss"
# "betweenss" "size" "iter" "ifault"

# Cluster assignment (an integer in 1:k) for every observation.
iris_km$cluster

# Confusion table: clusters versus the true species labels.
table(iris$Species, iris_km$cluster)

####
## Cluster Visualizations
####
# clusplot draws a 2-D projection of the clustering on the current device.
library(cluster)
clusplot(iris, iris_km$cluster, color = TRUE, shade = TRUE, labels = 0, lines = 0)
# read this documentation.
help(clusplot)
|
82a6a910081ad2b7820ed41f04af23fedb97a034
|
2d47450c41c23f6d008bfca5bf08d3161bb13491
|
/vignettes/tutorial.R
|
76b5afa8db3803da7a1066de5be0859bb1f3bbc7
|
[] |
no_license
|
khaled-alshamaa/brapi
|
2c14727d65fc82a77d243bdc40c10b67955a04d5
|
5f2a5caa48d72e2412ead128b9143cc1882a060c
|
refs/heads/master
| 2022-03-21T20:19:07.470329
| 2019-10-16T15:51:00
| 2019-10-16T15:51:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,190
|
r
|
tutorial.R
|
# Purled (knitr-extracted) code from the brapi package tutorial vignette;
# the "## ----" lines are the original chunk markers.
## ---- message=TRUE, warning=TRUE-----------------------------------------
library(brapi)
library(magrittr)
white_list <- ba_db()
# print names of databases from whitelist
white_list
# connection object for the sweetpotatobase BrAPI endpoint
sp_base <- ba_db()$sweetpotatobase
# print summary of sp_base object
sp_base
## ------------------------------------------------------------------------
# toggle verbose call information on, list available calls, then toggle off
ba_show_info(TRUE)
## ------------------------------------------------------------------------
ba_calls(sp_base)
ba_show_info(FALSE)
## ------------------------------------------------------------------------
# rclass controls the return type of the BrAPI helpers
ba_calls(con = sp_base, rclass = "data.frame")
## ------------------------------------------------------------------------
ba_programs(sp_base, rclass = "data.frame")
## ------------------------------------------------------------------------
ba_studies_search(sp_base, programDbId = "140")
## ---- message=FALSE, warning=FALSE---------------------------------------
# Currently not working!!!
#dt = ba_studies_table(sp_base,
#                      studyDbId = "151")
## ---- echo=FALSE---------------------------------------------------------
#library(DT)
#datatable(
#  dt,
#  options=list(pageLength = 5, scrollX = TRUE)
#  )
|
6280ca7a050b7aab8551243d90b69261a9acba1a
|
10e6d3b9a993e1b861559bb9a9527af842296d15
|
/r/day04.R
|
544c704a5f914aef52273e9ae64b331b2c7fe5b4
|
[] |
no_license
|
mayrop/adventofcode
|
f968c1716d9df16f7c9e9177d6c08344aecc0feb
|
0b9d38440111f9cc8f2b37d7db8d5949c966d261
|
refs/heads/master
| 2023-02-08T23:48:45.366892
| 2021-01-04T02:17:07
| 2021-01-04T02:17:07
| 318,456,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,873
|
r
|
day04.R
|
# Advent of Code 2020, day 4: passport field validation.
library(tidyverse)
################################################################
# Read Data
input.prod <- read_file("../data/day04/prod.txt")
input.test <- read_file("../data/day04/dev.txt")
# NOTE(review): only the LAST of these two assignments takes effect — the
# script currently runs on the test input; comment one line out as needed.
input <- input.prod
input <- input.test
# Transform Data
# Passports are separated by blank lines; each becomes one row, and every
# "key:value" field is pulled into its own column (NA when the key is absent).
df <- strsplit(input, "\n\n")[[1]] %>%
  as_tibble() %>%
  dplyr::mutate(value = gsub("\\n", " ", value)) %>%
  tidyr::extract(value, "byr", regex="byr:([^ ]+)", remove=FALSE) %>%
  tidyr::extract(value, "iyr", regex="iyr:([^ ]+)", remove=FALSE) %>%
  tidyr::extract(value, "eyr", regex="eyr:([^ ]+)", remove=FALSE) %>%
  tidyr::extract(value, "hgt", regex="hgt:([^ ]+)", remove=FALSE) %>%
  tidyr::extract(value, "hcl", regex="hcl:([^ ]+)", remove=FALSE) %>%
  tidyr::extract(value, "ecl", regex="ecl:([^ ]+)", remove=FALSE) %>%
  tidyr::extract(value, "pid", regex="pid:([^ ]+)", remove=FALSE)
################################################################
# Part 1
# Count passports where every extracted field is present (no NA in the row).
missing_values <- apply(df, 1, function(row) {
  sum(is.na(row))
})
sum(missing_values == 0)
################################################################
# Part 2
# Apply the per-field validity rules and count the rows that pass all of them.
df %>%
  tidyr::extract(pid, "pid", regex="([0-9]+)", remove=FALSE) %>%
  tidyr::extract(hgt, c("hgt_val", "hgt_measure"), regex="(\\d+)(cm|in)", remove=FALSE) %>%
  tidyr::extract(hcl, "hcl_val", regex="#([a-f0-9]+)", remove=FALSE) %>%
  dplyr::mutate(
    # height range depends on the unit (cm vs in)
    has_hgt_valid = ifelse(hgt_measure == "cm",
                           hgt_val >= 150 & hgt_val <= 193,
                           hgt_val >= 59 & hgt_val <= 76),
  ) %>%
  dplyr::filter(
    byr >= 1920 & byr <= 2002,
    iyr >= 2010 & iyr <= 2020,
    eyr >= 2020 & eyr <= 2030,
    has_hgt_valid,
    nchar(hcl_val) == 6,
    nchar(hcl) == 7,
    ecl %in% c("amb", "blu", "brn", "gry", "grn", "hzl", "oth"),
    nchar(pid) == 9,
  ) %>%
  nrow()
################################################################
|
44baa36772c4bcd0b05597abacada0aba8aeb9a2
|
66242c594c285bdacfa37dd7cf8c7a351670bd2c
|
/rgujja14.r
|
26dd925263860a3bd77e4f7287231843335f7b6b
|
[] |
no_license
|
rithinrao/QuantManagement
|
11b47d09b95a0ac698a99e9d5f0d914c1611ee96
|
2167edd3d5c48a31237e5367dca66633c03632d9
|
refs/heads/master
| 2020-07-22T09:18:45.262141
| 2019-11-04T18:40:22
| 2019-11-04T18:40:22
| 207,147,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 580
|
r
|
rgujja14.r
|
# Transportation LP: minimize total shipping cost from 2 sources
# (supplies 100 and 120) to 4 destinations (demands 80, 60, 70, 10).
# Variables 1-4 are shipments from source 1, variables 5-8 from source 2.
# NOTE(review): variables 4 and 8 have zero cost — presumably a dummy
# destination absorbing surplus; verify against the assignment statement.
getwd()
library(lpSolveAPI)
# LP model with 0 initial constraints and 8 decision variables
lprec<-make.lp(0,8)
lp.control(lprec,sense='min')
#objective function: unit shipping costs per route
set.objfn(lprec,c(622,614,630,0,641,645,649,0))
#constraints
# supply: each source ships exactly its capacity
add.constraint(lprec,rep(1,4),"=",100,indices =c(1,2,3,4))
add.constraint(lprec,rep(1,4),"=",120,indices =c(5,6,7,8))
# demand: each destination receives exactly its requirement
add.constraint(lprec,rep(1,2),"=",80,indices =c(1,5))
add.constraint(lprec,rep(1,2),"=",60,indices =c(2,6))
add.constraint(lprec,rep(1,2),"=",70,indices =c(3,7))
add.constraint(lprec,rep(1,2),"=",10,indices=c(4,8))
#to solve (status 0 means an optimal solution was found)
solve(lprec)
get.objective(lprec)
get.constraints(lprec)
get.variables(lprec)
|
58f8bfbc560cf62516e603207e1021c1a9bed7a6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/distrMod/examples/fiHampel.Rd.R
|
8175c1ef940b4de126ca95bd473b2c2e4940d483
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
fiHampel.Rd.R
|
# Auto-extracted example for distrMod's fiHampel generating function.
library(distrMod)
### Name: fiHampel
### Title: Generating function for fiHampel-class
### Aliases: fiHampel
### Keywords: robust
### ** Examples
# Create a fiHampel object with the default (infinite) bias bound
fiHampel()
## The function is currently defined as
function(bound = Inf){ new("fiHampel", bound = bound) }
|
bc1d2afdd16145c536c95d6c27394985de418fe2
|
754ad848531df93b28b8c4320b4e3df4103a5347
|
/R/geo.R
|
a7f6d95f3b631fbe4c8e61a7057475cf33ee9aa4
|
[] |
no_license
|
pokyah/agrometeoR-mlr
|
0fa95a3266c75e8951283e39328d3951ea8f5912
|
8288d1616a3f235a84856def4e2def64cccce8ed
|
refs/heads/master
| 2020-03-21T16:04:26.784576
| 2018-09-07T07:31:29
| 2018-09-07T07:31:29
| 138,749,018
| 0
| 1
| null | 2018-07-12T13:12:18
| 2018-06-26T14:21:23
|
HTML
|
UTF-8
|
R
| false
| false
| 25,207
|
r
|
geo.R
|
#' Build a topographic rasters stack (elevation, slope, aspect)
#'
#' This function builds a topographic rasters stack for Belgium. It downloads
#' the country-level DEM with raster::getData, reprojects it to the Belgian
#' Lambert 2008 CRS (EPSG:3812, meter units), derives slope and aspect from
#' the elevation, and stacks the three layers. No input parameters are required.
#' @author Thomas Goossens - pokyah.github.io
#' @return A raster stack of 3 layers: elevation, slope (degrees), aspect (degrees)
#' @export
build_lowRes_terrain_rasters.fun <- function() {
  # Get the Belgium DEM
  bel.ele.ras = raster::getData("alt", country = "BE", mask = TRUE)
  # The data are not projected but are in longlat so we need to project them
  # to EPSG:3812 to get metric distance units
  bel.ele.ras <- raster::projectRaster(bel.ele.ras, crs = toString((dplyr::filter(rgdal::make_EPSG(), code=="3812"))$prj4))
  # compute the slope from the elevation
  bel.slope.ras <- raster::terrain(bel.ele.ras, opt="slope", unit="degrees")
  # compute the aspect from the elevation
  bel.aspect.ras <- raster::terrain(bel.ele.ras, opt="aspect", unit="degrees")
  # create the stack of rasters
  # FIX: namespace the call — a bare stack() resolves to base::stack (a
  # data.frame reshaper) unless the raster package happens to be attached
  topo.stack.ras <- raster::stack(bel.ele.ras, bel.slope.ras, bel.aspect.ras)
  # Return the stack of rasters
  return(topo.stack.ras)
}
#' Build a high resolution topographic rasters stack (elevation, slope, aspect, roughness)
#'
#' This function builds a topographic rasters stack from the 90 m SRTM tiles
#' covering the requested area. The mosaicked elevation raster and the final
#' terrain stack are saved into the package data/ folder via devtools::use_data
#' (side effect).
#' @author Thomas Goossens - pokyah.github.io
#' @param country_code.chr a character specifying the ISO countrycode. Ex : BE for belgium
#' @param NAME_1.chr a character specifying the NAME_1 value for lower than country level information
#' @param aggregation_factor.num a numeric specifying the aggregation factor to get the desired spatial resolution
#' @param EPSG.chr a character specifying the EPSG code of the desired Coordinate Reference System (CRS)
#' @param path.chr a character specifying the path where to download the SRTM data
#' @return the value of the final devtools::use_data call; the terrain stack
#'   itself is persisted as package data (extent.terrain.ras)
#' @export
build.SRTM.terrain.90m.ras.fun <- function(country_code.chr, NAME_1.chr=NULL, aggregation_factor.num=NULL, EPSG.chr=NULL, path.chr) {
  # Path to downloaded SRTM Tiles refs
  srtm.tiles.ref <- raster::shapefile("./external-data/Digital_Elevation_Model/90m_resolution/srtm/tiles.shp")
  # Get country geometry first: reuse a previously downloaded boundary file
  # when present, otherwise fetch it from GADM
  if(length(list.files(paste0(path.chr,"/Boundaries"), all.files = TRUE, include.dirs = TRUE, no.. = TRUE))>0){
    extent.sp <- readRDS(paste0(path.chr,"/Boundaries/", "GADM_2.8_BEL_adm1.rds"))
  }else{
    extent.sp <- raster::getData('GADM', country=country_code.chr, level=1)
  }
  if(!is.null(NAME_1.chr)){
    extent.sp <- subset(extent.sp, NAME_1 == NAME_1.chr)
  }
  # to compute slope, aspect, etc, we need neighbouring pixels out of the extent
  # boundary. So we buffer it by 5 km in a metric CRS (EPSG:3812) :
  # https://gis.stackexchange.com/questions/234135/enlarging-polygon-slightly-using-r
  extent.sf <- sf::st_transform(sf::st_as_sf(extent.sp), 3812)
  larger.extent.sp <- rgeos::gBuffer(as(extent.sf, "Spatial"), width = 5000)
  larger.extent.sp <- sp::spTransform(larger.extent.sp, raster::crs(extent.sp))
  # Intersect extent geometry and tile grid to find the needed SRTM tiles
  intersects <- rgeos::gIntersects(larger.extent.sp, srtm.tiles.ref, byid=T)
  tiles <- srtm.tiles.ref[intersects[,1],]
  # Download tiles using getData
  # inspired from https://www.gis-blog.com/download-srtm-for-an-entire-country/
  srtm_list <- list()
  for(i in 1:length(tiles)){
    # center coordinates of the tile, required by getData('SRTM', ...)
    lon <- raster::extent(tiles[i,])[1] + (raster::extent(tiles[i,])[2] - raster::extent(tiles[i,])[1]) / 2
    lat <- raster::extent(tiles[i,])[3] + (raster::extent(tiles[i,])[4] - raster::extent(tiles[i,])[3]) / 2
    tile <- raster::getData('SRTM', #data are downloaded from http://www.cgiar-csi.org/. See getData do of pokyah/raster repo on github
                            lon=lon,
                            lat=lat,
                            download = TRUE,
                            path = path.chr)
    srtm_list[[i]] <- tile
  }
  # Mosaic tiles ($fun tells raster::mosaic how to combine overlapping cells)
  srtm_list$fun <- mean
  devtools::use_data(srtm_list, overwrite = TRUE)
  srtm_mosaic.ras <- do.call(raster::mosaic, srtm_list)
  devtools::use_data(srtm_mosaic.ras, overwrite = TRUE)
  # Crop tiles to the (buffered) extent borders
  extent.elevation.ras <- raster::crop(srtm_mosaic.ras, larger.extent.sp)
  extent.elevation.ras <- raster::mask(extent.elevation.ras, larger.extent.sp)
  # transform to desired CRS
  # FIX: the reprojected raster must be assigned back — the previous code
  # discarded the result of projectRaster, so the CRS was never changed
  if(!is.null(EPSG.chr)){
    extent.elevation.ras <- raster::projectRaster(extent.elevation.ras, crs = toString((dplyr::filter(rgdal::make_EPSG(), code==EPSG.chr))$prj4))
  }
  # aggregate to lower resolution
  # inspired from https://stackoverflow.com/questions/32278825/how-to-change-the-resolution-of-a-raster-layer-in-r
  if(!is.null(aggregation_factor.num)){
    extent.elevation.ras <- raster::aggregate(extent.elevation.ras, fact=aggregation_factor.num)
  }
  # compute the slope, aspect and roughness from the elevation
  # inspired from https://rpubs.com/etiennebr/visualraster
  extent.slope.ras <- raster::terrain(extent.elevation.ras, opt="slope", unit="degrees")
  extent.aspect.ras <- raster::terrain(extent.elevation.ras, opt="aspect", unit="degrees")
  extent.roughness.ras <- raster::terrain(extent.elevation.ras, opt="roughness")
  # stack the rasters
  extent.terrain.ras = raster::stack(
    extent.elevation.ras,
    extent.slope.ras,
    extent.aspect.ras,
    extent.roughness.ras)
  # crop to the non enlarged extent
  extent.terrain.ras <- raster::crop(extent.terrain.ras, extent.sp)
  # NOTE(review): devtools::use_data is deprecated in favour of
  # usethis::use_data — kept here to avoid introducing a new dependency
  devtools::use_data(extent.terrain.ras, overwrite = TRUE)
}
#' Build a sp/sf that contains the locations of the Pameseb automatic weather stations
#'
#' Station metadata (id, position, altitude) is recovered indirectly: the
#' function fetches a short 2-day window of "cleandata" records from the
#' Agromet API, filters them, and keeps one row per station.
#' Requires the AGROMET_API_V1_KEY environment variable to be set.
#' @author Thomas Goossens - pokyah.github.io
#' @param sf.bool A boolean specifying if we want as sp or sf (TRUE for sf)
#' @param EPSG.chr a character specifying the EPSG code of the desired Coordinate Reference System (CRS); NULL keeps WGS84 lon/lat
#' @return the station locations as a sf (sf.bool == TRUE) or sp object
#' @export
build.ps.locations.points_sf.fun <- function(sf.bool, EPSG.chr){
  # proj4 of the Agromet API data (WGS84 lon/lat)
  proj4.chr <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
  # Retrieving useless data from API
  # (the measurements themselves are discarded — only station info is kept)
  demo.records.df <- prepare_agromet_API_data.fun(
    get_from_agromet_API.fun(
      user_token.chr = Sys.getenv("AGROMET_API_V1_KEY"),
      table_name.chr = "cleandata",
      stations_ids.chr = "all",
      sensors.chr = "tsa",
      dfrom.chr = as.character(Sys.Date()-60),
      dto.chr = as.character(Sys.Date()-59),
      api_v.chr = "v2"
    ), table_name.chr = "cleandata"
  )
  # Filtering records to keep only the useful ones (removing unecessary stations)
  demo.records.df <- dplyr::filter(demo.records.df, network_name == "pameseb")
  demo.records.df <- dplyr::filter(demo.records.df, type_name != "Sencrop")
  demo.records.df <- dplyr::filter(demo.records.df, !is.na(to))
  demo.records.df <- dplyr::filter(demo.records.df, state == "Ok")
  demo.records.df <- dplyr::filter(demo.records.df, !is.na(tsa))
  # Selecting only the useful features
  demo.records.df <- dplyr::select(demo.records.df, one_of(c("sid", "mtime", "longitude", "latitude", "altitude")))
  # defining the stations locations sp object: keep one record per station
  # (all records sharing the earliest timestamp)
  stations.df <- dplyr::filter( demo.records.df, mtime == min(mtime, na.rm = TRUE))
  stations.sf <- sf::st_as_sf(
    x = stations.df,
    coords = c("longitude", "latitude"),
    crs = proj4.chr)
  # transform to desired CRS
  if(!is.null(EPSG.chr)){
    stations.sf <- sf::st_transform(x = stations.sf, crs = as.numeric(EPSG.chr) )
  }
  if(sf.bool == TRUE){
    stations.sf
  }
  # else coerce to sp (last expression, implicitly returned)
  else
  {
    stations.sp <- as(stations.sf, "Spatial")
  }
}
#' Build a sp/sf interpolation grid with the desired spatial resolution for Wallonia
#'
#' Inspired from https://stackoverflow.com/questions/41787313/how-to-create-a-grid-of-spatial-points,
#' https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/OverviewCoordinateReferenceSystems.pdf, https://gis.stackexchange.com/questions/22843/converting-decimal-degrees-units-to-km-in-r,
#' https://stackoverflow.com/questions/48727511/r-grid-of-points-from-polygon-input
#' @author Thomas Goossens - pokyah.github.io
#' @param country_code.chr a character specifying the ISO contrycode. Ex : BE for belgium
#' @param NAME_1.chr a character specifying the NAME_1 value for lower than country level information
#' @param res.num A numeric representing the spatial resolution of the desired grid (in meters)
#' @param geom.chr A character specifying the geometry of the interpolation grid. Cant take a value among ("polygons", "centers" or "corners")
#' @param sf.bool A boolean specifying if we want as sp or sf (TRUE for sf)
#' @param EPSG.chr A character specifying the EPSG code of the desired Coordiante Reference System (CRS)
#' @return A sf (or sp) spatial grid with the desired resolution clipped to the requested polygon
#' @export
build.vs.grid.fun <- function(country_code.chr, NAME_1.chr, res.num, geom.chr, sf.bool, EPSG.chr = NULL){
  # Get country geometry first (cached under ./external-data/Boundaries)
  extent.sp <- raster::getData('GADM', country=country_code.chr, level=1, path = "./external-data/Boundaries")
  if(!is.null(NAME_1.chr)){
    extent.sp <- subset(extent.sp, NAME_1 == NAME_1.chr)
  }
  # convert from geographic lat/lon to projected EPSG for Belgian Lambert 2008 (needed to make grid in meters)
  extent.sf <- sf::st_transform(sf::st_as_sf(extent.sp), 3812)
  # make the grid and clip it with the extent boundaries
  grid.sf <- sf::st_intersection(
    sf::st_sf(
      sf::st_make_grid(
        extent.sf, cellsize= res.num, n=c(500,500), what= geom.chr, crs = 3812)
    ),
    extent.sf)
  # Create column cell_area of each cell (only meaningful for polygon cells)
  if(geom.chr=="polygons"){
    grid.sf <- grid.sf %>% dplyr::mutate(cell_area = sf::st_area(.))
  }
  # transform to desired CRS
  if(!is.null(EPSG.chr)){
    grid.sf <- sf::st_transform(x = grid.sf, crs = as.numeric(EPSG.chr) )
  }
  # append an id to each cell
  grid.sf$sid <- paste0(seq_along(1:nrow(data.frame(grid.sf))))
  # select useful features
  # NOTE(review): the long auto-generated name below is the geometry column
  # created by st_make_grid/st_sf; it is renamed to "geometry" right after.
  # The expression depends on the exact call above — do not edit one without
  # the other.
  grid.sf <- grid.sf %>% dplyr::select(ISO, NAME_0, NAME_1, sid, sf..st_make_grid.extent.sf..cellsize...res.num..n...c.500..500...)
  # rename geometry column
  names(grid.sf)[names(grid.sf) == "sf..st_make_grid.extent.sf..cellsize...res.num..n...c.500..500..."] <- "geometry"
  sf::st_geometry(grid.sf) <- "geometry"
  if(sf.bool == TRUE){
    grid.sf
  }else{
    grid.sp <- as(grid.sf, "Spatial")
  }
}
#' Build a sf object containing reclassified CLC data
#'
#' Reads the CORINE land cover shapefile, crops it to the requested area,
#' downloads the official CLC legend, and reclassifies the CLC codes into a
#' small set of broad classes (Artificial, Agricultural, Herbaceous, Forest,
#' Water) stored in a new CLASS column.
#' @author Thomas Goossens - pokyah.github.io
#' @param country_code.chr a character specifying the ISO contrycode. Ex : BE for belgium
#' @param NAME_1.chr a character specifying the NAME_1 value for lower than country level information
#' @param EPSG.chr A character specifying the EPSG code of the desired Coordiante Reference System (CRS)
#' @param path.corine.shapefile.chr A character specifying the path where the corine shapefile resides
#' @param EPSG.corine.chr A character specifying the EPSG code of the downloaded Corine Data.
#'   NOTE(review): this parameter is currently unused — the corine CRS is
#'   hardcoded to 3812 below; verify before relying on it.
#' @return a sf data frame containing reclasssified corine land cover data
build_cover.sf.fun <- function(
  country_code.chr,
  NAME_1.chr,
  EPSG.chr,
  path.corine.shapefile.chr,
  EPSG.corine.chr){
  # Get country geometry first
  extent.sp <- raster::getData('GADM', country=country_code.chr, level=1)
  file.remove(list.files(pattern = "GADM_"))
  # NOTE(review): `crs` below is assigned but never used afterwards
  crs <- crs(extent.sp)
  if(!is.null(NAME_1.chr)){
    extent.sp <- subset(extent.sp, NAME_1 == NAME_1.chr)
  }
  # reproject in the desired CRS
  extent.sp <- sp::spTransform(extent.sp, sp::CRS(projargs = dplyr::filter(rgdal::make_EPSG(), code == EPSG.chr)$prj4))
  # Download CORINE land cover for Belgium from http://inspire.ngi.be/download-free/atomfeeds/AtomFeed-en.xml
  # NOTE(review): maptools::readShapePoly is deprecated/retired — consider
  # rgdal::readOGR or sf::st_read when this code is next touched
  corine.sp <- maptools::readShapePoly(path.corine.shapefile.chr)
  # Define the CRS of corine land cover data
  # We know the crs from the metadata provided on the website http://inspire.ngi.be/download-free/atomfeeds/AtomFeed-en.xml
  raster::crs(corine.sp) <- as.character(dplyr::filter(rgdal::make_EPSG(), code == "3812")$prj4)
  # Crop corine to extent
  corine.extent.sp <- raster::crop(corine.sp, extent.sp)
  # legend of corine (official CLC legend CSV from the EEA)
  download.file("http://www.eea.europa.eu/data-and-maps/data/corine-land-cover-2006-raster-1/corine-land-cover-classes-and/clc_legend.csv/at_download/file",
    destfile = "corine.legend.csv")
  legend <- read.csv(file = "corine.legend.csv", header = TRUE, sep = ",")
  file.remove("corine.legend.csv")
  # Legend codes present in extent
  legend.extent <- data.frame(unique(corine.extent.sp$code_12))
  # https://stackoverflow.com/questions/38850629/subset-a-column-in-data-frame-based-on-another-data-frame-list
  legend.extent <- subset(legend, CLC_CODE %in% legend.extent$unique.corine.extent.sp.code_12.)
  # CLC_CODE class from integer to numeric
  legend.extent$CLC_CODE <- as.numeric(legend.extent$CLC_CODE)
  # from sp to sf
  corine.extent.sf <- sf::st_as_sf(corine.extent.sp)
  corine.extent.sf$code_12 <- as.numeric(paste(corine.extent.sf$code_12))
  # Reclass Corine according to the following reclassification table
  # (last expression — implicitly returned)
  cover.sf <-
    sf::st_as_sf(
      dplyr::mutate(
        corine.extent.sf,
        CLASS = dplyr::case_when(
          code_12 <= 142 ~ "Artificials surfaces",
          code_12 == 211 ~ "Agricultural areas",
          code_12 == 222 ~ "Agricultural areas",
          code_12 == 231 ~ "Herbaceous vegetation",
          code_12 == 242 ~ "Agricultural areas",
          code_12 == 243 ~ "Agricultural areas",
          code_12 == 311 ~ "Forest",
          code_12 == 312 ~ "Forest",
          code_12 == 313 ~ "Forest",
          code_12 == 321 ~ "Herbaceous vegetation",
          code_12 == 322 ~ "Herbaceous vegetation",
          code_12 == 324 ~ "Forest",
          code_12 > 400 ~ "Water"))
    )
}
#' Get cover classes percentages for buffered points with custom radius
#'
#' For each input point, draws a circular buffer of radius radius.num,
#' intersects it with the land cover polygons, and computes the percentage of
#' the buffer area occupied by each cover CLASS. The percentages are merged
#' back onto the input points (one column per class).
#' @author Thomas Goossens - pokyah.github.io
#' @param cover.sf a sf polygon layer of the land cover (must have CLASS column)
#' @param points.sf a sf points layer (must have a sid column) around which the cover is summarized
#' @param radius.num a numeric specifying the buffer radius, in the units of the points CRS
#' @return a sf of the input points with one cover-rate (%) column per cover class
get.points.cover_pct.fun <- function(
  cover.sf,
  points.sf,
  radius.num){
  # transposing to dataframe for data spreading (impossible (?) to achieve with dplyr spread)
  cover_2mlr.fun <- function(data.sf) {
    # Delete geometry column
    data.df <- data.frame(data.sf)
    # Reshape data with CLASS labels as columns names
    # https://stackoverflow.com/questions/39053451/using-spread-with-duplicate-identifiers-for-rows
    data.df <- data.df %>%
      dplyr::select(sid, CLASS, cover_rate) %>%
      reshape2::dcast(sid ~ CLASS, fun = sum)
    # https://stackoverflow.com/questions/5620885/how-does-one-reorder-columns-in-a-data-frame
    return(data.df)
  }
  # reproject the cover in the same CRS as grid and physical stations
  # FIX: the result must be assigned — st_transform does not modify in place,
  # so the reprojection was previously a no-op
  cover.sf <- sf::st_transform(cover.sf, sf::st_crs(points.sf))
  # keep the original (unbuffered) points to merge the cover rates back onto
  points.orig.sf <- points.sf
  # Make a buffer around points
  # https://gis.stackexchange.com/questions/229453/create-a-circle-of-defined-radius-around-a-point-and-then-find-the-overlapping-a
  # https://stackoverflow.com/questions/46704878/circle-around-a-geographic-point-with-st-buffer
  points.sf <- sf::st_buffer(x = points.sf, dist = radius.num)
  # extract cover information into the buffered points
  cover.points.sf <- sf::st_intersection(points.sf, cover.sf)
  # give every intersected polygon its own buffer-piece id
  cover.points.sf <- cover.points.sf %>%
    dplyr::mutate(
      bid = paste0(seq_len(nrow(cover.points.sf))))
  # create new column with area of each intersected cover polygon
  cover.area.points.sf <- cover.points.sf %>%
    dplyr::group_by(bid) %>%
    dplyr::summarise() %>%
    dplyr::mutate(shape.area = sf::st_area(.))
  # Make a column with percentage of occupation of each land cover inside each grid point buffer
  # https://github.com/r-spatial/sf/issues/239
  cover_rate.points.sf <- sf::st_join(
    x = cover.points.sf,
    y = cover.area.points.sf,
    join = sf::st_covered_by
  ) %>%
    dplyr::select(sid, CLASS, shape.area) %>%
    dplyr::mutate(cover_rate = as.numeric(shape.area)/(pi*radius.num^2) * 100) # buffer area = pi * radius^2
  # transposing to dataframe for data spreading (impossible (?) to achieve with dplyr spread)
  cover_rate.points.df <- cover_2mlr.fun(cover_rate.points.sf)
  colnames(cover_rate.points.df) <- gsub(" ","_",colnames(cover_rate.points.df))
  # merge cover data with the original points
  # FIX: previously merged against the global `points.1000.pt.sf` instead of
  # the function's own points.sf argument
  cover_rate.points.sf = merge(points.orig.sf, cover_rate.points.df, by = "sid")
  # only keep relevant columns
  # NOTE(review): positional selection — the indices depend on the column
  # layout of points.sf; verify them against the actual caller input
  cover_rate.points.sf <- cover_rate.points.sf %>%
    dplyr::select(1,15:19)
}
#' Build a responsive leaflet map displaying agromet AWS network data
#'
#' Creates a leaflet map with Stamen and Esri satellite base layers, fitted to
#' the bounding box of the supplied records, a layers control, a locate-me
#' button, and a responsive viewport meta tag injected via htmlwidgets.
#' NOTE(review): the helpers (addProviderTiles, fitBounds, providers, JS, ...)
#' are called without the leaflet:: prefix — this assumes library(leaflet)
#' is attached by the caller; confirm.
#' @author Thomas Goossens - pokyah.github.io
#' @param records.sf A sf containing the records to be displayed
#' @return a leaflet map object
#' @export
build_leaflet_template.fun <- function(records.sf){
  # viewport meta tag injected below so the widget renders well on mobile
  responsiveness.chr = "\'<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\'"
  template.map <- leaflet::leaflet() %>%
    addProviderTiles(group = "Stamen",
      providers$Stamen.Toner,
      options = providerTileOptions(opacity = 0.25)
    ) %>%
    addProviderTiles(group = "Satellite",
      providers$Esri.WorldImagery,
      options = providerTileOptions(opacity = 1)
    ) %>%
    # zoom to the bounding box of the records (xmin, ymin, xmax, ymax)
    fitBounds(sf::st_bbox(records.sf)[[1]],
      sf::st_bbox(records.sf)[[2]],
      sf::st_bbox(records.sf)[[3]],
      sf::st_bbox(records.sf)[[4]]
    ) %>%
    addLayersControl(baseGroups = c("Stamen", "Satellite"),
      overlayGroups = c("KNMI rain radar", "stations", "MNT", "slope", "aspect"),
      options = layersControlOptions(collapsed = TRUE)
    ) %>%
    # geolocation button
    addEasyButton(easyButton(
      icon="fa-crosshairs", title="Locate Me",
      onClick=JS("function(btn, map){ map.locate({setView: true}); }"))) %>%
    # inject the responsive viewport meta tag into the widget's <head>
    htmlwidgets::onRender(paste0("
      function(el, x) {
      $('head').append(",responsiveness.chr,");
      }"))
  return(template.map)
}
#' Build a static map displaying predictions and their related error
#' @author Loïc Davadan <- ldavadan.github.io
#' @param gridded.data.df A data frame obtained from a SpatialGridDataFrame containing data
#'   (must have coords.x1, coords.x2, response and — if layer.error.bool — se columns)
#' @param boundaries.sf A sf containing data from Wallonia boundaries
#' @param layer.error.bool A boolean specifying if you want to display the layer with error
#' @param legend.error.bool A boolean specifying if you want to display the legend of the error layer
#' @param pretty_breaks.bool A boolean specifying the type of legend you want. TRUE for pretty breaks, FALSE for quantile scale
#' @param title.chr A character specifying the title you want for your map
#' @param target.chr A character specifying the predicted parameter. One of "tsa", "hra" or "hct"
#' @return a ggplot map object
#' @export
static.ggmap <- function(
  gridded.data.df,
  boundaries.sf,
  layer.error.bool,
  legend.error.bool,
  pretty_breaks.bool,
  title.chr,
  target.chr
  ){
  # NOTE(review): attaching packages inside a function is a side effect on the
  # caller's search path — kept for backward compatibility
  library(ggplot2)
  library(grid)
  library(maps)
  library(maptools)
  library(ggsn)
  library(RColorBrewer)
  # legend title depends on the predicted parameter
  if(target.chr == "tsa"){ legend_title = "Temperature (°C)"}
  if(target.chr == "hra"){ legend_title = "Relative humidity (%)"}
  if(target.chr == "hct"){ legend_title = "Leaves wetness (%)"}
  if(pretty_breaks.bool == TRUE){
    # inspired by https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
    # prepare legend with pretty breaks
    # compute quantiles from predictions values
    quantiles <- unique(stats::quantile(gridded.data.df$response,
      probs = seq(0, 1, length.out = 11), na.rm=T))
    labels <- c()
    breaks <- unique(round(c(-60,
      min(gridded.data.df$response, na.rm = TRUE),
      quantiles,
      max(gridded.data.df$response, na.rm = TRUE)), 1))
    labels <- paste0(labels, paste0(format(round(breaks, 1), nsmall = 1)))
    labels <- labels[2:length(labels)]
    gridded.data.df$response_quantiles <- cut(gridded.data.df$response,
      breaks = breaks,
      labels = labels,
      include.lowest = T)
    breaks_scale <- levels(gridded.data.df$response_quantiles)
    labels_scale <- rev(breaks_scale)
  }
  if(pretty_breaks.bool == FALSE){
    # inspired by https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
    # quantile-based discrete classes labelled "low – high"
    quantiles <- unique(stats::quantile(gridded.data.df$response,
      probs = seq(0, 1, length.out = 11), na.rm=T))
    labels <- c()
    labels <- paste0(labels, paste0(format(round(quantiles, 1), nsmall = 1),
      " – ",
      format(round(quantiles[2:length(quantiles)], 1), nsmall = 1)))
    labels <- labels[1:length(labels)-1]
    gridded.data.df$response_quantiles <- cut(gridded.data.df$response,
      breaks = quantiles,
      labels = labels,
      include.lowest = T)
  }
  ggmap <- ggplot2::ggplot(gridded.data.df) +
    # choose data to display on the layer
    ggplot2::geom_raster(mapping = ggplot2::aes(coords.x1, coords.x2, fill = response_quantiles), na.rm = TRUE, interpolate = T)
  # choose color palette and create a legend with pretty breaks
  if(pretty_breaks.bool == TRUE){
    ggmap <- ggmap +
      ggplot2::scale_fill_manual(
        values = rev(RColorBrewer::brewer.pal(n = length(labels_scale), name = "RdYlBu")), # palette to use
        breaks = rev(breaks_scale), # legend breaks
        name = legend_title,
        drop = FALSE,
        labels = labels_scale, # legend labels
        # legend parameters
        guide = ggplot2::guide_legend(
          direction = "vertical",
          keyheight = grid::unit(7, units = "mm"),
          keywidth = grid::unit(3, units = "mm"),
          title.position = 'top',
          title.vjust = 0.5,
          label.vjust = 1,
          ncol = 1,
          bycol = T,
          reverse = F,
          label.position = "right"
        )
      )
  }
  # color palette with discrete classes with quantile scale
  if(pretty_breaks.bool == FALSE){
    ggmap <- ggmap +
      ggplot2::scale_fill_brewer(legend_title, palette = "RdYlBu", direction = -1)
  }
  if(layer.error.bool == TRUE){
    ggmap <- ggmap +
      # display a layer with standard error
      ggplot2::geom_raster(ggplot2::aes(coords.x1, coords.x2, alpha = se), fill = "white", na.rm = TRUE, interpolate = TRUE) +
      # whitening it
      ggplot2::scale_alpha_continuous("Standard\nError",range = c(0.1,1), guide = legend.error.bool)
    # order the two legends if they both are displayed
    if(legend.error.bool == TRUE){
      ggmap <- ggmap + ggplot2::guides(fill = ggplot2::guide_legend(order = 1),
        alpha = ggplot2::guide_legend(order = 0))
    }
  }
  ggmap <- ggmap +
    ggplot2::ggtitle(title.chr) + # add title
    # add boundaries layer
    ggplot2::geom_sf(data = boundaries.sf, ggplot2::aes(fill = ISO), fill = NA, color = "black", size = 0.6) +
    # add north symbol
    ggsn::north(boundaries.sf, scale = 0.1, location = "bottomleft",
      anchor = c(x = 780000, y = 550000), symbol = 12) +
    # add scalebar
    ggsn::scalebar(boundaries.sf, dist = 50, dd2km = FALSE, model = "GRS80",
      st.dist = 0.03, st.size = 4, box.fill = c("black", "white"),
      box.color = "black", anchor = c(x = 700000, y = 520000)) +
    # add copyright
    ggplot2::annotation_custom(grob = grid::textGrob("© CRA-W"),
      xmin = 790000, xmax = 790000, ymin = 520000, ymax = 520000) +
    # display resolution of the map
    ggplot2::annotation_custom(grob = grid::textGrob("Resolution : 1 km²"),
      xmin = 558000, xmax = 558000, ymin = 671000, ymax = 671000) +
    # parameters for visualization
    ggplot2::theme(panel.background = ggplot2::element_rect(fill = "white"),
      axis.title = ggplot2::element_text(color = NA),
      panel.grid = ggplot2::element_line(color = NA),
      axis.ticks = ggplot2::element_line(color = NA),
      axis.text = ggplot2::element_text(colour = NA),
      legend.title = ggplot2::element_text(size = 12, face = "bold", vjust = 1),
      # FIX: margin(b = 1) was previously passed positionally, landing in the
      # `family` argument of element_text — it must be named
      legend.text = ggplot2::element_text(size = 11, margin = ggplot2::margin(b = 1)),
      legend.background = ggplot2::element_rect(fill = "transparent"),
      legend.position = c(0.12,0.38),
      legend.box = "horizontal")
  ggmap
}
|
1fd426cd98507fc598dad2ead7553ee69ea9250c
|
de58bb051901977ca144e4d27f155b4ac8a5c46e
|
/resultados/resultsJoin.R
|
f99406beddbd4a28d85426e082305d486963d799
|
[] |
no_license
|
MiguelGubern/TFG_Hadoop_Spark_GraphX
|
a9c53be3781418dd49ac51f94cf77a3c27fb90a2
|
c0407c60e47a4f1cb0a6e3458af0aa2d8e0dc1a6
|
refs/heads/master
| 2020-04-14T15:09:35.918128
| 2019-01-03T03:34:36
| 2019-01-03T03:34:36
| 163,917,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,796
|
r
|
resultsJoin.R
|
library(dplyr)
library(purrr)
library(tidyr)
library(lubridate)
library(plyr)
library(bit64)
library(data.table)
library(matrixStats)
####################### JOINING ALL BETWEENNESS RESULTS ###########################
# Each Spark output file contains "(vertex_id,value)" tuples; strip the
# parentheses and read as CSV, then full-join every run on the vertex id (V1).
path <- "c:/Users/migue/Documents/results/betweenness/"
allFiles <- list.files(path = path, pattern = ".*_v")
file <- allFiles[1]
filePath <- paste(paste(path, file, sep=""), "/part-00000", sep="")
print(filePath)
txt <- gsub("[()]", "", readLines(filePath))
btTable = fread(text=txt, sep = ",")
for (i in 2:16){
  file <- allFiles[i]
  filePath <- paste(paste(path, file, sep=""), "/part-00000", sep="")
  print(filePath)
  txt <- gsub("[()]", "", readLines(filePath))
  auxTable <- fread(text=txt, sep = ",")
  btTable <- merge(btTable,auxTable,by="V1",all=TRUE)
  names(btTable)[ncol(btTable)] = paste("V2_", i, sep = "")
}
# NOTE(review): 16 runs are merged (columns 2:17) but the mean/sd below only
# use columns 2:7 (the first 6 runs) — verify whether this is intentional;
# the eigen and pagerank sections below use 2:17.
btTable$bt_mean = rowMeans(btTable[,2:7], na.rm = TRUE)
btTable$bt_sd = rowSds(data.matrix(btTable[,2:7]), na.rm = TRUE)
####################### JOINING ALL EIGEN RESULTS ###########################
# Same procedure for the 16 eigenvector-centrality runs.
path <- "c:/Users/migue/Documents/results/eigen/"
allFiles <- list.files(path = path, pattern = ".*_v")
file <- allFiles[1]
filePath <- paste(paste(path, file, sep=""), "/part-00000", sep="")
print(filePath)
txt <- gsub("[()]", "", readLines(filePath))
eiTable = fread(text=txt, sep = ",")
for (i in 2:16){
  file <- allFiles[i]
  filePath <- paste(paste(path, file, sep=""), "/part-00000", sep="")
  print(filePath)
  txt <- gsub("[()]", "", readLines(filePath))
  auxTable <- fread(text=txt, sep = ",")
  eiTable <- merge(eiTable,auxTable,by="V1",all=TRUE)
  names(eiTable)[ncol(eiTable)] = paste("V2_", i, sep = "")
}
# per-vertex mean and standard deviation across the 16 runs
eiTable$ei_mean = rowMeans(eiTable[,2:17], na.rm = TRUE)
eiTable$ei_sd = rowSds(data.matrix(eiTable[,2:17]), na.rm = TRUE)
####################### JOINING ALL PAGERANK RESULTS ###########################
# Same procedure for the 16 PageRank runs.
path <- "c:/Users/migue/Documents/results/pageRank/"
allFiles <- list.files(path = path, pattern = ".*_v")
file <- allFiles[1]
filePath <- paste(paste(path, file, sep=""), "/part-00000", sep="")
print(filePath)
txt <- gsub("[()]", "", readLines(filePath))
prTable = fread(text=txt, sep = ",")
for (i in 2:16){
  file <- allFiles[i]
  filePath <- paste(paste(path, file, sep=""), "/part-00000", sep="")
  print(filePath)
  txt <- gsub("[()]", "", readLines(filePath))
  auxTable <- fread(text=txt, sep = ",")
  prTable <- merge(prTable,auxTable,by="V1",all=TRUE)
  names(prTable)[ncol(prTable)] = paste("V2_", i, sep = "")
}
# per-vertex mean and standard deviation across the 16 runs
prTable$pr_mean = rowMeans(prTable[,2:17], na.rm = TRUE)
prTable$pr_sd = rowSds(data.matrix(prTable[,2:17]), na.rm = TRUE)
####################### TABLE MERGING AND SAVING ###########################
# Combine the mean/sd columns of the three centrality measures on vertex id.
table <- merge(eiTable[,c(1,18,19)], prTable[, c(1,18,19)], by="V1", all=TRUE)
table <- merge(table, btTable[, c(1,18,19)], by="V1", all=TRUE)
get_Lon_Lat <- function(vertex_ID, zone, sampling_step){
  # Returns the (Lon, Lat) mid-point of the grid cell identified by vertex_ID,
  # for a grid covering zone = c(lon_min, lat_min, lon_max, lat_max) with
  # square cells of side sampling_step. Cells are numbered row by row starting
  # at 1. Vectorized over vertex_ID; returns a matrix cbind(Lon, Lat).
  grid_ncols <- ceiling((zone[3] - zone[1])/sampling_step)
  row <- ceiling(vertex_ID/grid_ncols)
  # FIX: the previous `vertex_ID %% grid_ncols` returned 0 for the last cell
  # of every row, placing it one full row-width west of the grid; use a
  # 1-based modulo instead
  col <- ((vertex_ID - 1) %% grid_ncols) + 1
  Lon <- zone[1] + (col-1)*sampling_step + sampling_step/2
  Lat <- zone[2] + (row-1)*sampling_step + sampling_step/2 # Middle point of each grid cell
  return (cbind(Lon, Lat))
}
# Convert each vertex id to the mid-point coordinates of its grid cell
# (bounding box c(22.82, 40.49, 23.00, 40.71), cell side 1e-6 degrees).
# NOTE(review): ids are read as integer64 (bit64) — as.double may lose
# precision for very large ids; verify the id range fits exactly in a double.
table$Lon <- get_Lon_Lat(as.double(table$V1), c(22.82,40.49,23.00,40.71), 1E-6)[,1]
table$Lat <- get_Lon_Lat(as.double(table$V1), c(22.82,40.49,23.00,40.71), 1E-6)[,2]
# Save the merged means/sds table with coordinates as CSV
write.table(table, file = "C:/Users/migue/Documents/results/all_means_sds.csv", sep = ",",
            na = "", col.names = TRUE, row.names = FALSE)
|
5e61b5b3659afe4e657a91b11c1f1eb773b3a2c2
|
a74a1820fcc5538848ec4b90c7569452ba15a5f0
|
/R/githug-package.r
|
2de58aa5cdabda829fb8020b624dfed59162eb61
|
[] |
no_license
|
jrnold/githug
|
cfa5fa23c0fcfccb2d260c64014f603329d1aa93
|
675269266c06335365b8410a898fe3c1d08979f8
|
refs/heads/master
| 2021-01-21T18:50:41.565967
| 2016-09-23T15:30:22
| 2016-09-23T15:30:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 81
|
r
|
githug-package.r
|
#' githug: package-level documentation.
#'
#' This NULL stub only carries roxygen tags for the package as a whole;
#' the @importFrom below pulls magrittr's null-default operator (re-exported
#' by purrr) into the package namespace.
#'
#' @importFrom purrr %||%
#' @name githug
#' @docType package
NULL
|
d9b90cfa2bdeffb23df3dfb9c7736331b6871f56
|
066278fee756b36693832e5572d3c4c346972170
|
/R/mt.ttdetect.R
|
d2d38935f497979ef1f3f267663ecb12777bae77
|
[] |
no_license
|
jmbh/mt.analysis
|
53a5303d12fd05e8ba82fef55a6fa6e807219c7f
|
f1cb6528b314cec86bb69eb1c1740e45fbbe7853
|
refs/heads/master
| 2016-09-09T21:22:41.476501
| 2015-04-14T09:47:28
| 2015-04-14T09:47:28
| 29,824,603
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
mt.ttdetect.R
|
#setwd("G:\\MPI\\__trajtypes_paper_2015\\RawData")
#data <- readRDS("koop_processed.RDS")
#library(ggplot2)
## input:
# data matrix with MAD column labeled "MAD"
# number of clusters: nclust
# vector of column names of variables used for clustering
## output:
# data matrix with one additional column = cluster-membership
# Assign trajectory-type clusters via k-means.
#
# Args:
#   data: data frame containing (at least) the clustering variables.
#   kclust: number of clusters to fit.
#   varclust: character vector of column names used for clustering.
# Returns: `data` with an added column `clusters` giving each row's
#   k-means cluster membership. kmeans() is stochastic -- set a seed
#   beforehand for reproducible assignments.
mt.ttdetect <- function(data, kclust, varclust) {
  km_fit <- kmeans(data[, varclust], kclust)
  data[["clusters"]] <- km_fit$cluster
  data
}
#table(data$clusters)
#visualize
#setwd("G:\\MPI\\__trajtypes_paper_2015\\analysis")
#pdf(file="kmeans2.pdf", height=8, width=11)
#p <- ggplot(data, aes(xflip,y))
#p + geom_point(alpha=.5, color="#FF6666") + theme_bw()
#p <- ggplot(data, aes(xflip,y))
#p + geom_point(aes(colour = factor(cluster)), alpha=.5) + theme_bw() + theme(legend.position="none")
#dev.off()
#boxplot(MAD~cluster, data)
|
ce9fd753b965e5832843d9a97f3c43cf4c3a0848
|
3ac99ff4e4c9f54adfd1227df92a39123cef9cdf
|
/R/monthly_plot.R
|
0f37cb4f5fbe7eb32092e1e5b5e6e2bddcf3c870
|
[] |
no_license
|
sukhyun23/wage
|
d84f7caa8e90013401b128167dc93a36dd80b2c1
|
86f2eb65ca41c2a5141214c8b284313d1f14ec49
|
refs/heads/master
| 2020-12-04T19:07:31.508869
| 2020-04-11T13:45:25
| 2020-04-11T13:45:25
| 231,876,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,771
|
r
|
monthly_plot.R
|
# Draw a monthly duty roster as a tile chart: one row per person (id),
# one column per day of the month. Tile fill encodes the `night` flag,
# Monday/Friday columns get vertical guide segments, person names label
# the left edge and weekday initials label the bottom.
#
# Args:
#   data: data frame with columns start, id, date, night, base_hour_day,
#         base_hour_night and 이름 (person name) -- schema inferred from
#         usage here; confirm against callers.
# Returns: a ggplot object.
monthly_plot <- function(data) {
  data <- data.table::data.table(data)
  gdat <- data
  gdat$x <- lubridate::day(gdat$start)
  # negate id so person 1 is drawn at the top
  gdat$y <- -as.numeric(gdat$id)
  gdat$base_hour <- gdat$base_hour_day + gdat$base_hour_night
  # hide rows with no worked hours
  gdat$y <- ifelse(gdat$base_hour <= 0, NA, gdat$y)
  # one name label per visible person, placed left of the tiles
  ydat <- unique(gdat[!is.na(y), .(y, 이름)])
  ydat$x <- -0.8
  # weekday initial for every calendar day in the data's range
  tmp_date <- as.Date(gdat$date)
  xdat <- data.frame(date = seq(min(tmp_date), max(tmp_date), by = 1))
  xdat$day <- weekdays(xdat$date)
  xdat$x <- lubridate::day(xdat$date)
  xdat$day <- stringr::str_sub(xdat$day, 1, 1)
  xdat$y <- min(ydat$y) - 1.3
  # vertical guide lines on Mondays ('월') and Fridays ('금')
  segdat <- xdat[xdat$day %in% c('월', '금'), ]
  segdat$yend <- max(ydat$y) + 1
  segdat$y <- min(ydat$y) - 1
  # aes() is namespaced below for consistency with the other ggplot2::
  # calls; the bare aes() only worked when ggplot2 was attached.
  g <- ggplot2::ggplot(gdat, ggplot2::aes(x = x, y = y)) +
    ggplot2::geom_segment(
      ggplot2::aes(x = x, xend = x, y = y, yend = yend),
      color = 'grey60',
      data = segdat
    ) +
    ggplot2::geom_tile(ggplot2::aes(fill = night), color = 'black') +
    ggplot2::scale_fill_manual(values = c('yellow', 'navy')) +
    ggplot2::scale_x_continuous(breaks = min(gdat$x):max(gdat$x)) +
    ggplot2::scale_y_continuous(
      breaks = min(gdat$y, na.rm = TRUE):max(gdat$y, na.rm = TRUE)
    ) +
    ggplot2::geom_text(
      ggplot2::aes(x = x, y = y, label = 이름), data = ydat
    ) +
    ggplot2::geom_text(
      ggplot2::aes(x = x, y = y, label = day), data = xdat
    ) +
    ggplot2::guides(fill = FALSE) +
    ggplot2::theme(
      axis.ticks = ggplot2::element_blank(),
      axis.text = ggplot2::element_blank(),
      axis.title = ggplot2::element_blank(),
      panel.background = ggplot2::element_rect(fill = 'white'),
      panel.grid.minor = ggplot2::element_blank()
      # panel.grid.major = element_line(color = 'grey85')
    )
  g
}
|
e99ffb13786d675315d2cd6c10b45839ca8c96f6
|
428dad6718179a377e250c40d0adf0c5070e7d51
|
/functions/plot_tools.R
|
a972745a882d5ebb225bab2674c08d8e1db70bc4
|
[] |
no_license
|
michbur/malarial_signal_peptides
|
5156ea01192768cc92c0580b0e7e7c9db860fa23
|
aa6651cacfa39970963f60d605f59e185b115967
|
refs/heads/master
| 2020-04-10T03:56:10.127236
| 2016-09-28T04:43:41
| 2016-09-28T04:43:41
| 40,606,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,140
|
r
|
plot_tools.R
|
# Shared ggplot2 theme helpers (assumes ggplot2 is attached by the script
# that sources this file).
# Global offset applied to every font size below.
size_mod <- -5
# Transparent-background theme with dashed light-grey major grid lines.
# NOTE(review): the original passed panel.grid.major twice ("grey", then
# "lightgrey"); duplicated arguments to theme() are ambiguous, so only the
# later "lightgrey" setting -- presumably the author's revision -- is kept.
my_theme <- theme(plot.background=element_rect(fill = "transparent",
                                               colour = "transparent"),
                  panel.grid.major = element_line(colour="lightgrey", linetype = "dashed", size = 0.5),
                  panel.background = element_rect(fill = "transparent",colour = "black"),
                  legend.background = element_rect(fill = "NA"),
                  legend.position = "bottom",
                  axis.text = element_text(size=13 + size_mod),
                  axis.title.x = element_text(size=16 + size_mod, vjust = -1),
                  axis.title.y = element_text(size=16 + size_mod, vjust = 1),
                  strip.text = element_text(size=17 + size_mod, face = "bold"),
                  legend.text = element_text(size=13 + size_mod),
                  legend.title = element_text(size=17 + size_mod),
                  plot.title = element_text(size=20 + size_mod),
                  strip.background = element_rect(fill = "NA", colour = "NA"))
|
e591092949a3b2ca2fe1d90b1b54211a8c1f5c3b
|
fcf6a44685bc68e2dc31ef88b5bd3b74ef158c52
|
/sparseWtime.R
|
bd348fc3d53c37c5489e086f17fa4bec9060cd56
|
[] |
no_license
|
scwatson812/BayesianSpaTemQuantileRegression
|
ca584630d6835904ffdb62497332eb2f43f99a3f
|
50426fefd115d71291ebdbe5142956ae6fa62af0
|
refs/heads/master
| 2022-11-23T15:01:41.793825
| 2020-07-17T16:43:19
| 2020-07-17T16:43:19
| 274,753,118
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
sparseWtime.R
|
# Build a sparse space-time adjacency matrix for a g.s x g.s spatial grid
# observed at tm time points: block-diagonal within-slice spatial
# neighbourhoods (from sparseW, defined elsewhere in this project) plus
# links between the same cell at adjacent time points.
#
# Args:
#   g.s: grid side length, so each time slice has g.s^2 cells.
#   tm:  number of time points.
# Returns: a sparse (g.s^2 * tm) x (g.s^2 * tm) Matrix (Matrix package).
sparseWtime<-function(g.s,tm){
  # spatial neighbourhood for a single time slice
  W.base = sparseW(g.s)
  # NOTE(review): this allocates a dense zero matrix before converting;
  # Matrix(0, n, n, sparse = TRUE) would avoid the dense allocation.
  W.time = Matrix(matrix(0,g.s*g.s*tm,g.s*g.s*tm),sparse = TRUE)
  # diagonal blocks: within-slice spatial adjacency
  for(i in 1:tm){
    W.time[((i-1)*g.s*g.s+1):(i*g.s*g.s),((i-1)*g.s*g.s+1):(i*g.s*g.s)] = W.base
  }
  # first time slice: link each cell forward to itself at t+1 only
  for(i in 1:(g.s^2)){
    W.time[i,(i+g.s^2)] = 1
  }
  # interior slices: link both forward (t+1) and backward (t-1)
  for(i in (g.s^2 +1):((tm-1)*g.s^2)){
    W.time[i,i+g.s^2] = 1
    W.time[i,i-g.s^2] = 1
  }
  # last time slice: link backward only
  for(i in ((tm-1)*g.s^2 + 1):(tm*g.s*g.s)){
    W.time[i,i-g.s^2] = 1
  }
  return(W.time)
}
|
90a973d4c544b628c3e067a2725fed599db12a5b
|
efa0fb0cc58bb692e60d324caa87bc89c5eadcb7
|
/Boston practice.R
|
5ad4e2192871d836c9670ff5ed5595657af3794d
|
[] |
no_license
|
chetanbommu/RStudioProject
|
0621dc39d5482b054929617f7fe5b766770be8d8
|
b17fa09089459128196f0274837b90c147d5e450
|
refs/heads/master
| 2020-04-11T15:45:12.475266
| 2018-12-06T01:44:11
| 2018-12-06T01:44:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
r
|
Boston practice.R
|
# Simple linear-regression practice on the Boston housing data (MASS).
library(MASS)
data("Boston")
?Boston
head(Boston)
summary(Boston)
str(Boston)
# chas (Charles River dummy) and rad (highway-access index) are categorical
Boston$chas=as.factor(Boston$chas)
Boston$rad=as.factor(Boston$rad)
# medv is right-skewed; a sqrt transform makes it closer to symmetric
hist(Boston$medv)
hist(sqrt(Boston$medv))
Boston$medv_sqrt=sqrt(Boston$medv)
## simple linear regression
plot(Boston$lstat,Boston$medv)
cor(Boston$lstat,Boston$medv)
# 70/30 train/test split (random; set.seed() beforehand for reproducibility)
rows=1:nrow(Boston)
trainRows=sample(rows,round(nrow(Boston)*0.7))
trainData = Boston[trainRows,]
testData = Boston[-trainRows,]
# Fit on the TRAINING rows only. The original passed data = Boston, which
# trained on the full dataset and leaked the held-out test rows into the fit.
model=lm(medv_sqrt~lstat+black,data = trainData)
summary(model)
preds=predict(model,testData)
# the model predicts sqrt(medv), so square to return to medv units
preds= preds^2
# test-set RMSE
sqrt(mean((testData$medv-preds)^2))
|
c31423eafe86e13671de6708c939608d702b7283
|
d510eba7bbfeff11bc2498ddbdf91a21ef05c2ff
|
/Course04_Exploratory_Data_Analysis/project_1/plot4.R
|
1d5aaa88be243643e7927ef29fd9684dbabada43
|
[] |
no_license
|
git-comp/datasciencecoursera
|
5d0200073fe44e0a125f837f70a6332952ac7ebd
|
69cdf910c0ad5b3cf31f2f721279f4347fc45fc4
|
refs/heads/main
| 2023-05-09T05:03:58.247752
| 2021-05-25T08:58:39
| 2021-05-25T08:58:39
| 362,227,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,201
|
r
|
plot4.R
|
# Plot 4: a 2x2 panel figure of household power consumption for
# 2007-02-01..2007-02-02, written to plot4.png.
# Read data to powerDT ("?" marks missing values in the raw file)
powerDT <- data.table::fread(input = "household_power_consumption.txt", na.strings="?")
# Adjust date format (raw file stores dd/mm/yyyy strings)
powerDT[, Date := lapply(.SD, as.Date, "%d/%m/%Y"), .SDcols = c("Date")]
# Restrict to period of 2007-02-01 and 2007-02-02
powerDT <- powerDT[(Date >= "2007-02-01") & (Date <= "2007-02-02")]
# Add merged date and time column used as the x axis of every panel
powerDT[, dateTime := as.POSIXct(paste(Date, Time), format = "%Y-%m-%d %H:%M:%S")]
png("plot4.png", width=480, height=480)
# 2x2 layout, panels filled row by row
par(mfrow=c(2,2))
# top-left: global active power over time
plot(x=powerDT[,dateTime], y=powerDT[,Global_active_power], type="l", xlab="", ylab="Global Active Power (kW)")
# top-right: voltage over time
plot(x=powerDT[,dateTime], y=powerDT[,Voltage], type="l", xlab="", ylab="Voltage")
# bottom-left: the three sub-metering series overlaid with a legend
plot(powerDT[,dateTime], powerDT[,Sub_metering_1], type="l", xlab="", ylab="Energy sub metering")
lines(powerDT[,dateTime], powerDT[,Sub_metering_2],col="red")
lines(powerDT[,dateTime], powerDT[,Sub_metering_3],col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),lty=c(1,1), lwd=c(1,1))
# bottom-right: global reactive power over time
plot(x=powerDT[,dateTime], y=powerDT[,Global_reactive_power], type="l", xlab="datetime", ylab="global_reactive_power")
# Close png after writing
dev.off()
|
4498cd9c98ba034b9eb20dcc1cda262b31e8d8c5
|
411aab55a0cc48e2fefadd20da10edeb5922a945
|
/src/000_moving_window_test.R
|
1f2a9306f791a277930942e1bbff4936c14d8dd6
|
[] |
no_license
|
GeoMOER-Students-Space/Envimaster-Geomorph
|
3ed0d65692a9bd71fea22d035452d49b7868f425
|
8171383f5681b25bc36c8bfc1d3db083a3d934af
|
refs/heads/master
| 2020-06-02T11:09:51.322486
| 2019-09-27T22:06:56
| 2019-09-27T22:06:56
| 191,135,581
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,693
|
r
|
000_moving_window_test.R
|
#############################################################################################
# 000_moving_window_test.R
# Purpose: search for a good moving-window parameterisation for CENITH V2
# crown segmentation by validating a grid of (a, b, h) combinations against
# reference points, then segmenting with the best-scoring parameter set.
#
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(raster)
require(envimaR)
require(link2GI)
# packages used by the sourced setup script, plus the shared src folder
libs = c("link2GI","ForestTools","uavRst","mapview")
pathdir = "repo/src/"
# set root folder for uniPC or laptop; alternativeEnvi (envimaR) switches
# the root depending on the machine's COMPUTERNAME
root_folder = alternativeEnvi(root_folder = "~/edu/Envimaster-Geomorph",
                              alt_env_id = "COMPUTERNAME",
                              alt_env_value = "PCRZP",
                              alt_env_root_folder = "F:/edu/Envimaster-Geomorph")
# source environment script (defines envrmt used below)
source(file.path(root_folder, paste0(pathdir,"001_setup_geomorph_v1.R")))
# source CENITH Validation V2
source(file.path(root_folder, paste0(pathdir,"Cenith_V2/002_cenith_val_v2.R")))
source(file.path(root_folder, paste0(pathdir,"Cenith_V2/dev_sf_cenith_val_a.R")))
source(file.path(root_folder, paste0(pathdir,"Cenith_V2/dev_sf_cenith_val_b.R")))
# source CENITH V2 segmentation functions
source(file.path(root_folder, file.path(pathdir,"Cenith_V2/000_cenith_v2.R")))
source(file.path(root_folder, file.path(pathdir,"Cenith_V2/cenith_tiles.R")))
source(file.path(root_folder, file.path(pathdir,"Cenith_V2/cenith_tp_v2.R")))
source(file.path(root_folder, file.path(pathdir,"Cenith_V2/cenith_seg_tiles.R")))
source(file.path(root_folder, file.path(pathdir,"Cenith_V2/cenith_merge.R")))
source(file.path(root_folder, file.path(pathdir,"Cenith_V2/cenith_seg_v1.R")))
# load example surface model (raster) and validation points (shapefile)
som <- raster::raster(file.path(envrmt$path_Cenith_V2,"exmpl_som.tif"))
vp <- rgdal::readOGR(file.path(envrmt$path_Cenith_V2,"exmpl_vp.shp"))
vp <- spTransform(vp,"+proj=utm +zone=32 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
compareCRS(som,vp) # check that both layers share the same projection
# run the validation over the parameter grid; wrong min/max values abort,
# so start by checking the parameter bounds.
# NOTE(review): `var` shadows base::var within this script.
var <- cenith_val_v2(chm=som,f=1,a=c(0.5,0.9),b=c(0.5,0.9),h=c(0.5,0.7),vp=vp)
# locate the best hit rate (which.max returns only the first maximum)
maxrow <- var[which.max(var$hit),]
maxhit <- maxrow$hit
# show every parameter combination that achieves the best hit rate
var[which(var$hit==maxhit),]
### run segmentation with CENITH V2 using chosen parameters
seg <- Cenith(chm=som,h=0.7,a=0.9,b=0.1)
# plot result for visual check
mapview::mapview(seg$polygons)+som
# end of script
|
29539c0f0cdfa945035a12be9d79200c7ebf29d5
|
dc1f17859c4d14d2d18e34a377a474b7d955c09f
|
/PEPATACr/man/narrowPeakToBigBed.Rd
|
8ea4118417fd83447492c8861743f8de821aeec6
|
[
"BSD-2-Clause"
] |
permissive
|
databio/pepatac
|
55f4b7947333c3543f892e19e60803d04003eba5
|
9ee0b6c1251b1addae8265c12f16cbfeae76d489
|
refs/heads/master
| 2023-08-08T22:02:23.327668
| 2023-07-31T21:32:28
| 2023-07-31T21:32:28
| 58,678,230
| 46
| 11
|
BSD-2-Clause
| 2023-07-31T21:32:30
| 2016-05-12T21:29:13
|
R
|
UTF-8
|
R
| false
| true
| 633
|
rd
|
narrowPeakToBigBed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PEPATACr.R
\name{narrowPeakToBigBed}
\alias{narrowPeakToBigBed}
\title{Convert a narrowPeak file to a bigBED format file.}
\usage{
narrowPeakToBigBed(
input = input,
chr_sizes = chr_sizes,
ucsc_tool = ucsc_tool,
keep = FALSE
)
}
\arguments{
\item{input}{Path to narrowPeak file}
\item{chr_sizes}{Genome chromosome sizes file. <Chr> <Size>}
\item{ucsc_tool}{Path to UCSC tool "bedToBigBed"}
\item{keep}{Keep BED format intermediate file}
}
\description{
Convert a narrowPeak file to a bigBED format file.
}
\keyword{bigBed}
\keyword{narrowPeak}
|
876eca34f456d3b315db50e018d0ac542a89627d
|
ca019df5543ac6378cca22e974a6ee8ac711ebaf
|
/week 1 quiz.R
|
a0de5582c96745d12b7bde008e5f2ec39c1bf75a
|
[] |
no_license
|
arkhamknight1234/courseracapstone
|
8ea231a8351f9038f2e7116e26370be5f976a4fa
|
914faa7c0e3cac09bf71c3fadf1f84a4321d0511
|
refs/heads/master
| 2020-05-05T06:45:31.322293
| 2019-04-06T07:34:48
| 2019-04-06T07:34:48
| 178,858,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
week 1 quiz.R
|
# Coursera Data Science Capstone -- week 1 quiz.
#Question - 2: number of lines in the twitter file
# Open the connection explicitly and close it when done; the original
# opened it inline inside readLines() and never closed it (leak).
con <- file("en_US.twitter.txt")
twitter <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
length(twitter)
#Question 3
#What is the length of the longest line seen in any of the three en_US data sets?
# Blogs file
blogs<-file("en_US.blogs.txt","r")
blogs_lines<-readLines(blogs)
close(blogs)
summary(nchar(blogs_lines))
#News file
news<-file("en_US.news.txt","r")
news_lines<-readLines(news)
close(news)
summary(nchar(news_lines))
#twitter (already in memory from question 2)
summary(nchar(twitter))
#Question 4
#In the en_US twitter data set, if you divide the number of lines where the word "love" (all lowercase)
#occurs by the number of lines the word "hate" (all lowercase) occurs, about what do you get?
love<-length(grep("love", twitter))
hate<-length(grep("hate", twitter))
love/hate
#Question 5
#The one tweet in the en_US twitter data set that matches the word "biostats" says what?
grep("biostats", twitter, value = TRUE)
#Question 6
#How many tweets have the exact characters "A computer once beat me at chess, but it was no match for me at kickboxing".
#(I.e. the line matches those characters exactly.)
grep("A computer once beat me at chess, but it was no match for me at kickboxing", twitter)
|
479c46554030f953ddab16feaa896381b768fab9
|
9afc1ed9b218051b1b531539a49ebe65b661d861
|
/download_Prelictum_GOterms.R
|
d354814d68074c773a6a70b5e823518fd38e0c99
|
[] |
no_license
|
vincenzoaellis/sequence_capture_code
|
52eeed07f232d8d76284059bb7a7dfb2a83b459a
|
9f674ab9459241a3f58fe2c2689f0fda5f774a5c
|
refs/heads/master
| 2020-03-29T17:36:51.404612
| 2018-10-30T12:54:07
| 2018-10-30T12:54:07
| 150,173,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 683
|
r
|
download_Prelictum_GOterms.R
|
#### Download GO terms for P. relictum from PlasmoDB website
#### Vincenzo A. Ellis
#### 25 September 2018
# Fetch the Plasmodium relictum GO-annotation table (GAF 2 format) from
# PlasmoDB and return it as a data frame with standard GAF column names.
prel_go <- function(){
  gaf_cols <- c("DB", "DB Object ID", "DB Object Symbol", "Qualifier", "GO_ID",
                "DB_Reference", "Evidence_Code", "With_or_From", "Aspect", "DB_Object_Name",
                "DB_Object_Synonym", "DB_Object_Type", "Taxon", "Date", "Assigned_By",
                "Annotation_Extension", "Gene_Product_Form_ID")
  # skip = 1 drops the "!gaf-version" header line of the GAF file
  read.delim("http://plasmodb.org/common/downloads/Current_Release/PrelictumSGS1-like/gaf/PlasmoDB-39_PrelictumSGS1-like_GO.gaf",
             skip = 1,
             col.names = gaf_cols)
}
|
488b95eb25c7c91a6cdba62aee2b31f15d4d2e7e
|
7f928e44ff33be967054c302d4007f92f895929e
|
/Statistics/Assignments/assignment3/assignment3.R
|
506b156d32f03cde34c2b06612cf6c7722c3061d
|
[] |
no_license
|
pranjalijambhule/GreyCampus-DS3
|
9f28c78dfdd70b2226e4898c7d1c5d954d01430a
|
1d02112ed953d0fa32c003b89536878b8e906eab
|
refs/heads/main
| 2023-05-18T03:34:29.401981
| 2021-06-04T11:32:58
| 2021-06-04T11:32:58
| 353,991,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,550
|
r
|
assignment3.R
|
# GreyCampus statistics assignment 3: exploratory analysis of a scraped
# worldometer-style COVID-19 snapshot (COVID19.csv).
library(dplyr)
library(manipulate)
library(ggplot2)
library(moments)
getwd()
# NOTE(review): absolute machine-specific path; a relative path or
# here::here() would make this reproducible on other machines.
setwd("C:/sovi/Data Science/Stats/week3/assignment")
covidFile <- read.csv("COVID19.csv",
                      na.strings = c("", "NA"),
                      stringsAsFactors = FALSE)
str(covidFile)
head(covidFile,10)
colSums(is.na(covidFile))
# removing the empty column "X."
covidFile <- covidFile %>%
  select(!"X.")
#location of empty country data
which(is.na(covidFile$Country.Other))
#removing empty country row
covidFile <- covidFile[ complete.cases(covidFile$Country.Other), ]
head(covidFile,10)
# getting the NA percentage of each column
colMeans(is.na(covidFile)*100)
# removing the columns with more than 5% NAs
# NOTE(review): covidFile1 is built and trimmed below but all later
# analysis uses covidFile -- confirm which dataset was intended.
covidFile1 <- covidFile %>%
  select(!c(4:6,8,10:15,17:19))
# covidFile1 <- covidFile %>%
#   purrr::discard(~sum(is.na(.x))/length(.x)*100 >= 10)
# finding the unwanted rows
# covid <- covidFile %>%
#   filter(Country.Other == "Total:")
#removing unwanted rows (hard-coded row positions -- fragile if the
#source file changes)
covidFile1 <- covidFile1[-(229:237),]
# converting string to integer for further calculation
# #Removing commas from strings
for (i in c(3:15,17:19)) {
  covidFile[,i] <- as.numeric(gsub(",","",covidFile[,i]))
}
############### NOTE:::::
# Whenever dataset is showing certain columns as factor and we want to convert those into numeric, never ever
# directly try to convert it into numerics.
# If we use as.numeric first, then level of factor variables will become your numeric data, not your actual data.
# Hence, first convert factor columns into character and then convert them into numeric.
head(covidFile)
str(covidFile)
# giving new column names
#
# covidFile1 <- covidFile1 %>%
#   rename(Country = "Country.Other",
# .
# .
# .
# .
# )
# 19 names, matching covidFile's column count after dropping "X."
colnames(covidFile) <- c("Serial No.", "Country", "TotalCases","NewCases","TotalDeaths","NewDeaths", "TotalRecovered","NewRecovered", "ActiveCases","SeriousCritical","million_Pop_Cases", "million_Pop_Deaths", "TotalTests","million_Pop_Tests","Population", "Continent", "CaseEvery_X_PPL", "DeathEvery_X_PPL","TestEvery_X_PPL")
#converting continent into factor
#covidFile <- transform(covidFile, Continent = factor(Continent))
#################################
##3.1 distributions of cases, recoveries and deaths
#plot for Total cases
#hist(covidFile$TotalCases)   #looks like right skewed
ggplot(covidFile, aes(x = TotalCases)) + geom_histogram(bins = 500)
# as data is highly skewed we try to do the log transformation
ggplot(covidFile, aes(x = TotalCases)) + scale_x_log10() + geom_histogram()
#For total recovered
ggplot(covidFile, aes(x = TotalRecovered)) + scale_x_log10() + geom_histogram()
# For Total Deaths
ggplot(covidFile, aes(x = TotalDeaths)) + scale_x_log10() + geom_histogram()
# #looks like the Total cases > Total Recovery > Total Deaths
# boxplot(cbind(covidFile$TotalCases, covidFile$TotalRecovered,
#               covidFile$TotalDeaths ), main = 'Plots for cases, recovery and deaths.',
#         names = c('Total Cases', 'Total Recovery', 'Total deaths'))
####################################
#3.2 Relation between cases and population (log-log scatter by continent)
ggplot(covidFile, aes(TotalCases, Population, colour = Continent)) + geom_point() + scale_x_log10() + scale_y_log10()
skewness(covidFile$TotalCases)
skewness(covidFile$Population, na.rm = TRUE)
# when data is highly skewed we don't use mean and SD, instead we use median and IQR
median(covidFile$TotalCases)
IQR(covidFile$TotalCases)
####################################
#
# # 3.3. Create a plot to examine the correlation between Tot Cases/1M pop and total population.
ggplot(covidFile, aes(million_Pop_Cases, Population)) + geom_point() + scale_y_log10() + scale_x_log10()
#####################################
# 3.4 Which column do you feel is better for comparison purposes, total cases or TotCases/1M pop.
# Total cases / 1M pop is better for comparison purpose. Original numbers in this types of cases can be superficial.
# Rates will be better to compare countries or continent.
##################################
## 3.5. Create a plot to examine the correlation between total cases and total death.
ggplot(covidFile, aes(TotalCases, TotalDeaths)) + geom_point() + scale_x_log10() + scale_y_log10()
# looks like the deaths are more when the cases are high
#####################################
## 3.6. Create a plot to examine the correlation between total cases and Deaths/1M pop. Explain the figure.
# Which column is more suitable to compare the result, total death or Death/1Mpop?
# we should never compare "RATE" with "NUMBER"
ggplot(covidFile, aes(TotalCases, million_Pop_Deaths)) + geom_point() + scale_x_log10() + scale_y_log10()
##############################################
## 3. 7. Compare Tot Cases/1M pop by continent, and explain your result.
ggplot(covidFile, aes(x = Continent, y = million_Pop_Cases, fill = Continent)) + geom_bar(position = 'dodge', stat = 'identity')
# Europe is having the highest ratio of Cases/1M pop
# removing NA bar
covidFile %>%
  na.omit(Continent) %>%
  ggplot(aes(x = Continent, y = million_Pop_Cases, fill = Continent)) +
  geom_bar(position = 'dodge', stat = 'identity')
###############################
# 3.8.Compare Deaths/1M pop by continent, and explain your result.
ggplot(covidFile, aes(x = Continent, y = million_Pop_Deaths, fill = Continent)) + geom_bar(position = 'dodge', stat = 'identity')
#Again Europe is having the highest ratio of Deaths/1M pop
# removing NA bar
covidFile %>%
  na.omit(Continent) %>%
  ggplot(aes(x = Continent, y = million_Pop_Deaths, fill = Continent)) +
  geom_bar(position = 'dodge', stat = 'identity')
###############################
# 3. 9. Which country is best among testing the COVID19 and which country is worst? There are two columns total test vs. test/M. Choose appropriate column.
head(covidFile)
summary(covidFile$million_Pop_Tests)
# NOTE(review): `== c(575, 5540672)` compares element-wise with recycling;
# `%in% c(575, 5540672)` is presumably the intended lookup -- verify.
covidFile%>%
  filter(million_Pop_Tests == c(575, 5540672))%>%
  select(2:2)
##############################
# 3. 10. Compare your COVID19 test results by continent? There are two columns total test vs test/M. Choose appropriate column.
covidFile%>%
  group_by(Continent)%>%
  ggplot(aes(x = Continent, y = TestEvery_X_PPL)) + geom_boxplot()
####################################
# 3. 11. Check if Tests/1M pop is skewed or normally distributed.
skewness(covidFile$million_Pop_Tests, na.rm = TRUE)
summary(covidFile$million_Pop_Tests)
#As mean is greater than median it is right skewed
hist(covidFile$million_Pop_Tests)
|
08d711612e16849d7a73f69cc51476dd82a9aa5e
|
748d8aa1622b35e27454f25893de6229134eea21
|
/Segundo-encontro/predicao-de-despesas-medicas.r
|
68519d9d60ac75592a94dfed3bd9fdbdd1f8aafe
|
[] |
no_license
|
luizaalves/CursoR
|
b16cf854a30c166d99c395665cee29e531b13559
|
1629a07d42011d620cdddfb04ae537065b6fcd3e
|
refs/heads/master
| 2020-07-23T02:20:20.900434
| 2019-09-12T01:13:14
| 2019-09-12T01:13:14
| 207,415,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
predicao-de-despesas-medicas.r
|
# Predict medical expenses with linear regression (insurance.csv).
insurance <-read.csv("insurance.csv", stringsAsFactors = TRUE)
# Which variables influence the expense amount?
str(insurance)
# minimum and maximum of the target variable
summary(insurance$expenses)
# histogram: the higher the expenses, the fewer people appear in the data
# (right-skewed target)
hist(insurance$expenses)
table(insurance$region)
table(insurance$sex)
table(insurance$smoker)
# correlation matrix between the numeric variables
cor(insurance[c("age","bmi","children","expenses")])
# train the model; lm = linear model relating expenses to the predictors
ins_model <- lm(expenses ~ age + children + bmi + sex + smoker + region, data = insurance)
# inspect the fitted coefficients
ins_model
# evaluate model performance
summary(ins_model)
# second part: how much each variable impacts the result (more asterisks in
# the summary = stronger evidence). Add a squared-age term, an obesity
# indicator (BMI >= 30), and interaction terms.
insurance$age2 <- insurance$age^2
insurance$bmi30 <- ifelse(insurance$bmi >=30, 1,0)
ins_model2 <- lm(expenses ~ age2 + children*bmi + smoker*bmi30+smoker + region*children, data = insurance)
summary(ins_model2)
|
2b5c11efd1da3a8da0759dcf0bf7996f8c7b83ca
|
1fcaaafb1f597ec8ec80fd6a8e0ce46b518436a3
|
/cachematrix.R
|
b2bf0d1e84dd4ac307f2da71621e351a50d28c42
|
[] |
no_license
|
gcrowder/ProgrammingAssignment2
|
4db11bb50f7841c5008703d117104936d183b9a9
|
11a37fcabde4d96779d97cc11d9948d897c1285c
|
refs/heads/master
| 2021-01-22T00:24:32.230603
| 2016-07-17T20:15:30
| 2016-07-17T20:15:30
| 63,441,111
| 0
| 0
| null | 2016-07-15T17:57:30
| 2016-07-15T17:57:29
| null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
cachematrix.R
|
## makeCacheMatrix and cacheSolve together memoise matrix inversion:
## this function wraps a matrix in accessor closures that can cache its
## inverse, and cacheSolve computes/reuses that cached inverse.
## set() replaces the wrapped matrix and invalidates the cached inverse;
## setinverse()/getinverse() store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached <<- solve
  }
  getinverse <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## The first call computes the inverse with solve() and stores it in the
## object's cache; subsequent calls return the cached copy (announced via
## a "getting cached data" message).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
77eee7fa6f0a3fdee1bd9922fae15d4673b8d350
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NlsyLinks/examples/ExtraOutcomes79.Rd.R
|
a12855bd08dda605b0872a9e4ce69147336578e9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
ExtraOutcomes79.Rd.R
|
# Auto-extracted example for the NlsyLinks::ExtraOutcomes79 dataset.
library(NlsyLinks)
### Name: ExtraOutcomes79
### Title: Extra outcome variables in the NLSY79
### Aliases: ExtraOutcomes79
### Keywords: datasets
### ** Examples
library(NlsyLinks) #Load the package into the current R session.
gen2Outcomes <- subset(ExtraOutcomes79, Generation==2) #Create a dataset of only Gen2 subjects.
#plot(ExtraOutcomes79) #Uncomment to see a large scatterplot matrix.
summary(ExtraOutcomes79)
# Histogram grid of the main outcome variables; par() is saved and restored
# so the example does not leak graphics state.
oldPar <- par(mfrow=c(3,2))
hist(ExtraOutcomes79$Generation)
hist(ExtraOutcomes79$MathStandardized)
hist(ExtraOutcomes79$HeightZGenderAge)
hist(ExtraOutcomes79$WeightZGenderAge)
hist(ExtraOutcomes79$Afi)
hist(ExtraOutcomes79$Afm)
par(oldPar)
|
a9d353ca5682e352fa18740457504477734df7cd
|
8fe6731c8cca05a9d9989fb178b63e6297303312
|
/man/add_column.Rd
|
d4bf961386e2956ec1f8105eade19ec7ec4bc1d8
|
[
"MIT"
] |
permissive
|
bradleyboehmke/tibble
|
a375eb7ce4cf9430782ddae001ecdce4ffa7bb4b
|
2b3ab6e56e7c0aef24c665552a37513659468dc7
|
refs/heads/master
| 2020-09-15T10:45:23.342089
| 2019-11-18T18:13:44
| 2019-11-18T18:15:19
| 223,424,494
| 0
| 1
|
NOASSERTION
| 2019-11-22T14:49:16
| 2019-11-22T14:49:15
| null |
UTF-8
|
R
| false
| true
| 1,346
|
rd
|
add_column.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R
\name{add_column}
\alias{add_column}
\title{Add columns to a data frame}
\usage{
add_column(.data, ..., .before = NULL, .after = NULL)
}
\arguments{
\item{.data}{Data frame to append to.}
\item{...}{Name-value pairs, passed on to \code{\link[=tibble]{tibble()}}. All values must have
one element for each row in the data frame, or be of length 1.
These arguments are passed on to \code{\link[=tibble]{tibble()}}, and therefore also support
unquote via \verb{!!} and unquote-splice via \verb{!!!}. However, unlike in
\pkg{dplyr} verbs, columns in \code{.data} are not available for the
expressions. Use \code{\link[dplyr:mutate]{dplyr::mutate()}} if you need to add a column based on
existing data.}
\item{.before, .after}{One-based column index or column name where to add the
new columns, default: after last column.}
}
\description{
This is a convenient way to add one or more columns to an existing data
frame.
}
\examples{
# add_column ---------------------------------
df <- tibble(x = 1:3, y = 3:1)
add_column(df, z = -1:1, w = 0)
# You can't overwrite existing columns
\dontrun{
add_column(df, x = 4:6)
}
# You can't create new observations
\dontrun{
add_column(df, z = 1:5)
}
}
\seealso{
Other addition:
\code{\link{add_row}()}
}
\concept{addition}
|
f00ceba337776e7df43b0cfa0b8f102d5990eaf3
|
e649b8474ab3c5f5367abf806c6c750580ebbdfa
|
/ui.R
|
08b48d40b7bc2dabebb45e623d40884763512caa
|
[] |
no_license
|
joelpolanco/Coursera-Shiny-Project
|
4951b0857819343fca4159d78b84eac39df797bf
|
f207d5d22294fe71eba1e8ecc6697e460b2324fd
|
refs/heads/master
| 2021-01-10T09:56:20.293030
| 2016-04-01T22:13:20
| 2016-04-01T22:13:20
| 55,262,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
ui.R
|
library(shiny)
# UI for the single-purchaser dashboard: one slider ('mu') feeding the
# server-side histogram rendered as output 'myHist'.
shinyUI(pageWithSidebar(
  headerPanel("CRM Analytics Single Purchaser Dashboard"),
  sidebarPanel(
    # Trailing comma after step = 10 removed: it passed an empty
    # (missing) argument to sliderInput().
    sliderInput('mu', 'Guess at the mu', value = 70, min = 0, max = 500, step = 10)
  ),
  mainPanel(
    plotOutput('myHist')
  )
))
|
ce0c3b2964aba9af9a1d684b15139b39d463e1fc
|
983b875a39f510b9ac134b3a592165be0f260090
|
/analysis/1_data_cleaning.R
|
f03abecc036c92a7d7ddc259448a9ff952585c5e
|
[
"MIT"
] |
permissive
|
adelaidetovar/ozone-strain-survey
|
25f3a19f23d9d2d83409609166a34105eb1814f9
|
d5f1f964c37646f981ae5e299d8b339a8c9abe0a
|
refs/heads/main
| 2023-08-12T05:39:41.682131
| 2021-10-08T19:24:13
| 2021-10-08T19:24:13
| 404,029,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,356
|
r
|
1_data_cleaning.R
|
# contributed by Wes Crouse and Adelaide Tovar
# Cleans the ozone-strain survey phenotype workbook and the GEO expression
# counts, producing `df` (gene x sample counts), `pheno`, and `info`.
source("0_functions.R")
setwd("../data")
##############################
#### Phenotype Data Input ####
##############################
raw_data <- read.xlsx('raw_data.xlsx', sheet = 2, startRow = 2)
# NOTE(review): sapply() simplifies its result, so these columns likely come
# back as a character/integer matrix rather than factors -- confirm intent.
raw_data[,c(2:4)] <- sapply(raw_data[,c(2:4)], as.factor)
raw_data$strain <- factor(raw_data$strain, levels = c("C57BL/6J", "CC003", "CC017", "CC025", "CC039", "CC059"))
# BAL: ID/metadata columns 1:5 plus cell-count columns 6:14
bal <- raw_data[,c(1:14)]
bal[,c(6:14)] <- sapply(bal[,c(6:14)], as.numeric)
# protein (column 15)
protein <- raw_data[,c(1:5, 15)]
protein$protein_conc <- as.numeric(protein$protein_conc)
# albumin (column 16)
albumin <- raw_data[,c(1:5, 16)]
# remove samples that weren't measured
albumin <- albumin[complete.cases(albumin),]
# not detected samples become NA
albumin$albumin_conc <- as.numeric(albumin$albumin_conc)
# cytokines (columns 17:31)
cytokines <- raw_data[,c(1:5, 17:31)]
# remove samples that weren't measured
cytokines <- cytokines[complete.cases(cytokines),]
# not detected samples become NA
cytokines[,c(6:20)] <- sapply(cytokines[,c(6:20)], as.numeric)
####################################
#### Gene Expression Data Input ####
####################################
# load subject information; rownames get an "X" prefix to match the
# sample names produced when the count matrix is transposed below
info <- data.frame(read_xlsx("rnaseq_ids.xlsx"))
rownames(info) <- paste0("X", info$mouse_no)
info$strain[info$strain=="C57BL/6J"] <- "B6"
# Download expression data from GEO (GSE174205 supplementary count matrix)
getGEOSuppFiles("GSE174205", filter_regex = ".txt.gz")
# NOTE(review): head=T relies on partial matching of `header` and T for TRUE
df <- read.table(gzfile("../GSE174205/GSE174205_summary_count_matrix.txt.gz"), head=T, row.names=NULL)
# keep the first occurrence of each gene symbol
df <- df[!duplicated(df$Genes),]
rownames(df) <- NULL
df <- df %>% column_to_rownames(var = "Genes")
# transpose to samples-in-rows, genes-in-columns
df <- t(df)
# strip everything after the first "_" from sample names
rownames(df) <- sapply(rownames(df), function(x){unlist(strsplit(x, split="_"))[1]})
# Make phenotype data for Wes's gene expression analyses
ids <- rownames(info)
ids <- gsub("X", "", ids)
set <- c("mouse_no","eotaxin", "gcsf", "gmcsf", "il10", "il12p70", "il6",
         "ip10", "kc", "lix", "mcp1", "mip1a", "mip1b", "mip2")
# outer-join cytokines + BAL + protein for the RNA-seq subjects
pheno <- full_join(cytokines[cytokines$mouse_no%in%ids,set],
                   bal[bal$mouse_no%in%ids,c("mouse_no","per_neu", "no_neu", "per_macs", "no_macs")],
                   by = "mouse_no")
pheno <- full_join(pheno,
                   protein[protein$mouse_no%in%ids, c("mouse_no","protein_conc")],
                   by = "mouse_no")
pheno$mouse_no <- paste0("X",pheno$mouse_no)
pheno <- pheno %>% column_to_rownames("mouse_no")
####################
#data processing and formatting
#drop subjects without pairs ("x" marks unpaired subjects)
info <- info[info$pair!="x",]
df <- df[rownames(info),]
pheno <- pheno[rownames(info),]
#drop genes with fewer counts than samples
df <- df[,colSums(df) >= nrow(df)]
#ensure genes are integer counts
mode(df) <- "integer"
#drop genes with identical counts (duplicate count profiles across genes)
id <- apply(df, 2, paste, collapse=",")
id_table <- table(id)
id_table <- id_table[id_table!=1]
df <- df[,!(colnames(df) %in% unlist(lapply(names(id_table), function(x){names(which(id==x))})))]
save(df, file="df.RData")
#create unique pair ID
info$pair_ID <- apply(info, 1, function(x){paste(x["strain"], x["pair"], sep="_") })
info$pair_ID <- as.factor(info$pair_ID)
#format variables
info <- info[,-(1:2)]
info$strain <- as.factor(info$strain)
info$rx <- as.factor(info$rx)
info$sex <- as.factor(info$sex)
info$pair <- as.factor(info$pair)
# NOTE(review): this compares the column-name vector c("mouse_no","file")
# against ROW names and recycles the length-2 logical over the columns --
# almost certainly not the intended column drop; verify against the
# downstream analysis scripts.
info <- info[,!(c("mouse_no", "file") %in% rownames(info))]
|
a27fe8fa19eca19cdea9e246940829797aff243e
|
7c022651a9545e83efbe2fcc0054beb2d268b36c
|
/plot4.R
|
c0cda6add3dee179a7b3890e10665f226ae42355
|
[] |
no_license
|
debrakesner/ExData_Plotting1
|
653152e5879b579c0b280033ea0a43c39b587a61
|
8bc09d6e0e0794fa6279c400b8aa5b638c5bf8f4
|
refs/heads/master
| 2021-08-26T08:32:48.181008
| 2017-11-22T13:55:27
| 2017-11-22T13:55:27
| 111,604,186
| 0
| 0
| null | 2017-11-21T21:39:39
| 2017-11-21T21:39:39
| null |
UTF-8
|
R
| false
| false
| 1,882
|
r
|
plot4.R
|
## Draws the four-panel household power plot and saves it as plot4.png
# read the raw data
setwd("C:/Users/Debra/datasciencecoursera/explorProject1")
fileLoc <- "C:/Users/Debra/datasciencecoursera/explorProject1/household_power_consumption.txt"
power <- read.table(fileLoc, header = TRUE, sep = ";")
# give the columns shorter, lower-case names
names(power) <- c("date","time","globalactivepower","globalreactivepower","voltage","globalintensity","meterkitchen","meterlaundry","meterairh2o")
# build a combined date-time column and a Date-typed date column
power$datetime <- strptime(paste(power$date, power$time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
power$date <- as.Date(power$date, "%d/%m/%Y")
# keep only 2007-02-01 and 2007-02-02
feb <- power[(power$date >= as.Date("02/01/2007", format = "%m/%d/%Y") & power$date <= as.Date("02/02/2007", format = "%m/%d/%Y")), ]
# measurement columns arrive as text; coerce them to numeric
for (col in 3:8) {
  feb[, col] <- as.numeric(as.character(feb[, col]))
}
# 2 x 2 panel grid, filled column-first
par(mfcol = c(2, 2))
# panel 1: global active power over time
plot(feb$datetime, feb$globalactivepower, type = "l", col = "black", xlab = "", ylab = "Global Active Power (kilowatts)")
# panel 2: the three sub-metering series plus a legend
plot(feb$datetime, feb$meterkitchen, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(feb$datetime, feb$meterlaundry, col = "red")
lines(feb$datetime, feb$meterairh2o, col = "blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = c(1,1), col = c("black","red","blue"), cex = .5, bty = "n", y.intersp = .4, xjust = 1)
# panel 3: voltage
plot(feb$datetime, feb$voltage, type = "l", col = "black", xlab = "datetime", ylab = "Voltage")
# panel 4: global reactive power
plot(feb$datetime, feb$globalreactivepower, type = "l", col = "black", xlab = "datetime", ylab = "Global_reactive_power")
# copy the screen device to a 480x480 png file
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
8c746c4c6a5f02b5484cca6319f0eabcc49ad133
|
4df9da5cbe5af504e5f668929006e5a423bfeb77
|
/R/convertRows.R
|
9f08ee4c8a441ea8cca2903a3adbf78a28d0b5c1
|
[] |
no_license
|
UBod/msa
|
b1e57917d7f405f320fda4a549b0f4dbd62962c3
|
7688d0547e209ee1fc19a692a4713ea6621d1d39
|
refs/heads/master
| 2023-07-20T16:20:08.111418
| 2023-07-11T10:35:17
| 2023-07-11T10:35:17
| 133,512,829
| 12
| 8
| null | 2023-02-20T12:25:05
| 2018-05-15T12:27:12
|
C
|
UTF-8
|
R
| false
| false
| 1,534
|
r
|
convertRows.R
|
# Convert the raw text rows of a CLUSTAL .aln file into an
# Msa{DNA,RNA,AA}MultipleAlignment object.
#
# Arguments:
#   rows - character vector of file lines; rows[1] is the version/header
#          line, rows 2-3 must be blank, alignment blocks follow
#   type - "dna", "rna" or "protein"; selects the *StringSet container
#
# Stops with an error on a malformed header, on alignment blocks of
# unequal size, or on sequence ids appearing in different orders between
# blocks.  The assembled object is the value of the final assignment
# (returned invisibly).
convertAlnRows <- function(rows, type)
{
    version <- rows[1]
    # header sanity check: at least 3 lines, and lines 2-3 blank
    if (length(rows) < 3 ||
        ##!identical(grep("^CLUSTAL", rows[1L]), 1L) ||
        !identical(sub("^\\s+$", "", rows[2:3]), c("", "")))
        stop("There is an invalid aln file!")
    rows <- tail(rows, -3)
    # strip optional trailing residue counts from "<id> <seq> <n>" rows
    rows <- sub("^(\\S+\\s+\\S+)\\s*\\d*$", "\\1", rows)
    # markup rows hold only whitespace and conservation symbols (* : .)
    markupPattern <- "^(\\s|\\*|:|\\.)*$"
    markupLines <- grep(markupPattern, rows, perl=TRUE)
    # the complementary line ranges are the alignment blocks themselves
    alnLines <- gaps(as(markupLines, "IRanges"), start=1, end=length(rows))
    nseq <- unique(width(alnLines))
    # every block must hold the same number of sequences
    if (length(nseq) != 1)
        stop("There are missing alignment rows!")
    rows <- extractROWS(rows, alnLines)
    # split each row at the first whitespace run: id left, sequence right
    spaces <- regexpr("\\s+", rows)
    ids <- substr(rows, 1L, spaces - 1L)
    nsplits <- length(rows) %/% nseq
    # ids must repeat in the same order in every block
    if (!identical(ids, rep.int(head(ids, nseq), nsplits)))
        stop("The alignment rows are out of order!")
    alns <- substr(rows, spaces + attr(spaces, "match.length"), nchar(rows))
    # concatenate each sequence's per-block chunks, named by id
    chrs <- structure(do.call(paste,
                              c(split(alns, rep(seq_len(nsplits),
                                                each=nseq)), sep="")),
                      names=head(ids, nseq))
    type <- switch(type, dna="DNA", rna="RNA", protein="AA")
    out <- new(paste0("Msa", type, "MultipleAlignment"),
               unmasked=do.call(paste0(type, "StringSet"), list(chrs)),
               rowmask=as(IRanges(), "NormalIRanges"),
               colmask=as(IRanges(), "NormalIRanges"),
               version=version)
}
|
9c4fafeaa10fbdc7f3bb083eb4cef4d4bbd98394
|
3748be5371ba854978d2c88ad72044319ca9d528
|
/man/sle.Rd
|
225715aec402e63924dc80c16297419fcd39575d
|
[] |
no_license
|
rushkin/dla
|
0c93e0f90d3233021cc5590f4d20ae6574f3cfdb
|
5f22ab13276a91b683b904c9ce786330a3f5d4c8
|
refs/heads/master
| 2020-03-23T08:46:45.750688
| 2019-01-31T18:42:13
| 2019-01-31T18:42:13
| 141,344,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,489
|
rd
|
sle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sle.R
\name{sle}
\alias{sle}
\title{Stochastic Loewner Evolution}
\usage{
sle(kappa = 4, tmax = 1, a = 1, kappaL = 0, nsteps = 2000,
p_timescaling = 0.5, verbose = TRUE, forcing = NULL)
}
\arguments{
\item{kappa}{strength of the 1d Brownian motion in the driving function.}
\item{tmax}{max time to which the trace is developed.}
\item{a}{exponent of the Levy flights component in the driving function.}
\item{kappaL}{strength of the Levy flights component in the driving function.}
\item{nsteps}{number of steps.}
\item{p_timescaling}{exponent determining the distribution of time steps. The succession of times will be made uniform in the variable \code{t^p_timescaling}.}
\item{verbose}{boolean, to print progress statements or not.}
\item{forcing}{if not NULL, should be a dataframe of the driving function, which will then be used, overriding other driving-related arguments.
The dataframe should have columns t and xi, starting from t=0 and xi=0, sorted.}
}
\value{
List with components: \code{t} - vector of time values, \code{xi} - vector of values of the driving function, \code{gamma} - data frame of x and y coordinates of the generated trace,\code{t_cross} - crossover time between Brownian and Levy components (NULL if \code{kappaL = 0}), \code{call_params} - list of call parameters, \code{runtime} - elapsed time in seconds.
}
\description{
Generate SLE trace driven by Brownian motion, possibly with the addition of Levy-flights
}
\note{
SLE (Stochastic Loewner Evolution) is a generative stochastic process for growing a stochastic curve (trace) out of a boundary of a 2D domain.
It uses a continuous family of conformal maps, parametrized by a "time" parameter: w(z,t). The SLE equation is dw(z,t)/dt = 2/(w(z,t)-xi(t)), w(z,0)=z. Here xi(t) is the real-valued driving function of the process, assumed to be a stochastic process.
The mapping w(z,t) is from the upper half plane. In the standard SLE, xi(t) is the 1D Brownian motion with diffusion constant kappa (and an intricate connection to conformal field theory exists).
As a generalization, we also allow xi(t) to be a sum of 1D Brownian motion and Levy flights.
For more details see this publication and references therein:
Rushkin, I., Oikonomou, P., Kadanoff, L.P. and Gruzberg, I.A., 2006. Stochastic Loewner evolution driven by Lévy processes. Journal of Statistical Mechanics: Theory and Experiment, 2006(01), p.P01001.
}
|
fe42393c7354d44af0c7fce99a910cc8ce52bd7d
|
625b520f0e6390bf2a756008fc5a04fe81b76c1f
|
/scripts/lat_fe_stderr_qc.R
|
cd8739702a54dc47073a52c9ecb08aab7c9392fe
|
[] |
no_license
|
harrymengpku/trans_ethnic_ma
|
982f8fa88cde18828a744065e434139eebfd5796
|
927c8894d2874e03ae33ad9b003cbffa79139783
|
refs/heads/master
| 2023-08-27T07:10:22.966883
| 2021-10-24T11:09:18
| 2021-10-24T11:09:18
| 367,319,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,769
|
r
|
lat_fe_stderr_qc.R
|
# QC for fixed effect MA implemented by METAL,
# merges METAL output with 1000G rsIDs/positions, keeps SNPs present in more
# than one study, writes the QCed table, and draws a Manhattan plot.
setwd("/SAN/ugi/mdd/trans_ethnic_ma/results/")
library(data.table)
df <- fread("METAANALYSIS_HIS_1.tbl")
# Read 1000G all bim files and create a FileName column to store filenames
list_of_files <- list.files(path = "/SAN/ugi/ukhls/1000G/1KGP3_bim", recursive = TRUE,
                            pattern = "\\.bim.gz$",
                            full.names = TRUE)
DT <- rbindlist(sapply(list_of_files, fread, simplify = FALSE),
                use.names = TRUE)
head(DT)
# expected layout of the 1000G table:
#   chr        rsid cm   pos A1 A2           ID
#1:   1 rs367896724  0 10177 AC  A 1:10177:AC:A
#2:   1 rs540431307  0 10235 TA  T 1:10235:TA:T
#3:   1 rs555500075  0 10352 TA  T 1:10352:TA:T
#4:   1 rs548419688  0 10505  T  A  1:10505:A:T
#5:   1 rs568405545  0 10506  G  C  1:10506:C:G
#6:   1 rs534229142  0 10511  A  G  1:10511:A:G
dim(DT)
#names(DT) <- paste0(names(DT),"_1KG")
# BUG FIX: METAL names the heterogeneity I-squared column "HetISq" (as the
# subset() call below already assumes); the original `summary(df$HetIsq)`
# referenced a non-existent column and summarised NULL.
summary(df$HetISq)
heter <- subset(df, HetISq<75)
dim(heter)
# annotate meta-analysis results with rsID / chromosome / position
df <- merge(df,DT,by.x="MarkerName",by.y="ID",all.x=T,all.y=F)
# number of contributing studies = heterogeneity df + 1
df$N_study <- df$HetDf+1
cols <- c("MarkerName","rsid","chr","pos","Allele1","Allele2","Freq1","Effect","StdErr","P-value","N_study")
df <- df[,..cols]
names(df) <- c("MarkerName","RSID","Chromosome","Position","EA","NEA","EAF","BETA","SE","P","N_study")
# keep only SNPs present in more than one study, sorted by genome position
df <- df[df$N_study>1,]
df <- df[order(df$Chromosome,df$Position),]
dim(df)
write.table(df,"/SAN/ugi/mdd/trans_ethnic_ma/results/FE_lat_qced.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote = FALSE)
#manhattan plot (rows must have chromosome, position and p-value)
dat <- df[!is.na(df$Chromosome)&!is.na(df$Position)&!is.na(df$P),]
library(qqman)
# spell out width/height (original `w=`/`h=` relied on partial argument matching)
png("/SAN/ugi/mdd/trans_ethnic_ma/results/fe.his.stderr.manhattan.png",width=2300, height=1200, pointsize=20)
manhattan(dat,snp="RSID",chr="Chromosome",bp="Position",p="P")
dev.off()
|
db836bf610528b9761538f9110fc62b08c6bfaa3
|
396ec8eba748b30f8d7134761ff80fa291b5d63b
|
/R/06_modelling_ANN2.R
|
c22c94018a5caefd68c66cd81f7e5e975c36b7ad
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rforbiodatascience/2020_group03
|
589e5a9b90def83e84ae84e393ba19b6ee202127
|
eb9bc3b18be68596bf6462b0d0c41406125390f2
|
refs/heads/master
| 2021-04-05T22:48:58.222615
| 2020-05-13T21:34:23
| 2020-05-13T21:34:23
| 249,718,909
| 0
| 2
| null | 2020-05-05T09:14:54
| 2020-03-24T13:43:35
|
R
|
UTF-8
|
R
| false
| false
| 3,059
|
r
|
06_modelling_ANN2.R
|
# Clear workspace
# ------------------------------------------------------------------------------
rm(list = ls())
# Load libraries
# ------------------------------------------------------------------------------
library("tidyverse")
library("caret")
library("UniprotR")
library("ANN2")
library("yardstick")
# Define functions
# ------------------------------------------------------------------------------
source(file = "./R/99_project_functions.R")
# Load data
# ------------------------------------------------------------------------------
data_set_1 <- read_tsv(file = "./data/03_aug_data_set_1.tsv")
data_set_2 <- read_tsv(file = "./data/03_aug_data_set_2.tsv")
data_set_3 <- read_tsv(file = "./data/03_aug_data_set_3.tsv")
data_set_4 <- read_tsv(file = "./data/03_aug_data_set_4.tsv")
# Hello from ANN2 and set folder
# ------------------------------------------------------------------------------
folder <- "./results/06_ANN"
print('06_modelling_ANN2.R...')
# ANN2 artificial neural network for every data set and both encodings
# ------------------------------------------------------------------------------
# This replaces eight near-identical copy-pasted calls with one nested loop.
# Call order is unchanged: per data set, "z_scales" first, then "blosum62".
data_sets <- list(
  data_set_1 = data_set_1,
  data_set_2 = data_set_2,
  data_set_3 = data_set_3,
  data_set_4 = data_set_4)
for (ds_name in names(data_sets)) {
  for (encoding in c("z_scales", "blosum62")) {
    ANN2(
      name = ds_name,
      df = data_sets[[ds_name]],
      folder = folder,
      epochs = 10, # number of epochs
      hidden_layers = c(100, 100),
      scale = encoding,
      train_size = 0.75,
      seed_value = 42)
  }
}
|
b3eb131f548fb322244bda48a9865220f11c15a2
|
5095ee2491ad9d5129802954e11681b21acfc499
|
/Functions/resultsConcatenator-20161020.R
|
3c8ec5f78bad7c34d4ff8a4b384da76a4834a8c6
|
[] |
no_license
|
IssieWinney/Antirrhinum-ImageAnalysis
|
2069a68e3566a9c0db4b67ddd102038b0087615b
|
db160c3cc0bd2ac936d451ed99daf71569470a04
|
refs/heads/master
| 2020-09-21T16:57:29.104027
| 2016-12-12T13:20:16
| 2016-12-12T13:20:16
| 66,936,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
resultsConcatenator-20161020.R
|
# Isabel Winney
# 2016-10-20
# Results concatenator
# Apparently adding continuously to a results file is
# a hard task in imageJ/Fiji. This script is to take the
# .txt files produced by imagej and turn them in to a single file.
resultsConcatenator <- function(filepath, output){
	# INPUT		filepath	the relative filepath to the folder of
	#						imageJ result tables that you want to
	#						merge.
	# OUTPUT	output		a .txt file named 'output' containing
	#						your concatenated data.
	# WARNING	DELETES ALL SMALLER RESULT FILES IN THE FOLDER
	results <- list.files(path = filepath,
	                      include.dirs= FALSE)
	# read every result table once, then bind in a single call --
	# avoids the O(n^2) grow-by-rbind-in-a-loop of the original
	tables <- lapply(results, function(f) {
	  read.table(paste(filepath, "/", f, sep=""), header=T)
	})
	allresults <- do.call(rbind, tables)
	# remove the now-merged input files
	file.remove(paste(filepath, "/", results, sep=""))
	# write the concatenated table (tab-separated, no row names)
	write.table(allresults,
	            file = paste(filepath, "/", output, ".txt", sep=""),
	            row.names = FALSE,
	            sep = "\t")
	# Windows-only notification popup; harmless no-op elsewhere
	system('CMD /C "ECHO Your results have been concatenated: please move on to step 4 && PAUSE"',
	       invisible=FALSE, wait=FALSE)
}
|
2ebceb05dc1e9aba24b84c9efe881acb811c4d33
|
f7575b705a341ed4c808e8e2ebfa9e055395275f
|
/annotateCoords_visualization.R
|
c1ea67ffdd979850395d2d9388cd64ae05b1c4f2
|
[] |
no_license
|
canderson44/ProfileHMM_scriptsForServer
|
97084900d058b8c64becc9eddebeb4bbfbcb2560
|
120b2c3bc28a3909c5074460b6e22ff6c8b490a4
|
refs/heads/master
| 2020-06-04T09:30:44.115100
| 2019-08-09T01:36:24
| 2019-08-09T01:36:24
| 191,966,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,252
|
r
|
annotateCoords_visualization.R
|
#!/usr/bin/env Rscript
#### Description ####
# Counts combinations of regions identified per zmw. Outputs a csv of format RegionCombo, count
# Goal: determine where Adapter, 3' barcode, 5' barcode, and their reverse complements
# lie within a given CCS
# The pipe taken from script by Colin Dewey
#### load packages and data ####
library(tidyverse)
# BUG FIX: do not unconditionally install packages at run time;
# only install gridExtra when it is actually missing.
if (!requireNamespace("gridExtra", quietly = TRUE)) {
  install.packages("gridExtra")
}
library(gridExtra)
#FOR SERVER
coords_2B01 <- read.csv("/tier2/deweylab/scratch/ipsc_pacbio/demultiplexing/profile_hmm/annotated_ccs/2_B01_annotation_coords.csv")
coords_3C01 <- read.csv("/tier2/deweylab/scratch/ipsc_pacbio/demultiplexing/profile_hmm/annotated_ccs/3_C01_annotation_coords.csv")
coords_4D01 <- read.csv("/tier2/deweylab/scratch/ipsc_pacbio/demultiplexing/profile_hmm/annotated_ccs/4_D01_annotation_coords.csv")
#FOR CATHERINE MAC
#coords_2B01 <- read.csv("../data/2_B01_annotation_coords_2019-08-5.csv")
# coords_3C01 <- read.csv("../data/toy_coords_B.csv")
# coords_4D01 <- read.csv("../data/toy_coords_C.csv")
#### pipe by Colin Dewey ####
# For one annotation-coordinate table: drop the whole-read "CCS" rows,
# order the remaining regions along each read (by ZMW, then start),
# collapse each read's regions into a space-separated pattern string,
# and count how many reads share each pattern (most common first).
count_region_patterns <- function(coords) {
  coords %>%
    filter(region != "CCS") %>%
    arrange(ZMW, start) %>%
    group_by(ZMW) %>%
    summarise(pattern = paste(region, collapse = " ")) %>%
    count(pattern) %>%
    arrange(desc(n))
}
#2_B01
pattern_counts_2 <- count_region_patterns(coords_2B01)
#3_C01
pattern_counts_3 <- count_region_patterns(coords_3C01)
#4_D01
pattern_counts_4 <- count_region_patterns(coords_4D01)
#### save result ####
# for server
write_csv(pattern_counts_2, "/tier2/deweylab/scratch/ipsc_pacbio/demultiplexing/profile_hmm/annotated_ccs/2_B01_annotationAllRegionCombos.csv")
write_csv(pattern_counts_3, "/tier2/deweylab/scratch/ipsc_pacbio/demultiplexing/profile_hmm/annotated_ccs/3_C01_annotationAllRegionCombos.csv")
write_csv(pattern_counts_4, "/tier2/deweylab/scratch/ipsc_pacbio/demultiplexing/profile_hmm/annotated_ccs/4_D01_annotationAllRegionCombos.csv")
#for Catherine Mac
#write_csv(pattern_counts_2,"../2_B01_annotationRegionCombos.csv")
|
3bb55d829d1a66ec7ffa992dfc93fd882c79c659
|
cbc51337357b46d5f159eaa5a820c1fc0b9f7cdc
|
/MachineLearning/caretPackage.R
|
729888ffd503ac54a84a4c11116c347c8d261eeb
|
[] |
no_license
|
jvaldivial/coursera
|
b559629e7013f186b1a76465f65dfec52a5f563a
|
93a38268c9eba077f0c44684012fcde140eccc40
|
refs/heads/master
| 2021-04-25T04:01:49.383821
| 2018-10-04T11:00:50
| 2018-10-04T11:00:50
| 115,521,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
caretPackage.R
|
# Sample Code for Machine Learning Course @ Coursera
# Fits a GLM spam classifier with caret, evaluates it on a hold-out set,
# then demonstrates k-fold CV folds and rolling time slices.
setwd("~/GitHub/coursera/MachineLearning")
library(caret)
library(kernlab)
library(e1071)
data("spam")
# Hold out 25% of rows for testing
train_idx <- createDataPartition(y = spam$type, p = 0.75, list = FALSE)
train_set <- spam[train_idx, ]
test_set <- spam[-train_idx, ]
# Fit a GLM for the outcome `type`
set.seed(32343)
spam_fit <- train(type ~ . , data = train_set, method = "glm")
spam_fit
# Inspect the underlying fitted model
spam_fit$finalModel
# Predict on the held-out rows
preds <- predict(spam_fit, newdata = test_set)
preds
# Confusion matrix against the true labels
confusionMatrix(preds, test_set$type)
# K-fold cross-validation folds
set.seed(32323)
cv_folds <- createFolds(y = spam$type, k = 10, list = TRUE, returnTrain = TRUE)
sapply(cv_folds, length)
# Rolling time slices for time-series resampling
set.seed(32323)
time_index <- 1:1000
cv_folds <- createTimeSlices(y = time_index, initialWindow = 20, horizon = 10)
names(cv_folds)
cv_folds$train[[1]]
cv_folds$test[[1]]
|
49ff1cbeda3684aee503663973875f81e80a02f0
|
3fa1b23746232975b3b014db2f525007a3b49991
|
/anna_code/qc/real_or_dup_data_after_nov2019/out/plot_survey_qc.R
|
565e391571af73ff2ef9445ef0a75a6a74a09884
|
[] |
no_license
|
AshleyLab/myheartcounts
|
ba879e10abbde085b5c9550f0c13ab3f730d7d03
|
0f80492f7d3fc53d25bdb2c69f14961326450edf
|
refs/heads/master
| 2021-06-17T05:41:58.405061
| 2021-02-28T05:33:08
| 2021-02-28T05:33:08
| 32,551,526
| 7
| 1
| null | 2020-08-17T22:37:43
| 2015-03-19T23:25:01
|
OpenEdge ABL
|
UTF-8
|
R
| false
| false
| 1,235
|
r
|
plot_survey_qc.R
|
# Plot per-survey QC metrics (uploads / subjects over time) for every *.tsv
# summary table in the working directory; writes one stacked PNG per table.
rm(list=ls())  # kept from original: the script assumes a fresh workspace
library(ggplot2)
source("~/helpers.R")  # provides multiplot()
fnames <- dir()
for (fname in fnames){
  if (endsWith(fname,'.tsv')){
    print(fname)
    qc <- read.table(fname,header=TRUE,sep='\t')
    qc$Date <- as.Date(qc$Date)
    qc <- qc[order(qc$Date),]
    # one bar chart per metric; columns are mapped by name inside aes()
    # rather than the original aes(x=data$...) anti-pattern (equivalent here)
    p1=ggplot(data=qc,
              aes(x=Date,
                  y=Uploads))+
      geom_bar(stat='identity')+
      xlab("Date")+
      ylab("Uploads")+
      ggtitle(fname)+
      theme(axis.text.x = element_text(angle = 90, hjust = 1))
    p2=ggplot(data=qc,
              aes(x=Date,
                  y=Subjects))+
      geom_bar(stat='identity')+
      xlab("Date")+
      ylab("Subjects")+
      theme(axis.text.x = element_text(angle = 90, hjust = 1))
    p3=ggplot(data=qc,
              aes(x=Date,
                  y=NewSubjects))+
      geom_bar(stat='identity')+
      xlab("Date")+
      ylab("New Subjects")+
      theme(axis.text.x = element_text(angle = 90, hjust = 1))
    p4=ggplot(data=qc,
              aes(x=Date,
                  y=MeanUploadsPerSubjectPerDay))+
      geom_bar(stat='identity')+
      xlab("Date")+
      ylab("Mean Uploads per Subject per Day")+
      theme(axis.text.x = element_text(angle = 90, hjust = 1))
    # BUG FIX: original `heigh=8` only worked via partial argument
    # matching; spell out `height`.
    png(paste(fname,'png',sep='.'),height=8,width = 4,units='in',res=120)
    multiplot(p1,p2,p3,p4,cols=1)
    dev.off()
  }
}
|
a475f29c509dcacb18075ee3b8546c4b4de8d17d
|
5e45d9217574199d680f21c9d5651d06c63c9377
|
/man/continuous.SuperLearner.Rd
|
7bb890871abb45ab020fa81c18f1c1420b77f05c
|
[] |
no_license
|
tiantiy/superMICE
|
c358924a3e8c2607daaffe5305753c5b4da80a89
|
e7dc81015f046dcec15893522212573ef94ab1f5
|
refs/heads/master
| 2022-12-02T11:55:16.338673
| 2020-08-12T06:36:13
| 2020-08-12T06:36:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,731
|
rd
|
continuous.SuperLearner.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/continuous_SuperLearner_regression.R
\name{continuous.SuperLearner}
\alias{continuous.SuperLearner}
\title{Function to generate imputations using regression and SuperLearner for data with a continuous outcome}
\usage{
continuous.SuperLearner(
y,
x,
wy,
SL.library,
kernel,
bw,
lambda,
imputation,
weights,
...
)
}
\arguments{
\item{y}{Vector of observed values of the variable to be imputed.}
\item{x}{Numeric matrix of variables to be used as predictors in H2O methods
with rows corresponding to observed values of the variable to be imputed.}
\item{wy}{Logical vector of length length(y). A TRUE value indicates
locations in y for which imputations are created.}
\item{SL.library}{Either a character vector of prediction algorithms or a
list containing character vectors. A list of functions included in the
SuperLearner package can be found with SuperLearner::listWrappers().}
\item{kernel}{One of "gaussian",... Kernel function used to compute weights.}
\item{bw}{NULL or numeric value for bandwidth of kernel function (as standard deviations of the kernel).}
\item{lambda}{NULL or numeric value for bandwidth for kernel (as half-width of the kernel).}
\item{imputation}{One of "semiparametric" or "nonparametric". Determines
distribution from which imputed values are drawn. See
mice.impute.SuperLearner() documentation for more details.}
\item{weights}{One of "nadaraya-watson", ...}
\item{...}{further arguments passed to SuperLearner.}
}
\value{
Numeric vector of randomly drawn imputed values.
}
\description{
Function to generate imputations using regression and SuperLearner for data with a continuous outcome
}
|
23d0ccef850c677e19457b1c322f6b71d8703428
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/SPIn/R/bootSPIn.R
|
fa8a607293c7ade434b653e887407c7477cf89e4
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,268
|
r
|
bootSPIn.R
|
# Bootstrap-smoothed shortest probability interval (SPIn).
#
#   x      - vector of posterior simulation draws
#   n.boot - number of bootstrap replicates
#   conf   - interval coverage probability
#   bw, l, u - passed straight through to SPIn()
#   lb, ub - hard lower/upper support bounds (-Inf/Inf = unbounded)
#
# Returns an object of class "SPIn" whose endpoint weight vectors
# (w.l, w.u) are averaged over the bootstrap replicates; the interval
# endpoints are the corresponding weighted sums of order statistics.
bootSPIn <-
function(x, n.boot=50, conf = 0.95, bw = 0, lb = -Inf, ub = Inf, l=NA, u=NA){
	n.sims <- length(x)
	x <- sort(x)
	# spacing between adjacent order statistics, padded at both ends
	gaps <- x[2:n.sims] - x[1:(n.sims-1)]
	gaps <- c (gaps[1], gaps, gaps[n.sims-1])
	gap.bandwidth <- 2
	# local moving average of the gaps: used below to jitter the
	# bootstrap resamples so that duplicated draws do not collapse
	mean.gap <- rep (NA, n.sims)
	for (j in 1:n.sims){
	mean.gap[j] <- mean (gaps[max (1, j-(gap.bandwidth-1)) : min (n.sims+1, j+gap.bandwidth)])
	}
	theta.ordered.0 <- x
	# a finite bound means SPIn() augments the sample with one
	# pseudo-observation at that bound, so the weight matrices grow
	if (lb != -Inf)
		n.sims <- n.sims+1
	if (ub != Inf)
		n.sims <- n.sims+1
	# per-replicate endpoint weights over the (augmented) order statistics
	w.l <- matrix(0,nrow=n.sims,ncol=n.boot)
	w.u <- w.l
	for (i in 1:n.boot){
#		print(i)
		# resample with replacement, jittered by up to 1/20 of the local gap
		# NOTE(review): when lb/ub are finite, n.sims here exceeds
		# length(mean.gap) by 1-2, so the jitter recycles -- confirm intended
		x <- sample(theta.ordered.0,n.sims,T)
		x <- x + mean.gap*runif (n.sims,-1,1)/20
		r <- SPIn(x,conf = conf, bw = bw, lb = lb, ub = ub, l=l, u=u)
		w.l[r$l.l:r$l.u,i] <- r$w.l
		w.u[r$u.l:r$u.u,i] <- r$w.u
	}
	# rebuild the (sorted, possibly bound-augmented) sample the weights refer to
	x <- theta.ordered.0
	if (lb != -Inf)
		x <- c(x, lb)
	if (ub != Inf)
		x <- c(x, ub)
	x <- sort(x)
	# average weights across replicates; endpoints = weighted order statistics
	w.l <- rowMeans(w.l)
	x1 <- w.l%*%x
	w.u <- rowMeans(w.u)
	x2 <- w.u%*%x
	# index range of order statistics carrying non-zero weight per endpoint
	l.ind <- which(w.l!=0)
	l.l <- l.ind[1]
	l.u <- l.ind[length(l.ind)]
	u.ind <- which(w.u!=0)
	u.l <- u.ind[1]
	u.u <- u.ind[length(u.ind)]
	hpd <- list(spin = c(x1, x2), conf = conf, x = x, w.l=w.l, w.u=w.u, l.l=l.l, l.u=l.u, u.l=u.l, u.u=u.u)
	class(hpd) <- "SPIn"
	return(hpd)
}
|
2f00b1dfa6f1308ceec234f4bf54c121941fc2da
|
fd570307c637f9101ab25a223356ec32dacbff0a
|
/src-local/specpr/src.specpr/fcn48-51/getpt.r
|
942df6e7f81a0deabff5f58c722988c6b59e92e7
|
[] |
no_license
|
ns-bak/tetracorder-tutorial
|
3ab4dd14950eff0d63429291c648820fb14bb4cb
|
fd07c008100f6021c293ce3c1f69584cc35de98a
|
refs/heads/master
| 2022-07-30T06:04:07.138507
| 2021-01-03T22:19:09
| 2021-01-03T22:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 990
|
r
|
getpt.r
|
subroutine getpt(ichk,x,y,xmin,ymin)
	implicit integer*4 (i-n)
#ccc  version date: 06/01/83
#ccc  author(s): Roger Clark & Jeff Hoover
#ccc  language: Ratfor
#ccc
#ccc  short description:
#ccc        This subroutine reads one position from the digitizing
#ccc        tablet and converts it to user data coordinates:
#ccc        translate to the tablet origin, rotate by the calibration
#ccc        angle, then apply the per-axis scale factors and offsets.
#ccc  algorithm description: none
#ccc  system requirements: none
#ccc  subroutines called:
#ccc        tabpos
#ccc  argument list description:
#ccc        arguments: ichk,x,y,xmin,ymin
#ccc          ichk      (out) status/key code set by tabpos; the caller
#ccc                    is returned to immediately on the codes tested below
#ccc          x,y       (out) converted data coordinates
#ccc          xmin,ymin (in)  data-coordinate offsets of the axis origin
#ccc  parameter description:
#ccc  common description:
#ccc        tablet: xzero,yzero (origin), costh,sinth (rotation),
#ccc        xscale,yscale (axis scales); alphabet: letter codes
#ccc  message files referenced:
#ccc  internal variables:
#ccc  file description:
#ccc  user command lines:
#ccc  update information:
#ccc  NOTES:
#ccc
	include	"../common/tablet"
	include	"../common/alphabet"
	ichk=0
	call tabpos(ichk,iix,iiy)
# bail out on the special key codes (presumably 'e', 'x', 'd' -- see
# the alphabet common block; confirm)
	if (ichk==ihe || ichk==ihx || ichk==ihd) {
		return
	}
# translate raw tablet counts to the calibrated origin
	iix = iix - xzero
	iiy = iiy - yzero
# rotate by the calibration angle
	x =  (iix * costh) + (iiy * sinth)
	y = -(iix * sinth) + (iiy * costh)
# scale to data units; a zero scale factor leaves the rotated value as-is
	if (xscale != 0.0) x = x / xscale + xmin
	if (yscale != 0.0) y = y / yscale + ymin
	return
	end
|
afcf40cafca39a3ccb4087c3eed46089ff90a378
|
bd454c45d38cc48f6247d9dec829de0533793549
|
/man/piat.feedback.simple_score.Rd
|
0883b57e6673381a63451e81edc3f1981715887d
|
[
"MIT"
] |
permissive
|
pmcharrison/piat
|
f445431e6d59cbf63228619547ad4e078af58c2f
|
73c77acf379c233480819738214187cd9b1ba3f7
|
refs/heads/master
| 2023-08-14T17:02:04.665315
| 2023-07-26T21:27:39
| 2023-07-26T21:27:39
| 131,727,383
| 2
| 3
|
NOASSERTION
| 2022-12-21T10:09:03
| 2018-05-01T15:09:06
|
R
|
UTF-8
|
R
| false
| true
| 421
|
rd
|
piat.feedback.simple_score.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feedback.R
\name{piat.feedback.simple_score}
\alias{piat.feedback.simple_score}
\title{PIAT feedback (simple score)}
\usage{
piat.feedback.simple_score(dict = piat::piat_dict)
}
\arguments{
\item{dict}{The psychTestR dictionary used for internationalisation.}
}
\description{
Here the participant's score is reported at the end of the test.
}
|
39b4127396f9a61719d4fea13e9383b7ed5356ea
|
625c6620f117f50ab79f5fd3296e9576a0910187
|
/man/blackgrass.Rd
|
1e2d5556ac855bfbb7c6f5620be1ee200b4f181e
|
[] |
no_license
|
DoseResponse/drcData
|
378c850587d3332caa076192e480b4efb6904ba9
|
09f9da308aeea62322b0a7b67946435a87c36589
|
refs/heads/master
| 2023-02-24T02:20:00.374757
| 2021-01-28T12:04:31
| 2021-01-28T12:04:31
| 108,513,898
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,313
|
rd
|
blackgrass.Rd
|
\name{blackgrass}
\alias{blackgrass}
\docType{data}
\title{Seedling Emergence of Blackgrass (Alopecurus myosuroides)}
\description{Seedling emergence of herbicide susceptible (S) and resistant (R) Alopecurus myosuroides in response to sowing depth and suboptimal temperature regimes (10/5C) and optimal temperature regimes (17/10C).}
\usage{data("blackgrass")}
\format{
A data frame with 2752 observations on the following 12 variables.
\describe{
\item{\code{Exp}}{a numeric vector}
\item{\code{Temp}}{a numeric vector}
\item{\code{Popu}}{a numeric vector}
\item{\code{Bio}}{a factor with two levels}
\item{\code{Depth}}{a numeric vector}
\item{\code{Rep}}{a numeric vector}
\item{\code{Start.Day}}{a numeric vector}
\item{\code{End.Day}}{a numeric vector}
\item{\code{Ger}}{a numeric vector}
\item{\code{Accum.Ger}}{a numeric vector}
\item{\code{TotalSeed}}{a numeric vector}
\item{\code{Pot}}{a numeric vector}
}
}
\references{Keshtkar, E., Mathiassen, S. K., Beffa, R., Kudsk, P. (2017). Seed Germination and Seedling Emergence of Blackgrass (Alopecurus myosuroides) as Affected by Non-Target-Site Herbicide Resistance. Weed Science, 65, 732-742. https://doi.org/10.1017/wsc.2017.44
%% ~~ possibly secondary sources and usages ~~
}
\keyword{datasets}
|
c4a735d95aad6ac9e58e11e1d9c2c0ccbb2402a1
|
257ad4d98f21db8c9930f18efd6ce7e47c0a1ff1
|
/FPKMGCLM.R
|
68bcff5a87a27448f32f4b9946d3530940308208
|
[] |
no_license
|
SethMagnusJarvis/QuantSeqComparison
|
b37357a62757cbd1b517c1776e0bdeddb44a88f3
|
9e1c5b9e6d253f58ece2b2e5bffba013acd51599
|
refs/heads/master
| 2022-02-16T06:30:10.912423
| 2019-09-13T11:16:29
| 2019-09-13T11:16:29
| 208,249,672
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,583
|
r
|
FPKMGCLM.R
|
library(tidyverse)
library(DESeq2)
GetStats <- function(RPKM){
  # Per-gene genotype summary: splits samples into WT vs HOM columns,
  # appends each group's row mean, and returns one row per gene with the
  # WT - HOM difference in a "Diff" column.
  by_gene <- column_to_rownames(RPKM, "ensemblID")
  # homozygote samples are everything not labelled "WT"
  hom_cols <- select(by_gene, -contains("WT"))
  hom_cols$HOMMean <- rowMeans(hom_cols)
  hom_cols <- rownames_to_column(hom_cols, "ensemblID")
  wt_cols <- select(by_gene, contains("WT"))
  wt_cols$WTMean <- rowMeans(wt_cols)
  wt_cols <- rownames_to_column(wt_cols, "ensemblID")
  combined <- full_join(wt_cols, hom_cols, by = "ensemblID")
  mutate(combined, "Diff" = WTMean - HOMMean)
}
LinearMaker <- function(Quant, Total, GC)
{
  # Regress the per-gene QuantSeq-vs-RNASeq difference on GC count.
  # Returns summary(lm(...)) invisibly (the original ended in an assignment).
  merged <- full_join(Total, Quant, by = "ensemblID", suffix = c(".RNASeq", ".Quant"))
  merged <- left_join(merged, GC, by = "ensemblID")
  merged <- mutate(merged, 'Compare' = Diff.Quant - Diff.RNASeq)
  # drop genes missing any value before fitting
  merged <- merged[complete.cases(merged), ]
  invisible(summary(lm(merged$Compare ~ merged$GCount)))
}
# Regress the per-gene WT-HOM RPKM difference directly on GC count.
# BUG FIX: the original line read `LMBreakup <- LinearMaker <- function(RPKM, GC)`,
# which also rebound LinearMaker to this two-argument function and broke
# every later three-argument LinearMaker(...) call ("unused argument" error).
LMBreakup <- function(RPKM, GC){
  Join <- left_join(RPKM, GC, by = "ensemblID")
  Join <- Join[complete.cases(Join),]
  Lm <- summary(lm(Join$Diff ~ Join$GCount))
}
# Load each RPKM table (down-sampled, unsampled, and QuantSeq runs for the
# d14 and KO comparisons) and compute per-gene WT/HOM means + difference.
d14UnsampledRPKM <- read_csv("d14UnsampledRPKM.csv")
d14UnsampledMean <- GetStats(d14UnsampledRPKM)
KOUnsampledRPKM <- read_csv("KOUnsampledRPKM.csv")
KOUnsampledMean <- GetStats(KOUnsampledRPKM)
d14SampledRPKM <- read_csv("d14SampledRPKM.csv")
d14SampledMean <- GetStats(d14SampledRPKM)
KOSampledFPKM <- read_csv("KOSampledRPKM.csv")
KOSampledMean <- GetStats(KOSampledFPKM)
d14QuantFPKM <- read_csv("d14QuantRPKM.csv")
d14QuantMean <- GetStats(d14QuantFPKM)
KOQuantFPKM <- read_csv("KOQuantRPKM.csv")
KOQuantMean<- GetStats(KOQuantFPKM)
# Per-gene GC counts; SummarisedGC averages duplicate rows per ensemblID
AllGC20 <- read_csv("AllNamedGenes20WithNames.csv")
SummarisedGC <- AllGC20 %>%
  group_by(ensemblID) %>%
  dplyr::summarise(GCount = mean(GCount))
# Linear models of (Quant - RNASeq) difference vs GC content.
# NOTE(review): these three-argument calls fail as written, because the
# earlier `LMBreakup <- LinearMaker <- function(RPKM, GC)` line rebinds
# LinearMaker to a two-argument function -- confirm/fix that definition.
KOSampleLM <- LinearMaker(KOSampledMean, KOQuantMean, AllGC20)
d14SampleLM <- LinearMaker(d14SampledMean, d14QuantMean, AllGC20)
KOUnsampledLM <- LinearMaker(KOUnsampledMean, KOQuantMean, AllGC20)
d14UnsampledLM <- LinearMaker(d14UnsampledMean, d14QuantMean, AllGC20)
# Same models but against the per-gene averaged GC counts
KOSampleLMMean <- LinearMaker(KOSampledMean, KOQuantMean, SummarisedGC)
d14SampledLMMean <- LinearMaker(d14SampledMean, d14QuantMean, SummarisedGC)
KOUnsampledLMMean <- LinearMaker(KOUnsampledMean, KOQuantMean, SummarisedGC)
d14UnsampledLMMean <- LinearMaker(d14UnsampledMean, d14QuantMean, SummarisedGC)
PlotScatter <- function(Quant, Total, GC)
{
  # Scatter of the log (Quant - RNASeq) RPKM difference against GC count.
  # Returns the ggplot object so callers can add titles etc.
  joined <- full_join(Total, Quant, by = "ensemblID", suffix = c(".RNASeq", ".Quant")) %>%
    left_join(GC, by = "ensemblID") %>%
    mutate('Compare' = Diff.Quant - Diff.RNASeq)
  # keep only genes with every value present
  joined <- joined[complete.cases(joined), ]
  ggplot(joined, aes(x=log(Compare), y=GCount)) +
    geom_point() +
    theme(text = element_text(size=20)) +
    xlab("Log of difference in RPKM") +
    ylab("Number of GC")
}
# Scatter plots of RPKM difference vs GC for each comparison
KOSampleScatter <- PlotScatter(KOSampledMean, KOQuantMean, AllGC20) + ggtitle("Sampled KO and Quant RPKM vs GC")
d14SampledScatter <- PlotScatter(d14SampledMean, d14QuantMean, AllGC20) + ggtitle("Sampled d14 and Quant RPKM vs GC")
KOUnsampledScatter <- PlotScatter(KOUnsampledMean, KOQuantMean, AllGC20) + ggtitle("Unsampled KO and Quant RPKM vs GC")
d14UnsampledScatter <- PlotScatter(d14UnsampledMean, d14QuantMean, AllGC20) + ggtitle("Unsampled d14 and Quant RPKM vs GC")
# NOTE(review): combining ggplots with `+` needs the patchwork package,
# which is not loaded in this script -- confirm it is attached elsewhere.
d14SampledScatter+KOSampleScatter+d14UnsampledScatter+KOUnsampledScatter
# Per-condition regressions of the WT-HOM difference on GC count alone
KOSampleOnlyLM <- LMBreakup(KOSampledMean, AllGC20)
d14SampledOnlyLM <- LMBreakup(d14SampledMean, AllGC20)
KOUnsampledOnlyLM <- LMBreakup(KOUnsampledMean, AllGC20)
d14UnsampledOnlyLM <- LMBreakup(d14UnsampledMean, AllGC20)
KOQuantOnlyLM <- LMBreakup(KOQuantMean, AllGC20)
d14UQuantOnlyLM <- LMBreakup(d14QuantMean, AllGC20)
|
9794bb648ec1230a243d32b7a3f261c947938ce9
|
a608046e295e1030abe6977a725e3b1cb129a482
|
/plot2.R
|
f765865d8836770b4829cd1c4989fa9834b9b5d6
|
[] |
no_license
|
Halcyonhx/ExData_Plotting1
|
98e489c04644f799cc5d8b886f5ddcf1fe5fbb22
|
34aaeeecc2b0f5dfa8da2afe0fc8af07a19c79f2
|
refs/heads/master
| 2020-03-14T07:13:59.662053
| 2018-04-29T14:37:52
| 2018-04-29T14:37:52
| 131,500,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
plot2.R
|
# Combine the Date and Time columns into full timestamps, then plot
# Global_active_power over time and save it as plot2.png (width 480px;
# height uses the png() default).
# NOTE(review): `subData` must already exist in the session -- this script
# assumes it was subset upstream; confirm before running standalone.
# strptime() returns POSIXlt; storing it in a data.frame column works here
# but POSIXct is the usual choice for data-frame time columns.
subData$Time <- strptime(paste(subData$Date, subData$Time), format = '%d/%m/%Y %H:%M:%S')
png(filename = "plot2.png", width = 480)
with(subData, plot(Time, Global_active_power, type = 'l', xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()  # close the device so the PNG is actually written
|
4a2b664e19e6fad33e282da3dc64ed70ee76336e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rminer/examples/imputation.Rd.R
|
89abc688e3ff5ae92d7a892675e55d67c8e02174
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
imputation.Rd.R
|
library(rminer)
### Name: imputation
### Title: Missing data imputation (e.g. substitution by value or hotdeck
###   method).
### Aliases: imputation
### Keywords: manip
### ** Examples
# Toy 5x5 matrix; rows 4 and 5 carry NAs so both imputation modes have
# something to fill in.
d=matrix(ncol=5,nrow=5)
d[1,]=c(5,4,3,2,1)
d[2,]=c(4,3,4,3,4)
d[3,]=c(1,1,1,1,1)
d[4,]=c(4,NA,3,4,4)
d[5,]=c(5,NA,NA,2,1)
# Column 3 becomes a factor to show imputation on categorical data too.
d=data.frame(d); d[,3]=factor(d[,3])
print(d)
# "value" mode: substitute NAs with a fixed value (or the column median).
print(imputation("value",d,3,Value="3"))
print(imputation("value",d,2,Value=median(na.omit(d[,2]))))
print(imputation("value",d,2,Value=c(1,2)))
# "hotdeck" mode: nearest-neighbour substitution (Value = number of donors).
print(imputation("hotdeck",d,"X2",Value=1))
print(imputation("hotdeck",d,Value=1))
## Not run:
##D # hotdeck 1-nearest neighbor substitution on a real dataset:
##D d=read.table(
##D    file="http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data",
##D    sep=",",na.strings="?")
##D print(summary(d))
##D d2=imputation("hotdeck",d,Value=1)
##D print(summary(d2))
##D par(mfrow=c(2,1))
##D hist(d$V26)
##D hist(d2$V26)
##D par(mfrow=c(1,1)) # reset mfrow
## End(Not run)
|
25757fa8afe5e2686003397f254260391c355b93
|
a8148b19c2675fc14901bdb29654fba677693f56
|
/man/projections_accessors.Rd
|
3717803657fd0a6ce2be34118e1b0c3b8fd15cff
|
[] |
no_license
|
sangeetabhatia03/projections
|
50277da32c9ee7c3aedb28778866e046f6ffbd6d
|
f99b4d87ecdd1bb877129e98c3bb66cac5b68ffd
|
refs/heads/master
| 2023-01-10T23:06:02.357274
| 2021-04-22T09:17:47
| 2021-04-22T09:17:47
| 133,662,783
| 0
| 0
| null | 2018-05-16T12:31:45
| 2018-05-16T12:31:45
| null |
UTF-8
|
R
| false
| true
| 1,105
|
rd
|
projections_accessors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessors.R
\name{get_dates}
\alias{get_dates}
\alias{get_dates.projections}
\title{Access content projections objects}
\usage{
\method{get_dates}{projections}(x, ...)
}
\arguments{
\item{x}{A \code{projections} object.}
\item{...}{Further arguments passed to methods; currently not used.}
}
\description{
These simple helper functions retrieve content from \code{projections}
objects. They currently include:
}
\details{
\itemize{
\item \code{get_dates}: get dates of the predictions.
}
}
\examples{
if (require(distcrete) && require(incidence)) { withAutoprint({
## prepare input: epicurve and serial interval
dat <- c(0, 2, 2, 3, 3, 5, 5, 5, 6, 6, 6, 6)
i <- incidence(dat)
si <- distcrete("gamma", interval = 1L,
shape = 1.5,
scale = 2, w = 0)
## make predictions
pred_1 <- project(i, 1.2, si, n_days = 30)
pred_1
## retrieve content
get_dates(pred_1)
max(i$dates) # predictions start 1 day after last incidence
})}
}
\author{
Thibaut Jombart \email{thibautjombart@gmail.com}
}
|
7abec5ee9b939016ca7e2c85292c6c7f3707951a
|
12ea178f7c8dda5267269f31b3b02fdab29e5bee
|
/man/format.mondate.rd
|
78e079d968f1724b47ac2d5e5d6bd4ceabb75316
|
[] |
no_license
|
chiefmurph/mondate
|
0696c656c89843caba98122db2c621bbb89d23dd
|
67d2d11a5abdf94bbd7579f1f91d326f28da9276
|
refs/heads/master
| 2022-09-14T20:27:33.599641
| 2022-08-29T08:54:38
| 2022-08-29T08:54:38
| 42,559,910
| 1
| 2
| null | 2015-10-21T17:56:13
| 2015-09-16T02:34:33
|
R
|
UTF-8
|
R
| false
| false
| 738
|
rd
|
format.mondate.rd
|
\name{format.mondate}
\alias{format.mondate}
\title{Format a mondate}
\description{
Function to format a \code{mondate} into its character
representation according to the \code{displayFormat} property.
}
\usage{
\method{format}{mondate}(x, \dots)
}
\arguments{
\item{x}{
a \code{mondate}.
}
\item{\dots}{
further arguments passed to or from other methods.
}
}
\details{
For more details see \code{\link{format}} and especially \code{\link{strptime}}.
}
\value{
\code{character} representation of the \code{mondate}.
}
\seealso{
\code{\link{strptime}}.
}
\examples{
(b<-mondate(1)) # end of first month of millennium
format(b) # "01/31/2000" -- with quotes -- in the U.S. locale
format(b, format="\%Y-\%m-\%d") # "2000-01-31"
}
|
4815af80ad875048fc5e06c5149abd32e4270ce8
|
2579ef45fce30a693d90b8c83338cc8f107125c0
|
/inst/shiny-examples/firstShiny/app.R
|
f0fcb832a0f4d2609b5a00910255de5976cf3ea9
|
[
"MIT"
] |
permissive
|
yut4916/MATH5793YUT
|
c5473980b8da01192303fc14b9fc7a99757f3bd0
|
3c14a832d9e350de5b47457bc4f2c80f9d0ca97f
|
refs/heads/master
| 2023-03-26T09:04:54.416150
| 2021-03-30T02:28:59
| 2021-03-30T02:28:59
| 335,433,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,994
|
r
|
app.R
|
# First Shiny App
# Katy Yut
# Feb 22, 2021
# Load necessary packages
library(shiny)
library(shinydashboard)
library(ggplot2)
library(dplyr)
library(latex2exp)
library(rootSolve)
# Read in data
# data <- read.csv("/Users/Katy/Desktop/07_school/MATH5793/06_data/fourmeasure.csv", header=TRUE)
# T4.3 is loaded by name; presumably exported by a course/package dependency
# (e.g. the MATH5793 package in the repo) -- confirm which package provides it.
data("T4.3")
# NOTE(review): naming the object `data` shadows base::data(); it works but
# is easy to trip over later.
data <- T4.3
### UI ============================================================================#######
# UI: one sidebar holding controls for both tasks, one main panel stacking
# the two plots.  Task 1 = clickable drop-one-correlation scatter;
# Task 2 = axis-rotation plot driven by the theta slider.
ui <- fluidPage(
    titlePanel("Katy Yut's Shiny App"),
    sidebarLayout(
        sidebarPanel(
          # Task 1 sidebar
          h1("Task 1 Plot"),
          selectInput("x_axis1", "Horizontal Axis Variable",
                      choices = colnames(data)),
          selectInput("y_axis1", "Vertical Axis Variable",
                      choices = colnames(data)),
          sliderInput("t1_pch", "Point Size",
                      min=0, max=5, value = 2
          ),
          selectInput("t1_color", "Point Color",
                      choices = c("Red", "Orange", "Yellow", "Green", "Blue", "Purple", "Black", "White")),
          # Spacer: push the Task 2 controls down level with its plot.
          br(), br(), br(), br(), br(), br(), br(), br(), br(), br(),
          # Task 2 sidebar
          h1("Task 2 Plot"),
          selectInput("x_axis2", "Horizontal Axis Variable",
                      choices = colnames(data)),
          selectInput("y_axis2", "Vertical Axis Variable",
                      choices = colnames(data)),
          # Rotation angle; capped at pi/2 since the rotation is periodic.
          sliderInput(
            inputId = "theta",
            label="Theta",
            min=0,
            max=round(pi/2, 2),
            value=0,
            post = " radians"
          )
        ),
        mainPanel(
           plotOutput("task1", click = "plot_click"),
           textOutput("clickCor"),
           br(), br(), br(),
           plotOutput("task2")
        )
    )
)
### SERVER ========================================================================#######
server <- function(input, output, session) {
    # Task 1: scatter of the two chosen columns; clicking a point triggers
    # the drop-one correlation readout below the plot.
    output$task1 <- renderPlot({
        ggplot(data=data, aes(x=data[,input$x_axis1], y=data[,input$y_axis1])) +
            geom_point(size=as.numeric(input$t1_pch), color=input$t1_color) +
            ggtitle("Task 1: Drop-One Correlation") +
            xlab(input$x_axis1) +
            ylab(input$y_axis1) +
            labs(subtitle="Click data point to calculate correlations")
    })
    # Correlation with and without the clicked observation(s): nearPoints()
    # maps the click back to data rows, which are then excluded from cor().
    output$clickCor <- renderText({
        req(input$plot_click)
        data$rowN <- 1:dim(data)[1]
        droppedPoint <- (nearPoints(data, input$plot_click, xvar=input$x_axis1, yvar=input$y_axis1))
        dropCor <- round(cor(data[-droppedPoint$rowN, c(input$x_axis1, input$y_axis1)])[1,2], 3)
        paste0("Correlation = ", round(cor(x=data[,input$x_axis1], y=data[,input$y_axis1]), 3), ", ", "\nDrop-One Correlation = ", dropCor)
    })
    # Task 2
    # calculate s12t: the off-diagonal entry of the correlation matrix after
    # rotating the axes by theta:
    #   s12(theta) = s12*(cos^2 - sin^2) + (s22 - s11)*sin*cos
    corMat <- reactive({cor(data[,c(input$x_axis2,input$y_axis2)])})
    s12 <- reactive({corMat()[1,2]})
    s11 <- reactive({corMat()[1,1]})
    s22 <- reactive({corMat()[2,2]})
    x <- reactive({as.numeric(input$theta)})
    s12t <- reactive({s12()*(cos(x())^2 - sin(x())^2) + (s22() - s11())*sin(x())*cos(x())})
    # calculate s12t=0 solution for quadrant 1 (root of the rotation formula
    # on (0, pi/2); uniroot assumes a sign change on that interval).
    q1_sol <- reactive({
        uniroot(function(x){s12()*(cos(x)^2 - sin(x)^2) + (s22() - s11())*sin(x)*cos(x)}, c(0, pi/2))$root
    })
    # Rotated-axes plot: red lines are the user's rotated axes, the dashed
    # grey line marks the quadrant-1 angle where the rotated s12 vanishes.
    output$task2 <- renderPlot({
        ggplot(data=data, aes(x=data[,input$x_axis2], y=data[,input$y_axis2])) +
            geom_point() +
            geom_abline(slope=tan(q1_sol()), intercept=0, color="grey", linetype="dashed") +
            geom_text(aes(x=0.8*max(data[,input$x_axis2]), y=0.1*min(data[,input$y_axis2]), label=paste0("Q1 solution: theta = ", round(q1_sol(), 2))), color="grey") +
            geom_hline(yintercept=0) +
            geom_vline(xintercept=0) +
            geom_abline(slope=tan(input$theta), intercept=0, color="red") +
            geom_abline(slope=tan(input$theta - pi/2), intercept=0, color="red") +
            coord_fixed() +
            ggtitle("Task 2: Rotating Axes") +
            xlab(input$x_axis2) +
            ylab(input$y_axis2) +
            labs(subtitle = TeX(sprintf('$\\tilde{s_{12}} = %f$', s12t())))
    })
}
### CALL APP ========================================================================#######
shinyApp(ui = ui, server = server)
|
055c69ed1210c0d6b56c180ee9a5026a53237a40
|
3466fe41d18e0c76cec8220d9e49019b1fd8be66
|
/April20_in_class_work.R
|
8f806dbb8d491633614a02e3a75598c1741b3255
|
[] |
no_license
|
pantp/DataAnalyticsSpring2020
|
868cc1bd21fe5a20a86ef64d6976488fbaa4c53a
|
3fa036779928a0597f70da8a8d41971f58103fa0
|
refs/heads/master
| 2020-12-19T22:14:00.976263
| 2020-05-06T17:24:44
| 2020-05-06T17:24:44
| 235,868,063
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,678
|
r
|
April20_in_class_work.R
|
# LOESS Example 1: same series smoothed at three span settings -- smaller
# span follows the data more closely, larger span gives a smoother curve.
data(economics, package="ggplot2") # load data
economics$index <- 1:nrow(economics) # create index variable
economics <- economics[1:80, ] # retain 80 rows for better graphical understanding
loessMod10 <- loess(uempmed ~ index, data=economics, span=0.10) # 10% smoothing span
loessMod25 <- loess(uempmed ~ index, data=economics, span=0.25) # 25% smoothing span
loessMod50 <- loess(uempmed ~ index, data=economics, span=0.50) # 50% smoothing span
# predict Loess (fitted values at the training points)
smoothed10 <- predict(loessMod10)
smoothed25 <- predict(loessMod25)
smoothed50 <- predict(loessMod50)
# plot the predictions
plot(economics$uempmed, x=economics$date, type="l", main="Loess Smoothing and Prediction",
     xlab="Date", ylab="Unemployment (Median)")
lines(smoothed10, x=economics$date, col="red")
lines(smoothed25, x=economics$date, col="green")
lines(smoothed50, x=economics$date, col="blue")
# LOESS Example 2:
# Fitting a curve to the data
data("cars")
str(cars) # we see 50 observation and 2 variables
# create a plot, speed Vs distance
plot(speed ~ dist, data = cars)
help("lowess")
lowess(cars$speed ~ cars$dist)
# use the lowess() function along with the line() function to draw the lines
lines(lowess(cars$speed ~ cars$dist, f=2/3), col="blue")
# here the f value is the the smoother span, f= 2/3 = 0.666
# the default value for smoother span is 0.666 in R.
#This gives the proportion of points in the plot which influence the smooth at eachvalue.
# Larger values give more smoothness.
# Change the "f" value and observe the shape of the line.
# lines(lowess(cars$speed ~ cars$dist, f=0.75), col="gray") # f = 0.75
lines(lowess(cars$speed ~ cars$dist, f=0.8), col="red") # f = 0.8
lines(lowess(cars$speed ~ cars$dist, f=0.9), col="green") # f = 0.9
lines(lowess(cars$speed ~ cars$dist, f=0.1), col= 5) # f = 0.1
lines(lowess(cars$speed ~ cars$dist, f=0.01), col= 6) # f = 0.01
# Linear Discriminant Analysis Example:
# Multiclass Classification
library(MASS)
names(iris)
dim(iris) # check the dimensions of the iris dataset, you will see 150 rows and 5 columns
head(iris)
# set the seed value (reproducible split) and create training dataset
set.seed(555)
Train <- sample(1:nrow(iris), nrow(iris)/2)
iris_Train <- iris[Train,] # Traning dataset
irist_Test <- iris[-Train,] # Testing dataset
help(lda)
fit1 <- lda(Species ~ Sepal.Length + Sepal.Width +
              Petal.Length + Petal.Width, data = iris_Train)
predict1 <- predict(fit1, iris_Train)
predict1_class <- predict1$class
# generating the confusion matrix using the table() function
table1 <- table(predict1_class, iris_Train$Species)
table1
# Calculating the Accuracy of the prediction (correct / total = trace/sum)
sum(diag(table1))/sum(table1)
|
2a8d8e795f20b96b1628646beaf0639ed300ecb0
|
5d7740f555e642a7679cc91ffa31193993fd911b
|
/R/wlogr2.R
|
0691146c45522369f0c812e289652112cce0a385
|
[] |
no_license
|
cran/WWR
|
c75c0ea6b965f15502c5a616525b3de5a87c55fe
|
827c382cd8e4834d87dcd236619e883a81de74c4
|
refs/heads/master
| 2021-01-22T05:47:06.752627
| 2017-10-25T02:40:18
| 2017-10-25T02:40:18
| 81,703,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 472
|
r
|
wlogr2.R
|
# Weighted log-rank test driven by the compiled Fortran routine "logrank2".
#
# y   : observed times (converted to min-ties ranks before the call)
# d   : event indicators
# z   : group labels
# wty : weight-type code forwarded to the Fortran routine (default 1)
#
# Returns a list with the raw statistic, its variance estimate, the
# standardised statistic and a two-sided normal-approximation p-value.
wlogr2 <- function(y, d, z, wty = 1)
{
  n <- length(y)
  time_rank <- rank(y, ties.method = "min")
  # Sort rows by (rank, event, group) before handing them to Fortran.
  ordered <- cbind(time_rank, d, z)[order(time_rank, d, z), ]
  fit <- .Fortran("logrank2",
                  as.integer(n),
                  as.integer(ordered[, 1]),
                  as.integer(ordered[, 2]),
                  as.integer(ordered[, 3]),
                  as.integer(wty),
                  stat = as.double(0.0),
                  vstat = as.double(1.0))
  tstat <- fit$stat / sqrt(n * fit$vstat)
  pstat <- 2 * (1 - pnorm(abs(tstat)))
  list(wty = wty, stat = fit$stat, vstat = fit$vstat,
       tstat = tstat, pstat = pstat)
}
|
b1c702dbfb3f5a7ab872463c38d7e177edd77db7
|
fcb5767cd47bf52162a781d5db9b3dfbc37d535f
|
/app.R
|
d75e26a03670a1f5954c42f8291c0bc8ceae270a
|
[] |
no_license
|
crimpfjodde/daoc
|
54211637a442d6de756e056aab899d7c6ecdab37
|
1efd1c520383664697cd21df4d3e748d4fc54426
|
refs/heads/main
| 2023-01-19T07:46:32.119535
| 2020-11-24T13:06:06
| 2020-11-24T13:06:06
| 315,639,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,148
|
r
|
app.R
|
ui <- dashboardPage(
dashboardHeader(title = 'DAoC'),
dashboardSidebar(
sidebarMenu(
menuItem("Herald",
tabName = 'herald',
icon = icon("broom")),
menuItem("Castspeed/buff",
tabName = "castspeed",
icon = icon("dashboard")),
menuItem("Crafting",
tabName = "craftprobability",
icon = icon("bomb")),
menuItem('Maps etc',
tabName = 'taskmap',
icon = icon('map-signs')),
menuItem('About',
tabName = 'about',
icon = icon('question'))
),
# Custom CSS to hide the default logout panel
# The dynamically-generated user panel
uiOutput("userpanel")
),
dashboardBody(
tags$head(tags$link(rel="shortcut icon", href="https://playphoenix.online/assets/images/favicon.png")),
tags$head(tags$style(HTML('.shiny-server-account { display: none; }
opacity:0.5 !important;'))),
# uiChangeThemeDropdown(),
shinyDashboardThemes(
theme = "grey_dark"
),
tabItems(
tabItem(tabName = 'herald',
fluidRow(width = 8,
infoBoxOutput("guild_rp"),
infoBoxOutput("guild_48h"),
infoBoxOutput("guild_lastweek")
),
fluidRow(width = 8,
)
),
tabItem(tabName = "craftprobability",
# textOutput('testtext'),
column(width = 6,
fluidRow(#width = 6,
box(width = 6, #style = "font-size: 14px;",
title = "Question", 'What is the probability of making atleast',
tags$i('x'), 'Master pieces in', tags$i('y'), 'number of tries?\n', br(),
tags$br(),
numericInput('no_mp',
value = 1,
step = 1,
min = 0,
label = "Number of MP's: "),
numericInput('no_tries',
value = 50,
step = 1,
min = 1,
label = 'Number of tries: ')
),
box(width = 6,
title = "Answer",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = TRUE,
span(textOutput("probability_value"), style = "font-size:30px;font-weight: bold;font-style:oblique"),
textOutput('probability_text')
)),
fluidRow(
box(width = 12,
title = "View a plot",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = TRUE,
plotlyOutput('distrib_plot')
)
)
),
column(width = 6,
box(#width = 3,
title = 'Unknown master crafter',
solidHeader = TRUE,
collapsible = TRUE,
collapsed = TRUE,
img(src='daoc_flex.jpg', height="100%", width="100%", align = "center"))
)
),
tabItem(tabName = 'castspeed',
fluidRow(
box(width = 2,
numericInput("delve",
"Delve:",
min = 1,
max = 10,
value = 3,
step = 0.1),
numericInput("currentdex",
"Current dex:",
min = 0,
max = 500,
value = 300,
step = 1),
numericInput("mota2",
"MotA:",
min = 0,
max = 9,
value = 0,
step = 1),
numericInput('dexaug2',
label = 'Aug Dex:',
min = 0,
max = 9,
step = 1,
value = 0)
),
column(width = 2,
fluidRow(
box(width = 12,
span(textOutput("casttimenew"), style = "font-size:30px;font-weight: bold;font-style:oblique"),
textOutput("casttimecurrent"),
textOutput("diff"),
textOutput('pointsspent'),
textOutput('relativevalue'),
textOutput('percentDecrease'))
),
fluidRow(
box( width = 12,
helpText('Pretty much stolen from' ,
tags$b(tags$a(href = "https://docs.google.com/spreadsheets/d/1CslgNBWCDhdfEYrCxDyklMosSCPAnAacTEdpbK32esE/edit#gid=0","here"))))
)
),
tabBox(width = 6,
title = "",
tabPanel(title = 'Time difference',
plotlyOutput('plotutDiff')
),
tabPanel(title = 'Cast speed',
plotlyOutput('plotutSpeed')
),
tabPanel(title = 'Percent',
plotlyOutput('plotutPercent'))
)
)#,
# fluidRow(width = 10, box(width = 10, DT::dataTableOutput('buff_tbl')))
),
tabItem(tabName = 'about',
box('This is a hobby project of mine where i play with a raspberry Pi.',
tags$br(),
'I am not a programmer or an IT proffesional of any kind, but just a guy that likes to play around with stuff like this.',
tags$br(),
'And play DAoC ofc!',
tags$br(),
'Get in touch with ',
actionButton("show", "me"),
tags$br(),
tags$br(),
'Written in', tags$a(href = "https://shiny.rstudio.com/","Shiny"),
tags$br(),
'Plots produced with', tags$a(href = "https://plotly.com/r/","Plotly"),
tags$br(),
'On', tags$a(href = "https://rstudio.com/products/rstudio/download-server/", "RStudio server"),
tags$br(),
'Hosted on a ',tags$a(href = "https://www.raspberrypi.org/products/raspberry-pi-4-model-b/?resellerType=home","Raspberry Pi 4"), ''
),
tags$style(type = 'text/css', '#show
{background-color: #f2f2f2;
box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2), 0 3px 10px 0 rgba(0,0,0,0.19);
color: #595959;
padding: 1px 10px;
font-family: Helvetica;
text-align: center;
font-style: italic;
text-decoration: none;
display: inline-block;
font-size: 12px;}')),
tabItem('taskmap',
fluidRow(
tabBox(width = 5,
title = "Task maps",
tabPanel(title = 'Alb',
img(src='task_alb.png', height="100%", align = "center")
),
tabPanel(title = 'Mid',
img(src='task_mid.png', height="100%", width="100%", align = "center")
),
tabPanel(title = 'Hib',
img(src='task_hib.png', height="100%", align = "center")
)
),
box(Title = 'Order')
)
# ,
# img(src='daoc_original_map.jpg', height="100%", width = "100%", align = "center")
# fluidRow(
# box(title = 'Original daoc map',
# solidHeader = TRUE,
# collapsible = TRUE,
# actionButton("go", "Go")
# ,bsModal("window", "", "go"
# ,uiOutput("myImage")),
# img(src='daoc_original_map.jpg', height="100%", width = "100%", align = "center")
# )
# )
)
),
fluidRow(
tags$head(
# tags$style(HTML(".main-sidebar { font-size: 18px; }")),
)
)
)
)
###########################################################################################################################
server <- function(input, output, session) {
  # Scrape the Phoenix herald page for the "Janitors" guild and return all
  # HTML tables found on it (via RCurl::getURL + XML::readHTMLTable).
  # NOTE(review): depends on the remote site being reachable and on its
  # current page layout; downstream code hard-codes table/column positions.
  get_phoenix <- function() {
    url <- paste0('https://herald.playphoenix.online/g/Janitors')
    temp <- getURL(url)
    li <- readHTMLTable(temp, as.data.frame = TRUE)
    li
  }
  # Take the 7th table from the herald page and coerce columns 5-9 (realm
  # point figures formatted like "1,234") from strings to numbers by
  # stripping the thousands separators first.
  # NOTE(review): both the table index (7) and the column range (5:9) are
  # hard-coded to the current herald layout -- confirm if the page changes.
  as_numeric = function() {
    x <- get_phoenix()[[7]]
    for (i in 5:9) {
      x[[i]] <- gsub(',', '', x[[i]])
      x[[i]] <- as.numeric(x[[i]])
    }
    x
  }
  # Guild members ordered by ascending 48-hour realm points.
  # (Name contains a typo -- "higest" -- kept as-is to avoid breaking any
  # callers outside this view.)
  higest_rp <- function() {
    as_numeric() %>% arrange(`RP 48h`)
  }
output$guild_rp <- renderValueBox({
li <- get_phoenix()
infoBox(
title = HTML(paste('Total rp<b>', br(), li[[1]][[2]][1], '</b>')),
subtitle = HTML(paste(li[[1]][[3]][1],"server rank", br(), li[[1]][[4]][1], 'realm rank')),
icon = icon("chart-bar"),
color = "green"
)
})
output$guild_48h <- renderValueBox({
li <- get_phoenix()
infoBox(
title = HTML(paste('rp last 48h<b>', br(), li[[1]][[2]][4], '</b>')),
subtitle = HTML(paste(li[[1]][[3]][4],"server rank", br(), li[[1]][[4]][4], 'realm rank')),
icon = icon("chart-bar"),
color = "green"
)
})
output$guild_lastweek <- renderValueBox({
li <- get_phoenix()
infoBox(
title = HTML(paste('rp last week<b>', br(), li[[1]][[2]][2], '</b>')),
subtitle = HTML(paste(li[[1]][[3]][2],"server rank", br(), li[[1]][[4]][2], 'realm rank')),
icon = icon("chart-bar"),
color = "green"
)
})
output$myImage <- renderUI({
img(src='daoc_original_map.jpg', height="727px", width = "1689px", align = 'center')
})
# Crafting ###################################################################
output$probability_value <- renderText({
paste0(round(tries_ut(),0), '%')
})
output$probability_text <- renderText({
paste0('The probability of making atleast ', input$no_mp, ' MP(s) in ',
input$no_tries, ' tries is ', round(tries_ut(),3), '%')
})
# output$probability_out <- renderText({
# paste0('(', tries_ut(), '%)')
# })
  # Probability (in percent, rounded to 4 dp) of crafting at least
  # input$no_mp masterpieces in input$no_tries attempts, assuming a 2%
  # per-attempt MP chance (Binomial model).
  tries_ut <- function() {
    tries = input$no_tries
    mps = input$no_mp - 1
    if(mps == -1) {
      # no_mp == 0: this returns P(exactly 0 MPs), not the trivial 100% that
      # "at least 0" implies. NOTE(review): presumably intentional (showing
      # the chance of making no MP at all) -- confirm against the UI wording.
      ut = (dbinom(0, (tries), 0.02))*100
    } else {
      # P(X > no_mp - 1) = P(X >= no_mp) for X ~ Binomial(tries, 0.02).
      ut = (pbinom(mps, tries, 0.02, lower.tail = F))*100
    }
    round(ut, 4)
  }
  # Build the probability curve for 1..no_tries attempts plus a
  # normal-approximation 95% confidence band, all in percent.  The band is
  # clamped to [0, 100] (note the slightly different round/clamp order for
  # the upper vs lower limit -- preserved from the original).
  plot_prob <- function() {
    z = 1.96  # two-sided 95% normal quantile
    tries = 1:input$no_tries
    mps = input$no_mp - 1
    if(mps == -1) {
      # Same special case as tries_ut(): P(exactly 0 MPs) when no_mp == 0.
      ut = (dbinom(0, (tries), 0.02))
    } else {
      ut = (pbinom(mps, tries, 0.02, lower.tail = F))
    }
    ci_up <- ut + z*sqrt(ut*(1-ut)/tries)
    ci_up <- round(ci_up*100, 3)
    ci_up <- replace(ci_up, ci_up > 100, 100)
    ci_lo <- ut - z*sqrt(ut*(1-ut)/tries)
    ci_lo <- replace(ci_lo, ci_lo<=0, 0)
    ci_lo <- round(ci_lo*100, 3)
    ut <- round(ut*100, 3)
    df <- data.frame(tries, ut, ci_lo, ci_up)
    df
  }
output$distrib_plot <- renderPlotly({
plot_ly(data = plot_prob(), x = ~tries) %>%
add_trace(y = ~ut,
type = 'scatter',
mode = 'lines+markers',
marker = list(size = 5,
color = 'rgb(255, 230, 230)',
line = list(color = 'rgb(255, 230, 230)',
width = 0)),
hoverinfo = 'text',
text = ~paste('Tries: ', tries,
'\nProbability: ', round(ut,3), '%',
'\nCI high', ci_up, '%',
'\nCI low', ci_lo, '%')) %>%
add_trace(y = ~ci_up,
type = 'scatter',
mode = 'lines',
hoverinfo = 'none',
line = list(color = 'transparent'),
showlegend = FALSE, name = 'Upper') %>%
add_trace(y = ~ci_lo,
type = 'scatter',
mode = 'lines',
hoverinfo = 'none',
fill = 'tonexty',
fillcolor='rgba(255, 230, 230, 0.2)',
line = list(color = 'transparent'),
showlegend = FALSE, name = 'Lower') %>%
layout(plot_bgcolor='#343E48',
paper_bgcolor='#343E48',
xaxis = list(color = "white",
title = 'Number of tries',
gridcolor = toRGB("gray50"),
showgrid = TRUE),
yaxis = list(color = "white",
gridcolor = toRGB("gray50"),
title = 'Probability ( 95% CI )',
showgrid = TRUE))
})
######## Casttime #######################################################################################
plotdf <- function(){
cdelve <- input$delve
cdex <- input$currentdex
points <- c(0,1,2,4,7,10,15,20,27,34)
plotgrid <- expand.grid(dex=c(0,4,8,12,17,22,28,34,41,48),mota=c(0,0.01,0.02,0.03,0.05,0.07,0.09,0.11,0.13,0.15))
# dexgrid <- (cdex + plotgrid$dex)
plotgrid$Speed <- cspeed(cdelve, (cdex + plotgrid$dex), plotgrid$mota)
speedBase <- round(cspeed(cdelve, cdex, 0), 4)
plotgrid <- plotgrid %>% mutate(cdelve,
adex = rep(0:9, 10),
amota = rep(0:9,
each = 10),
dpoints = rep(points, 10),
mpoints = rep(points, each = 10)) %>%
mutate(Points = dpoints + mpoints) %>%
arrange(Points)%>%
mutate(Diff = round((Speed - speedBase), 4),
`Diff %` = (1 - Speed/speedBase),
Percent = round((Speed/speedBase)*100, 4))
plotgrid
}
# output$table <- DT::renderDT(plotdf())
output$plotutDiff <- renderPlotly({
plot_ly(data = plotdf(),
x = ~Points) %>%
add_trace(y = ~Diff,
marker = list(size = 5,
color = 'rgb(255, 230, 230)',
line = list(color = 'rgb(255, 230, 230)',
width = 0)),
type = 'scatter',
mode = 'markers',
hoverinfo = 'text',
text = ~paste('MotA: ', amota, '\nADex: ',
adex, '\nPoints:', Points, '\nSpeed: ',
Speed,'\nDiff: ', Diff)) %>%
# layout(title = ~paste('All possible distributions of points\n', 'Delve:',input$delve, 'Dex:', input$currentdex)) %>%
layout(
# title = list(text = ~paste0(#'All possible distributions of points',
# 'Delve:',input$delve, ' Dex:', '', input$currentdex, '\n'),
# x = 0.15, yref = 'Diff', y = -0.2,
# font=list(size=16, color = "white")
# ),
margin = list( pad = 5)) %>%
layout(plot_bgcolor='#343E48') %>%
layout(paper_bgcolor='#343E48') %>%
layout(xaxis = list(color = "white",
gridcolor = toRGB("gray50"),
showgrid = TRUE)) %>%
layout(yaxis = list(color = "white",
gridcolor = toRGB("gray50"),
title = 'Diff (secunds)',
showgrid = TRUE))
# add_trace(y = ~diff.1, type = 'scatter', mode = 'lines+markers')
})
output$plotutSpeed <- renderPlotly({
plot_ly(data = plotdf(),
x = ~Points) %>%
add_trace(y = ~Speed,
marker = list(size = 5,
color = 'rgb(255, 230, 230)',
line = list(color = 'rgb(255, 230, 230)',
width = 0)),
type = 'scatter',
mode = 'markers',
hoverinfo = 'text',
text = ~paste('MotA: ', amota, '\nADex: ',
adex, '\nPoints:', Points, '\nSpeed: ',
Speed)) %>%
# layout(title = ~paste('All possible distributions of points\n', 'Delve:',input$delve, 'Dex:', input$currentdex)) %>%
layout(
# title = list(text = ~paste0(#'All possible distributions of points',
# 'Delve:',input$delve, ' Dex:', '', input$currentdex, '\n'),
# x = 0.15,
# font=list(size=16, color = "white")
# )
) %>%
layout(plot_bgcolor='#343E48') %>%
layout(paper_bgcolor='#343E48') %>%
layout(xaxis = list(color = "white",
gridcolor = toRGB("gray50"),
showgrid = TRUE)) %>%
layout(yaxis = list(color = "white",
gridcolor = toRGB("gray50"),
showgrid = TRUE,
title = 'Speed (secunds)'))
})
output$plotutPercent <- renderPlotly({
plot_ly(data = plotdf(),
x = ~Points) %>%
add_trace(y = ~Percent,
marker = list(size = 5,
color = 'rgb(255, 230, 230)',
line = list(color = 'rgb(255, 230, 230)',
width = 0)),
type = 'scatter',
mode = 'markers',
hoverinfo = 'text',
text = ~paste('MotA: ', amota, '\nADex: ',
adex, '\nPoints:', Points, '\nSpeed: ',
Speed,'\nPercent ', Percent)) %>%
# layout(title = ~paste('All possible distributions of points\n', 'Delve:',input$delve, 'Dex:', input$currentdex)) %>%
layout(
# title = list(text = ~paste0(#'All possible distributions of points',
# 'Delve:',input$delve, ' Dex:', '', input$currentdex, '\n'),
# x = 0.15,
# font=list(size=16, color = "white")
# )
) %>%
layout(plot_bgcolor='#343E48') %>%
layout(paper_bgcolor='#343E48') %>%
layout(xaxis = list(color = "white",
gridcolor = toRGB("gray50"),
showgrid = TRUE)) %>%
layout(yaxis = list(color = "white",
gridcolor = toRGB("gray50"),
showgrid = TRUE,
title = 'Percent of'))
})
  # Effective dexterity: base dex plus the stat bonus for an Aug Dex level.
  dexcalc <- function(dex, augdex) {
    dex + dexut(augdex)
  }
  # Cast time from spell delve, effective dex, and the MotA *level*
  # (mapped to its percentage bonus via motaut). Each dex point over 60
  # shaves 1/600 off the base time.
  castspeed <- function(delve, dex, mota) {
    delve * (1 - ((dex - 60)/600)) * (1 - motaut(mota))
  }
cspeed <- function(delve, dex, mota) {
(delve) * (1 - ((dex - 60)/600)) * (1 - (mota))
}
  # Current dex plus the bonus from a "dexaug1" slider.
  # NOTE(review): `input$dexaug1` has no matching control in the UI defined
  # in this file (only `dexaug2` exists) -- looks like a leftover from an
  # earlier layout; confirm before relying on this helper.
  dex1 <- function() {
    input$currentdex + dexut(input$dexaug1)
  }
output$casttimecurrent <- renderText({
paste("Old casttime: ",
round(
castspeed(
input$delve,
(input$currentdex),
0), 4), " sec")
})
output$casttimenew <- renderText({
paste(
round(
castspeed(
input$delve,
(input$currentdex + dexut(input$dexaug2)),
input$mota2), 3), " sec")
})
output$diff <- renderText({
paste('Diff:',
round(
castspeed(
input$delve,
(input$currentdex + dexut(input$dexaug2)),
input$mota2) - castspeed(
input$delve,
(input$currentdex),
0),4), ' sec')
})
output$pointsspent <- renderText({
paste('Points spent:', costut(input$mota2) + costut(input$dexaug2))
})
output$relativevalue <- renderText({
paste('Cast time decrease per point:', round(
(castspeed(
input$delve,
(input$currentdex + dexut(input$dexaug2)),
input$mota2) -
castspeed(
input$delve,
(input$currentdex),
0)) / (costut(input$mota2) + costut(input$dexaug2)),4))
})
output$percentDecrease <- renderText({
paste('Cast time decrease in %:',
round((1 -
castspeed(
input$delve,
(input$currentdex + dexut(input$dexaug2)),
input$mota2) / castspeed(
input$delve,
(input$currentdex),
0))*100, 4))
})
motaut <- function(m) {
if (m == 0) {
mot = 0.00
} else if(m == 1) {
mot = 0.01
} else if(m == 2) {
mot = 0.02
} else if(m == 3) {
mot = 0.03
} else if(m == 4) {
mot = 0.05
} else if(m == 5) {
mot = 0.07
} else if(m == 6) {
mot = 0.09
} else if(m == 7) {
mot = 0.11
} else if(m == 8) {
mot = 0.13
} else {
mot = 0.15
}
mot
}
dexut <- function(d) {
if (d == 0) {
mot = 0.00
} else if(d == 1) {
mot = 4
} else if(d == 2) {
mot = 8
} else if(d == 3) {
mot = 12
} else if(d == 4) {
mot = 17
} else if(d == 5) {
mot = 22
} else if(d == 6) {
mot = 28
} else if(d == 7) {
mot = 34
} else if(d == 8) {
mot = 41
} else {
mot = 48
}
mot
}
costut <- function(c) {
if (c == 0) {
mot = 0
} else if(c == 1) {
mot = 1
} else if(c == 2) {
mot = 2
} else if(c == 3) {
mot = 4
} else if(c == 4) {
mot = 7
} else if(c == 5) {
mot = 10
} else if(c == 6) {
mot = 15
} else if(c == 7) {
mot = 20
} else if(c == 8) {
mot = 27
} else {
mot = 34
}
mot
}
output$userpanel <- renderUI({
# session$user is non-NULL only in authenticated sessions
if (!is.null(session$user)) {
sidebarUserPanel(
span("Logged in as ", session$user),
subtitle = a(icon("sign-out"), "Logout", href="__logout__"))
}
})
# output$testtext <- renderText(names(plot_prob()))
observeEvent(input$show, {
showModal(modalDialog(
title = NULL,
"bimbo@bimbo.rocks",
easyClose = TRUE,
footer = NULL
))
})
}
shinyApp(ui, server)
|
ffd07f45ce96af7c27b9b0bc8764c7ff4b25caa4
|
a06dc5d2c6a50c626d90978e0939b01be96163d9
|
/scripts/buildSampleTemplate.R
|
153007bcf4915bc1740c9c6d82ecbc3d41c573b6
|
[] |
no_license
|
a-lud/sample-management
|
f398a0d506f7dee77f9a9bd61db030e664db50bb
|
90ef51fe574695b949a8cd6e7dcb805412ebe977
|
refs/heads/main
| 2023-08-31T04:06:21.455239
| 2021-09-21T09:01:38
| 2021-09-21T09:01:38
| 408,292,160
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 362
|
r
|
buildSampleTemplate.R
|
# Generate template for sample upload ---
# Writes a one-line CSV header built from the column names stored in a
# serialized readr col_spec, so later uploads can be checked against the
# expected column types.
buildSampleTemplate <- function(file, rds_coltypes) {
  spec <- readr::read_rds(rds_coltypes)
  header <- paste(names(spec$cols), collapse = ',')
  write(x = header, file = file)
}
|
6d367acb2a6234455d4073bd9477f3bdcc9c9dd7
|
388e05b9ad100c9310b1dc4a207a33d8797862da
|
/LinearRegression_Rcode.R
|
9680560ba38b0a7482e5753dca5613b850da9922
|
[] |
no_license
|
nithingautham/Linear-regression-1
|
c0bd33b42028389ca6c82f97d0526730193b27b8
|
ca5eaeb3973e3dcb4fd58d26371f13b59006dac6
|
refs/heads/master
| 2021-05-04T07:24:08.003413
| 2016-10-11T19:06:38
| 2016-10-11T19:06:38
| 70,626,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,790
|
r
|
LinearRegression_Rcode.R
|
# NOTE(review): setwd() with an absolute local path makes this script
# machine-specific; fine for a personal analysis, not for sharing.
setwd("E:\\Nitin")
## Rcode
library(moments)
library(ggplot2)
library(GGally)
library(MASS)
library(car)
carmileage <- read.csv("carMPG.csv" )
# Checking data redundancy using de-duplication (0 means no duplicate rows)
sum( duplicated( carmileage[,] ))
# Data preparation
# Converting all car names to lower case
carmileage$Car_Name <- tolower(carmileage$Car_Name)
## Converting all continuous variables into numeric type for further EDA analysis
## (non-numeric placeholders such as "?" become NA during this coercion)
carmileage[ , c('MPG','Displacement','Horsepower','Weight','Acceleration','Cylinders')] <-
  sapply(carmileage[ , c('MPG','Displacement','Horsepower','Weight','Acceleration','Cylinders')], function(x) as.numeric( as.character( x) ))
#Checking for NA values
sum(is.na( carmileage))
## Replacing "?"/NA values by median (imputation done before outlier capping)
carmileage$Horsepower[is.na(carmileage$Horsepower)] <- median( carmileage$Horsepower , na.rm = T )
## Checkpoint 1: Business Understanding and Data Understanding
## EDA & Statistics of Continuous variables in dataset
# Each variable gets: summary stats, skewness/kurtosis, boxplot, histogram,
# and boxplot.stats()$out to list candidate outliers.
# MPG -EDA - To understand normality and other features
summary(carmileage$MPG)
boxplot(carmileage$MPG)
skewness(carmileage$MPG)
kurtosis((carmileage$MPG))
ggplot(carmileage , aes(x=carmileage$MPG)) + geom_histogram()
boxplot.stats(carmileage$MPG)$out
# Displacement -EDA - To understand normality and other features
# NOTE(review): xlab says "model year" but the x axis is Displacement.
boxplot(MPG~ as.factor(Displacement),data=carmileage, main="Car Milage Data",
xlab="model year", ylab="Miles Per Gallon")
summary(carmileage$Displacement)
skewness(carmileage$Displacement)
skewness( (carmileage$Displacement))
kurtosis((carmileage$Displacement))
boxplot(carmileage$Displacement)
ggplot(carmileage , aes(x=(carmileage$Displacement))) + geom_histogram()
boxplot.stats(carmileage$Displacement)$out
# Horsepower -EDA - To understand normality and other features
# NOTE(review): xlab says "model year" but the x axis is Horsepower.
boxplot(MPG~ as.factor(Horsepower),data=carmileage, main="Car Milage Data",
xlab="model year", ylab="Miles Per Gallon")
summary(carmileage$Horsepower)
skewness(carmileage$Horsepower)
kurtosis((carmileage$Horsepower))
boxplot(carmileage$Horsepower)
ggplot(carmileage , aes(x=carmileage$Horsepower)) + geom_histogram()
## Outlier treatment - using capping and flooring method
# Values above the 95th / below the 5th percentile are clamped to those bounds.
boxplot.stats(carmileage$Horsepower)$out
carmileage$Horsepower[ carmileage$Horsepower > quantile(carmileage$Horsepower, probs = 0.95)]<-quantile(carmileage$Horsepower, probs = 0.95)
carmileage$Horsepower[ carmileage$Horsepower < quantile(carmileage$Horsepower, probs = 0.05)]<-quantile(carmileage$Horsepower, probs = 0.05)
# Weight - EDA - To understand normality and other features
boxplot(MPG~ as.factor(Weight),data=carmileage, main="Car Milage vs weight data",
xlab="weight", ylab="Miles Per Gallon")
summary(carmileage$Weight)
skewness(carmileage$Weight)
kurtosis((carmileage$Weight))
boxplot(carmileage$Weight)
ggplot(carmileage , aes(x=carmileage$Weight)) + geom_histogram()
boxplot.stats(carmileage$Weight)$out
## Acceleration - EDA - to understand normality and other features
# (x label fixed: it previously said "model year", a copy-paste error.)
boxplot(MPG~ as.factor(Acceleration),data=carmileage, main="Car Milage Data",
xlab="acceleration", ylab="Miles Per Gallon")
summary(carmileage$Acceleration)
skewness(carmileage$Acceleration)
kurtosis((carmileage$Acceleration))
boxplot(carmileage$Acceleration)
# BUG FIX: this histogram previously mapped carmileage_train$Acceleration,
# but carmileage_train is only created later in the script (after the
# train/test split), so the line errored on a clean run. Map the column
# from the plotted data frame instead.
ggplot(carmileage , aes(x=Acceleration)) + geom_histogram()
## Outlier treatment - using capping and flooring method (5th/95th percentiles)
boxplot.stats(carmileage$Acceleration)$out
carmileage$Acceleration[ carmileage$Acceleration > quantile(carmileage$Acceleration, probs = 0.95)]<-quantile(carmileage$Acceleration, probs = 0.95)
carmileage$Acceleration[ carmileage$Acceleration < quantile(carmileage$Acceleration, probs = 0.05)]<-quantile(carmileage$Acceleration, probs = 0.05)
## multi-valued discrete
## Cylinders - To understand normality and other features
carmileage$Cylinders <- as.factor( as.numeric( carmileage$Cylinders ))
summary(carmileage$Cylinders )
ggplot( carmileage , aes( x= carmileage$Cylinders ) ) + geom_bar()
## Observation : cars with Cylinders 4 & 5 have higher MPG than the rest ( 3 ,6 ,8)
boxplot(MPG~Cylinders,data=carmileage, main="Car Milage Data",
xlab="model year", ylab="Miles Per Gallon")
## Model year
carmileage$Model_year <- as.factor(carmileage$Model_year )
## EDA to observe pattern / relation between model year(categorical) and MPG(Quantitative)
## We can clearly observe that older vehicles(2003-2006) have higher mileage than vehicles after 2006
boxplot(MPG~Model_year,data=carmileage, main="Car Milage Data",
xlab="model year", ylab="Miles Per Gallon")
summary(carmileage$Model_year )
ggplot( carmileage , aes( x= carmileage$Model_year ) ) + geom_bar()
## Therefore ,i will be binning/classifying model year data into two groups ( 2003-2005) and (2006-2015)
## Origin
carmileage$Origin <- as.factor(carmileage$Origin )
summary(carmileage$Origin )
ggplot( carmileage , aes( x= carmileage$Origin ) ) + geom_bar()
## EDA to observe pattern / relation between origin(categorical) and MPG(Quantitative)
## We can clearly observe that origin 1 ,2 , 3 have different range and centers
boxplot(MPG~Origin,data=carmileage, main="Car Milage Data",
xlab="origin", ylab="Miles Per Gallon")
## Car name
unique( carmileage$Car_Name)
## As part of data understanding , Association Between Variables is analysed
# Scatterplot matrix of the four main continuous predictors (GGally::ggpairs).
ggpairs(data=carmileage[ ,c( 'Displacement','Horsepower','Weight','Acceleration' )],
columns=1:4)
##### Checkpoint - 2 Data Cleaning and Preparation
## Feature extraction - Extracting Car company name from model names
# The brand is the first whitespace-delimited token of the car name.
carmileage$company_name <- lapply(carmileage$Car_Name , function(x) tolower( strsplit( as.character(x) ," ")[[1]][1]) )
unique( carmileage$company_name)
## Data cleaning : replace all "chevrolet" with chevy and "toyouta" with "toyota"
# Normalize misspelled / aliased brand names to a single canonical spelling.
carmileage$company_name[ carmileage$company_name == "chevrolet" ] <- "chevy"
carmileage$company_name[ carmileage$company_name == "chevroelt" ] <- "chevy"
carmileage$company_name[ carmileage$company_name == "toyouta" ] <- "toyota"
carmileage$company_name[ carmileage$company_name == "mercedes" ] <- "mercedes-benz"
carmileage$company_name[ carmileage$company_name == "vokswagen" ] <- "volkswagen"
carmileage$company_name[ carmileage$company_name == "vw" ] <- "volkswagen"
carmileage$company_name[ carmileage$company_name == "maxda" ] <- "mazda"
carmileage$company_name <- as.factor( as.character( carmileage$company_name ))
## Clasifying company_name into three categories based on their median MPG observed in boxplot below
boxplot(MPG~company_name,data=carmileage, main="Car Milage Data",
xlab="model year", ylab="Miles Per Gallon")
# Median MPG per brand, used as the lookup table for the bucketing below.
company_names_vs_mpg <- as.data.frame( aggregate(MPG ~ company_name , data = carmileage, FUN = median) )
company_names_vs_mpg$MPG <- as.numeric( company_names_vs_mpg$MPG )
## after analysis using boxplot and () company name vs MPG median) reducing the levels from of
## car company name into three buckets related to MPG
# Brands with median MPG < 18 -> low, > 28 -> high, otherwise medium.
carmileage$company_name_three_group<- sapply(carmileage$company_name, function(x) if(company_names_vs_mpg$MPG[company_names_vs_mpg$company_name == x]
< 18 ) "low_mileage_brand_name"
else if(company_names_vs_mpg$MPG[company_names_vs_mpg$company_name == x]
> 28 ) "high_mileage_brand_name"
else "medium_mileage_brand_name")
carmileage$company_name_three_group <- as.factor(carmileage$company_name_three_group)
company_name_three_group_dummy <- as.data.frame( model.matrix(~ company_name_three_group -1 , data = carmileage))
## removing one variable from dummy variable to meet N-1 critiria ,so chose to remove "medium_mileage_brand_name"
carmileage <- cbind( carmileage , company_name_three_group_dummy[,-3] )
## Binning /Classify model years into two groups . high mileage model years and low mileage model years
## due to the timely trend in decrease in mileage observed across time
## viewed in boxplot
boxplot(MPG~Model_year,data=carmileage, main="Car Milage Data",
xlab="model year", ylab="Miles Per Gallon")
carmileage$Model_year <- as.numeric( as.character( carmileage$Model_year))
# 1 = older (2003-2005, higher-mileage) model years, 0 = 2006-2015.
carmileage$Model_year_Two_GROUP <- sapply( carmileage$Model_year , function(x) if( x >=2003 & x<=2005) 1
else if( x >=2006 & x<=2015) 0 )
## converting categorical variables( origin ,Model_year ) into numerical using model.matrix
carmileage$Origin <- as.factor( ( carmileage$Origin) )
origin <- as.data.frame( model.matrix( ~ Origin -1 , data=carmileage ))
unique( carmileage$Origin )
# Drop one dummy level (Origin2) to keep N-1 dummies.
carmileage <- cbind( carmileage , origin[,-2] )
## Binning cylinder values into two grous
## observation made from boxplot of cylinder and mpg
## Cylinders 4 and 5 have high MPG values wheres the rest have low MPG values
## so Cylinders 4 and 5 will be replace by "1" and the rest by "0" ( shortcut of dummy variables)
carmileage$Cylinders <- sapply( carmileage$Cylinders , function(x) if( x ==4 | x==5 ) 1 else 0 )
## Remove categorical variables like origin , model year , car name , company name
carmileage$Model_year <- NULL
carmileage$Origin <- NULL
carmileage$Car_Name <- NULL
carmileage$company_name <- NULL
carmileage$company_name_three_group <- NULL
## converting all variables into numerical before model building
carmileage$MPG <- as.numeric( carmileage$MPG)
carmileage$Cylinders <- as.numeric( carmileage$Cylinders)
carmileage$Displacement <- as.numeric( carmileage$Displacement)
carmileage$Horsepower <- as.numeric( carmileage$Horsepower)
carmileage$Weight <- as.numeric( carmileage$Weight)
carmileage$Acceleration <- as.numeric( carmileage$Acceleration)
carmileage$company_name_three_grouplow_mileage_brand_name <- as.numeric( carmileage$company_name_three_grouplow_mileage_brand_name)
carmileage$company_name_three_grouphigh_mileage_brand_name <- as.numeric( carmileage$company_name_three_grouphigh_mileage_brand_name)
carmileage$Origin1 <- as.numeric( carmileage$Origin1)
carmileage$Origin3 <- as.numeric( carmileage$Origin3)
carmileage$Model_year_Two_GROUP <- as.numeric( carmileage$Model_year_Two_GROUP)
## Splitting data into Training and test set according to business rule of 70:30 respectively
# Seed fixed for a reproducible split.
set.seed(100)
train_data_Set_indexes <- sample(1:nrow(carmileage), size=0.7*nrow(carmileage))
carmileage_train <- carmileage[train_data_Set_indexes,]
carmileage_test <- carmileage[ -train_data_Set_indexes,]
## Model Bulding Starts
# Strategy: full model -> stepAIC -> iteratively drop the predictor with the
# highest VIF / weakest significance until a parsimonious model remains.
model_1 <- lm( MPG ~ . ,data = carmileage_train[,] )
summary(model_1)
## Using stepAIC to apply stepwise variable reduction method
step <- stepAIC(model_1 , direction = "both" )
## to get LM code/formula
step$call
## model_2 after stepwise selection method using stepAIC
model_2 <- lm(formula = MPG ~ Cylinders + Displacement + Horsepower + Weight +
Acceleration + company_name_three_grouphigh_mileage_brand_name +
Model_year_Two_GROUP, data = carmileage_train[, ])
# Summary of model 2
summary(model_2)
# Checking VIF value
vif( model_2 )
## model 3 , in previous model "Displacement" had high VIF above 2 and very low significance
## therefore , Displacement is removed from model 3
model_3 <- lm(formula = MPG ~ Cylinders + Horsepower + Weight +
Acceleration + company_name_three_grouphigh_mileage_brand_name +
Model_year_Two_GROUP, data = carmileage_train[, ])
# Summary of model 3
summary(model_3)
# Checking VIF value
vif( model_3 )
# Horsepower and Weight are highly correlated (multicollinearity check).
cor( carmileage_train$Horsepower , carmileage_train$Weight)
## model 4 , in previous model "Acceleration" had high VIF above 2 and very low significance
## therefore , Acceleration is removed from model 4
model_4 <- lm(formula = MPG ~ Cylinders + Horsepower + Weight +
company_name_three_grouphigh_mileage_brand_name +
Model_year_Two_GROUP, data = carmileage_train[, ])
# Summary of model 4
summary(model_4)
# Checking VIF value
vif( model_4 )
## model 5 , in previous model "Horsepower" had high VIF above 2 and low significance
## and High correlation between Horsepower and Weight
## therefore , Horsepower is removed from model 5
model_5 <- lm(formula = MPG ~ Cylinders + Weight +
company_name_three_grouphigh_mileage_brand_name +
Model_year_Two_GROUP, data = carmileage_train[, ])
# Summary of model 5
summary(model_5)
# Checking VIF value
vif( model_5 )
## model 6 , in previous model "Cylinder" had high VIF above 2 and comparatively low significance
## than weight
## therefore , Cylinder is removed from model 6
model_6 <- lm(formula = MPG ~ Weight +
company_name_three_grouphigh_mileage_brand_name +
Model_year_Two_GROUP, data = carmileage_train[, ])
# Summary of model 6
summary(model_6)
# Checking VIF value
vif( model_6 )
# Predicting test data MPG using final model (model_6); column 1 (MPG) excluded.
Predict_1 <- predict(model_6 , carmileage_test[,-c(1)])
carmileage_test$predicted_mpg <- Predict_1
# Correlation between predicter test MPG and original test MPG
cor( carmileage_test$MPG , carmileage_test$predicted_mpg )
# R squared value meets business requirement of 80 above
cor( carmileage_test$MPG , carmileage_test$predicted_mpg )^2
# Standard lm diagnostic plots for the final model.
plot(model_6)
|
8914e1a4467bcb746212b27344378cfbcd8898d0
|
e82c2b2cee78f6a599e432b41921ebd72e7dfd03
|
/inputs.R
|
c1c34bc96e6201336a558c4768819c37df3abb6a
|
[] |
no_license
|
BU-IE-360/spring20-berkayakbas
|
1449cf9b24fb0f71090df1fd2262738af70634af
|
8415ed61223754147dc1eb3175d1041177eadbd4
|
refs/heads/master
| 2021-01-06T14:04:45.200979
| 2020-07-05T17:13:41
| 2020-07-05T17:13:41
| 241,353,284
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 152
|
r
|
inputs.R
|
# Forecasting configuration inputs ----

# How many products to forecast.
forecasted_product_number <- 2

# Number of days elapsed since 2020-03-13; used to keep only recent data.
filter_last_n_days <- as.numeric(as.Date(Sys.time()) - as.Date("2020-03-13"))

# Length (in periods) of the hold-out test window.
test_period <- 2

# Whether model fitting is allowed to run in parallel.
parallel_processing_on <- TRUE
|
c97280a8e05c77f330d22224fa1b447d0abafaf3
|
f84ad3a13ef2c0e60e0a0cca0c9b9795efd3c80a
|
/man/Plot.vars.Rd
|
0ca634641327cb86ea9b55bf9ffc69e84649a66c
|
[] |
no_license
|
baccione-eawag/EawagSchoolTools
|
4906194eeb8e83dacb1f44ed66ed379427553d39
|
af79a45a72ca4c1052ac9f2977cffc764fba95a8
|
refs/heads/master
| 2023-08-04T20:53:57.532002
| 2021-09-27T15:24:28
| 2021-09-27T15:24:28
| 409,323,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,404
|
rd
|
Plot.vars.Rd
|
\name{Plot.vars}
\alias{Plot.vars}
\title{Plotting functions (eawagSummerSchoolTools)}
\description{
Function to plot several sets of results.
}
\usage{
Plot.vars(vars,L,ncol=NA,mar=NA,ylim=list(),markers=F,headers=list(),
xlab="",ylab="",pos="topright")
}
\arguments{
\item{vars}{matrix or data frame with variables, and variable information encoded in row names}
\item{L}{observation layout}
\item{ncol}{optional number of columns of sub-panels of the plot}
\item{mar}{optional specification of margins in the form c(bottom,left,top,right)}
\item{ylim}{optional named (by variable name) list of limits of the y axes}
\item{markers}{if TRUE plot markers instead of lines}
\item{headers}{optional named (by variable name) list of headers of the plots}
\item{xlab}{optional label of the x axis}
\item{ylab}{optional label of the y axis}
\item{pos}{position of legend (only if more than one variable)}
}
\details{
Results should be stored in a data frame or matrix, with the corresponding variable information encoded in the row names (for example C_M_1 for variable C_M at time 1).
}
\value{
Plot of all variables as a function of the independent variable.
}
\seealso{
\code{\link{Plot.res}}, \code{\link{contourpdf}}, \code{\link{Plot.chains}}, \code{\link{Plot.margs}}
}
\author{
Peter Reichert <peter.reichert@eawag.ch>
}
|
af07943b37fb65842c641d062a431535aedc9c69
|
7cdc5ce9fe9a7cff542d495d21955891b7042b3f
|
/summary.mars.R
|
b0f4cf015c9f678aaeae945454db38cfeaa1b1e6
|
[] |
no_license
|
utoor1705/Multivariate-Adaptive-Regression-Splines
|
7ba7a8d7d945de2f0ffd02df2943177416d99370
|
b31dbab0620ef76be0bdab7dcdeddc7f6128dfc1
|
refs/heads/main
| 2023-05-12T13:33:21.002885
| 2021-05-14T17:48:16
| 2021-05-14T17:48:16
| 361,565,149
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
summary.mars.R
|
# summary() method for "mars" objects ----
#
# Prints a human-readable description of every basis function (the variable,
# hinge sign, and split value of each component) followed by the fitted
# coefficients.
#
# @param object A fitted mars model with elements `x_names` (predictor
#   names), `coefficients` (named numeric vector), and `splits` (a list of
#   matrices with columns "v", "s", "t"; element 1 is the intercept basis
#   and row 1 of each matrix is a placeholder, so both are skipped).
# @param ... Unused; accepted for compatibility with the summary() generic.
summary.mars <- function(object, ...) {
  xn <- object$x_names
  bn <- names(object$coefficients)
  ss <- object$splits
  cat("Basis functions:\nB0:\n Intercept\n")
  # seq_along()[-1] (instead of 2:length) is safe when only the intercept
  # basis exists: the loop body is skipped rather than indexing 2:1.
  for (i in seq_along(ss)[-1]) {
    cat(paste0(bn[i], ":\n"))
    # likewise seq_len()[-1] guards against matrices with a single row
    for (j in seq_len(nrow(ss[[i]]))[-1]) {
      cat(paste0(" Component ", j - 1, ": variable ", xn[ss[[i]][j, "v"]], ";"))
      cat(paste0(" sign ", ss[[i]][j, "s"], ";"))
      cat(paste0(" split at value ", ss[[i]][j, "t"], "\n"))
    }
  }
  cat("\n\nCoefficients:\n")
  print(object$coefficients)
}
|
89989499c3cce31c98fd308a3c84241a4e9bd557
|
d6284537be05b1835e6e81d50872f93d9c7e68dc
|
/CO2_extract_graph.R
|
83e769c00a3a9fca637b7f48663fed1da20c0d80
|
[] |
no_license
|
ArminUCSD/CO2detect-Rcode
|
7662a8acd5c601eb01fbad20a95e11cd2039e611
|
5057909306528903d48570237bbb1abaad35d71d
|
refs/heads/master
| 2020-12-09T01:15:44.423857
| 2020-02-12T06:11:52
| 2020-02-12T06:11:52
| 233,148,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,270
|
r
|
CO2_extract_graph.R
|
# Extract graph from Peters 2017 - Fig 2
# Interactive digitization script: ReadAndCal()/DigitData() require the user
# to click calibration points and data points on the displayed image, so this
# script cannot run unattended.
library(digitize)
file.name = 'Peters2017_Fig2_edited.png'
#---------------------------------------------------------------------
# Past time series
# Click 4 calibration points, then the red and blue series in the figure.
cal = ReadAndCal(file.name)
series1 = DigitData(col = 'red')
series2 = DigitData(col = 'blue')
# Map clicked pixel coordinates to data coordinates (x: 1959-2017, y: 0-30).
series1.cal = Calibrate(series1, cal, 1959, 2017, 0, 30)
series2.cal = Calibrate(series2, cal, 1959, 2017, 0, 30)
# Fix time axis
# Replace the digitized (noisy) x values with the known annual grid.
t = 1959:2017
observations = series1.cal$y
reconstructed = series2.cal$y
save(t, observations, reconstructed, file = 'Peters2017_Fig2.Rdata')
plot(t, reconstructed, type="l", col='blue')
lines(t, observations, col='red')
# Imbalance between the two digitized series.
imb = observations - reconstructed
mean(imb)
sd(imb)
# Save as text file
write.table(data.frame(t, observations, reconstructed), row.names = F, file = 'Peters2017_Fig2_past.txt')
#---------------------------------------------------------------------
# Future scenarios
# Re-digitize the three scenario curves (blue/red/green) over 2017-2040.
cal = ReadAndCal(file.name)
series1 = DigitData(col = 'blue')
series2 = DigitData(col = 'red')
series3 = DigitData(col = 'green')
series1.cal = Calibrate(series1, cal, 2017, 2040, 0, 30)
series2.cal = Calibrate(series2, cal, 2017, 2040, 0, 30)
series3.cal = Calibrate(series3, cal, 2017, 2040, 0, 30)
# Force a common starting x by averaging the first (x) column of the three
# calibrated series. NOTE(review): this assigns the whole x column, not just
# the first point - presumably intentional since only $y is used below; confirm.
series1.cal[1] = series2.cal[1] = series3.cal[1] = (series1.cal[1] + series2.cal[1] + series3.cal[1])/3
# Scenario list keyed by growth rate ("1", "0", "-1" percent per year).
future = list(t = c(2017, 2020, 2030, 2040), '1'=series1.cal$y, '0'=series2.cal$y,
'-1'=series3.cal$y)
# series1 = digitize(file.name)
save(future, file = 'Peters2017_Fig2_future.Rdata')
# Fit polynomial
load(file = 'Peters2017_Fig2_future.Rdata')
t.in = future$t
y.in = cbind(future$'1', future$'0', future$`-1`)
# Fit functional form
# Quadratic fit in t; lm with a matrix response fits all three scenarios at once.
fit = lm(y.in ~ t.in + I(t.in^2))
t = 2017:2040
tt = cbind(rep(1, length(t)), t, t^2)
y = tt %*% fit$coefficients
# Check fit
matplot(t.in, y.in)
matlines(t, y)
tmp = data.frame(t, y); names(tmp) = c('t', '1%', '0%', '-1%')
write.table(tmp, row.names = F, file = 'Peters2017_Fig2_future.txt')
# Extrapolate to longer time frame
t = 2017:2050
tt = cbind(rep(1, length(t)), t, t^2)
y = tt %*% fit$coefficients
matplot(t, y, type='l')
matpoints(t.in, y.in)
tmp = data.frame(t, y); names(tmp) = c('t', '1%', '0%', '-1%')
write.table(tmp, row.names = F, file = 'Peters2017_Fig2_future2050.txt')
|
260c86a05b96f2318b3b348a24bc47c865b5f3bf
|
9b7dea6c59b2ca3cb68d73e4af65783d20314810
|
/cx2scratch2.2.R
|
e232ed737f447b4de792e3a908a2aca10c826c23
|
[] |
no_license
|
chriseshleman/cx2
|
57213ba60c644ed4745e9a0e4c59c01297029986
|
55ae0bda43b8b4eed4104de8d302ad3c3949768e
|
refs/heads/master
| 2020-09-04T12:36:00.233652
| 2019-12-30T02:37:25
| 2019-12-30T02:37:25
| 219,733,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,322
|
r
|
cx2scratch2.2.R
|
# Setup ----
# NOTE(review): setwd(), rm(list = ls()) and options(warn = -1) in a script
# are discouraged (they clobber the caller's session and hide warnings);
# kept here to preserve the author's workflow.
setwd("~/Dropbox/Work and research/Port Authority/cx2")
library(dplyr)
library(ggplot2)
library(beepr)
library(tidyr)
library(tidyverse)
library(tree)
library(randomForest)
library(rpart)
library(rpart.plot)
library(rsample)
library(caret)
# FIX: classInt provides classIntervals(), which is called below but was
# never attached, so the script failed on a fresh session.
library(classInt)
rm(list = ls()) # clear global environment
cat("\014") # clear the console
options(warn=-1) # suppress annoying warnings
pmiss = function(x){sum(is.na(x))/length(x)*100}
# This is Part 2, trees (basic and advanced). Part 1 was an NA analysis.
#1. Load data and munge - classify
#2. Tree JDP (and interpret)
#3. Cross-validate and prune (and interpret)
#4. Extension (bagging, random forest, boosting) (and interpret)
# Complete 1-4 above with discussions.
# Catalog visualizations and save to folder as pdf or png.
# Pick 2 key visualizations and toss the rest, and the summary outputs, into an appendix.
# Visualization from page 311 and a variable importance plot from website (listed below)
###1. Load data and munge. Predictor is almost continuous, so don't classify.
# JD Power 2018 survey, NAs already removed upstream.
jdp = read.csv("./JDPower18_noNA.csv")
jdp.index = jdp$X
jdp$X = NULL
# Density of the response variable, saved to disk.
sat = ggplot(jdp, aes(Overall.Satisfaction.Index, fill="gray")) +
geom_density(alpha = 0.5) +
theme(legend.position = "none") +
labs(x = "Overall Satisfaction", y = "")
ggsave(sat, filename = "./Satisfaction.png", width = 5, height = 4, dpi = 300, units = "in", device='png')
rm(sat)
# Natural-break candidates for a binary classification of the response
# (requires the classInt package).
classIntervals(jdp$Overall.Satisfaction.Index, n = 2, style = "kmeans")
classIntervals(jdp$Overall.Satisfaction.Index, n = 2, style = "fisher") # jenks substitute for larger data sets
# both kmeans and fisher (jenks substitute) give us around 730 as a natural breaking point.
# but I won't classify - not sure what the analysis would gain aside from interpretability.
# jdp$high = ifelse(jdp$Overall.Satisfaction.Index > 730, "high", "low") # if was to classify
names(jdp)
# Subtract airport, state, RS1_96.Verbatim, RS1_97.Verbatim, MRP_SURVEY_DP_STACK_ID, Zip.Postal.code,
# Departure.flight...Travel.Dates, Arrival.flight...Travel.Dates, X, What.food.Beverages.want.to.find,
# Regional.art..culture..or.historical.displays, Robots..tablet.interfacing..or.other.new.technology, weight
# Drop free-text / identifier columns that cannot serve as predictors.
nombre = names(jdp) %in% c("airport","state", "RS1_96.Verbatim", "RS1_97.Verbatim", "MRP_SURVEY_DP_STACK_ID", "Zip.Postal.code",
"Departure.flight...Travel.Dates", "Arrival.flight...Travel.Dates", "X", "What.food.Beverages.want.to.find",
"Regional.art..culture..or.historical.displays", "Robots..tablet.interfacing..or.other.new.technology", "weight")
jdp = jdp[!nombre]
# More cleaning ...
table(jdp$Took.transportation.to.the.gate)
table(jdp$Clarity.of.signs.directions.inside.the.terminal)
# Convert some ordinal variables to binary for interpretative ease.
jdp$clarity.signs = ifelse(jdp$Clarity.of.signs.directions.inside.the.terminal < 7,"unclear","clear")
jdp$Clarity.of.signs.directions.inside.the.terminal = NULL
jdp$no.merch.srvcs = jdp$Didn.t.purchase.any.merchandise.services
jdp$Didn.t.purchase.any.merchandise.services = NULL
# Drop the individual purchase-category indicators.
jdp$Food.beverages.purchased=NULL
jdp$Books.Magazines=NULL
jdp$Clothing=NULL
jdp$Sunglasses=NULL
jdp$Electronics=NULL
jdp$Toiletries=NULL
jdp$Other.merchandise=NULL
jdp$Other.services=NULL
# drop the 99s - this drops the population from 40k to 16k. (Try using a loop later)
# 99 appears to be the survey's "no answer" code; rows with 99 in any of
# these rating columns are excluded.
jdp = subset(jdp,jdp$Took.transportation.to.the.gate!=99 & #jdp$Clarity.of.signs.directions.inside.the.terminal!=99 &
jdp$Cleanliness.of.terminal.concourses.and.hallways!=99 &
jdp$Comfort.in.airport..e.g...seating..roominess..etc..!=99 &
jdp$Availability.of.activity.entertainment.options.in.the.airport!=99 &
jdp$Variety.of.food..beverage..and.retail.options!=99 &
jdp$Cleanliness.of.terminal.restrooms!=99 &
jdp$The.signage.directions.were.clear.easy.to.understand!=99 &
jdp$There.were.enough.signs.directions.throughout.the.terminal!=99 &
jdp$Recent.completed.renovations.or.new.building.s.!=99 &
jdp$I.was.able.to.clearly.hear.and.understand.the.announcements.within.the.gate!=99 &
jdp$The.gate.area.was.clean!=99 &
jdp$There.were.enough.seats.at.the.gate!=99 &
jdp$The.gate.area.was.comfortable!=99 &
jdp$There.were.enough.electrical.outlets.for.charging.phones.laptops!=99 &
jdp$The.gate.area.was.worn.out.or.outdated!=99)
jdp$Terminal.Facility.Index=NULL
# The overall-experience items correlate strongly with the response; they are
# near-duplicates of the target and are dropped below to avoid leakage.
cor(jdp$Overall.Satisfaction.Index, jdp$How.would.you.rate.your.overall.experience.at.Airport.Evaluated.)
samp = sample_n(jdp, 1000, replace = FALSE)
plot(samp$Overall.Satisfaction.Index, samp$How.would.you.rate.your.overall.experience.at.Airport.Evaluated.)
par(mfrow=c(1,2))
plot(jdp$How.would.you.rate.your.overall.experience.at.Airport.Evaluated., jdp$Overall.Satisfaction.Index)
plot(jdp$Overall.terminal.facilities.experience, jdp$Overall.Satisfaction.Index)
cor(jdp$How.would.you.rate.your.overall.experience.at.Airport.Evaluated., jdp$Overall.Satisfaction.Index)
cor(jdp$Overall.terminal.facilities.experience, jdp$Overall.Satisfaction.Index)
par(mfrow=c(1,1))
jdp$Overall.terminal.facilities.experience=NULL
jdp$How.would.you.rate.your.overall.experience.at.Airport.Evaluated.=NULL
jdp$OSAT.Zones.of.Satisfaction=NULL #I just don't know what this is
# Brief exploration
ggplot(jdp, aes(Overall.Satisfaction.Index,Comfort.in.airport..e.g...seating..roominess..etc..)) +
geom_point(size = 1) + #, alpha = .05
geom_smooth() #method="loess" #method="lm"
jdp$bath = ifelse(jdp$Cleanliness.of.terminal.restrooms>7,"clean_bathroom","nasty")
ggplot(jdp, aes(Overall.Satisfaction.Index,Comfort.in.airport..e.g...seating..roominess..etc..,color=jdp$Cleanliness.of.terminal.restrooms)) + #bath
geom_point(size = 1) + #, alpha = .05
geom_smooth() #method="loess" #method="lm"
### 2. Trees
# If assuming a non-linear relationship between satisfaction and predictors.
#http://www.di.fc.ul.pt/~jpn/r/tree/tree.html#classification-trees
#2a. Grow a tree
#2b. Prune to get a sequence of subtrees. Use cross-validation to chose an alpha ...
# ... and pick the subtree corresponding to that alpha.
#2c.
# ISL ch. 8 and https://uc-r.github.io/regression_trees
# Training and test data
set.seed(12345)
# NOTE(review): jdp$high was only ever created in a commented-out line above,
# so this NULL assignment is a harmless no-op.
jdp$high=NULL
jdp_split = initial_split(jdp, prop = .7)
jdp_training = training(jdp_split)
jdp_test = testing(jdp_split)
# Tree
# Regression tree on the full training set, saved to PDF.
tree.train = tree(Overall.Satisfaction.Index~., jdp_training)
summary(tree.train)
pdf("./Tree.pdf", width=6, height=4.5)
plot(tree.train)
text(tree.train, pretty=0, cex=0.6)
dev.off()
### 3. Cross-validation
cv.train = cv.tree(tree.train)
options(scipen=999)
pdf("./Tree_cross_validated.pdf", width=6, height=4.5)
plot(cv.train$size, cv.train$dev, type="b")
dev.off()
# Three variables get used as predictors in the training tree, some of them more than once.
# The most important variable that has the largest reduction in SSE is general comfort.
# The top split assigns travelers reporting "comfort" of 7 or less to the left; they account for 38 percent of travelers.
# Their predicted index score is 640.
# Those reporting 8 or above "comfort" average around 882.
# Those reporting high comfort can be further split into those with a perfect 10 comfort level (961) and
# those with 8-9, for which restroom cleanliness was a significant predictor.
# Of the 38% with weak "comfort" scores, roughly half were adequately pleased with the concourses and hallways and averaged 719 points, just below the average overall.
# The other half might have been comfortable with seating availability but were frequently unsatisfied with activity and entertainment options in the airport as a whole.
# A fraction (3%) were unsatisfied with comfort level, cleanliness and seating, averaging 413.
# In many cases you prune the tree
prune.train = prune.tree(tree.train, best=5)
plot(prune.train)
text(prune.train,pretty=0, cex=0.75)
# But cross-validation did the trick, so use the unpruned tree
predict.jdp = predict(tree.train,newdata=jdp_test)
plot(predict.jdp,jdp_test$Overall.Satisfaction.Index)
abline(0,1)
### 4a. Bagging (check the mtry and ntree functions)
# NOTE(review): these columns are dropped from jdp_training only, not from
# jdp_test - presumably fine because predict() ignores extra columns; confirm.
jdp_training$clarity.signs = NULL
jdp_training$bath = NULL
bag.train = randomForest(Overall.Satisfaction.Index~., data=jdp_training, mtry=8) #, ntree=15
predict.bag = predict(bag.train,newdata=jdp_test)
# Performance check
# Test-set MSE for the plain tree vs the bagged ensemble.
mean((predict.jdp-jdp_test$Overall.Satisfaction.Index)^2)
mean((predict.bag-jdp_test$Overall.Satisfaction.Index)^2)
### 4b. Random forest
# (Question - is it dumb to try and predict like this using this many ordinal variables as predictors?)
rf.jdp = randomForest(Overall.Satisfaction.Index~., data=jdp_training, mtry=5, importance=T)
predict.rf = predict(rf.jdp,newdata=jdp_test)
mean((predict.rf-jdp_test$Overall.Satisfaction.Index)^2)
### 4c. Variable importance, if there's time.
importance(rf.jdp)
varImpPlot(rf.jdp)
varImpPlot(rf.jdp, type=2)
# Consider something pretty like this https://topepo.github.io/caret/variable-importance.html
# Question: what is a tree an obvious alternative to?
# How might I expect that alternative to perform?
|
b4e090155a9728994e724d2e0fcf505dbbfdd914
|
9549d0226a9ef21c86d450519c114db595889f9c
|
/R/get_data.R
|
e0e1455261310271ac4fc6afe6df570f3949745b
|
[
"Apache-2.0"
] |
permissive
|
jimtyhurst/stackR
|
08f671f7908f39416fd7b4f1f942514e34d6aa36
|
e1ef0cd047f1b556711b97679f9fbaf36e874b9c
|
refs/heads/master
| 2020-05-05T10:16:34.158581
| 2019-04-28T21:54:58
| 2019-04-28T21:54:58
| 179,937,734
| 0
| 0
|
NOASSERTION
| 2019-04-28T21:54:59
| 2019-04-07T08:31:52
|
R
|
UTF-8
|
R
| false
| false
| 474
|
r
|
get_data.R
|
# Functions to load Stack Exchange data.
library(readr)
library(tibble)
#' Retrieve Stack Exchange data (placeholder).
#'
#' @return A one-row tibble; real data retrieval is not implemented yet.
#' @export
get_stack_exchange_data <- function() {
  placeholder <- list(x = "not", y = "implemented", z = "yet")
  as_tibble(placeholder)
}
# Parse the bundled Stack Exchange Tags.xml shipped with the package.
# The commented-out lines sketch the eventual download path (fetch the
# archive.org dump to a tempfile and read Tags.xml from it); for now the
# function only parses the extdata copy installed with stackR.
download_data <- function() {
# temp <- tempfile()
# download.file("https://archive.org/download/stackexchange/stackoverflow.com-Tags.7z", temp)
# data <- read.table(unz(temp, "Tags.xml"))
# unlink(temp)
XML::xmlParse(system.file("extdata", "Tags.xml", package = "stackR"))
}
|
a720cd396691986da8b00c50273a3b4c7569b5be
|
12e66d45837f4ab8e505e6b6adce840e70063356
|
/titanic_test_v2.R
|
30699c26d234adbf2b1631fe0cef508949e4c20f
|
[] |
no_license
|
lukezheng2/Titanic
|
81a6aaeab973a995f21eea1b9194916c81c87c2a
|
a062e6f6128e91ff1ac2ee63c5795c0fd52ec028
|
refs/heads/master
| 2020-05-02T17:26:52.044762
| 2019-03-28T00:59:17
| 2019-03-28T00:59:17
| 178,098,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,884
|
r
|
titanic_test_v2.R
|
library(tidyverse)
library(magrittr)
library(data.table)
library(MLmetrics)
library(stringr)
library(glmnet)
# Preparing the data -------------------------------------------------------------------
train <- fread("../train.csv")
test <- fread("../test.csv")
# Test set lacks the target; add a dummy Survived column and align the
# Embarked factor levels with the training data.
test <- test %>% mutate(Survived = 0, Embarked = factor(Embarked, levels = c("", "C", "Q", "S")))
# Check the number of NAs in each variable:
#train %>% sapply(function(x) sum(is.na(x)))
# Feature engineering shared by train and test sets: flag/impute missing
# Age, derive a title category from the passenger name, and make Pclass a
# factor.
new_base <- function(base){
base %>%
mutate(# handling NAs
FLAG_Age = ifelse(is.na(Age),"0","1"),
Age = ifelse(is.na(Age),0,Age),
# deriving the title from the name
# NOTE(review): str_detect uses regex, so the "." in "Mr." matches any
# character ("Mr." also matches "Mrs."). The ordering below makes the
# Mrs/Miss assignments overwrite that, so the result is correct, but the
# patterns rely on that order - consider fixed() if this is ever reordered.
Name_level = ifelse(str_detect(Name, "Mr."), "Mr", "Others"),
Name_level = ifelse(str_detect(Name, "Mrs."), "Mrs", Name_level),
Name_level = ifelse(str_detect(Name, "Miss."), "Miss", Name_level),
# Pclass as factor
Pclass = as.factor(Pclass))
}
new_train <- new_base(train)
# Logistic regression model -------------------------------------------------------------------
# NOTE(review): the objects are named `glm` and `step`, shadowing the base
# functions of the same name for the rest of the session.
glm <- glm(Survived ~ . , family = binomial, data = new_train %>% select(-PassengerId, -Name,- Ticket, -Cabin, -Fare))
summary(glm)
step <- step(glm, direction = "backward", trace = F)
summary(step)
# In-sample predictions, classified with a 0.4 probability cutoff.
pre <- predict(glm, newdata = new_train, type = "response") %>% as.data.frame %>%
mutate(Survived = ifelse(.>.4,"S","N"))
# Performance metrics ("desempenho") for a prediction data frame: KS
# statistic, accuracy, and the confusion table against the true labels.
# NOTE(review): the default `train` argument re-reads ../train.csv from disk
# on every call; pass `train` explicitly to avoid the I/O.
# NOTE(review): ACC sums cells 1 and 4 of the flattened 2x2 table, i.e. it
# assumes both predicted and true labels take exactly two values in a fixed
# order - confirm before reusing with other label encodings.
desempenho <- function(pre, train=fread("../train.csv")){
KS = KS_Stat(pre$Survived, train$Survived)
A = table(pre$Survived, train$Survived)
ACC = table(pre$Survived, train$Survived) %>% as.data.frame() %>% {.[c(1,4),"Freq"] %>% sum}/nrow(train)
return(list(KS = KS, ACC = ACC, TABLE = A))
}
#desempenho(pre)
# Score the test set with the logistic model (0.4 cutoff) and write the
# Kaggle submission file.
resp <- new_base(test) %>%
bind_cols(Prob = predict(glm, newdata = new_base(test), type = "response")) %>%
mutate(Survived = ifelse(Prob>.4,1,0)) %>%
select(PassengerId, Survived)
resp %>% write.csv(file = "../resp_logistic_regression.csv", row.names = F)
#ACC = 0.76076
# Lasso & Ridge-------------------------------------------------------------------
# Fits a cross-validated glmnet logistic model and picks the lambda whose
# hold-out KS statistic is highest.
# TODO (original note: "Falta arrumar" = needs fixing)
elasticnet_model <- function(base, alpha = 1){ #Falta arrumar
base <- base %>% select(-PassengerId, -Name,- Ticket, -Cabin, -Fare)
# train/test split (90/10)
set.seed(123456)
n <- sample(1:nrow(base), size = trunc(0.9*nrow(base)), replace = F)
# design matrix without intercept column, columns standardized
matriz = model.matrix(Survived ~. , base)[,-1] %>% apply(2, scale)
matriz_treino = matriz[n,]
matriz_teste = matriz[-n,]
fit = cv.glmnet(x = matriz_treino, y = base[n,]$Survived, alpha = alpha, family = "binomial")
# NOTE(review): despite the name, `acc` stores the KS statistic per lambda
# (see the {.$KS} extraction below), not accuracy.
acc=array()
for(i in 1:length(fit$lambda)){
#cat(i, "\n")
acc[[i]] = base[-n,] %>%
mutate(Prob = fit %>% predict(s=fit$lambda[i], newx=matriz_teste, type= "response") %>% c()) %>%
mutate(Survived = ifelse(Prob>.59,1,0)) %>%
select(Survived) %>%
desempenho(train = base[-n,]) %>% {.$KS}
}
lambda_acc = fit$lambda[which.max(acc)]
# NOTE(review): warning() is used here as an informational message;
# message() would be the conventional choice.
warning("Alpha = ",alpha, "\n Alpha = 1 -> Lasso \n Alpha = 0 -> Ridge")
return(list(fit = fit, lambda_acc = lambda_acc, acc=acc))
}
# Fit both penalized models and inspect the selected coefficients.
lasso_model = elasticnet_model(new_train)
ridge_model = elasticnet_model(new_train, alpha = 0)
coef(lasso_model$fit, s=lasso_model$lambda_acc)
coef(ridge_model$fit, s=ridge_model$lambda_acc)
# Score a data set with a fitted elasticnet_model() result, returning
# PassengerId plus the 0/1 Survived prediction (0.59 probability cutoff,
# matching the cutoff used during lambda selection).
predict_elasticnet <- function(base,fit){
base2 <- base %>% select(-PassengerId, -Name,- Ticket, -Cabin, -Fare)
#base %>% sapply(function(x) unique(x)) %>% View
# same design-matrix construction and scaling as in elasticnet_model()
matriz = model.matrix(Survived ~. , base2)[,-1] %>% apply(2, scale)
P = base %>%
bind_cols(Prob = fit$fit %>% predict(newx = matriz, s=fit$lambda_acc, type = "response") %>% c()) %>%
mutate(Survived = ifelse(Prob>.59,1,0)) %>%
select(PassengerId, Survived)
return(P)
}
# In-sample performance, then submission files for both penalized models.
predict_elasticnet(new_train, lasso_model) %>% desempenho()
predict_elasticnet(new_train, ridge_model) %>% desempenho()
predict_elasticnet(new_base(test), lasso_model) %>%
  write.csv(file = "../resp_logistic_regression_lasso3.csv", row.names = F)
#ACC -.5 = 0.78468
#ACC -.59 = 0.79425
predict_elasticnet(new_base(test), ridge_model) %>%
  write.csv(file = "../resp_logistic_regression_ridge.csv", row.names = F)
#ACC -.59 = 0.79425
# Decison Tree and Random Forest ------------------------------------------
# Fit a conditional-inference classification tree for Survived.
# Identifier / free-text columns are dropped, character predictors become
# factors, and the fitted tree is plotted before being returned.
#   base - data frame with Survived plus predictor columns
# Returns the fitted party::ctree object.
make_tree <- function(base){
  base <- base %>%
    dplyr::select(-PassengerId, -Name, -Ticket, -Cabin, -Fare) %>%
    # FIX: funs() is deprecated in dplyr; pass the function directly.
    mutate_if(is.character, as.factor) %>%
    mutate(Survived = factor(Survived))
  #set.seed(1234)
  #n <- sample(1:nrow(base), size = trunc(0.9*nrow(base)), replace = F)
  tree <- party::ctree(Survived ~. , data = base)
  plot(tree)
  return(tree)
}
tree <- make_tree(new_train)
# Predict Survived for `base` with a fitted tree or random forest `model`.
# Applies the same preprocessing as make_tree/make_forest so factor levels
# line up with training. The predicted factor has integer codes 1/2, so
# `Survived - 1` yields the 0/1 labels expected by the submission format.
#   base  - data frame with PassengerId, Survived and predictors
#   model - object accepted by predict() (ctree or randomForest)
# Returns a data frame with PassengerId and 0/1 Survived.
predict_tree <- function(base, model){
  base2 <- base %>%
    dplyr::select(-PassengerId, -Name, -Ticket, -Cabin, -Fare) %>%
    # FIX: funs() is deprecated in dplyr; pass the function directly.
    mutate_if(is.character, as.factor) %>%
    mutate(Survived = factor(Survived))
  #predict(model, base2) %>% c()
  P = base %>%
    dplyr::select(PassengerId) %>%
    bind_cols(Survived = predict(model, base2) %>% c()) %>%
    mutate(Survived = Survived-1)
  return(P)
}
# In-sample performance of the tree, then write its test-set predictions.
predict_tree(new_train, tree) %>% desempenho()
predict_tree(new_base(test), tree) %>%
  write.csv(file = "../resp_tree", row.names = F)
# Fit a random forest for Survived with the same preprocessing as
# make_tree. Plots the OOB error curve and returns the randomForest object.
#   base - data frame with Survived plus predictor columns
make_forest <- function(base){
  base <- base %>%
    dplyr::select(-PassengerId, -Name, -Ticket, -Cabin, -Fare) %>%
    # FIX: funs() is deprecated in dplyr; pass the function directly.
    mutate_if(is.character, as.factor) %>%
    mutate(Survived = factor(Survived))
  titanic.rf=randomForest::randomForest(Survived ~ . , data = base , subset = c(1:nrow(base)))
  plot(titanic.rf)
  return(titanic.rf)
}
# Fit the random forest, check in-sample performance, write predictions.
random_forest <- make_forest(new_train)
predict_tree(new_train, random_forest) %>% desempenho()
predict_tree(new_base(test), random_forest) %>%
  write.csv(file = "../resp_rf", row.names = F)
|
544d400067388177650d97c0f04cc710fbf226a1
|
073e4e7c9c2f4822e798f4a56e4ff90b11b5a85c
|
/Code/table_to_maf.R
|
6599fe8438f406b4fcdbda513b7a0015741148fc
|
[] |
no_license
|
peteryzheng/RET_ACCESS
|
2cff82bd261beff926affd24798ac02ef2b8775a
|
ac4e3544d85c90ef723aa3dc433d468515020133
|
refs/heads/master
| 2022-12-13T08:56:32.229201
| 2020-08-06T04:19:45
| 2020-08-06T04:19:45
| 285,464,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,469
|
r
|
table_to_maf.R
|
library(data.table)
library(tidyverse)
# Convert a wide fillout/genotyping table into long (MAF-like) format, one
# row per variant x tumor sample, with parsed alt/total read counts.
#   tmp.table    - wide variant table; per-sample columns are named
#                  "<barcode>___duplex.called" / "<barcode>___total"
#   sample.table - sample sheet with Sample_Type, Sample_Barcode and
#                  column.names columns
# Returns a data.table including Tumor_Sample_Barcode, call_confidence,
# t_alt_count and t_total_count.
table_to_maf = function(tmp.table,sample.table){
  # tmp.table = fillouts.dt
  # sample.table = sample.sheet
  # tmp.table = ret.006.table
  # sample.table = ret.006.sample.sheet
  # extract information for plasma and tumor
  tmp.table = data.table(tmp.table)
  # For each duplex (plasma) sample, collapse call status and read counts
  # into one "status | counts" string column named after the barcode.
  # NOTE: lapply is used only for its side effect -- := adds columns to
  # tmp.table by reference.
  lapply(sample.table[Sample_Type %in% c('duplex')]$Sample_Barcode,function(y){
    sample.call.status.colname = paste0(y,'___duplex.called')
    sample.af.colname = paste0(y,'___total')
    tmp.table[,eval(y) := paste0(get(sample.call.status.colname),' | ',get(sample.af.colname))]
  })
  # For each tumor sample, classify the variant relative to DMP annotation
  # and duplex coverage, again encoded as "status | counts".
  lapply(sample.table[Sample_Type %in% c('Tumor')]$column.names,function(y){
    tmp.table[,eval(gsub('___.*.','',y)) := paste0(case_when(
      !is.na(get('DMP')) & get(paste0(sample.table[Sample_Type %in% c('duplex')]$Sample_Barcode[1],'___duplex.called')) != 'Not Covered' ~ 'Called',
      !is.na(get('DMP')) & get(paste0(sample.table[Sample_Type %in% c('duplex')]$Sample_Barcode[1],'___duplex.called')) == 'Not Covered' ~ 'Called (but not covered in ACCESS)',
      is.na(get('DMP')) & as.numeric(gsub('/.*','',get(y))) > 3 ~ 'Genotyped',
      TRUE ~ 'Not Called'
    ),' | ',get(y))]
  })
  # Drop the raw "___" per-sample columns, melt to long format, then split
  # the "status | counts" strings back into separate columns.
  processed.tmp.table = tmp.table[,!grep('___',colnames(tmp.table)),with = F] %>%
    # melting data frame by tumor samples
    melt(id.vars = c('Hugo_Symbol','Chromosome','Start_Position','End_Position','Variant_Classification','HGVSp_Short',
                     'Reference_Allele','Tumor_Seq_Allele2','ExAC_AF','Hotspot','DMP','duplex_support_num','call_confidence','CH'),
         variable.name = "Tumor_Sample_Barcode", value.name = "call_info") %>%
    mutate(call_confidence = gsub(' \\| ','',str_extract(call_info,'.*.\\| ')),call_info = gsub('.*.\\| ','',call_info)) %>% rowwise() %>%
    # Parse read counts; SV strings look like "a-b-c(...)", SNV strings
    # like "alt/total(...)".
    mutate(t_alt_count = ifelse(grepl('-[0-9]+-',call_info),
                                # SV parsing
                                sum(as.numeric(str_split(call_info,'-|\\(')[[1]][1:2])),
                                # SNV parsing
                                as.numeric(gsub(' |\\/.*.','',call_info))),
           t_total_count = ifelse(grepl('-[0-9]+-',call_info),
                                  # SV parsing
                                  as.numeric(str_split(call_info,'-|\\(')[[1]][3]),
                                  # SNV parsing
                                  as.numeric(gsub('.*.\\/|\\(.*.','',call_info)))) %>% data.table()
  return(processed.tmp.table)
}
|
f6055a3f565c7a600fd923bf69c44b95228c83a4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Aoptbdtvc/examples/aoptgdtd.Rd.R
|
5c215efeae1a2379c5e4fc37c6950e232f67a953
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
r
|
aoptgdtd.Rd.R
|
# Extracted examples from the aoptgdtd help page (Aoptbdtvc package).
# From the call pattern: m*n test treatments arranged in b blocks of
# size k -- see ?aoptgdtd for the full argument documentation.
library(Aoptbdtvc)
### Name: aoptgdtd
### Title: A-optimal group divisible treatment designs
### Aliases: aoptgdtd
### Keywords: group divisible treatment design A-optimal
### ** Examples
## construct an A-optimal GDT design with 12 (= 4 x 3) test treatments
##in 12 blocks each of size 6
aoptgdtd(m=4,n=3,b=12,k=6)
## construct an A-optimal GDT design with 8 (= 4 x 2) test treatments
##in 8 blocks each of size 4
aoptgdtd(m=4,n=2,b=8,k=4)
##design does not exist
aoptgdtd(4,2,8,2)
##Design not found
## Not run: aoptgdtd(3,3,15,3)
|
f0082f1f7ca569cd6a03ad872ef8d1c1c99760a0
|
e0d533a9f4d79ee01fee5e638ddd56ef04535dcd
|
/R/runmodelscript.R
|
a170eb13b003befd29d1acbf0a395693f3c8a092
|
[] |
no_license
|
Sandy4321/Tree-Death-Physiological-Models
|
d54f83485974156c5ec1872fe5da45bb2c5fce32
|
5202b5800124a19cdd294dd5d0fb2caaeaa8b2ab
|
refs/heads/master
| 2021-01-11T10:05:45.097326
| 2011-07-29T19:02:14
| 2011-07-29T19:02:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,564
|
r
|
runmodelscript.R
|
# Run file for the tree physiology model: integrates the ODE system,
# draws diagnostic panels, and saves the parameters and output.
require(deSolve) #load relevant packages and function files
require(rootSolve)
require(gdata)
source("R/treephys_ODEs.r")
source("R/weibullcurves.R")
source("R/modelplots.R")
source("R/parameterfile.R") #load a parameter file
modelout = 0
plot_weibull(parms$k.K,parms$l.K)
# Integrate the ODE system and draw the 2x2 diagnostic panel.
modelout = lsoda(states0, times, tree_odes, parms)
par(mfrow=c(2,2))
plot.Psis(modelout,parms)
plot.carbon(modelout,parms)
plot.stress(modelout,parms)
plot(modelout[,"time"],modelout[,"G"],type="l",ylab="Stomatal Conductance",xlab="Time",mgp=c(2,1,0))
# BUG FIX: the next line previously began with "%" (a MATLAB-style comment),
# which is a syntax error in R; it is now commented out with "#".
# plot(modelout[,"time"],modelout[,"Psi_l"],type="l",ylab="Leaf Water Potential",xlab="Time")
save(times, states0, parms, modelout, #save parameters and output data in ASCII file named "Modelrun_YYYYMMDD_HHMM.R"
     file=paste("Outputs/BiggerG_" ,format(Sys.time(), "%Y%m%d_%H%M%S"), ".R", sep=""),
     ascii=TRUE)
tablefile=paste("Outputs/Tabledata_" ,format(Sys.time(), "%Y%m%d_%H%M%S"), ".R", sep="")
write.fwf(as.data.frame(parms),file=tablefile,append=TRUE)
write.fwf(as.data.frame(modelout),file=tablefile,append=TRUE)
loaded=as.matrix(read.table(tablefile,skip=2,header=TRUE))
loadedparms=as.list(read.table(tablefile,header=TRUE,nrows=1))
# NOTE(review): modelout1/modelout15/modelout2..modelout5 below are not
# defined in this script -- presumably loaded from earlier saved runs;
# confirm before running this section.
par(mfrow=c(2,3))
plot.stress(modelout1,parms)
text(700,8,expression(G[min] == 0.06),cex=1.3)
plot.stress(modelout15,parms)
text(700,8,expression(G[min] == 0.05),cex=1.3)
plot.stress(modelout2,parms)
text(700,8,expression(G[min] == 0.045),cex=1.3)
plot.stress(modelout3,parms)
text(700,1,expression(G[min] == 0.03),cex=1.3)
plot.stress(modelout4,parms)
text(700,1,expression(G[min] == 0.01),cex=1.3)
plot.stress(modelout5,parms)
text(700,1,expression(G[min] == 0.00),cex=1.3)
mtext(side=1,"Time",outer=TRUE, padj=-2)
mtext(side=2,"Carbohydrates (S) (g)",outer=TRUE, padj=2)
mtext(side=4,"Conductance (K) (mmol s-1 MPa-1)",col="red",outer=TRUE,padj=-2)
# Overlay plot of carbohydrate store S (black, left axis) and hydraulic
# conductance K (red, right axis) against time, from an lsoda output
# matrix with "time", "S" and "K" columns. Called for its plotting side
# effect; `parms` is accepted but unused in the body.
plot.stress <- function(modelout, parms) {
  t_vals <- modelout[, "time"]
  par(mar = c(4, 4, 3, 4))
  # Carbohydrates on the primary (left) axis.
  plot(t_vals, modelout[, "S"], type = "l", lwd = 2,
       xlab = "", ylab = "", mgp = c(2, 1, 0), ylim = c(1, 50))
  # legend("topright",c("Carbs","Conductance"),lty=c(1,2),cex=0.75,col=c("black","red"))
  # Overlay conductance on the same region, scaled to its own range,
  # and draw its axis on the right in red.
  par(new = TRUE)
  plot(t_vals, modelout[, "K"], type = "l", col = "red", lty = 1, lwd = 2,
       xaxt = "n", yaxt = "n", xlab = "", ylab = "",
       ylim = c(0, max(modelout[, "K"])))
  axis(4, col.ticks = "red", col.axis = "red", col = "red")
  mtext("", side = 4, line = 3, col = "red")
}
# Stray figure axis labels pasted into the script; commented out so the
# file parses as valid R:
# Carbohydrates (S) (g)
# Time
# Conductance (K) (mmol s-1 MPa-1)
|
1bb1e3e7982409ef54359f2e56fc069e934c9bba
|
f0f91ff5dee7a1d7f1ff10fa47445df3e23cb97a
|
/Read_data_16s.R
|
3fd8fa3d40f203395d4557cbb72d99e9ae5999f8
|
[] |
no_license
|
mariofajardo/Trans_NSW
|
5a471c16e9553ede7274f33ca17b8121d0d7b1fd
|
1ad12cd78ff0ea030b8ea972ffb858327b49d376
|
refs/heads/master
| 2021-01-10T07:14:41.308088
| 2015-12-15T00:23:49
| 2015-12-15T00:23:49
| 48,009,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,965
|
r
|
Read_data_16s.R
|
require(phyloseq)
###Import BIOM table data###
OTUS_16S<-import_biom(BIOMfilename = 'Y:/VPino_hpc/ubuntu_qiime/16SvpNS_output/Diversity/7otus_16SvpNS/json_biom.biom',
treefilename ='Y:/VPino_hpc/ubuntu_qiime/16SvpNS_output/Diversity/7otus_16SvpNS/rep_set.tre',
# refseqfilename ='DATA/otus_join_nf_def/new_refseqs.fna',
version=1.9)
colnames(data.frame(OTUS_16S@otu_table@.Data)) ###check what I have
####join sample data from mapfiles####
mapfile_in_use <- read.table('Y:/VPino_hpc/ubuntu_qiime/16SvpNS_output/mapfile_16SvpNS_for_R.txt',sep='\t',stringsAsFactors = F,header=T)
mapfile_in_use <- mapfile_in_use[order(mapfile_in_use[,1]),]#### need to order first
mapfile_in_use$Site <- c(rep(0:26,each = 6),rep('Blank',16),rep('Empty',10),rep('MC',4))
mapfile_in_use$Rep <- c(rep(1:3,length.out = 162),c(1,10,11:16,2:9),c(1,10,2:9),1:4)
mapfile_in_use$top <- 0
mapfile_in_use$bottom <-5
mapfile_in_use$system <- c(rep(c(rep('Nat',3),rep('Crop',3)),27),rep('Blank',16),rep('Empty',10),rep('MC',4))
mapfile_in_use$index <-toupper(with(mapfile_in_use,paste(Site,system,top,bottom,'R1',sep='_'))) #0_NAT_0_5_R1
#####Import Chemical Data ####
Lab_data <-readRDS('RData/NSW_datasetFri_Oct_30_11_38_43_2015.rds')[c('responses','predictions')]
Lab_data <- cbind(Lab_data$responses,Lab_data$predictions)
colnames(Lab_data)
Lab_data <- Lab_data[,-c(1,4:7)]
####JUST FOR NOW####
#copy the data of Site 12 nat 5 to 10 to Site 12 0 to 5 ... because that sample was not analysed#
Lab_data_corrected_tmp <-rbind(Lab_data,Lab_data[Lab_data$index=='12_NAT_5_10_R1',])
Lab_data_corrected_tmp[nrow(Lab_data_corrected_tmp),]$Depth <- '0-5'
Lab_data_corrected_tmp[nrow(Lab_data_corrected_tmp),]$index <- '12_NAT_0_5_R1'
Lab_data_corrected_tmp[nrow(Lab_data_corrected_tmp),]$top <- '0'
Lab_data_corrected_tmp[nrow(Lab_data_corrected_tmp),]$bottom <- '5'
Sample_data <- merge(Lab_data_corrected_tmp,mapfile_in_use,by.x = 'index',by.y = 'index')
# write.csv(Sample_data,'DATA/Sample_data_export.csv')
####Now add that to the OTUS ####
#first order in the same order than the OTU table#
Sample_data<-Sample_data[match(colnames(OTUS_16S@otu_table@.Data),Sample_data$SampleID),]
rownames(Sample_data)<-colnames(OTUS_16S@otu_table@.Data)
Sample_data[is.na(Sample_data)]<-0
####How is the structure of sample data ????####
# data(GlobalPatterns) #example
# str(GlobalPatterns@sam_data)
#this command is from phyloseq package
Sample_data <-sample_data(Sample_data)
OTUS_16S<-merge_phyloseq(OTUS_16S,Sample_data)
colnames(tax_table(OTUS_16S)) <- c(k = "Kingdom", p = "Phylum", c = "Class",
o = "Order", f = "Family", g = "Genus", s = "Species")
rank_names(OTUS_16S)
OTUS_16S <- prune_samples(grepl('R1',as.data.frame(OTUS_16S@sam_data@.Data)[,1]),OTUS_16S)
# date<-gsub(' |:','_',date())
# saveRDS(OTUS_16S,paste0('RData/','OTUS_16S_',date,'.rds'))
|
627718623ba0afc16b14cad6c757f8ed8dc4dcb8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gPCA/examples/gDist.Rd.R
|
4e1d87b4c0996a21db1408c14ff1b1ff73326802
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 157
|
r
|
gDist.Rd.R
|
# Extracted example from the gDist help page (gPCA package); the single
# example call is itself commented out in the package documentation.
library(gPCA)
### Name: gDist
### Title: Density/Distribution Plot for gPCA
### Aliases: gDist
### Keywords: ~kwd1 ~kwd2
### ** Examples
# gDist(out)
|
03afc043e334a56d518a69f3661a58fc50f1e3bd
|
405cfc8d0a48719214ea3f216a9ff87c24a2460c
|
/Script cats and dogs table.R
|
2821004e4b010ea8c684e134a05ec73c6272d3e7
|
[] |
no_license
|
Anavoron/Rstudio-table-contest-2020
|
ddc5f72584dc63a71c2db64b28a4d189b8a5c0d0
|
84cdf2fdb9a44ae88852a428a842615ed28ba746
|
refs/heads/main
| 2023-01-09T01:07:31.244081
| 2020-11-12T15:22:33
| 2020-11-12T15:22:33
| 312,292,247
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,639
|
r
|
Script cats and dogs table.R
|
# loading the libraries
# (data wrangling, gt table rendering, fonts, and table-image export)
library(tidyverse)
library(gt)
library(showtext)
library(extrafont)
library(extrafontdb)
library(webshot)
### loading and organising the data
# uploading cat density
postcode_c <- read_csv("APHA0372-Cat_Density_Postcode_District.csv")
# uploading dog density
postcode_d <- read_csv("APHA0375-Dog_Density_Postcode_District.csv")
## merging the datasets into one density dataset of cats and dogs
postcode_dens <- postcode_c %>%
  inner_join(postcode_d, by = "PostcodeDistrict")
postcode_dens <- postcode_dens %>%
  rename(postcode_district = PostcodeDistrict,
         catpop = EstimatedCatPopulation,
         dogpop = EstimatedDogPopulation)
## uploading a dataset with areas, districts and human population
postcode_towns <- read_csv("Postcode districts.csv")
postcode_towns <- postcode_towns %>%
  select(Postcode, 'Town/Area', Region, Population, Households) %>%
  rename(postcode_district = Postcode,
         town = 'Town/Area',
         region = Region,
         humpop = Population,
         households = Households)
# adding it to the previous dataframe
cats_dogs <- left_join(postcode_dens, postcode_towns, by = "postcode_district")
# filtering out postcode districts with no info on either of the living creatures
cats_dogs <- cats_dogs %>%
  filter(catpop > 0 & dogpop > 0 & humpop > 0 & !is.na(humpop))
# uploading the remaining data frames
max_cats <- read_csv("APHA0380-Cats_Per_Household_upper95.csv")
max_cats <- max_cats %>%
rename(postcode_district = PostcodeDistrict)
max_dogs <- read_csv("APHA0383-Dogs_Per_Household_upper95.csv")
max_dogs <- max_dogs %>%
rename(postcode_district = PostcodeDistrict)
# joining all this with the main dataset
cats_dogs <- cats_dogs %>%
inner_join(max_cats, by = "postcode_district") %>%
inner_join(max_dogs, by = "postcode_district")
# checking which areas are top and bottom five by pets number
cats_dogs %>%
group_by(region) %>%
summarise(pets = sum(sum(catpop) + sum(dogpop)),
pop = sum(humpop),
cat_place = town[which.max(CatsPerHousehold_upper95)],
cat_household = max(CatsPerHousehold_upper95),
dog_town = town[which.max(DogsPerHousehold_upper95)],
dog_household = max(DogsPerHousehold_upper95)) %>%
ungroup() %>%
slice_max(pets, n = 5)
# bottom 5
cats_dogs %>%
group_by(region) %>%
summarise(pets = sum(sum(catpop) + sum(dogpop)),
pop = sum(humpop),
cat_place = town[which.max(CatsPerHousehold_upper95)],
cat_household = max(CatsPerHousehold_upper95),
dog_place = town[which.max(DogsPerHousehold_upper95)],
dog_household = max(DogsPerHousehold_upper95)) %>%
ungroup() %>%
slice_min(pets, n = 5) %>%
arrange(desc(pets))
# this is a very primitive way to shorten the two long entries in the "town" variable
# unfortunately couldn't figure out how to do it with gsub or str_replace
cats_dogs$town <- as.factor(cats_dogs$town)
cats_dogs <- cats_dogs %>%
mutate(town = fct_recode(town,
"Saxton, Stutton, Ulleskelf, etc." = "Saxton, Stutton, Ulleskelf, Church Fenton, Tadcaster, Toulston",
"Barlestone, Barton in the Beans, etc." = "Barlestone, Barton in the Beans, Bilstone, Cadeby, Carlton, Congerstone, Dadlington, Fenny Drayton, Higham on the Hill, Market Bosworth, Nailstone, Odstone, Osbaston, Shackerstone, Shenton, Stoke Golding, Sutton Cheney, Upton, Wellsborough"))
# making a plot function for the future table
# comparing the density per household of humans, dogs and cats
# Build a small bar chart comparing humans, dogs and cats per household for
# one region; used as a cell plot inside the gt summary table.
#   region - region name to plot (compared against the `region` column)
#   data   - pet-population data frame (one row per postcode district)
# Returns a ggplot object with no axes or legend.
plot_comparison <- function(region, data) {
  # BUG FIX: the `data` argument was previously ignored and the global
  # `cats_dogs` used instead; existing callers pass data = cats_dogs, so
  # behavior is unchanged for them.
  data_p <-
    data %>%
    filter(region == {{ region }}) %>%
    group_by(region) %>%
    # Per-household densities for the three species in this region.
    summarise(pets = sum(sum(catpop) + sum(dogpop)),
              human_d = sum(humpop)/sum(households),
              cat_d = sum(catpop)/sum(households),
              dog_d = sum(dogpop)/sum(households)) %>%
    ungroup() %>%
    gather("stat", "value", -c(region, pets)) %>%
    # Fixed ordering so bar colors always map human/dog/cat consistently.
    mutate(stat = factor(stat, levels = c("human_d", "dog_d", "cat_d")))
  plot <-
    data_p %>%
    ggplot(aes(x = region, y = value, fill = stat)) +
    geom_col(position = "dodge") +
    theme_void() +
    theme(
      legend.position = "none"
    ) +
    scale_fill_manual(values=c("#B62A3D", "#EDCB64","#B5966D")) +
    scale_y_continuous(breaks=c(0, 2.5))
  plot
}
### main parts of the table are tested and wrangled, time to assemble
# top 5
top_5 <- cats_dogs %>%
group_by(region) %>%
summarise(pets = sum(sum(catpop) + sum(dogpop)),
pop = sum(humpop),
cat_place = town[which.max(CatsPerHousehold_upper95)],
cat_postcode = postcode_district[which.max(CatsPerHousehold_upper95)],
cat_household = max(CatsPerHousehold_upper95),
dog_place = town[which.max(DogsPerHousehold_upper95)],
dog_household = max(DogsPerHousehold_upper95)) %>%
ungroup() %>%
slice_max(pets, n = 5)
# bottom 5
bottom_5 <- cats_dogs %>%
group_by(region) %>%
summarise(pets = sum(sum(catpop) + sum(dogpop)),
pop = sum(humpop),
cat_place = town[which.max(CatsPerHousehold_upper95)],
cat_postcode = postcode_district[which.max(CatsPerHousehold_upper95)],
cat_household = max(CatsPerHousehold_upper95),
dog_place = town[which.max(DogsPerHousehold_upper95)],
dog_household = max(DogsPerHousehold_upper95)) %>%
ungroup() %>%
slice_min(pets, n = 5) %>%
arrange(desc(pets))
# main table
table <- rbind(top_5, bottom_5)
table_graphs <- table %>%
mutate(plots = map(region, plot_comparison, data = cats_dogs)) %>%
# the order of cats and dogs postcodes is the same, so I am just removing the dog postcodes/place columns
select(region, pop, pets, plots,cat_postcode, cat_place, cat_household, dog_household)
## gt table with plots
gt_table <- table_graphs %>%
gt() %>%
cols_align(
align = "left",
columns = 1
) %>%
fmt_number(
columns = vars(cat_household, dog_household),
decimals = 1,
use_seps = FALSE
) %>%
fmt_number(
columns = vars(pop, pets),
decimals = 0,
use_seps = TRUE
) %>%
tab_header(
title = md("**It's raining cats and dogs**"),
subtitle = html("<span style='color: black'>The <span style='color: #5299B7'><b>top</b></span> and</span>
<span style='color: #F39097'><b>bottom</b></span> five <b>UK</b> areas by pet population")
) %>%
tab_footnote(
footnote = html("<span style='font-family:Muli'><i>Cats and dogs, populations are estimated</i></span>"),
locations = cells_title(groups = "subtitle")
) %>%
tab_spanner(
label = html("<span style='font-family:Muli; font-size: 18px'><b>Postcodes with most pets per household</b></span>"),
columns = vars(cat_postcode, cat_place, cat_household, dog_household)
) %>%
tab_source_note(
source_note = html("<span style='font-family:Muli'><b>Table:</b> @Ana_Voronkova // <b>Source:</b> data.gov.uk</span>")
) %>%
cols_label(
region = html(""),
pop = html("Humans"),
pets = html("Pets"),
plots = html("<span style='color: #B62A3D;font-size: 18px'>Humans</span>,
<span style='color: #EDCB64;font-size: 18px'>dogs</span> <span style = 'font-size:14px'>&</span> <span style='color: #B5966D;font-size: 18px'>cats</span><br>
<span style = 'font-size:14px'>per household</span>"),
cat_postcode = html(local_image(filename = "letter.png",
height = 35)),
cat_place = html(local_image(filename = "postcode.png",
height = 35)),
cat_household = html(local_image(filename = "cat.png",
height = 35)),
dog_household = html(local_image(filename = "dog.png",
height = 40))
) %>%
tab_style(
style = cell_text(size = px(18)),
locations = cells_column_labels(everything())
) %>%
cols_align(
align = "center",
columns = vars(pop, pets, cat_postcode, cat_place, cat_household, dog_household)
) %>%
tab_style(
cell_text(weight = "bold"),
locations = cells_column_labels(vars(region, pop, pets, plots))
) %>%
tab_style(
style = cell_fill(color = "#C5DDE7"),
locations = cells_body(
columns = vars(region),
rows = c(1:5))
) %>%
tab_style(
style = cell_fill(color = "#FDECED"),
locations = cells_body(
columns = vars(region),
rows = c(6:10))
) %>%
tab_style(
style = cell_borders(
sides = c("bottom"),
color = "grey",
weight = px(1),
style = "solid"
),
locations = cells_body(
columns = everything(),
rows = 5
)
) %>%
tab_style(
style = cell_text(size = px(25), weight = "bold"),
locations = cells_body(columns = vars(region),
rows = everything())
) %>%
tab_style(
style = cell_text(font = "Hashed Browns", size = px(60)),
locations = cells_title("title")
) %>%
tab_style(
style = cell_text(font = "Muli", size = px(20)),
locations = cells_title("subtitle")
) %>%
tab_style(
style = cell_text(font = "Muli"),
locations = cells_body(
columns = everything(),
rows = everything()
)
) %>%
tab_style(
style = cell_text(font = "Muli"),
locations = cells_column_labels(
everything()
)
) %>%
tab_style(
style = cell_text(font = "Muli"),
locations = cells_column_spanners(
everything()
)
) %>%
tab_options(
column_labels.border.top.color = "white",
column_labels.border.top.width = px(30),
column_labels.border.bottom.color = "grey",
column_labels.border.bottom.width = px(2),
table_body.hlines.color = "white",
table.border.bottom.color = "white",
table_body.border.bottom.color = "grey",
table_body.border.bottom.width = px(2),
table.border.top.color = "white"
) %>%
text_transform(
locations = cells_body(vars(plots)),
fn = function(x) {
map(table_graphs$plots, ggplot_image, height = px(80), aspect_ratio = 1.4)
}
)
# saving it as an image
gt_table %>%
gt::gtsave(
"catdog_table.png",
path = "../R table competition"
)
|
b16ad748423f7f785a3dac6813fa480aa3214668
|
5f4696ef6b9ece4dc7efa0e72a9138dacfbb5fca
|
/tests/testthat/test_ICTpower.R
|
db5d417bbfd99ef4e9be54e47700d0405a9490c5
|
[] |
no_license
|
ICTatRTI/PersonAlyticsPower
|
fb37bbbb5c6a68c577774059fef19ffdb21bdb0a
|
507ad4bded6bb01493040dbb029aecc2f92cd03c
|
refs/heads/master
| 2022-12-04T20:35:24.718140
| 2022-06-10T22:40:06
| 2022-06-10T22:40:06
| 215,357,407
| 0
| 0
| null | 2021-12-02T14:29:20
| 2019-10-15T17:26:48
|
R
|
UTF-8
|
R
| false
| false
| 1,465
|
r
|
test_ICTpower.R
|
context("ICTpower")
library(PersonAlyticsPower)
test_that("piecewise",
{
myPolyICT <- polyICT$new(
groups = c(group1=10, group2=10) ,
phases = makePhase() ,
propErrVar = c(randFx=.5, res=.25, mserr=.25) ,
randFxOrder = 1 ,
randFxCor = 0.2 ,
randFxVar = c(1, 1) ,
error = armaErr$new() ,
merror = armaErr$new(list()) ,
ySD = 15 ,
yMean = 100 ,
)
myPolyICTnonPar <- myPolyICT$clone(deep=TRUE)
myPolyICTnonPar$inputMat$n <- 500
Data <- myPolyICTnonPar$makeData()
save(Data, file = "Data.RData")
# this fails, no convergence
if(1==2)
{
ICTpower(outFile = c("piecewise", "csv"),
B = 3 ,
dataFile = "Data.RData" ,
sampleSizes = c(25,25) ,
alignPhase = 'piecewise' ,
prompt = FALSE ,
debugforeach = FALSE )
}
txts <- dir(getwd(), glob2rx("*.txt"))
csvs <- dir(getwd(), glob2rx("*.csv"))
pdfs <- dir(getwd(), glob2rx("*.pdf"))
file.remove("Data.RData", txts, csvs, pdfs)
})
|
ed0eb66e60c16ca1769fb76871b8a381edff102b
|
a8b6bfc9e1e34d5d260b833587b9fc7ff84215da
|
/R/nbss.R
|
11ae8930156aaf2fcbc0e3a0acf604c9873819a1
|
[] |
no_license
|
jiho/nbssr
|
d93d92efaa92fe3099187c74527eed22aa77844d
|
838ddfe8829829aed2a0023d63798d377e5090b1
|
refs/heads/master
| 2022-07-26T15:18:55.354762
| 2020-05-14T23:40:21
| 2020-05-14T23:40:21
| 262,024,579
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,300
|
r
|
nbss.R
|
#' Normalised Biomass Size Spectrum
#'
#' @param x vector of biomasses, biovolumes, or lengths.
#' @param w vector of weights, typically concentrations associated with individual measurements in `x`.
#' @param type whether to compute a biomass/biovolume or abundance spectrum.
#' @param base base of the logarithm for the computation of bins.
#' @param binwidth width of bins in log10 scale.
#'
#' @return a data.frame with columns
#' - `bin_log` value of the bin center in log scale;
#' - `bin` value of the bin center in original scale;
#' - `binwidth` width of the bin center in original scale;
#' - `y` sum of biomass/biovolume for a biomass spectrum; count of the objects in the bin for an abundance spectrum;
#' - `norm_y` `y/binwidth`
#'
#' @export
#'
#' @examples
#' # Biovolume spectrum
#' ss <- nbss(uvp$volume_mm3)
#' head(ss)
#' autoplot(ss) + labs(
#' x=expression("Biovolume (mm"^3*")"),
#' y="Normalised biovolume"
#' )
#' # Abundance spectrum
#' ss <- nbss(uvp$length_mm, binwidth=0.05)
#' autoplot(ss) + labs(x="Length (mm)")
# Compute a Normalised Biomass Size Spectrum: log-bin the measurements in
# `x`, accumulate biomass (x*w) or abundance (w) per bin, and normalise by
# bin width in the original scale. See the roxygen block above for the
# full argument and return documentation.
nbss <- function(x, w=rep(1, length(x)), type=c("biomass", "abundance"), base=10, binwidth=0.1) {
  # Validate arguments up front.
  type <- match.arg(type)
  if (!base %in% c(2, 10)) {
    stop("`base` must be either 2 for natural logarithm or 10 for base 10 logarithm")
  }
  if (length(x) != length(w)) {
    stop("x and w must be the same length")
  }
  # Select the log / inverse-log pair. NOTE: as the error message states,
  # base = 2 selects the *natural* logarithm (log/exp), preserving the
  # original interface.
  if (base == 2) {
    to_log <- log
    from_log <- exp
  } else {
    to_log <- log10
    from_log <- function(v) 10^v
  }
  # Snap each log-transformed value to the nearest bin center on a grid
  # of width `binwidth`.
  bin_centers <- round(to_log(x) / binwidth) * binwidth
  # Quantity to accumulate per bin: biomass (x * w) or abundance (w).
  quantity <- if (type == "biomass") x * w else w
  ss <- aggregate(quantity, list(bin_centers), FUN = sum)
  names(ss) <- c("bin_log", "y")
  # Bin center and bin width back in the original (linear) scale.
  ss$bin <- from_log(ss$bin_log)
  half <- binwidth / 2
  ss$binwidth <- from_log(ss$bin_log + half) - from_log(ss$bin_log - half)
  # Normalised spectrum value.
  ss$norm_y <- ss$y / ss$binwidth
  ss <- ss[, c("bin_log", "bin", "binwidth", "y", "norm_y")]
  # Record settings and tag the class for autoplot dispatch.
  attr(ss, "type") <- type
  attr(ss, "base") <- base
  class(ss) <- c("nbss", class(ss))
  return(ss)
}
|
55cfc78977dc6a2d5dcf4b1c07643366ac781f29
|
f2c77a19bf0c7532363cd5f521d1f674b88f1b8d
|
/Plot2.R
|
25b8996f9df9daf3bdebde3465089485c83bb315
|
[] |
no_license
|
Yambcn/ExData_Plotting1
|
ec3a273d75da1e112d1ad8d8849c695fad5d0c72
|
2d537e802cae8fc151e9b0121a05c9b4f83c578f
|
refs/heads/master
| 2020-12-30T23:22:04.191273
| 2014-05-09T19:23:28
| 2014-05-09T19:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
r
|
Plot2.R
|
##Process the data
##I have created a tiny data file with the data of the selected days, to avoid the use of a txt file of 100MB
## FIX: the argument was misspelled "stringsAsFactor", relying on R's
## partial argument matching through read.csv's "..."; spelled out in full.
project<-read.csv("Dataproject.txt", sep=";", stringsAsFactors=FALSE) ## Read the Tiny Data, avoid columns as class Factor
project$Time<-paste(project$Date, project$Time, sep=" ") ## Merge the date and the time in one column
project$Date<-as.Date(project$Date, format="%d/%m/%Y") ## Convert column Dates into class Date, just needed to obtain the tiny data
project$Time<-as.POSIXct(project$Time, format="%d/%m/%Y %H:%M:%S") ## Convert the class character to POSIXct
## Plot 2: Global Active Power (kW) over time; weekday labels are placed at
## 24-hour (86400 s) offsets from the first timestamp.
plot(project$Time, project$Global_active_power, type="l", xlab="", xaxt="n", ylab="Global Active Power (kilowatts)")
axis(1, at=c(min(project$Time), min(project$Time)+86400, min(project$Time)+2*86400), labels=c("Thu", "Fri", "Sat"))
## Copy the screen device to a PNG file and close it.
dev.copy(png, file= "Plot2.png")
dev.off()
|
d52bbc007a5efd09446799886bd0860774baa012
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/cuda_get_device_capability.Rd
|
89594e151d37eeb15fca579d50ca1359af544d78
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 458
|
rd
|
cuda_get_device_capability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cuda.R
\name{cuda_get_device_capability}
\alias{cuda_get_device_capability}
\title{Returns the major and minor CUDA capability of \code{device}}
\usage{
cuda_get_device_capability(device = cuda_current_device())
}
\arguments{
\item{device}{Integer value of the CUDA device to return capabilities of.}
}
\description{
Returns the major and minor CUDA capability of \code{device}
}
|
24e5ec147aff087bc7051ff883a37f179b48db87
|
9b62a8e5ba50b9f424e14dbb56f299285aa1b03b
|
/inf_prj.R
|
bedc5b71b84e4b519491374a710738813c963a0d
|
[] |
no_license
|
sj-choi/inf_prj
|
b5fba9ecfae497183029d15835ee76226d7f3353
|
f799ecf8466bdf0f710a6b62f5ac97b3ef6e2791
|
refs/heads/master
| 2021-01-10T03:19:11.078399
| 2015-05-26T14:42:42
| 2015-05-26T14:42:42
| 36,045,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
inf_prj.R
|
## Distribution of 40 random exponentials (with rate = 2)
## NOTE(review): the comment says rate = 2 but the code uses rate = 0.2
## (theoretical mean 1/0.2 = 5) -- the code matches the rest of the script.
set.seed(2) # This line is intended to perform a reproducible simulation.
ss40 <- rexp(n = 40, rate = 0.2)
hist(ss40)
mean(ss40)
var(ss40)
## Sample mean vs. Theoretical mean
n <- 1000
set.seed(3)
# Running mean of n draws: illustrates convergence toward the mean (5).
means <- cumsum(rexp(n, rate = 0.2))/(1:n)
plot(means ~ c(1:n),
     xlab= "Number of Trials",
     ylab = "Mean Value",
     main = "Law of Large Number: LLN",
     type = "l", lwd = 3, col = "red")
abline(h = 5, col = "blue", lwd = 3)
## Alternatively: build the draws one at a time, tracking the running mean
## and variance after each new observation.
## BUG FIX: the summary calls below referenced `exps`, which was never
## defined (the vector was built as `exp`, which also shadowed base::exp).
## The draws vector is now consistently named `exps`.
set.seed(3)
exps <- NULL
mexp <- NULL
vexp <- NULL
for (i in 1 : 1000) {
  exps <- c(exps, rexp(1, rate = 0.2))
  mexp <- c(mexp, mean(exps))
  vexp <- c(vexp, var(exps))
}
mean(exps)
var(exps)
## Sample mean vs. Theoretical mean (theoretical mean = 1/rate = 5)
plot(mexp ~ c(1:n),
     xlab= "Number of Trials",
     ylab = "Mean",
     main = "Law of Large Number: LLN",
     type = "l", lwd = 3, col = "red")
abline(h = 5, col = "blue", lwd = 3)
## Sample variance vs. Theoretical variance (theoretical variance = 1/rate^2 = 25)
plot(vexp ~ c(1:n),
     xlab= "Number of Trials",
     ylab = "Variance",
     main = "Law of Large Number: LLN",
     type = "l", lwd = 3, col = "red")
abline(h = 25, col = "blue", lwd = 3)
## Law of Large Number
## Distribution of 1000 random exponentials (with rate = 2)
## NOTE(review): comment says rate = 2 but the code uses rate = 0.2.
set.seed(3)
ss1000 <- rexp(n = 1000, rate = 0.2)
hist(ss1000, xlab = "Value", main = "1000 random exponentials")
box()
mean(ss1000)
var(ss1000)
## Central Limit Theorem
## Distribution of 1000 of averages of 40 random exponentials (with rate = 2)
set.seed(2)
mexps <- NULL
# Collect 1000 sample means of n = 40 draws; by the CLT these are
# approximately normal with mean 5 and variance 25/40.
for (i in 1 : 1000) {
  mexps <- c(mexps, mean(rexp(n = 40, rate = 0.2)))
}
hist(mexps, xlab = "Value", main = "1000 means of 40 random exponentials", prob = TRUE)
box()
mean(mexps)
var(mexps)
## Part 2: ToothGrowth analysis -- effect of supplement type and dose.
d.f <- data.frame(ToothGrowth)
library(dplyr)
# NOTE(review): these two group_by results are neither assigned nor used;
# they have no lasting effect.
d.f %>% group_by(supp)
d.f %>% group_by(dose)
library(ggplot2)
# Tooth length by delivery method.
ggplot(d.f, aes(x = supp, y = len, fill = supp)) + geom_boxplot()
d.f.vc <- filter(d.f, supp == "VC")
d.f.oj <- filter(d.f, supp == "OJ")
# Welch two-sample t-test comparing delivery methods.
t.test(d.f.vc$len, d.f.oj$len)
## Not significant. H0 could not be rejected. No difference between delivery methods.
# Tooth length by dose level.
ggplot(d.f, aes(x = as.factor(dose), y = len, fill = as.factor(dose))) + geom_boxplot()
d.f.0_5 <- filter(d.f, dose == 0.5)
d.f.1 <- filter(d.f, dose == 1)
d.f.2 <- filter(d.f, dose == 2)
# Per-group variances (informal check before the pairwise t-tests).
var(d.f.0_5$len)
var(d.f.1$len)
var(d.f.2$len)
# Pairwise t-tests between dose levels.
t.test(d.f.2$len, d.f.1$len)
t.test(d.f.1$len, d.f.0_5$len)
t.test(d.f.2$len, d.f.0_5$len)
|
3754cdcc1b0973642fb496b683526866bfa5f1f0
|
053257d525da78078c77c624649f8c3c7aab7cb8
|
/R/BLasso_DL_Joo.R
|
a33133c6210cf99f988eb8616db4cd92b954c80c
|
[] |
no_license
|
guhjy/BLasso
|
8febfd926c8ba41927daaa2986494cd29a5c7dc4
|
5efa658d9fc6e3f143c64570c8a93fd0f580dd85
|
refs/heads/master
| 2020-06-21T00:33:33.421257
| 2017-01-01T14:49:11
| 2017-01-01T14:49:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,215
|
r
|
BLasso_DL_Joo.R
|
#' Bayesian Lasso by Variational Bayes + Dirichlet-Laplace Priors
#'
#' @description Extended Bayesian Lasso (Park \& Casella (2008)) with Dirichlet-Laplace Priors (Bhattacharya et al. 2015)
#'
#' @references Bhattacharya, Anirban, et al. "Dirichlet–Laplace priors for optimal shrinkage." Journal of the American Statistical Association 110.512 (2015): 1479-1490.
#' @references Park, Trevor, and George Casella. "The bayesian lasso." Journal of the American Statistical Association 103.482 (2008): 681-686.
#' @references Joo, Lijin. "Bayesian Lasso: An Extension for Genome-wide Assoication Study." New York University, 2017
#'
#' @param x: predictor variables (numertic only)
#' @param y: outcome (numertic only)
#' @param print.it = TRUE/FALSE (default: FALSE, suppressing to print the number of iterations)
#'
#' @return beta
#' @return beta.sig: standard deviation of beta
#' @retrun tau2: local scale
#' @return p.value: p-value for t-test for beta (for variable selection)
#' @return sigma2
#' @return lambda: penalty, or global scale
#' @return convergence: 1/0 converged if convergence = 1
#'
#' @examples ex1<-DLasso(x=data1[,-1], y=data1[,1]); ex1$beta; ex1$lambda; sum(ex1$p.value<0.05); #n of selected variables#
#'
#' @export
#'
DLasso<-function(x, y, print.it=FALSE){
##This is a function for Bayesian Lasso written for my dissertation ##
##The algoritm is implemented by Variational Bayes##
## Fits the Dirichlet-Laplace Bayesian Lasso by mean-field variational
## Bayes: it alternates closed-form updates of beta, sigma2, the local
## scales tau2 and the Dirichlet weights psi until the evidence lower
## bound (ELBO) stabilises.
require(mvtnorm)
require(MASS)
require(pscl)
require(statmod)
# Center the predictors and the response so no intercept is needed.
n <- nrow(x)
p <- ncol(x)
x <- as.matrix(x, ncol=p, byrow=TRUE)
meanx <- apply(x, 2, mean)
x <- scale(x, meanx, FALSE)
mu <- mean(y)
y <- drop(y - mu)
# Sufficient statistics reused in every sweep.
XtX <- t(x) %*% x
xy <- t(x) %*% y
#initial values#
# Ridge-like starting point for beta; tau2 initialised from 1/beta^2.
beta <- drop(backsolve(XtX + diag(nrow=p), xy))
resid <- drop(y - x %*% beta)
sigma2 <- drop((t(resid) %*% resid) / n)
tau2 <- 1 / (beta * beta)
inv.tau2 <- 1/tau2
psi <- 1
alpha <- 1/2
inv.D<-diag(as.vector(inv.tau2))
A <- XtX +inv.D
inv.A <- ginv(A)
# Convergence tolerance and iteration cap for the outer sweep.
tol <-10^-2
maxiter <- 10^2
i<-0
conv <-0
# ELBO evaluated at the currently accepted parameter values.
L.comp <- function() {
-((n+p-1)/2+1)*log(sigma2)-1/(2*sigma2)*sum(resid^2)-1/2*sum(log(tau2))-1/(2*sigma2)*sum(beta^2/(tau2))+sum(log(psi/2))-sum(psi/2*tau2)+(alpha-1)*I(alpha-1>0)*sum(log(psi)) - 1/2*sum(psi)
}
# Same ELBO expression evaluated at the freshly proposed values
# (the *1-suffixed variables) before they are accepted.
Approx <- function() {
-((n+p-1)/2+1)*log(sigma21)-1/(2*sigma21)*sum(resid1^2)-1/2*sum(log(tau21))-1/(2*sigma21)*sum(beta1^2/(tau21))+sum(log(psi1/2))-sum(psi1/2*tau21)+(alpha1-1)*I(alpha1-1>0)*sum(log(psi1)) - 1/2*sum(psi1)
}
ELBO <- L.comp()
diff1 <- ELBO
if(print.it == TRUE){cat(" Iter. FE", alpha, ELBO , fill=T)}
repeat {
# beta update: ridge solution using the current local scales tau2.
inv.D<-diag(as.vector(inv.tau2))
A <- XtX +inv.D
inv.A1 <- ginv(A)
beta1 <- inv.A1%*%t(x)%*%y
beta2 <- beta1^2
xb <- x %*% beta1
resid1 <- (y-xb)
# sigma2 update: b/(a+1), an inverse-gamma-style point update from the
# residual and penalty sums of squares.
a <- (n+p+1)/2
b <-t(resid1) %*% resid1/2 + 1/2* t(beta2) %*%inv.tau2
sigma21 <-b/(a+1)
# tau2 update (cf. Park & Casella's inverse-Gaussian latent-scale
# update in the Bayesian Lasso -- see the references in the header).
inv.tau21 <- sqrt(psi^2*as.numeric(sigma21)/beta1^2)
tau21 <- 1/inv.tau21 + 1/(psi^2)
# psi update: Dirichlet-Laplace weights, renormalised to sum to 1.
psi1 <- (alpha+1)/tau21
psi1<-psi1/sum(psi1)
# alpha update: Newton-style root search (with a randomised step
# size) matching the moment statistic s of the weights.
s <- log(mean(psi1^2)) - mean(log(psi1^2))
a.grad<- function(aa){
return(log(aa) - digamma(aa))
}
a.hess <- function(aa){
return(1/aa - trigamma(aa))
}
a0 <-(alpha+1)/2
a.diff<-1
while(a.diff > tol){
a1 <- a0 - runif(1, 0, tol)*(a.grad(a0)-s)/a.hess(a0)
# Bounce the iterate away from the a <= 0.5 boundary.
a1 <- ifelse(a1<=0.5, a0+runif(1,0.5,1), a1)
a.diff <- abs(a1-a0)
a0 <-a1
}
alpha1 <-2*a1-1
# Convergence is declared when the ELBO improvement drops below tol
# after a burn-in of 5 sweeps; maxiter stops a non-converging run.
ELBO1 <- Approx()
diff1 <- ELBO1 - ELBO
if(print.it == TRUE){cat(" ", i," ", alpha1," "," ",ELBO1, fill=T)}
if(i==maxiter) {
conv<-0
break}
if(i>5 && diff1<tol) {
conv <- 1
break
}
# Accept the proposed values and continue.
i <- i + 1
ELBO <- ELBO1
beta<-beta1
sigma2<-sigma21
tau2<- tau21
alpha <- alpha1
inv.tau2 <- inv.tau21
psi <- psi1
resid <- resid1
inv.A <- inv.A1
}
# Posterior summaries: standard errors from diag(sigma2 * A^-1);
# p-values are the smaller tail probability of a t statistic, with
# effective degrees of freedom trace(H) of the hat matrix H.
beta.sig<-sqrt(diag(inv.A)*sigma2)
beta.t <-beta/beta.sig
H <- x%*%inv.A%*%t(x)
df.t <- sum(diag(H))
t.pval= round(apply(cbind(pt(beta.t, df.t), 1-pt(beta.t, df.t)),1, min), 3)
list(beta=round(beta,3), beta.sig=beta.sig, tau2=tau2, p.value=t.pval, sigma2=sigma2,alpha=alpha, convergence = conv)
}
|
10742fbb4ae6b0e472d98530ce97253847420612
|
3e74b2d423d7b4d472ffce4ead1605621fb2d401
|
/thirdparty/R_FindAllMarkers/require.R
|
fb1e3fb888d15cd98aa5d4542577756e315cb554
|
[] |
no_license
|
jamesjcai/My_Code_Collection
|
954988ee24c7bd34139d35c880a2093b01cef8d1
|
99905cc5d063918cbe6c4126b5d7708a4ddffc90
|
refs/heads/master
| 2023-07-06T07:43:00.956813
| 2023-07-03T22:17:32
| 2023-07-03T22:17:32
| 79,670,576
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32
|
r
|
require.R
|
# Load the packages this helper depends on.
# library() (rather than require()) so a missing package fails loudly
# at startup instead of returning FALSE and continuing.
library(Seurat)
library(Matrix)
|
6dae97eae133f6c9567a40ebfba37ee7d51435be
|
810ea69a7d07656d7f4956ad1698c4be524e007f
|
/Taller2_Octubre18/solucion/Taller2/Solucion.R
|
dd2a0ae058a7e54d8d7cdc49be91a6ea0440c8c9
|
[] |
no_license
|
Jegomezre/EstadisticaII
|
72ae2ec70e71492c9b4b051082b3980699af6481
|
c08db34e139aff2c6b6cfd994dc642b0c95132b0
|
refs/heads/main
| 2023-08-30T04:37:04.962853
| 2021-10-25T22:21:35
| 2021-10-25T22:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,208
|
r
|
Solucion.R
|
# Simple-linear-regression exercise on the Ecommerce Customers data:
# fit yearly spending against membership length, then assess the fit
# and produce interval predictions on a held-out 20% of the rows.
library(tidyverse) # load the tidyverse
# Read the data set
# Exercise 1
datos <- read.csv("Ecommerce_Customers.csv")
str(datos) # show the structure of the data set
# Keep the candidate numeric predictors plus the response.
datos.modelo <- datos %>%
select(Avg..Session.Length:Length.of.Membership, Yearly.Amount.Spent)
# Exercise 2
plot(datos.modelo) # scatterplot matrix of the candidate variables
# Keep the predictor chosen from the scatterplot matrix.
datos.modelo.final <- datos.modelo %>%
select(Length.of.Membership, Yearly.Amount.Spent)
# Exercise 3
# select 80% of the rows for fitting
n <- nrow(datos.modelo.final) # number of rows
set.seed(314159) # fix the sample for reproducibility
index <- sample(1:n, 0.8*n) # draw the training sample
datos.ajuste <- datos.modelo.final[index,] # 80% training rows
datos.prediccion <- datos.modelo.final[-index, ] # remaining 20% for prediction
# fit the model
# Yearly.Amount.Spent_i = beta0 + beta1*Length.of.Membership_i + epsilon_i
# epsilon ~ N(0, sigma^2)
mod <- lm(Yearly.Amount.Spent ~ Length.of.Membership, data = datos.ajuste)
# Scatterplot of the training rows with the fitted least-squares line.
ggplot(datos.ajuste, aes(Length.of.Membership, Yearly.Amount.Spent)) +
geom_point() +
geom_smooth(method = "lm", formula = "y~x", se = F)
# Exercise 4
resumen <- summary(mod) # significance test of the slope
anova(mod) # significance test of the regression
# Exercise 5
coef(mod) #coefficients(mod)
# beta 0 has no useful interpretation here
# beta 1: for each extra year of membership the estimated average
# yearly spending increases by 63.58399 dollars
# Exercise 6
resumen$r.squared # extract R^2
# compute the fitted values of y
y.estimado <- fitted(mod)
y.real <- datos.ajuste$Yearly.Amount.Spent
R.2 <- cor(y.estimado, y.real)^2 # R^2 as a squared correlation
# Recall it can also be obtained from the ANOVA table.
# R^2 says the line explains 66.25% of the variability in the data.
# Exercise 7
predict(mod, newdata = datos.prediccion, interval = "confidence")[1:5, ] # confidence intervals for the mean response
predict(mod, newdata = datos.prediccion, interval = "prediction")[1:5, ] # prediction intervals
# Prediction intervals are ALWAYS wider than confidence intervals for
# the mean response.
|
26f6612c0b535e3894b08caf9a4ca9f39ff95a55
|
d7c06c71c00235be14b06ef74857ae34fa88a213
|
/R/shinyTurkTools-package.R
|
d6b53259597edff1e71d603fe4b39c7423089f00
|
[] |
no_license
|
trinker/shinyTurkTools
|
af78a841bf5a53ccd9848dbed9ffde842a453b7c
|
bbdfa0ebe51e43b325ce100b41e974537a7651ab
|
refs/heads/master
| 2021-01-17T20:50:54.075141
| 2016-08-05T13:48:25
| 2016-08-05T13:48:25
| 62,141,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
shinyTurkTools-package.R
|
#' Tools to accompany the shinyTurk.
#'
#' Helper functions to accompany the shinyTurk application.
#' @docType package
#' @name shinyTurkTools
#' @aliases shinyTurkTools package-shinyTurkTools
NULL
|
858e1e8c818985e75e2ca27393e3cb4fe6da0bb2
|
2ede61c76a368a328c5e490d630125b5f272b3e0
|
/man/iv.replace.woe.Rd
|
818f19a69ce809b1898b35d675f50e828dd713f2
|
[] |
no_license
|
l0o0/woe
|
289d6a422e7f8765545d1d9426ff7da2e5d6f7ef
|
678be360a3517b2c902bd3bdc7dba137aa4c8a03
|
refs/heads/master
| 2020-03-18T23:44:59.670437
| 2018-11-14T03:59:04
| 2018-11-14T03:59:04
| 135,425,130
| 1
| 0
| null | 2018-05-30T10:10:26
| 2018-05-30T10:10:25
| null |
UTF-8
|
R
| false
| false
| 1,016
|
rd
|
iv.replace.woe.Rd
|
\name{iv.replace.woe}
\alias{iv.replace.woe}
\title{Replace raw variables with Weight of Evidence}
\usage{
iv.replace.woe(df, iv, verbose = FALSE)
}
\arguments{
\item{df}{data frame with original data}
\item{iv}{list of information values for variables -
output from \code{\link{iv.mult}} with
\code{summary=FALSE}.}
\item{verbose}{Prints additional details when TRUE.
Useful mainly for debugging.}
}
\description{
Replaces variables in data frame with Weight of Evidence.
This will add new columns with "_woe" suffix to specified
data frame.
}
\examples{
# Replace WoE for list of variables
outiv <- iv.mult(german_data,"gb",vars=c("ca_status","housing","duration"))
x <- iv.replace.woe(german_data,outiv)
str(x)
# Replace WoE for all variables
outiv <- iv.mult(german_data,"gb")
x <- iv.replace.woe(german_data,outiv)
str(x)
# Replace WoE for all numeric variables- ultimate one-liner
x <- iv.replace.woe(german_data,iv.mult(german_data,"gb",vars=varlist(german_data,"numeric")))
str(x)
}
|
da6a9f6d8d8dd58e073123036660cd143a0171b5
|
22540d050618fa7c69c40c89d1397609e2f39936
|
/man/dct_object.Rd
|
5099e2ddd1ef04964f89e9927b9ae15c4c237f89
|
[] |
no_license
|
cran/psyverse
|
8d3e6723d66c292f02a4d0b8978d85f868ca52b9
|
d1e2dc7f6be23f674f7b6cc1d21089995a331ba0
|
refs/heads/master
| 2023-03-17T00:04:47.391838
| 2023-03-05T21:00:07
| 2023-03-05T21:00:07
| 250,514,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,231
|
rd
|
dct_object.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dct_object.R
\name{dct_object}
\alias{dct_object}
\title{Create a DCT object}
\usage{
dct_object(
version = as.character(utils::packageVersion("psyverse")),
id = NULL,
prefix = paste(sample(letters, 4), collapse = ""),
label = "",
date = as.character(Sys.Date()),
dct_version = "1",
ancestry = "",
retires = "",
definition = "",
measure_dev = "",
measure_code = "",
aspect_dev = "",
aspect_code = "",
comments = "",
rel = NULL
)
}
\arguments{
\item{version}{The version of the DCT specification format (normally the version
of the \code{psyverse} package).}
\item{id}{The Unique Construct Identifier (UCID); if not provided,
this is created using the \code{prefix}.}
\item{prefix}{The prefix to use to construct the Unique Construct Identifier
(UCID); ignored if \code{id} is provided.}
\item{label}{The human-readable label for the construct.}
\item{date}{The date at which the construct was created.}
\item{dct_version}{The version of the DCT specification. This can optionally
be used to manage consecutive DCT versions.}
\item{ancestry}{The DCT specification or specifications that this DCT was
based on.}
\item{retires}{The DCT specification or specifications that this DCT renders
obsolete (note that this doesn't mean anything in itself; \code{psyverse} does not
enforce this automatically, nor does PsyCoRe, without configuration).}
\item{definition}{The definition of the construct. This has to be comprehensive,
detailed, accurate, and clearly delineate the relevant aspects of the human
psychology.}
\item{measure_dev}{Instructions for developing measurement instruments that
measure this construct.}
\item{measure_code}{Instructions for coding measurement instruments (e.g. in
systematic reviews) as measurement instruments that measure this construct.
Note that explicitly defining boundary conditions often helps, for example by
explaining the features that coders should look for to distinguish this
construct from closely related constructs (ideally linking to those other
constructs using the \code{dct:UCID} notations).}
\item{aspect_dev}{Instructions for eliciting construct content. Note that
this is not sensible for all constructs; some may be defined at a very
general level, rendering their content insufficiently specific to discuss
or describe.}
\item{aspect_code}{Instructions for coding construct content (i.e. aspects).
Note that explicitly defining boundary conditions often helps, for example by
explaining the features that coders should look for to distinguish this
construct from closely related constructs (ideally linking to those other
constructs using the \code{dct:UCID} notations).}
\item{comments}{Any additional comments.}
\item{rel}{Relationships with other constructs.}
}
\value{
The DCT object.
}
\description{
Create a DCT object
}
\examples{
exampleDCT <-
psyverse::dct_object(
prefix = "exampleConstruct",
label = "An example construct",
definition = "The definition goes here",
measure_dev = "Here you can explain how to measure the construct"
);
}
|
06a1898c1323cce72592d02e81dd52e38c9c160c
|
c4d4329c7cae09599d1b07f6486d7e530b052654
|
/plot1.R
|
32c9f2fb5c87b54844adc3c1b66d3daa07a4e6f9
|
[] |
no_license
|
LukaSlov/ExData_Plotting1
|
ead5d8b8199428ff7bc56889a6a30ed971e9bbb2
|
0ef096fa5ea56de0575834f2222367c0a399a739
|
refs/heads/master
| 2020-12-30T20:43:25.637915
| 2014-12-07T09:17:39
| 2014-12-07T09:17:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Reads household_power_consumption.txt (';'-separated) from the
# working directory and writes plot1.png.

# Install sqldf only when it is missing, instead of reinstalling it
# unconditionally on every run.
if (!requireNamespace("sqldf", quietly = TRUE)) {
  install.packages("sqldf")
}
library(sqldf)

# Filter the two target days at read time; the file separator is ';'.
data <- read.csv.sql("household_power_consumption.txt",
                     sql = "select * from file where Date = '1/2/2007' or Date='2/2/2007'",
                     sep = ";")

# Combine the Date and Time columns into a single datetime value.
time <- paste(data$Date, data$Time)
data$Time <- strptime(time, "%d/%m/%Y %H:%M:%S")

png("plot1.png")
hist(data$Global_active_power, col = "#FF2500",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
98e164a8c10c706458edb2c0eea1f046d064debb
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/DiscreteWeibull/R/Edweibull.R
|
ed9bf061e8d79d3dfd72532700b6016af36637d1
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,555
|
r
|
Edweibull.R
|
Edweibull <- function (q, beta, eps = 1e-04, nmax = 1000, zero = FALSE)
{
  # Expected value of the discrete Weibull(q, beta) distribution.
  # zero = TRUE means the support starts at 0 instead of 1.
  # The geometric special case (beta == 1) has a closed form.
  if (beta == 1) {
    if (zero) {
      return(q/(1 - q))
    }
    return(1/(1 - q))
  }
  # Truncate the series at twice the (1 - eps) quantile, capped at nmax.
  upper <- min(2 * qdweibull(1 - eps, q, beta, zero), nmax)
  if (upper < nmax) {
    support <- 1:upper
    return(sum(ddweibull(support, q, beta, zero) * support))
  }
  # Tail too long: approximate with the continuous Weibull mean
  # (plus a half-unit continuity correction, shifted when zero-based).
  scale.par <- (-1/log(q))^(1/beta)
  scale.par*gamma(1+1/beta) + 1/2 - zero
}
E2dweibull <- function (q, beta, eps = 1e-04, nmax = 1000, zero = FALSE)
{
  # Second moment E[X^2] of the discrete Weibull(q, beta) distribution
  # (zero = TRUE shifts the support to start at 0).
  #
  # BUG FIX: the original used two independent `if`s for the beta == 1
  # cases, so the `else` attached only to the second one and the
  # beta == 1, zero = FALSE closed form was immediately overwritten by
  # the general series branch. Chain with `else if` (as Edweibull
  # already does) so the three cases are mutually exclusive.
  if (beta == 1 && !zero) {
    e <- (1 + q)/(1 - q)^2
  } else if (beta == 1 && zero) {
    e <- q * (1 + q)/(1 - q)^2
  } else {
    # Truncate the series at twice the (1 - eps) quantile, capped at
    # nmax for consistency with Edweibull.
    xmax <- min(2 * qdweibull(1 - eps, q, beta, zero), nmax)
    if (xmax < nmax) {
      x <- 1:xmax
      e <- sum(ddweibull(x, q, beta, zero) * x^2)
    } else {
      # Approximation via the continuous Weibull second moment:
      # Var_continuous + E[X]^2, rounded up and shifted for zero-based
      # support.
      lambda <- (-1/log(q))^(1/beta)
      e <- lambda^2*(gamma(1+2/beta)-(gamma(1+1/beta)^2)) +
        (Edweibull(q, beta, eps = eps, nmax = nmax, zero = zero))^2
      e <- ceiling(e) - zero
    }
  }
  return(e)
}
Vdweibull <- function(q, beta, eps = 0.0001, nmax = 1000, zero = FALSE) {
  # Variance of the discrete Weibull(q, beta): E[X^2] - E[X]^2.
  # For beta == 1 this reduces to the geometric variance, which is
  # unaffected by the zero/one support shift.
  if (beta == 1) {
    return(q/(1 - q)^2)
  }
  E2dweibull(q, beta, eps, nmax, zero) -
    Edweibull(q, beta, eps, nmax, zero)^2
}
ERdweibull <- function(q, beta, eps = 0.0001, nmax = 1000) {
  # E[1/X] for the discrete Weibull(q, beta) on one-based support.
  if (beta == 1) {
    # Closed form in the geometric special case.
    return((1 - q)/q*log(1/(1 - q)))
  }
  # Truncate the series at twice the (1 - eps) quantile, capped at nmax.
  upper <- min(2*qdweibull(1 - eps, q, beta), nmax)
  support <- 1:upper
  # pmf at each support point: P(X = x) = q^(x-1)^beta - q^x^beta.
  pmf <- q^(support - 1)^beta - q^support^beta
  sum(pmf*1/support)
}
|
f51679aea474b2f7841b607607585228a1997b48
|
e6ba7aa1d351004a7a816e0f131c3f1740cb0984
|
/plot2.R
|
5a6e24db4b2b9a63bb3fc4cd02c80dc5be9aa0f0
|
[] |
no_license
|
jerry-ban/coursera_rexploratory
|
b21ae716ca42cd3c5bcfe918a1c2a94a837eb63c
|
a791ed23ced88724efcf119818a9e174ad79fbb5
|
refs/heads/master
| 2021-05-09T09:33:51.965499
| 2018-02-12T23:42:22
| 2018-02-12T23:42:22
| 119,446,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
r
|
plot2.R
|
# plot2.R -- line plot of Global Active Power over 2007-02-01/02.
# Expects household_power_consumption.txt in the working directory and
# writes plot2.png. (The original hard-coded setwd() to a personal
# path; run the script from the data directory instead.)

raw_data <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";",
                       stringsAsFactors = FALSE)

# Build a full timestamp from the separate Date and Time columns, then
# convert Date for range filtering.
raw_data$Timestamp <- strptime(paste(raw_data$Date, raw_data$Time), "%d/%m/%Y %H:%M:%S")
raw_data["Date"] <- as.Date(raw_data$Date, format = "%d/%m/%Y")

# Keep the two target days. BUG FIX: the original compared against
# " 2007-02-02" (leading space), which is not a parseable date string.
data <- subset(raw_data, Date >= "2007-02-01" & Date <= "2007-02-02")

# Coerce the measurement columns (read as character) to numeric.
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Global_reactive_power <- as.numeric(data$Global_reactive_power)
data$Voltage <- as.numeric(data$Voltage)
data$Global_intensity <- as.numeric(data$Global_intensity)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)

# Draw on the current device, then copy to a 480x480 PNG.
par(mfrow = c(1, 1))
with(data, plot(Timestamp, Global_active_power, type = "l",
                ylab = "Global Active Power (kilowatts)", xlab = ""))
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
14acb1367fb49ec6229f8bc43b7ce3c9ebc18ef3
|
a528173483407425c55cbbdf278a2b724830a01e
|
/man/load.images.Rd
|
eb9f6ac67d60d25dcc59f359e5529fb2bfb4092b
|
[
"MIT"
] |
permissive
|
gmke/zernike
|
7ea52f89dc353f7d72a8385078e03bc2853a22c1
|
397a5d2f316e2f95cc1a1209007780855da16b13
|
refs/heads/master
| 2023-05-28T21:58:50.075555
| 2023-05-10T15:07:23
| 2023-05-10T15:07:23
| 166,230,701
| 0
| 0
|
MIT
| 2021-06-18T12:00:04
| 2019-01-17T13:30:49
|
R
|
UTF-8
|
R
| false
| false
| 1,018
|
rd
|
load.images.Rd
|
\name{load.images}
\alias{load.images}
\alias{load.pgm}
\title{Read images}
\description{
Loads image files in jpeg, tiff or raw format.
\code{load.pgm} provides legacy support for reading
files in pgm format.
}
\usage{
load.images(files, channels=c(1,0,0), scale=1, FLIP=FALSE)
load.pgm(files, imdiff=NULL)
}
\arguments{
\item{files}{A vector of character strings with file names}
\item{channels}{channel weights}
\item{scale}{scale factor for image resize}
\item{FLIP}{flip image left for right?}
}
\details{
set FLIP=TRUE to reverse mirror imaged interferograms.
Any file extension other than jpg, jpeg, tif, tiff is assumed to be in RAW format. Supported raw formats
are determined by libraw and may not be up to date
}
\value{
An array containing the contents of the image files.
}
\author{M.L. Peck \email{mpeck1@ix.netcom.com}}
\note{
\code{load.pgm} is the original \code{load.images}
included for legacy support of greyscale portable
anymap files.
}
\keyword{IO}
\keyword{file}
|
6754b0ceb471e0eb23d32c3b1e113e4c92fc5253
|
2657eac7e42b17c815bee869a860254d6eb4640e
|
/deathstar.worker
|
5e94cf4f6451a5df4fd5c5a73b01ecf99f29ef8a
|
[] |
no_license
|
armstrtw/deathstar.core
|
5773edbd49ea303ab9c2aec8a1ce9d4189aa1ecf
|
d480bc63a960ff13f5a4ce5e28a3d4be51170a56
|
refs/heads/master
| 2016-08-04T18:44:26.320823
| 2012-05-04T15:43:52
| 2012-05-04T15:43:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 868
|
worker
|
deathstar.worker
|
#!/usr/bin/env Rscript
# deathstar worker: connects to a dispatcher over ZeroMQ, announces
# readiness on a PUSH socket, then serves function-evaluation requests
# on a REP socket in an endless loop.
#
# Command-line arguments (in order):
#   1. work.endpoint  - ZMQ endpoint to receive work on (REP side)
#   2. ready.endpoint - ZMQ endpoint for the "worker is up" signal (PUSH)
#   3. log.file       - file that captures this worker's output via sink()
library(rzmq)
# Identifier reported with every result: "<hostname>:<pid>".
worker.id <- paste(Sys.info()["nodename"],Sys.getpid(),sep=":")
cmd.args <- commandArgs(trailingOnly=TRUE)
print(cmd.args)
work.endpoint <- cmd.args[1]
ready.endpoint <- cmd.args[2]
log.file <- cmd.args[3]
# Redirect all subsequent output (timings, gc traces) to the log file.
sink(log.file)
context = init.context()
ready.socket = init.socket(context,"ZMQ_PUSH")
work.socket = init.socket(context,"ZMQ_REP")
connect.socket(ready.socket,ready.endpoint)
connect.socket(work.socket,work.endpoint)
while(1) {
## send control message to indicate worker is up
send.null.msg(ready.socket)
## wait for work
msg = receive.socket(work.socket);
# Each request carries an index (so the caller can reassemble results
# in order), the function to run, and its argument list.
index <- msg$index
fun <- msg$fun
args <- msg$args
# try() keeps the worker alive on errors; the result (possibly a
# try-error object) is sent back tagged with the index and worker id.
print(system.time(result <- try(do.call(fun,args),silent=TRUE)))
send.socket(work.socket,list(index=index,result=result,node=worker.id));
print(gc(verbose=TRUE))
}
|
d724485bc43455e880be9abf50c105531f7fc03b
|
7a5fd9fb60ee6e1715e111b8dc46f60bad2ee524
|
/ui.R
|
b9616214d735f411175e831c085a98c3a877337a
|
[] |
no_license
|
filippomiramonti/developingDataProducts
|
b14a22da233420c9145c4cbfc26b56019332d855
|
778d4df246589b3d23bed0ffb83e1a46c773e450
|
refs/heads/master
| 2022-12-05T04:00:10.743697
| 2020-08-26T12:30:13
| 2020-08-26T12:30:13
| 290,468,030
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# UI for a Covid-19 spread dashboard: a radio choice of metric plus an
# animatable date slider on the left; a bar plot and a caption on the
# right (outputs "barPlot" and "text" are rendered server-side).
shinyUI(fluidPage(
  # Recolor the slider bar/handle (ionRangeSlider CSS classes).
  tags$style(HTML(".js-irs-0 .irs-single, .js-irs-0 .irs-bar-edge, .js-irs-0 .irs-bar {background: orange}")),
  # Application title
  titlePanel("Trend of the spread of Covid-19"),
  sidebarLayout(
    sidebarPanel(
      h4("Select Data"),
      # Metric to display; the values are the ids consumed server-side.
      radioButtons(inputId="choice", "What do you want to see?",
                   choices=c("New Cases" = "newCases", "Total Cases" = "totCases", "Total Deaths" = "totDeaths"), selected = "newCases"),
      h4("Select a Date"),
      # Date slider from 2020-02-24 up to yesterday; animate steps
      # through the dates automatically every 300 ms, looping.
      sliderInput("date",
                  "",
                  min = as.Date("2020-02-24","%Y-%m-%d"),
                  max = Sys.Date() - 1,
                  value = as.Date("2020-02-24","%Y-%m-%d"),
                  timeFormat = "%Y-%m-%d",
                  animate = animationOptions(interval = 300, loop = TRUE),
      )
    ),
    mainPanel(
      plotOutput("barPlot"),
      h4(textOutput("text"))
    )
  )
))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.