content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Chi-square goodness-of-fit tests: fruit preference, retired executives, firearms.

# Fruit preference: H0 = all five categories are equally likely (default p).
fruit <- c(32, 28, 16, 14, 10)
res <- chisq.test(fruit)
res
res$p.value
res$statistic
res$parameter        # degrees of freedom = number of categories - 1
res$method
res$data.name
O <- res$observed
E <- res$expected
res$residuals        # Pearson residuals (O - E) / sqrt(E)
res$stdres           # standardized residuals
# Recompute the statistic and p-value manually to verify chisq.test().
chi.stat <- sum((O - E)^2 / E)
pchisq(chi.stat, df = length(fruit) - 1, lower.tail = FALSE)
# Retired executives: observed counts vs hypothesized proportions p.
exec <- c(122, 85, 76, 17)
p <- c(0.38, 0.32, 0.23, 0.07)
res <- chisq.test(exec, p = p)
res$expected         # expected counts = sum(exec) * p
qchisq(0.90, df = 3) # critical value at the 10% significance level
# Firearm deaths: observed counts vs hypothesized proportions.
fire <- c(68, 27, 5)
p <- c(0.74, 0.16, 0.10)
chisq.test(fire, p = p)
| /Topics/ChiGoodnessOfFitTest.R | no_license | statisticallyfit/RStatistics | R | false | false | 516 | r | # Chi Square test Goodness of fit
# Chi-square goodness-of-fit examples.

# Example 1: fruit preference, equal expected proportions by default.
fruit <- c(32, 28, 16, 14, 10)
res <- chisq.test(fruit)
res
res$p.value
res$statistic
res$parameter
res$method
res$data.name
observed <- res$observed
expected <- res$expected
res$residuals
res$stdres
# Reproduce the test statistic directly from the counts.
manual.stat <- sum((observed - expected)^2 / expected)
pchisq(manual.stat, df = 4, lower.tail = FALSE)
# Example 2: retired executives, user-specified proportions.
exec <- c(122, 85, 76, 17)
p <- c(0.38, 0.32, 0.23, 0.07)
res <- chisq.test(exec, p = p)
res$expected
qchisq(0.90, df = 3)
# Example 3: firearm counts against hypothesized proportions.
fire <- c(68, 27, 5)
p <- c(0.74, 0.16, 0.10)
chisq.test(fire, p = p)
|
%% File Name: systime.Rd
%% File Version: 0.14
\name{systime}
\alias{systime}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
\R Utilities: Various Strings Representing System Time
}
\description{
This function generates system time strings in several formats.
}
\usage{
systime()
}
%- maybe also 'usage' for other objects documented here.
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A vector with entries of system time (see Examples).
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#############################################################################
# EXAMPLE 1: Output of systime
#############################################################################
systime()
##
## > miceadds::systime()
## [1] "2016-02-29 10:25:44"
## [2] "2016-02-29"
## [3] "20160229"
## [4] "2016-02-29_1025"
## [5] "2016-02-29_1000"
## [6] "20160229_102544"
## [7] "20160229102544"
## [8] "IPNERZW-C014_20160229102544"
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{R utilities}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/systime.Rd | no_license | stefvanbuuren/miceadds | R | false | false | 1,383 | rd | %% File Name: systime.Rd
%% File Version: 0.14
\name{systime}
\alias{systime}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
\R Utilities: Various Strings Representing System Time
}
\description{
This function generates system time strings in several formats.
}
\usage{
systime()
}
%- maybe also 'usage' for other objects documented here.
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A vector with entries of system time (see Examples).
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#############################################################################
# EXAMPLE 1: Output of systime
#############################################################################
systime()
##
## > miceadds::systime()
## [1] "2016-02-29 10:25:44"
## [2] "2016-02-29"
## [3] "20160229"
## [4] "2016-02-29_1025"
## [5] "2016-02-29_1000"
## [6] "20160229_102544"
## [7] "20160229102544"
## [8] "IPNERZW-C014_20160229102544"
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{R utilities}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Packages required by the seasonal-break rainfall analysis.
libs <- c("dplyr", "tidyr",
          "ggplot2", "ggpubr",
          "ncdf4", "raster", "rgdal",
          "lubridate",
          "rgeos", "smoothr", "sf",
          "reshape",
          "tidyverse")

# Install any packages in `lib` that are not already installed.
# Does nothing (returns NULL invisibly) when everything is present.
install.libraries <- function(lib = NULL) {
  new <- lib[!(lib %in% installed.packages()[, "Package"])]
  if (length(new) > 0) {
    install.packages(new, dependencies = TRUE)
  }
}

# Attach each package in `lib`; returns a named logical vector indicating
# which packages loaded successfully.
# BUG FIX: the original ignored its `lib` argument and always loaded the
# global `libs` vector. Defaulting `lib = libs` keeps the zero-argument
# call behaving as before.
load.libraries <- function(lib = libs) {
  sapply(lib, require, character.only = TRUE)
}
install.libraries(libs)
load.libraries(libs)
#### Bring in site data
lameroo <- read.csv("W:/Pastures/Gridded_seasonal_break/Check_code_selected_sites/Lameroo_seasonal_break_yrs.csv")
lameroo_look_up <- gather(lameroo,
year, day_of_year,
Year_1971:Year_2018)
#change year clm to number and remove the Year_ prefix
lameroo_look_up <- separate(lameroo_look_up,
year,
into = c("junk", "year_numb"),
sep = "Year_")
head(lameroo_look_up)
lameroo_look_up <- dplyr::select(lameroo_look_up, year_numb, day_of_year)
#This the table that will need to be populated with rainfall add clm that looks up what was the rainfall that triggered this
##################################################################################################################
################### Start here ############################################################################################
file_save <- ("W:/Pastures/Gridded_seasonal_break") #jackie
#setwd("T:/Pastures/Gridded_seasonal_break") #bonny
setwd("I:/work/silo") #the folder now has curley bracket which is means something in R so the is a work around
getwd()
#------------------------------------------------------------------------------------------------------------------
#bring in my spatial data
#set the area for running the analysis
#site_import <- st_read("W:/Pastures/Gridded_seasonal_break/Boundary_for_analysis/Lamaroo_rectangle.shp")
site_import <- st_read("W:/Pastures/Gridded_seasonal_break/Boundary_for_analysis/GRDC_AgroEcological_zones_boundaries_06_region_jax.shp")
site_sf <- as(site_import, "Spatial") #convert to a sp object
site_name <- "Aust"
site <- site_sf
plot(site)
#------------------------------------------------------------------------------------------------------------
##1. define the boundary with and use a single layer raster
daily_rain_1 <- brick(
paste("daily_rain/",
"2000", ".daily_rain.nc", sep = ""),varname = "daily_rain")
#crop to a fix area
daily_rain_crop <- crop(daily_rain_1, site)
daily_rain_crop
site_bound_raster <- daily_rain_crop$ X2000.01.01
plot(site_bound_raster)
site_bound_raster
##2. extract points from the raster as a point shapefile
site_bound_pts <- rasterToPoints(site_bound_raster)
names(site_bound_pts) <- c("longitude", "latitude", "value")
site_bound_pts_df <- as.data.frame(site_bound_pts)
site_bound_pts_df <- dplyr::select(site_bound_pts_df, x, y)
site_bound_pts_df_point <- SpatialPointsDataFrame(site_bound_pts_df[,c("x", "y")], site_bound_pts_df)
head(site_bound_pts_df_point)
#-----------------
site_sf <- as(site_import, "Spatial") #convert to a sp object
year_input <- 1975
site_name <- "Aust"
site <- site_sf
plot(site)
rolling_avearge_days = 5
daily_rain <- brick(
paste("daily_rain/",
year_input, ".daily_rain.nc", sep = ""),varname = "daily_rain")
#crop to a fix area
daily_rain_crop <- crop(daily_rain, site)
#only use a few days
daily_rain_crop_subset_day <- subset(daily_rain_crop, 61:213) #pull out the 1stMarch to 31th July leap year
#Add the moving window avearge of 7 days ? should this be sum?
seasonal_break_rainfall_MovMean7 <- calc(daily_rain_crop_subset_day, function(x) movingFun(x, rolling_avearge_days, sum, "to"))
#seasonal_break_rainfall_MovMean7 <- calc(daily_rain_crop_subset_day, function(x) movingFun(x, 1, sum, "to"))
seasonal_break_rainfall_MovMean7
#---------------------------------------------------------------------------------------------------------
Rain <- seasonal_break_rainfall_MovMean7
Rain_extract <- raster::extract(Rain,
site_bound_pts_df_point, method="simple")
Rain_extract_wide <- data.frame(site_bound_pts_df_point$x,
site_bound_pts_df_point$y,
Rain_extract)
##### assign names for all the layers this will days
names(Rain_extract_wide) <- c("POINT_X", "POINT_Y",
"61", "62", "63", "64", "65", "66","67","68","69","70",
"71", "72", "73", "74", "75", "76","77","78","79","80",
"81", "82", "83", "84", "85", "86","87","88","89","90",
"91", "92", "93", "94", "95", "96","97","98","99","100",
"101", "102", "103", "104", "105", "106","107","108","109","110",
"111", "112", "113", "114", "115", "116","117","118","119","120",
"121", "122", "123", "124", "125", "126","127","128","129","130",
"131", "132", "133", "134", "135", "136","137","138","139","140",
"141", "142", "143", "144", "145", "146","147","148","149","150",
"151", "152", "153", "154", "155", "156","157","158","159","160",
"161", "162", "163", "164", "165", "166","167","168","169","170",
"171", "172", "173", "174", "175", "176","177","178","179","180",
"181", "182","183", "184", "185", "186", "187", "188" , "189",
"190", "191", "192", "193", "194", "195", "196", "197", "198",
"199", "200", "201", "202", "203", "204", "205", "206", "207",
"208", "209", "210", "211", "212", "213")
#Remove the clm that have no data for Rain_evap and add the coords
#str(Rain_extract_wide)
#tail(Rain_extract_wide)
Rain_extract_wide <- dplyr::select(Rain_extract_wide, -"61", -"62", -"63", -"64", -"65", -"66" )
Rain_extract_wide_x_y <- dplyr::select(Rain_extract_wide, "POINT_X", "POINT_Y")
Rain_extract_wide_values <- dplyr::select(Rain_extract_wide,"67":"213")
#make a df with cood and values also add a clm that has a unquie id for grid cell
Rain_extract_df <- cbind(Rain_extract_wide_x_y, Rain_extract_wide_values)
Rain_extract_df <- mutate(Rain_extract_df, x_y = paste0(POINT_X, "_", POINT_Y))
Rain_extract_df
Rain_extract_df_narrow <- gather(Rain_extract_df,
key = "day", value = "Rain", `67`:`213` )
head(Rain_extract_df_narrow) # this is only for one year and one site
#for the day clm I want to look up the rain value
head(Rain_extract_df_narrow) #1972
# Rename the clm called Rain to match the year eg Rain_Yr
Rain_extract_df_narrow <- rename(Rain_extract_df_narrow, c("Rain"= paste0("Rain_", year_input)))
head(Rain_extract_df_narrow ) #this is all of Aust rainfall for ach day but just one year
### Lets try and build this up for a few years
#Rain_extract_df_narrow_1972_1975 <- Rain_extract_df_narrow
Rain_extract_df_narrow_1972_1975 <- left_join(Rain_extract_df_narrow_1972_1975, Rain_extract_df_narrow)
head(Rain_extract_df_narrow_1972_1975 )
#subset my data for x_y = 114_-27.15
subset_Rain_extract_df_narrow_1972_1975 <- filter(Rain_extract_df_narrow_1972_1975, x_y == "146.1_-30.7")
head(subset_Rain_extract_df_narrow_1972_1975)
# I want a list of ID numbers for the sites I am interested in
### Bring in the data that I want to look up.
seasonal_break_output <-read.csv("W:/Pastures/Gridded_seasonal_break/Check_code_selected_sites/GRDC_zone_seasonal_break_yrs_v3_join_study_sites.csv")
subset_seasonal_break_output <- filter(seasonal_break_output, x_y == "146.1_-30.7")
head(subset_seasonal_break_output)
#make this look better....narrow dataset
subset_seasonal_break_output <- gather(subset_seasonal_break_output,
key = "Year", value = "day", 'Year_1971':'Year_2018' )
head(subset_seasonal_break_output)
subset_seasonal_break_output <- separate(subset_seasonal_break_output,Year, c("junk", "year"), "_" )
subset_seasonal_break_output <- dplyr::select(subset_seasonal_break_output, year, day)
head(subset_seasonal_break_output) #df
head(subset_Rain_extract_df_narrow_1972_1975) #lookup
rain_long <- gather(subset_Rain_extract_df_narrow_1972_1975,
key = "year", value = "rain", "Rain_1972": "Rain_1975" ) # this is the range of years
head(rain_long)
#strip the rain out of the name
rain_long <- separate(rain_long,year, c("junk", "year"), "_" )
rain_long$year <- as.integer(rain_long$year)
subset_seasonal_break_output$year <- as.integer(subset_seasonal_break_output$year)
day_break_rain <- subset_seasonal_break_output %>%
left_join(rain_long, by = c("day","year"))
head(day_break_rain)
str(subset_seasonal_break_output)
str(subset_Rain_extract_df_narrow_1972_1975)
subset_Rain_extract_df_narrow_1972_1975$day <- as.numeric(subset_Rain_extract_df_narrow_1972_1975$day )
#I want to add a clm to subset_seasonal_break_output
#Can I join it???
test <- left_join(subset_seasonal_break_output, subset_Rain_extract_df_narrow_1972_1975)
head(test)
test <- mutate(test, look_up_clm = paste0("Rain_", year))
filter(subset_seasonal_break_output, day %in% rain_in_year_I_want) #this pulls out value of 186
test2 <- 1973
dplyr::select(test, paste0("Rain_", test2))
test3 <- mutate(test, report_rain = (dplyr::select(test, paste0("Rain_", test2))))
test3 <- mutate(test, report_rain = Rain_1973)
head(test3)
# test3 <- test %>% rowwise() %>% mutate(new_clm = min(Rain_1972,Rain_1975))
# head(test3)
head(test)
test %>%
mutate(new_clm = ifelse(day == "1971", "XXXX", ColB))
#for lamaroo this might be 93953 (i can't remember how I did this???)
year_I_want <- filter(lameroo_look_up, year_numb == "1972")
year_I_want <- year_I_want[1,2]
year_I_want
rain_in_year_I_want <- filter(Rain_extract_df_narrow, day == year_I_want)
rain_in_year_I_want
filter(Rain_extract_df_narrow, day %in% rain_in_year_I_want) #this pulls out value of 186
| /Trigger_Rain_event_April.R | no_license | JackieOuzman/Seasonal_break | R | false | false | 10,337 | r |
# Packages required by the seasonal-break rainfall analysis.
libs <- c("dplyr", "tidyr",
          "ggplot2", "ggpubr",
          "ncdf4", "raster", "rgdal",
          "lubridate",
          "rgeos", "smoothr", "sf",
          "reshape",
          "tidyverse")

# Install any packages in `lib` that are not already installed.
# Does nothing (returns NULL invisibly) when everything is present.
install.libraries <- function(lib = NULL) {
  new <- lib[!(lib %in% installed.packages()[, "Package"])]
  if (length(new) > 0) {
    install.packages(new, dependencies = TRUE)
  }
}

# Attach each package in `lib`; returns a named logical vector indicating
# which packages loaded successfully.
# BUG FIX: the original ignored its `lib` argument and always loaded the
# global `libs` vector. Defaulting `lib = libs` keeps the zero-argument
# call behaving as before.
load.libraries <- function(lib = libs) {
  sapply(lib, require, character.only = TRUE)
}
install.libraries(libs)
load.libraries(libs)
#### Bring in site data
lameroo <- read.csv("W:/Pastures/Gridded_seasonal_break/Check_code_selected_sites/Lameroo_seasonal_break_yrs.csv")
lameroo_look_up <- gather(lameroo,
year, day_of_year,
Year_1971:Year_2018)
#change year clm to number and remove the Year_ prefix
lameroo_look_up <- separate(lameroo_look_up,
year,
into = c("junk", "year_numb"),
sep = "Year_")
head(lameroo_look_up)
lameroo_look_up <- dplyr::select(lameroo_look_up, year_numb, day_of_year)
#This the table that will need to be populated with rainfall add clm that looks up what was the rainfall that triggered this
##################################################################################################################
################### Start here ############################################################################################
file_save <- ("W:/Pastures/Gridded_seasonal_break") #jackie
#setwd("T:/Pastures/Gridded_seasonal_break") #bonny
setwd("I:/work/silo") #the folder now has curley bracket which is means something in R so the is a work around
getwd()
#------------------------------------------------------------------------------------------------------------------
#bring in my spatial data
#set the area for running the analysis
#site_import <- st_read("W:/Pastures/Gridded_seasonal_break/Boundary_for_analysis/Lamaroo_rectangle.shp")
site_import <- st_read("W:/Pastures/Gridded_seasonal_break/Boundary_for_analysis/GRDC_AgroEcological_zones_boundaries_06_region_jax.shp")
site_sf <- as(site_import, "Spatial") #convert to a sp object
site_name <- "Aust"
site <- site_sf
plot(site)
#------------------------------------------------------------------------------------------------------------
##1. define the boundary with and use a single layer raster
daily_rain_1 <- brick(
paste("daily_rain/",
"2000", ".daily_rain.nc", sep = ""),varname = "daily_rain")
#crop to a fix area
daily_rain_crop <- crop(daily_rain_1, site)
daily_rain_crop
site_bound_raster <- daily_rain_crop$ X2000.01.01
plot(site_bound_raster)
site_bound_raster
##2. extract points from the raster as a point shapefile
site_bound_pts <- rasterToPoints(site_bound_raster)
names(site_bound_pts) <- c("longitude", "latitude", "value")
site_bound_pts_df <- as.data.frame(site_bound_pts)
site_bound_pts_df <- dplyr::select(site_bound_pts_df, x, y)
site_bound_pts_df_point <- SpatialPointsDataFrame(site_bound_pts_df[,c("x", "y")], site_bound_pts_df)
head(site_bound_pts_df_point)
#-----------------
site_sf <- as(site_import, "Spatial") #convert to a sp object
year_input <- 1975
site_name <- "Aust"
site <- site_sf
plot(site)
rolling_avearge_days = 5
daily_rain <- brick(
paste("daily_rain/",
year_input, ".daily_rain.nc", sep = ""),varname = "daily_rain")
#crop to a fix area
daily_rain_crop <- crop(daily_rain, site)
#only use a few days
daily_rain_crop_subset_day <- subset(daily_rain_crop, 61:213) #pull out the 1stMarch to 31th July leap year
#Add the moving window avearge of 7 days ? should this be sum?
seasonal_break_rainfall_MovMean7 <- calc(daily_rain_crop_subset_day, function(x) movingFun(x, rolling_avearge_days, sum, "to"))
#seasonal_break_rainfall_MovMean7 <- calc(daily_rain_crop_subset_day, function(x) movingFun(x, 1, sum, "to"))
seasonal_break_rainfall_MovMean7
#---------------------------------------------------------------------------------------------------------
Rain <- seasonal_break_rainfall_MovMean7
Rain_extract <- raster::extract(Rain,
site_bound_pts_df_point, method="simple")
Rain_extract_wide <- data.frame(site_bound_pts_df_point$x,
site_bound_pts_df_point$y,
Rain_extract)
##### assign names for all the layers this will days
names(Rain_extract_wide) <- c("POINT_X", "POINT_Y",
"61", "62", "63", "64", "65", "66","67","68","69","70",
"71", "72", "73", "74", "75", "76","77","78","79","80",
"81", "82", "83", "84", "85", "86","87","88","89","90",
"91", "92", "93", "94", "95", "96","97","98","99","100",
"101", "102", "103", "104", "105", "106","107","108","109","110",
"111", "112", "113", "114", "115", "116","117","118","119","120",
"121", "122", "123", "124", "125", "126","127","128","129","130",
"131", "132", "133", "134", "135", "136","137","138","139","140",
"141", "142", "143", "144", "145", "146","147","148","149","150",
"151", "152", "153", "154", "155", "156","157","158","159","160",
"161", "162", "163", "164", "165", "166","167","168","169","170",
"171", "172", "173", "174", "175", "176","177","178","179","180",
"181", "182","183", "184", "185", "186", "187", "188" , "189",
"190", "191", "192", "193", "194", "195", "196", "197", "198",
"199", "200", "201", "202", "203", "204", "205", "206", "207",
"208", "209", "210", "211", "212", "213")
#Remove the clm that have no data for Rain_evap and add the coords
#str(Rain_extract_wide)
#tail(Rain_extract_wide)
Rain_extract_wide <- dplyr::select(Rain_extract_wide, -"61", -"62", -"63", -"64", -"65", -"66" )
Rain_extract_wide_x_y <- dplyr::select(Rain_extract_wide, "POINT_X", "POINT_Y")
Rain_extract_wide_values <- dplyr::select(Rain_extract_wide,"67":"213")
#make a df with cood and values also add a clm that has a unquie id for grid cell
Rain_extract_df <- cbind(Rain_extract_wide_x_y, Rain_extract_wide_values)
Rain_extract_df <- mutate(Rain_extract_df, x_y = paste0(POINT_X, "_", POINT_Y))
Rain_extract_df
Rain_extract_df_narrow <- gather(Rain_extract_df,
key = "day", value = "Rain", `67`:`213` )
head(Rain_extract_df_narrow) # this is only for one year and one site
#for the day clm I want to look up the rain value
head(Rain_extract_df_narrow) #1972
# Rename the clm called Rain to match the year eg Rain_Yr
Rain_extract_df_narrow <- rename(Rain_extract_df_narrow, c("Rain"= paste0("Rain_", year_input)))
head(Rain_extract_df_narrow ) #this is all of Aust rainfall for ach day but just one year
### Lets try and build this up for a few years
#Rain_extract_df_narrow_1972_1975 <- Rain_extract_df_narrow
Rain_extract_df_narrow_1972_1975 <- left_join(Rain_extract_df_narrow_1972_1975, Rain_extract_df_narrow)
head(Rain_extract_df_narrow_1972_1975 )
#subset my data for x_y = 114_-27.15
subset_Rain_extract_df_narrow_1972_1975 <- filter(Rain_extract_df_narrow_1972_1975, x_y == "146.1_-30.7")
head(subset_Rain_extract_df_narrow_1972_1975)
# I want a list of ID numbers for the sites I am interested in
### Bring in the data that I want to look up.
seasonal_break_output <-read.csv("W:/Pastures/Gridded_seasonal_break/Check_code_selected_sites/GRDC_zone_seasonal_break_yrs_v3_join_study_sites.csv")
subset_seasonal_break_output <- filter(seasonal_break_output, x_y == "146.1_-30.7")
head(subset_seasonal_break_output)
#make this look better....narrow dataset
subset_seasonal_break_output <- gather(subset_seasonal_break_output,
key = "Year", value = "day", 'Year_1971':'Year_2018' )
head(subset_seasonal_break_output)
subset_seasonal_break_output <- separate(subset_seasonal_break_output,Year, c("junk", "year"), "_" )
subset_seasonal_break_output <- dplyr::select(subset_seasonal_break_output, year, day)
head(subset_seasonal_break_output) #df
head(subset_Rain_extract_df_narrow_1972_1975) #lookup
rain_long <- gather(subset_Rain_extract_df_narrow_1972_1975,
key = "year", value = "rain", "Rain_1972": "Rain_1975" ) # this is the range of years
head(rain_long)
#strip the rain out of the name
rain_long <- separate(rain_long,year, c("junk", "year"), "_" )
rain_long$year <- as.integer(rain_long$year)
subset_seasonal_break_output$year <- as.integer(subset_seasonal_break_output$year)
day_break_rain <- subset_seasonal_break_output %>%
left_join(rain_long, by = c("day","year"))
head(day_break_rain)
str(subset_seasonal_break_output)
str(subset_Rain_extract_df_narrow_1972_1975)
subset_Rain_extract_df_narrow_1972_1975$day <- as.numeric(subset_Rain_extract_df_narrow_1972_1975$day )
#I want to add a clm to subset_seasonal_break_output
#Can I join it???
test <- left_join(subset_seasonal_break_output, subset_Rain_extract_df_narrow_1972_1975)
head(test)
test <- mutate(test, look_up_clm = paste0("Rain_", year))
filter(subset_seasonal_break_output, day %in% rain_in_year_I_want) #this pulls out value of 186
test2 <- 1973
dplyr::select(test, paste0("Rain_", test2))
test3 <- mutate(test, report_rain = (dplyr::select(test, paste0("Rain_", test2))))
test3 <- mutate(test, report_rain = Rain_1973)
head(test3)
# test3 <- test %>% rowwise() %>% mutate(new_clm = min(Rain_1972,Rain_1975))
# head(test3)
head(test)
test %>%
mutate(new_clm = ifelse(day == "1971", "XXXX", ColB))
#for lamaroo this might be 93953 (i can't remember how I did this???)
year_I_want <- filter(lameroo_look_up, year_numb == "1972")
year_I_want <- year_I_want[1,2]
year_I_want
rain_in_year_I_want <- filter(Rain_extract_df_narrow, day == year_I_want)
rain_in_year_I_want
filter(Rain_extract_df_narrow, day %in% rain_in_year_I_want) #this pulls out value of 186
|
# Tutorial: compare several classifiers on the iris data with caret.
# https://machinelearningmastery.com/machine-learning-in-r-step-by-step/
# FIX: removed a redundant plain install.packages("caret") call that was
# immediately superseded by the Depends/Suggests install below, and a stray
# interactive help("prop.table") call left over from exploration.
install.packages("caret", dependencies=c("Depends", "Suggests"))
library(caret)
# attach the iris dataset to the environment and work on a copy
data(iris)
dataset <- iris
head(dataset)
# create a list of 80% of the rows in the original dataset we can use for training
validation_index <- createDataPartition(dataset$Species, p=0.80, list=FALSE)
# select 20% of the data for validation
validation <- dataset[-validation_index,]
# use the remaining 80% of data to training and testing the models
dataset <- dataset[validation_index,]
# dimensions of dataset
dim(dataset)
# list types for each attribute
sapply(dataset, class)
# take a peek at the first 5 rows of the data
head(dataset)
# list the levels for the class
levels(dataset$Species)
# summarize the class distribution as counts and percentages
percentage <- prop.table(table(dataset$Species)) * 100
cbind(freq=table(dataset$Species), percentage=percentage)
# summarize attribute distributions
summary(dataset)
# plots: split predictors (x) from the class label (y)
x <- dataset[,1:4]
y <- dataset[,5]
# boxplot for each attribute on one image
par(mfrow=c(1,4))
for(i in 1:4) {
  # CONSISTENCY FIX: label from `dataset`, not the original `iris`
  # (same names, but the script should reference its working copy)
  boxplot(x[,i], main=names(dataset)[i])
}
par(mfrow=c(1,1))
# barplot for class breakdown
plot(y)
# Multivariate Plots
# scatterplot matrix
featurePlot(x=x, y=y, plot="ellipse")
# box and whisker plots for each attribute
featurePlot(x=x, y=y, plot="box")
# density plots for each attribute by class value
scales <- list(x=list(relation="free"), y=list(relation="free"))
featurePlot(x=x, y=y, plot="density", scales=scales)
# Run algorithms using 10-fold cross validation, scored on accuracy
control <- trainControl(method="cv", number=10)
metric <- "Accuracy"
# Build Models (same seed before each fit so resampling folds match)
# a) linear algorithms
set.seed(7)
fit.lda <- train(Species~., data=dataset, method="lda", metric=metric, trControl=control)
# b) nonlinear algorithms
# CART
set.seed(7)
fit.cart <- train(Species~., data=dataset, method="rpart", metric=metric, trControl=control)
# kNN
set.seed(7)
fit.knn <- train(Species~., data=dataset, method="knn", metric=metric, trControl=control)
# c) advanced algorithms
# SVM with a radial kernel
set.seed(7)
fit.svm <- train(Species~., data=dataset, method="svmRadial", metric=metric, trControl=control)
# Random Forest
set.seed(7)
fit.rf <- train(Species~., data=dataset, method="rf", metric=metric, trControl=control)
# summarize accuracy of models across resamples
results <- resamples(list(lda=fit.lda, cart=fit.cart, knn=fit.knn, svm=fit.svm, rf=fit.rf))
summary(results)
# compare accuracy of models
dotplot(results)
# summarize Best Model
print(fit.lda)
# estimate skill of LDA on the validation dataset
predictions <- predict(fit.lda, validation)
confusionMatrix(predictions, validation$Species)
# Read: What is Kappa: https://www.r-bloggers.com/k-is-for-cohens-kappa/
| /multiple_model_evaluation in-class exercise.R | no_license | KehanWang/DataAnalyticsSpring2020 | R | false | false | 2,917 | r | # https://machinelearningmastery.com/machine-learning-in-r-step-by-step/
install.packages("caret")
install.packages("caret", dependencies=c("Depends", "Suggests"))
library(caret)
# attach the iris dataset to the environment
data(iris)
# rename the dataset
dataset <- iris
head(dataset)
# create a list of 80% of the rows in the original dataset we can use for training
validation_index <- createDataPartition(dataset$Species, p=0.80, list=FALSE)
# select 20% of the data for validation
validation <- dataset[-validation_index,]
# use the remaining 80% of data to training and testing the models
dataset <- dataset[validation_index,]
# dimensions of dataset
dim(dataset)
# list types for each attribute
sapply(dataset, class)
# take a peek at the first 5 rows of the data
head(dataset)
# list the levels for the class
levels(dataset$Species)
# summarize the class distribution
help("prop.table")
percentage <- prop.table(table(dataset$Species)) * 100
cbind(freq=table(dataset$Species), percentage=percentage)
# summarize attribute distributions
summary(dataset)
# plots
# split input and output
x <- dataset[,1:4]
y <- dataset[,5]
# boxplot for each attribute on one image
par(mfrow=c(1,4))
for(i in 1:4) {
boxplot(x[,i], main=names(iris)[i])
}
par(mfrow=c(1,1))
# barplot for class breakdown
plot(y)
# Multivariate Plots
# scatterplot matrix
featurePlot(x=x, y=y, plot="ellipse")
# box and whisker plots for each attribute
featurePlot(x=x, y=y, plot="box")
# density plots for each attribute by class value
scales <- list(x=list(relation="free"), y=list(relation="free"))
featurePlot(x=x, y=y, plot="density", scales=scales)
# Run algorithms using 10-fold cross validation
control <- trainControl(method="cv", number=10)
metric <- "Accuracy"
#Build Models
# a) linear algorithms
set.seed(7)
fit.lda <- train(Species~., data=dataset, method="lda", metric=metric, trControl=control)
# b) nonlinear algorithms
# CART
set.seed(7)
fit.cart <- train(Species~., data=dataset, method="rpart", metric=metric, trControl=control)
# kNN
set.seed(7)
fit.knn <- train(Species~., data=dataset, method="knn", metric=metric, trControl=control)
# c) advanced algorithms
# SVM
set.seed(7)
fit.svm <- train(Species~., data=dataset, method="svmRadial", metric=metric, trControl=control)
# Random Forest
set.seed(7)
fit.rf <- train(Species~., data=dataset, method="rf", metric=metric, trControl=control)
# summarize accuracy of models
results <- resamples(list(lda=fit.lda, cart=fit.cart, knn=fit.knn, svm=fit.svm, rf=fit.rf))
summary(results)
# compare accuracy of models
dotplot(results)
# summarize Best Model
print(fit.lda)
# estimate skill of LDA on the validation dataset
predictions <- predict(fit.lda, validation)
confusionMatrix(predictions, validation$Species)
# Read: What is Kappa: https://www.r-bloggers.com/k-is-for-cohens-kappa/
|
shinyServer(function(input, output, session) {
source("code/fineParticles.R", local=TRUE)
source("code/ozone.R", local=TRUE)
}) # end
| /server.R | no_license | pssguy/bcGov | R | false | false | 158 | r |
shinyServer(function(input, output, session) {
source("code/fineParticles.R", local=TRUE)
source("code/ozone.R", local=TRUE)
}) # end
|
library(matlib)
### Name: pointOnLine
### Title: Position of a point along a line
### Aliases: pointOnLine
### ** Examples
x1 <- c(0, 0)
x2 <- c(1, 4)
pointOnLine(x1, x2, 0.5)
pointOnLine(x1, x2, 0.5, absolute=FALSE)
pointOnLine(x1, x2, 1.1)
y1 <- c(1, 2, 3)
y2 <- c(3, 2, 1)
pointOnLine(y1, y2, 0.5)
pointOnLine(y1, y2, 0.5, absolute=FALSE)
| /data/genthat_extracted_code/matlib/examples/pointOnLine.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 350 | r | library(matlib)
### Name: pointOnLine
### Title: Position of a point along a line
### Aliases: pointOnLine
### ** Examples
x1 <- c(0, 0)
x2 <- c(1, 4)
pointOnLine(x1, x2, 0.5)
pointOnLine(x1, x2, 0.5, absolute=FALSE)
pointOnLine(x1, x2, 1.1)
y1 <- c(1, 2, 3)
y2 <- c(3, 2, 1)
pointOnLine(y1, y2, 0.5)
pointOnLine(y1, y2, 0.5, absolute=FALSE)
|
cacheSolve <- function(x, ...) {
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
} | /cacheSolve.R | no_license | lemabe/Data-science2016 | R | false | false | 211 | r | cacheSolve <- function(x, ...) {
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
} |
本人在SpringMvc配置thymeleaf时,遇到html中输入th: 没有自动提示的现象,苦寻无果,后查找官网信息,要加载插件才行。
打开eclipse的插件安装,Help—>Installations new SoftWare—>add
插件地址为: http://www.thymeleaf.org/eclipse-plugin-update-site/
一路next,最后重启Eclipse即可。
插件官方文档说明https://github.com/thymeleaf/thymeleaf-extras-eclipse-plugin<!-- Baidu Button BEGIN --> <!-- Baidu Button END --><!--172.16.140.12--><!-- Baidu Button BEGIN --> <!-- Baidu Button END -->
添加DTD 类型约束文件,下载地址:http://www.thymeleaf.org/xsd/thymeleaf-extras-dialect-2.1.xsd
Window->Preferences->XML->XML Catalog->User Specified Entries窗口中,选择Add 按纽,选择上面下载的文件
最后:右键项目 >> Thymeleaf >> Add Thymeleaf Nature.
本项目为百度搜索引擎类似demo,输入数字模糊查询数据库,返回数组显示在列表中。 | /sts/demo/readme.rd | permissive | cheliangmin/myProject | R | false | false | 997 | rd | 本人在SpringMvc配置thymeleaf时,遇到html中输入th: 没有自动提示的现象,苦寻无果,后查找官网信息,要加载插件才行。
打开eclipse的插件安装,Help—>Installations new SoftWare—>add
插件地址为: http://www.thymeleaf.org/eclipse-plugin-update-site/
一路next,最后重启Eclipse即可。
插件官方文档说明https://github.com/thymeleaf/thymeleaf-extras-eclipse-plugin<!-- Baidu Button BEGIN --> <!-- Baidu Button END --><!--172.16.140.12--><!-- Baidu Button BEGIN --> <!-- Baidu Button END -->
添加DTD 类型约束文件,下载地址:http://www.thymeleaf.org/xsd/thymeleaf-extras-dialect-2.1.xsd
Window->Preferences->XML->XML Catalog->User Specified Entries窗口中,选择Add 按纽,选择上面下载的文件
最后:右键项目 >> Thymeleaf >> Add Thymeleaf Nature.
本项目为百度搜索引擎类似demo,输入数字模糊查询数据库,返回数组显示在列表中。 |
#' Enable bookmarking mode, using values from the URL, if present.
#'
#' @import shiny
#' @export
start_app <- function() {
enableBookmarking("url")
shinyApp(ui = ui, server = server)
}
.onAttach <- function(libname, pkgname) {
packageStartupMessage("Run covidshiny::start_app() to launch the app")
}
| /R/global.R | permissive | karthik/CovidShinyModel | R | false | false | 308 | r | #' Enable bookmarking mode, using values from the URL, if present.
#'
#' @import shiny
#' @export
start_app <- function() {
enableBookmarking("url")
shinyApp(ui = ui, server = server)
}
.onAttach <- function(libname, pkgname) {
packageStartupMessage("Run covidshiny::start_app() to launch the app")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fusiontables_objects.R
\name{Column}
\alias{Column}
\title{Column Object}
\usage{
Column(Column.baseColumn = NULL, baseColumn = NULL, columnId = NULL,
columnJsonSchema = NULL, columnPropertiesJson = NULL,
description = NULL, formatPattern = NULL, graphPredicate = NULL,
name = NULL, type = NULL, validValues = NULL, validateData = NULL)
}
\arguments{
\item{Column.baseColumn}{The \link{Column.baseColumn} object or list of objects}
\item{baseColumn}{Identifier of the base column}
\item{columnId}{Identifier for the column}
\item{columnJsonSchema}{JSON schema for interpreting JSON in this column}
\item{columnPropertiesJson}{JSON object containing custom column properties}
\item{description}{Column description}
\item{formatPattern}{Format pattern}
\item{graphPredicate}{Column graph predicate}
\item{name}{Name of the column}
\item{type}{Type of the column}
\item{validValues}{List of valid values used to validate data and supply a drop-down list of values in the web application}
\item{validateData}{If true, data entered via the web application is validated}
}
\value{
Column object
}
\description{
Column Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Specifies the details of a column in a table.
}
\seealso{
Other Column functions: \code{\link{Column.baseColumn}},
\code{\link{column.insert}}, \code{\link{column.patch}},
\code{\link{column.update}}
}
| /googlefusiontablesv2.auto/man/Column.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,501 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fusiontables_objects.R
\name{Column}
\alias{Column}
\title{Column Object}
\usage{
Column(Column.baseColumn = NULL, baseColumn = NULL, columnId = NULL,
columnJsonSchema = NULL, columnPropertiesJson = NULL,
description = NULL, formatPattern = NULL, graphPredicate = NULL,
name = NULL, type = NULL, validValues = NULL, validateData = NULL)
}
\arguments{
\item{Column.baseColumn}{The \link{Column.baseColumn} object or list of objects}
\item{baseColumn}{Identifier of the base column}
\item{columnId}{Identifier for the column}
\item{columnJsonSchema}{JSON schema for interpreting JSON in this column}
\item{columnPropertiesJson}{JSON object containing custom column properties}
\item{description}{Column description}
\item{formatPattern}{Format pattern}
\item{graphPredicate}{Column graph predicate}
\item{name}{Name of the column}
\item{type}{Type of the column}
\item{validValues}{List of valid values used to validate data and supply a drop-down list of values in the web application}
\item{validateData}{If true, data entered via the web application is validated}
}
\value{
Column object
}
\description{
Column Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Specifies the details of a column in a table.
}
\seealso{
Other Column functions: \code{\link{Column.baseColumn}},
\code{\link{column.insert}}, \code{\link{column.patch}},
\code{\link{column.update}}
}
|
#' International Trade Network Data
#'
#' Contains international trade data; value of exports from one country to another in a given year.
#'
#' @format A data frame with 114980 rows and 5 variables:
#' \describe{
#' \item{ .row }{ integer: row number }
#' \item{ country1 }{ character: country name of exporter }
#' \item{ country2 }{ character: country name of importer }
#' \item{ year }{ integer: year }
#' \item{ exports }{ numeric: total value of exports (in tens of millions of dollars) }
#' }
#'
#'
#' @details
#' See \emph{QSS} Table 5.7.
#'
#'
#' @references
#' \itemize{
#' \item{ Imai, Kosuke. 2017. \emph{Quantitative Social Science: An Introduction}.
#' Princeton University Press. \href{http://press.princeton.edu/titles/11025.html}{URL}. }
#' \item { Luca De Benedictis and Lucia Tajoli. (2011). 'The World Trade Network.'
#' \emph{The World Economy}, 34:8, pp.1417-1454. doi = 10.1111/j.1467-9701.2011.01360.x }
#'}
"trade"
| /R/trade.R | no_license | Musaab-Farooqui/qss-package | R | false | false | 946 | r | #' International Trade Network Data
#'
#' Contains international trade data; value of exports from one country to another in a given year.
#'
#' @format A data frame with 114980 rows and 5 variables:
#' \describe{
#' \item{ .row }{ integer: row number }
#' \item{ country1 }{ character: country name of exporter }
#' \item{ country2 }{ character: country name of importer }
#' \item{ year }{ integer: year }
#' \item{ exports }{ numeric: total value of exports (in tens of millions of dollars) }
#' }
#'
#'
#' @details
#' See \emph{QSS} Table 5.7.
#'
#'
#' @references
#' \itemize{
#' \item{ Imai, Kosuke. 2017. \emph{Quantitative Social Science: An Introduction}.
#' Princeton University Press. \href{http://press.princeton.edu/titles/11025.html}{URL}. }
#' \item { Luca De Benedictis and Lucia Tajoli. (2011). 'The World Trade Network.'
#' \emph{The World Economy}, 34:8, pp.1417-1454. doi = 10.1111/j.1467-9701.2011.01360.x }
#'}
"trade"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app_utilities.R
\name{prepPredictors}
\alias{prepPredictors}
\title{Prepare predictors based on inputs}
\usage{
prepPredictors(preds = NULL)
}
\arguments{
\item{preds}{predictors, as input to the app}
}
\value{
prepared predictors (or 1 if no predictors)
}
\description{
Prepare predictor inputs from the app for use in the model
function
}
| /man/prepPredictors.Rd | permissive | ddalthorp/GenEst | R | false | true | 439 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app_utilities.R
\name{prepPredictors}
\alias{prepPredictors}
\title{Prepare predictors based on inputs}
\usage{
prepPredictors(preds = NULL)
}
\arguments{
\item{preds}{predictors, as input to the app}
}
\value{
prepared predictors (or 1 if no predictors)
}
\description{
Prepare predictor inputs from the app for use in the model
function
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PersonTests.R
\name{createPersonTests}
\alias{createPersonTests}
\title{Run the person tests}
\usage{
createPersonTests()
}
\description{
Run the person tests
}
| /man/CERNER/TEST CASES/CernerTesting/man/createPersonTests.Rd | permissive | OHDSI/ETL-CDMBuilder | R | false | true | 239 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PersonTests.R
\name{createPersonTests}
\alias{createPersonTests}
\title{Run the person tests}
\usage{
createPersonTests()
}
\description{
Run the person tests
}
|
tbl_features <- function(features){
function(...){
list(as_tibble(squash(map(features, function(.fn, ...) as.list(.fn(...)), ...))))
}
}
#' Extract features from a dataset
#'
#' @param .tbl A dataset
#' @param .var,.vars The variable(s) to compute features on
#' @param features A list of functions (or lambda expressions) for the features to compute.
#' @param .predicate A predicate function (or lambda expression) to be applied to the columns or a logical vector. The variables for which .predicate is or returns TRUE are selected.
#' @param ... Additional arguments to be passed to each feature.
#'
#' @export
features <- function(.tbl, .var, features, ...){
UseMethod("features")
}
#' @export
features.tbl_ts <- function(.tbl, .var = NULL, features = list(), ...){
dots <- dots_list(...)
if(is_function(features)){
features <- list(features)
}
features <- map(squash(features), rlang::as_function)
.var <- enquo(.var)
if(quo_is_null(.var)){
inform(sprintf(
"Feature variable not specified, automatically selected `.var = %s`",
measured_vars(.tbl)[1]
))
.var <- as_quosure(syms(measured_vars(.tbl)[[1]]), env = empty_env())
}
else if(possibly(compose(is_quosures, eval_tidy), FALSE)(.var)){
abort("`features()` only supports a single variable. To compute features across multiple variables consider scoped variants like `features_at()`")
}
if(is.null(dots$.period)){
dots$.period <- get_frequencies(NULL, .tbl, .auto = "smallest")
}
as_tibble(.tbl) %>%
group_by(!!!key(.tbl), !!!dplyr::groups(.tbl)) %>%
dplyr::summarise(
.funs = tbl_features(features)(!!.var, !!!dots),
) %>%
unnest(!!sym(".funs")) %>%
dplyr::ungroup()
}
#' @rdname features
#' @export
features_at <- function(.tbl, .vars, features, ...){
UseMethod("features_at")
}
#' @export
features_at.tbl_ts <- function(.tbl, .vars = NULL, features = list(), ...){
dots <- dots_list(...)
if(is_function(features)){
features <- list(features)
}
features <- map(squash(features), rlang::as_function)
quo_vars <- enquo(.vars)
if(quo_is_null(quo_vars)){
inform(sprintf(
"Feature variable not specified, automatically selected `.vars = %s`",
measured_vars(.tbl)[1]
))
.vars <- as_quosures(syms(measured_vars(.tbl)[1]), env = empty_env())
}
else if(!possibly(compose(is_quosures, eval_tidy), FALSE)(.vars)){
.vars <- new_quosures(list(quo_vars))
}
if(is.null(dots$.period)){
dots$.period <- get_frequencies(NULL, .tbl, .auto = "smallest")
}
as_tibble(.tbl) %>%
group_by(!!!key(.tbl), !!!dplyr::groups(.tbl)) %>%
dplyr::summarise_at(
.vars = .vars,
.funs = tbl_features(features),
!!!dots
) %>%
unnest(!!!.vars, .sep = "_") %>%
dplyr::ungroup()
}
#' @rdname features
#' @export
features_all <- function(.tbl, features, ...){
UseMethod("features_all")
}
#' @export
features_all.tbl_ts <- function(.tbl, features = list(), ...){
features_at(.tbl, .vars = as_quosures(syms(measured_vars(.tbl)), empty_env()),
features = features, ...)
}
#' @rdname features
#' @export
features_if <- function(.tbl, .predicate, features, ...){
UseMethod("features_if")
}
#' @export
features_if.tbl_ts <- function(.tbl, .predicate, features = list(), ...){
mv_if <- map_lgl(.tbl[measured_vars(.tbl)], rlang::as_function(.predicate))
features_at(.tbl,
.vars = as_quosures(syms(measured_vars(.tbl)[mv_if]), empty_env()),
features = features, ...)
}
#' @inherit tsfeatures::crossing_points
#' @importFrom stats median
#' @export
crossing_points <- function(x)
{
midline <- median(x, na.rm = TRUE)
ab <- x <= midline
lenx <- length(x)
p1 <- ab[1:(lenx - 1)]
p2 <- ab[2:lenx]
cross <- (p1 & !p2) | (p2 & !p1)
c(crossing_points = sum(cross, na.rm = TRUE))
}
#' @inherit tsfeatures::arch_stat
#' @importFrom stats lm embed
#' @export
arch_stat <- function(x, lags = 12, demean = TRUE)
{
if (length(x) <= 13) {
return(c(arch_lm = NA_real_))
}
if (demean) {
x <- x - mean(x, na.rm = TRUE)
}
mat <- embed(x^2, lags + 1)
fit <- try(lm(mat[, 1] ~ mat[, -1]), silent = TRUE)
if ("try-error" %in% class(fit)) {
return(c(arch_lm = NA_real_))
}
arch.lm <- summary(fit)
c(arch_lm = arch.lm$r.squared)
}
#' STL features
#'
#' Computes a variety of measures extracted from an STL decomposition of the
#' time series. This includes details about the strength of trend and seasonality.
#'
#' @param x A vector to extract features from.
#' @param .period The period of the seasonality.
#' @param s.window The seasonal window of the data (passed to [`stats::stl()`])
#' @param ... Further arguments passed to [`stats::stl()`]
#'
#' @seealso
#' [Forecasting Principle and Practices: Measuring strength of trend and seasonality](https://otexts.com/fpp3/seasonal-strength.html)
#'
#' @importFrom stats var coef
#' @export
stl_features <- function(x, .period, s.window = 13, ...){
dots <- dots_list(...)
dots <- dots[names(dots) %in% names(formals(stats::stl))]
season.args <- list2(!!(names(.period)%||%as.character(.period)) :=
list(period = .period, s.window = s.window))
dcmp <- eval_tidy(quo(estimate_stl(x, trend.args = list(),
season.args = season.args, lowpass.args = list(), !!!dots)))
trend <- dcmp[["trend"]]
remainder <- dcmp[["remainder"]]
seas_adjust <- dcmp[["seas_adjust"]]
seasonalities <- dcmp[seq_len(length(dcmp) - 3) + 1]
names(seasonalities) <- sub("season_", "", names(seasonalities))
var_e <- var(remainder, na.rm = TRUE)
n <- length(x)
# Spike
d <- (remainder - mean(remainder, na.rm = TRUE))^2
var_loo <- (var_e * (n - 1) - d)/(n - 2)
spike <- var(var_loo, na.rm = TRUE)
# Linearity & curvature
tren.coef <- coef(lm(trend ~ poly(seq(n), degree = 2L)))[2L:3L]
linearity <- tren.coef[[1L]]
curvature <- tren.coef[[2L]]
# Strength of terms
trend_strength <- max(0, min(1, 1 - var_e/var(seas_adjust, na.rm = TRUE)))
seasonal_strength <- map_dbl(seasonalities, function(seas){
max(0, min(1, 1 - var_e/var(remainder + seas, na.rm = TRUE)))
})
# Position of peaks and troughs
seasonal_peak <- map_dbl(seasonalities, function(seas){
which.max(seas) %% .period
})
seasonal_trough <- map_dbl(seasonalities, function(seas){
which.min(seas) %% .period
})
c(trend_strength = trend_strength, seasonal_strength = seasonal_strength,
spike = spike, linearity = linearity, curvature = curvature,
seasonal_peak = seasonal_peak, seasonal_trough = seasonal_trough)
}
#' Unit root tests
#'
#' Performs a test for the existence of a unit root in the vector.
#'
#' \code{unitroot_kpss} computes the statistic for the Kwiatkowski et al. unit root test with linear trend and lag 1.
#'
#' \code{unitroot_pp} computes the statistic for the `'Z-tau'' version of Phillips & Perron unit root test with constant trend and lag 1.
#'
#' @param x A vector to be tested for the unit root.
#' @inheritParams urca::ur.kpss
#' @param ... Unused.
#'
#' @seealso [urca::ur.kpss()]
#'
#' @rdname unitroot
#' @export
unitroot_kpss <- function(x, type = c("mu", "tau"), lags = c("short", "long", "nil"),
use.lag = NULL, ...) {
require_package("urca")
result <- urca::ur.kpss(x, type = type, lags = lags, use.lag = use.lag)
pval <- tryCatch(
stats::approx(result@cval[1,], as.numeric(sub("pct", "", colnames(result@cval)))/100, xout=result@teststat[1], rule=2)$y,
error = function(e){
NA
}
)
c(kpss_stat = result@teststat, kpss_pval = pval)
}
#' @inheritParams urca::ur.pp
#' @rdname unitroot
#'
#' @seealso [urca::ur.pp()]
#'
#' @export
unitroot_pp <- function(x, type = c("Z-tau", "Z-alpha"), model = c("constant", "trend"),
lags = c("short", "long"), use.lag = NULL, ...) {
require_package("urca")
result <- urca::ur.pp(x, type = type, model = model, lags = lags, use.lag = use.lag)
pval <- tryCatch(
stats::approx(result@cval[1,], as.numeric(sub("pct", "", colnames(result@cval)))/100, xout=result@teststat[1], rule=2)$y,
error = function(e){
NA
}
)
c(pp_stat = result@teststat, pp_pval = pval)
}
#' Number of differences required for a stationary series
#'
#' Use a unit root function to determine the minimum number of differences
#' necessary to obtain a stationary time series.
#'
#' @inheritParams unitroot_kpss
#' @param alpha The level of the test.
#' @param unitroot_fn A function (or lambda) that provides a p-value for a unit root test.
#' @param differences The possible differences to consider.
#' @param ... Additional arguments passed to the `unitroot_fn` function
#'
#' @export
unitroot_ndiffs <- function(x, alpha = 0.05, unitroot_fn = ~ unitroot_kpss(.)["kpss_pval"],
differences = 0:2, ...) {
unitroot_fn <- as_function(unitroot_fn)
diff <- function(x, differences, ...){
if(differences == 0) return(x)
base::diff(x, differences = differences, ...)
}
# Non-missing x
keep <- map_lgl(differences, function(.x){
dx <- diff(x, differences = .x)
!all(is.na(dx))
})
differences <- differences[keep]
# Estimate the test
keep <- map_lgl(differences[-1]-1, function(.x) {
unitroot_fn(diff(x, differences = .x), ...) < alpha
})
c(ndiffs = max(differences[c(TRUE, keep)], na.rm = TRUE))
}
#' @rdname unitroot_ndiffs
#' @param .period The period of the seasonality.
#'
#' @export
unitroot_nsdiffs <- function(x, alpha = 0.05, unitroot_fn = ~ stl_features(.,.period)[2]<0.64,
differences = 0:2, .period = 1, ...) {
if(.period == 1) return(c(nsdiffs = min(differences)))
unitroot_fn <- as_function(unitroot_fn)
environment(unitroot_fn) <- new_environment(parent = get_env(unitroot_fn))
environment(unitroot_fn)$.period <- .period
diff <- function(x, differences, ...){
if(differences == 0) return(x)
base::diff(x, differences = differences, ...)
}
# Non-missing x
keep <- map_lgl(differences, function(.x){
dx <- diff(x, lag = .period, differences = .x)
!all(is.na(dx))
})
differences <- differences[keep]
# Estimate the test
keep <- map_lgl(differences[-1]-1, function(.x) {
unitroot_fn(diff(x, lag = .period, differences = .x)) < alpha
})
c(nsdiffs = max(differences[c(TRUE, keep)], na.rm = TRUE))
}
#' Number of flat spots
#'
#' Number of flat spots in a time series
#' @param x a vector
#' @param ... Unused.
#' @return A numeric value.
#' @author Earo Wang and Rob J Hyndman
#' @export
flat_spots <- function(x) {
cutx <- try(cut(x, breaks = 10, include.lowest = TRUE, labels = FALSE),
silent = TRUE
)
if (class(cutx) == "try-error") {
return(c(flat_spots = NA))
}
rlex <- rle(cutx)
return(c(flat_spots = max(rlex$lengths)))
}
#' Hurst coefficient
#'
#' Computes the Hurst coefficient indicating the level of fractional differencing
#' of a time series.
#'
#' @param x a vector. If missing values are present, the largest
#' contiguous portion of the vector is used.
#' @param ... Unused.
#' @return A numeric value.
#' @author Rob J Hyndman
#'
#' @export
hurst <- function(x, ...) {
require_package("fracdiff")
# Hurst=d+0.5 where d is fractional difference.
return(c(hurst = suppressWarnings(fracdiff::fracdiff(na.contiguous(x), 0, 0)[["d"]] + 0.5)))
}
#' Sliding window features
#'
#' Computes feature of a time series based on sliding (overlapping) windows.
#' \code{max_level_shift} finds the largest mean shift between two consecutive windows.
#' \code{max_var_shift} finds the largest var shift between two consecutive windows.
#' \code{max_kl_shift} finds the largest shift in Kulback-Leibler divergence between
#' two consecutive windows.
#'
#' Computes the largest level shift and largest variance shift in sliding mean calculations
#' @param x a univariate time series
#' @param .size size of sliding window, if NULL `.size` will be automatically chosen using `.period`
#' @param .period The seasonal period (optional)
#' @param ... Unused.
#' @return A vector of 2 values: the size of the shift, and the time index of the shift.
#'
#' @author Earo Wang, Rob J Hyndman and Mitchell O'Hara-Wild
#'
#' @export
max_level_shift <- function(x, .size = NULL, .period = 1, ...) {
if(is.null(.size)){
.size <- ifelse(.period == 1, 10, .period)
}
rollmean <- tsibble::slide_dbl(x, mean, .size = .size, na.rm = TRUE)
means <- abs(diff(rollmean, .size))
if (length(means) == 0L) {
maxmeans <- 0
maxidx <- NA_real_
}
else if (all(is.na(means))) {
maxmeans <- NA_real_
maxidx <- NA_real_
}
else {
maxmeans <- max(means, na.rm = TRUE)
maxidx <- which.max(means) + 1L
}
return(c(level_shift_max = maxmeans, level_shift_index = maxidx))
}
#' @rdname max_level_shift
#' @export
max_var_shift <- function(x, .size = NULL, .period = 1, ...) {
if(is.null(.size)){
.size <- ifelse(.period == 1, 10, .period)
}
rollvar <- tsibble::slide_dbl(x, var, .size = .size, na.rm = TRUE)
vars <- abs(diff(rollvar, .size))
if (length(vars) == 0L) {
maxvar <- 0
maxidx <- NA_real_
}
else if (all(is.na(vars))) {
maxvar <- NA_real_
maxidx <- NA_real_
}
else {
maxvar <- max(vars, na.rm = TRUE)
maxidx <- which.max(vars) + 1L
}
return(c(var_shift_max = maxvar, var_shift_index = maxidx))
}
#' @rdname max_level_shift
#' @export
max_kl_shift <- function(x, .size = NULL, .period = 1, ...) {
if(is.null(.size)){
.size <- ifelse(.period == 1, 10, .period)
}
gw <- 100 # grid width
xgrid <- seq(min(x, na.rm = TRUE), max(x, na.rm = TRUE), length = gw)
grid <- xgrid[2L] - xgrid[1L]
tmpx <- x[!is.na(x)] # Remove NA to calculate bw
bw <- stats::bw.nrd0(tmpx)
lenx <- length(x)
if (lenx <= (2 * .size)) {
return(c(max_kl_shift = NA_real_, time_kl_shift = NA_real_))
}
densities <- map(xgrid, function(xgrid) stats::dnorm(xgrid, mean = x, sd = bw))
densities <- map(densities, pmax, stats::dnorm(38))
rmean <- map(densities, function(x)
tsibble::slide_dbl(x, mean, .size = .size, na.rm = TRUE, .align = "right")
) %>%
transpose() %>%
map(unlist)
kl <- map2_dbl(
rmean[seq_len(lenx - .size)],
rmean[seq_len(lenx - .size) + .size],
function(x, y) sum(x * (log(x) - log(y)) * grid, na.rm = TRUE)
)
diffkl <- diff(kl, na.rm = TRUE)
if (length(diffkl) == 0L) {
diffkl <- 0
maxidx <- NA_real_
}
else {
maxidx <- which.max(diffkl) + 1L
}
return(c(kl_shift_max = max(diffkl, na.rm = TRUE), kl_shift_index = maxidx))
}
#' Spectral entropy of a time series
#'
#' Computes the spectral entropy of a time series
#'
#' @inheritParams max_level_shift
#'
#' @return A numeric value.
#' @author Rob J Hyndman
#' @export
entropy <- function(x, ...) {
require_package("ForeCA")
entropy <- try(ForeCA::spectral_entropy(na.contiguous(x))[1L], silent = TRUE)
if (class(entropy) == "try-error") {
entropy <- NA
}
return(c(entropy = entropy))
}
#' Time series features based on tiled windows
#'
#' Computes feature of a time series based on tiled (non-overlapping) windows.
#' Means or variances are produced for all tiled windows. Then stability is
#' the variance of the means, while lumpiness is the variance of the variances.
#'
#' @inheritParams max_level_shift
#' @return A numeric vector of length 2 containing a measure of lumpiness and
#' a measure of stability.
#' @author Earo Wang and Rob J Hyndman
#'
#' @rdname tile_features
#'
#' @importFrom stats var
#' @export
lumpiness <- function(x, .size = NULL, .period = 1, ...) {
if(is.null(.size)){
.size <- ifelse(.period == 1, 10, .period)
}
x <- scale(x, center = TRUE, scale = TRUE)
varx <- tsibble::tile_dbl(x, var, na.rm = TRUE, .size = .size)
if (length(x) < 2 * .size) {
lumpiness <- 0
} else {
lumpiness <- var(varx, na.rm = TRUE)
}
return(c(lumpiness = lumpiness))
}
#' @rdname tile_features
#' @export
stability <- function(x, .size = NULL, .period = 1, ...) {
if(is.null(.size)){
.size <- ifelse(.period == 1, 10, .period)
}
x <- scale(x, center = TRUE, scale = TRUE)
meanx <- tsibble::tile_dbl(x, mean, na.rm = TRUE, .size = .size)
if (length(x) < 2 * .size) {
stability <- 0
} else {
stability <- var(meanx, na.rm = TRUE)
}
return(c(stability = stability))
}
#' Autocorrelation-based features
#'
#' Computes various measures based on autocorrelation coefficients of the
#' original series, first-differenced series and second-differenced series
#'
#' @inheritParams stability
#'
#' @return A vector of 6 values: first autocorrelation coefficient and sum of squared of
#' first ten autocorrelation coefficients of original series, first-differenced series,
#' and twice-differenced series.
#' For seasonal data, the autocorrelation coefficient at the first seasonal lag is
#' also returned.
#'
#' @author Thiyanga Talagala
#' @export
acf_features <- function(x, .period = 1, ...) {
acfx <- stats::acf(x, lag.max = max(.period, 10L), plot = FALSE, na.action = stats::na.pass)
acfdiff1x <- stats::acf(diff(x, differences = 1), lag.max = 10L, plot = FALSE, na.action = stats::na.pass)
acfdiff2x <- stats::acf(diff(x, differences = 2), lag.max = 10L, plot = FALSE, na.action = stats::na.pass)
# first autocorrelation coefficient
acf_1 <- acfx$acf[2L]
# sum of squares of first 10 autocorrelation coefficients
sum_of_sq_acf10 <- sum((acfx$acf[2L:11L])^2)
# first autocorrelation coefficient of differenced series
diff1_acf1 <- acfdiff1x$acf[2L]
# Sum of squared of first 10 autocorrelation coefficients of differenced series
diff1_acf10 <- sum((acfdiff1x$acf[-1L])^2)
# first autocorrelation coefficient of twice-differenced series
diff2_acf1 <- acfdiff2x$acf[2L]
# Sum of squared of first 10 autocorrelation coefficients of twice-differenced series
diff2_acf10 <- sum((acfdiff2x$acf[-1L])^2)
output <- c(
x_acf1 = unname(acf_1),
x_acf10 = unname(sum_of_sq_acf10),
diff1_acf1 = unname(diff1_acf1),
diff1_acf10 = unname(diff1_acf10),
diff2_acf1 = unname(diff2_acf1),
diff2_acf10 = unname(diff2_acf10)
)
if (.period > 1) {
output <- c(output, seas_acf1 = unname(acfx$acf[.period + 1L]))
}
return(output)
}
#' Partial autocorrelation-based features
#'
#' Computes various measures based on partial autocorrelation coefficients of the
#' original series, first-differenced series and second-differenced series.
#'
#' @inheritParams acf_features
#'
#' @return A vector of 3 values: Sum of squared of first 5
#' partial autocorrelation coefficients of the original series, first differenced
#' series and twice-differenced series.
#' For seasonal data, the partial autocorrelation coefficient at the first seasonal
#' lag is also returned.
#' @author Thiyanga Talagala
#' @export
pacf_features <- function(x, .period = 1, ...) {
pacfx <- stats::pacf(x, lag.max = max(5L, .period), plot = FALSE)$acf
# Sum of squared of first 5 partial autocorrelation coefficients
pacf_5 <- sum((pacfx[seq(5L)])^2)
# Sum of squared of first 5 partial autocorrelation coefficients of difference series
diff1_pacf_5 <- sum((stats::pacf(diff(x, differences = 1), lag.max = 5L, plot = FALSE)$acf)^2)
# Sum of squared of first 5 partial autocorrelation coefficients of twice differenced series
diff2_pacf_5 <- sum((stats::pacf(diff(x, differences = 2), lag.max = 5L, plot = FALSE)$acf)^2)
output <- c(
x_pacf5 = unname(pacf_5),
diff1x_pacf5 = unname(diff1_pacf_5),
diff2x_pacf5 = unname(diff2_pacf_5)
)
if (.period > 1) {
output <- c(output, seas_pacf = pacfx[.period])
}
return(output)
}
| /R/features.R | no_license | Sprinterzzj/feasts | R | false | false | 19,720 | r | tbl_features <- function(features){
function(...){
list(as_tibble(squash(map(features, function(.fn, ...) as.list(.fn(...)), ...))))
}
}
#' Extract features from a dataset
#'
#' @param .tbl A dataset
#' @param .var,.vars The variable(s) to compute features on
#' @param features A list of functions (or lambda expressions) for the features to compute.
#' @param .predicate A predicate function (or lambda expression) to be applied to the columns or a logical vector. The variables for which .predicate is or returns TRUE are selected.
#' @param ... Additional arguments to be passed to each feature.
#'
#' @export
features <- function(.tbl, .var, features, ...){
UseMethod("features")
}
#' @export
features.tbl_ts <- function(.tbl, .var = NULL, features = list(), ...){
dots <- dots_list(...)
if(is_function(features)){
features <- list(features)
}
features <- map(squash(features), rlang::as_function)
.var <- enquo(.var)
if(quo_is_null(.var)){
inform(sprintf(
"Feature variable not specified, automatically selected `.var = %s`",
measured_vars(.tbl)[1]
))
.var <- as_quosure(syms(measured_vars(.tbl)[[1]]), env = empty_env())
}
else if(possibly(compose(is_quosures, eval_tidy), FALSE)(.var)){
abort("`features()` only supports a single variable. To compute features across multiple variables consider scoped variants like `features_at()`")
}
if(is.null(dots$.period)){
dots$.period <- get_frequencies(NULL, .tbl, .auto = "smallest")
}
as_tibble(.tbl) %>%
group_by(!!!key(.tbl), !!!dplyr::groups(.tbl)) %>%
dplyr::summarise(
.funs = tbl_features(features)(!!.var, !!!dots),
) %>%
unnest(!!sym(".funs")) %>%
dplyr::ungroup()
}
#' @rdname features
#' @export
features_at <- function(.tbl, .vars, features, ...){
  UseMethod("features_at")
}
#' @export
features_at.tbl_ts <- function(.tbl, .vars = NULL, features = list(), ...){
  dots <- dots_list(...)
  # A bare function is accepted as shorthand for a one-element feature list.
  if(is_function(features)){
    features <- list(features)
  }
  # Flatten nested feature lists and standardise formulas/lambdas into
  # plain functions.
  features <- map(squash(features), rlang::as_function)
  quo_vars <- enquo(.vars)
  if(quo_is_null(quo_vars)){
    # No selection supplied: default to the first measured variable and tell
    # the user which one was picked.
    inform(sprintf(
      "Feature variable not specified, automatically selected `.vars = %s`",
      measured_vars(.tbl)[1]
    ))
    .vars <- as_quosures(syms(measured_vars(.tbl)[1]), env = empty_env())
  }
  else if(!possibly(compose(is_quosures, eval_tidy), FALSE)(.vars)){
    # A bare column name (not a vars() selection) was given; wrap it so the
    # scoped summarise below always receives a quosure list.
    .vars <- new_quosures(list(quo_vars))
  }
  # Seasonal features need a period; default to the smallest frequency of
  # the tsibble when the caller did not supply one.
  if(is.null(dots$.period)){
    dots$.period <- get_frequencies(NULL, .tbl, .auto = "smallest")
  }
  # Compute the feature set for every selected variable within each
  # key/group, then unnest with a "<variable>_" prefix on feature names.
  as_tibble(.tbl) %>%
    group_by(!!!key(.tbl), !!!dplyr::groups(.tbl)) %>%
    dplyr::summarise_at(
      .vars = .vars,
      .funs = tbl_features(features),
      !!!dots
    ) %>%
    unnest(!!!.vars, .sep = "_") %>%
    dplyr::ungroup()
}
#' @rdname features
#' @export
features_all <- function(.tbl, features, ...){
  UseMethod("features_all")
}
#' @export
features_all.tbl_ts <- function(.tbl, features = list(), ...){
  # Delegate to features_at() with every measured variable selected.
  features_at(.tbl, .vars = as_quosures(syms(measured_vars(.tbl)), empty_env()),
              features = features, ...)
}
#' @rdname features
#' @export
features_if <- function(.tbl, .predicate, features, ...){
  UseMethod("features_if")
}
#' @export
features_if.tbl_ts <- function(.tbl, .predicate, features = list(), ...){
  # Apply the predicate to each measured column to decide which variables
  # participate.
  # NOTE(review): the docs say .predicate may also be a logical vector, but
  # rlang::as_function() only accepts functions/formulas -- confirm.
  mv_if <- map_lgl(.tbl[measured_vars(.tbl)], rlang::as_function(.predicate))
  # Delegate to features_at() with only the selected variables.
  features_at(.tbl,
              .vars = as_quosures(syms(measured_vars(.tbl)[mv_if]), empty_env()),
              features = features, ...)
}
#' @inherit tsfeatures::crossing_points
#' @importFrom stats median
#' @export
crossing_points <- function(x)
{
  # Number of times the series crosses its median: pairs of consecutive
  # observations lying on opposite sides of the median line.
  lenx <- length(x)
  # Guard: fewer than two points cannot cross. The previous `1:(lenx - 1)`
  # indexing broke down here (for lenx == 0 it mixed positive and negative
  # subscripts and errored).
  if (lenx < 2L) {
    return(c(crossing_points = 0L))
  }
  midline <- median(x, na.rm = TRUE)
  ab <- x <= midline
  p1 <- ab[seq_len(lenx - 1L)]
  p2 <- ab[-1L]
  # xor() is TRUE exactly when the two sides differ; NA comparisons are
  # dropped by na.rm below (same semantics as (p1 & !p2) | (p2 & !p1)).
  cross <- xor(p1, p2)
  c(crossing_points = sum(cross, na.rm = TRUE))
}
#' @inherit tsfeatures::arch_stat
#' @importFrom stats lm embed
#' @export
arch_stat <- function(x, lags = 12, demean = TRUE)
{
  # Lagrange-multiplier style measure of ARCH effects: the R^2 from
  # regressing the squared series on its own first `lags` squared lags.
  na_result <- c(arch_lm = NA_real_)
  # Too few observations to fit the lagged regression.
  if (length(x) <= 13) {
    return(na_result)
  }
  if (demean) {
    x <- x - mean(x, na.rm = TRUE)
  }
  # Column 1 holds x_t^2; the remaining columns hold its `lags` lags.
  lagged_sq <- embed(x^2, lags + 1)
  fit <- try(lm(lagged_sq[, 1] ~ lagged_sq[, -1]), silent = TRUE)
  if (inherits(fit, "try-error")) {
    return(na_result)
  }
  c(arch_lm = summary(fit)$r.squared)
}
#' STL features
#'
#' Computes a variety of measures extracted from an STL decomposition of the
#' time series. This includes details about the strength of trend and seasonality.
#'
#' @param x A vector to extract features from.
#' @param .period The period of the seasonality.
#' @param s.window The seasonal window of the data (passed to [`stats::stl()`])
#' @param ... Further arguments passed to [`stats::stl()`]
#'
#' @seealso
#' [Forecasting Principle and Practices: Measuring strength of trend and seasonality](https://otexts.com/fpp3/seasonal-strength.html)
#'
#' @importFrom stats var coef
#' @export
stl_features <- function(x, .period, s.window = 13, ...){
  # Only forward arguments that stats::stl() actually understands.
  dots <- dots_list(...)
  dots <- dots[names(dots) %in% names(formals(stats::stl))]
  # One seasonal component, named after the period (or its value when the
  # period is unnamed).
  season.args <- list2(!!(names(.period)%||%as.character(.period)) :=
                         list(period = .period, s.window = s.window))
  # estimate_stl() is defined elsewhere in this package; based on the usage
  # below it returns a list-like object containing trend, one element per
  # seasonal component (named "season_<name>"), remainder and seas_adjust.
  dcmp <- eval_tidy(quo(estimate_stl(x, trend.args = list(),
    season.args = season.args, lowpass.args = list(), !!!dots)))
  trend <- dcmp[["trend"]]
  remainder <- dcmp[["remainder"]]
  seas_adjust <- dcmp[["seas_adjust"]]
  # Elements 2..(length - 2) are taken as the seasonal components.
  # NOTE(review): this assumes the ordering (trend, season_*, remainder,
  # seas_adjust) -- confirm against estimate_stl().
  seasonalities <- dcmp[seq_len(length(dcmp) - 3) + 1]
  names(seasonalities) <- sub("season_", "", names(seasonalities))
  var_e <- var(remainder, na.rm = TRUE)
  n <- length(x)
  # Spike: variance of the leave-one-out variances of the remainder; large
  # values indicate isolated outliers in the remainder.
  d <- (remainder - mean(remainder, na.rm = TRUE))^2
  var_loo <- (var_e * (n - 1) - d)/(n - 2)
  spike <- var(var_loo, na.rm = TRUE)
  # Linearity & curvature: coefficients of an orthogonal quadratic fit to
  # the trend component.
  tren.coef <- coef(lm(trend ~ poly(seq(n), degree = 2L)))[2L:3L]
  linearity <- tren.coef[[1L]]
  curvature <- tren.coef[[2L]]
  # Strength of terms, per the FPP definitions: 1 - Var(remainder) relative
  # to the component-plus-remainder variance, clamped into [0, 1].
  trend_strength <- max(0, min(1, 1 - var_e/var(seas_adjust, na.rm = TRUE)))
  seasonal_strength <- map_dbl(seasonalities, function(seas){
    max(0, min(1, 1 - var_e/var(remainder + seas, na.rm = TRUE)))
  })
  # Position of peaks and troughs within the period (0 denotes the final
  # position of the period, since the offset is taken modulo .period).
  seasonal_peak <- map_dbl(seasonalities, function(seas){
    which.max(seas) %% .period
  })
  seasonal_trough <- map_dbl(seasonalities, function(seas){
    which.min(seas) %% .period
  })
  c(trend_strength = trend_strength, seasonal_strength = seasonal_strength,
    spike = spike, linearity = linearity, curvature = curvature,
    seasonal_peak = seasonal_peak, seasonal_trough = seasonal_trough)
}
#' Unit root tests
#'
#' Performs a test for the existence of a unit root in the vector.
#'
#' \code{unitroot_kpss} computes the statistic for the Kwiatkowski et al. unit root test with linear trend and lag 1.
#'
#' \code{unitroot_pp} computes the statistic for the `'Z-tau'' version of Phillips & Perron unit root test with constant trend and lag 1.
#'
#' @param x A vector to be tested for the unit root.
#' @inheritParams urca::ur.kpss
#' @param ... Unused.
#'
#' @return A named vector with the test statistic and an approximate p-value.
#'
#' @seealso [urca::ur.kpss()]
#'
#' @rdname unitroot
#' @export
unitroot_kpss <- function(x, type = c("mu", "tau"), lags = c("short", "long", "nil"),
                          use.lag = NULL, ...) {
  # urca is a suggested dependency; require_package() (defined elsewhere in
  # this package) is expected to fail informatively when it is missing.
  require_package("urca")
  result <- urca::ur.kpss(x, type = type, lags = lags, use.lag = use.lag)
  # urca only tabulates critical values (columns named like "10pct"), so an
  # approximate p-value is obtained by linearly interpolating the test
  # statistic against that table. rule = 2 clamps statistics outside the
  # table to the nearest tabulated level; any failure is reported as NA.
  pval <- tryCatch(
    stats::approx(result@cval[1,], as.numeric(sub("pct", "", colnames(result@cval)))/100, xout=result@teststat[1], rule=2)$y,
    error = function(e){
      NA
    }
  )
  c(kpss_stat = result@teststat, kpss_pval = pval)
}
#' @inheritParams urca::ur.pp
#' @rdname unitroot
#'
#' @seealso [urca::ur.pp()]
#'
#' @export
unitroot_pp <- function(x, type = c("Z-tau", "Z-alpha"), model = c("constant", "trend"),
                        lags = c("short", "long"), use.lag = NULL, ...) {
  require_package("urca")
  result <- urca::ur.pp(x, type = type, model = model, lags = lags, use.lag = use.lag)
  # Same approach as unitroot_kpss(): interpolate an approximate p-value
  # from urca's critical-value table (columns named like "10pct"), clamping
  # out-of-table statistics (rule = 2) and reporting failures as NA.
  pval <- tryCatch(
    stats::approx(result@cval[1,], as.numeric(sub("pct", "", colnames(result@cval)))/100, xout=result@teststat[1], rule=2)$y,
    error = function(e){
      NA
    }
  )
  c(pp_stat = result@teststat, pp_pval = pval)
}
#' Number of differences required for a stationary series
#'
#' Use a unit root function to determine the minimum number of differences
#' necessary to obtain a stationary time series.
#'
#' @inheritParams unitroot_kpss
#' @param alpha The level of the test.
#' @param unitroot_fn A function (or lambda) that provides a p-value for a unit root test.
#' @param differences The possible differences to consider.
#' @param ... Additional arguments passed to the `unitroot_fn` function
#'
#' @export
unitroot_ndiffs <- function(x, alpha = 0.05, unitroot_fn = ~ unitroot_kpss(.)["kpss_pval"],
                            differences = 0:2, ...) {
  unitroot_fn <- as_function(unitroot_fn)
  # Local shadow of diff() that treats 0 differences as the identity, so
  # the candidate order 0 can be handled uniformly below.
  diff <- function(x, differences, ...){
    if(differences == 0) return(x)
    base::diff(x, differences = differences, ...)
  }
  # Non-missing x
  # Drop candidate orders for which differencing leaves nothing but NA
  # (i.e. the series is too short for that many differences).
  keep <- map_lgl(differences, function(.x){
    dx <- diff(x, differences = .x)
    !all(is.na(dx))
  })
  differences <- differences[keep]
  # Estimate the test
  # For each remaining order d > 0, keep d when the test at d - 1
  # differences is still significant (p < alpha, i.e. still non-stationary
  # under the default KPSS p-value).
  keep <- map_lgl(differences[-1]-1, function(.x) {
    unitroot_fn(diff(x, differences = .x), ...) < alpha
  })
  # The lowest order (usually 0) is always a candidate; return the highest
  # retained order.
  # NOTE(review): a significant test at a higher order keeps that order
  # even if a lower order already achieved stationarity -- confirm this
  # matches the intended sequential procedure.
  c(ndiffs = max(differences[c(TRUE, keep)], na.rm = TRUE))
}
#' @rdname unitroot_ndiffs
#' @param .period The period of the seasonality.
#'
#' @export
unitroot_nsdiffs <- function(x, alpha = 0.05, unitroot_fn = ~ stl_features(.,.period)[2]<0.64,
                             differences = 0:2, .period = 1, ...) {
  # Non-seasonal data cannot be seasonally differenced.
  if(.period == 1) return(c(nsdiffs = min(differences)))
  unitroot_fn <- as_function(unitroot_fn)
  # The default lambda references `.period`; give the function a child
  # environment carrying the current period so that reference resolves.
  environment(unitroot_fn) <- new_environment(parent = get_env(unitroot_fn))
  environment(unitroot_fn)$.period <- .period
  # Local shadow of diff() that treats 0 differences as the identity.
  diff <- function(x, differences, ...){
    if(differences == 0) return(x)
    base::diff(x, differences = differences, ...)
  }
  # Non-missing x
  # Drop candidate orders for which seasonal differencing leaves only NA.
  keep <- map_lgl(differences, function(.x){
    dx <- diff(x, lag = .period, differences = .x)
    !all(is.na(dx))
  })
  differences <- differences[keep]
  # Estimate the test
  # NOTE(review): the default unitroot_fn returns a *logical* (TRUE when
  # seasonal strength < 0.64, i.e. no further difference needed). Comparing
  # that logical with `< alpha` coerces TRUE->1/FALSE->0, so for any alpha
  # in (0, 1) it acts as a negation: an order is kept exactly when the
  # seasonality is still strong. A custom unitroot_fn returning a p-value
  # behaves as in unitroot_ndiffs(). Confirm this double duty is intended.
  keep <- map_lgl(differences[-1]-1, function(.x) {
    unitroot_fn(diff(x, lag = .period, differences = .x)) < alpha
  })
  c(nsdiffs = max(differences[c(TRUE, keep)], na.rm = TRUE))
}
#' Number of flat spots
#'
#' Number of flat spots in a time series
#' @param x a vector
#' @param ... Unused.
#' @return A numeric value: the length of the longest run of consecutive
#'   observations falling into the same decile bin of the data's range
#'   (NA when the series cannot be binned).
#' @author Earo Wang and Rob J Hyndman
#' @export
flat_spots <- function(x) {
  # Discretise the series into 10 equal-width bins over its range.
  cutx <- try(cut(x, breaks = 10, include.lowest = TRUE, labels = FALSE),
    silent = TRUE
  )
  # cut() errors for degenerate input (e.g. empty or all-NA series);
  # report the feature as missing in that case.
  # Fix: inherits() is the reliable test for a condition class --
  # `class(cutx) == "try-error"` breaks when the class attribute has
  # length > 1.
  if (inherits(cutx, "try-error")) {
    return(c(flat_spots = NA))
  }
  # Longest run of identical bin codes.
  rlex <- rle(cutx)
  return(c(flat_spots = max(rlex$lengths)))
}
#' Hurst coefficient
#'
#' Computes the Hurst coefficient indicating the level of fractional differencing
#' of a time series.
#'
#' @param x a vector. If missing values are present, the largest
#' contiguous portion of the vector is used.
#' @param ... Unused.
#' @return A numeric value.
#' @author Rob J Hyndman
#'
#' @export
hurst <- function(x, ...) {
  require_package("fracdiff")
  # Fit an ARFIMA(0, d, 0) model to the longest NA-free stretch of the
  # series; the Hurst coefficient is the fractional difference d + 0.5.
  contiguous <- na.contiguous(x)
  frac_d <- suppressWarnings(fracdiff::fracdiff(contiguous, 0, 0)[["d"]])
  c(hurst = frac_d + 0.5)
}
#' Sliding window features
#'
#' Computes feature of a time series based on sliding (overlapping) windows.
#' \code{max_level_shift} finds the largest mean shift between two consecutive windows.
#' \code{max_var_shift} finds the largest var shift between two consecutive windows.
#' \code{max_kl_shift} finds the largest shift in Kulback-Leibler divergence between
#' two consecutive windows.
#'
#' Computes the largest level shift and largest variance shift in sliding mean calculations
#' @param x a univariate time series
#' @param .size size of sliding window, if NULL `.size` will be automatically chosen using `.period`
#' @param .period The seasonal period (optional)
#' @param ... Unused.
#' @return A vector of 2 values: the size of the shift, and the time index of the shift.
#'
#' @author Earo Wang, Rob J Hyndman and Mitchell O'Hara-Wild
#'
#' @export
max_level_shift <- function(x, .size = NULL, .period = 1, ...) {
  # Default window: 10 observations for non-seasonal data, otherwise one
  # full seasonal period.
  if(is.null(.size)){
    .size <- ifelse(.period == 1, 10, .period)
  }
  # Rolling means over windows of .size observations, then the absolute
  # change between windows exactly .size apart (adjacent, non-overlapping).
  rollmean <- tsibble::slide_dbl(x, mean, .size = .size, na.rm = TRUE)
  means <- abs(diff(rollmean, .size))
  if (length(means) == 0L) {
    # Series shorter than two windows: no shift can be measured.
    maxmeans <- 0
    maxidx <- NA_real_
  }
  else if (all(is.na(means))) {
    maxmeans <- NA_real_
    maxidx <- NA_real_
  }
  else {
    maxmeans <- max(means, na.rm = TRUE)
    # NOTE(review): the +1L offset indexes the rolling-mean series rather
    # than the original series -- confirm the intended alignment with
    # slide_dbl's default.
    maxidx <- which.max(means) + 1L
  }
  return(c(level_shift_max = maxmeans, level_shift_index = maxidx))
}
#' @rdname max_level_shift
#' @export
max_var_shift <- function(x, .size = NULL, .period = 1, ...) {
  # Default window: ten observations for non-seasonal data, otherwise one
  # full seasonal period.
  window <- if (is.null(.size)) ifelse(.period == 1, 10, .period) else .size
  # Rolling variances, then the absolute change between windows that are
  # exactly one window-length apart (adjacent, non-overlapping neighbours).
  rolling_var <- tsibble::slide_dbl(x, var, .size = window, na.rm = TRUE)
  shifts <- abs(diff(rolling_var, window))
  if (length(shifts) == 0L) {
    # Series shorter than two windows: no shift can be measured.
    shift_max <- 0
    shift_idx <- NA_real_
  } else if (all(is.na(shifts))) {
    shift_max <- NA_real_
    shift_idx <- NA_real_
  } else {
    shift_max <- max(shifts, na.rm = TRUE)
    shift_idx <- which.max(shifts) + 1L
  }
  c(var_shift_max = shift_max, var_shift_index = shift_idx)
}
#' @rdname max_level_shift
#' @export
max_kl_shift <- function(x, .size = NULL, .period = 1, ...) {
  # Default window: 10 observations for non-seasonal data, otherwise one
  # full seasonal period.
  if(is.null(.size)){
    .size <- ifelse(.period == 1, 10, .period)
  }
  gw <- 100 # grid width
  # Evaluation grid spanning the observed range of the series.
  xgrid <- seq(min(x, na.rm = TRUE), max(x, na.rm = TRUE), length = gw)
  grid <- xgrid[2L] - xgrid[1L]
  tmpx <- x[!is.na(x)] # Remove NA to calculate bw
  bw <- stats::bw.nrd0(tmpx)
  lenx <- length(x)
  if (lenx <= (2 * .size)) {
    # Fix: this degenerate branch previously returned elements named
    # `max_kl_shift`/`time_kl_shift`, inconsistent with the names produced
    # by the normal branch below; use the same names in both cases so
    # callers can rely on them.
    return(c(kl_shift_max = NA_real_, kl_shift_index = NA_real_))
  }
  # Gaussian kernel density contribution of every observation at each grid
  # point, floored at a tiny positive value so the logs below never see 0.
  densities <- map(xgrid, function(xgrid) stats::dnorm(xgrid, mean = x, sd = bw))
  densities <- map(densities, pmax, stats::dnorm(38))
  # Per-grid-point rolling means, then transposed so rmean[[t]] is the
  # density estimate of the window ending at time t.
  rmean <- map(densities, function(x)
    tsibble::slide_dbl(x, mean, .size = .size, na.rm = TRUE, .align = "right")
  ) %>%
    transpose() %>%
    map(unlist)
  # Discretised KL divergence between the window ending at t and the window
  # ending .size steps later.
  kl <- map2_dbl(
    rmean[seq_len(lenx - .size)],
    rmean[seq_len(lenx - .size) + .size],
    function(x, y) sum(x * (log(x) - log(y)) * grid, na.rm = TRUE)
  )
  diffkl <- diff(kl, na.rm = TRUE)
  if (length(diffkl) == 0L) {
    diffkl <- 0
    maxidx <- NA_real_
  }
  else {
    maxidx <- which.max(diffkl) + 1L
  }
  return(c(kl_shift_max = max(diffkl, na.rm = TRUE), kl_shift_index = maxidx))
}
#' Spectral entropy of a time series
#'
#' Computes the spectral entropy of a time series
#'
#' @inheritParams max_level_shift
#'
#' @return A numeric value (NA when the entropy cannot be computed).
#' @author Rob J Hyndman
#' @export
entropy <- function(x, ...) {
  require_package("ForeCA")
  # Spectral entropy of the longest NA-free stretch of the series; any
  # failure (e.g. a series too short for spectral estimation) is reported
  # as NA rather than propagated.
  entropy <- try(ForeCA::spectral_entropy(na.contiguous(x))[1L], silent = TRUE)
  # Fix: inherits() is the reliable test for a condition class --
  # `class(entropy) == "try-error"` breaks when the class attribute has
  # length > 1.
  if (inherits(entropy, "try-error")) {
    entropy <- NA
  }
  return(c(entropy = entropy))
}
#' Time series features based on tiled windows
#'
#' Computes feature of a time series based on tiled (non-overlapping) windows.
#' Means or variances are produced for all tiled windows. Then stability is
#' the variance of the means, while lumpiness is the variance of the variances.
#'
#' @inheritParams max_level_shift
#' @return A numeric vector of length 2 containing a measure of lumpiness and
#' a measure of stability.
#' @author Earo Wang and Rob J Hyndman
#'
#' @rdname tile_features
#'
#' @importFrom stats var
#' @export
lumpiness <- function(x, .size = NULL, .period = 1, ...) {
  # Default window: 10 observations for non-seasonal data, otherwise one
  # full seasonal period.
  if(is.null(.size)){
    .size <- ifelse(.period == 1, 10, .period)
  }
  # Standardise so the feature is invariant to the scale of the series.
  x <- scale(x, center = TRUE, scale = TRUE)
  # Variance within each non-overlapping (tiled) window.
  varx <- tsibble::tile_dbl(x, var, na.rm = TRUE, .size = .size)
  # Fewer than two full windows cannot exhibit lumpiness.
  # NOTE(review): varx is computed before this guard, so a short series
  # still pays for (and could fail inside) the tiling step -- confirm.
  if (length(x) < 2 * .size) {
    lumpiness <- 0
  } else {
    lumpiness <- var(varx, na.rm = TRUE)
  }
  return(c(lumpiness = lumpiness))
}
#' @rdname tile_features
#' @export
stability <- function(x, .size = NULL, .period = 1, ...) {
  # Default window: 10 observations for non-seasonal data, otherwise one
  # full seasonal period.
  if(is.null(.size)){
    .size <- ifelse(.period == 1, 10, .period)
  }
  # Standardise so the feature is invariant to the scale of the series.
  x <- scale(x, center = TRUE, scale = TRUE)
  # Mean within each non-overlapping (tiled) window; stability is the
  # variance of those window means.
  meanx <- tsibble::tile_dbl(x, mean, na.rm = TRUE, .size = .size)
  # Fewer than two full windows cannot exhibit instability.
  if (length(x) < 2 * .size) {
    stability <- 0
  } else {
    stability <- var(meanx, na.rm = TRUE)
  }
  return(c(stability = stability))
}
#' Autocorrelation-based features
#'
#' Computes various measures based on autocorrelation coefficients of the
#' original series, first-differenced series and second-differenced series
#'
#' @inheritParams stability
#'
#' @return A vector of 6 values: the first autocorrelation coefficient and the
#' sum of squares of the first ten autocorrelation coefficients for the
#' original, first-differenced and twice-differenced series.
#' For seasonal data, the autocorrelation coefficient at the first seasonal lag is
#' also returned.
#'
#' @author Thiyanga Talagala
#' @export
acf_features <- function(x, .period = 1, ...) {
  # Sample autocorrelations of the raw series; enough lags to cover both
  # lag 10 and the first seasonal lag.
  raw_acf <- stats::acf(x, lag.max = max(.period, 10L), plot = FALSE,
                        na.action = stats::na.pass)$acf
  # First ten autocorrelations of the differenced series.
  d1_acf <- stats::acf(diff(x, differences = 1), lag.max = 10L, plot = FALSE,
                       na.action = stats::na.pass)$acf
  d2_acf <- stats::acf(diff(x, differences = 2), lag.max = 10L, plot = FALSE,
                       na.action = stats::na.pass)$acf
  # Position 1 of an acf vector is lag 0, so lag k lives at position k + 1.
  output <- c(
    x_acf1 = unname(raw_acf[2L]),
    x_acf10 = sum(raw_acf[2L:11L]^2),
    diff1_acf1 = unname(d1_acf[2L]),
    diff1_acf10 = sum(d1_acf[-1L]^2),
    diff2_acf1 = unname(d2_acf[2L]),
    diff2_acf10 = sum(d2_acf[-1L]^2)
  )
  # Seasonal data additionally reports the autocorrelation at the first
  # seasonal lag.
  if (.period > 1) {
    output <- c(output, seas_acf1 = unname(raw_acf[.period + 1L]))
  }
  output
}
#' Partial autocorrelation-based features
#'
#' Computes various measures based on partial autocorrelation coefficients of the
#' original series, first-differenced series and second-differenced series.
#'
#' @inheritParams acf_features
#'
#' @return A vector of 3 values: the sum of squares of the first 5 partial
#' autocorrelation coefficients of the original series, the first-differenced
#' series and the twice-differenced series.
#' For seasonal data, the partial autocorrelation coefficient at the first seasonal
#' lag is also returned.
#' @author Thiyanga Talagala
#' @export
pacf_features <- function(x, .period = 1, ...) {
  # Partial autocorrelations of the raw series; enough lags to cover both
  # lag 5 and the seasonal lag. Unlike acf(), pacf() has no lag-0 entry,
  # so position k is lag k.
  raw_pacf <- stats::pacf(x, lag.max = max(5L, .period), plot = FALSE)$acf
  # Sum of squared partial autocorrelations at lags 1..5.
  sq_sum_first5 <- function(values) sum(values[seq_len(5L)]^2)
  out <- c(
    x_pacf5 = sq_sum_first5(raw_pacf),
    diff1x_pacf5 = sq_sum_first5(
      stats::pacf(diff(x, differences = 1), lag.max = 5L, plot = FALSE)$acf
    ),
    diff2x_pacf5 = sq_sum_first5(
      stats::pacf(diff(x, differences = 2), lag.max = 5L, plot = FALSE)$acf
    )
  )
  # Seasonal data additionally reports the partial autocorrelation at the
  # first seasonal lag.
  if (.period > 1) {
    out <- c(out, seas_pacf = raw_pacf[.period])
  }
  out
}
|
# Example script for proportion::PlotpCOpBITW, extracted from the package's
# Rd examples (see the "### ..." markers produced by the extraction tool).
library(proportion)
### Name: PlotpCOpBITW
### Title: Plots p-confidence and p-bias for base Wald-T method
### Aliases: PlotpCOpBITW
### ** Examples
# n: number of trials; alp: significance (alpha) level for the interval.
n=5; alp=0.05
PlotpCOpBITW(n,alp)
| /data/genthat_extracted_code/proportion/examples/PlotpCOpBITW.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 190 | r | library(proportion)
### Name: PlotpCOpBITW
### Title: Plots p-confidence and p-bias for base Wald-T method
### Aliases: PlotpCOpBITW
### ** Examples
n=5; alp=0.05
PlotpCOpBITW(n,alp)
|
install.packages("tidyverse") #data manipulating
install.packages('ggplot2') #visualisation
install.packages('RColorBrewer') #Color palette
install.packages('readr') #the read_csv function
install.packages('ggfittext')
install.packages('treemapify')
install.packages("reshape2")
#zmiana języka błędów
Sys.setenv(LANG="en")
require("dplyr")
require("ggplot2")
require("RColorBrewer")
require('readr')
require('treemapify')
require("reshape2")
#data import
setwd("/Users/rafalpietrak/Programowanie w R/Sety danych")
summer <- read.table("summer.csv",header = TRUE,sep=",")
winter <- read.table("winter.csv",header=TRUE, sep=",")
#adding column with type of olympics when combining sets
summer$type <- rep("summer",nrow(summer))
winter$type <- rep("winter",nrow(winter))
#Loading dictionary file
dict <- read_csv("dictionary.csv")
head(dict)
dict$GDP <- dict$`GDP per Capita`
dict$`GDP per Capita`<- NULL
#combining sets
all <- bind_rows(summer,winter)
#----Questions----
#Questions which I would like to answer :
# 1.How many countries were present at olympics games ?
# 2.How many medals were gained throughout history ?
# 3.How Poles performed ?
#----Answers----
#1.
summer %>% group_by(Year,Country) %>%
summarise(Total=n()) %>%
ggplot(mapping=aes(x=Year,y=Total))+
geom_point(shape=21, fill="blue", color="#56B4E9", size=1) +
scale_x_continuous(minor_breaks = seq(min(summer$Year) , max(summer$Year), 4), breaks = seq(min(summer$Year), max(summer$Year), 4))+theme_minimal()
winter %>% group_by(Year,Country) %>%
summarise(Total=n()) %>%
ggplot(mapping=aes(x=Year,y=Total))+
geom_point(shape=21, fill="green", color="#56B4E9", size=1) +
scale_x_continuous(minor_breaks = seq(min(winter$Year) , max(winter$Year), 4), breaks = seq(min(winter$Year), max(winter$Year), 4))+theme_minimal()
all %>% group_by(Year,Country,type) %>%
summarise(Total=n()) %>%
ggplot(mapping=aes(x=Year,y=Total,colour = type))+
geom_point(shape=21, size=1) +
scale_x_continuous(minor_breaks = seq(min(all$Year) , max(all$Year), 4), breaks = seq(min(all$Year), max(all$Year), 4))+theme_minimal()
n_medal <- all %>% group_by(Year,type) %>%
summarise(Total=n())
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#2.
n_medal %>%
ggplot(mapping=aes(x=Year,y=Total,fill=type)) + geom_bar(stat="identity") +geom_text(aes(label=Total), vjust=-1)+scale_fill_brewer(palette = "Paired")+
scale_x_continuous(minor_breaks = seq(min(n_medal$Year),max(n_medal$Year),4),breaks = seq(min(n_medal$Year), max(n_medal$Year), 4))+
ylab(expression("vol")) + xlab(expression("Year"))+theme_minimal()+
ggtitle("Volume of gained medals (years 1896 - 2012)")
#pokazuje wszystkie dostępne palety w pakiecie RColorBrewer
display.brewer.all()
#3. Now Let's check how many Polish representats were awarded with medal
# for each type of olympics ?
Poles_summer <- all %>% filter(Country=="POL",type=="summer") %>% group_by(Year,type,Medal) %>%
summarise(Total=n())
Poles_winter <- all %>% filter(Country=="POL",type=="winter") %>% group_by(Year,type,Medal) %>%
summarise(Total=n())
#Showing polish medals from summer Olympics
# Stacked bars of Polish summer-Olympics medals per games, coloured by medal
# type, with value labels centred in each segment.
Poles_summer %>%
  ggplot(mapping=aes(x=Year,y=Total,fill=Medal)) + geom_bar(stat="identity")+geom_text(aes(label=Total), size = 3, position = position_stack(vjust = 0.5))+
  scale_fill_brewer(palette = "Paired")+
  # Fix: the object `Poles` is never defined in this script (only
  # Poles_summer / Poles_winter exist); the axis limits must come from the
  # data being plotted.
  scale_x_continuous(minor_breaks = seq(min(Poles_summer$Year),max(Poles_summer$Year),4),breaks = seq(min(Poles_summer$Year), max(Poles_summer$Year), 4))+
  ylab(expression("vol")) + xlab(expression("Year"))+theme_minimal()+
  ggtitle("Structure of medals of Poles on summer Olympics (years 1896 - 2012)")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Showing polish medals from winter Olympics
Poles_winter %>%
ggplot(mapping=aes(x=Year,y=Total,fill=Medal)) + geom_bar(stat="identity")+geom_text(aes(label=Total), size = 3, position = position_stack(vjust = 0.5))+
scale_fill_brewer(palette = "Paired")+
scale_x_continuous(minor_breaks = seq(min(Poles_winter$Year),max(Poles_winter$Year),4),breaks = seq(min(Poles_winter$Year), max(Poles_winter$Year), 4))+
ylab(expression("vol")) + xlab(expression("Year"))+theme_minimal()+
ggtitle("Structure of medals of Poles on winter Olympics (years 1896 - 2012)")+
theme(axis.text.x = element_text(angle = 0, hjust = 1))
#Looks like polish winter successes seems to occure in last for winter games,
# especially in 2012 and 2016
#But how Poland looks compared to rest of world comparing number of medals and GDP ?
#Lets prepare tree map
#adding full country name to our data from dictionary
country_medals <- all %>% group_by(Country) %>% summarise(medals = n())
country_medals <- country_medals %>%
mutate(Country_full = factor(Country,
levels=c(dict$Code), #we collect the full country names from dictionary
labels=c(dict$Country))) #add full country name to our database
#adding country's Population to our combined set
country_medals <- country_medals %>% inner_join(dict,by = c("Country"="Code"))
country_medals$Population.x<- NULL
country_medals$Country.y<- NULL
colnames(country_medals) <- c("Country","medals","Country_full","Population","GDP")
#Creating new categorical variable with GDP value
country_medals$gdp_cat[country_medals$GDP > 50000] <- ">50"
country_medals$gdp_cat[country_medals$GDP > 35000 & country_medals$GDP <= 50000] <- "35-50"
country_medals$gdp_cat[country_medals$GDP > 20000 & country_medals$GDP <= 35000] <- "20-35"
country_medals$gdp_cat[country_medals$GDP > 10000 & country_medals$GDP <= 20000] <- "10-20"
country_medals$gdp_cat[country_medals$GDP > 5000 & country_medals$GDP <= 10000] <- "5-10"
country_medals$gdp_cat[country_medals$GDP > 2000 & country_medals$GDP <= 5000] <- "2-5"
country_medals$gdp_cat[country_medals$GDP < 2000 ] <- "< 2"
country_medals$gdp_cat[is.na(country_medals$GDP)] <- "no data"
#Preparing variable for treemap
country_medals <- mutate(country_medals, Country_full = as.character(Country_full))
country_medals <- mutate(country_medals, GDP = as.factor(GDP))
country_medals <- mutate(country_medals, medals = as.numeric(medals))
country_medals$gdp_cat<- as.factor(country_medals$gdp_cat)
#Treemap with countries gained medals
country_medals$label <- paste(country_medals$Country_full, country_medals$medals, sep = ", ")
ggplot(country_medals, aes(area = medals,fill=country_medals$gdp_cat, label = label)) +
geom_treemap() +
geom_treemap_text(
fontface = "italic",
colour= "white",
place = "centre",
grow = TRUE
)+scale_fill_brewer(palette = "Set2")+theme(legend.position = "bottom")+
labs(
title = "Countries by all medals won in history",
caption = "The area of each tile represents the country's amount of gained medals grouped by categories of GDP", fill="GDP in k USD"
)
# Let's check whether is a trend in number of medals compared to GDP per Capita ?
#conclusion_1: it seems that with higher GDP per Capita, amount of medals increase...
country_medals$GDP <- as.numeric(as.character(country_medals$GDP))
country_medals %>% ggplot(aes(x=as.numeric(as.character(GDP)),y=medals,na.rm=TRUE))+scale_x_continuous(breaks = c(25, 50, 75,100))+geom_point()+xlab("GDP per Capita")+
ylab("Number of medals")+geom_smooth(span=0.1,method=lm,se=T, size=2,colour="green")+theme_minimal()
# Loess-ważona regresja lokalnie wielomianowa
# (local polynomial regression fitting) Przeprowadzana dla każdego punktu, polega na
# wygładzeniu linii regresji w kierunku zera.
country_medals$GDP <- as.numeric(as.character(country_medals$GDP))
country_medals %>% ggplot(aes(x=as.numeric(as.character(GDP)),y=medals,na.rm=TRUE))+scale_x_continuous(breaks = c(25, 50, 75,100))+geom_point()+xlab("GDP per Capita")+
ylab("Number of medals")+geom_smooth(span=0.1,method=loess,se=T, size=2,colour="green")+theme_minimal()
#conclusion_2: however using loess polynomial regression fitting, amount of medals start to grow
# from specific moment
| /Kaggle_medaliści.R | no_license | Rafal-Pietrak/kaggle | R | false | false | 8,125 | r | install.packages("tidyverse") #data manipulating
install.packages('ggplot2') #visualisation
install.packages('RColorBrewer') #Color palette
install.packages('readr') #the read_csv function
install.packages('ggfittext')
install.packages('treemapify')
install.packages("reshape2")
#zmiana języka błędów
Sys.setenv(LANG="en")
require("dplyr")
require("ggplot2")
require("RColorBrewer")
require('readr')
require('treemapify')
require("reshape2")
#data import
setwd("/Users/rafalpietrak/Programowanie w R/Sety danych")
summer <- read.table("summer.csv",header = TRUE,sep=",")
winter <- read.table("winter.csv",header=TRUE, sep=",")
#adding column with type of olympics when combining sets
summer$type <- rep("summer",nrow(summer))
winter$type <- rep("winter",nrow(winter))
#Loading dictionary file
dict <- read_csv("dictionary.csv")
head(dict)
dict$GDP <- dict$`GDP per Capita`
dict$`GDP per Capita`<- NULL
#combining sets
all <- bind_rows(summer,winter)
#----Questions----
#Questions which I would like to answer :
# 1.How many countries were present at olympics games ?
# 2.How many medals were gained throughout history ?
# 3.How Poles performed ?
#----Answers----
#1.
summer %>% group_by(Year,Country) %>%
summarise(Total=n()) %>%
ggplot(mapping=aes(x=Year,y=Total))+
geom_point(shape=21, fill="blue", color="#56B4E9", size=1) +
scale_x_continuous(minor_breaks = seq(min(summer$Year) , max(summer$Year), 4), breaks = seq(min(summer$Year), max(summer$Year), 4))+theme_minimal()
winter %>% group_by(Year,Country) %>%
summarise(Total=n()) %>%
ggplot(mapping=aes(x=Year,y=Total))+
geom_point(shape=21, fill="green", color="#56B4E9", size=1) +
scale_x_continuous(minor_breaks = seq(min(winter$Year) , max(winter$Year), 4), breaks = seq(min(winter$Year), max(winter$Year), 4))+theme_minimal()
all %>% group_by(Year,Country,type) %>%
summarise(Total=n()) %>%
ggplot(mapping=aes(x=Year,y=Total,colour = type))+
geom_point(shape=21, size=1) +
scale_x_continuous(minor_breaks = seq(min(all$Year) , max(all$Year), 4), breaks = seq(min(all$Year), max(all$Year), 4))+theme_minimal()
n_medal <- all %>% group_by(Year,type) %>%
summarise(Total=n())
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#2.
n_medal %>%
ggplot(mapping=aes(x=Year,y=Total,fill=type)) + geom_bar(stat="identity") +geom_text(aes(label=Total), vjust=-1)+scale_fill_brewer(palette = "Paired")+
scale_x_continuous(minor_breaks = seq(min(n_medal$Year),max(n_medal$Year),4),breaks = seq(min(n_medal$Year), max(n_medal$Year), 4))+
ylab(expression("vol")) + xlab(expression("Year"))+theme_minimal()+
ggtitle("Volume of gained medals (years 1896 - 2012)")
#pokazuje wszystkie dostępne palety w pakiecie RColorBrewer
display.brewer.all()
#3. Now Let's check how many Polish representats were awarded with medal
# for each type of olympics ?
Poles_summer <- all %>% filter(Country=="POL",type=="summer") %>% group_by(Year,type,Medal) %>%
summarise(Total=n())
Poles_winter <- all %>% filter(Country=="POL",type=="winter") %>% group_by(Year,type,Medal) %>%
summarise(Total=n())
#Showing polish medals from summer Olympics
# Stacked bars of Polish summer-Olympics medals per games, coloured by medal
# type, with value labels centred in each segment.
Poles_summer %>%
  ggplot(mapping=aes(x=Year,y=Total,fill=Medal)) + geom_bar(stat="identity")+geom_text(aes(label=Total), size = 3, position = position_stack(vjust = 0.5))+
  scale_fill_brewer(palette = "Paired")+
  # Fix: the object `Poles` is never defined in this script (only
  # Poles_summer / Poles_winter exist); the axis limits must come from the
  # data being plotted.
  scale_x_continuous(minor_breaks = seq(min(Poles_summer$Year),max(Poles_summer$Year),4),breaks = seq(min(Poles_summer$Year), max(Poles_summer$Year), 4))+
  ylab(expression("vol")) + xlab(expression("Year"))+theme_minimal()+
  ggtitle("Structure of medals of Poles on summer Olympics (years 1896 - 2012)")+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Showing polish medals from winter Olympics
Poles_winter %>%
ggplot(mapping=aes(x=Year,y=Total,fill=Medal)) + geom_bar(stat="identity")+geom_text(aes(label=Total), size = 3, position = position_stack(vjust = 0.5))+
scale_fill_brewer(palette = "Paired")+
scale_x_continuous(minor_breaks = seq(min(Poles_winter$Year),max(Poles_winter$Year),4),breaks = seq(min(Poles_winter$Year), max(Poles_winter$Year), 4))+
ylab(expression("vol")) + xlab(expression("Year"))+theme_minimal()+
ggtitle("Structure of medals of Poles on winter Olympics (years 1896 - 2012)")+
theme(axis.text.x = element_text(angle = 0, hjust = 1))
#Looks like polish winter successes seems to occure in last for winter games,
# especially in 2012 and 2016
#But how Poland looks compared to rest of world comparing number of medals and GDP ?
#Lets prepare tree map
#adding full country name to our data from dictionary
country_medals <- all %>% group_by(Country) %>% summarise(medals = n())
country_medals <- country_medals %>%
mutate(Country_full = factor(Country,
levels=c(dict$Code), #we collect the full country names from dictionary
labels=c(dict$Country))) #add full country name to our database
# Join the country dictionary onto the medal counts to pick up each
# country's full name, population and GDP.
country_medals <- country_medals %>% inner_join(dict,by = c("Country"="Code"))
# Drop the duplicated columns produced by the join, then restore clean names.
country_medals$Population.x<- NULL
country_medals$Country.y<- NULL
colnames(country_medals) <- c("Country","medals","Country_full","Population","GDP")
# Bucket GDP per capita (labelled later as "GDP in k USD") into categories.
# NOTE(review): GDP exactly equal to 2000 matches neither the "< 2000" test
# nor the "> 2000" bin, so its gdp_cat stays NA -- confirm this is intended.
country_medals$gdp_cat[country_medals$GDP > 50000] <- ">50"
country_medals$gdp_cat[country_medals$GDP > 35000 & country_medals$GDP <= 50000] <- "35-50"
country_medals$gdp_cat[country_medals$GDP > 20000 & country_medals$GDP <= 35000] <- "20-35"
country_medals$gdp_cat[country_medals$GDP > 10000 & country_medals$GDP <= 20000] <- "10-20"
country_medals$gdp_cat[country_medals$GDP > 5000 & country_medals$GDP <= 10000] <- "5-10"
country_medals$gdp_cat[country_medals$GDP > 2000 & country_medals$GDP <= 5000] <- "2-5"
country_medals$gdp_cat[country_medals$GDP < 2000 ] <- "< 2"
country_medals$gdp_cat[is.na(country_medals$GDP)] <- "no data"
# Coerce columns to the types expected by the treemap layers.
country_medals <- mutate(country_medals, Country_full = as.character(Country_full))
country_medals <- mutate(country_medals, GDP = as.factor(GDP))
country_medals <- mutate(country_medals, medals = as.numeric(medals))
country_medals$gdp_cat<- as.factor(country_medals$gdp_cat)
# Treemap: tile area = medals won by the country, fill colour = GDP category.
country_medals$label <- paste(country_medals$Country_full, country_medals$medals, sep = ", ")
ggplot(country_medals, aes(area = medals,fill=country_medals$gdp_cat, label = label)) +
geom_treemap() +
geom_treemap_text(
fontface = "italic",
colour= "white",
place = "centre",
grow = TRUE
)+scale_fill_brewer(palette = "Set2")+theme(legend.position = "bottom")+
labs(
title = "Countries by all medals won in history",
caption = "The area of each tile represents the country's amount of gained medals grouped by categories of GDP", fill="GDP in k USD"
)
# Is there a trend in medal count versus GDP per capita?
# Conclusion 1: with a linear fit, medal counts appear to rise with GDP.
country_medals$GDP <- as.numeric(as.character(country_medals$GDP))
country_medals %>% ggplot(aes(x=as.numeric(as.character(GDP)),y=medals,na.rm=TRUE))+scale_x_continuous(breaks = c(25, 50, 75,100))+geom_point()+xlab("GDP per Capita")+
ylab("Number of medals")+geom_smooth(span=0.1,method=lm,se=T, size=2,colour="green")+theme_minimal()
# LOESS -- locally weighted polynomial regression, fitted around each point,
# which smooths the regression line. (Translated from the original Polish note.)
country_medals$GDP <- as.numeric(as.character(country_medals$GDP))
country_medals %>% ggplot(aes(x=as.numeric(as.character(GDP)),y=medals,na.rm=TRUE))+scale_x_continuous(breaks = c(25, 50, 75,100))+geom_point()+xlab("GDP per Capita")+
ylab("Number of medals")+geom_smooth(span=0.1,method=loess,se=T, size=2,colour="green")+theme_minimal()
# Conclusion 2: under the LOESS fit, the medal count only starts to grow
# beyond a certain GDP level.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str_PM25.R
\name{str_PM25}
\alias{str_PM25}
\title{str_PM25}
\usage{
str_PM25(
format = c("character", "utf8", "html", "TeX", "markdown"),
verbose = getOption("verbose", default = FALSE)
)
}
\arguments{
\item{format}{choice of output format}
\item{verbose}{(logical)}
}
\description{
str_PM25
}
| /man/str_PM25.Rd | no_license | BAAQMD/strtools | R | false | true | 378 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str_PM25.R
\name{str_PM25}
\alias{str_PM25}
\title{str_PM25}
\usage{
str_PM25(
format = c("character", "utf8", "html", "TeX", "markdown"),
verbose = getOption("verbose", default = FALSE)
)
}
\arguments{
\item{format}{choice of output format}
\item{verbose}{(logical)}
}
\description{
str_PM25
}
|
## Create a reproducible 90/10 train/test split of the spambase data set
## and write both partitions back to disk.
train.ratio <- 0.9

# Read the raw data; columns 55-57 are discarded (presumably unwanted
# features -- confirm against the spambase column description).
spam <- read.csv("./data/spambase.data", header = FALSE)
data <- spam[, -c(55, 56, 57)]

# Fixed seed so the split is identical on every run.
set.seed(1)
n.obs <- nrow(data)
train.index <- sample(seq_len(n.obs), as.integer(train.ratio * n.obs))
training <- data[train.index, ]
testing <- data[-train.index, ]

# Space-delimited output with no quoting, row names or headers.
write.table(training, "./data/training", quote = FALSE, row.names = FALSE, col.names = FALSE)
write.table(testing, "./data/testing", quote = FALSE, row.names = FALSE, col.names = FALSE)
| /R/0_spam_data_prepare.R | no_license | kiendang/sparkr-naivebayes-example | R | false | false | 436 | r | train.ratio = 0.9
spam <- read.csv("./data/spambase.data", header=FALSE)
data <- spam[, -c(55, 56, 57)]
set.seed(1)
train.index <- sample(1:dim(data)[1], as.integer(train.ratio * dim(data)[1]))
training <- data[train.index, ]
testing <- data[-train.index, ]
write.table(training, "./data/training", quote=FALSE, row.names=FALSE, col.names=FALSE)
write.table(testing, "./data/testing", quote=FALSE, row.names=FALSE, col.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_npp_cbpm.R
\name{get_npp_cbpm}
\alias{get_npp_cbpm}
\title{get_npp_cbpm}
\usage{
get_npp_cbpm(
file.path,
grid.size = "low",
time.span = "monthly",
satellite = "MODIS",
mindate,
maxdate
)
}
\arguments{
\item{file.path}{The folder(an empty folder) path where your want to save your file (avoid Chinese characters).}
\item{grid.size}{The grid size that you choose. There are two grid sizes that can be chosen:
'low'(default): 2160x1080, 'high': 2160x4320.}
\item{time.span}{The time span of the NPP data. There are two time spans: 'monthly' represents monthly NPP data,
and 'dayly' represents 8-day data.}
\item{satellite}{Choose satellites, 'MODIS', 'VIIRS', and 'SeaWiFS'. The default is 'MODIS'.}
\item{mindate}{The minimum date of data you want to download.}
\item{maxdate}{The maximum date of data you want to download.}
}
\value{
download some files in your folder.
}
\description{
get_npp_cbpm() is used for automatically downloading, decompressing and renaming
ocean net primary production data of the CBPM model by custom grid size, time
span and satellite.
}
\note{
units: mg C m-2 d-1
}
\examples{
\dontrun{
library(nppr)
library(RCurl)
library(XML)
library(R.utils)
library(tidyverse)
library(lubridate)
get_npp_cbpm(file.path = 'C:\\\\Users\\\\xucha\\\\Desktop\\\\DATA',
mindate = '2016-02-04', maxdate ='2016-06-28')
}
}
\author{
Chao Xu
}
| /man/get_npp_cbpm.Rd | no_license | chaoxv/nppr | R | false | true | 1,439 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_npp_cbpm.R
\name{get_npp_cbpm}
\alias{get_npp_cbpm}
\title{get_npp_cbpm}
\usage{
get_npp_cbpm(
file.path,
grid.size = "low",
time.span = "monthly",
satellite = "MODIS",
mindate,
maxdate
)
}
\arguments{
\item{file.path}{The folder(an empty folder) path where your want to save your file (avoid Chinese characters).}
\item{grid.size}{The grid size that you choose. There are two grid sizes can be choosed:
'low'(default): 2160x1080, 'high': 2160x4320.}
\item{time.span}{The time span of npp data. There two time spans: 'monthly' represent monthly npp data.
'dayly' represent 8 days data.}
\item{satellite}{Choose satellites, 'MODIS', 'VIIRS', and 'SeaWiFS'. The default is 'MODIS'.}
\item{mindate}{The minimum date of data you want to download.}
\item{maxdate}{The maximum date of data you want to download.}
}
\value{
download some files in your folder.
}
\description{
get_npp_cbpm() is used for automatically downloadiing, decompressing and renaming
ocean net primary production data of CBPM model by custom grid size, time
span and satellite.
}
\note{
units: mg C m-2 d-1
}
\examples{
\dontrun{
library(nppr)
library(RCurl)
library(XML)
library(R.utils)
library(tidyverse)
library(lubridate)
get_npp_cbpm(file.path = 'C:\\\\Users\\\\xucha\\\\Desktop\\\\DATA',
mindate = '2016-02-04', maxdate ='2016-06-28')
}
}
\author{
Chao Xu
}
|
# Template script (the .FYC., .year. and .yy. tokens are placeholders that
# are substituted before execution): estimates weighted totals of adults
# advised to quit smoking, by perceived health status, from MEPS survey data.

# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
# Pull the AHRQ MEPS helper package straight from GitHub.
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Handle strata containing a single PSU when estimating variances.
options(survey.lonely.psu="adjust")
# Load FYC file
# NOTE(review): read_sas() is provided by the haven package, which is not in
# package_names above -- verify it is attached elsewhere (e.g. via MEPS).
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Negative AGE* values are recoded to NA, then the most recent non-missing
# age per person is kept as AGELAST.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
# ind is set to 1 for every row (not referenced later in this chunk).
FYC$ind = 1
# Adults advised to quit smoking
# The 2002 file uses the older ADDSMK42 name for this variable.
if(year == 2002)
FYC <- FYC %>% rename(ADNSMK42 = ADDSMK42)
# Recode the advice-to-quit item into labelled factor levels.
FYC <- FYC %>%
mutate(
adult_nosmok = recode_factor(ADNSMK42, .default = "Missing", .missing = "Missing",
"1" = "Told to quit",
"2" = "Not told to quit",
"3" = "Had no visits in the last 12 months",
"-9" = "Not ascertained",
"-1" = "Inapplicable"))
# Perceived health status
# The 1996 file stores health status in RTEHLTH1/RTEHLTH2 instead.
if(year == 1996)
FYC <- FYC %>% mutate(RTHLTH53 = RTEHLTH2, RTHLTH42 = RTEHLTH2, RTHLTH31 = RTEHLTH1)
# Recode negative codes to NA, take the latest round's answer, and label it.
FYC <- FYC %>%
mutate_at(vars(starts_with("RTHLTH")), funs(replace(., .< 0, NA))) %>%
mutate(
health = coalesce(RTHLTH53, RTHLTH42, RTHLTH31),
health = recode_factor(health, .default = "Missing", .missing = "Missing",
"1" = "Excellent",
"2" = "Very good",
"3" = "Good",
"4" = "Fair",
"5" = "Poor"))
# Complex-survey design definition using the SAQWT.yy.F weight variable.
SAQdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~SAQWT.yy.F,
data = FYC,
nest = TRUE)
# Weighted totals of advice-to-quit status by health status, restricted to
# respondents with ADSMOK42 == 1 (current smokers per the subset condition).
results <- svyby(~adult_nosmok, FUN = svytotal, by = ~health, design = subset(SAQdsgn, ADSMOK42==1))
print(results)
| /mepstrends/hc_care/json/code/r/totPOP__health__adult_nosmok__.r | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 1,704 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Adults advised to quit smoking
if(year == 2002)
FYC <- FYC %>% rename(ADNSMK42 = ADDSMK42)
FYC <- FYC %>%
mutate(
adult_nosmok = recode_factor(ADNSMK42, .default = "Missing", .missing = "Missing",
"1" = "Told to quit",
"2" = "Not told to quit",
"3" = "Had no visits in the last 12 months",
"-9" = "Not ascertained",
"-1" = "Inapplicable"))
# Perceived health status
if(year == 1996)
FYC <- FYC %>% mutate(RTHLTH53 = RTEHLTH2, RTHLTH42 = RTEHLTH2, RTHLTH31 = RTEHLTH1)
FYC <- FYC %>%
mutate_at(vars(starts_with("RTHLTH")), funs(replace(., .< 0, NA))) %>%
mutate(
health = coalesce(RTHLTH53, RTHLTH42, RTHLTH31),
health = recode_factor(health, .default = "Missing", .missing = "Missing",
"1" = "Excellent",
"2" = "Very good",
"3" = "Good",
"4" = "Fair",
"5" = "Poor"))
SAQdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~SAQWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~adult_nosmok, FUN = svytotal, by = ~health, design = subset(SAQdsgn, ADSMOK42==1))
print(results)
|
#' Tangency (maximum Sharpe ratio) portfolio
#'
#' Computes the tangency portfolio from a matrix of asset returns:
#' w = C^-1 (mu - rf) / (1' C^-1 (mu - rf)), where mu is the vector of mean
#' asset returns and C their sample covariance matrix.
#'
#' @param returns numeric matrix (or data frame) of asset returns, one column
#'   per asset and one row per observation.
#' @param rf scalar risk-free rate used to form excess returns (default 0.01).
#' @param digits optional integer; when supplied, all components are rounded
#'   to this many digits and renamed (see Value).
#'
#' @return A list with the tangency-portfolio weights, expected return and
#'   volatility. Components are \code{wts}/\code{mu}/\code{vol} when
#'   \code{digits} is \code{NULL}, otherwise \code{WTS.TAN}/\code{MU.TAN}/
#'   \code{VOL.GMV}. (The \code{VOL.GMV} name is kept for backward
#'   compatibility; it appears to be a historical copy/paste from a GMV
#'   routine -- confirm before renaming.)
#' @export
#'
#' @examples
#' # mathTport(cbind(a = rnorm(100, 0.05, 0.1), b = rnorm(100, 0.07, 0.2)))
mathTport <- function(returns, rf = 0.01, digits = NULL)
{
  mu <- apply(returns, 2, mean)       # mean return per asset
  C <- var(returns)                   # sample covariance matrix
  one <- rep(1, nrow(C))
  mu.e <- mu - rf * one               # excess mean returns
  z <- solve(C, mu.e)                 # z = C^-1 %*% mu.e
  cc <- as.numeric(t(one) %*% z)      # normalizer: 1' C^-1 mu.e (scalar)
  wtsTan <- z / cc                    # tangency weights (sum to 1)
  muTan <- as.numeric(t(mu) %*% wtsTan)
  # BUG FIX: coerce to a plain scalar; previously this was returned as a
  # 1x1 matrix, inconsistent with muTan above.
  volTan <- as.numeric((t(mu.e) %*% z)^0.5 / abs(cc))
  if (is.null(digits)) {
    out <- list(wts = wtsTan, mu = muTan, vol = volTan)
  } else {
    out <- list(WTS.TAN = wtsTan, MU.TAN = muTan, VOL.GMV = volTan)
    out <- lapply(out, round, digits = digits)
  }
  out
}
| /R/mathTport.R | permissive | kecoli/PCRM | R | false | false | 794 | r | #' mathTport
#'
#' @param returns
#' @param rf
#' @param digits
#'
#' @return
#' @export
#'
#' @examples
mathTport = function(returns, rf = 0.01,digits = NULL)
{
mu <- apply(returns, 2, mean)
C <- var(returns)
one <- rep(1, nrow(C))
mu.e <- mu - rf * one # Compute excess returns
z <- solve(C, mu.e) # z = C.inv * mu.e
cc <- t(one) %*% z # cc = 1.transpose * C.inv. * mu.e
cc <- as.numeric(cc) # Convert 1-by-1 matrix to a scalar
wtsTan <- z/cc
muTan <- as.numeric(t(mu) %*% wtsTan)
volTan <- (t(mu.e) %*% z)^0.5/abs(cc)
if(is.null(digits))
{out = list(wts = wtsTan, mu = muTan, vol = volTan)}
else
{out = list(WTS.TAN= wtsTan, MU.TAN = muTan, VOL.GMV = volTan)
out = lapply(out,round,digits=digits)}
out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting.R
\name{BridgeReport}
\alias{BridgeReport}
\title{Shinyapp reporting for drawing RNA decay curve.}
\usage{
BridgeReport(inputFile, group = c("Control", "Knockdown"), hour = c(0, 1, 2,
4, 8, 12), comparisonFile = c("Control", "Knockdown"),
searchRowName = "symbol", inforColumn = 4, color = c("black", "red"),
TimePointRemoval1 = c(1, 2), TimePointRemoval2 = c(8, 12))
}
\arguments{
\item{inputFile}{The vector of tab-delimited matrix file.}
\item{group}{The vector of group names.}
\item{hour}{The vector of time course about BRIC-seq experiment.}
\item{comparisonFile}{The vector of group names.}
\item{searchRowName}{Row name for searching.}
\item{inforColumn}{The number of information columns.}
\item{color}{color of line graph for two decay curve.}
\item{TimePointRemoval1}{The candidate_1 set of time points considered for removal.}
\item{TimePointRemoval2}{The candidate_2 set of time points considered for removal.}
}
\value{
shiny.appobj object for searching and showing RNA decay curve for each gene.
}
\description{
\code{BridgeReport} returns a shinyapp object to draw RNA decay curve.
You can easily check RNA half-life and RNA decay fitting curve on
your web browser.
}
\examples{
library(data.table)
normalized_rpkm_matrix <- data.table(gr_id = c(8, 9, 14),
symbol = c("AAAS", "AACS", "AADAT"),
accession_id = c("NM_015665", "NM_023928", "NM_182662"),
locus = c("chr12", "chr12", "chr4"),
CTRL_1_0h = c(1.00, 1.00, 1.00),
CTRL_1_1h = c(1.00, 0.86, 0.96),
CTRL_1_2h = c(1.00, 0.96, 0.88),
CTRL_1_4h = c(1.00, 0.74, 0.85),
CTRL_1_8h = c(1.00, 0.86, 0.68),
CTRL_1_12h = c(1.01, 0.65, 0.60),
gr_id = c(8, 9, 14),
symbol = c("AAAS", "AACS", "AADAT"),
accession_id = c("NM_015665", "NM_023928", "NM_182662"),
locus = c("chr12", "chr12", "chr4"),
KD_1_0h = c(1.00, 1.00, 1.00),
KD_1_1h = c(1.01, 0.73, 0.71),
KD_1_2h = c(1.01, 0.77, 0.69),
KD_1_4h = c(1.01, 0.72, 0.67),
KD_1_8h = c(1.01, 0.64, 0.38),
KD_1_12h = c(1.00, 0.89, 0.63))
group <- c("Control", "Knockdown")
hour <- c(0, 1, 2, 4, 8, 12)
halflife_table <- BridgeRHalfLifeCalcR2Select(normalized_rpkm_matrix,
group = group,
hour = hour,
save = FALSE)
pvalue_table <- BridgeRPvalueEvaluation(halflife_table,
save = FALSE)
shiny_test <- BridgeReport(pvalue_table)
}
| /man/BridgeReport.Rd | no_license | cran/bridger2 | R | false | true | 3,248 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting.R
\name{BridgeReport}
\alias{BridgeReport}
\title{Shinyapp reporting for drawing RNA decay curve.}
\usage{
BridgeReport(inputFile, group = c("Control", "Knockdown"), hour = c(0, 1, 2,
4, 8, 12), comparisonFile = c("Control", "Knockdown"),
searchRowName = "symbol", inforColumn = 4, color = c("black", "red"),
TimePointRemoval1 = c(1, 2), TimePointRemoval2 = c(8, 12))
}
\arguments{
\item{inputFile}{The vector of tab-delimited matrix file.}
\item{group}{The vector of group names.}
\item{hour}{The vector of time course about BRIC-seq experiment.}
\item{comparisonFile}{The vector of group names.}
\item{searchRowName}{Row name for searching.}
\item{inforColumn}{The number of information columns.}
\item{color}{color of line graph for two decay curve.}
\item{TimePointRemoval1}{The candicate_1 of time point removal.}
\item{TimePointRemoval2}{The candicate_2 of time point removal.}
}
\value{
shiny.appobj object for searching and showing RNA decay curve for each gene.
}
\description{
\code{BridgeReport} returns a shinyapp object to draw RNA decay curve.
You can easily check RNA half-life and RNA decay fitting curve on
your web browser.
}
\examples{
library(data.table)
normalized_rpkm_matrix <- data.table(gr_id = c(8, 9, 14),
symbol = c("AAAS", "AACS", "AADAT"),
accession_id = c("NM_015665", "NM_023928", "NM_182662"),
locus = c("chr12", "chr12", "chr4"),
CTRL_1_0h = c(1.00, 1.00, 1.00),
CTRL_1_1h = c(1.00, 0.86, 0.96),
CTRL_1_2h = c(1.00, 0.96, 0.88),
CTRL_1_4h = c(1.00, 0.74, 0.85),
CTRL_1_8h = c(1.00, 0.86, 0.68),
CTRL_1_12h = c(1.01, 0.65, 0.60),
gr_id = c(8, 9, 14),
symbol = c("AAAS", "AACS", "AADAT"),
accession_id = c("NM_015665", "NM_023928", "NM_182662"),
locus = c("chr12", "chr12", "chr4"),
KD_1_0h = c(1.00, 1.00, 1.00),
KD_1_1h = c(1.01, 0.73, 0.71),
KD_1_2h = c(1.01, 0.77, 0.69),
KD_1_4h = c(1.01, 0.72, 0.67),
KD_1_8h = c(1.01, 0.64, 0.38),
KD_1_12h = c(1.00, 0.89, 0.63))
group <- c("Control", "Knockdown")
hour <- c(0, 1, 2, 4, 8, 12)
halflife_table <- BridgeRHalfLifeCalcR2Select(normalized_rpkm_matrix,
group = group,
hour = hour,
save = FALSE)
pvalue_table <- BridgeRPvalueEvaluation(halflife_table,
save = FALSE)
shiny_test <- BridgeReport(pvalue_table)
}
|
# Auto-generated fuzzing replay: reconstructs a captured argument list
# (note m = NULL and extreme doubles in the 8x3 in_m matrix) and feeds it
# back into the package-internal routine to reproduce a crash/valgrind hit.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 3.26959793156717e+296, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# ::: is required because the target function is not exported from CNull.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615784260-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 3.26959793156717e+296, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# Exploratory Data Analysis - Course Assignment 2 - Plot2
# Dataset:
# PM2.5 Emissions Data:
# summarySCC_PM25: data frame with all of the PM2.5 emissions data for
# 1999, 2002, 2005, and 2008. For each year, the table
# contains number of tons of PM2.5 emitted from a specific
# type of source for the entire year.
#
# Source_Classification_Code: mapping from the SCC digit strings in the Emissions table
# to the actual name of the PM2.5 source.
# Remove everything from the workspace
rm(list = ls())
## Set the working directory
## setwd('C:/Users/ABaker/Documents/GitHub/Coursera/Exploratory Data Analysis/assignment 2')
list.files("exdata-data-NEI_data")
## Read in both data files
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
# SCC <- readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")
str(NEI)
# str(SCC)
## We need to combine the two data sets based on the SCC code
# mergedData = merge(NEI, SCC, by.x="SCC", by.y="SCC", all = TRUE) # all=TRUE includes NAs like a full outer join
# head(mergedData)
## We can now remove the old variables
# rm(NEI)
# rm(SCC)
## Plot 2 - Have total emissions from PM2.5 decreased in Baltimore City, Maryland
## (fips == "24510") from 1999 to 2008? Use the base plotting system
## to answer this question
## First need to convert the year variable to a factor
NEI[,"year"] = as.factor(NEI[,"year"])
str(NEI)
## I'll also convert Pollutant to a factor just to check the values
## mergedData[,"Pollutant"] = as.factor(mergedData[,"Pollutant"])
## str(mergedData$Pollutant)
## Now we need to filter on only fips == 24510
NEI.24510 <- NEI[which(NEI$fips == "24510"),]
str(NEI.24510)
## Now we need to summarise the data by year.
## We use aggregate and create a new data.frame
NEI.24510_sum_by_year <- aggregate(NEI.24510$Emissions, by=list(NEI.24510$year), FUN=sum)
## Set default plotting parameters
par(mar=c(5.1, 4.1, 4.1, 2.1), mgp=c(3, 1, 0), las=0, mfrow = c(1, 1))
## Now we can use the barplot function to plot by year the sum of emissions
barplot(NEI.24510_sum_by_year$x, names = NEI.24510_sum_by_year$Group.1, xlab = "Year",
ylab = expression("Total Emissions (tonnes) " * PM[2.5]),
main = "Baltimore (24510) Emissions (tonnes) / Year",
col = "lightcyan2")
dev.copy(png, file = "plot2.png")
dev.off() | /Exploratory Data Analysis/assignment 2/plot2.R | no_license | Ads99/Coursera | R | false | false | 2,465 | r | # Exploratory Data Analysis - Course Assignment 2 - Plot2
# Dataset:
# PM2.5 Emissions Data:
# summarySCC_PM25: data frame with all of the PM2.5 emissions data for
# 1999, 2002, 2005, and 2008. For each year, the table
# contains number of tons of PM2.5 emitted from a specific
# type of source for the entire year.
#
# Source_Classification_Code: mapping from the SCC digit strings in the Emissions table
# to the actual name of the PM2.5 source.
# Remove everything from the workspace
rm(list = ls())
## Set the working directory
## setwd('C:/Users/ABaker/Documents/GitHub/Coursera/Exploratory Data Analysis/assignment 2')
list.files("exdata-data-NEI_data")
## Read in both data files
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
# SCC <- readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")
str(NEI)
# str(SCC)
## We need to combine the two data sets based on the SCC code
# mergedData = merge(NEI, SCC, by.x="SCC", by.y="SCC", all = TRUE) # all=TRUE includes NAs like a full outer join
# head(mergedData)
## We can now remove the old variables
# rm(NEI)
# rm(SCC)
## Plot 2 - Have total emissions from PM2.5 decreased in Baltimore City, Maryland
## (fips == "24510") from 1999 to 2008? Use the base plotting system
## to answer this question
## First need to convert the year variable to a factor
NEI[,"year"] = as.factor(NEI[,"year"])
str(NEI)
## I'll also convert Pollutant to a factor just to check the values
## mergedData[,"Pollutant"] = as.factor(mergedData[,"Pollutant"])
## str(mergedData$Pollutant)
## Now we need to filter on only fips == 24510
NEI.24510 <- NEI[which(NEI$fips == "24510"),]
str(NEI.24510)
## Now we need to summarise the data by year.
## We use aggregate and create a new data.frame
NEI.24510_sum_by_year <- aggregate(NEI.24510$Emissions, by=list(NEI.24510$year), FUN=sum)
## Set default plotting parameters
par(mar=c(5.1, 4.1, 4.1, 2.1), mgp=c(3, 1, 0), las=0, mfrow = c(1, 1))
## Now we can use the barplot function to plot by year the sum of emissions
barplot(NEI.24510_sum_by_year$x, names = NEI.24510_sum_by_year$Group.1, xlab = "Year",
ylab = expression("Total Emissions (tonnes) " * PM[2.5]),
main = "Baltimore (24510) Emissions (tonnes) / Year",
col = "lightcyan2")
dev.copy(png, file = "plot2.png")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rox_args_docs.R
\name{idx_Param}
\alias{idx_Param}
\title{idx}
\arguments{
\item{idx}{Numeric vector indicating the data indices (columns) to read. If
\code{NULL} (default), read in all the data. Must be a subset of the indices
present in the file, or an error will occur.
For high-resolution CIFTI files, reading in only a subset of the data saves
memory, but will be slower than reading in the entire file due to the
required intermediate steps.}
}
\description{
idx
}
\keyword{internal}
| /man/idx_Param.Rd | no_license | mandymejia/ciftiTools | R | false | true | 569 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rox_args_docs.R
\name{idx_Param}
\alias{idx_Param}
\title{idx}
\arguments{
\item{idx}{Numeric vector indicating the data indices (columns) to read. If
\code{NULL} (default), read in all the data. Must be a subset of the indices
present in the file, or an error will occur.
For high-resolution CIFTI files, reading in only a subset of the data saves
memory, but will be slower than reading in the entire file due to the
required intermediate steps.}
}
\description{
idx
}
\keyword{internal}
|
###############################################################################
##
## Create some test data for surround index
## Input format is neighborhood matrices:
## - In each matrix, rows correspond to a target
## - Matrices:
## * distances between target and neighbors
## * size of neighbors
## * species of neighbor
## * direction_x to neighbor from target
## * direction_y to neighbor from target
## * number_neighbors: the number of neighbors in a targets neighborhood
##
###############################################################################
source("~/work/neighborhoods/surround/functions.R")
source("~/work/functions/functions-neighborhood.R")
## Neighborhood variables
## - nsize is 9,25, etc
## - alpha, beta are neighborhood parameters (distance, size)
## - theta is direction, slope params
## - C is size of connected components
## - dep.var is neighbor size variable
## - ind.var is comparison variable between target and neighbor
## (if only looking a neighbors larger than target, this variable determines
## whether a neighbor is included in the neighborhood analysis)
## - spec: species of targets we are interested in (all species are used as neighbors)
nsize <- 9
alpha <- beta <- 1
theta <- .05
C <- 2
dep.var <- "bagrowth"
ind.var <- "ba"
spec <- "ABBA"
## Derived parameters
side_length <- sqrt(nsize) # length along one side of the neighborhood
sr <- side_length - 1
###############################################################################
##
## Real data:
##
pp <- read.csv("~/work/data/moose/moose-long.csv")
pnum <- 9
dat <- subset(pp, pplot %in% c(pnum))
## define targets and neighbors
targets <- subset(dat, bqudx < (12-sr) & bqudx > (-1 + sr) & bqudy < (12 - sr) &
bqudy > (-1 + sr) & stat=="ALIVE")
neighbors <- subset(dat, bqudx < 11 & bqudx > 0 & bqudy < 11 &
bqudy > 0 & stat=="ALIVE")
## remove trees that dont satisfy certain conditions
grew <- which(!is.na(targets[,dep.var]) & targets$spec==spec & targets[,dep.var]>0)
abbas <- targets[grew,]
## make neighbor matrices using square radius (i.e bqudx,bqudy)
abba_mats <- mnm(abbas, neighbors, sr, ind.var=ind.var)
## compute nsi
i <- 1
num_nebs <- abba_mats$number_neighbors[i]
nbrs <- data.frame(x=abba_mats$direction_x[i:num_nebs],
y=abba_mats$direction_y[i:num_nebs],
distance=abba_mats$distances[i:num_nebs],
size=abba_mats$variable[i:num_nebs],
z=abba_mats$direction_z[i:num_nebs])
nsi(nbrs=nbrs, C=C, alpha = alpha, beta = beta, theta = theta, nsize = 9)
###############################################################################
##
## Create test cases, neighborhoods for single trees
##
###############################################################################
##
## Test case variables
## - srange: size range for neighbors
srange <- c(0.00007854*5^2, 0.00007854*50^2) # 5 - 50 cm DBH range
## Case 1: full surround, uniform neighbor size, single neighbor/quadrat
## - 1 neighbor each quadrat
## - all neighbors same size
targ <- c(0,0)
dirx1 <- c(rep(-1, 3), rep(0, 2), rep(1, 3))
diry1 <- c(-1,0,1,-1,1,-1,0,1)
dist1 <- apply(cbind(dirx1, diry1), 1, function(x)
euc(targ, as.numeric(x)))
size1 <- rep(0.3, 8)
nebs <- data.frame(x = dirx1, y = diry1, distance = dist1, size = size1)
## Case 2: full surround, uniform neighbor size, single neighbor/quadrat
## - 1 neighbor each quadrat
## - all neighbors same size
targ <- c(0,0)
dirx1 <- c(rep(-1, 3), rep(0, 2), rep(1, 3))
diry1 <- c(-1,0,1,-1,1,-1,0,1)
dist1 <- apply(cbind(dirx1, diry1), 1, function(x)
euc(targ, as.numeric(x)))
size1 <- rep(0.3, 8)
nebs <- data.frame(x = dirx1, y = diry1, distance = dist1, size = size1)
plot(nebs$x, nebs$y)
| /surround/test/make-sample-data.R | no_license | nverno/neighborhoods | R | false | false | 3,796 | r | ###############################################################################
##
## Create some test data for surround index
## Input format is neighborhood matrices:
## - In each matrix, rows correspond to a target
## - Matrices:
## * distances between target and neighbors
## * size of neighbors
## * species of neighbor
## * direction_x to neighbor from target
## * direction_y to neighbor from target
## * number_neighbors: the number of neighbors in a targets neighborhood
##
###############################################################################
source("~/work/neighborhoods/surround/functions.R")
source("~/work/functions/functions-neighborhood.R")
## Neighborhood variables
## - nsize is 9,25, etc
## - alpha, beta are neighborhood parameters (distance, size)
## - theta is direction, slope params
## - C is size of connected components
## - dep.var is neighbor size variable
## - ind.var is comparison variable between target and neighbor
## (if only looking a neighbors larger than target, this variable determines
## whether a neighbor is included in the neighborhood analysis)
## - spec: species of targets we are interested in (all species are used as neighbors)
nsize <- 9
alpha <- beta <- 1
theta <- .05
C <- 2
dep.var <- "bagrowth"
ind.var <- "ba"
spec <- "ABBA"
## Derived parameters
side_length <- sqrt(nsize) # length along one side of the neighborhood
sr <- side_length - 1
###############################################################################
##
## Real data:
##
pp <- read.csv("~/work/data/moose/moose-long.csv")
pnum <- 9
dat <- subset(pp, pplot %in% c(pnum))
## define targets and neighbors
targets <- subset(dat, bqudx < (12-sr) & bqudx > (-1 + sr) & bqudy < (12 - sr) &
bqudy > (-1 + sr) & stat=="ALIVE")
neighbors <- subset(dat, bqudx < 11 & bqudx > 0 & bqudy < 11 &
bqudy > 0 & stat=="ALIVE")
## remove trees that dont satisfy certain conditions
grew <- which(!is.na(targets[,dep.var]) & targets$spec==spec & targets[,dep.var]>0)
abbas <- targets[grew,]
## make neighbor matrices using square radius (i.e bqudx,bqudy)
abba_mats <- mnm(abbas, neighbors, sr, ind.var=ind.var)
## compute nsi
i <- 1
num_nebs <- abba_mats$number_neighbors[i]
nbrs <- data.frame(x=abba_mats$direction_x[i:num_nebs],
y=abba_mats$direction_y[i:num_nebs],
distance=abba_mats$distances[i:num_nebs],
size=abba_mats$variable[i:num_nebs],
z=abba_mats$direction_z[i:num_nebs])
nsi(nbrs=nbrs, C=C, alpha = alpha, beta = beta, theta = theta, nsize = 9)
###############################################################################
##
## Create test cases, neighborhoods for single trees
##
###############################################################################
##
## Test case variables
## - srange: size range for neighbors
srange <- c(0.00007854*5^2, 0.00007854*50^2) # 5 - 50 cm DBH range
## Case 1: full surround, uniform neighbor size, single neighbor/quadrat
## - 1 neighbor each quadrat
## - all neighbors same size
targ <- c(0,0)
dirx1 <- c(rep(-1, 3), rep(0, 2), rep(1, 3))
diry1 <- c(-1,0,1,-1,1,-1,0,1)
dist1 <- apply(cbind(dirx1, diry1), 1, function(x)
euc(targ, as.numeric(x)))
size1 <- rep(0.3, 8)
nebs <- data.frame(x = dirx1, y = diry1, distance = dist1, size = size1)
## Case 2: full surround, uniform neighbor size, single neighbor/quadrat
## - 1 neighbor each quadrat
## - all neighbors same size
targ <- c(0,0)
dirx1 <- c(rep(-1, 3), rep(0, 2), rep(1, 3))
diry1 <- c(-1,0,1,-1,1,-1,0,1)
dist1 <- apply(cbind(dirx1, diry1), 1, function(x)
euc(targ, as.numeric(x)))
size1 <- rep(0.3, 8)
nebs <- data.frame(x = dirx1, y = diry1, distance = dist1, size = size1)
plot(nebs$x, nebs$y)
|
\name{lm.case}
\alias{case}
\alias{case.lm}
%% \alias{lm.case} %% leaving this in case someone searches on the older name
\alias{plot.case}
\alias{panel.case}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ case statistics for regression analysis}
\description{
Case statistics for regression analysis.
\code{case.lm} calculates the statistics.
\code{plot.case} plots the cases, one statistic per panel, and
illustrates and flags all observations for which the standard
thresholds are exceeded. \code{plot.case} returns an object with
class \code{c("trellis.case", "trellis")}
containing the plot and the row.names of the
flagged observations. The object is printed by a method which
displays the set of graphs and prints the list of flagged cases.
\code{panel.case} is a panel function for \code{plot.case}.
}
\usage{
case(fit, ...)
\method{case}{lm}(fit, lms = summary.lm(fit), lmi = lm.influence(fit), ...)
\method{plot}{case}(x, fit,
which=c("stu.res","si","h","cook","dffits",
dimnames(x)[[2]][-(1:8)]), ##DFBETAS
between.in=list(y=4, x=9),
cex.threshold=1.2,
main.in=list(
paste(deparse(fit$call), collapse=""),
cex=main.cex),
sigma.in=summary.lm(fit)$sigma,
p.in=summary.lm(fit)$df[1]-1,
main.cex=NULL,
...)
panel.case(x, y, subscripts, rownames, group.names,
thresh, case.large,
nn, pp, ss, cex.threshold,
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
%%case.lm
\item{fit}{\code{"lm"} object computed with \code{x=TRUE}}
\item{lms}{\code{summary.lm(fit)}}
\item{lmi}{\code{lm.influence(fit)}}
%% plot.case
\item{x}{In \code{plot.case}, the matrix output from \code{case.lm}
containing case diagnostics on each observation in the original
dataset.
In \code{panel.case}, the x variable to be plotted}
\item{which}{In \code{plot.case}, the names of the columns of x
that are to be graphed.}
\item{between.in}{\code{between} trellis/lattice argument.}
%% \item{oma}{In S-Plus, change \code{par()$oma} to make room for the
%% threshold values. A warning is printed when \code{par()$oma}
%% is changed as the delayed printing of trellis objects implies we can't
%% return it to the original value automatically.
%% In R, this argument is ignored. Instead, we use the
%% \code{par.settings} argument to \code{xyplot} inside \code{plot.case}.
%% The \code{par.settings} becomes one component of the \code{"trellis"}
%% object that is the value of \code{plot.case} and is therefore
%% automatically applied every time the object is printed.}
\item{cex.threshold}{Multiplier for \code{cex} for the threshold values.}
\item{main.in}{\code{main} title for \code{xyplot}. The default main title
displays the linear model formula from \code{fit}.}
\item{sigma.in}{standard error for the \code{fit}.}
\item{p.in}{The number of degrees of freedom associated with the fitted
model.}
%% \item{obs.large}{Object name where the names of
%% all observations for which the standard
%% thresholds are exceeded will be stored. The default name is
%% \code{.lm.case.large}.}
%% \item{obs.large.env}{Environment in
%% R (defaults to \code{globalenv()}) where \code{.lm.case.large} will be stored.}
\item{main.cex}{\code{cex} for main title.}
\item{\dots}{other arguments to \code{xyplot}}
%% panel.case
\item{y}{the y variable to be plotted.}
\item{thresh}{Named list of lists. Each list contains the components
threshold ($y$-locations where a reference line will be drawn),
thresh.label (the right-axis labels for the reference lines), thresh.id
(the bounds defining "Noteworthy Observations").}
\item{case.large}{Named list of "Noteworthy Observations".}
\item{nn}{Number of rows in original dataset.}
\item{pp}{The number of degrees of freedom associated with the fitted model.}
\item{ss}{Standard error for the \code{fit}.}
\item{subscripts}{trellis/lattice argument, position in the reshaped
dataset constructed by \code{plot.case} before calling \code{xyplot}.}
\item{rownames}{row name in the original data.frame.}
\item{group.names}{names of the individual statistics.}
%% \item{par.settings}{Used in R as part of the call to \code{xyplot}.
%% Although this argument is not used in the panel function,
%% it is needed as a formal argument in S-Plus to absorb it out of \code{\dots}
%% and thereby prevent it from being forwarded to \code{points}.}
}
\details{
\code{lm.influence} is part of S-Plus and R
\code{case.lm} and \code{plot.case} are based on:
Section 4.3.3 "Influence of Individual Observations"
in Chambers and Hastie, \emph{Statistical Models in S}.
}
\value{
\code{case.lm} returns a matrix, with one row for each observation
in the original dataset. The columns contain the diagnostic statistics:
\code{e} (residuals),
\code{h}* (hat diagonals),
\code{si}* (deleted standard deviation),
\code{sta.res} (standardized residuals),
\code{stu.res}* (Studentized deleted residuals),
\code{dffit} (difference in fits, change in predicted y when
observation i is deleted),
\code{dffits}* (standardized difference in fits, standardized change
in predicted y when observation i is deleted),
\code{cook}* (Cook's distance),
and DFBETAs* (standardized difference in regression coefficients when
observation i is deleted, one for each column of the x-matrix,
including the intercept).
\code{plot.case} returns a \code{c("trellis.case", "trellis")} object
containing the plot
(including the starred columns by default) and also retains the
row.names of the flagged observations in the
\code{$panel.args.common$case.large}
component. The print method for the \code{c("trellis.case",
"trellis")}
object prints the graph and the list of flagged observations.
\code{panel.case} is a panel function for \code{plot.case}.
}
\references{
Heiberger, Richard M. and Holland, Burt (2015).
\emph{Statistical Analysis and Data Display: An Intermediate Course with Examples in R}.
Second Edition.
Springer-Verlag, New York.
\url{https://www.springer.com/us/book/9781493921218}
}
\author{ Richard M. Heiberger <rmh@temple.edu> }
\seealso{
\code{\link[stats]{lm.influence}}.
}
\examples{
data(kidney)
kidney2.lm <- lm(clearance ~ concent + age + weight + concent*age,
data=kidney,
na.action=na.exclude) ## recommended
kidney2.case <- case(kidney2.lm)
## this picture looks much better in portrait, specification is device dependent
plot(kidney2.case, kidney2.lm, par.strip.text=list(cex=.9),
layout=c(2,3))
}
\keyword{hplot}
\keyword{regression}
| /man/lm.case.Rd | no_license | DevGri/HH | R | false | false | 6,884 | rd | \name{lm.case}
\alias{case}
\alias{case.lm}
%% \alias{lm.case} %% leaving this in case someone searches on the older name
\alias{plot.case}
\alias{panel.case}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ case statistics for regression analysis}
\description{
Case statistics for regression analysis.
\code{case.lm} calculates the statistics.
\code{plot.case} plots the cases, one statistic per panel, and
illustrates and flags all observations for which the standard
thresholds are exceeded. \code{plot.case} returns an object with
class \code{c("trellis.case", "trellis")}
containing the plot and the row.names of the
flagged observations. The object is printed by a method which
displays the set of graphs and prints the list of flagged cases.
\code{panel.case} is a panel function for \code{plot.case}.
}
\usage{
case(fit, ...)
\method{case}{lm}(fit, lms = summary.lm(fit), lmi = lm.influence(fit), ...)
\method{plot}{case}(x, fit,
which=c("stu.res","si","h","cook","dffits",
dimnames(x)[[2]][-(1:8)]), ##DFBETAS
between.in=list(y=4, x=9),
cex.threshold=1.2,
main.in=list(
paste(deparse(fit$call), collapse=""),
cex=main.cex),
sigma.in=summary.lm(fit)$sigma,
p.in=summary.lm(fit)$df[1]-1,
main.cex=NULL,
...)
panel.case(x, y, subscripts, rownames, group.names,
thresh, case.large,
nn, pp, ss, cex.threshold,
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
%%case.lm
\item{fit}{\code{"lm"} object computed with \code{x=TRUE}}
\item{lms}{\code{summary.lm(fit)}}
\item{lmi}{\code{lm.influence(fit)}}
%% plot.case
\item{x}{In \code{plot.case}, the matrix output from \code{case.lm}
containing case diagnostics on each observation in the original
dataset.
In \code{panel.case}, the x variable to be plotted}
\item{which}{In \code{plot.case}, the names of the columns of x
that are to be graphed.}
\item{between.in}{\code{between} trellis/lattice argument.}
%% \item{oma}{In S-Plus, change \code{par()$oma} to make room for the
%% threshold values. A warning is printed when \code{par()$oma}
%% is changed as the delayed printing of trellis objects implies we can't
%% return it to the original value automatically.
%% In R, this argument is ignored. Instead, we use the
%% \code{par.settings} argument to \code{xyplot} inside \code{plot.case}.
%% The \code{par.settings} becomes one component of the \code{"trellis"}
%% object that is the value of \code{plot.case} and is therefore
%% automatically applied every time the object is printed.}
\item{cex.threshold}{Multiplier for \code{cex} for the threshold values.}
\item{main.in}{\code{main} title for \code{xyplot}. The default main title
displays the linear model formula from \code{fit}.}
\item{sigma.in}{standard error for the \code{fit}.}
\item{p.in}{The number of degrees of freedom associated with the fitted
model.}
%% \item{obs.large}{Object name where the names of
%% all observations for which the standard
%% thresholds are exceeded will be stored. The default name is
%% \code{.lm.case.large}.}
%% \item{obs.large.env}{Environment in
%% R (defaults to \code{globalenv()}) where \code{.lm.case.large} will be stored.}
\item{main.cex}{\code{cex} for main title.}
\item{\dots}{other arguments to \code{xyplot}}
%% panel.case
\item{y}{the y variable to be plotted.}
\item{thresh}{Named list of lists. Each list contains the components
threshold ($y$-locations where a reference line will be drawn),
thresh.label (the right-axis labels for the reference lines), thresh.id
(the bounds defining "Noteworthy Observations").}
\item{case.large}{Named list of "Noteworthy Observations".}
\item{nn}{Number of rows in original dataset.}
\item{pp}{The number of degrees of freedom associated with the fitted model.}
\item{ss}{Standard error for the \code{fit}.}
\item{subscripts}{trellis/lattice argument, position in the reshaped
dataset constructed by \code{plot.case} before calling \code{xyplot}.}
\item{rownames}{row name in the original data.frame.}
\item{group.names}{names of the individual statistics.}
%% \item{par.settings}{Used in R as part of the call to \code{xyplot}.
%% Although this argument is not used in the panel function,
%% it is needed as a formal argument in S-Plus to absorb it out of \code{\dots}
%% and thereby prevent it from being forwarded to \code{points}.}
}
\details{
\code{lm.influence} is part of S-Plus and R
\code{case.lm} and \code{plot.case} are based on:
Section 4.3.3 "Influence of Individual Observations"
in Chambers and Hastie, \emph{Statistical Models in S}.
}
\value{
\code{case.lm} returns a matrix, with one row for each observation
in the original dataset. The columns contain the diagnostic statistics:
\code{e} (residuals),
\code{h}* (hat diagonals),
\code{si}* (deleted standard deviation),
\code{sta.res} (standardized residuals),
\code{stu.res}* (Studentized deleted residuals),
\code{dffit} (difference in fits, change in predicted y when
observation i is deleted),
\code{dffits}* (standardized difference in fits, standardized change
in predicted y when observation i is deleted),
\code{cook}* (Cook's distance),
and DFBETAs* (standardized difference in regression coefficients when
observation i is deleted, one for each column of the x-matrix,
including the intercept).
\code{plot.case} returns a \code{c("trellis.case", "trellis")} object
containing the plot
(including the starred columns by default) and also retains the
row.names of the flagged observations in the
\code{$panel.args.common$case.large}
component. The print method for the \code{c("trellis.case",
"trellis")}
object prints the graph and the list of flagged observations.
\code{panel.case} is a panel function for \code{plot.case}.
}
\references{
Heiberger, Richard M. and Holland, Burt (2015).
\emph{Statistical Analysis and Data Display: An Intermediate Course with Examples in R}.
Second Edition.
Springer-Verlag, New York.
\url{https://www.springer.com/us/book/9781493921218}
}
\author{ Richard M. Heiberger <rmh@temple.edu> }
\seealso{
\code{\link[stats]{lm.influence}}.
}
\examples{
data(kidney)
kidney2.lm <- lm(clearance ~ concent + age + weight + concent*age,
data=kidney,
na.action=na.exclude) ## recommended
kidney2.case <- case(kidney2.lm)
## this picture looks much better in portrait, specification is device dependent
plot(kidney2.case, kidney2.lm, par.strip.text=list(cex=.9),
layout=c(2,3))
}
\keyword{hplot}
\keyword{regression}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/betapriors.R
\name{CCH}
\alias{CCH}
\title{Generalized g-Prior Distribution for Coefficients in BMA Models}
\usage{
CCH(alpha, beta, s = 0)
}
\arguments{
\item{alpha}{a scalar > 0, recommended alpha=.5 (betaprime) or 1 for CCH.
The hyper.g(alpha) is equivalent to CCH(alpha -2, 2, 0). Liang et al
recommended values in the range 2 < alpha_h <= 4}
\item{beta}{a scalar > 0. The value is not updated by the data; beta should
be a function of n for consistency under the null model. The hyper-g
corresponds to b = 2}
\item{s}{a scalar, recommended s=0}
}
\value{
returns an object of class "prior", with the family and
hyperparameters.
}
\description{
Creates an object representing the CCH mixture of g-priors on coefficients
for BAS .
}
\details{
Creates a structure used for \code{\link{bas.glm}}.
}
\examples{
CCH(alpha=.5, beta=100, s=0)
}
\seealso{
\code{\link{IC.prior}}, \code{\link{bic.prior}},
\code{\link{bas.glm}}
Other beta priors: \code{\link{EB.local}},
\code{\link{IC.prior}}, \code{\link{Jeffreys}},
\code{\link{TG}}, \code{\link{beta.prime}},
\code{\link{g.prior}}, \code{\link{hyper.g.n}},
\code{\link{hyper.g}}, \code{\link{intrinsic}},
\code{\link{robust}}, \code{\link{tCCH}},
\code{\link{testBF.prior}}
}
\author{
Merlise A Clyde
}
| /man/CCH.Rd | no_license | akashrajkn/BAS | R | false | true | 1,347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/betapriors.R
\name{CCH}
\alias{CCH}
\title{Generalized g-Prior Distribution for Coefficients in BMA Models}
\usage{
CCH(alpha, beta, s = 0)
}
\arguments{
\item{alpha}{a scalar > 0, recommended alpha=.5 (betaprime) or 1 for CCH.
The hyper.g(alpha) is equivalent to CCH(alpha -2, 2, 0). Liang et al
recommended values in the range 2 < alpha_h <= 4}
\item{beta}{a scalar > 0. The value is not updated by the data; beta should
be a function of n for consistency under the null model. The hyper-g
corresponds to b = 2}
\item{s}{a scalar, recommended s=0}
}
\value{
returns an object of class "prior", with the family and
hyperparameters.
}
\description{
Creates an object representing the CCH mixture of g-priors on coefficients
for BAS .
}
\details{
Creates a structure used for \code{\link{bas.glm}}.
}
\examples{
CCH(alpha=.5, beta=100, s=0)
}
\seealso{
\code{\link{IC.prior}}, \code{\link{bic.prior}},
\code{\link{bas.glm}}
Other beta priors: \code{\link{EB.local}},
\code{\link{IC.prior}}, \code{\link{Jeffreys}},
\code{\link{TG}}, \code{\link{beta.prime}},
\code{\link{g.prior}}, \code{\link{hyper.g.n}},
\code{\link{hyper.g}}, \code{\link{intrinsic}},
\code{\link{robust}}, \code{\link{tCCH}},
\code{\link{testBF.prior}}
}
\author{
Merlise A Clyde
}
|
##
## Continuous scraping: Approach 2
##
# Scrapes Wikipedia's "Today's featured article" and appends one row per run
# to both a SQLite table and a CSV file. The script performs a single
# iteration; schedule it externally (e.g. with cron) for continuous scraping.

# Loading packages from the EC2 library path
library_directory <- "/home/ec2-user/r_libraries/"
.libPaths(library_directory)
library("DBI")
library("rvest")
library("lubridate")

# Setting/creating output directory
output_folder <- "/home/ec2-user/output"
if (!file.exists(output_folder)) {
  dir.create(output_folder)
}
setwd(output_folder)

# Connecting to/creating SQLite database
db <- dbConnect(RSQLite::SQLite(), "wikipedia.sqlite")

# If no .csv file exists yet, create one with a header row.
# FIX: header written without spaces ("date,summary,link") so the column
# names match the comma-separated rows appended by write.table() below;
# the original "date, summary, link" produced columns named " summary" etc.
if (!file.exists("featured_articles.csv")) {
  writeLines("date,summary,link", "featured_articles.csv")
}

# One-row data frame holding this iteration's scrape result
df <- data.frame(date = as_datetime(Sys.time()), summary = "", link = "")

# Reading the HTML code of the main page
wikipedia_main_page <- read_html("https://en.wikipedia.org/wiki/Main_Page")

# Article summary: paragraph inside the "Today's featured article" box.
# NOTE(review): if the CSS selector ever matches more than one <p>,
# html_text() returns a vector and this single-cell assignment will fail.
df[1, "summary"] <- wikipedia_main_page %>%
  html_nodes(css = "#mp-tfa > p") %>%
  html_text()

# Link to the full article: the anchor whose markup contains "Full" and "article"
all_links <- wikipedia_main_page %>% html_nodes(css = "a")
tfa_partial_link <- all_links[grepl("Full", all_links) & grepl("article", all_links)] %>%
  html_attr("href")
df[1, "link"] <- paste0("https://en.wikipedia.org", tfa_partial_link)

# Now the df is appended both to the database table and to the csv file

# Option i: Add to table in SQL database
dbWriteTable(db, "featured_articles", df, append = TRUE)

# Option ii: Append as row to csv
write.table(df, file = "featured_articles.csv",
            append = TRUE,
            row.names = FALSE,
            col.names = FALSE,
            sep = ',')

# Status update
print(paste("Article scraped at", Sys.time()))

# Disconnect from the database
dbDisconnect(db)
| /week11/scraping_example_to_schedule.R | no_license | lse-my472/lse-my472.github.io | R | false | false | 1,809 | r |
##
##
## Continuous scraping: Approach 2
##
# Scrapes Wikipedia's "Today's featured article" and appends one row per run
# to both a SQLite table and a CSV file. The script performs a single
# iteration; schedule it externally (e.g. with cron) for continuous scraping.

# Loading packages from the EC2 library path
library_directory <- "/home/ec2-user/r_libraries/"
.libPaths(library_directory)
library("DBI")
library("rvest")
library("lubridate")

# Setting/creating output directory
output_folder <- "/home/ec2-user/output"
if (!file.exists(output_folder)) {
  dir.create(output_folder)
}
setwd(output_folder)

# Connecting to/creating SQLite database
db <- dbConnect(RSQLite::SQLite(), "wikipedia.sqlite")

# If no .csv file exists yet, create one with a header row.
# FIX: header written without spaces ("date,summary,link") so the column
# names match the comma-separated rows appended by write.table() below;
# the original "date, summary, link" produced columns named " summary" etc.
if (!file.exists("featured_articles.csv")) {
  writeLines("date,summary,link", "featured_articles.csv")
}

# One-row data frame holding this iteration's scrape result
df <- data.frame(date = as_datetime(Sys.time()), summary = "", link = "")

# Reading the HTML code of the main page
wikipedia_main_page <- read_html("https://en.wikipedia.org/wiki/Main_Page")

# Article summary: paragraph inside the "Today's featured article" box.
# NOTE(review): if the CSS selector ever matches more than one <p>,
# html_text() returns a vector and this single-cell assignment will fail.
df[1, "summary"] <- wikipedia_main_page %>%
  html_nodes(css = "#mp-tfa > p") %>%
  html_text()

# Link to the full article: the anchor whose markup contains "Full" and "article"
all_links <- wikipedia_main_page %>% html_nodes(css = "a")
tfa_partial_link <- all_links[grepl("Full", all_links) & grepl("article", all_links)] %>%
  html_attr("href")
df[1, "link"] <- paste0("https://en.wikipedia.org", tfa_partial_link)

# Now the df is appended both to the database table and to the csv file

# Option i: Add to table in SQL database
dbWriteTable(db, "featured_articles", df, append = TRUE)

# Option ii: Append as row to csv
write.table(df, file = "featured_articles.csv",
            append = TRUE,
            row.names = FALSE,
            col.names = FALSE,
            sep = ',')

# Status update
print(paste("Article scraped at", Sys.time()))

# Disconnect from the database
dbDisconnect(db)
|
\name{disaggregate-methods}
\docType{methods}
\alias{disaggregate}
\alias{disaggregate-methods}
\alias{disaggregate,SpatialPolygons-method}
\alias{disaggregate,SpatialPolygonsDataFrame-method}
\alias{disaggregate,SpatialLines-method}
\alias{disaggregate,SpatialLinesDataFrame-method}
\title{ disaggregate SpatialLines, SpatialLinesDataFrame,
SpatialPolygons, or SpatialPolygonsDataFrame objects }
\description{ disaggregate SpatialLines, SpatialLinesDataFrame,
SpatialPolygons, or SpatialPolygonsDataFrame objects, using functions from rgeos to handle polygon hole nesting }
\usage{ disaggregate(x, ...) }
\arguments{
\item{x}{object of class \link{SpatialLines} or \link{SpatialPolygons}}
\item{...}{ignored}
}
\value{
object of class \link{SpatialLines} or \link{SpatialPolygons},
where groups of \link{Line} or \link{Polygon} are disaggregated to
one \link{Line} per \link{Lines}, or one \link{Polygon}
per \link{Polygons}, respectively.
}
\author{ Robert Hijmans, Edzer Pebesma }
\examples{
if (require(rgeos, quietly = TRUE)) {
Sr1 = Polygon(cbind(c(2,4,4,1,2),c(2,3,5,4,2)), hole = FALSE)
Sr2 = Polygon(cbind(c(5,4,2,5),c(2,3,2,2)), hole = FALSE)
Sr3 = Polygon(cbind(c(4,4,5,10,4),c(5,3,2,5,5)), hole = FALSE)
Sr4 = Polygon(cbind(c(5,6,6,5,5),c(4,4,3,3,4)), hole = TRUE)
Srs1 = Polygons(list(Sr1, Sr2), "s1/2")
Srs3 = Polygons(list(Sr3, Sr4), "s3/4")
sp = SpatialPolygons(list(Srs1,Srs3), 1:2)
length(sp) ## [1] 2
length(disaggregate(sp)) ## [1] 3
l1 = cbind(c(1,2,3),c(3,2,2))
l1a = cbind(l1[,1]+.05,l1[,2]+.05)
l2 = cbind(c(1,2,3),c(1,1.5,1))
Sl1 = Line(l1)
Sl1a = Line(l1a)
Sl2 = Line(l2)
S1 = Lines(list(Sl1, Sl1a), ID="a")
S2 = Lines(list(Sl2), ID="b")
sl = SpatialLines(list(S1,S2))
length(sl)
length(disaggregate(sl))
}
}
\keyword{methods}
| /man/disaggregate.Rd | no_license | edzer/sp | R | false | false | 1,760 | rd | \name{disaggregate-methods}
\docType{methods}
\alias{disaggregate}
\alias{disaggregate-methods}
\alias{disaggregate,SpatialPolygons-method}
\alias{disaggregate,SpatialPolygonsDataFrame-method}
\alias{disaggregate,SpatialLines-method}
\alias{disaggregate,SpatialLinesDataFrame-method}
\title{ disaggregate SpatialLines, SpatialLinesDataFrame,
SpatialPolygons, or SpatialPolygonsDataFrame objects }
\description{ disaggregate SpatialLines, SpatialLinesDataFrame,
SpatialPolygons, or SpatialPolygonsDataFrame objects, using functions from rgeos to handle polygon hole nesting }
\usage{ disaggregate(x, ...) }
\arguments{
\item{x}{object of class \link{SpatialLines} or \link{SpatialPolygons}}
\item{...}{ignored}
}
\value{
object of class \link{SpatialLines} or \link{SpatialPolygons},
where groups of \link{Line} or \link{Polygon} are disaggregated to
one \link{Line} per \link{Lines}, or one \link{Polygon}
per \link{Polygons}, respectively.
}
\author{ Robert Hijmans, Edzer Pebesma }
\examples{
if (require(rgeos, quietly = TRUE)) {
Sr1 = Polygon(cbind(c(2,4,4,1,2),c(2,3,5,4,2)), hole = FALSE)
Sr2 = Polygon(cbind(c(5,4,2,5),c(2,3,2,2)), hole = FALSE)
Sr3 = Polygon(cbind(c(4,4,5,10,4),c(5,3,2,5,5)), hole = FALSE)
Sr4 = Polygon(cbind(c(5,6,6,5,5),c(4,4,3,3,4)), hole = TRUE)
Srs1 = Polygons(list(Sr1, Sr2), "s1/2")
Srs3 = Polygons(list(Sr3, Sr4), "s3/4")
sp = SpatialPolygons(list(Srs1,Srs3), 1:2)
length(sp) ## [1] 2
length(disaggregate(sp)) ## [1] 3
l1 = cbind(c(1,2,3),c(3,2,2))
l1a = cbind(l1[,1]+.05,l1[,2]+.05)
l2 = cbind(c(1,2,3),c(1,1.5,1))
Sl1 = Line(l1)
Sl1a = Line(l1a)
Sl2 = Line(l2)
S1 = Lines(list(Sl1, Sl1a), ID="a")
S2 = Lines(list(Sl2), ID="b")
sl = SpatialLines(list(S1,S2))
length(sl)
length(disaggregate(sl))
}
}
\keyword{methods}
|
library(yorkr)
### Name: getAllMatchesAllOpposition
### Title: Get data on all matches against all opposition
### Aliases: getAllMatchesAllOpposition
### ** Examples
## Not run:
##D # Get all matches for team India
##D getAllMatchesAllOpposition("India",dir="../data/",save=TRUE)
##D getAllMatchesAllOpposition("Australia",dir="./mysavedata/",save=TRUE)
## End(Not run)
| /data/genthat_extracted_code/yorkr/examples/getAllMatchesAllOpposition.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 379 | r | library(yorkr)
### Name: getAllMatchesAllOpposition
### Title: Get data on all matches against all opposition
### Aliases: getAllMatchesAllOpposition
### ** Examples
## Not run:
##D # Get all matches for team India
##D getAllMatchesAllOpposition("India",dir="../data/",save=TRUE)
##D getAllMatchesAllOpposition("Australia",dir="./mysavedata/",save=TRUE)
## End(Not run)
|
testlist <- list(iK = -28L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result) | /eDMA/inst/testfiles/PowerSet/AFL_PowerSet/PowerSet_valgrind_files/1609870027-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 83 | r | testlist <- list(iK = -28L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result) |
# Learn about API authentication here: {{BASE_URL}}/r/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api

library(plotly)

size <- 100
x <- seq(-2*pi, 2*pi, length=size)
y <- seq(-2*pi, 2*pi, length=size)

# z[i, j] = sin(x_i) * cos(y_j) * sin(r2) / log(r2 + 1), with r2 = x_i^2 + y_j^2.
# Vectorized with outer() instead of the original element-wise double loop;
# produces exactly the same matrix values.
r2 <- outer(x^2, y^2, `+`)
z <- outer(sin(x), cos(y)) * sin(r2) / log(r2 + 1)

py <- plotly(username='TestBot', key='r1neazxo9w')

# Single contour trace built from the grid above
data <- list(
  list(
    z = z,
    x = x,
    y = y,
    type = "contour"
  )
)
response <- py$plotly(data, kwargs=list(filename="simple-contour", fileopt="overwrite"))
url <- response$url
| /auto-docs/executables/r/simple_contour.r | no_license | VukDukic/documentation | R | false | false | 632 | r | # Learn about API authentication here: {{BASE_URL}}/r/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api

library(plotly)

size <- 100
x <- seq(-2*pi, 2*pi, length=size)
y <- seq(-2*pi, 2*pi, length=size)

# z[i, j] = sin(x_i) * cos(y_j) * sin(r2) / log(r2 + 1), with r2 = x_i^2 + y_j^2.
# Vectorized with outer() instead of the original element-wise double loop;
# produces exactly the same matrix values.
r2 <- outer(x^2, y^2, `+`)
z <- outer(sin(x), cos(y)) * sin(r2) / log(r2 + 1)

py <- plotly(username='TestBot', key='r1neazxo9w')

# Single contour trace built from the grid above
data <- list(
  list(
    z = z,
    x = x,
    y = y,
    type = "contour"
  )
)
response <- py$plotly(data, kwargs=list(filename="simple-contour", fileopt="overwrite"))
url <- response$url
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nmf_utils.R
\name{nmf_estim_plot}
\alias{nmf_estim_plot}
\title{Generate individual plot for estimating K}
\usage{
nmf_estim_plot(estim.r)
}
\arguments{
\item{estim.r}{NMF run result}
}
\description{
Generates an individual plot for estimating the factorization rank K from an NMF estimation result.
}
\examples{
nmf_estim_plot(estim.r)
}
\keyword{silhouette}
| /man/nmf_estim_plot.Rd | no_license | ishspsy/sake | R | false | true | 390 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nmf_utils.R
\name{nmf_estim_plot}
\alias{nmf_estim_plot}
\title{Generate individual plot for estimating K}
\usage{
nmf_estim_plot(estim.r)
}
\arguments{
\item{estim.r}{NMF run result}
}
\description{
Generates an individual plot for estimating the factorization rank K from an NMF estimation result.
}
\examples{
nmf_estim_plot(estim.r)
}
\keyword{silhouette}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paired_sine_model.R
\name{squared_epsilon}
\alias{squared_epsilon}
\title{Paired-sine model squared error term}
\usage{
squared_epsilon(psi, x1, x2)
}
\arguments{
\item{psi}{Phase shift between the gene expression of the two genes.}
\item{x1}{Vector of gene expression values for gene 1.}
\item{x2}{Vector of gene expression values for gene 2.}
}
\value{
Squared error term, i.e., \eqn{\epsilon_{1,2}^2}.
}
\description{
This function returns the squared error term as described in
\href{https://doi.org/10.1038/nmeth.3549}{Leng et al. 2015}, section
\emph{Oscope: paired-sine model}.
}
\details{
The definition of the error term is:
\deqn{\epsilon_{1,2}^2 = \sum_s ( x_{1,s}^2 + x_{2,s}^2 - 2 x_{1,s} x_{2,s} cos(\psi) - sin(\psi)^2)^2}
}
| /man/squared_epsilon.Rd | permissive | ramiromagno/oscillation | R | false | true | 820 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paired_sine_model.R
\name{squared_epsilon}
\alias{squared_epsilon}
\title{Paired-sine model squared error term}
\usage{
squared_epsilon(psi, x1, x2)
}
\arguments{
\item{psi}{Phase shift between the gene expression of the two genes.}
\item{x1}{Vector of gene expression values for gene 1.}
\item{x2}{Vector of gene expression values for gene 2.}
}
\value{
Squared error term, i.e., \eqn{\epsilon_{1,2}^2}.
}
\description{
This function returns the squared error term as described in
\href{https://doi.org/10.1038/nmeth.3549}{Leng et al. 2015}, section
\emph{Oscope: paired-sine model}.
}
\details{
The definition of the error term is:
\deqn{\epsilon_{1,2}^2 = \sum_s ( x_{1,s}^2 + x_{2,s}^2 - 2 x_{1,s} x_{2,s} cos(\psi) - sin(\psi)^2)^2}
}
|
# DATS 6450 Bayesian Final Project Team 2
# Team Member: Hao Ning, Xi Zhang
# Approach 1 by Hao Ning
############################################################################################################
# load packages
library('dplyr')
library('stringr')
library('ggplot2')
library('rjags')
library('coda')
library('data.table')
library('reshape2')
source('DBDA2E-utilities.R')
############################################################################################################
# LoadData
df_raw = read.csv('FMEL_Dataset.csv')
sum(is.na(df_raw$col))
# checked, no missing values
# Preprocessing, only include division 1
df_division1 = filter(df_raw, division == '1')
# After 1997, there are total 20 teams in divison1
df_division1$year = paste(str_sub(df_division1$season, start = 1, end = 4))
df_division1 = filter(df_division1, year>=1997)
df = select(df_division1,-c(id,season,division,date,timestamp))
# rename columns
df = rename(df, home=localTeam, away=visitorTeam, homeGoals = localGoals, awayGoals = visitorGoals)
df$score_diff = df$homeGoals-df$awayGoals
# results 1: home win, 0: draw, -1: away win
#df$results_code = sign(df$homeGoals - df$awayGoals)
df = df %>% mutate(results = ifelse(df$score_diff > 0, "Home", ifelse(df$score_diff < 0,"Away","Draw")))
str(df)
df$home = as.character(df$home)
df$away = as.character(df$away)
str(df)
unique(df$year)
############################################################################################################
# Home advantage analysis
# plot home_win/draw/away_win distribution
# Games per (result, year): one row per result category within each season.
df_results = df %>% group_by(results, year) %>% summarise(games = n()) %>% arrange(year)
# Attach the yearly total so each category can be expressed as a share.
df_results = full_join(df_results, df_results %>% group_by(year) %>% summarise(total_games_year = sum(games)), by = "year")
# Attach the yearly home-win count as its own column.
df_results = full_join(df_results, df_results %>% group_by(year) %>% filter(results == "Home") %>% select(year,Home_wins = games), by = "year")
df_results$Home_wins_percent = paste0(as.character(round(df_results$Home_wins*100/df_results$total_games_year,2)),"%")
# Stacked bar chart of results per year, annotated with the home-win share.
# openGraph()/saveGraph() come from DBDA2E-utilities.R sourced above.
openGraph( width=12 , height=8 )
ggplot(df_results) + geom_bar(aes(year, games, fill=results), stat = 'identity') +
  geom_text(aes(x = year, y = games, label = Home_wins_percent), size = 3, hjust = 0.5, vjust = 1, position = "fill") +
  ggtitle('Game Results Since 1997') + theme(plot.title = element_text(hjust = 0.5))
saveGraph( file="Game Results Since 1997" , type="png" )
# Average home-win percentage across all seasons.
home_win = df_results$Home_wins*100/df_results$total_games_year
print(mean(home_win))
############################################################################################################
# Work on season/year from 2015 to 2017
mydata = filter(df, year >= '2015')
# our target is to predict home win probability
# home_win is a binary indicator: 1 = home win, 0 = draw or away win.
# NOTE(review): mutate() references mydata$score_diff instead of the bare
# column name; it works here but is fragile if the pipeline is reordered.
mydata = mydata %>% mutate(home_win = ifelse(mydata$score_diff > 0, 1, 0))
# Team names pooled over home and away appearances, plus basic dimensions.
teams = unique(c(mydata$home, mydata$away))
seasons <- unique(mydata$year)
n_teams = length(teams)
n_games = nrow(mydata)
n_seasons = length(seasons)
############################################################################################################
############# pre processing and EDA completed ##############
############################################################################################################
# start to build model
# Approach 1 - Using home win 0,1 for modelling and prediction - by Hao Ning
# assign ID for home and away team
# Integer team IDs (1..n_teams) derived from the shared `teams` level order,
# so home_ID and away_ID index the same team consistently.
mydata$home_ID = as.numeric(factor(mydata$home, levels=teams))
mydata$away_ID = as.numeric(factor(mydata$away, levels=teams))
str(mydata)
# Subset for the single-season (2017) analysis further below.
mydata_17 = filter(mydata, year==2017)
############################################################################################################
# first, we will work on all 3 seasons from 2015-2017
# we will compare the team abilities for good teams vs good teams and good teams vs normal team
# find out the best teams !?
# compare if there's significant difference between the teams
# Data passed to JAGS: team indices and the binary home-win outcome.
datalist1 = list(
home_ID = mydata$home_ID,
away_ID = mydata$away_ID,
home_win = mydata$home_win
)
############################################################################################################
# JAGS model: Bradley-Terry-style win model with a home-advantage factor h.
# Each team's latent ability is log-normal; P(home win) is the home team's
# home-boosted share of combined ability. The final section simulates
# Real Madrid (team ID 15) at home against every opponent for prediction.
# NOTE(review): the model hard-codes 24 teams and 900 of the 1180 games for
# the likelihood -- confirm these match n_teams / n_games computed above.
modelString_RM = "
model{
# team ability, log ability ~ dnorm
for (i in 1:24)
{
log_ability[i] ~ dnorm(0,1/performance^2)
ability[i] = exp(log_ability[i])
}
# using 900 games of 1180 total for likelihood
for (i in 1:900)
{
p[i] = (ability[home_ID[i]]*h)/(ability[home_ID[i]]*h + ability[away_ID[i]])
home_win[i] ~ dbin(p[i], 1)
}
# a team mihgt perform better/equal/worse than their ability/expectations
# we assume they usualy perform with some ups and downs, but generally as they are
# there will be variations, define here
performance ~ dunif(0,2)
# home advantage truely exist, define a h factor
h ~ dunif(1,1.5)
# predict results, when Real Madrid (ID 15) playing at home
for (away in 1:24){
p_vs[away] = (ability[15]*h)/(ability[15]*h + ability[away])
h_win[away] ~ dbin(p_vs[away],1)
}
}
"
writeLines( modelString_RM , con="TEMPmodel_RM.txt")
# MCMC settings
burn_in = 1000
steps = 10000
thin = 1
variable_names_RM=c("h_win","ability")
jagsModel_RM = jags.model( "TEMPmodel_RM.txt",data = datalist1)
# FIX: burn_in was defined but never applied -- discard burn-in iterations
# before collecting samples so early, pre-convergence draws do not
# contaminate the posterior.
update(jagsModel_RM, n.iter = burn_in)
# NOTE(review): `steps` (10000) is unused; sampling keeps the original
# 20000 draws -- unify if a different chain length was intended.
s_RM <- coda.samples(jagsModel_RM, 20000, thin = thin, variable.names = variable_names_RM)
# trace plot of team abilities, 4 monitored columns per page
for(i in seq(1, 24, by = 4)){
  openGraph( width=8 , height=8 )
  plot(s_RM[, i:(i+3)])
}
####################################################################################################
####################################################################################################
# Posterior draws as a data frame; drop Real Madrid's self-match column
# h_win[15] (team 15 "against itself" is meaningless).
pred_RM = s_RM %>% as.matrix() %>% as.data.frame()
pred_RM = select(pred_RM,-c('h_win[15]'))
##########################################################################
# fun part, Barcelona and Real Madrid, which team is better in 2015-2017 3 seasons ?
ms_RM = as.matrix(s_RM)
## compare all teams -- commented out, otherwise there'll be too many plots
# for (i in 1:24){
#   for (j in 1:24){
#     if (i==j) next
#     openGraph( width=5 , height=5 )
#     plotPost(ms_RM[, i] - ms_RM[,j], compVal = 0)
#   }
# }
# team ability comparison
# Barcelona vs Real Madrid, two good teams
# NOTE(review): column numbers assume ms_RM's first 24 columns are
# ability[1..24] in team-ID order and that IDs 13/15/3 map to Barcelona /
# Real Madrid / Espanol under the `teams` factor levels -- verify both.
openGraph( width= 7, height=7)
plotPost(ms_RM[, 13] - ms_RM[,15], compVal = 0, main= "Barcelona - Real Madrid 2015-2017",xlab="Team Abilities Difference" )
saveGraph( file="Barcelona - Real Madrid 2015-2017" , type="png" )
# Espanol vs Real Madrid, normal vs good team
openGraph( width= 7, height=7)
plotPost(ms_RM[, 3] - ms_RM[,15], compVal = 0, main= "Espanol - Real Madrid 2015-2017",xlab="Team Abilities Difference" )
saveGraph( file="Espanol - Real Madrid 2015-2017" , type="png" )
####################################################################################################
# BOXPLOT
#openGraph( width=12 , height=8)
#ggplot(data = melt(ability_teams), aes(x=variable, y=value)) + geom_boxplot(aes(fill=variable)) +
#  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
#  ggtitle("Boxplot of Team Abilities") + theme(plot.title = element_text(hjust = 0.5))
#saveGraph( file="Boxplot of Team Abilities 2015-2017" , type="png" )
####################################################################################################
# Posterior P(Real Madrid wins at home) against every opponent.
# Columns 'h_win[i]' are posterior win indicators with away team i; the column
# means are therefore the Monte-Carlo home-win probabilities. Team 15
# (Real Madrid itself) was already dropped from pred_RM above.
away_RM <- paste("h_win[", 1:24, "]", sep = "")
away_RM <- away_RM[away_RM != 'h_win[15]']
h_win_RM <- select(pred_RM, all_of(away_RM))
p_RM_win <- colMeans(h_win_RM)
# BUG FIX: the original looped over 1:23 and skipped i == 15, but after
# removing team 15's column, entry 15 of p_RM_win belongs to team 16 (and so
# on), so teams 16-24 were reported with the wrong probabilities and team 24
# was never reported. Recover each opponent's true ID from the column name.
away_ids <- as.integer(gsub("h_win\\[|\\]", "", names(p_RM_win)))
for (k in seq_along(p_RM_win)) {
  print(paste("Real Madrid playing at home, winning chance against team",
              away_ids[k], ':', p_RM_win[k] * 100, "%"))
}
####################################################################################################
# study on season 2017 for all teams
# We model the first 200 games (rounds 1-20) and predict on the remaining
# rounds (the JAGS likelihood below loops over i in 1:200; the accuracy check
# later filters round > 20).
datalist2 = list(
home_ID = mydata_17$home_ID,
away_ID = mydata_17$away_ID,
home_win = mydata_17$home_win
)
# JAGS model: lognormal team abilities, Bradley-Terry-style home-win
# probability with a home-advantage multiplier h, plus a full 24x24 matrix of
# posterior-predictive home-win indicators for all matchups.
modelString_all = "
model{
# team ability
for (i in 1:24)
{
log_ability[i] ~ dnorm(0,1/performance^2)
ability[i] = exp(log_ability[i])
}
# likelihood
for (i in 1:200)
{
p[i] = (ability[home_ID[i]]*h)/(ability[home_ID[i]]*h + ability[away_ID[i]])
home_win[i] ~ dbin(p[i], 1)
}
# a team mihgt perform better/equal/worse than their ability/expectations
# we assume they usualy perform with some ups and downs, but generally as they are
# there will be variations, define here
performance ~ dunif(0,2)
# home advantage truely exist, define a h factor
h ~ dunif(1,1.5)
# predict
for (home in 1:24){
for (away in 1:24){
p_vs[home,away] = (ability[home]*h)/(ability[home]*h + ability[away])
h_win[home,away]~ dbin(p_vs[home,away],1)
}
}
}
"
writeLines( modelString_all , con="TEMPmodel_all.txt")
# NOTE(review): burn_in/steps/thin below are unused; coda.samples is called
# with 20000 iterations directly.
burn_in = 1000
steps = 10000
thin = 1
variable_names_all=c("h_win","ability")
jagsModel_all = jags.model( "TEMPmodel_all.txt",data = datalist2)
s_all = coda.samples(jagsModel_all, 20000, thin = 1, variable.names = variable_names_all)
ms_all = s_all%>% as.matrix()
pred_all = s_all %>% as.matrix() %>% as.data.frame()
# columns 1-24 are ability[1..24]; columns 25-600 are the 24x24 h_win matrix
# (24 + 24*24 = 600)
pred_all_home_away = pred_all[25:600]
# this is the win probability of home vs away for all teams !
pred_home_win_all = colMeans(pred_all_home_away) %>% as.data.frame()
# key each probability by its 'h_win[home,away]' column name
HvsA = rownames(pred_home_win_all)
pred_home_win_all = data.frame(HvsA,pred_home_win_all[1], row.names = NULL)
pred_home_win_all = rename(pred_home_win_all, HvsA_pred=.)
# build the matching 'h_win[home,away]' key for each observed game
mydata_17$HvsA = paste('h_win[',mydata_17$home_ID,',',mydata_17$away_ID,']', sep='')
# we use first 200 games (20 round) for modelling, use the rest for prediction
# When predicted probability > 0.6, > 0.7, we believe the home team will win, check the accuracy
df_pred_0.6 = merge(mydata_17, pred_home_win_all, by = "HvsA") %>% filter(round>20 & HvsA_pred>0.6)
df_pred_0.7 = merge(mydata_17, pred_home_win_all, by = "HvsA") %>% filter(round>20 & HvsA_pred>0.7)
# these data frames are the held-out games whose predicted probability exceeds
# the threshold, so the accuracy is simply sum(home_win)/nrow
accuracy_0.6 = sum(df_pred_0.6$home_win)/nrow(df_pred_0.6)
print(paste('P_pred >0.6, we bet the home team win, prediction accuracy :',round(accuracy_0.6*100,2) ,"%"))
accuracy_0.7= sum(df_pred_0.7$home_win)/nrow(df_pred_0.7)
print(paste('P_pred >0.7, we bet the home team win, prediction accuracy :',round(accuracy_0.7*100,2) ,"%"))
####################################################################################################
# final ability rank for season 2017
# We will compare with the final team rank of this season
# first 24 columns of pred_all are the posterior ability samples per team
ability_teams_17 = select(pred_all, c(1:24))
old_names_17 = colnames(ability_teams_17 )
new_names_17 = c(teams)
ability_teams_17 = setnames(ability_teams_17, old=old_names_17, new=new_names_17)
# drop four columns by position -- presumably teams that did not play in the
# 2017 season; TODO confirm indices 5, 7, 10, 21 against 'teams'
ability_teams_17 = ability_teams_17[-c(5,7,10,21)]
# rank teams by posterior mean ability, highest first
ability_teams_avg_17 = colMeans(ability_teams_17)%>% sort(decreasing = TRUE) %>% as.data.frame()
ability_teams_avg_17$rank = seq.int(nrow(ability_teams_avg_17))
ability_teams_avg_17 = rename(ability_teams_avg_17, ability_avg=.)
print(ability_teams_avg_17)
####################################################################################################
# Approach 1 complete
# Summary
# Game results prediction is not an easy field since many factors can impact the outcomes.
# Approach 1 that we have demonstrated could set things straight by only considering home team winning probability
# with parameters of team abilities, performance variations and home advantages.
# We can get a pretty good accuracy using this model
# Including more factors will be very interesting and for future works to dive deeper!
# Thank you! | /Final Project Team 2 - Approach 1.R | no_license | hning87/Predicting-Football-Match-Results-of-Spanish-League-using-Bayesian-Hierarchical-Model | R | false | false | 11,889 | r | # DATS 6450 Bayesian Final Project Team 2
# Team Member: Hao Ning, Xi Zhang
# Approach 1 by Hao Ning
############################################################################################################
# load packages
library('dplyr')
library('stringr')
library('ggplot2')
library('rjags')
library('coda')
library('data.table')
library('reshape2')
source('DBDA2E-utilities.R')
############################################################################################################
# Load data -------------------------------------------------------------------
df_raw = read.csv('FMEL_Dataset.csv')
# BUG FIX: the original checked sum(is.na(df_raw$col)); no column is named
# 'col', so df_raw$col was NULL and the check always returned 0 regardless of
# the data. Count missing values over the whole data frame instead.
sum(is.na(df_raw))
# checked, no missing values
# Preprocessing, only include division 1
df_division1 = filter(df_raw, division == '1')
# After 1997, there are total 20 teams in division 1
# year = first four characters of the season string (e.g. "1997-98" -> "1997")
df_division1$year = paste(str_sub(df_division1$season, start = 1, end = 4))
# NOTE: year is character; comparing with the number 1997 coerces it to
# "1997", and lexicographic order on fixed-width 4-digit years is correct.
df_division1 = filter(df_division1, year>=1997)
df = select(df_division1,-c(id,season,division,date,timestamp))
# rename columns
df = rename(df, home=localTeam, away=visitorTeam, homeGoals = localGoals, awayGoals = visitorGoals)
df$score_diff = df$homeGoals-df$awayGoals
# results 1: home win, 0: draw, -1: away win
#df$results_code = sign(df$homeGoals - df$awayGoals)
# results label: "Home" win / "Away" win / "Draw"
df = df %>% mutate(results = ifelse(df$score_diff > 0, "Home", ifelse(df$score_diff < 0,"Away","Draw")))
str(df)
df$home = as.character(df$home)
df$away = as.character(df$away)
str(df)
unique(df$year)
############################################################################################################
# Home advantage analysis
# plot home_win/draw/away_win distribution
# games per (result, year) pair
df_results = df %>% group_by(results, year) %>% summarise(games = n()) %>% arrange(year)
# attach total games per year, then each year's home-win count
df_results = full_join(df_results, df_results %>% group_by(year) %>% summarise(total_games_year = sum(games)), by = "year")
df_results = full_join(df_results, df_results %>% group_by(year) %>% filter(results == "Home") %>% select(year,Home_wins = games), by = "year")
# percentage label printed inside the stacked bars
df_results$Home_wins_percent = paste0(as.character(round(df_results$Home_wins*100/df_results$total_games_year,2)),"%")
openGraph( width=12 , height=8 )
ggplot(df_results) + geom_bar(aes(year, games, fill=results), stat = 'identity') +
geom_text(aes(x = year, y = games, label = Home_wins_percent), size = 3, hjust = 0.5, vjust = 1, position = "fill") +
ggtitle('Game Results Since 1997') + theme(plot.title = element_text(hjust = 0.5))
saveGraph( file="Game Results Since 1997" , type="png" )
# average home-win percentage across all seasons
home_win = df_results$Home_wins*100/df_results$total_games_year
print(mean(home_win))
############################################################################################################
# Work on season/year from 2015 to 2017
mydata = filter(df, year >= '2015')
# our target is to predict home win probability (binary: 1 = home team won)
# NOTE(review): 'mydata$score_diff' inside mutate() works but is redundant;
# plain 'score_diff' would be the idiomatic form.
mydata = mydata %>% mutate(home_win = ifelse(mydata$score_diff > 0, 1, 0))
# all teams that appear either home or away across the three seasons
teams = unique(c(mydata$home, mydata$away))
seasons <- unique(mydata$year)
n_teams = length(teams)
n_games = nrow(mydata)
n_seasons = length(seasons)
############################################################################################################
############# pre processing and EDA completed ##############
############################################################################################################
# start to build model
# Approch 1 - Using home win 0,1 for modelling and prediction - by Hao Ning
# assign ID for home and away team (integer codes from the shared 'teams'
# factor levels, so home and away IDs are consistent)
mydata$home_ID = as.numeric(factor(mydata$home, levels=teams))
mydata$away_ID = as.numeric(factor(mydata$away, levels=teams))
str(mydata)
mydata_17 = filter(mydata, year==2017)
############################################################################################################
# first, we will work on all 3 seasons from 2015-2017
# we will compare the team abilities for good teams vs good teams and good teams vs normal team
# find out the best teams !?
# compare if there's significant differece between the teams
datalist1 = list(
home_ID = mydata$home_ID,
away_ID = mydata$away_ID,
home_win = mydata$home_win
)
############################################################################################################
modelString_RM = "
model{
# team ability, log ability ~ dnorm
for (i in 1:24)
{
log_ability[i] ~ dnorm(0,1/performance^2)
ability[i] = exp(log_ability[i])
}
# using 900 games of 1180 total for likelihood
for (i in 1:900)
{
p[i] = (ability[home_ID[i]]*h)/(ability[home_ID[i]]*h + ability[away_ID[i]])
home_win[i] ~ dbin(p[i], 1)
}
# a team mihgt perform better/equal/worse than their ability/expectations
# we assume they usualy perform with some ups and downs, but generally as they are
# there will be variations, define here
performance ~ dunif(0,2)
# home advantage truely exist, define a h factor
h ~ dunif(1,1.5)
# predict results, when Real Madrid (ID 15) playing at home
for (away in 1:24){
p_vs[away] = (ability[15]*h)/(ability[15]*h + ability[away])
h_win[away] ~ dbin(p_vs[away],1)
}
}
"
writeLines( modelString_RM , con="TEMPmodel_RM.txt")
burn_in = 1000
steps = 10000
thin = 1
variable_names_RM=c("h_win","ability")
jagsModel_RM = jags.model( "TEMPmodel_RM.txt",data = datalist1)
s_RM <- coda.samples(jagsModel_RM, 20000, thin = 1, variable.names = variable_names_RM)
# trace plot of ability of teams
for(i in seq(1, 24, by = 4)){
openGraph( width=8 , height=8 )
plot(s_RM[, i:(i+3)])
}
####################################################################################################
####################################################################################################
pred_RM = s_RM %>% as.matrix() %>% as.data.frame()
pred_RM = select(pred_RM,-c('h_win[15]'))
##########################################################################
# fun part, Barcelona and Real Madrid, which team is better in 2015-2017 3 seasons ?
ms_RM = as.matrix(s_RM)
##compare all team, I commend this part, otherwise there'll be too many plots
# for (i in 1:24){
# for (j in 1:24){
# if (i==j) next
# openGraph( width=5 , height=5 )
# plotPost(ms_RM[, i] - ms_RM[,j], compVal = 0)
# }
# }
# team ability comparison
# Barcelona vs Real Madrid, two good teams
openGraph( width= 7, height=7)
plotPost(ms_RM[, 13] - ms_RM[,15], compVal = 0, main= "Barcelona - Real Madrid 2015-2017",xlab="Team Abilities Difference" )
saveGraph( file="Barcelona - Real Madrid 2015-2017" , type="png" )
# Espanol vs Real Madrid, normal vs good team
openGraph( width= 7, height=7)
plotPost(ms_RM[, 3] - ms_RM[,15], compVal = 0, main= "Espanol - Real Madrid 2015-2017",xlab="Team Abilities Difference" )
saveGraph( file="Espanol - Real Madrid 2015-2017" , type="png" )
####################################################################################################
# BOXPLOT
#openGraph( width=12 , height=8)
#ggplot(data = melt(ability_teams), aes(x=variable, y=value)) + geom_boxplot(aes(fill=variable)) +
# theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
# ggtitle("Boxplot of Team Abilities") + theme(plot.title = element_text(hjust = 0.5))
#saveGraph( file="Boxplot of Team Abilities 2015-2017" , type="png" )
####################################################################################################
# Posterior P(Real Madrid wins at home) against every opponent.
# Columns 'h_win[i]' are posterior win indicators with away team i; the column
# means are therefore the Monte-Carlo home-win probabilities. Team 15
# (Real Madrid itself) was already dropped from pred_RM above.
away_RM <- paste("h_win[", 1:24, "]", sep = "")
away_RM <- away_RM[away_RM != 'h_win[15]']
h_win_RM <- select(pred_RM, all_of(away_RM))
p_RM_win <- colMeans(h_win_RM)
# BUG FIX: the original looped over 1:23 and skipped i == 15, but after
# removing team 15's column, entry 15 of p_RM_win belongs to team 16 (and so
# on), so teams 16-24 were reported with the wrong probabilities and team 24
# was never reported. Recover each opponent's true ID from the column name.
away_ids <- as.integer(gsub("h_win\\[|\\]", "", names(p_RM_win)))
for (k in seq_along(p_RM_win)) {
  print(paste("Real Madrid playing at home, winning chance against team",
              away_ids[k], ':', p_RM_win[k] * 100, "%"))
}
####################################################################################################
# study on season 2017 for all teams
# We will model the first 300 games then make predictions using the rest of the games (80)
datalist2 = list(
home_ID = mydata_17$home_ID,
away_ID = mydata_17$away_ID,
home_win = mydata_17$home_win
)
modelString_all = "
model{
# team ability
for (i in 1:24)
{
log_ability[i] ~ dnorm(0,1/performance^2)
ability[i] = exp(log_ability[i])
}
# likelihood
for (i in 1:200)
{
p[i] = (ability[home_ID[i]]*h)/(ability[home_ID[i]]*h + ability[away_ID[i]])
home_win[i] ~ dbin(p[i], 1)
}
# a team mihgt perform better/equal/worse than their ability/expectations
# we assume they usualy perform with some ups and downs, but generally as they are
# there will be variations, define here
performance ~ dunif(0,2)
# home advantage truely exist, define a h factor
h ~ dunif(1,1.5)
# predict
for (home in 1:24){
for (away in 1:24){
p_vs[home,away] = (ability[home]*h)/(ability[home]*h + ability[away])
h_win[home,away]~ dbin(p_vs[home,away],1)
}
}
}
"
writeLines( modelString_all , con="TEMPmodel_all.txt")
burn_in = 1000
steps = 10000
thin = 1
variable_names_all=c("h_win","ability")
jagsModel_all = jags.model( "TEMPmodel_all.txt",data = datalist2)
s_all = coda.samples(jagsModel_all, 20000, thin = 1, variable.names = variable_names_all)
ms_all = s_all%>% as.matrix()
pred_all = s_all %>% as.matrix() %>% as.data.frame()
pred_all_home_away = pred_all[25:600]
# this is the win probability of home vs away for all teams !
pred_home_win_all = colMeans(pred_all_home_away) %>% as.data.frame()
HvsA = rownames(pred_home_win_all)
pred_home_win_all = data.frame(HvsA,pred_home_win_all[1], row.names = NULL)
pred_home_win_all = rename(pred_home_win_all, HvsA_pred=.)
mydata_17$HvsA = paste('h_win[',mydata_17$home_ID,',',mydata_17$away_ID,']', sep='')
# we use first 200 games (20 round) for modelling, use the rest for prediction
# When predicted probability > 0.6, > 0.7, we believe the home team will win, check the accuracy
df_pred_0.6 = merge(mydata_17, pred_home_win_all, by = "HvsA") %>% filter(round>20 & HvsA_pred>0.6)
df_pred_0.7 = merge(mydata_17, pred_home_win_all, by = "HvsA") %>% filter(round>20 & HvsA_pred>0.7)
# the dataframe is are the subset of predicted probability that >0.6
# thus the accuracy can be calculated by the sum of home_win/nrow
accuracy_0.6 = sum(df_pred_0.6$home_win)/nrow(df_pred_0.6)
print(paste('P_pred >0.6, we bet the home team win, prediction accuracy :',round(accuracy_0.6*100,2) ,"%"))
accuracy_0.7= sum(df_pred_0.7$home_win)/nrow(df_pred_0.7)
print(paste('P_pred >0.7, we bet the home team win, prediction accuracy :',round(accuracy_0.7*100,2) ,"%"))
####################################################################################################
# final ability rank for season 2017
# We will compare with the final team rank of this season
ability_teams_17 = select(pred_all, c(1:24))
old_names_17 = colnames(ability_teams_17 )
new_names_17 = c(teams)
ability_teams_17 = setnames(ability_teams_17, old=old_names_17, new=new_names_17)
ability_teams_17 = ability_teams_17[-c(5,7,10,21)]
ability_teams_avg_17 = colMeans(ability_teams_17)%>% sort(decreasing = TRUE) %>% as.data.frame()
ability_teams_avg_17$rank = seq.int(nrow(ability_teams_avg_17))
ability_teams_avg_17 = rename(ability_teams_avg_17, ability_avg=.)
print(ability_teams_avg_17)
####################################################################################################
# Approach 1 complete
# Summary
# Game results prediction is not an easy field since many factors can impact the outcomes.
# Approach 1 that we have demonstrated could set things straight by only considering home team winning probability
# with parameters of team abilities, performance variations and home advantages.
# We can get a pretty good accuracy using this model
# Including more factors will be very interesting and for future works to dive deeper!
# Thank you! |
library(compositions)
### Name: print.acomp
### Title: Printing compositional data.
### Aliases: print.acomp print.rcomp print.rplus print.aplus
### Keywords: classes
### ** Examples
data(SimulatedAmounts)
# inject artificial missings: detection limit dl, missing-at-random,
# missing-not-at-random, and structural zeros
mydata <- simulateMissings(sa.groups5,dl=0.01,knownlimit=TRUE,
MAR=0.05,MNARprob=0.05,SZprob=0.05)
# mark the first cell as a below-detection-limit value
mydata[1,1]<-BDLvalue
# print the same data under each compositional representation
print(aplus(mydata))
print(aplus(mydata),digits=3)
print(acomp(mydata))
print(rplus(mydata))
print(rcomp(mydata))
| /data/genthat_extracted_code/compositions/examples/print.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 470 | r | library(compositions)
### Name: print.acomp
### Title: Printing compositional data.
### Aliases: print.acomp print.rcomp print.rplus print.aplus
### Keywords: classes
### ** Examples
data(SimulatedAmounts)
# inject artificial missings: detection limit dl, missing-at-random,
# missing-not-at-random, and structural zeros
mydata <- simulateMissings(sa.groups5,dl=0.01,knownlimit=TRUE,
MAR=0.05,MNARprob=0.05,SZprob=0.05)
# mark the first cell as a below-detection-limit value
mydata[1,1]<-BDLvalue
# print the same data under each compositional representation
print(aplus(mydata))
print(aplus(mydata),digits=3)
print(acomp(mydata))
print(rplus(mydata))
print(rcomp(mydata))
|
###################################################
### Additional materials
###################################################
# https://shiny.rstudio.com/tutorial/ (includes a video tutorial)
# https://shiny.rstudio.com/articles/reactivity-overview.html
# install.packages('shiny')
library(shiny)
# Reactive expressions are expressions that get recomputed whenever
# anything they were computed from changes
# let's create a list of reactive values
?reactiveValues
v <- reactiveValues(
a = 1,
b = 2
)
isolate(v$a) # to read reactive values outside a reactive context, use isolate()
isolate(v$b)
# and an expression that uses them
c = reactive({
v$a + v$b
})
isolate(c()) # anything created with reactive() must be read like a function
isolate(v$a)
isolate(v$b)
?reactive
###
# At this point we have built a dependency between c and v$a / v$b,
# so when v$a or v$b changes, c will be notified
# and will know that it has to recompute itself.
###
# let's see it in action
v$a = 5 # change the value of one component
isolate(c()) # the value changed
isolate(v$a)
isolate(v$b)
# reset the values
v <- reactiveValues(
a = 1,
b = 2
)
isolate(c()) # c did not change here, because the variables did not change -
# only the container holding them, which now has brand-new a and b.
# c is still waiting for change notifications from the old v$a and v$b
# inside the old variable v
c = reactive({ # define c again so that it reacts to the changes
v$a + v$b
})
isolate(c()) # now the value changed, but remember this is a new c,
# which likewise will not notify expressions that used the old one
# this time define c so that:
# - it IS recomputed when we change v$b
# - it is NOT recomputed when we change v$a
c = reactive({
isolate(v$a) + v$b # isolate() means "take the current value and do not react to its changes"
})
isolate(c())
v$a = 5
isolate(c()) # a changed, but we do not care, so c was not recomputed
v$b = 3
isolate(c()) # b changed, so we recompute (this also picks up the new value of a) -> 5 + 3.
# c postpones recomputation until a non-isolated variable changes, but
# once that happens, it uses the current values of every variable
###
# But how is this different from an ordinary function???
###
# reset v once more
v <- reactiveValues(
a = 1,
b = 2
)
# this time c, besides summing v$a and v$b, will print text to the console,
# so we will know when it executes
c = reactive({
print('UWAGA!!! ODPALAM!!!!!!!!!!!!')
v$a +v$b
})
isolate(c()) # it printed, so it recomputed the first time (when we requested the value, it knew it had no current one)
isolate(c()) # it did not recompute again. No reactive value changed, so it only returned the cached value
v$a = 5 # change v$a
isolate(c()) # after the change it had to recompute, so it printed
isolate(c()) # but as before, only the first time
###
# observers
# - recompute as soon as they can (not at the moment
# they are read). Well, strictly speaking,
# only at flush time
# - have no value
###
?observe
# reset v one more time
v <- reactiveValues(
a = 1,
b = 2
)
# create an observer that, whenever it runs, assigns
# the product of v$a and v$b to v$d
observe({
cat('Policzyłem:', v$a * v$b)
v$d = v$a * v$b
})
isolate(v$d) # no flush yet - NULL
shiny:::flushReact() # do a flush; outside Shiny this must be done manually; in a Shiny app
# it is not needed, because Shiny does it automatically at the right moment
isolate(v$d) # flush happened - 2
v$a = 5 # change v$a
isolate(v$d) # no flush yet - still 2, the old value
shiny:::flushReact() # do a flush
isolate(v$d) # flush happened - 10
# observeEvent() works similarly to observe(), but
# we specify separately when the recomputation should run,
# and separately what should happen then
?observeEvent
# eventReactive() works similarly to reactive(), but
# we specify separately when it should recompute the value,
# and separately how the value is computed
?eventReactive
| /05_shiny_05_reaktywnosc.R | no_license | MalgorzataS/podyplomowe_R | R | false | false | 4,393 | r | ###################################################
### Additional materials
###################################################
# https://shiny.rstudio.com/tutorial/ (includes a video tutorial)
# https://shiny.rstudio.com/articles/reactivity-overview.html
# install.packages('shiny')
library(shiny)
# Reactive expressions are expressions that get recomputed whenever
# anything they were computed from changes
# let's create a list of reactive values
?reactiveValues
v <- reactiveValues(
a = 1,
b = 2
)
isolate(v$a) # to read reactive values outside a reactive context, use isolate()
isolate(v$b)
# and an expression that uses them
c = reactive({
v$a + v$b
})
isolate(c()) # anything created with reactive() must be read like a function
isolate(v$a)
isolate(v$b)
?reactive
###
# At this point we have built a dependency between c and v$a / v$b,
# so when v$a or v$b changes, c will be notified
# and will know that it has to recompute itself.
###
# let's see it in action
v$a = 5 # change the value of one component
isolate(c()) # the value changed
isolate(v$a)
isolate(v$b)
# reset the values
v <- reactiveValues(
a = 1,
b = 2
)
isolate(c()) # c did not change here, because the variables did not change -
# only the container holding them, which now has brand-new a and b.
# c is still waiting for change notifications from the old v$a and v$b
# inside the old variable v
c = reactive({ # define c again so that it reacts to the changes
v$a + v$b
})
isolate(c()) # now the value changed, but remember this is a new c,
# which likewise will not notify expressions that used the old one
# this time define c so that:
# - it IS recomputed when we change v$b
# - it is NOT recomputed when we change v$a
c = reactive({
isolate(v$a) + v$b # isolate() means "take the current value and do not react to its changes"
})
isolate(c())
v$a = 5
isolate(c()) # a changed, but we do not care, so c was not recomputed
v$b = 3
isolate(c()) # b changed, so we recompute (this also picks up the new value of a) -> 5 + 3.
# c postpones recomputation until a non-isolated variable changes, but
# once that happens, it uses the current values of every variable
###
# But how is this different from an ordinary function???
###
# reset v once more
v <- reactiveValues(
a = 1,
b = 2
)
# this time c, besides summing v$a and v$b, will print text to the console,
# so we will know when it executes
c = reactive({
print('UWAGA!!! ODPALAM!!!!!!!!!!!!')
v$a +v$b
})
isolate(c()) # it printed, so it recomputed the first time (when we requested the value, it knew it had no current one)
isolate(c()) # it did not recompute again. No reactive value changed, so it only returned the cached value
v$a = 5 # change v$a
isolate(c()) # after the change it had to recompute, so it printed
isolate(c()) # but as before, only the first time
###
# observers
# - recompute as soon as they can (not at the moment
# they are read). Well, strictly speaking,
# only at flush time
# - have no value
###
?observe
# reset v one more time
v <- reactiveValues(
a = 1,
b = 2
)
# create an observer that, whenever it runs, assigns
# the product of v$a and v$b to v$d
observe({
cat('Policzyłem:', v$a * v$b)
v$d = v$a * v$b
})
isolate(v$d) # no flush yet - NULL
shiny:::flushReact() # do a flush; outside Shiny this must be done manually; in a Shiny app
# it is not needed, because Shiny does it automatically at the right moment
isolate(v$d) # flush happened - 2
v$a = 5 # change v$a
isolate(v$d) # no flush yet - still 2, the old value
shiny:::flushReact() # do a flush
isolate(v$d) # flush happened - 10
# observeEvent() works similarly to observe(), but
# we specify separately when the recomputation should run,
# and separately what should happen then
?observeEvent
# eventReactive() works similarly to reactive(), but
# we specify separately when it should recompute the value,
# and separately how the value is computed
?eventReactive
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Reads the full household power consumption file ("?" marks missing values),
# keeps the two target days, and writes the histogram to plot1.png.
power <- read.table(
  "household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE
)
# combined timestamp column (parsed but not used by this particular plot)
power$DateTime <- paste(power$Date, power$Time)
power$DateTime <- strptime(power$DateTime, "%d/%m/%Y %H:%M:%S")
target_days <- c("1/2/2007", "2/2/2007")
two_days <- power[power$Date %in% target_days, ]
png("plot1.png")
hist(
  two_days$Global_active_power,
  xlab = "Global Active Power (kilowatts)",
  main = "Global Active Power",
  col = c("red")
)
dev.off()
| /plot1.R | no_license | johnffarmer/ExData_Plotting1 | R | false | false | 525 | r | household_data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?",stringsAsFactors=FALSE)
# combine date and time into one timestamp string...
household_data$DateTime <- paste(household_data$Date, household_data$Time)
# ...and parse it (POSIXlt); note this column is not used by the plot below
household_data$DateTime <- strptime(household_data$DateTime, "%d/%m/%Y %H:%M:%S")
# keep only the two target days: 1-2 Feb 2007
filtered_data <- household_data[household_data$Date %in% c("1/2/2007","2/2/2007") ,]
# write the histogram of global active power to plot1.png
png("plot1.png")
hist(filtered_data$Global_active_power, xlab= "Global Active Power (kilowatts)", main = "Global Active Power", col = c("red"))
dev.off()
|
library(ggplot2)
library(plotly)
library(Rtsne)
import::from(ape, "pcoa")
import::from(dplyr, "bind_cols")
import::from(htmlwidgets, "saveWidget")
import::from(purrr, "map", "map_dfc", "pmap", "reduce")
import::from(magrittr, "%>%")
import::from(readr, "read_csv")
import::from(stringr, "str_c")
import::from(tibble, "tibble")
# simulate major allele frequencies
# set.seed(100)
freqs <- runif(6, 0, 1)
# fold the uniform draws so each entry is the MAJOR allele frequency (>= 0.5)
mjafs <- pmax(freqs, 1 - freqs)
# calculate genotype frequencies
# Hardy-Weinberg proportions per locus: p^2 (major hom.), 2pq (het.), q^2 (minor hom.)
gfs <- tibble(
p = mjafs ^ 2,
pq = 2 * (mjafs * (1 - mjafs)),
q = (1 - mjafs) ^ 2
)
# every cross-locus combination of the three genotype frequencies
pop <- pmap(gfs, list) %>% expand.grid() %>% data.matrix()
# frequency of each combination = product across loci (independence assumed)
geno_freq <- pmap(pop %>% as.data.frame(), prod) %>% as.numeric()
# random genotype "values" per locus: a permutation of 1..3
# (the anonymous function deliberately ignores its index argument)
gvs <- map_dfc(seq_along(mjafs), function (i) {
sample.int(3, 3)
}) %>% t()
# all combinations of genotype values, and each combination's mean value
popvs <- pmap(gvs %>% as.data.frame(), list) %>% expand.grid() %>% data.matrix()
popvs_mean <- rowMeans(popvs)
# pairwise euclidean distances between genotype-frequency combinations
pd <- dist(pop)
gfsd <- geno_freq %>% sort()
################################################################################
# reverse-cumulative histogram of log10 genotype frequencies ("CDF" view)
histo <- plot_ly(
x = log10(geno_freq),
type = "histogram",
cumulative = list(enabled = TRUE, direction = "decreasing")
)
saveWidget(
as_widget(histo),
"/home/maxh/projects/wheat-pgda/results/cdf.html"
)
# plain histogram of the same values
histo <- plot_ly(
x = log10(geno_freq),
type = "histogram"
)
saveWidget(
as_widget(histo),
"/home/maxh/projects/wheat-pgda/results/histo.html"
)
################################################################################
# three ordinations of the same distance matrix: PCA, t-SNE, and PCoA
pca <- prcomp(pd)
tsne <- Rtsne(pd, dims = 3, perplexity = 20)
pco <- pcoa(pd)
values <- pca$x %>% as.data.frame()
values2 <- tsne$Y %>% as.data.frame()
values3 <- pco$vectors %>% as.data.frame()
# 3-D scatter of the first three PCA axes, coloured by genotype frequency
scatter <- plot_ly() %>%
add_markers(x = values$PC1, y = values$PC2, z = values$PC3, color = geno_freq)
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/scatter_pca.html"
)
# same scatter for the t-SNE embedding
scatter <- plot_ly() %>%
add_markers(x = values2[,1], y = values2[,2], z = values2[,3], color = geno_freq)
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/scatter_tsne.html"
)
# same scatter for the PCoA axes
scatter <- plot_ly() %>%
add_markers(x = values3[,1], y = values3[,2], z = values3[,3], color = geno_freq)
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/scatter_pcoa.html"
)
# 2-D density of mean genotype value vs log10 frequency, with an overlay
# NOTE(review): 'you' is not defined anywhere in this script, so the
# add_trace() below will error at run time - define the index vector
# (or drop the [you] subsetting) before running.
scatter <- plot_ly() %>%
add_trace(type = 'histogram2dcontour', x = popvs_mean, y = log10(geno_freq)) %>%
add_trace(type = "scatter", x = popvs_mean[you], y = log10(geno_freq)[you])
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/density2d_values_freq.html"
)
library(plotly)
library(Rtsne)
import::from(ape, "pcoa")
import::from(dplyr, "bind_cols")
import::from(htmlwidgets, "saveWidget")
import::from(purrr, "map", "map_dfc", "pmap", "reduce")
import::from(magrittr, "%>%")
import::from(readr, "read_csv")
import::from(stringr, "str_c")
import::from(tibble, "tibble")
# simulate major allele frequencies
# set.seed(100)
freqs <- runif(6, 0, 1)
mjafs <- pmax(freqs, 1 - freqs)
# calculate genotype frequencies
gfs <- tibble(
p = mjafs ^ 2,
pq = 2 * (mjafs * (1 - mjafs)),
q = (1 - mjafs) ^ 2
)
pop <- pmap(gfs, list) %>% expand.grid() %>% data.matrix()
geno_freq <- pmap(pop %>% as.data.frame(), prod) %>% as.numeric()
gvs <- map_dfc(seq_along(mjafs), function (i) {
sample.int(3, 3)
}) %>% t()
popvs <- pmap(gvs %>% as.data.frame(), list) %>% expand.grid() %>% data.matrix()
popvs_mean <- rowMeans(popvs)
pd <- dist(pop)
gfsd <- geno_freq %>% sort()
################################################################################
histo <- plot_ly(
x = log10(geno_freq),
type = "histogram",
cumulative = list(enabled = TRUE, direction = "decreasing")
)
saveWidget(
as_widget(histo),
"/home/maxh/projects/wheat-pgda/results/cdf.html"
)
histo <- plot_ly(
x = log10(geno_freq),
type = "histogram"
)
saveWidget(
as_widget(histo),
"/home/maxh/projects/wheat-pgda/results/histo.html"
)
################################################################################
pca <- prcomp(pd)
tsne <- Rtsne(pd, dims = 3, perplexity = 20)
pco <- pcoa(pd)
values <- pca$x %>% as.data.frame()
values2 <- tsne$Y %>% as.data.frame()
values3 <- pco$vectors %>% as.data.frame()
scatter <- plot_ly() %>%
add_markers(x = values$PC1, y = values$PC2, z = values$PC3, color = geno_freq)
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/scatter_pca.html"
)
scatter <- plot_ly() %>%
add_markers(x = values2[,1], y = values2[,2], z = values2[,3], color = geno_freq)
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/scatter_tsne.html"
)
scatter <- plot_ly() %>%
add_markers(x = values3[,1], y = values3[,2], z = values3[,3], color = geno_freq)
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/scatter_pcoa.html"
)
scatter <- plot_ly() %>%
add_trace(type = 'histogram2dcontour', x = popvs_mean, y = log10(geno_freq)) %>%
add_trace(type = "scatter", x = popvs_mean[you], y = log10(geno_freq)[you])
saveWidget(
as_widget(scatter),
"/home/maxh/projects/wheat-pgda/results/density2d_values_freq.html"
) |
library(tidyverse)
library(readr)
library(readxl)
irs <- read_csv('data/vector_control_ministry/IRS_mozambique_clean_final.csv')
# Fix Matola
irs$district[irs$district == 'M.Matola'] <- 'Matola'
irs$province[irs$district == 'Matola'] <- 'Maputo'
# Keep only gaza and maputo
irs <- irs %>%
filter(province %in% c('Gaza', 'Maputo'))
# Standardize names to match those in bes
irs$district <- toupper(irs$district)
irs_districts <- sort(unique(irs$district))
irs <-
irs %>%
mutate(district = ifelse(district == 'C.MATOLA', 'MATOLA',
ifelse(district == 'C.XAI-XAI', 'XAI-XAI CITY',
ifelse(district == 'D.XAI-XAI',
'XAI-XAI DISTRICT',
ifelse(district == 'MATUTUÍNE MOZAL', 'MATUTUINE',
ifelse(district == 'XINAVANE', 'MANHICA', district))))))
irs <-
irs %>%
mutate(district = ifelse(district == 'MANHIÇA', 'MANHICA',
ifelse(district == 'MAT MOZAL', 'MATOLA',
ifelse(district == 'MATUTUÍNE', 'MATUTUINE',
ifelse(district == 'XXAI CIDADE', 'XAI-XAI CITY',
ifelse(district == 'XXAI DISTRITO', 'XAI-XAI DISTRICT',
ifelse(district == 'ZONA 1A*', 'MATOLA', district)))))))
# Clean up
irs <-
irs %>%
filter(!is.na(district)) %>%
group_by(province,
district,
year) %>%
summarise(houses_irs = sum(as.numeric(as.character(gsub(',', '', houses))), na.rm = TRUE),
people_irs = sum(as.numeric(as.character(gsub(',', '', people))), na.rm = TRUE)) %>%
ungroup
| /get_irs_itn_data.R | no_license | joebrew/maltem_cost_effectiveness | R | false | false | 1,819 | r | library(tidyverse)
library(readr)
library(readxl)
irs <- read_csv('data/vector_control_ministry/IRS_mozambique_clean_final.csv')
# Fix Matola
irs$district[irs$district == 'M.Matola'] <- 'Matola'
irs$province[irs$district == 'Matola'] <- 'Maputo'
# Keep only gaza and maputo
irs <- irs %>%
filter(province %in% c('Gaza', 'Maputo'))
# Standardize names to match those in bes
irs$district <- toupper(irs$district)
irs_districts <- sort(unique(irs$district))
irs <-
irs %>%
mutate(district = ifelse(district == 'C.MATOLA', 'MATOLA',
ifelse(district == 'C.XAI-XAI', 'XAI-XAI CITY',
ifelse(district == 'D.XAI-XAI',
'XAI-XAI DISTRICT',
ifelse(district == 'MATUTUÍNE MOZAL', 'MATUTUINE',
ifelse(district == 'XINAVANE', 'MANHICA', district))))))
irs <-
irs %>%
mutate(district = ifelse(district == 'MANHIÇA', 'MANHICA',
ifelse(district == 'MAT MOZAL', 'MATOLA',
ifelse(district == 'MATUTUÍNE', 'MATUTUINE',
ifelse(district == 'XXAI CIDADE', 'XAI-XAI CITY',
ifelse(district == 'XXAI DISTRITO', 'XAI-XAI DISTRICT',
ifelse(district == 'ZONA 1A*', 'MATOLA', district)))))))
# Clean up
irs <-
irs %>%
filter(!is.na(district)) %>%
group_by(province,
district,
year) %>%
summarise(houses_irs = sum(as.numeric(as.character(gsub(',', '', houses))), na.rm = TRUE),
people_irs = sum(as.numeric(as.character(gsub(',', '', people))), na.rm = TRUE)) %>%
ungroup
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bfa-package.R
\docType{package}
\name{bfa-package}
\alias{bfa-package}
\title{Bayesian Factor Analysis}
\description{
Bayesian Factor Analysis
}
\details{
\tabular{ll}{
Package: \tab bfa\cr
Type: \tab Package\cr
Version: \tab 0.4\cr
Date: \tab 2016-9-07\cr
License: \tab GPL-3\cr
LazyLoad: \tab yes\cr
}
This package provides model fitting for several Bayesian factor models including
Gaussian, ordinal probit, mixed and semiparametric Gaussian copula factor models
under a range of priors.
}
\author{
Jared Murray \email{jsmurray@stat.cmu.edu}
}
\keyword{package}
| /man/bfa-package.Rd | no_license | david-dunson/gaussian-copula-factor-model | R | false | false | 653 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/bfa-package.R
\docType{package}
\name{bfa-package}
\alias{bfa-package}
\title{Bayesian Factor Analysis}
\description{
Bayesian Factor Analysis
}
\details{
\tabular{ll}{
Package: \tab bfa\cr
Type: \tab Package\cr
Version: \tab 0.4\cr
Date: \tab 2016-9-07\cr
License: \tab GPL-3\cr
LazyLoad: \tab yes\cr
}
This package provides model fitting for several Bayesian factor models including
Gaussian, ordinal probit, mixed and semiparametric Gaussian copula factor models
under a range of priors.
}
\author{
Jared Murray \email{jsmurray@stat.cmu.edu}
}
\keyword{package}
|
################################################################################################
#
# MODELAGEM PREDITIVA - MBA Business Analytics e Big Data
# Por: RICARDO REIS
#
# CASE - FRAMINGHAM HEART STUDY
#
# male: 0 = Female; 1 = Male
# age: Age at exam time.
# education: 1 = Some High School; 2 = High School or GED; 3 = Some College or Vocational School; 4 = college
# currentSmoker: 0 = nonsmoker; 1 = smoker
# cigsPerDay: number of cigarettes smoked per day (estimated average)
# BPMeds: 0 = Not on Blood Pressure medications; 1 = Is on Blood Pressure medications
# prevalentStroke: AVC
# prevalentHyp: Hipertensão
# diabetes: 0 = No; 1 = Yes
# totChol: Colesterol total mg/dL
# sysBP: Pressão sistólica mmHg
# diaBP: Pressão diastólica mmHg
# BMI: Body Mass Index calculated as: Weight (kg) / Height(meter-squared)
# heartRate: Beats/Min (Ventricular)
# glucose: Glicemia mg/dL
# TenYearCHD: Prever se o paciente vai ter doenças coronarianas em 10 anos
#
################################################################################################
# LENDO OS DADOS
path <- "C:/Users/Ricardo/Documents/R-Projetos/FraminghamHeartStudy/"
baseRL <- read.csv(paste(path,"framingham.csv",sep=""),
sep=",",header = T,stringsAsFactors = T)
# Checando Missing Values
summary(baseRL)
library("VIM")
matrixplot(baseRL)
aggr(baseRL)
#Estratégia Adotada:
#Excluindo linhas com Missing Values
index_glucose <- which(is.na(baseRL$glucose))
index_heartRate <- which(is.na(baseRL$heartRate))
index_BMI <- which(is.na(baseRL$BMI))
index_totChol <- which(is.na(baseRL$totChol))
index_BPMeds <- which(is.na(baseRL$BPMeds))
index_cigsPerDay <- which(is.na(baseRL$cigsPerDay))
index_education <- which(is.na(baseRL$education))
baseRL_sem_mv <- baseRL[-c(index_glucose,index_heartRate,index_BMI,index_totChol,index_BPMeds,index_cigsPerDay,index_education),]
matrixplot(baseRL_sem_mv)
aggr(baseRL_sem_mv)
# ANALISE BIVARIADA
# Variáveis quantitativas
boxplot(baseRL_sem_mv$male ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$age ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$education ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$currentSmoker ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$cigsPerDay ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$BPMeds ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$prevalentStroke ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$prevalentHyp ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$diabetes ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$totChol ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$sysBP ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$diaBP ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$BMI ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$heartRate ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$glucose ~ baseRL_sem_mv$TenYearCHD)
#Variáveis quantitativas e quali
prop.table(table(baseRL_sem_mv$TenYearCHD))
prop.table(table(baseRL_sem_mv$male, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$age, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$education, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$currentSmoker, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$cigsPerDay, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$BPMeds, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$prevalentStroke, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$prevalentHyp, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$diabetes, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$totChol, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$sysBP, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$diaBP, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$BMI, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$heartRate, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$glucose, baseRL_sem_mv$TenYearCHD),1)
################################################################################################
# AMOSTRAGEM DO DADOS
library(caret)
set.seed(12345)
index <- createDataPartition(baseRL_sem_mv$TenYearCHD, p= 0.7,list = F)
data.train <- baseRL_sem_mv[index, ] # base de desenvolvimento: 70%
data.test <- baseRL_sem_mv[-index,] # base de teste: 30%
# Checando se as proporções das amostras são próximas à base original
prop.table(table(baseRL_sem_mv$TenYearCHD))
prop.table(table(data.train$TenYearCHD))
prop.table(table(data.test$TenYearCHD))
################################################################################################
# MODELAGEM DOS DADOS - REGRESSÃO LOGISTICA
# Avaliando multicolinearidade - vars quantitativas
library(mctest)
vars.quant <- data.train[,c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)]
imcdiag(vars.quant,data.train$TenYearCHD)
names <- names(data.train) # salva o nome de todas as variáveis e escreve a fórmula
f_full <- as.formula(paste("TenYearCHD ~",
paste(names[!names %in% "TenYearCHD"], collapse = " + ")))
glm.full <- glm(f_full, data= data.train, family= binomial(link='logit'))
summary(glm.full)
# observam-se variáveis não significantes, podemos remover uma de cada vez e testar, ou
# usar o método stepwise que escolhe as variáveis que minimizem o AIC
library(MASS)
# seleção de variáveis
glm.step <- stepAIC(glm.full,direction = 'both', trace = TRUE)
summary(glm.step)
# O método manteve apenas variáveis que minimizaram o AIC
# Aplicando o modelo nas amostras e determinando as probabilidades
glm.prob.train <- predict(glm.step,type = "response")
glm.prob.test <- predict(glm.step, newdata = data.test, type= "response")
#length(glm.prob.train)
# Verificando a aderência do ajuste logístico
library(rms)
val.prob(glm.prob.train, data.train$TenYearCHD, smooth = F)
# p valor > 5%, não podemos rejeitar a hipotese nula
# Comportamento da saida do modelo
hist(glm.prob.test, breaks = 25, col = "lightblue",xlab= "Probabilidades",
ylab= "Frequência",main= "Regressão Logística")
boxplot(glm.prob.test ~ data.test$TenYearCHD,col= c("red", "green"), horizontal= T)
#guardando o histograma
hist <- hist(glm.prob.test, breaks= 20, probability= T, ylim= c(0,5))
score_1 <- density(baseRL_sem_mv$TenYearCHD[baseRL_sem_mv$TenYearCHD == 1], na.rm = T)
score_0 <- density(baseRL_sem_mv$TenYearCHD[baseRL_sem_mv$TenYearCHD == 0], na.rm = T)
lines(score_1,col = 'red')
lines(score_0,col = 'blue')
################################################################################################
# AVALIANDO A PERFORMANCE
# Métricas de discriminação para ambos modelos
library(hmeasure)
glm.train <- HMeasure(data.train$TenYearCHD,glm.prob.train)
glm.test <- HMeasure(data.test$TenYearCHD, glm.prob.test)
summary(glm.train)
summary(glm.test)
glm.train$metrics
glm.test$metrics
library(pROC)
roc1 <- roc(data.test$TenYearCHD,glm.prob.test)
y1 <- roc1$sensitivities
x1 <- 1-roc1$specificities
plot(x1,y1, type="n",
xlab = "1 - Especificidade",
ylab= "Sensitividade")
lines(x1, y1,lwd=3,lty=1, col="purple")
abline(0,1, lty=2)
################################################################################################
################################################################################################
################################################################################################
# MATRIZ DE CONFUSAO
observado <- as.factor(data.test$TenYearCHD)
modelado <- as.factor(ifelse(glm.prob.test >= 0.2, 1.0, 0.0))
library(gmodels)
CrossTable(observado, modelado, prop.c= F, prop.t= F, prop.chisq= F)
library(caret)
confusionMatrix(modelado,observado, positive = "1")
################################################################################################
################################################################################################
| /model-logistic-regression.r | no_license | ricardobreis/Classification-Algorithms-Comparison-Framingham-Heart-Study | R | false | false | 8,070 | r | ################################################################################################
#
# MODELAGEM PREDITIVA - MBA Business Analytics e Big Data
# Por: RICARDO REIS
#
# CASE - FRAMINGHAM HEART STUDY
#
# male: 0 = Female; 1 = Male
# age: Age at exam time.
# education: 1 = Some High School; 2 = High School or GED; 3 = Some College or Vocational School; 4 = college
# currentSmoker: 0 = nonsmoker; 1 = smoker
# cigsPerDay: number of cigarettes smoked per day (estimated average)
# BPMeds: 0 = Not on Blood Pressure medications; 1 = Is on Blood Pressure medications
# prevalentStroke: AVC
# prevalentHyp: Hipertensão
# diabetes: 0 = No; 1 = Yes
# totChol: Colesterol total mg/dL
# sysBP: Pressão sistólica mmHg
# diaBP: Pressão diastólica mmHg
# BMI: Body Mass Index calculated as: Weight (kg) / Height(meter-squared)
# heartRate: Beats/Min (Ventricular)
# glucose: Glicemia mg/dL
# TenYearCHD: Prever se o paciente vai ter doenças coronarianas em 10 anos
#
################################################################################################
# LENDO OS DADOS
path <- "C:/Users/Ricardo/Documents/R-Projetos/FraminghamHeartStudy/"
baseRL <- read.csv(paste(path,"framingham.csv",sep=""),
sep=",",header = T,stringsAsFactors = T)
# Checando Missing Values
summary(baseRL)
library("VIM")
matrixplot(baseRL)
aggr(baseRL)
#Estratégia Adotada:
#Excluindo linhas com Missing Values
index_glucose <- which(is.na(baseRL$glucose))
index_heartRate <- which(is.na(baseRL$heartRate))
index_BMI <- which(is.na(baseRL$BMI))
index_totChol <- which(is.na(baseRL$totChol))
index_BPMeds <- which(is.na(baseRL$BPMeds))
index_cigsPerDay <- which(is.na(baseRL$cigsPerDay))
index_education <- which(is.na(baseRL$education))
baseRL_sem_mv <- baseRL[-c(index_glucose,index_heartRate,index_BMI,index_totChol,index_BPMeds,index_cigsPerDay,index_education),]
matrixplot(baseRL_sem_mv)
aggr(baseRL_sem_mv)
# ANALISE BIVARIADA
# Variáveis quantitativas
boxplot(baseRL_sem_mv$male ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$age ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$education ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$currentSmoker ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$cigsPerDay ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$BPMeds ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$prevalentStroke ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$prevalentHyp ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$diabetes ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$totChol ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$sysBP ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$diaBP ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$BMI ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$heartRate ~ baseRL_sem_mv$TenYearCHD)
boxplot(baseRL_sem_mv$glucose ~ baseRL_sem_mv$TenYearCHD)
#Variáveis quantitativas e quali
prop.table(table(baseRL_sem_mv$TenYearCHD))
prop.table(table(baseRL_sem_mv$male, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$age, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$education, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$currentSmoker, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$cigsPerDay, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$BPMeds, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$prevalentStroke, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$prevalentHyp, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$diabetes, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$totChol, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$sysBP, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$diaBP, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$BMI, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$heartRate, baseRL_sem_mv$TenYearCHD),1)
prop.table(table(baseRL_sem_mv$glucose, baseRL_sem_mv$TenYearCHD),1)
################################################################################################
# AMOSTRAGEM DO DADOS
library(caret)
set.seed(12345)
index <- createDataPartition(baseRL_sem_mv$TenYearCHD, p= 0.7,list = F)
data.train <- baseRL_sem_mv[index, ] # base de desenvolvimento: 70%
data.test <- baseRL_sem_mv[-index,] # base de teste: 30%
# Checando se as proporções das amostras são próximas à base original
prop.table(table(baseRL_sem_mv$TenYearCHD))
prop.table(table(data.train$TenYearCHD))
prop.table(table(data.test$TenYearCHD))
################################################################################################
# MODELAGEM DOS DADOS - REGRESSÃO LOGISTICA
# Avaliando multicolinearidade - vars quantitativas
library(mctest)
vars.quant <- data.train[,c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)]
imcdiag(vars.quant,data.train$TenYearCHD)
names <- names(data.train) # salva o nome de todas as variáveis e escreve a fórmula
f_full <- as.formula(paste("TenYearCHD ~",
paste(names[!names %in% "TenYearCHD"], collapse = " + ")))
glm.full <- glm(f_full, data= data.train, family= binomial(link='logit'))
summary(glm.full)
# observam-se variáveis não significantes, podemos remover uma de cada vez e testar, ou
# usar o método stepwise que escolhe as variáveis que minimizem o AIC
library(MASS)
# seleção de variáveis
glm.step <- stepAIC(glm.full,direction = 'both', trace = TRUE)
summary(glm.step)
# O método manteve apenas variáveis que minimizaram o AIC
# Aplicando o modelo nas amostras e determinando as probabilidades
glm.prob.train <- predict(glm.step,type = "response")
glm.prob.test <- predict(glm.step, newdata = data.test, type= "response")
#length(glm.prob.train)
# Verificando a aderência do ajuste logístico
library(rms)
val.prob(glm.prob.train, data.train$TenYearCHD, smooth = F)
# p valor > 5%, não podemos rejeitar a hipotese nula
# Comportamento da saida do modelo
hist(glm.prob.test, breaks = 25, col = "lightblue",xlab= "Probabilidades",
ylab= "Frequência",main= "Regressão Logística")
boxplot(glm.prob.test ~ data.test$TenYearCHD,col= c("red", "green"), horizontal= T)
#guardando o histograma
hist <- hist(glm.prob.test, breaks= 20, probability= T, ylim= c(0,5))
score_1 <- density(baseRL_sem_mv$TenYearCHD[baseRL_sem_mv$TenYearCHD == 1], na.rm = T)
score_0 <- density(baseRL_sem_mv$TenYearCHD[baseRL_sem_mv$TenYearCHD == 0], na.rm = T)
lines(score_1,col = 'red')
lines(score_0,col = 'blue')
################################################################################################
# AVALIANDO A PERFORMANCE
# Métricas de discriminação para ambos modelos
library(hmeasure)
glm.train <- HMeasure(data.train$TenYearCHD,glm.prob.train)
glm.test <- HMeasure(data.test$TenYearCHD, glm.prob.test)
summary(glm.train)
summary(glm.test)
glm.train$metrics
glm.test$metrics
library(pROC)
roc1 <- roc(data.test$TenYearCHD,glm.prob.test)
y1 <- roc1$sensitivities
x1 <- 1-roc1$specificities
plot(x1,y1, type="n",
xlab = "1 - Especificidade",
ylab= "Sensitividade")
lines(x1, y1,lwd=3,lty=1, col="purple")
abline(0,1, lty=2)
################################################################################################
################################################################################################
################################################################################################
# MATRIZ DE CONFUSAO
observado <- as.factor(data.test$TenYearCHD)
modelado <- as.factor(ifelse(glm.prob.test >= 0.2, 1.0, 0.0))
library(gmodels)
CrossTable(observado, modelado, prop.c= F, prop.t= F, prop.chisq= F)
library(caret)
confusionMatrix(modelado,observado, positive = "1")
################################################################################################
################################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{class}
\name{eco.mctree-class}
\alias{eco.mctree-class}
\title{eco.mctree-class}
\description{
eco.mctree-class
}
\section{Slots}{
\describe{
\item{\code{TREES}}{trees obtained}
\item{\code{PREDICTIONS}}{predictions of the analysis}
\item{\code{FREQUENCIES}}{frequencies of individuals per class in nodes}
\item{\code{DF1}}{data frame}
\item{\code{DF2}}{data frame}
}}
\author{
Leandro Roser \email{leandroroser@ege.fcen.uba.ar}
}
\keyword{internal}
| /man/eco.mctree-class.Rd | no_license | jcassiojr/EcoGenetics | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{class}
\name{eco.mctree-class}
\alias{eco.mctree-class}
\title{eco.mctree-class}
\description{
eco.mctree-class
}
\section{Slots}{
\describe{
\item{\code{TREES}}{trees obtained}
\item{\code{PREDICTIONS}}{predictions of the analysis}
\item{\code{FREQUENCIES}}{frequencies of individuals per class in nodes}
\item{\code{DF1}}{data frame}
\item{\code{DF2}}{data frame}
}}
\author{
Leandro Roser \email{leandroroser@ege.fcen.uba.ar}
}
\keyword{internal}
|
# advanced packageloading utility for the bioconductor ecosystem
# as provided @ https://www.huber.embl.de/msmb/install_packages.R
options(install.packages.check.source = "no")
options(install.packages.compile.from.source = "never")
Sys.setenv(R_REMOTES_UPGRADE = "never")
## Function to install packages one at a time with indication of time left
## Overall probably slower than install.packages if everything works
## but doesn't require downloading all packages first before trying to install any
installer_with_progress <- function(pkgs) {
if(length(pkgs) == 0) { invisible(return(NULL)) }
toInstall <- pkgs
bp <- progress::progress_bar$new(total = length(toInstall),
format = "Installed :current of :total (:percent ) - current package: :package",
show_after = 0,
clear = FALSE)
length_prev <- length(toInstall)
fail <- NULL
while(length(toInstall)) {
pkg <- toInstall[1]
bp$tick(length_prev - length(toInstall), tokens = list(package = pkg))
length_prev <- length(toInstall)
tryCatch(
suppressMessages( BiocManager::install(pkg, quiet = TRUE, update = FALSE, ask = FALSE, type = "binary") ),
error = function(e) { fail <<- c(fail, pkg) },
warning = function(w) { fail <<- c(fail, pkg) },
## remove current package, otherwise we loop in event of failure
## update the list to reflect any dependencies that are now installed
finally = { toInstall <- setdiff(toInstall, installed.packages()[, "Package"]) }
)
}
bp$tick(length_prev - length(toInstall), tokens = list(package = "DONE!"))
return(fail)
}
## these packages are needed prior to the installation
if(!requireNamespace("BiocManager", quietly = TRUE)) {
install.packages(c('BiocManager'), repos = "https://cloud.r-project.org",
quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
## update any existing packages
BiocManager::install(update = TRUE, ask = FALSE)
if(!requireNamespace("remotes", quietly = TRUE)) {
install.packages(c('remotes'), quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
if(!requireNamespace("magrittr", quietly = TRUE)) {
BiocManager::install('magrittr', quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
if(!requireNamespace("progress", quietly = TRUE)) {
BiocManager::install('progress', quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
## structSSI is currently deprecated and has been removed from CRAN for now (24-06-2020)
## This will install a CRAN version by default if it reappears, otherwise use an archive version
## Update 17-05-2021: This isn't coming back to CRAN any time soon, so lets use the GitHub version
if(!requireNamespace("structSSI", quietly = TRUE)) {
BiocManager::install('krisrs1128/structSSI', upgrade = FALSE, quiet = TRUE, ask = FALSE, type = "both")
}
## list of packages required for each chapters
chapter_pkgs <- readRDS(url("https://www.huber.embl.de/msmb/chapter_pkgs.rds"))
## subset a selection of chapters if specified
if(exists('chapter_index') && is.numeric(chapter_index)) {
chapter_pkgs <- chapter_pkgs[ chapter_index ]
}
for(i in seq_along(chapter_pkgs)) {
message("### CHAPTER: ", i, " ###")
pkgsAvailable = installed.packages()[, "Package"]
pkgsToInstall = setdiff(chapter_pkgs[[i]], pkgsAvailable)
BiocManager::install(pkgsToInstall, update = FALSE, upgrade = FALSE, ask = FALSE, type = "both")
}
## report packages no installed
## find only those not currently installed
pkgsAvailable = installed.packages()[, "Package"]
pkgsNeeded = unique(unlist(chapter_pkgs))
pkgsToInstall = setdiff(pkgsNeeded, pkgsAvailable)
if(length(pkgsToInstall)) {
message("The following packages failed to install: \n",
paste(pkgsToInstall, collapse = ", "))
message("You can try re-running this installation script.\n",
"It will only try to install the missing packages.\n",
"This may make it easier to see the information R gives about why the installation failed.\n",
"Please contact mike.smith@embl.de if you need additional help.")
}
Sys.unsetenv("R_REMOTES_UPGRADE")
| /packageloader_bioc.R | no_license | benearnthof/geostatsBA | R | false | false | 4,184 | r | # advanced packageloading utility for the bioconductor ecosystem
# as provided @ https://www.huber.embl.de/msmb/install_packages.R
options(install.packages.check.source = "no")
options(install.packages.compile.from.source = "never")
Sys.setenv(R_REMOTES_UPGRADE = "never")
## Function to install packages one at a time with indication of time left
## Overall probably slower than install.packages if everything works
## but doesn't require downloading all packages first before trying to install any
installer_with_progress <- function(pkgs) {
if(length(pkgs) == 0) { invisible(return(NULL)) }
toInstall <- pkgs
bp <- progress::progress_bar$new(total = length(toInstall),
format = "Installed :current of :total (:percent ) - current package: :package",
show_after = 0,
clear = FALSE)
length_prev <- length(toInstall)
fail <- NULL
while(length(toInstall)) {
pkg <- toInstall[1]
bp$tick(length_prev - length(toInstall), tokens = list(package = pkg))
length_prev <- length(toInstall)
tryCatch(
suppressMessages( BiocManager::install(pkg, quiet = TRUE, update = FALSE, ask = FALSE, type = "binary") ),
error = function(e) { fail <<- c(fail, pkg) },
warning = function(w) { fail <<- c(fail, pkg) },
## remove current package, otherwise we loop in event of failure
## update the list to reflect any dependencies that are now installed
finally = { toInstall <- setdiff(toInstall, installed.packages()[, "Package"]) }
)
}
bp$tick(length_prev - length(toInstall), tokens = list(package = "DONE!"))
return(fail)
}
## these packages are needed prior to the installation
if(!requireNamespace("BiocManager", quietly = TRUE)) {
install.packages(c('BiocManager'), repos = "https://cloud.r-project.org",
quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
## update any existing packages
BiocManager::install(update = TRUE, ask = FALSE)
if(!requireNamespace("remotes", quietly = TRUE)) {
install.packages(c('remotes'), quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
if(!requireNamespace("magrittr", quietly = TRUE)) {
BiocManager::install('magrittr', quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
if(!requireNamespace("progress", quietly = TRUE)) {
BiocManager::install('progress', quiet = TRUE, update = FALSE, ask = FALSE, type = "both")
}
## structSSI is currently deprecated and has been removed from CRAN for now (24-06-2020)
## This will install a CRAN version by default if it reappears, otherwise use an archive version
## Update 17-05-2021: This isn't coming back to CRAN any time soon, so lets use the GitHub version
if(!requireNamespace("structSSI", quietly = TRUE)) {
BiocManager::install('krisrs1128/structSSI', upgrade = FALSE, quiet = TRUE, ask = FALSE, type = "both")
}
## list of packages required for each chapters
chapter_pkgs <- readRDS(url("https://www.huber.embl.de/msmb/chapter_pkgs.rds"))
## subset a selection of chapters if specified
if(exists('chapter_index') && is.numeric(chapter_index)) {
chapter_pkgs <- chapter_pkgs[ chapter_index ]
}
for(i in seq_along(chapter_pkgs)) {
message("### CHAPTER: ", i, " ###")
pkgsAvailable = installed.packages()[, "Package"]
pkgsToInstall = setdiff(chapter_pkgs[[i]], pkgsAvailable)
BiocManager::install(pkgsToInstall, update = FALSE, upgrade = FALSE, ask = FALSE, type = "both")
}
## report packages no installed
## find only those not currently installed
pkgsAvailable = installed.packages()[, "Package"]
pkgsNeeded = unique(unlist(chapter_pkgs))
pkgsToInstall = setdiff(pkgsNeeded, pkgsAvailable)
if(length(pkgsToInstall)) {
message("The following packages failed to install: \n",
paste(pkgsToInstall, collapse = ", "))
message("You can try re-running this installation script.\n",
"It will only try to install the missing packages.\n",
"This may make it easier to see the information R gives about why the installation failed.\n",
"Please contact mike.smith@embl.de if you need additional help.")
}
Sys.unsetenv("R_REMOTES_UPGRADE")
|
### plot for diamond dataset ####
| /poster plot /poster plot.R | no_license | wangy63/Leverage-Subsampling | R | false | false | 34 | r | ### plot for diamond dataset ####
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
lm_iv_rcpp <- function(X, Y, Z) {
.Call(tidymodelR_lm_iv_rcpp, X, Y, Z)
}
lm_rcpp <- function(X, y) {
.Call(tidymodelR_lm_rcpp, X, y)
}
| /R/RcppExports.R | no_license | elben10/tidymodelR | R | false | false | 273 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
lm_iv_rcpp <- function(X, Y, Z) {
.Call(tidymodelR_lm_iv_rcpp, X, Y, Z)
}
lm_rcpp <- function(X, y) {
.Call(tidymodelR_lm_rcpp, X, y)
}
|
calculateBMI <- function(weight,height) weight/(height ^ 2)
shinyServer(
function(input, output) {
output$inputValue1 <- renderPrint({input$weight})
output$inputValue2 <- renderPrint({input$height})
output$odate <- renderPrint({input$date})
output$prediction <- renderPrint({calculateBMI(input$weight,input$height)})
}
)
| /shiny Application/server.R | no_license | nikhil-chandra/datasciencecoursera | R | false | false | 351 | r | calculateBMI <- function(weight,height) weight/(height ^ 2)
shinyServer(
function(input, output) {
output$inputValue1 <- renderPrint({input$weight})
output$inputValue2 <- renderPrint({input$height})
output$odate <- renderPrint({input$date})
output$prediction <- renderPrint({calculateBMI(input$weight,input$height)})
}
)
|
# Download and unpack the UCI HAR ("Human Activity Recognition") dataset.
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="data.zip") ## download the data archive
unzip("data.zip") ## unzip the data archive
setwd("UCI HAR Dataset") ## work inside the unpacked directory
# Load the various lookup files.
## second column of the file holds the human-readable activity labels
activitylabels<- read.table("activity_labels.txt")[,2]
## second column holds the feature names -- these become the column names
features<-read.table("features.txt")[,2]
# Load the test-set measurements, activity ids and subject ids.
Xtest <- read.table("./test/X_test.txt")
Ytest <- read.table("./test/y_test.txt")
subjectTest <- read.table("./test/subject_test.txt")
## Objective: keep only the mean and standard deviation measurements.
## features2 is a logical mask selecting mean/std feature columns.
features2<-grepl("mean|std",features)
## name the Xtest columns after the extracted feature names
names(Xtest)=features
## keep only the mean/std columns of Xtest
Xtest=Xtest[,features2]
## add a column to Ytest with the activity label matching column 1
Ytest[,2]=activitylabels[Ytest[,1]]
## rename the Ytest columns
names(Ytest)=c("id","activity")
## rename the subject column
names(subjectTest)= "subject"
## assemble the tidy test table: subject | id | activity | measurements
testdata <- cbind(subjectTest, Ytest, Xtest)
## repeat the same procedure for the training set
Xtrain <- read.table("./train/X_train.txt")
Ytrain <- read.table("./train/y_train.txt")
subjectTrain <- read.table("./train/subject_train.txt")
## name the Xtrain columns after the extracted feature names
names(Xtrain)=features
## keep only the mean/std columns of Xtrain
Xtrain=Xtrain[,features2]
## add a column to Ytrain with the activity label matching column 1
Ytrain[,2]=activitylabels[Ytrain[,1]]
## rename the Ytrain columns
names(Ytrain)=c("id","activity")
## rename the subject column
names(subjectTrain)= "subject"
## assemble the tidy training table
traindata <- cbind(subjectTrain, Ytrain, Xtrain)
## stack the test and training tables into one dataset
table=rbind(testdata,traindata)
library(reshape2)
# Melt to long form keyed by subject/id/activity, then average.
idlabels = c("subject", "id", "activity")
datalabels = setdiff(colnames(table), idlabels)
melttable = melt(table, id = idlabels, measure.vars = datalabels)
# dcast applies mean() per subject x activity combination.
tidydata = dcast(melttable, subject + activity ~ variable, mean)
## write the table of per-subject/activity means to disk
write.table(tidydata, file = "./tidy_data.txt") | /run_analysis.R | no_license | ccunha85/Getting_and_Cleaning_Data_Final_Project | R | false | false | 2,617 | r | fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="data.zip") ##Download the data file
unzip("data.zip") ##unzip the data file
setwd("UCI HAR Dataset")##set working directory to file
#load the various data files
##extract second column of txt file, corresponding to activity labels
activitylabels<- read.table("activity_labels.txt")[,2]
##extract second column of txt file, corresponding to the featuresm- this will be the column names
features<-read.table("features.txt")[,2]
# Load and process x and y data and the subject test data
Xtest <- read.table("./test/X_test.txt")
Ytest <- read.table("./test/y_test.txt")
subjectTest <- read.table("./test/subject_test.txt")
##Objective is to extract only mean and standard deviation for each measurement
##features 2 is a variable that applies extraction of mean and std only
features2<-grepl("mean|std",features)
##allocating names to Xtest by the features extracted table
names(Xtest)=features
##extract variables that represent only mean and std of Xtest
Xtest=Xtest[,features2]
##add a column to Y test with the labels extracted before w.r.t to column 1 in Ytest
Ytest[,2]=activitylabels[Ytest[,1]]
##change the variable names in Y test 1
names(Ytest)=c("id","activity")
##change variable name in Subject Test
names(subjectTest)= "subject"
##creating a tidy table
testdata <- cbind(subjectTest, Ytest, Xtest)
##repeating the procedure for the training set
Xtrain <- read.table("./train/X_train.txt")
Ytrain <- read.table("./train/y_train.txt")
subjectTrain <- read.table("./train/subject_train.txt")
##allocating names to Xtest by the features extracted table
names(Xtrain)=features
##extract variables that represent only mean and std of Xtest
Xtrain=Xtrain[,features2]
##add a column to Y test with the labels extracted before w.r.t to column 1 in Ytest
Ytrain[,2]=activitylabels[Ytrain[,1]]
##change the variable names in Y test 1
names(Ytrain)=c("id","activity")
##change variable name in Subject Test
names(subjectTrain)= "subject"
##creating a tidy table
traindata <- cbind(subjectTrain, Ytrain, Xtrain)
##merging the two tidy tables
table=rbind(testdata,traindata)
library(reshape2)
idlabels = c("subject", "id", "activity")
datalabels = setdiff(colnames(table), idlabels)
melttable = melt(table, id = idlabels, measure.vars = datalabels)
# Use dcast function to apply the mean to the dataset
tidydata = dcast(melttable, subject + activity ~ variable, mean)
##write the new table of means to the users pc
write.table(tidydata, file = "./tidy_data.txt") |
#' Bootstrap the pairwise niche overlap between two resource-use vectors.
#'
#' @param vectorA,vectorB Numeric resource-use vectors of equal length.
#' @param method Overlap index, passed on to \code{niche.overlap.pair}.
#' @param times Number of bootstrap resamples.
#' @param quant Two quantiles reported as the bootstrap confidence interval.
#' @return Named numeric vector (rounded to 3 decimals): observed index,
#'   bootstrap mean, bootstrap sd, the two CI quantiles, and \code{times}.
niche.overlap.boot.pair <-
function (vectorA, vectorB, method = c("levins","schoener","petraitis","pianka","czech","morisita"),
    times = 999, quant = c(0.025, 0.975))
{
    method <- match.arg(method)
    if(!length(vectorA)==length(vectorB)){
        stop("Length of vectorA differs from lengths of vectorB")
    }
    # Preallocate as numeric (rep(NA, times) would start out logical).
    booted <- rep(NA_real_, times)
    obs <- niche.overlap.pair(vectorA, vectorB, method = method)
    # seq_len/seq_along are safe when times or length(vectorA) is 0,
    # unlike 1:times / 1:length(vectorA).
    for (i in seq_len(times)){
        # Paired bootstrap: resample observation indices with replacement.
        ind <- sample(seq_along(vectorA), size = length(vectorA), replace = TRUE)
        booted[i] <- niche.overlap.pair(vectorA[ind], vectorB[ind], method = method)
    }
    result <- c(obs, mean(booted), sd(booted), quantile(booted, quant, na.rm = TRUE), times)
    names(result) <- c("Observed","Boot mean","Boot std","Boot CI1", "Boot CI2", "times")
    return(round(result,3))
}
| /R/niche.overlap.boot.pair.R | no_license | SiyuHuang91/spaa | R | false | false | 862 | r | niche.overlap.boot.pair <-
function (vectorA, vectorB, method = c("levins","schoener","petraitis","pianka","czech","morisita"),
times = 999, quant = c(0.025, 0.975))
{
method <- match.arg(method)
if(!length(vectorA)==length(vectorB)){
stop("Length of vectorA differs from lengths of vectorB")
}
booted <- rep(NA, times)
obs <- niche.overlap.pair(vectorA, vectorB, method = method)
for (i in 1:times){
ind <- sample(1:length(vectorA), size = length(vectorA), replace = TRUE)
booted[i] <- niche.overlap.pair(vectorA[ind], vectorB[ind], method = method)
}
result <- c(obs, mean(booted), sd(booted), quantile(booted, quant, na.rm = TRUE), times)
names(result) <- c("Observed","Boot mean","Boot std","Boot CI1", "Boot CI2", "times")
return(round(result,3))
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Pair of functions that cache the inverse of a matrix
## Usage: Pass the result of a makeCacheMatrix call to cacheSolve
#' Util function that set the matrix and the inverse in an environment
#' @param x an invertible matrix
#' examples
#' x = makeCacheMatrix(matrix(rnorm(9), 3, 3))
#' x$set(matrix(rnorm(16), 4, 4))
#' Wrap a matrix in a list of accessors that can cache its inverse.
#'
#' BUG FIX: the original definition had a stray extra "}" after the
#' closing brace, which made the whole file fail to parse.
#'
#' @param x an invertible matrix
#' @return list with set/get/setinverse/getinverse closures sharing state
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate the cached inverse when the matrix changes
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
#' Compute and cache the inverse of a matrix
#' @param x the result of a previous makeCacheMatrix call
#' @param ... additional arguments to pass to solve function
#' examples
#' x = makeCacheMatrix(matrix(rnorm(9), 3, 3))
#' cacheSolve(x)
#' Return the inverse of the special "matrix" x, computing and caching it
#' only when no cached value is available yet.
#' @param x the result of a previous makeCacheMatrix call
#' @param ... additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached matrix inverse")
  }
  cached
}
| /cachematrix.R | no_license | gokulakrishnan77/ProgrammingAssignment2 | R | false | false | 1,434 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Pair of functions that cache the inverse of a matrix
## Usage: Pass the result of a makeCacheMatrix call to cacheSolve
#' Util function that set the matrix and the inverse in an environment
#' @param x an invertible matrix
#' examples
#' x = makeCacheMatrix(matrix(rnorm(9), 3, 3))
#' x$set(matrix(rnorm(16), 4, 4))
makeCacheMatrix <- function(x = matrix()) {
# todo error if x is not a matrix
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
}
## Write a short comment describing this function
#' Compute and cache the inverse of a matrix
#' @param x the result of a previous makeCacheMatrix call
#' @param ... additional arguments to pass to solve function
#' examples
#' x = makeCacheMatrix(matrix(rnorm(9), 3, 3))
#' cacheSolve(x)
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached matrix inverse")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
#' @title Create credentials database
#'
#' @description Create a SQLite database with credentials data protected by a password.
#'
#' @param credentials_data A \code{data.frame} with information about users, \code{user} and \code{password} are required.
#' @param sqlite_path Path to the SQLite database.
#' @param passphrase A password to protect the data inside the database.
#'
#' @export
#'
#' @details The credentials \code{data.frame} can have the following columns:
#' \itemize{
#' \item \strong{user (mandatory)} : the user's name.
#' \item \strong{password (mandatory)} : the user's password.
#' \item \strong{admin (optional)} : logical, is user have admin right ? If so,
#' user can access the admin mode (only available using a SQLite database)
#' \item \strong{start (optional)} : the date from which the user will have access to the application
#' \item \strong{expire (optional)} : the date from which the user will no longer have access to the application
#' \item \strong{applications (optional)} : the name of the applications to which the user is authorized,
#' separated by a semicolon. The name of the application corresponds to the name of the directory,
#' or can be declared using : \code{options("shinymanager.application" = "my-app")}
#' \item \strong{additional columns} : add others columns to retrieve the values server-side after authentication
#' }
#'
#' @importFrom DBI dbConnect dbDisconnect dbWriteTable
#' @importFrom RSQLite SQLite
#'
#' @seealso \code{\link{read_db_decrypt}}
#'
#' @examples
#' \dontrun{
#'
#' # Credentials data
#' credentials <- data.frame(
#' user = c("shiny", "shinymanager"),
#' password = c("azerty", "12345"),
#' stringsAsFactors = FALSE
#' )
#'
#' # you can use keyring package to set database key
#' library(keyring)
#' key_set("R-shinymanager-key", "obiwankenobi")
#'
#' # Create the database
#' create_db(
#' credentials_data = credentials,
#' sqlite_path = "path/to/database.sqlite", # will be created
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' }
create_db <- function(credentials_data, sqlite_path, passphrase = NULL) {
  # The two mandatory columns must be present before anything is written.
  if (!all(c("user", "password") %in% names(credentials_data))) {
    stop("credentials_data must contains columns: 'user', 'password'", call. = FALSE)
  }
  # Fill in the optional columns with defaults when absent.
  if(!"admin" %in% names(credentials_data)){
    credentials_data$admin <- FALSE
  }
  if(!"start" %in% names(credentials_data)){
    credentials_data$start <- NA
  }
  if(!"expire" %in% names(credentials_data)){
    credentials_data$expire <- NA
  }
  # Put the standard columns first; keep any user-supplied extras after.
  default_col <- c("user", "password", "start", "expire", "admin")
  credentials_data <- credentials_data[, c(default_col,
                                           setdiff(colnames(credentials_data), default_col))]
  conn <- dbConnect(SQLite(), dbname = sqlite_path)
  # Close the connection even if one of the writes below fails.
  on.exit(dbDisconnect(conn))
  # Store everything as character so serialization/encryption is uniform.
  credentials_data[] <- lapply(credentials_data, as.character)
  # Main credentials table.
  write_db_encrypt(
    conn = conn,
    name = "credentials",
    value = credentials_data,
    passphrase = passphrase
  )
  # Per-user password-management state (forced change / change history).
  write_db_encrypt(
    conn = conn,
    name = "pwd_mngt",
    value = data.frame(
      user = credentials_data$user,
      must_change = as.character(FALSE),
      have_changed = as.character(FALSE),
      date_change = character(length(credentials_data$user)),
      stringsAsFactors = FALSE
    ),
    passphrase = passphrase
  )
  # Empty connection-log table, populated later at runtime.
  write_db_encrypt(
    conn = conn,
    name = "logs",
    value = data.frame(
      user = character(0),
      server_connected = character(0),
      token = character(0),
      logout = character(0),
      app = character(0),
      stringsAsFactors = FALSE
    ),
    passphrase = passphrase
  )
}
#' Read / Write crypted table from / to a SQLite database
#'
#' @param conn A DBIConnection object, as returned by \code{\link[DBI]{dbConnect}}.
#' @param value A \code{data.frame}.
#' @param name A character string specifying the unquoted DBMS table name.
#' @param passphrase A secret passphrase to crypt the table inside the database
#'
#' @return a \code{data.frame} for \code{read_db_decrypt}.
#' @export
#'
#' @name db-crypted
#'
#' @importFrom DBI dbConnect dbDisconnect dbWriteTable
#' @importFrom RSQLite SQLite
#' @importFrom openssl sha256 aes_cbc_encrypt
#'
#' @seealso \code{\link{create_db}}
#'
#' @examples
#' # connect to database
#' conn <- DBI::dbConnect(RSQLite::SQLite(), dbname = ":memory:")
#'
#' # write to database
#' write_db_encrypt(conn, value = head(iris), name = "iris", passphrase = "supersecret")
#'
#' # read
#' read_db_decrypt(conn = conn, name = "iris", passphrase = "supersecret")
#'
#' # with wrong passphrase
#' \dontrun{
#' read_db_decrypt(conn = conn, name = "iris", passphrase = "forgotten")
#' }
#'
#' # with DBI method you'll get a crypted blob
#' DBI::dbReadTable(conn = conn, name = "iris")
#'
#' # add some users to database
#' \dontrun{
#' conn <- DBI::dbConnect(RSQLite::SQLite(), dbname = "path/to/database.sqlite")
#'
#' # update "credentials" table
#' current_user <- read_db_decrypt(
#' conn,
#' name = "credentials",
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' add_user <- data.frame(user = "new", password = "pwdToChange",
#' start = NA, expire = NA, admin = TRUE)
#'
#' new_users <- rbind.data.frame(current_user, add_user)
#'
#' write_db_encrypt(
#' conn,
#' value = new_users,
#' name = "credentials",
#' key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' # update "pwd_mngt" table
#' pwd_mngt <- read_db_decrypt(
#' conn,
#' name = "pwd_mngt",
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' pwd_mngt <- rbind.data.frame(
#' pwd_mngt,
#' data.frame(user = "new", must_change = T, have_changed = F, date_change = "")
#' )
#'
#' write_db_encrypt(
#' conn,
#' value = pwd_mngt,
#' name = "pwd_mngt",
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#' }
#'
write_db_encrypt <- function(conn, value, name = "credentials", passphrase = NULL) {
  # Accept a path string: open a temporary SQLite connection, closed on exit.
  if (is.character(conn)) {
    conn <- dbConnect(RSQLite::SQLite(), dbname = conn)
    on.exit(dbDisconnect(conn))
  }
  if (!is.null(passphrase)) {
    # Derive a 256-bit AES key from the passphrase, then store the whole
    # data.frame as one encrypted blob plus its initialization vector.
    passphrase <- as.character(passphrase)
    passphrase <- charToRaw(passphrase)
    key <- sha256(passphrase)
    value_serialized <- serialize(value, NULL)
    value_encrypted <- aes_cbc_encrypt(data = value_serialized, key = key)
    value <- data.frame(value = I(list(value_encrypted)), iv = I(list(attr(value_encrypted, "iv"))))
  }
  # Without a passphrase the data.frame is written in clear text.
  dbWriteTable(conn = conn, name = name, value = value, overwrite = TRUE)
}
#' @export
#'
#' @rdname db-crypted
#'
#' @importFrom DBI dbConnect dbDisconnect dbReadTable
#' @importFrom RSQLite SQLite
#' @importFrom openssl sha256 aes_cbc_decrypt
#'
read_db_decrypt <- function(conn, name = "credentials", passphrase = NULL) {
  # Accept a path string: open a temporary SQLite connection, closed on exit.
  if (is.character(conn)) {
    conn <- dbConnect(RSQLite::SQLite(), dbname = conn)
    on.exit(dbDisconnect(conn))
  }
  out <- dbReadTable(conn = conn, name = name)
  if (!is.null(passphrase)) {
    # Reverse of write_db_encrypt: rebuild the AES key from the passphrase,
    # reattach the stored IV, decrypt and unserialize the data.frame.
    # A wrong passphrase makes aes_cbc_decrypt/unserialize error out.
    passphrase <- as.character(passphrase)
    passphrase <- charToRaw(passphrase)
    key <- sha256(passphrase)
    value <- out$value[[1]]
    attr(value, "iv") <- out$iv[[1]]
    out <- aes_cbc_decrypt(value, key = key)
    out <- unserialize(out)
  }
  return(out)
}
| /R/credentials-db.R | no_license | abhik1368/shinymanager | R | false | false | 7,308 | r |
#' @title Create credentials database
#'
#' @description Create a SQLite database with credentials data protected by a password.
#'
#' @param credentials_data A \code{data.frame} with information about users, \code{user} and \code{password} are required.
#' @param sqlite_path Path to the SQLite database.
#' @param passphrase A password to protect the data inside the database.
#'
#' @export
#'
#' @details The credentials \code{data.frame} can have the following columns:
#' \itemize{
#' \item \strong{user (mandatory)} : the user's name.
#' \item \strong{password (mandatory)} : the user's password.
#' \item \strong{admin (optional)} : logical, is user have admin right ? If so,
#' user can access the admin mode (only available using a SQLite database)
#' \item \strong{start (optional)} : the date from which the user will have access to the application
#' \item \strong{expire (optional)} : the date from which the user will no longer have access to the application
#' \item \strong{applications (optional)} : the name of the applications to which the user is authorized,
#' separated by a semicolon. The name of the application corresponds to the name of the directory,
#' or can be declared using : \code{options("shinymanager.application" = "my-app")}
#' \item \strong{additional columns} : add others columns to retrieve the values server-side after authentication
#' }
#'
#' @importFrom DBI dbConnect dbDisconnect dbWriteTable
#' @importFrom RSQLite SQLite
#'
#' @seealso \code{\link{read_db_decrypt}}
#'
#' @examples
#' \dontrun{
#'
#' # Credentials data
#' credentials <- data.frame(
#' user = c("shiny", "shinymanager"),
#' password = c("azerty", "12345"),
#' stringsAsFactors = FALSE
#' )
#'
#' # you can use keyring package to set database key
#' library(keyring)
#' key_set("R-shinymanager-key", "obiwankenobi")
#'
#' # Create the database
#' create_db(
#' credentials_data = credentials,
#' sqlite_path = "path/to/database.sqlite", # will be created
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' }
create_db <- function(credentials_data, sqlite_path, passphrase = NULL) {
if (!all(c("user", "password") %in% names(credentials_data))) {
stop("credentials_data must contains columns: 'user', 'password'", call. = FALSE)
}
if(!"admin" %in% names(credentials_data)){
credentials_data$admin <- FALSE
}
if(!"start" %in% names(credentials_data)){
credentials_data$start <- NA
}
if(!"expire" %in% names(credentials_data)){
credentials_data$expire <- NA
}
default_col <- c("user", "password", "start", "expire", "admin")
credentials_data <- credentials_data[, c(default_col,
setdiff(colnames(credentials_data), default_col))]
conn <- dbConnect(SQLite(), dbname = sqlite_path)
on.exit(dbDisconnect(conn))
credentials_data[] <- lapply(credentials_data, as.character)
write_db_encrypt(
conn = conn,
name = "credentials",
value = credentials_data,
passphrase = passphrase
)
write_db_encrypt(
conn = conn,
name = "pwd_mngt",
value = data.frame(
user = credentials_data$user,
must_change = as.character(FALSE),
have_changed = as.character(FALSE),
date_change = character(length(credentials_data$user)),
stringsAsFactors = FALSE
),
passphrase = passphrase
)
write_db_encrypt(
conn = conn,
name = "logs",
value = data.frame(
user = character(0),
server_connected = character(0),
token = character(0),
logout = character(0),
app = character(0),
stringsAsFactors = FALSE
),
passphrase = passphrase
)
}
#' Read / Write crypted table from / to a SQLite database
#'
#' @param conn A DBIConnection object, as returned by \code{\link[DBI]{dbConnect}}.
#' @param value A \code{data.frame}.
#' @param name A character string specifying the unquoted DBMS table name.
#' @param passphrase A secret passphrase to crypt the table inside the database
#'
#' @return a \code{data.frame} for \code{read_db_decrypt}.
#' @export
#'
#' @name db-crypted
#'
#' @importFrom DBI dbConnect dbDisconnect dbWriteTable
#' @importFrom RSQLite SQLite
#' @importFrom openssl sha256 aes_cbc_encrypt
#'
#' @seealso \code{\link{create_db}}
#'
#' @examples
#' # connect to database
#' conn <- DBI::dbConnect(RSQLite::SQLite(), dbname = ":memory:")
#'
#' # write to database
#' write_db_encrypt(conn, value = head(iris), name = "iris", passphrase = "supersecret")
#'
#' # read
#' read_db_decrypt(conn = conn, name = "iris", passphrase = "supersecret")
#'
#' # with wrong passphrase
#' \dontrun{
#' read_db_decrypt(conn = conn, name = "iris", passphrase = "forgotten")
#' }
#'
#' # with DBI method you'll get a crypted blob
#' DBI::dbReadTable(conn = conn, name = "iris")
#'
#' # add some users to database
#' \dontrun{
#' conn <- DBI::dbConnect(RSQLite::SQLite(), dbname = "path/to/database.sqlite")
#'
#' # update "credentials" table
#' current_user <- read_db_decrypt(
#' conn,
#' name = "credentials",
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' add_user <- data.frame(user = "new", password = "pwdToChange",
#' start = NA, expire = NA, admin = TRUE)
#'
#' new_users <- rbind.data.frame(current_user, add_user)
#'
#' write_db_encrypt(
#' conn,
#' value = new_users,
#' name = "credentials",
#' key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' # update "pwd_mngt" table
#' pwd_mngt <- read_db_decrypt(
#' conn,
#' name = "pwd_mngt",
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#'
#' pwd_mngt <- rbind.data.frame(
#' pwd_mngt,
#' data.frame(user = "new", must_change = T, have_changed = F, date_change = "")
#' )
#'
#' write_db_encrypt(
#' conn,
#' value = pwd_mngt,
#' name = "pwd_mngt",
#' passphrase = key_get("R-shinymanager-key", "obiwankenobi")
#' )
#' }
#'
write_db_encrypt <- function(conn, value, name = "credentials", passphrase = NULL) {
if (is.character(conn)) {
conn <- dbConnect(RSQLite::SQLite(), dbname = conn)
on.exit(dbDisconnect(conn))
}
if (!is.null(passphrase)) {
passphrase <- as.character(passphrase)
passphrase <- charToRaw(passphrase)
key <- sha256(passphrase)
value_serialized <- serialize(value, NULL)
value_encrypted <- aes_cbc_encrypt(data = value_serialized, key = key)
value <- data.frame(value = I(list(value_encrypted)), iv = I(list(attr(value_encrypted, "iv"))))
}
dbWriteTable(conn = conn, name = name, value = value, overwrite = TRUE)
}
#' @export
#'
#' @rdname db-crypted
#'
#' @importFrom DBI dbConnect dbDisconnect dbReadTable
#' @importFrom RSQLite SQLite
#' @importFrom openssl sha256 aes_cbc_decrypt
#'
read_db_decrypt <- function(conn, name = "credentials", passphrase = NULL) {
if (is.character(conn)) {
conn <- dbConnect(RSQLite::SQLite(), dbname = conn)
on.exit(dbDisconnect(conn))
}
out <- dbReadTable(conn = conn, name = name)
if (!is.null(passphrase)) {
passphrase <- as.character(passphrase)
passphrase <- charToRaw(passphrase)
key <- sha256(passphrase)
value <- out$value[[1]]
attr(value, "iv") <- out$iv[[1]]
out <- aes_cbc_decrypt(value, key = key)
out <- unserialize(out)
}
return(out)
}
|
library(tidyverse)
# Zad 5-1: load the movies data set
library(readr)
movies <- read_csv("movies.csv")
View(movies)
# Zad 5-2: comedies from 2005
filter(movies, year == 2005, Comedy == 1)
# Zad 5-3: title/year/budget sorted by budget, descending.
# BUG FIX: the original piped the selected data into arrange() while ALSO
# passing `movies` as a sort variable -- arrange() only needs the column.
select(movies, title, year, budget) %>%
  arrange(desc(budget))
# Zad 5-4: animated films of the 1990s, by budget descending
filter(movies, year >= 1990 & year < 2000, Animation == 1) %>%
  arrange(desc(budget))
# Zad 5-5: dramas ordered by running time, descending
dramy <- filter(movies, Drama == 1)
arrange(dramy, desc(length))
# Zad 5-6: mean and MAD of the rating per MPAA category
movies %>%
  group_by(mpaa) %>%
  summarise(srednia = mean(rating), odchylenie = mad(rating))
) | /zadania5.R | no_license | miriamkaminska/tipn_zad-kaminska | R | false | false | 530 | r | library(tidyverse)
#Zad 5-1
library(readr)
movies <- read_csv("movies.csv")
View(movies)
#Zad 5-2
filter(movies, year == 2005, Comedy == 1)
#Zad 5-3
select(movies, title, year, budget) %>%
arrange(movies, desc(budget))
#Zad 5-4
filter(movies, year >= 1990 & year < 2000, Animation == 1) %>%
arrange(desc(budget))
#Zad 5-5
dramy <- filter(movies, Drama == 1)
arrange(dramy, desc(length))
#Zad 5-6
movies %>%
group_by(mpaa) %>%
summarise(srednia = mean(rating), odchylenie = mad(rating)
) |
context("rank")
# Helper: run ntile() through a mutate() pipeline instead of calling it
# directly, so the data-masked evaluation path is exercised.
ntile_h <- function(x, n) {
  tibble(x = x) %>%
    mutate(y = ntile(x, n)) %>%
    pull(y)
}
# Same pipeline, but explicitly forcing dplyr's ntile() implementation
# for comparison.
ntile_h_dplyr <- function(x, n) {
  tibble(x = x) %>%
    mutate(y = dplyr::ntile(x, n)) %>%
    pull(y)
}
# NAs keep their positions and do not count toward bucket sizes.
test_that("ntile ignores number of NAs", {
  x <- c(1:3, NA, NA, NA)
  expect_equal(ntile(x, 3), x)
  expect_equal(ntile_h(x, 3), x)
  x1 <- c(1L, 1L, 1L, NA, NA, NA)
  expect_equal(ntile(x, 1), x1)
  expect_equal(ntile_h(x, 1), x1)
})
# Return type must be integer even for empty or all-NA input.
test_that("ntile always returns an integer", {
  expect_equal(ntile(numeric(), 3), integer())
  expect_equal(ntile_h(numeric(), 3), integer())
  expect_equal(ntile(NA, 3), NA_integer_)
  expect_equal(ntile_h(NA, 3), NA_integer_)
})
# Character ordering must agree with dplyr's reference implementation in
# both the session locale and the C locale.
test_that("ntile handles character vectors consistently", {
  charvec_sort_test <- function() {
    x1 <- c("[", "]", NA, "B", "y", "a", "Z")
    x2 <- c("a", "b", "C")
    expect_equal(ntile_h(x1, 3), ntile_h_dplyr(x1, 3))
    expect_equal(ntile_h(x2, 2), ntile_h_dplyr(x2, 2))
  }
  # Test against both the local, and the C locale for collation
  charvec_sort_test()
  withr::with_collate("C", charvec_sort_test())
})
# Regression check for integer overflow with large n (see issue #4186);
# currently skipped because the failure is intermittent.
test_that("ntile() does not overflow (#4186)", {
  skip("not sure what the problem is, but it sometimes fails")
  res <- tibble(a = 1:1e5) %>%
    mutate(b = ntile(n = 1e5)) %>%
    count(b) %>%
    pull()
  expect_true(all(res == 1L))
})
| /tests/testthat/test-rank.R | permissive | krlmlr/dplyr | R | false | false | 1,356 | r | context("rank")
ntile_h <- function(x, n) {
tibble(x = x) %>%
mutate(y = ntile(x, n)) %>%
pull(y)
}
ntile_h_dplyr <- function(x, n) {
tibble(x = x) %>%
mutate(y = dplyr::ntile(x, n)) %>%
pull(y)
}
test_that("ntile ignores number of NAs", {
x <- c(1:3, NA, NA, NA)
expect_equal(ntile(x, 3), x)
expect_equal(ntile_h(x, 3), x)
x1 <- c(1L, 1L, 1L, NA, NA, NA)
expect_equal(ntile(x, 1), x1)
expect_equal(ntile_h(x, 1), x1)
})
test_that("ntile always returns an integer", {
expect_equal(ntile(numeric(), 3), integer())
expect_equal(ntile_h(numeric(), 3), integer())
expect_equal(ntile(NA, 3), NA_integer_)
expect_equal(ntile_h(NA, 3), NA_integer_)
})
test_that("ntile handles character vectors consistently", {
charvec_sort_test <- function() {
x1 <- c("[", "]", NA, "B", "y", "a", "Z")
x2 <- c("a", "b", "C")
expect_equal(ntile_h(x1, 3), ntile_h_dplyr(x1, 3))
expect_equal(ntile_h(x2, 2), ntile_h_dplyr(x2, 2))
}
# Test against both the local, and the C locale for collation
charvec_sort_test()
withr::with_collate("C", charvec_sort_test())
})
test_that("ntile() does not overflow (#4186)", {
skip("not sure what the problem is, but it sometimes fails")
res <- tibble(a = 1:1e5) %>%
mutate(b = ntile(n = 1e5)) %>%
count(b) %>%
pull()
expect_true(all(res == 1L))
})
|
library(tidyverse)
library(httr)
# GET("http://stapi.co/api/v1/rest/ship?")
# Load the open-units alcohol data and derive a simplified style label.
alcohol <- read_csv(here::here("data/open_units.csv"),
                    col_names = c("Product", "Brand", "Category", "Style",
                                  "Quantity", "Quantity Units", "Volume", "Package",
                                  "ABV", "Units", "Units.precise", "Units.per.100mL")) %>%
  mutate(Style_simple = str_extract(Style, "IPA|Lager|Ale|Cider|Beer|Wine|Stout"),
         Style_simple = ifelse(is.na(Style_simple), "Other", Style_simple))
ggplot(alcohol, aes(x = Style_simple, y = ABV, color = Category)) +
  geom_boxplot() +
  geom_jitter()
# Bob Ross episode features: split "SxxExx" codes into season/episode ints.
bob_ross <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-08-06/bob-ross.csv") %>%
  janitor::clean_names() %>%
  separate(episode, into = c("season", "episode"), sep = "E") %>%
  mutate(season = str_extract(season, "[:digit:]+")) %>%
  mutate_at(vars(season, episode), as.integer)
# Share of episodes in which each painting feature appears.
bob_ross %>%
  pivot_longer(cols = -c(1:3), names_to = "feature", values_to = "present") %>%
  group_by(feature) %>%
  summarize(pct = mean(present)) %>%
  arrange(desc(pct))
# Count tree-type combinations across non-guest episodes.
# BUG FIX: summarize_each() is deprecated -- replaced with across().
bob_ross %>%
  group_by(season, episode, title) %>%
  filter(!guest) %>%
  mutate(has_tree = tree | conifer | deciduous | trees | palm_trees,
         both = deciduous * conifer,
         deciduous_only = deciduous * !conifer * !palm_trees,
         conifer_only = conifer * !deciduous * !palm_trees,
         palm_only = palm_trees * !conifer * !deciduous,
         unspecified = (tree | trees) * (!conifer) * (!deciduous) * (!palm_trees)) %>%
  ungroup() %>%
  select(has_tree:unspecified) %>%
  summarise(across(everything(), sum))
sum(!bob_ross$guest)
# Squirrels in NYC
# BUG FIX: the bare URL below was a parse error; it must be a comment.
# https://github.com/mine-cetinkaya-rundel/nycsquirrels18
| /code/Exam-data-exploration.R | no_license | srvanderplas/unl-stat218-materials | R | false | false | 1,770 | r | library(tidyverse)
library(httr)
# GET("http://stapi.co/api/v1/rest/ship?")
alcohol <- read_csv(here::here("data/open_units.csv"),
col_names = c("Product", "Brand", "Category", "Style",
"Quantity", "Quantity Units", "Volume", "Package",
"ABV", "Units", "Units.precise", "Units.per.100mL")) %>%
mutate(Style_simple = str_extract(Style, "IPA|Lager|Ale|Cider|Beer|Wine|Stout"),
Style_simple = ifelse(is.na(Style_simple), "Other", Style_simple))
ggplot(alcohol, aes(x = Style_simple, y = ABV, color = Category )) + geom_boxplot() + geom_jitter()
bob_ross <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-08-06/bob-ross.csv") %>%
janitor::clean_names() %>%
separate(episode, into = c("season", "episode"), sep = "E") %>%
mutate(season = str_extract(season, "[:digit:]+")) %>%
mutate_at(vars(season, episode), as.integer)
bob_ross %>%
pivot_longer(cols = -c(1:3), names_to = "feature", values_to = "present") %>%
group_by(feature) %>% summarize(pct = mean(present)) %>% arrange(desc(pct))
bob_ross %>% group_by(season, episode, title) %>%
filter(!guest) %>%
mutate(has_tree = tree | conifer | deciduous | trees | palm_trees,
both = deciduous * conifer,
deciduous_only = deciduous * !conifer * !palm_trees,
conifer_only = conifer * !deciduous * !palm_trees,
palm_only = palm_trees * !conifer * !deciduous,
unspecified = (tree | trees) * (!conifer) * (!deciduous) * (!palm_trees)) %>%
ungroup() %>%
select(has_tree:unspecified) %>%
summarize_each(sum)
sum(!bob_ross$guest)
# Squirrels in NYC
https://github.com/mine-cetinkaya-rundel/nycsquirrels18
|
# Package load hook: register default options for addins, but never
# overwrite a value the user has already set in options().
.onLoad <- function(libname, pkgname) {
  opts <- options()
  my_opts <- list(
    addinexamples.clearAddin.nrow = 50
  )
  # TRUE for each default whose name is not yet present in options().
  new_opts <- !(names(my_opts) %in% names(opts))
  if (any(new_opts)) {
    options(my_opts[new_opts])
  }
  invisible()
} | /R/zzz.R | no_license | nathan-russell/addinexamples | R | false | false | 282 | r | .onLoad <- function(libname, pkgname) {
opts <- options()
my_opts <- list(
addinexamples.clearAddin.nrow = 50
)
new_opts <- !(names(my_opts) %in% names(opts))
if (any(new_opts)) {
options(my_opts[new_opts])
}
invisible()
} |
#!/usr/bin/env Rscript
#Auther: Shreeti Tuladhar
#Date: 2nd October, 2015
#Version: 0.02
#creating a vector
# SOME CHANGES MADE BY NATHAN ~~~~
#just for test for the Git
#call the package ggplot2
#you can check if installed first with a condition before calling
#the package ggplot2
library(ggplot2)
#assume you have a data/gapminder.RData, add a condition later
#or read the data from the csv file
load("data/gapminder.RData")
#Create a function to plot dots per continent
#receives a data frame input and makes a plot
# Plot life expectancy over time, one point per country, faceted by continent.
# Receives a gapminder-style data frame (year, lifeExp, country, continent).
# BUG FIX: the facet line started with "$" instead of "#" (a parse error)
# and facet_grid() was a standalone statement instead of being chained
# onto the plot with "+", so faceting never applied.
draw_dots_continent <- function(df){
  p <- ggplot(data = df, aes(x = year, y = lifeExp, color = country)) +
    geom_point(aes(color = continent)) +
    facet_grid(. ~ continent)
  print(p)
}
#create function to create a png plot
# Render the continent plot of the gapminder data to "<name>.png".
# BUG FIX: paste(name, "png", sep = "") produced "namepng"; the file
# extension needs a dot.
make_pngplot <- function(name){
  png(file = paste0(name, ".png"))
  on.exit(dev.off(), add = TRUE)  # close the device even if plotting fails
  draw_dots_continent(gapminder)
}
# Receive the output file name from the command line.
arg <- commandArgs(TRUE)
# Best-effort: try() swallows the error when no argument was supplied.
try(make_pngplot(arg[1]))
| /scripts/continents.R | no_license | shreeti248/exampleproject | R | false | false | 1,005 | r | #!/usr/bin/env Rscript
#Auther: Shreeti Tuladhar
#Date: 2nd October, 2015
#Version: 0.02
#creating a vector
# SOME CHANGES MADE BY NATHAN ~~~~
#just for test for the Git
#call the package ggplot2
#you can check if installed first with a condition before calling
#the package ggplot2
library(ggplot2)
#assume you have a data/gapminder.RData, add a condition later
#or read the data from the csv file
load("data/gapminder.RData")
#Create a function to plot dots per continent
#receives a data frame input and makes a plot
draw_dots_continent <- function(df){
  #Plot lifeExp against year, coloured by continent, with one facet
  #panel per continent. `df` needs columns year, lifeExp, country and
  #continent (the gapminder layout).
  #BUG FIX: the line `$the facet_grid` was a syntax error (`$` typed
  #instead of `#`), and facet_grid() stood on its own line so its
  #result was silently discarded instead of being added to the plot.
  p <- ggplot(data=df,aes(x=year,y=lifeExp, color=country))+
    geom_point(aes(color=continent))+
    #the facet_grid: one column of panels per continent
    facet_grid(.~continent)
  print(p)
}
#create function to create a png plot
make_pngplot <- function(name){
  #Render the continent dot plot of the gapminder data into "<name>.png".
  #BUG FIX: paste(name,"png", sep="") built e.g. "myplotpng" with the
  #extension fused onto the name and no dot; use sep="." instead.
  png(file=paste(name, "png", sep="."))
  draw_dots_continent(gapminder)
  dev.off()
}
#how to receive a input from the user
arg <- commandArgs(TRUE)
try(make_pngplot(arg[1]))
|
# Copyright (C) 2016 Gen Kamita
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#' Create a new likelihood model
#'
#' Constructs a non-standardized Student's t likelihood object.
#'
#' @param df degrees of freedom of the t distribution
#' @param sigma scale parameter of the t distribution
#' @param ... unused
#' @return an object of class c("likelihood.student_t", "likelihood")
#' @export
# implementation of non-standardized student's t likelihood.
new.likelihood.student_t <- function(df, sigma, ...) {
    #df: degree of freedom, sigma: scale parameter.
    # BUG FIX: the last argument was written `..` instead of the dots
    # construct `...`, so extra arguments could not be passed through.
    result <- list(df = df, sigma = sigma)
    class(result) <- c("likelihood.student_t", "likelihood")
    result
}
logp.likelihood.student_t <- function(model, y, mean, ...)
{
    # Log density of the observations y under a non-standardized
    # Student's t likelihood centred at `mean`, with degrees of freedom
    # and scale taken from `model`. Returns a column matrix, which
    # matches MATLAB's tpdf output layout.
    if (!is.vector(mean)) mean <- as.vector(mean)
    if (!is.vector(y))    y    <- as.vector(y)
    nu    <- model$df      # local name avoids shadowing stats::df
    scale <- model$sigma
    # standardize the residual, evaluate dt() on the log scale, and
    # correct for the change of variables by subtracting log(scale)
    std.resid <- (mean - y) / scale
    as.matrix(dt(std.resid, nu, 0, log = TRUE) - log(scale))
}
#' Gradient of the Student's t log likelihood
#'
#' Computes d[i] = d/df_i log p(y_i | f_i) for a non-standardized
#' Student's t likelihood.
#'
#' @param likelihood likelihood object carrying df and sigma
#' @param link unused here (kept for a uniform likelihood interface)
#' @param f current latent function values
#' @param yp observations
#' @param n number of data points
#' @return an n x 1 matrix holding the gradient
gradient.likelihood.student_t <- function(likelihood, link, f, yp, n) {
    # d: d/dx log p(y|f)
    d <- as.matrix(rep(0, n))
    # parameters of the student_t likelihood
    df <- likelihood$df
    sigma <- likelihood$sigma
    # seq_len() instead of 1:n so that n == 0 yields an empty loop
    # rather than iterating over c(1, 0)
    for (i in seq_len(n)) {
        # residual between observation and current latent value
        r <- yp[[i]] - f[[i]]
        rsqwr <- r * r
        a <- rsqwr + df * sigma^2
        # gradient: (df+1) * r / (r^2 + df * sigma^2)
        d[[i]] <- (df + 1) * r / a
    }
    return (d)
}
#' Compute the Hessian of the Student's t log likelihood
#'
#' Returns the second derivatives W[i] = d^2/df_i^2 log p(y_i | f_i).
#'
#' @param likelihood likelihood object carrying df and sigma
#' @param link unused here (kept for a uniform likelihood interface)
#' @param f current latent function values
#' @param yp observations
#' @param n number of data points
#' @param form "matrix" for an n x n diagonal matrix, "vector" for the
#'   length-n diagonal itself
hessian.likelihood.student_t <- function(likelihood, link, f, yp, n, form = "matrix") {
    # W: Hessian of log p(y|f)
    W <- vector(mode = "numeric", length = n)
    # parameters of the student_t likelihood
    df <- likelihood$df
    sigma <- likelihood$sigma
    sn2 <- sigma^2
    for (i in seq_len(n)) {#FIXME: vectorise the for loop.
        # residual between observation and current latent value
        r <- yp[[i]] - f[[i]]
        rsqwr <- r * r
        a <- rsqwr + df * sn2
        # Hessian: (df+1) * (r^2 - df*sigma^2) / (r^2 + df*sigma^2)^2
        W[[i]] <- (df + 1) * (rsqwr - df * sn2) / a^2 #check df is correctly defined, likely to need +1.
    }
    if (form == "vector") return(W)
    # BUG FIX: diag(W) on a length-1 W builds a W[1] x W[1] identity
    # matrix (or errors for negative W); nrow = n forces a 1 x 1 result.
    else return(diag(W, nrow = n))
}
| /R/gp.likelihood.student_t.R | no_license | pbenner/gp.regression | R | false | false | 2,931 | r | # Copyright (C) 2016 Gen Kamita
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#' Create a new likelihood model
#'
#' Constructs a non-standardized Student's t likelihood object.
#'
#' @param df degrees of freedom of the t distribution
#' @param sigma scale parameter of the t distribution
#' @param ... unused
#' @return an object of class c("likelihood.student_t", "likelihood")
#' @export
# implementation of non-standardized student's t likelihood.
new.likelihood.student_t <- function(df, sigma, ...) {
    #df: degree of freedom, sigma: scale parameter.
    # BUG FIX: the last argument was written `..` instead of the dots
    # construct `...`, so extra arguments could not be passed through.
    result <- list(df = df, sigma = sigma)
    class(result) <- c("likelihood.student_t", "likelihood")
    result
}
logp.likelihood.student_t <- function(model, y, mean, ...)
{
    # Log density of the observations y under a non-standardized
    # Student's t likelihood centred at `mean`, with degrees of freedom
    # and scale taken from `model`. Returns a column matrix, which
    # matches MATLAB's tpdf output layout.
    if (!is.vector(mean)) mean <- as.vector(mean)
    if (!is.vector(y))    y    <- as.vector(y)
    nu    <- model$df      # local name avoids shadowing stats::df
    scale <- model$sigma
    # standardize the residual, evaluate dt() on the log scale, and
    # correct for the change of variables by subtracting log(scale)
    std.resid <- (mean - y) / scale
    as.matrix(dt(std.resid, nu, 0, log = TRUE) - log(scale))
}
#' Gradient of the Student's t log likelihood
#'
#' Computes d[i] = d/df_i log p(y_i | f_i) for a non-standardized
#' Student's t likelihood.
#'
#' @param likelihood likelihood object carrying df and sigma
#' @param link unused here (kept for a uniform likelihood interface)
#' @param f current latent function values
#' @param yp observations
#' @param n number of data points
#' @return an n x 1 matrix holding the gradient
gradient.likelihood.student_t <- function(likelihood, link, f, yp, n) {
    # d: d/dx log p(y|f)
    d <- as.matrix(rep(0, n))
    # parameters of the student_t likelihood
    df <- likelihood$df
    sigma <- likelihood$sigma
    # seq_len() instead of 1:n so that n == 0 yields an empty loop
    # rather than iterating over c(1, 0)
    for (i in seq_len(n)) {
        # residual between observation and current latent value
        r <- yp[[i]] - f[[i]]
        rsqwr <- r * r
        a <- rsqwr + df * sigma^2
        # gradient: (df+1) * r / (r^2 + df * sigma^2)
        d[[i]] <- (df + 1) * r / a
    }
    return (d)
}
#' Compute the Hessian of the Student's t log likelihood
#'
#' Returns the second derivatives W[i] = d^2/df_i^2 log p(y_i | f_i).
#'
#' @param likelihood likelihood object carrying df and sigma
#' @param link unused here (kept for a uniform likelihood interface)
#' @param f current latent function values
#' @param yp observations
#' @param n number of data points
#' @param form "matrix" for an n x n diagonal matrix, "vector" for the
#'   length-n diagonal itself
hessian.likelihood.student_t <- function(likelihood, link, f, yp, n, form = "matrix") {
    # W: Hessian of log p(y|f)
    W <- vector(mode = "numeric", length = n)
    # parameters of the student_t likelihood
    df <- likelihood$df
    sigma <- likelihood$sigma
    sn2 <- sigma^2
    for (i in seq_len(n)) {#FIXME: vectorise the for loop.
        # residual between observation and current latent value
        r <- yp[[i]] - f[[i]]
        rsqwr <- r * r
        a <- rsqwr + df * sn2
        # Hessian: (df+1) * (r^2 - df*sigma^2) / (r^2 + df*sigma^2)^2
        W[[i]] <- (df + 1) * (rsqwr - df * sn2) / a^2 #check df is correctly defined, likely to need +1.
    }
    if (form == "vector") return(W)
    # BUG FIX: diag(W) on a length-1 W builds a W[1] x W[1] identity
    # matrix (or errors for negative W); nrow = n forces a 1 x 1 result.
    else return(diag(W, nrow = n))
}
|
library(shiny)
ui <- fluidPage(
textOutput("text"),
verbatimTextOutput("code"),
tableOutput("static"),
dataTableOutput("dynamic"),
plotOutput("plot", width="400px")
)
server <- function(input, output, session){
output$text <- renderText("Hello friend!")
output$code <- renderPrint(summary(1:10))
output$static <- renderTable(head(mtcars))
output$dynamic <- renderDataTable(mtcars, options = list(pageLength = 5))
output$plot <- renderPlot(plot(1:5), res = 96)
}
shinyApp(ui = ui, server = server) | /mastering-shiny/chapter_3/worked_example_output_app.R | permissive | AnkithMohan95/shiny_apps_dojo | R | false | false | 547 | r |
library(shiny)
ui <- fluidPage(
textOutput("text"),
verbatimTextOutput("code"),
tableOutput("static"),
dataTableOutput("dynamic"),
plotOutput("plot", width="400px")
)
server <- function(input, output, session){
output$text <- renderText("Hello friend!")
output$code <- renderPrint(summary(1:10))
output$static <- renderTable(head(mtcars))
output$dynamic <- renderDataTable(mtcars, options = list(pageLength = 5))
output$plot <- renderPlot(plot(1:5), res = 96)
}
shinyApp(ui = ui, server = server) |
#' Log pseudo-determinant and rank of a (possibly rank-deficient)
#' matrix Q. Eigenvalues below 1e-10 are treated as zero; logdet is
#' the sum of the logs of the remaining eigenvalues, rank their count.
det2 <- function(Q)
{
  # only.values = TRUE skips the eigenvector computation, which is all
  # we need here and is considerably cheaper for large Q
  e <- eigen(Q, only.values = TRUE)$values
  e <- e[e > 1e-10]
  list("logdet" = sum(log(e)), "rank" = length(e))
}
| /R/det2.R | no_license | bioimaginggroup/BayGMRF | R | false | false | 139 | r | det2<-function(Q)
{
e<-eigen(Q)$values
e<-e[e>1e-10]
loge<-sum(log(e))
rank<-length(e)
return(list("logdet"=loge,"rank"=rank))
}
|
#Compare emissions from motor vehicle sources in Baltimore City with
#emissions from motor vehicle sources in Los Angeles County, California
#(fips == "06037"). Which city has seen greater changes over time in motor vehicle
#emissions?
#
# Load ggplot2
library(ggplot2)
# Load data
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Make list of cities to filter by
city_list <- c("24510", "06037")
# Make list of sources
motor_list <- c("Mobile - On-Road Gasoline Light Duty Vehicles",
"Mobile - On-Road Gasoline Heavy Duty Vehicles",
"Mobile - On-Road Diesel Light Duty Vehicles",
"Mobile - On-Road Diesel Heavy Duty Vehicles")
# Filter SCC codes by motor vehicle sources
SCCx <- SCC[is.element(SCC$EI.Sector, motor_list),c("SCC")]
# Subset NEI by motor vehicle sources
NEIx <- NEI[is.element(NEI$SCC, SCCx), c("Emissions", "fips", "year")]
# Subset NEI by cities: Baltimore and LA
NEIx <- NEIx[is.element(NEIx$fips, city_list), c("Emissions", "fips", "year")]
# Sum emissions by year and city
agg <- aggregate(NEIx$Emissions~NEIx$fips+NEIx$year, data=NEIx, sum)
# Simplify column names
colnames(agg) <- c("fips","year","emissions")
# Create image file and plot
png("plot6_ex2.png", width=600, height=480)
compare_plot <- qplot(x=agg$year, y=agg$emissions, data=agg, color=fips, geom="line",
                      main="Emissions comparison: LA(06037) and Baltimore (24510)",
                      xlab="years", ylab="emissions")
# BUG FIX: the object was printed as `compare_ploSt` (typo), which would
# abort the script with "object not found" and leave an empty PNG device.
print(compare_plot)
dev.off()
| /emissionsComparison.R | no_license | jayzuniga/R | R | false | false | 1,534 | r | #Compare emissions from motor vehicle sources in Baltimore City with
#emissions from motor vehicle sources in Los Angeles County, California
#(fips == "06037"). Which city has seen greater changes over time in motor vehicle
#emissions?
#
# Load ggplot2
library(ggplot2)
# Load data
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Make list of cities to filter by
city_list <- c("24510", "06037")
# Make list of sources
motor_list <- c("Mobile - On-Road Gasoline Light Duty Vehicles",
"Mobile - On-Road Gasoline Heavy Duty Vehicles",
"Mobile - On-Road Diesel Light Duty Vehicles",
"Mobile - On-Road Diesel Heavy Duty Vehicles")
# Filter SCC codes by motor vehicle sources
SCCx <- SCC[is.element(SCC$EI.Sector, motor_list),c("SCC")]
# Subset NEI by motor vehicle sources
NEIx <- NEI[is.element(NEI$SCC, SCCx), c("Emissions", "fips", "year")]
# Subset NEI by cities: Baltimore and LA
NEIx <- NEIx[is.element(NEIx$fips, city_list), c("Emissions", "fips", "year")]
# Sum emissions by year and city
agg <- aggregate(NEIx$Emissions~NEIx$fips+NEIx$year, data=NEIx, sum)
# Simplify column names
colnames(agg) <- c("fips","year","emissions")
# Create image file and plot
png("plot6_ex2.png", width=600, height=480)
compare_plot <- qplot(x=agg$year, y=agg$emissions, data=agg, color=fips, geom="line",
                      main="Emissions comparison: LA(06037) and Baltimore (24510)",
                      xlab="years", ylab="emissions")
# BUG FIX: the object was printed as `compare_ploSt` (typo), which would
# abort the script with "object not found" and leave an empty PNG device.
print(compare_plot)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HighFreq.R
\name{agg_regate}
\alias{agg_regate}
\title{Calculate the aggregation (weighted average) of a statistical estimator over
a \emph{OHLC} time series.}
\usage{
agg_regate(oh_lc, mo_ment = "run_variance", weight_ed = TRUE, ...)
}
\arguments{
\item{oh_lc}{\emph{OHLC} time series of prices and trading volumes, in
\emph{xts} format.}
\item{mo_ment}{\emph{character} string representing function for
estimating the moment.}
\item{weight_ed}{\emph{Boolean} argument: should estimate be weighted by
the trading volume? (default is \code{TRUE})}
\item{...}{additional parameters to the mo_ment function.}
}
\value{
A single \emph{numeric} value equal to the volume weighted average of
an estimator over the time series.
}
\description{
Calculate the aggregation (weighted average) of a statistical estimator over
a \emph{OHLC} time series.
}
\details{
The function \code{agg_regate()} calculates a single number
representing the volume weighted average of an estimator over the
\emph{OHLC} time series of prices. By default the sum is trade volume
weighted.
}
\examples{
# calculate weighted average variance for SPY (single number)
vari_ance <- agg_regate(oh_lc=SPY, mo_ment="run_variance")
# calculate time series of daily skew estimates for SPY
skew_daily <- apply.daily(x=SPY, FUN=agg_regate, mo_ment="run_skew")
}
| /man/agg_regate.Rd | no_license | IanMadlenya/HighFreq | R | false | true | 1,412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HighFreq.R
\name{agg_regate}
\alias{agg_regate}
\title{Calculate the aggregation (weighted average) of a statistical estimator over
a \emph{OHLC} time series.}
\usage{
agg_regate(oh_lc, mo_ment = "run_variance", weight_ed = TRUE, ...)
}
\arguments{
\item{oh_lc}{\emph{OHLC} time series of prices and trading volumes, in
\emph{xts} format.}
\item{mo_ment}{\emph{character} string representing function for
estimating the moment.}
\item{weight_ed}{\emph{Boolean} argument: should estimate be weighted by
the trading volume? (default is \code{TRUE})}
\item{...}{additional parameters to the mo_ment function.}
}
\value{
A single \emph{numeric} value equal to the volume weighted average of
an estimator over the time series.
}
\description{
Calculate the aggregation (weighted average) of a statistical estimator over
a \emph{OHLC} time series.
}
\details{
The function \code{agg_regate()} calculates a single number
representing the volume weighted average of an estimator over the
\emph{OHLC} time series of prices. By default the sum is trade volume
weighted.
}
\examples{
# calculate weighted average variance for SPY (single number)
vari_ance <- agg_regate(oh_lc=SPY, mo_ment="run_variance")
# calculate time series of daily skew estimates for SPY
skew_daily <- apply.daily(x=SPY, FUN=agg_regate, mo_ment="run_skew")
}
|
## Cache-aware matrix container.
## Wraps a matrix together with a slot for its (lazily computed)
## inverse and returns four accessor closures:
##   set(y)        -- replace the matrix and invalidate the cached inverse
##   get()         -- return the current matrix
##   setInverse(s) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if not set yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # assign in the enclosing environment so the state survives the call
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(solve) {
    inv <<- solve
  }
  getInverse <- function() {
    inv
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached it is returned immediately (with a
## "getting cached data" message); otherwise it is computed with solve(),
## stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # cache miss: compute, remember, return
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | peteabbate/ProgrammingAssignment2 | R | false | false | 1,255 | r | ## My function
## Cache-aware matrix container: wraps a matrix together with a slot
## for its lazily computed inverse and returns four accessor closures:
##   set(y)        -- replace the matrix and clear the cached inverse
##   get()         -- return the stored matrix
##   setInverse(s) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # assign in the enclosing environment so the state survives the call
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(solve) {
    inv <<- solve
  }
  getInverse <- function() {
    inv
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached it is returned immediately (with a
## "getting cached data" message); otherwise it is computed with solve(),
## stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # cache miss: compute, remember, return
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
#' caretEnsemble: Make ensembles of caret models.
#'
#' Functions for creating ensembles of caret models: caretList and caretStack
#' @docType package
#' @name caretEnsemble
#' @importFrom graphics plot
#' @importFrom methods is
#' @importFrom stats coef median model.frame model.response predict qnorm reshape resid residuals weighted.mean weights
NULL
#' @title caretList of classification models
#' @name models.class
#' @description Data for the caretEnsemble package
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title caretList of regression models
#' @name models.reg
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title data for classification
#' @name X.class
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title data for classification
#' @name Y.class
#' @docType data
#' @rdname data
#' @keywords data
NULL
#' @title data for classification
#' @name X.reg
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title data for regression
#' @name Y.reg
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#Hack to make data.table functions work with devtools::load_all
#http://stackoverflow.com/questions/23252231/r-data-table-breaks-in-exported-functions
#http://r.789695.n4.nabble.com/Import-problem-with-data-table-in-packages-td4665958.html
#(data.table checks this package-level flag before dispatching to its
# fast `[` methods, so it must exist in the package namespace)
assign(".datatable.aware", TRUE)
#Avoid false positives in R CMD CHECK:
#(these names are columns referenced via non-standard evaluation, which
# the checker would otherwise flag as undefined global variables)
utils::globalVariables(
  c(".fitted", ".resid", "method", "id", "yhat",
    "ymax", "yavg", "ymin", "metric", "metricSD", "n"))
| /R/caretEnsemble-package.R | permissive | zachmayer/caretEnsemble | R | false | false | 1,819 | r | #' caretEnsemble: Make ensembles of caret models.
#'
#' Functions for creating ensembles of caret models: caretList and caretStack
#' @docType package
#' @name caretEnsemble
#' @importFrom graphics plot
#' @importFrom methods is
#' @importFrom stats coef median model.frame model.response predict qnorm reshape resid residuals weighted.mean weights
NULL
#' @title caretList of classification models
#' @name models.class
#' @description Data for the caretEnsemble package
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title caretList of regression models
#' @name models.reg
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title data for classification
#' @name X.class
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title data for classification
#' @name Y.class
#' @docType data
#' @rdname data
#' @keywords data
NULL
#' @title data for classification
#' @name X.reg
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#' @title data for regression
#' @name Y.reg
#' @docType data
#' @rdname data
#' @author Zachary Deane-Mayer \email{zach.mayer@@gmail.com}
#' @keywords data
NULL
#Hack to make data.table functions work with devtools::load_all
#http://stackoverflow.com/questions/23252231/r-data-table-breaks-in-exported-functions
#http://r.789695.n4.nabble.com/Import-problem-with-data-table-in-packages-td4665958.html
#(data.table checks this package-level flag before dispatching to its
# fast `[` methods, so it must exist in the package namespace)
assign(".datatable.aware", TRUE)
#Avoid false positives in R CMD CHECK:
#(these names are columns referenced via non-standard evaluation, which
# the checker would otherwise flag as undefined global variables)
utils::globalVariables(
  c(".fitted", ".resid", "method", "id", "yhat",
    "ymax", "yavg", "ymin", "metric", "metricSD", "n"))
|
### R code from vignette source 'adephylo.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: adephylo.Rnw:105-106 (eval = FALSE)
###################################################
## vignette("phylobase")
###################################################
### code chunk number 2: load
###################################################
library(ape)
library(phylobase)
library(ade4)
library(adephylo)
search()
###################################################
### code chunk number 3: kludge
###################################################
cat("\n=== Old - deprecated- version ===\n")
orthogram <- ade4::orthogram
args(orthogram)
cat("\n=== New version === \n")
orthogram <- adephylo::orthogram
args(orthogram)
###################################################
### code chunk number 4: adephylo.Rnw:168-169 (eval = FALSE)
###################################################
## ?adephylo
###################################################
### code chunk number 5: adephylo.Rnw:174-175 (eval = FALSE)
###################################################
## help("adephylo", package="adephylo", html=TRUE)
###################################################
### code chunk number 6: adephylo.Rnw:179-180 (eval = FALSE)
###################################################
## options(htmlhelp = FALSE)
###################################################
### code chunk number 7: readTree
###################################################
data(ungulates)
ungulates$tre
myTree <- read.tree(text=ungulates$tre)
myTree
plot(myTree, main="ape's plotting of a tree")
###################################################
### code chunk number 8: adephylo.Rnw:226-231
###################################################
temp <- as(myTree, "phylo4")
class(temp)
temp <- as(temp, "phylo")
class(temp)
all.equal(temp, myTree)
###################################################
### code chunk number 9: phylo4d
###################################################
ung <- phylo4d(myTree, ungulates$tab)
class(ung)
table.phylo4d(ung)
###################################################
### code chunk number 10: adephylo.Rnw:271-273
###################################################
x <- tdata(ung, type="tip")
head(x)
###################################################
### code chunk number 11: moranI
###################################################
W <- proxTips(myTree, met="Abouheif")
moran.idx(tdata(ung, type="tip")$afbw, W)
moran.idx(tdata(ung, type="tip")[,1], W, addInfo=TRUE)
###################################################
### code chunk number 12: adephylo.Rnw:320-332
###################################################
afbw <- tdata(ung, type="tip")$afbw
sim <- replicate(499, moran.idx(sample(afbw), W)) # permutations
sim <- c(moran.idx(afbw, W), sim)
cat("\n=== p-value (right-tail) === \n")
pval <- mean(sim>=sim[1])
pval
plot(density(sim), main="Moran's I Monte Carlo test for 'bif'") # plot
mtext("Density of permutations, and observation (in red)")
abline(v=sim[1], col="red", lwd=3)
###################################################
### code chunk number 13: abouheif
###################################################
ung.abTests <- abouheif.moran(ung)
ung.abTests
plot(ung.abTests)
###################################################
### code chunk number 14: adephylo.Rnw:376-378
###################################################
hasEdgeLength(ung)
myTree.withBrLe <- compute.brlen(myTree)
###################################################
### code chunk number 15: adephylo.Rnw:384-386
###################################################
myProx <- vcv.phylo(myTree.withBrLe)
abouheif.moran(ung, W=myProx)
###################################################
### code chunk number 16: adephylo.Rnw:413-415
###################################################
x <- as(rtree(5),"phylo4")
plot(x,show.n=TRUE)
###################################################
### code chunk number 17: adephylo.Rnw:418-420
###################################################
x.part <- treePart(x)
x.part
###################################################
### code chunk number 18: adephylo.Rnw:423-425
###################################################
temp <- phylo4d(x, x.part)
table.phylo4d(temp, cent=FALSE, scale=FALSE)
###################################################
### code chunk number 19: adephylo.Rnw:435-437
###################################################
args(treePart)
temp <- phylo4d(x, treePart(x, result="orthobasis") )
###################################################
### code chunk number 20: orthobas1
###################################################
temp <- phylo4d(myTree, treePart(myTree, result="orthobasis") )
par(mar=rep(.1,4))
table.phylo4d(temp, repVar=1:8, ratio.tree=.3)
###################################################
### code chunk number 21: orthogram
###################################################
afbw.ortgTest <- orthogram(afbw, myTree)
afbw.ortgTest
###################################################
### code chunk number 22: adephylo.Rnw:483-484
###################################################
me.phylo(myTree.withBrLe)
###################################################
### code chunk number 23: figFourBas
###################################################
ung.listBas <- list()
ung.listBas[[1]] <- phylo4d(myTree, as.data.frame(me.phylo(myTree.withBrLe, method="patristic")))
ung.listBas[[2]] <- phylo4d(myTree, as.data.frame(me.phylo(myTree, method="nNodes")))
ung.listBas[[3]]<- phylo4d(myTree, as.data.frame(me.phylo(myTree, method="Abouheif")))
ung.listBas[[4]] <- phylo4d(myTree, as.data.frame(me.phylo(myTree, method="sumDD")))
par(mar=rep(.1,4), mfrow=c(2,2))
invisible(lapply(ung.listBas, table.phylo4d, repVar=1:5, cex.sym=.7, show.tip.label=FALSE, show.node=FALSE))
###################################################
### code chunk number 24: lm1
###################################################
afbw <- log(ungulates$tab[,1])
neonatw <- log((ungulates$tab[,2]+ungulates$tab[,3])/2)
names(afbw) <- myTree$tip.label
names(neonatw) <- myTree$tip.label
plot(afbw, neonatw, main="Relationship between afbw and neonatw")
lm1 <- lm(neonatw~afbw)
abline(lm1, col="blue")
anova(lm1)
###################################################
### code chunk number 25: resid
###################################################
resid <- residuals(lm1)
names(resid) <- myTree$tip.label
temp <- phylo4d(myTree,data.frame(resid))
abouheif.moran(temp)
table.phylo4d(temp)
###################################################
### code chunk number 26: adephylo.Rnw:537-544
###################################################
myBasis <- me.phylo(myTree, method="Abouheif")
lm2 <- lm(neonatw~myBasis[,1] + afbw)
resid <- residuals(lm2)
names(resid) <- myTree$tip.label
temp <- phylo4d(myTree,data.frame(resid))
abouheif.moran(temp)
anova(lm2)
###################################################
### code chunk number 27: adephylo.Rnw:570-575
###################################################
W <- proxTips(myTree, method="Abouheif", sym=FALSE)
lagNeonatw <- W %*% neonatw
lm3 <- lm(neonatw ~ lagNeonatw + afbw)
resid <- residuals(lm3)
abouheif.moran(resid,W)
###################################################
### code chunk number 28: pca1
###################################################
# Mean-impute missing values: every NA in `x` is replaced by the mean
# of the non-missing entries; all other values pass through unchanged.
f1 <- function(x){
    x[is.na(x)] <- mean(x, na.rm = TRUE)
    x
}
data(maples)
traits <- apply(maples$tab, 2, f1)
pca1 <- dudi.pca(traits, scannf=FALSE, nf=1)
barplot(pca1$eig, main="PCA eigenvalues", col=heat.colors(16))
###################################################
### code chunk number 29: pca2
###################################################
tre <- read.tree(text=maples$tre)
W <- proxTips(tre)
myComp <- data.frame(PC1=pca1$li[,1], lagPC1=W %*% pca1$li[,1])
myComp.4d <- phylo4d(tre, myComp)
nodeLabels(myComp.4d) <- names(nodeLabels(myComp.4d))
table.phylo4d(myComp.4d)
###################################################
### code chunk number 30: aboutest
###################################################
myTest <- abouheif.moran(myComp[,1], W=W)
plot(myTest, main="Abouheif's test using patristic proximity")
mtext("First principal component - maples data", col="blue", line=1)
###################################################
### code chunk number 31: loadings
###################################################
ldgs <- pca1$c1[,1]
plot(ldgs, type="h", xlab="Variable", xaxt="n", ylab="Loadings")
s.label(cbind(1:31, ldgs), lab=colnames(traits), add.p=TRUE, clab=.8)
temp <- abs(ldgs)
thres <- quantile(temp, .75)
abline(h=thres * c(-1,1), lty=2, col="blue3", lwd=3)
title("Loadings for PC1")
mtext("Quarter of most contributing variables indicated in blue", col="blue")
| /adephylo/inst/doc/adephylo.R | no_license | ingted/R-Examples | R | false | false | 8,809 | r | ### R code from vignette source 'adephylo.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: adephylo.Rnw:105-106 (eval = FALSE)
###################################################
## vignette("phylobase")
###################################################
### code chunk number 2: load
###################################################
library(ape)
library(phylobase)
library(ade4)
library(adephylo)
search()
###################################################
### code chunk number 3: kludge
###################################################
cat("\n=== Old - deprecated- version ===\n")
orthogram <- ade4::orthogram
args(orthogram)
cat("\n=== New version === \n")
orthogram <- adephylo::orthogram
args(orthogram)
###################################################
### code chunk number 4: adephylo.Rnw:168-169 (eval = FALSE)
###################################################
## ?adephylo
###################################################
### code chunk number 5: adephylo.Rnw:174-175 (eval = FALSE)
###################################################
## help("adephylo", package="adephylo", html=TRUE)
###################################################
### code chunk number 6: adephylo.Rnw:179-180 (eval = FALSE)
###################################################
## options(htmlhelp = FALSE)
###################################################
### code chunk number 7: readTree
###################################################
data(ungulates)
ungulates$tre
myTree <- read.tree(text=ungulates$tre)
myTree
plot(myTree, main="ape's plotting of a tree")
###################################################
### code chunk number 8: adephylo.Rnw:226-231
# Code chunks extracted from the 'adephylo' vignette (adephylo.Rnw).
# NOTE(review): depends on objects built in earlier chunks not visible here
# (`myTree`, `ungulates`) and on the ape/phylobase/adephylo/ade4 packages
# being attached — confirm before running this fragment in isolation.
###################################################
# Round-trip conversion between tree classes: phylo -> phylo4 -> phylo.
temp <- as(myTree, "phylo4")
class(temp)
temp <- as(temp, "phylo")
class(temp)
all.equal(temp, myTree)
###################################################
### code chunk number 9: phylo4d
###################################################
# Attach the ungulates trait table to the tree as a phylo4d object and plot it.
ung <- phylo4d(myTree, ungulates$tab)
class(ung)
table.phylo4d(ung)
###################################################
### code chunk number 10: adephylo.Rnw:271-273
###################################################
# Extract tip data back out of the phylo4d object.
x <- tdata(ung, type="tip")
head(x)
###################################################
### code chunk number 11: moranI
###################################################
# Moran's index using Abouheif's proximity between tips.
W <- proxTips(myTree, met="Abouheif")
moran.idx(tdata(ung, type="tip")$afbw, W)
moran.idx(tdata(ung, type="tip")[,1], W, addInfo=TRUE)
###################################################
### code chunk number 12: adephylo.Rnw:320-332
###################################################
# Monte Carlo test of Moran's index: 499 permutations plus the observed value.
afbw <- tdata(ung, type="tip")$afbw
sim <- replicate(499, moran.idx(sample(afbw), W)) # permutations
sim <- c(moran.idx(afbw, W), sim)
cat("\n=== p-value (right-tail) === \n")
# Right-tailed p-value; sim[1] is the observed statistic and is counted in
# its own null distribution (standard permutation-test practice).
pval <- mean(sim>=sim[1])
pval
plot(density(sim), main="Moran's I Monte Carlo test for 'bif'") # plot
mtext("Density of permutations, and observation (in red)")
abline(v=sim[1], col="red", lwd=3)
###################################################
### code chunk number 13: abouheif
###################################################
# Abouheif's test of phylogenetic autocorrelation for every trait in `ung`.
ung.abTests <- abouheif.moran(ung)
ung.abTests
plot(ung.abTests)
###################################################
### code chunk number 14: adephylo.Rnw:376-378
###################################################
# The tree lacks branch lengths; create some with compute.brlen().
hasEdgeLength(ung)
myTree.withBrLe <- compute.brlen(myTree)
###################################################
### code chunk number 15: adephylo.Rnw:384-386
###################################################
# Re-run the test with a user-supplied proximity (phylogenetic covariance).
myProx <- vcv.phylo(myTree.withBrLe)
abouheif.moran(ung, W=myProx)
###################################################
### code chunk number 16: adephylo.Rnw:413-415
###################################################
# Small random 5-tip tree used to illustrate tree partitioning.
x <- as(rtree(5),"phylo4")
plot(x,show.n=TRUE)
###################################################
### code chunk number 17: adephylo.Rnw:418-420
###################################################
x.part <- treePart(x)
x.part
###################################################
### code chunk number 18: adephylo.Rnw:423-425
###################################################
# Display the dummy-variable partition alongside the tree.
temp <- phylo4d(x, x.part)
table.phylo4d(temp, cent=FALSE, scale=FALSE)
###################################################
### code chunk number 19: adephylo.Rnw:435-437
###################################################
args(treePart)
temp <- phylo4d(x, treePart(x, result="orthobasis") )
###################################################
### code chunk number 20: orthobas1
###################################################
# Plot the first eight orthobasis vectors for the ungulate tree.
temp <- phylo4d(myTree, treePart(myTree, result="orthobasis") )
par(mar=rep(.1,4))
table.phylo4d(temp, repVar=1:8, ratio.tree=.3)
###################################################
### code chunk number 21: orthogram
###################################################
# Orthonormal decomposition of trait variance along the tree.
afbw.ortgTest <- orthogram(afbw, myTree)
afbw.ortgTest
###################################################
### code chunk number 22: adephylo.Rnw:483-484
###################################################
me.phylo(myTree.withBrLe)
###################################################
### code chunk number 23: figFourBas
###################################################
# Compare eigenvector bases (me.phylo) derived from four tip proximities.
ung.listBas <- list()
ung.listBas[[1]] <- phylo4d(myTree, as.data.frame(me.phylo(myTree.withBrLe, method="patristic")))
ung.listBas[[2]] <- phylo4d(myTree, as.data.frame(me.phylo(myTree, method="nNodes")))
ung.listBas[[3]]<- phylo4d(myTree, as.data.frame(me.phylo(myTree, method="Abouheif")))
ung.listBas[[4]] <- phylo4d(myTree, as.data.frame(me.phylo(myTree, method="sumDD")))
par(mar=rep(.1,4), mfrow=c(2,2))
invisible(lapply(ung.listBas, table.phylo4d, repVar=1:5, cex.sym=.7, show.tip.label=FALSE, show.node=FALSE))
###################################################
### code chunk number 24: lm1
###################################################
# Ordinary regression of (log) neonatal weight on (log) adult female body weight.
afbw <- log(ungulates$tab[,1])
neonatw <- log((ungulates$tab[,2]+ungulates$tab[,3])/2)
names(afbw) <- myTree$tip.label
names(neonatw) <- myTree$tip.label
plot(afbw, neonatw, main="Relationship between afbw and neonatw")
lm1 <- lm(neonatw~afbw)
abline(lm1, col="blue")
anova(lm1)
###################################################
### code chunk number 25: resid
###################################################
# Check whether OLS residuals are phylogenetically autocorrelated.
resid <- residuals(lm1)
names(resid) <- myTree$tip.label
temp <- phylo4d(myTree,data.frame(resid))
abouheif.moran(temp)
table.phylo4d(temp)
###################################################
### code chunk number 26: adephylo.Rnw:537-544
###################################################
# Add the first basis vector as a covariate to absorb the autocorrelation.
myBasis <- me.phylo(myTree, method="Abouheif")
lm2 <- lm(neonatw~myBasis[,1] + afbw)
resid <- residuals(lm2)
names(resid) <- myTree$tip.label
temp <- phylo4d(myTree,data.frame(resid))
abouheif.moran(temp)
anova(lm2)
###################################################
### code chunk number 27: adephylo.Rnw:570-575
###################################################
# Alternative: autoregressive model using the lagged response (W %*% y).
W <- proxTips(myTree, method="Abouheif", sym=FALSE)
lagNeonatw <- W %*% neonatw
lm3 <- lm(neonatw ~ lagNeonatw + afbw)
resid <- residuals(lm3)
abouheif.moran(resid,W)
###################################################
### code chunk number 28: pca1
###################################################
# Impute missing values in a numeric vector with its mean.
#
# Replaces every NA in `x` by the mean of the non-missing entries;
# all other elements (and any names) are returned unchanged.
f1 <- function(x){
  fill <- mean(x, na.rm = TRUE)
  replace(x, is.na(x), fill)
}
# Mean-impute missing trait values column-wise, then run a PCA (ade4).
data(maples)
traits <- apply(maples$tab, 2, f1)
pca1 <- dudi.pca(traits, scannf=FALSE, nf=1)
barplot(pca1$eig, main="PCA eigenvalues", col=heat.colors(16))
###################################################
### code chunk number 29: pca2
###################################################
# Compare the first principal component with its phylogenetically lagged
# version (W %*% PC1) on the maples tree.
tre <- read.tree(text=maples$tre)
W <- proxTips(tre)
myComp <- data.frame(PC1=pca1$li[,1], lagPC1=W %*% pca1$li[,1])
myComp.4d <- phylo4d(tre, myComp)
# Use node names (rather than values) as labels for a cleaner plot.
nodeLabels(myComp.4d) <- names(nodeLabels(myComp.4d))
table.phylo4d(myComp.4d)
###################################################
### code chunk number 30: aboutest
###################################################
# Abouheif's test for phylogenetic signal in PC1.
myTest <- abouheif.moran(myComp[,1], W=W)
plot(myTest, main="Abouheif's test using patristic proximity")
mtext("First principal component - maples data", col="blue", line=1)
###################################################
### code chunk number 31: loadings
###################################################
# Variable loadings on PC1; highlight the top quarter by absolute value.
ldgs <- pca1$c1[,1]
plot(ldgs, type="h", xlab="Variable", xaxt="n", ylab="Loadings")
# NOTE(review): hard-codes 31 variables — assumes ncol(maples$tab) == 31.
s.label(cbind(1:31, ldgs), lab=colnames(traits), add.p=TRUE, clab=.8)
temp <- abs(ldgs)
thres <- quantile(temp, .75)
abline(h=thres * c(-1,1), lty=2, col="blue3", lwd=3)
title("Loadings for PC1")
mtext("Quarter of most contributing variables indicated in blue", col="blue")
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(navbarPage(
theme = "bootstrap.css",
title = "tSVE",
id = "tSVE",
navbarMenu(
title = "Input",
tabPanel(
title = "Phenotypes",
# Phenotype options ----
wellPanel(
h4("Phenotypes"),
hr(),
fluidRow(
column(
width = 2,
p(strong("Phenotype file")),
actionButton(
"selectPheno", "Browse",
icon = icon("file"), width = '100%')
),
column(
width = 6,
strong("File"),
br(),
uiOutput("phenoFile"),
# Wrap long file names
tags$head(tags$style(
"#phenoFile{
display:block;
word-wrap:break-word;
}"
))
),
column(
width = 4,
strong("Summary"), br(),
htmlOutput("phenoFileSummary")
)
),
fluidRow(
column(
width = 2,
br(),
actionButton(
"demoPheno", "Sample file",
icon = icon("file-text"), width = '100%')
)
)
),
fluidRow(
column(
width = 12,
tags$h4("Notes"),
"The phenotype file must be formatted as follows:",
tags$ul(
tags$li(
"Fields separator must be 'white space', the default",
tags$a(
href="http://stat.ethz.ch/R-manual/R-devel/library/utils/html/read.table.html",
tags$code("read.table")
),
"field separator."
),
tags$li(
"First row must be phenotype names."
),
tags$li(
"First column must be samples identifiers matching those in",
"the VCF file(s)."
)
)
)
)
),
tabPanel(
title = "Genomic ranges",
# GRanges options ----
wellPanel(
h4("Genomic ranges"),
hr(),
fluidRow(
column(
width = 2,
selectInput(
"grangesInputMode", "Input type",
choices = list(
"BED file" = "bed",
"UCSC browser" = "ucsc",
"EnsDb package" = "EnsDb"
),
selected = "bed",
width = '100%')
),
column(
width = 4, offset = 6,
strong("Summary"),
htmlOutput("rangesSummary")
)
),
fluidRow(
conditionalPanel(
condition = "input.grangesInputMode == 'bed'",
fluidRow(
column(
width = 2,
br(),
actionButton(
"selectBed", "Browse",
icon = icon("file"), width = '100%')
),
column(
width = 6,
strong("File"), br(),
uiOutput("bedFile")
),
# Wrap long file names
tags$head(tags$style(
"#bedFile{
display:block;
scanVcfHeader word-wrap:break-word;
}"
))
),
fluidRow(
column(
width = 2,
br(),
actionButton(
"demoBed", "Sample file",
icon = icon("file-text"), width = '100%')
)
)
),
conditionalPanel(
condition = "input.grangesInputMode == 'ucsc'",
column(
width = 8,
textInput(
"ucscRanges", "UCSC-type genomic ranges",
value = "",
placeholder = paste(
"chr21:33,031,597-33,041,570",
"chr2:2,031,597-2,041,570",
"...",
sep = " ; "),
width = "100%")
),
column(
width = 2,
br(),
actionButton(
"demoUCSC", "Sample input",
icon = icon("font"), width = '100%')
)
),
conditionalPanel(
condition = "input.grangesInputMode == 'EnsDb'",
column(
width = 2,
selectInput(
"ensDb.type", "Type",
choices = list("Gene name" = "Genename"),
selected = "Genename")
),
column(
width = 1,
selectInput(
"ensDb.condition", "Condition",
choices = c("=", "!=", "like", "in"),
selected = "=")
),
column(
2,
textInput(
"ensDb.value", "Value",
value = "",
placeholder = "SLC24A5,IL17A,...")
),
column(
width = 2,
actionButton(
"demoEnsDb", "Sample input",
icon = icon("font"), width = '100%')
),
column(
width = 4, offset = 1,
strong("Note"),
p(
"For the ", code("like"), "filter,",
"use ", code("%"), "as wildcard."
)
),
tabsetPanel(
id = "ensDb.resultTab",
selected = "Genes",
tabPanel(
title = 'Genes',
DT::dataTableOutput("ensDb.Genes")
)#, # TODO
# tabPanel('Transcripts',
# dataTableOutput("Transcripts")
# ),
# tabPanel('Exons',
# dataTableOutput("Exons")
# )
)
)
)
)
),
tabPanel(
title = "Variants",
# VCF options ----
wellPanel(
h4("VCF file(s)"),
hr(),
fluidRow(
column(
width = 2,
selectInput(
"vcfInputMode", "VCF input type",
choices = list(
"Single VCF" = "SingleVcf",
"One per chromosome" = "OnePerChr"
),
selected = "OnePerChr",
width = '100%')
),
conditionalPanel(
condition = "input.vcfInputMode == 'SingleVcf'",
column(
width = 2,
br(),
actionButton(
"selectVcf", "Browse",
icon = icon("file"), width = '100%')
),
column(
width = 2,
br(),
actionButton(
"demoVcf", "Sample file",
icon = icon("file-text"), width = '100%')
),
column(
width = 4, offset = 2,
strong("Summary"),
br(),
textOutput("selectedVcf"),
# Wrap long file names
tags$head(tags$style(
"#selectedVcf{
display:block;
word-wrap:break-word;
}"
))
)
),
conditionalPanel(
condition = "input.vcfInputMode == 'OnePerChr'",
column(
width = 6,
textInput(
"vcfFolder", "Folder of VCF files",
value = system.file("extdata", package = "TVTB"),
width = '100%',
placeholder = "/path/to/VCF/folder")
),
column(
width = 4,
strong("Summary"),
htmlOutput("vcfFolderSummary")
)
)
),
fluidRow(
conditionalPanel(
condition = "input.vcfInputMode == 'OnePerChr'",
fluidRow(
column(
width = 6, offset = 2,
textInput(
"vcfPattern",
paste(
"Pattern of VCF files",
"(%s : chromosome placeholder)"
),
value = "^chr%s\\..*\\.vcf\\.gz$",
width = '100%',
placeholder = "^chr%s_.*\\.vcf\\.gz$")
)
)
)
)
),
wellPanel(
h4("VCF scan parameters"),
hr(),
fluidRow(
# ScanVcfParam ----
# INFO fields (except VEP) ----
column(
width = 8,
fluidRow(
column(
width = 12,
selectInput(
"vcfInfoKeys", "INFO fields",
choices = character(),
multiple = TRUE)
)
),
fluidRow(
column(
width = 2,
actionButton(
"tickAllInfo", "Select all",
icon = icon("check-square-o"))
),
column(
width = 2,
actionButton(
"untickAllInfo", "Deselect all",
icon = icon("square-o"))
),
column(
width = 7, offset = 1,
strong("Note:"),
"VEP field implicitely required"
)
)
),
# VEP prediction INFO field ----
column(
width = 2,
textInput(
"vepKey", "VEP field (INFO)",
value = get("vepKey", .tSVE),
placeholder = 'CSQ, ANN, ...')
),
# FORMAT fields ----
column(
width = 2,
selectInput(
"vcfFormatKeys", "FORMAT fields",
choices = character(),
multiple = TRUE),
strong("Note:"), "\"GT\" implicitely required"
)
)
),
wellPanel(
fluidRow(
# VCF import button! ----
column(
width = 2,
checkboxInput(
"autodetectGTimport", "Autodetect genotypes",
value = get("autodetectGTimport", .tSVE)
)
),
column(
width = 2, offset = 3,
br(),
actionButton(
"importVariants", "Import variants",
icon = icon("open", lib = "glyphicon")
)
),
column(
width = 4, offset = 1,
strong("Summary"),
htmlOutput("vcfSummary")
)
)
),
hr(),
fluidRow(
column(
width = 6,
h4("Content of folder"),
hr(),
DT::dataTableOutput("vcfContent")
),
column(
width = 6,
h4("VCF file(s) matching pattern"),
hr(),
DT::dataTableOutput("vcfFiles")
)
)
),
tabPanel(
title = "Annotations",
# Genome annotation package ----
wellPanel(
h4("Annotations"),
hr(),
fluidRow(
column(
width = 3,
selectInput(
"annotationPackage",
"Select installed EnsDb package",
choices = as.list(.EnsDbPacks),
width = '100%')
),
column(
3,
strong("EnsDb annotation"),
htmlOutput("ensembl_organism"),
htmlOutput("ensembl_version"),
htmlOutput("ensembl_genome")
),
column(
width = 6,
strong("Note"),
p(
"Only",
tags$code("EnsDb"),
"annotation packages supported for starters.",
"Ideally, ",
tags$code("TxDb"),
"and",
tags$code("OrganismDb"),
"packages supported soon.")
)
)
)
)
),
# Calculate frequencies ----
tabPanel(
title = "Frequencies", icon = icon("calculator "),
uiOutput("TVTBparamWarning"),
wellPanel(
fluidRow(
column(
width = 2,
strong("Latest changes:")
),
column(
width = 10,
uiOutput("latestFrequenciesCalculated")
)
)
),
wellPanel(
fluidRow(
h4("Overall frequencies"), hr(),
column(
width = 1, offset = 1,
actionButton(
"addOverallFrequencies", "Add",
icon = icon("plus")
)
),
column(
width = 1,
actionButton(
"removeOverallFrequencies", "Remove",
icon = icon("minus")
)
)
)
),
wellPanel(
fluidRow(
h4("Frequencies in phenotype levels"), hr(),
column(
width = 2,
selectInput(
"phenoAddFrequencies",
"Phenotype",
choices = character()
)
),
column(
width = 2, offset = 1,
actionButton(
"tickAllPhenoLevelsFreq",
"Select all",
icon = icon("check-square-o"),
width = "100%"
), br(),
actionButton(
"untickAllPhenoLevelsFreq",
"Deselect all",
icon = icon("square-o"),
width = "100%"
)
),
column(
width = 2, offset = 1,
br(),
actionButton(
"buttonFrequencies", "Refresh",
icon = icon("refresh"), width = "100%"
)
)
),
fluidRow(
column(
width = 12,
checkboxGroupInput(
"phenoLevelFreqCheckboxes", "Phenotype levels",
choices = c(), inline = TRUE
)
)
)
)
),
# VCF filter Rules ----
tabPanel(
title = "Filters", icon = icon("filter"),
wellPanel(
h4("Add filter"),
fluidRow(
column(
width = 1,
br(),
actionButton(
"addNewFilter", "Add filter",
icon = icon("plus")
)
),
column(
width = 1,
selectInput(
"newFilterClass", "Type",
choices = list(
"fixed" = "VcfFixedRules",
"info" = "VcfInfoRules",
"VEP" = "VcfVepRules"
),
selected = "VcfFixedRules"
)
),
column(
width = 1,
br(),
checkboxInput(
"newFilterActive", "Active?",
value = TRUE
)
),
column(
width = 7,
textInput(
"newFilterExpression", "Expression",
placeholder = paste(
"grepl(\"pass\", tolower(FILTER))",
"ALT + HET > 0",
"IMPACT %in% c(\"HIGH\", \"MODERATE\")",
sep = " - or - "
)
)
),
column(
width = 2,
br(),
actionButton(
"demoFilter", "Sample input",
icon = icon("font"), width = '100%')
)
),
fluidRow(
column(
width = 12,
uiOutput("vcfFilterTest")
)
),
fluidRow(
br(),
p(strong("Notes:")),
tags$ul(
tags$li(
"Filters are tested against variants to ensure the",
"validity of filters. Therefore, variants must be",
"loaded", em("before"), "filters can be created."
),
tags$li(
"Currently, filters are not re-tested if variants are",
"updated. If variants are refreshed, users should",
"ensure filters remain valid, or remove filters",
"manually."
),
tags$li(
"Users may ignore auto-correction of quotes in the",
strong("Expression"), "field. The application",
"automatically substitutes",
"curly quotes (single and double) by their",
"corresponding regular quotes (",
em("i.e."), code("\""), "and", code("'"), ")"
)
)
)
),
wellPanel(
fluidRow(
column(
width = 4, offset = 1,
strong("Summary"), br(),
uiOutput("filtersSummary")
),
column(
width = 2,
actionButton(
"filterVariants", "Apply filters",
icon = icon("filter"), width = "100%"
)
),
column(
width = 4,
strong("Summary"), br(),
uiOutput("filteredVcfSummary")
)
)
),
wellPanel(
fluidRow(
column(
width = 1,
strong("Class")
),
column(
width = 1,
strong("Active?")
),
column(
width = 8,
strong("Expression")
)
),
br(),
uiOutput("vcfFilterControls")
),
wellPanel(
fluidRow(
column(
width = 12,
verbatimTextOutput("vcfRules")
)
)
)
),
tabPanel(
title = "Views", icon = icon("picture-o"),
tabsetPanel(
id = "tabset.views",
# Genomic ranges view ----
tabPanel(
title = "Genomic ranges",
fluidRow(
column(
width = 12,
DT::dataTableOutput("rangesTableView")
)
)
),
# Variants view ----
tabPanel(
title = "Variants",
fluidRow(
column(
width = 12,
wellPanel(
uiOutput("vcfCols")
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("vcfRowRangesView")
)
)
),
# Variants INFO view ----
tabPanel(
title = "INFO",
fluidRow(
column(
width = 12,
wellPanel(
uiOutput("vcfInfoCols"),
br(),
p(strong("Notes:")),
tags$ul(
tags$li(
"Fields that contain more than one value",
"(", tags$em("e.g."), "confidence intervals)",
"may not display properly."
)
)
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("vcfInfoView")
)
)
),
# VEP predictions view ----
tabPanel(
title = "VEP",
fluidRow(
column(
width = 12,
wellPanel(
uiOutput("vepCols")
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("vcfVepView")
)
)
),
# Phenotypes view ----
tabPanel(
title = "Phenotypes",
fluidRow(
column(
width = 12,
"This panel displays phenotype information attached to",
"the imported VCF object.",
wellPanel(
uiOutput("phenoCols")
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("phenotypesView")
)
)
),
# Genotypes view ----
tabPanel(
title = "Genotypes",
tabsetPanel(
tabPanel(
title = "Matrix",
fluidRow(
wellPanel(
column(
width = 6,
uiOutput("genoNumRows")
),
column(
width = 6,
uiOutput("genoFirstRow")
)
)
),
fluidRow(
wellPanel(
column(
width = 6,
uiOutput("genoNumCols")
),
column(
width = 6,
uiOutput("genoFirstCol")
)
)
),
fluidRow(
column(
width = 12,
tableOutput("genotypesSample")
)
)
),
tabPanel(
title = "Heatmap",
p(
"Click the button after loading variants",
"to generate/update the figure",
actionButton(
"doGenoHeatmap", "Go!",
icon = icon("time")
)
),
fluidRow(
column(
width = 12,
plotOutput(
"heatmapGenotype",
height = get("genoHeatmap.height", .tSVE)
)
)
),
p(
"Notes",
tags$ul(
tags$li(
"This may take some time to plot.",
em(
"(~15s for 218 variants & 5844",
"samples)"
)
),
tags$li(
"Only genotypes codes found in the data",
"are listed in the legend, irrespective",
"of those defined in the",
tags$strong("Advanced settings"), "."
)
)
)
),
tabPanel(
title = "Info",
shiny::h4("Encoding"),
uiOutput("genotypeEncoding")
)
)
)
)
),
navbarMenu(
title = "Settings", icon = icon("wrench"),
# Advanced settings ----
tabPanel(
title = "Advanced",
wellPanel(
h4("Genotypes"),
hr(),
fluidRow(
column(
width = 1,
br(),
actionButton(
"genotypeAutofill", "Autofill", icon("magic")
)
),
column(
width = 3,
selectInput(
"refGenotypes", "Reference homozygote genotype(s)",
choices = c(get("refGT", .tSVE), get("hetGT", .tSVE), get("altGT", .tSVE)),
selected = get("refGT", .tSVE),
multiple = TRUE
)
),
column(
width = 4,
selectInput(
"hetGenotypes", "Heterozygote genotype(s)",
choices = c(get("refGT", .tSVE), get("hetGT", .tSVE), get("altGT", .tSVE)),
selected = get("hetGT", .tSVE),
multiple = TRUE
)
),
column(
width = 4,
selectInput(
"altGenotypes", "Alternate homozygote genotype(s)",
choices = c(get("refGT", .tSVE), get("hetGT", .tSVE), get("altGT", .tSVE)),
selected = get("altGT", .tSVE),
multiple = TRUE
)
)
),
fluidRow(
column(
width = 1,
textInput(
"refSuffix", "Suffix",
value = get("refSuffix", .tSVE),
placeholder = get("refSuffix", .tSVE)
)
),
column(
width = 1, offset = 3,
textInput(
"hetSuffix", "Suffix",
value = get("hetSuffix", .tSVE),
placeholder = get("hetSuffix", .tSVE)
)
),
column(
width = 1, offset = 3,
textInput(
"altSuffix", "Suffix",
value = get("altSuffix", .tSVE),
placeholder = get("altSuffix", .tSVE)
)
)
),
fluidRow(
column(
width = 12,
tags$strong("Notes:"), br(),
tags$ul(
tags$li(
"The",tags$strong("choices"),"of genotypes are updated when",
"new variants are imported."
),
tags$li(
"The",tags$strong("selected"),"genotypes may be automatically",
"updated immediately after import using the",
tags$strong("Autodetect genotypes"), "checkbox in the",
tags$strong("Input"), "panel, or manually after import using",
"the", tags$strong("Autofill"), "button in this panel."
),
tags$li(
"Selected genotypes are not allowed to overlap.",
"Selecting a genotype removes it from the choices",
"available in the other widgets. As a consequence, genotypes",
"must first be unselected from a widget before it can be",
"selected in another one."
)
)
)
)
),
wellPanel(
h4("INFO suffixes"),
hr(),
fluidRow(
column(
width = 3,
textInput(
"aafSuffix", "ALT allele freq.",
value = get("aafSuffix", .tSVE),
placeholder = get("aafSuffix", .tSVE)
)
),
column(
width = 3,
textInput(
"mafSuffix", "Minor allele freq.",
value = get("mafSuffix", .tSVE),
placeholder = get("mafSuffix", .tSVE)
)
)
)
),
wellPanel(
h4("VCF file(s)"),
hr(),
fluidRow(
column(
width = 2,
numericInput(
"yieldSize", "VCF yield size (100-100^3)",
min = 100, max = 100E3,
value = 4E3,
step = 1E3
)
)
)
)
),
tabPanel(
title = "Parallel",
wellPanel(
h4("Parallel settings"),
hr(),
fluidRow(
column(
width = 3,
numericInput(
"bpCores", "Cores",
value = .PS[["default.bpCores"]],
min = 1, max = .PS[["default.bpCores"]], step = 1)
),
column(
width = 3,
selectInput(
"bpConfig", "Cluster configuration",
choices = structure(
.PS[["choices.bpClass"]],
names = gsub(
"Param", "", .PS[["choices.bpClass"]])),
selected = .PS[["default.bpClass"]])
),
conditionalPanel(
condition = "input.bpConfig != 'SerialParam'",
column(
width = 3,
selectInput(
"bpType", "Cluster type",
choices = structure(
.PS[["choices.bpType"]],
names = gsub(
"Param", "", .PS[["choices.bpType"]])),
selected = .PS[["default.bpType"]])
)
)
) # fluidRow
), # wellPanel
wellPanel(
fluidRow(
column(
width = 12,
h1("Platforms tested"),
DT::dataTableOutput("parallelReport")
)
)
),
tags$h4(
"Notes",
tags$ul(
tags$li(
"Report"
), br(),
tags$ul(
tags$li(
tags$strong("Hang:"),
"Application hangs while CPUs work infinitely at full capacity."
)
)
)
)
)
),
# Session settings view ----
tabPanel(
title = "Session",
tabsetPanel(
id = "tabset.session",
tabPanel(
title = "Session info",
verbatimTextOutput("sessionInfo")
),
tabPanel(
title = "TVTB settings",
verbatimTextOutput("TVTBsettings")
),
tabPanel(
title = "General settings",
verbatimTextOutput("generalSettings")
),
tabPanel(
title = "Advanced settings",
verbatimTextOutput("advancedSettings")
),
tabPanel(
title = "ExpandedVCF",
"This panel displays the structure of the imported",
tags$code("ExpandedVCF"), "object:",
verbatimTextOutput("ExpandedVCF"),
"and the attached", tags$code("metadata"), ":",
verbatimTextOutput("vcfMetadata")
),
tabPanel(
title = "VEP",
verbatimTextOutput("vepStructure")
),
tabPanel(
title = "Errors",
verbatimTextOutput("Errors")
)
)
)
))
| /inst/shinyApp/ui.R | permissive | lptolik/TVTB | R | false | false | 28,842 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(navbarPage(
theme = "bootstrap.css",
title = "tSVE",
id = "tSVE",
navbarMenu(
title = "Input",
tabPanel(
title = "Phenotypes",
# Phenotype options ----
wellPanel(
h4("Phenotypes"),
hr(),
fluidRow(
column(
width = 2,
p(strong("Phenotype file")),
actionButton(
"selectPheno", "Browse",
icon = icon("file"), width = '100%')
),
column(
width = 6,
strong("File"),
br(),
uiOutput("phenoFile"),
# Wrap long file names
tags$head(tags$style(
"#phenoFile{
display:block;
word-wrap:break-word;
}"
))
),
column(
width = 4,
strong("Summary"), br(),
htmlOutput("phenoFileSummary")
)
),
fluidRow(
column(
width = 2,
br(),
actionButton(
"demoPheno", "Sample file",
icon = icon("file-text"), width = '100%')
)
)
),
fluidRow(
column(
width = 12,
tags$h4("Notes"),
"The phenotype file must be formatted as follows:",
tags$ul(
tags$li(
"Fields separator must be 'white space', the default",
tags$a(
href="http://stat.ethz.ch/R-manual/R-devel/library/utils/html/read.table.html",
tags$code("read.table")
),
"field separator."
),
tags$li(
"First row must be phenotype names."
),
tags$li(
"First column must be samples identifiers matching those in",
"the VCF file(s)."
)
)
)
)
),
tabPanel(
title = "Genomic ranges",
# GRanges options ----
wellPanel(
h4("Genomic ranges"),
hr(),
fluidRow(
column(
width = 2,
selectInput(
"grangesInputMode", "Input type",
choices = list(
"BED file" = "bed",
"UCSC browser" = "ucsc",
"EnsDb package" = "EnsDb"
),
selected = "bed",
width = '100%')
),
column(
width = 4, offset = 6,
strong("Summary"),
htmlOutput("rangesSummary")
)
),
fluidRow(
conditionalPanel(
condition = "input.grangesInputMode == 'bed'",
fluidRow(
column(
width = 2,
br(),
actionButton(
"selectBed", "Browse",
icon = icon("file"), width = '100%')
),
column(
width = 6,
strong("File"), br(),
uiOutput("bedFile")
),
# Wrap long file names
tags$head(tags$style(
"#bedFile{
display:block;
scanVcfHeader word-wrap:break-word;
}"
))
),
fluidRow(
column(
width = 2,
br(),
actionButton(
"demoBed", "Sample file",
icon = icon("file-text"), width = '100%')
)
)
),
conditionalPanel(
condition = "input.grangesInputMode == 'ucsc'",
column(
width = 8,
textInput(
"ucscRanges", "UCSC-type genomic ranges",
value = "",
placeholder = paste(
"chr21:33,031,597-33,041,570",
"chr2:2,031,597-2,041,570",
"...",
sep = " ; "),
width = "100%")
),
column(
width = 2,
br(),
actionButton(
"demoUCSC", "Sample input",
icon = icon("font"), width = '100%')
)
),
conditionalPanel(
condition = "input.grangesInputMode == 'EnsDb'",
column(
width = 2,
selectInput(
"ensDb.type", "Type",
choices = list("Gene name" = "Genename"),
selected = "Genename")
),
column(
width = 1,
selectInput(
"ensDb.condition", "Condition",
choices = c("=", "!=", "like", "in"),
selected = "=")
),
column(
2,
textInput(
"ensDb.value", "Value",
value = "",
placeholder = "SLC24A5,IL17A,...")
),
column(
width = 2,
actionButton(
"demoEnsDb", "Sample input",
icon = icon("font"), width = '100%')
),
column(
width = 4, offset = 1,
strong("Note"),
p(
"For the ", code("like"), "filter,",
"use ", code("%"), "as wildcard."
)
),
tabsetPanel(
id = "ensDb.resultTab",
selected = "Genes",
tabPanel(
title = 'Genes',
DT::dataTableOutput("ensDb.Genes")
)#, # TODO
# tabPanel('Transcripts',
# dataTableOutput("Transcripts")
# ),
# tabPanel('Exons',
# dataTableOutput("Exons")
# )
)
)
)
)
),
tabPanel(
title = "Variants",
# VCF options ----
wellPanel(
h4("VCF file(s)"),
hr(),
fluidRow(
column(
width = 2,
selectInput(
"vcfInputMode", "VCF input type",
choices = list(
"Single VCF" = "SingleVcf",
"One per chromosome" = "OnePerChr"
),
selected = "OnePerChr",
width = '100%')
),
conditionalPanel(
condition = "input.vcfInputMode == 'SingleVcf'",
column(
width = 2,
br(),
actionButton(
"selectVcf", "Browse",
icon = icon("file"), width = '100%')
),
column(
width = 2,
br(),
actionButton(
"demoVcf", "Sample file",
icon = icon("file-text"), width = '100%')
),
column(
width = 4, offset = 2,
strong("Summary"),
br(),
textOutput("selectedVcf"),
# Wrap long file names
tags$head(tags$style(
"#selectedVcf{
display:block;
word-wrap:break-word;
}"
))
)
),
conditionalPanel(
condition = "input.vcfInputMode == 'OnePerChr'",
column(
width = 6,
textInput(
"vcfFolder", "Folder of VCF files",
value = system.file("extdata", package = "TVTB"),
width = '100%',
placeholder = "/path/to/VCF/folder")
),
column(
width = 4,
strong("Summary"),
htmlOutput("vcfFolderSummary")
)
)
),
fluidRow(
conditionalPanel(
condition = "input.vcfInputMode == 'OnePerChr'",
fluidRow(
column(
width = 6, offset = 2,
textInput(
"vcfPattern",
paste(
"Pattern of VCF files",
"(%s : chromosome placeholder)"
),
value = "^chr%s\\..*\\.vcf\\.gz$",
width = '100%',
placeholder = "^chr%s_.*\\.vcf\\.gz$")
)
)
)
)
),
wellPanel(
h4("VCF scan parameters"),
hr(),
fluidRow(
# ScanVcfParam ----
# INFO fields (except VEP) ----
column(
width = 8,
fluidRow(
column(
width = 12,
selectInput(
"vcfInfoKeys", "INFO fields",
choices = character(),
multiple = TRUE)
)
),
fluidRow(
column(
width = 2,
actionButton(
"tickAllInfo", "Select all",
icon = icon("check-square-o"))
),
column(
width = 2,
actionButton(
"untickAllInfo", "Deselect all",
icon = icon("square-o"))
),
column(
width = 7, offset = 1,
strong("Note:"),
"VEP field implicitely required"
)
)
),
# VEP prediction INFO field ----
column(
width = 2,
textInput(
"vepKey", "VEP field (INFO)",
value = get("vepKey", .tSVE),
placeholder = 'CSQ, ANN, ...')
),
# FORMAT fields ----
column(
width = 2,
selectInput(
"vcfFormatKeys", "FORMAT fields",
choices = character(),
multiple = TRUE),
strong("Note:"), "\"GT\" implicitely required"
)
)
),
wellPanel(
fluidRow(
# VCF import button! ----
column(
width = 2,
checkboxInput(
"autodetectGTimport", "Autodetect genotypes",
value = get("autodetectGTimport", .tSVE)
)
),
column(
width = 2, offset = 3,
br(),
actionButton(
"importVariants", "Import variants",
icon = icon("open", lib = "glyphicon")
)
),
column(
width = 4, offset = 1,
strong("Summary"),
htmlOutput("vcfSummary")
)
)
),
hr(),
fluidRow(
column(
width = 6,
h4("Content of folder"),
hr(),
DT::dataTableOutput("vcfContent")
),
column(
width = 6,
h4("VCF file(s) matching pattern"),
hr(),
DT::dataTableOutput("vcfFiles")
)
)
),
tabPanel(
title = "Annotations",
# Genome annotation package ----
wellPanel(
h4("Annotations"),
hr(),
fluidRow(
column(
width = 3,
selectInput(
"annotationPackage",
"Select installed EnsDb package",
choices = as.list(.EnsDbPacks),
width = '100%')
),
column(
3,
strong("EnsDb annotation"),
htmlOutput("ensembl_organism"),
htmlOutput("ensembl_version"),
htmlOutput("ensembl_genome")
),
column(
width = 6,
strong("Note"),
p(
"Only",
tags$code("EnsDb"),
"annotation packages supported for starters.",
"Ideally, ",
tags$code("TxDb"),
"and",
tags$code("OrganismDb"),
"packages supported soon.")
)
)
)
)
),
# Calculate frequencies ----
tabPanel(
title = "Frequencies", icon = icon("calculator "),
uiOutput("TVTBparamWarning"),
wellPanel(
fluidRow(
column(
width = 2,
strong("Latest changes:")
),
column(
width = 10,
uiOutput("latestFrequenciesCalculated")
)
)
),
wellPanel(
fluidRow(
h4("Overall frequencies"), hr(),
column(
width = 1, offset = 1,
actionButton(
"addOverallFrequencies", "Add",
icon = icon("plus")
)
),
column(
width = 1,
actionButton(
"removeOverallFrequencies", "Remove",
icon = icon("minus")
)
)
)
),
wellPanel(
fluidRow(
h4("Frequencies in phenotype levels"), hr(),
column(
width = 2,
selectInput(
"phenoAddFrequencies",
"Phenotype",
choices = character()
)
),
column(
width = 2, offset = 1,
actionButton(
"tickAllPhenoLevelsFreq",
"Select all",
icon = icon("check-square-o"),
width = "100%"
), br(),
actionButton(
"untickAllPhenoLevelsFreq",
"Deselect all",
icon = icon("square-o"),
width = "100%"
)
),
column(
width = 2, offset = 1,
br(),
actionButton(
"buttonFrequencies", "Refresh",
icon = icon("refresh"), width = "100%"
)
)
),
fluidRow(
column(
width = 12,
checkboxGroupInput(
"phenoLevelFreqCheckboxes", "Phenotype levels",
choices = c(), inline = TRUE
)
)
)
)
),
# VCF filter Rules ----
tabPanel(
title = "Filters", icon = icon("filter"),
wellPanel(
h4("Add filter"),
fluidRow(
column(
width = 1,
br(),
actionButton(
"addNewFilter", "Add filter",
icon = icon("plus")
)
),
column(
width = 1,
selectInput(
"newFilterClass", "Type",
choices = list(
"fixed" = "VcfFixedRules",
"info" = "VcfInfoRules",
"VEP" = "VcfVepRules"
),
selected = "VcfFixedRules"
)
),
column(
width = 1,
br(),
checkboxInput(
"newFilterActive", "Active?",
value = TRUE
)
),
column(
width = 7,
textInput(
"newFilterExpression", "Expression",
placeholder = paste(
"grepl(\"pass\", tolower(FILTER))",
"ALT + HET > 0",
"IMPACT %in% c(\"HIGH\", \"MODERATE\")",
sep = " - or - "
)
)
),
column(
width = 2,
br(),
actionButton(
"demoFilter", "Sample input",
icon = icon("font"), width = '100%')
)
),
fluidRow(
column(
width = 12,
uiOutput("vcfFilterTest")
)
),
fluidRow(
br(),
p(strong("Notes:")),
tags$ul(
tags$li(
"Filters are tested against variants to ensure the",
"validity of filters. Therefore, variants must be",
"loaded", em("before"), "filters can be created."
),
tags$li(
"Currently, filters are not re-tested if variants are",
"updated. If variants are refreshed, users should",
"ensure filters remain valid, or remove filters",
"manually."
),
tags$li(
"Users may ignore auto-correction of quotes in the",
strong("Expression"), "field. The application",
"automatically substitutes",
"curly quotes (single and double) by their",
"corresponding regular quotes (",
em("i.e."), code("\""), "and", code("'"), ")"
)
)
)
),
wellPanel(
fluidRow(
column(
width = 4, offset = 1,
strong("Summary"), br(),
uiOutput("filtersSummary")
),
column(
width = 2,
actionButton(
"filterVariants", "Apply filters",
icon = icon("filter"), width = "100%"
)
),
column(
width = 4,
strong("Summary"), br(),
uiOutput("filteredVcfSummary")
)
)
),
wellPanel(
fluidRow(
column(
width = 1,
strong("Class")
),
column(
width = 1,
strong("Active?")
),
column(
width = 8,
strong("Expression")
)
),
br(),
uiOutput("vcfFilterControls")
),
wellPanel(
fluidRow(
column(
width = 12,
verbatimTextOutput("vcfRules")
)
)
)
),
tabPanel(
title = "Views", icon = icon("picture-o"),
tabsetPanel(
id = "tabset.views",
# Genomic ranges view ----
tabPanel(
title = "Genomic ranges",
fluidRow(
column(
width = 12,
DT::dataTableOutput("rangesTableView")
)
)
),
# Variants view ----
tabPanel(
title = "Variants",
fluidRow(
column(
width = 12,
wellPanel(
uiOutput("vcfCols")
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("vcfRowRangesView")
)
)
),
# Variants INFO view ----
tabPanel(
title = "INFO",
fluidRow(
column(
width = 12,
wellPanel(
uiOutput("vcfInfoCols"),
br(),
p(strong("Notes:")),
tags$ul(
tags$li(
"Fields that contain more than one value",
"(", tags$em("e.g."), "confidence intervals)",
"may not display properly."
)
)
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("vcfInfoView")
)
)
),
# VEP predictions view ----
tabPanel(
title = "VEP",
fluidRow(
column(
width = 12,
wellPanel(
uiOutput("vepCols")
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("vcfVepView")
)
)
),
# Phenotypes view ----
tabPanel(
title = "Phenotypes",
fluidRow(
column(
width = 12,
"This panel displays phenotype information attached to",
"the imported VCF object.",
wellPanel(
uiOutput("phenoCols")
)
)
),
fluidRow(
column(
width = 12,
DT::dataTableOutput("phenotypesView")
)
)
),
# Genotypes view ----
tabPanel(
title = "Genotypes",
tabsetPanel(
tabPanel(
title = "Matrix",
fluidRow(
wellPanel(
column(
width = 6,
uiOutput("genoNumRows")
),
column(
width = 6,
uiOutput("genoFirstRow")
)
)
),
fluidRow(
wellPanel(
column(
width = 6,
uiOutput("genoNumCols")
),
column(
width = 6,
uiOutput("genoFirstCol")
)
)
),
fluidRow(
column(
width = 12,
tableOutput("genotypesSample")
)
)
),
tabPanel(
title = "Heatmap",
p(
"Click the button after loading variants",
"to generate/update the figure",
actionButton(
"doGenoHeatmap", "Go!",
icon = icon("time")
)
),
fluidRow(
column(
width = 12,
plotOutput(
"heatmapGenotype",
height = get("genoHeatmap.height", .tSVE)
)
)
),
p(
"Notes",
tags$ul(
tags$li(
"This may take some time to plot.",
em(
"(~15s for 218 variants & 5844",
"samples)"
)
),
tags$li(
"Only genotypes codes found in the data",
"are listed in the legend, irrespective",
"of those defined in the",
tags$strong("Advanced settings"), "."
)
)
)
),
tabPanel(
title = "Info",
shiny::h4("Encoding"),
uiOutput("genotypeEncoding")
)
)
)
)
),
navbarMenu(
title = "Settings", icon = icon("wrench"),
# Advanced settings ----
tabPanel(
title = "Advanced",
wellPanel(
h4("Genotypes"),
hr(),
fluidRow(
column(
width = 1,
br(),
actionButton(
"genotypeAutofill", "Autofill", icon("magic")
)
),
column(
width = 3,
selectInput(
"refGenotypes", "Reference homozygote genotype(s)",
choices = c(get("refGT", .tSVE), get("hetGT", .tSVE), get("altGT", .tSVE)),
selected = get("refGT", .tSVE),
multiple = TRUE
)
),
column(
width = 4,
selectInput(
"hetGenotypes", "Heterozygote genotype(s)",
choices = c(get("refGT", .tSVE), get("hetGT", .tSVE), get("altGT", .tSVE)),
selected = get("hetGT", .tSVE),
multiple = TRUE
)
),
column(
width = 4,
selectInput(
"altGenotypes", "Alternate homozygote genotype(s)",
choices = c(get("refGT", .tSVE), get("hetGT", .tSVE), get("altGT", .tSVE)),
selected = get("altGT", .tSVE),
multiple = TRUE
)
)
),
fluidRow(
column(
width = 1,
textInput(
"refSuffix", "Suffix",
value = get("refSuffix", .tSVE),
placeholder = get("refSuffix", .tSVE)
)
),
column(
width = 1, offset = 3,
textInput(
"hetSuffix", "Suffix",
value = get("hetSuffix", .tSVE),
placeholder = get("hetSuffix", .tSVE)
)
),
column(
width = 1, offset = 3,
textInput(
"altSuffix", "Suffix",
value = get("altSuffix", .tSVE),
placeholder = get("altSuffix", .tSVE)
)
)
),
fluidRow(
column(
width = 12,
tags$strong("Notes:"), br(),
tags$ul(
tags$li(
"The",tags$strong("choices"),"of genotypes are updated when",
"new variants are imported."
),
tags$li(
"The",tags$strong("selected"),"genotypes may be automatically",
"updated immediately after import using the",
tags$strong("Autodetect genotypes"), "checkbox in the",
tags$strong("Input"), "panel, or manually after import using",
"the", tags$strong("Autofill"), "button in this panel."
),
tags$li(
"Selected genotypes are not allowed to overlap.",
"Selecting a genotype removes it from the choices",
"available in the other widgets. As a consequence, genotypes",
"must first be unselected from a widget before it can be",
"selected in another one."
)
)
)
)
),
wellPanel(
h4("INFO suffixes"),
hr(),
fluidRow(
column(
width = 3,
textInput(
"aafSuffix", "ALT allele freq.",
value = get("aafSuffix", .tSVE),
placeholder = get("aafSuffix", .tSVE)
)
),
column(
width = 3,
textInput(
"mafSuffix", "Minor allele freq.",
value = get("mafSuffix", .tSVE),
placeholder = get("mafSuffix", .tSVE)
)
)
)
),
wellPanel(
h4("VCF file(s)"),
hr(),
fluidRow(
column(
width = 2,
numericInput(
"yieldSize", "VCF yield size (100-100^3)",
min = 100, max = 100E3,
value = 4E3,
step = 1E3
)
)
)
)
),
tabPanel(
title = "Parallel",
wellPanel(
h4("Parallel settings"),
hr(),
fluidRow(
column(
width = 3,
numericInput(
"bpCores", "Cores",
value = .PS[["default.bpCores"]],
min = 1, max = .PS[["default.bpCores"]], step = 1)
),
column(
width = 3,
selectInput(
"bpConfig", "Cluster configuration",
choices = structure(
.PS[["choices.bpClass"]],
names = gsub(
"Param", "", .PS[["choices.bpClass"]])),
selected = .PS[["default.bpClass"]])
),
conditionalPanel(
condition = "input.bpConfig != 'SerialParam'",
column(
width = 3,
selectInput(
"bpType", "Cluster type",
choices = structure(
.PS[["choices.bpType"]],
names = gsub(
"Param", "", .PS[["choices.bpType"]])),
selected = .PS[["default.bpType"]])
)
)
) # fluidRow
), # wellPanel
wellPanel(
fluidRow(
column(
width = 12,
h1("Platforms tested"),
DT::dataTableOutput("parallelReport")
)
)
),
tags$h4(
"Notes",
tags$ul(
tags$li(
"Report"
), br(),
tags$ul(
tags$li(
tags$strong("Hang:"),
"Application hangs while CPUs work infinitely at full capacity."
)
)
)
)
)
),
# Session settings view ----
tabPanel(
title = "Session",
tabsetPanel(
id = "tabset.session",
tabPanel(
title = "Session info",
verbatimTextOutput("sessionInfo")
),
tabPanel(
title = "TVTB settings",
verbatimTextOutput("TVTBsettings")
),
tabPanel(
title = "General settings",
verbatimTextOutput("generalSettings")
),
tabPanel(
title = "Advanced settings",
verbatimTextOutput("advancedSettings")
),
tabPanel(
title = "ExpandedVCF",
"This panel displays the structure of the imported",
tags$code("ExpandedVCF"), "object:",
verbatimTextOutput("ExpandedVCF"),
"and the attached", tags$code("metadata"), ":",
verbatimTextOutput("vcfMetadata")
),
tabPanel(
title = "VEP",
verbatimTextOutput("vepStructure")
),
tabPanel(
title = "Errors",
verbatimTextOutput("Errors")
)
)
)
))
|
library(readr)
library(dplyr)
library(sf)
library(leaflet)
library(htmltools)
library(tidytransit)
# Census tract polygons for the five NYC boroughs (county FIPS: Bronx 005,
# Kings 047, New York 061, Queens 081, Richmond 085), reprojected to
# WGS84 lon/lat as required by leaflet.
tracts <- st_read("data/tracts/cb_2019_36_tract_500k.shp") %>%
  filter(COUNTYFP %in% c("005", "047", "061", "081", "085")) %>%
  st_transform(st_crs("+proj=longlat +datum=WGS84 +no_defs"))
# Subway line geometries, reprojected to WGS84.
subway <- st_read(
  "data/subway/geo_export_f573270e-5856-4601-95ce-7c8c24e78273.shp") %>%
  st_transform(st_crs("+proj=longlat +datum=WGS84 +no_defs"))
# Bicycle route geometries, reprojected to WGS84.
bikes <- st_read(
  "data/bicycle/geo_export_9689df31-46e7-4799-8c5c-1e9521582b36.shp") %>%
  st_transform(st_crs("+proj=longlat +datum=WGS84 +no_defs"))
# Read a borough's GTFS bus feed and return its route shapes as an sf object.
#
# borough: lowercase borough name matching the zip file naming scheme,
#   e.g. "manhattan" reads "data/bus_manhattan.zip".
# Returns: the `shapes` sf layer of the converted feed.
create_bus_sf <- function(borough) {
  # Only the tables needed for route geometry are read from the feed.
  zip_path <- paste0("data/bus_", borough, ".zip")
  feed <- read_gtfs(zip_path, c("shapes", "stops"))
  gtfs_as_sf(feed)$shapes
}
# Per-borough bus route shapes; combined below into a single sf object
# covering the whole city.
bus_manhattan <- create_bus_sf("manhattan")
bus_brooklyn <- create_bus_sf("brooklyn")
bus_queens <- create_bus_sf("queens")
bus_bronx <- create_bus_sf("bronx")
bus_staten <- create_bus_sf("staten_island")
bus <- rbind(bus_manhattan, bus_brooklyn, bus_queens, bus_bronx, bus_staten)
# Household vehicle availability by census tract (ACS 2019 5-year).
veh <- read_csv("data/nyc_vehicles_avail_acs2019_5y_tract.csv")
# Share of households with zero vehicles. Tracts reporting no households
# are dropped to avoid division by zero; columns are renamed to COUNTYFP /
# TRACTCE so they join directly onto `tracts` below.
vehpct <- veh %>%
  filter(total > 0) %>%
  mutate(pct = zero / total) %>%
  select(COUNTYFP = county, TRACTCE = tract, total, pct)
# Fill palette over [0, 1] for the share of car-free households.
pal_veh <- colorNumeric(
  palette = "inferno",
  domain = c(0, 1)
)
# Same palette reversed; used only in the legend so that it reads
# high-to-low from top to bottom.
pal_veh_rev <- colorNumeric(
  palette = "inferno",
  domain = c(0, 1),
  reverse = TRUE
)
# Build the HTML tooltip text for each tract.
#
# percent: numeric vector, share of households without a car (0-1).
# households: vector of household counts.
# Returns: a character vector of HTML snippets, one per tract.
labelfunc <- function(percent, households) {
  # Vectorized over tracts; paste0 recycles the literal fragments.
  pct_txt <- scales::percent(percent, accuracy = 1)
  paste0(
    '<span style="font-weight:bold;font-size:14pt">',
    pct_txt, "</span><br/>",
    " of households do NOT have access to a car<br/>(",
    households, " households)"
  )
}
# Join tract geometries to the vehicle data (inner join drops tracts with
# no ACS record) and pre-render the HTML tooltip for each tract.
joined <- tracts %>%
  inner_join(vehpct, by = c("COUNTYFP", "TRACTCE")) %>%
  mutate(lab = labelfunc(pct, total))
# Bounding box of the joined data; drives the initial view and pan limits.
joined_bbox <- st_bbox(joined)
# Build the interactive map: a choropleth of car-free household share by
# tract, with toggleable transit overlays (subway, bus, bike) drawn on
# stacked map panes so their z-order is fixed regardless of add order.
leaf <- joined %>%
  leaflet(options = leafletOptions(minZoom = 10)) %>%
  # Tract polygons, colored by % of households without a car.
  addPolygons(
    fillColor = ~pal_veh(pct),
    color = ~pal_veh(pct),
    weight = 2,
    fillOpacity = 1,
    label = lapply(joined$lab, htmltools::HTML),
    labelOptions = labelOptions(style = list(
      "text-align" = "center",
      "background-color" = "#333333",
      "color" = "white"
    )),
    group = "Car ownership"
  ) %>%
  addProviderTiles(providers$Stamen.TonerBackground) %>%
  addProviderTiles(
    providers$Stamen.TerrainLabels,
    group = "Place labels"
  ) %>%
  # Reversed palette + decreasing sort so the legend reads high-to-low.
  addLegend(
    "topleft",
    pal = pal_veh_rev,
    values = 0:5/5,
    title = "% of households<br/>WITHOUT access<br/>to a car",
    opacity = 1,
    labFormat = function(type, x) scales::percent(sort(x, decreasing = TRUE)),
    className = "info legend leaf-legend",
    group = "Car ownership"
  ) %>%
  # Custom panes: bikes (470) under buses (475) under subways (480).
  addMapPane("bikes", 470) %>%
  addMapPane("buses", 475) %>%
  addMapPane("subways", 480) %>%
  # Each network is drawn twice: a wide dark casing line under a narrower
  # colored line, giving the routes an outlined appearance.
  addPolylines(
    data = subway,
    opacity = 1,
    color = "#333333",
    weight = 7,
    options = pathOptions(pane = "subways"),
    group = "Subway lines"
  ) %>%
  addPolylines(
    data = subway,
    opacity = 1,
    color = "#44ffaa",
    weight = 5,
    options = pathOptions(pane = "subways"),
    group = "Subway lines"
  ) %>%
  addPolylines(
    data = bikes,
    opacity = 1,
    color = "#333333",
    weight = 4,
    options = pathOptions(pane = "bikes"),
    group = "Bike routes"
  ) %>%
  addPolylines(
    data = bikes,
    opacity = 1,
    color = "#44aaff",
    weight = 2,
    options = pathOptions(pane = "bikes"),
    group = "Bike routes"
  ) %>%
  addPolylines(
    data = bus,
    opacity = 1,
    color = "#333333",
    weight = 4,
    options = pathOptions(pane = "buses"),
    group = "Bus routes"
  ) %>%
  addPolylines(
    data = bus,
    opacity = 1,
    color = "#ff3333",
    weight = 2,
    options = pathOptions(pane = "buses"),
    group = "Bus routes"
  ) %>%
  addLayersControl(
    overlayGroups = c(
      "Car ownership",
      "Subway lines",
      "Bus routes",
      "Bike routes",
      "Place labels"
    ),
    options = layersControlOptions(collapsed = FALSE)
  ) %>%
  # Start with only the choropleth visible; overlays are opt-in.
  hideGroup("Subway lines") %>%
  hideGroup("Place labels") %>%
  hideGroup("Bike routes") %>%
  hideGroup("Bus routes") %>%
  setMaxBounds(
    joined_bbox[[1]], joined_bbox[[2]], joined_bbox[[3]], joined_bbox[[4]]
  ) %>%
  setView(
    # BUG FIX: the original called mean(joined_bbox[[1]], joined_bbox[[3]]),
    # which passes the second value as mean()'s `trim` argument and silently
    # returns the first value alone -- centering the map on the western bbox
    # edge. The values must be combined into one vector.
    mean(c(joined_bbox[[1]], joined_bbox[[3]])),
    # Latitude weighted 2:1 toward the northern bbox edge (same value the
    # original sum(a, b*2)/3 produced, written as an explicit expression).
    (joined_bbox[[2]] + 2 * joined_bbox[[4]]) / 3,
    11
  )
# Make the widget fill the viewport (minus a small margin) by default.
leaf$sizingPolicy$defaultHeight <- "calc(100vh - 20px)"
# Inline the stylesheet and wrap the map so the result renders as a
# standalone browsable HTML page.
css <- read_file("style.css")
browsable(
  tagList(list(
    tags$head(
      tags$style(css)
    ),
    leaf
  ))
)
# TO DO
# Add PUMA names to tooltips?
# Get PUMS data for income x car ownership analysis (Megan?)
# Set up git repo
# Add title
# Figure out how to publish | /maps.R | no_license | mnbram/nyc-carless | R | false | false | 4,632 | r | library(readr)
library(dplyr)
library(sf)
library(leaflet)
library(htmltools)
library(tidytransit)
tracts <- st_read("data/tracts/cb_2019_36_tract_500k.shp") %>%
filter(COUNTYFP %in% c("005", "047", "061", "081", "085")) %>%
st_transform(st_crs("+proj=longlat +datum=WGS84 +no_defs"))
subway <- st_read(
"data/subway/geo_export_f573270e-5856-4601-95ce-7c8c24e78273.shp") %>%
st_transform(st_crs("+proj=longlat +datum=WGS84 +no_defs"))
bikes <- st_read(
"data/bicycle/geo_export_9689df31-46e7-4799-8c5c-1e9521582b36.shp") %>%
st_transform(st_crs("+proj=longlat +datum=WGS84 +no_defs"))
create_bus_sf <- function(borough) {
read_gtfs(
paste0("data/bus_", borough, ".zip"),
c("shapes", "stops")
) %>%
gtfs_as_sf() %>%
`$`(shapes)
}
bus <- rbind(
bus_manhattan <- create_bus_sf("manhattan"),
bus_brooklyn <- create_bus_sf("brooklyn"),
bus_queens <- create_bus_sf("queens"),
bus_bronx <- create_bus_sf("bronx"),
bus_staten <- create_bus_sf("staten_island")
)
veh <- read_csv("data/nyc_vehicles_avail_acs2019_5y_tract.csv")
vehpct <- veh %>%
filter(total > 0) %>%
mutate(pct = zero / total) %>%
select(COUNTYFP = county, TRACTCE = tract, total, pct)
pal_veh <- colorNumeric(
palette = "inferno",
domain = c(0, 1)
)
pal_veh_rev <- colorNumeric(
palette = "inferno",
domain = c(0, 1),
reverse = TRUE
)
labelfunc <- function(percent, households) {
paste0(
'<span style="font-weight:bold;font-size:14pt">',
scales::percent(percent, accuracy = 1), "</span><br/>",
" of households do NOT have access to a car<br/>(",
households, " households)"
)
}
joined <- tracts %>%
inner_join(vehpct, by = c("COUNTYFP", "TRACTCE")) %>%
mutate(lab = labelfunc(pct, total))
joined_bbox <- st_bbox(joined)
# Build the interactive map: a choropleth of car-free household share by
# tract, with toggleable transit overlays (subway, bus, bike) drawn on
# stacked map panes so their z-order is fixed regardless of add order.
leaf <- joined %>%
  leaflet(options = leafletOptions(minZoom = 10)) %>%
  # Tract polygons, colored by % of households without a car.
  addPolygons(
    fillColor = ~pal_veh(pct),
    color = ~pal_veh(pct),
    weight = 2,
    fillOpacity = 1,
    label = lapply(joined$lab, htmltools::HTML),
    labelOptions = labelOptions(style = list(
      "text-align" = "center",
      "background-color" = "#333333",
      "color" = "white"
    )),
    group = "Car ownership"
  ) %>%
  addProviderTiles(providers$Stamen.TonerBackground) %>%
  addProviderTiles(
    providers$Stamen.TerrainLabels,
    group = "Place labels"
  ) %>%
  # Reversed palette + decreasing sort so the legend reads high-to-low.
  addLegend(
    "topleft",
    pal = pal_veh_rev,
    values = 0:5/5,
    title = "% of households<br/>WITHOUT access<br/>to a car",
    opacity = 1,
    labFormat = function(type, x) scales::percent(sort(x, decreasing = TRUE)),
    className = "info legend leaf-legend",
    group = "Car ownership"
  ) %>%
  # Custom panes: bikes (470) under buses (475) under subways (480).
  addMapPane("bikes", 470) %>%
  addMapPane("buses", 475) %>%
  addMapPane("subways", 480) %>%
  # Each network is drawn twice: a wide dark casing line under a narrower
  # colored line, giving the routes an outlined appearance.
  addPolylines(
    data = subway,
    opacity = 1,
    color = "#333333",
    weight = 7,
    options = pathOptions(pane = "subways"),
    group = "Subway lines"
  ) %>%
  addPolylines(
    data = subway,
    opacity = 1,
    color = "#44ffaa",
    weight = 5,
    options = pathOptions(pane = "subways"),
    group = "Subway lines"
  ) %>%
  addPolylines(
    data = bikes,
    opacity = 1,
    color = "#333333",
    weight = 4,
    options = pathOptions(pane = "bikes"),
    group = "Bike routes"
  ) %>%
  addPolylines(
    data = bikes,
    opacity = 1,
    color = "#44aaff",
    weight = 2,
    options = pathOptions(pane = "bikes"),
    group = "Bike routes"
  ) %>%
  addPolylines(
    data = bus,
    opacity = 1,
    color = "#333333",
    weight = 4,
    options = pathOptions(pane = "buses"),
    group = "Bus routes"
  ) %>%
  addPolylines(
    data = bus,
    opacity = 1,
    color = "#ff3333",
    weight = 2,
    options = pathOptions(pane = "buses"),
    group = "Bus routes"
  ) %>%
  addLayersControl(
    overlayGroups = c(
      "Car ownership",
      "Subway lines",
      "Bus routes",
      "Bike routes",
      "Place labels"
    ),
    options = layersControlOptions(collapsed = FALSE)
  ) %>%
  # Start with only the choropleth visible; overlays are opt-in.
  hideGroup("Subway lines") %>%
  hideGroup("Place labels") %>%
  hideGroup("Bike routes") %>%
  hideGroup("Bus routes") %>%
  setMaxBounds(
    joined_bbox[[1]], joined_bbox[[2]], joined_bbox[[3]], joined_bbox[[4]]
  ) %>%
  setView(
    # BUG FIX: the original called mean(joined_bbox[[1]], joined_bbox[[3]]),
    # which passes the second value as mean()'s `trim` argument and silently
    # returns the first value alone -- centering the map on the western bbox
    # edge. The values must be combined into one vector.
    mean(c(joined_bbox[[1]], joined_bbox[[3]])),
    # Latitude weighted 2:1 toward the northern bbox edge (same value the
    # original sum(a, b*2)/3 produced, written as an explicit expression).
    (joined_bbox[[2]] + 2 * joined_bbox[[4]]) / 3,
    11
  )
leaf$sizingPolicy$defaultHeight <- "calc(100vh - 20px)"
css <- read_file("style.css")
browsable(
tagList(list(
tags$head(
tags$style(css)
),
leaf
))
)
# TO DO
# Add PUMA names to tooltips?
# Get PUMS data for income x car ownership analysis (Megan?)
# Set up git repo
# Add title
# Figure out how to publish |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/router.R
\name{add_route}
\alias{add_route}
\alias{add_route.default}
\alias{add_route.router}
\title{add_route: add new route to a router}
\usage{
add_route(router, method, endpoint, fun)
\method{add_route}{default}(router, method, endpoint, fun)
\method{add_route}{router}(router, method, endpoint, fun)
}
\arguments{
\item{router}{router class}
\item{method}{HTTP verb, such as "GET", "POST", "PUT" or "DELETE"}
\item{endpoint}{endpoint string, e.g. "/add/"; leading and trailing slashes are significant}
\item{fun}{function}
}
\value{
The updated \code{router} object with the new route registered.
}
\description{
Registers a handler function for a given HTTP method and endpoint on a
router object.
}
\details{
The generic dispatches on the class of \code{router}. Endpoints are written
with explicit slashes (e.g. "/add/"), which are significant when routes are
matched.
}
\examples{
r <- router()
r <- add_route(r,
method = "GET",
endpoint = "/add/",
fun = function(a, b) {
return(as.numeric(a) + as.numeric(b))
}
)
}
| /man/add_route.Rd | no_license | gabaligeti/routeR | R | false | true | 875 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/router.R
\name{add_route}
\alias{add_route}
\alias{add_route.default}
\alias{add_route.router}
\title{add_route: add new route to a router}
\usage{
add_route(router, method, endpoint, fun)
\method{add_route}{default}(router, method, endpoint, fun)
\method{add_route}{router}(router, method, endpoint, fun)
}
\arguments{
\item{router}{router class}
\item{method}{http verb such "GET", "POST", "PUT" or "DELETE"}
\item{endpoint}{string e.g. "/add/", slashes are important!!}
\item{fun}{function}
}
\value{
router
\code{Object}
\code{Object}
}
\description{
A description of add_route
}
\details{
A details of add_route
}
\examples{
r <- router()
r <- add_route(r,
method = "GET",
endpoint = "/add/",
fun = function(a, b) {
return(as.numeric(a) + as.numeric(b))
}
)
}
|
\name{mcmcNorm}
\docType{methods}
\alias{mcmcNorm}
\alias{mcmcNorm.default}
\alias{mcmcNorm.formula}
\alias{mcmcNorm.norm}
\title{ MCMC algorithm for incomplete multivariate normal data}
\description{
Simulates parameters and missing values from a joint posterior
distribution under a normal model using Markov chain Monte Carlo.
}
\usage{
% the generic function
mcmcNorm(obj, \dots)
% the default method
\method{mcmcNorm}{default}(obj, x = NULL, intercept = TRUE,
starting.values, iter = 1000, multicycle = NULL,
seeds = NULL, prior = "uniform",
prior.df = NULL, prior.sscp = NULL, save.all.series = TRUE,
save.worst.series = FALSE, worst.linear.coef = NULL,
impute.every = NULL, \ldots)
% method for class formula
\method{mcmcNorm}{formula}(formula, data, starting.values,
iter = 1000, multicycle = NULL, seeds = NULL, prior = "uniform",
prior.df = NULL, prior.sscp = NULL, save.all.series = TRUE,
save.worst.series = FALSE, worst.linear.coef = NULL,
impute.every=NULL, \ldots)
% method for class norm
\method{mcmcNorm}{norm}(obj, starting.values = obj$param,
iter = 1000, multicycle = obj$multicycle,
seeds = NULL, prior = obj$prior, prior.df = obj$prior.df,
prior.sscp = obj$prior.sscp,
save.all.series = !(obj$method=="MCMC" & is.null( obj$series.beta )),
save.worst.series = !is.null( obj$worst.linear.coef ),
worst.linear.coef = obj$worst.linear.coef,
impute.every = obj$impute.every, \ldots)
}
\arguments{
\item{obj}{an object used to select a method. It may be \code{y},
a numeric matrix, vector or data frame containing response variables
to be modeled as normal. Missing values (\code{NA}s)
are allowed. If \code{y}
is a data frame, any factors or ordered factors will be
replaced by their internal codes, and a warning will be given.
Alternatively, this first argument may be a \code{formula} as described
below, or an object of class \code{"norm"}
resulting from a call to \code{emNorm} or
\code{\link{mcmcNorm}}; see DETAILS.}
\item{x}{a numeric matrix, vector or data frame of covariates to be
used as predictors for \code{y}. Missing values (\code{NA}'s) are
not allowed. If \code{x} is a matrix, it must have the same number
of rows as \code{y}. If \code{x} is a data frame, any factors or
ordered factors are replaced by their internal codes, and a
warning is given. If \code{NULL}, it defaults to \code{x =
rep(1,nrow(y))}, an intercept-only model.}
\item{intercept}{if \code{TRUE}, then a column of \code{1}'s is
appended to \code{x}. Ignored if \code{x = NULL}.}
\item{formula}{an object of class \code{"\link{formula}"} (or one
that can be coerced to that class): a symbolic description of the
model which is provided in lieu of \code{y} and \code{x}. The
details of model specification are given
under DETAILS.}
\item{data}{an optional data frame, list or environment (or object
coercible by \code{\link{as.data.frame}} to a data frame) containing
the variables in the model. If not found in \code{data}, the variables are
taken from \code{environment(formula)}, typically the environment
from which \code{mcmcNorm} is called.}
\item{starting.values}{starting values for the model
parameters. This must be a list with two named components,
\code{beta} and \code{sigma}, which are numeric matrices with correct
dimensions. In most circumstances, the starting
values will be obtained from a prior run of \code{\link{emNorm}} or
\code{mcmcNorm}; see DETAILS.}
\item{iter}{number of iterations to be performed. By default, each
iteration consists of one Imputation or I-step followed by
one Posterior or P-step, but this can be changed by
\code{multicycle}.}
\item{multicycle}{number of cycles per iteration, with
\code{NULL} equivalent to \code{multicycle=1}.
Specifying
\code{multicycle=}\emph{k} for some \emph{k}>1 instructs
\code{mcmcNorm} to perform the I-step and P-step cycle \code{k}
times within each iteration; see DETAILS.}
\item{seeds}{two integers to initialize the random number
generator; see DETAILS.}
\item{prior}{should be \code{"uniform"}, \code{"jeffreys"},
\code{"ridge"} or \code{"invwish"}. If \code{"ridge"} then
\code{prior.df} must be supplied. If \code{"invwish"} then
\code{prior.df} and \code{prior.sscp} must be
supplied. For more information, see DETAILS.}
\item{prior.df}{prior degrees of freedom for a ridge
(\code{prior="ridge"}) or inverted Wishart (\code{prior="invwish"})
prior.}
\item{prior.sscp}{prior sums of squares and cross-products (SSCP)
matrix for an inverted Wishart prior (\code{prior="invwish"}).}
\item{save.all.series}{if \code{TRUE}, then the simulated values of all
parameters at all iterations will be saved.}
\item{save.worst.series}{if \code{TRUE}, then the simulated values
of the worst linear function of the parameters will be saved. Under
ordinary circumstances, this function will have been estimated by
\code{\link{emNorm}} after the EM algorithm converged.}
\item{worst.linear.coef}{vector or coefficients that define the worst
linear function of the parameters. Under ordinary circumstances,
these are provided automatically in the result from \code{\link{emNorm}}.}
\item{impute.every}{how many iterations to perform between
imputations? If \code{impute.every=}\emph{k}, then the simulated
values for the missing data after every \emph{k} iterations will be
saved, resulting in \code{floor(iter/impute.every)} multiple
imputations. If \code{NULL}, then no imputations will be saved.}
\item{\dots}{values to be passed to the methods.}
}
\details{
There are three different ways to specify the data and model when
calling \code{mcmcNorm}:
\itemize{
\item by directly supplying as the initial argument a matrix of
numeric response variables \code{y}, along with an optional
matrix of predictor variables \code{x};
\item by supplying a model specification
\code{formula}, along with an optional data frame \code{data}; or
\item by supplying an object of class
\code{"norm"}, which was produced by an earlier call to
\code{emNorm} or \code{\link{mcmcNorm}}.
}
In the first case, the matrix \code{y} is assumed to have a
multivariate normal
linear regression on \code{x} with coefficients \code{beta} and
covariance matrix \code{sigma}, where
\code{dim(beta)=c(ncol(x),ncol(y))} and
\code{dim(sigma)=c(ncol(y),ncol(y))}. Missing values \code{NA}
are allowed in \code{y} but not in \code{x}.
In the second case, \code{formula} is a formula for a (typically
multivariate) linear regression model in the manner expected by
\code{\link{lm}}. A formula is given as \code{y ~ model}, where
\code{y} is either a single numeric variable or a matrix of numeric
variables bound together with the function \code{\link{cbind}}. The
right-hand side of the formula (everything to the right of \code{~}) is a
linear predictor, a series of terms separated by operators \code{+},
\code{:} or \code{*} to specify main effects and
interactions. Factors are allowed on the right-hand side and will
enter the model as contrasts among the \code{\link{levels}}. The
intercept term \code{1} is included by default; to remove the
intercept, use \code{-1}.
In the third case, the initial argument to \code{mcmcNorm} is an
object of class
\code{"norm"} returned by a previous call to \code{emNorm}
or \code{\link{mcmcNorm}}. The value of the parameters
carried in this object (the estimates from the last iteration of
EM or the simulated values from the last iteration of MCMC) will be
used as the starting values.
The matrix \code{y} is assumed to have a multivariate normal
linear regression on \code{x} with coefficients \code{beta} and
covariance matrix \code{sigma}, where
\code{dim(beta)=c(ncol(x),ncol(y))} and
\code{dim(sigma)=c(ncol(y),ncol(y))}.
Starting values for the parameters must be provided. In most cases
these will be the result of a previous call to \code{emNorm} or
\code{mcmcNorm}. If the starting
values are close to the mode (i.e., if they are the result of an EM
run that converged) then the worst linear function of the
parameters will be saved at each iteration. If the starting
values are the result of a previous run of MCMC, then the new
run will be a continuation of the same Markov chain.
If \code{multicycle=}\emph{k} for some \emph{k}>1,
then the length of the saved parameter
series will be reduced by a factor of \emph{k}, and the serial
correlation in the series will also be reduced. This option is
useful in large problems with many parameters and in slowly
converging problems for which many iterations are needed.
\code{norm2} functions use their own internal random number generator which
is seeded by two integers, for example, \code{seeds=c(123,456)},
which allows results to be reproduced in the future. If
\code{seeds=NULL} then
the function will seed itself with two random
integers from R. Therefore, results can also be made reproducible by
calling \code{\link{set.seed}} beforehand and taking \code{seeds=NULL}.
If \code{prior="invwish"} then an inverted Wishart prior distribution
is applied to \code{sigma} with hyperparameters \code{prior.df} (a
scalar) and \code{prior.sscp} (a symmetric, positive definite matrix
of the same dimension as \code{sigma}). Using the device of imaginary
results, we can interpret \code{prior.sscp/prior.df} as a prior guess
for \code{sigma}, and \code{prior.df} as the prior degrees of
freedom on which this guess is based.
The usual noninformative prior for the normal regression model
(\code{prior="jeffreys"}) is equivalent to the inverted
Wishart density with \code{prior.df} equal to 0 and
\code{prior.sscp} equal to a matrix of 0's.
The ridge prior (\code{prior="ridge"}) is a special case of the
inverted Wishart (Schafer, 1997). The prior
guess for \code{sigma} is a diagonal matrix with diagonal elements
estimated by regressing the observed values in each column of
\code{y} on the corresponding rows of \code{x}. When
\code{prior="ridge"}, the user must supply a value for
\code{prior.df}, which
determines how strongly the estimated correlations are smoothed
toward zero.
If the first argument to \code{mcmcNorm} is an object of class
\code{"norm"}, then the parameter values stored in that object will
automatically be used as starting values.
For details of the MCMC algorithm, see the manual distributed
with the NORM package in the subdirectory \code{doc}.
}
\value{
a list whose
\code{class} attribute has been set to \code{"norm"}.
This object may be
passed as the first argument in subsequent calls to \code{emNorm},
\code{\link{mcmcNorm}}, \code{\link{impNorm}},
\code{\link{loglikNorm}} or \code{\link{logpostNorm}}. The
object also carries the original data and specifies the prior
distribution, so that these do not need to be provided again.
\cr
To see a summary of
this object, use the generic function \code{summary},
which passes the object to \code{\link{summary.norm}}.
\cr
Components of the list may also be directly accessed
and examined by the user. Components which may be of interest
include:
\item{iter}{number of MCMC iterations performed.}
\item{param}{a list with elements \code{beta} and \code{sigma}
containing the estimated parameters after the final iteration of
MCMC. This may be supplied as starting values to \code{emNorm} or
\code{\link{mcmcNorm}}, or as an argument to \code{\link{impNorm}},
\code{\link{loglikNorm}} or \code{\link{logpostNorm}}.}
\item{loglik}{a numeric vector of length \code{iter} reporting the
logarithm of the observed-data likelihood function at the start of
each iteration.}
\item{logpost}{a numeric vector of length \code{iter} reporting the
logarithm of the observed-data posterior density function at the start of
each iteration.}
\item{series.worst}{a time-series object (class \code{"ts"}) which
contains the simulated values of the worst linear function of the
parameters from all iterations. This will be present if the
starting values provided to \code{mcmcNorm} were close enough to the
mode to provide a reliable estimate of the worst linear function.
The dependence in this series tends to be higher than for
other parameters, so examining the dependence by plotting the series
with \code{\link{plot}} or its autocorrelation function with
\code{\link{acf}} may help the user to judge how quickly the Markov
chain achieves stationarity. For the definition of the worst linear
function, see the manual accompanying the NORM package
in the subdirectory \code{doc}.}
\item{series.beta}{a multivariate time-series object (class
\code{"ts"}) which
contains the simulated values of the coefficients \code{beta}
from all iterations. This will present if \code{save.all.series=TRUE}.}
\item{series.sigma}{a multivariate time-series object (class
\code{"ts"}) which
contains the simulated values of the variances and
covariances (elements of the lower triangle of \code{sigma})
from all iterations. This will be present if \code{save.all.series=TRUE}.}
\item{imp.list}{a list containing the multiple imputations. Each
component of this list is a data matrix resembling \code{y}, but
with \code{NA}'s replaced by imputed values. The
length of the list depends on the values of \code{iter} and
\code{impute.every}.}
\item{miss.patt}{logical matrix with \code{ncol(y)} columns
reporting the missingness patterns seen in \code{y}. Each row of
\code{miss.patt} corresponds to a distinct missingness pattern, with
\code{TRUE} indicating that the variable is missing and
\code{FALSE} indicating that the variable is observed.}
\item{miss.patt.freq}{integer vector of length
\code{nrow(miss.patt)} indicating, for each missingness pattern, the
number of cases or rows of \code{y} having that pattern.}
\item{which.patt}{integer vector of length \code{nrow(y)} indicating
the missingness pattern for each
row of \code{y}. Thus \code{is.na( y[i,] )} is the same thing as
\code{miss.patt[ which.patt[i], ]}.}
}
\references{
Schafer, J.L. (1997) \emph{Analysis of Incomplete Multivariate
Data}. London: Chapman & Hall/CRC Press.
\cr
For more information about this function and other functions in
the NORM package, see \emph{User's Guide for \code{norm2}}
in the library subdirectory \code{doc}.
}
\author{Joe Schafer \email{Joseph.L.Schafer@census.gov} }
\seealso{\code{\link{emNorm}}, \code{\link{summary.norm}},
\code{\link{impNorm}},
\code{\link{loglikNorm}},
\code{\link{logpostNorm}}
}
\examples{
## run EM for marijuana data with ridge prior
data(marijuana)
emResult <- emNorm(marijuana, prior="ridge", prior.df=0.5)
## run MCMC for 5,000 iterations starting from the
## posterior mode using the same prior
mcmcResult <- mcmcNorm(emResult, iter=5000)
## summarize and plot worst linear function
summary(mcmcResult)
plot(mcmcResult$series.worst)
acf(mcmcResult$series.worst, lag.max=50)
## generate 25 multiple imputations, taking
## 100 steps between imputations, and look at
## the first imputed dataset
mcmcResult <- mcmcNorm(emResult, iter=2500, impute.every=100)
mcmcResult$imp.list[[1]]
}
\keyword{ multivariate }
\keyword{ NA }
| /man/mcmcNorm.Rd | no_license | cran/norm2 | R | false | false | 15,876 | rd | \name{mcmcNorm}
\docType{methods}
\alias{mcmcNorm}
\alias{mcmcNorm.default}
\alias{mcmcNorm.formula}
\alias{mcmcNorm.norm}
\title{ MCMC algorithm for incomplete multivariate normal data}
\description{
Simulates parameters and missing values from a joint posterior
distribution under a normal model using Markov chain Monte Carlo.
}
\usage{
% the generic function
mcmcNorm(obj, \dots)
% the default method
\method{mcmcNorm}{default}(obj, x = NULL, intercept = TRUE,
starting.values, iter = 1000, multicycle = NULL,
seeds = NULL, prior = "uniform",
prior.df = NULL, prior.sscp = NULL, save.all.series = TRUE,
save.worst.series = FALSE, worst.linear.coef = NULL,
impute.every = NULL, \ldots)
% method for class formula
\method{mcmcNorm}{formula}(formula, data, starting.values,
iter = 1000, multicycle = NULL, seeds = NULL, prior = "uniform",
prior.df = NULL, prior.sscp = NULL, save.all.series = TRUE,
save.worst.series = FALSE, worst.linear.coef = NULL,
impute.every=NULL, \ldots)
% method for class norm
\method{mcmcNorm}{norm}(obj, starting.values = obj$param,
iter = 1000, multicycle = obj$multicycle,
seeds = NULL, prior = obj$prior, prior.df = obj$prior.df,
prior.sscp = obj$prior.sscp,
save.all.series = !(obj$method=="MCMC" & is.null( obj$series.beta )),
save.worst.series = !is.null( obj$worst.linear.coef ),
worst.linear.coef = obj$worst.linear.coef,
impute.every = obj$impute.every, \ldots)
}
\arguments{
\item{obj}{an object used to select a method. It may be \code{y},
a numeric matrix, vector or data frame containing response variables
to be modeled as normal. Missing values (\code{NA}s)
are allowed. If \code{y}
is a data frame, any factors or ordered factors will be
replaced by their internal codes, and a warning will be given.
Alternatively, this first argument may be a \code{formula} as described
below, or an object of class \code{"norm"}
resulting from a call to \code{emNorm} or
\code{\link{mcmcNorm}}; see DETAILS.}
\item{x}{a numeric matrix, vector or data frame of covariates to be
used as predictors for \code{y}. Missing values (\code{NA}'s) are
not allowed. If \code{x} is a matrix, it must have the same number
of rows as \code{y}. If \code{x} is a data frame, any factors or
ordered factors are replaced by their internal codes, and a
warning is given. If \code{NULL}, it defaults to \code{x =
rep(1,nrow(y))}, an intercept-only model.}
\item{intercept}{if \code{TRUE}, then a column of \code{1}'s is
appended to \code{x}. Ignored if \code{x = NULL}.}
\item{formula}{an object of class \code{"\link{formula}"} (or one
that can be coerced to that class): a symbolic description of the
model which is provided in lieu of \code{y} and \code{x}. The
details of model specification are given
under DETAILS.}
\item{data}{an optional data frame, list or environment (or object
coercible by \code{\link{as.data.frame}} to a data frame) containing
the variables in the model. If not found in \code{data}, the variables are
taken from \code{environment(formula)}, typically the environment
from which \code{mcmcNorm} is called.}
\item{starting.values}{starting values for the model
parameters. This must be a list with two named components,
\code{beta} and \code{sigma}, which are numeric matrices with correct
dimensions. In most circumstances, the starting
values will be obtained from a prior run of \code{\link{emNorm}} or
\code{mcmcNorm}; see DETAILS.}
\item{iter}{number of iterations to be performed. By default, each
iteration consists of one Imputation or I-step followed by
one Posterior or P-step, but this can be changed by
\code{multicycle}.}
\item{multicycle}{number of cycles per iteration, with
\code{NULL} equivalent to \code{multicycle=1}.
Specifying
\code{multicycle=}\emph{k} for some \emph{k}>1 instructs
\code{mcmcNorm} to perform the I-step and P-step cycle \code{k}
times within each iteration; see DETAILS.}
\item{seeds}{two integers to initialize the random number
generator; see DETAILS.}
\item{prior}{should be \code{"uniform"}, \code{"jeffreys"},
\code{"ridge"} or \code{"invwish"}. If \code{"ridge"} then
\code{prior.df} must be supplied. If \code{"invwish"} then
\code{prior.df} and \code{prior.sscp} must be
supplied. For more information, see DETAILS.}
\item{prior.df}{prior degrees of freedom for a ridge
(\code{prior="ridge"}) or inverted Wishart (\code{prior="invwish"})
prior.}
\item{prior.sscp}{prior sums of squares and cross-products (SSCP)
matrix for an inverted Wishart prior (\code{prior="invwish"}).}
\item{save.all.series}{if \code{TRUE}, then the simulated values of all
parameters at all iterations will be saved.}
\item{save.worst.series}{if \code{TRUE}, then the simulated values
of the worst linear function of the parameters will be saved. Under
ordinary circumstances, this function will have been estimated by
\code{\link{emNorm}} after the EM algorithm converged.}
\item{worst.linear.coef}{vector or coefficients that define the worst
linear function of the parameters. Under ordinary circumstances,
these are provided automatically in the result from \code{\link{emNorm}}.}
\item{impute.every}{how many iterations to perform between
imputations? If \code{impute.every=}\emph{k}, then the simulated
values for the missing data after every \emph{k} iterations will be
saved, resulting in \code{floor(iter/impute.every)} multiple
imputations. If \code{NULL}, then no imputations will be saved.}
\item{\dots}{values to be passed to the methods.}
}
\details{
There are three different ways to specify the data and model when
calling \code{mcmcNorm}:
\itemize{
\item by directly supplying as the initial argument a matrix of
numeric response variables \code{y}, along with an optional
matrix of predictor variables \code{x};
\item by supplying a model specification
\code{formula}, along with an optional data frame \code{data}; or
\item by supplying an object of class
\code{"norm"}, which was produced by an earlier call to
\code{emNorm} or \code{\link{mcmcNorm}}.
}
In the first case, the matrix \code{y} is assumed to have a
multivariate normal
linear regression on \code{x} with coefficients \code{beta} and
covariance matrix \code{sigma}, where
\code{dim(beta)=c(ncol(x),ncol(y))} and
\code{dim(sigma)=c(ncol(y),ncol(y))}. Missing values \code{NA}
are allowed in \code{y} but not in \code{x}.
In the second case, \code{formula} is a formula for a (typically
multivariate) linear regression model in the manner expected by
\code{\link{lm}}. A formula is given as \code{y ~ model}, where
\code{y} is either a single numeric variable or a matrix of numeric
variables bound together with the function \code{\link{cbind}}. The
right-hand side of the formula (everything to the right of \code{~}) is a
linear predictor, a series of terms separated by operators \code{+},
\code{:} or \code{*} to specify main effects and
interactions. Factors are allowed on the right-hand side and will
enter the model as contrasts among the \code{\link{levels}}. The
intercept term \code{1} is included by default; to remove the
intercept, use \code{-1}.
In the third case, the initial argument to \code{mcmcNorm} is an
object of class
\code{"norm"} returned by a previous call to \code{emNorm}
or \code{\link{mcmcNorm}}. The value of the parameters
carried in this object (the estimates from the last iteration of
EM or the simulated values from the last iteration of MCMC) will be
used as the starting values.
The matrix \code{y} is assumed to have a multivariate normal
linear regression on \code{x} with coefficients \code{beta} and
covariance matrix \code{sigma}, where
\code{dim(beta)=c(ncol(x),ncol(y))} and
\code{dim(sigma)=c(ncol(y),ncol(y))}.
Starting values for the parameters must be provided. In most cases
these will be the result of a previous call to \code{emNorm} or
\code{mcmcNorm}. If the starting
values are close to the mode (i.e., if they are the result of an EM
run that converged) then the worst linear function of the
parameters will be saved at each iteration. If the starting
values are the result of a previous run of MCMC, then the new
run will be a continuation of the same Markov chain.
If \code{multicycle=}\emph{k} for some \emph{k}>1,
then the length of the saved parameter
series will be reduced by a factor of \emph{k}, and the serial
correlation in the series will also be reduced. This option is
useful in large problems with many parameters and in slowly
converging problems for which many iterations are needed.
\code{norm2} functions use their own internal random number generator which
is seeded by two integers, for example, \code{seeds=c(123,456)},
which allows results to be reproduced in the future. If
\code{seeds=NULL} then
the function will seed itself with two random
integers from R. Therefore, results can also be made reproducible by
calling \code{\link{set.seed}} beforehand and taking \code{seeds=NULL}.
If \code{prior="invwish"} then an inverted Wishart prior distribution
is applied to \code{sigma} with hyperparameters \code{prior.df} (a
scalar) and \code{prior.sscp} (a symmetric, positive definite matrix
of the same dimension as \code{sigma}). Using the device of imaginary
results, we can interpret \code{prior.sscp/prior.df} as a prior guess
for \code{sigma}, and \code{prior.df} as the prior degrees of
freedom on which this guess is based.
The usual noninformative prior for the normal regression model
(\code{prior="jeffreys"}) is equivalent to the inverted
Wishart density with \code{prior.df} equal to 0 and
\code{prior.sscp} equal to a matrix of 0's.
The ridge prior (\code{prior="ridge"}) is a special case of the
inverted Wishart (Schafer, 1997). The prior
guess for \code{sigma} is a diagonal matrix with diagonal elements
estimated by regressing the observed values in each column of
\code{y} on the corresponding rows of \code{x}. When
\code{prior="ridge"}, the user must supply a value for
\code{prior.df}, which
determines how strongly the estimated correlations are smoothed
toward zero.
If the first argument to \code{mcmcNorm} is an object of class
\code{"norm"}, then the parameter values stored in that object will
automatically be used as starting values.
For details of the MCMC algorithm, see the manual distributed
with the NORM package in the subdirectory \code{doc}.
}
\value{
a list whose
\code{class} attribute has been set to \code{"norm"}.
This object may be
passed as the first argument in subsequent calls to \code{emNorm},
\code{\link{mcmcNorm}}, \code{\link{impNorm}},
\code{\link{loglikNorm}} or \code{\link{logpostNorm}}. The
object also carries the original data and specifies the prior
distribution, so that these do not need to be provided again.
\cr
To see a summary of
this object, use the generic function \code{summary},
which passes the object to \code{\link{summary.norm}}.
\cr
Components of the list may also be directly accessed
and examined by the user. Components which may be of interest
include:
\item{iter}{number of MCMC iterations performed.}
\item{param}{a list with elements \code{beta} and \code{sigma}
containing the estimated parameters after the final iteration of
MCMC. This may be supplied as starting values to \code{emNorm} or
\code{\link{mcmcNorm}}, or as an argument to \code{\link{impNorm}},
\code{\link{loglikNorm}} or \code{\link{logpostNorm}}.}
\item{loglik}{a numeric vector of length \code{iter} reporting the
logarithm of the observed-data likelihood function at the start of
each iteration.}
\item{logpost}{a numeric vector of length \code{iter} reporting the
logarithm of the observed-data posterior density function at the start of
each iteration.}
\item{series.worst}{a time-series object (class \code{"ts"}) which
contains the simulated values of the worst linear function of the
parameters from all iterations. This will be present if the
starting values provided to \code{mcmcNorm} were close enough to the
mode to provide a reliable estimate of the worst linear function.
The dependence in this series tends to be higher than for
other parameters, so examining the dependence by plotting the series
with \code{\link{plot}} or its autocorrelation function with
\code{\link{acf}} may help the user to judge how quickly the Markov
chain achieves stationarity. For the definition of the worst linear
function, see the manual accompanying the NORM package
in the subdirectory \code{doc}.}
\item{series.beta}{a multivariate time-series object (class
\code{"ts"}) which
contains the simulated values of the coefficients \code{beta}
from all iterations. This will be present if \code{save.all.series=TRUE}.}
\item{series.sigma}{a multivariate time-series object (class
\code{"ts"}) which
contains the simulated values of the variances and
covariances (elements of the lower triangle of \code{sigma})
from all iterations. This will be present if \code{save.all.series=TRUE}.}
\item{imp.list}{a list containing the multiple imputations. Each
component of this list is a data matrix resembling \code{y}, but
with \code{NA}'s replaced by imputed values. The
length of the list depends on the values of \code{iter} and
\code{impute.every}.}
\item{miss.patt}{logical matrix with \code{ncol(y)} columns
reporting the missingness patterns seen in \code{y}. Each row of
\code{miss.patt} corresponds to a distinct missingness pattern, with
\code{TRUE} indicating that the variable is missing and
\code{FALSE} indicating that the variable is observed.}
\item{miss.patt.freq}{integer vector of length
\code{nrow(miss.patt)} indicating, for each missingness pattern, the
number of cases or rows of \code{y} having that pattern.}
\item{which.patt}{integer vector of length \code{nrow(y)} indicating
the missingness pattern for each
row of \code{y}. Thus \code{is.na( y[i,] )} is the same thing as
\code{miss.patt[ which.patt[i], ]}.}
}
\references{
Schafer, J.L. (1997) \emph{Analysis of Incomplete Multivariate
Data}. London: Chapman & Hall/CRC Press.
\cr
For more information about this function and other functions in
the NORM package, see \emph{User's Guide for \code{norm2}}
in the library subdirectory \code{doc}.
}
\author{Joe Schafer \email{Joseph.L.Schafer@census.gov} }
\seealso{\code{\link{emNorm}}, \code{\link{summary.norm}},
\code{\link{impNorm}},
\code{\link{loglikNorm}},
\code{\link{logpostNorm}}
}
\examples{
## run EM for marijuana data with ridge prior
data(marijuana)
emResult <- emNorm(marijuana, prior="ridge", prior.df=0.5)
## run MCMC for 5,000 iterations starting from the
## posterior mode using the same prior
mcmcResult <- mcmcNorm(emResult, iter=5000)
## summarize and plot worst linear function
summary(mcmcResult)
plot(mcmcResult$series.worst)
acf(mcmcResult$series.worst, lag.max=50)
## generate 25 multiple imputations, taking
## 100 steps between imputations, and look at
## the first imputed dataset
mcmcResult <- mcmcNorm(emResult, iter=2500, impute.every=100)
mcmcResult$imp.list[[1]]
}
\keyword{ multivariate }
\keyword{ NA }
|
rankall <- function(outcome, num = 1) {
  # For every state, return the hospital at rank `num` for 30-day mortality
  # from `outcome` ("heart attack", "heart failure" or "pneumonia").
  # `num` may be an integer rank, "best", or "worst".
  # Returns a data frame with character columns `hospital` and `state`
  # (states in alphabetical order); `hospital` is NA when a state has fewer
  # ranked hospitals than `num`.

  # Map each supported outcome to its 30-day mortality-rate column.
  rate_col <- switch(outcome,
    "heart attack"  = 11,
    "heart failure" = 17,
    "pneumonia"     = 23,
    stop("invalid outcome")
  )

  # Read the data once (the original re-read the CSV for every state).
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Non-numeric entries ("Not Available") coerce to NA; suppress the warning.
  data$rate <- suppressWarnings(as.numeric(data[, rate_col]))

  states <- sort(unique(data$State))
  hospital <- vapply(states, function(st) {
    # Hospitals in this state with a usable rate, ranked ascending by rate,
    # ties broken alphabetically by hospital name (column 2).
    sub <- data[data$State == st & !is.na(data$rate), ]
    sub <- sub[order(sub$rate, sub[, 2]), ]
    n <- nrow(sub)
    if (n == 0) return(NA_character_)
    if (identical(num, "best")) {
      sub[1, 2]
    } else if (identical(num, "worst")) {
      # Last row of the ascending (rate, name) ordering: the worst rate,
      # alphabetically last among ties (matches the original's max(names)).
      sub[n, 2]
    } else if (num >= 1 && num <= n) {
      sub[num, 2]
    } else {
      NA_character_
    }
  }, character(1))

  data.frame(hospital = unname(hospital), state = states,
             stringsAsFactors = FALSE)
}
} | /script/rankall.R | no_license | JonathanRyanW/R_Programming_Quiz4 | R | false | false | 1,787 | r | rankall <- function(outcome, num = 1){
  # NOTE(review): attaching dplyr inside the function modifies the global
  # search path; callers must have dplyr installed.
  library(dplyr)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  data <- arrange(data, State)
  # All states present in the file, alphabetical thanks to the arrange above.
  state <- unique(data$State)
  # Branch 1: `num` is treated as a numeric rank (anything but "worst").
  if (num != "worst"){
    hospital <- c()
    for (i in state){
      # NOTE(review): the CSV is re-read on every loop iteration; the copy
      # read above could be reused instead.
      data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
      data <- filter(data, State == i)
      # Each outcome's 30-day mortality rate lives in a fixed column:
      # 11 = heart attack, 17 = heart failure, 23 = pneumonia.
      if (outcome == "heart attack"){
        data[,11] <- as.numeric(data[,11])
        # Sort ascending by rate, ties broken by hospital name (column 2);
        # dplyr::arrange places NA rates last.
        data <- arrange(data, data[,11], data[,2])
      }
      if (outcome == "heart failure"){
        data[,17] <- as.numeric(data[,17])
        data <- arrange(data, data[,17], data[,2])
      }
      if (outcome == "pneumonia"){
        data[,23] <- as.numeric(data[,23])
        data <- arrange(data, data[,23], data[,2])
      }
      # Hospital name at the requested rank (NA when num exceeds row count).
      hospital <- c(hospital, data[num,2])
    }
    return(as.data.frame(cbind(hospital, state)))
  }
  # Branch 2: num == "worst" -- pick the hospital with the highest rate.
  if (num == "worst"){
    hospital <- c()
    for (i in state){
      data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
      data <- filter(data, State == i)
      if (outcome == "heart attack"){
        worst_rate <- max(as.numeric(data[,11]), na.rm = TRUE)
        location <- which(as.numeric(data[,11]) == worst_rate)
      }
      if (outcome == "heart failure"){
        worst_rate <- max(as.numeric(data[,17]), na.rm = TRUE)
        location <- which(as.numeric(data[,17]) == worst_rate)
      }
      if (outcome == "pneumonia"){
        worst_rate <- max(as.numeric(data[,23]), na.rm = TRUE)
        location <- which(as.numeric(data[,23]) == worst_rate)
      }
      # Hospitals tied at the worst rate; max(names) keeps the
      # alphabetically last of them.
      names <- data[,2][location]
      hospital <- c(hospital, max(names))
    }
    return(as.data.frame(cbind(hospital, state)))
  }
} |
# Function: maxdepth
#
# This function takes a cryostratigraphic dataset and returns the maximum depth for each unique borehole
#
# data: Cryostratigraphic dataset with top and bottom depth values for each interval
#
# Output: This function outputs the input dataframe with a "maxdepth" column indicating the maximum depth of the borehole
maxdepth <- function(data) {
  # Append a constant `maxdepth` column: the deepest `Bottom.depth` value in
  # this (single-borehole) dataset. Also returns the rows sorted by depth.
  data <- data[order(data$Bottom.depth),] # Make sure the depths are in ascending order
  # NOTE(review): this local `nrow` shadows base::nrow() for the rest of
  # the function body.
  nrow <- nrow(data)
  # Last element after the ascending sort == maximum bottom depth.
  maxdepth <- data$Bottom.depth[nrow]
  data$maxdepth <- maxdepth
  return(data)
} | /src/maxdepth.R | permissive | arianecast/ExcessIceBetaRegression | R | false | false | 573 | r | # Function: maxdepth
#
# This function takes a cryostratigraphic dataset and returns the maximum depth for each unique borehole
#
# data: Cryostratigraphic dataset with top and bottom depth values for each interval
#
# Output: This function outputs the input dataframe with a "maxdepth" column indicating the maximum depth of the borehole
maxdepth <- function(data) {
  # Append a `maxdepth` column holding the borehole's maximum bottom depth.
  #
  # data: cryostratigraphic data for ONE borehole, with a numeric
  #       `Bottom.depth` column giving the bottom depth of each interval.
  # Returns `data` sorted by ascending `Bottom.depth`, with a constant
  # `maxdepth` column equal to the deepest `Bottom.depth` value.
  data <- data[order(data$Bottom.depth), ]
  # Deepest interval is the last row after sorting. (The original stored
  # nrow(data) in a local variable named `nrow`, shadowing base::nrow().)
  data$maxdepth <- data$Bottom.depth[nrow(data)]
  data
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AuxDelaunay.R
\name{rel.verts.tri}
\alias{rel.verts.tri}
\title{The indices of the vertex regions in a triangle
that contain the points in a given data set}
\usage{
rel.verts.tri(Xp, tri, M)
}
\arguments{
\item{Xp}{A set of 2D points representing the set of data points
for which indices of the vertex regions containing them are to be determined.}
\item{tri}{A \eqn{3 \times 2} matrix
with each row representing a vertex of the triangle.}
\item{M}{A 2D point in Cartesian coordinates
or a 3D point in barycentric coordinates
which serves as a center in the interior of the triangle \code{tri}
or the circumcenter of \code{tri}.}
}
\value{
A \code{list} with two elements
\item{rv}{Indices of the vertices
whose regions contain points in \code{Xp}.}
\item{tri}{The vertices of the triangle,
where row number corresponds to the vertex index in \code{rv}.}
}
\description{
Returns the indices of the vertices
whose regions contain the points in data set \code{Xp} in
a triangle \code{tri}\eqn{=T(A,B,C)}.
Vertex regions are based on center \eqn{M=(m_1,m_2)}
in Cartesian coordinates or \eqn{M=(\alpha,\beta,\gamma)}
in barycentric coordinates in the interior of the triangle
to the edges on the extension of the lines joining \code{M} to the vertices
or based on the circumcenter of \code{tri}.
Vertices of triangle \code{tri} are labeled as \eqn{1,2,3}
according to the row number the vertex is recorded.
If a point in \code{Xp} is not inside \code{tri},
then the function yields \code{NA} as output for that entry.
The corresponding vertex region is the polygon with the vertex, \code{M}, and
projection points from \code{M} to the edges crossing the vertex
(as the output of \code{prj.cent2edges(Tr,M)})
or \eqn{CC}-vertex region
(see the examples for an illustration).
See also (\insertCite{ceyhan:Phd-thesis,ceyhan:dom-num-NPE-Spat2011,ceyhan:comp-geo-2010,ceyhan:mcap2012;textual}{pcds}).
}
\examples{
\dontrun{
A<-c(1,1); B<-c(2,0); C<-c(1.5,2);
Tr<-rbind(A,B,C);
M<-c(1.6,1.0)
P<-c(.4,.2)
rel.verts.tri(P,Tr,M)
n<-20 #try also n<-40
set.seed(1)
Xp<-runif.tri(n,Tr)$g
M<-as.numeric(runif.tri(1,Tr)$g) #try also #M<-c(1.6,1.0)
rel.verts.tri(Xp,Tr,M)
rel.verts.tri(rbind(Xp,c(2,2)),Tr,M)
rv<-rel.verts.tri(Xp,Tr,M)
rv
ifelse(identical(M,circumcenter.tri(Tr)),
Ds<-rbind((B+C)/2,(A+C)/2,(A+B)/2),Ds<-prj.cent2edges(Tr,M))
Xlim<-range(Tr[,1],M[1],Xp[,1])
Ylim<-range(Tr[,2],M[2],Xp[,2])
xd<-Xlim[2]-Xlim[1]
yd<-Ylim[2]-Ylim[1]
if (dimension(M)==3) {M<-bary2cart(M,Tr)}
#need to run this when M is given in barycentric coordinates
plot(Tr,pch=".",xlab="",ylab="",
main="Scatterplot of data points \n and M-vertex regions in a triangle",
axes=TRUE,xlim=Xlim+xd*c(-.05,.05),ylim=Ylim+yd*c(-.05,.05))
polygon(Tr)
points(Xp,pch=".",col=1)
L<-rbind(M,M,M); R<-Ds
segments(L[,1], L[,2], R[,1], R[,2], lty = 2)
xc<-Tr[,1]
yc<-Tr[,2]
txt.str<-c("rv=1","rv=2","rv=3")
text(xc,yc,txt.str)
txt<-rbind(M,Ds)
xc<-txt[,1]+c(.02,.04,-.03,0)
yc<-txt[,2]+c(.07,.04,.05,-.07)
txt.str<-c("M","D1","D2","D3")
text(xc,yc,txt.str)
text(Xp,labels=factor(rv$rv))
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{rel.verts.triCM}}, \code{\link{rel.verts.triCC}},
and \code{\link{rel.verts.tri.nondegPE}}
}
\author{
Elvan Ceyhan
}
| /man/rel.verts.tri.Rd | no_license | elvanceyhan/pcds | R | false | true | 3,314 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AuxDelaunay.R
\name{rel.verts.tri}
\alias{rel.verts.tri}
\title{The indices of the vertex regions in a triangle
that contain the points in a given data set}
\usage{
rel.verts.tri(Xp, tri, M)
}
\arguments{
\item{Xp}{A set of 2D points representing the set of data points
for which indices of the vertex regions containing them are to be determined.}
\item{tri}{A \eqn{3 \times 2} matrix
with each row representing a vertex of the triangle.}
\item{M}{A 2D point in Cartesian coordinates
or a 3D point in barycentric coordinates
which serves as a center in the interior of the triangle \code{tri}
or the circumcenter of \code{tri}.}
}
\value{
A \code{list} with two elements
\item{rv}{Indices of the vertices
whose regions contain points in \code{Xp}.}
\item{tri}{The vertices of the triangle,
where row number corresponds to the vertex index in \code{rv}.}
}
\description{
Returns the indices of the vertices
whose regions contain the points in data set \code{Xp} in
a triangle \code{tri}\eqn{=T(A,B,C)}.
Vertex regions are based on center \eqn{M=(m_1,m_2)}
in Cartesian coordinates or \eqn{M=(\alpha,\beta,\gamma)}
in barycentric coordinates in the interior of the triangle
to the edges on the extension of the lines joining \code{M} to the vertices
or based on the circumcenter of \code{tri}.
Vertices of triangle \code{tri} are labeled as \eqn{1,2,3}
according to the row number the vertex is recorded.
If a point in \code{Xp} is not inside \code{tri},
then the function yields \code{NA} as output for that entry.
The corresponding vertex region is the polygon with the vertex, \code{M}, and
projection points from \code{M} to the edges crossing the vertex
(as the output of \code{prj.cent2edges(Tr,M)})
or \eqn{CC}-vertex region
(see the examples for an illustration).
See also (\insertCite{ceyhan:Phd-thesis,ceyhan:dom-num-NPE-Spat2011,ceyhan:comp-geo-2010,ceyhan:mcap2012;textual}{pcds}).
}
\examples{
\dontrun{
A<-c(1,1); B<-c(2,0); C<-c(1.5,2);
Tr<-rbind(A,B,C);
M<-c(1.6,1.0)
P<-c(.4,.2)
rel.verts.tri(P,Tr,M)
n<-20 #try also n<-40
set.seed(1)
Xp<-runif.tri(n,Tr)$g
M<-as.numeric(runif.tri(1,Tr)$g) #try also #M<-c(1.6,1.0)
rel.verts.tri(Xp,Tr,M)
rel.verts.tri(rbind(Xp,c(2,2)),Tr,M)
rv<-rel.verts.tri(Xp,Tr,M)
rv
ifelse(identical(M,circumcenter.tri(Tr)),
Ds<-rbind((B+C)/2,(A+C)/2,(A+B)/2),Ds<-prj.cent2edges(Tr,M))
Xlim<-range(Tr[,1],M[1],Xp[,1])
Ylim<-range(Tr[,2],M[2],Xp[,2])
xd<-Xlim[2]-Xlim[1]
yd<-Ylim[2]-Ylim[1]
if (dimension(M)==3) {M<-bary2cart(M,Tr)}
#need to run this when M is given in barycentric coordinates
plot(Tr,pch=".",xlab="",ylab="",
main="Scatterplot of data points \n and M-vertex regions in a triangle",
axes=TRUE,xlim=Xlim+xd*c(-.05,.05),ylim=Ylim+yd*c(-.05,.05))
polygon(Tr)
points(Xp,pch=".",col=1)
L<-rbind(M,M,M); R<-Ds
segments(L[,1], L[,2], R[,1], R[,2], lty = 2)
xc<-Tr[,1]
yc<-Tr[,2]
txt.str<-c("rv=1","rv=2","rv=3")
text(xc,yc,txt.str)
txt<-rbind(M,Ds)
xc<-txt[,1]+c(.02,.04,-.03,0)
yc<-txt[,2]+c(.07,.04,.05,-.07)
txt.str<-c("M","D1","D2","D3")
text(xc,yc,txt.str)
text(Xp,labels=factor(rv$rv))
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{rel.verts.triCM}}, \code{\link{rel.verts.triCC}},
and \code{\link{rel.verts.tri.nondegPE}}
}
\author{
Elvan Ceyhan
}
|
####################################################################
#' Download Historical Currency Exchange Rate
#'
#' This function lets the user download historical currency exchange
#' rate between two currencies
#'
#' @family Currency
#' @param currency_pair Character. Which currency exchange do you
#' wish to get the history from? i.e, USD/COP, EUR/USD...
#' @param from Date. From date
#' @param to Date. To date
#' @param fill Boolean. Fill weekends and non-quoted dates with
#' previous values?
#' @examples
#' \dontrun{
#' # For today (or any one single date)
#' get_currency("USD/ARS", from = Sys.Date())
#' # For multiple dates
#' get_currency("EUR/USD", from = Sys.Date() - 7, fill = TRUE)
#' }
#' @export
get_currency <- function(currency_pair,
                         from = Sys.Date() - 99,
                         to = Sys.Date(),
                         fill = FALSE) {
  # Download the historical exchange rate for `currency_pair` (e.g. "USD/COP")
  # from Yahoo Finance between `from` and `to`. When `fill = TRUE`, weekends
  # and unquoted days are filled with the nearest quoted value.
  # Returns a data frame with columns `date` (Date) and `rate` (numeric).
  try_require("quantmod")
  # Yahoo FX tickers look like "USDCOP=X"; cleanText strips the separator.
  string <- paste0(toupper(cleanText(currency_pair)), "=X")
  # Scalar condition: use short-circuit `||` (the original's element-wise `|`
  # is not the right operator inside `if`, and `||` on length > 1 operands is
  # an error since R 4.3).
  if (is.na(from) || is.na(to))
    stop("You must insert a valid date")
  from <- as.Date(from)
  to <- as.Date(to)
  # Yahoo needs a non-empty window; never request future dates.
  if (from == to) to <- from + 1
  if (to > Sys.Date()) to <- Sys.Date()
  if (Sys.Date() == from) {
    # Single "today" request: use the live quote instead of daily history.
    x <- suppressWarnings(getQuote(string, auto.assign = FALSE))
    rownames(x) <- Sys.Date()
    x[, 1] <- NULL
  } else {
    x <- data.frame(suppressWarnings(getSymbols(
      string,
      env = NULL,
      from = from, to = to,
      src = "yahoo")))
    # NOTE(review): row names starting with "X" indicate the data.frame
    # conversion produced non-date row names; presumably a single-row edge
    # case -- keep the first row and stamp it with today's date. Confirm
    # against quantmod::getSymbols output.
    if (substr(rownames(x), 1, 1)[1] == "X") {
      x <- x[1, ]
      rownames(x) <- Sys.Date()
    }
  }
  rate <- data.frame(date = as.Date(rownames(x)), rate = x[, 1])
  if (fill) {
    # Left-join the quoted rates onto a complete daily calendar, then carry
    # the last (and, for leading gaps, next) observation into unquoted days.
    rate <- data.frame(date = as.character(
      as.Date(as.Date(from):Sys.Date(), origin = "1970-01-01"))) %>%
      left_join(rate %>% mutate(date = as.character(date)), "date") %>%
      tidyr::fill(rate, .direction = "down") %>%
      tidyr::fill(rate, .direction = "up") %>%
      mutate(date = as.Date(date)) %>%
      filter(date >= as.Date(from))
  }
  return(rate)
}
| /R/currency.R | no_license | alexandereric995/lares | R | false | false | 2,050 | r | ####################################################################
#' Download Historical Currency Exchange Rate
#'
#' This function lets the user download historical currency exchange
#' rate between two currencies
#'
#' @family Currency
#' @param currency_pair Character. Which currency exchange do you
#' wish to get the history from? i.e, USD/COP, EUR/USD...
#' @param from Date. From date
#' @param to Date. To date
#' @param fill Boolean. Fill weekends and non-quoted dates with
#' previous values?
#' @examples
#' \dontrun{
#' # For today (or any one single date)
#' get_currency("USD/ARS", from = Sys.Date())
#' # For multiple dates
#' get_currency("EUR/USD", from = Sys.Date() - 7, fill = TRUE)
#' }
#' @export
get_currency <- function(currency_pair,
                         from = Sys.Date() - 99,
                         to = Sys.Date(),
                         fill = FALSE) {
  # Download the historical exchange rate for `currency_pair` (e.g. "USD/COP")
  # from Yahoo Finance between `from` and `to`; returns a data frame with
  # columns `date` and `rate`. When `fill = TRUE`, weekends and unquoted
  # days are filled with the nearest quoted value.
  try_require("quantmod")
  # Yahoo FX tickers look like "USDCOP=X"; cleanText strips the separator.
  string <- paste0(toupper(cleanText(currency_pair)), "=X")
  # NOTE(review): scalar condition -- short-circuit `||` would be the
  # idiomatic operator here instead of element-wise `|`.
  if (is.na(from) | is.na(to))
    stop("You must insert a valid date")
  from <- as.Date(from)
  to <- as.Date(to)
  # Yahoo needs a non-empty window; never request future dates.
  if (from == to) to <- from + 1
  if (to > Sys.Date()) to <- Sys.Date()
  if (Sys.Date() == from) {
    # Single "today" request: use the live quote instead of daily history.
    x <- suppressWarnings(getQuote(string, auto.assign = FALSE))
    rownames(x) <- Sys.Date()
    x[,1] <- NULL
  } else {
    x <- data.frame(suppressWarnings(getSymbols(
      string,
      env = NULL,
      from = from, to = to,
      src = "yahoo")))
    # NOTE(review): row names starting with "X" indicate non-date row names
    # from the data.frame conversion; presumably a single-row edge case --
    # confirm against quantmod::getSymbols output.
    if (substr(rownames(x),1,1)[1] == "X") {
      x <- x[1,]
      rownames(x) <- Sys.Date()
    }
  }
  rate <- data.frame(date = as.Date(rownames(x)), rate = x[,1])
  if (fill) {
    # Join the quoted rates onto a complete daily calendar, then carry the
    # last (and, for leading gaps, next) observation into unquoted days.
    rate <- data.frame(date = as.character(
      as.Date(as.Date(from):Sys.Date(), origin = "1970-01-01"))) %>%
      left_join(rate %>% mutate(date = as.character(date)), "date") %>%
      tidyr::fill(rate, .direction = "down") %>%
      tidyr::fill(rate, .direction = "up") %>%
      mutate(date = as.Date(date)) %>%
      filter(date >= as.Date(from))
  }
  return(rate)
}
|
# Peak calling on the scATAC-seq heart atlas with ArchR + MACS2.
library(ArchR)
library(ggplot2)
library(tidyverse)
addArchRThreads(threads = 12)
setwd('/project2/gca/aselewa/heart_atlas_project/')
macs2 <- '/project2/gca/software/miniconda3/bin/macs2'
source('R/analysis_utils.R')
archr_project_path <- 'ArchR/ArchR_heart_latest_noAtrium/'
projHeart <- loadArchRProject(archr_project_path)
# Pseudo-bulk coverages per cell type, then a reproducible MACS2 peak set
projHeart <- addGroupCoverages(ArchRProj = projHeart, groupBy = "CellTypes", force = TRUE, maxCells = 10000)
projHeart <- addReproduciblePeakSet(ArchRProj = projHeart, groupBy = "CellTypes", pathToMacs2 = macs2, cutOff = 0.01, verbose = TRUE)
projHeart <- addPeakMatrix(projHeart, force = TRUE)
# cell-type specific peaks (differential accessibility)
markersPeaks <- getMarkerFeatures(ArchRProj = projHeart,
                                  useMatrix = "PeakMatrix",
                                  groupBy = "CellTypes",
                                  bias = c("TSSEnrichment", "log10(nFrags)"))
saveRDS(markersPeaks, paste0(archr_project_path,'/PeakCalls/DA_markerPeaks.rds'))
markers <- getMarkers(markersPeaks, cutOff = "FDR <= 0.1 & Log2FC >= 0.1", returnGR = TRUE)
saveRDS(markers, file = paste0(archr_project_path,'/PeakCalls/DA_MARKERS_FDRP_10_log2FC_0.rds'))
saveArchRProject(projHeart)
# Motif Enrichment
projHeart <- addMotifAnnotations(projHeart, name = "Motif")
projHeart <- addDeviationsMatrix(ArchRProj = projHeart, peakAnnotation = "Motif", force = TRUE)
# Co-accessibility
# FIX: the original referenced `satac`, which is never defined in this
# script; the ArchR project object here is `projHeart`.
projHeart <- addCoAccessibility(ArchRProj = projHeart, reducedDims = 'harmony', maxDist = 1e6)
# BigWigs by cell-type
getGroupBW(ArchRProj = projHeart, groupBy = "CellTypes")
saveArchRProject(projHeart)
| /R/peak_calling.R | no_license | sq-96/heart_atlas | R | false | false | 1,588 | r | library(ArchR)
library(ggplot2)
library(tidyverse)
addArchRThreads(threads = 12)
setwd('/project2/gca/aselewa/heart_atlas_project/')
macs2 <- '/project2/gca/software/miniconda3/bin/macs2'
source('R/analysis_utils.R')
archr_project_path <- 'ArchR/ArchR_heart_latest_noAtrium/'
projHeart <- loadArchRProject(archr_project_path)
# Pseudo-bulk coverages per cell type, then a reproducible MACS2 peak set
projHeart <- addGroupCoverages(ArchRProj = projHeart, groupBy = "CellTypes", force = TRUE, maxCells = 10000)
projHeart <- addReproduciblePeakSet(ArchRProj = projHeart, groupBy = "CellTypes", pathToMacs2 = macs2, cutOff = 0.01, verbose = TRUE)
projHeart <- addPeakMatrix(projHeart, force = TRUE)
# cell-type specific peaks (differential accessibility)
markersPeaks <- getMarkerFeatures(ArchRProj = projHeart,
                                  useMatrix = "PeakMatrix",
                                  groupBy = "CellTypes",
                                  bias = c("TSSEnrichment", "log10(nFrags)"))
saveRDS(markersPeaks, paste0(archr_project_path,'/PeakCalls/DA_markerPeaks.rds'))
markers <- getMarkers(markersPeaks, cutOff = "FDR <= 0.1 & Log2FC >= 0.1", returnGR = TRUE)
saveRDS(markers, file = paste0(archr_project_path,'/PeakCalls/DA_MARKERS_FDRP_10_log2FC_0.rds'))
saveArchRProject(projHeart)
# Motif Enrichment
projHeart <- addMotifAnnotations(projHeart, name = "Motif")
projHeart <- addDeviationsMatrix(ArchRProj = projHeart, peakAnnotation = "Motif", force = TRUE)
# Co-accessibility
# FIX: the original referenced `satac`, which is never defined in this
# script; the ArchR project object here is `projHeart`.
projHeart <- addCoAccessibility(ArchRProj = projHeart, reducedDims = 'harmony', maxDist = 1e6)
# BigWigs by cell-type
getGroupBW(ArchRProj = projHeart, groupBy = "CellTypes")
saveArchRProject(projHeart)
|
# Built-in example dataset
mydata <- InsectSprays
# Only install ggplot2 when it is missing (avoids re-installing on every run)
if (!requireNamespace("ggplot2", quietly = TRUE)) {
  install.packages("ggplot2", dependencies = TRUE)
}
library(ggplot2)
mydata <- ggplot2::diamonds
mydata
# Rows with carat > 0.50 AND color "E"
mydata[mydata$carat > 0.50 & mydata$color == "E", ]
# Rows with carat > 0.50 OR color "E"
mydata[mydata$carat > 0.50 | mydata$color == "E", ]
# FIX: the original `subset(mydata, color = "E")` used `=` instead of `==`,
# so the condition was silently swallowed by `...` and no filtering happened
mydata1 <- subset(mydata, color == "E")
mydata1
mydata2 <- transform(mydata, carat = log(carat))
mydata2
install.packages("ggplot2", dependencies = TRUE)
library(ggplot2)
mydata <- ggplot2::diamonds
mydata
mydata [mydata$carat > 0.50 & mydata$color=="E",]
mydata [mydata$carat > 0.50 | mydata$color=="E",]
mydata1 <- subset(mydata , color="E")
mydata1
mydata2 <- transform(mydata, carat= log(carat))
mydata2
|
\name{pSdat1}
\alias{pSdat1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot Simulated Sensitivity Test Data
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
pSdat1(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
%% ~~Describe \code{dat} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (dat)
{
dt = dtt = dat$d0
about = dat$about
titl = dat$titl
unit = dat$unit
pee = dat$p
ln = dat$ln
neyer = dat$neyer
tmu = dat$tmu
tsig = dat$tsig
M = dat$M
dm = dat$dm
ds = dat$ds
iseed = dat$iseed
rmzm = round(tmu, 4)
rmzs = round(tsig, 4)
if (iseed < 0) {
titl1 = substitute(paste(titl, ": (", mu[t], ", ", sigma[t],
") = (", rmzm, ", ", rmzs, "), ", delta[t], " = (",
dm, ", ", ds, ")", sep = ""))
}
else {
titl1 = substitute(paste(titl, ": (", mu[t], ", ", sigma[t],
") = (", rmzm, ", ", rmzs, "), ", delta[t], " = (",
dm, ", ", ds, "), ", i[seed], " = ", iseed, sep = ""))
}
if (length(pee) == 0)
pee = 0
x = dt$X
y = dt$Y
id = dt$ID
nid = length(id)
fini = 0
if (id[nid] == "III3")
fini = 1
if (fini == 1) {
dtt = dtt[-nid, ]
x = x[-nid]
y = y[-nid]
id = id[-nid]
nid = nid - 1
}
zee = tzee = x[1]
if (pee * (1 - pee) > 0 & fini == 1) {
yu = glmmle(dtt)
zee = yu$mu + qnorm(pee) * yu$sig
tzee = dat$tmu + qnorm(pee) * dat$tsig
}
if (M == 1)
about1 = expression(paste("{", mu[lo], ",", mu[hi], ",",
sigma[g], "|", n[11], ",", n[12], ",", n[2], ",",
n[3], "|p,", lambda, ",res}", sep = ""))
else about1 = expression(paste("{", mu[lo], ",", mu[hi],
",", sigma[g], "|", n[11], ",", n[12], ",", n[2], ",",
n[3], "|p,", lambda, ",res,M}", sep = ""))
ens = 1:nid
rd = which(y == 1)
gr = which(y == 0)
xtz = c(x, tzee, zee)
ylm = range(pretty(c(xtz, max(xtz, na.rm = T) + diff(range(xtz))/80),
n = 10))
lb = nid - 1
if (lb > 30)
lb = ceiling(lb/2)
if (nid == 1)
return()
if (nid > 1) {
par(mar = c(4, 4, 5, 2) + 0.1)
lnum = 2.3
if (!ln)
plot(c(ens, 1), c(x, zee), type = "n", xlab = "",
ylab = "", ylim = ylm, lab = c(lb, 5, 7))
else {
par(mar = c(4, 3, 5, 3) + 0.1)
plot(c(ens, 1), c(x, zee), type = "n", xlab = "",
ylab = "", ylim = ylm, yaxt = "n")
w7 = pretty(exp(x), n = 6)
axis(2, at = log(w7), lab = round(w7, 1), srt = 90,
tcl = -0.4, mgp = c(1, 0.5, 0))
w8 = pretty(x, n = 6)
axis(4, at = w8, lab = round(w8, 1), srt = 90, tcl = -0.4,
mgp = c(1, 0.5, 0))
mtext("Log Scale", side = 4, line = 1.6)
lnum = 1.8
}
mtext(paste("Test Level (", unit, ")", sep = ""), side = 2,
line = lnum)
mtext("Trial Number", side = 1, line = 2.2)
points(ens[rd], x[rd], pch = 25, cex = 0.7, bg = 4)
points(ens[gr], x[gr], pch = 24, cex = 0.7, bg = 3)
if (neyer)
g7 = addneyr(dtt, ylm, sim = T)
else g7 = add3pod(dtt, ylm, sim = T)
kp = g7[2]
mtext(titl1, side = 3, line = 3.4, cex = 1.2, col = 1)
mtext(about1, side = 3, line = 1.8, cex = 1.2)
mtext(about, side = 3, line = 0.5, cex = 1.2)
if (fini == 1) {
axis(4, label = F, at = dt$RX[nid + 1], tcl = 0.25,
lwd = 2)
axis(4, label = F, at = zee, tcl = -0.25, lwd = 2)
axis(4, label = F, at = tzee, tcl = -0.25, lwd = 2,
col = 8)
axis(4, label = F, at = tzee, tcl = 0.25, lwd = 2,
col = 8)
}
}
reset()
return()
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/pSdat1.Rd | no_license | Auburngrads/3pod | R | false | false | 4,859 | rd | \name{pSdat1}
\alias{pSdat1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot Simulated Sensitivity Test Data
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
pSdat1(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
%% ~~Describe \code{dat} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (dat)
{
dt = dtt = dat$d0
about = dat$about
titl = dat$titl
unit = dat$unit
pee = dat$p
ln = dat$ln
neyer = dat$neyer
tmu = dat$tmu
tsig = dat$tsig
M = dat$M
dm = dat$dm
ds = dat$ds
iseed = dat$iseed
rmzm = round(tmu, 4)
rmzs = round(tsig, 4)
if (iseed < 0) {
titl1 = substitute(paste(titl, ": (", mu[t], ", ", sigma[t],
") = (", rmzm, ", ", rmzs, "), ", delta[t], " = (",
dm, ", ", ds, ")", sep = ""))
}
else {
titl1 = substitute(paste(titl, ": (", mu[t], ", ", sigma[t],
") = (", rmzm, ", ", rmzs, "), ", delta[t], " = (",
dm, ", ", ds, "), ", i[seed], " = ", iseed, sep = ""))
}
if (length(pee) == 0)
pee = 0
x = dt$X
y = dt$Y
id = dt$ID
nid = length(id)
fini = 0
if (id[nid] == "III3")
fini = 1
if (fini == 1) {
dtt = dtt[-nid, ]
x = x[-nid]
y = y[-nid]
id = id[-nid]
nid = nid - 1
}
zee = tzee = x[1]
if (pee * (1 - pee) > 0 & fini == 1) {
yu = glmmle(dtt)
zee = yu$mu + qnorm(pee) * yu$sig
tzee = dat$tmu + qnorm(pee) * dat$tsig
}
if (M == 1)
about1 = expression(paste("{", mu[lo], ",", mu[hi], ",",
sigma[g], "|", n[11], ",", n[12], ",", n[2], ",",
n[3], "|p,", lambda, ",res}", sep = ""))
else about1 = expression(paste("{", mu[lo], ",", mu[hi],
",", sigma[g], "|", n[11], ",", n[12], ",", n[2], ",",
n[3], "|p,", lambda, ",res,M}", sep = ""))
ens = 1:nid
rd = which(y == 1)
gr = which(y == 0)
xtz = c(x, tzee, zee)
ylm = range(pretty(c(xtz, max(xtz, na.rm = T) + diff(range(xtz))/80),
n = 10))
lb = nid - 1
if (lb > 30)
lb = ceiling(lb/2)
if (nid == 1)
return()
if (nid > 1) {
par(mar = c(4, 4, 5, 2) + 0.1)
lnum = 2.3
if (!ln)
plot(c(ens, 1), c(x, zee), type = "n", xlab = "",
ylab = "", ylim = ylm, lab = c(lb, 5, 7))
else {
par(mar = c(4, 3, 5, 3) + 0.1)
plot(c(ens, 1), c(x, zee), type = "n", xlab = "",
ylab = "", ylim = ylm, yaxt = "n")
w7 = pretty(exp(x), n = 6)
axis(2, at = log(w7), lab = round(w7, 1), srt = 90,
tcl = -0.4, mgp = c(1, 0.5, 0))
w8 = pretty(x, n = 6)
axis(4, at = w8, lab = round(w8, 1), srt = 90, tcl = -0.4,
mgp = c(1, 0.5, 0))
mtext("Log Scale", side = 4, line = 1.6)
lnum = 1.8
}
mtext(paste("Test Level (", unit, ")", sep = ""), side = 2,
line = lnum)
mtext("Trial Number", side = 1, line = 2.2)
points(ens[rd], x[rd], pch = 25, cex = 0.7, bg = 4)
points(ens[gr], x[gr], pch = 24, cex = 0.7, bg = 3)
if (neyer)
g7 = addneyr(dtt, ylm, sim = T)
else g7 = add3pod(dtt, ylm, sim = T)
kp = g7[2]
mtext(titl1, side = 3, line = 3.4, cex = 1.2, col = 1)
mtext(about1, side = 3, line = 1.8, cex = 1.2)
mtext(about, side = 3, line = 0.5, cex = 1.2)
if (fini == 1) {
axis(4, label = F, at = dt$RX[nid + 1], tcl = 0.25,
lwd = 2)
axis(4, label = F, at = zee, tcl = -0.25, lwd = 2)
axis(4, label = F, at = tzee, tcl = -0.25, lwd = 2,
col = 8)
axis(4, label = F, at = tzee, tcl = 0.25, lwd = 2,
col = 8)
}
}
reset()
return()
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#' Run simulation wrapper
#'
#' Translates the shiny UI `input` list into a TBsim settings list,
#' resolves the run id and output folder, assembles the simulation
#' object with \code{TBsim::tb_new_sim()} and runs it.
#'
#' @param user character user id; required when `userDetails` is NULL.
#' @param input named list of UI inputs (checkboxes, sliders, text fields).
#' @param userData optional database handle; only triggers a status message here.
#' @param userDetails optional login object; its first e-mail address
#'   overrides `user` when present.
#' @param jobscheduler submit the run to the job scheduler instead of
#'   running locally?
#' @param description free-text description stored with the run.
#' @param nPatients number of patients to simulate (1 = single-patient mode).
#' @param regimen therapy regimen definition passed on to the simulator.
#' @param memsFile optional MEMS adherence data file.
#' @param drugNames character vector of drug definition names to load.
#' @return the object returned by \code{TBsim::tb_run_sim()}, with the
#'   run `id` attached.
#' @export
do_sim <- function(
  user = NULL,
  input = list(),
  userData = NULL,
  userDetails = NULL,
  jobscheduler = FALSE,
  description = "No description",
  nPatients = 1,
  regimen = NULL,
  memsFile = NULL,
  drugNames = c()) {
  if (!is.null(userData)) {
    cat("Saving results to database!!")
  }
  ## read in templates and define some constants
  therapy <- regimen
  adherence <- tb_read_init("adh2.txt")
  immune <- tb_read_init("Immune.txt")
  ## Resolve the run id: honour a caller-supplied id, otherwise generate a
  ## random one. (FIX: the original tested !is.null(input$id) twice in a
  ## nested if/else, which made the "use the supplied id" branch
  ## unreachable, so a supplied id was always ignored.)
  if (!is.null(input$id)) {
    id <- input$id
  } else {
    id <- TBsim::random_string()
    warning("No run ID specified, created random id.")
  }
  ## Resolve the user and create the working folder for this run
  if (!is.null(userDetails)) {
    user <- userDetails$emails$value[1]
    folder <- TBsim::new_tempdir(user = user, id = id)
  } else {
    if (is.null(user)) {
      stop("No userID specified!")
    }
    folder <- TBsim::new_tempdir(user = user, id = id)
  }
  ## UI input names are suffixed "Single" or "Pop" depending on the mode
  suffix <- "Single"
  if (nPatients > 1) {
    suffix <- "Pop"
  }
  ## Skip per-timepoint output files for quick population runs
  output_data <- ifelse(nPatients > 1 && input$isQuickSim, 0, 1)
  is_bootstrap <- ifelse(input$isBootstrap, 1, 0)
  settings <- list(
    isBootstrap = is_bootstrap, # not sure what this is...
    isImmuneKill = ifelse(input[[paste0("isImmuneKill", suffix)]] == "Yes", 1, 0),
    isDrugEffect = 1,
    isResistance = ifelse(input[[paste0("isResistance", suffix)]] == "Yes", 1, 0),
    isClearResist = input$isClearResist * 1, # checkbox -> 0/1
    isGradualDiffusion = input$isGradualDiffusion * 1,
    isAdherence = 1,
    isGranuloma = input$isGranuloma * 1,
    isGranImmuneKill = input$isGranImmuneKill * 1,
    isGranulomaInfec = input$isGranulomaInfec * 1,
    isSaveAdhDose = output_data,
    isSaveConc = output_data,
    isSaveConcKill = output_data,
    isSaveImmune = output_data,
    isSaveMacro = output_data,
    isSaveBact = output_data,
    isSaveBactRes = output_data,
    isSaveEffect = output_data,
    isSavePatientResults = 0
  )
  ## Text/slider inputs copied over verbatim from the UI
  text_inputs <- c(
    "bactThreshold", "bactThresholdRes", "growthLimit", "resistanceRatio",
    "resistanceFitness", "isPersistance", "persistTime",
    "freeBactLevel", "latentBactLevel", "infI", "infII", "infIII", "infIV",
    "immuneMean", "initialValueStdv", "parameterStdv", "timeStepStdv", "immuneStdv",
    "adherenceSwitchDay", "nTime",
    "adherenceMean", "therapyStart", "nPopulations")
  for (i in seq_along(text_inputs)) {
    settings[[text_inputs[i]]] <- input[[text_inputs[i]]]
  }
  ## Adherence type codes: 9 by default, 0 when the UI selects "Random draw"
  settings$adherenceType1 <- 9
  settings$adherenceType2 <- 9
  settings$adherenceMEMS <- 0
  if (input$adherenceType == "Random draw") {
    settings$adherenceType1 <- 0
    settings$adherenceType2 <- 0
  }
  if (input$adherenceType == "Switched") {
    if (input$adherenceType1 == "Random draw") {
      settings$adherenceType1 <- 0
    }
    if (input$adherenceType2 == "Random draw") {
      settings$adherenceType2 <- 0
    }
  }
  if (input$adherenceType == "MEMS") {
    settings$adherenceType1 <- 0
    settings$adherenceType2 <- 0
    settings$adherenceMEMS <- 1
  }
  ## Between-patient variability; note timeStepStdv is fixed at 0.05 here
  ## and the corresponding tb_new_sim argument is commented out below
  Stdv <- list(
    initialValueStdv = settings$initialValueStdv,
    parameterStdv = settings$parameterStdv,
    timeStepStdv = 0.05,
    immuneStdv = settings$immuneStdv
  )
  drugVariability <- 1
  seed <- input$simSeed
  if (nPatients == 1) seed <- NULL
  ## "Typical" single patient: switch off all random variability
  if (nPatients == 1 && input$patientTypeSingle == "Typical") {
    for (key in names(Stdv)) {
      Stdv[[key]] <- 0
    }
    drugVariability <- 0
  }
  ## get drug definitions
  drugDefinitions <- reload_all_drug_definitions(drugNames, user)
  immune <- tb_read_init("Immune.txt")
  ## FIX: the original call passed adherenceType1/adherenceType2 twice
  ## (computed values plus hard-coded `= 9` at the end); the duplicated
  ## trailing entries were removed so the values computed above are used.
  sim1 <- TBsim::tb_new_sim(
    folder = folder,
    id = id,
    user = user,
    description = description,
    therapy = therapy,
    adherence = adherence,
    immune = immune,
    drugs = drugDefinitions,
    memsFile = memsFile,
    nPatients = nPatients,
    therapyStart = as.numeric(as.character(settings$therapyStart)),
    drugVariability = drugVariability,
    nTime = settings$nTime,
    isBootstrap = settings$isBootstrap,
    isImmuneKill = settings$isImmuneKill,
    isDrugEffect = settings$isDrugEffect,
    isResistance = settings$isResistance,
    isPersistance = settings$isPersistance,
    persistTime = settings$persistTime,
    bactThreshold = settings$bactThreshold,
    bactThresholdRes = settings$bactThresholdRes,
    growthLimit = settings$growthLimit,
    isClearResist = settings$isClearResist,
    resistanceRatio = settings$resistanceRatio,
    resistanceFitness = settings$resistanceFitness,
    freeBactLevel = settings$freeBactLevel,
    latentBactLevel = settings$latentBactLevel,
    infI = settings$infI,
    infII = settings$infII,
    infIII = settings$infIII,
    infIV = settings$infIV,
    immuneMean = settings$immuneMean,
    isGradualDiffusion = settings$isGradualDiffusion,
    isGranuloma = settings$isGranuloma,
    isGranImmuneKill = settings$isGranImmuneKill,
    isGranulomaInfec = settings$isGranulomaInfec,
    initialValueStdv = Stdv$initialValueStdv,
    parameterStdv = Stdv$parameterStdv,
    # timeStepStdv = settings$timeStepStdv,
    immuneStdv = Stdv$immuneStdv,
    isAdherence = settings$isAdherence,
    adherenceType1 = settings$adherenceType1,
    adherenceType2 = settings$adherenceType2,
    adherenceSwitchDay = as.numeric(settings$adherenceSwitchDay) + as.numeric(as.character(settings$therapyStart)),
    adherenceMean = settings$adherenceMean,
    adherenceStdv = input$adherenceStdv,
    adherenceStdvDay = input$adherenceStdvDay,
    adherenceMEMS = settings$adherenceMEMS,
    nPopulations = settings$nPopulations,
    nIterations = input$nIterations,
    isSaveAdhDose = settings$isSaveAdhDose,
    isSaveConc = settings$isSaveConc,
    isSaveConcKill = settings$isSaveConcKill,
    isSaveImmune = settings$isSaveImmune,
    isSaveMacro = settings$isSaveMacro,
    isSaveBact = settings$isSaveBact,
    isSaveBactRes = settings$isSaveBactRes,
    isSaveEffect = settings$isSaveEffect,
    isSavePopulationResults = 1,
    isSavePatientResults = settings$isSavePatientResults,
    seed = seed)
  ## Start the simulation based on the given definitions
  res <- TBsim::tb_run_sim(sim1, jobscheduler = jobscheduler, queue = "all.q")
  res$id <- id
  return(res)
}
| /tbsim_app/R/do_sim.R | no_license | saviclab/TBsim | R | false | false | 6,508 | r |
#' Run simulation wrapper
#'
#' Translates the shiny UI `input` list into a TBsim settings list,
#' resolves the run id and output folder, assembles the simulation
#' object with \code{TBsim::tb_new_sim()} and runs it.
#'
#' @param user character user id; required when `userDetails` is NULL.
#' @param input named list of UI inputs (checkboxes, sliders, text fields).
#' @param userData optional database handle; only triggers a status message here.
#' @param userDetails optional login object; its first e-mail address
#'   overrides `user` when present.
#' @param jobscheduler submit the run to the job scheduler instead of
#'   running locally?
#' @param description free-text description stored with the run.
#' @param nPatients number of patients to simulate (1 = single-patient mode).
#' @param regimen therapy regimen definition passed on to the simulator.
#' @param memsFile optional MEMS adherence data file.
#' @param drugNames character vector of drug definition names to load.
#' @return the object returned by \code{TBsim::tb_run_sim()}, with the
#'   run `id` attached.
#' @export
do_sim <- function(
  user = NULL,
  input = list(),
  userData = NULL,
  userDetails = NULL,
  jobscheduler = FALSE,
  description = "No description",
  nPatients = 1,
  regimen = NULL,
  memsFile = NULL,
  drugNames = c()) {
  if (!is.null(userData)) {
    cat("Saving results to database!!")
  }
  ## read in templates and define some constants
  therapy <- regimen
  adherence <- tb_read_init("adh2.txt")
  immune <- tb_read_init("Immune.txt")
  ## Resolve the run id: honour a caller-supplied id, otherwise generate a
  ## random one. (FIX: the original tested !is.null(input$id) twice in a
  ## nested if/else, which made the "use the supplied id" branch
  ## unreachable, so a supplied id was always ignored.)
  if (!is.null(input$id)) {
    id <- input$id
  } else {
    id <- TBsim::random_string()
    warning("No run ID specified, created random id.")
  }
  ## Resolve the user and create the working folder for this run
  if (!is.null(userDetails)) {
    user <- userDetails$emails$value[1]
    folder <- TBsim::new_tempdir(user = user, id = id)
  } else {
    if (is.null(user)) {
      stop("No userID specified!")
    }
    folder <- TBsim::new_tempdir(user = user, id = id)
  }
  ## UI input names are suffixed "Single" or "Pop" depending on the mode
  suffix <- "Single"
  if (nPatients > 1) {
    suffix <- "Pop"
  }
  ## Skip per-timepoint output files for quick population runs
  output_data <- ifelse(nPatients > 1 && input$isQuickSim, 0, 1)
  is_bootstrap <- ifelse(input$isBootstrap, 1, 0)
  settings <- list(
    isBootstrap = is_bootstrap, # not sure what this is...
    isImmuneKill = ifelse(input[[paste0("isImmuneKill", suffix)]] == "Yes", 1, 0),
    isDrugEffect = 1,
    isResistance = ifelse(input[[paste0("isResistance", suffix)]] == "Yes", 1, 0),
    isClearResist = input$isClearResist * 1, # checkbox -> 0/1
    isGradualDiffusion = input$isGradualDiffusion * 1,
    isAdherence = 1,
    isGranuloma = input$isGranuloma * 1,
    isGranImmuneKill = input$isGranImmuneKill * 1,
    isGranulomaInfec = input$isGranulomaInfec * 1,
    isSaveAdhDose = output_data,
    isSaveConc = output_data,
    isSaveConcKill = output_data,
    isSaveImmune = output_data,
    isSaveMacro = output_data,
    isSaveBact = output_data,
    isSaveBactRes = output_data,
    isSaveEffect = output_data,
    isSavePatientResults = 0
  )
  ## Text/slider inputs copied over verbatim from the UI
  text_inputs <- c(
    "bactThreshold", "bactThresholdRes", "growthLimit", "resistanceRatio",
    "resistanceFitness", "isPersistance", "persistTime",
    "freeBactLevel", "latentBactLevel", "infI", "infII", "infIII", "infIV",
    "immuneMean", "initialValueStdv", "parameterStdv", "timeStepStdv", "immuneStdv",
    "adherenceSwitchDay", "nTime",
    "adherenceMean", "therapyStart", "nPopulations")
  for (i in seq_along(text_inputs)) {
    settings[[text_inputs[i]]] <- input[[text_inputs[i]]]
  }
  ## Adherence type codes: 9 by default, 0 when the UI selects "Random draw"
  settings$adherenceType1 <- 9
  settings$adherenceType2 <- 9
  settings$adherenceMEMS <- 0
  if (input$adherenceType == "Random draw") {
    settings$adherenceType1 <- 0
    settings$adherenceType2 <- 0
  }
  if (input$adherenceType == "Switched") {
    if (input$adherenceType1 == "Random draw") {
      settings$adherenceType1 <- 0
    }
    if (input$adherenceType2 == "Random draw") {
      settings$adherenceType2 <- 0
    }
  }
  if (input$adherenceType == "MEMS") {
    settings$adherenceType1 <- 0
    settings$adherenceType2 <- 0
    settings$adherenceMEMS <- 1
  }
  ## Between-patient variability; note timeStepStdv is fixed at 0.05 here
  ## and the corresponding tb_new_sim argument is commented out below
  Stdv <- list(
    initialValueStdv = settings$initialValueStdv,
    parameterStdv = settings$parameterStdv,
    timeStepStdv = 0.05,
    immuneStdv = settings$immuneStdv
  )
  drugVariability <- 1
  seed <- input$simSeed
  if (nPatients == 1) seed <- NULL
  ## "Typical" single patient: switch off all random variability
  if (nPatients == 1 && input$patientTypeSingle == "Typical") {
    for (key in names(Stdv)) {
      Stdv[[key]] <- 0
    }
    drugVariability <- 0
  }
  ## get drug definitions
  drugDefinitions <- reload_all_drug_definitions(drugNames, user)
  immune <- tb_read_init("Immune.txt")
  ## FIX: the original call passed adherenceType1/adherenceType2 twice
  ## (computed values plus hard-coded `= 9` at the end); the duplicated
  ## trailing entries were removed so the values computed above are used.
  sim1 <- TBsim::tb_new_sim(
    folder = folder,
    id = id,
    user = user,
    description = description,
    therapy = therapy,
    adherence = adherence,
    immune = immune,
    drugs = drugDefinitions,
    memsFile = memsFile,
    nPatients = nPatients,
    therapyStart = as.numeric(as.character(settings$therapyStart)),
    drugVariability = drugVariability,
    nTime = settings$nTime,
    isBootstrap = settings$isBootstrap,
    isImmuneKill = settings$isImmuneKill,
    isDrugEffect = settings$isDrugEffect,
    isResistance = settings$isResistance,
    isPersistance = settings$isPersistance,
    persistTime = settings$persistTime,
    bactThreshold = settings$bactThreshold,
    bactThresholdRes = settings$bactThresholdRes,
    growthLimit = settings$growthLimit,
    isClearResist = settings$isClearResist,
    resistanceRatio = settings$resistanceRatio,
    resistanceFitness = settings$resistanceFitness,
    freeBactLevel = settings$freeBactLevel,
    latentBactLevel = settings$latentBactLevel,
    infI = settings$infI,
    infII = settings$infII,
    infIII = settings$infIII,
    infIV = settings$infIV,
    immuneMean = settings$immuneMean,
    isGradualDiffusion = settings$isGradualDiffusion,
    isGranuloma = settings$isGranuloma,
    isGranImmuneKill = settings$isGranImmuneKill,
    isGranulomaInfec = settings$isGranulomaInfec,
    initialValueStdv = Stdv$initialValueStdv,
    parameterStdv = Stdv$parameterStdv,
    # timeStepStdv = settings$timeStepStdv,
    immuneStdv = Stdv$immuneStdv,
    isAdherence = settings$isAdherence,
    adherenceType1 = settings$adherenceType1,
    adherenceType2 = settings$adherenceType2,
    adherenceSwitchDay = as.numeric(settings$adherenceSwitchDay) + as.numeric(as.character(settings$therapyStart)),
    adherenceMean = settings$adherenceMean,
    adherenceStdv = input$adherenceStdv,
    adherenceStdvDay = input$adherenceStdvDay,
    adherenceMEMS = settings$adherenceMEMS,
    nPopulations = settings$nPopulations,
    nIterations = input$nIterations,
    isSaveAdhDose = settings$isSaveAdhDose,
    isSaveConc = settings$isSaveConc,
    isSaveConcKill = settings$isSaveConcKill,
    isSaveImmune = settings$isSaveImmune,
    isSaveMacro = settings$isSaveMacro,
    isSaveBact = settings$isSaveBact,
    isSaveBactRes = settings$isSaveBactRes,
    isSaveEffect = settings$isSaveEffect,
    isSavePopulationResults = 1,
    isSavePatientResults = settings$isSavePatientResults,
    seed = seed)
  ## Start the simulation based on the given definitions
  res <- TBsim::tb_run_sim(sim1, jobscheduler = jobscheduler, queue = "all.q")
  res$id <- id
  return(res)
}
|
library("MASS")
# Fit a linear discriminant analysis model on `train` (response: crim)
# and return the misclassification rate on `test`.
getLdaError <- function(train, test){
  fit <- lda(crim ~ ., data = train)
  predicted <- predict(fit, test)$class
  mean(predicted != test$crim)
}
# Fit a quadratic discriminant analysis model on `train` (response: crim)
# and return the misclassification rate on `test`.
getQdaError <- function(train, test){
  fit <- qda(crim ~ ., data = train)
  predicted <- predict(fit, test)$class
  mean(predicted != test$crim)
}
# Fit a logistic regression on `train` and return the misclassification
# rate on `test`, classifying with a 0.5 probability cutoff.
getGlmError <- function(train, test){
  glm.fit <- glm(crim ~ ., data = train, family = "binomial")
  # FIX: predict() on a glm returns the linear predictor (log-odds) by
  # default; type = "response" is required so the 0.5 cutoff is applied
  # to probabilities rather than to log-odds.
  glm.probs <- predict(glm.fit, test, type = "response")
  glm.pred <- rep(0, times = nrow(test))
  glm.pred[glm.probs > 0.5] <- 1
  mean(glm.pred != test$crim)
}
# k-nearest-neighbours misclassification rate on `test`, using all
# columns except `crim` as (standardized) predictors.
# FIX: knn() lives in the `class` package, which this script never
# attaches (only MASS is loaded); qualify the call so it resolves.
getKnnError <- function(k, train, test){
  crim_ind <- which(colnames(train) == "crim")
  # NOTE(review): each set is scaled with its own mean/sd (as in the
  # original); strictly, test data should be scaled with the training
  # parameters.
  knn.pred <- class::knn(scale(train[, -crim_ind]), scale(test[, -crim_ind]),
                         train[, crim_ind], k = k)
  mean(knn.pred != test$crim)
}
# Binarise Boston$crim at its median, split the data 75/25, and return
# the lowest test error across LDA, QDA, logistic regression and k-NN
# (k = 1..5) for the predictor set `names`.
getMinError <- function(names){
  set.seed(1)  # fixed seed so the split (and knn tie-breaking) is reproducible
  frame <- Boston[c("crim", names)]
  frame$crim <- ifelse(Boston$crim > median(Boston$crim), 1, 0)
  train_ind <- sample.int(n = nrow(frame), size = floor(0.75 * nrow(frame)))
  train <- frame[train_ind, ]
  test <- frame[-train_ind, ]
  errors <- c(
    getLdaError(train, test),
    getQdaError(train, test),
    getGlmError(train, test),
    unlist(lapply(1:5, getKnnError, train, test))
  )
  min(errors)
}
# Exhaustive search over all non-empty subsets of the candidate
# predictors, keeping the combination with the lowest "best classifier"
# error from getMinError().
allColumns <- c("tax", "rad", "lstat", "nox", "indus")
bestCombination <- allColumns
bestError <- 1
for (subset_size in seq_along(allColumns)) {
  sets <- combn(x = allColumns, subset_size, simplify = FALSE)
  errors <- unlist(lapply(sets, getMinError))
  # FIX: which(errors == min(errors)) could return several indices on a
  # tie, and unlist(sets[minIndex]) would then concatenate multiple
  # subsets; which.min always returns a single index.
  minIndex <- which.min(errors)
  if (min(errors) < bestError) {
    bestError <- min(errors)
    bestCombination <- unlist(sets[minIndex])
  }
}
print(bestCombination)
print(bestError)
| /classification/classification_lab_13.R | no_license | AnatoliiStepaniuk/ISLR | R | false | false | 1,980 | r | library("MASS")
# Fit LDA on `train` (response: crim) and return the test misclassification rate.
getLdaError <- function(train, test){
  fit <- lda(crim ~ ., data = train)
  predicted <- predict(fit, test)$class
  mean(predicted != test$crim)
}

# Fit QDA on `train` (response: crim) and return the test misclassification rate.
getQdaError <- function(train, test){
  fit <- qda(crim ~ ., data = train)
  predicted <- predict(fit, test)$class
  mean(predicted != test$crim)
}

# Logistic regression misclassification rate with a 0.5 probability cutoff.
# FIX: predict() on a glm returns log-odds by default; type = "response"
# is required so the 0.5 cutoff is applied to probabilities.
getGlmError <- function(train, test){
  glm.fit <- glm(crim ~ ., data = train, family = "binomial")
  glm.probs <- predict(glm.fit, test, type = "response")
  glm.pred <- rep(0, times = nrow(test))
  glm.pred[glm.probs > 0.5] <- 1
  mean(glm.pred != test$crim)
}

# k-NN misclassification rate on standardized predictors.
# FIX: knn() lives in the `class` package, which this script never attaches.
getKnnError <- function(k, train, test){
  crim_ind <- which(colnames(train) == "crim")
  knn.pred <- class::knn(scale(train[, -crim_ind]), scale(test[, -crim_ind]),
                         train[, crim_ind], k = k)
  mean(knn.pred != test$crim)
}

# Binarise Boston$crim at its median, split 75/25, and return the lowest
# test error across LDA, QDA, logistic regression and k-NN (k = 1..5).
getMinError <- function(names){
  set.seed(1)  # fixed seed so the split is reproducible
  frame <- Boston[c("crim", names)]
  frame$crim <- ifelse(Boston$crim > median(Boston$crim), 1, 0)
  train_ind <- sample.int(n = nrow(frame), size = floor(0.75 * nrow(frame)))
  train <- frame[train_ind, ]
  test <- frame[-train_ind, ]
  errors <- c(
    getLdaError(train, test),
    getQdaError(train, test),
    getGlmError(train, test),
    unlist(lapply(1:5, getKnnError, train, test))
  )
  min(errors)
}

# Exhaustive search over all non-empty predictor subsets, keeping the
# combination with the lowest "best classifier" error.
allColumns <- c("tax", "rad", "lstat", "nox", "indus")
bestCombination <- allColumns
bestError <- 1
for (subset_size in seq_along(allColumns)) {
  sets <- combn(x = allColumns, subset_size, simplify = FALSE)
  errors <- unlist(lapply(sets, getMinError))
  # FIX: which() could return several tied indices; which.min returns one.
  minIndex <- which.min(errors)
  if (min(errors) < bestError) {
    bestError <- min(errors)
    bestCombination <- unlist(sets[minIndex])
  }
}
print(bestCombination)
print(bestError)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retain.R
\name{retain}
\alias{retain}
\title{Decides if a file should be retained or removed based on its status.}
\usage{
retain(
meta_files,
make_decision = c("maxi", "mini", "unique"),
Status = "Status",
CellspML = "CellspML"
)
}
\arguments{
\item{meta_files}{dataframe from meta file that has been preprocessed by the
\code{\link{goodFcs}} function.}
\item{make_decision}{decision to be made should more than one
\eqn{cells/\mu L} be good.}
\item{Status}{column name in meta_files containing status obtained from the
\code{\link{goodFcs}} function.}
\item{CellspML}{column name in meta_files containing \eqn{cells/\mu L}
measurements.}
}
\value{
a character vector with entries "Retain" for a file to be retained
or "No!" for a file to be discarded.
}
\description{
Function to determine what files to retain and finally read
from the flow cytometer FCS file.
}
\details{
It is typically not known in advance which dilution level would
result in the desired \eqn{cells/\mu L}, therefore
the samples are run through the flow cytometer at two or more
dilution levels. Out of these, one has to decide which
to retain and finally use for further analysis. This function and
\code{\link{goodFcs}} are to help you decide that.
If more than one of the dilution levels are judged good,
the option \emph{make_decision = "maxi"} will give "Retain" to the
row with the maximum \eqn{cells/\mu L} while the opposite occurs
for \emph{make_decision = "mini"}. \emph{make_decision = "unique"}
if there is only one measurement for that particular sample,
while \emph{make_decision = "maxi"}
and \emph{make_decision = "mini"} should be used for files with more
than one measurement for the sample in question.
}
\examples{
require("stringr")
metadata <- system.file("extdata", "2019-03-25_Rstarted.csv",
package = "cyanoFilter",
mustWork = TRUE)
metafile <- read.csv(metadata, skip = 7, stringsAsFactors = FALSE,
check.names = TRUE, encoding = "UTF-8")
metafile <- metafile[, seq_len(65)] #first 65 columns contain useful information
#extract the part of the Sample.ID that corresponds to BS4 or BS5
metafile$Sample.ID2 <- stringr::str_extract(metafile$Sample.ID, "BS*[4-5]")
#clean up the Cells.muL column
names(metafile)[which(stringr::str_detect(names(metafile), "Cells."))] <-
"CellspML"
metafile$Status <- cyanoFilter::goodFcs(metafile = metafile, col_cpml =
"CellspML",
mxd_cellpML = 1000, mnd_cellpML = 50)
metafile$Retained <- NULL
# first 3 rows contain BS4 measurements at 3 dilution levels
metafile$Retained[seq_len(3)] <-
cyanoFilter::retain(meta_files = metafile[seq_len(3),],
make_decision = "maxi",
Status = "Status", CellspML = "CellspML")
# last 3 rows contain BS5 measurements at 3 dilution levels as well
metafile$Retained[seq(4, 6, by = 1)] <-
cyanoFilter::retain(meta_files = metafile[seq(4, 6, by = 1),],
make_decision = "maxi",
Status = "Status", CellspML = "CellspML")
}
\seealso{
\code{\link{goodFcs}}
}
| /man/retain.Rd | no_license | fomotis/cyanoFilter | R | false | true | 3,214 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retain.R
\name{retain}
\alias{retain}
\title{Decides if a file should be retained or removed based on its status.}
\usage{
retain(
meta_files,
make_decision = c("maxi", "mini", "unique"),
Status = "Status",
CellspML = "CellspML"
)
}
\arguments{
\item{meta_files}{dataframe from meta file that has been preprocessed by the
\code{\link{goodFcs}} function.}
\item{make_decision}{decision to be made should more than one
\eqn{cells/\mu L} be good.}
\item{Status}{column name in meta_files containing status obtained from the
\code{\link{goodFcs}} function.}
\item{CellspML}{column name in meta_files containing \eqn{cells/\mu L}
measurements.}
}
\value{
a character vector with entries "Retain" for a file to be retained
or "No!" for a file to be discarded.
}
\description{
Function to determine what files to retain and finally read
from the flow cytometer FCS file.
}
\details{
It is typically not known in advance which dilution level would
result in the desired \eqn{cells/\mu L}, therefore
the samples are ran through the flow cytometer at two or more
dilution levels. Out of these, one has to decide which
to retain and finally use for further analysis. This function and
\code{\link{goodFcs}} are to help you decide that.
If more than one of the dilution levels are judged good,
the option \emph{make_decision = "maxi"} will give "Retain" to the
row with the maximum \eqn{cells/\mu L} while the opposite occurs
for \emph{make_decision = "mini"}. \emph{make_decision = "unique"}
i there is only one measurement for that particular sample,
while \emph{make_decision = "maxi"}
and \emph{make_decision = "mini"} should be used for files with more
than one measurement for the sample in question.
}
\examples{
require("stringr")
metadata <- system.file("extdata", "2019-03-25_Rstarted.csv",
package = "cyanoFilter",
mustWork = TRUE)
metafile <- read.csv(metadata, skip = 7, stringsAsFactors = FALSE,
check.names = TRUE, encoding = "UTF-8")
metafile <- metafile[, seq_len(65)] #first 65 columns contain useful information
#extract the part of the Sample.ID that corresponds to BS4 or BS5
metafile$Sample.ID2 <- stringr::str_extract(metafile$Sample.ID, "BS*[4-5]")
#clean up the Cells.muL column
names(metafile)[which(stringr::str_detect(names(metafile), "Cells."))] <-
"CellspML"
metafile$Status <- cyanoFilter::goodFcs(metafile = metafile, col_cpml =
"CellspML",
mxd_cellpML = 1000, mnd_cellpML = 50)
metafile$Retained <- NULL
# first 3 rows contain BS4 measurements at 3 dilution levels
metafile$Retained[seq_len(3)] <-
cyanoFilter::retain(meta_files = metafile[seq_len(3),],
make_decision = "maxi",
Status = "Status", CellspML = "CellspML")
# last 3 rows contain BS5 measurements at 3 dilution levels as well
metafile$Retained[seq(4, 6, by = 1)] <-
cyanoFilter::retain(meta_files = metafile[seq(4, 6, by = 1),],
make_decision = "maxi",
Status = "Status", CellspML = "CellspML")
}
\seealso{
\code{\link{goodFcs}}
}
|
mydata <- read.csv('2002to2016Annual_Summary.csv',header=TRUE,na.strings=c("","-",NA))
msubdata <- read.csv('msubdata.csv',row.names = 1)
mysubdata <- as.matrix(msubdata)
mset <- read.table('b.txt')
myperiod <- read.table('subperiod.txt')
data <- read.table('vitalchart.txt')
names(data)[2] <- "Region"
data$Region <- as.factor(data$Region)
# subdata <- subset(mydata[,c(1,16,18)])
# library(doBy)
# csubdata <- summaryBy(Num_Customers_Affected ~ Began_Year + Event_Type, data = subdata,
# FUN = function(x){c(mean(x))})
# names(csubdata)[3]<-"m.Num"
# msubdata <- reshape(csubdata, timevar = "Event_Type", idvar = "Began_Year", direction = "wide")
# attr(msubdata, "row.names") <- msubdata$Began_Year
# msubdata <- msubdata[,-1]
# write.csv(msubdata,"D:/mxs92/Documents/Innovizo/app/msubdata.csv")
# subdata2 <- subset(mydata[,c(1,12)])
# csubdata2 <- table(subdata2)
# csubdata2[,2] <- substr(csubdata2[,2], 1, 6)
# a <- csubdata2[1:15,]
# b <- as.matrix(a)
# colnames(b) <- substr(colnames(b),1,15)
# write.table(b,"D:/mxs92/Documents/Innovizo/app/b.txt")
# subperiod <- subset(mydata[,c(6:10,12)])
# write.table(subperiod,"D:/mxs92/Documents/Innovizo/app/subperiod.txt")
# vital <- subset(mydata[,c(1,14,17,18)])
# library(doBy)
# vitalg <- summaryBy(Num_Customers_Affected + Demand_Loss_MW ~ Began_Year + NERC_Region, data = vital,
# FUN = function(x){c(mean(x))})
# names(vitalg)[3] <- "CustomerAffected"
# names(vitalg)[4] <- "DemandLossMW"
# write.table(vitalg,"D:/mxs92/Documents/Innovizo/app/vitalchart.txt") | /Global.R | no_license | XiusiMa/ShinyProject | R | false | false | 1,607 | r | mydata <- read.csv('2002to2016Annual_Summary.csv',header=TRUE,na.strings=c("","-",NA))
msubdata <- read.csv('msubdata.csv',row.names = 1)
mysubdata <- as.matrix(msubdata)
mset <- read.table('b.txt')
myperiod <- read.table('subperiod.txt')
data <- read.table('vitalchart.txt')
names(data)[2] <- "Region"
data$Region <- as.factor(data$Region)
# subdata <- subset(mydata[,c(1,16,18)])
# library(doBy)
# csubdata <- summaryBy(Num_Customers_Affected ~ Began_Year + Event_Type, data = subdata,
# FUN = function(x){c(mean(x))})
# names(csubdata)[3]<-"m.Num"
# msubdata <- reshape(csubdata, timevar = "Event_Type", idvar = "Began_Year", direction = "wide")
# attr(msubdata, "row.names") <- msubdata$Began_Year
# msubdata <- msubdata[,-1]
# write.csv(msubdata,"D:/mxs92/Documents/Innovizo/app/msubdata.csv")
# subdata2 <- subset(mydata[,c(1,12)])
# csubdata2 <- table(subdata2)
# csubdata2[,2] <- substr(csubdata2[,2], 1, 6)
# a <- csubdata2[1:15,]
# b <- as.matrix(a)
# colnames(b) <- substr(colnames(b),1,15)
# write.table(b,"D:/mxs92/Documents/Innovizo/app/b.txt")
# subperiod <- subset(mydata[,c(6:10,12)])
# write.table(subperiod,"D:/mxs92/Documents/Innovizo/app/subperiod.txt")
# vital <- subset(mydata[,c(1,14,17,18)])
# library(doBy)
# vitalg <- summaryBy(Num_Customers_Affected + Demand_Loss_MW ~ Began_Year + NERC_Region, data = vital,
# FUN = function(x){c(mean(x))})
# names(vitalg)[3] <- "CustomerAffected"
# names(vitalg)[4] <- "DemandLossMW"
# write.table(vitalg,"D:/mxs92/Documents/Innovizo/app/vitalchart.txt") |
##################################################
### Demo code for Unit 2 of Stat243,
### "Data input/output and webscraping"
### Chris Paciorek, August 2019
##################################################
## @knitr
#####################################################
# 2: Reading data from text files into R
#####################################################
### 2.1 Core R functions
## @knitr readcsv
dat <- read.table(file.path('..', 'data', 'RTADataSub.csv'),
sep = ',', header = TRUE)
sapply(dat, class)
## whoops, there is an 'x', presumably indicating missingness:
unique(dat[ , 2])
## let's treat 'x' as a missing value indicator
dat2 <- read.table(file.path('..', 'data', 'RTADataSub.csv'),
sep = ',', header = TRUE,
na.strings = c("NA", "x"))
unique(dat2[ ,2])
## hmmm, what happened to the blank values this time?
which(dat[ ,2] == "")
dat2[which(dat[, 2] == "")[1], ] # pull out a line with a missing string
# using 'colClasses'
sequ <- read.table(file.path('..', 'data', 'hivSequ.csv'),
sep = ',', header = TRUE,
colClasses = c('integer','integer','character',
'character','numeric','integer'))
## let's make sure the coercion worked - sometimes R is obstinant
sapply(sequ, class)
## that made use of the fact that a data frame is a list
## @knitr readLines
dat <- readLines(file.path('..', 'data', 'precip.txt'))
id <- as.factor(substring(dat, 4, 11) )
year <- substring(dat, 18, 21)
year[1:5]
class(year)
year <- as.integer(substring(dat, 18, 21))
month <- as.integer(substring(dat, 22, 23))
nvalues <- as.integer(substring(dat, 28, 30))
## @knitr connections
dat <- readLines(pipe("ls -al"))
dat <- read.table(pipe("unzip dat.zip"))
dat <- read.csv(gzfile("dat.csv.gz"))
dat <- readLines("http://www.stat.berkeley.edu/~paciorek/index.html")
## @knitr curl
wikip1 <- readLines("https://wikipedia.org")
wikip2 <- readLines(url("https://wikipedia.org"))
library(curl)
wikip3 <- readLines(curl("https://wikipedia.org"))
## @knitr streaming
con <- file(file.path("..", "data", "precip.txt"), "r")
## "r" for 'read' - you can also open files for writing with "w"
## (or "a" for appending)
class(con)
blockSize <- 1000 # obviously this would be large in any real application
nLines <- 300000
for(i in 1:ceiling(nLines / blockSize)){
lines <- readLines(con, n = blockSize)
# manipulate the lines and store the key stuff
}
close(con)
## @knitr stream-curl
URL <- "https://www.stat.berkeley.edu/share/paciorek/2008.csv.gz"
con <- gzcon(curl(URL, open = "r"))
## url() in place of curl() works too
for(i in 1:8) {
print(i)
print(system.time(tmp <- readLines(con, n = 100000)))
print(tmp[1])
}
close(con)
## @knitr text-connection
dat <- readLines('../data/precip.txt')
con <- textConnection(dat[1], "r")
read.fwf(con, c(3,8,4,2,4,2))
## @knitr
### 2.2 File paths
## @knitr relative-paths
dat <- read.csv('../data/cpds.csv')
## @knitr path-separators
## good: will work on Windows
dat <- read.csv('../data/cpds.csv')
## bad: won't work on Mac or Linux
dat <- read.csv('..\\data\\cpds.csv')
## @knitr file.path
## good: operating-system independent
dat <- read.csv(file.path('..', 'data', 'cpds.csv'))
## @knitr
### 2.3 The readr package
## @knitr readr
library(readr)
## I'm violating the rule about absolute paths here!!
## (airline.csv is big enough that I don't want to put it in the
## course repository)
setwd('~/staff/workshops/r-bootcamp-2018/data')
system.time(dat <- read.csv('airline.csv', stringsAsFactors = FALSE))
system.time(dat2 <- read_csv('airline.csv'))
## @knitr
#####################################################
# 3: Webscraping and working with HTML, XML, and JSON
#####################################################
## 3.1 Reading HTML
## @knitr https
library(rvest) # uses xml2
URL <- "https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population"
html <- read_html(URL)
tbls <- html_table(html_nodes(html, "table"))
sapply(tbls, nrow)
pop <- tbls[[1]]
head(pop)
## @knitr https-pipe
library(magrittr)
tbls <- URL %>% read_html("table") %>% html_table()
## @knitr htmlLinks
URL <- "http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/by_year"
## approach 1: search for elements with href attribute
links <- read_html(URL) %>% html_nodes("[href]") %>% html_attr('href')
## approach 2: search for HTML 'a' tags
links <- read_html(URL) %>% html_nodes("a") %>% html_attr('href')
head(links, n = 10)
## @knitr XPath
## find all 'a' elements that have attribute href; then
## extract the 'href' attribute
links <- read_html(URL) %>% html_nodes(xpath = "//a[@href]") %>%
html_attr('href')
head(links)
## we can extract various information
listOfANodes <- read_html(URL) %>% html_nodes(xpath = "//a[@href]")
listOfANodes %>% html_attr('href') %>% head(n = 10)
listOfANodes %>% html_name() %>% head(n = 10)
listOfANodes %>% html_text() %>% head(n = 10)
## @knitr XPath2
URL <- "https://www.nytimes.com"
headlines <- read_html(URL) %>% html_nodes("h2") %>% html_text()
head(headlines)
## @knitr
### 3.2 XML
## @knitr xml
library(xml2)
doc <- read_xml("https://api.kivaws.org/v1/loans/newest.xml")
data <- as_list(doc)
names(data)
names(data$response)
length(data$response$loans)
data$response$loans[[2]][c('name', 'activity',
'sector', 'location', 'loan_amount')]
## alternatively, extract only the 'loans' info (and use pipes)
loansNode <- doc %>% xml_nodes('loans')
loanInfo <- loansNode %>% xml_children() %>% as_list()
length(loanInfo)
names(loanInfo[[1]])
names(loanInfo[[1]]$location)
## suppose we only want the country locations of the loans (using XPath)
xml_find_all(loansNode, '//location//country') %>% xml_text()
## or extract the geographic coordinates
xml_find_all(loansNode, '//location//geo/pairs')
## @knitr
### 3.3 Reading JSON
## @knitr json
library(jsonlite)
data <- fromJSON("http://api.kivaws.org/v1/loans/newest.json")
names(data)
class(data$loans) # nice!
head(data$loans)
## @knitr
### 3.4 Using web APIs to get data
## @knitr
### 3.4.3 REST- and SOAP-based web services
## @knitr REST
times <- c(2080, 2099)
countryCode <- 'USA'
baseURL <- "http://climatedataapi.worldbank.org/climateweb/rest/v1/country"
##" http://climatedataapi.worldbank.org/climateweb/rest/v1/country"
type <- "mavg"
var <- "pr"
data <- read.csv(paste(baseURL, type, var, times[1], times[2],
paste0(countryCode, '.csv'), sep = '/'))
head(data)
### 3.4.4 HTTP requests by deconstructing an (undocumented) API
## @knitr http-byURL
## example URL:
## http://data.un.org/Handlers/DownloadHandler.ashx?DataFilter=itemCode:526;
##year:2012,2013,2014,2015,2016,2017&DataMartId=FAO&Format=csv&c=2,4,5,6,7&
##s=countryName:asc,elementCode:asc,year:desc
itemCode <- 526
baseURL <- "http://data.un.org/Handlers/DownloadHandler.ashx"
yrs <- paste(as.character(2012:2017), collapse = ",")
filter <- paste0("?DataFilter=itemCode:", itemCode, ";year:", yrs)
args1 <- "&DataMartId=FAO&Format=csv&c=2,3,4,5,6,7&"
args2 <- "s=countryName:asc,elementCode:asc,year:desc"
url <- paste0(baseURL, filter, args1, args2)
## if the website provided a CSV we could just do this:
## apricots <- read.csv(url)
## but it zips the file
temp <- tempfile() ## give name for a temporary file
download.file(url, temp)
dat <- read.csv(unzip(temp)) ## using a connection (see Section 2)
head(dat)
## @knitr
### 3.4.5 More details on http requests
## @knitr http-get2
library(httr)
output2 <- GET(baseURL, query = list(
DataFilter = paste0("itemCode:", itemCode, ";year:", yrs),
DataMartID = "FAO", Format = "csv", c = "2,3,4,5,6,7",
s = "countryName:asc,elementCode:asc,year:desc"))
temp <- tempfile() ## give name for a temporary file
writeBin(content(output2, 'raw'), temp) ## write out as zip file
dat <- read.csv(unzip(temp))
head(dat)
## @knitr http-post
if(url.exists('http://www.wormbase.org/db/searches/advanced/dumper')) {
x = postForm('http://www.wormbase.org/db/searches/advanced/dumper',
species="briggsae",
list="",
flank3="0",
flank5="0",
feature="Gene Models",
dump = "Plain TEXT",
orientation = "Relative to feature",
relative = "Chromsome",
DNA ="flanking sequences only",
.cgifields = paste(c("feature", "orientation", "DNA",
"dump","relative"), collapse=", "))
}
## @knitr
#####################################################
# 4: File and string encodings
#####################################################
## @knitr ascii
## 39 in hexadecimal is '9'
## 0a is a newline (at least in Linux/Mac)
## 3a is ':'
x <- as.raw(c('0x4d','0x6f', '0x6d','0x0a')) ## i.e., "Mom\n" in ascii
x
charToRaw('Mom\n:')
writeBin(x, 'tmp.txt')
readLines('tmp.txt')
system('ls -l tmp.txt', intern = TRUE)
system('cat tmp.txt')
## @knitr unicode-example
## n-tilde and division symbol as Unicode 'code points'
x2 <- 'Pe\u00f1a 3\u00f72'
Encoding(x2)
x2
writeBin(x2, 'tmp2.txt')
## here n-tilde and division symbol take up two bytes
## but there is an extraneous null byte in there; not sure why
system('ls -l tmp2.txt')
## so the system knows how to interpret the UTF-8 encoded file
## and represent the Unicode character on the screen:
system('cat tmp2.txt')
## @knitr locale
Sys.getlocale()
## @knitr iconv
text <- "Melhore sua seguran\xe7a"
Encoding(text)
Encoding(text) <- "latin1"
text ## this prints out correctly in R, but is not correct in the PDF
text <- "Melhore sua seguran\xe7a"
textUTF8 <- iconv(text, from = "latin1", to = "UTF-8")
Encoding(textUTF8)
textUTF8
iconv(text, from = "latin1", to = "ASCII", sub = "???")
## @knitr encoding
x <- "fa\xE7ile"
Encoding(x) <- "latin1"
x
## playing around...
x <- "\xa1 \xa2 \xa3 \xf1 \xf2"
Encoding(x) <- "latin1"
x
## @knitr encoding-error
load('../data/IPs.RData') # loads in an object named 'text'
tmp <- substring(text, 1, 15)
## the issue occurs with the 6402th element (found by trial and error):
tmp <- substring(text[1:6401],1,15)
tmp <- substring(text[1:6402],1,15)
text[6402] # note the Latin-1 character
table(Encoding(text))
## Option 1
Encoding(text) <- "latin1"
tmp <- substring(text, 1, 15)
tmp[6402]
## Option 2
load('../data/IPs.RData') # loads in an object named 'text'
tmp <- substring(text, 1, 15)
text <- iconv(text, from = "latin1", to = "UTF-8")
tmp <- substring(text, 1, 15)
## @knitr
#####################################################
# 5: Output from R
#####################################################
### 5.2 Formatting output
## @knitr print
val <- 1.5
cat('My value is ', val, '.\n', sep = '')
print(paste('My value is ', val, '.', sep = ''))
## @knitr cat
## input
x <- 7
n <- 5
## display powers
cat("Powers of", x, "\n")
cat("exponent result\n\n")
result <- 1
for (i in 1:n) {
result <- result * x
cat(format(i, width = 8), format(result, width = 10),
"\n", sep = "")
}
x <- 7
n <- 5
## display powers
cat("Powers of", x, "\n")
cat("exponent result\n\n")
result <- 1
for (i in 1:n) {
result <- result * x
cat(i, '\t', result, '\n', sep = '')
}
## @knitr sprintf
temps <- c(12.5, 37.234324, 1342434324.79997234, 2.3456e-6, 1e10)
sprintf("%9.4f C", temps)
city <- "Boston"
sprintf("The temperature in %s was %.4f C.", city, temps[1])
sprintf("The temperature in %s was %9.4f C.", city, temps[1])
| /units/unit2-dataTech.R | no_license | feihua813/stat243-fall-2020 | R | false | false | 11,536 | r | ##################################################
### Demo code for Unit 2 of Stat243,
### "Data input/output and webscraping"
### Chris Paciorek, August 2019
##################################################
## @knitr
#####################################################
# 2: Reading data from text files into R
#####################################################
### 2.1 Core R functions
## @knitr readcsv
dat <- read.table(file.path('..', 'data', 'RTADataSub.csv'),
sep = ',', header = TRUE)
sapply(dat, class)
## whoops, there is an 'x', presumably indicating missingness:
unique(dat[ , 2])
## let's treat 'x' as a missing value indicator
dat2 <- read.table(file.path('..', 'data', 'RTADataSub.csv'),
sep = ',', header = TRUE,
na.strings = c("NA", "x"))
unique(dat2[ ,2])
## hmmm, what happened to the blank values this time?
which(dat[ ,2] == "")
dat2[which(dat[, 2] == "")[1], ] # pull out a line with a missing string
# using 'colClasses'
sequ <- read.table(file.path('..', 'data', 'hivSequ.csv'),
sep = ',', header = TRUE,
colClasses = c('integer','integer','character',
'character','numeric','integer'))
## let's make sure the coercion worked - sometimes R is obstinant
sapply(sequ, class)
## that made use of the fact that a data frame is a list
## @knitr readLines
dat <- readLines(file.path('..', 'data', 'precip.txt'))
id <- as.factor(substring(dat, 4, 11) )
year <- substring(dat, 18, 21)
year[1:5]
class(year)
year <- as.integer(substring(dat, 18, 21))
month <- as.integer(substring(dat, 22, 23))
nvalues <- as.integer(substring(dat, 28, 30))
## @knitr connections
dat <- readLines(pipe("ls -al"))
dat <- read.table(pipe("unzip dat.zip"))
dat <- read.csv(gzfile("dat.csv.gz"))
dat <- readLines("http://www.stat.berkeley.edu/~paciorek/index.html")
## @knitr curl
wikip1 <- readLines("https://wikipedia.org")
wikip2 <- readLines(url("https://wikipedia.org"))
library(curl)
wikip3 <- readLines(curl("https://wikipedia.org"))
## @knitr streaming
con <- file(file.path("..", "data", "precip.txt"), "r")
## "r" for 'read' - you can also open files for writing with "w"
## (or "a" for appending)
class(con)
blockSize <- 1000 # obviously this would be large in any real application
nLines <- 300000
for(i in 1:ceiling(nLines / blockSize)){
lines <- readLines(con, n = blockSize)
# manipulate the lines and store the key stuff
}
close(con)
## @knitr stream-curl
URL <- "https://www.stat.berkeley.edu/share/paciorek/2008.csv.gz"
con <- gzcon(curl(URL, open = "r"))
## url() in place of curl() works too
for(i in 1:8) {
print(i)
print(system.time(tmp <- readLines(con, n = 100000)))
print(tmp[1])
}
close(con)
## @knitr text-connection
dat <- readLines('../data/precip.txt')
con <- textConnection(dat[1], "r")
read.fwf(con, c(3,8,4,2,4,2))
## @knitr
### 2.2 File paths
## @knitr relative-paths
dat <- read.csv('../data/cpds.csv')
## @knitr path-separators
## good: will work on Windows
dat <- read.csv('../data/cpds.csv')
## bad: won't work on Mac or Linux
dat <- read.csv('..\\data\\cpds.csv')
## @knitr file.path
## good: operating-system independent
dat <- read.csv(file.path('..', 'data', 'cpds.csv'))
## @knitr
### 2.3 The readr package
## @knitr readr
library(readr)
## I'm violating the rule about absolute paths here!!
## (airline.csv is big enough that I don't want to put it in the
## course repository)
setwd('~/staff/workshops/r-bootcamp-2018/data')
system.time(dat <- read.csv('airline.csv', stringsAsFactors = FALSE))
system.time(dat2 <- read_csv('airline.csv'))
## @knitr
#####################################################
# 3: Webscraping and working with HTML, XML, and JSON
#####################################################
## 3.1 Reading HTML
## @knitr https
library(rvest) # uses xml2
URL <- "https://en.wikipedia.org/wiki/List_of_countries_and_dependencies_by_population"
html <- read_html(URL)
tbls <- html_table(html_nodes(html, "table"))
sapply(tbls, nrow)
pop <- tbls[[1]]
head(pop)
## @knitr https-pipe
library(magrittr)
tbls <- URL %>% read_html("table") %>% html_table()
## @knitr htmlLinks
URL <- "http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/by_year"
## approach 1: search for elements with href attribute
links <- read_html(URL) %>% html_nodes("[href]") %>% html_attr('href')
## approach 2: search for HTML 'a' tags
links <- read_html(URL) %>% html_nodes("a") %>% html_attr('href')
head(links, n = 10)
## @knitr XPath
## find all 'a' elements that have attribute href; then
## extract the 'href' attribute
links <- read_html(URL) %>% html_nodes(xpath = "//a[@href]") %>%
html_attr('href')
head(links)
## we can extract various information
listOfANodes <- read_html(URL) %>% html_nodes(xpath = "//a[@href]")
listOfANodes %>% html_attr('href') %>% head(n = 10)
listOfANodes %>% html_name() %>% head(n = 10)
listOfANodes %>% html_text() %>% head(n = 10)
## @knitr XPath2
URL <- "https://www.nytimes.com"
headlines <- read_html(URL) %>% html_nodes("h2") %>% html_text()
head(headlines)
## @knitr
### 3.2 XML
## @knitr xml
library(xml2)
doc <- read_xml("https://api.kivaws.org/v1/loans/newest.xml")
data <- as_list(doc)
names(data)
names(data$response)
length(data$response$loans)
data$response$loans[[2]][c('name', 'activity',
'sector', 'location', 'loan_amount')]
## alternatively, extract only the 'loans' info (and use pipes)
loansNode <- doc %>% xml_nodes('loans')
loanInfo <- loansNode %>% xml_children() %>% as_list()
length(loanInfo)
names(loanInfo[[1]])
names(loanInfo[[1]]$location)
## suppose we only want the country locations of the loans (using XPath)
xml_find_all(loansNode, '//location//country') %>% xml_text()
## or extract the geographic coordinates
xml_find_all(loansNode, '//location//geo/pairs')
## @knitr
### 3.3 Reading JSON
## @knitr json
library(jsonlite)
data <- fromJSON("http://api.kivaws.org/v1/loans/newest.json")
names(data)
class(data$loans) # nice!
head(data$loans)
## @knitr
### 3.4 Using web APIs to get data
## @knitr
### 3.4.3 REST- and SOAP-based web services
## @knitr REST
times <- c(2080, 2099)
countryCode <- 'USA'
baseURL <- "http://climatedataapi.worldbank.org/climateweb/rest/v1/country"
##" http://climatedataapi.worldbank.org/climateweb/rest/v1/country"
type <- "mavg"
var <- "pr"
data <- read.csv(paste(baseURL, type, var, times[1], times[2],
paste0(countryCode, '.csv'), sep = '/'))
head(data)
### 3.4.4 HTTP requests by deconstructing an (undocumented) API
## @knitr http-byURL
## example URL:
## http://data.un.org/Handlers/DownloadHandler.ashx?DataFilter=itemCode:526;
##year:2012,2013,2014,2015,2016,2017&DataMartId=FAO&Format=csv&c=2,4,5,6,7&
##s=countryName:asc,elementCode:asc,year:desc
itemCode <- 526
baseURL <- "http://data.un.org/Handlers/DownloadHandler.ashx"
yrs <- paste(as.character(2012:2017), collapse = ",")
filter <- paste0("?DataFilter=itemCode:", itemCode, ";year:", yrs)
args1 <- "&DataMartId=FAO&Format=csv&c=2,3,4,5,6,7&"
args2 <- "s=countryName:asc,elementCode:asc,year:desc"
url <- paste0(baseURL, filter, args1, args2)
## if the website provided a CSV we could just do this:
## apricots <- read.csv(url)
## but it zips the file
temp <- tempfile() ## give name for a temporary file
download.file(url, temp)
dat <- read.csv(unzip(temp)) ## using a connection (see Section 2)
head(dat)
## @knitr
### 3.4.5 More details on http requests
## @knitr http-get2
library(httr)
output2 <- GET(baseURL, query = list(
DataFilter = paste0("itemCode:", itemCode, ";year:", yrs),
DataMartID = "FAO", Format = "csv", c = "2,3,4,5,6,7",
s = "countryName:asc,elementCode:asc,year:desc"))
temp <- tempfile() ## give name for a temporary file
writeBin(content(output2, 'raw'), temp) ## write out as zip file
dat <- read.csv(unzip(temp))
head(dat)
## @knitr http-post
if(url.exists('http://www.wormbase.org/db/searches/advanced/dumper')) {
x = postForm('http://www.wormbase.org/db/searches/advanced/dumper',
species="briggsae",
list="",
flank3="0",
flank5="0",
feature="Gene Models",
dump = "Plain TEXT",
orientation = "Relative to feature",
relative = "Chromsome",
DNA ="flanking sequences only",
.cgifields = paste(c("feature", "orientation", "DNA",
"dump","relative"), collapse=", "))
}
## @knitr
#####################################################
# 4: File and string encodings
#####################################################
## @knitr ascii
## 39 in hexadecimal is '9'
## 0a is a newline (at least in Linux/Mac)
## 3a is ':'
x <- as.raw(c('0x4d','0x6f', '0x6d','0x0a')) ## i.e., "Mom\n" in ascii
x
charToRaw('Mom\n:')
writeBin(x, 'tmp.txt')
readLines('tmp.txt')
system('ls -l tmp.txt', intern = TRUE)
system('cat tmp.txt')
## @knitr unicode-example
## n-tilde and division symbol as Unicode 'code points'
x2 <- 'Pe\u00f1a 3\u00f72'
Encoding(x2)
x2
writeBin(x2, 'tmp2.txt')
## here n-tilde and division symbol take up two bytes
## but there is an extraneous null byte in there; not sure why
system('ls -l tmp2.txt')
## so the system knows how to interpret the UTF-8 encoded file
## and represent the Unicode character on the screen:
system('cat tmp2.txt')
## @knitr locale
Sys.getlocale()
## @knitr iconv
text <- "Melhore sua seguran\xe7a"
Encoding(text)
Encoding(text) <- "latin1"
text ## this prints out correctly in R, but is not correct in the PDF
text <- "Melhore sua seguran\xe7a"
textUTF8 <- iconv(text, from = "latin1", to = "UTF-8")
Encoding(textUTF8)
textUTF8
iconv(text, from = "latin1", to = "ASCII", sub = "???")
## @knitr encoding
x <- "fa\xE7ile"
Encoding(x) <- "latin1"
x
## playing around...
x <- "\xa1 \xa2 \xa3 \xf1 \xf2"
Encoding(x) <- "latin1"
x
## @knitr encoding-error
load('../data/IPs.RData') # loads in an object named 'text'
tmp <- substring(text, 1, 15)
## the issue occurs with the 6402th element (found by trial and error):
tmp <- substring(text[1:6401],1,15)
tmp <- substring(text[1:6402],1,15)
text[6402] # note the Latin-1 character
table(Encoding(text))
## Option 1
Encoding(text) <- "latin1"
tmp <- substring(text, 1, 15)
tmp[6402]
## Option 2
load('../data/IPs.RData') # loads in an object named 'text'
tmp <- substring(text, 1, 15)
text <- iconv(text, from = "latin1", to = "UTF-8")
tmp <- substring(text, 1, 15)
## @knitr
#####################################################
# 5: Output from R
#####################################################
### 5.2 Formatting output
## @knitr print
val <- 1.5
cat('My value is ', val, '.\n', sep = '')
print(paste('My value is ', val, '.', sep = ''))
## @knitr cat
## input
x <- 7
n <- 5
## display powers
cat("Powers of", x, "\n")
cat("exponent result\n\n")
result <- 1
for (i in 1:n) {
result <- result * x
cat(format(i, width = 8), format(result, width = 10),
"\n", sep = "")
}
x <- 7
n <- 5
## display powers
cat("Powers of", x, "\n")
cat("exponent result\n\n")
result <- 1
for (i in 1:n) {
result <- result * x
cat(i, '\t', result, '\n', sep = '')
}
## @knitr sprintf
temps <- c(12.5, 37.234324, 1342434324.79997234, 2.3456e-6, 1e10)
sprintf("%9.4f C", temps)
city <- "Boston"
sprintf("The temperature in %s was %.4f C.", city, temps[1])
sprintf("The temperature in %s was %9.4f C.", city, temps[1])
|
\alias{gtkOptionMenuGetHistory}
\name{gtkOptionMenuGetHistory}
\title{gtkOptionMenuGetHistory}
\description{
Retrieves the index of the currently selected menu item. The menu
items are numbered from top to bottom, starting with 0.
\strong{WARNING: \code{gtk_option_menu_get_history} has been deprecated since version 2.4 and should not be used in newly-written code. Use \code{\link{GtkComboBox}} instead.}
}
\usage{gtkOptionMenuGetHistory(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkOptionMenu}}] a \code{\link{GtkOptionMenu}}}}
\value{[integer] index of the selected menu item, or -1 if there are no menu items}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkOptionMenuGetHistory.Rd | no_license | cran/RGtk2.10 | R | false | false | 700 | rd | \alias{gtkOptionMenuGetHistory}
\name{gtkOptionMenuGetHistory}
\title{gtkOptionMenuGetHistory}
\description{
Retrieves the index of the currently selected menu item. The menu
items are numbered from top to bottom, starting with 0.
\strong{WARNING: \code{gtk_option_menu_get_history} has been deprecated since version 2.4 and should not be used in newly-written code. Use \code{\link{GtkComboBox}} instead.}
}
\usage{gtkOptionMenuGetHistory(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkOptionMenu}}] a \code{\link{GtkOptionMenu}}}}
\value{[integer] index of the selected menu item, or -1 if there are no menu items}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
library(diversitree)
phy <- read.tree("tree.tree")
mydata2 <- read.csv ("mydata.csv")
states <- as.character(mydata2$Species)
names(states) <- mydata2$Area
setdiff(phy$tip.label, states)
setdiff(states,phy$tip.label)
phy$tip.state <- names(states)[match(phy$tip.label,states)]
names(phy$tip.state) <- states[match(phy$tip.label,states)]
p <- starting.point.geosse(phy)
p
phy$tip.state[phy$tip.state == "A"] <- 1
phy$tip.state[phy$tip.state == "B"] <- 2
phy$tip.state[phy$tip.state == "AB"] <- 0
phy$tip.state <- as.numeric(phy$tip.state)
names(phy$tip.state) <- states[match(phy$tip.label,states)]
lik1 <- make.geosse(phy, phy$tip.state, sampling.f = c(0.6,0.6,0.6))
lik2 <- constrain(lik1, sAB ~ 0)
lik3 <- constrain(lik1, sA ~ sB, xA ~ xB)
ml1 <- find.mle(lik1, p)
p <- coef(ml1)
ml2 <- find.mle(lik2, p[argnames(lik2)])
ml3 <- find.mle(lik3, p[argnames(lik3)])
round(rbind(full = coef(ml1),no.sAB = coef(ml2, TRUE),eq.div = coef(ml3, TRUE)), 3)
anova(ml1, no.sAB = ml2, eq.div = ml3)
p <- coef(ml2)
prior <- make.prior.exponential(1/2)
set.seed(1)
tmp <- mcmc(lik2, p, nsteps=1000, prior=prior, w=1, print.every=0)
w <- diff(sapply(tmp[2:7], quantile, c(0.025, 0.975)))
mcmc2 <- mcmc(lik2, p, nsteps=1000000, prior=prior, w=w)
save.image(file="samples2.RData")
| /combpurebirthanalyses/run2/script2.R | no_license | amesclir/LinumDiversification | R | false | false | 1,279 | r | library(diversitree)
phy <- read.tree("tree.tree")
mydata2 <- read.csv ("mydata.csv")
states <- as.character(mydata2$Species)
names(states) <- mydata2$Area
setdiff(phy$tip.label, states)
setdiff(states,phy$tip.label)
phy$tip.state <- names(states)[match(phy$tip.label,states)]
names(phy$tip.state) <- states[match(phy$tip.label,states)]
p <- starting.point.geosse(phy)
p
phy$tip.state[phy$tip.state == "A"] <- 1
phy$tip.state[phy$tip.state == "B"] <- 2
phy$tip.state[phy$tip.state == "AB"] <- 0
phy$tip.state <- as.numeric(phy$tip.state)
names(phy$tip.state) <- states[match(phy$tip.label,states)]
lik1 <- make.geosse(phy, phy$tip.state, sampling.f = c(0.6,0.6,0.6))
lik2 <- constrain(lik1, sAB ~ 0)
lik3 <- constrain(lik1, sA ~ sB, xA ~ xB)
ml1 <- find.mle(lik1, p)
p <- coef(ml1)
ml2 <- find.mle(lik2, p[argnames(lik2)])
ml3 <- find.mle(lik3, p[argnames(lik3)])
round(rbind(full = coef(ml1),no.sAB = coef(ml2, TRUE),eq.div = coef(ml3, TRUE)), 3)
anova(ml1, no.sAB = ml2, eq.div = ml3)
p <- coef(ml2)
prior <- make.prior.exponential(1/2)
set.seed(1)
tmp <- mcmc(lik2, p, nsteps=1000, prior=prior, w=1, print.every=0)
w <- diff(sapply(tmp[2:7], quantile, c(0.025, 0.975)))
mcmc2 <- mcmc(lik2, p, nsteps=1000000, prior=prior, w=w)
save.image(file="samples2.RData")
|
library(animation)

# Animated zoom into the 3-D pollen point cloud (requires the 'rgl' package).
oopt = ani.options(interval = 0.05)
if (require('rgl')) {
  ## adjust the view: a fixed user matrix so every run starts from the
  ## same camera orientation
  uM =
    matrix(c(-0.370919227600098, -0.513357102870941,
             -0.773877620697021, 0, -0.73050606250763, 0.675815105438232,
             -0.0981751680374146, 0, 0.573396027088165, 0.528906404972076,
             -0.625681936740875, 0, 0, 0, 0, 1), 4, 4)
  open3d(userMatrix = uM, windowRect = c(10, 10, 510, 510))
  plot3d(pollen[, 1:3])

  ## zoom factors from far (1) down to close (0.045)
  zm = seq(1, 0.045, length = 200)
  par3d(zoom = 1)
  ## FIX: seq_along(zm) instead of 1:length(zm) -- behaves identically here
  ## and is safe should the zoom sequence ever be empty
  for (i in seq_along(zm)) {
    par3d(zoom = zm[i])
    ## remove the comment if you want to save the snapshots
    ## rgl.snapshot(paste(formatC(i, width = 3, flag = 0), ".png", sep = ""))
    ani.pause()
  }
} else warning("You have to install the 'rgl' package to view this demo.")
ani.options(oopt)
| /demo/pollen.R | no_license | snowdj/animation | R | false | false | 851 | r | library(animation)
oopt = ani.options(interval = 0.05)
if (require('rgl')) {
## ajust the view
uM =
matrix(c(-0.370919227600098, -0.513357102870941,
-0.773877620697021, 0, -0.73050606250763, 0.675815105438232,
-0.0981751680374146, 0, 0.573396027088165, 0.528906404972076,
-0.625681936740875, 0, 0, 0, 0, 1), 4, 4)
open3d(userMatrix = uM, windowRect = c(10, 10, 510, 510))
plot3d(pollen[, 1:3])
zm = seq(1, 0.045, length = 200)
par3d(zoom = 1)
for (i in 1:length(zm)) {
par3d(zoom = zm[i])
## remove the comment if you want to save the snapshots
## rgl.snapshot(paste(formatC(i, width = 3, flag = 0), ".png", sep = ""))
ani.pause()
}
} else warning("You have to install the 'rgl' package to view this demo.")
ani.options(oopt)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{leaf-attr}
\alias{leaf-attr}
\alias{is_pos}
\alias{is_neg}
\title{Attributes of an Expression Leaf}
\usage{
is_pos(object)
is_neg(object)
}
\arguments{
\item{object}{A \linkS4class{Leaf} object.}
}
\value{
A logical value.
}
\description{
Determine whether an expression is positive or negative.
}
| /CVXR/man/leaf-attr.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 389 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{leaf-attr}
\alias{leaf-attr}
\alias{is_pos}
\alias{is_neg}
\title{Attributes of an Expression Leaf}
\usage{
is_pos(object)
is_neg(object)
}
\arguments{
\item{object}{A \linkS4class{Leaf} object.}
}
\value{
A logical value.
}
\description{
Determine if an expression is positive or negative.
}
|
# Pull nutrient data from the USDA NDB API.
# Help site: https://ndb.nal.usda.gov/ndb/doc/index#
library(httr)
library(tidyverse)
library(jsonlite)
library(tidyjson)

# API keys (`key`, `key2`) are defined in this local, un-committed file.
source("./other/api/key.R")

# Get all foods.
# Max per request is 1500 (default is 50), so specify 1500; `offset` would
# select the beginning row.  `subset=1` restricts to the most common foods --
# otherwise a 1:1500 query only covers foods from "a" to "beef".
dat <- fromJSON(paste0("http://api.nal.usda.gov/ndb/nutrients/?format=json&api_key=",
                key2, "&subset=1&max=1500&nutrients=205&nutrients=204&nutrients=208&nutrients=269"),
                flatten = TRUE) # same if false

# Grab the foods report.
all_foods <- as_tibble(dat$report$foods)

# Make all gm/value elements in the `nutrients` list-column characters so
# that we can unnest it.
# FIX: seq_along() replaces 1:length(...) (safe for empty lists) and the
# hard-coded 1:4 inner bound now follows the actual number of nutrient
# rows returned for each food.
for (i in seq_along(all_foods$nutrients)) {
  for (j in seq_along(all_foods$nutrients[[i]]$gm)) {
    all_foods$nutrients[[i]]$gm[j] <- as.character(all_foods$nutrients[[i]]$gm[j])
    all_foods$nutrients[[i]]$value[j] <- as.character(all_foods$nutrients[[i]]$value[j])
  }
}

# Unnest it.
all_foods <- all_foods %>% unnest()

# Code NAs ("--" is the API's missing-value marker).
all_foods <- all_foods %>%
  mutate(
    gm = ifelse(gm == "--", NA, gm),
    value = ifelse(value == "--", NA, value)
  )

# --------------------- set datatypes --------------------
# numeric: ndbno, nutrient_id, value, gm
all_foods$ndbno <- as.numeric(all_foods$ndbno)
all_foods$nutrient_id <- as.numeric(all_foods$nutrient_id)
all_foods$value <- as.numeric(all_foods$value)
all_foods$gm <- as.numeric(all_foods$gm)

# factors: name, nutrient, unit
all_foods$name <- factor(all_foods$name)
all_foods$nutrient <- factor(all_foods$nutrient)
all_foods$unit <- factor(all_foods$unit)

# value: 100 g equivalent value of the nutrient
# get per gram
# ---------

# Order by most sugar.
fried <- all_foods %>%
  filter(
    nutrient == "Sugars, total"
  ) %>%
  arrange(
    desc(gm)
  )

by_nutrient <- all_foods %>%
  group_by(
    nutrient
  ) %>%
  arrange(
    desc(value)
  )

# NOTE(review): this request uses `key` while the first used `key2` --
# confirm both are defined in key.R and that the difference is intentional.
all_nutrients <- fromJSON(paste0("http://api.nal.usda.gov/ndb/nutrients/?format=json&api_key=",
                key, "&subset=1&max=1500&nutrients=205&nutrients=204&nutrients=208&nutrients=269"),
                flatten = TRUE) # same if false
| /other/api/connect.R | no_license | aedobbyn/menu-builder | R | false | false | 2,294 | r | # pull in data from the usda db
# help site: https://ndb.nal.usda.gov/ndb/doc/index#
library(httr)
library(tidyverse)
library(jsonlite)
library(tidyjson)
source("./other/api/key.R")
# get all foods
# max per request is 1500, default is 50 so specify 1500
# use offset to specify beginning row
# set subset to 1 so get most common foods. else a 1:1500 query only brings you from a to beef
dat <- fromJSON(paste0("http://api.nal.usda.gov/ndb/nutrients/?format=json&api_key=",
key2, "&subset=1&max=1500&nutrients=205&nutrients=204&nutrients=208&nutrients=269"),
flatten = TRUE) # same if false
# grab the foods report
all_foods <- as_tibble(dat$report$foods)
# make all gm elements in the nutrients list-column characters so that we can unnest
# this list-column
for (i in 1:length(all_foods$nutrients)) {
for (j in 1:4) {
all_foods$nutrients[[i]]$gm[j] <- as.character(all_foods$nutrients[[i]]$gm[j])
all_foods$nutrients[[i]]$value[j] <- as.character(all_foods$nutrients[[i]]$value[j])
}
}
# unnest it
all_foods <- all_foods %>% unnest()
# code NAs
all_foods <- all_foods %>%
mutate(
gm = ifelse(gm == "--", NA, gm),
value = ifelse(value == "--", NA, value)
)
# --------------------- set datatypes --------------------
# numeric: ndbno, nutrient_id, value, gm
all_foods$ndbno <- as.numeric(all_foods$ndbno)
all_foods$nutrient_id <- as.numeric(all_foods$nutrient_id)
all_foods$value <- as.numeric(all_foods$value)
all_foods$gm <- as.numeric(all_foods$gm)
# factors: name, nutrient, unit
all_foods$name <- factor(all_foods$name)
all_foods$nutrient <- factor(all_foods$nutrient)
all_foods$unit <- factor(all_foods$unit)
# value: 100 g equivalent value of the nutrient
# get per gram
# ---------
# order by most sugar
fried <- all_foods %>%
filter(
nutrient == "Sugars, total"
) %>%
arrange(
desc(gm)
)
by_nutrient <- all_foods %>%
group_by(
nutrient
) %>%
arrange(
desc(value)
)
all_nutrients <- fromJSON(paste0("http://api.nal.usda.gov/ndb/nutrients/?format=json&api_key=",
key, "&subset=1&max=1500&nutrients=205&nutrients=204&nutrients=208&nutrients=269"),
flatten = TRUE) # same if false
|
# Install required packages (one-time setup).
# NOTE(review): several packages are installed more than once below
# (dplyr, ggplot2, ggthemes, reshape2, wordcloud); installing inside a
# script re-downloads on every run -- consider guarding each with a
# requireNamespace() check instead.
install.packages("data.table")
install.packages("dplyr")
install.packages("tidyr")
install.packages("ggplot2")
install.packages("stringr")
install.packages("DT")
install.packages("knitr")
install.packages("grid")
install.packages("gridExtra")
install.packages("corrplot")
install.packages("methods")
#install.packages("Matrix")
install.packages("reshape2")
install.packages("Rcampdf")
install.packages("ggthemes")
install.packages("qdap")
install.packages("dplyr")
install.packages("tm")
install.packages("wordcloud")
install.packages("plotrix")
install.packages("dendextend")
install.packages("ggplot2")
install.packages("ggthemes")
install.packages("RWeka")
install.packages("reshape2")
install.packages("caret")
# Text-mining and modelling libraries.
library(qdap)
library(dplyr)
library(tm)
library(wordcloud)
library(plotrix)
library(dendextend)
library(ggplot2)
library(ggthemes)
library(RWeka)
library(reshape2)
library(quanteda)
library(irlba)
library(e1071)
library(caret)
library(randomForest)
library(rpart)
library(rpart.plot)
library(ggplot2)
library(SnowballC)
library(RColorBrewer)
library(wordcloud)
library(biclust)
library(igraph)
library(fpc)
library(Rcampdf)
# load libraries
# NOTE(review): plyr is attached *after* dplyr here; plyr masks several
# dplyr verbs (summarise, mutate, ...), which is a common source of
# silently-wrong grouped results -- confirm this ordering is intentional.
library(plyr)
library(dtplyr)
library(data.table)
library(ggplot2)
library(tidyverse)
library(lubridate)
library(stringr)
library(rvest)
library(XML)
library(xml2)
library(tidytext)
library(RColorBrewer)
library(wordcloud)
library(DT)
library(gridExtra)
library(devtools)
library(skimr)
library(tm)
library(qdapTools)
library(ggthemes)
library(plot.matrix)
library(dendextend)
library(reshape2)
library(quanteda)
library(corpus)
library(ngram)
# start by loading some libraries (third, partly-overlapping batch)
library(data.table)
library(dplyr)
library(tidyr)
library(ggplot2)
library(stringr)
library(DT)
library(knitr)
library(grid)
library(gridExtra)
library(corrplot)
library(methods)
library(Matrix)
library(reshape2)
# Set up the working directory.
# NOTE(review): hard-coded absolute path -- this only works on the original
# author's machine; prefer here::here() or relative paths.  `current_path`
# also appears unused anywhere in this script.
current_path <- "C:/Users/skamto/Documents/GitHub/CSDA-1050F18S1/skamto_11060"
setwd("C:/Users/skamto/Documents/GitHub/CSDA-1050F18S1/skamto_11060")
setwd("data")
print( getwd() )

# Read CSV files along with header and replace empty values with NA
# (na.strings = c("")) when reading each CSV file.
courses_df <- fread("courses.csv",header = TRUE,na.strings = c("") )
assessments_df <- fread("assessments.csv",header = TRUE,na.strings = c("") )
vle_df <- fread("vle.csv",header = TRUE,na.strings = c("") )
studentInfo_df <- fread("studentInfo.csv",header = TRUE,na.strings = c("") )
studentRegistration_df <- fread("studentRegistration.csv",header = TRUE,na.strings = c("") )
studentAssessment_df <- fread("studentAssessment.csv",header = TRUE,na.strings = c("") )
studentVle_df <- fread("studentVle.csv",header = TRUE,na.strings = c("") )

# The dimensions and structure of the course and student tables.
dim(courses_df)
glimpse(courses_df)
names(courses_df)
dim(studentInfo_df)
glimpse(studentInfo_df)
names(studentInfo_df)

# Step 1: Data Summary -- row counts, then per-column summaries.
cat("The number of observations are", nrow(courses_df))
cat("The number of observations are", nrow(assessments_df))
cat("The number of observations are", nrow(vle_df))
cat("The number of observations are", nrow(studentInfo_df))
cat("The number of observations are", nrow(studentRegistration_df))
cat("The number of observations are", nrow(studentAssessment_df))
cat("The number of observations are", nrow(studentVle_df))
summary(courses_df)
summary(assessments_df)
summary(vle_df)
summary(studentInfo_df)
summary(studentRegistration_df)
summary(studentAssessment_df)
summary(studentVle_df)
# Shared bar-fill colours for the plots below (fillColor is currently unused).
fillColor = "#FFA07A"
fillColor2 = "#FFA07A"

# Students by gender: counts per gender, top 10, horizontal bar chart.
studentInfo_df %>%
  group_by(gender) %>%
  filter(!is.na(gender)) %>%
  summarise(Count = n()) %>%
  ungroup() %>%
  mutate(gender = reorder(gender,Count)) %>%
  arrange(desc(Count)) %>%
  head(10) %>%
  ggplot(aes(x = gender,y = Count)) +
  geom_bar(stat='identity',colour="white", fill = fillColor2) +
  geom_text(aes(x = gender, y = 1, label = paste0("(",Count,")",sep="")),
            hjust=0, vjust=.5, size = 4, colour = 'black',
            fontface = 'bold') +
  labs(x = 'gender',
       y = 'Count',
       title = 'Student by Gender') +
  coord_flip() +
  theme_bw()

# Students by region.
studentInfo_df %>%
  group_by(region) %>%
  filter(!is.na(region)) %>%
  summarise(Count = n()) %>%
  ungroup() %>%
  mutate(region = reorder(region,Count)) %>%
  arrange(desc(Count)) %>%
  head(10) %>%
  ggplot(aes(x = region,y = Count)) +
  geom_bar(stat='identity',colour="white", fill = fillColor2) +
  geom_text(aes(x = region, y = 1, label = paste0("(",Count,")",sep="")),
            hjust=0, vjust=.5, size = 4, colour = 'black',
            fontface = 'bold') +
  labs(x = 'Region',
       y = 'Count',
       title = 'Count of Student by region') +
  coord_flip() +
  theme_bw()

# NOTE(review): the original comment said "student by ages", but this chunk
# groups by region again -- it is an exact duplicate of the plot above and
# the age plot follows next.  Consider deleting this chunk.
studentInfo_df %>%
  group_by(region) %>%
  filter(!is.na(region)) %>%
  summarise(Count = n()) %>%
  ungroup() %>%
  mutate(region = reorder(region,Count)) %>%
  arrange(desc(Count)) %>%
  head(10) %>%
  ggplot(aes(x = region,y = Count)) +
  geom_bar(stat='identity',colour="white", fill = fillColor2) +
  geom_text(aes(x = region, y = 1, label = paste0("(",Count,")",sep="")),
            hjust=0, vjust=.5, size = 4, colour = 'black',
            fontface = 'bold') +
  labs(x = 'Region',
       y = 'Count',
       title = 'Count of Student by region') +
  coord_flip() +
  theme_bw()

# Count of students by age band.
studentInfo_df %>%
  group_by(age_band) %>%
  filter(!is.na(age_band)) %>%
  summarise(Count = n()) %>%
  ungroup() %>%
  mutate(age_band = reorder(age_band,Count)) %>%
  arrange(desc(Count)) %>%
  head(10) %>%
  ggplot(aes(x = age_band,y = Count)) +
  geom_bar(stat='identity',colour="white", fill = fillColor2) +
  geom_text(aes(x = age_band, y = 1, label = paste0("(",Count,")",sep="")),
            hjust=0, vjust=.5, size = 4, colour = 'black',
            fontface = 'bold') +
  labs(x = 'Ages',
       y = 'Count',
       title = 'Count of Student by age_band') +
  coord_flip() +
  theme_bw()

# Count of students by final result.
studentInfo_df %>%
  group_by(final_result) %>%
  filter(!is.na(final_result)) %>%
  summarise(Count = n()) %>%
  ungroup() %>%
  mutate(final_result = reorder(final_result,Count)) %>%
  arrange(desc(Count)) %>%
  head(10) %>%
  ggplot(aes(x = final_result,y = Count)) +
  geom_bar(stat='identity',colour="white", fill = fillColor2) +
  geom_text(aes(x = final_result, y = 1, label = paste0("(",Count,")",sep="")),
            hjust=0, vjust=.5, size = 4, colour = 'black',
            fontface = 'bold') +
  labs(x = 'final_result',
       y = 'Count',
       title = 'Summary of Student final_result') +
  coord_flip() +
  theme_bw()

# Four stacked bar charts arranged in a 2x2 grid.
p2 <- ggplot(studentInfo_df, aes(x = final_result)) + geom_bar(aes(fill = final_result)) +
  theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Accent")
p3 <- ggplot(studentInfo_df, aes(x = age_band)) + geom_bar(aes(fill = age_band)) +
  theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Dark2")
p4 <- ggplot(studentInfo_df, aes(x = final_result)) + geom_bar(aes(fill = region)) +
  theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Accent")
p5 <- ggplot(studentInfo_df, aes(x = age_band)) + geom_bar(aes(fill = highest_education)) +
  theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Dark2")
grid.arrange(p2, p3, p4, p5, nrow=2, ncol=2)
# NOTE: these graphs have problems loading when the plot pane is zoomed.
# Create a function that shows an overview of one product: submission
# method, company response to consumer, timely response, and consumer
# disputed.
#
# FIX: the original (a) preceded the function with a dead
# `EDA.Sub.product <- as.data.frame(table(consumer$Product))` assignment
# that was immediately overwritten by the function definition, (b) ignored
# its `dataframe` argument and read the global `consumer` instead, and
# (c) used a length-greater-than-one vector as an `if()` condition, which
# is an error in R >= 4.2.
EDA.Sub.product <- function(dataframe, prod){
  # complaints for the requested product only
  EDAdf <- dataframe[dataframe$Product == prod,]
  colcount.subproduct = length(unique(dataframe$Sub.product))
  getPalette = colorRampPalette(brewer.pal(8, "Accent"))
  p2.1 <- ggplot(EDAdf, aes(x = Submitted.via)) + geom_bar(aes(fill = Submitted.via)) +
    theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Accent") +
    labs(title = paste("Submission Method for ", prod))
  p3.1 <- ggplot(EDAdf, aes(x = Company.response.to.consumer)) + geom_bar(aes(fill = Company.response.to.consumer)) +
    theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Dark2") +
    labs(title = paste("Company Response to Complaints regarding ", prod))
  p4.1 <- ggplot(EDAdf[EDAdf$Timely.response. %in% "No",], aes(x = factor(1), fill = Sub.product)) + geom_bar(width = 1) +
    coord_polar(theta = "y") + theme(axis.text.x = element_blank(), axis.text.y = element_blank(),
                                     axis.title.y = element_blank(), axis.title.x = element_blank()) +
    scale_fill_brewer(palette = "Set3") +
    labs(title = paste(prod, " failed to responde timely",sep = ""))
  p5.1 <- ggplot(EDAdf[EDAdf$Consumer.disputed %in% "Yes",], aes(x = factor(1), fill = Sub.product)) + geom_bar(width = 1) +
    coord_polar(theta = "y") + theme(axis.text.x = element_blank(), axis.text.y = element_blank(),
                                     axis.title.y = element_blank(), axis.title.x = element_blank()) +
    scale_fill_brewer(palette = "Set3") +
    labs(title = paste(prod, " Complaints that Consumer Disputed", sep=""))
  # products with no sub-product breakdown get only the two bar charts
  if (all(EDAdf$Sub.product == "")) {
    grid.arrange(p2.1, p3.1, nrow=1, ncol=2)
  }
  else{
    grid.arrange(p2.1, p3.1, p4.1, p5.1, nrow=2, ncol=2)
  }
}
| /sprint_1.R | no_license | skamto/Sprint_1 | R | false | false | 9,880 | r |
#intall packages
install.packages("data.table")
install.packages("dplyr")
install.packages("tidyr")
install.packages("ggplot2")
install.packages("stringr")
install.packages("DT")
install.packages("knitr")
install.packages("grid")
install.packages("gridExtra")
install.packages("corrplot")
install.packages("methods")
#install.packages("Matrix")
install.packages("reshape2")
install.packages("Rcampdf")
install.packages("ggthemes")
install.packages("qdap")
install.packages("dplyr")
install.packages("tm")
install.packages("wordcloud")
install.packages("plotrix")
install.packages("dendextend")
install.packages("ggplot2")
install.packages("ggthemes")
install.packages("RWeka")
install.packages("reshape2")
install.packages("caret")
library(qdap)
library(dplyr)
library(tm)
library(wordcloud)
library(plotrix)
library(dendextend)
library(ggplot2)
library(ggthemes)
library(RWeka)
library(reshape2)
library(quanteda)
library(irlba)
library(e1071)
library(caret)
library(randomForest)
library(rpart)
library(rpart.plot)
library(ggplot2)
library(SnowballC)
library(RColorBrewer)
library(wordcloud)
library(biclust)
library(igraph)
library(fpc)
library(Rcampdf)
# load libraries
library(plyr)
library(dtplyr)
library(data.table)
library(ggplot2)
library(tidyverse)
library(lubridate)
library(stringr)
library(rvest)
library(XML)
library(xml2)
library(tidytext)
library(RColorBrewer)
library(wordcloud)
library(DT)
library(gridExtra)
library(devtools)
library(skimr)
library(tm)
library(qdapTools)
library(ggthemes)
library(plot.matrix)
library(dendextend)
library(reshape2)
library(quanteda)
library(corpus)
library(ngram)
#start by loading some libraries
library(data.table)
library(dplyr)
library(tidyr)
library(ggplot2)
library(stringr)
library(DT)
library(knitr)
library(grid)
library(gridExtra)
library(corrplot)
library(methods)
library(Matrix)
library(reshape2)
#set up working directory - this will set the working directory to the same folder as your R studio RMD file - ensure that the CSVs outlined below are also in this folder
current_path <- "C:/Users/skamto/Documents/GitHub/CSDA-1050F18S1/skamto_11060"
setwd("C:/Users/skamto/Documents/GitHub/CSDA-1050F18S1/skamto_11060")
setwd("data")
print( getwd() )
# Read CSV files along with header and replace empty values with "NA" when read the CSV file.
courses_df <- fread("courses.csv",header = TRUE,na.strings = c("") )
assessments_df <- fread("assessments.csv",header = TRUE,na.strings = c("") )
vle_df <- fread("vle.csv",header = TRUE,na.strings = c("") )
studentInfo_df <- fread("studentInfo.csv",header = TRUE,na.strings = c("") )
studentRegistration_df <- fread("studentRegistration.csv",header = TRUE,na.strings = c("") )
studentAssessment_df <- fread("studentAssessment.csv",header = TRUE,na.strings = c("") )
studentVle_df <- fread("studentVle.csv",header = TRUE,na.strings = c("") )
# The dimension of the data
dim(courses_df)
glimpse(courses_df)
names(courses_df)
dim(studentInfo_df)
glimpse(studentInfo_df)
names(studentInfo_df)
#Step 1: Data Summary
cat("The number of observations are", nrow(courses_df))
cat("The number of observations are", nrow(assessments_df))
cat("The number of observations are", nrow(vle_df))
cat("The number of observations are", nrow(studentInfo_df))
cat("The number of observations are", nrow(studentRegistration_df))
cat("The number of observations are", nrow(studentAssessment_df))
cat("The number of observations are", nrow(studentVle_df))
summary(courses_df)
summary(assessments_df)
summary(vle_df)
summary(studentInfo_df)
summary(studentRegistration_df)
summary(studentAssessment_df)
summary(studentVle_df)
fillColor = "#FFA07A"
fillColor2 = "#FFA07A"
#student by gender
studentInfo_df %>%
group_by(gender) %>%
filter(!is.na(gender)) %>%
summarise(Count = n()) %>%
ungroup() %>%
mutate(gender = reorder(gender,Count)) %>%
arrange(desc(Count)) %>%
head(10) %>%
ggplot(aes(x = gender,y = Count)) +
geom_bar(stat='identity',colour="white", fill = fillColor2) +
geom_text(aes(x = gender, y = 1, label = paste0("(",Count,")",sep="")),
hjust=0, vjust=.5, size = 4, colour = 'black',
fontface = 'bold') +
labs(x = 'gender',
y = 'Count',
title = 'Student by Gender') +
coord_flip() +
theme_bw()
#student by region
studentInfo_df %>%
group_by(region) %>%
filter(!is.na(region)) %>%
summarise(Count = n()) %>%
ungroup() %>%
mutate(region = reorder(region,Count)) %>%
arrange(desc(Count)) %>%
head(10) %>%
ggplot(aes(x = region,y = Count)) +
geom_bar(stat='identity',colour="white", fill = fillColor2) +
geom_text(aes(x = region, y = 1, label = paste0("(",Count,")",sep="")),
hjust=0, vjust=.5, size = 4, colour = 'black',
fontface = 'bold') +
labs(x = 'Region',
y = 'Count',
title = 'Count of Student by region') +
coord_flip() +
theme_bw()
#student by ages
studentInfo_df %>%
group_by(region) %>%
filter(!is.na(region)) %>%
summarise(Count = n()) %>%
ungroup() %>%
mutate(region = reorder(region,Count)) %>%
arrange(desc(Count)) %>%
head(10) %>%
ggplot(aes(x = region,y = Count)) +
geom_bar(stat='identity',colour="white", fill = fillColor2) +
geom_text(aes(x = region, y = 1, label = paste0("(",Count,")",sep="")),
hjust=0, vjust=.5, size = 4, colour = 'black',
fontface = 'bold') +
labs(x = 'Region',
y = 'Count',
title = 'Count of Student by region') +
coord_flip() +
theme_bw()
#Count of Student by ages
studentInfo_df %>%
group_by(age_band) %>%
filter(!is.na(age_band)) %>%
summarise(Count = n()) %>%
ungroup() %>%
mutate(age_band = reorder(age_band,Count)) %>%
arrange(desc(Count)) %>%
head(10) %>%
ggplot(aes(x = age_band,y = Count)) +
geom_bar(stat='identity',colour="white", fill = fillColor2) +
geom_text(aes(x = age_band, y = 1, label = paste0("(",Count,")",sep="")),
hjust=0, vjust=.5, size = 4, colour = 'black',
fontface = 'bold') +
labs(x = 'Ages',
y = 'Count',
title = 'Count of Student by age_band') +
coord_flip() +
theme_bw()
#Count of Student by final_result
studentInfo_df %>%
group_by(final_result) %>%
filter(!is.na(final_result)) %>%
summarise(Count = n()) %>%
ungroup() %>%
mutate(final_result = reorder(final_result,Count)) %>%
arrange(desc(Count)) %>%
head(10) %>%
ggplot(aes(x = final_result,y = Count)) +
geom_bar(stat='identity',colour="white", fill = fillColor2) +
geom_text(aes(x = final_result, y = 1, label = paste0("(",Count,")",sep="")),
hjust=0, vjust=.5, size = 4, colour = 'black',
fontface = 'bold') +
labs(x = 'final_result',
y = 'Count',
title = 'Summary of Student final_result') +
coord_flip() +
theme_bw()
p2 <- ggplot(studentInfo_df, aes(x = final_result)) + geom_bar(aes(fill = final_result)) +
theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Accent")
p3 <- ggplot(studentInfo_df, aes(x = age_band)) + geom_bar(aes(fill = age_band)) +
theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Dark2")
p4 <- ggplot(studentInfo_df, aes(x = final_result)) + geom_bar(aes(fill = region)) +
theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Accent")
p5 <- ggplot(studentInfo_df, aes(x = age_band)) + geom_bar(aes(fill = highest_education)) +
theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Dark2")
grid.arrange(p2, p3, p4, p5, nrow=2, ncol=2)
# These grahphs are having probles loading when zoomed
# Create a function that shows overview of each product about submission method, company response to consumer, timely response, and consumer disputed.
EDA.Sub.product <- as.data.frame(table(consumer$Product))
EDA.Sub.product <- function(dataframe, prod){
EDAdf <- consumer[consumer$Product == prod,]
colcount.subproduct = length(unique(consumer$Sub.product))
getPalette = colorRampPalette(brewer.pal(8, "Accent"))
p2.1 <- ggplot(EDAdf, aes(x = Submitted.via)) + geom_bar(aes(fill = Submitted.via)) +
theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Accent") +
labs(title = paste("Submission Method for ", prod))
p3.1 <- ggplot(EDAdf, aes(x = Company.response.to.consumer)) + geom_bar(aes(fill = Company.response.to.consumer)) +
theme(axis.text.x = element_blank()) + scale_fill_brewer(palette="Dark2") +
labs(title = paste("Company Response to Complaints regarding ", prod))
p4.1 <- ggplot(EDAdf[EDAdf$Timely.response. %in% "No",], aes(x = factor(1), fill = Sub.product)) + geom_bar(width = 1) +
coord_polar(theta = "y") + theme(axis.text.x = element_blank(), axis.text.y = element_blank(),
axis.title.y = element_blank(), axis.title.x = element_blank()) +
scale_fill_brewer(palette = "Set3") +
labs(title = paste(prod, " failed to responde timely",sep = ""))
p5.1 <- ggplot(EDAdf[EDAdf$Consumer.disputed %in% "Yes",], aes(x = factor(1), fill = Sub.product)) + geom_bar(width = 1) +
coord_polar(theta = "y") + theme(axis.text.x = element_blank(), axis.text.y = element_blank(),
axis.title.y = element_blank(), axis.title.x = element_blank()) +
scale_fill_brewer(palette = "Set3") +
labs(title = paste(prod, " Complaints that Consumer Disputed", sep=""))
if(EDAdf$Sub.product == ""){
grid.arrange(p2.1, p3.1, nrow=1, ncol=2)
}
else{
grid.arrange(p2.1, p3.1, p4.1, p5.1, nrow=2, ncol=2)
}
}
|
#' @param path The location where Miniconda is (or should be) installed. Note
#' that the Miniconda installer does not support paths containing spaces. See
#' [miniconda_path] for more details on the default path used by `reticulate`.
#'
#' @title miniconda-params
#' @keywords internal
#' @name miniconda-params
NULL
#' Install Miniconda
#'
#' Download the [Miniconda](https://docs.conda.io/en/latest/miniconda.html)
#' installer, and use it to install Miniconda.
#'
#' For arm64 builds of R on macOS, `install_miniconda()` will use
#' binaries from [miniforge](https://github.com/conda-forge/miniforge) instead.
#'
#' @inheritParams miniconda-params
#'
#' @param update Boolean; update to the latest version of Miniconda after
#'   installation?
#'
#' @param force Boolean; force re-installation if Miniconda is already installed
#'   at the requested path?
#'
#' @return The path to the Miniconda installation (also emitted as a message).
#'
#' @note If you encounter binary incompatibilities between R and Miniconda, a
#'   scripted build and installation of Python from sources can be performed by
#'   [`install_python()`]
#'
#' @family miniconda-tools
#' @export
install_miniconda <- function(path = miniconda_path(),
                              update = TRUE,
                              force = FALSE)
{
  # bail out early in contexts where installation is not permitted
  check_forbidden_install("Miniconda")

  # the Miniconda installer itself cannot handle paths containing spaces
  if (grepl(" ", path, fixed = TRUE))
    stop("cannot install Miniconda into a path containing spaces")

  # TODO: what behavior when miniconda is already installed?
  # fail? validate installed and matches request? reinstall?
  install_miniconda_preflight(path, force)

  # download the installer
  message("* Installing Miniconda -- please wait a moment ...")
  url <- miniconda_installer_url()
  installer <- miniconda_installer_download(url)

  # run the installer
  miniconda_installer_run(installer, update, path)

  # validate the install succeeded: both the conda binary and a working
  # Python must now be present
  ok <- miniconda_exists(path) && miniconda_test(path)
  if (!ok)
    stopf("Miniconda installation failed [unknown reason]")

  # update to latest version if requested
  if (update)
    miniconda_update(path)

  # create the default "r-reticulate" environment, pinning the requested
  # Python version and bundling numpy
  conda <- miniconda_conda(path)
  python <- miniconda_python_package()
  conda_create("r-reticulate", packages = c(python, "numpy"), conda = conda)

  messagef("* Miniconda has been successfully installed at %s.", pretty_path(path))
  path
}
#' Update Miniconda
#'
#' Update Miniconda to the latest version.
#'
#' @inheritParams miniconda-params
#'
#' @return The exit status of the `conda update` invocation.
#'
#' @family miniconda-tools
#' @export
miniconda_update <- function(path = miniconda_path()) {
  conda <- miniconda_conda(path)
  # scope conda-related PATH changes to this call
  local_conda_paths(conda)
  # non-interactive ("--yes") update of conda itself in the base environment
  system2t(conda, c("update", "--yes", "--name", "base", "conda"))
}
#' Remove Miniconda
#'
#' Uninstall Miniconda by deleting its installation directory.
#'
#' @param path The path in which Miniconda is installed.
#'
#' @family miniconda-tools
#' @export
miniconda_uninstall <- function(path = miniconda_path()) {
  # Recursively delete the installation tree.  unlink() is a no-op when
  # the path does not exist, so repeated calls are harmless.
  unlink(x = path, recursive = TRUE)
}
# Validate that it is OK to (re-)install Miniconda at `path`.  Errors when
# an existing installation is present and `force` is FALSE; otherwise
# returns TRUE invisibly.
install_miniconda_preflight <- function(path, force) {

  # a forced install, or a fresh target directory, always proceeds
  if (force || !file.exists(path))
    return(invisible(TRUE))

  # refuse to clobber an existing Miniconda installation
  if (miniconda_exists(path)) {
    msg <- paste(
      "Miniconda is already installed at path %s.",
      "- Use `reticulate::install_miniconda(force = TRUE)` to overwrite the previous installation.",
      sep = "\n"
    )
    stopf(msg, pretty_path(path))
  }

  # ok to proceed
  invisible(TRUE)
}
# Resolve the download URL for the Miniconda (or Miniforge) installer.
# The "reticulate.miniconda.url" option overrides all platform detection.
miniconda_installer_url <- function(version = "3") {

  url <- getOption("reticulate.miniconda.url")
  if (!is.null(url))
    return(url)

  # TODO: miniconda does not yet have arm64 binaries for macOS,
  # so we'll just use miniforge instead
  info <- as.list(Sys.info())
  if (info$sysname == "Darwin" && info$machine == "arm64") {
    base <- "https://github.com/conda-forge/miniforge/releases/latest/download"
    name <- "Miniforge3-MacOSX-arm64.sh"
    return(file.path(base, name))
  }

  # otherwise, build the canonical repo.anaconda.com URL from the OS name
  # and the (possibly user-overridden) machine architecture
  base <- "https://repo.anaconda.com/miniconda"
  info <- as.list(Sys.info())
  arch <- miniconda_installer_arch(info)
  version <- as.character(version)

  name <- if (is_windows())
    sprintf("Miniconda%s-latest-Windows-%s.exe", version, arch)
  else if (is_osx())
    sprintf("Miniconda%s-latest-MacOSX-%s.sh", version, arch)
  else if (is_linux())
    sprintf("Miniconda%s-latest-Linux-%s.sh", version, arch)
  else
    stopf("unsupported platform %s", shQuote(Sys.info()[["sysname"]]))

  file.path(base, name)
}
# Resolve the architecture component of the Miniconda installer file name.
# Users may override detection via the 'reticulate.miniconda.arch' option.
miniconda_installer_arch <- function(info) {

  # an explicit user override always wins
  override <- getOption("reticulate.miniconda.arch")
  if (!is.null(override))
    return(override)

  # Windows reports "x86-64", but installer URLs spell it "x86_64";
  # any other architecture string is used as-is
  machine <- info$machine
  if (identical(machine, "x86-64")) "x86_64" else machine
}
# Download the Miniconda installer into the session temporary directory,
# reusing a previously-downloaded copy when one is already present.
miniconda_installer_download <- function(url) {

  destination <- file.path(tempdir(), basename(url))

  # skip the download when the installer is already cached for this session
  if (!file.exists(destination)) {
    messagef("* Downloading %s ...", shQuote(url))
    status <- download.file(url, destfile = destination, mode = "wb")
    # download.file() can return non-zero without signalling an R error,
    # so double-check that the file actually landed on disk
    if (!file.exists(destination))
      stopf("download of Miniconda installer failed [status = %i]", status)
  }

  destination
}
# Execute the downloaded Miniconda installer, performing a silent,
# user-local installation into `path`.  Returns `path` invisibly, or
# errors when the installer exits non-zero.
miniconda_installer_run <- function(installer, update, path) {

  args <- if (is_windows()) {

    # the Windows installer needs the target directory to exist and takes
    # /Key=Value style arguments (/S = silent install; short path avoids
    # issues with long/unicode paths)
    dir.create(path, recursive = TRUE, showWarnings = FALSE)
    c(
      "/InstallationType=JustMe",
      "/AddToPath=0",
      "/RegisterPython=0",
      "/NoRegistry=1",
      "/S",
      paste("/D", utils::shortPathName(path), sep = "=")
    )

  } else if (is_unix()) {
    # -b: batch (no prompts); -u: update existing installation; -p: prefix
    c("-b", if (update) "-u", "-p", shQuote(path))
  } else {
    stopf("unsupported platform %s", shQuote(Sys.info()[["sysname"]]))
  }

  # the shell installer must be executable
  Sys.chmod(installer, mode = "0755")

  # work around rpath issues on macOS
  #
  #   dyld: Library not loaded: @rpath/libz.1.dylib
  #   Referenced from: /Users/kevinushey/Library/r-miniconda/conda.exe
  #   Reason: image not found
  #
  # https://github.com/rstudio/reticulate/issues/874
  if (is_osx()) {
    old <- Sys.getenv("DYLD_FALLBACK_LIBRARY_PATH")
    new <- if (nzchar(old))
      paste(old, "/usr/lib", sep = ":")
    else
      "/usr/lib"
    Sys.setenv(DYLD_FALLBACK_LIBRARY_PATH = new)
    # restore the prior value no matter how we exit
    on.exit(Sys.setenv(DYLD_FALLBACK_LIBRARY_PATH = old), add = TRUE)
  }

  if (is_windows())
    status <- system2(installer, args)

  if (is_unix()) {
    # the *.sh installer is a bash script; check for bash before running
    bash_available <- system2("bash", "--version")
    if (bash_available != 0)
      stopf("bash is not available.")
    args <- c(installer, args)
    status <- system2("bash", args)
  }

  if (status != 0)
    stopf("miniconda installation failed [exit code %i]", status)

  invisible(path)
}
#' Path to Miniconda
#'
#' The path to the Miniconda installation to use. By default, an OS-specific
#' path is used. If you'd like to instead set your own path, you can set the
#' `RETICULATE_MINICONDA_PATH` environment variable.
#'
#' @family miniconda
#'
#' @export
miniconda_path <- function() {
  # the environment variable, when set, overrides the platform default
  Sys.getenv("RETICULATE_MINICONDA_PATH", unset = miniconda_path_default())
}
# Compute the default Miniconda installation path for this platform.
miniconda_path_default <- function() {

  # macOS keeps arm64 and x86_64 installations in separate folders, since
  # the two architectures require different Miniconda builds
  if (is_osx()) {
    folder <- if (Sys.info()[["machine"]] == "arm64")
      "r-miniconda-arm64"
    else
      "r-miniconda"
    return(path.expand(file.path("~/Library", folder)))
  }

  # elsewhere, install under the rappdirs per-user data directory
  root <- normalizePath(rappdirs::user_data_dir(), winslash = "/", mustWork = FALSE)
  file.path(root, "r-miniconda")
}
# Report whether a Miniconda installation exists at `path`, as judged by
# the presence of its conda binary.
miniconda_exists <- function(path = miniconda_path()) {
  file.exists(miniconda_conda(path))
}
# Check that the Miniconda installation at `path` is functional: TRUE when
# its Python binary can successfully report a version.  Note that only the
# version query is guarded by tryCatch(); a failure while resolving the
# python binary path itself propagates as an error.
miniconda_test <- function(path = miniconda_path()) {
  python <- python_binary_path(path)
  status <- tryCatch(python_version(python), error = identity)
  !inherits(status, "error")
}
# Path to the conda executable inside a Miniconda installation.
miniconda_conda <- function(path = miniconda_path()) {
  if (is_windows())
    file.path(path, "condabin/conda.bat")
  else
    file.path(path, "bin/conda")
}
# Path to a named conda environment inside a Miniconda installation.
# When `env` is NULL, the RETICULATE_MINICONDA_ENVNAME environment
# variable is consulted, falling back to "r-reticulate".
miniconda_envpath <- function(env = NULL, path = miniconda_path()) {
  if (is.null(env))
    env <- Sys.getenv("RETICULATE_MINICONDA_ENVNAME", unset = "r-reticulate")
  file.path(path, "envs", env)
}
# Location of the JSON file recording reticulate's Miniconda metadata.
miniconda_meta_path <- function() {
  file.path(rappdirs::user_data_dir("r-reticulate"), "miniconda.json")
}
# Read reticulate's persisted Miniconda metadata.  Always returns a list:
# an empty list when the file is missing or cannot be parsed (parse errors
# are downgraded to a warning rather than propagated).
miniconda_meta_read <- function() {

  path <- miniconda_meta_path()
  if (!file.exists(path))
    return(list())

  json <- tryCatch(
    jsonlite::read_json(path),
    error = warning
  )

  # read_json() yields a list on success; anything else (e.g. the value
  # produced by the warning handler) falls through to the empty default
  if (is.list(json))
    return(json)

  list()
}
miniconda_meta_write <- function(data) {
  # Serialize `data` to pretty-printed JSON and persist it to the
  # miniconda metadata file, creating parent directories as needed.
  path <- miniconda_meta_path()
  # showWarnings = FALSE: dir.create() otherwise emits a warning on every
  # call after the first, once the directory already exists
  dir.create(dirname(path), recursive = TRUE, showWarnings = FALSE)
  json <- jsonlite::toJSON(data, auto_unbox = TRUE, pretty = TRUE)
  writeLines(json, con = path)
}
miniconda_installable <- function() {
  # TRUE unless the user previously opted out of the installation prompt
  # (recorded by miniconda_install_prompt()).
  meta <- miniconda_meta_read()
  !identical(meta$DisableInstallationPrompt, TRUE)
}
miniconda_install_prompt <- function() {
  # Interactively offer to install Miniconda. Returns TRUE after a
  # successful install, FALSE when the session is non-interactive or the
  # user declines (declining permanently disables this prompt via the
  # metadata file).
  if (!is_interactive())
    return(FALSE)
  text <- paste(
    "No non-system installation of Python could be found.",
    "Would you like to download and install Miniconda?",
    "Miniconda is an open source environment management system for Python.",
    "See https://docs.conda.io/en/latest/miniconda.html for more details.",
    "",
    sep = "\n"
  )
  message(text)
  response <- readline("Would you like to install Miniconda? [Y/n]: ")
  # re-prompt until an intelligible answer arrives; bare <Enter> means "yes"
  repeat {
    ch <- tolower(substring(response, 1, 1))
    if (ch == "y" || ch == "") {
      install_miniconda()
      return(TRUE)
    }
    if (ch == "n") {
      # record the opt-out so the prompt is not shown again
      meta <- miniconda_meta_read()
      meta$DisableInstallationPrompt <- TRUE
      miniconda_meta_write(meta)
      message("Installation aborted.")
      return(FALSE)
    }
    response <- readline("Please answer yes or no: ")
  }
}
# the default environment path to use for miniconda, overridable via the
# RETICULATE_MINICONDA_PYTHON_ENVPATH environment variable
miniconda_python_envpath <- function() {
  Sys.getenv(
    "RETICULATE_MINICONDA_PYTHON_ENVPATH",
    unset = miniconda_envpath()
  )
}
# the version of python to use in the environment ("3.8" unless overridden
# via the RETICULATE_MINICONDA_PYTHON_VERSION environment variable)
miniconda_python_version <- function() {
  Sys.getenv("RETICULATE_MINICONDA_PYTHON_VERSION", unset = "3.8")
}
miniconda_python_package <- function() {
  # conda package spec pinning the Python version, e.g. "python=3.8"
  paste("python", miniconda_python_version(), sep = "=")
}
miniconda_enabled <- function() {
  # Is automatic Miniconda usage enabled? FALSE when the user has set
  # RETICULATE_MINICONDA_ENABLED to "false"/"0" (case-insensitive) or has
  # previously declined the installation prompt.
  enabled <- Sys.getenv("RETICULATE_MINICONDA_ENABLED", unset = "TRUE")
  if (tolower(enabled) %in% c("false", "0"))
    return(FALSE)
  miniconda_installable()
}
| /R/miniconda.R | permissive | chainsawriot/reticulate | R | false | false | 10,555 | r |
#' @param path The location where Miniconda is (or should be) installed. Note
#' that the Miniconda installer does not support paths containing spaces. See
#' [miniconda_path] for more details on the default path used by `reticulate`.
#'
#' @title miniconda-params
#' @keywords internal
#' @name miniconda-params
NULL
#' Install Miniconda
#'
#' Download the [Miniconda](https://docs.conda.io/en/latest/miniconda.html)
#' installer, and use it to install Miniconda.
#'
#' For arm64 builds of R on macOS, `install_miniconda()` will use
#' binaries from [miniforge](https://github.com/conda-forge/miniforge) instead.
#'
#' @inheritParams miniconda-params
#'
#' @param update Boolean; update to the latest version of Miniconda after
#' installation?
#'
#' @param force Boolean; force re-installation if Miniconda is already installed
#' at the requested path?
#'
#' @note If you encounter binary incompatibilities between R and Miniconda, a
#' scripted build and installation of Python from sources can be performed by
#' [`install_python()`]
#'
#' @family miniconda-tools
#' @export
install_miniconda <- function(path = miniconda_path(),
                              update = TRUE,
                              force = FALSE)
{
  # Download the Miniconda installer, run it, verify the result, and
  # provision the default "r-reticulate" conda environment.
  # Returns `path` (the installation root) on success; errors otherwise.
  check_forbidden_install("Miniconda")
  # the Miniconda installer does not support paths containing spaces
  if (grepl(" ", path, fixed = TRUE))
    stop("cannot install Miniconda into a path containing spaces")
  # TODO: what behavior when miniconda is already installed?
  # fail? validate installed and matches request? reinstall?
  install_miniconda_preflight(path, force)
  # download the installer
  message("* Installing Miniconda -- please wait a moment ...")
  url <- miniconda_installer_url()
  installer <- miniconda_installer_download(url)
  # run the installer
  miniconda_installer_run(installer, update, path)
  # validate the install succeeded: conda binary present and python runs
  ok <- miniconda_exists(path) && miniconda_test(path)
  if (!ok)
    stopf("Miniconda installation failed [unknown reason]")
  # update to latest version if requested
  if (update)
    miniconda_update(path)
  # create the default r-reticulate environment, pinning the Python
  # version chosen by miniconda_python_version()
  conda <- miniconda_conda(path)
  python <- miniconda_python_package()
  conda_create("r-reticulate", packages = c(python, "numpy"), conda = conda)
  messagef("* Miniconda has been successfully installed at %s.", pretty_path(path))
  path
}
#' Update Miniconda
#'
#' Update Miniconda to the latest version.
#'
#' @inheritParams miniconda-params
#'
#' @family miniconda-tools
#' @export
miniconda_update <- function(path = miniconda_path()) {
  # Update the conda package in the base environment to its latest version.
  conda <- miniconda_conda(path)
  local_conda_paths(conda)
  system2t(conda, c("update", "--yes", "--name", "base", "conda"))
}
#' Remove Miniconda
#'
#' Uninstall Miniconda.
#'
#' @param path The path in which Miniconda is installed.
#'
#' @family miniconda-tools
#' @export
miniconda_uninstall <- function(path = miniconda_path()) {
  # Remove the Miniconda installation by deleting its directory tree.
  # Returns unlink()'s status code (0 on success).
  unlink(path, recursive = TRUE)
}
install_miniconda_preflight <- function(path, force) {

  # Validate that it is safe to install Miniconda at `path`; errors when
  # an existing installation would be clobbered without `force`.
  # Always returns TRUE, invisibly, when installation may proceed.

  # forced installs and missing directories are always fine; only an
  # existing directory that actually contains Miniconda is a problem
  if (!force && file.exists(path) && miniconda_exists(path)) {
    fmt <- paste(
      "Miniconda is already installed at path %s.",
      "- Use `reticulate::install_miniconda(force = TRUE)` to overwrite the previous installation.",
      sep = "\n"
    )
    stopf(fmt, pretty_path(path))
  }

  invisible(TRUE)
}
miniconda_installer_url <- function(version = "3") {

  # Resolve the download URL for the Miniconda (or, on Apple Silicon,
  # Miniforge) installer appropriate for this platform.
  # `version`: major Miniconda version, interpolated into the file name.
  # The option "reticulate.miniconda.url" overrides all detection.
  url <- getOption("reticulate.miniconda.url")
  if (!is.null(url))
    return(url)

  # snapshot the platform info once and reuse it below (previously this
  # was queried twice)
  info <- as.list(Sys.info())

  # TODO: miniconda does not yet have arm64 binaries for macOS,
  # so we'll just use miniforge instead
  if (info$sysname == "Darwin" && info$machine == "arm64") {
    base <- "https://github.com/conda-forge/miniforge/releases/latest/download"
    name <- "Miniforge3-MacOSX-arm64.sh"
    return(file.path(base, name))
  }

  base <- "https://repo.anaconda.com/miniconda"
  arch <- miniconda_installer_arch(info)
  version <- as.character(version)

  # installer file names follow the pattern Miniconda<ver>-latest-<OS>-<arch>
  name <- if (is_windows())
    sprintf("Miniconda%s-latest-Windows-%s.exe", version, arch)
  else if (is_osx())
    sprintf("Miniconda%s-latest-MacOSX-%s.sh", version, arch)
  else if (is_linux())
    sprintf("Miniconda%s-latest-Linux-%s.sh", version, arch)
  else
    stopf("unsupported platform %s", shQuote(Sys.info()[["sysname"]]))

  file.path(base, name)
}
miniconda_installer_arch <- function(info) {

  # Resolve the architecture component of the Miniconda installer
  # filename from a Sys.info()-style list. A user-specified option
  # always wins.
  override <- getOption("reticulate.miniconda.arch")
  if (!is.null(override))
    return(override)

  # Miniconda download URLs spell the Windows architecture "x86_64",
  # whereas Sys.info() reports it as "x86-64" -- translate that one case
  # and pass every other machine string through unchanged.
  machine <- info$machine
  if (identical(machine, "x86-64")) "x86_64" else machine
}
miniconda_installer_download <- function(url) {
  # Download the installer at `url` into tempdir() and return the local
  # file path; errors when the file is absent after the download attempt.
  # reuse an already-existing installer
  installer <- file.path(tempdir(), basename(url))
  if (file.exists(installer))
    return(installer)
  # doesn't exist; try to download it
  messagef("* Downloading %s ...", shQuote(url))
  # mode = "wb": the installer is a binary payload
  status <- download.file(url, destfile = installer, mode = "wb")
  if (!file.exists(installer)) {
    fmt <- "download of Miniconda installer failed [status = %i]"
    stopf(fmt, status)
  }
  # download successful; provide file path
  installer
}
miniconda_installer_run <- function(installer, update, path) {

  # Execute a downloaded Miniconda installer, installing into `path`.
  # `update` requests an in-place update of an existing installation on
  # POSIX platforms. Errors if the installer exits with a non-zero
  # status; returns `path` invisibly on success.

  # build the platform-specific argument list; unknown platforms fail here
  args <- if (is_windows()) {
    dir.create(path, recursive = TRUE, showWarnings = FALSE)
    c(
      "/InstallationType=JustMe",
      "/AddToPath=0",
      "/RegisterPython=0",
      "/NoRegistry=1",
      "/S",
      paste("/D", utils::shortPathName(path), sep = "=")
    )
  } else if (is_unix()) {
    c("-b", if (update) "-u", "-p", shQuote(path))
  } else {
    stopf("unsupported platform %s", shQuote(Sys.info()[["sysname"]]))
  }

  # the installer may have been downloaded without the execute bit set
  Sys.chmod(installer, mode = "0755")

  # work around rpath issues on macOS
  #
  # dyld: Library not loaded: @rpath/libz.1.dylib
  #   Referenced from: /Users/kevinushey/Library/r-miniconda/conda.exe
  #   Reason: image not found
  #
  # https://github.com/rstudio/reticulate/issues/874
  if (is_osx()) {
    old <- Sys.getenv("DYLD_FALLBACK_LIBRARY_PATH")
    new <- if (nzchar(old))
      paste(old, "/usr/lib", sep = ":")
    else
      "/usr/lib"
    Sys.setenv(DYLD_FALLBACK_LIBRARY_PATH = new)
    on.exit(Sys.setenv(DYLD_FALLBACK_LIBRARY_PATH = old), add = TRUE)
  }

  if (is_windows()) {
    status <- system2(installer, args)
  } else {
    # the POSIX installers are shell scripts that require bash; probe for
    # it with Sys.which() instead of spawning `bash --version`, which
    # would print the version banner into the user's console
    if (!nzchar(Sys.which("bash")))
      stopf("bash is not available.")
    status <- system2("bash", c(installer, args))
  }

  if (status != 0)
    stopf("miniconda installation failed [exit code %i]", status)

  invisible(path)
}
#' Path to Miniconda
#'
#' The path to the Miniconda installation to use. By default, an OS-specific
#' path is used. If you'd like to instead set your own path, you can set the
#' `RETICULATE_MINICONDA_PATH` environment variable.
#'
#' @family miniconda
#'
#' @export
miniconda_path <- function() {
  # Miniconda root directory; RETICULATE_MINICONDA_PATH overrides the
  # platform default computed by miniconda_path_default().
  Sys.getenv("RETICULATE_MINICONDA_PATH", unset = miniconda_path_default())
}
miniconda_path_default <- function() {

  # Compute the default Miniconda installation directory for this platform.

  # macOS keeps a dedicated, per-architecture folder under ~/Library
  if (is_osx()) {
    arm64 <- identical(Sys.info()[["machine"]], "arm64")
    home <- if (arm64) "~/Library/r-miniconda-arm64" else "~/Library/r-miniconda"
    return(path.expand(home))
  }

  # elsewhere, nest under the rappdirs per-user data directory
  data_dir <- rappdirs::user_data_dir()
  root <- normalizePath(data_dir, winslash = "/", mustWork = FALSE)
  file.path(root, "r-miniconda")
}
miniconda_exists <- function(path = miniconda_path()) {
  # TRUE when a conda binary is present under `path`, i.e. Miniconda
  # appears to be installed there.
  conda <- miniconda_conda(path)
  file.exists(conda)
}
miniconda_test <- function(path = miniconda_path()) {
  # Smoke-test an installation: TRUE when its python binary can report a
  # version without error.
  python <- python_binary_path(path)
  # error = identity captures the condition object so we can inspect its class
  status <- tryCatch(python_version(python), error = identity)
  !inherits(status, "error")
}
miniconda_conda <- function(path = miniconda_path()) {
  # Locate the conda executable inside a Miniconda installation.
  # Windows ships a batch wrapper; POSIX systems use bin/conda.
  if (is_windows()) {
    file.path(path, "condabin/conda.bat")
  } else {
    file.path(path, "bin/conda")
  }
}
miniconda_envpath <- function(env = NULL, path = miniconda_path()) {
  # Path to a named conda environment inside the installation; when `env`
  # is NULL, RETICULATE_MINICONDA_ENVNAME is consulted ("r-reticulate" default).
  env <- env %||% Sys.getenv("RETICULATE_MINICONDA_ENVNAME", unset = "r-reticulate")
  file.path(path, "envs", env)
}
miniconda_meta_path <- function() {
  # Location of the JSON file holding persistent miniconda metadata
  # (e.g. the DisableInstallationPrompt flag written by the prompt below).
  root <- rappdirs::user_data_dir("r-reticulate")
  file.path(root, "miniconda.json")
}
miniconda_meta_read <- function() {
  # Read the miniconda metadata file; returns an empty list when the file
  # is missing or cannot be parsed.
  path <- miniconda_meta_path()
  if (!file.exists(path))
    return(list())
  # error = warning: downgrade a failed parse to a warning and fall
  # through to the empty-list default below
  json <- tryCatch(
    jsonlite::read_json(path),
    error = warning
  )
  if (is.list(json))
    return(json)
  list()
}
miniconda_meta_write <- function(data) {
  # Serialize `data` to pretty-printed JSON and persist it to the
  # miniconda metadata file, creating parent directories as needed.
  path <- miniconda_meta_path()
  # showWarnings = FALSE: dir.create() otherwise emits a warning on every
  # call after the first, once the directory already exists
  dir.create(dirname(path), recursive = TRUE, showWarnings = FALSE)
  json <- jsonlite::toJSON(data, auto_unbox = TRUE, pretty = TRUE)
  writeLines(json, con = path)
}
miniconda_installable <- function() {
  # TRUE unless the user previously opted out of the installation prompt
  # (recorded by miniconda_install_prompt()).
  meta <- miniconda_meta_read()
  !identical(meta$DisableInstallationPrompt, TRUE)
}
miniconda_install_prompt <- function() {
  # Interactively offer to install Miniconda. Returns TRUE after a
  # successful install, FALSE when the session is non-interactive or the
  # user declines (declining permanently disables this prompt via the
  # metadata file).
  if (!is_interactive())
    return(FALSE)
  text <- paste(
    "No non-system installation of Python could be found.",
    "Would you like to download and install Miniconda?",
    "Miniconda is an open source environment management system for Python.",
    "See https://docs.conda.io/en/latest/miniconda.html for more details.",
    "",
    sep = "\n"
  )
  message(text)
  response <- readline("Would you like to install Miniconda? [Y/n]: ")
  # re-prompt until an intelligible answer arrives; bare <Enter> means "yes"
  repeat {
    ch <- tolower(substring(response, 1, 1))
    if (ch == "y" || ch == "") {
      install_miniconda()
      return(TRUE)
    }
    if (ch == "n") {
      # record the opt-out so the prompt is not shown again
      meta <- miniconda_meta_read()
      meta$DisableInstallationPrompt <- TRUE
      miniconda_meta_write(meta)
      message("Installation aborted.")
      return(FALSE)
    }
    response <- readline("Please answer yes or no: ")
  }
}
# the default environment path to use for miniconda, overridable via the
# RETICULATE_MINICONDA_PYTHON_ENVPATH environment variable
miniconda_python_envpath <- function() {
  Sys.getenv(
    "RETICULATE_MINICONDA_PYTHON_ENVPATH",
    unset = miniconda_envpath()
  )
}
# the version of python to use in the environment ("3.8" unless overridden
# via the RETICULATE_MINICONDA_PYTHON_VERSION environment variable)
miniconda_python_version <- function() {
  Sys.getenv("RETICULATE_MINICONDA_PYTHON_VERSION", unset = "3.8")
}
miniconda_python_package <- function() {
  # conda package spec pinning the Python version, e.g. "python=3.8"
  paste("python", miniconda_python_version(), sep = "=")
}
miniconda_enabled <- function() {
  # Is automatic Miniconda usage enabled? FALSE when the user has set
  # RETICULATE_MINICONDA_ENABLED to "false"/"0" (case-insensitive) or has
  # previously declined the installation prompt.
  enabled <- Sys.getenv("RETICULATE_MINICONDA_ENABLED", unset = "TRUE")
  if (tolower(enabled) %in% c("false", "0"))
    return(FALSE)
  miniconda_installable()
}
|
## ----setup, message = FALSE----------------------------------------------
# Purled knitr script: dplyr data-transformation examples on nycflights13.
library(nycflights13)
library(tidyverse)
options(tibble.width = Inf)
## ---- eval=FALSE---------------------------------------------------------
## install.packages("tidyverse")
## ------------------------------------------------------------------------
flights
## ------------------------------------------------------------------------
# filter(): keep rows matching every condition (comma acts as AND)
filter(flights, month == 1, day == 1)
## ------------------------------------------------------------------------
jan1 <- filter(flights, month == 1, day == 1)
## ------------------------------------------------------------------------
# wrapping an assignment in parentheses also prints the result
(dec25 <- filter(flights, month == 12, day == 25))
## ---- error = TRUE-------------------------------------------------------
# deliberate error for the report: `=` instead of `==` inside filter()
filter(flights, month = 1)
## ------------------------------------------------------------------------
# floating-point comparison with == is unreliable ...
sqrt(2) ^ 2 == 2
1/49 * 49 == 1
## ------------------------------------------------------------------------
# ... use near() for tolerance-based comparison instead
near(sqrt(2) ^ 2, 2)
near(1 / 49 * 49, 1)
## ----bool-ops, echo = FALSE, fig.cap = "Complete set of boolean operations. `x` is the left-hand circle, `y` is the right-hand circle, and the shaded region show which parts each operator selects."----
knitr::include_graphics("diagrams/transform-logical.png")
## ---- eval = FALSE-------------------------------------------------------
## filter(flights, month == 11 | month == 12)
## ---- eval = FALSE-------------------------------------------------------
## nov_dec <- filter(flights, month %in% c(11, 12))
## ---- eval = FALSE-------------------------------------------------------
## filter(flights, !(arr_delay > 120 | dep_delay > 120))
## filter(flights, arr_delay <= 120, dep_delay <= 120)
## ------------------------------------------------------------------------
# NA propagates through comparisons and arithmetic
NA > 5
10 == NA
NA + 10
NA / 2
## ------------------------------------------------------------------------
NA == NA
## ------------------------------------------------------------------------
# Let x be Mary's age. We don't know how old she is.
x <- NA
# Let y be John's age. We don't know how old he is.
y <- NA
# Are John and Mary the same age?
x == y
# We don't know!
## ------------------------------------------------------------------------
# test for missingness with is.na(), never with ==
is.na(x)
## ------------------------------------------------------------------------
# filter() drops NA rows unless they are requested explicitly
df <- tibble(x = c(1, NA, 3))
filter(df, x > 1)
filter(df, is.na(x) | x > 1)
## ------------------------------------------------------------------------
# arrange(): sort rows; ties broken by the later columns
arrange(flights, year, month, day)
## ------------------------------------------------------------------------
arrange(flights, desc(arr_delay))
## ------------------------------------------------------------------------
# missing values sort last regardless of direction
df <- tibble(x = c(5, 2, NA))
arrange(df, x)
arrange(df, desc(x))
## ------------------------------------------------------------------------
# Select columns by name
select(flights, year, month, day)
# Select all columns between year and day (inclusive)
select(flights, year:day)
# Select all columns except those from year to day (inclusive)
select(flights, -(year:day))
## ------------------------------------------------------------------------
rename(flights, tail_num = tailnum)
## ------------------------------------------------------------------------
# everything() moves the named columns to the front, keeping the rest
select(flights, time_hour, air_time, everything())
## ------------------------------------------------------------------------
vars <- c("year", "month", "day", "dep_delay", "arr_delay")
## ---- eval = FALSE-------------------------------------------------------
## select(flights, contains("TIME"))
## ------------------------------------------------------------------------
flights_sml <- select(flights,
  year:day,
  ends_with("delay"),
  distance,
  air_time
)
# mutate(): add new columns computed from existing ones
mutate(flights_sml,
  gain = arr_delay - dep_delay,
  speed = distance / air_time * 60
)
## ------------------------------------------------------------------------
# newly created columns can be referenced immediately (gain -> gain_per_hour)
mutate(flights_sml,
  gain = arr_delay - dep_delay,
  hours = air_time / 60,
  gain_per_hour = gain / hours
)
## ------------------------------------------------------------------------
# transmute(): like mutate(), but keeps only the newly listed columns
transmute(flights,
  gain = arr_delay - dep_delay,
  hours = air_time / 60,
  gain_per_hour = gain / hours
)
## ------------------------------------------------------------------------
# integer division / modulo split dep_time (e.g. 517) into hour and minute
transmute(flights,
  dep_time,
  hour = dep_time %/% 100,
  minute = dep_time %% 100
)
## ------------------------------------------------------------------------
# offset window functions: lag() shifts back, lead() shifts forward
(x <- 1:10)
lag(x)
lead(x)
## ------------------------------------------------------------------------
# cumulative aggregates
x
cumsum(x)
cummean(x)
## ------------------------------------------------------------------------
# ranking functions; NA stays NA, ties share the minimum rank
y <- c(1, 2, 2, NA, 3, 4)
min_rank(y)
min_rank(desc(y))
## ------------------------------------------------------------------------
row_number(y)
dense_rank(y)
percent_rank(y)
cume_dist(y)
## ---- eval = FALSE, echo = FALSE-----------------------------------------
## flights <- flights %>% mutate(
##   dep_time = hour * 60 + minute,
##   arr_time = (arr_time %/% 100) * 60 + (arr_time %% 100),
##   airtime2 = arr_time - dep_time,
##   dep_sched = dep_time + dep_delay
## )
##
## ggplot(flights, aes(dep_sched)) + geom_histogram(binwidth = 60)
## ggplot(flights, aes(dep_sched %% 60)) + geom_histogram(binwidth = 1)
## ggplot(flights, aes(air_time - airtime2)) + geom_histogram()
## ------------------------------------------------------------------------
# summarise() collapses a data frame to one row (per group, if grouped)
summarise(flights, delay = mean(dep_delay, na.rm = TRUE))
## ------------------------------------------------------------------------
by_day <- group_by(flights, year, month, day)
summarise(by_day, delay = mean(dep_delay, na.rm = TRUE))
## ---- fig.width = 6------------------------------------------------------
# per-destination count and mean distance/delay, then drop small groups
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
  count = n(),
  dist = mean(distance, na.rm = TRUE),
  delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
# It looks like delays increase with distance up to ~750 miles
# and then decrease. Maybe as flights get longer there's more
# ability to make up delays in the air?
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
  geom_point(aes(size = count), alpha = 1/3) +
  geom_smooth(se = FALSE)
## ------------------------------------------------------------------------
# the same computation expressed as a single pipe
delays <- flights %>%
  group_by(dest) %>%
  summarise(
    count = n(),
    dist = mean(distance, na.rm = TRUE),
    delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(count > 20, dest != "HNL")
## ------------------------------------------------------------------------
# without na.rm, any NA delay makes the whole group mean NA
flights %>%
  group_by(year, month, day) %>%
  summarise(mean = mean(dep_delay))
## ------------------------------------------------------------------------
flights %>%
  group_by(year, month, day) %>%
  summarise(mean = mean(dep_delay, na.rm = TRUE))
## ------------------------------------------------------------------------
# alternatively, remove cancelled flights (NA delays) once up front
not_cancelled <- flights %>%
  filter(!is.na(dep_delay), !is.na(arr_delay))
flights %>%
  drop_na(dep_delay, arr_delay) %>% nrow()
flights %>%
  drop_na() %>% nrow()
head(flights)
flights %>%
  group_by(year, month, day) %>%
  summarise(n= n())
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(mean = mean(dep_delay))
## ------------------------------------------------------------------------
# mean arrival delay per aircraft (tail number)
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarise(
    delay = mean(arr_delay)
  )
ggplot(data = delays, mapping = aes(x = delay)) +
  geom_freqpoly(binwidth = 10)
## ------------------------------------------------------------------------
# add group size n: extreme mean delays occur mostly for rare tail numbers
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarise(
    delay = mean(arr_delay, na.rm = TRUE),
    n = n()
  )
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
  geom_point(alpha = 1/10)
## ------------------------------------------------------------------------
# filtering out small groups reveals the underlying pattern
delays %>%
  filter(n > 25) %>%
  ggplot(mapping = aes(x = n, y = delay)) +
    geom_point(alpha = 1/10)
## ------------------------------------------------------------------------
# Convert to a tibble so it prints nicely
batting <- as_tibble(Lahman::Batting)
# career batting average (ba) and at-bats (ab) per player
batters <- batting %>%
  group_by(playerID) %>%
  summarise(
    ba = sum(H, na.rm = TRUE) / sum(AB, na.rm = TRUE),
    ab = sum(AB, na.rm = TRUE)
  )
batters %>%
  filter(ab > 100) %>%
  ggplot(mapping = aes(x = ab, y = ba)) +
    geom_point() +
    geom_smooth(se = FALSE)
## ------------------------------------------------------------------------
# sorting by raw average surfaces lucky small-sample players
batters %>%
  arrange(desc(ba))
## ------------------------------------------------------------------------
# logical subsetting inside an aggregate
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(
    avg_delay1 = mean(arr_delay),
    avg_delay2 = mean(arr_delay[arr_delay > 0]) # the average positive delay
  )
## ------------------------------------------------------------------------
# Why is distance to some destinations more variable than to others?
not_cancelled %>%
  group_by(dest) %>%
  summarise(distance_sd = sd(distance)) %>%
  arrange(desc(distance_sd))
## ------------------------------------------------------------------------
# When do the first and last flights leave each day?
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(
    first = min(dep_time),
    last = max(dep_time)
  )
## ------------------------------------------------------------------------
# first()/last() extract by position rather than by value
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(
    first_dep = first(dep_time),
    last_dep = last(dep_time)
  )
## ------------------------------------------------------------------------
# equivalent: rank departure times and keep the top-ranked row per day
not_cancelled %>%
  group_by(year, month, day) %>%
  mutate(rmin = min_rank(desc(dep_time))) %>%
  select(dep_time, rmin, everything()) %>%
  filter(rmin == 1)
## ------------------------------------------------------------------------
# Which destinations have the most carriers?
not_cancelled %>%
  group_by(dest) %>%
  summarise(carriers = n_distinct(carrier)) %>%
  arrange(desc(carriers))
## ------------------------------------------------------------------------
# count() is shorthand for group_by() + summarise(n = n())
not_cancelled %>%
  count(dest)
## ------------------------------------------------------------------------
# weighted count: total distance flown per aircraft
not_cancelled %>%
  count(tailnum, wt = distance)
## ------------------------------------------------------------------------
# How many flights left before 5am? (these usually indicate delayed
# flights from the previous day)
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(n_early = sum(dep_time < 500))
# What proportion of flights are delayed by more than an hour?
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(hour_perc = mean(arr_delay > 60))
## ------------------------------------------------------------------------
# summarising a grouped tibble peels off one grouping level each time
daily <- group_by(flights, year, month, day)
(per_day <- summarise(daily, flights = n()))
(per_month <- summarise(per_day, flights = sum(flights)))
(per_year <- summarise(per_month, flights = sum(flights)))
## ------------------------------------------------------------------------
daily %>%
  ungroup() %>% # no longer grouped by date
  summarise(flights = n()) # all flights
## ------------------------------------------------------------------------
# grouped filter: the ~9 most-delayed flights per day
flights_sml %>%
  group_by(year, month, day) %>%
  filter(rank(desc(arr_delay)) < 10)
## ------------------------------------------------------------------------
# destinations with more than a year's worth of flights
popular_dests <- flights %>%
  group_by(dest) %>%
  filter(n() > 365)
popular_dests
## ------------------------------------------------------------------------
# each flight's share of its destination's total positive delay
popular_dests %>%
  filter(arr_delay > 0) %>%
  mutate(prop_delay = arr_delay / sum(arr_delay)) %>%
  select(year:day, dest, arr_delay, prop_delay)
| /Session3_r4ds_transform/Session3_transform_report.R | no_license | karaesmen/WorkshopMaterials_May2019 | R | false | false | 11,803 | r | ## ----setup, message = FALSE----------------------------------------------
library(nycflights13)
library(tidyverse)
options(tibble.width = Inf)
## ---- eval=FALSE---------------------------------------------------------
## install.packages("tidyverse")
## ------------------------------------------------------------------------
flights
## ------------------------------------------------------------------------
filter(flights, month == 1, day == 1)
## ------------------------------------------------------------------------
jan1 <- filter(flights, month == 1, day == 1)
## ------------------------------------------------------------------------
(dec25 <- filter(flights, month == 12, day == 25))
## ---- error = TRUE-------------------------------------------------------
filter(flights, month = 1)
## ------------------------------------------------------------------------
sqrt(2) ^ 2 == 2
1/49 * 49 == 1
## ------------------------------------------------------------------------
near(sqrt(2) ^ 2, 2)
near(1 / 49 * 49, 1)
## ----bool-ops, echo = FALSE, fig.cap = "Complete set of boolean operations. `x` is the left-hand circle, `y` is the right-hand circle, and the shaded region show which parts each operator selects."----
knitr::include_graphics("diagrams/transform-logical.png")
## ---- eval = FALSE-------------------------------------------------------
## filter(flights, month == 11 | month == 12)
## ---- eval = FALSE-------------------------------------------------------
## nov_dec <- filter(flights, month %in% c(11, 12))
## ---- eval = FALSE-------------------------------------------------------
## filter(flights, !(arr_delay > 120 | dep_delay > 120))
## filter(flights, arr_delay <= 120, dep_delay <= 120)
## ------------------------------------------------------------------------
NA > 5
10 == NA
NA + 10
NA / 2
## ------------------------------------------------------------------------
NA == NA
## ------------------------------------------------------------------------
# Let x be Mary's age. We don't know how old she is.
x <- NA
# Let y be John's age. We don't know how old he is.
y <- NA
# Are John and Mary the same age?
x == y
# We don't know!
## ------------------------------------------------------------------------
is.na(x)
## ------------------------------------------------------------------------
df <- tibble(x = c(1, NA, 3))
filter(df, x > 1)
filter(df, is.na(x) | x > 1)
## ------------------------------------------------------------------------
arrange(flights, year, month, day)
## ------------------------------------------------------------------------
arrange(flights, desc(arr_delay))
## ------------------------------------------------------------------------
df <- tibble(x = c(5, 2, NA))
arrange(df, x)
arrange(df, desc(x))
## ------------------------------------------------------------------------
# Select columns by name
select(flights, year, month, day)
# Select all columns between year and day (inclusive)
select(flights, year:day)
# Select all columns except those from year to day (inclusive)
select(flights, -(year:day))
## ------------------------------------------------------------------------
rename(flights, tail_num = tailnum)
## ------------------------------------------------------------------------
select(flights, time_hour, air_time, everything())
## ------------------------------------------------------------------------
vars <- c("year", "month", "day", "dep_delay", "arr_delay")
## ---- eval = FALSE-------------------------------------------------------
## select(flights, contains("TIME"))
## ------------------------------------------------------------------------
flights_sml <- select(flights,
year:day,
ends_with("delay"),
distance,
air_time
)
mutate(flights_sml,
gain = arr_delay - dep_delay,
speed = distance / air_time * 60
)
## ------------------------------------------------------------------------
mutate(flights_sml,
gain = arr_delay - dep_delay,
hours = air_time / 60,
gain_per_hour = gain / hours
)
## ------------------------------------------------------------------------
transmute(flights,
gain = arr_delay - dep_delay,
hours = air_time / 60,
gain_per_hour = gain / hours
)
## ------------------------------------------------------------------------
transmute(flights,
dep_time,
hour = dep_time %/% 100,
minute = dep_time %% 100
)
## ------------------------------------------------------------------------
(x <- 1:10)
lag(x)
lead(x)
## ------------------------------------------------------------------------
x
cumsum(x)
cummean(x)
## ------------------------------------------------------------------------
y <- c(1, 2, 2, NA, 3, 4)
min_rank(y)
min_rank(desc(y))
## ------------------------------------------------------------------------
row_number(y)
dense_rank(y)
percent_rank(y)
cume_dist(y)
## ---- eval = FALSE, echo = FALSE-----------------------------------------
## flights <- flights %>% mutate(
## dep_time = hour * 60 + minute,
## arr_time = (arr_time %/% 100) * 60 + (arr_time %% 100),
## airtime2 = arr_time - dep_time,
## dep_sched = dep_time + dep_delay
## )
##
## ggplot(flights, aes(dep_sched)) + geom_histogram(binwidth = 60)
## ggplot(flights, aes(dep_sched %% 60)) + geom_histogram(binwidth = 1)
## ggplot(flights, aes(air_time - airtime2)) + geom_histogram()
## ------------------------------------------------------------------------
summarise(flights, delay = mean(dep_delay, na.rm = TRUE))
## ------------------------------------------------------------------------
by_day <- group_by(flights, year, month, day)
summarise(by_day, delay = mean(dep_delay, na.rm = TRUE))
## ---- fig.width = 6------------------------------------------------------
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
# It looks like delays increase with distance up to ~750 miles
# and then decrease. Maybe as flights get longer there's more
# ability to make up delays in the air?
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
geom_point(aes(size = count), alpha = 1/3) +
geom_smooth(se = FALSE)
## ------------------------------------------------------------------------
delays <- flights %>%
group_by(dest) %>%
summarise(
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
) %>%
filter(count > 20, dest != "HNL")
## ------------------------------------------------------------------------
flights %>%
group_by(year, month, day) %>%
summarise(mean = mean(dep_delay))
## ------------------------------------------------------------------------
flights %>%
group_by(year, month, day) %>%
summarise(mean = mean(dep_delay, na.rm = TRUE))
## ------------------------------------------------------------------------
# nycflights13 summary examples.
# Base table: flights with a known departure AND arrival delay, i.e. the
# flights that were not cancelled.
not_cancelled <- flights %>%
  filter(!is.na(dep_delay), !is.na(arr_delay))
# Same filtering expressed with tidyr::drop_na() on the two delay columns.
flights %>%
  drop_na(dep_delay, arr_delay) %>% nrow()
# drop_na() with no columns removes rows with NA in *any* column (stricter).
flights %>%
  drop_na() %>% nrow()
head(flights)
# Flights per day, cancelled ones included.
flights %>%
  group_by(year, month, day) %>%
  summarise(n= n())
# Mean departure delay per day over non-cancelled flights.
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(mean = mean(dep_delay))
## ------------------------------------------------------------------------
# Mean arrival delay per aircraft (tail number).
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarise(
    delay = mean(arr_delay)
  )
# Distribution of the per-aircraft mean delays.
ggplot(data = delays, mapping = aes(x = delay)) +
  geom_freqpoly(binwidth = 10)
## ------------------------------------------------------------------------
# Same summary, but also record the number of flights per aircraft so the
# variation of the mean can be judged against the sample size.
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarise(
    delay = mean(arr_delay, na.rm = TRUE),
    n = n()
  )
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
  geom_point(alpha = 1/10)
## ------------------------------------------------------------------------
# Drop aircraft with few observations; the extreme means shrink as n grows.
delays %>%
  filter(n > 25) %>%
  ggplot(mapping = aes(x = n, y = delay)) +
    geom_point(alpha = 1/10)
## ------------------------------------------------------------------------
# Convert to a tibble so it prints nicely
batting <- as_tibble(Lahman::Batting)
# Career batting average (ba) and at-bats (ab) per player.
batters <- batting %>%
  group_by(playerID) %>%
  summarise(
    ba = sum(H, na.rm = TRUE) / sum(AB, na.rm = TRUE),
    ab = sum(AB, na.rm = TRUE)
  )
# Relationship between batting average and number of at-bats (ab > 100).
batters %>%
  filter(ab > 100) %>%
  ggplot(mapping = aes(x = ab, y = ba)) +
    geom_point() +
    geom_smooth(se = FALSE)
## ------------------------------------------------------------------------
# Ranking by raw batting average surfaces players with very few at-bats.
batters %>%
  arrange(desc(ba))
## ------------------------------------------------------------------------
# Logical subsetting inside an aggregate: mean over a filtered vector.
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(
    avg_delay1 = mean(arr_delay),
    avg_delay2 = mean(arr_delay[arr_delay > 0]) # the average positive delay
  )
## ------------------------------------------------------------------------
# Why is distance to some destinations more variable than to others?
not_cancelled %>%
  group_by(dest) %>%
  summarise(distance_sd = sd(distance)) %>%
  arrange(desc(distance_sd))
## ------------------------------------------------------------------------
# When do the first and last flights leave each day?
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(
    first = min(dep_time),
    last = max(dep_time)
  )
## ------------------------------------------------------------------------
# first()/last() positional helpers as an alternative to min()/max().
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(
    first_dep = first(dep_time),
    last_dep = last(dep_time)
  )
## ------------------------------------------------------------------------
# Ranking variant: keep the row(s) ranked 1 by descending dep_time per day.
not_cancelled %>%
  group_by(year, month, day) %>%
  mutate(rmin = min_rank(desc(dep_time))) %>%
  select(dep_time, rmin, everything()) %>%
  filter(rmin == 1)
## ------------------------------------------------------------------------
# Which destinations have the most carriers?
not_cancelled %>%
  group_by(dest) %>%
  summarise(carriers = n_distinct(carrier)) %>%
  arrange(desc(carriers))
## ------------------------------------------------------------------------
# count() is shorthand for group_by() + summarise(n = n()).
not_cancelled %>%
  count(dest)
## ------------------------------------------------------------------------
# Weighted count: total distance flown per aircraft.
not_cancelled %>%
  count(tailnum, wt = distance)
## ------------------------------------------------------------------------
# How many flights left before 5am? (these usually indicate delayed
# flights from the previous day)
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(n_early = sum(dep_time < 500))
# What proportion of flights are delayed by more than an hour?
not_cancelled %>%
  group_by(year, month, day) %>%
  summarise(hour_perc = mean(arr_delay > 60))
## ------------------------------------------------------------------------
# Each summarise() peels off one grouping level: day -> month -> year.
daily <- group_by(flights, year, month, day)
(per_day <- summarise(daily, flights = n()))
(per_month <- summarise(per_day, flights = sum(flights)))
(per_year <- summarise(per_month, flights = sum(flights)))
## ------------------------------------------------------------------------
daily %>%
  ungroup() %>% # no longer grouped by date
  summarise(flights = n()) # all flights
## ------------------------------------------------------------------------
# Grouped filter: rows ranked below 10 by descending arr_delay per day.
# NOTE(review): flights_sml is not defined in this file - presumably a column
# subset of flights created earlier in the source document; confirm upstream.
flights_sml %>%
  group_by(year, month, day) %>%
  filter(rank(desc(arr_delay)) < 10)
## ------------------------------------------------------------------------
# Destinations with more than 365 flights in the data.
popular_dests <- flights %>%
  group_by(dest) %>%
  filter(n() > 365)
popular_dests
## ------------------------------------------------------------------------
# Grouped mutate: each flight's share of its destination's total delay.
popular_dests %>%
  filter(arr_delay > 0) %>%
  mutate(prop_delay = arr_delay / sum(arr_delay)) %>%
  select(year:day, dest, arr_delay, prop_delay)
|
# Likert plot of the event-evaluation questions (columns 6-15 of the survey).
library(tidyverse)
library(likert)
library(readxl)
setwd("F://Metodos 2//Trabalho")
BD <- read_excel("F:/Metodos 2/Trabalho/BD.xlsx")

# Keep only the ten evaluation columns.
BD2 <- BD %>%
  select(6:15)

# First three questions are coded 1-5; map them onto labelled factors.
BD2 <- mutate_at(BD2, vars(1:3), function(x) factor(x, levels = 1:5, labels = c("Muito Ruim", "Ruim", "Regular", "Bom", "Ótimo")))
# Remaining questions already hold the labels; coerce them to ordered factors.
BD2 <- mutate_at(BD2, vars(4:10), function(x) ordered(x, levels = c("Muito Ruim", "Ruim", "Regular", "Bom", "Ótimo"), labels = c("Muito Ruim", "Ruim", "Regular", "Bom", "Ótimo")))

# FIX: the original grew a vector inside a hard-coded `for (i in 1:10)` loop;
# str_sub() is vectorised, so the column-name prefix (first 3 characters) can
# be stripped in a single call.
names(BD2) <- str_sub(names(BD2), start = 4)

# likert() expects a plain data.frame, not a tibble.
BD2 <- as.data.frame(BD2)
plot(likert(BD2)) + ggtitle("Nota Geral do Evento")

# Leftover scratch check: substr() clamps the end index to the string length.
x <- c("12345")
substr(x, 2, 199)
| /Script e BD/Script.R | no_license | Lyncoln/slides-para-a-semext | R | false | false | 649 | r | library(tidyverse)
library(likert)
library(readxl)
setwd("F://Metodos 2//Trabalho")
BD = read_excel("F:/Metodos 2/Trabalho/BD.xlsx")
BD2= BD %>%
select(6:15)
BD2 = mutate_at(BD2,vars(1:3),function(x)factor(x,levels =1:5,labels=c("Muito Ruim","Ruim","Regular","Bom","Ótimo")))
BD2 = mutate_at(BD2,vars(4:10),function(x)ordered(x,levels=c("Muito Ruim","Ruim","Regular","Bom","Ótimo"),labels=c("Muito Ruim","Ruim","Regular","Bom","Ótimo")))
vet = NULL
for( i in 1:10 ){
vet[i] = str_sub(names(BD2[,i]),start = 4)
}
names(BD2) = vet
BD2 = as.data.frame(BD2)
plot(likert(BD2)) + ggtitle("Nota Geral do Evento")
x= c("12345")
substr(x,2,199)
|
# Derivative log-ratio spread: a robust estimate of point-to-point noise in a
# signal. Successive differences of the NA-stripped input are taken and their
# interquartile range is rescaled to a standard-deviation-like quantity.
`dLRs` <- function(x) {
  point_to_point <- diff(na.omit(x))
  # qnorm((1 + 0.5) / 2) == qnorm(0.75); 4 * qnorm(0.75) is the usual
  # IQR-to-sigma conversion, and sqrt(2) compensates for the differencing.
  normalisation <- 4 * qnorm((1 + 0.5) / 2) / sqrt(2)
  IQR(point_to_point) / normalisation
}
| /R/dLRs.R | no_license | tf2/CNsolidate | R | false | false | 96 | r | `dLRs` <-
function(x) {
return(IQR(diff(na.omit(x))) / (4 * qnorm((1 + 0.5) / 2) / sqrt(2)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myf2.R
\name{myf2}
\alias{myf2}
\title{myf2}
\usage{
myf2(x, xk, xk2, coef)
}
\arguments{
\item{x}{points on the curve}
\item{xk}{the first x knot}
\item{xk2}{the second x knot}
\item{coef}{linear model coefficients}
}
\value{
coefficients of the linear model used for plotting linear equations
}
\description{
Takes in data and x knot values to create a linear equation.
}
\examples{
myf2(x, xk = input$xk1, xk2 = input$xk2, coef = coef(lmp))
}
| /man/myf2.Rd | permissive | cil0834/MATH4773CLAG | R | false | true | 520 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myf2.R
\name{myf2}
\alias{myf2}
\title{myf2}
\usage{
myf2(x, xk, xk2, coef)
}
\arguments{
\item{x}{points on the curve}
\item{xk}{the first x knot}
\item{xk2}{the second k knot}
\item{coef}{linear model coefficients}
}
\value{
coefficients of the linear model used for plotting linear equations
}
\description{
Takes in data and x knot values to create a linear equation.
}
\examples{
myf2(x,xk=input$xk1,xk2 = input$xk2, coef=coef(lmp)
}
|
#' @title Update user details
#' @description Updates an existing user record on Pipedrive via an HTTP PUT request.
#' @param id ID of the user to update.
#' @param active_flag Whether the user is active or not. 0 = Not activated, 1 = Activated. This field has the following domains: (0; 1)
#' @param api_token To validate your requests, you'll need your api_token - this means that our system will need to know who you are and be able to connect all actions you do with your chosen Pipedrive account. Have in mind that a user has a different api_token for each company. Please access the following link for more information: <https://pipedrive.readme.io/docs/how-to-find-the-api-token?utm_source=api_reference>
#' @param company_domain How to get the company domain: <https://pipedrive.readme.io/docs/how-to-get-the-company-domain>
#' @param return_type the default return is an object List with all informations of process, or you can set boolean (TRUE = success, FALSE = error)
#' @return customizable return, the default is an object List
#' @export
#' @examples \donttest{
#' users.update(id='e.g.',active_flag='e.g.',api_token='token',company_domain='exp')
#' }
users.update <- function(id, active_flag, api_token=NULL, company_domain='api', return_type = c('complete','boolean')){
  # Delegate token validation/resolution to the package helper.
  api_token <- check_api_token_(api_token)

  # Build the request payload; clear_list_ drops empty entries, and `id` is
  # removed afterwards because it travels in the URL path, not in the body.
  payload <- clear_list_(list(id = id, active_flag = active_flag))
  payload$id <- NULL

  # Fill the endpoint template placeholder by placeholder.
  endpoint <- 'https://{company_domain}.pipedrive.com/v1/users/{id}?api_token={api_token}'
  endpoint <- sub('{company_domain}', company_domain, endpoint, fixed = TRUE)
  endpoint <- sub('{id}', id, endpoint, fixed = TRUE)
  endpoint <- sub('{api_token}', api_token, endpoint, fixed = TRUE)

  response <- httr::PUT(endpoint, body = payload, encode = 'json')

  if (return_type[1] == 'boolean') {
    # Success is an HTTP 200 (OK) or 201 (Created).
    return(response$status_code %in% c(200, 201))
  }
  response
}
| /R/users.update.R | no_license | cran/Rpipedrive | R | false | false | 1,864 | r | #' @title Update user details
#' @description Function to Update user details on pipedrive.
#' @param id ID of the user
#' @param active_flag Whether the user is active or not. 0 = Not activated, 1 = Activated This field has the following domains: (0; 1)
#' @param api_token To validate your requests, you'll need your api_token - this means that our system will need to know who you are and be able to connect all actions you do with your chosen Pipedrive account. Have in mind that a user has a different api_token for each company. Please access the following link for more information: <https://pipedrive.readme.io/docs/how-to-find-the-api-token?utm_source=api_reference>
#' @param company_domain How to get the company domain: <https://pipedrive.readme.io/docs/how-to-get-the-company-domain>
#' @param return_type the default return is an object List with all informations of process, or you can set boolean (TRUE = success, FALSE = error)
#' @return customizable return, the default is an object List
#' @export
#' @examples \donttest{
#' users.update(id='e.g.',active_flag='e.g.',api_token='token',company_domain='exp')
#' }
users.update <- function(id, active_flag, api_token=NULL, company_domain='api', return_type = c('complete','boolean')){
api_token <- check_api_token_(api_token)
url <- 'https://{company_domain}.pipedrive.com/v1/users/{id}?'
bodyList <- list(id=id,active_flag=active_flag)
bodyList <- clear_list_(bodyList)
url <- sub('{company_domain}',company_domain, url, fixed = TRUE)
url <- paste0(url, 'api_token={api_token}')
url <- sub('{api_token}',api_token, url, fixed = TRUE)
url <- sub('{id}',id, url, fixed = TRUE)
bodyList$id <- NULL
r <- httr::PUT(url, body = bodyList, encode = 'json')
if(return_type[1] == 'boolean'){
if(r$status_code %in% c(200,201)){return(TRUE)}else{return(FALSE)}
}else{return(r)}
}
|
\name{plotbasemap}
\alias{plotbasemap}
\title{ Plot land area on a map with colored polygons}
\description{
Plots a map within given rectangular region showing land areas as
colored polygons. Requires the mapping utility GMT.
}
\usage{
plotbasemap(lon1, lon2, lat1, lat2, grid=FALSE, zoom=FALSE,
landcolor="darkgreen", seacolor="lightblue", data=gmt3)
}
\arguments{
\item{lon1}{Longitude of lower left corner of rectangle }
\item{lon2}{Longitude of upper right corner of rectangle }
\item{lat1}{Latitude of lower left corner of rectangle }
\item{lat2}{Latitude of upper right corner of rectangle }
\item{grid}{Whether to plot grid lines on map }
\item{zoom}{Whether to start in interactive zoom mode }
\item{landcolor}{Color of polygons }
\item{seacolor}{Color of ocean }
\item{data}{dataset to use}
}
\details{
A map is plotted with polygons clipped at borders of map region.
  If the function is started in zoom mode, two left-clicks on the
  map zoom it to the rectangle spanned by the two points. Zooming
  repeats until the map is right-clicked.
}
\value{
Value is \code{NULL}
}
\author{Anders Nielsen \email{anders.nielsen@hawaii.edu}, and Pierre Kleiber.}
\examples{
plotbasemap(8,13,53,58)
}
\keyword{models}
| /deprecated/trackit/trackit/man/plotbasemap.Rd | no_license | positioning/kalmanfilter | R | false | false | 1,286 | rd | \name{plotbasemap}
\alias{plotbasemap}
\title{ Plot land area on a map with colored polygons}
\description{
Plots a map within given rectangular region showing land areas as
colored polygons. Requires the mapping utility GMT.
}
\usage{
plotbasemap(lon1, lon2, lat1, lat2, grid=FALSE, zoom=FALSE,
landcolor="darkgreen", seacolor="lightblue", data=gmt3)
}
\arguments{
\item{lon1}{Longitude of lower left corner of rectangle }
\item{lon2}{Longitude of upper right corner of rectangle }
\item{lat1}{Latitude of lower left corner of rectangle }
\item{lat2}{Latitude of upper right corner of rectangle }
\item{grid}{Whether to plot grid lines on map }
\item{zoom}{Whether to start in interactive zoom mode }
\item{landcolor}{Color of polygons }
\item{seacolor}{Color of ocean }
\item{data}{dataset to use}
}
\details{
A map is plotted with polygons clipped at borders of map region.
If the function is started in zoom mode two left-clicks on the
map will zoom it to the rectangle spanned by the two points. This
zooming is repeated until a right-click on the map is done.
}
\value{
Value is \code{NULL}
}
\author{Anders Nielsen \email{anders.nielsen@hawaii.edu}, and Pierre Kleiber.}
\examples{
plotbasemap(8,13,53,58)
}
\keyword{models}
|
## ---------------------- Setup and Configuration
# The working directory is chosen per machine (identified by host name) so
# the same script runs unchanged on each collaborator's computer.
nodename = Sys.info()['nodename'] #Get OS name for dynamic working directory setting
if (grepl('SKYLLA', nodename)){
  Sys.setlocale("LC_TIME", "C") #LOCALE ISSUES WITH DATETIME ON WINDOWS
  setwd("G:/Dev/DataScience/TSA-Finance/data") #Pascal Desktop
} else if (grepl('ARES', nodename)) {
  Sys.setlocale("LC_TIME", "C") #LOCALE ISSUES WITH DATETIME ON WINDOWS
  setwd("C:/Users/Pascal/Documents/Repository/DataScience/TSA-Finance/data") #Pascal Laptop
} else {
  setwd("~/Code/TSA-Finance/data") #Nic
}
library(fBasics)
library(collections) # install.packages("collections")
library(ggfortify)
library(TTR)
library(dplyr)
library(matrixStats)
## ---------------------- Read-In of serialized objects
# Both objects are accessed via $keys()/$get() below, i.e. they behave like
# collections::Dict objects keyed by currency symbol.
top50 <- readRDS(file = "tso.decomposed.top50.rds") # decomposed time-series of top 50 currencies
tso.top50 <- readRDS(file = "tso.top50.rds") # original time-series of top 50 currencies
# Convert a data.frame into a list with one element per row; each element is
# the row as a plain named list (the data.frame class is stripped).
rows <- function(tab) {
  lapply(
    seq_len(nrow(tab)),
    # drop = FALSE keeps the one-row data.frame shape before unclassing
    # (spelled out instead of the unsafe shorthand `F`).
    function(i) unclass(tab[i, , drop = FALSE])
  )
}
## - - - - - - - - - - - - - - - - - - - FIND CORRELATING CURRENCY TRENDS
# All ordered pairs of currency symbols (cartesian product); self-pairs are
# skipped inside the loop, and each unordered pair occurs twice.
cart.prod <- expand.grid(top50$keys(),top50$keys())
cor_limit <- 0.7 # Faustregel for statistically significant correlation
for (row in rows(cart.prod)){
  print('')
  Var1 <- paste(row$Var1)
  Var2 <- paste(row$Var2)
  if (Var1 == Var2){
    next()
  }else{
    # Seasonal components of the two decomposed series, truncated to the
    # shorter one so they can be compared element-wise.
    s1 <- as.numeric((top50$get(Var1))$seasonal)
    s2 <- as.numeric((top50$get(Var2))$seasonal)
    dim_min_len <- min(length(s1), length(s2))
    # FIX: cor()'s `method` expects a single value; passing the whole vector
    # made it silently pick the first entry. Make "pearson" explicit.
    # seq_len() also replaces the 0:n indexing idiom.
    correlation_coefficient <- cor(s1[seq_len(dim_min_len)], s2[seq_len(dim_min_len)], method = "pearson")
    if (abs(correlation_coefficient) >= cor_limit){
      cat('Looking at combination of ',Var1, Var2)
      # FIX: name the columns explicitly. The original data.frame() generated
      # names like `s1.0.dim_min_len.`, and `df$s1` only worked through
      # fragile partial matching.
      df <- data.frame(s1 = s1[seq_len(dim_min_len)], s2 = s2[seq_len(dim_min_len)])
      plot(df$s1, type='l', main=paste(Var1, Var2, correlation_coefficient, sep=' - '), xlab='', ylab='Value', col='blue')
      lines(df$s2, col='red')
    }else{
      cat('Skipped combination of ',Var1, Var2)
    }
  }
}
## - - - - - - - - - - - - - - - - - - - FIND SIMILAR BEHAVIOR ACCROSS CURRENCIES
# Return the later of two c(year, day-of-year) date vectors (the format used
# by stats::start()/end()). Ties favour date1.
get_bigger_date_vector <- function(date1, date2) {
  if (date1[1] != date2[1]) {
    # Different years: the larger year wins outright.
    if (date1[1] > date2[1]) date1 else date2
  } else if (date1[2] >= date2[2]) {
    # Same year: compare day-of-year; on a tie, date1 is returned.
    date1
  } else {
    date2
  }
}
# Return the earlier of two c(year, day-of-year) date vectors.
#
# BUG FIX: the original tested `result == date1`, a length-2 logical, inside
# if(). That is an error on R >= 4.2, and on older R it silently compared
# only the year - so for two dates in the same year the *later* one could be
# returned. identical() gives the intended scalar comparison.
get_smaller_date_vector <- function(date1, date2){
  bigger <- get_bigger_date_vector(date1, date2)
  if (identical(bigger, date1)) {
    date2
  } else {
    date1
  }
}
# Sentinels for the latest common start and earliest common end across all
# currency series, as c(year, day-of-year) vectors.
max_start_date <- c(1970, 1)
min_end_date <- c(2999, 300)
# finding latest start date and earliest finish date
for (currency in tso.top50$keys()) {
  currency.ts <- tso.top50$get(currency)
  max_start_date <- get_bigger_date_vector(start(currency.ts), max_start_date)
  min_end_date <- get_smaller_date_vector(end(currency.ts), min_end_date)
}
currency.tsos <- Dict()
length.tso <- 0
# windowing the timeseries -> all should have the same dimensions
for (currency in tso.top50$keys()) {
  currency.ts <- tso.top50$get(currency)
  sub.ts <- window(currency.ts, start=max_start_date, end=min_end_date)
  # NOTE(review): length.tso keeps only the length of the *last* window; all
  # windows share the same start/end, so lengths should agree - confirm.
  length.tso <- length(sub.ts)
  currency.tsos$set(currency, sub.ts)
}
# One column per currency, one row per observation of the common window.
currency.df <- data.frame(matrix(ncol = length(currency.tsos$keys()), nrow = length.tso))
colnames(currency.df) <- currency.tsos$keys()
# Build seasonality DF: per currency, extract the STL seasonal component and
# min-max normalise it so currencies are comparable.
for (currency in currency.tsos$keys()){
  decomposed <- stl(currency.tsos$get(currency), s.window='periodic', na.action = na.omit)
  seasonal.part <- as.numeric(decomposed$time.series[,'seasonal'])
  seasonal.part.max <- max(seasonal.part)
  seasonal.part.min <- min(seasonal.part)
  # normalize data to -1 and 1
  #currency.df[currency] <- 2 * ((seasonal.part - seasonal.part.min)/(seasonal.part.max - seasonal.part.min)) - 1
  # normalize data to 0 and 1
  currency.df[currency] <- (((seasonal.part - seasonal.part.min)/(seasonal.part.max - seasonal.part.min)))
}
# Calendar dates as row names, one day per row from the common start date.
row.names(currency.df) <- seq(from = as.Date(toString(max_start_date), '%Y, %j'), by = "day", length.out = length.tso)
# calculate row stats (cross-currency spread and mean per day)
currency.df <- transform(currency.df, row.sd=apply(currency.df, 1, sd, na.rm=TRUE))
currency.df <- transform(currency.df, row.mean=apply(currency.df, 1, mean, na.rm=TRUE))
# generate window: a day is flagged "in window" (common behaviour) when the
# cross-currency standard deviation is at or below the threshold.
window.size <- .23
min.sd <- 0 #min(currency.df$row.sd)
currency.df <- mutate(currency.df, in.window = seq(from=FALSE, by=FALSE,length.out = length.tso))
currency.df$in.window <- apply(currency.df,1, function(row) { row[['row.sd']] <= min.sd + window.size } )
# plot: split one currency's seasonal curve into its "common" part (low
# cross-currency spread) and its "unique" part, then overlay the two.
ccy <- 'BTC'
rn <- row.names(currency.df)
plot.data.out.window <- data.frame(matrix(ncol = 1, nrow = length.tso))
plot.data.in.window <- data.frame(matrix(ncol = 1, nrow = length.tso))
row.names(plot.data.out.window) <- rn
row.names(plot.data.in.window) <- rn
colnames(plot.data.out.window) <- c(ccy)
colnames(plot.data.in.window) <- c(ccy)
# Mask one partition with NA at a time so lines() leaves gaps there; the
# backup restores the full frame between the two maskings.
currency.df.bkp <- currency.df
currency.df[currency.df$in.window == TRUE,][[ccy]] <- NA
plot.data.out.window[[ccy]] <- currency.df[[ccy]]
currency.df <- currency.df.bkp
currency.df[currency.df$in.window == FALSE,][[ccy]] <- NA
plot.data.in.window[[ccy]] <- currency.df[[ccy]]
currency.df <- currency.df.bkp
# Widen the bottom margin for the vertical date labels.
par(mar=c(7,4,4,2))
day.interval <- 90
plot(plot.data.out.window[[ccy]],
     type='l',
     main=ccy,
     col='orange',
     ylab = 'Seasonality',
     xaxt = 'n', xlab=''
     )
# Date labels every `day.interval` observations, drawn vertically (las = 2).
axis(1, at=seq(from = 0, to = length.tso, by = day.interval), labels=rn[seq(1,length(rn), day.interval)], las=2)
abline(v=seq(from = 0, to = length.tso, by = day.interval))
lines(plot.data.in.window[[ccy]], col='purple')
legend('bottomright', legend=c('Common', 'Unique'), col=c('purple', 'orange'), lty=1)
# get common dates (row names of the in-window rows, parsed back to Date)
row.names(currency.df) <- seq(from = as.Date(toString(max_start_date), '%Y, %j'), by = "day", length.out = length.tso)
common.dates <- row.names(currency.df[currency.df$in.window == TRUE,])
common.dates <- as.Date(common.dates, format="%Y-%m-%d")
#
# hist(as.integer(format(common.dates, format = '%j')),
#      breaks = 365,
#      main='Frequency of common days in a year',
#      xlim=c(1,365),
#      xlab='Day of the Year', xaxt='n')
# axis(side=1, at=seq(0,365, 5), labels=seq(0,365,5), las=2)
# Histogram of common days by day-of-year, bucketed into 73 (~5-day) bins.
hist(as.integer(format(common.dates, format = '%j')),
     breaks = 73,
     main='Frequency of common days in a year',
     xlim=c(1,365),
     xlab='Day of the Year', xaxt='n',
     col='gray')
axis(side=1, at=seq(0,365, 5), labels=seq(0,365,5), las=2)
# EXPERIMENTS
# Dominant frequencies of the BTC series via the periodogram.
library(TSA)
p <- periodogram(as.numeric(tso.top50$get('BTC')))
dd <- data.frame(freq=p$freq, spec=p$spec)
# NOTE(review): `order` shadows base::order here; harmless but confusing.
order <- dd[order(-dd$spec),]
top2 <- head(order, 2)
top2
# Periods (in observations) of the two strongest frequencies.
# NOTE(review): `top2$f` relies on partial matching of the `freq` column;
# prefer top2$freq.
time = 1/top2$f
time
#[1] 937.5 625.0
# Compare the seasonal component from three decomposition methods.
s <- stl(tso.top50$get('BTC'), s.window='periodic', na.action = na.omit)
m <- mstl(tso.top50$get('BTC'))
d <- decompose(tso.top50$get('BTC'))
plot(m[,'Seasonal365'], type='l', ylab='Seasonality', col='darkgreen')
lines(d$seasonal, col='lightblue')
lines(s$time.series[,'seasonal'], col='darkblue')
legend('bottomright', legend=c('stl', 'mstl', 'decompose'), col=c('darkblue', 'darkgreen', 'lightblue'), lty=1)
# SEASONPLOT
library(forecast)
ggseasonplot(tso.top50$get('BTC') ,year.labels=TRUE, continuous=TRUE, main='BTC Seasonplot')
| /analyze.R | no_license | highproformas/TSA-Finance | R | false | false | 7,304 | r | ## ---------------------- Setup and Configuration
nodename = Sys.info()['nodename'] #Get OS name for dynamic working directory setting
if (grepl('SKYLLA', nodename)){
Sys.setlocale("LC_TIME", "C") #LOCALE ISSUES WITH DATETIME ON WINDOWS
setwd("G:/Dev/DataScience/TSA-Finance/data") #Pascal Desktop
} else if (grepl('ARES', nodename)) {
Sys.setlocale("LC_TIME", "C") #LOCALE ISSUES WITH DATETIME ON WINDOWS
setwd("C:/Users/Pascal/Documents/Repository/DataScience/TSA-Finance/data") #Pascal Laptop
} else {
setwd("~/Code/TSA-Finance/data") #Nic
}
library(fBasics)
library(collections) # install.packages("collections")
library(ggfortify)
library(TTR)
library(dplyr)
library(matrixStats)
## ---------------------- Read-In of serialized objects
top50 <- readRDS(file = "tso.decomposed.top50.rds") # decomposed time-series of top 50 currencies
tso.top50 <- readRDS(file = "tso.top50.rds") # original time-series of top 50 currencies
rows = function(tab) lapply(
seq_len(nrow(tab)),
function(i) unclass(tab[i,,drop=F])
)
## - - - - - - - - - - - - - - - - - - - FIND CORRELATING CURRENCIY TRENDS
# cartesian product
cart.prod <- expand.grid(top50$keys(),top50$keys())
cor_limit <- 0.7 # Faustregel for statistically significant correlation
for (row in rows(cart.prod)){
print('')
Var1 <- paste(row$Var1)
Var2 <- paste(row$Var2)
if (Var1 == Var2){
next()
}else{
s1 <- as.numeric((top50$get(Var1))$seasonal)
s2 <- as.numeric((top50$get(Var2))$seasonal)
dim_min_len <- min(length(s1), length(s2))
correlation_coefficient <- cor(s1[0:dim_min_len],s2[0:dim_min_len], method = c("pearson", "kendall", "spearman"))
if (abs(correlation_coefficient) >= cor_limit){
cat('Looking at combination of ',Var1, Var2)
df <- data.frame(s1[0:dim_min_len],s2[0:dim_min_len])
plot(df$s1, type='l', main=paste(Var1, Var2, correlation_coefficient, sep=' - '), xlab='', ylab='Value', col='blue')
lines(df$s2, col='red')
}else{
cat('Skipped combination of ',Var1, Var2)
}
}
}
## - - - - - - - - - - - - - - - - - - - FIND SIMILAR BEHAVIOR ACCROSS CURRENCIES
get_bigger_date_vector <- function(date1, date2) {
if(date1[1] > date2[1]){
return(date1)
}else if(date1[1] < date2[1]){
return(date2)
}else{
if(date1[2] > date2[2]){
return(date1)
}else if(date1[2] < date2[2]){
return(date2)
}else{
return(date1)
}
}
}
get_smaller_date_vector <- function(date1, date2){
result <- get_bigger_date_vector(date1, date2)
if (result == date1){
return(date2)
}else{
return(date1)
}
}
max_start_date <- c(1970, 1)
min_end_date <- c(2999, 300)
# finding latest start date and earliest finish date
for (currency in tso.top50$keys()) {
currency.ts <- tso.top50$get(currency)
max_start_date <- get_bigger_date_vector(start(currency.ts), max_start_date)
min_end_date <- get_smaller_date_vector(end(currency.ts), min_end_date)
}
currency.tsos <- Dict()
length.tso <- 0
# windowing the timeseries -> all should have the same dimensions
for (currency in tso.top50$keys()) {
currency.ts <- tso.top50$get(currency)
sub.ts <- window(currency.ts, start=max_start_date, end=min_end_date)
length.tso <- length(sub.ts)
currency.tsos$set(currency, sub.ts)
}
currency.df <- data.frame(matrix(ncol = length(currency.tsos$keys()), nrow = length.tso))
colnames(currency.df) <- currency.tsos$keys()
# Build seasonality DF
for (currency in currency.tsos$keys()){
decomposed <- stl(currency.tsos$get(currency), s.window='periodic', na.action = na.omit)
seasonal.part <- as.numeric(decomposed$time.series[,'seasonal'])
seasonal.part.max <- max(seasonal.part)
seasonal.part.min <- min(seasonal.part)
# normalize data to -1 and 1
#currency.df[currency] <- 2 * ((seasonal.part - seasonal.part.min)/(seasonal.part.max - seasonal.part.min)) - 1
# normalize data to 0 and 1
currency.df[currency] <- (((seasonal.part - seasonal.part.min)/(seasonal.part.max - seasonal.part.min)))
}
row.names(currency.df) <- seq(from = as.Date(toString(max_start_date), '%Y, %j'), by = "day", length.out = length.tso)
# calculate row stats
currency.df <- transform(currency.df, row.sd=apply(currency.df, 1, sd, na.rm=TRUE))
currency.df <- transform(currency.df, row.mean=apply(currency.df, 1, mean, na.rm=TRUE))
# generate window
window.size <- .23
min.sd <- 0 #min(currency.df$row.sd)
currency.df <- mutate(currency.df, in.window = seq(from=FALSE, by=FALSE,length.out = length.tso))
currency.df$in.window <- apply(currency.df,1, function(row) { row[['row.sd']] <= min.sd + window.size } )
# plot
ccy <- 'BTC'
rn <- row.names(currency.df)
plot.data.out.window <- data.frame(matrix(ncol = 1, nrow = length.tso))
plot.data.in.window <- data.frame(matrix(ncol = 1, nrow = length.tso))
row.names(plot.data.out.window) <- rn
row.names(plot.data.in.window) <- rn
colnames(plot.data.out.window) <- c(ccy)
colnames(plot.data.in.window) <- c(ccy)
currency.df.bkp <- currency.df
currency.df[currency.df$in.window == TRUE,][[ccy]] <- NA
plot.data.out.window[[ccy]] <- currency.df[[ccy]]
currency.df <- currency.df.bkp
currency.df[currency.df$in.window == FALSE,][[ccy]] <- NA
plot.data.in.window[[ccy]] <- currency.df[[ccy]]
currency.df <- currency.df.bkp
par(mar=c(7,4,4,2))
day.interval <- 90
plot(plot.data.out.window[[ccy]],
type='l',
main=ccy,
col='orange',
ylab = 'Seasonality',
xaxt = 'n', xlab=''
)
axis(1, at=seq(from = 0, to = length.tso, by = day.interval), labels=rn[seq(1,length(rn), day.interval)], las=2)
abline(v=seq(from = 0, to = length.tso, by = day.interval))
lines(plot.data.in.window[[ccy]], col='purple')
legend('bottomright', legend=c('Common', 'Unique'), col=c('purple', 'orange'), lty=1)
# get common dates
row.names(currency.df) <- seq(from = as.Date(toString(max_start_date), '%Y, %j'), by = "day", length.out = length.tso)
common.dates <- row.names(currency.df[currency.df$in.window == TRUE,])
common.dates <- as.Date(common.dates, format="%Y-%m-%d")
#
# hist(as.integer(format(common.dates, format = '%j')),
# breaks = 365,
# main='Frequency of common days in a year',
# xlim=c(1,365),
# xlab='Day of the Year', xaxt='n')
# axis(side=1, at=seq(0,365, 5), labels=seq(0,365,5), las=2)
hist(as.integer(format(common.dates, format = '%j')),
breaks = 73,
main='Frequency of common days in a year',
xlim=c(1,365),
xlab='Day of the Year', xaxt='n',
col='gray')
axis(side=1, at=seq(0,365, 5), labels=seq(0,365,5), las=2)
# EXPERIMENTS
library(TSA)
p <- periodogram(as.numeric(tso.top50$get('BTC')))
dd <- data.frame(freq=p$freq, spec=p$spec)
order <- dd[order(-dd$spec),]
top2 <- head(order, 2)
top2
time = 1/top2$f
time
#[1] 937.5 625.0
s <- stl(tso.top50$get('BTC'), s.window='periodic', na.action = na.omit)
m <- mstl(tso.top50$get('BTC'))
d <- decompose(tso.top50$get('BTC'))
plot(m[,'Seasonal365'], type='l', ylab='Seasonality', col='darkgreen')
lines(d$seasonal, col='lightblue')
lines(s$time.series[,'seasonal'], col='darkblue')
legend('bottomright', legend=c('stl', 'mstl', 'decompose'), col=c('darkblue', 'darkgreen', 'lightblue'), lty=1)
# SEASONPLOT
library(forecast)
ggseasonplot(tso.top50$get('BTC') ,year.labels=TRUE, continuous=TRUE, main='BTC Seasonplot')
|
# Created on
# Course work:
# @author:
# Source:
# Demonstrates factors: factor() stores the distinct values (levels) of a vector.
colours <- c("red","red","orange","green","blue","green","green")
factor_colours = factor(colours)
print(factor_colours)
# Factor elements are accessed like vector elements.
print(factor_colours[2])
# Number of distinct levels (here 4: blue, green, orange, red).
print(nlevels(factor_colours))
# table() tallies how often each level occurs.
print(table(factor_colours))
# Replacing an entry of the levels vector renames (or here, extends) the levels.
# NOTE(review): the factor has only 4 levels, so index 6 extends the levels
# vector and leaves a gap at position 5 - likely an off-by-index; confirm the
# intended position.
levels(factor_colours)[6] = "yellow"
print(factor_colours)
| /chaaya/factor_function.r | no_license | tactlabs/r-samples | R | false | false | 514 | r | # Created on
# Course work:
# @author:
# Source:
# the factor function stores distinct values of a vector
colours <- c("red","red","orange","green","blue","green","green")
factor_colours = factor(colours)
print(factor_colours)
# accessing factor elements
print(factor_colours[2])
# printing the number of levels of the vector
print(nlevels(factor_colours))
# table function
print(table(factor_colours))
# changing the levels of the factor
levels(factor_colours)[6] = "yellow"
print(factor_colours)
|
# Build the model-ready dataset for the gram crop: join soil, weather,
# groundwater and normalised-yield tables on district/block and year.
library(dplyr)
soil_df <- read.csv("Modeling/Recommendation Models/Soil_Nutrient_Summarized.csv")
weather_df <- read.csv("Modeling/Recommendation Models/SummarizedTidyWeather2012_2017.csv")
water_df <- read.csv("Modeling/Recommendation Models/Summarized_Water_Depth.csv")
crop_df <- read.csv("Modeling/Recommendation Models/Yield/Gram_Normalized.csv")
# Inspect the structure of each input table.
str(soil_df)
str(weather_df)
str(water_df)
str(crop_df)
#Gram Sowing Time is October
# Taking only October Month's weather in consideration
weather_df <- weather_df[weather_df$month==10,]
#Taking Water Depth of November Quarter
water_df <- water_df[water_df$Month == 'November',]
# Drop index/constant columns left over from the CSV export.
crop_df$X.1 <- NULL
crop_df$X <- NULL
crop_df$Crop <- NULL
#Data Join
#1. Join Soil and Weather
data_df <- soil_df %>% inner_join(weather_df, by = c("District.Id"="District.Id","Year"="year"))
#2. Join data with water
data_df <- data_df %>% inner_join(water_df, by = c("District.Id"="District.Id","Year"="Year"))
#3. Join with Crop
data_df <- data_df %>% inner_join(crop_df, by = c("Block.Id"="Block","Year"="Year"))
head(data_df)
# Remove helper columns before export.
# NOTE(review): the join above keeps `Block.Id` rather than `Block`, so the
# second deletion is likely a no-op; `row.names = F` below uses the unsafe
# shorthand for FALSE.
data_df$Month <- NULL
data_df$Block <- NULL
str(data_df)
write.csv(data_df, "Modeling/Recommendation Models/Final Data/gram_final.csv", row.names = F)
# Clear the workspace once the file has been written.
rm(list = ls())
| /Modeling/Recommendation Models/Data Joining/Data_Combining_Gram.R | no_license | abhisheksinha08/DataDrivenAgriculture | R | false | false | 1,245 | r | library(dplyr)
# ---- Combine soil, weather, water and gram-yield data for modelling ----
# Reads the summarized input tables, restricts weather and water depth to
# the gram (chickpea) sowing season, inner-joins everything on district /
# block and year, and writes the final modelling table.
# Assumes dplyr is attached (library(dplyr) at the top of this script) and
# that the working directory is the project root.
soil_df <- read.csv("Modeling/Recommendation Models/Soil_Nutrient_Summarized.csv")
weather_df <- read.csv("Modeling/Recommendation Models/SummarizedTidyWeather2012_2017.csv")
water_df <- read.csv("Modeling/Recommendation Models/Summarized_Water_Depth.csv")
crop_df <- read.csv("Modeling/Recommendation Models/Yield/Gram_Normalized.csv")

# Inspect the structure of every input before joining.
str(soil_df)
str(weather_df)
str(water_df)
str(crop_df)

# Gram sowing time is October, so keep only October weather records.
weather_df <- weather_df[weather_df$month == 10, ]
# Keep only the November-quarter water-depth readings.
water_df <- water_df[water_df$Month == 'November', ]

# Drop bookkeeping columns from the crop data before joining.
crop_df$X.1 <- NULL
crop_df$X <- NULL
crop_df$Crop <- NULL

# Data join:
# 1. Soil + weather on district id and year.
data_df <- soil_df %>% inner_join(weather_df, by = c("District.Id" = "District.Id", "Year" = "year"))
# 2. Result + water depth on district id and year.
data_df <- data_df %>% inner_join(water_df, by = c("District.Id" = "District.Id", "Year" = "Year"))
# 3. Result + crop yield on block and year.
data_df <- data_df %>% inner_join(crop_df, by = c("Block.Id" = "Block", "Year" = "Year"))
head(data_df)

# Remove join-only helper columns from the combined table.
data_df$Month <- NULL
data_df$Block <- NULL
str(data_df)

# Persist the final modelling table.  row.names = FALSE (not the bare F
# alias, which is reassignable and unsafe).
write.csv(data_df, "Modeling/Recommendation Models/Final Data/gram_final.csv", row.names = FALSE)
# NOTE(review): rm(list = ls()) wipes the whole global environment; kept
# only because the script currently relies on it when sourced interactively.
rm(list = ls())
|
testthat::context("Testing Group Consecutive Claims Function")

testthat::test_that("Checking if correct data frame is being returned for threshold 10", {
  # Four claims for the same UPIN.  The first three overlap or start within
  # 10 days of the previous claim's max_ssd; the fourth starts 11 days after
  # the third ends, so it should open a new group.
  data_df <- data.frame(
    UPIN = c("A", "A", "A", "A"),
    min_ssd = c('2015-01-01', '2015-01-06', '2015-01-12', '2015-01-31'),
    max_ssd = c('2015-01-08', '2015-01-10', '2015-01-20', '2015-02-10'),
    ClaimNumber = c('25', '18', '19', '20'),
    stringsAsFactors = FALSE
  )
  threshold <- 10
  # Expected: the first three claims collapse into one row spanning
  # 2015-01-01..2015-01-20 (keeping the first ClaimNumber); the fourth
  # claim stays on its own.
  output_df <- data.frame(
    UPIN = c("A", "A"),
    min_ssd = c("2015-01-01", "2015-01-31"),
    max_ssd = c("2015-01-20", "2015-02-10"),
    ClaimNumber = c("25", "20"),
    stringsAsFactors = FALSE
  )
  # Actual value first, expected second (testthat convention), so failure
  # output labels the two sides correctly.
  expect_equal(group_consecutive_claims(data_df, threshold), output_df,
               check.attributes = FALSE)
})
testthat::test_that("Checking if correct data frame is being returned for threshold 5", {
  # Four claims for the same UPIN.  The first three overlap or start within
  # 5 days of the previous claim's max_ssd; the fourth starts 6 days after
  # the third ends, so with a 5-day threshold it should open a new group.
  data_df <- data.frame(
    UPIN = c("A", "A", "A", "A"),
    min_ssd = c('2015-01-01', '2015-01-06', '2015-01-12', '2015-01-26'),
    max_ssd = c('2015-01-08', '2015-01-10', '2015-01-20', '2015-02-10'),
    ClaimNumber = c('25', '18', '19', '20'),
    stringsAsFactors = FALSE
  )
  threshold <- 5
  # Expected: the first three claims collapse into one row spanning
  # 2015-01-01..2015-01-20 (keeping the first ClaimNumber); the fourth
  # claim stays on its own.
  output_df <- data.frame(
    UPIN = c("A", "A"),
    min_ssd = c("2015-01-01", "2015-01-26"),
    max_ssd = c("2015-01-20", "2015-02-10"),
    ClaimNumber = c("25", "20"),
    stringsAsFactors = FALSE
  )
  # Actual value first, expected second (testthat convention).
  expect_equal(group_consecutive_claims(data_df, threshold), output_df,
               check.attributes = FALSE)
})
| /tests/testthat/test-group-consecutive-claims.R | no_license | jfontestad/hospital-readmission | R | false | false | 1,494 | r | testthat::context("Testing Group Consecutive Claims Function")
testthat::test_that("Checking if correct data frame is being returned for threshold 10", {
  # Four claims for the same UPIN.  The first three overlap or start within
  # 10 days of the previous claim's max_ssd; the fourth starts 11 days after
  # the third ends, so it should open a new group.
  data_df <- data.frame(
    UPIN = c("A", "A", "A", "A"),
    min_ssd = c('2015-01-01', '2015-01-06', '2015-01-12', '2015-01-31'),
    max_ssd = c('2015-01-08', '2015-01-10', '2015-01-20', '2015-02-10'),
    ClaimNumber = c('25', '18', '19', '20'),
    stringsAsFactors = FALSE
  )
  threshold <- 10
  # Expected: the first three claims collapse into one row spanning
  # 2015-01-01..2015-01-20 (keeping the first ClaimNumber); the fourth
  # claim stays on its own.
  output_df <- data.frame(
    UPIN = c("A", "A"),
    min_ssd = c("2015-01-01", "2015-01-31"),
    max_ssd = c("2015-01-20", "2015-02-10"),
    ClaimNumber = c("25", "20"),
    stringsAsFactors = FALSE
  )
  # Actual value first, expected second (testthat convention), so failure
  # output labels the two sides correctly.
  expect_equal(group_consecutive_claims(data_df, threshold), output_df,
               check.attributes = FALSE)
})
testthat::test_that("Checking if correct data frame is being returned for threshold 5", {
  # Four claims for the same UPIN.  The first three overlap or start within
  # 5 days of the previous claim's max_ssd; the fourth starts 6 days after
  # the third ends, so with a 5-day threshold it should open a new group.
  data_df <- data.frame(
    UPIN = c("A", "A", "A", "A"),
    min_ssd = c('2015-01-01', '2015-01-06', '2015-01-12', '2015-01-26'),
    max_ssd = c('2015-01-08', '2015-01-10', '2015-01-20', '2015-02-10'),
    ClaimNumber = c('25', '18', '19', '20'),
    stringsAsFactors = FALSE
  )
  threshold <- 5
  # Expected: the first three claims collapse into one row spanning
  # 2015-01-01..2015-01-20 (keeping the first ClaimNumber); the fourth
  # claim stays on its own.
  output_df <- data.frame(
    UPIN = c("A", "A"),
    min_ssd = c("2015-01-01", "2015-01-26"),
    max_ssd = c("2015-01-20", "2015-02-10"),
    ClaimNumber = c("25", "20"),
    stringsAsFactors = FALSE
  )
  # Actual value first, expected second (testthat convention).
  expect_equal(group_consecutive_claims(data_df, threshold), output_df,
               check.attributes = FALSE)
})
|
# Unroot a phylogenetic tree: load ape (Analyses of Phylogenetics and
# Evolution) for tree I/O, read the Newick-format input tree, and strip its
# root; the unrooted tree is written back out later in this script.
library(ape)
testtree <- read.tree("2652_13.txt")      # parse Newick-format tree file
unrooted_tr <- unroot(testtree)           # remove the root node
write.tree(unrooted_tr, file="2652_13_unrooted.txt") | /codeml_files/newick_trees_processed/2652_13/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Convert a rooted phylogenetic tree to unrooted form and save it.
# Assumes ape is attached (library(ape) earlier in this script).
testtree <- read.tree("2652_13.txt")      # parse Newick-format tree file
unrooted_tr <- unroot(testtree)           # remove the root node
write.tree(unrooted_tr, file="2652_13_unrooted.txt")  # write back as Newick
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_sites.R
\name{list_sites}
\alias{list_sites}
\title{Get a list of the sites on ScienceBase}
\arguments{
\item{with_var_src}{character vector of data variables (i.e., 1+ of those
listed in get_var_src_codes(out='var_src'))}
\item{logic}{how to combine the constraints in with_var_src, ...: "any"
  means a site qualifies if it has at least one of the listed variables,
  while "all" requires every listed variable to be available}
\item{...}{additional querying arguments yet to be implemented}
}
\value{
a character vector of site IDs
}
\description{
The with_var_src argument optionally limits the list to those sites that
contain specific timeseries variables.
}
\examples{
\dontrun{
list_sites()
list_sites(with_var_src=c("wtr_nwis","doobs_nwis","shed_nhdplus"), logic="any")
list_sites(list("wtr_nwis",any=c("doobs_nwis","doobs_simModel"),
any=list("disch_nwis", all=c("depth_calcDisch","stage_nwis"))), logic="all")
}
}
| /man/list_sites.Rd | permissive | ehstanley/powstreams | R | false | true | 998 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_sites.R
\name{list_sites}
\alias{list_sites}
\title{Get a list of the sites on ScienceBase}
\arguments{
\item{with_var_src}{character vector of data variables (i.e., 1+ of those
listed in get_var_src_codes(out='var_src'))}
\item{logic}{how to combine the constraints in with_var_src, ...: "any"
  means a site qualifies if it has at least one of the listed variables,
  while "all" requires every listed variable to be available}
\item{...}{additional querying arguments yet to be implemented}
}
\value{
a character vector of site IDs
}
\description{
The with_var_src argument optionally limits the list to those sites that
contain specific timeseries variables.
}
\examples{
\dontrun{
list_sites()
list_sites(with_var_src=c("wtr_nwis","doobs_nwis","shed_nhdplus"), logic="any")
list_sites(list("wtr_nwis",any=c("doobs_nwis","doobs_simModel"),
any=list("disch_nwis", all=c("depth_calcDisch","stage_nwis"))), logic="all")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.