blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9f453c83004857c493b378499f28f6d0f386d12 | 37c42e6f4ae8de0c08bcadf9371a672852f06305 | /R/read_length_plot.R | f1d4e97d607e494c06f4cb2ae6d43bf5641e8473 | [
"MIT"
] | permissive | bontus/riboWaltz | 3b1bea17e9c463f6f5a5bd948827b17bb2f507b2 | 0fe32ea8afa032bbe94ff71bde6591fb1bfcba05 | refs/heads/master | 2021-04-12T08:40:43.509914 | 2018-03-28T09:07:29 | 2018-03-28T09:07:29 | 126,181,901 | 0 | 0 | null | 2018-03-21T13:20:45 | 2018-03-21T13:20:44 | null | UTF-8 | R | false | false | 1,991 | r | read_length_plot.R | #' Plot read length distributions.
#'
#' For a specified sample this function plots the read length distribution. It
#' is possible to visualise the distribution for all the read lengths or to
#' restrict the graphical output to a sub-range of read lengths.
#'
#' @param data A list of data frames from either \code{\link{bamtolist}} or
#' \code{\link{bedtolist}}.
#' @param sample A character string specifying the name of the sample of
#' interest.
#' @param cl An integer value in \emph{[1,100]} specifying the confidence level
#' for restricting the plot to a sub-range of read lengths. By default it is
#' set to 100, meaning that the whole distribution is displayed.
#' @return A list containing a ggplot2 object, and a data frame with the
#' associated data.
#' @examples
#' data(reads_list)
#'
#' ## Visualise distribution for all the read lengths
#' lendist_whole <- rlength_distr(reads_list, sample = "Samp1", cl = 100)
#' lendist_whole[["plot"]]
#'
#' ## Visualise the distribution for a sub-range of read lengths
#' ## (the middle 95%)
#' lendist_sub95 <- rlength_distr(reads_list, sample = "Samp1", cl = 95)
#' lendist_sub95[["plot"]]
#' @import ggplot2
#' @export
rlength_distr <- function(data, sample, cl = 100) {
  # Extract the reads for the requested sample; `data` is a named list of
  # data frames, each with a `length` column of per-read lengths.
  df <- data[[sample]]
  # Tabulate read lengths over the full observed range (keeping zero counts
  # for missing lengths) and convert counts to percentages.
  dist <- table(factor(df$length, levels = min(df$length):max(df$length)))
  dist <- data.frame(length = names(dist), count = as.vector((dist/sum(dist)) * 100))
  # Bounds of the central cl% of the distribution, used as x-axis limits.
  xmin <- quantile(df$length, (1 - cl/100)/2)
  xmax <- quantile(df$length, 1 - (1 - cl/100)/2)
  # FIX: for narrow length ranges floor((xmax - xmin)/7) is 0, which makes
  # seq() error ("invalid '(to - from)/by'"); enforce a step of at least 1.
  step <- max(1, floor((xmax - xmin)/7))
  p <- ggplot(dist, aes(as.numeric(as.character(length)), count)) +
    geom_bar(stat = "identity", fill = "gray80") +
    labs(title = sample, x = "Read length", y = "Count (%)") +
    theme_bw(base_size = 18) +
    theme(plot.title = element_text(hjust = 0.5)) +
    scale_x_continuous(limits = c(xmin - 0.5, xmax + 0.5),
                       breaks = seq(xmin + (xmin %% 2), xmax, by = step))
  # Return both the plot and the underlying percentage table.
  output <- list()
  output[["plot"]] <- p
  output[["df"]] <- dist
  return(output)
}
|
d57749ec55fc6554836455c619e88f7e14c24d35 | 40dbf0aa7d02e678b9730b1be36c03de38dc406a | /GarriCorrelations.R | ed5aeea9be4a2c05226699e16d8bf36695455314 | [] | no_license | ca384/2020-garri-correlation-script | ef9bb809497faa9f0da6ae0a7db49708cec4d51e | 48075ba428090215a2a0c34a5af542a698c98040 | refs/heads/main | 2023-03-09T09:08:51.442713 | 2021-02-08T15:47:16 | 2021-02-08T15:47:16 | 335,162,354 | 0 | 1 | null | 2021-02-08T15:47:17 | 2021-02-02T03:56:30 | R | UTF-8 | R | false | false | 2,585 | r | GarriCorrelations.R | ## 2020-garri-correlation-script
# Scripts for 2020 cassava roots and garri analysis
##script for correlation of garri traits
# Computes trait correlations (peel/peeled-root/dewatered-mash fractions, dry
# matter, garri yield) separately for the Umudike and Otobi locations and
# writes each correlation matrix to CSV.
library(here)
i_am("GarriCorrelations.R")
# Combined Umudike/Otobi garri data set.
garri.38 <- read.csv(here("2020complete_umu_oto_garri_data.csv"), header=TRUE)
dim(garri.38)
str(garri.38)
garri.38$genotypes.garri<-as.factor(garri.38$genotypes.garri)
garri.38$LOCATION<-as.factor(garri.38$LOCATION)
# Log-transform yields; express processing weights as % of fresh root weight.
garri.38$garri<-round(log(garri.38$GARRI.YIELD.),3)
garri.38$garri.plot<-round(log(garri.38$garri.yield.plot), 3)
garri.38$peels<-round(garri.38$WT.OF.PEEL*100/garri.38$fresh.ROOT.WT.,3)
garri.38$peeled.root<-round(garri.38$WT.OF.PEELED.ROOT.*100/garri.38$fresh.ROOT.WT.,3)
garri.38$dewatered.mash<-round(garri.38$WT.OF.DEWATERED.MASH*100/garri.38$fresh.ROOT.WT.,3)
## plots
# Exploratory distributions of the derived traits.
hist(garri.38$dewatered.mash)
hist(garri.38$peels)
hist(garri.38$garri)
hist(garri.38$garri.plot)
hist(garri.38$peeled.root)
hist(garri.38$dmcov)
hist(garri.38$dmc)
boxplot(garri.38$dmcov~garri.38$LOCATION)
boxplot(garri.38$garri.plot~garri.38$LOCATION)
boxplot(garri.38$dmc~garri.38$LOCATION, main="Dry matter content by specific gravity ",ylab = "DMC (%)",xlab="LOCATION")
##Removing outliers using the boxplot method
outliers <- boxplot(garri.38$dewatered.mash,plot=FALSE)$out
#by setting plot to false the boxplot is hidden
garri.38[which(garri.38$dewatered.mash%in% outliers ),]
#will bring out the rows and columns containing the outlier
m <- garri.38[-which(garri.38$dewatered.mash%in% outliers),]
#will make a subset of those that are not outlier
# NOTE(review): `m` (outliers removed) is only used for the boxplot below;
# the correlation subsets further down are built from `garri.38`, so the
# outlier removal does not affect the correlations -- confirm this is intended.
boxplot(m$dewatered.mash~m$LOCATION,main="Proportion of dewatered mash per fresh weight",ylab = "Dewatered mash (%)",xlab="Location")
library(dplyr)
# Keep only the traits entering the correlation analysis.
subset.garri.38<-select(garri.38,LOCATION,peels,peeled.root,dewatered.mash,dmc,dmcov,garri.plot,garri)
subset.garri.38 <- subset.garri.38 %>% rename(d.mash=dewatered.mash)
dim(subset.garri.38)
# Split by location and drop the grouping column before cor().
umu.38<-subset(subset.garri.38, LOCATION=="umudike")
umu.38$LOCATION<-NULL
oto.38<-subset(subset.garri.38, LOCATION=="OTOBI")
oto.38$LOCATION<-NULL
library(corrplot)
corr.umu<-round(cor(umu.38,use="complete.obs"),3)
# NOTE(review): corrplot(..., order = "hclust") reorders variables by
# clustering and returns the reordered matrix, which is what gets written to
# CSV below -- this is why the two CSVs end up with different trait orders
# (see the final comment). Writing `corr.umu`/`corre.oto` instead, or using
# order = "original", would keep both files in the same order.
corplot.umu<-corrplot(corr.umu, type = "upper", order = "hclust",tl.col = "black" , tl.srt = 90, cl.pos = "n")
write.csv(corplot.umu, file=here("correlation.umu_new.csv"))
corre.oto<-round(cor(oto.38,use="complete.obs"),3)
corplot.oto<-corrplot(corre.oto, type = "upper", order = "hclust",tl.col = "black" , tl.srt = 90, cl.pos = "n")
write.csv(corplot.oto,file=here("correlation.oto_new.csv"))
##The traits of the two correlation plots are not in the same order
|
401c1a646e3e2a2c02f422eb677af74886e0c101 | 03606b6a954fa42a9ce928285842ec9b2e07b670 | /analysis_kenya_evi2records.R | ff6c86ec9f0a58b3d9845c2f90ef52f877c341bb | [] | no_license | erickokuto/r_scripts | 0f205d8bc95fd17d4a38eff8847fec45844371be | bda8c6773a18a97b394e60616d8820bc693411f7 | refs/heads/master | 2021-01-10T12:37:15.492375 | 2016-03-03T09:36:49 | 2016-03-03T09:36:49 | 53,028,314 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,774 | r | analysis_kenya_evi2records.R |
source("Phenology_Functions_Erick.R")
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Record without gaps both using INLA and spDTyn
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
require(raster)
require(foreach)
require(INLA)
require(hydromad)
require(zoo)
require(dlm)
require(R2BayesX)
# Repeated pattern below: list the per-date GeoTIFFs, read each one into a
# RasterLayer, stack them into a time series, recode the 255 fill value to
# NA, and write the stack out as a single multi-band file.
# (1:length(x) replaced by seq_along(x) throughout: with an empty listing,
# 1:length(x) yields c(1, 0) and iterates twice instead of zero times.)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Really Data with gaps
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Real_evi2_lists <- list.files(file.path("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataObs"),
                              pattern="*.tif", full.names=TRUE)
Real_evi2_rasters<-foreach(files=seq_along(Real_evi2_lists), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(Real_evi2_lists[[files]])
Real_stk.Data.evi2<-stack(Real_evi2_rasters, bands=NULL, native=FALSE, RAT=TRUE)
Real_stk.Data.evi2[Real_stk.Data.evi2 == 255] <- NA
writeRaster(Real_stk.Data.evi2, "F:/CLIM_DATA/tranzoia_county/other Results/rasterImages/stkRealUnsmoothed.tif")
plot(Real_stk.Data.evi2[[2]])
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Simulated Records with gaps
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# NOTE(review): this reads from .../realData/RealDataObs, the same directory
# as the "real" record above -- looks like a copy-paste leftover; a SimDataObs
# path was presumably intended. TODO confirm before changing.
Sim_evi2_lists <- list.files(file.path("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataObs"),
                             pattern="*.tif", full.names=TRUE)
Sim_evi2_rasters<-foreach(files=seq_along(Sim_evi2_lists), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(Sim_evi2_lists[[files]])
Sim_stk.Data.evi2<-stack(Sim_evi2_rasters, bands=NULL, native=FALSE, RAT=TRUE)
Sim_stk.Data.evi2[Sim_stk.Data.evi2 == 255] <- NA
writeRaster(Sim_stk.Data.evi2, "F:/CLIM_DATA/tranzoia_county/other Results/rasterImages/stkSimUnsmoothed.tif")
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Records gap fillied using INLA package
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
inla_evi2_lists <- list.files(file.path("F:/CLIM_DATA/tranzoia_county/other Results/INLA/realData/RealDataFitted"),
                              pattern="*.tif", full.names=TRUE)
inla_evi2_rasters<-foreach(files=seq_along(inla_evi2_lists), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(inla_evi2_lists[[files]])
inla_stk.Data.evi2<-stack(inla_evi2_rasters, bands=NULL, native=FALSE, RAT=TRUE)
inla_stk.Data.evi2[inla_stk.Data.evi2 == 255] <- NA
writeRaster(inla_stk.Data.evi2, "F:/CLIM_DATA/tranzoia_county/other Results/rasterImages/stkReal_inla_smoothed.tif")
# NOTE(review): single-bracket [10] extracts cell values, not the 10th layer;
# [[10]] was probably intended -- TODO confirm.
plot(inla_stk.Data.evi2[10])
#>>>>>>>>INLA with simulated data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
inla_evi2_lists22 <- list.files(file.path("F:/CLIM_DATA/tranzoia_county/other Results/INLA/SimData/SimDataFitted"),
                                pattern="*.tif", full.names=TRUE)
inla_evi2_rasters22<-foreach(files=seq_along(inla_evi2_lists22), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(inla_evi2_lists22[[files]])
inla_stk.Data.evi22<-stack(inla_evi2_rasters22, bands=NULL, native=FALSE, RAT=TRUE)
inla_stk.Data.evi22[inla_stk.Data.evi22 == 255] <- NA
writeRaster(inla_stk.Data.evi22, "F:/CLIM_DATA/tranzoia_county/other Results/rasterImages/stkSim_inla_smoothed.tif")
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Analysis
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Records gap-filled with the spDTyn package (real and simulated).
spDTyn_evi2_lists1 <- list.files(file.path("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/RealData/RealDataFitted"),
                                 pattern="*.tif", full.names=TRUE)
spDTyn_evi2_rasters1<-foreach(files=seq_along(spDTyn_evi2_lists1), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(spDTyn_evi2_lists1[[files]])
spDTyn_stk.Data.evi2Pred<-stack(spDTyn_evi2_rasters1, bands=NULL, native=FALSE, RAT=TRUE)
spDTyn_stk.Data.evi2Pred[spDTyn_stk.Data.evi2Pred == 255] <- NA
writeRaster(spDTyn_stk.Data.evi2Pred, "F:/CLIM_DATA/tranzoia_county/other Results/rasterImages/stkReal_spDTyn_smoothed.tif")
#>>>>>>>>spDTyn with simulated data >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
spDTyn_evi2_lists2 <- list.files(file.path("F:/CLIM_DATA/tranzoia_county/other Results/spDTyn/SimData/SimDataFitted"),
                                 pattern="*.tif", full.names=TRUE)
spDTyn_evi2_rasters2<-foreach(files=seq_along(spDTyn_evi2_lists2), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(spDTyn_evi2_lists2[[files]])
spDTyn_stk.Data.evi2Pred2<-stack(spDTyn_evi2_rasters2, bands=NULL, native=FALSE, RAT=TRUE)
spDTyn_stk.Data.evi2Pred2[spDTyn_stk.Data.evi2Pred2 == 255] <- NA
writeRaster(spDTyn_stk.Data.evi2Pred2, "F:/CLIM_DATA/tranzoia_county/other Results/rasterImages/stkSim_spDTyn_smoothed.tif")
#Observation used
# Percentage of usable (non-missing) observations per pixel, first for the
# Tranzoia test region, then for the whole-Kenya 10 km record.
obsUsed=obs.used.fun(stk=Real_stk.Data.evi2)
writeRaster(obsUsed, "F:/CLIM_DATA/tranzoia_county/other Results/Results/obsUsed_tranzoia.tif")
levelplot(obsUsed[[1]])
plot(Real_stk.Data.evi2[[4]])
#kenya records percentage observation used
# NOTE(review): Real_evi2_lists, Real_stk.Data.evi2 and obsUsed are reused
# here for the Kenya record, overwriting the Tranzoia objects built above;
# the E/RMSE calls further down still reference Real_stk.Data.evi2 and will
# therefore see the Kenya stack if this whole script is run top to bottom --
# TODO confirm the intended execution order.
Real_evi2_lists <- list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/kenya_10kmclipped"),
                              pattern="*.tif", full.names=TRUE)
Real_evi2_rasters<-foreach(files=seq_along(Real_evi2_lists), .packages="raster", .combine=c, .verbose=FALSE) %do%
  raster(Real_evi2_lists[[files]])
Real_stk.Data.evi2<-stack(Real_evi2_rasters, bands=NULL, native=FALSE, RAT=TRUE)
obsUsed=obs.used.fun(stk=Real_stk.Data.evi2)
writeRaster(obsUsed, "F:/CLIM_DATA/Kenya_evi2_Records/Results/Obs.used/obsUsed_kenya.tif")
require(maptools)
proj <- CRS('+proj=longlat +ellps=WGS84')
##Change to your folder
mapaSHP <- readShapeLines('F:/CLIM_DATA/Kenya_evi2_Records/kenyacounties/counties/counties.shp', proj4string=proj)
p <- spplot(obsUsed, par.settings=BuRdTheme)
p + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
#The analysis or E and RMSE
# Coefficient-of-efficiency (E) maps: observed stacks vs gap-filled stacks,
# for both smoothing methods and both the real and simulated records.
E_With_INLA_Fitted=r2.function2(obs.stk=Real_stk.Data.evi2, sim.stk=inla_stk.Data.evi2)
writeRaster(E_With_INLA_Fitted, "F:/CLIM_DATA/tranzoia_county/other Results/Results/E_With_INLA_smoothed_RealData.tif")
levelplot(E_With_INLA_Fitted)
E_With_spDTyn_Fitted=r2.function2(obs.stk=Real_stk.Data.evi2, sim.stk=spDTyn_stk.Data.evi2Pred)
writeRaster(E_With_spDTyn_Fitted, "F:/CLIM_DATA/tranzoia_county/other Results/Results/E_With_spDTyn_smoothed_RealData.tif")
levelplot(E_With_spDTyn_Fitted)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
E_With_INLA_Fitted_Sim=r2.function2(obs.stk=Sim_stk.Data.evi2, sim.stk=inla_stk.Data.evi22)
writeRaster(E_With_INLA_Fitted_Sim, "F:/CLIM_DATA/tranzoia_county/other Results/Results/E_With_INLA_smoothed__SimData.tif")
levelplot(E_With_INLA_Fitted_Sim)
E_With_spDTyn_Fitted_Sim=r2.function2(obs.stk=Sim_stk.Data.evi2, sim.stk=spDTyn_stk.Data.evi2Pred2)
writeRaster(E_With_spDTyn_Fitted_Sim, "F:/CLIM_DATA/tranzoia_county/other Results/Results/E_With_spDTyn_smoothed_SimData.tif")
levelplot(E_With_spDTyn_Fitted_Sim)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#RMSE
# Root-mean-square-error maps, same four comparisons as above.
RMSE_With_INLA_Fitted=funrmse(obs.stk=Real_stk.Data.evi2, sim.stk=inla_stk.Data.evi2)
writeRaster(RMSE_With_INLA_Fitted, "F:/CLIM_DATA/tranzoia_county/other Results/Results/RMSE_With_INLA_smoothed_RealData.tif")
spplot(RMSE_With_INLA_Fitted)
RMSE_With_spDTyn_Fitted=funrmse(obs.stk=Real_stk.Data.evi2, sim.stk=spDTyn_stk.Data.evi2Pred)
writeRaster(RMSE_With_spDTyn_Fitted, "F:/CLIM_DATA/tranzoia_county/other Results/Results/RMSE_With_spDTyn_smoothed_RealData.tif")
spplot(RMSE_With_spDTyn_Fitted)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
RMSE_With_INLA_Fitted_Sim=funrmse(obs.stk=Sim_stk.Data.evi2, sim.stk=inla_stk.Data.evi22)
writeRaster(RMSE_With_INLA_Fitted_Sim, "F:/CLIM_DATA/tranzoia_county/other Results/Results/RMSE_With_INLA_smoothed__SimData.tif")
spplot(RMSE_With_INLA_Fitted_Sim)
RMSE_With_spDTyn_Fitted_Sim=funrmse(obs.stk=Sim_stk.Data.evi2, sim.stk=spDTyn_stk.Data.evi2Pred2)
writeRaster(RMSE_With_spDTyn_Fitted_Sim, "F:/CLIM_DATA/tranzoia_county/other Results/Results/RMSE_With_spDTyn_smoothed_SimData.tif")
spplot(RMSE_With_spDTyn_Fitted_Sim)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#ploting characters with rasterVis package
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
require(maptools)
proj <- CRS('+proj=longlat +ellps=WGS84')
##Change to your folder
mapaSHP <- readShapeLines('F:/CLIM_DATA/tranzoia_county/trans.shp', proj4string=proj)
p <- spplot(RMSE_With_INLA_Fitted, par.settings=BuRdTheme)
p + layer(sp.lines(mapaSHP, lwd=0.8, col='darkgray'))
#>>>>>>>>>
# Side-by-side comparison of the two methods' RMSE maps.
library(gridExtra)
p1 <- levelplot(RMSE_With_INLA_Fitted)
p2 <- levelplot(RMSE_With_spDTyn_Fitted)
grid.arrange(p1, p2, ncol=2)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Latitudinal means and standard deviations
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Per-latitude summary table: one mean/sd column pair per smoothing method.
DF<-read.csv("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/Lat_means/kenya_lat_evi2.csv")
# NOTE(review): attach() puts DF's columns on the search path; the ggplot
# calls below rely on this. Referencing columns through DF (or aes(.data$...))
# would be safer and avoid masking surprises.
attach(DF)
names(DF)
require(ggplot2)
# Ribbon (mean +/- sd) aesthetics for each smoothing method, built from the
# corresponding column pairs of DF.
eb_raw <- aes(ymax = raw.mean + raw.sd, ymin = raw.mean - raw.sd)
eb_sgolay <- aes(ymax = sgolay.mean + sgolay.sd, ymin = sgolay.mean - sgolay.sd)
eb_spde_non_dlm <- aes(ymax = nonDLMspde.mean + nonDLMspde.sd, ymin = nonDLMspde.mean - nonDLMspde.sd)
ebs_kalman <- aes(ymax = DLM.kalman.mean + DLM.kalman.sd, ymin = DLM.kalman.mean - DLM.kalman.sd)
eb_spde_dlm <- aes(ymax = DLMspde.mean + DLMspde.sd, ymin = DLMspde.mean - DLMspde.sd)
# FIX: the sp.spde and sp.spDTyn column sets were swapped between these two
# aesthetics (eb_sp_spde used sp.spDTyn.* and vice versa); each band now uses
# the columns matching its name.
eb_sp_spde <- aes(ymax = sp.spde.mean + sp.spde.sd, ymin = sp.spde.mean - sp.spde.sd)
eb_sp_spDTyn <- aes(ymax = sp.spDTyn.mean + sp.spDTyn.sd, ymin = sp.spDTyn.mean - sp.spDTyn.sd)
###############################################################################################
# DISTRIBUTION OF MEANS
###############################################################################################
# Latitudinal profile plots: mean line with a +/- sd ribbon per method,
# flipped so latitude runs along the vertical axis.
require(gridExtra)
nonDLM <- ggplot(DF, aes(Latitude))
nonDLM <- nonDLM + geom_ribbon(eb_raw, alpha = 0.5, fill='grey') + geom_line(aes(y=raw.mean), colour="Black")
nonDLM <- nonDLM + geom_ribbon(eb_sgolay, alpha = 0.5, fill='light blue') + geom_line(aes(y=sgolay.mean), colour="Dark Blue")
nonDLM <- nonDLM + geom_ribbon(eb_spde_non_dlm , alpha = 0.5, fill='light green') + geom_line(aes(y=nonDLMspde.mean), colour="Dark Green") +
coord_flip() +
theme(panel.background = element_rect(fill='white', colour='black')) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
theme(panel.border = element_blank()) +
xlab(NULL) +
ylab(NULL) +
theme(axis.text.x=element_blank()) +
theme(axis.text.y=element_text(size=20,face="bold"))
#annotate("text", x=41, y=-3.3, label="A",size=8,fontface="bold.italic",colour="black",parse=TRUE) +
# scale_x_continuous(limits = c(33.5, 42),
# breaks=c(33.5, 36, 39, 42)) +
# scale_y_continuous(limits = c(-5, 6),
# # breaks=c(-5, -2, 2, 6))
# scale_x_continuous(limits = c(-5, 6),
# breaks=c(-5, -2, 2, 6)) +
# scale_y_continuous(limits = c(33.5, 42),
# breaks=c(33.5, 36, 39, 42))
# NOTE(review): pFeb/pMar below reference ebraw2/ebsgolay2/ebwhit2/ebspde2
# (and ebraw3 etc.) plus feb.*/mar.* columns, none of which are defined in
# this script; as written these plots will fail with "object not found".
# They look like leftovers from another analysis -- TODO confirm or remove.
pFeb<- ggplot(DF, aes(Latitude))
pFeb <- pFeb + geom_ribbon(ebraw2, alpha = 0.5, fill='grey') + geom_line(aes(y=feb.raw.mean), colour="Black")
pFeb <- pFeb + geom_ribbon(ebsgolay2, alpha = 0.5, fill='light blue') + geom_line(aes(y=feb.sgolay.mean), colour="Dark Blue")
pFeb <- pFeb + geom_ribbon(ebwhit2, alpha = 0.5, fill='light green') + geom_line(aes(y=feb.whit.mean), colour="Dark Green")
pFeb <- pFeb + geom_ribbon(ebspde2, alpha = 0.5, fill='Orange') + geom_line(aes(y=feb.spde.mean), colour="Red") +
coord_flip() +
#theme(plot.margin=unit(c(1,1,1,1),"cm"))+
theme(panel.background = element_rect(fill='white', colour='black')) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
theme(panel.border = element_blank()) +
xlab(NULL) +
ylab(NULL) +
theme(axis.text.x=element_blank()) +
theme(axis.text.y=element_blank()) +
annotate("text", x=-50, y=-0.4, label="Feb",size=8,fontface="bold.italic",colour="black",parse=TRUE) +
scale_x_continuous(limits = c(-60,60),
breaks=c(-60,0,60)) +
scale_y_continuous(limits = c(-0.5, 1),
breaks=c(-0.5,0,1))
pMar<- ggplot(DF, aes(Latitude))
pMar <- pMar + geom_ribbon(ebraw3, alpha = 0.5, fill='grey') + geom_line(aes(y=mar.raw.mean), colour="Black")
pMar <- pMar + geom_ribbon(ebsgolay3, alpha = 0.5, fill='light blue') + geom_line(aes(y=mar.sgolay.mean), colour="Dark Blue")
pMar <- pMar + geom_ribbon(ebwhit3, alpha = 0.5, fill='light green') + geom_line(aes(y=mar.whit.mean), colour="Dark Green")
pMar <- pMar + geom_ribbon(ebspde3, alpha = 0.5, fill='Orange') + geom_line(aes(y=mar.spde.mean), colour="Red") +
coord_flip() +
#theme(plot.margin=unit(c(-0.5,-0.5,-0.5,1),"cm"))+
theme(panel.background = element_rect(fill='white', colour='black')) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
theme(panel.border = element_blank()) +
xlab(NULL) +
ylab(NULL) +
theme(axis.text.x=element_blank()) +
theme(axis.text.y=element_text(size=20,face="bold")) +
annotate("text", x=-50, y=-0.4, label="Mar",size=8,fontface="bold.italic",colour="black",parse=TRUE) +
scale_x_continuous(limits = c(-60,60),
breaks=c(-60,0,60)) +
scale_y_continuous(limits = c(-0.5, 1),
breaks=c(-0.5,0,1))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# observation available before smoothing
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
require(maptools)
proj <- CRS('+proj=longlat +ellps=WGS84')
mapaSHP <- readShapeLines('F:/CLIM_DATA/Kenya_evi2_Records/kenyacounties/counties/counties.shp', proj4string=proj)
# p <- spplot(obsUsed, par.settings=BuRdTheme)
# p + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
#
require(rasterVis)
require(RColorBrewer)
# Continuous map of the percentage of usable observations per pixel.
lists.obs = list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/obs_used"),pattern="*.tif", full.names=TRUE)
obs.used.image = foreach(files=seq_along(lists.obs), .combine=c, .verbose=FALSE) %do%
  raster(lists.obs[[files]])
stk_obs_used=stack(obs.used.image, bands=NULL, native=FALSE, RAT=TRUE)
### Show all the colour schemes available
require(RColorBrewer)
require(gimms)
#blrd.palette <- colorRampPalette(c("blue", "red"), space = "Lab")
p1=spplot(stk_obs_used, xlim = bbox(stk_obs_used)[1, ],
          ylim = bbox(stk_obs_used)[2, ],
          colorkey=list(labels = list(cex = 1.5,fontface='plain'), space='right'),
          col.regions = colorRampPalette(brewer.pal(9, "YlOrRd")),
          at=seq(70, 100, 1),
          strip=FALSE,
          scales = list(draw = TRUE),
          main=list("", col="black",cex=2, fontface='bold'),
          # panel = function(x, y, z,...) {
          #   panel.levelplot.raster(x, y, z,...)
          #   panel.text(x = 40.5, y = -3.2,
          #              labels=Names[panel.number()],
          #              col.text = "black",
          #              cex=1.5,fontface='bold')
          # },
          contour=FALSE, layout=c(1, 1))
p1 + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
####################################################
## Categorical data
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
lists.obs = list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/obs_used"),pattern="*.tif", full.names=TRUE)
obs.used.image = foreach(files=seq_along(lists.obs), .combine=c, .verbose=FALSE) %do%
  raster(lists.obs[[files]])
rr=obs.used.image
require(raster)
# Classify the percentage of available observations into four bands.
# FIX: the innermost ifelse() had no 'no' argument; because ifelse() evaluates
# its branches over the whole vector, any input containing a value < 90 raised
# "argument 'no' is missing". After the first three tests the only remaining
# case is x >= 90, so 4 is supplied directly. Cut points (50/70/90) unchanged.
# NOTE(review): the legend labels below ('< 40%', '40-60%', '60-80%', '> 80%')
# do not match these 50/70/90 cut points -- confirm which set is intended.
fcat=function(x){
  ifelse(x < 50, 1,
         ifelse(x < 70, 2,
                ifelse(x < 90, 3, 4)))
}
# NOTE(review): calc() expects a Raster* object; if lists.obs holds more than
# one file, obs.used.image is a list of layers and this call will fail.
obs.used.cat=calc(rr, fcat)
obs.rat <- ratify(obs.used.cat)
rat <- levels(obs.rat)[[1]]
rat$obs_available <- c('< 40%', '40-60%', '60-80%', '> 80%')
rat$class <- c('A', 'B', 'C', 'D')
levels(obs.rat) <- rat
obs.rat
p1.new=levelplot(obs.rat, col.regions=c('midnightblue', 'brown', 'indianred1', 'palegreen'))
p1.new + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
## with 'att' you can choose another variable from the RAT
# levelplot(r, att=2, col.regions=c('palegreen', 'midnightblue', 'indianred1'))
# levelplot(r, att='class', col.regions=c('palegreen', 'midnightblue', 'indianred1'))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# pixel level means
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
lists.means = list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/means"),pattern="*.tif", full.names=TRUE)
means.images = foreach(files=seq_along(lists.means), .combine=c, .verbose=FALSE) %do%
  raster(lists.means[[files]])
# Layers reordered so panels A-G follow the method order used in the figure.
stk_means=stack(means.images[[7]],
                means.images[[3]],
                means.images[[4]],
                means.images[[1]],
                means.images[[2]],
                means.images[[6]],
                means.images[[5]],bands=NULL, native=FALSE, RAT=TRUE)
require(bfastSpatial)
# Cell-wise average of the two non-DLM SPDE mean layers.
non_Dlm_spde_mean=summaryBrick(stack(means.images[[4]],means.images[[1]]), fun="mean", na.rm=TRUE)
writeRaster(non_Dlm_spde_mean, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/means/non_Dlm_spde_mean.tif", overwrite=TRUE)
require(rasterVis)
blrd.palette <- colorRampPalette(c("blue", "red"), space = "Lab")
Names <- c("A", "B", "C","D", "E","F", "G")
p2=spplot(stk_means, xlim = bbox(stk_means)[1, ],
          ylim = bbox(stk_means)[2, ],
          colorkey=list(labels = list(cex = 1.5,fontface='plain'), space='right'),
          col.regions = colorRampPalette(brewer.pal(9, "YlOrRd")),
          at=seq(0, 0.6, 0.01),
          strip=FALSE,
          aspect="fill",
          scales = list(draw = FALSE),
          main=list(""),
          panel = function(x, y, z,...) {
            panel.levelplot.raster(x, y, z,...)
            # Panel letter placed in the lower-right of each map.
            panel.text(x = 41, y = -3.3,
                       labels=Names[panel.number()],
                       col.text = "black",
                       cex=1.5,fontface='bold')
          }, contour=FALSE, layout=c(2, 4))
p2 + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# pixel level standard deviations
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
lists.sd = list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/sd"),pattern="*.tif", full.names=TRUE)
sd.images = foreach(files=seq_along(lists.sd), .combine=c, .verbose=FALSE) %do%
  raster(lists.sd[[files]])
stk_sd=stack(sd.images[[7]],
             sd.images[[3]],
             sd.images[[4]],
             sd.images[[1]],
             sd.images[[2]],
             sd.images[[6]],
             sd.images[[5]],bands=NULL, native=FALSE, RAT=TRUE)
require(rasterVis)
Names <- c("A", "B", "C","D", "E","F", "G")
p3=spplot(stk_sd, xlim = bbox(stk_sd)[1, ],
          ylim = bbox(stk_sd)[2, ],
          colorkey=list(labels = list(cex = 1.5,fontface='plain'), space='right'),
          col.regions = colorRampPalette(brewer.pal(9, "YlOrRd")),
          at=seq(0, 0.3, 0.01),
          strip=FALSE,
          aspect="fill",
          scales = list(draw = FALSE),
          main=list(""),
          panel = function(x, y, z,...) {
            panel.levelplot.raster(x, y, z,...)
            panel.text(x = 41, y = -3.3,
                       labels=Names[panel.number()],
                       col.text = "black",
                       cex=1.5,fontface='bold')
          }, contour=FALSE, layout=c(2, 4))
p3 + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# root mean squared error analysis
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
lists.rmse = list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/rmse"),pattern="*.tif", full.names=TRUE)
rmse.images = foreach(files=seq_along(lists.rmse), .combine=c, .verbose=FALSE) %do%
  raster(lists.rmse[[files]])
# Layers reordered so panels A-F follow the method order used in the figure.
stk_rmse=stack(rmse.images[[3]],
               rmse.images[[4]],
               rmse.images[[1]],
               rmse.images[[2]],
               rmse.images[[6]],
               rmse.images[[5]],bands=NULL, native=FALSE, RAT=TRUE)
require(bfastSpatial)
# Cell-wise mean RMSE for selected method pairs.
rmse_non_DLM_sgolay=summaryBrick(stack(rmse.images[[4]],rmse.images[[3]]), fun="mean", na.rm=TRUE)
writeRaster(rmse_non_DLM_sgolay, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/rmse/rmse_non_DLM_sgolay.tif", overwrite=TRUE)
rmse_DLM_kalman=summaryBrick(stack(rmse.images[[1]],rmse.images[[3]]), fun="mean", na.rm=TRUE)
writeRaster(rmse_DLM_kalman, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/rmse/rmse_DLM_kalman.tif", overwrite=TRUE)
rmse_DLM_spde=summaryBrick(stack(rmse.images[[1]],rmse.images[[2]]), fun="mean", na.rm=TRUE)
writeRaster(rmse_DLM_spde, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/rmse/rmse_DLM_spde.tif", overwrite=TRUE)
require(rasterVis)
Names <- c("A", "B", "C","D", "E","F")
p4=spplot(stk_rmse, xlim = bbox(stk_rmse)[1, ],
          ylim = bbox(stk_rmse)[2, ],
          colorkey=list(labels = list(cex = 1.5,fontface='plain'), space='right'),
          col.regions = colorRampPalette(brewer.pal(9, "YlOrRd")),
          at=seq(0, 0.3, 0.01),
          strip=FALSE,
          aspect="fill",
          scales = list(draw = FALSE),
          main=list(""),
          panel = function(x, y, z,...) {
            panel.levelplot.raster(x, y, z,...)
            panel.text(x = 41, y = -3.3,
                       labels=Names[panel.number()],
                       col.text = "black",
                       cex=1.5,fontface='bold')
          }, contour=FALSE, layout=c(2, 3))
p4 + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# coefficient of efficiency E analysis
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# E derived per method as (1 - RMSE) - 0.1.
# NOTE(review): the constant -0.1 offset is undocumented, and 1 - RMSE is not
# the standard Nash-Sutcliffe coefficient of efficiency -- confirm the
# intended definition before reusing these maps.
E_non_DLM_sgolay=(1-rmse.images[[3]])-0.1
writeRaster(E_non_DLM_sgolay, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E/E_non_DLM_sgolay.tif", overwrite=TRUE)
E_non_DLM_spde=(1-rmse.images[[4]])-0.1
writeRaster(E_non_DLM_spde, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E/E_non_DLM_spde.tif", overwrite=TRUE)
E_DLM_kalman=(1-rmse.images[[1]])-0.1
writeRaster(E_DLM_kalman, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E/E_DLM_kalman.tif", overwrite=TRUE)
E_DLM_spde=(1-rmse.images[[2]])-0.1
writeRaster(E_DLM_spde, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E/E_DLM_spde.tif", overwrite=TRUE)
E_sp_spDTyn=(1-rmse.images[[6]])-0.1
writeRaster(E_sp_spDTyn, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E/E_sp_spDTyn.tif", overwrite=TRUE)
E_sp_spde=(1-rmse.images[[5]])-0.1
writeRaster(E_sp_spde, "F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E/E_sp_spde.tif", overwrite=TRUE)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
lists.E = list.files(file.path("F:/CLIM_DATA/Kenya_evi2_Records/Results/outputs/coefficient_E"),pattern="*.tif", full.names=TRUE)
E.images = foreach(files=seq_along(lists.E), .combine=c, .verbose=FALSE) %do%
  raster(lists.E[[files]])
stk_E=stack(E.images[[3]],
            E.images[[4]],
            E.images[[1]],
            E.images[[2]],
            E.images[[6]],
            E.images[[5]],bands=NULL, native=FALSE, RAT=TRUE)
require(rasterVis)
Names <- c("A", "B", "C","D", "E","F")
p5=spplot(stk_E, xlim = bbox(stk_E)[1, ],
          ylim = bbox(stk_E)[2, ],
          colorkey=list(labels = list(cex = 1.5,fontface='plain'), space='right'),
          col.regions = colorRampPalette(brewer.pal(9, "YlOrRd")),
          at=seq(0.7, 1, 0.01),
          strip=FALSE,
          aspect="fill",
          scales = list(draw = FALSE),
          main=list(""),
          panel = function(x, y, z,...) {
            panel.levelplot.raster(x, y, z,...)
            panel.text(x = 41, y = -3.3,
                       labels=Names[panel.number()],
                       col.text = "black",
                       cex=1.5,fontface='bold')
          }, contour=FALSE, layout=c(2, 3))
p5 + layer(sp.lines(mapaSHP, lwd=0.8, col='black'))
# Scratch/exploratory section: trying out seasonal-fit approaches.
# NOTE(review): `yyy` and `month` are not defined anywhere in this script,
# and the `?...` help calls are interactive only -- this section will not run
# as-is.
require(hydroGOF)
?nseStat
##>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Method one: harmonic regression; coefficients are intercept, cosine and
# sine amplitudes, with derived amplitude/phase in $pars.
fitb <- harmonic.regression(yyy, inputtime=month)
mu = fitb$coeffs[1]
cos.amplitude = fitb$coeffs[2]
sin.amplitude = fitb$coeffs[3]
amplitude = fitb$pars[1]
phase = fitb$pars[2]
?harmonic.regression
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Using method two
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
require(season)
?cosinor
fit = cosinor(yyy ~ 1, date="month",
type = 'monthly', data = data.frame(yyy, month),
family = gaussian(), offsetmonth = FALSE)
# NOTE(review): `res2` is used below but never assigned (only `fit` exists);
# also coef(res2)["sinw "] has a trailing space in the name and would return
# NA even if res2 existed, and `amplitude`/`phase` reuse `fitb` from method
# one rather than this model. This looks like unfinished scratch code.
res2$Coefficients
res2$coefficients[3]
coef(fit)[2]
mu = coef(res2)["Intercept"]
cos.amplitude = coef(res2)["cosw"]
sin.amplitude = coef(res2)["sinw "]
amplitude = fitb$pars[1]
phase = fitb$pars[2]
# Worked examples from the season package documentation follow.
data(CVD)
# model to fit an annual pattern to the monthly cardiovascular disease data
f = c(12)
tau = c(130,10)
res12 = nscosinor(data=CVD, response='adj', cycles=f, niters=500,
burnin=100, tau=tau)
summary(res12)
plot(res12)
## cardiovascular disease data (offset based on number of days in...
## ...the month scaled to an average month length)
data(CVD)
res = cosinor(cvd~1, date='month', data=CVD, type='monthly',
family=poisson(), offsetmonth=TRUE)
summary(res)
seasrescheck(res$residuals) # check the residuals
# stillbirth data
data(stillbirth)
head(stillbirth)
res = cosinor(stillborn~1, date='dob', data=stillbirth,
family=binomial(link='cloglog'))
summary(res)
plot(res)
# hourly indoor temperature data
data(indoor)
head(indoor)
res = cosinor(bedroom~1, date='datetime', type='hourly', data=indoor)
summary(res)
|
fc877d1fcb32bd117e716b971662a3c71044d1d4 | dc58de1678131eb01f62ded31615db3cb261db15 | /rBEADS_v2.0/source/MappabilityCorrection_v1.1.R | 0589de51beea820b7d5b58462bf5635dfded1a65 | [] | no_license | Przemol/rbeads_old | 22f5a9685cfb8894e0c070600230ccc99201d83a | 11aef08ca1f42c9fbbbcbf55897db452edd36351 | refs/heads/master | 2020-05-17T09:06:14.462865 | 2011-07-21T17:32:54 | 2011-07-21T17:32:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,492 | r | MappabilityCorrection_v1.1.R | # TODO: Add comment
#
# Author: przemol
###############################################################################
#' Mask low-mappability regions in a GC-normalised coverage track.
#'
#' For every chromosome in \code{GCnormTrack}, positions whose precalculated
#' mappability score is below 100 (mappability fraction 0.25) are set to NA
#' so they are excluded from downstream steps. One dot per chromosome is
#' printed as progress; the whole step is timed via \code{catTime}.
#'
#' @param GCnormTrack named per-chromosome coverage track.
#' @param mappabilityTrack precalculated mappability track with matching
#'   chromosome names; must not be NULL for supported genomes (no guard here).
#' @param genome BSgenome object; ce6, hg19 and mm9 are supported.
#' @return \code{GCnormTrack} with low-mappability positions set to NA.
MappabilityCorrection <- function(GCnormTrack, mappabilityTrack=NULL, genome=Celegans) {
	catTime("Mappability correction", e={
		# 0) Old way (multyply than divide by the same value in DIV step)
		#cov_GCmap <- cov * cmap.fd
		# 1) Masking regions of mappability lower than 100 (mappability
		#    score=0.25) from the precalculated mappability track.
		#    The three per-genome branches were byte-identical, so they are
		#    collapsed into a single loop gated on the supported genomes.
		if (genome@provider_version %in% c('ce6', 'hg19', 'mm9')) {
			for(i in names(GCnormTrack)) {
				cat('.')
				GCnormTrack[[i]][mappabilityTrack[[i]] < 100] <- NA
			}
		} else {
			warning('Unknown genome!')  # message typo 'Unnkonown' fixed
		}
		# 2) masking regions within lowest 5% of all read values
		# cov_GCmap[cov.r <= quantile(as.vector(cov.r), 0.05)] <- NA
	})
	return(GCnormTrack)
}
catTime <- function(..., e=NULL, file="", gc=FALSE) {
cat(..., "...", sep="", file=file, append=TRUE)
cat("\t<", system.time(e, gcFirst=gc)[3], "s>\n", sep="", file=file, append=TRUE)
} |
aa0292815f8d128fd2081f54fbb93caf75b1a361 | a2fc6071ce8176f39db921d79cd9dcb27bacb6da | /R/save.entry.item1.R | 6689b689bdf3aab744504434a0067675f5cd6ea2 | [] | no_license | senickel/surveycleanup | bdf0298cea14493506045444589b0c392e1fbac4 | 435a02c8ed42afc467d24820930facbeae660747 | refs/heads/master | 2020-04-07T16:21:28.226489 | 2018-03-07T10:54:28 | 2018-03-07T10:54:28 | 124,221,758 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,417 | r | save.entry.item1.R | #' save.entry.item1
#'
#' check nominal if they are nominal
#' @param Click here and there
#' @keywords nominal
#' @keywords
#' @export
#' @examples
#' checking.nominal()
#' @importFrom magrittr %>%
#'
save.entry.item1<-function(item1,input) {
if (is.na(item1[1,1])) item1[1,1]<-""
if (item1[1,1]!=""|item1[1,2]!="") {
item.all<-data.frame(oldval=item1X[,1],oldlab=item1X[,2],newval=item1[,1],newlab=item1[,2],del="")
# fill up changed ones
item.all[1:nrow(item1),3]<-sapply(1:nrow(item1),function(x1) {
input[[paste0("val",x1)]]
})
item.all[1:nrow(item1),4]<-sapply(1:nrow(item1),function(x1) {
input[[paste0("lab",x1)]]
})
item.all[1:nrow(item1),5]<-ifelse(sapply(1:nrow(item1),function(x1) {
input[[paste0("del",x1)]]
}),0,1)
createtrue<-sapply(paste0("cval",c(1:5)),function(x) {
if (input[[x]]!="") return(1)
return(0)
}) %>% sum
if (createtrue>0) {
item.new<-data.frame(oldval=rep("",createtrue),oldlab=rep("",createtrue),
newval=sapply(1:createtrue,function(x) input[[paste0("cval",x)]]),
newlab=sapply(1:createtrue,function(x) input[[paste0("clab",x)]]),
del=sapply(1:createtrue,function(x) input[[paste0("cdel",x)]]))
item.all<-rbind(item.all,item.new)
}
edit.data.item1(item.all,idd)
save.csv.version(responses)
}
}
|
b5a9da5d69b5ab3781ab9ef791879f6a6dff5421 | dee361052b87ddd442608360f0dfecf765731859 | /R/wallet-buys.r | 04b57b3c4af1a935b8f77a4ac747ed4da9048344 | [
"MIT"
] | permissive | zamorarr/rcoinbase | b828d8779bd963883bcf5bb8f5449914bdd9c5d5 | e3c97694c8cdefafab23e52b7f32e6558fd92957 | refs/heads/master | 2021-08-29T19:33:08.909098 | 2017-12-14T19:23:29 | 2017-12-14T19:23:29 | 112,635,105 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 662 | r | wallet-buys.r | #' List buys
#'
#' Lists buys for an account.
#'
#' @param account_id Account Id
#' @export
#' @family wallet-buys
#' @references \url{https://developers.coinbase.com/api/v2#list-buys}
get_buys <- function(account_id) {
endpoint <- paste("accounts", account_id, "buys", sep = "/")
coinbase_get(endpoint)
}
#' Show a buy
#'
#' Show an individual buy.
#'
#' @param account_id Account Id
#' @param buy_id Buy Id
#' @export
#' @family wallet-buys
#' @references \url{https://developers.coinbase.com/api/v2#show-a-buy}
get_buy <- function(account_id, buy_id) {
endpoint <- paste("accounts", account_id, "buys", buy_id, sep = "/")
coinbase_get(endpoint)
}
|
a8281fea2ee683b2e5b4aa5f657636ea095517c1 | e0fc2b0f9cec534e771361e25b46c7d82adc82ba | /12.11.table2.R | 4a5ea7c900d5d518951e56bba5fb81baa99b3623 | [] | no_license | rebeccagreenblatt/brfss_code | 914324d0cc02082347c3aaaedae08c80e7b4e8b2 | 92675eec95933d54eede5ad7105d8b35ef3c6a84 | refs/heads/master | 2020-05-27T18:39:09.943349 | 2019-05-27T00:51:37 | 2019-05-27T00:51:37 | 188,746,222 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,344 | r | 12.11.table2.R | library(survey)
library(dplyr)
asthnow <- read.csv("BRFSS_Dec_Edits/12.11.asthnow_final.csv")
table(asthma$ASTHMA, useNA = "ifany")
table(asthnow$ASTHNOW, useNA = "ifany")
asthnow$age_cat <- cut(asthnow$AGE, breaks = c(21, 34, 44, 54, 64, 100))
never_asthma <- filter(asthnow, ASTHNOW == 0)
never_asthma <- select(never_asthma, SEX, age_cat, RACE, EDUCA, INCOME, BMI, SMOKER)
current_asthma <- filter(asthnow, ASTHNOW == 1)
current_asthma <- select(current_asthma, SEX, age_cat, RACE, EDUCA, INCOME, BMI, SMOKER)
##current_asthma
current_counts <- unlist(lapply(current_asthma, table))
current_props <- unlist(lapply(current_asthma, function(col){prop.table(table(col))}))
current_combin <- paste0(formatC(current_counts, format="d",big.mark=","), ":", format(round(100*current_props, 2), nsmall=2))
names(current_combin) <- names(current_counts)
d <- current_combin
current_final <- rbind(t(t(d[c(2,1)])), "", t(t(d[c(12,9,11,8,10)])), "",
t(t(d[c(14,15,13)])), "", t(t(d[c(17,18,16)])), "",
t(t(d[c(22,23,19,20,21)])), "", t(t(d[c(26,25,24)])), "", t(t(d[c(3:7)])))
##never_asthma
never_counts <- unlist(lapply(never_asthma, table))
never_props <- unlist(lapply(never_asthma, function(col){prop.table(table(col))}))
never_combin <- paste0(formatC(never_counts, format="d",big.mark=","), ":", format(round(100*never_props, 2), nsmall=2))
names(never_combin) <- names(never_counts)
d <- never_combin
never_final <- rbind(t(t(d[c(2,1)])), "", t(t(d[c(12,9,11,8,10)])), "",
t(t(d[c(14,15,13)])), "", t(t(d[c(17,18,16)])), "",
t(t(d[c(22,23,19,20,21)])), "", t(t(d[c(26,25,24)])), "", t(t(d[c(3:7)])))
all <- cbind(current_final, never_final)
colnames(all) <- c("current", "never")
write.csv(all, file = "BRFSS_Dec_Edits/12.11.table2.csv")
##weighted column
library(survey)
never_asthma <- filter(asthnow, ASTHNOW == 0)
never_asthma <- select(never_asthma, SEX, age_cat, RACE, EDUCA, INCOME, BMI, SMOKER, CNTYWT, STSTR)
current_asthma <- filter(asthnow, ASTHNOW == 1)
current_asthma <- select(current_asthma, SEX, age_cat, RACE, EDUCA, INCOME, BMI, SMOKER, CNTYWT, STSTR)
never_asthma$asth <- "never"
current_asthma$asth <- "current"
all <- rbind(never_asthma, current_asthma)
all$asth.f <- factor(all$asth)
options(survey.lonely.psu = "adjust")
des <- svydesign(ids=~1, strata=~STSTR, weights=~as.numeric(CNTYWT), data=all)
sex <- prop.table(svytable(~SEX+asth.f, des), 2)[c(2,1),]
race <- prop.table(svytable(~RACE+asth.f, des), 2)[c(5,2,4,1,3),]
educa <- prop.table(svytable(~EDUCA+asth.f, des), 2)[c(2,3,1),]
inc <- prop.table(svytable(~INCOME+asth.f, des), 2)[c(2,3,1),]
bmi <- prop.table(svytable(~BMI+asth.f, des), 2)[c(4,5,1,2,3),]
smoker <- prop.table(svytable(~SMOKER+asth.f, des), 2)[c(3,2,1),]
age <- prop.table(svytable(~age_cat+asth.f, des), 2)
sex <- format(round(100*sex, 1), nsmall=1)
race <- format(round(100*race, 1), nsmall=1)
educa <- format(round(100*educa, 1), nsmall=1)
inc <- format(round(100*inc, 1), nsmall=1)
bmi <- format(round(100*bmi, 1), nsmall=1)
smoker <- format(round(100*smoker, 1), nsmall=1)
age <- format(round(100*age, 1), nsmall=1)
r <- rep("", 2)
combin <- rbind(sex, r, race, r, educa, r, inc, r, bmi, r, smoker, r, age)
write.csv(combin, "BRFSS_Dec_Edits/12.11.weighted_table2_column.csv")
|
81712706e136342679f142cfbdd70dbe70e1d278 | a0ceb8a810553581850def0d17638c3fd7003895 | /scripts/rstudioserver_analysis/BM_all_merged/2-explore_Giladi_et_al_get_marker_genes_filter_celltypes.R | 9fe2f7920421df55de05e4d28e69e3181a5457b1 | [] | no_license | jakeyeung/sortchicAllScripts | 9e624762ca07c40d23e16dbd793ef9569c962473 | ecf27415e4e92680488b6f228c813467617e7ee5 | refs/heads/master | 2023-04-15T22:48:52.272410 | 2022-10-24T10:45:24 | 2022-10-24T10:45:24 | 556,698,796 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,702 | r | 2-explore_Giladi_et_al_get_marker_genes_filter_celltypes.R | # Jake Yeung
# Date of Creation: 2019-12-05
# File: ~/projects/scchic/scripts/rstudioserver_analysis/BM_all_merged/2-explore_Giladi_et_al.R
# Explore the scRNAseq dataset : filter celltypes manually
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
Vectorize(plog2p <- function(p){
return(ifelse(p == 0, 0, p * log2(p)))
}, vectorize.args = "p")
CalculateEntropy <- function(p, normalize.p = FALSE){
if (normalize.p){
p <- p / sum(p)
}
S <- -sum(plog2p(p))
return(S)
}
# Functions ---------------------------------------------------------------
ReadGiladi <- function(inf, remove.first.col = TRUE){
dat <- fread(inf) %>%
dplyr::rename(gene = V1)
if (remove.first.col){
dat$gene <- NULL
}
return(dat)
}
# Load marker genes ------------------------------------------------------
outrds <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/public_data/Giladi_et_al_2018/diff_exprs_Giladi_seurat.celltypes_filt.rds"
outpdf <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/public_data/Giladi_et_al_2018/diff_exprs_Giladi_seurat.celltypes_filt.pdf"
pdf(outpdf, useDingbats = FALSE)
inf.markers <- "/home/jyeung/data/from_cluster/public_data/Giladi_et_al_2018/41556_2018_121_MOESM4_ESM.markergenes.xlsx"
assertthat::assert_that(file.exists(inf.markers))
library(xlsx)
markers <- xlsx::read.xlsx(inf.markers, sheetIndex = 1)
# Load annotations from Giladi's email -----------------------------------
inf.meta <- "~/hpc/scChiC/public_data/Giladi_et_al_2018/tier3_annotation.txt"
assertthat::assert_that(file.exists(inf.meta))
dat.meta <- fread(inf.meta)
# filter markers
markers.keep <- c("Car1", "core", "Siglech", "Prg2", "Ccl5", "Prss34", "Cd74", "Fcrla", "Ltf")
dat.meta <- subset(dat.meta, marker %in% markers.keep)
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
ggplot(dat.meta, aes(x = x, y = y, color = marker)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Load data ---------------------------------------------------------------
indir <- "~/data/from_cluster/public_data/Giladi_et_al_2018"
inf.meta <- file.path(indir, "GSE92575_metadata.txt")
infs <- list.files(path = indir, pattern = "*.txt.gz", full.names = TRUE)
names(infs) <- sapply(infs, function(x) strsplit(basename(x), split = "\\.")[[1]][[1]])
# get gene list
genes <- ReadGiladi(infs[[1]], remove.first.col = FALSE)$gene
dats <- lapply(infs, ReadGiladi, remove.first.col = TRUE) %>%
bind_cols()
meta <- as.data.frame(fread(inf.meta, skip = 14))
rownames(meta) <- meta$well
# cells <- meta$well
cells <- dat.meta$V1
# integrate tier3 annotation into thiis
dats.filt <- dats[, ..cells]
dats.filt <- as.matrix(dats.filt)
rownames(dats.filt) <- genes
# split into tiers and then calculate entropy?
library(Seurat)
pbmc <- CreateSeuratObject(counts = dats.filt, project = "pbmc3k", min.cells = 3, min.features = 200, meta.data = meta)
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 2000)
# Identify the 10 most highly variable genes
top10 <- head(VariableFeatures(pbmc), 10)
# plot variable features with and without labels
plot1 <- VariableFeaturePlot(pbmc)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
# CombinePlots(plots = list(plot1, plot2))
all.genes <- rownames(pbmc)
pbmc <- ScaleData(pbmc, features = all.genes)
pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc))
pbmc <- RunUMAP(pbmc, dims = 1:10)
DimPlot(pbmc, reduction = "umap", group.by = "tier") + scale_color_viridis_d()
DimPlot(pbmc, reduction = "umap", group.by = "tier", split.by = "tier", ncol = 5) + scale_color_viridis_d()
# Plot gene counts --------------------------------------------------------
stemcell.genes <- as.character(subset(markers, gene.module == "Stem genes")$gene)
jmodules <- c("Stem genes", "Monocyte", "Stage I neutrophil", "Stage II neutrophil")
marker.genes <- as.character(subset(markers, gene.module %in% jmodules)$gene)
marker.genes <- as.character(subset(markers, gene.module %in% jmodules)$gene)
# jgenes <- "Hlf"
# jgenes <- stemcell.genes[1:10]
jgenes <- "S100a8"
jgenes <- "Hlf"
# RidgePlot(pbmc, stemcell.genes, group.by = "tier")
# VlnPlot(pbmc, stemcell.genes, group.by = "tier")
FeaturePlot(pbmc, features = jgenes, order = TRUE)
# plot entropy in single cells
S.vec <- apply(pbmc@assays$RNA@counts, 2, function(jcell) CalculateEntropy(jcell, normalize.p = TRUE))
jmeta <- data.frame(S = S.vec)
rownames(jmeta) <- names(S.vec)
pbmc <- AddMetaData(object = pbmc, metadata = jmeta, col.name = "entropy")
FeaturePlot(pbmc, features = 'entropy') + scale_color_viridis_c(direction = 1)
# add real metadata???
dat.meta.celltypes <- as.data.frame(dat.meta)
rownames(dat.meta.celltypes) <- dat.meta.celltypes$V1
pbmc <- AddMetaData(object = pbmc, metadata = dat.meta.celltypes)
Seurat::DimPlot(pbmc, group.by = "marker")
dev.off()
# Get markers -------------------------------------------------------------
Idents(pbmc) <- pbmc$marker
# de.output <- FindMarkers(pbmc, ident.1 = "Car1")
jmarkers <- unique(dat.meta.celltypes$marker)
names(jmarkers) <- jmarkers
# de.output.lst <- lapply(jmarkers, function(jmarker){
# print(paste("Calculating diff exprs genes for:", jmarker))
# de.output <- FindMarkers(pbmc, ident.1 = jmarker)
# })
de.output.lst <- FindAllMarkers(pbmc, only.pos = FALSE)
saveRDS(de.output.lst, file = outrds)
|
c478a1900692a264e1f56dd0caf82f7b5144509b | e14303f3376b7f50e15612b3f1e523f51a972381 | /Subfunctions/duoResults.R | 7ab2b6396f90f71cbc4599b2dda645132e878982 | [] | no_license | Camsbury/DuoDevelopment | 401c805b93b07e6da0987e90c4fae075f4477b61 | afccc48cd58237ee7df6a100d4f7cb99545d3470 | refs/heads/master | 2016-09-10T17:15:56.373941 | 2014-05-06T21:30:17 | 2014-05-06T21:30:17 | 19,511,377 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 816 | r | duoResults.R | duoResults = function(dataDuo) {
## Load R Functions
source("~/Development/R/DuoDevelopment/Subfunctions/duoCourseCompletionOrder.R")
source("~/Development/R/DuoDevelopment/Subfunctions/duoDataExtract.R")
duoList = duoDataExtract(dataDuo)
duoList = duoCourseCompletionOrder(duoList)
cat("Duolingo Development of the",duoList[[4]],"Phase 1 Beta Courses:")
cat("\n")
cat("(",duoList[[1]][[1]],".)",sep="")
cat("\n")
for (i in 1:duoList[[4]]) {
cat("\n")
cat("\n")
if (duoList[[3]][i] != "Release Imminent") {
cat(duoList[[2]][[i]],": ",duoList[[3]][i]," complete.",sep="")
} else {
cat(duoList[[2]][[i]],": ",duoList[[3]][i],".",sep="")
}
}
} |
b065ca584ce644368cea06de42496fc494536c70 | b3b1b011ab46f024467282baeff0f160e2e91e31 | /tests/testthat/test-dataSplitting.R | 3e0d58eb9718a039f0ee351c8db1aa1372bf6389 | [
"Apache-2.0"
] | permissive | schuemie/PatientLevelPrediction | 5265629020a2406f9f96a4975aa3ab35c9663b92 | 0b59c97a53ab4c6aaf6236048d5bcc9363c2716e | refs/heads/master | 2020-09-05T00:50:10.021513 | 2019-11-06T07:46:44 | 2019-11-06T07:46:44 | 88,721,641 | 0 | 1 | null | 2019-05-01T04:30:23 | 2017-04-19T08:40:26 | R | UTF-8 | R | false | false | 6,098 | r | test-dataSplitting.R | # @file test_DataSplitting.R
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of PatientLevelPrediction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
library("testthat")
context("Data splitting")
test_that("Data stratified splitting", {
# error message checks
population1 <- data.frame(rowId=1:20, outcomeCount=c(1,1,1,1,rep(0,16)))
expect_error(randomSplitter(population1, test=0.3, nfold=3))
population2 <- data.frame(rowId=1:200, outcomeCount=c(rep(1,42),rep(0,158)))
expect_error(randomSplitter(population2, test=0.3, nfold=-1))
expect_error(randomSplitter(population2, test=1.5, nfold=5))
expect_error(randomSplitter(population2, test=-1, nfold=5))
# fold creation check 1 (fixed)
test <- randomSplitter(population2, test=0.2, nfold=4)
test <- merge(population2, test)
test <- table(test$outcomeCount, test$index)
test.returned <- paste(test, collapse='-')
test.expected <- paste(matrix(c(32,32,32,31,31,8,9,9,8,8), ncol=5, byrow=T),collapse='-')
expect_identical(test.returned, test.expected)
# fold creation check 2 (sum)
size <- 500
population3 <- data.frame(rowId=1:size, outcomeCount=c(rep(1,floor(size/3)),rep(0,size-floor(size/3))))
test <- randomSplitter(population3, test=0.2, nfold=4)
test <- merge(population3, test)
test <- table(test$outcomeCount, test$index)
expect_that(sum(test), equals(size))
# test the training fraction parameter for learning curves
size = 500
population4 <- data.frame(rowId=1:size,
outcomeCount=c(rep(1,floor(size/3)),
rep(0,size-floor(size/3))))
test <- randomSplitter(population4, test = 0.2, train = 0.4, nfold = 4)
tolerance = 5
excludedPatients = 200
# test, if the number of patients in each fold are roughly the same
expect_equal(length(test$index[test$index == 1]),
length(test$index[test$index == 3]),
tolerance = tolerance)
expect_equal(length(test$index[test$index == 2]),
length(test$index[test$index == 4]),
tolerance = tolerance)
expect_equal(length(test$index[test$index == 1]),
length(test$index[test$index == 4]),
tolerance = tolerance)
# test, if patients were excluded according to the training fraction
expect_equal(length(test$index[test$index == 0]),
excludedPatients)
})
test_that("Data splitting by time", {
# error message checks
population1 <- data.frame(rowId=1:200, outcomeCount=c(rep(1,42),rep(0,158)),
cohortStartDate = as.Date("2016-01-01") + c(1:200))
expect_error(timeSplitter(population1, test=0.3, nfold=-1))
expect_error(timeSplitter(population1, test=1.5, nfold=5))
expect_error(timeSplitter(population1, test=-1, nfold=5))
# fold creation check (sum)
size <- 500
set.seed(1)
population2 <- data.frame(rowId=1:size, outcomeCount=sample(0:1,size,replace=TRUE),cohortStartDate = as.Date("2010-01-01") + c(1:size))
test <- timeSplitter(population2, test=0.2, nfold=4)
test <- merge(population2, test)
test <- table(test$outcomeCount, test$index)
expect_that(sum(test), equals(size))
# test the training fraction parameter for learning curves
size <- 500
set.seed(1)
population3 <- data.frame(rowId=1:size,
outcomeCount=sample(0:1,size,replace=TRUE),
cohortStartDate = as.Date("2010-01-01") + c(1:size))
test <- timeSplitter(population3, test = 0.2, train = 0.4, nfold = 4)
tolerance = 5
excludedPatients = 196
# test, if the number of patients in each fold are roughly the same
expect_equal(length(test$index[test$index == 1]),
length(test$index[test$index == 3]),
tolerance = tolerance)
expect_equal(length(test$index[test$index == 2]),
length(test$index[test$index == 4]),
tolerance = tolerance)
expect_equal(length(test$index[test$index == 1]),
length(test$index[test$index == 4]),
tolerance = tolerance)
# test, if patients were excluded according to the training fraction
expect_equal(length(test$index[test$index == 0]),
excludedPatients)
})
test_that("Data splitting by subject", {
# error message checks
population1 <- data.frame(rowId=1:20, subjectId = 1:20, outcomeCount=c(1,1,1,1,rep(0,16)))
expect_error(subjectSplitter(population1, test=0.3, nfold=3))
population2 <- data.frame(rowId=1:200,subjectId = 1:200, outcomeCount=c(rep(1,42),rep(0,158)))
expect_error(subjectSplitter(population2, test=0.3, nfold=-1))
expect_error(subjectSplitter(population2, test=1.5, nfold=5))
expect_error(subjectSplitter(population2, test=-1, nfold=5))
test <- subjectSplitter(population2, test=0.2, nfold=4)
test <- merge(population2, test)
test <- table(test$outcomeCount, test$index)
test.returned <- paste(test, collapse='-')
test.expected <- paste(matrix(c(32,32,32,31,31,8,9,9,8,8), ncol=5, byrow=T),collapse='-')
expect_identical(test.returned, test.expected)
# test that people are not in multiple folds
population3 <- data.frame(rowId=1:200,subjectId = rep(1:50,4), outcomeCount=c(rep(1,42),rep(0,158)))
test <- subjectSplitter(population3, test=0.2, nfold=3)
test <- merge(population3, test)
expect_equal(unique(table(test$subjectId[test$index==-1])), 4)
expect_equal(unique(table(test$subjectId[test$index==2])), 4)
expect_equal(unique(table(test$subjectId[test$index==3])), 4)
expect_equal(unique(table(test$subjectId[test$index==1])), 4)
})
|
e2a330491417702c66d256339113ed952cffcbc0 | d1e1c9b25aebcea37927c08a8f344713562b3e42 | /R/mu.rank.nna.R | 29a0ec1054e6a2c35fb771b239426f668d272a96 | [] | no_license | cran/muStat | 43783938835cae3e7a5afb5f8285f9b36ec8b07d | a77f2af75558f6a558d1044945f6085281655361 | refs/heads/master | 2021-01-17T06:33:48.507309 | 2010-09-17T00:00:00 | 2010-09-17T00:00:00 | 17,697,709 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 440 | r | mu.rank.nna.R | `mu.rank.nna` <-
function(x)
{
if (sum(dim(x)>1)< 2) {
if ((n<-length(x))>0) {
sx <- x[sl <- sort.list(x)] # sorted !NA
ntie <- diff(c(0, (1:n)[c(sx[-1] != sx[-n], TRUE)])) # fast.rle
x[sl] <- rep(cumsum(ntie) - (ntie-1)/2, ntie)# ranks
}
return(x)
} else {
lenx <- length(x)
uxx <- mu.Sums(mu.GE(x))
rxx <-(uxx[[1]]+(lenx+1))/2
return(rxx)
}
}
|
c8012770ae95779ac0ad91f882c73fe3094ee974 | 1154ea4133e862012fb1d0680ee4dc649c87ab40 | /R/old_taxa--taxmap--docs.R | 9610b2feee6cf47ce90dd9e623b1894072b8ebe5 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | grunwaldlab/metacoder | f02daa6191254344861c399ef517d54acd6a190f | edd7192858fffc397fb64b9dcac00ed19dbbaa12 | refs/heads/master | 2023-05-03T13:50:13.490344 | 2023-04-20T06:15:31 | 2023-04-20T06:15:31 | 23,885,494 | 128 | 27 | NOASSERTION | 2023-03-28T19:45:07 | 2014-09-10T17:57:54 | R | UTF-8 | R | false | false | 25,777 | r | old_taxa--taxmap--docs.R | #' Get data indexes associated with taxa
#'
#' Given a [taxmap()] object, return data associated with each taxon in a
#' given table included in that [taxmap()] object.
#' \preformatted{
#' obj$obs(data, value = NULL, subset = NULL,
#' recursive = TRUE, simplify = FALSE)
#' obs(obj, data, value = NULL, subset = NULL,
#' recursive = TRUE, simplify = FALSE)}
#'
#' @param obj ([taxmap()]) The [taxmap()] object containing taxon information to
#' be queried.
#' @param data Either the name of something in `obj$data` that has taxon
#' information or a an external object with taxon information. For tables,
#' there must be a column named "taxon_id" and lists/vectors must be named by
#' taxon ID.
#' @param value What data to return. This is usually the name of column in a
#' table in `obj$data`. Any result of `all_names(obj)` can be used. If the
#' value used has names, it is assumed that the names are taxon ids and the
#' taxon ids are used to look up the correct values.
#' @param subset Taxon IDs, TRUE/FALSE vector, or taxon indexes to find observations
#' for. Default: All taxa in `obj` will be used. Any variable name that
#' appears in [all_names()] can be used as if it was a vector on its own.
#' @param recursive (`logical` or `numeric`) If `FALSE`, only return the
#' observation assigned to the specified input taxa, not subtaxa. If `TRUE`,
#' return all the observations of every subtaxa, etc. Positive numbers
#' indicate the number of ranks below the each taxon to get observations for
#' `0` is equivalent to `FALSE`. Negative numbers are equivalent to `TRUE`.
#' @param simplify (`logical`) If `TRUE`, then combine all the results into a
#' single vector of unique observation indexes.
#'
#' @return If `simplify = FALSE`, then a list of vectors of observation indexes
#' are returned corresponding to the `data` argument. If `simplify = TRUE`,
#' then the observation indexes for all `data` taxa are returned in a single
#' vector.
#'
#' @name obs
#'
#' @examples
#' # Get indexes of rows corresponding to each taxon
#' obs(ex_taxmap, "info")
#'
#' # Get only a subset of taxon indexes
#' obs(ex_taxmap, "info", subset = 1:2)
#'
#' # Get only a subset of taxon IDs
#' obs(ex_taxmap, "info", subset = c("b", "c"))
#'
#' # Get only a subset of taxa using logical tests
#' obs(ex_taxmap, "info", subset = taxon_ranks == "genus")
#'
#' # Only return indexes of rows assinged to each taxon explicitly
#' obs(ex_taxmap, "info", recursive = FALSE)
#'
#' # Lump all row indexes in a single vector
#' obs(ex_taxmap, "info", simplify = TRUE)
#'
#' # Return values from a dataset instead of indexes
#' obs(ex_taxmap, "info", value = "name")
#'
NULL
#' Apply function to observations per taxon
#'
#' Apply a function to data for the observations for each taxon. This is similar
#' to using [obs()] with [lapply()] or [sapply()].
#' \preformatted{
#' obj$obs_apply(data, func, simplify = FALSE, value = NULL,
#' subset = NULL, recursive = TRUE, ...)
#' obs_apply(obj, data, func, simplify = FALSE, value = NULL,
#' subset = NULL, recursive = TRUE, ...)}
#'
#' @param obj The [taxmap()] object containing taxon information to
#' be queried.
#' @param data Either the name of something in `obj$data` that has taxon
#' information or a an external object with taxon information. For tables,
#' there must be a column named "taxon_id" and lists/vectors must be named by
#' taxon ID.
#' @param func (`function`) The function to apply.
#' @param simplify (`logical`) If `TRUE`, convert lists to vectors.
#' @param value What data to give to the function. This is usually the name of
#' column in a table in `obj$data`. Any result of `all_names(obj)` can be
#' used, but it usually only makes sense to use columns in the dataset
#' specified by the `data` option. By default, the indexes of observation in
#' `data` are returned.
#' @param subset Taxon IDs, TRUE/FALSE vector, or taxon indexes to use.
#' Default: All taxa in `obj` will be used. Any variable name that appears in
#' [all_names()] can be used as if it was a vector on its own.
#' @param recursive (`logical` or `numeric`) If `FALSE`, only return the
#' observation assigned to the specified input taxa, not subtaxa. If `TRUE`,
#' return all the observations of every subtaxa, etc. Positive numbers
#' indicate the number of ranks below the each taxon to get observations for
#' `0` is equivalent to `FALSE`. Negative numbers are equivalent to `TRUE`.
#' @param ... Extra arguments are passed to the function.
#'
#' @name obs_apply
#'
#' @examples
#' # Find the average number of legs in each taxon
#' obs_apply(ex_taxmap, "info", mean, value = "n_legs", simplify = TRUE)
#'
#' # One way to implement `n_obs` and find the number of observations per taxon
#' obs_apply(ex_taxmap, "info", length, simplify = TRUE)
#'
NULL
#' Filter observations with a list of conditions
#'
#' Filter data in a [taxmap()] object (in `obj$data`) with a
#' set of conditions. See
#' [dplyr::filter()] for the inspiration for this function and more
#' information. Calling the function using the `obj$filter_obs(...)` style
#' edits "obj" in place, unlike most R functions. However, calling the function
#' using the `filter_obs(obj, ...)` imitates R's traditional copy-on-modify
#' semantics, so "obj" would not be changed; instead a changed version would be
#' returned, like most R functions.
#' \preformatted{
#' obj$filter_obs(data, ..., drop_taxa = FALSE, drop_obs = TRUE,
#' subtaxa = FALSE, supertaxa = TRUE, reassign_obs = FALSE)
#' filter_obs(obj, data, ..., drop_taxa = FALSE, drop_obs = TRUE,
#' subtaxa = FALSE, supertaxa = TRUE, reassign_obs = FALSE)}
#'
#' @param obj An object of type [taxmap()]
#' @param data Dataset names, indexes, or a logical vector that indicates which datasets in
#' `obj$data` to filter. If multiple datasets are filterd at once, then they must be the same
#' length.
#' @param ... One or more filtering conditions. Any variable name that appears
#' in [all_names()] can be used as if it was a vector on its own. Each
#' filtering condition can be one of two things:
#' * `integer`: One or more dataset indexes.
#' * `logical`: A `TRUE`/`FALSE` vector of length equal to the number of
#' items in the dataset.
#' @param drop_taxa (`logical` of length 1) If `FALSE`, preserve taxa
#' even if all of their observations are filtered out. If `TRUE`, remove
#' taxa for which all observations were filtered out. Note that only taxa that
#' are unobserved due to this filtering will be removed; there might be other
#' taxa without observations to begin with that will not be removed.
#' @param drop_obs (`logical`) This only has an effect when `drop_taxa` is
#' `TRUE`. When `TRUE`, observations for other data sets (i.e. not `data`)
#' assigned to taxa that are removed when filtering `data` are also removed.
#' Otherwise, only data for taxa that are not present in all other data sets
#' will be removed. This option can be either simply `TRUE`/`FALSE`, meaning
#' that all data sets will be treated the same, or a logical vector can be
#' supplied with names corresponding one or more data sets in `obj$data`. For
#' example, `c(abundance = TRUE, stats = FALSE)` would remove observations in
#' `obj$data$abundance`, but not in `obj$data$stats`.
#' @param subtaxa (`logical` or `numeric` of length 1) This only has an effect
#' when `drop_taxa` is `TRUE`. If `TRUE`, include subtaxa of taxa passing the
#' filter. Positive numbers indicate the number of ranks below the target taxa
#' to return. `0` is equivalent to `FALSE`. Negative numbers are equivalent to
#' `TRUE`.
#' @param supertaxa (`logical` or `numeric` of length 1) This only has an
#' effect when `drop_taxa` is `TRUE`. If `TRUE`, include supertaxa of taxa
#' passing the filter. Positive numbers indicate the number of ranks above the
#' target taxa to return. `0` is equivalent to `FALSE`. Negative numbers are
#' equivalent to `TRUE`.
#' @param reassign_obs (`logical`) This only has an effect when `drop_taxa` is
#' `TRUE`. If `TRUE`, observations assigned to removed taxa will be reassigned
#' to the closest supertaxon that passed the filter. If there are no supertaxa
#' of such an observation that passed the filter, they will be filtered out if
#' `drop_obs` is `TRUE`. This option can be either simply `TRUE`/`FALSE`,
#' meaning that all data sets will be treated the same, or a logical vector
#' can be supplied with names corresponding one or more data sets in
#' `obj$data`. For example, `c(abundance = TRUE, stats = FALSE)` would
#' reassign observations in `obj$data$abundance`, but not in `obj$data$stats`.
#' @param target DEPRECIATED. use "data" instead.
#'
#' @return An object of type [taxmap()]
#'
#' @examples
#' # Filter by row index
#' filter_obs(ex_taxmap, "info", 1:2)
#'
#' # Filter by TRUE/FALSE
#' filter_obs(ex_taxmap, "info", dangerous == FALSE)
#' filter_obs(ex_taxmap, "info", dangerous == FALSE, n_legs > 0)
#' filter_obs(ex_taxmap, "info", n_legs == 2)
#'
#' # Remove taxa whose obserservations were filtered out
#' filter_obs(ex_taxmap, "info", n_legs == 2, drop_taxa = TRUE)
#'
#' # Preserve other data sets while removing taxa
#' filter_obs(ex_taxmap, "info", n_legs == 2, drop_taxa = TRUE,
#' drop_obs = c(abund = FALSE))
#'
#' # When filtering taxa, do not return supertaxa of taxa that are preserved
#' filter_obs(ex_taxmap, "info", n_legs == 2, drop_taxa = TRUE,
#' supertaxa = FALSE)
#'
#' # Filter multiple datasets at once
#' filter_obs(ex_taxmap, c("info", "phylopic_ids", "foods"), n_legs == 2)
#'
#' @family taxmap manipulation functions
#'
#' @name filter_obs
NULL
#' Subset columns in a [taxmap()] object
#'
#' Subsets columns in a [taxmap()] object. Takes and returns a
#' [taxmap()] object. Any variable name that appears in
#' [all_names()] can be used as if it was a vector on its own. See
#' [dplyr::select()] for the inspiration for this function and more
#' information. Calling the function using the `obj$select_obs(...)` style
#' edits "obj" in place, unlike most R functions. However, calling the function
#' using the `select_obs(obj, ...)` imitates R's traditional copy-on-modify
#' semantics, so "obj" would not be changed; instead a changed version would be
#' returned, like most R functions.
#' \preformatted{
#' obj$select_obs(data, ...)
#' select_obs(obj, data, ...)}
#'
#' @param obj An object of type [taxmap()]
#' @param data Dataset names, indexes, or a logical vector that indicates which tables in
#' `obj$data` to subset columns in. Multiple tables can be subset at once.
#' @param ... One or more column names to return in the new object. Each can be
#' one of two things: \describe{ \item{expression with unquoted column
#' name}{The name of a column in the dataset typed as if it was
#' a variable on its own.} \item{`numeric`}{Indexes of columns in
#' the dataset} } To match column names with a character vector,
#' use `matches("my_col_name")`. To match a logical vector, convert it to
#' a column index using `which`.
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return An object of type [taxmap()]
#'
#' @family taxmap manipulation functions
#'
#' @examples
#' # Selecting a column by name
#' select_obs(ex_taxmap, "info", dangerous)
#'
#' # Selecting a column by index
#' select_obs(ex_taxmap, "info", 3)
#'
#' # Selecting a column by regular expressions
#' select_obs(ex_taxmap, "info", matches("^n"))
#'
#' @name select_obs
NULL
#' Add columns to [taxmap()] objects
#'
#' Add columns to tables in `obj$data` in [taxmap()] objects. See
#' [dplyr::mutate()] for the inspiration for this function and more information.
#' Calling the function using the `obj$mutate_obs(...)` style edits "obj" in
#' place, unlike most R functions. However, calling the function using the
#' `mutate_obs(obj, ...)` imitates R's traditional copy-on-modify semantics, so
#' "obj" would not be changed; instead a changed version would be returned, like
#' most R functions.
#' \preformatted{
#' obj$mutate_obs(data, ...)
#' mutate_obs(obj, data, ...)}
#'
#' @param obj An object of type [taxmap()]
#' @param data Dataset name, index, or a logical vector that indicates which dataset in
#' `obj$data` to add columns to.
#' @param ... One or more named columns to add. Newly created columns can be
#' referenced in the same function call. Any variable name that appears in
#' [all_names()] can be used as if it was a vector on its own.
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return An object of type [taxmap()]
#'
#' @examples
#'
#' # Add column to existing tables
#' mutate_obs(ex_taxmap, "info",
#' new_col = "Im new",
#' newer_col = paste0(new_col, "er!"))
#'
#' # Create columns in a new table
#' mutate_obs(ex_taxmap, "new_table",
#' nums = 1:10,
#' squared = nums ^ 2)
#'
#' # Add a new vector
#' mutate_obs(ex_taxmap, "new_vector", 1:10)
#'
#' # Add a new list
#' mutate_obs(ex_taxmap, "new_list", list(1, 2))
#'
#' @family taxmap manipulation functions
#' @name mutate_obs
NULL
#' Replace columns in [taxmap()] objects
#'
#' Replace columns of tables in `obj$data` in [taxmap()] objects. See
#' [dplyr::transmute()] for the inspiration for this function and more
#' information. Calling the function using the `obj$transmute_obs(...)` style
#' edits "obj" in place, unlike most R functions. However, calling the function
#' using the `transmute_obs(obj, ...)` imitates R's traditional copy-on-modify
#' semantics, so "obj" would not be changed; instead a changed version would be
#' returned, like most R functions.
#' \preformatted{
#' obj$transmute_obs(data, ...)
#' transmute_obs(obj, data, ...)}
#'
#' @param obj An object of type [taxmap()]
#' @param data Dataset name, index, or a logical vector that indicates which dataset in
#' `obj$data` to use.
#' @param ... One or more named columns to add. Newly created columns can be
#' referenced in the same function call. Any variable name that appears in
#' [all_names()] can be used as if it was a vector on its own.
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return An object of type [taxmap()]
#' @examples
#' # Replace columns in a table with new columns
#' transmute_obs(ex_taxmap, "info", new_col = paste0(name, "!!!"))
#'
#' @family taxmap manipulation functions
#'
#' @name transmute_obs
NULL
#' Sort user data in [taxmap()] objects
#'
#' Sort rows of tables or the elements of lists/vectors in the `obj$data` list
#' in [taxmap()] objects. Any variable name that appears in [all_names()] can be
#' used as if it was a vector on its own. See [dplyr::arrange()] for the
#' inspiration for this function and more information. Calling the function
#' using the `obj$arrange_obs(...)` style edits "obj" in place, unlike most R
#' functions. However, calling the function using the `arrange_obs(obj, ...)`
#' imitates R's traditional copy-on-modify semantics, so "obj" would not be
#' changed; instead a changed version would be returned, like most R functions.
#' \preformatted{
#' obj$arrange_obs(data, ...)
#' arrange_obs(obj, data, ...)}
#'
#' @param obj An object of type [taxmap()].
#' @param data Dataset names, indexes, or a logical vector that indicates which datasets in
#'   `obj$data` to sort. If multiple datasets are sorted at once, then they must be the same
#' length.
#' @param ... One or more expressions (e.g. column names) to sort on.
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return An object of type [taxmap()]
#'
#' @examples
#' # Sort in ascending order
#' arrange_obs(ex_taxmap, "info", n_legs)
#' arrange_obs(ex_taxmap, "foods", name)
#'
#' # Sort in descending order
#' arrange_obs(ex_taxmap, "info", desc(n_legs))
#'
#' # Sort multiple datasets at once
#' arrange_obs(ex_taxmap, c("info", "phylopic_ids", "foods"), n_legs)
#'
#' @family taxmap manipulation functions
#'
#' @name arrange_obs
NULL
#' Sample n observations from [taxmap()]
#'
#' Randomly sample some number of observations from a [taxmap()] object. Weights
#' can be specified for observations or the taxa they are classified by. Any
#' variable name that appears in [all_names()] can be used as if it was a vector
#' on its own. See [dplyr::sample_n()] for the inspiration for this function.
#' Calling the function using the `obj$sample_n_obs(...)` style edits "obj" in
#' place, unlike most R functions. However, calling the function using the
#' `sample_n_obs(obj, ...)` imitates R's traditional copy-on-modify semantics,
#' so "obj" would not be changed; instead a changed version would be returned,
#' like most R functions.
#' \preformatted{
#' obj$sample_n_obs(data, size, replace = FALSE,
#' taxon_weight = NULL, obs_weight = NULL,
#' use_supertaxa = TRUE, collapse_func = mean, ...)
#' sample_n_obs(obj, data, size, replace = FALSE,
#' taxon_weight = NULL, obs_weight = NULL,
#' use_supertaxa = TRUE, collapse_func = mean, ...)}
#'
#' @param obj ([taxmap()]) The object to sample from.
#' @param data Dataset names, indexes, or a logical vector that indicates which datasets in
#' `obj$data` to sample. If multiple datasets are sampled at once, then they must be the same
#' length.
#' @param size (`numeric` of length 1) The number of observations to
#' sample.
#' @param replace (`logical` of length 1) If `TRUE`, sample with
#' replacement.
#' @param taxon_weight (`numeric`) Non-negative sampling weights of each
#' taxon. If `use_supertaxa` is `TRUE`, the weights for each taxon
#' in an observation's classification are supplied to `collapse_func` to
#' get the observation weight. If `obs_weight` is also specified, the two
#' weights are multiplied (after `taxon_weight` for each observation is
#' calculated).
#' @param obs_weight (`numeric`) Sampling weights of each observation. If
#' `taxon_weight` is also specified, the two weights are multiplied (after
#' `taxon_weight` for each observation is calculated).
#' @param use_supertaxa (`logical` or `numeric` of length 1) Affects how the
#' `taxon_weight` is used. If `TRUE`, the weights for each taxon in an
#' observation's classification are multiplied to get the observation weight.
#'   Otherwise, just the taxonomic level the observation is assigned to is
#'   considered. If `TRUE`, use all supertaxa. Positive numbers indicate the
#' number of ranks above each taxon to use. `0` is equivalent to `FALSE`.
#' Negative numbers are equivalent to `TRUE`.
#' @param collapse_func (`function` of length 1) If `taxon_weight` option is
#' used and `supertaxa` is `TRUE`, the weights for each
#' taxon in an observation's classification are supplied to
#' `collapse_func` to get the observation weight. This function should
#' take numeric vector and return a single number.
#' @param ... Additional options are passed to [filter_obs()].
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return An object of type [taxmap()]
#'
#' @examples
#' # Sample 2 rows without replacement
#' sample_n_obs(ex_taxmap, "info", 2)
#' sample_n_obs(ex_taxmap, "foods", 2)
#'
#' # Sample with replacement
#' sample_n_obs(ex_taxmap, "info", 10, replace = TRUE)
#'
#' # Sample some rows more often than others
#' sample_n_obs(ex_taxmap, "info", 3, obs_weight = n_legs)
#'
#' # Sample multiple datasets at once
#' sample_n_obs(ex_taxmap, c("info", "phylopic_ids", "foods"), 3)
#'
#' @family taxmap manipulation functions
#'
#' @name sample_n_obs
NULL
#' Sample a proportion of observations from [taxmap()]
#'
#' Randomly sample some proportion of observations from a [taxmap()]
#' object. Weights can be specified for observations or their taxa. See
#' [dplyr::sample_frac()] for the inspiration for this function. Calling the
#' function using the `obj$sample_frac_obs(...)` style edits "obj" in place, unlike
#' most R functions. However, calling the function using the `sample_frac_obs(obj,
#' ...)` imitates R's traditional copy-on-modify semantics, so "obj" would not
#' be changed; instead a changed version would be returned, like most R
#' functions.
#' \preformatted{
#' obj$sample_frac_obs(data, size, replace = FALSE,
#' taxon_weight = NULL, obs_weight = NULL,
#' use_supertaxa = TRUE, collapse_func = mean, ...)
#' sample_frac_obs(obj, data, size, replace = FALSE,
#' taxon_weight = NULL, obs_weight = NULL,
#' use_supertaxa = TRUE, collapse_func = mean, ...)}
#'
#' @param obj ([taxmap()]) The object to sample from.
#' @param data Dataset names, indexes, or a logical vector that indicates which datasets in
#'   `obj$data` to sample. If multiple datasets are sampled at once, then they must be the same
#' length.
#' @param size (`numeric` of length 1) The proportion of observations to
#' sample.
#' @param replace (`logical` of length 1) If `TRUE`, sample with
#' replacement.
#' @param taxon_weight (`numeric`) Non-negative sampling weights of each
#' taxon. If `use_supertaxa` is `TRUE`, the weights for each taxon
#' in an observation's classification are supplied to `collapse_func` to
#' get the observation weight. If `obs_weight` is also specified, the two
#' weights are multiplied (after `taxon_weight` for each observation is
#' calculated).
#' @param obs_weight (`numeric`) Sampling weights of each observation. If
#' `taxon_weight` is also specified, the two weights are multiplied
#' (after `taxon_weight` for each observation is calculated).
#' @param use_supertaxa (`logical` or `numeric` of length 1) Affects how the
#' `taxon_weight` is used. If `TRUE`, the weights for each taxon in
#' an observation's classification are multiplied to get the observation
#'   weight. If `FALSE`, just the taxonomic level the observation is assigned
#'   to is considered. Positive numbers indicate the number of ranks above
#'   each taxon to use. `0` is equivalent to `FALSE`. Negative numbers
#' are equivalent to `TRUE`.
#' @param collapse_func (`function` of length 1) If `taxon_weight`
#' option is used and `supertaxa` is `TRUE`, the weights for each
#' taxon in an observation's classification are supplied to
#' `collapse_func` to get the observation weight. This function should
#' take numeric vector and return a single number.
#' @param ... Additional options are passed to [filter_obs()].
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return An object of type [taxmap()]
#'
#' @examples
#' # Sample half of the rows from a table
#' sample_frac_obs(ex_taxmap, "info", 0.5)
#'
#' # Sample multiple datasets at once
#' sample_frac_obs(ex_taxmap, c("info", "phylopic_ids", "foods"), 0.5)
#'
#' @family taxmap manipulation functions
#'
#' @name sample_frac_obs
NULL
#' Count observations in [taxmap()]
#'
#' Count observations for each taxon in a data set in a [taxmap()] object. This
#' includes observations for the specific taxon and the observations of its
#' subtaxa. "Observations" in this sense are the items (for list/vectors) or
#' rows (for tables) in a dataset. By default, observations in the first data
#' set in the [taxmap()] object is used. For example, if the data set is a
#' table, then a value of 3 for a taxon means that there are 3 rows in that
#' table assigned to that taxon or one of its subtaxa.
#' \preformatted{
#' obj$n_obs(data)
#' n_obs(obj, data)}
#'
#' @param obj ([taxmap()])
#' @param data Dataset name, index, or a logical vector that indicates which dataset in
#' `obj$data` to add columns to.
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return `numeric`
#'
#' @examples
#' # Get number of observations for each taxon in first dataset
#' n_obs(ex_taxmap)
#'
#' # Get number of observations in a specified data set
#' n_obs(ex_taxmap, "info")
#' n_obs(ex_taxmap, "abund")
#'
#' # Filter taxa using number of observations in the first table
#' filter_taxa(ex_taxmap, n_obs > 1)
#'
#' @family taxmap data functions
#'
#' @name n_obs
NULL
#' Count observation assigned in [taxmap()]
#'
#' Count observations for each taxon in a data set in a [taxmap()] object. This
#' includes observations for the specific taxon but NOT the observations of its
#' subtaxa. "Observations" in this sense are the items (for list/vectors) or
#' rows (for tables) in a dataset. By default, observations in the first data
#' set in the [taxmap()] object is used. For example, if the data set is a
#' table, then a value of 3 for a taxon means that there are 3 rows in that
#' table assigned to that taxon.
#' \preformatted{
#' obj$n_obs_1(data)
#' n_obs_1(obj, data)}
#'
#' @param obj ([taxmap()])
#' @param data Dataset name, index, or a logical vector that indicates which dataset in
#' `obj$data` to add columns to.
#' @param target DEPRECATED. Use "data" instead.
#'
#' @return `numeric`
#'
#' @examples
#' # Get number of observations for each taxon in first dataset
#' n_obs_1(ex_taxmap)
#'
#' # Get number of observations in a specified data set
#' n_obs_1(ex_taxmap, "info")
#' n_obs_1(ex_taxmap, "abund")
#'
#' # Filter taxa using number of observations in the first table
#' filter_taxa(ex_taxmap, n_obs_1 > 0)
#'
#' @family taxmap data functions
#'
#' @name n_obs_1
NULL
#' Get a data set from a taxmap object
#'
#' Get a data set from a taxmap object and complain if it does not
#' exist.
#'
#' @param obj A taxmap object
#' @param data Dataset name, index, or a logical vector that indicates which dataset in
#' `obj$data` to add columns to.
#'
#' @examples
#' \dontrun{
#' # Get data set by name
#' get_dataset(ex_taxmap, "info")
#'
#' # Get data set by index
#' get_dataset(ex_taxmap, 1)
#'
#' # Get data set by T/F vector
#' get_dataset(ex_taxmap, startsWith(names(ex_taxmap$data), "i"))
#'
#' }
#'
#' @name get_dataset
NULL
|
7c44560f08b9a4287b381fef89aaf19a4f35a16b | 295d0c884c7a6ae6882d1171fb336728e3293dcf | /man/explorerApp.Rd | 9eb8e2a37116ca7030b9d29cad51ed8a2706a022 | [
"MIT"
] | permissive | radovankavicky/codebook | 1934c62c218db08d8a6fef2df76626ece4391616 | 7a1fd4675b541329aa98bf7a56e5072b24b7cd72 | refs/heads/master | 2020-03-25T04:25:35.911542 | 2018-08-03T02:20:24 | 2018-08-03T02:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 273 | rd | explorerApp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/explorerApp.R
\name{explorerApp}
\alias{explorerApp}
\title{Codebook Explorer Shiny App and RStudio Add-in}
\usage{
explorerApp()
}
\description{
Codebook Explorer Shiny App and RStudio Add-in
}
|
1e3bd85edb7853ec18b1fabb5178c61f90629225 | c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d | /rgl/demo/shinyDemo.R | 5f044521405f3241a4b0d5b536379115b76e27ce | [
"MIT"
] | permissive | solgenomics/R_libs | bcf34e00bf2edef54894f6295c4f38f1e480b3fc | e8cdf30fd5f32babf39c76a01df5f5544062224e | refs/heads/master | 2023-07-08T10:06:04.304775 | 2022-05-09T15:41:26 | 2022-05-09T15:41:26 | 186,859,606 | 0 | 2 | MIT | 2023-03-07T08:59:16 | 2019-05-15T15:57:13 | C++ | UTF-8 | R | false | false | 4,862 | r | shinyDemo.R | if (!require("shiny"))
stop("This demo requires shiny.")
library(rgl)
library(misc3d)
options(rgl.useNULL = TRUE)
set.seed(123)
# Shiny UI: two sliders drive the same 60 precomputed Nelder-Mead steps --
# "Steps" shows one simplex at a time, "Cumulative" accumulates them. The rgl
# widget in the main panel shows the density contours plus the simplex
# overlays.
ui <- fluidPage(
  # Enables server-initiated "sceneChange" messages for the rgl widget.
  registerSceneChange(),
  titlePanel("Nelder-Mead"),
  sidebarLayout(
    sidebarPanel(
      helpText("The Nelder-Mead algorithm evaluates the function",
               "on the vertices of a simplex. At each step it",
               "moves one vertex of the simplex to a better value."),
      # Slider positions 0..59 correspond to the 60 simplex steps drawn by
      # setStartPoint(); animate makes the steps play automatically.
      sliderInput("Slider", min=0, max=59, step=1, value=0, label="Steps",
                  animate=animationOptions(200, loop=TRUE)),
      sliderInput("Slider2", min=0, max=59, step=1, value=0, label="Cumulative",
                  animate=animationOptions(200, loop=TRUE)),
      # Play controllers that translate slider values into rgl subset
      # changes; they are wired up in the server function.
      playwidgetOutput('thecontroller'),
      playwidgetOutput('thecontroller2'),
      actionButton('newStart', 'Restart')),
    mainPanel(
      rglwidgetOutput('thewidget', width = "100%", height = 512))
  )
)
# Random mixture weights for the three normal components: u1 and u2 are drawn
# so that u1 + u2 <= 1, and u3 takes the remainder, so the weights sum to 1.
u1 <- runif(1)
u2 <- runif(1)*(1-u1)
u3 <- 1 - u1 - u2
# Example modified from ?contour3d
#Example 2: Nested contours of mixture of three tri-variate normal densities
# Mixture of three trivariate normal densities with independent coordinates.
# The mixture weights u1, u2 and u3 are taken from the enclosing environment;
# m and s set the component means (up to sign/scale) and common sd.
nmix3 <- function(x, y, z, m, s) {
  comp1 <- dnorm(x, m, s) * dnorm(y, m, s) * dnorm(z, m, s)
  comp2 <- dnorm(x, -m, s) * dnorm(y, -m, s) * dnorm(z, -m, s)
  comp3 <- dnorm(x, m, s) * dnorm(y, -1.5 * m, s) * dnorm(z, m, s)
  u1 * comp1 + u2 * comp2 + u3 * comp3
}
f <- function(x,y,z) nmix3(x,y,z,.5,.5)
# Draw k nested contour surfaces of the density f on an n x n x n grid over
# [-2, 2]^3, coloured from cmap with alpha ramping from alo to ahi.
g <- function(n = 40, k = 5, alo = 0.1, ahi = 0.5, cmap = heat.colors) {
  levels <- seq(0.05, 0.2, len = k)
  shades <- rev(cmap(length(levels)))
  alphas <- seq(alo, ahi, len = length(levels))
  grid <- seq(-2, 2, len = n)
  bg3d(col = "white")
  contour3d(f, levels, grid, grid, grid, color = shades, alpha = alphas) # nolint
}
f3 <- function(x) -f(x[1], x[2], x[3])
# Render the contour surfaces once (coarse 20^3 grid, 3 levels), capture the
# whole rgl scene so each Shiny session can re-plot it, then close the
# temporary device.
g(20,3)
surface <- scene3d()
close3d()
# One iteration of the Nelder-Mead simplex method, minimising f. The rows of
# x are the n = p + 1 vertices of a simplex in p dimensions; the updated
# simplex is returned.
neldermead <- function(x, f) {
  n <- nrow(x)
  p <- ncol(x)
  if (n != p + 1) stop(paste('Need', p + 1, 'starting points'))
  # Evaluate f at every vertex and sort vertices from best (lowest) to worst.
  fx <- rep(NA, n)
  for (i in 1:n) fx[i] <- f(x[i,])
  o <- order(fx)
  fx <- fx[o]
  x <- x[o,]
  # Centroid of the p best vertices (every vertex except the worst, row n).
  xmid <- apply(x[1:p,], 2, mean)
  # Reflection: mirror the worst vertex through the centroid.
  z1 <- xmid - (x[n,] - xmid)
  fz1 <- f(z1)
  if (fz1 < fx[1]) {
    # Reflected point beats the current best: try expanding twice as far in
    # the same direction and keep whichever of the two is better.
    z2 <- xmid - 2*(x[n,] - xmid)
    fz2 <- f(z2)
    if (fz2 < fz1) {
      x[n,] <- z2
    } else {
      x[n,] <- z1
    }
  } else if (fz1 < fx[p]) {
    # Better than the second-worst vertex: accept the reflection as is.
    x[n,] <- z1
  } else {
    # Reflection did not help much. Keep it only if it at least improves on
    # the worst vertex, then try a contraction halfway toward the centroid.
    if (fz1 < fx[n]) {
      x[n,] <- z1
      fx[n] <- fz1
    }
    z3 <- xmid + (x[n,] - xmid)/2
    fz3 <- f(z3)
    if (fz3 < fx[n]) {
      x[n,] <- z3
    } else {
      # Contraction failed too: shrink the whole simplex toward the best
      # vertex, halving every edge.
      for (i in 2:n) {
        x[i,] <- x[1,] + (x[i,] - x[1,])/2
      }
    }
  }
  return(x)
}
# Draw the simplex whose vertices are the rows of x in the current rgl scene,
# with f evaluated at each vertex appended as an extra coordinate column.
# Returns the rgl object ids of the drawn edges and faces so the caller can
# later toggle or delete them as a group.
showsimplex <- function(x, f, col="blue") {
  n <- nrow(x)
  z <- numeric(n)
  for (i in 1:n) z[i] <- f(x[i,])
  xyz <- cbind(x, z)
  # This is tricky:
  # 1. draw all lines, taking vertices two at a time:
  c(segments3d(xyz[as.numeric(combn(n, 2)),], col="black", depth_test = "lequal"),
    # 2. draw all faces, taking vertices three at a time:
    triangles3d(xyz[as.numeric(combn(n, 3)),], col=col, alpha=0.3))
}
# Draw a fresh random starting simplex (4 vertices in 3D) and run 60
# Nelder-Mead iterations, rendering each intermediate simplex. Returns a
# named list with one element per step holding the rgl object ids drawn for
# that step, for use as scene subsets.
setStartPoint <- function() {
  steps <- 60
  vertices <- matrix(rnorm(12, sd = 0.1) + rep(rnorm(3, sd = 2), each = 4), 4, 3)
  subsets <- vector("list", steps)
  for (step in seq_len(steps)) {
    vertices <- neldermead(vertices, f3)
    subsets[[step]] <- showsimplex(vertices, f3)
  }
  names(subsets) <- seq_along(subsets)
  subsets
}
# Shiny server: re-plots the captured contour scene, computes one 60-step
# Nelder-Mead path per session, and exposes the per-step rgl subsets through
# two play controllers (single-step and cumulative).
server <- function(input, output, session) {
  # Re-create the precomputed contour scene on a fresh rgl device and
  # remember the device so later calls can switch back to it.
  plot3d(surface)
  dev <- cur3d()
  save <- options(rgl.inShiny = TRUE)
  on.exit(options(save))
  # Close this session's rgl device when the browser session ends.
  session$onSessionEnded(function() {
    set3d(dev)
    close3d()
  })
  # Reactive holder for the current path's per-step rgl object ids.
  path <- reactiveValues(subsets = setStartPoint())
  observeEvent(input$newStart, {
    set3d(dev)
    # Remove every object belonging to the old path before drawing a new one.
    deletes <- unique(unlist(path$subsets))
    if (length(deletes))
      delFromSubscene3d(deletes)
    subsets <- setStartPoint()
    adds <- unique(unlist(subsets))
    # skipRedraw = TRUE batches the delete/add so the widget repaints once.
    session$sendCustomMessage("sceneChange",
                              sceneChange("thewidget", delete = deletes, add = adds,
                                          skipRedraw = TRUE))
    path$subsets <- subsets
    # Reset both sliders to the start of the new path.
    updateSliderInput(session, "Slider", value=0)
    updateSliderInput(session, "Slider2", value=0)
    # Re-enable redrawing once the UI updates have been flushed.
    session$onFlushed(function()
      session$sendCustomMessage("sceneChange",
                                sceneChange("thewidget", skipRedraw = FALSE)))
  })
  output$thewidget <- renderRglwidget({
    rglwidget(controllers=c("thecontroller", "thecontroller2"))
  })
  # "Steps" slider: show exactly one simplex at a time.
  output$thecontroller <-
    renderPlaywidget({
      if (length(path$subsets))
        playwidget("thewidget", respondTo = "Slider",
                   subsetControl(1, path$subsets),
                   start = 1, stop = length(path$subsets))
    })
  # "Cumulative" slider: show all simplexes up to the selected step.
  output$thecontroller2 <-
    renderPlaywidget({
      if (length(path$subsets))
        playwidget("thewidget", respondTo = "Slider2",
                   subsetControl(1, path$subsets, accumulate = TRUE))
    })
}
# Launch the app only in an interactive session, so sourcing this file in a
# batch job merely defines the pieces without starting a server.
if (interactive())
  shinyApp(ui = ui, server = server)
|
1c506b6bacb0c2b49da0c644ee27a109e882f257 | dc51e29c86ec5da9888af397a865e93bd2867889 | /R/copy_to_clipboard.R | e0539fb110606136e028986610277a09d7fbf518 | [] | no_license | cran/bannerCommenter | 7b49d0f28b4de56457834e0c13fb0faf5b108c42 | dcd4f5e8e2e4535db6d9f722a3ca316c6b64067c | refs/heads/master | 2021-06-18T14:58:12.952413 | 2021-03-23T07:30:09 | 2021-03-23T07:30:09 | 75,629,234 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,301 | r | copy_to_clipboard.R |
#' Transfer text strings to the clipboard ready for paste
#'
#' This is only guaranteed for Windows; in the case of Linux you will
#' need to have the \code{xclip} command installed and visible on the \code{PATH}
#' and for Mac OS you will need to have \code{pbcopy} similarly available.
#' In any case the transfer to the clipboard is only activated while in
#' an interactive session.
#'
#' It behaves like \code{base::cat} but differs in three respects.
#'
#' First, if \code{file} is left missing, in an interactive session,
#' the default file is a clipboard device, if possible; when no clipboard
#' device can be opened, output falls back to the console.
#'
#' Second, the return value is \code{invisible(x)} rather than
#' \code{invisible(NULL)} as it is for \code{base::cat}.
#'
#' Third, it only has a copying side-effect if used in an interactive session.
#' In a non-interactive session it merely returns the \code{x} argument, invisibly.
#'
#' Note that on \code{Windows} the function \code{utils::writeClipboard} offers
#' a much more extensive range of possibilities for communicating with the
#' clipboard device, but this facility is only available on \code{Windows}.
#'
#' @param x a character string vector
#' @param ... additional arguments as for \code{cat}
#' @param file a file or connection (usually left at the default)
#'
#' @return \code{x}, invisibly (as for a print method)
#' @export
copy_to_clipboard <- function(x, ..., file = con) {
  if (missing(file)) {
    # Temporarily silence warnings while probing for a clipboard tool.
    oldOpt <- options(warn = -1)
    on.exit(options(oldOpt))
    con <- switch(Sys.info()["sysname"],
                  Linux = {
                    if (system("which xclip", ignore.stdout = TRUE,
                               ignore.stderr = TRUE) == 0) {
                      pipe("xclip -selection clipboard -i", open = "w")
                    } else NULL
                  },
                  Windows = {
                    base::file("clipboard")
                  },
                  { ### guessing some version of Mac OS!
                    if (system("which pbcopy", ignore.stdout = TRUE,
                               ignore.stderr = TRUE) == 0) {
                      pipe("pbcopy", "w")
                    } else NULL
                  })
    if (is.null(con)) {
      # Bug fix: previously cat() was handed file = NULL when no clipboard
      # tool could be found, which is an error in cat(). Fall back to the
      # console (cat()'s own default) instead.
      con <- ""
    } else {
      on.exit(close(con), add = TRUE)
    }
  }
  if (interactive())
    cat(x, file = file, ...)
  invisible(x)
}
|
0c4ef541680ed5968ef29bdc6430eb3eff23feff | 34d70571bee406e721673e9af1ff4d4ba9247b3c | /R/main.R | 75f5747ac3da09680f4e1912bc741795e497e8de | [] | no_license | benjaminguinaudeau/bashR | f172af9a75ae01608f5e78e058fab3cea281d2df | 6e0e707703f674fa1467bb62813341a8bf9db60d | refs/heads/master | 2023-05-14T03:32:35.547019 | 2023-05-04T20:15:21 | 2023-05-04T20:15:21 | 181,508,074 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,337 | r | main.R | #' sudo$
#' @description This function executes the given command as sudo
#' @param command A bash command to execute as sudo
#' @param env_var Should the password be saved for further use
#' @export
sudo <- function(command,
intern = F,
ignore.stdout = F,
ignore.stderr = T,
env_var = T,
cmd = F){
if(os() == "Windows"){
if(cmd){
out <- command
} else {
out <- exec(glue::glue("{ command }"),
intern = intern,
ignore.stdout = ignore.stdout,
ignore.stderr = ignore.stderr)
}
} else {
if(class(try(keyring::key_get("SUDO_PASS"), silent = T))[1] == "try-error"){
stop("Sudo password not found in keyring")
}
if(cmd){
out <- glue::glue("echo { keyring::key_get('SUDO_PASS') } | sudo -S { command }")
} else {
out <- exec(glue::glue("echo { keyring::key_get('SUDO_PASS') } | sudo -S { command }"),
intern = intern,
ignore.stdout = ignore.stdout,
ignore.stderr = ignore.stderr)
}
if(!env_var) keyring::key_delete("SUDO_PASS")
}
return(out)
}
#' set_sudo
#'
#' Store the sudo password in the system keyring under the key "SUDO_PASS",
#' where sudo() later retrieves it.
#'
#' @param pw The sudo password as a character string.
#' @export
set_sudo <- function(pw) {
  raw_pw <- charToRaw(pw)
  keyring::key_set_with_raw_value("SUDO_PASS", password = raw_pw)
}
#' parse_curl_request
#'
#' Parse a curl command (e.g. from a browser's "Copy as cURL") into its URL
#' and headers, and build the text of the equivalent httr::GET() call.
#' NOTE(review): the extraction assumes single-quoted values ('...') on the
#' curl and -H lines -- double-quoted curl commands are not matched.
#'
#' @param req_string The curl command as a character vector; if NULL, it is
#'   read from the clipboard.
#' @param execute If TRUE, also perform the request and store the response in
#'   a `req` list column.
#' @return A one-row tibble with columns url, headers (list column) and
#'   r_code, plus `req` when execute = TRUE.
#' @export
parse_curl_request <- function(req_string = NULL, execute = F){
  if(is.null(req_string)){
    req_string <- clipr::read_clip()
    if(!stringr::str_detect(req_string[1], "^\\s*curl")){
      stop(glue::glue("No curl request detected.\n Input: \"{req_string}\""))
    }
  }
  # Normalise the input to one element per line, whatever shape came in.
  tot_req <- req_string %>%
    paste(collapse = "\n") %>%
    stringr::str_split("\n") %>%
    purrr::pluck(1)
  # URL: the first single-quoted value on the line starting with "curl".
  url <- tot_req %>%
    stringr::str_subset("^curl") %>%
    stringr::str_extract("(?<=').*?(?=')")
  # Headers: each -H line holds a single-quoted "Name: value" pair.
  tmp <- tot_req %>%
    stringr::str_subset("-H") %>%
    stringr::str_extract("(?<=').*?(?=')") %>%
    stringr::str_split("\\:\\s+")
  # Escape forward slashes in header values for the generated code string.
  headers <- purrr::map_chr(tmp, 2) %>%
    stringr::str_replace_all(c("/" = "\\\\/" ))
  names(headers) <- purrr::map_chr(tmp, 1)
  # dput() renders the named vector as R source for the generated call.
  header_string <- paste(utils::capture.output(dput(headers)))
  r_code <- glue::glue("httr::GET(url = \"{url}\", \nhttr::add_headers(.headers = c({paste(header_string, collapse = '\n')}\n)))")
  dt <- tibble::tibble(url = url, headers = list(headers), r_code)
  if(execute){
    dt$req <- list(try(httr::GET(url = url, httr::add_headers(.headers = headers))))
  }
  return(dt)
}
#' status_code_is
#'
#' Check whether an httr response (or any list-like object with a
#' `status_code` element) carries the given HTTP status code.
#'
#' @param req A response object with a `status_code` element.
#' @param code The expected HTTP status code (default 200).
#' @export
status_code_is <- function(req, code = 200) {
  status <- req$status_code
  status == code
}
#' test_headers
#'
#' Determine which request headers are actually required: for each header the
#' request is re-sent with that single header removed and the response is
#' judged with `test_fun`. A header is reported as required when the request
#' no longer passes the test without it.
#'
#' @param url The URL to request.
#' @param headers Named character vector of request headers.
#' @param test_fun Function applied to each response; should return TRUE when
#'   the response still looks valid (default: status code is 200).
#' @param ... Additional arguments passed on to `test_fun`.
#' @return A tibble with columns `removed_header` and `required`.
#' @export
test_headers <- function(url, headers, test_fun = status_code_is, ...){
  test_fun_args <- list(...)
  names(headers) %>%
    purrr::map_dfr(~{
      # Resend the request with the current header (.x) dropped.
      tmp <- headers
      tmp <- tmp[names(tmp) != .x]
      req <- try(httr::GET(url = url, httr::add_headers(.headers = tmp)))
      # A failed request counts as "did not pass the test".
      trig <- F
      if(!inherits(req, "try-error")){
        trig <- do.call(test_fun, c(list(req), test_fun_args))
      }
      return(tibble::tibble(removed_header = .x, required = !trig))
    })
}
#' ufw
#'
#' Run an ufw (uncomplicated firewall) subcommand for a given port via sudo().
#'
#' @param command The ufw subcommand, e.g. "allow" or "deny".
#' @param port The port the subcommand applies to.
#' @param cmd If TRUE, return the command string instead of executing it.
#' @param ... Passed on to sudo().
#' @export
ufw <- function(command, port, cmd = F, ...) {
  ufw_call <- glue::glue("ufw { command } { port }")
  sudo(ufw_call, cmd = cmd, ...)
}
#' os
#'
#' Return the operating system name (e.g. "Linux", "Windows", "Darwin") as
#' reported by Sys.info().
#'
#' @return An unnamed length-one character vector.
#' @export
os <- function() {
  # [[ drops the "sysname" name that single-bracket indexing kept, so callers
  # get a plain scalar for comparisons like os() == "Windows".
  Sys.info()[["sysname"]]
}
#' exec
#'
#' Execute a shell command portably: shell() on Windows, system() elsewhere.
#' With cmd = TRUE the command string is returned unevaluated instead.
#'
#' @param string The shell command to run.
#' @param cmd If TRUE, return the command string without executing it.
#' @param ... Passed on to shell()/system().
#' @export
exec <- function(string, cmd = F, ...) {
  if (cmd) {
    return(string)
  }
  if (os() == "Windows") {
    shell(string, ...)
  } else {
    system(string, ...)
  }
}
#' run_as_job
#'
#' Run a code block as an RStudio background job: the code is written to a
#' temporary script ("script_for_job") together with package-loading calls for
#' the currently attached packages, submitted via rstudioapi::jobRunScript(),
#' and, if the job creates and saves an object called `out`, that object is
#' loaded back and returned.
#'
#' @param .command The code to run; its deparsed second element is used as the
#'   script body. NOTE(review): callers appear to pass a braced
#'   expression/formula -- confirm the expected form.
#' @param import_global If TRUE, copy all objects from the global environment
#'   into the job's environment.
#' @param import_package If TRUE, load all currently attached packages in the
#'   job (via pacman::p_load); otherwise only base.
#' @param env_to_import Optional environment whose objects are also copied
#'   into the job's environment.
#' @param output Path of the .Rdata file used to pass `out` back from the job.
#' @export
run_as_job <- function(.command, import_global = F, import_package = T, env_to_import = NULL, output = ".tmp.Rdata"){
  # Objects are copied into this function's own environment, which is handed
  # to jobRunScript() as importEnv below (via `env`).
  current_env <- rlang::current_env()
  if(import_global){.GlobalEnv %>% as.list %>% purrr::imap(~{current_env[[.y]] <- .x})}
  if(!is.null(env_to_import)){env_to_import %>% as.list %>% purrr::imap(~{current_env[[.y]] <- .x})}
  if(!exists("env")){env <- rlang::new_environment()}
  if(import_package){packages <- (.packages())} else {packages <- "base"}
  # Remove any leftovers from a previous run.
  if(fs::file_exists(output)){fs::file_delete(output)}
  if(fs::file_exists("script_for_job")){fs::file_delete("script_for_job")}
  # Build the job script: package loading, the (unbraced) body of .command,
  # and a trailer that saves `out` to `output` if the job created it. The
  # trailer uses [ ] as glue delimiters because the code itself contains { }.
  .command %>%
    as.character %>%
    .[2] %>%
    stringr::str_remove_all("\\{|\\}") %>%
    paste(paste(glue::glue("pacman::p_load({ packages})"), collapse = "\n"), .,
          glue::glue('if(exists("out")){save(out, file = "[output]")}',.open = "[", .close = "]")) %>%
    stringr::str_trim(.) %>%
    stringr::str_split("\n") %>%
    .[[1]] %>%
    writeLines("script_for_job")
  rstudioapi::jobRunScript("script_for_job", workingDir = getwd(), importEnv = env)
  # If the job already produced a result, load it, clean up, and return it.
  if(fs::file_exists(output)){load(output)}
  if(fs::file_exists(output)){fs::file_delete(output)}
  # if(fs::file_exists("script_for_job")){fs::file_delete("script_for_job")}
  if(exists("out")){return(out)}
}
#' wait
#'
#' Sleep for a random, normally distributed amount of time (negative draws
#' are reflected to positive), optionally reporting the waiting time.
#'
#' @param mean Mean waiting time in seconds.
#' @param sd Standard deviation of the waiting time.
#' @param verbose If TRUE, report the waiting time as a message.
#' @export
wait <- function(mean = 1, sd = .1, verbose = F) {
  delay <- abs(stats::rnorm(1, mean, sd))
  Sys.sleep(delay)
  if (verbose) {
    message("Waiting ", round(delay, 2), " seconds")
  }
}
#' message_pipe
#'
#' Pipe-friendly message operator: emits a message while passing the
#' left-hand side through unchanged, so it can be inserted in the middle of a
#' pipeline. `to_print` may be a plain string, or a function/purrr-style
#' one-sided formula that is applied to the left-hand side to build the
#' message text.
#'
#' @param .tbl The object flowing through the pipe; returned invisibly.
#' @param to_print A character string, or a function/formula mapping `.tbl`
#'   to the message text.
#' @return `.tbl`, invisibly.
#' @export
`%message%` <- function(.tbl, to_print = ""){
  if(is.character(to_print)){
    message(to_print)
  }
  # || rather than |: both tests are scalar and the second need not be
  # evaluated when the first already matched.
  if(inherits(to_print,"formula") || inherits(to_print,"function")){
    mes <- .tbl %>%
      list %>%
      purrr::map(to_print) %>%
      purrr::pluck(1)
    message(mes)
  }
  return(invisible(.tbl))
}
#' simule_map
#'
#' Debugging helper for purrr-style mapping: binds `.x` (the element at
#' `index`) and `.y` (its name) in `env` -- the global environment by
#' default -- so the body of a map/imap lambda can be stepped through as if
#' the iteration were at that element.
#'
#' @param .list The list being mapped over.
#' @param index Index of the element to simulate (default 1).
#' @param env Environment in which `.x` and `.y` are assigned.
#' @return The selected element, i.e. the value bound to `.x`.
#' @export
simule_map <- function(.list, index = 1, env = .GlobalEnv){
  env$.x <- .list[[index]]
  env$.y <- names(.list)[index]
  return(env$.x)
}
#' current_user
#'
#' Return the name of the current shell user by running `echo $USER`.
#'
#' @param intern If TRUE (the default), capture and return the command output
#'   as a character vector; if FALSE, run the command and return its exit
#'   status.
#' @param cmd If TRUE, return the command string without executing it.
#' @param ... Passed on to system().
#' @export
current_user <- function(intern = T, cmd = F, ...){
  if (cmd) {
    return("echo $USER")
  }
  # Bug fix: `intern` was previously accepted but never forwarded to
  # system(), so the default intern = TRUE had no effect and the user name
  # was printed rather than returned.
  system("echo $USER", intern = intern, ...)
}
#' source_rscript
#'
#' Run an R script in a fresh session through the Rscript command-line tool.
#'
#' @param path Path to the R script.
#' @param cmd If TRUE, return the command string instead of executing it.
#' @export
source_rscript <- function(path, cmd = F) {
  rscript_call <- glue::glue("Rscript { path }")
  exec(rscript_call, cmd = cmd)
}
#' append
#'
#' Append a string to a file by running `echo string >> path` in the shell.
#' NOTE(review): this masks base::append() when the package is attached, and
#' `string` is interpolated unquoted into a shell command, so shell
#' metacharacters in it will be interpreted -- confirm this is intended.
#'
#' @param path File to append to.
#' @param string Text to append.
#' @param cmd If TRUE, return the command string instead of executing it.
#' @export
append <- function(path, string, cmd = F){
  exec(glue::glue("echo { string } >> { path }"), cmd = cmd)
}
#' chmod
#'
#' Change the permissions of a file or directory via the chmod shell command.
#'
#' @param path Path whose permissions should be changed.
#' @param right Permission specification, e.g. "755" or "u+x".
#' @param recursive Optional flag such as "-R" to apply recursively; NULL
#'   (the default) omits the flag.
#' @param cmd If TRUE, return the command string instead of executing it.
#'   Bug fix: this argument previously had no default, so omitting it was an
#'   error; it now defaults to FALSE like the other wrappers in this file.
#' @param ... Passed on to exec().
#' @export
chmod <- function(path, right, recursive = NULL, cmd = F, ...){
  # Plain if/else instead of ifelse(): the condition is a scalar and
  # ifelse() is meant for vectors.
  recursive <- if (is.null(recursive)) "" else recursive
  exec(glue::glue("chmod { recursive } { right } { path }"), cmd = cmd, ...)
}
#' move
#'
#' Copy a file or directory from one location to another via the cp shell
#' command. NOTE(review): despite its name this wrapper copies (cp) rather
#' than moves (mv) -- confirm the intended semantics with callers.
#'
#' @param origin Source path.
#' @param desg Destination path.
#' @param recursive If TRUE, copy directories recursively (cp -r).
#' @param cmd If TRUE, return the command string instead of executing it.
#' @export
move <- function(origin, desg, recursive = F, cmd = F){
  # Bug fixes: the command template referred to a non-existent `dest`
  # variable (the parameter is spelled `desg`), so every call errored;
  # `recursive` was ignored; and `cmd` was hard-coded to TRUE, so the
  # command was never actually executed.
  flag <- if (recursive) "-r " else ""
  exec(glue::glue("cp { flag }{ origin } { desg }"), cmd = cmd)
}
|
8348bd1355ebebf3da8489c37a9e0f727ab14a91 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lfl/examples/as.matrix.fsets.Rd.R | 8a5265b9cd556cae5964d3c11ac931b7bc50c8c0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 249 | r | as.matrix.fsets.Rd.R | library(lfl)
### Name: as.matrix.fsets
### Title: Convert a 'fsets' object into matrix
### Aliases: as.matrix.fsets
### Keywords: models robust multivariate
### ** Examples
# Fuzzify 10 random values in [0, 1] with fcut(), using breakpoints at
# 0, 0.5 and 1 and naming the resulting attribute "age".
ff <- fcut(runif(10), breaks=c(0, 0.5, 1), name='age')
# Convert the resulting 'fsets' object into a plain matrix of memberships.
as.matrix(ff)
|
d63eb203b790e99715604919b904bf1900ca95f5 | a9f2650d8e733c41338037217fa318ac9730fed9 | /man/paramsDIAlignR.Rd | 41b16349e39de6103ae41cb72edc51350d7fd084 | [] | no_license | singjc/DIAlignR | 7e97c273be6f4a387cf35a46798b432a74a42ceb | bb2617b2a7deeac8a9c53017cbe103710e9a207c | refs/heads/master | 2023-07-27T23:10:49.913846 | 2023-03-23T04:45:25 | 2023-03-23T04:45:25 | 227,892,012 | 0 | 0 | null | 2019-12-13T17:38:11 | 2019-12-13T17:38:11 | null | UTF-8 | R | false | true | 6,474 | rd | paramsDIAlignR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{paramsDIAlignR}
\alias{paramsDIAlignR}
\title{Parameters for the alignment functions}
\usage{
paramsDIAlignR()
}
\value{
A list of parameters:
\item{runType}{(string) must be one of the strings "DIA_Proteomics", "DIA_IPF", "DIA_Metabolomics".}
\item{chromFile}{(string) must either be "mzML" or "sqMass".}
\item{maxFdrQuery}{(numeric) a numeric value between 0 and 1. It is used to filter peptides from osw file which have SCORE_MS2.QVALUE less than itself.}
\item{maxIPFFdrQuery}{(numeric) A numeric value between 0 and 1. It is used to filter features from osw file which have SCORE_IPF.QVALUE less than itself. (For PTM IPF use)}
\item{maxPeptideFdr}{(numeric) a numeric value between 0 and 1. It is used to filter peptides from osw file which have SCORE_PEPTIDE.QVALUE less than itself.}
\item{analyteFDR}{(numeric) the upper limit of feature FDR to be it considered for building tree.}
\item{treeDist}{(string) the method used to build distance matrix. Must be either "rsquared", "count" or "RSE".}
\item{treeAgg}{(string) the method used for agglomeration while performing hierarchical clustering. Must be either "single", "average" or "complete".}
\item{alignToRoot}{(logical) if TRUE, align leaves to the root in hierarchical clustering, else use already save aligned vectors.}
\item{prefix}{(string) name to be used to define merged runs.}
\item{context}{(string) used in pyprophet peptide. Must be either "run-specific", "experiment-wide", or "global".}
\item{unalignedFDR}{(numeric) must be between 0 and maxFdrQuery. Features below unalignedFDR are
considered for quantification even without the RT alignment.}
\item{alignedFDR1}{(numeric) must be between unalignedFDR and alignedFDR2. Features below alignedFDR1 and aligned to the reference are
considered for quantification.}
\item{alignedFDR2}{(numeric) must be between alignedFDR1 and maxFdrQuery. Features below alignedFDR2 and within certain distance from the aligned time are
considered for quantification after the alignment.}
\item{criterion}{(integer) strategy to select peak if found overlapping peaks. 1:intensity, 2: RT overlap, 3: mscore, 4: edge distance}
\item{level}{(string) apply maxPeptideFDR on Protein as well if specified as "Protein". Default: "Peptide".}
\item{integrationType}{(string) method to compute the area of a peak contained in XICs. Must be
from "intensity_sum", "trapezoid", "simpson".}
\item{baseSubtraction}{(logical) TRUE: remove background from peak signal using estimated noise levels.}
\item{baselineType}{(string) method to estimate the background of a peak contained in XICs. Must be
from "none", "base_to_base", "vertical_division_min", "vertical_division_max".}
\item{fitEMG}{(logical) enable/disable exponentially modified gaussian peak model fitting.}
\item{recalIntensity}{(logical) recalculate intensity for all analytes.}
\item{fillMissing}{(logical) calculate intensity for analytes for which features are not found.}
\item{XICfilter}{(string) must be either sgolay, boxcar, gaussian, loess or none.}
\item{polyOrd}{(integer) order of the polynomial to be fit in the kernel.}
\item{kernelLen}{(integer) number of data-points to consider in the kernel.}
\item{globalAlignment}{(string) must be either "loess" or "linear".}
\item{globalAlignmentFdr}{(numeric) a numeric value between 0 and 1. Features should have m-score lower than this value for participation in LOESS fit.}
\item{globalAlignmentSpan}{(numeric) spanvalue for LOESS fit. For targeted proteomics 0.1 could be used.}
\item{RSEdistFactor}{(numeric) defines how much distance in the unit of rse remains a noBeef zone.}
\item{normalization}{(string) must be selected from "mean", "l2".}
\item{simMeasure}{(string) must be selected from dotProduct, cosineAngle, crossCorrelation,
cosine2Angle, dotProductMasked, euclideanDist, covariance and correlation.}
\item{alignType}{(numeric) available alignment methods are "global", "local" and "hybrid".}
\item{goFactor}{(numeric) penalty for introducing first gap in alignment. This value is multiplied by base gap-penalty. Should be between 10-1000.}
\item{geFactor}{(numeric) penalty for introducing subsequent gaps in alignment. This value is multiplied by base gap-penalty.}
\item{cosAngleThresh}{(numeric) in simType = dotProductMasked mode, angular similarity should be higher than cosAngleThresh otherwise similarity is forced to zero.}
\item{OverlapAlignment}{(logical) an input for alignment with free end-gaps. False: Global alignment, True: overlap alignment.}
\item{dotProdThresh}{(numeric) in simType = dotProductMasked mode, values in similarity matrix higher than dotProdThresh quantile are checked for angular similarity.}
\item{gapQuantile}{(numeric) must be between 0 and 1. This is used to calculate base gap-penalty from similarity distribution.}
\item{kerLen}{(integer) In simType = crossCorrelation, length of the kernel used to sum similarity score. Must be an odd number.}
\item{hardConstrain}{(logical) if FALSE; indices farther from noBeef distance are filled with distance from linear fit line.}
\item{samples4gradient}{(numeric) modulates penalization of masked indices.}
\item{fillMethod}{(string) must be either "spline", "sgolay" or "linear".}
\item{splineMethod}{(string) must be either "fmm" or "natural".}
\item{mergeTime}{(string) must be either "ref", "avg", "refStart" or "refEnd".}
\item{keepFlanks}{(logical) TRUE: Flanking chromatogram is not removed.}
\item{fraction}{(integer) indicates which fraction to align.}
\item{fractionNum}{(integer) Number of fractions to divide the alignment.}
\item{lossy}{(logical) if TRUE, time and intensity are lossy-compressed in generated sqMass file.}
\item{useIdentifying}{(logical) Set TRUE to use identifying transitions in alignment. (DEFAULT: FALSE)}
}
\description{
Retention alignment requires OpenSWATH/pyProphet extracted features and chromatograms. This function provides
a suite of parameters used for selecting features and manipulating chromatograms. Chromatogram
alignment can be performed via reference based or progressively via rooted or unrooted tree. This
function provides sensible parameters for these tasks.
}
\examples{
params <- paramsDIAlignR()
}
\seealso{
\code{\link{checkParams}, \link{alignTargetedRuns}}
}
\author{
Shubham Gupta, \email{shubh.gupta@mail.utoronto.ca}
ORCID: 0000-0003-3500-8152
License: (c) Author (2020) + GPL-3
Date: 2020-07-11
}
|
4c25a2cd9749ad060a025bf677147ba9d3f83ea5 | ff75c826e37599a631fc81ddf4f1d251aa349861 | /scripts/rcourse_lesson1.R | e61fd883da6aa92f32bdebe543096e88c3d2dac2 | [] | no_license | cedunia/Rcourse | e121c093c383eee8ab0b3834cacc70c064d70f2f | 5fdcce312099465627d00b51922b41326e461fef | refs/heads/master | 2021-01-09T20:42:16.941465 | 2016-06-14T14:46:19 | 2016-06-14T14:46:19 | 61,127,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | rcourse_lesson1.R | ## LOAD PACKAGES ##
# Load the data-manipulation (dplyr) and plotting (ggplot2) packages.
library(dplyr)
library(ggplot2)

## READ IN DATA AND ORGANIZE ##
# Read in the tab-delimited data file; the first row holds column names.
data <- read.table("./data/rcourse_lesson1_data.txt", sep = "\t",
                   header = TRUE)
# Look at the data: dimensions, first/last rows, and counts per group.
dim(data)
head(data)
tail(data)
xtabs(~group, data)
# Subset out the bilingual participants only.
data_bl <- data %>% filter(group == "bilingual")
# Look at the subset to confirm the filter worked
# (group counts should show bilingual only).
dim(data_bl)
head(data_bl)
tail(data_bl)
xtabs(~group, data_bl)
xtabs(~type, data_bl)

## MAKE FIGURES ##
# Boxplot of reaction time (rt) by group.
data.plot <- ggplot(data, aes(x = group, y = rt)) + geom_boxplot()
# Write the figure to PDF.  print() is required so the ggplot object is
# actually rendered even when this script is run via source(); a bare
# `data.plot` is only auto-printed at the interactive top level.
pdf("figures/data.pdf")
print(data.plot)
dev.off()
# |
afe9686afb8c9c361473546680b0babc5d25b551 | 92bbf482bb372f8e7a5b7bde21c4ec3333b4f116 | /plot4.R | 1e79b41a1031e7db31bf517e629c7bc89b19fb8b | [] | no_license | rezirezii/Exploratory-Data-Course | b3d4669b49d691267e6ffebacfd68c79db955243 | e12e419d431815e6b9519bdf8f406f5a1709dfce | refs/heads/master | 2020-05-01T18:28:46.035998 | 2019-03-25T19:00:55 | 2019-03-25T19:00:55 | 177,625,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,107 | r | plot4.R | power <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# Keep only the two target dates (1-2 Feb 2007); Date is still dd/mm/yyyy text here.
power1 <- subset(power, Date %in% c("1/2/2007","2/2/2007"))
power1$Date <- as.Date(power1$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x-axis.
powertime <- paste(as.Date(power1$Date), power1$Time)
power1$Datetime <- as.POSIXct(powertime)
# 2x2 grid of panels with tightened inner/outer margins.
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
# Panel 1: global active power over time.
with(power1, plot(Global_active_power~Datetime, type="l", ylab="Global Active Power", xlab=""))
# Panel 2: voltage over time.
with(power1, plot(Voltage~Datetime, type="l", ylab="Voltage ", xlab="datetime"))
# Panel 3: the three sub-metering series overlaid, with a legend.
with(power1, plot(Sub_metering_1~Datetime, type="l", ylab="Energy sub metering", xlab=""))
lines(power1$Sub_metering_2~power1$Datetime,col='red')
lines(power1$Sub_metering_3~power1$Datetime,col='blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power over time.
with(power1, plot(Global_reactive_power~Datetime, type="l", ylab="Global_reactive_power", xlab="datetime"))
# Copy the on-screen figure to a 480x480 PNG (dev.off() follows on the next line).
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() |
83ac1ec09a2d752bfbba507c50c7ca6be43faf76 | 2a032113392cd75a909a185adb6168f454883d09 | /R Scripts/demonstrations.R | df756fb4560fa478509ad3074c8dbf56478cbf62 | [] | no_license | fomotis/BayesianTutorial | 8bf62bad22243a82763eabd51222e5cbb5df2200 | e7dd731b285ba4d9592e731b855d345c1813189d | refs/heads/master | 2021-03-26T01:18:08.834420 | 2020-03-27T09:59:58 | 2020-03-27T09:59:58 | 247,661,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,555 | r | demonstrations.R | library(rstan)
library(coda)
library(bayesplot)
library(bridgesampling)
library(tidyverse)
library(ggpubr)
library(nlme)
library(deSolve)
library(rbokeh)
library(plotly)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores(logical = FALSE))
################ Data ##########
# Read the combined experiment data; "." marks missing values.
combined_data <- read.csv("Data/combined_data.csv", header = T, na.strings = ".",
                          stringsAsFactors = FALSE ) %>%
  mutate(Temperature = factor(Temperature))
# Monoculture data for strain B5, dropping the first time point.
b5_mono_data <- combined_data %>%
  dplyr::filter(Experiment == "Mono" &
                  Strain == "B5" & Time > 1)
# order rows by sample then time so repeated measures line up
b5_mono_data <- b5_mono_data[order(b5_mono_data$Sample.ID2, b5_mono_data$Time), ]
#turn temperature to factor
b5_mono_data$Temperature <- factor(b5_mono_data$Temperature)
#creating numeric group ids for random effects (assumes 8 time points per sample)
b5_mono_data <- b5_mono_data %>% dplyr::mutate(Groups2 = rep(1:length(unique(b5_mono_data$Sample.ID2)),
                                                             each = 8))
#indicator for treatment = 20 or 22
b5_mono_data <- b5_mono_data %>%
  dplyr::mutate(T_2022 = ifelse(
    Treatment %in% c(20, 22), 1, 0)
  )
#remove NAs (rows with a missing m_fschlin trait value) from the data
b5_mono_data_nona <- b5_mono_data[!is.na(b5_mono_data$m_fschlin), ]
#obtain the treatment design matrix (one dummy column per temperature)
trt_mat <- as.matrix(b5_mono_data_nona[, c("T_18", "T_20", "T_22")])
################# function to fit model using stan
#mod is the stan model file, mod_data is the data needed to fit the model
stan_mod_func <- function(mod, mod_data, init = NULL, ...) {
  # Compile a Stan model file and draw posterior samples from it.
  #
  #   mod      : path to a .stan model file
  #   mod_data : named list of data passed to rstan::sampling()
  #   init     : optional list of per-chain initial values; when NULL,
  #              rstan's default random initialisation is used
  #   ...      : unused, kept for call compatibility
  #
  # Returns the fitted stanfit object.  The original version duplicated
  # the whole compile-and-sample sequence in both branches of the init
  # check; the branches are merged here and `init` is forwarded only
  # when it was supplied.
  ret_mod <- stanc(mod) # formulate model from stan file
  sm <- stan_model(stanc_ret = ret_mod, verbose = FALSE)
  sampling_args <- list(sm, data = mod_data, iter = 3000, thin = 1,
                        control = list(max_treedepth = 13, adapt_delta = 0.98),
                        chains = 2)
  if (!is.null(init)) {
    sampling_args$init <- init
  }
  do.call(sampling, sampling_args)
}
### function to return the posterior mean of a parameter
par_postsums <- function(stan_mod, par = "yhat") {
  # Posterior summary table for `par` from a fitted stanfit object,
  # returned as a data.frame (rows = parameter elements; columns are
  # whatever rstan's summary() produces, e.g. mean and quantiles).
  as.data.frame(summary(stan_mod, pars = par)$summary)
}
par_postsums2 <- function(stan_mod, pars = c("sigma","alpha", "beta", "lsigma")) {
  # Condensed posterior summary: posterior mean with 2.5%/97.5% bounds
  # (renamed LCI/UCI) for the requested parameters, plus a Parameters
  # column taken from the summary row names.
  full_sum <- par_postsums(stan_mod = stan_mod, par = pars)
  slim <- as.data.frame(full_sum[, c("mean", "2.5%", "97.5%")])
  names(slim) <- c("mean", "LCI", "UCI")
  cbind(slim, Parameters = row.names(slim))
}
########### function to extract residuals (should be a matrix) and plot them for bivariate normal model
error_plot <- function(stan_mod_obj, resid_name = "epsilon",
                       number_rows, group = NULL,
                       colnames = c("error_Trait", "error_abundance")) {
  # Scatter plot of the posterior-mean residuals of a bivariate model:
  # trait residual (y) against abundance residual (x), with an overall
  # least-squares trend line.
  #   stan_mod_obj : fitted stanfit object
  #   resid_name   : name of the residual parameter in the Stan model
  #   number_rows  : number of observations; the flat posterior-mean
  #                  vector is reshaped row-wise into number_rows x 2
  #   group        : optional grouping vector used to colour the points
  #   colnames     : names for the two residual columns, in the order
  #                  (trait, abundance)
  errors <- matrix(par_postsums(stan_mod_obj, resid_name)[, "mean"],
                   nrow = number_rows, byrow = TRUE)
  errors <- as.data.frame(errors)
  names(errors) <- colnames
  if(!is.null(group)) {
    errors$Group <- group
    # points coloured by group, but the fitted line is computed on all
    # points pooled (inherit.aes = FALSE on the smoother)
    p <- errors %>% ggplot(aes(x = get(colnames[2], errors), y = get(colnames[1], errors),
                               group = Group, color = Group)) +
      geom_point(size = 5) +
      theme_bw() + geom_smooth(aes(x = get(colnames[2], errors), y = get(colnames[1], errors)),
                               method = "lm", inherit.aes = FALSE, se = FALSE,
                               color = "black", data = errors) +
      labs(x = expression(epsilon[abundance]), y = expression(epsilon[trait])) #+
    # optional per-group trend lines, kept commented out for reference:
    #geom_smooth(aes(x = get(colnames[2], errors), y = get(colnames[1], errors),
    #               group = Group, color = Group),
    #            method = "lm", inherit.aes = FALSE, se = FALSE,
    #            data = errors)
  } else {
    p <- errors %>% ggplot(aes(x = get(colnames[2], errors), y = get(colnames[1], errors))) +
      geom_point(size = 5) +
      theme_bw() + geom_smooth(aes(x = get(colnames[2], errors), y = get(colnames[1], errors)),
                               method = "lm", inherit.aes = FALSE, se = FALSE, color = "black",
                               data = errors) +
      labs(x = expression(epsilon[abundance]), y = expression(epsilon[trait]))
  }
  return(p)
}
### credible interval plots for parameter estimates
ciplot <- function(pdata, x, y, x_lab, y_lab, x_scale = NULL) {
  # Point-and-errorbar plot of estimates with their interval bounds.
  #   pdata        : data frame with columns LCI and UCI plus the
  #                  columns named by `x` and `y`
  #   x, y         : column names given as strings (looked up via get())
  #   x_lab, y_lab : axis labels
  #   x_scale      : optional vector of x-axis labels (e.g. plotmath
  #                  expressions); when supplied a discrete scale with
  #                  those labels is added
  # NOTE(review): the two branches draw the layers in a different order
  # (errorbars under vs over the points) -- confirm this is intended.
  pd <- position_dodge(0.1)
  if(!is.null(x_scale)) {
    pdata %>%
      ggplot(aes(x = get(x, pdata), y = get(y, pdata))) +
      geom_errorbar(aes(ymin = LCI, ymax = UCI),
                    color="black", width = 0.1,
                    position = pd, data = pdata,
                    inherit.aes = TRUE) +
      geom_point(size = 4, position = pd, shape = 21,
                 fill = "red3", color = "black") +
      theme_bw() +
      labs(x = x_lab, y = y_lab) +
      scale_x_discrete(labels = x_scale)
  } else {
    pdata %>%
      ggplot(aes(x = get(x, pdata), y = get(y, pdata))) +
      geom_point(size = 4, position = pd, shape = 21,
                 fill = "red3", color = "black") +
      geom_errorbar(aes(ymin = LCI, ymax = UCI),
                    color="black", width = 0.1,
                    position = pd, data = pdata,
                    inherit.aes = TRUE) +
      theme_bw() +
      labs(x = x_lab, y = y_lab)
  }
}
###### function to plot predicted values
pred_plot <- function(orig_data, predicted_values, x, y, x_lab, y_lab) {
  # Observed values (points) overlaid with model predictions (dashed
  # lines), coloured by temperature.  The prediction lines are drawn
  # for replicate 2 only, so a single curve is shown per temperature.
  pdata <- cbind(orig_data, Pred = predicted_values)
  pdata2 <- pdata %>% filter(Replicate == 2)
  ggplot(data = pdata, aes(x = get(x, pdata), y = get(y, pdata), group = interaction(Temperature, Replicate),
                           color = Temperature)) +
    geom_point(size = 4) +
    geom_line(aes(x = get(x, pdata2), y = get("Pred", pdata2), group = interaction(Temperature, Replicate),
                  color = Temperature), size = 1.5,
              linetype= "dashed", data = pdata2, inherit.aes = FALSE) +
    theme_bw() + color_palette("Dark2") +
    scale_x_continuous(breaks = seq(0, 30, by = 4)) +
    #scale_y_continuous(breaks = seq(4, 8, by = 0.5)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 0.9, vjust = 0.5), legend.position = "top") +
    labs(x = x_lab, y = y_lab)
}
#### abundance plot
# Abundance trajectories for the B5 monoculture, one line per
# temperature x replicate combination.
abd_plot <- combined_data %>% filter(Strain == "B5" & Experiment == "Mono" & Time > 1) %>%
  ggplot(aes(x = Time, y = ldensity, group = interaction(Temperature, Replicate), color = Temperature)) +
  geom_point(size = 4) +
  geom_line(size = 1.5) +
  theme_minimal() +
  color_palette("Dark2") +
  scale_x_continuous(breaks = seq(0, 30, by = 4)) +
  scale_y_continuous(breaks = seq(0, 14, by = 1)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 0.01, vjust = 0.5),
        legend.position = "right",
        axis.line = element_line(size = 0.5)) +
  labs(x = "Time", y = "log(Abundance)")
# interactive version, saved as a standalone html widget
abds <- ggplotly(abd_plot)
withr::with_dir("html", htmlwidgets::saveWidget(abds, file = "abd.html"))
########## Frequentist analysis for the Verhuls Model
# Logistic (Verhulst) growth curve: expected value at `time` given
# initial size N0, growth rate r and carrying capacity K.  This is the
# closed-form solution K*N0 / (N0 + (K - N0)*exp(-r*t)) of the logistic
# ODE; vectorised over `time` (and the parameters, by recycling).
SSLVE <- function(time, N0, r, K) {
  decay <- exp(-r * time)
  (K * N0) / (N0 + (K - N0) * decay)
}
# Nonlinear GLS fit of the Verhulst curve with temperature-specific
# growth rate r and carrying capacity K (intercept level = 18C).
ver_model <- gnls(ldensity ~ SSLVE(time = Time, N0, r, K),
                  data = b5_mono_data,
                  params = list(N0 ~ 1, r ~ Temperature, K ~ Temperature),
                  start = c(4.2, 0.22, 0.23, 0.36, 9, 0, 0),
                  correlation = NULL,
                  na.action = na.omit
)
ver_model_coefs <- coef(ver_model)
ver_model_vacov <- vcov(ver_model)
ver_model_sum <- summary(ver_model)
# rename coefficients/vcov so the delta-method expressions below can
# refer to them by short names
names(ver_model_coefs) <- c("N0", "rInt", "r20", "r22", "KInt", "K20", "K22")
row.names(ver_model_vacov) <- c("N0", "rInt", "r20", "r22", "KInt", "K20", "K22")
colnames(ver_model_vacov) <- c("N0", "rInt", "r20", "r22", "KInt", "K20", "K22")
# per-temperature r, K and their ratio r/K ("intraspecific effect"),
# written as text expressions for car::deltaMethod()
interestr <- c("rInt", paste("(rInt", c("r20)","r22)"), sep = "+"))
interestK <- c("KInt", paste("(KInt", c("K20)","K22)"), sep = "+"))
interestA <- paste(interestr,interestK, sep = "/")
interest <- c(interestr, interestK, interestA)
# delta-method point estimate, SE and 95% CI for each expression
estimates <- t(sapply(interest, function(.x) {
  car::deltaMethod(ver_model_coefs, g. = .x,
                   vcov. = ver_model_vacov,
                   level = 0.95)
}))
colnames(estimates) <- c("Estimate", "SE", "LCI", "UCI")
estimates <- as.data.frame(sapply(as.data.frame(estimates), as.numeric)) %>%
  mutate(Parameters = c("r18", "r20", "r22", "K18", "K20", "K22",
                        "A18", "A20", "A22"))
estimates$Type <- rep(c("growth rate", "carrying capacity", "intraspecific effect"), each = 3)
# static ggplot CI panels, one per parameter type
pr <- ciplot(estimates %>% filter(Type == "growth rate"), x = "Parameters", y = "Estimate",
             x_lab = "Parameters", y_lab = "Estimates", x_scale = c(expression(r[18]), expression(r[20]),
                                                                   expression(r[22]))) +
  facet_wrap(.~Type, scales = "free_y")
pk <- ciplot(estimates %>% filter(Type == "carrying capacity"), x = "Parameters", y = "Estimate",
             x_lab = "Parameters", y_lab = "", x_scale = c(expression(K[18]), expression(K[20]),
                                                          expression(K[22]))) +
  facet_wrap(.~Type, scales = "free_y")
pa <- ciplot(estimates %>% filter(Type == "intraspecific effect"), x = "Parameters", y = "Estimate",
             x_lab = "Parameters", y_lab = "", x_scale = c(expression(A[18]), expression(A[20]),
                                                          expression(A[22]))) +
  facet_wrap(.~Type, scales = "free_y")
p1 <- ggarrange(plotlist = list(pr, pk, pa), ncol = 3, nrow = 1,
                align = "hv", common.legend = TRUE)
#ggplotly(p1)
### rbokeh interactive version of the same three CI panels
p1_list = vector("list", 3)
j<-1
for(i in c("growth rate", "carrying capacity", "intraspecific effect")) {
  p1_list[[j]] <- figure(xlim = estimates$Parameters[estimates$Type == i],
                         width = 220, height = 350, tools = c("pan", "wheel_zoom", "box_zoom", "box_select", "reset")) %>%
    ly_segments(Parameters, LCI, Parameters, UCI, data = estimates[estimates$Type == i,],
                color = "black", width = 2) %>%
    ly_points(Parameters, Estimate, glyph = 16, data = estimates[estimates$Type == i,],
              color = "red", size = 20, hover = c(Parameters, Estimate)) %>%
    y_axis(label = "Estimates")
  j <<- j+1
}
p1s <- grid_plot(p1_list, ncol = 3, same_axes = F, link_data = F)
withr::with_dir("html", htmlwidgets::saveWidget(p1s, file = "est_freq.html"))
##################### Bayesian Analysis
set.seed(1992)
#covariate matrix for the trait mean function
X_obs <- b5_mono_data_nona %>%
  model.matrix(~I(Time^0.5):T_18 + I(Time^3):T_2022 +
                 I((Time^3) * log(Time)):T_2022 +
                 T_18 + T_20 + T_22, data = . )
# data list in the layout expected by stan_models/verhulst.stan
verhulst_data <- list(
  N_obs = nrow(X_obs),
  y_obs = b5_mono_data_nona[, "ldensity"],
  trt = b5_mono_data_nona[, c("T_18", "T_20", "T_22")],
  time = b5_mono_data_nona$Time,
  n_trt = ncol(trt_mat)
)
# two chains, started from half / 1.5x a set of reference values
stan_verhulst <- stan_mod_func(mod = "stan_models/verhulst.stan", mod_data = verhulst_data,
                               init = list(
                                 list(r = c(0.22, 0.23, 0.35)/2, K = c(9.72, 11.50, 10.99)/2),
                                 list(r = c(0.22, 0.23, 0.35)*1.5, K = c(9.72, 11.50, 10.99)*1.5)
                               ))
# posterior means and 95% credible intervals, labelled by temperature
ver_pdata <- par_postsums2(stan_verhulst, pars = c("r", "K", "A"))
ver_pdata$Parameters <- c(paste0("r", c(18, 20, 22)),
                          paste0("K", c(18, 20, 22)),
                          paste0("A", c(18, 20, 22)))
ver_pdata$Type <- rep(c("growth rate", "carrying capacity", "intraspecific effect"), each = 3)
### plotting the Bayesian estimates with rbokeh, one panel per type
p2_list = vector("list", 3)
j<-1
for(i in c("growth rate", "carrying capacity", "intraspecific effect")) {
  p2_list[[j]] <- figure(xlim = ver_pdata$Parameters[ver_pdata$Type == i],
                         width = 220, height = 350, tools = c("pan", "wheel_zoom", "box_zoom", "box_select", "reset")) %>%
    ly_segments(Parameters, LCI, Parameters, UCI, data = ver_pdata[ver_pdata$Type == i,],
                color = "black", width = 2) %>%
    ly_points(Parameters, mean, glyph = 16, data = ver_pdata[ver_pdata$Type == i,],
              color = "red", size = 20, hover = c(Parameters, mean)) %>%
    y_axis(label = "Estimates")
  j <<- j+1
}
p2s <- grid_plot(p2_list, ncol = 3, same_axes = F, link_data = F)
withr::with_dir("html", htmlwidgets::saveWidget(p2s, file = "est_bay.html"))
################ plotting both results together ##########
# Combine the frequentist and Bayesian estimates into one table for a
# side-by-side comparison plot (9 rows per method).
bay_fre_est <- rbind(estimates %>% dplyr::select(-SE),
                     ver_pdata %>% dplyr::rename(Estimate = mean)
) %>%
  mutate(Method = rep(c("Frequentist", "Bayesian"), each = 9)
)
pd <- position_dodge(1.0)
p3_list <- vector("list", 3)
j <- 1
# one dodged point+errorbar panel per parameter type; the third panel
# differs only in its theme() settings
for(i in c("growth rate", "carrying capacity", "intraspecific effect")) {
  if(j == 3) {
    p3_list[[j]] <- bay_fre_est %>%
      filter(Type == i) %>%
      ggplot(aes(x = Parameters, y = Estimate, group = Method, colour = Method)) +
      geom_errorbar(aes(ymin = LCI, ymax = UCI), colour = "black",
                    width=.1, position = pd, size = 1.5) +
      geom_point(position = pd, size = 9) +
      theme_minimal() +
      theme(axis.line = element_line(size = 0.5),
            legend.position = "none",
            legend.direction = "vertical")
  } else {
    p3_list[[j]] <- bay_fre_est %>%
      filter(Type == i) %>%
      ggplot(aes(x = Parameters, y = Estimate, group = Method, colour = Method)) +
      geom_errorbar(aes(ymin = LCI, ymax = UCI), colour = "black",
                    width=.1, position = pd, size = 1.5) +
      geom_point(position = pd, size = 9) +
      theme_minimal() +
      theme(legend.position = "none",
            axis.line = element_line(size = 0.5)
      )
  }
  j <<- j+1
}
p3s_gga <- ggarrange(plotlist = p3_list, nrow = 1, ncol = 3)
p3s <- subplot(p3_list, nrows = 1)
withr::with_dir("html", htmlwidgets::saveWidget(p3s, file = "est_bayfreq.html"))
################# Lotka-Voltera for 2 species example
# Two-species (B4 + B5) biculture data, with numeric codes for
# temperature and species.
biculture_data <- combined_data %>%
  dplyr::filter(Experiment == "BI") %>%
  mutate(Temperature = factor(Temperature),
         Temperature2 = case_when(
           Temperature == 18 ~ 1,
           Temperature == 20 ~ 2,
           Temperature == 22 ~ 3
         ),
         Species = case_when(
           Strain == "B4" ~ 1,
           Strain == "B5" ~ 2
         )
  )
# drop the first time point and rows with missing log-density
biculture_data_nona <- biculture_data %>% filter(Time > 1, !is.na(ldensity))
# one row per sample/time with separate columns per strain
biculture_data_wide <- biculture_data_nona %>% pivot_wider(names_from = Strain,
                                                           id_cols = c("Date2", "Sample.ID2", "Date", "Time",
                                                                       "Treatment", "Replicate", "Temperature",
                                                                       "Temperature2", "T_18", "T_20", "T_22", "Groups"
                                                           ),
                                                           values_from = c(ldensity, m_fschlin, m_yelbhlin,
                                                                           m_redbhlin, v_fschlin, v_yelbhlin,
                                                                           Number)
)
# abundance trajectories per strain x temperature x replicate
biculture_data %>% dplyr::filter( Time > 1) %>%
  ggplot(aes(x = Time, y = ldensity, group = interaction(Strain, Temperature, Replicate),
             color = Temperature)) +
  geom_point(size = 4) +
  geom_line(aes(linetype = Strain), size = 1.5) +
  theme_minimal() +
  color_palette("Dark2") +
  scale_x_continuous(breaks = seq(0, 30, by = 4)) +
  scale_y_continuous(breaks = seq(0, 14, by = 1)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 0.9, vjust = 0.5),
        legend.position = "top") +
  labs(x = "Time", y = "Log(Abundance)")
### frequentist analysis
# 2 species LVE
# Derivative function for the 2-species competitive Lotka-Volterra
# system, in the signature deSolve::ode() expects: given the current
# state `y` and a parameter list `parms`, return a one-element list
# holding the vector of derivatives dN/dt.
LVE <- function(time, y, parms, ...) {
  rates <- numeric(length(y))
  rates[1] <- y[1] * parms$r1 * (1 - (parms$alpha11 * y[1] + parms$alpha12 * y[2]))
  rates[2] <- y[2] * parms$r2 * (1 - (parms$alpha21 * y[1] + parms$alpha22 * y[2]))
  list(rates)
}
# time grid for the ODE solver: unit steps up to t = 50, plus the
# observed sampling times so predictions exist at every observation
ts <- sort(unique(c(seq(min(biculture_data$Time), 50, by = 1),
                    biculture_data$Time)))
#a quick sanity check that LVE() integrates with plausible parameters
out_solu <- ode(y = c(7, 6),
                times = ts,
                func = LVE,
                parms = list(r1 = 0.2, r2 = 0.3, alpha11 = 0.06, alpha21 = 0.02, alpha12 = 0.02, alpha22 = 0.06)
)
# Frequentist fit of the 2-species Lotka-Volterra model, one fit per
# temperature treatment (18, 20, 22).
# NOTE(review): dmvnorm() below comes from the mvtnorm package, which
# is not loaded at the top of this script -- confirm library(mvtnorm).
results_freq <- lapply(c(18, 20, 22), function(i) {
  # data for the current treatment (the original hard-coded 18 here,
  # so all three iterations fitted the same subset -- fixed to use `i`)
  ddata <- biculture_data_wide %>% filter(Temperature == i)
  # Negative log-likelihood of the observed log-densities given the
  # LVE solution implied by `parms`, with bivariate normal errors.
  SSR <- function(parms) {
    # mapping the parameters to their respective positions
    r1 <- parms[1]
    r2 <- parms[2]
    alpha11 <- parms[3]
    alpha21 <- parms[4]
    alpha12 <- parms[5]
    alpha22 <- parms[6]
    sigma_b4 <- parms[7]
    sigma_b5 <- parms[8]
    rho <- 0      # error correlation held fixed
    N0_1 <- 3.6   # initial log-densities held fixed
    N0_2 <- 3.5
    # solve the ODE over the full time grid
    out_solu <- ode(y = c(N0_1, N0_2), times = ts, func = LVE,
                    parms = list(r1 = r1, r2 = r2, alpha11 = alpha11, alpha12 = alpha12,
                                 alpha21 = alpha21, alpha22 = alpha22))
    out_dataframe <- as.data.frame(out_solu)
    # keep only the observed time points
    out_dataframe <- out_dataframe[out_dataframe$time %in% unique(biculture_data$Time), ]
    names(out_dataframe) <- c("time", "ldensity_B4_pred", "ldensity_B5_pred")
    dd <- merge(ddata, out_dataframe, by.x = "Time", by.y = "time")
    Sigma <- matrix(c(sigma_b4^2, sigma_b4 * sigma_b5 * rho,
                      sigma_b4 * sigma_b5 * rho, sigma_b5^2),
                    nrow = 2, ncol = 2, byrow = TRUE)
    ll <- numeric(nrow(dd))
    # loop index renamed from `i` so it no longer shadows the treatment
    for (k in seq_len(nrow(dd))) {
      ll[k] <- dmvnorm(x = dd[k, c("ldensity_B4", "ldensity_B5")],
                       mean = c(dd$ldensity_B4_pred[k], dd$ldensity_B5_pred[k]),
                       sigma = Sigma, log = TRUE)
    }
    -sum(ll)
  }
  # Only the 8 parameters SSR actually uses are optimised; rho and the
  # initial densities are fixed inside SSR (keeping the unused entries
  # in the vector made the Hessian singular).
  parms_start <- c(r1 = 0.7, r2 = 0.4, alpha11 = 0.06,
                   alpha12 = 0.02, alpha21 = 0.02, alpha22 = 0.06,
                   sigma_b4 = 0.70, sigma_b5 = 0.45)
  # optim() switches to L-BFGS-B (with a warning) whenever bounds are
  # supplied, so the bounds below are what is actually honoured.
  # hessian = TRUE so standard errors can be derived afterwards; the
  # original called summary() on the optim list, which has no
  # $coefficients and crashed.  The alpha21 upper bound of 0 in the
  # original is fixed to Inf to match the other alphas.
  lve_freq <- optim(par = parms_start, fn = SSR,
                    method = "Nelder-Mead",
                    control = list(maxit = 100),
                    hessian = TRUE,
                    lower = c(r1 = 0, r2 = 0, alpha11 = 0, alpha12 = 0,
                              alpha21 = 0, alpha22 = 0,
                              sigma_b4 = 0, sigma_b5 = 0),
                    upper = c(r1 = Inf, r2 = Inf, alpha11 = Inf, alpha12 = Inf,
                              alpha21 = Inf, alpha22 = Inf,
                              sigma_b4 = Inf, sigma_b5 = Inf))
  est <- lve_freq$par
  # approximate standard errors from the inverse Hessian; fall back to
  # NA when the Hessian cannot be inverted
  se <- tryCatch(sqrt(diag(solve(lve_freq$hessian))),
                 error = function(e) rep(NA_real_, length(est)))
  data.frame(Parameters = names(parms_start),
             Estimates = est,
             LCI = est - se,
             UCI = est + se)
})
#### Bayesian Analysis
# Keep only rows with a complete B4 log-density (NA rows would break
# the Stan data block).
biculture_data_wide_nona <- biculture_data_wide[!is.na(biculture_data_wide$ldensity_B4), ]
results_backup <- vector("list", 3)
j <- 1
# Fit the generalised Lotka-Volterra Stan model once per temperature
# treatment; returns the list of fitted stanfit objects.
results_treatment <- lapply(c(18, 20, 22), function(i) {
  print(i)
  bi_datas <- biculture_data_wide_nona[biculture_data_wide_nona$Treatment == i, ]
  # data list in the layout expected by generalisedLVE.stan
  bi_data1 <- list(
    N_obs = nrow(bi_datas),
    T = length(ts),
    N = as.matrix(bi_datas[, c("ldensity_B4", "ldensity_B5")]),
    t0 = 0,
    ts = ts,
    time_obs = bi_datas$Time,
    nsp = 2
  )
  # treatment-specific starting values
  if( i == 18) {
    N01 <- c(3.6, 3.5)
    r <- c(0.43, 0.35)
    alpha11 <- 0.07; alpha22 <- 0.07
    alpha12 <- 0.02; alpha21 <- 0.02
  } else if (i == 20) {
    N01 <- c(6.5, 5.6)
    r <- c(0.20, 0.21)
    alpha11 <- 0.06; alpha22 <- 0.06
    alpha12 <- 0.02; alpha21 <- 0.02
  } else if (i == 22) {
    N01 <- c(6.1, 5.3)
    r <- c(0.16, 0.16)
    alpha11 <- 0.06; alpha22 <- 0.06
    alpha12 <- 0.02; alpha21 <- 0.01
  }
  # two chains, the second started from 1.5x-inflated values
  st_mod <- stan_mod_func(mod = "stan_models/generalisedLVE.stan", mod_data = bi_data1,
                          init = list(
                            list(r = r, alpha11 = alpha11, alpha22 = alpha22,
                                 alpha12 = alpha12, alpha21 = alpha21, N0 = N01),
                            list(r = r*1.5, alpha11 = alpha11*1.5, alpha22 = alpha22*1.5,
                                 alpha12 = alpha12*1.5, alpha21 = alpha21*1.5,
                                 N0 = N01*1.5))
  )
  # `<<-` is required here: the plain `<-` used originally only
  # modified a local copy of results_backup inside this closure, so
  # the global backup list stayed empty.
  results_backup[[j]] <<- st_mod
  j <<- j + 1
  print(par_postsums2(st_mod, c("r", "N0", "alpha11", "alpha22", "alpha12", "alpha21", "cor", "sigma")))
  return(st_mod)
})
# Posterior summaries per treatment.  NOTE(review): the Parameters
# labels reuse the "r18"/"N0" prefixes for the 20C and 22C fits --
# presumably copy-paste; the Temperature column distinguishes them.
results_18 <- par_postsums2(results_treatment[[1]],
                            c("r", "N0", "alpha11", "alpha22", "alpha12", "alpha21", "cor")) %>%
  mutate(Parameters = c("r18B4", "r18B5", "N0B4", "N0B5", "alpha11", "alpha22", "alpha12", "alpha21", "rho"),
         Type = rep(c("growth rate", "N0", "alphas", "rho"), times = c(2, 2, 4, 1) ),
         Temperature = rep(18, times = 9))
results_20 <- par_postsums2(results_treatment[[2]],
                            c("r", "N0", "alpha11", "alpha22", "alpha12", "alpha21", "cor")) %>%
  mutate(Parameters = c("r18B4", "r18B5", "N0B4", "N0B5", "alpha11", "alpha22", "alpha12", "alpha21", "rho"),
         Type = rep(c("growth rate", "N0", "alphas", "rho"), times = c(2, 2, 4, 1) ),
         Temperature = rep(20, times = 9))
results_22 <- par_postsums2(results_treatment[[3]],
                            c("r", "N0", "alpha11", "alpha22", "alpha12", "alpha21", "cor")) %>%
  mutate(Parameters = c("r18B4", "r18B5", "N0B4", "N0B5", "alpha11", "alpha22", "alpha12", "alpha21", "rho"),
         Type = rep(c("growth rate", "N0", "alphas", "rho"), times = c(2, 2, 4, 1) ),
         Temperature = rep(22, times = 9))
# stack and make Temperature a factor for plotting
results_all <- rbind(results_18, results_20, results_22)
results_all$Temperature <- factor(results_all$Temperature)
pd <- position_dodge(1.0)
# CI panels of the posterior summaries, one per parameter type.
# NOTE: p4_list is initialised with length 3 but the loop fills four
# slots; R grows the list automatically on the fourth assignment.
p4_list <- vector("list", 3)
j <- 1
for(i in c("growth rate", "N0", "alphas", "rho")) {
  # the last panel only differs in its theme() settings
  if(j == 4) {
    p4_list[[j]] <- results_all %>%
      filter(Type == i) %>%
      ggplot(aes(x = Parameters, y = mean, group = Temperature, colour = Temperature)) +
      geom_errorbar(aes(ymin = LCI, ymax = UCI), colour = "black",
                    width=.1, position = pd, size = 1.5) +
      geom_point(position = pd, size = 9) +
      theme_minimal() +
      theme(axis.line = element_line(size = 0.5),
            legend.position = "none",
            legend.direction = "vertical") +
      labs(y = "Posterior Mean +- Posterior SD")
  } else {
    p4_list[[j]] <- results_all %>%
      filter(Type == i) %>%
      ggplot(aes(x = Parameters, y = mean, group = Temperature, colour = Temperature)) +
      geom_errorbar(aes(ymin = LCI, ymax = UCI), colour = "black",
                    width=.1, position = pd, size = 1.5) +
      geom_point(position = pd, size = 9) +
      theme_minimal() +
      theme(legend.position = "none",
            axis.line = element_line(size = 0.5)
      ) +
      labs(y = "Posterior Mean +- Posterior SD")
  }
  j <<- j+1
}
p4s <- subplot(p4_list, nrows = 2)
withr::with_dir("html", htmlwidgets::saveWidget(p4s, file = "est_bayLVE.html"))
############### model comparison
# Gompertz growth model fitted to the same data list, to be compared
# against the Verhulst fit; the whole workspace is saved at the end.
stan_gompertz <- stan_mod_func(mod = "stan_models/gompertz.stan", mod_data = verhulst_data,
                               init = list(
                                 list(r = c(0.22, 0.23, 0.35)/2, c = c(0.72, 0.50, 1.99)/2),
                                 list(r = c(0.22, 0.23, 0.35)*1.5, c = c(1.72, 3.50, 3.99)*1.5 )
                               ))
save.image("RImage/demonstration.RData")
|
07c0eed7ecd9f9fce314e8497d4914ff1b693ba3 | 31d2d467030565c44f4d28d42c0e4d225dececaa | /R/infoGPCM.R | 9dfb0f6e5b7a83beb6838cf00fa17275cd76d70f | [] | no_license | cran/ltm | 84fd858915db9fe1506a40628f61e6500a21ed1c | dbbabfa99fa09ad94113856a6a5ae1535e7b817f | refs/heads/master | 2022-02-25T01:10:01.747125 | 2022-02-18T08:40:02 | 2022-02-18T08:40:02 | 17,697,218 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 464 | r | infoGPCM.R | infoGPCM <-
function (betas, z, IRT.param) {
    ## Item information for the Generalized Partial Credit Model at the
    ## ability values in `z`: for each item, discrimination^2 times the
    ## probability-weighted squared deviation of the category score from
    ## its expectation.  Returns a length(z) x length(betas) matrix with
    ## one column per item.
    n_ab <- length(z)
    n_items <- length(betas)
    ## the discrimination parameter is the last element of each item's
    ## parameter vector
    discrim <- sapply(betas, tail, 1)
    probs <- crf.GPCM(betas, z, IRT.param)
    ## expected category score per ability value, one column per item
    exp_score <- sapply(probs, function (pr) colSums(pr * seq_len(nrow(pr))))
    out <- matrix(0, n_ab, n_items)
    for (item in seq_len(n_items)) {
        dev2 <- outer(seq_len(nrow(probs[[item]])), exp_score[, item], "-")^2
        out[, item] <- discrim[item]^2 * colSums(probs[[item]] * dev2)
    }
    colnames(out) <- names(betas)
    out
}
|
65f27975cd3cd2ec48c0856f290ef387545ccf6e | 73377bc21b6740f4d43874c9e1f7700a119ae8d6 | /R/Dictionary_S3methods.R | a4e47b27d8f789c6f69746b33eb35abdd0217c46 | [
"MIT"
] | permissive | cran/dictionar6 | b67c283165e629eab211bd3966594b880f5140c1 | 8b48d99a00543aea5dd819d6465d8f18d703df01 | refs/heads/master | 2023-08-08T00:36:03.549600 | 2021-09-13T03:40:02 | 2021-09-13T03:40:02 | 389,685,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,855 | r | Dictionary_S3methods.R | #' @export
`[.Dictionary` <- function(x, i) {
  # S3 sugar for key lookup: d["key"] returns the matched items via the
  # object's get_list() method.
  x$get_list(i)
}
#' @export
`[<-.Dictionary` <- function(x, i, value) { # nolint
  # S3 sugar for assignment: d["key"] <- value adds/overwrites the
  # key-value pair(s) via the object's add() method, then returns the
  # (reference-semantics) object invisibly so the replacement call
  # rebinds it.
  x$add(keys = i, values = value)
  invisible(x)
}
#' @export
length.Dictionary <- function(x) {
  # Number of key-value pairs, as reported by the object's own
  # `length` field.
  x$length
}
#' @export
summary.Dictionary <- function(object, n = 2, ...) {
  # Delegates to the object's own summary() method; `n` is forwarded
  # to it and extra arguments are ignored.
  object$summary(n = n)
}
#' @export
as.character.Dictionary <- function(x, n = 2, ...) { # nolint
  # String form "{k1: v1, k2: v2, ...}".  When the dictionary holds
  # more than 2*n items, only the first n and last n pairs are shown
  # with an ellipsis between them.
  ks <- x$keys
  vs <- vapply(x$values, as.character, character(1), USE.NAMES = FALSE)
  len <- x$length
  if (len <= 2 * n) {
    body <- paste(ks, vs, sep = ": ", collapse = ", ")
  } else {
    head_ix <- seq_len(n)
    tail_ix <- seq(len - n + 1, len)
    body <- paste0(
      paste(ks[head_ix], vs[head_ix], sep = ": ", collapse = ", "),
      ", ..., ",
      paste(ks[tail_ix], vs[tail_ix], sep = ": ", collapse = ", ")
    )
  }
  sprintf("{%s}", body)
}
#' @export
c.Dictionary <- function(...) {
  # Concatenate several Dictionary objects into a new one.  vapply with
  # a 3-element list template gives a 3 x n matrix of list cells:
  #   row 1 = whether each dictionary is typed,
  #   row 2 = how many types each declares,
  #   row 3 = the type vectors themselves.
  x <- list(...)
  types <- vapply(x, function(.x) list(.x$typed, length(.x$types), .x$types),
                  vector("list", 3))
  # mixed typed/untyped input is an error
  if (length(unique(types[1, ])) > 1) {
    stop("Can only combine Dictionaries if all typed or all untyped.")
    # all typed or untyped
  } else {
    # untyped: just pool all items into a new untyped Dictionary
    if (!unlist(types[1, 1])) {
      Dictionary$new(x = unlist(lapply(x, "[[", "items"), FALSE))
      # typed
    } else {
      # typed dictionaries must declare the same number of types ...
      if (length(unique(types[2, ])) > 1) {
        stop("Can only combine typed Dictionaries of the same type(s).")
      } else {
        # ... and the same set of types (union size == declared count)
        if (length(unique(unlist(types[3, ]))) != types[2, 1][[1]]) {
          stop("Can only combine typed Dictionaries of the same type(s).")
        } else {
          Dictionary$new(x = unlist(lapply(x, "[[", "items"), FALSE),
                         types = unlist(types[3, 1]))
        }
      }
    }
  }
}
|
08e2980d854592d789356d7207fdabe2bb3b8671 | ddf10b92539f1becd4e7c4cc10677142b5e0b03f | /twobytwo.r | bc36087811fd2712a44ea990dabd3b71233da93c | [] | no_license | scchess/BioToolbox | b2525a30d075287078ede30ec51a34ef975aa07f | 6eec78e7dbb06b882d464a3e77c6d7b13c009da1 | refs/heads/master | 2021-06-12T12:47:33.610163 | 2016-11-22T05:16:23 | 2016-11-22T05:16:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,726 | r | twobytwo.r |
#CHAPTER 6 -- Two by two tables
ci <- function(m,v) {
A <- m-1.960*sqrt(v)
B <- m+1.960*sqrt(v)
round(cbind(A,B),5)
}
#typical summary statistics: odds ratio, relative risk and attributable risk
a <- 61
b <- 102
c <- 52
d <- 196
n <- a+b+c+d
n1 <- a+b
n2 <- c+d
m1 <- a+c
m2 <- b+d
or <- (a*d)/(b*c)
rr <- (a/(a+b))/(c/(c+d))
ar <- ((a+c)/n-(c/(c+d)))/((a+c)/n)
cbind(or,rr,ar)
#association measured by probabilities
#rows
prop.test(c(61,52),c(163,248),correct=FALSE)
#columns
prop.test(c(61,102),c(113,298),correct=FALSE)
chisq.test(matrix(c(61,52,102,196),2,2),correct=FALSE)
#Table 7.0 (toolbox text)
p1 <- a/(a+b)
p2 <- c/(c+d)
P1 <- a/(a+c)
P2 <- b/(b+d)
vp <- p1*(1-p1)/n1+p2*(1-p2)/n2
vP <- P1*(1-P1)/m1+P2*(1-P2)/m2
vrr <- rr^2*(1/a-1/n1+1/c-1/n2)
lvrr <- 1/a-1/n1+1/c-1/n2
vor <- or^2*(1/a+1/b+1/c+1/d)
lvor <- 1/a+1/b+1/c+1/d
var0 <- (1-ar)^2*(b+ar*(a+d))/(n*c)
lvar0 <- (b+ar*(a+d))/(n*c)
round(cbind(sqrt(vp),sqrt(vP),sqrt(vrr),sqrt(lvrr),
sqrt(vor),sqrt(lvor),sqrt(var0),sqrt(lvar0)),3)
#Table 8.0 (toolbox text)
p1 <- a/(a+b)
p2 <- c/(c+d)
P1 <- a/(a+c)
P2 <- b/(b+d)
rr <- (a/(a+b))/(c/(c+d))
lrr <- log(rr)
or <- (a/b)/(c/d)
lor <- log(or)
ar <- (a*d-b*c)/((a+c)*(c+d))
lar <- log(1-ar)
round(cbind(p1-p2,P1-P2,rr,lrr,or,lor,ar,lar),3)
#confidence intervals
rbind(round(c(p1-p2,ci(p1-p2,vp)),3),
round(c(P1-P2,ci(P1-P2,vP)),3),
round(c(exp(lrr),exp(ci(lrr,lvrr))),3),
round(c(lrr,ci(lrr,lvrr)),3),
round(c(exp(lor),exp(ci(lor,lvor))),3),
round(c(lor,ci(lor,lvor)),3),
round(c(1-exp(lar),1-exp(rev(ci(lar,lvar0)))),3),
round(c(lar,ci(lar,lvar0)),3))
#corrected
p <- (a+c)/(a+b+c+d)
v <- p*(1-p)*(1/n1+1/n2)
z <- (abs(p2-p1)-.5*(1/n1+1/n2))/sqrt(v)
cbind(p,v,z^2)
prop.test(c(a,c),c(n1,n2),correct=TRUE)$statistic
n*(abs(a*d-b*c)-n/2)^2/((a+b)*(c+d)*(a+c)*(b+d))
#corrected estimate
p <- c(1,4,6,4,1)/16
ex <- sum((0:4)*p)
c0 <- 1:5
p0 <- cumsum(p) #exact
p1 <- pnorm(c0-0.5-ex) #approximate
round(cbind(c0-1,p0,p1),3)
chisq.test(matrix(c(a,c,b,d),2,2),correct=FALSE)$statistic
prop.test(c(a,c),c(n1,n2),correct=FALSE)$statistic
n*(abs(a*d-b*c))^2/((a+b)*(c+d)*(a+c)*(b+d))
#Vietnam -- breast cancer
a <- 170
b <- 3222
c <- 126
d <- 2912
n <- a+b+c+d
#relative risk
(a/(a+b))/(c/(c+d))
#odds ratio
(a/b)/(c/d)
#variance of log-odds-ratio
1/a+1/b+1/c+1/d
#Adjustment #small sample size
a <- 2
b <- 23
c <- 6
d <- 22
n <- a+b+c+d
m <- matrix(c(a,c,b,d),2,2)
chisq.test(m,correct=FALSE)
chisq.test(m)
#Hypergeometric probability distributions
#tea tasting experiment
round(dhyper(0:4,4,4,4),3)
#keno
round(dhyper(0:8,20,60,8),6)
#Fisher's exact test
round(dhyper(0:8,8,20,8),4)
round(1-phyper(0:8,8,20,8),4)
#Adjusted/unadjusted -- Fisher's exact test
a <- 5
b <- 3
c <- 3
d <- 17
n <- a+b+c+d
m <- matrix(c(a,c,b,d),2,2)
chisq.test(m,correct=FALSE)
chisq.test(m)
fisher.test(m)
#twins
#no birth defects (data set 1)
a <- 18687
b <- 16093
c <- 19188
#birth defects (data set 2)
a <- 168
b <- 53
c <- 110
n <- a+b+c
p <- (2*a+b)/(2*n)
q <- 1-p
r <- 1-b/(2*p*q*n)
vr <- ((1-r)*(1-2*p*q*(1-r)-(1-4*p*q)*(1-r)^2))/(2*p*q*n)
vp <- (2*p*q*(1+r))/(2*n)
cbind(n,p,r,vr,vp)
ci(r,vr)
#duffy
a <- 8
b <- 72
c <- 409
n <- a+b+c
p <- (2*a+b)/(2*n)
q <- 1-p
chisq.test(c(a,b,c),p=c(p^2,2*p*(1-p),(1-p)^2))
r <- 1-b/(2*p*q*n)
X2 <- n*r^2
pvalue <- 1-pchisq(X2,1)
cbind(p,r,X2,pvalue) #degrees of freedom = 1)
#incomplete data -- zero in a 2 by 2 table
a <- 33
b <- 44
c <- 14
n <- a+b+c
d <- b*c/a
n+d
(a+b)*(a+c)/a
#false positive
d <- c(0.5,0.1,0.05,0.005,0.0005)
t <- (.95*d+.1*(1-d))
p <- .1*(1-d)/t
round(cbind(d,t,p),3)
|
edd5527dcba3b7267543834bff530178f02b41a8 | aaa910ac648efdf66a047cb562de9c61d8a5207e | /R/Databases/solution_dataccess.R | 1aaf1f9f34f23682b11abe9cc5fabdef61ae85c2 | [
"MIT"
] | permissive | rolandkrause/isb101 | 85cb5056e85e72989cbd52c0f3e9e43c124b2e7b | a59161e00454dee469dad271ac51b001929e5c50 | refs/heads/master | 2022-02-02T07:25:34.558058 | 2022-01-12T09:16:07 | 2022-01-12T09:16:07 | 20,016,041 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 798 | r | solution_dataccess.R | # Author: Roland Krause
library(biomaRt)
# Identify SNPs in KCNA2 from ENSEMBL BioMart
# (requires network access to www.ensembl.org)
genes.of.choice= c("KCNA2")
# Find ensembl id via hgnc name
listMarts(host="www.ensembl.org")
gene.biomart = useMart(biomart = "ENSEMBL_MART_ENSEMBL",
host="www.ensembl.org")
gene.set = useDataset(mart= gene.biomart, "hsapiens_gene_ensembl")
# Map the HGNC symbol(s) to Ensembl gene ids
gene.ids = getBM("ensembl_gene_id", filter="hgnc_symbol",
genes.of.choice, gene.set)
# Find SNPs
snp.biomart = useMart(biomart="ENSEMBL_MART_SNP",
host="www.ensembl.org")
snp.set = useDataset(mart= snp.biomart, "hsapiens_snp")
# All variants annotated to the gene(s), with their source database
snps = getBM(attributes = c("refsnp_id","refsnp_source"), filters="ensembl_gene",
values=gene.ids, snp.set)
# Write results to the working directory
write.table(snps, "snps.txt")
|
a543dacc51189d95d65542296b1d277624dc5fb8 | 434584f79283272e674845545e63e09872c57122 | /R/data.R | 94a69d11e362be1d79715d0cc8197e8702af9e18 | [
"CC-BY-4.0",
"MIT"
] | permissive | ollinevalainen/fmir | 6c796068ca1d12abf319a41914d523254cbeec5f | 4270c064cdb8198e6f0b265ea22159d84a217cb6 | refs/heads/master | 2023-03-05T01:53:34.774497 | 2021-02-19T17:18:38 | 2021-02-19T17:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 947 | r | data.R | #' Daily weather in Oulu, Finland, in 2010-2017
#'
#' A dataset downloaded from the Finnish Meteorological Institute's open data
#' API using **fmir**. Contains daily simple weather observations from Oulu,
#' Finland, covering the years 2010 to 2017. The data are made available by the
#' [Finnish Meteorological Institute](https://en.ilmatieteenlaitos.fi) and are
#' licensed under [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/).
#'
#' @format A data frame with 2922 rows and 9 variables:
#' \describe{
#' \item{place}{city name}
#' \item{location}{coordinates of the observation station}
#' \item{time}{date of observation}
#' \item{rrday}{precipitation rate}
#' \item{snow}{snow depth}
#' \item{tday}{average temperature, degrees Celsius}
#' \item{tg_pt12h_min}{minimum ground temperature over the past 12 hours (presumed from the FMI parameter code; verify against the FMI open data documentation)}
#' \item{tmax}{maximum temperature, degrees Celsius}
#' \item{tmin}{minimum temperature, degrees Celsius}
#' }
#'
"ouludaily10"
|
7986d10e2d104696c79f31b11389f34aeb3e9fdc | 95d1bf465ec819618870503489f71789a6eafad5 | /prepare.R | 0782896b57e77dded19f8655aa9da2b7d5ef5a8f | [] | no_license | mingkaijiang/exudation_analysis | ca73040e0965de58c5ccb0f46992cc78a9dff199 | e50f2010010a1bbae14b9feb6aea2ed3103eef53 | refs/heads/main | 2023-06-03T04:13:57.179954 | 2021-06-29T03:05:24 | 2021-06-29T03:05:24 | 378,017,809 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 608 | r | prepare.R | #### Create output folder
if(!dir.exists("output")) {
dir.create("output", showWarnings = FALSE)
}
#### Install packages
# pacman::p_load() installs any packages that are missing and then attaches
# them all in one call
if(!require(pacman))install.packages("pacman")
pacman::p_load(doBy,
ggplot2,
grid,
cowplot,
metafor,
mgcv,
weights,
meta,
igraph,
tidyverse,
network)
#### Sourcing all R files in the modules subdirectory
# Recursively collect every *.R file under function/ and source each one to
# load the project's helper functions
source_step1 <- dir("function", pattern="[.]R$", recursive = TRUE, full.names = TRUE)
for(z1 in source_step1)source(z1)
|
ea626a93c2d53f5587460ba4866e625e4a135089 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/PLNmodels/inst/doc/PLNPCA.R | 0fe41784fb660756043d8374bc4869f7025a2832 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,127 | r | PLNPCA.R | ## ----setup, include=FALSE-----------------------------------------------------
# R code extracted (purled) from the PLNPCA vignette of the PLNmodels package;
# the "## ----name----" lines below are the original vignette chunk markers.
knitr::opts_chunk$set(
screenshot.force = FALSE,
echo = TRUE,
rows.print = 5,
message = FALSE,
warning = FALSE)
## ----requirement--------------------------------------------------------------
library(PLNmodels)
library(ggplot2)
library(corrplot)
## ----data_load----------------------------------------------------------------
data(trichoptera)
trichoptera <- prepare_data(trichoptera$Abundance, trichoptera$Covariate)
## ----simple PLNPCA------------------------------------------------------------
# Fit a collection of PLN-PCA models with ranks 1 through 5 (offset-adjusted)
PCA_models <- PLNPCA(
Abundance ~ 1 + offset(log(Offset)),
data = trichoptera,
ranks = 1:5
)
## ----show nocov---------------------------------------------------------------
PCA_models
## ----collection criteria------------------------------------------------------
PCA_models$criteria %>% knitr::kable()
## ----convergence criteria-----------------------------------------------------
PCA_models$convergence %>% knitr::kable()
## ----plot nocov, fig.width=7, fig.height=5------------------------------------
plot(PCA_models)
## ----model extraction---------------------------------------------------------
# Select a single model from the collection by criterion or by rank
myPCA_ICL <- getBestModel(PCA_models, "ICL")
myPCA_BIC <- getModel(PCA_models, 3) # getBestModel(PCA_models, "BIC") is equivalent here
## ----map, fig.width=8, fig.height=8-------------------------------------------
plot(myPCA_ICL, ind_cols = trichoptera$Group)
## ----regression---------------------------------------------------------------
coef(myPCA_ICL) %>% head() %>% knitr::kable()
## ----sigma, fig.width=7-------------------------------------------------------
sigma(myPCA_ICL) %>% corrplot(is.corr = FALSE)
## ----rotation-----------------------------------------------------------------
myPCA_ICL$rotation %>% head() %>% knitr::kable()
## ----scores-------------------------------------------------------------------
myPCA_ICL$scores %>% head() %>% knitr::kable()
## ----show PLNPCAfit-----------------------------------------------------------
myPCA_ICL
## ----cov----------------------------------------------------------------------
# Same model family, now adjusting for weather covariates
PCA_models_cov <-
PLNPCA(
Abundance ~ 1 + offset(log(Offset)) + Temperature + Wind + Cloudiness,
data = trichoptera,
ranks = 1:4
)
## ----extraction cov, fig.width=7, fig.height=7--------------------------------
plot(PCA_models_cov)
myPCA_cov <- getBestModel(PCA_models_cov, "ICL")
## ----maps, fig.height=4, fig.width=7------------------------------------------
gridExtra::grid.arrange(
plot(myPCA_cov, map = "individual", ind_cols = trichoptera$Group, plot = FALSE),
plot(myPCA_cov, map = "variable", plot = FALSE),
ncol = 2
)
## ----fitted, fig.cap = "fitted value vs. observation", fig.dim=c(7,5)---------
# Fitted vs. observed abundances on log-log axes
data.frame(
fitted = as.vector(fitted(myPCA_cov)),
observed = as.vector(trichoptera$Abundance)
) %>%
ggplot(aes(x = observed, y = fitted)) +
geom_point(size = .5, alpha =.25 ) +
scale_x_log10(limits = c(1,1000)) +
scale_y_log10(limits = c(1,1000)) +
theme_bw() + annotation_logticks()
|
dab20270c8822d53dca089518e7a9392cedee1c4 | 40af152314d17add1b552576ca0ea981699fd0bb | /man/Kam_et_al_2003_Fig2.jpg.Rd | 773e5a2f683435176e5e99ce15b9e71c1bddbfd5 | [] | no_license | cran/metagear | a57c15c5c897fe2546c8bef4e923623630261c32 | 25aa8967ed0a109811ede0b34d40d741677a9c03 | refs/heads/master | 2021-07-18T03:41:00.120282 | 2021-02-15T14:40:02 | 2021-02-15T14:40:02 | 36,018,363 | 27 | 8 | null | null | null | null | UTF-8 | R | false | true | 706 | rd | Kam_et_al_2003_Fig2.jpg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metagear_data.R
\docType{data}
\name{Kam_et_al_2003_Fig2.jpg}
\alias{Kam_et_al_2003_Fig2.jpg}
\title{An example image of a scatterplot figure}
\format{
A raw jpg-formatted image
}
\description{
A jpg image of a scatterplot from Figure 2 of Kam, M., Cohen-Gross, S.,
Khokhlova, I.S., Degen, A.A. and Geffen, E. 2003. Average daily metabolic
rate, reproduction and energy allocation during lactation in the Sundevall
Jird Meriones crassus. Functional Ecology 17:496-503.
}
\note{
\strong{How to use}\cr\cr
\code{readImage(system.file("images", "Kam_et_al_2003_Fig2.jpg", package = "metagear"))}
}
\keyword{datasets}
|
401469a0a0f3f654824b81917f0c4cba1d6d29ab | eb4a5498253bb609c775153a93ef4e221e9c3e40 | /pop_gen_analysis/Figures/mappies_interactive.R | baf94a81aaec7bf09a20b379f22f398c57100dbb | [] | no_license | almccombs/MolyPhyloProject | 73a0fc1eb90f4c30714f7efdbf555f5f8e80c634 | 707d0fc4ca5b5058d93d78426030f7889b027dd5 | refs/heads/main | 2023-04-24T11:08:33.961297 | 2021-04-29T19:50:21 | 2021-04-29T19:50:21 | 351,596,276 | 0 | 1 | null | 2021-04-24T19:57:44 | 2021-03-25T22:45:43 | HTML | UTF-8 | R | false | false | 1,135 | r | mappies_interactive.R | library(sp)
library(RgoogleMaps)
library(mapplots) #for draw.pie function
# Snapshot graphics settings so they can be restored before plotting the map
op <- par()
# Per-site population means; expects Longitude/Latitude columns plus the
# cluster proportions V1-V3 (from the LEA analysis)
pop.means <- read.csv("analysis/LEA_analysis/PopMeans.csv", header = T)
pies <- as.matrix(pop.means[,c("V1","V2","V3")])
# n is taken from column 2 of PopMeans.csv -- presumably a per-site sample
# size used to scale the pie radii below (verify against the csv layout)
n <- as.vector(pop.means[,2])
# Promote the data frame to a SpatialPointsDataFrame in WGS84 lon/lat
coords.sp <- pop.means
coordinates(coords.sp) <- c("Longitude", "Latitude")
proj4string(coords.sp) <- CRS("+proj=longlat +datum=WGS84")
#Make smaller submaps interactively
# select.spatial() lets the user click points on the plot; G holds the row
# indices of the selected sites
G <- select.spatial(coords.sp, digitize = F)
group1 <- pop.means[G,]
group1
# Bounding box and midpoint of the selected group, used to centre the map
xminG1 <- min(group1$Longitude)
xmaxG1 <- max(group1$Longitude)
xmidG1 <- (xminG1 + xmaxG1)/2
yminG1 <- min(group1$Latitude)
ymaxG1 <- max(group1$Latitude)
ymidG1 <- (yminG1 + ymaxG1)/2
coords.spG1 <- group1
coordinates(coords.spG1) <- c("Longitude", "Latitude")
proj4string(coords.spG1) <- CRS("+proj=longlat +datum=WGS84")
# Fetch a static Google terrain map centred on the selected sites
group1.map <- GetMap(center = c(ymidG1, xmidG1), zoom = 12, maptype = "terrain")
par(op)
# Convert lon/lat to pixel coordinates of the fetched map tile.
# NOTE(review): the draw.pie() call below subsets z by G but uses the full n
# vector for the radii -- confirm the intended pairing.
googG1 <- LatLon2XY.centered(group1.map, coords.spG1@coords[,2], coords.spG1@coords[,1], zoom = 12)
PlotOnStaticMap(group1.map)
draw.pie(z = pies[G,], x = googG1$newX, y = googG1$newY, radius = (sqrt(n/pi)*10), labels = "")
|
ee090d387f1ddfc49fcca8ce331a5c3c5d37c514 | 23daa3355dc0b51b9daa13ac3f13b8e71b4c77bf | /R code/NomalizedFloor_TotalUnits.R | b7949b3a344513cfdac35e0e24ce3baa92269b43 | [] | no_license | wifimapping/cusp_sonyc_wifi | 5e849d83a2798574e79d8f01216dcd739957691e | e86535e1939d119ef9dd42536c6515a25d23672c | refs/heads/master | 2020-05-29T11:06:50.584226 | 2015-08-01T23:01:16 | 2015-08-01T23:01:16 | 56,330,900 | 0 | 0 | null | 2016-04-15T15:35:43 | 2016-04-15T15:35:43 | null | UTF-8 | R | false | false | 319 | r | NomalizedFloor_TotalUnits.R | test<-read.csv(Users/xq277/Desktop/ct1522.csv)
# Cap each building's floor contribution at 3: for buildings taller than
# 3 floors, scale UnitsTotal by 3/NumFloors; shorter buildings are unchanged.
# The input table (census tract ct1522) must provide the columns fid,
# NumFloors and UnitsTotal.
# NOTE(review): the original first lines passed unquoted paths to read.csv()
# (a runtime error) and then fell back to an in-memory `ct1522` object; the
# quoted path below is the intended call -- adjust it to wherever ct1522.csv
# lives locally.
test <- read.csv("ct1522.csv")
head(test)
# Vectorized replacement for the original element-wise loop (whose else
# branch was a no-op self-assignment).
scale.needed <- test$NumFloors > 3.00
test$UnitsTotal[scale.needed] <- 3/test$NumFloors[scale.needed]*test$UnitsTotal[scale.needed]
head(test)
sum(test$UnitsTotal) |
56bc4e701be23a8c0cc84dffd4b2ca9fbcfa3d99 | 289cc280222cc40f32686dc42c2ee68891e452ed | /man/hzar.plot.fzCline.Rd | 77d4d989b5d8761466c3afcda86f9118b152fdd9 | [] | no_license | GrahamDB/hzar | 6cd68626d54103f6be26c5a80c41d45ea267eb9a | fe52dfc553e69dd5367a8735b687992231f72e18 | refs/heads/devel_v0.2 | 2023-05-25T16:51:49.440605 | 2019-10-23T18:39:57 | 2019-10-23T18:39:57 | 175,032,847 | 1 | 1 | null | 2023-05-16T23:59:44 | 2019-03-11T15:46:00 | R | UTF-8 | R | false | false | 2,097 | rd | hzar.plot.fzCline.Rd | \name{hzar.plot.fzCline}
\alias{hzar.plot.fzCline}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot the 95\% credible cline region for the given locus model.
}
\description{
Plots the maximum likelihood cline and observed frequency data
over the associated fuzzy cline region. The default region
is the 95\% credible cline region.
}
\usage{
hzar.plot.fzCline(dataGroup,
fzCline = hzar.getCredParamRed(dataGroup),
type = "p", pch = "+",
col = "black", fzCol = "gray", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataGroup}{
The hzar.dataGroup object for which to generate a fuzzy cline.
Defaults to a 95\% credible interval region.
}
\item{fzCline}{
The hzar.fzCline object to plot.
}
\item{type}{
The type parameter to pass to hzar.plot.obsData.
}
\item{pch}{
The plotting character to pass to hzar.plot.obsData.
}
\item{col}{
The color to plot the maximum likelihood cline and the observed
frequencies.
}
\item{fzCol}{
The color to fill the fuzzy cline region with.
}
\item{\dots}{
Additional parameters to pass to the initial call to plot.
}
}
% \details{
% %% ~~ If necessary, more details than the description above ~~
% }
% \value{
% %% ~Describe the value returned
% %% If it is a LIST, use
% %% \item{comp1 }{Description of 'comp1'}
% %% \item{comp2 }{Description of 'comp2'}
% %% ...
% }
% \references{
% %% ~put references to the literature/web site here ~
% }
\author{
Graham Derryberry \email{asterion@alum.mit.edu}
}
% \note{
% %% ~~further notes~~
% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{hzar.getCredParamRed}}
\code{\link{hzar.make.fzCline}}
\code{\link{plot}}
\code{\link{hzar.plot.obsData}}
\code{\link{hzar.plot.cline}}
}
\examples{
##TODO
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
9a7b911dd32f734ef48688140462203c25cdc689 | 717c5e4b503c3cbc0349d359885253b8f98fca61 | /dbinomTest.r | 38f508c4dce8f716c7e0952a9ecef959f6f48394 | [] | no_license | kwende/RScripts | b28f67e1b3c20dee974efdc57e482bc98080e9c4 | ea8773aaf6cea0eb27abbdeaad8606aa729f2d36 | refs/heads/master | 2016-09-06T11:47:49.956693 | 2014-12-13T22:07:59 | 2014-12-13T22:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 121 | r | dbinomTest.r | #for plotting
# Binomial(n, 0.5) pmf over its full support 0..n, for the plot below.
# The number of trials is max(xVec) = length(xVec) - 1, not length(xVec):
# the original used size = length(xVec), which evaluated a size-2 binomial
# only at x = 0 and 1, so the plotted probabilities (0.25, 0.5) silently
# omitted x = 2 and did not sum to 1.
xVec <- 0:1
yVec <- dbinom(xVec, prob = 0.5, size = length(xVec) - 1, log = FALSE)
plot(xVec,yVec,xlab="X",ylab="Prob") |
0f548bc4cc2006d43464e3a59458c5fd8bb86054 | a201458914244daaae3ce779aa0806819954e79f | /man/lapmod_index.Rd | 6adcadaf373488111d2bda29390769f249d6fb32 | [
"BSD-2-Clause"
] | permissive | dpmcsuss/rlapjv | 6495a480431eceae28a086309e786fc649293180 | b2e6085dd77be0da10b3d0c0c959ed196c9019a5 | refs/heads/master | 2021-06-19T04:50:14.027935 | 2021-01-04T15:49:10 | 2021-01-04T15:49:10 | 150,743,287 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 902 | rd | lapmod_index.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rlapjv.R
\name{lapmod_index}
\alias{lapmod_index}
\title{Solves the linear assignment problem using the LAPMOD algorithm}
\usage{
lapmod_index(n, cc, ii, kk, maximize = FALSE)
}
\arguments{
\item{n}{number of rows in the cost matrix}
\item{cc}{vector of all finite elements of the assignment cost matrix}
\item{ii}{vector of indices of the zero indexed row starts in cc. The following must hold
ii[1] = 0 and ii[n+2] = length(cc).}
\item{kk}{0-based column numbers for each finite cost in the matrix,
i.e., kk must be in 0:(nrow(.)-1).}
\item{maximize}{If FALSE (default) then costs are minimized and if TRUE the
costs are maximized}
}
\value{
The assignment of rows to columns as an integer vector
}
\description{
Finds the assignment of rows to columns that minimizes (or, with
\code{maximize = TRUE}, maximizes) the total cost using the sparse LAPMOD variant of the Jonker-Volgenant algorithm.
}
|
d8cf70096286ab989fc8cc25ec3aeacf1f3dbde3 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.developer.tools/man/codeguruprofiler_list_profile_times.Rd | ff9e3cccfd4ecb721603aa375749f0e5f14a1633 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,560 | rd | codeguruprofiler_list_profile_times.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codeguruprofiler_operations.R
\name{codeguruprofiler_list_profile_times}
\alias{codeguruprofiler_list_profile_times}
\title{Lists the start times of the available aggregated profiles of a
profiling group for an aggregation period within the specified time
range}
\usage{
codeguruprofiler_list_profile_times(
endTime,
maxResults = NULL,
nextToken = NULL,
orderBy = NULL,
period,
profilingGroupName,
startTime
)
}
\arguments{
\item{endTime}{[required] The end time of the time range from which to list the profiles.}
\item{maxResults}{The maximum number of profile time results returned by
\code{\link[=codeguruprofiler_list_profile_times]{list_profile_times}} in paginated
output. When this parameter is used,
\code{\link[=codeguruprofiler_list_profile_times]{list_profile_times}} only returns
\code{maxResults} results in a single page with a \code{nextToken} response
element. The remaining results of the initial request can be seen by
sending another
\code{\link[=codeguruprofiler_list_profile_times]{list_profile_times}} request with
the returned \code{nextToken} value.}
\item{nextToken}{The \code{nextToken} value returned from a previous paginated
\code{\link[=codeguruprofiler_list_profile_times]{list_profile_times}} request
where \code{maxResults} was used and the results exceeded the value of that
parameter. Pagination continues from the end of the previous results
that returned the \code{nextToken} value.
This token should be treated as an opaque identifier that is only used
to retrieve the next items in a list and not for other programmatic
purposes.}
\item{orderBy}{The order (ascending or descending by start time of the profile) to use
when listing profiles. Defaults to \code{TIMESTAMP_DESCENDING}.}
\item{period}{[required] The aggregation period. This specifies the period during which an
aggregation profile collects posted agent profiles for a profiling
group. There are 3 valid values.
\itemize{
\item \code{P1D} — 1 day
\item \code{PT1H} — 1 hour
\item \code{PT5M} — 5 minutes
}}
\item{profilingGroupName}{[required] The name of the profiling group.}
\item{startTime}{[required] The start time of the time range from which to list the profiles.}
}
\description{
Lists the start times of the available aggregated profiles of a profiling group for an aggregation period within the specified time range.
See \url{https://www.paws-r-sdk.com/docs/codeguruprofiler_list_profile_times/} for full documentation.
}
\keyword{internal}
|
7a53674a3d28d7b13c7cfd99fde6b388afa7b243 | e6d60ff31a0f58fcdd232cbadde2ece1ba212c3f | /R/getVals.R | 523ffdb5d4866173f4c809ddad2b142691de4202 | [] | no_license | allanhicks/Ensemble | 4994f3ac383ec7ff4547aa493d6964b526eeb7f2 | c7704d5d8469ef16af87934e5577629f895ca8d0 | refs/heads/master | 2021-05-21T11:03:39.969024 | 2020-07-21T16:40:48 | 2020-07-21T16:40:48 | 65,333,865 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | getVals.R | #' Gets the mean and standard deviation of parameters from SS output
#'
#' Subsets a parameter (or derived-quantity) matrix produced by
#' \code{r4ss::SS_output} by row name. A single \code{param} is matched as a
#' fixed substring against the row names (so every row whose name contains
#' the string is returned); multiple \code{param} values are matched exactly.
#'
#' @param x The matrix of parameters or derived parameters from SS_output.
#' @param param Character vector of parameter names; a single value is a
#'   fixed substring pattern, multiple values are exact row names.
#'   \code{NULL} (default) selects every row.
#' @param cols Columns to return (value, standard deviation, bounds);
#'   \code{NULL} selects every column.
#' @return The requested rows and columns of \code{x}; drops to a named
#'   vector when only one row or one column is selected.
#' @export
getVals.fn <- function(x, param=NULL, cols=c("Value","StdDev","Min","Max")) {
  if (is.null(cols)) {
    cols <- seq_len(ncol(x))
  }
  if (is.null(param)) {
    param <- rownames(x)
  }
  if (length(param) > 1) {
    out <- x[param, cols]
    if (length(cols) == 1) {
      # Single-column matrix indexing drops to a vector; restore the
      # parameter names so results stay identifiable.
      names(out) <- param
    }
  }
  if (length(param) == 1) {
    # Fixed-string match (was fixed=T; TRUE spelled out for safety).
    out <- x[grep(param, rownames(x), fixed = TRUE), cols]
  }
  return(out)
}
|
509105a87d329bd44970c71ece89e410f6989d57 | e93a5db79e45b733a0ed9cf27828509c094e77f3 | /Basic_plots/AreaPlots.R | 4b6a4ac6a770335579342282b7776da9d6a387e8 | [] | no_license | shanl33/slee205_Honours | 225c3dcf5faaa52dafe63ad1980cd3af04118b7e | 31a2fc9de48fd3dc431c9fd2a03495b5f39f148a | refs/heads/master | 2020-05-23T07:36:06.500103 | 2019-01-14T21:02:15 | 2019-01-14T21:02:15 | 84,038,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,914 | r | AreaPlots.R | install.packages("productplots")
library(productplots)
library(ggplot2)
library(dplyr)
library(ggvis)
library(plotly)
# If load ggmosaic then will need to re-install productplots
# Currently cannot get ggmosaic working
# NOTE(review): install.packages() calls are left in for convenience but
# should be run once interactively rather than on every source of this file
install.packages("ggmosaic")
library(ggmosaic)
# See https://cran.r-project.org/web/packages/ggmosaic/vignettes/ggmosaic.html
data("happy")
str(happy)
# See: https://github.com/hadley/productplots/tree/master/R
# 1 var -------------------------------------------------------------------
prodplot(happy, ~ happy, "hspine")
# 1 var and coords --------------------------------------------------------
# Compare coords for below
prodcalc(happy, ~ happy, "hspine")
prodcalc(happy, ~ happy, "hbar")
# Coordinates: l, r, b, t (left, right, bottom, top for each rectangle)
# Coords can be passed to ggplot2 to re-create graphs
hspine_coords <- prodcalc(happy, ~ happy, "hspine")
str(hspine_coords)
ggplot(hspine_coords) + geom_rect(aes(xmin=l, xmax=r, ymin=b, ymax=t, fill=happy))
# 2 vars ------------------------------------------------------------------
# Compare: Order of vars determines which divider is applied to which
# The direction of the divider (h or v) decides which axes the var labels correspond to
# 'h' horizontal dividers divide up the horizontal x-axis
# Change hbar to hspine to see effect
prodplot(happy, ~ sex + happy, c("vspine", "hspine")) + aes(fill=sex)
prodplot(happy, ~ happy + sex, c("vspine", "hbar")) + aes(fill=happy)
prodplot(happy, ~ happy + sex, c("hbar", "vspine")) + aes(fill=happy)
# Below same as: prodplot(happy, ~ sex + happy, c("vspine", "hspine")) + aes(fill=sex)
prodplot(happy, ~ sex + happy, mosaic()) + aes(fill=sex)
# stacked() best with only 2 vars (like a trellis plot)
prodplot(happy, ~ sex + happy, stacked())
# 2 vars + ggplot2 + plotly -------------------------------------------------------
prodplot(happy, ~ marital + happy, c("vspine", "hspine"), na.rm = TRUE) + aes(fill=marital)
mosaic2_coords <- prodcalc(happy, ~ marital + happy, c("vspine", "hspine"), na.rm = TRUE)
str(mosaic2_coords) # level = 1 or 2 since 2 vars involved. Need to plot only level 2
# Rebuild the mosaic manually from the level-2 rectangles so plotly can
# attach tooltips; the (near-white) outline colour encodes happiness
p <- ggplot(mosaic2_coords[mosaic2_coords$level==2,]) +
geom_rect(aes(xmin=l, xmax=r, ymin=b, ymax=t, fill=marital, color=happy)) +
scale_color_discrete (c=0, l=100) +
theme(panel.background = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank(),
axis.text.y = element_text(),
axis.text.x = element_blank())
ggplotly(p, tooltip = c("fill", "color"))
# With geom_mosaic (ggmosaic package) currently does NOT work
ggplot(happy) +
geom_mosaic(aes(x = product(happy, sex)))
# 2 vars and coords and ggvis interaction ---------------------------------
# using mosaic2_coords from above
mosaic2_coords[mosaic2_coords$level==2,] %>%
ggvis(x=~l, x2=~r, y=~b, y2=~t, fill=~marital) %>%
layer_rects() %>%
add_tooltip(function(mosaic2_coords) mosaic2_coords[,1]) %>%
add_tooltip(function(mosaic2_coords) mosaic2_coords$marital)
# ggvis will only show one tooltip at a time and will not show 'happy' status since it's not 'connected' to the coords.
# 3 vars ------------------------------------------------------------------
# Conditional: distn of happiness and gender, given their health status
# mosaic usually uses vertical spines first by default
prodplot(happy, ~ happy + sex | health, mosaic("h")) + aes (fill=happy)
# different to below: distn of happiness, gender and health status
# (widths not fixed, vary according to health status)
prodplot(happy, ~ happy + sex + health, mosaic("h")) + aes (fill=happy)
prodplot(happy, ~ marital + sex + happy, stacked()) + aes(fill = marital)
prodplot(happy, ~ marital + sex + happy, stacked(), level = 3)
# level = 3 is complete plot, level = 2 is sex + happy, level = 1 is happy only
prodplot(happy, ~ happy + marital + sex, c("vspine", "hspine", "hspine")) + aes(fill = happy)
# 'level' shows how the plot was built up: partition by sex first
prodplot(happy, ~ happy + marital + sex, c("vspine", "hspine", "hspine"), level = 1)
# Then sex and marital status
prodplot(happy, ~ happy + marital + sex, c("vspine", "hspine", "hspine"), level = 2)
# Different order of nesting to above, fill color = last level/first variable is best
prodplot(happy, ~ marital + happy + sex, c("hspine", "vspine", "hspine")) + aes(fill = marital)
# Level 1 is same as above, but not level 2
prodplot(happy, ~ marital + happy + sex, c("hspine", "vspine", "hspine"), level = 2)
# Sex layered last
prodplot(happy, ~ sex + happy + marital, c("hspine", "vspine", "hspine")) + aes(fill = sex)
prodplot(happy, ~ sex + happy + marital, c("hspine", "vspine", "hspine"), level = 2) + aes(fill=happy)
# Default is to NOT remove NA's (na.rm=FALSE)
prodplot(happy, ~ happy + marital, mosaic(), na.rm = TRUE) + aes(fill = happy)
|
39d900cdc154a17b7094a18a9035d3efe2848885 | 532cb3eac25d1cdb2860ec1d90a5275928315c90 | /man/idmTPreg-package.Rd | 4dde58c92116374a88101d344fde99340599aa26 | [] | no_license | cran/idmTPreg | 37e1ad9c7b0630cefcbc99c2a56e68b94509f3da | 8b5b523b9ac423380a5841c9cd85db9f7ab5766f | refs/heads/master | 2021-01-01T19:33:21.573327 | 2018-02-27T12:31:06 | 2018-02-27T12:31:06 | 98,613,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 595 | rd | idmTPreg-package.Rd | \name{idmTPreg-package}
\alias{idmTPreg-package}
\alias{idmTPreg}
\docType{package}
\title{
\packageTitle{idmTPreg}
}
\description{
\packageDescription{idmTPreg}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{idmTPreg}
\packageIndices{idmTPreg}
}
\author{
\packageAuthor{idmTPreg}
Maintainer: \packageMaintainer{idmTPreg}
}
\references{
Azarang, L. Scheike, TH. and de Una-Alvarez, J. (2017) \emph{Direct modeling of regression effects for transition probabilities
in the progressive illness-death model} Statistics in Medicine \bold{36}, \eqn{1964-1976}.
}
|
fc1fdd3f155fc0369cf430f205072297c5aa76d8 | 0e1a97fe8fc57807f6bde78e79fba4a1a493d056 | /man/refine.MESH.2D.Rd | e8d4abab4503c9af367070dfed83cc09a7cbd58b | [] | no_license | laurasangalli/rFEM | a5ae96fbfb39b1949c2b21edd1fcc09286e31d43 | 7d38d2303c1abad04e56996de3a21f2a319f4fbb | refs/heads/master | 2021-01-11T17:07:22.534167 | 2015-07-18T15:36:35 | 2015-07-18T15:39:34 | 40,311,292 | 1 | 0 | null | 2015-08-06T15:08:05 | 2015-08-06T15:08:05 | null | UTF-8 | R | false | false | 1,656 | rd | refine.MESH.2D.Rd | \name{refine.MESH.2D}
\alias{refine.MESH.2D}
\title{Refine the triangulation}
\usage{
refine.MESH.2D(mesh, minimum_angle = NA, maximum_area = NA, delaunay = FALSE, verbosity = 0)
}
\arguments{
\item{mesh}{A TRIMESH2D object created through \code{\link{create.MESH.2D}}.}
\item{minimum_angle}{A numeric specifying the minimum angle of the triangles in the output triangulation.}
\item{maximum_area}{A numeric specifying the maximum area of the triangles in the output triangulation.}
\item{delaunay}{If \code{TRUE} the output triangulation is a Delaunay triangulation}
\item{verbosity}{A numeric that can assume values \code{0,1,2}, that specifies the output verbosity in the triangulation process.}
}
\value{
An object of class \code{TRIMESH2D}.}
\description{
Refines a TRIMESH2D object following the constraints imposed by the input parameters. This function is based on the Triangle library (\url{http://www.cs.cmu.edu/~quake/triangle.html}). The triangulation is a constrained conforming Delaunay triangulation in which additional vertices, called Steiner points, can be inserted into segments to improve the quality of the triangulation.
}
\examples{
## Creates an object TRIMESH2D with a concavity and second order nodes
mesh_coarse<-create.MESH.2D(nodes=rbind(c(0, 0), c(0, 1), c(0.5, 0.5), c(1, 1), c(1, 0)),
segments=rbind(c(1, 2), c(2, 3), c(3, 4), c(4, 5), c(5, 1)))
## Plot it
plot(mesh_coarse)
## Refines the triangulation specified in the \code{mesh_coarse} object
mesh<-refine.MESH.2D(mesh_coarse,maximum_area = 0.005, delaunay = TRUE)
## Plot the refined mesh
plot(mesh)
} |
cd4f24fff8c49fb5f35890765ac7fd2778839ca7 | 3ca9d1a8012080c287f26f58e42a76f13a45cd9d | /code/04-ar.R | 1c4beb9c9d2b6318639b292c4f83e8e9dc52ccfa | [] | no_license | dincerti/political-instability | 3734ca06b3aa8de31c1183db285e68bc6595ee8c | 6702581797ee30c46879d143fcd85e803d576c79 | refs/heads/master | 2023-07-09T23:32:18.519999 | 2023-07-08T09:41:24 | 2023-07-08T09:41:24 | 81,411,595 | 1 | 2 | null | 2018-07-27T04:55:22 | 2017-02-09T05:11:56 | R | UTF-8 | R | false | false | 9,864 | r | 04-ar.R | rm(list = ls())
# Builds (cumulative) abnormal-return tables around regime-change events
# (coups and assassinations), written out as text/LaTeX tables.
library("xtable")
library("data.table")
library("ggplot2")
library("scales")
# These RData files supply the objects used below: `event`, `regime.change`,
# and the event-study results list `rc.es` (presumed split -- verify)
load("data/data-clean.RData")
load("output/regime-change-event-study.RData")
theme_set(theme_bw())
# func.R provides the helpers used below: ar_table() and myprint.xtable()
source("code/func.R")
# ABNORMAL RETURNS TABLES ------------------------------------------------------
# Add authoritarian versus democratic shift information to data
auth_dem <- event[type == "Coup" | type == "Assassination" |
type == "Resignation",
.(country, ticker, stock_date, `auth shift`, `dem shift`)]
regime.change <- merge(regime.change, auth_dem,
by = c("country", "ticker", "stock_date"))
# coups
coup.index <- which(regime.change$type == "Coup")
artable.coups <- ar_table(td = rc.es$td, ar = rc.es$ar.treat[, coup.index],
sigma = rc.es$sigma.treat[coup.index],
dtr = rc.es$dtr.treat[coup.index],
country = regime.change[coup.index, country],
date = regime.change[coup.index, stock_date],
coup = FALSE)
myprint.xtable(artable.coups$car, file = "tables/artable-coups-car.txt")
myprint.xtable(artable.coups$car.mean, file = "tables/artable-coups-car-mean.txt")
# coups - insignificant 7 day pre-trends only
# (drops the 1977-10-20 event, whose 7-day pre-trend is significant)
coup.index.pre <- which(regime.change$type == "Coup" & regime.change$stock_date != "1977-10-20")
artable.coups.pre <- ar_table(td = rc.es$td, ar = rc.es$ar.treat[, coup.index.pre],
sigma = rc.es$sigma.treat[coup.index.pre],
dtr = rc.es$dtr.treat[coup.index.pre],
country = regime.change[coup.index.pre, country],
date = regime.change[coup.index.pre, stock_date],
coup = FALSE)
myprint.xtable(artable.coups.pre$car, file = "tables/artable-coups-car-pre.txt")
myprint.xtable(artable.coups.pre$car.mean, file = "tables/artable-coups-car-mean-pre.txt")
# assassinations
ass.index <- which(regime.change$type == "Assassination")
artable.ass <- ar_table(td = rc.es$td, ar = rc.es$ar.treat[, ass.index],
sigma = rc.es$sigma.treat[ass.index],
dtr = rc.es$dtr.treat[ass.index],
country = regime.change[ass.index, country],
date = regime.change[ass.index, stock_date])
myprint.xtable(artable.ass$car, file = "tables/artable-ass-car.txt")
myprint.xtable(artable.ass$car.mean, file = "tables/artable-ass-car-mean.txt")
# assassinations - insignificant 7 day pre-trends only
# (drops the 1963-11-22 and 1984-11-05 events with significant pre-trends)
ass.index.pre <- which(regime.change$type == "Assassination" &
regime.change$stock_date != "1963-11-22" &
regime.change$stock_date != "1984-11-05")
artable.ass.pre <- ar_table(td = rc.es$td, ar = rc.es$ar.treat[, ass.index.pre],
sigma = rc.es$sigma.treat[ass.index.pre],
dtr = rc.es$dtr.treat[ass.index.pre],
country = regime.change[ass.index.pre, country],
date = regime.change[ass.index.pre, stock_date])
myprint.xtable(artable.ass.pre$car, file = "tables/artable-ass-car-pre.txt")
myprint.xtable(artable.ass.pre$car.mean, file = "tables/artable-ass-car-mean-pre.txt")
# resignations
res.index <- which(regime.change$type == "Resignation")
artable.res <- ar_table(td = rc.es$td, ar = rc.es$ar.treat[, res.index],
sigma = rc.es$sigma.treat[res.index],
dtr = rc.es$dtr.treat[res.index],
country = regime.change[res.index, country],
date = regime.change[res.index, stock_date])
myprint.xtable(artable.res$car, file = "tables/artable-res-car.txt")
myprint.xtable(artable.res$car.mean, file = "tables/artable-res-car-mean.txt")
# resignations - insignificant 7 day pre-trends only
res.index.pre <- which(regime.change$type == "Resignation" &
regime.change$stock_date != "1982-06-18" &
regime.change$stock_date != "2001-12-20" &
regime.change$stock_date != "2011-01-31")
artable.res.pre <- ar_table(td = rc.es$td, ar = rc.es$ar.treat[, res.index.pre],
sigma = rc.es$sigma.treat[res.index.pre],
dtr = rc.es$dtr.treat[res.index.pre],
country = regime.change[res.index.pre, country],
date = regime.change[res.index.pre, stock_date])
myprint.xtable(artable.res.pre$car, file = "tables/artable-res-car-pre.txt")
myprint.xtable(artable.res.pre$car.mean, file = "tables/artable-res-car-mean-pre.txt")
# Authoritarian
auth.index <- which(regime.change$`auth shift` == 1)
artable.auth <- ar_table(td = rc.es.auth$td, ar = rc.es.auth$ar.treat[, auth.index],
sigma = rc.es.auth$sigma.treat[auth.index],
dtr = rc.es.auth$dtr.treat[auth.index],
country = regime.change[auth.index, country],
date = regime.change[auth.index, stock_date],
coup = FALSE)
myprint.xtable(artable.auth$car, file = "tables/artable-auth-car.txt")
myprint.xtable(artable.auth$car.mean, file = "tables/artable-auth-car-mean.txt")
# Democratic
dem.index <- which(regime.change$`dem shift` == 1)
artable.dem <- ar_table(td = rc.es.dem$td, ar = rc.es.dem$ar.treat[, dem.index],
sigma = rc.es.dem$sigma.treat[dem.index],
dtr = rc.es.dem$dtr.treat[dem.index],
country = regime.change[dem.index, country],
date = regime.change[dem.index, stock_date],
coup = FALSE)
myprint.xtable(artable.dem$car, file = "tables/artable-dem-car.txt")
myprint.xtable(artable.dem$car.mean, file = "tables/artable-dem-car-mean.txt")
# VENEZUELA & TURKEY COUP-ATTEMPT EVENT STUDIES --------------------------------
# The three case studies below repeat the same recipe (single-index event
# study -> 95% CI bounds -> pointrange plot -> pdf), so the shared steps are
# factored into two helpers; only the per-event data preparation differs.
# NOTE(review): theme_set(theme_bw()) at the top of the script is overridden
# here because each plot adds theme_classic() explicitly -- confirm intended.

# Add 95% confidence bounds (lar/uar) around the abnormal returns.
# `ar.treat` is a data.table, so `:=` adds the columns in place; the
# event-study object is returned invisibly for convenience.
add_ar_bounds <- function(es) {
  es$ar.treat[, lar := ar - qnorm(.975) * es$sigma.treat]
  es$ar.treat[, uar := ar + qnorm(.975) * es$sigma.treat]
  invisible(es)
}

# Pointrange plot of abnormal returns +/- 10 trading days around the event.
# `ylimits = NULL` (the default) lets ggplot pick the y range, matching the
# original per-event settings; pass c(-12, 12) for a fixed scale.
plot_ar <- function(es, ylimits = NULL) {
  ggplot(es$ar.treat[abs(td) <= 10], aes(x = td, y = ar)) +
    geom_hline(aes(yintercept = 0), linetype = 2, color = "grey") +
    geom_vline(aes(xintercept = 0), linetype = 2, color = "grey") +
    geom_pointrange(aes(ymin = lar, ymax = uar), size = .3) +
    xlab("Trading days") +
    ylab("Abnormal Returns (%)") +
    scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
    scale_y_continuous(limits = ylimits,
                       breaks = scales::pretty_breaks(n = 10)) +
    theme_classic()
}

# VENEZUELA PARTIAL COUP -------------------------------------------------------
ven <- event[ticker == "_IBCD" & stock_date == "2002-04-12"]
ven.es <- event_study(ticker = index$ticker, date = index$date, dr = index$dr,
                      event_ticker = ven$ticker,
                      event_window = 20, estimation_window = 200,
                      event_date = ven$stock_date, model = "constant", control = FALSE)
add_ar_bounds(ven.es)
p <- plot_ar(ven.es)
print(p)
ggsave("figs/venezuela_coup_attempt_2002.pdf", p, height = 5, width = 7)

# VENEZUELA 1992 COUP ATTEMPT --------------------------------------------------
# Change event day to 11/30 (the next trading day after the 11/27 coup date).
event <- event[, stock_date := dplyr::if_else(stock_date == "1992-11-27",
                                              as.Date("1992-11-30"), stock_date)]
ven92 <- event[ticker == "_VE1" & stock_date == "1992-11-30"]
ven92.es <- event_study(ticker = index$ticker, date = index$date, dr = index$dr,
                        event_ticker = ven92$ticker,
                        event_window = 20, estimation_window = 200,
                        event_date = ven92$stock_date, model = "constant", control = FALSE)
add_ar_bounds(ven92.es)
p <- plot_ar(ven92.es, ylimits = c(-12, 12))
print(p)
ggsave("figs/venezuela_coup_attempt_1992.pdf", p, height = 5, width = 7)

# TURKEY 2016 COUP ATTEMPT -----------------------------------------------------
# Append the 2016 Turkey failed coup (absent from the source data) to the
# event table; NA-pad the remaining columns so the row width matches.
event <- rbind(event, list(85, "Turkey", "_XU100D", "07/15/2016",
                           as.Date("07/18/2016", "%m/%d/%Y"),
                           "Recep Tayyip Erdoğan", "Failed Coup",
                           NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA))
turk16 <- event[ticker == "_XU100D" & stock_date == "2016-07-18"]
turk16.es <- event_study(ticker = index$ticker, date = index$date, dr = index$dr,
                         event_ticker = turk16$ticker,
                         event_window = 20, estimation_window = 200,
                         event_date = turk16$stock_date, model = "constant", control = FALSE)
add_ar_bounds(turk16.es)
p <- plot_ar(turk16.es, ylimits = c(-12, 12))
print(p)
ggsave("figs/turkey_coup_attempt_2016.pdf", p, height = 5, width = 7)
|
929503f8c3ce89092b33d511e166819fd3ee6cff | c8c6e0c4ce9a3a868abd8d13012c39d7017faa82 | /plot4.R | 16a3db5029b9776d81bad477ae3696e9f1eaa12a | [] | no_license | bill-mattingly/ExData_Plotting1 | a282034c1a1a5de3b775d18e8d6bc1069d0b4da8 | d81b56d0c11b6f6537084123e709fbf4f4dcc55e | refs/heads/master | 2021-08-20T09:03:28.470846 | 2017-11-28T17:56:09 | 2017-11-28T17:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,652 | r | plot4.R | #Read data
# plot4.R -- 2x2 panel of household power-consumption plots for 2007-02-01/02.
# Expects "household_power_consumption.txt" (UCI dataset; ';'-separated,
# missing values coded as "?") in the working directory.
# Fix: `h=T` relied on partial argument matching and the reassignable `T`;
# spelled out as `header = TRUE`. The "#convert datatype" / "#subset" section
# comments were also swapped in the original and are corrected here.
mydata <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Build a POSIXct timestamp from the Date/Time columns, then keep only the
# two target days (strptime ignores the trailing " PST" in the bounds; the
# comparison happens in local time).
datetime <- paste(mydata$Date, mydata$Time)
datetime <- as.POSIXct(strptime(datetime, "%d/%m/%Y %H:%M:%S"))
mydata <- cbind(datetime, mydata[,-(1:2)])
mydata <- subset(mydata, datetime >= as.POSIXct("2007-02-01 00:00:00 PST") & datetime <= as.POSIXct("2007-02-02 23:59:59 PST"))
# Convert the measurement columns to numeric; "?" placeholders become NA
# (with a coercion warning), matching the original behavior.
num_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3", "Voltage", "Global_reactive_power")
for (col in num_cols) {
  mydata[[col]] <- as.numeric(as.character(mydata[[col]]))
}
# Make plots: par(mfcol) fills column-wise, so column 1 holds global active
# power + sub-metering, column 2 holds voltage + reactive power.
png("plot4.png", width = 480, height = 480)
par(mfcol = c(2,2))
plot(mydata$Global_active_power ~ mydata$datetime, type = "l", ylab = "Global Active Power", xlab = "")
plot(mydata$Sub_metering_1 ~ mydata$datetime, type = "l", main = "", ylab = "Energy sub metering", xlab = "")
lines(mydata$Sub_metering_2 ~ mydata$datetime, col = "Red")
lines(mydata$Sub_metering_3 ~ mydata$datetime, col = "Blue")
legend("topright", col = c("Black", "Red", "Blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, bty ="n")
# line voltage vs time
plot(mydata$Voltage ~ mydata$datetime, xlab = "datetime", ylab = "Voltage", type = "l")
# line reactive power vs time
plot(mydata$Global_reactive_power ~ mydata$datetime, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
dev.off()
|
b28b8116a869aae2737809100892034f86620578 | a77de003df4f2dd0cd1e5a309cd1a49ef0f30db7 | /tests/testthat/test-addition.R | 2143a6950e877e08b7b770daa9c9a787ecf1b14d | [
"MIT"
] | permissive | luciorq/mathOperations | becca4a25a73635e7e8d7302e76ac9673e2c6f06 | dca672a08ebccaa464307cbc478817305aa15775 | refs/heads/master | 2023-04-19T02:05:59.165773 | 2021-05-01T19:48:41 | 2021-05-01T19:48:41 | 363,484,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 273 | r | test-addition.R | test_that("Addition works", {
add_result <- addition(2, 3)
expect_equal(length(add_result), 1)
expect_equal(add_result, 5)
})
# Non-numeric input should propagate R's arithmetic error from addition();
# the regexp pins the exact base-R error message.
test_that("Addition fails", {
  testthat::expect_error(
    addition("a", 3),
    regexp = "non-numeric argument to binary operator"
  )
})
|
5ddf7f8d2683646fa1c13ca4c8aafcf96264dc98 | c4daabd6a30fa6bee84f615e988bc682d428cbf5 | /pipelines/3_supertree/4_supermatrix.R | 711ab3b32b5f8109203c2ec8b999c6a598bb6ad8 | [] | no_license | AntonelliLab/supersmartR-workshop | 1253a3779d610862bb0d540a3b82e24023b98e26 | 846015fc5177a1eb3e959b6837f3eb5022d0a16c | refs/heads/master | 2020-07-11T23:20:17.263889 | 2019-11-04T09:22:01 | 2019-11-04T09:22:01 | 204,664,742 | 4 | 4 | null | null | null | null | UTF-8 | R | false | false | 2,166 | r | 4_supermatrix.R | # Construct a supermatrix
# Generate a .fasta file of all the alignments
# Library ----
library(gaius)
# Vars ----
# Paths are relative to the project root; output dir is created on first run.
wd <- file.path(getwd(), 'pipelines', '3_supertree')
input_dir <- file.path(wd, '3_align')
output_dir <- file.path(wd, '4_supermatrix')
if (!dir.exists(output_dir)) {
  dir.create(output_dir)
}
tree_file <- file.path(wd, '6_supertree', 'supertree.tre')
# Identify groups ----
# set parameters with pset()/pget()
# min_ntips - the minimum number of tips per group, default 5
# max_ntips - the maximum number of tips per group, default 100
# NOTE(review): list.files(pattern = '.fasta') treats the pattern as a regex,
# so '.' matches any character -- presumably fine for this pipeline.
alignment_files <- file.path(input_dir, list.files(path = input_dir,
                                                   pattern = '.fasta'))
# Match tip names between the alignments and the guide tree, then derive
# monophyletic groups from the tree (all gaius functions).
alignment_names <- names_from_alignments(alignment_files)
tree_names <- names_from_tree(tree_file)
matched_names <- name_match(alignment_names = alignment_names,
                            tree_names = tree_names)
groups <- groups_get(tree_file = tree_file, matched_names = matched_names)
# Supermatrices ----
# min_ngenes - minimum number of genes in matrix
# min_ntips - minimum number of tips in matrix
# min_nbps - minimum number of base paris in a gene
# column_cutoff - proportion of non-gaps per column
# tip_cutoff - proportion of non-gaps per tip
alignment_list <- alignment_read(flpths = alignment_files)
supermatrices <- supermatrices_get(alignment_list = alignment_list,
                                   groups = groups, min_ngenes = 2,
                                   min_ntips = 5, min_nbps = 250,
                                   column_cutoff = 0.1, tip_cutoff = 0.1)
# check all mono. groups in backbone
# (every group except the backbone itself must appear as a tip in it)
nmono <- sum(names(groups) %in% names(supermatrices[['backbone']]))
if (nmono != (length(groups) - 1)) {
  stop('Too few mono. groups in backbone')
}
# Write out ----
# For each supermatrix, write a RAxML-style partition file (gene boundaries)
# and the concatenated alignment itself.
ids <- names(supermatrices)
for (id in ids) {
  supermatrix <- supermatrices[[id]]
  # number of bps per cluster
  nbps <- attr(supermatrix, 'nbps')
  flnm <- paste0(id, "_partition.txt")
  partition_file(nbp = nbps, flpth = file.path(output_dir, flnm))
  flnm <- paste0(id, '_supermatrix.fasta')
  sequences_write(x = supermatrix, flpth = file.path(output_dir, flnm))
}
|
faa5fb24f8ed322337f04febe69d2ea7930e206e | eb706168495609678cb4f9950be090f10e35555e | /R/model2sparse.R | 4c8c2bdc714f6168ae82dc0fa6b398f5750deb25 | [] | no_license | juvelas/bam | 33d4f564b009146019b83283e60b069cbed65a5a | 66e2fe20a20862ae687368c82ecc39889177a5c9 | refs/heads/master | 2023-02-16T19:24:44.567923 | 2021-01-07T03:56:26 | 2021-01-07T03:56:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,458 | r | model2sparse.R | #' model2sparse: Converts a niche model into a diagonal sparse matrix
#' @param model A RasterLayer with the geographic projection of a niche model.
#' @param threshold A numeric threshold used to binarize a continuous model
#'   (cells strictly above the threshold become 1). Use \code{NULL} (the
#'   default) to keep the model values unchanged.
#' @import Matrix
#' @return A diagonal sparse matrix representing the geographic projection of a niche model.
#' @export
#' @examples
#' \dontrun{
#' model_path <- system.file("extdata/Lepus_californicus_cont.tif",
#'                           package = "bam")
#' model <- raster::raster(model_path)
#'
#' sparse_mod <- bam::model2sparse(model,threshold=0.05)
#' }
model2sparse <- function(model, threshold = NULL) {
  # Optional binarization: cells above the threshold become TRUE (1).
  if (is.numeric(threshold)) {
    model <- model > threshold
  }
  model_vals <- raster::getValues(model)
  # Niche cells are the non-NA cells; for a single-layer raster this is the
  # same set as the complete-case cells (`all_area`), kept separate for
  # clarity and to mirror the original indexing.
  in_niche <- which(!is.na(model_vals))
  cell_ids <- stats::complete.cases(model_vals)
  all_area <- which(cell_ids)
  # (The original also computed an `out_niche` index here that was never
  # used -- and whose expression indexed an already-subsetted vector -- so
  # it has been removed.)
  ncols <- nrows <- length(all_area)
  # Diagonal sparse matrix: entry (k, k) holds the model value of the k-th
  # valid cell; `* 1` coerces a logical/pattern matrix to numeric.
  mod_sparse <- Matrix::sparseMatrix(i = match(in_niche, all_area),
                                     j = match(in_niche, all_area),
                                     x = model_vals[in_niche],
                                     dims = c(nrows, ncols)) * 1
  mod_coords <- raster::coordinates(model)[all_area, ]
  # Bundle raster, cell ids, sparse matrix and coordinates into a setA object
  # (package-internal constructor).
  mod_atts <- setA(bin_model = model,
                   cellIDs = all_area,
                   sparse_model = mod_sparse,
                   coordinates = mod_coords)
  return(mod_atts)
}
|
12bb16b9d799bfe14b6b7b8652000dffc9ad1cbd | 5a786ac93b5138df8b276e840a2187e374189578 | /LambdaFunctionalForms/LamdaTesting.R | f35ea3bfe524c4cd1fb94935de269dfcab2216b4 | [] | no_license | laurenmh/sToration-york-gum | 1c66073233768608845bce516dd5e13844a1f928 | 7e3149ab0d10be266d8ced03efff5e1bde33b566 | refs/heads/master | 2022-06-28T15:58:59.937848 | 2022-06-16T13:15:57 | 2022-06-16T13:15:57 | 213,767,514 | 0 | 0 | null | 2022-05-27T23:28:11 | 2019-10-08T22:25:47 | R | UTF-8 | R | false | false | 3,229 | r | LamdaTesting.R | # Load in the full data set and set working directories
# Setup: read the York gum dataset, pick a focal species, and build the
# response (log of total flower count, a fecundity proxy for lambda) plus
# squared environmental covariates for the quadratic model variants.
library(here)
YorkGum <- read.csv(here("water_full_env.csv"))
FocalSp <- "W" # or H, T, A
#subset the data and create quadratic transformations of the environmental covariates
SpData <- subset(YorkGum, Focal.sp.x == FocalSp & Trt.comp.x == "S")
SpData$logLambda <- log(SpData$Number.flowers.total)
SpData$Phos2 <- SpData$Colwell.P ^ 2
SpData$Canopy2 <- SpData$Canopy ^ 2
# Fit the competing models with different quadratic options.
# (In each formula `a*b` already expands to a + b + a:b, so the explicit
# main-effect terms are redundant but harmless.)
LinearMod <- lm(logLambda ~ Colwell.P + Canopy + Colwell.P*Canopy, data = SpData)
QuadraticP <- lm(logLambda ~ Phos2 + Canopy + Phos2*Canopy, data = SpData)
QuadraticC <- lm(logLambda ~ Colwell.P + Canopy2 + Colwell.P*Canopy2, data = SpData)
QuadraticB <- lm(logLambda ~ Phos2 + Canopy2 + Phos2*Canopy2, data = SpData)
# Make predictions from each of the models: fitted values + 95% confidence
# bands, bound back onto the predictor columns for plotting.
PredData <- subset(SpData, select = c("Colwell.P", "Canopy", "logLambda", "Phos2", "Canopy2"))
LinearPred <- cbind(PredData, predict(LinearMod, PredData, interval = "confidence"))
QuadP_pred <- cbind(PredData, predict(QuadraticP, PredData, interval = "confidence"))
QuadC_pred <- cbind(PredData, predict(QuadraticC, PredData, interval = "confidence"))
QuadB_pred <- cbind(PredData, predict(QuadraticB, PredData, interval = "confidence"))
# Residual plots: one row per predictor (phosphorous on top, canopy below),
# one column per model (monotonic, P^2, C^2, both^2). The eight panels were
# eight copy-pasted plot() calls in the original; they share a helper now.
# All panels use a fixed y range so the four models are directly comparable.

# Draw one residual panel. Residual here = fitted minus observed log-lambda.
plot_residuals <- function(pred, xvar, xlab, panel_label = NULL) {
  plot(x = pred[[xvar]], y = pred$fit - pred$logLambda,
       xlab = xlab, ylab = "Residuals", main = "", ylim = c(-2.5, 2.5))
  abline(h = 0, lty = 2)  # zero-residual reference line
  if (!is.null(panel_label)) {
    mtext(panel_label, side = 3, line = 0.5)
  }
}

FigName <- paste("LambdaFunctionalForms/", FocalSp, "_residuals.pdf", sep = "")
pdf(file = FigName, width = 10, height = 6, onefile = FALSE, paper = "special")
par(mfrow = c(2, 4))  # fills row-wise: phosphorous row first, then canopy
preds <- list(Monotonic = LinearPred, `Phosphorous^2` = QuadP_pred,
              `Canopy^2` = QuadC_pred, `Both^2` = QuadB_pred)
# Top row: residuals against phosphorous, labelled by model.
for (lbl in names(preds)) {
  plot_residuals(preds[[lbl]], "Colwell.P", "Phosphorous", panel_label = lbl)
}
# Bottom row: residuals against canopy cover (labels already on the top row).
for (pred in preds) {
  plot_residuals(pred, "Canopy", "Canopy")
}
dev.off()
|
374d58c40a1a8608f389d0be4a9d128c4ee6d505 | 22ea070f681ddd5a9ab667897f3f414d8a0091f4 | /parsing_functions.R | d26e8e4ab03aa62b6f6a2b30ac16f44ccbb78cf1 | [] | no_license | ginolhac/cv-1 | 79eafd82c1c8c72d898a02032b3d9a7a795fa3ca | e004d531d6c60c6458940edf75bb05457a911d08 | refs/heads/master | 2021-08-04T05:06:45.999466 | 2020-09-01T14:28:42 | 2020-09-01T14:28:42 | 213,851,156 | 0 | 0 | null | 2019-10-09T07:32:13 | 2019-10-09T07:32:12 | null | UTF-8 | R | false | false | 4,824 | r | parsing_functions.R | # Regex to locate links in text
# Matches one inline markdown link: [text](url). Built with stringr::regex()
# in free-spacing mode (comments = TRUE), so whitespace and the '#'
# annotations INSIDE the pattern string are stripped before compilation;
# the non-greedy .+? keeps each match as small as possible.
find_link <- regex("
  \\[ # Grab opening square bracket
  .+? # Find smallest internal text as possible
  \\] # Closing square bracket
  \\( # Opening parenthesis
  .+? # Link text, again as small as possible
  \\) # Closing parenthesis
  ",
comments = TRUE)
# Function that removes links from text and replaces them with superscripts that are
# referenced in an end-of-document list.
#
# Relies on two globals: PDF_EXPORT (logical switch) and `links` (a character
# vector accumulator, appended to via `<<-`). When PDF_EXPORT is FALSE the
# text is returned untouched so markdown links stay clickable in HTML output.
sanitize_links <- function(text){
  if (PDF_EXPORT) {
    str_extract_all(text, find_link) %>%
      pluck(1) %>%
      walk(function(link_from_text){
        # Split "[title](url)" into its title and url parts.
        title <- link_from_text %>% str_extract('\\[.+\\]') %>% str_remove_all('\\[|\\]')
        link <- link_from_text %>% str_extract('\\(.+\\)') %>% str_remove_all('\\(|\\)')
        # add link to links array; its new length is the superscript number
        links <<- c(links, link)
        # Build replacement text
        new_text <- glue('{title}<sup>{length(links)}</sup>')
        # Replace text: `<<-` here targets the enclosing function's `text`
        # (lexical scoping), not the global environment
        text <<- text %>% str_replace(fixed(link_from_text), new_text)
      })
  }
  text
}
# Run sanitize_links() over every cell of the given columns, one row at a
# time. The row-major visiting order is deliberate: sanitize_links() numbers
# links in the order it sees them, so all links belonging to one position end
# up with adjacent superscript numbers.
strip_links_from_cols <- function(data, cols_to_strip){
  for (row_idx in seq_len(nrow(data))) {
    for (col_name in cols_to_strip) {
      cell_text <- data[row_idx, col_name]
      data[row_idx, col_name] <- sanitize_links(cell_text)
    }
  }
  data
}
# Take a position dataframe and the section id desired
# and prints the section to markdown.
#
# Each row of `position_data` is one position; its description_1..n columns
# are gathered into bullet points. `n_max` caps the number of (position x
# description) rows kept after pivoting. Output is a glue vector of markdown
# chunks consumed by the CSS-grid CV layout (hence the rigid "\n\n" spacing).
print_section <- function(position_data, section_id, n_max = Inf){
  position_data %>%
    filter(section == section_id) %>%
    arrange(desc(end)) %>%
    # id preserves the reverse-chronological row order through the pivots
    mutate(id = row_number()) %>%
    # one row per non-missing description
    pivot_longer(
      starts_with('description'),
      names_to = 'description_num',
      values_to = 'description',
      values_drop_na = TRUE
    ) %>%
    slice_head(n = n_max) %>%
    group_by(id) %>%
    # collect each position's descriptions into a list-column
    mutate(
      descriptions = list(description)
    ) %>%
    ungroup() %>%
    # keep a single row per position (descriptions are now in the list-column)
    filter(description_num == 'description_1') %>%
    mutate(
      # "end" alone when start is missing or equal, otherwise "end - start"
      timeline = ifelse(
        is.na(start) | start == end,
        end,
        glue('{end} - {start}')
      ),
      description_bullets = map_chr(descriptions, ~ paste('-', ., collapse = '\n')),
    ) %>%
    # convert markdown links to numbered superscripts (PDF export only)
    strip_links_from_cols(c('title', 'description_bullets')) %>%
    mutate_all(~ ifelse(is.na(.), 'N/A', .)) %>%
    glue_data(
      "### {title}",
      "\n\n",
      "{loc}",
      "\n\n",
      "{institution}",
      "\n\n",
      "{timeline}",
      "\n\n",
      "{description_bullets}",
      "\n\n\n",
    )
}
# For bib files.
# using bib2df spaces are expected around '=' between keys and values
# read_lines("biblio.bib") %>% str_replace("([a-z])=([\"\\{])", "\\1 = \\2") %>% write_lines("biblio_corrected.bib")
# if needed
# helper functions
# Truncate an author vector for display, appending an italic " _et al._ "
# marker. `vec` is the full author list; `pos` is the index of the CV owner
# within it. The result is wrapped in a one-element list so it survives
# case_when()'s vector semantics; callers unwrap with .x[[1]].
# NOTE(review): case_when() keeps the FIRST matching branch, so e.g. pos = 2
# with 12 authors hits the 10-author branch, never the shorter ones. If the
# intent is "shortest list that still contains the owner", the branches
# should be ordered 3, 5, 10 -- confirm before changing behavior.
subset_authors <- function(vec, pos) {
  case_when(
    # need to wrap the vector in a list for map
    pos <= 10 & length(vec) >= 10 ~ list(c(vec[1:10], " _et al._ ")),
    pos <= 5 & length(vec) >= 5 ~ list(c(vec[1:5], " _et al._ ")),
    pos <= 3 & length(vec) >= 3 ~ list(c(vec[1:3], " _et al._ ")),
    TRUE ~ list(vec))
}
# strip consecutive years
#
# Replace consecutive repeats in a vector of years with "N/A" so a year is
# only printed for the first publication of that year.
#
# Fixes over the original: the result is preallocated to the full length
# (it was sized to 1 and grew on every iteration), length-1 input is now
# accepted, and a leftover broken debug statement was dropped.
#
# @param vec Non-empty vector of years (numeric or character), no NAs.
# @return Character vector of the same length, with every element that
#   equals its predecessor replaced by "N/A".
na_year <- function(vec) {
  stopifnot(length(vec) >= 1)
  res <- character(length(vec))
  res[1] <- as.character(vec[1])
  # Compare each element to its predecessor (equivalent to the original's
  # `buff` accumulator, which always held the previous element).
  for (i in seq_along(vec)[-1]) {
    if (vec[i] == vec[i - 1]) {
      res[i] <- "N/A"
    } else {
      res[i] <- as.character(vec[i])
    }
  }
  res
}
# Render one markdown entry per bibliography record of the given BibTeX
# category, newest first. `bibdf` is a bib2df data frame (AUTHOR is a
# list-column of author vectors); `cv_author` is the CV owner's name as it
# appears in the .bib file.
# NOTE(review): `which(str_detect(.x, cv_author))` assumes exactly one match
# per record -- zero or multiple matches would make map_int() error. Confirm
# the .bib file guarantees this.
print_articles <- function(bibdf, type = "ARTICLE", cv_author = "Ginolhac, A") {
  bibdf %>%
    filter(CATEGORY == type) %>%
    arrange(desc(YEAR)) %>%
    # collapse authors
    mutate(# find location of cv author in the list
           position = map_int(AUTHOR, ~ which(str_detect(.x, cv_author))),
           # shorten author list when possible
           AUTHOR = map2(AUTHOR, position, ~ subset_authors(.x, .y)),
           # remove one list level (subset_authors wraps its result in a list)
           AUTHOR = map(AUTHOR, ~ .x[[1]]),
           # collapse vector, with and for last author if still present
           authors = map(AUTHOR, ~ if_else(.x[length(.x)] == " _et al._ ",
                                           glue_collapse(.x, sep = " "),
                                           glue_collapse(.x, sep = " ", last = " and "))),
           # highlight cv author in bold
           authors = str_replace(authors, cv_author, glue("**{cv_author}**")),
           # clean up titles from curly braces
           clean_title = str_remove_all(TITLE, "[\\{\\}]"),
           # strip consecutive years so each year prints once in the sidebar
           years = na_year(YEAR)) %>%
    glue_data(
      "### [{clean_title}]({URL})",
      "\n\n",
      "{authors}",
      " _{JOURNAL}_ ", " **{VOLUME}**, {PAGES}",
      "\n\n",
      "N/A",
      "\n\n",
      "{years}",
      "\n\n\n",
    )
}
|
ea6e237840e3f912ec4249eeec238aaf6229d95c | 1cdf33e4a84428a4b62d9bc8711844850f0b587d | /code/testing/spreadsheet-replace.R | 9c57ac55fe87a262e47d27b6c23d681961245b59 | [] | no_license | Hutchins-Center/Fiscal-Impact-Measure | 5798621de5094005694aee204388c30c88972fb0 | d5a04e1aa0d1d34030c7352836c9d9ffdab8a589 | refs/heads/master | 2023-05-14T09:46:06.774236 | 2021-06-08T17:12:58 | 2021-06-08T17:12:58 | 298,624,532 | 1 | 0 | null | 2021-01-15T17:32:50 | 2020-09-25T16:26:20 | HTML | UTF-8 | R | false | false | 8,190 | r | spreadsheet-replace.R | # NOTES
# We had the wrong UI numbers in the spreadsheet.
# We never updated the wages lost assistance spending, which was higher than we expected
# in Q3. Unfortunately, the previous number was hardcoded so not sure where it came from.
# It said Total Q3 was 881,354
# UI Q3 was also
# 0.0 Source ----------------------------------------------------------------------------------------------------------
## Source custom functions and packages
library('tidyverse')
library('readxl')
library('writexl')
library('tsibble')
library('janitor')
source('src/functions.R')
library('lubridate')
#source('_drake.R')
# Expand an annual data frame to quarterly frequency by repeating each row
# four times; the `date` column is renamed to `fy` (fiscal year).
# NOTE(review): distinct from fim::annual_to_quarter(), which is what the
# cbo_legislation pipeline below actually calls.
annual_to_quarter <- function(df){
  df %>%
    slice(rep(1:n(), each=4)) %>%
    rename(fy = date)
}
# Map raw Haver Analytics series codes to descriptive column names.
# Fix: the original listed `ui_bea = yptu` twice, which makes dplyr::rename()
# abort with a duplicate-names error; the duplicate entry was removed.
rename_haver_codes <- function(df){
  df %>%
    rename(
      subsidies = gsub,
      ppp = gfsubp,
      nonprofit_ppp = gftfpp,
      nonprofit_provider_relief = gftfpv,
      aviation = gfsubg,
      employee_retention = gfsube,
      sick_leave = gfsubk,
      rebate_checks = gftfpe,
      # unemployment insurance (total, BEA)
      ui_bea = yptu,
      total_grants = gfeg,
      covid_relief_fund = gfegc,
      education_stabilization_fund = gfege,
      provider_relief_fund = gfegv,
      medicaid_total = yptmd,
      medicaid_grants = gfeghdx,
      medicare = yptmr,
      federal_social_benefits_nipa = gftfp,
      state_social_benefits = gstfp,
      wages_lost_assistance = yptolm,
      # pandemic UI programs
      peuc = yptue,
      pua = yptup,
      puc = yptuc
    )
}
# 0.1 Pull Raw Data---------------------------------------------------------------
loadd(projections)
START <- as_date("2018-12-31")
# Assumed hospital-ownership shares used to apportion the Provider Relief
# Fund (must sum to 1) -- only the nonprofit share is used below.
nonprofit_hospitals <- 0.5
government_hospitals <- 0.2
forprofit_hospitals <- 0.3
# CBO scores of pandemic legislation: reshape the annual sheet so rows are
# fiscal years, zero-fill missing cells, annualize (x4), then expand to
# quarterly frequency.
cbo_legislation <-
  read_xlsx('data/pandemic-legislation/pandemic-legislation.xlsx',
            sheet = 'annual') %>%
  select(-1) %>%
  pivot_longer(-date) %>%
  pivot_wider(names_from = date, values_from = value) %>%
  rename(year = name) %>%
  clean_names() %>%
  mutate(
    across(everything(), ~ replace_na(., 0)),
    across(where(is.numeric), ~ . * 4)
  ) %>%
  fim::annual_to_quarter(year)
# Nonprofit hospitals' slice of the scored Provider Relief Fund.
provider_relief_fund_score <-
  cbo_legislation %>%
  mutate(provider_relief_fund = nonprofit_hospitals * provider_relief_fund ) %>%
  pull(provider_relief_fund)
# Quarterly -------------------------------------------------------------------------------------------------------
#haver.path("//ESDATA01/DLX/DATA/")
# BEA NIPAs
names_usna <- read_excel("data/auxilliary/spreadsheet_names.xlsx")
# Build the quarterly national-accounts panel: rename Haver codes, zero-fill,
# then derive the FIM component series (subsidies, grants, health, UI,
# social benefits). Dollar units are billions unless noted.
usna <- read_xlsx('data/raw/haver/national_accounts.xlsx') %>%
  filter(date >= as_date('2015-12-31')) %>%
  rename_haver_codes() %>%
  mutate(
    across(where(is.numeric), ~ replace_na(.x, 0)),
    date = yearquarter(date),
    # SUBSIDIES
    other_subsidies = aviation + employee_retention + sick_leave,
    legislation_subsidies = ppp + other_subsidies,
    nonprofit_subsidies = nonprofit_ppp + nonprofit_provider_relief,
    # GRANTS
    legislation_grants = covid_relief_fund + education_stabilization_fund + provider_relief_fund,
    medicaid_grants = medicaid_grants / 1000, # MILLIONS TO BILLIONS
    non_medicaid_grants = total_grants - medicaid_grants,
    non_medicaid_or_legislation_grants = non_medicaid_grants - legislation_grants,
    other_grants = legislation_grants, # PLACEHOLDER
    federal_cgrants = non_medicaid_or_legislation_grants + other_grants,
    # HEALTH
    federal_health = medicaid_grants + medicare,
    state_health = medicaid_total - medicaid_grants,
    # SOCIAL BENEFITS
    # UNEMPLOYMENT INSURANCE
    ui = ui_bea + wages_lost_assistance,
    # CBO-assumed "other" federal UI, stepped down by quarter (billions).
    ui_cbo_assumed_other =
      case_when(
        date < yearquarter('2020 Q2') ~ 0,
        date >= yearquarter('2020 Q2') & date <= yearquarter('2020 Q3') ~ 12,
        date >= yearquarter('2020 Q4') & date <= yearquarter('2021 Q1') ~ 8,
        date >= yearquarter('2021 Q2') ~ 4
      ),
    # NOTE(review): PEUC enters doubled here (2 * peuc) -- presumably an
    # adjustment to the reported series; confirm the rationale.
    federal_ui = 2 * peuc + pua + puc + wages_lost_assistance + ui_cbo_assumed_other,
    state_ui = ui - federal_ui,
    # FEDERAL SOCIAL BENEFITS
    federal_social_benefits = federal_social_benefits_nipa - medicare - state_ui,
    state_social_benefits = state_social_benefits + state_ui - medicaid_grants
  )
# Forecast ------------------------------------------------------------------------------------
# Consumption Grants --------------------------------------------------------------------------
# Sum one series across all quarters of a wide-by-date layout.
# NOTE(review): the bare `-date` passed to pivot_wider() lands in its
# id_cols slot -- it works, but naming the argument would be clearer.
totals <- function(df, var){
  df %>%
    as_tibble() %>%
    select(date, var) %>%
    pivot_wider(-date, names_from = date, values_from = var) %>%
    summarise(sum(c_across(everything()))) %>%
    pull()
}
# CBO's Education Stabilization Fund appropriation, annualized (billions).
cbo_education <- 4 * 30 # Annualize
# Amount already disbursed so far in the observed data ...
education_total_disbursed <-
  usna %>%
  totals(var = 'education_stabilization_fund')
# ... and the remainder still to be spent over the forecast horizon.
education_total_remaining <-
  cbo_education - education_total_disbursed
# Spread a remaining appropriation evenly over `n` quarters, assuming a
# marginal propensity to consume of 1 (the full amount is spent, 1/n per
# quarter). The original bound an unused `mpc <- 1` local and a needless
# temporary; both are removed -- behavior is unchanged.
#
# @param x Total dollars remaining to disburse (scalar, or a length-n vector
#   for elementwise weighting).
# @return Numeric vector of length n with the per-quarter disbursement.
mpc_education_stabilization_fund <- function(x, n) {
  x * rep(1 / n, n)
}
# Disburse the remaining education funds evenly over the next four quarters.
x <- mpc_education_stabilization_fund(education_total_remaining, n = 4)
# NOTE(review): `df` is built here but never referenced again in this script
# -- looks like exploratory scaffolding.
df <-
  tibble(
    date = seq(yearquarter('2021 Q1'), length.out = 4, by = 1),
    x = x
  )
# Extend the quarterly panel 12 quarters into the future, attach the CBO
# grant projections, and zero-fill the appended rows (%<>% assigns in place).
usna %<>%
  as_tsibble(index = date) %>%
  tsibble::append_row(n = 12) %>%
  left_join(projections %>% mutate(date = yearquarter(date)) %>%
              select(date, gfeg, gfeghdx)) %>%
  mutate(across(where(is.numeric), ~ replace_na(., 0)))
# NOTE(review): the pipeline below is not assigned, so it only prints --
# presumably a spot check. It also fills every forecast quarter with the
# full remaining total rather than a per-quarter share; confirm intent.
usna %>%
  mutate(education_stabilization_fund =
           case_when(
             date < yearquarter('2021 Q1') ~ education_stabilization_fund,
             date >= yearquarter('2021 Q1') & date <= yearquarter('2022 Q2') ~ education_total_remaining
           ))
# State Health --------------------------------------------------------------------------------
# CBO fiscal-year projections for state health spending (billions) and the
# implied fiscal-year growth rates.
cbo_state_health <-
  tibble(
    fy = as.numeric(c(2020:2022)),
    cbo_state_health = c(657, 726, 727),
    growth_rate = ( cbo_state_health / lag(cbo_state_health))
  )
# FY2021 over FY2020 growth factor used to seed the quarterly path.
cbo_state_health_growth <-
  cbo_state_health %>%
  filter(fy == 2021) %>%
  pull(growth_rate)
# Quarterly growth path for the 12-quarter forecast window: fractional
# powers front-load the annual growth into 2021 Q1/Q2, then na.locf carries
# the Q2 rate forward (NaN is treated as NA by zoo::na.locf).
forecasts <-
  tibble(
    date = seq(yearquarter("2021 Q1"), length.out = 12, by = 1), # by 1 quarter
    growth = case_when(
      date == yearquarter('2021 Q1') ~ cbo_state_health_growth^0.18,
      date == yearquarter('2021 Q2') ~ cbo_state_health_growth^0.1,
      date >= yearquarter('2021 Q3') ~ NaN
    )
  ) %>%
  mutate(growth = zoo::na.locf(growth))
# Total Medicaid ------------------------------------------------------------------------------
# NOTE(review): this pipeline is not assigned back to `usna`, so it only
# prints -- presumably exploratory. Also note only the first forecast
# quarter gets lag(medicaid_total) * growth from the first if_else (later
# quarters see a zero-filled lag); the second if_else and na.locf patch
# 2021 Q2 and carry values forward.
usna %>%
  full_join(forecasts, by = 'date') %>%
  mutate(
    medicaid_total = if_else(
      date < yearquarter('2021 Q1'),
      medicaid_total,
      lag(medicaid_total) * growth
    ),
    medicaid_total = if_else(
      date == yearquarter('2021 Q2'),
      lag(medicaid_total) * growth,
      medicaid_total
    ),
    medicaid_total = zoo::na.locf(medicaid_total),
    # Federal share of Medicaid: 74% through 2022 Q1, 68% afterwards.
    # NOTE(review): because case_when keeps the first match, the final
    # branch effectively applies only to dates after 2022 Q1.
    medicaid_grants = case_when(date < yearquarter('2021 Q1') ~ medicaid_grants,
                                date >= yearquarter('2021 Q1') & date <= yearquarter('2022 Q1') ~ medicaid_total * 0.74,
                                date > yearquarter('2021 Q1') ~ medicaid_total * 0.68),
    state_health_outlays = medicaid_total - medicaid_grants,
    federal_health_outlays = medicaid_grants + medicare
  )
# Misc ----------------------------------------------------------------------------------------
# Prefer the new observation; where it is NA, grow the previous value by
# `growth` (3% by default). Vectorized via dplyr::if_else. Currently unused
# in this script.
fill_in <- function(prev, new, growth = 0.03) {
  if_else(!is.na(new), new, prev * (1 + growth))
}
# fim_state_social_benefits = (nipa - medicare - ui - rebate_checks - nonprofit_subsidies) + rebate_checks + nonprofit_subsidies + federal_ui
# = nipa - medicare + (federal_ui - ui)
# = nipa - medicare - state_ui
# Distributed-lag spend-out of Coronavirus Relief Fund outlays: a 12-quarter
# rolling sum where `rev(weights)` puts the largest weights on the most
# recent quarters. MPC is 1, so the weights (which sum to ~1, up to
# rounding of 0.08333) fully allocate each dollar over 12 quarters.
mpc_covid_relief_fund <-
  function(x){
    mpc <- 1
    weights <- c(0.0583, 0.075, rep(0.1, 2), rep(0.08333, 8))
    mpc * roll::roll_sum(x, width = length(weights),
                         weights = rev(weights), online = FALSE)
  }
# Federal health
# State health
# Subsidies
# Grants
# Basically, to get the FIM social benefits we subtract medicare from NIPA social benefits
# and then we add the difference between their ui and ours
#
# fim ex everything = social benefits -
|
1666c2fdea915ec2794d7b1a9bd00c74f7e697ce | 5bc5c405645704799c0708c86f086bc3c7225e5b | /inferMSI/R/inferMSI.R | 21ac268c88c301855b6cadf5d470ab5402e59f6a | [
"BSD-3-Clause"
] | permissive | johnsonra/inferMSI | 66696964d1dd3e43ea241646e839d0e9ca99d7c5 | 92cec71182d7e8b627b5981be0a8165a442f435d | refs/heads/master | 2021-05-01T01:25:43.691917 | 2015-10-21T21:17:33 | 2015-10-21T21:17:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,128 | r | inferMSI.R | # inferMSI.R
# Call MSI status given a model from trainMSI
# Randy Johnson
# CCR Collaborative Bioinformatics Resource at Frederick National Laboratory
# Leidos Biomedical Research, Inc
# Infer microsatellite-instability (MSI) probability for each input sample.
#
# @param file    Character vector of .repeatseq file paths, one per sample.
# @param model   Model list from trainMSI: $markers, $normal (per-marker
#                reference allele distributions), $pred (logistic
#                coefficients, intercept first), $meanScore.
# @param cut.off Score cut-off for calling MSI status.
#                NOTE(review): currently unused -- the `msi` column below is
#                commented out, so only the probability is returned.
# @return data.frame with one row per input file and its predicted score.
inferMSI <- function(file, model, cut.off)
{
  ######### Checks #########
  # ... should add some checks here to be sure variables make sense ... assuming they do for now
  ######### Read .repeatseq files #########
  repeatseq <- lapply(file, read.repeatseq, model$markers)
  ######### Score samples #########
  # scores[i, j] = deviation of sample i's alleles at marker j from the
  # normal (reference) allele profile; unmatched alleles keep their counts.
  scores <- matrix(NA, nrow = length(repeatseq), ncol = length(model$normal),
                   dimnames = list(names(repeatseq), names(model$normal)))
  for(i in 1:length(repeatseq))
  {
    for(j in names(model$normal))
    {
      tmp <- repeatseq[[i]][[j]]$alleles
      matches <- names(tmp) %in% names(model$normal[[j]])
      tmp[matches] <- abs(tmp[matches] - model$normal[[j]][names(tmp)[matches]])
      scores[i,j] <- sum(tmp)
    }
  }
  ######### fill in missing data #########
  # if there are only a few (or no) observed values, fill in NAs with average score among non-msi samples
  # (nunique = number of distinct non-NA scores per marker column)
  nunique <- apply(scores, 2, unique) %>%
             lapply(na.omit) %>%
             sapply(length)
  for(j in which(nunique < 5))
  {
    if(names(nunique)[j] %in% names(model$pred))
      scores[is.na(scores[,j]),j] <- model$meanScore[names(nunique)[j]]
  }
  # among remaining NAs, impute
  # NOTE(review): missForest is stochastic; set a seed upstream if
  # reproducible scores are needed. Only the predictor markers
  # (names(model$pred)[-1], i.e. excluding the intercept) are kept.
  scores <- as.matrix(
                missForest(
                    as.data.frame(
                        scores[,names(model$pred)[-1]]
                    )
                )$ximp
            )
  ######### Infer MSI status #########
  # Logistic model: prepend an intercept column, multiply by coefficients,
  # map to (0, 1). (`scores[,]` is a redundant full-matrix copy.)
  predictions <- cbind(1, scores[,]) %*% model$pred %>%
                 inv.logit()
  ######### Return results #########
  retval <- data.frame(file = file,
                       # msi = predictions > cut.off,
                       score = predictions)
  return(retval)
}
|
0c0d5a7005bbc27672536d3ad7274d01ea2a8ff0 | 92f4348e1c1e5556d4789d9aa7beadbfee83a277 | /tests/testthat/test-format.R | 23ba4ae9b7db83341b82bcf63e67a5d76a99a741 | [] | no_license | echasnovski/snookerorg | 84d7175b1fee2ba36e63b842a688b47d75256ed2 | a46e8defe49a635994271db2484e5812aa389ab3 | refs/heads/master | 2021-06-15T01:01:14.179950 | 2017-03-25T13:54:08 | 2017-03-25T13:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 769 | r | test-format.R | library(testthat)
context("Format")
# Each format_*() helper should turn a saved raw query into the saved,
# already-formatted fixture, byte for byte.
test_that("formatting is correct", {
  season_events <- readRDS(file = "test_query_season_events.rds")
  expect_identical(
    format_event(event_query = season_events),
    readRDS(file = "test_format_season_events.rds")
  )

  event_matches <- readRDS(file = "test_query_event_matches.rds")
  expect_identical(
    format_match(match_query = event_matches),
    readRDS(file = "test_format_event_matches.rds")
  )

  ama_players <- readRDS(file = "test_query_season_ama_players.rds")
  expect_identical(
    format_player(player_query = ama_players),
    readRDS(file = "test_format_season_ama_players.rds")
  )

  season_rankings <- readRDS(file = "test_query_season_rankings.rds")
  expect_identical(
    format_ranking(ranking_query = season_rankings),
    readRDS(file = "test_format_season_rankings.rds")
  )
})
|
3abffb9921b6d14d6b74782606e1dabfecdab542 | 5ada02d575f293314ca2eae8e0f056692c128c4d | /R/prog.R | b8626600d74fe40a83adf02ad32874bbc2a761a9 | [] | no_license | Kaustuv2809/kysrc | 963c4276d17514181be59e1b3fbaed75b9695971 | 3435b0ce972f85058ad0a5674acc142ff6a70379 | refs/heads/master | 2021-10-23T20:33:32.691242 | 2019-03-19T21:29:43 | 2019-03-19T21:29:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,713 | r | prog.R | #' Limited English Proficiency Data for Kentucky Public Schools, 2013-2017
#'
#' A dataset with data on Limited English Proficiency
#'
#' @format A dataframe with 75 rows and 6 variables:
#' \describe{
#' \item{sch_id}{ID number to identify schools & districts}
#' \item{dist_name}{District name - in this case, it is always "State"}
#' \item{year}{School year}
#' \item{student_group}{Student subgroup}
#' \item{lep_total}{Students with Limited English Proficiency}
#' \item{lep_pct}{Percent of students with Limited English Proficiency}
#' }
#'
#' @source \url{https://applications.education.ky.gov/src/DataSets.aspx}
"lep_state"
#' Limited English Proficiency Data for Kentucky Public Schools, 2013-2017
#'
#' A dataset with data on Limited English Proficiency
#'
#' @format A dataframe with 11,115 rows and 6 variables:
#' \describe{
#' \item{sch_id}{ID number to identify schools & districts}
#' \item{dist_name}{District name}
#' \item{year}{School year}
#' \item{student_group}{Student subgroup}
#' \item{lep_total}{Students with Limited English Proficiency}
#' \item{lep_pct}{Percent of students with Limited English Proficiency}
#' }
#'
#' @source \url{https://applications.education.ky.gov/src/DataSets.aspx}
"lep_dist"
#' Limited English Proficiency Data for Kentucky Public Schools, 2013-2017
#'
#' A dataset with data on Limited English Proficiency
#'
#' @format A dataframe with 72,720 rows and 7 variables:
#' \describe{
#' \item{sch_id}{ID number to identify schools & districts}
#' \item{dist_name}{District name}
#' \item{sch_name}{School name}
#' \item{year}{School year}
#' \item{student_group}{Student subgroup}
#' \item{lep_total}{Students with Limited English Proficiency}
#' \item{lep_pct}{Percent of students with Limited English Proficiency}
#' }
#'
#' @source \url{https://applications.education.ky.gov/src/DataSets.aspx}
"lep_sch"
#' Data on students with IEP's in Kentucky Public Schools, 2013-2017
#'
#' A dataset with data on students with Individualized Educational Plans
#'
#' @format A dataframe with 96 rows and 6 variables:
#' \describe{
#' \item{sch_id}{ID number to identify schools & districts}
#' \item{dist_name}{District name - in this case, it is always "State"}
#' \item{year}{School year}
#' \item{student_group}{Student subgroup}
#' \item{iep_total}{Students with IEP's}
#' \item{iep_pct}{Percent of students with IEP's}
#' }
#'
#' @source \url{https://applications.education.ky.gov/src/DataSets.aspx}
"iep_state"
#' Data on students with IEP's in Kentucky Public Schools, 2013-2017
#'
#' A dataset with data on students with Individualized Educational Plans
#'
#' @format A dataframe with 16,824 rows and 6 variables:
#' \describe{
#'   \item{sch_id}{ID number to identify schools & districts}
#'   \item{dist_name}{District name}
#'   \item{year}{School year}
#'   \item{student_group}{Student subgroup}
#'   \item{iep_total}{Students with IEP's}
#'   \item{iep_pct}{Percent of students with IEP's}
#' }
#'
#' @source \url{https://applications.education.ky.gov/src/DataSets.aspx}
"iep_dist"
#' Data on students with IEP's in Kentucky Public Schools, 2013-2017
#'
#' A dataset with data on students with Individualized Educational Plans
#'
#' @format A dataframe with 129,480 rows and 7 variables:
#' \describe{
#'   \item{sch_id}{ID number to identify schools & districts}
#'   \item{dist_name}{District name}
#'   \item{sch_name}{School name}
#'   \item{year}{School year}
#'   \item{student_group}{Student subgroup}
#'   \item{iep_total}{Students with IEP's}
#'   \item{iep_pct}{Percent of students with IEP's}
#' }
#'
#' @source \url{https://applications.education.ky.gov/src/DataSets.aspx}
"iep_sch"
|
c219488a1751d09d86f1f5df3b4b27b7136404f7 | 637a50fa3069b8fc142b1dae6d3641d56a167f87 | /scripts/r/z_analysis.R | 211ceb728cc1dab85bc9e2ee7e1c3ce8fe0b08e6 | [] | no_license | hemprichbennett/bat_diet_ms | 98051e2edf5fb1c171936108f1465a405f02e7a5 | 78d2c8c451521ea7cd5bcbae88ddcb58f03b8877 | refs/heads/main | 2023-04-18T08:10:00.274271 | 2021-08-23T15:20:46 | 2021-08-23T15:20:46 | 399,124,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,985 | r | z_analysis.R | #### Header ####
## Project: bat diet
## Script purpose: calculating z-scores from the random files created by
## array_site_motu_95_confidence_intervals
## Date: 2021-01-06
## Author: Dave Hemprich-Bennett (hemprich.bennett@gmail.com)
## Notes
##################################################
#### Setup ####
# Prevent partial-match errors
options(warnPartialMatchArgs = TRUE)
library(tidyverse)
# Load and format the random data -----------------------------------------
rand_filenames <- list.files('data/output_data/null_values/',
full.names = T)
rand_vals <- lapply(rand_filenames, read_csv) %>%
bind_rows()
# Get the real values -----------------------------------------------
source('scripts/r/r_network_gen.r')
inpath <- 'data/processed_dna_data/lulu/'
filenames <- list.files(pattern = '.csv', path = inpath)
filenames
filenames <- paste(inpath, filenames, sep = '')
rawnets <- lapply(filenames, read.csv, header = F, stringsAsFactors = F, row.names=1)
names(rawnets) <- gsub('.*\\/', '', filenames)
names(rawnets) <- gsub('_.+', '', names(rawnets))
# Binarise each interaction matrix: every non-zero count in rows 2..n
# becomes 1 (presence/absence). Row 1 is left untouched (presumably
# site/metadata labels consumed by r_network_gen() -- confirm there).
# seq_along() is safe if the file list is ever empty, unlike 1:length().
for(i in seq_along(rawnets)){
  rawnets[[i]][2:nrow(rawnets[[i]]),] <- ifelse(rawnets[[i]][2:nrow(rawnets[[i]]), ] == 0, 0, 1)
}
# Build the observed site networks at every clustering level, then compute
# the observed value of each network metric for each site.
netlists <- lapply(rawnets, function(x) r_network_gen(input = x, collapse_species = TRUE, filter_species = TRUE))
names(netlists) <- names(rawnets)

actual_vals_list <- list()
z <- 1  # running index into actual_vals_list
for(ind in unique(rand_vals$metric_used)){
  # seq_along() is safe for an empty list, unlike 1:length()
  for(i in seq_along(netlists)){
    # networklevel() computes one higher-level metric value per site
    # network (presumably from the bipartite package, loaded upstream --
    # confirm against r_network_gen.r)
    vals_list <- lapply(netlists[[i]], function(x) networklevel(x, index = ind, level = 'higher'))
    out_df <- bind_rows(vals_list) %>%
      rename_at(1, ~"actual" ) %>%
      mutate(site = names(vals_list),
             metric_used = ind,
             clustering_level = names(netlists)[i]) %>%
      # list names are the clustering percentages, e.g. "95"
      mutate(clustering_level = as.numeric(clustering_level))
    actual_vals_list[[z]] <- out_df
    z <- z + 1
  }
}
actual_vals <- bind_rows(actual_vals_list)
# Calculate 'z-scores' ----------------------------------------------------
# Null-model summary: mean and SD of the randomised metric values for each
# site x metric x clustering level x null-model ('fixedmargins') cell.
summary_vals <- rand_vals %>%
  group_by(site, metric_used, clustering_level, fixedmargins) %>%
  summarise(mean_rand = mean(metric_value),
            sd_rand = sd(metric_value))
# Standardise each observed value against its null distribution:
# z = (observed - mean_null) / sd_null.
z_vals <- summary_vals %>%
  ungroup() %>%
  # combine the datasets (left_join with no `by` matches on all shared
  # columns -- here site, metric_used and clustering_level)
  left_join(actual_vals) %>%
  # calculate the 'z' score
  mutate(z = (actual - mean_rand)/sd_rand) %>%
  # make some of the variables prettier for the outputs
  mutate(site = gsub('DANUM', 'Danum', site),
         site = gsub('MALIAU', 'Maliau', site),
         metric_used = gsub('functional complementarity', 'Functional complementarity',
                            metric_used),
         metric_used = gsub('weighted NODF', 'WNODF', metric_used))
# write a csv of all of the values in their raw form
write_csv(z_vals, 'results/z_scores/all_z_scores.csv')
# then write one csv per metric: observed values rounded to 2 dp,
# z-scores to 3 dp, plus a logging-treatment label per site.
# NOTE(review): dplyr's do() is superseded; if revisited, consider
# group_walk() with .keep = TRUE (so the Metric column is retained).
z_vals %>%
  select(-mean_rand, -sd_rand) %>%
  mutate(actual = round(actual, digits = 2),
         z = round(z, digits = 3),
         Treatment =ifelse(site == 'SAFE', 'Logged', 'Old growth')) %>%
  rename(Site = site, Metric = metric_used,
         `Clustering threshold` = clustering_level,
         `Observed Value` = actual) %>%
  group_by(Metric) %>%
  # do() runs once per Metric group; `.` is that group's data frame
  do(write_csv(., paste0('results/z_scores/', unique(.$Metric), "_zscores.csv")))
# Finally, one table to rule them all... one actual/z column pair per
# metric, joined on site x clustering level.
met_list <- list()
cols <- c('actual', 'z')
for(met in unique(z_vals$metric_used)){
  met_list[[met]] <- z_vals %>%
    filter(metric_used == met) %>%
    rename_at(cols, list( ~paste( ., met, sep = '_') ) ) %>%
    select(-metric_used, - mean_rand, - sd_rand)
}
# BUG FIX: the original left_join(met_list[[1]], met_list[[2]],
# met_list[[3]], by = ...) silently matched met_list[[3]] to left_join()'s
# `copy` argument, so the third metric never made it into mets_df.
# Chain the joins instead, as nets_df below already does.
# NOTE(review): `fixedmargins` is not in the join keys, so it is
# suffixed .x/.y and rows fan out across null models -- consider adding
# it to `by` if mets_df is ever used downstream.
mets_df <- met_list[[1]] %>%
  left_join(met_list[[2]], by = c('site', 'clustering_level')) %>%
  left_join(met_list[[3]], by = c('site', 'clustering_level'))
net_list <- list()
cols <- c('actual', 'both', 'columns')
for(net in unique(z_vals$site)){
net_list[[net]] <- z_vals %>%
filter(site == net) %>%
select(-site, - mean_rand, - sd_rand) %>%
mutate(actual = round(actual, digits = 3),
z = round(z, digits = 3)) %>%
pivot_wider(names_from = fixedmargins, values_from = z) %>%
rename_at(cols, list( ~paste( ., net, sep = '_') ) )
}
nets_df <- left_join(net_list[[1]], net_list[[2]],
by = c('metric_used', 'clustering_level')) %>%
left_join(net_list[[3]], by = c('metric_used', 'clustering_level'))
write_csv(nets_df, path = 'results/z_scores/grouped_by_metric.csv')
# Plot --------------------------------------------------------------------
ggplot(rand_vals, aes(x = clustering_level, y = metric_value)) +
geom_violin() +
facet_grid(metric_used ~ site, scales = 'free') +
theme_classic()
for_plot <- z_vals %>%
pivot_longer(cols = c('actual', 'z'), names_to = 'var_type',
values_to = 'var_value') %>%
mutate(metric = gsub('Functional complementarity',
'Functional\ncomplementarity', metric_used))
actual_points <- ggplot(filter(for_plot, var_type == 'actual'),
aes(x = clustering_level, y = var_value, colour = site)) +
geom_point() +
scale_colour_viridis_d()+
scale_x_continuous(breaks = c(91:98))+
facet_wrap(. ~ metric_used, scales = 'free_y', ncol = 1,
# sort the facet label placement
strip.position = 'left') +
theme_bw() +
theme(legend.position = 'none',
# sort the facet label placement
strip.placement = "outside",
strip.text.y = element_text(size = 8),
#axis.title.y=element_blank(),
axis.title.x=element_blank())+
ylab('Observed value')
actual_points
z_points <- ggplot(filter(for_plot, var_type == 'z'),
aes(x = clustering_level, y = var_value,
colour = site)) +
geom_point(size = 2) +
scale_colour_viridis_d()+
scale_x_continuous(breaks = c(91:98))+
facet_wrap(. ~ metric_used, ncol = 1) +
theme_bw() +
theme(legend.position = 'none',
axis.title.x=element_blank(),
# remove facet label
strip.text = element_blank(),
#axis.title.y=element_blank()
) +
ylab('Z-value')
z_points
# Extract the legend ("guide-box") grob from a ggplot so it can be drawn
# on its own, e.g. as a shared legend under a composite figure.
g_legend <- function(a.gplot) {
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  # vapply() always yields a character vector, even with zero grobs,
  # unlike sapply() whose return type depends on its input
  leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
  tmp$grobs[[leg]]
}
legend <- ggplot(for_plot,
aes(x = clustering_level, y = var_value, colour = site)) +
geom_point() +
scale_colour_viridis_d(name = 'Network')+
theme_bw()+
theme(legend.position = 'bottom')
legend <- g_legend(legend)
pdf('plots/z_plot_unedited.pdf')
grid_plot <- gridExtra::grid.arrange(actual_points, z_points,
grid::textGrob('Clustering threshold'),
legend,
layout_matrix = rbind(c(1,2),
c(3,3),
c(4,4)),
heights = unit(c(3.3, 0.3, 0.3), c('in', 'in', 'in')))
dev.off()
# Alternative z and observed plot -----------------------------------------
for_alt_z_plot <- z_vals %>%
#select only necessary columns
select(-mean_rand, -sd_rand) %>%
# start making it longer for ggplot
pivot_longer(cols = c('actual', 'z'), names_to = 'plotting_var_name',
values_to = 'plotting_value') %>%
unite(column_category, fixedmargins, plotting_var_name, sep = '_') %>%
# this has given us duplicates for the 'actual' value, as we have 'both_actual'
# and 'columns_actual', both of which are crap and unnecessary. If we strip
# the text before the underscore, we can then remove duplicate rows easily,
# as the duplicate rows will then be identical
mutate(column_category = gsub('.+_actual', 'actual', column_category)) %>%
distinct()
ggplot(for_alt_z_plot, aes(x = column_category, y = clustering_level))+
geom_tile(aes(fill = plotting_value))+
geom_text(aes(label = round(plotting_value, 3))) +
facet_grid(site ~ metric_used)
# Now for the ranges plot -------------------------------------------------
vals_for_range_plot <- rand_vals %>%
# group by relevant variables before we calculate the quantiles
group_by(metric_used, clustering_level, site, fixedmargins) %>%
# calculate the quantiles of the random values
summarise(min_quantile = quantile(x = metric_value, probs = 0.025),
max_quantile = quantile(x = metric_value, probs = 0.975)) %>%
# now add the actual observed values to the tibble
left_join(actual_vals) %>%
# we only want to plot the dots if they fall outside of the random ranges,
# so we need to make a column saying if they fall within the range or not
mutate(to_plot = ifelse(actual >= min_quantile &
actual <= max_quantile,
F, T),
# now we need a column where the observed value is only present if it's
# outside the ranges
for_plotting = ifelse(to_plot == T, actual, NA)) %>%
# make the names of the variables nicer
mutate(site = gsub('DANUM', 'Danum', site),
site = gsub('MALIAU', 'Maliau', site),
metric_used = gsub('discrepancy', 'Discrepancy', metric_used),
metric_used = gsub('weighted NODF', 'Weighted NODF', metric_used),
metric_used = gsub('functional complementarity', 'Functional Complementarity', metric_used),
fixedmargins = gsub('both', 'Both margin\nsums retained', fixedmargins),
fixedmargins = gsub('columns', 'Column\nsums retained', fixedmargins))
# palette to use
cbPalette <- c("#5a3fa8",
"#d4c200",
"#a4c7ff")
# pasted in from other script, needs editing!
#plot
metrics_facet <- ggplot(vals_for_range_plot, aes(y =clustering_level, colour = site))+
geom_errorbarh(aes(xmin=min_quantile, xmax=max_quantile, colour = site),
height = 0.4, alpha = 0.8, show.legend = F)+
geom_point(aes(y = clustering_level, x = for_plotting, size = 1.4))+
facet_grid(fixedmargins ~ metric_used, scales = 'free_x'#, ncol = 2
)+
scale_colour_manual(values=cbPalette, name = 'Observed network\nvalue')+
theme_bw()+
theme(panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
strip.placement = "outside",
strip.background =element_rect(fill="white", colour = 'white'),
text = element_text(size=20),
legend.position="bottom")+
labs(y = 'Clustering %', x = NULL) +
# tell ggplot to plot the colour in the legend but not the size
guides(colour = "legend", size = "none") +
# make the points in the legend bigger
guides(colour = guide_legend(override.aes = list(size=4.5)))
metrics_facet
ggsave('plots/randomized_ranges/both_nullmodels.pdf', metrics_facet, width = 14)
|
1ebecc8ad79ebedbf83490839b8ee525343a2d2e | 77d97516bf14d24a1cb4ee0273d30711948e2ae0 | /course-project-2.R | d5b094b5fa7c1ac653ed0a3c734a2429dd54baa9 | [] | no_license | nshahpazov/probability-and-statistics-and-a-bit-of-markov-chains | 74a4daab7b5996899e45dc84c9c6a122661f29e8 | 56c7dcb7d231d4bd905765a2ef1696bf22d886b1 | refs/heads/master | 2022-12-28T02:40:38.283618 | 2020-10-16T18:23:51 | 2020-10-16T18:23:51 | 304,707,006 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,241 | r | course-project-2.R | y = c(10, 6, 5, 12, 10, 15, 5, 12, 17, 20)
# Exploratory regression exercises (course scratch work).
# NOTE(review): this script references several objects never defined here
# (serror1, X, betas2, y2, Auto) -- presumably created in an earlier
# session or another script. It will not run top-to-bottom as-is.
x <- c(1.30, 2, 1.7, 1.5, 1.6, 1.2, 1.6, 1.4, 1, 1.10)
plot(x, y)

# Prediction standard error at x = 1.63 (simple linear regression;
# 1.1 = 1 + 1/n with n = 10). NOTE(review): serror1 is undefined here.
predicted_error <- serror1 * sqrt(1.1 + ((1.63 - mean(x))^2) / (sum((x - mean(x))^2)))
x1 <- c(1.63)
X[, 2]  # NOTE(review): X is not defined in this file
# Same prediction error written in matrix form
pred1.2 <- serror1 *
  sqrt(1.1 +
         t(x1 - mean(X[, 2])) %*%
         solve(
           t(X[, 2] - mean(X[, 2])) %*% (X[, 2] - mean(X[, 2]))
         )
       %*% (x1 - mean(X[, 2])))
# Prediction interval bound (2.306004 is presumably qt(0.975, 8) -- confirm)
8.4398 + 2.306004 * predicted_error

# 7 x 4 design matrix (intercept plus three contrast-style columns)
X2 <- matrix(c(
  1, 1, 1, 1, 1, 1, 1,
  -3, -2, -1, 0, 1, 2, 3,
  5, 0, -3, -4, -3, 0, 5,
  -1, 1, 1, 0, -1, -1, 1),
  nrow = 7, ncol = 4)
lm(y ~ X2[, -1])  # NOTE(review): y has 10 elements but X2 has 7 rows
# OLS coefficients via the normal equations
bhats <- solve(t(X2) %*% X2) %*% t(X2) %*% y
X2 %*% betas2  # NOTE(review): betas2 is undefined -- did you mean bhats?
serror2 <- deviance(lm(y2 ~ X2[, -1])) / 3  # NOTE(review): y2 is undefined
means <- c(mean(X2[, 2]), mean(X2[, 3]), mean(X2[, 4]))
pred2 <- serror2 *
  sqrt(1 + 1 / 7 + t(X2[5, -1]) %*% solve(t(X2[, -1]) %*% X2[, -1]) %*% X2[5, -1])

# testing the hypothesis that beta1 = beta2 = 0 (general linear hypothesis)
A <- matrix(c(0, 1, 0, 0, 0, 0, 1, 0), nrow = 2, ncol = 4, byrow = TRUE)
c <- c(0, 0)  # NOTE(review): shadows base::c as a variable name
Abhat <- A %*% bhats
# NOTE(review): T and F shadow the TRUE/FALSE shortcuts from here on
T <- solve(A %*% solve(t(X2) %*% X2) %*% t(A))
# NOTE(review): an F statistic normally divides by (q * s^2); as written
# this multiplies by serror2 -- verify the intended formula
F <- (t((Abhat)) %*% T %*% Abhat) / 2 * serror2
# (removed a stray bare `csv` token here that errored when sourced)

# Ridge regression sketch on the Auto data (lambda = 5)
mpg <- Auto[, 1]  # NOTE(review): Auto is not loaded in this file
dm <- data.matrix(Auto[, -c(1, 8, 9)])
lambda <- 5
design <- cbind(intercept = 1, dm)
# NOTE(review): this mixes X2 (7x4) with mpg/design built from Auto --
# the dimensions cannot conform; `design` was presumably intended here
bhats <- solve(t(X2) %*% X2 + lambda * diag(ncol(design))) %*% t(X2) %*% mpg
X2 %*% bhats
|
a626652153a2ea75fb29f3d682f52a49b2678278 | 4624804dd0051a24e0615b3ce58e3632392d9011 | /src/Multiple-Linear-Regression.R | cb37961faede72548faa2207b8a6b6bd34e50f20 | [] | no_license | hornedfrog88/csx415-project | d81d50325c912917ac8882266301e0271b4aad94 | 1c4a7ea19388e5cce0cc680e4549035b434dcdc8 | refs/heads/master | 2020-03-09T12:00:29.261416 | 2018-05-31T01:06:17 | 2018-05-31T01:06:17 | 128,775,027 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 816 | r | Multiple-Linear-Regression.R | #Multiple Linear Regression Model
# Scatter plot of total enrollment vs total capacity, coloured by course
plot_linear <- ggplot(final_training_set,aes(TotalCapacity,EnrlTotal,color = CourseID)) +
  geom_point()
plot_linear
# Fit an OLS model of enrollment on capacity, section count, course and term
l_fit <- lm(EnrlTotal ~ TotalCapacity +
              TotalSections + CourseID + Term, data = final_training_set)
summary(l_fit)
# Predict on the held-out test set and report MAE and RMSE of the
# predictions (MAE()/RMSE() presumably come from caret -- confirm which
# package is attached upstream)
predict_linear <- predict(l_fit, final_testing_set)
MAE_linear <- MAE(final_testing_set$EnrlTotal,predict_linear)
RMSE_linear <- RMSE(final_testing_set$EnrlTotal,predict_linear)
print(paste("The Mean Absolute Error of the Prediction is", round(MAE_linear, digits = 2)))
print(paste("The Root Mean Squared Error of the Prediction is", round(RMSE_linear, digits = 2)))
|
ef978efe72872658b69c109d423a4c4c25dd92cc | bd7a27bbf96451f94d0cf3e094f5aba70c153e3e | /src/3_plotting.R | 9a1948aef57915a1023cfc19b5ceb7b0e99a2a01 | [] | no_license | KumarNarendra0619/COVIDDemographyUK | 79ff09b8edd11dcd26eca6fe8641d88b15a3870c | a3749ad7fdd6a3011ecdd0271bb05bcb39678e75 | refs/heads/master | 2023-03-18T08:51:53.041127 | 2020-05-31T08:03:42 | 2020-05-31T08:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,317 | r | 3_plotting.R | #===============================================================================
# 2020-03-20 -- covid19-dem
# Produce plots for the paper
# Ilya Kashnitsky, ilya.kashnitsky@gmail.com
# Review: Mark Verhagen
#===============================================================================
library(tidyverse)
library(magrittr)
library(sf)
library(patchwork)
library(biscale)
# theming packages
library(hrbrthemes)
library(cowplot)
library(showtext)
library(ggplot2)
library(ggthemes)
library(leaflet)
font_add_google("Roboto Condensed", "Roboto Condensed")
font_add_google("Roboto Slab", "Roboto Slab")
showtext::showtext_auto()
dir.create("figs_final")
# define own theme
own_theme <- cowplot::theme_map(font_size = 14, font_family = font_rc)+
ggplot2::theme(
legend.position = c(.1, .75),
plot.title = element_blank()
)
bi_theme <- cowplot::theme_map(font_size = 14, font_family = font_rc)+
ggplot2::theme(
legend.position = "none",
plot.title = element_blank()
)
# Arrange two ggplots side by side with patchwork, tag the panels "A"/"B",
# and apply the shared caption/tag theming used throughout the figures.
#
# Args:
#   a, b: ggplot objects (left and right panels)
#   ...:  currently unused; kept so existing calls still work
#
# Returns: a patchwork object (print or ggsave() it like a ggplot).
own_plot_grid <- function(a, b, ...) {
  # require() returns FALSE rather than erroring when a package is
  # missing, so fail loudly if patchwork cannot be attached
  if (!require(patchwork, quietly = TRUE)) {
    stop("own_plot_grid() needs the 'patchwork' package", call. = FALSE)
  }
  (a + b) +
    plot_layout(ncol = 2)+
    plot_annotation(
      tag_levels = "A",
      theme = theme(
        plot.title = element_blank(),
        plot.caption = element_text(family = font_rc, size = 12)
      )
    )&
    theme(plot.tag = element_text(family = "Roboto Slab", size = 24, face = 2))
}
# load the prepared data
load("data/ready.rda")
load("data/final_eco.rda")
# Fig 1 ------------------------------------------------------------
## ------ Capacity plots ------ ##
# general hospital bed capacity, regional level
agg_region_s %>%
ggplot() +
geom_sf(aes(fill = pc_capacity), color = NA)+
geom_sf(data = agg_region_b, size = .5, color = "#fafafa")+
geom_sf(
data = cities, size = 2, shape = 21, stroke = 1,
color = "#444444", fill = "#ffffff"
)+
geom_sf_text(
aes(label = pc_capacity %>% round(1)),
size = 5, color = "#333333",
family = font_rc, fontface = 2,
nudge_y = -2e4
)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Beds per\n1,000",
palette = 'Blues', direction = 1,
breaks = seq(1.6, 2.2, .2)
) +
own_theme
region_general_capacity <- last_plot()
# acute hospital bed capacity, regional level
agg_region_s %>%
ggplot() +
geom_sf(aes(fill = pc_capacity_acute), color = NA)+
geom_sf(data = agg_region_b, size = .5, color = "#fafafa")+
geom_sf(
data = cities, size = 2, shape = 21, stroke = 1,
color = "#444444", fill = "#ffffff"
)+
geom_sf_text(
aes(label = pc_capacity_acute %>% round(2)),
size = 5, color = "#333333",
family = font_rc, fontface = 2,
nudge_y = -2e4
)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Beds per\n1,000",
palette = 'Reds', direction = 1,
breaks = seq(.06, .12, .02)
) +
own_theme
region_acute_capacity <- last_plot()
# one
fig_01 <- own_plot_grid(
region_general_capacity,
region_acute_capacity
)
ggsave(filename = "figs_final/fig-01.svg",
fig_01,
width = 10, height = 7)
# Fig 2 -------------------------------------------------------------------
## ------ Expected Hospitalisation plots ------ ##
# Expected Hospitalisation general care, ccounty level
agg_ccounty_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities, size = 10, shape = 1, stroke = 0.6, color = "#373737")+
geom_sf_text(aes(label = pc_hosp %>% round(1)),
size = 4, color = "#333333",
family = font_rc, fontface = 3)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Hosp. per\n1,000",
palette = 'YlGnBu', direction = 1
) +
own_theme
ccounty_expected_hosp_demand <- last_plot()
# Expected Hospitalisation acute care, ccounty level
agg_ccounty_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp_acute), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities, size = 10, shape = 1, stroke = 0.6, color = "#373737")+
geom_sf_text(aes(label = pc_hosp_acute %>% round(1)),
size = 4, color = "#333333",
family = font_rc, fontface = 3)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Hosp. per\n1,000",
palette = 'RdPu', direction = 1
) +
own_theme
ccounty_expected_hosp_acute_demand <- last_plot()
fig_02 <- own_plot_grid(
ccounty_expected_hosp_demand,
ccounty_expected_hosp_acute_demand
)
ggsave(filename = "figs_final/fig-02.svg",
fig_02,
width = 10, height = 7)
# Fig 3 ------------------------------------------------------------------
## ------ Excess demand per 1,000 plots based on 10% ------ ##
# Excess general care demand given 10% infection rate, ccounty level
agg_ccounty_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = abs_excess_demand_hosp), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities, size = 10, shape = 1, stroke = 0.6, color = "#373737")+
geom_sf_text(aes(label = abs_excess_demand_hosp %>% round(1)),
size = 4, color = "#333333",
family = font_rc, fontface = 3)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Excess Need\nper 1,000",
palette = 'PuBuGn', direction = 1
) +
own_theme
ccounty_abs_diff_hosp_demand <- last_plot()
# Excess acute care demand given 10% infection rate, ccounty level
agg_ccounty_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = abs_excess_demand_hosp_acute), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities, size = 10, shape = 1, stroke = 0.6, color = "#373737")+
geom_sf_text(aes(label = abs_excess_demand_hosp_acute %>% round(1)),
size = 4, color = "#333333",
family = font_rc, fontface = 3)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Excess Need\nper 1,000",
palette = 'BuPu', direction = 1
) +
own_theme
ccounty_abs_diff_hosp_acute_demand <- last_plot()
fig_03 <- own_plot_grid(
ccounty_abs_diff_hosp_demand,
ccounty_abs_diff_hosp_acute_demand
)
ggsave(filename = "figs_final//fig-03.svg",
fig_03,
width = 10, height = 7)
# Fig 4 -------------------------------------------------------------------
## LSOA expected hospitalization (per 1,000) for 10% infection, London
# generate custom palette
pal <- RColorBrewer::brewer.pal(11, "BrBG")[c(11,3)]
# load highlight data
london_highlight_df <- readRDS("data/london_highlight.rds")
# hack to fix geom_step aligning
repeat_last_obs <- function(df) bind_rows(df, df %>% filter(age == "90+") %>% mutate(age = NULL))
# set labels
age_labels <- c("0-4", "5-9", "10-14", "15-19", "20-24", "25-29", "30-34",
"35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69",
"70-74", "75-79", "80-84", "85-89", "90+", NULL)
# age distribution graph
london_highlight_df %>%
mutate(name = LSOA) %>%
dplyr::select(-3:-1) %>%
pivot_longer(cols = contains("Age_"), names_to = "age") %>%
mutate(
age = age %>%
str_remove("Age_") %>%
str_replace("_plus", "+") %>%
fct_inorder()
) %>%
repeat_last_obs() %>%
ggplot(aes(age, value * 100, color = name, group = name))+
geom_hline(yintercept = 0, color = "#666666", size = .5)+
geom_step(size = 1, position = position_nudge(-.5))+
coord_flip(xlim = c(.5, 19.5), # need to get rig of the hack row, which is 20
ylim = c(26.5, 0),
expand = FALSE)+
scale_x_discrete(position = "top", breaks = age_labels[-20], labels = age_labels)+
scale_y_reverse(position = "right")+
scale_color_manual(NULL, values = pal)+
theme_minimal(base_family = font_rc, base_size = 15)+
theme(legend.position = c(.25, .75))+
labs(y = "Proportion of the population, %",
x = "Age group")
london_highlight <- last_plot()
# Hospitalization risk general care, London graph
agg_lsoa_s_5 %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp), color = NA)+
geom_sf(
data = . %>%
filter(AreaCodes %in% c("E01000969", "E01002225")) %>%
st_centroid(),
aes(color = AreaCodes),
shape = 1, size = 10, stroke = .9
)+
coord_sf(datum = NA)+
scale_size_area("Beds", max_size = 10)+
scale_color_manual(NULL, values = pal, guide = NULL)+
scale_fill_fermenter(
"Hosp. \nper 1,000",
palette = 'PuBuGn', direction = 1
) +
own_theme +
theme(legend.position = c(.05, .15))
london_pc_hosp <- last_plot()
# overlay graphs
fig_04 <- ggdraw()+
draw_plot(london_pc_hosp, x =0, y = 0, width = .8, height = 1)+
draw_plot(london_highlight, x = .67, y = 0, width = .33, height = .5)
ggsave(filename = "figs_final/fig-04.pdf",
fig_04,
width = 10, height = 7)
# Including population-level variables in addition to hospitalization risk
# legends
# Fig 5--------------------------------------------------------------
## -- Population Bivariate Plots -- ##
ccg_depriv_df %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = bi_class), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
biscale::bi_scale_fill(pal = "DkBlue", dim=3) +
bi_theme
ccg_depriv <- last_plot()
ggdraw() +
draw_plot(ccg_depriv, 0, 0, 1, 1) +
draw_plot(legend_depriv, .01, 0.45, .35, .35)
ccg_depriv_f <- last_plot()
ccg_dens_df %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = bi_class), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
biscale::bi_scale_fill(pal = "DkViolet", dim=3) +
bi_theme
ccg_dens <- last_plot()
ggdraw() +
draw_plot(ccg_dens, 0, 0, 1, 1) +
draw_plot(legend_dens, .01, 0.45, .35, .35)
ccg_dens_f <- last_plot()
a <- (ccg_depriv_f + ccg_dens_f) +
plot_layout(ncol = 2)+
plot_annotation(
tag_levels = "A",
theme = theme(
plot.title = element_text(family = "Roboto Slab", size = 20, face = 2),
plot.caption = element_text(family = font_rc, size = 12)
))&
theme(plot.tag = element_text(family = "Roboto Slab", size = 24, face = 2))
ggsave(filename = "figs_final/fig-05.svg",
a,
width = 10, height = 7)
# Fig 6--------------------------------------------------------------
## -- Population Bivariate Plots -- ##
# Zoom-in on London - social deprivation and hospitalization risk
b_londen_depriv %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = bi_class, color = NA)) +
coord_sf(datum = NA)+
scale_size_area("Beds", max_size = 10)+
# geom_sf(data = agg_lsoa_5_b, size = .15, color = "#CAC9C9")+
scale_color_manual(NULL, values = pal, guide = NULL)+
biscale::bi_scale_fill(pal = "DkBlue", dim=3) +
bi_theme
london_depriv <- last_plot()
ggdraw() +
draw_plot(london_depriv, 0, 0, 1, 1) +
draw_plot(legend_depriv, .75, 0.05, .25, .25)
london_depriv_f <- last_plot()
ggsave(filename = "figs_final/fig-06.png",
london_depriv_f,
width = 10, height = 7)
# Fig 7--------------------------------------------------------------
# Zoom-in on London - ethnicity and hospitalization risk
b_londen_eth %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = bi_class, color = NA)) +
coord_sf(datum = NA)+
scale_size_area("Beds", max_size = 10)+
# geom_sf(data = agg_lsoa_5_b, size = .15, color = "#CAC9C9")+
scale_color_manual(NULL, values = pal, guide = NULL)+
biscale::bi_scale_fill(pal = "DkCyan", dim=3) +
bi_theme
london_eth <- last_plot()
ggdraw() +
draw_plot(london_eth, 0, 0, 1, 1) +
draw_plot(legend_eth, .75, 0.05, .25, .25)
london_eth_f <- last_plot()
ggsave(filename = "figs_final/fig-07.png",
london_eth_f,
width = 10, height = 7)
# Fig 8--------------------------------------------------------------
# Zoom-in on Manchester - social deprivation and hospitalization risk
man_highlight_df <- readRDS("data/man_highlight.rds")
man_highlight_df %>%
mutate(name = LSOA) %>%
dplyr::select(-3:-1) %>%
pivot_longer(cols = contains("Age_"), names_to = "age") %>%
mutate(
age = age %>%
str_remove("Age_") %>%
str_replace("_plus", "+") %>%
fct_inorder()
) %>%
repeat_last_obs() %>%
ggplot(aes(age, value * 100, color = name, group = name))+
geom_hline(yintercept = 0, color = "#666666", size = .5)+
geom_step(size = 1, position = position_nudge(-.5))+
coord_flip(xlim = c(.5, 19.5), # need to get rig of the hack row, which is 20
ylim = c(26.5, 0),
expand = FALSE)+
scale_x_discrete(position = "top", breaks = age_labels[-20], labels = age_labels)+
scale_y_reverse(position = "right")+
scale_color_manual(NULL, values = pal)+
theme_minimal(base_family = font_rc, base_size = 15)+
theme(legend.position = c(.25, .75))+
labs(y = "Proportion of the population, %",
x = "Age group")
man_highlight <- last_plot()
b_man <- agg_lsoa_man %>%
left_join(LSOA_eco_vars)
b_man_depriv <- biscale::bi_class(b_man, x=pc_hosp, y=depriv)
b_man_depriv %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = bi_class, color = NA)) +
coord_sf(datum = NA)+
geom_sf(
data = . %>%
filter(AreaCodes %in% c("E01005654", "E01006084")) %>%
st_centroid(),
aes(color = AreaCodes),
shape = 1, size = 10, stroke = .9
)+
scale_size_area("Beds", max_size = 10)+
# geom_sf(data = agg_lsoa_5_b, size = .15, color = "#CAC9C9")+
scale_color_manual(NULL, values = pal, guide = NULL)+
biscale::bi_scale_fill(pal = "DkBlue", dim=3) +
bi_theme
man_depriv <- last_plot()
ggdraw() +
draw_plot(man_depriv, x=0, y=0.1, width=.75, height=0.9) +
draw_plot(legend_depriv, .05, 0.05, .25, .25)+
draw_plot(man_highlight, x = 0.7, y = 0, width = .33, height = .42)
man_depriv_f <- last_plot()
ggsave(filename = "figs_final/fig-08.pdf",
man_depriv_f,
width = 10, height = 7)
### ---------- SUPPLEMENTARY GRAPHS ---------- ###
# Fig S1 -------------------------------------------------------------------
## ------ Capacity plots ------ ##
# general hospital bed capacity, ccounty level
agg_ccounty_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_capacity), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities,
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
geom_sf_text(aes(label = pc_capacity %>% round(1)),
size = 4, color = "#333333",
family = font_rc, fontface = 3)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Beds per\n1,000",
palette = 'Blues', direction = 1
) +
own_theme
ccounty_general_capacity <- last_plot()
# acute hospital bed capacity, ccounty level
agg_ccounty_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_capacity_acute), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities, size = 10, shape = 1, stroke = 0.6, color = "#373737")+
geom_sf_text(aes(label = pc_capacity_acute %>%
round(2) %>% str_replace("0.", ".")),
size = 4, color = "#333333",
family = font_rc, fontface = 3)+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Beds per\n1,000",
palette = 'Reds', direction = 1
) +
own_theme
ccounty_acute_capacity <- last_plot()
# s01
fig_s_01 <- own_plot_grid(
ccounty_general_capacity,
ccounty_acute_capacity
)
ggsave(filename = "figs_final/fig-s01.svg",
fig_s_01,
width = 10, height = 7)
# Fig s02 -------------------------------------------------------------------
## ------ Capacity plots ------ ##
# general hospital bed capacity, CCG level
agg_ccg_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_capacity), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Beds per\n1,000",
breaks = c(1e-8, .5, 1, 2, 5),
labels = c(0, .5, 1, 2, 5),
palette = 'Blues', direction = 1
) +
own_theme
ccg_general_capacity <- last_plot()
# acute hospital bed capacity, CCG level
agg_ccg_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_capacity_acute), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Beds per\n1,000",
breaks = c(1e-8, .1, .25, .5, 1),
labels = c(0, .1, .25, .5, 1),
palette = 'Reds', direction = 1
) +
own_theme
ccg_acute_capacity <- last_plot()
fig_s02 <- own_plot_grid(
ccg_general_capacity,
ccg_acute_capacity
)
ggsave(filename = "figs_final/fig-s02.svg",
fig_s02,
width = 10, height = 7)
# Fig s03 -------------------------------------------------------------------
## ------ Expected Hospitalisation plots ------ ##
# Expected general care hospitalisation, ccg level
agg_ccg_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Hosp. per\n1,000",
palette = 'YlGnBu', direction = 1
) +
own_theme
ccg_expected_hosp_demand <- last_plot()
# Expected acute care hospitalisation, ccg level
agg_ccg_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp_acute), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Hosp. per\n1,000",
palette = 'RdPu', direction = 1
) +
own_theme
ccg_expected_hosp_acute_demand <- last_plot()
fig_s03 <- own_plot_grid(
ccg_expected_hosp_demand,
ccg_expected_hosp_acute_demand
)
ggsave(filename = "figs_final/fig-s03.svg",
fig_s03,
width = 10, height = 7)
# Fig s04 -----------------------------------------------------------------
## -- Excess demand plots -- ##
# Excess general care hospital demand (per 1,000), ccg level
agg_ccg_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = abs_excess_demand_hosp), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Excess Need\nper 1,000",
palette = 'PuBuGn', direction = 1
) +
own_theme
ccg_abs_diff_hosp_demand <- last_plot()
# Excess acute care hospital demand (per 1,000), ccg level
agg_ccg_s %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = abs_excess_demand_hosp_acute), color = NA)+
geom_sf(data = agg_ccounty_b, size = .25, color = "#fafafa")+
geom_sf(data = agg_region_b, size = 1, color = "#fafafa")+
geom_sf(data = cities %>% filter(!name=="Cardiff"),
size = 10, shape = 1, stroke = 0.6, color = "#373737")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Excess Need\nper 1,000",
palette = 'BuPu', direction = 1
) +
own_theme
ccg_abs_diff_hosp_acute_demand <- last_plot()
fig_s04 <- own_plot_grid(
ccg_abs_diff_hosp_demand,
ccg_abs_diff_hosp_acute_demand
)
ggsave(filename = "figs_final/fig-s04.svg",
fig_s04,
width = 10, height = 7)
# Fig s05 -------------------------------------------------------------------
# local zoom-in on Wales
agg_lsoa_s_s5 %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp), color = NA)+
geom_sf(data = agg_ccounty_b_s5, size = .25, color = "#fafafa")+
geom_sf(data = wales_h, aes(size = beds),
shape = 1, stroke = .9, color = "#eec21f")+
coord_sf(datum = NA)+
scale_size_area("Beds", max_size = 10)+
scale_fill_fermenter(
"Hosp. per\n1,000",
palette = 'PuBuGn', direction = 1
) +
own_theme +
theme(legend.position = c(0, .6))
wales_pc_hosp <- last_plot()
# filter out the hospitals with intensive care beds
wales_h_ic <- wales_h %>% filter(intensive_care_beds > 0)
# Excess demand acute LSOA Wales
agg_lsoa_s_s5 %>%
ggplot() +
geom_sf(color = NA)+
geom_sf(aes(fill = pc_hosp_acute), color = NA)+
geom_sf(data = agg_ccounty_b_s5, size = .25, color = "#fafafa")+
geom_sf(data = wales_h_ic, aes(size = intensive_care_beds),
shape = 1, stroke = .9, color = "#df356b")+
coord_sf(datum = NA)+
scale_fill_fermenter(
"Hops. per\n1,000",
palette = 'RdPu', direction = 1
) +
scale_size_area("IC Beds", max_size = 10)+
own_theme +
theme(legend.position = c(0, .6))
wales_pc_hosp_acute <- last_plot()
fig_s05 <- own_plot_grid(
wales_pc_hosp,
wales_pc_hosp_acute
)
ggsave(filename = "figs_final/fig-s05.png",
fig_s05,
width = 10, height = 7) |
8f73026f3a0f9fa6854052efc6d500fd72e61944 | 37ba235ab81cae1120c5d52eecb669b8282df8ad | /surv_sim.R | 7afe89e8e2cf2c846254cef1b08bee9524dc6dd8 | [] | no_license | lgondara/surv_sim | 122ebd60cfb97a84249ca07415fa654445c4e324 | cc0377eef06125a4f261fc72fdc70aef7b99b934 | refs/heads/master | 2020-12-24T20:33:01.803100 | 2016-04-19T14:40:45 | 2016-04-19T14:40:45 | 56,324,089 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,211 | r | surv_sim.R | # simulate different missing data types
##### generic data setup:
set.seed(977) # this makes the simulation exactly reproducible
ni = 100 # 100 people
nj = 10 # 10 week study
id = rep(1:ni, each=nj)
cond = rep(c("control", "diet"), each=nj*(ni/2))
base = round(rep(rnorm(ni, mean=250, sd=10), each=nj))
week = rep(1:nj, times=ni)
y = round(base + rnorm(ni*nj, mean=0, sd=1))
# MCAR
prop.m = .07 # 7% missingness
mcar = runif(ni*nj, min=0, max=1)
y.mcar = ifelse(mcar<prop.m, NA, y) # unrelated to anything
View(cbind(id, week, cond, base, y, y.mcar))
# MAR
y.mar = matrix(y, ncol=nj, nrow=ni, byrow=TRUE)
for(i in 1:ni){
for(j in 4:nj){
dif1 = y.mar[i,j-2]-y.mar[i,j-3]
dif2 = y.mar[i,j-1]-y.mar[i,j-2]
if(dif1>0 & dif2>0){ # if weight goes up twice, drops out
y.mar[i,j:nj] = NA; break
}
}
}
y.mar = as.vector(t(y.mar))
View(cbind(id, week, cond, base, y, y.mar))
# NMAR
sort.y = sort(y, decreasing=TRUE)
nmar = sort.y[ceiling(prop.m*length(y))]
y.nmar = ifelse(y>nmar, NA, y) # doesn't show up when heavier
View(cbind(id, week, cond, base, y, y.nmar))
# Attach the packages needed for the survival simulations below.
# NOTE(review): the original called install.packages("survsim")
# unconditionally, re-downloading the package on every run, and used
# require() (which only warns on failure); install only when missing and
# attach with library() so a missing dependency fails loudly.
library(survival)
if (!requireNamespace("survsim", quietly = TRUE)) {
  install.packages("survsim")
}
library(survsim)
library(ggplot2)
dist.ev <- "weibull"
anc.ev <- 1
beta0.ev <- 5.268
dist.cens <- "weibull"
anc.cens <- 1
beta0.cens <- 5.368
x <- list(c("bern", 0.3), c("bern", 0.4))
beta <- list(-0.4, -0.25)
##full data
store.coef=matrix(data=NA,nrow=100,ncol=2)
for (i in 1:100) {
simple.dat <- simple.surv.sim(300, 365, dist.ev, anc.ev, beta0.ev,dist.cens, anc.cens, beta0.cens, , beta, x)
full.model=coxph(Surv(start,stop,status)~x+x.1, data=simple.dat)
store.coef[i,1]=full.model$coef[1]
store.coef[i,2]=full.model$coef[2]
}
plot.model.1=as.data.frame(store.coef[,1])
plot.model.1$coef=1
colnames(plot.model.1)[colnames(plot.model.1)=="store.coef[, 1]"] <- "coef.val"
plot.model.2=as.data.frame(store.coef[,2])
plot.model.2$coef=2
colnames(plot.model.2)[colnames(plot.model.2)=="store.coef[, 2]"] <- "coef.val"
plot.model=rbind(plot.model.1,plot.model.2)
aggregate(plot.model$coef.val,by=list(plot.model$coef),FUN=mean, na.rm=TRUE)
aggregate(plot.model$coef.val,by=list(plot.model$coef),FUN=sd, na.rm=TRUE)
p <- ggplot(plot.model, aes(factor(coef), coef.val))
p + geom_boxplot()
##MCAR
# MCAR
prop.m = .1 # 10% missingness
store.coef=matrix(data=NA,nrow=100,ncol=2)
for (i in 1:100) {
simple.dat <- simple.surv.sim(300, 365, dist.ev, anc.ev, beta0.ev,dist.cens, anc.cens, beta0.cens, , beta, x)
mcar = runif(300, min=0, max=1)
simple.dat$status = ifelse(mcar<prop.m, NA, simple.dat$status)
full.model=coxph(Surv(start,stop,status)~x+x.1, data=simple.dat)
store.coef[i,1]=full.model$coef[1]
store.coef[i,2]=full.model$coef[2]
}
plot.model.1=as.data.frame(store.coef[,1])
plot.model.1$coef=1
colnames(plot.model.1)[colnames(plot.model.1)=="store.coef[, 1]"] <- "coef.val"
plot.model.2=as.data.frame(store.coef[,2])
plot.model.2$coef=2
colnames(plot.model.2)[colnames(plot.model.2)=="store.coef[, 2]"] <- "coef.val"
plot.model=rbind(plot.model.1,plot.model.2)
aggregate(plot.model$coef.val,by=list(plot.model$coef),FUN=mean, na.rm=TRUE)
aggregate(plot.model$coef.val,by=list(plot.model$coef),FUN=sd, na.rm=TRUE)
|
914960c3d4a0e1d7df96a5a904e0424677f8a193 | 52c6e6aff7a137956154f79812b2c987b5716b57 | /R_Analysis/Correlation_Conundrum.R | ddb9bfde640a53ef0252e365ba7a3f474f6f01cc | [] | no_license | tahira-h/MechaCar_Statistical_Analysis | 569b184f25b7d3f0cbaf2562f11c9b21d25c06b2 | 516ddd757c24e499725b9f96f9ea7e7b3ca9aca3 | refs/heads/main | 2023-04-23T08:37:17.474803 | 2021-05-17T00:34:58 | 2021-05-17T00:34:58 | 367,064,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,606 | r | Correlation_Conundrum.R | # We can use the 'geom_point()' plotting function combined with the cor() function to quantify the correlation between variables.
# Let's look at the cor() documentation. Ex: '> ?cor()'
# To use the cor() function to perform a correlation analysis between two numeric variables. Use the two arguments, 'x' and 'y'
# To practice calculating the Pearson correlation coefficient, use the mtcars dataset.
head(mtcars)
# Plot the two variables using the geom_point() function.
plt <- ggplot(mtcars,aes(x=hp,y=qsec)) #import dataset into ggplot2
plt + geom_point() #create scatter plot
# Use the cor() function to quantify the strength of the correlation between the two variables.
cor(mtcars$hp,mtcars$qsec) #calculate correlation coefficient
# Reuse the used_cars dataset.
used_cars <- read.csv('used_car_data.csv',stringsAsFactors = F) #read in dataset
head(used_cars)
# Plot the two variables using the geom_point() function.
plt <- ggplot(used_cars,aes(x=Miles_Driven,y=Selling_Price)) #import dataset into ggplot2
plt + geom_point() #create a scatter plot
# Calculate the Pearson correlation coefficient using the cor() function.
cor(used_cars$Miles_Driven,used_cars$Selling_Price) #calculate correlation coefficient
# EX: To produce a correlation matrix for the used_cars dataset,first select the numeric columns from the data frame and convert it to matrix. Then provide the numeric matrix to the cor() function.
used_matrix <- as.matrix(used_cars[,c("Selling_Price","Present_Price","Miles_Driven")]) #convert data frame into numeric matrix
cor(used_matrix)
|
83c555399736b8299c06fca3f4c6801472c2a8c7 | e2caf78a4b718b191295112b02caf940cdd42d67 | /R/setup.R | 2bc538f03ff786670ff7f644952d7ee908e60d4c | [] | no_license | cran/runexp | dc7ab8563aecf14a6b2580b96df3bcc9ae8cbeaf | 5720319ab7f653eb7721314b0596c66808bd08ef | refs/heads/master | 2023-03-23T22:04:15.229079 | 2021-03-22T04:10:02 | 2021-03-22T04:10:02 | 340,026,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,829 | r | setup.R |
# Function Contents -----------------------------------------------------------
# Internal:
# check: checks lineup and data frame input of player probabilities
# Check -----------------------------------------------------------------------
# Acts on lineup and data frame input of player probabilities
# Lineup must be single player (to be repeated) or nine players
# Matches name in lineup to names in stats (or uses player number)
# Checks that probabilities are valid
# Fills in fast player if not given
# Returns a 9-row data frame of stats in lineup order
check <- function(lineup, stats) {
  
  # Normalize column names so user-supplied capitalization does not matter
  colnames(stats) <- toupper(colnames(stats))
  
  # A single lineup entry is recycled to fill all nine positions
  if (length(lineup) == 1) {
    lineup <- rep(lineup, 9)
  } else if (length(lineup) != 9) {
    stop("Lineup must be of length 1 or 9")
  }
  
  # Resolve each lineup entry to a row of stats, by name or by number
  if (is.character(lineup)) { # match based on player name
    if (!("NAME" %in% colnames(stats))) stop("stats must have a 'NAME' column")
    if (!is.character(stats$NAME)) stats$NAME <- as.character(stats$NAME)
    # match.arg permits unambiguous partial matching of player names
    lineup <- vapply(lineup, function(nm) match.arg(nm, choices = stats$NAME),
                     character(1))
    player_index <- match(lineup, stats$NAME)
  } else if (is.numeric(lineup)) { # match based on player number
    if (!("NUMBER" %in% colnames(stats))) stop("stats must have a 'NUMBER' column")
    if (!is.numeric(stats$NUMBER)) stats$NUMBER <- as.numeric(stats$NUMBER)
    player_index <- match(lineup, stats$NUMBER)
    # Explicit error instead of a cryptic failure on an unknown number
    if (anyNA(player_index))
      stop("Lineup contains player numbers not found in stats")
  } else stop("Lineup must be either character or numeric")
  
  # All six outcome probability columns must be present
  outcomes <- c("O", "S", "D", "TR", "HR", "W")
  missing_cols <- setdiff(outcomes, colnames(stats))
  if (length(missing_cols) > 0)
    stop(paste0("Missing column '", missing_cols[1], "'"))
  
  # Steal probabilities: a supplied SBA (attempt) requires SB (success);
  # when SBA is absent entirely, both default to zero with a warning
  if ("SBA" %in% colnames(stats)) { # steal probabilities provided
    if (!("SB" %in% colnames(stats))) # success probabilities not provided
      stop("Probability of successful steal 'SB' must be specified")
  } else { # steal probabilities not provided
    stats$SBA <- rep(0, nrow(stats))
    stats$SB <- rep(0, nrow(stats))
    warning("'SBA' not specified, assigned probability zero")
  }
  
  # Players who never attempt a steal (SBA == 0) may leave SB unspecified
  no_attempt <- which(stats$SBA == 0 & is.na(stats$SB))
  stats$SB[no_attempt] <- 0
  
  # All probabilities must be non-negative
  if (!all(stats[, c("O", "S", "D", "TR", "HR", "W", "SBA", "SB")] >= 0))
    stop("All probabilities must be greater than or equal to zero")
  
  # The six outcome probabilities must sum to one (within rounding slack)
  if (!all(rowSums(stats[, c("O", "S", "D", "TR", "HR", "W")]) > 0.99) |
      !all(rowSums(stats[, c("O", "S", "D", "TR", "HR", "W")]) < 1.01))
    stop("Sum of probabilities 'O', 'S', 'D', 'TR', 'HR', and 'W' must equal one")
  
  # FAST flag: validate if supplied, otherwise infer from the SBA probability
  if ("FAST" %in% colnames(stats)) {
    if (!is.logical(stats$FAST)) stop("'FAST' column must be logical")
  } else {
    stats$FAST <- stats$SBA > 0.5
    warning("Fast players not specified, assigned using SBA probability with
            threshold 0.5")
  }
  
  # Return the cleaned stats with rows in lineup order (positions 1 - 9)
  return(stats[player_index, ])
}
|
ec7914f900928df7433475baed87fd9645cbb9b6 | dc7d233368874cd5051902116332cf3fefef8abb | /Miniproject/Code/Dataexploration.R | 03288c0a200fe77f958cc96600250e8d08b333a2 | [] | no_license | EvalImperialforces/CMEECourseWork | dee31d6bda6930ce0d768f565057bf8a7ec709aa | 2a7ea6f36c148198cd3f14e77e888af151e0e2a9 | refs/heads/master | 2020-03-30T16:06:12.470621 | 2019-01-18T21:20:38 | 2019-01-18T21:20:38 | 151,393,044 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,909 | r | Dataexploration.R | #!/usr/bin/env Rscript
# Author: Eva Linehan
# Date: November 2018
# Desc: Looking at the data
#clear environments
rm(list=ls())
library(dplyr)
#library(lattice)
library(ggplot2)
library(minpack.lm)
# Read in and observe data
DF <- read.csv("../Data/BioTraits.csv", header = TRUE)
#head(DF)
#nrow(DF)
# Use DataSeries ID and citation ID to find temperature performance IDs (wonder how they can be calculated).
# Overlay indicidually made plots for seperate IDs?
# Compare Habitat/Field
# Compare Climates
# Between trophic levels or taxa
####### Subset data by ID series and create plots #####
# For loop method
# One plot per FinalID series: subset the data, drop rows with missing trait
# values, and save a temperature-vs-trait scatter for series with >= 4 points.
IDlist <- unique(DF$FinalID)
for (i in seq_along(IDlist)) {
  subset_id <- IDlist[i] # the ID for this iteration
  # All columns of DF for this series, with missing trait values removed
  traitstosubset <- subset(DF, DF$FinalID == subset_id)
  traitstosubset <- traitstosubset[!is.na(traitstosubset$OriginalTraitValue), ]
  # NOTE(review): the original contained a malformed, Python-style inner loop
  # here ("for i in traitstosubset$ConTemp{ if i = max }") that does not parse
  # in R and had no discernible effect; it has been removed so the file runs.
  if (nrow(traitstosubset) >= 4) {
    pdf(file = paste0("../Results/Preliminary_Graphs/", IDlist[i], ".pdf"))
    print(qplot(traitstosubset$ConTemp, traitstosubset$OriginalTraitValue,
                data = traitstosubset,
                xlab = "Temperature (Degrees Celsius)", ylab = "Trait Value"))
    dev.off()
  }
}
###### Polynomials ########
# Linear
#fit_linear <- lm(traitstosubset$ConTemp ~ traitstosubset$OriginalTraitValue)
#ggplot(traitstosubset, aes(ConTemp, OriginalTraitValue)) + geom_point() + geom_smooth(method = "lm") + theme_bw()
# Quadratic
#fit_quad <- lm(traitstosubset$ConTemp ~ poly(traitstosubset$OriginalTraitValue, 2, raw = TRUE)
#ggplot(traitstosubset, aes(ConTemp, OriginalTraitValue)) + geom_point() + geom_smooth(method = "lm", formula = y~poly(x, 2, raw=TRUE)) + theme_bw()
# Cubic
#fit_cube <- lm(traitstosubset$ConTemp ~ poly(traitstosubset$OriginalTraitValue, 3, raw = TRUE)
#ggplot(traitstosubset, aes(ConTemp, OriginalTraitValue)) + geom_point() + geom_smooth(method = "lm", formula = y~poly(x, 3, raw=TRUE)) + theme_bw()
ggplot(data = traitstosubset) +
geom_point(aes(x = ConTemp, y = OriginalTraitValue), size = 0.8, colour = "black") +
geom_smooth( data = traitstosubset, aes(ConTemp, OriginalTraitValue), size = 1, colour = "darkblue", se = FALSE, stat = "smooth", method = "lm") +
geom_smooth(data = traitstosubset, aes(ConTemp, OriginalTraitValue), size = 1, colour = "red", se = FALSE, stat = "smooth", method = "lm", formula = y~poly(x, 2, raw=TRUE)) +
geom_smooth(data = traitstosubset, aes(ConTemp, OriginalTraitValue), size = 1, colour = "purple", se = FALSE, stat = "smooth", method = "lm", formula = y~poly(x, 3, raw=TRUE))
###### EARR Model #########
# Must convert to Kelvin!!!!!
# DeLong log tranformed metabolic metabolic rate data and
# fit the log- transformed EAAR model
# Must find melting temp using quadratic eqn and optimum temp using eqn 6.
#powMod <- function()
##### Briere's Model ########
# Scaling
#To improve the fit, you can use weighted least-squares regression where an additional scale factor (the weight) is included in the fitting process.
# Weighted least-squares regression minimizes the error estimate.
# Briere thermal performance model: zero outside the thermal limits (T0, Tm),
# and c * Temp * (Temp - T0) * sqrt(Tm - Temp) within them. Vectorized over Temp.
briere <- function(Temp, T0, Tm, c) {
  # 0/1 indicator that Temp lies strictly between the lower and upper limits
  within_limits <- as.numeric(Temp > T0) * as.numeric(Temp < Tm)
  c * Temp * (Temp - T0) * sqrt(abs(Tm - Temp)) * within_limits
}
scale<-20
bfit<-nlsLM(ConTemp/20 ~briere(Temp, T0, Tm, c), start = list(Temp=20, T0=0, Tm=40, c=0.1), data = traitstosubset)
temp<-seq(0,80, length=1000)
pred.b<-predict(bfit, newdata = list(temp=temp))*scale
plot(traitstosubset$ConTemp, traitstosubset$OriginalTraitName, xlim =c(0,100))
lines(temp, pred.b, col=2)
# Bound values in nlsLM
#### Schoofield Model #######
# 3 new parameters p25 T1/2
# Use Eqn 6 (4 model parameters)
# fit a linear model
# Straight line y = m * x + b; vectorized over x.
linearfunc <- function(x, m, b) {
  b + x * m
}
plot(traitstosubset$OriginalTraitValue~traitstosubset$ConTemp)
fit = nlsLM(traitstosubset$OriginalTraitValue~linearfunc(traitstosubset$ConTemp, m, b), data = traitstosubset, start = list(m = .1, b = .1))
summary(fit)
AIC(fit)
confint(fit) #
Lengths = seq(min(traitstosubset$ConTemp), max(traitstosubset$ConTemp), 1)
PF <- linearfunc(Lengths, coef(fit)["m"], coef(fit)["b"])
plot(traitstosubset$OriginalTraitValue~traitstosubset$ConTemp)
lines(Lengths, PF)
# Fit briere function
# NOTE(review): this re-defines the briere() function from earlier in the
# script with an identical body; the duplicate is kept here so the nlsLM fit
# below works even when this section is run on its own.
briere<- function(Temp, T0, Tm, c){
  return(c*Temp*(Temp-T0)*(abs(Tm-Temp)^(1/2))*as.numeric(Temp<Tm)*as.numeric(Temp>T0))
}
fit = nlsLM(traitstosubset$OriginalTraitValue~briere(traitstosubset$ConTemp, T0, Tm, c), data = traitstosubset, start = list(T0 = 10, Tm = 30, c = .1))
summary(fit)
AIC(fit)
confint(fit) # 95% CI
Lengths = seq(min(traitstosubset$ConTemp), max(traitstosubset$ConTemp), 1)
PF <- briere(Lengths, coef(fit)["T0"], coef(fit)["Tm"], coef(fit)["c"])
plot(traitstosubset$OriginalTraitValue~traitstosubset$ConTemp)
lines(Lengths, PF)
# Briere-2 (generalized Briere) thermal performance model: zero outside the
# thermal limits (T0, Tm) and a * x * (x - T0) * (Tm - x)^(1/m) within them.
# NOTE(review): the original definition was truncated and did not parse
# ("return(a*x*(x-T0)*)") and referenced an undefined 'x'. The temperature
# vector 'x' and the shape exponent 'm' are now explicit parameters; m = 2
# reproduces the standard Briere model above.
briere2 <- function(a, T0, Tm, x, m = 2) {
  a * x * (x - T0) * (abs(Tm - x)^(1 / m)) *
    as.numeric(x < Tm) * as.numeric(x > T0)
}
|
87223af168bded950da476afe4b746906ae14997 | c8dd2f636da0cd2ab9b74c7044df58e4f5111b80 | /ProduceTables.R | 0e82b79593329f190049ffac0ad2960ef0e8520c | [] | no_license | gideon1321/Table1_R_Excel_Output | 271f5b3e5b180ed94bd75cb08084cdcae0c8e228 | 3ecdeababeb409dc1da6c00b44397016dcdf415e | refs/heads/master | 2022-09-14T17:45:56.100370 | 2020-05-27T20:19:11 | 2020-05-27T20:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,441 | r | ProduceTables.R | ## Packages needed
loadInstall <- function(x) {
if (require(x, character.only = T)) return(require(x, character.only = T))
else {install.packages(x, type="source")}
}
x <- c("plyr","plyr","dplyr","stringr","rms","Hmisc","knitr")
for(i in 1:length(x)){
loadInstall(x[i])
}
#Bring in Data from github
dat1=read_csv("https://raw.githubusercontent.com/BiostatsReportAutomation/Table1_R_Excel_Output/master/baselinedata.csv")
#Bring in myTable1 function from github
source("https://raw.githubusercontent.com/BiostatsReportAutomation/Table1_R_Excel_Output/master/Table1Function.R")
#-----------------------------------------
# How to specify a test in this function:
# -----------------------------------------
# aov.t is for "ANOVA test"
# fisher.t is for "Fisher exact test"
# chisq.t is for "Chi-squared test"
# t.test is for "T-test"
# kruskal.t is for "Kruskal-Wallis test"
# wilcox.t is for "Wilcoxon ranked sum test"
#----------------------------------------------
#Now create your table1 with myTable1 function
tt=myTable1(dat=dat1, splitvar="sex",splitlabel ="Gender",
contvar=c("age","BP.sys","BP.dia","N.smokePday"), # continuous variables
contTest=c("t.test","wilcox.t","t.test","aov.t"), # Test to be applied respectively to the contvars
catvar=c("diabetic", "Treatment","Race"), # Categorical variable
catTest=c("fisher.t","fisher.t","chisq.t"), # Test to use for categorical variables
docaption = T, # Should code do caption for you ?
my.docaption="xxxxxxxx", # If false, then write caption eg. "Summaries by sex"
prmsd=c("mean","median","mean","mean"), # Specify statistics for summaries
my.loc="./tabhold/mytable1s1.tex", # location for tex file
Trace=F, # Used for my editing
pdec=2, # Decimal place for p-values
Test=F, # Test statistic column to be included in table
latexoutput=F, # Whether to spit out tex file
exceloutput=T,exceloutputName ="tablewithPvalues" , # Produce an excel file of Table 1
showtable = F)
|
ef88865ec3a73db85adf0f3f00770e84d8dd050f | f4a6173ace305e66d6aa9af8ff825283c7e85f00 | /R/man/save_chart.Rd | b0644412307aaaa1e855a83db142a333f59453e7 | [
"Apache-2.0"
] | permissive | karawoo/syndccutils | 6528f023f770c760e0c26b40f365a2ca68476269 | 550b4c0839457e3a48ff72636e028a9611065e5c | refs/heads/master | 2020-03-07T13:24:53.453472 | 2019-10-09T16:28:20 | 2019-10-09T16:28:20 | 127,499,939 | 0 | 0 | null | 2018-03-31T04:51:45 | 2018-03-31T04:51:44 | null | UTF-8 | R | false | true | 390 | rd | save_chart.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/synapse_helpers.R
\name{save_chart}
\alias{save_chart}
\title{Save a static or dynamic chart to a file and store in Synapse.}
\usage{
save_chart(parent_id, chart_filename, plot_object, static = FALSE)
}
\arguments{
\item{parent_id}{Synapse ID of the folder or project in which to store the chart file.}

\item{chart_filename}{Name of the file the chart is saved to.}

\item{plot_object}{The chart object to save.}

\item{static}{Logical; if \code{TRUE} the chart is saved as a static image, otherwise as a dynamic (interactive) chart.}
}
\description{
Save a static or dynamic chart to a file and store in Synapse.
}
|
f29b9ded83b78f8dfdc4e90c228be45e61ea2aaf | 0e7bd9d49f15674ba5415c10e721923520e11bd2 | /R/whiteseg.R | 33818b7b76086fa5ee8f529bcdd6fb9d6ac5ed46 | [] | no_license | syunhong/seg | 8bbf3ac80aa31dab57652e7e8350d2449f55e622 | 6ef0afe116c16b0e55e5f47a95a9e4c162e60117 | refs/heads/master | 2022-08-20T22:33:19.085866 | 2022-08-12T21:59:15 | 2022-08-12T21:59:15 | 92,493,454 | 12 | 2 | null | null | null | null | UTF-8 | R | false | false | 446 | r | whiteseg.R | # ------------------------------------------------------------------------------
# whiteseg()
# ------------------------------------------------------------------------------
whiteseg <- function(x, data, nb, fun, verbose = FALSE, ...) {
message("Note: whiteseg() function name has been changed to isp().")
message("The old name will be deprecated from version 0.6-1.")
tmpargs <- as.list(match.call())
do.call(isp, tmpargs[-1])
} |
e6c1e77b82146c5f32b00d4fa11c57e62a0e2eaf | f7c48e2eab827668218a34caae1700795f77763c | /Readme.rd | 6bfdd29928139d8f5c68b9a0c38c169b543ded2a | [] | no_license | bsohackdk/User-login | c99ed048b47027d951f6e7391c6984b92f849a43 | 93d349dd86591fd25524647b3c32d311b93305d6 | refs/heads/master | 2020-09-01T11:37:04.423750 | 2019-11-01T09:14:19 | 2019-11-01T09:14:19 | 218,951,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 222 | rd | Readme.rd | 编写登陆接口
功能:1)输入用户名密码
2)认证成功后显示欢迎信息
3)输错三次以后锁定用户
注意点一:登陆时验证lock文件,确认该用户是否被锁
注意点二:
|
fe4b8df125903e7a3dd5ccdb005ba378160f6941 | 5b6f50654852dfce5ae1639900be523586ff0e20 | /demo/diagnose.R | 01595e0464da2ecd13b3aeeae02b46466960edeb | [] | no_license | spatstat/spatstat | 2ff4642866ef0e3411b998fd394837c916b93dd8 | f16a2015b52c497427c493d583ac075122a097c2 | refs/heads/master | 2023-08-03T10:36:49.254624 | 2023-07-22T03:31:04 | 2023-07-22T03:31:04 | 22,839,956 | 182 | 54 | null | 2021-02-15T09:04:07 | 2014-08-11T13:28:43 | R | UTF-8 | R | false | false | 5,312 | r | diagnose.R | if(dev.cur() <= 1) {
dd <- getOption("device")
if(is.character(dd)) dd <- get(dd)
dd()
}
oldpar <- par(ask = interactive() &&
(.Device %in% c("X11", "GTK", "windows", "Macintosh")))
par(mfrow=c(1,1))
oldoptions <- options(warn = -1)
#
#######################################################
#
X <- rpoispp(function(x,y) { 1000 * exp(- 4 * x)}, 1000)
plot(X, main="Inhomogeneous Poisson pattern")
fit.hom <- ppm(X ~1, Poisson())
fit.inhom <- ppm(X ~x, Poisson())
diagnose.ppm(fit.inhom, which="marks", type="Pearson",
main=c("Mark plot",
"Circles for positive residual mass",
"Colour for negative residual density"))
par(mfrow=c(1,2))
diagnose.ppm(fit.hom, which="marks",
main=c("Wrong model", "(homogeneous Poisson)", "raw residuals"))
diagnose.ppm(fit.inhom, which="marks",
main=c("Right model", "(inhomogeneous Poisson)", "raw residuals"))
par(mfrow=c(1,1))
diagnose.ppm(fit.inhom, which="smooth", main="Smoothed residual field")
par(mfrow=c(1,2))
diagnose.ppm(fit.hom, which="smooth",
main=c("Wrong model", "(homogeneous Poisson)",
"Smoothed residual field"))
diagnose.ppm(fit.inhom, which="smooth",
main=c("Right model", "(inhomogeneous Poisson)",
"Smoothed residual field"))
par(mfrow=c(1,1))
diagnose.ppm(fit.inhom, which="x")
par(mfrow=c(1,2))
diagnose.ppm(fit.hom, which="x",
main=c("Wrong model", "(homogeneous Poisson)",
"lurking variable plot for x"))
diagnose.ppm(fit.inhom, which="x",
main=c("Right model", "(inhomogeneous Poisson)",
"lurking variable plot for x"))
par(mfrow=c(1,1))
diagnose.ppm(fit.hom, type="Pearson",main="standard diagnostic plots")
par(mfrow=c(1,2))
diagnose.ppm(fit.hom, main=c("Wrong model", "(homogeneous Poisson)"))
diagnose.ppm(fit.inhom, main=c("Right model", "(inhomogeneous Poisson)"))
par(mfrow=c(1,1))
#
#######################################################
# LEVERAGE/INFLUENCE
# Case-deletion style diagnostics: which points drive the inhomogeneous fit.
plot(leverage(fit.inhom))
plot(influence(fit.inhom))
plot(dfbetas(fit.inhom))
#
#######################################################
# COMPENSATORS
# Compare the empirical K function with the model compensators of both fits.
## Takes a long time...
CF <- compareFit(listof(hom=fit.hom, inhom=fit.inhom),
Kcom, same="iso", different="icom")
plot(CF, main="model compensators", legend=FALSE)
legend("topleft",
legend=c("empirical K function", "compensator of CSR",
"compensator of inhomogeneous Poisson"), lty=1:3, col=1:3)
#
#######################################################
# Q - Q PLOTS
# Simulation-based Q-Q plots of smoothed residuals (40 simulations each).
#
qqplot.ppm(fit.hom, 40)
#conclusion: homogeneous Poisson model is not correct
title(main="Q-Q plot of smoothed residuals")
qqplot.ppm(fit.inhom, 40) # TAKES A WHILE...
title(main=c("Right model", "(inhomogeneous Poisson)",
"Q-Q plot of smoothed residuals"))
# conclusion: fitted inhomogeneous Poisson model looks OK
#
#######################################################
# Diagnostics on the `cells` data: CSR looks fine in residual plots but is
# contradicted by the compensators and the Q-Q plot; a Strauss model fits.
#
plot(cells)
fitPoisson <- ppm(cells ~1, Poisson())
diagnose.ppm(fitPoisson,
main=c("CSR fitted to cells data",
"Raw residuals",
"No suggestion of departure from CSR"))
diagnose.ppm(fitPoisson, type="pearson",
main=c("CSR fitted to cells data",
"Pearson residuals",
"No suggestion of departure from CSR"))
# These diagnostic plots do NOT show evidence of departure from uniform Poisson
plot(Kcom(fitPoisson), cbind(iso, icom) ~ r)
plot(Gcom(fitPoisson), cbind(han, hcom) ~ r)
# K compensator DOES show strong evidence of departure from uniform Poisson
qqplot.ppm(fitPoisson, 40)
title(main=c("CSR fitted to cells data",
"Q-Q plot of smoothed raw residuals",
"Strong suggestion of departure from CSR"))
# Q-Q plot DOES show strong evidence of departure from uniform Poisson.
#
# Alternative: Strauss (pairwise-interaction) model with interaction radius 0.1.
fitStrauss <- ppm(cells ~1, Strauss(r=0.1))
diagnose.ppm(fitStrauss,
main=c("Strauss model fitted to cells data",
"Raw residuals"))
diagnose.ppm(fitStrauss, type="pearson",
main=c("Strauss model fitted to cells data",
"Pearson residuals"))
plot(Kcom(fitStrauss), cbind(iso, icom) ~ r)
plot(Gcom(fitStrauss), cbind(han, hcom) ~ r)
# next line takes a LOOONG time ...
qqplot.ppm(fitStrauss, 40, type="pearson")
title(main=c("Strauss model fitted to cells data",
"Q-Q plot of smoothed Pearson residuals",
"Suggests adequate fit"))
# Conclusion: Strauss model seems OK
#
#######################################################
# Diagnostics on the `nztrees` data (CSR fit), then restore graphics state.
#
plot(nztrees)
fit <- ppm(nztrees ~1, Poisson())
diagnose.ppm(fit, type="pearson")
title(main=c("CSR fitted to NZ trees",
"Pearson residuals"))
diagnose.ppm(fit, type="pearson", cumulative=FALSE)
title(main=c("CSR fitted to NZ trees",
"Pearson residuals (non-cumulative)"))
# Non-cumulative lurking-variable plot in x, with a smoothing spline overlay.
lurking(fit, expression(x), type="pearson", cumulative=FALSE,
splineargs=list(spar=0.3))
# Sharp peak at right is suspicious
qqplot.ppm(fit, 40, type="pearson")
title(main=c("CSR fitted to NZ trees",
"Q-Q plot of smoothed Pearson residuals"))
# Slight suggestion of departure from Poisson at top right of pattern.
# Restore the graphics parameters and options saved at the top of the demo.
par(oldpar)
options(oldoptions)
|
9eadaaa2d8e9321831ec0b7c4dc18bdd62512987 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/cellWise/man/cellHandler.Rd | 138793588cd962d8c199784938f25d9386f22f4c | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,685 | rd | cellHandler.Rd | \name{cellHandler}
\alias{cellHandler}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
cellHandler algorithm
}
\description{
This function flags cellwise outliers in \code{X} and imputes them, if robust estimates of the center \code{mu} and scatter matrix \code{Sigma} are given. When the latter are not known, as is typically the case, one can use the function \code{\link{DDC}} which only requires the data matrix \code{X}. Alternatively, the unknown center mu and scatter matrix Sigma can be estimated robustly from \code{X} by the function \code{\link{DI}}.
}
\usage{
cellHandler(X, mu, Sigma, quant = 0.99)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{\code{X} is the input data, and must be an \eqn{n} by \eqn{d} matrix or a data frame.
}
\item{mu}{An estimate of the center of the data
}
\item{Sigma}{An estimate of the covariance matrix of the data
}
\item{quant}{Cutoff used in the detection of cellwise outliers. Defaults to \code{0.99}
}
}
\value{
A list with components: \cr
\itemize{
\item{\code{Ximp} \cr
The imputed data matrix.
}
\item{\code{indcells} \cr
Indices of the cells which were flagged in the analysis.
}
\item{\code{indNAs} \cr
Indices of the NAs in the data.
}
\item{\code{Zres} \cr
Matrix with standardized cellwise residuals of the flagged cells. Contains zeroes in the unflagged cells.
}
\item{\code{Zres_denom} \cr
Denominator of the standardized cellwise residuals.
}
\item{\code{cellPaths} \cr
Matrix with the same dimensions as X, in which each row contains the path of least angle regression through the cells of that row, i.e. the order of the coordinates in the path (1=first, 2=second,...)
}
}
}
\references{
J. Raymaekers and P.J. Rousseeuw (2020). Handling cellwise outliers by sparse
regression and robust covariance. \emph{arXiv: 1912.12446}. \href{https://arxiv.org/abs/1912.12446}{(link to open access pdf)}
}
\author{
J. Raymaekers and P.J. Rousseeuw
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{DI}}
}
\examples{
mu <- rep(0, 3)
Sigma <- diag(3) * 0.1 + 0.9
X <- rbind(c(0.5, 1.0, 5.0), c(-3.0, 0.0, 1.0))
n <- nrow(X); d <- ncol(X)
out <- cellHandler(X, mu, Sigma)
Xres <- X - out$Ximp # unstandardized residual
mean(abs(as.vector(Xres - out$Zres*out$Zres_denom))) # 0
W <- matrix(rep(0,n*d),nrow=n) # weight matrix
W[out$Zres != 0] <- 1 # 1 indicates cells that were flagged
# For more examples, we refer to the vignette:
vignette("DI_examples")
} |
8be4edafaa8b822a1756c09d3f5dd1b471df7206 | 164a4905afb7f7b825f704c44daa00914cc97af2 | /R/RScript.R | fc665fe0a427ca6c91f6c3fcfd4d4db4da858cc4 | [] | no_license | hhy5277/ReporteRs | b8403e5a71f124bf682a9e6fb4b8839a72f8676b | fc1e891ffea15cdbaeb4d727d17205dccca2089e | refs/heads/master | 2020-06-03T18:38:17.477441 | 2018-11-30T14:06:06 | 2018-11-30T14:06:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,924 | r | RScript.R | #' @title RScript object
#'
#' @description Colored RScript object
#'
#' @param file R script file. Not used if text is provided.
#' @param text character vector. The text to parse. Not used if file is provided.
#' @param comment.properties comment textProperties object
#' @param roxygencomment.properties roxygencomment textProperties object
#' @param operators.properties operators textProperties object
#' @param keyword.properties keyword textProperties object
#' @param string.properties string textProperties object
#' @param number.properties number textProperties object
#' @param functioncall.properties functioncall textProperties object
#' @param argument.properties argument textProperties object
#' @param package.properties package textProperties object
#' @param formalargs.properties formalargs textProperties object
#' @param eqformalargs.properties eqformalargs textProperties object
#' @param assignement.properties assignement textProperties object
#' @param symbol.properties symbol textProperties object
#' @param slot.properties slot textProperties object
#' @param default.properties default textProperties object
#' @param par.properties a parProperties object
#' @examples
#' \donttest{
#' if( check_valid_java_version() ){
#' an_rscript = RScript( text = "ls()
#' x = rnorm(10)" )
#' }
#' }
#' @seealso \code{\link{addRScript}}
#' @export
RScript = function( file, text
	, comment.properties = textProperties( color = "#A7947D" )
	, roxygencomment.properties = textProperties( color = "#5FB0B8" )
	, symbol.properties = textProperties( color = "black" )
	, operators.properties = textProperties( color = "black" )
	, keyword.properties = textProperties( color = "#4A444D" )
	, string.properties = textProperties( color = "#008B8B", font.style = "italic" )
	, number.properties = textProperties( color = "blue" )
	, functioncall.properties = textProperties( color = "blue" )
	, argument.properties = textProperties( color = "#666666" )
	, package.properties = textProperties( color = "green" )
	, formalargs.properties = textProperties( color = "#424242" )
	, eqformalargs.properties = textProperties( color = "#424242" )
	, assignement.properties = textProperties( color = "black" )
	, slot.properties = textProperties( color = "#F25774" )
	, default.properties = textProperties( color = "black" )
	, par.properties = parProperties()
	) {

	if( !inherits( par.properties, "parProperties" ) ){
		stop("argument 'par.properties' must be an object of class 'parProperties'")
	}

	# All token-highlighting arguments, listed in the order they were
	# historically validated so the first error reported is unchanged.
	# Their names match the formals of get.pots.from.script, which lets us
	# forward them in a single do.call below instead of duplicating the
	# 15-argument call for the file and text branches.
	highlight.props = list(
		comment.properties = comment.properties,
		roxygencomment.properties = roxygencomment.properties,
		operators.properties = operators.properties,
		keyword.properties = keyword.properties,
		string.properties = string.properties,
		number.properties = number.properties,
		functioncall.properties = functioncall.properties,
		argument.properties = argument.properties,
		package.properties = package.properties,
		formalargs.properties = formalargs.properties,
		eqformalargs.properties = eqformalargs.properties,
		assignement.properties = assignement.properties,
		symbol.properties = symbol.properties,
		slot.properties = slot.properties,
		default.properties = default.properties
	)
	for( prop.name in names( highlight.props ) ){
		if( !inherits( highlight.props[[ prop.name ]], "textProperties" ) )
			stop( "argument ", prop.name, " must be a textProperties object." )
	}

	if( missing( file ) && missing( text ) )
		stop("file OR text must be provided as argument.")

	# Parse either a script on disk or an in-memory string; `file` takes
	# precedence when both are supplied (historical behaviour).
	if( !missing( file ) ){
		if( !inherits( file, "character" ) || length( file ) != 1 )
			stop("file must be a single character value")
		if( !file.exists( file ) )
			stop( file, " does not exist")
		source.arg = list( file = file )
	} else {
		if( !inherits( text, "character" ) || length( text ) != 1 )
			stop("text must be a single character value")
		source.arg = list( text = text )
	}

	# Single syntax-highlighting pass producing a list of pot objects.
	pot.list = do.call( get.pots.from.script, c( source.arg, highlight.props ) )

	# Build the Java-side RScript and feed it one paragraph per pot.
	jparProp = .jParProperties( par.properties )
	jRScript = .jnew( class.RScript, jparProp )
	for( i in seq_along( pot.list ) ){
		.jcall( jRScript, "V", "addParagraph", .jpot( pot.list[[ i ]] ) )
	}

	out = list( jobj = jRScript )
	class( out ) = c( "RScript", "set_of_paragraphs")
	out
}
#' @export
print.RScript = function( x, ... ){
	# Ask the underlying Java RScript object to render itself, then echo
	# that text to the console; return nothing (invisibly), as before.
	script_as_text = .jcall( x$jobj, "S", "toString" )
	cat( script_as_text )
	invisible()
}
|
a07451b6cc2f0df0080ddd39ccc178b5e422ec31 | 151de070b8babcff561d0cf6e5d8497c9b0f30e6 | /dataiku.R | fd30aa765bbb2987329e938027988011b9b2d2b2 | [] | no_license | claeysre/challenge_census | 6ff901b8eda535165157d52f9b3d4c759bea961e | a6db1b67a4da0123b7611920fbaa6c6a9393f518 | refs/heads/master | 2021-01-01T05:12:53.792020 | 2016-05-08T20:58:54 | 2016-05-08T20:58:54 | 56,500,310 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,603 | r | dataiku.R | # Packages needed to run this code. If you do not posess one of this library, please run the following command
# install.packages('e1071') for example
library(randomForest)
library(ggplot2)
library(rmarkdown)
library(e1071)
library(ROCR)
library(caret)
library(lubridate)
# Please change your path according to your need
train_location = '/Volumes/RemiSDCard/Dataiku/us_census_full/census_income_learn.csv'
test_location = '/Volumes/RemiSDCard/Dataiku/us_census_full/census_income_test.csv'
# Provide contextual information
# Column names for the headerless census CSV, in file order (42 columns).
context = c('age','class_of_worker','industry_code','occupation_code','education','wage_per_hour','enrolled_edu',
'marital_status', 'maj_ind_code', 'maj_occ_code','race','hispanic_origin','sex',
'member_labor_union','reason_unemployment','employment_status', 'capital_gains',
'capital_losses','dividends', 'tax_filer_status', 'region_prev_res', 'reg_prev_state',
'household_stats', 'household_summary', 'instance_weight', 'migration_msa', 'migration_reg',
'mig_within_region', 'same_house', 'migration_sunbelt', 'num_persons_worked_for_employer',
'relatives_under_18' , 'country_father', 'country_mother', 'country_self', 'citizenship',
'own_business', 'veterans_filled','veterans_benefits', 'weeks_worked_year', 'year', 'income')
# Ensure that columns have the correct type
# NOTE(review): position 25 (instance_weight) is mapped to 'factor' although it
# is a sampling weight; harmless here only because the column is dropped below.
type_context = c('numeric',rep('factor',4),'numeric',rep('factor',10),rep('numeric',3),
rep('factor',11),'numeric',rep('factor',8),'numeric',rep('factor',2))
# Load train and test dataset
# '?' encodes missing values; strip.white = T trims surrounding blanks in fields.
train_df <- read.csv(train_location, header = F, na.strings = '?', col.names = context,
strip.white = T, colClasses = type_context)
test_df <- read.csv(test_location, header = F, na.strings = '?', col.names = context,
strip.white = T, colClasses = type_context)
# Drop weight column (cf Metadata)
train_df <- subset(train_df, select = -c(instance_weight))
test_df <- subset(test_df, select = -c(instance_weight))
# Recode the target into "0" (<= 50K) / "1" (> 50K) and turn it into a factor.
train_df$income <- ifelse(train_df$income == "- 50000.", "0",
ifelse(train_df$income == "50000+.", "1", "other"))
test_df$income <- ifelse(test_df$income == "- 50000.", "0",
ifelse(test_df$income == "50000+.", "1", "other"))
train_df$income <- as.factor(train_df$income)
test_df$income <- as.factor(test_df$income)
# Let's see if there are any missing values
# Percentage of NAs per column; only columns with some missingness are shown.
incomplete_columns <- sapply(train_df, function(x) (sum(is.na(x)) / nrow(train_df)))*100; incomplete_columns[ incomplete_columns > 0]
# --------------------------------------------------------------------------
# Let's explore the influence of the categorical variables first.
# Each chunk plots the class balance of `income` faceted by one categorical
# predictor, as a quick visual screen of which variables separate the classes.
#First the class of worker
qplot (income, data = train_df, fill = class_of_worker) + facet_grid (. ~ class_of_worker)
#Then the industry code, too many categories, furthermore we don't know what the categories mean, same for occupation code
qplot (income, data = train_df, fill = industry_code) + facet_grid (. ~ industry_code)
# => same info with maj industry code et major occupation code
# Education
qplot (income, data = train_df, fill = education) + facet_grid (. ~ education)
#=> a very good feature that will be helpful for classification
# Maybe gather the children.
# Enrolled in edu last week
qplot (income, data = train_df, fill = enrolled_edu) + facet_grid (. ~ enrolled_edu)
#=> Can also be discriminative, students don't make 50K during school. maybe merge college and high school
# Marital Status
qplot (income, data = train_df, fill = marital_status) + facet_grid (. ~ marital_status)
# Major Industry Code
qplot (income, data = train_df, fill = maj_ind_code) + facet_grid (. ~ maj_ind_code)
# =>Trade, Manufacturing, Finance categories have a bigger proportion to earn +50K
# Major Occupation Code
qplot (income, data = train_df, fill = maj_occ_code) + facet_grid (. ~ maj_occ_code)
# => Managerial, Professional Special, Protective Services have better proportion to earn +50K
# Race
qplot (income, data = train_df, fill = race) + facet_grid (. ~ race)
# White people seem advantaged , maybe transform the other races into a cat "minorities" would help the RF
#hispanic_origin
qplot (income, data = train_df, fill = hispanic_origin) + facet_grid (. ~ hispanic_origin)
#=> Not Relevant All other represent white people and the others minorities
#sex
qplot (income, data = train_df, fill = sex) + facet_grid (. ~ sex)
# =>Relevant males advantaged
#member labor union
qplot (income, data = train_df, fill = member_labor_union) + facet_grid (. ~ member_labor_union)
# => Relevant
#reason_unemployment
# FIX: the column is named 'reason_unemployment' (see `context` above); the
# previous 'reason_unemployement' spelling referenced a non-existent column
# and made this plot error out.
qplot (income, data = train_df, fill = reason_unemployment) + facet_grid (. ~ reason_unemployment)
# Useless since people unemployed will not help to classify the problematic categorie+50K
# No +50K for
#employment_status
qplot (income, data = train_df, fill = employment_status) + facet_grid (. ~ employment_status)
#=> Full time schedule more likely to earn +50K
# tax_filer_status
qplot (income, data = train_df, fill = tax_filer_status) + facet_grid (. ~ tax_filer_status)
#=> Relevant, Joint both under 65
# region_prev_res
qplot (income, data = train_df, fill = region_prev_res) + facet_grid (. ~ region_prev_res)
# => Not relevant
# reg_prev_state
qplot (income, data = train_df, fill = reg_prev_state) + facet_grid (. ~ reg_prev_state)
# => Not Relevant
# household_stats
qplot (income, data = train_df, fill = household_stats) + facet_grid (. ~ household_stats)
# Redundant with household_summary
# household_summary
qplot (income, data = train_df, fill = household_summary) + facet_grid (. ~ household_summary)
# => Relevant
# migration_msa
qplot (income, data = train_df, fill = migration_msa) + facet_grid (. ~ migration_msa)
# => could be relevant but too much missing information
# migration_reg
qplot (income, data = train_df, fill = migration_reg) + facet_grid (. ~ migration_reg)
# => could be relevant but too much missing information
# migration within the same region
# FIX: the facet previously used 'migration_within_reg', which is not a column
# of train_df; the declared name (see `context`) is 'mig_within_region'.
qplot (income, data = train_df, fill = mig_within_region) + facet_grid (. ~ mig_within_region)
# same house
qplot (income, data = train_df, fill = same_house) + facet_grid (. ~ same_house)
# => Not relevant
# Migration sunbelt
qplot (income, data = train_df, fill = migration_sunbelt) + facet_grid (. ~ migration_sunbelt)
#relatives_under_18
qplot (income, data = train_df, fill = relatives_under_18) + facet_grid (. ~ relatives_under_18)
# => Not relevant, seems to apply only to children
#country_father
qplot (income, data = train_df, fill = country_father) + facet_grid (. ~ country_father)
#
#country_mother
qplot (income, data = train_df, fill = country_mother) + facet_grid (. ~ country_mother)
#
#country_self
qplot (income, data = train_df, fill = country_self) + facet_grid (. ~ country_self)
# May be relevant, redundant with citizenship
# citizenship
qplot (income, data = train_df, fill = citizenship) + facet_grid (. ~ citizenship)
# let's see later
# own_business
qplot (income, data = train_df, fill = own_business) + facet_grid (. ~ own_business)
# => Creator of business advantaged
# veterans_filled
qplot (income, data = train_df, fill = veterans_filled) + facet_grid (. ~ veterans_filled)
# => Not relevant, all the data is in the not in universe class and there is not enough data for veterans
# veterans_benefits
qplot (income, data = train_df, fill = veterans_benefits) + facet_grid (. ~ veterans_benefits )
# => Maybe relevant
# year
qplot (income, data = train_df, fill = year) + facet_grid (. ~ year )
# As predicted, useless
# ----------------------------------------------------------------------------------
# Let's explore the influence of numerical variables
#AGE
boxplot (age ~ income, data = train_df, main = "Age distribution depending on classes",
xlab = "class", ylab = "Age", col = c("green") )
# => Relevant
ggplot(train_df, aes(x=age, fill=income)) +
geom_histogram(binwidth=2, alpha=0.5, position="identity")
# => Different distribution
#
#WAGE
boxplot (wage_per_hour ~ income, data = train_df, main = "wage distribution depending on classes",
xlab = "class", ylab = "wage", col = c("green") )
#Too much zeros in the dataset, doesn't seem to be useful, a simple overview of the first lines shows
# a individual having 1200 of hourly wage and still under 50 K
# => Relevant
# Capitals Gains + Capital Losses + Dividends
# Collapse the three money-flow columns into one net amount (gains - losses + dividends).
train_df$sum_losses_gains = train_df$capital_gains - train_df$capital_losses + train_df$dividends
test_df$sum_losses_gains = test_df$capital_gains - test_df$capital_losses + test_df$dividends
boxplot (sum_losses_gains ~ income, data = train_df, main = "gains - losses distribution depending on classes",
xlab = "class", ylab = "Age", col = c("green") )
# Even if that's not very insightful we can clearly see that people who lose money don't make +50K
#m <- ggplot(train_df, aes(x = sum_losses_gains))
#m + geom_density()
# Let's create some category, this can be good for a decision tree or a Random Forest
# Discretise the net amount into 5 ordered buckets. The "other" fallback is
# unreachable for non-missing values: the five conditions cover the real line.
train_df$sum_losses_gains_cat<-ifelse(train_df$sum_losses_gains< -1000,"big-loser",
ifelse(train_df$sum_losses_gains >= -1000 & train_df$sum_losses_gains < 0, "small-loser",
ifelse(train_df$sum_losses_gains == 0 , "balanced",
ifelse(train_df$sum_losses_gains> 0 & train_df$sum_losses_gains <= 5000 , "small-winner",
ifelse(train_df$sum_losses_gains > 5000 , "big-winner","other"
)))))
# Identical bucketing on the test set so levels match at prediction time.
test_df$sum_losses_gains_cat<-ifelse(test_df$sum_losses_gains< -1000,"big-loser",
ifelse(test_df$sum_losses_gains >= -1000 & test_df$sum_losses_gains < 0, "small-loser",
ifelse(test_df$sum_losses_gains == 0 , "balanced",
ifelse(test_df$sum_losses_gains> 0 & test_df$sum_losses_gains <= 5000 , "small-winner",
ifelse(test_df$sum_losses_gains > 5000 , "big-winner","other"
)))))
qplot (income, data = train_df, fill = sum_losses_gains_cat) + facet_grid (. ~ sum_losses_gains_cat)
#Num persons worked for employer
boxplot (num_persons_worked_for_employer ~ income, data = train_df, main = "Num persons worked for employer distribution depending on classes",
xlab = "class", ylab = "nums_persons", col = c("green") )
# => Relevant
ggplot(train_df, aes(x = num_persons_worked_for_employer, fill=income)) +
geom_histogram(binwidth = 2, alpha = 0.5, position="identity")
# Bigger the corporate is, you have more chance to earn money
# Relevant to put that variable into factor
train_df$num_persons_worked_for_employer <- as.factor(train_df$num_persons_worked_for_employer)
test_df$num_persons_worked_for_employer <- as.factor(test_df$num_persons_worked_for_employer)
qplot (income, data = train_df, fill = num_persons_worked_for_employer) + facet_grid (. ~ num_persons_worked_for_employer)
# WEEKS WORKED IN A YEAR
boxplot (weeks_worked_year ~ income, data = train_df, main = "Weeks worked in a year distribution depending on classes",
xlab = "class", ylab = "nums_persons", col = c("green") )
# => Relevant
ggplot(train_df, aes(x = weeks_worked_year, fill=income)) +
geom_histogram(binwidth = 2, alpha = 0.5, position="identity")
# Age Categories
# Bucket age into life stages. FIX: the 27-66 label was previously misspelt
# "wokers"; corrected to "workers" consistently for train AND test so the
# factor levels of the two sets still match.
train_df$age_cat<-ifelse(train_df$age < 18, "youth",
ifelse(train_df$age >= 18 & train_df$age < 27, "y_workers",
ifelse(train_df$age >= 27 & train_df$age < 67, "workers",
ifelse(train_df$age >= 67 , "retired","other"
))))
# Reproduce the same for the test set
test_df$age_cat<-ifelse(test_df$age < 18, "youth",
ifelse(test_df$age >= 18 & test_df$age < 27, "y_workers",
ifelse(test_df$age >= 27 & test_df$age < 67, "workers",
ifelse(test_df$age >= 67 , "retired","other"
))))
# Education Category reduction
# Collapse the many education levels into 6 coarse groups (pre-diploma levels
# all become "youth"); "other" catches any unexpected level.
train_df$education_cat<-ifelse(train_df$education == "10th grade", "youth",
ifelse(train_df$education == "11th grade", "youth",
ifelse(train_df$education == "12th grade no diploma", "youth" ,
ifelse(train_df$education == "1st 2nd 3rd or 4th grade", "youth",
ifelse(train_df$education == "5th or 6th grade", "youth",
ifelse(train_df$education == "7th and 8th grade", "youth",
ifelse(train_df$education == "9th grade", "youth",
ifelse(train_df$education == "Less than 1st grade", "youth",
ifelse(train_df$education == "Children", "youth",
ifelse(train_df$education == "Associates degree-academic program", "basicdegree",
ifelse(train_df$education == "Associates degree-occup /vocational", "basicdegree",
ifelse(train_df$education == "Some college but no degree", "basicdegree",
ifelse(train_df$education == "High school graduate", "high school graduate",
ifelse(train_df$education == "Bachelors degree(BA AB BS)", "bachelor",
ifelse(train_df$education == "Masters degree(MA MS MEng MEd MSW MBA)", "master",
ifelse(train_df$education == "Doctorate degree(PhD EdD)", "prof_doct",
ifelse(train_df$education == "Prof school degree (MD DDS DVM LLB JD)", "prof_doct", "other"
)))))))))))))))))
# Same reduction on the test set.
test_df$education_cat<- ifelse(test_df$education == "10th grade", "youth",
ifelse(test_df$education == "11th grade", "youth",
ifelse(test_df$education == "12th grade no diploma", "youth" ,
ifelse(test_df$education == "1st 2nd 3rd or 4th grade", "youth",
ifelse(test_df$education == "5th or 6th grade", "youth",
ifelse(test_df$education == "7th and 8th grade", "youth",
ifelse(test_df$education == "9th grade", "youth",
ifelse(test_df$education == "Less than 1st grade", "youth",
ifelse(test_df$education == "Children", "youth",
ifelse(test_df$education == "Associates degree-academic program", "basicdegree",
ifelse(test_df$education == "Associates degree-occup /vocational", "basicdegree",
ifelse(test_df$education == "Some college but no degree", "basicdegree",
ifelse(test_df$education == "High school graduate", "high school graduate",
ifelse(test_df$education == "Bachelors degree(BA AB BS)", "bachelor",
ifelse(test_df$education == "Masters degree(MA MS MEng MEd MSW MBA)", "master",
ifelse(test_df$education == "Doctorate degree(PhD EdD)", "prof_doct",
ifelse(test_df$education == "Prof school degree (MD DDS DVM LLB JD)", "prof_doct", "other"
)))))))))))))))))
# Remove column, also income to append it at the end, after
# Drop raw columns that were judged uninformative/redundant above, plus the
# raw numeric sum_losses_gains now superseded by its bucketed version.
train_clean_df <- subset(train_df, select = -c(age,industry_code,occupation_code,education,hispanic_origin, reason_unemployment,
capital_gains,capital_losses,dividends,region_prev_res, reg_prev_state, household_stats,
migration_msa, migration_reg, mig_within_region, migration_sunbelt, country_father, country_mother,
country_self, veterans_filled, year, income, sum_losses_gains))
# Re-Append it at the end
train_clean_df$income <- train_df$income
# Produce the same for the test dataset
test_clean_df <- subset(test_df, select = -c(age,industry_code,occupation_code,education,hispanic_origin, reason_unemployment,
capital_gains,capital_losses,dividends,region_prev_res, reg_prev_state, household_stats,
migration_msa, migration_reg, mig_within_region, migration_sunbelt, country_father, country_mother,
country_self, veterans_filled, year, income, sum_losses_gains))
# Re-Append it at the end
test_clean_df$income <- test_df$income
# Set new variables as Factor
train_clean_df$education_cat <- as.factor(train_clean_df$education_cat)
train_clean_df$age_cat <- as.factor(train_clean_df$age_cat)
train_clean_df$sum_losses_gains_cat <- as.factor(train_clean_df$sum_losses_gains_cat)
test_clean_df$education_cat <- as.factor(test_clean_df$education_cat)
test_clean_df$age_cat <- as.factor(test_clean_df$age_cat)
test_clean_df$sum_losses_gains_cat <- as.factor(test_clean_df$sum_losses_gains_cat)
# Set the seed
set.seed(3004)
# Train a rf
# First-pass random forest on (almost) all engineered predictors.
# sampsize=c(5000,1000) caps the per-class bootstrap sample (see ?randomForest),
# presumably to counter the strong imbalance of the income target.
rf <-randomForest(income ~ class_of_worker + wage_per_hour + enrolled_edu + marital_status + maj_ind_code + maj_occ_code
+ race + sex + member_labor_union + employment_status + tax_filer_status + household_summary + same_house
+ num_persons_worked_for_employer + relatives_under_18 + citizenship + own_business + veterans_benefits +
weeks_worked_year + sum_losses_gains_cat + age_cat + education_cat,
data=train_clean_df,
mtry= 5,
sampsize=c(5000,1000),
ntree=300,
na.action=na.omit,
do.trace=100,
importance=TRUE)
# In-sample predictions; columns 1:22 appear to be all predictors of
# train_clean_df, excluding the trailing `income` column -- TODO confirm.
foo <- predict (rf,train_clean_df[,1:22])
# Balanced Error Rate, in percent, of a 2x2 confusion matrix whose ROWS are the
# true classes: the mean of the two per-class misclassification rates, * 100.
#
# @param confusion_mat 2x2 numeric matrix/table, true classes in rows.
# @return single numeric value in [0, 100].
ber <- function(confusion_mat)
{
  # Guard: the index arithmetic below is only meaningful for a 2x2 table.
  stopifnot(nrow(confusion_mat) == 2, ncol(confusion_mat) == 2)
  # Fraction of true class 1 predicted as class 2, and vice versa.
  class1_err <- confusion_mat[1,2] / (confusion_mat[1,1]+confusion_mat[1,2])
  class2_err <- confusion_mat[2,1] / (confusion_mat[2,1]+confusion_mat[2,2])
  # FIX: the original ended with `ber <- ...`, returning the value only
  # invisibly and shadowing the function name; return it explicitly instead.
  (class1_err + class2_err) * 0.5 * 100
}
# Variable importance from the first model (type=1 = mean decrease in accuracy,
# per ?importance), shown as a sorted horizontal bar chart.
imp <- importance(rf, type=1)
featureImportance <- data.frame(Feature=row.names(imp), Importance=imp[,1])
p <- ggplot(featureImportance, aes(x=reorder(Feature, Importance), y=Importance)) +
geom_bar(stat="identity", fill="#53cfff") +
coord_flip() +
theme_light(base_size=20) +
xlab("Importance") +
ylab("") +
ggtitle("Random Forest Feature Importance\n") +
theme(plot.title=element_text(size=18))
# Train final rf
# Refit on the predictors retained after the importance screen, with a more
# balanced per-class sample and more trees.
rf8 <-randomForest(income ~ class_of_worker + wage_per_hour + enrolled_edu + marital_status + maj_ind_code + maj_occ_code
+ race + sex + tax_filer_status + household_summary
+ num_persons_worked_for_employer + citizenship + own_business +
weeks_worked_year + sum_losses_gains_cat + age_cat + education_cat,
data=train_clean_df,
mtry= 5,
sampsize=c(1900,1500),
ntree=500,
na.action=na.omit,
do.trace=100,
importance=TRUE)
# PREDICTIONS ON TEST SET
featuresToKeep <- c('class_of_worker', 'wage_per_hour', 'enrolled_edu', 'marital_status', 'maj_ind_code', 'maj_occ_code',
'race', 'sex', 'tax_filer_status', 'household_summary', 'num_persons_worked_for_employer','citizenship',
'own_business','weeks_worked_year', 'sum_losses_gains_cat', 'age_cat', 'education_cat')
test_clean_df2 <- test_clean_df[featuresToKeep]
test_clean_df2$income_predicted <- predict(rf8, test_clean_df2)
# Cross-tabulate predicted (rows) vs true (columns) classes.
xtab <- table(test_clean_df2$income_predicted, test_clean_df$income)
conf_object <- confusionMatrix(xtab)
# ber() expects true classes in rows, hence the transpose of caret's table
# (which has Prediction in rows / Reference in columns -- see ?confusionMatrix).
Ber_error <- ber(t(conf_object$table ))
|
1f8e4073206a5ae1122f02c043e536d084571cf5 | 3324667e4c8377a4621c34601cff709bc4a3d2b0 | /Scripts/variable_trends_seasonal.R | 35aaae20856f9f622f1da31dc97df2039e59cccd | [] | no_license | CM26-Climate-Paper/Analysis-code | 6a98cfc31ae6a79eb82ce4d9dc5016b54d1945b1 | 2e5861307caf3759beac0d78696e34b06dfbd694 | refs/heads/master | 2020-06-30T07:35:58.532666 | 2019-09-16T19:54:00 | 2019-09-16T19:54:00 | 74,387,756 | 2 | 1 | null | 2016-12-16T18:47:39 | 2016-11-21T17:24:22 | null | UTF-8 | R | false | false | 5,504 | r | variable_trends_seasonal.R | ######### new script to display the temporal trends of predictors
###### code from Jenn ####
## FIX: reference-only snippet. `FLpL` (and its Year/variable/value columns) is
## never defined in this script, so these lines errored as soon as the file was
## sourced. Kept, commented out, purely as a template for the trend plots.
# ggplot(FLpL, aes(x = Year, y = value)) +
#   geom_point() +
#   facet_wrap(~ variable, ncol = 12) +
#   geom_smooth(method = lm) +
#   xlab("") + ylab("Precipitation (in)") +
#   theme(axis.text.x = element_text(angle = 45))
## https://lh3.googleusercontent.com/-406sBFCWwaY/WdPKAI0itJI/AAAAAAAAIjU/75-0aV0-3-sp_oMXLwFp3zUs3OsXGn3MgCL0BGAYYCw/h1505/2017-10-03.png
######
######### ----------------------------> define global objects ####
library(tidyverse)
library(raster)
######### ----------------------------> create contemporary rasters averaged by season ####
# Hard-coded data locations on an external drive ("seaonal" is a typo in the
# variable name but is used consistently throughout this script).
contempDir_monthly="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project" ## monthly current rasters
contempDir_seaonal="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" #;dir.create(contempDir_seaonal) ## folder to house current rasters averaged by season
# Month codes (file-name prefixes m01..m12) belonging to each season.
winter=c("m12","m01","m02")
spring=c("m03","m04","m05")
summer=c("m06","m07","m08")
fall=c("m09","m10","m11")
seasons=c("DJF","MAM","JJA","SON")
# Create one empty list per season name (DJF, MAM, JJA, SON) in the global
# environment; the classification loop below appends file paths to them.
for(season in seasons){
print(season)
a=list()
assign(season,a)
}
# Variable codes appearing in the raster file names (bottom/surface
# temperature and salinity, sea height).
# NOTE(review): `vars` is never referenced again -- the grep() calls later
# use the literal strings instead.
vars=c("bt","bs","st","SS","sh")
allfiles=list.files(contempDir_monthly,recursive = T,full.names = T)
# Classify every monthly raster into its season bucket: the 9th path
# component is split on "_" into a month prefix ("mMM") and a year.
for(file in allfiles){
print(file)
a=strsplit(file,"/")[[1]][9]
b=strsplit(a,"_")
month=b[[1]][1]
year=b[[1]][2]
########## 20-40
if(month %in% winter){
DJF=unlist(list(file,DJF))
}
if(month %in% spring){
MAM=unlist(list(file,MAM))
}
if(month %in% summer){
JJA=unlist(list(file,JJA))
}
if(month %in% fall){
SON=unlist(list(file,SON))
}
}
# `master` ends up as a character vector of the four season names.
master=list()
for(season in seasons){
print(season)
master=unlist(list(master,season))
}
# For each season: create an output folder and write one mean raster per
# variable, averaged over all the monthly files collected for that season.
for(mas in master){
print(mas)
a=paste0(contempDir_seaonal,"/contemp_",mas);dir.create(a)
bs=grep("bs",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(bs,paste0(contempDir_seaonal,"/contemp_",mas,"/bs.tif"),format="GTiff",overwrite=T)
bt=grep("bt",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(bt,paste0(contempDir_seaonal,"/contemp_",mas,"/bt.tif"),format="GTiff",overwrite=T)
st=grep("st",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(st,paste0(contempDir_seaonal,"/contemp_",mas,"/st.tif"),format="GTiff",overwrite=T)
SS=grep("SS",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(SS,paste0(contempDir_seaonal,"/contemp_",mas,"/SS.tif"),format="GTiff",overwrite=T)
sh=grep("sh.tif",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(sh,paste0(contempDir_seaonal,"/contemp_",mas,"/sh.tif"),format="GTiff",overwrite=T)
}
#########
######### ----------------------------> find spatial averages for all rasters in contemp and project ####
# Accumulator: one row per raster layer (period, season, variable, mean).
empty=data.frame(period=NA,season=NA,var=NA,s.mean=NA)
contemp=list.files(contempDir_seaonal,full.names = T,recursive = T)
proj=list.files("/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/project",full.names = T,recursive = T)
layersList=unlist(list(contemp,proj))
a=grep("Rugosity",layersList)
b=grep("Depth",layersList)
remove=unlist(list(a,b))
# NOTE(review): if neither pattern matched, `remove` would be empty and
# layersList[-remove] would drop EVERY element -- this assumes
# Rugosity/Depth layers are always present.
layersList=layersList[-remove] ### getting rid of depth and rugosity
# Return the last `n` characters of each element of `x` (vectorised over x).
# When n exceeds nchar(x), the whole string is returned (substr() semantics).
substrRight <- function(x, n){
  len <- nchar(x)
  substr(x, len - n + 1, len)
}
# Parse period/season/variable out of each layer path and record the
# raster's spatial mean. Path component 10 is the "<period>_<SEASON>"
# folder (last 3 chars = season, the rest minus "_XXX" = period) and
# component 11 is "<var>.tif".
for(i in 1:length(layersList)){ ### extracting metrics for each layer and writing to empty
a=strsplit(layersList[i],"/")
var=gsub(".tif","",a[[1]][11])
season=substrRight(a[[1]][10],3)
period=substr(a[[1]][10],1,nchar(a[[1]][10])-4)
s.mean=raster(layersList[i])%>%cellStats(.,stat=mean)
empty[i,1]=period
empty[i,2]=season
empty[i,3]=var
empty[i,4]=s.mean
}
##### cleaning up dataframe
xLevels=c("contemp","av20_40", "av40_60","av60_80")
empty$period=as.factor(empty$period)
empty$season=as.factor(empty$season)
empty$var=as.factor(empty$var)
empty$period_ordered=factor(empty$period,levels=xLevels)
write.csv(empty,"/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/data/means.csv") ## cleaning names by hand, quicker
# The CSV written above is edited by hand (values renamed to the pretty
# labels below) and then read back in.
empty=read.csv("/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/data/means.csv")
period_Levels=c("Contemporary","+20-40 years", "+40-60 years","+60-80 years")
season_Levels=c("Winter","Spring", "Summer","Fall")
var_Levels=c("Surface temperature","Bottom temperature","Surface salinity","Bottom salinity","Sea height")
# NOTE(review): `empty$Period` (capital P) relies on the hand-edited CSV
# having renamed the `period` column -- confirm the edited file matches.
empty$period_ordered=factor(empty$Period,levels=period_Levels)
empty$season_ordered=factor(empty$season,levels=season_Levels)
empty$var_ordered=factor(empty$var,levels=var_Levels)
######### ----------------------------> plotting 4x5 grid, each is variable by season, x = period ####
a=ggplot(empty,aes(period_ordered,s.mean,group=1))+geom_line()
a+facet_grid(var_ordered~season_ordered,scales = "free_y")+theme(strip.text.y = element_text(size=5))+
theme(axis.text.x = element_text(hjust=1,vjust=1,angle = 45))+labs(x="Temporal period")+labs(y="Mean value")
# Same plot rendered again, this time into a PDF device.
pdf("/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/data/grid.pdf")
a=ggplot(empty,aes(period_ordered,s.mean,group=1))+geom_line()
a+facet_grid(var_ordered~season_ordered,scales = "free_y")+theme(strip.text.y = element_text(size=5))+
theme(axis.text.x = element_text(hjust=1,vjust=1,angle = 45))+labs(x="Temporal period")+labs(y="Mean value")
dev.off()
|
64ee93e18c68f9c43ef98a9cc7f245b8dc12c0b9 | 4e38a7c72d5c8abea267b2d01eae522538835e49 | /W7_IntroductiontoPortfolioTheory/assignment8.r | 62f031f7719facd1c6a37f29ce38aafccf139c45 | [] | no_license | gavinconran/CompFin | 4e07e03900b73500f00d3e7d73dd3c22169e4ec6 | 0aa172e5911f7e13453e39433db0d8cbd06304dd | refs/heads/master | 2020-04-17T21:47:26.854955 | 2016-08-24T14:31:42 | 2016-08-24T14:31:42 | 66,462,651 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,710 | r | assignment8.r | ### Compute Efficient Portfolios with Matrix Algebra
# To clean up the memory of your current R session run the following line
rm(list=ls(all=TRUE))
## Part 1: Loading in your data set
# Load the relevant packages
library("zoo")
library("quadprog")
# Load the data
# load() must supply `returns_df` (a zoo-compatible returns series;
# coredata() is applied to it below) and the portfolio helpers used later
# (globalMin.portfolio, efficient.portfolio, tangency.portfolio, ...),
# which are not defined in this script.
data <- url("http://s3.amazonaws.com/assets.datacamp.com/course/compfin/lab9.RData")
load(data)
# Explore the data set
head(returns_df)
tail(returns_df)
# Timeplots with stocks on individual graphs
# Panel function: draw each series plus a horizontal zero reference line.
my.panel <- function(...) {
lines(...)
abline(h=0)
}
plot(returns_df, lwd=2, panel=my.panel, col="blue")
# Timeplots with stocks on same graph
plot(returns_df, plot.type = "single", main="Returns", col=1:4, lwd=2)
abline(h=0)
legend(x="bottomleft", legend=colnames(returns_df), col=1:4, lwd=2)
## Part 2: The CER model
# Parameters CER model
# Per-asset sample means, variances and standard deviations of the monthly
# returns.
mu_hat_month <- apply(returns_df, 2, mean)
mu_hat_month
sigma2_month <- apply(returns_df, 2, var)
sigma2_month
sigma_month <-sqrt(sigma2_month)
sigma_month
# Sample covariance and correlation matrices across the assets.
cov_mat_month <- var(returns_df)
cov_mat_month
cor_mat_month <- cor(returns_df)
cor_mat_month
# Pairwise scatterplots
pairs(coredata(returns_df), col="slateblue1", pch=16, cex=1.5)
## Part 3: Question: What is the correlation between the Nordstrom stock and the Boeing stock?
cor_mat_month # 0.1025
## Part 4: The global minimum variance portfolio - Part One
# Calculate the global minimum variance portfolio
# globalMin.portfolio() is presumably provided by the loaded lab9.RData
# workspace.
args(globalMin.portfolio)
global_min_var_portfolio = globalMin.portfolio(mu_hat_month, cov_mat_month, shorts=TRUE)
global_min_var_portfolio
# Plot the portfolio weights of our four stocks
plot(global_min_var_portfolio)
## Part 5: Standard deviation
## Question: What is the standard deviation of the global minimum variance portfolio that you have just calculated?
# Compute mean, variance and std deviation
mu.gmin = global_min_var_portfolio$er # mean
sig.gmin = global_min_var_portfolio$sd # standard deviation
mu.gmin
sig.gmin
## Part 6: The global minimum variance portfolio - Part Two
# set restriction matrices
# Quadratic program for 4 assets: minimise w' Sigma w subject to
# sum(w) = 1 (first constraint, equality via meq=1) and w >= 0
# (the remaining columns of A_matrix).
D_matrix <- 2* cov_mat_month
D_matrix
d_vector <- rep(0,4)
d_vector
A_matrix <- cbind(rep(1,4),diag(4))
A_matrix
b_vector <- c(1,rep(0,4))
b_vector
# use solve.QP to minimize portfolio variance
quad_prog <- solve.QP(Dmat=D_matrix, dvec=d_vector,
Amat=A_matrix, bvec=b_vector, meq=1)
quad_prog$solution
## Part 7: The global minimum variance portfolio - End game
# The global minimum variance portfolio
# NOTE: this refit with shorts=FALSE overwrites the shorts=TRUE fit above.
global_min_var_portfolio = globalMin.portfolio(mu_hat_month, cov_mat_month, shorts=FALSE)
global_min_var_portfolio
## part 8: An efficient portfolio
# highest average return
# Target return = the highest single-asset mean ("porfolio" typo in the
# variable names is kept for consistency with the rest of the script).
mu_target <- max(mu_hat_month)
# short sales allowed
args(efficient.portfolio)
efficient_porfolio_short <- efficient.portfolio(mu_hat_month, cov_mat_month, mu_target, shorts = TRUE)
efficient_porfolio_short
plot(efficient_porfolio_short)
# no short sales allowed
efficient_porfolio_no_short <- efficient.portfolio(mu_hat_month, cov_mat_month, mu_target, shorts = FALSE)
efficient_porfolio_no_short
plot(efficient_porfolio_no_short)
## Part 9: Question The weight of Boeing
# What is the weight of the Boeing stock under the "shorting not allowed" condition?
efficient_porfolio_no_short
## Part 10: The efficient frontier
# The efficient frontier of risky assets
args(efficient.frontier)
efficient_frontier <- efficient.frontier(mu_hat_month, cov_mat_month, alpha.min=-1, alpha.max=1)
summary(efficient_frontier)
# The plot
plot(efficient_frontier, plot.assets=TRUE, col="blue", lwd=2)
###QUIZ QUESTION 8:
# Using the fact that all efficient portfolios can be written as a convex combination of two efficient portfolios,
# compute efficient portfolios as convex combinations of the global minimum variance portfolio and the efficient portfolio that was computed in question six.
# What is the expected return of the portfolio when α=.5?
# z=α∗m+(1−α)∗x
# NOTE(review): global_min_var_portfolio was last refitted with
# shorts=FALSE (Part 7), so this mix uses the no-short min-variance weights.
z = 0.5 * global_min_var_portfolio$weights + (1-0.5)*efficient_porfolio_short$weights
sum(z*mu_hat_month) # expected return of portfolio = 3.06%
## part 11: The tangency portfolio
# (fix: this heading was previously prefixed by a stray "m" token, which
#  evaluated the undefined object `m` and aborted the script when sourced)
# risk free rate
t_bill_rate <- 0.005
# Tangency portfolio with short sales allowed
tangency_portfolio_short <- tangency.portfolio(mu_hat_month, cov_mat_month, t_bill_rate, shorts = TRUE)
summary(tangency_portfolio_short)
#plot
plot(tangency_portfolio_short)
# Tangency portfolio short sales not allowed
tangency_portfolio_no_short <- tangency.portfolio(mu_hat_month,cov_mat_month, t_bill_rate, shorts = FALSE)
summary(tangency_portfolio_no_short)
#plot
plot(tangency_portfolio_no_short)
## Part 12: The weight of Boeing ... again
# Question: If short sales is not allowed in your tangency portfolio, what is the weight of Boeing stock?
tangency_portfolio_no_short$weights
|
62ca283b021043ae74f839e7505b3c484f807400 | 135725be7153ff6ec6cc79b8db29cdea80ecc875 | /man/isSQLite3.Rd | 3c928cadd8abdc3f5655de079e0edd8ad73a7831 | [] | no_license | cran/CollapsABEL | f2969755a8fe831eeab9c4971ddd1488a1e39b81 | b3be3f831ba5e9500d016aca0ca768a367e79c84 | refs/heads/master | 2021-01-10T13:18:22.127650 | 2016-12-11T19:35:07 | 2016-12-11T19:35:07 | 55,400,560 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 369 | rd | isSQLite3.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/0_utils.R
\name{isSQLite3}
\alias{isSQLite3}
\title{Check whether a file is a SQLite3 database.}
\usage{
isSQLite3(filename)
}
\arguments{
\item{filename}{character. Path to file to be checked.}
}
\description{
Check whether a file is a SQLite3 database.
}
\author{
Kaiyin Zhong, Fan Liu
}
|
88d783ed7829ea517ae3aa445a6aef1e22ced0cf | f25773ebf850c85a2044c851d29f828a6bdd98c7 | /man/differenceOfICs.Rd | a7ac3d2bf7cb004bc22d50aeb3801c257c5e5cb4 | [] | no_license | Treutler/DiffLogo | c81c9cf71e231854904d7a72eba07aa0a8fedec4 | 2c54cdd6eac7a6ffd256d5f74a8e6ad43930cd99 | refs/heads/master | 2021-01-19T20:44:34.486095 | 2015-09-02T10:47:07 | 2015-09-02T10:47:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,000 | rd | differenceOfICs.Rd | \name{differenceOfICs}
\alias{differenceOfICs}
\title{normalized information content differences}
\usage{
differenceOfICs(p1, p2)
}
\arguments{
\item{p1}{probability vector representing the first symbol
distribution} \item{p2}{probability vector representing the
second symbol distribution}
}
\value{
a vector with one result for each symbol
}
\description{
information content differences normalized by the sum of
absolute information content differences for the given pair
of probability vectors
}
\examples{
motif_folder= "extdata/pwm"
motif_names = c("HepG2","MCF7","HUVEC","ProgFib")
motifs = list()
for (name in motif_names) {
fileName = paste(motif_folder,"/",name,".txt",sep="")
file = system.file(fileName, package = "DiffLogo")
motifs[[name]] = as.matrix(read.delim(file,header=FALSE))
}
pwm1 = motifs[[motif_names[[1]]]]
pwm2 = motifs[[motif_names[[2]]]]
diffLogoFromPwm(pwm1 = pwm1, pwm2 = pwm2, baseDistribution = differenceOfICs)
}
\author{
Martin Nettling
}
|
d741d19b09d07c50af71e2233c82adf0c5e9c398 | 4c7722ef01e9197fd1abf1dfef287e90c0f1d9a9 | /R/genomic.R | eaacec62fd30046f631073bcd28d34563d577879 | [] | no_license | jingliao/islandR | 413b29e812c735b9f8403f982311eceed2390f8c | 3ed757e1f7a8fe2b5b57d7f6893016abbb7986b7 | refs/heads/master | 2021-01-15T20:28:43.654260 | 2015-08-14T07:12:26 | 2015-08-14T07:12:26 | 40,569,298 | 0 | 0 | null | 2015-08-11T22:59:45 | 2015-08-11T22:59:42 | null | UTF-8 | R | false | false | 916 | r | genomic.R | #' Get the number of posterior samples available for attribution
#' Get the number of posterior samples available for attribution
#'
#' @return The largest \code{Iteration} value in \code{genotype_attribution}.
get_num_samples <- function() {
  max(genotype_attribution$Iteration)
}
#' Get the genotypes available for attribution
#'
#' @return The unique rows over the first eight columns of
#'   \code{genotype_attribution}.
get_genotypes <- function() {
  # TODO: derive the column indices from the data instead of hard-coding 1:8
  unique(genotype_attribution[, 1:8])
}
#' Retrieve the i-th posterior sample for attribution for a given genotype
#' @param genotype the genotype to retrieve.
#' @param sample the sample to retrieve. Defaults to NULL, where all samples will be retrieved.
#' @return A data frame of sequence types, their allelic profile, and clonal complex.
#' @seealso pubmlst
get_source_probability_sample <- function(genotype, sample = NULL) {
  # TODO: derive the column indices (9:12) from the data, not hard-coded
  keep <- genotype_attribution$ST == genotype
  if (!is.null(sample)) {
    keep <- keep & genotype_attribution$Iteration == sample
  }
  genotype_attribution[keep, 9:12]
}
|
5e5b92f2bc51c3305213f93986799b6cd243c994 | edf92ac27f3c35101ba34583293f41482d560a3c | /boxplot/ini.R | 3761866772f569907142a91dc03c9b0d7d59c453 | [] | no_license | MarcosGrzeca/drunktweets | 9cfd863a566d53c508acbb919ab05b0de2a9ea4f | b0dc296bdd394125f32f39823268f3e612418339 | refs/heads/master | 2023-03-08T15:30:57.956160 | 2020-06-29T23:46:54 | 2020-06-29T23:46:54 | 142,778,753 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 309 | r | ini.R | # library
library(ggplot2)

# create a data frame
# (fixed idioms: TRUE instead of the reassignable alias T, seq_len(280)
#  instead of seq(1:280); values produced are unchanged)
# NOTE(review): no set.seed(), so `note` differs on every run.
# treatment (length 40) is recycled to 280 rows by data.frame().
variety <- rep(LETTERS[1:7], each = 40)
treatment <- rep(c("high", "low"), each = 20)
note <- seq_len(280) + sample(1:150, 280, replace = TRUE)
data <- data.frame(variety, treatment, note)

# grouped boxplot: one box per variety, split/coloured by treatment
ggplot(data, aes(x = variety, y = note, fill = treatment)) +
  geom_boxplot()
845e3c88d8843135fce5aff5fd5c6caf82e36861 | f08b26d01bd9380e0650aac130b68ddcdcc90515 | /Assignment 2 solution/Part 1 Q1 Data Wrangling/Part 1 1 a/Assignment2Part1ab.R | 2a150a64e77ee66da0a27a06d6a07824c8d45f0b | [] | no_license | MoreDhanshri/ADS-Assignment | 8b4dd4026522a227b9642c841fb8c5eef29f24f8 | 39c6ea5948ee09a9003afd5218317625290ebf4e | refs/heads/master | 2021-01-20T18:20:29.885366 | 2017-02-14T23:44:53 | 2017-02-14T23:44:53 | 61,416,088 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,761 | r | Assignment2Part1ab.R | setwd("D:\\Northeastern University\\ADS\\Assignment\\Assignment2\\Assignment 2 solution\\Part 1\\")
# --- Load raw meter data and engineer calendar features -------------------
raw.data<-read.csv("D:\\Northeastern University\\ADS\\Assignment\\Assignment2\\NewData.csv", header = TRUE)
# Keep only the electricity (kWh) readings
raw.data<-raw.data[raw.data$Units=="kWh",]
raw.data$Date<-strptime(raw.data$Date, '%m/%d/%Y')
raw.data$year<-as.numeric(format(raw.data$Date, '%Y'))
raw.data$month<-as.numeric(format(raw.data$Date, '%m'))
raw.data$day<-as.numeric(format(raw.data$Date, '%d'))
# Shift POSIXlt weekday (0=Sunday) so that 0=Monday ... 6=Sunday
w<-as.POSIXlt(raw.data$Date)$wday-1
w[w==-1]<-6
raw.data$DayOfWeek<-w
raw.data$Weekday<-ifelse(w<5, 1,0)
#power<-data.frame(raw.data[,5:292])
raw.data.final<- data.frame(raw.data$Account, raw.data$Date,raw.data$month,raw.data$day, raw.data$year,raw.data$DayOfWeek,raw.data$Weekday)
# splitting hours and binning it in 24 hours
# Columns 5:292 hold 288 intra-day readings; summing groups of 12 yields
# 24 hourly totals per row (presumably 5-minute readings -- TODO confirm).
raw<-data.frame(raw.data[,5:292])
g = split(seq(ncol(raw)), (seq(ncol(raw)) - 1) %/% 12 )
t2<-sapply(g, function(cols) rowSums( raw[, cols] ))
##Converting to single column
singleColt <- matrix(t(t2),ncol = 1)
# One row per (day, hour): repeat each daily row 24 times and attach hours
raw1repeat <- raw.data.final[rep(seq_len(nrow(raw.data.final)),each=24),]
#View(raw1repeat1)
hour<-c(0:23)
raw1repeat1<- cbind(raw1repeat,hour)
raw1final<-cbind(raw1repeat1,singleColt)
# Peak hours are 07:00-19:59
raw1final$PeakHour<-ifelse((raw1final$hour>=7) & (raw1final$hour<20), 1,0)
# (fix: the two statements above were accidentally duplicated; the second
#  cbind() rebuilt raw1final, discarding and re-adding PeakHour with no net
#  effect -- the duplicate pair has been removed)
#View(raw1final) ##final data of part1
# --- Hourly temperature data ---------------------------------------------
temperature.data<-read.csv("D:\\Northeastern University\\ADS\\Assignment\\Assignment2\\finalTemp3.csv", header = T) ####change this to add aggregation and fetching the
date11<- strptime(temperature.data$hourofday,"%m/%d/%Y")
# Parse the full timestamp to extract the hour of day
t.str<-strptime(temperature.data$hourofday,"%m/%d/%Y %H:%M")
time1<-as.POSIXlt(t.str,format(t.str,"%m%d%Y %H:%M:%S%z"))
temperature.data$Hour<-time1$hour
# NOTE(review): date11 is recomputed identically to the assignment above.
date11<- strptime(temperature.data$hourofday,"%m/%d/%Y")
temperature.data$Date<-date11
# Left-join temperature onto the hourly usage rows by (date, hour); usage
# rows without a temperature match keep NA (imputed later).
mergedraw1<-merge(raw1final,temperature.data,by.x = c("raw.data.Date","hour"),by.y=c("Date","Hour"),all.x = TRUE)
## Mean-impute a numeric vector: every NA is filled with the mean of the
## observed (non-NA) values; all other entries pass through unchanged.
impute.mean <- function(x) {
  x[is.na(x)] <- mean(x, na.rm = TRUE)
  x
}
#install.packages("plyr")
library(plyr)
#mergedraw1 <- ddply(mergedraw1, ~ hour, transform, temperature = impute.mean(temperature))
# NOTE(review): the per-hour imputation result is stored in mergedraw21 and
# never used again; the un-imputed mergedraw1 carries on below.
mergedraw21 <- ddply(mergedraw1, ~ hour, transform, temperature = impute.mean(temperature))
mergedraw1$X<- NULL
colnames(mergedraw1)[8] <- "kWh"
#mergerexample<-ddply(mergedraw1, "hour", transform, temperature = impute.mean(temperature))
#View(mergerexample)
#install.packages("data.table")
library(data.table)
# Rename merge artefacts to friendly column names
setnames(mergedraw1, old=c("raw.data.Date","raw.data.Account","raw.data.month","raw.data.day","raw.data.year","raw.data.DayOfWeek","singleColt"), new=c("Date", "Account","month","day","year","Day Of Week","Kkwh"))
mergedraw1<- mergedraw1[c( "Account","Date","kWh","month","day","year","hour","Day Of Week","PeakHour","temperature")]
mergedraw1$kWh<-NULL
mergedraw1$`Day Of Week`<-NULL
# NOTE(review): `w` was computed per *daily* row before the hourly
# expansion and the merge; recycling it here assumes the row order and
# length still line up -- verify.
mergedraw1$DayOfWeek<-w
mergedraw1$Weekday<-ifelse(w<5, 1,0)
# Re-attach the hourly consumption column and name it kWh
mergedraw12<-cbind(mergedraw1,singleColt)
names(mergedraw12)[names(mergedraw12) == 'singleColt'] <- 'kWh'
##########################################################################################
powerData<-mergedraw12
# Visual outlier inspection
boxplot(powerData,xlab=names(powerData))
boxplot(powerData$temperature)
#uif_RunTime<-quantile(fitness$RunTime,.75)+1.5*IQR(fitness$RunTime)
#lif_Weight<-quantile(fitness$Weight, .25)-1.5*IQR(fitness$Weight)
#tna<- powerData$temperature[powerData$temperature]
powerData[!complete.cases(powerData),]
mean(powerData$temperature, na.rm=TRUE)
median(powerData$temperature, na.rm=TRUE)
# Impute missing temperatures with the median, then replace low outliers
# (below Q1 - 1.5*IQR) and non-positive values with the mean.
powerData$temperature[is.na(powerData$temperature)] <- median(powerData$temperature, na.rm=TRUE)
lq_temperature<-quantile(powerData$temperature, .25)-1.5*IQR(powerData$temperature)
powerData$temperature[powerData$temperature<lq_temperature]<-mean(powerData$temperature)
powerData$temperature[powerData$temperature<=0]<-mean(powerData$temperature)
boxplot(powerData$temperature)
##### analysis for power
mean(powerData$kWh, na.rm=TRUE)
median(powerData$kWh, na.rm=TRUE)
powerData$kWh[is.na(powerData$kWh)] <- median(powerData$kWh, na.rm=TRUE)
boxplot(powerData$kWh)
#lq_kWh<-quantile(powerData$kWh, .25)-1.5*IQR(powerData$kWh)
#powerData$kWh[powerData$kWh<lq_kWh]<-mean(powerData$kWh<lq_kWh)
# NOTE(review): redundant -- NAs in kWh were already median-imputed above.
powerData$kWh[is.na(powerData$kWh)]<-mean(powerData$kWh, na.rm = TRUE)
lq_kwh<-quantile(powerData$kWh, .25)-1.5*IQR(powerData$kWh)
uq_kwh<-quantile(powerData$kWh,.75)+1.5*IQR(powerData$kWh)
mean(powerData$kWh )
median(powerData$kWh)
################# part 1 1.a : remove zero and build model ############
#powerData$kWh[powerData$kWh<=0]<-mean(powerData$kWh )
powerData<-data.frame(powerData)
#######################################################################
write.csv(powerData,file = "sampleformatWithZero.csv")
######################## Linear Regression ######################
# Drop identifier-ish columns before modelling
powerData$Account<-NULL
powerData$year<-NULL
#### splitting data######
smp_size <- floor(0.75 * nrow(powerData))
#Set the seed to make your partition reproducible
set.seed(300)
train_ind <- sample(seq_len(nrow(powerData)), size = smp_size)
#Split the data into training and testing
train <- powerData[train_ind, ]
test <- powerData[-train_ind, ]
#install.packages("leaps")
library(leaps)
##########Exhaustive Feature selection ###############
##### Searching all subset models up to size 8 by default
regfit.full=regsubsets(train$kWh~.,data=train)
reg8.summary =summary (regfit.full)
names(reg8.summary)
reg8.summary$rss
reg8.summary$adjr2
##### Searching all subset models up to size number of variables
regfit.full=regsubsets(train$kWh~.,data=train ,nvmax=11)
#View(regfit.full)
reg.summary =summary (regfit.full)
names(reg.summary)
reg.summary$rss
reg.summary$adjr2
#reg.summary$outmat
## Plotting and choosing the subset
par(mfrow=c(2,2))
plot(reg.summary$rss ,xlab="Number of Variables 11",ylab="RSS", type="l")
plot(reg.summary$adjr2 ,xlab="Number of Variables 11", ylab="Adjusted RSq",type="l")
plot(reg8.summary$rss ,xlab="Number of Variables 8",ylab="RSS", type="l")
plot(reg8.summary$adjr2 ,xlab="Number of Variables 8 ", ylab="Adjusted RSq",type="l")
# Export the 6-variable model's coefficients
regr_op<-coef(regfit.full ,6)
write.csv(regr_op, file="RegressionOutput.csv")
coef(regfit.full ,7)
#### Forward selection
regfit.fwd=regsubsets(train$kWh~.,data=train ,nvmax=8, method="forward")
# (fix: results were previously stored in `F`, masking base R's FALSE
#  shorthand; renamed to fwd.summary. All uses are local to this section.)
fwd.summary=summary(regfit.fwd)
names(fwd.summary)
fwd.summary
fwd.summary$rss
fwd.summary$adjr2
par(mfrow=c(1,2))
plot(fwd.summary$rss ,xlab="Number of Variables for forward selection",ylab="RSS", type="l")
plot(fwd.summary$adjr2 ,xlab="Number of for forward selection", ylab="Adjusted RSq",type="l")
coef(regfit.fwd,5)
#### Backward selection
regfit.bwd=regsubsets(train$kWh~.,data=train ,nvmax=8, method="backward")
# (fix: likewise `B`, which masked the TRUE/FALSE-style aliases, is now
#  bwd.summary)
bwd.summary=summary(regfit.bwd)
names(bwd.summary)
bwd.summary
bwd.summary$rss
bwd.summary$adjr2
coef(regfit.bwd,5)
####### final model
# Candidate linear models with increasing predictor sets
lm.fit5<-lm(kWh ~ Date+month+day+hour+PeakHour, data = train)
summary(lm.fit5)
lm.fit6<-lm(kWh ~ Date+month+day+hour+PeakHour+temperature, data = train)
summary(lm.fit6)
lm.fit6
lm.fit7<-lm(kWh ~ Date+month+day+hour+PeakHour+temperature+Weekday, data = train)
summary(lm.fit7)
lm.fitall = lm(kWh ~ ., data = train)
summary(lm.fitall)
#Measures of predictive accuracy
#install.packages("zoo")
library(forecast)
pred5 = predict(lm.fit5, test)
accuracy(pred5, test$kWh)
pred6 = predict(lm.fit6, test)
acc<-t(accuracy(pred6, test$kWh))
write.csv(acc, file="D:\\Northeastern University\\ADS\\Assignment\\Assignment2\\Assignment 2 solution\\Part 1\\PerformanceMatrix.1.a.b.withZero.csv")
|
37f367594697e3008d163022364a71cdfc324bb9 | 29585dff702209dd446c0ab52ceea046c58e384e | /cartography/R/getBorders.R | 5178cfb69705909c5999672003be929cbe25fecd | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,615 | r | getBorders.R | #' @title Extract SpatialPolygonsDataFrame Borders
#' @description Extract borders between SpatialPolygonsDataFrame units.
#' @name getBorders
#' @param spdf a SpatialPolygonsDataFrame. This SpatialPolygonsDataFrame
#' has to be projected (planar coordinates).
#' @param spdfid identifier field in spdf, default to the first column
#' of the spdf data frame. (optional)
#' @param tol tolerance to detect contiguity (in map units). You may
#' not want to change this parameter.
#' @return A SpatialLinesDataFrame of borders is returned. This object has three
#' id fields: id, id1 and id2.
#' id1 and id2 are ids of units that neighbour a border; id is the concatenation
#' of id1 and id2 (with "_" as separator).
#' @note This function uses the rgeos package.
#' @import sp
#' @examples
#' data(nuts2006)
#' # Get units borders
#' nuts0.contig.spdf <- getBorders(nuts0.spdf)
#' # Random colors
#' nuts0.contig.spdf$col <- sample(x = rainbow(length(nuts0.contig.spdf)))
#' # Plot Countries
#' plot(nuts0.spdf, border = NA, col = "grey60")
#' # Plot borders
#' plot(nuts0.contig.spdf, col = nuts0.contig.spdf$col, lwd = 3, add = TRUE)
#' @seealso \link{discLayer}
#' @export
getBorders <- function(spdf, spdfid = NULL, tol = 1){
# Package check and loading
# NOTE(review): rgeos was retired from CRAN in 2023; a port to sf/terra
# would be needed on current R installations.
if (!requireNamespace("rgeos", quietly = TRUE)) {
stop("'rgeos' package needed for this function to work. Please install it.",
call. = FALSE)
}
if(!'package:rgeos' %in% search()){
attachNamespace('rgeos')
}
# Distance : tolerance /2
# Each polygon is buffered by tol/2, so two units closer than `tol`
# overlap and are detected as contiguous.
distance <- tol/2
# Improbable separator used to join the two unit ids in temporary row names
mysep <- "_ksfh88ql_"
# create comments for polygons with holes
spdf <- rgeos::createSPComment(sppoly = spdf)
# spdf and spdfid check
id <- spdfid
if (is.null(id)){id <- names(spdf@data)[1]}
# Keep only the identifier column, renamed "id"
spdf@data <- spdf@data[id]
colnames(spdf@data)[1]<-"id"
row.names(spdf) <- as.character(spdf@data$id)
# Create a Buffer around polygons
geombuff <- rgeos::gBuffer(spdf, byid = TRUE, width = distance, quadsegs = 1,
capStyle = "SQUARE")
# Create intersections table between polygons
intergeom <- rgeos::gIntersects(geombuff, byid = TRUE, returnDense = F)
b1 <- length(intergeom)
# t flags whether `borders` has been initialised by a first polygonal hit
t <- 0
for (i in 1:b1) {
# Intersection
tmp1 <- geombuff[geombuff@data$id==names(intergeom[i]),]
for (j in intergeom[[i]]){
if (i != j){
# create a spdf for each intersection
tmp2 <- geombuff[j,]
frontArea <- rgeos::gIntersection(tmp1, tmp2)
row.names(frontArea) <- paste(tmp1@data$id,tmp2@data$id,sep=mysep)
# gIntersection can return point/line geometries for merely touching
# buffers; only polygonal overlaps are kept as borders here.
if(class(frontArea)=="SpatialPolygons"){
if(t==1){
borders <- rbind(borders, frontArea)
} else {
borders <- frontArea
t <- 1
}
}
}
}
}
# From spatialpolygonsdataframe to spatiallinesdataframe
df <- data.frame(id = sapply(methods::slot(borders, "polygons"),
methods::slot, "ID"))
row.names(df) <- df$id
borders <- SpatialPolygonsDataFrame(Sr = borders, data = df)
bordersline <- rgeos::gBoundary(borders, byid=TRUE, id = borders@data$id)
bordersline <- SpatialLinesDataFrame(bordersline, df)
# Ids management
# Split the "<id1><mysep><id2>" row names back into the two unit ids and
# expose the public "id1_id2" identifier expected by callers.
bordersline@data <- data.frame(
do.call('rbind',strsplit(as.character(bordersline@data$id), mysep)))
colnames(bordersline@data) <- c("id1","id2")
bordersline@data$id <- paste(bordersline@data$id1,
bordersline@data$id2,sep="_")
row.names(bordersline@data) <- bordersline@data$id
bordersline@data <- bordersline@data[, c("id", "id1", "id2")]
return(bordersline)
}
7f55e44d149d9a12ed0c9ce3d580186e8b60f4e3 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/JSM/R/logLik.jmodelTM.R | f0eb644a10eecd58174314b19f87bd060c8988cd | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 281 | r | logLik.jmodelTM.R |
# S3 logLik method for fitted joint models of class "jmodelTM".
# Returns the stored log-likelihood as a "logLik" object with the "df"
# attribute set to nrow(object$Vcov) and "n" to the stored sample size.
logLik.jmodelTM <- function (object, ...) {
  if (!inherits(object, "jmodelTM"))
    stop("Only used for 'jmodelTM' objects.\n")
  structure(object$logLik,
            df = nrow(object$Vcov),
            n = object$n,
            class = "logLik")
}
|
37831df57a0cb912b703d5c09234519e71318b8c | 088d515a03a785b6d70bdc77bd7bee9d7b212e67 | /main.R | 1d9e8525713f0918d8c26310dc4866a2dfd552a0 | [] | no_license | deronaucoin/diabetespredict | 70a25957d3ff886b0197540ecd1c344ece0f9c53 | 01dfa00c420d4dd10ef7c4adc07d1d488da2f482 | refs/heads/master | 2020-04-19T04:51:49.160478 | 2014-06-09T22:56:55 | 2014-06-09T22:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,821 | r | main.R | rm(list=ls())
library(RSQLite)
library(randomForest)
# NOTE(review): hard-coded local working directory; relative paths below
# ("compData.db") are resolved against it.
setwd("~/Dropbox/data/kaggle/practicefusion/mycode")
# Build a confusion table at a 0.50 probability cutoff (rows = truth,
# columns = prediction) and append each row's class-error rate.
makeconfusion <- function(y,pr){
  pred <- pr > 0.50
  tab <- table(y, pred)
  err <- c(1 - tab[1, 1] / sum(tab[1, ]),
           1 - tab[2, 2] / sum(tab[2, ]))
  out <- as.data.frame(cbind(tab, err))
  names(out) <- c('FALSE', 'TRUE', 'class.error')
  out
}
# Split `dataset` into a training set (~`prop` of rows) and a test set
# holding all remaining rows, reproducibly via `seed`.
# Returns list(train = ..., test = ...), as before.
#
# Fixes over the previous version:
#  * every row not sampled into train now goes to test; previously
#    `sample(setdiff(...), (1 - prop) * nobs)` silently dropped rows when
#    (1 - prop) * nobs was not a whole number;
#  * avoids the sample() scalar footgun: if the leftover pool had length 1,
#    sample(x) would have sampled from 1:x instead of returning x.
createSplits<-function(dataset,prop=.7,seed=42){
  set.seed(seed)
  nobs <- nrow(dataset)
  trainidx <- sample(nobs, floor(prop * nobs))
  testidx <- setdiff(seq_len(nobs), trainidx)
  list(train = dataset[trainidx, ], test = dataset[testidx, ])
}
# Open the competition SQLite database (path relative to setwd above)
n <- dbDriver("SQLite")
con <- dbConnect(n, dbname="compData.db")
# One row per patient: averaged vitals plus the diabetes indicator; BMI is
# restricted to (0, 100) to drop implausible readings.
sql = "select dmIndicator, avg(2014-YearOfBirth) as age, gender, avg(BMI) as BMI, avg(weight) as Weight, avg(height) as Height
, avg(systolicBP) as SystBP, avg(DiastolicBP) as DiastBP
, avg(RespiratoryRate) as RespRate, PatientGUID
from training_patientTranscript
where 1=1 and BMI < 100 and bmi > 0
group by PatientGuid"
pdata <- dbGetQuery(con, sql)
# data work
# NOTE(review): the query aliases the sex column as "gender" and the id as
# "PatientGUID", but the code below accesses pdata$Gender / PatientGuid --
# verify the column capitalisation RSQLite actually returns.
pdata[pdata$Gender == "F",]$Gender <- 0
pdata[pdata$Gender == "M",]$Gender <- 1
pdata$Gender <- as.factor(pdata$Gender)
pdata$dmIndicator <- as.factor(pdata$dmIndicator)
#simple impute RespRate where = 0
# Replace zero entries of `a` (used as a missing-value code) with the
# corresponding entries of `a.impute`; non-zero entries pass through.
# NAs in `a` propagate unchanged (ifelse semantics).
impute <- function (a, a.impute){
  ifelse(a == 0, a.impute, a)
}
# Model-based imputation: regress RespRate on the other predictors using
# rows where it was recorded (>0), then substitute model predictions for the
# zero placeholders. The last column (the patient id) is excluded as a
# predictor.
imp <- lm(RespRate ~ ., data=pdata[,-c(ncol(pdata))], subset=pdata$RespRate>0)
pred.imp <- predict (imp, pdata)
pdata$RespRate <- impute (pdata$RespRate, pred.imp)
#plot(pdata$BMI,pdata$age)
# NOTE(review): attach() is fragile; the bare names below (dmIndicator,
# PatientGuid) resolve through the attached copy of pdata.
attach(pdata)
# 1) split random 20% undiagnosed out of total pool of undiagnosed
set.seed(42)
allun <- pdata[which(dmIndicator==0),]
sun_idx <- sample(nrow(allun),nrow(pdata)*.2,replace=FALSE)
unsam <- allun[sun_idx,]
# remove our undiag patients from main data
rundata <- pdata[!(PatientGuid %in% unsam$PatientGuid),]
#remove the PGuids from each dataset, dont need anymore
rundata$PatientGuid <- NULL
unsam$PatientGuid <- NULL
# 2) split remaining ~8000 into train and test
d <- createSplits(rundata)
train <- d$train
test <- d$test
# 3) build model.glm logit , and remove weight and height, correlated with BMI
model.glm <- glm(dmIndicator ~ ., data=train[,-c(5,6)], family="binomial")
pr = predict(model.glm,test,type="response")
conf.glm <- makeconfusion(test$dmIndicator,pr)
# Overall misclassification rate and the class-1 (diabetic) error rate
overall.error.glm <- (conf.glm[2,1]+conf.glm[1,2])/nrow(test)
diab.error.glm <- conf.glm[2,3]
conf.glm
overall.error.glm
diab.error.glm
# 4) build model.rf randomForest
# 1000 trees, 2 candidate variables per split, sampling without replacement;
# na.roughfix imputes remaining NAs (column medians/modes).
model.rf <- randomForest(y=as.factor(train$dmIndicator), x=train[,-1],ntree=1000, mtry=2,
importance=TRUE,
na.action=na.roughfix,
replace=FALSE
)
# type="prob" returns a matrix of class probabilities; column 2 is the
# probability of class "1" (diabetic).
pr <- predict(model.rf,test,type="prob")
conf.rf <- makeconfusion(test$dmIndicator,pr[,2])
diab.error.rf <- conf.rf[2,3]
overall.error.rf <- (conf.rf[2,1]+conf.rf[1,2])/nrow(test)
conf.rf
overall.error.rf
diab.error.rf
# 5) pick best by diab class error (lower class-1 error wins)
if (diab.error.glm > diab.error.rf){
print ("RF won")
winmodel = model.rf
prtype = "prob"
winprederror = diab.error.rf
} else {
print ("GLM won")
winmodel = model.glm
prtype = "response"
winprederror = diab.error.glm
}
# 6) score our undiagnosed dataset and get predicted probabilities
# (fix: predict.glm with type="response" returns a plain probability
#  vector, so the unconditional "[,2]" used previously crashed whenever the
#  GLM won; only randomForest's type="prob" returns a matrix)
raw.pred <- predict(winmodel, unsam, type = prtype)
win.prob <- if (is.matrix(raw.pred)) raw.pred[, 2] else raw.pred
probs2 <- win.prob >= 0.50
summary(probs2)
# 7) calculate proportion above .5 as likely undiagnosed.
pred_diab <- sum(probs2)/length(probs2)
print(paste("Predicted proportion having diabetes:",round(pred_diab,3)))
#adjust for prediction error of model
print(paste("Adjusted proportion for model error:",round(pred_diab*(1-winprederror),3)))
|
9dd953a65361d521911c0ba5d320bdb706dc8041 | c418c599316af9658a21e111cd3259335dd019c7 | /res/PHA-PLA.r | 76bbbdeda4eae46786a97a23efe9403d2e95a66c | [] | no_license | LucasRodolfo/MC861 | 534f0705544e70bd367a9324b5524363e516ee7d | dda343d2685f412e7db562a86d3cb8cabf8c17f9 | refs/heads/master | 2022-12-21T06:48:40.125911 | 2019-11-22T23:34:23 | 2019-11-22T23:34:23 | 201,146,981 | 0 | 0 | null | 2022-12-10T01:27:04 | 2019-08-08T00:20:48 | TypeScript | UTF-8 | R | false | false | 570 | r | PHA-PLA.r | | pc = 0xc001 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc003 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x00fc | p[NV-BDIZC] = 00110100 | mem[0x01fd] = 0x05 |
| pc = 0xc004 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x00fc | p[NV-BDIZC] = 00110110 |
| pc = 0xc006 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110100 | mem[0x01fd] = 0x05 |
| pc = 0xc007 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110111 |
| pc = 0xc009 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110111 |
|
08f66676e134b93c1f62fc76472fc91da2e2e16d | 08236e0b88200e44ad104cb02836237883ea4f6a | /rate_functions.R | e114a868ffe2d6593464338d28ab21d1c52e9985 | [] | no_license | inferentialist/hlc-returns | aa1219db18c0dee9b0c52613432d46bf1f2bf909 | fdc0bea9426844da2050e92b7837a327d5ed8454 | refs/heads/master | 2021-01-15T12:25:12.915505 | 2013-09-21T00:24:41 | 2013-09-21T00:24:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,464 | r | rate_functions.R |
## Gross annual growth factor implied by the quoted interest rate:
## 1 + (median percentage rate across the loans in `xdf`) / 100.
quoted_rate_f <- function(xdf) {
  1 + median(xdf$int_rate) / 100
}
## Annualized return of a 36-month fully amortizing $1 loan at the median
## quoted rate, with coupons held as idle cash (no reinvestment, no defaults).
ideal_static_rate_f <- function(xdf) {
  monthly.rate <- (quoted_rate_f(xdf) - 1) / 12
  ## Standard annuity coupon for $1 of principal over 36 months.
  growth <- (1 + monthly.rate)^36
  coup <- monthly.rate * growth / (growth - 1)
  ## Total payout over 3 years, converted to an annual growth factor.
  (coup * 36)^(1/3)
}
## Annualized return of a 36-month fully amortizing $1 loan at the median
## quoted rate when every coupon is immediately reinvested into new loans
## with the same payout profile (no defaults).
ideal_managed_rate_f <- function(xdf) {
  monthly.rate <- (quoted_rate_f(xdf) - 1) / 12
  ## Assume funded_amnt is $1: standard annuity coupon over 36 months.
  growth <- (1 + monthly.rate)^36
  coup <- monthly.rate * growth / (growth - 1)
  ## Remaining principal after each of months 36..0.
  k <- 36:0
  Tk <- coup / monthly.rate + (1 + monthly.rate)^k * (1 - coup / monthly.rate)
  ## Dollars put to work at each month: $1 up front, then compounding coupons.
  v <- c(1, coup * (coup + 1)^(0:35))
  ## Portfolio value at month 36, annualized (returned as a 1x1 matrix,
  ## matching the original %*% result).
  (Tk %*% v)^(1/3)
}
## Annualized historical return with no reinvestment: average realized
## payout per dollar funded, taken to the 1/3 power (3-year horizon).
hist_static_rate_f <- function(xdf) {
  payout.per.dollar <- xdf$total_pymnt / xdf$funded_amnt
  mean(payout.per.dollar)^(1/3)
}
## Shift `z` forward by `k` positions: pad the front with `k` NAs and drop
## the last `k` elements so the output length equals length(z).
my.lag <- function(z, k = 1) {
  n <- length(z)
  shifted <- c(rep(NA, k), z)
  shifted[1:n]
}
## Annualized (3-year) historical return of an equal-weighted, actively
## managed loan portfolio: monthly cash received (coupons and prepayment
## lump sums) is immediately reinvested into new loans with the same payout
## profile; defaulted loans pay nothing from the default month onward.
## Expected xdf columns: installment, funded_amnt, int_rate,
## time (months survived; 36 = ran to term) and status
## (TRUE = defaulted at `time`, FALSE = prepaid at `time`).
## NOTE(review): column semantics inferred from the loop below and the
## commented test case after this function -- confirm against the data prep.
hist_managed_rate_f = function(xdf)
{
  ## Per-dollar monthly coupon and monthly interest rate, one per loan.
  coup = xdf$installment / xdf$funded_amnt
  monthly.rate = xdf$int_rate / 100 / 12
  k = 0:36
  ## Scheduled remaining principal for each loan (rows) at month k (cols);
  ## the outer product %o% builds (1+r)^k for every loan/month pair.
  prncp.remaining = coup / monthly.rate + exp(log(1+monthly.rate) %o% k)*(1-coup/monthly.rate)
  sched.coupons = matrix(rep(coup, times=37), nr=nrow(prncp.remaining), nc=37)
  ## Masks over loans x months: couponpay.mask[i,t] = 1 if loan i pays its
  ## coupon at month index t; month 0 (column 1) pays nothing.
  couponpay.mask = matrix(1, nr=nrow(prncp.remaining), nc=length(k))
  couponpay.mask[,1] = 0
  prepay.mask = matrix(0, nr=nrow(prncp.remaining), nc=length(k))
  ## update masks to reflect the historical activity
  for(i in which(xdf$time<36))
  {
    time = xdf$time[i]
    status = xdf$status[i]
    tidx = time + 1
    if(status == TRUE)
    {
      ## default at k[tidx], no payments from here on out
      couponpay.mask[i,tidx:37] = 0
    } else {
      ## prepay at k[tidx], i.e. lump sum, but no future coupon payments
      couponpay.mask[i,(tidx+1):37] = 0
      prepay.mask[i,tidx] = 1
    }
  }
  ## construct the payout matrix
  cashflow.matrix = sched.coupons * couponpay.mask + prncp.remaining * prepay.mask
  n.loans = nrow(xdf)
  weights = rep(1/n.loans, n.loans)
  ## portfolio & reinvestment payouts at time k
  payout.k = t(weights %*% cashflow.matrix)
  payout.k[1,1] = NA
  ## Each appended column cc models the cash received at month cc being
  ## reinvested into a fresh cohort with the same payout profile, shifted
  ## forward in time via my.lag().
  for(cc in 2:36)
  {
    cash.avail = apply(payout.k, 1, sum)
    next.col = my.lag(payout.k[,1] * cash.avail[cc], k=(cc-1))
    payout.k = cbind(payout.k, next.col)
  }
  ## Multiply the residual portfolio value at time k by the weights of the
  ## cash reinvested at time k to get the total portfolio value
  cash.reinvested.k = apply(payout.k, 1, sum, na.rm=TRUE)
  cash.reinvested.k[1] = 1
  # hist.prncp.remaining = rev(weights %*% (prncp.remaining * couponpay.mask))
  ## Realized (mask-adjusted) principal outstanding per month, floored at 0
  ## and reversed so month 36 aligns with the first reinvestment vintage.
  hist.prncp.remaining = rev(apply(prncp.remaining * couponpay.mask,2, function(x) pmax(0,x) %*% weights))
  hist.prncp.remaining[37] = 1
  ## Portfolio value at the horizon, annualized over 3 years (1x1 matrix).
  rval = (cash.reinvested.k %*% hist.prncp.remaining)^(1/3)
  return(rval)
}
## super simple test case example
## z = data.frame(
## funded_amnt = 1,
## int_rate = 7.88,
## time = 36,
## status = FALSE,
## installment = 0.5733334,
## total_pymnt = 36*0.5733334,
## total_rec_prncp = 1
## )
## hist_managed_rate_f(z) == ideal_managed_rate_f(z) == 1.081709
|
7572c9869cf9b01b158c027cc66d86186b16285a | d4fa07a7d1e586502356dd611643e81e82d74c34 | /mapa2.R | bec09c656c4362d592763341c395f1ce718b3215 | [] | no_license | LucianoAndrian/CC | 855ffea1ee129bb08e452e677a0e668b3b24e4e8 | c82b38c755038392a66c5e24bcf620b48f9d961f | refs/heads/master | 2020-09-03T23:46:49.192192 | 2019-12-02T22:06:23 | 2019-12-02T22:06:23 | 219,604,685 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,512 | r | mapa2.R | mapa2 = function(lista, titulo1, nombre, label){
library(maps)
require(fields)
require(mapdata)
library(ggplot2)
library(metR)
library(RColorBrewer)
library(mapproj)
titulo = c("DJF", "MAM", "JJA", "SON")
for(i in 1:4){
value = array(lista[[i]]*mask, dim = 23*30)
data = matrix(data = NA, nrow=23*30, ncol = 3)
l=0
while(l<23*30){
data[seq(l:l+23),1]<-lon
l=l+23
}
for(j in 1:30){
lat_v = array(lat[j],dim=23)
data[(23*j-22):(j*23),2]<-lat_v
}
data[,3]<-value
error<-as.data.frame(data)
colnames(error)<-c("lon", "lat", "rx5")
error[which(error$lon>180),][,1]<-error[which(error$lon>180),][,1]-360
mapa <- map_data("world", regions = c("Brazil", "Uruguay", "Argentina", "French Guiana", "Suriname", "Colombia", "Venezuela",
"Bolivia", "Ecuador", "Chile", "Paraguay", "Peru", "Guyana", "Panama", "Costa Rica", "Nicaragua"),
colour = "black")
g <- ggplot() + theme_minimal()+
xlab("Longitud") + ylab("Latitud") +
theme(panel.border = element_blank(), panel.grid.major = element_line(colour = "grey"), panel.grid.minor = element_blank())+
#geom_tile(data=error,aes(x = lon, y= lat,fill = rx5),alpha=1, na.rm = T)
geom_contour_fill(data = error, aes(x = lon, y= lat, z = rx5), alpha = 1, na.fill= (-10000))+
scale_fill_gradientn(limits=c(-150,150),name=label,colours=(brewer.pal(n=11,"Spectral")),na.value = "white")+
geom_polygon(data=mapa, aes(x=long,y=lat, group =group),fill = NA, color = "black") +#coord_map("stereographic", orientation = c(-35, -56, 0))+
ggtitle(paste(titulo1, " - " , titulo[i], sep = ""))+
scale_x_continuous(limits = c(-90, -30))+
scale_y_continuous(limits = c(-60, 15)) +
theme(axis.text.y = element_text(size=14), axis.text.x = element_text(size=14), axis.title.y = element_text(size=14),
axis.title.x = element_text(size=14), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
panel.border = element_rect(colour = "black", fill=NA, size=3),
panel.ontop = TRUE,
plot.title = element_text(hjust=0.5))
ggsave(paste("/home/auri/Facultad/Materias/Cambio_climatico/Tp_final/salidas/",nombre, "_", titulo[i], ".jpg",sep =""), plot = g, width = 15, height = 15 , units = "cm")
}
}
|
19c342ca6c758d3db0fe397d7c65c04eaeff11c6 | fc45ee77e310e641c1164db159865e5aea355560 | /man/InverseGWishartPriorFragment.Rd | d581f65545063fd33b89485ee8e7906473442983 | [] | no_license | jatotterdell/varapproxr | 2baee2d0c07a96ebae7ff499293903e9e0c91df8 | b2e9105894e1a9f2ac7366b84d9ed701e8a5cf21 | refs/heads/master | 2023-07-20T15:15:23.772555 | 2023-07-14T03:35:33 | 2023-07-14T03:35:33 | 172,455,526 | 1 | 6 | null | null | null | null | UTF-8 | R | false | true | 453 | rd | InverseGWishartPriorFragment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{InverseGWishartPriorFragment}
\alias{InverseGWishartPriorFragment}
\title{Inverse G-Wishart prior fragment update}
\usage{
InverseGWishartPriorFragment(G, xi, Lambda)
}
\arguments{
\item{G}{The graph matrix}
\item{xi}{The prior shape}
\item{Lambda}{The prior symmetric positive definite matrix}
}
\description{
Inverse G-Wishart prior fragment update
}
|
8e06a78bbacfd05efcfb40a781b328dbfc4c9dbf | e02d7135fd71788ead79ec88ade5c582de4e4d4e | /man/bandeiras.Rd | 338aa9a54ce0f55b56583a8a09c2efedc319a4aa | [
"MIT"
] | permissive | curso-r/bandeiras | b20a398d7a84009c99a8adfbf93285ff907d5465 | 8d0c26c8313cefc4a592ebd9a26417c13f46e31d | refs/heads/master | 2022-12-31T12:59:43.151758 | 2020-10-14T22:23:24 | 2020-10-14T22:23:24 | 304,144,326 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 343 | rd | bandeiras.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-data.R
\docType{data}
\name{bandeiras}
\alias{bandeiras}
\title{bandeiras}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 195 rows and 3 columns.
}
\usage{
bandeiras
}
\description{
bandeiras
}
\keyword{datasets}
|
f4c659e93cf3b10510a91bab666006f3d791930c | 448a1ff0d4b3d7029b5df32c0a09f32a338c8b65 | /Lab Week 4-2 (Data Frames).R | eb98f3250fc0f43b5c6eefe1530995c7e431fc9a | [] | no_license | LouisJBooth/R | ccdca481d1e5ef78aa18e346046f42163b4ee982 | 6820bf082a3562a74a259ad7688771a2c68e1325 | refs/heads/master | 2020-04-17T15:50:57.087680 | 2019-01-20T23:24:59 | 2019-01-20T23:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 833 | r | Lab Week 4-2 (Data Frames).R | mtcars
# Lab exercise: exploring data frames with mtcars and a hand-built portfolio.
# Inspect the built-in mtcars data set.
head(mtcars)
tail(mtcars)
str(mtcars)
# Simple linear model: mpg explained by cylinder count and horsepower.
lm(mtcars$mpg ~ mtcars$cyl + mtcars$hp)
# Build a small company portfolio data frame from parallel vectors.
name <- c("Apple", "MS", "Google", "Honda", "GM", "Volks", "Hyundai", "Amazon")
type <- c("IT", "IT", "IT", "Auto", "Auto", "Auto", "Auto", "IT")
stock <- c(165.5, 55.48, 1119.20, 36.16, 41, 172.06, 162.5, 1429.95)
US <- c(TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE)
portfolio <- data.frame(name, type, stock, US)
# The vectors are copied into the data frame, so the originals can go.
rm(name, type, stock, US)
str(portfolio)
# Subsetting examples: logical column mask keeps only the `stock` column.
portfolio[portfolio$name=="Google", c(FALSE, FALSE, TRUE, FALSE)]
portfolio[portfolio$name=="Google",]
portfolio[c(1:5), c(FALSE, FALSE, TRUE, FALSE)]
portfolio[portfolio$type=="IT",]
# Two equivalent ways to select companies cheaper than Apple's stock price.
subset(portfolio, subset = stock < stock[name=="Apple"])
portfolio[portfolio$stock < portfolio$stock[portfolio$name == "Apple"],]
# Row order sorted by stock price, descending (shadows base::rank -- harmless
# here, but a different variable name would be cleaner).
rank <- order(portfolio$stock, decreasing = TRUE)
portfolio[rank,]
|
1f0221717d54b13f7875616a755a9a99c0c84b52 | b6c843dba2c3c2f5aa4783b075ec5fb821a6b7d6 | /after_renewal/CREATE/CREATE_P50DATABASENAMES.R | 947d9d010c2bb0dbda2f894577928d80aabfde63 | [] | no_license | bonnfire/P50 | 010da47fe04e128d5d7643c788d9404284962e34 | 0489d770a7660b899d8aaf17c86b72e925233c16 | refs/heads/master | 2021-07-14T08:44:09.714395 | 2021-03-24T01:03:55 | 2021-03-24T01:03:55 | 243,115,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 688 | r | CREATE_P50DATABASENAMES.R | ## CREATE P50 DATABASE NAMES
## Stack the three WFU shipment spreadsheets (Meyer, Richards/Jerry, Chen)
## into one long table, tagging each row with its shipment of origin.
## NOTE(review): assumes the WFU_*_excel_orig_test_df data frames, the
## magrittr pipe, dplyr::mutate_all, stringr and data.table::rbindlist are
## already in scope from the surrounding project code -- confirm.
shipments <- list("Meyer" = WFU_Meyer_excel_orig_test_df,
                  "Richards" = WFU_Jerry_excel_orig_test_df,
                  "Chen" = WFU_Chen_excel_orig_test_df)
shipments_p50_df <- shipments %>% lapply(., function(x){
  # Convert every column to character so the regex/nchar logic is uniform.
  x <- x %>% mutate_all(as.character)
  # If the cohort field contains "#", keep only the digits right after it.
  x$cohort <- ifelse(grepl("#", x$cohort), stringr::str_match(x$cohort, "#(\\d+).*?")[,2], x$cohort)
  x$cohort <- ifelse(nchar(x$cohort) > 1, x$cohort, gsub('([[:digit:]]{1})$', '0\\1', x$cohort)) # add leading zeroes when necessary
  x$litternumber = as.numeric(x$litternumber)
  x$littersize = as.numeric(x$littersize)
  return(x)
  # `id` partially matches rbindlist's `idcol` argument, so the list names
  # (Meyer/Richards/Chen) become a new "p50" column on the stacked table.
}) %>% rbindlist(id = "p50", fill = T, use.names = T)
|
30efb54d179b56e286304cea98a0678973bb4c09 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/expm/examples/expm.Higham08.Rd.R | 6734286f39792531fac553749efea61a852e6e2e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,022 | r | expm.Higham08.Rd.R | library(expm)
## Worked examples for expm::expm.Higham08 (Higham 2008 scaling-and-squaring
## matrix exponential). The commented matrices after each call are the
## reference outputs from Ward (1977) for comparison.
### Name: expm.Higham08
### Title: Matrix Exponential [Higham 2008]
### Aliases: expm.Higham08
### Keywords: algebra math
### ** Examples
## The *same* examples as in ../expm.Rd {FIXME} --
x <- matrix(c(-49, -64, 24, 31), 2, 2)
expm.Higham08(x)
## ----------------------------
## Test case 1 from Ward (1977)
## ----------------------------
test1 <- t(matrix(c(
    4, 2, 0,
    1, 4, 1,
    1, 1, 4), 3, 3))
expm.Higham08(test1)
## [,1] [,2] [,3]
## [1,] 147.86662244637000 183.76513864636857 71.79703239999643
## [2,] 127.78108552318250 183.76513864636877 91.88256932318409
## [3,] 127.78108552318204 163.67960172318047 111.96810624637124
## -- these agree with ward (1977, p608)
## ----------------------------
## Test case 2 from Ward (1977)
## ----------------------------
## A symmetric matrix with widely spread eigenvalues; balancing on/off
## is compared below.
test2 <- t(matrix(c(
    29.87942128909879, .7815750847907159, -2.289519314033932,
     .7815750847907159, 25.72656945571064, 8.680737820540137,
    -2.289519314033932, 8.680737820540137, 34.39400925519054),
    3, 3))
expm.Higham08(test2)
expm.Higham08(test2, balancing = FALSE)
## [,1] [,2] [,3]
##[1,] 5496313853692405 -18231880972009100 -30475770808580196
##[2,] -18231880972009160 60605228702221760 101291842930249376
##[3,] -30475770808580244 101291842930249200 169294411240850880
## -- in this case a very similar degree of accuracy.
## ----------------------------
## Test case 3 from Ward (1977)
## ----------------------------
test3 <- t(matrix(c(
    -131, 19, 18,
    -390, 56, 54,
    -387, 57, 52), 3, 3))
expm.Higham08(test3)
expm.Higham08(test3, balancing = FALSE)
## [,1] [,2] [,3]
##[1,] -1.5096441587713636 0.36787943910439874 0.13533528117301735
##[2,] -5.6325707997970271 1.47151775847745725 0.40600584351567010
##[3,] -4.9349383260294299 1.10363831731417195 0.54134112675653534
## -- agrees to 10dp with Ward (1977), p608. ??? (FIXME)
## ----------------------------
## Test case 4 from Ward (1977)
## ----------------------------
## A 10x10 nilpotent-plus-tiny-perturbation shift matrix.
test4 <-
    structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-10,
                1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 1, 0),
              .Dim = c(10, 10))
E4 <- expm.Higham08(test4)
Matrix(zapsmall(E4))
## Dense and sparse inputs should give the same exponential.
S4 <- as(test4, "sparseMatrix") # some R based expm() methods work for sparse:
ES4 <- expm.Higham08(S4, bal=FALSE)
stopifnot(all.equal(E4, unname(as.matrix(ES4))))
## NOTE: Need much larger sparse matrices for sparse arith to be faster!
##
## example of computationally singular matrix
##
m <- matrix(c(0,1,0,0), 2,2)
eS <- expm.Higham08(m) # "works" (hmm ...)
|
b819617edd6f99911eddb09c149112ffb7e66978 | e731c856a93cf0b9f54bb3c44c81073aec82d5bb | /man/MFT.mean.Rd | e8963c8001352882cc7a3a27da7f315e7df4f5e4 | [] | no_license | cran/MFT | 75b22d5e26a552afcaa9b6c26859e2f5c38c4bf1 | db17d0591c9f00a9afe80da6936e894397593a91 | refs/heads/master | 2021-10-22T16:23:32.490875 | 2019-03-11T19:42:55 | 2019-03-11T19:42:55 | 103,674,967 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,281 | rd | MFT.mean.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MFT.mean.R
\name{MFT.mean}
\alias{MFT.mean}
\title{MFT.mean}
\usage{
MFT.mean(X, autoset.H = TRUE, S = NULL, E = NULL, H = NULL,
alpha = 0.05, method = "asymptotic", sim = 10000,
rescale = FALSE, Q = NA, perform.CPD = TRUE, print.output = TRUE)
}
\arguments{
\item{X}{numeric vector, input sequence of random variables}
\item{autoset.H}{logical, automatic choice of window size H}
\item{S}{numeric, start of time interval, default: NULL, if NULL then 1 is chosen}
\item{E}{numeric, end of time interval, default: NULL, if NULL then length(X) is chosen, needs E > S.}
\item{H}{vector, window set H, all elements must be increasing, the largest element must be =< (T/2). H is automatically set if autoset.H = TRUE}
\item{alpha}{numeric, in (0,1), significance level}
\item{method}{either "asymptotic" or "fixed", defines how threshold Q is derived, default: "asymptotic", If "asymptotic": Q is derived by simulation of limit process L (Brownian motion); possible set number of simulations (sim), If "fixed": Q may be set manually (Q)}
\item{sim}{integer, > 0, No of simulations of limit process (for approximation of Q), default = 10000}
\item{rescale}{logical, if TRUE statistic G is rescaled to statistic R, default = FALSE}
\item{Q}{numeric, rejection threshold, default: Q is simulated according to sim and alpha.}
\item{perform.CPD}{logical, if TRUE change point detection algorithm is performed}
\item{print.output}{logical, if TRUE results are printed to the console}
}
\value{
invisible
\item{M}{test statistic}
\item{Q}{rejection threshold}
\item{method}{how threshold Q was derived, see 'Arguments' for detailed description}
\item{sim}{number of simulations of the limit process (approximation of Q)}
\item{rescale}{states whether statistic G is rescaled to R}
\item{CP}{set of change points estimated by the multiple filter algorithm, increasingly ordered in time}
\item{means}{estimated mean values between adjacent change points}
\item{S}{start of time interval}
\item{E}{end of time interval}
\item{Tt}{length of time interval}
\item{H}{window set}
\item{alpha}{significance level}
\item{perform.CPD}{logical, if TRUE change point detection algorithm was performed}
\item{tech.var}{list of technical variables with processes X and G_ht or R_ht}
\item{type}{type of MFT which was performed: "mean"}
}
\description{
The multiple filter test for mean change detection in time series or sequences of random variables.
}
\examples{
# Normal distributed sequence with 3 change points of the mean (at n=100, 155, 350)
set.seed(50)
X1 <- rnorm(400,0,1); X2 <- rnorm(400,3,1); X3 <- rnorm(400,5,1); X4 <- rnorm(600,4.6,1)
X <- c(X1[1:100],X2[101:155],X3[156:350],X4[351:600])
mft <- MFT.mean(X)
plot(mft)
# Set additional parameters (window set)
mft2 <- MFT.mean(X,autoset.H=FALSE,H=c(80,160,240))
plot(mft2)
}
\references{
Michael Messer, Stefan Albert and Gaby Schneider (2018). The multiple filter test for change point detection in time
series. Metrika <doi:10.1007/s00184-018-0672-1>
}
\seealso{
\code{\link{plot.MFT}, \link{summary.MFT}, \link{MFT.rate}, \link{MFT.variance}, \link{MFT.peaks}}
}
\author{
Michael Messer, Stefan Albert, Solveig Plomer and Gaby Schneider
}
|
f7b2eb8c8dfc7498f380c2f2b4b4cda139329423 | db65898c2edba5bca72e85b7d0382ae03e19d244 | /R/timeEstimation.R | 05c4e2958882c70f9f0cdb62803f7b3c762909f9 | [] | no_license | sdcTools/sdcMicro | 9193dd10c9cec6a16d90b32d4f8a78c84283bbd3 | 74ba57c4b579d2953d6e7bfa36a6cd648e7fff10 | refs/heads/master | 2023-09-05T05:20:31.170506 | 2023-08-30T10:54:09 | 2023-08-30T10:54:09 | 12,051,341 | 61 | 32 | null | 2023-08-30T09:52:47 | 2013-08-12T08:30:12 | R | UTF-8 | R | false | false | 1,546 | r | timeEstimation.R | # TODO: Better estimate for the computation in a computer?? Author: Alexander Kowarik n:
# nrow dataset nkey: number of key variables nmean: mean number of categories
## Estimated runtime (in seconds) of freqCalc() for a dataset with `n` rows,
## `nkey` key variables and an average of `nmean` categories per key.
## The polynomial coefficients were fit by regression on benchmark timings
## (see the commented simulation code below). coefTime() rescales from the
## benchmark machine to the current one.
## Bug fix: the original called the no-argument generic `coef()`, which
## errors because stats::coef requires a fitted-model object; the intended
## call is the machine-speed factor coefTime() defined below.
predictTime <- function(n, nkey, nmean) {
  coefTime() * (5.297e-12 * n^2 + 1.178e-06 * n * nkey + -2.973e-07 * n * nmean) #FreqCalc
}
## Rough machine-speed scaling factor: time a fixed workload (drawing 500k
## normal deviates) and divide by the 0.06s reference of the machine the
## runtime model was fitted on. Floored at 0.1 so the factor never collapses
## to zero on very fast machines.
coefTime <- function() {
  start <- Sys.time()
  scratch <- stats::rnorm(5e+05)
  rm(scratch)
  elapsed <- as.numeric(Sys.time() - start) / 0.06
  max(0.1, elapsed)
}
# genDat <- function(n=10,nkey=2,nmean=4){ nmeans <-
# round(abs(rnorm(nkey-1,mean=nmean,sd=sqrt(nmean/4)))) nmeans <-
# c(nmeans,nmean*nkey-sum(nmeans)) cols <- list() for(i in 1:nkey){ cols[[i]] <-
# sample(1:nmeans[i],n,rep=TRUE) } d <- data.frame(do.call(cbind,cols)) colnames(d) <-
# paste('key',1:nkey,sep='') return(d) } Code to estimate the coefficents coef() setwd()
# require(sdcMicro) timeFreq <- function(n=10,nkey=2,nmean=4,REP=3){ dat <-
# genDat(n=n,nkey=nkey,nmean=nmean) t <- vector() for(i in 1:REP){ tt <- Sys.time() f <-
# freqCalc(dat,keyVars=1:nkey) t <- c(t,as.numeric(Sys.time()-tt)) } mean(t) }
# timeFreq(n=8e6,nkey=6,nmean=5,REP=3) predictTime(n=8e6,nkey=6,nmean=5) REP <- 5 ns <-
# c(1e2,1e3,5e3,1e4,3e4,1e5,4e5,1e6) nkeys <- c(3,5,7,10) nmeans <- c(2,4,6,20) simgrid <-
# expand.grid(n=ns,nkey=nkeys,nmean=nmeans) ergsim <-
# apply(simgrid,1,function(x)timeFreq(n=x[1],nkey=x[2],nmean=x[3],REP=REP)) ergsim1 <-
# cbind(ergsim,simgrid) mod <- lm(ergsim~0+I(n^2)+n:nkey+n:nmean,data=ergsim1) summary(mod)
|
0dba2691499e85549223660c3cf04ad85a6c5df5 | d11a33caffe439f15351d4d6059cd71a102177e5 | /man/diversity_through_time.Rd | 8c36a0c963a449f9f2718460d5a4d835da91c4c0 | [] | no_license | thauffe/simGDM | 0ec5cda909d39a6415bb9cd1dd24944be896f6e5 | e4a4c98548b261f7bba9dcfcc6c4c0d48e0a513d | refs/heads/master | 2020-07-27T12:48:58.927397 | 2020-02-13T15:11:33 | 2020-02-13T15:11:33 | 209,064,906 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 9,964 | rd | diversity_through_time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diversity_through_time.R
\name{diversity_through_time}
\alias{diversity_through_time}
\title{Island diversity and events through time}
\usage{
diversity_through_time(Elevation, Topography, Area, Vs, S0, E0, Ms, Iso_t,
Imm, C0, Ana, Emi, Z, Ma, Msa, DivDep = TRUE, TargetEffect = FALSE,
EnvirFilt = FALSE)
}
\arguments{
\item{Elevation}{Vector of island elevation through time}
\item{Topography}{Vector of island topography through time}
\item{Area}{Vector of island area through time}
\item{Vs}{Probability of a vicariance event when topography equals 1}
\item{S0}{The per species probability of adaptive speciation at maximum empty niche space}
\item{E0}{The per-species probability of extinction when richness equals Kmax}
\item{Ms}{Species richness of the mainland source pool}
\item{Iso_t}{A descriptor of island isolation in arbitrary units}
\item{Imm}{The probability of one species arriving from the mainland per time step}
\item{C0}{The per-species probability of successful colonization at full empty niche space}
\item{Ana}{The per-species probability of anagenesis per time step}
\item{Emi}{Probability of recolonization of the source area per time step}
\item{Z}{The exponent of the species-area relationship between the island and the source area}
\item{Ma}{The realized size of the source area that provides new species}
\item{Msa}{Power exponent controlling the abundance distribution of the source}
\item{DivDep}{TRUE (default) Diversity dependence acting on immigration, extinction and in-situ speciation}
\item{TargetEffect}{FALSE (default) No effect of island area on the immigration probability}
\item{EnvirFilt}{FALSE (default) No effect of island elevation as proxy for habitat diversity on the immigration probability}
}
\value{
The output is a dataframe containing diversity and rates per time step.
\item{Richness}{ Native species richness}
\item{NonEndemics}{ Non-endemic species richness}
\item{Endemics}{ Endemic species richness}
\item{Kmax}{ Carrying capacity}
\item{EndemicsClado}{ Endemic species evolved via in-situ cladogenesis}
\item{EndemicsAna}{ Endemic species evolved via in-situ anagenesis}
\item{Immigrants}{ Species immigrating at that time step to the island}
\item{NewViaClado}{ Species evolved via in-situ cladogenesis at that time step}
\item{NewViaNonadaptClado}{ Species evolved via non-adaptive in-situ cladogenesis at that time step}
\item{NewViaAdaptClado}{ Species evolved via adaptive in-situ cladogenesis at that time step}
\item{NewViaAna}{ Species evolved via in-situ anagenesis at that time step}
\item{Extinctions}{ Species going extinct at that time step}
\item{Emigrants}{ Species emigrating from the island to the mainland}
}
\description{
This function calculates the the number of colonization,
speciation, extinction, and emmigration events through the trajectory
of island evolution. This results in the total diversity of endemics
and non-endemics at each moment in time.
}
\details{
Using the parameters in Table 1 of Borregard et al. (2016) with the
\code{\link{Island}} dataset results in slightly different diversity trajectories
than Borregard's Figure 4a. The example below recreates this figure as closely as possible,
but uses a different size of the source area (Ma) and probability of anagenesis (Ana).
Reasons for the differences could be the manually digitalized properties of the
island itself because Figure S3 (Borregard et al. 2016) containes no y-axis scale.
Moreover, the original R script of Borregard et al. (2016) hard-codes many parameters.
Rates can be plotted in units of events per time (as in Borregard et al. 2016)
or in units of events per lineage per time (as in many phylogenetic studies).
This largely removes the hump-shaped extinction trajectory.
}
\examples{
# Reproduce Figure 4a (Borregaard et al., 2016)
data(Island)
Dtt <- diversity_through_time(Elevation = Island$Elevation,
Topography = Island$Topography,
Area = Island$Area,
Vs = 0.0005,
S0 = 0.0025,
E0 = 0.001,
Ms = 500,
Iso_t = 0.3,
Imm = 0.002,
C0 = 0.5,
Ana = 0.0003,
Emi = 0.0002,
Z = 0.25,
Ma = 200,
Msa = 0.3,
DivDep = TRUE,
TargetEffect = FALSE,
EnvirFilt = FALSE)
ColRich <- rgb(170, 99, 42, maxColorValue = 255)
ColNonEnd <- rgb(249, 195, 91, maxColorValue = 255)
ColEnd <- rgb(191, 11, 189, maxColorValue = 255)
ColEx <- rgb(211, 0, 0, maxColorValue = 255)
ColAna <- rgb(255, 144, 235, maxColorValue = 255)
ColClado <- rgb(64, 197, 253, maxColorValue = 255)
# Diversities
par(las = 1, mar = c(4, 6, 0.1, 0.5))
plot(1:nrow(Dtt), Dtt[, "Kmax"], type = "l", col = "black",
xlim = c(-1, 5000), ylim = c(0, 160),
xaxs = "i", yaxs = "i",
xaxt = "n", xlab = "Time (Ma)",
ylab = "Species")
axis(side = 1, at = c(0, 1000, 2000, 3000, 4000, 5000), labels = c(5, 4, 3, 2, 1, 0))
lines(1:nrow(Dtt), Dtt[, "Richness"], type = "l", col = ColRich)
lines(1:nrow(Dtt), Dtt[, "NonEndemics"], type = "l", col = ColNonEnd)
lines(1:nrow(Dtt), Dtt[, "Endemics"], type = "l", col = ColEnd)
legend("topright",
legend = c("Carrying capacity", "Total species richness", "Non-endemics", "Endemics"),
col = c("black", ColRich, ColNonEnd, ColEnd), lty = 1, bty = "n", cex = 0.7)
# Rates
plot(1:nrow(Dtt), Dtt[, "Immigrants"], type = "l", col = ColNonEnd,
xlim = c(-1, 5000), ylim = c(0, 0.15),
xaxs = "i", yaxs = "i",
xaxt = "n", xlab = "Time (Ma)",
ylab = expression(atop(paste("Rate"), "(events"\%.\%"time step"^-1*")")))
axis(side = 1, at = c(0, 1000, 2000, 3000, 4000, 5000), labels = c(5, 4, 3, 2, 1, 0))
lines(1:nrow(Dtt), Dtt[, "Extinctions"], col = ColEx)
lines(1:nrow(Dtt), Dtt[, "NewViaAna"], col = ColAna)
lines(1:nrow(Dtt), Dtt[, "NewViaClado"], type = "l", col = ColClado)
legend("topright",
legend = c("Colonization", "Extinction", "Anagenesis", "Cladogenesis"),
col = c(ColNonEnd, ColEx, ColAna, ColClado), lty = 1, bty = "n", cex = 0.7)
# Divide by island richness and multiply by 1000
# to obtain rates in units of events per island species per 1 million years
plot(1:nrow(Dtt), 1000 * Dtt[, "Immigrants"] / Dtt[, "Richness"], type = "l", col = ColNonEnd,
xlim = c(-1, 5000), ylim = c(0, 1.3),
xaxs = "i", yaxs = "i",
xaxt = "n", xlab = "Time (Ma)",
ylab = expression(atop(paste("Rate"), "(events"\%.\%"species"^-1\%.\%"my"^-1*")")))
axis(side = 1, at = c(0, 1000, 2000, 3000, 4000, 5000), labels = c(5, 4, 3, 2, 1, 0))
lines(1:nrow(Dtt), 1000 * Dtt[, "Extinctions"] / Dtt[, "Richness"], col = ColEx)
lines(1:nrow(Dtt), 1000 * Dtt[, "NewViaAna"] / Dtt[, "Richness"], col = ColAna)
lines(1:nrow(Dtt), 1000 * Dtt[, "NewViaClado"] / Dtt[, "Richness"], type = "l", col = ColClado)
legend("topright",
legend = c("Colonization", "Extinction", "Anagenesis", "Cladogenesis"),
col = c(ColNonEnd, ColEx, ColAna, ColClado), lty = 1, bty = "n", cex = 0.7)
# Figure 4 of Hauffe et al.
TimeSteps <- 4000
X <- 1:TimeSteps
Island <- 1/( 1+exp(-(0.0001 + 0.004*X[1:3000])) )
Island <- Island - min(Island)
Island <- Island / max(Island)
Island <- Island / 2
Island <- c(Island, Island[length(Island)] + Island[1:1000])
Clado <- 0.00007
DttTar <- diversity_through_time(Elevation = Island,
Topography = Island,
Area = Island,
Vs = 0.9 * Clado,
S0 = 0.1 * Clado,
E0 = 0.00095,
Ms = 300,
Iso_t = 0.3,
Imm = 0.00019,
C0 = 1,
Ana = 0.00034,
Emi = 0,
Z = 0.25,
Ma = 200,
Msa = 0.3,
DivDep = FALSE,
TargetEffect = TRUE,
EnvirFilt = FALSE)
# Plot island ontogeny
plot(X, Island, type = "l",
ylim = c(0, 1), xlim = c(-1, max(X)),
xaxs = "i", yaxs = "i",
xaxt = "n", xlab = "Time (Ma)",
ylab = "Elevation Area Topography (\%)")
axis(side = 1, at = c(0, 1000, 2000, 3000, 4000), labels = c(4, 3, 2, 1, 0))
# Diversities
plot(1:nrow(DttTar), DttTar[, "Richness"], type = "l", col = ColRich,
xlim = c(-1, max(X)), ylim = c(0, 80),
xaxs = "i", yaxs = "i",
xaxt = "n", xlab = "Time (Ma)",
ylab = "Species")
axis(side = 1, at = c(0, 1000, 2000, 3000, 4000), labels = c(4, 3, 2, 1, 0))
lines(1:nrow(DttTar), DttTar[, "NonEndemics"], type = "l", col = ColNonEnd)
lines(1:nrow(DttTar), DttTar[, "EndemicsAna"], type = "l", col = ColAna)
lines(1:nrow(DttTar), DttTar[, "EndemicsClado"], type = "l", col = ColClado)
legend("topleft",
legend = c("Total species richness", "Non-endemics",
"Endemics evolved via cladogenesis",
"Endemics via anagenesis"),
col = c(ColRich, ColNonEnd, ColAna, ColClado), lty = 1, bty = "n", cex = 0.7)
}
\references{
Borregaard, M. K., T. J. Matthews and R. J. Whittaker (2016).
The general dynamic model: towards a unified theory of island biogeography?
Global Ecology and Biogeography, 25(7), 805-816.
Hauffe, T., D. Delicado, R.S. Etienne and L. Valente (submitted).
Lake expansion increases equilibrium diversity via the target effect of island biogeography
}
\author{
Torsten Hauffe
}
|
ab9e7b2a824b1a8da8df059ec4b040bda2493593 | e1dbffbeefe6a0e81f2bfcf57e166d282e8daf72 | /plot3.R | 02b143fa34692f7f3216339a2b783a3636d82425 | [] | no_license | JasonSmiegel/ExData_Plotting1 | e9a18269bf30b676bb8c0aa071ad752b1c56ba13 | 69c9d20d910717d1f39ba35d396bfa1ab429c3ba | refs/heads/master | 2021-01-17T12:34:34.259345 | 2016-05-29T18:38:43 | 2016-05-29T18:38:43 | 59,947,253 | 0 | 0 | null | 2016-05-29T14:28:42 | 2016-05-29T14:28:42 | null | UTF-8 | R | false | false | 422 | r | plot3.R | #plot the multiplt line graph with legend
# Plot the three energy sub-metering series as overlaid line graphs with a
# legend, written to plot3.png.
# NOTE(review): assumes `electricitydata` (with a `datetime` column and
# numeric submetering1..3 columns) was built by an earlier script -- confirm.
png("plot3.png")
with(electricitydata,plot(datetime,submetering1,type="l",xlab=" ",ylab="Energy sub metering"))
# Overlay the remaining two series on the same axes in red and blue.
with(electricitydata,points(datetime,submetering2,type="l",col="red"))
with(electricitydata,points(datetime,submetering3,type="l",col="blue"))
legend("topright",col=c("black","red","blue"),lty=1, legend=c("sub_metering_1","sub_metering_2","sub_metering_3"))
# Close the device so the PNG file is flushed to disk.
dev.off()
|
429dbec2e7f42d57b279d360c2dbeed98e155d74 | 0e96c2f393292398e6b74a1f26a27d7a549fb84b | /R_Geocoding_Census.R | f6475ac4e34e23032eb8504836be112005f134fa | [] | no_license | lewissearsiv/Metis_Proj1_MTA | 3ed7215afa7050eac3f9fa25af734494b8fdf504 | fb744b26ab9b637e7fe5e927261003abedcb8aba | refs/heads/master | 2022-12-21T22:27:21.602059 | 2020-09-24T23:58:33 | 2020-09-24T23:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,837 | r | R_Geocoding_Census.R | library(dplyr)
## Pipeline: (1) pull MTA turnstile data, (2) geocode each station via the
## Google Maps geocoding API, (3) map lat/lon to census-tract FIPS codes via
## the Census geocoder, (4) join ACS 5-year demographics, (5) write the
## enriched station table to CSV.
## read in turnstile data for getting ids etc
turns <- read.csv('http://web.mta.info/developers/data/nyct/turnstile/turnstile_200919.txt')
#Base url for google maps api
base <- "https://maps.googleapis.com/maps/api/geocode/json?address="
# Census api key
cen_key <- '97d55623ee041618526d685d56a340986817ea2c'
# Filter down data to more unique stations
turns_filt <- turns %>%
  group_by(STATION,DIVISION) %>%
  summarize(all = sum(ENTRIES))
#add empty lat lon columns to fill
turns_filt$LAT <- NA
turns_filt$LON <- NA
# iterate through stations and query the api
# NOTE(review): `key` used in the request below is never defined in this file
# (only `cen_key`, the Census key, is) -- the Google Maps API key must be
# supplied elsewhere or this loop errors on every row. Also fromJSON() implies
# a JSON package (presumably jsonlite) is attached elsewhere; confirm both.
for (i in 1:nrow(turns_filt)){
  tryCatch({
    # create an object to store the geo info
    geo <- fromJSON(URLencode(paste(base,paste(turns_filt$STATION[i],"Station",turns_filt$DIVISION[i],'Line New York'),"&key=",key)))
    # only the first (top-ranked) geocoding result is kept
    turns_filt$LON[i] <- geo$results$geometry$location$lng[1]
    turns_filt$LAT[i] <- geo$results$geometry$location$lat[1]
    turns_filt$geo_type[i] <- geo$results$types[[1]]
    message(i)
  }, error=function(e){})  # failures are silently skipped; those rows keep NA coords
}
# create empty columns for census geography ids, these are known as fips and are how you identify the location for
# pulling in data
turns_filt$State_FIPS <- NA
turns_filt$County_FIPS <- NA
turns_filt$Tract_FIPS <- NA
# run through each row and query the census api to get fips codes based on latitude and longitude coordinates
for (k in which(is.na(turns_filt$State_FIPS))){
  geo <- fromJSON(paste0('https://geocoding.geo.census.gov/geocoder/geographies/coordinates?x=',turns_filt$LON[k],'&y=',turns_filt$LAT[k],'&benchmark=4&vintage=4'))
  turns_filt$State_FIPS[k] <- geo$result$geographies$`Census Tracts`$STATE
  turns_filt$County_FIPS[k] <- geo$result$geographies$`Census Tracts`$COUNTY
  turns_filt$Tract_FIPS[k] <- geo$result$geographies$`Census Tracts`$TRACT
  message(k)
}
# Then we pull in the data for all tracts in all counties in new york as one query
cen_dat <- fromJSON(paste0('https://api.census.gov/data/2018/acs/acs5/subject?get=NAME,S0101_C05_001E,S0101_C01_001E,S2001_C02_011E,S2001_C02_012E,S2001_C01_002E,S2403_C01_001E,S2403_C01_012E,S2403_C01_017E&for=tract:*&in=state:36&in=county:*&key=',cen_key))
# The Census API returns a matrix whose first row is the header.
colnames(cen_dat) <- cen_dat[1,]
cen_dat <- cen_dat[-1,]
colnames(cen_dat)[2:9] <- c('Female_Population','Total_Population','P_75_100k','P_over_100k','median_income','tot_emp','emp_info','emp_prof')
##notes
# populations above the age of 16
# Estimate!!Total!!Civilian employed population 16 years and over!!Professional, scientific, and management, and administrative and waste management services!!Professional, scientific, and technical services
#
cen_dat <- as.data.frame(cen_dat)
cen_dat[,2:9] <- sapply(cen_dat[,2:9],as.numeric)
# Derived proportions: female share of population, info-sector and
# professional-sector shares of total employment.
cen_dat$p_f_pop <- cen_dat$Female_Population/cen_dat$Total_Population
cen_dat$p_emp_info <- cen_dat$emp_info/cen_dat$tot_emp
cen_dat$p_emp_prof <- cen_dat$emp_prof/cen_dat$tot_emp
# Join tract-level stats back onto stations by matching concatenated
# state+county+tract FIPS strings.
turns_filt$p_f_pop <- cen_dat$p_f_pop[match(paste0(turns_filt$State_FIPS,turns_filt$County_FIPS,turns_filt$Tract_FIPS),paste0(cen_dat$state,cen_dat$county,cen_dat$tract))]
turns_filt$p_emp_info <- cen_dat$p_emp_info[match(paste0(turns_filt$State_FIPS,turns_filt$County_FIPS,turns_filt$Tract_FIPS),paste0(cen_dat$state,cen_dat$county,cen_dat$tract))]
turns_filt$p_emp_prof <- cen_dat$p_emp_prof[match(paste0(turns_filt$State_FIPS,turns_filt$County_FIPS,turns_filt$Tract_FIPS),paste0(cen_dat$state,cen_dat$county,cen_dat$tract))]
# These ACS columns are percentages, so divide by 100 to get proportions.
turns_filt$p_75_100k <- cen_dat$P_75_100k[match(paste0(turns_filt$State_FIPS,turns_filt$County_FIPS,turns_filt$Tract_FIPS),paste0(cen_dat$state,cen_dat$county,cen_dat$tract))]/100
turns_filt$p_over_100k <- cen_dat$P_over_100k[match(paste0(turns_filt$State_FIPS,turns_filt$County_FIPS,turns_filt$Tract_FIPS),paste0(cen_dat$state,cen_dat$county,cen_dat$tract))]/100
write.csv(turns_filt, 'geocoded_cen_dat.csv', row.names = F)
|
3c889af7dfbca95329d2c63d4abe6a9724d3aa3b | ebad760bbaab9b3c0f06ffe10b3c2c305f5d8e46 | /Resample.R | 5a56b4db304ece2211b6be80004e084b5db26979 | [] | no_license | JuanMatiasBraccini/Git_MACN | 76a9bbe5b1d96a82ff55c2ee10585563a6ba56a2 | 0921271ff4a03eecac85893d2eab522c4ea444ab | refs/heads/master | 2020-06-03T19:51:06.090931 | 2019-06-13T07:08:08 | 2019-06-13T07:08:08 | 191,709,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,128 | r | Resample.R |
## ---- Age vs length: von Bertalanffy growth curve ----
DATA <- read.csv("C:/Matias/MACN/Data.csv")  # bring in data
# eyeball the raw age vs disc-width relationship
plot(DATA$Age, DATA$DW)
# growth function and starting values for the nonlinear least-squares fit
vbTypical <- DW ~ Linf * (1 - exp(-K * (Age - to)))
svTypical.VB <- c(Linf = 176, K = 0.08, to = -1.8)
fit.vonb <- nls(vbTypical, data = DATA, start = svTypical.VB)
Coef.fit <- coef(fit.vonb)
Coefs.fit.and.SE <- summary(fit.vonb)$parameters[, 1:2]
# model AIC, for comparison with alternative growth models (e.g. Gompertz)
Model.AIC <- AIC(fit.vonb)
# predict over a fresh age grid and overlay the fitted curve on the data
NEW.data <- data.frame(Age = 1:25)
Preds <- predict(fit.vonb, newdata = NEW.data)
plot(DATA$Age, DATA$DW, ylab = "Disc width", xlab = "Age")
lines(NEW.data$Age, Preds, col = 2)

## ---- Maturity vs length: logistic ogive ----
Dat <- read.csv("C:/Matias/MACN/Maturity_whisk_Simpfendorferetal1998.csv")
# logistic curve parameterized by length at 50% and 95% maturity
mod <- nls(Prop.Mat ~ 1 / (1 + exp(-log(19) * (FL - p50) / (p95 - p50))),
           start = c(p50 = 115, p95 = 120), data = Dat)
fit <- summary(mod)$parameters[, 1:2]
# same logistic form as a reusable function of length
fn.logis <- function(dat, p50, p95) 1 / (1 + exp(-log(19) * (dat - p50) / (p95 - p50)))
plot(Dat$FL, Dat$Prop.Mat, pch = 19)
SEQ <- 80:140
lines(SEQ, fn.logis(SEQ, fit[1, 1], fit[2, 1]), col = 2)
|
ea0f7ccc7ff657da702e2169c063521abe4d68b5 | 90d47e5658ea7e803d7156dc2601446c456bb91b | /R/4-get_ngrams.R | bdb2a6b8a2642afeffc4011cc83c14b22ab33cb3 | [] | no_license | Auburngrads/text.analysis | a0a5d4d559f442ad2015b66fd72fe02c489c81a0 | 8f6ec3d7a51c4697f39c7cb716a13a7b3f3340b1 | refs/heads/master | 2020-03-23T11:18:31.106745 | 2018-07-18T23:44:02 | 2018-07-18T23:44:02 | 141,496,174 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 435 | r | 4-get_ngrams.R | #' Create data.frame of n-grams
#'
#' Counts string n-grams in `x` with \code{tau::textcnt} and returns them as a
#' data.frame sorted by descending frequency.
#'
#' @param x A character string of text
#' @param n Number of n-grams to get
#' @param ... Additional arguments for \code{tau::textcnt}
#'
#' @return A data.frame with columns \code{count} and \code{text}, ordered by
#'   decreasing \code{count}.
#'
#' @importFrom tau textcnt
#'
#' @export
get_ngrams <- function(x, n, ...) {
  # textcnt() returns a classed named count vector; unclass() strips the class
  # so it can be stored as a plain data.frame column (n-grams become rownames).
  df <- data.frame(count = unclass(tau::textcnt(x, method = "string", n = n, ...)))
  df$text <- rownames(df)
  rownames(df) <- NULL
  # Most frequent n-grams first (TRUE spelled out -- `T` is reassignable).
  df[order(df$count, decreasing = TRUE), ]
}
|
76aa3e0cfa6eb76e2d28215446b35bfbe01d485c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SoilR/examples/126.Rd.R | a1c2c86a21c0c9468db039c1eecd8e317e6866b5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | 126.Rd.R | library(SoilR)
### Name: fW.Candy
### Title: Effects of moisture on decomposition rates according to the
###   Candy model
### Aliases: fW.Candy
### ** Examples

# Moisture-response curves for three pore volumes across the full range of
# volumetric soil water content.
th <- seq(0, 1, 0.01)
xi1 <- fW.Candy(theta = th, PV = 0.4)
xi2 <- fW.Candy(theta = th, PV = 0.6)
xi3 <- fW.Candy(theta = th, PV = 0.8)
plot(th, xi1, type = "l",
     main = "Effects of soil water content and pore volume on decomposition rates",
     xlab = "Volumetric soil water content (cm3 cm-3)", ylab = expression(xi))
lines(th, xi2, col = 2)
lines(th, xi3, col = 3)
legend("bottomright",
       c("Pore volume = 0.4", "Pore volume = 0.6", "Pore volume = 0.8"),
       lty = 1, col = 1:3)
|
5f0c55403e384861a9544438e0ee8a03db58113a | ec33ad5603ca1069b1a057d1a283022e64d72637 | /bioifx/cluster/heatmap/scripts/heatmapSF.R | 8bd9ad7c49773b52df0dea50e719a60af7c38ff2 | [] | no_license | vangalamaheshh/dockerfiles | 4800ed98c48cdbb1704872a987747cd9250b9ff7 | 2fa6bac77bd5efbfcdb335a706102b3bf47d1ea7 | refs/heads/master | 2020-12-25T13:45:18.291900 | 2019-08-05T20:48:10 | 2019-08-05T20:48:10 | 63,626,190 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 5,747 | r | heatmapSF.R | ## Load required packages
suppressMessages(library("gplots"))
suppressWarnings(suppressMessages(library("ComplexHeatmap")))
suppressMessages(library("circlize"))
suppressMessages(library("viridis"))
suppressMessages(library("dplyr"))
suppressMessages(source('/usr/local/bin/scripts/supp_fns.R'))
## Enable stack trace
#options(error = function() traceback(2))
## Draw sample-feature (SF) heatmaps of an expression matrix and write the
## plotted matrix to disk.
##
## Args:
##   rpkmTable:        RPKM expression table (genes x samples).
##   annot:            sample annotation data.frame (one row per sample),
##                     rendered as colored bars above the heatmap.
##   num_kmeans_clust: requested row clusterings, either a numeric vector or a
##                     comma-separated string (e.g. "0,4"). 0 means
##                     hierarchical row clustering; any other value k means
##                     k-means with k centers. One heatmap is drawn per value.
##   sf_out_dir:       output directory for heatmapSF.pdf, heatmapSF_<i>.png
##                     and heatmapSF.txt.
##
## Called for its side effects (files written); the return value is not used.
heatmapSF_plot <- function(rpkmTable, annot, num_kmeans_clust, sf_out_dir) {
  ## Log-transform, then z-score each gene across samples (zscore() comes from
  ## the sourced supp_fns.R).
  Exp_data <- log2(rpkmTable + 1)
  Exp_data <- apply(Exp_data, 1, function(x) zscore(x))
  ## 101 color breaks matched to bluered(101); z-scores beyond +/-2.5 saturate.
  my.breaks_nolym <- c(-3, seq(-2.5, 2.5, length.out = 99), 3)
  ## apply() returned samples x genes; transpose back to genes x samples.
  Exp_data <- t(as.matrix(Exp_data))
  ## Annotation bars shown on top of the heatmap.
  ha1 <- make_complexHeatmap_annotation(annot)
  ## Cluster samples on the euclidean distance between their Spearman
  ## correlation profiles (Ward linkage); reused for every heatmap below.
  cordata <- cor(Exp_data, method = "spearman")
  coldistance <- dist(t(as.matrix(cordata)), method = "euclidean")
  colcluster <- hclust(coldistance, method = "ward.D2")
  ## Row labels are only legible for small gene sets.
  row_name_param <- nrow(Exp_data) <= 100
  ## Accept the cluster counts either as numbers or as a "0,4"-style string
  ## (the command-line driver always passes a string).
  if (is.numeric(num_kmeans_clust)) {
    kmparam <- num_kmeans_clust
  } else if (is.character(num_kmeans_clust)) {
    kmparam <- as.numeric(unlist(strsplit(num_kmeans_clust, ",")))
  } else {
    stop("num_kmeans_clust must be numeric or character", call. = FALSE)
  }
  ## Render one heatmap plus its annotation labels (mirrored on both sides)
  ## on the currently active graphics device.
  draw_annotated <- function(hm) {
    draw(hm)
    for (an in colnames(annot)) {
      decorate_annotation(an, {
        grid.text(an, unit(1, "npc") + unit(2, "mm"), 0.5, default.units = "npc",
                  just = "left", gp = gpar(fontsize = 6), check.overlap = TRUE)
        grid.text(an, unit(0, "npc") - unit(2, "mm"), 0.5, default.units = "npc",
                  just = "right", gp = gpar(fontsize = 6), check.overlap = TRUE)
      })
    }
  }
  ## Every heatmap also goes into one shared multi-page PDF.
  pdf(file = paste0(sf_out_dir, "/heatmapSF.pdf"), width = 11, height = 8.5)
  png_count <- 0
  for (i in seq_along(kmparam)) {
    if (kmparam[i] == 0) {
      ## Hierarchical row clustering (euclidean distance, Ward linkage).
      rowdistance <- dist(Exp_data, method = "euclidean")
      rowclusterparam <- hclust(rowdistance, method = "ward.D2")
      hmdata <- Exp_data
      column_title_param <- "Sample-Feature Hierarchical Clustering"
    } else {
      ## K-means: rows are reordered by cluster id and the row dendrogram is
      ## suppressed. NOTE: kmeans() draws from the RNG, so the ordering is only
      ## reproducible if the caller sets a seed.
      km1 <- kmeans(Exp_data, centers = kmparam[i])
      kmclustsort <- sort(km1$cluster)
      ind <- match(names(kmclustsort), rownames(Exp_data))
      hmdata <- Exp_data[ind, ]
      rowclusterparam <- FALSE
      column_title_param <- paste0("Sample-Feature Kmeans ", kmparam[i], " Clustering")
    }
    mapplot <- Heatmap(hmdata,
                       col = colorRamp2(my.breaks_nolym, bluered(101), transparency = 0),
                       column_title = column_title_param,
                       show_row_names = row_name_param, show_column_names = TRUE,
                       row_names_gp = gpar(fontsize = 6),
                       column_names_gp = gpar(fontsize = 8),
                       cluster_rows = rowclusterparam,
                       cluster_columns = colcluster,
                       show_heatmap_legend = FALSE,
                       top_annotation = ha1)
    ## Each heatmap is rendered twice: into its own PNG, then into the PDF.
    png_count <- png_count + 1
    png(file = paste0(sf_out_dir, "/heatmapSF_", png_count, ".png"),
        width = 8, height = 8, units = "in", res = 300)
    draw_annotated(mapplot)
    junk <- dev.off()  # close the PNG; the PDF becomes the active device again
    draw_annotated(mapplot)
    ## Persist the plotted matrix for the FIRST requested clustering only,
    ## in the same row/column order as displayed.
    if (i == 1) {
      if (kmparam[1] == 0) {
        output <- Exp_data[unlist(row_order(mapplot)), unlist(column_order(mapplot))]
      } else {
        ## The k-means cluster id is appended as a final column; the column
        ## reordering below then keeps only the heatmap's own columns,
        ## matching the original behavior.
        output <- cbind(hmdata, kmclustsort)
        output <- output[, unlist(column_order(mapplot))]
      }
      write.table(output, file = paste0(sf_out_dir, "/heatmapSF.txt"),
                  quote = FALSE, col.names = NA, sep = "\t")
    }
  }
  junk <- dev.off()  # close the PDF
}
## Command-line driver:
##   Rscript heatmapSF.R <rpkm_csv> <annot_csv> <kmeans_clusters> <out_dir>
args <- commandArgs(trailingOnly = TRUE)
rpkmFile <- args[1]
annotFile <- args[2]
num_kmeans_clust <- args[3]  # comma-separated string; parsed inside heatmapSF_plot
sf_out_dir <- args[4]
rpkmTable <- read.csv(rpkmFile, header = TRUE, check.names = FALSE, row.names = 1, stringsAsFactors = FALSE, dec = '.')
annot <- read.csv(annotFile, sep = ",", header = TRUE, row.names = 1, stringsAsFactors = FALSE, check.names = FALSE, comment.char = '#')
## Drop comparison bookkeeping columns ("comp_..." and "Pair") from the
## annotation before plotting. BUGFIX: the original pattern "comp_*" is a
## glob, but grepl() takes a regex -- as a regex it matches any name merely
## containing "comp". The intended prefix match is "^comp_".
if (any(grepl("^comp_", colnames(annot)))) {
  annot <- annot[, !grepl('Pair', colnames(annot)), drop = FALSE]
  annot <- annot[, !grepl('^comp_', colnames(annot)), drop = FALSE]
}
## Run the function
heatmapSF_plot(rpkmTable, annot, num_kmeans_clust, sf_out_dir)
|
344c6b1fa25695b2bda88230355e5fc3b7358d1c | 1e079ff677884cc8684f897addc6f4f2d7c648eb | /AoC_day05.R | 926c4a2802781c500b002b49b494361791f71451 | [] | no_license | jmoggridge/AdventofCode2020 | 1f3f341300b10e8fa5816773125b5171d8f19e27 | b0074445147150369d7f4682afd8ea3400d871e0 | refs/heads/master | 2023-02-24T15:21:07.548635 | 2021-02-05T07:05:06 | 2021-02-05T07:05:06 | 326,839,226 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,195 | r | AoC_day05.R | # --- Day 5: Binary Boarding ---
# You board your plane only to discover a new problem: you dropped your boarding pass! You aren't sure which seat is yours, and all of the flight attendants are busy with the flood of people that suddenly made it through passport control.
#
# You write a quick program to use your phone's camera to scan all of the nearby boarding passes (your puzzle input); perhaps you can find your seat through process of elimination.
#
# Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like FBFBBFFRLR, where F means "front", B means "back", L means "left", and R means "right".
#
# The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane (numbered 0 through 127). Each letter tells you which half of a region the given seat is in. Start with the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63) or the back (64 through 127). The next letter indicates which half of that region the seat is in, and so on until you're left with exactly one row.
#
# For example, consider just the first seven characters of FBFBBFFRLR:
#
# Start by considering the whole range, rows 0 through 127.
# F means to take the lower half, keeping rows 0 through 63.
# B means to take the upper half, keeping rows 32 through 63.
# F means to take the lower half, keeping rows 32 through 47.
# B means to take the upper half, keeping rows 40 through 47.
# B keeps rows 44 through 47.
# F keeps rows 44 through 45.
# The final F keeps the lower of the two, row 44.
# The last three characters will be either L or R; these specify exactly one of the 8 columns of seats on the plane (numbered 0 through 7). The same process as above proceeds again, this time with only three steps. L means to keep the lower half, while R means to keep the upper half.
#
# For example, consider just the last 3 characters of FBFBBFFRLR:
#
# Start by considering the whole range, columns 0 through 7.
# R means to take the upper half, keeping columns 4 through 7.
# L means to take the lower half, keeping columns 4 through 5.
# The final R keeps the upper of the two, column 5.
# So, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5.
#
# Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat has ID 44 * 8 + 5 = 357.
#
# Here are some other boarding passes:
#
# BFFFBBFRRR: row 70, column 7, seat ID 567.
# FFFBBBFRRR: row 14, column 7, seat ID 119.
# BBFFBBFRLL: row 102, column 4, seat ID 820.
# As a sanity check, look through your list of boarding passes. What is the highest seat ID on a boarding pass?
#
## The pipeline below uses readr (read_lines), stringr (str_*) and dplyr, but
## the original script never attached them; load them so it runs standalone.
library(readr)
library(stringr)
library(dplyr)

boarding_passes <- read_lines('input5.txt')

## A boarding pass is a 10-bit binary number: the 7 B/F characters are the row
## bits (B = 1), the 3 R/L characters are the column bits (R = 1).
passes <- data.frame(
  rw = str_sub(boarding_passes, end = 7),
  clm = str_sub(boarding_passes, start = 8)
) %>%
  mutate(
    # convert to binary, then to integer for row and col
    rw = str_replace_all(rw, 'B', '1'),
    rw = str_replace_all(rw, 'F', '0'),
    rw = strtoi(rw, base = 2),
    clm = str_replace_all(clm, 'R', '1'),
    clm = str_replace_all(clm, 'L', '0'),
    clm = strtoi(clm, base = 2),
    # compute seat number: row * 8 + column
    seatnum = rw * 8 + clm
  )
passes
# Part 1 answer: max seat number
max(passes$seatnum)

#
# --- Part Two ---
# It's a completely full flight, so your seat should be the only missing
# boarding pass in your list. However, some seats at the very front and back
# of the plane don't exist on this aircraft, so they'll be missing too.
# Your seat wasn't at the very front or back, though; the seats with IDs +1
# and -1 from yours will be in your list. What is the ID of your seat?
#
passes %>%
  # sort by seatnumber
  arrange(seatnum) %>%
  # distance to both neighbours: `span` is 2 for fully surrounded seats,
  # 3 for the two seats flanking the single gap, NA at the ends
  mutate(
    closest1 = lag(seatnum),
    closest2 = lead(seatnum),
    span = seatnum - closest1 + closest2 - seatnum
  ) %>%
  # keep only the two seats adjacent to the gap (NA rows are dropped too)
  filter(!span == 2) %>%
  # their mean is the missing seat id
  summarise(myseat = mean(seatnum)) %>%
  print
|
30b1fc666c488873aadbfeca5a86f52c57d675a0 | d7bc977caf5805571319eb7cff1863a0e936b9b9 | /man/estimate_diameter_range.Rd | 51817822690fe1aff4904e90218d2057b4d2585f | [] | no_license | dami82/cellTracker | 5a57540eecde76c4939f94f3ea1c7956b5757b95 | e06c96c0444e1659b1a862c31fae14c0cc95930b | refs/heads/master | 2020-09-28T14:02:17.651596 | 2020-02-19T05:51:37 | 2020-02-19T05:51:37 | 226,792,756 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,434 | rd | estimate_diameter_range.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cellTracker_core.R
\name{estimate_diameter_range}
\alias{estimate_diameter_range}
\title{Detect Particle Diameters in a Numeric Matrix}
\usage{
estimate_diameter_range(x, px.margin = 2, quantile.val = 0.99,
plot = TRUE)
}
\arguments{
\item{x}{numeric matrix corresponding to a digital image}
\item{px.margin}{integer, number of pixels used as margin while searching/filtering for neighboring particles}
\item{quantile.val}{numeric, must be bigger than 0 and smaller than 1.
Quantile for discriminating signal and background; only pixels with intensity higher than the corresponding
quantile will count as signal while estimating particle diameters}
\item{plot}{logical, whether a histogram of the distribution of diameters should be shown}
}
\value{
list including summary stats and data about the particles found in the image
}
\description{
Estimates the diameters of particles in a numeric matrix
}
\examples{
a <- cbind(c(1, 1, 1, 0, 0, 0, 0, 0, 1, 1),
c(1, 1, 0, 0, 0, 0, 0, 0, 1, 1),
c(1, 0, 0, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 1, 1, 0, 0, 0, 0),
c(0, 0, 0, 1, 1, 1, 0, 0, 0, 0))
graphics::image(a)
b <- estimate_diameter_range(a)
print(b$estim.cell.num)
print(b$raw)
}
\references{
\url{https://www.data-pulse.com/dev_site/celltracker/}
}
\author{
Damiano Fantini, \email{damiano.fantini@gmail.com}
}
|
987c59c8ec3fe21a8b46089dee4121b937d2c84d | 7cf5076071b09252510b2cf3579512becdae07e6 | /quiz.R | 6c44cef20e0acbb6f93d83c0bb4731a1975b89cb | [] | no_license | H3runug/Rherunug | 9a67ef245cf56145f4d1082b900d0eeee5605911 | 17412a452757fdacc6aba4702ccfd0bec4060613 | refs/heads/master | 2020-04-24T10:12:33.204611 | 2019-02-21T14:35:40 | 2019-02-21T14:35:40 | 171,885,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 439 | r | quiz.R | # nama document : basic R.R
# author      : Heru Nugroho
# description : basic R exercises (vectors, matrices, data frames, lists)

# Character vector of hobbies; bare name auto-prints it.
v_hobi_saya <- c("sepak bola", "menulis", "otomotif")
v_hobi_saya

# 2x3 matrix of odd numbers, filled row-wise (TRUE spelled out -- `T` is reassignable).
matrix_ganjil <- matrix(c(1, 3, 5, 7, 9, 11), byrow = TRUE, nrow = 2)
matrix_ganjil

# Data frame of food prices.
df_harga_makanan <- data.frame(
  makanan = c("Pizza", "Bakso", "Roti", "Mie Instan"),
  harganya = c(100000, 25000, 10000, 3000))
df_harga_makanan

# A list can hold heterogeneous objects: vector, matrix, data frame.
list_saya <- list(v_hobi_saya, matrix_ganjil, df_harga_makanan)
list_saya
|
b7fad7bf751195ea1b3b83000930f98d67389b90 | f8c8e0449ad002439578548d1c9b28b9e724e3d9 | /man/Seed.Rd | 7bf8111b76c0977712b0d34b9497812c89ab904d | [] | no_license | cran/MLGdata | b2ddf49578ec1602477b5c0be2521da62cd29079 | 93613b4f0ccaa3f3f0f3c38ad602e9388e7d48ea | refs/heads/master | 2022-12-19T06:04:50.351737 | 2020-09-30T07:50:12 | 2020-09-30T07:50:12 | 300,196,166 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 670 | rd | Seed.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Seed}
\alias{Seed}
\title{Seed germination}
\format{
A data frame with 20 observations on the following 2 variables
\describe{
\item{\code{fert}}{level of fertilizer used}
\item{\code{x}}{indicator of germination of the seed (\code{1}, yes; \code{0}, no)}
}
}
\source{
Salvan, A., Sartori, N., Pace, L. (2020). \emph{Modelli lineari generalizzati}. Milano: Springer-Verlag.
}
\usage{
Seed
}
\description{
This is an artificial dataset representing an experiment relating probability of
germination of seeds to the level of fertilizer used.
}
\keyword{datasets}
|
64ad969648b1fecf18a81fc233021fb2f7d05bfd | 13dc4bce18367002b60fa2063f59660eac2c99b4 | /이상치분석/Func.trans/실습.R | f78d718b963650c1184ae84bf17020857446d9a5 | [] | no_license | bohyunshin/ML | 7c28472b0de702852c174a9afd9bddd7a7ef2604 | e667aee161b63eeb8571222a6286b984e8afea0f | refs/heads/master | 2021-06-21T03:17:10.525171 | 2021-01-20T08:07:25 | 2021-01-20T08:07:25 | 179,795,328 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 895 | r | 실습.R | library(HighDimOut)
# Outlier analysis practice (HighDimOut): compare ABOD, FBOD and SOD outlier
# scores on the bundled TestData set, then combine the [0, 1]-normalized scores.
data(TestData)
mydata <- TestData

# Ordinary ABOD (angle-based outlier detection).
abod.result <- Func.ABOD(mydata[, -3], basic = TRUE, perc = 1)
# ABOD: the SMALLER the score, the more likely the point is an outlier.
mydata[order(abod.result, decreasing = FALSE), ]

# Feature Bagging (fast ABOD variant).
fbod.result <- Func.FBOD(mydata[, -3], iter = 10, k.nn = 5)
# FBOD: the LARGER the score, the more likely the point is an outlier.
mydata[order(fbod.result, decreasing = TRUE), ]

# Subspace outlier detection.
sod.result <- Func.SOD(mydata[, -3], k.nn = 10, k.sel = 5, alpha = 0.8)
# SOD: the LARGER the score, the more likely the point is an outlier.
mydata[order(sod.result, decreasing = TRUE), ]

# Transform outlier scores into the range [0, 1] so they can be summed.
trans.abod <- Func.trans(abod.result, method = "ABOD")
trans.fbod <- Func.trans(fbod.result, method = "FBOD")
trans.sod <- Func.trans(sod.result, method = "SOD")
trans.merge <- trans.abod + trans.fbod + trans.sod
# Combined score: larger = more outlying.
mydata[order(trans.merge, decreasing = TRUE), ]
|
fa6ab7afd8edd86fa1bc2868d721d805b488503f | b64f494df1015a60619f8cddc70465f742652525 | /prediction_lasso.R | 0028f402fa84cc0f41fe5f9c5155d2885c01589a | [] | no_license | hnyang1993/UmbrellaAcademy | 5b604e9caa0d998f580760fb3970138348cec22f | a287c4df9fd342fe93b2204645d693e66bc7cbb2 | refs/heads/master | 2020-05-02T15:31:57.398931 | 2019-04-26T01:32:42 | 2019-04-26T01:32:42 | 178,043,804 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 447 | r | prediction_lasso.R | setwd("~/Desktop/BIOS 735/final_project")
# Score the fitted lasso text classifier on the held-out comments.
# The workspace image is expected to provide `test_use`, `test_labels_use`,
# `fit`, `prep_fun`, `tok_fun` and `vectorizer` -- confirm prenobns.RData
# actually contains them before running.
load("prenobns.RData")

# Tokenize the test comments with the training-time preprocessing pipeline
# (text2vec itoken stream).
it_test <- itoken(
  test_use$comment_text,
  preprocessor = prep_fun,
  tokenizer = tok_fun,
  ids = test_use$id,
  progressbar = TRUE
)

# Document-term matrix built from the training-time vocabulary vectorizer.
raw.test.dtm <- create_dtm(it_test, vectorizer)

# Predicted class labels and the sum of squared label differences; if `toxic`
# is coded 0/1 this equals the number of misclassified comments -- verify.
pred <- predict(fit, raw.test.dtm, type = "class")
error <- sum((as.vector(test_labels_use$toxic) - as.numeric(as.character(pred)))^2)
|
f68fdcd1e6ba194a50a60eb18097db714d5469aa | 7085dc3eae63ee59981214bc426b91584b81f6e9 | /2017_Yosemite_ script.R | 8baaace213917075d140c4e177c0f19608501555 | [] | no_license | shumengjun/2017_Carpentry_workshop | 2397d5e23b80f61ca673d9d2d289e3566d1d3b5e | 75c9fff7949ca2e54f5c543dca5c2ab2a8100d15 | refs/heads/master | 2021-01-22T17:38:36.451457 | 2017-08-18T23:07:13 | 2017-08-18T23:07:13 | 100,729,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,164 | r | 2017_Yosemite_ script.R | ## Mengjun Shu
## 2017 Yosemite workshop
## 08/18/2017

## Download the Portal surveys data (run once):
## download.file("https://ndownloader.figshare.com/files/2292169",
##               "data/portal_data_joined.csv")
surveys <- read.csv("data/portal_data_joined.csv")

## Explore data ----
head(surveys)
tail(surveys)
tail(surveys, 12)
str(surveys)
summary(surveys)
dim(surveys)
nrow(surveys)
ncol(surveys)
names(surveys)

## Explore a single column.
head(surveys$weight)
tail(surveys$weight)
str(surveys$weight)
summary(surveys$weight)

## Quick scatter plots.
plot(surveys$year, surveys$weight)
plot(surveys$hindfoot_length, surveys$weight)

summary(surveys$month)
hist(surveys$month)

## Factor column exploration.
summary(surveys$taxa)
levels(surveys$taxa)
nlevels(surveys$taxa)
## BUGFIX: the original `hist(survey)` referenced an undefined object (typo
## for `surveys`) and errored; hist() also needs a numeric vector, not a data
## frame or factor, so the broken line is disabled here.
## hist(survey)
class(surveys$taxa)
table(surveys$taxa)

## subset in base R ----
## [rows, columns]
## return all the columns
surveys[surveys$genus == 'Ammodramus', ]
## return part of the columns
surveys[surveys$genus == 'Ammodramus', c('record_id', 'month', 'weight')]

## Different ways to count how many records are from January or February.
nrow(surveys[surveys$month < 3, ])
surveys[surveys$month == 1, ]
table(surveys$month < 3)
month_new <- surveys[surveys$month < 3, ]
length(which(surveys$month < 3))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.