content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## ---- message = FALSE, warning = FALSE----------------------------------------
library(ape)
library(adephylo)
library(phylobase)
library(phylosignal)
data(carni19)
## -----------------------------------------------------------------------------
tre <- read.tree(text = carni19$tre)
## -----------------------------------------------------------------------------
dat <- data.frame(carni19$bm)
dat$random <- rnorm(dim(dat)[1], sd = 10)
dat$bm <- rTraitCont(tre)
## -----------------------------------------------------------------------------
p4d <- phylo4d(tre, dat)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
dotplot(p4d)
gridplot(p4d)
## ----fig.width=8, fig.height=5------------------------------------------------
dotplot(p4d, tree.type = "cladogram")
## ----fig.width=6, fig.height=6------------------------------------------------
gridplot(p4d, tree.type = "fan", tip.cex = 0.6, show.trait = FALSE)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, tree.ratio = 0.5)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, trait = c("bm", "carni19.bm"))
## ----fig.width=8, fig.height=5------------------------------------------------
mat.e <- matrix(abs(rnorm(19 * 3, 0, 0.5)), ncol = 3,
dimnames = list(tipLabels(p4d), names(tdata(p4d))))
barplot(p4d, error.bar.sup = mat.e, error.bar.inf = mat.e)
## ----fig.width=8, fig.height=6------------------------------------------------
barplot(p4d, tree.type = "fan", tip.cex = 0.6, tree.open.angle = 160, trait.cex = 0.6)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, bar.col = rainbow(19))
## ----fig.width=8, fig.height=5------------------------------------------------
mat.col <- ifelse(tdata(p4d, "tip") < 0, "red", "grey35")
barplot(p4d, center = FALSE, bar.col = mat.col)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, trait.bg.col = c("#F6CED8", "#CED8F6", "#CEF6CE"), bar.col = "grey35")
## ----fig.width=5, fig.height=6------------------------------------------------
gridplot(p4d, tree.type = "fan", tree.ratio = 0.5,
show.trait = FALSE, show.tip = FALSE,
cell.col = terrain.colors(100))
## ----fig.width=8, fig.height=5------------------------------------------------
tip.col <- rep(1, nTips(p4d))
tip.col[(mat.col[, 2] == "red") | (mat.col[, 3] == "red")] <- 2
barplot(p4d, center = FALSE, trait.bg.col = c("#F6CED8", "#CED8F6", "#CEF6CE"),
bar.col = mat.col, tip.col = tip.col, trait.font = c(1, 2, 2))
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
focusTree()
add.scale.bar()
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
focusTraits(2)
abline(v = 1, col = 2)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
focusTips()
rect(xleft = 0, ybottom = 0.5,
xright = 0.95, ytop = 3.5,
col = "#FF000020", border = NA)
|
/phylosignal/inst/doc/Demo_plots.R
|
no_license
|
akhikolla/Rcpp-TestPkgs-WebPages
|
R
| false
| false
| 3,119
|
r
|
## ---- message = FALSE, warning = FALSE----------------------------------------
library(ape)
library(adephylo)
library(phylobase)
library(phylosignal)
data(carni19)
## -----------------------------------------------------------------------------
tre <- read.tree(text = carni19$tre)
## -----------------------------------------------------------------------------
dat <- data.frame(carni19$bm)
dat$random <- rnorm(dim(dat)[1], sd = 10)
dat$bm <- rTraitCont(tre)
## -----------------------------------------------------------------------------
p4d <- phylo4d(tre, dat)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
dotplot(p4d)
gridplot(p4d)
## ----fig.width=8, fig.height=5------------------------------------------------
dotplot(p4d, tree.type = "cladogram")
## ----fig.width=6, fig.height=6------------------------------------------------
gridplot(p4d, tree.type = "fan", tip.cex = 0.6, show.trait = FALSE)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, tree.ratio = 0.5)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, trait = c("bm", "carni19.bm"))
## ----fig.width=8, fig.height=5------------------------------------------------
mat.e <- matrix(abs(rnorm(19 * 3, 0, 0.5)), ncol = 3,
dimnames = list(tipLabels(p4d), names(tdata(p4d))))
barplot(p4d, error.bar.sup = mat.e, error.bar.inf = mat.e)
## ----fig.width=8, fig.height=6------------------------------------------------
barplot(p4d, tree.type = "fan", tip.cex = 0.6, tree.open.angle = 160, trait.cex = 0.6)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, bar.col = rainbow(19))
## ----fig.width=8, fig.height=5------------------------------------------------
mat.col <- ifelse(tdata(p4d, "tip") < 0, "red", "grey35")
barplot(p4d, center = FALSE, bar.col = mat.col)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d, trait.bg.col = c("#F6CED8", "#CED8F6", "#CEF6CE"), bar.col = "grey35")
## ----fig.width=5, fig.height=6------------------------------------------------
gridplot(p4d, tree.type = "fan", tree.ratio = 0.5,
show.trait = FALSE, show.tip = FALSE,
cell.col = terrain.colors(100))
## ----fig.width=8, fig.height=5------------------------------------------------
tip.col <- rep(1, nTips(p4d))
tip.col[(mat.col[, 2] == "red") | (mat.col[, 3] == "red")] <- 2
barplot(p4d, center = FALSE, trait.bg.col = c("#F6CED8", "#CED8F6", "#CEF6CE"),
bar.col = mat.col, tip.col = tip.col, trait.font = c(1, 2, 2))
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
focusTree()
add.scale.bar()
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
focusTraits(2)
abline(v = 1, col = 2)
## ----fig.width=8, fig.height=5------------------------------------------------
barplot(p4d)
focusTips()
rect(xleft = 0, ybottom = 0.5,
xright = 0.95, ytop = 3.5,
col = "#FF000020", border = NA)
|
testlist <- list(b = numeric(0), p1 = c(8.5728629954997e-312, 1.48571064469896e-72, 8.50408730081934e+144, 2.12530129086192e-42, 9.30116759388657e+225, 3.19860070393215e+129, -1.22227646714106e-150, -2.48280557433659e+258, -9.13799141996196e-296, -1.88918554334287e+52, -4.11215093765371e-273, -6.93132091139805e-107, 2.79475968079386e-261, -3.76478564026971e+233, 1.03429569512332e-98, -8.18790785258901e-12, -3.80269803056297e+245, -6.75805164691332e-243, -2.08465040737023e+101, -7.36599172844076e+192, -1.10525061476907e-126), p2 = c(3.2667689008931e+187, -2.80363318787251e-287, 3.49300992181426e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result)
|
/metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615768435-test.R
|
permissive
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 887
|
r
|
testlist <- list(b = numeric(0), p1 = c(8.5728629954997e-312, 1.48571064469896e-72, 8.50408730081934e+144, 2.12530129086192e-42, 9.30116759388657e+225, 3.19860070393215e+129, -1.22227646714106e-150, -2.48280557433659e+258, -9.13799141996196e-296, -1.88918554334287e+52, -4.11215093765371e-273, -6.93132091139805e-107, 2.79475968079386e-261, -3.76478564026971e+233, 1.03429569512332e-98, -8.18790785258901e-12, -3.80269803056297e+245, -6.75805164691332e-243, -2.08465040737023e+101, -7.36599172844076e+192, -1.10525061476907e-126), p2 = c(3.2667689008931e+187, -2.80363318787251e-287, 3.49300992181426e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result)
|
electrical_data <- read.csv("household_power_consumption.txt",sep=';',stringsAsFactors=FALSE)
print(head(electrical_data))
electrical_data$date<-as.Date(electrical_data$Date,format="%d/%m/%Y")
data<-electrical_data[electrical_data$Date %in% c("1/2/2007","2/2/2007"),]
data$Global_active_power<-as.numeric(data$Global_active_power)
date_plus_time <- strptime(paste(data$Date,data$Time,sep=" "),"%d/%m/%Y %H:%M:%S")
data$date_plus_time<-as.POSIXct(date_plus_time)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(data$Sub_metering_1~data$date_plus_time,type="l", ylab="Energy sub metering",xlab="")
lines(data$Sub_metering_2~data$date_plus_time,col='Red')
lines(data$Sub_metering_3~data$date_plus_time,col='Blue')
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),col=c("black", "red", "blue"),lty=1,lwd=2)
dev.off()
|
/plot3.R
|
no_license
|
uit12345/ExData_Plotting1
|
R
| false
| false
| 1,011
|
r
|
electrical_data <- read.csv("household_power_consumption.txt",sep=';',stringsAsFactors=FALSE)
print(head(electrical_data))
electrical_data$date<-as.Date(electrical_data$Date,format="%d/%m/%Y")
data<-electrical_data[electrical_data$Date %in% c("1/2/2007","2/2/2007"),]
data$Global_active_power<-as.numeric(data$Global_active_power)
date_plus_time <- strptime(paste(data$Date,data$Time,sep=" "),"%d/%m/%Y %H:%M:%S")
data$date_plus_time<-as.POSIXct(date_plus_time)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(data$Sub_metering_1~data$date_plus_time,type="l", ylab="Energy sub metering",xlab="")
lines(data$Sub_metering_2~data$date_plus_time,col='Red')
lines(data$Sub_metering_3~data$date_plus_time,col='Blue')
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),col=c("black", "red", "blue"),lty=1,lwd=2)
dev.off()
|
\name{vect2rast.SpatialPoints}
\alias{vect2rast.SpatialPoints}
\title{Converts points to rasters}
\description{Converts object of class \code{"SpatialPoints*"} to a raster map, and (optional) writes it to an external file (GDAL-supported formats; it used the SAGA GIS format by default).}
\usage{
vect2rast.SpatialPoints(obj, fname = names(obj)[1], cell.size, bbox,
file.name, silent = FALSE, method = c("raster", "SAGA")[1], FIELD = 0,
MULTIPLE = 1, LINE_TYPE = 0, GRID_TYPE = 2, \dots )
}
\arguments{
\item{obj}{\code{"SpatialPoints*"} object}
\item{fname}{target variable name in the \code{"data"} slot}
\item{cell.size}{(optional) grid cell size in the output raster map}
\item{bbox}{(optional) output bounding box (class \code{"bbox"}) for cropping the data}
\item{file.name}{(optional) file name to export the resulting raster map}
\item{silent}{logical; specifies whether to print any output of processing}
\item{method}{character; specifies the gridding method}
\item{FIELD}{character; SAGA GIS argument attribute table field number}
\item{MULTIPLE}{character; SAGA GIS argument method for multiple values --- [0] first, [1] last, [2] minimum, [3] maximum, [4] mean}
\item{LINE_TYPE}{character; SAGA GIS argument method for rasterization --- [0] thin, [1] thick}
\item{GRID_TYPE}{character; SAGA GIS argument for coding type --- [0] integer (1 byte), [1] integer (2 byte), [2] integer (4 byte), [3] floating point (4 byte), [4] floating point (8 byte)}
\item{\dots}{additional arguments that can be passed to the \code{raster::rasterize} command}
}
\value{Returns an object of type \code{"SpatialGridDataFrame"}.}
\author{Tomislav Hengl }
\seealso{\code{\link{vect2rast}}}
\examples{
\dontrun{
library(sp)
data(meuse)
coordinates(meuse) <- ~x+y
# point map:
x <- vect2rast(meuse, fname = "om")
data(SAGA_pal)
sp.p <- list("sp.points", meuse, pch="+", cex=1.5, col="black")
spplot(x, col.regions=SAGA_pal[[1]], sp.layout=sp.p)
}
}
\keyword{spatial}
|
/plotKML/man/vect2rast.SpatialPoints.Rd
|
no_license
|
albrizre/spatstat.revdep
|
R
| false
| false
| 1,998
|
rd
|
\name{vect2rast.SpatialPoints}
\alias{vect2rast.SpatialPoints}
\title{Converts points to rasters}
\description{Converts object of class \code{"SpatialPoints*"} to a raster map, and (optional) writes it to an external file (GDAL-supported formats; it used the SAGA GIS format by default).}
\usage{
vect2rast.SpatialPoints(obj, fname = names(obj)[1], cell.size, bbox,
file.name, silent = FALSE, method = c("raster", "SAGA")[1], FIELD = 0,
MULTIPLE = 1, LINE_TYPE = 0, GRID_TYPE = 2, \dots )
}
\arguments{
\item{obj}{\code{"SpatialPoints*"} object}
\item{fname}{target variable name in the \code{"data"} slot}
\item{cell.size}{(optional) grid cell size in the output raster map}
\item{bbox}{(optional) output bounding box (class \code{"bbox"}) for cropping the data}
\item{file.name}{(optional) file name to export the resulting raster map}
\item{silent}{logical; specifies whether to print any output of processing}
\item{method}{character; specifies the gridding method}
\item{FIELD}{character; SAGA GIS argument attribute table field number}
\item{MULTIPLE}{character; SAGA GIS argument method for multiple values --- [0] first, [1] last, [2] minimum, [3] maximum, [4] mean}
\item{LINE_TYPE}{character; SAGA GIS argument method for rasterization --- [0] thin, [1] thick}
\item{GRID_TYPE}{character; SAGA GIS argument for coding type --- [0] integer (1 byte), [1] integer (2 byte), [2] integer (4 byte), [3] floating point (4 byte), [4] floating point (8 byte)}
\item{\dots}{additional arguments that can be passed to the \code{raster::rasterize} command}
}
\value{Returns an object of type \code{"SpatialGridDataFrame"}.}
\author{Tomislav Hengl }
\seealso{\code{\link{vect2rast}}}
\examples{
\dontrun{
library(sp)
data(meuse)
coordinates(meuse) <- ~x+y
# point map:
x <- vect2rast(meuse, fname = "om")
data(SAGA_pal)
sp.p <- list("sp.points", meuse, pch="+", cex=1.5, col="black")
spplot(x, col.regions=SAGA_pal[[1]], sp.layout=sp.p)
}
}
\keyword{spatial}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlPipelineAndAlgorithmFunctions.R
\name{rfAlgorithm}
\alias{rfAlgorithm}
\title{RF algorithm function}
\usage{
rfAlgorithm(Training_set, Prediction_set, Ntree, Mtry)
}
\arguments{
\item{Training_set}{data frame to train the network}
\item{Prediction_set}{also a dataframe to test the model}
\item{Ntree, Mtry}{are numerical values, to fit the RF model}
}
\value{
ref resultant dataframe of RF algorithm
}
\description{
RF algorithm function
}
|
/man/rfAlgorithm.Rd
|
no_license
|
Kanjali/stencilRfunctions
|
R
| false
| true
| 523
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlPipelineAndAlgorithmFunctions.R
\name{rfAlgorithm}
\alias{rfAlgorithm}
\title{RF algorithm function}
\usage{
rfAlgorithm(Training_set, Prediction_set, Ntree, Mtry)
}
\arguments{
\item{Training_set}{data frame to train the network}
\item{Prediction_set}{also a dataframe to test the model}
\item{Ntree, Mtry}{are numerical values, to fit the RF model}
}
\value{
ref resultant dataframe of RF algorithm
}
\description{
RF algorithm function
}
|
#print("hello from 2")
args <- commandArgs(trailingOnly = TRUE)
#print(args)
#print(arg1)
#print(args[3])
#print(length(args))
#df_scr2 = read.table(args[3], header=TRUE)
#print(df1)
suma <- function(){
algo <- 1
sum(algo + algo)
}
|
/test-r/test-launch-scripts/scr2.R
|
no_license
|
sergiocy/scripts-tests
|
R
| false
| false
| 247
|
r
|
#print("hello from 2")
args <- commandArgs(trailingOnly = TRUE)
#print(args)
#print(arg1)
#print(args[3])
#print(length(args))
#df_scr2 = read.table(args[3], header=TRUE)
#print(df1)
suma <- function(){
algo <- 1
sum(algo + algo)
}
|
## R NOTES ##
# Pound sign in front depicts notes. (Will de-activate a line of code)
# %>% essentially means "then" - continuing a function to next operation.
# df = dataframe - refers to your dataset that you are using, assuming it is named df
# load packages
library(tidyverse)
library(readxl)
library(lubridate)
library(stringr)
# Saving a file
write.csv(chinook_mcr, file = './data/chinook_mcr.csv')
# to check what packages are installed
installed.packages()
# ?function will bring up the help window for that function (bottom right window)
?summarise
# See All column names.
names(data)
# See all column types.
glimpse(tempdata)
# rm(x) will remove an object you have created.
rm(values)
# gather function switches from wide format to a long format
?gather #this gets a help window for this function
disp_purp <- gather(data, key= metric, value= x)
# spread is the opposite - switches from a long format to wide
# This will select fields from Facility to Species, Release_stream to disp_purp
select(Facility:Species, Release_stream:disp_purp) %>%
# this selects all but "Action" field, and renames "location" to "Release_location"
select(-Action, Release_location = location)
which(is.na(df$field)) # this will tell you which ROWS are 'NA' for the specified field.
which(is.null(df$field)) # this will tell you which rows are NULL for the specified field.
|
/R/ts_rnotes.R
|
no_license
|
tylerstright/fins
|
R
| false
| false
| 1,409
|
r
|
## R NOTES ##
# Pound sign in front depicts notes. (Will de-activate a line of code)
# %>% essentially means "then" - continuing a function to next operation.
# df = dataframe - refers to your dataset that you are using, assuming it is named df
# load packages
library(tidyverse)
library(readxl)
library(lubridate)
library(stringr)
# Saving a file
write.csv(chinook_mcr, file = './data/chinook_mcr.csv')
# to check what packages are installed
installed.packages()
# ?function will bring up the help window for that function (bottom right window)
?summarise
# See All column names.
names(data)
# See all column types.
glimpse(tempdata)
# rm(x) will remove an object you have created.
rm(values)
# gather function switches from wide format to a long format
?gather #this gets a help window for this function
disp_purp <- gather(data, key= metric, value= x)
# spread is the opposite - switches from a long format to wide
# This will select fields from Facility to Species, Release_stream to disp_purp
select(Facility:Species, Release_stream:disp_purp) %>%
# this selects all but "Action" field, and renames "location" to "Release_location"
select(-Action, Release_location = location)
which(is.na(df$field)) # this will tell you which ROWS are 'NA' for the specified field.
which(is.null(df$field)) # this will tell you which rows are NULL for the specified field.
|
library(tidyverse)
load("data/wybory.rda")
# wybory_prez <- read_csv("data/wybory.csv")
summary(wybory)
# braki danych
sd(wybory$percent_glosow_waznych, na.rm = TRUE)
wybory_braki <- wybory %>%
filter(!is.na(percent_glosow_waznych))
wybory_complete <- wybory %>%
select(-w_tym_z_powodu_postawienia_znaku_x_wylacznie_obok_skreslonego_nazwiska_kandydata) %>%
filter(complete.cases(.))
summary(wybory_complete)
# filtrowanie według kilku warunków
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto", frekwencja > 80)
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto" & frekwencja > 80)
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto") %>%
filter(frekwencja > 80)
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto" | frekwencja > 80)
# tylko te trzy miasta
miasta3 <- wybory %>%
filter(powiat %in% c("Poznań", "Kraków", "Wrocław"))
# wszystko oprócz tych trzech miast
miasta3 <- wybory %>%
filter(!powiat %in% c("Poznań", "Kraków", "Wrocław"))
# trzy warunki
wybory %>%
filter(wojewodztwo == "dolnośląskie",
frekwencja > 80,
typ_gminy == "gmina wiejska")
# wybieranie kolumn - select
wybrane_kolumny <- wybory %>%
# select(kod_teryt, frekwencja)
# select(symbol_kontrolny:numer_obwodu)
select(1:5)
# tworzenie nowej kolumny - mutate
wybory <- wybory %>%
mutate(suma_glosow=percent_glosow_niewaznych+percent_glosow_waznych) %>%
mutate(roznica=abs(rafal_kazimierz_trzaskowski-andrzej_sebastian_duda))
# podsumowania - summarise
wybory %>%
summarise(srednie_poparcie_rt=mean(rafal_kazimierz_trzaskowski, na.rm=T),
srednie_poparcie_ad=mean(andrzej_sebastian_duda, na.rm=T))
wybory %>%
summarise(mean(frekwencja), median(frekwencja), sd(frekwencja))
frekwencja_woj <- wybory %>%
group_by(wojewodztwo) %>%
summarise(srednia=mean(frekwencja))
frekwencja_woj_pow <- wybory %>%
group_by(wojewodztwo, powiat) %>%
summarise(srednia=mean(frekwencja))
# stworzenie cechy jakościowej z ilościowej
glosy_frekwencja <- wybory %>%
mutate(frekwencja_przedzialy=cut(x = frekwencja,
breaks = c(0,25,50,75,100),
include.lowest=TRUE)) %>%
group_by(frekwencja_przedzialy) %>%
summarise(srednia=mean(rafal_kazimierz_trzaskowski, na.rm=TRUE),
liczebnosc=n())
# zliczanie - count
wybory %>%
count(typ_gminy)
wybory %>%
count(typ_obszaru)
obszar_woj <- wybory %>%
count(wojewodztwo, typ_obszaru)
obszar_woj <- wybory %>%
group_by(wojewodztwo, typ_obszaru) %>%
summarise(n=n())
obszar_woj <- wybory %>%
count(wojewodztwo, typ_obszaru) %>%
count(wojewodztwo) %>%
filter(n != 3)
gm_frek <- wybory %>%
group_by(typ_gminy) %>%
summarise(liczebnosc=n(),
sr=mean(frekwencja))
|
/podstawy_programowania/2021_2022/ppr20220122_przetwarzanie2.R
|
no_license
|
lwawrowski/cdv_bigdata
|
R
| false
| false
| 2,830
|
r
|
library(tidyverse)
load("data/wybory.rda")
# wybory_prez <- read_csv("data/wybory.csv")
summary(wybory)
# braki danych
sd(wybory$percent_glosow_waznych, na.rm = TRUE)
wybory_braki <- wybory %>%
filter(!is.na(percent_glosow_waznych))
wybory_complete <- wybory %>%
select(-w_tym_z_powodu_postawienia_znaku_x_wylacznie_obok_skreslonego_nazwiska_kandydata) %>%
filter(complete.cases(.))
summary(wybory_complete)
# filtrowanie według kilku warunków
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto", frekwencja > 80)
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto" & frekwencja > 80)
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto") %>%
filter(frekwencja > 80)
miasto80 <- wybory %>%
filter(typ_obszaru == "miasto" | frekwencja > 80)
# tylko te trzy miasta
miasta3 <- wybory %>%
filter(powiat %in% c("Poznań", "Kraków", "Wrocław"))
# wszystko oprócz tych trzech miast
miasta3 <- wybory %>%
filter(!powiat %in% c("Poznań", "Kraków", "Wrocław"))
# trzy warunki
wybory %>%
filter(wojewodztwo == "dolnośląskie",
frekwencja > 80,
typ_gminy == "gmina wiejska")
# wybieranie kolumn - select
wybrane_kolumny <- wybory %>%
# select(kod_teryt, frekwencja)
# select(symbol_kontrolny:numer_obwodu)
select(1:5)
# tworzenie nowej kolumny - mutate
wybory <- wybory %>%
mutate(suma_glosow=percent_glosow_niewaznych+percent_glosow_waznych) %>%
mutate(roznica=abs(rafal_kazimierz_trzaskowski-andrzej_sebastian_duda))
# podsumowania - summarise
wybory %>%
summarise(srednie_poparcie_rt=mean(rafal_kazimierz_trzaskowski, na.rm=T),
srednie_poparcie_ad=mean(andrzej_sebastian_duda, na.rm=T))
wybory %>%
summarise(mean(frekwencja), median(frekwencja), sd(frekwencja))
frekwencja_woj <- wybory %>%
group_by(wojewodztwo) %>%
summarise(srednia=mean(frekwencja))
frekwencja_woj_pow <- wybory %>%
group_by(wojewodztwo, powiat) %>%
summarise(srednia=mean(frekwencja))
# stworzenie cechy jakościowej z ilościowej
glosy_frekwencja <- wybory %>%
mutate(frekwencja_przedzialy=cut(x = frekwencja,
breaks = c(0,25,50,75,100),
include.lowest=TRUE)) %>%
group_by(frekwencja_przedzialy) %>%
summarise(srednia=mean(rafal_kazimierz_trzaskowski, na.rm=TRUE),
liczebnosc=n())
# zliczanie - count
wybory %>%
count(typ_gminy)
wybory %>%
count(typ_obszaru)
obszar_woj <- wybory %>%
count(wojewodztwo, typ_obszaru)
obszar_woj <- wybory %>%
group_by(wojewodztwo, typ_obszaru) %>%
summarise(n=n())
obszar_woj <- wybory %>%
count(wojewodztwo, typ_obszaru) %>%
count(wojewodztwo) %>%
filter(n != 3)
gm_frek <- wybory %>%
group_by(typ_gminy) %>%
summarise(liczebnosc=n(),
sr=mean(frekwencja))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbx.R
\name{dbxUpsert}
\alias{dbxUpsert}
\title{Upsert records}
\usage{
dbxUpsert(conn, table, records, where_cols, batch_size = NULL)
}
\arguments{
\item{conn}{A DBIConnection object}
\item{table}{The table name to upsert}
\item{records}{A data frame of records to upsert}
\item{where_cols}{The columns to use for WHERE clause}
\item{batch_size}{The number of records to upsert in a single transaction (defaults to all)}
}
\description{
Upsert records
}
\examples{
\dontrun{
records <- data.frame(id=c(2, 3), temperature=c(20, 25))
upserts <- dbxUpsert(db, table, records, where_cols=c("id"))
}
}
|
/man/dbxUpsert.Rd
|
no_license
|
gridl/dbx
|
R
| false
| true
| 681
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbx.R
\name{dbxUpsert}
\alias{dbxUpsert}
\title{Upsert records}
\usage{
dbxUpsert(conn, table, records, where_cols, batch_size = NULL)
}
\arguments{
\item{conn}{A DBIConnection object}
\item{table}{The table name to upsert}
\item{records}{A data frame of records to upsert}
\item{where_cols}{The columns to use for WHERE clause}
\item{batch_size}{The number of records to upsert in a single transaction (defaults to all)}
}
\description{
Upsert records
}
\examples{
\dontrun{
records <- data.frame(id=c(2, 3), temperature=c(20, 25))
upserts <- dbxUpsert(db, table, records, where_cols=c("id"))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.directconnect_operations.R
\name{create_connection}
\alias{create_connection}
\title{Creates a connection between a customer network and a specific AWS Direct Connect location}
\usage{
create_connection(location, bandwidth, connectionName, lagId = NULL)
}
\arguments{
\item{location}{[required] The location of the connection.}
\item{bandwidth}{[required] The bandwidth of the connection.}
\item{connectionName}{[required] The name of the connection.}
\item{lagId}{The ID of the LAG.}
}
\description{
Creates a connection between a customer network and a specific AWS Direct Connect location.
}
\details{
A connection links your internal network to an AWS Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router.
To find the locations for your Region, use DescribeLocations.
You can automatically add the new connection to a link aggregation group (LAG) by specifying a LAG ID in the request. This ensures that the new connection is allocated on the same AWS Direct Connect endpoint that hosts the specified LAG. If there are no available ports on the endpoint, the request fails and no connection is created.
}
\section{Accepted Parameters}{
\preformatted{create_connection(
location = "string",
bandwidth = "string",
connectionName = "string",
lagId = "string"
)
}
}
|
/service/paws.directconnect/man/create_connection.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 1,470
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.directconnect_operations.R
\name{create_connection}
\alias{create_connection}
\title{Creates a connection between a customer network and a specific AWS Direct Connect location}
\usage{
create_connection(location, bandwidth, connectionName, lagId = NULL)
}
\arguments{
\item{location}{[required] The location of the connection.}
\item{bandwidth}{[required] The bandwidth of the connection.}
\item{connectionName}{[required] The name of the connection.}
\item{lagId}{The ID of the LAG.}
}
\description{
Creates a connection between a customer network and a specific AWS Direct Connect location.
}
\details{
A connection links your internal network to an AWS Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router.
To find the locations for your Region, use DescribeLocations.
You can automatically add the new connection to a link aggregation group (LAG) by specifying a LAG ID in the request. This ensures that the new connection is allocated on the same AWS Direct Connect endpoint that hosts the specified LAG. If there are no available ports on the endpoint, the request fails and no connection is created.
}
\section{Accepted Parameters}{
\preformatted{create_connection(
location = "string",
bandwidth = "string",
connectionName = "string",
lagId = "string"
)
}
}
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537533e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
|
/dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609869454-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 1,199
|
r
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537533e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
|
# Install packages if not exist
package_list=c('data.table', 'gridExtra', 'ggplot2', 'cowplot', 'plyr',
'dplyr', 'grid', 'pracma')
new_packages=package_list[!(package_list %in% installed.packages()[,'Package'])]
if(length(new_packages)) install.packages(new_packages)
# Load packages
library(data.table)
library(gridExtra)
library(ggplot2)
library(cowplot)
library(plyr)
library(dplyr)
library(grid)
library(pracma)
# TODO: Set path to code
setwd('')
# Define figure theme
# Shared ggplot2 theme for the ptosis figures: black-and-white base with all
# axis decoration removed (the plots draw their own reference lines) and a
# compact bottom legend. Text sizes/colours are parameterised.
# NOTE(review): `axis_size` is accepted but never used in the body.
theme_basic <- function(axis_size=0.5, title_size=8, subtitle_size=6,
                        col_gen='grey50', legend_title_size=0.5,
                        legend_text_size=0.4, legend_tick_size=0.08,
                        legend_width=0.5, legend_height=0.2,
                        legend_hjust_title=0.5) {
  theme_bw() +
    theme(
      # Title / subtitle / caption text styling
      plot.title=element_text(size=title_size, colour=col_gen, face='bold'),
      plot.subtitle=element_text(size=subtitle_size, colour=col_gen,
                                 face='plain'),
      plot.caption=element_text(size=(subtitle_size-1), colour=col_gen,
                                face='plain'),
      # Compact legend placed under the plot
      legend.position='bottom',
      legend.key.height=unit(legend_height,'cm'),
      legend.key.width=unit(legend_width,'cm'),
      axis.ticks.length=unit(legend_tick_size,'cm'),
      legend.title=element_text(size=rel(legend_title_size), colour=col_gen,
                                hjust=legend_hjust_title, face='plain'),
      legend.text=element_text(size=rel(legend_text_size), colour=col_gen),
      # Strip gridlines and every axis title/label/tick
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      axis.title.x=element_blank(),
      axis.text.x=element_blank(),
      axis.ticks.x=element_blank(),
      axis.title.y=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks.y=element_blank()
    )
}
# Calculate area under the curve
# Trapezoidal-rule area under a curve sampled at unit x-spacing:
# endpoints are weighted 1/2, interior points are weighted 1.
# (Algebraically identical to 0.5 * (y[1] + y[n] + 2 * sum(interior)).)
auc <- function(y){
  n <- length(y)
  interior <- y[-c(1, n)]
  0.5 * (y[1] + y[n]) + sum(interior)
}
# Build a PDF of ptosis visual-field plots for one patient/eye.
# Reads the preprocessed exam CSVs (untaped, plus taped unless
# untaped_only=TRUE), plots hits/misses with and without a fitted margin
# curve, annotates trapezoidal-AUC figures, and writes every page to
# ../output/<random id>_Superior_<eye>_eye.pdf.
#
# patient       file-name prefix identifying the patient in ../data/modified
# eye           eye label used in file names and plot annotations
# untaped_only  if TRUE, only the untaped exam is processed
create_ptosis_output <- function(patient, eye, untaped_only=FALSE) {
  # Set path for files containing taped and untaped exams
  if(!untaped_only) {
    file_taped <- paste0('../data/modified/', patient,
                         '_Superior_', eye, '_taped_eye_mod.csv')
  }
  file_untaped <- paste0('../data/modified/', patient,
                         '_Superior_', eye, '_eye_mod.csv')
  # Untaped must come first: the taped branch below reads auc_untaped,
  # which is only assigned while processing the untaped file.
  if(untaped_only) {
    lst <- list(file_untaped)
  } else {
    lst <- list(file_untaped, file_taped)
  }
  # Generate random patient identifier (no seed is set, so the id -- and the
  # output file name -- differs on every run)
  patient_id <- paste(sample(0:9, 7, replace=TRUE), collapse="")
  pdf(paste0('../output/', patient_id, '_Superior_', eye, '_eye.pdf'))
  for (file in lst) {
    print(file)
    dt <- fread(file)
    # Preprocess data table: make column names syntactically valid
    names(dt) <- make.names(names(dt), unique=TRUE)
    # Filter out untested points
    dt_tested <- dt[Was.point.tested == TRUE]
    # Determine margin: a "hit" is any tested point with a positive response
    # time; the margin curve is the highest-y hit in each x column.
    dt_hits <- dt_tested[Response.time.s. > 0]
    dt_hits_max <- data.table(dt_hits %>% group_by(x) %>% top_n(n=1, wt=y))
    dt_misses <- setdiff(dt_tested, dt_hits)
    # Plot scatter without best fit (hits = stars, misses = squares)
    sctr_no_line <- ggplot() +
      geom_point(dt_hits, mapping=aes(x, y), shape=8, size=2) +
      geom_point(dt_misses, mapping=aes(x, y), shape=15, size=2) +
      geom_hline(yintercept=0) + geom_vline(xintercept=0) +
      geom_hline(yintercept=3, linetype='dashed') +
      xlim(-3, 3) + ylim(-4, 4) +
      annotate('text', x=-3, y=4, label=paste0('Patient: ', patient_id), hjust=0) +
      annotate('text', x=-3, y=3.8, label=paste0('Eye: ', eye), hjust=0) +
      theme_basic()
    # Plot scatter with best fit through the per-column margin points
    sctr <- ggplot() +
      geom_point(dt_hits, mapping=aes(x, y), shape=8, size=2) +
      geom_point(dt_misses, mapping=aes(x, y), shape=15, size=2) +
      stat_smooth(dt_hits_max, mapping=aes(x, y), method='auto', se=FALSE,
                  color='black', size=0.8, linetype=1) +
      geom_hline(yintercept=0) + geom_vline(xintercept=0) +
      geom_hline(yintercept=3, linetype='dashed') +
      xlim(-3, 3) + ylim(-4, 4) +
      annotate('text', x=-3, y=4, label=paste0('Patient: ', patient_id), hjust=0) +
      annotate('text', x=-3, y=3.8, label=paste0('Eye: ', eye), hjust=0) +
      theme_basic()
    # Theoretical maximum AUC: a flat margin at y = 3 across 11 x positions
    auc_top=round(trapz(rep(3, 11)), 2)
    # Include annotations and calculate area under the curve.
    # NOTE(review): trapz(dt_hits_max$y) assumes the margin rows are already
    # ordered by x and evenly spaced -- confirm against the input files.
    if(grepl('taped', file)) {
      auc_taped <- round(trapz(dt_hits_max$y), 2)
      percent_change <- round((auc_taped - auc_untaped)/auc_taped * 100, 2)
      sctr_no_line <- sctr_no_line + annotate('text', x=-3, y=3.6, label='Taped: Yes', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_taped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('Percent change: ', percent_change, '%'), hjust=0)
      sctr <- sctr + annotate('text', x=-3, y=3.6, label='Taped: Yes', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_taped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('Percent change: ', percent_change, '%'), hjust=0)
    } else {
      auc_untaped=round(trapz(dt_hits_max$y), 2)
      sctr_no_line <- sctr_no_line + annotate('text', x=-3, y=3.6, label='Taped: No', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_untaped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('AUC max: ', auc_top), hjust=0)
      sctr <- sctr + annotate('text', x=-3, y=3.6, label='Taped: No', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_untaped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('AUC max: ', auc_top), hjust=0)
    }
    # Write both versions of the figure to the open PDF device
    print(sctr_no_line)
    print(sctr)
  }
  dev.off()
}
# Command line args specifying patient and eye for which to create output.
# Fix: the original only rejected zero arguments, so a single argument
# slipped through with eye == NA despite the error message requiring both.
args=commandArgs(trailingOnly=TRUE)
if(length(args) < 2) {
  stop('Must specify [patient] and [eye]')
} else {
  patient=args[1]
  eye=args[2]
}
create_ptosis_output(patient, eye)
|
/create_ptosis_output.R
|
no_license
|
RetinaTechnologies/data-visualization
|
R
| false
| false
| 5,688
|
r
|
# Install packages if not exist
package_list=c('data.table', 'gridExtra', 'ggplot2', 'cowplot', 'plyr',
               'dplyr', 'grid', 'pracma')
# Only install packages missing from the local library.
new_packages=package_list[!(package_list %in% installed.packages()[,'Package'])]
if(length(new_packages)) install.packages(new_packages)
# Load packages
library(data.table)
library(gridExtra)
library(ggplot2)
library(cowplot)
library(plyr)    # NOTE(review): plyr is loaded before dplyr on purpose --
library(dplyr)   # the reverse order would mask key dplyr verbs
library(grid)
library(pracma)  # provides trapz() used for the AUC calculations below
# TODO: Set path to code
setwd('')  # NOTE(review): empty path errors at runtime; must be filled in before use
# Define figure theme
# Shared ggplot2 theme for the ptosis figures: black-and-white base with all
# axis decoration removed (the plots draw their own reference lines) and a
# compact bottom legend. Text sizes/colours are parameterised.
# NOTE(review): `axis_size` is accepted but never used in the body.
theme_basic <- function(axis_size=0.5, title_size=8, subtitle_size=6,
                        col_gen='grey50', legend_title_size=0.5,
                        legend_text_size=0.4, legend_tick_size=0.08,
                        legend_width=0.5, legend_height=0.2,
                        legend_hjust_title=0.5) {
  theme_bw() +
    theme(
      # Title / subtitle / caption text styling
      plot.title=element_text(size=title_size, colour=col_gen, face='bold'),
      plot.subtitle=element_text(size=subtitle_size, colour=col_gen,
                                 face='plain'),
      plot.caption=element_text(size=(subtitle_size-1), colour=col_gen,
                                face='plain'),
      # Compact legend placed under the plot
      legend.position='bottom',
      legend.key.height=unit(legend_height,'cm'),
      legend.key.width=unit(legend_width,'cm'),
      axis.ticks.length=unit(legend_tick_size,'cm'),
      legend.title=element_text(size=rel(legend_title_size), colour=col_gen,
                                hjust=legend_hjust_title, face='plain'),
      legend.text=element_text(size=rel(legend_text_size), colour=col_gen),
      # Strip gridlines and every axis title/label/tick
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      axis.title.x=element_blank(),
      axis.text.x=element_blank(),
      axis.ticks.x=element_blank(),
      axis.title.y=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks.y=element_blank()
    )
}
# Calculate area under the curve
# Trapezoidal-rule area under a curve sampled at unit x-spacing:
# endpoints are weighted 1/2, interior points are weighted 1.
# (Algebraically identical to 0.5 * (y[1] + y[n] + 2 * sum(interior)).)
auc <- function(y){
  n <- length(y)
  interior <- y[-c(1, n)]
  0.5 * (y[1] + y[n]) + sum(interior)
}
# Build a PDF of ptosis visual-field plots for one patient/eye.
# Reads the preprocessed exam CSVs (untaped, plus taped unless
# untaped_only=TRUE), plots hits/misses with and without a fitted margin
# curve, annotates trapezoidal-AUC figures, and writes every page to
# ../output/<random id>_Superior_<eye>_eye.pdf.
#
# patient       file-name prefix identifying the patient in ../data/modified
# eye           eye label used in file names and plot annotations
# untaped_only  if TRUE, only the untaped exam is processed
create_ptosis_output <- function(patient, eye, untaped_only=FALSE) {
  # Set path for files containing taped and untaped exams
  if(!untaped_only) {
    file_taped <- paste0('../data/modified/', patient,
                         '_Superior_', eye, '_taped_eye_mod.csv')
  }
  file_untaped <- paste0('../data/modified/', patient,
                         '_Superior_', eye, '_eye_mod.csv')
  # Untaped must come first: the taped branch below reads auc_untaped,
  # which is only assigned while processing the untaped file.
  if(untaped_only) {
    lst <- list(file_untaped)
  } else {
    lst <- list(file_untaped, file_taped)
  }
  # Generate random patient identifier (no seed is set, so the id -- and the
  # output file name -- differs on every run)
  patient_id <- paste(sample(0:9, 7, replace=TRUE), collapse="")
  pdf(paste0('../output/', patient_id, '_Superior_', eye, '_eye.pdf'))
  for (file in lst) {
    print(file)
    dt <- fread(file)
    # Preprocess data table: make column names syntactically valid
    names(dt) <- make.names(names(dt), unique=TRUE)
    # Filter out untested points
    dt_tested <- dt[Was.point.tested == TRUE]
    # Determine margin: a "hit" is any tested point with a positive response
    # time; the margin curve is the highest-y hit in each x column.
    dt_hits <- dt_tested[Response.time.s. > 0]
    dt_hits_max <- data.table(dt_hits %>% group_by(x) %>% top_n(n=1, wt=y))
    dt_misses <- setdiff(dt_tested, dt_hits)
    # Plot scatter without best fit (hits = stars, misses = squares)
    sctr_no_line <- ggplot() +
      geom_point(dt_hits, mapping=aes(x, y), shape=8, size=2) +
      geom_point(dt_misses, mapping=aes(x, y), shape=15, size=2) +
      geom_hline(yintercept=0) + geom_vline(xintercept=0) +
      geom_hline(yintercept=3, linetype='dashed') +
      xlim(-3, 3) + ylim(-4, 4) +
      annotate('text', x=-3, y=4, label=paste0('Patient: ', patient_id), hjust=0) +
      annotate('text', x=-3, y=3.8, label=paste0('Eye: ', eye), hjust=0) +
      theme_basic()
    # Plot scatter with best fit through the per-column margin points
    sctr <- ggplot() +
      geom_point(dt_hits, mapping=aes(x, y), shape=8, size=2) +
      geom_point(dt_misses, mapping=aes(x, y), shape=15, size=2) +
      stat_smooth(dt_hits_max, mapping=aes(x, y), method='auto', se=FALSE,
                  color='black', size=0.8, linetype=1) +
      geom_hline(yintercept=0) + geom_vline(xintercept=0) +
      geom_hline(yintercept=3, linetype='dashed') +
      xlim(-3, 3) + ylim(-4, 4) +
      annotate('text', x=-3, y=4, label=paste0('Patient: ', patient_id), hjust=0) +
      annotate('text', x=-3, y=3.8, label=paste0('Eye: ', eye), hjust=0) +
      theme_basic()
    # Theoretical maximum AUC: a flat margin at y = 3 across 11 x positions
    auc_top=round(trapz(rep(3, 11)), 2)
    # Include annotations and calculate area under the curve.
    # NOTE(review): trapz(dt_hits_max$y) assumes the margin rows are already
    # ordered by x and evenly spaced -- confirm against the input files.
    if(grepl('taped', file)) {
      auc_taped <- round(trapz(dt_hits_max$y), 2)
      percent_change <- round((auc_taped - auc_untaped)/auc_taped * 100, 2)
      sctr_no_line <- sctr_no_line + annotate('text', x=-3, y=3.6, label='Taped: Yes', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_taped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('Percent change: ', percent_change, '%'), hjust=0)
      sctr <- sctr + annotate('text', x=-3, y=3.6, label='Taped: Yes', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_taped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('Percent change: ', percent_change, '%'), hjust=0)
    } else {
      auc_untaped=round(trapz(dt_hits_max$y), 2)
      sctr_no_line <- sctr_no_line + annotate('text', x=-3, y=3.6, label='Taped: No', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_untaped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('AUC max: ', auc_top), hjust=0)
      sctr <- sctr + annotate('text', x=-3, y=3.6, label='Taped: No', hjust=0) +
        annotate('text', x=-3, y=3.4, label=paste0('AUC: ', auc_untaped), hjust=0) +
        annotate('text', x=-3, y=3.2, label=paste0('AUC max: ', auc_top), hjust=0)
    }
    # Write both versions of the figure to the open PDF device
    print(sctr_no_line)
    print(sctr)
  }
  dev.off()
}
# Command line args specifying patient and eye for which to create output.
# Fix: the original only rejected zero arguments, so a single argument
# slipped through with eye == NA despite the error message requiring both.
args=commandArgs(trailingOnly=TRUE)
if(length(args) < 2) {
  stop('Must specify [patient] and [eye]')
} else {
  patient=args[1]
  eye=args[2]
}
create_ptosis_output(patient, eye)
|
# Plot trends of the Martin package metrics (A, Ca, Ce, D, I) across eight
# releases read from FilteredMartin.csv. Each metric gets its own line-chart
# object (A_plot, Ca_plot, ...); nothing is printed or saved here -- the plot
# objects are left in the workspace for later use.
rm(list=ls(all=TRUE))  # NOTE(review): wiping the global env in a script is an anti-pattern
library(ggplot2)  # fix: ggplot()/aes()/geom_line() were used without loading ggplot2
Metrics = read.csv("FilteredMartin.csv")
# Version labels for the x axis (axis text is hidden by the theme below)
vNames = c("1.0.0","1.0.0.0","1.0.0.1","1.0.0.1a","1.0.0.2","1.0.0.3","2.0.0.1","2.0.1")
versions = c(vNames)
# Abstractness (A)
A_values = c(Metrics$A)
A = data.frame(versions, A_values)
A_plot <- ggplot(data=A, aes(x = factor(A$versions), y = A$A_values, group=1)) + xlab("Versions") + ylab("A") + theme(axis.text.x = element_blank())
A_plot <- A_plot + geom_line()
# Afferent coupling (Ca)
Ca_values = c(Metrics$Ca)
Ca = data.frame(versions, Ca_values)
Ca_plot <- ggplot(data=Ca, aes(x = factor(Ca$versions), y = Ca$Ca_values, group=1)) + xlab("Versions") + ylab("Ca") + theme(axis.text.x = element_blank())
Ca_plot <- Ca_plot + geom_line()
# Efferent coupling (Ce)
Ce_values = c(Metrics$Ce)
Ce = data.frame(versions, Ce_values)
Ce_plot <- ggplot(data=Ce, aes(x = factor(Ce$versions), y = Ce$Ce_values, group=1)) + xlab("Versions") + ylab("Ce") + theme(axis.text.x = element_blank())
Ce_plot <- Ce_plot + geom_line()
# Distance from the main sequence (D)
D_values = c(Metrics$D)
D = data.frame(versions, D_values)
D_plot <- ggplot(data=D, aes(x = factor(D$versions), y = D$D_values, group=1)) + xlab("Versions") + ylab("D") + theme(axis.text.x = element_blank())
D_plot <- D_plot + geom_line()
# Instability (I)
I_values = c(Metrics$I)
I = data.frame(versions, I_values)
I_plot <- ggplot(data=I, aes(x = factor(I$versions), y = I$I_values, group=1)) + xlab("Versions") + ylab("I") + theme(axis.text.x = element_blank())
I_plot <- I_plot + geom_line()
|
/QRScanner/ExecutionTraceBased/MartinVisualization/Martin.r
|
no_license
|
Hareem-E-Sahar/Replication-Package-
|
R
| false
| false
| 1,415
|
r
|
# Plot trends of the Martin package metrics (A, Ca, Ce, D, I) across eight
# releases read from FilteredMartin.csv. Each metric gets its own line-chart
# object (A_plot, Ca_plot, ...); nothing is printed or saved here -- the plot
# objects are left in the workspace for later use.
rm(list=ls(all=TRUE))  # NOTE(review): wiping the global env in a script is an anti-pattern
library(ggplot2)  # fix: ggplot()/aes()/geom_line() were used without loading ggplot2
Metrics = read.csv("FilteredMartin.csv")
# Version labels for the x axis (axis text is hidden by the theme below)
vNames = c("1.0.0","1.0.0.0","1.0.0.1","1.0.0.1a","1.0.0.2","1.0.0.3","2.0.0.1","2.0.1")
versions = c(vNames)
# Abstractness (A)
A_values = c(Metrics$A)
A = data.frame(versions, A_values)
A_plot <- ggplot(data=A, aes(x = factor(A$versions), y = A$A_values, group=1)) + xlab("Versions") + ylab("A") + theme(axis.text.x = element_blank())
A_plot <- A_plot + geom_line()
# Afferent coupling (Ca)
Ca_values = c(Metrics$Ca)
Ca = data.frame(versions, Ca_values)
Ca_plot <- ggplot(data=Ca, aes(x = factor(Ca$versions), y = Ca$Ca_values, group=1)) + xlab("Versions") + ylab("Ca") + theme(axis.text.x = element_blank())
Ca_plot <- Ca_plot + geom_line()
# Efferent coupling (Ce)
Ce_values = c(Metrics$Ce)
Ce = data.frame(versions, Ce_values)
Ce_plot <- ggplot(data=Ce, aes(x = factor(Ce$versions), y = Ce$Ce_values, group=1)) + xlab("Versions") + ylab("Ce") + theme(axis.text.x = element_blank())
Ce_plot <- Ce_plot + geom_line()
# Distance from the main sequence (D)
D_values = c(Metrics$D)
D = data.frame(versions, D_values)
D_plot <- ggplot(data=D, aes(x = factor(D$versions), y = D$D_values, group=1)) + xlab("Versions") + ylab("D") + theme(axis.text.x = element_blank())
D_plot <- D_plot + geom_line()
# Instability (I)
I_values = c(Metrics$I)
I = data.frame(versions, I_values)
I_plot <- ggplot(data=I, aes(x = factor(I$versions), y = I$I_values, group=1)) + xlab("Versions") + ylab("I") + theme(axis.text.x = element_blank())
I_plot <- I_plot + geom_line()
|
### Makeover Monday Week 19 2019
### Bar Chart Race Gif
# Builds an animated "bar chart race" of the top 10 MLB career home-run
# hitters from 1985 onward and saves it as a GIF.
# Fix below: the caption string contained an unescaped apostrophe inside
# single quotes ('...Lahman's...'), which is a syntax error; it now uses
# double quotes with identical content.
# Load Libraries
library(gganimate)
library(janitor)
library(gifski)
library(dplyr)
library(zoo)
theme_set(theme_classic())
# Load Dataset
df <- read.csv("https://query.data.world/s/u3stzqkmghigjrrzio7vyfzch4gqe5", header=TRUE, stringsAsFactors=FALSE)
# Prepare Dataset
top_10_homerunners <- clean_names(df)
top_10_homerunners <- top_10_homerunners[,c('player_id','player_name','season','team','hr')]
# Collapse multi-team seasons into one row per player/season ("TeamA/TeamB")
top_10_homerunners <- top_10_homerunners %>%
  group_by(player_id,player_name,season) %>%
  mutate(team = paste(team, collapse="/"))
top_10_homerunners <- top_10_homerunners %>%
  group_by(player_id,player_name,season,team) %>%
  summarise(hr = sum(hr))
# Creating data so each player has every season associated with them in the data
for(i in 1985:2016){
  missing_data <- data.frame(unique((top_10_homerunners %>% filter(season != i))[,c('player_id','player_name')]),season=i,team=NA,hr=0)
  if(i == 1985){
    missing_df <- missing_data
  }
  if(i != 1985){
    missing_df <- rbind(missing_df,missing_data)
  }
}
# Merge to existing dataset
top_10_homerunners <- rbind(as.data.frame(top_10_homerunners),missing_df)
top_10_homerunners <- top_10_homerunners %>% arrange(player_id,season)
# Fill the NA team values forward; the leading 19 rows have no prior value,
# so they are hard-coded. NOTE(review): this assumes the first player sorted
# by player_id played for the Giants -- verify against the data.
top_10_homerunners$team <- c(rep('San Francisco Giants',19),na.locf(top_10_homerunners$team))
top_10_homerunners <- top_10_homerunners %>%
  group_by(player_id,player_name,season,team) %>%
  summarise(hr = sum(hr))
# Calculate running sum of home runs
p <- top_10_homerunners %>%
  arrange(season) %>%
  group_by(player_id) %>%
  mutate(career_hrs = cumsum(hr))
p2 <- p %>%
  group_by(season) %>%
  mutate(rank = rank(-career_hrs)) %>%
  arrange(season,-career_hrs)
# Find the top 10 and top 1 values for each season
p3 <- top_n(p2,10,career_hrs)
p3_top1 <- top_n(p2,1,career_hrs)
p3_top1 <- unique(p3_top1[,c('season','career_hrs')])
names(p3_top1) <- c('season','total_hrs')
# Manually filter out ties at rank 10 so each season has exactly 10 bars
p3$id <- paste0(p3$player_id,p3$season)
p3 <- p3 %>% filter(!id %in% c('evansdw011987','mcgrifr011992','deerro011993','galaran011997','davisch011997'))
p3$bar_rank <- rep(c(1:10),32)
# Join Top 10 and Top 1 datasets and calculate relative weighting
p4 <- inner_join(p3,p3_top1, by = c("season" = "season"))
p4$Value_rel <- p4$career_hrs/p4$total_hrs
{as.character(p4$season)}  # NOTE(review): no-op leftover; only auto-prints at top level
# Build plot
p5 <- ggplot(p4, aes(bar_rank, season, player_name)) +
  geom_tile(aes(y = Value_rel/2, height = Value_rel,width = 0.9), color = NA) +
  geom_text(aes(y = Value_rel, label = paste(player_name, "  ")), vjust = -0.5, hjust = 1, size = 5, color = "white") +
  geom_text(aes(y = Value_rel, label = paste(team, "  ")), vjust = 0.7, hjust = 1, size = 4, color = "white") +
  geom_text(aes(y=Value_rel,label = paste0(" ",round(career_hrs,0)), hjust=0.1), size = 5) +
  geom_text(aes(10, 1),label = {as.character(p4$season)}, size = 20, hjust = 0, vjust = -0.2,color = "grey") +
  coord_flip(clip = "off", expand = FALSE) +
  scale_x_reverse() +
  ylim(0, 1.3) +
  guides(color = FALSE, fill = FALSE) +
  labs(title = 'Top 10 MLB Home Run Hitters, 1985-{closest_state}'
       ,subtitle = "Note: excludes players where salary data is not available."
       ,x = element_blank()
       ,y = 'Home Runs since 1985'
       ,caption = "Author: @WJSutton12, Data: Lahman's Baseball Database") +
  theme(plot.title = element_text(hjust = 0, size = 24),
        plot.subtitle = element_text(hjust = 0, size = 12),
        plot.caption = element_text(vjust = 0.3, size = 12),
        axis.ticks.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.x = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_text(size = 12),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.line = element_line(colour = "black"),
        plot.margin = margin(1,1,1,1, "cm")) +
  transition_states(season, transition_length = 4, state_length = 1) +
  ease_aes('cubic-in-out')
# Render and save the animation
anim <- animate(p5,200, duration = 40, width = 700, height = 500,start_pause=3, end_pause=10)
anim_save("mlb_home_run_bar_chart_race.gif", anim)
|
/2019w19/mlb_chart_build.R
|
permissive
|
wjsutton/Makeover-Monday
|
R
| false
| false
| 4,181
|
r
|
### Makeover Monday Week 19 2019
### Bar Chart Race Gif
# Builds an animated "bar chart race" of the top 10 MLB career home-run
# hitters from 1985 onward and saves it as a GIF.
# Fix below: the caption string contained an unescaped apostrophe inside
# single quotes ('...Lahman's...'), which is a syntax error; it now uses
# double quotes with identical content.
# Load Libraries
library(gganimate)
library(janitor)
library(gifski)
library(dplyr)
library(zoo)
theme_set(theme_classic())
# Load Dataset
df <- read.csv("https://query.data.world/s/u3stzqkmghigjrrzio7vyfzch4gqe5", header=TRUE, stringsAsFactors=FALSE)
# Prepare Dataset
top_10_homerunners <- clean_names(df)
top_10_homerunners <- top_10_homerunners[,c('player_id','player_name','season','team','hr')]
# Collapse multi-team seasons into one row per player/season ("TeamA/TeamB")
top_10_homerunners <- top_10_homerunners %>%
  group_by(player_id,player_name,season) %>%
  mutate(team = paste(team, collapse="/"))
top_10_homerunners <- top_10_homerunners %>%
  group_by(player_id,player_name,season,team) %>%
  summarise(hr = sum(hr))
# Creating data so each player has every season associated with them in the data
for(i in 1985:2016){
  missing_data <- data.frame(unique((top_10_homerunners %>% filter(season != i))[,c('player_id','player_name')]),season=i,team=NA,hr=0)
  if(i == 1985){
    missing_df <- missing_data
  }
  if(i != 1985){
    missing_df <- rbind(missing_df,missing_data)
  }
}
# Merge to existing dataset
top_10_homerunners <- rbind(as.data.frame(top_10_homerunners),missing_df)
top_10_homerunners <- top_10_homerunners %>% arrange(player_id,season)
# Fill the NA team values forward; the leading 19 rows have no prior value,
# so they are hard-coded. NOTE(review): this assumes the first player sorted
# by player_id played for the Giants -- verify against the data.
top_10_homerunners$team <- c(rep('San Francisco Giants',19),na.locf(top_10_homerunners$team))
top_10_homerunners <- top_10_homerunners %>%
  group_by(player_id,player_name,season,team) %>%
  summarise(hr = sum(hr))
# Calculate running sum of home runs
p <- top_10_homerunners %>%
  arrange(season) %>%
  group_by(player_id) %>%
  mutate(career_hrs = cumsum(hr))
p2 <- p %>%
  group_by(season) %>%
  mutate(rank = rank(-career_hrs)) %>%
  arrange(season,-career_hrs)
# Find the top 10 and top 1 values for each season
p3 <- top_n(p2,10,career_hrs)
p3_top1 <- top_n(p2,1,career_hrs)
p3_top1 <- unique(p3_top1[,c('season','career_hrs')])
names(p3_top1) <- c('season','total_hrs')
# Manually filter out ties at rank 10 so each season has exactly 10 bars
p3$id <- paste0(p3$player_id,p3$season)
p3 <- p3 %>% filter(!id %in% c('evansdw011987','mcgrifr011992','deerro011993','galaran011997','davisch011997'))
p3$bar_rank <- rep(c(1:10),32)
# Join Top 10 and Top 1 datasets and calculate relative weighting
p4 <- inner_join(p3,p3_top1, by = c("season" = "season"))
p4$Value_rel <- p4$career_hrs/p4$total_hrs
{as.character(p4$season)}  # NOTE(review): no-op leftover; only auto-prints at top level
# Build plot
p5 <- ggplot(p4, aes(bar_rank, season, player_name)) +
  geom_tile(aes(y = Value_rel/2, height = Value_rel,width = 0.9), color = NA) +
  geom_text(aes(y = Value_rel, label = paste(player_name, "  ")), vjust = -0.5, hjust = 1, size = 5, color = "white") +
  geom_text(aes(y = Value_rel, label = paste(team, "  ")), vjust = 0.7, hjust = 1, size = 4, color = "white") +
  geom_text(aes(y=Value_rel,label = paste0(" ",round(career_hrs,0)), hjust=0.1), size = 5) +
  geom_text(aes(10, 1),label = {as.character(p4$season)}, size = 20, hjust = 0, vjust = -0.2,color = "grey") +
  coord_flip(clip = "off", expand = FALSE) +
  scale_x_reverse() +
  ylim(0, 1.3) +
  guides(color = FALSE, fill = FALSE) +
  labs(title = 'Top 10 MLB Home Run Hitters, 1985-{closest_state}'
       ,subtitle = "Note: excludes players where salary data is not available."
       ,x = element_blank()
       ,y = 'Home Runs since 1985'
       ,caption = "Author: @WJSutton12, Data: Lahman's Baseball Database") +
  theme(plot.title = element_text(hjust = 0, size = 24),
        plot.subtitle = element_text(hjust = 0, size = 12),
        plot.caption = element_text(vjust = 0.3, size = 12),
        axis.ticks.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.x = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_text(size = 12),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.line = element_line(colour = "black"),
        plot.margin = margin(1,1,1,1, "cm")) +
  transition_states(season, transition_length = 4, state_length = 1) +
  ease_aes('cubic-in-out')
# Render and save the animation
anim <- animate(p5,200, duration = 40, width = 700, height = 500,start_pause=3, end_pause=10)
anim_save("mlb_home_run_bar_chart_race.gif", anim)
|
# Compute average run length (ARL) statistics for three multivariate control
# charts (Hotelling T2, MEWMA, MCUSUM) from copula simulation output.
# Depends on ./parameters.R for cARL and (presumably) the tidyverse packages
# used below (dplyr/tidyr/tibble/purrr, plus mean_cl_normal) -- TODO confirm.
source('./parameters.R')
options(dplyr.summarise.inform=F, width = 110)
# Load cached simulation results only once per session
if(!exists("simData")) {simData <- readRDS("Data/simData.rds")}
rhoList <- levels(simData$rho)
shiftList <- levels(simData$shift)
copulaList <- levels(simData$copula)
iterations <- max(simData$iteration)
size <- max(simData$N)
Ncopula <- length(copulaList)
Nshifts <- length(shiftList)
Nrho <- length(rhoList)
# Upper control limits per chart: the (cARL-1)/cARL quantile of the
# in-control ("0/0" shift) statistic, arranged as copula x rho matrices.
t2ucl <- simData %>%
  filter(shift == "0/0") %>%
  group_by(copula, rho) %>%
  summarise(UCLt2 = quantile(t2, probs = (cARL - 1)/cARL)) %>%
  select(copula, rho, UCLt2) %>%
  spread(rho, UCLt2)%>%
  column_to_rownames(var="copula")
mewmaucl <- simData %>%
  filter(shift == "0/0") %>%
  group_by(copula, rho) %>%
  summarise(UCLme = quantile(mewma, probs = (cARL - 1)/cARL)) %>%
  select(copula, rho, UCLme) %>%
  spread(rho, UCLme)%>%
  column_to_rownames(var="copula")
mcusumucl <- simData %>%
  filter(shift == "0/0") %>%
  group_by(copula, rho) %>%
  summarise(UCLmc = quantile(mcusum, probs = (cARL - 1)/cARL)) %>%
  select(copula, rho, UCLmc) %>%
  spread(rho, UCLmc)%>%
  column_to_rownames(var="copula")
# NOTE(review): the loop below relies on group_split(copula, rho) producing
# groups in the same copula-major, rho-minor order as the rows/columns of the
# UCL matrices above -- this holds only if factor level orders match; verify.
splitData <- simData %>%
  group_split(copula,rho)
tmpData <- list()
index <- 1
for(i in 1:Ncopula) {
  for(j in 1:Nrho) {
    # Per-iteration run length: in-control groups use N / (# exceedances);
    # shifted groups use the index of the first exceedance (0 if none found,
    # Inf if no exceedances at all -- both remapped below).
    tmpData[[index]] <- splitData[[index]] %>%
      group_by(copula, rho, shift, iteration) %>%
      summarise(t2ARL = ifelse(shift == "0/0", size/sum(t2 > t2ucl[i,j]),
                               detect_index(t2, function(z)(z>t2ucl[i,j]))),
                meARL = ifelse(shift == "0/0", size/sum(mewma > mewmaucl[i,j]),
                               detect_index(mewma, function(z)(z>mewmaucl[i,j]))),
                mcARL = ifelse(shift == "0/0", size/sum(mcusum > mcusumucl[i,j]),
                               detect_index(mcusum, function(z)(z>mcusumucl[i,j])))) %>%
      mutate(t2ARL = ifelse(is.infinite(t2ARL), size, t2ARL),
             meARL = ifelse(is.infinite(meARL), size, meARL),
             mcARL = ifelse(is.infinite(mcARL), size, mcARL))
    index <- index + 1
  }
}
# Free the per-group copies before aggregating
rm(splitData)
gc()
# Mean ARL with normal-theory confidence intervals per copula/rho/shift
ARL <- bind_rows(tmpData) %>%
  group_by(copula, rho, shift) %>%
  summarise(t2ci = list(mean_cl_normal(t2ARL) %>%
                          rename(t2ARLmean=y, t2ARLlwr=ymin, t2ARLupr=ymax)),
            meci = list(mean_cl_normal(meARL) %>%
                          rename(meARLmean=y, meARLlwr=ymin, meARLupr=ymax)),
            mcci = list(mean_cl_normal(mcARL) %>%
                          rename(mcARLmean=y, mcARLlwr=ymin, mcARLupr=ymax))) %>%
  unnest(cols = c(t2ci, meci, mcci))
# Report the in-control ARLs (should be close to cARL by construction)
ARL %>%
  filter(shift == "0/0") %>%
  print(width = Inf)
options(dplyr.summarise.inform=T, width = 80)
|
/analysis.R
|
permissive
|
Heril/CopulaControl
|
R
| false
| false
| 2,633
|
r
|
# Compute average run length (ARL) statistics for three multivariate control
# charts (Hotelling T2, MEWMA, MCUSUM) from copula simulation output.
# Depends on ./parameters.R for cARL and (presumably) the tidyverse packages
# used below (dplyr/tidyr/tibble/purrr, plus mean_cl_normal) -- TODO confirm.
source('./parameters.R')
options(dplyr.summarise.inform=F, width = 110)
# Load cached simulation results only once per session
if(!exists("simData")) {simData <- readRDS("Data/simData.rds")}
rhoList <- levels(simData$rho)
shiftList <- levels(simData$shift)
copulaList <- levels(simData$copula)
iterations <- max(simData$iteration)
size <- max(simData$N)
Ncopula <- length(copulaList)
Nshifts <- length(shiftList)
Nrho <- length(rhoList)
# Upper control limits per chart: the (cARL-1)/cARL quantile of the
# in-control ("0/0" shift) statistic, arranged as copula x rho matrices.
t2ucl <- simData %>%
  filter(shift == "0/0") %>%
  group_by(copula, rho) %>%
  summarise(UCLt2 = quantile(t2, probs = (cARL - 1)/cARL)) %>%
  select(copula, rho, UCLt2) %>%
  spread(rho, UCLt2)%>%
  column_to_rownames(var="copula")
mewmaucl <- simData %>%
  filter(shift == "0/0") %>%
  group_by(copula, rho) %>%
  summarise(UCLme = quantile(mewma, probs = (cARL - 1)/cARL)) %>%
  select(copula, rho, UCLme) %>%
  spread(rho, UCLme)%>%
  column_to_rownames(var="copula")
mcusumucl <- simData %>%
  filter(shift == "0/0") %>%
  group_by(copula, rho) %>%
  summarise(UCLmc = quantile(mcusum, probs = (cARL - 1)/cARL)) %>%
  select(copula, rho, UCLmc) %>%
  spread(rho, UCLmc)%>%
  column_to_rownames(var="copula")
# NOTE(review): the loop below relies on group_split(copula, rho) producing
# groups in the same copula-major, rho-minor order as the rows/columns of the
# UCL matrices above -- this holds only if factor level orders match; verify.
splitData <- simData %>%
  group_split(copula,rho)
tmpData <- list()
index <- 1
for(i in 1:Ncopula) {
  for(j in 1:Nrho) {
    # Per-iteration run length: in-control groups use N / (# exceedances);
    # shifted groups use the index of the first exceedance (0 if none found,
    # Inf if no exceedances at all -- both remapped below).
    tmpData[[index]] <- splitData[[index]] %>%
      group_by(copula, rho, shift, iteration) %>%
      summarise(t2ARL = ifelse(shift == "0/0", size/sum(t2 > t2ucl[i,j]),
                               detect_index(t2, function(z)(z>t2ucl[i,j]))),
                meARL = ifelse(shift == "0/0", size/sum(mewma > mewmaucl[i,j]),
                               detect_index(mewma, function(z)(z>mewmaucl[i,j]))),
                mcARL = ifelse(shift == "0/0", size/sum(mcusum > mcusumucl[i,j]),
                               detect_index(mcusum, function(z)(z>mcusumucl[i,j])))) %>%
      mutate(t2ARL = ifelse(is.infinite(t2ARL), size, t2ARL),
             meARL = ifelse(is.infinite(meARL), size, meARL),
             mcARL = ifelse(is.infinite(mcARL), size, mcARL))
    index <- index + 1
  }
}
# Free the per-group copies before aggregating
rm(splitData)
gc()
# Mean ARL with normal-theory confidence intervals per copula/rho/shift
ARL <- bind_rows(tmpData) %>%
  group_by(copula, rho, shift) %>%
  summarise(t2ci = list(mean_cl_normal(t2ARL) %>%
                          rename(t2ARLmean=y, t2ARLlwr=ymin, t2ARLupr=ymax)),
            meci = list(mean_cl_normal(meARL) %>%
                          rename(meARLmean=y, meARLlwr=ymin, meARLupr=ymax)),
            mcci = list(mean_cl_normal(mcARL) %>%
                          rename(mcARLmean=y, mcARLlwr=ymin, mcARLupr=ymax))) %>%
  unnest(cols = c(t2ci, meci, mcci))
# Report the in-control ARLs (should be close to cARL by construction)
ARL %>%
  filter(shift == "0/0") %>%
  print(width = Inf)
options(dplyr.summarise.inform=T, width = 80)
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% SingleArrayUnitModel.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{SingleArrayUnitModel}
\docType{class}
\alias{SingleArrayUnitModel}
\title{The SingleArrayUnitModel class}
\description{
Package: aroma.affymetrix \cr
\bold{Class SingleArrayUnitModel}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.affymetrix]{Model}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{UnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\emph{\code{SingleArrayUnitModel}}\cr
\bold{Directly known subclasses:}\cr
\cr
public abstract static class \bold{SingleArrayUnitModel}\cr
extends \emph{\link[aroma.affymetrix]{UnitModel}}\cr
This abstract class represents a unit model that fits one model per unit
based on signals from a single array.
The nature of a single-array unit model is that each array can be fitted
independently of the others.
}
\usage{
SingleArrayUnitModel(...)
}
\arguments{
\item{...}{Arguments passed to \code{\link{UnitModel}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{fit} \tab -\cr
}
\bold{Methods inherited from UnitModel}:\cr
findUnitsTodo, getAsteriskTags, getFitSingleCellUnitFunction, getParameters
\bold{Methods inherited from Model}:\cr
as.character, fit, getAlias, getAsteriskTags, getDataSet, getFullName, getName, getPath, getRootPath, getTags, setAlias, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
/man/SingleArrayUnitModel.Rd
|
no_license
|
HenrikBengtsson/aroma.affymetrix
|
R
| false
| false
| 2,250
|
rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% SingleArrayUnitModel.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{SingleArrayUnitModel}
\docType{class}
\alias{SingleArrayUnitModel}
\title{The SingleArrayUnitModel class}
\description{
Package: aroma.affymetrix \cr
\bold{Class SingleArrayUnitModel}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.affymetrix]{Model}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{UnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\emph{\code{SingleArrayUnitModel}}\cr
\bold{Directly known subclasses:}\cr
\cr
public abstract static class \bold{SingleArrayUnitModel}\cr
extends \emph{\link[aroma.affymetrix]{UnitModel}}\cr
This abstract class represents a unit model that fits one model per unit
based on signals from a single array.
The nature of a single-array unit model is that each array can be fitted
independently of the others.
}
\usage{
SingleArrayUnitModel(...)
}
\arguments{
\item{...}{Arguments passed to \code{\link{UnitModel}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{fit} \tab -\cr
}
\bold{Methods inherited from UnitModel}:\cr
findUnitsTodo, getAsteriskTags, getFitSingleCellUnitFunction, getParameters
\bold{Methods inherited from Model}:\cr
as.character, fit, getAlias, getAsteriskTags, getDataSet, getFullName, getName, getPath, getRootPath, getTags, setAlias, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
# Plot1: histogram of Global Active Power for 1-2 Feb 2007, saved to Plot1.png.
rm(list = ls(all = TRUE))
setwd("H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs")
###Download the zip file in Graphs folder
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
              destfile="H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs/power_consumption.zip")
unzip(zipfile="H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs/power_consumption.zip",
      exdir="H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs")
list.files()
###Enable packages to use
library(data.table) # a package that handles data frames better
library(dplyr) # for data table manipulations and organization
###Read household power consumption data ("?" marks missing values)
consumption <- read.table("household_power_consumption.txt", sep=";", header = TRUE, na.strings = "?")
### Create 2 new variables by transforming Date
# NOTE(review): strptime() returns POSIXlt; storing it in a data.frame column
# is fragile -- as.POSIXct() would be safer here.
consumption$Date_2 <- strptime(paste(consumption$Date, consumption$Time), "%d/%m/%Y %H:%M:%S")
consumption$Date_3 <- as.Date(consumption$Date_2)
###Subsetting household power consumption data to 2007-02-01..2007-02-02
consumption1 <- subset(consumption, Date_3 >= as.Date("2007-02-01") & Date_3 <= as.Date("2007-02-02"))
#Plot1: write the histogram to a 480x480 PNG
png(file = "Plot1.png", width = 480, height = 480)
with(consumption1, hist(Global_active_power, col = "red",
                        xlab = "Global Active Power (kilowatts)",
                        main = "Global Active Power" ))
dev.off()
|
/Plot1.R
|
no_license
|
bachsamir/ExData_Plotting1
|
R
| false
| false
| 1,495
|
r
|
# Plot1: histogram of Global Active Power for 1-2 Feb 2007, saved to Plot1.png.
rm(list = ls(all = TRUE))
setwd("H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs")
###Download the zip file in Graphs folder
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
              destfile="H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs/power_consumption.zip")
unzip(zipfile="H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs/power_consumption.zip",
      exdir="H:/2016_Samsung/Coursera_Video/Data Sciences/R_Exemples/Graphs")
list.files()
###Enable packages to use
library(data.table) # a package that handles data frames better
library(dplyr) # for data table manipulations and organization
###Read household power consumption data ("?" marks missing values)
consumption <- read.table("household_power_consumption.txt", sep=";", header = TRUE, na.strings = "?")
### Create 2 new variables by transforming Date
# NOTE(review): strptime() returns POSIXlt; storing it in a data.frame column
# is fragile -- as.POSIXct() would be safer here.
consumption$Date_2 <- strptime(paste(consumption$Date, consumption$Time), "%d/%m/%Y %H:%M:%S")
consumption$Date_3 <- as.Date(consumption$Date_2)
###Subsetting household power consumption data to 2007-02-01..2007-02-02
consumption1 <- subset(consumption, Date_3 >= as.Date("2007-02-01") & Date_3 <= as.Date("2007-02-02"))
#Plot1: write the histogram to a 480x480 PNG
png(file = "Plot1.png", width = 480, height = 480)
with(consumption1, hist(Global_active_power, col = "red",
                        xlab = "Global Active Power (kilowatts)",
                        main = "Global Active Power" ))
dev.off()
|
% Please edit documentation in R/shiny.R
\name{tidy_app}
\alias{tidy_app}
\title{A Shiny app to format R code}
\usage{
tidy_app()
}
\description{
This function calls \code{\link{tidy_source}()} to format R code in a Shiny
app. The arguments of \code{tidy_source()} are presented in the app as input
widgets such as checkboxes.
}
\examples{
if (interactive()) formatR::tidy_app()
}
|
/man/tidy_app.Rd
|
no_license
|
badbye/formatR
|
R
| false
| false
| 381
|
rd
|
% Please edit documentation in R/shiny.R
\name{tidy_app}
\alias{tidy_app}
\title{A Shiny app to format R code}
\usage{
tidy_app()
}
\description{
This function calls \code{\link{tidy_source}()} to format R code in a Shiny
app. The arguments of \code{tidy_source()} are presented in the app as input
widgets such as checkboxes.
}
\examples{
if (interactive()) formatR::tidy_app()
}
|
cols <- c("OZIP","D","F")
dta <- c(
"0",0.01,1,
"0.000014",0.01,1,
"0.0000305",0.01,1,
"0000",0.01,2,
"00000",0.01,1,
"006463607",1,0.01,
"00693",1,0.01,
"00719",1,0.01,
"00726-9324",1,0.01,
"00820",2,0.01,
"00907-2420",1,0.01,
"00924-4073",0.01,1,
"00926",1,2,
"00926-5117",1,0.01,
"00952-4055",1,0.01,
"01060-1639",1,0.01,
"010819602",1,0.01,
"01106",1,0.01,
"01107-1246",1,0.01,
"01450-1346",1,0.01,
"01501",1,0.01,
"01519",1,0.01,
"015241272",1,0.01,
"01534",1,0.01,
"01545",2,0.01,
"01564-1508",1,0.01,
"016031838",1,0.01,
"01604",1,0.01,
"01702",1,0.01,
"01720-4440",1,0.01,
"01740",1,0.01,
"017422225",1,0.01,
"017423454",1,0.01,
"017481516",1,0.01,
"017481845",1,0.01,
"01760",1,0.01,
"018032745",1,0.01,
"01810",0.01,1,
"01840-1025",1,0.01,
"01845",1,0.01,
"01845-5310",1,0.01,
"018456310",1,0.01,
"018641923",1,0.01,
"018673826",1,0.01,
"019151356",1,0.01,
"01922-1125",1,0.01,
"01940",1,0.01,
"02021-3192",1,0.01,
"02025",1,0.01,
"02038",1,0.01,
"020431961",1,0.01,
"020506372",1,0.01,
"020522907",1,0.01,
"020522910",1,0.01,
"020523146",1,0.01,
"020814363",1,0.01,
"020903041",1,0.01,
"020931720",1,0.01,
"02122",1,0.01,
"02122-2810",1,0.01,
"02129",1,0.01,
"021303439",1,0.01,
"02135",1,0.01,
"02135-2517",1,0.01,
"02136-3602",1,0.01,
"02139-3174",1,0.01,
"02143",1,0.01,
"021451027",1,0.01,
"02155",1,0.01,
"021691624",1,0.01,
"021804314",1,0.01,
"021847314",1,0.01,
"021862229",1,0.01,
"023014085",1,0.01,
"02332",1,0.01,
"023641366",1,0.01,
"024465428",1,0.01,
"02459",1,0.01,
"024591333",1,0.01,
"02466-1330",1,0.01,
"02476",1,0.01,
"024813103",1,0.01,
"024941418",1,0.01,
"02537-1262",1,0.01,
"026321948",1,0.01,
"02720-2734",1,0.01,
"02771-3306",1,0.01,
"02806",2,0.01,
"028064801",1,0.01,
"02809",2,0.01,
"028184104",1,0.01,
"02871",1,0.01,
"028714031",1,0.01,
"030332476",1,0.01,
"030543300",1,0.01,
"03076",1,0.01,
"03216",1,0.01,
"03301-6934",1,0.01,
"033016915",1,0.01,
"03814",1,0.01,
"038334016",1,0.01,
"03904",1,0.01,
"04074-9194",1,0.01,
"04074-9445",1,0.01,
"04102-1726",1,0.01,
"054827775",1,0.01,
"06001-3522",1,0.01,
"06010",1,0.01,
"06033-2849",1,0.01,
"06042",1,0.01,
"060701238",1,0.01,
"06106",1,0.01,
"061071603",1,0.01,
"061082817",1,0.01,
"06119-1057",1,0.01,
"06226",1,0.01,
"062813318",1,0.01,
"064432059",1,0.01,
"064432177",1,0.01,
"064506919",1,0.01,
"06459-3211",1,0.01,
"064611652",1,0.01,
"064611877",1,0.01,
"064893419",1,0.01,
"06511",1,0.01,
"06517",1,0.01,
"06606",1,0.01,
"06784-1132",1,0.01,
"06795",2,0.01,
"06798",1,0.01,
"06824",1,0.01,
"068246234",1,0.01,
"06840",1,0.01,
"068404400",1,0.01,
"068406511",1,0.01,
"06855",1,0.01,
"06855-2022",1,0.01,
"068772230",1,0.01,
"06880",1,0.01,
"068802013",1,0.01,
"06883",1,0.01,
"06897-2407",1,0.01,
"07002-3703",1,0.01,
"070091406",1,0.01,
"07042",1,0.01,
"07042-4518",1,0.01,
"07045-9694",1,0.01,
"07047",2,0.01,
"070683712",1,0.01,
"07112",1,0.01,
"07512",1,0.01,
"07645",1,0.01,
"076491318",1,0.01,
"07661",1,0.01,
"07676",1,0.01,
"077015640",1,0.01,
"07726-3304",1,0.01,
"07762-2159",1,0.01,
"07834-2149",1,0.01,
"07850",1,0.01,
"07869",1,0.01,
"07869-1021",1,0.01,
"079014050",1,0.01,
"07920",1,0.01,
"079201506",1,0.01,
"079202306",1,0.01,
"07930",1,0.01,
"079302650",1,0.01,
"07945-2932",1,0.01,
"079603416",1,0.01,
"07974",1,0.01,
"079742521",1,0.01,
"080032669",1,0.01,
"080033432",1,0.01,
"08006",0.01,1,
"08043",1,0.01,
"080543191",1,0.01,
"08204",1,0.01,
"08205",2,0.01,
"08225-1435",1,0.01,
"08330-3409",1,0.01,
"08361",1,0.01,
"084",0.01,1,
"085025346",1,0.01,
"08540",1,0.01,
"085403071",1,0.01,
"085407335",1,0.01,
"085423150",1,0.01,
"085503000",1,0.01,
"08730",1,0.01,
"08730-1421",1,0.01,
"08755",1,0.01,
"08807",1,0.01,
"08816",1,0.01,
"08816-1464",1,0.01,
"08833",1,0.01,
"09107-0008",0.01,1,
"096300011",1,0.01,
"1000",0.01,1,
"10000",0.01,2,
"10001",0.01,1,
"100014",0.01,1,
"10002",0.01,1,
"10002-1964",1,0.01,
"10003",1,0.01,
"100032",0.01,1,
"10004",0.01,1,
"10007",0.01,1,
"10008",0.01,2,
"100080",0.01,1,
"100083",1,1,
"100088",0.01,1,
"10011",2,0.01,
"10012",1,0.01,
"100143295",1,0.01,
"100147229",1,0.01,
"10016",2,0.01,
"1002",1,0.01,
"10021",1,0.01,
"10022",1,0.01,
"10025",1,0.01,
"10026",1,0.01,
"10027",1,0.01,
"10028",1,0.01,
"10029-5152",1,0.01,
"10031",1,0.01,
"10035-1334",1,0.01,
"10128",1,0.01,
"10128-1229",1,0.01,
"102209",0.01,1,
"10240",0.01,1,
"102627",0.01,1,
"10452",1,0.01,
"10456",1,0.01,
"10460-1235",1,0.01,
"10512-4110",1,0.01,
"10522",1,0.01,
"10523-2713",1,0.01,
"10524",1,0.01,
"10538",1,0.01,
"10538-2844",1,0.01,
"105521333",1,0.01,
"10566",1,0.01,
"10583",1,0.01,
"10583-7330",1,0.01,
"10591",1,0.01,
"106-0047",0.01,1,
"10605",0.01,1,
"10707",2,0.01,
"108032117",1,0.01,
"10901",0.01,1,
"109501428",1,0.01,
"109861622",1,0.01,
"110013",1,0.01,
"11020",1,0.01,
"11030",1,0.01,
"11050",3,0.01,
"111",0.01,1,
"11101",1,0.01,
"11102",2,0.01,
"11118",0.01,2,
"11201",3,0.01,
"11207",1,0.01,
"11208-3912",1,0.01,
"11210",1,0.01,
"11213",1,0.01,
"11215",3,0.01,
"11221",2,0.01,
"112255004",1,0.01,
"11226",1,0.01,
"11229-4406",1,0.01,
"11249",1,0.01,
"11254",0.01,1,
"11354",1,0.01,
"11360",1,0.01,
"11372",2,0.01,
"11374",1,0.01,
"11413-2120",1,0.01,
"11416",0.01,1,
"11423",1,0.01,
"11429",1,0.01,
"11431",0.01,1,
"11432",1,0.01,
"11432-2878",1,0.01,
"11510-2233",1,0.01,
"11511",0.01,1,
"11519",0.01,1,
"11520",0.01,1,
"11530-1227",1,0.01,
"115305048",1,0.01,
"11545",1,0.01,
"11560",0.01,1,
"11560-1602",1,0.01,
"115601224",1,0.01,
"115612131",1,0.01,
"11563-1752",1,0.01,
"11566",1,0.01,
"11570-2809",1,0.01,
"115802629",1,0.01,
"116031",0.01,1,
"11705-1231",1,0.01,
"117051756",1,0.01,
"11714",1,0.01,
"11725",1,0.01,
"11733",1,0.01,
"117332017",1,0.01,
"11735",1,0.01,
"11740",1,0.01,
"11741",1,0.01,
"117433414",1,0.01,
"11746",1,0.01,
"117463063",1,0.01,
"11749-5072",1,0.01,
"11758",1,0.01,
"11769",1,0.01,
"11786",1,0.01,
"11786-1822",1,0.01,
"11787",1,0.01,
"11795-3619",1,0.01,
"11801-6416",1,0.01,
"11803",2,0.01,
"11822",0.01,1,
"11940",1,0.01,
"11953",0.01,1,
"12020",1,0.01,
"120372",0.01,1,
"12047",1,0.01,
"12205",1,0.01,
"1230",1,0.01,
"12303",1,0.01,
"12309",1,0.01,
"12311",0.01,1,
"1239",1,0.01,
"12471",1,0.01,
"12514",1,0.01,
"12533",1,0.01,
"12560",0.01,1,
"12603",1,0.01,
"12604",1,0.01,
"127711340",1,0.01,
"12804-1230",1,0.01,
"12866",1,0.01,
"12871",1,0.01,
"13027",2,0.01,
"13031",1,0.01,
"13069",1,0.01,
"13078",1,0.01,
"13104-9657",1,0.01,
"13135",1,0.01,
"13152",1,0.01,
"131529000",1,0.01,
"131529633",1,0.01,
"13206",1,0.01,
"13210",1,0.01,
"13346",1,0.01,
"13421",1,0.01,
"13438",1,0.01,
"135-554",0.01,1,
"136761805",1,0.01,
"1370",1,0.01,
"13713",0.01,1,
"13730",1,0.01,
"13879",0.01,1,
"13904",1,0.01,
"14051",2,0.01,
"14051-1735",1,0.01,
"14063-2344",1,0.01,
"140721128",1,0.01,
"140721981",2,0.01,
"14092",1,0.01,
"14150",1,0.01,
"14170-9715",1,0.01,
"14214",3,0.01,
"14214-1609",1,0.01,
"14217",1,0.01,
"14217-2105",1,0.01,
"142172111",1,0.01,
"14219-1011",1,0.01,
"14220",1,0.01,
"14220-2749",1,0.01,
"14221",2,0.01,
"142212101",1,0.01,
"142221229",1,0.01,
"14223",1,0.01,
"14226",3,0.01,
"142263328",1,0.01,
"142264045",1,0.01,
"142283745",1,0.01,
"14450",1,0.01,
"14506",1,0.01,
"14526",1,0.01,
"14527-9418",1,0.01,
"14534",1,0.01,
"14559",1,0.01,
"145648920",1,0.01,
"145648985",1,0.01,
"14580",1,0.01,
"14605",2,0.01,
"14607",1,0.01,
"14618",1,0.01,
"14619",1,0.01,
"14620",1,0.01,
"14733-1315",1,0.01,
"14850",1,0.01,
"14901",1,0.01,
"14904",2,0.01,
"150092719",1,0.01,
"150103339",1,0.01,
"15024",2,0.01,
"150242501",1,0.01,
"15044",1,0.01,
"15057",1,0.01,
"15068",1,0.01,
"1507",1,0.01,
"15071-1119",1,0.01,
"15085-1300",1,0.01,
"15090",1,0.01,
"15102-3693",1,0.01,
"151391802",1,0.01,
"15143",1,0.01,
"15143-9328",1,0.01,
"15146",1,0.01,
"15156",1,0.01,
"15206",1,0.01,
"152061435",1,0.01,
"15208",1,0.01,
"15213",1,0.01,
"15218",1,0.01,
"152181412",1,0.01,
"15221",1,0.01,
"152262343",1,0.01,
"15227",1,0.01,
"15228",2,0.01,
"152282225",1,0.01,
"152341008",1,0.01,
"15235",1,0.01,
"15237",2,0.01,
"15241",1,0.01,
"15243",1,0.01,
"152431138",1,0.01,
"152431738",1,0.01,
"1532",1,0.01,
"15601",1,0.01,
"15683",1,0.01,
"15701",1,0.01,
"1581",1,0.01,
"16008",2,0.01,
"1608",1,0.01,
"16102",1,0.01,
"16117",1,0.01,
"16125",1,0.01,
"16146-3714",1,0.01,
"16188",0.01,2,
"16303",1,0.01,
"16316",1,0.01,
"16415",1,0.01,
"16502",1,0.01,
"16505",1,0.01,
"16506",2,0.01,
"165111423",1,0.01,
"16648",1,0.01,
"16801",2,0.01,
"16823",1,0.01,
"170032012",1,0.01,
"17013",1,0.01,
"17044",1,0.01,
"17050",1,0.01,
"172368715",1,0.01,
"17325-1400",1,0.01,
"173314116",1,0.01,
"175409740",1,0.01,
"1760",1,0.01,
"17815",2,0.01,
"17824",1,0.01,
"17837",1,0.01,
"18020-7848",1,0.01,
"18031",1,0.01,
"18036",1,0.01,
"18302-6661",1,0.01,
"1845",2,0.01,
"1854",0.01,1,
"18612",1,0.01,
"18612-8902",1,0.01,
"1864",2,0.01,
"187043506",1,0.01,
"1890",2,0.01,
"18901-2965",1,0.01,
"18929",1,0.01,
"18932",1,0.01,
"18974",1,0.01,
"189741846",1,0.01,
"19010",1,0.01,
"19041",1,0.01,
"190632114",1,0.01,
"19081-1512",1,0.01,
"19083",1,0.01,
"19087",2,0.01,
"19087-5543",1,0.01,
"19121",1,0.01,
"19135-3508",1,0.01,
"19146",1,0.01,
"19146-1610",1,0.01,
"19152-2214",1,0.01,
"19312-2801",1,0.01,
"19333",1,0.01,
"19335",1,0.01,
"193432647",1,0.01,
"19348",1,0.01,
"19355",2,0.01,
"193551262",1,0.01,
"19373",1,0.01,
"19382",1,0.01,
"19403-1221",1,0.01,
"19426",1,0.01,
"194261446",1,0.01,
"194403049",1,0.01,
"1945",1,0.01,
"19454",2,0.01,
"194543619",1,0.01,
"19606",1,0.01,
"196061401",1,0.01,
"19608-8502",1,0.01,
"19702",1,0.01,
"19703",1,0.01,
"19709",1,0.01,
"19709-2228",1,0.01,
"19810",1,0.01,
"1985",1,0.01,
"20001",1,0.01,
"200013509",1,0.01,
"20002",0.01,1,
"20003-2107",1,0.01,
"200052",0.01,1,
"20007",1,0.01,
"200072",0.01,1,
"200073907",1,0.01,
"20008",2,0.01,
"200083403",1,0.01,
"200086",0.01,1,
"20009",2,0.01,
"20009-1532",1,0.01,
"20010",1,0.01,
"200120",0.01,1,
"20015",2,0.01,
"20016",1,0.01,
"20017",1,0.01,
"20019",1,0.01,
"20019-1726",1,0.01,
"200433",0.01,1,
"200444",0.01,1,
"20080",0.01,1,
"20105",1,0.01,
"201204",0.01,1,
"201206",0.01,2,
"201242350",1,0.01,
"201301",0.01,1,
"201306",0.01,1,
"20136",1,0.01,
"20136-1901",1,0.01,
"201474482",1,0.01,
"20161",0.01,1,
"20165",1,0.01,
"201754335",1,0.01,
"202150",0.01,1,
"206015606",1,0.01,
"20603",1,0.01,
"2067",1,0.01,
"206772056",1,0.01,
"20705",1,0.01,
"20716",1,0.01,
"207213217",1,0.01,
"20740",1,0.01,
"207402758",1,0.01,
"20745",1,0.01,
"20746",1,0.01,
"20769",1,0.01,
"20782",1,0.01,
"207851348",1,0.01,
"2081",1,0.01,
"20815-4245",1,0.01,
"208154072",1,0.01,
"208155739",1,0.01,
"20816",1,0.01,
"208162314",1,0.01,
"20817",1,0.01,
"208173250",1,0.01,
"208176545",1,0.01,
"20832",3,0.01,
"208414353",1,0.01,
"208505470",1,0.01,
"208507502",1,0.01,
"20852",3,0.01,
"20854",2,0.01,
"208661938",1,0.01,
"208766341",1,0.01,
"20878",2,0.01,
"20895",1,0.01,
"2090",1,0.01,
"20901",2,0.01,
"20901-2505",1,0.01,
"20903",1,0.01,
"20904",1,0.01,
"20910",3,0.01,
"209104250",1,0.01,
"209125850",1,0.01,
"209126878",1,0.01,
"21012-1628",1,0.01,
"21017",1,0.01,
"21030",1,0.01,
"21042",2,0.01,
"210422044",1,0.01,
"21044-3749",1,0.01,
"21045",1,0.01,
"21090",1,0.01,
"21093",1,0.01,
"21113",1,0.01,
"21113-1521",1,0.01,
"211141835",1,0.01,
"21117",1,0.01,
"21128",1,0.01,
"21133",1,0.01,
"21146",1,0.01,
"21146-1384",1,0.01,
"2115",0.01,1,
"21157",1,0.01,
"211573806",1,0.01,
"21163",1,0.01,
"21204",1,0.01,
"21204-3510",1,0.01,
"212086370",1,0.01,
"21209",1,0.01,
"21209-3860",1,0.01,
"21215",1,0.01,
"21220-3768",1,0.01,
"21224",1,0.01,
"21228",2,0.01,
"21234",1,0.01,
"21237",1,0.01,
"2134",1,0.01,
"2139",1,0.01,
"21400",0.01,1,
"2144",1,0.01,
"2149",1,0.01,
"21702",1,0.01,
"21703",1,0.01,
"21742",1,0.01,
"21774-6808",1,0.01,
"21788",1,0.01,
"21793-9164",1,0.01,
"218634457",1,0.01,
"21921",1,0.01,
"22003",1,0.01,
"22015",1,0.01,
"22015-4414",1,0.01,
"22030",1,0.01,
"22033",2,0.01,
"22046",1,0.01,
"22066",2,0.01,
"22066-1517",1,0.01,
"221000",0.01,1,
"22101",1,0.01,
"22102",2,0.01,
"22152",1,0.01,
"22181",1,0.01,
"22182-5315",1,0.01,
"22192",1,0.01,
"22201",1,0.01,
"22203",1,0.01,
"222031113",1,0.01,
"222041449",1,0.01,
"22206",1,0.01,
"22209",1,0.01,
"22300",0.01,1,
"22301",1,0.01,
"223051213",1,0.01,
"223062408",1,0.01,
"22307",1,0.01,
"22314",1,0.01,
"224013739",1,0.01,
"22500",0.01,1,
"22554",2,0.01,
"226001",0.01,1,
"22602-6834",1,0.01,
"22611",3,0.01,
"22660-9779",1,0.01,
"22801",1,0.01,
"22802",1,0.01,
"23005",1,0.01,
"231",0.01,1,
"23112",1,0.01,
"23149",0.01,1,
"23188",1,0.01,
"23188-1023",1,0.01,
"23224",1,0.01,
"232251417",1,0.01,
"232331128",1,0.01,
"232363358",1,0.01,
"234",0.01,1,
"23452",1,0.01,
"23454",1,0.01,
"23457",1,0.01,
"23464",1,0.01,
"23508",1,0.01,
"23662",1,0.01,
"236621441",1,0.01,
"23666",1,0.01,
"236800",0.01,1,
"23693",1,0.01,
"2375",1,0.01,
"239229",0.01,1,
"240604925",1,0.01,
"241",0.01,1,
"24153",1,0.01,
"2421",1,0.01,
"2446",2,0.01,
"24502",2,0.01,
"2459",3,0.01,
"2465",1,0.01,
"2467",1,0.01,
"2482",2,0.01,
"2492",1,0.01,
"2536",1,0.01,
"25500",0.01,1,
"25801",1,0.01,
"26443",0.01,1,
"2648",1,0.01,
"26531",1,0.01,
"27025",1,0.01,
"27055",1,0.01,
"27103",3,0.01,
"27104",1,0.01,
"27106",1,0.01,
"272627460",1,0.01,
"27278",1,0.01,
"27358",1,0.01,
"27408-4415",1,0.01,
"27409",1,0.01,
"274103211",1,0.01,
"27516",1,0.01,
"27516-1925",1,0.01,
"275160450",1,0.01,
"275162357",1,0.01,
"27519",1,0.01,
"27560",1,0.01,
"27587",1,0.01,
"275879597",1,0.01,
"27592",1,0.01,
"27607",1,0.01,
"27610",1,0.01,
"27612",1,0.01,
"276137009",1,0.01,
"27617",1,0.01,
"27703",1,0.01,
"27713",3,0.01,
"27823",1,0.01,
"279549483",1,0.01,
"28023",1,0.01,
"28031",1,0.01,
"28036",2,0.01,
"280781252",1,0.01,
"28081",1,0.01,
"281056844",1,0.01,
"28172",1,0.01,
"28173",1,0.01,
"28213",1,0.01,
"28269",1,0.01,
"28314",1,0.01,
"2835",1,0.01,
"28412",1,0.01,
"28460",1,0.01,
"28539-4554",1,0.01,
"28540",1,0.01,
"28562",1,0.01,
"2860",2,0.01,
"28704",2,0.01,
"28729",1,0.01,
"2874",1,0.01,
"2879",1,0.01,
"288032434",1,0.01,
"29016",1,0.01,
"29020",1,0.01,
"29036",3,0.01,
"29045",2,0.01,
"29063",1,0.01,
"2908",1,0.01,
"2916",1,0.01,
"29163",1,0.01,
"29201",1,0.01,
"29225-4002",1,0.01,
"29302",2,0.01,
"29341",1,0.01,
"29379",1,0.01,
"29403",1,0.01,
"29625",1,0.01,
"29631",1,0.01,
"296504055",1,0.01,
"29651",1,0.01,
"296815154",1,0.01,
"29687",1,0.01,
"29707",1,0.01,
"29715",1,0.01,
"29920",1,0.01,
"29928",1,0.01,
"30004",2,0.01,
"30005",1,0.01,
"300071",0.01,2,
"30014",1,0.01,
"300193102",1,0.01,
"30021",1,0.01,
"30041",1,0.01,
"300419309",1,0.01,
"30062",1,0.01,
"30062-5793",1,0.01,
"30066",1,0.01,
"300664772",1,0.01,
"30071",1,0.01,
"30076",1,0.01,
"30080-6471",1,0.01,
"30087",1,0.01,
"300922252",1,0.01,
"30097",2,0.01,
"30126",1,0.01,
"30127",1,0.01,
"30269",2,0.01,
"30277",1,0.01,
"30297",1,0.01,
"30305",1,0.01,
"30308",1,0.01,
"303195047",1,0.01,
"30324-7203",1,0.01,
"303245222",1,0.01,
"30328",1,0.01,
"30338",1,0.01,
"30345",2,0.01,
"30350-3513",1,0.01,
"3038",2,0.01,
"3051",1,0.01,
"305423551",1,0.01,
"3060",1,0.01,
"30601",1,0.01,
"30605",1,0.01,
"30606",2,0.01,
"306066239",1,0.01,
"3064",1,0.01,
"306772488",1,0.01,
"307203088",1,0.01,
"30721",1,0.01,
"30809",1,0.01,
"30813",1,0.01,
"30892",0.01,1,
"309090119",1,0.01,
"310018",0.01,1,
"31005",1,0.01,
"31036",1,0.01,
"31088",2,0.01,
"31220",1,0.01,
"313001",0.01,1,
"31324",1,0.01,
"31401",1,0.01,
"31406",1,0.01,
"31410",1,2,
"314111337",1,0.01,
"31419",1,0.01,
"315300",0.01,1,
"31901",1,0.01,
"32024",1,0.01,
"320810547",1,0.01,
"32082",1,0.01,
"320920734",1,0.01,
"32128",1,0.01,
"32168-5347",1,0.01,
"32202",1,0.01,
"32211",1,0.01,
"32212-5112",1,0.01,
"32224",1,0.01,
"32256-9509",1,0.01,
"32259",1,0.01,
"3229",1,0.01,
"32301",1,0.01,
"32304",2,0.01,
"32540",1,0.01,
"32542",1,0.01,
"32605",1,0.01,
"32607",1,0.01,
"32608-2718",1,0.01,
"32701",1,0.01,
"32712",1,0.01,
"32746",1,0.01,
"32765",1,0.01,
"328031928",1,0.01,
"32804",1,0.01,
"32819-7133",1,0.01,
"32821",1,0.01,
"32821-6739",1,0.01,
"32828",1,0.01,
"32835",1,0.01,
"32837",1,0.01,
"32837-7097",1,0.01,
"32903",1,0.01,
"32940-2214",1,0.01,
"32965",1,0.01,
"330006",0.01,1,
"33009",2,0.01,
"330153966",1,0.01,
"33025",1,0.01,
"33028",1,0.01,
"33040",1,0.01,
"33054-2015",1,0.01,
"33054-6313",1,0.01,
"33062",1,0.01,
"33065",1,0.01,
"33071",1,0.01,
"330762438",1,0.01,
"33102",1,0.01,
"33114",1,0.01,
"33126",1,0.01,
"33129",1,0.01,
"33133",1,0.01,
"33134",1,0.01,
"331438613",1,0.01,
"331444918",1,0.01,
"33146-3145",1,0.01,
"33149",1,0.01,
"33154-2357",1,0.01,
"33155",2,0.01,
"33156-7954",1,0.01,
"331563944",1,0.01,
"33157",1,0.01,
"33160",1,0.01,
"33161",1,0.01,
"33165",1,0.01,
"331663250",1,0.01,
"33174",1,0.01,
"33175",1,0.01,
"33177-6159",1,0.01,
"33178",1,0.01,
"33181",1,0.01,
"33185",1,0.01,
"33308-3003",1,0.01,
"33309-6702",1,0.01,
"33313-5143",1,0.01,
"33319",2,0.01,
"333224807",1,0.01,
"33324",1,0.01,
"333313804",1,0.01,
"33332",1,0.01,
"334148124",1,0.01,
"334184570",1,0.01,
"33428",1,0.01,
"33435",1,0.01,
"33444",1,0.01,
"33445",1,0.01,
"334784764",1,0.01,
"33498-6602",1,0.01,
"33558",1,0.01,
"33559",1,0.01,
"33602",1,0.01,
"33606",1,0.01,
"33606-3747",1,0.01,
"33618",2,0.01,
"33624-4504",1,0.01,
"33629",2,0.01,
"33785",1,0.01,
"33812",1,0.01,
"33813",3,0.01,
"33837",1,0.01,
"33870",1,0.01,
"33884",1,0.01,
"33905-5539",1,0.01,
"33967",1,0.01,
"34002",1,0.01,
"34103",2,0.01,
"34105",2,0.01,
"34110",1,0.01,
"34112-5060",1,0.01,
"341123300",1,0.01,
"34114",1,0.01,
"341347421",1,0.01,
"34202",1,0.01,
"342037613",1,0.01,
"34210",1,0.01,
"34232",1,0.01,
"34243",1,0.01,
"34280",1,0.01,
"3431",1,0.01,
"34465-3703",1,0.01,
"34698",1,0.01,
"34711",1,0.01,
"347475001",1,0.01,
"34769",1,0.01,
"347717510",1,0.01,
"34786",1,0.01,
"34996",1,0.01,
"35007",1,0.01,
"35124",1,0.01,
"35127",1,0.01,
"35222",1,0.01,
"35223",1,0.01,
"352426433",1,0.01,
"35405",1,0.01,
"35470",1,0.01,
"35750",1,0.01,
"357586285",1,0.01,
"35802-3750",1,0.01,
"35811",1,0.01,
"36093",1,0.01,
"362651100",1,0.01,
"365323130",1,0.01,
"365326310",1,0.01,
"36605",1,0.01,
"36608",1,0.01,
"366082956",1,0.01,
"36830",1,0.01,
"37027",6,0.01,
"37027-8616",1,0.01,
"37027-8747",1,0.01,
"370275632",2,0.01,
"370278971",1,0.01,
"37040",1,0.01,
"37042",1,0.01,
"37057",0.01,1,
"37064",1,0.01,
"370649484",1,0.01,
"37069",2,0.01,
"37069-6551",1,0.01,
"370691823",1,0.01,
"37072",1,0.01,
"370764310",1,0.01,
"37082",1,0.01,
"370865264",1,0.01,
"370872503",1,0.01,
"37128",1,0.01,
"37135",1,0.01,
"37203",1,0.01,
"37204",1,0.01,
"37205",1,0.01,
"372052819",1,0.01,
"37206",1,0.01,
"37212",1,0.01,
"372152406",1,0.01,
"37220",1,0.01,
"37221-4372",1,0.01,
"374032318",1,0.01,
"37664",1,0.01,
"3768",1,0.01,
"37820",1,0.01,
"37909",1,0.01,
"37916",1,0.01,
"37919",1,0.01,
"37919-4246",1,0.01,
"37922",2,0.01,
"37923",1,0.01,
"380015",0.01,1,
"38002",1,0.01,
"38002-7014",1,0.01,
"38017",4,0.01,
"38017-1637",1,0.01,
"38018",1,0.01,
"38053",1,0.01,
"38103",1,0.01,
"38104",1,0.01,
"381043919",1,0.01,
"381112561",1,0.01,
"38112",3,0.01,
"38116",1,0.01,
"38117",2,0.01,
"38120-1332",1,0.01,
"381203304",1,0.01,
"38122",1,0.01,
"38134",1,0.01,
"381382352",1,0.01,
"38139",1,0.01,
"381396971",1,0.01,
"38255",1,0.01,
"38301",1,0.01,
"38305",1,0.01,
"38348",1,0.01,
"3842",1,0.01,
"38426",0.01,1,
"38580",1,0.01,
"38654-6234",1,0.01,
"39074",1,0.01,
"3908",1,0.01,
"39096",1,0.01,
"39110",1,0.01,
"39202",1,0.01,
"39206",1,0.01,
"392164108",1,0.01,
"39232",1,0.01,
"39401",2,0.01,
"39402",1,0.01,
"39503",1,0.01,
"39648",1,0.01,
"40059",1,0.01,
"400599503",1,0.01,
"400599581",1,0.01,
"40060",0.01,1,
"401122",0.01,1,
"40121",1,0.01,
"40204-1316",1,0.01,
"40206",2,0.01,
"40207",2,0.01,
"402071176",1,0.01,
"40217",1,0.01,
"40219",2,0.01,
"40220",1,0.01,
"40222",1,0.01,
"40223",1,0.01,
"40223-1366",1,0.01,
"40223-2371",1,0.01,
"402231615",1,0.01,
"402232371",1,0.01,
"40241",2,0.01,
"402413127",1,0.01,
"40243",1,0.01,
"40245",3,0.01,
"40245-4577",1,0.01,
"402451843",1,0.01,
"40258-2585",1,0.01,
"40299",1,0.01,
"403241073",1,0.01,
"403831673",1,0.01,
"403838815",1,0.01,
"40422",1,0.01,
"4046",1,0.01,
"40475",2,0.01,
"40502",3,0.01,
"405022313",1,0.01,
"405022444",1,0.01,
"40508",3,0.01,
"40509",4,0.01,
"405112012",1,0.01,
"405119034",1,0.01,
"40513-1400",1,0.01,
"40513-1826",1,0.01,
"40515",4,0.01,
"40515-1129",1,0.01,
"40517",1,0.01,
"40601",1,0.01,
"40965",3,0.01,
"41000",0.01,1,
"41017-4490",1,0.01,
"4105",1,0.01,
"41071-1798",1,0.01,
"41091",2,0.01,
"42223",1,0.01,
"42240-1227",1,0.01,
"42303",2,0.01,
"42701",1,0.01,
"42701-9094",1,0.01,
"430000",0.01,1,
"430064",0.01,1,
"43007",0.01,1,
"430073",0.01,1,
"430074",0.01,1,
"43015-1744",1,0.01,
"43016",3,0.01,
"43016-8659",1,0.01,
"430162221",1,0.01,
"43017",6,0.01,
"43017-8673",1,0.01,
"430178330",1,0.01,
"430178773",1,0.01,
"430219609",1,0.01,
"43023",2,0.01,
"43026",2,0.01,
"43040",1,0.01,
"43054",2,0.01,
"43054-9633",1,0.01,
"430548326",1,0.01,
"430549405",1,0.01,
"43065",2,0.01,
"43065-9594",1,0.01,
"430657051",1,0.01,
"43081",1,0.01,
"430813771",1,0.01,
"430818902",1,0.01,
"43082",1,0.01,
"43082-8919",1,0.01,
"430827757",1,0.01,
"43085",1,0.01,
"43085-2897",1,0.01,
"431131124",1,0.01,
"43119",1,0.01,
"43123",1,0.01,
"43130",1,0.01,
"43135",1,0.01,
"43201",7,0.01,
"43202",1,0.01,
"432042762",1,0.01,
"43205",2,0.01,
"432078619",1,0.01,
"43209",6,0.01,
"432091730",1,0.01,
"432091934",1,0.01,
"43212",2,0.01,
"432123237",1,0.01,
"432123567",1,0.01,
"43213",1,0.01,
"43214",2,0.01,
"43215",4,0.01,
"43219-2741",1,0.01,
"432192972",1,0.01,
"43220",2,0.01,
"432202970",1,0.01,
"432204068",1,0.01,
"43221",1,0.01,
"43221-3049",1,0.01,
"432211227",1,0.01,
"432212337",1,0.01,
"432213205",1,0.01,
"432213765",1,0.01,
"43227",1,0.01,
"43229-1345",1,0.01,
"43230",2,0.01,
"43231",1,0.01,
"432319210",1,0.01,
"432326396",1,0.01,
"43235",1,0.01,
"432357505",1,0.01,
"43326",1,0.01,
"43402",1,0.01,
"43403",1,0.01,
"434109710",1,0.01,
"43412",1,0.01,
"43412-9453",1,0.01,
"43511",1,0.01,
"43522",1,0.01,
"43551",1,0.01,
"43551-2274",1,0.01,
"435515809",1,0.01,
"435519474",1,0.01,
"43560",2,0.01,
"43560-1332",1,0.01,
"435601302",1,0.01,
"435609388",1,0.01,
"43566",1,0.01,
"435669418",1,0.01,
"435719545",1,0.01,
"43614-5508",1,0.01,
"43615",1,0.01,
"436151025",1,0.01,
"436171282",1,0.01,
"43623",1,0.01,
"43623-2646",1,0.01,
"436232930",1,0.01,
"43701",1,0.01,
"440114004",1,0.01,
"44012",3,0.01,
"44012-1317",1,0.01,
"440121977",1,0.01,
"440122534",2,0.01,
"440122929",1,0.01,
"44017",4,0.01,
"440171657",1,0.01,
"44022",5,0.01,
"44022-6675",1,0.01,
"440221314",1,0.01,
"440221334",1,0.01,
"440223300",1,0.01,
"440224245",1,0.01,
"440234568",1,0.01,
"44035",1,0.01,
"440357349",1,0.01,
"440394484",1,0.01,
"44040-9317",1,0.01,
"440409355",1,0.01,
"440409771",1,0.01,
"44056",1,0.01,
"440562411",1,0.01,
"44060",1,0.01,
"440673408",1,0.01,
"440701477",1,0.01,
"44072",2,0.01,
"44074",1,0.01,
"44076",1,0.01,
"440772265",1,0.01,
"440872924",1,0.01,
"44092",1,0.01,
"440945724",1,0.01,
"440949750",1,0.01,
"44095",1,0.01,
"440952504",1,0.01,
"44103",1,0.01,
"44103-2026",1,0.01,
"44106",1,0.01,
"441063220",1,0.01,
"441063241",1,0.01,
"44107",2,0.01,
"44107-1109",1,0.01,
"441082363",1,0.01,
"441092573",1,0.01,
"441093164",1,0.01,
"44111",1,0.01,
"441112846",1,0.01,
"441112847",1,0.01,
"441113948",1,0.01,
"441115817",1,0.01,
"44112-1207",1,0.01,
"44116",2,0.01,
"44116-2709",1,0.01,
"441161202",1,0.01,
"441161443",1,0.01,
"441161659",1,0.01,
"441161868",1,0.01,
"441162354",1,0.01,
"441162847",1,0.01,
"44118",6,0.01,
"44118-2807",1,0.01,
"44118-3506",1,0.01,
"441181224",1,0.01,
"441181342",1,0.01,
"441184509",1,0.01,
"441184661",1,0.01,
"44119-1939",1,0.01,
"44120",3,0.01,
"441201711",1,0.01,
"441203381",1,0.01,
"441203432",1,0.01,
"44121",1,0.01,
"44121-2948",1,0.01,
"44122",4,0.01,
"44122-2604",1,0.01,
"441221740",1,0.01,
"441222104",1,0.01,
"441222935",1,0.01,
"441224812",1,0.01,
"441225037",1,0.01,
"44123-4250",1,0.01,
"441232134",1,0.01,
"44124",1,0.01,
"44124-1427",1,0.01,
"441241305",1,0.01,
"441244818",1,0.01,
"44125",1,0.01,
"441263060",1,0.01,
"44130",2,0.01,
"44131",1,0.01,
"441341903",1,0.01,
"44135-2139",1,0.01,
"441351953",1,0.01,
"44136",3,0.01,
"441367870",1,0.01,
"44137",1,0.01,
"44138",2,0.01,
"441382116",1,0.01,
"44139",1,0.01,
"44139-3430",1,0.01,
"441394673",1,0.01,
"441395925",1,0.01,
"44140",4,0.01,
"441401329",1,0.01,
"441401574",1,0.01,
"441402505",1,0.01,
"441402517",1,0.01,
"44141",5,0.01,
"441411846",1,0.01,
"44142",2,0.01,
"44143",4,0.01,
"441431961",1,0.01,
"44145",3,0.01,
"44145-3064",1,0.01,
"44145-3706",1,0.01,
"441453770",1,0.01,
"441454957",1,0.01,
"441456507",1,0.01,
"441458121",1,0.01,
"44146",1,0.01,
"441463874",1,0.01,
"44147",1,0.01,
"44147-3613",1,0.01,
"44149",1,0.01,
"441496847",1,0.01,
"44202",2,0.01,
"44212",2,0.01,
"442125803",1,0.01,
"44221",1,0.01,
"44223-2989",1,0.01,
"44224-5120",1,0.01,
"442243755",1,0.01,
"44236",1,0.01,
"44236-3111",1,0.01,
"44236-3554",1,0.01,
"44241",1,0.01,
"44256",3,0.01,
"44256-2744",1,0.01,
"44256-4120",1,0.01,
"442567221",1,0.01,
"442569012",1,0.01,
"44273",1,0.01,
"44278",1,0.01,
"44313",2,0.01,
"443213033",1,0.01,
"44333",4,0.01,
"44333-2900",1,0.01,
"443332248",1,0.01,
"443339237",1,0.01,
"44406",1,0.01,
"44410",1,0.01,
"44484",1,0.01,
"44502",1,0.01,
"44511-1355",1,0.01,
"44513",1,0.01,
"44709",1,0.01,
"44714",1,0.01,
"447183223",1,0.01,
"44721",1,0.01,
"4473",1,0.01,
"44805",1,0.01,
"44814-9654",1,0.01,
"44820",1,0.01,
"44839",2,0.01,
"44870",2,0.01,
"45011",2,0.01,
"450119212",1,0.01,
"450144476",1,0.01,
"45030",1,0.01,
"45030-2009",1,0.01,
"45040",2,0.01,
"45040-1175",1,0.01,
"450409457",1,0.01,
"45050",1,0.01,
"450529615",1,0.01,
"45056",1,0.01,
"450668128",1,0.01,
"45069",2,0.01,
"450691167",1,0.01,
"451034047",1,0.01,
"45129",0.01,1,
"45140",3,0.01,
"451408336",1,0.01,
"45144",1,0.01,
"45150",1,0.01,
"451501880",1,0.01,
"45157-9173",1,0.01,
"45202",1,0.01,
"45206",1,0.01,
"45208",5,0.01,
"452081308",2,0.01,
"452081316",1,0.01,
"452081534",1,0.01,
"452081910",1,0.01,
"452082511",1,0.01,
"452082532",1,0.01,
"452082707",1,0.01,
"452083102",1,0.01,
"452083407",1,0.01,
"452084210",2,0.01,
"45209",1,0.01,
"45211",1,0.01,
"45212",1,0.01,
"45212-1924",1,0.01,
"452123219",1,0.01,
"45213",1,0.01,
"45215",3,0.01,
"45220",1,0.01,
"45223",1,0.01,
"45226",2,0.01,
"452261301",1,0.01,
"45227",2,0.01,
"45227-4248",1,0.01,
"452273021",1,0.01,
"45230",4,0.01,
"452303775",1,0.01,
"452305322",1,0.01,
"45231",2,0.01,
"452314440",1,0.01,
"45233",1,0.01,
"45233-4870",1,0.01,
"452331907",1,0.01,
"45236",4,0.01,
"452384330",1,0.01,
"452385807",1,0.01,
"452402812",1,0.01,
"45241",6,0.01,
"452414811",1,0.01,
"45242",7,0.01,
"45242-4531",1,0.01,
"452423201",1,0.01,
"452424458",1,0.01,
"452425907",1,0.01,
"45243",2,0.01,
"452432962",1,0.01,
"452434229",1,0.01,
"45244",1,0.01,
"45245",1,0.01,
"45248",2,0.01,
"45249",3,0.01,
"452492102",1,0.01,
"45251",1,0.01,
"45255",3,0.01,
"453051364",1,0.01,
"45309",1,0.01,
"45343",1,0.01,
"45371",2,0.01,
"45373",1,0.01,
"45402-4306",1,0.01,
"45408",1,0.01,
"45409",4,0.01,
"45415",1,0.01,
"45419",4,0.01,
"45419-3141",1,0.01,
"45420",1,0.01,
"45430",2,0.01,
"45431",1,0.01,
"45432",1,0.01,
"45458",2,0.01,
"454583263",1,0.01,
"454589267",1,0.01,
"45459",1,0.01,
"454591608",1,0.01,
"454591646",1,0.01,
"45631",1,0.01,
"45701",2,0.01,
"45810",1,0.01,
"45810-9762",1,0.01,
"45883",2,0.01,
"46011",1,0.01,
"46032",3,0.01,
"460327709",1,0.01,
"460329146",1,0.01,
"46033",4,0.01,
"460338690",1,0.01,
"460338959",1,0.01,
"46037",2,0.01,
"46037-4344",1,0.01,
"460379300",1,0.01,
"46038",3,0.01,
"46038-5238",1,0.01,
"460386853",1,0.01,
"46055",1,0.01,
"46074",1,0.01,
"46074-0090",1,0.01,
"46074-2228",1,0.01,
"460741406",1,0.01,
"46077",4,0.01,
"46112",1,0.01,
"46113",1,0.01,
"46123",1,0.01,
"46131",1,0.01,
"461311782",1,0.01,
"46135",1,0.01,
"461358781",1,0.01,
"46140",1,0.01,
"46142",1,0.01,
"46143-6028",1,0.01,
"46151",1,0.01,
"46163",2,0.01,
"46184",1,0.01,
"46202",1,0.01,
"46203",2,0.01,
"462041530",1,0.01,
"46205",4,0.01,
"462053423",1,0.01,
"46208",4,0.01,
"46208-2513",1,0.01,
"46214",1,0.01,
"462174343",1,0.01,
"46219",1,0.01,
"46220",3,0.01,
"46220-5803",1,0.01,
"462202880",1,0.01,
"462204245",1,0.01,
"46221",1,0.01,
"46226",1,0.01,
"46226-6385",1,0.01,
"46228",1,0.01,
"462286603",1,0.01,
"462286720",1,0.01,
"46229-1884",1,0.01,
"462314510",1,0.01,
"462353333",1,0.01,
"46236",2,0.01,
"462368980",1,0.01,
"46239",2,0.01,
"46240",3,0.01,
"46250",1,0.01,
"46250-3093",1,0.01,
"462503530",1,0.01,
"46254",1,0.01,
"462568501",1,0.01,
"46259",3,0.01,
"462604352",1,0.01,
"46268",1,0.01,
"46303",2,0.01,
"46303-8556",1,0.01,
"46304",4,0.01,
"463048892",1,0.01,
"46307",12,0.01,
"463071585",1,0.01,
"463079234",1,0.01,
"463079634",1,0.01,
"46311",3,0.01,
"463111108",1,0.01,
"463111283",1,0.01,
"463113072",1,0.01,
"46321",14,0.01,
"46321-2701",2,0.01,
"463211321",1,0.01,
"463212833",1,0.01,
"463213336",1,0.01,
"46322",8,0.01,
"46323",2,0.01,
"46323-3062",1,0.01,
"46324",2,0.01,
"463419162",1,0.01,
"46342",3,0.01,
"463423840",1,0.01,
"46350",3,0.01,
"463507941",1,0.01,
"46356",2,0.01,
"46360",4,0.01,
"46360-2081",1,0.01,
"463604524",1,0.01,
"463606129",1,0.01,
"46368-8713",1,0.01,
"463687735",1,0.01,
"46373",2,0.01,
"46375",4,0.01,
"46375-2386",1,0.01,
"46383",10,0.01,
"463833316",1,0.01,
"463834412",1,0.01,
"463834424",1,0.01,
"46385",5,0.01,
"46394",1,0.01,
"46404",1,0.01,
"46410",4,0.01,
"46506",1,0.01,
"46507",1,0.01,
"46514-4312",1,0.01,
"46517",2,0.01,
"46528",1,0.01,
"46530",5,0.01,
"46530-6844",1,0.01,
"46530-7013",1,0.01,
"465306512",1,0.01,
"465307268",1,0.01,
"465307862",1,0.01,
"465308399",1,0.01,
"465319543",1,0.01,
"46534",1,0.01,
"46545",1,0.01,
"46552",1,0.01,
"46561",1,0.01,
"465632720",1,0.01,
"465639023",1,0.01,
"46582-8227",1,0.01,
"46601-1030",1,0.01,
"46613",1,0.01,
"46614",2,0.01,
"466145045",1,0.01,
"46615-1003",1,0.01,
"466151036",1,0.01,
"46616",1,0.01,
"46616-1356",1,0.01,
"46617",1,0.01,
"46617-3320",1,0.01,
"46619",1,0.01,
"46619-9596",1,0.01,
"466284072",1,0.01,
"46637",1,0.01,
"466373826",1,0.01,
"46703",1,0.01,
"467259256",1,0.01,
"467742214",1,0.01,
"46783",1,0.01,
"46783-1003",1,0.01,
"46804",1,0.01,
"468041403",1,0.01,
"468043851",1,0.01,
"46805",3,0.01,
"46807",1,0.01,
"46814",3,0.01,
"468149427",1,0.01,
"46815",2,0.01,
"46816",2,0.01,
"46901",1,0.01,
"470069024",1,0.01,
"47012",1,0.01,
"47130",3,0.01,
"47161",1,0.01,
"47201",3,0.01,
"47203",1,0.01,
"47243",2,0.01,
"47374",1,0.01,
"47401",5,0.01,
"47403",1,0.01,
"47404",1,0.01,
"47404-5117",1,0.01,
"47406",2,0.01,
"47406-7514",1,0.01,
"47408",1,0.01,
"474082781",1,0.01,
"47421",1,0.01,
"47501",1,0.01,
"476160050",1,0.01,
"47630",2,0.01,
"47648",1,0.01,
"47711",1,0.01,
"47802-9606",1,0.01,
"47803",1,0.01,
"47834",1,0.01,
"47905",2,0.01,
"47906",5,0.01,
"47906-9671",1,0.01,
"47933",1,0.01,
"47977-8676",1,0.01,
"48009",6,0.01,
"480091420",1,0.01,
"480091905",1,0.01,
"480095768",1,0.01,
"480095865",1,0.01,
"48021",1,0.01,
"48025",7,0.01,
"480255137",1,0.01,
"480361645",1,0.01,
"48038-4946",1,0.01,
"48039",1,0.01,
"48042",3,0.01,
"48044",1,0.01,
"48062",1,0.01,
"48065",1,0.01,
"48066",2,0.01,
"48067",1,0.01,
"48070",2,0.01,
"480701560",1,0.01,
"48073",2,0.01,
"48075",1,0.01,
"48076",3,0.01,
"480761764",1,0.01,
"480765281",1,0.01,
"480765337",1,0.01,
"48081",1,0.01,
"48083",1,0.01,
"48084",2,0.01,
"480841220",1,0.01,
"480841400",1,0.01,
"48085",4,0.01,
"480851086",1,0.01,
"480857031",1,0.01,
"48090",1,0.01,
"48092",1,0.01,
"48094",1,0.01,
"480962521",1,0.01,
"48098",3,0.01,
"480984277",1,0.01,
"480985621",1,0.01,
"48101",1,0.01,
"481011439",1,0.01,
"48103",12,0.01,
"48103-3400",1,0.01,
"481032418",1,0.01,
"481032559",1,0.01,
"48104",8,0.01,
"48104-4414",1,0.01,
"48105",6,0.01,
"48105-2416",1,0.01,
"481052851",1,0.01,
"48108",9,0.01,
"481081720",1,0.01,
"48111",1,0.01,
"481147659",1,0.01,
"48116",1,0.01,
"481160453",1,0.01,
"481166776",1,0.01,
"481168593",1,0.01,
"48118",2,0.01,
"48124",1,0.01,
"48126",2,0.01,
"48127",1,0.01,
"48128",2,0.01,
"48130",3,0.01,
"481449655",1,0.01,
"48150",1,0.01,
"48152",6,0.01,
"48154",1,0.01,
"481618901",1,0.01,
"481659201",1,0.01,
"48167",6,0.01,
"481672719",1,0.01,
"481673945",1,0.01,
"481679380",1,0.01,
"48168",3,0.01,
"48168-8685",1,0.01,
"481681819",1,0.01,
"481683223",2,0.01,
"48169",2,0.01,
"48170",2,0.01,
"48170-1218",1,0.01,
"48170-1533",1,0.01,
"481705710",1,0.01,
"48176",2,0.01,
"48178",3,0.01,
"481781878",1,0.01,
"48180",1,0.01,
"48183",1,0.01,
"481832528",1,0.01,
"48185",2,0.01,
"48187",4,0.01,
"48188",2,0.01,
"48188-6241",1,0.01,
"48189",2,0.01,
"48189-9568",1,0.01,
"48192",1,0.01,
"48197",2,0.01,
"481974321",1,0.01,
"481974727",1,0.01,
"48198",1,0.01,
"48202",1,0.01,
"48203",1,0.01,
"482053141",1,0.01,
"48207",1,0.01,
"48208",1,0.01,
"48219",2,0.01,
"48221",1,0.01,
"482251649",1,0.01,
"48228",1,0.01,
"48230",8,0.01,
"48230-1063",1,0.01,
"48230-1310",1,0.01,
"482301460",1,0.01,
"482301814",1,0.01,
"482301924",1,0.01,
"48234",1,0.01,
"48236",7,0.01,
"482361161",1,0.01,
"482361874",1,0.01,
"482362617",1,0.01,
"482371481",1,0.01,
"48238",1,0.01,
"48239",1,0.01,
"48301",8,0.01,
"48301-2563",1,0.01,
"483013226",1,0.01,
"483013445",1,0.01,
"48302",4,0.01,
"48302-2729",1,0.01,
"483021106",1,0.01,
"483021217",1,0.01,
"483021570",1,0.01,
"483022534",1,0.01,
"483022849",1,0.01,
"483022850",1,0.01,
"48304",3,0.01,
"48304-1056",1,0.01,
"48304-1974",1,0.01,
"48304-2542",1,0.01,
"483041115",1,0.01,
"483041145",1,0.01,
"48306",3,0.01,
"483062835",1,0.01,
"483063593",1,0.01,
"48307",6,0.01,
"48307-1728",1,0.01,
"48307-2607",1,0.01,
"483076063",1,0.01,
"48309",3,0.01,
"483094510",1,0.01,
"48310",1,0.01,
"483141866",1,0.01,
"483144536",1,0.01,
"48315",1,0.01,
"48316",1,0.01,
"483161068",1,0.01,
"48320",2,0.01,
"48322",4,0.01,
"483222252",1,0.01,
"483224021",1,0.01,
"48323",5,0.01,
"483231372",1,0.01,
"483232452",1,0.01,
"483233821",1,0.01,
"48324",2,0.01,
"483241949",1,0.01,
"483243248",1,0.01,
"48326",1,0.01,
"483283453",1,0.01,
"48329",1,0.01,
"48331",3,0.01,
"483311934",1,0.01,
"483312055",1,0.01,
"483313529",1,0.01,
"48334",6,0.01,
"48334-4158",1,0.01,
"483343264",1,0.01,
"483344000",1,0.01,
"483344315",1,0.01,
"483344757",1,0.01,
"48335",2,0.01,
"483351240",1,0.01,
"48346",2,0.01,
"483462046",1,0.01,
"48347",1,0.01,
"48348",3,0.01,
"48348-4906",1,0.01,
"483482197",1,0.01,
"483482869",1,0.01,
"483484373",1,0.01,
"48356",1,0.01,
"48359",1,0.01,
"48360",1,0.01,
"483632642",1,0.01,
"48367",1,0.01,
"48371",1,0.01,
"48374",1,0.01,
"48374-2377",1,0.01,
"48374-3740",1,0.01,
"483742153",1,0.01,
"483743636",1,0.01,
"483743731",1,0.01,
"483743794",1,0.01,
"483743870",1,0.01,
"48375",3,0.01,
"483753934",1,0.01,
"483754755",1,0.01,
"483754761",1,0.01,
"48377",1,0.01,
"48380",1,0.01,
"48382",2,0.01,
"48383",1,0.01,
"48386",1,0.01,
"48413",2,0.01,
"48419",1,0.01,
"484238114",1,0.01,
"48430",1,0.01,
"484301467",1,0.01,
"484333706",1,0.01,
"48439",1,0.01,
"48439-9472",1,0.01,
"48455",2,0.01,
"48462",2,0.01,
"48473",1,0.01,
"48503",1,0.01,
"48507",1,0.01,
"48509-1251",1,0.01,
"48601",1,0.01,
"48602",2,0.01,
"486021945",1,0.01,
"48603",2,0.01,
"48611",1,0.01,
"48640",2,0.01,
"48640-2430",1,0.01,
"486402429",1,0.01,
"48641",1,0.01,
"48642",2,0.01,
"48661",1,0.01,
"48726",1,0.01,
"48734",1,0.01,
"48813",1,0.01,
"48821",1,0.01,
"48823",11,0.01,
"48832",1,0.01,
"48842",1,0.01,
"488428778",1,0.01,
"48843",2,0.01,
"48854",1,0.01,
"48858-1851",1,0.01,
"48858-6130",1,0.01,
"48864",4,0.01,
"48864-3849",1,0.01,
"488643468",1,0.01,
"48895",1,0.01,
"48906",1,0.01,
"48910",2,0.01,
"48915",1,0.01,
"48917",4,0.01,
"49001-2936",1,0.01,
"490029026",1,0.01,
"49006",1,0.01,
"49008",5,0.01,
"49008-1319",1,0.01,
"49009",2,0.01,
"49009-9381",1,0.01,
"49012-9710",1,0.01,
"49014",2,0.01,
"49017",1,0.01,
"49021",1,0.01,
"49024",1,0.01,
"490245520",1,0.01,
"49028",1,0.01,
"49033",1,0.01,
"49038",1,0.01,
"49048",1,0.01,
"490539777",1,0.01,
"490539780",1,0.01,
"49055",2,0.01,
"49057",1,0.01,
"49058",2,0.01,
"49071",2,0.01,
"49072",2,0.01,
"49078",1,0.01,
"49080",1,0.01,
"490809109",1,0.01,
"49085",10,0.01,
"490853429",1,0.01,
"49090",2,0.01,
"490999017",1,0.01,
"49102",1,0.01,
"491171473",1,0.01,
"49120",2,0.01,
"49203",1,0.01,
"49235",1,0.01,
"49270",1,0.01,
"49301",5,0.01,
"49306",1,0.01,
"49316",2,0.01,
"49331",3,0.01,
"49341",2,0.01,
"49401",3,0.01,
"49417",5,0.01,
"494178318",1,0.01,
"494178888",1,0.01,
"49418",2,0.01,
"49423",4,0.01,
"49424",2,0.01,
"494246104",1,0.01,
"49428",1,0.01,
"49441",2,0.01,
"49442",1,0.01,
"49444",1,0.01,
"494444273",1,0.01,
"49445",1,0.01,
"49456",2,0.01,
"494561875",1,0.01,
"49503",3,0.01,
"49504",2,0.01,
"49506",15,0.01,
"495063379",1,0.01,
"495064741",1,0.01,
"495065018",1,0.01,
"495086596",1,0.01,
"49509",2,0.01,
"49509-1480",1,0.01,
"49512",1,0.01,
"49525",1,0.01,
"495251213",1,0.01,
"49546",4,0.01,
"495465532",1,0.01,
"495467586",1,0.01,
"49636",1,0.01,
"49643",1,0.01,
"49648",1,0.01,
"49684",2,0.01,
"49685",1,0.01,
"49686",3,0.01,
"496862057",1,0.01,
"496866300",1,0.01,
"49688",1,0.01,
"49690",2,0.01,
"49696",1,0.01,
"49720",1,0.01,
"497209087",1,0.01,
"49740",2,0.01,
"49770",1,0.01,
"497708820",1,0.01,
"49855",2,0.01,
"49931",2,0.01,
"49970-9039",2,0.01,
"500004",0.01,2,
"50002",0.01,1,
"500074",0.01,1,
"50009",1,0.01,
"50014",3,0.01,
"50019",1,0.01,
"50021",1,0.01,
"50111",1,0.01,
"50112",1,0.01,
"501188044",1,0.01,
"50125",1,0.01,
"50131",1,0.01,
"50141",1,0.01,
"502",0.01,2,
"50263",1,0.01,
"50265",2,0.01,
"50265-5488",1,0.01,
"50266",2,0.01,
"50309",1,0.01,
"50310",1,0.01,
"503103826",1,0.01,
"50312",2,0.01,
"50312-5414",1,0.01,
"503121827",1,0.01,
"503121851",1,0.01,
"50317",1,0.01,
"503212619",1,0.01,
"50324",1,0.01,
"50325-6429",1,0.01,
"50327",1,0.01,
"50501",1,0.01,
"505220215",1,0.01,
"5055",1,0.01,
"50613",1,0.01,
"50674",1,0.01,
"50677-2215",1,0.01,
"50801",1,0.01,
"509",0.01,1,
"51020-250",0.01,1,
"51041",1,0.01,
"51201-2105",1,0.01,
"51526-4196",1,0.01,
"51900",0.01,1,
"52001",3,0.01,
"52002-0485",1,0.01,
"52003",1,0.01,
"52240",6,0.01,
"52240-7906",1,0.01,
"52240-9125",1,0.01,
"52241",2,0.01,
"52242",1,0.01,
"52245",5,0.01,
"52245-9205",1,0.01,
"522452027",1,0.01,
"522453245",1,0.01,
"52246",1,0.01,
"52302",1,0.01,
"52302-5147",1,0.01,
"523026265",1,0.01,
"52402",1,0.01,
"524023393",1,0.01,
"52403",2,0.01,
"52462",0.01,1,
"52556-8909",1,0.01,
"527223849",1,0.01,
"52803",1,0.01,
"52807",6,0.01,
"52807-1550",1,0.01,
"53005",3,0.01,
"53012",2,0.01,
"53017",1,0.01,
"53018",1,0.01,
"530181128",1,0.01,
"530219748",1,0.01,
"53022",2,0.01,
"53024",1,0.01,
"530242285",1,0.01,
"53029",1,0.01,
"530298559",1,0.01,
"530299018",1,0.01,
"53044",1,0.01,
"53044-1361",1,0.01,
"530441428",1,0.01,
"53045",3,0.01,
"53045-2218",1,0.01,
"53045-3843",1,0.01,
"530451309",1,0.01,
"530451708",1,0.01,
"530454918",1,0.01,
"530456203",1,0.01,
"530458163",1,0.01,
"53051",1,0.01,
"53051-5885",1,0.01,
"53066",1,0.01,
"530663489",1,0.01,
"530666513",1,0.01,
"53072",1,0.01,
"53072-2691",1,0.01,
"530725700",1,0.01,
"53073",1,0.01,
"53074",1,0.01,
"530741135",1,0.01,
"53080",1,0.01,
"53083",1,0.01,
"53083-2124",1,0.01,
"53089",1,0.01,
"53089-5007",1,0.01,
"53090",1,0.01,
"53092",5,0.01,
"53092-0054",1,0.01,
"53092-5231",1,0.01,
"530925202",1,0.01,
"53095",2,0.01,
"530954744",1,0.01,
"53105",1,0.01,
"531052110",1,0.01,
"53115",1,0.01,
"531153948",1,0.01,
"53125",1,0.01,
"53132",4,0.01,
"531328515",1,0.01,
"53140",1,0.01,
"53142",1,0.01,
"531427546",1,0.01,
"53143",1,0.01,
"53144",1,0.01,
"53149-8860",1,0.01,
"531499375",1,0.01,
"531512394",1,0.01,
"53158",3,0.01,
"53186",2,0.01,
"531861237",1,0.01,
"531865480",1,0.01,
"53188",1,0.01,
"531882526",1,0.01,
"531884408",1,0.01,
"53189",2,0.01,
"53190",1,0.01,
"531902188",1,0.01,
"53202",1,0.01,
"532021260",1,0.01,
"53204",2,0.01,
"53206",3,0.01,
"53206-3311",1,0.01,
"53207",3,0.01,
"532081013",1,0.01,
"53209",1,0.01,
"532091833",1,0.01,
"53211",3,0.01,
"53211-1002",1,0.01,
"53211-4377",1,0.01,
"532111759",1,0.01,
"532111778",1,0.01,
"53212",1,0.01,
"53213",2,0.01,
"53213-2319",1,0.01,
"532131737",1,0.01,
"53215-1946",1,0.01,
"53216",2,0.01,
"53217",6,0.01,
"53217-5601",1,0.01,
"532171915",1,0.01,
"532172736",1,0.01,
"532173871",1,0.01,
"532175501",1,0.01,
"532175742",1,0.01,
"532176021",1,0.01,
"53218",3,0.01,
"53220",1,0.01,
"532201342",1,0.01,
"532214217",1,0.01,
"53222",1,0.01,
"53223",2,0.01,
"532244142",1,0.01,
"53225",1,0.01,
"53226",2,0.01,
"532261255",1,0.01,
"53233",6,0.01,
"53402",3,0.01,
"53403",1,0.01,
"534041473",1,0.01,
"53406",2,0.01,
"53511-7026",1,0.01,
"53521",1,0.01,
"53534",3,0.01,
"53537",1,0.01,
"53545",1,0.01,
"53546",1,0.01,
"53555-9502",1,0.01,
"53558",2,0.01,
"53562",2,0.01,
"53562-1401",1,0.01,
"53562-3765",1,0.01,
"53562-3853",1,0.01,
"53588",1,0.01,
"53593",2,0.01,
"53593-2231",1,0.01,
"535938001",1,0.01,
"53598",1,0.01,
"53703",4,0.01,
"53704",2,0.01,
"53704-4857",1,0.01,
"53705",6,0.01,
"53705-1408",1,0.01,
"53705-1477",1,0.01,
"53711",2,0.01,
"53711-1932",1,0.01,
"53711-7119",1,0.01,
"537111925",1,0.01,
"537112108",1,0.01,
"53715",1,0.01,
"53717",1,0.01,
"53719",2,0.01,
"53719-4539",1,0.01,
"53726",1,0.01,
"53818",2,0.01,
"539162509",1,0.01,
"539169319",1,0.01,
"53946",1,0.01,
"54016",1,0.01,
"541153648",1,0.01,
"541159041",1,0.01,
"541159244",1,0.01,
"54136",1,0.01,
"54166",1,0.01,
"542130177",1,0.01,
"54220",1,0.01,
"54228",1,0.01,
"542359703",1,0.01,
"54245",2,0.01,
"54301",1,0.01,
"54304",1,0.01,
"54311",1,0.01,
"543119525",1,0.01,
"54313",2,1,
"54401",1,0.01,
"54455",1,0.01,
"54455-9410",1,0.01,
"54481",1,0.01,
"54481-3153",1,0.01,
"54494",1,0.01,
"54494-7388",1,0.01,
"54601",4,0.01,
"54636",1,0.01,
"54650",1,0.01,
"54701-7225",1,0.01,
"54822",1,0.01,
"54829",1,0.01,
"54901",1,0.01,
"54902",1,0.01,
"549048884",1,0.01,
"54911",1,0.01,
"549111157",1,0.01,
"549114319",1,0.01,
"54913",5,0.01,
"54914",1,0.01,
"54915-4698",1,0.01,
"549158710",1,0.01,
"54935",1,0.01,
"549428771",1,0.01,
"549522452",1,0.01,
"54956",4,0.01,
"549564625",1,0.01,
"55001",1,0.01,
"55001-9406",1,0.01,
"55016",2,0.01,
"55033",2,0.01,
"55040",2,0.01,
"55042",1,0.01,
"55044",2,0.01,
"55044-6035",1,0.01,
"550571638",1,0.01,
"55060",1,0.01,
"550684385",1,0.01,
"55073",1,0.01,
"550739768",1,0.01,
"55077",2,0.01,
"55082",5,0.01,
"55082-5405",1,0.01,
"55082-9464",1,0.01,
"550824552",1,0.01,
"55102",1,0.01,
"55102-3221",1,0.01,
"551024024",1,0.01,
"55104",3,0.01,
"55104-1939",1,0.01,
"551046921",1,0.01,
"55105",7,0.01,
"55105-3003",1,0.01,
"551051048",1,0.01,
"551053204",1,0.01,
"55106",1,0.01,
"55107",1,0.01,
"55110",1,0.01,
"55112",1,0.01,
"55113",1,0.01,
"55114",1,0.01,
"55115",1,0.01,
"551151777",1,0.01,
"55116",2,0.01,
"55116-1811",1,0.01,
"551162122",1,0.01,
"551162245",1,0.01,
"55117",1,0.01,
"55118",8,0.01,
"55118-2747",1,0.01,
"55118-3706",1,0.01,
"55118-3710",1,0.01,
"55119",1,0.01,
"55119-4353",1,0.01,
"551193353",1,0.01,
"55120",1,0.01,
"55122",1,0.01,
"551223390",1,0.01,
"55123",4,0.01,
"551231897",1,0.01,
"551232451",1,0.01,
"551233991",1,0.01,
"55124",5,0.01,
"551244236",1,0.01,
"551249580",1,0.01,
"55125",4,0.01,
"551252047",1,0.01,
"551258885",1,0.01,
"55126",1,0.01,
"551277177",1,0.01,
"55129",4,0.01,
"551295299",1,0.01,
"551296204",1,0.01,
"55303-5609",1,0.01,
"55304",1,0.01,
"55305",1,0.01,
"55305-5142",1,0.01,
"553052300",1,0.01,
"553053053",1,0.01,
"55306",2,0.01,
"553065062",1,0.01,
"553065151",1,0.01,
"55311",5,0.01,
"55311-2305",1,0.01,
"55311-2541",1,0.01,
"553111856",1,0.01,
"553112682",1,0.01,
"55317",1,0.01,
"553176401",1,0.01,
"553176705",1,0.01,
"553183416",1,0.01,
"55330",3,0.01,
"55330-7510",1,0.01,
"55331",5,0.01,
"55331-8887",1,0.01,
"553317703",2,0.01,
"553318110",1,0.01,
"553319034",1,0.01,
"55337-4355",1,0.01,
"55343",1,0.01,
"553431310",1,0.01,
"553434365",1,0.01,
"553438085",1,0.01,
"55344",1,0.01,
"55345",2,0.01,
"553452840",1,0.01,
"553453549",1,0.01,
"55346",5,0.01,
"553463232",1,0.01,
"55347",3,0.01,
"553471098",1,0.01,
"553471943",1,0.01,
"553474184",1,0.01,
"553474259",1,0.01,
"553569515",1,0.01,
"55359-9612",1,0.01,
"55364",2,0.01,
"553641121",1,0.01,
"553641845",1,0.01,
"553647383",1,0.01,
"553648147",1,0.01,
"553648160",1,0.01,
"55372",2,0.01,
"55374",1,0.01,
"553748829",1,0.01,
"55378",4,0.01,
"55386",1,0.01,
"55391",2,0.01,
"55391-9373",1,0.01,
"553913235",1,0.01,
"55401",1,0.01,
"55403",1,0.01,
"55405",5,0.01,
"554052436",1,0.01,
"55406",1,0.01,
"554061816",1,0.01,
"554062322",1,0.01,
"55407",3,0.01,
"55407-2641",1,0.01,
"55407-2739",1,0.01,
"554072737",1,0.01,
"55408",5,0.01,
"554083522",1,0.01,
"554091024",1,0.01,
"554091718",1,0.01,
"55410",1,0.01,
"554101407",1,0.01,
"554102445",1,0.01,
"554102635",1,0.01,
"55411",1,0.01,
"554111811",1,0.01,
"55412-1818",1,0.01,
"554121109",1,0.01,
"55414",5,0.01,
"554143677",1,0.01,
"55416",4,0.01,
"554164134",1,0.01,
"554164207",1,0.01,
"554165064",1,0.01,
"55417",6,0.01,
"55417-2527",1,0.01,
"554171429",1,0.01,
"554172441",1,0.01,
"55418",3,0.01,
"55419",3,0.01,
"55419-1523",1,0.01,
"554192014",1,0.01,
"554195336",1,0.01,
"554195339",1,0.01,
"55421",1,0.01,
"554213057",1,0.01,
"55422",2,0.01,
"55423",1,0.01,
"55424",2,0.01,
"554241138",1,0.01,
"55426",1,0.01,
"55426-1072",1,0.01,
"55427",1,0.01,
"554282655",1,0.01,
"554282730",1,0.01,
"55430",1,0.01,
"55431",2,0.01,
"554311961",1,0.01,
"55432",1,0.01,
"55433",1,0.01,
"55435-4031",1,0.01,
"55436",1,0.01,
"55436-1224",1,0.01,
"554361941",1,0.01,
"554362023",1,0.01,
"554362519",1,0.01,
"55437",3,0.01,
"55438",2,0.01,
"554381244",1,0.01,
"55439",1,0.01,
"55439-1044",1,0.01,
"55441",2,0.01,
"554422508",1,0.01,
"55443-1579",1,0.01,
"55444",2,0.01,
"55444-1514",1,0.01,
"55446",1,0.01,
"554462792",1,0.01,
"554471263",1,0.01,
"554471606",1,0.01,
"55449",1,0.01,
"55454",1,0.01,
"556161616",1,0.01,
"55746-9343",1,0.01,
"55805",1,0.01,
"55811",1,0.01,
"55811-4199",1,0.01,
"559028819",1,0.01,
"55904",1,0.01,
"559045616",1,0.01,
"559639676",1,0.01,
"56001",1,0.01,
"56001-2626",1,0.01,
"560012631",1,0.01,
"560031628",1,0.01,
"560102",0.01,1,
"56097",1,0.01,
"56267",1,0.01,
"56301",1,0.01,
"56308",1,0.01,
"56401",1,0.01,
"56560",1,0.01,
"570016706",1,0.01,
"570040205",1,0.01,
"571031012",1,0.01,
"571081544",1,0.01,
"57201",1,0.01,
"5738",1,0.01,
"5777",1,0.01,
"58000",0.01,1,
"58078",1,0.01,
"58504",1,0.01,
"59714",1,0.01,
"59715",1,0.01,
"59718",1,0.01,
"59725",1,0.01,
"599118245",1,0.01,
"60002",17,0.01,
"600022622",1,0.01,
"600022749",1,0.01,
"600026411",1,0.01,
"600028513",1,0.01,
"600029763",1,0.01,
"60004",91,0.01,
"60004-1392",1,0.01,
"60004-3319",1,0.01,
"60004-4050",1,0.01,
"60004-6629",1,0.01,
"60004-6634",1,0.01,
"60004-6863",1,0.01,
"600041396",1,0.01,
"600042511",1,0.01,
"600043249",1,0.01,
"600043662",1,0.01,
"600044727",1,0.01,
"600045115",1,0.01,
"600045766",1,0.01,
"600046049",1,0.01,
"600046438",1,0.01,
"600046720",1,0.01,
"600046938",1,0.01,
"600047216",1,0.01,
"60005",43,0.01,
"60005-1637",1,0.01,
"60005-2638",1,0.01,
"60005-2762",1,0.01,
"60005-2962",1,0.01,
"600051647",1,0.01,
"600052205",1,0.01,
"600052707",1,0.01,
"600053610",1,0.01,
"600053776",1,0.01,
"60007",30,0.01,
"60007-1732",1,0.01,
"60007-4533",1,0.01,
"600071763",1,0.01,
"600073455",1,0.01,
"600073942",1,0.01,
"60008",26,0.01,
"60008-2743",1,0.01,
"60008-3055",1,0.01,
"600082013",1,0.01,
"60009",1,0.01,
"60010",63,0.01,
"60010-1229",1,0.01,
"60010-1413",1,0.01,
"60010-2870",1,0.01,
"60010-2979",1,0.01,
"60010-3566",1,0.01,
"60010-4179",1,0.01,
"60010-6703",1,0.01,
"60010-7044",1,0.01,
"60010-7806",1,0.01,
"600102126",1,0.01,
"600103560",1,0.01,
"600104267",1,0.01,
"600104769",1,0.01,
"600105632",1,0.01,
"600105923",1,0.01,
"600106159",1,0.01,
"600106407",1,0.01,
"600106954",1,0.01,
"600107063",1,0.01,
"600109107",1,0.01,
"600109329",1,0.01,
"60012",18,0.01,
"600123515",1,0.01,
"60013",13,0.01,
"600131862",1,0.01,
"600132702",1,0.01,
"60014",38,0.01,
"60014-1602",1,0.01,
"60014-1939",1,0.01,
"60014-4250",1,0.01,
"600142947",1,0.01,
"600142991",1,0.01,
"60015",61,0.01,
"60015-1511",1,0.01,
"60015-2046",1,0.01,
"60015-2827",1,0.01,
"60015-2902",1,0.01,
"60015-3113",1,0.01,
"60015-3317",1,0.01,
"600151548",1,0.01,
"600151744",1,0.01,
"600151808",1,0.01,
"600152867",1,0.01,
"600152954",1,0.01,
"600153774",1,0.01,
"600153932",1,0.01,
"600154163",1,0.01,
"60016",83,0.01,
"60016-5161",1,0.01,
"60016-7064",1,0.01,
"600161039",1,0.01,
"600161413",1,0.01,
"600162019",1,0.01,
"600162155",1,0.01,
"600163637",1,0.01,
"600164254",1,0.01,
"600165121",1,0.01,
"600166312",1,0.01,
"600166720",1,0.01,
"600167545",1,0.01,
"600168735",1,0.01,
"60017",1,0.01,
"60018",39,0.01,
"60018-1620",1,0.01,
"60018-2651",1,0.01,
"60018-4061",1,0.01,
"600181136",1,0.01,
"600181222",1,0.01,
"600181244",1,0.01,
"600182023",1,0.01,
"600182264",1,0.01,
"600184064",1,0.01,
"600184320",1,0.01,
"60020",1,0.01,
"60021",1,0.01,
"60021-1344",1,0.01,
"60021-1827",1,0.01,
"60021-1908",1,0.01,
"60022",18,0.01,
"600222042",1,0.01,
"60025",118,0.01,
"60025-1403",1,0.01,
"60025-1413",1,0.01,
"60025-2720",1,0.01,
"60025-3243",1,0.01,
"60025-3912",1,0.01,
"60025-4329",1,0.01,
"600252335",1,0.01,
"600252827",1,0.01,
"600253003",1,0.01,
"600253245",1,0.01,
"600253430",1,0.01,
"600253537",1,0.01,
"600254016",1,0.01,
"600254141",1,0.01,
"600254156",1,0.01,
"600254208",1,0.01,
"600254626",1,0.01,
"600254641",1,0.01,
"600255010",1,0.01,
"60026",32,0.01,
"60026-1169",1,0.01,
"60026-7043",1,0.01,
"600261120",1,0.01,
"600267402",1,0.01,
"600268034",1,0.01,
"60029",1,0.01,
"60030",28,0.01,
"60030-3842",1,0.01,
"60030-4204",1,0.01,
"600307956",1,0.01,
"60031",43,0.01,
"60031-5244",1,0.01,
"600311695",1,0.01,
"600313209",1,0.01,
"600313770",1,0.01,
"600315120",1,0.01,
"600316364",1,0.01,
"600319132",1,0.01,
"60033",4,0.01,
"60034",1,0.01,
"60035",54,0.01,
"60035-1258",1,0.01,
"60035-4810",1,0.01,
"60035-5331",1,0.01,
"600352260",1,0.01,
"600353010",1,0.01,
"600353953",1,0.01,
"600353958",1,0.01,
"600354300",1,0.01,
"600354340",1,0.01,
"600354441",1,0.01,
"600355208",1,0.01,
"60040",2,0.01,
"60041",3,0.01,
"60041-9476",1,0.01,
"60042",3,0.01,
"60042-9525",1,0.01,
"600428204",1,0.01,
"60043",8,0.01,
"600431059",1,0.01,
"60044",14,0.01,
"60044-1561",1,0.01,
"600441650",1,0.01,
"60045",39,0.01,
"60045-1514",1,0.01,
"60045-1805",1,0.01,
"60045-2259",1,0.01,
"60045-2338",1,0.01,
"60045-2685",1,0.01,
"60045-3249",1,0.01,
"60045-3391",1,0.01,
"60045-4621",1,0.01,
"60045-4813",1,0.01,
"600451114",1,0.01,
"600453705",1,0.01,
"600453820",1,0.01,
"60046",21,0.01,
"60046-6514",1,0.01,
"60046-6522",1,0.01,
"60046-6725",1,0.01,
"60046-7527",1,0.01,
"600464954",1,0.01,
"60047",60,0.01,
"60047-1464",1,0.01,
"60047-2202",1,0.01,
"60047-2820",1,0.01,
"60047-5063",1,0.01,
"60047-5135",1,0.01,
"60047-5147",1,0.01,
"60047-7300",1,0.01,
"600472965",1,0.01,
"600473343",1,0.01,
"600475018",1,0.01,
"600475061",1,0.01,
"600475073",1,0.01,
"600475193",1,0.01,
"600475208",1,0.01,
"600475223",1,0.01,
"600475250",1,0.01,
"600477560",1,0.01,
"600478454",1,0.01,
"600479286",1,0.01,
"60048",43,0.01,
"60048-1106",1,0.01,
"60048-3066",1,0.01,
"60048-3233",1,0.01,
"60048-3400",1,0.01,
"600483921",1,0.01,
"600484807",1,0.01,
"600484897",1,0.01,
"60050",8,0.01,
"60051",9,0.01,
"60051-5148",1,0.01,
"60053",56,0.01,
"60053-1714",1,0.01,
"60053-1838",1,0.01,
"60053-2919",1,0.01,
"600532017",1,0.01,
"600532024",1,0.01,
"600532432",1,0.01,
"600533324",1,0.01,
"600533369",1,0.01,
"60055",1,0.01,
"60056",88,0.01,
"60056-1528",1,0.01,
"60056-1562",1,0.01,
"60056-1821",1,0.01,
"60056-2032",1,0.01,
"60056-3450",1,0.01,
"60056-3651",1,0.01,
"60056-4148",1,0.01,
"60056-4248",1,0.01,
"60056-4340",1,0.01,
"60056-5064",1,0.01,
"600561208",1,0.01,
"600561685",1,0.01,
"600561901",1,0.01,
"600562392",1,0.01,
"600562416",1,0.01,
"600562805",1,0.01,
"600562952",1,0.01,
"600562982",1,0.01,
"600564027",1,0.01,
"600564135",1,0.01,
"600564310",1,0.01,
"600564924",1,0.01,
"600565076",1,0.01,
"600565748",1,0.01,
"60060",37,0.01,
"60060-1272",1,0.01,
"60060-2888",1,0.01,
"60060-5374",1,0.01,
"60060-5604",1,0.01,
"600601718",1,0.01,
"600603451",1,0.01,
"600603481",1,0.01,
"600604006",1,0.01,
"600604063",1,0.01,
"600609545",1,0.01,
"60061",35,0.01,
"60061-1239",1,0.01,
"60061-1610",1,0.01,
"60061-2333",1,0.01,
"60061-2945",1,0.01,
"60061-3218",1,0.01,
"600611029",1,0.01,
"600611225",1,0.01,
"600612105",1,0.01,
"600613163",1,0.01,
"600614575",1,0.01,
"60062",64,1,
"60062-1102",1,0.01,
"60062-1541",1,0.01,
"60062-3705",1,0.01,
"60062-5413",1,0.01,
"60062-6075",1,0.01,
"60062-6609",1,0.01,
"600621336",1,0.01,
"600622213",1,0.01,
"600622601",1,0.01,
"600623763",1,0.01,
"600624211",1,0.01,
"600624654",1,0.01,
"600624927",1,0.01,
"600624943",1,0.01,
"600625800",1,0.01,
"600626060",1,0.01,
"600626350",1,0.01,
"600626410",1,0.01,
"600626921",1,0.01,
"600627054",1,0.01,
"600627425",1,0.01,
"60064",2,0.01,
"60064-1731",1,0.01,
"60067",66,0.01,
"60067-1941",1,0.01,
"60067-3470",1,0.01,
"60067-5847",1,0.01,
"600672236",1,0.01,
"600674258",1,0.01,
"600674728",1,0.01,
"600674937",1,0.01,
"600677056",1,0.01,
"600677277",1,0.01,
"600677363",1,0.01,
"600679108",1,0.01,
"600679110",1,0.01,
"60068",111,0.01,
"60068-1054",1,0.01,
"60068-1149",1,0.01,
"60068-1716",1,0.01,
"60068-1927",2,0.01,
"60068-2728",1,0.01,
"60068-3537",1,0.01,
"60068-4349",1,0.01,
"60068-4651",1,0.01,
"60068-5515",1,0.01,
"60068-5613",1,0.01,
"600681920",1,0.01,
"600682005",1,0.01,
"600682512",1,0.01,
"600682516",1,0.01,
"600682604",1,0.01,
"600682709",1,0.01,
"600682764",1,0.01,
"600682925",1,0.01,
"600682960",1,0.01,
"600683005",1,0.01,
"600683438",1,0.01,
"600683551",1,0.01,
"600683562",1,0.01,
"600683779",1,0.01,
"600683907",1,0.01,
"600684321",1,0.01,
"600684459",1,0.01,
"600684665",1,0.01,
"600684941",2,0.01,
"600685148",1,0.01,
"600685249",1,0.01,
"600685280",1,0.01,
"600685353",1,0.01,
"60069",10,0.01,
"60069-2209",1,0.01,
"60069-2808",1,0.01,
"60069-4026",1,0.01,
"600699631",1,0.01,
"60070",23,0.01,
"60070-2528",1,0.01,
"600701535",1,0.01,
"60071",4,0.01,
"60072",1,0.01,
"60073",31,0.01,
"60073-2570",1,0.01,
"60073-3619",1,0.01,
"60073-5625",1,0.01,
"600731753",1,0.01,
"600738149",1,0.01,
"600739540",1,0.01,
"60074",34,0.01,
"60074-1093",1,0.01,
"60074-2301",1,0.01,
"60074-3751",1,0.01,
"600741047",1,0.01,
"600745713",1,0.01,
"60076",80,0.01,
"60076-1950",1,0.01,
"60076-2067",1,0.01,
"60076-2422",1,0.01,
"60076-2623",1,0.01,
"60076-2710",1,0.01,
"60076-3070",1,0.01,
"60076-3680",1,0.01,
"600761417",1,0.01,
"600761571",1,0.01,
"600761615",1,0.01,
"600761847",1,0.01,
"600762111",1,0.01,
"600762134",1,0.01,
"600762618",1,0.01,
"600762927",1,0.01,
"600763611",1,0.01,
"600763626",1,0.01,
"60077",63,0.01,
"60077-1221",1,0.01,
"60077-1727",1,0.01,
"60077-1903",1,0.01,
"60077-1995",1,0.01,
"60077-2587",1,0.01,
"600771112",1,0.01,
"600771157",1,0.01,
"600771763",1,0.01,
"600772059",1,0.01,
"600772112",1,0.01,
"600772179",1,0.01,
"600772203",1,0.01,
"600772830",1,0.01,
"600772832",1,0.01,
"600773477",1,0.01,
"600773652",1,0.01,
"600775439",1,0.01,
"60079",1,0.01,
"60081",2,0.01,
"60083",12,0.01,
"600839725",1,0.01,
"60084",10,0.01,
"600845003",1,0.01,
"600845021",1,0.01,
"60085",26,0.01,
"60085-1120",1,0.01,
"60085-4012",1,0.01,
"60085-5820",1,0.01,
"60085-7245",1,0.01,
"60085-8605",1,0.01,
"600851626",1,0.01,
"60087",16,0.01,
"60087-1450",1,0.01,
"60087-2250",1,0.01,
"60087-3560",1,0.01,
"600871820",1,0.01,
"60088",2,0.01,
"60088-2532",1,0.01,
"60089",58,0.01,
"60089-1672",1,0.01,
"60089-1708",1,0.01,
"60089-6858",1,0.01,
"600891046",1,0.01,
"600891181",1,0.01,
"600891191",1,0.01,
"600891832",1,0.01,
"600891838",1,0.01,
"600891961",1,0.01,
"600892080",1,0.01,
"600894153",1,0.01,
"600896909",1,0.01,
"600897739",1,0.01,
"60090",37,0.01,
"60090-2610",1,0.01,
"60090-4460",1,0.01,
"60090-5334",1,0.01,
"60090-5352",1,0.01,
"600903816",1,0.01,
"600905439",1,0.01,
"600905534",1,0.01,
"600905585",1,0.01,
"60091",57,0.01,
"60091-1540",1,0.01,
"60091-1916",1,0.01,
"60091-2144",1,0.01,
"60091-2376",1,0.01,
"600911557",1,0.01,
"600911971",1,0.01,
"600912010",1,0.01,
"600912504",1,0.01,
"600912601",1,0.01,
"600913318",1,0.01,
"60093",48,0.01,
"60093-1421",1,0.01,
"60093-2166",1,0.01,
"60093-2356",1,0.01,
"60093-3820",1,0.01,
"60093-4145",2,0.01,
"600931115",1,0.01,
"600931402",1,0.01,
"600932212",1,0.01,
"600933221",1,0.01,
"600933231",1,0.01,
"600933546",1,0.01,
"600933820",1,0.01,
"600934003",1,0.01,
"600934015",1,0.01,
"60096",1,0.01,
"60097",5,0.01,
"60097-8651",1,0.01,
"60098",16,0.01,
"60098-2505",1,0.01,
"60098-4104",1,0.01,
"60099",8,0.01,
"60101",43,0.01,
"60101-2010",1,0.01,
"60101-2806",1,0.01,
"60101-2866",1,0.01,
"60101-3533",1,0.01,
"60101-3719",1,0.01,
"601011068",1,0.01,
"601011657",1,0.01,
"601011719",1,0.01,
"601012046",1,0.01,
"601012131",1,0.01,
"601012917",1,0.01,
"601013232",1,0.01,
"601013312",1,0.01,
"601013407",1,0.01,
"601015707",1,0.01,
"601015721",1,0.01,
"601016515",1,0.01,
"60102",22,0.01,
"60102-2048",1,0.01,
"60102-4513",1,0.01,
"60102-5036",1,0.01,
"60102-6084",1,0.01,
"601022949",1,0.01,
"601025038",1,0.01,
"601025419",1,0.01,
"601026620",1,0.01,
"601026821",1,0.01,
"60103",60,0.01,
"60103-1306",1,0.01,
"60103-1395",1,0.01,
"60103-4013",1,0.01,
"60103-4702",1,0.01,
"60103-5705",1,0.01,
"60103-7400",1,0.01,
"601031309",1,0.01,
"601031854",1,0.01,
"601032303",1,0.01,
"601032979",1,0.01,
"601034003",1,0.01,
"601035895",1,0.01,
"60104",7,0.01,
"601042329",1,0.01,
"60106",8,0.01,
"60106-1406",1,0.01,
"601063152",1,0.01,
"601063153",1,0.01,
"60107",24,0.01,
"60107-1366",1,0.01,
"60107-1920",2,0.01,
"601071584",1,0.01,
"60108",32,0.01,
"60108-1364",1,0.01,
"60108-1464",1,0.01,
"60108-5409",1,0.01,
"601081056",1,0.01,
"601081312",1,0.01,
"601081330",1,0.01,
"601081461",1,0.01,
"601081465",1,0.01,
"601082532",1,0.01,
"601082534",1,0.01,
"601083016",1,0.01,
"60109",1,0.01,
"60110",11,0.01,
"60110-1137",1,0.01,
"60110-1242",1,0.01,
"60110-1258",1,0.01,
"60110-2908",1,0.01,
"60115",10,0.01,
"60115-1583",1,0.01,
"60115-3855",1,0.01,
"60118",10,0.01,
"60118-3312",1,0.01,
"60118-9005",1,0.01,
"60119",3,0.01,
"601198444",1,0.01,
"60120",17,0.01,
"60120-4617",1,0.01,
"601202464",1,0.01,
"60123",15,0.01,
"60123-1404",1,0.01,
"60123-7713",1,0.01,
"60124",8,0.01,
"601248939",1,0.01,
"60126",68,0.01,
"60126-1804",1,0.01,
"60126-2324",1,0.01,
"60126-2327",1,0.01,
"60126-2915",1,0.01,
"60126-3226",1,0.01,
"60126-3602",1,0.01,
"60126-4840",1,0.01,
"60126-5011",1,0.01,
"60126-5214",1,0.01,
"601261332",1,0.01,
"601263553",1,0.01,
"601263709",1,0.01,
"601263803",1,0.01,
"60130",10,0.01,
"60130-1221",1,0.01,
"60130-1533",1,0.01,
"60131",19,0.01,
"60131-2525",1,0.01,
"60131-2671",1,0.01,
"601311903",1,0.01,
"601312661",1,0.01,
"60133",34,0.01,
"60133-2655",1,0.01,
"60133-5213",1,0.01,
"60133-5320",1,0.01,
"601333842",1,0.01,
"601335114",1,0.01,
"601335202",1,0.01,
"60134",17,0.01,
"60134-1706",1,0.01,
"60134-2536",1,0.01,
"60134-7514",1,0.01,
"60134-7551",1,0.01,
"601341862",1,0.01,
"601345402",1,0.01,
"601346022",1,0.01,
"60135",2,0.01,
"60136",7,0.01,
"60136-4040",1,0.01,
"60136-8021",1,0.01,
"60137",36,0.01,
"60137-3202",1,0.01,
"60137-7042",1,0.01,
"601373951",1,0.01,
"601374108",1,0.01,
"601374454",1,0.01,
"601374749",1,0.01,
"601374829",1,0.01,
"601375219",1,0.01,
"601375595",1,0.01,
"601376107",1,0.01,
"601377251",1,0.01,
"601377306",1,0.01,
"601377458",1,0.01,
"60139",30,0.01,
"60139-2588",3,0.01,
"60139-3117",2,0.01,
"60139-3601",1,0.01,
"601392186",1,0.01,
"601393795",1,0.01,
"60140",14,0.01,
"60140-2045",1,0.01,
"60140-9126",1,0.01,
"60140-9179",1,0.01,
"601407715",1,0.01,
"60142",15,0.01,
"60142-2423",1,0.01,
"60142-4047",1,0.01,
"601428196",1,0.01,
"60143",14,0.01,
"60146",1,0.01,
"60147",1,0.01,
"60148",53,0.01,
"60148-2309",1,0.01,
"60148-2604",1,0.01,
"60148-3254",1,0.01,
"60148-3837",1,0.01,
"60148-4436",1,0.01,
"60148-4722",1,0.01,
"60148-6504",1,0.01,
"601483500",1,0.01,
"60150",1,0.01,
"60151",2,0.01,
"60152",3,0.01,
"60153",5,0.01,
"60153-2306",1,0.01,
"601533218",1,0.01,
"60154",21,0.01,
"60154-3551",1,0.01,
"60154-4438",1,0.01,
"60154-7986",1,0.01,
"601543422",1,0.01,
"601544921",1,0.01,
"601544925",1,0.01,
"601544928",1,0.01,
"601545008",1,0.01,
"60155",5,0.01,
"60155-3007",1,0.01,
"60155-4834",1,0.01,
"60156",22,0.01,
"60156-1231",1,0.01,
"60156-5216",1,0.01,
"60156-5827",1,0.01,
"601565884",1,0.01,
"601566746",1,0.01,
"60157",4,0.01,
"601579504",1,0.01,
"601579754",1,0.01,
"60160",12,0.01,
"60160-1903",1,0.01,
"60160-1920",1,0.01,
"60160-3111",1,0.01,
"601601925",1,0.01,
"601602837",1,0.01,
"60162",2,0.01,
"60163",3,0.01,
"60164",22,0.01,
"60164-1724",1,0.01,
"601641541",1,0.01,
"601642236",1,0.01,
"60165",4,0.01,
"60169",29,0.01,
"60169-1056",1,0.01,
"60169-3362",1,0.01,
"60169-4839",1,0.01,
"601691003",1,0.01,
"601692643",1,0.01,
"601693261",1,0.01,
"601694002",1,0.01,
"60171",12,0.01,
"60171-1448",1,0.01,
"60172",23,0.01,
"601721009",1,0.01,
"601721100",1,0.01,
"601721629",1,0.01,
"601722157",1,0.01,
"601723022",1,0.01,
"60173",13,0.01,
"60173-3915",1,0.01,
"60173-6534",1,0.01,
"60173-6572",1,0.01,
"60174",26,0.01,
"60174-8842",1,0.01,
"601741189",1,0.01,
"601741414",1,0.01,
"601741432",1,0.01,
"601747858",1,0.01,
"601747968",1,0.01,
"601748713",1,0.01,
"60175",25,0.01,
"601756503",1,0.01,
"60176",15,0.01,
"601761445",1,0.01,
"601761580",1,0.01,
"601761829",1,0.01,
"60177",14,0.01,
"60177-1970",1,0.01,
"60177-2380",1,0.01,
"601772823",1,0.01,
"601772913",1,0.01,
"601773276",1,0.01,
"60178",1,0.01,
"60178-8800",1,0.01,
"60181",27,0.01,
"601811744",1,0.01,
"601811968",1,0.01,
"601811972",1,0.01,
"601812946",1,0.01,
"601813862",1,0.01,
"601815254",1,0.01,
"60183",1,0.01,
"60184",3,0.01,
"601842306",1,0.01,
"60185",21,0.01,
"60185-6167",1,0.01,
"60185-6418",1,0.01,
"601854219",1,0.01,
"601854523",1,0.01,
"601855029",1,0.01,
"601855062",1,0.01,
"601855113",1,0.01,
"601855926",1,0.01,
"60187",32,0.01,
"601873062",1,0.01,
"601873910",1,0.01,
"601874021",1,0.01,
"601874040",1,0.01,
"601874744",1,0.01,
"601875637",1,0.01,
"60188",52,0.01,
"60188-1389",1,0.01,
"60188-2500",2,0.01,
"60188-9238",1,0.01,
"601883124",1,0.01,
"601883400",1,0.01,
"601884304",1,0.01,
"601884322",1,0.01,
"601884341",1,0.01,
"601884606",1,0.01,
"601886028",1,0.01,
"601889100",1,0.01,
"60189",43,0.01,
"60189-2110",2,0.01,
"60189-2947",1,0.01,
"60189-7425",1,0.01,
"601892011",1,0.01,
"601897174",1,0.01,
"60190",8,0.01,
"60190-1740",1,0.01,
"601902331",1,0.01,
"60191",12,0.01,
"60191-2031",1,0.01,
"60191-2037",1,0.01,
"60191-2239",1,0.01,
"601912003",1,0.01,
"601912163",1,0.01,
"60192",17,0.01,
"60192-1321",1,0.01,
"60192-4603",1,0.01,
"601921176",1,0.01,
"601921327",1,0.01,
"601921648",1,0.01,
"60193",38,0.01,
"60193-1370",1,0.01,
"60193-3329",1,0.01,
"60193-5154",1,0.01,
"601934885",1,0.01,
"60194",27,0.01,
"60194-2226",1,0.01,
"60194-3820",1,0.01,
"601942252",1,0.01,
"60195",5,0.01,
"601951312",1,0.01,
"60201",46,0.01,
"60201-1184",1,0.01,
"60201-2071",1,0.01,
"602011346",1,0.01,
"602012117",1,0.01,
"602014084",1,0.01,
"60202",58,0.01,
"60202-1113",1,0.01,
"60202-2002",1,0.01,
"60202-3605",1,0.01,
"60202-3948",1,0.01,
"60202-3971",1,0.01,
"602021025",1,0.01,
"602021231",1,0.01,
"602024601",1,0.01,
"60203",5,0.01,
"60203-1942",1,0.01,
"602031302",1,0.01,
"60301",3,0.01,
"60302",56,0.01,
"60302-1636",1,0.01,
"60302-2214",1,0.01,
"60302-2612",1,0.01,
"60302-2703",1,0.01,
"60302-5000",1,0.01,
"603021318",1,0.01,
"603021410",1,0.01,
"603021422",1,0.01,
"603022502",1,0.01,
"603022941",1,0.01,
"603023312",1,0.01,
"603023560",1,0.01,
"60303",3,0.01,
"60304",17,0.01,
"60304-1613",1,0.01,
"60304-1847",1,0.01,
"603041411",1,0.01,
"603041412",1,0.01,
"603041424",1,0.01,
"603041622",1,0.01,
"603041813",1,0.01,
"603041832",1,0.01,
"60305",20,0.01,
"60305-1309",1,0.01,
"60305-1922",1,0.01,
"603051034",1,0.01,
"603051100",1,0.01,
"60306",1,0.01,
"6033",1,0.01,
"60401",4,0.01,
"604013677",1,0.01,
"60402",71,0.01,
"60402-1139",1,0.01,
"60402-1330",1,0.01,
"60402-1623",2,0.01,
"60402-2401",1,0.01,
"60402-3812",1,0.01,
"60402-3863",1,0.01,
"60402-3971",1,0.01,
"60402-4069",1,0.01,
"604021611",1,0.01,
"604021619",1,0.01,
"604021670",1,0.01,
"604022542",1,0.01,
"604022941",1,0.01,
"604023513",1,0.01,
"604023805",1,0.01,
"604023875",1,0.01,
"60403",5,0.01,
"60404",7,0.01,
"60404-8143",1,0.01,
"60404-8191",1,0.01,
"60404-9414",1,0.01,
"604040563",1,0.01,
"604048922",1,0.01,
"604049434",1,0.01,
"604049521",1,0.01,
"60406",14,0.01,
"60409",17,0.01,
"60410",3,0.01,
"60411",26,0.01,
"60411-1310",1,0.01,
"60411-2601",1,0.01,
"60411-3225",1,0.01,
"60411-4207",1,0.01,
"60411-7500",1,0.01,
"604111850",1,0.01,
"604116600",1,0.01,
"60415",3,0.01,
"60416",1,0.01,
"60417",10,0.01,
"60417-1308",1,0.01,
"60417-1975",1,0.01,
"60417-3781",1,0.01,
"60417-4298",1,0.01,
"604171256",1,0.01,
"604171271",1,0.01,
"604173921",1,0.01,
"60419",7,0.01,
"60419-1530",1,0.01,
"60419-2716",1,0.01,
"60420",1,0.01,
"60421",2,0.01,
"60422",18,0.01,
"60422-1039",1,0.01,
"60422-4325",1,0.01,
"604221216",1,0.01,
"60423",31,0.01,
"60423-1031",1,0.01,
"60423-1360",1,0.01,
"60423-1702",1,0.01,
"60423-8085",1,0.01,
"60423-9283",1,0.01,
"604232266",1,0.01,
"604238647",1,0.01,
"60425",9,0.01,
"60425-1018",1,0.01,
"60426",3,0.01,
"60427",1,0.01,
"60428",3,0.01,
"60428-2703",1,0.01,
"60428-3920",1,0.01,
"60428-4619",1,0.01,
"60429",14,0.01,
"60429-0653",1,0.01,
"60429-1318",1,0.01,
"60429-2405",1,0.01,
"60430",28,0.01,
"60430-3205",1,0.01,
"60430-3306",1,0.01,
"604301820",1,0.01,
"60431",15,0.01,
"60431-4906",1,0.01,
"60431-5353",1,0.01,
"604311005",1,0.01,
"604318627",1,0.01,
"60432",3,0.01,
"60432-1246",1,0.01,
"60432-2020",1,0.01,
"60432-2306",1,0.01,
"60433",1,0.01,
"60434",1,0.01,
"60435",22,0.01,
"604357447",1,0.01,
"604358741",1,0.01,
"60436",10,0.01,
"60438",14,0.01,
"60438-1708",1,0.01,
"604381557",1,0.01,
"604382118",1,0.01,
"604383230",1,0.01,
"604386501",1,0.01,
"60439",38,0.01,
"60439-2754",1,0.01,
"60439-4081",1,0.01,
"60439-6134",1,0.01,
"604393502",1,0.01,
"604393941",1,0.01,
"604394492",1,0.01,
"604396135",1,0.01,
"60440",38,0.01,
"60440-1141",1,0.01,
"60440-1210",1,0.01,
"604409006",2,0.01,
"60441",20,0.01,
"60441-4284",1,0.01,
"60441-7604",1,0.01,
"604411507",1,0.01,
"604412996",1,0.01,
"604415246",1,0.01,
"60442",6,0.01,
"604428111",1,0.01,
"60443",23,0.01,
"60443-1205",1,0.01,
"60443-3026",1,0.01,
"604431299",1,0.01,
"604431787",1,0.01,
"60445",10,0.01,
"604451328",1,0.01,
"60446",24,0.01,
"60446-4100",1,0.01,
"604461680",1,0.01,
"604461691",1,0.01,
"604465005",1,0.01,
"60447",6,0.01,
"60447-9371",1,0.01,
"60448",26,0.01,
"60448-1466",1,0.01,
"60448-1707",1,0.01,
"60448-8346",1,0.01,
"604481066",1,0.01,
"604481789",1,0.01,
"60449",5,0.01,
"60450",2,0.01,
"60450-2544",1,0.01,
"60451",31,0.01,
"60451-2677",1,0.01,
"60451-3753",1,0.01,
"60451-9688",1,0.01,
"604513828",1,0.01,
"60452",20,1,
"60452-2739",1,0.01,
"60452-4503",1,0.01,
"604521588",1,0.01,
"604522839",1,0.01,
"60453",60,0.01,
"60453-1335",1,0.01,
"60453-1450",2,0.01,
"60453-2962",1,0.01,
"60453-3059",1,0.01,
"60453-3070",1,0.01,
"60453-3267",1,0.01,
"60453-3913",1,0.01,
"60453-6050",1,0.01,
"604531466",1,0.01,
"604531937",1,0.01,
"604533224",1,0.01,
"604533407",2,0.01,
"604533643",1,0.01,
"604533846",1,0.01,
"60455",12,0.01,
"60455-1397",1,0.01,
"60456",1,0.01,
"604561214",1,0.01,
"60457",16,0.01,
"60457-1911",1,0.01,
"60458",9,0.01,
"60458-1173",1,0.01,
"604581153",1,0.01,
"604581634",1,0.01,
"60459",25,0.01,
"60459-2720",1,0.01,
"604591335",1,0.01,
"604591340",1,0.01,
"604592149",1,0.01,
"604592611",1,0.01,
"60461",4,0.01,
"60461-1330",1,0.01,
"604611421",1,0.01,
"60462",54,0.01,
"60462-2238",1,0.01,
"60462-2846",1,0.01,
"60462-7457",1,0.01,
"60462-7763",1,0.01,
"604621597",1,0.01,
"604622325",1,0.01,
"604622360",1,0.01,
"604622604",1,0.01,
"604626113",1,0.01,
"604626115",1,0.01,
"604626413",1,0.01,
"604627710",1,0.01,
"60463",23,0.01,
"60463-1324",1,0.01,
"604631918",1,0.01,
"604632428",1,0.01,
"60464",14,0.01,
"60464-2508",1,0.01,
"604641753",1,0.01,
"604641943",1,0.01,
"60465",28,0.01,
"604651095",1,0.01,
"604651152",1,0.01,
"604651158",1,0.01,
"604651392",1,0.01,
"604652414",1,0.01,
"60466",11,0.01,
"60466-1822",1,0.01,
"60466-1827",1,0.01,
"60467",53,0.01,
"60467-4505",1,0.01,
"60467-4605",1,0.01,
"60467-5353",1,0.01,
"60467-5378",1,0.01,
"60467-5403",1,0.01,
"60467-7826",1,0.01,
"60467-8509",2,0.01,
"60467-9410",1,0.01,
"604674408",1,0.01,
"604674435",1,0.01,
"604674589",1,0.01,
"604675884",1,0.01,
"604677195",1,0.01,
"604677466",1,0.01,
"604678478",1,0.01,
"60468",2,0.01,
"60468-9428",1,0.01,
"60469",2,0.01,
"60469-1102",1,0.01,
"60471",4,0.01,
"60471-1284",1,0.01,
"604711047",1,0.01,
"60472",3,0.01,
"60473",14,0.01,
"60473-2559",1,0.01,
"60473-3580",1,0.01,
"604732228",1,0.01,
"60475",4,0.01,
"604755952",1,0.01,
"604761121",1,0.01,
"60477",46,0.01,
"60477-3660",1,0.01,
"604772608",1,0.01,
"604776725",1,0.01,
"604776851",1,0.01,
"604777175",1,0.01,
"60478",13,0.01,
"60478-5427",1,0.01,
"604784783",1,0.01,
"60479",1,0.01,
"60480",6,0.01,
"604801057",1,0.01,
"60481",2,0.01,
"60482",5,0.01,
"604821102",1,0.01,
"60484",2,0.01,
"60487",24,0.01,
"60487-4684",1,0.01,
"60487-5600",1,0.01,
"60487-5649",1,0.01,
"60487-5807",1,0.01,
"60487-6102",1,0.01,
"60487-7202",1,0.01,
"60487-7500",1,0.01,
"60487-8440",1,0.01,
"60487-8602",1,0.01,
"604875616",1,0.01,
"604878637",1,0.01,
"60490",20,0.01,
"60490-3321",1,0.01,
"60490-5445",1,0.01,
"604904584",1,0.01,
"604904940",1,0.01,
"604904965",1,0.01,
"604905449",1,0.01,
"60491",27,0.01,
"60491-6930",1,0.01,
"60491-7595",1,0.01,
"60491-7997",1,0.01,
"60491-9228",1,0.01,
"60491-9299",1,0.01,
"604917825",1,0.01,
"604918375",1,0.01,
"604918401",1,0.01,
"604918487",1,0.01,
"60501",11,0.01,
"60502",22,0.01,
"60502-7403",1,0.01,
"60502-9009",1,0.01,
"60502-9045",1,0.01,
"605026549",1,0.01,
"605027000",1,0.01,
"605028608",1,0.01,
"605029654",1,0.01,
"605029666",1,0.01,
"60503",12,0.01,
"605036258",1,0.01,
"60504",35,0.01,
"60504-2008",1,0.01,
"60504-3203",1,0.01,
"60504-5333",1,0.01,
"60504-5390",1,0.01,
"60504-5470",1,0.01,
"60504-6023",1,0.01,
"60504-6070",2,0.01,
"605044028",1,0.01,
"605045265",1,0.01,
"605045360",1,0.01,
"605048414",1,0.01,
"60505",8,0.01,
"60505-1819",1,0.01,
"60505-3734",1,0.01,
"60505-4843",1,0.01,
"60506",22,0.01,
"60506-5349",1,0.01,
"60506-6908",1,0.01,
"605061859",1,0.01,
"605067313",1,0.01,
"60510",13,0.01,
"60510-3555",1,0.01,
"60510-8611",1,0.01,
"605102505",1,0.01,
"605102884",1,0.01,
"60513",31,0.01,
"60513-1761",1,0.01,
"60513-2010",1,0.01,
"605131334",1,0.01,
"605131809",1,0.01,
"605131814",1,0.01,
"605132555",1,0.01,
"60514",7,0.01,
"60514-1312",2,0.01,
"605141228",1,0.01,
"605141305",1,0.01,
"605141309",1,0.01,
"605141708",1,0.01,
"60515",26,0.01,
"60515-1146",1,0.01,
"60515-1947",1,0.01,
"605151301",1,0.01,
"605152142",1,0.01,
"605153455",1,0.01,
"60516",20,0.01,
"60516-5109",1,0.01,
"605161937",1,0.01,
"605163658",1,0.01,
"60517",30,0.01,
"60517-1546",1,0.01,
"60517-1683",1,0.01,
"60517-2732",1,0.01,
"60517-3857",1,0.01,
"60517-8027",1,0.01,
"605172003",1,0.01,
"605172241",1,0.01,
"605173107",1,0.01,
"605173109",1,0.01,
"605173759",1,0.01,
"605174617",1,0.01,
"605175403",1,0.01,
"605175407",1,0.01,
"605177744",1,0.01,
"60521",38,0.01,
"60521-3440",1,0.01,
"60521-3754",1,0.01,
"60521-4727",1,0.01,
"605213008",1,0.01,
"605214454",1,0.01,
"605215147",1,0.01,
"60523",19,0.01,
"60523-1129",1,0.01,
"60523-2353",1,0.01,
"60523-2574",1,0.01,
"605232534",1,0.01,
"605232560",1,0.01,
"605232784",1,0.01,
"60525",35,0.01,
"60525-2213",1,0.01,
"60525-2522",1,0.01,
"60525-5833",1,0.01,
"605253076",1,0.01,
"605253625",1,0.01,
"605257115",2,0.01,
"605257913",1,0.01,
"60526",21,0.01,
"605261547",1,0.01,
"605265304",1,0.01,
"60527",39,0.01,
"60527-0301",1,0.01,
"60527-7706",1,0.01,
"60527-8022",1,0.01,
"605275151",1,0.01,
"605275234",1,0.01,
"605275242",1,0.01,
"605275363",1,0.01,
"605275724",1,0.01,
"605276115",1,0.01,
"60532",24,0.01,
"60532-2329",1,0.01,
"60532-2535",1,0.01,
"60532-2855",1,0.01,
"605323316",1,0.01,
"605324429",1,0.01,
"605328234",1,0.01,
"60534",6,0.01,
"60538",8,0.01,
"60538-3437",1,0.01,
"60540",30,0.01,
"60540-3610",1,0.01,
"60540-5171",1,0.01,
"60540-5619",1,0.01,
"60540-7303",1,0.01,
"60540-9495",1,0.01,
"605404303",1,0.01,
"605406381",1,0.01,
"605407659",1,0.01,
"605408151",1,0.01,
"605408202",1,0.01,
"605409571",1,0.01,
"60541",1,0.01,
"60542",10,0.01,
"60543",17,0.01,
"60543-4080",1,0.01,
"60543-8222",1,0.01,
"605438231",1,0.01,
"605438384",1,0.01,
"605439108",1,0.01,
"60544",9,0.01,
"60544-7331",1,0.01,
"605446077",1,0.01,
"605447951",1,0.01,
"60545",4,0.01,
"60546",30,0.01,
"60546-2240",1,0.01,
"605461140",2,0.01,
"605461155",1,0.01,
"605461526",1,0.01,
"605461527",1,0.01,
"605461805",1,0.01,
"605462035",1,0.01,
"60548",2,0.01,
"605482562",1,0.01,
"60554",4,0.01,
"60555",8,0.01,
"60556",1,0.01,
"60558",28,0.01,
"60558-1616",1,0.01,
"60558-2110",1,0.01,
"605581834",1,0.01,
"60559",19,0.01,
"605591208",1,0.01,
"605592076",1,0.01,
"605592627",1,0.01,
"605592893",1,0.01,
"60560",5,0.01,
"605601041",1,0.01,
"60561",29,0.01,
"60561-3547",1,0.01,
"60561-3663",2,0.01,
"60561-4309",1,0.01,
"60561-4506",1,0.01,
"60561-4550",1,0.01,
"60561-5393",1,0.01,
"60561-6418",1,0.01,
"605614816",1,0.01,
"605615187",1,0.01,
"605615918",1,0.01,
"605618455",1,0.01,
"60563",31,1,
"60563-2951",1,0.01,
"60563-9035",1,0.01,
"605631204",1,0.01,
"605631391",1,0.01,
"605632056",1,0.01,
"605632586",1,0.01,
"605632722",1,0.01,
"605638500",1,0.01,
"60564",70,0.01,
"60564-3112",1,0.01,
"60564-3188",1,0.01,
"60564-4166",1,0.01,
"60564-4324",1,0.01,
"60564-4994",1,0.01,
"60564-5142",1,0.01,
"605641119",1,0.01,
"605643109",1,0.01,
"605644100",1,0.01,
"605644101",1,0.01,
"605644360",1,0.01,
"605644423",1,0.01,
"605644699",1,0.01,
"605644779",1,0.01,
"605645102",1,0.01,
"605645161",1,0.01,
"605645665",1,0.01,
"605645731",1,0.01,
"605646116",1,0.01,
"605646123",1,0.01,
"605646141",1,0.01,
"605648205",1,0.01,
"605648306",1,0.01,
"605648460",1,0.01,
"605649782",1,0.01,
"60565",60,0.01,
"60565-1102",1,0.01,
"60565-1357",1,0.01,
"60565-2349",1,0.01,
"60565-2612",1,0.01,
"60565-2830",1,0.01,
"60565-4109",1,0.01,
"60565-4316",1,0.01,
"60565-5238",1,0.01,
"60565-5320",1,0.01,
"605651101",1,0.01,
"605651102",1,0.01,
"605651108",1,0.01,
"605651240",1,0.01,
"605651475",1,0.01,
"605652013",1,0.01,
"605652297",1,0.01,
"605652455",1,0.01,
"605653454",1,0.01,
"605653479",1,0.01,
"605653568",1,0.01,
"605655222",1,0.01,
"605656726",1,0.01,
"605656808",1,0.01,
"605659307",1,0.01,
"60567",1,0.01,
"60585",16,0.01,
"60585-4508",1,0.01,
"605851596",1,0.01,
"605856146",1,0.01,
"60586",27,0.01,
"60586-2179",1,0.01,
"60586-4025",1,0.01,
"60586-5319",1,0.01,
"60586-8524",1,0.01,
"605862301",1,0.01,
"605868142",1,0.01,
"60601",24,0.01,
"60601-5282",1,0.01,
"60601-5974",3,0.01,
"60601-5988",1,0.01,
"60601-7501",1,0.01,
"60601-7515",1,0.01,
"60601-7892",1,0.01,
"606017535",1,0.01,
"60602",3,0.01,
"60602-4881",1,0.01,
"60603",2,0.01,
"60604",4,0.01,
"60605",48,0.01,
"60605-1517",1,0.01,
"606052141",1,0.01,
"60606",8,0.01,
"60607",33,0.01,
"60607-2906",1,0.01,
"60607-4805",1,0.01,
"60607-4866",1,0.01,
"60607-5304",1,0.01,
"60608",107,0.01,
"60608-1673",1,0.01,
"60608-2755",1,0.01,
"60608-2907",1,0.01,
"60608-3363",1,0.01,
"60608-5539",1,0.01,
"60608-5705",2,0.01,
"60608-6216",1,0.01,
"60608-6322",1,0.01,
"60608-6826",1,0.01,
"606083336",1,0.01,
"606083390",1,0.01,
"606083405",1,0.01,
"606084029",1,0.01,
"606084114",1,0.01,
"606084205",1,0.01,
"606085505",1,0.01,
"606085606",1,0.01,
"606085807",1,0.01,
"606085921",1,0.01,
"606086012",1,0.01,
"606086344",1,0.01,
"606086413",1,0.01,
"606086702",1,0.01,
"606086749",1,0.01,
"606086840",1,0.01,
"60609",45,0.01,
"60609-1230",1,0.01,
"60609-1235",1,0.01,
"60609-2042",1,0.01,
"60609-2812",1,0.01,
"60609-3211",1,0.01,
"60609-3503",1,0.01,
"60609-4166",1,0.01,
"60609-4171",1,0.01,
"60609-4252",1,0.01,
"60609-6149",1,0.01,
"606091746",1,0.01,
"606091908",1,0.01,
"606092736",1,0.01,
"606093128",1,0.01,
"606093265",1,0.01,
"606093885",1,0.01,
"606094718",1,0.01,
"606094728",1,0.01,
"606094947",1,0.01,
"60610",67,0.01,
"60610-5502",1,0.01,
"60610-6688",1,0.01,
"606101724",1,0.01,
"60611",51,0.01,
"60611-1103",1,0.01,
"60611-3546",1,0.01,
"60611-4695",1,0.01,
"60611-7134",2,0.01,
"60612",28,0.01,
"60612-1206",1,0.01,
"60612-1247",1,0.01,
"60612-1407",1,0.01,
"60612-1886",1,0.01,
"60612-3520",1,0.01,
"60612-4156",1,0.01,
"606121110",1,0.01,
"606124239",1,0.01,
"606124295",1,0.01,
"60613",128,1,
"60613-1019",1,0.01,
"60613-1203",1,0.01,
"60613-1328",1,0.01,
"60613-1757",1,0.01,
"60613-1959",1,0.01,
"60613-3469",1,0.01,
"60613-3805",1,0.01,
"60613-4141",1,0.01,
"60613-4302",1,0.01,
"60613-4726",1,0.01,
"60613-5772",1,0.01,
"60613-6620",1,0.01,
"606131492",1,0.01,
"60614",204,0.01,
"60614-1315",1,0.01,
"60614-2024",1,0.01,
"60614-3327",1,0.01,
"60614-3356",1,0.01,
"60614-3399",1,0.01,
"60614-3478",1,0.01,
"60614-4175",1,0.01,
"60614-4663",1,0.01,
"60614-5216",1,0.01,
"60614-5621",1,0.01,
"60614-5743",1,0.01,
"60614-5904",1,0.01,
"60614-6509",1,0.01,
"606142028",1,0.01,
"606142379",1,0.01,
"60615",70,0.01,
"60615-2044",1,0.01,
"60615-3114",1,0.01,
"60615-3133",1,0.01,
"60615-3253",1,0.01,
"606155208",1,0.01,
"60616",88,0.01,
"60616-1968",1,0.01,
"60616-2216",1,0.01,
"60616-2752",1,0.01,
"60616-3008",1,0.01,
"60616-3128",1,0.01,
"60616-4053",1,0.01,
"60616-4271",1,0.01,
"60616-4809",1,0.01,
"606161153",1,0.01,
"606162208",1,0.01,
"606162213",1,0.01,
"606162489",1,0.01,
"606162548",1,0.01,
"606162603",1,0.01,
"606162604",1,0.01,
"606163620",1,0.01,
"60617",119,0.01,
"60617-1159",1,0.01,
"60617-1436",1,0.01,
"60617-2032",1,0.01,
"60617-3465",1,0.01,
"60617-3850",1,0.01,
"60617-5029",1,0.01,
"60617-5256",1,0.01,
"60617-5533",1,0.01,
"60617-6036",1,0.01,
"60617-6223",1,0.01,
"60617-6703",1,0.01,
"60617-7142",1,0.01,
"60617-7358",1,0.01,
"606172503",1,0.01,
"606174902",1,0.01,
"606175126",1,0.01,
"606176007",1,0.01,
"606176252",1,0.01,
"606176323",1,0.01,
"606176324",1,0.01,
"606176526",1,0.01,
"606176542",1,0.01,
"606176602",1,0.01,
"606176811",1,0.01,
"606176836",1,0.01,
"606177057",1,0.01,
"60618",121,0.01,
"60618-1213",1,0.01,
"60618-1217",1,0.01,
"60618-1602",1,0.01,
"60618-2813",1,0.01,
"60618-3109",1,0.01,
"60618-3528",1,0.01,
"60618-3620",1,0.01,
"60618-5207",1,0.01,
"60618-5718",1,0.01,
"60618-6509",1,0.01,
"60618-7219",1,0.01,
"60618-7319",1,0.01,
"606181003",1,0.01,
"606181104",1,0.01,
"606182011",1,0.01,
"606183316",1,0.01,
"606184031",1,0.01,
"606184418",1,0.01,
"606185112",1,0.01,
"606185714",1,0.01,
"606185718",1,0.01,
"606186749",1,0.01,
"606187026",1,0.01,
"606187319",1,0.01,
"606187512",1,0.01,
"606187768",1,0.01,
"606188211",1,0.01,
"60619",62,0.01,
"60619-1217",1,0.01,
"60619-1320",1,0.01,
"60619-3005",1,0.01,
"60619-3814",1,0.01,
"60619-4204",1,0.01,
"60619-6629",1,0.01,
"60619-7718",1,0.01,
"60619-7730",1,0.01,
"60620",51,0.01,
"60620-2607",1,0.01,
"60620-3512",1,0.01,
"60620-3620",1,0.01,
"60620-3660",1,0.01,
"60620-3923",1,0.01,
"60620-4253",1,0.01,
"60620-5033",1,0.01,
"606204923",1,0.01,
"606205514",1,0.01,
"60621",17,0.01,
"60621-1633",1,0.01,
"60621-2319",1,0.01,
"60622",112,0.01,
"60622-1700",1,0.01,
"60622-1933",2,0.01,
"60622-2854",1,0.01,
"60622-2932",1,0.01,
"60622-3110",1,0.01,
"60622-3343",1,0.01,
"60622-3451",1,0.01,
"60622-4458",1,0.01,
"60622-6230",1,0.01,
"606224400",1,0.01,
"606224517",1,0.01,
"606224613",1,0.01,
"60623",95,0.01,
"60623-2216",1,0.01,
"60623-2642",1,0.01,
"60623-3135",1,0.01,
"60623-3510",1,0.01,
"60623-3943",1,0.01,
"60623-4454",1,0.01,
"60623-4618",1,0.01,
"60623-4638",1,0.01,
"606233413",1,0.01,
"606233423",1,0.01,
"606233426",1,0.01,
"606233459",1,0.01,
"606233540",1,0.01,
"606233714",1,0.01,
"606234324",1,0.01,
"606234612",1,0.01,
"606234616",1,0.01,
"606234752",1,0.01,
"606234755",1,0.01,
"606234820",1,0.01,
"60624",14,0.01,
"60624-1247",1,0.01,
"60624-3502",1,0.01,
"60624-3720",1,0.01,
"606242903",1,0.01,
"60625",157,0.01,
"60625-2019",1,0.01,
"60625-2586",1,0.01,
"60625-2703",1,0.01,
"60625-4653",1,0.01,
"60625-4923",1,0.01,
"60625-5205",1,0.01,
"60625-5914",1,0.01,
"60625-6004",1,0.01,
"60625-8360",1,0.01,
"606251010",1,0.01,
"606251809",1,0.01,
"606252306",1,0.01,
"606254216",2,0.01,
"606254220",1,0.01,
"606254307",1,0.01,
"606254363",1,0.01,
"606254443",1,0.01,
"606254581",1,0.01,
"606254603",1,0.01,
"606255546",1,0.01,
"606255619",1,0.01,
"606255904",1,0.01,
"606255943",1,0.01,
"606256039",1,0.01,
"606256486",1,0.01,
"606256705",1,0.01,
"60626",87,0.01,
"60626-2323",1,0.01,
"60626-3404",1,0.01,
"60626-4273",1,0.01,
"60626-4535",1,0.01,
"60626-4536",1,0.01,
"606262710",1,0.01,
"606263336",1,0.01,
"60627",1,0.01,
"60628",41,0.01,
"60628-1043",1,0.01,
"60628-2102",1,0.01,
"60628-3401",1,0.01,
"60628-3622",1,0.01,
"60628-4741",1,0.01,
"60628-4902",1,0.01,
"60628-5529",1,0.01,
"60628-6042",1,0.01,
"60628-7237",1,0.01,
"606282030",1,0.01,
"606282820",1,0.01,
"60629",182,0.01,
"60629-1019",1,0.01,
"60629-1213",1,0.01,
"60629-1503",1,0.01,
"60629-2209",1,0.01,
"60629-2623",1,0.01,
"60629-3314",1,0.01,
"60629-3520",1,0.01,
"60629-4144",1,0.01,
"60629-4343",1,0.01,
"60629-4810",1,0.01,
"60629-4824",1,0.01,
"60629-4852",1,0.01,
"60629-4901",1,0.01,
"60629-5231",1,0.01,
"60629-5232",1,0.01,
"60629-5235",1,0.01,
"60629-5236",1,0.01,
"60629-5434",1,0.01,
"60629-5554",1,0.01,
"60629-5635",1,0.01,
"606291010",1,0.01,
"606291044",1,0.01,
"606291521",1,0.01,
"606292135",1,0.01,
"606292207",1,0.01,
"606292401",1,0.01,
"606292416",1,0.01,
"606292421",1,0.01,
"606292610",1,0.01,
"606293017",1,0.01,
"606293316",1,0.01,
"606293723",1,0.01,
"606293818",2,0.01,
"606294144",1,0.01,
"606294340",1,0.01,
"606294823",1,0.01,
"606295120",1,0.01,
"606295211",1,0.01,
"606295214",1,0.01,
"606295640",1,0.01,
"6063",1,0.01,
"60630",77,0.01,
"60630-1792",1,0.01,
"60630-2226",1,0.01,
"60630-2722",1,0.01,
"60630-4131",1,0.01,
"60630-4238",1,0.01,
"60630-4605",1,0.01,
"606301128",1,0.01,
"606301836",1,0.01,
"606302151",1,0.01,
"606302607",1,0.01,
"606302733",1,0.01,
"606302911",1,0.01,
"606302942",1,0.01,
"606303144",1,0.01,
"606303365",1,0.01,
"606304256",1,0.01,
"60631",39,0.01,
"60631-1927",1,0.01,
"60631-4438",1,0.01,
"606311065",1,0.01,
"606311108",1,0.01,
"606311116",1,0.01,
"606311631",1,0.01,
"606314236",1,0.01,
"606314433",1,0.01,
"60632",158,0.01,
"60632-1103",1,0.01,
"60632-1171",1,0.01,
"60632-1508",1,0.01,
"60632-1713",1,0.01,
"60632-2113",1,0.01,
"60632-2213",1,0.01,
"60632-2234",1,0.01,
"60632-2426",1,0.01,
"60632-2527",1,0.01,
"60632-2607",1,0.01,
"60632-2721",1,0.01,
"60632-2815",1,0.01,
"60632-2920",1,0.01,
"60632-3528",1,0.01,
"60632-3532",1,0.01,
"60632-3547",1,0.01,
"60632-4049",1,0.01,
"60632-4618",1,0.01,
"606321007",1,0.01,
"606321119",1,0.01,
"606321218",1,0.01,
"606321340",1,0.01,
"606321505",1,0.01,
"606321526",1,0.01,
"606321606",1,0.01,
"606321709",1,0.01,
"606321803",1,0.01,
"606322111",1,0.01,
"606322538",1,0.01,
"606322921",1,0.01,
"606322923",1,0.01,
"606322947",1,0.01,
"606323004",1,0.01,
"606323223",1,0.01,
"606323252",1,0.01,
"606323314",1,0.01,
"606324109",1,0.01,
"606324604",1,0.01,
"606324606",1,0.01,
"606324815",1,0.01,
"60633",12,0.01,
"60633-1064",1,0.01,
"60633-2001",1,0.01,
"606331704",1,0.01,
"60634",117,0.01,
"60634-1510",1,0.01,
"60634-1832",1,0.01,
"60634-2420",1,0.01,
"60634-2553",1,0.01,
"60634-2631",1,0.01,
"60634-3412",1,0.01,
"60634-4005",1,0.01,
"60634-4011",1,0.01,
"60634-4120",1,0.01,
"60634-4510",1,0.01,
"60634-4529",1,0.01,
"60634-4943",1,0.01,
"606341585",1,0.01,
"606341706",1,0.01,
"606341805",1,0.01,
"606342031",1,0.01,
"606342376",1,0.01,
"606342666",1,0.01,
"606342918",1,0.01,
"606342939",1,0.01,
"606343410",1,0.01,
"606343617",1,0.01,
"606343720",1,0.01,
"606343751",1,0.01,
"606343905",1,0.01,
"606344636",1,0.01,
"606344952",1,0.01,
"606345007",1,0.01,
"606345058",1,0.01,
"60635",3,0.01,
"60636",18,0.01,
"60636-1400",1,0.01,
"60636-2406",1,0.01,
"60636-2827",1,0.01,
"60636-3016",1,0.01,
"60636-3731",1,0.01,
"606361213",1,0.01,
"606363847",1,0.01,
"60637",33,1,
"60637-1118",1,0.01,
"60637-4436",1,0.01,
"60637-4514",1,0.01,
"606371639",1,0.01,
"606373603",1,0.01,
"60638",70,0.01,
"60638-1513",1,0.01,
"60638-1625",1,0.01,
"60638-2143",1,0.01,
"60638-2303",1,0.01,
"60638-3024",1,0.01,
"60638-3111",1,0.01,
"60638-3545",1,0.01,
"60638-5546",1,0.01,
"60638-5742",1,0.01,
"606381128",1,0.01,
"606381344",1,0.01,
"606382207",1,0.01,
"606382416",1,0.01,
"606383512",1,0.01,
"606384003",1,0.01,
"606384509",1,0.01,
"606384638",1,0.01,
"606385931",1,0.01,
"60639",126,0.01,
"60639-1251",1,0.01,
"60639-1918",1,0.01,
"60639-2630",1,0.01,
"60639-2831",1,0.01,
"60639-4838",1,0.01,
"606391003",1,0.01,
"606391052",1,0.01,
"606391092",1,0.01,
"606391524",1,0.01,
"606391602",1,0.01,
"606391903",1,0.01,
"606392024",1,0.01,
"606392629",1,0.01,
"606393104",1,0.01,
"606393438",1,0.01,
"606395205",1,0.01,
"60640",118,0.01,
"60640-2743",1,0.01,
"60640-2813",1,0.01,
"60640-4082",1,0.01,
"60640-5447",1,0.01,
"60640-5620",1,0.01,
"60640-6148",1,0.01,
"60640-7415",1,0.01,
"60640-7891",1,0.01,
"606402007",1,0.01,
"606402051",1,0.01,
"606402220",1,0.01,
"606402903",1,0.01,
"606402909",1,0.01,
"606403391",1,0.01,
"606403779",1,0.01,
"606404701",1,0.01,
"606406515",1,0.01,
"606407540",1,0.01,
"60641",99,0.01,
"60641-1348",2,0.01,
"60641-1479",1,0.01,
"60641-2224",1,0.01,
"60641-2230",1,0.01,
"60641-2623",1,0.01,
"60641-2916",1,0.01,
"60641-3251",1,0.01,
"60641-3725",1,0.01,
"60641-5023",1,0.01,
"60641-5145",1,0.01,
"606411315",1,0.01,
"606411421",1,0.01,
"606411439",1,0.01,
"606412608",1,0.01,
"606412945",1,0.01,
"606413242",1,0.01,
"606413248",1,0.01,
"606413450",1,0.01,
"606414120",1,0.01,
"606414251",1,0.01,
"606414909",1,0.01,
"606414939",1,0.01,
"606415112",1,0.01,
"606415135",1,0.01,
"606415227",1,0.01,
"606415229",1,0.01,
"606415343",1,0.01,
"60642",34,0.01,
"60642-5801",1,0.01,
"60642-5864",1,0.01,
"60642-6467",1,0.01,
"60642-8070",1,0.01,
"60642-8161",1,0.01,
"606426147",1,0.01,
"60643",48,0.01,
"60643-3314",1,0.01,
"60643-4128",1,0.01,
"60643-4515",1,0.01,
"606431808",1,0.01,
"606432161",1,0.01,
"606432167",1,0.01,
"606432819",1,0.01,
"606433102",1,0.01,
"60644",20,0.01,
"60644-1015",1,0.01,
"60644-1804",1,0.01,
"60644-3942",1,0.01,
"60644-4236",1,0.01,
"60644-4802",1,0.01,
"606442222",1,0.01,
"60645",73,0.01,
"60645-1849",1,0.01,
"60645-2386",1,0.01,
"60645-2491",1,0.01,
"60645-3013",1,0.01,
"606451501",1,0.01,
"606454105",1,0.01,
"606454296",1,0.01,
"606454528",1,0.01,
"606454670",1,0.01,
"606454714",1,0.01,
"606454809",1,0.01,
"606455017",1,0.01,
"606455103",1,0.01,
"606455681",1,0.01,
"60646",34,0.01,
"60646-1264",1,0.01,
"60646-2703",1,0.01,
"60646-4909",1,0.01,
"60646-5305",1,0.01,
"60646-5516",1,0.01,
"60646-6205",1,0.01,
"60646-6539",1,0.01,
"606461330",1,0.01,
"606461413",1,0.01,
"606461512",1,0.01,
"606463630",1,0.01,
"606464918",1,0.01,
"606465014",2,0.01,
"606465024",1,0.01,
"606465215",1,0.01,
"606465223",1,0.01,
"606465225",2,0.01,
"606465347",1,0.01,
"606465807",1,0.01,
"606466159",1,0.01,
"606466420",1,0.01,
"60647",150,0.01,
"60647-1408",1,0.01,
"60647-1606",1,0.01,
"60647-2330",1,0.01,
"60647-2405",1,0.01,
"60647-3538",1,0.01,
"60647-3694",1,0.01,
"60647-3732",1,0.01,
"60647-3754",1,0.01,
"60647-4019",1,0.01,
"60647-4706",1,0.01,
"60647-4909",1,0.01,
"606471050",1,0.01,
"606471602",1,0.01,
"606473636",1,0.01,
"606474919",1,0.01,
"606475309",1,0.01,
"60648",1,0.01,
"60649",54,0.01,
"60649-1806",1,0.01,
"60649-2208",1,0.01,
"60649-3317",1,0.01,
"60649-3828",1,0.01,
"60649-3905",1,0.01,
"60649-4120",1,0.01,
"606493825",1,0.01,
"60651",40,0.01,
"60651-1971",1,0.01,
"60651-2501",1,0.01,
"60651-3919",1,0.01,
"60651-3950",1,0.01,
"606511929",1,0.01,
"60652",54,0.01,
"60652-1222",1,0.01,
"60652-1304",1,0.01,
"60652-1336",1,0.01,
"60652-2213",1,0.01,
"60652-2318",1,0.01,
"60652-2858",1,0.01,
"60652-2919",1,0.01,
"60652-3901",1,0.01,
"606521318",1,0.01,
"606521816",1,0.01,
"606522437",1,0.01,
"606523317",1,0.01,
"606523807",1,0.01,
"606523812",1,0.01,
"60653",29,0.01,
"60653-3249",1,0.01,
"60653-3429",1,0.01,
"60653-4366",1,0.01,
"606532012",1,0.01,
"606534085",1,0.01,
"60654",20,0.01,
"60654-7235",1,0.01,
"60655",21,0.01,
"60655-3215",1,0.01,
"60655-3311",1,0.01,
"606551024",1,0.01,
"606551138",1,0.01,
"606551504",1,0.01,
"606551515",1,0.01,
"606552641",1,0.01,
"606553233",1,0.01,
"606553740",1,0.01,
"606553905",1,0.01,
"60656",43,0.01,
"60656-1110",1,0.01,
"60656-1724",1,0.01,
"60656-2720",1,0.01,
"606562016",1,0.01,
"606562331",1,0.01,
"606562395",1,0.01,
"606563502",1,0.01,
"606564251",1,0.01,
"60657",180,0.01,
"60657-1618",1,0.01,
"60657-1841",1,0.01,
"60657-2014",1,0.01,
"60657-2240",1,0.01,
"60657-4227",1,0.01,
"60657-4366",1,0.01,
"60657-4600",1,0.01,
"60657-4949",1,0.01,
"60657-5526",1,0.01,
"60657-5552",1,0.01,
"606571412",1,0.01,
"606573412",1,0.01,
"606574103",1,0.01,
"60659",85,0.01,
"60659-2165",1,0.01,
"60659-2509",1,0.01,
"60659-2836",2,0.01,
"60659-3610",1,0.01,
"60659-4380",1,0.01,
"60659-4418",1,0.01,
"606591685",1,0.01,
"606591706",1,0.01,
"606591906",1,0.01,
"606592109",1,0.01,
"606592413",1,0.01,
"606592707",1,0.01,
"606592945",1,0.01,
"606593006",1,0.01,
"606593411",1,0.01,
"606594264",1,0.01,
"606594336",1,0.01,
"606594572",1,0.01,
"606594906",1,0.01,
"60660",75,1,
"60660-2309",1,0.01,
"60660-2603",1,0.01,
"60660-4839",2,0.01,
"60660-5119",1,0.01,
"60660-5514",1,0.01,
"606602211",1,0.01,
"606602309",1,0.01,
"606602322",1,0.01,
"606602982",1,0.01,
"60661",10,0.01,
"60661-2402",1,0.01,
"606806626",1,0.01,
"60690",4,0.01,
"60706",38,0.01,
"60706-1142",1,0.01,
"60706-3887",1,0.01,
"607061157",1,0.01,
"607063406",1,0.01,
"607064428",1,0.01,
"607064741",1,0.01,
"60707",69,0.01,
"60707-1707",1,0.01,
"60707-1719",1,0.01,
"60707-1752",1,0.01,
"60707-1844",1,0.01,
"60707-2110",1,0.01,
"60707-3231",1,0.01,
"60707-3236",1,0.01,
"60707-4144",1,0.01,
"607071140",1,0.01,
"607071209",1,0.01,
"607071344",1,0.01,
"607071731",1,0.01,
"607071744",1,0.01,
"607072216",1,0.01,
"607072401",1,0.01,
"607073636",1,0.01,
"607073929",1,0.01,
"607074221",1,0.01,
"607074317",1,0.01,
"607074409",1,0.01,
"60712",53,0.01,
"60712-3015",1,0.01,
"60712-3831",1,0.01,
"60712-4723",1,0.01,
"607121006",1,0.01,
"607121016",1,0.01,
"607122525",1,0.01,
"607123452",1,0.01,
"607123501",1,0.01,
"60714",58,0.01,
"60714-1317",1,0.01,
"60714-3218",1,0.01,
"607143311",1,0.01,
"607145751",1,0.01,
"6073",1,0.01,
"60803",11,0.01,
"60803-2402",1,0.01,
"608035853",1,0.01,
"60804",49,0.01,
"60804-1014",1,0.01,
"60804-1056",1,0.01,
"60804-1904",1,0.01,
"60804-3222",1,0.01,
"60804-3418",1,0.01,
"60804-3948",1,0.01,
"60804-4019",1,0.01,
"60804-4311",1,0.01,
"608041054",1,0.01,
"608041710",1,0.01,
"608041838",1,0.01,
"608042140",1,0.01,
"608042751",1,0.01,
"608042808",1,0.01,
"608043225",1,0.01,
"608043327",1,0.01,
"608043551",1,0.01,
"608043628",1,0.01,
"608043750",1,0.01,
"60805",12,0.01,
"60805-2642",1,0.01,
"60805-3333",1,0.01,
"608052226",1,0.01,
"608053229",1,0.01,
"608053762",1,0.01,
"60827",4,0.01,
"60827-6414",1,0.01,
"60901",4,0.01,
"609018371",1,0.01,
"60914",11,0.01,
"609144904",1,0.01,
"609146401",1,0.01,
"609149201",1,0.01,
"60915",3,0.01,
"609277093",1,0.01,
"60930",1,0.01,
"60942",2,0.01,
"60950",1,0.01,
"610",0.01,1,
"6100",0.01,1,
"61004",0.01,1,
"610064",0.01,2,
"61008",8,0.01,
"61008-7009",1,0.01,
"610081927",1,0.01,
"610087182",1,0.01,
"610088578",1,0.01,
"61010",2,0.01,
"61011",1,0.01,
"61015",1,0.01,
"61016",2,0.01,
"61021",2,0.01,
"61032",2,0.01,
"61036",1,0.01,
"61037",1,0.01,
"61054",1,0.01,
"61061",2,0.01,
"61064",1,0.01,
"61065",2,0.01,
"6107",1,0.01,
"61070",1,0.01,
"61071",1,0.01,
"61072",2,1,
"61073",8,0.01,
"61073-9080",1,0.01,
"610737585",1,0.01,
"6108",1,0.01,
"61080",1,0.01,
"61081",1,0.01,
"610819575",1,0.01,
"61101",1,0.01,
"611012821",1,0.01,
"611016457",1,0.01,
"61102",1,0.01,
"61103",2,0.01,
"61103-4544",1,0.01,
"611034339",1,0.01,
"611036343",1,0.01,
"61104",1,0.01,
"61107",9,0.01,
"61107-2713",1,0.01,
"611071025",1,0.01,
"611073010",1,0.01,
"61108",2,0.01,
"61109-2168",1,0.01,
"61109-2482",1,0.01,
"6111",1,0.01,
"61111",4,0.01,
"61111-3532",1,0.01,
"611118638",1,0.01,
"61113",0.01,1,
"61114",5,0.01,
"61114-5516",1,0.01,
"611146156",1,0.01,
"611147408",1,0.01,
"61115",4,0.01,
"61201",2,0.01,
"61232-9528",1,0.01,
"61234",2,0.01,
"61242",2,0.01,
"61244",2,0.01,
"61250",1,0.01,
"61254",1,0.01,
"61259",1,0.01,
"612630104",1,0.01,
"61264",1,0.01,
"61265",4,0.01,
"61320",2,0.01,
"613309530",1,0.01,
"61342",1,0.01,
"61342-9304",1,0.01,
"61345-9281",1,0.01,
"61348",1,0.01,
"61350",1,0.01,
"613504204",1,0.01,
"61354",2,0.01,
"61354-1582",1,0.01,
"61356",4,0.01,
"613569180",1,0.01,
"613609342",1,0.01,
"61367",1,0.01,
"61368",1,0.01,
"61376",1,0.01,
"61401",1,0.01,
"614011451",1,0.01,
"614011820",1,0.01,
"61422",1,0.01,
"61440",0.01,1,
"61443-3565",1,0.01,
"61455",2,0.01,
"614553029",1,0.01,
"61462",3,0.01,
"61517",1,0.01,
"61523",2,0.01,
"61525",3,0.01,
"615252715",1,0.01,
"61530",1,0.01,
"61535",1,0.01,
"61536-9626",1,0.01,
"61537",1,0.01,
"61548",1,0.01,
"61548-0144",1,0.01,
"61550",1,0.01,
"61552",1,0.01,
"61559",1,0.01,
"61561",2,0.01,
"61561-7812",1,0.01,
"61568",1,0.01,
"61571",2,0.01,
"61606",3,0.01,
"616112211",2,0.01,
"61614",3,0.01,
"616141099",1,0.01,
"616144112",1,0.01,
"61615",5,0.01,
"61615-2361",1,0.01,
"61701",8,0.01,
"61701-4230",1,0.01,
"61704",7,0.01,
"61705",1,0.01,
"61737",1,0.01,
"61761",9,0.01,
"61764",2,0.01,
"61764-1448",1,0.01,
"61801",4,0.01,
"61802",4,0.01,
"61820",17,0.01,
"61820-6526",1,0.01,
"618204616",1,0.01,
"618207635",1,0.01,
"61821",4,0.01,
"618214520",1,0.01,
"61822",6,0.01,
"61822-7391",1,0.01,
"61832",3,0.01,
"61853",1,0.01,
"61866",1,0.01,
"618739060",1,0.01,
"61880",1,0.01,
"61920",3,0.01,
"62002",1,0.01,
"62025",6,0.01,
"62025-7343",1,0.01,
"62034",2,0.01,
"62040",2,0.01,
"622031624",1,0.01,
"62207",1,0.01,
"62208",1,0.01,
"622083928",1,0.01,
"62220",1,0.01,
"62221",2,0.01,
"62221-7040",1,0.01,
"62223",1,0.01,
"62226",2,0.01,
"62226-6051",1,0.01,
"62234-4869",1,0.01,
"62249",1,0.01,
"62249-2627",1,0.01,
"62269",5,0.01,
"622943614",1,0.01,
"62301",1,0.01,
"623055935",1,0.01,
"62321",1,0.01,
"62411",1,0.01,
"62427",1,0.01,
"62454",1,0.01,
"62471",1,0.01,
"62521",1,0.01,
"62522",2,0.01,
"62526",2,0.01,
"62535",1,0.01,
"625481212",1,0.01,
"62549",1,0.01,
"62650",2,0.01,
"62656",1,0.01,
"62675",1,0.01,
"62702",2,0.01,
"62703",2,0.01,
"62704",4,0.01,
"627046476",1,0.01,
"627079340",1,0.01,
"62711",3,0.01,
"62712",3,0.01,
"62801",1,0.01,
"62801-2614",1,0.01,
"62808",1,0.01,
"62812",1,0.01,
"62821",1,0.01,
"628642155",1,0.01,
"62880",1,0.01,
"62901",5,0.01,
"62901-3266",1,0.01,
"62901-4102",1,0.01,
"62906",1,0.01,
"62912",1,0.01,
"62946-2313",1,0.01,
"62958",1,0.01,
"62959",1,0.01,
"62959-4270",1,0.01,
"62983",1,0.01,
"63005",2,0.01,
"63005-4966",1,0.01,
"630054468",1,0.01,
"630054484",1,0.01,
"630054661",1,0.01,
"630056336",1,0.01,
"63011",2,0.01,
"63011-3454",1,0.01,
"63017",4,0.01,
"63017-2489",1,0.01,
"63017-3047",1,0.01,
"630171912",1,0.01,
"630172477",1,0.01,
"63021",5,0.01,
"63021-6820",1,0.01,
"630213819",1,0.01,
"630215865",1,0.01,
"63025",2,0.01,
"630263962",1,0.01,
"63028",1,0.01,
"63033",2,0.01,
"630334346",1,0.01,
"63034-2051",1,0.01,
"630342161",1,0.01,
"630342648",1,0.01,
"63040",3,0.01,
"630401659",1,0.01,
"63042",1,0.01,
"630443513",1,0.01,
"63052-1536",1,0.01,
"63069",2,0.01,
"63089",1,0.01,
"63104",2,0.01,
"63104-1404",1,0.01,
"63104-2541",1,0.01,
"63105",3,0.01,
"63105-2516",1,0.01,
"63106",1,0.01,
"63108",3,0.01,
"631082302",1,0.01,
"63109",1,0.01,
"631102703",1,0.01,
"63111-1131",1,0.01,
"63112-1001",1,0.01,
"63114",1,0.01,
"63117",2,0.01,
"631172139",1,0.01,
"63118",2,0.01,
"63118-1126",1,0.01,
"63119",9,0.01,
"63119-3022",1,0.01,
"63119-5243",1,0.01,
"631192259",1,0.01,
"631192837",1,0.01,
"631193242",1,0.01,
"631194833",1,0.01,
"63122",6,0.01,
"63122-4102",1,0.01,
"63122-6335",1,0.01,
"631223344",1,0.01,
"631223433",1,0.01,
"631224828",1,0.01,
"63123-1146",1,0.01,
"631231537",1,0.01,
"631232831",1,0.01,
"63124",1,0.01,
"631242042",1,0.01,
"63126",2,0.01,
"63126-1439",1,0.01,
"63126-3504",1,0.01,
"63127",1,0.01,
"63128",3,0.01,
"63129",2,0.01,
"63130",4,0.01,
"63130-3042",1,0.01,
"631303824",1,0.01,
"63131",4,0.01,
"631311137",1,0.01,
"631312151",1,0.01,
"631312304",1,0.01,
"631313627",1,0.01,
"631314114",1,0.01,
"631324474",1,0.01,
"63135-3158",1,0.01,
"63135-3511",1,0.01,
"631364508",1,0.01,
"63137",1,0.01,
"631372310",1,0.01,
"63141",4,0.01,
"63141-6379",1,0.01,
"63141-7365",1,0.01,
"63143",1,0.01,
"63144-1010",1,0.01,
"63144-1643",1,0.01,
"63144-2533",1,0.01,
"63146",2,0.01,
"631465020",1,0.01,
"63147",1,0.01,
"63147-1215",1,0.01,
"63301",2,0.01,
"633011628",1,0.01,
"63303",1,0.01,
"633036468",1,0.01,
"63304",1,0.01,
"633612101",1,0.01,
"63366-5587",1,0.01,
"633671014",1,0.01,
"63368",1,0.01,
"633687185",1,0.01,
"633688596",1,0.01,
"63376",3,0.01,
"63376-7183",1,0.01,
"63383-4829",1,0.01,
"63388",1,0.01,
"63501",1,0.01,
"63701",1,0.01,
"6371",1,0.01,
"64014",1,0.01,
"64015",1,0.01,
"640608792",1,0.01,
"64068-1804",1,0.01,
"640681287",1,0.01,
"64080",1,0.01,
"64081",1,0.01,
"640812490",1,0.01,
"64086",1,0.01,
"64086-6719",1,0.01,
"640898929",1,0.01,
"64109",3,0.01,
"64110",1,0.01,
"64111",1,0.01,
"64113",1,0.01,
"64113-2035",1,0.01,
"641131233",1,0.01,
"641131560",1,0.01,
"641131903",1,0.01,
"641132004",1,0.01,
"641132539",1,0.01,
"64114",1,0.01,
"64116",2,0.01,
"64151",1,0.01,
"64158",1,0.01,
"64160",0.01,1,
"645063516",1,0.01,
"6511",2,0.01,
"65201",4,0.01,
"65201-4000",1,0.01,
"65202",2,0.01,
"65202-9866",1,0.01,
"65203",3,0.01,
"65301-8969",1,0.01,
"65500",0.01,1,
"655000",0.01,1,
"6560",0.01,1,
"65714",1,0.01,
"657147024",1,0.01,
"65781",1,0.01,
"65804",1,0.01,
"65807",1,0.01,
"65809",1,0.01,
"65810",1,0.01,
"6604",1,0.01,
"66047",1,0.01,
"66048",1,0.01,
"6605",1,0.01,
"66061",2,0.01,
"66062",4,0.01,
"66104-5518",1,0.01,
"6614",1,0.01,
"662024245",1,0.01,
"662053234",1,0.01,
"662061213",1,0.01,
"662062510",1,0.01,
"66207",1,0.01,
"662072205",1,0.01,
"66208",1,0.01,
"66208-1945",1,0.01,
"66208-2021",1,0.01,
"66209",2,0.01,
"66209-3546",1,0.01,
"662092112",1,0.01,
"662103316",1,0.01,
"66213",2,0.01,
"662154213",1,0.01,
"662156003",1,0.01,
"66216",3,0.01,
"662218084",1,0.01,
"662218176",1,0.01,
"66297",0.01,1,
"66502",1,0.01,
"666061262",1,0.01,
"67002",1,0.01,
"67203",1,0.01,
"672033523",1,0.01,
"67211",1,0.01,
"67219",1,0.01,
"67220",1,0.01,
"67220-2970",1,0.01,
"678001",0.01,1,
"68008",1,0.01,
"68069",1,0.01,
"68102",1,0.01,
"68104",1,0.01,
"68114",1,0.01,
"68116",2,0.01,
"68118-2722",1,0.01,
"681242715",1,0.01,
"68127",1,0.01,
"68132",2,0.01,
"68132-2617",1,0.01,
"68135",2,0.01,
"68135-1366",1,0.01,
"681351308",1,0.01,
"68137",1,0.01,
"681441432",1,0.01,
"681521037",1,0.01,
"68181",1,0.01,
"6830",1,0.01,
"68502",1,0.01,
"685025034",1,0.01,
"685122431",1,0.01,
"68516",2,0.01,
"68524",1,0.01,
"6854",1,0.01,
"6877",2,0.01,
"687765603",1,0.01,
"6905",1,0.01,
"69559",1,0.01,
"70001",1,0.01,
"700021918",1,0.01,
"700053815",1,0.01,
"700068",0.01,1,
"70058",2,0.01,
"700656605",1,0.01,
"7010",1,0.01,
"70115",2,0.01,
"701154335",2,0.01,
"701182941",1,0.01,
"70122",1,0.01,
"701222211",1,0.01,
"70125",1,0.01,
"70301",1,0.01,
"7034",1,0.01,
"7042",1,0.01,
"70433-0348",1,0.01,
"70501",1,0.01,
"7052",1,0.01,
"7054",1,0.01,
"7069",1,0.01,
"7071",1,0.01,
"70714-3211",1,0.01,
"70769",1,0.01,
"70808",1,0.01,
"70808-8725",1,0.01,
"710032",0.01,1,
"7114",1,0.01,
"71459",1,0.01,
"71998",1,0.01,
"720",0.01,1,
"72019",1,0.01,
"72113",1,0.01,
"72116",1,0.01,
"72176",1,0.01,
"72204",1,0.01,
"72390",1,0.01,
"72601",1,0.01,
"72701",1,0.01,
"72703",1,0.01,
"72712-3621",1,0.01,
"72761",1,0.01,
"727620891",1,0.01,
"72837",1,0.01,
"72903",1,0.01,
"73025-2534",1,0.01,
"73071",2,0.01,
"73071-2259",1,0.01,
"73071-4054",1,0.01,
"73106",1,0.01,
"73120",1,0.01,
"74055",1,0.01,
"74074",1,0.01,
"74104",1,0.01,
"741052211",1,0.01,
"741074506",1,0.01,
"74114",1,0.01,
"74129",1,0.01,
"74133",1,0.01,
"74136",1,0.01,
"74137",2,0.01,
"74402-1005",1,0.01,
"7461",1,0.01,
"74700",0.01,1,
"7481",1,0.01,
"75002",1,0.01,
"75003",0.01,1,
"75007",2,0.01,
"7501",1,0.01,
"75010",1,1,
"75013",1,0.01,
"75015",0.01,1,
"75023",2,0.01,
"75024",1,0.01,
"75025",1,0.01,
"750283781",1,0.01,
"75032-7626",1,0.01,
"75033",2,0.01,
"75034",2,0.01,
"750341279",1,0.01,
"75035",2,0.01,
"75038",1,0.01,
"75039",1,0.01,
"75054",1,0.01,
"75056",1,0.01,
"750565785",1,0.01,
"75057-2702",1,0.01,
"75062",2,0.01,
"75065",1,0.01,
"75070",2,0.01,
"75070-2874",1,0.01,
"75070-7234",1,0.01,
"750707252",1,0.01,
"75071",1,0.01,
"75080",2,0.01,
"75080-3926",1,0.01,
"75082",1,0.01,
"7512",1,0.01,
"75181",1,0.01,
"75201",1,0.01,
"75204",2,0.01,
"75206",1,0.01,
"75209-3212",1,0.01,
"752144430",1,0.01,
"752182233",1,0.01,
"752201703",1,0.01,
"75225",2,0.01,
"75228",1,0.01,
"752292852",1,0.01,
"75230",1,0.01,
"75230-2852",1,0.01,
"75241",1,0.01,
"75243",1,0.01,
"752482860",1,0.01,
"75252",1,0.01,
"752522372",1,0.01,
"75287",1,0.01,
"752875141",1,0.01,
"75605-8216",1,0.01,
"75707",1,0.01,
"76001",2,0.01,
"76006",1,0.01,
"76012",1,0.01,
"76012-5676",1,0.01,
"760125320",1,0.01,
"76013",1,0.01,
"76014",1,0.01,
"76016-5336",1,0.01,
"760164521",1,0.01,
"76034",1,0.01,
"760345886",1,0.01,
"76040",1,0.01,
"76049",0.01,1,
"76051-5627",1,0.01,
"76054",1,0.01,
"76063",1,0.01,
"760635445",1,0.01,
"76092",2,0.01,
"76109",1,0.01,
"76133",2,0.01,
"76180",1,0.01,
"76201",1,0.01,
"76205",1,0.01,
"76207",1,0.01,
"7624",1,0.01,
"7631",1,0.01,
"76502",1,0.01,
"76504",1,0.01,
"76522",1,0.01,
"76657",1,0.01,
"76706-6559",1,0.01,
"7675",1,0.01,
"77004",1,0.01,
"77006",1,0.01,
"77009",1,0.01,
"77016",1,0.01,
"77024",1,0.01,
"77026",1,0.01,
"77040",1,0.01,
"77043",1,0.01,
"77055",1,0.01,
"770554712",1,0.01,
"77063",2,0.01,
"77077",1,0.01,
"77088",1,0.01,
"77093",1,0.01,
"77095",1,0.01,
"770957298",1,0.01,
"77096-6002",1,0.01,
"7716",1,0.01,
"7731",1,0.01,
"77338-1314",1,0.01,
"77375",1,0.01,
"77377",1,0.01,
"77380-1346",1,0.01,
"77381",3,0.01,
"77381-5126",1,0.01,
"77381-6139",1,0.01,
"773811406",1,0.01,
"773822627",1,0.01,
"77401",1,0.01,
"77433",1,0.01,
"77450",1,0.01,
"774505770",1,0.01,
"77479",1,0.01,
"77487",1,0.01,
"77494",1,0.01,
"77494-2376",1,0.01,
"774945251",1,0.01,
"77498-7031",1,0.01,
"774987221",1,0.01,
"77520",1,0.01,
"77546",1,0.01,
"7757",1,0.01,
"77627",1,0.01,
"77664",1,0.01,
"78006",1,0.01,
"78015",1,0.01,
"78015-6512",1,0.01,
"78028",1,0.01,
"78108",1,0.01,
"781082268",1,0.01,
"78132",1,0.01,
"78155",1,0.01,
"781632199",1,0.01,
"78201",1,0.01,
"782013740",1,0.01,
"782014021",1,0.01,
"78209",3,0.01,
"78212",1,0.01,
"782123674",1,0.01,
"78213",1,0.01,
"78216",1,0.01,
"78216-3435",1,0.01,
"78218-1772",1,0.01,
"78227-1629",1,0.01,
"782283213",1,0.01,
"78230",1,0.01,
"782383524",1,0.01,
"78247",2,0.01,
"78249",1,0.01,
"78249-1639",1,0.01,
"78254",1,0.01,
"78257",1,0.01,
"78261",1,0.01,
"782831746",1,0.01,
"78400",0.01,1,
"7843",1,0.01,
"785207417",1,0.01,
"78575",1,0.01,
"78630",1,0.01,
"78641",1,0.01,
"78642",1,0.01,
"78660",1,0.01,
"78664",1,0.01,
"78665",1,0.01,
"78703",1,0.01,
"787035459",1,0.01,
"78704",1,0.01,
"78705",2,0.01,
"78717",1,0.01,
"78726",1,0.01,
"78727",1,0.01,
"78729",1,0.01,
"78732",1,0.01,
"78735",1,0.01,
"78737-9524",1,0.01,
"78738",1,0.01,
"78739",1,0.01,
"78741",1,0.01,
"78745-4966",1,0.01,
"78751",1,0.01,
"78801",1,0.01,
"78833",1,0.01,
"78840",1,0.01,
"79011",0.01,1,
"7920",1,0.01,
"7922",1,0.01,
"7932",1,0.01,
"79603",1,0.01,
"79901",1,0.01,
"79905",1,0.01,
"79912",1,0.01,
"799128106",1,0.01,
"79935-3714",1,0.01,
"79938-2761",1,0.01,
"80013",2,0.01,
"80016",1,0.01,
"80023",1,0.01,
"80027",3,0.01,
"800279404",1,0.01,
"8003",1,0.01,
"80031",2,0.01,
"80031-2144",1,0.01,
"80104",1,0.01,
"80108",1,0.01,
"80108-3482",1,0.01,
"801089259",1,0.01,
"80109",1,0.01,
"80110",1,0.01,
"801115289",1,0.01,
"80120",2,0.01,
"80120-3625",1,0.01,
"80123",1,0.01,
"80126",2,0.01,
"80126-5265",1,0.01,
"80129-6407",1,0.01,
"801295784",1,0.01,
"80130",1,0.01,
"80134",1,0.01,
"801358201",1,0.01,
"8016",1,0.01,
"80205",1,0.01,
"80205-4722",1,0.01,
"80206",2,0.01,
"80209",1,0.01,
"80209-2424",1,0.01,
"80210",2,0.01,
"80211",1,0.01,
"80220",2,0.01,
"80220-1548",1,0.01,
"802307036",1,0.01,
"80237",1,0.01,
"80238",1,0.01,
"80240-220",0.01,1,
"80302",2,0.01,
"80305",1,0.01,
"80305-3413",1,0.01,
"803694",0.01,1,
"80401-6571",1,0.01,
"80465",1,0.01,
"804775166",1,0.01,
"80498",1,0.01,
"80503",1,0.01,
"80517",1,0.01,
"80524",1,0.01,
"80525",1,0.01,
"80537",1,0.01,
"8057",1,0.01,
"806014265",1,0.01,
"80631",1,0.01,
"80634",2,0.01,
"8075",1,0.01,
"80816",1,0.01,
"80841",1,0.01,
"80904",1,0.01,
"80915-3149",1,0.01,
"80916",1,0.01,
"80923",1,0.01,
"80923-5437",1,0.01,
"81621-0899",1,0.01,
"820012531",1,0.01,
"82633",1,0.01,
"830021726",1,0.01,
"83128",1,0.01,
"83338",1,0.01,
"83440",1,0.01,
"83713-1115",1,0.01,
"84003-0054",1,0.01,
"84014",1,0.01,
"84041",1,0.01,
"84058",1,0.01,
"84062",1,0.01,
"84067",1,0.01,
"84093",1,0.01,
"84095",1,0.01,
"84095-3031",1,0.01,
"840957750",1,0.01,
"84106",1,0.01,
"841091212",1,0.01,
"84117-7033",1,0.01,
"84119",1,0.01,
"84401",1,0.01,
"84403",1,0.01,
"84604",1,0.01,
"85008",2,0.01,
"85012-1734",1,0.01,
"85013",1,0.01,
"85014",1,0.01,
"85018",1,0.01,
"85021",1,0.01,
"850283068",1,0.01,
"85033",1,0.01,
"85037",1,0.01,
"85045",1,0.01,
"85048",1,0.01,
"85053-4628",1,0.01,
"85085-9022",1,0.01,
"85142",1,0.01,
"85202",1,0.01,
"85204-5624",1,0.01,
"85224",1,0.01,
"85248",2,0.01,
"85249",1,0.01,
"85254",1,0.01,
"85255",1,0.01,
"85259",1,0.01,
"85260",1,0.01,
"85283",1,0.01,
"852842262",1,0.01,
"85301",1,0.01,
"853031624",1,0.01,
"85338-1212",1,0.01,
"85340",1,0.01,
"85353",1,0.01,
"85382",2,0.01,
"85392",1,0.01,
"8550",1,0.01,
"8559",1,0.01,
"85629-8134",1,0.01,
"85641",1,0.01,
"85710-6241",1,0.01,
"857121383",1,0.01,
"85713-5829",1,0.01,
"85718",1,0.01,
"85719",1,0.01,
"85719-5038",1,0.01,
"85742",1,0.01,
"857499238",1,0.01,
"85750",1,0.01,
"857551822",1,0.01,
"86351",1,0.01,
"86403",1,0.01,
"86426-9288",1,0.01,
"8648",1,0.01,
"87106",1,0.01,
"87111",3,0.01,
"871123764",1,0.01,
"87114",2,0.01,
"871223896",1,0.01,
"87124-1792",1,0.01,
"87144",1,0.01,
"87144-5337",1,0.01,
"8721",1,0.01,
"8723",1,0.01,
"8742",1,0.01,
"87504",1,0.01,
"875050341",1,0.01,
"875056275",1,0.01,
"8774",0.01,1,
"88001",1,0.01,
"8807",2,0.01,
"881015114",1,0.01,
"8816",1,0.01,
"8820",1,0.01,
"8822",1,0.01,
"8854",1,0.01,
"88690",0.01,1,
"89002",1,0.01,
"89015",1,0.01,
"8902",1,0.01,
"890522363",1,0.01,
"89085",2,0.01,
"89085-4434",1,0.01,
"89117",2,0.01,
"89123",2,0.01,
"89129",1,0.01,
"891292221",1,0.01,
"891346180",1,0.01,
"891357864",1,0.01,
"89139",2,0.01,
"89142",1,0.01,
"89143",2,0.01,
"89144",1,0.01,
"89144-4350",1,0.01,
"89145",1,0.01,
"89147",1,0.01,
"891481408",1,0.01,
"891484410",1,0.01,
"89149",1,0.01,
"891490142",1,0.01,
"89183",1,0.01,
"89503",1,0.01,
"89511",1,0.01,
"89523",1,0.01,
"89703",1,0.01,
"90004",3,0.01,
"90006",1,0.01,
"90008",1,0.01,
"90008-4914",1,0.01,
"90011",1,0.01,
"90014-2951",1,0.01,
"900181766",1,0.01,
"900245303",1,0.01,
"90025",1,0.01,
"90026",1,0.01,
"90026-2320",1,0.01,
"90027-2629",1,0.01,
"900271814",1,0.01,
"90034",1,0.01,
"90036-3780",1,0.01,
"900362810",1,0.01,
"90039",2,0.01,
"90041",1,0.01,
"900432317",1,0.01,
"90045",1,0.01,
"90045-2545",1,0.01,
"900451037",1,0.01,
"90046",2,0.01,
"90046-1320",1,0.01,
"90047",1,0.01,
"90056",1,0.01,
"900561905",1,0.01,
"90064",1,0.01,
"90064-3447",1,0.01,
"900643823",1,0.01,
"900651117",1,0.01,
"90066-4167",1,0.01,
"90068",1,0.01,
"9012",1,0.01,
"90210",1,0.01,
"90212-4771",1,0.01,
"90230-5723",1,0.01,
"902305632",1,0.01,
"90240",1,0.01,
"90254",2,0.01,
"902544751",1,0.01,
"90255-5208",1,0.01,
"90260",1,0.01,
"902622340",1,0.01,
"90265-3711",1,0.01,
"90266",4,0.01,
"90266-2232",1,0.01,
"90266-4943",1,0.01,
"902663426",1,0.01,
"902664512",1,0.01,
"902666542",1,0.01,
"90272",1,0.01,
"90272-3337",1,0.01,
"90274",1,0.01,
"90274-5241",1,0.01,
"902741254",1,0.01,
"90275",1,0.01,
"90275-5896",1,0.01,
"902754931",1,0.01,
"90277",2,0.01,
"90277-3533",1,0.01,
"902772935",1,0.01,
"902781533",1,0.01,
"902782323",1,0.01,
"90290",2,0.01,
"90292-4945",1,0.01,
"90301",1,0.01,
"90305",1,0.01,
"90403",1,0.01,
"90405-1841",1,0.01,
"90405-4013",1,0.01,
"90501",1,0.01,
"90503",1,0.01,
"90505-6240",1,0.01,
"905052025",1,0.01,
"905054308",1,0.01,
"90601-1752",1,0.01,
"906011791",1,0.01,
"90620-4262",1,0.01,
"90623-1781",1,0.01,
"906306802",1,0.01,
"90631-3325",1,0.01,
"90640",1,0.01,
"906602684",1,0.01,
"90703-6321",1,0.01,
"90720",1,0.01,
"90723",1,0.01,
"907322718",1,0.01,
"90740-5616",1,0.01,
"90755",1,0.01,
"90803-4138",1,0.01,
"908031523",1,0.01,
"908053401",1,0.01,
"90807",1,0.01,
"90807-3204",1,0.01,
"90808",1,0.01,
"90810-3318",1,0.01,
"90814",1,0.01,
"90815",2,0.01,
"91001",1,0.01,
"91006",1,0.01,
"91007",1,0.01,
"91011",3,0.01,
"91011-3330",1,0.01,
"91016",1,0.01,
"910201505",1,0.01,
"910241805",1,0.01,
"91030",2,0.01,
"910304106",1,0.01,
"910401611",1,0.01,
"911043035",1,0.01,
"911044624",1,0.01,
"91105",2,0.01,
"91106",1,0.01,
"911064405",1,0.01,
"91107",2,0.01,
"91107-5921",1,0.01,
"911072137",1,0.01,
"91201",1,0.01,
"912051917",1,0.01,
"912053624",1,0.01,
"912061028",1,0.01,
"91207-1241",1,0.01,
"91208",1,0.01,
"91208-3016",1,0.01,
"91214",1,0.01,
"91214-1527",1,0.01,
"91301",2,0.01,
"91301-2813",1,0.01,
"91301-5200",1,0.01,
"913014635",1,0.01,
"91302",1,0.01,
"91302-3053",1,0.01,
"91311-1367",1,0.01,
"91311-2834",1,0.01,
"91316-2556",1,0.01,
"91316-4377",1,0.01,
"91320",3,0.01,
"91320-4321",1,0.01,
"91321-5825",1,0.01,
"913213529",1,0.01,
"91326",2,0.01,
"91326-3844",1,0.01,
"913262758",1,0.01,
"91331",1,0.01,
"91343-1807",1,0.01,
"91343-1856",1,0.01,
"91344",1,0.01,
"91350",1,0.01,
"91350-2125",1,0.01,
"913553230",1,0.01,
"913562917",1,0.01,
"913563221",1,0.01,
"913564436",1,0.01,
"913603646",1,0.01,
"91362",2,0.01,
"91367",1,0.01,
"91367-7208",1,0.01,
"91377",1,0.01,
"91381",2,0.01,
"913811502",1,0.01,
"91384",1,0.01,
"91384-4527",1,0.01,
"913843578",1,0.01,
"91387",1,0.01,
"91402",1,0.01,
"91403-2807",1,0.01,
"91405-3337",1,0.01,
"914064116",1,0.01,
"91411-3787",1,0.01,
"91411-4034",1,0.01,
"91423",1,0.01,
"91423-1386",1,0.01,
"91423-2108",1,0.01,
"914235114",1,0.01,
"91436-3420",1,0.01,
"91436-3836",1,0.01,
"91504-1111",1,0.01,
"91505",2,0.01,
"91601",1,0.01,
"91604-1378",1,0.01,
"91604-2652",1,0.01,
"91605",1,0.01,
"91607-1136",1,0.01,
"91709",1,0.01,
"91709-7852",1,0.01,
"91711",1,0.01,
"917306619",1,0.01,
"91750",1,0.01,
"917542421",1,0.01,
"917544533",1,0.01,
"91765-4408",1,0.01,
"91765-4623",1,0.01,
"91767",1,0.01,
"91773-4232",1,0.01,
"91775",1,0.01,
"917752903",1,0.01,
"91780",1,0.01,
"91801-2075",1,0.01,
"91902",1,0.01,
"91911",1,0.01,
"91915",2,0.01,
"91941",1,0.01,
"919414429",1,0.01,
"92008",1,0.01,
"92009",1,0.01,
"92009-5202",1,0.01,
"92009-6371",1,0.01,
"920097624",1,0.01,
"92011-2506",1,0.01,
"92011-4830",1,0.01,
"92019",1,0.01,
"92019-3902",1,0.01,
"920193862",1,0.01,
"92020-5647",1,0.01,
"92024",2,0.01,
"920243051",1,0.01,
"920246523",1,0.01,
"920247105",1,0.01,
"920256149",1,0.01,
"92026",2,0.01,
"92027",1,0.01,
"920373345",1,0.01,
"920376403",1,0.01,
"920376742",1,0.01,
"92054",2,0.01,
"92054-3545",1,0.01,
"92057",3,0.01,
"92064",2,0.01,
"92065",1,0.01,
"92067",3,0.01,
"92067-4775",1,0.01,
"92069",1,0.01,
"92071-2771",1,0.01,
"92103",1,0.01,
"921035005",1,0.01,
"921044924",1,0.01,
"921085123",1,0.01,
"92109",1,0.01,
"92111",2,0.01,
"92116",1,0.01,
"92118",1,0.01,
"92118-1818",1,0.01,
"92118-2038",1,0.01,
"92121",1,0.01,
"921243768",1,0.01,
"92127",1,0.01,
"921271241",1,0.01,
"92128",3,0.01,
"92128-4238",1,0.01,
"921292329",1,0.01,
"92130",2,0.01,
"921303407",1,0.01,
"921307617",1,0.01,
"92131",1,0.01,
"921312226",1,0.01,
"922602258",1,0.01,
"92270",1,0.01,
"92277",2,0.01,
"92310",2,0.01,
"92404",2,0.01,
"92407",1,0.01,
"92503",1,0.01,
"92507",1,0.01,
"92536",1,0.01,
"925490038",1,0.01,
"92562",1,0.01,
"92563-4346",1,0.01,
"925916169",1,0.01,
"92603",1,0.01,
"92604",3,0.01,
"926060801",1,0.01,
"92612",1,0.01,
"92612-2607",1,0.01,
"92614",1,0.01,
"92617",1,0.01,
"92620",2,0.01,
"92620-1823",1,0.01,
"92626-2255",1,0.01,
"92627",2,0.01,
"926291191",1,0.01,
"926292340",1,0.01,
"926292353",1,0.01,
"92630",1,0.01,
"926304639",1,0.01,
"92646",1,0.01,
"92646-8117",1,0.01,
"926464815",1,0.01,
"92648",1,0.01,
"92648-6811",1,0.01,
"92649",1,0.01,
"92649-3732",1,0.01,
"92651",7,0.01,
"92651-8313",1,0.01,
"926511200",1,0.01,
"926512016",1,0.01,
"926514006",1,0.01,
"926518125",1,0.01,
"926536504",1,0.01,
"92656",1,0.01,
"92657-0107",1,0.01,
"92660",2,0.01,
"926606605",1,0.01,
"926635617",1,0.01,
"926724545",1,0.01,
"92673",2,0.01,
"926733433",1,0.01,
"92677",2,0.01,
"92677-1011",1,0.01,
"92677-6300",1,0.01,
"926771656",1,0.01,
"926771935",1,0.01,
"926772454",1,0.01,
"926772818",1,0.01,
"926774525",1,0.01,
"926777621",1,0.01,
"926779043",1,0.01,
"92679",2,0.01,
"926794214",1,0.01,
"926795100",1,0.01,
"926795104",1,0.01,
"926795151",1,0.01,
"92688",1,0.01,
"92691",1,0.01,
"92692",1,0.01,
"926921920",1,0.01,
"926925184",1,0.01,
"92694",1,0.01,
"926940430",1,0.01,
"92701",1,0.01,
"92703",1,0.01,
"92703-3439",1,0.01,
"92704",2,0.01,
"92705",1,0.01,
"92705-7863",1,0.01,
"927051931",1,0.01,
"927056018",1,0.01,
"92706",1,0.01,
"92708-5751",1,0.01,
"927802254",1,0.01,
"927805954",1,0.01,
"92782",1,0.01,
"928064353",1,0.01,
"92807",1,0.01,
"928082333",1,0.01,
"928082605",1,0.01,
"92821",1,0.01,
"92831",2,0.01,
"928402113",1,0.01,
"92841",1,0.01,
"92845",1,0.01,
"92861",1,0.01,
"92865",1,0.01,
"92866-2641",1,0.01,
"92867",2,0.01,
"928672078",1,0.01,
"928676494",1,0.01,
"928678608",1,0.01,
"92868-1747",1,0.01,
"92869",1,0.01,
"92880",1,0.01,
"92881",1,0.01,
"92886",2,0.01,
"92887",1,0.01,
"93001",1,0.01,
"93013",1,0.01,
"93021",2,0.01,
"93021-3750",1,0.01,
"93023",1,0.01,
"93030-3275",1,0.01,
"93060",1,0.01,
"93063-1203",1,0.01,
"93108",1,0.01,
"93108-2150",1,0.01,
"931081062",1,0.01,
"931104507",1,0.01,
"93117-2135",1,0.01,
"93274",1,0.01,
"93291",2,0.01,
"93312",1,0.01,
"93552",1,0.01,
"93560-6408",1,0.01,
"93647",1,0.01,
"937303451",1,0.01,
"93908",1,0.01,
"940022916",1,0.01,
"94010",1,0.01,
"94010-5013",1,0.01,
"940106142",1,0.01,
"940106629",1,0.01,
"94018",1,0.01,
"94022",1,0.01,
"940221609",1,0.01,
"940224071",1,0.01,
"94024",1,0.01,
"940240129",1,0.01,
"940244817",1,0.01,
"940245044",1,0.01,
"940245515",1,0.01,
"940246441",1,0.01,
"94025",3,0.01,
"94025-4910",1,0.01,
"940251648",1,0.01,
"940273902",1,0.01,
"940274028",1,0.01,
"94037",1,0.01,
"94040",1,0.01,
"94041-2209",1,0.01,
"940412356",1,0.01,
"94043",1,0.01,
"94061",1,0.01,
"94062",1,0.01,
"94066",1,0.01,
"940661208",1,0.01,
"94070-4940",1,0.01,
"94080",1,0.01,
"940804255",1,0.01,
"940874451",1,0.01,
"94103",1,0.01,
"94108",1,0.01,
"941081541",1,0.01,
"94109",1,0.01,
"94110-2096",1,0.01,
"94110-6132",1,0.01,
"94112",3,0.01,
"94112-2911",1,0.01,
"941143126",1,0.01,
"94115",1,0.01,
"94116",1,0.01,
"94116-1453",1,0.01,
"94117",1,0.01,
"94117-3209",1,0.01,
"94118",2,0.01,
"941211446",1,0.01,
"94122",3,0.01,
"94123",1,0.01,
"94131",1,0.01,
"94131-3030",1,0.01,
"94132",1,0.01,
"94132-2724",1,0.01,
"941321068",1,0.01,
"94133",1,0.01,
"94301",1,0.01,
"94303",1,0.01,
"94303-3412",1,0.01,
"943033022",1,0.01,
"943033036",1,0.01,
"943033844",1,0.01,
"94306-1425",1,0.01,
"94306-4439",1,0.01,
"94401",1,0.01,
"94402",2,0.01,
"944022215",1,0.01,
"944023206",1,0.01,
"94403",1,0.01,
"94404",1,0.01,
"944041308",1,0.01,
"944043609",1,0.01,
"945014024",1,0.01,
"94502",2,0.01,
"94506",2,0.01,
"94507",1,0.01,
"94517",1,0.01,
"94526",1,0.01,
"94533",1,0.01,
"94534",1,0.01,
"945364930",1,0.01,
"94538",1,0.01,
"94539",1,0.01,
"94539-3222",1,0.01,
"945396316",1,0.01,
"94542-2110",1,0.01,
"94545-4932",1,0.01,
"94546",1,0.01,
"945493146",1,0.01,
"94550",1,0.01,
"945516163",1,0.01,
"945525307",1,0.01,
"94553",1,0.01,
"94556",1,0.01,
"945562810",1,0.01,
"94558-4551",1,0.01,
"94561",1,0.01,
"94563",2,0.01,
"945633211",1,0.01,
"945633348",1,0.01,
"94566-9767",1,0.01,
"945664568",1,0.01,
"945664618",1,0.01,
"945665524",1,0.01,
"945667521",1,0.01,
"945668642",1,0.01,
"94577",1,0.01,
"94578-2932",1,0.01,
"94582",1,0.01,
"94582-4823",1,0.01,
"94583",1,0.01,
"94587",1,0.01,
"94595",1,0.01,
"94596",1,0.01,
"94602",2,0.01,
"94602-2433",1,0.01,
"946021402",1,0.01,
"946021628",1,0.01,
"946021944",1,0.01,
"94608",2,0.01,
"94609",1,0.01,
"94610",2,0.01,
"94610-1054",1,0.01,
"94610-1818",1,0.01,
"94611",5,0.01,
"946114118",1,0.01,
"946115911",1,0.01,
"94618-2004",1,0.01,
"946181348",1,0.01,
"94619",1,0.01,
"94621-2738",1,0.01,
"94705",1,0.01,
"94705-2331",1,0.01,
"94707",1,0.01,
"947071544",1,0.01,
"947081427",1,0.01,
"948032124",1,0.01,
"948032740",1,0.01,
"94804-3105",1,0.01,
"948062294",1,0.01,
"94914",1,0.01,
"94941",5,0.01,
"94941-3593",1,0.01,
"949411131",1,0.01,
"949411519",1,0.01,
"949412017",1,0.01,
"949453205",1,0.01,
"94947",1,0.01,
"94947-4250",1,0.01,
"949474427",1,0.01,
"94949",1,0.01,
"94952",2,0.01,
"95003",2,0.01,
"95008",1,0.01,
"950080618",1,0.01,
"950081837",1,0.01,
"950081908",1,0.01,
"95014-2455",1,0.01,
"950144768",1,0.01,
"950306238",1,0.01,
"950321127",1,0.01,
"950324743",1,0.01,
"95033",1,0.01,
"95050",2,0.01,
"95055",1,0.01,
"95060",2,0.01,
"95065",1,0.01,
"95070",2,0.01,
"95070-6470",1,0.01,
"950703818",1,0.01,
"950704628",1,0.01,
"950705144",1,0.01,
"95111",1,0.01,
"95112",1,0.01,
"951162958",1,0.01,
"95117",1,0.01,
"95117-1906",1,0.01,
"95118",1,0.01,
"951204452",1,0.01,
"95124",1,0.01,
"95124-1301",1,0.01,
"95125",2,0.01,
"951261558",1,0.01,
"95127",1,0.01,
"951273053",1,0.01,
"95134",1,0.01,
"95134-1823",1,0.01,
"95138",1,0.01,
"95138-2260",1,0.01,
"95138-2369",1,0.01,
"95148-2827",1,0.01,
"95207",1,0.01,
"95336",1,0.01,
"95337",1,0.01,
"953578181",1,0.01,
"95381",1,0.01,
"95403",2,0.01,
"95476",1,0.01,
"954927921",1,0.01,
"954928645",1,0.01,
"956029680",1,0.01,
"956035262",1,0.01,
"95608",1,0.01,
"956083484",1,0.01,
"95628",2,0.01,
"95677",1,0.01,
"95682",1,0.01,
"956829673",1,0.01,
"957139715",1,0.01,
"95757",1,0.01,
"95765",1,0.01,
"95811",1,0.01,
"95818",1,0.01,
"95818-4015",1,0.01,
"958194028",1,0.01,
"95825",1,0.01,
"95831",1,0.01,
"95835-1237",1,0.01,
"95864",1,0.01,
"95948-9483",1,0.01,
"96204-3049",1,0.01,
"9627",1,0.01,
"9630",1,0.01,
"96716",1,0.01,
"96720-3245",1,0.01,
"967342115",1,0.01,
"96740",1,0.01,
"967444732",1,0.01,
"96813",1,0.01,
"96814",1,0.01,
"96815-1444",1,0.01,
"968164242",1,0.01,
"96825",2,0.01,
"96950",1,0.01,
"97007",1,0.01,
"9702",1,0.01,
"97027",1,0.01,
"97034",2,0.01,
"97035",1,0.01,
"97062",2,0.01,
"97068",1,0.01,
"97068-1861",1,0.01,
"97201",1,0.01,
"97202",1,0.01,
"972028811",1,0.01,
"97210",1,0.01,
"97212",2,0.01,
"97224",1,0.01,
"97225",2,0.01,
"97230",2,0.01,
"97232-1732",1,0.01,
"973024940",1,0.01,
"97361-1660",1,0.01,
"973811372",1,0.01,
"97401",1,0.01,
"97405",1,0.01,
"97520-1417",1,0.01,
"975203048",1,0.01,
"97701",1,0.01,
"98004",1,0.01,
"980046354",1,0.01,
"980046836",1,0.01,
"98011",1,0.01,
"98020",0.01,1,
"98021",1,0.01,
"98026",1,0.01,
"98027",1,0.01,
"980278436",1,0.01,
"980296543",1,0.01,
"98032-1808",1,0.01,
"98034",2,0.01,
"98038",1,0.01,
"98040",2,0.01,
"98042",1,0.01,
"980521701",1,0.01,
"980756294",1,0.01,
"98077-7145",1,0.01,
"980927204",1,0.01,
"98103",1,0.01,
"98104",1,0.01,
"98105",2,0.01,
"98105-2020",1,0.01,
"981052246",1,0.01,
"981053837",1,0.01,
"98109",1,0.01,
"98110",1,0.01,
"98115",4,0.01,
"98117",2,0.01,
"98119",1,0.01,
"98122",4,0.01,
"98125",1,0.01,
"98133",1,0.01,
"98144",1,0.01,
"98166",1,0.01,
"98247",1,0.01,
"98275-4252",1,0.01,
"983709772",1,0.01,
"983839239",1,0.01,
"984652666",1,0.01,
"98498",1,0.01,
"98532",1,0.01,
"98901",1,0.01,
"99005-9227",1,0.01,
"99205",1,0.01,
"99208",1,0.01,
"99338",1,0.01,
"99354",1,0.01,
"99362",1,0.01,
"99403",2,0.01,
"99508-4867",1,0.01,
"995153376",1,0.01,
"99669",1,0.01,
"99703",1,0.01,
"99712-2730",1,0.01,
"99762",1,0.01,
"FM1100",0.01,1,
"IL",2,0.01,
"Illinois",1,0.01,
"n/a",0.01,1,
"N16 0TX",0.01,1,
"SW14 8JH",0.01,1,
"UNK",758,9397,
"V6B1V5",0.01,1,
"V6H1Z3",0.01,1,
"V7S 2G4",0.01,1
)
# Assemble the zip-code lookup table from the flat `dta` vector defined
# earlier in this file: each record is 3 values wide (OZIP, D, F), laid
# out row-wise, so fill the matrix by row.
# Fix: use TRUE/FALSE instead of T/F — T and F are ordinary variables in
# R and can be reassigned, silently changing behavior.
zippsmatrix <- matrix(dta, ncol = 3, byrow = TRUE)
zippsDF <- data.frame(zippsmatrix, stringsAsFactors = FALSE)
names(zippsDF) <- cols
# matrix() coerces the mixed vector to character; restore the two numeric
# columns. (Exact meaning of D and F is not documented in this file —
# presumably counts/weights; TODO confirm against downstream usage.)
zippsDF$D <- as.numeric(zippsDF$D)
zippsDF$F <- as.numeric(zippsDF$F)
|
/zipps.R
|
no_license
|
aray6a/products
|
R
| false
| false
| 119,143
|
r
|
# Column names for the zip-code table assembled later in this file:
# "OZIP" is the origin ZIP/postal-code string; "D" and "F" are two
# numeric fields whose semantics are not documented here — presumably
# counts/weights (TODO confirm against downstream usage).
cols <- c("OZIP","D","F")
dta <- c(
"0",0.01,1,
"0.000014",0.01,1,
"0.0000305",0.01,1,
"0000",0.01,2,
"00000",0.01,1,
"006463607",1,0.01,
"00693",1,0.01,
"00719",1,0.01,
"00726-9324",1,0.01,
"00820",2,0.01,
"00907-2420",1,0.01,
"00924-4073",0.01,1,
"00926",1,2,
"00926-5117",1,0.01,
"00952-4055",1,0.01,
"01060-1639",1,0.01,
"010819602",1,0.01,
"01106",1,0.01,
"01107-1246",1,0.01,
"01450-1346",1,0.01,
"01501",1,0.01,
"01519",1,0.01,
"015241272",1,0.01,
"01534",1,0.01,
"01545",2,0.01,
"01564-1508",1,0.01,
"016031838",1,0.01,
"01604",1,0.01,
"01702",1,0.01,
"01720-4440",1,0.01,
"01740",1,0.01,
"017422225",1,0.01,
"017423454",1,0.01,
"017481516",1,0.01,
"017481845",1,0.01,
"01760",1,0.01,
"018032745",1,0.01,
"01810",0.01,1,
"01840-1025",1,0.01,
"01845",1,0.01,
"01845-5310",1,0.01,
"018456310",1,0.01,
"018641923",1,0.01,
"018673826",1,0.01,
"019151356",1,0.01,
"01922-1125",1,0.01,
"01940",1,0.01,
"02021-3192",1,0.01,
"02025",1,0.01,
"02038",1,0.01,
"020431961",1,0.01,
"020506372",1,0.01,
"020522907",1,0.01,
"020522910",1,0.01,
"020523146",1,0.01,
"020814363",1,0.01,
"020903041",1,0.01,
"020931720",1,0.01,
"02122",1,0.01,
"02122-2810",1,0.01,
"02129",1,0.01,
"021303439",1,0.01,
"02135",1,0.01,
"02135-2517",1,0.01,
"02136-3602",1,0.01,
"02139-3174",1,0.01,
"02143",1,0.01,
"021451027",1,0.01,
"02155",1,0.01,
"021691624",1,0.01,
"021804314",1,0.01,
"021847314",1,0.01,
"021862229",1,0.01,
"023014085",1,0.01,
"02332",1,0.01,
"023641366",1,0.01,
"024465428",1,0.01,
"02459",1,0.01,
"024591333",1,0.01,
"02466-1330",1,0.01,
"02476",1,0.01,
"024813103",1,0.01,
"024941418",1,0.01,
"02537-1262",1,0.01,
"026321948",1,0.01,
"02720-2734",1,0.01,
"02771-3306",1,0.01,
"02806",2,0.01,
"028064801",1,0.01,
"02809",2,0.01,
"028184104",1,0.01,
"02871",1,0.01,
"028714031",1,0.01,
"030332476",1,0.01,
"030543300",1,0.01,
"03076",1,0.01,
"03216",1,0.01,
"03301-6934",1,0.01,
"033016915",1,0.01,
"03814",1,0.01,
"038334016",1,0.01,
"03904",1,0.01,
"04074-9194",1,0.01,
"04074-9445",1,0.01,
"04102-1726",1,0.01,
"054827775",1,0.01,
"06001-3522",1,0.01,
"06010",1,0.01,
"06033-2849",1,0.01,
"06042",1,0.01,
"060701238",1,0.01,
"06106",1,0.01,
"061071603",1,0.01,
"061082817",1,0.01,
"06119-1057",1,0.01,
"06226",1,0.01,
"062813318",1,0.01,
"064432059",1,0.01,
"064432177",1,0.01,
"064506919",1,0.01,
"06459-3211",1,0.01,
"064611652",1,0.01,
"064611877",1,0.01,
"064893419",1,0.01,
"06511",1,0.01,
"06517",1,0.01,
"06606",1,0.01,
"06784-1132",1,0.01,
"06795",2,0.01,
"06798",1,0.01,
"06824",1,0.01,
"068246234",1,0.01,
"06840",1,0.01,
"068404400",1,0.01,
"068406511",1,0.01,
"06855",1,0.01,
"06855-2022",1,0.01,
"068772230",1,0.01,
"06880",1,0.01,
"068802013",1,0.01,
"06883",1,0.01,
"06897-2407",1,0.01,
"07002-3703",1,0.01,
"070091406",1,0.01,
"07042",1,0.01,
"07042-4518",1,0.01,
"07045-9694",1,0.01,
"07047",2,0.01,
"070683712",1,0.01,
"07112",1,0.01,
"07512",1,0.01,
"07645",1,0.01,
"076491318",1,0.01,
"07661",1,0.01,
"07676",1,0.01,
"077015640",1,0.01,
"07726-3304",1,0.01,
"07762-2159",1,0.01,
"07834-2149",1,0.01,
"07850",1,0.01,
"07869",1,0.01,
"07869-1021",1,0.01,
"079014050",1,0.01,
"07920",1,0.01,
"079201506",1,0.01,
"079202306",1,0.01,
"07930",1,0.01,
"079302650",1,0.01,
"07945-2932",1,0.01,
"079603416",1,0.01,
"07974",1,0.01,
"079742521",1,0.01,
"080032669",1,0.01,
"080033432",1,0.01,
"08006",0.01,1,
"08043",1,0.01,
"080543191",1,0.01,
"08204",1,0.01,
"08205",2,0.01,
"08225-1435",1,0.01,
"08330-3409",1,0.01,
"08361",1,0.01,
"084",0.01,1,
"085025346",1,0.01,
"08540",1,0.01,
"085403071",1,0.01,
"085407335",1,0.01,
"085423150",1,0.01,
"085503000",1,0.01,
"08730",1,0.01,
"08730-1421",1,0.01,
"08755",1,0.01,
"08807",1,0.01,
"08816",1,0.01,
"08816-1464",1,0.01,
"08833",1,0.01,
"09107-0008",0.01,1,
"096300011",1,0.01,
"1000",0.01,1,
"10000",0.01,2,
"10001",0.01,1,
"100014",0.01,1,
"10002",0.01,1,
"10002-1964",1,0.01,
"10003",1,0.01,
"100032",0.01,1,
"10004",0.01,1,
"10007",0.01,1,
"10008",0.01,2,
"100080",0.01,1,
"100083",1,1,
"100088",0.01,1,
"10011",2,0.01,
"10012",1,0.01,
"100143295",1,0.01,
"100147229",1,0.01,
"10016",2,0.01,
"1002",1,0.01,
"10021",1,0.01,
"10022",1,0.01,
"10025",1,0.01,
"10026",1,0.01,
"10027",1,0.01,
"10028",1,0.01,
"10029-5152",1,0.01,
"10031",1,0.01,
"10035-1334",1,0.01,
"10128",1,0.01,
"10128-1229",1,0.01,
"102209",0.01,1,
"10240",0.01,1,
"102627",0.01,1,
"10452",1,0.01,
"10456",1,0.01,
"10460-1235",1,0.01,
"10512-4110",1,0.01,
"10522",1,0.01,
"10523-2713",1,0.01,
"10524",1,0.01,
"10538",1,0.01,
"10538-2844",1,0.01,
"105521333",1,0.01,
"10566",1,0.01,
"10583",1,0.01,
"10583-7330",1,0.01,
"10591",1,0.01,
"106-0047",0.01,1,
"10605",0.01,1,
"10707",2,0.01,
"108032117",1,0.01,
"10901",0.01,1,
"109501428",1,0.01,
"109861622",1,0.01,
"110013",1,0.01,
"11020",1,0.01,
"11030",1,0.01,
"11050",3,0.01,
"111",0.01,1,
"11101",1,0.01,
"11102",2,0.01,
"11118",0.01,2,
"11201",3,0.01,
"11207",1,0.01,
"11208-3912",1,0.01,
"11210",1,0.01,
"11213",1,0.01,
"11215",3,0.01,
"11221",2,0.01,
"112255004",1,0.01,
"11226",1,0.01,
"11229-4406",1,0.01,
"11249",1,0.01,
"11254",0.01,1,
"11354",1,0.01,
"11360",1,0.01,
"11372",2,0.01,
"11374",1,0.01,
"11413-2120",1,0.01,
"11416",0.01,1,
"11423",1,0.01,
"11429",1,0.01,
"11431",0.01,1,
"11432",1,0.01,
"11432-2878",1,0.01,
"11510-2233",1,0.01,
"11511",0.01,1,
"11519",0.01,1,
"11520",0.01,1,
"11530-1227",1,0.01,
"115305048",1,0.01,
"11545",1,0.01,
"11560",0.01,1,
"11560-1602",1,0.01,
"115601224",1,0.01,
"115612131",1,0.01,
"11563-1752",1,0.01,
"11566",1,0.01,
"11570-2809",1,0.01,
"115802629",1,0.01,
"116031",0.01,1,
"11705-1231",1,0.01,
"117051756",1,0.01,
"11714",1,0.01,
"11725",1,0.01,
"11733",1,0.01,
"117332017",1,0.01,
"11735",1,0.01,
"11740",1,0.01,
"11741",1,0.01,
"117433414",1,0.01,
"11746",1,0.01,
"117463063",1,0.01,
"11749-5072",1,0.01,
"11758",1,0.01,
"11769",1,0.01,
"11786",1,0.01,
"11786-1822",1,0.01,
"11787",1,0.01,
"11795-3619",1,0.01,
"11801-6416",1,0.01,
"11803",2,0.01,
"11822",0.01,1,
"11940",1,0.01,
"11953",0.01,1,
"12020",1,0.01,
"120372",0.01,1,
"12047",1,0.01,
"12205",1,0.01,
"1230",1,0.01,
"12303",1,0.01,
"12309",1,0.01,
"12311",0.01,1,
"1239",1,0.01,
"12471",1,0.01,
"12514",1,0.01,
"12533",1,0.01,
"12560",0.01,1,
"12603",1,0.01,
"12604",1,0.01,
"127711340",1,0.01,
"12804-1230",1,0.01,
"12866",1,0.01,
"12871",1,0.01,
"13027",2,0.01,
"13031",1,0.01,
"13069",1,0.01,
"13078",1,0.01,
"13104-9657",1,0.01,
"13135",1,0.01,
"13152",1,0.01,
"131529000",1,0.01,
"131529633",1,0.01,
"13206",1,0.01,
"13210",1,0.01,
"13346",1,0.01,
"13421",1,0.01,
"13438",1,0.01,
"135-554",0.01,1,
"136761805",1,0.01,
"1370",1,0.01,
"13713",0.01,1,
"13730",1,0.01,
"13879",0.01,1,
"13904",1,0.01,
"14051",2,0.01,
"14051-1735",1,0.01,
"14063-2344",1,0.01,
"140721128",1,0.01,
"140721981",2,0.01,
"14092",1,0.01,
"14150",1,0.01,
"14170-9715",1,0.01,
"14214",3,0.01,
"14214-1609",1,0.01,
"14217",1,0.01,
"14217-2105",1,0.01,
"142172111",1,0.01,
"14219-1011",1,0.01,
"14220",1,0.01,
"14220-2749",1,0.01,
"14221",2,0.01,
"142212101",1,0.01,
"142221229",1,0.01,
"14223",1,0.01,
"14226",3,0.01,
"142263328",1,0.01,
"142264045",1,0.01,
"142283745",1,0.01,
"14450",1,0.01,
"14506",1,0.01,
"14526",1,0.01,
"14527-9418",1,0.01,
"14534",1,0.01,
"14559",1,0.01,
"145648920",1,0.01,
"145648985",1,0.01,
"14580",1,0.01,
"14605",2,0.01,
"14607",1,0.01,
"14618",1,0.01,
"14619",1,0.01,
"14620",1,0.01,
"14733-1315",1,0.01,
"14850",1,0.01,
"14901",1,0.01,
"14904",2,0.01,
"150092719",1,0.01,
"150103339",1,0.01,
"15024",2,0.01,
"150242501",1,0.01,
"15044",1,0.01,
"15057",1,0.01,
"15068",1,0.01,
"1507",1,0.01,
"15071-1119",1,0.01,
"15085-1300",1,0.01,
"15090",1,0.01,
"15102-3693",1,0.01,
"151391802",1,0.01,
"15143",1,0.01,
"15143-9328",1,0.01,
"15146",1,0.01,
"15156",1,0.01,
"15206",1,0.01,
"152061435",1,0.01,
"15208",1,0.01,
"15213",1,0.01,
"15218",1,0.01,
"152181412",1,0.01,
"15221",1,0.01,
"152262343",1,0.01,
"15227",1,0.01,
"15228",2,0.01,
"152282225",1,0.01,
"152341008",1,0.01,
"15235",1,0.01,
"15237",2,0.01,
"15241",1,0.01,
"15243",1,0.01,
"152431138",1,0.01,
"152431738",1,0.01,
"1532",1,0.01,
"15601",1,0.01,
"15683",1,0.01,
"15701",1,0.01,
"1581",1,0.01,
"16008",2,0.01,
"1608",1,0.01,
"16102",1,0.01,
"16117",1,0.01,
"16125",1,0.01,
"16146-3714",1,0.01,
"16188",0.01,2,
"16303",1,0.01,
"16316",1,0.01,
"16415",1,0.01,
"16502",1,0.01,
"16505",1,0.01,
"16506",2,0.01,
"165111423",1,0.01,
"16648",1,0.01,
"16801",2,0.01,
"16823",1,0.01,
"170032012",1,0.01,
"17013",1,0.01,
"17044",1,0.01,
"17050",1,0.01,
"172368715",1,0.01,
"17325-1400",1,0.01,
"173314116",1,0.01,
"175409740",1,0.01,
"1760",1,0.01,
"17815",2,0.01,
"17824",1,0.01,
"17837",1,0.01,
"18020-7848",1,0.01,
"18031",1,0.01,
"18036",1,0.01,
"18302-6661",1,0.01,
"1845",2,0.01,
"1854",0.01,1,
"18612",1,0.01,
"18612-8902",1,0.01,
"1864",2,0.01,
"187043506",1,0.01,
"1890",2,0.01,
"18901-2965",1,0.01,
"18929",1,0.01,
"18932",1,0.01,
"18974",1,0.01,
"189741846",1,0.01,
"19010",1,0.01,
"19041",1,0.01,
"190632114",1,0.01,
"19081-1512",1,0.01,
"19083",1,0.01,
"19087",2,0.01,
"19087-5543",1,0.01,
"19121",1,0.01,
"19135-3508",1,0.01,
"19146",1,0.01,
"19146-1610",1,0.01,
"19152-2214",1,0.01,
"19312-2801",1,0.01,
"19333",1,0.01,
"19335",1,0.01,
"193432647",1,0.01,
"19348",1,0.01,
"19355",2,0.01,
"193551262",1,0.01,
"19373",1,0.01,
"19382",1,0.01,
"19403-1221",1,0.01,
"19426",1,0.01,
"194261446",1,0.01,
"194403049",1,0.01,
"1945",1,0.01,
"19454",2,0.01,
"194543619",1,0.01,
"19606",1,0.01,
"196061401",1,0.01,
"19608-8502",1,0.01,
"19702",1,0.01,
"19703",1,0.01,
"19709",1,0.01,
"19709-2228",1,0.01,
"19810",1,0.01,
"1985",1,0.01,
"20001",1,0.01,
"200013509",1,0.01,
"20002",0.01,1,
"20003-2107",1,0.01,
"200052",0.01,1,
"20007",1,0.01,
"200072",0.01,1,
"200073907",1,0.01,
"20008",2,0.01,
"200083403",1,0.01,
"200086",0.01,1,
"20009",2,0.01,
"20009-1532",1,0.01,
"20010",1,0.01,
"200120",0.01,1,
"20015",2,0.01,
"20016",1,0.01,
"20017",1,0.01,
"20019",1,0.01,
"20019-1726",1,0.01,
"200433",0.01,1,
"200444",0.01,1,
"20080",0.01,1,
"20105",1,0.01,
"201204",0.01,1,
"201206",0.01,2,
"201242350",1,0.01,
"201301",0.01,1,
"201306",0.01,1,
"20136",1,0.01,
"20136-1901",1,0.01,
"201474482",1,0.01,
"20161",0.01,1,
"20165",1,0.01,
"201754335",1,0.01,
"202150",0.01,1,
"206015606",1,0.01,
"20603",1,0.01,
"2067",1,0.01,
"206772056",1,0.01,
"20705",1,0.01,
"20716",1,0.01,
"207213217",1,0.01,
"20740",1,0.01,
"207402758",1,0.01,
"20745",1,0.01,
"20746",1,0.01,
"20769",1,0.01,
"20782",1,0.01,
"207851348",1,0.01,
"2081",1,0.01,
"20815-4245",1,0.01,
"208154072",1,0.01,
"208155739",1,0.01,
"20816",1,0.01,
"208162314",1,0.01,
"20817",1,0.01,
"208173250",1,0.01,
"208176545",1,0.01,
"20832",3,0.01,
"208414353",1,0.01,
"208505470",1,0.01,
"208507502",1,0.01,
"20852",3,0.01,
"20854",2,0.01,
"208661938",1,0.01,
"208766341",1,0.01,
"20878",2,0.01,
"20895",1,0.01,
"2090",1,0.01,
"20901",2,0.01,
"20901-2505",1,0.01,
"20903",1,0.01,
"20904",1,0.01,
"20910",3,0.01,
"209104250",1,0.01,
"209125850",1,0.01,
"209126878",1,0.01,
"21012-1628",1,0.01,
"21017",1,0.01,
"21030",1,0.01,
"21042",2,0.01,
"210422044",1,0.01,
"21044-3749",1,0.01,
"21045",1,0.01,
"21090",1,0.01,
"21093",1,0.01,
"21113",1,0.01,
"21113-1521",1,0.01,
"211141835",1,0.01,
"21117",1,0.01,
"21128",1,0.01,
"21133",1,0.01,
"21146",1,0.01,
"21146-1384",1,0.01,
"2115",0.01,1,
"21157",1,0.01,
"211573806",1,0.01,
"21163",1,0.01,
"21204",1,0.01,
"21204-3510",1,0.01,
"212086370",1,0.01,
"21209",1,0.01,
"21209-3860",1,0.01,
"21215",1,0.01,
"21220-3768",1,0.01,
"21224",1,0.01,
"21228",2,0.01,
"21234",1,0.01,
"21237",1,0.01,
"2134",1,0.01,
"2139",1,0.01,
"21400",0.01,1,
"2144",1,0.01,
"2149",1,0.01,
"21702",1,0.01,
"21703",1,0.01,
"21742",1,0.01,
"21774-6808",1,0.01,
"21788",1,0.01,
"21793-9164",1,0.01,
"218634457",1,0.01,
"21921",1,0.01,
"22003",1,0.01,
"22015",1,0.01,
"22015-4414",1,0.01,
"22030",1,0.01,
"22033",2,0.01,
"22046",1,0.01,
"22066",2,0.01,
"22066-1517",1,0.01,
"221000",0.01,1,
"22101",1,0.01,
"22102",2,0.01,
"22152",1,0.01,
"22181",1,0.01,
"22182-5315",1,0.01,
"22192",1,0.01,
"22201",1,0.01,
"22203",1,0.01,
"222031113",1,0.01,
"222041449",1,0.01,
"22206",1,0.01,
"22209",1,0.01,
"22300",0.01,1,
"22301",1,0.01,
"223051213",1,0.01,
"223062408",1,0.01,
"22307",1,0.01,
"22314",1,0.01,
"224013739",1,0.01,
"22500",0.01,1,
"22554",2,0.01,
"226001",0.01,1,
"22602-6834",1,0.01,
"22611",3,0.01,
"22660-9779",1,0.01,
"22801",1,0.01,
"22802",1,0.01,
"23005",1,0.01,
"231",0.01,1,
"23112",1,0.01,
"23149",0.01,1,
"23188",1,0.01,
"23188-1023",1,0.01,
"23224",1,0.01,
"232251417",1,0.01,
"232331128",1,0.01,
"232363358",1,0.01,
"234",0.01,1,
"23452",1,0.01,
"23454",1,0.01,
"23457",1,0.01,
"23464",1,0.01,
"23508",1,0.01,
"23662",1,0.01,
"236621441",1,0.01,
"23666",1,0.01,
"236800",0.01,1,
"23693",1,0.01,
"2375",1,0.01,
"239229",0.01,1,
"240604925",1,0.01,
"241",0.01,1,
"24153",1,0.01,
"2421",1,0.01,
"2446",2,0.01,
"24502",2,0.01,
"2459",3,0.01,
"2465",1,0.01,
"2467",1,0.01,
"2482",2,0.01,
"2492",1,0.01,
"2536",1,0.01,
"25500",0.01,1,
"25801",1,0.01,
"26443",0.01,1,
"2648",1,0.01,
"26531",1,0.01,
"27025",1,0.01,
"27055",1,0.01,
"27103",3,0.01,
"27104",1,0.01,
"27106",1,0.01,
"272627460",1,0.01,
"27278",1,0.01,
"27358",1,0.01,
"27408-4415",1,0.01,
"27409",1,0.01,
"274103211",1,0.01,
"27516",1,0.01,
"27516-1925",1,0.01,
"275160450",1,0.01,
"275162357",1,0.01,
"27519",1,0.01,
"27560",1,0.01,
"27587",1,0.01,
"275879597",1,0.01,
"27592",1,0.01,
"27607",1,0.01,
"27610",1,0.01,
"27612",1,0.01,
"276137009",1,0.01,
"27617",1,0.01,
"27703",1,0.01,
"27713",3,0.01,
"27823",1,0.01,
"279549483",1,0.01,
"28023",1,0.01,
"28031",1,0.01,
"28036",2,0.01,
"280781252",1,0.01,
"28081",1,0.01,
"281056844",1,0.01,
"28172",1,0.01,
"28173",1,0.01,
"28213",1,0.01,
"28269",1,0.01,
"28314",1,0.01,
"2835",1,0.01,
"28412",1,0.01,
"28460",1,0.01,
"28539-4554",1,0.01,
"28540",1,0.01,
"28562",1,0.01,
"2860",2,0.01,
"28704",2,0.01,
"28729",1,0.01,
"2874",1,0.01,
"2879",1,0.01,
"288032434",1,0.01,
"29016",1,0.01,
"29020",1,0.01,
"29036",3,0.01,
"29045",2,0.01,
"29063",1,0.01,
"2908",1,0.01,
"2916",1,0.01,
"29163",1,0.01,
"29201",1,0.01,
"29225-4002",1,0.01,
"29302",2,0.01,
"29341",1,0.01,
"29379",1,0.01,
"29403",1,0.01,
"29625",1,0.01,
"29631",1,0.01,
"296504055",1,0.01,
"29651",1,0.01,
"296815154",1,0.01,
"29687",1,0.01,
"29707",1,0.01,
"29715",1,0.01,
"29920",1,0.01,
"29928",1,0.01,
"30004",2,0.01,
"30005",1,0.01,
"300071",0.01,2,
"30014",1,0.01,
"300193102",1,0.01,
"30021",1,0.01,
"30041",1,0.01,
"300419309",1,0.01,
"30062",1,0.01,
"30062-5793",1,0.01,
"30066",1,0.01,
"300664772",1,0.01,
"30071",1,0.01,
"30076",1,0.01,
"30080-6471",1,0.01,
"30087",1,0.01,
"300922252",1,0.01,
"30097",2,0.01,
"30126",1,0.01,
"30127",1,0.01,
"30269",2,0.01,
"30277",1,0.01,
"30297",1,0.01,
"30305",1,0.01,
"30308",1,0.01,
"303195047",1,0.01,
"30324-7203",1,0.01,
"303245222",1,0.01,
"30328",1,0.01,
"30338",1,0.01,
"30345",2,0.01,
"30350-3513",1,0.01,
"3038",2,0.01,
"3051",1,0.01,
"305423551",1,0.01,
"3060",1,0.01,
"30601",1,0.01,
"30605",1,0.01,
"30606",2,0.01,
"306066239",1,0.01,
"3064",1,0.01,
"306772488",1,0.01,
"307203088",1,0.01,
"30721",1,0.01,
"30809",1,0.01,
"30813",1,0.01,
"30892",0.01,1,
"309090119",1,0.01,
"310018",0.01,1,
"31005",1,0.01,
"31036",1,0.01,
"31088",2,0.01,
"31220",1,0.01,
"313001",0.01,1,
"31324",1,0.01,
"31401",1,0.01,
"31406",1,0.01,
"31410",1,2,
"314111337",1,0.01,
"31419",1,0.01,
"315300",0.01,1,
"31901",1,0.01,
"32024",1,0.01,
"320810547",1,0.01,
"32082",1,0.01,
"320920734",1,0.01,
"32128",1,0.01,
"32168-5347",1,0.01,
"32202",1,0.01,
"32211",1,0.01,
"32212-5112",1,0.01,
"32224",1,0.01,
"32256-9509",1,0.01,
"32259",1,0.01,
"3229",1,0.01,
"32301",1,0.01,
"32304",2,0.01,
"32540",1,0.01,
"32542",1,0.01,
"32605",1,0.01,
"32607",1,0.01,
"32608-2718",1,0.01,
"32701",1,0.01,
"32712",1,0.01,
"32746",1,0.01,
"32765",1,0.01,
"328031928",1,0.01,
"32804",1,0.01,
"32819-7133",1,0.01,
"32821",1,0.01,
"32821-6739",1,0.01,
"32828",1,0.01,
"32835",1,0.01,
"32837",1,0.01,
"32837-7097",1,0.01,
"32903",1,0.01,
"32940-2214",1,0.01,
"32965",1,0.01,
"330006",0.01,1,
"33009",2,0.01,
"330153966",1,0.01,
"33025",1,0.01,
"33028",1,0.01,
"33040",1,0.01,
"33054-2015",1,0.01,
"33054-6313",1,0.01,
"33062",1,0.01,
"33065",1,0.01,
"33071",1,0.01,
"330762438",1,0.01,
"33102",1,0.01,
"33114",1,0.01,
"33126",1,0.01,
"33129",1,0.01,
"33133",1,0.01,
"33134",1,0.01,
"331438613",1,0.01,
"331444918",1,0.01,
"33146-3145",1,0.01,
"33149",1,0.01,
"33154-2357",1,0.01,
"33155",2,0.01,
"33156-7954",1,0.01,
"331563944",1,0.01,
"33157",1,0.01,
"33160",1,0.01,
"33161",1,0.01,
"33165",1,0.01,
"331663250",1,0.01,
"33174",1,0.01,
"33175",1,0.01,
"33177-6159",1,0.01,
"33178",1,0.01,
"33181",1,0.01,
"33185",1,0.01,
"33308-3003",1,0.01,
"33309-6702",1,0.01,
"33313-5143",1,0.01,
"33319",2,0.01,
"333224807",1,0.01,
"33324",1,0.01,
"333313804",1,0.01,
"33332",1,0.01,
"334148124",1,0.01,
"334184570",1,0.01,
"33428",1,0.01,
"33435",1,0.01,
"33444",1,0.01,
"33445",1,0.01,
"334784764",1,0.01,
"33498-6602",1,0.01,
"33558",1,0.01,
"33559",1,0.01,
"33602",1,0.01,
"33606",1,0.01,
"33606-3747",1,0.01,
"33618",2,0.01,
"33624-4504",1,0.01,
"33629",2,0.01,
"33785",1,0.01,
"33812",1,0.01,
"33813",3,0.01,
"33837",1,0.01,
"33870",1,0.01,
"33884",1,0.01,
"33905-5539",1,0.01,
"33967",1,0.01,
"34002",1,0.01,
"34103",2,0.01,
"34105",2,0.01,
"34110",1,0.01,
"34112-5060",1,0.01,
"341123300",1,0.01,
"34114",1,0.01,
"341347421",1,0.01,
"34202",1,0.01,
"342037613",1,0.01,
"34210",1,0.01,
"34232",1,0.01,
"34243",1,0.01,
"34280",1,0.01,
"3431",1,0.01,
"34465-3703",1,0.01,
"34698",1,0.01,
"34711",1,0.01,
"347475001",1,0.01,
"34769",1,0.01,
"347717510",1,0.01,
"34786",1,0.01,
"34996",1,0.01,
"35007",1,0.01,
"35124",1,0.01,
"35127",1,0.01,
"35222",1,0.01,
"35223",1,0.01,
"352426433",1,0.01,
"35405",1,0.01,
"35470",1,0.01,
"35750",1,0.01,
"357586285",1,0.01,
"35802-3750",1,0.01,
"35811",1,0.01,
"36093",1,0.01,
"362651100",1,0.01,
"365323130",1,0.01,
"365326310",1,0.01,
"36605",1,0.01,
"36608",1,0.01,
"366082956",1,0.01,
"36830",1,0.01,
"37027",6,0.01,
"37027-8616",1,0.01,
"37027-8747",1,0.01,
"370275632",2,0.01,
"370278971",1,0.01,
"37040",1,0.01,
"37042",1,0.01,
"37057",0.01,1,
"37064",1,0.01,
"370649484",1,0.01,
"37069",2,0.01,
"37069-6551",1,0.01,
"370691823",1,0.01,
"37072",1,0.01,
"370764310",1,0.01,
"37082",1,0.01,
"370865264",1,0.01,
"370872503",1,0.01,
"37128",1,0.01,
"37135",1,0.01,
"37203",1,0.01,
"37204",1,0.01,
"37205",1,0.01,
"372052819",1,0.01,
"37206",1,0.01,
"37212",1,0.01,
"372152406",1,0.01,
"37220",1,0.01,
"37221-4372",1,0.01,
"374032318",1,0.01,
"37664",1,0.01,
"3768",1,0.01,
"37820",1,0.01,
"37909",1,0.01,
"37916",1,0.01,
"37919",1,0.01,
"37919-4246",1,0.01,
"37922",2,0.01,
"37923",1,0.01,
"380015",0.01,1,
"38002",1,0.01,
"38002-7014",1,0.01,
"38017",4,0.01,
"38017-1637",1,0.01,
"38018",1,0.01,
"38053",1,0.01,
"38103",1,0.01,
"38104",1,0.01,
"381043919",1,0.01,
"381112561",1,0.01,
"38112",3,0.01,
"38116",1,0.01,
"38117",2,0.01,
"38120-1332",1,0.01,
"381203304",1,0.01,
"38122",1,0.01,
"38134",1,0.01,
"381382352",1,0.01,
"38139",1,0.01,
"381396971",1,0.01,
"38255",1,0.01,
"38301",1,0.01,
"38305",1,0.01,
"38348",1,0.01,
"3842",1,0.01,
"38426",0.01,1,
"38580",1,0.01,
"38654-6234",1,0.01,
"39074",1,0.01,
"3908",1,0.01,
"39096",1,0.01,
"39110",1,0.01,
"39202",1,0.01,
"39206",1,0.01,
"392164108",1,0.01,
"39232",1,0.01,
"39401",2,0.01,
"39402",1,0.01,
"39503",1,0.01,
"39648",1,0.01,
"40059",1,0.01,
"400599503",1,0.01,
"400599581",1,0.01,
"40060",0.01,1,
"401122",0.01,1,
"40121",1,0.01,
"40204-1316",1,0.01,
"40206",2,0.01,
"40207",2,0.01,
"402071176",1,0.01,
"40217",1,0.01,
"40219",2,0.01,
"40220",1,0.01,
"40222",1,0.01,
"40223",1,0.01,
"40223-1366",1,0.01,
"40223-2371",1,0.01,
"402231615",1,0.01,
"402232371",1,0.01,
"40241",2,0.01,
"402413127",1,0.01,
"40243",1,0.01,
"40245",3,0.01,
"40245-4577",1,0.01,
"402451843",1,0.01,
"40258-2585",1,0.01,
"40299",1,0.01,
"403241073",1,0.01,
"403831673",1,0.01,
"403838815",1,0.01,
"40422",1,0.01,
"4046",1,0.01,
"40475",2,0.01,
"40502",3,0.01,
"405022313",1,0.01,
"405022444",1,0.01,
"40508",3,0.01,
"40509",4,0.01,
"405112012",1,0.01,
"405119034",1,0.01,
"40513-1400",1,0.01,
"40513-1826",1,0.01,
"40515",4,0.01,
"40515-1129",1,0.01,
"40517",1,0.01,
"40601",1,0.01,
"40965",3,0.01,
"41000",0.01,1,
"41017-4490",1,0.01,
"4105",1,0.01,
"41071-1798",1,0.01,
"41091",2,0.01,
"42223",1,0.01,
"42240-1227",1,0.01,
"42303",2,0.01,
"42701",1,0.01,
"42701-9094",1,0.01,
"430000",0.01,1,
"430064",0.01,1,
"43007",0.01,1,
"430073",0.01,1,
"430074",0.01,1,
"43015-1744",1,0.01,
"43016",3,0.01,
"43016-8659",1,0.01,
"430162221",1,0.01,
"43017",6,0.01,
"43017-8673",1,0.01,
"430178330",1,0.01,
"430178773",1,0.01,
"430219609",1,0.01,
"43023",2,0.01,
"43026",2,0.01,
"43040",1,0.01,
"43054",2,0.01,
"43054-9633",1,0.01,
"430548326",1,0.01,
"430549405",1,0.01,
"43065",2,0.01,
"43065-9594",1,0.01,
"430657051",1,0.01,
"43081",1,0.01,
"430813771",1,0.01,
"430818902",1,0.01,
"43082",1,0.01,
"43082-8919",1,0.01,
"430827757",1,0.01,
"43085",1,0.01,
"43085-2897",1,0.01,
"431131124",1,0.01,
"43119",1,0.01,
"43123",1,0.01,
"43130",1,0.01,
"43135",1,0.01,
"43201",7,0.01,
"43202",1,0.01,
"432042762",1,0.01,
"43205",2,0.01,
"432078619",1,0.01,
"43209",6,0.01,
"432091730",1,0.01,
"432091934",1,0.01,
"43212",2,0.01,
"432123237",1,0.01,
"432123567",1,0.01,
"43213",1,0.01,
"43214",2,0.01,
"43215",4,0.01,
"43219-2741",1,0.01,
"432192972",1,0.01,
"43220",2,0.01,
"432202970",1,0.01,
"432204068",1,0.01,
"43221",1,0.01,
"43221-3049",1,0.01,
"432211227",1,0.01,
"432212337",1,0.01,
"432213205",1,0.01,
"432213765",1,0.01,
"43227",1,0.01,
"43229-1345",1,0.01,
"43230",2,0.01,
"43231",1,0.01,
"432319210",1,0.01,
"432326396",1,0.01,
"43235",1,0.01,
"432357505",1,0.01,
"43326",1,0.01,
"43402",1,0.01,
"43403",1,0.01,
"434109710",1,0.01,
"43412",1,0.01,
"43412-9453",1,0.01,
"43511",1,0.01,
"43522",1,0.01,
"43551",1,0.01,
"43551-2274",1,0.01,
"435515809",1,0.01,
"435519474",1,0.01,
"43560",2,0.01,
"43560-1332",1,0.01,
"435601302",1,0.01,
"435609388",1,0.01,
"43566",1,0.01,
"435669418",1,0.01,
"435719545",1,0.01,
"43614-5508",1,0.01,
"43615",1,0.01,
"436151025",1,0.01,
"436171282",1,0.01,
"43623",1,0.01,
"43623-2646",1,0.01,
"436232930",1,0.01,
"43701",1,0.01,
"440114004",1,0.01,
"44012",3,0.01,
"44012-1317",1,0.01,
"440121977",1,0.01,
"440122534",2,0.01,
"440122929",1,0.01,
"44017",4,0.01,
"440171657",1,0.01,
"44022",5,0.01,
"44022-6675",1,0.01,
"440221314",1,0.01,
"440221334",1,0.01,
"440223300",1,0.01,
"440224245",1,0.01,
"440234568",1,0.01,
"44035",1,0.01,
"440357349",1,0.01,
"440394484",1,0.01,
"44040-9317",1,0.01,
"440409355",1,0.01,
"440409771",1,0.01,
"44056",1,0.01,
"440562411",1,0.01,
"44060",1,0.01,
"440673408",1,0.01,
"440701477",1,0.01,
"44072",2,0.01,
"44074",1,0.01,
"44076",1,0.01,
"440772265",1,0.01,
"440872924",1,0.01,
"44092",1,0.01,
"440945724",1,0.01,
"440949750",1,0.01,
"44095",1,0.01,
"440952504",1,0.01,
"44103",1,0.01,
"44103-2026",1,0.01,
"44106",1,0.01,
"441063220",1,0.01,
"441063241",1,0.01,
"44107",2,0.01,
"44107-1109",1,0.01,
"441082363",1,0.01,
"441092573",1,0.01,
"441093164",1,0.01,
"44111",1,0.01,
"441112846",1,0.01,
"441112847",1,0.01,
"441113948",1,0.01,
"441115817",1,0.01,
"44112-1207",1,0.01,
"44116",2,0.01,
"44116-2709",1,0.01,
"441161202",1,0.01,
"441161443",1,0.01,
"441161659",1,0.01,
"441161868",1,0.01,
"441162354",1,0.01,
"441162847",1,0.01,
"44118",6,0.01,
"44118-2807",1,0.01,
"44118-3506",1,0.01,
"441181224",1,0.01,
"441181342",1,0.01,
"441184509",1,0.01,
"441184661",1,0.01,
"44119-1939",1,0.01,
"44120",3,0.01,
"441201711",1,0.01,
"441203381",1,0.01,
"441203432",1,0.01,
"44121",1,0.01,
"44121-2948",1,0.01,
"44122",4,0.01,
"44122-2604",1,0.01,
"441221740",1,0.01,
"441222104",1,0.01,
"441222935",1,0.01,
"441224812",1,0.01,
"441225037",1,0.01,
"44123-4250",1,0.01,
"441232134",1,0.01,
"44124",1,0.01,
"44124-1427",1,0.01,
"441241305",1,0.01,
"441244818",1,0.01,
"44125",1,0.01,
"441263060",1,0.01,
"44130",2,0.01,
"44131",1,0.01,
"441341903",1,0.01,
"44135-2139",1,0.01,
"441351953",1,0.01,
"44136",3,0.01,
"441367870",1,0.01,
"44137",1,0.01,
"44138",2,0.01,
"441382116",1,0.01,
"44139",1,0.01,
"44139-3430",1,0.01,
"441394673",1,0.01,
"441395925",1,0.01,
"44140",4,0.01,
"441401329",1,0.01,
"441401574",1,0.01,
"441402505",1,0.01,
"441402517",1,0.01,
"44141",5,0.01,
"441411846",1,0.01,
"44142",2,0.01,
"44143",4,0.01,
"441431961",1,0.01,
"44145",3,0.01,
"44145-3064",1,0.01,
"44145-3706",1,0.01,
"441453770",1,0.01,
"441454957",1,0.01,
"441456507",1,0.01,
"441458121",1,0.01,
"44146",1,0.01,
"441463874",1,0.01,
"44147",1,0.01,
"44147-3613",1,0.01,
"44149",1,0.01,
"441496847",1,0.01,
"44202",2,0.01,
"44212",2,0.01,
"442125803",1,0.01,
"44221",1,0.01,
"44223-2989",1,0.01,
"44224-5120",1,0.01,
"442243755",1,0.01,
"44236",1,0.01,
"44236-3111",1,0.01,
"44236-3554",1,0.01,
"44241",1,0.01,
"44256",3,0.01,
"44256-2744",1,0.01,
"44256-4120",1,0.01,
"442567221",1,0.01,
"442569012",1,0.01,
"44273",1,0.01,
"44278",1,0.01,
"44313",2,0.01,
"443213033",1,0.01,
"44333",4,0.01,
"44333-2900",1,0.01,
"443332248",1,0.01,
"443339237",1,0.01,
"44406",1,0.01,
"44410",1,0.01,
"44484",1,0.01,
"44502",1,0.01,
"44511-1355",1,0.01,
"44513",1,0.01,
"44709",1,0.01,
"44714",1,0.01,
"447183223",1,0.01,
"44721",1,0.01,
"4473",1,0.01,
"44805",1,0.01,
"44814-9654",1,0.01,
"44820",1,0.01,
"44839",2,0.01,
"44870",2,0.01,
"45011",2,0.01,
"450119212",1,0.01,
"450144476",1,0.01,
"45030",1,0.01,
"45030-2009",1,0.01,
"45040",2,0.01,
"45040-1175",1,0.01,
"450409457",1,0.01,
"45050",1,0.01,
"450529615",1,0.01,
"45056",1,0.01,
"450668128",1,0.01,
"45069",2,0.01,
"450691167",1,0.01,
"451034047",1,0.01,
"45129",0.01,1,
"45140",3,0.01,
"451408336",1,0.01,
"45144",1,0.01,
"45150",1,0.01,
"451501880",1,0.01,
"45157-9173",1,0.01,
"45202",1,0.01,
"45206",1,0.01,
"45208",5,0.01,
"452081308",2,0.01,
"452081316",1,0.01,
"452081534",1,0.01,
"452081910",1,0.01,
"452082511",1,0.01,
"452082532",1,0.01,
"452082707",1,0.01,
"452083102",1,0.01,
"452083407",1,0.01,
"452084210",2,0.01,
"45209",1,0.01,
"45211",1,0.01,
"45212",1,0.01,
"45212-1924",1,0.01,
"452123219",1,0.01,
"45213",1,0.01,
"45215",3,0.01,
"45220",1,0.01,
"45223",1,0.01,
"45226",2,0.01,
"452261301",1,0.01,
"45227",2,0.01,
"45227-4248",1,0.01,
"452273021",1,0.01,
"45230",4,0.01,
"452303775",1,0.01,
"452305322",1,0.01,
"45231",2,0.01,
"452314440",1,0.01,
"45233",1,0.01,
"45233-4870",1,0.01,
"452331907",1,0.01,
"45236",4,0.01,
"452384330",1,0.01,
"452385807",1,0.01,
"452402812",1,0.01,
"45241",6,0.01,
"452414811",1,0.01,
"45242",7,0.01,
"45242-4531",1,0.01,
"452423201",1,0.01,
"452424458",1,0.01,
"452425907",1,0.01,
"45243",2,0.01,
"452432962",1,0.01,
"452434229",1,0.01,
"45244",1,0.01,
"45245",1,0.01,
"45248",2,0.01,
"45249",3,0.01,
"452492102",1,0.01,
"45251",1,0.01,
"45255",3,0.01,
"453051364",1,0.01,
"45309",1,0.01,
"45343",1,0.01,
"45371",2,0.01,
"45373",1,0.01,
"45402-4306",1,0.01,
"45408",1,0.01,
"45409",4,0.01,
"45415",1,0.01,
"45419",4,0.01,
"45419-3141",1,0.01,
"45420",1,0.01,
"45430",2,0.01,
"45431",1,0.01,
"45432",1,0.01,
"45458",2,0.01,
"454583263",1,0.01,
"454589267",1,0.01,
"45459",1,0.01,
"454591608",1,0.01,
"454591646",1,0.01,
"45631",1,0.01,
"45701",2,0.01,
"45810",1,0.01,
"45810-9762",1,0.01,
"45883",2,0.01,
"46011",1,0.01,
"46032",3,0.01,
"460327709",1,0.01,
"460329146",1,0.01,
"46033",4,0.01,
"460338690",1,0.01,
"460338959",1,0.01,
"46037",2,0.01,
"46037-4344",1,0.01,
"460379300",1,0.01,
"46038",3,0.01,
"46038-5238",1,0.01,
"460386853",1,0.01,
"46055",1,0.01,
"46074",1,0.01,
"46074-0090",1,0.01,
"46074-2228",1,0.01,
"460741406",1,0.01,
"46077",4,0.01,
"46112",1,0.01,
"46113",1,0.01,
"46123",1,0.01,
"46131",1,0.01,
"461311782",1,0.01,
"46135",1,0.01,
"461358781",1,0.01,
"46140",1,0.01,
"46142",1,0.01,
"46143-6028",1,0.01,
"46151",1,0.01,
"46163",2,0.01,
"46184",1,0.01,
"46202",1,0.01,
"46203",2,0.01,
"462041530",1,0.01,
"46205",4,0.01,
"462053423",1,0.01,
"46208",4,0.01,
"46208-2513",1,0.01,
"46214",1,0.01,
"462174343",1,0.01,
"46219",1,0.01,
"46220",3,0.01,
"46220-5803",1,0.01,
"462202880",1,0.01,
"462204245",1,0.01,
"46221",1,0.01,
"46226",1,0.01,
"46226-6385",1,0.01,
"46228",1,0.01,
"462286603",1,0.01,
"462286720",1,0.01,
"46229-1884",1,0.01,
"462314510",1,0.01,
"462353333",1,0.01,
"46236",2,0.01,
"462368980",1,0.01,
"46239",2,0.01,
"46240",3,0.01,
"46250",1,0.01,
"46250-3093",1,0.01,
"462503530",1,0.01,
"46254",1,0.01,
"462568501",1,0.01,
"46259",3,0.01,
"462604352",1,0.01,
"46268",1,0.01,
"46303",2,0.01,
"46303-8556",1,0.01,
"46304",4,0.01,
"463048892",1,0.01,
"46307",12,0.01,
"463071585",1,0.01,
"463079234",1,0.01,
"463079634",1,0.01,
"46311",3,0.01,
"463111108",1,0.01,
"463111283",1,0.01,
"463113072",1,0.01,
"46321",14,0.01,
"46321-2701",2,0.01,
"463211321",1,0.01,
"463212833",1,0.01,
"463213336",1,0.01,
"46322",8,0.01,
"46323",2,0.01,
"46323-3062",1,0.01,
"46324",2,0.01,
"463419162",1,0.01,
"46342",3,0.01,
"463423840",1,0.01,
"46350",3,0.01,
"463507941",1,0.01,
"46356",2,0.01,
"46360",4,0.01,
"46360-2081",1,0.01,
"463604524",1,0.01,
"463606129",1,0.01,
"46368-8713",1,0.01,
"463687735",1,0.01,
"46373",2,0.01,
"46375",4,0.01,
"46375-2386",1,0.01,
"46383",10,0.01,
"463833316",1,0.01,
"463834412",1,0.01,
"463834424",1,0.01,
"46385",5,0.01,
"46394",1,0.01,
"46404",1,0.01,
"46410",4,0.01,
"46506",1,0.01,
"46507",1,0.01,
"46514-4312",1,0.01,
"46517",2,0.01,
"46528",1,0.01,
"46530",5,0.01,
"46530-6844",1,0.01,
"46530-7013",1,0.01,
"465306512",1,0.01,
"465307268",1,0.01,
"465307862",1,0.01,
"465308399",1,0.01,
"465319543",1,0.01,
"46534",1,0.01,
"46545",1,0.01,
"46552",1,0.01,
"46561",1,0.01,
"465632720",1,0.01,
"465639023",1,0.01,
"46582-8227",1,0.01,
"46601-1030",1,0.01,
"46613",1,0.01,
"46614",2,0.01,
"466145045",1,0.01,
"46615-1003",1,0.01,
"466151036",1,0.01,
"46616",1,0.01,
"46616-1356",1,0.01,
"46617",1,0.01,
"46617-3320",1,0.01,
"46619",1,0.01,
"46619-9596",1,0.01,
"466284072",1,0.01,
"46637",1,0.01,
"466373826",1,0.01,
"46703",1,0.01,
"467259256",1,0.01,
"467742214",1,0.01,
"46783",1,0.01,
"46783-1003",1,0.01,
"46804",1,0.01,
"468041403",1,0.01,
"468043851",1,0.01,
"46805",3,0.01,
"46807",1,0.01,
"46814",3,0.01,
"468149427",1,0.01,
"46815",2,0.01,
"46816",2,0.01,
"46901",1,0.01,
"470069024",1,0.01,
"47012",1,0.01,
"47130",3,0.01,
"47161",1,0.01,
"47201",3,0.01,
"47203",1,0.01,
"47243",2,0.01,
"47374",1,0.01,
"47401",5,0.01,
"47403",1,0.01,
"47404",1,0.01,
"47404-5117",1,0.01,
"47406",2,0.01,
"47406-7514",1,0.01,
"47408",1,0.01,
"474082781",1,0.01,
"47421",1,0.01,
"47501",1,0.01,
"476160050",1,0.01,
"47630",2,0.01,
"47648",1,0.01,
"47711",1,0.01,
"47802-9606",1,0.01,
"47803",1,0.01,
"47834",1,0.01,
"47905",2,0.01,
"47906",5,0.01,
"47906-9671",1,0.01,
"47933",1,0.01,
"47977-8676",1,0.01,
"48009",6,0.01,
"480091420",1,0.01,
"480091905",1,0.01,
"480095768",1,0.01,
"480095865",1,0.01,
"48021",1,0.01,
"48025",7,0.01,
"480255137",1,0.01,
"480361645",1,0.01,
"48038-4946",1,0.01,
"48039",1,0.01,
"48042",3,0.01,
"48044",1,0.01,
"48062",1,0.01,
"48065",1,0.01,
"48066",2,0.01,
"48067",1,0.01,
"48070",2,0.01,
"480701560",1,0.01,
"48073",2,0.01,
"48075",1,0.01,
"48076",3,0.01,
"480761764",1,0.01,
"480765281",1,0.01,
"480765337",1,0.01,
"48081",1,0.01,
"48083",1,0.01,
"48084",2,0.01,
"480841220",1,0.01,
"480841400",1,0.01,
"48085",4,0.01,
"480851086",1,0.01,
"480857031",1,0.01,
"48090",1,0.01,
"48092",1,0.01,
"48094",1,0.01,
"480962521",1,0.01,
"48098",3,0.01,
"480984277",1,0.01,
"480985621",1,0.01,
"48101",1,0.01,
"481011439",1,0.01,
"48103",12,0.01,
"48103-3400",1,0.01,
"481032418",1,0.01,
"481032559",1,0.01,
"48104",8,0.01,
"48104-4414",1,0.01,
"48105",6,0.01,
"48105-2416",1,0.01,
"481052851",1,0.01,
"48108",9,0.01,
"481081720",1,0.01,
"48111",1,0.01,
"481147659",1,0.01,
"48116",1,0.01,
"481160453",1,0.01,
"481166776",1,0.01,
"481168593",1,0.01,
"48118",2,0.01,
"48124",1,0.01,
"48126",2,0.01,
"48127",1,0.01,
"48128",2,0.01,
"48130",3,0.01,
"481449655",1,0.01,
"48150",1,0.01,
"48152",6,0.01,
"48154",1,0.01,
"481618901",1,0.01,
"481659201",1,0.01,
"48167",6,0.01,
"481672719",1,0.01,
"481673945",1,0.01,
"481679380",1,0.01,
"48168",3,0.01,
"48168-8685",1,0.01,
"481681819",1,0.01,
"481683223",2,0.01,
"48169",2,0.01,
"48170",2,0.01,
"48170-1218",1,0.01,
"48170-1533",1,0.01,
"481705710",1,0.01,
"48176",2,0.01,
"48178",3,0.01,
"481781878",1,0.01,
"48180",1,0.01,
"48183",1,0.01,
"481832528",1,0.01,
"48185",2,0.01,
"48187",4,0.01,
"48188",2,0.01,
"48188-6241",1,0.01,
"48189",2,0.01,
"48189-9568",1,0.01,
"48192",1,0.01,
"48197",2,0.01,
"481974321",1,0.01,
"481974727",1,0.01,
"48198",1,0.01,
"48202",1,0.01,
"48203",1,0.01,
"482053141",1,0.01,
"48207",1,0.01,
"48208",1,0.01,
"48219",2,0.01,
"48221",1,0.01,
"482251649",1,0.01,
"48228",1,0.01,
"48230",8,0.01,
"48230-1063",1,0.01,
"48230-1310",1,0.01,
"482301460",1,0.01,
"482301814",1,0.01,
"482301924",1,0.01,
"48234",1,0.01,
"48236",7,0.01,
"482361161",1,0.01,
"482361874",1,0.01,
"482362617",1,0.01,
"482371481",1,0.01,
"48238",1,0.01,
"48239",1,0.01,
"48301",8,0.01,
"48301-2563",1,0.01,
"483013226",1,0.01,
"483013445",1,0.01,
"48302",4,0.01,
"48302-2729",1,0.01,
"483021106",1,0.01,
"483021217",1,0.01,
"483021570",1,0.01,
"483022534",1,0.01,
"483022849",1,0.01,
"483022850",1,0.01,
"48304",3,0.01,
"48304-1056",1,0.01,
"48304-1974",1,0.01,
"48304-2542",1,0.01,
"483041115",1,0.01,
"483041145",1,0.01,
"48306",3,0.01,
"483062835",1,0.01,
"483063593",1,0.01,
"48307",6,0.01,
"48307-1728",1,0.01,
"48307-2607",1,0.01,
"483076063",1,0.01,
"48309",3,0.01,
"483094510",1,0.01,
"48310",1,0.01,
"483141866",1,0.01,
"483144536",1,0.01,
"48315",1,0.01,
"48316",1,0.01,
"483161068",1,0.01,
"48320",2,0.01,
"48322",4,0.01,
"483222252",1,0.01,
"483224021",1,0.01,
"48323",5,0.01,
"483231372",1,0.01,
"483232452",1,0.01,
"483233821",1,0.01,
"48324",2,0.01,
"483241949",1,0.01,
"483243248",1,0.01,
"48326",1,0.01,
"483283453",1,0.01,
"48329",1,0.01,
"48331",3,0.01,
"483311934",1,0.01,
"483312055",1,0.01,
"483313529",1,0.01,
"48334",6,0.01,
"48334-4158",1,0.01,
"483343264",1,0.01,
"483344000",1,0.01,
"483344315",1,0.01,
"483344757",1,0.01,
"48335",2,0.01,
"483351240",1,0.01,
"48346",2,0.01,
"483462046",1,0.01,
"48347",1,0.01,
"48348",3,0.01,
"48348-4906",1,0.01,
"483482197",1,0.01,
"483482869",1,0.01,
"483484373",1,0.01,
"48356",1,0.01,
"48359",1,0.01,
"48360",1,0.01,
"483632642",1,0.01,
"48367",1,0.01,
"48371",1,0.01,
"48374",1,0.01,
"48374-2377",1,0.01,
"48374-3740",1,0.01,
"483742153",1,0.01,
"483743636",1,0.01,
"483743731",1,0.01,
"483743794",1,0.01,
"483743870",1,0.01,
"48375",3,0.01,
"483753934",1,0.01,
"483754755",1,0.01,
"483754761",1,0.01,
"48377",1,0.01,
"48380",1,0.01,
"48382",2,0.01,
"48383",1,0.01,
"48386",1,0.01,
"48413",2,0.01,
"48419",1,0.01,
"484238114",1,0.01,
"48430",1,0.01,
"484301467",1,0.01,
"484333706",1,0.01,
"48439",1,0.01,
"48439-9472",1,0.01,
"48455",2,0.01,
"48462",2,0.01,
"48473",1,0.01,
"48503",1,0.01,
"48507",1,0.01,
"48509-1251",1,0.01,
"48601",1,0.01,
"48602",2,0.01,
"486021945",1,0.01,
"48603",2,0.01,
"48611",1,0.01,
"48640",2,0.01,
"48640-2430",1,0.01,
"486402429",1,0.01,
"48641",1,0.01,
"48642",2,0.01,
"48661",1,0.01,
"48726",1,0.01,
"48734",1,0.01,
"48813",1,0.01,
"48821",1,0.01,
"48823",11,0.01,
"48832",1,0.01,
"48842",1,0.01,
"488428778",1,0.01,
"48843",2,0.01,
"48854",1,0.01,
"48858-1851",1,0.01,
"48858-6130",1,0.01,
"48864",4,0.01,
"48864-3849",1,0.01,
"488643468",1,0.01,
"48895",1,0.01,
"48906",1,0.01,
"48910",2,0.01,
"48915",1,0.01,
"48917",4,0.01,
"49001-2936",1,0.01,
"490029026",1,0.01,
"49006",1,0.01,
"49008",5,0.01,
"49008-1319",1,0.01,
"49009",2,0.01,
"49009-9381",1,0.01,
"49012-9710",1,0.01,
"49014",2,0.01,
"49017",1,0.01,
"49021",1,0.01,
"49024",1,0.01,
"490245520",1,0.01,
"49028",1,0.01,
"49033",1,0.01,
"49038",1,0.01,
"49048",1,0.01,
"490539777",1,0.01,
"490539780",1,0.01,
"49055",2,0.01,
"49057",1,0.01,
"49058",2,0.01,
"49071",2,0.01,
"49072",2,0.01,
"49078",1,0.01,
"49080",1,0.01,
"490809109",1,0.01,
"49085",10,0.01,
"490853429",1,0.01,
"49090",2,0.01,
"490999017",1,0.01,
"49102",1,0.01,
"491171473",1,0.01,
"49120",2,0.01,
"49203",1,0.01,
"49235",1,0.01,
"49270",1,0.01,
"49301",5,0.01,
"49306",1,0.01,
"49316",2,0.01,
"49331",3,0.01,
"49341",2,0.01,
"49401",3,0.01,
"49417",5,0.01,
"494178318",1,0.01,
"494178888",1,0.01,
"49418",2,0.01,
"49423",4,0.01,
"49424",2,0.01,
"494246104",1,0.01,
"49428",1,0.01,
"49441",2,0.01,
"49442",1,0.01,
"49444",1,0.01,
"494444273",1,0.01,
"49445",1,0.01,
"49456",2,0.01,
"494561875",1,0.01,
"49503",3,0.01,
"49504",2,0.01,
"49506",15,0.01,
"495063379",1,0.01,
"495064741",1,0.01,
"495065018",1,0.01,
"495086596",1,0.01,
"49509",2,0.01,
"49509-1480",1,0.01,
"49512",1,0.01,
"49525",1,0.01,
"495251213",1,0.01,
"49546",4,0.01,
"495465532",1,0.01,
"495467586",1,0.01,
"49636",1,0.01,
"49643",1,0.01,
"49648",1,0.01,
"49684",2,0.01,
"49685",1,0.01,
"49686",3,0.01,
"496862057",1,0.01,
"496866300",1,0.01,
"49688",1,0.01,
"49690",2,0.01,
"49696",1,0.01,
"49720",1,0.01,
"497209087",1,0.01,
"49740",2,0.01,
"49770",1,0.01,
"497708820",1,0.01,
"49855",2,0.01,
"49931",2,0.01,
"49970-9039",2,0.01,
"500004",0.01,2,
"50002",0.01,1,
"500074",0.01,1,
"50009",1,0.01,
"50014",3,0.01,
"50019",1,0.01,
"50021",1,0.01,
"50111",1,0.01,
"50112",1,0.01,
"501188044",1,0.01,
"50125",1,0.01,
"50131",1,0.01,
"50141",1,0.01,
"502",0.01,2,
"50263",1,0.01,
"50265",2,0.01,
"50265-5488",1,0.01,
"50266",2,0.01,
"50309",1,0.01,
"50310",1,0.01,
"503103826",1,0.01,
"50312",2,0.01,
"50312-5414",1,0.01,
"503121827",1,0.01,
"503121851",1,0.01,
"50317",1,0.01,
"503212619",1,0.01,
"50324",1,0.01,
"50325-6429",1,0.01,
"50327",1,0.01,
"50501",1,0.01,
"505220215",1,0.01,
"5055",1,0.01,
"50613",1,0.01,
"50674",1,0.01,
"50677-2215",1,0.01,
"50801",1,0.01,
"509",0.01,1,
"51020-250",0.01,1,
"51041",1,0.01,
"51201-2105",1,0.01,
"51526-4196",1,0.01,
"51900",0.01,1,
"52001",3,0.01,
"52002-0485",1,0.01,
"52003",1,0.01,
"52240",6,0.01,
"52240-7906",1,0.01,
"52240-9125",1,0.01,
"52241",2,0.01,
"52242",1,0.01,
"52245",5,0.01,
"52245-9205",1,0.01,
"522452027",1,0.01,
"522453245",1,0.01,
"52246",1,0.01,
"52302",1,0.01,
"52302-5147",1,0.01,
"523026265",1,0.01,
"52402",1,0.01,
"524023393",1,0.01,
"52403",2,0.01,
"52462",0.01,1,
"52556-8909",1,0.01,
"527223849",1,0.01,
"52803",1,0.01,
"52807",6,0.01,
"52807-1550",1,0.01,
"53005",3,0.01,
"53012",2,0.01,
"53017",1,0.01,
"53018",1,0.01,
"530181128",1,0.01,
"530219748",1,0.01,
"53022",2,0.01,
"53024",1,0.01,
"530242285",1,0.01,
"53029",1,0.01,
"530298559",1,0.01,
"530299018",1,0.01,
"53044",1,0.01,
"53044-1361",1,0.01,
"530441428",1,0.01,
"53045",3,0.01,
"53045-2218",1,0.01,
"53045-3843",1,0.01,
"530451309",1,0.01,
"530451708",1,0.01,
"530454918",1,0.01,
"530456203",1,0.01,
"530458163",1,0.01,
"53051",1,0.01,
"53051-5885",1,0.01,
"53066",1,0.01,
"530663489",1,0.01,
"530666513",1,0.01,
"53072",1,0.01,
"53072-2691",1,0.01,
"530725700",1,0.01,
"53073",1,0.01,
"53074",1,0.01,
"530741135",1,0.01,
"53080",1,0.01,
"53083",1,0.01,
"53083-2124",1,0.01,
"53089",1,0.01,
"53089-5007",1,0.01,
"53090",1,0.01,
"53092",5,0.01,
"53092-0054",1,0.01,
"53092-5231",1,0.01,
"530925202",1,0.01,
"53095",2,0.01,
"530954744",1,0.01,
"53105",1,0.01,
"531052110",1,0.01,
"53115",1,0.01,
"531153948",1,0.01,
"53125",1,0.01,
"53132",4,0.01,
"531328515",1,0.01,
"53140",1,0.01,
"53142",1,0.01,
"531427546",1,0.01,
"53143",1,0.01,
"53144",1,0.01,
"53149-8860",1,0.01,
"531499375",1,0.01,
"531512394",1,0.01,
"53158",3,0.01,
"53186",2,0.01,
"531861237",1,0.01,
"531865480",1,0.01,
"53188",1,0.01,
"531882526",1,0.01,
"531884408",1,0.01,
"53189",2,0.01,
"53190",1,0.01,
"531902188",1,0.01,
"53202",1,0.01,
"532021260",1,0.01,
"53204",2,0.01,
"53206",3,0.01,
"53206-3311",1,0.01,
"53207",3,0.01,
"532081013",1,0.01,
"53209",1,0.01,
"532091833",1,0.01,
"53211",3,0.01,
"53211-1002",1,0.01,
"53211-4377",1,0.01,
"532111759",1,0.01,
"532111778",1,0.01,
"53212",1,0.01,
"53213",2,0.01,
"53213-2319",1,0.01,
"532131737",1,0.01,
"53215-1946",1,0.01,
"53216",2,0.01,
"53217",6,0.01,
"53217-5601",1,0.01,
"532171915",1,0.01,
"532172736",1,0.01,
"532173871",1,0.01,
"532175501",1,0.01,
"532175742",1,0.01,
"532176021",1,0.01,
"53218",3,0.01,
"53220",1,0.01,
"532201342",1,0.01,
"532214217",1,0.01,
"53222",1,0.01,
"53223",2,0.01,
"532244142",1,0.01,
"53225",1,0.01,
"53226",2,0.01,
"532261255",1,0.01,
"53233",6,0.01,
"53402",3,0.01,
"53403",1,0.01,
"534041473",1,0.01,
"53406",2,0.01,
"53511-7026",1,0.01,
"53521",1,0.01,
"53534",3,0.01,
"53537",1,0.01,
"53545",1,0.01,
"53546",1,0.01,
"53555-9502",1,0.01,
"53558",2,0.01,
"53562",2,0.01,
"53562-1401",1,0.01,
"53562-3765",1,0.01,
"53562-3853",1,0.01,
"53588",1,0.01,
"53593",2,0.01,
"53593-2231",1,0.01,
"535938001",1,0.01,
"53598",1,0.01,
"53703",4,0.01,
"53704",2,0.01,
"53704-4857",1,0.01,
"53705",6,0.01,
"53705-1408",1,0.01,
"53705-1477",1,0.01,
"53711",2,0.01,
"53711-1932",1,0.01,
"53711-7119",1,0.01,
"537111925",1,0.01,
"537112108",1,0.01,
"53715",1,0.01,
"53717",1,0.01,
"53719",2,0.01,
"53719-4539",1,0.01,
"53726",1,0.01,
"53818",2,0.01,
"539162509",1,0.01,
"539169319",1,0.01,
"53946",1,0.01,
"54016",1,0.01,
"541153648",1,0.01,
"541159041",1,0.01,
"541159244",1,0.01,
"54136",1,0.01,
"54166",1,0.01,
"542130177",1,0.01,
"54220",1,0.01,
"54228",1,0.01,
"542359703",1,0.01,
"54245",2,0.01,
"54301",1,0.01,
"54304",1,0.01,
"54311",1,0.01,
"543119525",1,0.01,
"54313",2,1,
"54401",1,0.01,
"54455",1,0.01,
"54455-9410",1,0.01,
"54481",1,0.01,
"54481-3153",1,0.01,
"54494",1,0.01,
"54494-7388",1,0.01,
"54601",4,0.01,
"54636",1,0.01,
"54650",1,0.01,
"54701-7225",1,0.01,
"54822",1,0.01,
"54829",1,0.01,
"54901",1,0.01,
"54902",1,0.01,
"549048884",1,0.01,
"54911",1,0.01,
"549111157",1,0.01,
"549114319",1,0.01,
"54913",5,0.01,
"54914",1,0.01,
"54915-4698",1,0.01,
"549158710",1,0.01,
"54935",1,0.01,
"549428771",1,0.01,
"549522452",1,0.01,
"54956",4,0.01,
"549564625",1,0.01,
"55001",1,0.01,
"55001-9406",1,0.01,
"55016",2,0.01,
"55033",2,0.01,
"55040",2,0.01,
"55042",1,0.01,
"55044",2,0.01,
"55044-6035",1,0.01,
"550571638",1,0.01,
"55060",1,0.01,
"550684385",1,0.01,
"55073",1,0.01,
"550739768",1,0.01,
"55077",2,0.01,
"55082",5,0.01,
"55082-5405",1,0.01,
"55082-9464",1,0.01,
"550824552",1,0.01,
"55102",1,0.01,
"55102-3221",1,0.01,
"551024024",1,0.01,
"55104",3,0.01,
"55104-1939",1,0.01,
"551046921",1,0.01,
"55105",7,0.01,
"55105-3003",1,0.01,
"551051048",1,0.01,
"551053204",1,0.01,
"55106",1,0.01,
"55107",1,0.01,
"55110",1,0.01,
"55112",1,0.01,
"55113",1,0.01,
"55114",1,0.01,
"55115",1,0.01,
"551151777",1,0.01,
"55116",2,0.01,
"55116-1811",1,0.01,
"551162122",1,0.01,
"551162245",1,0.01,
"55117",1,0.01,
"55118",8,0.01,
"55118-2747",1,0.01,
"55118-3706",1,0.01,
"55118-3710",1,0.01,
"55119",1,0.01,
"55119-4353",1,0.01,
"551193353",1,0.01,
"55120",1,0.01,
"55122",1,0.01,
"551223390",1,0.01,
"55123",4,0.01,
"551231897",1,0.01,
"551232451",1,0.01,
"551233991",1,0.01,
"55124",5,0.01,
"551244236",1,0.01,
"551249580",1,0.01,
"55125",4,0.01,
"551252047",1,0.01,
"551258885",1,0.01,
"55126",1,0.01,
"551277177",1,0.01,
"55129",4,0.01,
"551295299",1,0.01,
"551296204",1,0.01,
"55303-5609",1,0.01,
"55304",1,0.01,
"55305",1,0.01,
"55305-5142",1,0.01,
"553052300",1,0.01,
"553053053",1,0.01,
"55306",2,0.01,
"553065062",1,0.01,
"553065151",1,0.01,
"55311",5,0.01,
"55311-2305",1,0.01,
"55311-2541",1,0.01,
"553111856",1,0.01,
"553112682",1,0.01,
"55317",1,0.01,
"553176401",1,0.01,
"553176705",1,0.01,
"553183416",1,0.01,
"55330",3,0.01,
"55330-7510",1,0.01,
"55331",5,0.01,
"55331-8887",1,0.01,
"553317703",2,0.01,
"553318110",1,0.01,
"553319034",1,0.01,
"55337-4355",1,0.01,
"55343",1,0.01,
"553431310",1,0.01,
"553434365",1,0.01,
"553438085",1,0.01,
"55344",1,0.01,
"55345",2,0.01,
"553452840",1,0.01,
"553453549",1,0.01,
"55346",5,0.01,
"553463232",1,0.01,
"55347",3,0.01,
"553471098",1,0.01,
"553471943",1,0.01,
"553474184",1,0.01,
"553474259",1,0.01,
"553569515",1,0.01,
"55359-9612",1,0.01,
"55364",2,0.01,
"553641121",1,0.01,
"553641845",1,0.01,
"553647383",1,0.01,
"553648147",1,0.01,
"553648160",1,0.01,
"55372",2,0.01,
"55374",1,0.01,
"553748829",1,0.01,
"55378",4,0.01,
"55386",1,0.01,
"55391",2,0.01,
"55391-9373",1,0.01,
"553913235",1,0.01,
"55401",1,0.01,
"55403",1,0.01,
"55405",5,0.01,
"554052436",1,0.01,
"55406",1,0.01,
"554061816",1,0.01,
"554062322",1,0.01,
"55407",3,0.01,
"55407-2641",1,0.01,
"55407-2739",1,0.01,
"554072737",1,0.01,
"55408",5,0.01,
"554083522",1,0.01,
"554091024",1,0.01,
"554091718",1,0.01,
"55410",1,0.01,
"554101407",1,0.01,
"554102445",1,0.01,
"554102635",1,0.01,
"55411",1,0.01,
"554111811",1,0.01,
"55412-1818",1,0.01,
"554121109",1,0.01,
"55414",5,0.01,
"554143677",1,0.01,
"55416",4,0.01,
"554164134",1,0.01,
"554164207",1,0.01,
"554165064",1,0.01,
"55417",6,0.01,
"55417-2527",1,0.01,
"554171429",1,0.01,
"554172441",1,0.01,
"55418",3,0.01,
"55419",3,0.01,
"55419-1523",1,0.01,
"554192014",1,0.01,
"554195336",1,0.01,
"554195339",1,0.01,
"55421",1,0.01,
"554213057",1,0.01,
"55422",2,0.01,
"55423",1,0.01,
"55424",2,0.01,
"554241138",1,0.01,
"55426",1,0.01,
"55426-1072",1,0.01,
"55427",1,0.01,
"554282655",1,0.01,
"554282730",1,0.01,
"55430",1,0.01,
"55431",2,0.01,
"554311961",1,0.01,
"55432",1,0.01,
"55433",1,0.01,
"55435-4031",1,0.01,
"55436",1,0.01,
"55436-1224",1,0.01,
"554361941",1,0.01,
"554362023",1,0.01,
"554362519",1,0.01,
"55437",3,0.01,
"55438",2,0.01,
"554381244",1,0.01,
"55439",1,0.01,
"55439-1044",1,0.01,
"55441",2,0.01,
"554422508",1,0.01,
"55443-1579",1,0.01,
"55444",2,0.01,
"55444-1514",1,0.01,
"55446",1,0.01,
"554462792",1,0.01,
"554471263",1,0.01,
"554471606",1,0.01,
"55449",1,0.01,
"55454",1,0.01,
"556161616",1,0.01,
"55746-9343",1,0.01,
"55805",1,0.01,
"55811",1,0.01,
"55811-4199",1,0.01,
"559028819",1,0.01,
"55904",1,0.01,
"559045616",1,0.01,
"559639676",1,0.01,
"56001",1,0.01,
"56001-2626",1,0.01,
"560012631",1,0.01,
"560031628",1,0.01,
"560102",0.01,1,
"56097",1,0.01,
"56267",1,0.01,
"56301",1,0.01,
"56308",1,0.01,
"56401",1,0.01,
"56560",1,0.01,
"570016706",1,0.01,
"570040205",1,0.01,
"571031012",1,0.01,
"571081544",1,0.01,
"57201",1,0.01,
"5738",1,0.01,
"5777",1,0.01,
"58000",0.01,1,
"58078",1,0.01,
"58504",1,0.01,
"59714",1,0.01,
"59715",1,0.01,
"59718",1,0.01,
"59725",1,0.01,
"599118245",1,0.01,
"60002",17,0.01,
"600022622",1,0.01,
"600022749",1,0.01,
"600026411",1,0.01,
"600028513",1,0.01,
"600029763",1,0.01,
"60004",91,0.01,
"60004-1392",1,0.01,
"60004-3319",1,0.01,
"60004-4050",1,0.01,
"60004-6629",1,0.01,
"60004-6634",1,0.01,
"60004-6863",1,0.01,
"600041396",1,0.01,
"600042511",1,0.01,
"600043249",1,0.01,
"600043662",1,0.01,
"600044727",1,0.01,
"600045115",1,0.01,
"600045766",1,0.01,
"600046049",1,0.01,
"600046438",1,0.01,
"600046720",1,0.01,
"600046938",1,0.01,
"600047216",1,0.01,
"60005",43,0.01,
"60005-1637",1,0.01,
"60005-2638",1,0.01,
"60005-2762",1,0.01,
"60005-2962",1,0.01,
"600051647",1,0.01,
"600052205",1,0.01,
"600052707",1,0.01,
"600053610",1,0.01,
"600053776",1,0.01,
"60007",30,0.01,
"60007-1732",1,0.01,
"60007-4533",1,0.01,
"600071763",1,0.01,
"600073455",1,0.01,
"600073942",1,0.01,
"60008",26,0.01,
"60008-2743",1,0.01,
"60008-3055",1,0.01,
"600082013",1,0.01,
"60009",1,0.01,
"60010",63,0.01,
"60010-1229",1,0.01,
"60010-1413",1,0.01,
"60010-2870",1,0.01,
"60010-2979",1,0.01,
"60010-3566",1,0.01,
"60010-4179",1,0.01,
"60010-6703",1,0.01,
"60010-7044",1,0.01,
"60010-7806",1,0.01,
"600102126",1,0.01,
"600103560",1,0.01,
"600104267",1,0.01,
"600104769",1,0.01,
"600105632",1,0.01,
"600105923",1,0.01,
"600106159",1,0.01,
"600106407",1,0.01,
"600106954",1,0.01,
"600107063",1,0.01,
"600109107",1,0.01,
"600109329",1,0.01,
"60012",18,0.01,
"600123515",1,0.01,
"60013",13,0.01,
"600131862",1,0.01,
"600132702",1,0.01,
"60014",38,0.01,
"60014-1602",1,0.01,
"60014-1939",1,0.01,
"60014-4250",1,0.01,
"600142947",1,0.01,
"600142991",1,0.01,
"60015",61,0.01,
"60015-1511",1,0.01,
"60015-2046",1,0.01,
"60015-2827",1,0.01,
"60015-2902",1,0.01,
"60015-3113",1,0.01,
"60015-3317",1,0.01,
"600151548",1,0.01,
"600151744",1,0.01,
"600151808",1,0.01,
"600152867",1,0.01,
"600152954",1,0.01,
"600153774",1,0.01,
"600153932",1,0.01,
"600154163",1,0.01,
"60016",83,0.01,
"60016-5161",1,0.01,
"60016-7064",1,0.01,
"600161039",1,0.01,
"600161413",1,0.01,
"600162019",1,0.01,
"600162155",1,0.01,
"600163637",1,0.01,
"600164254",1,0.01,
"600165121",1,0.01,
"600166312",1,0.01,
"600166720",1,0.01,
"600167545",1,0.01,
"600168735",1,0.01,
"60017",1,0.01,
"60018",39,0.01,
"60018-1620",1,0.01,
"60018-2651",1,0.01,
"60018-4061",1,0.01,
"600181136",1,0.01,
"600181222",1,0.01,
"600181244",1,0.01,
"600182023",1,0.01,
"600182264",1,0.01,
"600184064",1,0.01,
"600184320",1,0.01,
"60020",1,0.01,
"60021",1,0.01,
"60021-1344",1,0.01,
"60021-1827",1,0.01,
"60021-1908",1,0.01,
"60022",18,0.01,
"600222042",1,0.01,
"60025",118,0.01,
"60025-1403",1,0.01,
"60025-1413",1,0.01,
"60025-2720",1,0.01,
"60025-3243",1,0.01,
"60025-3912",1,0.01,
"60025-4329",1,0.01,
"600252335",1,0.01,
"600252827",1,0.01,
"600253003",1,0.01,
"600253245",1,0.01,
"600253430",1,0.01,
"600253537",1,0.01,
"600254016",1,0.01,
"600254141",1,0.01,
"600254156",1,0.01,
"600254208",1,0.01,
"600254626",1,0.01,
"600254641",1,0.01,
"600255010",1,0.01,
"60026",32,0.01,
"60026-1169",1,0.01,
"60026-7043",1,0.01,
"600261120",1,0.01,
"600267402",1,0.01,
"600268034",1,0.01,
"60029",1,0.01,
"60030",28,0.01,
"60030-3842",1,0.01,
"60030-4204",1,0.01,
"600307956",1,0.01,
"60031",43,0.01,
"60031-5244",1,0.01,
"600311695",1,0.01,
"600313209",1,0.01,
"600313770",1,0.01,
"600315120",1,0.01,
"600316364",1,0.01,
"600319132",1,0.01,
"60033",4,0.01,
"60034",1,0.01,
"60035",54,0.01,
"60035-1258",1,0.01,
"60035-4810",1,0.01,
"60035-5331",1,0.01,
"600352260",1,0.01,
"600353010",1,0.01,
"600353953",1,0.01,
"600353958",1,0.01,
"600354300",1,0.01,
"600354340",1,0.01,
"600354441",1,0.01,
"600355208",1,0.01,
"60040",2,0.01,
"60041",3,0.01,
"60041-9476",1,0.01,
"60042",3,0.01,
"60042-9525",1,0.01,
"600428204",1,0.01,
"60043",8,0.01,
"600431059",1,0.01,
"60044",14,0.01,
"60044-1561",1,0.01,
"600441650",1,0.01,
"60045",39,0.01,
"60045-1514",1,0.01,
"60045-1805",1,0.01,
"60045-2259",1,0.01,
"60045-2338",1,0.01,
"60045-2685",1,0.01,
"60045-3249",1,0.01,
"60045-3391",1,0.01,
"60045-4621",1,0.01,
"60045-4813",1,0.01,
"600451114",1,0.01,
"600453705",1,0.01,
"600453820",1,0.01,
"60046",21,0.01,
"60046-6514",1,0.01,
"60046-6522",1,0.01,
"60046-6725",1,0.01,
"60046-7527",1,0.01,
"600464954",1,0.01,
"60047",60,0.01,
"60047-1464",1,0.01,
"60047-2202",1,0.01,
"60047-2820",1,0.01,
"60047-5063",1,0.01,
"60047-5135",1,0.01,
"60047-5147",1,0.01,
"60047-7300",1,0.01,
"600472965",1,0.01,
"600473343",1,0.01,
"600475018",1,0.01,
"600475061",1,0.01,
"600475073",1,0.01,
"600475193",1,0.01,
"600475208",1,0.01,
"600475223",1,0.01,
"600475250",1,0.01,
"600477560",1,0.01,
"600478454",1,0.01,
"600479286",1,0.01,
"60048",43,0.01,
"60048-1106",1,0.01,
"60048-3066",1,0.01,
"60048-3233",1,0.01,
"60048-3400",1,0.01,
"600483921",1,0.01,
"600484807",1,0.01,
"600484897",1,0.01,
"60050",8,0.01,
"60051",9,0.01,
"60051-5148",1,0.01,
"60053",56,0.01,
"60053-1714",1,0.01,
"60053-1838",1,0.01,
"60053-2919",1,0.01,
"600532017",1,0.01,
"600532024",1,0.01,
"600532432",1,0.01,
"600533324",1,0.01,
"600533369",1,0.01,
"60055",1,0.01,
"60056",88,0.01,
"60056-1528",1,0.01,
"60056-1562",1,0.01,
"60056-1821",1,0.01,
"60056-2032",1,0.01,
"60056-3450",1,0.01,
"60056-3651",1,0.01,
"60056-4148",1,0.01,
"60056-4248",1,0.01,
"60056-4340",1,0.01,
"60056-5064",1,0.01,
"600561208",1,0.01,
"600561685",1,0.01,
"600561901",1,0.01,
"600562392",1,0.01,
"600562416",1,0.01,
"600562805",1,0.01,
"600562952",1,0.01,
"600562982",1,0.01,
"600564027",1,0.01,
"600564135",1,0.01,
"600564310",1,0.01,
"600564924",1,0.01,
"600565076",1,0.01,
"600565748",1,0.01,
"60060",37,0.01,
"60060-1272",1,0.01,
"60060-2888",1,0.01,
"60060-5374",1,0.01,
"60060-5604",1,0.01,
"600601718",1,0.01,
"600603451",1,0.01,
"600603481",1,0.01,
"600604006",1,0.01,
"600604063",1,0.01,
"600609545",1,0.01,
"60061",35,0.01,
"60061-1239",1,0.01,
"60061-1610",1,0.01,
"60061-2333",1,0.01,
"60061-2945",1,0.01,
"60061-3218",1,0.01,
"600611029",1,0.01,
"600611225",1,0.01,
"600612105",1,0.01,
"600613163",1,0.01,
"600614575",1,0.01,
"60062",64,1,
"60062-1102",1,0.01,
"60062-1541",1,0.01,
"60062-3705",1,0.01,
"60062-5413",1,0.01,
"60062-6075",1,0.01,
"60062-6609",1,0.01,
"600621336",1,0.01,
"600622213",1,0.01,
"600622601",1,0.01,
"600623763",1,0.01,
"600624211",1,0.01,
"600624654",1,0.01,
"600624927",1,0.01,
"600624943",1,0.01,
"600625800",1,0.01,
"600626060",1,0.01,
"600626350",1,0.01,
"600626410",1,0.01,
"600626921",1,0.01,
"600627054",1,0.01,
"600627425",1,0.01,
"60064",2,0.01,
"60064-1731",1,0.01,
"60067",66,0.01,
"60067-1941",1,0.01,
"60067-3470",1,0.01,
"60067-5847",1,0.01,
"600672236",1,0.01,
"600674258",1,0.01,
"600674728",1,0.01,
"600674937",1,0.01,
"600677056",1,0.01,
"600677277",1,0.01,
"600677363",1,0.01,
"600679108",1,0.01,
"600679110",1,0.01,
"60068",111,0.01,
"60068-1054",1,0.01,
"60068-1149",1,0.01,
"60068-1716",1,0.01,
"60068-1927",2,0.01,
"60068-2728",1,0.01,
"60068-3537",1,0.01,
"60068-4349",1,0.01,
"60068-4651",1,0.01,
"60068-5515",1,0.01,
"60068-5613",1,0.01,
"600681920",1,0.01,
"600682005",1,0.01,
"600682512",1,0.01,
"600682516",1,0.01,
"600682604",1,0.01,
"600682709",1,0.01,
"600682764",1,0.01,
"600682925",1,0.01,
"600682960",1,0.01,
"600683005",1,0.01,
"600683438",1,0.01,
"600683551",1,0.01,
"600683562",1,0.01,
"600683779",1,0.01,
"600683907",1,0.01,
"600684321",1,0.01,
"600684459",1,0.01,
"600684665",1,0.01,
"600684941",2,0.01,
"600685148",1,0.01,
"600685249",1,0.01,
"600685280",1,0.01,
"600685353",1,0.01,
"60069",10,0.01,
"60069-2209",1,0.01,
"60069-2808",1,0.01,
"60069-4026",1,0.01,
"600699631",1,0.01,
"60070",23,0.01,
"60070-2528",1,0.01,
"600701535",1,0.01,
"60071",4,0.01,
"60072",1,0.01,
"60073",31,0.01,
"60073-2570",1,0.01,
"60073-3619",1,0.01,
"60073-5625",1,0.01,
"600731753",1,0.01,
"600738149",1,0.01,
"600739540",1,0.01,
"60074",34,0.01,
"60074-1093",1,0.01,
"60074-2301",1,0.01,
"60074-3751",1,0.01,
"600741047",1,0.01,
"600745713",1,0.01,
"60076",80,0.01,
"60076-1950",1,0.01,
"60076-2067",1,0.01,
"60076-2422",1,0.01,
"60076-2623",1,0.01,
"60076-2710",1,0.01,
"60076-3070",1,0.01,
"60076-3680",1,0.01,
"600761417",1,0.01,
"600761571",1,0.01,
"600761615",1,0.01,
"600761847",1,0.01,
"600762111",1,0.01,
"600762134",1,0.01,
"600762618",1,0.01,
"600762927",1,0.01,
"600763611",1,0.01,
"600763626",1,0.01,
"60077",63,0.01,
"60077-1221",1,0.01,
"60077-1727",1,0.01,
"60077-1903",1,0.01,
"60077-1995",1,0.01,
"60077-2587",1,0.01,
"600771112",1,0.01,
"600771157",1,0.01,
"600771763",1,0.01,
"600772059",1,0.01,
"600772112",1,0.01,
"600772179",1,0.01,
"600772203",1,0.01,
"600772830",1,0.01,
"600772832",1,0.01,
"600773477",1,0.01,
"600773652",1,0.01,
"600775439",1,0.01,
"60079",1,0.01,
"60081",2,0.01,
"60083",12,0.01,
"600839725",1,0.01,
"60084",10,0.01,
"600845003",1,0.01,
"600845021",1,0.01,
"60085",26,0.01,
"60085-1120",1,0.01,
"60085-4012",1,0.01,
"60085-5820",1,0.01,
"60085-7245",1,0.01,
"60085-8605",1,0.01,
"600851626",1,0.01,
"60087",16,0.01,
"60087-1450",1,0.01,
"60087-2250",1,0.01,
"60087-3560",1,0.01,
"600871820",1,0.01,
"60088",2,0.01,
"60088-2532",1,0.01,
"60089",58,0.01,
"60089-1672",1,0.01,
"60089-1708",1,0.01,
"60089-6858",1,0.01,
"600891046",1,0.01,
"600891181",1,0.01,
"600891191",1,0.01,
"600891832",1,0.01,
"600891838",1,0.01,
"600891961",1,0.01,
"600892080",1,0.01,
"600894153",1,0.01,
"600896909",1,0.01,
"600897739",1,0.01,
"60090",37,0.01,
"60090-2610",1,0.01,
"60090-4460",1,0.01,
"60090-5334",1,0.01,
"60090-5352",1,0.01,
"600903816",1,0.01,
"600905439",1,0.01,
"600905534",1,0.01,
"600905585",1,0.01,
"60091",57,0.01,
"60091-1540",1,0.01,
"60091-1916",1,0.01,
"60091-2144",1,0.01,
"60091-2376",1,0.01,
"600911557",1,0.01,
"600911971",1,0.01,
"600912010",1,0.01,
"600912504",1,0.01,
"600912601",1,0.01,
"600913318",1,0.01,
"60093",48,0.01,
"60093-1421",1,0.01,
"60093-2166",1,0.01,
"60093-2356",1,0.01,
"60093-3820",1,0.01,
"60093-4145",2,0.01,
"600931115",1,0.01,
"600931402",1,0.01,
"600932212",1,0.01,
"600933221",1,0.01,
"600933231",1,0.01,
"600933546",1,0.01,
"600933820",1,0.01,
"600934003",1,0.01,
"600934015",1,0.01,
"60096",1,0.01,
"60097",5,0.01,
"60097-8651",1,0.01,
"60098",16,0.01,
"60098-2505",1,0.01,
"60098-4104",1,0.01,
"60099",8,0.01,
"60101",43,0.01,
"60101-2010",1,0.01,
"60101-2806",1,0.01,
"60101-2866",1,0.01,
"60101-3533",1,0.01,
"60101-3719",1,0.01,
"601011068",1,0.01,
"601011657",1,0.01,
"601011719",1,0.01,
"601012046",1,0.01,
"601012131",1,0.01,
"601012917",1,0.01,
"601013232",1,0.01,
"601013312",1,0.01,
"601013407",1,0.01,
"601015707",1,0.01,
"601015721",1,0.01,
"601016515",1,0.01,
"60102",22,0.01,
"60102-2048",1,0.01,
"60102-4513",1,0.01,
"60102-5036",1,0.01,
"60102-6084",1,0.01,
"601022949",1,0.01,
"601025038",1,0.01,
"601025419",1,0.01,
"601026620",1,0.01,
"601026821",1,0.01,
"60103",60,0.01,
"60103-1306",1,0.01,
"60103-1395",1,0.01,
"60103-4013",1,0.01,
"60103-4702",1,0.01,
"60103-5705",1,0.01,
"60103-7400",1,0.01,
"601031309",1,0.01,
"601031854",1,0.01,
"601032303",1,0.01,
"601032979",1,0.01,
"601034003",1,0.01,
"601035895",1,0.01,
"60104",7,0.01,
"601042329",1,0.01,
"60106",8,0.01,
"60106-1406",1,0.01,
"601063152",1,0.01,
"601063153",1,0.01,
"60107",24,0.01,
"60107-1366",1,0.01,
"60107-1920",2,0.01,
"601071584",1,0.01,
"60108",32,0.01,
"60108-1364",1,0.01,
"60108-1464",1,0.01,
"60108-5409",1,0.01,
"601081056",1,0.01,
"601081312",1,0.01,
"601081330",1,0.01,
"601081461",1,0.01,
"601081465",1,0.01,
"601082532",1,0.01,
"601082534",1,0.01,
"601083016",1,0.01,
"60109",1,0.01,
"60110",11,0.01,
"60110-1137",1,0.01,
"60110-1242",1,0.01,
"60110-1258",1,0.01,
"60110-2908",1,0.01,
"60115",10,0.01,
"60115-1583",1,0.01,
"60115-3855",1,0.01,
"60118",10,0.01,
"60118-3312",1,0.01,
"60118-9005",1,0.01,
"60119",3,0.01,
"601198444",1,0.01,
"60120",17,0.01,
"60120-4617",1,0.01,
"601202464",1,0.01,
"60123",15,0.01,
"60123-1404",1,0.01,
"60123-7713",1,0.01,
"60124",8,0.01,
"601248939",1,0.01,
"60126",68,0.01,
"60126-1804",1,0.01,
"60126-2324",1,0.01,
"60126-2327",1,0.01,
"60126-2915",1,0.01,
"60126-3226",1,0.01,
"60126-3602",1,0.01,
"60126-4840",1,0.01,
"60126-5011",1,0.01,
"60126-5214",1,0.01,
"601261332",1,0.01,
"601263553",1,0.01,
"601263709",1,0.01,
"601263803",1,0.01,
"60130",10,0.01,
"60130-1221",1,0.01,
"60130-1533",1,0.01,
"60131",19,0.01,
"60131-2525",1,0.01,
"60131-2671",1,0.01,
"601311903",1,0.01,
"601312661",1,0.01,
"60133",34,0.01,
"60133-2655",1,0.01,
"60133-5213",1,0.01,
"60133-5320",1,0.01,
"601333842",1,0.01,
"601335114",1,0.01,
"601335202",1,0.01,
"60134",17,0.01,
"60134-1706",1,0.01,
"60134-2536",1,0.01,
"60134-7514",1,0.01,
"60134-7551",1,0.01,
"601341862",1,0.01,
"601345402",1,0.01,
"601346022",1,0.01,
"60135",2,0.01,
"60136",7,0.01,
"60136-4040",1,0.01,
"60136-8021",1,0.01,
"60137",36,0.01,
"60137-3202",1,0.01,
"60137-7042",1,0.01,
"601373951",1,0.01,
"601374108",1,0.01,
"601374454",1,0.01,
"601374749",1,0.01,
"601374829",1,0.01,
"601375219",1,0.01,
"601375595",1,0.01,
"601376107",1,0.01,
"601377251",1,0.01,
"601377306",1,0.01,
"601377458",1,0.01,
"60139",30,0.01,
"60139-2588",3,0.01,
"60139-3117",2,0.01,
"60139-3601",1,0.01,
"601392186",1,0.01,
"601393795",1,0.01,
"60140",14,0.01,
"60140-2045",1,0.01,
"60140-9126",1,0.01,
"60140-9179",1,0.01,
"601407715",1,0.01,
"60142",15,0.01,
"60142-2423",1,0.01,
"60142-4047",1,0.01,
"601428196",1,0.01,
"60143",14,0.01,
"60146",1,0.01,
"60147",1,0.01,
"60148",53,0.01,
"60148-2309",1,0.01,
"60148-2604",1,0.01,
"60148-3254",1,0.01,
"60148-3837",1,0.01,
"60148-4436",1,0.01,
"60148-4722",1,0.01,
"60148-6504",1,0.01,
"601483500",1,0.01,
"60150",1,0.01,
"60151",2,0.01,
"60152",3,0.01,
"60153",5,0.01,
"60153-2306",1,0.01,
"601533218",1,0.01,
"60154",21,0.01,
"60154-3551",1,0.01,
"60154-4438",1,0.01,
"60154-7986",1,0.01,
"601543422",1,0.01,
"601544921",1,0.01,
"601544925",1,0.01,
"601544928",1,0.01,
"601545008",1,0.01,
"60155",5,0.01,
"60155-3007",1,0.01,
"60155-4834",1,0.01,
"60156",22,0.01,
"60156-1231",1,0.01,
"60156-5216",1,0.01,
"60156-5827",1,0.01,
"601565884",1,0.01,
"601566746",1,0.01,
"60157",4,0.01,
"601579504",1,0.01,
"601579754",1,0.01,
"60160",12,0.01,
"60160-1903",1,0.01,
"60160-1920",1,0.01,
"60160-3111",1,0.01,
"601601925",1,0.01,
"601602837",1,0.01,
"60162",2,0.01,
"60163",3,0.01,
"60164",22,0.01,
"60164-1724",1,0.01,
"601641541",1,0.01,
"601642236",1,0.01,
"60165",4,0.01,
"60169",29,0.01,
"60169-1056",1,0.01,
"60169-3362",1,0.01,
"60169-4839",1,0.01,
"601691003",1,0.01,
"601692643",1,0.01,
"601693261",1,0.01,
"601694002",1,0.01,
"60171",12,0.01,
"60171-1448",1,0.01,
"60172",23,0.01,
"601721009",1,0.01,
"601721100",1,0.01,
"601721629",1,0.01,
"601722157",1,0.01,
"601723022",1,0.01,
"60173",13,0.01,
"60173-3915",1,0.01,
"60173-6534",1,0.01,
"60173-6572",1,0.01,
"60174",26,0.01,
"60174-8842",1,0.01,
"601741189",1,0.01,
"601741414",1,0.01,
"601741432",1,0.01,
"601747858",1,0.01,
"601747968",1,0.01,
"601748713",1,0.01,
"60175",25,0.01,
"601756503",1,0.01,
"60176",15,0.01,
"601761445",1,0.01,
"601761580",1,0.01,
"601761829",1,0.01,
"60177",14,0.01,
"60177-1970",1,0.01,
"60177-2380",1,0.01,
"601772823",1,0.01,
"601772913",1,0.01,
"601773276",1,0.01,
"60178",1,0.01,
"60178-8800",1,0.01,
"60181",27,0.01,
"601811744",1,0.01,
"601811968",1,0.01,
"601811972",1,0.01,
"601812946",1,0.01,
"601813862",1,0.01,
"601815254",1,0.01,
"60183",1,0.01,
"60184",3,0.01,
"601842306",1,0.01,
"60185",21,0.01,
"60185-6167",1,0.01,
"60185-6418",1,0.01,
"601854219",1,0.01,
"601854523",1,0.01,
"601855029",1,0.01,
"601855062",1,0.01,
"601855113",1,0.01,
"601855926",1,0.01,
"60187",32,0.01,
"601873062",1,0.01,
"601873910",1,0.01,
"601874021",1,0.01,
"601874040",1,0.01,
"601874744",1,0.01,
"601875637",1,0.01,
"60188",52,0.01,
"60188-1389",1,0.01,
"60188-2500",2,0.01,
"60188-9238",1,0.01,
"601883124",1,0.01,
"601883400",1,0.01,
"601884304",1,0.01,
"601884322",1,0.01,
"601884341",1,0.01,
"601884606",1,0.01,
"601886028",1,0.01,
"601889100",1,0.01,
"60189",43,0.01,
"60189-2110",2,0.01,
"60189-2947",1,0.01,
"60189-7425",1,0.01,
"601892011",1,0.01,
"601897174",1,0.01,
"60190",8,0.01,
"60190-1740",1,0.01,
"601902331",1,0.01,
"60191",12,0.01,
"60191-2031",1,0.01,
"60191-2037",1,0.01,
"60191-2239",1,0.01,
"601912003",1,0.01,
"601912163",1,0.01,
"60192",17,0.01,
"60192-1321",1,0.01,
"60192-4603",1,0.01,
"601921176",1,0.01,
"601921327",1,0.01,
"601921648",1,0.01,
"60193",38,0.01,
"60193-1370",1,0.01,
"60193-3329",1,0.01,
"60193-5154",1,0.01,
"601934885",1,0.01,
"60194",27,0.01,
"60194-2226",1,0.01,
"60194-3820",1,0.01,
"601942252",1,0.01,
"60195",5,0.01,
"601951312",1,0.01,
"60201",46,0.01,
"60201-1184",1,0.01,
"60201-2071",1,0.01,
"602011346",1,0.01,
"602012117",1,0.01,
"602014084",1,0.01,
"60202",58,0.01,
"60202-1113",1,0.01,
"60202-2002",1,0.01,
"60202-3605",1,0.01,
"60202-3948",1,0.01,
"60202-3971",1,0.01,
"602021025",1,0.01,
"602021231",1,0.01,
"602024601",1,0.01,
"60203",5,0.01,
"60203-1942",1,0.01,
"602031302",1,0.01,
"60301",3,0.01,
"60302",56,0.01,
"60302-1636",1,0.01,
"60302-2214",1,0.01,
"60302-2612",1,0.01,
"60302-2703",1,0.01,
"60302-5000",1,0.01,
"603021318",1,0.01,
"603021410",1,0.01,
"603021422",1,0.01,
"603022502",1,0.01,
"603022941",1,0.01,
"603023312",1,0.01,
"603023560",1,0.01,
"60303",3,0.01,
"60304",17,0.01,
"60304-1613",1,0.01,
"60304-1847",1,0.01,
"603041411",1,0.01,
"603041412",1,0.01,
"603041424",1,0.01,
"603041622",1,0.01,
"603041813",1,0.01,
"603041832",1,0.01,
"60305",20,0.01,
"60305-1309",1,0.01,
"60305-1922",1,0.01,
"603051034",1,0.01,
"603051100",1,0.01,
"60306",1,0.01,
"6033",1,0.01,
"60401",4,0.01,
"604013677",1,0.01,
"60402",71,0.01,
"60402-1139",1,0.01,
"60402-1330",1,0.01,
"60402-1623",2,0.01,
"60402-2401",1,0.01,
"60402-3812",1,0.01,
"60402-3863",1,0.01,
"60402-3971",1,0.01,
"60402-4069",1,0.01,
"604021611",1,0.01,
"604021619",1,0.01,
"604021670",1,0.01,
"604022542",1,0.01,
"604022941",1,0.01,
"604023513",1,0.01,
"604023805",1,0.01,
"604023875",1,0.01,
"60403",5,0.01,
"60404",7,0.01,
"60404-8143",1,0.01,
"60404-8191",1,0.01,
"60404-9414",1,0.01,
"604040563",1,0.01,
"604048922",1,0.01,
"604049434",1,0.01,
"604049521",1,0.01,
"60406",14,0.01,
"60409",17,0.01,
"60410",3,0.01,
"60411",26,0.01,
"60411-1310",1,0.01,
"60411-2601",1,0.01,
"60411-3225",1,0.01,
"60411-4207",1,0.01,
"60411-7500",1,0.01,
"604111850",1,0.01,
"604116600",1,0.01,
"60415",3,0.01,
"60416",1,0.01,
"60417",10,0.01,
"60417-1308",1,0.01,
"60417-1975",1,0.01,
"60417-3781",1,0.01,
"60417-4298",1,0.01,
"604171256",1,0.01,
"604171271",1,0.01,
"604173921",1,0.01,
"60419",7,0.01,
"60419-1530",1,0.01,
"60419-2716",1,0.01,
"60420",1,0.01,
"60421",2,0.01,
"60422",18,0.01,
"60422-1039",1,0.01,
"60422-4325",1,0.01,
"604221216",1,0.01,
"60423",31,0.01,
"60423-1031",1,0.01,
"60423-1360",1,0.01,
"60423-1702",1,0.01,
"60423-8085",1,0.01,
"60423-9283",1,0.01,
"604232266",1,0.01,
"604238647",1,0.01,
"60425",9,0.01,
"60425-1018",1,0.01,
"60426",3,0.01,
"60427",1,0.01,
"60428",3,0.01,
"60428-2703",1,0.01,
"60428-3920",1,0.01,
"60428-4619",1,0.01,
"60429",14,0.01,
"60429-0653",1,0.01,
"60429-1318",1,0.01,
"60429-2405",1,0.01,
"60430",28,0.01,
"60430-3205",1,0.01,
"60430-3306",1,0.01,
"604301820",1,0.01,
"60431",15,0.01,
"60431-4906",1,0.01,
"60431-5353",1,0.01,
"604311005",1,0.01,
"604318627",1,0.01,
"60432",3,0.01,
"60432-1246",1,0.01,
"60432-2020",1,0.01,
"60432-2306",1,0.01,
"60433",1,0.01,
"60434",1,0.01,
"60435",22,0.01,
"604357447",1,0.01,
"604358741",1,0.01,
"60436",10,0.01,
"60438",14,0.01,
"60438-1708",1,0.01,
"604381557",1,0.01,
"604382118",1,0.01,
"604383230",1,0.01,
"604386501",1,0.01,
"60439",38,0.01,
"60439-2754",1,0.01,
"60439-4081",1,0.01,
"60439-6134",1,0.01,
"604393502",1,0.01,
"604393941",1,0.01,
"604394492",1,0.01,
"604396135",1,0.01,
"60440",38,0.01,
"60440-1141",1,0.01,
"60440-1210",1,0.01,
"604409006",2,0.01,
"60441",20,0.01,
"60441-4284",1,0.01,
"60441-7604",1,0.01,
"604411507",1,0.01,
"604412996",1,0.01,
"604415246",1,0.01,
"60442",6,0.01,
"604428111",1,0.01,
"60443",23,0.01,
"60443-1205",1,0.01,
"60443-3026",1,0.01,
"604431299",1,0.01,
"604431787",1,0.01,
"60445",10,0.01,
"604451328",1,0.01,
"60446",24,0.01,
"60446-4100",1,0.01,
"604461680",1,0.01,
"604461691",1,0.01,
"604465005",1,0.01,
"60447",6,0.01,
"60447-9371",1,0.01,
"60448",26,0.01,
"60448-1466",1,0.01,
"60448-1707",1,0.01,
"60448-8346",1,0.01,
"604481066",1,0.01,
"604481789",1,0.01,
"60449",5,0.01,
"60450",2,0.01,
"60450-2544",1,0.01,
"60451",31,0.01,
"60451-2677",1,0.01,
"60451-3753",1,0.01,
"60451-9688",1,0.01,
"604513828",1,0.01,
"60452",20,1,
"60452-2739",1,0.01,
"60452-4503",1,0.01,
"604521588",1,0.01,
"604522839",1,0.01,
"60453",60,0.01,
"60453-1335",1,0.01,
"60453-1450",2,0.01,
"60453-2962",1,0.01,
"60453-3059",1,0.01,
"60453-3070",1,0.01,
"60453-3267",1,0.01,
"60453-3913",1,0.01,
"60453-6050",1,0.01,
"604531466",1,0.01,
"604531937",1,0.01,
"604533224",1,0.01,
"604533407",2,0.01,
"604533643",1,0.01,
"604533846",1,0.01,
"60455",12,0.01,
"60455-1397",1,0.01,
"60456",1,0.01,
"604561214",1,0.01,
"60457",16,0.01,
"60457-1911",1,0.01,
"60458",9,0.01,
"60458-1173",1,0.01,
"604581153",1,0.01,
"604581634",1,0.01,
"60459",25,0.01,
"60459-2720",1,0.01,
"604591335",1,0.01,
"604591340",1,0.01,
"604592149",1,0.01,
"604592611",1,0.01,
"60461",4,0.01,
"60461-1330",1,0.01,
"604611421",1,0.01,
"60462",54,0.01,
"60462-2238",1,0.01,
"60462-2846",1,0.01,
"60462-7457",1,0.01,
"60462-7763",1,0.01,
"604621597",1,0.01,
"604622325",1,0.01,
"604622360",1,0.01,
"604622604",1,0.01,
"604626113",1,0.01,
"604626115",1,0.01,
"604626413",1,0.01,
"604627710",1,0.01,
"60463",23,0.01,
"60463-1324",1,0.01,
"604631918",1,0.01,
"604632428",1,0.01,
"60464",14,0.01,
"60464-2508",1,0.01,
"604641753",1,0.01,
"604641943",1,0.01,
"60465",28,0.01,
"604651095",1,0.01,
"604651152",1,0.01,
"604651158",1,0.01,
"604651392",1,0.01,
"604652414",1,0.01,
"60466",11,0.01,
"60466-1822",1,0.01,
"60466-1827",1,0.01,
"60467",53,0.01,
"60467-4505",1,0.01,
"60467-4605",1,0.01,
"60467-5353",1,0.01,
"60467-5378",1,0.01,
"60467-5403",1,0.01,
"60467-7826",1,0.01,
"60467-8509",2,0.01,
"60467-9410",1,0.01,
"604674408",1,0.01,
"604674435",1,0.01,
"604674589",1,0.01,
"604675884",1,0.01,
"604677195",1,0.01,
"604677466",1,0.01,
"604678478",1,0.01,
"60468",2,0.01,
"60468-9428",1,0.01,
"60469",2,0.01,
"60469-1102",1,0.01,
"60471",4,0.01,
"60471-1284",1,0.01,
"604711047",1,0.01,
"60472",3,0.01,
"60473",14,0.01,
"60473-2559",1,0.01,
"60473-3580",1,0.01,
"604732228",1,0.01,
"60475",4,0.01,
"604755952",1,0.01,
"604761121",1,0.01,
"60477",46,0.01,
"60477-3660",1,0.01,
"604772608",1,0.01,
"604776725",1,0.01,
"604776851",1,0.01,
"604777175",1,0.01,
"60478",13,0.01,
"60478-5427",1,0.01,
"604784783",1,0.01,
"60479",1,0.01,
"60480",6,0.01,
"604801057",1,0.01,
"60481",2,0.01,
"60482",5,0.01,
"604821102",1,0.01,
"60484",2,0.01,
"60487",24,0.01,
"60487-4684",1,0.01,
"60487-5600",1,0.01,
"60487-5649",1,0.01,
"60487-5807",1,0.01,
"60487-6102",1,0.01,
"60487-7202",1,0.01,
"60487-7500",1,0.01,
"60487-8440",1,0.01,
"60487-8602",1,0.01,
"604875616",1,0.01,
"604878637",1,0.01,
"60490",20,0.01,
"60490-3321",1,0.01,
"60490-5445",1,0.01,
"604904584",1,0.01,
"604904940",1,0.01,
"604904965",1,0.01,
"604905449",1,0.01,
"60491",27,0.01,
"60491-6930",1,0.01,
"60491-7595",1,0.01,
"60491-7997",1,0.01,
"60491-9228",1,0.01,
"60491-9299",1,0.01,
"604917825",1,0.01,
"604918375",1,0.01,
"604918401",1,0.01,
"604918487",1,0.01,
"60501",11,0.01,
"60502",22,0.01,
"60502-7403",1,0.01,
"60502-9009",1,0.01,
"60502-9045",1,0.01,
"605026549",1,0.01,
"605027000",1,0.01,
"605028608",1,0.01,
"605029654",1,0.01,
"605029666",1,0.01,
"60503",12,0.01,
"605036258",1,0.01,
"60504",35,0.01,
"60504-2008",1,0.01,
"60504-3203",1,0.01,
"60504-5333",1,0.01,
"60504-5390",1,0.01,
"60504-5470",1,0.01,
"60504-6023",1,0.01,
"60504-6070",2,0.01,
"605044028",1,0.01,
"605045265",1,0.01,
"605045360",1,0.01,
"605048414",1,0.01,
"60505",8,0.01,
"60505-1819",1,0.01,
"60505-3734",1,0.01,
"60505-4843",1,0.01,
"60506",22,0.01,
"60506-5349",1,0.01,
"60506-6908",1,0.01,
"605061859",1,0.01,
"605067313",1,0.01,
"60510",13,0.01,
"60510-3555",1,0.01,
"60510-8611",1,0.01,
"605102505",1,0.01,
"605102884",1,0.01,
"60513",31,0.01,
"60513-1761",1,0.01,
"60513-2010",1,0.01,
"605131334",1,0.01,
"605131809",1,0.01,
"605131814",1,0.01,
"605132555",1,0.01,
"60514",7,0.01,
"60514-1312",2,0.01,
"605141228",1,0.01,
"605141305",1,0.01,
"605141309",1,0.01,
"605141708",1,0.01,
"60515",26,0.01,
"60515-1146",1,0.01,
"60515-1947",1,0.01,
"605151301",1,0.01,
"605152142",1,0.01,
"605153455",1,0.01,
"60516",20,0.01,
"60516-5109",1,0.01,
"605161937",1,0.01,
"605163658",1,0.01,
"60517",30,0.01,
"60517-1546",1,0.01,
"60517-1683",1,0.01,
"60517-2732",1,0.01,
"60517-3857",1,0.01,
"60517-8027",1,0.01,
"605172003",1,0.01,
"605172241",1,0.01,
"605173107",1,0.01,
"605173109",1,0.01,
"605173759",1,0.01,
"605174617",1,0.01,
"605175403",1,0.01,
"605175407",1,0.01,
"605177744",1,0.01,
"60521",38,0.01,
"60521-3440",1,0.01,
"60521-3754",1,0.01,
"60521-4727",1,0.01,
"605213008",1,0.01,
"605214454",1,0.01,
"605215147",1,0.01,
"60523",19,0.01,
"60523-1129",1,0.01,
"60523-2353",1,0.01,
"60523-2574",1,0.01,
"605232534",1,0.01,
"605232560",1,0.01,
"605232784",1,0.01,
"60525",35,0.01,
"60525-2213",1,0.01,
"60525-2522",1,0.01,
"60525-5833",1,0.01,
"605253076",1,0.01,
"605253625",1,0.01,
"605257115",2,0.01,
"605257913",1,0.01,
"60526",21,0.01,
"605261547",1,0.01,
"605265304",1,0.01,
"60527",39,0.01,
"60527-0301",1,0.01,
"60527-7706",1,0.01,
"60527-8022",1,0.01,
"605275151",1,0.01,
"605275234",1,0.01,
"605275242",1,0.01,
"605275363",1,0.01,
"605275724",1,0.01,
"605276115",1,0.01,
"60532",24,0.01,
"60532-2329",1,0.01,
"60532-2535",1,0.01,
"60532-2855",1,0.01,
"605323316",1,0.01,
"605324429",1,0.01,
"605328234",1,0.01,
"60534",6,0.01,
"60538",8,0.01,
"60538-3437",1,0.01,
"60540",30,0.01,
"60540-3610",1,0.01,
"60540-5171",1,0.01,
"60540-5619",1,0.01,
"60540-7303",1,0.01,
"60540-9495",1,0.01,
"605404303",1,0.01,
"605406381",1,0.01,
"605407659",1,0.01,
"605408151",1,0.01,
"605408202",1,0.01,
"605409571",1,0.01,
"60541",1,0.01,
"60542",10,0.01,
"60543",17,0.01,
"60543-4080",1,0.01,
"60543-8222",1,0.01,
"605438231",1,0.01,
"605438384",1,0.01,
"605439108",1,0.01,
"60544",9,0.01,
"60544-7331",1,0.01,
"605446077",1,0.01,
"605447951",1,0.01,
"60545",4,0.01,
"60546",30,0.01,
"60546-2240",1,0.01,
"605461140",2,0.01,
"605461155",1,0.01,
"605461526",1,0.01,
"605461527",1,0.01,
"605461805",1,0.01,
"605462035",1,0.01,
"60548",2,0.01,
"605482562",1,0.01,
"60554",4,0.01,
"60555",8,0.01,
"60556",1,0.01,
"60558",28,0.01,
"60558-1616",1,0.01,
"60558-2110",1,0.01,
"605581834",1,0.01,
"60559",19,0.01,
"605591208",1,0.01,
"605592076",1,0.01,
"605592627",1,0.01,
"605592893",1,0.01,
"60560",5,0.01,
"605601041",1,0.01,
"60561",29,0.01,
"60561-3547",1,0.01,
"60561-3663",2,0.01,
"60561-4309",1,0.01,
"60561-4506",1,0.01,
"60561-4550",1,0.01,
"60561-5393",1,0.01,
"60561-6418",1,0.01,
"605614816",1,0.01,
"605615187",1,0.01,
"605615918",1,0.01,
"605618455",1,0.01,
"60563",31,1,
"60563-2951",1,0.01,
"60563-9035",1,0.01,
"605631204",1,0.01,
"605631391",1,0.01,
"605632056",1,0.01,
"605632586",1,0.01,
"605632722",1,0.01,
"605638500",1,0.01,
"60564",70,0.01,
"60564-3112",1,0.01,
"60564-3188",1,0.01,
"60564-4166",1,0.01,
"60564-4324",1,0.01,
"60564-4994",1,0.01,
"60564-5142",1,0.01,
"605641119",1,0.01,
"605643109",1,0.01,
"605644100",1,0.01,
"605644101",1,0.01,
"605644360",1,0.01,
"605644423",1,0.01,
"605644699",1,0.01,
"605644779",1,0.01,
"605645102",1,0.01,
"605645161",1,0.01,
"605645665",1,0.01,
"605645731",1,0.01,
"605646116",1,0.01,
"605646123",1,0.01,
"605646141",1,0.01,
"605648205",1,0.01,
"605648306",1,0.01,
"605648460",1,0.01,
"605649782",1,0.01,
"60565",60,0.01,
"60565-1102",1,0.01,
"60565-1357",1,0.01,
"60565-2349",1,0.01,
"60565-2612",1,0.01,
"60565-2830",1,0.01,
"60565-4109",1,0.01,
"60565-4316",1,0.01,
"60565-5238",1,0.01,
"60565-5320",1,0.01,
"605651101",1,0.01,
"605651102",1,0.01,
"605651108",1,0.01,
"605651240",1,0.01,
"605651475",1,0.01,
"605652013",1,0.01,
"605652297",1,0.01,
"605652455",1,0.01,
"605653454",1,0.01,
"605653479",1,0.01,
"605653568",1,0.01,
"605655222",1,0.01,
"605656726",1,0.01,
"605656808",1,0.01,
"605659307",1,0.01,
"60567",1,0.01,
"60585",16,0.01,
"60585-4508",1,0.01,
"605851596",1,0.01,
"605856146",1,0.01,
"60586",27,0.01,
"60586-2179",1,0.01,
"60586-4025",1,0.01,
"60586-5319",1,0.01,
"60586-8524",1,0.01,
"605862301",1,0.01,
"605868142",1,0.01,
"60601",24,0.01,
"60601-5282",1,0.01,
"60601-5974",3,0.01,
"60601-5988",1,0.01,
"60601-7501",1,0.01,
"60601-7515",1,0.01,
"60601-7892",1,0.01,
"606017535",1,0.01,
"60602",3,0.01,
"60602-4881",1,0.01,
"60603",2,0.01,
"60604",4,0.01,
"60605",48,0.01,
"60605-1517",1,0.01,
"606052141",1,0.01,
"60606",8,0.01,
"60607",33,0.01,
"60607-2906",1,0.01,
"60607-4805",1,0.01,
"60607-4866",1,0.01,
"60607-5304",1,0.01,
"60608",107,0.01,
"60608-1673",1,0.01,
"60608-2755",1,0.01,
"60608-2907",1,0.01,
"60608-3363",1,0.01,
"60608-5539",1,0.01,
"60608-5705",2,0.01,
"60608-6216",1,0.01,
"60608-6322",1,0.01,
"60608-6826",1,0.01,
"606083336",1,0.01,
"606083390",1,0.01,
"606083405",1,0.01,
"606084029",1,0.01,
"606084114",1,0.01,
"606084205",1,0.01,
"606085505",1,0.01,
"606085606",1,0.01,
"606085807",1,0.01,
"606085921",1,0.01,
"606086012",1,0.01,
"606086344",1,0.01,
"606086413",1,0.01,
"606086702",1,0.01,
"606086749",1,0.01,
"606086840",1,0.01,
"60609",45,0.01,
"60609-1230",1,0.01,
"60609-1235",1,0.01,
"60609-2042",1,0.01,
"60609-2812",1,0.01,
"60609-3211",1,0.01,
"60609-3503",1,0.01,
"60609-4166",1,0.01,
"60609-4171",1,0.01,
"60609-4252",1,0.01,
"60609-6149",1,0.01,
"606091746",1,0.01,
"606091908",1,0.01,
"606092736",1,0.01,
"606093128",1,0.01,
"606093265",1,0.01,
"606093885",1,0.01,
"606094718",1,0.01,
"606094728",1,0.01,
"606094947",1,0.01,
"60610",67,0.01,
"60610-5502",1,0.01,
"60610-6688",1,0.01,
"606101724",1,0.01,
"60611",51,0.01,
"60611-1103",1,0.01,
"60611-3546",1,0.01,
"60611-4695",1,0.01,
"60611-7134",2,0.01,
"60612",28,0.01,
"60612-1206",1,0.01,
"60612-1247",1,0.01,
"60612-1407",1,0.01,
"60612-1886",1,0.01,
"60612-3520",1,0.01,
"60612-4156",1,0.01,
"606121110",1,0.01,
"606124239",1,0.01,
"606124295",1,0.01,
"60613",128,1,
"60613-1019",1,0.01,
"60613-1203",1,0.01,
"60613-1328",1,0.01,
"60613-1757",1,0.01,
"60613-1959",1,0.01,
"60613-3469",1,0.01,
"60613-3805",1,0.01,
"60613-4141",1,0.01,
"60613-4302",1,0.01,
"60613-4726",1,0.01,
"60613-5772",1,0.01,
"60613-6620",1,0.01,
"606131492",1,0.01,
"60614",204,0.01,
"60614-1315",1,0.01,
"60614-2024",1,0.01,
"60614-3327",1,0.01,
"60614-3356",1,0.01,
"60614-3399",1,0.01,
"60614-3478",1,0.01,
"60614-4175",1,0.01,
"60614-4663",1,0.01,
"60614-5216",1,0.01,
"60614-5621",1,0.01,
"60614-5743",1,0.01,
"60614-5904",1,0.01,
"60614-6509",1,0.01,
"606142028",1,0.01,
"606142379",1,0.01,
"60615",70,0.01,
"60615-2044",1,0.01,
"60615-3114",1,0.01,
"60615-3133",1,0.01,
"60615-3253",1,0.01,
"606155208",1,0.01,
"60616",88,0.01,
"60616-1968",1,0.01,
"60616-2216",1,0.01,
"60616-2752",1,0.01,
"60616-3008",1,0.01,
"60616-3128",1,0.01,
"60616-4053",1,0.01,
"60616-4271",1,0.01,
"60616-4809",1,0.01,
"606161153",1,0.01,
"606162208",1,0.01,
"606162213",1,0.01,
"606162489",1,0.01,
"606162548",1,0.01,
"606162603",1,0.01,
"606162604",1,0.01,
"606163620",1,0.01,
"60617",119,0.01,
"60617-1159",1,0.01,
"60617-1436",1,0.01,
"60617-2032",1,0.01,
"60617-3465",1,0.01,
"60617-3850",1,0.01,
"60617-5029",1,0.01,
"60617-5256",1,0.01,
"60617-5533",1,0.01,
"60617-6036",1,0.01,
"60617-6223",1,0.01,
"60617-6703",1,0.01,
"60617-7142",1,0.01,
"60617-7358",1,0.01,
"606172503",1,0.01,
"606174902",1,0.01,
"606175126",1,0.01,
"606176007",1,0.01,
"606176252",1,0.01,
"606176323",1,0.01,
"606176324",1,0.01,
"606176526",1,0.01,
"606176542",1,0.01,
"606176602",1,0.01,
"606176811",1,0.01,
"606176836",1,0.01,
"606177057",1,0.01,
"60618",121,0.01,
"60618-1213",1,0.01,
"60618-1217",1,0.01,
"60618-1602",1,0.01,
"60618-2813",1,0.01,
"60618-3109",1,0.01,
"60618-3528",1,0.01,
"60618-3620",1,0.01,
"60618-5207",1,0.01,
"60618-5718",1,0.01,
"60618-6509",1,0.01,
"60618-7219",1,0.01,
"60618-7319",1,0.01,
"606181003",1,0.01,
"606181104",1,0.01,
"606182011",1,0.01,
"606183316",1,0.01,
"606184031",1,0.01,
"606184418",1,0.01,
"606185112",1,0.01,
"606185714",1,0.01,
"606185718",1,0.01,
"606186749",1,0.01,
"606187026",1,0.01,
"606187319",1,0.01,
"606187512",1,0.01,
"606187768",1,0.01,
"606188211",1,0.01,
"60619",62,0.01,
"60619-1217",1,0.01,
"60619-1320",1,0.01,
"60619-3005",1,0.01,
"60619-3814",1,0.01,
"60619-4204",1,0.01,
"60619-6629",1,0.01,
"60619-7718",1,0.01,
"60619-7730",1,0.01,
"60620",51,0.01,
"60620-2607",1,0.01,
"60620-3512",1,0.01,
"60620-3620",1,0.01,
"60620-3660",1,0.01,
"60620-3923",1,0.01,
"60620-4253",1,0.01,
"60620-5033",1,0.01,
"606204923",1,0.01,
"606205514",1,0.01,
"60621",17,0.01,
"60621-1633",1,0.01,
"60621-2319",1,0.01,
"60622",112,0.01,
"60622-1700",1,0.01,
"60622-1933",2,0.01,
"60622-2854",1,0.01,
"60622-2932",1,0.01,
"60622-3110",1,0.01,
"60622-3343",1,0.01,
"60622-3451",1,0.01,
"60622-4458",1,0.01,
"60622-6230",1,0.01,
"606224400",1,0.01,
"606224517",1,0.01,
"606224613",1,0.01,
"60623",95,0.01,
"60623-2216",1,0.01,
"60623-2642",1,0.01,
"60623-3135",1,0.01,
"60623-3510",1,0.01,
"60623-3943",1,0.01,
"60623-4454",1,0.01,
"60623-4618",1,0.01,
"60623-4638",1,0.01,
"606233413",1,0.01,
"606233423",1,0.01,
"606233426",1,0.01,
"606233459",1,0.01,
"606233540",1,0.01,
"606233714",1,0.01,
"606234324",1,0.01,
"606234612",1,0.01,
"606234616",1,0.01,
"606234752",1,0.01,
"606234755",1,0.01,
"606234820",1,0.01,
"60624",14,0.01,
"60624-1247",1,0.01,
"60624-3502",1,0.01,
"60624-3720",1,0.01,
"606242903",1,0.01,
"60625",157,0.01,
"60625-2019",1,0.01,
"60625-2586",1,0.01,
"60625-2703",1,0.01,
"60625-4653",1,0.01,
"60625-4923",1,0.01,
"60625-5205",1,0.01,
"60625-5914",1,0.01,
"60625-6004",1,0.01,
"60625-8360",1,0.01,
"606251010",1,0.01,
"606251809",1,0.01,
"606252306",1,0.01,
"606254216",2,0.01,
"606254220",1,0.01,
"606254307",1,0.01,
"606254363",1,0.01,
"606254443",1,0.01,
"606254581",1,0.01,
"606254603",1,0.01,
"606255546",1,0.01,
"606255619",1,0.01,
"606255904",1,0.01,
"606255943",1,0.01,
"606256039",1,0.01,
"606256486",1,0.01,
"606256705",1,0.01,
"60626",87,0.01,
"60626-2323",1,0.01,
"60626-3404",1,0.01,
"60626-4273",1,0.01,
"60626-4535",1,0.01,
"60626-4536",1,0.01,
"606262710",1,0.01,
"606263336",1,0.01,
"60627",1,0.01,
"60628",41,0.01,
"60628-1043",1,0.01,
"60628-2102",1,0.01,
"60628-3401",1,0.01,
"60628-3622",1,0.01,
"60628-4741",1,0.01,
"60628-4902",1,0.01,
"60628-5529",1,0.01,
"60628-6042",1,0.01,
"60628-7237",1,0.01,
"606282030",1,0.01,
"606282820",1,0.01,
"60629",182,0.01,
"60629-1019",1,0.01,
"60629-1213",1,0.01,
"60629-1503",1,0.01,
"60629-2209",1,0.01,
"60629-2623",1,0.01,
"60629-3314",1,0.01,
"60629-3520",1,0.01,
"60629-4144",1,0.01,
"60629-4343",1,0.01,
"60629-4810",1,0.01,
"60629-4824",1,0.01,
"60629-4852",1,0.01,
"60629-4901",1,0.01,
"60629-5231",1,0.01,
"60629-5232",1,0.01,
"60629-5235",1,0.01,
"60629-5236",1,0.01,
"60629-5434",1,0.01,
"60629-5554",1,0.01,
"60629-5635",1,0.01,
"606291010",1,0.01,
"606291044",1,0.01,
"606291521",1,0.01,
"606292135",1,0.01,
"606292207",1,0.01,
"606292401",1,0.01,
"606292416",1,0.01,
"606292421",1,0.01,
"606292610",1,0.01,
"606293017",1,0.01,
"606293316",1,0.01,
"606293723",1,0.01,
"606293818",2,0.01,
"606294144",1,0.01,
"606294340",1,0.01,
"606294823",1,0.01,
"606295120",1,0.01,
"606295211",1,0.01,
"606295214",1,0.01,
"606295640",1,0.01,
"6063",1,0.01,
"60630",77,0.01,
"60630-1792",1,0.01,
"60630-2226",1,0.01,
"60630-2722",1,0.01,
"60630-4131",1,0.01,
"60630-4238",1,0.01,
"60630-4605",1,0.01,
"606301128",1,0.01,
"606301836",1,0.01,
"606302151",1,0.01,
"606302607",1,0.01,
"606302733",1,0.01,
"606302911",1,0.01,
"606302942",1,0.01,
"606303144",1,0.01,
"606303365",1,0.01,
"606304256",1,0.01,
"60631",39,0.01,
"60631-1927",1,0.01,
"60631-4438",1,0.01,
"606311065",1,0.01,
"606311108",1,0.01,
"606311116",1,0.01,
"606311631",1,0.01,
"606314236",1,0.01,
"606314433",1,0.01,
"60632",158,0.01,
"60632-1103",1,0.01,
"60632-1171",1,0.01,
"60632-1508",1,0.01,
"60632-1713",1,0.01,
"60632-2113",1,0.01,
"60632-2213",1,0.01,
"60632-2234",1,0.01,
"60632-2426",1,0.01,
"60632-2527",1,0.01,
"60632-2607",1,0.01,
"60632-2721",1,0.01,
"60632-2815",1,0.01,
"60632-2920",1,0.01,
"60632-3528",1,0.01,
"60632-3532",1,0.01,
"60632-3547",1,0.01,
"60632-4049",1,0.01,
"60632-4618",1,0.01,
"606321007",1,0.01,
"606321119",1,0.01,
"606321218",1,0.01,
"606321340",1,0.01,
"606321505",1,0.01,
"606321526",1,0.01,
"606321606",1,0.01,
"606321709",1,0.01,
"606321803",1,0.01,
"606322111",1,0.01,
"606322538",1,0.01,
"606322921",1,0.01,
"606322923",1,0.01,
"606322947",1,0.01,
"606323004",1,0.01,
"606323223",1,0.01,
"606323252",1,0.01,
"606323314",1,0.01,
"606324109",1,0.01,
"606324604",1,0.01,
"606324606",1,0.01,
"606324815",1,0.01,
"60633",12,0.01,
"60633-1064",1,0.01,
"60633-2001",1,0.01,
"606331704",1,0.01,
"60634",117,0.01,
"60634-1510",1,0.01,
"60634-1832",1,0.01,
"60634-2420",1,0.01,
"60634-2553",1,0.01,
"60634-2631",1,0.01,
"60634-3412",1,0.01,
"60634-4005",1,0.01,
"60634-4011",1,0.01,
"60634-4120",1,0.01,
"60634-4510",1,0.01,
"60634-4529",1,0.01,
"60634-4943",1,0.01,
"606341585",1,0.01,
"606341706",1,0.01,
"606341805",1,0.01,
"606342031",1,0.01,
"606342376",1,0.01,
"606342666",1,0.01,
"606342918",1,0.01,
"606342939",1,0.01,
"606343410",1,0.01,
"606343617",1,0.01,
"606343720",1,0.01,
"606343751",1,0.01,
"606343905",1,0.01,
"606344636",1,0.01,
"606344952",1,0.01,
"606345007",1,0.01,
"606345058",1,0.01,
"60635",3,0.01,
"60636",18,0.01,
"60636-1400",1,0.01,
"60636-2406",1,0.01,
"60636-2827",1,0.01,
"60636-3016",1,0.01,
"60636-3731",1,0.01,
"606361213",1,0.01,
"606363847",1,0.01,
"60637",33,1,
"60637-1118",1,0.01,
"60637-4436",1,0.01,
"60637-4514",1,0.01,
"606371639",1,0.01,
"606373603",1,0.01,
"60638",70,0.01,
"60638-1513",1,0.01,
"60638-1625",1,0.01,
"60638-2143",1,0.01,
"60638-2303",1,0.01,
"60638-3024",1,0.01,
"60638-3111",1,0.01,
"60638-3545",1,0.01,
"60638-5546",1,0.01,
"60638-5742",1,0.01,
"606381128",1,0.01,
"606381344",1,0.01,
"606382207",1,0.01,
"606382416",1,0.01,
"606383512",1,0.01,
"606384003",1,0.01,
"606384509",1,0.01,
"606384638",1,0.01,
"606385931",1,0.01,
"60639",126,0.01,
"60639-1251",1,0.01,
"60639-1918",1,0.01,
"60639-2630",1,0.01,
"60639-2831",1,0.01,
"60639-4838",1,0.01,
"606391003",1,0.01,
"606391052",1,0.01,
"606391092",1,0.01,
"606391524",1,0.01,
"606391602",1,0.01,
"606391903",1,0.01,
"606392024",1,0.01,
"606392629",1,0.01,
"606393104",1,0.01,
"606393438",1,0.01,
"606395205",1,0.01,
"60640",118,0.01,
"60640-2743",1,0.01,
"60640-2813",1,0.01,
"60640-4082",1,0.01,
"60640-5447",1,0.01,
"60640-5620",1,0.01,
"60640-6148",1,0.01,
"60640-7415",1,0.01,
"60640-7891",1,0.01,
"606402007",1,0.01,
"606402051",1,0.01,
"606402220",1,0.01,
"606402903",1,0.01,
"606402909",1,0.01,
"606403391",1,0.01,
"606403779",1,0.01,
"606404701",1,0.01,
"606406515",1,0.01,
"606407540",1,0.01,
"60641",99,0.01,
"60641-1348",2,0.01,
"60641-1479",1,0.01,
"60641-2224",1,0.01,
"60641-2230",1,0.01,
"60641-2623",1,0.01,
"60641-2916",1,0.01,
"60641-3251",1,0.01,
"60641-3725",1,0.01,
"60641-5023",1,0.01,
"60641-5145",1,0.01,
"606411315",1,0.01,
"606411421",1,0.01,
"606411439",1,0.01,
"606412608",1,0.01,
"606412945",1,0.01,
"606413242",1,0.01,
"606413248",1,0.01,
"606413450",1,0.01,
"606414120",1,0.01,
"606414251",1,0.01,
"606414909",1,0.01,
"606414939",1,0.01,
"606415112",1,0.01,
"606415135",1,0.01,
"606415227",1,0.01,
"606415229",1,0.01,
"606415343",1,0.01,
"60642",34,0.01,
"60642-5801",1,0.01,
"60642-5864",1,0.01,
"60642-6467",1,0.01,
"60642-8070",1,0.01,
"60642-8161",1,0.01,
"606426147",1,0.01,
"60643",48,0.01,
"60643-3314",1,0.01,
"60643-4128",1,0.01,
"60643-4515",1,0.01,
"606431808",1,0.01,
"606432161",1,0.01,
"606432167",1,0.01,
"606432819",1,0.01,
"606433102",1,0.01,
"60644",20,0.01,
"60644-1015",1,0.01,
"60644-1804",1,0.01,
"60644-3942",1,0.01,
"60644-4236",1,0.01,
"60644-4802",1,0.01,
"606442222",1,0.01,
"60645",73,0.01,
"60645-1849",1,0.01,
"60645-2386",1,0.01,
"60645-2491",1,0.01,
"60645-3013",1,0.01,
"606451501",1,0.01,
"606454105",1,0.01,
"606454296",1,0.01,
"606454528",1,0.01,
"606454670",1,0.01,
"606454714",1,0.01,
"606454809",1,0.01,
"606455017",1,0.01,
"606455103",1,0.01,
"606455681",1,0.01,
"60646",34,0.01,
"60646-1264",1,0.01,
"60646-2703",1,0.01,
"60646-4909",1,0.01,
"60646-5305",1,0.01,
"60646-5516",1,0.01,
"60646-6205",1,0.01,
"60646-6539",1,0.01,
"606461330",1,0.01,
"606461413",1,0.01,
"606461512",1,0.01,
"606463630",1,0.01,
"606464918",1,0.01,
"606465014",2,0.01,
"606465024",1,0.01,
"606465215",1,0.01,
"606465223",1,0.01,
"606465225",2,0.01,
"606465347",1,0.01,
"606465807",1,0.01,
"606466159",1,0.01,
"606466420",1,0.01,
"60647",150,0.01,
"60647-1408",1,0.01,
"60647-1606",1,0.01,
"60647-2330",1,0.01,
"60647-2405",1,0.01,
"60647-3538",1,0.01,
"60647-3694",1,0.01,
"60647-3732",1,0.01,
"60647-3754",1,0.01,
"60647-4019",1,0.01,
"60647-4706",1,0.01,
"60647-4909",1,0.01,
"606471050",1,0.01,
"606471602",1,0.01,
"606473636",1,0.01,
"606474919",1,0.01,
"606475309",1,0.01,
"60648",1,0.01,
"60649",54,0.01,
"60649-1806",1,0.01,
"60649-2208",1,0.01,
"60649-3317",1,0.01,
"60649-3828",1,0.01,
"60649-3905",1,0.01,
"60649-4120",1,0.01,
"606493825",1,0.01,
"60651",40,0.01,
"60651-1971",1,0.01,
"60651-2501",1,0.01,
"60651-3919",1,0.01,
"60651-3950",1,0.01,
"606511929",1,0.01,
"60652",54,0.01,
"60652-1222",1,0.01,
"60652-1304",1,0.01,
"60652-1336",1,0.01,
"60652-2213",1,0.01,
"60652-2318",1,0.01,
"60652-2858",1,0.01,
"60652-2919",1,0.01,
"60652-3901",1,0.01,
"606521318",1,0.01,
"606521816",1,0.01,
"606522437",1,0.01,
"606523317",1,0.01,
"606523807",1,0.01,
"606523812",1,0.01,
"60653",29,0.01,
"60653-3249",1,0.01,
"60653-3429",1,0.01,
"60653-4366",1,0.01,
"606532012",1,0.01,
"606534085",1,0.01,
"60654",20,0.01,
"60654-7235",1,0.01,
"60655",21,0.01,
"60655-3215",1,0.01,
"60655-3311",1,0.01,
"606551024",1,0.01,
"606551138",1,0.01,
"606551504",1,0.01,
"606551515",1,0.01,
"606552641",1,0.01,
"606553233",1,0.01,
"606553740",1,0.01,
"606553905",1,0.01,
"60656",43,0.01,
"60656-1110",1,0.01,
"60656-1724",1,0.01,
"60656-2720",1,0.01,
"606562016",1,0.01,
"606562331",1,0.01,
"606562395",1,0.01,
"606563502",1,0.01,
"606564251",1,0.01,
"60657",180,0.01,
"60657-1618",1,0.01,
"60657-1841",1,0.01,
"60657-2014",1,0.01,
"60657-2240",1,0.01,
"60657-4227",1,0.01,
"60657-4366",1,0.01,
"60657-4600",1,0.01,
"60657-4949",1,0.01,
"60657-5526",1,0.01,
"60657-5552",1,0.01,
"606571412",1,0.01,
"606573412",1,0.01,
"606574103",1,0.01,
"60659",85,0.01,
"60659-2165",1,0.01,
"60659-2509",1,0.01,
"60659-2836",2,0.01,
"60659-3610",1,0.01,
"60659-4380",1,0.01,
"60659-4418",1,0.01,
"606591685",1,0.01,
"606591706",1,0.01,
"606591906",1,0.01,
"606592109",1,0.01,
"606592413",1,0.01,
"606592707",1,0.01,
"606592945",1,0.01,
"606593006",1,0.01,
"606593411",1,0.01,
"606594264",1,0.01,
"606594336",1,0.01,
"606594572",1,0.01,
"606594906",1,0.01,
"60660",75,1,
"60660-2309",1,0.01,
"60660-2603",1,0.01,
"60660-4839",2,0.01,
"60660-5119",1,0.01,
"60660-5514",1,0.01,
"606602211",1,0.01,
"606602309",1,0.01,
"606602322",1,0.01,
"606602982",1,0.01,
"60661",10,0.01,
"60661-2402",1,0.01,
"606806626",1,0.01,
"60690",4,0.01,
"60706",38,0.01,
"60706-1142",1,0.01,
"60706-3887",1,0.01,
"607061157",1,0.01,
"607063406",1,0.01,
"607064428",1,0.01,
"607064741",1,0.01,
"60707",69,0.01,
"60707-1707",1,0.01,
"60707-1719",1,0.01,
"60707-1752",1,0.01,
"60707-1844",1,0.01,
"60707-2110",1,0.01,
"60707-3231",1,0.01,
"60707-3236",1,0.01,
"60707-4144",1,0.01,
"607071140",1,0.01,
"607071209",1,0.01,
"607071344",1,0.01,
"607071731",1,0.01,
"607071744",1,0.01,
"607072216",1,0.01,
"607072401",1,0.01,
"607073636",1,0.01,
"607073929",1,0.01,
"607074221",1,0.01,
"607074317",1,0.01,
"607074409",1,0.01,
"60712",53,0.01,
"60712-3015",1,0.01,
"60712-3831",1,0.01,
"60712-4723",1,0.01,
"607121006",1,0.01,
"607121016",1,0.01,
"607122525",1,0.01,
"607123452",1,0.01,
"607123501",1,0.01,
"60714",58,0.01,
"60714-1317",1,0.01,
"60714-3218",1,0.01,
"607143311",1,0.01,
"607145751",1,0.01,
"6073",1,0.01,
"60803",11,0.01,
"60803-2402",1,0.01,
"608035853",1,0.01,
"60804",49,0.01,
"60804-1014",1,0.01,
"60804-1056",1,0.01,
"60804-1904",1,0.01,
"60804-3222",1,0.01,
"60804-3418",1,0.01,
"60804-3948",1,0.01,
"60804-4019",1,0.01,
"60804-4311",1,0.01,
"608041054",1,0.01,
"608041710",1,0.01,
"608041838",1,0.01,
"608042140",1,0.01,
"608042751",1,0.01,
"608042808",1,0.01,
"608043225",1,0.01,
"608043327",1,0.01,
"608043551",1,0.01,
"608043628",1,0.01,
"608043750",1,0.01,
"60805",12,0.01,
"60805-2642",1,0.01,
"60805-3333",1,0.01,
"608052226",1,0.01,
"608053229",1,0.01,
"608053762",1,0.01,
"60827",4,0.01,
"60827-6414",1,0.01,
"60901",4,0.01,
"609018371",1,0.01,
"60914",11,0.01,
"609144904",1,0.01,
"609146401",1,0.01,
"609149201",1,0.01,
"60915",3,0.01,
"609277093",1,0.01,
"60930",1,0.01,
"60942",2,0.01,
"60950",1,0.01,
"610",0.01,1,
"6100",0.01,1,
"61004",0.01,1,
"610064",0.01,2,
"61008",8,0.01,
"61008-7009",1,0.01,
"610081927",1,0.01,
"610087182",1,0.01,
"610088578",1,0.01,
"61010",2,0.01,
"61011",1,0.01,
"61015",1,0.01,
"61016",2,0.01,
"61021",2,0.01,
"61032",2,0.01,
"61036",1,0.01,
"61037",1,0.01,
"61054",1,0.01,
"61061",2,0.01,
"61064",1,0.01,
"61065",2,0.01,
"6107",1,0.01,
"61070",1,0.01,
"61071",1,0.01,
"61072",2,1,
"61073",8,0.01,
"61073-9080",1,0.01,
"610737585",1,0.01,
"6108",1,0.01,
"61080",1,0.01,
"61081",1,0.01,
"610819575",1,0.01,
"61101",1,0.01,
"611012821",1,0.01,
"611016457",1,0.01,
"61102",1,0.01,
"61103",2,0.01,
"61103-4544",1,0.01,
"611034339",1,0.01,
"611036343",1,0.01,
"61104",1,0.01,
"61107",9,0.01,
"61107-2713",1,0.01,
"611071025",1,0.01,
"611073010",1,0.01,
"61108",2,0.01,
"61109-2168",1,0.01,
"61109-2482",1,0.01,
"6111",1,0.01,
"61111",4,0.01,
"61111-3532",1,0.01,
"611118638",1,0.01,
"61113",0.01,1,
"61114",5,0.01,
"61114-5516",1,0.01,
"611146156",1,0.01,
"611147408",1,0.01,
"61115",4,0.01,
"61201",2,0.01,
"61232-9528",1,0.01,
"61234",2,0.01,
"61242",2,0.01,
"61244",2,0.01,
"61250",1,0.01,
"61254",1,0.01,
"61259",1,0.01,
"612630104",1,0.01,
"61264",1,0.01,
"61265",4,0.01,
"61320",2,0.01,
"613309530",1,0.01,
"61342",1,0.01,
"61342-9304",1,0.01,
"61345-9281",1,0.01,
"61348",1,0.01,
"61350",1,0.01,
"613504204",1,0.01,
"61354",2,0.01,
"61354-1582",1,0.01,
"61356",4,0.01,
"613569180",1,0.01,
"613609342",1,0.01,
"61367",1,0.01,
"61368",1,0.01,
"61376",1,0.01,
"61401",1,0.01,
"614011451",1,0.01,
"614011820",1,0.01,
"61422",1,0.01,
"61440",0.01,1,
"61443-3565",1,0.01,
"61455",2,0.01,
"614553029",1,0.01,
"61462",3,0.01,
"61517",1,0.01,
"61523",2,0.01,
"61525",3,0.01,
"615252715",1,0.01,
"61530",1,0.01,
"61535",1,0.01,
"61536-9626",1,0.01,
"61537",1,0.01,
"61548",1,0.01,
"61548-0144",1,0.01,
"61550",1,0.01,
"61552",1,0.01,
"61559",1,0.01,
"61561",2,0.01,
"61561-7812",1,0.01,
"61568",1,0.01,
"61571",2,0.01,
"61606",3,0.01,
"616112211",2,0.01,
"61614",3,0.01,
"616141099",1,0.01,
"616144112",1,0.01,
"61615",5,0.01,
"61615-2361",1,0.01,
"61701",8,0.01,
"61701-4230",1,0.01,
"61704",7,0.01,
"61705",1,0.01,
"61737",1,0.01,
"61761",9,0.01,
"61764",2,0.01,
"61764-1448",1,0.01,
"61801",4,0.01,
"61802",4,0.01,
"61820",17,0.01,
"61820-6526",1,0.01,
"618204616",1,0.01,
"618207635",1,0.01,
"61821",4,0.01,
"618214520",1,0.01,
"61822",6,0.01,
"61822-7391",1,0.01,
"61832",3,0.01,
"61853",1,0.01,
"61866",1,0.01,
"618739060",1,0.01,
"61880",1,0.01,
"61920",3,0.01,
"62002",1,0.01,
"62025",6,0.01,
"62025-7343",1,0.01,
"62034",2,0.01,
"62040",2,0.01,
"622031624",1,0.01,
"62207",1,0.01,
"62208",1,0.01,
"622083928",1,0.01,
"62220",1,0.01,
"62221",2,0.01,
"62221-7040",1,0.01,
"62223",1,0.01,
"62226",2,0.01,
"62226-6051",1,0.01,
"62234-4869",1,0.01,
"62249",1,0.01,
"62249-2627",1,0.01,
"62269",5,0.01,
"622943614",1,0.01,
"62301",1,0.01,
"623055935",1,0.01,
"62321",1,0.01,
"62411",1,0.01,
"62427",1,0.01,
"62454",1,0.01,
"62471",1,0.01,
"62521",1,0.01,
"62522",2,0.01,
"62526",2,0.01,
"62535",1,0.01,
"625481212",1,0.01,
"62549",1,0.01,
"62650",2,0.01,
"62656",1,0.01,
"62675",1,0.01,
"62702",2,0.01,
"62703",2,0.01,
"62704",4,0.01,
"627046476",1,0.01,
"627079340",1,0.01,
"62711",3,0.01,
"62712",3,0.01,
"62801",1,0.01,
"62801-2614",1,0.01,
"62808",1,0.01,
"62812",1,0.01,
"62821",1,0.01,
"628642155",1,0.01,
"62880",1,0.01,
"62901",5,0.01,
"62901-3266",1,0.01,
"62901-4102",1,0.01,
"62906",1,0.01,
"62912",1,0.01,
"62946-2313",1,0.01,
"62958",1,0.01,
"62959",1,0.01,
"62959-4270",1,0.01,
"62983",1,0.01,
"63005",2,0.01,
"63005-4966",1,0.01,
"630054468",1,0.01,
"630054484",1,0.01,
"630054661",1,0.01,
"630056336",1,0.01,
"63011",2,0.01,
"63011-3454",1,0.01,
"63017",4,0.01,
"63017-2489",1,0.01,
"63017-3047",1,0.01,
"630171912",1,0.01,
"630172477",1,0.01,
"63021",5,0.01,
"63021-6820",1,0.01,
"630213819",1,0.01,
"630215865",1,0.01,
"63025",2,0.01,
"630263962",1,0.01,
"63028",1,0.01,
"63033",2,0.01,
"630334346",1,0.01,
"63034-2051",1,0.01,
"630342161",1,0.01,
"630342648",1,0.01,
"63040",3,0.01,
"630401659",1,0.01,
"63042",1,0.01,
"630443513",1,0.01,
"63052-1536",1,0.01,
"63069",2,0.01,
"63089",1,0.01,
"63104",2,0.01,
"63104-1404",1,0.01,
"63104-2541",1,0.01,
"63105",3,0.01,
"63105-2516",1,0.01,
"63106",1,0.01,
"63108",3,0.01,
"631082302",1,0.01,
"63109",1,0.01,
"631102703",1,0.01,
"63111-1131",1,0.01,
"63112-1001",1,0.01,
"63114",1,0.01,
"63117",2,0.01,
"631172139",1,0.01,
"63118",2,0.01,
"63118-1126",1,0.01,
"63119",9,0.01,
"63119-3022",1,0.01,
"63119-5243",1,0.01,
"631192259",1,0.01,
"631192837",1,0.01,
"631193242",1,0.01,
"631194833",1,0.01,
"63122",6,0.01,
"63122-4102",1,0.01,
"63122-6335",1,0.01,
"631223344",1,0.01,
"631223433",1,0.01,
"631224828",1,0.01,
"63123-1146",1,0.01,
"631231537",1,0.01,
"631232831",1,0.01,
"63124",1,0.01,
"631242042",1,0.01,
"63126",2,0.01,
"63126-1439",1,0.01,
"63126-3504",1,0.01,
"63127",1,0.01,
"63128",3,0.01,
"63129",2,0.01,
"63130",4,0.01,
"63130-3042",1,0.01,
"631303824",1,0.01,
"63131",4,0.01,
"631311137",1,0.01,
"631312151",1,0.01,
"631312304",1,0.01,
"631313627",1,0.01,
"631314114",1,0.01,
"631324474",1,0.01,
"63135-3158",1,0.01,
"63135-3511",1,0.01,
"631364508",1,0.01,
"63137",1,0.01,
"631372310",1,0.01,
"63141",4,0.01,
"63141-6379",1,0.01,
"63141-7365",1,0.01,
"63143",1,0.01,
"63144-1010",1,0.01,
"63144-1643",1,0.01,
"63144-2533",1,0.01,
"63146",2,0.01,
"631465020",1,0.01,
"63147",1,0.01,
"63147-1215",1,0.01,
"63301",2,0.01,
"633011628",1,0.01,
"63303",1,0.01,
"633036468",1,0.01,
"63304",1,0.01,
"633612101",1,0.01,
"63366-5587",1,0.01,
"633671014",1,0.01,
"63368",1,0.01,
"633687185",1,0.01,
"633688596",1,0.01,
"63376",3,0.01,
"63376-7183",1,0.01,
"63383-4829",1,0.01,
"63388",1,0.01,
"63501",1,0.01,
"63701",1,0.01,
"6371",1,0.01,
"64014",1,0.01,
"64015",1,0.01,
"640608792",1,0.01,
"64068-1804",1,0.01,
"640681287",1,0.01,
"64080",1,0.01,
"64081",1,0.01,
"640812490",1,0.01,
"64086",1,0.01,
"64086-6719",1,0.01,
"640898929",1,0.01,
"64109",3,0.01,
"64110",1,0.01,
"64111",1,0.01,
"64113",1,0.01,
"64113-2035",1,0.01,
"641131233",1,0.01,
"641131560",1,0.01,
"641131903",1,0.01,
"641132004",1,0.01,
"641132539",1,0.01,
"64114",1,0.01,
"64116",2,0.01,
"64151",1,0.01,
"64158",1,0.01,
"64160",0.01,1,
"645063516",1,0.01,
"6511",2,0.01,
"65201",4,0.01,
"65201-4000",1,0.01,
"65202",2,0.01,
"65202-9866",1,0.01,
"65203",3,0.01,
"65301-8969",1,0.01,
"65500",0.01,1,
"655000",0.01,1,
"6560",0.01,1,
"65714",1,0.01,
"657147024",1,0.01,
"65781",1,0.01,
"65804",1,0.01,
"65807",1,0.01,
"65809",1,0.01,
"65810",1,0.01,
"6604",1,0.01,
"66047",1,0.01,
"66048",1,0.01,
"6605",1,0.01,
"66061",2,0.01,
"66062",4,0.01,
"66104-5518",1,0.01,
"6614",1,0.01,
"662024245",1,0.01,
"662053234",1,0.01,
"662061213",1,0.01,
"662062510",1,0.01,
"66207",1,0.01,
"662072205",1,0.01,
"66208",1,0.01,
"66208-1945",1,0.01,
"66208-2021",1,0.01,
"66209",2,0.01,
"66209-3546",1,0.01,
"662092112",1,0.01,
"662103316",1,0.01,
"66213",2,0.01,
"662154213",1,0.01,
"662156003",1,0.01,
"66216",3,0.01,
"662218084",1,0.01,
"662218176",1,0.01,
"66297",0.01,1,
"66502",1,0.01,
"666061262",1,0.01,
"67002",1,0.01,
"67203",1,0.01,
"672033523",1,0.01,
"67211",1,0.01,
"67219",1,0.01,
"67220",1,0.01,
"67220-2970",1,0.01,
"678001",0.01,1,
"68008",1,0.01,
"68069",1,0.01,
"68102",1,0.01,
"68104",1,0.01,
"68114",1,0.01,
"68116",2,0.01,
"68118-2722",1,0.01,
"681242715",1,0.01,
"68127",1,0.01,
"68132",2,0.01,
"68132-2617",1,0.01,
"68135",2,0.01,
"68135-1366",1,0.01,
"681351308",1,0.01,
"68137",1,0.01,
"681441432",1,0.01,
"681521037",1,0.01,
"68181",1,0.01,
"6830",1,0.01,
"68502",1,0.01,
"685025034",1,0.01,
"685122431",1,0.01,
"68516",2,0.01,
"68524",1,0.01,
"6854",1,0.01,
"6877",2,0.01,
"687765603",1,0.01,
"6905",1,0.01,
"69559",1,0.01,
"70001",1,0.01,
"700021918",1,0.01,
"700053815",1,0.01,
"700068",0.01,1,
"70058",2,0.01,
"700656605",1,0.01,
"7010",1,0.01,
"70115",2,0.01,
"701154335",2,0.01,
"701182941",1,0.01,
"70122",1,0.01,
"701222211",1,0.01,
"70125",1,0.01,
"70301",1,0.01,
"7034",1,0.01,
"7042",1,0.01,
"70433-0348",1,0.01,
"70501",1,0.01,
"7052",1,0.01,
"7054",1,0.01,
"7069",1,0.01,
"7071",1,0.01,
"70714-3211",1,0.01,
"70769",1,0.01,
"70808",1,0.01,
"70808-8725",1,0.01,
"710032",0.01,1,
"7114",1,0.01,
"71459",1,0.01,
"71998",1,0.01,
"720",0.01,1,
"72019",1,0.01,
"72113",1,0.01,
"72116",1,0.01,
"72176",1,0.01,
"72204",1,0.01,
"72390",1,0.01,
"72601",1,0.01,
"72701",1,0.01,
"72703",1,0.01,
"72712-3621",1,0.01,
"72761",1,0.01,
"727620891",1,0.01,
"72837",1,0.01,
"72903",1,0.01,
"73025-2534",1,0.01,
"73071",2,0.01,
"73071-2259",1,0.01,
"73071-4054",1,0.01,
"73106",1,0.01,
"73120",1,0.01,
"74055",1,0.01,
"74074",1,0.01,
"74104",1,0.01,
"741052211",1,0.01,
"741074506",1,0.01,
"74114",1,0.01,
"74129",1,0.01,
"74133",1,0.01,
"74136",1,0.01,
"74137",2,0.01,
"74402-1005",1,0.01,
"7461",1,0.01,
"74700",0.01,1,
"7481",1,0.01,
"75002",1,0.01,
"75003",0.01,1,
"75007",2,0.01,
"7501",1,0.01,
"75010",1,1,
"75013",1,0.01,
"75015",0.01,1,
"75023",2,0.01,
"75024",1,0.01,
"75025",1,0.01,
"750283781",1,0.01,
"75032-7626",1,0.01,
"75033",2,0.01,
"75034",2,0.01,
"750341279",1,0.01,
"75035",2,0.01,
"75038",1,0.01,
"75039",1,0.01,
"75054",1,0.01,
"75056",1,0.01,
"750565785",1,0.01,
"75057-2702",1,0.01,
"75062",2,0.01,
"75065",1,0.01,
"75070",2,0.01,
"75070-2874",1,0.01,
"75070-7234",1,0.01,
"750707252",1,0.01,
"75071",1,0.01,
"75080",2,0.01,
"75080-3926",1,0.01,
"75082",1,0.01,
"7512",1,0.01,
"75181",1,0.01,
"75201",1,0.01,
"75204",2,0.01,
"75206",1,0.01,
"75209-3212",1,0.01,
"752144430",1,0.01,
"752182233",1,0.01,
"752201703",1,0.01,
"75225",2,0.01,
"75228",1,0.01,
"752292852",1,0.01,
"75230",1,0.01,
"75230-2852",1,0.01,
"75241",1,0.01,
"75243",1,0.01,
"752482860",1,0.01,
"75252",1,0.01,
"752522372",1,0.01,
"75287",1,0.01,
"752875141",1,0.01,
"75605-8216",1,0.01,
"75707",1,0.01,
"76001",2,0.01,
"76006",1,0.01,
"76012",1,0.01,
"76012-5676",1,0.01,
"760125320",1,0.01,
"76013",1,0.01,
"76014",1,0.01,
"76016-5336",1,0.01,
"760164521",1,0.01,
"76034",1,0.01,
"760345886",1,0.01,
"76040",1,0.01,
"76049",0.01,1,
"76051-5627",1,0.01,
"76054",1,0.01,
"76063",1,0.01,
"760635445",1,0.01,
"76092",2,0.01,
"76109",1,0.01,
"76133",2,0.01,
"76180",1,0.01,
"76201",1,0.01,
"76205",1,0.01,
"76207",1,0.01,
"7624",1,0.01,
"7631",1,0.01,
"76502",1,0.01,
"76504",1,0.01,
"76522",1,0.01,
"76657",1,0.01,
"76706-6559",1,0.01,
"7675",1,0.01,
"77004",1,0.01,
"77006",1,0.01,
"77009",1,0.01,
"77016",1,0.01,
"77024",1,0.01,
"77026",1,0.01,
"77040",1,0.01,
"77043",1,0.01,
"77055",1,0.01,
"770554712",1,0.01,
"77063",2,0.01,
"77077",1,0.01,
"77088",1,0.01,
"77093",1,0.01,
"77095",1,0.01,
"770957298",1,0.01,
"77096-6002",1,0.01,
"7716",1,0.01,
"7731",1,0.01,
"77338-1314",1,0.01,
"77375",1,0.01,
"77377",1,0.01,
"77380-1346",1,0.01,
"77381",3,0.01,
"77381-5126",1,0.01,
"77381-6139",1,0.01,
"773811406",1,0.01,
"773822627",1,0.01,
"77401",1,0.01,
"77433",1,0.01,
"77450",1,0.01,
"774505770",1,0.01,
"77479",1,0.01,
"77487",1,0.01,
"77494",1,0.01,
"77494-2376",1,0.01,
"774945251",1,0.01,
"77498-7031",1,0.01,
"774987221",1,0.01,
"77520",1,0.01,
"77546",1,0.01,
"7757",1,0.01,
"77627",1,0.01,
"77664",1,0.01,
"78006",1,0.01,
"78015",1,0.01,
"78015-6512",1,0.01,
"78028",1,0.01,
"78108",1,0.01,
"781082268",1,0.01,
"78132",1,0.01,
"78155",1,0.01,
"781632199",1,0.01,
"78201",1,0.01,
"782013740",1,0.01,
"782014021",1,0.01,
"78209",3,0.01,
"78212",1,0.01,
"782123674",1,0.01,
"78213",1,0.01,
"78216",1,0.01,
"78216-3435",1,0.01,
"78218-1772",1,0.01,
"78227-1629",1,0.01,
"782283213",1,0.01,
"78230",1,0.01,
"782383524",1,0.01,
"78247",2,0.01,
"78249",1,0.01,
"78249-1639",1,0.01,
"78254",1,0.01,
"78257",1,0.01,
"78261",1,0.01,
"782831746",1,0.01,
"78400",0.01,1,
"7843",1,0.01,
"785207417",1,0.01,
"78575",1,0.01,
"78630",1,0.01,
"78641",1,0.01,
"78642",1,0.01,
"78660",1,0.01,
"78664",1,0.01,
"78665",1,0.01,
"78703",1,0.01,
"787035459",1,0.01,
"78704",1,0.01,
"78705",2,0.01,
"78717",1,0.01,
"78726",1,0.01,
"78727",1,0.01,
"78729",1,0.01,
"78732",1,0.01,
"78735",1,0.01,
"78737-9524",1,0.01,
"78738",1,0.01,
"78739",1,0.01,
"78741",1,0.01,
"78745-4966",1,0.01,
"78751",1,0.01,
"78801",1,0.01,
"78833",1,0.01,
"78840",1,0.01,
"79011",0.01,1,
"7920",1,0.01,
"7922",1,0.01,
"7932",1,0.01,
"79603",1,0.01,
"79901",1,0.01,
"79905",1,0.01,
"79912",1,0.01,
"799128106",1,0.01,
"79935-3714",1,0.01,
"79938-2761",1,0.01,
"80013",2,0.01,
"80016",1,0.01,
"80023",1,0.01,
"80027",3,0.01,
"800279404",1,0.01,
"8003",1,0.01,
"80031",2,0.01,
"80031-2144",1,0.01,
"80104",1,0.01,
"80108",1,0.01,
"80108-3482",1,0.01,
"801089259",1,0.01,
"80109",1,0.01,
"80110",1,0.01,
"801115289",1,0.01,
"80120",2,0.01,
"80120-3625",1,0.01,
"80123",1,0.01,
"80126",2,0.01,
"80126-5265",1,0.01,
"80129-6407",1,0.01,
"801295784",1,0.01,
"80130",1,0.01,
"80134",1,0.01,
"801358201",1,0.01,
"8016",1,0.01,
"80205",1,0.01,
"80205-4722",1,0.01,
"80206",2,0.01,
"80209",1,0.01,
"80209-2424",1,0.01,
"80210",2,0.01,
"80211",1,0.01,
"80220",2,0.01,
"80220-1548",1,0.01,
"802307036",1,0.01,
"80237",1,0.01,
"80238",1,0.01,
"80240-220",0.01,1,
"80302",2,0.01,
"80305",1,0.01,
"80305-3413",1,0.01,
"803694",0.01,1,
"80401-6571",1,0.01,
"80465",1,0.01,
"804775166",1,0.01,
"80498",1,0.01,
"80503",1,0.01,
"80517",1,0.01,
"80524",1,0.01,
"80525",1,0.01,
"80537",1,0.01,
"8057",1,0.01,
"806014265",1,0.01,
"80631",1,0.01,
"80634",2,0.01,
"8075",1,0.01,
"80816",1,0.01,
"80841",1,0.01,
"80904",1,0.01,
"80915-3149",1,0.01,
"80916",1,0.01,
"80923",1,0.01,
"80923-5437",1,0.01,
"81621-0899",1,0.01,
"820012531",1,0.01,
"82633",1,0.01,
"830021726",1,0.01,
"83128",1,0.01,
"83338",1,0.01,
"83440",1,0.01,
"83713-1115",1,0.01,
"84003-0054",1,0.01,
"84014",1,0.01,
"84041",1,0.01,
"84058",1,0.01,
"84062",1,0.01,
"84067",1,0.01,
"84093",1,0.01,
"84095",1,0.01,
"84095-3031",1,0.01,
"840957750",1,0.01,
"84106",1,0.01,
"841091212",1,0.01,
"84117-7033",1,0.01,
"84119",1,0.01,
"84401",1,0.01,
"84403",1,0.01,
"84604",1,0.01,
"85008",2,0.01,
"85012-1734",1,0.01,
"85013",1,0.01,
"85014",1,0.01,
"85018",1,0.01,
"85021",1,0.01,
"850283068",1,0.01,
"85033",1,0.01,
"85037",1,0.01,
"85045",1,0.01,
"85048",1,0.01,
"85053-4628",1,0.01,
"85085-9022",1,0.01,
"85142",1,0.01,
"85202",1,0.01,
"85204-5624",1,0.01,
"85224",1,0.01,
"85248",2,0.01,
"85249",1,0.01,
"85254",1,0.01,
"85255",1,0.01,
"85259",1,0.01,
"85260",1,0.01,
"85283",1,0.01,
"852842262",1,0.01,
"85301",1,0.01,
"853031624",1,0.01,
"85338-1212",1,0.01,
"85340",1,0.01,
"85353",1,0.01,
"85382",2,0.01,
"85392",1,0.01,
"8550",1,0.01,
"8559",1,0.01,
"85629-8134",1,0.01,
"85641",1,0.01,
"85710-6241",1,0.01,
"857121383",1,0.01,
"85713-5829",1,0.01,
"85718",1,0.01,
"85719",1,0.01,
"85719-5038",1,0.01,
"85742",1,0.01,
"857499238",1,0.01,
"85750",1,0.01,
"857551822",1,0.01,
"86351",1,0.01,
"86403",1,0.01,
"86426-9288",1,0.01,
"8648",1,0.01,
"87106",1,0.01,
"87111",3,0.01,
"871123764",1,0.01,
"87114",2,0.01,
"871223896",1,0.01,
"87124-1792",1,0.01,
"87144",1,0.01,
"87144-5337",1,0.01,
"8721",1,0.01,
"8723",1,0.01,
"8742",1,0.01,
"87504",1,0.01,
"875050341",1,0.01,
"875056275",1,0.01,
"8774",0.01,1,
"88001",1,0.01,
"8807",2,0.01,
"881015114",1,0.01,
"8816",1,0.01,
"8820",1,0.01,
"8822",1,0.01,
"8854",1,0.01,
"88690",0.01,1,
"89002",1,0.01,
"89015",1,0.01,
"8902",1,0.01,
"890522363",1,0.01,
"89085",2,0.01,
"89085-4434",1,0.01,
"89117",2,0.01,
"89123",2,0.01,
"89129",1,0.01,
"891292221",1,0.01,
"891346180",1,0.01,
"891357864",1,0.01,
"89139",2,0.01,
"89142",1,0.01,
"89143",2,0.01,
"89144",1,0.01,
"89144-4350",1,0.01,
"89145",1,0.01,
"89147",1,0.01,
"891481408",1,0.01,
"891484410",1,0.01,
"89149",1,0.01,
"891490142",1,0.01,
"89183",1,0.01,
"89503",1,0.01,
"89511",1,0.01,
"89523",1,0.01,
"89703",1,0.01,
"90004",3,0.01,
"90006",1,0.01,
"90008",1,0.01,
"90008-4914",1,0.01,
"90011",1,0.01,
"90014-2951",1,0.01,
"900181766",1,0.01,
"900245303",1,0.01,
"90025",1,0.01,
"90026",1,0.01,
"90026-2320",1,0.01,
"90027-2629",1,0.01,
"900271814",1,0.01,
"90034",1,0.01,
"90036-3780",1,0.01,
"900362810",1,0.01,
"90039",2,0.01,
"90041",1,0.01,
"900432317",1,0.01,
"90045",1,0.01,
"90045-2545",1,0.01,
"900451037",1,0.01,
"90046",2,0.01,
"90046-1320",1,0.01,
"90047",1,0.01,
"90056",1,0.01,
"900561905",1,0.01,
"90064",1,0.01,
"90064-3447",1,0.01,
"900643823",1,0.01,
"900651117",1,0.01,
"90066-4167",1,0.01,
"90068",1,0.01,
"9012",1,0.01,
"90210",1,0.01,
"90212-4771",1,0.01,
"90230-5723",1,0.01,
"902305632",1,0.01,
"90240",1,0.01,
"90254",2,0.01,
"902544751",1,0.01,
"90255-5208",1,0.01,
"90260",1,0.01,
"902622340",1,0.01,
"90265-3711",1,0.01,
"90266",4,0.01,
"90266-2232",1,0.01,
"90266-4943",1,0.01,
"902663426",1,0.01,
"902664512",1,0.01,
"902666542",1,0.01,
"90272",1,0.01,
"90272-3337",1,0.01,
"90274",1,0.01,
"90274-5241",1,0.01,
"902741254",1,0.01,
"90275",1,0.01,
"90275-5896",1,0.01,
"902754931",1,0.01,
"90277",2,0.01,
"90277-3533",1,0.01,
"902772935",1,0.01,
"902781533",1,0.01,
"902782323",1,0.01,
"90290",2,0.01,
"90292-4945",1,0.01,
"90301",1,0.01,
"90305",1,0.01,
"90403",1,0.01,
"90405-1841",1,0.01,
"90405-4013",1,0.01,
"90501",1,0.01,
"90503",1,0.01,
"90505-6240",1,0.01,
"905052025",1,0.01,
"905054308",1,0.01,
"90601-1752",1,0.01,
"906011791",1,0.01,
"90620-4262",1,0.01,
"90623-1781",1,0.01,
"906306802",1,0.01,
"90631-3325",1,0.01,
"90640",1,0.01,
"906602684",1,0.01,
"90703-6321",1,0.01,
"90720",1,0.01,
"90723",1,0.01,
"907322718",1,0.01,
"90740-5616",1,0.01,
"90755",1,0.01,
"90803-4138",1,0.01,
"908031523",1,0.01,
"908053401",1,0.01,
"90807",1,0.01,
"90807-3204",1,0.01,
"90808",1,0.01,
"90810-3318",1,0.01,
"90814",1,0.01,
"90815",2,0.01,
"91001",1,0.01,
"91006",1,0.01,
"91007",1,0.01,
"91011",3,0.01,
"91011-3330",1,0.01,
"91016",1,0.01,
"910201505",1,0.01,
"910241805",1,0.01,
"91030",2,0.01,
"910304106",1,0.01,
"910401611",1,0.01,
"911043035",1,0.01,
"911044624",1,0.01,
"91105",2,0.01,
"91106",1,0.01,
"911064405",1,0.01,
"91107",2,0.01,
"91107-5921",1,0.01,
"911072137",1,0.01,
"91201",1,0.01,
"912051917",1,0.01,
"912053624",1,0.01,
"912061028",1,0.01,
"91207-1241",1,0.01,
"91208",1,0.01,
"91208-3016",1,0.01,
"91214",1,0.01,
"91214-1527",1,0.01,
"91301",2,0.01,
"91301-2813",1,0.01,
"91301-5200",1,0.01,
"913014635",1,0.01,
"91302",1,0.01,
"91302-3053",1,0.01,
"91311-1367",1,0.01,
"91311-2834",1,0.01,
"91316-2556",1,0.01,
"91316-4377",1,0.01,
"91320",3,0.01,
"91320-4321",1,0.01,
"91321-5825",1,0.01,
"913213529",1,0.01,
"91326",2,0.01,
"91326-3844",1,0.01,
"913262758",1,0.01,
"91331",1,0.01,
"91343-1807",1,0.01,
"91343-1856",1,0.01,
"91344",1,0.01,
"91350",1,0.01,
"91350-2125",1,0.01,
"913553230",1,0.01,
"913562917",1,0.01,
"913563221",1,0.01,
"913564436",1,0.01,
"913603646",1,0.01,
"91362",2,0.01,
"91367",1,0.01,
"91367-7208",1,0.01,
"91377",1,0.01,
"91381",2,0.01,
"913811502",1,0.01,
"91384",1,0.01,
"91384-4527",1,0.01,
"913843578",1,0.01,
"91387",1,0.01,
"91402",1,0.01,
"91403-2807",1,0.01,
"91405-3337",1,0.01,
"914064116",1,0.01,
"91411-3787",1,0.01,
"91411-4034",1,0.01,
"91423",1,0.01,
"91423-1386",1,0.01,
"91423-2108",1,0.01,
"914235114",1,0.01,
"91436-3420",1,0.01,
"91436-3836",1,0.01,
"91504-1111",1,0.01,
"91505",2,0.01,
"91601",1,0.01,
"91604-1378",1,0.01,
"91604-2652",1,0.01,
"91605",1,0.01,
"91607-1136",1,0.01,
"91709",1,0.01,
"91709-7852",1,0.01,
"91711",1,0.01,
"917306619",1,0.01,
"91750",1,0.01,
"917542421",1,0.01,
"917544533",1,0.01,
"91765-4408",1,0.01,
"91765-4623",1,0.01,
"91767",1,0.01,
"91773-4232",1,0.01,
"91775",1,0.01,
"917752903",1,0.01,
"91780",1,0.01,
"91801-2075",1,0.01,
"91902",1,0.01,
"91911",1,0.01,
"91915",2,0.01,
"91941",1,0.01,
"919414429",1,0.01,
"92008",1,0.01,
"92009",1,0.01,
"92009-5202",1,0.01,
"92009-6371",1,0.01,
"920097624",1,0.01,
"92011-2506",1,0.01,
"92011-4830",1,0.01,
"92019",1,0.01,
"92019-3902",1,0.01,
"920193862",1,0.01,
"92020-5647",1,0.01,
"92024",2,0.01,
"920243051",1,0.01,
"920246523",1,0.01,
"920247105",1,0.01,
"920256149",1,0.01,
"92026",2,0.01,
"92027",1,0.01,
"920373345",1,0.01,
"920376403",1,0.01,
"920376742",1,0.01,
"92054",2,0.01,
"92054-3545",1,0.01,
"92057",3,0.01,
"92064",2,0.01,
"92065",1,0.01,
"92067",3,0.01,
"92067-4775",1,0.01,
"92069",1,0.01,
"92071-2771",1,0.01,
"92103",1,0.01,
"921035005",1,0.01,
"921044924",1,0.01,
"921085123",1,0.01,
"92109",1,0.01,
"92111",2,0.01,
"92116",1,0.01,
"92118",1,0.01,
"92118-1818",1,0.01,
"92118-2038",1,0.01,
"92121",1,0.01,
"921243768",1,0.01,
"92127",1,0.01,
"921271241",1,0.01,
"92128",3,0.01,
"92128-4238",1,0.01,
"921292329",1,0.01,
"92130",2,0.01,
"921303407",1,0.01,
"921307617",1,0.01,
"92131",1,0.01,
"921312226",1,0.01,
"922602258",1,0.01,
"92270",1,0.01,
"92277",2,0.01,
"92310",2,0.01,
"92404",2,0.01,
"92407",1,0.01,
"92503",1,0.01,
"92507",1,0.01,
"92536",1,0.01,
"925490038",1,0.01,
"92562",1,0.01,
"92563-4346",1,0.01,
"925916169",1,0.01,
"92603",1,0.01,
"92604",3,0.01,
"926060801",1,0.01,
"92612",1,0.01,
"92612-2607",1,0.01,
"92614",1,0.01,
"92617",1,0.01,
"92620",2,0.01,
"92620-1823",1,0.01,
"92626-2255",1,0.01,
"92627",2,0.01,
"926291191",1,0.01,
"926292340",1,0.01,
"926292353",1,0.01,
"92630",1,0.01,
"926304639",1,0.01,
"92646",1,0.01,
"92646-8117",1,0.01,
"926464815",1,0.01,
"92648",1,0.01,
"92648-6811",1,0.01,
"92649",1,0.01,
"92649-3732",1,0.01,
"92651",7,0.01,
"92651-8313",1,0.01,
"926511200",1,0.01,
"926512016",1,0.01,
"926514006",1,0.01,
"926518125",1,0.01,
"926536504",1,0.01,
"92656",1,0.01,
"92657-0107",1,0.01,
"92660",2,0.01,
"926606605",1,0.01,
"926635617",1,0.01,
"926724545",1,0.01,
"92673",2,0.01,
"926733433",1,0.01,
"92677",2,0.01,
"92677-1011",1,0.01,
"92677-6300",1,0.01,
"926771656",1,0.01,
"926771935",1,0.01,
"926772454",1,0.01,
"926772818",1,0.01,
"926774525",1,0.01,
"926777621",1,0.01,
"926779043",1,0.01,
"92679",2,0.01,
"926794214",1,0.01,
"926795100",1,0.01,
"926795104",1,0.01,
"926795151",1,0.01,
"92688",1,0.01,
"92691",1,0.01,
"92692",1,0.01,
"926921920",1,0.01,
"926925184",1,0.01,
"92694",1,0.01,
"926940430",1,0.01,
"92701",1,0.01,
"92703",1,0.01,
"92703-3439",1,0.01,
"92704",2,0.01,
"92705",1,0.01,
"92705-7863",1,0.01,
"927051931",1,0.01,
"927056018",1,0.01,
"92706",1,0.01,
"92708-5751",1,0.01,
"927802254",1,0.01,
"927805954",1,0.01,
"92782",1,0.01,
"928064353",1,0.01,
"92807",1,0.01,
"928082333",1,0.01,
"928082605",1,0.01,
"92821",1,0.01,
"92831",2,0.01,
"928402113",1,0.01,
"92841",1,0.01,
"92845",1,0.01,
"92861",1,0.01,
"92865",1,0.01,
"92866-2641",1,0.01,
"92867",2,0.01,
"928672078",1,0.01,
"928676494",1,0.01,
"928678608",1,0.01,
"92868-1747",1,0.01,
"92869",1,0.01,
"92880",1,0.01,
"92881",1,0.01,
"92886",2,0.01,
"92887",1,0.01,
"93001",1,0.01,
"93013",1,0.01,
"93021",2,0.01,
"93021-3750",1,0.01,
"93023",1,0.01,
"93030-3275",1,0.01,
"93060",1,0.01,
"93063-1203",1,0.01,
"93108",1,0.01,
"93108-2150",1,0.01,
"931081062",1,0.01,
"931104507",1,0.01,
"93117-2135",1,0.01,
"93274",1,0.01,
"93291",2,0.01,
"93312",1,0.01,
"93552",1,0.01,
"93560-6408",1,0.01,
"93647",1,0.01,
"937303451",1,0.01,
"93908",1,0.01,
"940022916",1,0.01,
"94010",1,0.01,
"94010-5013",1,0.01,
"940106142",1,0.01,
"940106629",1,0.01,
"94018",1,0.01,
"94022",1,0.01,
"940221609",1,0.01,
"940224071",1,0.01,
"94024",1,0.01,
"940240129",1,0.01,
"940244817",1,0.01,
"940245044",1,0.01,
"940245515",1,0.01,
"940246441",1,0.01,
"94025",3,0.01,
"94025-4910",1,0.01,
"940251648",1,0.01,
"940273902",1,0.01,
"940274028",1,0.01,
"94037",1,0.01,
"94040",1,0.01,
"94041-2209",1,0.01,
"940412356",1,0.01,
"94043",1,0.01,
"94061",1,0.01,
"94062",1,0.01,
"94066",1,0.01,
"940661208",1,0.01,
"94070-4940",1,0.01,
"94080",1,0.01,
"940804255",1,0.01,
"940874451",1,0.01,
"94103",1,0.01,
"94108",1,0.01,
"941081541",1,0.01,
"94109",1,0.01,
"94110-2096",1,0.01,
"94110-6132",1,0.01,
"94112",3,0.01,
"94112-2911",1,0.01,
"941143126",1,0.01,
"94115",1,0.01,
"94116",1,0.01,
"94116-1453",1,0.01,
"94117",1,0.01,
"94117-3209",1,0.01,
"94118",2,0.01,
"941211446",1,0.01,
"94122",3,0.01,
"94123",1,0.01,
"94131",1,0.01,
"94131-3030",1,0.01,
"94132",1,0.01,
"94132-2724",1,0.01,
"941321068",1,0.01,
"94133",1,0.01,
"94301",1,0.01,
"94303",1,0.01,
"94303-3412",1,0.01,
"943033022",1,0.01,
"943033036",1,0.01,
"943033844",1,0.01,
"94306-1425",1,0.01,
"94306-4439",1,0.01,
"94401",1,0.01,
"94402",2,0.01,
"944022215",1,0.01,
"944023206",1,0.01,
"94403",1,0.01,
"94404",1,0.01,
"944041308",1,0.01,
"944043609",1,0.01,
"945014024",1,0.01,
"94502",2,0.01,
"94506",2,0.01,
"94507",1,0.01,
"94517",1,0.01,
"94526",1,0.01,
"94533",1,0.01,
"94534",1,0.01,
"945364930",1,0.01,
"94538",1,0.01,
"94539",1,0.01,
"94539-3222",1,0.01,
"945396316",1,0.01,
"94542-2110",1,0.01,
"94545-4932",1,0.01,
"94546",1,0.01,
"945493146",1,0.01,
"94550",1,0.01,
"945516163",1,0.01,
"945525307",1,0.01,
"94553",1,0.01,
"94556",1,0.01,
"945562810",1,0.01,
"94558-4551",1,0.01,
"94561",1,0.01,
"94563",2,0.01,
"945633211",1,0.01,
"945633348",1,0.01,
"94566-9767",1,0.01,
"945664568",1,0.01,
"945664618",1,0.01,
"945665524",1,0.01,
"945667521",1,0.01,
"945668642",1,0.01,
"94577",1,0.01,
"94578-2932",1,0.01,
"94582",1,0.01,
"94582-4823",1,0.01,
"94583",1,0.01,
"94587",1,0.01,
"94595",1,0.01,
"94596",1,0.01,
"94602",2,0.01,
"94602-2433",1,0.01,
"946021402",1,0.01,
"946021628",1,0.01,
"946021944",1,0.01,
"94608",2,0.01,
"94609",1,0.01,
"94610",2,0.01,
"94610-1054",1,0.01,
"94610-1818",1,0.01,
"94611",5,0.01,
"946114118",1,0.01,
"946115911",1,0.01,
"94618-2004",1,0.01,
"946181348",1,0.01,
"94619",1,0.01,
"94621-2738",1,0.01,
"94705",1,0.01,
"94705-2331",1,0.01,
"94707",1,0.01,
"947071544",1,0.01,
"947081427",1,0.01,
"948032124",1,0.01,
"948032740",1,0.01,
"94804-3105",1,0.01,
"948062294",1,0.01,
"94914",1,0.01,
"94941",5,0.01,
"94941-3593",1,0.01,
"949411131",1,0.01,
"949411519",1,0.01,
"949412017",1,0.01,
"949453205",1,0.01,
"94947",1,0.01,
"94947-4250",1,0.01,
"949474427",1,0.01,
"94949",1,0.01,
"94952",2,0.01,
"95003",2,0.01,
"95008",1,0.01,
"950080618",1,0.01,
"950081837",1,0.01,
"950081908",1,0.01,
"95014-2455",1,0.01,
"950144768",1,0.01,
"950306238",1,0.01,
"950321127",1,0.01,
"950324743",1,0.01,
"95033",1,0.01,
"95050",2,0.01,
"95055",1,0.01,
"95060",2,0.01,
"95065",1,0.01,
"95070",2,0.01,
"95070-6470",1,0.01,
"950703818",1,0.01,
"950704628",1,0.01,
"950705144",1,0.01,
"95111",1,0.01,
"95112",1,0.01,
"951162958",1,0.01,
"95117",1,0.01,
"95117-1906",1,0.01,
"95118",1,0.01,
"951204452",1,0.01,
"95124",1,0.01,
"95124-1301",1,0.01,
"95125",2,0.01,
"951261558",1,0.01,
"95127",1,0.01,
"951273053",1,0.01,
"95134",1,0.01,
"95134-1823",1,0.01,
"95138",1,0.01,
"95138-2260",1,0.01,
"95138-2369",1,0.01,
"95148-2827",1,0.01,
"95207",1,0.01,
"95336",1,0.01,
"95337",1,0.01,
"953578181",1,0.01,
"95381",1,0.01,
"95403",2,0.01,
"95476",1,0.01,
"954927921",1,0.01,
"954928645",1,0.01,
"956029680",1,0.01,
"956035262",1,0.01,
"95608",1,0.01,
"956083484",1,0.01,
"95628",2,0.01,
"95677",1,0.01,
"95682",1,0.01,
"956829673",1,0.01,
"957139715",1,0.01,
"95757",1,0.01,
"95765",1,0.01,
"95811",1,0.01,
"95818",1,0.01,
"95818-4015",1,0.01,
"958194028",1,0.01,
"95825",1,0.01,
"95831",1,0.01,
"95835-1237",1,0.01,
"95864",1,0.01,
"95948-9483",1,0.01,
"96204-3049",1,0.01,
"9627",1,0.01,
"9630",1,0.01,
"96716",1,0.01,
"96720-3245",1,0.01,
"967342115",1,0.01,
"96740",1,0.01,
"967444732",1,0.01,
"96813",1,0.01,
"96814",1,0.01,
"96815-1444",1,0.01,
"968164242",1,0.01,
"96825",2,0.01,
"96950",1,0.01,
"97007",1,0.01,
"9702",1,0.01,
"97027",1,0.01,
"97034",2,0.01,
"97035",1,0.01,
"97062",2,0.01,
"97068",1,0.01,
"97068-1861",1,0.01,
"97201",1,0.01,
"97202",1,0.01,
"972028811",1,0.01,
"97210",1,0.01,
"97212",2,0.01,
"97224",1,0.01,
"97225",2,0.01,
"97230",2,0.01,
"97232-1732",1,0.01,
"973024940",1,0.01,
"97361-1660",1,0.01,
"973811372",1,0.01,
"97401",1,0.01,
"97405",1,0.01,
"97520-1417",1,0.01,
"975203048",1,0.01,
"97701",1,0.01,
"98004",1,0.01,
"980046354",1,0.01,
"980046836",1,0.01,
"98011",1,0.01,
"98020",0.01,1,
"98021",1,0.01,
"98026",1,0.01,
"98027",1,0.01,
"980278436",1,0.01,
"980296543",1,0.01,
"98032-1808",1,0.01,
"98034",2,0.01,
"98038",1,0.01,
"98040",2,0.01,
"98042",1,0.01,
"980521701",1,0.01,
"980756294",1,0.01,
"98077-7145",1,0.01,
"980927204",1,0.01,
"98103",1,0.01,
"98104",1,0.01,
"98105",2,0.01,
"98105-2020",1,0.01,
"981052246",1,0.01,
"981053837",1,0.01,
"98109",1,0.01,
"98110",1,0.01,
"98115",4,0.01,
"98117",2,0.01,
"98119",1,0.01,
"98122",4,0.01,
"98125",1,0.01,
"98133",1,0.01,
"98144",1,0.01,
"98166",1,0.01,
"98247",1,0.01,
"98275-4252",1,0.01,
"983709772",1,0.01,
"983839239",1,0.01,
"984652666",1,0.01,
"98498",1,0.01,
"98532",1,0.01,
"98901",1,0.01,
"99005-9227",1,0.01,
"99205",1,0.01,
"99208",1,0.01,
"99338",1,0.01,
"99354",1,0.01,
"99362",1,0.01,
"99403",2,0.01,
"99508-4867",1,0.01,
"995153376",1,0.01,
"99669",1,0.01,
"99703",1,0.01,
"99712-2730",1,0.01,
"99762",1,0.01,
"FM1100",0.01,1,
"IL",2,0.01,
"Illinois",1,0.01,
"n/a",0.01,1,
"N16 0TX",0.01,1,
"SW14 8JH",0.01,1,
"UNK",758,9397,
"V6B1V5",0.01,1,
"V6H1Z3",0.01,1,
"V7S 2G4",0.01,1
)
## Reshape the flat data vector `dta` into a 3-column matrix (one row per
## ZIP-code record), convert it to a data frame using the column names held
## in `cols`, and coerce the two count columns from character to numeric.
## (Fix: spell out TRUE/FALSE — T and F are ordinary variables in R and can
## be reassigned, so relying on them is unsafe.)
zippsmatrix <- matrix(dta, ncol = 3, byrow = TRUE)
zippsDF <- data.frame(zippsmatrix, stringsAsFactors = FALSE)
names(zippsDF) <- cols
## matrix() stored everything as character; restore the numeric columns.
zippsDF$D <- as.numeric(zippsDF$D)
zippsDF$F <- as.numeric(zippsDF$F)
|
## makeCacheMatrix / cacheSolve: cache the inverse of a matrix so that
## repeated requests do not recompute it.

## Build a special "matrix" object: a list of four closures sharing the
## matrix `x` and its cached inverse. `set` replaces the matrix (and clears
## the cache), `get` returns it, and `setinverse`/`getinverse` store and
## retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix invalidates any cached inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse was already computed (and the matrix unchanged), the
## cached value is returned; otherwise it is computed with solve(), cached,
## and returned. Extra arguments `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  memo <- x$getinverse()
  if (!is.null(memo)) {
    message("getting cached data")
    return(memo)
  }
  mat <- x$get()
  memo <- solve(mat, ...)
  x$setinverse(memo)
  memo
}
|
/cachematrix.R
|
no_license
|
skoh5/ProgrammingAssignment2
|
R
| false
| false
| 789
|
r
|
## makeCacheMatrix / cacheSolve: together these cache the inverse of a
## matrix so that repeated requests do not recompute it.
## makeCacheMatrix builds a special "matrix" object: a list of four accessor
## closures sharing the matrix `x` and its cached inverse `i`.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL  # replacing the matrix invalidates any cached inverse
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() only on the first call and
## serving the cached value on subsequent calls. Extra arguments `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISOFeatureOperation.R
\docType{class}
\name{ISOFeatureOperation}
\alias{ISOFeatureOperation}
\title{ISOFeatureOperation}
\format{\code{\link{R6Class}} object.}
\usage{
ISOFeatureOperation
}
\value{
Object of \code{\link{R6Class}} for modelling an ISOFeatureOperation
}
\description{
ISOFeatureOperation
}
\section{Fields}{
\describe{
\item{\code{formalDefinition}}{the formal definition of the feature operation}
}}
\section{Methods}{
\describe{
\item{\code{new(xml)}}{
This method is used to instantiate an ISOFeatureOperation
}
\item{\code{setSignature(signature)}}{
Sets the signature
}
\item{\code{setFormalDefinition(formalDefinition)}}{
Sets the formal definition
}
}
}
\examples{
md <- ISOFeatureOperation$new()
md$setMemberName("name")
md$setDefinition("definition")
md$setCardinality(lower=1,upper=1)
md$setSignature("signature")
md$setFormalDefinition("def")
}
\references{
ISO 19110:2005 Methodology for Feature cataloguing
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{feature}
\keyword{operation}
|
/man/ISOFeatureOperation.Rd
|
no_license
|
65MO/geometa
|
R
| false
| true
| 1,112
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISOFeatureOperation.R
\docType{class}
\name{ISOFeatureOperation}
\alias{ISOFeatureOperation}
\title{ISOFeatureOperation}
\format{\code{\link{R6Class}} object.}
\usage{
ISOFeatureOperation
}
\value{
Object of \code{\link{R6Class}} for modelling an ISOFeatureOperation
}
\description{
ISOFeatureOperation
}
\section{Fields}{
\describe{
\item{\code{formalDefinition}}{the formal definition of the feature operation}
}}
\section{Methods}{
\describe{
\item{\code{new(xml)}}{
This method is used to instantiate an ISOFeatureOperation
}
\item{\code{setSignature(signature)}}{
Sets the signature
}
\item{\code{setFormalDefinition(formalDefinition)}}{
Sets the formal definition
}
}
}
\examples{
md <- ISOFeatureOperation$new()
md$setMemberName("name")
md$setDefinition("definition")
md$setCardinality(lower=1,upper=1)
md$setSignature("signature")
md$setFormalDefinition("def")
}
\references{
ISO 19110:2005 Methodology for Feature cataloguing
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ISO}
\keyword{feature}
\keyword{operation}
|
#### Fit a multivariate Poisson log-linear model with Leroux multivariate CAR
#### (MCAR) random effects by MCMC, where the unit-level counts Y are not
#### observed directly but imputed by data augmentation (DA) from regional
#### aggregates Y.region every n.da-th iteration. The DA step allocates each
#### region's count across its intersecting units with a multinomial draw
#### whose probabilities combine the current fitted risk (spatially smoothed
#### with mixing weight `alpha` between each unit's own random effect and its
#### neighbourhood average) and the supplied intersection `weights`.
####
#### Arguments (CARBayes conventions):
####   formula, data        model formula and optional data frame
####   W                    K x K symmetric neighbourhood matrix
####   Y.region             K.area x J matrix of regional counts; an NA first
####                        entry in a column switches DA off for that outcome
####   weights              K.area x K region-to-unit intersection weights
####   alpha                DA smoothing weight in [0, 1]
####   burnin, n.sample, thin   MCMC control quantities
####   n.da                 perform DA every n.da-th iteration
####   prior.mean.beta, prior.var.beta   Gaussian prior for beta
####   prior.Sigma.df, prior.Sigma.scale inverse-Wishart prior for Sigma
####   rho                  spatial dependence in [0, 1]; estimated if NULL
####   MALA                 MALA (TRUE) or random-walk (FALSE) beta updates
####   verbose              print progress to the console
#### Returns an object of class "CARBayes" containing posterior summaries,
#### samples, fitted values and model-fit statistics (DIC, WAIC, LMPL).
#### Relies on CARBayes internal helpers (common.*, poisson*update*,
#### poissonmcarupdateRW) plus riwish (MCMCpack), rtruncnorm/dtruncnorm
#### (truncnorm) and mcmc/effectiveSize/geweke.diag (coda).
poisson.MVlerouxCARdasmoothalpha <- function(formula, data=NULL, W, Y.region, weights, alpha, burnin, n.sample, thin=1, n.da, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, MALA=FALSE, verbose=TRUE)
{
    ##############################################
    #### Format the arguments and check for errors
    ##############################################
    #### Verbose
    a <- common.verbose(verbose)

    #### Frame object
    frame.results <- common.frame(formula, data, "poisson")
    K <- frame.results$n
    p <- frame.results$p
    X <- frame.results$X
    X.standardised <- frame.results$X.standardised
    X.sd <- frame.results$X.sd
    X.mean <- frame.results$X.mean
    X.indicator <- frame.results$X.indicator
    offset <- frame.results$offset
    Y <- frame.results$Y
    J <- ncol(Y)
    N.all <- K * J
    K.area <- nrow(Y.region)

    #### Check on MALA argument
    if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
    if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)

    #### W matrix
    if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
    if(nrow(W)!= K) stop("The number of data points divided by the number of rows in W is not a whole number.", call.=FALSE)

    #### rho: estimated (uniform initial value) unless supplied by the user
    if(is.null(rho))
    {
        rho <- runif(1)
        fix.rho <- FALSE
    }else
    {
        fix.rho <- TRUE
    }
    if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
    if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
    if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)

    #### Priors (weakly informative defaults)
    if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
    if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
    if(is.null(prior.Sigma.df)) prior.Sigma.df <- J+1
    if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- diag(rep(1,J)) / 1000
    common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
    common.prior.varmat.check(prior.Sigma.scale, J)

    #### Compute the blocking structure for beta
    block.temp <- common.betablock(p)
    beta.beg <- block.temp[[1]]
    beta.fin <- block.temp[[2]]
    n.beta.block <- block.temp[[3]]
    list.block <- as.list(rep(NA, n.beta.block*2))
    for(r in 1:n.beta.block)
    {
        list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
        list.block[[r+n.beta.block]] <- length(list.block[[r]])
    }

    #### MCMC quantities - burnin, n.sample, thin
    common.burnin.nsample.thin.check(burnin, n.sample, thin)

    #############################
    #### Initial parameter values
    #############################
    beta <- array(0, c(p, J))
    phi <- matrix(0, nrow=K, ncol=J)
    Sigma <- diag(rep(0.01,J))
    Sigma.inv <- solve(Sigma)
    regression <- X.standardised %*% beta
    fitted <- exp(regression + phi + offset)

    ###############################
    #### Set up the MCMC quantities
    ###############################
    #### Matrices to store samples
    n.keep <- floor((n.sample - burnin)/thin)
    samples.beta <- array(NA, c(n.keep, J*p))
    samples.phi <- array(NA, c(n.keep, N.all))
    samples.Sigma <- array(NA, c(n.keep, J, J))
    if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
    samples.loglike <- array(NA, c(n.keep, N.all))
    samples.fitted <- array(NA, c(n.keep, N.all))
    samples.Y <- array(NA, c(n.keep, K*J))

    #### Metropolis quantities
    accept <- rep(0,4)
    accept.beta <- rep(0,2*J)
    proposal.sd.beta <- rep(0.01, J)
    proposal.sd.phi <- 0.1
    proposal.sd.rho <- 0.02
    Sigma.post.df <- prior.Sigma.df + K
    ## The intrinsic CAR (rho = 1) has a sum-to-zero constraint, losing one
    ## degree of freedom per outcome.
    if(rho==1) Sigma.post.df <- prior.Sigma.df + K - 1

    ##################################
    #### Set up the spatial quantities
    ##################################
    #### W matrix quantities
    W.quants <- common.Wcheckformat(W)
    W <- W.quants$W
    W.triplet <- W.quants$W.triplet
    n.triplet <- W.quants$n.triplet
    W.triplet.sum <- W.quants$W.triplet.sum
    n.neighbours <- W.quants$n.neighbours
    W.begfin <- W.quants$W.begfin
    W.weights <- apply(W, 1, sum)
    Wstar <- diag(apply(W,1,sum)) - W
    Q <- rho * Wstar + diag(rep(1-rho,K))

    #### Create the determinant (needed only when rho is updated)
    if(!fix.rho)
    {
        Wstar.eigen <- eigen(Wstar)
        Wstar.val <- Wstar.eigen$values
        det.Q <- sum(log((rho * Wstar.val + (1-rho))))
    }else
    {}

    ###########################
    #### Run the Bayesian model
    ###########################
    #### Start timer
    if(verbose)
    {
        cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
        progressBar <- txtProgressBar(style = 3)
        percentage.points<-round((1:100/100)*n.sample)
    }else
    {
        percentage.points<-round((1:100/100)*n.sample)
    }

    #### Create the MCMC samples
    for(j in 1:n.sample)
    {
        ############################################################
        ## Sample from Y - data augmentation - every n.da iterations
        ############################################################
        if(floor(j / n.da)==ceiling(j / n.da))
        {
            for(z in 1:J)
            {
                if(!is.na(Y.region[1, z]))
                {
                    #### Do DA on this outcome variable: smooth each unit's
                    #### random effect towards its neighbourhood mean, then
                    #### allocate each regional count multinomially across
                    #### the intersecting units.
                    phi.smooth <- alpha * phi[ ,z] + (1 - alpha) * (W %*% phi[ ,z] / W.weights)
                    risk.current <- exp(as.numeric(X.standardised %*% beta[ ,z]) + phi.smooth)
                    Y.intersections <- array(0, c(K.area, K))
                    for(s in 1:K.area)
                    {
                        probs.unscaled <- risk.current * weights[s, ]
                        probs <- probs.unscaled / sum(probs.unscaled)
                        Y.intersections[s, ] <- as.numeric(rmultinom(n=1, size=Y.region[s, z], prob=probs))
                    }
                    Y[ ,z] <- round(apply(Y.intersections,2,sum))
                }else
                {
                    #### Don't do DA on this outcome variable
                }
            }
        }else
        {}

        ###################
        ## Sample from beta
        ###################
        offset.temp <- phi + offset
        for(r in 1:J)
        {
            if(MALA)
            {
                temp <- poissonbetaupdateMALA(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
            }else
            {
                temp <- poissonbetaupdateRW(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
            }
            beta[ ,r] <- temp[[1]]
            accept.beta[r] <- accept.beta[r] + temp[[2]]
            accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block
        }
        regression <- X.standardised %*% beta

        ##################
        ## Sample from phi
        ##################
        den.offset <- rho * W.triplet.sum + 1 - rho
        phi.offset <- regression + offset
        temp1 <- poissonmcarupdateRW(W.triplet, W.begfin, K, J, phi, Y, phi.offset, den.offset, Sigma.inv, rho, proposal.sd.phi)
        phi <- temp1[[1]]
        ## Centre each column of phi for identifiability with the intercept.
        for(r in 1:J)
        {
            phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
        }
        accept[1] <- accept[1] + temp1[[2]]
        accept[2] <- accept[2] + K

        ####################
        ## Sample from Sigma (conjugate inverse-Wishart full conditional)
        ####################
        Sigma.post.scale <- t(phi) %*% Q %*% phi + prior.Sigma.scale
        Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
        Sigma.inv <- solve(Sigma)

        ##################
        ## Sample from rho (truncated-normal random-walk Metropolis)
        ##################
        if(!fix.rho)
        {
            ## Propose a new value
            proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
            Q.prop <- proposal.rho * Wstar + diag(rep(1-proposal.rho), K)
            det.Q.prop <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))

            ## Compute the acceptance rate
            logprob.current <- 0.5 * J * det.Q - 0.5 * sum(diag(t(phi) %*% Q %*% phi %*% Sigma.inv))
            logprob.proposal <- 0.5 * J * det.Q.prop - 0.5 * sum(diag(t(phi) %*% Q.prop %*% phi %*% Sigma.inv))
            hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
            prob <- exp(logprob.proposal - logprob.current + hastings)
            if(prob > runif(1))
            {
                rho <- proposal.rho
                det.Q <- det.Q.prop
                Q <- Q.prop
                accept[3] <- accept[3] + 1
            }else
            {}
            accept[4] <- accept[4] + 1
        }else
        {}

        #########################
        ## Calculate the deviance
        #########################
        fitted <- exp(regression + phi + offset)
        loglike <- dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted)), log=TRUE)

        ###################
        ## Save the results
        ###################
        if(j > burnin & (j-burnin)%%thin==0)
        {
            ele <- (j - burnin) / thin
            samples.beta[ele, ] <- as.numeric(beta)
            samples.phi[ele, ] <- as.numeric(t(phi))
            samples.Sigma[ele, , ] <- Sigma
            if(!fix.rho) samples.rho[ele, ] <- rho
            samples.loglike[ele, ] <- loglike
            samples.fitted[ele, ] <- as.numeric(t(fitted))
            samples.Y[ele, ] <- as.numeric(t(Y))
        }else
        {}

        ########################################
        ## Self tune the acceptance probabilties
        ########################################
        if(ceiling(j/100)==floor(j/100) & j < burnin)
        {
            #### Update the proposal sds
            for(r in 1:J)
            {
                if(p>2)
                {
                    proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50)
                }else
                {
                    proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40)
                }
            }
            proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
            if(!fix.rho)
            {
                proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
            }
            accept <- c(0,0,0,0)
            accept.beta <- rep(0,2*J)
        }else
        {}

        ################################
        ## print progress to the console
        ################################
        if(j %in% percentage.points & verbose)
        {
            setTxtProgressBar(progressBar, j/n.sample)
        }
    }

    ##### end timer
    if(verbose)
    {
        cat("\nSummarising results.")
        close(progressBar)
    }else
    {}

    ###################################
    #### Summarise and save the results
    ###################################
    #### Compute the acceptance rates
    accept.beta <- 100 * sum(accept.beta[1:J]) / sum(accept.beta[(J+1):(2*J)])
    accept.phi <- 100 * accept[1] / accept[2]
    if(!fix.rho)
    {
        accept.rho <- 100 * accept[3] / accept[4]
    }else
    {
        accept.rho <- NA
    }
    accept.Sigma <- 100
    accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
    names(accept.final) <- c("beta", "phi", "rho", "Sigma")

    #### WAIC
    p.w <- sum(apply(samples.loglike,2, var), na.rm=TRUE)
    mean.like <- apply(exp(samples.loglike),2,mean)
    ## Guard against log(0) for observations with vanishing likelihood.
    mean.min <- min(mean.like[mean.like>0])
    mean.like[mean.like==0] <- mean.min
    lppd <- sum(log(mean.like), na.rm=TRUE)
    WAIC <- -2 * (lppd - p.w)

    #### LMPL
    CPO <- 1/apply(exp(-samples.loglike), 2, mean)
    mean.min <- min(CPO[CPO>0])
    CPO[CPO==0] <- mean.min
    LMPL <- sum(log(CPO), na.rm=TRUE)

    #### DIC - computed as a distribution over the augmented Y samples
    DIC.dist <- rep(NA, n.keep)
    p.d.dist <- rep(NA, n.keep)
    mean.deviance <- -2 * sum(samples.loglike, na.rm=TRUE) / nrow(samples.loglike)
    mean.beta <- matrix(apply(samples.beta, 2, mean), nrow=p, ncol=J, byrow=FALSE)
    mean.phi <- matrix(apply(samples.phi, 2, mean), nrow=K, ncol=J, byrow=TRUE)
    fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
    for(z in 1:n.keep)
    {
        Y.temp <- matrix(samples.Y[z, ], nrow=K, ncol=J, byrow=TRUE)
        deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y.temp)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE)
        p.d.dist[z] <- mean.deviance - deviance.fitted
        DIC.dist[z] <- deviance.fitted + 2 * p.d.dist[z]
    }
    DIC <- mean(DIC.dist)
    p.d <- mean(p.d.dist)
    modelfit <- c(DIC, p.d, WAIC, p.w, LMPL)
    names(modelfit) <- c("DIC", "p.d", "WAIC", "p.w", "LMPL")

    #### transform the parameters back to the original covariate scale.
    samples.beta.orig <- samples.beta
    for(r in 1:J)
    {
        samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
    }

    #### Create a summary object
    samples.beta.orig <- mcmc(samples.beta.orig)
    summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
    ## rep(n.keep, p) and rep(accept.beta, J*p) are recycled by cbind() to
    ## the J*p rows of summary.beta.
    summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,J*p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
    ## (fix) allocate p*J names - the loop below fills indices 1:(p*J); the
    ## original rep(NA, p*(J-1)) only worked because R auto-extends vectors.
    col.name <- rep(NA, p*J)
    if(is.null(colnames(Y)))
    {
        for(r in 1:J)
        {
            col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
        }
    }else
    {
        for(r in 1:J)
        {
            col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
        }
    }
    rownames(summary.beta) <- col.name
    colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
    summary.hyper <- array(NA, c((J+1) ,7))
    summary.hyper[1:J, 1] <- diag(apply(samples.Sigma, c(2,3), quantile, c(0.5)))
    summary.hyper[1:J, 2] <- diag(apply(samples.Sigma, c(2,3), quantile, c(0.025)))
    summary.hyper[1:J, 3] <- diag(apply(samples.Sigma, c(2,3), quantile, c(0.975)))
    summary.hyper[1:J, 4] <- n.keep
    summary.hyper[1:J, 5] <- accept.Sigma
    summary.hyper[1:J, 6] <- diag(apply(samples.Sigma, c(2,3), effectiveSize))
    for(r in 1:J)
    {
        summary.hyper[r, 7] <- geweke.diag(samples.Sigma[ ,r,r])$z
    }
    if(!fix.rho)
    {
        summary.hyper[(J+1), 1:3] <- quantile(samples.rho, c(0.5, 0.025, 0.975))
        summary.hyper[(J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(samples.rho), geweke.diag(samples.rho)$z)
    }else
    {
        summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
        summary.hyper[(J+1), 4:7] <- rep(NA, 4)
    }
    summary.results <- rbind(summary.beta, summary.hyper)
    rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
    summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
    summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)

    #### Create the fitted values and residuals
    ## Residuals are not well defined here because Y itself is imputed by
    ## the DA step, so they are returned as NA.
    fitted.values <- matrix(apply(samples.fitted, 2, mean), nrow=K, ncol=J, byrow=TRUE)
    #response.residuals <- Y - fitted.values
    #pearson.residuals <- response.residuals / sqrt(fitted.values)
    #residuals <- list(response=response.residuals, pearson=pearson.residuals)
    residuals <- NA

    #### Compile and return the results
    model.string <- c("Likelihood model - Poisson (log link function) with data augmentation", "\nRandom effects model - Leroux MCAR\n")
    if(fix.rho) samples.rho <- NA
    samples <- list(beta=samples.beta.orig, phi=mcmc(samples.phi), Sigma=samples.Sigma, rho=mcmc(samples.rho), fitted=mcmc(samples.fitted), Y=mcmc(samples.Y))
    results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, X=X)
    class(results) <- "CARBayes"

    #### Finish by stating the time taken
    if(verbose)
    {
        b<-proc.time()
        cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
    }else
    {}
    return(results)
}
|
/poisson.MVlerouxCARdasmoothalpha.R
|
no_license
|
duncanplee/spatially-misaligned-count-data
|
R
| false
| false
| 15,800
|
r
|
#### Multivariate Poisson log-linear model with Leroux MCAR random effects,
#### fitted by MCMC with data augmentation: every n.da iterations the
#### unit-level counts Y are re-imputed by allocating the regionally observed
#### counts (Y.region) to units via multinomial draws whose probabilities come
#### from an alpha-smoothed risk surface (alpha weights a unit's own random
#### effect against the weighted average of its neighbours' effects —
#### presumably alpha is in [0, 1]; confirm against the caller).
#### The external interface is unchanged from the original implementation;
#### returns a "CARBayes" object.
poisson.MVlerouxCARdasmoothalpha <- function(formula, data=NULL, W, Y.region, weights, alpha, burnin, n.sample, thin=1, n.da, prior.mean.beta=NULL, prior.var.beta=NULL, prior.Sigma.df=NULL, prior.Sigma.scale=NULL, rho=NULL, MALA=FALSE, verbose=TRUE)
{
    ##############################################
    #### Format the arguments and check for errors
    ##############################################
    #### Verbose
    a <- common.verbose(verbose)

    #### Frame object
    frame.results <- common.frame(formula, data, "poisson")
    K <- frame.results$n
    p <- frame.results$p
    X <- frame.results$X
    X.standardised <- frame.results$X.standardised
    X.sd <- frame.results$X.sd
    X.mean <- frame.results$X.mean
    X.indicator <- frame.results$X.indicator
    offset <- frame.results$offset
    Y <- frame.results$Y
    J <- ncol(Y)
    N.all <- K * J
    K.area <- nrow(Y.region)

    #### Check on MALA argument
    if(length(MALA)!=1) stop("MALA is not length 1.", call.=FALSE)
    if(!is.logical(MALA)) stop("MALA is not logical.", call.=FALSE)

    #### W matrix
    if(!is.matrix(W)) stop("W is not a matrix.", call.=FALSE)
    ## Bug fix: the previous message described a different check ("divided by
    ## ... not a whole number"); the actual test is simply that W has K rows.
    if(nrow(W)!= K) stop("W does not have the same number of rows as there are spatial units.", call.=FALSE)

    #### rho: if not supplied it is treated as unknown and updated in the MCMC
    if(is.null(rho))
    {
    rho <- runif(1)
    fix.rho <- FALSE
    }else
    {
    fix.rho <- TRUE
    }
    if(!is.numeric(rho) ) stop("rho is fixed but is not numeric.", call.=FALSE)
    if(rho<0 ) stop("rho is outside the range [0, 1].", call.=FALSE)
    if(rho>1 ) stop("rho is outside the range [0, 1].", call.=FALSE)

    #### Priors
    if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
    if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
    if(is.null(prior.Sigma.df)) prior.Sigma.df <- J+1
    if(is.null(prior.Sigma.scale)) prior.Sigma.scale <- diag(rep(1,J)) / 1000
    common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
    common.prior.varmat.check(prior.Sigma.scale, J)

    #### Compute the blocking structure for beta
    block.temp <- common.betablock(p)
    beta.beg <- block.temp[[1]]
    beta.fin <- block.temp[[2]]
    n.beta.block <- block.temp[[3]]
    list.block <- as.list(rep(NA, n.beta.block*2))
    for(r in 1:n.beta.block)
    {
    ## 0-based indices for the C++ updaters; note ':' binds tighter than '-'
    list.block[[r]] <- (beta.beg[r]:beta.fin[r]) - 1
    list.block[[r+n.beta.block]] <- length(list.block[[r]])
    }

    #### MCMC quantities - burnin, n.sample, thin
    common.burnin.nsample.thin.check(burnin, n.sample, thin)

    #############################
    #### Initial parameter values
    #############################
    beta <- array(0, c(p, J))
    phi <- matrix(0, nrow=K, ncol=J)
    Sigma <- diag(rep(0.01,J))
    Sigma.inv <- solve(Sigma)
    regression <- X.standardised %*% beta
    fitted <- exp(regression + phi + offset)

    ###############################
    #### Set up the MCMC quantities
    ###############################
    #### Matrices to store samples
    n.keep <- floor((n.sample - burnin)/thin)
    samples.beta <- array(NA, c(n.keep, J*p))
    samples.phi <- array(NA, c(n.keep, N.all))
    samples.Sigma <- array(NA, c(n.keep, J, J))
    if(!fix.rho) samples.rho <- array(NA, c(n.keep, 1))
    samples.loglike <- array(NA, c(n.keep, N.all))
    samples.fitted <- array(NA, c(n.keep, N.all))
    samples.Y <- array(NA, c(n.keep, K*J))

    #### Metropolis quantities
    accept <- rep(0,4)
    accept.beta <- rep(0,2*J)
    proposal.sd.beta <- rep(0.01, J)
    proposal.sd.phi <- 0.1
    proposal.sd.rho <- 0.02
    Sigma.post.df <- prior.Sigma.df + K
    ## With rho==1 (intrinsic CAR) one degree of freedom is lost to the
    ## sum-to-zero constraint
    if(rho==1) Sigma.post.df <- prior.Sigma.df + K - 1

    ##################################
    #### Set up the spatial quantities
    ##################################
    #### W matrix quantities
    W.quants <- common.Wcheckformat(W)
    W <- W.quants$W
    W.triplet <- W.quants$W.triplet
    n.triplet <- W.quants$n.triplet
    W.triplet.sum <- W.quants$W.triplet.sum
    n.neighbours <- W.quants$n.neighbours
    W.begfin <- W.quants$W.begfin
    W.weights <- apply(W, 1, sum)
    Wstar <- diag(apply(W,1,sum)) - W
    Q <- rho * Wstar + diag(rep(1-rho,K))

    #### Create the determinant
    if(!fix.rho)
    {
    Wstar.eigen <- eigen(Wstar)
    Wstar.val <- Wstar.eigen$values
    det.Q <- sum(log((rho * Wstar.val + (1-rho))))
    }else
    {}

    #### Specify vector variants
    Y.vec <- as.numeric(t(Y))

    ###########################
    #### Run the Bayesian model
    ###########################
    #### Start timer
    if(verbose)
    {
    cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
    progressBar <- txtProgressBar(style = 3)
    percentage.points<-round((1:100/100)*n.sample)
    }else
    {
    percentage.points<-round((1:100/100)*n.sample)
    }

    #### Create the MCMC samples
    for(j in 1:n.sample)
    {
    ############################################################
    ## Sample from Y - data augmentation - every n.da iterations
    ############################################################
    if(floor(j / n.da)==ceiling(j / n.da))
    {
    for(z in 1:J)
    {
    if(!is.na(Y.region[1, z]))
    {
    #### Do DA on this outcome variable: allocate each region's count to
    #### the K units in proportion to the smoothed risk times the weights
    phi.smooth <- alpha * phi[ ,z] + (1 - alpha) * (W %*% phi[ ,z] / W.weights)
    risk.current <- exp(as.numeric(X.standardised %*% beta[ ,z]) + phi.smooth)
    Y.intersections <- array(0, c(K.area, K))
    for(s in 1:K.area)
    {
    probs.unscaled <- risk.current * weights[s, ]
    probs <- probs.unscaled / sum(probs.unscaled)
    Y.intersections[s, ] <- as.numeric(rmultinom(n=1, size=Y.region[s, z], prob=probs))
    }
    Y[ ,z] <- round(apply(Y.intersections,2,sum))
    }else
    {
    #### Don't do DA on this outcome variable
    }
    }
    }else
    {}

    ###################
    ## Sample from beta
    ###################
    offset.temp <- phi + offset
    for(r in 1:J)
    {
    if(MALA)
    {
    temp <- poissonbetaupdateMALA(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
    }else
    {
    temp <- poissonbetaupdateRW(X.standardised, K, p, beta[ ,r], offset.temp[ ,r], Y[ ,r], prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block)
    }
    beta[ ,r] <- temp[[1]]
    accept.beta[r] <- accept.beta[r] + temp[[2]]
    accept.beta[(r+J)] <- accept.beta[(r+J)] + n.beta.block
    }
    regression <- X.standardised %*% beta

    ##################
    ## Sample from phi
    ##################
    den.offset <- rho * W.triplet.sum + 1 - rho
    phi.offset <- regression + offset
    temp1 <- poissonmcarupdateRW(W.triplet, W.begfin, K, J, phi, Y, phi.offset, den.offset, Sigma.inv, rho, proposal.sd.phi)
    phi <- temp1[[1]]
    ## Centre each column of phi for identifiability with the intercept
    for(r in 1:J)
    {
    phi[ ,r] <- phi[ ,r] - mean(phi[ ,r])
    }
    accept[1] <- accept[1] + temp1[[2]]
    accept[2] <- accept[2] + K

    ####################
    ## Sample from Sigma
    ####################
    Sigma.post.scale <- t(phi) %*% Q %*% phi + prior.Sigma.scale
    Sigma <- riwish(Sigma.post.df, Sigma.post.scale)
    Sigma.inv <- solve(Sigma)

    ##################
    ## Sample from rho
    ##################
    if(!fix.rho)
    {
    ## Propose a new value from a truncated normal on [0, 1]
    proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
    Q.prop <- proposal.rho * Wstar + diag(rep(1-proposal.rho, K))
    det.Q.prop <- sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))

    ## Compute the acceptance rate
    logprob.current <- 0.5 * J * det.Q - 0.5 * sum(diag(t(phi) %*% Q %*% phi %*% Sigma.inv))
    logprob.proposal <- 0.5 * J * det.Q.prop - 0.5 * sum(diag(t(phi) %*% Q.prop %*% phi %*% Sigma.inv))
    hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
    prob <- exp(logprob.proposal - logprob.current + hastings)
    if(prob > runif(1))
    {
    rho <- proposal.rho
    det.Q <- det.Q.prop
    Q <- Q.prop
    accept[3] <- accept[3] + 1
    }else
    {}
    accept[4] <- accept[4] + 1
    }else
    {}

    #########################
    ## Calculate the deviance
    #########################
    fitted <- exp(regression + phi + offset)
    loglike <- dpois(x=as.numeric(t(Y)), lambda=as.numeric(t(fitted)), log=TRUE)

    ###################
    ## Save the results
    ###################
    if(j > burnin & (j-burnin)%%thin==0)
    {
    ele <- (j - burnin) / thin
    samples.beta[ele, ] <- as.numeric(beta)
    samples.phi[ele, ] <- as.numeric(t(phi))
    samples.Sigma[ele, , ] <- Sigma
    if(!fix.rho) samples.rho[ele, ] <- rho
    samples.loglike[ele, ] <- loglike
    samples.fitted[ele, ] <- as.numeric(t(fitted))
    samples.Y[ele, ] <- as.numeric(t(Y))
    }else
    {}

    ########################################
    ## Self tune the acceptance probabilties
    ########################################
    if(ceiling(j/100)==floor(j/100) & j < burnin)
    {
    #### Update the proposal sds every 100 iterations during burnin
    for(r in 1:J)
    {
    if(p>2)
    {
    proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 40, 50)
    }else
    {
    proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J))], proposal.sd.beta[r], 30, 40)
    }
    }
    proposal.sd.phi <- common.accceptrates1(accept[1:2], proposal.sd.phi, 40, 50)
    if(!fix.rho)
    {
    proposal.sd.rho <- common.accceptrates2(accept[3:4], proposal.sd.rho, 40, 50, 0.5)
    }
    accept <- c(0,0,0,0)
    accept.beta <- rep(0,2*J)
    }else
    {}

    ################################
    ## print progress to the console
    ################################
    if(j %in% percentage.points & verbose)
    {
    setTxtProgressBar(progressBar, j/n.sample)
    }
    }

    ##### end timer
    if(verbose)
    {
    cat("\nSummarising results.")
    close(progressBar)
    }else
    {}

    ###################################
    #### Summarise and save the results
    ###################################
    #### Compute the acceptance rates
    accept.beta <- 100 * sum(accept.beta[1:J]) / sum(accept.beta[(J+1):(2*J)])
    accept.phi <- 100 * accept[1] / accept[2]
    if(!fix.rho)
    {
    accept.rho <- 100 * accept[3] / accept[4]
    }else
    {
    accept.rho <- NA
    }
    accept.Sigma <- 100
    accept.final <- c(accept.beta, accept.phi, accept.rho, accept.Sigma)
    names(accept.final) <- c("beta", "phi", "rho", "Sigma")

    #### WAIC
    p.w <- sum(apply(samples.loglike,2, var), na.rm=TRUE)
    mean.like <- apply(exp(samples.loglike),2,mean)
    mean.min <- min(mean.like[mean.like>0])
    mean.like[mean.like==0] <- mean.min
    lppd <- sum(log(mean.like), na.rm=TRUE)
    WAIC <- -2 * (lppd - p.w)

    #### LMPL
    CPO <- 1/apply(exp(-samples.loglike), 2, mean)
    mean.min <- min(CPO[CPO>0])
    CPO[CPO==0] <- mean.min
    LMPL <- sum(log(CPO), na.rm=TRUE)

    #### DIC - computed per retained sample because Y itself is sampled via DA
    DIC.dist <- rep(NA, n.keep)
    p.d.dist <- rep(NA, n.keep)
    mean.deviance <- -2 * sum(samples.loglike, na.rm=TRUE) / nrow(samples.loglike)
    mean.beta <- matrix(apply(samples.beta, 2, mean), nrow=p, ncol=J, byrow=F)
    mean.phi <- matrix(apply(samples.phi, 2, mean), nrow=K, ncol=J, byrow=T)
    fitted.mean <- exp(X.standardised %*% mean.beta + mean.phi + offset)
    for(z in 1:n.keep)
    {
    Y.temp <- matrix(samples.Y[z, ], nrow=K, ncol=J, byrow=TRUE)
    deviance.fitted <- -2 * sum(dpois(x=as.numeric(t(Y.temp)), lambda=as.numeric(t(fitted.mean)), log=TRUE), na.rm=TRUE)
    p.d.dist[z] <- mean.deviance - deviance.fitted
    DIC.dist[z] <- deviance.fitted + 2 * p.d.dist[z]
    }
    DIC <- mean(DIC.dist)
    p.d <- mean(p.d.dist)
    modelfit <- c(DIC, p.d, WAIC, p.w, LMPL)
    names(modelfit) <- c("DIC", "p.d", "WAIC", "p.w", "LMPL")

    #### transform the parameters back to the original covariate scale.
    samples.beta.orig <- samples.beta
    for(r in 1:J)
    {
    samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta[ ,((r-1)*p+1):(r*p) ], X.indicator, X.mean, X.sd, p, FALSE)
    }

    #### Create a summary object
    samples.beta.orig <- mcmc(samples.beta.orig)
    summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
    ## Bug fix: both replicated columns must have length J*p (one entry per
    ## beta coefficient); the old code relied on silent recycling / extension.
    summary.beta <- cbind(summary.beta, rep(n.keep, J*p), rep(accept.beta, J*p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
    ## Bug fix: the label vector needs p labels for each of the J outcomes;
    ## it was previously allocated with length p*(J-1) and only worked because
    ## R silently extends vectors on out-of-bounds assignment.
    col.name <- rep(NA, p*J)
    if(is.null(colnames(Y)))
    {
    for(r in 1:J)
    {
    col.name[((r-1)*p+1):(r*p)] <- paste("Variable ", r, " - ", colnames(X), sep="")
    }
    }else
    {
    for(r in 1:J)
    {
    col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[r], " - ", colnames(X), sep="")
    }
    }
    rownames(summary.beta) <- col.name
    colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")

    summary.hyper <- array(NA, c((J+1) ,7))
    summary.hyper[1:J, 1] <- diag(apply(samples.Sigma, c(2,3), quantile, c(0.5)))
    summary.hyper[1:J, 2] <- diag(apply(samples.Sigma, c(2,3), quantile, c(0.025)))
    summary.hyper[1:J, 3] <- diag(apply(samples.Sigma, c(2,3), quantile, c(0.975)))
    summary.hyper[1:J, 4] <- n.keep
    summary.hyper[1:J, 5] <- accept.Sigma
    summary.hyper[1:J, 6] <- diag(apply(samples.Sigma, c(2,3), effectiveSize))
    for(r in 1:J)
    {
    summary.hyper[r, 7] <- geweke.diag(samples.Sigma[ ,r,r])$z
    }
    if(!fix.rho)
    {
    summary.hyper[(J+1), 1:3] <- quantile(samples.rho, c(0.5, 0.025, 0.975))
    summary.hyper[(J+1), 4:7] <- c(n.keep, accept.rho, effectiveSize(samples.rho), geweke.diag(samples.rho)$z)
    }else
    {
    summary.hyper[(J+1), 1:3] <- c(rho, rho, rho)
    summary.hyper[(J+1), 4:7] <- rep(NA, 4)
    }
    summary.results <- rbind(summary.beta, summary.hyper)
    rownames(summary.results)[((J*p)+1): nrow(summary.results)] <- c(paste(rep("Sigma",J), 1:J, 1:J, sep=""), "rho")
    summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
    summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)

    #### Create the fitted values and residuals
    fitted.values <- matrix(apply(samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
    ## Residuals are not meaningful here because Y is itself imputed by DA
    #response.residuals <- Y - fitted.values
    #pearson.residuals <- response.residuals / sqrt(fitted.values)
    #residuals <- list(response=response.residuals, pearson=pearson.residuals)
    residuals <- NA

    #### Compile and return the results
    model.string <- c("Likelihood model - Poisson (log link function) with data augmentation", "\nRandom effects model - Leroux MCAR\n")
    if(fix.rho) samples.rho <- NA
    samples <- list(beta=samples.beta.orig, phi=mcmc(samples.phi), Sigma=samples.Sigma, rho=mcmc(samples.rho), fitted=mcmc(samples.fitted), Y=mcmc(samples.Y))
    results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, X=X)
    class(results) <- "CARBayes"

    #### Finish by stating the time taken
    if(verbose)
    {
    b<-proc.time()
    cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
    }else
    {}
    return(results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cite_data.R
\name{cite_climate_data}
\alias{cite_climate_data}
\title{Returns the references associated with the climate data used to fit the \code{pdfs}.}
\usage{
cite_climate_data(x, verbose = TRUE)
}
\arguments{
\item{x}{A \code{\link{crestObj}} produced by one of the \code{\link{crest}},
\code{\link{crest.get_modern_data}}, \code{\link{crest.calibrate}},
\code{\link{crest.reconstruct}} or \code{\link{loo}} functions.}
\item{verbose}{A boolean to print non-essential comments on the terminal
(default \code{TRUE}).}
}
\value{
A list of references to add if the data generated by crestr are published.
}
\description{
Returns the references associated with the climate data used to fit the \code{pdfs}.
}
|
/man/cite_climate_data.Rd
|
permissive
|
mchevalier2/crestr
|
R
| false
| true
| 790
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cite_data.R
\name{cite_climate_data}
\alias{cite_climate_data}
\title{Returns the references associated with the climate data used to fit the \code{pdfs}.}
\usage{
cite_climate_data(x, verbose = TRUE)
}
\arguments{
\item{x}{A \code{\link{crestObj}} produced by one of the \code{\link{crest}},
\code{\link{crest.get_modern_data}}, \code{\link{crest.calibrate}},
\code{\link{crest.reconstruct}} or \code{\link{loo}} functions.}
\item{verbose}{A boolean to print non-essential comments on the terminal
(default \code{TRUE}).}
}
\value{
A list of references to add if the data generated by crestr are published.
}
\description{
Returns the references associated with the climate data used to fit the \code{pdfs}.
}
|
# Gibbs sampler for Hurricane Error Fields
# LM2: Constant mean instead of landfall-location-specific mean
# Steve Walsh Feb 2020
# pdf("pdf/Gibbs/GibbsSamplerHurrRegr_EMPBAYESIW_NA_GHG_sqrt.pdf")
# remove(list=ls())
# load("NAM-Model-Validation/RData/par_optim_allbut43.RData")
# rm(list=setdiff(ls(), c("par_optim")))
# set.seed(489)
suppressMessages(library(MCMCpack)) #riwish (inverse wishart draws)
suppressMessages(library(LaplacesDemon)) #rmvn, rmatrixnorm (multivariate normal draws)
# Use TRUE/FALSE rather than the re-assignable shorthand T/F.
# If TRUE, use the MLE/Hessian files computed after subtracting the pointwise mean.
subtractPWmean <- FALSE
rounder <- 9 # number of decimal places to round matrices so solve() doesn't induce asymmetry
# nsims <- 46 #number of theta_i, i \in 1:nsims; number of simulated storms
# all_avgHess <- T # if FALSE, must have nsims <= 46
# if(!all_avgHess & nsims > 46) nsims <- 46
#turns vector of length n^2 into nxn square matrix
as.square <- function(mat){matrix(mat, nrow=sqrt(length(mat)),ncol=sqrt(length(mat)))}
#testy <- matrix(rnorm(10000),100,100); all.equal(testy, as.square(as.vector(testy)))
# These are the actual hurricane estimates
# lambda_hat <- all_storm_res[,c("MLEsigma2","MLEphi")]
# Load the per-storm MLEs (sigma2, phi) from csv, excluding simulation files,
# and build theta_hat on the log scale used by the Gibbs sampler.
# Fixed: spell out TRUE/FALSE instead of the reassignable T/F shorthands.
stormMLEfiles <- grep(list.files("csv/myMLEresults/myMLEs", full.names = TRUE, recursive = FALSE),
pattern='sim_vals', invert=TRUE, value=TRUE)
ind <- grepl("subtractPWmean", stormMLEfiles)
if(subtractPWmean){
myMLEfiles <- stormMLEfiles[ind]
} else {
myMLEfiles <- stormMLEfiles[!ind]
}
myMLEs <- do.call(rbind, lapply(myMLEfiles, read.csv))
lambda_hat <- cbind(myMLEs$sigs,myMLEs$phis)
N <- nrow(lambda_hat) # number of storms, 47
P <- ncol(lambda_hat) # number of params, theta1 and theta2
R <- 3 #number of landfall locations (ATL, FL, GULF)
# for(i in 1:N){ lambda_hat[i,] <- par_optim[[1]][[i]] }
# colnames(lambda_hat) <- c("MLEsigma2","MLEphi","MLEkappa")
# theta1 = log(sigma2/phi), theta2 = log(sigma2)
theta_hat <- cbind(log(lambda_hat[,1]/lambda_hat[,2]), log(lambda_hat[,1]))
# Load the per-storm Hessians (negated and symmetrised), validating that each
# is symmetric and positive definite.
# Fixed: full.names = TRUE (not T), seq_len(N) (safe for N == 0), and the
# garbled error message "is not symm,etric" -> "is not symmetric".
hessians <- list()
all_hess_theta_files <- list.files("csv/myMLEresults/pkgthetahessvecs", full.names = TRUE)
ind <- grepl("subtractPWmean", all_hess_theta_files)
if(subtractPWmean){
hess_theta_files <- all_hess_theta_files[ind]
} else {
hess_theta_files <- all_hess_theta_files[!ind]
}
if(length(hess_theta_files) != N){stop("number of MLEs != number of Hessians")}
for (i in seq_len(N)) {
hess <- read.csv(hess_theta_files[i], row.names = 1)
hess_mtx <- as.square(as.numeric(-1*hess))
# Symmetrise to remove tiny numerical asymmetries from the csv round-trip
hessians[[i]] <- (hess_mtx + t(hess_mtx))/2
if(!isSymmetric(hessians[[i]])){stop(paste("storm",i,"is not symmetric"))}
if(!is.positive.definite(hessians[[i]])){stop(paste("storm",i,"is not pos def"))}
}
# Overall MLE mean on the theta (log) scale; used as the empirical-Bayes centre
theta_bar <- apply(theta_hat, 2, mean)
# true_Sigma_theta <- cov(theta_hat)
# read in location and intensity info
loc_int <- read.csv("csv/storm_levels_and_locs.csv", row.names = 1)#[avail,]#[-43,]
colnames(loc_int) <- c("int","loc")
# nA <- table(loc_int$loc)[1]
# nF <- table(loc_int$loc)[2]
# nG <- table(loc_int$loc)[3]
# Model matrix for locations
x <- with(loc_int, model.matrix(~ loc))
sum_x_xt <- matrix(0, R, R)
for(i in 1:dim(x)[1]){sum_x_xt <- sum_x_xt + x[i,]%*%t(x[i,])}
V <- round(solve(sum_x_xt), rounder)
sum_thhat_xt <- matrix(0, P, R)
for(j in 1:N){sum_thhat_xt <- sum_thhat_xt+ matrix(theta_hat[j,],2,1)%*%t(x[j,])}
# Least-squares regression coefficient matrix of theta_hat on location dummies
hat_M<- sum_thhat_xt %*% round(solve(sum_x_xt), rounder)
# # obtain mean vector by location
theta_A <- lm(theta_hat ~ loc_int$loc)$coefficients[1,]
theta_F <- lm(theta_hat ~ loc_int$loc)$coefficients[2,]
theta_G <- lm(theta_hat ~ loc_int$loc)$coefficients[3,]
# # cbind(theta_A, theta_F, theta_G) and hat_M are approximately equal
# obtain covariance matrix by location
# can't use. not enough degrees of freedom
# use common sigma_theta across locations: cov(theta_hat)
# cov_A <- cov(filter(as.data.frame(theta_hat), loc_int$loc=="ATL"))
# cov_F <- cov(filter(as.data.frame(theta_hat), loc_int$loc=="FL"))
# cov_G <- cov(filter(as.data.frame(theta_hat), loc_int$loc=="GULF"))
# Generate simulated data based on the hurricane data:
# Simulate theta_i, i=1:nsims, from a normal
avgHess <- Reduce("+", hessians) / length(hessians)
# is.positive.definite(avgHess) is TRUE, not for solve(avgHess)
# theta_i_sim <- theta_i_hat_sim <- matrix(NA, nrow = nsims, ncol = 3)
# colnames(theta_i_sim) <- colnames(theta_i_hat_sim) <- c("MLEsigma2","MLEphi","MLEkappa")
# for (i in 1:nsims) {
# # theta_i iid \sim N(\bar\mu_\theta, \Sigma_\theta)
# theta_i_sim[i,] <- rmvn(1, t(hat_M%*%x[i,]), true_Sigma_theta)
# if(all_avgHess){
# theta_i_hat_sim[i,] <- rmvn(1, theta_i_sim[i,], round(solve(avgHess), rounder)) #all covmats the same
# hessians[[i]] <- avgHess
# }else{
# theta_i_hat_sim[i,] <- rmvn(1, theta_i_sim[i,], round(solve(hessians[[i]]), rounder))} #or each covmat H_i^-1
# }
par(mfrow=c(1,P))
for (i in 1:P) {hist(theta_hat[,i], main=paste("theta_",i))}
# for (i in 1:3) {hist(log(theta_hat[,i]), main=paste("log theta_",i))}
# for (i in 1:3) {hist(theta_i_hat_sim[,i], main=paste("theta_",i,"hat"))}
# Change the actual data to the simulated data:
# hessians changed to avgHess 9 lines above # hessians[[i]] <- avgHess
# theta_hat <- theta_i_hat_sim
# For full conditionals, will need:
#
# N=47
# \bar\theta and \hat\theta_i
# H_i (hessian for theta_i)
#
# B, x_i
# \Sigma_\theta (and its inverse)
# v_0, S_0
# set.seed(Sys.time()) #seed seems to stay the same from the loaded data
#Gibbs sampler for B, theta_i and Sigma_theta
iters <- 10100
burn <- 100
# mu_theta <- matrix(NA, nrow = iters, ncol = P)
# Chains: Sigma_theta rows hold the P x P covariance vectorised column-major
Sigma_theta <- matrix(NA, nrow = iters, ncol = P^2)
mu_theta <- matrix(NA, nrow = iters, ncol = P)
theta <- list()
for (j in 1:N) {
theta[[j]] <- matrix(NA, nrow = iters, ncol = P)
theta[[j]][1,] <- c(100,100)
}
# Initial values for Markov chains
# mu_theta[1,] <- rep(100, P)
Sigma_theta[1,] <- diag(rep(100, P))
mu_theta[1,] <- rep(100, P)
#IW hyperparameters
v0 <- P + 1
S0 <- v0*cov(theta_hat) #EMPIRICAL BAYES
for (i in 2:iters) {
#i <- 2; j <- 1
if(i %% 2000 ==0) print(i)
# Update mu_theta
# NOTE(review): conditions on theta_bar (the MLE average) rather than the
# current theta draws -- appears deliberate for this constant-mean LM2
# variant; confirm against the model write-up.
mu_theta[i,] <- rmvn(1, theta_bar, as.square(Sigma_theta[i-1,])/N)
# Update Sigma_theta
L <- matrix(0, ncol = P, nrow = P)
for(j in 1:N){
# Variance of theta_i
Vtheta_i <- solve(hessians[[j]] + solve(as.square(Sigma_theta[i-1,])))
Vtheta_i <- round(Vtheta_i, rounder)
# Expectation of theta_i
Etheta_i <- Vtheta_i %*% ((hessians[[j]]%*%theta_hat[j,]) +
(solve(as.square(Sigma_theta[i-1,]))%*%mu_theta[i,]))
theta[[j]][i,] <- rmvn(1, t(Etheta_i), Vtheta_i)
# L is the component of the scale matrix from the likelihood for the inverse wishart draw
L <- L + round((theta[[j]][i,]-(mu_theta[i,]))%*%t(theta[[j]][i,]-(mu_theta[i,])), rounder)
}
# The P x P riwish draw is stored vectorised into one row (column-major)
Sigma_theta[i,] <- riwish(N + v0, L + S0)
# Sigma_theta[i,] <- LaplacesDemon::rsiw(nu = v0, S = L + diag(3), mu = c(0,0,0), delta = c(1,1,1))
Sigma_theta[i,] <- round(Sigma_theta[i,], rounder)
}
# Labels for the two parameters on the log scale
theta_cols <- c("sigma2","phi")
# Indices of the post-burn-in draws
keep_idx <- (burn + 1):iters
# Drop the burn-in portion of each chain
mu_burn <- mu_theta[keep_idx, ]
Sigma_burn <- Sigma_theta[keep_idx, ]
theta_burn <- lapply(theta, function(draws) draws[keep_idx, ])
# Histograms and trace plots for B
par(mfrow=c(1,P))
# Posterior of mu_theta vs the MLE average (blue) and posterior mean (green)
for(i in 1:P) {
hist(mu_burn[,i], main = paste("mu_theta",i))
abline(v=theta_bar[i], lwd=2, col="blue")
abline(v=colMeans(mu_burn)[i], lwd=2, col="green")
}
for(i in 1:(P)) {
plot(mu_burn[,i], main = paste("mu_theta",i), type="l")
abline(h=theta_bar[i], lwd=2, col="blue")
abline(h=colMeans(mu_burn)[i], lwd=2, col="green")
}
# Trace plots for individual storms' parameters
for (j in 1:N){#c(1:5,(nsims-5):nsims)) {
for(i in 1:P) {
plot(theta_burn[[j]][,i], main = paste("storm",j,loc_int$loc[j],theta_cols[i]), type="l")
# ylim=c(apply(theta_hat, 2, min)[i] - 0.2, apply(theta_hat, 2, max)[i] + 0.2))
# abline(h=theta_bar[i], lwd=2, col="red")
abline(h=colMeans(theta_burn[[j]])[i], lwd=2, col="green")
# abline(h=theta_i_sim[j,i], lwd=2, col="blue")
abline(h=theta_hat[j,i], lwd=2, col="blue")
}
}
# Variance histograms
# true_sigma_theta: generates the sims (from the actual storm data) (blue)
# cov(theta_hat): the covariance estimated from the sims (orange)
# par(mfrow=c(1,3))
# Variance histograms
for (i in c(1,4)) { #1,4 are the diagonals of a vectorized 2x2 matrix
hist(as.vector(Sigma_theta[(burn+1):iters,i]), main = paste("Variance of theta_[",i,"]"))
abline(v=as.vector(cov(theta_hat))[i], lwd=2, col="blue")
# abline(v=as.vector(true_Sigma_theta)[i], lwd=2, col="blue")
abline(v=apply(Sigma_theta[(burn+1):iters,],2,mean)[i], lwd=2, col="green")
}
# Covariance histograms
for (i in 1:P){
for(j in 1:P){
# if(i!=j) next
hist((Sigma_theta[(burn+1):iters, (i-1)*P+j]), main = paste("Covariance of theta_[",i,",",j,"]"))
abline(v=as.vector(cov(theta_hat)[(i-1)*P+j]), lwd=2, col="blue")
# abline(v=as.vector(true_Sigma_theta[(i-1)*P+j]), lwd=2, col="blue")
abline(v=apply(Sigma_theta[(burn+1):iters,],2,mean)[(i-1)*P+j], lwd=2, col="green")
}
}
# #Correlations between parts of regression coefficient matrix B
# for (i in 1:P^2) {
# j <- i + 1
# while(j <= P^2) {
# plot(B[(burn+1):iters,i], B[(burn+1):iters,j],
# main=paste("cor of B",i,",",j,":",
# round(cor(B[(burn+1):iters,i], B[(burn+1):iters,j]),3)))
# print(round(cor(B[(burn+1):iters,i], B[(burn+1):iters,j]),3))
# j <- j + 1
# }
# }
# Compare prior scale, posterior mean and empirical covariance side by side
var_results <- rbind(as.vector(S0), apply(Sigma_theta[(burn+1):iters,], 2,mean), as.vector(cov(theta_hat)))
rownames(var_results) <- c("S0", "Sigma_theta", "cov(theta_hat)")
var_results
rnorm(3) #to check for seed being the same... -1.6760883 -0.8578595 1.2997342
###### Evaluate coverage of the Gibbs sampler
# "true" data for sim study
# theta_i_sim
# theta_i_hat_sim
# hat_M
# true_Sigma_theta
# estimates from MCMC
# B_burn ; creates emp_B, emp_B_LB, emp_B_UB
# Sigma_burn ; creates emp_Sigma_theta, emp_Sigma_theta_LB, emp_Sigma_theta_UB
# theta_burn ; creates emp_thetaMED, emp_thetaLB, emp_thetaUB
# Per-storm posterior 95% credible bounds and medians for each parameter
emp_thetaLB <- emp_thetaMED <- emp_thetaUB <- matrix(NA, nrow = N, ncol = P)
for (i in 1:N) {
# emp_theta[i,] <- apply(theta[[i]], 2, mean)
emp_thetaLB[i,] <- apply(theta_burn[[i]], 2, function(x){quantile(x, 0.025)})
emp_thetaMED[i,]<- apply(theta_burn[[i]], 2, function(x){quantile(x, 0.5)})
emp_thetaUB[i,] <- apply(theta_burn[[i]], 2, function(x){quantile(x, 0.975)})
}
# This is expected to be 1; thetahat will be in the 95% credible interval
apply(theta_hat < emp_thetaUB & theta_hat > emp_thetaLB, 2, sum)/N
# Collect the "bad thetas": one of the true elements of theta_i
# isn't contained in the 95% credible interval
bad_thetas <- which(apply(theta_hat < emp_thetaUB & theta_hat > emp_thetaLB, 1, all)==F)
# cbind(emp_thetaLB[bad_thetas,], theta_i_sim[bad_thetas,], emp_thetaUB[bad_thetas,])
par(mfrow=c(1,P))
# Trace plots for (up to) the first three storms whose MLE falls outside the CI
for (j in head(bad_thetas,3)) {
for(i in 1:P) {
plot(theta[[j]][(burn+1):iters,i],
main = paste("storm",j,loc_int$loc[j],theta_cols[i]), type="l")#,
# ylim=c(apply(theta_hat, 2, min)[i] + 0.2, apply(theta_hat, 2, max)[i] - 0.2))
abline(h=apply(theta[[j]][(burn+1):iters,], 2, mean)[i], lwd=2, col="green")
abline(h=theta_hat[j,i], lwd=2, col="blue")
# abline(h=theta_i_hat_sim[j,i], lwd=2, col="orange")
abline(h=theta_bar[i], lwd=2, col="red")
}
}
# Check if each element of true B and Sigma_theta is contained in the credible intervals
emp_mu <- apply(mu_burn, 2, function(x){quantile(x,0.5)})
emp_mu_UB <- apply(mu_burn, 2, function(x){quantile(x,0.975)})
emp_mu_LB <- apply(mu_burn, 2, function(x){quantile(x,0.025)})
theta_bar < emp_mu_UB & theta_bar > emp_mu_LB
emp_Sigma_theta <- apply(Sigma_burn, 2, function(x){quantile(x,0.5)})
emp_Sigma_theta_LB <- apply(Sigma_burn, 2, function(x){quantile(x,0.025)})
emp_Sigma_theta_UB <- apply(Sigma_burn, 2, function(x){quantile(x,0.975)})
cov(theta_hat) < emp_Sigma_theta_UB & cov(theta_hat) > emp_Sigma_theta_LB
# Are the theta_i's close to the estimated \hat\theta^GS_i?
# emp_thetaMED - theta_i_hat_sim
apply(emp_thetaMED - theta_hat, 2, function(X)max(abs(X)))
apply(emp_thetaMED - theta_hat, 2, mean)
emp_mu - theta_bar
emp_Sigma_theta - cov(theta_hat)
# Shrinkage: distribution of posterior-median minus MLE for each parameter
for (i in 1:P) {
hist(emp_thetaMED[,i] - theta_hat[,i], main = paste("theta_hat_GS - theta,", colnames(theta_hat)[i]))
abline(v=0, lwd=2, col="green")
}
## Make plots comparing theta_i posterior density to
## theta_i_hat from MLE with corresponding Hessian^-1 variance
# pdf("NAM-Model-Validation/pdf/Gibbs/compare_postthetai_thetaihat_col.pdf")
par(mfrow=c(1,P))
# make it easier to see each of the densities in their corresponding plots
# (per-parameter vertical scaling factors for the overlaid normal densities)
smush <- c(.04,.15)
for (i in 1:P) {
# Empty canvas: MCMC posteriors drawn at y=0 (bottom), MLE densities at y=1 (top)
plot(0, 0, col = "white", xlab = "", ylab = "",
xlim=c(min(theta_hat[,i], emp_thetaMED[,i]),
max(theta_hat[,i], emp_thetaMED[,i])),
ylim=c(0,2), yaxt='n',
main = bquote(theta[.(i)]~"bottom and"~hat(theta)[.(i)]~"top"))
# mult_seg <- data.frame(x0 = c(0.7, 0.2, - 0.9, 0.5, - 0.2), # Create data frame with line-values
# y0 = c(0.5, 0.5, 0.6, - 0.3, 0.4),
# x1 = c(0, 0.2, 0.4, - 0.3, - 0.6),
# y1 = c(- 0.1, 0.3, - 0.6, - 0.8, 0.9))
segments(x0 = emp_thetaMED[,i], # Draw multiple lines
y0 = 0,
x1 = theta_hat[,i],
y1 = 1, col = as.factor(loc_int$loc))
points(x=emp_thetaMED[,i], y=rep(0, length(emp_thetaMED[,i])), col= as.factor(loc_int$loc))
points(x=theta_hat[,i], y=rep(1, length(emp_thetaMED[,i])), col= as.factor(loc_int$loc))
for (j in 1:N) {
#top row, theta hats
xseq <- seq(theta_hat[j,i]-3*sqrt(solve(hessians[[j]])[i,i]),
theta_hat[j,i]+3*sqrt(solve(hessians[[j]])[i,i]),
length.out = 1000)
lines(xseq,smush[i]*dnorm(xseq,
theta_hat[j,i],
sqrt(solve(hessians[[j]])[i,i]))+1, col=as.factor(loc_int$loc)[j],
lty = i)
#bottom row, thetas from MCMC
xseq <- seq(min(theta_burn[[j]][,i]),
max(theta_burn[[j]][,i]),
length.out = 1000)
lines(xseq, smush[i]*dnorm(xseq,
mean(theta_burn[[j]][,i]),
sd(theta_burn[[j]][,i])), col=as.factor(loc_int$loc)[j],
lty = i)
}
legend(max(theta_hat[,i], emp_thetaMED[,i])-
(max(theta_hat[,i], emp_thetaMED[,i])-min(theta_hat[,i], emp_thetaMED[,i]))*.4,
2,
legend=unique((loc_int$loc)),
col=unique(as.factor(loc_int$loc)), lty=1:3, cex=0.8)
}
# For each storm: column 1 = MLE SD from the inverse Hessian, column 2 = MCMC SD
theta1sds <- theta2sds <- matrix(NA,N,P)
for (i in 1:P) {
for (j in 1:N) {
if(i==1) theta1sds[j,] <- c(sqrt(solve(hessians[[j]])[i,i]),sd(theta_burn[[j]][,i]))
if(i==2) theta2sds[j,] <- c(sqrt(solve(hessians[[j]])[i,i]),sd(theta_burn[[j]][,i]))
}
}
par(mfrow=c(1,P))
plot(theta1sds, main = bquote(paste("SDs for "~ hat(theta)[1]~" from "~ bar(H)^{-1},~" and "~theta[1,MCMC])),
xlab = expression(hat(theta)[1]), ylab = expression(theta[1]), col=as.factor(loc_int$loc), pch=as.numeric(as.factor(loc_int$loc))-1,
xlim = range(theta1sds), ylim = range(theta1sds),
cex=2, cex.lab=2, cex.axis=2, cex.main=1.5, cex.sub=2)
abline(0,1)
plot(theta2sds, main = expression(paste("SDs for ",hat(theta)[2]," from ", bar(H)^{-1}," and ", theta[2,MCMC])),
xlab = expression(hat(theta)[2]), ylab = expression(theta[2]), col=as.factor(loc_int$loc), pch=as.numeric(as.factor(loc_int$loc))-1,
xlim = range(theta2sds), ylim = range(theta2sds),
cex=2, cex.lab=2, cex.axis=2, cex.main=1.5, cex.sub=2)
abline(0,1)
# dev.off()
# Comparing the different variance components
# > sqrt(diag(solve(avgHess)))
# MLEsigma2 MLEphi MLEkappa
# 0.016262686 0.022689820 0.006525455
# > sqrt(diag(cov(theta_hat)))
# MLEsigma2 MLEphi MLEkappa
# 0.2544399 0.2556946 0.0822819
# > apply(theta_burn[[1]], 2, sd)
# [1] 0.04515972 0.07133124 0.01014894
# > apply(theta_var_avg, 2, mean)
# [1] 0.05040922 0.07389148 0.01297306
# Average posterior SD across storms, for each parameter
theta_var_avg <- matrix(NA,N,P)
for(i in 1:N) theta_var_avg[i,] <- apply(theta_burn[[i]], 2, sd)
apply(theta_var_avg, 2, mean)
# Average MLE SD (from the inverse Hessians), for comparison
hess_inv <- matrix(NA, N, P)
for (i in 1:N) {
hess_inv[i,] <- sqrt(diag(solve(hessians[[i]])))
}
apply(hess_inv, 2, mean)
# Posterior Medians for each element of theta_i
burn_meds <- matrix(NA,N,P)
for (i in 1:N) {
burn_meds[i,] <- apply(theta_burn[[i]], 2, median)
}
# png("NAM-Model-Validation/png/burn_med_hist.png",width = 1400, height=1000)
par(mfrow=c(1,P))
hist(burn_meds[,1], xlab=expression(sigma^2), main=NULL, cex.axis=2, cex.lab=2)
hist(burn_meds[,2], xlab=expression(phi), main=NULL, cex.axis=2, cex.lab=2)
# dev.off()
hist(theta_hat[,1], xlab=expression(hat(sigma^2)[MLE]), main=NULL, cex.axis=2, cex.lab=2)
hist(theta_hat[,2], xlab=expression(hat(phi)[MLE]), main=NULL, cex.axis=2, cex.lab=2)
# dev.off()
rnorm(5)
emp_mu
matrix(emp_Sigma_theta, P, P)
# Persist the whole workspace for downstream scripts
save.image(file = paste0("RData/Gibbs_sqrt_LM2",
if(subtractPWmean){"_subtractPWmean"},".RData"))
|
/scripts/GibbsSamp_LM2.R
|
no_license
|
stevewalsh124/NAM-Model-Validation
|
R
| false
| false
| 17,137
|
r
|
# Gibbs sampler for Hurricane Error Fields
# LM2: Constant mean instead of landfall-location-specific mean
# Steve Walsh Feb 2020
# pdf("pdf/Gibbs/GibbsSamplerHurrRegr_EMPBAYESIW_NA_GHG_sqrt.pdf")
# remove(list=ls())
# load("NAM-Model-Validation/RData/par_optim_allbut43.RData")
# rm(list=setdiff(ls(), c("par_optim")))
# set.seed(489)
suppressMessages(library(MCMCpack)) #riwish (inverse wishart draws)
suppressMessages(library(LaplacesDemon)) #rmvn, rmatrixnorm (multivariate normal draws)
subtractPWmean <- F
rounder <- 9 #number of decimal places to round matrices so solve doesn't induce asymmetry
# nsims <- 46 #number of theta_i, i \in 1:nsims; number of simulated storms
# all_avgHess <- T # if FALSE, must have nsims <= 46
# if(!all_avgHess & nsims > 46) nsims <- 46
#turns vector of length n^2 into nxn square matrix
as.square <- function(mat){
  # Recover the square dimension n from the input length; guard against
  # inputs whose length is not a perfect square (matrix() would otherwise
  # recycle/truncate and silently return a corrupted matrix).
  n <- sqrt(length(mat))
  if (n %% 1 != 0) stop("length(mat) must be a perfect square")
  matrix(mat, nrow = n, ncol = n)
}
#testy <- matrix(rnorm(10000),100,100); all.equal(testy, as.square(as.vector(testy)))
# These are the actual hurricane estimates
# lambda_hat <- all_storm_res[,c("MLEsigma2","MLEphi")]
# List per-storm MLE csv files, excluding simulation outputs ('sim_vals').
stormMLEfiles <- grep(list.files("csv/myMLEresults/myMLEs", full.names = T, recursive = F),
                      pattern='sim_vals', invert=TRUE, value=TRUE)
# Keep only the files matching the subtractPWmean setting chosen above.
ind <- grepl("subtractPWmean", stormMLEfiles)
if(subtractPWmean){
  myMLEfiles <- stormMLEfiles[ind]
} else {
  myMLEfiles <- stormMLEfiles[!ind]
}
myMLEs <- do.call(rbind, lapply(myMLEfiles, read.csv))
lambda_hat <- cbind(myMLEs$sigs,myMLEs$phis)
N <- nrow(lambda_hat) # number of storms, 47
P <- ncol(lambda_hat) # number of params, theta1 and theta2
R <- 3 #number of landfall locations (ATL, FL, GULF)
# for(i in 1:N){ lambda_hat[i,] <- par_optim[[1]][[i]] }
# colnames(lambda_hat) <- c("MLEsigma2","MLEphi","MLEkappa")
# Reparameterize (sigma2, phi) on the log scale: theta = (log(sigma2/phi), log(sigma2)).
theta_hat <- cbind(log(lambda_hat[,1]/lambda_hat[,2]), log(lambda_hat[,1]))
hessians <- list()
# Per-storm Hessians of the log-likelihood at the MLE, stored as vectorized matrices.
all_hess_theta_files <- list.files("csv/myMLEresults/pkgthetahessvecs", full.names = T)
ind <- grepl("subtractPWmean", all_hess_theta_files)
if(subtractPWmean){
  hess_theta_files <- all_hess_theta_files[ind]
} else {
  hess_theta_files <- all_hess_theta_files[!ind]
}
if(length(hess_theta_files) != N){stop("number of MLEs != number of Hessians")}
# Read each storm's Hessian, negate it (observed information), symmetrize it
# numerically, and verify it is a valid (symmetric, positive-definite)
# precision matrix before it is used in the Gibbs updates.
for (i in 1:N) {
  hess <- read.csv(hess_theta_files[i], row.names = 1)
  hess_mtx <- as.square(as.numeric(-1*hess))
  # Average with the transpose to remove tiny asymmetries from file round-trip.
  hessians[[i]] <- (hess_mtx + t(hess_mtx))/2
  # Fixed typo in the error message ("symm,etric" -> "symmetric").
  if(!isSymmetric(hessians[[i]])){stop(paste("storm",i,"is not symmetric"))}
  if(!is.positive.definite(hessians[[i]])){stop(paste("storm",i,"is not pos def"))}
}
# Grand mean of the transformed MLEs across all storms (used as the constant mean).
theta_bar <- apply(theta_hat, 2, mean)
# true_Sigma_theta <- cov(theta_hat)
# read in location and intensity info
loc_int <- read.csv("csv/storm_levels_and_locs.csv", row.names = 1)#[avail,]#[-43,]
colnames(loc_int) <- c("int","loc")
# nA <- table(loc_int$loc)[1]
# nF <- table(loc_int$loc)[2]
# nG <- table(loc_int$loc)[3]
# Model matrix for locations
x <- with(loc_int, model.matrix(~ loc))
# Accumulate sum_i x_i x_i' (R x R) for the regression normal equations.
sum_x_xt <- matrix(0, R, R)
for(i in 1:dim(x)[1]){sum_x_xt <- sum_x_xt + x[i,]%*%t(x[i,])}
V <- round(solve(sum_x_xt), rounder)
# Accumulate sum_i theta_hat_i x_i' (P x R); hat_M is the least-squares
# coefficient matrix for the location regression.
sum_thhat_xt <- matrix(0, P, R)
for(j in 1:N){sum_thhat_xt <- sum_thhat_xt+ matrix(theta_hat[j,],2,1)%*%t(x[j,])}
hat_M<- sum_thhat_xt %*% round(solve(sum_x_xt), rounder)
# # obtain mean vector by location
theta_A <- lm(theta_hat ~ loc_int$loc)$coefficients[1,]
theta_F <- lm(theta_hat ~ loc_int$loc)$coefficients[2,]
theta_G <- lm(theta_hat ~ loc_int$loc)$coefficients[3,]
# # cbind(theta_A, theta_F, theta_G) and hat_M are approximately equal
# obtain covariance matrix by location
# can't use. not enough degrees of freedom
# use common sigma_theta across locations: cov(theta_hat)
# cov_A <- cov(filter(as.data.frame(theta_hat), loc_int$loc=="ATL"))
# cov_F <- cov(filter(as.data.frame(theta_hat), loc_int$loc=="FL"))
# cov_G <- cov(filter(as.data.frame(theta_hat), loc_int$loc=="GULF"))
# Generate simulated data based on the hurricane data:
# Simulate theta_i, i=1:nsims, from a normal
# Element-wise average of all per-storm Hessians.
avgHess <- Reduce("+", hessians) / length(hessians)
# is.positive.definite(avgHess) is TRUE, not for solve(avgHess)
# theta_i_sim <- theta_i_hat_sim <- matrix(NA, nrow = nsims, ncol = 3)
# colnames(theta_i_sim) <- colnames(theta_i_hat_sim) <- c("MLEsigma2","MLEphi","MLEkappa")
# for (i in 1:nsims) {
#   # theta_i iid \sim N(\bar\mu_\theta, \Sigma_\theta)
#   theta_i_sim[i,] <- rmvn(1, t(hat_M%*%x[i,]), true_Sigma_theta)
#   if(all_avgHess){
#     theta_i_hat_sim[i,] <- rmvn(1, theta_i_sim[i,], round(solve(avgHess), rounder)) #all covmats the same
#     hessians[[i]] <- avgHess
#   }else{
#     theta_i_hat_sim[i,] <- rmvn(1, theta_i_sim[i,], round(solve(hessians[[i]]), rounder))} #or each covmat H_i^-1
# }
par(mfrow=c(1,P))
for (i in 1:P) {hist(theta_hat[,i], main=paste("theta_",i))}
# for (i in 1:3) {hist(log(theta_hat[,i]), main=paste("log theta_",i))}
# for (i in 1:3) {hist(theta_i_hat_sim[,i], main=paste("theta_",i,"hat"))}
# For full conditionals, will need:
#
# N=47
# \bar\theta and \hat\theta_i
# H_i (hessian for theta_i)
#
# B, x_i
# \Sigma_\theta (and its inverse)
# v_0, S_0
# set.seed(Sys.time()) #seed seems to stay the same from the loaded data
#Gibbs sampler for B, theta_i and Sigma_theta
iters <- 10100 # total MCMC iterations
burn <- 100    # iterations discarded as burn-in
# mu_theta <- matrix(NA, nrow = iters, ncol = P)
# Each row of Sigma_theta holds a vectorized (column-major) P x P covariance matrix.
Sigma_theta <- matrix(NA, nrow = iters, ncol = P^2)
mu_theta <- matrix(NA, nrow = iters, ncol = P)
# theta[[j]] holds the chain of storm j's P-dimensional parameter.
theta <- list()
for (j in 1:N) {
  theta[[j]] <- matrix(NA, nrow = iters, ncol = P)
  theta[[j]][1,] <- c(100,100)
}
# Initial values for Markov chains
# mu_theta[1,] <- rep(100, P)
# diag() yields a P x P matrix; the row assignment stores it column-major.
Sigma_theta[1,] <- diag(rep(100, P))
mu_theta[1,] <- rep(100, P)
#IW hyperparameters
v0 <- P + 1
S0 <- v0*cov(theta_hat) #EMPIRICAL BAYES
# Main Gibbs loop: alternately draw mu_theta | Sigma_theta, each storm's
# theta_i | (mu_theta, Sigma_theta, theta_hat_i, H_i), and then
# Sigma_theta | {theta_i} from its inverse-Wishart full conditional.
for (i in 2:iters) {
  #i <- 2; j <- 1
  if(i %% 2000 ==0) print(i)
  # Update mu_theta
  mu_theta[i,] <- rmvn(1, theta_bar, as.square(Sigma_theta[i-1,])/N)
  # Update Sigma_theta
  L <- matrix(0, ncol = P, nrow = P)
  for(j in 1:N){
    # Variance of theta_i
    Vtheta_i <- solve(hessians[[j]] + solve(as.square(Sigma_theta[i-1,])))
    Vtheta_i <- round(Vtheta_i, rounder)
    # Expectation of theta_i
    Etheta_i <- Vtheta_i %*% ((hessians[[j]]%*%theta_hat[j,]) +
                                (solve(as.square(Sigma_theta[i-1,]))%*%mu_theta[i,]))
    theta[[j]][i,] <- rmvn(1, t(Etheta_i), Vtheta_i)
    # L is the component of the scale matrix from the likelihood for the inverse wishart draw
    L <- L + round((theta[[j]][i,]-(mu_theta[i,]))%*%t(theta[[j]][i,]-(mu_theta[i,])), rounder)
  }
  # riwish returns a P x P matrix; the row assignment flattens it column-major
  # (undone later by as.square).
  Sigma_theta[i,] <- riwish(N + v0, L + S0)
  # Sigma_theta[i,] <- LaplacesDemon::rsiw(nu = v0, S = L + diag(3), mu = c(0,0,0), delta = c(1,1,1))
  Sigma_theta[i,] <- round(Sigma_theta[i,], rounder)
}
theta_cols <- c("sigma2","phi")
# Discard burn-in from every chain.
mu_burn <- mu_theta[(burn+1):iters,]
Sigma_burn <- Sigma_theta[(burn+1):iters,]
theta_burn <- list()
for (i in 1:N) {theta_burn[[i]] <- theta[[i]][(burn+1):iters,]}
# Histograms and trace plots for B
par(mfrow=c(1,P))
# Blue = theta_bar (data grand mean); green = posterior mean of mu_theta.
for(i in 1:P) {
  hist(mu_burn[,i], main = paste("mu_theta",i))
  abline(v=theta_bar[i], lwd=2, col="blue")
  abline(v=colMeans(mu_burn)[i], lwd=2, col="green")
}
for(i in 1:(P)) {
  plot(mu_burn[,i], main = paste("mu_theta",i), type="l")
  abline(h=theta_bar[i], lwd=2, col="blue")
  abline(h=colMeans(mu_burn)[i], lwd=2, col="green")
}
# Trace plots for individual storms' parameters
for (j in 1:N){#c(1:5,(nsims-5):nsims)) {
  for(i in 1:P) {
    plot(theta_burn[[j]][,i], main = paste("storm",j,loc_int$loc[j],theta_cols[i]), type="l")
    # ylim=c(apply(theta_hat, 2, min)[i] - 0.2, apply(theta_hat, 2, max)[i] + 0.2))
    # abline(h=theta_bar[i], lwd=2, col="red")
    abline(h=colMeans(theta_burn[[j]])[i], lwd=2, col="green")
    # abline(h=theta_i_sim[j,i], lwd=2, col="blue")
    abline(h=theta_hat[j,i], lwd=2, col="blue")
  }
}
# Variance histograms
# true_sigma_theta: generates the sims (from the actual storm data) (blue)
# cov(theta_hat): the covariance estimated from the sims (orange)
# par(mfrow=c(1,3))
# Variance histograms
for (i in c(1,4)) { #1,4 are the diagonals of a vectorized 2x2 matrix
  hist(as.vector(Sigma_theta[(burn+1):iters,i]), main = paste("Variance of theta_[",i,"]"))
  abline(v=as.vector(cov(theta_hat))[i], lwd=2, col="blue")
  # abline(v=as.vector(true_Sigma_theta)[i], lwd=2, col="blue")
  abline(v=apply(Sigma_theta[(burn+1):iters,],2,mean)[i], lwd=2, col="green")
}
# Covariance histograms
# (i-1)*P+j indexes element (j,i) of the column-major vectorized P x P matrix.
for (i in 1:P){
  for(j in 1:P){
    # if(i!=j) next
    hist((Sigma_theta[(burn+1):iters, (i-1)*P+j]), main = paste("Covariance of theta_[",i,",",j,"]"))
    abline(v=as.vector(cov(theta_hat)[(i-1)*P+j]), lwd=2, col="blue")
    # abline(v=as.vector(true_Sigma_theta[(i-1)*P+j]), lwd=2, col="blue")
    abline(v=apply(Sigma_theta[(burn+1):iters,],2,mean)[(i-1)*P+j], lwd=2, col="green")
  }
}
# #Correlations between parts of regression coefficient matrix B
# for (i in 1:P^2) {
#   j <- i + 1
#   while(j <= P^2) {
#     plot(B[(burn+1):iters,i], B[(burn+1):iters,j],
#          main=paste("cor of B",i,",",j,":",
#                     round(cor(B[(burn+1):iters,i], B[(burn+1):iters,j]),3)))
#     print(round(cor(B[(burn+1):iters,i], B[(burn+1):iters,j]),3))
#     j <- j + 1
#   }
# }
# Compare prior scale, posterior mean of Sigma_theta, and empirical covariance.
var_results <- rbind(as.vector(S0), apply(Sigma_theta[(burn+1):iters,], 2,mean), as.vector(cov(theta_hat)))
rownames(var_results) <- c("S0", "Sigma_theta", "cov(theta_hat)")
var_results
rnorm(3) #to check for seed being the same... -1.6760883 -0.8578595 1.2997342
###### Evaluate coverage of the Gibbs sampler
# "true" data for sim study
# theta_i_sim
# theta_i_hat_sim
# hat_M
# true_Sigma_theta
# estimates from MCMC
# B_burn ; creates emp_B, emp_B_LB, emp_B_UB
# Sigma_burn ; creates emp_Sigma_theta, emp_Sigma_theta_LB, emp_Sigma_theta_UB
# theta_burn ; creates emp_thetaMED, emp_thetaLB, emp_thetaUB
# Per-storm 95% credible interval bounds and medians from the MCMC draws.
emp_thetaLB <- emp_thetaMED <- emp_thetaUB <- matrix(NA, nrow = N, ncol = P)
for (i in 1:N) {
  # emp_theta[i,] <- apply(theta[[i]], 2, mean)
  emp_thetaLB[i,] <- apply(theta_burn[[i]], 2, function(x){quantile(x, 0.025)})
  emp_thetaMED[i,]<- apply(theta_burn[[i]], 2, function(x){quantile(x, 0.5)})
  emp_thetaUB[i,] <- apply(theta_burn[[i]], 2, function(x){quantile(x, 0.975)})
}
# This is expected to be 1; thetahat will be in the 95% credible interval
apply(theta_hat < emp_thetaUB & theta_hat > emp_thetaLB, 2, sum)/N
# Collect the "bad thetas": one of the true elements of theta_i
# isn't contained in the 95% credible interval
# NOTE(review): `==F` compares against the shorthand F; prefer `!` or `==FALSE`.
bad_thetas <- which(apply(theta_hat < emp_thetaUB & theta_hat > emp_thetaLB, 1, all)==F)
# cbind(emp_thetaLB[bad_thetas,], theta_i_sim[bad_thetas,], emp_thetaUB[bad_thetas,])
par(mfrow=c(1,P))
# Trace plots for up to three storms whose MLE fell outside its interval.
for (j in head(bad_thetas,3)) {
  for(i in 1:P) {
    plot(theta[[j]][(burn+1):iters,i],
         main = paste("storm",j,loc_int$loc[j],theta_cols[i]), type="l")#,
    # ylim=c(apply(theta_hat, 2, min)[i] + 0.2, apply(theta_hat, 2, max)[i] - 0.2))
    abline(h=apply(theta[[j]][(burn+1):iters,], 2, mean)[i], lwd=2, col="green")
    abline(h=theta_hat[j,i], lwd=2, col="blue")
    # abline(h=theta_i_hat_sim[j,i], lwd=2, col="orange")
    abline(h=theta_bar[i], lwd=2, col="red")
  }
}
# Check if each element of true B and Sigma_theta is contained in the credible intervals
emp_mu <- apply(mu_burn, 2, function(x){quantile(x,0.5)})
emp_mu_UB <- apply(mu_burn, 2, function(x){quantile(x,0.975)})
emp_mu_LB <- apply(mu_burn, 2, function(x){quantile(x,0.025)})
theta_bar < emp_mu_UB & theta_bar > emp_mu_LB
emp_Sigma_theta <- apply(Sigma_burn, 2, function(x){quantile(x,0.5)})
emp_Sigma_theta_LB <- apply(Sigma_burn, 2, function(x){quantile(x,0.025)})
emp_Sigma_theta_UB <- apply(Sigma_burn, 2, function(x){quantile(x,0.975)})
cov(theta_hat) < emp_Sigma_theta_UB & cov(theta_hat) > emp_Sigma_theta_LB
# Are the theta_i's close to the estimated \hat\theta^GS_i?
# emp_thetaMED - theta_i_hat_sim
apply(emp_thetaMED - theta_hat, 2, function(X)max(abs(X)))
apply(emp_thetaMED - theta_hat, 2, mean)
emp_mu - theta_bar
emp_Sigma_theta - cov(theta_hat)
# Histogram of shrinkage: posterior median minus MLE, per parameter.
for (i in 1:P) {
  hist(emp_thetaMED[,i] - theta_hat[,i], main = paste("theta_hat_GS - theta,", colnames(theta_hat)[i]))
  abline(v=0, lwd=2, col="green")
}
## Make plots comparing theta_i posterior density to
## theta_i_hat from MLE with corresponding Hessian^-1 variance
# pdf("NAM-Model-Validation/pdf/Gibbs/compare_postthetai_thetaihat_col.pdf")
par(mfrow=c(1,P))
# make it easier to see each of the densities in their corresponding plots
# Vertical scaling factor per parameter so the density curves fit the panel.
smush <- c(.04,.15)
for (i in 1:P) {
  # Empty canvas: y=0 row holds MCMC posteriors, y=1 row holds MLE densities.
  plot(0, 0, col = "white", xlab = "", ylab = "",
       xlim=c(min(theta_hat[,i], emp_thetaMED[,i]),
              max(theta_hat[,i], emp_thetaMED[,i])),
       ylim=c(0,2), yaxt='n',
       main = bquote(theta[.(i)]~"bottom and"~hat(theta)[.(i)]~"top"))
  # mult_seg <- data.frame(x0 = c(0.7, 0.2, - 0.9, 0.5, - 0.2),    # Create data frame with line-values
  #                        y0 = c(0.5, 0.5, 0.6, - 0.3, 0.4),
  #                        x1 = c(0, 0.2, 0.4, - 0.3, - 0.6),
  #                        y1 = c(- 0.1, 0.3, - 0.6, - 0.8, 0.9))
  # Connect each storm's posterior median (bottom) to its MLE (top).
  segments(x0 = emp_thetaMED[,i],                 # Draw multiple lines
           y0 = 0,
           x1 = theta_hat[,i],
           y1 = 1, col = as.factor(loc_int$loc))
  points(x=emp_thetaMED[,i], y=rep(0, length(emp_thetaMED[,i])), col= as.factor(loc_int$loc))
  points(x=theta_hat[,i], y=rep(1, length(emp_thetaMED[,i])), col= as.factor(loc_int$loc))
  for (j in 1:N) {
    #top row, theta hats
    xseq <- seq(theta_hat[j,i]-3*sqrt(solve(hessians[[j]])[i,i]),
                theta_hat[j,i]+3*sqrt(solve(hessians[[j]])[i,i]),
                length.out = 1000)
    lines(xseq,smush[i]*dnorm(xseq,
                              theta_hat[j,i],
                              sqrt(solve(hessians[[j]])[i,i]))+1, col=as.factor(loc_int$loc)[j],
          lty = i)
    #bottom row, thetas from MCMC
    xseq <- seq(min(theta_burn[[j]][,i]),
                max(theta_burn[[j]][,i]),
                length.out = 1000)
    lines(xseq, smush[i]*dnorm(xseq,
                               mean(theta_burn[[j]][,i]),
                               sd(theta_burn[[j]][,i])), col=as.factor(loc_int$loc)[j],
          lty = i)
  }
  legend(max(theta_hat[,i], emp_thetaMED[,i])-
           (max(theta_hat[,i], emp_thetaMED[,i])-min(theta_hat[,i], emp_thetaMED[,i]))*.4,
         2,
         legend=unique((loc_int$loc)),
         col=unique(as.factor(loc_int$loc)), lty=1:3, cex=0.8)
}
# For each storm, pair the asymptotic MLE SD (from the inverse Hessian)
# with the MCMC posterior SD, one matrix per parameter.
theta1sds <- theta2sds <- matrix(NA,N,P)
for (i in 1:P) {
  for (j in 1:N) {
    if(i==1) theta1sds[j,] <- c(sqrt(solve(hessians[[j]])[i,i]),sd(theta_burn[[j]][,i]))
    if(i==2) theta2sds[j,] <- c(sqrt(solve(hessians[[j]])[i,i]),sd(theta_burn[[j]][,i]))
  }
}
par(mfrow=c(1,P))
# Scatter of MLE SD vs posterior SD; points on the identity line agree.
plot(theta1sds, main = bquote(paste("SDs for "~ hat(theta)[1]~" from "~ bar(H)^{-1},~" and "~theta[1,MCMC])),
     xlab = expression(hat(theta)[1]), ylab = expression(theta[1]), col=as.factor(loc_int$loc), pch=as.numeric(as.factor(loc_int$loc))-1,
     xlim = range(theta1sds), ylim = range(theta1sds),
     cex=2, cex.lab=2, cex.axis=2, cex.main=1.5, cex.sub=2)
abline(0,1)
plot(theta2sds, main = expression(paste("SDs for ",hat(theta)[2]," from ", bar(H)^{-1}," and ", theta[2,MCMC])),
     xlab = expression(hat(theta)[2]), ylab = expression(theta[2]), col=as.factor(loc_int$loc), pch=as.numeric(as.factor(loc_int$loc))-1,
     xlim = range(theta2sds), ylim = range(theta2sds),
     cex=2, cex.lab=2, cex.axis=2, cex.main=1.5, cex.sub=2)
abline(0,1)
# dev.off()
# Comparing the different variance components
# > sqrt(diag(solve(avgHess)))
#   MLEsigma2      MLEphi    MLEkappa
# 0.016262686 0.022689820 0.006525455
# > sqrt(diag(cov(theta_hat)))
# MLEsigma2    MLEphi  MLEkappa
# 0.2544399 0.2556946 0.0822819
# > apply(theta_burn[[1]], 2, sd)
# [1] 0.04515972 0.07133124 0.01014894
# > apply(theta_var_avg, 2, mean)
# [1] 0.05040922 0.07389148 0.01297306
# Posterior SD of each storm's theta_i from the MCMC draws (N x P matrix).
theta_var_avg <- matrix(NA,N,P)
for(i in 1:N) theta_var_avg[i,] <- apply(theta_burn[[i]], 2, sd)
apply(theta_var_avg, 2, mean)
# Asymptotic MLE standard errors: sqrt of the diagonal of each storm's inverse Hessian.
hess_inv <- matrix(NA, N, P)
for (i in 1:N) {
  hess_inv[i,] <- sqrt(diag(solve(hessians[[i]])))
}
apply(hess_inv, 2, mean)
# Posterior Medians for each element of theta_i
burn_meds <- matrix(NA,N,P)
for (i in 1:N) {
  burn_meds[i,] <- apply(theta_burn[[i]], 2, median)
}
# png("NAM-Model-Validation/png/burn_med_hist.png",width = 1400, height=1000)
par(mfrow=c(1,P))
# Histograms of posterior medians, then of the raw MLEs, for side-by-side comparison.
hist(burn_meds[,1], xlab=expression(sigma^2), main=NULL, cex.axis=2, cex.lab=2)
hist(burn_meds[,2], xlab=expression(phi), main=NULL, cex.axis=2, cex.lab=2)
# dev.off()
hist(theta_hat[,1], xlab=expression(hat(sigma^2)[MLE]), main=NULL, cex.axis=2, cex.lab=2)
hist(theta_hat[,2], xlab=expression(hat(phi)[MLE]), main=NULL, cex.axis=2, cex.lab=2)
# dev.off()
rnorm(5) # RNG state check (cf. the rnorm(3) seed check earlier in the script)
emp_mu
matrix(emp_Sigma_theta, P, P)
# Persist the full workspace; filename records the subtractPWmean setting.
save.image(file = paste0("RData/Gibbs_sqrt_LM2",
                         if(subtractPWmean){"_subtractPWmean"},".RData"))
|
#' Generate numeric summaries
#'
#' Generates numeric summaries, removing missing values
#'
#' @param x The numeric data
#' @param na.rm Should missing values be removed?
#'
#' @author Aimee Gott
#'
statSummary <- function(x, na.rm = TRUE){
  # Summarise a numeric vector as min / mean / variance / max,
  # dropping missing values by default.
  lims <- range(x, na.rm = na.rm)
  c("Min" = lims[1],
    "Mean" = mean(x, na.rm = na.rm),
    "Variance" = var(x, na.rm = na.rm),
    "Max" = lims[2])
}
|
/Packages/summaryTools/R/statSummary.R
|
no_license
|
R-forks-to-learn/Formal-Package-Development-Workshop
|
R
| false
| false
| 439
|
r
|
#' Generate numeric summaries
#'
#' Generates numeric summaries, removing missing values
#'
#' @param x The numeric data
#' @param na.rm Should missing values be removed?
#'
#' @author Aimee Gott
#'
statSummary <- function(x, na.rm = TRUE){
  # Summarise a numeric vector as min / mean / variance / max,
  # dropping missing values by default.
  lims <- range(x, na.rm = na.rm)
  c("Min" = lims[1],
    "Mean" = mean(x, na.rm = na.rm),
    "Variance" = var(x, na.rm = na.rm),
    "Max" = lims[2])
}
|
# Wrapper: draw posterior samples with coda/rjags and unpack latent factor
# matrices. Relies on variables defined by the calling script: model,
# jags.params, n.iter, n.chains, n.burnin, n.thin, N, M, D.
print("coda")
library(coda)
# NOTE(review): per the rjags docs, coda.samples(model, variable.names,
# n.iter, thin, ...) has no n.chains/n.adapt arguments -- those belong to
# jags.model(); here they would be passed through '...'. Confirm intended.
samples = coda.samples(model, jags.params, n.iter=n.iter,
                       n.chains=n.chains, n.adapt=n.burnin,
                       thin=n.thin)
#coda.samples
############################################################################
ss <- summary(samples)
# Posterior means of all monitored nodes, in coda's summary ordering.
m <- ss$statistics[,"Mean"]
#m <- s$statistics[,"Median"]
# Entries 4..(3+N*D) are taken as the N x D user-factor matrix and the
# remainder as the M x D item-factor matrix -- presumably matching the JAGS
# monitor order; TODO confirm layout against the model definition.
retrieved.u = matrix(m[4: (4+N*D-1)], N, D)
retrieved.v = matrix(m[(4+N*D): length(m)], M, D)
iterations = ceiling( (n.iter * n.chains)/n.thin ) #TODO
# Per-draw accessors are not available when only summaries are retained.
user_sample = function(i, k) { stop("user_sample function not implemented for coda"); }
item_sample = function(j, k) { stop("item_sample function not implemented for coda"); }
|
/wrappers/coda.R
|
no_license
|
tkusmierczyk/pmf-jags
|
R
| false
| false
| 690
|
r
|
# Wrapper: draw posterior samples with coda/rjags and unpack latent factor
# matrices. Relies on variables defined by the calling script: model,
# jags.params, n.iter, n.chains, n.burnin, n.thin, N, M, D.
print("coda")
library(coda)
# NOTE(review): per the rjags docs, coda.samples(model, variable.names,
# n.iter, thin, ...) has no n.chains/n.adapt arguments -- those belong to
# jags.model(); here they would be passed through '...'. Confirm intended.
samples = coda.samples(model, jags.params, n.iter=n.iter,
                       n.chains=n.chains, n.adapt=n.burnin,
                       thin=n.thin)
#coda.samples
############################################################################
ss <- summary(samples)
# Posterior means of all monitored nodes, in coda's summary ordering.
m <- ss$statistics[,"Mean"]
#m <- s$statistics[,"Median"]
# Entries 4..(3+N*D) are taken as the N x D user-factor matrix and the
# remainder as the M x D item-factor matrix -- presumably matching the JAGS
# monitor order; TODO confirm layout against the model definition.
retrieved.u = matrix(m[4: (4+N*D-1)], N, D)
retrieved.v = matrix(m[(4+N*D): length(m)], M, D)
iterations = ceiling( (n.iter * n.chains)/n.thin ) #TODO
# Per-draw accessors are not available when only summaries are retained.
user_sample = function(i, k) { stop("user_sample function not implemented for coda"); }
item_sample = function(j, k) { stop("item_sample function not implemented for coda"); }
|
#' Convert Malaysian odds to Probabilities
#'
#' @param x A vector of Malaysian odds
#'
#' @return A vector of Probabilities
#'
#'@export
#'
#' @examples
#' odds.malay2prob(c(1.93,2.05))
odds.malay2prob <- function (x) {
  # Start from a vector shaped (and attributed) like the input, all NA;
  # entries outside the valid ranges below stay NA.
  prob <- x
  prob[] <- NA_real_
  pos <- which(x > 0 & x <= 1)
  neg <- which(x >= -1 & x < 0)
  # Odds in (0, 1].
  prob[pos] <- 1 / (1 + x[pos])
  # Odds in [-1, 0).
  prob[neg] <- 1 / (1 - 1 / x[neg])
  prob
}
|
/R/odds.malay2prob.R
|
no_license
|
cran/odds.converter
|
R
| false
| false
| 405
|
r
|
#' Convert Malaysian odds to Probabilities
#'
#' @param x A vector of Malaysian odds
#'
#' @return A vector of Probabilities
#'
#'@export
#'
#' @examples
#' odds.malay2prob(c(1.93,2.05))
odds.malay2prob <- function (x) {
  # Start from a vector shaped (and attributed) like the input, all NA;
  # entries outside the valid ranges below stay NA.
  prob <- x
  prob[] <- NA_real_
  pos <- which(x > 0 & x <= 1)
  neg <- which(x >= -1 & x < 0)
  # Odds in (0, 1].
  prob[pos] <- 1 / (1 + x[pos])
  # Odds in [-1, 0).
  prob[neg] <- 1 / (1 - 1 / x[neg])
  prob
}
|
#'@title Maximum Likelihood Fit of the BMT Distribution to Non-censored Data.
#'
#'@description Fit of the BMT distribution to non-censored data by maximum
#' likelihood estimation (mle).
#'
#'@rdname BMTfit.mle
#'@name BMTfit.mle
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the maximum likelihood method.
#'
#' \code{BMTfit.mle} is based on the function \code{\link{mledist}} from the
#' package \code{\link{fitdistrplus}} but it focuses on the maximum likelihood
#' parameter estimation for the BMT distribution (see \code{\link{BMT}} for
#' details about the BMT distribution and \code{\link{mledist}} for details
#' about maximum likelihood fit of univariate distributions).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tails weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mledist"}. See \code{\link{mledist}} for details.
#'
#'@return \code{BMTfit.mle} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function. It is used in \code{\link{BMTfit}} to estimate
#' standard errors. }
#'
#' \item{optim.function}{the name of the optimization function used for maximum
#' likelihood.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated or \code{NULL} if there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.mqde}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{cjtorresj@unal.edu.co}
#'
#'@source Based on the function \code{\link{mledist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by maximum likelihood estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3 = 0.25, p4 = 0.75)
#' BMTfit.mle(x1)
#'
#' # (2) how to change the optimisation method?
#' BMTfit.mle(x1, optim.method="L-BFGS-B")
#' BMTfit.mle(x1, custom.optim="nlminb")
#'
#' # (3) estimation of the tails weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mle(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (4) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mle(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
#####################
#' @rdname BMTfit.mle
#' @export BMTfit.mle
BMTfit.mle <- function(data,
                       start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
                       fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
                       optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
  # Control data
  if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
    stop("data must be a numeric vector of length greater than 1")
  # Further arguments to be passed
  my3dots <- list(...)
  if (length(my3dots) == 0)
    my3dots <- NULL
  # Control weights
  if(!is.null(my3dots$weights))
    stop("Estimation with weights is not considered yet")
  # Control type.p.3.4. It allows partial match.
  TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
  int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
  if (is.na(int.type.p.3.4))
    stop("invalid type of parametrization for parameters 3 and 4")
  # NOTE(review): pmatch() returns NA (not -1) for ambiguous matches, so this
  # branch appears unreachable; the NA check above already covers that case.
  if (int.type.p.3.4 == -1)
    stop("ambiguous type of parametrization for parameters 3 and 4")
  # mle only allows parametrization "c-d"
  # because all data have to be inside the estimated domain.
  if(type.p.1.2 != "c-d")
    stop("maximum likelihood estimation only allows parametrization \"c-d\"")
  # Type of parametrizations are fixed parameters
  fix.arg$type.p.3.4 <- type.p.3.4
  fix.arg$type.p.1.2 <- "c-d"
  # Establish box constraints according to parameters in start
  stnames <- names(start)
  m <- length(stnames)
  # Initialize all box constraints: (0, 1)
  # .epsilon is a package-internal constant (small offset keeping parameters
  # strictly inside their open bounds) -- defined elsewhere in the package.
  lower <- rep(0 + .epsilon, m)
  upper <- rep(1 - .epsilon, m)
  # domain parametrization
  # c has to be inside (-Inf, min(data))
  lower[stnames == "p1"] <- -Inf
  upper[stnames == "p1"] <- min(data) - .epsilon
  # d has to be inside (max(data), Inf)
  lower[stnames == "p2"] <- max(data) + .epsilon
  upper[stnames == "p2"] <- Inf
  # asymmetry-steepness parametrization
  if(int.type.p.3.4 == 2) {
    # asymmetry has to be inside (-1, 1)
    lower[stnames == "p3"] <- -1 + .epsilon
  }
  # nlminb optimization method
  # .m.nlminb is a package-internal wrapper adapting nlminb to mledist's
  # custom.optim interface (defined elsewhere in the package).
  if(!is.null(custom.optim))
    if(custom.optim=="nlminb")
      custom.optim <- .m.nlminb
  # mledist function of fitdistplus
  # Delegates the actual likelihood maximization; returns mledist's result list.
  mle <- fitdistrplus::mledist(data, "BMT", start = start, fix.arg = fix.arg,
                               optim.method = optim.method, lower = lower, upper = upper,
                               custom.optim = custom.optim, silent = silent, ...)
  return(mle)
}
|
/R/BMTfit.mle.R
|
no_license
|
cran/BMT
|
R
| false
| false
| 9,158
|
r
|
#'@title Maximum Likelihood Fit of the BMT Distribution to Non-censored Data.
#'
#'@description Fit of the BMT distribution to non-censored data by maximum
#' likelihood estimation (mle).
#'
#'@rdname BMTfit.mle
#'@name BMTfit.mle
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the maximum likelihood method.
#'
#' \code{BMTfit.mle} is based on the function \code{\link{mledist}} from the
#' package \code{\link{fitdistrplus}} but it focuses on the maximum likelihood
#' parameter estimation for the BMT distribution (see \code{\link{BMT}} for
#' details about the BMT distribution and \code{\link{mledist}} for details
#' about maximum likelihood fit of univariate distributions).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tails weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mledist"}. See \code{\link{mledist}} for details.
#'
#'@return \code{BMTfit.mle} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function. It is used in \code{\link{BMTfit}} to estimate
#' standard errors. }
#'
#' \item{optim.function}{the name of the optimization function used for maximum
#' likelihood.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated or \code{NULL} if there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.mqde}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{cjtorresj@unal.edu.co}
#'
#'@source Based on the function \code{\link{mledist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by maximum likelihood estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3 = 0.25, p4 = 0.75)
#' BMTfit.mle(x1)
#'
#' # (2) how to change the optimisation method?
#' BMTfit.mle(x1, optim.method="L-BFGS-B")
#' BMTfit.mle(x1, custom.optim="nlminb")
#'
#' # (3) estimation of the tails weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mle(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (4) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mle(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
#####################
#' @rdname BMTfit.mle
#' @export BMTfit.mle
BMTfit.mle <- function(data,
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
# Validate input: a numeric vector with at least two observations is required.
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Capture further arguments so we can inspect them before forwarding.
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Weighted estimation is not supported yet; fail early if requested.
if(!is.null(my3dots$weights))
stop("Estimation with weights is not considered yet")
# Control type.p.3.4. It allows partial match via pmatch().
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# mle only allows parametrization "c-d" (domain endpoints c and d)
# because all data have to be inside the estimated domain.
if(type.p.1.2 != "c-d")
stop("maximum likelihood estimation only allows parametrization \"c-d\"")
# The chosen parametrizations are passed along as fixed (non-estimated) args.
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- "c-d"
# Establish box constraints according to parameters in start.
stnames <- names(start)
m <- length(stnames)
# Initialize all box constraints to the open interval (0, 1);
# .epsilon is a small package-internal offset keeping the bounds strictly open.
lower <- rep(0 + .epsilon, m)
upper <- rep(1 - .epsilon, m)
# Domain parametrization:
# c (p1) has to be inside (-Inf, min(data))
lower[stnames == "p1"] <- -Inf
upper[stnames == "p1"] <- min(data) - .epsilon
# d (p2) has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
upper[stnames == "p2"] <- Inf
# Asymmetry-steepness parametrization:
if(int.type.p.3.4 == 2) {
# asymmetry (p3) has to be inside (-1, 1)
lower[stnames == "p3"] <- -1 + .epsilon
}
# The string "nlminb" selects the package-internal nlminb wrapper .m.nlminb.
if(!is.null(custom.optim))
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# Delegate the actual fit to mledist() from the fitdistrplus package.
mle <- fitdistrplus::mledist(data, "BMT", start = start, fix.arg = fix.arg,
optim.method = optim.method, lower = lower, upper = upper,
custom.optim = custom.optim, silent = silent, ...)
return(mle)
}
|
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files.
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all variables)
  ## required to compute the correlation between nitrate and sulfate;
  ## the default is 0.
  ## Returns a numeric vector with one correlation per qualifying file
  ## (NULL when no file qualifies, matching the previous behavior).
  files <- list.files(path = directory)
  ## Compute per-file results and flatten afterwards instead of growing
  ## a vector with c() inside a loop (which copies on every append).
  results <- lapply(files, function(f) {
    dat <- read.csv(file.path(directory, f))  # file.path is portable
    dat <- dat[complete.cases(dat), ]         # keep fully observed rows only
    if (nrow(dat) > threshold) {
      cor(dat$sulfate, dat$nitrate)           # correlation for this monitor
    } else {
      NULL                                    # dropped by unlist() below
    }
  })
  unlist(results)
}
|
/corr.r
|
no_license
|
anilhardageri/datasciencecoursera
|
R
| false
| false
| 748
|
r
|
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files.
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all variables)
  ## required to compute the correlation between nitrate and sulfate;
  ## the default is 0.
  ## Returns a numeric vector with one correlation per qualifying file
  ## (NULL when no file qualifies, matching the previous behavior).
  files <- list.files(path = directory)
  ## Compute per-file results and flatten afterwards instead of growing
  ## a vector with c() inside a loop (which copies on every append).
  results <- lapply(files, function(f) {
    dat <- read.csv(file.path(directory, f))  # file.path is portable
    dat <- dat[complete.cases(dat), ]         # keep fully observed rows only
    if (nrow(dat) > threshold) {
      cor(dat$sulfate, dat$nitrate)           # correlation for this monitor
    } else {
      NULL                                    # dropped by unlist() below
    }
  })
  unlist(results)
}
|
# lm_eqn
# Build a plotmath character string (fitted line equation plus R^2)
# for annotating a ggplot, from a fitted lm object `m`.
lm_eqn <- function(m) {
  vals <- list(a = format(coef(m)[1], digits = 2),
               b = format(abs(coef(m)[2]), digits = 2),
               r2 = format(summary(m)$r.squared, digits = 3))
  # The displayed sign comes from the slope; |slope| is already in vals$b.
  eq <- if (coef(m)[2] >= 0) {
    substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2, vals)
  } else {
    substitute(italic(y) == a - b %.% italic(x)*","~~italic(r)^2~"="~r2, vals)
  }
  as.character(as.expression(eq))
}
|
/code/functions/lm_R2_equation_ggplot.R
|
no_license
|
ksirving/asci_ffm_2019
|
R
| false
| false
| 514
|
r
|
# lm_eqn
# Build a plotmath character string (fitted line equation plus R^2)
# for annotating a ggplot, from a fitted lm object `m`.
lm_eqn <- function(m) {
  vals <- list(a = format(coef(m)[1], digits = 2),
               b = format(abs(coef(m)[2]), digits = 2),
               r2 = format(summary(m)$r.squared, digits = 3))
  # The displayed sign comes from the slope; |slope| is already in vals$b.
  eq <- if (coef(m)[2] >= 0) {
    substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2, vals)
  } else {
    substitute(italic(y) == a - b %.% italic(x)*","~~italic(r)^2~"="~r2, vals)
  }
  as.character(as.expression(eq))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cyjShiny.R
\name{renderCyjShiny}
\alias{renderCyjShiny}
\title{More shiny plumbing - a cyjShiny wrapper for htmlwidget standard rendering operation}
\usage{
renderCyjShiny(expr, env = parent.frame(), quoted = FALSE)
}
\arguments{
\item{expr}{an expression that generates an HTML widget.}
\item{env}{environment in which to evaluate expr.}
\item{quoted}{logical specifying whether expr is quoted ("useful if you want to save an expression in a variable").}
}
\value{
A render function suitable for assigning to an \code{output} slot in a Shiny server function.
}
\description{
More shiny plumbing - a cyjShiny wrapper for htmlwidget standard rendering operation
}
|
/man/renderCyjShiny.Rd
|
permissive
|
mw201608/cyjShiny
|
R
| false
| true
| 660
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cyjShiny.R
\name{renderCyjShiny}
\alias{renderCyjShiny}
\title{More shiny plumbing - a cyjShiny wrapper for htmlwidget standard rendering operation}
\usage{
renderCyjShiny(expr, env = parent.frame(), quoted = FALSE)
}
\arguments{
\item{expr}{an expression that generates an HTML widget.}
\item{env}{environment in which to evaluate expr.}
\item{quoted}{logical specifying whether expr is quoted ("useful if you want to save an expression in a variable").}
}
\value{
A render function suitable for assigning to an \code{output} slot in a Shiny server function.
}
\description{
More shiny plumbing - a cyjShiny wrapper for htmlwidget standard rendering operation
}
|
ggROC = function(df, facetName, groupName = "grp", predName = "res") {
  # Plot one ROC curve per level of `facetName`, colour-coded, with ggplot2.
  #
  # df        : data.frame holding the grouping, response and prediction columns
  # facetName : name of the column whose levels get separate ROC curves
  # groupName : name of the binary outcome column (default "grp")
  # predName  : name of the predictor/score column (default "res")
  # Returns a ggplot object with false positive rate on x and true
  # positive rate on y.
  require(plyr)
  require(ggplot2)
  require(pROC)  # fix: roc() below comes from pROC, which was never loaded
  df = df[complete.cases(df), ]  # drop incomplete rows before roc()
  plotdata = ddply(df, facetName,
                   function(x) data.frame(roc(x[, groupName], x[, predName])[c("sensitivities",
                                                                               "specificities")]))
  # Convert specificity into false positive rate (1 - specificity).
  plotdata$specificities = 1 - plotdata$specificities
  colnames(plotdata) = c(facetName, "tpr", "fpr")
  p = ggplot(plotdata, aes(fpr, tpr)) +
    geom_line(aes_string(colour = facetName))
  return(p)
}
|
/R/ggROC.R
|
permissive
|
hbc/CHBUtils
|
R
| false
| false
| 537
|
r
|
ggROC = function(df, facetName, groupName = "grp", predName = "res") {
  # Plot one ROC curve per level of `facetName`, colour-coded, with ggplot2.
  #
  # df        : data.frame holding the grouping, response and prediction columns
  # facetName : name of the column whose levels get separate ROC curves
  # groupName : name of the binary outcome column (default "grp")
  # predName  : name of the predictor/score column (default "res")
  # Returns a ggplot object with false positive rate on x and true
  # positive rate on y.
  require(plyr)
  require(ggplot2)
  require(pROC)  # fix: roc() below comes from pROC, which was never loaded
  df = df[complete.cases(df), ]  # drop incomplete rows before roc()
  plotdata = ddply(df, facetName,
                   function(x) data.frame(roc(x[, groupName], x[, predName])[c("sensitivities",
                                                                               "specificities")]))
  # Convert specificity into false positive rate (1 - specificity).
  plotdata$specificities = 1 - plotdata$specificities
  colnames(plotdata) = c(facetName, "tpr", "fpr")
  p = ggplot(plotdata, aes(fpr, tpr)) +
    geom_line(aes_string(colour = facetName))
  return(p)
}
|
# Multivariate (two-response) regression and MANOVA on an iris CSV export.
# NOTE(review): hard-coded absolute Windows path -- adjust for your machine.
data <- read.csv("C:\\Users\\user\\Desktop\\R\\iris.csv")
summary(data)
# Model both sepal measurements jointly on the two petal measurements.
m <- lm(cbind(data$sepal_length,data$sepal_width) ~ data$petal_length + data$petal_width)
summary(m)
# Multivariate analysis of variance on the fitted two-response model.
mm <- manova(m)
mm
summary(mm)
|
/Assignment4/MultivariateRegression.R
|
no_license
|
Krithikap019/Data-Analytics
|
R
| false
| false
| 213
|
r
|
# Multivariate (two-response) regression and MANOVA on an iris CSV export.
# NOTE(review): hard-coded absolute Windows path -- adjust for your machine.
data <- read.csv("C:\\Users\\user\\Desktop\\R\\iris.csv")
summary(data)
# Model both sepal measurements jointly on the two petal measurements.
m <- lm(cbind(data$sepal_length,data$sepal_width) ~ data$petal_length + data$petal_width)
summary(m)
# Multivariate analysis of variance on the fitted two-response model.
mm <- manova(m)
mm
summary(mm)
|
# Load helper functions (including exercise2Enhanced) from a sibling file;
# chdir = TRUE so relative paths inside Functions.R resolve from its own dir.
source('Functions.R', chdir = TRUE)
# Sample abscissas and ordinates used as input for the exercise.
x=c(-300, -200, -100, 0, 100, 200, 300);
y=c(-20, -10, -10, 10, 5, 5, 20);
# exercise2Enhanced is defined in Functions.R; the meaning of the extra
# arguments 400 and 60 is documented there -- TODO confirm against that file.
resultExercise2 <- exercise2Enhanced(x, y, 400, 60)
resultExercise2
|
/Homework3/Exercise2-Enhanced.R
|
no_license
|
buzduganalex1/Special-Chapters-on-Artificial-Intelligence
|
R
| false
| false
| 182
|
r
|
# Load helper functions (including exercise2Enhanced) from a sibling file;
# chdir = TRUE so relative paths inside Functions.R resolve from its own dir.
source('Functions.R', chdir = TRUE)
# Sample abscissas and ordinates used as input for the exercise.
x=c(-300, -200, -100, 0, 100, 200, 300);
y=c(-20, -10, -10, 10, 5, 5, 20);
# exercise2Enhanced is defined in Functions.R; the meaning of the extra
# arguments 400 and 60 is documented there -- TODO confirm against that file.
resultExercise2 <- exercise2Enhanced(x, y, 400, 60)
resultExercise2
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{segRESTREND}
\alias{segRESTREND}
\title{Data frame containing the annual data for a segRESTREND analysis}
\format{R data frame}
\source{
\code{\link[gimms]{gimms-package}}
}
\usage{
segRESTREND
}
\description{
Contains the variables \code{anu.VI}, \code{acu.RF}, \code{VI.index}, \code{rf.b4} and \code{rf.af}, covering the years 1982--2013. The breakpoint for this pixel is at index 24 (year 2005).
}
\keyword{datasets}
|
/TSS.RESTREND/man/segRESTREND.Rd
|
permissive
|
fdbesanto2/TSSRESTREND
|
R
| false
| true
| 462
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{segRESTREND}
\alias{segRESTREND}
\title{Data frame containing the annual data for a segRESTREND analysis}
\format{R data frame}
\source{
\code{\link[gimms]{gimms-package}}
}
\usage{
segRESTREND
}
\description{
Contains the variables \code{anu.VI}, \code{acu.RF}, \code{VI.index}, \code{rf.b4} and \code{rf.af}, covering the years 1982--2013. The breakpoint for this pixel is at index 24 (year 2005).
}
\keyword{datasets}
|
library(palinsol)
### Name: Insol
### Title: Computes incoming solar radiation (insolation)
### Aliases: Insol
### Keywords: misc
### ** Examples
## make a little wrapper, with all default values
## astrosol is the astronomical solution (ber78 here) giving the orbit at time tt
insolation <- function(times, astrosol=ber78,...)
sapply(times, function(tt) Insol(orbit=astrosol(tt)))
## Evaluate every 1 kyr over the last 400 kyr (times in years before present).
tts <- seq(from = -400e3, to = 0, by = 1e3)
isl <- insolation(tts, ber78)
plot(tts, isl, typ='l')
|
/data/genthat_extracted_code/palinsol/examples/Insol.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 411
|
r
|
library(palinsol)
### Name: Insol
### Title: Computes incoming solar radiation (insolation)
### Aliases: Insol
### Keywords: misc
### ** Examples
## make a little wrapper, with all default values
## astrosol is the astronomical solution (ber78 here) giving the orbit at time tt
insolation <- function(times, astrosol=ber78,...)
sapply(times, function(tt) Insol(orbit=astrosol(tt)))
## Evaluate every 1 kyr over the last 400 kyr (times in years before present).
tts <- seq(from = -400e3, to = 0, by = 1e3)
isl <- insolation(tts, ber78)
plot(tts, isl, typ='l')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treemap.R
\name{hgch_treemap_CatNum}
\alias{hgch_treemap_CatNum}
\title{Treemap density by numeric variable}
\usage{
hgch_treemap_CatNum(data, title = NULL, subtitle = NULL, caption = NULL,
minColor = "#E63917", maxColor = "#18941E", back_color = "white",
color_title = "black", reverse = TRUE, export = FALSE, ...)
}
\arguments{
\item{data}{A data.frame (first column categorical, second numeric, per the Cat-Num ctype).}
}
\value{
highcharts viz
}
\description{
Treemap density by numeric variable
}
\section{ctypes}{
Cat-Num
}
\examples{
hgch_treemap_CatNum(sampleData("Cat-Num", nrow = 10))
}
|
/man/hgch_treemap_CatNum.Rd
|
no_license
|
isciolab/hgchmagic
|
R
| false
| true
| 613
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treemap.R
\name{hgch_treemap_CatNum}
\alias{hgch_treemap_CatNum}
\title{Treemap density by numeric variable}
\usage{
hgch_treemap_CatNum(data, title = NULL, subtitle = NULL, caption = NULL,
minColor = "#E63917", maxColor = "#18941E", back_color = "white",
color_title = "black", reverse = TRUE, export = FALSE, ...)
}
\arguments{
\item{data}{A data.frame (first column categorical, second numeric, per the Cat-Num ctype).}
}
\value{
highcharts viz
}
\description{
Treemap density by numeric variable
}
\section{ctypes}{
Cat-Num
}
\examples{
hgch_treemap_CatNum(sampleData("Cat-Num", nrow = 10))
}
|
#' @importFrom stats rbinom runif pnorm setNames coef rnbinom vcov simulate update
#' rnorm rpois sd sigma terms time reshape
#' @importFrom stats na.omit
#' @importFrom utils data head tail write.csv zip
#' @importFrom methods new
# Greet the user when the package is attached in an interactive session;
# stays silent under Rscript / R CMD check.
.onAttach = function(...) {
  if (interactive()) {
    greeting = "Welcome to OpenSDP." # nocov
    packageStartupMessage(paste(strwrap(greeting), collapse = "\n")) # nocov
  }
}
# Register non-standard-evaluation column names so R CMD check does not
# flag them as undefined globals.
utils::globalVariables(c("Sex", "iep", "Race", "frpl", "age"))
# Package load hook: registers the remaining NSE names used throughout
# the package (dplyr/tidyr pipelines reference bare column names).
.onLoad <- function(libname = find.package("OpenSDPsynthR"), pkgname = "OpenSDPsynthR"){
# CRAN Note avoidance
if(getRversion() >= "2.15.1")
utils::globalVariables(
# from simpop and subset function calls
c("sid", "grad", "first_flag", "schools_race_prob", "White",
"initschid", "Lodgment_method", "PHI_Ind", "Sw_amt", "Alow_ben_amt",
"ontrack_yr1", "grade", "math_ss", "rdg_ss",
"ontrack_yr4", "cum_credits_yr1", "cum_credits_yr4", "cum_credits_yr1_ela",
"cum_credits_yr4_ela", "cum_credits_yr1_math", "cum_credits_yr4_math",
"cum_gpa_yr1", "cum_gpa_yr4", "yr_seq",
"scale_gpa", "grad_prob", "ps_prob", "ps",
"cum_credits", "credits_earned", "credits_attempted", "grad",
"hs_status", "status_after", "event",
"ps_transfer", "opeid", "ps_change_ind", "short_name",
"type",
# we use the magrittr pipe
".",
# CLEANER
"grade", "sid", "schid", "gifted", "chrt_ninth", "name", "keep", "math_ss",
"rdg_ss", "test_math_8_std", "test_ela_8_std", "test_composite_8",
"yr_seq", "ontrack", "cum_credits", "cum_credits_ela", "cum_credits_math",
"cum_gpa", "status_after", "scale_gpa", "gpa", "grad_prob", "grad",
"hs_status", "ps_prob", "ps", "diploma_type", "class_rank", "opeid",
"first_ps", "last_ps", "chrt_grad", "variable", "value", "temp",
"1_enrl_1oct_grad", "2_enrl_1oct_grad", "3_enrl_1oct_grad", "4_enrl_1oct_grad",
"5_enrl_1oct_grad", "1_enrl_1oct_ninth", "2_enrl_1oct_ninth", "3_enrl_1oct_ninth",
"4_enrl_1oct_ninth", "5_enrl_1oct_ninth", "enrl_ever_w2_grad_any",
"enrl_ever_w2_ninth_any", "ps_type", "enrl_1oct_grad", "enrl_1oct_ninth",
"enrl_ever_w2_ninth", "enrl_ever_w2_grad", "enrl_ever_w2_grad_2yr",
"enrl_ever_w2_grad_4yr", "enrl_ever_w2_ninth_2yr", "enrl_ever_w2_ninth_4yr",
"term", "ps_short_name", "enroll_count", "all4", "persist", "chrt",
"observed",
# control parameters
"race_list", "frl_list", "school_list", "gifted_list",
# more simpop
"flag", "grade_diff", "cohort_year", "subject", "assess_id", "score",
"ntests", "schid", "cohort_grad_year"
)
)
# Load hooks must return invisibly with no visible side effects.
invisible()
}
|
/R/zzz.R
|
permissive
|
OpenSDP/OpenSDPsynthR
|
R
| false
| false
| 2,797
|
r
|
#' @importFrom stats rbinom runif pnorm setNames coef rnbinom vcov simulate update
#' rnorm rpois sd sigma terms time reshape
#' @importFrom stats na.omit
#' @importFrom utils data head tail write.csv zip
#' @importFrom methods new
# Greet the user when the package is attached in an interactive session;
# stays silent under Rscript / R CMD check.
.onAttach = function(...) {
  if (interactive()) {
    greeting = "Welcome to OpenSDP." # nocov
    packageStartupMessage(paste(strwrap(greeting), collapse = "\n")) # nocov
  }
}
# Register non-standard-evaluation column names so R CMD check does not
# flag them as undefined globals.
utils::globalVariables(c("Sex", "iep", "Race", "frpl", "age"))
# Package load hook: registers the remaining NSE names used throughout
# the package (dplyr/tidyr pipelines reference bare column names).
.onLoad <- function(libname = find.package("OpenSDPsynthR"), pkgname = "OpenSDPsynthR"){
# CRAN Note avoidance
if(getRversion() >= "2.15.1")
utils::globalVariables(
# from simpop and subset function calls
c("sid", "grad", "first_flag", "schools_race_prob", "White",
"initschid", "Lodgment_method", "PHI_Ind", "Sw_amt", "Alow_ben_amt",
"ontrack_yr1", "grade", "math_ss", "rdg_ss",
"ontrack_yr4", "cum_credits_yr1", "cum_credits_yr4", "cum_credits_yr1_ela",
"cum_credits_yr4_ela", "cum_credits_yr1_math", "cum_credits_yr4_math",
"cum_gpa_yr1", "cum_gpa_yr4", "yr_seq",
"scale_gpa", "grad_prob", "ps_prob", "ps",
"cum_credits", "credits_earned", "credits_attempted", "grad",
"hs_status", "status_after", "event",
"ps_transfer", "opeid", "ps_change_ind", "short_name",
"type",
# we use the magrittr pipe
".",
# CLEANER
"grade", "sid", "schid", "gifted", "chrt_ninth", "name", "keep", "math_ss",
"rdg_ss", "test_math_8_std", "test_ela_8_std", "test_composite_8",
"yr_seq", "ontrack", "cum_credits", "cum_credits_ela", "cum_credits_math",
"cum_gpa", "status_after", "scale_gpa", "gpa", "grad_prob", "grad",
"hs_status", "ps_prob", "ps", "diploma_type", "class_rank", "opeid",
"first_ps", "last_ps", "chrt_grad", "variable", "value", "temp",
"1_enrl_1oct_grad", "2_enrl_1oct_grad", "3_enrl_1oct_grad", "4_enrl_1oct_grad",
"5_enrl_1oct_grad", "1_enrl_1oct_ninth", "2_enrl_1oct_ninth", "3_enrl_1oct_ninth",
"4_enrl_1oct_ninth", "5_enrl_1oct_ninth", "enrl_ever_w2_grad_any",
"enrl_ever_w2_ninth_any", "ps_type", "enrl_1oct_grad", "enrl_1oct_ninth",
"enrl_ever_w2_ninth", "enrl_ever_w2_grad", "enrl_ever_w2_grad_2yr",
"enrl_ever_w2_grad_4yr", "enrl_ever_w2_ninth_2yr", "enrl_ever_w2_ninth_4yr",
"term", "ps_short_name", "enroll_count", "all4", "persist", "chrt",
"observed",
# control parameters
"race_list", "frl_list", "school_list", "gifted_list",
# more simpop
"flag", "grade_diff", "cohort_year", "subject", "assess_id", "score",
"ntests", "schid", "cohort_grad_year"
)
)
# Load hooks must return invisibly with no visible side effects.
invisible()
}
|
#Resilience
#K. Wiese September 2018
########################
# Per-pixel resilience/resistance indices from raster time series.
#
# NANOprev : NDVI anomaly at the previous time step (stack, tsl layers)
# TANO     : temperature anomaly stack
# SPEI     : drought index stack
# NANO     : NDVI anomaly stack (response)
#
# For every pixel one no-intercept regression
#   NANO ~ NANOprev + TANO + SPEI + 0
# is fitted; its three coefficients (beta, alpha, tetha) and the residual
# RMSE are returned as a named 4-layer stack.  The previous version refitted
# the identical model four times per pixel (once per output); fitting it
# once is ~4x faster and numerically identical.
Resilience <- function(NANOprev, TANO, SPEI, NANO){
  Input <- stack(NANOprev, TANO, SPEI, NANO) #stack all raster variables
  tsl <- dim(NANO)[3] #time series length
  # Fit once per pixel; return beta, alpha, tetha and RMSE together.
  funAll <- function(x) {
    if (is.na(x[1])) return(rep(NA_real_, 4))
    fit <- lm(x[(3*tsl+1):(4*tsl)] ~ x[1:tsl] + x[(tsl+1):(2*tsl)] + x[(2*tsl+1):(3*tsl)] + 0)
    c(coef(fit)[1:3], sqrt(mean(fit$residuals^2)))
  }
  # calc() with a multi-valued function yields one layer per returned value.
  Index <- stack(calc(Input, funAll))
  names(Index) <- c("Resilience", "Resistence_to_Temperature", "Resistence_to_Drought", "RMSE")
  return(Index)
}
|
/R/Resilience.R
|
no_license
|
klauswiese/Forward
|
R
| false
| false
| 1,316
|
r
|
#Resilience
#K. Wiese September 2018
########################
# Per-pixel resilience/resistance indices from raster time series.
#
# NANOprev : NDVI anomaly at the previous time step (stack, tsl layers)
# TANO     : temperature anomaly stack
# SPEI     : drought index stack
# NANO     : NDVI anomaly stack (response)
#
# For every pixel one no-intercept regression
#   NANO ~ NANOprev + TANO + SPEI + 0
# is fitted; its three coefficients (beta, alpha, tetha) and the residual
# RMSE are returned as a named 4-layer stack.  The previous version refitted
# the identical model four times per pixel (once per output); fitting it
# once is ~4x faster and numerically identical.
Resilience <- function(NANOprev, TANO, SPEI, NANO){
  Input <- stack(NANOprev, TANO, SPEI, NANO) #stack all raster variables
  tsl <- dim(NANO)[3] #time series length
  # Fit once per pixel; return beta, alpha, tetha and RMSE together.
  funAll <- function(x) {
    if (is.na(x[1])) return(rep(NA_real_, 4))
    fit <- lm(x[(3*tsl+1):(4*tsl)] ~ x[1:tsl] + x[(tsl+1):(2*tsl)] + x[(2*tsl+1):(3*tsl)] + 0)
    c(coef(fit)[1:3], sqrt(mean(fit$residuals^2)))
  }
  # calc() with a multi-valued function yields one layer per returned value.
  Index <- stack(calc(Input, funAll))
  names(Index) <- c("Resilience", "Resistence_to_Temperature", "Resistence_to_Drought", "RMSE")
  return(Index)
}
|
#' Plot the response probabilities for all observations as a heatmap
#'
#' This function plots the response probabilities from a model fit as a
#' heatmap with additional options for sorting and filtering both axes.
#'
#' @param result The BAMBAResult object.
#' @param xorderTable A \code{data.frame} with all ag/re/tp combinations
#'   to include as well as ordering, labeling, and color information.
#'   Should have the following columns: ag, re, tp, order, label, group, color.
#' @param yorderTable A \code{data.frame} with all subjectIds to include
#'   as well as ordering, labeling, and color information.
#'   Should have the following columns: subjectId, order, label, group, color
#' @param responseThreshold If not NULL, the threshold probability
#'   defining a response, resulting in a two-color heatmap rather than
#'   a continuous heatmap. Defaults to \code{NULL}
#' @param xtext The label for the x-axis. Defaults to 'Antigen/Fc Variable'.
#' @param xlines A string defining the color for lines separating groups
#'   (by group) on the x-axis or \code{NULL} for no lines.
#'   Defaults to 'white'.
#' @param ytext The label for the y-axis. Defaults to 'SubjectId'.
#' @param ylines A string defining the color for lines separating groups
#'   (by group) on the y-axis or \code{NULL} for no lines.
#'   Defaults to \code{NULL}
#'
#' @return A ggplot heatmap.
#'
#' @export
response_heatmap_custom <- function(result,
xorderTable,
yorderTable,
responseThreshold = NULL,
xtext = "Antigen/Fc Variable",
xlines = "white",
ytext = "SubjectId",
ylines = NULL) {
# responses() is a package helper extracting per-observation probabilities.
resp <- responses(result)
# Data without Fc variables gets a placeholder "noFc" level so joins work.
if (!"re" %in% names(resp)) {
resp$re <- "noFc"
xorderTable$re <- "noFc"
}
# Keep only the ag/re/tp combinations and subjects listed in the tables.
resp <- resp %>%
dplyr::filter(paste(ag, re, tp) %in%
paste(xorderTable$ag, xorderTable$re, xorderTable$tp),
subjectId %in% yorderTable$subjectId)
# Attach x/y ordering, label, group and color metadata, then convert the
# user-supplied orders to dense ranks (consecutive tile positions).
hmData <- resp %>%
left_join(xorderTable %>%
rename(xorder = order,
xlabel = label,
xgroup = group,
xcolor = color),
by = c("ag", "re", "tp")) %>%
left_join(yorderTable %>%
rename(yorder = order,
ylabel = label,
ygroup = group,
ycolor = color),
by = "subjectId") %>%
mutate(xorder = dense_rank(xorder),
yorder = dense_rank(yorder)) %>%
arrange(xorder, yorder)
# One row per x-axis position, used for breaks/labels/colors.
xax <- hmData %>%
dplyr::select(xorder, xlabel, xgroup, xcolor) %>%
distinct() %>%
arrange(xorder)
# Run-length boundaries of consecutive identical labels; xsep2 picks the
# midpoint of each run so the label is drawn once, centered on the run.
xsep <- cumsum(rle(xax$xlabel)$lengths)
## xsep2 <- floor((xsep + c(0, xsep[1:(length(xsep)-1)])) / 2)
xsep2 <- NULL
if (length(xsep) > 1) {
xsep2 <- ceiling((xsep + c(0, xsep[1:(length(xsep)-1)])) / 2)
} else {
xsep2 <- ceiling(xsep /2)
}
# Group boundaries (for separator lines), by runs of the group column.
## xgrpsep <- cumsum(table(xax$xgroup))
xgrpsep <- cumsum(rle(xax$xgroup)$lengths)
# Same bookkeeping for the y axis.
yax <- hmData %>%
dplyr::select(yorder, ylabel, ygroup, ycolor) %>%
distinct() %>%
arrange(yorder)
ysep <- cumsum(rle(yax$ylabel)$lengths)
## ysep2 <- floor((ysep + c(0, ysep[1:(length(ysep)-1)])) / 2)
ysep2 <- NULL
if (length(ysep) > 1) {
ysep2 <- ceiling((ysep + c(0, ysep[1:(length(ysep)-1)])) / 2)
} else {
ysep2 <- ceiling(ysep /2)
}
## ygrpsep <- cumsum(table(yax$ygroup))
ygrpsep <- cumsum(rle(yax$ygroup)$lengths)
# Continuous fill by default; a two-color responder/non-responder map
# when responseThreshold is supplied.
hmPlot <- NULL
if (is.null(responseThreshold)) {
hmPlot <- ggplot(hmData) +
geom_tile(aes(xorder, yorder, fill=responseProb))
} else {
hmPlot <- ggplot(hmData) +
geom_tile(aes(xorder, yorder,
fill=responseProb > responseThreshold)) +
scale_fill_manual(values = c("darkred", "darkgreen"))
}
# Optional separator lines between groups on either axis.
if (!is.null(xlines)) {
hmPlot <- hmPlot +
geom_vline(xintercept = xgrpsep+0.5, color=xlines)
}
if (!is.null(ylines)) {
hmPlot <- hmPlot +
geom_hline(yintercept = ygrpsep+0.5, color=ylines)
}
# One break per label run, colored per the order tables' color columns.
hmPlot +
scale_x_discrete(xtext, limits=xax$xorder,
breaks=xax$xorder[xsep2], labels=xax$xlabel[xsep2]) +
scale_y_discrete(ytext, limits=yax$yorder,
breaks=yax$yorder[ysep2], labels=yax$ylabel[ysep2]) +
theme(axis.text.x = element_text(angle = 45, hjust=.5, vjust=.5,
color = xax$xcolor[xsep2]),
axis.text.y = element_text(color = yax$ycolor[ysep2]))
}
|
/R/response_heatmap_custom.r
|
no_license
|
RGLab/BAMBA
|
R
| false
| false
| 4,858
|
r
|
#' Plot the response probabilities for all observations as a heatmap
#'
#' This function plots the response probabilities from a model fit as a
#' heatmap with additional options for sorting and filtering both axes.
#'
#' @param result The BAMBAResult object.
#' @param xorderTable A \code{data.frame} with all ag/re/tp combinations
#'   to include as well as ordering, labeling, and color information.
#'   Should have the following columns: ag, re, tp, order, label, group, color.
#' @param yorderTable A \code{data.frame} with all subjectIds to include
#'   as well as ordering, labeling, and color information.
#'   Should have the following columns: subjectId, order, label, group, color
#' @param responseThreshold If not NULL, the threshold probability
#'   defining a response, resulting in a two-color heatmap rather than
#'   a continuous heatmap. Defaults to \code{NULL}
#' @param xtext The label for the x-axis. Defaults to 'Antigen/Fc Variable'.
#' @param xlines A string defining the color for lines separating groups
#'   (by group) on the x-axis or \code{NULL} for no lines.
#'   Defaults to 'white'.
#' @param ytext The label for the y-axis. Defaults to 'SubjectId'.
#' @param ylines A string defining the color for lines separating groups
#'   (by group) on the y-axis or \code{NULL} for no lines.
#'   Defaults to \code{NULL}
#'
#' @return A ggplot heatmap.
#'
#' @export
response_heatmap_custom <- function(result,
xorderTable,
yorderTable,
responseThreshold = NULL,
xtext = "Antigen/Fc Variable",
xlines = "white",
ytext = "SubjectId",
ylines = NULL) {
# responses() is a package helper extracting per-observation probabilities.
resp <- responses(result)
# Data without Fc variables gets a placeholder "noFc" level so joins work.
if (!"re" %in% names(resp)) {
resp$re <- "noFc"
xorderTable$re <- "noFc"
}
# Keep only the ag/re/tp combinations and subjects listed in the tables.
resp <- resp %>%
dplyr::filter(paste(ag, re, tp) %in%
paste(xorderTable$ag, xorderTable$re, xorderTable$tp),
subjectId %in% yorderTable$subjectId)
# Attach x/y ordering, label, group and color metadata, then convert the
# user-supplied orders to dense ranks (consecutive tile positions).
hmData <- resp %>%
left_join(xorderTable %>%
rename(xorder = order,
xlabel = label,
xgroup = group,
xcolor = color),
by = c("ag", "re", "tp")) %>%
left_join(yorderTable %>%
rename(yorder = order,
ylabel = label,
ygroup = group,
ycolor = color),
by = "subjectId") %>%
mutate(xorder = dense_rank(xorder),
yorder = dense_rank(yorder)) %>%
arrange(xorder, yorder)
# One row per x-axis position, used for breaks/labels/colors.
xax <- hmData %>%
dplyr::select(xorder, xlabel, xgroup, xcolor) %>%
distinct() %>%
arrange(xorder)
# Run-length boundaries of consecutive identical labels; xsep2 picks the
# midpoint of each run so the label is drawn once, centered on the run.
xsep <- cumsum(rle(xax$xlabel)$lengths)
## xsep2 <- floor((xsep + c(0, xsep[1:(length(xsep)-1)])) / 2)
xsep2 <- NULL
if (length(xsep) > 1) {
xsep2 <- ceiling((xsep + c(0, xsep[1:(length(xsep)-1)])) / 2)
} else {
xsep2 <- ceiling(xsep /2)
}
# Group boundaries (for separator lines), by runs of the group column.
## xgrpsep <- cumsum(table(xax$xgroup))
xgrpsep <- cumsum(rle(xax$xgroup)$lengths)
# Same bookkeeping for the y axis.
yax <- hmData %>%
dplyr::select(yorder, ylabel, ygroup, ycolor) %>%
distinct() %>%
arrange(yorder)
ysep <- cumsum(rle(yax$ylabel)$lengths)
## ysep2 <- floor((ysep + c(0, ysep[1:(length(ysep)-1)])) / 2)
ysep2 <- NULL
if (length(ysep) > 1) {
ysep2 <- ceiling((ysep + c(0, ysep[1:(length(ysep)-1)])) / 2)
} else {
ysep2 <- ceiling(ysep /2)
}
## ygrpsep <- cumsum(table(yax$ygroup))
ygrpsep <- cumsum(rle(yax$ygroup)$lengths)
# Continuous fill by default; a two-color responder/non-responder map
# when responseThreshold is supplied.
hmPlot <- NULL
if (is.null(responseThreshold)) {
hmPlot <- ggplot(hmData) +
geom_tile(aes(xorder, yorder, fill=responseProb))
} else {
hmPlot <- ggplot(hmData) +
geom_tile(aes(xorder, yorder,
fill=responseProb > responseThreshold)) +
scale_fill_manual(values = c("darkred", "darkgreen"))
}
# Optional separator lines between groups on either axis.
if (!is.null(xlines)) {
hmPlot <- hmPlot +
geom_vline(xintercept = xgrpsep+0.5, color=xlines)
}
if (!is.null(ylines)) {
hmPlot <- hmPlot +
geom_hline(yintercept = ygrpsep+0.5, color=ylines)
}
# One break per label run, colored per the order tables' color columns.
hmPlot +
scale_x_discrete(xtext, limits=xax$xorder,
breaks=xax$xorder[xsep2], labels=xax$xlabel[xsep2]) +
scale_y_discrete(ytext, limits=yax$yorder,
breaks=yax$yorder[ysep2], labels=yax$ylabel[ysep2]) +
theme(axis.text.x = element_text(angle = 45, hjust=.5, vjust=.5,
color = xax$xcolor[xsep2]),
axis.text.y = element_text(color = yax$ycolor[ysep2]))
}
|
library('SummarizedExperiment')
library('recount')
library('ggplot2')
## Load files
projects <- c('sra', 'TCGA', 'SRP012682')
rse <- lapply(projects, function(project) {
# load() brings an object named `rse` into this function's environment;
# the scale_counts() call below operates on that loaded object.
load(paste0('rse_', project, '.Rdata'))
## Scale counts
result <- scale_counts(rse, round = FALSE)
return(result)
})
# Relabel with display names (SRP012682 is the GTEx project accession).
projects <- names(rse) <- c('SRA', 'TCGA', 'GTEx')
## To do this automatically, use a for loop around the unique values of
## for(region_of_interest in unique(rowRanges(rse[[1]])$region)) {
## code below
## }
region_of_interest <- 'IgA-reg1'
## Construct a data.frame that will be easy to use with ggplot2
counts <- mapply(function(se, project) {
## Subset to region of interest
se <- subset(se, region == region_of_interest)
## Assign names so the original code works
names(se) <- seq_len(nrow(se))
## Original code, minus width part
df <- data.frame(
counts = as.vector(assays(se)$counts),
region = factor(rep(names(se), each = ncol(se)), levels = names(se)),
project = rep(project, ncol(se) * nrow(se))
)
return(df)
}, rse, projects, SIMPLIFY = FALSE)
# One long data.frame across the three projects.
counts <- do.call(rbind, counts)
rownames(counts) <- NULL
## Save data for later
save(counts, file = paste0('counts_', region_of_interest, '.Rdata'))
## Save image in a PDF file for the region of interest
## using a height based on the number of boxplots to make
pdf(file = paste0('counts_', region_of_interest, '.pdf'),
height = round(sum(rowRanges(rse[[1]])$region == region_of_interest) / 10) + 7)
ggplot(counts, aes(y = log10(counts + 1), x = region)) + geom_boxplot() +
facet_grid(. ~ project) + coord_flip() + theme_bw(base_size = 18)
dev.off()
## This is a big pdf and takes a while to open, alternatively, save to a png (non-vector graphics)
png(file = paste0('counts_', region_of_interest, '.png'),
height = (round(sum(rowRanges(rse[[1]])$region == region_of_interest) / 10) + 7) * 480 / 7)
ggplot(counts, aes(y = log10(counts + 1), x = region)) + geom_boxplot() +
facet_grid(. ~ project) + coord_flip() + theme_bw(base_size = 18)
dev.off()
## Exclude 0 counts
png(file = paste0('counts_GT0_', region_of_interest, '.png'),
height = (round(sum(rowRanges(rse[[1]])$region == region_of_interest) / 10) + 7) * 480 / 7)
ggplot(subset(counts, counts > 0), aes(y = log10(counts + 1), x = region)) + geom_boxplot() +
facet_grid(. ~ project) + coord_flip() + theme_bw(base_size = 18)
dev.off()
|
/IGHwindows/explore_windows_counts.R
|
no_license
|
LieberInstitute/insp
|
R
| false
| false
| 2,463
|
r
|
library('SummarizedExperiment')
library('recount')
library('ggplot2')
## Load files
projects <- c('sra', 'TCGA', 'SRP012682')
rse <- lapply(projects, function(project) {
# load() brings an object named `rse` into this function's environment;
# the scale_counts() call below operates on that loaded object.
load(paste0('rse_', project, '.Rdata'))
## Scale counts
result <- scale_counts(rse, round = FALSE)
return(result)
})
# Relabel with display names (SRP012682 is the GTEx project accession).
projects <- names(rse) <- c('SRA', 'TCGA', 'GTEx')
## To do this automatically, use a for loop around the unique values of
## for(region_of_interest in unique(rowRanges(rse[[1]])$region)) {
## code below
## }
region_of_interest <- 'IgA-reg1'
## Construct a data.frame that will be easy to use with ggplot2
counts <- mapply(function(se, project) {
## Subset to region of interest
se <- subset(se, region == region_of_interest)
## Assign names so the original code works
names(se) <- seq_len(nrow(se))
## Original code, minus width part
df <- data.frame(
counts = as.vector(assays(se)$counts),
region = factor(rep(names(se), each = ncol(se)), levels = names(se)),
project = rep(project, ncol(se) * nrow(se))
)
return(df)
}, rse, projects, SIMPLIFY = FALSE)
# One long data.frame across the three projects.
counts <- do.call(rbind, counts)
rownames(counts) <- NULL
## Save data for later
save(counts, file = paste0('counts_', region_of_interest, '.Rdata'))
## Save image in a PDF file for the region of interest
## using a height based on the number of boxplots to make
pdf(file = paste0('counts_', region_of_interest, '.pdf'),
height = round(sum(rowRanges(rse[[1]])$region == region_of_interest) / 10) + 7)
ggplot(counts, aes(y = log10(counts + 1), x = region)) + geom_boxplot() +
facet_grid(. ~ project) + coord_flip() + theme_bw(base_size = 18)
dev.off()
## This is a big pdf and takes a while to open, alternatively, save to a png (non-vector graphics)
png(file = paste0('counts_', region_of_interest, '.png'),
height = (round(sum(rowRanges(rse[[1]])$region == region_of_interest) / 10) + 7) * 480 / 7)
ggplot(counts, aes(y = log10(counts + 1), x = region)) + geom_boxplot() +
facet_grid(. ~ project) + coord_flip() + theme_bw(base_size = 18)
dev.off()
## Exclude 0 counts
png(file = paste0('counts_GT0_', region_of_interest, '.png'),
height = (round(sum(rowRanges(rse[[1]])$region == region_of_interest) / 10) + 7) * 480 / 7)
ggplot(subset(counts, counts > 0), aes(y = log10(counts + 1), x = region)) + geom_boxplot() +
facet_grid(. ~ project) + coord_flip() + theme_bw(base_size = 18)
dev.off()
|
library(quantmod)
library(lubridate)
library(ggplot2)
library(gridExtra)
Sys.setlocale("LC_TIME", "C")
getSymbols(c("GOOG", "AMZN", "AAPL", "NFLX", "FB", "MSFT"), env = .GlobalEnv, from="2017-01-01")
getSymbols("SPY", env = .GlobalEnv, from="2017-01-01")
FANG <- data.frame(GOOG[,"GOOG.Close"] + AMZN[,"AMZN.Close"] + NFLX[,"NFLX.Close"]+ FB[,"FB.Close"])
FANG$Date <- as.Date(row.names(FANG))
FANG$SPY <- SPY[,"SPY.Close"]
names(FANG)[3] <- "SPY"
FANG$FAANG <- data.frame(GOOG[,"GOOG.Close"] + AMZN[,"AMZN.Close"] + AAPL[,"AAPL.Close"] + NFLX[,"NFLX.Close"]+ FB[,"FB.Close"])
FANG$FAAMG <- data.frame(GOOG[,"GOOG.Close"] + AMZN[,"AMZN.Close"] + AAPL[,"AAPL.Close"] + MSFT[,"MSFT.Close"]+ FB[,"FB.Close"])
ggplot(FANG, aes(x=Date, y=scale(GOOG.Close), group = 1))+
geom_line(col = "red")+
geom_line(aes(x=Date, y=scale(SPY)))+
geom_line(aes(x=Date, y=scale(FAANG)), col = "red", alpha = 0.2)+
geom_line(aes(x=Date, y=scale(FAAMG)), col = "darkred", alpha = 0.2)+
ylab("")+
xlab("")+
theme_bw()+
ggtitle(paste("FANG/FAANG/FAAMG vs SPY", FANG$Date[nrow(FANG)]))+
scale_x_date(date_breaks = "1 month", date_labels = "%b")+
theme(axis.line = element_line(),
axis.text=element_text(color='black'),
axis.title = element_text(colour = 'black'),
legend.text=element_text(),
legend.title=element_text(),
axis.text.x = element_text(angle = 0),
legend.position='none',
text = element_text(size=10))
|
/derived/FANG_Index.R
|
no_license
|
JMFlin/trading-analytics
|
R
| false
| false
| 1,476
|
r
|
## Duplicate of the FANG index script: plots a z-scored FANG index (sum of
## GOOG+AMZN+NFLX+FB closes) against SPY with FAANG/FAAMG variants.
## getSymbols() creates one xts object per ticker in .GlobalEnv.
library(quantmod)
library(lubridate)
library(ggplot2)
library(gridExtra)
## English month labels regardless of locale
Sys.setlocale("LC_TIME", "C")
getSymbols(c("GOOG", "AMZN", "AAPL", "NFLX", "FB", "MSFT"), env = .GlobalEnv, from="2017-01-01")
getSymbols("SPY", env = .GlobalEnv, from="2017-01-01")
FANG <- data.frame(GOOG[,"GOOG.Close"] + AMZN[,"AMZN.Close"] + NFLX[,"NFLX.Close"]+ FB[,"FB.Close"])
FANG$Date <- as.Date(row.names(FANG))
FANG$SPY <- SPY[,"SPY.Close"]
names(FANG)[3] <- "SPY"
FANG$FAANG <- data.frame(GOOG[,"GOOG.Close"] + AMZN[,"AMZN.Close"] + AAPL[,"AAPL.Close"] + NFLX[,"NFLX.Close"]+ FB[,"FB.Close"])
FANG$FAAMG <- data.frame(GOOG[,"GOOG.Close"] + AMZN[,"AMZN.Close"] + AAPL[,"AAPL.Close"] + MSFT[,"MSFT.Close"]+ FB[,"FB.Close"])
## All series are z-scored via scale() to share a single axis
ggplot(FANG, aes(x=Date, y=scale(GOOG.Close), group = 1))+
geom_line(col = "red")+
geom_line(aes(x=Date, y=scale(SPY)))+
geom_line(aes(x=Date, y=scale(FAANG)), col = "red", alpha = 0.2)+
geom_line(aes(x=Date, y=scale(FAAMG)), col = "darkred", alpha = 0.2)+
ylab("")+
xlab("")+
theme_bw()+
ggtitle(paste("FANG/FAANG/FAAMG vs SPY", FANG$Date[nrow(FANG)]))+
scale_x_date(date_breaks = "1 month", date_labels = "%b")+
theme(axis.line = element_line(),
axis.text=element_text(color='black'),
axis.title = element_text(colour = 'black'),
legend.text=element_text(),
legend.title=element_text(),
axis.text.x = element_text(angle = 0),
legend.position='none',
text = element_text(size=10))
#' Extreme Heat Exposure (EHE)
#'
#' @description Extreme Heat Exposure (EHE). Useful for climatic risk assessment on wheat and barley.
#' @param mx vector of daily maximum temperature series.
#' @param dates vector of dates corresponding with the daily temperature series.
#' @param op character. Indicates whether the output will be in date or numeric format.
#' @details Adapted from Trnka et al. (2014). An event is triggered when Tmax is above +35°C for at least three consecutive days during the period from five days after anthesis (supposed to be May-1st) to maturity (supposed to be July-31st). The minimum daily temperature is usually measured 2 m above ground; thus, the actual crop temperature might be even lower.
#' @return If op = "first", the function returns the day ('%d-%m' format) on which the first event of each year starts. If op == 'doy', the julian day is returned. If op = "number", the function returns the number of events that occurred in the year.
#' @references Trnka M, Rotter RP, Ruiz-Ramos M, Kersebaum KC, Olesen JE, Zalud Z, Semenov MA (2014) Adverse weather conditions for European wheat production will become more frequent with climate change. Nature Climate Change volume 4, pages 637–643.
#' @examples
#'
#' ehe(mx = daily_tmax,
#'     dates = seq.Date(as.Date('1981-01-01'),
#'             as.Date('2010-12-31'), by ='day'),
#'     op = 'first')
#'
#' @import zoo
#' @export
ehe <- function(mx, dates, op = 'first'){
  ## Per-year worker: xx = one year of daily Tmax, dd = the matching dates.
  ff <- function(xx, dd, op){
    if(is.na(sum(xx))) daythres <- NA else{
      xx <- zoo(xx, dd)
      ## Sensitive window: May-01 (five days after assumed anthesis) through
      ## Jul-31 (assumed maturity).
      wini <- which(format(time(xx), '%d-%m') == '01-05')
      wend <- which(format(time(xx), '%d-%m') == '31-07')
      ## Run-length encode the daily exceedance mask (Tmax > 35 degC).
      f <- rle(as.numeric(xx)[wini:wend] > 35)
      ## BUG FIX: keep TRUE runs lasting at least 3 consecutive days.
      ## The previous code, which(which(f$values) > 3), compared the
      ## *position* of each TRUE run within the rle -- not its length -- so
      ## the documented 3-day criterion was never actually applied.
      w <- which(f$values & f$lengths >= 3)
      if(length(w) > 0){
        ## Start index of run k is 1 + sum of the lengths of runs 1..k-1.
        ## (The previous sum(f$lengths[1:(w[i] - 1)]) + 1 also broke when a
        ## qualifying run was the very first run, because 1:0 == c(1, 0).)
        starts <- cumsum(c(1L, f$lengths))[w]
        daythres <- format(time(xx[wini:wend][starts]), '%d-%m')
      } else {daythres <- NULL}
    }
    if(op == 'first'){
      if(is.null(daythres)){
        return(NA)
      } else{
        return(daythres[1])
      }
    } else if(op == 'doy'){
      if(is.null(daythres)){
        return(NA)
      } else{
        ## Map '%d-%m' to a day-of-year using a non-leap reference year (1901).
        w <- which(as.Date(paste0('1901-', daythres[1]), '%Y-%d-%m') ==
        seq.Date(as.Date('1901-01-01'), as.Date('1901-12-31'), by = 'day'))
        return(w)
      }
    } else if(op == 'number'){
      ## NOTE: for an all-NA year daythres is NA, so length() counts 1 event;
      ## kept as-is for backward compatibility with the original behaviour.
      return(length(daythres))
    }
  }
  ## Split the series by calendar year and apply the worker to each year.
  years <- unique(substr(dates, 1, 4))
  eheres <- numeric()
  for(i in seq_along(years)){
    dd <- dates[which(substr(dates, 1, 4) == years[i])]
    xx <- mx[match(dd, dates)]
    eheres[i] <- ff(xx, dd, op = op)
  }
  return(eheres)
}
|
/R/ehe.R
|
no_license
|
cran/agroclim
|
R
| false
| false
| 2,697
|
r
|
#' Extreme Heat Exposure (EHE)
#'
#' @description Extreme Heat Exposure (EHE). Useful for climatic risk assessment on wheat and barley.
#' @param mx vector of daily maximum temperature series.
#' @param dates vector of dates corresponding with the daily temperature series.
#' @param op character. Indicates whether the output will be in date or numeric format.
#' @details Adapted from Trnka et al. (2014). Event is triggered when the Tmax is above +35°C for at least three days during the period from five days after anthesis (supposed to be May-1st) to maturity (supposed to be July-31st). The minimum daily temperature is usually measured 2 m above ground; thus, the actual crop temperature might be even lower.
#' @return If op = "first", the function returns the first day (date format) when the first event is triggered. If op =='doy', the julian day is returned. If op = "number", the function returns the number of events that occurred in the year.
#' @references Trnka M, Rotter RP, Ruiz-Ramos M, Kersebaum KC, Olesen JE, Zalud Z, Semenov MA (2014) Adverse weather conditions for European wheat production will become more frequent with climate change. Nature Climate Change volume 4, pages 637–643.
#' @examples
#'
#' ehe(mx = daily_tmax,
#' dates = seq.Date(as.Date('1981-01-01'),
#' as.Date('2010-12-31'), by ='day'),
#' op = 'first')
#'
#' @import zoo
#' @export
ehe <- function(mx, dates, op = 'first'){
## Per-year worker: xx = one year of daily Tmax, dd = the matching dates.
ff <- function(xx, dd, op){
## Years with any missing Tmax get daythres = NA (not NULL; see op branches).
if(is.na(sum(xx))) daythres <- NA else{
xx <- zoo(xx, dd)
## Sensitive window: May-01 through Jul-31 of this year's series.
wini <- which(format(time(xx),'%d-%m') == '01-05')
wend <- which(format(time(xx),'%d-%m') == '31-07')
## Run-length encode the daily exceedance mask (Tmax > 35 degC).
f <- rle(as.numeric(xx)[wini:wend] > 35)
## NOTE(review): this selects TRUE runs whose *position* in the rle exceeds
## 3, not runs lasting >= 3 days as the roxygen @details describes;
## 'which(f$values & f$lengths >= 3)' looks intended -- verify against the
## Trnka et al. (2014) definition before changing.
w <- which(which(f$values) > 3)
if(length(w) > 0){
daythres <- numeric()
for(i in 1:length(w)){
## Start of run w[i] = 1 + total length of the preceding runs, formatted
## as '%d-%m'. NOTE(review): breaks if w[i] == 1 (1:0 is c(1, 0)).
daythres[i] <- format(time(xx[wini:wend][sum(f$lengths[1:(w[i] - 1)])+1]), '%d-%m')
}
} else {daythres <- NULL}
}
if(op == 'first'){
if(is.null(daythres)){
return(NA)
} else{
return(daythres[1])
}
} else if(op == 'doy'){
if(is.null(daythres)){
return(NA)
} else{
## Map '%d-%m' to day-of-year via a non-leap reference year (1901).
w <- which(as.Date(paste0('1901-',daythres[1]), '%Y-%d-%m') ==
seq.Date(as.Date('1901-01-01'), as.Date('1901-12-31'), by ='day'))
return(w)
}
} else if(op == 'number'){
## NOTE(review): an all-NA year has daythres == NA, so length() counts 1.
return(length(daythres))
}
}
## Split the series by calendar year and apply the worker to each year.
years <- unique(substr(dates, 1, 4))
eheres <- numeric()
for(i in 1:length(years)){
dd <- dates[which(substr(dates, 1, 4) == years[i])]
xx <- mx[match(dd, dates)]
eheres[i] <- ff(xx, dd, op = op)
}
return(eheres)
}
|
###################################################################################################################
# Code for simulating detection-nondetection data for 30 regional monitoring transects #
###################################################################################################################
library(raster)
library(rSPACE)
library(rgdal)
library(spatstat)
library(maptools)
## National-forest code for this run; indexes into frst_area below.
forest <- "OKWA"
frst_area <- c(COWA=583861.9,DEOR=784698.1,FROR=1303163.5,GPWA=440452.1,MAOR=837544,MHOR=258748.1,OCOR=394800.5,
OKWA=1319493.5,UMOR=733938.4,WAOR=1020271.1) #In hectares
trnd <- 0.9 #Can be 0.9, 0.95, 0.98, or 1.0
HR <- 1 #Home range radius (km; can be 1 or 0.6)
D <- 0.2522 #Density = number of home ranges per 70 ha
HR_path <- "HR1000mD2522/" #subfolder corresponding with selected home range size and initial density
## NOTE(review): hard-coded absolute Windows paths; edit these per machine.
path <- paste("F:/research stuff/FS_PostDoc/Occupancy_analysis_simulations/WHWO_R6_monitoring/Power_analysis_via_simulation/R6/",HR_path,sep="")
#path <- paste("C:/qs_files/occ_sims/",HR_path,sep="")
path.grid <- "E:/GISData/WHWO/Occ_sims/R6"
library(R.utils)
## Output directory <path>/<forest>, created on demand; setwd() changes the
## session working directory for everything that follows.
pth <- paste(path,forest,sep="")
if(!dir.exists(pth)) dir.create(pth)
setwd(pth)
## Initial abundance = density (home ranges per 70 ha) times forest area.
N <- as.numeric(round(D*(frst_area[forest]/70)))
HabitatMap <- raster(paste(path.grid,"/NF_",forest,"/habitat.tif",sep=""),band=1)
HabitatMap[which(getValues(HabitatMap)==0)] <-
0.99 # Replace 0s with 0.99 so that WHWO movement is not restricted by habitat (only HR center should be restricted)
GridMap<-raster(paste(path.grid,"/NF_",forest,"/surveys.tif",sep=""), band=1)
## Nest the output one level deeper by lambda scenario, e.g. .../Lmbd90.
pth <- paste(pth,"/Lmbd",trnd*100,sep="")
if(!dir.exists(pth)) dir.create(pth)
setwd(pth)
#-- Manual input version
BaseParameters<-list(
N = N, # Initial population size
trendtype ="abundance-exponential", #Not a necessary addition, but we're moving towards requiring this to be set explicitly. You can also use "abundance-linear".
lmda = trnd, # Population growth rate
n_yrs = 20, # Maximum number of years in simulation
n_visits = 2, # Maximum number of visits per year
grid_size = 25, ##IGNORED! # Cell size in grid
MFratio = c(1), # Ratio of types of individuals
buffer = c(0.3), # Distance between individual center locations (km)
moveDist = c(HR), # Movement radius
moveDistQ = c(0.95), # Proportion of time in radius (defines meaning of "how far")
maxDistQ = c(0), # Truncate movements above 1 SD
habitat.cutoff = 1, # Minimum habitat value required for individual center locations
sample.cutoff = 0.5, ##IGNORED! # % pixels in cell above habitat value to include cell in sample frame
repeat.groups = T, #Doubles availability to represent pair of individuals
wghts = T) #Indicates whether the habitat suitability values should be used to weight the probability of choosing individual center locations
#from <- paste(forest,"/rSPACEy",seq(1,8),".txt",sep="")
#to <- paste(forest,"/rSPACEx",seq(1,8)+92,".txt",sep="")
#file.rename(from,to)
if(!file.exists("Ppres.txt")) file.create("Ppres.txt",showWarnings=F)
#as.list(body(encounter.history)) # To see where to put the trace (at=?), run this and place it at the [[#]] after 'P.pres[,tt] <- probPRES(useLayer, grid_layer)'
#untrace(encounter.history) #Run this if re-running within a session.
## NOTE(review): trace() patches rSPACE's internal encounter.history at
## statement index 28 to append P.pres to Ppres.txt; that index is tied to
## the installed rSPACE version's function body -- re-check after upgrading.
trace(encounter.history, tracer=quote(if(file.exists("Ppres.txt")){
cat(round(P.pres,3),'\n', file="Ppres.txt", append=T)
} ), at=28, where=rSPACE::createReplicates)
## Run 30 simulation replicates; filter.map restricts sampling to surveyed cells.
createReplicates(n_runs=30, map=HabitatMap, Parameters=BaseParameters,
filter.map=GridMap,skipConfirm=T, run.label=forest, base.name="rSPACEx", add=T)
|
/02-Sims_rSPACE_R6.R
|
no_license
|
qureshlatif/WHWO-Regional-monitoring-simulation-project
|
R
| false
| false
| 3,925
|
r
|
###################################################################################################################
# Code for simulating detection-nondetection data for 30 regional monitoring transects #
###################################################################################################################
## Duplicate of the rSPACE simulation driver: builds habitat/survey rasters,
## sets up rSPACE parameters, patches encounter.history via trace() to log
## presence probabilities, then runs 30 replicates for one forest/lambda.
library(raster)
library(rSPACE)
library(rgdal)
library(spatstat)
library(maptools)
forest <- "OKWA"
frst_area <- c(COWA=583861.9,DEOR=784698.1,FROR=1303163.5,GPWA=440452.1,MAOR=837544,MHOR=258748.1,OCOR=394800.5,
OKWA=1319493.5,UMOR=733938.4,WAOR=1020271.1) #In hectares
trnd <- 0.9 #Can be 0.9, 0.95, 0.98, or 1.0
HR <- 1 #Home range radius (km; can be 1 or 0.6)
D <- 0.2522 #Density = number of home ranges per 70 ha
HR_path <- "HR1000mD2522/" #subfolder corresponding with selected home range size and initial density
path <- paste("F:/research stuff/FS_PostDoc/Occupancy_analysis_simulations/WHWO_R6_monitoring/Power_analysis_via_simulation/R6/",HR_path,sep="")
#path <- paste("C:/qs_files/occ_sims/",HR_path,sep="")
path.grid <- "E:/GISData/WHWO/Occ_sims/R6"
library(R.utils)
pth <- paste(path,forest,sep="")
if(!dir.exists(pth)) dir.create(pth)
setwd(pth)
## Initial abundance from density (home ranges per 70 ha) and forest area
N <- as.numeric(round(D*(frst_area[forest]/70)))
HabitatMap <- raster(paste(path.grid,"/NF_",forest,"/habitat.tif",sep=""),band=1)
HabitatMap[which(getValues(HabitatMap)==0)] <-
0.99 # Replace 0s with 0.99 so that WHWO movement is not restricted by habitat (only HR center should be restricted)
GridMap<-raster(paste(path.grid,"/NF_",forest,"/surveys.tif",sep=""), band=1)
pth <- paste(pth,"/Lmbd",trnd*100,sep="")
if(!dir.exists(pth)) dir.create(pth)
setwd(pth)
#-- Manual input version
BaseParameters<-list(
N = N, # Initial population size
trendtype ="abundance-exponential", #Not a necessary addition, but we're moving towards requiring this to be set explicitly. You can also use "abundance-linear".
lmda = trnd, # Population growth rate
n_yrs = 20, # Maximum number of years in simulation
n_visits = 2, # Maximum number of visits per year
grid_size = 25, ##IGNORED! # Cell size in grid
MFratio = c(1), # Ratio of types of individuals
buffer = c(0.3), # Distance between individual center locations (km)
moveDist = c(HR), # Movement radius
moveDistQ = c(0.95), # Proportion of time in radius (defines meaning of "how far")
maxDistQ = c(0), # Truncate movements above 1 SD
habitat.cutoff = 1, # Minimum habitat value required for individual center locations
sample.cutoff = 0.5, ##IGNORED! # % pixels in cell above habitat value to include cell in sample frame
repeat.groups = T, #Doubles availability to represent pair of individuals
wghts = T) #Indicates whether the habitat suitability values should be used to weight the probability of choosing individual center locations
#from <- paste(forest,"/rSPACEy",seq(1,8),".txt",sep="")
#to <- paste(forest,"/rSPACEx",seq(1,8)+92,".txt",sep="")
#file.rename(from,to)
if(!file.exists("Ppres.txt")) file.create("Ppres.txt",showWarnings=F)
#as.list(body(encounter.history)) # To see where to put the trace (at=?), run this and place it at the [[#]] after 'P.pres[,tt] <- probPRES(useLayer, grid_layer)'
#untrace(encounter.history) #Run this if re-running within a session.
## NOTE(review): the at=28 statement index is version-dependent -- re-check
## after any rSPACE upgrade.
trace(encounter.history, tracer=quote(if(file.exists("Ppres.txt")){
cat(round(P.pres,3),'\n', file="Ppres.txt", append=T)
} ), at=28, where=rSPACE::createReplicates)
createReplicates(n_runs=30, map=HabitatMap, Parameters=BaseParameters,
filter.map=GridMap,skipConfirm=T, run.label=forest, base.name="rSPACEx", add=T)
|
## Unit tests for metaDigitise's miscellaneous helper functions. Interactive
## prompts (readline) are replaced with mocks so the tests run unattended.
## NOTE(review): testthat::with_mock is deprecated in testthat 3e; consider
## migrating to local_mocked_bindings()/mockery when updating testthat.
context("Check misc functions...")
testthat::test_that("Checking is.wholenumber..", {
testthat::expect_equal(
is.wholenumber(x=1),
TRUE,
info = "is.wholenumber failed")
})
testthat::test_that("Checking isNumeric..", {
testthat::expect_equal(
isNumeric(x=1),
TRUE,
info = "isNumeric failed")
})
testthat::test_that("Checking is.even..", {
testthat::expect_equal(
is.even(x=1),
FALSE,
info = "is.even failed")
})
## filename() should strip the directory part of a path
testthat::test_that("Checking filename..", {
testthat::expect_equal(
filename(x="~/Dropbox/0_postdoc/10_metaDigitise/example_figs/5_fig2a.png"),
"5_fig2a.png",
info = "filename failed")
})
## Wraps user_options() with readline mocked to always answer "a"
user_options_tester_func <- function(...) {
testthat::with_mock(
readline = function(question) "a",
user_options (...)
)
}
testthat::test_that("Checking user_options..", {
testthat::expect_equal(
user_options_tester_func("question", c("a","b","c")),
"a",
info = "user_options failed")
})
## user_unique() accepts an answer outside the offered options
testthat::test_that("Checking user_unique..", {
testthat::expect_equal(
testthat::with_mock(
readline = function(question) "d",
user_unique("question", c("a","b","c"))
),
"d",
info = "user_unique failed")
})
## Wraps user_numeric() with readline mocked to a caller-chosen reply
user_numeric_tester_func <- function(..., user_entry) {
testthat::with_mock(
readline = function(question) user_entry,
user_numeric (...)
)
}
testthat::test_that("Checking user_numeric..", {
testthat::expect_equal(
user_numeric_tester_func("question",user_entry="1"),
1,
info = "user_numeric failed")
})
## Wraps user_count() with readline mocked to always answer "1"
user_count_tester_func <- function(...) {
testthat::with_mock(
readline = function(question) "1",
user_count (...)
)
}
testthat::test_that("Checking user_count..", {
testthat::expect_equal(
user_count_tester_func("question"),
1,
info = "user_count failed")
})
testthat::test_that("Checking user_base..", {
testthat::with_mock( readline = function(question) "1", testthat::expect_equal(user_base(),"1", info = "user_base failed"))
testthat::with_mock( readline = function(question) "e", testthat::expect_equal(user_base(),"e", info = "user_base failed"))
})
## Wraps ask_variable() with readline mocked to always answer "x"
ask_variable_tester_func <- function(...) {
testthat::with_mock(
readline = function(question) "x",
ask_variable (...)
)
}
## Scatterplots need both axes named; the other plot types need one variable
testthat::test_that("Checking ask_variable..", {
testthat::expect_equal(
ask_variable_tester_func(plot_type="scatterplot"),
c(y="x",x="x"),
info = "ask_variable failed")
testthat::expect_equal(
ask_variable_tester_func(plot_type="mean_error"),
"x",
info = "ask_variable failed")
testthat::expect_equal(
ask_variable_tester_func(plot_type="boxplot"),
"x",
info = "ask_variable failed")
testthat::expect_equal(
ask_variable_tester_func(plot_type="histogram"),
"x",
info = "ask_variable failed")
})
## knownN(): both the y/n prompt and the per-group count prompts are mocked;
## mockery::mock(40,30,20,10) returns one value per successive call.
testthat::test_that("Checking knownN..", {
testthat::with_mock(
`metaDigitise::user_options` = function(...) "n",
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(1,20), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=NULL)
,
NULL,
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "y",
`metaDigitise::user_count` = mockery::mock(40,30,20,10),
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=NULL)
,
c(d=40,c=30,b=20,a=10),
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "n",
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=c(40,30,20,10))
,
NULL,
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "y",
`metaDigitise::user_count` = mockery::mock(40,30,20,10),
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=c(10,20,30,40))
,
c(d=40,c=30,b=20,a=10),
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "c",
`metaDigitise::user_count` = mockery::mock(40,30,20,10),
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=c(d=40,c=30,b=20,a=10))
,
c(d=40,c=30,b=20,a=10),
info = "knownN failed"
))
)
})
|
/tests/testthat/test-misc_func.R
|
no_license
|
daniel1noble/metaDigitise
|
R
| false
| false
| 4,703
|
r
|
## Duplicate of the metaDigitise misc-function test file; interactive prompts
## are mocked via testthat::with_mock (deprecated in testthat 3e) and mockery.
context("Check misc functions...")
testthat::test_that("Checking is.wholenumber..", {
testthat::expect_equal(
is.wholenumber(x=1),
TRUE,
info = "is.wholenumber failed")
})
testthat::test_that("Checking isNumeric..", {
testthat::expect_equal(
isNumeric(x=1),
TRUE,
info = "isNumeric failed")
})
testthat::test_that("Checking is.even..", {
testthat::expect_equal(
is.even(x=1),
FALSE,
info = "is.even failed")
})
testthat::test_that("Checking filename..", {
testthat::expect_equal(
filename(x="~/Dropbox/0_postdoc/10_metaDigitise/example_figs/5_fig2a.png"),
"5_fig2a.png",
info = "filename failed")
})
## Helper: run user_options() with readline mocked to answer "a"
user_options_tester_func <- function(...) {
testthat::with_mock(
readline = function(question) "a",
user_options (...)
)
}
testthat::test_that("Checking user_options..", {
testthat::expect_equal(
user_options_tester_func("question", c("a","b","c")),
"a",
info = "user_options failed")
})
testthat::test_that("Checking user_unique..", {
testthat::expect_equal(
testthat::with_mock(
readline = function(question) "d",
user_unique("question", c("a","b","c"))
),
"d",
info = "user_unique failed")
})
## Helper: run user_numeric() with readline mocked to a caller-chosen reply
user_numeric_tester_func <- function(..., user_entry) {
testthat::with_mock(
readline = function(question) user_entry,
user_numeric (...)
)
}
testthat::test_that("Checking user_numeric..", {
testthat::expect_equal(
user_numeric_tester_func("question",user_entry="1"),
1,
info = "user_numeric failed")
})
## Helper: run user_count() with readline mocked to answer "1"
user_count_tester_func <- function(...) {
testthat::with_mock(
readline = function(question) "1",
user_count (...)
)
}
testthat::test_that("Checking user_count..", {
testthat::expect_equal(
user_count_tester_func("question"),
1,
info = "user_count failed")
})
testthat::test_that("Checking user_base..", {
testthat::with_mock( readline = function(question) "1", testthat::expect_equal(user_base(),"1", info = "user_base failed"))
testthat::with_mock( readline = function(question) "e", testthat::expect_equal(user_base(),"e", info = "user_base failed"))
})
## Helper: run ask_variable() with readline mocked to answer "x"
ask_variable_tester_func <- function(...) {
testthat::with_mock(
readline = function(question) "x",
ask_variable (...)
)
}
testthat::test_that("Checking ask_variable..", {
testthat::expect_equal(
ask_variable_tester_func(plot_type="scatterplot"),
c(y="x",x="x"),
info = "ask_variable failed")
testthat::expect_equal(
ask_variable_tester_func(plot_type="mean_error"),
"x",
info = "ask_variable failed")
testthat::expect_equal(
ask_variable_tester_func(plot_type="boxplot"),
"x",
info = "ask_variable failed")
testthat::expect_equal(
ask_variable_tester_func(plot_type="histogram"),
"x",
info = "ask_variable failed")
})
## knownN(): mockery::mock(40,30,20,10) yields one value per successive call
testthat::test_that("Checking knownN..", {
testthat::with_mock(
`metaDigitise::user_options` = function(...) "n",
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(1,20), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=NULL)
,
NULL,
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "y",
`metaDigitise::user_count` = mockery::mock(40,30,20,10),
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=NULL)
,
c(d=40,c=30,b=20,a=10),
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "n",
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=c(40,30,20,10))
,
NULL,
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "y",
`metaDigitise::user_count` = mockery::mock(40,30,20,10),
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=c(10,20,30,40))
,
c(d=40,c=30,b=20,a=10),
info = "knownN failed"
))
)
testthat::with_mock(
`metaDigitise::user_options` = function(...) "c",
`metaDigitise::user_count` = mockery::mock(40,30,20,10),
testthat::evaluate_promise(testthat::expect_equal(
knownN(plot_type="scatterplot",processed_data=data.frame(id=rep(letters[4:1],5), x=rep(1,20),y=rep(1,20), stringsAsFactors = TRUE), knownN=c(d=40,c=30,b=20,a=10))
,
c(d=40,c=30,b=20,a=10),
info = "knownN failed"
))
)
})
|
## Integration tests for repository CRUD against the live GitHub API.
context("repositories")
# SETUP ------------------------------------------------------------------------
## Random 10-letter suffix makes resource names unique per test run, so
## repeated or parallel runs do not collide on GitHub.
suffix <- sample(letters, 10, replace = TRUE) %>% str_c(collapse = "")
setup(suppressMessages({
create_team(
name = str_c("test-repositories-", suffix),
org = "HairyCoos",
description = "This is a team to test repositories"
)
}))
## Teardown deletes everything the tests may have created; each delete is in
## its own try() so cleanup continues even when a resource was never created
## (e.g. after a mid-test failure or rename).
teardown(suppressMessages({
try(silent = TRUE, {
delete_team(str_c("test-repositories-", suffix), org = "HairyCoos")
})
try(silent = TRUE, {
delete_repository(str_c("ChadGoymer/user-repository-", suffix))
})
try(silent = TRUE, {
delete_repository(str_c("ChadGoymer/org-repository-", suffix))
})
try(silent = TRUE, {
delete_repository(str_c("ChadGoymer/updated-user-repository-", suffix))
})
try(silent = TRUE, {
delete_repository(str_c("ChadGoymer/updated-org-repository-", suffix))
})
}))
# TEST: create_repository ------------------------------------------------------
## Creates one user-owned and one org-owned repository via the live API and
## checks the HTTP status (201 Created), the class of every returned property,
## and the key metadata fields.
test_that("create_repository creates a repository and returns its properties", {
user_repo <- create_repository(
name = str_c("user-repository-", suffix),
description = "This is a user repository",
homepage = "https://user-repository.com",
auto_init = TRUE
)
## Give GitHub a moment to finish initialising the repository
Sys.sleep(1)
expect_is(user_repo, "list")
expect_identical(attr(user_repo, "status"), 201L)
## Full property-name/class contract of the returned list
expect_identical(
map_chr(user_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
expect_identical(
user_repo$full_name,
str_c("ChadGoymer/user-repository-", suffix)
)
expect_identical(user_repo$description, "This is a user repository")
expect_identical(user_repo$homepage, "https://user-repository.com")
## Same checks for an organization-owned repository
org_repo <- create_repository(
name = str_c("org-repository-", suffix),
org = "HairyCoos",
description = "This is an organization respository",
homepage = "https://org-repository.com"
)
expect_is(org_repo, "list")
expect_identical(attr(org_repo, "status"), 201L)
expect_identical(
map_chr(org_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
expect_identical(
org_repo$full_name,
str_c("HairyCoos/org-repository-", suffix)
)
expect_identical(org_repo$description, "This is an organization respository")
expect_identical(org_repo$homepage, "https://org-repository.com")
})
# TEST: update_repository ------------------------------------------------------
test_that("update_repository changes a repository's properties", {
user_repo <- update_repository(
repo = str_c("ChadGoymer/user-repository-", suffix),
name = str_c("updated-user-repository-", suffix),
description = "This is an updated user respository",
homepage = "https://updated-user-repository.com",
has_issues = FALSE,
has_projects = FALSE,
has_wiki = FALSE,
default_branch = "main"
)
expect_is(user_repo, "list")
expect_identical(attr(user_repo, "status"), 200L)
expect_identical(
map_chr(user_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
expect_identical(
user_repo$full_name,
str_c("ChadGoymer/updated-user-repository-", suffix)
)
expect_identical(user_repo$description, "This is an updated user respository")
expect_identical(user_repo$homepage, "https://updated-user-repository.com")
expect_false(user_repo$has_issues)
expect_false(user_repo$has_projects)
expect_false(user_repo$has_wiki)
expect_identical(user_repo$default_branch, "main")
org_repo <- update_repository(
repo = str_c("HairyCoos/org-repository-", suffix),
name = str_c("updated-org-repository-", suffix),
description = "This is an updated organization respository",
homepage = "https://updated-org-repository.com",
private = FALSE,
allow_squash_merge = FALSE,
allow_merge_commit = FALSE,
allow_rebase_merge = TRUE,
delete_branch_on_merge = TRUE
)
expect_is(org_repo, "list")
expect_identical(attr(org_repo, "status"), 200L)
expect_identical(
map_chr(org_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
expect_identical(
org_repo$full_name,
str_c("HairyCoos/updated-org-repository-", suffix)
)
expect_identical(
org_repo$description,
"This is an updated organization respository"
)
expect_identical(org_repo$homepage, "https://updated-org-repository.com")
expect_false(org_repo$private)
expect_false(org_repo$allow_squash_merge)
expect_false(org_repo$allow_merge_commit)
expect_true(org_repo$allow_rebase_merge)
archived_repo <- update_repository(
repo = str_c("ChadGoymer/updated-user-repository-", suffix),
archived = TRUE
)
expect_is(archived_repo, "list")
expect_identical(attr(archived_repo, "status"), 200L)
expect_identical(
map_chr(archived_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
expect_true(archived_repo$archived)
added_team_repo <- update_repository(
repo = str_c("HairyCoos/updated-org-repository-", suffix),
team = str_c("test-repositories-", suffix),
permission = "pull"
)
expect_is(added_team_repo, "list")
expect_identical(attr(added_team_repo, "status"), 200L)
expect_identical(
map_chr(added_team_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
read_repo <- view_repository(
repo = str_c("HairyCoos/updated-org-repository-", suffix),
team = str_c("test-repositories-", suffix),
org = "HairyCoos"
)
expect_identical(read_repo$permission, "pull")
updated_team_repo <- update_repository(
repo = str_c("HairyCoos/updated-org-repository-", suffix),
team = str_c("test-repositories-", suffix),
permission = "maintain"
)
expect_is(updated_team_repo, "list")
expect_identical(attr(updated_team_repo, "status"), 200L)
expect_identical(
map_chr(updated_team_repo, ~ class(.)[[1]]),
c(
id = "integer",
name = "character",
full_name = "character",
description = "character",
owner = "character",
homepage = "character",
language = "character",
size = "numeric",
default_branch = "character",
permission = "character",
private = "logical",
has_issues = "logical",
has_projects = "logical",
has_wiki = "logical",
has_pages = "logical",
has_downloads = "logical",
allow_squash_merge = "logical",
allow_merge_commit = "logical",
allow_rebase_merge = "logical",
fork = "logical",
archived = "logical",
disabled = "logical",
watchers_count = "integer",
stargazers_count = "integer",
forks_count = "integer",
html_url = "character",
pushed_at = "POSIXct",
created_at = "POSIXct",
updated_at = "POSIXct"
)
)
maintain_repo <- view_repository(
repo = str_c("HairyCoos/updated-org-repository-", suffix),
team = str_c("test-repositories-", suffix),
org = "HairyCoos"
)
expect_identical(maintain_repo$permission, "maintain")
})
# TEST: view_repositories ------------------------------------------------------
test_that("view_repositories returns a tibble summarising the repositories", {
  # Expected class of each column in the summary tibble; defined once here
  # instead of being copy-pasted before every expectation.
  repo_classes <- c(
    id = "integer",
    name = "character",
    full_name = "character",
    description = "character",
    owner = "character",
    homepage = "character",
    language = "character",
    size = "numeric",
    default_branch = "character",
    permission = "character",
    private = "logical",
    has_issues = "logical",
    has_projects = "logical",
    has_wiki = "logical",
    has_pages = "logical",
    has_downloads = "logical",
    allow_squash_merge = "logical",
    allow_merge_commit = "logical",
    allow_rebase_merge = "logical",
    fork = "logical",
    archived = "logical",
    disabled = "logical",
    watchers_count = "integer",
    stargazers_count = "integer",
    forks_count = "integer",
    html_url = "character",
    pushed_at = "POSIXct",
    created_at = "POSIXct",
    updated_at = "POSIXct"
  )
  # Repositories for a named user; default ordering is newest first
  user_repos <- view_repositories(user = "ChadGoymer", n_max = 10)
  expect_is(user_repos, "tbl")
  expect_identical(attr(user_repos, "status"), 200L)
  expect_identical(map_chr(user_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-user-repository-", suffix) %in% user_repos$name)
  expect_identical(
    sort(user_repos$created_at, decreasing = TRUE),
    user_repos$created_at
  )
  # Explicit sort/direction arguments are honoured
  ordered_repos <- view_repositories(
    user = "ChadGoymer",
    sort = "full_name",
    direction = "asc",
    n_max = 10
  )
  expect_is(ordered_repos, "tbl")
  expect_identical(attr(ordered_repos, "status"), 200L)
  expect_identical(map_chr(ordered_repos, ~ class(.)[[1]]), repo_classes)
  expect_identical(sort(ordered_repos$full_name), ordered_repos$full_name)
  # Repositories for an organization
  org_repos <- view_repositories(org = "HairyCoos", n_max = 10)
  expect_is(org_repos, "tbl")
  expect_identical(attr(org_repos, "status"), 200L)
  expect_identical(map_chr(org_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-org-repository-", suffix) %in% org_repos$name)
  expect_identical(
    org_repos %>%
      filter(name == str_c("updated-org-repository-", suffix)) %>%
      pull("permission"),
    "admin"
  )
  # Repositories for a team; permission reflects the team's access level
  team_repos <- view_repositories(
    team = str_c("test-repositories-", suffix),
    org = "HairyCoos",
    n_max = 10
  )
  expect_is(team_repos, "tbl")
  expect_identical(attr(team_repos, "status"), 200L)
  expect_identical(map_chr(team_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-org-repository-", suffix) %in% team_repos$name)
  expect_identical(
    team_repos %>%
      filter(name == str_c("updated-org-repository-", suffix)) %>%
      pull("permission"),
    "maintain"
  )
  # With no user/org/team, repositories of the authenticated user are returned
  auth_repos <- view_repositories(n_max = 10)
  expect_is(auth_repos, "tbl")
  expect_identical(attr(auth_repos, "status"), 200L)
  expect_identical(map_chr(auth_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-user-repository-", suffix) %in% auth_repos$name)
})
# TEST: view_repository --------------------------------------------------------
test_that("view_repository returns a list of repository properties", {
  # Expected class for every property in the returned list
  expected_classes <- c(
    id = "integer",
    name = "character",
    full_name = "character",
    description = "character",
    owner = "character",
    homepage = "character",
    language = "character",
    size = "numeric",
    default_branch = "character",
    permission = "character",
    private = "logical",
    has_issues = "logical",
    has_projects = "logical",
    has_wiki = "logical",
    has_pages = "logical",
    has_downloads = "logical",
    allow_squash_merge = "logical",
    allow_merge_commit = "logical",
    allow_rebase_merge = "logical",
    fork = "logical",
    archived = "logical",
    disabled = "logical",
    watchers_count = "integer",
    stargazers_count = "integer",
    forks_count = "integer",
    html_url = "character",
    pushed_at = "POSIXct",
    created_at = "POSIXct",
    updated_at = "POSIXct"
  )
  # Fetch the repository renamed earlier by the update_repository test
  repo_details <- view_repository(
    repo = str_c("ChadGoymer/updated-user-repository-", suffix)
  )
  expect_is(repo_details, "list")
  expect_identical(attr(repo_details, "status"), 200L)
  expect_identical(map_chr(repo_details, ~ class(.)[[1]]), expected_classes)
  expect_identical(repo_details$name, str_c("updated-user-repository-", suffix))
})
# TEST: browse_repository ------------------------------------------------------
test_that("browse_repository opens the repository's page in the browser", {
  # Opening a browser cannot be asserted automatically, so only run
  # this test in an interactive session.
  skip_if(!interactive(), "browse_repository must be tested manually")
  # browse_repository() returns the URL it opened, with the HTTP status attached
  opened_url <- browse_repository(
    repo = str_c("ChadGoymer/updated-user-repository-", suffix)
  )
  expect_is(opened_url, "character")
  expect_identical(attr(opened_url, "status"), 200L)
  expect_identical(
    as.character(opened_url),
    str_c("https://github.com/ChadGoymer/updated-user-repository-", suffix)
  )
})
# TEST: delete_repository ------------------------------------------------------
test_that("delete_repository removes a repository and returns TRUE", {
  # Deleting the user repository returns TRUE with a 204 (No Content) status
  deleted_user <- delete_repository(
    repo = str_c("ChadGoymer/updated-user-repository-", suffix)
  )
  expect_is(deleted_user, "logical")
  expect_identical(attr(deleted_user, "status"), 204L)
  expect_identical(as.logical(deleted_user), TRUE)
  # Likewise for the organization repository
  deleted_org <- delete_repository(
    repo = str_c("HairyCoos/updated-org-repository-", suffix)
  )
  expect_is(deleted_org, "logical")
  expect_identical(attr(deleted_org, "status"), 204L)
  expect_identical(as.logical(deleted_org), TRUE)
})
|
/tests/testthat/test-repositories.R
|
permissive
|
jfontestad/githapi
|
R
| false
| false
| 24,601
|
r
|
context("repositories")
# SETUP ------------------------------------------------------------------------
# Random suffix so resources created by this run don't collide with
# concurrent or previous test runs.
suffix <- sample(letters, 10, replace = TRUE) %>% str_c(collapse = "")
setup(suppressMessages({
  create_team(
    name = str_c("test-repositories-", suffix),
    org = "HairyCoos",
    description = "This is a team to test repositories"
  )
}))
# Each deletion is wrapped in try() so cleanup continues even when a
# resource was already removed (e.g. by the delete_repository test).
teardown(suppressMessages({
  try(silent = TRUE, {
    delete_team(str_c("test-repositories-", suffix), org = "HairyCoos")
  })
  try(silent = TRUE, {
    delete_repository(str_c("ChadGoymer/user-repository-", suffix))
  })
  try(silent = TRUE, {
    # Organization repositories are created under "HairyCoos" (see the
    # create_repository test), not "ChadGoymer", so delete them there —
    # the previous path could never match and leaked repositories.
    delete_repository(str_c("HairyCoos/org-repository-", suffix))
  })
  try(silent = TRUE, {
    delete_repository(str_c("ChadGoymer/updated-user-repository-", suffix))
  })
  try(silent = TRUE, {
    delete_repository(str_c("HairyCoos/updated-org-repository-", suffix))
  })
}))
# TEST: create_repository ------------------------------------------------------
test_that("create_repository creates a repository and returns its properties", {
  # Expected class of each property returned for a repository; defined once
  # here instead of being copy-pasted before every expectation.
  repo_classes <- c(
    id = "integer",
    name = "character",
    full_name = "character",
    description = "character",
    owner = "character",
    homepage = "character",
    language = "character",
    size = "numeric",
    default_branch = "character",
    permission = "character",
    private = "logical",
    has_issues = "logical",
    has_projects = "logical",
    has_wiki = "logical",
    has_pages = "logical",
    has_downloads = "logical",
    allow_squash_merge = "logical",
    allow_merge_commit = "logical",
    allow_rebase_merge = "logical",
    fork = "logical",
    archived = "logical",
    disabled = "logical",
    watchers_count = "integer",
    stargazers_count = "integer",
    forks_count = "integer",
    html_url = "character",
    pushed_at = "POSIXct",
    created_at = "POSIXct",
    updated_at = "POSIXct"
  )
  # A repository created without an org belongs to the authenticated user
  user_repo <- create_repository(
    name = str_c("user-repository-", suffix),
    description = "This is a user repository",
    homepage = "https://user-repository.com",
    auto_init = TRUE
  )
  # Give GitHub a moment to finish initialising the new repository
  Sys.sleep(1)
  expect_is(user_repo, "list")
  expect_identical(attr(user_repo, "status"), 201L)
  expect_identical(map_chr(user_repo, ~ class(.)[[1]]), repo_classes)
  expect_identical(
    user_repo$full_name,
    str_c("ChadGoymer/user-repository-", suffix)
  )
  expect_identical(user_repo$description, "This is a user repository")
  expect_identical(user_repo$homepage, "https://user-repository.com")
  # Supplying org creates the repository under that organization instead
  org_repo <- create_repository(
    name = str_c("org-repository-", suffix),
    org = "HairyCoos",
    description = "This is an organization respository",
    homepage = "https://org-repository.com"
  )
  expect_is(org_repo, "list")
  expect_identical(attr(org_repo, "status"), 201L)
  expect_identical(map_chr(org_repo, ~ class(.)[[1]]), repo_classes)
  expect_identical(
    org_repo$full_name,
    str_c("HairyCoos/org-repository-", suffix)
  )
  expect_identical(org_repo$description, "This is an organization respository")
  expect_identical(org_repo$homepage, "https://org-repository.com")
})
# TEST: update_repository ------------------------------------------------------
test_that("update_repository changes a repository's properties", {
  # Expected class of each property returned for a repository; defined once
  # here instead of being copy-pasted before every expectation.
  repo_classes <- c(
    id = "integer",
    name = "character",
    full_name = "character",
    description = "character",
    owner = "character",
    homepage = "character",
    language = "character",
    size = "numeric",
    default_branch = "character",
    permission = "character",
    private = "logical",
    has_issues = "logical",
    has_projects = "logical",
    has_wiki = "logical",
    has_pages = "logical",
    has_downloads = "logical",
    allow_squash_merge = "logical",
    allow_merge_commit = "logical",
    allow_rebase_merge = "logical",
    fork = "logical",
    archived = "logical",
    disabled = "logical",
    watchers_count = "integer",
    stargazers_count = "integer",
    forks_count = "integer",
    html_url = "character",
    pushed_at = "POSIXct",
    created_at = "POSIXct",
    updated_at = "POSIXct"
  )
  # Rename the user repository and toggle its feature flags
  user_repo <- update_repository(
    repo = str_c("ChadGoymer/user-repository-", suffix),
    name = str_c("updated-user-repository-", suffix),
    description = "This is an updated user respository",
    homepage = "https://updated-user-repository.com",
    has_issues = FALSE,
    has_projects = FALSE,
    has_wiki = FALSE,
    default_branch = "main"
  )
  expect_is(user_repo, "list")
  expect_identical(attr(user_repo, "status"), 200L)
  expect_identical(map_chr(user_repo, ~ class(.)[[1]]), repo_classes)
  expect_identical(
    user_repo$full_name,
    str_c("ChadGoymer/updated-user-repository-", suffix)
  )
  expect_identical(user_repo$description, "This is an updated user respository")
  expect_identical(user_repo$homepage, "https://updated-user-repository.com")
  expect_false(user_repo$has_issues)
  expect_false(user_repo$has_projects)
  expect_false(user_repo$has_wiki)
  expect_identical(user_repo$default_branch, "main")
  # Rename the organization repository and change its merge settings
  org_repo <- update_repository(
    repo = str_c("HairyCoos/org-repository-", suffix),
    name = str_c("updated-org-repository-", suffix),
    description = "This is an updated organization respository",
    homepage = "https://updated-org-repository.com",
    private = FALSE,
    allow_squash_merge = FALSE,
    allow_merge_commit = FALSE,
    allow_rebase_merge = TRUE,
    delete_branch_on_merge = TRUE
  )
  expect_is(org_repo, "list")
  expect_identical(attr(org_repo, "status"), 200L)
  expect_identical(map_chr(org_repo, ~ class(.)[[1]]), repo_classes)
  expect_identical(
    org_repo$full_name,
    str_c("HairyCoos/updated-org-repository-", suffix)
  )
  expect_identical(
    org_repo$description,
    "This is an updated organization respository"
  )
  expect_identical(org_repo$homepage, "https://updated-org-repository.com")
  expect_false(org_repo$private)
  expect_false(org_repo$allow_squash_merge)
  expect_false(org_repo$allow_merge_commit)
  expect_true(org_repo$allow_rebase_merge)
  # Archiving a repository
  archived_repo <- update_repository(
    repo = str_c("ChadGoymer/updated-user-repository-", suffix),
    archived = TRUE
  )
  expect_is(archived_repo, "list")
  expect_identical(attr(archived_repo, "status"), 200L)
  expect_identical(map_chr(archived_repo, ~ class(.)[[1]]), repo_classes)
  expect_true(archived_repo$archived)
  # Granting a team access to a repository with "pull" permission
  added_team_repo <- update_repository(
    repo = str_c("HairyCoos/updated-org-repository-", suffix),
    team = str_c("test-repositories-", suffix),
    permission = "pull"
  )
  expect_is(added_team_repo, "list")
  expect_identical(attr(added_team_repo, "status"), 200L)
  expect_identical(map_chr(added_team_repo, ~ class(.)[[1]]), repo_classes)
  read_repo <- view_repository(
    repo = str_c("HairyCoos/updated-org-repository-", suffix),
    team = str_c("test-repositories-", suffix),
    org = "HairyCoos"
  )
  expect_identical(read_repo$permission, "pull")
  # Upgrading the team's permission to "maintain"
  updated_team_repo <- update_repository(
    repo = str_c("HairyCoos/updated-org-repository-", suffix),
    team = str_c("test-repositories-", suffix),
    permission = "maintain"
  )
  expect_is(updated_team_repo, "list")
  expect_identical(attr(updated_team_repo, "status"), 200L)
  expect_identical(map_chr(updated_team_repo, ~ class(.)[[1]]), repo_classes)
  maintain_repo <- view_repository(
    repo = str_c("HairyCoos/updated-org-repository-", suffix),
    team = str_c("test-repositories-", suffix),
    org = "HairyCoos"
  )
  expect_identical(maintain_repo$permission, "maintain")
})
# TEST: view_repositories ------------------------------------------------------
test_that("view_repositories returns a tibble summarising the repositories", {
  # Expected class of each column in the summary tibble; defined once here
  # instead of being copy-pasted before every expectation.
  repo_classes <- c(
    id = "integer",
    name = "character",
    full_name = "character",
    description = "character",
    owner = "character",
    homepage = "character",
    language = "character",
    size = "numeric",
    default_branch = "character",
    permission = "character",
    private = "logical",
    has_issues = "logical",
    has_projects = "logical",
    has_wiki = "logical",
    has_pages = "logical",
    has_downloads = "logical",
    allow_squash_merge = "logical",
    allow_merge_commit = "logical",
    allow_rebase_merge = "logical",
    fork = "logical",
    archived = "logical",
    disabled = "logical",
    watchers_count = "integer",
    stargazers_count = "integer",
    forks_count = "integer",
    html_url = "character",
    pushed_at = "POSIXct",
    created_at = "POSIXct",
    updated_at = "POSIXct"
  )
  # Repositories for a named user; default ordering is newest first
  user_repos <- view_repositories(user = "ChadGoymer", n_max = 10)
  expect_is(user_repos, "tbl")
  expect_identical(attr(user_repos, "status"), 200L)
  expect_identical(map_chr(user_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-user-repository-", suffix) %in% user_repos$name)
  expect_identical(
    sort(user_repos$created_at, decreasing = TRUE),
    user_repos$created_at
  )
  # Explicit sort/direction arguments are honoured
  ordered_repos <- view_repositories(
    user = "ChadGoymer",
    sort = "full_name",
    direction = "asc",
    n_max = 10
  )
  expect_is(ordered_repos, "tbl")
  expect_identical(attr(ordered_repos, "status"), 200L)
  expect_identical(map_chr(ordered_repos, ~ class(.)[[1]]), repo_classes)
  expect_identical(sort(ordered_repos$full_name), ordered_repos$full_name)
  # Repositories for an organization
  org_repos <- view_repositories(org = "HairyCoos", n_max = 10)
  expect_is(org_repos, "tbl")
  expect_identical(attr(org_repos, "status"), 200L)
  expect_identical(map_chr(org_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-org-repository-", suffix) %in% org_repos$name)
  expect_identical(
    org_repos %>%
      filter(name == str_c("updated-org-repository-", suffix)) %>%
      pull("permission"),
    "admin"
  )
  # Repositories for a team; permission reflects the team's access level
  team_repos <- view_repositories(
    team = str_c("test-repositories-", suffix),
    org = "HairyCoos",
    n_max = 10
  )
  expect_is(team_repos, "tbl")
  expect_identical(attr(team_repos, "status"), 200L)
  expect_identical(map_chr(team_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-org-repository-", suffix) %in% team_repos$name)
  expect_identical(
    team_repos %>%
      filter(name == str_c("updated-org-repository-", suffix)) %>%
      pull("permission"),
    "maintain"
  )
  # With no user/org/team, repositories of the authenticated user are returned
  auth_repos <- view_repositories(n_max = 10)
  expect_is(auth_repos, "tbl")
  expect_identical(attr(auth_repos, "status"), 200L)
  expect_identical(map_chr(auth_repos, ~ class(.)[[1]]), repo_classes)
  expect_true(str_c("updated-user-repository-", suffix) %in% auth_repos$name)
})
# TEST: view_repository --------------------------------------------------------
test_that("view_repository returns a list of repository properties", {
  # Expected class for every property in the returned list
  expected_classes <- c(
    id = "integer",
    name = "character",
    full_name = "character",
    description = "character",
    owner = "character",
    homepage = "character",
    language = "character",
    size = "numeric",
    default_branch = "character",
    permission = "character",
    private = "logical",
    has_issues = "logical",
    has_projects = "logical",
    has_wiki = "logical",
    has_pages = "logical",
    has_downloads = "logical",
    allow_squash_merge = "logical",
    allow_merge_commit = "logical",
    allow_rebase_merge = "logical",
    fork = "logical",
    archived = "logical",
    disabled = "logical",
    watchers_count = "integer",
    stargazers_count = "integer",
    forks_count = "integer",
    html_url = "character",
    pushed_at = "POSIXct",
    created_at = "POSIXct",
    updated_at = "POSIXct"
  )
  # Fetch the repository renamed earlier by the update_repository test
  repo_details <- view_repository(
    repo = str_c("ChadGoymer/updated-user-repository-", suffix)
  )
  expect_is(repo_details, "list")
  expect_identical(attr(repo_details, "status"), 200L)
  expect_identical(map_chr(repo_details, ~ class(.)[[1]]), expected_classes)
  expect_identical(repo_details$name, str_c("updated-user-repository-", suffix))
})
# TEST: browse_repository ------------------------------------------------------
test_that("browse_repository opens the repository's page in the browser", {
# Opening a browser cannot be verified headlessly, so this test only runs
# in an interactive session.
skip_if(!interactive(), "browse_repository must be tested manually")
repo <- browse_repository(
repo = str_c("ChadGoymer/updated-user-repository-", suffix)
)
expect_is(repo, "character")
expect_identical(attr(repo, "status"), 200L)
# The return value is the URL that was opened.
expect_identical(
as.character(repo),
str_c("https://github.com/ChadGoymer/updated-user-repository-", suffix)
)
})
# TEST: delete_repository ------------------------------------------------------
test_that("delete_repository removes a repository and returns TRUE", {
# Clean up the user-owned repository created earlier in the suite;
# a 204 status indicates successful deletion with no response body.
user_repo <- delete_repository(
repo = str_c("ChadGoymer/updated-user-repository-", suffix)
)
expect_is(user_repo, "logical")
expect_identical(attr(user_repo, "status"), 204L)
expect_identical(as.logical(user_repo), TRUE)
# Likewise for the organization-owned repository.
org_repo <- delete_repository(
repo = str_c("HairyCoos/updated-org-repository-", suffix)
)
expect_is(org_repo, "logical")
expect_identical(attr(org_repo, "status"), 204L)
expect_identical(as.logical(org_repo), TRUE)
})
|
# Leaving the code used to derive the hr_NHDPlusFlowline layer in sugar creek gpkg
# matched <- readr::read_csv("../../HU12_NHD/out/report/matched.csv")
#
# src_gpkg <- system.file("gpkg/sugar_creek_fort_mill.gpkg", package = "hygeo")
#
# fline <- nhdplusTools::align_nhdplus_names(sf::read_sf(src_gpkg, "NHDFlowline_Network"))
#
# comids <- nhdplusTools::get_UT(fline, 9731454)
#
# fline <- dplyr::filter(fline, COMID %in% comids)
#
# all_lps <- unique(fline$LevelPathI)
#
# matched <- dplyr::filter(matched, mr_LevelPathI %in% all_lps)
#
# hr <- nhdplusTools::get_hr_data("~/Documents/Data/hr/03/0305.gdb/", layer = "NHDFlowline")
#
# hr_sub <- dplyr::filter(hr, COMID %in% matched$member_NHDPlusID)
#
# mapview::mapview(fline, lwd = 3, color = "blue") + mapview::mapview(hr_sub, lwd = 1.5, color = "red")
#
# hr_sub <- dplyr::left_join(hr_sub, dplyr::select(matched, COMID = member_NHDPlusID, mr_LevelPathI), by = "COMID")
#
# sf::write_sf(hr_sub, "inst/gpkg/sugar_creek_fort_mill.gpkg", "hr_NHDPlusFlowline")
|
/inst/gpkg/mr_hr_crosswalk_pull.R
|
permissive
|
dblodgett-usgs/hygeo
|
R
| false
| false
| 1,013
|
r
|
# Leaving the code used to derive the hr_NHDPlusFlowline layer in sugar creek gpkg
# matched <- readr::read_csv("../../HU12_NHD/out/report/matched.csv")
#
# src_gpkg <- system.file("gpkg/sugar_creek_fort_mill.gpkg", package = "hygeo")
#
# fline <- nhdplusTools::align_nhdplus_names(sf::read_sf(src_gpkg, "NHDFlowline_Network"))
#
# comids <- nhdplusTools::get_UT(fline, 9731454)
#
# fline <- dplyr::filter(fline, COMID %in% comids)
#
# all_lps <- unique(fline$LevelPathI)
#
# matched <- dplyr::filter(matched, mr_LevelPathI %in% all_lps)
#
# hr <- nhdplusTools::get_hr_data("~/Documents/Data/hr/03/0305.gdb/", layer = "NHDFlowline")
#
# hr_sub <- dplyr::filter(hr, COMID %in% matched$member_NHDPlusID)
#
# mapview::mapview(fline, lwd = 3, color = "blue") + mapview::mapview(hr_sub, lwd = 1.5, color = "red")
#
# hr_sub <- dplyr::left_join(hr_sub, dplyr::select(matched, COMID = member_NHDPlusID, mr_LevelPathI), by = "COMID")
#
# sf::write_sf(hr_sub, "inst/gpkg/sugar_creek_fort_mill.gpkg", "hr_NHDPlusFlowline")
|
# Fuzzing regression input: calls BayesMRA's compiled rmvn_arma_scalar()
# with a captured edge case (`a` is a subnormal double) and prints the
# structure of the result.
testlist <- list(a = 2.01456375923297e-314, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result)
|
/BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615926022-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 117
|
r
|
# Fuzzing regression input: calls BayesMRA's compiled rmvn_arma_scalar()
# with a captured edge case (`a` is a subnormal double) and prints the
# structure of the result.
testlist <- list(a = 2.01456375923297e-314, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_aes.R
\name{create_aes}
\alias{create_aes}
\title{Create Aes Mapping from a List}
\usage{
create_aes(.list, parse = FALSE)
}
\arguments{
\item{.list}{a list of aesthetic arguments; for example .list = list(x = "dose", y = "len", color = "dose").}
\item{parse}{logical. If TRUE, parse the input as an expression.}
}
\description{
Create aes mapping to make programming easy with ggplot2.
}
\examples{
# Simple aes creation
create_aes(list(x = "Sepal.Length", y = "Petal.Length" ))
# Parse an expression
x <- "log2(Sepal.Length)"
y <- "log2(Petal.Length)"
create_aes(list(x = x, y = y ), parse = TRUE)
# Create a ggplot
mapping <- create_aes(list(x = x, y = y ), parse = TRUE)
ggplot(iris, mapping) +
geom_point()
}
|
/man/create_aes.Rd
|
no_license
|
earcanal/ggpubr
|
R
| false
| true
| 804
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_aes.R
\name{create_aes}
\alias{create_aes}
\title{Create Aes Mapping from a List}
\usage{
create_aes(.list, parse = FALSE)
}
\arguments{
\item{.list}{a list of aesthetic arguments; for example .list = list(x = "dose", y = "len", color = "dose").}
\item{parse}{logical. If TRUE, parse the input as an expression.}
}
\description{
Create aes mapping to make programming easy with ggplot2.
}
\examples{
# Simple aes creation
create_aes(list(x = "Sepal.Length", y = "Petal.Length" ))
# Parse an expression
x <- "log2(Sepal.Length)"
y <- "log2(Petal.Length)"
create_aes(list(x = x, y = y ), parse = TRUE)
# Create a ggplot
mapping <- create_aes(list(x = x, y = y ), parse = TRUE)
ggplot(iris, mapping) +
geom_point()
}
|
\name{heights}
\alias{heights}
\alias{heights.cover}
\alias{heights.incidence}
\alias{height.poset}
\title{
Heights
}
\description{
The function computes the vector of heights of poset elements.
}
\usage{
heights(z)
}
\arguments{
\item{z}{
an object of class \code{cover}, \code{incidence} or \code{poset}.
}
}
\examples{
vl <- c(3, 2, 4)
prof <- var2prof(varlen = vl)
Z <- getzeta(prof)
heights(Z)
}
|
/man/heights.Rd
|
no_license
|
cran/parsec
|
R
| false
| false
| 434
|
rd
|
\name{heights}
\alias{heights}
\alias{heights.cover}
\alias{heights.incidence}
\alias{height.poset}
\title{
Heights
}
\description{
The function computes the vector of heights of poset elements.
}
\usage{
heights(z)
}
\arguments{
\item{z}{
an object of class \code{cover}, \code{incidence} or \code{poset}.
}
}
\examples{
vl <- c(3, 2, 4)
prof <- var2prof(varlen = vl)
Z <- getzeta(prof)
heights(Z)
}
|
################################################################################
#' Convert to a tibble of the n first/last elements.
#'
#' @param x A [FDF][FDF-class].
#' @param ... Not used.
#' @param n A single positive integer.
#'
#' @importFrom utils head
#' @export
#' @method head FDF
#'
#' @include filter.R
#'
#' @rdname head
#'
#' @examples
#' test <- FDF(datasets::iris)
#' head(test)
head.FDF <- function(x, n = 6L, ...) {
# `...` must be empty and `n` must be a single positive number.
assert_nodots()
assert_pos(n)
# Clamp `n` so we never request more rows than the FDF contains.
n <- min(n, x$nrow)
# Subset the first `n` row indices; check = FALSE skips index revalidation.
as_tibble(filter_int(x, seq_len(n), check = FALSE))
}
################################################################################
#' @exportMethod head
#' @rdname head
setGeneric("head", utils::head)
################################################################################
#' @importFrom utils tail
#' @export
#' @method tail FDF
#'
#' @rdname head
#'
#' @examples
#' test <- FDF(datasets::iris)
#' tail(test)
tail.FDF <- function(x, n = 6L, ...) {
# `...` must be empty and `n` must be a single positive number.
assert_nodots()
assert_pos(n)
n <- min(n, x$nrow)
# `x$nrow + seq_len(n) - n` yields the indices of the last `n` rows.
as_tibble(filter_int(x, x$nrow + seq_len(n) - n, check = FALSE))
}
################################################################################
#' @exportMethod tail
#' @rdname head
setGeneric("tail", utils::tail)
################################################################################
|
/R/head-tail.R
|
no_license
|
privefl/bigdfr
|
R
| false
| false
| 1,326
|
r
|
################################################################################
#' Convert to a tibble of the n first/last elements.
#'
#' @param x A [FDF][FDF-class].
#' @param ... Not used.
#' @param n A single positive integer.
#'
#' @importFrom utils head
#' @export
#' @method head FDF
#'
#' @include filter.R
#'
#' @rdname head
#'
#' @examples
#' test <- FDF(datasets::iris)
#' head(test)
head.FDF <- function(x, n = 6L, ...) {
# `...` must be empty and `n` must be a single positive number.
assert_nodots()
assert_pos(n)
# Clamp `n` so we never request more rows than the FDF contains.
n <- min(n, x$nrow)
# Subset the first `n` row indices; check = FALSE skips index revalidation.
as_tibble(filter_int(x, seq_len(n), check = FALSE))
}
################################################################################
#' @exportMethod head
#' @rdname head
setGeneric("head", utils::head)
################################################################################
#' @importFrom utils tail
#' @export
#' @method tail FDF
#'
#' @rdname head
#'
#' @examples
#' test <- FDF(datasets::iris)
#' tail(test)
tail.FDF <- function(x, n = 6L, ...) {
# `...` must be empty and `n` must be a single positive number.
assert_nodots()
assert_pos(n)
n <- min(n, x$nrow)
# `x$nrow + seq_len(n) - n` yields the indices of the last `n` rows.
as_tibble(filter_int(x, x$nrow + seq_len(n) - n, check = FALSE))
}
################################################################################
#' @exportMethod tail
#' @rdname head
setGeneric("tail", utils::tail)
################################################################################
|
# Fuzzing regression input: calls grattan's internal anyOutside() with a
# captured edge case (one large value followed by zeros, bounds a = b = 0)
# and prints the structure of the result.
testlist <- list(a = 0L, b = 0L, x = c(3079936L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130999-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 313
|
r
|
# Fuzzing regression input: calls grattan's internal anyOutside() with a
# captured edge case (one large value followed by zeros, bounds a = b = 0)
# and prints the structure of the result.
testlist <- list(a = 0L, b = 0L, x = c(3079936L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
library(CodeClanData)
library(dplyr)
library(ggplot2)
library(shiny)
library(data.table)
library(shinythemes)
library(gganimate)
ui <- fluidPage(
theme = shinytheme("sandstone"),
titlePanel(tags$b("Video Games 1988-2018")),
tabsetPanel(
tabPanel(
"Find Games",
column(4,
selectInput("genre",
"Choose genre",
choices = unique(game_sales$genre))
),
column(4,
selectInput("year",
"Choose year",
choices = unique(game_sales$year_of_release))
),
column(4,
selectInput("developer",
"Choose developer",
choices = unique(game_sales$developer))
),
column(4,
selectInput("platform",
"Choose platform",
choices = unique(game_sales$platform))
),
actionButton("update", "Find game"),
DT::dataTableOutput("table_output")
),
tabPanel(
"Score Comparison",
column(4,
plotOutput("scatter")),
sidebarPanel(
sliderInput("transparency",
"Transparency",
min = 0, max = 1, value = 0.8)
)
),
tabPanel(
"Nintendo sales since 2000",
tags$br(actionButton("update1", "Click")),
tags$br(plotOutput("line"))
)
)
)
# Shiny server: wires the "Find Games" table, the score scatter plot and the
# animated Nintendo sales line to the inputs declared in `ui`.
server <- function(input, output) {
# Recompute the filtered game table only when the "Find game" button fires.
game_data <- eventReactive(input$update, {
game_sales %>%
filter(genre == input$genre) %>%
filter(year_of_release == input$year) %>%
filter(developer == input$developer) %>%
filter(platform == input$platform) %>%
slice(1:10) %>%
mutate(round((user_score * 10)))
# NOTE(review): the mutate above creates a column literally named
# `round((user_score * 10))`; a named column was probably intended.
})
output$table_output <- DT::renderDataTable({
game_data()
})
# The scatter uses the full, unfiltered dataset.
game_data2 <- reactive({
game_sales
})
output$scatter <- renderPlot({
ggplot(game_data2()) +
aes(x = critic_score,
y = user_score) +
labs(x = "Critic Score", y = "User Score") +
geom_point(alpha = input$transparency) +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "white"))
# I thought it would be interesting for the user to see how user score compared with critic score and whether the two correlated
# the user will see that often they do and so could reasonably base judgement on a game using either score.
})
# Nintendo-only subset, refreshed when the "Click" button fires.
nintendo_data <- eventReactive(input$update1, {
game_sales %>%
filter(publisher == "Nintendo")
})
output$line <- renderImage({
outfile <- tempfile(fileext='.gif')
nintendo_plot <- ggplot(nintendo_data(), aes(year_of_release,
sales,
fill = publisher)) +
geom_line() +
expand_limits(x= 2000)+
labs(x = "Year", y = "Sales (m)", title = "Nintendo Sales Since the Begninning of the Millennium") +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "white")) +
geom_point(aes(group = seq_along(year_of_release))) +
transition_reveal(year_of_release, )
# NOTE(review): the trailing comma above passes an empty second argument to
# transition_reveal() — likely unintended; confirm it still renders.
# I chose to concentrate on the top publisher by sales, Nintendo, to demonstrate how their sales had fluctuated since 2000 despite being top.
# I chose to animate the line graph to emphasise this fluctuation.
# Unfortunately it takes a while to render after activation by the action button.
anim_save("outfile.gif", animate(nintendo_plot)) # saving the gif inside renderImage is required for the animated graph to display
list(src = "outfile.gif",
contentType = 'image/gif'
)},
deleteFile = TRUE)
}
# Run the app
shinyApp(ui = ui, server = server)
|
/week_05/weekend/shiny_weekend_homework/app.R
|
no_license
|
C-Power1/codeclan_homework_ConorPower
|
R
| false
| false
| 5,443
|
r
|
library(CodeClanData)
library(dplyr)
library(ggplot2)
library(shiny)
library(data.table)
library(shinythemes)
library(gganimate)
ui <- fluidPage(
theme = shinytheme("sandstone"),
titlePanel(tags$b("Video Games 1988-2018")),
tabsetPanel(
tabPanel(
"Find Games",
column(4,
selectInput("genre",
"Choose genre",
choices = unique(game_sales$genre))
),
column(4,
selectInput("year",
"Choose year",
choices = unique(game_sales$year_of_release))
),
column(4,
selectInput("developer",
"Choose developer",
choices = unique(game_sales$developer))
),
column(4,
selectInput("platform",
"Choose platform",
choices = unique(game_sales$platform))
),
actionButton("update", "Find game"),
DT::dataTableOutput("table_output")
),
tabPanel(
"Score Comparison",
column(4,
plotOutput("scatter")),
sidebarPanel(
sliderInput("transparency",
"Transparency",
min = 0, max = 1, value = 0.8)
)
),
tabPanel(
"Nintendo sales since 2000",
tags$br(actionButton("update1", "Click")),
tags$br(plotOutput("line"))
)
)
)
# Shiny server: wires the "Find Games" table, the score scatter plot and the
# animated Nintendo sales line to the inputs declared in `ui`.
server <- function(input, output) {
# Recompute the filtered game table only when the "Find game" button fires.
game_data <- eventReactive(input$update, {
game_sales %>%
filter(genre == input$genre) %>%
filter(year_of_release == input$year) %>%
filter(developer == input$developer) %>%
filter(platform == input$platform) %>%
slice(1:10) %>%
mutate(round((user_score * 10)))
# NOTE(review): the mutate above creates a column literally named
# `round((user_score * 10))`; a named column was probably intended.
})
output$table_output <- DT::renderDataTable({
game_data()
})
# The scatter uses the full, unfiltered dataset.
game_data2 <- reactive({
game_sales
})
output$scatter <- renderPlot({
ggplot(game_data2()) +
aes(x = critic_score,
y = user_score) +
labs(x = "Critic Score", y = "User Score") +
geom_point(alpha = input$transparency) +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "white"))
# I thought it would be interesting for the user to see how user score compared with critic score and whether the two correlated
# the user will see that often they do and so could reasonably base judgement on a game using either score.
})
# Nintendo-only subset, refreshed when the "Click" button fires.
nintendo_data <- eventReactive(input$update1, {
game_sales %>%
filter(publisher == "Nintendo")
})
output$line <- renderImage({
outfile <- tempfile(fileext='.gif')
nintendo_plot <- ggplot(nintendo_data(), aes(year_of_release,
sales,
fill = publisher)) +
geom_line() +
expand_limits(x= 2000)+
labs(x = "Year", y = "Sales (m)", title = "Nintendo Sales Since the Begninning of the Millennium") +
theme(panel.background = element_rect(fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "white")) +
geom_point(aes(group = seq_along(year_of_release))) +
transition_reveal(year_of_release, )
# NOTE(review): the trailing comma above passes an empty second argument to
# transition_reveal() — likely unintended; confirm it still renders.
# I chose to concentrate on the top publisher by sales, Nintendo, to demonstrate how their sales had fluctuated since 2000 despite being top.
# I chose to animate the line graph to emphasise this fluctuation.
# Unfortunately it takes a while to render after activation by the action button.
anim_save("outfile.gif", animate(nintendo_plot)) # saving the gif inside renderImage is required for the animated graph to display
list(src = "outfile.gif",
contentType = 'image/gif'
)},
deleteFile = TRUE)
}
# Run the app
shinyApp(ui = ui, server = server)
|
# Compute, for each data file in `directory` that has more than `threshold`
# completely observed rows, the correlation between its second and third
# data columns.
#
# directory: path to a folder of CSV files sharing the same column layout.
# threshold: minimum number of complete rows a file must exceed before its
#   correlation is included (default 0).
#
# Returns a numeric vector of correlations, possibly empty.
corr <- function(directory, threshold = 0) {
  file_list <- list.files(directory, full.names = TRUE)
  correlation <- vector(mode = "numeric", length = 0)
  # Iterate over the files actually present rather than a hard-coded 1:332,
  # which read nonexistent files (or skipped extras) for any directory that
  # did not contain exactly 332 files. seq_along() is also safe when the
  # directory is empty.
  for (i in seq_along(file_list)) {
    dat <- read.csv(file_list[i])
    # Keep only rows with no missing values in any column.
    comp <- dat[complete.cases(dat), ]
    if (nrow(comp) > threshold) {
      correlation <- c(correlation, cor(comp[, 2], comp[, 3]))
    }
  }
  correlation
}
|
/corr.R
|
no_license
|
hymno/data_science_coursera
|
R
| false
| false
| 803
|
r
|
# Compute, for each data file in `directory` that has more than `threshold`
# completely observed rows, the correlation between its second and third
# data columns.
#
# directory: path to a folder of CSV files sharing the same column layout.
# threshold: minimum number of complete rows a file must exceed before its
#   correlation is included (default 0).
#
# Returns a numeric vector of correlations, possibly empty.
corr <- function(directory, threshold = 0) {
  file_list <- list.files(directory, full.names = TRUE)
  correlation <- vector(mode = "numeric", length = 0)
  # Iterate over the files actually present rather than a hard-coded 1:332,
  # which read nonexistent files (or skipped extras) for any directory that
  # did not contain exactly 332 files. seq_along() is also safe when the
  # directory is empty.
  for (i in seq_along(file_list)) {
    dat <- read.csv(file_list[i])
    # Keep only rows with no missing values in any column.
    comp <- dat[complete.cases(dat), ]
    if (nrow(comp) > threshold) {
      correlation <- c(correlation, cor(comp[, 2], comp[, 3]))
    }
  }
  correlation
}
|
library(tidyverse)
library(latex2exp)
setwd('~/Documents/paper2/bla')
################################
# Data Collection
################################
# Read a CSV of per-bond length trajectories and summarise it into a tidy
# table of bond-length alternation (BLA) values over time.
#
# runtime:  total simulation time, used to build evenly spaced time labels.
# infile:   CSV without a header; the first column is the bond id and the
#           remaining columns are bond lengths at successive time steps.
# state, location, solvent: labels attached to the output rows so multiple
#           runs can later be combined and faceted.
collect_blas <- function (runtime, infile, state, location, solvent){
data <- read_csv(infile, col_names = FALSE)
# Every column except the leading bond-id column is one time step.
nsteps <- ncol(data) - 1
# Evenly spaced labels from 0 to `runtime`.
time_labels <- (0:(nsteps-1))*runtime/(nsteps -1)
names(data) <- c('Bond', time_labels)
# Long format -> mean length per (bond, time) -> wide by bond, then
# BLA = mean(bond 1, bond 3) - bond 2.
s1_bond_data <- data %>% gather(key = "t", value = "Length", -Bond) %>%
group_by(Bond, t) %>%
summarise(MeanLength = mean(Length)) %>%
spread(Bond, MeanLength) %>%
mutate(BLA = (`1`+`3`)/2 - `2`) %>%
mutate(t = as.numeric(t)) %>%
mutate(State = state) %>%
mutate(Location = location) %>%
mutate(SolventID = solvent) %>%
mutate(Solvent = if_else(location == '', solvent, sprintf("%s-%s", solvent, location))) %>%
mutate(Description = if_else(location == '', state, sprintf("%s-%s", state, location)))
s1_bond_data
}
vacuum_near <- collect_blas(runtime = 1,
infile = 'bla-near-vacuum.csv',
state = 'S1',
location = "Near",
solvent = "Vacuum")
ch3oh_near <- collect_blas(runtime = 1,
infile = 'bla-near-ch3oh.csv',
state = 'S1',
location = "Near",
solvent = "0")
ch3oh_5s_near <- collect_blas(runtime = 1,
infile = 'bla-near-ch3oh-5s.csv',
state = 'S1',
location = "Near",
solvent = "5")
ch3oh_10s_near <- collect_blas(runtime = 1,
infile = 'bla-near-ch3oh-10s.csv',
state = 'S1',
location = "Near",
solvent = "10")
bla_near <- bind_rows(
vacuum_near,
ch3oh_near,
ch3oh_5s_near,
ch3oh_10s_near
)
vacuum_far <- collect_blas(runtime = 1,
infile = 'bla-far-vacuum.csv',
state = 'S1',
location = "Far",
solvent = "Vacuum")
ch3oh_far <- collect_blas(runtime = 1,
infile = 'bla-far-ch3oh.csv',
state = 'S1',
location = "Far",
solvent = "0")
ch3oh_far_5s <- collect_blas(runtime = 1,
infile = 'bla-far-ch3oh-5s.csv',
state = 'S1',
location = "Far",
solvent = "5")
ch3oh_far_10s <- collect_blas(runtime = 1,
infile = 'bla-far-ch3oh-10s.csv',
state = 'S1',
location = "Far",
solvent = "10")
bla_far <- bind_rows(
vacuum_far,
ch3oh_far,
ch3oh_far_5s,
ch3oh_far_10s
)
bla <- bind_rows(
bla_near,
bla_far
) %>%
mutate(Location = factor(Location, levels = c("Near", "Far"))) %>%
mutate(SolventID = factor(SolventID, levels = c("Vacuum", "0", "5", "10")))
######################################
# Plotting
######################################
bla %>%
ggplot(aes(x = t, y = BLA, color = SolventID)) +
geom_line(size=1.5) +
scale_y_continuous(limits = c(0, 0.12), breaks = seq(0, 0.10, by=0.02)) +
scale_x_continuous(breaks = seq(0, 1, by = 0.2)) +
labs(x = "Time (ps)",
y = expression(paste("BLA (", ring(A), ")" )),
color = "Number QM Solvents") +
facet_wrap(~ Location) +
theme_bw() +
theme(axis.text=element_text(size=15),
strip.text = element_text(size=12),
axis.title=element_text(size=20),
legend.title = element_blank(),
legend.text = element_text(size = 15),
legend.text.align = 0,
legend.position = "top")
ggsave("~/potentialparadox.github.io/Paper2/Images/bla/solvent_comparison.png", width = 7.5, height = 5)
bondorders %>%
ggplot(aes(x = Timefs, y = MeanBondOrder, color = NSolvent)) +
geom_line(size = 1.5) +
facet_wrap(~ Bond) +
annotation_custom(diagram, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf)
|
/Paper2/plot_funcs/bla.R
|
no_license
|
PotentialParadox/potentialparadox.github.io
|
R
| false
| false
| 4,323
|
r
|
library(tidyverse)
library(latex2exp)
setwd('~/Documents/paper2/bla')
################################
# Data Collection
################################
# Read a CSV of per-bond length trajectories and summarise it into a tidy
# table of bond-length alternation (BLA) values over time.
#
# runtime:  total simulation time, used to build evenly spaced time labels.
# infile:   CSV without a header; the first column is the bond id and the
#           remaining columns are bond lengths at successive time steps.
# state, location, solvent: labels attached to the output rows so multiple
#           runs can later be combined and faceted.
collect_blas <- function (runtime, infile, state, location, solvent){
data <- read_csv(infile, col_names = FALSE)
# Every column except the leading bond-id column is one time step.
nsteps <- ncol(data) - 1
# Evenly spaced labels from 0 to `runtime`.
time_labels <- (0:(nsteps-1))*runtime/(nsteps -1)
names(data) <- c('Bond', time_labels)
# Long format -> mean length per (bond, time) -> wide by bond, then
# BLA = mean(bond 1, bond 3) - bond 2.
s1_bond_data <- data %>% gather(key = "t", value = "Length", -Bond) %>%
group_by(Bond, t) %>%
summarise(MeanLength = mean(Length)) %>%
spread(Bond, MeanLength) %>%
mutate(BLA = (`1`+`3`)/2 - `2`) %>%
mutate(t = as.numeric(t)) %>%
mutate(State = state) %>%
mutate(Location = location) %>%
mutate(SolventID = solvent) %>%
mutate(Solvent = if_else(location == '', solvent, sprintf("%s-%s", solvent, location))) %>%
mutate(Description = if_else(location == '', state, sprintf("%s-%s", state, location)))
s1_bond_data
}
vacuum_near <- collect_blas(runtime = 1,
infile = 'bla-near-vacuum.csv',
state = 'S1',
location = "Near",
solvent = "Vacuum")
ch3oh_near <- collect_blas(runtime = 1,
infile = 'bla-near-ch3oh.csv',
state = 'S1',
location = "Near",
solvent = "0")
ch3oh_5s_near <- collect_blas(runtime = 1,
infile = 'bla-near-ch3oh-5s.csv',
state = 'S1',
location = "Near",
solvent = "5")
ch3oh_10s_near <- collect_blas(runtime = 1,
infile = 'bla-near-ch3oh-10s.csv',
state = 'S1',
location = "Near",
solvent = "10")
bla_near <- bind_rows(
vacuum_near,
ch3oh_near,
ch3oh_5s_near,
ch3oh_10s_near
)
vacuum_far <- collect_blas(runtime = 1,
infile = 'bla-far-vacuum.csv',
state = 'S1',
location = "Far",
solvent = "Vacuum")
ch3oh_far <- collect_blas(runtime = 1,
infile = 'bla-far-ch3oh.csv',
state = 'S1',
location = "Far",
solvent = "0")
ch3oh_far_5s <- collect_blas(runtime = 1,
infile = 'bla-far-ch3oh-5s.csv',
state = 'S1',
location = "Far",
solvent = "5")
ch3oh_far_10s <- collect_blas(runtime = 1,
infile = 'bla-far-ch3oh-10s.csv',
state = 'S1',
location = "Far",
solvent = "10")
bla_far <- bind_rows(
vacuum_far,
ch3oh_far,
ch3oh_far_5s,
ch3oh_far_10s
)
bla <- bind_rows(
bla_near,
bla_far
) %>%
mutate(Location = factor(Location, levels = c("Near", "Far"))) %>%
mutate(SolventID = factor(SolventID, levels = c("Vacuum", "0", "5", "10")))
######################################
# Plotting
######################################
bla %>%
ggplot(aes(x = t, y = BLA, color = SolventID)) +
geom_line(size=1.5) +
scale_y_continuous(limits = c(0, 0.12), breaks = seq(0, 0.10, by=0.02)) +
scale_x_continuous(breaks = seq(0, 1, by = 0.2)) +
labs(x = "Time (ps)",
y = expression(paste("BLA (", ring(A), ")" )),
color = "Number QM Solvents") +
facet_wrap(~ Location) +
theme_bw() +
theme(axis.text=element_text(size=15),
strip.text = element_text(size=12),
axis.title=element_text(size=20),
legend.title = element_blank(),
legend.text = element_text(size = 15),
legend.text.align = 0,
legend.position = "top")
ggsave("~/potentialparadox.github.io/Paper2/Images/bla/solvent_comparison.png", width = 7.5, height = 5)
bondorders %>%
ggplot(aes(x = Timefs, y = MeanBondOrder, color = NSolvent)) +
geom_line(size = 1.5) +
facet_wrap(~ Bond) +
annotation_custom(diagram, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/utilities.R
\name{rollingMean}
\alias{rollingMean}
\title{Calculate rollingMean values}
\usage{
rollingMean(mydata, pollutant = "o3", width = 8, new.name = "rolling",
data.thresh = 75, align = "centre", ...)
}
\arguments{
\item{mydata}{A data frame containing a \code{date}
field. \code{mydata} must contain a \code{date} field in
\code{Date} or \code{POSIXct} format. The input time series must
be regular e.g. hourly, daily.}
\item{pollutant}{The name of a pollutant e.g. \code{pollutant = "o3"}.}
\item{width}{The averaging period (rolling window width) to use
e.g. \code{width = 8} will generate 8-hour rolling mean values
when hourly data are analysed.}
\item{new.name}{The name given to the new rollingMean variable. If
not supplied it will create a name based on the name of the
pollutant and the averaging period used.}
\item{data.thresh}{The data capture threshold in %. No values are
calculated if data capture over the period of interest is less
than this value. For example, with \code{width = 8} and
\code{data.thresh = 75} at least 6 hours are required to calculate
the mean, else \code{NA} is returned.}
\item{align}{specifies how the moving window should be
aligned. \code{"right"} means that the previous \code{hours}
(including the current) are averaged. This seems to be the default
for UK air quality rolling mean statistics. \code{"left"} means
that the forward \code{hours} are averaged, and \code{"centre"} or
\code{"center"}, which is the default.}
\item{...}{other arguments, currently unused.}
}
\description{
Calculate rollingMean values taking account of data capture thresholds
}
\details{
This is a utility function mostly designed to calculate rolling
mean statistics relevant to some pollutant limits e.g. 8 hour
rolling means for ozone and 24 hour rolling means for
PM10. However, the function has a more general use in helping to
display rolling mean values in flexible ways e.g. with the rolling
window width left, right or centre aligned.
The function will try and fill in missing time gaps to get a full
time sequence but return a data frame with the same number of rows
supplied.
}
\examples{
## rolling 8-hour mean for ozone
mydata <- rollingMean(mydata, pollutant = "o3", width = 8, new.name =
"rollingo3", data.thresh = 75, align = "right")
}
\author{
David Carslaw
}
\keyword{methods}
|
/man/rollingMean.Rd
|
no_license
|
antoniopessotti/openair
|
R
| false
| false
| 2,424
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/utilities.R
\name{rollingMean}
\alias{rollingMean}
\title{Calculate rollingMean values}
\usage{
rollingMean(mydata, pollutant = "o3", width = 8, new.name = "rolling",
data.thresh = 75, align = "centre", ...)
}
\arguments{
\item{mydata}{A data frame containing a \code{date}
field. \code{mydata} must contain a \code{date} field in
\code{Date} or \code{POSIXct} format. The input time series must
be regular e.g. hourly, daily.}
\item{pollutant}{The name of a pollutant e.g. \code{pollutant = "o3"}.}
\item{width}{The averaging period (rolling window width) to use
e.g. \code{width = 8} will generate 8-hour rolling mean values
when hourly data are analysed.}
\item{new.name}{The name given to the new rollingMean variable. If
not supplied it will create a name based on the name of the
pollutant and the averaging period used.}
\item{data.thresh}{The data capture threshold in %. No values are
calculated if data capture over the period of interest is less
than this value. For example, with \code{width = 8} and
\code{data.thresh = 75} at least 6 hours are required to calculate
the mean, else \code{NA} is returned.}
\item{align}{specifies how the moving window should be
aligned. \code{"right"} means that the previous \code{hours}
(including the current) are averaged. This seems to be the default
for UK air quality rolling mean statistics. \code{"left"} means
that the forward \code{hours} are averaged, and \code{"centre"} or
\code{"center"}, which is the default.}
\item{...}{other arguments, currently unused.}
}
\description{
Calculate rollingMean values taking account of data capture thresholds
}
\details{
This is a utility function mostly designed to calculate rolling
mean statistics relevant to some pollutant limits e.g. 8 hour
rolling means for ozone and 24 hour rolling means for
PM10. However, the function has a more general use in helping to
display rolling mean values in flexible ways e.g. with the rolling
window width left, right or centre aligned.
The function will try and fill in missing time gaps to get a full
time sequence but return a data frame with the same number of rows
supplied.
}
\examples{
## rolling 8-hour mean for ozone
mydata <- rollingMean(mydata, pollutant = "o3", width = 8, new.name =
"rollingo3", data.thresh = 75, align = "right")
}
\author{
David Carslaw
}
\keyword{methods}
|
#' Load Application Default Credentials
#'
#' Loads credentials from a file identified via a search strategy known as
#' Application Default Credentials (ADC). The hope is to make auth "just work"
#' for someone working on Google-provided infrastructure or who has used Google
#' tooling to get started. A sequence of paths is consulted, which we describe
#' here, with some abuse of notation. ALL_CAPS represents the value of an
#' environment variable and `%||%` is used in the spirit of a [null coalescing
#' operator](https://en.wikipedia.org/wiki/Null_coalescing_operator).
#' ```
#' GOOGLE_APPLICATION_CREDENTIALS
#' CLOUDSDK_CONFIG/application_default_credentials.json
#' # on Windows:
#' (APPDATA %||% SystemDrive %||% C:)\gcloud\application_default_credentials.json
#' # on not-Windows:
#' ~/.config/gcloud/application_default_credentials.json
#' ```
#' If the above search successfully identifies a JSON file, it is parsed and
#' ingested either as a service account token or a user OAuth2 credential.
#'
#' @inheritParams token_fetch
#'
#' @seealso
#'
#' <https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application>
#'
#' <https://cloud.google.com/sdk/docs/>
#'
#' @return An [`httr::TokenServiceAccount`][httr::Token-class] or an
#' [`httr::Token2.0`][httr::Token-class] or `NULL`.
#' @family credential functions
#' @export
#' @examples
#' \dontrun{
#' credentials_app_default()
#' }
credentials_app_default <- function(scopes = NULL, ...) {
  cat_line("trying credentials_app_default()")
  # In general, application default credentials only include the
  # cloud-platform scope.
  path <- credentials_app_default_path()
  if (!file_exists(path)) {
    return(NULL)
  }
  cat_line("file exists at ADC path: ", path)
  # The JSON file stored on disk can be either a user credential or a service
  # account.
  info <- jsonlite::fromJSON(path, simplifyVector = FALSE)
  if (info$type == "authorized_user") {
    # In the case of *user* credentials stored as the application default,
    # only the cloud-platform scope will be included. This means we need our
    # scopes to be *implied* by the cloud-platform scope, which is hard to
    # validate; instead, we just approximate.
    # (Fixed: the bigquery scope was listed twice; the duplicate is removed.)
    valid_scopes <- c(
      "https://www.googleapis.com/auth/bigquery",
      "https://www.googleapis.com/auth/cloud-platform",
      "https://www.googleapis.com/auth/cloud-platform.readonly"
    )
    if (is.null(scopes) || !all(scopes %in% valid_scopes)) {
      return(NULL)
    }
    cat_line("ADC cred type: authorized_user")
    endpoint <- httr::oauth_endpoints("google")
    app <- httr::oauth_app("google", info$client_id, secret = info$client_secret)
    # Fixed: the scope URI previously read ".../auth/cloud.platform" (dot),
    # which is not a valid Google OAuth scope; the correct URI is hyphenated.
    scope <- "https://www.googleapis.com/auth/cloud-platform"
    token <- httr::Token2.0$new(
      endpoint = endpoint,
      app = app,
      credentials = list(refresh_token = info$refresh_token),
      # ADC is already cached.
      cache_path = FALSE,
      params = list(scope = scope, as_header = TRUE)
    )
    # Exchange the stored refresh token for a live access token.
    token$refresh()
    token
  } else {
    cat_line("ADC cred type: service_account")
    credentials_service_account(scopes, path)
  }
}
# Resolve the path searched for Application Default Credentials.
# Search order: GOOGLE_APPLICATION_CREDENTIALS env var, then
# CLOUDSDK_CONFIG, then the platform-specific gcloud config directory.
credentials_app_default_path <- function() {
  # An explicit GOOGLE_APPLICATION_CREDENTIALS path wins outright.
  if (nzchar(Sys.getenv("GOOGLE_APPLICATION_CREDENTIALS"))) {
    return(path_expand(Sys.getenv("GOOGLE_APPLICATION_CREDENTIALS")))
  }
  pth <- "application_default_credentials.json"
  if (nzchar(Sys.getenv("CLOUDSDK_CONFIG"))) {
    pth <- c(Sys.getenv("CLOUDSDK_CONFIG"), pth)
  } else if (is_windows()) {
    appdata <- Sys.getenv("APPDATA", Sys.getenv("SystemDrive", "C:"))
    pth <- c(appdata, "gcloud", pth)
  } else {
    # BUG FIX: keep the JSON filename. The original overwrote `pth` with only
    # the directory (~/.config/gcloud), so the returned path could never point
    # at the credentials file on non-Windows systems.
    pth <- c(path_home(".config", "gcloud"), pth)
  }
  path_join(pth)
}
|
/R/credentials_app_default.R
|
permissive
|
MarkEdmondson1234/gargle
|
R
| false
| false
| 3,754
|
r
|
#' Load Application Default Credentials
#'
#' Loads credentials from a file identified via a search strategy known as
#' Application Default Credentials (ADC). The hope is to make auth "just work"
#' for someone working on Google-provided infrastructure or who has used Google
#' tooling to get started. A sequence of paths is consulted, which we describe
#' here, with some abuse of notation. ALL_CAPS represents the value of an
#' environment variable and `%||%` is used in the spirit of a [null coalescing
#' operator](https://en.wikipedia.org/wiki/Null_coalescing_operator).
#' ```
#' GOOGLE_APPLICATION_CREDENTIALS
#' CLOUDSDK_CONFIG/application_default_credentials.json
#' # on Windows:
#' (APPDATA %||% SystemDrive %||% C:)\gcloud\application_default_credentials.json
#' # on not-Windows:
#' ~/.config/gcloud/application_default_credentials.json
#' ```
#' If the above search successfully identifies a JSON file, it is parsed and
#' ingested either as a service account token or a user OAuth2 credential.
#'
#' @inheritParams token_fetch
#'
#' @seealso
#'
#' <https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application>
#'
#' <https://cloud.google.com/sdk/docs/>
#'
#' @return An [`httr::TokenServiceAccount`][httr::Token-class] or an
#' [`httr::Token2.0`][httr::Token-class] or `NULL`.
#' @family credential functions
#' @export
#' @examples
#' \dontrun{
#' credentials_app_default()
#' }
credentials_app_default <- function(scopes = NULL, ...) {
cat_line("trying credentials_app_default()")
# In general, application default credentials only include the cloud-platform
# scope.
path <- credentials_app_default_path()
if (!file_exists(path)) {
return(NULL)
}
cat_line("file exists at ADC path: ", path)
# The JSON file stored on disk can be either a user credential or a service
# account.
info <- jsonlite::fromJSON(path, simplifyVector = FALSE)
if (info$type == "authorized_user") {
# In the case of *user* credentials stored as the application default, only
# the cloud-platform scope will be included. This means we need our scopes to
# be *implied* by the cloud-platform scope, which is hard to validate;
# instead, we just approximate.
valid_scopes <- c(
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.readonly"
)
if (is.null(scopes) || !all(scopes %in% valid_scopes)) {
return(NULL)
}
cat_line("ADC cred type: authorized_user")
endpoint <- httr::oauth_endpoints("google")
app <- httr::oauth_app("google", info$client_id, secret = info$client_secret)
scope <- "https://www.googleapis.com/auth/cloud.platform"
token <- httr::Token2.0$new(
endpoint = endpoint,
app = app,
credentials = list(refresh_token = info$refresh_token),
# ADC is already cached.
cache_path = FALSE,
params = list(scope = scope, as_header = TRUE)
)
token$refresh()
token
} else {
cat_line("ADC cred type: service_account")
credentials_service_account(scopes, path)
}
}
credentials_app_default_path <- function() {
if (nzchar(Sys.getenv("GOOGLE_APPLICATION_CREDENTIALS"))) {
return(path_expand(Sys.getenv("GOOGLE_APPLICATION_CREDENTIALS")))
}
pth <- "application_default_credentials.json"
if (nzchar(Sys.getenv("CLOUDSDK_CONFIG"))) {
pth <- c(Sys.getenv("CLOUDSDK_CONFIG"), pth)
} else if (is_windows()) {
appdata <- Sys.getenv("APPDATA", Sys.getenv("SystemDrive", "C:"))
pth <- c(appdata, "gcloud", pth)
} else {
pth <- path_home(".config", "gcloud")
}
path_join(pth)
}
|
# Empirical survival ------------------------------------------------------
# Computes empirical (Kaplan-Meier) survival estimates at 5/10/15 years from
# digitized KM curves reconstructed to individual patient data (IPD), split by
# developed vs developing study countries, and exports the tables to Excel.
source('lib/reload.R'); reload()
load('data/rda/study_info.rda')
load('data/rda/KM_digitized.rda')
load('data/rda/KM2IPD.rda')
# Split publication IDs by development status of the study country.
id_developed <- study_info$pubID[study_info$Developed=='Developed']
id_developing <- study_info$pubID[study_info$Developed=='Developing']
# Keep only studies for which reconstructed IPD is available.
tst1 <- KM2IPD[intersect(id_developed, names(KM2IPD))]
tst2 <- KM2IPD[intersect(id_developing, names(KM2IPD))]
# KM survival summary at 5, 10, 15 years for one study's IPD.
# NOTE(review): IPD2Surv() is defined in sourced lib code (not visible here);
# presumably it builds a survival::Surv object -- confirm there.
empirical_surv <- function(ipd){
s1 <- IPD2Surv(ipd)
tidy(summary(survfit(s1~1),c(5,10,15)))
}
# Death count per study, taken from the reconstructed event times (d.times).
KM_deaths <- tibble(pubID = names(KM2IPD),
ndeath = sapply(KM2IPD, function(ipd) length(ipd$d.times)))
# Total N per study from the digitized KM data.
# NOTE(review): KM_full is not created in this script; assumed to come from
# one of the .rda files loaded above -- confirm.
KM_Ns <- KM_full %>% select(ids,N) %>%
group_by(ids) %>% summarise(N = sum(N)) %>% ungroup() %>%
left_join(KM_deaths, c('ids' = 'pubID')) %>%
rename(pubID=ids)
# Survival table for developed countries, one row per study x time point,
# ordered by year of study.
dat1 <- plyr::ldply(lapply(tst1, empirical_surv),.id='pubID') %>%
left_join(study_info %>% select(pubID, yr_of_study, end_of_study)) %>%
distinct() %>% left_join(KM_Ns) %>%
select(pubID, N, ndeath, yr_of_study, end_of_study, time, Survival) %>%
arrange(yr_of_study)
# Same construction for developing countries.
dat2 <- plyr::ldply(lapply(tst2, empirical_surv),.id='pubID') %>%
left_join(study_info %>% select(pubID, yr_of_study, end_of_study)) %>%
distinct() %>% left_join(KM_Ns) %>%
select(pubID, N, ndeath, yr_of_study, end_of_study, time, Survival) %>%
arrange(yr_of_study)
library(openxlsx)
# One worksheet per country group.
write.xlsx(list('Developed' = dat1, 'Developing'=dat2),
file = 'docs/Empirical_Survival_from_KM.xlsx')
# Long-format table (Year / group / survival / study year) for downstream use.
out <- plyr::ldply(list('Developed countries'=dat1, 'Developing countries'=dat2), .id='Dev') %>%
mutate(Year=factor(paste(time,'year')),
Med = Survival,
yr = yr_of_study) %>% select(Year, Dev, Med,yr)
|
/EmpiricalSurvivalEstimatesKM.R
|
permissive
|
webbedfeet/LupusDeath
|
R
| false
| false
| 1,764
|
r
|
# Empirical survival ------------------------------------------------------
source('lib/reload.R'); reload()
load('data/rda/study_info.rda')
load('data/rda/KM_digitized.rda')
load('data/rda/KM2IPD.rda')
id_developed <- study_info$pubID[study_info$Developed=='Developed']
id_developing <- study_info$pubID[study_info$Developed=='Developing']
tst1 <- KM2IPD[intersect(id_developed, names(KM2IPD))]
tst2 <- KM2IPD[intersect(id_developing, names(KM2IPD))]
empirical_surv <- function(ipd){
s1 <- IPD2Surv(ipd)
tidy(summary(survfit(s1~1),c(5,10,15)))
}
KM_deaths <- tibble(pubID = names(KM2IPD),
ndeath = sapply(KM2IPD, function(ipd) length(ipd$d.times)))
KM_Ns <- KM_full %>% select(ids,N) %>%
group_by(ids) %>% summarise(N = sum(N)) %>% ungroup() %>%
left_join(KM_deaths, c('ids' = 'pubID')) %>%
rename(pubID=ids)
dat1 <- plyr::ldply(lapply(tst1, empirical_surv),.id='pubID') %>%
left_join(study_info %>% select(pubID, yr_of_study, end_of_study)) %>%
distinct() %>% left_join(KM_Ns) %>%
select(pubID, N, ndeath, yr_of_study, end_of_study, time, Survival) %>%
arrange(yr_of_study)
dat2 <- plyr::ldply(lapply(tst2, empirical_surv),.id='pubID') %>%
left_join(study_info %>% select(pubID, yr_of_study, end_of_study)) %>%
distinct() %>% left_join(KM_Ns) %>%
select(pubID, N, ndeath, yr_of_study, end_of_study, time, Survival) %>%
arrange(yr_of_study)
library(openxlsx)
write.xlsx(list('Developed' = dat1, 'Developing'=dat2),
file = 'docs/Empirical_Survival_from_KM.xlsx')
out <- plyr::ldply(list('Developed countries'=dat1, 'Developing countries'=dat2), .id='Dev') %>%
mutate(Year=factor(paste(time,'year')),
Med = Survival,
yr = yr_of_study) %>% select(Year, Dev, Med,yr)
|
## Download / extract data -------------------------------------------------
# Extract the zip only if the data folder is not already present.
filename <- "exdata_data_household_power_consumption.zip"
if (!file.exists("exdata_data_household_power_consumption")) {
  unzip(filename)
}

## Read data ---------------------------------------------------------------
# "?" encodes missing values in the raw power-consumption file.
data <- read.table("./exdata_data_household_power_consumption/household_power_consumption.txt",
                   header = TRUE,
                   sep = ";",
                   na.strings = "?")

# Keep only the two target days; raw dates are d/m/Y strings.
requiredData <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
requiredData$Date <- as.Date(requiredData$Date, format = "%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x-axis.
dateTime <- paste(requiredData$Date, requiredData$Time)
requiredData$DateTime <- as.POSIXct(dateTime)

## Save as PNG -------------------------------------------------------------
png("plot2.png", width = 480, height = 480)
plot(requiredData$Global_active_power ~ requiredData$DateTime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
/plot2.R
|
no_license
|
sowmyamohan13/ExData_Plotting1
|
R
| false
| false
| 789
|
r
|
##Downloaded files
filename= "exdata_data_household_power_consumption.zip"
if(!file.exists("exdata_data_household_power_consumption")){
unzip(filename)
}
##Read data
data<-read.table("./exdata_data_household_power_consumption/household_power_consumption.txt" ,
header = T,
sep=";" ,
na.strings = "?")
requiredData<- subset(data , Date %in% c("1/2/2007","2/2/2007"))
requiredData$Date <-as.Date(requiredData$Date ,format= "%d/%m/%Y")
dateTime<-paste(requiredData$Date,requiredData$Time)
requiredData$DateTime <-as.POSIXct(dateTime)
##Save as PNG
png("plot2.png",width = 480, height = 480)
##Plot
plot(requiredData$Global_active_power~requiredData$DateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
|
## Calculate attributable fraction of Hib ##
########################################################################################################
## Prep ##
########################################################################################################
library(metafor)
library(matrixStats)
library(plyr)
library(ggplot2)
library(reshape2)
# Shared GBD infrastructure functions ("/filepath/" is a scrubbed placeholder).
# BUG FIX: removed the duplicated source() of get_covariate_estimates.R.
source("/filepath/get_covariate_estimates.R")
source("/filepath/mr_brt_functions.R")
source("/filepath/plot_mr_brt_function.R")
source("/filepath/run_mr_brt_function.R")
# Vaccine-efficacy (VE) input data; "filepath" is a scrubbed placeholder.
hib_vac_data <- read.csv("filepath")
# Invasive-disease VE 95% CI bounds; SE recovered from the CI width under
# normality. (The original no-op self-assignment of ve_invasive was dropped.)
hib_vac_data$ve_invasive_lower <- hib_vac_data$lower_invasive
hib_vac_data$ve_invasive_upper <- hib_vac_data$upper_invasive
hib_vac_data$ve_invasive_se <- with(hib_vac_data, (ve_invasive_upper - ve_invasive_lower)/2/qnorm(0.975))
# All-cause pneumonia VE and SE from the same CI-width logic.
hib_vac_data$ve_pneumonia <- hib_vac_data$Vaccine.Efficacy
hib_vac_data$ve_pneumonia_lower <- hib_vac_data$Lower.95.
hib_vac_data$ve_pneumonia_upper <- hib_vac_data$Upper.95.
hib_vac_data$ve_pneumonia_se <- with(hib_vac_data, (ve_pneumonia_upper - ve_pneumonia_lower)/2/qnorm(0.975))
# Log-space versions: ln(1 - VE/100) = ln(relative risk). The SE is taken from
# the distance between the point estimate and the upper CI bound.
hib_vac_data$ln_vi <- log(1-hib_vac_data$ve_invasive/100)
hib_vac_data$ln_vi_upper <- log(1-hib_vac_data$ve_invasive_upper/100)
hib_vac_data$ln_vi_se <- (hib_vac_data$ln_vi - hib_vac_data$ln_vi_upper)/qnorm(0.975)
hib_vac_data$ln_ve <- log(1-hib_vac_data$ve_pneumonia/100)
hib_vac_data$ln_ve_upper <- log(1-hib_vac_data$ve_pneumonia_upper/100)
hib_vac_data$ln_ve_se <- (hib_vac_data$ln_ve - hib_vac_data$ln_ve_upper)/qnorm(0.975)
#########################################################################################################
## Part 1: Meta-analysis of Hib RCT studies to get a PAF in the absence of vaccine ##
#################################################################################
## keep if studies are RCT
rct <- subset(hib_vac_data, Study.type=="RCT" & is_outlier==0)
## The idea is to run a meta-analysis for studies that report
## the reduction in invasive Hib disease and for
## reduction in all-cause pneumonia. The ratio of these two
## values is the attributable fraction for Hib in the
## absence of the vaccine.
paf_table <- rct[,c("first","iso3","ve_invasive","ve_invasive_se","ve_pneumonia","ve_pneumonia_se","ln_vi","ln_vi_se","ln_ve","ln_ve_se")]
pneumo_draws <- c()
invasive_draws <- c()
# 1000 Monte Carlo draws of the per-study PAF = VE(pneumonia)/VE(invasive),
# capped just below 1 to keep the fraction in a sensible range.
for(i in 1:1000){
draw_pneumo <- rnorm(n=length(rct$source), mean=rct$ve_pneumonia, sd=rct$ve_pneumonia_se)
draw_hib <- rnorm(n=length(rct$source), mean=rct$ve_invasive, sd=rct$ve_invasive_se)
draw <- draw_pneumo / draw_hib
draw <- ifelse(draw > 1, 0.995, draw)
paf_table[,paste0("draw_",i)] <- draw
}
## Do this in log-space since the VE estimates are log-normal
# NOTE(review): despite the comment above, the draws are taken in natural
# (percent VE) space via rnorm on the ve_* columns -- confirm intent.
# Columns 11:1010 are the 1000 draw_* columns appended after the 10 id columns.
paf_table$mean <- rowMeans(paf_table[,11:1010])
paf_table$std <- apply(paf_table[,11:1010], 1, sd)
stds <- rowSds(as.matrix(paf_table[,11:1010]))
paf_table[,c("first","iso3","ve_invasive","ve_invasive_se","ve_pneumonia","ve_pneumonia_se","mean","std")]
## Check the uncertainty using variance of ratios ##
# Delta-method SE of the ratio, as a cross-check against the draw-based SD.
paf_table$se_ratio <- with(paf_table,
sqrt(ve_pneumonia^2 / ve_invasive^2 *
(ve_pneumonia_se^2 / ve_pneumonia^2 + ve_invasive_se^2/ve_invasive^2)))
meta <- rma(mean, sei=se_ratio, data=paf_table, method="DL")
summary(meta)
forest(meta, slab=paf_table$iso3)
## Run meta-analysis ##
# DerSimonian-Laird random-effects meta-analysis using the draw-based SD;
# this `meta` overwrites the cross-check fit above.
meta <- rma(mean, sei=std, data=paf_table, method="DL")
summary(meta)
forest(meta, slab=paf_table$iso3)
## Test this in MR-BRT ##
# MR-BRT intercept-only model as an alternative pooling of the same PAF data.
fit1 <- run_mr_brt(
output_dir = "filepath",
model_label = "hib_paf",
data = paf_table[!is.na(paf_table$mean),c("mean","std","iso3")],
mean_var = "mean",
se_var = "std",
overwrite_previous = TRUE
)
df_pred <- data.frame(intercept = 1)
pred1 <- predict_mr_brt(fit1, newdata = df_pred)
pred_object <- load_mr_brt_preds(pred1)
preds <- pred_object$model_summaries
# Calculate se
preds$se <- (preds$Y_mean_hi - preds$Y_mean_lo) / 2 / qnorm(0.975)
mod_data <- fit1$train_data
## Create essentially a forest plot
# Per-study PAF points with 95% intervals plus the pooled estimate (purple).
f <- ggplot(mod_data, aes(ymax=mean + std*1.96, ymin=mean - std*1.96)) + geom_point(aes(y=mean, x=iso3)) +
geom_errorbar(aes(x=iso3), width=0) +
theme_bw() + ylab("PAF") + xlab("") + coord_flip() +
ggtitle(paste0("Modeled Attributable Fraction: ", round(preds$Y_mean,2), " (", round(preds$Y_mean_lo,2),"-",round(preds$Y_mean_hi,2),")")) +
geom_hline(yintercept=0) + geom_hline(yintercept=preds$Y_mean, col="purple") +
geom_rect(data=preds, aes(ymin=Y_mean_lo, ymax=Y_mean_hi, xmin=0, xmax=length(mod_data$iso3)+1), alpha=0.1, fill="purple")
print(f)
## Calculate Hib paf draws ##
# Expand the single pooled PAF into 1000 normal draws for downstream use.
hib_paf <- data.frame(modelable_entity="lri_hib", rei_id=189)
for(i in 1:1000){
hib_paf[,paste0("draw_",i)] <- rnorm(n=1, mean=preds$Y_mean, sd=preds$se)
}
## We have Hib PAF in the absence of vaccine ##
write.csv(hib_paf, "filepath", row.names=F)
## Check for values outside of range (negative or positive)
draws <- hib_paf[,3:1002]
range(draws)
hist(as.numeric(draws))
quantile(draws, 0.025)
################################################################################################################
## Part 2: Use Hib vaccine coverage to determine the Hib PAF in the presence of vaccine ##
################################################################################################################
## Previous GBD rounds used a meta-analysis (Swingler et al https://www.ncbi.nlm.nih.gov/pubmed/17443509)
## to get an estimate of the vaccine efficacy against invasive Hib disease.
## Three problems with this. The first is that they didn't use one of our RCTs (Gessner 2005).
## The second is that previous GBDs didn't incorporate uncertainty
## The third is that we have a better estimate of this value.
## Need to track the differences between previous approach (0.8, no uncertainty),
## incorporation of Swingler uncertainty (0.46-0.97),
## and estimation using our data.
### use the value produced by the meta-analysis of Hib data ###
efficacy_default <- 0.91
# Swingler systematic-review efficacy with log-space uncertainty.
sys_efficacy <- data.frame(mean=0.8, lower=0.46, upper=0.97, ln_mean=log(1-0.8), ln_upper=log(1-0.46))
sys_efficacy$ln_se <- (sys_efficacy$ln_upper - sys_efficacy$ln_mean)/qnorm(0.975)
meta_efficacy <- data.frame(mean = 0.91, lower=0.77, upper=0.96, ln_mean=-2.3835, ln_se = 0.4611)
# Random-effects meta-analysis of invasive-disease VE in log(RR) space.
meta.invasive <- rma(ln_vi, sei=ln_vi_se, data=rct, method="DL")
summary(meta.invasive)
## Check that value makes sense
1-exp(meta.invasive$b)
rct[,c("ve_invasive")]
## Calculate Hib VE invasive draws ##
# Row 1 = fixed default, row 2 = systematic review, row 3 = our meta-analysis;
# 1000 draws per approach (rows 2-3 drawn in log space, back-transformed).
hib_invasive <- data.frame(type=c("single_value","sys_review","meta_analysis"))
for(i in 1:1000){
hib_invasive[2,paste0("draw_",i)] <- 1-exp(rnorm(n=1, mean=sys_efficacy$ln_mean, sd=sys_efficacy$ln_se))
hib_invasive[3,paste0("draw_",i)] <- 1-exp(rnorm(n=1, mean=meta.invasive$b, sd=meta.invasive$se))
hib_invasive[1,paste0("draw_",i)] <- efficacy_default
}
# Make a histogram to compare
hib_melt <- melt(hib_invasive[2:3,])
ggplot(hib_melt, aes(x=value, fill=type)) + geom_histogram(col="black") + theme_bw() + geom_vline(xintercept=0.8, lwd=2, col="darkblue") +
geom_vline(xintercept=1-exp(meta.invasive$b), col="darkred", lwd=2)
## Pull in the Hib vaccine coverage for every GBD location and year ##
hib_cov <- get_covariate_estimates(covariate_id=47, location_id="all", year_id=1990:2019, decomp_step="step4")
## Create draws for vaccine coverage ##
library(boot)
# logit transform the vaccine coverage
hib_cov$logit_mean <- logit(hib_cov$mean_value)
hib_cov$logit_upper <- logit(hib_cov$upper_value)
hib_cov$logit_se <- (hib_cov$logit_upper - hib_cov$logit_mean) / qnorm(0.975)
hib_c <- hib_cov
# Coverage draws in logit space, back-transformed; zero coverage produces
# NaN from logit(0), which is reset to 0 here.
# NOTE(review): draw=="NaN" relies on numeric->character coercion; is.nan()
# would be the robust test.
for(i in 1:1000){
draw <- inv.logit(rnorm(n=length(hib_cov$mean_value), mean=hib_cov$logit_mean, sd=hib_cov$logit_se))
draw <- ifelse(draw=="NaN",0,draw)
hib_c[,paste0("coverage_",i)] <- draw
}
## Save this for later ##
write.csv(hib_c, "filepath", row.names=F)
## To be able to test this ##
# is_test=TRUE restricts to 2017 and also computes the two alternative PAF
# series (systematic review / meta-analysis) for comparison.
is_test <- FALSE
if(is_test==TRUE){
est_df <- data.frame(subset(hib_c, year_id==2017))
} else{
est_df <- data.frame(hib_c)
}
## everything is prepped ##
## Now, calculate the Hib PAF in the presence of vaccine ##
est_df$mean_coverage <- est_df$mean_value
hib_df <- est_df[,c("location_id","location_name","year_id","sex_id","age_group_id","mean_coverage")]
## The loop creates the values from GBD 2017 but also calculates alternatives, summarizes, and saves for comparison ##
# Equation is PAFBase * (1-Cov*VE) / (1 - PAFBase * Cov * VE)
paf_sys <- data.frame(location_name=hib_df$location_name)
paf_meta <- data.frame(location_name=hib_df$location_name)
for(i in 1:1000){
p <- hib_paf[,paste0("draw_",i)]
cov <- est_df[,paste0("coverage_",i)]
ve <- hib_invasive[,paste0("draw_",i)]
gbd <- p * (1 - cov * ve[1]) / (1 - p * cov * ve[1])
hib_df[,paste0("draw_",i)] <- gbd
if(is_test==TRUE){
sys <- p * (1 - cov * ve[2]) / (1 - p * cov * ve[2])
meta <- p * (1 - cov * ve[3]) / (1 - p * cov * ve[3])
paf_sys[,paste0("draw_",i)] <- sys
paf_meta[,paste0("draw_",i)] <- meta
}
}
# Summaries over the 1000 draw columns (7:1006 follow the 6 id columns).
hib_df$mean_paf <- rowMeans(hib_df[,7:1006])
hib_df$std_paf <- apply(hib_df[,7:1006], 1, sd)
hib_df$paf_lower <- apply(hib_df[,7:1006], 1, function(x) quantile(x, 0.025))
hib_df$paf_upper <- apply(hib_df[,7:1006], 1, function(x) quantile(x, 0.975))
if(is_test==TRUE){
hib_df$mean_sys <- rowMeans(paf_sys[,2:1001])
hib_df$sys_lower <- apply(paf_sys[,2:1001], 1, function(x) quantile(x, 0.025))
hib_df$sys_upper <- apply(paf_sys[,2:1001], 1, function(x) quantile(x, 0.975))
hib_df$mean_meta <- rowMeans(paf_meta[,2:1001])
hib_df$meta_lower <- apply(paf_meta[,2:1001], 1, function(x) quantile(x, 0.025))
hib_df$meta_upper <- apply(paf_meta[,2:1001], 1, function(x) quantile(x, 0.975))
}
## Tada!! ##
write.csv(hib_df, "filepath", row.names=F)
## Summarize Hib PAFs for use in pcv analysis ##
hib_trunc <- hib_df[,c("location_id","year_id","mean_paf","std_paf","paf_lower","paf_upper")]
colnames(hib_trunc) <- c("location_id","year_id","hib_paf","hib_paf_std","hib_paf_lower","hib_paf_upper")
write.csv(hib_trunc, "filepath", row.names=F)
################################################################################################################
## Great, now produce some diagnostic plots! ##
# NOTE(review): the plots below using mean_sys/sys_* and mean_meta/meta_*
# only work when is_test==TRUE (those columns are not created otherwise).
ggplot(data=hib_df, aes(x=mean_coverage, y=mean_paf, ymin=paf_lower, ymax=paf_upper)) + geom_errorbar(width=0) + geom_point() +
theme_bw() + ggtitle("Approach used in GBD 2017") + stat_smooth(method="loess", col="purple")
ggplot(data=hib_df, aes(x=mean_coverage, y=mean_sys, ymin=sys_lower, ymax=sys_upper)) + geom_errorbar(width=0) + geom_point() +
theme_bw() + ggtitle("Approach using Systematic Review Uncertainty") + stat_smooth(method="loess", col="darkblue")
ggplot(data=hib_df, aes(x=mean_coverage, y=mean_meta, ymin=meta_lower, ymax=meta_upper)) + geom_errorbar(width=0) + geom_point() +
theme_bw() + ggtitle("Approach using Meta-analysis Uncertainty") + stat_smooth(method="loess", col="darkred")
ggplot(hib_df, aes(x=mean_coverage)) + geom_errorbar(aes(ymin=paf_lower, ymax=paf_upper), col="purple", alpha=0.25) +
geom_errorbar(aes(ymin=sys_lower, ymax=sys_upper), col="darkblue", alpha=0.25) +
geom_errorbar(aes(ymin=meta_lower, ymax=meta_upper), col="darkred", alpha=0.25) + theme_bw()
diag_df <- melt(hib_df[,c("mean_coverage","mean_paf","mean_sys",
"mean_meta")], id.vars="mean_coverage")
ggplot(diag_df, aes(x=mean_coverage, y=value, col=variable)) + geom_line() + theme_bw()
diag_df <- melt(data.frame(mean_coverage=hib_df$mean_coverage, range_paf = hib_df$paf_upper - hib_df$paf_lower,
range_sys = hib_df$sys_upper - hib_df$sys_lower,
range_meta = hib_df$meta_upper - hib_df$meta_lower), id.vars="mean_coverage")
ggplot(diag_df, aes(x=mean_coverage, y=value, col=variable)) + geom_line() + theme_bw()
|
/gbd_2019/nonfatal_code/lri/Etiologies/Hib/calculate_hib_paf.R
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false
| false
| 12,225
|
r
|
## Calculate attributable fraction of Hib ##
########################################################################################################
## Prep ##
########################################################################################################
library(metafor)
library(matrixStats)
library(plyr)
library(ggplot2)
library(reshape2)
source("/filepath/get_covariate_estimates.R")
source("/filepath/get_covariate_estimates.R")
source("/filepath/mr_brt_functions.R")
source("/filepath/plot_mr_brt_function.R")
source("/filepath/run_mr_brt_function.R")
hib_vac_data <- read.csv("filepath")
hib_vac_data$ve_invasive <- hib_vac_data$ve_invasive
hib_vac_data$ve_invasive_lower <- hib_vac_data$lower_invasive
hib_vac_data$ve_invasive_upper <- hib_vac_data$upper_invasive
hib_vac_data$ve_invasive_se <- with(hib_vac_data, (ve_invasive_upper - ve_invasive_lower)/2/qnorm(0.975))
hib_vac_data$ve_pneumonia <- hib_vac_data$Vaccine.Efficacy
hib_vac_data$ve_pneumonia_lower <- hib_vac_data$Lower.95.
hib_vac_data$ve_pneumonia_upper <- hib_vac_data$Upper.95.
hib_vac_data$ve_pneumonia_se <- with(hib_vac_data, (ve_pneumonia_upper - ve_pneumonia_lower)/2/qnorm(0.975))
# make in log space
hib_vac_data$ln_vi <- log(1-hib_vac_data$ve_invasive/100)
hib_vac_data$ln_vi_upper <- log(1-hib_vac_data$ve_invasive_upper/100)
hib_vac_data$ln_vi_se <- (hib_vac_data$ln_vi - hib_vac_data$ln_vi_upper)/qnorm(0.975)
hib_vac_data$ln_ve <- log(1-hib_vac_data$ve_pneumonia/100)
hib_vac_data$ln_ve_upper <- log(1-hib_vac_data$ve_pneumonia_upper/100)
hib_vac_data$ln_ve_se <- (hib_vac_data$ln_ve - hib_vac_data$ln_ve_upper)/qnorm(0.975)
#########################################################################################################
## Part 1: Meta-analysis of Hib RCT studies to get a PAF in the absence of vaccine ##
#################################################################################
## keep if studies are RCT
rct <- subset(hib_vac_data, Study.type=="RCT" & is_outlier==0)
## The idea is to run a meta-analysis for studies that report
## the reduction in invasive Hib disease and for
## reduction in all-cause pneumonia. The ratio of these two
## values is the attributable fraction for Hib in the
## absence of the vaccine.
paf_table <- rct[,c("first","iso3","ve_invasive","ve_invasive_se","ve_pneumonia","ve_pneumonia_se","ln_vi","ln_vi_se","ln_ve","ln_ve_se")]
pneumo_draws <- c()
invasive_draws <- c()
for(i in 1:1000){
draw_pneumo <- rnorm(n=length(rct$source), mean=rct$ve_pneumonia, sd=rct$ve_pneumonia_se)
draw_hib <- rnorm(n=length(rct$source), mean=rct$ve_invasive, sd=rct$ve_invasive_se)
draw <- draw_pneumo / draw_hib
draw <- ifelse(draw > 1, 0.995, draw)
paf_table[,paste0("draw_",i)] <- draw
}
## Do this in log-space since the VE estimates are log-normal
paf_table$mean <- rowMeans(paf_table[,11:1010])
paf_table$std <- apply(paf_table[,11:1010], 1, sd)
stds <- rowSds(as.matrix(paf_table[,11:1010]))
paf_table[,c("first","iso3","ve_invasive","ve_invasive_se","ve_pneumonia","ve_pneumonia_se","mean","std")]
## Check the uncertainty using variance of ratios ##
paf_table$se_ratio <- with(paf_table,
sqrt(ve_pneumonia^2 / ve_invasive^2 *
(ve_pneumonia_se^2 / ve_pneumonia^2 + ve_invasive_se^2/ve_invasive^2)))
meta <- rma(mean, sei=se_ratio, data=paf_table, method="DL")
summary(meta)
forest(meta, slab=paf_table$iso3)
## Run meta-analysis ##
meta <- rma(mean, sei=std, data=paf_table, method="DL")
summary(meta)
forest(meta, slab=paf_table$iso3)
## Test this in MR-BRT ##
fit1 <- run_mr_brt(
output_dir = "filepath",
model_label = "hib_paf",
data = paf_table[!is.na(paf_table$mean),c("mean","std","iso3")],
mean_var = "mean",
se_var = "std",
overwrite_previous = TRUE
)
df_pred <- data.frame(intercept = 1)
pred1 <- predict_mr_brt(fit1, newdata = df_pred)
pred_object <- load_mr_brt_preds(pred1)
preds <- pred_object$model_summaries
# Calculate se
preds$se <- (preds$Y_mean_hi - preds$Y_mean_lo) / 2 / qnorm(0.975)
mod_data <- fit1$train_data
## Create essentially a forest plot
f <- ggplot(mod_data, aes(ymax=mean + std*1.96, ymin=mean - std*1.96)) + geom_point(aes(y=mean, x=iso3)) +
geom_errorbar(aes(x=iso3), width=0) +
theme_bw() + ylab("PAF") + xlab("") + coord_flip() +
ggtitle(paste0("Modeled Attributable Fraction: ", round(preds$Y_mean,2), " (", round(preds$Y_mean_lo,2),"-",round(preds$Y_mean_hi,2),")")) +
geom_hline(yintercept=0) + geom_hline(yintercept=preds$Y_mean, col="purple") +
geom_rect(data=preds, aes(ymin=Y_mean_lo, ymax=Y_mean_hi, xmin=0, xmax=length(mod_data$iso3)+1), alpha=0.1, fill="purple")
print(f)
## Calculate Hib paf draws ##
hib_paf <- data.frame(modelable_entity="lri_hib", rei_id=189)
for(i in 1:1000){
hib_paf[,paste0("draw_",i)] <- rnorm(n=1, mean=preds$Y_mean, sd=preds$se)
}
## We have Hib PAF in the absence of vaccine ##
write.csv(hib_paf, "filepath", row.names=F)
## Check for values outside of range (negative or positive)
draws <- hib_paf[,3:1002]
range(draws)
hist(as.numeric(draws))
quantile(draws, 0.025)
################################################################################################################
## Part 2: Use Hib vaccine coverage to determine the Hib PAF in the presence of vaccine ##
################################################################################################################
## Previous GBD rounds used a meta-analysis (Swingler et al https://www.ncbi.nlm.nih.gov/pubmed/17443509)
## to get an estimate of the vaccine efficacy against invasive Hib disease.
## Three problems with this. The first is that they didn't use one of our RCTs (Gessner 2005).
## The second is that previous GBDs didn't incorporate uncertainty
## The third is that we have a better estimate of this value.
## Need to track the differences between previous approach (0.8, no uncertainty),
## incorporation of Swingler uncertainty (0.46-0.97),
## and estimation using our data.
### use the value produced by the meta-analysis of Hib data ###
efficacy_default <- 0.91
sys_efficacy <- data.frame(mean=0.8, lower=0.46, upper=0.97, ln_mean=log(1-0.8), ln_upper=log(1-0.46))
sys_efficacy$ln_se <- (sys_efficacy$ln_upper - sys_efficacy$ln_mean)/qnorm(0.975)
meta_efficacy <- data.frame(mean = 0.91, lower=0.77, upper=0.96, ln_mean=-2.3835, ln_se = 0.4611)
meta.invasive <- rma(ln_vi, sei=ln_vi_se, data=rct, method="DL")
summary(meta.invasive)
## Check that value makes sense
1-exp(meta.invasive$b)
rct[,c("ve_invasive")]
## Calculate Hib VE invasive draws ##
hib_invasive <- data.frame(type=c("single_value","sys_review","meta_analysis"))
for(i in 1:1000){
hib_invasive[2,paste0("draw_",i)] <- 1-exp(rnorm(n=1, mean=sys_efficacy$ln_mean, sd=sys_efficacy$ln_se))
hib_invasive[3,paste0("draw_",i)] <- 1-exp(rnorm(n=1, mean=meta.invasive$b, sd=meta.invasive$se))
hib_invasive[1,paste0("draw_",i)] <- efficacy_default
}
# Make a histogram to compare
hib_melt <- melt(hib_invasive[2:3,])
ggplot(hib_melt, aes(x=value, fill=type)) + geom_histogram(col="black") + theme_bw() + geom_vline(xintercept=0.8, lwd=2, col="darkblue") +
geom_vline(xintercept=1-exp(meta.invasive$b), col="darkred", lwd=2)
## Pull in the Hib vaccine coverage for every GBD location and year ##
hib_cov <- get_covariate_estimates(covariate_id=47, location_id="all", year_id=1990:2019, decomp_step="step4")
## Create draws for vaccine coverage ##
library(boot)
# logit transform the vaccine coverage
hib_cov$logit_mean <- logit(hib_cov$mean_value)
hib_cov$logit_upper <- logit(hib_cov$upper_value)
hib_cov$logit_se <- (hib_cov$logit_upper - hib_cov$logit_mean) / qnorm(0.975)
hib_c <- hib_cov
for(i in 1:1000){
draw <- inv.logit(rnorm(n=length(hib_cov$mean_value), mean=hib_cov$logit_mean, sd=hib_cov$logit_se))
draw <- ifelse(draw=="NaN",0,draw)
hib_c[,paste0("coverage_",i)] <- draw
}
## Save this for later ##
write.csv(hib_c, "filepath", row.names=F)
## To be able to test this ##
is_test <- FALSE
if(is_test==TRUE){
est_df <- data.frame(subset(hib_c, year_id==2017))
} else{
est_df <- data.frame(hib_c)
}
## everything is prepped ##
## Now, calculate the Hib PAF in the presence of vaccine ##
est_df$mean_coverage <- est_df$mean_value
hib_df <- est_df[,c("location_id","location_name","year_id","sex_id","age_group_id","mean_coverage")]
## The loop creates the values from GBD 2017 but also calculates alternatives, summarizes, and saves for comparison ##
# Equation is PAFBase * (1-Cov*VE) / (1 - PAFBase * Cov * VE)
paf_sys <- data.frame(location_name=hib_df$location_name)
## Compute vaccine-coverage-adjusted Hib PAF draws, summarize them, and plot
## diagnostics.
## NOTE(review): this section relies on objects built earlier in the script
## (not visible here): hib_df, hib_paf, est_df, hib_invasive, paf_sys and
## is_test -- confirm they exist before running.
paf_meta <- data.frame(location_name=hib_df$location_name)
for(i in 1:1000){
  ## draw i of the base PAF, the vaccine coverage, and the vaccine efficacy
  p <- hib_paf[,paste0("draw_",i)]
  cov <- est_df[,paste0("coverage_",i)]
  ve <- hib_invasive[,paste0("draw_",i)]
  ## Coverage-adjusted PAF: p*(1-cov*VE) / (1-p*cov*VE).
  ## ve[1]/ve[2]/ve[3] appear to be the GBD, systematic-review and
  ## meta-analysis VE draws respectively -- confirm against the code that
  ## builds hib_invasive.
  gbd <- p * (1 - cov * ve[1]) / (1 - p * cov * ve[1])
  hib_df[,paste0("draw_",i)] <- gbd
  if(is_test==TRUE){
    ## Alternative-uncertainty versions, only kept for test/diagnostic runs.
    sys <- p * (1 - cov * ve[2]) / (1 - p * cov * ve[2])
    meta <- p * (1 - cov * ve[3]) / (1 - p * cov * ve[3])
    paf_sys[,paste0("draw_",i)] <- sys
    paf_meta[,paste0("draw_",i)] <- meta
  }
}
## Summaries over the 1000 draws; columns 7:1006 of hib_df hold the draws.
hib_df$mean_paf <- rowMeans(hib_df[,7:1006])
hib_df$std_paf <- apply(hib_df[,7:1006], 1, sd)
hib_df$paf_lower <- apply(hib_df[,7:1006], 1, function(x) quantile(x, 0.025))
hib_df$paf_upper <- apply(hib_df[,7:1006], 1, function(x) quantile(x, 0.975))
if(is_test==TRUE){
  ## For paf_sys/paf_meta the draws sit in columns 2:1001 (column 1 is
  ## location_name).
  hib_df$mean_sys <- rowMeans(paf_sys[,2:1001])
  hib_df$sys_lower <- apply(paf_sys[,2:1001], 1, function(x) quantile(x, 0.025))
  hib_df$sys_upper <- apply(paf_sys[,2:1001], 1, function(x) quantile(x, 0.975))
  hib_df$mean_meta <- rowMeans(paf_meta[,2:1001])
  hib_df$meta_lower <- apply(paf_meta[,2:1001], 1, function(x) quantile(x, 0.025))
  hib_df$meta_upper <- apply(paf_meta[,2:1001], 1, function(x) quantile(x, 0.975))
}
## Tada!! ##
## NOTE(review): "filepath" is a scrubbed placeholder -- substitute a real
## output path before running.
write.csv(hib_df, "filepath", row.names=F)
## Summarize Hib PAFs for use in pcv analysis ##
hib_trunc <- hib_df[,c("location_id","year_id","mean_paf","std_paf","paf_lower","paf_upper")]
colnames(hib_trunc) <- c("location_id","year_id","hib_paf","hib_paf_std","hib_paf_lower","hib_paf_upper")
write.csv(hib_trunc, "filepath", row.names=F)
################################################################################################################
## Great, now produce some diagnostic plots! ##
## NOTE(review): the plots need ggplot2 and melt() needs reshape2 (or
## data.table); neither is attached in this section -- presumably loaded
## earlier in the script. Confirm before sourcing.
## PAF vs coverage under each uncertainty approach, with 95% intervals.
ggplot(data=hib_df, aes(x=mean_coverage, y=mean_paf, ymin=paf_lower, ymax=paf_upper)) + geom_errorbar(width=0) + geom_point() +
  theme_bw() + ggtitle("Approach used in GBD 2017") + stat_smooth(method="loess", col="purple")
ggplot(data=hib_df, aes(x=mean_coverage, y=mean_sys, ymin=sys_lower, ymax=sys_upper)) + geom_errorbar(width=0) + geom_point() +
  theme_bw() + ggtitle("Approach using Systematic Review Uncertainty") + stat_smooth(method="loess", col="darkblue")
ggplot(data=hib_df, aes(x=mean_coverage, y=mean_meta, ymin=meta_lower, ymax=meta_upper)) + geom_errorbar(width=0) + geom_point() +
  theme_bw() + ggtitle("Approach using Meta-analysis Uncertainty") + stat_smooth(method="loess", col="darkred")
## All three interval sets overlaid for direct comparison.
ggplot(hib_df, aes(x=mean_coverage)) + geom_errorbar(aes(ymin=paf_lower, ymax=paf_upper), col="purple", alpha=0.25) +
  geom_errorbar(aes(ymin=sys_lower, ymax=sys_upper), col="darkblue", alpha=0.25) +
  geom_errorbar(aes(ymin=meta_lower, ymax=meta_upper), col="darkred", alpha=0.25) + theme_bw()
## Mean PAF by approach, as lines over coverage.
diag_df <- melt(hib_df[,c("mean_coverage","mean_paf","mean_sys",
                          "mean_meta")], id.vars="mean_coverage")
ggplot(diag_df, aes(x=mean_coverage, y=value, col=variable)) + geom_line() + theme_bw()
## Width of the 95% interval by approach, as lines over coverage.
diag_df <- melt(data.frame(mean_coverage=hib_df$mean_coverage, range_paf = hib_df$paf_upper - hib_df$paf_lower,
                           range_sys = hib_df$sys_upper - hib_df$sys_lower,
                           range_meta = hib_df$meta_upper - hib_df$meta_lower), id.vars="mean_coverage")
ggplot(diag_df, aes(x=mean_coverage, y=value, col=variable)) + geom_line() + theme_bw()
|
#' Set up a \pkg{basilisk} environment
#'
#' Set up a Python conda environment for isolated execution of Python code with appropriate versions of all Python packages.
#'
#' @param envpath String containing the path to the environment to use.
#' @param packages Character vector containing the names of conda packages to install into the environment.
#' Version numbers must be included.
#'
#' @return
#' A conda environment is created at \code{envpath} containing the specified \code{packages}.
#' The function will return a logical scalar indicating whether creation was performed,
#' which will be \code{FALSE} if the environment already exists.
#'
#' @details
#' \pkg{basilisk} environments are simply Python conda environments that are created and managed by \pkg{basilisk}.
#' Each \pkg{basilisk} environment can contain different Python packages with different versions,
#' allowing us to avoid version conflicts within an R session when different Bioconductor packages (or even different functions within a single package) require incompatible versions of Python packages.
#'
#' Developers of client packages should never need to call this function directly.
#' For typical usage, \code{setupBasiliskEnv} is automatically called by \code{\link{basiliskStart}} to perform lazy installation.
#' Developers should also create \code{configure(.win)} files to call \code{\link{configureBasiliskEnv}},
#' which will call \code{setupBasiliskEnv} during R package installation when \code{BASILISK_USE_SYSTEM_DIR} is set.
#'
#' Pinned version numbers must be present for all requested conda packages in \code{packages}.
#' This improved predictability makes debugging much easier when the R package is installed and executed on different systems.
#' Note that this refers to conda packages, not Python packages, where the version notation for the former uses a single \code{=};
#' any \code{==} will be coerced to \code{=} automatically.
#'
#' It is also good practice to explicitly list the dependencies of all desired packages.
#' This protects against future changes in the behavior of your code if conda's dependency resolver defaults to a different version of a required package.
#' We suggest using \code{conda env export} to identify relevant dependencies and include them in \code{packages};
#' the only reason that pinned dependencies are not mandatory is because some dependencies are OS-specific,
#' requiring some manual pruning of the output of \code{conda env export}.
#'
#' If a \pkg{basilisk} environment is already present at \code{envpath}, \code{setupBasiliskEnv} is a no-op.
#' This ensures that the function only installs the packages once.
#'
#' @examples
#' tmploc <- file.path(tempdir(), "my_package_A")
#' setupBasiliskEnv(tmploc, c('pandas=0.25.3',
#' "python-dateutil=2.8.1", "pytz=2019.3"))
#'
#' @seealso
#' \code{\link{listCorePackages}}, for a list of core Python packages with pinned versions.
#'
#' @export
#' @importFrom basilisk.utils getBasiliskDir installAnaconda
#' @importFrom reticulate conda_install
setupBasiliskEnv <- function(envpath, packages) {
    # An existing directory means the environment was set up on a previous
    # call, so there is nothing to do.
    if (file.exists(envpath)) {
        return(FALSE)
    }

    # Conda pins use a single '='; tolerate Python-style '==' by coercing it,
    # then insist that every requested package carries a pin.
    packages <- sub("==", "=", packages)
    unpinned <- !grepl("=", packages)
    if (any(unpinned)) {
        stop(paste("versions must be explicitly specified for",
            paste(sprintf("'%s'", packages[unpinned]), collapse=", ")))
    }

    installAnaconda() # does nothing if the installation is already present.

    # RETICULATE_PYTHON would override the python= argument passed to
    # virtualenv_create() (see LTLA/basilisk#1), so clear it for the duration
    # of this call and restore it on exit.
    prev.reticulate <- Sys.getenv("RETICULATE_PYTHON")
    if (prev.reticulate != "") {
        Sys.unsetenv("RETICULATE_PYTHON")
        on.exit(Sys.setenv(RETICULATE_PYTHON=prev.reticulate), add=TRUE)
    }

    # Likewise clear PYTHONPATH so stray packages do not leak into the
    # freshly created environment.
    prev.pythonpath <- Sys.getenv("PYTHONPATH")
    if (prev.pythonpath != "") {
        Sys.unsetenv("PYTHONPATH")
        on.exit(Sys.setenv(PYTHONPATH=prev.pythonpath), add=TRUE)
    }

    basilisk.home <- getBasiliskDir()
    conda.bin <- file.path(basilisk.home, .retrieve_conda())
    python.bin <- .get_py_cmd(basilisk.home)
    py.version <- sub("^Python ", "", system2(python.bin, "--version", stdout=TRUE))

    # Create the directory ourselves so that conda installs into 'envpath'
    # rather than tucking the environment away in Anaconda's 'envs' directory.
    dir.create(envpath, recursive=TRUE)
    conda_install(envname=normalizePath(envpath), conda=conda.bin,
        python_version=py.version, packages=packages)

    TRUE
}
# List the Python packages installed for the given Python binary via
# 'pip freeze'. PYTHONPATH is cleared for the duration of the call (and
# restored afterwards) so packages from other installations do not
# contaminate the listing.
.basilisk_freeze <- function(py.cmd) {
    prev.path <- Sys.getenv("PYTHONPATH")
    if (prev.path != "") {
        Sys.unsetenv("PYTHONPATH")
        on.exit(Sys.setenv(PYTHONPATH=prev.path), add=TRUE)
    }
    system2(py.cmd, c("-m", "pip", "freeze"), stdout=TRUE)
}
|
/R/setupBasiliskEnv.R
|
no_license
|
kieranrcampbell/basilisk
|
R
| false
| false
| 4,800
|
r
|
#' Set up a \pkg{basilisk} environment
#'
#' Set up a Python conda environment for isolated execution of Python code with appropriate versions of all Python packages.
#'
#' @param envpath String containing the path to the environment to use.
#' @param packages Character vector containing the names of conda packages to install into the environment.
#' Version numbers must be included.
#'
#' @return
#' A conda environment is created at \code{envpath} containing the specified \code{packages}.
#' The function will return a logical scalar indicating whether creation was performed,
#' which will be \code{FALSE} if the environment already exists.
#'
#' @details
#' \pkg{basilisk} environments are simply Python conda environments that are created and managed by \pkg{basilisk}.
#' Each \pkg{basilisk} environment can contain different Python packages with different versions,
#' allowing us to avoid version conflicts within an R session when different Bioconductor packages (or even different functions within a single package) require incompatible versions of Python packages.
#'
#' Developers of client packages should never need to call this function directly.
#' For typical usage, \code{setupBasiliskEnv} is automatically called by \code{\link{basiliskStart}} to perform lazy installation.
#' Developers should also create \code{configure(.win)} files to call \code{\link{configureBasiliskEnv}},
#' which will call \code{setupBasiliskEnv} during R package installation when \code{BASILISK_USE_SYSTEM_DIR} is set.
#'
#' Pinned version numbers must be present for all requested conda packages in \code{packages}.
#' This improved predictability makes debugging much easier when the R package is installed and executed on different systems.
#' Note that this refers to conda packages, not Python packages, where the version notation for the former uses a single \code{=};
#' any \code{==} will be coerced to \code{=} automatically.
#'
#' It is also good practice to explicitly list the dependencies of all desired packages.
#' This protects against future changes in the behavior of your code if conda's dependency resolver defaults to a different version of a required package.
#' We suggest using \code{conda env export} to identify relevant dependencies and include them in \code{packages};
#' the only reason that pinned dependencies are not mandatory is because some dependencies are OS-specific,
#' requiring some manual pruning of the output of \code{conda env export}.
#'
#' If a \pkg{basilisk} environment is already present at \code{envpath}, \code{setupBasiliskEnv} is a no-op.
#' This ensures that the function only installs the packages once.
#'
#' @examples
#' tmploc <- file.path(tempdir(), "my_package_A")
#' setupBasiliskEnv(tmploc, c('pandas=0.25.3',
#' "python-dateutil=2.8.1", "pytz=2019.3"))
#'
#' @seealso
#' \code{\link{listCorePackages}}, for a list of core Python packages with pinned versions.
#'
#' @export
#' @importFrom basilisk.utils getBasiliskDir installAnaconda
#' @importFrom reticulate conda_install
setupBasiliskEnv <- function(envpath, packages) {
    # An existing directory means the environment was set up on a previous
    # call, so there is nothing to do.
    if (file.exists(envpath)) {
        return(FALSE)
    }

    # Conda pins use a single '='; tolerate Python-style '==' by coercing it,
    # then insist that every requested package carries a pin.
    packages <- sub("==", "=", packages)
    unpinned <- !grepl("=", packages)
    if (any(unpinned)) {
        stop(paste("versions must be explicitly specified for",
            paste(sprintf("'%s'", packages[unpinned]), collapse=", ")))
    }

    installAnaconda() # does nothing if the installation is already present.

    # RETICULATE_PYTHON would override the python= argument passed to
    # virtualenv_create() (see LTLA/basilisk#1), so clear it for the duration
    # of this call and restore it on exit.
    prev.reticulate <- Sys.getenv("RETICULATE_PYTHON")
    if (prev.reticulate != "") {
        Sys.unsetenv("RETICULATE_PYTHON")
        on.exit(Sys.setenv(RETICULATE_PYTHON=prev.reticulate), add=TRUE)
    }

    # Likewise clear PYTHONPATH so stray packages do not leak into the
    # freshly created environment.
    prev.pythonpath <- Sys.getenv("PYTHONPATH")
    if (prev.pythonpath != "") {
        Sys.unsetenv("PYTHONPATH")
        on.exit(Sys.setenv(PYTHONPATH=prev.pythonpath), add=TRUE)
    }

    basilisk.home <- getBasiliskDir()
    conda.bin <- file.path(basilisk.home, .retrieve_conda())
    python.bin <- .get_py_cmd(basilisk.home)
    py.version <- sub("^Python ", "", system2(python.bin, "--version", stdout=TRUE))

    # Create the directory ourselves so that conda installs into 'envpath'
    # rather than tucking the environment away in Anaconda's 'envs' directory.
    dir.create(envpath, recursive=TRUE)
    conda_install(envname=normalizePath(envpath), conda=conda.bin,
        python_version=py.version, packages=packages)

    TRUE
}
# List the Python packages installed for the given Python binary via
# 'pip freeze'. PYTHONPATH is cleared for the duration of the call (and
# restored afterwards) so packages from other installations do not
# contaminate the listing.
.basilisk_freeze <- function(py.cmd) {
    prev.path <- Sys.getenv("PYTHONPATH")
    if (prev.path != "") {
        Sys.unsetenv("PYTHONPATH")
        on.exit(Sys.setenv(PYTHONPATH=prev.path), add=TRUE)
    }
    system2(py.cmd, c("-m", "pip", "freeze"), stdout=TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_predict.R
\name{user_predict}
\alias{user_predict}
\title{Generate Predictions for 1 User}
\usage{
user_predict(user, user_similarities, ratings_wide, predict_n)
}
\arguments{
\item{user}{An existing or new user denoted by their unique User.ID}
\item{user_similarities}{A wide matrix that contains cosine similarity scores
between each User and Item combination. A cosine similarity score of 1
indicates that Users are similar to each other and a cosine similarity
score of 0 indicates that Users are dissimilar (different from each other)
with numbers between 0 and 1 depicting relative similarity of Users.}
\item{ratings_wide}{A wide matrix which contains unique User ID's as row
names and unique Item ID's as column names with ratings as the values in
the matrix.}
\item{predict_n}{The number of predictions to generate for the specified
user.}
}
\value{
The top \code{predict_n} predicted items for the specified \code{user},
derived from the ratings in \code{ratings_wide} weighted by the user--user
cosine similarities in \code{user_similarities}. (The previous text here
duplicated the \code{user_similarities} description; confirm the exact
return structure against \code{user_predict} in R/user_predict.R.)
}
\description{
Generate Predictions for 1 User
}
|
/cf/man/user_predict.Rd
|
no_license
|
apentz/assign1
|
R
| false
| true
| 1,364
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_predict.R
\name{user_predict}
\alias{user_predict}
\title{Generate Predictions for 1 User}
\usage{
user_predict(user, user_similarities, ratings_wide, predict_n)
}
\arguments{
\item{user}{An existing or new user denoted by their unique User.ID}
\item{user_similarities}{A wide matrix that contains cosine similarity scores
between each User and Item combination. A cosine similarity score of 1
indicates that Users are similar to each other and a cosine similarity
score of 0 indicates that Users are dissimilar (different from each other)
with numbers between 0 and 1 depicting relative similarity of Users.}
\item{ratings_wide}{A wide matrix which contains unique User ID's as row
names and unique Item ID's as column names with ratings as the values in
the matrix.}
\item{predict_n}{The number of predictions to generate for the specified
user.}
}
\value{
The top \code{predict_n} predicted items for the specified \code{user},
derived from the ratings in \code{ratings_wide} weighted by the user--user
cosine similarities in \code{user_similarities}. (The previous text here
duplicated the \code{user_similarities} description; confirm the exact
return structure against \code{user_predict} in R/user_predict.R.)
}
\description{
Generate Predictions for 1 User
}
|
library(seqinr)
library(stringr)
library(readr)
#' Map every peptide in 'peptide_file' onto the protein in 'protein_fasta'.
#'
#' @param protein_fasta Path to a FASTA file; only its first sequence is used.
#' @param peptide_file Path to a headerless CSV whose first column (X1) holds
#'   peptide sequences.
#' @param group Label copied into the 'group' column of the result.
#' @return Data frame with one row per covered residue per peptide match:
#'   columns 'peptide_seq', 'position' and 'group'.
peptide_df_gen <- function(protein_fasta, peptide_file, group = 'default'){
  protein_seq_list <- read.fasta(protein_fasta, seqtype = 'AA', seqonly = TRUE)
  protein_seq <- protein_seq_list[[1]]
  peptides <- read_csv(peptide_file,
                       col_names = FALSE)
  # Build one frame per peptide and combine once at the end. (The original
  # grew the frame with bind_rows() inside a loop -- quadratic, and relying
  # on dplyr, which this script never attaches. seq_len() also handles an
  # empty peptide list, where 1:nrow() would iterate over c(1, 0).)
  per_peptide <- lapply(seq_len(nrow(peptides)),
                        function(i) single_peptide_df(peptides$X1[i], protein_seq))
  return_df <- do.call(rbind, per_peptide)
  if (is.null(return_df)) {
    # No peptides at all: return an empty frame with the expected columns.
    return_df <- data.frame(peptide_seq=character(), position=integer())
  }
  return_df$group <- group
  return(return_df)
}
# Generates one row per residue for every match of 'peptide' in 'protein_seq'.
# Returns a data frame with columns 'peptide_seq' (the peptide itself) and
# 'position' (1-based index of each covered residue in the protein).
single_peptide_df <- function(peptide, protein_seq){
  # str_locate_all() returns a matrix of start/end positions, one row per
  # (non-overlapping) match; nrow() replaces the original separate
  # str_count() call.
  hits <- str_locate_all(protein_seq, peptide)[[1]]
  n_matches <- nrow(hits)
  pieces <- vector("list", n_matches)
  # seq_len() correctly yields zero iterations when there are no matches;
  # the original '1:matches' would iterate over c(1, 0) and fail.
  for (j in seq_len(n_matches)){
    start <- hits[j, "start"]
    # All residue positions covered by this match, computed vectorized
    # rather than with the original element-by-element loop.
    position <- seq.int(start, start + nchar(peptide) - 1)
    pieces[[j]] <- data.frame(peptide_seq = rep(peptide, nchar(peptide)),
                              position = position)
  }
  # Combine once (base rbind replaces the unloaded dplyr::bind_rows); the
  # leftover debug print() has been removed.
  peptide_pos_df <- do.call(rbind, pieces)
  if (is.null(peptide_pos_df)) {
    peptide_pos_df <- data.frame(peptide_seq=character(), position=integer())
  }
  return(peptide_pos_df)
}
# Strip modification annotations -- lower-case letters and colons -- from a
# peptide string (vectorized over 'string').
remove_mods_str <- function(string){
  return(str_replace_all(string, "[a-z]|:", ""))
}
# Read a headerless CSV of peptide strings (first column X1), strip
# modification annotations from each, de-duplicate, and write the result
# next to the input as '<basename>unique.txt' (no header, no row names).
remove_mods_unique_file <- function(text_file){
  strings <- read_csv(text_file,
                      col_names = FALSE)
  # Output file shares the input's path minus its extension.
  basefile <- tools::file_path_sans_ext(text_file)
  strings$X1 <- remove_mods_str(strings$X1)
  return_strings <- unique(strings)
  write.table(return_strings, paste0(basefile, "unique.txt"),col.names = FALSE, row.names = FALSE)
}
# Coverage plot: one tick (shape 124 = '|') per covered residue, per group.
# NOTE(review): 'master_df' is not defined in this file and ggplot2 is never
# attached -- this presumably runs after combining peptide_df_gen() results
# interactively; confirm before sourcing the whole script.
plot <- ggplot(data = master_df) + geom_point(aes(x = position, y = group), shape = 124) +
  theme_bw() +
  theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank())
plot
|
/peptide_mapping_to_protein.R
|
no_license
|
weaversd/MUC16_glycosylation_R
|
R
| false
| false
| 2,066
|
r
|
library(seqinr)
library(stringr)
library(readr)
#' Map every peptide in 'peptide_file' onto the protein in 'protein_fasta'.
#'
#' @param protein_fasta Path to a FASTA file; only its first sequence is used.
#' @param peptide_file Path to a headerless CSV whose first column (X1) holds
#'   peptide sequences.
#' @param group Label copied into the 'group' column of the result.
#' @return Data frame with one row per covered residue per peptide match:
#'   columns 'peptide_seq', 'position' and 'group'.
peptide_df_gen <- function(protein_fasta, peptide_file, group = 'default'){
  protein_seq_list <- read.fasta(protein_fasta, seqtype = 'AA', seqonly = TRUE)
  protein_seq <- protein_seq_list[[1]]
  peptides <- read_csv(peptide_file,
                       col_names = FALSE)
  # Build one frame per peptide and combine once at the end. (The original
  # grew the frame with bind_rows() inside a loop -- quadratic, and relying
  # on dplyr, which this script never attaches. seq_len() also handles an
  # empty peptide list, where 1:nrow() would iterate over c(1, 0).)
  per_peptide <- lapply(seq_len(nrow(peptides)),
                        function(i) single_peptide_df(peptides$X1[i], protein_seq))
  return_df <- do.call(rbind, per_peptide)
  if (is.null(return_df)) {
    # No peptides at all: return an empty frame with the expected columns.
    return_df <- data.frame(peptide_seq=character(), position=integer())
  }
  return_df$group <- group
  return(return_df)
}
# Generates one row per residue for every match of 'peptide' in 'protein_seq'.
# Returns a data frame with columns 'peptide_seq' (the peptide itself) and
# 'position' (1-based index of each covered residue in the protein).
single_peptide_df <- function(peptide, protein_seq){
  # str_locate_all() returns a matrix of start/end positions, one row per
  # (non-overlapping) match; nrow() replaces the original separate
  # str_count() call.
  hits <- str_locate_all(protein_seq, peptide)[[1]]
  n_matches <- nrow(hits)
  pieces <- vector("list", n_matches)
  # seq_len() correctly yields zero iterations when there are no matches;
  # the original '1:matches' would iterate over c(1, 0) and fail.
  for (j in seq_len(n_matches)){
    start <- hits[j, "start"]
    # All residue positions covered by this match, computed vectorized
    # rather than with the original element-by-element loop.
    position <- seq.int(start, start + nchar(peptide) - 1)
    pieces[[j]] <- data.frame(peptide_seq = rep(peptide, nchar(peptide)),
                              position = position)
  }
  # Combine once (base rbind replaces the unloaded dplyr::bind_rows); the
  # leftover debug print() has been removed.
  peptide_pos_df <- do.call(rbind, pieces)
  if (is.null(peptide_pos_df)) {
    peptide_pos_df <- data.frame(peptide_seq=character(), position=integer())
  }
  return(peptide_pos_df)
}
# Strip modification annotations -- lower-case letters and colons -- from a
# peptide string (vectorized over 'string').
remove_mods_str <- function(string){
  return(str_replace_all(string, "[a-z]|:", ""))
}
# Read a headerless CSV of peptide strings (first column X1), strip
# modification annotations from each, de-duplicate, and write the result
# next to the input as '<basename>unique.txt' (no header, no row names).
remove_mods_unique_file <- function(text_file){
  strings <- read_csv(text_file,
                      col_names = FALSE)
  # Output file shares the input's path minus its extension.
  basefile <- tools::file_path_sans_ext(text_file)
  strings$X1 <- remove_mods_str(strings$X1)
  return_strings <- unique(strings)
  write.table(return_strings, paste0(basefile, "unique.txt"),col.names = FALSE, row.names = FALSE)
}
# Coverage plot: one tick (shape 124 = '|') per covered residue, per group.
# NOTE(review): 'master_df' is not defined in this file and ggplot2 is never
# attached -- this presumably runs after combining peptide_df_gen() results
# interactively; confirm before sourcing the whole script.
plot <- ggplot(data = master_df) + geom_point(aes(x = position, y = group), shape = 124) +
  theme_bw() +
  theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank())
plot
|
library(party) ## for cforest
library(caret)

## preprocessing ------------------------------------------------------------
data <- read.csv("../data/NIHMS53736-supplement-9.csv")
str(data)
## Drop the trailing 3 characters of each cell label, then re-factor.
data$Cell <- sapply(as.character(data$Cell),
                    function(x) substr(x, 1, nchar(x) - 3))
data$Cell <- as.factor(data$Cell)
#######################################################################
## cforest implementation of random forest
## One simple simulation with default values (in-sample accuracy).
initialTime <- Sys.time()
forest <- cforest(Cell~.,data=data)
forest.pred <- predict(forest,newdata=data)
## Bug fix: 'onfusionMatrix' -> 'confusionMatrix'.
confusionMatrix(forest.pred,data$Cell)$overall[1]
finalTime <- Sys.time() - initialTime
finalTime
######################################################################
## performing parameter variation: accuracy vs. training fraction 'p'
## number of simulations per setting
nsims <- 2
## training proportions to try (0.8 and 1.0)
## NOTE(review): p = 1 leaves an empty test set, so the confusionMatrix
## call will fail on that setting -- consider capping below 1.
parameter <- seq(0.8,1,0.2)
results <- matrix(0,nsims*length(parameter),2)
colnames(results) <- c("accuracy","parameter")
for (i in seq_along(parameter)){
  for (j in seq_len(nsims)) {
    ## Bug fix: partition on the i-th proportion; the original passed the
    ## whole 'parameter' vector as 'p'.
    train <- createDataPartition(data$Cell,p=parameter[i],list=FALSE)
    forest <- cforest(Cell~.,data=data,subset = train)
    forest.pred <- predict(forest,newdata=data[-train,])
    ## index to store simulation data
    index <- (nsims*(i-1)) + j
    ## storing results
    results[index,1] <- confusionMatrix(forest.pred,
                                        data$Cell[-train])$overall[1]
    results[index,2] <- parameter[i]
  }
}
results
##############################################################################
##############################################################################
## mtry variation
## perform random forest with mtry variation
## can perform randomForest or cforest methods
#' Sweep the 'mtry' tuning parameter of a random forest.
#'
#' For every value in 'mtryInterval', runs 'nsims' train/test simulations
#' (80/20 split via caret::createDataPartition), records the prediction
#' error on the held-out rows plus the fit's wall-clock time, and draws
#' side-by-side boxplots of both against mtry.
#'
#' @param output Response vector, aligned with the rows of 'input'.
#' @param input Predictor data.frame (a matrix is coerced).
#' @param nsims Number of simulations per mtry value.
#' @param method Either "randomForest" or "cforest".
#' @param mtryInterval Candidate mtry values.
#' @return The value of grid.arrange(); called primarily for its plots.
forestmtry <- function(output,
                       input,
                       nsims=30,
                       method="randomForest",
                       mtryInterval=seq(2,10,1)) {
    ## class(x) == "matrix" is length-2 in R >= 4.0 (c("matrix","array")),
    ## which breaks if(); inherits() is the robust test.
    if (inherits(input, "matrix")) {
        input <- as.data.frame(input)
    }
    if (!is.data.frame(input)) {
        stop("Input must be a data.frame or matrix")
    }
    if (!method %in% c("cforest","randomForest")) {
        stop("Method must be 'cforest' or 'randomForest'")
    }
    results <- as.data.frame(matrix(0, nsims*length(mtryInterval), 3))
    colnames(results) <- c("Accuracy","mtry","performance")
    for (i in seq_along(mtryInterval)){
        for (j in seq_len(nsims)) {
            ## Fresh 80/20 split every simulation. (Bug fix: the original
            ## only created 'intrain' inside the cforest branch, so
            ## method = "randomForest" crashed on an undefined 'intrain'.)
            intrain <- createDataPartition(output,p = 0.8,list = FALSE)
            initialTime <- Sys.time()
            if (method == "cforest") {
                cforestControl <- cforest_control(teststat = "quad",
                                                  testtype = "Univ",
                                                  mincriterion = 0,
                                                  ntree = 50,
                                                  mtry = mtryInterval[i], ## swept parameter
                                                  replace = FALSE)
                fit <- cforest(output[intrain]~.,
                               data=input[intrain,],
                               control = cforestControl)
            } else {
                ## Bug fix: the original never passed mtry to randomForest(),
                ## so the sweep had no effect for this method.
                fit <- randomForest(output[intrain]~.,
                                    data=input[intrain,],
                                    mtry = mtryInterval[i])
            }
            finalTime <- Sys.time() - initialTime  ## fit time only, both methods
            ## Evaluate on the held-out rows.
            preds <- predict(fit, newdata=input[-intrain,])
            ## Reproduces the original error metric, sqrt(mean(sum(e)^2))
            ## -- effectively |sum of errors|, not a true RMSE.
            mse <- sqrt(mean(sum(preds - output[-intrain])^2))
            ## index to store simulation data
            index <- (nsims*(i-1)) + j
            results[index,1] <- mse
            results[index,2] <- mtryInterval[i]
            results[index,3] <- finalTime
        }
    }
    ## Error vs mtry.
    theme_set(theme_bw())
    g <- ggplot(results, aes(x=mtry,y=Accuracy)) + geom_point() +
        geom_boxplot(aes(group=mtry,fill="steelblue")) +
        theme(legend.position = "none") +
        labs(title =paste("\t\t",method,"- Accuracy"),y="MSE") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## Wall-clock fit time vs mtry.
    g1 <- ggplot(results, aes(x=mtry,y=performance)) + geom_point() +
        geom_boxplot(aes(group=mtry, fill=132)) + ## 132 -> steelblue color
        theme(legend.position = "none") +
        labs(title =paste("\t", method,"- Waiting time"),
             y="Time (secs)") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## NOTE(review): needs ggplot2 and gridExtra (grid.arrange), which this
    ## file never attaches -- presumably loaded elsewhere; confirm.
    grid.arrange(g,g1,nrow=1)
}
##############################################################################
## ntree variation
## perform random forest with ntree variation
## can perform randomForest or cforest methods
#' Sweep the 'ntree' tuning parameter of a random forest.
#'
#' For every value in 'ntreeInterval', runs 'nsims' train/test simulations
#' (80/20 split via caret::createDataPartition), records the prediction
#' error on the held-out rows plus the fit's wall-clock time, and draws
#' side-by-side boxplots of both against ntree.
#'
#' @param output Response vector, aligned with the rows of 'input'.
#' @param input Predictor data.frame (a matrix is coerced).
#' @param method Either "randomForest" or "cforest".
#' @param nsims Number of simulations per ntree value.
#' @param mtry mtry forwarded to cforest (bug fix: previously hard-coded to 3).
#' @param ntreeInterval Candidate ntree values.
#' @return The value of grid.arrange(); called primarily for its plots.
forestntree <- function(output,
                        input,
                        method="randomForest",
                        nsims=30,
                        mtry=3,
                        ntreeInterval=seq(20,100,10)) {
    ## class(x) == "matrix" is length-2 in R >= 4.0; inherits() is robust.
    if (inherits(input, "matrix")) {
        input <- as.data.frame(input)
    }
    ## Consistency fix: forestmtry() stops here, but this function only
    ## warned and then continued toward a confusing downstream failure.
    if (!is.data.frame(input)) {
        stop("Input must be a data.frame or matrix")
    }
    if (!method %in% c("cforest","randomForest")) {
        stop("Method must be 'cforest' or 'randomForest'")
    }
    results <- as.data.frame(matrix(0, nsims*length(ntreeInterval), 3))
    colnames(results) <- c("Accuracy","ntree","performance")
    for (i in seq_along(ntreeInterval)){
        for (j in seq_len(nsims)) {
            intrain <- createDataPartition(output,p = 0.8,list = FALSE)
            initialTime <- Sys.time()
            if (method == "cforest") {
                cforestControl <- cforest_control(teststat = "quad",
                                                  testtype = "Univ",
                                                  mincriterion = 0,
                                                  ntree = ntreeInterval[i],
                                                  mtry = mtry, ## bug fix: was hard-coded to 3
                                                  replace = FALSE)
                fit <- cforest(output[intrain]~.,data=input[intrain,],
                               control = cforestControl)
            } else {
                ## 'mtry' is deliberately not forwarded here, preserving the
                ## original behavior (randomForest's own default applies).
                fit <- randomForest(output[intrain]~.,
                                    data=input[intrain,],
                                    ntree = ntreeInterval[i])
            }
            finalTime <- Sys.time() - initialTime  ## fit time only
            ## Evaluate once on the held-out rows (the original predicted
            ## twice for cforest: inside the branch and again afterwards).
            preds <- predict(fit, newdata=input[-intrain,])
            ## Reproduces the original error metric, sqrt(mean(sum(e)^2)).
            mse <- sqrt(mean(sum(preds - output[-intrain])^2))
            ## index to store simulation data
            index <- (nsims*(i-1)) + j
            results[index,1] <- mse
            results[index,2] <- ntreeInterval[i]
            results[index,3] <- finalTime
        }
    }
    ## Error vs ntree.
    theme_set(theme_bw())
    g <- ggplot(results, aes(x=ntree,y=Accuracy)) + geom_point() +
        geom_boxplot(aes(group=ntree,fill="steelblue")) +
        theme(legend.position = "none") +
        labs(title =paste("\t\t",method,"- Accuracy"),y="MSE") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## Wall-clock fit time vs ntree.
    g1 <- ggplot(results, aes(x=ntree,y=performance)) + geom_point() +
        geom_boxplot(aes(group=ntree, fill=132)) + ## 132 -> steelblue color
        theme(legend.position = "none") +
        labs(title =paste("\t", method,"- Waiting time"),
             y="Time (secs)") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## NOTE(review): needs ggplot2 and gridExtra (grid.arrange), which this
    ## file never attaches -- presumably loaded elsewhere; confirm.
    grid.arrange(g,g1,nrow=1)
}
|
/machineLearning/randomForest.R
|
no_license
|
caramirezal/dataScience
|
R
| false
| false
| 11,570
|
r
|
library(party) ## for cforest
library(caret)

## preprocessing ------------------------------------------------------------
data <- read.csv("../data/NIHMS53736-supplement-9.csv")
str(data)
## Drop the trailing 3 characters of each cell label, then re-factor.
data$Cell <- sapply(as.character(data$Cell),
                    function(x) substr(x, 1, nchar(x) - 3))
data$Cell <- as.factor(data$Cell)
#######################################################################
## cforest implementation of random forest
## One simple simulation with default values (in-sample accuracy).
initialTime <- Sys.time()
forest <- cforest(Cell~.,data=data)
forest.pred <- predict(forest,newdata=data)
## Bug fix: 'onfusionMatrix' -> 'confusionMatrix'.
confusionMatrix(forest.pred,data$Cell)$overall[1]
finalTime <- Sys.time() - initialTime
finalTime
######################################################################
## performing parameter variation: accuracy vs. training fraction 'p'
## number of simulations per setting
nsims <- 2
## training proportions to try (0.8 and 1.0)
## NOTE(review): p = 1 leaves an empty test set, so the confusionMatrix
## call will fail on that setting -- consider capping below 1.
parameter <- seq(0.8,1,0.2)
results <- matrix(0,nsims*length(parameter),2)
colnames(results) <- c("accuracy","parameter")
for (i in seq_along(parameter)){
  for (j in seq_len(nsims)) {
    ## Bug fix: partition on the i-th proportion; the original passed the
    ## whole 'parameter' vector as 'p'.
    train <- createDataPartition(data$Cell,p=parameter[i],list=FALSE)
    forest <- cforest(Cell~.,data=data,subset = train)
    forest.pred <- predict(forest,newdata=data[-train,])
    ## index to store simulation data
    index <- (nsims*(i-1)) + j
    ## storing results
    results[index,1] <- confusionMatrix(forest.pred,
                                        data$Cell[-train])$overall[1]
    results[index,2] <- parameter[i]
  }
}
results
##############################################################################
##############################################################################
## mtry variation
## perform random forest with mtry variation
## can perform randomForest or cforest methods
#' Sweep the 'mtry' tuning parameter of a random forest.
#'
#' For every value in 'mtryInterval', runs 'nsims' train/test simulations
#' (80/20 split via caret::createDataPartition), records the prediction
#' error on the held-out rows plus the fit's wall-clock time, and draws
#' side-by-side boxplots of both against mtry.
#'
#' @param output Response vector, aligned with the rows of 'input'.
#' @param input Predictor data.frame (a matrix is coerced).
#' @param nsims Number of simulations per mtry value.
#' @param method Either "randomForest" or "cforest".
#' @param mtryInterval Candidate mtry values.
#' @return The value of grid.arrange(); called primarily for its plots.
forestmtry <- function(output,
                       input,
                       nsims=30,
                       method="randomForest",
                       mtryInterval=seq(2,10,1)) {
    ## class(x) == "matrix" is length-2 in R >= 4.0 (c("matrix","array")),
    ## which breaks if(); inherits() is the robust test.
    if (inherits(input, "matrix")) {
        input <- as.data.frame(input)
    }
    if (!is.data.frame(input)) {
        stop("Input must be a data.frame or matrix")
    }
    if (!method %in% c("cforest","randomForest")) {
        stop("Method must be 'cforest' or 'randomForest'")
    }
    results <- as.data.frame(matrix(0, nsims*length(mtryInterval), 3))
    colnames(results) <- c("Accuracy","mtry","performance")
    for (i in seq_along(mtryInterval)){
        for (j in seq_len(nsims)) {
            ## Fresh 80/20 split every simulation. (Bug fix: the original
            ## only created 'intrain' inside the cforest branch, so
            ## method = "randomForest" crashed on an undefined 'intrain'.)
            intrain <- createDataPartition(output,p = 0.8,list = FALSE)
            initialTime <- Sys.time()
            if (method == "cforest") {
                cforestControl <- cforest_control(teststat = "quad",
                                                  testtype = "Univ",
                                                  mincriterion = 0,
                                                  ntree = 50,
                                                  mtry = mtryInterval[i], ## swept parameter
                                                  replace = FALSE)
                fit <- cforest(output[intrain]~.,
                               data=input[intrain,],
                               control = cforestControl)
            } else {
                ## Bug fix: the original never passed mtry to randomForest(),
                ## so the sweep had no effect for this method.
                fit <- randomForest(output[intrain]~.,
                                    data=input[intrain,],
                                    mtry = mtryInterval[i])
            }
            finalTime <- Sys.time() - initialTime  ## fit time only, both methods
            ## Evaluate on the held-out rows.
            preds <- predict(fit, newdata=input[-intrain,])
            ## Reproduces the original error metric, sqrt(mean(sum(e)^2))
            ## -- effectively |sum of errors|, not a true RMSE.
            mse <- sqrt(mean(sum(preds - output[-intrain])^2))
            ## index to store simulation data
            index <- (nsims*(i-1)) + j
            results[index,1] <- mse
            results[index,2] <- mtryInterval[i]
            results[index,3] <- finalTime
        }
    }
    ## Error vs mtry.
    theme_set(theme_bw())
    g <- ggplot(results, aes(x=mtry,y=Accuracy)) + geom_point() +
        geom_boxplot(aes(group=mtry,fill="steelblue")) +
        theme(legend.position = "none") +
        labs(title =paste("\t\t",method,"- Accuracy"),y="MSE") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## Wall-clock fit time vs mtry.
    g1 <- ggplot(results, aes(x=mtry,y=performance)) + geom_point() +
        geom_boxplot(aes(group=mtry, fill=132)) + ## 132 -> steelblue color
        theme(legend.position = "none") +
        labs(title =paste("\t", method,"- Waiting time"),
             y="Time (secs)") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## NOTE(review): needs ggplot2 and gridExtra (grid.arrange), which this
    ## file never attaches -- presumably loaded elsewhere; confirm.
    grid.arrange(g,g1,nrow=1)
}
##############################################################################
## ntree variation
## perform random forest with ntree variation
## can perform randomForest or cforest methods
#' Sweep the 'ntree' tuning parameter of a random forest.
#'
#' For every value in 'ntreeInterval', runs 'nsims' train/test simulations
#' (80/20 split via caret::createDataPartition), records the prediction
#' error on the held-out rows plus the fit's wall-clock time, and draws
#' side-by-side boxplots of both against ntree.
#'
#' @param output Response vector, aligned with the rows of 'input'.
#' @param input Predictor data.frame (a matrix is coerced).
#' @param method Either "randomForest" or "cforest".
#' @param nsims Number of simulations per ntree value.
#' @param mtry mtry forwarded to cforest (bug fix: previously hard-coded to 3).
#' @param ntreeInterval Candidate ntree values.
#' @return The value of grid.arrange(); called primarily for its plots.
forestntree <- function(output,
                        input,
                        method="randomForest",
                        nsims=30,
                        mtry=3,
                        ntreeInterval=seq(20,100,10)) {
    ## class(x) == "matrix" is length-2 in R >= 4.0; inherits() is robust.
    if (inherits(input, "matrix")) {
        input <- as.data.frame(input)
    }
    ## Consistency fix: forestmtry() stops here, but this function only
    ## warned and then continued toward a confusing downstream failure.
    if (!is.data.frame(input)) {
        stop("Input must be a data.frame or matrix")
    }
    if (!method %in% c("cforest","randomForest")) {
        stop("Method must be 'cforest' or 'randomForest'")
    }
    results <- as.data.frame(matrix(0, nsims*length(ntreeInterval), 3))
    colnames(results) <- c("Accuracy","ntree","performance")
    for (i in seq_along(ntreeInterval)){
        for (j in seq_len(nsims)) {
            intrain <- createDataPartition(output,p = 0.8,list = FALSE)
            initialTime <- Sys.time()
            if (method == "cforest") {
                cforestControl <- cforest_control(teststat = "quad",
                                                  testtype = "Univ",
                                                  mincriterion = 0,
                                                  ntree = ntreeInterval[i],
                                                  mtry = mtry, ## bug fix: was hard-coded to 3
                                                  replace = FALSE)
                fit <- cforest(output[intrain]~.,data=input[intrain,],
                               control = cforestControl)
            } else {
                ## 'mtry' is deliberately not forwarded here, preserving the
                ## original behavior (randomForest's own default applies).
                fit <- randomForest(output[intrain]~.,
                                    data=input[intrain,],
                                    ntree = ntreeInterval[i])
            }
            finalTime <- Sys.time() - initialTime  ## fit time only
            ## Evaluate once on the held-out rows (the original predicted
            ## twice for cforest: inside the branch and again afterwards).
            preds <- predict(fit, newdata=input[-intrain,])
            ## Reproduces the original error metric, sqrt(mean(sum(e)^2)).
            mse <- sqrt(mean(sum(preds - output[-intrain])^2))
            ## index to store simulation data
            index <- (nsims*(i-1)) + j
            results[index,1] <- mse
            results[index,2] <- ntreeInterval[i]
            results[index,3] <- finalTime
        }
    }
    ## Error vs ntree.
    theme_set(theme_bw())
    g <- ggplot(results, aes(x=ntree,y=Accuracy)) + geom_point() +
        geom_boxplot(aes(group=ntree,fill="steelblue")) +
        theme(legend.position = "none") +
        labs(title =paste("\t\t",method,"- Accuracy"),y="MSE") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## Wall-clock fit time vs ntree.
    g1 <- ggplot(results, aes(x=ntree,y=performance)) + geom_point() +
        geom_boxplot(aes(group=ntree, fill=132)) + ## 132 -> steelblue color
        theme(legend.position = "none") +
        labs(title =paste("\t", method,"- Waiting time"),
             y="Time (secs)") +
        theme(title = element_text(face="bold"),
              axis.title.x = element_text(face="bold"),
              axis.title.y = element_text(face="bold"))
    ## NOTE(review): needs ggplot2 and gridExtra (grid.arrange), which this
    ## file never attaches -- presumably loaded elsewhere; confirm.
    grid.arrange(g,g1,nrow=1)
}
|
# Coerce scalars, vectors, lists and tensors to 64-bit integer values.
#
# Recurses over lists (and over numeric vectors of length > 1, which become
# lists of per-element results); NULL passes through unchanged; tensors are
# cast to 'dtype' and, when eager, unwrapped to their R value via
# tensor_value(); anything else goes through tf$constant() after as.integer().
as_integer_tensor <- function(x, dtype = tf$int64) {
  # recurse over lists
  if (is.list(x) || (is.numeric(x) && length(x) > 1))
    lapply(x, function(elem) as_integer_tensor(elem, dtype))
  else if (is.null(x))
    x
  else if (is_tensor(x))
    tensor_value(tf$cast(x, dtype = dtype))
  else
    tensor_value(tf$constant(as.integer(x), dtype = dtype))
}
# Normalize a (possibly nested) shape specification into tuples of int64
# values for the TensorFlow dataset API.
#
# Lists become Python tuples (recursively); tensors are cast to int64;
# existing Python objects pass through untouched; NULL (an unknown
# dimension) is encoded as -1; anything else is wrapped via tf$constant().
as_tensor_shapes <- function(x) {
  if (is.list(x))
    tuple(lapply(x, as_tensor_shapes))
  else if (is_tensor(x))
    tensor_value(tf$cast(x, dtype = tf$int64))
  else if (inherits(x, "python.builtin.object"))
    x
  else if (is.null(x))
    tensor_value(tf$constant(-1L, dtype = tf$int64))
  else
    tensor_value(tf$constant(as.integer(x), dtype = tf$int64))
}
# Materialize an eager tensor to its R value via $numpy(); any other input
# is returned unchanged.
tensor_value <- function(x) {
  if (!is_eager_tensor(x)) {
    return(x)
  }
  x$numpy()
}
# Run 'f' with a TensorFlow session, creating (and closing) a temporary one
# only when no session is supplied and no default session is active.
#
# @param f Function of one argument, invoked as f(session).
# @param session Optional session; defaults to tf$get_default_session().
with_session <- function(f, session = NULL) {
  if (is.null(session))
    session <- tf$get_default_session()
  if (is.null(session)) {
    # No default session either: make our own and guarantee it is closed.
    session <- tf$Session()
    on.exit(session$close(), add = TRUE)
  }
  f(session)
}
# Ensure the installed TensorFlow is recent enough for 'feature_name',
# stopping with an informative error when TF is missing or too old.
validate_tf_version <- function(required_ver = "1.4", feature_name = "tfdatasets") {
  tf_ver <- tensorflow::tf_version()
  if (is.null(tf_ver)) {
    stop("You need to install TensorFlow to use tfdatasets ",
         "-- install with tensorflow::install_tensorflow()",
         call. = FALSE)
  }
  if (tf_ver >= required_ver) {
    return(invisible(NULL))
  }
  stop(
    feature_name, " requires version ", required_ver, " ",
    "of TensorFlow (you are currently running version ", tf_ver, ").",
    call. = FALSE
  )
}
# Extract feature names from a dataset's named output shapes; errors when
# the outputs are not a named list.
column_names <- function(dataset) {
  shapes <- dataset$output_shapes
  if (!is.list(shapes) || is.null(names(shapes)))
    stop("Unable to resolve features for dataset that does not have named outputs", call. = FALSE)
  names(shapes)
}
# Test whether 'x' is a TensorFlow Dataset object (by its Python class name).
is_dataset <- function(x) {
  dataset_class <- "tensorflow.python.data.ops.dataset_ops.Dataset"
  inherits(x, dataset_class)
}
# Test whether 'x' is a (graph-mode) TensorFlow Tensor.
is_tensor <- function(x) {
  tensor_class <- "tensorflow.python.framework.ops.Tensor"
  inherits(x, tensor_class)
}
# Test whether 'x' is an eager tensor. Both the builtin and the
# framework.ops class names occur in the wild; inherits() accepts a vector
# of candidate classes and returns TRUE when any of them matches.
is_eager_tensor <- function(x) {
  eager_classes <- c("python.builtin.EagerTensor",
                     "tensorflow.python.framework.ops.EagerTensor")
  inherits(x, eager_classes)
}
|
/R/utils.R
|
no_license
|
stjordanis/tfdatasets
|
R
| false
| false
| 2,127
|
r
|
# Coerce scalars, vectors, lists and tensors to 64-bit integer values.
#
# Recurses over lists (and over numeric vectors of length > 1, which become
# lists of per-element results); NULL passes through unchanged; tensors are
# cast to 'dtype' and, when eager, unwrapped to their R value via
# tensor_value(); anything else goes through tf$constant() after as.integer().
as_integer_tensor <- function(x, dtype = tf$int64) {
  # recurse over lists
  if (is.list(x) || (is.numeric(x) && length(x) > 1))
    lapply(x, function(elem) as_integer_tensor(elem, dtype))
  else if (is.null(x))
    x
  else if (is_tensor(x))
    tensor_value(tf$cast(x, dtype = dtype))
  else
    tensor_value(tf$constant(as.integer(x), dtype = dtype))
}
# Normalize a (possibly nested) shape specification into tuples of int64
# values for the TensorFlow dataset API.
#
# Lists become Python tuples (recursively); tensors are cast to int64;
# existing Python objects pass through untouched; NULL (an unknown
# dimension) is encoded as -1; anything else is wrapped via tf$constant().
as_tensor_shapes <- function(x) {
  if (is.list(x))
    tuple(lapply(x, as_tensor_shapes))
  else if (is_tensor(x))
    tensor_value(tf$cast(x, dtype = tf$int64))
  else if (inherits(x, "python.builtin.object"))
    x
  else if (is.null(x))
    tensor_value(tf$constant(-1L, dtype = tf$int64))
  else
    tensor_value(tf$constant(as.integer(x), dtype = tf$int64))
}
# Materialize an eager tensor to its R value via $numpy(); any other input
# is returned unchanged.
tensor_value <- function(x) {
  if (!is_eager_tensor(x)) {
    return(x)
  }
  x$numpy()
}
# Run 'f' with a TensorFlow session, creating (and closing) a temporary one
# only when no session is supplied and no default session is active.
#
# @param f Function of one argument, invoked as f(session).
# @param session Optional session; defaults to tf$get_default_session().
with_session <- function(f, session = NULL) {
  if (is.null(session))
    session <- tf$get_default_session()
  if (is.null(session)) {
    # No default session either: make our own and guarantee it is closed.
    session <- tf$Session()
    on.exit(session$close(), add = TRUE)
  }
  f(session)
}
# Ensure the installed TensorFlow is recent enough for 'feature_name',
# stopping with an informative error when TF is missing or too old.
validate_tf_version <- function(required_ver = "1.4", feature_name = "tfdatasets") {
  tf_ver <- tensorflow::tf_version()
  if (is.null(tf_ver)) {
    stop("You need to install TensorFlow to use tfdatasets ",
         "-- install with tensorflow::install_tensorflow()",
         call. = FALSE)
  }
  if (tf_ver >= required_ver) {
    return(invisible(NULL))
  }
  stop(
    feature_name, " requires version ", required_ver, " ",
    "of TensorFlow (you are currently running version ", tf_ver, ").",
    call. = FALSE
  )
}
# Extract feature names from a dataset's named output shapes; errors when
# the outputs are not a named list.
column_names <- function(dataset) {
  shapes <- dataset$output_shapes
  if (!is.list(shapes) || is.null(names(shapes)))
    stop("Unable to resolve features for dataset that does not have named outputs", call. = FALSE)
  names(shapes)
}
# Test whether 'x' is a TensorFlow Dataset object (by its Python class name).
is_dataset <- function(x) {
  dataset_class <- "tensorflow.python.data.ops.dataset_ops.Dataset"
  inherits(x, dataset_class)
}
# Test whether 'x' is a (graph-mode) TensorFlow Tensor.
is_tensor <- function(x) {
  tensor_class <- "tensorflow.python.framework.ops.Tensor"
  inherits(x, tensor_class)
}
# Test whether 'x' is an eager tensor. Both the builtin and the
# framework.ops class names occur in the wild; inherits() accepts a vector
# of candidate classes and returns TRUE when any of them matches.
is_eager_tensor <- function(x) {
  eager_classes <- c("python.builtin.EagerTensor",
                     "tensorflow.python.framework.ops.EagerTensor")
  inherits(x, eager_classes)
}
|
# Restriction matrix for hypothesis H6: a 3x3 identity block with two zero
# rows appended, giving a 5x3 matrix.
H.6 <- matrix(rbind(diag(3), c(0, 0, 0), c(0, 0, 0)),
              nrow=5, ncol=3)
# LR test of hypothesis H6 against H1 (r = 2 cointegrating relations,
# r1 = 1 restricted relation).
# NOTE(review): 'H1' and bh6lrtest() come from earlier code / the 'vars'
# package -- confirm both are in scope before running this snippet.
H6 <- summary(bh6lrtest(z = H1, H = H.6,
                        r = 2, r1 = 1))
|
/vars/book-ex/Rcode-8-6.R
|
permissive
|
solgenomics/R_libs
|
R
| false
| false
| 169
|
r
|
# Restriction matrix for hypothesis H6: a 3x3 identity block with two zero
# rows appended, giving a 5x3 matrix.
H.6 <- matrix(rbind(diag(3), c(0, 0, 0), c(0, 0, 0)),
              nrow=5, ncol=3)
# LR test of hypothesis H6 against H1 (r = 2 cointegrating relations,
# r1 = 1 restricted relation).
# NOTE(review): 'H1' and bh6lrtest() come from earlier code / the 'vars'
# package -- confirm both are in scope before running this snippet.
H6 <- summary(bh6lrtest(z = H1, H = H.6,
                        r = 2, r1 = 1))
|
# Build `subpower`: household power-consumption rows for 2007-02-01 and
# 2007-02-02, downloading/unzipping/reading the raw data only when the
# intermediate objects are not already in the workspace.
if(!exists("subpower")) {
# The full table `powert` is expensive to read, so reuse it when present.
if(!exists("powert")) {
# Download the zip archive only if it is not already in the working directory.
if(!file.exists("exdata_data_household_power_consumption.zip")) {
# NOTE(review): setInternet2() was needed for IE-based proxies on old Windows
# R; it is defunct in R >= 3.3 -- this line will error on modern R.
setInternet2(use = TRUE) #needed for downloading via IE, which is used by my employer
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "exdata_data_household_power_consumption.zip")
}
# unzip() returns the extracted file path(s), fed directly to read.table;
# the raw file is semicolon-separated with "?" marking missing values.
poweruz<-unzip("exdata_data_household_power_consumption.zip")
powert<-read.table(poweruz, header=TRUE, sep = ";", stringsAsFactors=FALSE, na.strings="?")
}
# Dates arrive as day/month/year strings; convert, then keep the two days.
powert$Date<-as.Date(powert$Date, format="%d/%m/%Y")
subpower <- powert[powert$Date == "2007-02-01" | powert$Date == "2007-02-02",]
# Combine date and time into a POSIXlt column for plot x-axes.
subpower$DateTime <- paste(subpower$Date, subpower$Time, sep = " ")
subpower$datetime <- strptime(subpower$DateTime, format = "%Y-%m-%d %H:%M:%S")
}
|
/checkfiles.R
|
no_license
|
aventurine/ExData_Plotting1
|
R
| false
| false
| 1,216
|
r
|
# Construct `subpower` (power data restricted to 2007-02-01/02). Each nested
# guard avoids redoing work: reuse cached objects, skip the download if the
# archive already exists locally.
if(!exists("subpower")) {
# Reuse the full `powert` table if a previous run already loaded it.
if(!exists("powert")) {
# Fetch the archive only when absent from the working directory.
if(!file.exists("exdata_data_household_power_consumption.zip")) {
# NOTE(review): setInternet2() is defunct in modern R (>= 3.3); this call
# only worked on legacy Windows setups behind IE proxies.
setInternet2(use = TRUE) #needed for downloading via IE, which is used by my employer
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "exdata_data_household_power_consumption.zip")
}
# Semicolon-separated file; "?" encodes NA.
poweruz<-unzip("exdata_data_household_power_consumption.zip")
powert<-read.table(poweruz, header=TRUE, sep = ";", stringsAsFactors=FALSE, na.strings="?")
}
# Parse day/month/year dates, then subset to the two target days.
powert$Date<-as.Date(powert$Date, format="%d/%m/%Y")
subpower <- powert[powert$Date == "2007-02-01" | powert$Date == "2007-02-02",]
# Derived POSIXlt timestamp column used as the x-axis in the plots.
subpower$DateTime <- paste(subpower$Date, subpower$Time, sep = " ")
subpower$datetime <- strptime(subpower$DateTime, format = "%Y-%m-%d %H:%M:%S")
}
|
# Plot per-site genetic distances (jittered, faceted by site/mission) and run
# pairwise Wilcoxon tests of distance between species within each site.
#
# Fixes relative to the original: library() instead of require() (require()
# silently returns FALSE on failure), TRUE/FALSE instead of T/F, `<-` for
# assignment, the facet_wrap argument spelled out as `scales` (the original
# relied on partial matching of `scale`), and the print loop bound derived
# from the data instead of the hard-coded 1:14.
library(ggplot2)
library(dplyr)
library(tidyr)
library(xtable)

# Shared theme: Times 14pt with slanted, right-aligned x labels.
mytheme <- theme_bw() +
  theme(text = element_text(family = "Times", size = 14),
        axis.text.x = element_text(angle = 45, hjust = 1))

data <- read.table("data_distance.txt", header = TRUE)
glimpse(data)
data$distance <- as.numeric(data$distance)

# Make a graph !
pdf("distance_indiv_versus_site.pdf", height = 20, width = 20)
ggplot(data) +
  geom_jitter(width = 0.5, size = 2, alpha = 0.6,
              aes(x = seqtot, y = distance, color = seqtot, shape = geography)) +
  facet_wrap(~ site_mission, scales = "free_x") +
  mytheme +
  scale_x_discrete(name = "Species") +
  scale_color_discrete(name = "Species") +
  scale_shape_discrete(name = "Geography",
                       labels = c("Allopatric", "Site", "Sympatric"))
dev.off()

# Launch wilcoxon test and print them on screen: one p-value matrix per site.
test <- data %>%
  group_by(site_mission) %>%
  do(res = pairwise.wilcox.test((.)$distance, (.)$seqtot, paired = FALSE)$p.value)
for (i in seq_len(nrow(test))) {
  print(test[i, ]$site_mission)
  print(test[i, ]$res[[1]])
}
|
/stat_distance.R
|
no_license
|
loire/dilectus_pumillo_dufour
|
R
| false
| false
| 961
|
r
|
# Plot per-site genetic distances and run pairwise Wilcoxon tests between
# species within each site/mission facet.
require(ggplot2)
require(dplyr)
require(tidyr)
require(xtable)
# Shared ggplot theme: Times 14pt, x labels rotated 45 degrees.
mytheme = theme_bw() + theme(text = element_text(family="Times",size=14),axis.text.x = element_text(angle =45,hjust = 1))
data = read.table("data_distance.txt",header=T)
glimpse(data)
data$distance = as.numeric(data$distance)
# Make a graph !
# Jittered distances per species (seqtot), colored by species, shaped by
# geography, one facet per site/mission with a free x scale.
# NOTE(review): `scale=` relies on partial matching of facet_wrap's `scales`
# argument -- spell it out if this script is revised.
pdf("distance_indiv_versus_site.pdf",height = 20, width=20)
ggplot(data) + geom_jitter(width = 0.5, size= 2, alpha = 0.6, aes(x=seqtot,y=distance,color=seqtot,shape=geography)) + facet_wrap(~ site_mission,scale="free_x") + mytheme + scale_x_discrete(name="Species") + scale_color_discrete(name="Species") + scale_shape_discrete(name="Geography",labels=c("Allopatric","Site","Sympatric"))
dev.off()
# Launch wilcoxon test and print them on screen
# NOTE(review): the 1:14 bound assumes exactly 14 site/mission groups --
# confirm against nrow(test) if the data change.
test = data %>% group_by(site_mission) %>% do (res = pairwise.wilcox.test((.)$distance,(.)$seqtot,paired=F)$p.value)
for (i in 1:14) {print(test[i,]$site_mission) ; print(test[i,]$res[[1]])}
|
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Master's Thesis - Remote Sensing %
# Environmental Engineering - ISA/UL - Lisbon, Portugal %
# (c) 2014 by Jonas Schmedtmann & Manuel Campagnolo %
# %
# MAIN SCRIPT %
# %
# Implements the program logic using functions from %
# functions.R. All steps typically found in remote %
# sensing can be found here. %
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Processing steps:
# 01. DATA AQUISITION AND PROCESSING
# 02. PARCEL DATA SELECTION
# 03. EXPLORATORY DATA ANALYSIS
# 04. VARIABLE SELECTION
# 05. 06. 07. SELECTION/TRAINING/VALIDATION OF CLASSIFIERS
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 1. DATA AQUISITION AND PROCESSING ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Loading all fucntions and initializing program
source('functions.R')
init()
# Defining study area, clipping, and projecting
coordsArea <- cbind(AREA.X,AREA.Y)
cdg <- carregaRecortaDadosEspaciais(coordsArea, PROJ4.UTM)
plot(cdg$area);plot(cdg$parc2005, add=T)
# Loading and correcting images
rm(todasImagens)
todasImagens <- constroiListaImagensLandsat(landsatPath=CAMINHO.LANDSAT,
areaEstudo=cdg$area,
prefixo="CORR_14.08",
ano=2005,
corrige=TRUE)
# Building a list holding all data
rm(listaDados)
listaDados <- constroiListaDados(ano=2005)
# Getting a list of all parcels (data.frame)
listaTodasParcelasIniciais <- constroiTodasParcelas()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 2. PARCEL DATA SELECTION ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Excluding some crop codes that don't make sense to include (e.g. too difficult to classify)
codExclusao <- c(87,88,666)
listaTodasParcelas <- listaTodasParcelasIniciais[!(listaTodasParcelasIniciais$cultura %in% codExclusao),]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Determining which crops, together, occupy the majority of study area (tested for 90%, 95%, 98% of area). This is limit the amount of crops to anazlize
# Getting total parcel area of all crops
dados <- data.table(listaTodasParcelas)
areasCulturas <- dados[,list(area=sum(area),numParc=length(area)),by=cultura]
areasCulturas <- areasCulturas[order(areasCulturas$area, decreasing = TRUE),]
areasCulturas <- cbind(areasCulturas,cumsum(areasCulturas$area))
areaTotal <- sum(areasCulturas$area)
# Visualizing crops for the 3 area thresholds
plot(1:nrow(areasCulturas),areasCulturas$V2)
abline(h=areaTotal*0.9,col='blue')
abline(h=areaTotal*0.95,col='orange')
abline(h=areaTotal*0.98,col='red')
# Selecting 95% of area as a good candidate
limite<-0.95
cultInfl <- areasCulturas[areasCulturas$V2 < areaTotal*limite,]
cultInfl <- cultInfl[!cultInfl$cultura == 27]
fraccaoInfl <- sum(cultInfl$area)/areaTotal
cultInfluentes <- cultInfl$cultura
length(cultInfluentes)
# Number or remaining crops
nrow(areasCulturas)-13
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Selecting the most influential crops (95% of total area) and reclassifying parcels to a standard 1-12 crop code to make analysis more sytraightforward
listaTodasParcelas <- listaTodasParcelas[listaTodasParcelas$cultura %in% cultInfluentes,]
novasClasses <- as.data.frame(cbind(cultInfluentes, c(1,2,3,4,5,6,7,8,5,9,10,11,12)))
colnames(novasClasses) <- c('cultura','novaClasse')
nClasses <- length(table(novasClasses$novaClasse))
for(i in 1:length(listaTodasParcelas$cultura))
listaTodasParcelas$cultura[i] <- novasClasses$novaClasse[which(novasClasses$cultura==listaTodasParcelas$cultura[i])]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 3. EXPLORATORY DATA ANALYSIS ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##### ANOVA to determine if crop and parcel affect reflectance values
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 1)
dadosANOVA <- constroiDadosANOVA(data=4, banda=4, dimAmostra=11852/2)
table(dadosANOVA$cultura)
plot(dadosANOVA$reflectancias, dadosANOVA$cultura)
nrow(dadosANOVA)
length(table(dadosANOVA$parcela))
load('finalListaDados2005_03.12.Robj')
load('dadosANOVA_18.04.2015.Robj')
dadosANOVA.d1.b1 <- constroiDadosANOVA(data=1, banda=1, dimAmostra=11852)
dadosANOVA.d1.b2 <- constroiDadosANOVA(data=1, banda=2, dimAmostra=11852)
dadosANOVA.d1.b3 <- constroiDadosANOVA(data=1, banda=3, dimAmostra=11852)
dadosANOVA.d1.b4 <- constroiDadosANOVA(data=1, banda=4, dimAmostra=11852)
dadosANOVA.d1.b5 <- constroiDadosANOVA(data=1, banda=5, dimAmostra=11852)
dadosANOVA.d1.b6 <- constroiDadosANOVA(data=1, banda=6, dimAmostra=11852)
dadosANOVA.d2.b1 <- constroiDadosANOVA(data=2, banda=1, dimAmostra=11852)
dadosANOVA.d2.b2 <- constroiDadosANOVA(data=2, banda=2, dimAmostra=11852)
dadosANOVA.d2.b3 <- constroiDadosANOVA(data=2, banda=3, dimAmostra=11852)
dadosANOVA.d2.b4 <- constroiDadosANOVA(data=2, banda=4, dimAmostra=11852)
dadosANOVA.d2.b5 <- constroiDadosANOVA(data=2, banda=5, dimAmostra=11852)
dadosANOVA.d2.b6 <- constroiDadosANOVA(data=2, banda=6, dimAmostra=11852)
dadosANOVA.d3.b1 <- constroiDadosANOVA(data=3, banda=1, dimAmostra=11852)
dadosANOVA.d3.b2 <- constroiDadosANOVA(data=3, banda=2, dimAmostra=11852)
dadosANOVA.d3.b3 <- constroiDadosANOVA(data=3, banda=3, dimAmostra=11852)
dadosANOVA.d3.b4 <- constroiDadosANOVA(data=3, banda=4, dimAmostra=11852)
dadosANOVA.d3.b5 <- constroiDadosANOVA(data=3, banda=5, dimAmostra=11852)
dadosANOVA.d3.b6 <- constroiDadosANOVA(data=3, banda=6, dimAmostra=11852)
dadosANOVA.d4.b1 <- constroiDadosANOVA(data=4, banda=1, dimAmostra=11852)
dadosANOVA.d4.b2 <- constroiDadosANOVA(data=4, banda=2, dimAmostra=11852)
dadosANOVA.d4.b3 <- constroiDadosANOVA(data=4, banda=3, dimAmostra=11852)
dadosANOVA.d4.b4 <- constroiDadosANOVA(data=4, banda=4, dimAmostra=11852)
dadosANOVA.d4.b5 <- constroiDadosANOVA(data=4, banda=5, dimAmostra=11852)
dadosANOVA.d4.b6 <- constroiDadosANOVA(data=4, banda=6, dimAmostra=11852)
dadosANOVA.d5.b1 <- constroiDadosANOVA(data=5, banda=1, dimAmostra=11852)
dadosANOVA.d5.b2 <- constroiDadosANOVA(data=5, banda=2, dimAmostra=11852)
dadosANOVA.d5.b3 <- constroiDadosANOVA(data=5, banda=3, dimAmostra=11852)
dadosANOVA.d5.b4 <- constroiDadosANOVA(data=5, banda=4, dimAmostra=11852)
dadosANOVA.d5.b5 <- constroiDadosANOVA(data=5, banda=5, dimAmostra=11852)
dadosANOVA.d5.b6 <- constroiDadosANOVA(data=5, banda=6, dimAmostra=11852)
dadosANOVA.d6.b1 <- constroiDadosANOVA(data=6, banda=1, dimAmostra=11852)
dadosANOVA.d6.b2 <- constroiDadosANOVA(data=6, banda=2, dimAmostra=11852)
dadosANOVA.d6.b3 <- constroiDadosANOVA(data=6, banda=3, dimAmostra=11852)
dadosANOVA.d6.b4 <- constroiDadosANOVA(data=6, banda=4, dimAmostra=11852)
dadosANOVA.d6.b5 <- constroiDadosANOVA(data=6, banda=5, dimAmostra=11852)
dadosANOVA.d6.b6 <- constroiDadosANOVA(data=6, banda=6, dimAmostra=11852)
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 2) Hierarchical ANOVA
# df$x é o factor dominante: cultura
# df$y é o factor subordinado: parcela
# df$z é a resposta (reflectância)
# Hand-rolled hierarchical (nested) two-factor ANOVA decomposition:
# crop (dominant factor) with parcels nested inside crops; response is
# reflectance.
#
# df     data frame whose three columns are, positionally: the response,
#        the dominant factor, the nested factor. They are renamed to
#        z / x / y below. NOTE(review): x is assumed to be a factor
#        (levels()/nlevels() are used) -- confirm callers pass factors.
# data   date identifier, returned unchanged for bookkeeping.
# banda  band identifier, returned unchanged for bookkeeping.
#
# Returns list(culturas = c(df, SS, MS), parcelas = c(df, SS, MS),
# residuos = c(df, SS, MS), data, banda) and prints the three ANOVA rows
# (degrees of freedom, SS, MS and, for the first two, the F ratio vs the
# residual mean square).
decomposition.hierarq.anova<-function(df, data, banda)
{
# Positional renaming: response z, dominant factor x, nested factor y.
names(df) <- c("z","x","y")
# clean NAs
df<-df[!is.na(df$z) & !is.na(df$x) & !is.na(df$y),]
sqa<-0
sqre<-0
crops.nparcels<-c()
great.mean<-mean(df$z)
for (levC in levels(df$x)) #crops
{
pixels.diff<-c()
crop.mean<-mean(df$z[df$x==levC])
crop.length<-sum(df$x==levC);#print(crop.length)
# b_i: number of distinct parcels observed within this crop.
crop.nparcels<-length(unique(as.character(df$y[df$x==levC])))
crops.nparcels<-c(crops.nparcels,crop.nparcels)
# Between-crop SS contribution: n_i * (crop mean - grand mean)^2.
sqa<-sqa+crop.length*(crop.mean-great.mean)^2 #
for (levP in unique(as.character(df$y[df$x==levC]))) # parcels
{
# Residual SS: squared deviations of each pixel from its parcel mean.
pixels.diff<-df$z[df$x==levC & df$y==levP]-mean(df$z[df$x==levC & df$y==levP])
sqre<-sqre+sum(pixels.diff^2)
}
}
#print(crops.nparcels) #b_i's
N<-length(df$z)
gla<-(nlevels(df$x)-1) # a-1
glb<-(sum(crops.nparcels-1)) #\sum (b_i -1)
gle<-(N-sum(crops.nparcels)) #n-\sum b_i
# Parcel-within-crop SS by difference: SS_total - SS_crop - SS_residual.
sqt<-var(df$z)*(N-1)
sqb<-sqt-sqa-sqre
qma<-sqa/gla
qmb<-sqb/glb
qmre<-sqre/gle
# Printed rows: df, SS, MS (and F ratio against the residual MS).
print(paste(gla,round(sqa,5),round(qma,5),round(qma/qmre,3)))
print(paste(glb,round(sqb,5),round(qmb,5),round(qmb/qmre,3)))
print(paste(gle,round(sqre,5),round(qmre,5)))
return(list(culturas=c(gla,sqa,qma),parcelas=c(glb,sqb,qmb),residuos=c(gle,sqre,qmre), data=data, banda=banda))
}
decomp.d1.b1 <- decomposition.hierarq.anova(dadosANOVA.d1.b1, 1, 1)
decomp.d1.b2 <- decomposition.hierarq.anova(dadosANOVA.d1.b2, 1, 2)
decomp.d1.b3 <- decomposition.hierarq.anova(dadosANOVA.d1.b3, 1, 3)
decomp.d1.b4 <- decomposition.hierarq.anova(dadosANOVA.d1.b4, 1, 4)
decomp.d1.b5 <- decomposition.hierarq.anova(dadosANOVA.d1.b5, 1, 5)
decomp.d1.b6 <- decomposition.hierarq.anova(dadosANOVA.d1.b6, 1, 6)
decomp.d2.b1 <- decomposition.hierarq.anova(dadosANOVA.d2.b1, 2, 1)
decomp.d2.b2 <- decomposition.hierarq.anova(dadosANOVA.d2.b2, 2, 2)
decomp.d2.b3 <- decomposition.hierarq.anova(dadosANOVA.d2.b3, 2, 3)
decomp.d2.b4 <- decomposition.hierarq.anova(dadosANOVA.d2.b4, 2, 4)
decomp.d2.b5 <- decomposition.hierarq.anova(dadosANOVA.d2.b5, 2, 5)
decomp.d2.b6 <- decomposition.hierarq.anova(dadosANOVA.d2.b6, 2, 6)
decomp.d3.b1 <- decomposition.hierarq.anova(dadosANOVA.d3.b1, 3, 1)
decomp.d3.b2 <- decomposition.hierarq.anova(dadosANOVA.d3.b2, 3, 2)
decomp.d3.b3 <- decomposition.hierarq.anova(dadosANOVA.d3.b3, 3, 3)
decomp.d3.b4 <- decomposition.hierarq.anova(dadosANOVA.d3.b4, 3, 4)
decomp.d3.b5 <- decomposition.hierarq.anova(dadosANOVA.d3.b5, 3, 5)
decomp.d3.b6 <- decomposition.hierarq.anova(dadosANOVA.d3.b6, 3, 6)
decomp.d4.b1 <- decomposition.hierarq.anova(dadosANOVA.d4.b1, 4, 1)
decomp.d4.b2 <- decomposition.hierarq.anova(dadosANOVA.d4.b2, 4, 2)
decomp.d4.b3 <- decomposition.hierarq.anova(dadosANOVA.d4.b3, 4, 3)
decomp.d4.b4 <- decomposition.hierarq.anova(dadosANOVA.d4.b4, 4, 4)
decomp.d4.b5 <- decomposition.hierarq.anova(dadosANOVA.d4.b5, 4, 5)
decomp.d4.b6 <- decomposition.hierarq.anova(dadosANOVA.d4.b6, 4, 6)
decomp.d5.b1 <- decomposition.hierarq.anova(dadosANOVA.d5.b1, 5, 1)
decomp.d5.b2 <- decomposition.hierarq.anova(dadosANOVA.d5.b2, 5, 2)
decomp.d5.b3 <- decomposition.hierarq.anova(dadosANOVA.d5.b3, 5, 3)
decomp.d5.b4 <- decomposition.hierarq.anova(dadosANOVA.d5.b4, 5, 4)
decomp.d5.b5 <- decomposition.hierarq.anova(dadosANOVA.d5.b5, 5, 5)
decomp.d5.b6 <- decomposition.hierarq.anova(dadosANOVA.d5.b6, 5, 6)
decomp.d6.b1 <- decomposition.hierarq.anova(dadosANOVA.d6.b1, 6, 1)
decomp.d6.b2 <- decomposition.hierarq.anova(dadosANOVA.d6.b2, 6, 2)
decomp.d6.b3 <- decomposition.hierarq.anova(dadosANOVA.d6.b3, 6, 3)
decomp.d6.b4 <- decomposition.hierarq.anova(dadosANOVA.d6.b4, 6, 4)
decomp.d6.b5 <- decomposition.hierarq.anova(dadosANOVA.d6.b5, 6, 5)
decomp.d6.b6 <- decomposition.hierarq.anova(dadosANOVA.d6.b6, 6, 6)
listaANOVA <- list(decomp.d1.b1, decomp.d1.b2, decomp.d1.b3, decomp.d1.b4, decomp.d1.b5, decomp.d1.b6,
decomp.d2.b1, decomp.d2.b2, decomp.d2.b3, decomp.d2.b4, decomp.d2.b5, decomp.d2.b6,
decomp.d3.b1, decomp.d3.b2, decomp.d3.b3, decomp.d3.b4, decomp.d3.b5, decomp.d3.b6,
decomp.d4.b1, decomp.d4.b2, decomp.d4.b3, decomp.d4.b4, decomp.d4.b5, decomp.d4.b6,
decomp.d5.b1, decomp.d5.b2, decomp.d5.b3, decomp.d5.b4, decomp.d5.b5, decomp.d5.b6,
decomp.d6.b1, decomp.d6.b2, decomp.d6.b3, decomp.d6.b4, decomp.d6.b5, decomp.d6.b6)
df<-data.frame(z=runif(10),x=as.factor(c(rep("a",5), rep("b",5))),y=as.factor(c(rep("A",2), rep("B",2),rep("C",2),rep("D",4))) )
j1 <- decomposition.hierarq.anova(df,1,1)
j2 <- decomposition.hierarq.anova(df,2,2)
j3 <- decomposition.hierarq.anova(df,2,3)
j <- list(j1,j2,j3)
names(j) <- c("a", "b", "c")
#------------------------------------------------------------------
# OLD VERSION WITH 1 FACTOR ONLY
#df$x is the factor (parcel) and df$y is the reflectance
# One-way ANOVA decomposition by parcel (single factor), computed by hand.
#
# df     data frame with columns `reflectancias` (numeric response) and
#        `parcela` (factor: one level per parcel).
# data   date identifier, returned unchanged for bookkeeping.
# banda  band identifier, returned unchanged for bookkeeping.
#
# Returns c(data, banda, QMF, QMRE), where QMF is the between-parcel mean
# square and QMRE the residual mean square.
#
# Fix relative to the original: every sum of squares is now computed over the
# same set of complete cases. The original mixed na.rm = TRUE group means
# with a var() call that returned NA whenever any response was missing, and
# its group sizes counted rows whose response was NA, inflating SQF.
decomposition.one.way.anova <- function(df, data, banda)
{
  # Keep only rows with both a response and a parcel label.
  ok <- !is.na(df$reflectancias) & !is.na(df$parcela)
  z <- df$reflectancias[ok]
  g <- df$parcela[ok]
  great.mean <- mean(z)
  means.parcels <- c()
  n.pixels <- c()
  for (lev in levels(df$parcela))
  {
    means.parcels <- c(means.parcels, mean(z[g == lev]))
    n.pixels <- c(n.pixels, sum(g == lev))
  }
  # Between-group (factor) sum of squares; na.rm guards against empty levels
  # (n = 0 with an NaN mean contributes 0, not NaN).
  sqf <- sum(n.pixels * (means.parcels - great.mean)^2, na.rm = TRUE)
  # Total sum of squares over the complete cases.
  sqt <- var(z) * (length(z) - 1)
  qmf <- sqf / (nlevels(df$parcela) - 1)
  qmre <- (sqt - sqf) / (length(z) - nlevels(df$parcela))
  return(c(data, banda, qmf, qmre))
}
decomp.d1.b1 <- decomposition.one.way.anova(dadosANOVA.d1.b1, 1, 1)
decomp.d1.b2 <- decomposition.one.way.anova(dadosANOVA.d1.b2, 1, 2)
decomp.d1.b3 <- decomposition.one.way.anova(dadosANOVA.d1.b3, 1, 3)
decomp.d1.b4 <- decomposition.one.way.anova(dadosANOVA.d1.b4, 1, 4)
decomp.d1.b5 <- decomposition.one.way.anova(dadosANOVA.d1.b5, 1, 5)
decomp.d1.b6 <- decomposition.one.way.anova(dadosANOVA.d1.b6, 1, 6)
decomp.d2.b1 <- decomposition.one.way.anova(dadosANOVA.d2.b1, 2, 1)
decomp.d2.b2 <- decomposition.one.way.anova(dadosANOVA.d2.b2, 2, 2)
decomp.d2.b3 <- decomposition.one.way.anova(dadosANOVA.d2.b3, 2, 3)
decomp.d2.b4 <- decomposition.one.way.anova(dadosANOVA.d2.b4, 2, 4)
decomp.d2.b5 <- decomposition.one.way.anova(dadosANOVA.d2.b5, 2, 5)
decomp.d2.b6 <- decomposition.one.way.anova(dadosANOVA.d2.b6, 2, 6)
decomp.d3.b1 <- decomposition.one.way.anova(dadosANOVA.d3.b1, 3, 1)
decomp.d3.b2 <- decomposition.one.way.anova(dadosANOVA.d3.b2, 3, 2)
decomp.d3.b3 <- decomposition.one.way.anova(dadosANOVA.d3.b3, 3, 3)
decomp.d3.b4 <- decomposition.one.way.anova(dadosANOVA.d3.b4, 3, 4)
decomp.d3.b5 <- decomposition.one.way.anova(dadosANOVA.d3.b5, 3, 5)
decomp.d3.b6 <- decomposition.one.way.anova(dadosANOVA.d3.b6, 3, 6)
decomp.d4.b1 <- decomposition.one.way.anova(dadosANOVA.d4.b1, 4, 1)
decomp.d4.b2 <- decomposition.one.way.anova(dadosANOVA.d4.b2, 4, 2)
decomp.d4.b3 <- decomposition.one.way.anova(dadosANOVA.d4.b3, 4, 3)
decomp.d4.b4 <- decomposition.one.way.anova(dadosANOVA.d4.b4, 4, 4)
decomp.d4.b5 <- decomposition.one.way.anova(dadosANOVA.d4.b5, 4, 5)
decomp.d4.b6 <- decomposition.one.way.anova(dadosANOVA.d4.b6, 4, 6)
decomp.d5.b1 <- decomposition.one.way.anova(dadosANOVA.d5.b1, 5, 1)
decomp.d5.b2 <- decomposition.one.way.anova(dadosANOVA.d5.b2, 5, 2)
decomp.d5.b3 <- decomposition.one.way.anova(dadosANOVA.d5.b3, 5, 3)
decomp.d5.b4 <- decomposition.one.way.anova(dadosANOVA.d5.b4, 5, 4)
decomp.d5.b5 <- decomposition.one.way.anova(dadosANOVA.d5.b5, 5, 5)
decomp.d5.b6 <- decomposition.one.way.anova(dadosANOVA.d5.b6, 5, 6)
decomp.d6.b1 <- decomposition.one.way.anova(dadosANOVA.d6.b1, 6, 1)
decomp.d6.b2 <- decomposition.one.way.anova(dadosANOVA.d6.b2, 6, 2)
decomp.d6.b3 <- decomposition.one.way.anova(dadosANOVA.d6.b3, 6, 3)
decomp.d6.b4 <- decomposition.one.way.anova(dadosANOVA.d6.b4, 6, 4)
decomp.d6.b5 <- decomposition.one.way.anova(dadosANOVA.d6.b5, 6, 5)
decomp.d6.b6 <- decomposition.one.way.anova(dadosANOVA.d6.b6, 6, 6)
resultado <- rbind(decomp.d1.b1, decomp.d1.b2, decomp.d1.b3, decomp.d1.b4, decomp.d1.b5, decomp.d1.b6,
decomp.d2.b1, decomp.d2.b2, decomp.d2.b3, decomp.d2.b4, decomp.d2.b5, decomp.d2.b6,
decomp.d3.b1, decomp.d3.b2, decomp.d3.b3, decomp.d3.b4, decomp.d3.b5, decomp.d3.b6,
decomp.d4.b1, decomp.d4.b2, decomp.d4.b3, decomp.d4.b4, decomp.d4.b5, decomp.d4.b6,
decomp.d5.b1, decomp.d5.b2, decomp.d5.b3, decomp.d5.b4, decomp.d5.b5, decomp.d5.b6,
decomp.d6.b1, decomp.d6.b2, decomp.d6.b3, decomp.d6.b4, decomp.d6.b5, decomp.d6.b6)
resultado <- cbind(resultado, resultado[,3]/resultado[,4], sqrt(resultado[,4]))
colnames(resultado) <- c("Data", "Banda", "QMF", "QMRE", "F", "RMSE")
#For latex output
round(resultado[1:6,5:6][,"F"], 1)
res.d1 <- paste0(round(resultado[1:6,5:6][,'F'], 1), ", ", round(resultado[1:6,5:6][,'RMSE'], 3))
res.d2 <- paste0(round(resultado[7:12,5:6][,'F'], 1), ", ", round(resultado[7:12,5:6][,'RMSE'], 3))
res.d3 <- paste0(round(resultado[13:18,5:6][,'F'], 1), ", ", round(resultado[13:18,5:6][,'RMSE'], 3))
res.d4 <- paste0(round(resultado[19:24,5:6][,'F'], 1), ", ", round(resultado[19:24,5:6][,'RMSE'], 3))
res.d5 <- paste0(round(resultado[25:30,5:6][,'F'], 1), ", ", round(resultado[25:30,5:6][,'RMSE'], 3))
res.d6 <- paste0(round(resultado[31:36,5:6][,'F'], 1), ", ", round(resultado[31:36,5:6][,'RMSE'], 3))
res.final <- cbind(res.d1, res.d2, res.d3, res.d4, res.d5, res.d6)
res.final.xtab <- xtable(res.final)
align(res.final.xtab) <- rep("l", 6)
print.xtable(res.final.xtab, booktabs=T, include.rownames=F)
save(resultado, file="resultadoDecompANOVA.object")
RMSE <- sqrt()
rm(dadosANOVA.d6.b1)
rm(dadosANOVA.d6.b2)
rm(dadosANOVA.d6.b3)
rm(dadosANOVA.d6.b4)
rm(dadosANOVA.d6.b5)
rm(dadosANOVA.d6.b6)
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 3)
MEGAdadosANOVA <- constroiDadosANOVA(data=5, banda=4, dimAmostra=11582)
somaDeQuadrados(MEGAdadosANOVA)
somaDeQuadrados(dadosANOVA)
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 4)
# Test if the crop affects combinations of image/date and NDVIs
FValues <- c()
pValues <- c()
efeito <- c()
for(i in 7:length(listaTodasParcelas))
{
parcelas.aov <- aov(listaTodasParcelas[,i] ~ as.factor(listaTodasParcelas$cultura))
FV <- summary(parcelas.aov)[[1]][["F value"]][[1]]
pV <- summary(parcelas.aov)[[1]][["Pr(>F)"]][[1]]
FValues <- c(FValues,FV)
pValues <- c(pValues,pV)
if(pV <= 0.05) ef <- 1 else ef <- 0
efeito <- c(efeito,ef)
}
nn <- colnames(listaTodasParcelas[,7:length(listaTodasParcelas)])
resultadoANOVA <- data.frame(cbind(nn,FValues,pValues,efeito))
resultadoANOVA$FValues <- as.numeric(as.character(resultadoANOVA$FValues))
resultadoANOVA <- resultadoANOVA[order(-resultadoANOVA$FValues),]; resultadoANOVA
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 4. VARIABLE SELECTION ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 4.1 WHOLE DATASET
# Prparing data for the classifier in the next step. Only spectral signatures for the first 6 dates
dadosClassificadores <- listaTodasParcelas[,c(5,7:42)]
dadosClassificadores$cultura <- as.factor(dadosClassificadores$cultura)
dadosClassificadores[,-1][dadosClassificadores[,-1] > 1] <- 1
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 4.2 PARTIAL DATASET (DATASET 2) (This is where variable selection happens)
# Using trim.matrix from subselect package
dadosTrim <- dadosClassificadores
criterioExclusaoVars <- 0.02
classTrim <- trim.matrix(cor(dadosTrim[,-1]), criterioExclusaoVars);classTrim
classTrim$names.discarded == classTrimOLD$names.discarded
varsRetirar <- classTrim$numbers.discarded+1 #+1 because in the original data frame, col 1 is for the crop
varsSobram <- 1:length(dadosTrim)
varsSobram <- varsSobram[! varsSobram %in% varsRetirar]
# Preparing DATASET 2 (after variable selection)
dadosClassificadoresSub <- dadosTrim[,varsSobram]
dadosClassificadoresSub$cultura <- as.factor(dadosClassificadoresSub$cultura)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 5. 6. 7. ESCOLHA/TREINO/VALIDACAO DOS CLASSIFICADORES ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 5.1 KNN
#%%%%%%%%%%%%%%%%%%%%%%%%
# DATASET 1
# Tuning
resTune.comp.knn <- matrix(nrow=21, ncol=0)
for(i in 1:10)
{
print(i)
KNN.tune.comp <- tune.knn(dadosClassificadores[,-1], dadosClassificadores[,1], k=1:20)
k <- KNN.tune.comp[1][[1]][1,1]
erros <- KNN.tune.comp$performances$error
resTune.comp.knn <- cbind(resTune.comp.knn, c(k, erros))
}
# RESULT: use k=7, because it produces the lowest error
# Cross validation
KNN.comp.cruz <- validacaoCruzada(n = 10, tipo = 'KNN', dados = dadosClassificadores, lambda = 0.8, k = 7)
KNN.comp.cruz$result$correcTot
#%%%%%%%%%%%%%%%%%%%%%%%%
#DATASET 2
# Tuning
resTune.sub.knn <- matrix(nrow=21, ncol=0)
for(i in 1:10)
{
print(i)
KNN.tune.sub <- tune.knn(dadosClassificadoresSub[,-1], dadosClassificadoresSub[,1], k=1:20)
k <- KNN.tune.sub[1][[1]][1,1]
erros <- KNN.tune.sub$performances$error
resTune.sub.knn <- cbind(resTune.sub.knn, c(k, erros))
}
# RESULT: use k=7, because it produces the lowest error
# Cross validation
KNN.sub.cruz <- validacaoCruzada(n = 10, tipo = 'KNN', dados = dadosClassificadoresSub, lambda = 0.8, k = 7)
KNN.sub.cruz$result$correcTot
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 5.2 SVM
#%%%%%%%%%%%%%%%%%%%%%%%%
# DATASET 1
# Tuning
rGamma <- c(0.4, 0.6, 0.8, 1, 1.2, 1.4)
rCost <- c(0.5, 1, 1.5, 2, 2.5, 3)
resTune.comp.v1 <- matrix(ncol = length(rCost), nrow = length(rGamma))
for(i in 1:length(rGamma))
for(j in 1:length(rCost))
{
print(paste0("i = ", i, " and j = ", j))
res <- SVM.tune.comp <- tune.svm(dadosClassificadores[,-1], dadosClassificadores[,1], gamma = rGamma[i], cost = rCost[j])
resTune.comp.v1[i,j] <- res$best.performance
}
colnames(resTune.comp.v1) <- rCost
rownames(resTune.comp.v1) <- rGamma
min(resTune.comp.v1)
# Cross validation
SVM.comp.cruz <- validacaoCruzada(n = 10, tipo = 'SVM', dados = dadosClassificadores, lambda = 0.8, gamma = 0.4, cost = 1.5)
SVM.comp.cruz$result$correcTot
#%%%%%%%%%%%%%%%%%%%%%%%%
#DATASET 2
# Tuning
rGamma <- c(0.4, 0.6, 0.8, 1, 1.2, 1.4)
rCost <- c(0.5, 1, 1.5, 2, 2.5, 3)
resTune.sub.v1 <- matrix(ncol = length(rCost), nrow = length(rGamma))
for(i in 1:length(rGamma))
for(j in 1:length(rCost))
{
print(paste0("i = ", i, " and j = ", j))
res <- SVM.tune.sub <- tune.svm(dadosClassificadoresSub[,-1], dadosClassificadoresSub[,1], gamma = rGamma[i], cost = rCost[j])
resTune.sub.v1[i,j] <- res$best.performance
}
colnames(resTune.sub.v1) <- rCost
rownames(resTune.sub.v1) <- rGamma
min(resTune.sub.v1)
# Cross validation
SVM.sub.cruz <- validacaoCruzada(n = 10, tipo = 'SVM', dados = dadosClassificadoresSub, lambda = 0.8, gamma = 0.4, cost = 2.5)
#%%%%%%%%%%%%%%%%%%%%%%%%
# METHOD CALIBRATION
# q_j estimation. Follow the estimaQ function in functions.R
SVM.sub.qi <- estimaQ(classificador = SVM.sub.cruz$classificador, lambdas = c(0.6, 0.7, 0.8, 0.9, 0.95))
# CALIBRATION ASSESSMENT
# Follow the assessmentMetodo function in functions.R
SVM.sub.assess <- assessmentMetodo(classificador = SVM.sub.cruz$classificador, qis = SVM.sub.qi)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Final data structures: output for paper
#%%%%%%%%%%%%%%%%%%%%%%%%
# Data.frame containing all parcels and data from calibration mehtod
comp1 <- SVM.sub.analise$resultClass
comp1$parcela <- as.character(comp1$parcela)
comp1 <- comp1[order(comp1$parcela, decreasing = TRUE),]
comp2 <- listaTodasParcelas
comp2$id <- as.character(comp2$id)
comp2 <- comp2[order(comp2$id, decreasing = TRUE),]
igual <- comp1$verdade == comp1$class1
igual[igual == TRUE] <- 'Igual'
igual[igual == FALSE] <- 'Diferente'
qj <- c()
for(i in 1:nrow(comp1)) qj[i] <- SVM.sub.analise$l0.8$qi[comp1$class1[i]]
resultado <- comp1$prob1 >= qj
resultado[resultado == TRUE] <- 'Aceite'
resultado[resultado == FALSE] <- 'Rejeitado'
dadosFinais <- cbind(comp1, igual, qj, resultado, area=comp2$area)
head(dadosFinais)
|
/main.R
|
no_license
|
sharath747/crop-identification-cap
|
R
| false
| false
| 24,474
|
r
|
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Master's Thesis - Remote Sensing %
# Environmental Engineering - ISA/UL - Lisbon, Portugal %
# (c) 2014 by Jonas Schmedtmann & Manuel Campagnolo %
# %
# MAIN SCRIPT %
# %
# Implements the program logic using functions from %
# functions.R. All steps typically found in remote %
# sensing can be found here. %
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Processing steps:
# 01. DATA AQUISITION AND PROCESSING
# 02. PARCEL DATA SELECTION
# 03. EXPLORATORY DATA ANALYSIS
# 04. VARIABLE SELECTION
# 05. 06. 07. SELECTION/TRAINING/VALIDATION OF CLASSIFIERS
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 1. DATA AQUISITION AND PROCESSING ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Loading all fucntions and initializing program
source('functions.R')
init()
# Defining study area, clipping, and projecting
coordsArea <- cbind(AREA.X,AREA.Y)
cdg <- carregaRecortaDadosEspaciais(coordsArea, PROJ4.UTM)
plot(cdg$area);plot(cdg$parc2005, add=T)
# Loading and correcting images
rm(todasImagens)
todasImagens <- constroiListaImagensLandsat(landsatPath=CAMINHO.LANDSAT,
areaEstudo=cdg$area,
prefixo="CORR_14.08",
ano=2005,
corrige=TRUE)
# Building a list holding all data
rm(listaDados)
listaDados <- constroiListaDados(ano=2005)
# Getting a list of all parcels (data.frame)
listaTodasParcelasIniciais <- constroiTodasParcelas()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 2. PARCEL DATA SELECTION ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Excluding some crop codes that don't make sense to include (e.g. too difficult to classify)
codExclusao <- c(87,88,666)
listaTodasParcelas <- listaTodasParcelasIniciais[!(listaTodasParcelasIniciais$cultura %in% codExclusao),]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Determining which crops, together, occupy the majority of study area (tested for 90%, 95%, 98% of area). This is limit the amount of crops to anazlize
# Getting total parcel area of all crops
dados <- data.table(listaTodasParcelas)
areasCulturas <- dados[,list(area=sum(area),numParc=length(area)),by=cultura]
areasCulturas <- areasCulturas[order(areasCulturas$area, decreasing = TRUE),]
areasCulturas <- cbind(areasCulturas,cumsum(areasCulturas$area))
areaTotal <- sum(areasCulturas$area)
# Visualizing crops for the 3 area thresholds
plot(1:nrow(areasCulturas),areasCulturas$V2)
abline(h=areaTotal*0.9,col='blue')
abline(h=areaTotal*0.95,col='orange')
abline(h=areaTotal*0.98,col='red')
# Selecting 95% of area as a good candidate
limite<-0.95
cultInfl <- areasCulturas[areasCulturas$V2 < areaTotal*limite,]
cultInfl <- cultInfl[!cultInfl$cultura == 27]
fraccaoInfl <- sum(cultInfl$area)/areaTotal
cultInfluentes <- cultInfl$cultura
length(cultInfluentes)
# Number or remaining crops
nrow(areasCulturas)-13
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Selecting the most influential crops (95% of total area) and reclassifying parcels to a standard 1-12 crop code to make analysis more sytraightforward
listaTodasParcelas <- listaTodasParcelas[listaTodasParcelas$cultura %in% cultInfluentes,]
novasClasses <- as.data.frame(cbind(cultInfluentes, c(1,2,3,4,5,6,7,8,5,9,10,11,12)))
colnames(novasClasses) <- c('cultura','novaClasse')
nClasses <- length(table(novasClasses$novaClasse))
for(i in 1:length(listaTodasParcelas$cultura))
listaTodasParcelas$cultura[i] <- novasClasses$novaClasse[which(novasClasses$cultura==listaTodasParcelas$cultura[i])]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 3. EXPLORATORY DATA ANALYSIS ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##### ANOVA to determine if crop and parcel affect reflectance values
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 1)
dadosANOVA <- constroiDadosANOVA(data=4, banda=4, dimAmostra=11852/2)
table(dadosANOVA$cultura)
plot(dadosANOVA$reflectancias, dadosANOVA$cultura)
nrow(dadosANOVA)
length(table(dadosANOVA$parcela))
load('finalListaDados2005_03.12.Robj')
load('dadosANOVA_18.04.2015.Robj')
dadosANOVA.d1.b1 <- constroiDadosANOVA(data=1, banda=1, dimAmostra=11852)
dadosANOVA.d1.b2 <- constroiDadosANOVA(data=1, banda=2, dimAmostra=11852)
dadosANOVA.d1.b3 <- constroiDadosANOVA(data=1, banda=3, dimAmostra=11852)
dadosANOVA.d1.b4 <- constroiDadosANOVA(data=1, banda=4, dimAmostra=11852)
dadosANOVA.d1.b5 <- constroiDadosANOVA(data=1, banda=5, dimAmostra=11852)
dadosANOVA.d1.b6 <- constroiDadosANOVA(data=1, banda=6, dimAmostra=11852)
dadosANOVA.d2.b1 <- constroiDadosANOVA(data=2, banda=1, dimAmostra=11852)
dadosANOVA.d2.b2 <- constroiDadosANOVA(data=2, banda=2, dimAmostra=11852)
dadosANOVA.d2.b3 <- constroiDadosANOVA(data=2, banda=3, dimAmostra=11852)
dadosANOVA.d2.b4 <- constroiDadosANOVA(data=2, banda=4, dimAmostra=11852)
dadosANOVA.d2.b5 <- constroiDadosANOVA(data=2, banda=5, dimAmostra=11852)
dadosANOVA.d2.b6 <- constroiDadosANOVA(data=2, banda=6, dimAmostra=11852)
dadosANOVA.d3.b1 <- constroiDadosANOVA(data=3, banda=1, dimAmostra=11852)
dadosANOVA.d3.b2 <- constroiDadosANOVA(data=3, banda=2, dimAmostra=11852)
dadosANOVA.d3.b3 <- constroiDadosANOVA(data=3, banda=3, dimAmostra=11852)
dadosANOVA.d3.b4 <- constroiDadosANOVA(data=3, banda=4, dimAmostra=11852)
dadosANOVA.d3.b5 <- constroiDadosANOVA(data=3, banda=5, dimAmostra=11852)
dadosANOVA.d3.b6 <- constroiDadosANOVA(data=3, banda=6, dimAmostra=11852)
dadosANOVA.d4.b1 <- constroiDadosANOVA(data=4, banda=1, dimAmostra=11852)
dadosANOVA.d4.b2 <- constroiDadosANOVA(data=4, banda=2, dimAmostra=11852)
dadosANOVA.d4.b3 <- constroiDadosANOVA(data=4, banda=3, dimAmostra=11852)
dadosANOVA.d4.b4 <- constroiDadosANOVA(data=4, banda=4, dimAmostra=11852)
dadosANOVA.d4.b5 <- constroiDadosANOVA(data=4, banda=5, dimAmostra=11852)
dadosANOVA.d4.b6 <- constroiDadosANOVA(data=4, banda=6, dimAmostra=11852)
dadosANOVA.d5.b1 <- constroiDadosANOVA(data=5, banda=1, dimAmostra=11852)
dadosANOVA.d5.b2 <- constroiDadosANOVA(data=5, banda=2, dimAmostra=11852)
dadosANOVA.d5.b3 <- constroiDadosANOVA(data=5, banda=3, dimAmostra=11852)
dadosANOVA.d5.b4 <- constroiDadosANOVA(data=5, banda=4, dimAmostra=11852)
dadosANOVA.d5.b5 <- constroiDadosANOVA(data=5, banda=5, dimAmostra=11852)
dadosANOVA.d5.b6 <- constroiDadosANOVA(data=5, banda=6, dimAmostra=11852)
dadosANOVA.d6.b1 <- constroiDadosANOVA(data=6, banda=1, dimAmostra=11852)
dadosANOVA.d6.b2 <- constroiDadosANOVA(data=6, banda=2, dimAmostra=11852)
dadosANOVA.d6.b3 <- constroiDadosANOVA(data=6, banda=3, dimAmostra=11852)
dadosANOVA.d6.b4 <- constroiDadosANOVA(data=6, banda=4, dimAmostra=11852)
dadosANOVA.d6.b5 <- constroiDadosANOVA(data=6, banda=5, dimAmostra=11852)
dadosANOVA.d6.b6 <- constroiDadosANOVA(data=6, banda=6, dimAmostra=11852)
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 2) Hierarchical ANOVA
# df$x é o factor dominante: cultura
# df$y é o factor subordinado: parcela
# df$z é a resposta (reflectância)
# Hand-rolled nested two-factor ANOVA: crop (dominant) with parcels nested
# inside crops; response is reflectance. Columns of `df` are positional
# (response, dominant factor, nested factor) and are renamed z / x / y.
# `data` and `banda` are date/band identifiers passed through unchanged.
# Returns list(culturas, parcelas, residuos, data, banda), each stratum as
# c(degrees of freedom, SS, MS), and prints the ANOVA rows.
# NOTE(review): x must be a factor (levels()/nlevels() are used).
decomposition.hierarq.anova<-function(df, data, banda)
{
names(df) <- c("z","x","y")
# clean NAs
df<-df[!is.na(df$z) & !is.na(df$x) & !is.na(df$y),]
sqa<-0
sqre<-0
crops.nparcels<-c()
great.mean<-mean(df$z)
for (levC in levels(df$x)) #crops
{
pixels.diff<-c()
crop.mean<-mean(df$z[df$x==levC])
crop.length<-sum(df$x==levC);#print(crop.length)
crop.nparcels<-length(unique(as.character(df$y[df$x==levC])))
crops.nparcels<-c(crops.nparcels,crop.nparcels)
# Between-crop SS: n_i * (crop mean - grand mean)^2, accumulated per crop.
sqa<-sqa+crop.length*(crop.mean-great.mean)^2 #
for (levP in unique(as.character(df$y[df$x==levC]))) # parcels
{
# Residual SS: pixel deviations from the parcel mean.
pixels.diff<-df$z[df$x==levC & df$y==levP]-mean(df$z[df$x==levC & df$y==levP])
sqre<-sqre+sum(pixels.diff^2)
}
}
#print(crops.nparcels) #b_i's
N<-length(df$z)
gla<-(nlevels(df$x)-1) # a-1
glb<-(sum(crops.nparcels-1)) #\sum (b_i -1)
gle<-(N-sum(crops.nparcels)) #n-\sum b_i
# Parcel SS obtained by difference: SS_total - SS_crop - SS_residual.
sqt<-var(df$z)*(N-1)
sqb<-sqt-sqa-sqre
qma<-sqa/gla
qmb<-sqb/glb
qmre<-sqre/gle
print(paste(gla,round(sqa,5),round(qma,5),round(qma/qmre,3)))
print(paste(glb,round(sqb,5),round(qmb,5),round(qmb/qmre,3)))
print(paste(gle,round(sqre,5),round(qmre,5)))
return(list(culturas=c(gla,sqa,qma),parcelas=c(glb,sqb,qmb),residuos=c(gle,sqre,qmre), data=data, banda=banda))
}
# Run the hierarchical decomposition for all 36 (date, band) pairs.
decomp.d1.b1 <- decomposition.hierarq.anova(dadosANOVA.d1.b1, 1, 1)
decomp.d1.b2 <- decomposition.hierarq.anova(dadosANOVA.d1.b2, 1, 2)
decomp.d1.b3 <- decomposition.hierarq.anova(dadosANOVA.d1.b3, 1, 3)
decomp.d1.b4 <- decomposition.hierarq.anova(dadosANOVA.d1.b4, 1, 4)
decomp.d1.b5 <- decomposition.hierarq.anova(dadosANOVA.d1.b5, 1, 5)
decomp.d1.b6 <- decomposition.hierarq.anova(dadosANOVA.d1.b6, 1, 6)
decomp.d2.b1 <- decomposition.hierarq.anova(dadosANOVA.d2.b1, 2, 1)
decomp.d2.b2 <- decomposition.hierarq.anova(dadosANOVA.d2.b2, 2, 2)
decomp.d2.b3 <- decomposition.hierarq.anova(dadosANOVA.d2.b3, 2, 3)
decomp.d2.b4 <- decomposition.hierarq.anova(dadosANOVA.d2.b4, 2, 4)
decomp.d2.b5 <- decomposition.hierarq.anova(dadosANOVA.d2.b5, 2, 5)
decomp.d2.b6 <- decomposition.hierarq.anova(dadosANOVA.d2.b6, 2, 6)
decomp.d3.b1 <- decomposition.hierarq.anova(dadosANOVA.d3.b1, 3, 1)
decomp.d3.b2 <- decomposition.hierarq.anova(dadosANOVA.d3.b2, 3, 2)
decomp.d3.b3 <- decomposition.hierarq.anova(dadosANOVA.d3.b3, 3, 3)
decomp.d3.b4 <- decomposition.hierarq.anova(dadosANOVA.d3.b4, 3, 4)
decomp.d3.b5 <- decomposition.hierarq.anova(dadosANOVA.d3.b5, 3, 5)
decomp.d3.b6 <- decomposition.hierarq.anova(dadosANOVA.d3.b6, 3, 6)
decomp.d4.b1 <- decomposition.hierarq.anova(dadosANOVA.d4.b1, 4, 1)
decomp.d4.b2 <- decomposition.hierarq.anova(dadosANOVA.d4.b2, 4, 2)
decomp.d4.b3 <- decomposition.hierarq.anova(dadosANOVA.d4.b3, 4, 3)
decomp.d4.b4 <- decomposition.hierarq.anova(dadosANOVA.d4.b4, 4, 4)
decomp.d4.b5 <- decomposition.hierarq.anova(dadosANOVA.d4.b5, 4, 5)
decomp.d4.b6 <- decomposition.hierarq.anova(dadosANOVA.d4.b6, 4, 6)
decomp.d5.b1 <- decomposition.hierarq.anova(dadosANOVA.d5.b1, 5, 1)
decomp.d5.b2 <- decomposition.hierarq.anova(dadosANOVA.d5.b2, 5, 2)
decomp.d5.b3 <- decomposition.hierarq.anova(dadosANOVA.d5.b3, 5, 3)
decomp.d5.b4 <- decomposition.hierarq.anova(dadosANOVA.d5.b4, 5, 4)
decomp.d5.b5 <- decomposition.hierarq.anova(dadosANOVA.d5.b5, 5, 5)
decomp.d5.b6 <- decomposition.hierarq.anova(dadosANOVA.d5.b6, 5, 6)
decomp.d6.b1 <- decomposition.hierarq.anova(dadosANOVA.d6.b1, 6, 1)
decomp.d6.b2 <- decomposition.hierarq.anova(dadosANOVA.d6.b2, 6, 2)
decomp.d6.b3 <- decomposition.hierarq.anova(dadosANOVA.d6.b3, 6, 3)
decomp.d6.b4 <- decomposition.hierarq.anova(dadosANOVA.d6.b4, 6, 4)
decomp.d6.b5 <- decomposition.hierarq.anova(dadosANOVA.d6.b5, 6, 5)
decomp.d6.b6 <- decomposition.hierarq.anova(dadosANOVA.d6.b6, 6, 6)
# Collect the 36 decompositions in one list (dates 1..6 x bands 1..6).
listaANOVA <- list(decomp.d1.b1, decomp.d1.b2, decomp.d1.b3, decomp.d1.b4, decomp.d1.b5, decomp.d1.b6,
decomp.d2.b1, decomp.d2.b2, decomp.d2.b3, decomp.d2.b4, decomp.d2.b5, decomp.d2.b6,
decomp.d3.b1, decomp.d3.b2, decomp.d3.b3, decomp.d3.b4, decomp.d3.b5, decomp.d3.b6,
decomp.d4.b1, decomp.d4.b2, decomp.d4.b3, decomp.d4.b4, decomp.d4.b5, decomp.d4.b6,
decomp.d5.b1, decomp.d5.b2, decomp.d5.b3, decomp.d5.b4, decomp.d5.b5, decomp.d5.b6,
decomp.d6.b1, decomp.d6.b2, decomp.d6.b3, decomp.d6.b4, decomp.d6.b5, decomp.d6.b6)
# Small hand-made sanity check: 10 random responses, 2 crops x several parcels.
# NOTE(review): parcel "C" spans rows of both crops "a" and "b" here; the
# function treats parcel labels per crop, so "C" counts once in each crop.
df<-data.frame(z=runif(10),x=as.factor(c(rep("a",5), rep("b",5))),y=as.factor(c(rep("A",2), rep("B",2),rep("C",2),rep("D",4))) )
j1 <- decomposition.hierarq.anova(df,1,1)
j2 <- decomposition.hierarq.anova(df,2,2)
j3 <- decomposition.hierarq.anova(df,2,3)
j <- list(j1,j2,j3)
names(j) <- c("a", "b", "c")
#------------------------------------------------------------------
# OLD VERSION WITH 1 FACTOR ONLY
# One-way ANOVA decomposition: df$parcela is the factor (parcel) and
# df$reflectancias is the response (reflectance).
#
# df    : data frame with columns `reflectancias` (numeric response) and
#         `parcela` (factor).
# data  : image date index, passed through to the result.
# banda : spectral band index, passed through to the result.
#
# Returns c(data, banda, QMF, QMRE): the between-parcel and residual mean
# squares, tagged with the date/band identifiers.
decomposition.one.way.anova <- function(df, data, banda)
{
  # Rows usable for the decomposition: both response and factor observed.
  # BUG FIX: the original computed sqt with var() without na.rm and counted
  # NA-response pixels in n.pixels, so any NA made qmre NA and biased sqf.
  # All quantities are now computed consistently on the complete rows.
  ok <- !is.na(df$reflectancias) & !is.na(df$parcela)
  great.mean <- mean(df$reflectancias[ok])
  means.parcels <- c()
  n.pixels <- c()
  for (lev in levels(df$parcela))
  {
    sel <- ok & df$parcela == lev
    means.parcels <- c(means.parcels, mean(df$reflectancias[sel]))
    n.pixels <- c(n.pixels, sum(sel))
  }
  # Between-parcel SS; na.rm guards against levels with zero usable pixels
  # (their mean is NaN but they contribute 0 weight).
  sqf <- sum(n.pixels * (means.parcels - great.mean)^2, na.rm = TRUE)
  # Total SS from the sample variance of the usable rows.
  sqt <- var(df$reflectancias[ok]) * (sum(ok) - 1)
  qmf <- sqf / (nlevels(df$parcela) - 1)
  qmre <- (sqt - sqf) / (sum(ok) - nlevels(df$parcela))
  #print(paste("QMF=",qmf))
  #print(paste("QMRE=",qmre))
  #print(paste("F=",qmf/qmre))
  return(c(data, banda, qmf, qmre))
}
# Run the one-way decomposition for all 36 (date, band) pairs.
# NOTE: these assignments overwrite the hierarchical decomp.* objects above;
# listaANOVA must be built before this section runs.
decomp.d1.b1 <- decomposition.one.way.anova(dadosANOVA.d1.b1, 1, 1)
decomp.d1.b2 <- decomposition.one.way.anova(dadosANOVA.d1.b2, 1, 2)
decomp.d1.b3 <- decomposition.one.way.anova(dadosANOVA.d1.b3, 1, 3)
decomp.d1.b4 <- decomposition.one.way.anova(dadosANOVA.d1.b4, 1, 4)
decomp.d1.b5 <- decomposition.one.way.anova(dadosANOVA.d1.b5, 1, 5)
decomp.d1.b6 <- decomposition.one.way.anova(dadosANOVA.d1.b6, 1, 6)
decomp.d2.b1 <- decomposition.one.way.anova(dadosANOVA.d2.b1, 2, 1)
decomp.d2.b2 <- decomposition.one.way.anova(dadosANOVA.d2.b2, 2, 2)
decomp.d2.b3 <- decomposition.one.way.anova(dadosANOVA.d2.b3, 2, 3)
decomp.d2.b4 <- decomposition.one.way.anova(dadosANOVA.d2.b4, 2, 4)
decomp.d2.b5 <- decomposition.one.way.anova(dadosANOVA.d2.b5, 2, 5)
decomp.d2.b6 <- decomposition.one.way.anova(dadosANOVA.d2.b6, 2, 6)
decomp.d3.b1 <- decomposition.one.way.anova(dadosANOVA.d3.b1, 3, 1)
decomp.d3.b2 <- decomposition.one.way.anova(dadosANOVA.d3.b2, 3, 2)
decomp.d3.b3 <- decomposition.one.way.anova(dadosANOVA.d3.b3, 3, 3)
decomp.d3.b4 <- decomposition.one.way.anova(dadosANOVA.d3.b4, 3, 4)
decomp.d3.b5 <- decomposition.one.way.anova(dadosANOVA.d3.b5, 3, 5)
decomp.d3.b6 <- decomposition.one.way.anova(dadosANOVA.d3.b6, 3, 6)
decomp.d4.b1 <- decomposition.one.way.anova(dadosANOVA.d4.b1, 4, 1)
decomp.d4.b2 <- decomposition.one.way.anova(dadosANOVA.d4.b2, 4, 2)
decomp.d4.b3 <- decomposition.one.way.anova(dadosANOVA.d4.b3, 4, 3)
decomp.d4.b4 <- decomposition.one.way.anova(dadosANOVA.d4.b4, 4, 4)
decomp.d4.b5 <- decomposition.one.way.anova(dadosANOVA.d4.b5, 4, 5)
decomp.d4.b6 <- decomposition.one.way.anova(dadosANOVA.d4.b6, 4, 6)
decomp.d5.b1 <- decomposition.one.way.anova(dadosANOVA.d5.b1, 5, 1)
decomp.d5.b2 <- decomposition.one.way.anova(dadosANOVA.d5.b2, 5, 2)
decomp.d5.b3 <- decomposition.one.way.anova(dadosANOVA.d5.b3, 5, 3)
decomp.d5.b4 <- decomposition.one.way.anova(dadosANOVA.d5.b4, 5, 4)
decomp.d5.b5 <- decomposition.one.way.anova(dadosANOVA.d5.b5, 5, 5)
decomp.d5.b6 <- decomposition.one.way.anova(dadosANOVA.d5.b6, 5, 6)
decomp.d6.b1 <- decomposition.one.way.anova(dadosANOVA.d6.b1, 6, 1)
decomp.d6.b2 <- decomposition.one.way.anova(dadosANOVA.d6.b2, 6, 2)
decomp.d6.b3 <- decomposition.one.way.anova(dadosANOVA.d6.b3, 6, 3)
decomp.d6.b4 <- decomposition.one.way.anova(dadosANOVA.d6.b4, 6, 4)
decomp.d6.b5 <- decomposition.one.way.anova(dadosANOVA.d6.b5, 6, 5)
decomp.d6.b6 <- decomposition.one.way.anova(dadosANOVA.d6.b6, 6, 6)
# Stack the one-way results: one row per (date, band), columns data/band/QMF/QMRE.
resultado <- rbind(decomp.d1.b1, decomp.d1.b2, decomp.d1.b3, decomp.d1.b4, decomp.d1.b5, decomp.d1.b6,
decomp.d2.b1, decomp.d2.b2, decomp.d2.b3, decomp.d2.b4, decomp.d2.b5, decomp.d2.b6,
decomp.d3.b1, decomp.d3.b2, decomp.d3.b3, decomp.d3.b4, decomp.d3.b5, decomp.d3.b6,
decomp.d4.b1, decomp.d4.b2, decomp.d4.b3, decomp.d4.b4, decomp.d4.b5, decomp.d4.b6,
decomp.d5.b1, decomp.d5.b2, decomp.d5.b3, decomp.d5.b4, decomp.d5.b5, decomp.d5.b6,
decomp.d6.b1, decomp.d6.b2, decomp.d6.b3, decomp.d6.b4, decomp.d6.b5, decomp.d6.b6)
# Append the F statistic (QMF/QMRE) and the residual RMSE (sqrt(QMRE)).
resultado <- cbind(resultado, resultado[,3]/resultado[,4], sqrt(resultado[,4]))
colnames(resultado) <- c("Data", "Banda", "QMF", "QMRE", "F", "RMSE")
# For latex output: one "F, RMSE" string per band (rows), one column per date.
round(resultado[1:6,5:6][,"F"], 1)
res.d1 <- paste0(round(resultado[1:6,5:6][,'F'], 1), ", ", round(resultado[1:6,5:6][,'RMSE'], 3))
res.d2 <- paste0(round(resultado[7:12,5:6][,'F'], 1), ", ", round(resultado[7:12,5:6][,'RMSE'], 3))
res.d3 <- paste0(round(resultado[13:18,5:6][,'F'], 1), ", ", round(resultado[13:18,5:6][,'RMSE'], 3))
res.d4 <- paste0(round(resultado[19:24,5:6][,'F'], 1), ", ", round(resultado[19:24,5:6][,'RMSE'], 3))
res.d5 <- paste0(round(resultado[25:30,5:6][,'F'], 1), ", ", round(resultado[25:30,5:6][,'RMSE'], 3))
res.d6 <- paste0(round(resultado[31:36,5:6][,'F'], 1), ", ", round(resultado[31:36,5:6][,'RMSE'], 3))
res.final <- cbind(res.d1, res.d2, res.d3, res.d4, res.d5, res.d6)
res.final.xtab <- xtable(res.final)
# BUG FIX: xtable alignment needs ncol(x)+1 entries (the extra one is for the
# row names); rep("l", 6) errors for the 6-column res.final.
align(res.final.xtab) <- rep("l", 7)
# Use print() dispatch and spelled-out logicals (T/F are reassignable).
print(res.final.xtab, booktabs=TRUE, include.rownames=FALSE)
save(resultado, file="resultadoDecompANOVA.object")
# BUG FIX: the original line was `RMSE <- sqrt()`, which errors (sqrt requires
# an argument); the intended value is the residual RMSE per (date, band).
RMSE <- sqrt(resultado[, "QMRE"])
# Free the largest intermediate data sets.
rm(dadosANOVA.d6.b1)
rm(dadosANOVA.d6.b2)
rm(dadosANOVA.d6.b3)
rm(dadosANOVA.d6.b4)
rm(dadosANOVA.d6.b5)
rm(dadosANOVA.d6.b6)
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 3)
# NOTE(review): dimAmostra=11582 here vs 11852 everywhere above -- possible
# digit transposition; confirm which sample size is intended.
MEGAdadosANOVA <- constroiDadosANOVA(data=5, banda=4, dimAmostra=11582)
somaDeQuadrados(MEGAdadosANOVA)
# NOTE(review): `dadosANOVA` (no date/band suffix) is not created in this
# section; presumably defined earlier in the file -- confirm.
somaDeQuadrados(dadosANOVA)
#%%%%%%%%%%%%%%%%%%%%%%%%%%
# APPROACH 4)
# Test if the crop affects combinations of image/date and NDVIs
# One-way ANOVA of each spectral/index column (columns 7+) against the crop.
FValues <- c()
pValues <- c()
efeito <- c()
for(i in 7:length(listaTodasParcelas))
{
parcelas.aov <- aov(listaTodasParcelas[,i] ~ as.factor(listaTodasParcelas$cultura))
FV <- summary(parcelas.aov)[[1]][["F value"]][[1]]
pV <- summary(parcelas.aov)[[1]][["Pr(>F)"]][[1]]
FValues <- c(FValues,FV)
pValues <- c(pValues,pV)
if(pV <= 0.05) ef <- 1 else ef <- 0  # efeito = 1 when crop effect is significant at 5%
efeito <- c(efeito,ef)
}
nn <- colnames(listaTodasParcelas[,7:length(listaTodasParcelas)])
# Rank the variables by F value (strongest crop effect first).
resultadoANOVA <- data.frame(cbind(nn,FValues,pValues,efeito))
resultadoANOVA$FValues <- as.numeric(as.character(resultadoANOVA$FValues))
resultadoANOVA <- resultadoANOVA[order(-resultadoANOVA$FValues),]; resultadoANOVA
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 4. VARIABLE SELECTION ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 4.1 WHOLE DATASET
# Preparing data for the classifier in the next step. Only spectral signatures for the first 6 dates
dadosClassificadores <- listaTodasParcelas[,c(5,7:42)]
dadosClassificadores$cultura <- as.factor(dadosClassificadores$cultura)
# Cap all feature values at 1 (values above 1 are clipped).
dadosClassificadores[,-1][dadosClassificadores[,-1] > 1] <- 1
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 4.2 PARTIAL DATASET (DATASET 2) (This is where variable selection happens)
# Using trim.matrix from subselect package: discards variables whose
# correlation matrix contribution falls below the exclusion criterion.
dadosTrim <- dadosClassificadores
criterioExclusaoVars <- 0.02
classTrim <- trim.matrix(cor(dadosTrim[,-1]), criterioExclusaoVars);classTrim
# NOTE(review): classTrimOLD is not defined in this section; this comparison
# looks like a leftover interactive check against a previous run -- confirm.
classTrim$names.discarded == classTrimOLD$names.discarded
varsRetirar <- classTrim$numbers.discarded+1 #+1 because in the original data frame, col 1 is for the crop
varsSobram <- 1:length(dadosTrim)
varsSobram <- varsSobram[! varsSobram %in% varsRetirar]
# Preparing DATASET 2 (after variable selection)
dadosClassificadoresSub <- dadosTrim[,varsSobram]
dadosClassificadoresSub$cultura <- as.factor(dadosClassificadoresSub$cultura)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 5. 6. 7. CHOICE/TRAINING/VALIDATION OF THE CLASSIFIERS ####
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 5.1 KNN
#%%%%%%%%%%%%%%%%%%%%%%%%
# DATASET 1
# Tuning: repeat tune.knn (e1071) 10 times; each run appends a column of
# c(best k, CV error for k = 1..20) -- hence the 21-row result matrix.
resTune.comp.knn <- matrix(nrow=21, ncol=0)
for(i in 1:10)
{
print(i)
KNN.tune.comp <- tune.knn(dadosClassificadores[,-1], dadosClassificadores[,1], k=1:20)
k <- KNN.tune.comp[1][[1]][1,1]  # best k of this tuning run
erros <- KNN.tune.comp$performances$error
resTune.comp.knn <- cbind(resTune.comp.knn, c(k, erros))
}
# RESULT: use k=7, because it produces the lowest error
# Cross validation (project helper validacaoCruzada; presumably lambda is the
# training fraction -- confirm against functions.R).
KNN.comp.cruz <- validacaoCruzada(n = 10, tipo = 'KNN', dados = dadosClassificadores, lambda = 0.8, k = 7)
KNN.comp.cruz$result$correcTot
#%%%%%%%%%%%%%%%%%%%%%%%%
#DATASET 2
# Tuning: same protocol on the reduced (post-selection) dataset.
resTune.sub.knn <- matrix(nrow=21, ncol=0)
for(i in 1:10)
{
print(i)
KNN.tune.sub <- tune.knn(dadosClassificadoresSub[,-1], dadosClassificadoresSub[,1], k=1:20)
k <- KNN.tune.sub[1][[1]][1,1]
erros <- KNN.tune.sub$performances$error
resTune.sub.knn <- cbind(resTune.sub.knn, c(k, erros))
}
# RESULT: use k=7, because it produces the lowest error
# Cross validation
KNN.sub.cruz <- validacaoCruzada(n = 10, tipo = 'KNN', dados = dadosClassificadoresSub, lambda = 0.8, k = 7)
KNN.sub.cruz$result$correcTot
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# 5.2 SVM
#%%%%%%%%%%%%%%%%%%%%%%%%
# DATASET 1
# Tuning: grid search over (gamma, cost); store the best CV error per cell.
rGamma <- c(0.4, 0.6, 0.8, 1, 1.2, 1.4)
rCost <- c(0.5, 1, 1.5, 2, 2.5, 3)
resTune.comp.v1 <- matrix(ncol = length(rCost), nrow = length(rGamma))
for(i in 1:length(rGamma))
for(j in 1:length(rCost))
{
print(paste0("i = ", i, " and j = ", j))
res <- SVM.tune.comp <- tune.svm(dadosClassificadores[,-1], dadosClassificadores[,1], gamma = rGamma[i], cost = rCost[j])
resTune.comp.v1[i,j] <- res$best.performance
}
colnames(resTune.comp.v1) <- rCost
rownames(resTune.comp.v1) <- rGamma
min(resTune.comp.v1)  # best (lowest) CV error on the grid
# Cross validation with the selected hyper-parameters.
SVM.comp.cruz <- validacaoCruzada(n = 10, tipo = 'SVM', dados = dadosClassificadores, lambda = 0.8, gamma = 0.4, cost = 1.5)
SVM.comp.cruz$result$correcTot
#%%%%%%%%%%%%%%%%%%%%%%%%
#DATASET 2
# Tuning: same grid on the reduced dataset.
rGamma <- c(0.4, 0.6, 0.8, 1, 1.2, 1.4)
rCost <- c(0.5, 1, 1.5, 2, 2.5, 3)
resTune.sub.v1 <- matrix(ncol = length(rCost), nrow = length(rGamma))
for(i in 1:length(rGamma))
for(j in 1:length(rCost))
{
print(paste0("i = ", i, " and j = ", j))
res <- SVM.tune.sub <- tune.svm(dadosClassificadoresSub[,-1], dadosClassificadoresSub[,1], gamma = rGamma[i], cost = rCost[j])
resTune.sub.v1[i,j] <- res$best.performance
}
colnames(resTune.sub.v1) <- rCost
rownames(resTune.sub.v1) <- rGamma
min(resTune.sub.v1)
# Cross validation
SVM.sub.cruz <- validacaoCruzada(n = 10, tipo = 'SVM', dados = dadosClassificadoresSub, lambda = 0.8, gamma = 0.4, cost = 2.5)
#%%%%%%%%%%%%%%%%%%%%%%%%
# METHOD CALIBRATION
# q_j estimation. Follow the estimaQ function in functions.R
SVM.sub.qi <- estimaQ(classificador = SVM.sub.cruz$classificador, lambdas = c(0.6, 0.7, 0.8, 0.9, 0.95))
# CALIBRATION ASSESSMENT
# Follow the assessmentMetodo function in functions.R
SVM.sub.assess <- assessmentMetodo(classificador = SVM.sub.cruz$classificador, qis = SVM.sub.qi)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Final data structures: output for paper
#%%%%%%%%%%%%%%%%%%%%%%%%
# Data.frame containing all parcels and data from the calibration method.
# NOTE(review): SVM.sub.analise is not created in this section; presumably an
# earlier calibration result object -- confirm.
comp1 <- SVM.sub.analise$resultClass
comp1$parcela <- as.character(comp1$parcela)
comp1 <- comp1[order(comp1$parcela, decreasing = TRUE),]
# Align the full parcel table by sorting on the same key ordering.
comp2 <- listaTodasParcelas
comp2$id <- as.character(comp2$id)
comp2 <- comp2[order(comp2$id, decreasing = TRUE),]
# Flag whether the top-1 predicted class matches the ground truth
# (assigning strings coerces the logical vector to character).
igual <- comp1$verdade == comp1$class1
igual[igual == TRUE] <- 'Igual'
igual[igual == FALSE] <- 'Diferente'
# q_j acceptance threshold of each observation's predicted class (lambda 0.8).
qj <- c()
for(i in 1:nrow(comp1)) qj[i] <- SVM.sub.analise$l0.8$qi[comp1$class1[i]]
# Accept the prediction when its probability reaches the class threshold.
resultado <- comp1$prob1 >= qj
resultado[resultado == TRUE] <- 'Aceite'
resultado[resultado == FALSE] <- 'Rejeitado'
dadosFinais <- cbind(comp1, igual, qj, resultado, area=comp2$area)
head(dadosFinais)
|
# plot4: total US PM2.5 emissions from coal combustion per year.
library(ggplot2)
# NEI: PM2.5 emissions records; SCC: source classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Coal-combustion sources: "coal" in SCC level four AND "comb" in level one.
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
combustion <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coal_combustion <- (combustion & coal)
SCC_combustion <- SCC[coal_combustion,]$SCC
NEI_combustion <- NEI[NEI$SCC %in% SCC_combustion,]
# Bar chart of emissions per year (values scaled by 10^5), saved as PNG.
png("plot4.png",width=480,height=480,units="px")
ggpic <- ggplot(NEI_combustion,aes(factor(year),Emissions/10^5)) +
# NOTE(review): guides(fill=TRUE) is deprecated in current ggplot2, and fill
# is a fixed (unmapped) aesthetic here, so the guide call has no visible effect.
geom_bar(stat="identity",fill="blue",width=0.5) +
theme_bw() + guides(fill=TRUE) +
labs(x="year", y=expression("Total Emissions, PM"[2.5])) +
labs(title=expression("Emissions from Coal Combustion for the US")
)
print(ggpic)
dev.off()
|
/plot4.R
|
no_license
|
wyassue/exploratory-data-analysis-week4
|
R
| false
| false
| 730
|
r
|
# Duplicate copy of plot4.R (dataset concatenation artifact): total US PM2.5
# emissions from coal combustion per year, rendered to plot4.png.
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Coal-combustion sources: "coal" in SCC level four AND "comb" in level one.
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
combustion <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coal_combustion <- (combustion & coal)
SCC_combustion <- SCC[coal_combustion,]$SCC
NEI_combustion <- NEI[NEI$SCC %in% SCC_combustion,]
png("plot4.png",width=480,height=480,units="px")
ggpic <- ggplot(NEI_combustion,aes(factor(year),Emissions/10^5)) +
geom_bar(stat="identity",fill="blue",width=0.5) +
theme_bw() + guides(fill=TRUE) +
labs(x="year", y=expression("Total Emissions, PM"[2.5])) +
labs(title=expression("Emissions from Coal Combustion for the US")
)
print(ggpic)
dev.off()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plotLon.R
\docType{methods}
\name{plotLon,Movement-method}
\alias{plotLon,Movement-method}
\title{Longitude plot for a Movement reference class object}
\usage{
\S4method{plotLon}{Movement}(object, plotArgs = list(), args = list(),
add = FALSE, sdArgs = list(col = "grey", border = NA), sd = TRUE, ...)
}
\arguments{
\item{object}{Movement reference class object}
\item{plotArgs}{Arguments to setup background plot}
\item{args}{Arguments for plotting longitude movement data.}
\item{add}{If FALSE a new plot window is created.}
\item{sdArgs}{Arguments for plotting standard errors.}
\item{sd}{Should standard errors be plotted?}
\item{...}{additional arguments}
}
\description{
Longitude plot for a Movement reference class object
}
\author{
Christoffer Moesgaard Albertsen
}
\seealso{
\code{\link{plotLon}}, \code{\link{plotLon,Animal-method}}, \code{\link{plotLon,Observation-method}}
}
|
/man/plotLon-Movement-method.Rd
|
no_license
|
dsjohnson/argosTrack
|
R
| false
| false
| 983
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plotLon.R
\docType{methods}
\name{plotLon,Movement-method}
\alias{plotLon,Movement-method}
\title{Longitude plot for a Movement reference class object}
\usage{
\S4method{plotLon}{Movement}(object, plotArgs = list(), args = list(),
add = FALSE, sdArgs = list(col = "grey", border = NA), sd = TRUE, ...)
}
\arguments{
\item{object}{Movement reference class object}
\item{plotArgs}{Arguments to setup background plot}
\item{args}{Arguments for plotting longitude movement data.}
\item{add}{If FALSE a new plot window is created.}
\item{sdArgs}{Arguments for plotting standard errors.}
\item{sd}{Should standard errors be plotted?}
\item{...}{additional arguments}
}
\description{
Longitude plot for a Movement reference class object
}
\author{
Christoffer Moesgaard Albertsen
}
\seealso{
\code{\link{plotLon}}, \code{\link{plotLon,Animal-method}}, \code{\link{plotLon,Observation-method}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetSlopes.R
\name{getSlopes}
\alias{getSlopes}
\title{Estimate gene specific count-depth relationships}
\usage{
getSlopes(
Data,
SeqDepth = 0,
Tau = 0.5,
FilterCellNum = 10,
ditherCounts = FALSE
)
}
\arguments{
\item{Data}{matrix of un-normalized expression counts. Rows are genes and
columns are samples.}
\item{SeqDepth}{vector of sequencing depths estimated as columns sums of
un-normalized expression matrix.}
\item{Tau}{value of quantile for the quantile regression used to estimate
gene-specific slopes (default is median, Tau = .5 ).}
\item{FilterCellNum}{the number of non-zero expression estimates required to
include a gene in the SCnorm fitting (default = 10).}
\item{ditherCounts}{whether to dither/jitter the counts, may be used for data
with many ties, default is FALSE.}
}
\value{
vector of estimated slopes.
}
\description{
This is the gene-specific fitting function, where a median
(Tau = .5) quantile regression is fit for each gene. Only genes having at
least 10 non-zero expression values are considered.
}
\examples{
data(ExampleSimSCData)
myslopes <- getSlopes(ExampleSimSCData)
}
\author{
Rhonda Bacher
}
|
/man/getSlopes.Rd
|
no_license
|
rhondabacher/SCnorm
|
R
| false
| true
| 1,245
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetSlopes.R
\name{getSlopes}
\alias{getSlopes}
\title{Estimate gene specific count-depth relationships}
\usage{
getSlopes(
Data,
SeqDepth = 0,
Tau = 0.5,
FilterCellNum = 10,
ditherCounts = FALSE
)
}
\arguments{
\item{Data}{matrix of un-normalized expression counts. Rows are genes and
columns are samples.}
\item{SeqDepth}{vector of sequencing depths estimated as columns sums of
un-normalized expression matrix.}
\item{Tau}{value of quantile for the quantile regression used to estimate
gene-specific slopes (default is median, Tau = .5 ).}
\item{FilterCellNum}{the number of non-zero expression estimates required to
include a gene in the SCnorm fitting (default = 10).}
\item{ditherCounts}{whether to dither/jitter the counts, may be used for data
with many ties, default is FALSE.}
}
\value{
vector of estimated slopes.
}
\description{
This is the gene-specific fitting function, where a median
(Tau = .5) quantile regression is fit for each gene. Only genes having at
least 10 non-zero expression values are considered.
}
\examples{
data(ExampleSimSCData)
myslopes <- getSlopes(ExampleSimSCData)
}
\author{
Rhonda Bacher
}
|
# Maximum-likelihood estimate of the Brownian motion variance for a movement
# track, following the leave-one-out construction used in Brownian bridge
# movement models: every other interior location is compared against its
# linear interpolation from the neighbouring fixes.
#
# n.locs         : number of locations in the track.
# time.lag       : time elapsed between consecutive locations.
# location.error : telemetry error for each location.
# x, y           : location coordinates.
# max.lag        : segments whose mean lag reaches this value are skipped.
#
# Returns the variance that minimises the negative log-likelihood,
# searched on [1, 1e6].
brownian.motion.variance <-
function(n.locs, time.lag, location.error, x, y, max.lag){
  # Accumulators for the per-segment likelihood pieces.
  jump.time <- mix <- sq.dev <- used <- err.before <- err.after <- NULL
  idx <- 2
  while (idx < n.locs) {
    if ((time.lag[idx] + time.lag[idx - 1]) / 2 >= max.lag) {
      # Mean lag too large: skip this point and try the next one.
      idx <- idx + 1
    } else {
      used <- c(used, idx)
      total.t <- time.lag[idx] + time.lag[idx - 1]
      jump.time <- c(jump.time, total.t)
      w <- time.lag[idx] / total.t        # interpolation weight alpha
      mix <- c(mix, w)
      # Expected position: linear interpolation between the flanking fixes.
      expected <- c(x[idx - 1], y[idx - 1]) +
        w * (c(x[idx + 1], y[idx + 1]) - c(x[idx - 1], y[idx - 1]))
      d <- c(x[idx], y[idx]) - expected
      sq.dev <- c(sq.dev, d %*% d)        # squared deviation z'z
      err.before <- c(err.before, location.error[idx - 1])
      err.after <- c(err.after, location.error[idx + 1])
      idx <- idx + 2                      # consume the flanking fixes too
    }
  }
  # Negative log-likelihood of the observed deviations for a candidate variance.
  neg.log.lik <- function(var) {
    v <- jump.time * mix * (1 - mix) * var +
      ((1 - mix)^2) * (err.before^2) + (mix^2) * (err.after^2)
    dens <- (1 / (2 * pi * v)) * exp(-sq.dev / (2 * v))
    -sum(log(dens), na.rm = TRUE)
  }
  optimize(neg.log.lik, lower = 1, upper = 1000000)$minimum
}
|
/R/brownian.motion.variance.R
|
no_license
|
cran/BBMM
|
R
| false
| false
| 1,115
|
r
|
# Duplicate copy of brownian.motion.variance (dataset concatenation artifact).
# Estimates the Brownian motion variance by maximum likelihood, comparing
# every other interior fix against its linear interpolation from neighbours.
brownian.motion.variance <-
function(n.locs, time.lag, location.error, x, y, max.lag){
#Creating NULL vectors to store data
T.jump <- alpha <- ztz <- ob <- loc.error.1 <- loc.error.2 <- NULL
i <- 2
while(i < n.locs){
# Skip points whose mean time lag reaches max.lag.
if((time.lag[i]+time.lag[i-1])/2 >= max.lag) {
i = i + 1
} else {
ob <- c(ob, i)
t <- time.lag[i]+time.lag[i-1]
T.jump <- c(T.jump, t)
a <- time.lag[i] / t  # interpolation weight alpha
alpha <- c(alpha, a)
# Expected position: linear interpolation between the flanking fixes.
u <- c(x[i-1], y[i-1]) + a*(c(x[i+1], y[i+1]) - c(x[i-1], y[i-1]))
ztz <- c(ztz, (c(x[i], y[i]) - u)%*%(c(x[i], y[i]) - u))
loc.error.1 <- c(loc.error.1, location.error[i-1])
loc.error.2 <- c(loc.error.2, location.error[i+1])
i <- i + 2  # the flanking fixes are consumed as well
}
}
#Likelihood function for Brownian Motion variance estimation
likelihood <- function(var){
v <- T.jump*alpha*(1-alpha)*var + ((1-alpha)^2)*(loc.error.1^2) +
(alpha^2)*(loc.error.2^2)
l <- (1/(2*pi*v))*exp(-ztz/(2*v))
-sum(log(l), na.rm=TRUE)
}
# Minimise the negative log-likelihood over a fixed search interval.
BMvar <- optimize(likelihood, lower=1, upper=1000000)$minimum
return(BMvar)
}
|
## library(exactRankTests)
# Per-cluster Wilcoxon marker-gene testing for the Allen single-cell data.
# NOTE(review): setwd() in a script is fragile; paths below assume this root.
setwd('/gpfs/group/su/lhgioia/map/')
## allen.dat <- read.table('data/allen/tpm_matrix.csv',sep=',',header=T)
## var.genes <- readRDS('results/allen/variable_genes_log_4k.RDS')
## allen.dat <- allen.dat[,var.genes]
## saveRDS(allen.dat,'data/allen/allen_tpm_variable_genes_log_4k.RDS')
allen.pca <- readRDS('results/allen/pca/allen_50_dim_4k_filtered_irlba.RDS')
tpm.dat <- read.table('data/allen/tpm_matrix.csv',sep=',',header=T)
# Drop zero-variance genes (they cannot discriminate clusters).
gene.vars <- apply(tpm.dat,2,var)
tpm.dat <- tpm.dat[,gene.vars>0]
# Louvain cluster assignments, aligned to the expression matrix rows.
louvain.dat <- readRDS('results/allen/clustering/louvain_pca_filtered_4k_k30.RDS')
cluster.vec <- louvain.dat$membership
names(cluster.vec) <- rownames(allen.pca)
cluster.vec <- cluster.vec[rownames(tpm.dat)]
## pval.mat <- matrix(0,nrow=ncol(tpm.dat),ncol=length(unique(cluster.vec)))
pval.res <- list()
for(i in unique(cluster.vec)){
# One-vs-rest split: cells in cluster i vs all other cells.
group.vec <- rep('Outside',nrow(tpm.dat))
group.vec[cluster.vec==i] <- 'Inside'
pct.express.out <- round(apply(tpm.dat[group.vec=='Outside',,drop=F],2,function(x) sum(x>0) / length(x)),digits=3)
pct.express.in <- round(apply(tpm.dat[group.vec=='Inside',,drop=F],2,function(x) sum(x>0) / length(x)),digits=3) # fraction of cells expressing genes
pct.df <- data.frame(pct.express.out,pct.express.in)
# Keep genes detected in >10% of cells on at least one side (min.pct filter).
pct.max <- apply(pct.df,1,max)
names(pct.max) <- rownames(pct.df)
genes.use <- names(which(x=pct.max > 0.1))
if(length(genes.use) == 0){
print(sprintf('No genes pass min.pct threshold for cluster %d',i))
next
}
# Keep genes whose log mean expression differs by > 0.25 between groups.
out.dat <- apply(tpm.dat[group.vec=='Outside',,drop=F],2,function(x) log(mean(expm1(x)+1)))
in.dat <- apply(tpm.dat[group.vec=='Inside',,drop=F],2,function(x) log(mean(expm1(x)+1)))
total.diff <- out.dat-in.dat
genes.diff <- names(which(abs(total.diff) > 0.25))
genes.use <- intersect(genes.use,genes.diff)
if(length(genes.use) == 0){
print(sprintf('No genes pass logfc.threshold for cluster %d',i))
next
}
# Wilcoxon rank-sum test per retained gene; returns c(p-value, W statistic).
p.val <- tryCatch(
{
## sapply(1:ncol(counts.mat),function(x) {return(wilcox.test(counts.mat[,x] ~ as.factor(group.vec))$p.value)})
apply(tpm.dat[,genes.use,drop=F],2,function(x) {test = wilcox.test(x ~ as.factor(group.vec)); return(c(test$p.value,test$statistic))})
},
error = function(cond) {
print(i)
print(unique(group.vec))
message(cond)
message(sprintf('Only %d entries for cluster %d out of %d cells',sum(cluster.vec==i),i,nrow(tpm.dat)))
# NOTE(review): `break` is not valid inside a tryCatch handler (there is no
# enclosing loop in the handler's frame); if this branch runs it will itself
# raise "no loop for break/next" -- confirm intended behaviour.
break
})
## cluster.markers[[i]] <- data.frame(p.val,row.names=colnames(counts.mat))
names(p.val) <- genes.use
pval.res[[i]] <- p.val
print(sprintf('Completed cluster %d',i))
# Checkpoint results after every cluster.
saveRDS(pval.res,'results/allen/markers/louvain_wilcox_markers.RDS')
}
## saveRDS(cluster.markers,'results/allen/markers/kmeans_wilcox_markers.RDS')
## saveRDS(pval.mat,'results/allen/markers/louvain_wilcox_markers.RDS')
##
## clusters <- clusters[rownames(allen.dat)]
##
## cluster.dat <- list()
##
## for(i in 1:length(unique(clusters))){
##
## group.vec <- rep('Outside',nrow(allen.dat))
## group.vec[clusters==i] <- 'Inside'
##
## p.val <- tryCatch(
## {
## sapply(1:ncol(allen.dat),function(x) {return(wilcox.test(allen.dat[,x] ~ as.factor(group.vec))$p.value)})
##
## },
## error = function(cond) {
## message(cond)
## message(sprintf('Only %d entries for cluster %d out of %d cells',sum(group.vec[clusters==i]),i,nrow(allen.dat)))
##
## break
## })
##
## cluster.dat[[i]] <- data.frame(p.val,row.names=colnames(allen.dat))
## }
##
## saveRDS(cluster.dat,'results/allen/markers/kmeans_8_wilcox.RDS')
|
/src/allen/markers/wilcox_louvain_markers.R
|
no_license
|
SuLab/expression_map
|
R
| false
| false
| 3,744
|
r
|
## library(exactRankTests)
# Duplicate copy of the per-cluster Wilcoxon marker script (dataset
# concatenation artifact). See the notes on the first copy above? No --
# this copy documents itself: one-vs-rest Wilcoxon tests per Louvain cluster.
setwd('/gpfs/group/su/lhgioia/map/')
## allen.dat <- read.table('data/allen/tpm_matrix.csv',sep=',',header=T)
## var.genes <- readRDS('results/allen/variable_genes_log_4k.RDS')
## allen.dat <- allen.dat[,var.genes]
## saveRDS(allen.dat,'data/allen/allen_tpm_variable_genes_log_4k.RDS')
allen.pca <- readRDS('results/allen/pca/allen_50_dim_4k_filtered_irlba.RDS')
tpm.dat <- read.table('data/allen/tpm_matrix.csv',sep=',',header=T)
# Drop zero-variance genes.
gene.vars <- apply(tpm.dat,2,var)
tpm.dat <- tpm.dat[,gene.vars>0]
louvain.dat <- readRDS('results/allen/clustering/louvain_pca_filtered_4k_k30.RDS')
cluster.vec <- louvain.dat$membership
names(cluster.vec) <- rownames(allen.pca)
cluster.vec <- cluster.vec[rownames(tpm.dat)]
## pval.mat <- matrix(0,nrow=ncol(tpm.dat),ncol=length(unique(cluster.vec)))
pval.res <- list()
for(i in unique(cluster.vec)){
group.vec <- rep('Outside',nrow(tpm.dat))
group.vec[cluster.vec==i] <- 'Inside'
pct.express.out <- round(apply(tpm.dat[group.vec=='Outside',,drop=F],2,function(x) sum(x>0) / length(x)),digits=3)
pct.express.in <- round(apply(tpm.dat[group.vec=='Inside',,drop=F],2,function(x) sum(x>0) / length(x)),digits=3) # fraction of cells expressing genes
pct.df <- data.frame(pct.express.out,pct.express.in)
pct.max <- apply(pct.df,1,max)
names(pct.max) <- rownames(pct.df)
# min.pct filter: detected in >10% of cells on at least one side.
genes.use <- names(which(x=pct.max > 0.1))
if(length(genes.use) == 0){
print(sprintf('No genes pass min.pct threshold for cluster %d',i))
next
}
out.dat <- apply(tpm.dat[group.vec=='Outside',,drop=F],2,function(x) log(mean(expm1(x)+1)))
in.dat <- apply(tpm.dat[group.vec=='Inside',,drop=F],2,function(x) log(mean(expm1(x)+1)))
total.diff <- out.dat-in.dat
# log fold-change filter at 0.25.
genes.diff <- names(which(abs(total.diff) > 0.25))
genes.use <- intersect(genes.use,genes.diff)
if(length(genes.use) == 0){
print(sprintf('No genes pass logfc.threshold for cluster %d',i))
next
}
p.val <- tryCatch(
{
## sapply(1:ncol(counts.mat),function(x) {return(wilcox.test(counts.mat[,x] ~ as.factor(group.vec))$p.value)})
apply(tpm.dat[,genes.use,drop=F],2,function(x) {test = wilcox.test(x ~ as.factor(group.vec)); return(c(test$p.value,test$statistic))})
},
error = function(cond) {
print(i)
print(unique(group.vec))
message(cond)
message(sprintf('Only %d entries for cluster %d out of %d cells',sum(cluster.vec==i),i,nrow(tpm.dat)))
# NOTE(review): `break` is invalid inside an error handler; this line would
# itself error if reached.
break
})
## cluster.markers[[i]] <- data.frame(p.val,row.names=colnames(counts.mat))
names(p.val) <- genes.use
pval.res[[i]] <- p.val
print(sprintf('Completed cluster %d',i))
saveRDS(pval.res,'results/allen/markers/louvain_wilcox_markers.RDS')
}
## saveRDS(cluster.markers,'results/allen/markers/kmeans_wilcox_markers.RDS')
## saveRDS(pval.mat,'results/allen/markers/louvain_wilcox_markers.RDS')
##
## clusters <- clusters[rownames(allen.dat)]
##
## cluster.dat <- list()
##
## for(i in 1:length(unique(clusters))){
##
## group.vec <- rep('Outside',nrow(allen.dat))
## group.vec[clusters==i] <- 'Inside'
##
## p.val <- tryCatch(
## {
## sapply(1:ncol(allen.dat),function(x) {return(wilcox.test(allen.dat[,x] ~ as.factor(group.vec))$p.value)})
##
## },
## error = function(cond) {
## message(cond)
## message(sprintf('Only %d entries for cluster %d out of %d cells',sum(group.vec[clusters==i]),i,nrow(allen.dat)))
##
## break
## })
##
## cluster.dat[[i]] <- data.frame(p.val,row.names=colnames(allen.dat))
## }
##
## saveRDS(cluster.dat,'results/allen/markers/kmeans_8_wilcox.RDS')
|
#' Map BCU Indicator
#'
#' Produce a satellite map for a BCU, along with the values for a supplied indicator.
#'
#' @param bcu Indicator data for a single BCU, from \code{data/bcus_list.rds}
#' @param tile Bing tile for a single BCU, from \code{data/bcus_tiles_indicator.rds}
#' @param indicator Indicator being mapped
#' @param indicator_title Longer-form title for indicator being mapped, used in creating the map title. Defaults to \code{indicator}
#' @param bcu_ext Extension factor of the BCU's bounding box, from \code{data/bcus_ext.rds}
#'
#' @return A \code{tmap} object
#' @export
#'
#' @examples
#' bcu_name <- "Aceh"
#' bcu <- readRDS(here::here("data", "bcus_list.rds"))[[bcu_name]]
#' tile <- readRDS(here::here("data", "bcus_tiles_indicator.rds"))[[bcu_name]]
#' bcu_ext <- readRDS(here::here("data", "bcus_ext.rds"))[[bcu_name]]
#'
#' map_indicator(bcu, tile,
#' indicator = "grav_NC",
#' indicator_title = "Fishing: Market Pressure", bcu_ext
#' )
map_indicator <- function(bcu, tile, indicator = NA, indicator_title = indicator, bcu_ext) {
  # A BCU that crosses a tile seam arrives as list(left = , right = );
  # otherwise `tile` is a single image object.
  split_tile <- inherits(tile, "list") & all(names(tile) == c("left", "right"))
  if (split_tile) {
    base_map <- tmap::tm_shape(
      tile[["left"]],
      bbox = square_bbox(bcu, ext = bcu_ext),
      projection = sf::st_crs(bcu)
    ) +
      tmap::tm_rgb() +
      tmap::tm_shape(tile[["right"]]) +
      tmap::tm_rgb()
  } else {
    base_map <- tmap::tm_shape(tile) +
      tmap::tm_rgb()
  }

  if (!is.na(indicator)) {
    # Continuous fill over the BCU polygon; only the min, midpoint and max
    # break values get visible legend labels (the rest are blanked).
    indicator_breaks <- breaks[[indicator]]
    midpoint_idx <- ceiling(length(indicator_breaks) / 2)
    midpoint_value <- indicator_breaks[[midpoint_idx]]
    padding <- rep("", midpoint_idx - 2)
    legend_labels <- c(min(indicator_breaks), padding, midpoint_value, padding, max(indicator_breaks))
    base_map <- base_map +
      tmap::tm_shape(bcu) +
      tmap::tm_fill(
        col = indicator,
        palette = RColorBrewer::brewer.pal(length(indicator_breaks), "OrRd"),
        style = "cont",
        breaks = indicator_breaks,
        title = indicator_title,
        scale = 0.8,
        alpha = 1,
        legend.is.portrait = FALSE,
        labels = legend_labels,
        showNA = FALSE
      ) +
      # The in-map legend is suppressed; generate_map_legend() supplies one.
      tmap::tm_legend(show = FALSE)
  }

  base_map +
    tmap::tm_layout(
      main.title = indicator_title,
      main.title.position = "center",
      main.title.size = 0.75,
      frame = FALSE,
      inner.margins = c(0, 0, 0, 0)
    ) +
    tmap::tmap_options(
      output.dpi = 96,
      show.messages = FALSE
    )
}
# Legend titles (measurement units / counts) keyed by indicator code;
# consumed when labelling the colour scale of each indicator's legend.
legend_titles <- list(
  grav_NC = "Market gravity",
  pop_count = "Number of individuals",
  num_ports = "Number of ports",
  reef_value = "USD (thousands)",
  sediment = "ton / km2",
  nutrient = "ton / km2"
)
#' Generate legend for a BCU indicator map
#'
#' Generate a ggplot2 legend for a BCU indicator map. The ggplot2 legend is more flexible than the tmap legend, so a ggplot2 legend is produced for every map from \code{map_indicator}.
#'
#' @param indicator Indicator being mapped
#'
#' @return A ggplot2 object
#' @export
#'
#' @examples
#' generate_map_legend("grav_NC")
generate_map_legend <- function(indicator) {
  # Break points for this indicator, from the shared module-level lookup.
  legend_breaks <- breaks[[indicator]]
  # Dummy one-point-per-break plot; only its legend is extracted below.
  df <- dplyr::tibble(legend_breaks = legend_breaks) %>%
    dplyr::mutate(id = dplyr::row_number())
  p <- ggplot2::ggplot(df, ggplot2::aes(x = id, y = legend_breaks, colour = legend_breaks)) +
    ggplot2::geom_point() +
    # NOTE(review): this guide_legend() is overridden by the guide_colourbar()
    # set further down for the same colour aesthetic; kept for fidelity.
    ggplot2::guides(col = ggplot2::guide_legend(override.aes = list(shape = 15, size = 5), label.position = "bottom", title.position = "top"))
  # Tick positions sit at the geometric min / half-max / max of the scale,
  # while the printed labels show the actual (log-spaced) break values.
  breaks_midpoint <- ceiling(length(legend_breaks) / 2)
  breaks_midpoint_value <- legend_breaks[[breaks_midpoint]]
  actual_midpoint_value <- max(legend_breaks) / 2
  actual_legend_breaks <- c(min(legend_breaks), actual_midpoint_value, max(legend_breaks))
  legend_labels <- c(min(legend_breaks), breaks_midpoint_value, max(legend_breaks))
  p <- p +
    ggplot2::scale_colour_gradientn(
      colours = RColorBrewer::brewer.pal(length(legend_breaks), "OrRd"),
      breaks = actual_legend_breaks,
      labels = scales::comma(legend_labels, accuracy = 1)
    ) +
    ggplot2::guides(colour = ggplot2::guide_colourbar(ticks = FALSE, title.position = "top"))
  p <- p +
    ggplot2::labs(colour = legend_titles[[indicator]]) +
    ggplot2::theme_minimal(base_size = 9) +
    ggplot2::theme(
      legend.position = "bottom",
      legend.text = ggplot2::element_text(margin = ggplot2::margin(t = -2.5)),
      legend.key = ggplot2::element_blank(),
      legend.title.align = 0.5
    )
  # Extract just the legend grob and return it as a standalone ggplot object.
  # (Removed the unused local `legend_n` from the original.)
  l <- ggpubr::get_legend(p)
  ggpubr::as_ggplot(l)
}
# Colour-scale break points per indicator (mostly log-spaced, starting at 0);
# shared lookup used by both the map fill and the generated legends.
breaks <- list(
  grav_NC = c(0, 10^(-1:6)),
  pop_count = c(0, 10^(2:7)),
  num_ports = 0:6,
  reef_value = c(0, 2.5 * 10^(0:5)),
  sediment = c(0, 10^(2:5)),
  nutrient = c(0, 10^(-2:5))
)
#' Lay out the six indicator maps (plus legends) for a BCU into one image
#'
#' Renders each indicator map with \code{map_indicator} and its matching
#' ggplot2 legend with \code{generate_map_legend}, saves them as PNGs in a
#' temporary directory, then composites them into a 3 x 2 grid of panels with
#' the legend placed directly under each panel.
#'
#' @param bcu Indicator data for a single BCU
#' @param tile_indicator Bing tile (or left/right tile list) for the BCU
#' @param bcu_ext Extension factor of the BCU's bounding box
#'
#' @return Path to the composited PNG file (inside \code{tempdir()})
layout_indicator_maps <- function(bcu, tile_indicator, bcu_ext) {
  size <- 600 # panel size in pixels, saved at 300 dpi
  width_in <- size / 300 # legend width in inches, matching panel width
  height_in <- width_in * 0.3
  indicator_with_title <- dplyr::tribble(
    ~indicator, ~title,
    "grav_NC", "Fishing:\nMarket Pressure",
    "sediment", "Pollution:\nSedimentation",
    "nutrient", "Pollution:\nNitrogen",
    "pop_count", "Coastal Development:\nHuman Population",
    "num_ports", "Industrial Development:\nPorts",
    "reef_value", "Tourism:\nReef Value"
  )
  tmap_dir <- tempdir()
  # Render each map and its legend to PNG files in the temp directory.
  purrr::walk2(
    indicator_with_title[["indicator"]], indicator_with_title[["title"]],
    function(indicator, title) {
      map_res <- map_indicator(bcu, tile_indicator, indicator, title, bcu_ext)
      tmap::tmap_save(map_res, glue::glue("{tmap_dir}/{indicator}.png"), width = size, height = size, dpi = 300)
      legend_res <- generate_map_legend(indicator)
      # BUG FIX: namespaced ggplot2::ggsave — the bare `ggsave` used originally
      # fails unless ggplot2 is attached, and every other call here is namespaced.
      ggplot2::ggsave(glue::glue("{tmap_dir}/{indicator}_legend.png"), legend_res, width = width_in, height = height_in, units = "in")
    }
  )
  # Read the rendered panels and legends back in as magick images.
  tmaps <- indicator_with_title[["indicator"]] %>%
    purrr::map(~ magick::image_read(glue::glue("{tmap_dir}/{.x}.png")))
  names(tmaps) <- indicator_with_title[["indicator"]]
  legends <- indicator_with_title[["indicator"]] %>%
    purrr::map(~ magick::image_read(glue::glue("{tmap_dir}/{.x}_legend.png")))
  names(legends) <- indicator_with_title[["indicator"]]
  panel_size <- magick::image_info(tmaps[[1]])[["height"]]
  legend_height <- magick::image_info(legends[[1]])[["height"]]
  # Blank canvas: 3 panels wide, 2 panel rows + 2 legend rows tall.
  plot_image <- magick::image_blank(width = panel_size * 3, height = panel_size * 2 + legend_height * 2, color = "white")
  # Top row: panels 1-3 with their legends directly beneath.
  width_offset <- 0
  height_offset <- panel_size
  for (i in 1:3) {
    plot_image <- magick::image_composite(plot_image, tmaps[[i]], offset = glue::glue("+{width_offset}+0"))
    plot_image <- magick::image_composite(plot_image, legends[[i]], offset = glue::glue("+{width_offset}+{height_offset}"))
    # Then increment the offset by the width of the panel, so the next panel can use it
    width_offset <- width_offset + panel_size
  }
  # Bottom row: panels 4-6, offset below the first row plus its legend strip.
  width_offset <- 0
  height_offset <- panel_size + legend_height
  for (i in 4:6) {
    plot_image <- magick::image_composite(plot_image, tmaps[[i]], offset = glue::glue("+{width_offset}+{height_offset}"))
    plot_image <- magick::image_composite(plot_image, legends[[i]], offset = glue::glue("+{width_offset}+{height_offset + panel_size}"))
    width_offset <- width_offset + panel_size
  }
  final_file <- glue::glue("{tmap_dir}/indicators_map.png")
  magick::image_write(plot_image, final_file, quality = 72)
  return(final_file)
}
|
/R/map_indicator.R
|
permissive
|
sparkgeo/local-reef-pressures
|
R
| false
| false
| 7,409
|
r
|
#' Map BCU Indicator
#'
#' Produce a satellite map for a BCU, along with the values for a supplied indicator.
#'
#' @param bcu Indicator data for a single BCU, from \code{data/bcus_list.rds}
#' @param tile Bing tile for a single BCU, from \code{data/bcus_tiles_indicator.rds}.
#'   Either a single raster, or a named list with elements \code{left} and
#'   \code{right} when the BCU spans two tiles.
#' @param indicator Indicator being mapped; \code{NA} renders the basemap only.
#' @param indicator_title Longer-form title for indicator being mapped, used in creating the map title. Defaults to \code{indicator}
#' @param bcu_ext Extension factor of the BCU's bounding box, from \code{data/bcus_ext.rds}
#'
#' @return A \code{tmap} object
#' @export
#'
#' @examples
#' bcu_name <- "Aceh"
#' bcu <- readRDS(here::here("data", "bcus_list.rds"))[[bcu_name]]
#' tile <- readRDS(here::here("data", "bcus_tiles_indicator.rds"))[[bcu_name]]
#' bcu_ext <- readRDS(here::here("data", "bcus_ext.rds"))[[bcu_name]]
#'
#' map_indicator(bcu, tile,
#'   indicator = "grav_NC",
#'   indicator_title = "Fishing: Market Pressure", bcu_ext
#' )
map_indicator <- function(bcu, tile, indicator = NA, indicator_title = indicator, bcu_ext) {
  # BUG FIX: use scalar `&&` (short-circuits, so names() is only inspected for
  # lists) and identical() instead of `all(names(tile) == c("left", "right"))`.
  # The original recycled the comparison, and for an *unnamed* list produced
  # all(logical(0)) == TRUE, wrongly taking the two-tile branch.
  if (inherits(tile, "list") && identical(names(tile), c("left", "right"))) {
    # Two-tile BCU: draw both halves; bbox and projection are anchored on the
    # left tile and the BCU geometry itself.
    m <- tmap::tm_shape(tile[["left"]],
      bbox = square_bbox(bcu, ext = bcu_ext),
      projection = sf::st_crs(bcu)
    ) +
      tmap::tm_rgb() +
      tmap::tm_shape(tile[["right"]]) +
      tmap::tm_rgb()
  } else {
    m <- tmap::tm_shape(tile) +
      tmap::tm_rgb()
  }
  if (!is.na(indicator)) {
    # Label only min / midpoint / max of the break sequence; the blank labels
    # keep tmap's continuous legend spacing aligned with the breaks.
    indicator_breaks <- breaks[[indicator]]
    breaks_midpoint <- ceiling(length(indicator_breaks) / 2)
    breaks_midpoint_value <- indicator_breaks[[breaks_midpoint]]
    blank_breaks <- rep("", breaks_midpoint - 2)
    legend_labels <- c(min(indicator_breaks), blank_breaks, breaks_midpoint_value, blank_breaks, max(indicator_breaks))
    legend_style <- "cont"
    m <- m +
      tmap::tm_shape(bcu) +
      tmap::tm_fill(
        col = indicator, palette = RColorBrewer::brewer.pal(length(indicator_breaks), "OrRd"), style = legend_style, breaks = indicator_breaks, title = indicator_title, scale = 0.8, alpha = 1, legend.is.portrait = FALSE, labels = legend_labels, showNA = FALSE
      ) +
      # The tmap legend is suppressed; a ggplot2 legend is produced separately.
      tmap::tm_legend(show = FALSE)
  }
  m +
    tmap::tm_layout(
      main.title = indicator_title,
      main.title.position = "center",
      main.title.size = 0.75,
      frame = FALSE,
      inner.margins = c(0, 0, 0, 0)
    ) +
    tmap::tmap_options(
      output.dpi = 96,
      show.messages = FALSE
    )
}
# Legend titles (measurement units / counts) keyed by indicator code;
# consumed when labelling the colour scale of each indicator's legend.
legend_titles <- list(
  grav_NC = "Market gravity",
  pop_count = "Number of individuals",
  num_ports = "Number of ports",
  reef_value = "USD (thousands)",
  sediment = "ton / km2",
  nutrient = "ton / km2"
)
#' Generate legend for a BCU indicator map
#'
#' Generate a ggplot2 legend for a BCU indicator map. The ggplot2 legend is more flexible than the tmap legend, so a ggplot2 legend is produced for every map from \code{map_indicator}.
#'
#' @param indicator Indicator being mapped
#'
#' @return A ggplot2 object
#' @export
#'
#' @examples
#' generate_map_legend("grav_NC")
generate_map_legend <- function(indicator) {
  # Break points for this indicator, from the shared module-level lookup.
  legend_breaks <- breaks[[indicator]]
  # Dummy one-point-per-break plot; only its legend is extracted below.
  df <- dplyr::tibble(legend_breaks = legend_breaks) %>%
    dplyr::mutate(id = dplyr::row_number())
  p <- ggplot2::ggplot(df, ggplot2::aes(x = id, y = legend_breaks, colour = legend_breaks)) +
    ggplot2::geom_point() +
    # NOTE(review): this guide_legend() is overridden by the guide_colourbar()
    # set further down for the same colour aesthetic; kept for fidelity.
    ggplot2::guides(col = ggplot2::guide_legend(override.aes = list(shape = 15, size = 5), label.position = "bottom", title.position = "top"))
  # Tick positions sit at the geometric min / half-max / max of the scale,
  # while the printed labels show the actual (log-spaced) break values.
  breaks_midpoint <- ceiling(length(legend_breaks) / 2)
  breaks_midpoint_value <- legend_breaks[[breaks_midpoint]]
  actual_midpoint_value <- max(legend_breaks) / 2
  actual_legend_breaks <- c(min(legend_breaks), actual_midpoint_value, max(legend_breaks))
  legend_labels <- c(min(legend_breaks), breaks_midpoint_value, max(legend_breaks))
  p <- p +
    ggplot2::scale_colour_gradientn(
      colours = RColorBrewer::brewer.pal(length(legend_breaks), "OrRd"),
      breaks = actual_legend_breaks,
      labels = scales::comma(legend_labels, accuracy = 1)
    ) +
    ggplot2::guides(colour = ggplot2::guide_colourbar(ticks = FALSE, title.position = "top"))
  p <- p +
    ggplot2::labs(colour = legend_titles[[indicator]]) +
    ggplot2::theme_minimal(base_size = 9) +
    ggplot2::theme(
      legend.position = "bottom",
      legend.text = ggplot2::element_text(margin = ggplot2::margin(t = -2.5)),
      legend.key = ggplot2::element_blank(),
      legend.title.align = 0.5
    )
  # Extract just the legend grob and return it as a standalone ggplot object.
  # (Removed the unused local `legend_n` from the original.)
  l <- ggpubr::get_legend(p)
  ggpubr::as_ggplot(l)
}
# Colour-scale break points per indicator (mostly log-spaced, starting at 0);
# shared lookup used by both the map fill and the generated legends.
breaks <- list(
  grav_NC = c(0, 10^(-1:6)),
  pop_count = c(0, 10^(2:7)),
  num_ports = 0:6,
  reef_value = c(0, 2.5 * 10^(0:5)),
  sediment = c(0, 10^(2:5)),
  nutrient = c(0, 10^(-2:5))
)
#' Lay out the six indicator maps (plus legends) for a BCU into one image
#'
#' Renders each indicator map with \code{map_indicator} and its matching
#' ggplot2 legend with \code{generate_map_legend}, saves them as PNGs in a
#' temporary directory, then composites them into a 3 x 2 grid of panels with
#' the legend placed directly under each panel.
#'
#' @param bcu Indicator data for a single BCU
#' @param tile_indicator Bing tile (or left/right tile list) for the BCU
#' @param bcu_ext Extension factor of the BCU's bounding box
#'
#' @return Path to the composited PNG file (inside \code{tempdir()})
layout_indicator_maps <- function(bcu, tile_indicator, bcu_ext) {
  size <- 600 # panel size in pixels, saved at 300 dpi
  width_in <- size / 300 # legend width in inches, matching panel width
  height_in <- width_in * 0.3
  indicator_with_title <- dplyr::tribble(
    ~indicator, ~title,
    "grav_NC", "Fishing:\nMarket Pressure",
    "sediment", "Pollution:\nSedimentation",
    "nutrient", "Pollution:\nNitrogen",
    "pop_count", "Coastal Development:\nHuman Population",
    "num_ports", "Industrial Development:\nPorts",
    "reef_value", "Tourism:\nReef Value"
  )
  tmap_dir <- tempdir()
  # Render each map and its legend to PNG files in the temp directory.
  purrr::walk2(
    indicator_with_title[["indicator"]], indicator_with_title[["title"]],
    function(indicator, title) {
      map_res <- map_indicator(bcu, tile_indicator, indicator, title, bcu_ext)
      tmap::tmap_save(map_res, glue::glue("{tmap_dir}/{indicator}.png"), width = size, height = size, dpi = 300)
      legend_res <- generate_map_legend(indicator)
      # BUG FIX: namespaced ggplot2::ggsave — the bare `ggsave` used originally
      # fails unless ggplot2 is attached, and every other call here is namespaced.
      ggplot2::ggsave(glue::glue("{tmap_dir}/{indicator}_legend.png"), legend_res, width = width_in, height = height_in, units = "in")
    }
  )
  # Read the rendered panels and legends back in as magick images.
  tmaps <- indicator_with_title[["indicator"]] %>%
    purrr::map(~ magick::image_read(glue::glue("{tmap_dir}/{.x}.png")))
  names(tmaps) <- indicator_with_title[["indicator"]]
  legends <- indicator_with_title[["indicator"]] %>%
    purrr::map(~ magick::image_read(glue::glue("{tmap_dir}/{.x}_legend.png")))
  names(legends) <- indicator_with_title[["indicator"]]
  panel_size <- magick::image_info(tmaps[[1]])[["height"]]
  legend_height <- magick::image_info(legends[[1]])[["height"]]
  # Blank canvas: 3 panels wide, 2 panel rows + 2 legend rows tall.
  plot_image <- magick::image_blank(width = panel_size * 3, height = panel_size * 2 + legend_height * 2, color = "white")
  # Top row: panels 1-3 with their legends directly beneath.
  width_offset <- 0
  height_offset <- panel_size
  for (i in 1:3) {
    plot_image <- magick::image_composite(plot_image, tmaps[[i]], offset = glue::glue("+{width_offset}+0"))
    plot_image <- magick::image_composite(plot_image, legends[[i]], offset = glue::glue("+{width_offset}+{height_offset}"))
    # Then increment the offset by the width of the panel, so the next panel can use it
    width_offset <- width_offset + panel_size
  }
  # Bottom row: panels 4-6, offset below the first row plus its legend strip.
  width_offset <- 0
  height_offset <- panel_size + legend_height
  for (i in 4:6) {
    plot_image <- magick::image_composite(plot_image, tmaps[[i]], offset = glue::glue("+{width_offset}+{height_offset}"))
    plot_image <- magick::image_composite(plot_image, legends[[i]], offset = glue::glue("+{width_offset}+{height_offset + panel_size}"))
    width_offset <- width_offset + panel_size
  }
  final_file <- glue::glue("{tmap_dir}/indicators_map.png")
  magick::image_write(plot_image, final_file, quality = 72)
  return(final_file)
}
|
# Scraping / string / Excel helpers plus best-subset GLM selection.
library('rvest')
library('stringr')
library('xlsx')
library(bestglm)
# Reads in an Excel file. Necessary because manual imputation was made for the
# high-school socioeconomic information.
excel_path <- file.choose()
finalESPN1 <- read.xlsx(excel_path, 1)
# Drop the first column, then the second of the remainder (original column 3).
southESPN <- finalESPN1[, -c(1, 3)]
names(southESPN) <- c(
  "Grade", "Position", "State", "Height", "Weight", "Drafted", "Private",
  "Enrollment", "AllBoys", "Minority", "EconomicDis", "Graduation"
)
head(southESPN["Position"])
# Strip stray whitespace so the state-code matching below is exact.
southESPN$State <- trimws(southESPN$State)
# Region dummies: 1 when the player's high-school state is in the region.
# (Vectorized %in% recodes replace the original elementwise loops; columns are
# added in the same order, producing identical numeric 0/1 indicators.)
northeast_states <- c("ME", "NH", "MA", "RI", "CT", "VT", "NY", "PA", "NJ", "DC", "DE", "MD")
southESPN$Northeast <- as.numeric(southESPN$State %in% northeast_states)
south_states <- c("WV", "VA", "KY", "TN", "NC", "SC", "GA", "AL", "MS", "AR", "FL", "LA")
southESPN$South <- as.numeric(southESPN$State %in% south_states)
southwest_states <- c("TX", "OK", "NM", "AZ")
southESPN$Southwest <- as.numeric(southESPN$State %in% southwest_states)
midwest_states <- c("OH", "IN", "MI", "IL", "MO", "WI", "MN", "IA", "KS", "NE", "SD", "ND")
southESPN$Midwest <- as.numeric(southESPN$State %in% midwest_states)
west_states <- c("CO", "WY", "MT", "ID", "WA", "OR", "CA", "AK", "HI", "UT", "NV")
southESPN$West <- as.numeric(southESPN$State %in% west_states)
# Position-group dummies, collapsing the detailed ESPN position codes.
southESPN$ATH <- as.numeric(southESPN$Position %in% "ATH")
southESPN$QB <- as.numeric(southESPN$Position %in% c("QB", "QB-DT", "QB-PP"))
southESPN$OL <- as.numeric(southESPN$Position %in% c("OC", "OG", "OT"))
southESPN$RB <- as.numeric(southESPN$Position %in% c("RB"))
southESPN$REC <- as.numeric(southESPN$Position %in% c("WR", "TE", "TE-H", "TE-Y"))
southESPN$DL <- as.numeric(southESPN$Position %in% c("DE", "DT"))
southESPN$LB <- as.numeric(southESPN$Position %in% c("ILB", "OLB"))
southESPN$DB <- as.numeric(southESPN$Position %in% c("S", "CB"))
# Drop exact duplicate rows (the same recruit scraped more than once).
southESPN <- southESPN[!duplicated(southESPN),]
# EconomicDis was read as text; coerce to numeric for modeling.
southESPN$EconomicDis=as.numeric(southESPN$EconomicDis)
southESPN=subset(southESPN, select = -c(State, Position))
# Move Drafted (column 4 after the drops) to the end: bestglm expects the
# response variable in the last column.
southESPN=southESPN[c(1:3,5:23,4)]
# Restrict to public schools, complete cases, and the South region, then drop
# columns that are constant or unused in the South-only analysis.
southESPN=subset(southESPN, subset = (Private==0))
southESPN=na.omit(southESPN)
southESPN=subset(southESPN, subset = (South==1))
southESPN=subset(southESPN, select = -c(Northeast, South, Southwest, Midwest, West, Private,AllBoys,ATH))
#############################################################################
# Best-subset logistic regression on the South-region public-school sample.
library(bestglm) # already attached above; harmless if repeated
modelsSouthESPN <- bestglm(southESPN, IC = "AIC", family = binomial, TopModels = 10)
modelsSouthESPN$BestModels
# Candidate models drawn from the top AIC subsets.
fit1 <- glm(Drafted ~ Grade + Height + Minority + OL + RB, data = southESPN, family = "binomial")
fit2 <- glm(Drafted ~ Grade + Height + OL + RB, data = southESPN, family = "binomial")
fit3 <- glm(Drafted ~ Grade + Height + EconomicDis + OL + RB, data = southESPN, family = "binomial")
fit4 <- glm(Drafted ~ Grade + Height + Minority + OL + RB + LB, data = southESPN, family = "binomial")
fit5 <- glm(Drafted ~ Grade + Height + Graduation + OL + RB, data = southESPN, family = "binomial")
# NOTE(review): compareGLM() comes from the rcompanion package, which is never
# attached in this script — confirm it is loaded elsewhere or add library(rcompanion).
compareGLM(fit1, fit2, fit3, fit4, fit5)
summary(fit1)
summary(fit2)
summary(fit3)
summary(fit4)
summary(fit5)
BIC(fit1)
BIC(fit2)
BIC(fit3)
BIC(fit4)
BIC(fit5)
# Fail to reject the null: the smaller (second) model is adequate.
anova(fit4, fit5, test = "Chisq")
# Hosmer-Lemeshow test: a p-value over 0.05 indicates a good fit.
library(ResourceSelection)
hoslem.test(southESPN$Drafted, fitted(fit1), g=6)
hoslem.test(southESPN$Drafted, fitted(fit2), g=5)
hoslem.test(southESPN$Drafted, fitted(fit3), g=6)
hoslem.test(southESPN$Drafted, fitted(fit4), g=7)
hoslem.test(southESPN$Drafted, fitted(fit5), g=7)
# Reject 3 and 4
# BUG FIX: the test type must be passed as `test = "Chisq"`; the original
# positional string was silently dropped by anova.glm, so no chi-square
# test was actually performed on the sequential deviance tables.
anova(fit1, test = "Chisq")
anova(fit2, test = "Chisq")
anova(fit3, test = "Chisq")
anova(fit4, test = "Chisq")
anova(fit5, test = "Chisq")
# Likelihood ratio tests.
# NOTE(review): lrtest() is from lmtest and regTermTest() from survey; neither
# package is attached here — confirm they are loaded elsewhere.
lrtest(fit1, fit2)
lrtest(fit1, fit4)
lrtest(fit2, fit5)
# Wald test
regTermTest(fit2, "RB")
# Fit 2 has the second-lowest AIC and the lowest BIC. Its predictors are
# included in every model and all are significant; by LR test it beats fit1.
# Fit 2 also has better specificity, sensitivity and precision.
# Fitted probabilities on the (training) data for each candidate model.
predicted1 <- predict(fit1, southESPN, type="response")
predicted2 <- predict(fit2, southESPN, type="response")
predicted3 <- predict(fit3, southESPN, type="response")
predicted4 <- predict(fit4, southESPN, type="response")
predicted5 <- predict(fit5, southESPN, type="response")
library(InformationValue)
# optimalCutoff() finds the probability threshold that best separates the 1's
# and 0's, reducing the misclassification error.
optCutOff1 <- optimalCutoff(southESPN$Drafted, predicted1)
optCutOff2 <- optimalCutoff(southESPN$Drafted, predicted2)
optCutOff3 <- optimalCutoff(southESPN$Drafted, predicted3)
optCutOff4 <- optimalCutoff(southESPN$Drafted, predicted4)
optCutOff5 <- optimalCutoff(southESPN$Drafted, predicted5)
# Misclassification error is the percentage mismatch of predicted vs actual,
# irrespective of 1's or 0's. Lower is better.
misClassError(southESPN$Drafted, predicted1, threshold = optCutOff1)
misClassError(southESPN$Drafted, predicted2, threshold = optCutOff2)
misClassError(southESPN$Drafted, predicted3, threshold = optCutOff3)
misClassError(southESPN$Drafted, predicted4, threshold = optCutOff4)
misClassError(southESPN$Drafted, predicted5, threshold = optCutOff5)
# ROC: traces the percentage of true positives accurately predicted as the
# probability cutoff is lowered from 1 to 0. Greater area under the curve is better.
plotROC(southESPN$Drafted, predicted1)
plotROC(southESPN$Drafted, predicted2)
plotROC(southESPN$Drafted, predicted3)
plotROC(southESPN$Drafted, predicted4)
plotROC(southESPN$Drafted, predicted5)
# Per-model classification diagnostics at each model's optimal cutoff.
sensitivity(southESPN$Drafted, predicted1, threshold = optCutOff1)
specificity(southESPN$Drafted, predicted1, threshold = optCutOff1)
precision(southESPN$Drafted, predicted1, threshold = optCutOff1)
confusionMatrix(southESPN$Drafted, predicted1, optCutOff1)
sensitivity(southESPN$Drafted, predicted2, threshold = optCutOff2)
specificity(southESPN$Drafted, predicted2, threshold = optCutOff2)
precision(southESPN$Drafted, predicted2, threshold = optCutOff2)
confusionMatrix(southESPN$Drafted, predicted2, optCutOff2)
sensitivity(southESPN$Drafted, predicted3, threshold = optCutOff3)
specificity(southESPN$Drafted, predicted3, threshold = optCutOff3)
precision(southESPN$Drafted, predicted3, threshold = optCutOff3)
confusionMatrix(southESPN$Drafted, predicted3, optCutOff3)
sensitivity(southESPN$Drafted, predicted4, threshold = optCutOff4)
specificity(southESPN$Drafted, predicted4, threshold = optCutOff4)
precision(southESPN$Drafted, predicted4, threshold = optCutOff4)
confusionMatrix(southESPN$Drafted, predicted4, optCutOff4)
sensitivity(southESPN$Drafted, predicted5, threshold = optCutOff5)
specificity(southESPN$Drafted, predicted5, threshold = optCutOff5)
precision(southESPN$Drafted, predicted5, threshold = optCutOff5)
confusionMatrix(southESPN$Drafted, predicted5, optCutOff5)
#### Final model ####
# Refit the chosen model (same formula as fit2) and report its diagnostics.
southESPN.log=glm(Drafted~ Grade + Height + OL + RB, data = southESPN, family = "binomial")
predicted.southESPN <- predict(southESPN.log, southESPN, type="response")
optCutOff.southESPN <- optimalCutoff(southESPN$Drafted, predicted.southESPN)
misClassError(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
plotROC(southESPN$Drafted, predicted.southESPN)
sensitivity(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
specificity(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
precision(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
confusionMatrix(southESPN$Drafted, predicted.southESPN, optCutOff.southESPN)
#############################################################################################
## Decision Tree
library(tree)
# tree() needs a factor response for classification.
southESPN$Drafted=as.factor(southESPN$Drafted)
trainData = southESPN
# Recode the 0/1 response as readable labels for tree output.
DraftStatus = ifelse(trainData$Drafted==1, "Drafted", "Undrafted")
trainData=data.frame(trainData, DraftStatus)
trainData=subset(trainData, select = -Drafted)
tree.train=tree(DraftStatus ~., data = trainData)
summary(tree.train)
plot(tree.train)
text(tree.train, pretty = 0)
# Training-set confusion matrix: overall accuracy and row-2 precision.
train.pred=predict(tree.train, trainData, type = "class")
cm1=table(predicted=train.pred, actual=trainData$DraftStatus)
(sum(diag(cm1)))/sum(cm1)
cm1[2,2]/(cm1[2,1]+cm1[2,2])
# BUG FIX: `test.pred` and `testData` are never defined anywhere in this
# script, so the cm2 block below errored at runtime. Disabled until a real
# train/test split is added.
# cm2=table(predicted=test.pred, actual=testData$DraftStatus)
# (sum(diag(cm2)))/sum(cm2)
# cm2[2,2]/(cm2[2,1]+cm2[2,2])
# Cost-complexity pruning, selecting size by cross-validated misclassification.
train.cv = cv.tree(tree.train, FUN = prune.misclass)
min_idx=which.min(train.cv$dev)
train.cv$size[min_idx]
par(mfrow = c(1, 1))
plot(train.cv)
# better plot
plot(train.cv$size, train.cv$dev / nrow(trainData), type = "b",
     xlab = "Tree Size", ylab = "CV Misclassification Rate")
train.prune= prune.misclass(tree.train, best = 3)
summary(train.prune)
plot(train.prune)
text(train.prune, pretty = 0)
title(main = "Pruned Classification Tree")
train.prune.pred = predict(train.prune, trainData, type = "class")
cm3=table(predicted = train.prune.pred, actual = trainData$DraftStatus)
cm3
(sum(diag(cm3)))/sum(cm3)
cm3[2,2]/(cm3[2,1]+cm3[2,2])
finalsouthESPNtree=train.prune
finalsouthESPNtree
trainData4=trainData
# Final tree
plot(finalsouthESPNtree)
text(finalsouthESPNtree, pretty = 0)
title(main = "southESPN Classification Tree")
southESPN.pred = predict(finalsouthESPNtree,trainData4, type= "class")
cm4=table(predicted = southESPN.pred, actual = trainData4$DraftStatus)
cm4
(sum(diag(cm4)))/sum(cm4)
cm4[2,2]/(cm4[2,1]+cm4[2,2])
|
/SouthESPN.R
|
no_license
|
NickTice/thesis
|
R
| false
| false
| 9,954
|
r
|
# Scraping / string / Excel helpers plus best-subset GLM selection.
library('rvest')
library('stringr')
library('xlsx')
library(bestglm)
# Reads in an Excel file. Necessary because manual imputation was made for the
# high-school socioeconomic information.
excel_path <- file.choose()
finalESPN1 <- read.xlsx(excel_path, 1)
# Drop the first column, then the second of the remainder (original column 3).
southESPN <- finalESPN1[, -c(1, 3)]
names(southESPN) <- c(
  "Grade", "Position", "State", "Height", "Weight", "Drafted", "Private",
  "Enrollment", "AllBoys", "Minority", "EconomicDis", "Graduation"
)
head(southESPN["Position"])
# Strip stray whitespace so the state-code matching below is exact.
southESPN$State <- trimws(southESPN$State)
# Region dummies: 1 when the player's high-school state is in the region.
# (Vectorized %in% recodes replace the original elementwise loops; columns are
# added in the same order, producing identical numeric 0/1 indicators.)
northeast_states <- c("ME", "NH", "MA", "RI", "CT", "VT", "NY", "PA", "NJ", "DC", "DE", "MD")
southESPN$Northeast <- as.numeric(southESPN$State %in% northeast_states)
south_states <- c("WV", "VA", "KY", "TN", "NC", "SC", "GA", "AL", "MS", "AR", "FL", "LA")
southESPN$South <- as.numeric(southESPN$State %in% south_states)
southwest_states <- c("TX", "OK", "NM", "AZ")
southESPN$Southwest <- as.numeric(southESPN$State %in% southwest_states)
midwest_states <- c("OH", "IN", "MI", "IL", "MO", "WI", "MN", "IA", "KS", "NE", "SD", "ND")
southESPN$Midwest <- as.numeric(southESPN$State %in% midwest_states)
west_states <- c("CO", "WY", "MT", "ID", "WA", "OR", "CA", "AK", "HI", "UT", "NV")
southESPN$West <- as.numeric(southESPN$State %in% west_states)
# Position-group dummies, collapsing the detailed ESPN position codes.
southESPN$ATH <- as.numeric(southESPN$Position %in% "ATH")
southESPN$QB <- as.numeric(southESPN$Position %in% c("QB", "QB-DT", "QB-PP"))
southESPN$OL <- as.numeric(southESPN$Position %in% c("OC", "OG", "OT"))
southESPN$RB <- as.numeric(southESPN$Position %in% c("RB"))
southESPN$REC <- as.numeric(southESPN$Position %in% c("WR", "TE", "TE-H", "TE-Y"))
southESPN$DL <- as.numeric(southESPN$Position %in% c("DE", "DT"))
southESPN$LB <- as.numeric(southESPN$Position %in% c("ILB", "OLB"))
southESPN$DB <- as.numeric(southESPN$Position %in% c("S", "CB"))
# Drop exact duplicate rows (the same recruit scraped more than once).
southESPN <- southESPN[!duplicated(southESPN),]
# EconomicDis was read as text; coerce to numeric for modeling.
southESPN$EconomicDis=as.numeric(southESPN$EconomicDis)
southESPN=subset(southESPN, select = -c(State, Position))
# Move Drafted (column 4 after the drops) to the end: bestglm expects the
# response variable in the last column.
southESPN=southESPN[c(1:3,5:23,4)]
# Restrict to public schools, complete cases, and the South region, then drop
# columns that are constant or unused in the South-only analysis.
southESPN=subset(southESPN, subset = (Private==0))
southESPN=na.omit(southESPN)
southESPN=subset(southESPN, subset = (South==1))
southESPN=subset(southESPN, select = -c(Northeast, South, Southwest, Midwest, West, Private,AllBoys,ATH))
#############################################################################
# Best-subset logistic regression on the South-region public-school sample.
library(bestglm) # already attached above; harmless if repeated
modelsSouthESPN <- bestglm(southESPN, IC = "AIC", family = binomial, TopModels = 10)
modelsSouthESPN$BestModels
# Candidate models drawn from the top AIC subsets.
fit1 <- glm(Drafted ~ Grade + Height + Minority + OL + RB, data = southESPN, family = "binomial")
fit2 <- glm(Drafted ~ Grade + Height + OL + RB, data = southESPN, family = "binomial")
fit3 <- glm(Drafted ~ Grade + Height + EconomicDis + OL + RB, data = southESPN, family = "binomial")
fit4 <- glm(Drafted ~ Grade + Height + Minority + OL + RB + LB, data = southESPN, family = "binomial")
fit5 <- glm(Drafted ~ Grade + Height + Graduation + OL + RB, data = southESPN, family = "binomial")
# NOTE(review): compareGLM() comes from the rcompanion package, which is never
# attached in this script — confirm it is loaded elsewhere or add library(rcompanion).
compareGLM(fit1, fit2, fit3, fit4, fit5)
summary(fit1)
summary(fit2)
summary(fit3)
summary(fit4)
summary(fit5)
BIC(fit1)
BIC(fit2)
BIC(fit3)
BIC(fit4)
BIC(fit5)
# Fail to reject the null: the smaller (second) model is adequate.
anova(fit4, fit5, test = "Chisq")
# Hosmer-Lemeshow test: a p-value over 0.05 indicates a good fit.
library(ResourceSelection)
hoslem.test(southESPN$Drafted, fitted(fit1), g=6)
hoslem.test(southESPN$Drafted, fitted(fit2), g=5)
hoslem.test(southESPN$Drafted, fitted(fit3), g=6)
hoslem.test(southESPN$Drafted, fitted(fit4), g=7)
hoslem.test(southESPN$Drafted, fitted(fit5), g=7)
# Reject 3 and 4
# BUG FIX: the test type must be passed as `test = "Chisq"`; the original
# positional string was silently dropped by anova.glm, so no chi-square
# test was actually performed on the sequential deviance tables.
anova(fit1, test = "Chisq")
anova(fit2, test = "Chisq")
anova(fit3, test = "Chisq")
anova(fit4, test = "Chisq")
anova(fit5, test = "Chisq")
# Likelihood ratio tests.
# NOTE(review): lrtest() is from lmtest and regTermTest() from survey; neither
# package is attached here — confirm they are loaded elsewhere.
lrtest(fit1, fit2)
lrtest(fit1, fit4)
lrtest(fit2, fit5)
# Wald test
regTermTest(fit2, "RB")
# Fit 2 has the second-lowest AIC and the lowest BIC. Its predictors are
# included in every model and all are significant; by LR test it beats fit1.
# Fit 2 also has better specificity, sensitivity and precision.
# Fitted probabilities on the (training) data for each candidate model.
predicted1 <- predict(fit1, southESPN, type="response")
predicted2 <- predict(fit2, southESPN, type="response")
predicted3 <- predict(fit3, southESPN, type="response")
predicted4 <- predict(fit4, southESPN, type="response")
predicted5 <- predict(fit5, southESPN, type="response")
library(InformationValue)
# optimalCutoff() finds the probability threshold that best separates the 1's
# and 0's, reducing the misclassification error.
optCutOff1 <- optimalCutoff(southESPN$Drafted, predicted1)
optCutOff2 <- optimalCutoff(southESPN$Drafted, predicted2)
optCutOff3 <- optimalCutoff(southESPN$Drafted, predicted3)
optCutOff4 <- optimalCutoff(southESPN$Drafted, predicted4)
optCutOff5 <- optimalCutoff(southESPN$Drafted, predicted5)
# Misclassification error is the percentage mismatch of predicted vs actual,
# irrespective of 1's or 0's. Lower is better.
misClassError(southESPN$Drafted, predicted1, threshold = optCutOff1)
misClassError(southESPN$Drafted, predicted2, threshold = optCutOff2)
misClassError(southESPN$Drafted, predicted3, threshold = optCutOff3)
misClassError(southESPN$Drafted, predicted4, threshold = optCutOff4)
misClassError(southESPN$Drafted, predicted5, threshold = optCutOff5)
# ROC: traces the percentage of true positives accurately predicted as the
# probability cutoff is lowered from 1 to 0. Greater area under the curve is better.
plotROC(southESPN$Drafted, predicted1)
plotROC(southESPN$Drafted, predicted2)
plotROC(southESPN$Drafted, predicted3)
plotROC(southESPN$Drafted, predicted4)
plotROC(southESPN$Drafted, predicted5)
# Per-model classification diagnostics at each model's optimal cutoff.
sensitivity(southESPN$Drafted, predicted1, threshold = optCutOff1)
specificity(southESPN$Drafted, predicted1, threshold = optCutOff1)
precision(southESPN$Drafted, predicted1, threshold = optCutOff1)
confusionMatrix(southESPN$Drafted, predicted1, optCutOff1)
sensitivity(southESPN$Drafted, predicted2, threshold = optCutOff2)
specificity(southESPN$Drafted, predicted2, threshold = optCutOff2)
precision(southESPN$Drafted, predicted2, threshold = optCutOff2)
confusionMatrix(southESPN$Drafted, predicted2, optCutOff2)
sensitivity(southESPN$Drafted, predicted3, threshold = optCutOff3)
specificity(southESPN$Drafted, predicted3, threshold = optCutOff3)
precision(southESPN$Drafted, predicted3, threshold = optCutOff3)
confusionMatrix(southESPN$Drafted, predicted3, optCutOff3)
sensitivity(southESPN$Drafted, predicted4, threshold = optCutOff4)
specificity(southESPN$Drafted, predicted4, threshold = optCutOff4)
precision(southESPN$Drafted, predicted4, threshold = optCutOff4)
confusionMatrix(southESPN$Drafted, predicted4, optCutOff4)
sensitivity(southESPN$Drafted, predicted5, threshold = optCutOff5)
specificity(southESPN$Drafted, predicted5, threshold = optCutOff5)
precision(southESPN$Drafted, predicted5, threshold = optCutOff5)
confusionMatrix(southESPN$Drafted, predicted5, optCutOff5)
#### Final model ####
# Refit the chosen model (same formula as fit2) and report its diagnostics.
southESPN.log=glm(Drafted~ Grade + Height + OL + RB, data = southESPN, family = "binomial")
predicted.southESPN <- predict(southESPN.log, southESPN, type="response")
optCutOff.southESPN <- optimalCutoff(southESPN$Drafted, predicted.southESPN)
misClassError(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
plotROC(southESPN$Drafted, predicted.southESPN)
sensitivity(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
specificity(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
precision(southESPN$Drafted, predicted.southESPN, threshold = optCutOff.southESPN)
confusionMatrix(southESPN$Drafted, predicted.southESPN, optCutOff.southESPN)
#############################################################################################
## Decision Tree
library(tree)
# tree() needs a factor response for classification.
southESPN$Drafted=as.factor(southESPN$Drafted)
trainData = southESPN
# Recode the 0/1 response as readable labels for tree output.
DraftStatus = ifelse(trainData$Drafted==1, "Drafted", "Undrafted")
trainData=data.frame(trainData, DraftStatus)
trainData=subset(trainData, select = -Drafted)
tree.train=tree(DraftStatus ~., data = trainData)
summary(tree.train)
plot(tree.train)
text(tree.train, pretty = 0)
# Training-set confusion matrix: overall accuracy and row-2 precision.
train.pred=predict(tree.train, trainData, type = "class")
cm1=table(predicted=train.pred, actual=trainData$DraftStatus)
(sum(diag(cm1)))/sum(cm1)
cm1[2,2]/(cm1[2,1]+cm1[2,2])
# BUG FIX: `test.pred` and `testData` are never defined anywhere in this
# script, so the cm2 block below errored at runtime. Disabled until a real
# train/test split is added.
# cm2=table(predicted=test.pred, actual=testData$DraftStatus)
# (sum(diag(cm2)))/sum(cm2)
# cm2[2,2]/(cm2[2,1]+cm2[2,2])
# Cost-complexity pruning, selecting size by cross-validated misclassification.
train.cv = cv.tree(tree.train, FUN = prune.misclass)
min_idx=which.min(train.cv$dev)
train.cv$size[min_idx]
par(mfrow = c(1, 1))
plot(train.cv)
# better plot
plot(train.cv$size, train.cv$dev / nrow(trainData), type = "b",
     xlab = "Tree Size", ylab = "CV Misclassification Rate")
train.prune= prune.misclass(tree.train, best = 3)
summary(train.prune)
plot(train.prune)
text(train.prune, pretty = 0)
title(main = "Pruned Classification Tree")
train.prune.pred = predict(train.prune, trainData, type = "class")
cm3=table(predicted = train.prune.pred, actual = trainData$DraftStatus)
cm3
(sum(diag(cm3)))/sum(cm3)
cm3[2,2]/(cm3[2,1]+cm3[2,2])
finalsouthESPNtree=train.prune
finalsouthESPNtree
trainData4=trainData
# Final tree
plot(finalsouthESPNtree)
text(finalsouthESPNtree, pretty = 0)
title(main = "southESPN Classification Tree")
southESPN.pred = predict(finalsouthESPNtree,trainData4, type= "class")
cm4=table(predicted = southESPN.pred, actual = trainData4$DraftStatus)
cm4
(sum(diag(cm4)))/sum(cm4)
cm4[2,2]/(cm4[2,1]+cm4[2,2])
|
# start with jump-start code... no need to run the data prep code
# Here we read in the training and test files.
# Read text columns as character strings and convert selected columns to factor variables.
# The training set will be used to create build and validation sets in cross-validation.
training <- read.csv(file = "training.csv", header = TRUE, stringsAsFactors = FALSE)
# The test set will be set aside during the model development process
# and used only after the full model has been developed and tested.
test <- read.csv(file = "test.csv", header = TRUE, stringsAsFactors = FALSE)
# For consistency in team codes between training and test, set STL to LAR in
# the training set (vectorized recode in place of the element-wise loop).
training$Tm[training$Tm == 'STL'] <- 'LAR'
# Here we show how to set up a factor variable for the NFL team
training$Tm <- factor(training$Tm)
test$Tm <- factor(test$Tm)
# Quick check of the data frames
cat('\n Summary of Training Set\n')
print(summary(training))
cat('\n Summary of Test Set\n')
print(summary(test))
# Missing-data audit: percentage of NA values per column.
# colMeans(is.na(.)) * 100 equals colSums(is.na) * 100 / (n_na + n_not_na).
nulls <- data.frame(
  col = as.character(colnames(training)),
  pct_null = colMeans(is.na(training)) * 100
)
nulls
# Because the modeling function for this assignment may involve many steps,
# we set it up as a function. Input to the function includes definition of
# the training and test sets for an iteration/fold of cross-validation.
# Within the cross-validation procedure, training_input and test_input will be
# subsets of the original full training set. At the very end of the model
# development process, of course, training_input and test_input will be the full
# training and test sets, but that only comes at the very end of the process.
# The function returns the root mean-square error in test_input set.
# Ensemble draft-pick model. One linear model of Pick is fit per position
# group, each with its own five combine predictors. Every test player is
# scored by every model whose predictors he has complete data for, and the
# available predictions are averaged. Returns the root mean-square error
# of the ensemble prediction on test_input.
eval_model <- function(training_input, test_input) {
  # Fit lm(Pick ~ <predictors>) on training_input and predict Pick for each
  # test row whose predictor values are all non-missing; other rows stay NA.
  predict_position <- function(predictors) {
    model <- lm(reformulate(predictors, response = "Pick"),
                data = training_input, na.action = "na.omit")
    pred <- rep(NA_real_, times = nrow(test_input))
    ok <- complete.cases(test_input[, predictors, drop = FALSE])
    if (any(ok)) {
      pred[ok] <- predict(model, newdata = test_input[ok, , drop = FALSE])
    }
    pred
  }
  # One component model per position group; the predictor sets differ by
  # which combine drills matter most for that group.
  qb.k.p.predict   <- predict_position(c("Ht", "Wt", "Shuttle", "Vertical", "BroadJump"))
  rb.predict       <- predict_position(c("Ht", "Wt", "X40yd", "X3Cone", "BroadJump"))
  wr.predict       <- predict_position(c("Ht", "Wt", "X40yd", "Vertical", "X3Cone"))
  fb.te.ls.predict <- predict_position(c("Ht", "Wt", "Bench", "Vertical", "BroadJump"))
  ot.predict       <- predict_position(c("Ht", "Wt", "Shuttle", "X40yd", "Bench"))
  og.c.dt.predict  <- predict_position(c("Ht", "Wt", "Shuttle", "Bench", "BroadJump"))
  de.prolb.predict <- predict_position(c("Ht", "Wt", "X3Cone", "Vertical", "BroadJump"))
  ilb.tolb.predict <- predict_position(c("Ht", "Wt", "Shuttle", "X3Cone", "BroadJump"))
  cb.s.predict     <- predict_position(c("Ht", "Wt", "Shuttle", "X40yd", "X3Cone"))
  # We are creating an ensemble or hybrid prediction by averaging all component
  # model predictions with non-missing values (rows with no prediction at all
  # come out NaN, matching per-row mean(..., na.rm = TRUE)).
  component_predictions <- cbind(qb.k.p.predict, rb.predict, wr.predict,
                                 fb.te.ls.predict, ot.predict, og.c.dt.predict,
                                 de.prolb.predict, ilb.tolb.predict, cb.s.predict)
  response_predict <- rowMeans(component_predictions, na.rm = TRUE)
  response_actual <- test_input$Pick
  ensemble_data_frame <- data.frame(qb.k.p.predict, rb.predict, wr.predict, fb.te.ls.predict, ot.predict,
                                    og.c.dt.predict, de.prolb.predict, ilb.tolb.predict, cb.s.predict,
                                    response_predict, response_actual)
  # To check calculations, we can examine the first rows of the ensemble_data_frame
  cat('\nFirst and last six rows of ensemble_data_frame\n')
  print(head(ensemble_data_frame))
  cat(' . . . \n')
  print(tail(ensemble_data_frame))
  # compute and return root mean-square error in test_input
  sqrt(mean((response_predict - response_actual)^2, na.rm = TRUE))
}
# Whatever model is used for prediction, it should beat this null model,
# which simply predicts the mean response value for every player.
null_model <- function(training_input, test_input) {
  # training_input is accepted only for interface symmetry with eval_model;
  # the null prediction is the mean Pick of the test set itself.
  actual <- test_input$Pick
  deviations <- actual - mean(actual)
  # Root mean-square error of the constant-mean prediction.
  sqrt(mean(deviations^2))
}
# Cross-validation work
library(cvTools)
set.seed(9999) # for reproducibility
nfolds <- 10
study_folds <- cvFolds(nrow(training), K = nfolds, type = 'consecutive')
cv_model_results <- numeric(nfolds) # per-fold RMSE of the ensemble model
cv_null_results <- numeric(nfolds)  # per-fold RMSE of the null model
for (ifold in seq_len(nfolds)) {
  cat('\nWorking on fold ', ifold, '\n')
  # Rows assigned to this fold are held out; the rest train the model.
  this_fold_test_data <- training[study_folds$which == ifold, ]
  this_fold_training_data <- training[study_folds$which != ifold, ]
  # fit model and get root mean-square error for this iteration
  cv_model_results[ifold] <- eval_model(training_input = this_fold_training_data,
                                        test_input = this_fold_test_data)
  cv_null_results[ifold] <- null_model(training_input = this_fold_training_data,
                                       test_input = this_fold_test_data)
}
cat('\n', 'Cross-validation My Model Average Root Mean-Square Error:',
    mean(cv_model_results))
cat('\n', 'Cross-validation No Model Average Root Mean-Square Error:',
    mean(cv_null_results))
cv_model_results_mean <- mean(cv_model_results)
cv_null_results_mean <- mean(cv_null_results)
# Plot per-fold RMSE for both models with horizontal lines at the means.
plot(cv_model_results, xlab = "Model Results", ylab = "RMSE",
     main = "Model Performance", type = "p", col = "blue")
points(cv_null_results, col = "red")
abline(h = cv_model_results_mean, col = "blue")
abline(h = cv_null_results_mean, col = "red")
legend("topright",
       legend = c("Evaluation Model RMSE Values", "Null Model RMSE Values"),
       col = c("blue", "red"), pch = 1, bty = "n", cex = 0.8)
legend("bottomleft",
       legend = c("Evaluation Model Average RMSE", "Null Model Average RMSE"),
       col = c("blue", "red"), lty = c(1, 1), bty = "n", cex = 0.8)
|
/code/nfl_combine_modeling.R
|
no_license
|
michaelpallante/nfl_combine_modeling
|
R
| false
| false
| 9,694
|
r
|
# start with jump-start code... no need to run the data prep code
# Here we read in the training and test files.
# Read text columns as character strings; selected columns are converted
# to factor variables below.
# The training set will be used to create build and validation sets in cross-validation.
training <- read.csv(file = "training.csv", header = TRUE, stringsAsFactors = FALSE)
# The test set will be set aside during the model development process
# and used only after the full model has been developed and tested.
test <- read.csv(file = "test.csv", header = TRUE, stringsAsFactors = FALSE)
# For consistency in team codes between training and test, set STL to LAR in
# the training set. Vectorized replacement (which() keeps it NA-safe) instead
# of a per-row loop.
training$Tm[which(training$Tm == 'STL')] <- 'LAR'
# Here we show how to set up a factor variable for the NFL team.
training$Tm <- factor(training$Tm)
test$Tm <- factor(test$Tm)
# Quick check of the data frames.
cat('\n Summary of Training Set\n')
print(summary(training))
cat('\n Summary of Test Set\n')
print(summary(test))
# Missing data check: percentage of NA values per column.
# NA count + non-NA count is simply the row count, so divide by nrow().
nulls <- data.frame(
  col = colnames(training),
  pct_null = colSums(is.na(training)) * 100 / nrow(training)
)
nulls
# Because the modeling function for this assignment may involve many steps,
# we set it up as a function. Input to the function includes definition of
# the training and test sets for an iteration/fold of cross-validation.
# Within the cross-validation procedure, training_input and test_input will be
# subsets of the original full training set. At the very end of the model
# development process, of course, training_input and test_input will be the full
# training and test sets, but that only comes at the very end of the process.
# The function returns the root mean-square error in test_input set.
# Ensemble draft-pick model. One linear model of Pick is fit per position
# group, each with its own five combine predictors. Every test player is
# scored by every model whose predictors he has complete data for, and the
# available predictions are averaged. Returns the root mean-square error
# of the ensemble prediction on test_input.
eval_model <- function(training_input, test_input) {
  # Fit lm(Pick ~ <predictors>) on training_input and predict Pick for each
  # test row whose predictor values are all non-missing; other rows stay NA.
  predict_position <- function(predictors) {
    model <- lm(reformulate(predictors, response = "Pick"),
                data = training_input, na.action = "na.omit")
    pred <- rep(NA_real_, times = nrow(test_input))
    ok <- complete.cases(test_input[, predictors, drop = FALSE])
    if (any(ok)) {
      pred[ok] <- predict(model, newdata = test_input[ok, , drop = FALSE])
    }
    pred
  }
  # One component model per position group; the predictor sets differ by
  # which combine drills matter most for that group.
  qb.k.p.predict   <- predict_position(c("Ht", "Wt", "Shuttle", "Vertical", "BroadJump"))
  rb.predict       <- predict_position(c("Ht", "Wt", "X40yd", "X3Cone", "BroadJump"))
  wr.predict       <- predict_position(c("Ht", "Wt", "X40yd", "Vertical", "X3Cone"))
  fb.te.ls.predict <- predict_position(c("Ht", "Wt", "Bench", "Vertical", "BroadJump"))
  ot.predict       <- predict_position(c("Ht", "Wt", "Shuttle", "X40yd", "Bench"))
  og.c.dt.predict  <- predict_position(c("Ht", "Wt", "Shuttle", "Bench", "BroadJump"))
  de.prolb.predict <- predict_position(c("Ht", "Wt", "X3Cone", "Vertical", "BroadJump"))
  ilb.tolb.predict <- predict_position(c("Ht", "Wt", "Shuttle", "X3Cone", "BroadJump"))
  cb.s.predict     <- predict_position(c("Ht", "Wt", "Shuttle", "X40yd", "X3Cone"))
  # We are creating an ensemble or hybrid prediction by averaging all component
  # model predictions with non-missing values (rows with no prediction at all
  # come out NaN, matching per-row mean(..., na.rm = TRUE)).
  component_predictions <- cbind(qb.k.p.predict, rb.predict, wr.predict,
                                 fb.te.ls.predict, ot.predict, og.c.dt.predict,
                                 de.prolb.predict, ilb.tolb.predict, cb.s.predict)
  response_predict <- rowMeans(component_predictions, na.rm = TRUE)
  response_actual <- test_input$Pick
  ensemble_data_frame <- data.frame(qb.k.p.predict, rb.predict, wr.predict, fb.te.ls.predict, ot.predict,
                                    og.c.dt.predict, de.prolb.predict, ilb.tolb.predict, cb.s.predict,
                                    response_predict, response_actual)
  # To check calculations, we can examine the first rows of the ensemble_data_frame
  cat('\nFirst and last six rows of ensemble_data_frame\n')
  print(head(ensemble_data_frame))
  cat(' . . . \n')
  print(tail(ensemble_data_frame))
  # compute and return root mean-square error in test_input
  sqrt(mean((response_predict - response_actual)^2, na.rm = TRUE))
}
# Whatever model is used for prediction, it should beat this null model,
# which simply predicts the mean response value for every player.
null_model <- function(training_input, test_input) {
  # training_input is accepted only for interface symmetry with eval_model;
  # the null prediction is the mean Pick of the test set itself.
  actual <- test_input$Pick
  deviations <- actual - mean(actual)
  # Root mean-square error of the constant-mean prediction.
  sqrt(mean(deviations^2))
}
# Cross-validation work
library(cvTools)
set.seed(9999) # for reproducibility
nfolds <- 10
study_folds <- cvFolds(nrow(training), K = nfolds, type = 'consecutive')
cv_model_results <- numeric(nfolds) # per-fold RMSE of the ensemble model
cv_null_results <- numeric(nfolds)  # per-fold RMSE of the null model
for (ifold in seq_len(nfolds)) {
  cat('\nWorking on fold ', ifold, '\n')
  # Rows assigned to this fold are held out; the rest train the model.
  this_fold_test_data <- training[study_folds$which == ifold, ]
  this_fold_training_data <- training[study_folds$which != ifold, ]
  # fit model and get root mean-square error for this iteration
  cv_model_results[ifold] <- eval_model(training_input = this_fold_training_data,
                                        test_input = this_fold_test_data)
  cv_null_results[ifold] <- null_model(training_input = this_fold_training_data,
                                       test_input = this_fold_test_data)
}
cat('\n', 'Cross-validation My Model Average Root Mean-Square Error:',
    mean(cv_model_results))
cat('\n', 'Cross-validation No Model Average Root Mean-Square Error:',
    mean(cv_null_results))
cv_model_results_mean <- mean(cv_model_results)
cv_null_results_mean <- mean(cv_null_results)
# Plot per-fold RMSE for both models with horizontal lines at the means.
plot(cv_model_results, xlab = "Model Results", ylab = "RMSE",
     main = "Model Performance", type = "p", col = "blue")
points(cv_null_results, col = "red")
abline(h = cv_model_results_mean, col = "blue")
abline(h = cv_null_results_mean, col = "red")
legend("topright",
       legend = c("Evaluation Model RMSE Values", "Null Model RMSE Values"),
       col = c("blue", "red"), pch = 1, bty = "n", cex = 0.8)
legend("bottomleft",
       legend = c("Evaluation Model Average RMSE", "Null Model Average RMSE"),
       col = c("blue", "red"), lty = c(1, 1), bty = "n", cex = 0.8)
|
observeEvent(input$nucPreset, {
  # Mirror the chosen nucleotide preset onto the example radio subgroup.
  updateRadioSubgroup(session, "exampleId", "sub",
    selected = input$nucPreset, inline = TRUE
  )
})
observeEvent(input$exampleId, {
  # input$exampleId has the form "sub-<k>"; pull out the numeric suffix.
  chosen <- as.numeric(sub(".*-", "", input$exampleId))
  # Examples 9 and 10 are nucleotide presets; keep that radio group in sync.
  if (chosen %in% 9:10) {
    updateRadioButtons(session, "nucPreset",
      selected = chosen, inline = TRUE
    )
  }
})
# Select the "<id>-<selected>" choice inside a radio subgroup created by
# radioSubgroup(); only the selection is updated, not label or choices.
updateRadioSubgroup <- function(session, inputId, id = "sub", inline, selected, ...) {
  updateRadioButtons(session, inputId,
    label = NULL, choices = NULL,
    inline = inline, selected = paste0(id, "-", selected)
  )
}
# Container div carrying the classes shiny uses to treat its children
# (several radioSubgroup() outputs) as a single radio-group input.
radioGroupContainer <- function(inputId, ...) {
  div(
    id = inputId,
    class = "form-group shiny-input-radiogroup shiny-input-container",
    ...
  )
}
# Build radio buttons whose values are namespaced as "<id>-<value>" so that
# several subgroups can share one radioGroupContainer() without clashing.
# Only the first subgroup starts with a selection; the rest start empty.
radioSubgroup <- function(inputId, label, choiceNames, choiceValues, inline = TRUE, first = FALSE, id = "sub") {
  choiceValues <- paste0(id, "-", choiceValues)
  choices <- setNames(choiceValues, choiceNames)
  # Idiomatic single computation of the initial selection instead of
  # branching on `first == FALSE`.
  selected <- if (first) choices[1] else character(0)
  rb <- radioButtons(inputId, label, choices, selected = selected, inline = inline)
  # Return only the children; the wrapping div comes from radioGroupContainer().
  rb$children
}
exampleId <- reactive({
  req(input$exampleId)
  # input$exampleId has the form "<id>-<number>"; split into its two parts.
  parts <- strsplit(input$exampleId, "-")[[1]]
  list(
    id = parts[1],
    value = as.numeric(parts[2])
  )
})
#
# param presets
#
observeEvent(
  {
    input$examIncrease
  },
  ignoreInit = TRUE,
  {
    # Step forward to the next example number, wrapping past maxEx back to 1.
    # Fixed: scalar `||` (not vectorized `|`) inside a scalar `if`.
    if (values[["number"]] == 0 || values[["number"]] >= maxEx) {
      values[["number"]] <- 1
      values[["canButton"]] <- TRUE
    } else if (values[["number"]] <= maxEx - 1) {
      # Locate the current number in exampleVec and move to the next entry.
      idx <- grep(paste0("\\<", exampleVec[exampleVec == values[["number"]]], "\\>"), exampleVec) + 1
      values[["number"]] <- exampleVec[idx]
      values[["canButton"]] <- TRUE
    }
    # Examples 9 and 10 belong to the second menu; switch tabs to match.
    if (values[["number"]] %in% 9:10) {
      CurrentM$menu <- menulist[2]
      updateTabItems(session, "tabs", menulist[2])
      updateTabItems(session, "tabsetpanel4", tablist4[1])
    }
    # Discard cached data and NCBI search results for the previous example.
    values[["dfList"]] <- list()
    values[["button3ab"]] <- NA
    values[["entrez_summary1"]] <- NA
    values[["entrez_titles"]] <- NA
    values[["entrez_selected"]] <- NA
    values[["fetch_listAll"]] <- NA
    values[["authors"]] <- ""
    values[["entrez_search1"]] <- NA
    values[["fetch_list"]] <- NA
    values[["titles_number"]] <- NA
  }
)
observeEvent(
  {
    input$examDecrease
  },
  ignoreInit = TRUE,
  {
    # Step back to the previous example number, wrapping below 2 up to maxEx.
    # Fixed: scalar `&&` (not vectorized `&`) inside a scalar `if`.
    if (values[["number"]] >= 2 && values[["number"]] <= maxEx) {
      # Locate the current number in exampleVec and move to the previous entry.
      idx <- grep(paste0("\\<", exampleVec[exampleVec == values[["number"]]], "\\>"), exampleVec) - 1
      values[["number"]] <- exampleVec[idx]
      values[["canButton"]] <- TRUE
    } else {
      values[["number"]] <- maxEx
      values[["canButton"]] <- TRUE
    }
    # Examples 9 and 10 belong to the second menu; switch tabs to match.
    if (values[["number"]] %in% 9:10) {
      CurrentM$menu <- menulist[2]
      updateTabItems(session, "tabs", menulist[2])
      updateTabItems(session, "tabsetpanel4", tablist4[1])
    }
    # Discard cached data and NCBI search results for the previous example.
    values[["dfList"]] <- list()
    values[["button3ab"]] <- NA
    values[["entrez_summary1"]] <- NA
    values[["entrez_titles"]] <- NA
    values[["entrez_selected"]] <- NA
    values[["fetch_listAll"]] <- NA
    values[["authors"]] <- ""
    values[["entrez_search1"]] <- NA
    values[["fetch_list"]] <- NA
    values[["titles_number"]] <- NA
  }
)
observeEvent(
  {
    input$upPresetButton
  },
  ignoreInit = TRUE,
  {
    # Load the preset selected in input$upPreset: clear cached search state
    # and pull the preset's data frames and notes out of values[["paramVec"]].
    # values[["go"]]<-FALSE
    values[["canButton"]] <- FALSE
    updateCheckboxInput(session, "fileInsteadNcbi", value = FALSE)
    values[["number"]] <- input$upPreset
    # Reset derived data and everything fetched for the previous selection.
    values[["dfList"]] <- list()
    values[["button3ab"]] <- NA
    values[["entrez_summary1"]] <- NA
    values[["entrez_titles"]] <- NA
    values[["entrez_selected"]] <- NA
    values[["fetch_listAll"]] <- NA
    values[["authors"]] <- ""
    values[["entrez_search1"]] <- NA
    values[["fetch_list"]] <- NA
    values[["titles_number"]] <- NA
    # Load this preset's chromosome sizes, mark positions, mark styles and
    # notes; each *Vec field is indexed by the preset number and holds a
    # one-element list, hence the trailing [[1]].
    values[["df1"]] <- values[["paramVec"]]$dfChrSizeVec[as.numeric(values[["number"]])][[1]]
    values[["df1Mark"]] <- values[["paramVec"]]$dfMarkPosVec[as.numeric(values[["number"]])][[1]]
    values[["df1MStyle"]] <- values[["paramVec"]]$dfMarkColorVec[as.numeric(values[["number"]])][[1]]
    values[["notes"]] <- values[["paramVec"]]$notesVec[as.numeric(values[["number"]])][[1]]
    values[["leftNotes"]] <- values[["paramVec"]]$leftNotesVec[as.numeric(values[["number"]])][[1]]
    values[["leftNotesUp"]] <- values[["paramVec"]]$leftNotesUpVec[as.numeric(values[["number"]])][[1]]
  }
)
observeEvent(
  {
    input$exampleButton
  },
  ignoreInit = TRUE,
  {
    # Load the example chosen in the radio subgroup and clear cached state.
    values[["canButton"]] <- TRUE
    updateCheckboxInput(session, "fileInsteadNcbi", value = FALSE)
    values[["number"]] <- exampleId()$value # input$exampleId
    values[["dfList"]] <- list()
    values[["authors"]] <- ""
    # Everything fetched from NCBI for the previous example is stale now.
    for (stale_field in c(
      "button3ab", "entrez_summary1", "entrez_titles", "entrez_selected",
      "fetch_listAll", "entrez_search1", "fetch_list", "titles_number"
    )) {
      values[[stale_field]] <- NA
    }
  }
)
observeEvent(
  {
    input$nucPresetButton
  },
  ignoreInit = TRUE,
  {
    # Activate the selected nucleotide preset and drop stale cached state.
    values[["canButton"]] <- TRUE
    updateCheckboxInput(session, "fileInsteadNcbi", value = FALSE)
    values[["number"]] <- input$nucPreset
    values[["dfList"]] <- list()
    values[["authors"]] <- ""
    # Any previously fetched NCBI results no longer apply.
    for (stale_field in c(
      "button3ab", "entrez_summary1", "entrez_titles", "entrez_selected",
      "fetch_listAll", "entrez_search1", "fetch_list", "titles_number"
    )) {
      values[[stale_field]] <- NA
    }
  }
)
observeEvent(
numberRe(),
ignoreInit = TRUE,
{
validate(need(try(numberRe() > 0), "not ready"))
sel <- numberRe()
if (sel %in% 1:maxEx) {
updateRadioSubgroup(session, "exampleId", "sub", selected = sel, inline = TRUE)
}
if (sel %in% 9:10) {
updateRadioButtons(session, "nucPreset", selected = sel, inline = TRUE)
}
updateNumericInput(session, "karHeight", value = values[["paramVec"]]$karHeightVec[as.numeric(sel)])
updateNumericInput(session, "karHeiSpace", value = values[["paramVec"]]$karHeiSpaceVec[as.numeric(sel)])
updateNumericInput(session, "amoSepar", value = values[["paramVec"]]$amoSeparVec[as.numeric(sel)])
updateNumericInput(session, "karSepar", value = values[["paramVec"]]$karSeparVec[as.numeric(sel)])
updateNumericInput(session, "chrWidth", value = values[["paramVec"]]$chrWidthVec[as.numeric(sel)])
updateRadioButtons(session, "chrId", selected = values[["paramVec"]]$chrIdVec[as.numeric(sel)])
updateNumericInput(session, "squareness", value = values[["paramVec"]]$squarenessVec[as.numeric(sel)])
updateNumericInput(session, "markN", value = values[["paramVec"]]$markNVec[as.numeric(sel)])
updateNumericInput(session, "n", value = values[["paramVec"]]$nVec[as.numeric(sel)])
updateRadioButtons(session, "orderChr", selected = values[["paramVec"]]$orderChrVec[as.numeric(sel)])
updateRadioButtons(session, "markDistType", selected = values[["paramVec"]]$markDistTypeVec[as.numeric(sel)])
updateCheckboxInput(session, "useOneDot", value = values[["paramVec"]]$useOneDotVec[as.numeric(sel)])
updateTextInput(session, "bannedMarkName", value = values[["paramVec"]]$bannedMarkNameVec[as.numeric(sel)])
updateTextInput(session, "specialOTUNames", value = values[["paramVec"]]$specialOTUNamesVec[as.numeric(sel)])
updateTextInput(session, "addMissingOTUAfter", value = values[["paramVec"]]$addMissingOTUAfterVec[as.numeric(sel)])
updateTextInput(session, "missOTUspacings", value = values[["paramVec"]]$missOTUspacingsVec[as.numeric(sel)])
updateCheckboxInput(session, "origin", value = values[["paramVec"]]$originVec[as.numeric(sel)])
updateTextInput(session, "OTUfamily", value = values[["paramVec"]]$OTUfamilyVec[as.numeric(sel)])
updateTextInput(session, "classChrName", value = values[["paramVec"]]$classChrNameVec[as.numeric(sel)])
updateTextInput(session, "classChrNameUp", value = values[["paramVec"]]$classChrNameUpVec[as.numeric(sel)])
updateTextInput(session, "chrIdPatternRem", value = values[["paramVec"]]$chrIdPatternRemVec[as.numeric(sel)])
updateNumericInput(session, "xModMonoHoloRate", value = values[["paramVec"]]$xModMonoHoloRateVec[as.numeric(sel)])
updateTextInput(session, "specialyTitle", value = values[["paramVec"]]$specialyTitleVec[as.numeric(sel)])
updateNumericInput(session, "specialChrWidth", value = values[["paramVec"]]$specialChrWidthVec[as.numeric(sel)])
updateNumericInput(session, "specialChrSpacing", value = values[["paramVec"]]$specialChrSpacingVec[as.numeric(sel)])
updateTextInput(session, "yTitle", value = values[["paramVec"]]$yTitleVec[as.numeric(sel)])
updateNumericInput(session, "nameChrIndexPos", value = values[["paramVec"]]$nameChrIndexPosVec[as.numeric(sel)])
updateCheckboxInput(session, "cMBeginCenter", value = values[["paramVec"]]$cMBeginCenterVec[as.numeric(sel)])
updateNumericInput(session, "pMarkFac", value = values[["paramVec"]]$pMarkFacVec[as.numeric(sel)])
updateNumericInput(session, "protruding", value = values[["paramVec"]]$protrudingVec[as.numeric(sel)])
updateNumericInput(session, "arrowhead", value = values[["paramVec"]]$arrowheadVec[as.numeric(sel)])
# circularPlot
updateNumericInput(session, "chrLabelSpacing", value = values[["paramVec"]]$chrLabelSpacingVec[as.numeric(sel)])
updateNumericInput(session, "labelSpacing", value = values[["paramVec"]]$labelSpacingVec[as.numeric(sel)])
updateNumericInput(session, "rotation", value = values[["paramVec"]]$rotationVec[as.numeric(sel)])
updateNumericInput(session, "shrinkFactor", value = values[["paramVec"]]$shrinkFactorVec[as.numeric(sel)])
updateNumericInput(session, "separFactor", value = values[["paramVec"]]$separFactorVec[as.numeric(sel)])
updateNumericInput(session, "radius", value = values[["paramVec"]]$radiusVec[as.numeric(sel)])
updateNumericInput(session, "circleCenter", value = values[["paramVec"]]$circleCenterVec[as.numeric(sel)])
updateNumericInput(session, "OTUsrt", value = values[["paramVec"]]$OTUsrtVec[as.numeric(sel)])
updateNumericInput(session, "OTUjustif", value = values[["paramVec"]]$OTUjustifVec[as.numeric(sel)])
updateNumericInput(session, "OTULabelSpacerx", value = values[["paramVec"]]$OTULabelSpacerxVec[as.numeric(sel)])
updateNumericInput(session, "OTUlegendHeight", value = values[["paramVec"]]$OTUlegendHeightVec[as.numeric(sel)])
updateRadioButtons(session, "OTUplacing", selected = values[["paramVec"]]$OTUplacingVec[as.numeric(sel)])
# general
updateNumericInput(session, "chrSpacing", value = as.numeric(values[["paramVec"]]$chrSpacingVec[as.numeric(sel)]))
updateNumericInput(session, "groupSepar", value = as.numeric(values[["paramVec"]]$groupSeparVec[as.numeric(sel)]))
updateRadioButtons(session, "morpho", selected = values[["paramVec"]]$morphoVec[as.numeric(sel)])
updateTextInput(session, "chrColor", value = values[["paramVec"]]$chrColorVec[as.numeric(sel)])
updateTextInput(session, "cenColor", value = values[["paramVec"]]$cenColorVec[as.numeric(sel)])
updateRadioButtons(session, "chrIndex", selected = values[["paramVec"]]$chrIndexVec[as.numeric(sel)])
updateCheckboxInput(session, "karIndex", value = values[["paramVec"]]$karIndexVec[as.numeric(sel)])
updateNumericInput(session, "karIndexPos", value = values[["paramVec"]]$karIndexPosVec[as.numeric(sel)])
updateCheckboxInput(session, "chrSize", value = as.logical(values[["paramVec"]]$chrSizeVec[as.numeric(sel)]))
updateCheckboxInput(session, "showMarkPos", value = as.logical(values[["paramVec"]]$showMarkPosVec[as.numeric(sel)]))
updateCheckboxInput(session, "useMinorTicks", value = as.logical(values[["paramVec"]]$useMinorTicksVec[as.numeric(sel)]))
updateNumericInput(session, "indexIdTextSize", value = as.numeric((values[["paramVec"]]$indexIdTextSizeVec[as.numeric(sel)])))
updateNumericInput(session, "miniTickFactor", value = as.numeric(values[["paramVec"]]$miniTickFactorVec[as.numeric(sel)]))
updateNumericInput(session, "nsmall", value = as.numeric(values[["paramVec"]]$nsmallVec[as.numeric(sel)]))
updateCheckboxInput(session, "chrSizeMbp", value = as.logical((values[["paramVec"]]$chrSizeMbpVec[as.numeric(sel)])))
updateCheckboxInput(session, "chrNameUp", value = as.logical(values[["paramVec"]]$chrNameUpVec[as.numeric(sel)]))
updateNumericInput(session, "distTextChr", value = as.numeric((values[["paramVec"]]$distTextChrVec[as.numeric(sel)])))
updateNumericInput(session, "OTUTextSize", value = as.numeric((values[["paramVec"]]$OTUTextSizeVec[as.numeric(sel)])))
updateCheckboxInput(session, "chromatids", value = values[["paramVec"]]$chromatidsVec[as.numeric(sel)])
updateCheckboxInput(session, "holocenNotAsChromatids", value = values[["paramVec"]]$holocenNotAsChromatidsVec[as.numeric(sel)])
updateNumericInput(session, "xModifier", value = as.numeric(values[["paramVec"]]$xModifierVec[as.numeric(sel)]))
updateCheckboxInput(session, "circularPlot", value = values[["paramVec"]]$circularPlotVec[as.numeric(sel)])
updateCheckboxInput(session, "ruler", value = values[["paramVec"]]$rulerVec[as.numeric(sel)])
updateNumericInput(session, "rulerPos", value = as.numeric((values[["paramVec"]]$rulerPosVec[as.numeric(sel)])))
updateNumericInput(session, "threshold", value = as.numeric((values[["paramVec"]]$thresholdVec[as.numeric(sel)])))
updateNumericInput(session, "ceilingFactor", value = as.numeric((values[["paramVec"]]$ceilingFactorVec[as.numeric(sel)])))
updateNumericInput(session, "rulerInterval", value = as.numeric((values[["paramVec"]]$rulerIntervalVec[as.numeric(sel)])))
updateNumericInput(session, "rulerIntervalMb", value = as.numeric((values[["paramVec"]]$rulerIntervalMbVec[as.numeric(sel)])))
updateNumericInput(session, "rulerIntervalcM", value = as.numeric((values[["paramVec"]]$rulerIntervalcMVec[as.numeric(sel)])))
updateNumericInput(session, "ruler.tck", value = as.numeric((values[["paramVec"]]$ruler.tckVec[as.numeric(sel)])))
updateCheckboxInput(session, "collapseCen", value = as.logical(values[["paramVec"]]$collapseCenVec[as.numeric(sel)]))
updateNumericInput(session, "rulerNumberSize", value = as.numeric((values[["paramVec"]]$rulerNumberSizeVec[as.numeric(sel)])))
updateNumericInput(session, "rulerNumberPos", value = as.numeric((values[["paramVec"]]$rulerNumberPosVec[as.numeric(sel)])))
updateNumericInput(session, "rulerTitleSize", value = values[["paramVec"]]$rulerTitleSizeVec[as.numeric(sel)])
updateNumericInput(session, "xPosRulerTitle", value = as.numeric((values[["paramVec"]]$xPosRulerTitleVec[as.numeric(sel)])))
updateRadioButtons(session, "legend", selected = values[["paramVec"]]$legendVec[as.numeric(sel)])
updateRadioButtons(session, "cenFormat", selected = values[["paramVec"]]$cenFormatVec[as.numeric(sel)])
updateNumericInput(session, "cenFactor", value = values[["paramVec"]]$cenFactorVec[as.numeric(sel)])
updateNumericInput(session, "centromereSize", value = values[["paramVec"]]$centromereSizeVec[as.numeric(sel)])
updateCheckboxInput(session, "autoCenSize", value = as.logical(values[["paramVec"]]$autoCenSizeVec[as.numeric(sel)]))
updateNumericInput(session, "legendWidth", value = as.numeric((values[["paramVec"]]$legendWidthVec[as.numeric(sel)])))
updateNumericInput(session, "legendHeight", value = as.numeric((values[["paramVec"]]$legendHeightVec[as.numeric(sel)])))
updateTextInput(session, "pattern", value = values[["paramVec"]]$patternVec[as.numeric(sel)])
updateTextInput(session, "markNewLine", value = values[["paramVec"]]$markNewLineVec[as.numeric(sel)])
updateTextInput(session, "forbiddenMark", value = values[["paramVec"]]$forbiddenMarkVec[as.numeric(sel)])
updateTextInput(session, "classGroupName", value = values[["paramVec"]]$classGroupNameVec[as.numeric(sel)])
updateCheckboxInput(session, "remSimiMarkLeg", value = as.logical((values[["paramVec"]]$remSimiMarkLegVec[as.numeric(sel)])))
updateNumericInput(session, "markLabelSize", value = as.numeric((values[["paramVec"]]$markLabelSizeVec[as.numeric(sel)])))
updateNumericInput(session, "markLabelSpacer", value = as.numeric((values[["paramVec"]]$markLabelSpacerVec[as.numeric(sel)])))
updateNumericInput(session, "legendYcoord", value = as.numeric((values[["paramVec"]]$legendYcoordVec[as.numeric(sel)])))
updateCheckboxInput(session, "fixCenBorder", value = as.logical((values[["paramVec"]]$fixCenBorderVec[as.numeric(sel)])))
updateCheckboxInput(session, "bMarkNameAside", value = as.logical(values[["paramVec"]]$bMarkNameAsideVec[as.numeric(sel)]))
updateCheckboxInput(session, "chrBorderColor", value = values[["paramVec"]]$chrBorderColorVec[as.numeric(sel)])
updateNumericInput(session, "lwd.chr", value = as.numeric((values[["paramVec"]]$lwd.chrVec[as.numeric(sel)])))
updateNumericInput(session, "lwd.cM", value = as.numeric((values[["paramVec"]]$lwd.cMVec[as.numeric(sel)])))
updateNumericInput(session, "lwd.mimicCen", value = as.numeric((values[["paramVec"]]$lwd.mimicCenVec[as.numeric(sel)])))
updateNumericInput(session, "xlimLeftMod", value = as.numeric((values[["paramVec"]]$xlimLeftModVec[as.numeric(sel)])))
updateNumericInput(session, "xlimRightMod", value = values[["paramVec"]]$xlimRightModVec[as.numeric(sel)])
updateNumericInput(session, "ylimBotMod", value = values[["paramVec"]]$ylimBotModVec[as.numeric(sel)])
updateNumericInput(session, "ylimTopMod", value = as.numeric((values[["paramVec"]]$ylimTopModVec[as.numeric(sel)])))
updateNumericInput(session, "hwModifier", value = as.numeric(values[["paramVec"]]$hwModifierVec[as.numeric(sel)]))
updateNumericInput(session, "widFactor", value = as.numeric((values[["paramVec"]]$widFactorVec[as.numeric(sel)])))
updateNumericInput(session, "heiFactor", value = as.numeric((values[["paramVec"]]$heiFactorVec[as.numeric(sel)])))
updateRadioButtons(session, "pngorsvg", selected = values[["paramVec"]]$pngorsvgVec[as.numeric(sel)])
updateRadioButtons(session, "pngorsvgDown", selected = values[["paramVec"]]$pngorsvgDownVec[as.numeric(sel)])
updateTextInput(session, "mycolors", value = values[["paramVec"]]$mycolorsVec[as.numeric(sel)])
updateTextInput(session, "markPer", value = values[["paramVec"]]$markPerVec[as.numeric(sel)])
updateTextInput(session, "bToRemove", value = values[["paramVec"]]$bToRemoveVec[as.numeric(sel)])
updateCheckboxInput(session, "perAsFraction", value = values[["paramVec"]]$perAsFractionVec[as.numeric(sel)])
updateTextInput(session, "chrNamesToSwap", value = values[["paramVec"]]$chrNamesToSwapVec[as.numeric(sel)])
updateCheckboxInput(session, "addOTUName", value = values[["paramVec"]]$addOTUNameVec[as.numeric(sel)])
updateRadioButtons(session, "OTUfont", selected = as.character(values[["paramVec"]]$OTUfontVec[as.numeric(sel)]))
updateTextInput(session, "moveKarHor", value = values[["paramVec"]]$moveKarHorVec[as.numeric(sel)])
updateNumericInput(session, "mkhValue", value = values[["paramVec"]]$mkhValueVec[as.numeric(sel)])
updateCheckboxInput(session, "anchor", value = values[["paramVec"]]$anchorVec[as.numeric(sel)])
updateNumericInput(session, "moveAnchorV", value = values[["paramVec"]]$moveAnchorVVec[as.numeric(sel)])
updateNumericInput(session, "moveAnchorH", value = values[["paramVec"]]$moveAnchorHVec[as.numeric(sel)])
updateNumericInput(session, "anchorVsizeF", value = values[["paramVec"]]$anchorVsizeFVec[as.numeric(sel)])
updateNumericInput(session, "anchorHsizeF", value = values[["paramVec"]]$anchorHsizeFVec[as.numeric(sel)])
updateNumericInput(session, "notesTextSize", value = values[["paramVec"]]$notesTextSizeVec[as.numeric(sel)])
updateNumericInput(session, "notesPosX", value = values[["paramVec"]]$notesPosXVec[as.numeric(sel)])
updateNumericInput(session, "leftNoteFontUp", value = values[["paramVec"]]$leftNoteFontUpVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesPosX", value = values[["paramVec"]]$leftNotesPosXVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesPosY", value = values[["paramVec"]]$leftNotesPosYVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesUpPosY", value = values[["paramVec"]]$leftNotesUpPosYVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesUpPosX", value = values[["paramVec"]]$leftNotesUpPosXVec[as.numeric(sel)])
updateNumericInput(session, "notesPosY", value = values[["paramVec"]]$notesPosYVec[as.numeric(sel)])
updateNumericInput(session, "leftNoteFont", value = values[["paramVec"]]$leftNoteFontVec[as.numeric(sel)])
updateNumericInput(session, "noteFont", value = values[["paramVec"]]$noteFontVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesUpTextSize", value = values[["paramVec"]]$leftNotesUpTextSizeVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesTextSize", value = values[["paramVec"]]$leftNotesTextSizeVec[as.numeric(sel)])
updateCheckboxInput(session, "parseStr2lang", value = values[["paramVec"]]$parseStr2langVec[as.numeric(sel)])
updateNumericInput(session, "moveAllKarValueHor", value = values[["paramVec"]]$moveAllKarValueHorVec[as.numeric(sel)])
updateNumericInput(session, "moveAllKarValueY", value = values[["paramVec"]]$moveAllKarValueYVec[as.numeric(sel)])
updateCheckboxInput(session, "verticalPlot", value = values[["paramVec"]]$verticalPlotVec[as.numeric(sel)])
updateNumericInput(session, "karSpaceHor", value = values[["paramVec"]]$karSpaceHorVec[as.numeric(sel)])
updateTextInput(session, "karAnchorLeft", value = values[["paramVec"]]$karAnchorLeftVec[as.numeric(sel)])
updateCheckboxInput(session, "OTUasNote", value = values[["paramVec"]]$OTUasNoteVec[as.numeric(sel)])
updateCheckboxInput(session, "labelOutwards", value = values[["paramVec"]]$labelOutwardsVec[as.numeric(sel)])
# updateCheckboxInput(session, "callPlot", value = values[["paramVec"]]$callPlotVec[as.numeric(sel)] )
updateTextInput(session, "colorBorderMark", value = values[["paramVec"]]$colorBorderMarkVec[as.numeric(sel)])
updateNumericInput(session, "lwd.marks", value = values[["paramVec"]]$lwd.marksVec[as.numeric(sel)])
updateCheckboxInput(session, "gishCenBorder", value = values[["paramVec"]]$gishCenBorderVec[as.numeric(sel)])
updateNumericInput(session, "hideCenLines", value = values[["paramVec"]]$hideCenLinesVec[as.numeric(sel)])
#
# nuc marks
#
# this one is a list #
updateCheckboxGroupInput(session, "markType", selected = values[["paramVec"]]$markTypeVec[[as.numeric(sel)]])
updateCheckboxGroupInput(session, "fetchSelect", selected = values[["paramVec"]]$fetchSelectVec[as.numeric(sel)])
updateNumericInput(session, "amountofSpaces", value = values[["paramVec"]]$amountofSpacesVec[as.numeric(sel)])
updateNumericInput(session, "colNumber", value = values[["paramVec"]]$colNumberVec[as.numeric(sel)])
updateNumericInput(session, "protrudingInt", value = values[["paramVec"]]$protrudingIntVec[as.numeric(sel)])
updateTextInput(session, "mycolors2", value = values[["paramVec"]]$mycolors2Vec[as.numeric(sel)])
updateTextInput(session, "term", value = values[["paramVec"]]$termVec[as.numeric(sel)])
updateCheckboxInput(session, "useGeneNames", value = values[["paramVec"]]$useGeneNamesVec[as.numeric(sel)])
updateCheckboxInput(session, "useRCNames", value = values[["paramVec"]]$useRCNamesVec[as.numeric(sel)])
updateCheckboxInput(session, "makeUnique", value = values[["paramVec"]]$makeUniqueVec[as.numeric(sel)])
updateCheckboxInput(session, "colorFeature", value = values[["paramVec"]]$colorFeatureVec[as.numeric(sel)])
updateRadioButtons(session, "nucMarkStyle", selected = values[["paramVec"]]$nucMarkStyleVec[as.numeric(sel)])
updateRadioButtons(session, "pseudo", selected = values[["paramVec"]]$pseudoVec[as.numeric(sel)])
updateCheckboxInput(session, "mirror", value = values[["paramVec"]]$mirrorVec[as.numeric(sel)])
updateCheckboxInput(session, "addSTARTPos", value = values[["paramVec"]]$addSTARTPosVec[as.numeric(sel)])
}
)
|
/inst/shinyApps/iBoard/obser/obserPresets.R
|
no_license
|
cran/idiogramFISH
|
R
| false
| false
| 24,452
|
r
|
# Mirror a "nucPreset" radio choice onto the prefixed "exampleId" subgroup
# so the two preset selectors stay in sync.
observeEvent(input$nucPreset, {
  chosen <- input$nucPreset
  updateRadioSubgroup(session, "exampleId", "sub", selected = chosen, inline = TRUE)
})
# When an example is picked, extract the numeric suffix of the prefixed
# value ("sub-<n>") and, for the NCBI presets (9-10), mirror it back onto
# the "nucPreset" radio group.
observeEvent(input$exampleId, {
  picked <- as.numeric(gsub(".*-", "", input$exampleId))
  if (picked %in% 9:10) {
    updateRadioButtons(session, "nucPreset", selected = picked, inline = TRUE)
  }
})
# Select an option of a prefixed radio "subgroup" (values created by
# radioSubgroup() as "<id>-<value>").
#
# @param session  shiny session object.
# @param inputId  id of the radio input to update.
# @param id       subgroup prefix used when the choices were built.
# @param inline   passed through to updateRadioButtons().
# @param selected bare (unprefixed) value to select.
# @param ...      forwarded to updateRadioButtons() (previously accepted
#                 but silently dropped).
updateRadioSubgroup <- function(session, inputId, id = "sub", inline, selected, ...) {
  # Rebuild the prefixed value that radioSubgroup() stored for this choice.
  value <- paste0(id, "-", selected)
  # label/choices are left untouched; only the selection changes.
  updateRadioButtons(session, inputId, label = NULL, choices = NULL,
                     inline = inline, selected = value, ...)
}
# Outer container for split radio subgroups: a div carrying the CSS classes
# shiny's radio-group input binding looks for.
radioGroupContainer <- function(inputId, ...) {
  div(
    id = inputId,
    class = "form-group shiny-input-radiogroup shiny-input-container",
    ...
  )
}
# Build one "subgroup" of radio buttons whose values are prefixed with
# "<id>-" so several subgroups can share a single logical input. Only the
# children of the generated widget are returned, so the caller can place
# them inside its own radioGroupContainer().
radioSubgroup <- function(inputId, label, choiceNames, choiceValues, inline = TRUE, first = FALSE, id = "sub") {
  prefixed <- paste0(id, "-", choiceValues)
  choices <- setNames(prefixed, choiceNames)
  # Only the first subgroup carries the initial selection; the rest start
  # with nothing selected (character(0)).
  initial <- if (first == FALSE) character(0) else choices[1]
  widget <- radioButtons(inputId, label, choices, selected = initial, inline = inline)
  widget$children
}
# Parse the prefixed "exampleId" value ("<id>-<number>") into a list with
# the subgroup id and the numeric example number.
exampleId <- reactive({
  req(input$exampleId)
  parts <- strsplit(input$exampleId, "-")[[1]]
  list(
    id = parts[1],
    value = as.numeric(parts[2])
  )
})
#
# param presets
#
# Step forward to the next example preset (wrapping around to 1 after the
# last), switch to the data-input tab for the NCBI presets (9-10), and
# clear all cached example/NCBI-search state so the new preset loads fresh.
observeEvent(
  {
    input$examIncrease
  },
  ignoreInit = TRUE,
  {
    # Scalar conditions: use short-circuit || rather than vectorized |.
    if (values[["number"]] == 0 || values[["number"]] >= maxEx) {
      values[["number"]] <- 1
      values[["canButton"]] <- TRUE
    } else if (values[["number"]] <= maxEx - 1) {
      # Locate the current example inside exampleVec and advance one slot.
      idx <- grep(paste0("\\<", exampleVec[exampleVec == values[["number"]]], "\\>"), exampleVec) + 1
      values[["number"]] <- exampleVec[idx]
      values[["canButton"]] <- TRUE
    }
    # Presets 9-10 are NCBI-based: jump to the matching menu and tab.
    if (values[["number"]] %in% 9:10) {
      CurrentM$menu <- menulist[2]
      updateTabItems(session, "tabs", menulist[2])
      updateTabItems(session, "tabsetpanel4", tablist4[1])
    }
    # Reset cached data frames and NCBI search state.
    values[["dfList"]] <- list()
    values[["button3ab"]] <- NA
    values[["entrez_summary1"]] <- NA
    values[["entrez_titles"]] <- NA
    values[["entrez_selected"]] <- NA
    values[["fetch_listAll"]] <- NA
    values[["authors"]] <- ""
    values[["entrez_search1"]] <- NA
    values[["fetch_list"]] <- NA
    values[["titles_number"]] <- NA
  }
)
# Step back to the previous example preset (wrapping to maxEx from the
# first), switch to the data-input tab for the NCBI presets (9-10), and
# clear all cached example/NCBI-search state so the new preset loads fresh.
observeEvent(
  {
    input$examDecrease
  },
  ignoreInit = TRUE,
  {
    # Scalar conditions: use short-circuit && rather than vectorized &.
    if (values[["number"]] >= 2 && values[["number"]] <= maxEx) {
      # Locate the current example inside exampleVec and step one slot back.
      idx <- grep(paste0("\\<", exampleVec[exampleVec == values[["number"]]], "\\>"), exampleVec) - 1
      values[["number"]] <- exampleVec[idx]
      values[["canButton"]] <- TRUE
    } else {
      # At (or below) the first example: wrap to the last one.
      values[["number"]] <- maxEx
      values[["canButton"]] <- TRUE
    }
    # Presets 9-10 are NCBI-based: jump to the matching menu and tab.
    if (values[["number"]] %in% 9:10) {
      CurrentM$menu <- menulist[2]
      updateTabItems(session, "tabs", menulist[2])
      updateTabItems(session, "tabsetpanel4", tablist4[1])
    }
    # Reset cached data frames and NCBI search state.
    values[["dfList"]] <- list()
    values[["button3ab"]] <- NA
    values[["entrez_summary1"]] <- NA
    values[["entrez_titles"]] <- NA
    values[["entrez_selected"]] <- NA
    values[["fetch_listAll"]] <- NA
    values[["authors"]] <- ""
    values[["entrez_search1"]] <- NA
    values[["fetch_list"]] <- NA
    values[["titles_number"]] <- NA
  }
)
# Load the preset chosen in the "upPreset" selector: reset cached state,
# then pull the preset's stored data frames and note tables out of
# values[["paramVec"]].
observeEvent(
  {
    input$upPresetButton
  },
  ignoreInit = TRUE,
  {
    # values[["go"]]<-FALSE
    values[["canButton"]] <- FALSE
    updateCheckboxInput(session, "fileInsteadNcbi", value = FALSE)
    values[["number"]] <- input$upPreset
    values[["dfList"]] <- list()
    values[["authors"]] <- ""
    # Reset cached NCBI search state.
    for (field in c("button3ab", "entrez_summary1", "entrez_titles",
                    "entrez_selected", "fetch_listAll", "entrez_search1",
                    "fetch_list", "titles_number")) {
      values[[field]] <- NA
    }
    # Extract this preset's stored data frames / note tables by index.
    n <- as.numeric(values[["number"]])
    values[["df1"]] <- values[["paramVec"]]$dfChrSizeVec[n][[1]]
    values[["df1Mark"]] <- values[["paramVec"]]$dfMarkPosVec[n][[1]]
    values[["df1MStyle"]] <- values[["paramVec"]]$dfMarkColorVec[n][[1]]
    values[["notes"]] <- values[["paramVec"]]$notesVec[n][[1]]
    values[["leftNotes"]] <- values[["paramVec"]]$leftNotesVec[n][[1]]
    values[["leftNotesUp"]] <- values[["paramVec"]]$leftNotesUpVec[n][[1]]
  }
)
# Apply the example selected through the "exampleId" radio subgroup:
# record its number and clear cached example/NCBI-search state.
observeEvent(
  {
    input$exampleButton
  },
  ignoreInit = TRUE,
  {
    values[["canButton"]] <- TRUE
    updateCheckboxInput(session, "fileInsteadNcbi", value = FALSE)
    # exampleId() parses input$exampleId into list(id, value).
    values[["number"]] <- exampleId()$value
    values[["dfList"]] <- list()
    values[["authors"]] <- ""
    # Reset cached NCBI search state.
    for (field in c("button3ab", "entrez_summary1", "entrez_titles",
                    "entrez_selected", "fetch_listAll", "entrez_search1",
                    "fetch_list", "titles_number")) {
      values[[field]] <- NA
    }
  }
)
# Apply the preset selected through the "nucPreset" radio group:
# record its number and clear cached example/NCBI-search state.
observeEvent(
  {
    input$nucPresetButton
  },
  ignoreInit = TRUE,
  {
    values[["canButton"]] <- TRUE
    updateCheckboxInput(session, "fileInsteadNcbi", value = FALSE)
    values[["number"]] <- input$nucPreset
    values[["dfList"]] <- list()
    values[["authors"]] <- ""
    # Reset cached NCBI search state.
    for (field in c("button3ab", "entrez_summary1", "entrez_titles",
                    "entrez_selected", "fetch_listAll", "entrez_search1",
                    "fetch_list", "titles_number")) {
      values[[field]] <- NA
    }
  }
)
# When the active example number (numberRe()) changes, synchronize every UI
# control with the per-example values stored in values[["paramVec"]].
# Each *Vec field is indexed by the example number `sel`; the as.numeric()/
# as.logical() wrappers coerce stored values to the type each widget expects.
observeEvent(
numberRe(),
ignoreInit = TRUE,
{
# Guard: proceed only once numberRe() yields a usable positive number.
validate(need(try(numberRe() > 0), "not ready"))
sel <- numberRe()
# Keep the example-selector radio widgets in step with the new number.
if (sel %in% 1:maxEx) {
updateRadioSubgroup(session, "exampleId", "sub", selected = sel, inline = TRUE)
}
# Presets 9-10 also appear in the "nucPreset" radio group.
if (sel %in% 9:10) {
updateRadioButtons(session, "nucPreset", selected = sel, inline = TRUE)
}
# karyotype layout parameters
updateNumericInput(session, "karHeight", value = values[["paramVec"]]$karHeightVec[as.numeric(sel)])
updateNumericInput(session, "karHeiSpace", value = values[["paramVec"]]$karHeiSpaceVec[as.numeric(sel)])
updateNumericInput(session, "amoSepar", value = values[["paramVec"]]$amoSeparVec[as.numeric(sel)])
updateNumericInput(session, "karSepar", value = values[["paramVec"]]$karSeparVec[as.numeric(sel)])
updateNumericInput(session, "chrWidth", value = values[["paramVec"]]$chrWidthVec[as.numeric(sel)])
updateRadioButtons(session, "chrId", selected = values[["paramVec"]]$chrIdVec[as.numeric(sel)])
updateNumericInput(session, "squareness", value = values[["paramVec"]]$squarenessVec[as.numeric(sel)])
updateNumericInput(session, "markN", value = values[["paramVec"]]$markNVec[as.numeric(sel)])
updateNumericInput(session, "n", value = values[["paramVec"]]$nVec[as.numeric(sel)])
updateRadioButtons(session, "orderChr", selected = values[["paramVec"]]$orderChrVec[as.numeric(sel)])
updateRadioButtons(session, "markDistType", selected = values[["paramVec"]]$markDistTypeVec[as.numeric(sel)])
updateCheckboxInput(session, "useOneDot", value = values[["paramVec"]]$useOneDotVec[as.numeric(sel)])
updateTextInput(session, "bannedMarkName", value = values[["paramVec"]]$bannedMarkNameVec[as.numeric(sel)])
updateTextInput(session, "specialOTUNames", value = values[["paramVec"]]$specialOTUNamesVec[as.numeric(sel)])
updateTextInput(session, "addMissingOTUAfter", value = values[["paramVec"]]$addMissingOTUAfterVec[as.numeric(sel)])
updateTextInput(session, "missOTUspacings", value = values[["paramVec"]]$missOTUspacingsVec[as.numeric(sel)])
updateCheckboxInput(session, "origin", value = values[["paramVec"]]$originVec[as.numeric(sel)])
updateTextInput(session, "OTUfamily", value = values[["paramVec"]]$OTUfamilyVec[as.numeric(sel)])
updateTextInput(session, "classChrName", value = values[["paramVec"]]$classChrNameVec[as.numeric(sel)])
updateTextInput(session, "classChrNameUp", value = values[["paramVec"]]$classChrNameUpVec[as.numeric(sel)])
updateTextInput(session, "chrIdPatternRem", value = values[["paramVec"]]$chrIdPatternRemVec[as.numeric(sel)])
updateNumericInput(session, "xModMonoHoloRate", value = values[["paramVec"]]$xModMonoHoloRateVec[as.numeric(sel)])
updateTextInput(session, "specialyTitle", value = values[["paramVec"]]$specialyTitleVec[as.numeric(sel)])
updateNumericInput(session, "specialChrWidth", value = values[["paramVec"]]$specialChrWidthVec[as.numeric(sel)])
updateNumericInput(session, "specialChrSpacing", value = values[["paramVec"]]$specialChrSpacingVec[as.numeric(sel)])
updateTextInput(session, "yTitle", value = values[["paramVec"]]$yTitleVec[as.numeric(sel)])
updateNumericInput(session, "nameChrIndexPos", value = values[["paramVec"]]$nameChrIndexPosVec[as.numeric(sel)])
updateCheckboxInput(session, "cMBeginCenter", value = values[["paramVec"]]$cMBeginCenterVec[as.numeric(sel)])
updateNumericInput(session, "pMarkFac", value = values[["paramVec"]]$pMarkFacVec[as.numeric(sel)])
updateNumericInput(session, "protruding", value = values[["paramVec"]]$protrudingVec[as.numeric(sel)])
updateNumericInput(session, "arrowhead", value = values[["paramVec"]]$arrowheadVec[as.numeric(sel)])
# circularPlot
updateNumericInput(session, "chrLabelSpacing", value = values[["paramVec"]]$chrLabelSpacingVec[as.numeric(sel)])
updateNumericInput(session, "labelSpacing", value = values[["paramVec"]]$labelSpacingVec[as.numeric(sel)])
updateNumericInput(session, "rotation", value = values[["paramVec"]]$rotationVec[as.numeric(sel)])
updateNumericInput(session, "shrinkFactor", value = values[["paramVec"]]$shrinkFactorVec[as.numeric(sel)])
updateNumericInput(session, "separFactor", value = values[["paramVec"]]$separFactorVec[as.numeric(sel)])
updateNumericInput(session, "radius", value = values[["paramVec"]]$radiusVec[as.numeric(sel)])
updateNumericInput(session, "circleCenter", value = values[["paramVec"]]$circleCenterVec[as.numeric(sel)])
updateNumericInput(session, "OTUsrt", value = values[["paramVec"]]$OTUsrtVec[as.numeric(sel)])
updateNumericInput(session, "OTUjustif", value = values[["paramVec"]]$OTUjustifVec[as.numeric(sel)])
updateNumericInput(session, "OTULabelSpacerx", value = values[["paramVec"]]$OTULabelSpacerxVec[as.numeric(sel)])
updateNumericInput(session, "OTUlegendHeight", value = values[["paramVec"]]$OTUlegendHeightVec[as.numeric(sel)])
updateRadioButtons(session, "OTUplacing", selected = values[["paramVec"]]$OTUplacingVec[as.numeric(sel)])
# general
updateNumericInput(session, "chrSpacing", value = as.numeric(values[["paramVec"]]$chrSpacingVec[as.numeric(sel)]))
updateNumericInput(session, "groupSepar", value = as.numeric(values[["paramVec"]]$groupSeparVec[as.numeric(sel)]))
updateRadioButtons(session, "morpho", selected = values[["paramVec"]]$morphoVec[as.numeric(sel)])
updateTextInput(session, "chrColor", value = values[["paramVec"]]$chrColorVec[as.numeric(sel)])
updateTextInput(session, "cenColor", value = values[["paramVec"]]$cenColorVec[as.numeric(sel)])
updateRadioButtons(session, "chrIndex", selected = values[["paramVec"]]$chrIndexVec[as.numeric(sel)])
updateCheckboxInput(session, "karIndex", value = values[["paramVec"]]$karIndexVec[as.numeric(sel)])
updateNumericInput(session, "karIndexPos", value = values[["paramVec"]]$karIndexPosVec[as.numeric(sel)])
updateCheckboxInput(session, "chrSize", value = as.logical(values[["paramVec"]]$chrSizeVec[as.numeric(sel)]))
updateCheckboxInput(session, "showMarkPos", value = as.logical(values[["paramVec"]]$showMarkPosVec[as.numeric(sel)]))
updateCheckboxInput(session, "useMinorTicks", value = as.logical(values[["paramVec"]]$useMinorTicksVec[as.numeric(sel)]))
updateNumericInput(session, "indexIdTextSize", value = as.numeric((values[["paramVec"]]$indexIdTextSizeVec[as.numeric(sel)])))
updateNumericInput(session, "miniTickFactor", value = as.numeric(values[["paramVec"]]$miniTickFactorVec[as.numeric(sel)]))
updateNumericInput(session, "nsmall", value = as.numeric(values[["paramVec"]]$nsmallVec[as.numeric(sel)]))
updateCheckboxInput(session, "chrSizeMbp", value = as.logical((values[["paramVec"]]$chrSizeMbpVec[as.numeric(sel)])))
updateCheckboxInput(session, "chrNameUp", value = as.logical(values[["paramVec"]]$chrNameUpVec[as.numeric(sel)]))
updateNumericInput(session, "distTextChr", value = as.numeric((values[["paramVec"]]$distTextChrVec[as.numeric(sel)])))
updateNumericInput(session, "OTUTextSize", value = as.numeric((values[["paramVec"]]$OTUTextSizeVec[as.numeric(sel)])))
updateCheckboxInput(session, "chromatids", value = values[["paramVec"]]$chromatidsVec[as.numeric(sel)])
updateCheckboxInput(session, "holocenNotAsChromatids", value = values[["paramVec"]]$holocenNotAsChromatidsVec[as.numeric(sel)])
updateNumericInput(session, "xModifier", value = as.numeric(values[["paramVec"]]$xModifierVec[as.numeric(sel)]))
updateCheckboxInput(session, "circularPlot", value = values[["paramVec"]]$circularPlotVec[as.numeric(sel)])
# ruler settings
updateCheckboxInput(session, "ruler", value = values[["paramVec"]]$rulerVec[as.numeric(sel)])
updateNumericInput(session, "rulerPos", value = as.numeric((values[["paramVec"]]$rulerPosVec[as.numeric(sel)])))
updateNumericInput(session, "threshold", value = as.numeric((values[["paramVec"]]$thresholdVec[as.numeric(sel)])))
updateNumericInput(session, "ceilingFactor", value = as.numeric((values[["paramVec"]]$ceilingFactorVec[as.numeric(sel)])))
updateNumericInput(session, "rulerInterval", value = as.numeric((values[["paramVec"]]$rulerIntervalVec[as.numeric(sel)])))
updateNumericInput(session, "rulerIntervalMb", value = as.numeric((values[["paramVec"]]$rulerIntervalMbVec[as.numeric(sel)])))
updateNumericInput(session, "rulerIntervalcM", value = as.numeric((values[["paramVec"]]$rulerIntervalcMVec[as.numeric(sel)])))
updateNumericInput(session, "ruler.tck", value = as.numeric((values[["paramVec"]]$ruler.tckVec[as.numeric(sel)])))
updateCheckboxInput(session, "collapseCen", value = as.logical(values[["paramVec"]]$collapseCenVec[as.numeric(sel)]))
updateNumericInput(session, "rulerNumberSize", value = as.numeric((values[["paramVec"]]$rulerNumberSizeVec[as.numeric(sel)])))
updateNumericInput(session, "rulerNumberPos", value = as.numeric((values[["paramVec"]]$rulerNumberPosVec[as.numeric(sel)])))
updateNumericInput(session, "rulerTitleSize", value = values[["paramVec"]]$rulerTitleSizeVec[as.numeric(sel)])
updateNumericInput(session, "xPosRulerTitle", value = as.numeric((values[["paramVec"]]$xPosRulerTitleVec[as.numeric(sel)])))
# legend, centromere and mark-label settings
updateRadioButtons(session, "legend", selected = values[["paramVec"]]$legendVec[as.numeric(sel)])
updateRadioButtons(session, "cenFormat", selected = values[["paramVec"]]$cenFormatVec[as.numeric(sel)])
updateNumericInput(session, "cenFactor", value = values[["paramVec"]]$cenFactorVec[as.numeric(sel)])
updateNumericInput(session, "centromereSize", value = values[["paramVec"]]$centromereSizeVec[as.numeric(sel)])
updateCheckboxInput(session, "autoCenSize", value = as.logical(values[["paramVec"]]$autoCenSizeVec[as.numeric(sel)]))
updateNumericInput(session, "legendWidth", value = as.numeric((values[["paramVec"]]$legendWidthVec[as.numeric(sel)])))
updateNumericInput(session, "legendHeight", value = as.numeric((values[["paramVec"]]$legendHeightVec[as.numeric(sel)])))
updateTextInput(session, "pattern", value = values[["paramVec"]]$patternVec[as.numeric(sel)])
updateTextInput(session, "markNewLine", value = values[["paramVec"]]$markNewLineVec[as.numeric(sel)])
updateTextInput(session, "forbiddenMark", value = values[["paramVec"]]$forbiddenMarkVec[as.numeric(sel)])
updateTextInput(session, "classGroupName", value = values[["paramVec"]]$classGroupNameVec[as.numeric(sel)])
updateCheckboxInput(session, "remSimiMarkLeg", value = as.logical((values[["paramVec"]]$remSimiMarkLegVec[as.numeric(sel)])))
updateNumericInput(session, "markLabelSize", value = as.numeric((values[["paramVec"]]$markLabelSizeVec[as.numeric(sel)])))
updateNumericInput(session, "markLabelSpacer", value = as.numeric((values[["paramVec"]]$markLabelSpacerVec[as.numeric(sel)])))
updateNumericInput(session, "legendYcoord", value = as.numeric((values[["paramVec"]]$legendYcoordVec[as.numeric(sel)])))
updateCheckboxInput(session, "fixCenBorder", value = as.logical((values[["paramVec"]]$fixCenBorderVec[as.numeric(sel)])))
updateCheckboxInput(session, "bMarkNameAside", value = as.logical(values[["paramVec"]]$bMarkNameAsideVec[as.numeric(sel)]))
updateCheckboxInput(session, "chrBorderColor", value = values[["paramVec"]]$chrBorderColorVec[as.numeric(sel)])
updateNumericInput(session, "lwd.chr", value = as.numeric((values[["paramVec"]]$lwd.chrVec[as.numeric(sel)])))
updateNumericInput(session, "lwd.cM", value = as.numeric((values[["paramVec"]]$lwd.cMVec[as.numeric(sel)])))
updateNumericInput(session, "lwd.mimicCen", value = as.numeric((values[["paramVec"]]$lwd.mimicCenVec[as.numeric(sel)])))
# plot margins and output size
updateNumericInput(session, "xlimLeftMod", value = as.numeric((values[["paramVec"]]$xlimLeftModVec[as.numeric(sel)])))
updateNumericInput(session, "xlimRightMod", value = values[["paramVec"]]$xlimRightModVec[as.numeric(sel)])
updateNumericInput(session, "ylimBotMod", value = values[["paramVec"]]$ylimBotModVec[as.numeric(sel)])
updateNumericInput(session, "ylimTopMod", value = as.numeric((values[["paramVec"]]$ylimTopModVec[as.numeric(sel)])))
updateNumericInput(session, "hwModifier", value = as.numeric(values[["paramVec"]]$hwModifierVec[as.numeric(sel)]))
updateNumericInput(session, "widFactor", value = as.numeric((values[["paramVec"]]$widFactorVec[as.numeric(sel)])))
updateNumericInput(session, "heiFactor", value = as.numeric((values[["paramVec"]]$heiFactorVec[as.numeric(sel)])))
updateRadioButtons(session, "pngorsvg", selected = values[["paramVec"]]$pngorsvgVec[as.numeric(sel)])
updateRadioButtons(session, "pngorsvgDown", selected = values[["paramVec"]]$pngorsvgDownVec[as.numeric(sel)])
updateTextInput(session, "mycolors", value = values[["paramVec"]]$mycolorsVec[as.numeric(sel)])
updateTextInput(session, "markPer", value = values[["paramVec"]]$markPerVec[as.numeric(sel)])
updateTextInput(session, "bToRemove", value = values[["paramVec"]]$bToRemoveVec[as.numeric(sel)])
updateCheckboxInput(session, "perAsFraction", value = values[["paramVec"]]$perAsFractionVec[as.numeric(sel)])
updateTextInput(session, "chrNamesToSwap", value = values[["paramVec"]]$chrNamesToSwapVec[as.numeric(sel)])
# OTU naming / anchoring
updateCheckboxInput(session, "addOTUName", value = values[["paramVec"]]$addOTUNameVec[as.numeric(sel)])
updateRadioButtons(session, "OTUfont", selected = as.character(values[["paramVec"]]$OTUfontVec[as.numeric(sel)]))
updateTextInput(session, "moveKarHor", value = values[["paramVec"]]$moveKarHorVec[as.numeric(sel)])
updateNumericInput(session, "mkhValue", value = values[["paramVec"]]$mkhValueVec[as.numeric(sel)])
updateCheckboxInput(session, "anchor", value = values[["paramVec"]]$anchorVec[as.numeric(sel)])
updateNumericInput(session, "moveAnchorV", value = values[["paramVec"]]$moveAnchorVVec[as.numeric(sel)])
updateNumericInput(session, "moveAnchorH", value = values[["paramVec"]]$moveAnchorHVec[as.numeric(sel)])
updateNumericInput(session, "anchorVsizeF", value = values[["paramVec"]]$anchorVsizeFVec[as.numeric(sel)])
updateNumericInput(session, "anchorHsizeF", value = values[["paramVec"]]$anchorHsizeFVec[as.numeric(sel)])
# notes placement and fonts
updateNumericInput(session, "notesTextSize", value = values[["paramVec"]]$notesTextSizeVec[as.numeric(sel)])
updateNumericInput(session, "notesPosX", value = values[["paramVec"]]$notesPosXVec[as.numeric(sel)])
updateNumericInput(session, "leftNoteFontUp", value = values[["paramVec"]]$leftNoteFontUpVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesPosX", value = values[["paramVec"]]$leftNotesPosXVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesPosY", value = values[["paramVec"]]$leftNotesPosYVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesUpPosY", value = values[["paramVec"]]$leftNotesUpPosYVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesUpPosX", value = values[["paramVec"]]$leftNotesUpPosXVec[as.numeric(sel)])
updateNumericInput(session, "notesPosY", value = values[["paramVec"]]$notesPosYVec[as.numeric(sel)])
updateNumericInput(session, "leftNoteFont", value = values[["paramVec"]]$leftNoteFontVec[as.numeric(sel)])
updateNumericInput(session, "noteFont", value = values[["paramVec"]]$noteFontVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesUpTextSize", value = values[["paramVec"]]$leftNotesUpTextSizeVec[as.numeric(sel)])
updateNumericInput(session, "leftNotesTextSize", value = values[["paramVec"]]$leftNotesTextSizeVec[as.numeric(sel)])
updateCheckboxInput(session, "parseStr2lang", value = values[["paramVec"]]$parseStr2langVec[as.numeric(sel)])
# whole-plot movement and orientation
updateNumericInput(session, "moveAllKarValueHor", value = values[["paramVec"]]$moveAllKarValueHorVec[as.numeric(sel)])
updateNumericInput(session, "moveAllKarValueY", value = values[["paramVec"]]$moveAllKarValueYVec[as.numeric(sel)])
updateCheckboxInput(session, "verticalPlot", value = values[["paramVec"]]$verticalPlotVec[as.numeric(sel)])
updateNumericInput(session, "karSpaceHor", value = values[["paramVec"]]$karSpaceHorVec[as.numeric(sel)])
updateTextInput(session, "karAnchorLeft", value = values[["paramVec"]]$karAnchorLeftVec[as.numeric(sel)])
updateCheckboxInput(session, "OTUasNote", value = values[["paramVec"]]$OTUasNoteVec[as.numeric(sel)])
updateCheckboxInput(session, "labelOutwards", value = values[["paramVec"]]$labelOutwardsVec[as.numeric(sel)])
# updateCheckboxInput(session, "callPlot", value = values[["paramVec"]]$callPlotVec[as.numeric(sel)] )
updateTextInput(session, "colorBorderMark", value = values[["paramVec"]]$colorBorderMarkVec[as.numeric(sel)])
updateNumericInput(session, "lwd.marks", value = values[["paramVec"]]$lwd.marksVec[as.numeric(sel)])
updateCheckboxInput(session, "gishCenBorder", value = values[["paramVec"]]$gishCenBorderVec[as.numeric(sel)])
updateNumericInput(session, "hideCenLines", value = values[["paramVec"]]$hideCenLinesVec[as.numeric(sel)])
#
# nuc marks
#
# this one is a list #
updateCheckboxGroupInput(session, "markType", selected = values[["paramVec"]]$markTypeVec[[as.numeric(sel)]])
updateCheckboxGroupInput(session, "fetchSelect", selected = values[["paramVec"]]$fetchSelectVec[as.numeric(sel)])
updateNumericInput(session, "amountofSpaces", value = values[["paramVec"]]$amountofSpacesVec[as.numeric(sel)])
updateNumericInput(session, "colNumber", value = values[["paramVec"]]$colNumberVec[as.numeric(sel)])
updateNumericInput(session, "protrudingInt", value = values[["paramVec"]]$protrudingIntVec[as.numeric(sel)])
updateTextInput(session, "mycolors2", value = values[["paramVec"]]$mycolors2Vec[as.numeric(sel)])
updateTextInput(session, "term", value = values[["paramVec"]]$termVec[as.numeric(sel)])
updateCheckboxInput(session, "useGeneNames", value = values[["paramVec"]]$useGeneNamesVec[as.numeric(sel)])
updateCheckboxInput(session, "useRCNames", value = values[["paramVec"]]$useRCNamesVec[as.numeric(sel)])
updateCheckboxInput(session, "makeUnique", value = values[["paramVec"]]$makeUniqueVec[as.numeric(sel)])
updateCheckboxInput(session, "colorFeature", value = values[["paramVec"]]$colorFeatureVec[as.numeric(sel)])
updateRadioButtons(session, "nucMarkStyle", selected = values[["paramVec"]]$nucMarkStyleVec[as.numeric(sel)])
updateRadioButtons(session, "pseudo", selected = values[["paramVec"]]$pseudoVec[as.numeric(sel)])
updateCheckboxInput(session, "mirror", value = values[["paramVec"]]$mirrorVec[as.numeric(sel)])
updateCheckboxInput(session, "addSTARTPos", value = values[["paramVec"]]$addSTARTPosVec[as.numeric(sel)])
}
)
|
###########################################################################
## Generating figures from literature search                             ##
## make sure that the csv file is updated to match the current xlsx file ##
###########################################################################
# Set the working directory to whichever machine-specific path exists.
# (The original had two unconditional setwd() calls, so the one for the
# other machine always errored when the script was sourced.)
review_dirs <- c(
  "C:/Users/Sarah/Documents/Dropbox/PhD/Dissertation/Dissertation sections/literature review/Review documents", # campus pc
  "C:/Users/davebetts/Dropbox/PhD/Dissertation/Dissertation sections/literature review/Review documents"        # laptop
)
existing_dirs <- review_dirs[dir.exists(review_dirs)]
if (length(existing_dirs) == 0) {
  stop("None of the known review-document directories exist.", call. = FALSE)
}
setwd(existing_dirs[1])
dir()
## import data set: first csv line skipped; "." and "" treated as missing
rv0 <- read.csv("Reviews01.csv",
                header = TRUE, skip = 1,
                na.strings = c(".", ""), strip.white = TRUE)
dim(rv0)   # check dimensions
str(rv0)
head(rv0)
names(rv0)
# Drop the junk journal rows flagged LitRev == "0" ...
rv1 <- subset(rv0, rv0$LitRev != "0")
# ... and drop columns that contain nothing but blanks / NA.
rv1 <- rv1[, !apply(rv1, 2, function(x)
  all(gsub(" ", "", x) == "", na.rm = TRUE))]
rv1
##############################################################
### Number of Authors and Publications ###
##############################################################
library(reshape)
# The first three columns hold the author-name strings for each publication.
authors<-rv1[,1:3]
str(authors)
authors<-melt.list(authors) # stack the columns into one long column (reshape's melt.list; the 'reshape' package is superseded by reshape2/tidyr)
authors<-authors[,1]
# Author lists are "; "-separated; swap in a placeholder token so strsplit is unambiguous.
authors<-gsub('; ','xyz',authors)
authors<-strsplit(authors,"xyz")
authors<-melt.list(authors) # flatten the list of split names back into one column
authors<-na.omit(authors[,1])
authorsU<-length(unique(authors)) # number of distinct author names
authorsR<-length(authors)-authorsU # number of repeat appearances (total minus unique)
authorsT<-as.data.frame(cbind(authorsU,authorsR))
colnames(authorsT)<-c("Authors","Repeats")
write.csv(authorsT,"unique_authors.csv",row.names=F)
authors<-as.data.frame(authors, row.number=F) # NOTE(review): 'row.number' is not an as.data.frame() argument and is silently ignored
authors<-as.data.frame(authors, row.number=F,stringsAsFactors=F) # redoes the line above with character (non-factor) columns
colnames(authors)<- "Author"
summary(authors)
unique(authors)
write.csv(authors,"authors.csv",row.names=F)
str(authors)
##############################################################
### Publication Years ###
##############################################################
### basic statistics
range(rv1$Year)
summary(rv1$Year)
#not a bar plot but similar information as hist()
plot(table(rv1$Year))
### for histogram
yrmin=min(rv1$Year) #determine earliest publication year
yrmax=max(rv1$Year) #determine latest publication year
# Breaks are shifted by 0.5 so each integer year sits centered in its own bin
# (exactly one bar per calendar year).
histinfo<-hist(rv1$Year, #histogram of years of publication
xlab="Publication Year", #X-axis label
ylab="", # no Y-axis label
breaks=seq(from=yrmin-0.5,to=yrmax+0.5, by=1), #number of breaks in histogram based on the range of the variable "Years"
main="Number of Publications per Year", #Title of graph
col="lightgrey", #color of bars
xlim=c(yrmin-0.5,yrmax+0.5), #set range of x axis
axes=F) #removing automatic axes labels
histinfo
### labelling (axes were suppressed above so they can be drawn manually here)
axis(2, col.axis="black", las=2) #create axis and labels for Y axis
axis(1, xaxp=c(yrmin,yrmax+1,(yrmax+1-yrmin))) #create axis and labels for X axis
box() #enclose histogram within a box
##############################################################
### table of journals ###
##############################################################
# basic information
unique(rv1$Journal)
length(unique(rv1$Journal))
library(plyr) #allows for the creation of tables in a format better for reproduction
count(rv1,"Journal") # publications per journal
##############################################################
### table of journal types ###
##############################################################
# basic information
unique(rv1$JournalType)
length(unique(rv1$JournalType))
library(plyr) #allows for the creation of tables in a format better for reproduction
Type<-count(rv1,"JournalType") # publications per journal category
Type
Journals<-count(rv1,c("Journal", "JournalType")) # publications per journal, keeping its category
Journals<-Journals[,c(1,3,2)] # reorder columns to Journal, freq, JournalType
Journals
# NOTE(review): by=c("JournalType","JournalType") repeats the same key; a single
# "JournalType" would be equivalent.
Categories<-merge(Type, Journals,by=c("JournalType","JournalType"),all.x=T)
Categories<-Categories[,c(3,4,1,2)]
Categories
colnames(Categories)<-c("Journal","Number of Publications","Category","Publications per Category")
write.csv(Categories[order(Categories$Category),],"Journals.csv",row.names=F)
ls(Journals)
ls(Type)
# TODO: write rows into the table format used in the CSV submitted to Sarah on 1/14
##############################################################
### Geography ###
##############################################################
# create subset of geographic information
names(rv0) # double check column numbers
geo<-rv0[,c(1:6,116:123)] # ID columns plus the geography columns (116:123)
str(geo)
head(geo)
tail(geo)
##import data set
geogr0<-read.csv("geogr.csv",header=T, na.strings=c(".","")) #read from csv file
dim(geogr0) # check dimensions
str(geogr0)
head(geogr0)
names(geogr0)
library(plyr) #allows for the creation of tables in a format better for reproduction
count(geogr0,"Region")
count(geogr0,"Country")
count(geogr0,"State")
count(geogr0,"Locality")
count(geogr0,c("Title","Region")) # fixed: the closing parenthesis was missing
table(geogr0$Title,geogr0$Region)
count(geogr0,"JournalType") # (the original repeated this identical call four times)
# Cross-tabulate Region against Title over the UNIQUE Title/Region pairs.
# fixed: the original `table(unique(geogr0$Title~geogr0Region))` was broken --
# `geogr0Region` lacked the `$`, and a formula cannot be passed through
# unique()/table(). The intent (tabulating distinct Title/Region combinations)
# is preserved below; the duplicate copy of the line was also dropped.
table(unique(geogr0[, c("Title", "Region")]))
|
/R/figures.r
|
no_license
|
davebetts/Data-management-scripts
|
R
| false
| false
| 5,187
|
r
|
## NOTE(review): this section is a verbatim duplicate of the figures.r script
## earlier in this file -- the source dataset stores each file under both its
## 'content' and 'text' columns. Kept byte-identical; see the first copy.
###########################################################################
## Generating figures from literature search ##
## make sure that the csv file is updated to match the current xlsx file ##
###########################################################################
## path to campus pc
setwd("C:/Users/Sarah/Documents/Dropbox/PhD/Dissertation/Dissertation sections/literature review/Review documents")
## path to laptop
setwd("C:/Users/davebetts/Dropbox/PhD/Dissertation/Dissertation sections/literature review/Review documents")
dir()
##import data set
rv0<-read.csv("Reviews01.csv",header=T,skip=1,na.strings=c(".",""),strip.white=T) #readfrom csv file
dim(rv0) # check dimensions
str(rv0)
head(rv0)
names(rv0)
#get rid of some of the junker rows of journals
rv1<-subset(rv0,rv0$LitRev!="0",) #get rid of the rows where LitRev=0
rv1<-rv1[,!apply(rv1, 2, function(x)
all(gsub(" ", "", x)=="", na.rm=TRUE))] #removes columns of just null values
rv1
##############################################################
### Number of Authors and Publications ###
##############################################################
library(reshape)
authors<-rv1[,1:3]
str(authors)
authors<-melt.list(authors)
authors<-authors[,1]
authors<-gsub('; ','xyz',authors)
authors<-strsplit(authors,"xyz")
authors<-melt.list(authors)
authors<-na.omit(authors[,1])
authorsU<-length(unique(authors))
authorsR<-length(authors)-authorsU
authorsT<-as.data.frame(cbind(authorsU,authorsR))
colnames(authorsT)<-c("Authors","Repeats")
write.csv(authorsT,"unique_authors.csv",row.names=F)
authors<-as.data.frame(authors, row.number=F)
authors<-as.data.frame(authors, row.number=F,stringsAsFactors=F)
colnames(authors)<- "Author"
summary(authors)
unique(authors)
write.csv(authors,"authors.csv",row.names=F)
str(authors)
##############################################################
### Publication Years ###
##############################################################
### basic statistics
range(rv1$Year)
summary(rv1$Year)
#not a bar plot but similar information as hist()
plot(table(rv1$Year))
### for histogram
yrmin=min(rv1$Year) #determine earliest publication year
yrmax=max(rv1$Year) #determine latest publication year
histinfo<-hist(rv1$Year, #histogram of years of publication
xlab="Publication Year", #X-axis label
ylab="", # no Y-axis label
breaks=seq(from=yrmin-0.5,to=yrmax+0.5, by=1), #number of breaks in histogram based on the range of the variable "Years"
main="Number of Publications per Year", #Title of graph
col="lightgrey", #color of bars
xlim=c(yrmin-0.5,yrmax+0.5), #set range of x axis
axes=F) #removing automatic axes labels
histinfo
### labelling
axis(2, col.axis="black", las=2) #create axis and labels for Y axis
axis(1, xaxp=c(yrmin,yrmax+1,(yrmax+1-yrmin))) #create axis and labels for X axis
box() #enclose histogram within a box
##############################################################
### table of journals ###
##############################################################
# basic information
unique(rv1$Journal)
length(unique(rv1$Journal))
library(plyr) #allows for the creation of tables in a format better for reproduction
count(rv1,"Journal")
##############################################################
### table of journal types ###
##############################################################
# basic information
unique(rv1$JournalType)
length(unique(rv1$JournalType))
library(plyr) #allows for the creation of tables in a format better for reproduction
Type<-count(rv1,"JournalType")
Type
Journals<-count(rv1,c("Journal", "JournalType"))
Journals<-Journals[,c(1,3,2)]
Journals
Categories<-merge(Type, Journals,by=c("JournalType","JournalType"),all.x=T)
Categories<-Categories[,c(3,4,1,2)]
Categories
colnames(Categories)<-c("Journal","Number of Publications","Category","Publications per Category")
write.csv(Categories[order(Categories$Category),],"Journals.csv",row.names=F)
ls(Journals)
ls(Type)
#write in rows to table for that is in the CSV that i submittedd to sarah on 1/14
##############################################################
### Geography ###
##############################################################
# create subset of geographic information
names(rv0) # double check column numbers
geo<-rv0[,c(1:6,116:123)] # ID columns plus the geography columns (116:123)
str(geo)
head(geo)
tail(geo)
##import data set
geogr0<-read.csv("geogr.csv",header=T, na.strings=c(".","")) #read from csv file
dim(geogr0) # check dimensions
str(geogr0)
head(geogr0)
names(geogr0)
library(plyr) #allows for the creation of tables in a format better for reproduction
count(geogr0,"Region")
count(geogr0,"Country")
count(geogr0,"State")
count(geogr0,"Locality")
count(geogr0,c("Title","Region")) # fixed: the closing parenthesis was missing
table(geogr0$Title,geogr0$Region)
count(geogr0,"JournalType") # (the original repeated this identical call four times)
# Cross-tabulate Region against Title over the UNIQUE Title/Region pairs.
# fixed: the original `table(unique(geogr0$Title~geogr0Region))` was broken --
# `geogr0Region` lacked the `$`, and a formula cannot be passed through
# unique()/table(). The intent (tabulating distinct Title/Region combinations)
# is preserved below; the duplicate copy of the line was also dropped.
table(unique(geogr0[, c("Title", "Region")]))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/continuous_step.R
\name{fire}
\alias{fire}
\title{Fire transition, changing the individual}
\usage{
fire(transition, individual, when, variables = NULL)
}
\arguments{
\item{transition}{A list of transitions}
\item{individual}{A list with individual properties}
\item{when}{A time}
\item{variables}{Any global simulation state}
}
\value{
a new list describing the individual
}
\description{
This function is for testing the transitions on data for a
single individual. It's a helper function. The running simulation
doesn't call this function.
}
\seealso{
\code{\link{is_enabled}}, \code{\link{when}}
}
|
/macro/man/fire.Rd
|
no_license
|
dd-harp/MASH
|
R
| false
| true
| 683
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/continuous_step.R
\name{fire}
\alias{fire}
\title{Fire transition, changing the individual}
\usage{
fire(transition, individual, when, variables = NULL)
}
\arguments{
\item{transition}{A list of transitions}
\item{individual}{A list with individual properties}
\item{when}{A time}
\item{variables}{Any global simulation state}
}
\value{
a new list describing the individual
}
\description{
This function is for testing the transitions on data for a
single individual. It's a helper function. The running simulation
doesn't call this function.
}
\seealso{
\code{\link{is_enabled}}, \code{\link{when}}
}
|
# Auto-generated regression/fuzz input (valgrind harness) for meteor:::ET0_Makkink.
# The values are deliberately degenerate (empty Rs vector, physically impossible
# relh/temp magnitudes) to exercise edge-case handling in the compiled routine;
# do not "fix" them.
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61570480682008e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist) # call the unexported function with the list as named args
str(result) # dump the result structure for comparison against the recorded run
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853204-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 659
|
r
|
# NOTE(review): verbatim duplicate of the auto-generated valgrind fuzz harness
# above (the dataset stores each file under both 'content' and 'text' columns).
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61570480682008e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
# --- Vector indexing practice (interactive teaching transcript) ---
# NOTE(review): several lines below error ON PURPOSE to demonstrate pitfalls;
# they must not be "fixed".
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
names(x) <- c('a', 'b', 'c', 'd', 'e')
x
x[1]
x[4]
x[c(1, 3)]
x[1:4]
x[c(1,1,3)] # the same index may be repeated
x[6] # out-of-range index returns NA
x[0] # If we ask for the 0th element, we get an empty vector
x[-2] # use a negative number as the index of a vector, R will return every element except for the one specified
x[c(-1, -5)] # We can skip multiple elements
x[-1:3] # errors: -1:3 is the sequence -1,0,1,2,3 -- positive and negative subscripts cannot be mixed
x[-(1:3)] # parenthesize the range so the whole sequence is negated
x <- x[-4]
x
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
names(x) <- c('a', 'b', 'c', 'd', 'e')
print(x)
#
x[2:4]
x[c(2, 3, 4)]
x[-c(1, 5)]
x[c(2:4)]
x[c("a", "c")] # subsetting by name
x
x[-which(names(x) == "a")] # To skip (or remove) a single named element
names(x) == "a"
which(names(x) == "a") # which then converts this to an index
x[-which(names(x) %in% c("a", "c"))] # Skipping multiple named indices is similar, but uses a different comparison operator:
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
names(x) <- c('a', 'b', 'c', 'd', 'e')
print(x)
x[-which(names(x) == "g")] # "g" matches nothing, so which() gives integer(0) and the result is an EMPTY vector -- a classic gotcha
names(x) == "g"
which(names(x) == "g")
x <- 1:3
x
names(x) <- c('a', 'a', 'a') # names need not be unique
x
x['a'] # name lookup returns only the FIRST match
x[which(names(x) == 'a')] # which() returns all matching positions
x[c(TRUE, TRUE, FALSE, FALSE)] # logical index longer/shorter than x is recycled
x[c(TRUE, FALSE)]
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
x > 6
x[x > 6]
x[x > 6 & x < 7.2]
x[x > 4 & x < 6]
x[x < 7 & x > 4]
x_subset <- x[x<7 & x>4]
print(x_subset)
## data frames
# --- Data-frame practice (interactive teaching transcript) ---
# NOTE(review): several lines below error ON PURPOSE to demonstrate pitfalls.
cats <- data.frame(coat = c("calico", "black", "tabby"), weight = c(2.1, 5.0, 3.2), likes_string = c(1, 0, 1))
str(cats)
typeof(cats) # "list" -- a data frame is a list of equal-length columns
class(cats)
is.data.frame(cats)
cats <- read.csv(file = "data/cats.csv")
cats
cats$weight
cats$coat
cats$weight + 2
paste("My cat is", cats$coat)
typeof(cats$weight)
cats[1] # single bracket keeps the data-frame container (1-column data frame)
cats[[1]] # double bracket extracts the column itself as a vector
typeof(cats[1])
typeof(cats[[1]])
typeof(cats$coat)
cats[[1:2]] # recursive [[ indexing: element 2 OF column 1, not columns 1:2
cats[[-2]] # errors: [[ requires a single positive index
cats[-1]
d["coat"] # NOTE(review): 'd' is undefined -- presumably a typo for cats["coat"]
cats$coat
cats[1:3,]
cats[3,]
cats[,3]
cats[, 1]
age <- c(2,3,5,12)
cats
cats <- cbind(cats, age) # errors if length(age) != nrow(cats)
age <- c(4,5,8)
cats <- cbind(cats, age)
cats
levels(cats$coat)
levels(cats$coat) <- c(levels(cats$coat, 'tortoiseshell')) # misplaced parenthesis: 'tortoiseshell' ends up INSIDE levels() and is ignored
levels(cats$coat) <- c(levels(cats$coat), 'tortoiseshell') # corrected form: append a new factor level
cats <- rbind(cats, list("tortoiseshell", 3.3, TRUE, 9))
str(cats)
perinfo <- data.frame(infor = c("first_name", "last_name", "lucky_number"), ruxiang = c("ruixiang", "liu", 6))
perinfo
perinfo <- rbind(perinfo, list("hao", "zhang", 8)) # three values for a 2-column frame -- demonstrates a shape mismatch
perinfo
perinfo <- data.frame(first = c('ruixiang'), last = c('liu'), lucky_number = c(6))
perinfo
perinfo <- rbind(perinfo, list("hao", "zhang", 8))
perinfo
perinfo <- rbind(perinfo, list('hao', 'zhang', 8))
perinfo <- -c(4) # NOTE(review): this OVERWRITES perinfo with the scalar -4; presumably perinfo[-4, ] (drop row 4) was intended
perinfo
df <- data.frame(first = c('Grace'),
last = c('Hopper'),
lucky_number = c(0),
stringsAsFactors = FALSE)
df <- rbind(df, list('Marie', 'Curie', 238) )
df
perinfo <- data.frame(first = c('ruixiang'), last = c('liu'), lucky_number = c(6), stringsAsFactors = FALSE) # stringsAsFactors = FALSE is required here, otherwise rbind of new strings fails on the factor columns
perinfo <- rbind(perinfo, list('hao', 'zhang', 8))
perinfo
perinfo <- rbind(perinfo, list('Marie', 'Curie', 238) )
perinfo <- data.frame(first = c("ruixiang"),
last = c("liu"),
lucky_number = c(6),
stringsAsFactors = FALSE)
perinfo <- rbind(perinfo, list('hao', 'zhang', 8))
perinfo
age <- c(2,3,5,12)
cats
levels(cats$coat)
str(cats)
cats$coat <- as.character(cats$coat) # convert the factor column to plain character
str(cats$coat)
str(cats)
newRow <- list("tortoiseshell", 3.3, TRUE, 9)
cats <- rbind(cats, newRow)
cats
cats[-4,] # view without row 4 (cats itself unchanged)
cats[c(-4, -5), ]
perinfo
cats <- rbind(cats, list('<NA>', 4, 1, 6))
CATS # NOTE(review): R is case-sensitive -- 'CATS' is undefined (typo for cats)
cats
na.omit(cats) # returns a copy with NA rows dropped; does not modify cats
cats
cats <- cats[c(-5, -6, -7), ]
cats
?c
cats <- rbind(cats, cats) # duplicate every row; row names get "1"/"11"-style suffixes
cats
rownames(cats) <- NULL # reset row names to a clean 1..n sequence
rownames(cats)
cats
perinfo
perinfo <- cbind(perinfo, coffeetime = c(TRUE, TRUE))
perinfo
perinfo <- cbind(perinfo, dinertime = c(FALSE,FALSE))
perinfo
|
/week-06.R
|
no_license
|
ruixiangliu/BCB546-R-Exercise
|
R
| false
| false
| 3,683
|
r
|
# NOTE(review): this section is a verbatim duplicate of the week-06 teaching
# transcript earlier in this file (the dataset stores each file under both its
# 'content' and 'text' columns). Kept byte-identical; see the first copy.
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
names(x) <- c('a', 'b', 'c', 'd', 'e')
x
x[1]
x[4]
x[c(1, 3)]
x[1:4]
x[c(1,1,3)]
x[6]
x[0] # If we ask for the 0th element, we get an empty vector
x[-2] # use a negative number as the index of a vector, R will return every element except for the one specified
x[c(-1, -5)] # We can skip multiple elements
x[-1:3]
x[-(1:3)]
x <- x[-4]
x
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
names(x) <- c('a', 'b', 'c', 'd', 'e')
print(x)
#
x[2:4]
x[c(2, 3, 4)]
x[-c(1, 5)]
x[c(2:4)]
x[c("a", "c")]
x
x[-which(names(x) == "a")] # To skip (or remove) a single named element
names(x) == "a"
which(names(x) == "a") # which then converts this to an index
x[-which(names(x) %in% c("a", "c"))] # Skipping multiple named indices is similar, but uses a different comparison operator:
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
names(x) <- c('a', 'b', 'c', 'd', 'e')
print(x)
x[-which(names(x) == "g")]
names(x) == "g"
which(names(x) == "g")
x <- 1:3
x
names(x) <- c('a', 'a', 'a')
x
x['a']
x[which(names(x) == 'a')]
x[c(TRUE, TRUE, FALSE, FALSE)]
x[c(TRUE, FALSE)]
x <- c(5.4, 6.2, 7.1, 4.8, 7.5)
x > 6
x[x > 6]
x[x > 6 & x < 7.2]
x[x > 4 & x < 6]
x[x < 7 & x > 4]
x_subset <- x[x<7 & x>4]
print(x_subset)
## data frames
cats <- data.frame(coat = c("calico", "black", "tabby"), weight = c(2.1, 5.0, 3.2), likes_string = c(1, 0, 1))
str(cats)
typeof(cats)
class(cats)
is.data.frame(cats)
cats <- read.csv(file = "data/cats.csv")
cats
cats$weight
cats$coat
cats$weight + 2
paste("My cat is", cats$coat)
typeof(cats$weight)
cats[1]
cats[[1]]
typeof(cats[1])
typeof(cats[[1]])
typeof(cats$coat)
cats[[1:2]]
cats[[-2]]
cats[-1]
d["coat"]
cats$coat
cats[1:3,]
cats[3,]
cats[,3]
cats[, 1]
age <- c(2,3,5,12)
cats
cats <- cbind(cats, age)
age <- c(4,5,8)
cats <- cbind(cats, age)
cats
levels(cats$coat)
levels(cats$coat) <- c(levels(cats$coat, 'tortoiseshell'))
levels(cats$coat) <- c(levels(cats$coat), 'tortoiseshell')
cats <- rbind(cats, list("tortoiseshell", 3.3, TRUE, 9))
str(cats)
perinfo <- data.frame(infor = c("first_name", "last_name", "lucky_number"), ruxiang = c("ruixiang", "liu", 6))
perinfo
perinfo <- rbind(perinfo, list("hao", "zhang", 8))
perinfo
perinfo <- data.frame(first = c('ruixiang'), last = c('liu'), lucky_number = c(6))
perinfo
perinfo <- rbind(perinfo, list("hao", "zhang", 8))
perinfo
perinfo <- rbind(perinfo, list('hao', 'zhang', 8))
perinfo <- -c(4)
perinfo
df <- data.frame(first = c('Grace'),
last = c('Hopper'),
lucky_number = c(0),
stringsAsFactors = FALSE)
df <- rbind(df, list('Marie', 'Curie', 238) )
df
perinfo <- data.frame(first = c('ruixiang'), last = c('liu'), lucky_number = c(6), stringsAsFactors = FALSE) # must add the pamameters that stringAsFacotrs = False, else it will be make mistake.
perinfo <- rbind(perinfo, list('hao', 'zhang', 8))
perinfo
perinfo <- rbind(perinfo, list('Marie', 'Curie', 238) )
perinfo <- data.frame(first = c("ruixiang"),
last = c("liu"),
lucky_number = c(6),
stringsAsFactors = FALSE)
perinfo <- rbind(perinfo, list('hao', 'zhang', 8))
perinfo
age <- c(2,3,5,12)
cats
levels(cats$coat)
str(cats)
cats$coat <- as.character(cats$coat)
str(cats$coat)
str(cats)
newRow <- list("tortoiseshell", 3.3, TRUE, 9)
cats <- rbind(cats, newRow)
cats
cats[-4,]
cats[c(-4, -5), ]
perinfo
cats <- rbind(cats, list('<NA>', 4, 1, 6))
CATS
cats
na.omit(cats)
cats
cats <- cats[c(-5, -6, -7), ]
cats
?c
cats <- rbind(cats, cats)
cats
rownames(cats) <- NULL
rownames(cats)
cats
perinfo
perinfo <- cbind(perinfo, coffeetime = c(TRUE, TRUE))
perinfo
perinfo <- cbind(perinfo, dinertime = c(FALSE,FALSE))
perinfo
|
#' @title MLB player statistics from ESPN
#'
#' Scrapes the ESPN MLB player-statistics table for one season and returns it
#' as a tibble with standardized, snake_case column names.
#'
#' @param year numeric. Season to fetch (between 2002 and the current year);
#'   defaults to the current calendar year.
#' @param stats character. One of "batting", "pitching" or "fielding".
#' @param season_type character. One of "regular" or "playoffs".
#' @return A tibble of ESPN MLB player statistics for the requested season.
#' @export
#' @import tidyr dplyr purrr janitor rvest stringr
#' @importFrom dplyr %>%
#' @importFrom jsonlite fromJSON
#' @importFrom glue glue
#' @importFrom janitor clean_names
#' @examples
#' # year = season to fetch
#' # stats = batting, pitching, fielding
#' # season_type = regular or playoffs
#' # Get the 2012 playoff pitching statistics
#' \donttest{espn_player_stats(2012, "pitching", "playoffs")}
#'
#'
#'
espn_player_stats <- function(year = year_actual, stats = "batting", season_type = "regular" ){
# NOTE: the default `year = year_actual` only works because R evaluates
# argument defaults lazily, after the assignment below has run.
year_actual <- as.double(substr(Sys.Date(), 1, 4)) # current calendar year
# --- input validation ---
if (!season_type %in% c("regular", "playoffs")) {
stop("Please choose season_type of 'regular' or 'playoffs'")
}
if (!stats %in% c("batting", "pitching", "fielding")) {
stop("Please choose season_type of 'batting', 'pitching' o 'fielding!'")
}
if (!dplyr::between(as.numeric(year), 2002, year_actual)) {
stop(paste("Please choose season between 2002 and", year_actual))
}
message(
dplyr::if_else(
season_type == "regular",
glue::glue("Getting {stats} stats de la {season_type} season del {year}!"),
glue::glue("Getting {stats} stats de los {season_type} del {year}!")
)
)
# ESPN encodes the season type in the URL: "2" = regular season, "3" = playoffs
season_type <- dplyr::if_else(season_type == "regular", "2", "3")
url <- glue::glue("https://www.espn.com/mlb/stats/player/_/view/{stats}/season/{year}/seasontype/{season_type}&limit=200")
# Target column names for each stats table; order must match ESPN's layout.
batting_n <- c("year", "season_type", "rank", "name", "team", "pos", "games_played", "at_bats",
"runs", "hits", "batting_avg","doubles", "triples", "home_runs", "runs_batted_in",
"total_bases", "walks","strikeouts", "stolen_bases", "on_base_pct",
"slugging_pct","opb_slg_pct", "war")
pitching_n <- c("year", "season_type","rank", "name", "team", "pos", "games_played", "games_started",
"quality_starts", "earned_run_avg", "wins", "losses", "saves", "holds",
"innings_pitched", "hits", "earned_runs", "home_runs", "walks", "strikeouts",
"strikes_x_9_i", "war", "whip")
fielding_n <- c("year", "season_type", "rank", "name", "team", "pos", "games_played",
"games_started", "full_innings", "total_chances", "putouts", "assists",
"fielding_pct", "errors", "double_plays", "range_factor", "passed_balls",
"stolen_bases_allowed", "caught_stealing", "caught_stealing_pct", "dwar")
# case_when branches must share a type, hence list(); [[1]] unwraps the vector.
fix_names <- dplyr::case_when(
stats == "batting" ~ list(batting_n),
stats == "pitching" ~ list(pitching_n),
stats == "fielding" ~ list(fielding_n)
)[[1]]
espn <- url %>%
rvest::read_html() %>%
rvest::html_table(fill = TRUE) %>% # one element per <table> node on the page
dplyr::bind_cols() %>% # ESPN splits rank/name and the stats into side-by-side tables
janitor::clean_names() %>%
dplyr::as_tibble() %>%
# The scraped "name" cell ends with the team abbreviation (a trailing run of
# non-lowercase characters); split it into its own column. -- assumes ESPN's
# current markup; TODO confirm against the live page.
dplyr::mutate(team = stringr::str_extract(.data$name, "[^.[::a-z::]]+$"),
name = stringr::str_remove(.data$name, "[^.[::a-z::]]+$"),
name = stringr::str_squish(.data$name),
year = year,
# NOTE(review): season_type is character ("2"/"3") here; "2" == 2 is TRUE
# only via implicit coercion -- comparing against "2" would be clearer.
season_type = dplyr::if_else(season_type == 2, "regular", "playoffs")
)%>%
dplyr::select(
"year", "season_type", "rk", "name", "team", dplyr::everything()
) %>%
purrr::set_names(nm = fix_names) # assumes scraped column count == length(fix_names) -- TODO confirm
return(espn)
}
|
/R/espn_player_stats.R
|
no_license
|
IvoVillanueva/mlbstatsR
|
R
| false
| false
| 3,495
|
r
|
# NOTE(review): this section is a verbatim duplicate of espn_player_stats
# earlier in this file (the dataset stores each file under both its 'content'
# and 'text' columns). Kept byte-identical; see the first copy for commentary.
#' @title Estadisticas de los jugadores de la MLB en ESPN
#' @param year numeric
#' @param stats character
#' @param season_type character
#' @return Estadisticas de la MLB en ESPN
#' @export
#' @import tidyr dplyr purrr janitor rvest stringr
#' @importFrom dplyr %>%
#' @importFrom jsonlite fromJSON
#' @importFrom glue glue
#' @importFrom janitor clean_names
#' @examples
#' # year = year que queremos visualizar
#' # stats = batting, pitching, fielding
#' # season_type = regular o playoffs
#' # Get las estadisticas de pitcheo en playoffs de 2003
#' \donttest{espn_player_stats(2012, "pitching", "playoffs")}
#'
#'
#'
espn_player_stats <- function(year = year_actual, stats = "batting", season_type = "regular" ){
year_actual <- as.double(substr(Sys.Date(), 1, 4))
if (!season_type %in% c("regular", "playoffs")) {
stop("Please choose season_type of 'regular' or 'playoffs'")
}
if (!stats %in% c("batting", "pitching", "fielding")) {
stop("Please choose season_type of 'batting', 'pitching' o 'fielding!'")
}
if (!dplyr::between(as.numeric(year), 2002, year_actual)) {
stop(paste("Please choose season between 2002 and", year_actual))
}
message(
dplyr::if_else(
season_type == "regular",
glue::glue("Getting {stats} stats de la {season_type} season del {year}!"),
glue::glue("Getting {stats} stats de los {season_type} del {year}!")
)
)
season_type <- dplyr::if_else(season_type == "regular", "2", "3")
url <- glue::glue("https://www.espn.com/mlb/stats/player/_/view/{stats}/season/{year}/seasontype/{season_type}&limit=200")
batting_n <- c("year", "season_type", "rank", "name", "team", "pos", "games_played", "at_bats",
"runs", "hits", "batting_avg","doubles", "triples", "home_runs", "runs_batted_in",
"total_bases", "walks","strikeouts", "stolen_bases", "on_base_pct",
"slugging_pct","opb_slg_pct", "war")
pitching_n <- c("year", "season_type","rank", "name", "team", "pos", "games_played", "games_started",
"quality_starts", "earned_run_avg", "wins", "losses", "saves", "holds",
"innings_pitched", "hits", "earned_runs", "home_runs", "walks", "strikeouts",
"strikes_x_9_i", "war", "whip")
fielding_n <- c("year", "season_type", "rank", "name", "team", "pos", "games_played",
"games_started", "full_innings", "total_chances", "putouts", "assists",
"fielding_pct", "errors", "double_plays", "range_factor", "passed_balls",
"stolen_bases_allowed", "caught_stealing", "caught_stealing_pct", "dwar")
fix_names <- dplyr::case_when(
stats == "batting" ~ list(batting_n),
stats == "pitching" ~ list(pitching_n),
stats == "fielding" ~ list(fielding_n)
)[[1]]
espn <- url %>%
rvest::read_html() %>%
rvest::html_table(fill = TRUE) %>%
dplyr::bind_cols() %>%
janitor::clean_names() %>%
dplyr::as_tibble() %>%
dplyr::mutate(team = stringr::str_extract(.data$name, "[^.[::a-z::]]+$"),
name = stringr::str_remove(.data$name, "[^.[::a-z::]]+$"),
name = stringr::str_squish(.data$name),
year = year,
season_type = dplyr::if_else(season_type == 2, "regular", "playoffs")
)%>%
dplyr::select(
"year", "season_type", "rk", "name", "team", dplyr::everything()
) %>%
purrr::set_names(nm = fix_names)
return(espn)
}
|
###### Data wrangling of the original tree div data#####
# NOTE(review): this script uses %>%, as_tibble(), nest()/unnest(), group_by()
# etc. without loading dplyr/tidyr -- presumably library(tidyverse) runs
# elsewhere in the project; confirm before running standalone.
#read csv file with Simon's data
data_simon<-read.csv("Data/data_original/data_simon.csv", header = TRUE)%>%as_tibble() #data_simon.csv not on github because way too heavy to upload (160 MB). See sharepoint under "Survival_Analysis"
#Make a dummy dataset that holds the survival of trees at the start of experiment. Create that initial dataset here
df_initial<-data_simon%>% #start from full dataset
dplyr::select(-X, - year,-survival)%>% #remove dummy integer column "x", re-add year and survival later
distinct()%>% #keep the original distinct records
mutate(year = 0, survival = 1)%>% #default survival state at time of planting (t = 0) was alive (survival = 1)
dplyr::select(ID, year, survival, everything()) #change order of columns
#Create a complete survival dataset
df_survival<-data_simon%>% #start from all recordings to create a complete survival dataset
dplyr::select(ID, year, survival, everything(), -X)%>% #junk column X removed, now order is the same as df_initial. There are 167,571 distinct IDs
full_join(df_initial)#full join (by all shared columns) with the dummy initial-conditions dataframe. df_initial can be removed from the environment afterwards to save memory
# create data products for further use
### Subset for trying out computation heavy analyses #####
#create a subset of the survival dataset with three sites that have numerous early-stage survival data
df_subset_survival<-df_survival%>%
filter(exp %in% c("FORBIO_Ged", "IDENT.cloquet", "Satakunta"))
#create a dataframe from the subsetted sites with the proportion of survival per species per plot per year
df_subset_survival_plot<-df_subset_survival%>%
group_by(exp,block, plot,SR, SpComp,year)%>%
filter(!is.na(survival))%>%#make sure that "NA" is not treated as zero (dead) by filtering them
dplyr::summarise(sum_alive = sum(survival), n_initial = n())%>%#count the number of alive trees and the number of initial trees
mutate(proportion_alive = (sum_alive/n_initial))%>%
as_tibble()
write.csv(df_subset_survival_plot,"Data/data_products/df_subset_survival_plot.csv") #### write this subsetted data out for further use
#make sure to clean the global environment at the end of the session. It can take a lot of memory space
### Proportional data on all sites ####
df_survival_plot<-df_survival%>%
group_by(exp,block, plot,SR, SpComp,year)%>%
filter(!is.na(survival))%>%#make sure that "NA" is not treated as zero (dead) by filtering them
dplyr::summarise(sum_alive = sum(survival), n_initial = n())%>%#count the number of alive trees and the number of initial trees
mutate(proportion_alive = (sum_alive/n_initial))%>%
as_tibble()%>%
group_by(exp,year)%>% #group by year and experiment
nest()%>% #nest the other data away to easily view what will happen with the following codes
group_by(exp)%>% #group only by experiment
mutate(yearfilter_max = ifelse(year == max(year),1,0))%>% #flag (1/0) the latest survey year within each experiment
mutate(yearfilter_5y = ifelse( abs(5-year) == min(abs(5-year)),1,0))%>% #flag (1/0) the survey year closest to year 5 per experiment
unnest(cols = c(data))%>%
ungroup()
write.csv(df_survival_plot,"Data/data_products/df_survival_plot.csv") #write out the data
|
/Data/data_products/wrangling_survivaldata.R
|
no_license
|
yadevi/CAMBIO_survival
|
R
| false
| false
| 3,295
|
r
|
# NOTE(review): this section is a verbatim duplicate of the survival-wrangling
# script earlier in this file (the dataset stores each file under both its
# 'content' and 'text' columns). Kept byte-identical; see the first copy.
###### Data wrangling of the original tree div data#####
#read csv file with Simon's data
data_simon<-read.csv("Data/data_original/data_simon.csv", header = TRUE)%>%as_tibble() #data_simon.csv not on github because way to heavy to upload (160 MB). See sharepoint under "Survival_Analysis"
#Make a dummy dataset that holds the survival of trees at the start of experiment. Create that initial dataset here
df_initial<-data_simon%>% #start from full dataset
dplyr::select(-X, - year,-survival)%>% #remove dummy integer column "x", re-add year and survival later
distinct()%>% #keep the original distinct records
mutate(year = 0, survival = 1)%>% #default survival state at time of planting (t = 0) was alive (survival = 1)
dplyr::select(ID, year, survival, everything()) #change order of columns
#Create a complete survival dataset
df_survival<-data_simon%>% #start from all recordings to create a complete survival dataset
dplyr::select(ID, year, survival, everything(), -X)%>% #junk column X removed, now order is the same as df_initial. There as 167,571 distinct IDs
full_join(df_initial)#full join with the dummy initial conditions dataframe. df_initial can be removed from environment after done to save memory space
# create data products for further use
### Subset for trying out computation heavy analyses #####
#create a subset of the survival dataset with three sites that have numerous early-stage survival data
df_subset_survival<-df_survival%>%
filter(exp %in% c("FORBIO_Ged", "IDENT.cloquet", "Satakunta"))
#create a dataframe from the subsettes sites with percentage of survival per species per plot per year
df_subset_survival_plot<-df_subset_survival%>%
group_by(exp,block, plot,SR, SpComp,year)%>%
filter(!is.na(survival))%>%#make sure that "NA" is not treated as zero (dead) by filtering them
dplyr::summarise(sum_alive = sum(survival), n_initial = n())%>%#count the number of alive trees and the number of initial trees
mutate(proportion_alive = (sum_alive/n_initial))%>%
as_tibble()
write.csv(df_subset_survival_plot,"Data/data_products/df_subset_survival_plot.csv") #### write this subsetted data oiut for further use
#make sure to clean the global environment at the end of the session. It can take a lot of memory space
### Proportional data on all sites ####
df_survival_plot<-df_survival%>%
group_by(exp,block, plot,SR, SpComp,year)%>%
filter(!is.na(survival))%>%#make sure that "NA" is not treated as zero (dead) by filtering them
dplyr::summarise(sum_alive = sum(survival), n_initial = n())%>%#count the number of alive trees and the number of initial trees
mutate(proportion_alive = (sum_alive/n_initial))%>%
as_tibble()%>%
group_by(exp,year)%>% #group by year and experiment
nest()%>% #nest the other data away to easily view what will happen with the following codes
group_by(exp)%>% #group only by experiment
mutate(yearfilter_max = ifelse(year == max(year),1,0))%>% #create a yearfilter on the the maximum year in the dataset
mutate(yearfilter_5y = ifelse( abs(5-year) == min(abs(5-year)),1,0))%>% #create a yearfilter on the year closest to year 5 per experiment
unnest(cols = c(data))%>%
ungroup()
write.csv(df_survival_plot,"Data/data_products/df_survival_plot.csv") #write out the data
|
##########
# PCA plots
# Usage: R --args config.file pca.type < pca_plots.R
##########
library(GWASTools)
library(QCpipeline)
library(RColorBrewer)
library(MASS)
library(ggplot2)
library(GGally)
library(ggExtra)
sessionInfo()
# read configuration
# Usage (see header): R --args config.file pca.type < pca_plots.R
# args[1] = path to a key/value config file parsed by readConfig().
args <- commandArgs(trailingOnly=TRUE)
if (length(args) < 1) stop("missing configuration file")
config <- readConfig(args[1])
# check for type
# args[2] selects which parameter set below applies ("study" or "combined").
if (length(args) < 2) stop("missing pca type (study or combined)")
type <- args[2]
theme_set(theme_bw())
# check config and set defaults
# "required" keys must be present in the config; "optional" keys fall back to
# the positionally matched entry of "default" (the two vectors must align).
if (type == "study") {
required <- c("annot_scan_file", "annot_scan_raceCol", "annot_snp_file")
optional <- c("annot_scan_ethnCol", "annot_snp_rsIDCol",
"num_evs_to_plot", "out_corr_file", "out_pca_file",
"out_corr_plot_prefix", "out_corr_pruned_plot_prefix",
"out_dens_plot", "out_ev12_plot", "out_pairs_plot", "out_scree_plot",
"out_parcoord_plot",
"parcoord_vars", "out_parcoord_var_prefix")
default <- c(NA, "rsID", 12, "pca_corr.RData", "pca.RData",
"pca_corr", NA, "pca_dens.pdf",
"pca_ev12.pdf", "pca_pairs.png", "pca_scree.pdf",
"pca_parcoord.png",
"", "pca_parcoord")
# study PCA: SNP annotation file comes straight from the config
snpfile <- config["annot_snp_file"]
} else if (type == "combined"){
required <- c("annot_scan_file", "annot_scan_raceCol", "out_comb_prefix")
optional <- c("annot_scan_ethnCol", "annot_snp_rsIDCol", "ext_annot_scan_file",
"ext_annot_scan_raceCol",
"num_evs_to_plot", "out_corr_file", "out_pca_file",
"out_corr_plot_prefix", "out_corr_pruned_plot_prefix",
"out_dens_plot", "out_ev12_plot", "out_pairs_plot", "out_scree_plot",
"out_parcoord_plot",
"out_ev12_plot_hapmap", "out_ev12_plot_study",
"parcoord_vars", "out_parcoord_var_prefix")
default <- c(NA, "rsID", NA, "pop.group", 12, "pca_combined_corr.RData",
"pca_combined.RData", "pca_corr", NA, "pca_dens.pdf",
"pca_ev12.pdf", "pca_pairs.png", "pca_scree.pdf",
"pca_parcoord.png",
"pca_ev12_hapmap.pdf", "pca_ev12_study.pdf",
"", "pca_parcoord")
# combined PCA: SNP annotation was written by the combine step
snpfile <- paste0(config["out_comb_prefix"], "_snpAnnot.RData")
}
# NOTE(review): for any other "type", required/optional/default are undefined
# here and this call fails; the explicit stop() for that case only happens
# further down in the scan-annotation branch.
config <- setConfigDefaults(config, required, optional, default)
print(config)
# functions for parallel coordinate plots later
# Per-row group size: for each element of samp[[var]], the number of rows of
# samp sharing that value. Rows with a missing value get the total NA count.
.getN <- function(samp, var) {
  values <- samp[[var]]
  counts <- table(values)
  out <- counts[as.character(values)]
  out[is.na(out)] <- sum(is.na(values))
  out
}
# transparency based on number of samples in a group
.getParcoordAlpha <- function(samp, var) {
n <- .getN(samp, var)
return(ifelse(n < 10, 1,
ifelse(n < 100, 0.5,
ifelse(n < 1000, 0.3, 0.1))) * 255)
}
# parallel coordinates plot variables
# parcoord_vars is a space-separated list of extra annotation variables used
# to color additional parallel-coordinate plots (may be empty).
vars <- unlist(strsplit(config["parcoord_vars"], " "))
# scan annotation
# Build "samp" with one row per scan: scanID, race, ethnicity, and (combined
# PCA only) geno.cntl marking genotyping controls.
if (type == "study") {
scanAnnot <- getobj(config["annot_scan_file"])
samp <- getVariable(scanAnnot, c("scanID", c(config["annot_scan_raceCol"], vars)))
names(samp) <- c("scanID", "race", vars)
if (!is.na(config["annot_scan_ethnCol"])) {
samp$ethnicity <- getVariable(scanAnnot, config["annot_scan_ethnCol"])
} else samp$ethnicity <- NA
} else if (type == "combined") {
scanAnnot <- getobj(config["annot_scan_file"])
# NOTE(review): annot_scan_hapmapCol is read here but is not declared in the
# required/optional config lists above - confirm it is always set.
scan1 <- getVariable(scanAnnot, c("scanID", config["annot_scan_raceCol"], config["annot_scan_hapmapCol"]))
names(scan1) <- c("scanID", "race", "geno.cntl")
if (!is.na(config["annot_scan_ethnCol"])) {
scan1$ethnicity <- getVariable(scanAnnot, config["annot_scan_ethnCol"])
} else scan1$ethnicity <- NA
# fill missing study race values from the external race column, if present
if (sum(is.na(scan1$race)) > 0 & hasVariable(scanAnnot, config["ext_annot_scan_raceCol"])) {
scan1$race2 <- getVariable(scanAnnot, config["ext_annot_scan_raceCol"])
scan1$race[is.na(scan1$race)] <- scan1$race2[is.na(scan1$race)]
scan1$race2 <- NULL
}
# external scans (e.g. HapMap): all treated as genotyping controls
ext.scanAnnot <- getobj(config["ext_annot_scan_file"])
scan2 <- getVariable(ext.scanAnnot, c("scanID", config["ext_annot_scan_raceCol"]))
names(scan2) <- c("scanID", "race")
scan2$geno.cntl <- 1
scan2$ethnicity <- NA
samp <- rbind(scan1, scan2)
} else {
stop("pca type must be study or combined")
}
# get PCA results
# Align the sample annotation to the PCA sample order.
pca <- getobj(config["out_pca_file"])
samp <- samp[match(pca$sample.id, samp$scanID),]
stopifnot(allequal(pca$sample.id, samp$scanID))
table(samp$race, samp$ethnicity, useNA="ifany")
# why are we doing this? (sort capital letters before lower case)
Sys.setlocale("LC_COLLATE", "C")
# color by race
# Race -> color map. Colors may be supplied in the config keyed by race name;
# any race without a config entry falls back to a ColorBrewer default.
race <- as.character(sort(unique(samp$race)))
if (length(race) > 0) {
#stopifnot(all(race %in% names(config)))
cmapRace <- setNames(config[race], race)
chk <- which(is.na(cmapRace))
if (length(chk) > 0) {
message(sprintf("Using default colors for %s races: %s", length(chk), paste(names(cmapRace[chk]), collapse=", ")))
defaultColors <- c(brewer.pal(8, "Dark2"), brewer.pal(8, "Set2"))
# NOTE(review): only 16 default colors exist; more than 16 unmapped races
# would produce NA colors here.
cmapRace[chk] <- defaultColors[1:length(chk)]
}
colorScale <- scale_color_manual("race", values=cmapRace, breaks=names(cmapRace), na.value="grey")
} else {
# all races NA: dummy scale ("hack" matches no data) so plots still render
colorScale <- scale_color_manual("race", values="black", breaks="hack", na.value="black")
}
rm(race)
# plot symbol by ethnicity
# Ethnicity -> plot symbol map; config entries must be integer pch codes.
ethn <- as.character(sort(unique(samp$ethnicity)))
if (length(ethn) > 0){
stopifnot(all(ethn %in% names(config)))
symbolMap <- config[ethn]
mode(symbolMap) <- "integer"
symbolScale <- scale_shape_manual("ethnicity", values=symbolMap, breaks=names(symbolMap), na.value=16)
} else {
symbolScale <- scale_shape_manual("ethnicity", values=1, breaks="hack", na.value=16)
}
rm(ethn)
# labels
## recent change in SNPRelate - pca$eigenval only returns first 32 values
#(x <- pca$eigenval[1:4]/sum(pca$eigenval))
# Axis labels "EV<i> (<percent of variance>)" for the first 4 eigenvectors.
x <- pca$varprop[1:4]
lbls <- paste("EV", 1:4, " (", format(100*x,digits=2), "%)", sep="")
# One column per eigenvector (EV1..EVk), merged with the sample annotation.
pcs <- pca$eigenvect
colnames(pcs) <- paste0("EV", 1:ncol(pcs))
pcs <- as.data.frame(pcs)
pcs$scanID <- pca$sample.id
dat <- merge(pcs, samp)
# order by number of samples in race group
# (largest groups are drawn first so smaller groups remain visible on top)
dat$nrace <- table(dat$race, useNA="ifany")[dat$race]
dat$nrace[is.na(dat$nrace)] <- sum(is.na(dat$nrace))
dat <- dat[order(-dat$nrace),]
# plot the first four pcs
# Scatterplot matrix of EV1-EV4; the race/ethnicity scales are applied to
# every panel in the loop below.
nev <- 4
pairs <- ggpairs(dat,
mapping=aes(color=race, shape=ethnicity),
columns=which(names(dat) %in% sprintf("EV%s", 1:nev)),
upper=list(continuous=wrap("points", alpha=0.7)),
lower=list(continuous=wrap("points", alpha=0.7)),
columnLabels=lbls[1:nev],
axisLabels="internal")
for (i in 1:pairs$nrow){
for (j in 1:pairs$ncol){
subplot <- getPlot(pairs, i, j)
subplot <- subplot + colorScale + symbolScale
pairs <- putPlot(pairs, subplot, i, j)
}
}
png(config["out_pairs_plot"], width=1000, height=1000)
print(pairs)
dev.off()
# plot EV1 vs EV2 with density plots
# Legend suppressed here so the marginal density panels line up.
p <- ggplot(dat, aes(x=EV1, y=EV2, color=race, shape=ethnicity)) +
geom_point(alpha=0.7) +
colorScale +
symbolScale +
theme(legend.position="none") +
xlab(lbls[1]) + ylab(lbls[2])
pdf(config["out_dens_plot"], width=6, height=6)
ggMarginal(p, type="density")
dev.off()
# plot EV1 vs EV2
# Same scatter, now with a legend, saved on its own.
p <- p + theme(legend.position="right") +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(config["out_ev12_plot"], plot=p, width=6, height=6)
# Shared theme for all parallel-coordinate plots: strip the y axis
# decorations and put the legend on top.
ggParcoordTheme <- theme(axis.title.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank(),
axis.title.y=element_blank(),
axis.text.x=element_text(colour="black"),
panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank(),
legend.position="top")
# parallel coordinates plot
# One line per sample across the first 12 eigenvectors, colored by race.
ev.ind <- which(names(dat) %in% sprintf("EV%s", 1:12))
p <- ggparcoord(dat, columns=ev.ind, groupColumn="race", scale="uniminmax", alphaLines=0.5) +
colorScale + ggParcoordTheme +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(config["out_parcoord_plot"], plot=p, width=10, height=5)
## other variables for parallel coordinate, specified by user
# One extra parallel-coordinate plot per variable in parcoord_vars, colored
# by that variable (study PCA only).
if (type == "study" & length(vars) > 0){
for (var in vars){
stopifnot(var %in% names(samp))
# auto filename
fname <- paste(config["out_parcoord_var_prefix"], "_", var, ".png", sep="")
dat[["fvar"]] <- as.factor(dat[[var]])
p <- ggparcoord(dat, columns=ev.ind, groupColumn="fvar", scale="uniminmax", alphaLines=0.5) +
scale_color_brewer(var, palette="Set1", na.value="grey") + ggParcoordTheme +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(fname, plot=p, width=10, height=5)
}
}
# Combined PCA only: EV1/EV2 plots separating HapMap controls from study
# samples, drawn on a common EV1 range.
if (type == "combined"){
xlim <- range(dat$EV1)
# NOTE(review): ylim is computed but never applied to either plot below.
ylim <- range(dat$EV2)
# hapmap plot
# Study samples (geno.cntl == 0) get NA color so only controls are colored.
dat$plotcol <- dat$race
dat$plotcol[dat$geno.cntl %in% 0] <- NA
p <- ggplot(dat, aes(x=EV1, y=EV2, color=plotcol, shape=ethnicity)) +
geom_point() +
colorScale +
symbolScale +
xlab(lbls[1]) + ylab(lbls[2]) +
xlim(xlim)
ggsave(config["out_ev12_plot_hapmap"], plot=p, width=6, height=6)
# study samples only, same EV1 range for comparability
p <- ggplot(dat[dat$geno.cntl %in% 0, ], aes(x=EV1, y=EV2, color=race, shape=ethnicity)) +
geom_point() +
colorScale +
symbolScale +
xlab(lbls[1]) + ylab(lbls[2]) +
xlim(xlim)
ggsave(config["out_ev12_plot_study"], plot=p, width=6, height=6)
}
#plot SNP-PC correlation
# Manhattan-style plots of |SNP-eigenvector correlation| for the first
# num_evs_to_plot eigenvectors, four panels per page.
snpAnnot <- getobj(snpfile)
corr <- getobj(config["out_corr_file"])
snp <- snpAnnot[match(corr$snp.id, getSnpID(snpAnnot)),]
chrom <- getChromosome(snp, char=TRUE)
nev <- as.integer(config["num_evs_to_plot"])
png(paste(config["out_corr_plot_prefix"], "_%03d.png", sep=""), height=720, width=720)
par(mfrow=c(4,1), mar=c(5,5,4,2)+0.1, lwd=1.5, cex.lab=1.5, cex.main=1.5)
for(i in 1:nev){
snpCorrelationPlot(abs(corr$snpcorr[i,]), chrom,
main=paste("Eigenvector",i), ylim=c(0,1))
}
dev.off()
# Same plots restricted to the LD-pruned SNP set, if requested.
# NOTE(review): out_pruned_file is read here but is not declared in the
# required/optional config lists above - confirm it is set whenever
# out_corr_pruned_plot_prefix is.
if (!is.na(config["out_corr_pruned_plot_prefix"])) {
snps.pruned <- getobj(config["out_pruned_file"])
ind <- getSnpID(snp) %in% snps.pruned
png(paste(config["out_corr_pruned_plot_prefix"], "_%03d.png", sep=""), height=720, width=720)
par(mfrow=c(4,1), mar=c(5,5,4,2)+0.1, lwd=1.5, cex.lab=1.5, cex.main=1.5)
for(i in 1:nev){
snpCorrelationPlot(abs(corr$snpcorr[i,ind]), chrom[ind],
main=paste("Eigenvector",i), ylim=c(0,1))
}
dev.off()
}
# scree plot
# Percent of variance per eigenvector. Note "dat" is reused here, clobbering
# the sample-level data frame built above.
dat <- data.frame(ev=1:nev, varprop=pca$varprop[1:nev])
p <- ggplot(dat, aes(x=factor(ev), y=100*varprop)) +
geom_point() +
xlab("Eigenvector") + ylab("Percent of variance accounted for")
ggsave(config["out_scree_plot"], plot=p, width=6, height=6)
|
/R/pca_plots.R
|
no_license
|
UW-GAC/QCpipeline
|
R
| false
| false
| 10,855
|
r
|
##########
# PCA plots
# Usage: R --args config.file pca.type < pca_plots.R
##########
library(GWASTools)
library(QCpipeline)
library(RColorBrewer)
library(MASS)
library(ggplot2)
library(GGally)
library(ggExtra)
sessionInfo()
# read configuration
# Usage (see header): R --args config.file pca.type < pca_plots.R
# args[1] = path to a key/value config file parsed by readConfig().
args <- commandArgs(trailingOnly=TRUE)
if (length(args) < 1) stop("missing configuration file")
config <- readConfig(args[1])
# check for type
# args[2] selects which parameter set below applies ("study" or "combined").
if (length(args) < 2) stop("missing pca type (study or combined)")
type <- args[2]
theme_set(theme_bw())
# check config and set defaults
# "required" keys must be present in the config; "optional" keys fall back to
# the positionally matched entry of "default" (the two vectors must align).
if (type == "study") {
required <- c("annot_scan_file", "annot_scan_raceCol", "annot_snp_file")
optional <- c("annot_scan_ethnCol", "annot_snp_rsIDCol",
"num_evs_to_plot", "out_corr_file", "out_pca_file",
"out_corr_plot_prefix", "out_corr_pruned_plot_prefix",
"out_dens_plot", "out_ev12_plot", "out_pairs_plot", "out_scree_plot",
"out_parcoord_plot",
"parcoord_vars", "out_parcoord_var_prefix")
default <- c(NA, "rsID", 12, "pca_corr.RData", "pca.RData",
"pca_corr", NA, "pca_dens.pdf",
"pca_ev12.pdf", "pca_pairs.png", "pca_scree.pdf",
"pca_parcoord.png",
"", "pca_parcoord")
# study PCA: SNP annotation file comes straight from the config
snpfile <- config["annot_snp_file"]
} else if (type == "combined"){
required <- c("annot_scan_file", "annot_scan_raceCol", "out_comb_prefix")
optional <- c("annot_scan_ethnCol", "annot_snp_rsIDCol", "ext_annot_scan_file",
"ext_annot_scan_raceCol",
"num_evs_to_plot", "out_corr_file", "out_pca_file",
"out_corr_plot_prefix", "out_corr_pruned_plot_prefix",
"out_dens_plot", "out_ev12_plot", "out_pairs_plot", "out_scree_plot",
"out_parcoord_plot",
"out_ev12_plot_hapmap", "out_ev12_plot_study",
"parcoord_vars", "out_parcoord_var_prefix")
default <- c(NA, "rsID", NA, "pop.group", 12, "pca_combined_corr.RData",
"pca_combined.RData", "pca_corr", NA, "pca_dens.pdf",
"pca_ev12.pdf", "pca_pairs.png", "pca_scree.pdf",
"pca_parcoord.png",
"pca_ev12_hapmap.pdf", "pca_ev12_study.pdf",
"", "pca_parcoord")
# combined PCA: SNP annotation was written by the combine step
snpfile <- paste0(config["out_comb_prefix"], "_snpAnnot.RData")
}
# NOTE(review): for any other "type", required/optional/default are undefined
# here and this call fails; the explicit stop() for that case only happens
# further down in the scan-annotation branch.
config <- setConfigDefaults(config, required, optional, default)
print(config)
# functions for parallel coordinate plots later
# Per-row group size: for each element of samp[[var]], the number of rows of
# samp sharing that value. Rows with a missing value get the total NA count.
.getN <- function(samp, var) {
  values <- samp[[var]]
  counts <- table(values)
  out <- counts[as.character(values)]
  out[is.na(out)] <- sum(is.na(values))
  out
}
# transparency based on number of samples in a group
.getParcoordAlpha <- function(samp, var) {
n <- .getN(samp, var)
return(ifelse(n < 10, 1,
ifelse(n < 100, 0.5,
ifelse(n < 1000, 0.3, 0.1))) * 255)
}
# parallel coordinates plot variables
# parcoord_vars is a space-separated list of extra annotation variables used
# to color additional parallel-coordinate plots (may be empty).
vars <- unlist(strsplit(config["parcoord_vars"], " "))
# scan annotation
# Build "samp" with one row per scan: scanID, race, ethnicity, and (combined
# PCA only) geno.cntl marking genotyping controls.
if (type == "study") {
scanAnnot <- getobj(config["annot_scan_file"])
samp <- getVariable(scanAnnot, c("scanID", c(config["annot_scan_raceCol"], vars)))
names(samp) <- c("scanID", "race", vars)
if (!is.na(config["annot_scan_ethnCol"])) {
samp$ethnicity <- getVariable(scanAnnot, config["annot_scan_ethnCol"])
} else samp$ethnicity <- NA
} else if (type == "combined") {
scanAnnot <- getobj(config["annot_scan_file"])
# NOTE(review): annot_scan_hapmapCol is read here but is not declared in the
# required/optional config lists above - confirm it is always set.
scan1 <- getVariable(scanAnnot, c("scanID", config["annot_scan_raceCol"], config["annot_scan_hapmapCol"]))
names(scan1) <- c("scanID", "race", "geno.cntl")
if (!is.na(config["annot_scan_ethnCol"])) {
scan1$ethnicity <- getVariable(scanAnnot, config["annot_scan_ethnCol"])
} else scan1$ethnicity <- NA
# fill missing study race values from the external race column, if present
if (sum(is.na(scan1$race)) > 0 & hasVariable(scanAnnot, config["ext_annot_scan_raceCol"])) {
scan1$race2 <- getVariable(scanAnnot, config["ext_annot_scan_raceCol"])
scan1$race[is.na(scan1$race)] <- scan1$race2[is.na(scan1$race)]
scan1$race2 <- NULL
}
# external scans (e.g. HapMap): all treated as genotyping controls
ext.scanAnnot <- getobj(config["ext_annot_scan_file"])
scan2 <- getVariable(ext.scanAnnot, c("scanID", config["ext_annot_scan_raceCol"]))
names(scan2) <- c("scanID", "race")
scan2$geno.cntl <- 1
scan2$ethnicity <- NA
samp <- rbind(scan1, scan2)
} else {
stop("pca type must be study or combined")
}
# get PCA results
# Align the sample annotation to the PCA sample order.
pca <- getobj(config["out_pca_file"])
samp <- samp[match(pca$sample.id, samp$scanID),]
stopifnot(allequal(pca$sample.id, samp$scanID))
table(samp$race, samp$ethnicity, useNA="ifany")
# why are we doing this? (sort capital letters before lower case)
Sys.setlocale("LC_COLLATE", "C")
# color by race
# Race -> color map. Colors may be supplied in the config keyed by race name;
# any race without a config entry falls back to a ColorBrewer default.
race <- as.character(sort(unique(samp$race)))
if (length(race) > 0) {
#stopifnot(all(race %in% names(config)))
cmapRace <- setNames(config[race], race)
chk <- which(is.na(cmapRace))
if (length(chk) > 0) {
message(sprintf("Using default colors for %s races: %s", length(chk), paste(names(cmapRace[chk]), collapse=", ")))
defaultColors <- c(brewer.pal(8, "Dark2"), brewer.pal(8, "Set2"))
# NOTE(review): only 16 default colors exist; more than 16 unmapped races
# would produce NA colors here.
cmapRace[chk] <- defaultColors[1:length(chk)]
}
colorScale <- scale_color_manual("race", values=cmapRace, breaks=names(cmapRace), na.value="grey")
} else {
# all races NA: dummy scale ("hack" matches no data) so plots still render
colorScale <- scale_color_manual("race", values="black", breaks="hack", na.value="black")
}
rm(race)
# plot symbol by ethnicity
# Ethnicity -> plot symbol map; config entries must be integer pch codes.
ethn <- as.character(sort(unique(samp$ethnicity)))
if (length(ethn) > 0){
stopifnot(all(ethn %in% names(config)))
symbolMap <- config[ethn]
mode(symbolMap) <- "integer"
symbolScale <- scale_shape_manual("ethnicity", values=symbolMap, breaks=names(symbolMap), na.value=16)
} else {
symbolScale <- scale_shape_manual("ethnicity", values=1, breaks="hack", na.value=16)
}
rm(ethn)
# labels
## recent change in SNPRelate - pca$eigenval only returns first 32 values
#(x <- pca$eigenval[1:4]/sum(pca$eigenval))
# Axis labels "EV<i> (<percent of variance>)" for the first 4 eigenvectors.
x <- pca$varprop[1:4]
lbls <- paste("EV", 1:4, " (", format(100*x,digits=2), "%)", sep="")
# One column per eigenvector (EV1..EVk), merged with the sample annotation.
pcs <- pca$eigenvect
colnames(pcs) <- paste0("EV", 1:ncol(pcs))
pcs <- as.data.frame(pcs)
pcs$scanID <- pca$sample.id
dat <- merge(pcs, samp)
# order by number of samples in race group
# (largest groups are drawn first so smaller groups remain visible on top)
dat$nrace <- table(dat$race, useNA="ifany")[dat$race]
dat$nrace[is.na(dat$nrace)] <- sum(is.na(dat$nrace))
dat <- dat[order(-dat$nrace),]
# plot the first four pcs
# Scatterplot matrix of EV1-EV4; the race/ethnicity scales are applied to
# every panel in the loop below.
nev <- 4
pairs <- ggpairs(dat,
mapping=aes(color=race, shape=ethnicity),
columns=which(names(dat) %in% sprintf("EV%s", 1:nev)),
upper=list(continuous=wrap("points", alpha=0.7)),
lower=list(continuous=wrap("points", alpha=0.7)),
columnLabels=lbls[1:nev],
axisLabels="internal")
for (i in 1:pairs$nrow){
for (j in 1:pairs$ncol){
subplot <- getPlot(pairs, i, j)
subplot <- subplot + colorScale + symbolScale
pairs <- putPlot(pairs, subplot, i, j)
}
}
png(config["out_pairs_plot"], width=1000, height=1000)
print(pairs)
dev.off()
# plot EV1 vs EV2 with density plots
# Legend suppressed here so the marginal density panels line up.
p <- ggplot(dat, aes(x=EV1, y=EV2, color=race, shape=ethnicity)) +
geom_point(alpha=0.7) +
colorScale +
symbolScale +
theme(legend.position="none") +
xlab(lbls[1]) + ylab(lbls[2])
pdf(config["out_dens_plot"], width=6, height=6)
ggMarginal(p, type="density")
dev.off()
# plot EV1 vs EV2
# Same scatter, now with a legend, saved on its own.
p <- p + theme(legend.position="right") +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(config["out_ev12_plot"], plot=p, width=6, height=6)
# Shared theme for all parallel-coordinate plots: strip the y axis
# decorations and put the legend on top.
ggParcoordTheme <- theme(axis.title.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank(),
axis.title.y=element_blank(),
axis.text.x=element_text(colour="black"),
panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank(),
legend.position="top")
# parallel coordinates plot
# One line per sample across the first 12 eigenvectors, colored by race.
ev.ind <- which(names(dat) %in% sprintf("EV%s", 1:12))
p <- ggparcoord(dat, columns=ev.ind, groupColumn="race", scale="uniminmax", alphaLines=0.5) +
colorScale + ggParcoordTheme +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(config["out_parcoord_plot"], plot=p, width=10, height=5)
## other variables for parallel coordinate, specified by user
# One extra parallel-coordinate plot per variable in parcoord_vars, colored
# by that variable (study PCA only).
if (type == "study" & length(vars) > 0){
for (var in vars){
stopifnot(var %in% names(samp))
# auto filename
fname <- paste(config["out_parcoord_var_prefix"], "_", var, ".png", sep="")
dat[["fvar"]] <- as.factor(dat[[var]])
p <- ggparcoord(dat, columns=ev.ind, groupColumn="fvar", scale="uniminmax", alphaLines=0.5) +
scale_color_brewer(var, palette="Set1", na.value="grey") + ggParcoordTheme +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(fname, plot=p, width=10, height=5)
}
}
# Combined PCA only: EV1/EV2 plots separating HapMap controls from study
# samples, drawn on a common EV1 range.
if (type == "combined"){
xlim <- range(dat$EV1)
# NOTE(review): ylim is computed but never applied to either plot below.
ylim <- range(dat$EV2)
# hapmap plot
# Study samples (geno.cntl == 0) get NA color so only controls are colored.
dat$plotcol <- dat$race
dat$plotcol[dat$geno.cntl %in% 0] <- NA
p <- ggplot(dat, aes(x=EV1, y=EV2, color=plotcol, shape=ethnicity)) +
geom_point() +
colorScale +
symbolScale +
xlab(lbls[1]) + ylab(lbls[2]) +
xlim(xlim)
ggsave(config["out_ev12_plot_hapmap"], plot=p, width=6, height=6)
# study samples only, same EV1 range for comparability
p <- ggplot(dat[dat$geno.cntl %in% 0, ], aes(x=EV1, y=EV2, color=race, shape=ethnicity)) +
geom_point() +
colorScale +
symbolScale +
xlab(lbls[1]) + ylab(lbls[2]) +
xlim(xlim)
ggsave(config["out_ev12_plot_study"], plot=p, width=6, height=6)
}
#plot SNP-PC correlation
# Manhattan-style plots of |SNP-eigenvector correlation| for the first
# num_evs_to_plot eigenvectors, four panels per page.
snpAnnot <- getobj(snpfile)
corr <- getobj(config["out_corr_file"])
snp <- snpAnnot[match(corr$snp.id, getSnpID(snpAnnot)),]
chrom <- getChromosome(snp, char=TRUE)
nev <- as.integer(config["num_evs_to_plot"])
png(paste(config["out_corr_plot_prefix"], "_%03d.png", sep=""), height=720, width=720)
par(mfrow=c(4,1), mar=c(5,5,4,2)+0.1, lwd=1.5, cex.lab=1.5, cex.main=1.5)
for(i in 1:nev){
snpCorrelationPlot(abs(corr$snpcorr[i,]), chrom,
main=paste("Eigenvector",i), ylim=c(0,1))
}
dev.off()
# Same plots restricted to the LD-pruned SNP set, if requested.
# NOTE(review): out_pruned_file is read here but is not declared in the
# required/optional config lists above - confirm it is set whenever
# out_corr_pruned_plot_prefix is.
if (!is.na(config["out_corr_pruned_plot_prefix"])) {
snps.pruned <- getobj(config["out_pruned_file"])
ind <- getSnpID(snp) %in% snps.pruned
png(paste(config["out_corr_pruned_plot_prefix"], "_%03d.png", sep=""), height=720, width=720)
par(mfrow=c(4,1), mar=c(5,5,4,2)+0.1, lwd=1.5, cex.lab=1.5, cex.main=1.5)
for(i in 1:nev){
snpCorrelationPlot(abs(corr$snpcorr[i,ind]), chrom[ind],
main=paste("Eigenvector",i), ylim=c(0,1))
}
dev.off()
}
# scree plot
# Percent of variance per eigenvector. Note "dat" is reused here, clobbering
# the sample-level data frame built above.
dat <- data.frame(ev=1:nev, varprop=pca$varprop[1:nev])
p <- ggplot(dat, aes(x=factor(ev), y=100*varprop)) +
geom_point() +
xlab("Eigenvector") + ylab("Percent of variance accounted for")
ggsave(config["out_scree_plot"], plot=p, width=6, height=6)
|
category I/O
IOが読み込み可能になるまで待つ機能を提供するライブラリです。
Windowsではこのライブラリで定義されているメソッドは
Socketに対してしか利用できません。
= reopen IO
== Instance Methods
#@since 1.9.1
--- nread -> Integer
ブロックせずに読み込み可能なバイト数を返します。
ブロックする場合は0を返します。
判別が不可能な場合は0を返します。#@end
#@until 1.9.1
--- ready? -> Integer | false | nil
ブロックせずに読み込み可能なら真を、
不可能であれば偽を返します。
より正確には、
ブロックせずに読み込み可能ならそのバイト数を返します。
内部のバッファにデータがある場合には true を返します。
self が EOF に達していれば false を返します。
判定不可能な場合には false を返します。
ブロックせずに読み込み可能なデータが存在しない場合には nil を返します。#@else
--- ready? -> bool | nil
ブロックせずに読み込み可能ならtrueを、
ブロックしてしまう可能性があるならfalseを返します。
判定不可能な場合は nil を返します。#@end
--- wait(timeout = nil) -> bool | self | nil
self が読み込み可能になるまでブロックし、読み込み可能になったら
真値を返します。タイムアウト、もしくはEOFで
それ以上読みこめない場合は偽の値を返します。
より詳しくは、一度ブロックしてから読み込み可能になった場合には
selfを返します。内部のバッファにデータがある場合には
ブロックせずに true を返します。#@until 1.9.1
内部のバッファとはCランタイムのFILE構造体内部の
バッファのことです。#@else
内部のバッファとはRubyの処理系が保持管理している
バッファのことです。#@end
つまり、読み込み可能である場合にはtrueを返す場合と
selfを返す場合があることに注意してください。
timeout を指定した場合は、指定秒数経過するまでブロックし、タ
イムアウトした場合は nil を返します。
self が EOF に達していれば false を返します。
@param timeout タイムアウトまでの秒数を指定します。
|
/target/rubydoc/refm/api/src/io/wait.rd
|
no_license
|
nacyot/omegat-rurima-ruby
|
R
| false
| false
| 2,374
|
rd
|
category I/O
IOが読み込み可能になるまで待つ機能を提供するライブラリです。
Windowsではこのライブラリで定義されているメソッドは
Socketに対してしか利用できません。
= reopen IO
== Instance Methods
#@since 1.9.1
--- nread -> Integer
ブロックせずに読み込み可能なバイト数を返します。
ブロックする場合は0を返します。
判別が不可能な場合は0を返します。#@end
#@until 1.9.1
--- ready? -> Integer | false | nil
ブロックせずに読み込み可能なら真を、
不可能であれば偽を返します。
より正確には、
ブロックせずに読み込み可能ならそのバイト数を返します。
内部のバッファにデータがある場合には true を返します。
self が EOF に達していれば false を返します。
判定不可能な場合には false を返します。
ブロックせずに読み込み可能なデータが存在しない場合には nil を返します。#@else
--- ready? -> bool | nil
ブロックせずに読み込み可能ならtrueを、
ブロックしてしまう可能性があるならfalseを返します。
判定不可能な場合は nil を返します。#@end
--- wait(timeout = nil) -> bool | self | nil
self が読み込み可能になるまでブロックし、読み込み可能になったら
真値を返します。タイムアウト、もしくはEOFで
それ以上読みこめない場合は偽の値を返します。
より詳しくは、一度ブロックしてから読み込み可能になった場合には
selfを返します。内部のバッファにデータがある場合には
ブロックせずに true を返します。#@until 1.9.1
内部のバッファとはCランタイムのFILE構造体内部の
バッファのことです。#@else
内部のバッファとはRubyの処理系が保持管理している
バッファのことです。#@end
つまり、読み込み可能である場合にはtrueを返す場合と
selfを返す場合があることに注意してください。
timeout を指定した場合は、指定秒数経過するまでブロックし、タ
イムアウトした場合は nil を返します。
self が EOF に達していれば false を返します。
@param timeout タイムアウトまでの秒数を指定します。
|
## ---- echo = FALSE, message = FALSE--------------------------------------
# Code extracted (knitr) from the patentsview "writing queries" vignette:
# three equivalent ways of building the same PatentsView API query, followed
# by smaller examples of the qry_funs helpers.
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
# Version 1: the query written by hand as a JSON string.
# NOTE(review): "assingee_country" is spelled this way in all three versions;
# it looks like a typo for "assignee_country", but it is part of the query
# text sent to the API, so verify against the endpoint's field list before
# changing it.
query_v_1 <-
'{"_and":[
{"_gte":{"patent_date":"2007-03-01"}},
{"_or":[
{"_text_all":{"patent_title":"dog"}},
{"_text_all":{"patent_abstract":"dog"}}
]},
{"_or":[
{"_eq":{"assingee_country":"US"}},
{"_eq":{"assingee_country":"CA"}}
]}
]}'
## ------------------------------------------------------------------------
# Version 2: the same query as nested R lists (serializable with jsonlite).
query_v_2 <-
list("_and" =
list(
list("_gte" = list(patent_date = "2007-03-01")),
list("_or" =
list(
list("_text_all" = list(patent_title = "dog")),
list("_text_all" = list(patent_abstract = "dog"))
)
),
list("_or" =
list(
list("_eq" = list(assingee_country = "US")),
list("_eq" = list(assingee_country = "CA"))
)
)
)
)
## ------------------------------------------------------------------------
# Version 3: the same query via patentsview's query DSL; with_qfuns() exposes
# and/or/gte/text_all/eq without the qry_funs$ prefix. eq() with a length-2
# vector expands to an "_or" of the two values.
library(patentsview)
query_v_3 <-
with_qfuns(
and(
gte(patent_date = "2007-03-01"),
or(
text_all(patent_title = "dog"),
text_all(patent_abstract = "dog")
),
eq(assingee_country = c("US", "CA"))
)
)
## ------------------------------------------------------------------------
# All three versions serialize to the same JSON.
jsonlite::minify(query_v_1)
jsonlite::toJSON(query_v_2, auto_unbox = TRUE)
jsonlite::toJSON(query_v_3, auto_unbox = TRUE)
## ------------------------------------------------------------------------
qry_funs$lte(assignee_total_num_inventors = 10)
## ------------------------------------------------------------------------
qry_funs$eq(cpc_subsection_id = "G12")
## ------------------------------------------------------------------------
# text_phrase() with a vector expands to an "_or" of phrases; not() negates.
with_qfuns(
and(
contains(rawinventor_first_name = "joh"),
text_phrase(patent_abstract = c("dog bark", "cat meow")),
not(
text_phrase(patent_abstract = c("dog chain"))
)
)
)
## ------------------------------------------------------------------------
with_qfuns(
or(
and(
eq(inventor_last_name = "smith"),
text_phrase(patent_title = "cotton gin")
),
and(
eq(inventor_last_name = "hopper"),
text_phrase(patent_title = "COBOL")
)
)
)
|
/inst/doc/writing-queries.R
|
no_license
|
crew102/patentsview
|
R
| false
| false
| 2,530
|
r
|
## ---- echo = FALSE, message = FALSE--------------------------------------
# Code extracted (knitr) from the patentsview "writing queries" vignette:
# three equivalent ways of building the same PatentsView API query, followed
# by smaller examples of the qry_funs helpers.
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
# Version 1: the query written by hand as a JSON string.
# NOTE(review): "assingee_country" is spelled this way in all three versions;
# it looks like a typo for "assignee_country", but it is part of the query
# text sent to the API, so verify against the endpoint's field list before
# changing it.
query_v_1 <-
'{"_and":[
{"_gte":{"patent_date":"2007-03-01"}},
{"_or":[
{"_text_all":{"patent_title":"dog"}},
{"_text_all":{"patent_abstract":"dog"}}
]},
{"_or":[
{"_eq":{"assingee_country":"US"}},
{"_eq":{"assingee_country":"CA"}}
]}
]}'
## ------------------------------------------------------------------------
# Version 2: the same query as nested R lists (serializable with jsonlite).
query_v_2 <-
list("_and" =
list(
list("_gte" = list(patent_date = "2007-03-01")),
list("_or" =
list(
list("_text_all" = list(patent_title = "dog")),
list("_text_all" = list(patent_abstract = "dog"))
)
),
list("_or" =
list(
list("_eq" = list(assingee_country = "US")),
list("_eq" = list(assingee_country = "CA"))
)
)
)
)
## ------------------------------------------------------------------------
# Version 3: the same query via patentsview's query DSL; with_qfuns() exposes
# and/or/gte/text_all/eq without the qry_funs$ prefix. eq() with a length-2
# vector expands to an "_or" of the two values.
library(patentsview)
query_v_3 <-
with_qfuns(
and(
gte(patent_date = "2007-03-01"),
or(
text_all(patent_title = "dog"),
text_all(patent_abstract = "dog")
),
eq(assingee_country = c("US", "CA"))
)
)
## ------------------------------------------------------------------------
# All three versions serialize to the same JSON.
jsonlite::minify(query_v_1)
jsonlite::toJSON(query_v_2, auto_unbox = TRUE)
jsonlite::toJSON(query_v_3, auto_unbox = TRUE)
## ------------------------------------------------------------------------
qry_funs$lte(assignee_total_num_inventors = 10)
## ------------------------------------------------------------------------
qry_funs$eq(cpc_subsection_id = "G12")
## ------------------------------------------------------------------------
# text_phrase() with a vector expands to an "_or" of phrases; not() negates.
with_qfuns(
and(
contains(rawinventor_first_name = "joh"),
text_phrase(patent_abstract = c("dog bark", "cat meow")),
not(
text_phrase(patent_abstract = c("dog chain"))
)
)
)
## ------------------------------------------------------------------------
with_qfuns(
or(
and(
eq(inventor_last_name = "smith"),
text_phrase(patent_title = "cotton gin")
),
and(
eq(inventor_last_name = "hopper"),
text_phrase(patent_title = "COBOL")
)
)
)
|
# Clear workspace --------------------------------------------------------------
# NOTE(review): rm(list = ls()) only clears the global environment (not
# loaded packages, options or devices) and is discouraged in scripts;
# prefer restarting R for a clean session.
rm(list = ls())
# Load libraries ---------------------------------------------------------------
library("tidyverse")
library("leaflet")
library("leaflet.extras")
library("ggpubr")
library("broom")
library("purrr")
library("rpart")
library("rpart.plot")
library("factoextra")
library("caret")
library("gridExtra")
# Define functions -------------------------------------------------------------
# 99_func.R defines helpers used further down (e.g. mdl() for the linear
# models) - not visible in this file.
source(file = "R/99_func.R")
# Load data --------------------------------------------------------------------
# col_types = cols() lets readr guess types while silencing its message.
df_patient <-
read_csv(file = "data/_augmented/final_patient_data_df_augm.csv",
col_types = cols())
df_ts <-
read_csv(file = "data/_augmented/final_ts_world_df_augm.csv",
col_types = cols())
# Basic descriptive and visualization: Patient data ----------------------------
# Plot 1: Distribution of age group by gender ----------------------------------
# Counts of patients per age group and gender, shown as stacked bars.
# (The original chained "+ ggsave(...)" onto the plot; ggsave() is not a
# layer and relies on last_plot() side effects, so we assign the plot and
# save it explicitly. A no-op collect() was also dropped.)
p_age_gender <- df_patient %>%
  group_by(age_group, gender) %>%
  tally() %>%
  drop_na(gender, age_group) %>%
  arrange(desc(age_group)) %>%
  ggplot() +
  geom_col(aes(x = age_group, y = n, fill = gender)) +
  labs(title = "Distribution of age group by gender",
       subtitle = "COVID-19 affected", x = "Age group", y = "Count")
ggsave(path = "results",
       filename = "03_distribution_age_group_gender.png",
       plot = p_age_gender,
       width = 6,
       height = 5)
# Plot 2: Smoothing of time_2_admin ~ age, grouped by gender and dead ----------
# Days from symptom onset to hospital admission vs. age, faceted on contact
# with Wuhan. Saved explicitly instead of chaining "+ ggsave(...)", which is
# not a layer and depended on last_plot().
p_onset_admission <- df_patient %>%
  mutate(time2admis = as.integer(date_admission_hospital - date_onset)) %>%
  select(gender, age, time2admis, is_dead, contact_with_Wuhan) %>%
  drop_na() %>%
  ggplot() +
  geom_point(aes(age, time2admis, color = gender)) +
  geom_smooth(aes(age, time2admis)) +
  facet_grid(contact_with_Wuhan ~ .,
             labeller = label_both, scales = "free") +
  ylim(0, 30) + # admissions later than 30 days are cut off the plot
  labs(title = "From onset to hospital admission",
       subtitle = "COVID-19 affected", x = "Age",
       y = "Day(s)")
ggsave(path = "results",
       filename = "03_onset_to_admission.png",
       plot = p_onset_admission,
       width = 6,
       height = 5)
# Plot 3: Boxplot of age range ~ is_dead, contact_with_wuhan and dyspnea -------
# Age distribution by dyspnea and gender, faceted on Wuhan contact and death.
# Saved explicitly instead of chaining "+ ggsave(...)" onto the plot.
p_age_symptoms <- df_patient %>%
  select(gender, age, dyspnea, is_dead, contact_with_Wuhan) %>%
  drop_na(gender, age, dyspnea, is_dead, contact_with_Wuhan) %>%
  ggplot() +
  geom_boxplot(aes(as.factor(dyspnea), age, fill = gender)) +
  facet_grid(contact_with_Wuhan ~ is_dead,
             labeller = label_both, scales = "free") +
  labs(title = "Age distribution by symptoms, death and contact with wuhan",
       subtitle = "COVID-19 affected", x = "Dyspnea", y = "Age")
ggsave(path = "results",
       filename = "03_age_symptoms_death_wuhan.png",
       plot = p_age_symptoms,
       width = 6,
       height = 5)
# Plot 4: Barplot in polar coordinates of incidents per region above 100 -------
# China is excluded so the remaining countries stay readable. Saved
# explicitly instead of chaining "+ ggsave(...)"; a no-op collect() dropped.
p_cases_polar <- df_patient %>%
  group_by(country) %>%
  tally() %>%
  filter(country != "China", n > 100) %>%
  ggplot() +
  geom_bar(aes(country, n, fill = country), stat = "identity") +
  coord_polar(start = 300) +
  labs(title = "Numbers of cases (above 100) between Jan-feb 2020",
       subtitle = "COVID-19 affected", x = "", y = "Count")
ggsave(path = "results",
       filename = "03_cases_above_hundred.png",
       plot = p_cases_polar,
       width = 6,
       height = 5)
# Plot 5: Barplot of the symptoms (when counts > 10 for visual purposes) -------
# Sum each symptom indicator column, keep symptoms seen in more than 10
# cases, and plot a horizontal bar chart ordered by prevalence. Saved
# explicitly instead of chaining "+ ggsave(...)" onto the plot.
p_symptom_prevalence <- df_patient %>%
  select(chills:thirst) %>%
  summarise_if(is.numeric, sum, na.rm = TRUE) %>%
  gather(symptoms, counts, chills:thirst) %>%
  filter(counts > 10) %>%
  ggplot(aes(reorder(symptoms, counts), counts, fill = symptoms)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  theme(legend.position = "none") +
  ylim(0, 650) +
  labs(title = "Prevalence of symptoms",
       subtitle = "Observed in more than 10 cases",
       x = "Symptoms", y = "Count")
ggsave(path = "results",
       filename = "03_prevalence_symptoms.png",
       plot = p_symptom_prevalence,
       width = 6,
       height = 5)
# Plot 6: Heatmap of cases -----------------------------------------------------
# Interactive leaflet heatmap of confirmed case locations with clustered
# markers; rendered in the viewer only (see notes below on why it is not
# saved to file and has no labs()).
df_patient %>%
drop_na(lat,long) %>%
leaflet() %>%
addProviderTiles("CartoDB.DarkMatter") %>%
addHeatmap(lng = ~long, lat = ~lat,
blur = 9, max = 0.05, radius = 6) %>%
addMarkers(clusterOptions =
markerClusterOptions())
# labels not possible!
# labs(title = "Confirmed cases",
# subtitle= "COVID-19 affected",
# x = "", y = "")
# not able to save by mapview (not working on R 3.6.2)
# tried other packages but did not work
# Visualization: Time Series Data ----------------------------------------------
# FROM HERE - ONLY FROM 2020-03-11 ARE SHOWN (DATE OF DK LOCKDOWN)
# Plot 7: Compare Denmark, Sweden, Romania, Turkey, Philippines per mil.pop ----
# Confirmed cases per million population over time for five countries.
# Saved explicitly instead of chaining "+ ggsave(...)" onto the plot.
p_confirmed_per_mil <- df_ts %>%
  filter(region %in% c("Denmark", "Sweden", "Romania",
                       "Turkey", "Philippines")) %>%
  filter(date_observation >= "2020-03-11") %>% # Starting from lockdown
  ggplot() +
  geom_line(aes(date_observation, total_confirmed_per_mil_pop,
                color = region)) +
  labs(title = "Confirmed case(s) per million population",
       subtitle = "COVID-19 affected",
       x = "Date", y = "Count per million population")
ggsave(path = "results",
       filename = "03_confirmed_per_mill.png",
       plot = p_confirmed_per_mil,
       width = 6,
       height = 5)
# Plot 8: Total deaths per mil.pop for the above-mentioned countries -----------
df_ts %>%
filter(region %in% c("Denmark", "Sweden", "Romania",
"Turkey", "Philippines")) %>%
filter(date_observation >= "2020-03-11") %>% # Starting from lockdown
ggplot() +
geom_line(aes(date_observation, total_deaths_per_mil_pop, color = region)) +
labs(title = "Death(s) per million population",
subtitle= "COVID-19 affected",
x = "Date", y = "Count per million population") +
ggsave(path = "results",
filename = "03_deaths_per_mill.png",
width = 6,
height = 5)
# Model data: Time Series
# ------------------------------------------------------------------------------
# Selecting few countries and nesting per province and time series type
df_ts_selected<- df_ts %>%
filter(region %in% c("Denmark", "Sweden", "Romania",
"Turkey","Philippines"))
# Model data: Time Series ------------------------------------------------------
# Selecting few countries and nesting per region and time series type
df_ts_selected <-
df_ts %>%
filter(region %in% c("Denmark", "Sweden", "Romania",
"Turkey","Philippines")) %>%
filter(date_observation >= "2020-03-11") %>% # Starting from lockdown
gather(ts, count, total_confirmed:total_deaths_per_mil_pop) %>%
group_by(region, ts) %>%
nest()
# Modeling with linear model
# mdl() comes from the sourced helper file; one model per region/ts group.
df_ts_models <-
  df_ts_selected %>%
  mutate(ts_region = str_c(ts, region, sep="_"),
         mdls = map(data, mdl),
         glance = map(mdls, glance),
         tidy = map(mdls, tidy),
         conf = map(mdls, confint_tidy),
         aug = map(mdls, augment))
# Plot 9: Estimate pr. day of confirmed and death per mil pop -----------------
# Showing model estimate (coefficient) confirmed per mil pop
df_ts_models %>%
  unnest(c(tidy, conf)) %>%
  filter(ts == "total_confirmed_per_mil_pop",term == "date_observation") %>%
  select(region, ts, ts_region, estimate, conf.low, conf.high) %>%
  ggplot(aes(estimate, ts_region, color = ts_region), show.legend = FALSE) +
  geom_point() +
  geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) +
  labs(title = "Model evaluation of confirmed cases",
       subtitle= "COVID-19 affected",
       x = "Estimated coefficient", y = "region") +
  ggsave(path = "results",
         filename = "03_model_eval_confirmed.png",
         width = 10,
         height = 5)
# Plot 10: Showing model estimate (coefficient) confirmed per mil pop ----------
# First ten coefficient rows rendered as a table "plot" via gridExtra.
table_df_ts_models_stats <-
  df_ts_models %>%
  unnest(c(tidy, conf)) %>%
  select(region, ts, term:p.value) %>%
  head(10)
linear_models_per_country <-
  grid.arrange(top = "Linear models statistics per country",
               tableGrob(table_df_ts_models_stats))
ggsave(path = "results",
       filename = "03_table_df_ts_models_stats.png",
       plot = linear_models_per_country,
       width = 10,
       height = 5)
# Plot 11: Showing model estimate (coefficient) deaths per mil pop -------------
df_ts_models %>%
  unnest(c(tidy, conf)) %>%
  filter(ts == "total_deaths_per_mil_pop", term == "date_observation") %>%
  select(region, ts, ts_region, estimate,conf.low,conf.high) %>%
  ggplot(aes(estimate, ts_region, color = ts_region), show.legend = FALSE) +
  geom_point() +
  geom_errorbarh(aes(xmin= conf.low, xmax = conf.high)) +
  labs(title = "Model evaluation of death cases",
       subtitle= "COVID-19 affected",
       x = "Estimated coefficient", y = "region") +
  ggsave(path = "results",
         filename = "03_model_eval_death.png",
         width = 10,
         height = 5)
# Plot 12: Evaluation of the models based on the residuals per region ----------
df_ts_models %>%
  unnest(aug) %>%
  select(region, ts, count,.resid) %>%
  filter(ts %in% c("total_confirmed_per_mil_pop",
                   "total_deaths_per_mil_pop")) %>%
  ggplot() +
  geom_boxplot(aes(region,.resid, fill = region)) +
  facet_grid(.~ts) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(title = "Model evaluation (residuals)",
       subtitle= "COVID-19",
       x = "region", y = "Residuals") +
  ggsave(path = "results",
         filename = "03_model_eval_residuals.png",
         width = 6,
         height = 5)
# Model data: Patient data -----------------------------------------------------
# Subsetting data frame for the pca (only biological features)
df_patient_pca <-
  df_patient %>%
  select(gender,age, contact_with_Wuhan:is_recovered, chills:thirst) %>%
  na.omit() %>%
  mutate(gender = case_when(gender == 'female' ~ 1,
                            gender == 'male' ~ 0)) %>%
  select_if(~length(unique(.)) > 1) # removing columns with same value
# Plot 13: Making PCA of the subset --------------------------------------------
# Selecting only the binary variables to avoid scale
df_patient_pca %>%
  select(-age) %>%
  prcomp(center = TRUE) %>%
  fviz_eig(main = "PCA of biological features",
           subtitle = "Explained variance in percentage by dimension",
           xlab = "Dimension", ylab = "Percentage") %>%
  ggsave(path = "results",
         filename = "03_pca_biological_features.png",
         width = 6,
         height = 5)
# Creating data frame for decision tree
# Outcome 'status' is derived from is_dead/is_recovered; both source
# columns are dropped afterwards so they cannot leak into the model.
df_patient_dec <-
  df_patient %>%
  select(gender, age, contact_with_Wuhan:is_recovered,
         chills:thirst) %>%
  select_if(~length(unique(.)) > 1) %>%
  mutate(status = case_when(is_dead == 0 & is_recovered == 0 ~ "still_sick",
                            is_dead == 0 & is_recovered == 1 ~ "recovered",
                            is_dead == 1 & is_recovered == 0 ~ "dead",
                            is_dead == 1 & is_recovered == 1 ~ "dead")) %>%
  mutate(gender = case_when(gender == "female" ~ 1,
                            gender == "male" ~ 0)) %>%
  mutate_if(is.character, as.factor) %>%
  mutate_if(is.numeric, as.factor) %>%
  mutate(age = as.integer(age)) %>%
  select(-is_dead, -is_recovered) %>%
  mutate(patient_id = as.character(1:nrow(df_patient))) %>%
  drop_na(status)
# Fix the RNG so the 80/20 split below is reproducible
set.seed(22100)
# Making train and test for decision tree
df_patient_dec_train <-
  df_patient_dec %>%
  sample_frac(0.8)
# Test set = rows not sampled into the training set
df_patient_dec_test <-
  df_patient_dec %>%
  anti_join(df_patient_dec_train, by = "patient_id")
# Fitting the training data
df_patient_dec_fit <-
  df_patient_dec_train %>%
  select(-patient_id) %>%
  rpart(status ~ ., ., method = 'class', model = TRUE,
        minsplit = 1, minbucket = 2, cp = 0.004)
# Plot 14: Plotting the tree ---------------------------------------------------
# Not able to save the image, done manually
rpart.plot(df_patient_dec_fit, roundint = FALSE, extra = "auto")
# Explanation of the tree plot output
# predicted class
# predicted prob for each class
# fraction of observation in the node
# Predicting with the model
df_patient_pred_status <-
  predict(df_patient_dec_fit, df_patient_dec_test,
          type = 'class')
# Defining the true class and predicted class
true_class <-
  df_patient_dec_test %>%
  select(status) %>%
  as_vector()
pred_class <-
  as_vector(df_patient_pred_status)
# Creating confusion matrix
table_cm <-
  as.matrix(confusionMatrix(table(true_class, pred_class)))
table_cm_plot <-
  grid.arrange(top="Confusion Matrix: Decision tree prediction",
               tableGrob(table_cm))
ggsave(path = "results",
       filename = "03_table_cm_plot.png",
       plot = table_cm_plot,
       width = 5,
       height = 4)
# Calculating accuracy
# Accuracy = trace of the confusion matrix over total predictions
dec_tree_model_acc <- round(sum(diag(table_cm)) / sum(table_cm),3)
# Write data
# ------------------------------------------------------------------------------
#write_tsv(...)
# ggsave(path = "./results",
#        filename = "04_plot.png",
#        plot = bl62_pca_aug_plt,
#        width = 10,
#        height = 6)
|
/R/03_analysis_descriptive.R
|
no_license
|
rforbiodatascience/2020_group06
|
R
| false
| false
| 13,264
|
r
|
# Clear workspace --------------------------------------------------------------
# NOTE(review): rm(list = ls()) only clears the global environment and is
# discouraged in scripts; restarting R gives a truly clean session.
rm(list = ls())
# Load libraries ---------------------------------------------------------------
library("tidyverse")
library("leaflet")
library("leaflet.extras")  # addHeatmap()
library("ggpubr")
library("broom")           # glance()/tidy()/augment() for the linear models
library("purrr")
library("rpart")           # decision tree
library("rpart.plot")
library("factoextra")      # fviz_eig() for the PCA scree plot
library("caret")           # confusionMatrix()
library("gridExtra")       # grid.arrange()/tableGrob() for table "plots"
# Define functions -------------------------------------------------------------
# Provides mdl() used in the modeling section below.
source(file = "R/99_func.R")
# Load data --------------------------------------------------------------------
df_patient <-
  read_csv(file = "data/_augmented/final_patient_data_df_augm.csv",
           col_types = cols())
df_ts <-
  read_csv(file = "data/_augmented/final_ts_world_df_augm.csv",
           col_types = cols())
# Basic descriptive and visualization: Patient data ----------------------------
# Plot 1: Distribution of age group by gender ----------------------------------
# Each plot is assigned and then saved with an explicit ggsave(plot = ...):
# chaining `+ ggsave(...)` onto a ggplot relies on last_plot(), which can
# silently save a previously rendered plot instead of the current one.
plt_age_gender <- df_patient %>%
  group_by(age_group, gender) %>%
  tally() %>%
  collect() %>%
  drop_na(gender, age_group) %>%
  arrange(desc(age_group)) %>%
  ggplot() +
  geom_col(aes(x = age_group, y = n, fill = gender)) +
  labs(title = "Distribution of age group by gender",
       subtitle = "COVID-19 affected", x = "Age group", y = "Count")
ggsave(path = "results",
       filename = "03_distribution_age_group_gender.png",
       plot = plt_age_gender,
       width = 6,
       height = 5)
# Plot 2: Smoothing of time_2_admin ~ age, grouped by gender and dead ----------
plt_onset_admission <- df_patient %>%
  mutate(time2admis = as.integer(date_admission_hospital - date_onset)) %>%
  select(gender, age, time2admis, is_dead, contact_with_Wuhan) %>%
  drop_na() %>%
  ggplot() +
  geom_point(aes(age, time2admis, color = gender)) +
  geom_smooth(aes(age, time2admis)) +
  facet_grid(contact_with_Wuhan ~ .,
             labeller = label_both, scales = "free") +
  ylim(0, 30) +
  labs(title = "From onset to hospital admission",
       subtitle = "COVID-19 affected", x = "Age",
       y = "Day(s)")
ggsave(path = "results",
       filename = "03_onset_to_admission.png",
       plot = plt_onset_admission,
       width = 6,
       height = 5)
# Plot 3: Boxplot of age range ~ is_dead, contact_with_wuhan and dyspnea -------
plt_age_dyspnea <- df_patient %>%
  select(gender, age, dyspnea, is_dead, contact_with_Wuhan) %>%
  drop_na(gender, age, dyspnea, is_dead, contact_with_Wuhan) %>%
  ggplot() +
  geom_boxplot(aes(as.factor(dyspnea), age, fill = gender)) +
  facet_grid(contact_with_Wuhan ~ is_dead,
             labeller = label_both, scales = "free") +
  labs(title = "Age distribution by symptoms, death and contact with wuhan",
       subtitle = "COVID-19 affected", x = "Dyspnea", y = "Age")
ggsave(path = "results",
       filename = "03_age_symptoms_death_wuhan.png",
       plot = plt_age_dyspnea,
       width = 6,
       height = 5)
# Plot 4: Barplot in polar coordinates of incidents per region above 100 -------
plt_cases_above_100 <- df_patient %>%
  group_by(country) %>%
  tally() %>%
  filter(country != "China", n > 100) %>%
  collect() %>%
  ggplot() +
  geom_bar(aes(country, n, fill = country), stat = "identity") +
  coord_polar(start = 300) +
  labs(title = "Numbers of cases (above 100) between Jan-feb 2020",
       subtitle = "COVID-19 affected", x = "", y = "Count")
ggsave(path = "results",
       filename = "03_cases_above_hundred.png",
       plot = plt_cases_above_100,
       width = 6,
       height = 5)
# Plot 5: Barplot of the symptoms (when counts > 10 for visual purposes) -------
plt_symptoms <- df_patient %>%
  select(chills:thirst) %>%
  summarise_if(is.numeric, sum, na.rm = TRUE) %>%
  gather(symptoms, counts, chills:thirst) %>%
  filter(counts > 10) %>%
  ggplot(aes(reorder(symptoms, counts), counts, fill = symptoms)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  theme(legend.position = "none") + ylim(0, 650) +
  labs(title = "Prevalence of symptoms",
       subtitle = "Observed in more than 10 cases",
       x = "Symptoms", y = "Count")
ggsave(path = "results",
       filename = "03_prevalence_symptoms.png",
       plot = plt_symptoms,
       width = 6,
       height = 5)
# Plot 6: Heatmap of cases -----------------------------------------------------
# Interactive leaflet map: dark basemap, heat layer of case density and
# clustered markers at each (lat, long). Rendered only, not saved to disk.
df_patient %>%
  drop_na(lat,long) %>%
  leaflet() %>%
  addProviderTiles("CartoDB.DarkMatter") %>%
  addHeatmap(lng = ~long, lat = ~lat,
             blur = 9, max = 0.05, radius = 6) %>%
  addMarkers(clusterOptions =
               markerClusterOptions())
# labels not possible!
# labs(title = "Confirmed cases",
#      subtitle= "COVID-19 affected",
#      x = "", y = "")
# not able to save by mapview (not working on R 3.6.2)
# tried other packages but did not work
# Visualization: Time Series Data ----------------------------------------------
# FROM HERE - ONLY FROM 2020-03-11 ARE SHOWN (DATE OF DK LOCKDOWN)
# Plot 7: Compare Denmark, Sweden, Romania, Turkey, Philippines per mil.pop ----
# Saved via an explicit ggsave(plot = ...) instead of chaining `+ ggsave()`,
# which relies on last_plot() and may save the wrong plot.
plt_confirmed_per_mill <- df_ts %>%
  filter(region %in% c("Denmark", "Sweden", "Romania",
                       "Turkey", "Philippines")) %>%
  filter(date_observation >= "2020-03-11") %>% # Starting from lockdown
  ggplot() +
  geom_line(aes(date_observation, total_confirmed_per_mil_pop,
                color = region)) +
  labs(title = "Confirmed case(s) per million population",
       subtitle = "COVID-19 affected",
       x = "Date", y = "Count per million population")
ggsave(path = "results",
       filename = "03_confirmed_per_mill.png",
       plot = plt_confirmed_per_mill,
       width = 6,
       height = 5)
# Plot 8: Total deaths per mil.pop for the above-mentioned countries -----------
plt_deaths_per_mill <- df_ts %>%
  filter(region %in% c("Denmark", "Sweden", "Romania",
                       "Turkey", "Philippines")) %>%
  filter(date_observation >= "2020-03-11") %>% # Starting from lockdown
  ggplot() +
  geom_line(aes(date_observation, total_deaths_per_mil_pop, color = region)) +
  labs(title = "Death(s) per million population",
       subtitle = "COVID-19 affected",
       x = "Date", y = "Count per million population")
ggsave(path = "results",
       filename = "03_deaths_per_mill.png",
       plot = plt_deaths_per_mill,
       width = 6,
       height = 5)
# Model data: Time Series ------------------------------------------------------
# Selecting few countries and nesting per region and time series type.
# NOTE: a redundant assignment of df_ts_selected (identical filter, no
# nesting) that was immediately overwritten here has been removed.
# One nested data frame per (region, time-series-type) combination.
df_ts_selected <-
  df_ts %>%
  filter(region %in% c("Denmark", "Sweden", "Romania",
                       "Turkey", "Philippines")) %>%
  filter(date_observation >= "2020-03-11") %>% # Starting from lockdown
  gather(ts, count, total_confirmed:total_deaths_per_mil_pop) %>%
  group_by(region, ts) %>%
  nest()
# Modeling with linear model
# mdl() is defined in R/99_func.R (sourced above); one model is fitted per
# nested (region, ts) group, then summarised with broom's
# glance/tidy/confint_tidy/augment.
df_ts_models <-
  df_ts_selected %>%
  mutate(ts_region = str_c(ts, region, sep="_"),
         mdls = map(data, mdl),
         glance = map(mdls, glance),
         tidy = map(mdls, tidy),
         conf = map(mdls, confint_tidy),
         aug = map(mdls, augment))
# Plot 9: Estimate pr. day of confirmed and death per mil pop -----------------
# Showing model estimate (coefficient) confirmed per mil pop.
# Plots below are saved with an explicit ggsave(plot = ...) rather than the
# fragile `+ ggsave()` chaining, which relies on last_plot().
plt_eval_confirmed <- df_ts_models %>%
  unnest(c(tidy, conf)) %>%
  filter(ts == "total_confirmed_per_mil_pop", term == "date_observation") %>%
  select(region, ts, ts_region, estimate, conf.low, conf.high) %>%
  ggplot(aes(estimate, ts_region, color = ts_region), show.legend = FALSE) +
  geom_point() +
  geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) +
  labs(title = "Model evaluation of confirmed cases",
       subtitle = "COVID-19 affected",
       x = "Estimated coefficient", y = "region")
ggsave(path = "results",
       filename = "03_model_eval_confirmed.png",
       plot = plt_eval_confirmed,
       width = 10,
       height = 5)
# Plot 10: Showing model estimate (coefficient) confirmed per mil pop ----------
# First ten coefficient rows rendered as a table "plot" via gridExtra.
table_df_ts_models_stats <-
  df_ts_models %>%
  unnest(c(tidy, conf)) %>%
  select(region, ts, term:p.value) %>%
  head(10)
linear_models_per_country <-
  grid.arrange(top = "Linear models statistics per country",
               tableGrob(table_df_ts_models_stats))
ggsave(path = "results",
       filename = "03_table_df_ts_models_stats.png",
       plot = linear_models_per_country,
       width = 10,
       height = 5)
# Plot 11: Showing model estimate (coefficient) deaths per mil pop -------------
plt_eval_deaths <- df_ts_models %>%
  unnest(c(tidy, conf)) %>%
  filter(ts == "total_deaths_per_mil_pop", term == "date_observation") %>%
  select(region, ts, ts_region, estimate, conf.low, conf.high) %>%
  ggplot(aes(estimate, ts_region, color = ts_region), show.legend = FALSE) +
  geom_point() +
  geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) +
  labs(title = "Model evaluation of death cases",
       subtitle = "COVID-19 affected",
       x = "Estimated coefficient", y = "region")
ggsave(path = "results",
       filename = "03_model_eval_death.png",
       plot = plt_eval_deaths,
       width = 10,
       height = 5)
# Plot 12: Evaluation of the models based on the residuals per region ----------
plt_eval_residuals <- df_ts_models %>%
  unnest(aug) %>%
  select(region, ts, count, .resid) %>%
  filter(ts %in% c("total_confirmed_per_mil_pop",
                   "total_deaths_per_mil_pop")) %>%
  ggplot() +
  geom_boxplot(aes(region, .resid, fill = region)) +
  facet_grid(. ~ ts) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(title = "Model evaluation (residuals)",
       subtitle = "COVID-19",
       x = "region", y = "Residuals")
ggsave(path = "results",
       filename = "03_model_eval_residuals.png",
       plot = plt_eval_residuals,
       width = 6,
       height = 5)
# Model data: Patient data -----------------------------------------------------
# Subsetting data frame for the pca (only biological features)
df_patient_pca <-
  df_patient %>%
  select(gender,age, contact_with_Wuhan:is_recovered, chills:thirst) %>%
  na.omit() %>%
  mutate(gender = case_when(gender == 'female' ~ 1,
                            gender == 'male' ~ 0)) %>%
  select_if(~length(unique(.)) > 1) # removing columns with same value
# Plot 13: Making PCA of the subset --------------------------------------------
# Selecting only the binary variables to avoid scale
# The fviz_eig() scree plot is piped into ggsave(); because 'filename' is
# passed by name, the piped plot binds to the 'plot' argument.
df_patient_pca %>%
  select(-age) %>%
  prcomp(center = TRUE) %>%
  fviz_eig(main = "PCA of biological features",
           subtitle = "Explained variance in percentage by dimension",
           xlab = "Dimension", ylab = "Percentage") %>%
  ggsave(path = "results",
         filename = "03_pca_biological_features.png",
         width = 6,
         height = 5)
# Creating data frame for decision tree
# Outcome 'status' is derived from is_dead/is_recovered (a patient marked
# both dead and recovered is treated as dead); the two source columns are
# dropped afterwards so they cannot leak into the model.
df_patient_dec <-
  df_patient %>%
  select(gender, age, contact_with_Wuhan:is_recovered,
         chills:thirst) %>%
  select_if(~length(unique(.)) > 1) %>%
  mutate(status = case_when(is_dead == 0 & is_recovered == 0 ~ "still_sick",
                            is_dead == 0 & is_recovered == 1 ~ "recovered",
                            is_dead == 1 & is_recovered == 0 ~ "dead",
                            is_dead == 1 & is_recovered == 1 ~ "dead")) %>%
  mutate(gender = case_when(gender == "female" ~ 1,
                            gender == "male" ~ 0)) %>%
  mutate_if(is.character, as.factor) %>%
  mutate_if(is.numeric, as.factor) %>%
  mutate(age = as.integer(age)) %>%
  select(-is_dead, -is_recovered) %>%
  mutate(patient_id = as.character(1:nrow(df_patient))) %>%
  drop_na(status)
# Fix the RNG so the 80/20 split below is reproducible
set.seed(22100)
# Making train and test for decision tree
df_patient_dec_train <-
  df_patient_dec %>%
  sample_frac(0.8)
# Test set = rows not sampled into the training set
df_patient_dec_test <-
  df_patient_dec %>%
  anti_join(df_patient_dec_train, by = "patient_id")
# Fitting the training data
df_patient_dec_fit <-
  df_patient_dec_train %>%
  select(-patient_id) %>%
  rpart(status ~ ., ., method = 'class', model = TRUE,
        minsplit = 1, minbucket = 2, cp = 0.004)
# Plot 14: Plotting the tree ---------------------------------------------------
# Not able to save the image, done manually
rpart.plot(df_patient_dec_fit, roundint = FALSE, extra = "auto")
# Explanation of the tree plot output
# predicted class
# predicted prob for each class
# fraction of observation in the node
# Predicting with the model
df_patient_pred_status <-
  predict(df_patient_dec_fit, df_patient_dec_test,
          type = 'class')
# Defining the true class and predicted class
true_class <-
  df_patient_dec_test %>%
  select(status) %>%
  as_vector()
pred_class <-
  as_vector(df_patient_pred_status)
# Creating confusion matrix
table_cm <-
  as.matrix(confusionMatrix(table(true_class, pred_class)))
table_cm_plot <-
  grid.arrange(top="Confusion Matrix: Decision tree prediction",
               tableGrob(table_cm))
ggsave(path = "results",
       filename = "03_table_cm_plot.png",
       plot = table_cm_plot,
       width = 5,
       height = 4)
# Calculating accuracy
# Accuracy = trace of the confusion matrix over total predictions
dec_tree_model_acc <- round(sum(diag(table_cm)) / sum(table_cm),3)
# Write data
# ------------------------------------------------------------------------------
#write_tsv(...)
# ggsave(path = "./results",
# filename = "04_plot.png",
# plot = bl62_pca_aug_plt,
# width = 10,
# height = 6)
|
# Tools for the package, tests and calcuations.
# Find the p-values for the Johansen procedure using bootstrapped values
# Find the p-values for the Johansen rank test using bootstrapped values.
#
# M: list with components
#   test: numeric vector of observed statistics, one per rank r = 0..p-1
#   boot: matrix of bootstrapped statistics, one column per rank
#
# Returns a list with
#   r:    first rank (0-based) whose p-value exceeds 5% (p if none does)
#   pVal: named vector of bootstrap p-values
rankTest <- function(M){
  p <- length(M$test)
  # Bootstrap p-value = right-tail probability of the observed statistic
  # under the empirical CDF of the bootstrap draws for that rank.
  # (vapply + seq_len: safe for p == 0, no vector growing in a loop)
  pVals <- vapply(seq_len(p), function(i) {
    test.cdf <- ecdf(M$boot[, i])
    1 - test.cdf(M$test[i])
  }, numeric(1))
  names(pVals) <- paste0("r=", seq_len(p) - 1)
  # Estimated rank: first r not rejected at the 5% level
  if (any(pVals > 0.05)) {
    r.est <- min(which(pVals > 0.05)) - 1
  } else {
    r.est <- p
  }
  names(r.est) <- ""
  list(r = r.est, pVal = pVals)
}
# Test restrictions (using LRT) on alpha and beta, input models (from:unrestricted/previous, to:restrictions)
# Likelihood-ratio test of restrictions on alpha and beta.
#
# from: unrestricted (or previous) model; must provide $res (residual
#       matrix, nrow = T - 1) and $lambda (eigenvalues)
# to:   restricted model; must provide $r (rank), $df (degrees of freedom
#       of the restriction) and $lambda (eigenvalues)
#
# Returns a one-row data.frame (Statistic, df, p-value), rounded to 3
# decimals; the statistic is asymptotically chi-squared with 'df' dof.
LRtest <- function(from, to){
  n.obs <- nrow(from$res) + 1  # sample size T ('T' as a name masks TRUE)
  r <- to$r
  df <- to$df
  lp <- from$lambda
  lh <- to$lambda
  # LR statistic over the first r eigenvalues
  test <- n.obs * sum(log(1 - lh[1:r]) - log(1 - lp[1:r]))
  pVal <- pchisq(test, df, lower.tail = FALSE)
  out <- round(data.frame(test, df, pVal), 3)
  names(out) <- c("Statistic", "df", "p-value")
  out
}
# Calculate the standard errors (plus t-based p-values) for alpha and mu.
#
# fit: fitted model list; fields read here: N, dt, Omega (covariance),
#      data (observations), r (rank), alpha, beta, Psi.
# Returns a matrix with one row per parameter and columns
# Estimate / Std.Err / p-value.
# NOTE(review): output labels are hardcoded to a 3-dimensional system
# ("phi_1".."phi_3", matrix(..., nr = 3)) -- confirm p == 3 is an
# invariant of the callers. 'N' is read but never used below.
getSE <- function(fit){
  N = fit$N
  dt = fit$dt
  Om = fit$Omega
  phi = fit$data
  r = fit$r
  p = nrow(Om)
  # Calculate...
  if(r==0){
    # No cointegration: SE of the drift only
    SE = sqrt(diag(Om)*dt)
    alpha = NULL
  } else {
    alpha = fit$alpha
    beta = fit$beta
    #Z = t(beta)%*%t(phi)
    Z = crossprod(beta,t(phi))
    Z = rbind(Z,1)  # append intercept row
    #ZZ = solve(Z%*%t(Z))
    ZZ = solve(tcrossprod(Z,Z))
    SE = sqrt(diag(kronecker(Om,ZZ))/dt)
  }
  # Set output with headers
  outMat = matrix(SE,nr=3,byrow=TRUE)
  outVec = c(outMat)
  rownames(outMat) = c("phi_1","phi_2","phi_3")
  if(r > 0){
    colnames(outMat)=c(paste0("alpha_",1:r),"mu")
    names(outVec) = c(paste0("alpha_",as.character(outer(10*(1:p),(1:r),FUN = "+"))),paste0("mu_",1:p))
  } else {
    colnames(outMat)=paste0("mu")
    names(outVec)=paste0("mu_",1:p)
  }
  # Stack estimates over standard errors, then two-sided normal p-values
  out = rbind(c(alpha,fit$Psi),outVec)
  tVal = abs(out[1,]/out[2,])
  pVal = 2*pnorm(tVal,lower.tail = FALSE)
  out = rbind(out,pVal)
  colnames(out) = names(outVec)
  rownames(out) = c("Estimate","Std.Err","p-value")
  # Tvals = abs(estPar)/stdErr
  # pVals = 2*pnorm(Tvals,lower.tail = FALSE)
  return(t(out))
}
|
/R/tools.R
|
permissive
|
jacobostergaard/cods
|
R
| false
| false
| 2,385
|
r
|
# Tools for the package, tests and calcuations.
# Find the p-values for the Johansen procedure using bootstrapped values
# Bootstrap p-values for the Johansen rank test.
# M$test holds the observed statistics (one per rank r = 0..p-1) and
# M$boot the bootstrap draws (one column per rank). Returns the estimated
# rank (first rank not rejected at 5%, or p) and the p-value vector.
rankTest <- function(M){
  n.ranks = length(M$test)
  pVals = numeric(n.ranks)
  for(k in 1:n.ranks){
    # right-tail probability under the empirical bootstrap CDF
    pVals[k] = 1 - ecdf(M$boot[,k])(M$test[k])
  }
  names(pVals) = paste0("r=", 0:(n.ranks - 1))
  accepted = which(pVals > 0.05)
  if(length(accepted) > 0){
    r.est = accepted[1] - 1
  } else{
    r.est = n.ranks
  }
  names(r.est) = ""
  return(list(r = r.est, pVal = pVals))
}
# Test restrictions (using LRT) on alpha and beta, input models (from:unrestricted/previous, to:restrictions)
# Likelihood-ratio test of restrictions on alpha and beta: compares the
# eigenvalues of the restricted model ('to') with those of the
# unrestricted/previous model ('from'). Returns a one-row data.frame
# (Statistic, df, p-value) rounded to 3 decimals.
LRtest <- function(from,to){
  sample.size <- nrow(from$res) + 1
  restr.rank <- to$r
  dof <- to$df
  ev.unrestricted <- from$lambda[1:restr.rank]
  ev.restricted <- to$lambda[1:restr.rank]
  # LR statistic over the first r eigenvalue pairs
  statistic <- sample.size * sum(log(1 - ev.restricted) - log(1 - ev.unrestricted))
  p.value <- pchisq(statistic, dof, lower.tail = FALSE)
  result <- round(data.frame(statistic, dof, p.value), 3)
  names(result) <- c("Statistic", "df", "p-value")
  return(result)
}
# Calculate the standard errors (plus t-based p-values) for alpha and mu.
#
# fit: fitted model list; fields read here: N, dt, Omega (covariance),
#      data (observations), r (rank), alpha, beta, Psi.
# Returns a matrix with one row per parameter and columns
# Estimate / Std.Err / p-value.
# NOTE(review): output labels are hardcoded to a 3-dimensional system
# ("phi_1".."phi_3", matrix(..., nr = 3)) -- confirm p == 3 is an
# invariant of the callers. 'N' is read but never used below.
getSE <- function(fit){
  N = fit$N
  dt = fit$dt
  Om = fit$Omega
  phi = fit$data
  r = fit$r
  p = nrow(Om)
  # Calculate...
  if(r==0){
    # No cointegration: SE of the drift only
    SE = sqrt(diag(Om)*dt)
    alpha = NULL
  } else {
    alpha = fit$alpha
    beta = fit$beta
    #Z = t(beta)%*%t(phi)
    Z = crossprod(beta,t(phi))
    Z = rbind(Z,1)  # append intercept row
    #ZZ = solve(Z%*%t(Z))
    ZZ = solve(tcrossprod(Z,Z))
    SE = sqrt(diag(kronecker(Om,ZZ))/dt)
  }
  # Set output with headers
  outMat = matrix(SE,nr=3,byrow=TRUE)
  outVec = c(outMat)
  rownames(outMat) = c("phi_1","phi_2","phi_3")
  if(r > 0){
    colnames(outMat)=c(paste0("alpha_",1:r),"mu")
    names(outVec) = c(paste0("alpha_",as.character(outer(10*(1:p),(1:r),FUN = "+"))),paste0("mu_",1:p))
  } else {
    colnames(outMat)=paste0("mu")
    names(outVec)=paste0("mu_",1:p)
  }
  # Stack estimates over standard errors, then two-sided normal p-values
  out = rbind(c(alpha,fit$Psi),outVec)
  tVal = abs(out[1,]/out[2,])
  pVal = 2*pnorm(tVal,lower.tail = FALSE)
  out = rbind(out,pVal)
  colnames(out) = names(outVec)
  rownames(out) = c("Estimate","Std.Err","p-value")
  # Tvals = abs(estPar)/stdErr
  # pVals = 2*pnorm(Tvals,lower.tail = FALSE)
  return(t(out))
}
|
# Parse the header,test and train files
# Build glm model with lambda search
# Predict using all models and calculate auc for each model
# Compare the aucs returned with those calculated by ROCR package
# Run from the directory of this file so relative paths resolve.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
# Regression test for H2O PUB-822: AUC reported by h2o.performance()
# must match the AUC computed independently by ROCR on the same
# predictions, for every model along the lambda search path.
test.pub.822 <- function(conn) {
  print("Parse header file")
  spect_header = h2o.importFile(conn,normalizePath(locate("smalldata/SPECT_header.txt")),key = "spect_header")
  print("Parse train and test files")
  spect_train = h2o.importFile(conn,normalizePath(locate("smalldata/SPECT_train.txt")),key = "spect_train",col.names=spect_header)
  spect_test = h2o.importFile(conn,normalizePath(locate("smalldata/SPECT_test.txt")),key = "spect_test", col.names=spect_header)
  print("Summary of the train set")
  print(summary(spect_train))
  print(str(spect_train))
  print("As all columns in the dataset are binary, converting the datatype to factors")
  for(i in 1:length(colnames(spect_train))){
    spect_train[,i] = as.factor(spect_train[,i])
    spect_test[,i] = as.factor(spect_test[,i])
  }
  print(summary(spect_train))
  print(summary(spect_test))
  print("Build GLM model")
  # Column 1 is the response, all remaining columns are predictors.
  myX = 2:length(colnames(spect_train))
  myY = 1
  my.glm = h2o.glm(x=myX, y=myY, data=spect_train, family="binomial",standardize=T,use_all_factor_levels=1,higher_accuracy=T,lambda_search=T,return_all_lambda=T,variable_importances=1)
  print(my.glm)
  print("Predict models on test set and print AUC")
  print("Also Check if auc from H2O is correct by checking it against ROCR's auc")
  # One comparison per model on the lambda path (100 models expected).
  for(i in 1:100){
    pred = h2o.predict(my.glm@models[[i]],spect_test)
    perf = h2o.performance(pred$'1',spect_test$OVERALL_DIAGNOSIS )
    auc_h = perf@model$auc
    # Recompute AUC with ROCR from the same predicted probabilities
    predic = prediction(as.data.frame(pred$'1'),as.data.frame(spect_test$OVERALL_DIAGNOSIS))
    perfor <- performance(predic,"auc")
    auc_R = as.numeric(perfor@y.values)
    print(paste("model: ",i, " auc from H2O: ",auc_h , " auc from ROCR:", auc_R,sep =''))
    expect_equal(auc_h,auc_R)
  }
  testEnd()
}
doTest("Test pub 822", test.pub.822)
|
/R/tests/testdir_jira/runit_pub_822_lambdaSearch_auc_check_medium.R
|
permissive
|
yangls06/h2o
|
R
| false
| false
| 2,045
|
r
|
# Parse the header,test and train files
# Build glm model with lambda search
# Predict using all models and calculate auc for each model
# Compare the aucs returned with those calculated by ROCR package
# Run from the directory of this file so relative paths resolve.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
# Regression test for H2O PUB-822: AUC reported by h2o.performance()
# must match the AUC computed independently by ROCR on the same
# predictions, for every model along the lambda search path.
test.pub.822 <- function(conn) {
  print("Parse header file")
  spect_header = h2o.importFile(conn,normalizePath(locate("smalldata/SPECT_header.txt")),key = "spect_header")
  print("Parse train and test files")
  spect_train = h2o.importFile(conn,normalizePath(locate("smalldata/SPECT_train.txt")),key = "spect_train",col.names=spect_header)
  spect_test = h2o.importFile(conn,normalizePath(locate("smalldata/SPECT_test.txt")),key = "spect_test", col.names=spect_header)
  print("Summary of the train set")
  print(summary(spect_train))
  print(str(spect_train))
  print("As all columns in the dataset are binary, converting the datatype to factors")
  for(i in 1:length(colnames(spect_train))){
    spect_train[,i] = as.factor(spect_train[,i])
    spect_test[,i] = as.factor(spect_test[,i])
  }
  print(summary(spect_train))
  print(summary(spect_test))
  print("Build GLM model")
  # Column 1 is the response, all remaining columns are predictors.
  myX = 2:length(colnames(spect_train))
  myY = 1
  my.glm = h2o.glm(x=myX, y=myY, data=spect_train, family="binomial",standardize=T,use_all_factor_levels=1,higher_accuracy=T,lambda_search=T,return_all_lambda=T,variable_importances=1)
  print(my.glm)
  print("Predict models on test set and print AUC")
  print("Also Check if auc from H2O is correct by checking it against ROCR's auc")
  # One comparison per model on the lambda path (100 models expected).
  for(i in 1:100){
    pred = h2o.predict(my.glm@models[[i]],spect_test)
    perf = h2o.performance(pred$'1',spect_test$OVERALL_DIAGNOSIS )
    auc_h = perf@model$auc
    # Recompute AUC with ROCR from the same predicted probabilities
    predic = prediction(as.data.frame(pred$'1'),as.data.frame(spect_test$OVERALL_DIAGNOSIS))
    perfor <- performance(predic,"auc")
    auc_R = as.numeric(perfor@y.values)
    print(paste("model: ",i, " auc from H2O: ",auc_h , " auc from ROCR:", auc_R,sep =''))
    expect_equal(auc_h,auc_R)
  }
  testEnd()
}
doTest("Test pub 822", test.pub.822)
|
# Merge the UCI HAR train/test sets into one tidy data set and return the
# mean of every mean/std feature per subject and activity.
#
# X_*:       feature measurements (one column per feature)
# y_*:       activity codes 1-6, one row per observation
# features:  feature names, one per column of X
# subject_*: subject identifiers, one row per observation
#
# Returns a data frame with one row per (subject, activity) and the mean
# of each retained feature. Requires dplyr (group_by/summarise_each).
run_analysis <- function(X_test, X_train, y_test, y_train, features, subject_test, subject_train)
{
  data <- rbind(X_test, X_train) # Merges the training and the test sets to create one data set
  # 'keep' instead of 'c': avoid shadowing base::c
  keep <- grepl("mean", features) | grepl("std", features)
  data <- data[, keep] # Extracts only the measurements on the mean and standard deviation for each measurement.
  activity_labels <- rbind(y_test, y_train)
  # Map activity codes 1-6 to descriptive names. Vectorized lookup instead
  # of the previous scalar loop hard-coded to 10299 rows, so the function
  # now works for any number of observations.
  label_map <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                 "SITTING", "STANDING", "LAYING")
  activity_labels[, 1] <- label_map[as.numeric(activity_labels[, 1])]
  names(data) <- features[keep] # Appropriately labels the data set with descriptive variable names.
  subject <- rbind(subject_test, subject_train)
  names(activity_labels) <- "activity"
  names(subject) <- "subject"
  data <- cbind(subject, activity_labels, data)
  data2 <- group_by(data, subject, activity)
  data3 <- summarise_each(data2, funs(mean))
  data3
}
|
/run_analysis.R
|
no_license
|
AnastasiaPlisova/MyProject
|
R
| false
| false
| 1,319
|
r
|
# Merge the UCI HAR train/test sets into one tidy data set and return the
# mean of every mean/std feature per subject and activity.
#
# X_*:       feature measurements (one column per feature)
# y_*:       activity codes 1-6, one row per observation
# features:  feature names, one per column of X
# subject_*: subject identifiers, one row per observation
#
# Returns a data frame with one row per (subject, activity) and the mean
# of each retained feature. Requires dplyr (group_by/summarise_each).
run_analysis <- function(X_test, X_train, y_test, y_train, features, subject_test, subject_train)
{
  data <- rbind(X_test, X_train) # Merges the training and the test sets to create one data set
  # 'keep' instead of 'c': avoid shadowing base::c
  keep <- grepl("mean", features) | grepl("std", features)
  data <- data[, keep] # Extracts only the measurements on the mean and standard deviation for each measurement.
  activity_labels <- rbind(y_test, y_train)
  # Map activity codes 1-6 to descriptive names. Vectorized lookup instead
  # of the previous scalar loop hard-coded to 10299 rows, so the function
  # now works for any number of observations.
  label_map <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                 "SITTING", "STANDING", "LAYING")
  activity_labels[, 1] <- label_map[as.numeric(activity_labels[, 1])]
  names(data) <- features[keep] # Appropriately labels the data set with descriptive variable names.
  subject <- rbind(subject_test, subject_train)
  names(activity_labels) <- "activity"
  names(subject) <- "subject"
  data <- cbind(subject, activity_labels, data)
  data2 <- group_by(data, subject, activity)
  data3 <- summarise_each(data2, funs(mean))
  data3
}
|
# Fuzz/regression input (AFL-generated) for myTAI:::cpp_TAI:
# a 5x7 matrix with extreme double values paired with an empty
# Phylostratum vector; str() prints whatever the C++ routine returns.
testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 1.52478221747831e+245, 1.57532647271067e+179, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_TAI/AFL_cpp_TAI/cpp_TAI_valgrind_files/1615763604-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 334
|
r
|
# Fuzz/regression input (AFL-generated) for myTAI:::cpp_TAI:
# a 5x7 matrix with extreme double values paired with an empty
# Phylostratum vector; str() prints whatever the C++ routine returns.
testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 1.52478221747831e+245, 1.57532647271067e+179, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap.3.R
\name{heatmap.3}
\alias{heatmap.3}
\title{Heatmap}
\usage{
heatmap.3(x, dist.fun = c("euclidean", "pearson"), na.avg = TRUE,
col.ramp = bluered, z.transf = c(FALSE, TRUE), breaks = seq(-1, +1,
length = 100), linkage = c("average", "ward.D", "ward.D2", "single",
"complete", "mcquitty", "median", "centroid"), nLabLim = 30, ...)
}
\arguments{
\item{x}{MSnSet object}
\item{dist.fun}{distance function "euclidean" or"pearson"}
\item{na.avg}{logical. Should NA distances be assigned just an average value?}
\item{col.ramp}{color mapping function. default gplots::bluered}
\item{z.transf}{logical perform Z-transform or not.}
\item{breaks}{color key breaks}
\item{linkage}{see ?hclust}
\item{nLabLim}{max limit of the row/column labels to show. default 30}
\item{...}{further arguments to \code{gplots::heatmap.2}}
}
\description{
Customized heatmap built on \code{gplots::heatmap.2}: adds a Pearson-correlation distance option, optional averaging of \code{NA} distances, optional Z-transformation, a selectable linkage method, and suppression of row/column labels beyond \code{nLabLim}.
}
\examples{
data(srm_msnset)
set.seed(0)
clrz <- sample(colors(), 17)
heatmap.3(cor(exprs(msnset)),
dist.fun="pearson",
linkage="average",
nLabLim=50,
ColSideColors=c('red3','yellow3','green3')[as.factor(pData(msnset)$subject.type)],
RowSideColors=clrz[as.factor(pData(msnset)$match.group)])
}
|
/man/heatmap.3.Rd
|
no_license
|
JMoon1/vp.misc
|
R
| false
| true
| 1,339
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap.3.R
\name{heatmap.3}
\alias{heatmap.3}
\title{Heatmap}
\usage{
heatmap.3(x, dist.fun = c("euclidean", "pearson"), na.avg = TRUE,
col.ramp = bluered, z.transf = c(FALSE, TRUE), breaks = seq(-1, +1,
length = 100), linkage = c("average", "ward.D", "ward.D2", "single",
"complete", "mcquitty", "median", "centroid"), nLabLim = 30, ...)
}
\arguments{
\item{x}{MSnSet object}
\item{dist.fun}{distance function: "euclidean" or "pearson"}
\item{na.avg}{logical. Should NA distances be assigned just an average value?}
\item{col.ramp}{color mapping function. default gplots::bluered}
\item{z.transf}{logical perform Z-transform or not.}
\item{breaks}{color key breaks}
\item{linkage}{see ?hclust}
\item{nLabLim}{max limit of the row/column labels to show. default 30}
\item{...}{further arguments to \code{gplots::heatmap.2}}
}
\description{
Customized heatmap built on \code{gplots::heatmap.2}, adding a choice of
euclidean or Pearson-correlation distance, optional Z-transformation,
averaging of \code{NA} distances, and a cap (\code{nLabLim}) on the number
of row/column labels drawn.
}
\examples{
data(srm_msnset)
set.seed(0)
clrz <- sample(colors(), 17)
heatmap.3(cor(exprs(msnset)),
dist.fun="pearson",
linkage="average",
nLabLim=50,
ColSideColors=c('red3','yellow3','green3')[as.factor(pData(msnset)$subject.type)],
RowSideColors=clrz[as.factor(pData(msnset)$match.group)])
}
|
## ------------------------------------------------------------------------
# Vignette example: reference classes via aoos::defineRefClass().
# defineRefClass() is a thin wrapper around methods::setRefClass(): the
# assignments inside the braces supply the Class name, the fields (given as
# character class names) and the methods of the generated generator.
library(aoos)
Person <- defineRefClass({
Class <- "person" # this is the argument 'Class' in setRefClass
personName <- "character" # this is a field of class 'character'
initialize <- function(name) {
.self$personName <- name
.self$greet()
}
greet <- function() {
cat(paste0("Hello, my name is ", .self$personName, ".\n"))
}
})
# Construct an instance; initialize() greets as a side effect.
ann <- Person("Ann")
ann
# Public field: readable and writable from outside the object.
ann$personName
ann$personName <- "not Ann"
ann$greet()
## ------------------------------------------------------------------------
# Same example, now inheriting from "Private": dot-prefixed fields are no
# longer reachable from outside the object (demonstrated by the stopifnot
# below), while methods can still use them via .self.
PrivatePerson <- defineRefClass({
Class <- "PrivatePerson"
contains <- "Private" # also just passed as argument to setRefClass
.personName <- "character"
initialize <- function(name) {
.self$.personName <- name
.self$greet()
}
greet <- function() {
cat(paste0("Hello, my name is ", .self$.personName, ".\n"))
}
})
ann <- PrivatePerson("Ann")
ann
# External access to the private field must fail with an error.
stopifnot(inherits(try(ann$.personName, silent = TRUE), "try-error"))
ann$greet()
## ------------------------------------------------------------------------
# The equivalent class written directly with methods::setRefClass(), to show
# what defineRefClass() expands to. The old class is removed first so the
# name can be redefined in this session.
removeClass("PrivatePerson")
PrivatePerson <- setRefClass(
Class = "PrivatePerson",
fields = list(.personName = "character"),
contains = "Private",
methods = list(
initialize = function(name) {
.self$.personName <- name
.self$greet()
},
greet = function() {
cat(paste0("Hello, my name is ", .self$.personName, ".\n"))
}
)
)
ann <- PrivatePerson("Ann")
ann
# Privacy behaves identically to the defineRefClass() version.
stopifnot(inherits(try(ann$.personName, silent = TRUE), "try-error"))
ann$greet()
|
/inst/doc/referenceClasses.R
|
no_license
|
wahani/aoos
|
R
| false
| false
| 1,629
|
r
|
## ------------------------------------------------------------------------
library(aoos)
Person <- defineRefClass({
Class <- "person" # this is the argument 'Class' in setRefClass
personName <- "character" # this is a field of class 'character'
initialize <- function(name) {
.self$personName <- name
.self$greet()
}
greet <- function() {
cat(paste0("Hello, my name is ", .self$personName, ".\n"))
}
})
ann <- Person("Ann")
ann
ann$personName
ann$personName <- "not Ann"
ann$greet()
## ------------------------------------------------------------------------
PrivatePerson <- defineRefClass({
Class <- "PrivatePerson"
contains <- "Private" # also just passed as argument to setRefClass
.personName <- "character"
initialize <- function(name) {
.self$.personName <- name
.self$greet()
}
greet <- function() {
cat(paste0("Hello, my name is ", .self$.personName, ".\n"))
}
})
ann <- PrivatePerson("Ann")
ann
stopifnot(inherits(try(ann$.personName, silent = TRUE), "try-error"))
ann$greet()
## ------------------------------------------------------------------------
removeClass("PrivatePerson")
PrivatePerson <- setRefClass(
Class = "PrivatePerson",
fields = list(.personName = "character"),
contains = "Private",
methods = list(
initialize = function(name) {
.self$.personName <- name
.self$greet()
},
greet = function() {
cat(paste0("Hello, my name is ", .self$.personName, ".\n"))
}
)
)
ann <- PrivatePerson("Ann")
ann
stopifnot(inherits(try(ann$.personName, silent = TRUE), "try-error"))
ann$greet()
|
source(here::here("code/packages.R"))
source(here::here("code/file_paths.R"))
source(here("code/programs/fn_getplotdata.R")) # will need this little function later and in other codes get_plot_data
dir.create(file.path(here("out")), showWarnings = FALSE)
dir.create(file.path(here("out", "analysis")), showWarnings = FALSE)
YY <- c("depression", "anxiety")
XX <- c("psoriasis", "eczema")
severity_results <- NULL
st <- Sys.time()
for(exposure in XX){
#exposure <- XX[1]
ABBRVexp <- substr(exposure, 1 , 3)
for (outcome in YY) {
#outcome = YY[1]
.dib(paste0(outcome, "~", exposure))
# load data ---------------------------------------------------------------
df_model <-
readRDS(paste0(
datapath,
"out/df_model",
ABBRVexp,
"_",
outcome,
".rds"
))
# load models -------------------------------------------------------------
mod4 <-
readRDS(
paste0(
datapath,
"out/models_data/",
ABBRVexp,
"_",
outcome,
"_mod4_severity_modeldata.rds"
)
)
mod4tab <- broom::tidy(mod4, conf.int = T, exponentiate = T, conf.level = 0.95)
mod4results <- mod4tab %>%
filter(str_detect(string = term, pattern = "^severity.")) %>%
mutate(Y = paste0(outcome), X = paste0(exposure)) %>%
dplyr::select(X, Y, term, estimate, conf.low, conf.high)
severity_results <- severity_results %>%
bind_rows(mod4results)
}
}
severity_results <- severity_results %>%
mutate_at("term", ~str_remove(., "^severity")) %>%
mutate_at(c("X", "Y"), ~str_to_title(.)) %>%
rename(severity = term,
exposure = X,
outcome = Y)
pd <- position_dodge(width = 0.3)
ybase <- -0.1 + severity_results$conf.low %>% min() %>% round(digits = 2)
yheight <- 0.1 + severity_results$conf.high %>% max() %>% round(digits = 2)
pdf(here::here("out/analysis/forest_plot2_severity.pdf"), width = 6, height = 4)
p1 <- ggplot(severity_results, aes(x = severity, y = estimate, ymin = conf.low, ymax = conf.high, group = outcome, colour = outcome)) +
geom_point(position = pd, size = 3, shape = 1) +
geom_errorbar(position = pd, width = 0.25) +
geom_hline(yintercept = 1, lty=2) +
#ylim(c(0,NA)) +
scale_y_log10(breaks=seq(0.5,2,0.1),position="left",limits=c(ybase, yheight)) +
scale_x_discrete(limits=rev) +
facet_grid(rows = vars(exposure), drop = TRUE, space = "free", scales = "free") +
coord_flip() +
guides(colour = guide_legend("Outcome")) +
labs(y = "Hazard ratio", x = "Exposure severity") +
scale_alpha_identity() +
theme_bw() +
theme(strip.background = element_blank(),
strip.text = element_text(face = "bold"),
legend.position = "bottom")
print(p1)
dev.off()
# Build a gt regression table of severity-model (model 4) results for one
# exposure.
#
# Loads the matched cohort data and the fitted severity models for both
# anxiety and depression, summarises per-severity-level counts, events and
# person-years, and assembles hazard ratios, 95% CIs and starred p-values
# into a gt table (anxiety rows first, then depression).
#
# Args:
#   exposure: character scalar, an element of XX ("psoriasis" or "eczema").
#
# Returns:
#   A gt_tbl object; its underlying data frame is available via `$`_data``.
#
# Relies on `datapath` (from code/file_paths.R) and on the dplyr/stringr/
# tibble/gt packages attached by code/packages.R.
make_gt_results <- function(exposure) {
  ABBRVexp <- substr(exposure, 1, 3)

  # load data ---------------------------------------------------------------
  df_model_anx <-
    readRDS(paste0(datapath, "out/df_model", ABBRVexp, "_anxiety.rds"))
  df_model_dep <-
    readRDS(paste0(datapath, "out/df_model", ABBRVexp, "_depression.rds"))

  # load models -------------------------------------------------------------
  mod4_dep <-
    readRDS(paste0(datapath, "out/models_data/", ABBRVexp,
                   "_depression_mod4_severity_modeldata.rds"))
  mod4_anx <-
    readRDS(paste0(datapath, "out/models_data/", ABBRVexp,
                   "_anxiety_mod4_severity_modeldata.rds"))

  # severity table with n, events, p-years ----------------------------------
  # One row per severity level: unique patients (n), event count and
  # person-years (millions), restricted to complete cases on the model's
  # covariates.
  get_data_info <- function(model, data) {
    vars <- attr(terms(model), "term.labels")
    # Drop the final model term. The original `vars[1:length(vars) - 1]`
    # only behaved via the precedence accident `(1:length(vars)) - 1`
    # (index 0 is silently dropped); this form is explicit and also safe
    # when length(vars) == 1.
    vars <- vars[seq_len(length(vars) - 1)]
    df <- data %>%
      dplyr::select(setid, patid, gender, age, pracid, out, all_of(vars), t) %>%
      distinct(setid, patid, .keep_all = TRUE) %>%
      drop_na()
    df %>%
      group_by(severity) %>%
      summarise(n = n(),
                nevents = sum(out),
                pyars_mil = sum(t) / 1e6) %>%
      ungroup()
  }
  mod4_desc_anx <- get_data_info(mod4_anx, df_model_anx)
  mod4_desc_dep <- get_data_info(mod4_dep, df_model_dep)

  # Get CIs and p-values -----------------------------------------------------
  # Star-code the severity-term p-values: "***" p<0.0001, "**" p<0.001,
  # "*" p<0.01, otherwise the p-value itself at 1 significant figure.
  getP <- function(model, sigF = 3, ci_level = 0.95) {
    model_sum <- summary(model, conf.int = ci_level)
    coef_names <- rownames(model_sum$coefficients)
    sev_names <- coef_names[str_detect(coef_names, "severity")]
    pval <- model_sum$coefficients[sev_names, 5] %>% signif(digits = 1)
    pvalout <- character(length(pval))
    for (ii in seq_along(pval)) {
      if (pval[ii] < 0.0001) {
        pvalout[ii] <- "***"
      } else if (pval[ii] < 0.001) {
        pvalout[ii] <- "**"
      } else if (pval[ii] < 0.01) {
        pvalout[ii] <- "*"
      } else {
        pvalout[ii] <- paste0(pval[ii])
      }
    }
    pvalout
  }
  # Format "lower-upper" strings for the severity-term confidence intervals.
  # NOTE(review): conf.int rows are indexed positionally (ii), which assumes
  # the severity terms come first in the model summary — confirm this holds
  # for these model objects.
  getCI <- function(model, sigF = 3, ci_level = 0.95) {
    model_sum <- summary(model, conf.int = ci_level)
    coef_names <- rownames(model_sum$coefficients)
    sev_names <- coef_names[str_detect(coef_names, "severity")]
    ciout <- character(length(sev_names))
    for (ii in seq_along(sev_names)) {
      ciout[ii] <- paste0(signif(model_sum$conf.int[ii, 3], sigF),
                          "-",
                          signif(model_sum$conf.int[ii, 4], sigF))
    }
    ciout
  }
  models_list <- list(mod4_anx, mod4_dep)
  p_values <- sapply(models_list, getP)
  ci_values <- sapply(models_list, getCI)
  colnames(ci_values) <- colnames(p_values) <- c("mod4_anx", "mod4_dep")

  # regression table ---------------------------------------------------------
  sev_levels <- unique(df_model_anx$severity) %>% as.character()
  sev_levels <- sev_levels[sev_levels != "None"]
  n_levels <- length(sev_levels)
  # col1: severity labels (anxiety block, then depression block)
  char <- c(" ",
            " Unexposed",
            paste0(" ", str_to_title(sev_levels)),
            " ",
            " Unexposed",
            paste0(" ", str_to_title(sev_levels)))
  # col2: outcome label printed only on the first row of each block
  out <- c("Anxiety", rep(" ", n_levels + 1),
           "Depression", rep(" ", n_levels + 1))
  # col3: N (total) per severity level
  colN <- c(
    " ",
    prettyNum(mod4_desc_anx$n, big.mark = ","),
    " ",
    prettyNum(mod4_desc_dep$n, big.mark = ",")
  )
  # col4: "events/person-years (mil)"
  colevents <- c(
    " ",
    paste0(prettyNum(mod4_desc_anx$nevents, big.mark = ","), "/",
           prettyNum(mod4_desc_anx$pyars_mil, digits = 3, big.mark = ",")),
    " ",
    paste0(prettyNum(mod4_desc_dep$nevents, big.mark = ","), "/",
           prettyNum(mod4_desc_dep$pyars_mil, digits = 3, big.mark = ","))
  )
  # HR column: exponentiated coefficients; assumes the first n_levels
  # coefficients are the severity terms (same positional assumption as getCI).
  mod4HR <-
    c(
      " ",
      "Ref",
      prettyNum(exp(mod4_anx$coefficients[seq_len(n_levels)]),
                digits = 3, big.mark = ","),
      " ",
      "Ref",
      prettyNum(exp(mod4_dep$coefficients[seq_len(n_levels)]),
                digits = 3, big.mark = ",")
    )
  # CI and p columns (model 4)
  mod4Ci <- c(" ", "-", ci_values[, 1], " ", "-", ci_values[, 2])
  mod4P <- c(" ", "-", p_values[, 1], " ", "-", p_values[, 2])
  out_table <-
    tibble(
      characteristic = char,
      outcome = out,
      n = colN,
      events = colevents,
      hr4 = mod4HR,
      ci4 = mod4Ci,
      p4 = mod4P
    )
  gt::gt(out_table) %>%
    gt::cols_align(columns = 3:dim(out_table)[2], align = "right")
}
pso_table <- make_gt_results(XX[1])
ecz_table <- make_gt_results(XX[2])
tab1 <- ecz_table$`_data`
tab2 <- pso_table$`_data`
tab3 <- bind_rows(tab1, tab2)
tab3_out <- tab3 %>%
gt() %>%
tab_row_group(
label = md("**Atopic eczema**"),
rows = 1:10
) %>%
tab_row_group(
label = md("**Psoriasis**"),
rows = 11:18
) %>%
row_group_order(c("**Atopic eczema**", "**Psoriasis**")) %>%
cols_align(columns = 3:dim(tab3)[2], align = "right") %>%
cols_label(
characteristic = md("**Exposure severity**"),
outcome = md("**Event**"),
n = md("**N (total)**"),
events = md("**No. events/person-years (mil)**"),
hr4 = md("**HR**"),
ci4 = md("**95% CI**"),
p4 = md("***p***")
) %>%
tab_spanner(
label = md("**Severity model**"),
columns = 5:7
) %>%
tab_style(
style = cell_text(weight = "bold"),
locations = cells_body(columns = outcome)
) %>%
tab_footnote(
footnote = "HR: Hazard ratio, CI: Confidence Interval",
locations = cells_column_labels(
columns = c(hr4, ci4))) %>%
tab_footnote(
footnote = "Additionally adjusted for calendar period and comorbidities",
locations = cells_column_spanners(
spanners = "**Severity model**")) %>%
tab_footnote(
footnote = "***: p<0.0001",
locations = cells_column_labels(
columns = c(p4)))
tab3_out
tab3_out %>%
gt::gtsave(
filename = paste0("tab7_regressionseverity.html"),
path = here::here("out/tables")
)
# make forest plot with HRs printed ---------------------------------------
get_n_ecz <- tab1 %>%
dplyr::select(characteristic, starts_with("n")) %>%
filter(n != " ") %>%
mutate_all(~str_remove_all(.,",")) %>%
mutate_at("n", as.numeric) %>%
mutate(outcome = c(rep("Anxiety",4), rep("Depression",4))) %>%
mutate_if(is.character, ~str_remove_all(., " ")) %>%
mutate(exposure = "Eczema")
get_n_pso <- tab2 %>%
dplyr::select(characteristic, starts_with("n")) %>%
filter(n != " ") %>%
mutate_all(~str_remove_all(.,",")) %>%
mutate_at("n", as.numeric) %>%
mutate(outcome = c(rep("Anxiety",3), rep("Depression",3))) %>%
mutate_if(is.character, ~str_remove_all(., " ")) %>%
mutate(exposure = "Psoriasis")
plot_df <- get_n_pso %>%
bind_rows(get_n_ecz) %>%
left_join(severity_results, by = c("outcome", "exposure", "characteristic" = "severity")) %>%
drop_na()
# convert results to string and pad so they have same width for printing on plots
plot_df$text_hr <- str_pad(round(plot_df$estimate,2), 4, pad = "0", side = "right")
plot_df$text_ciL <- str_pad(round(plot_df$conf.low,2), 4, pad = "0", side = "right")
plot_df$text_ciU <- str_pad(round(plot_df$conf.high,2), 4, pad = "0", side = "right")
plot_df$text_n <- prettyNum(plot_df$n, big.mark = ",")
plot_df$text_to_plot <- str_pad(paste0("(",
plot_df$text_n,
") ",
plot_df$text_hr,
" [",
plot_df$text_ciL,
",",
plot_df$text_ciU,
"]"),
28, pad = " ", side = "left")
pdf(here::here("out/analysis/forest_plot2_severity_v2.pdf"), width = 7.5, height = 4.5)
pd <- position_dodge(width = 0.75)
p1 <- ggplot(plot_df, aes(y = characteristic, x = estimate, xmin = conf.low, xmax = conf.high, group = outcome, colour = outcome, label = text_to_plot)) +
geom_point(position = pd, size = 3, shape = 1) +
geom_errorbar(position = pd, width = 0.25) +
geom_vline(xintercept = 1, lty=2, col = alpha(1,0.4)) +
geom_text(aes(x = 0.95),
alpha = 1,
position = pd,
size = 3.6,
#colour = 1,
show.legend = FALSE,
hjust = 1) +
scale_x_log10(breaks=seq(0.5,2,0.1),limits=c(0.8,NA)) +
scale_y_discrete(limits=rev) +
facet_grid(rows = vars(exposure), drop = TRUE, space = "free", scales = "free") +
guides(colour = guide_legend("Outcome"),
alpha = "none") +
labs(x = "Hazard ratio (95% CI)", y = "Exposure severity", caption = "(n) HR [95% CI]") +
theme_ali() +
theme(strip.background = element_blank(),
strip.text = element_text(face = "bold"),
legend.position = "bottom")
print(p1)
dev.off()
## save as jpeg for Word compatibility
jpeg(here::here("out/analysis/forest_plot2_severity_v2.jpg"), width = 7.5, height = 4.5, units = "in", res = 800)
print(p1)
dev.off()
|
/code/analysis/05b_analyse_severity.R
|
no_license
|
hendersonad/2021_skinCMDs
|
R
| false
| false
| 12,239
|
r
|
source(here::here("code/packages.R"))
source(here::here("code/file_paths.R"))
source(here("code/programs/fn_getplotdata.R")) # will need this little function later and in other codes get_plot_data
dir.create(file.path(here("out")), showWarnings = FALSE)
dir.create(file.path(here("out", "analysis")), showWarnings = FALSE)
YY <- c("depression", "anxiety")
XX <- c("psoriasis", "eczema")
severity_results <- NULL
st <- Sys.time()
for(exposure in XX){
#exposure <- XX[1]
ABBRVexp <- substr(exposure, 1 , 3)
for (outcome in YY) {
#outcome = YY[1]
.dib(paste0(outcome, "~", exposure))
# load data ---------------------------------------------------------------
df_model <-
readRDS(paste0(
datapath,
"out/df_model",
ABBRVexp,
"_",
outcome,
".rds"
))
# load models -------------------------------------------------------------
mod4 <-
readRDS(
paste0(
datapath,
"out/models_data/",
ABBRVexp,
"_",
outcome,
"_mod4_severity_modeldata.rds"
)
)
mod4tab <- broom::tidy(mod4, conf.int = T, exponentiate = T, conf.level = 0.95)
mod4results <- mod4tab %>%
filter(str_detect(string = term, pattern = "^severity.")) %>%
mutate(Y = paste0(outcome), X = paste0(exposure)) %>%
dplyr::select(X, Y, term, estimate, conf.low, conf.high)
severity_results <- severity_results %>%
bind_rows(mod4results)
}
}
severity_results <- severity_results %>%
mutate_at("term", ~str_remove(., "^severity")) %>%
mutate_at(c("X", "Y"), ~str_to_title(.)) %>%
rename(severity = term,
exposure = X,
outcome = Y)
pd <- position_dodge(width = 0.3)
ybase <- -0.1 + severity_results$conf.low %>% min() %>% round(digits = 2)
yheight <- 0.1 + severity_results$conf.high %>% max() %>% round(digits = 2)
pdf(here::here("out/analysis/forest_plot2_severity.pdf"), width = 6, height = 4)
p1 <- ggplot(severity_results, aes(x = severity, y = estimate, ymin = conf.low, ymax = conf.high, group = outcome, colour = outcome)) +
geom_point(position = pd, size = 3, shape = 1) +
geom_errorbar(position = pd, width = 0.25) +
geom_hline(yintercept = 1, lty=2) +
#ylim(c(0,NA)) +
scale_y_log10(breaks=seq(0.5,2,0.1),position="left",limits=c(ybase, yheight)) +
scale_x_discrete(limits=rev) +
facet_grid(rows = vars(exposure), drop = TRUE, space = "free", scales = "free") +
coord_flip() +
guides(colour = guide_legend("Outcome")) +
labs(y = "Hazard ratio", x = "Exposure severity") +
scale_alpha_identity() +
theme_bw() +
theme(strip.background = element_blank(),
strip.text = element_text(face = "bold"),
legend.position = "bottom")
print(p1)
dev.off()
make_gt_results <- function(exposure){
#exposure <- XX[1]
ABBRVexp <- substr(exposure, 1 , 3)
# load data ---------------------------------------------------------------
df_model_anx <-
readRDS(paste0(
datapath,
"out/df_model",
ABBRVexp,
"_anxiety.rds"
))
df_model_dep <-
readRDS(paste0(
datapath,
"out/df_model",
ABBRVexp,
"_depression.rds"
))
# load models -------------------------------------------------------------
mod4_dep <-
readRDS(
paste0(
datapath,
"out/models_data/",
ABBRVexp,
"_depression_mod4_severity_modeldata.rds"
)
)
mod4_anx <-
readRDS(
paste0(
datapath,
"out/models_data/",
ABBRVexp,
"_anxiety_mod4_severity_modeldata.rds"
)
)
# severity table with n, events, p-years --------------------------------
get_data_info <- function(model, data) {
vars <- attr(terms(model), "term.labels")
vars <- vars[1:length(vars) - 1]
df <- data %>%
dplyr::select(setid, patid, gender, age, pracid, out, all_of(vars), t) %>%
distinct(setid, patid, .keep_all = TRUE) %>%
drop_na()
dfsum <- df %>%
group_by(severity) %>%
summarise(n = n(),
nevents = sum(out),
pyars_mil = sum(t) / 1e6) %>%
ungroup()
# dfsum %>%
# summarise_at(-1, ~sum(.)) %>%
# bind_cols(exposed = "Total") %>%
# bind_rows(dfsum)
dfsum
}
mod4_desc_anx <- get_data_info(mod4_anx, df_model_anx)
mod4_desc_dep <- get_data_info(mod4_dep, df_model_dep)
# Get CIs and p-values ----------------------------------------------------
getP <- function(model,
sigF = 3,
ci_level = 0.95) {
model_sum <- summary(model, conf.int = ci_level)
modelrownames <- rownames(model_sum$coefficients)
sev_names <- modelrownames[str_detect(modelrownames, "severity")]
pval <- model_sum$coefficients[sev_names, 5] %>% signif(digits = 1)
pvalout <- vector()
for(ii in 1:length(pval)){
if (pval[ii] < 0.0001) {
pvalout[ii] <- "***"
} else if (pval[ii] < 0.001) {
pvalout[ii] <- "**"
} else if (pval[ii] < 0.01) {
pvalout[ii] <- "*"
} else {
pvalout[ii] <- paste0(pval[ii])
}
}
pvalout
}
getCI <- function(model,
sigF = 3,
ci_level = 0.95) {
model_sum <- summary(model, conf.int = ci_level)
modelrownames <- rownames(model_sum$coefficients)
sev_names <- modelrownames[str_detect(modelrownames, "severity")]
ciout <- vector()
for(ii in 1:length(sev_names)){
ciout[ii] <- paste0(signif(model_sum$conf.int[ii, 3], sigF),
"-",
signif(model_sum$conf.int[ii, 4], sigF))
}
ciout
}
models_list <- list(mod4_anx, mod4_dep)
p_values <- sapply(models_list, getP)
ci_values <- sapply(models_list, getCI)
colnames(ci_values) <- colnames(p_values) <- c("mod4_anx", "mod4_dep")
# regression table ---------------------------------------------------------
sev_levels <- unique(df_model_anx$severity) %>% as.character()
sev_levels <- sev_levels[sev_levels != "None"]
n_levels <- length(sev_levels)
#col1 severity
char <- c(" ",
" Unexposed",
paste0(" ", str_to_title(sev_levels)),
" ",
" Unexposed",
paste0(" ", str_to_title(sev_levels)))
#col2 outcome
out <- c("Anxiety", rep(" ", n_levels+1), "Depression", rep(" ", n_levels+1))
#col3 N
colN <- c(
" ",
prettyNum(mod4_desc_anx$n, big.mark = ","),
" ",
prettyNum(mod4_desc_dep$n, big.mark = ",")
)
colevents <- c(
" ",
paste0(prettyNum(mod4_desc_anx$nevents, big.mark = ","), "/", prettyNum(mod4_desc_anx$pyars_mil, digits = 3, big.mark = ",")),
" ",
paste0(prettyNum(mod4_desc_dep$nevents, big.mark = ","), "/", prettyNum(mod4_desc_dep$pyars_mil, digits = 3, big.mark = ","))
)
#col7 HR (model 1)
mod4HR <-
c(
" ",
"Ref",
prettyNum(
exp(mod4_anx$coefficients[1:n_levels]),
digits = 3,
big.mark = ","
),
" ",
"Ref",
prettyNum(
exp(mod4_dep$coefficients[1:n_levels]),
digits = 3,
big.mark = ","
)
)
#col8 CI (model 1)
mod4Ci <- c(" ", "-", ci_values[,1], " ", "-", ci_values[,2])
#col9 p (model 1)
mod4P <- c(" ", "-", p_values[,1], " ", "-", p_values[,2])
out_table <-
tibble(
characteristic = char,
outcome = out,
n = colN,
events = colevents,
hr4 = mod4HR,
ci4 = mod4Ci,
p4 = mod4P
)
gt::gt(out_table) %>%
gt::cols_align(columns = 3:dim(out_table)[2], align = "right")
}
pso_table <- make_gt_results(XX[1])
ecz_table <- make_gt_results(XX[2])
tab1 <- ecz_table$`_data`
tab2 <- pso_table$`_data`
tab3 <- bind_rows(tab1, tab2)
tab3_out <- tab3 %>%
gt() %>%
tab_row_group(
label = md("**Atopic eczema**"),
rows = 1:10
) %>%
tab_row_group(
label = md("**Psoriasis**"),
rows = 11:18
) %>%
row_group_order(c("**Atopic eczema**", "**Psoriasis**")) %>%
cols_align(columns = 3:dim(tab3)[2], align = "right") %>%
cols_label(
characteristic = md("**Exposure severity**"),
outcome = md("**Event**"),
n = md("**N (total)**"),
events = md("**No. events/person-years (mil)**"),
hr4 = md("**HR**"),
ci4 = md("**95% CI**"),
p4 = md("***p***")
) %>%
tab_spanner(
label = md("**Severity model**"),
columns = 5:7
) %>%
tab_style(
style = cell_text(weight = "bold"),
locations = cells_body(columns = outcome)
) %>%
tab_footnote(
footnote = "HR: Hazard ratio, CI: Confidence Interval",
locations = cells_column_labels(
columns = c(hr4, ci4))) %>%
tab_footnote(
footnote = "Additionally adjusted for calendar period and comorbidities",
locations = cells_column_spanners(
spanners = "**Severity model**")) %>%
tab_footnote(
footnote = "***: p<0.0001",
locations = cells_column_labels(
columns = c(p4)))
tab3_out
tab3_out %>%
gt::gtsave(
filename = paste0("tab7_regressionseverity.html"),
path = here::here("out/tables")
)
# make forest plot with HRs printed ---------------------------------------
get_n_ecz <- tab1 %>%
dplyr::select(characteristic, starts_with("n")) %>%
filter(n != " ") %>%
mutate_all(~str_remove_all(.,",")) %>%
mutate_at("n", as.numeric) %>%
mutate(outcome = c(rep("Anxiety",4), rep("Depression",4))) %>%
mutate_if(is.character, ~str_remove_all(., " ")) %>%
mutate(exposure = "Eczema")
get_n_pso <- tab2 %>%
dplyr::select(characteristic, starts_with("n")) %>%
filter(n != " ") %>%
mutate_all(~str_remove_all(.,",")) %>%
mutate_at("n", as.numeric) %>%
mutate(outcome = c(rep("Anxiety",3), rep("Depression",3))) %>%
mutate_if(is.character, ~str_remove_all(., " ")) %>%
mutate(exposure = "Psoriasis")
plot_df <- get_n_pso %>%
bind_rows(get_n_ecz) %>%
left_join(severity_results, by = c("outcome", "exposure", "characteristic" = "severity")) %>%
drop_na()
# convert results to string and pad so they have same width for printing on plots
plot_df$text_hr <- str_pad(round(plot_df$estimate,2), 4, pad = "0", side = "right")
plot_df$text_ciL <- str_pad(round(plot_df$conf.low,2), 4, pad = "0", side = "right")
plot_df$text_ciU <- str_pad(round(plot_df$conf.high,2), 4, pad = "0", side = "right")
plot_df$text_n <- prettyNum(plot_df$n, big.mark = ",")
plot_df$text_to_plot <- str_pad(paste0("(",
plot_df$text_n,
") ",
plot_df$text_hr,
" [",
plot_df$text_ciL,
",",
plot_df$text_ciU,
"]"),
28, pad = " ", side = "left")
pdf(here::here("out/analysis/forest_plot2_severity_v2.pdf"), width = 7.5, height = 4.5)
pd <- position_dodge(width = 0.75)
p1 <- ggplot(plot_df, aes(y = characteristic, x = estimate, xmin = conf.low, xmax = conf.high, group = outcome, colour = outcome, label = text_to_plot)) +
geom_point(position = pd, size = 3, shape = 1) +
geom_errorbar(position = pd, width = 0.25) +
geom_vline(xintercept = 1, lty=2, col = alpha(1,0.4)) +
geom_text(aes(x = 0.95),
alpha = 1,
position = pd,
size = 3.6,
#colour = 1,
show.legend = FALSE,
hjust = 1) +
scale_x_log10(breaks=seq(0.5,2,0.1),limits=c(0.8,NA)) +
scale_y_discrete(limits=rev) +
facet_grid(rows = vars(exposure), drop = TRUE, space = "free", scales = "free") +
guides(colour = guide_legend("Outcome"),
alpha = "none") +
labs(x = "Hazard ratio (95% CI)", y = "Exposure severity", caption = "(n) HR [95% CI]") +
theme_ali() +
theme(strip.background = element_blank(),
strip.text = element_text(face = "bold"),
legend.position = "bottom")
print(p1)
dev.off()
## save as jpeg for Word compatibility
jpeg(here::here("out/analysis/forest_plot2_severity_v2.jpg"), width = 7.5, height = 4.5, units = "in", res = 800)
print(p1)
dev.off()
|
###############
# 3^5 Projected Change Analyses - drought plotter
library(ncdf4) # loading necessary libraries and extra functions
library(maps)
library(fields)
library(sp)
source("analysisfunctions.R")
##############
# User supplied inputs
# User supplied inputs -------------------------------------------------------
# Short name for the variable of interest. Threshold-count variables handled
# by this script: SPIlow, SPIhigh, SPEIlow, SPEIhigh.
varname = "SPEIlow"
varin = varname # don't change this; remapped below for threshold variables
#noleap=TRUE # if your data has leap days change this to FALSE, otherwise leave it alone. Should be false for 3^5.
# For the threshold variables, read the base index (SPI or SPEI) and count
# the months matching the threshold.
if(varname=="SPIlow" | varname=="SPIhigh") varin="SPI"
# BUG FIX: the second comparison previously repeated "SPEIlow", so selecting
# varname = "SPEIhigh" never remapped varin to the base "SPEI" variable.
if(varname=="SPEIlow" | varname=="SPEIhigh") varin="SPEI"
scale = 1 # monthly scale for SPI and SPEI
difftype="absolute" # type of difference to take, can be either absolute or percent
applymask = NA # state name to mask to; if you don't want to apply a state mask, leave this alone
colorchoicediff = "greentobrown" # colorramps for difference plots, choices include "bluetored","redtoblue","browntogreen","greentobrown"
BINLIMIT = 30 # maximum number of color bins allowed for plotting the projected changes
appfunc = "sum" # yearly aggregation function: "sum" for precipitation and all threshold counts, "mean" for e.g. annual average temperature
TC = TRUE # Threshold calculator - should this be calculating a threshold? TRUE or FALSE
TH = 0 # Threshold value - what's the threshold the script should calculate for?
# NOTE(review): the reference thresholds below say SPEIlow/SPIlow use -1.5
# (and the *high variants +1.5), but TH is set to 0 here - confirm the
# intended threshold before interpreting results.
cond = "lte" # Threshold condition - "gte","lte","gt","lt"; TH and cond are ignored if TC=FALSE
# Reference thresholds (temperatures in K, precipitation in mm):
# SPIlow, SPEIlow: -1.5 - SPEI, SPI values less than or equal to -1.5
# SPIhigh, SPEIhigh: 1.5 - SPEI, SPI values greater than or equal to 1.5
########################################################
# DON'T CHANGE ANYTHING BELOW THIS LINE!!!
###########
# 1. Data Gather and conversion
histfilelist = system(paste("ls /data2/3to5/I35/",varin,scale,"/EDQM/",varin,scale,"*historical*.nc",sep=""),intern=T)
projfilelist = system(paste("ls /data2/3to5/I35/",varin,scale,"/EDQM/",varin,scale,"*rcp*.nc",sep=""),intern=T)
filebreakdown = do.call(rbind,strsplit(projfilelist,"_",fixed=TRUE))
filebreakdown2 = do.call(rbind,strsplit(filebreakdown[,3],"-",fixed=TRUE))
filebreakdown3 = data.frame(filebreakdown[,1:2],filebreakdown2,filebreakdown[,4:7])
filebreakdown3$GCM = rep(c("CCSM4","MIROC5","MPI-ESM-LR"),each=9)
filebreakdown3$obs = rep(c("Daymet","Livneh","PRISM"),9)
filebreakdown3 = filebreakdown3[,-c(3,8,9)]
names(filebreakdown3) = c("var","tempres","DS","code","scen","experiment","GCM","obs")
projfilebreakdown = filebreakdown3
rm(filebreakdown3)
filebreakdown = do.call(rbind,strsplit(histfilelist,"_",fixed=TRUE))
filebreakdown2 = do.call(rbind,strsplit(filebreakdown[,3],"-",fixed=TRUE))
filebreakdown3 = data.frame(filebreakdown[,1:2],filebreakdown2,filebreakdown[,4:7])
filebreakdown3$GCM = rep(c("CCSM4","MIROC5","MPI-ESM-LR"),each=3)
filebreakdown3$obs = rep(c("Daymet","Livneh","PRISM"),3)
filebreakdown3 = filebreakdown3[,-c(3,8,9)]
names(filebreakdown3) = c("var","tempres","DS","code","scen","experiment","GCM","obs")
histfilebreakdown = filebreakdown3
rm(filebreakdown3)
rm(filebreakdown2)
rm(filebreakdown)
dates = seq(as.Date("1981-01-15"),as.Date("2005-12-15"),by="month")
for(i in 1:length(histfilelist)){
ptm = proc.time()
message("Starting work on file ",histfilelist[i])
datesin = dates
test=nc_open(histfilelist[i])
vardata = ncvar_get(test,varin)
if(varname == "SPEIlow" | varname=="SPIlow") {
vardata=ifelse(vardata<=TH,1,0)
}
if(varname == "SPEIhigh" | varname=="SPIhigh") {
vardata=ifelse(vardata>=TH,1,0)
}
if(i==1){
lon = ncvar_get(test,"lon")
lat = ncvar_get(test,"lat")
histlist = array(NA,dim=c(length(lon),length(lat),length(histfilelist)))
}
nc_close(test)
years = unique(as.numeric(substr(dates,1,4)))
tmp = array(NA,dim=c(length(lon),length(lat),length(years)))
for(y in 1:length(years)){
yearidx2 = which(as.numeric(substr(dates,1,4))==years[y])
tmp[,,y]=apply(vardata[,,yearidx2],c(1,2),sum,na.rm=TRUE)
tmp[,,y] = ifelse(is.na(vardata[,,1])==FALSE,tmp[,,y],NA)
}
histlist[,,i] = apply(tmp,c(1,2),mean,na.rm=TRUE)
gc()
ptmend = proc.time()
message("Finished with file ",i," / ",length(histfilelist))
message("Time to complete gathering: ",ptmend[3]-ptm[3]," secs")
}
########
# Future data grab
# Projection files span 2006-2099 monthly; only 2044-2068 is analyzed.
dates = seq(as.Date("2006-01-15"),as.Date("2099-12-15"),by="month")
yearsused = 2044:2068
yearidx = which(as.numeric(substr(dates,1,4))>=2044 & as.numeric(substr(dates,1,4))<=2068)
for(i in 1:length(projfilelist)){
ptm = proc.time()
#if(noleap==TRUE) dates = dates[-which(substr(dates,6,10)=="02-29")]
# datesin appears unused below -- presumably vestigial; verify before removing.
datesin = dates
test=nc_open(projfilelist[i])
# Read only the 2044-2068 slab from disk (start/count on the time axis).
vardata = ncvar_get(test,varin,start=c(1,1,yearidx[1]),count=c(-1,-1,length(yearidx)))
# Convert the SPI/SPEI index to a 0/1 indicator, as in the historical loop.
if(varname == "SPEIlow" | varname=="SPIlow") {
vardata=ifelse(vardata<=TH,1,0)
}
if(varname == "SPEIhigh" | varname=="SPIhigh") {
vardata=ifelse(vardata>=TH,1,0)
}
if(i==1){
lon = ncvar_get(test,"lon")
lat = ncvar_get(test,"lat")
projlist = array(NA,dim=c(length(lon),length(lat),length(projfilelist)))
}
nc_close(test)
# vardata was subset on read, so year indices below are relative to
# dates[yearidx], keeping the two index spaces aligned.
years = unique(as.numeric(substr(dates[yearidx],1,4)))
tmp = array(NA,dim=c(length(lon),length(lat),length(years)))
for(y in 1:length(years)){
yearidx2 = which(as.numeric(substr(dates[yearidx],1,4))==years[y])
tmp[,,y]=apply(vardata[,,yearidx2],c(1,2),sum,na.rm=TRUE)
# Re-apply the missing-data mask (sum with na.rm=TRUE zeroes masked cells).
tmp[,,y] = ifelse(is.na(vardata[,,1])==FALSE,tmp[,,y],NA)
}
# Mean annual count over 2044-2068 for this projection member.
projlist[,,i] = apply(tmp,c(1,2),mean,na.rm=TRUE)
gc()
ptmend = proc.time()
message("Finished with file ",i," / ",length(projfilelist))
message("Time to complete gathering: ",ptmend[3]-ptm[3]," secs")
}
######
# Difference Calcs
# diffcalc() and statemask()/colorramp() come from analysisfunctions.R.
source("analysisfunctions.R")
diffs = array(NA,dim=dim(projlist))
for(i in 1:length(projfilelist)){
# Pair each projection member with the historical member from the same
# GCM + training-observation combination.
GCMin = projfilebreakdown$GCM[i]
obsin = projfilebreakdown$obs[i]
histidx = which(histfilebreakdown$GCM==GCMin & histfilebreakdown$obs==obsin)
diffs[,,i]=diffcalc(projlist[,,i],histlist[,,histidx],type=difftype)
}
#####
# Group by Emissions Scenario
# Ensemble-mean change per scenario (RCP), averaged over members.
diffsg1 = array(NA,dim=c(length(lon),length(lat),length(unique(projfilebreakdown$scen))))
scens = unique(projfilebreakdown$scen)
for(s in 1:length(scens)){
scenidx = which(projfilebreakdown$scen==scens[s])
diffsg1[,,s] = apply(diffs[,,scenidx],c(1,2),mean,na.rm=TRUE)
}
################
# Shift longitudes from 0-360 to -180..180 convention for map overlays.
lon=lon-360
if(is.na(applymask)==FALSE){
# statemask() also wraps results into list(lon, lat, outputdata).
diffs = statemask(diffs,inputlat=lat,inputlon=lon,state=applymask)
diffsg1 = statemask(diffsg1,inputlat=lat,inputlon=lon,state=applymask)
projlist = statemask(projlist,inputlat=lat,inputlon=lon,state=applymask)
histlist = statemask(histlist,inputlat=lat,inputlon=lon,state=applymask)
} else {
# No mask: wrap into the same list layout so downstream [[i]] access works.
diffs = list(lon=lon,lat=lat,outputdata=diffs)
diffsg1 = list(lon=lon,lat=lat,outputdata=diffsg1)
projlist = list(lon=lon,lat=lat,outputdata=projlist)
histlist = list(lon=lon,lat=lat,outputdata=histlist)
}
################
# Plotting
# Shared color bar across all individual members, ordered by scenario.
diffcolorbar = colorramp(diffs[[3]],colorchoice=colorchoicediff,Blimit=BINLIMIT,type="difference")
diffs_sort = diffs[[3]][,,order(projfilebreakdown$scen)]
projfilebreakdown = projfilebreakdown[order(projfilebreakdown$scen),]
pdf(paste("IndividualMembers_",varname,scale,"_",difftype,".pdf",sep=""),onefile=TRUE,width=10,height=10)
par(mfrow=c(3,3))
for(i in 1:length(projfilelist)){
GCM = projfilebreakdown$GCM[i]
scen = projfilebreakdown$scen[i]
obs = projfilebreakdown$obs[i]
DS = projfilebreakdown$DS[i]
testsfc1 = list(x=diffs[[1]],y=diffs[[2]],z=diffs_sort[,,i])
surface(testsfc1,type="I",main=paste("Projected Difference from Historical Climate\nScen: ",scen," GCM: ",GCM," DS: ", DS," Obs: ",obs,sep=""),zlim=diffcolorbar[[1]],col=diffcolorbar[[3]],breaks=diffcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
}
dev.off()
# Scenario-mean maps for the 1st and 3rd scenarios only -- presumably RCP4.5
# and RCP8.5; confirm against the actual scenario ordering.
scensin = scens[c(1,3)]
diffsg1_sort = diffsg1[[3]][,,c(1,3)]
pdf(paste("Group1_",varname,scale,"_",difftype,".pdf",sep=""),onefile=TRUE,width=10,height=5)
diffcolorbar = colorramp(diffsg1[[3]],colorchoice=colorchoicediff,Blimit=BINLIMIT,type="difference")
par(mfrow=c(1,2))
for(i in 1:length(scensin)){
testsfc1 = list(x=diffsg1[[1]],y=diffsg1[[2]],z=diffsg1_sort[,,i])
surface(testsfc1,type="I",main=paste("Projected Difference from Historical Climate\nScen: ",scensin[i],sep=""),zlim=diffcolorbar[[1]],col=diffcolorbar[[3]],breaks=diffcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
}
dev.off()
|
/old/analysis3to5_droughtplotter.R
|
no_license
|
amwootte/analysisscripts
|
R
| false
| false
| 9,107
|
r
|
###############
# 3^5 Projected Change Analyses - drought plotter
library(ncdf4) # loading necessary libraries and extra functions
library(maps)
library(fields)
library(sp)
source("analysisfunctions.R")
##############
# User supplied inputs
varname = "SPEIlow" # short name for the variable of interest options include tasmax, tasmin, pr, tmax95, tmax100, tmin32, tmin28, pr25, and pr50
varin = varname # don't change this
#noleap=TRUE # if your data has leap days change this to FALSE, otherwise leave it alone. Should be false for 3^5.
if(varname=="SPIlow" | varname=="SPIhigh") varin="SPI" # for the tmax95 and other threshold variables, you need to use the base variable and calculate the number of days matching the threshold.
# BUG FIX: the second test was a duplicated "SPEIlow", so varname=="SPEIhigh"
# never remapped varin to the base "SPEI" variable (the file listing below
# would then look for nonexistent SPEIhigh* files).
if(varname=="SPEIlow" | varname=="SPEIhigh") varin="SPEI"
scale = 1 # monthly scale for SPI and SPEI
difftype="absolute" # type of difference to take, can be either absolute or percent
applymask = NA # if you don't want to apply a state mask, leave this alone
colorchoicediff = "greentobrown" # colorramps for difference plots, choices include "bluetored","redtoblue","browntogreen","greentobrown"
BINLIMIT = 30 # maximum number of color bins allowed for plotting the projected changes
appfunc = "sum" # which functions do you apply for yearly calculations? "mean" is used for getting annual average temps for instance. "sum" would be used for annual total rainfall and thresholds
# for precipitation and all the threshold functions this should be "sum", otherwise use "mean"
# NOTE(review): appfunc, TC and cond are set here but the loops below hard-code
# sum() and the <=/>= comparisons; these settings appear informational only.
TC = TRUE # Threshold calculator - should this be calculating a threshold? TRUE (calculate threshold) or FALSE(don't calculate threshold)
TH = 0 # Threshold value - what's the threshold the script should calculate for?
# NOTE(review): the comments below document -1.5/1.5 thresholds but TH is 0
# here -- confirm the intended cutoff before running.
cond = "lte" # Threshold condition - "gte" = greater than or equal to, "lte" = less than or equal to, "gt" = greater than, "lt"= less than
# threshold value and threshold condition are ignored if TC=FALSE
# should be using these to calculate tmax95 and the others, temperature thresholds should be supplied in degrees K, precipitation thresholds in mm
# thresholds for each of the following are
# SPIlow, SPEIlow: -1.5 - SPEI, SPI values less than or equal to -1.5
# SPIhigh, SPEIhigh: 1.5 - SPEI, SPI values greater than or equal to 1.5
########################################################
# DON'T CHANGE ANYTHING BELOW THIS LINE!!!
###########
# 1. Data Gather and conversion
# List the EDQM-downscaled NetCDF files for the chosen variable/scale.
histfilelist = system(paste("ls /data2/3to5/I35/",varin,scale,"/EDQM/",varin,scale,"*historical*.nc",sep=""),intern=T)
projfilelist = system(paste("ls /data2/3to5/I35/",varin,scale,"/EDQM/",varin,scale,"*rcp*.nc",sep=""),intern=T)
# Parse projection file names ("_"-separated, third token "-"-separated) into
# a metadata table, one row per file.
filebreakdown = do.call(rbind,strsplit(projfilelist,"_",fixed=TRUE))
filebreakdown2 = do.call(rbind,strsplit(filebreakdown[,3],"-",fixed=TRUE))
filebreakdown3 = data.frame(filebreakdown[,1:2],filebreakdown2,filebreakdown[,4:7])
# 3 GCMs x 3 obs x 3 scenarios = 27 projection files; assumes the sorted "ls"
# order matches this rep() pattern -- TODO confirm against the file listing.
filebreakdown3$GCM = rep(c("CCSM4","MIROC5","MPI-ESM-LR"),each=9)
filebreakdown3$obs = rep(c("Daymet","Livneh","PRISM"),9)
filebreakdown3 = filebreakdown3[,-c(3,8,9)]
names(filebreakdown3) = c("var","tempres","DS","code","scen","experiment","GCM","obs")
projfilebreakdown = filebreakdown3
rm(filebreakdown3)
# Same parsing for the historical files (3 GCMs x 3 obs = 9 files).
filebreakdown = do.call(rbind,strsplit(histfilelist,"_",fixed=TRUE))
filebreakdown2 = do.call(rbind,strsplit(filebreakdown[,3],"-",fixed=TRUE))
filebreakdown3 = data.frame(filebreakdown[,1:2],filebreakdown2,filebreakdown[,4:7])
filebreakdown3$GCM = rep(c("CCSM4","MIROC5","MPI-ESM-LR"),each=3)
filebreakdown3$obs = rep(c("Daymet","Livneh","PRISM"),3)
filebreakdown3 = filebreakdown3[,-c(3,8,9)]
names(filebreakdown3) = c("var","tempres","DS","code","scen","experiment","GCM","obs")
histfilebreakdown = filebreakdown3
rm(filebreakdown3)
rm(filebreakdown2)
rm(filebreakdown)
# Historical period: monthly time axis, 1981-2005 (mid-month stamps).
dates = seq(as.Date("1981-01-15"),as.Date("2005-12-15"),by="month")
for(i in 1:length(histfilelist)){
ptm = proc.time()
message("Starting work on file ",histfilelist[i])
# datesin appears unused below -- presumably vestigial; verify before removing.
datesin = dates
test=nc_open(histfilelist[i])
vardata = ncvar_get(test,varin)
# Convert the SPI/SPEI index to a 0/1 drought-month (or wet-month) indicator.
if(varname == "SPEIlow" | varname=="SPIlow") {
vardata=ifelse(vardata<=TH,1,0)
}
if(varname == "SPEIhigh" | varname=="SPIhigh") {
vardata=ifelse(vardata>=TH,1,0)
}
# Grid coordinates and the output array are only set up on the first file.
if(i==1){
lon = ncvar_get(test,"lon")
lat = ncvar_get(test,"lat")
histlist = array(NA,dim=c(length(lon),length(lat),length(histfilelist)))
}
nc_close(test)
# Count flagged months per calendar year at each grid cell.
years = unique(as.numeric(substr(dates,1,4)))
tmp = array(NA,dim=c(length(lon),length(lat),length(years)))
for(y in 1:length(years)){
yearidx2 = which(as.numeric(substr(dates,1,4))==years[y])
tmp[,,y]=apply(vardata[,,yearidx2],c(1,2),sum,na.rm=TRUE)
# sum(na.rm=TRUE) turns all-NA (masked) cells into 0, so re-apply the
# missing-data mask taken from the first time slice.
tmp[,,y] = ifelse(is.na(vardata[,,1])==FALSE,tmp[,,y],NA)
}
# Climatological mean annual count for this historical member.
histlist[,,i] = apply(tmp,c(1,2),mean,na.rm=TRUE)
gc()
ptmend = proc.time()
message("Finished with file ",i," / ",length(histfilelist))
message("Time to complete gathering: ",ptmend[3]-ptm[3]," secs")
}
########
# Future data grab
# Projection files span 2006-2099 monthly; only 2044-2068 is analyzed.
dates = seq(as.Date("2006-01-15"),as.Date("2099-12-15"),by="month")
yearsused = 2044:2068
yearidx = which(as.numeric(substr(dates,1,4))>=2044 & as.numeric(substr(dates,1,4))<=2068)
for(i in 1:length(projfilelist)){
ptm = proc.time()
#if(noleap==TRUE) dates = dates[-which(substr(dates,6,10)=="02-29")]
# datesin appears unused below -- presumably vestigial; verify before removing.
datesin = dates
test=nc_open(projfilelist[i])
# Read only the 2044-2068 slab from disk (start/count on the time axis).
vardata = ncvar_get(test,varin,start=c(1,1,yearidx[1]),count=c(-1,-1,length(yearidx)))
# Convert the SPI/SPEI index to a 0/1 indicator, as in the historical loop.
if(varname == "SPEIlow" | varname=="SPIlow") {
vardata=ifelse(vardata<=TH,1,0)
}
if(varname == "SPEIhigh" | varname=="SPIhigh") {
vardata=ifelse(vardata>=TH,1,0)
}
if(i==1){
lon = ncvar_get(test,"lon")
lat = ncvar_get(test,"lat")
projlist = array(NA,dim=c(length(lon),length(lat),length(projfilelist)))
}
nc_close(test)
# vardata was subset on read, so year indices below are relative to
# dates[yearidx], keeping the two index spaces aligned.
years = unique(as.numeric(substr(dates[yearidx],1,4)))
tmp = array(NA,dim=c(length(lon),length(lat),length(years)))
for(y in 1:length(years)){
yearidx2 = which(as.numeric(substr(dates[yearidx],1,4))==years[y])
tmp[,,y]=apply(vardata[,,yearidx2],c(1,2),sum,na.rm=TRUE)
# Re-apply the missing-data mask (sum with na.rm=TRUE zeroes masked cells).
tmp[,,y] = ifelse(is.na(vardata[,,1])==FALSE,tmp[,,y],NA)
}
# Mean annual count over 2044-2068 for this projection member.
projlist[,,i] = apply(tmp,c(1,2),mean,na.rm=TRUE)
gc()
ptmend = proc.time()
message("Finished with file ",i," / ",length(projfilelist))
message("Time to complete gathering: ",ptmend[3]-ptm[3]," secs")
}
######
# Difference Calcs
# diffcalc() and statemask()/colorramp() come from analysisfunctions.R.
source("analysisfunctions.R")
diffs = array(NA,dim=dim(projlist))
for(i in 1:length(projfilelist)){
# Pair each projection member with the historical member from the same
# GCM + training-observation combination.
GCMin = projfilebreakdown$GCM[i]
obsin = projfilebreakdown$obs[i]
histidx = which(histfilebreakdown$GCM==GCMin & histfilebreakdown$obs==obsin)
diffs[,,i]=diffcalc(projlist[,,i],histlist[,,histidx],type=difftype)
}
#####
# Group by Emissions Scenario
# Ensemble-mean change per scenario (RCP), averaged over members.
diffsg1 = array(NA,dim=c(length(lon),length(lat),length(unique(projfilebreakdown$scen))))
scens = unique(projfilebreakdown$scen)
for(s in 1:length(scens)){
scenidx = which(projfilebreakdown$scen==scens[s])
diffsg1[,,s] = apply(diffs[,,scenidx],c(1,2),mean,na.rm=TRUE)
}
################
# Shift longitudes from 0-360 to -180..180 convention for map overlays.
lon=lon-360
if(is.na(applymask)==FALSE){
# statemask() also wraps results into list(lon, lat, outputdata).
diffs = statemask(diffs,inputlat=lat,inputlon=lon,state=applymask)
diffsg1 = statemask(diffsg1,inputlat=lat,inputlon=lon,state=applymask)
projlist = statemask(projlist,inputlat=lat,inputlon=lon,state=applymask)
histlist = statemask(histlist,inputlat=lat,inputlon=lon,state=applymask)
} else {
# No mask: wrap into the same list layout so downstream [[i]] access works.
diffs = list(lon=lon,lat=lat,outputdata=diffs)
diffsg1 = list(lon=lon,lat=lat,outputdata=diffsg1)
projlist = list(lon=lon,lat=lat,outputdata=projlist)
histlist = list(lon=lon,lat=lat,outputdata=histlist)
}
################
# Plotting
# Shared color bar across all individual members, ordered by scenario.
diffcolorbar = colorramp(diffs[[3]],colorchoice=colorchoicediff,Blimit=BINLIMIT,type="difference")
diffs_sort = diffs[[3]][,,order(projfilebreakdown$scen)]
projfilebreakdown = projfilebreakdown[order(projfilebreakdown$scen),]
pdf(paste("IndividualMembers_",varname,scale,"_",difftype,".pdf",sep=""),onefile=TRUE,width=10,height=10)
par(mfrow=c(3,3))
for(i in 1:length(projfilelist)){
GCM = projfilebreakdown$GCM[i]
scen = projfilebreakdown$scen[i]
obs = projfilebreakdown$obs[i]
DS = projfilebreakdown$DS[i]
testsfc1 = list(x=diffs[[1]],y=diffs[[2]],z=diffs_sort[,,i])
surface(testsfc1,type="I",main=paste("Projected Difference from Historical Climate\nScen: ",scen," GCM: ",GCM," DS: ", DS," Obs: ",obs,sep=""),zlim=diffcolorbar[[1]],col=diffcolorbar[[3]],breaks=diffcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
}
dev.off()
# Scenario-mean maps for the 1st and 3rd scenarios only -- presumably RCP4.5
# and RCP8.5; confirm against the actual scenario ordering.
scensin = scens[c(1,3)]
diffsg1_sort = diffsg1[[3]][,,c(1,3)]
pdf(paste("Group1_",varname,scale,"_",difftype,".pdf",sep=""),onefile=TRUE,width=10,height=5)
diffcolorbar = colorramp(diffsg1[[3]],colorchoice=colorchoicediff,Blimit=BINLIMIT,type="difference")
par(mfrow=c(1,2))
for(i in 1:length(scensin)){
testsfc1 = list(x=diffsg1[[1]],y=diffsg1[[2]],z=diffsg1_sort[,,i])
surface(testsfc1,type="I",main=paste("Projected Difference from Historical Climate\nScen: ",scensin[i],sep=""),zlim=diffcolorbar[[1]],col=diffcolorbar[[3]],breaks=diffcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
}
dev.off()
|
### Version history:
# V02: changed to pair with functions_V03
# Cluster driver script: splits genotype data per SGE array task.
# NOTE(review): setwd() hard-codes a cluster path; this script is not portable.
setwd("/frazer01/projects/GTEx_v7/analysis/eqtls_deconvolution")
# Sourced project files are assumed to load required packages (incl. optparse)
# and to define write_genotypes() -- confirm in cardiac_qtls_functions.R.
suppressMessages(source("analysis/cardiac_qtls_packages.R" ))
suppressMessages(source("analysis/cardiac_qtls_input_files.R" ))
suppressMessages(source("analysis/cardiac_qtls_functions.R" ))
suppressMessages(source("analysis/cardiac_qtls_input_data.R" ))
suppressMessages(source("analysis/cardiac_qtls_load_metadata.R" ))
# Command-line interface (optparse): SGE task id, analysis folder, geneinfo file.
option_list = list(make_option("--taskid" , type="integer" , default=0, help="SGE task ID" , metavar="character"),
make_option("--folder" , type="character", default=0, help="Analysis folder (from initialize_qtl_analysis)", metavar="character"),
make_option("--geneinfo_file", type="character", default=0, help="geneinfo file" , metavar="character"))
opt_parser = OptionParser(option_list=option_list)
opt = parse_args(opt_parser)
taskid = opt$taskid
infolder = opt$folder
geneinfo_file = opt$geneinfo_file
# Main work: write the genotype subset for this task (defined in sourced files).
write_genotypes(taskid, infolder, geneinfo_file)
|
/qtls_run_divide_genotypes.R
|
no_license
|
Diennguyen8290/gtex_deconvolution
|
R
| false
| false
| 1,167
|
r
|
### Version history:
# V02: changed to pair with functions_V03
# Cluster driver script: splits genotype data per SGE array task.
# NOTE(review): setwd() hard-codes a cluster path; this script is not portable.
setwd("/frazer01/projects/GTEx_v7/analysis/eqtls_deconvolution")
# Sourced project files are assumed to load required packages (incl. optparse)
# and to define write_genotypes() -- confirm in cardiac_qtls_functions.R.
suppressMessages(source("analysis/cardiac_qtls_packages.R" ))
suppressMessages(source("analysis/cardiac_qtls_input_files.R" ))
suppressMessages(source("analysis/cardiac_qtls_functions.R" ))
suppressMessages(source("analysis/cardiac_qtls_input_data.R" ))
suppressMessages(source("analysis/cardiac_qtls_load_metadata.R" ))
# Command-line interface (optparse): SGE task id, analysis folder, geneinfo file.
option_list = list(make_option("--taskid" , type="integer" , default=0, help="SGE task ID" , metavar="character"),
make_option("--folder" , type="character", default=0, help="Analysis folder (from initialize_qtl_analysis)", metavar="character"),
make_option("--geneinfo_file", type="character", default=0, help="geneinfo file" , metavar="character"))
opt_parser = OptionParser(option_list=option_list)
opt = parse_args(opt_parser)
taskid = opt$taskid
infolder = opt$folder
geneinfo_file = opt$geneinfo_file
# Main work: write the genotype subset for this task (defined in sourced files).
write_genotypes(taskid, infolder, geneinfo_file)
|
# Extracted package example: runs TwoComp_Volume_Exponent from PKconverter.
library(PKconverter)
### Name: TwoComp_Volume_Exponent
### Title: Convert pharmacokinetic parameters for two compartment model
### Aliases: TwoComp_Volume_Exponent
### ** Examples
# Convert the (V1, alpha, beta, k21) parameterization with standard errors;
# exact output parameterization is defined by the PKconverter package docs.
TwoComp_Volume_Exponent(V1=5,alpha=1.221, beta=0.029, k21=0.05,
V1.sd=0.01,alpha.sd=0.01,beta.sd=0.00005,k21.sd=0.0006)
|
/data/genthat_extracted_code/PKconverter/examples/TwoComp_Volume_Exponent.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 307
|
r
|
# Extracted package example: runs TwoComp_Volume_Exponent from PKconverter.
library(PKconverter)
### Name: TwoComp_Volume_Exponent
### Title: Convert pharmacokinetic parameters for two compartment model
### Aliases: TwoComp_Volume_Exponent
### ** Examples
# Convert the (V1, alpha, beta, k21) parameterization with standard errors;
# exact output parameterization is defined by the PKconverter package docs.
TwoComp_Volume_Exponent(V1=5,alpha=1.221, beta=0.029, k21=0.05,
V1.sd=0.01,alpha.sd=0.01,beta.sd=0.00005,k21.sd=0.0006)
|
#' power.groupsByFactor
#'
#' Subsets a dataset by a factor then uses power.groups to calculate statistical power for each group
#'
#' @author James Hutchison
#' @param x Numerical vector from which the distribution parameters will be calculated using \code{fitdistr}
#' @param f Factor the same length as \code{x}, which is used to subset the data
#' @param change Mean of second group minus mean of first group (i.e. mu2-mu1) or percentage change in mu1 to create mu2 (depending on value of \code{change.type}).
#' @param change.type Whether the parameter change represents an additive (\code{"A"}) or percentage (\code{"M"}) change.
#' @param n1 Vector of sample sizes for group 1. Must be of same dimension as \code{n2}.
#' @param n2 Vector of sample sizes for group 2. Must be of same dimension as \code{n1}.
#' @param distribution The statistical distribution for the two groups. Can be either: \code{"Normal"}, \code{"Poisson"}, \code{"Lognormal"} or \code{"Negbin"}.
#' @param test Should be \code{"P"} for parametric or \code{"NP"} for non-parametric. See \code{?power.groups} for more details.
#' @param alternative A character string specifying the alternative hypothesis, must be one of \code{"two.sided"} (default), \code{"greater"} or \code{"less"}. You can specify just the initial letter. As an example, "less" means that the alternative is that the mean of the first group is less than the mean of the second.
#' @param alpha The type 1 error for assessing statistical significance (default is 0.05) in the power simulations.
#' @param nsims Number of repeat simulations to estimate power (default is 1000).
#' @param nreps Number of repeat permutations for randomisation test (default is 999).
#' @param target.power The desired power. This is not used in the calculation, but is passed into the resulting object and used to show the target line when the object is plotted.
#' @details This function splits \code{x} into subsets based on \code{f}, then runs \code{power.groups} on each subset. \code{pars1} and \code{pars2} for \code{power.groups} are automatically calculated based on \code{x} and \code{distribution} using basic summary stats (e.g. \code{mean}, \code{sd}) or using \code{MASS::fitdistr} if \code{distribution = "Negbin"}.
#' @return An object of class \code{powerByFactor}, which is essentially a list containing the power results for each level of \code{f}. The package includes methods to \code{plot() powerByFactor} objects.
#' @export
#'
power.groupsByFactor <- function(x, f=NULL, change, change.type, n1, n2, distribution, test, alternative="two.sided", alpha=0.05, nsims=1000, nreps=999, target.power=0.8) {
  MASSLoaded <- require(MASS)
  if (!isTRUE(MASSLoaded)) stop("Package 'MASS' could not be loaded. Is it installed?")
  emonLoaded <- require(emon)
  if (!isTRUE(emonLoaded)) stop("Package 'emon' could not be loaded. Is it installed?")
  # Validate the enumerated arguments up front so failures are explicit.
  distrCheck <- distribution %in% c('Normal', 'Poisson', 'Lognormal', 'Negbin')
  if (any(!distrCheck)) stop("distr must be one of 'Normal', 'Poisson', 'Lognormal' or 'Negbin'. Other values are not permitted.")
  if (!change.type %in% c("A", "M")) stop("change.type must be either 'A' (additive) or 'M' (percentage)")
  # Split the data by factor level; with no factor the whole vector is one group.
  if (!is.null(f)) {
    splitData <- split(x, f, drop=TRUE)
  } else {
    splitData <- list(Power=x)
  }
  # Drop singleton levels: distribution parameters cannot be estimated from one value.
  groupSizes <- sapply(splitData, length)
  if (any(groupSizes == 1)) {
    if (any(groupSizes > 1)) {
      warning(paste("The following factor levels have only one value and will not be used: ", paste(names(which(groupSizes == 1)), collapse=", ")))
      splitData <- splitData[groupSizes > 1]
    } else {
      stop("No level has more than one data value. Please check your data and re-run")
    }
  }
  if (any(sapply(splitData, length) <= 10)) {
    warning(paste("The following factor levels have small data sets (10 values or fewer). Results will still be calculated but should be interpreted with caution: ", paste(names(which(sapply(splitData, length) <= 10)), collapse=", ")))
  }
  # Estimate per-group parameters in the form power.groups expects.
  if (distribution == "Normal") {
    pars_1 <- lapply(splitData, function(v) c(mean(v), sd(v)))
    pars_2 <- lapply(splitData, sd)
  } else if (distribution == "Poisson") {
    pars_1 <- lapply(splitData, mean)
    pars_2 <- NULL
  } else if (distribution == "Lognormal") {
    # Zeros are invalid on the log scale; report offending levels explicitly.
    if (any(sapply(splitData, function(v) 0 %in% v))) {
      stop(paste("The following levels contain 0s, so cannot be used with the lognormal distribution. Please correct the data then re-run: ", paste(names(which(sapply(splitData, function(v) 0 %in% v))), collapse=", ")))
    }
    splitDataLog <- lapply(splitData, log)
    pars_1 <- lapply(splitDataLog, function(v) c(mean(v), sd(v)))
    pars_2 <- lapply(splitDataLog, sd)
  } else {
    # Negative binomial: fit each group ONCE (fitdistr is comparatively slow;
    # the original refit the same group three times).
    NegbinPars <- function(v) {fitdistr(v, "Negative Binomial")$estimate}
    fits <- lapply(splitData, NegbinPars)               # each is c(size, mu)
    pars_1 <- lapply(fits, function(est) est[c(2, 1)])  # reorder to (mu, size)
    pars_2 <- lapply(fits, function(est) {
      # BUG FIX: the original applied the percentage formula for BOTH change
      # types; an additive ("A") change must shift the mean by `change`,
      # not scale it by (100 + change)/100.
      mu2 <- if (change.type == "M") est[2] * (100 + change) / 100 else est[2] + change
      size2.samevar(est[2], mu2, est[1])
    })
  }
  # Run the power simulation for every group with identical settings.
  result <- mapply(power.groups, pars1 = pars_1, pars2 = pars_2, MoreArgs = list(change = change, change.type = change.type, n1 = n1, n2 = n2, distribution = distribution, test = test, alternative = alternative, alpha = alpha, nsims = nsims, nreps = nreps), SIMPLIFY = FALSE)
  output <- list(result = result, nSample = n2, alpha = alpha, target.power = target.power)
  class(output) <- "powerByFactor"
  return(output)
}
|
/R/power.groupsByFactor_function.r
|
no_license
|
hutchisonjtw/JNCCTools
|
R
| false
| false
| 5,833
|
r
|
#' power.groupsByFactor
#'
#' Subsets a dataset by a factor then uses power.groups to calculate statistical power for each group
#'
#' @author James Hutchison
#' @param x Numerical vector from which the distribution parameters will be calculated using \code{fitdistr}
#' @param f Factor the same length as \code{x}, which is used to subset the data
#' @param change Mean of second group minus mean of first group (i.e. mu2-mu1) or percentage change in mu1 to create mu2 (depending on value of \code{change.type}).
#' @param change.type Whether the parameter change represents an additive (\code{"A"}) or percentage (\code{"M"}) change.
#' @param n1 Vector of sample sizes for group 1. Must be of same dimension as \code{n2}.
#' @param n2 Vector of sample sizes for group 2. Must be of same dimension as \code{n1}.
#' @param distribution The statistical distribution for the two groups. Can be either: \code{"Normal"}, \code{"Poisson"}, \code{"Lognormal"} or \code{"Negbin"}.
#' @param test Should be \code{"P"} for parametric or \code{"NP"} for non-parametric. See \code{?power.groups} for more details.
#' @param alternative A character string specifying the alternative hypothesis, must be one of \code{"two.sided"} (default), \code{"greater"} or \code{"less"}. You can specify just the initial letter. As an example, "less" means that the alternative is that the mean of the first group is less than the mean of the second.
#' @param alpha The type 1 error for assessing statistical significance (default is 0.05) in the power simulations.
#' @param nsims Number of repeat simulations to estimate power (default is 1000).
#' @param nreps Number of repeat permutations for randomisation test (default is 999).
#' @param target.power The desired power. This is not used in the calculation, but is passed into the resulting object and used to show the target line when the object is plotted.
#' @details This function splits \code{x} into subsets based on \code{f}, then runs \code{power.groups} on each subset. \code{pars1} and \code{pars2} for \code{power.groups} are automatically calculated based on \code{x} and \code{distribution} using basic summary stats (e.g. \code{mean}, \code{sd}) or using \code{MASS::fitdistr} if \code{distribution = "Negbin"}.
#' @return An object of class \code{powerByFactor}, which is essentially a list containing the power results for each level of \code{f}. The package includes methods to \code{plot() powerByFactor} objects.
#' @export
#'
power.groupsByFactor <- function(x, f=NULL, change, change.type, n1, n2, distribution, test, alternative="two.sided", alpha=0.05, nsims=1000, nreps=999, target.power=0.8) {
  MASSLoaded <- require(MASS)
  if (!isTRUE(MASSLoaded)) stop("Package 'MASS' could not be loaded. Is it installed?")
  emonLoaded <- require(emon)
  if (!isTRUE(emonLoaded)) stop("Package 'emon' could not be loaded. Is it installed?")
  # Validate the enumerated arguments up front so failures are explicit.
  distrCheck <- distribution %in% c('Normal', 'Poisson', 'Lognormal', 'Negbin')
  if (any(!distrCheck)) stop("distr must be one of 'Normal', 'Poisson', 'Lognormal' or 'Negbin'. Other values are not permitted.")
  if (!change.type %in% c("A", "M")) stop("change.type must be either 'A' (additive) or 'M' (percentage)")
  # Split the data by factor level; with no factor the whole vector is one group.
  if (!is.null(f)) {
    splitData <- split(x, f, drop=TRUE)
  } else {
    splitData <- list(Power=x)
  }
  # Drop singleton levels: distribution parameters cannot be estimated from one value.
  groupSizes <- sapply(splitData, length)
  if (any(groupSizes == 1)) {
    if (any(groupSizes > 1)) {
      warning(paste("The following factor levels have only one value and will not be used: ", paste(names(which(groupSizes == 1)), collapse=", ")))
      splitData <- splitData[groupSizes > 1]
    } else {
      stop("No level has more than one data value. Please check your data and re-run")
    }
  }
  if (any(sapply(splitData, length) <= 10)) {
    warning(paste("The following factor levels have small data sets (10 values or fewer). Results will still be calculated but should be interpreted with caution: ", paste(names(which(sapply(splitData, length) <= 10)), collapse=", ")))
  }
  # Estimate per-group parameters in the form power.groups expects.
  if (distribution == "Normal") {
    pars_1 <- lapply(splitData, function(v) c(mean(v), sd(v)))
    pars_2 <- lapply(splitData, sd)
  } else if (distribution == "Poisson") {
    pars_1 <- lapply(splitData, mean)
    pars_2 <- NULL
  } else if (distribution == "Lognormal") {
    # Zeros are invalid on the log scale; report offending levels explicitly.
    if (any(sapply(splitData, function(v) 0 %in% v))) {
      stop(paste("The following levels contain 0s, so cannot be used with the lognormal distribution. Please correct the data then re-run: ", paste(names(which(sapply(splitData, function(v) 0 %in% v))), collapse=", ")))
    }
    splitDataLog <- lapply(splitData, log)
    pars_1 <- lapply(splitDataLog, function(v) c(mean(v), sd(v)))
    pars_2 <- lapply(splitDataLog, sd)
  } else {
    # Negative binomial: fit each group ONCE (fitdistr is comparatively slow;
    # the original refit the same group three times).
    NegbinPars <- function(v) {fitdistr(v, "Negative Binomial")$estimate}
    fits <- lapply(splitData, NegbinPars)               # each is c(size, mu)
    pars_1 <- lapply(fits, function(est) est[c(2, 1)])  # reorder to (mu, size)
    pars_2 <- lapply(fits, function(est) {
      # BUG FIX: the original applied the percentage formula for BOTH change
      # types; an additive ("A") change must shift the mean by `change`,
      # not scale it by (100 + change)/100.
      mu2 <- if (change.type == "M") est[2] * (100 + change) / 100 else est[2] + change
      size2.samevar(est[2], mu2, est[1])
    })
  }
  # Run the power simulation for every group with identical settings.
  result <- mapply(power.groups, pars1 = pars_1, pars2 = pars_2, MoreArgs = list(change = change, change.type = change.type, n1 = n1, n2 = n2, distribution = distribution, test = test, alternative = alternative, alpha = alpha, nsims = nsims, nreps = nreps), SIMPLIFY = FALSE)
  output <- list(result = result, nSample = n2, alpha = alpha, target.power = target.power)
  class(output) <- "powerByFactor"
  return(output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b2r.R
\name{b2r}
\alias{b2r}
\title{Obtain correlation coefficients and their variance-covariances}
\usage{
b2r(b, s, rho, n)
}
\arguments{
\item{b}{the vector of linear regression coefficients.}
\item{s}{the corresponding vector of standard errors.}
\item{rho}{triangular array of between-SNP correlation.}
\item{n}{the sample size.}
}
\value{
The returned value is a list containing:
\itemize{
\item r the vector of correlation coefficients.
\item V the variance-covariance matrix of correlations.
}
}
\description{
Obtain correlation coefficients and their variance-covariances
}
\details{
This function converts linear regression coefficients of phenotype on
single nucleotide polymorphisms (SNPs) into Pearson correlation coefficients
with their variance-covariance matrix. It is useful as a preliminary step
for meta-analyzing SNP-trait associations at a given region. Between-SNP
correlations (e.g., from HapMap) are required as auxiliary information.
}
\examples{
\dontrun{
n <- 10
r <- c(1,0.2,1,0.4,0.5,1)
b <- c(0.1,0.2,0.3)
s <- c(0.4,0.3,0.2)
bs <- b2r(b,s,r,n)
}
}
\references{
\insertRef{elston75}{gap}
\insertRef{becker00}{gap}
\insertRef{casella02}{gap}
}
\seealso{
\code{\link{mvmeta}}, \code{\link{LD22}}
}
\author{
Jing Hua Zhao
}
\keyword{datagen}
|
/gap/man/b2r.Rd
|
no_license
|
jinghuazhao/R
|
R
| false
| true
| 1,351
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b2r.R
\name{b2r}
\alias{b2r}
\title{Obtain correlation coefficients and their variance-covariances}
\usage{
b2r(b, s, rho, n)
}
\arguments{
\item{b}{the vector of linear regression coefficients.}
\item{s}{the corresponding vector of standard errors.}
\item{rho}{triangular array of between-SNP correlation.}
\item{n}{the sample size.}
}
\value{
The returned value is a list containing:
\itemize{
\item r the vector of correlation coefficients.
\item V the variance-covariance matrix of correlations.
}
}
\description{
Obtain correlation coefficients and their variance-covariances
}
\details{
This function converts linear regression coefficients of phenotype on
single nucleotide polymorphisms (SNPs) into Pearson correlation coefficients
with their variance-covariance matrix. It is useful as a preliminary step
for meta-analyzing SNP-trait associations at a given region. Between-SNP
correlations (e.g., from HapMap) are required as auxiliary information.
}
\examples{
\dontrun{
n <- 10
r <- c(1,0.2,1,0.4,0.5,1)
b <- c(0.1,0.2,0.3)
s <- c(0.4,0.3,0.2)
bs <- b2r(b,s,r,n)
}
}
\references{
\insertRef{elston75}{gap}
\insertRef{becker00}{gap}
\insertRef{casella02}{gap}
}
\seealso{
\code{\link{mvmeta}}, \code{\link{LD22}}
}
\author{
Jing Hua Zhao
}
\keyword{datagen}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.