blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M โ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 โ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 โ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2355bed6ba3bbfbb2b5804ed92f2075fcdd07e9 | d68441b6311721a84d0210c371a1a94b2eb5f261 | /R/cbind.fill.R | a2f4307a90770b50d80c27974ff614dfcf95bc10 | [] | no_license | jasdumas/dumas | 0e787cb29037cbfac331af108cff0f28c758b513 | 84aedfdd0e095e3a20d07877120a86e7b5d64f8b | refs/heads/master | 2020-04-06T05:37:36.930368 | 2017-07-17T19:24:24 | 2017-07-17T19:24:24 | 38,554,253 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 624 | r | cbind.fill.R | #' cbind.fill
#'
#' returns a data.frame joined by columns but filled with NA's if the values are missing
#' @param ... data.frame objects needed to be combined by columns
#'
#' @return a data.frame
#' @export
#'
#' @examples
#' x = data.frame("x1" = c(1, 2, 3))
#' y = data.frame("x1" = c(1, 2), "y1" = c(1, 2))
#' cbind.fill(x, y)
#' @seealso
#' http://stackoverflow.com/questions/7962267/cbind-a-df-with-an-empty-df-cbind-fill
cbind.fill <- function(...){
  # Coerce every input (data frame, vector, matrix) to a matrix so that
  # rbind()/cbind() treat them uniformly.
  mats <- lapply(list(...), as.matrix)
  # The tallest input fixes the row count of the result.
  n_rows <- max(vapply(mats, nrow, integer(1)))
  # Pad each input with rows of NA up to the common height, then bind the
  # padded matrices side by side.
  padded <- lapply(mats, function(m) {
    rbind(m, matrix(NA, nrow = n_rows - nrow(m), ncol = ncol(m)))
  })
  do.call(cbind, padded)
}
|
04a8e7cf5be47a4f25b3528bbe3a3dd9aae2bbb7 | 7561c7a2bda45a2cbcccd9e22cc4135ac1e711bc | /Fishers_Criterion.R | 6eb2bccb7e8b70fadcb85ea6b40e170550d710c1 | [] | no_license | a30123/R | 10cc9e071f75e55a96320e5b078f102d83c86a23 | 6f5280874eb5c0c7eeb9ebaf1299a77bfbc822af | refs/heads/master | 2020-05-17T15:24:01.282712 | 2016-01-29T07:01:22 | 2016-01-29T07:01:22 | 34,438,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,188 | r | Fishers_Criterion.R | #setwd("D:/20150602")
#Fisher's criterion function
# Fisher's criterion for a single feature and a two-class label.
#
# Measures class separability as the squared difference of the class means
# divided by the sum of the within-class variances; larger values mean the
# two classes are better separated along this feature.
#
# Args:
#   y:       class label vector; must contain exactly two distinct values.
#   feature: numeric feature vector, same length as y.
# Returns: a single numeric score J.
# Errors:  if lengths differ or y does not have exactly two classes
#          (the original silently ignored extra classes).
Fisher_cri <- function(y, feature){
  stopifnot(length(y) == length(feature))
  classes <- unique(y)
  if (length(classes) != 2) {
    stop("y must contain exactly two distinct classes", call. = FALSE)
  }
  x_1 <- feature[y == classes[1]]
  x_2 <- feature[y == classes[2]]
  # Squared mean gap over the sum of within-class variances.
  (mean(x_1) - mean(x_2))^2 / (var(x_1) + var(x_2))
}
#test
# Smoke test: 100 random binary labels against a standard-normal feature.
# NOTE(review): no set.seed(), so the printed value is not reproducible.
y = sample(x = c(0,1),size = 100,replace = T)
feature = rnorm(100)
Fisher_cri(y,feature)
########################
# Score every column of the Faulty vs. Normal data sets with Fisher's
# criterion and write the ranking to a CSV.
path1<-"C:/Users/A30123.ITRI/Documents/R scripts/New for event mining/Try_20150604_Fishers_criterion/Faulty"
path2<-"C:/Users/A30123.ITRI/Documents/R scripts/New for event mining/Try_20150604_Fishers_criterion/Normal"
# Only the first file found in each folder is used.
filename1<-list.files(path=path1)[1]
filename2<-list.files(path=path2)[1]
dat_y0<-read.csv(paste(path1,"/",filename1,sep=""))
dat_y1<-read.csv(paste(path2,"/",filename2,sep=""))
#colnames(dat_y0)==colnames(dat_y1)
# Class label: 0 for every row of the Faulty file, 1 for the Normal file.
y = c(rep(0,dim(dat_y0)[1]) , rep(1,dim(dat_y1)[1]))
fc = NA
# One criterion value per column; fc is grown as it is filled in.
for( i in 1: dim(dat_y0)[2]){
x = c( (dat_y0[,i]) , (dat_y1[,i]) )
fc[i] = Fisher_cri(y,x)
}
output = data.frame( feature=colnames(dat_y0) ,Fisher_cri=fc )
write.csv(output,"C:/Users/A30123.ITRI/Documents/R scripts/New for event mining/Try_20150604_Fishers_criterion/Fisher's criterion result.csv",row.names = F)
|
15c1d15ddeb8395cca128430efae91620eca2384 | c4a36c8bd4aac9b17904b1db90d0196f994da8b7 | /CFBavgprobit.R | f78a3731d0d04907bce6575b810c13b595fae0bb | [] | no_license | lizerlfunk/categoricaldatacollegefootball | 01c8be4d7cfb4a22d184be9cdc9a35ba22bfe758 | b58dcf6f3a68ef3a1687ac4c50431478f09da4c6 | refs/heads/main | 2023-03-08T03:46:45.951795 | 2021-02-24T03:55:41 | 2021-02-24T03:55:41 | 341,772,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,787 | r | CFBavgprobit.R | CFB2019 <- read.csv("~/OneDrive - SectorShield Inc/2020 Summer/CFB2019-v2.csv")
# Manual backward elimination of a probit regression for college-football
# win record (WR) on five per-game statistics (renamed x1..x5 below).
# The starting model holds all main effects plus every two- and three-way
# interaction (~ .*.*.).  At each step drop1() reports likelihood-ratio
# tests and one term is removed by hand; inline "#AIC" notes record the
# AIC of selected candidate models.
View(CFB2019)
# NOTE(review): attach() is discouraged; here it is what lets glm() find WR,
# since Data18 below holds only the five predictors.
attach(CFB2019)
Vars18 <- c("Pass.Yards.Per.Game.Allowed","Penalty.Yards.Per.Game","Avg.Yards.Per.Punt.Return","Avg.Yards.Allowed.per.Punt.Return","Rushing.Yards.per.Game")
Data18 <- CFB2019[Vars18]
names(Data18)[1:5]=c("x1","x2","x3","x4","x5")
# Full model: all main effects and 2-/3-way interactions of x1..x5.
probitmodel1<-glm(as.factor(WR) ~ .*.*.,family=binomial(link='probit'),data=Data18)
summary(probitmodel1)
drop1(probitmodel1,test = "LRT")
# Each step below drops one term chosen from the drop1() LRT table and
# refits, starting with the three-way interactions.
probitmodel2<-update(probitmodel1, .~. -x2:x3:x4)
summary(probitmodel2)
drop1(probitmodel2,test = "LRT")
probitmodel3<-update(probitmodel2, .~. -x1:x3:x5)
summary(probitmodel3)
drop1(probitmodel3,test = "LRT")
probitmodel4<-update(probitmodel3, .~. -x2:x3:x5)
summary(probitmodel4)
drop1(probitmodel4,test = "LRT")
probitmodel5<-update(probitmodel4, .~. -x1:x2:x4)
summary(probitmodel5)
drop1(probitmodel5,test = "LRT")
probitmodel6<-update(probitmodel5, .~. -x1:x2:x3)
summary(probitmodel6)
drop1(probitmodel6,test = "LRT")
probitmodel7<-update(probitmodel6, .~. -x2:x3)
summary(probitmodel7)
drop1(probitmodel7,test = "LRT") #AIC 170.10
probitmodel8<-update(probitmodel7, .~. -x1:x2:x5) #AIC 170.63
summary(probitmodel8)
drop1(probitmodel8,test = "LRT")
probitmodel9<-update(probitmodel8, .~. -x1:x2)
summary(probitmodel9)
drop1(probitmodel9,test = "LRT")
probitmodel10<-update(probitmodel9, .~. -x1:x3:x4)
summary(probitmodel10)
drop1(probitmodel10,test = "LRT")
probitmodel11<-update(probitmodel10, .~. -x1:x3)
summary(probitmodel11)
drop1(probitmodel11,test = "LRT")
probitmodel12<-update(probitmodel11, .~. -x2:x4:x5)
summary(probitmodel12)
drop1(probitmodel12,test = "LRT")
probitmodel13<-update(probitmodel12, .~. -x2:x5)
summary(probitmodel13)
drop1(probitmodel13,test = "LRT")
probitmodel14<-update(probitmodel13, .~. -x3:x4:x5)
summary(probitmodel14)
drop1(probitmodel14,test = "LRT")
probitmodel15<-update(probitmodel14, .~. -x3:x5)
summary(probitmodel15)
drop1(probitmodel15,test = "LRT")
probitmodel16<-update(probitmodel15, .~. -x2:x4)
summary(probitmodel16)
drop1(probitmodel16,test = "LRT")
# Main effects start to be removed from here on.
probitmodel17<-update(probitmodel16, .~. -x2)
summary(probitmodel17)
drop1(probitmodel17,test = "LRT")
probitmodel18<-update(probitmodel17, .~. -x3:x4)
summary(probitmodel18)
drop1(probitmodel18,test = "LRT")
probitmodel19<-update(probitmodel18, .~. -x3)
summary(probitmodel19) #AIC 163.93
drop1(probitmodel19,test = "LRT")
probitmodel20<-update(probitmodel19, .~. -x1:x4:x5)
summary(probitmodel20) #AIC 166.37
drop1(probitmodel20,test = "LRT")
probitmodel21<-update(probitmodel20, .~. -x4:x5)
summary(probitmodel21) #AIC 164.37
drop1(probitmodel21,test = "LRT")
probitmodel22<-update(probitmodel21, .~. -x1:x5)
summary(probitmodel22) #AIC 162.92
drop1(probitmodel22,test = "LRT")
# NOTE(review): elimination stops here; probitmodel22 (#AIC 162.92) appears
# to be the selected model.  ROCR is presumably loaded for a later ROC
# analysis not shown in this file -- confirm.
library(ROCR)
|
71517705787d2d44cd3e02412e75f7c51e13174f | 8d81ecafe5095bd5b180d5e1c9d871c66b6a8f76 | /rrapply/63_grolemund_list_columns.R | 549ac98ee8217ef4d478dc074644fe51cdeffc16 | [] | no_license | jimrothstein/try_things_here | 0a3447b5578db293685fb71c7368f1460d057106 | d2da5ce3698dd21c2fe1a96c30f52cefe6f87de4 | refs/heads/main | 2023-08-28T08:55:43.314378 | 2023-08-25T23:47:23 | 2023-08-25T23:47:23 | 98,355,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,926 | r | 63_grolemund_list_columns.R | # REF: video-
# https://resources.rstudio.com/webinars/how-to-work-with-list-columns-garrett-grolemund"
library(tidyr)
#library(babynames)
x <- c(1L, 2L, 3L)
typeof(x) # integer
# Create new data types by adding class or other attribute
# Date
# make it Date (actually S3)
class(x) <- "Date"
# but still integer[]
typeof(x) # STILL an integer
attributes(x) # only class
## Array
# make an array
a <- array(1:8)
a
# What is array?
typeof(a) # integer[]
attributes(a) # dim: 8
class(a) # array
# Matrix
# array again
a <- array(1:8)
# make a matrix
attr(a, "dim") <- c(2,4)
# What is matrix?
typeof(a) # integer[]
class(a) # matrix, array
attributes(a) # 2, 4
# matrix - slight difference
# Begin with integer[]
x <- c(1L, 2L, 3L)
# Add attributes
dim(x) <- c(3,1)
# same as
# attr(x, "dim") <- c(3,1)
# What is matrix?
typeof(x) # STILL an integer
class(x) # matrix, array
attributes(x) # now 3,1
## factor or categorical variable
# Begin with integer[]
x <- c(1L, 2L, 3L)
# Add attributes & class
levels(x) <- c("BLUE", "BROWN", "BLACK")
class(x) <- "factor"
# What is factor?
typeof(x) # STILL an integer
attributes(x) # now 2!, levels and class=factor
# -------------------------------------
# HOW TO MIX data types in 1 container?
# use a list
# Begin list of named atomic vectors.
l <- list(a=c(1,2,3), b=c(TRUE, TRUE, FALSE ))
# named list
is.list(l)
names(l)
# Check
typeof(l) # list
attributes(l) #names
## data.frame
## data tables, is a named LIST of vectors.
# Begin list of named atomic vectors.
l <- list(a=c(1,2,3), b=c(TRUE, TRUE, FALSE ))
# Add class, attributes
class(l) <- "data.frame"
str(l) # 0 rows !!
rownames(l) <- c("1", "2", "3")
str(l) # 3 rows
# What is data.frame?
typeof(l) # STILL a list
attributes(l) #names, row and class
# data.frame with list as column works, but problem
# lists ARE vectors and therefore can be column table.
l <- list(a=c(1,2,3), b=c(TRUE, TRUE, FALSE ))
l$d <- list(p=1:3, q=4:5, r=c(letters[10:12]))
# Add class, attributes, as above
class(l) <- "data.frame"
rownames(l) <- c("1", "2", "3")
# What is data.frame?
typeof(l) # STILL a list
attributes(l) #names, row and class
str(l) # data.frame !
l
# ----------------------------------------------------------------------
# Here's the problem:
# Did what we said, made column d list, but not so easy to manipulate.
# ----------------------------------------------------------------------
## try again, but as tibble - not exactly
l <- list(a=c(1,2,3), b=c(TRUE, TRUE, FALSE ))
l$d <- list(p=1:3, q=4:5, r=c(letters[10:12]))
m <- l
# Add class, attributes, as above
class(l) <- c( "tbl_df", "tbl", "data.frame")
# REPLACE:
# rownames(m) <- c("1", "2", "3") Depreciated
attr(l, "row.names") <- c("1", "2", "3")
typeof(l)
attributes(l)
l
# tibble: much better, why?
# now comes in as <named list>
# Better way
m <- as_tibble(m)
# What is tibble?
typeof(m)
attributes(m)
m
# different row.names !
attributes(l)$row.names
attributes(m)$row.names
# Compare! Tibble does it better than df.
y <- tibble(
a = c(1.0, 2.0, 3.14),
b = c( "a","b","c"),
c = c(TRUE, FALSE, FALSE),
d = list(as.integer(c(1,2,3)), TRUE, 2L)
)
y # note d is a list-column
df <- data.frame(
a = c(1.0, 2.0, 3.14),
b = c( "a","b","c"),
c = c(TRUE, FALSE, FALSE),
d = list(as.integer(c(1,2,3)), TRUE, 2L)
)
df # note d is a MESS
# -------------------------------
# ---------------------------------------------
## R has tools for atomic vectors and for data tables.
## Less so for lists.
## Compare sqrt(list) just fails to purrr:map(list, sqrt) tries to convert and much more tolerant.
# ---------------------------------------------
# dplyr::mutate: tibble -> tibble
# challenge: convert to 10 x 3 tibble, all int[]
test <- tibble(a = 1:10, b = tibble(x = 11:20, y = 21:30))
typeof(test)
str(test)
# sqrt: vector -> dbl vector
y %>% dplyr::mutate(asc_a = sqrt(a)) %>% print()
#
# Error: sqrt rejects list -- the next line fails on the list-column d
y %>% dplyr::mutate(asc_d = sqrt(d) %>% print())
# - **instead**, dplyr::map applies list to sqrt, element-by-element, converts if necessary and repackages as list.
y %>% dplyr::mutate(asc_d = purrr::map(d, sqrt)) %>% print()
# ---------------------------------------------
## Babynames, filter names present every year
# ---------------------------------------------
library(babynames)
str(babynames)
everpresent <- babynames %>%
dplyr::group_by(name, sex) %>%
dplyr::summarize(years = n) %>%
dplyr::ungroup() %>%
filter(years == max(years))
## Keep all rows (and fields) in x which match group_by in y
babynames <- babynames %>%
semi_join(everpresent)
## most popular, each year
babynames %>%
group_by(year,sex) %>%
filter(prop == max(prop))
## (test)
mtcars %>%
group_by(cyl) %>%
top_n(1,hp)
babynames %>%
group_by(year,sex) %>%
top_n(1,prop) %>%
arrange(desc(year))
## select joe, all years
joe <- babynames %>%
filter(name == "Joe")
joe %>%
ggplot(aes(x=year,y=prop)) +
geom_point() +
geom_line() +
geom_smooth(method=lm,se=FALSE)
## is linear a good fit for joe?
fit <- lm (prop ~ year, data=joe)
library(broom)
pluck(coef(fit),"year")
pluck(glance(fit), "r.squared")
# @32:00 in the webinar
# model for every name in babynames
babynames %>%
group_by(name,sex) %>%
nest()
# retrieve Mary
babynames %>%
group_by(name,sex) %>%
nest() %>%
pluck("data") %>%
pluck(1)
# use map to run lm iteratively over the list-column "data"
d<-babynames %>%
group_by(name,sex) %>%
nest() %>%
mutate(model =
map(data,
~lm(prop ~ year, data=.x)),
slope =
map_dbl(model,
~pluck(coef(.x), "year")),
r2 =
map_dbl(model,
~pluck(glance(.x), "r.squared"))
)
save(d,file="baby_model")
# verify "Mary"
d %>% pluck("name") %>% pluck(1)
d %>% pluck("model") %>% pluck(1)
|
c37848131ebbcf85eea5ade9cf11292b3d811907 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Ultimixt/examples/K.MixPois.Rd.R | 0588a908c2ca4566b7f665c5a0063a04766b5196 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 563 | r | K.MixPois.Rd.R | library(Ultimixt)
### Name: K.MixPois
### Title: Sample from a Poisson mixture posterior associated with a
### noninformative prior and obtained by Metropolis-within-Gibbs sampling
### Aliases: K.MixPois
### Keywords: Poisson mixture model Non-informative prior
### ** Examples
#N=500
#U =runif(N)
#xobs = rep(NA,N)
#for(i in 1:N){
# if(U[i]<.6){
# xobs[i] = rpois(1,lambda=1)
# }else{
# xobs[i] = rpois(1,lambda=5)
# }
#}
#estimate=K.MixPois(xobs, k=2, alpha0=.5, alpha=.5, Nsim=10000)
|
8771603289c9037d0cf184f02a7e96f565e30343 | 2e731f06724220b65c2357d6ce825cf8648fdd30 | /dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612727870-test.R | 5484df2bbdc388692c6b2c35c57ceedc25308c0d | [] | no_license | akhikolla/updatedatatype-list1 | 6bdca217d940327d3ad42144b964d0aa7b7f5d25 | 3c69a987b90f1adb52899c37b23e43ae82f9856a | refs/heads/master | 2023-03-19T11:41:13.361220 | 2021-03-20T15:40:18 | 2021-03-20T15:40:18 | 349,763,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 780 | r | 1612727870-test.R | testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = -1L, item_score = c(-1802201964L, -1802201964L, -1802201964L, -1802201964L, -1802201964L, 1397053520L, 673866607L, 1853252978L, 1951690561L, 1819552040L, 1668247155L, 1948271464L, 1634885987L, 1952805462L, 1701016687L, 1915103636L, -1802201964L, -1802201964L, -1802201964L, -1802201964L, -1802240000L, 1217684L, -1802201964L, -1802201964L, -1802201964L, -1802201964L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -5308416L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), person_id = NA_integer_)
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result) |
7fb4a5ea216283f662b8c5fc70db6a972e34acc3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/aplpack/examples/slider.lowess.plot.Rd.R | 986119dbdfd1fba8520393485d176cbde2b43c2f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 316 | r | slider.lowess.plot.Rd.R | library(aplpack)
### Name: slider.lowess.plot
### Title: interactive lowess smoothing
### Aliases: slider.lowess.plot
### Keywords: iplot
### ** Examples
## Not run:
##D ## This example cannot be run by examples() but should be work in an interactive R session
##D slider.lowess.plot(cars)
## End(Not run)
|
24db295d943e119150a09fdee01c68c1ca87b052 | 7a018a314f4a5c80be897e7ef52e659da937debd | /plot3.R | 2441ebad47cac518492880033be75839dcd76d2c | [] | no_license | RedTigerbear/ExData_Plotting2 | b2c45411feb4b86d5e4df959cd15db62b3044832 | 14668c3d0746defd930864631c5bb20c1702bc32 | refs/heads/master | 2016-09-10T15:16:29.423234 | 2015-05-15T10:46:18 | 2015-05-15T10:46:18 | 35,280,773 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,218 | r | plot3.R | ## Set locale to English
Sys.setlocale("LC_TIME", "English")
## Get file with data
FileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
if (!file.exists("NEI_data.zip")) {
    download.file(FileUrl, destfile = "NEI_data.zip", method = "curl")
}
unzip("NEI_data.zip")
## Get data
NEI <- readRDS("./data/summarySCC_PM25.rds")
NEI$year <- factor(NEI$year)
## fips == "24510" identifies Baltimore City, Maryland.
NEI_Baltimore <- NEI[NEI$fips == "24510",]
## Free the full data set; only the Baltimore subset is needed below.
rm(NEI)
## Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
## variable, which of these four sources have seen decreases in emissions from 1999-2008
## for Baltimore City? Which have seen increases in emissions from 1999-2008?
## Use the ggplot2 plotting system to make a plot answering this question.
library(ggplot2)
library(plyr)
## Total emissions per (year, source type) pair.
plot3_data <- ddply(NEI_Baltimore, .(year,type), summarize, TotalEmissions=sum(Emissions))
png('./ExData_Plotting2/plot3.png',
    width = 480, height = 480, units = "px", pointsize = 12,
    bg = "white") ## Save to file
## NOTE(review): the ggplot call below relies on auto-printing; when this
## script is source()d the device may stay empty unless wrapped in print().
ggplot(data = plot3_data, aes(x = year, y = TotalEmissions, fill = type)) +
  geom_bar(stat="identity", position="dodge") +
  ggtitle("Emission Types in the Baltimore (Maryland)")
dev.off() |
0c8afbc318501f1b9203e9accf3e614adc327ff0 | 0a69c354a77a56ad5dac93967d2576e10fefffd5 | /s3/buckets/templates/1/r/code/main.R | c64e020711273ecf532665efc0c1ce0fee0c257b | [] | no_license | codeocean/private-cloud | 524f1e87b596af268f4319f1138ee2c41c4ce075 | 5fcb541485e04a05cc84384f9dac8271027f291d | refs/heads/master | 2021-09-16T23:51:17.414464 | 2021-05-24T11:35:25 | 2021-05-24T11:35:25 | 244,925,666 | 2 | 0 | null | 2021-08-12T02:25:47 | 2020-03-04T14:46:03 | TypeScript | UTF-8 | R | false | false | 1,034 | r | main.R | # Install and load ggplot2 R package
# Demo script: draws a sine curve whose title, cycle count, and input file
# can be supplied on the command line; writes two PNGs to ../results.
library(ggplot2)
# get command line arguments
args <- commandArgs(trailingOnly = TRUE)
# check the amount of arguments
# (expected order: plot title, number of sine cycles, input-data filename)
if (length(args) >= 3) {
  plot_title <- args[1]
  number_of_cycles <- args[2]
  input_data <- args[3]
} else {
  # Defaults used when the script is run without (enough) arguments.
  plot_title <- "Hello Code Ocean"
  number_of_cycles <- 3
  input_data <- "../data/sample-data.txt"
}
# use an argument as a parameter for the sine function
cycles <- as.numeric(number_of_cycles)
# read some input data from a filename specified by an argument
# NOTE(review): readLines() yields one value per line; the input file is
# assumed to hold a single number (the point count) -- confirm.
points <- as.numeric(readLines(input_data))
# sine function
x = seq(0, cycles * 2 * pi, length = points)
y = sin(x)
# plot to a PNG file (note the output directory)
png(
  filename = "../results/fig1.png",
  width = 5,
  height = 4,
  units = 'in',
  res = 300
)
plot(x, y, type = "l")
title(plot_title)
dev.off()
# alternatively, plot using ggplot (and save to PNG)
df <- data.frame(x = x, y = y)
p <- qplot(x, y, data = df) + geom_line() + ggtitle(plot_title)
p
ggsave('../results/fig2.png', p)
|
989b2b41d4d667aa3af97a825800d82907012622 | c26a15db12227206fe363d3807ca2b192f4df2bc | /man/lzo-test.Rd | 52638f7bfe02def20c0e59bb90e25d822a2f7ae4 | [] | no_license | cran/RTisean | 52f3e67f0b18f8ed9a141841b70170fa43cf4e50 | f819f6b88aa814cdaa5d1d2f1411cee105c978d2 | refs/heads/master | 2021-01-01T05:49:40.303232 | 2011-12-29T00:00:00 | 2011-12-29T00:00:00 | 17,692,991 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,009 | rd | lzo-test.Rd | \name{lzo.test}
\alias{zeroth} %old routine name
\alias{lzo.test}
\title{ Modeling data through a zeroth order ansatz }
\description{Makes a zeroth order ansatz and estimates the one step prediction errors
of the model on a multivariate time series.}
\usage{
lzo.test(series, l, x = 0, m=c(1,2), c, d = 1, n, S = 1, k = 30, r, f = 1.2, s = 1, C)
}
\arguments{
\item{series}{a matrix or a vector.}
\item{l}{number of points to use. }
\item{x}{number of lines to be ignored. }
\item{m}{a vector containing the number of components of the time series and the embedding dimension. }
\item{c}{a vector containing the columns to be read.}
\item{d}{delay for the embedding. }
\item{n}{number of points for which the error should be calculated. }
\item{S}{temporal distance between the reference points. }
\item{k}{minimal number of neighbors for the fit. }
\item{r}{neighborhood size to start with. }
\item{f}{factor to increase the neighborhood size if not enough neighbors were found. }
\item{s}{steps to be forecasted. }
\item{C}{width of causality window. }
}
\details{
The function searches for all neighbors of the point to be forecasted and
takes as its image the average of the images of the neighbors. The given forecast errors are
normalized to the standard deviations of each component. In addition to using a multicomponent
time series, a temporal embedding is possible. That's why the \code{m} argument needs two
numbers as input, where the first one is the number of components and the second one the temporal embedding.
}
\value{A matrix of \code{s} lines, containing the steps forecasted in the first column
and the normalized forecast errors in the following columns for each component of the vector.}
\seealso{ \code{\link{predict}}, \code{\link{xzero}}. }
\examples{
\dontrun{
dat <- henon(1000)
zerotherr <- lzo.test(dat, s = 20)
plot(zerotherr, t="l", xlab= "Steps", ylab= "Normalized error", main = "Zeroth order ansatz prediction errors")
}
}
\keyword{ ts }
|
2350cbfcdeca1156ffcce32cdba18bd9440b9fc1 | c11aa641eb4e619f40ec9e68883c11f362ed8ee4 | /man/is_GSVD_gsvd.Rd | 8084b58eb808a9a407817ca7a1810e7dbdf99b36 | [] | no_license | derekbeaton/GSVD | d27e4c1a70f74dd8fb94b5d5d7e17995da354e83 | 0f41cf29bcdcadc6c3a45847427df83e5226ceb3 | refs/heads/master | 2023-04-15T07:28:59.832285 | 2020-12-29T23:06:30 | 2020-12-29T23:06:30 | 152,910,640 | 18 | 6 | null | 2023-04-08T14:17:44 | 2018-10-13T20:14:45 | R | UTF-8 | R | false | true | 475 | rd | is_GSVD_gsvd.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_methods_helpers.R
\name{is_GSVD_gsvd}
\alias{is_GSVD_gsvd}
\title{is_GSVD_gsvd}
\usage{
is_GSVD_gsvd(x)
}
\arguments{
\item{x}{object to test}
}
\value{
boolean. \code{TRUE} if the object is of class gsvd, FALSE otherwise.
}
\description{
Tests if the \code{x} object is of class type "gsvd"
}
\details{
Only \code{\link{gsvd}} produces this class type.
}
\seealso{
\code{\link{inherits}}
}
|
d8797538f9a99373fc8b1d27de0ea5c49a5bedfa | ad474589dc5f3ec309b89faff60984d662b63861 | /tests/test_helper.R | 83ecd767eb3e97b65e4cb0e69b4cb89f29f345a3 | [] | no_license | massgov/bradford | 553d97b7f0d9bee2d740662d3ddf6e83acc3d66c | dd3e7f085314db1b6f027287b9f07b62387a3451 | refs/heads/master | 2021-05-01T04:28:44.048186 | 2017-03-20T21:05:24 | 2017-03-20T21:05:24 | 73,943,412 | 0 | 1 | null | 2017-03-20T21:05:25 | 2016-11-16T17:20:20 | CSS | UTF-8 | R | false | false | 6,031 | r | test_helper.R | library(testthat)
# Unit tests for the helper functions defined in ../functions/helper.R
# (loaded via source() below).  Each helper gets an invalid-input test
# (expect_error) and a correct-answer test.
library(methods)
source("../functions/helper.R")
context("Helper functions")
# ---- factorPercentage ----
test_that("factorPercentage fails on incorrect input", {
  factor.vec <- factor(c("cats", "cats", "dogs", "rabbits"))
  expect_error(factorPercentage(factor.vec = list()))
  expect_error(factorPercentage(factor.vec = factor.vec, factor.value = "turtles"))
  expect_error(factorPercentage(factor.vec = matrix(), factor.value = "onions"))
})
test_that("factorPercentage outputs the correct answer", {
  factor.vec <- factor(c("cats", "cats", "dogs", "rabbits"))
  # 2 of 4 values are "cats" -> 50 (percent).
  expect_equal(factorPercentage(factor.vec = factor.vec, factor.value = "cats"), 50)
})
# ---- prettyPercent ----
test_that("prettyPercent fails on incorrect input", {
  expect_error(prettyPercent(num = "10", round.n = 0, is.percent.points = T))
  expect_error(prettyPercent(num = 10, round.n = "0", is.percent.points = T))
  expect_error(prettyPercent(num = 10, round.n = 0, is.percent.points = NA))
  expect_error(prettyPercent(num = 10, round.n = -2, is.percent.points = T))
})
test_that("prettyPercent warns on illogical input", {
  expect_warning(prettyPercent(num = 100000, round.n = 0, is.percent.points = T))
})
test_that("prettyPercent outputs the correct answer", {
  expect_equal(prettyPercent(num = 10, round.n = 0, is.percent.points = T), "10 %")
  expect_equal(prettyPercent(num = .576, round.n = 0, is.percent.points = F), "58 %")
  expect_equal(prettyPercent(num = .576, round.n = 1, is.percent.points = F), "57.6 %")
})
# ---- meanCount ----
test_that("meanCount errors on incorrect input", {
  grouped.df <- tibble::tibble(x = c("cats", "cats", "dogs", "rabbits")) %>%
    dplyr::group_by(x)
  expect_error(meanCount(grouped.df = data.frame(), round.n = 0))
  expect_error(meanCount(grouped.df = tibble::tibble(), round.n = 0))
  expect_error(meanCount(grouped.df = matrix(), round.n = 0))
  expect_error(meanCount(grouped.df = list(), round.n = 0))
  expect_error(meanCount(grouped.df = grouped.df, round.n = -1))
  expect_error(meanCount(grouped.df = grouped.df, round.n = "1"))
})
test_that("meanCount outputs the correct answer", {
  grouped.df <- tibble::tibble(x = c("cats", "cats", "dogs", "rabbits")) %>%
    dplyr::group_by(x)
  expect_equal(meanCount(grouped.df = grouped.df, round.n = 0), 1)
  expect_equal(meanCount(grouped.df = grouped.df, round.n = 1), 1.3)
})
# ---- flagIncompleteTimeperiod ----
test_that("flagIncompleteTimeperiod errors on incorrect input", {
  ts.vector <- ts(lubridate::ymd("2017-01-01", "2017-01-02"))
  zoo.vector <- zoo::as.zoo(lubridate::ymd("2017-01-01", "2017-01-02"))
  from <- as.Date(lubridate::now())
  to <- as.Date(lubridate::now() + lubridate::days(3))
  flag.vector <- seq.Date(from = from, to = to, by = "day")
  expect_error(flagIncompleteTimeperiod(reference.vector = ts.vector, time.unit = "day"))
  expect_error(flagIncompleteTimeperiod(reference.vector = zoo.vector, time.unit = "day"))
  expect_error(flagIncompleteTimeperiod(reference.vector = flag.vector, time.unit = "hour"))
  expect_error(flagIncompleteTimeperiod(reference.vector = flag.vector, time.unit = "year"))
})
test_that("flagIncompleteTimeperiod outputs the correct answer", {
  # NOTE(review): these expectations depend on the current date
  # (lubridate::now()), so the test is time-sensitive.
  from <- as.Date(lubridate::now())
  to <- as.Date(lubridate::now() + lubridate::days(3))
  flag.vector <- seq.Date(from = from, to = to, by = "day")
  expect_true(any(flagIncompleteTimeperiod(reference.vector = flag.vector, time.unit = "week")))
  expect_true(any(flagIncompleteTimeperiod(reference.vector = flag.vector, time.unit = "day")))
})
# ---- groupAndOrder ----
test_that("groupAndOrder errors on incorrect input", {
  test.df <- data.frame(a = factor(c("A", "A", "B", "C")),
                        b = c(1, 1, 1, 1),
                        c = c("A", "A", "B", "C"))
  expect_error(groupAndOrder(df = test.df, group.col = "a", data.col = "c"))
  expect_error(groupAndOrder(df = test.df, group.col = "b", data.col = "b"))
  expect_error(groupAndOrder(df = test.df, group.col = "a", data.col = "b", top.pct = 2))
})
test_that("groupAndOrder outputs the correct answer", {
  test.df <- data.frame(a = factor(c("A", "A", "B", "C")),
                        b = c(1, 1, 1, 1),
                        c = c("A", "A", "B", "C"))
  expect_equal(groupAndOrder(df = test.df, group.col = "a", data.col = "b", top.pct = 1),
               data.frame(group = factor(c("A", "B", "C")),
                          total = c(.5, .25, .25),
                          cumul = c(.50, .75, 1)))
  # Fix for releveling
  # expect_equal(groupAndOrder(df = test.df, group.col = "c", data.col = "b", top.pct = .8),
  #              data.frame(group = c("A", "B"),
  #                         total = c(.5, .25),
  #                         cumul = c(.50, .75)))
})
# ---- getTopOrBottomK ----
test_that("getTopOrBottomK accepts correct input", {
  test.df <- data.frame(a = factor(c("A", "A", "B", "C")),
                        b = c(1, 1, 1, 1),
                        c = c("A", "A", "B", "C"))
  expect_error(getTopOrBottomK(df = test.df, group.col = "a", data.col = "b", k = 'd', get.top = TRUE))
  expect_error(getTopOrBottomK(df = test.df, group.col = "a", data.col = "b", k = 4, get.top = 'test'))
  expect_error(getTopOrBottomK(df = test.df, group.col = "a", data.col = "c", k = 4, get.top = FALSE))
  expect_error(getTopOrBottomK(df = test.df, group.col = "b", data.col = "c", k = 4, get.top = FALSE))
})
test_that("getTopOrBottomK returns correct values", {
  test.df <- data.frame(a = factor(c("A", "A", "B", "C","D","C")),
                        b = c(1, 1, 1, 1, 3, 4))
  expect_equal(getTopOrBottomK(df = test.df, group.col = "a", data.col = "b", k = 1, get.top = TRUE),
               test.df[test.df$a == 'C',])
  expect_equal(getTopOrBottomK(df = test.df, group.col = "a", data.col = "b", k = 6, get.top = TRUE),
               test.df)
  expect_equal(getTopOrBottomK(df = test.df, group.col = "a", data.col = "b", k = 2, get.top = FALSE),
               test.df[test.df$a %in% c('B','A'),])
  expect_equal(getTopOrBottomK(df = test.df, group.col = "a", data.col = "b", k = 3, get.top = FALSE),
               test.df[test.df$a %in% c('A','B','D'),])
})
|
621d133e176bc785d6ea70aba67b8ac821e7e982 | 7db08f13e152824a3c12a2620ef22b1a19c9e810 | /_in-progress/prop_var.R | 68d362d3d46f69bb7bd45c7786799174b1d2ba6b | [] | no_license | stenhaug/kinedu | 53db9caeabcb21a523fdc8866790b3551eeee4f1 | 35db19a7378e278a8c3957a78ebffa4e646f6455 | refs/heads/master | 2022-11-23T10:22:02.671958 | 2020-07-30T22:32:10 | 2020-07-30T22:32:10 | 267,909,431 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,388 | r | prop_var.R |
# Exploratory scratch work: per-factor proportion of variance for fitted
# mirt models (work in progress).
# NOTE(review): `results` and `object` are not defined in this file -- these
# lines assume objects from an interactive session; confirm before sourcing.
model <- results$model_full[[4]]
rotation <- "varimax"
library(tidyverse)
library(mirt)
# Fitted-model artifacts saved by earlier scripts.
d_mat <- read_rds(here::here("data-clean/d_mat.rds"))
mirts <- read_rds(here::here("02_mirts_2_add_5.rds"))
bifactor <- read_rds(here::here("02_bifactors.rds"))
# Combine model runs and derive in-/out-of-sample fit summaries.
full <-
    bind_rows(mirts, bifactor) %>%
    rename(out_of_sample = ll_person_item) %>%
    mutate(
        in_sample = exp(log_lik / nrow(d_mat))^(1/ncol(d_mat))
    ) %>%
    mutate(
        oos =
            splits_with_log_lik %>% map_dbl(~ sum(.$log_lik_test))
    ) %>%
    select(
        factors, itemtype, in_log_lik = log_lik, in_p = in_sample,
        out_log_lik = oos, out_p = out_of_sample, model_full, fscores, splits_with_log_lik)
# Inspect loadings under different rotations.
summary(results$model_full[[4]], rotate = "none")
summary(full$model_full[[4]], rotate = "varimax")
summary(full$model_full[[4]], rotate = "oblimin")
# NOTE(review): scratch arithmetic left by the author (sums of SS loadings?).
136.759 + 66.698 + 44.507
summary(object, rotate = "none")
y <- summary(object, rotate = "varimax")
results$model_full[[4]] %>% get_prop_var()
y %>% str()
object %>% get_prop_var()
# Proportion of variance accounted for by each factor of a fitted model.
#
# Computes, per factor, the sum of squared loadings divided by the number
# of items (rows of the loading matrix).
#
# Args:
#   object: a fitted model whose loading matrix lives in object@Fit$F
#           (mirt-style S4 fit -- TODO confirm slot layout).
# Returns: a numeric vector with one proportion per factor.
get_prop_var <- function(object){
  # Avoid `F` as a local name -- it shadows the FALSE shorthand.
  loadings <- object@Fit$F
  colSums(loadings^2) / nrow(loadings)
}
# Bar chart: proportion of variance per factor for one fitted model.
# NOTE(review): `mirts2` is not defined in this file -- presumably created
# in an interactive session; confirm before sourcing.
mirts2$model_full[[1]] %>%
    get_prop_var() %>%
    enframe() %>%
    ggplot(aes(x = name, y = value)) +
    geom_col() +
    labs(
        x = "factor",
        y = "Proportion Var",
        title = "4 factor model"
    )
|
d67570bb365ff2876c232bd3675c6fb430976a87 | 3f183be77d1083af606a56db1cbd66eea97dde3d | /Shiny/Endpoint/server.R | afb97e92a2cd3b9347aee8b0f3dd15f225ab40c2 | [] | no_license | therandomwa/BERDShiny | 0ed361abc338c014ac22b3f689e7cb8e4b47b419 | e9e0d20fe2c0233039dc06738524e79f4e363d53 | refs/heads/master | 2020-09-06T00:57:04.910263 | 2019-11-19T16:18:44 | 2019-11-19T16:18:44 | 220,264,944 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,781 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse)
library(plyr); library(dplyr)
library(viridis)
library(plotly)
# Shiny server: builds three clinical-trial style visualisations from
# user-uploaded CSVs -- a spider plot (per-subject % change from baseline
# over time), a waterfall plot (best response per subject) and a swimmer
# plot (time-on-study bars with event markers).
#
# Expected upload schemas (inferred from the code; confirm with the UI docs):
#   spider/waterfall: columns ID, week, sum (+ optional changenadir01, lesion)
#   swimmer: a "frame" file (ID, week, grouping columns) and an "event" file
#            (ID, event, event_time)
server <- function(input, output, session) {
  ################################### Spider ################################
  # Raw CSV for the spider plot; NULL until the user uploads a file.
  # Empty strings are read as NA.
  myData <- reactive({
    if (is.null(input$file)) {
      return(NULL)
    } else {
      read.csv(input$file$datapath, na.strings = "")
    }
  })
  ## update 'column' selector
  # Offer every column except bookkeeping/derived ones as the grouping variable.
  observeEvent(myData(), {
    col <- colnames(myData())[!(colnames(myData()) %in% c("ID", "changebsl", "week", "changenadir01", "sum"))]
    updateSelectInput(session, "pic", choices = col)
  })
  ## update 'level' selector
  # Refresh the level choices whenever the grouping column changes;
  # all levels are selected by default.
  observeEvent(input$pic, {
    column_levels <- unique(myData()[[input$pic]])
    updateSelectInput(session, "level",
                      choices = column_levels,
                      label = paste("Choose level in", input$pic),
                      selected = column_levels)
  }, ignoreInit = TRUE)
  # Conditional selector for filtering on the changenadir01 flag.
  output$change <- renderUI({
    # This input exists if the `static`
    # one is equal to `Yes` only
    if (input$changenadir == 'Yes') {
      selectInput(inputId = 'change',
                  label = 'changeNadir > 20',
                  choices = c("Yes" = 1,
                              "No" = 0),
                  multiple = TRUE)
    } else {
      return(NULL)
    }
  })
  # data with primary variable = the selected level
  # Subset to the chosen levels and derive changebsl = % change of `sum`
  # from each subject's first (baseline) visit.
  myData3 <- reactive({
    df = myData()
    # data with primary variable = the selected level
    df = df[df[,input$pic] %in% input$level,]
    df[,input$pic] = as.factor(df[,input$pic])
    df %>%
      # data adding change in sum from the baseline
      group_by(ID) %>%
      arrange(ID, week) %>%
      mutate(changebsl = 100*(sum - first(sum))/(first(sum))) %>%
      ungroup() %>%
      # baseline sum of 0 yields NaN (0/0); treat that as 0% change
      mutate(changebsl = replace(changebsl, changebsl == "NaN", 0))
  })
  # Preview table of the processed spider-plot data.
  output$contents <- renderTable({
    req(input$file)
    return(myData3())
  })
  # Spider plot: one line per subject, coloured by the chosen grouping
  # column, with +20% / -30% reference lines (RECIST-style thresholds).
  output$spider <- renderPlotly({
    if (!is.null(input$file)){
      df = myData3()
      a = df %>%
        group_by(ID) %>%
        plot_ly(x=~week,
                y = ~changebsl,
                type = "scatter",
                color = ~get(input$pic),
                text = ~ paste("ID:", ID),
                mode = "lines") %>%
        add_lines(inherit = FALSE,
                  y = 20,
                  x = c(0:max(df$week) ),
                  line = list(dash = "dash", color = "black"),
                  name = "reference line 1 (20%)") %>%
        add_lines(inherit = FALSE,
                  y = -30,
                  x = c(0:max(df$week)),
                  line = list(dash = "dash", color = "red"),
                  name = "reference line 2 (-30%)") %>%
        layout(title = "Spider plot for changes from baseline",
               xaxis = list(showgrid = FALSE,
                            title = "Week of Visit",
                            range = c(-2,(max(df$week) * 1.2))),
               yaxis = list(showgrid = FALSE,
                            title = "Change from Baseline (%)",
                            range = c(-100, 100)) ) %>%
        layout(margin = list(l = 50, r = 50, t = 100, b = 100),
               annotations = list(text = input$notes,
                                  font = list(size = 12),
                                  showarrow = FALSE,
                                  xref = 'paper', x = 0,
                                  yref = 'paper', y = -1,
                                  yanchor = "bottom"))
      # Optional marker overlay for the changenadir01 flag.
      if (input$changenadir == 'Yes' & 'changenadir01' %in% colnames(df) ) {
        a = a %>%
          filter(changenadir01 %in% input$change ) %>%
          add_trace(inherit = FALSE,
                    x=~week,
                    y = ~changebsl,
                    type = "scatter",
                    color = ~ as.factor(changenadir01),
                    symbol = ~ as.factor(changenadir01),
                    mode = "markers",
                    marker = list(size = 7))
      }
      # Optional "*" annotation just below points flagged as new lesions.
      if (input$lesion == 'Yes' & 'lesion' %in% colnames(df) ) {
        df_lesion = df[df$lesion == "1",]
        a = a %>%
          add_trace(name = "lesion",
                    inherit = FALSE,
                    x= ~df_lesion$week,
                    y =~df_lesion$changebsl-1.5,
                    text = "*",
                    type = "scatter",
                    mode = "text",
                    textposition = 'middle center')
      }
      # Optional subject-ID labels at each subject's last visit.
      if (input$ID == 'Yes' ) {
        df_ID = df %>%
          group_by(ID) %>%
          filter(week == max(week))
        a = a %>%
          add_trace(name = "ID",
                    inherit = FALSE,
                    x = ~df_ID$week,
                    y = ~df_ID$changebsl,
                    text = ~df_ID$ID,
                    type = "scatter",
                    mode = "text",
                    textposition = 'middle right')
      }
      return (a)
    }
  })
  ########################################## Waterfall ######################################
  # Raw CSV for the waterfall plot; NULL until uploaded.
  wfData <- reactive({
    if (is.null(input$file2)) {
      return(NULL)
    } else {
      read.csv(input$file2$datapath)
    }
  })
  ## update 'column' selector
  observeEvent(wfData(), {
    col2 <- colnames(wfData())[!(colnames(wfData()) %in% c("ID", "changebsl", "sum"))]
    updateSelectInput(session, "pic2", choices = col2)
  })
  ## update 'level' selector
  observeEvent(input$pic2, {
    column_levels <- unique(wfData()[[input$pic2]])
    updateSelectInput(session, "level2",
                      choices = column_levels,
                      label = paste("Choose level in", input$pic2),
                      selected = column_levels)
  }, ignoreInit = TRUE)
  # Best response per subject: % change from baseline, excluding the
  # baseline row itself, keeping each subject's minimum (best) change.
  wfData2 <- reactive({
    if (is.null(input$file2)) {
      return(NULL)
    } else {
      wfData() %>%
        group_by(ID) %>%
        arrange(ID, week) %>%
        mutate(changebsl = 100*(sum - first(sum))/first(sum)) %>%
        ungroup() %>%
        mutate(changebsl = replace(changebsl, changebsl == "NaN", 0)) %>%
        group_by(ID) %>%
        mutate(id = row_number()) %>%
        # drop the baseline visit (row 1) before taking the minimum
        filter(id != 1 ) %>%
        filter(changebsl == min(changebsl) ) %>%
        # ties: keep a single row per subject
        slice(1) %>%
        ungroup() %>%
        as.data.frame()
    }
  })
  # Preview table of the per-subject best responses.
  output$contents2 <- renderTable({
    return(wfData2())
  })
  # Waterfall plot: one bar per subject, sorted by decreasing best response.
  output$waterfall <- renderPlotly({
    if(is.null(input$file2)){
      return(NULL)
    } else {
      a = wfData2() %>%
        # zero out bars whose grouping level is not currently selected
        mutate(changebsl = ifelse((wfData2()[,input$pic2] %in% input$level2), changebsl, 0)) %>%
        # order the ID factor by descending changebsl so bars cascade
        mutate(id = as.factor(unclass(fct_reorder(ID, desc(changebsl)))))%>%
        mutate(ID = factor(ID, levels(ID)[order(id)])) %>%
        plot_ly() %>%
        add_trace(x = ~ID,
                  y = ~changebsl,
                  color = ~ as.factor(get(input$pic2)),
                  type = "bar",
                  width = 0.9,
                  text = ~paste("ID: ", wfData2()$ID)) %>%
        layout(bargap = 4,
               title = "Waterfall plot for changes in QoL scores",
               xaxis = list(showgrid = FALSE, title = "", tickangle = -90),
               yaxis = list(showgrid = FALSE, title = "Best RECIST response (%)",
                            range = c(-100, 100))) %>%
        layout(margin = list(l = 50, r = 50, t = 100, b = 250),
               annotations = list(text = input$notes2,
                                  font = list(size = 12),
                                  showarrow = FALSE,
                                  xref = 'paper', x = 0,
                                  yref = 'paper', y = -1,
                                  yanchor = "bottom"))
      # Optional "*" overlay on lesion-flagged subjects.
      # NOTE(review): in this branch the if() expression's value (the
      # assignment to `a`) is what renderPlotly receives; the explicit
      # return(a) only covers the else branch -- confirm the overlay
      # variant renders as intended.
      if (input$lesion2 == "Yes" & "lesion" %in% colnames(wfData2())) {
        a = a %>%
          filter(lesion == 1) %>%
          add_trace(name = "lesion",
                    inherit = FALSE,
                    x = ~ID,
                    y = ~changebsl,
                    text = "*",
                    type = "scatter",
                    mode = "text",
                    textposition = 'middle center')
      }
      else {
        return(a)
      }
    }
  })
  ########################################## swimmer ######################################
  # "Frame" file: one row per subject-week, defines the bars.
  sfData <- reactive({
    if (is.null(input$file3_frame)) {
      return(NULL)
    } else {
      read.csv(input$file3_frame$datapath)
    }
  })
  # "Event" file: one row per event, overlaid as markers.
  seData <- reactive({
    if (is.null(input$file3_event)) {
      return(NULL)
    } else {
      read.csv(input$file3_event$datapath)
    }
  })
  # Last observed week per subject (currently only used by the
  # commented-out preview table below).
  sfData2 <- reactive({
    if (!is.null(input$file3_frame)) {
      sfData() %>%
        group_by(ID) %>%
        filter(week == max(week))
    } else{
      return(NULL)
    }
  })
  ## update 'column' selector
  observeEvent(sfData(), {
    col3 <- colnames(sfData())[!(colnames(sfData()) %in% c("ID", "week"))]
    updateSelectInput(session, "pic3", choices = col3)
  })
  ## update 'level' selector
  observeEvent(input$pic3, {
    column_levels <- unique(sfData()[[input$pic3]])
    updateSelectInput(session,
                      "level3",
                      choices = column_levels,
                      label = paste("Choose level in", input$pic3),
                      selected = column_levels)
  }, ignoreInit = TRUE)
  # output$contents3 <- renderTable({
  #   req(input$file3_frame)
  #   return(sfData2())
  # })
  #
  # output$contents4 <- renderTable({
  #   req(input$file3_event)
  #   return(df)
  # })
  # Frame rows restricted to the selected grouping levels.
  sfData_selected <- reactive({
    sfData()[sfData()[,input$pic3] %in% input$level3,]
  })
  # Events joined onto the selected frame rows (plyr::join, right join
  # keeps every selected frame row).
  seData_selected <- reactive({
    join(seData(), sfData_selected(), by = "ID", match = "all", type = "right")
  })
  # Preview table of the joined event data.
  output$contents5 <- renderTable({
    req(input$file3_frame)
    req(input$file3_event)
    return(seData_selected())
  })
  # Swimmer plot: horizontal bars per subject (length = weeks on study),
  # event markers, and a vertical dashed reference line at input$reference.
  output$swimmer <- renderPlotly ({
    if(is.null(input$file3_frame)|is.null(input$file3_event)){
      return(NULL)
    } else {
      P = sfData() %>%
        mutate(ID = as.factor(ID)) %>%
        # order subjects by time on study
        mutate(ID = fct_reorder(ID, week)) %>%
        # bars for unselected levels are collapsed to length 0
        mutate(week = ifelse(sfData()[,input$pic3] %in% input$level3, week, 0)) %>%
        plot_ly( width = 1000, height = 800) %>%
        add_trace(x = ~week,
                  y = ~ID,
                  orientation = "h",
                  color = ~ as.factor(get(input$pic3)),
                  type = "bar",
                  width = 0.9) %>%
        # events
        add_trace(x = ~seData_selected()$event_time,
                  y = ~as.factor(seData_selected()$ID),
                  type = "scatter",
                  mode="markers",
                  symbol = seData_selected()$event,
                  symbols = c('cross', 'diamond', 'square', 'triangle-down', 'triangle-left', 'triangle-right', 'triangle-up'),
                  marker = list(size = 7, color = "black")) %>%
        # reference line
        layout(shapes=list(type='line',
                           x0= input$reference,
                           x1=input$reference,
                           y0=0,
                           y1=length(sfData()$ID),
                           line = list(dash = "dash", color = "red")),
               # style
               title = "Swimmers' plot",
               xaxis = list(showgrid = FALSE,
                            title = "Weeks since Enrollment",
                            range = c(0, max(sfData()$week)+5),
                            titlefont = list(size = 12)),
               yaxis = list(showgrid = FALSE,
                            title = "Subject ID",
                            titlefont = list(size = 12))) %>%
        # legend position
        layout(legend = list(x = 0.7, y = 0.1)) %>%
        # Notes position
        layout(
          margin = list(l = 50, r = 50, t = 75, b = 150),
          annotations = list(text = input$notes3,
                             font = list(size = 12),
                             showarrow = FALSE,
                             xref = 'paper', x = 0,
                             yref = 'paper', y = -0.25,
                             yanchor = "top"))
    }
  })
}
d2e6bb392da3a02eb302c2097fa4d7bb7cdac50a | c1045ee07cd88d40fa35df9bad13773b72e69994 | /R/zz-rascunho.R | a21aad713a6ed605a2b4e8a53f3cac18ff8df980 | [] | no_license | curso-r/stn-caixa | fba2fd3035e728dcc429bb4c2cf17848a2c832ec | 58af2946553aef69365c39e1a56327523daf688f | refs/heads/master | 2022-09-17T18:24:45.824523 | 2020-05-29T07:16:21 | 2020-05-29T07:16:21 | 223,437,254 | 1 | 3 | null | 2020-03-11T13:58:00 | 2019-11-22T15:56:27 | HTML | UTF-8 | R | false | false | 1,729 | r | zz-rascunho.R | library(tidyverse)
library(data.table)
# Scratch/draft ("rascunho") script: interactive exploration of the
# obligations (obrigacoes) and payments (pagamentos) datasets.
# Relies on data/*.rds produced elsewhere; View() calls are for RStudio.
obrigacoes <- read_rds("data/obrigacoes.rds")
# examples of documents
obrigacoes %>% count(ID_DOCUMENTO, sort = TRUE)
# Inspect a handful of specific documents, hiding the ID_/CO_ code columns.
obrigacoes %>% filter(ID_DOCUMENTO == "200109000012019OB802321") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View(title = "obrigacoes")
obrigacoes %>% filter(ID_DOCUMENTO == "194035192082019NS002882") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View
obrigacoes %>% filter(ID_DOCUMENTO == "194035192082019NS000415") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View
obrigacoes %>% filter(ID_DOCUMENTO == "200325000012019NS000379") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View
obrigacoes %>% filter(ID_DOCUMENTO == "200006000012018OB800266") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View
obrigacoes %>% filter(ID_DOCUMENTO == "194048192082018NS000094") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View
# some ID_DOCUMENTO_CCOR values look odd (values of -7)
obrigacoes %>%
  mutate(tem_ne = str_detect(ID_DOCUMENTO_CCOR, "NE")) %>%
  filter(!tem_ne)
obrigacoes %>% count(ID_DOCUMENTO_CCOR, sort = TRUE)
# cumulative value of the balances
# negative payable obligations are associated with payments already made
#   (compare pagamentos vs obrigacoes)
# a payment may fail because of quotas imposed on the funding sources
# bad scenario: a UG (management unit) cannot pay (an allocation problem)
# allocation would happen between UGs sharing the same source
#   (same agency is an advantage bureaucracy-wise)
pagamentos <- read_rds(path = "data/pagamentos.rds")
pagamentos %>% filter(ID_DOCUMENTO == "200109000012019OB802321") %>% select(-starts_with("ID_"), - starts_with("CO_")) %>% View(title = "pagamentos")
e3c5b6311196ee910dfc99d88f7723c728877bb2 | 6fd0155b54c02014e7cb6ab955ccf5f3a125eb4d | /data-raw/gse68456-reference.r | 9586296726c80150502a50f8c5411b8fd6d1a5a0 | [
"Noweb",
"Artistic-2.0"
] | permissive | perishky/meffil | 736f2576e50063074d5260a3115686664fbb1d98 | 7788be938877ff9367d33bb02169c09142e1d49c | refs/heads/master | 2023-06-08T17:18:38.127236 | 2023-05-26T08:30:06 | 2023-05-26T08:30:06 | 30,215,946 | 41 | 25 | Artistic-2.0 | 2023-03-28T14:43:58 | 2015-02-03T00:11:11 | HTML | UTF-8 | R | false | false | 2,238 | r | gse68456-reference.r |
#' Defines cell type reference "cord blood gse68456"
#' from the GEO repository GSE68456
#' for estimating cord blood cell counts.
#'
#' Downloads the raw idat archive for GEO series GSE68456 into `dir`,
#' unpacks it, and builds a sample sheet describing each array (one row per
#' Red-channel idat file found).
#'
#' @param dir Directory into which the archive is downloaded and unpacked;
#'   the working directory is temporarily changed to it and restored on exit.
#' @return A data.frame with columns Basename, gsm, participant, cell.type,
#'   Sex (all NA) and Sample_Name.
retrieve.gse68456 <- function(dir) {
    wd <- getwd()
    on.exit(setwd(wd))
    setwd(dir)

    cat("Downloading data ...\n")
    filename <- "gse68456.tar"
    ## NOTE(review): method="wget" requires wget on the PATH; kept for
    ## compatibility with the original environment.
    download.file("http://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE68456&format=file", filename, method="wget")

    cat("Unzipping data ...\n")
    system(paste("tar xvf", filename))

    filenames <- list.files(path=".", pattern="Red.idat.gz$")
    basenames <- sub("_Red.idat.gz$", "", filenames)
    ## Basenames look like "<gsm>_<participant>_<celltype>":
    ## gsm = first underscore-delimited field, cell.type = last field.
    ## BUG FIX: participant used pattern ".*_([^_]_)_.*", which matches only
    ## a single character followed by a double underscore, so sub() returned
    ## the whole basename unchanged. "[^_]+" captures the middle
    ## (participant) field as intended -- TODO confirm against the actual
    ## GSE68456 file names.
    samples <- data.frame(Basename=basenames,
                          gsm=sub("([^_]+)_.*", "\\1", basenames),
                          participant=sub(".*_([^_]+)_.*", "\\1", basenames),
                          cell.type=sub(".*_.*_(.+)$", "\\1", basenames),
                          stringsAsFactors=F)
    ## Normalize abbreviated cell type codes to the names meffil expects.
    samples$cell.type[which(samples$cell.type == "B")] <- "Bcell"
    samples$cell.type[which(samples$cell.type == "G")] <- "Gran"
    samples$cell.type[which(samples$cell.type == "Mo")] <- "Mono"
    samples$Sex <- NA
    samples$Sample_Name <- samples$gsm
    samples
}
# Build and register the "cord blood gse68456" meffil cell-type reference:
# download the raw GSE68456 arrays, run meffil QC and functional
# normalization, then add the reference to meffil's registry.
# Side effects only (registers the reference); returns the value of
# meffil.add.cell.type.reference invisibly.
create.gse68456.reference <- function() {
    # Normalization settings for the meffil pipeline.
    number.pcs <- 5
    verbose <- T
    chip <- "450k"
    featureset <- "common"

    # Download into a temporary directory that is removed on exit.
    dir.create(temp.dir <- tempfile(tmpdir="."))
    on.exit(unlink(temp.dir, recursive=TRUE))
    samplesheet <- retrieve.gse68456(temp.dir)
    samplesheet$Basename <- file.path(temp.dir, samplesheet$Basename)

    ## remove standard facs samples
    # GSM ids <= 1672168 correspond to the standard FACS samples -- keep
    # only the later accessions.
    id <- as.integer(sub("^GSM", "", samplesheet$Sample_Name))
    samplesheet <- samplesheet[which(id > 1672168),]

    # meffil QC + quantile normalization, then normalized M/U intensities.
    qc.objects <- meffil.qc(samplesheet, chip=chip, featureset=featureset, verbose=verbose)
    norm.objects <- meffil.normalize.quantiles(qc.objects, number.pcs=number.pcs, verbose=verbose)
    ds <- meffil.normalize.samples(norm.objects, just.beta=F, verbose=T)
    meffil.add.cell.type.reference(
        "cord blood gse68456", ds$M, ds$U,
        cell.types=samplesheet$cell.type,
        chip=chip,
        featureset=featureset,
        description="Cord blood reference of Goede et al. Clin Epigenetics 2015",
        verbose=verbose)
}
|
1bf7b89f848ca0055e93f0e2034c85879088c125 | f871ea0fdfc0ba93e65037f28090fb0a60513bec | /FACS_umaps/fig1b_c_7.16.R | 52cf710edde196c5fd05bf352ecefe3016346d7c | [] | no_license | jcooperdevlin/RewildedMice | 8b838ff2831ea5f3e8432859dbb8c1631ab142c5 | fd95ba111a3c2127939c85e336091de78fa724c1 | refs/heads/master | 2023-03-07T08:45:19.141349 | 2020-04-06T14:16:53 | 2020-04-06T14:16:53 | 340,936,220 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,167 | r | fig1b_c_7.16.R | ### Plot figure 1b and c indiv umaps and some boxplots
### 6.25
setwd("/Volumes/lokep01lab/lokep01labspace/Rewilding_Data/int/FACS_umaps")
library(ggplot2)
library(reshape)
library(ggsci)
library(gridExtra)
colors_clusters = c(pal_d3("category10")(10), pal_d3("category20b")(20), pal_igv("default")(51))
ramper_basic = colorRampPalette(c("grey99","purple4"))(2)
ramper_more = colorRampPalette(c("white","darkorange", "red2", "purple4"))(100)
plotter2D <- function(input_df, type, x, y, num_cols, xlims, ylims, splitter, ramper=NA) {
plot_plist <- list()
counter=1
for(i in num_cols){
color_item <- input_df[,i]
color_item <- log2(color_item+1)
Log2 <- color_item
lab_df <- subset(input_df, Environment == "lab")
new_lab <- paste0("Lab ", round(length(which(lab_df[,i]>0))/nrow(lab_df)*100,3), "%")
wild_df <- subset(input_df, Environment == "wild")
new_wild <- paste0("Wild ", round(length(which(wild_df[,i]>0))/nrow(wild_df)*100,3), "%")
input_df$Environment2<-input_df$Environment
input_df$Environment2 <- gsub("lab", new_lab, input_df$Environment2)
input_df$Environment2 <- gsub("wild", new_wild, input_df$Environment2)
if(is.na(ramper)){
ramper = colorRampPalette(c("grey90", "purple4"))(2)
} else {ramper=ramper}
g=ggplot(input_df, aes(input_df[,x], input_df[,y], color = Log2))+
geom_point(size=0.001, aes(alpha=Log2))+
ylim(ylims)+xlim(xlims)+
#ylab(paste0(type, "_2")) + xlab(paste0(type, "_1"))+
scale_colour_gradientn(colors = ramper)+
#scale_colour_gradient2(low="gray85", mid="red2", high="purple4")+
ggtitle(paste0(colnames(input_df[i]))) +
theme_void() +
theme(legend.position='none',
panel.border = element_rect(colour = "black", fill=NA, size=1),
plot.title = element_text(hjust = 0.5, size=17))
if(splitter==T){
plot_plist[[counter]]<-ggplotGrob(
g+facet_wrap(~Environment2, nrow=1) +
theme(strip.text = element_text(size=15))
)
} else {
plot_plist[[counter]]<-ggplotGrob(g)
}
counter=counter+1
}
return(plot_plist)
}
#
#
#
#
#
#
#
#
#Blood
# Row 1. TSNE vs UMAP & CD3, CD19, CD4, CD8 Combined
### Blood all
# Load the blood UMAP coordinates, mouse metadata and raw marker table;
# the marker table's first column (V1) is the mouse id.
umap_blood_df <- read.table("inputs/umap_combo_Blood.txt", F, '\t')
meta <- read.table("mice_metadata.11.19_mouse_id.txt", T, '\t')
orig_df <- read.table("inputs/Blood_df.txt", F, '\t')
ids <- orig_df$V1
names <- colnames(read.table("name_change.csv", T, ","))
colnames(orig_df) <- c("id", names)
### add metadata
orig_df$id <- factor(orig_df$id, levels = unique(orig_df$id))
orig_df <- orig_df[order(orig_df$id),]
rownames(meta) <- meta$mouse_id
meta <- meta[levels(orig_df$id),]
uniq_ids <- unique(orig_df$id)
# Attach per-mouse metadata to every cell of that mouse. The first row of
# orig_df2 is a placeholder (all metadata NA) and is dropped afterwards.
orig_df2 <- data.frame(orig_df[1,], Genotype = NA, Environment = NA,
                       Wedge_cage = NA, Gender = NA, Pregnant = NA, Diarrhea = NA, Flow.date=NA)
for (j in 1:length(uniq_ids)){
  curr <- subset(orig_df, id == uniq_ids[j])
  meta_curr <- subset(meta, mouse_id == as.character(uniq_ids[j]))
  curr$Genotype <- rep(meta_curr$Genotype, each = nrow(curr))
  curr$Environment <- rep(meta_curr$Environment, each =nrow(curr))
  curr$Wedge_cage <- rep(meta_curr$Wedge_cage, each =nrow(curr))
  curr$Gender <- rep(meta_curr$Gender, each =nrow(curr))
  curr$Pregnant <- rep(meta_curr$Pregnant, each =nrow(curr))
  curr$Diarrhea <- rep(meta_curr$Diarrhea, each =nrow(curr))
  curr$Flow.date <- rep(meta_curr$Flow.date, each =nrow(curr))
  orig_df2 <- rbind(orig_df2, curr)
}
orig_df <- orig_df2[-1,]
colnames(orig_df)
# blood all
# Append UMAP coordinates (rows assumed to be in the same order as orig_df
# -- TODO confirm) and compute shared axis limits.
orig_df$umap_1 <- umap_blood_df$V1
orig_df$umap_2 <- umap_blood_df$V2
umap_xlims <- c(floor(min(orig_df$umap_1)), ceiling(max(orig_df$umap_1)))
umap_ylims <- c(floor(min(orig_df$umap_2)), ceiling(max(orig_df$umap_2)))
# Zero out intensities below the per-marker positivity threshold so that
# value > 0 means "marker positive" downstream.
thresh <- read.table("Distributions/thresholds/Blood_major_thresholds.txt", T, "\t")
orig_df$CD3[orig_df$CD3<thresh$CD3[1]] <- 0
orig_df$CD19[orig_df$CD19<thresh$CD19[1]] <- 0
orig_df$CD4[orig_df$CD4<thresh$CD4[1]] <- 0
orig_df$CD8[orig_df$CD8<thresh$CD8[1]] <- 0
# One unfaceted UMAP panel each for CD19 and CD4.
int_cols <- c(which(colnames(orig_df)=="CD19"))
cd19_blood <- plotter2D(orig_df, "umap", "umap_1", "umap_2",
                        int_cols, umap_xlims, umap_ylims, splitter=F)
cd19 <- arrangeGrob(grobs=cd19_blood, nrow=1)
int_cols <- c(which(colnames(orig_df)=="CD4"))
cd4_blood <- plotter2D(orig_df, "umap", "umap_1", "umap_2",
                       int_cols, umap_xlims, umap_ylims, splitter=F)
cd4 <- arrangeGrob(grobs=cd4_blood, nrow=1)
#
#
#
#
#
#
#
#
### Blood CD19-CD44
### Blood CD4-CD62L
# CD19+ blood cells: load CD19-gated UMAP + marker table, attach metadata
# (same join pattern as the blood-all section above).
reader19 <- paste0("inputs/CD19_umap_combo_Blood.txt")
umap_blood_df <- read.table(reader19, F, '\t')
meta <- read.table("mice_metadata.11.19_mouse_id.txt", T, '\t')
reader19 <- paste0("inputs/CD19_Blood_df.txt")
orig_df <- read.table(reader19, F, '\t')
ids <- orig_df$V1
names <- colnames(read.table("name_change.csv", T, ","))
colnames(orig_df) <- c("id", names)
### add metadata
orig_df$id <- factor(orig_df$id, levels = unique(orig_df$id))
orig_df <- orig_df[order(orig_df$id),]
rownames(meta) <- meta$mouse_id
meta <- meta[levels(orig_df$id),]
uniq_ids <- unique(orig_df$id)
# Placeholder first row is dropped after the loop.
orig_df2 <- data.frame(orig_df[1,], Genotype = NA, Environment = NA,
                       Wedge_cage = NA, Gender = NA, Pregnant = NA, Diarrhea = NA, Flow.date=NA)
for (j in 1:length(uniq_ids)){
  curr <- subset(orig_df, id == uniq_ids[j])
  meta_curr <- subset(meta, mouse_id == as.character(uniq_ids[j]))
  curr$Genotype <- rep(meta_curr$Genotype, each = nrow(curr))
  curr$Environment <- rep(meta_curr$Environment, each =nrow(curr))
  curr$Wedge_cage <- rep(meta_curr$Wedge_cage, each =nrow(curr))
  curr$Gender <- rep(meta_curr$Gender, each =nrow(curr))
  curr$Pregnant <- rep(meta_curr$Pregnant, each =nrow(curr))
  curr$Diarrhea <- rep(meta_curr$Diarrhea, each =nrow(curr))
  curr$Flow.date <- rep(meta_curr$Flow.date, each =nrow(curr))
  orig_df2 <- rbind(orig_df2, curr)
}
orig_df <- orig_df2[-1,]
colnames(orig_df)
# blood all
orig_df$umap_1 <- umap_blood_df$V1
orig_df$umap_2 <- umap_blood_df$V2
umap_xlims <- c(floor(min(orig_df$umap_1)), ceiling(max(orig_df$umap_1)))
umap_ylims <- c(floor(min(orig_df$umap_2)), ceiling(max(orig_df$umap_2)))
# Zero out sub-threshold intensities for every minor marker.
reader <- paste0("Distributions/thresholds/CD19_Blood_minor_thresholds.txt")
thresh <- read.table(reader, T, "\t")
orig_df$CD43_1B11[orig_df$CD43_1B11<thresh$CD43_1B11[1]] <- 0
orig_df$PD1[orig_df$PD1<thresh$PD1[1]] <- 0
orig_df$CD25[orig_df$CD25<thresh$CD25[1]] <- 0
orig_df$CD44[orig_df$CD44<thresh$CD44[1]] <- 0
orig_df$CD127[orig_df$CD127<thresh$CD127[1]] <- 0
orig_df$CXCR3[orig_df$CXCR3<thresh$CXCR3[1]] <- 0
orig_df$KLRG1[orig_df$KLRG1<thresh$KLRG1[1]] <- 0
orig_df$CD27[orig_df$CD27<thresh$CD27[1]] <- 0
orig_df$CD69[orig_df$CD69<thresh$CD69[1]] <- 0
orig_df$CD62L[orig_df$CD62L<thresh$CD62L[1]] <- 0
# CD44 panel, faceted by environment.
int_cols <- c(which(colnames(orig_df)=="CD44"))
cd19_cd44 <- plotter2D(orig_df, "umap", "umap_1", "umap_2",
                       int_cols, umap_xlims, umap_ylims, splitter=T)
# Per-mouse percentage of CD44+ cells (grown row-by-row; fine at this size).
input_df=orig_df
new_df = data.frame(Environment=NA, perc_cd44=NA)
for(k in 1:length(unique(input_df$id))){
  curr <- subset(input_df, id == unique(input_df$id)[k])
  up = length(which(curr[,int_cols]>0))/nrow(curr)*100
  adder <- data.frame(Environment=curr$Environment[1], perc_cd44=up)
  new_df <- rbind(new_df, adder)
}
new_df <- new_df[-1,]
# Boxplot of % CD44+ per mouse, lab vs wild.
cd19_cd44_box = ggplot(new_df, aes(Environment, perc_cd44, color=Environment)) +
  geom_boxplot(alpha=0.2, outlier.shape = NA) + geom_jitter(width=0.2) +
  scale_color_manual(values=c("mediumorchid3", "red3"))+
  ylab("% of CD44+ cells per mouse") + xlab("")+
  theme_bw() +
  theme(axis.title = element_text(size=15),
        axis.text = element_text(size=12, color='black'),
        #legend.title = element_text(size=15),
        legend.position = 'none',
        legend.title = element_blank(),
        legend.text = element_text(size=12))
##sig test
kruskal.test(perc_cd44 ~ factor(Environment), data=new_df)#$p.value
#
#
#
#
# CD4+ blood cells: same pipeline as the CD19 section, for the CD4 gate
# and the CD62L marker.
reader4 <- paste0("inputs/CD4_umap_combo_Blood.txt")
umap_blood_df <- read.table(reader4, F, '\t')
meta <- read.table("mice_metadata.11.19_mouse_id.txt", T, '\t')
reader4 <- paste0("inputs/CD4_Blood_df.txt")
orig_df <- read.table(reader4, F, '\t')
ids <- orig_df$V1
names <- colnames(read.table("name_change.csv", T, ","))
colnames(orig_df) <- c("id", names)
### add metadata
orig_df$id <- factor(orig_df$id, levels = unique(orig_df$id))
orig_df <- orig_df[order(orig_df$id),]
rownames(meta) <- meta$mouse_id
meta <- meta[levels(orig_df$id),]
uniq_ids <- unique(orig_df$id)
# Placeholder first row is dropped after the loop.
orig_df2 <- data.frame(orig_df[1,], Genotype = NA, Environment = NA,
                       Wedge_cage = NA, Gender = NA, Pregnant = NA, Diarrhea = NA, Flow.date=NA)
for (j in 1:length(uniq_ids)){
  curr <- subset(orig_df, id == uniq_ids[j])
  meta_curr <- subset(meta, mouse_id == as.character(uniq_ids[j]))
  curr$Genotype <- rep(meta_curr$Genotype, each = nrow(curr))
  curr$Environment <- rep(meta_curr$Environment, each =nrow(curr))
  curr$Wedge_cage <- rep(meta_curr$Wedge_cage, each =nrow(curr))
  curr$Gender <- rep(meta_curr$Gender, each =nrow(curr))
  curr$Pregnant <- rep(meta_curr$Pregnant, each =nrow(curr))
  curr$Diarrhea <- rep(meta_curr$Diarrhea, each =nrow(curr))
  curr$Flow.date <- rep(meta_curr$Flow.date, each =nrow(curr))
  orig_df2 <- rbind(orig_df2, curr)
}
orig_df <- orig_df2[-1,]
colnames(orig_df)
# blood all
orig_df$umap_1 <- umap_blood_df$V1
orig_df$umap_2 <- umap_blood_df$V2
umap_xlims <- c(floor(min(orig_df$umap_1)), ceiling(max(orig_df$umap_1)))
umap_ylims <- c(floor(min(orig_df$umap_2)), ceiling(max(orig_df$umap_2)))
# Zero out sub-threshold intensities for every minor marker.
reader <- paste0("Distributions/thresholds/CD4_Blood_minor_thresholds.txt")
thresh <- read.table(reader, T, "\t")
orig_df$CD43_1B11[orig_df$CD43_1B11<thresh$CD43_1B11[1]] <- 0
orig_df$PD1[orig_df$PD1<thresh$PD1[1]] <- 0
orig_df$CD25[orig_df$CD25<thresh$CD25[1]] <- 0
orig_df$CD44[orig_df$CD44<thresh$CD44[1]] <- 0
orig_df$CD127[orig_df$CD127<thresh$CD127[1]] <- 0
orig_df$CXCR3[orig_df$CXCR3<thresh$CXCR3[1]] <- 0
orig_df$KLRG1[orig_df$KLRG1<thresh$KLRG1[1]] <- 0
orig_df$CD27[orig_df$CD27<thresh$CD27[1]] <- 0
orig_df$CD69[orig_df$CD69<thresh$CD69[1]] <- 0
orig_df$CD62L[orig_df$CD62L<thresh$CD62L[1]] <- 0
# CD62L panel, faceted by environment.
int_cols <- c(which(colnames(orig_df)=="CD62L"))
cd4_cd62L <- plotter2D(orig_df, "umap", "umap_1", "umap_2",
                       int_cols, umap_xlims, umap_ylims, splitter=T)
# Per-mouse percentage of CD62L+ cells.
# NOTE(review): the column is still named perc_cd44 (copy-paste from the
# CD19 section) but here it holds % CD62L+.
input_df=orig_df
new_df = data.frame(Environment=NA, perc_cd44=NA)
for(k in 1:length(unique(input_df$id))){
  curr <- subset(input_df, id == unique(input_df$id)[k])
  up = length(which(curr[,int_cols]>0))/nrow(curr)*100
  adder <- data.frame(Environment=curr$Environment[1], perc_cd44=up)
  new_df <- rbind(new_df, adder)
}
new_df <- new_df[-1,]
# Boxplot of % CD62L+ per mouse, lab vs wild.
cd4_cd62L_box = ggplot(new_df, aes(Environment, perc_cd44, color=Environment)) +
  geom_boxplot(alpha=0.2, outlier.shape = NA) + geom_jitter(width=0.2) +
  scale_color_manual(values=c("mediumorchid3", "red3"))+
  ylab("% of CD62L+ cells per mouse") + xlab("")+
  theme_bw() +
  theme(axis.title = element_text(size=15),
        axis.text = element_text(size=12, color='black'),
        #legend.title = element_text(size=15),
        legend.position = 'none',
        legend.title = element_blank(),
        legend.text = element_text(size=12))
###
kruskal.test(perc_cd44 ~ factor(Environment), data=new_df)#$p.value
#
#
# Assemble and export the final figure panels. g1 is an empty spacer grob.
g1=ggplot()+theme_void()
## boxplots and smaller umaps
p1 <- arrangeGrob(cd19, cd19_cd44_box, nrow=2)
p2 <- arrangeGrob(cd4, cd4_cd62L_box, nrow=2)
p1_void <- arrangeGrob(cd19, g1, nrow=1)
p2_void <- arrangeGrob(cd4, g1, nrow=1)
##
# Earlier layout attempt, kept for reference:
#png("plots/Fig1b_c.png",
#    height = 8, width = 14, units = 'in', res=300)
#grid.arrange(p1, g1, cd19_cd44[[1]], g1, p2, g1, cd4_cd62L[[1]],
#             nrow = 1, widths=c(1,0.2,1,0.2,1,0.2,1))
#dev.off()
# Combined figure: UMAP panels above the faceted marker panels.
png("plots/Fig1b_c_void.png",
    height = 8, width = 14, units = 'in', res=300)
grid.arrange(
  arrangeGrob(p1_void, cd19_cd44[[1]], nrow=2),
  arrangeGrob(p2_void, cd4_cd62L[[1]], nrow=2),
  nrow = 1)
dev.off()
#TOO BIG
#pdf("plots/Fig1b_c.pdf",
#    height = 8, width = 14)
#grid.arrange(p1, g1, cd19_cd44[[1]], g1, p2, g1, cd4_cd62L[[1]],
#             nrow = 1, widths=c(1,0.2,1,0.2,1,0.2,1))
#dev.off()
# png("plots/Fig1b_c_1.png",
#     height = 5, width = 5, units = 'in', res=500)
# grid.arrange(cd19)
# dev.off()
#
#
# Standalone PDF of the CD44 boxplot.
pdf("plots/Fig1b_c_2.pdf",
    height = 5, width = 5)
grid.arrange(cd19_cd44_box)
dev.off()
#
# png("plots/Fig1b_c_3.png",
#     height = 10, width = 5, units = 'in', res=500)
# grid.arrange(cd19_cd44[[1]])
# dev.off()
#
#
# png("plots/Fig1b_c_4.png",
#     height = 5, width = 5, units = 'in', res=500)
# grid.arrange(cd4)
# dev.off()
#
# png("plots/Fig1b_c_5.png",
#     height = 5, width = 5, units = 'in', res=500)
# grid.arrange(cd4_cd62L_box)
# dev.off()
#
# Standalone PDF of the CD62L boxplot.
pdf("plots/Fig1b_c_5.pdf",
    height = 5, width = 5)
grid.arrange(cd4_cd62L_box)
dev.off()
#
# png("plots/Fig1b_c_6.png",
# height = 10, width = 5, units = 'in', res=500)
# grid.arrange(cd4_cd62L[[1]])
# dev.off()
#
#
#
#
#
#
#
#
#
#
#
#
#
#
##
|
d51a39929ddf31b6e169e6556ac3d631c6202f7a | 89f09607d51b2552f05465d7325e235fe376ba19 | /f3_DStat_qpWave/sharedDriftOutgroupF3Map.R | 35edf7073c83c0d7f3e40ae4c185ef81477e17ca | [] | no_license | weigelworld/north_american_arabidopsi | d0e68bcd749ffa4634c154e884d49e05d807b777 | 00cb7750fe04e1246d2bcb3bd058cca23b3d8f03 | refs/heads/master | 2023-05-30T12:37:11.772765 | 2021-06-10T05:00:36 | 2021-06-10T05:00:36 | 375,575,979 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,518 | r | sharedDriftOutgroupF3Map.R | #usethis::edit_r_environ() #put admixtools path in there
# Outgroup-f3 shared-drift analysis: computes f3(N. American group, AEA
# sub-cluster; fs12_3 outgroup) with ADMIXTOOLS via admixr, then draws one
# interpolated shared-drift map per North American group.
library(admixr)
###################################################
library(maptools)
library(ggplot2)
library(ggthemes)
library(raster)
library(akima)
library(wesanderson)
###################################################3
#import data
snps <- eigenstrat("/mnt/bigdisk/work/northAmericanArabidopsis/evolHistory/admixtools/subClusters/merged.bi.lmiss90.ed.nativeClustersAmericanGroups")
#set N. American groups
pops1<- c("newGroup12_2","newGroup15_2","newGroup16_1",
          "newGroup16_2","newGroup17_2","newGroup18_1",
          "newGroup20_1","newGroup22_1","newGroup26_1",
          "newGroup34_1","newGroup35","newGroup36","newGroup37",
          "newGroup38","newGroup39","newGroup9_2","newGroupCol0",
          "newGroupMAAA","newGroupNYBG","newGroupOHOS", "newGroup3")
#set AEA sub-clusters
pops3 <- c("fs11_1","fs11_2",
           "fs20_1","fs20_2","fs20_3","fs20_4",
           "fs10_1","fs10_2",
           "fs4_1","fs4_2","fs4_3","fs4_4","fs4_5","fs4_6","fs4_7","fs4_8",
           "fs3_1","fs3_2","fs3_3","fs3_4","fs3_5","fs3_6",
           "fs7_1","fs7_2","fs7_3",
           "fs8_1","fs8_2",
           "fs18_1","fs18_10","fs18_11","fs18_12","fs18_2","fs18_3","fs18_4","fs18_5","fs18_6","fs18_7","fs18_9",
           "fs17_10","fs17_11","fs17_12","fs17_13","fs17_14","fs17_9","fs17_1","fs17_2","fs17_3","fs17_4","fs17_5","fs17_6","fs17_7","fs17_8",
           "fs18_14","fs18_15",
           "fs15_18","fs15_2","fs15_15","fs15_17",
           "fs16_1","fs16_10","fs16_11","fs16_12","fs16_2","fs16_3","fs16_4","fs16_5","fs16_8","fs16_9",
           "fs18_13","fs18_8",
           "fs3_17","fs13_4","fs2_1","fs9_1","fs9_2","fs9_3","fs9_4",
           "fs5_1","fs5_10","fs5_11","fs5_12","fs5_2","fs5_3","fs5_4","fs5_5","fs5_6","fs5_7","fs5_8","fs5_9",
           "fs3_9","fs15_1","fs15_10","fs15_11","fs15_12","fs15_13","fs15_14","fs15_16","fs15_2","fs15_3","fs15_4","fs15_5",
           "fs15_6","fs15_7","fs15_8","fs15_9","fs16_6","fs16_7","fs10_3","fs10_4","fs10_5","fs10_6",
           "fs12_1","fs12_2",
           "fs13_1","fs13_2","fs13_3",
           "fs6_1","fs6_10","fs6_11","fs6_12","fs6_13","fs6_2","fs6_3","fs6_4","fs6_5","fs6_6","fs6_7","fs6_8","fs6_9",
           "fs15_19","fs16_13","fs16_14",
           "fs19_1","fs19_2","fs19_3","fs19_4","fs19_5","fs19_6",
           "fs21_1","fs21_2","fs21_3","fs21_4","fs21_5","fs3_10","fs3_11","fs3_12","fs3_13","fs3_14","fs3_15",
           "fs3_16","fs4_3","fs14_1","fs14_2","fs14_3","fs1_1","fs1_2","fs1_3","fs1_4","fs1_5")
# f3 with fs12_3 as the fixed outgroup C.
result_NAmericans_Regions_Fs12 <- f3(A=pops1, B=pops3, C="fs12_3", data = snps)
#Write output to a file
write.table(file="/mnt/bigdisk/work/northAmericanArabidopsis/evolHistory/admixtools/subClusters/result_NAmericans_SubClusters_Fs12_3.f3.txt", result_NAmericans_Regions_Fs12,
            row.names =F, col.names = T, sep="\t" )
###################### Plotting ###############################3
# Plot input: f3 results annotated with sub-cluster Lat/Long coordinates
# (prepared externally -- the "tmp1" file).
f3Results <- read.table("/mnt/bigdisk/work/northAmericanArabidopsis/evolHistory/admixtools/subClusters/tmp1", header = T)
AmericanGroupList =list("newGroup12_2","newGroup15_2","newGroup16_1","newGroup16_2","newGroup17_2",
                        "newGroup18_1","newGroup20_1","newGroup22_1","newGroup26_1","newGroup3",
                        "newGroup34_1","newGroup35","newGroup36",
                        "newGroup37","newGroup38","newGroup39","newGroup9_2","newGroupCol0","newGroupMAAA",
                        "newGroupNYBG","newGroupOHOS")
### IMPORTANT: newGroups were named after regions for final analysis, the dictionary mapping their new names is:
# newAmericanGroups.Renamed.populationsDictionary.txt, it is kept in the same directory.
#### Loop through the groups #########
# One map per N. American group: f3 values interpolated over the AEA
# sampling locations (akima::interp), drawn over a world-map outline and
# saved as a PDF.
for (i in AmericanGroupList){
  aGroup <- i
  f3NewGroup <- f3Results[f3Results$A==aGroup,]
  data("wrld_simpl")
  mymap <- fortify(wrld_simpl)
  # Interpolate f3 onto a regular grid; duplicate coordinates are averaged.
  mydf2 <- with(f3NewGroup, interp(x=f3NewGroup$Long, y=f3NewGroup$Lat, z=f3NewGroup$f3,
                                   xo=seq(min(f3NewGroup$Long), max(f3NewGroup$Long), length=400),
                                   #yo=seq(min(f3NewGroup$Lat), max(f3NewGroup$Lat), length=100),
                                   duplicate = "mean"))
  pal <- wes_palette("Zissou1",100, type = "continuous")
  gdat <-interp2xyz(mydf2, data.frame=T)
  p <- ggplot(data=gdat, aes(x=x, y=y, z=z))+theme_bw()+
    geom_tile(aes(fill=z),alpha=0.8)+
    stat_contour(aes(fill= z), alpha=0.8, geom = "polygon", binwidth = 0.8)+
    #geom_contour(color="gray90")+
    geom_path(data=mymap, aes(x=long, y=lat, group=group), inherit.aes = F)+
    scale_x_continuous(limits = c(-15,62), expand = c(0,0))+
    scale_y_continuous(limits = c(35,62), expand = c(0,0))+
    scale_fill_gradientn(colors = c("white", "lightblue", "royalblue", "khaki2","navajowhite", "red"),breaks=seq(0,0.5,0.1))+
    #scale_fill_gradientn(colors = pal)+
    coord_equal()+
    labs(title = aGroup)+
    xlab("Longitude")+
    ylab("Latitude")+
    #theme(legend.position = "top")+
    #theme(legend.position = "bottom")+
    theme(legend.justification=c(1,1), legend.position=c(0.99,0.99),legend.title=element_blank(), legend.direction = "horizontal")+
    theme(legend.background = element_rect(fill="seashell2",
                                           size=0.5, linetype="solid",
                                           colour ="gray"))
  savefileName <- paste0("~/Desktop/nAmericanArabidopsis/admixtoolsFigures/",aGroup,".pdf")
  pdf(savefileName)
  print(p)
  dev.off()
}
|
3b3c8848aa2bf9c4797d35419705ccae4843e2ef | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /ClusterStability/man/ClusterStability.Rd | b735865bc4f4a2b78ed079d68ba6d2a9d0300888 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,444 | rd | ClusterStability.Rd | \encoding{ISO-8859-2}
\name{ClusterStability}
\alias{ClusterStability}
\title{Calculates the approximate stability score (\emph{ST}) of individual objects in a clustering solution (the approximate version allowing one to avoid possible variable overflow errors).}
\description{This function will return the individual stability score \emph{ST} and the global score \emph{STglobal} using either the K-means or K-medoids algorithm and four different clustering indices: Calinski-Harabasz, Silhouette, Dunn or Davies-Bouldin.
}
\usage{ClusterStability(dat, k, replicate, type) }
\arguments{
\item{dat}{the input dataset: either a matrix or a dataframe.}
\item{k}{the number of classes for the K-means or K-medoids algorithm (default=3).}
\item{replicate}{the number of replicates to perform (default=1000).}
\item{type}{the algorithm used in the partitioning: either 'kmeans' or 'kmedoids' algorithm (default=kmeans).}
}
\value{Returns the individual (\emph{ST}) and global (\emph{ST_global}) stability scores for the four clustering indices: Calinski-Harabasz (\emph{ch}), Silhouette (\emph{sil}), Dunn (\emph{dunn}) or Davies-Bouldin (\emph{db}).}
\examples{
## Calculates the stability scores of individual objects of the Iris dataset
## using K-means, 100 replicates (random starts) and k=3
ClusterStability(dat=iris[1:4],k=3,replicate=100,type='kmeans');
}
\keyword{Stability score,ST,individual,global,approximate}
|
e5ac5b215b733c135ed246efcac27e70f34464e1 | eb9883eda588ca0d115fe22595b7c861d29b8cd5 | /Web Scraping/Web Scraping.R | c93566533ba34f829a435e078aeb9eeb9f23f776 | [] | no_license | Nikita-data-scientist/Projects | 0bff1005c17a2a47552cc30ac5e1999285031c0e | 5ab13da8e3f01facffd43c6524bce39e57a45c3f | refs/heads/master | 2021-06-21T11:28:45.543810 | 2017-07-30T15:56:05 | 2017-07-30T15:56:05 | 93,074,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,483 | r | Web Scraping.R | library(rvest)
# Scrape the 100 feature films released in 2016 from the IMDb search page and
# assemble them into a data frame for a quick visual exploration (rvest).
url <- ("http://www.imdb.com/search/title?count=100&release_date=2016,2016&title_type=feature")
# Read the html page.
webpage <- read_html(url)
# CSS selector collecting the film rank numbers.
rank_data_html <- html_nodes(webpage, '.text-primary')
# Convert rank_data to text, then to numbers.
rank_data <- html_text(rank_data_html)
head(rank_data)
rank_data <- as.numeric(rank_data)
# Film titles.
title_data_html <- html_nodes(webpage, '.lister-item-header a')
title_data <- html_text(title_data_html)
head(title_data)
# Short descriptions; strip embedded newlines.
description_data_html <- html_nodes(webpage, '.ratings-bar+ .text-muted')
description_data <- html_text(description_data_html)
head(description_data, n = 10)
description_data <- gsub("\n", "", description_data)
head(description_data)
# Runtime: drop the " min" suffix and convert to numeric.
runtime_data_html <- html_nodes(webpage, '.text-muted .runtime')
runtime_data <- html_text(runtime_data_html)
runtime_data <- gsub(" min", "", runtime_data)
runtime_data <- as.numeric(runtime_data)
head(runtime_data)
# Genre: strip newlines, keep only the first genre listed per film, factor it.
genre_data_html <- html_nodes(webpage, '.genre')
genre_data <- html_text(genre_data_html)
head(genre_data)
genre_data <- gsub("\n", "", genre_data)
genre_data <- gsub(",.*", "", genre_data)
genre_data <- as.factor(genre_data)
head(genre_data)
# IMDb rating.
rating_data_html <- html_nodes(webpage, '.ratings-imdb-rating strong')
rating_data <- html_text(rating_data_html)
rating_data <- as.numeric(rating_data)
head(rating_data)
# Vote counts: remove thousands separators before converting.
# (The original comment here had been split across two source lines, leaving
# a bare word on its own line, which errors at run time.)
votes_data_html <- html_nodes(webpage, '.sort-num_votes-visible span:nth-child(2)')
votes_data <- html_text(votes_data_html)
head(votes_data)
votes_data <- gsub(",", "", votes_data)
votes_data <- as.numeric(votes_data)
# Directors.  BUG FIX: the original converted the misspelled, undefined
# `directions_data` instead of `directors_data`.
directors_data_html <- html_nodes(webpage, '.text-muted+ p a:nth-child(1)')
directors_data <- html_text(directors_data_html)
directors_data <- as.factor(directors_data)
head(directors_data)
# Lead actors.
actors_data_html <- html_nodes(webpage, '.lister-item-content .ghost+ a')
actors_data <- html_text(actors_data_html)
actors_data <- as.factor(actors_data)
head(actors_data)
# Metascore: some films have none, so insert NA placeholders at the missing
# positions (indices are hard-coded for this particular scrape of the page).
# Using plain NA instead of list("NA") keeps the vector atomic.
metascore_data_html <- html_nodes(webpage, '.metascore')
metascore_data <- html_text(metascore_data_html)
head(metascore_data)
metascore_data <- gsub(" ", "", metascore_data)
length(metascore_data)
for (i in c(14, 42, 89)) {
  a <- metascore_data[1:(i - 1)]
  b <- metascore_data[i:length(metascore_data)]
  metascore_data <- append(a, NA)
  metascore_data <- append(metascore_data, b)
}
metascore_data <- as.numeric(metascore_data)
length(metascore_data)
summary(metascore_data)
# Gross earnings: drop the '$' and 'M', then pad films with no reported gross
# with NA (positions again hard-coded for this scrape of the page).
gross_data_html <- html_nodes(webpage, '.ghost~ .text-muted+ span')
gross_data <- html_text(gross_data_html)
head(gross_data)
gross_data <- gsub("M", "", gross_data)
gross_data <- substring(gross_data, 2, 6)
length(gross_data)
for (i in c(14, 42, 43, 52, 58, 72, 79, 80, 89, 93, 96, 99, 100)) {
  a <- gross_data[1:(i - 1)]
  b <- gross_data[i:length(gross_data)]
  gross_data <- append(a, NA)
  gross_data <- append(gross_data, b)
}
length(gross_data)
summary(gross_data)
str(gross_data)
gross_data <- gross_data[1:100]
# Convert gross to numeric.
gross_data <- as.numeric(gross_data)
# Assemble everything into one data frame.
movies_df <- data.frame(Rank = rank_data, Title = title_data,
                        Description = description_data,
                        Runtime = runtime_data, Genre = genre_data,
                        Rating = rating_data, Metascore = metascore_data,
                        Votes = votes_data, Gross_Earning_in_Mil = gross_data,
                        Director = directors_data, Actor = actors_data)
str(movies_df)
# Quick visualization.
library(ggplot2)
# Runtime distribution by genre.
qplot(data = movies_df, Runtime, fill = Genre, bins = 30)
# Rating vs runtime by genre.
ggplot(movies_df, aes(x = Runtime, y = Rating)) +
  geom_point(aes(size = Votes, col = Genre))
# Gross earnings vs runtime by genre.
ggplot(movies_df, aes(x = Runtime, y = Gross_Earning_in_Mil)) +
  geom_point(aes(size = Rating, col = Genre))
|
3d0b91fce6d3f33224288cebf62414a648a5e767 | 18e521773cd8dcb9e5983982b4c59cdc657f0ecd | /DataCamp R/datacamp notes.R | 39bf8ef3f0aa0cc5d8eca894768a327bcd54a572 | [] | no_license | dblosqrl/learning2017 | 2a3a87742415a1a855f7300f59800a5afa6049a8 | 20e5347e785dc71636acebb5d585f7d3b7b109db | refs/heads/master | 2021-05-14T14:18:13.615414 | 2018-01-02T03:41:20 | 2018-01-02T03:41:20 | 115,968,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,028 | r | datacamp notes.R | # Intro to data - R notes
# One-time setup: install the packages used in these course notes.
install.packages('openintro')
install.packages('dplyr')
install.packages('ggplot2')
install.packages('tidyr')
library(dplyr)
library(openintro)
data(hsb2) # use this syntax to load into environment, don't need the assignment
str(hsb2)
data(email50)
str(email50)
?table
?ifelse
# Calculate median number of characters: med_num_char
med_num_char <- median(email50$num_char)
# Create num_char_cat variable in email50
email50 <- email50 %>%
  mutate(num_char_cat = ifelse(num_char < med_num_char, 'below median', 'at or above median'))
# Count emails in each category
table(email50$num_char_cat)
# Load ggplot2
library(ggplot2)
# Scatterplot of exclaim_mess vs. num_char
ggplot(email50, aes(x = num_char, y = exclaim_mess, color = factor(spam))) +
  geom_point()
library(tidyr)
?count
?spread
# Count number of male and female applicants admitted
# NOTE(review): `ucb_admit` is not created anywhere in this file -- presumably
# it comes from the course workspace; confirm before running stand-alone.
ucb_counts <- ucb_admit %>% count(Admit, Gender)
# View result
ucb_counts
# Spread the output across columns
ucb_counts %>%
  spread(Admit, n)
|
116282f0c961740e8776da2ff53fa3e2e4ce36e5 | a641e294f9525468294a2b702a5370d88b4e402d | /experiments/2017-03_def_fitting/rmse_err.R | 69e32ceabec6c4aebf30f1690ef5936d25d3adc7 | [] | no_license | tambu85/misinfo_spread | 252b5c010b59c3a2cac7795331364916feb73bb2 | 3911892e40075ab118a5eb848f38f0806b60d266 | refs/heads/master | 2023-02-23T14:16:41.101763 | 2023-02-15T18:30:41 | 2023-02-15T18:30:41 | 44,867,463 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 6,173 | r | rmse_err.R | library(reshape2)
library(ggplot2)
library(plotly)
setwd("~/Desktop/fit/fitting_data")
files=list.files("~/Desktop/fit/fitting_data")
# Function that returns Root Mean Squared Error
# Root mean squared error of a vector of residuals: the square root of the
# mean of the squared errors.
rmse <- function(error) {
  squared_errors <- error ^ 2
  sqrt(mean(squared_errors))
}
# Per-hoax accumulators: raw RMSE plus two normalized variants
# (NRMSE1 = RMSE / range, NRMSE2 = RMSE / mean) for the "For" / believers (b)
# and "Against" / fact-checkers (f) series, fitted with and without segregation.
seg_b_rmse <- vector()
seg_f_rmse <- vector()
noseg_b_rmse <- vector()
noseg_f_rmse <- vector()
seg_b_nrmse1 <- vector()
seg_f_nrmse1 <- vector()
noseg_b_nrmse1 <- vector()
noseg_f_nrmse1 <- vector()
seg_b_nrmse2 <- vector()
seg_f_nrmse2 <- vector()
noseg_b_nrmse2 <- vector()
noseg_f_nrmse2 <- vector()
hoax <- vector()
i=1
# Files are assumed to come in pairs: files[j] is the "with segregation" fit
# and files[j+1] the "no segregation" fit for the same hoax.
for(j in seq(1,length(files),2)){
  #if(j!=31 && j!=55 && j!=21 && j!=27){
  #seg
  seg <- read.table(files[j], header = TRUE, sep=",")
  seg_for =seg$For_empirico
  seg_against=seg$Against_empirico
  seg_ba = seg$For_BA
  seg_fa = seg$Against_FA
  seg_error_b = abs(seg_for-seg_ba)
  seg_error_f = abs(seg_against-seg_fa)
  # Hoax label = file name up to the "_with_segregation.csv" suffix.
  name=strsplit(files[j],"*_with_segregation.csv")
  #name1=strsplit(name[[1]],"hoaxy_*")
  hoax[i]= name[[1]][1]
  print(hoax[i])
  seg_b_rmse[i]=rmse(seg_error_b)
  seg_f_rmse[i]=rmse(seg_error_f)
  print(paste0("rmse FOR:",rmse(seg_error_b)))
  print(paste0("den_1 FOR (max-min):", (max(seg_for)-min(seg_for))))
  print(paste0("den_2 FOR (mean):", mean(seg_for)))
  seg_b_nrmse1[i]=rmse(seg_error_b)/(max(seg_for)-min(seg_for))
  seg_b_nrmse2[i]=rmse(seg_error_b)/(mean(seg_for))
  # Guard against dividing by a zero mean in the Against series.
  if(mean(seg_against)!=0){
    print(paste0("rmse AGAINST:",rmse(seg_error_f)))
    print(paste0("den_1 AGAINST (max-min):", (max(seg_against)-min(seg_against))))
    print(paste0("den_2 AGAINST (mean):", mean(seg_against)))
    seg_f_nrmse1[i]=rmse(seg_error_f)/(max(seg_against)-min(seg_against))
    seg_f_nrmse2[i]=rmse(seg_error_f)/(mean(seg_against))
  }else{
    # Zero mean: fall back to storing the unnormalized RMSE.
    print("media 0!")
    print(paste0("rmse AGAINST:",rmse(seg_error_f)))
    seg_f_nrmse1[i]=rmse(seg_error_f)
    seg_f_nrmse2[i]=rmse(seg_error_f)
  }
  #noseg
  noseg <- read.table(files[j+1], header = TRUE, sep=",")
  noseg_for =noseg$For_empirico
  noseg_against=noseg$Against_empirico
  noseg_ba = noseg$For_BA
  noseg_fa = noseg$Against_FA
  noseg_error_b = abs(noseg_for-noseg_ba)
  noseg_error_f = abs(noseg_against-noseg_fa)
  noseg_b_rmse[i]=rmse(noseg_error_b)
  noseg_f_rmse[i]=rmse(noseg_error_f)
  noseg_b_nrmse1[i]=rmse(noseg_error_b)/(max(noseg_for)-min(noseg_for))
  noseg_b_nrmse2[i]=rmse(noseg_error_b)/(mean(noseg_for))
  if(mean(noseg_against)!=0){
    print(paste0("rmse AGAINST:",rmse(noseg_error_f)))
    print(paste0("den_1 AGAINST (max-min):",(max(noseg_against)-min(noseg_against))))
    print(paste0("den_2 AGAINST (mean):",mean(noseg_against)))
    noseg_f_nrmse1[i]=rmse(noseg_error_f)/(max(noseg_against)-min(noseg_against))
    noseg_f_nrmse2[i]=rmse(noseg_error_f)/(mean(noseg_against))
  }else{
    noseg_f_nrmse1[i]=rmse(noseg_error_f)
    noseg_f_nrmse2[i]=rmse(noseg_error_f)
  }
  i <-i+1
  #}
}
# Average each metric across all hoaxes.
#rmse
ave_seg_b_rmse <- mean(seg_b_rmse)
ave_seg_f_rmse <- mean(seg_f_rmse)
ave_noseg_b_rmse <- mean(noseg_b_rmse)
ave_noseg_f_rmse <- mean(noseg_f_rmse)
#nrmse1
ave_seg_b_nrmse1 <- mean(seg_b_nrmse1)
ave_seg_f_nrmse1 <- mean(seg_f_nrmse1)
ave_noseg_b_nrmse1 <-mean(noseg_b_nrmse1)
ave_noseg_f_nrmse1<- mean(noseg_f_nrmse1)
#nrmse2
ave_seg_b_nrmse2 <- mean(seg_b_nrmse2)
ave_seg_f_nrmse2 <- mean(seg_f_nrmse2)
ave_noseg_b_nrmse2 <- mean(noseg_b_nrmse2)
ave_noseg_f_nrmse2 <- mean(noseg_f_nrmse2)
# Print a summary of the averaged metrics.
print("====RMSE====")
print(paste0("noseg_AB:",ave_noseg_b_rmse))
print(paste0("noseg_AF:",ave_noseg_f_rmse))
print(paste0("seg_AB:",ave_seg_b_rmse))
print(paste0("seg_AF:",ave_seg_f_rmse))
print("====NRMSE1(range)====")
print(paste0("noseg_AB:",ave_noseg_b_nrmse1))
print(paste0("noseg_AF:",ave_noseg_f_nrmse1))
print(paste0("seg_AB:",ave_seg_b_nrmse1))
print(paste0("seg_AF:",ave_seg_f_nrmse1))
print("====NRMSE2(mean)====")
print(paste0("noseg_AB:",ave_noseg_b_nrmse2))
print(paste0("noseg_AF:",ave_noseg_f_nrmse2))
print(paste0("seg_AB:",ave_seg_b_nrmse2))
print(paste0("seg_AF:",ave_seg_f_nrmse2))
# Bar chart of the averaged NRMSE1 per model compartment (interactive plotly).
ave_df1 <- data.frame(ave_noseg_b_nrmse1, ave_noseg_f_nrmse1, ave_seg_b_nrmse1, ave_seg_f_nrmse1)
colnames(ave_df1) <-c("noseg_b","noseg_f","seg_b","seg_f")
m_ave1 <- melt(ave_df1)
ap1 <- ggplot(data=m_ave1, aes(x=variable, y=value, fill=variable))+
  geom_bar(stat="identity",position=position_dodge())+
  scale_fill_brewer(palette="Spectral")+
  ylim(c(0,0.4))+
  xlab("model compartment")+
  #scale_x_discrete(labels=c("","","",""))+
  theme(axis.text.x =element_text(size=12))+
  ggtitle("NRMSE1 (range)")
ap1 <- ggplotly(ap1)
show(ap1)
# Same chart for the averaged NRMSE2.
ave_df2 <- data.frame(ave_noseg_b_nrmse2, ave_noseg_f_nrmse2, ave_seg_b_nrmse2, ave_seg_f_nrmse2)
colnames(ave_df2) <-c("noseg_b","noseg_f","seg_b","seg_f")
m_ave2 <- melt(ave_df2)
ap2 <- ggplot(data=m_ave2, aes(x=variable, y=value, fill=variable))+
  geom_bar(stat="identity",position=position_dodge())+
  scale_fill_brewer(palette="Spectral")+
  xlab("model compartment")+
  ylim(c(0,0.4))+
  #scale_x_discrete(labels=c("","","",""))+
  theme(axis.text.x =element_text(size=12))+
  ggtitle("NRMSE2 (mean)")
ap2 <- ggplotly(ap2)
show(ap2)
# Per-hoax grouped bar chart of NRMSE1.
# NOTE(review): the x-axis relabeling assumes exactly 31 hoaxes (labels 1:31);
# confirm against the number of file pairs actually processed.
plot_df1 <- data.frame(hoax,
                       noseg_b_nrmse1, noseg_f_nrmse1,
                       seg_b_nrmse1, seg_f_nrmse1)
colnames(plot_df1) <-c("hoax","noseg_b","noseg_f", "seg_b","seg_f")
m1 <- melt(plot_df1)
p1 <- ggplot(data=m1, aes(x=hoax, y=value, fill=variable))+
  geom_bar(stat="identity", position=position_dodge())+
  theme(axis.text.x =element_text(size=8, angle=90))+
  scale_fill_brewer(palette="Spectral")+
  scale_x_discrete(labels=c(1:31))+
  ggtitle("NRMSE1 (range)")
p1 <- ggplotly(p1)
show(p1)
# Per-hoax grouped bar chart of NRMSE2.
plot_df2 <- data.frame(hoax,
                       noseg_b_nrmse2, noseg_f_nrmse2,
                       seg_b_nrmse2, seg_f_nrmse2)
colnames(plot_df2) <-c("hoax","noseg_b","noseg_f", "seg_b","seg_f")
m2 <- melt(plot_df2)
p2 <- ggplot(data=m2, aes(x=hoax, y=value, fill=variable))+
  geom_bar(stat="identity", position=position_dodge())+
  theme(axis.text.x =element_text(size=8, angle=90))+
  scale_fill_brewer(palette="Spectral")+
  scale_x_discrete(labels=c(1:31))+
  ggtitle("NRMSE2 (mean)")
p2 <- ggplotly(p2)
show(p2)
|
54dbe804713a3cb17704469c55985d8a64865da5 | a981c72c198a24e55f8c6b008033edb6c419011a | /r-programming/week-4/programming-assignment/rankhospital.R | 2e747964942a106931bc90ba4c83ac88df483dc1 | [] | no_license | antoinemertz/datasciencecoursera | db9bc3a1180ffc53fb1fbc5496d66479c832f410 | 5ba1e621b2fb99d07927300aa71a0341722a0d62 | refs/heads/master | 2021-01-22T21:27:59.092110 | 2018-07-03T17:32:15 | 2018-07-03T17:32:15 | 85,433,280 | 0 | 0 | null | 2018-07-03T17:32:16 | 2017-03-18T21:30:31 | HTML | UTF-8 | R | false | false | 1,407 | r | rankhospital.R | #setwd("Documents/Perso/Perso/Coursera/Data Science - John Hopkins/datasciencecoursera/ProgrammingAssignment3/")
rankhospital <- function(state, outcome, num = "best") {
  ## Return the name of the hospital in `state` that holds rank `num` for
  ## 30-day mortality of `outcome` ("heart attack", "heart failure" or
  ## "pneumonia").  `num` may be a rank number, "best" or "worst"; NA is
  ## returned when a numeric rank exceeds the number of hospitals in the
  ## state.  Stops with "invalid state"/"invalid outcome"/"invalid num"
  ## for bad arguments, as before.
  library(dplyr)
  # read csv file
  df <- read.csv("data/outcome-of-care-measures.csv", colClasses = "character")
  # check that state is valid
  if (!(state %in% levels(as.factor(df$State)))) {
    stop("invalid state")
  }
  # check that outcome is valid; map each outcome to the column holding its
  # 30-day mortality rate (same columns 11/17/23 as the original chain)
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!(outcome %in% names(outcome_cols))) {
    stop("invalid outcome")
  }
  nc <- outcome_cols[[outcome]]
  df.state <- df[df$State == state, c(2, nc)]
  names(df.state) <- c("Hospital.Name", "Rate")
  df.state$Rate <- as.numeric(df.state$Rate)
  n <- nrow(df.state)
  # A numeric rank beyond the state's hospital count yields NA (as before,
  # `n` counts all rows, including hospitals with a missing rate).
  if (is.numeric(num) && num > n) {
    return(NA)
  }
  # Rank hospitals by rate, ties broken alphabetically.  Computed once here;
  # the original duplicated this identical pipeline in both if/else branches.
  df.sort <- df.state %>%
    arrange(Rate, Hospital.Name) %>%
    mutate(Rank = row_number())
  if (num == "best") {
    num <- 1
  } else if (num == "worst") {
    # "worst" = last hospital that actually reported a rate
    num <- nrow(df.sort[!(is.na(df.sort$Rate)), ])
  } else if (!(is.numeric(num))) {
    stop("invalid num")
  }
  return(df.sort[num, "Hospital.Name"])
}
|
e12c1f0933e2159bc880131372296c41c0c02ace | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i15-m7-u4-v0.pddl_planlen=20/dungeon_i15-m7-u4-v0.pddl_planlen=20.R | 55322250e451f9eff64ff679d1550f149ecacd85 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 88 | r | dungeon_i15-m7-u4-v0.pddl_planlen=20.R | 927c0d7c4c021f5bf42b728097d48371 dungeon_i15-m7-u4-v0.pddl_planlen=20.qdimacs 6293 25663 |
d4473150eaf0e6cea7b1507634412b00aae929da | 681d96f700665b54e841219de5f872b853c22477 | /dataframes_plots.R | a3e8ff39b8555182d7c5a5a7135d662136765d1f | [] | no_license | hylsonk/TrabalhoAD | 28aac918e88018a1949a6be5392dc2e88cd5aabc | 47b338fccd806475ddd5cf5adf760fe4706d5005 | refs/heads/master | 2020-09-24T01:46:26.100125 | 2019-12-24T00:50:07 | 2019-12-24T00:50:07 | 225,632,878 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 674 | r | dataframes_plots.R | f1 <- function(n) {
eixo_x <- numeric()
eixo_y <- numeric()
for (i in 0:n) {
print(i)
eixo_x[i] <- i
eixo_y[i] <- (n-(i-1))
}
data.frame(eixo_x, eixo_y)
}
# Build the example data frame and inspect its two columns.
df=f1(5)
df$eixo_x
df$eixo_y
# plot graphs: (x, y, plot type, axis labels, line width, title, color)
plot(df$eixo_x, df$eixo_y, type='h', ylab = 'n', xlab = 's',lwd = 5, main="Teste de Plot", col="red")
plot(df$eixo_x, df$eixo_y, type='l', ylab = 'n', xlab = 's', lwd = 2,main="Teste de Plot")
# Returns a one-row data frame with two fixed columns, var1 = 1 and var2 = 2.
f2 <- function() {
  data.frame(var1 = 1, var2 = 2)
}
n=5
novo_df<-data.frame("Variavel1", "Variavel2")
# NOTE(review): `novo_df[i] <- df` assigns a two-column data frame into a
# single column position of novo_df on every pass -- this looks suspect;
# confirm whether rbind()-style row accumulation was intended.
for (i in 1:n) {
  df=f2()
  novo_df[i] <- df
}
novo_df
|
79ef6fa9ff0aa72560e0823142af5529e4167429 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Haplin/examples/output.Rd.R | 199eba4dfe62247d77527bbed8abfe8f38ea1b51 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 394 | r | output.Rd.R | library(Haplin)
### Name: output
### Title: Save files with summary, table, and plot from a haplin object.
### Aliases: output
### ** Examples
## Not run:
##D
##D # Run haplin and save results in separate files
##D # in the c:\work\haplinresults directory:
##D res <- haplin("data.dat", use.missing = T, maternal = T)
##D output(res, dirname = "c:/work/haplinresults")
## End(Not run)
|
57b06b64eaded6097976a970a18016b7d72337b9 | cee6e1465fa98f6dcb12f936130ab4e625fe8c84 | /cachematrix.R | b3183107565effa507dab158454f0581806a15d1 | [] | no_license | chujustin/ProgrammingAssignment2 | 174c826d05411b643b496d0e1e52abdb16ec8757 | 810a42a79c0f94a76a0055af1b71e3db91e0cfad | refs/heads/master | 2020-08-17T20:47:40.838600 | 2019-10-18T04:15:55 | 2019-10-18T04:15:55 | 215,710,479 | 0 | 0 | null | 2019-10-17T05:32:01 | 2019-10-17T05:32:00 | null | UTF-8 | R | false | false | 976 | r | cachematrix.R | ## These functions cache the inverse of a matrix, so that if it is cached, it
## does not need to be computed again, which is useful for costly/big
## computations.
## This function creates a list that sets and gets x and the inverse of x.
## Build a cache-aware wrapper around a matrix.  The returned list exposes
## get/set for the matrix itself and getinverse/setinverse for its (lazily
## computed) inverse; replacing the matrix invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # A new matrix means any previously cached inverse is stale.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function checks if the matrix inputted already has the inverse computed,
## and if it doesn't, the inverse will be computed.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## reusing the cached value when one is available (in which case a
## "getting cached data" message is emitted).  Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
  } else {
    message("getting cached data")
    cached
  }
}
|
7dae62ea127bc9d8889a8a62a751f17f4151fc7b | 7225139edc69f95a358143b6da98dee0d8dd83b4 | /r_kkagi/ch04_ex01.R | 801156dca3881b91645e5b03ea1e8a28b0db9ef9 | [] | no_license | diligejy/r_train | 57512fb571fcef4bdc63e3fccdc8d304ff020024 | 925bb9fea85151a4e1be8ddc9ced3e48a3d789a8 | refs/heads/master | 2022-12-16T06:44:49.017734 | 2020-09-16T10:04:42 | 2020-09-16T10:04:42 | 258,130,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,820 | r | ch04_ex01.R |
# ๋ฐ์ดํฐ ๋ก๋
DF <- read.csv('C:/Users/jinyoung/Pictures/example/example_studentlist.csv')
# ๋ฐ์ดํฐ ํ์ธ
head(DF)
str(DF)
# ๊ธฐ๋ณธ plot
plot(DF$age)
# ์๊ด๊ด๊ณ ํ์
ํ๊ธฐ
plot(DF$height, DF$weight)
plot(DF$weight ~ DF$height) # ๋ ๊ฐ์ง ๋ชจ๋ ๊ฐ์ผ๋ ์ด ๋ฐฉ๋ฒ์ ์ ๊ท์์ ์ฐ๋ฉฐ ์ข
์๋ณ์ ~ ๋
๋ฆฝ๋ณ์ ์ ์ด์ฃผ๋ฉด ๋จ
# ๋ช
๋ชฉํ ๋ณ์์ ์์นํ ๋ณ์์ ๊ด๊ณ๊ณ
plot(DF$height, DF$sex)
plot(DF$sex, DF$weight)
# plot์์ ์ฒซ๋ฒ์งธ ์ธ์๊ฐ x์ถ, ๋ ๋ฒ์งธ ์ธ์๊ฐ y์ถ
# ํน์ ๋ณ์๋ง ๊ฐ์ฒด์ ์ฝ์
DF2 <- data.frame(DF$height, DF$weight)
plot(DF2)
# ๋ณ์ 3๊ฐ๋ก ์ด๋ฃจ์ด์ง ๋ฐ์ดํฐ ํ๋ ์ ์๊ฐํ
DF3 <- cbind(DF2, DF$age)
head(DF3)
plot(DF3)
plot(DF)
# LEVEL๋ณ ๊ทธ๋ํ ๋ณด๊ธฐ
plot(DF$weight ~ DF$height, pch = as.integer(DF$sex))
legend('topleft', c('๋จ', '์ฌ'), pch = DF$sex)
coplot(DF$weight ~ DF$height | DF$sex)
# ann = F๋ก ํจ์ผ๋ก์จ ๊ทธ๋ํฝ์ ์๋ฌด ๋ผ๋ฒจ๋ ์๋์ด
plot(DF$weight ~ DF$height, ann = FALSE)
title(main = 'A๋ํ Bํ๊ณผ์ ๋ชธ๋ฌด๊ฒ์ ํค์ ์๊ด๊ด๊ณ๊ณ')
title(xlab = '๋ชธ๋ฌด๊ฒ')
title(ylab = 'ํค')
# ๊ฒฉ์ ์ถ๊ฐ
grid()
# ๊ทธ๋ํ์ ์ ์ ๊ธ๊ธฐ
weightMean <- mean(DF$height)
abline(v = weightMean, color = 'red')
# ๋น๋์ ๋ํ๋ด๊ธฐ
FreqBlood <- table(DF$bloodtype)
FreqBlood
barplot(FreqBlood)
title(main='ํ์กํ๋ณ ๋น๋์')
title(xlab = 'ํ์กํ')
title(ylab = '๋น๋์')
#
Height <- tapply(DF$height, DF$bloodtype, mean)
Height
barplot(Height, ylim = c(0, 200))
plot(DF$bloodtype)
boxplot(DF$height)
boxplot(DF$height ~ DF$bloodtype)
# hist
hist(DF$height)
# ๋ง๋ ๊ฐ์ ๋ฐ๊พธ๊ณ ์ถ๋ค๋ฉด ์ธ์ ์ถ๊ฐ
hist(DF$height, breaks = 10)
hist(DF$height, breaks = 10, prob = T)
lines(density(DF$height))
# 7๊ฐ๊ฒฉ ๊ณ๊ธ์ผ๋ก ๋ง๋ค๊ธฐ
BreakPoint <- seq(min(DF$height), max(DF$height) + 7, by = 7)
hist(DF$height, breaks = BreakPoint)
DiffPoint <- c(min(DF$height), 165, 170, 180, 185, 190)
hist(DF$height, breaks = DiffPoint)
hist(0)
# ํ ํ๋ฉด์ ์ฌ๋ฌ ๊ฐ ๊ทธ๋ํ๊ทธ๋ฆฌ๊ธฐ
par(mfrow = c(2, 3))
plot(DF$weight, DF$height)
plot(DF$sex, DF$height)
barplot(table(DF$bloodtype))
barplot(DF$height)
barplot(DF$height ~ DF$bloodtype)
hist(DF$height, breaks = 10)
par(mfrow = c(1, 1))
# ๋๊ฒจ๊ฐ๋ฉฐ ๊ทธ๋ํ ๋ณด๊ธฐ
plot(DF$weight ~ DF$height + DF$age + DF$grade + DF$absence + DF$sex)
# ๋ ๋ผ์ธ์ ๊ฒน์ณ ๋น๊ตํ๋ ๊ทธ๋ํ ๊ทธ๋ฆฌ๊ธฐ
TS1 <- c(round(runif(30) * 100))
TS1
TS2 <- c(round(runif(30) * 100))
TS2
TS1 <- sort(TS1, decreasing = F)
TS2 <- sort(TS2, decreasing = F)
TS1
TS2
plot(TS1, type = 'l')
lines(TS2, lty = 'dashed', col = 'red')
install.packages('ggplot2')
install.packages('ggthemes')
library('ggplot2')
library('ggthemes')
ggplot(data = diamonds, aes(x=caret, y = price, colour = clarity))
+ geom_point() + theme_wsj()
|
75098ae1a75e21b2c037326359b793451ffc91aa | 740d61a6181e12753c10a83bf7c6ee13541744ac | /ะกะฑะพั ะดะฐะฝะฝัั
ะฒ ะธะฝัะตัะฝะตัะต/14/11-jump_follow.R | 3d3757191ae2643d6ec45085d7867b9b96a782d1 | [] | no_license | kn7072/R | d566efd2410421b4333b8666bfc99ce90c2cbec7 | 142f3d51d2beff767896c43307881be6f9a64fee | refs/heads/master | 2021-01-11T11:07:02.972657 | 2017-02-05T16:59:29 | 2017-02-05T16:59:29 | 72,877,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 219 | r | 11-jump_follow.R | # ะะฐะฒะธะณะฐัะธั.
# Session-based navigation with rvest: follow links, step back and jump
# between URLs while keeping browsing history.
# NOTE(review): html_session()/back() were renamed session()/session_back()
# in rvest 1.0 -- confirm the installed rvest version before running.
library(rvest)
s <- html_session("http://hadley.nz")
s$url
# Follow the on-page link whose text matches "github".
s <- s %>% follow_link("github")
s$url
# Step back through the session history.
s <- s %>% back()
s$url
# Jump directly to an absolute URL within the same session.
s <- s %>% jump_to("http://recipes.had.co.nz/")
s$url
# Show the full navigation history of the session.
session_history(s)
|
c162dbc53eb10e98b37052d2590f5f9c9569f211 | 95f623500066946fbc6fe4951195fc55d3567289 | /2020/scripts/day_16.R | 2bb4c7d6df51ae54a4118784144afaedcd622675 | [] | no_license | dzhang32/advent_of_code | cdb148fd5a599bcea57e3b725d28f761bf1ef0b7 | 52e06cfb2808db069b465031b6dc990640b6e1af | refs/heads/master | 2023-02-05T20:26:38.812659 | 2020-12-27T17:36:11 | 2020-12-27T17:36:11 | 319,388,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,866 | r | day_16.R | library(tidyverse)
library(stringr)
# Load data ---------------------------------------------------------------
tickets_raw <- read_lines(here::here("2020", "raw_data", "day_16.txt"))
# Main --------------------------------------------------------------------
##### Part 1 #####
# Everything above the "your ticket:" marker is the block of field rules.
rules_raw <- tickets_raw[1:which(tickets_raw == "your ticket:")]
# Parse each rule line ("type: a-b or c-d") into numeric start/end pairs.
rules_tidy <- tibble(rules = rules_raw[!rules_raw %in% c("", "your ticket:")]) %>%
  separate(rules, into = c("type", "rules"), ": ") %>%
  separate(rules, into = str_c("rules_", 1:2), " or ") %>%
  separate(rules_1, into = str_c(c("start_", "end_"), 1)) %>%
  separate(rules_2, into = str_c(c("start_", "end_"), 2)) %>%
  mutate_at(vars(contains(c("start", "end"))),
            as.numeric)
# Reshape to one row per (type, range): columns type/start/end.
rules_tidy <- rules_tidy %>%
  gather("start_1_2", "start", contains(c("start"))) %>%
  mutate(end = ifelse(start_1_2 == "start_1", end_1, end_2)) %>%
  dplyr::select(type, start, end)
# Nearby tickets: one tibble per ticket, then stacked.
nearby_raw <- tickets_raw[which(tickets_raw == "nearby tickets:"):length(tickets_raw)]
nearby_raw <- nearby_raw[nearby_raw != "nearby tickets:"]
nearby_tidy <- vector("list", length(nearby_raw))
for(i in seq_along(nearby_raw)){
  nearby_tidy[[i]] <- tibble(times = nearby_raw[i] %>%
                               str_split(",") %>%
                               unlist() %>%
                               as.integer(),
                             ticket = i)
}
nearby_tidy <- do.call(bind_rows, nearby_tidy)
nearby_tidy <- nearby_tidy %>%
  mutate(valid = FALSE)
# A value is valid when it falls inside at least one rule's range.
for(i in seq_len(nrow(nearby_tidy))){
  valid_curr <- any(nearby_tidy[["times"]][i] >= rules_tidy[["start"]] &
                      nearby_tidy[["times"]][i] <= rules_tidy[["end"]])
  nearby_tidy[["valid"]][i] <- valid_curr
}
# Part 1 answer: the sum of all invalid values.
nearby_tidy %>%
  filter(valid == FALSE) %>%
  .[["times"]] %>%
  sum()
##### Part 2 #####
# Keep only tickets whose values are all valid.
nearby_valid <- nearby_tidy %>%
  group_by(ticket) %>%
  summarise(valid = all(valid)) %>%
  filter(valid)
nearby_valid <- nearby_tidy %>%
  filter(ticket %in% nearby_valid[["ticket"]])
# Number the fields within each ticket so values can be grouped by position.
nearby_valid <- nearby_valid %>%
  group_by(ticket) %>%
  mutate(index = row_number()) %>%
  ungroup()
index_to_rule <- tibble(index = unique(nearby_valid[["index"]]),
                        rule = NA_character_)
index_to_rule[["rule_poss"]] <- vector("list", nrow(index_to_rule))
# which rules are possible for each index to match?
for(i in seq_len(nrow(index_to_rule))){
  index_times <- nearby_valid %>%
    filter(index == index_to_rule[["index"]][i]) %>%
    .[["times"]]
  for(j in seq_along(unique(rules_tidy[["type"]]))){
    type_curr <- unique(rules_tidy[["type"]])[j]
    rules_curr <- rules_tidy %>%
      filter(type == type_curr)
    # A rule is possible when every value at this index fits one of its
    # two ranges.
    valid_1 <- index_times >= rules_curr[["start"]][1] &
      index_times <= rules_curr[["end"]][1]
    valid_2 <- index_times >= rules_curr[["start"]][2] &
      index_times <= rules_curr[["end"]][2]
    if(all(valid_1 | valid_2)){
      index_to_rule[["rule_poss"]][[i]] <- c(index_to_rule[["rule_poss"]][[i]],
                                             type_curr)
    }
  }
}
# narrow down the rules to one per index: repeatedly pin the index with
# exactly one candidate, then remove that rule from every other list.
# NOTE(review): `[[which_1_poss]]` assumes exactly one index has a single
# candidate on each pass -- confirm this holds for the puzzle input.
while(any(is.na(index_to_rule[["rule"]]))){
  poss <- lapply(index_to_rule[["rule_poss"]], length) %>%
    unlist()
  which_1_poss <- which(poss == 1)
  rule_to_add <- index_to_rule[["rule_poss"]][[which_1_poss]]
  index_to_rule[["rule"]][which_1_poss] <- rule_to_add
  index_to_rule[["rule_poss"]] <- index_to_rule[["rule_poss"]] %>%
    lapply(FUN = function(x) x[x != rule_to_add])
}
# "your ticket:" values are on the line right after the marker.
your_ticket <- tickets_raw[which(str_detect(tickets_raw, "your ticket:")) + 1] %>%
  str_split(",") %>%
  unlist() %>%
  as.integer()
# Part 2 answer: product of the "departure*" fields on your ticket.
departure_indices <- index_to_rule %>%
  filter(str_detect(rule, "departure")) %>%
  .[["index"]]
prod(your_ticket[departure_indices])
|
20d906c1a93f6fa64428ef488aa6f66c1af29e33 | 23632e85dfd7e75d4b394c7d98fa5bfaf72f21ea | /plot2.r | a1b8dd97ab289637e7a9dd1a24cae83591ac194c | [] | no_license | dipeshwalte/ExData_Plotting1 | 53c3485d3f07c757bbffcba1b05cd60f773d3280 | 51bd5949b86b7cdd2585833835d387919d9d6332 | refs/heads/master | 2021-01-24T15:52:28.888337 | 2016-09-19T16:57:03 | 2016-09-19T16:57:03 | 68,602,059 | 0 | 0 | null | 2016-09-19T12:08:17 | 2016-09-19T12:08:17 | null | UTF-8 | R | false | false | 1,009 | r | plot2.r | setwd("F:\\Data Science\\My Codes and Assignments\\Coursera\\Exploratory Data Analysis\\Data")
# Read the full power-consumption dataset (';'-separated, header row).
data <- read.table("household_power_consumption.txt", stringsAsFactors = FALSE, sep = ";", header = TRUE)
# Select only the two dates of interest (1-2 Feb 2007).
exploratoryData <- subset(data, Date == "2/2/2007" | data$Date == "1/2/2007")
exploratoryData$Date <- as.Date(exploratoryData$Date, "%d/%m/%Y")
# Derive a full date-time column.  BUG FIX: the original converted the
# timestamp to Date, which collapsed every reading of a day onto a single
# x value; keeping POSIXct preserves the intra-day time axis for the plot.
exploratoryData$timestamp <- as.POSIXct(paste(exploratoryData$Date, exploratoryData$Time), format = "%Y-%m-%d %H:%M:%S")
# Convert Global active power to numeric (non-numeric entries become NA).
exploratoryData$Global_active_power <- as.numeric(exploratoryData$Global_active_power)
# pch = NA suppresses point symbols; the data are drawn as a line below.
plot(exploratoryData$timestamp, exploratoryData$Global_active_power, pch = NA, xlab = "Time", ylab = "Global Active Power")
lines(exploratoryData$timestamp, exploratoryData$Global_active_power)
# Copy the screen device to a PNG file, then close it.
dev.copy(png, file = "plot2.png")
dev.off()
|
1537df22b38b9ac12f851c0d23f2f32cdcd6f732 | 2d6b42f1a95a9544b42939dcb13b29e6e503ef79 | /R/.Rprofile | 0aa2f2163275c7ab2de481e9261ef9f8bf100f8e | [] | no_license | jamesgitting/linux | ce82b832b798f9ae264dc9a59c2bd78151d64bb8 | ff48a2005bf3bea8453074e74364b54d97aee684 | refs/heads/master | 2020-03-21T01:27:40.809150 | 2018-07-10T14:57:49 | 2018-07-10T14:57:49 | 46,635,445 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,071 | rprofile | .Rprofile | options(prompt="R> ")
# Continuation prompt for multi-line input.
options(continue="R+> ")
options(width=200)
# Effectively disable scientific notation when printing numbers.
options(scipen=999)
options(repos = c(CRAN = "https://cran.rstudio.com"))
options(stringsAsFactors=FALSE)
# Override q() so quitting never prompts to save the workspace.
q <- function (save="no", ...){
  quit(save=save, ...)
}
# On exit from an interactive session, save the command history to
# $R_HISTFILE (or ~/.RHistory when the variable is unset).
.Last <- function(){
  if(interactive()){
    hist_file <- Sys.getenv("R_HISTFILE")
    if (hist_file == "") hist_file <- "~/.RHistory"
    savehistory(hist_file)
  }
}
# Attach a package by name while silencing startup messages and warnings.
shhh <- function(a.package){
  suppressWarnings(suppressPackageStartupMessages(
    library(a.package, character.only=TRUE)))
}
# Auto-load packages
auto.loads <- c("stats", "devtools", "tidyverse", "lubridate")
if (interactive()) {
  invisible(sapply(auto.loads, shhh))
  # colorout: colored console output (third-party; interactive use only).
  library("colorout")
  setOutputColors256(normal = 39,
                     number = 40,
                     negnum = 160,
                     date = 43,
                     string = 79,
                     const = 75,
                     verbose = FALSE)
}
# Open a file or directory with the desktop's default application.
xdg <- function(path="."){
  system(sprintf("xdg-open %s", path))
}
# Parse a character date-time field into POSIXct using the given format.
fix.time <- function(time_field, format.pattern = "%Y-%m-%d %H:%M:%S"){
  return (as.POSIXct(time_field, format=format.pattern))
}
options(tibble.print_min = 20)
|
6dd081a24640150dbdee35ce2f5992bd9d94b2bf | f07095445cf41012f652e4862d10c839f7602de8 | /getPanelIDs_NEB.R | 126a31110aa19a3d51f844e0cae6e1236a26fdf1 | [] | no_license | CoreyVernot/data-plus | f9658bcf743e753edb964bd48bf8f830f7f82150 | a5795cd548421f2ed5d33af2eed82102ec5ae81e | refs/heads/master | 2021-01-20T19:53:30.321690 | 2016-07-29T15:28:44 | 2016-07-29T15:28:44 | 62,651,859 | 2 | 2 | null | 2016-08-16T18:55:31 | 2016-07-05T16:18:49 | R | ISO-8859-1 | R | false | false | 6,396 | r | getPanelIDs_NEB.R |
####DON'T USE THIS CODE!!!!!!!!!1 ####
library(RODBC)
## Fetch the "Rx<yy>" prescription tables for the requested two-digit years
## from the IRI SQL Server and stack them into one data frame.
##   years  - two-digit years whose Rx tables should be pulled (default 10-12).
##   server - ODBC data source name to connect to.
## Returns a single data.frame with the rows of every requested year.
getRx <- function(years = c(10,11,12), server = "SQLServer_IRI" ){
  ch <- odbcConnect(server)
  # Close the channel even on error (the original leaked the connection).
  on.exit(odbcClose(ch), add = TRUE)
  tables <- paste("Rx", years, sep = "")
  # Fetch each year's table, then bind once at the end: the original grew
  # the data frame with rbind() inside the loop, which is quadratic, and
  # its 1:length(years) index misbehaves when `years` is empty.
  pieces <- lapply(tables, function(tab) sqlFetch(ch, tab))
  rx <- do.call(rbind, pieces)
  return(rx)
}
## Fetch the full DEMO (household demographics) table from the IRI SQL
## Server identified by the `server` ODBC data source name.
getDemo <- function(server = "SQLServer_IRI"){
  connection <- odbcConnect(server)
  demo_table <- sqlFetch(connection, "DEMO")
  return(demo_table)
}
## For each drug set (a character vector of brand-name fragments, passed via
## ...), find the panelists in `rx` whose prescription brand matches any
## fragment and who live in a household of an allowed size (`demo`/`HHSizes`).
## Matching is case-insensitive and ignores all non-alphanumeric characters.
## Returns a list with $IDs[[i]] (matching panel ids) and $Brands[[i]] (the
## raw brand strings that matched) for each drug set i.
getPanelIDs <- function(..., rx, demo, HHSizes = 1){
  allBrands <- list(...)
  demoids <- demo$panelid[demo$hhsize %in% HHSizes]
  # Normalize brand names on both sides: lower-case, strip everything that
  # is not a letter or a digit.  The raw spelling is kept for reporting.
  rx$Rx_Brand_Print <- rx$Rx_Brand
  rx$Rx_Brand <- gsub("[^[:alnum:]]", "", tolower(rx$Rx_Brand))
  allBrands <- lapply(allBrands, function(x) tolower(gsub("[^[:alnum:]]", "", x)))
  # Preallocate so both sub-lists always have one slot per drug set (the
  # original grew them via NULL-[[ assignment, whose result type varies).
  panelIDs <- list(IDs = vector("list", length(allBrands)),
                   Brands = vector("list", length(allBrands)))
  # seq_along() is safe when no drug sets are supplied (1:length is not).
  for (i in seq_along(allBrands)) {
    brands <- allBrands[[i]]
    # Alternation over the cleaned fragments; substring matches are intended.
    expr <- paste("(", paste(brands, collapse = "|"), ")", sep = "")
    index <- grep(expr, rx$Rx_Brand)
    cat("Drug brands in database matching drug set ",i,":", "\n",sep="")
    cat(unique(as.vector((rx$Rx_Brand_Print[index]))),"\n","\n",sep=" ")
    rxids <- unique(rx$panelid[index])
    # Keep only panelists from households of an allowed size.
    ids <- rxids[rxids %in% demoids]
    panelIDs$IDs[[i]] <- ids
    panelIDs$Brands[[i]] <- unique(as.vector(rx$Rx_Brand_Print[index]))
  }
  return(panelIDs)
}
#end####
#TESTING STUFF
B11 <- "Metformin"; B12 <- "Plavix"
B21 <- "ALLOPURINOL"; B22 <- "COSOPT"
D1 <- c(B11,B12)
D2 <- c(B21,B22)
D3 <- c("not in dataset")
server <- "SQLServer_IRI"
rx <- (getRx(c(10,11,12), server))
demo <- (getDemo(server))
panelids <- getPanelIDs(D1,D2,D3, rx=rx, demo=demo)
rm(B11,B12,B22,B21,D1,D2,D3,server)
# Drug-name sets used to flag diabetes medications in the Rx data. Matching is
# done case-insensitively after stripping non-alphanumerics downstream, so the
# mixed capitalization below is harmless.
#source: www.diabetes.org/living-with-diabetes/treatment-and-care/medication/oral-medications/what-are-my-options.html?referrer=https://google.com/
#http://www.webmd.com/diabetes/sulfonylureas-for-type-2-diabetes
sulfonylureas <- c("Diabinese", "Glucotrol", "Micronase","Glynase", "Diabeta",
                   "Amaryl", "chlorpropamide", "glimepiride", "glipizide", "glyburide",
                   "tolazamide", "tolbutamide")
# Associated with increased appetite/weight (because it increases insulin production).
#http://www.diabetesselfmanagement.com/diabetes-resources/definitions/sulfonylureas/
#https://books.google.com/books?id=KhPSBQAAQBAJ&pg=PA357&lpg=PA357&dq=sulfonylureas+insulin+%22appetite%22&source=bl&ots=Ncb7ny2Q0X&sig=kqDw1osroR5dO8KP6GMOY-W4u6o&hl=en&sa=X&ved=0ahUKEwj2saXky-TNAhXC6iYKHUQcBwgQ6AEIKjAC#v=onepage&q=sulfonylureas%20insulin%20%22appetite%22&f=false
biguanides <- c("Fortamet", "Glucophage", "Glumetza", "Riomet", "Obimet",
                "Dianben", "Diaformin", "Siofor", "Metfogamma", "Janumet",
                "Kazano", "Invokamet", "Xigduo", "Synjardy", "Metaglip" ,
                "Jentaduo" , "Actoplus", "Prandimet", "Avandamet", "Kombiglyze",
                "Glumetza", "Metformin")
# Associated with decreased appetite/weight.
# (Source: the PDF Corey emailed us.)
meglitinides <- c("Prandin","Starlix")
# Associated with weight gain (because it increases insulin production).
#http://www.webmd.com/diabetes/meglitinides-for-type-2-diabetes
thiazolidinediones <- c("Avandia", "ACTOS", "Rezulin")
# Associated with weight gain.
#http://www.nytimes.com/health/guides/disease/type-2-diabetes/medications.html
dpp_4_inhibitors <- c("Januvia","Onglyza","Tradjenta","Nesina")
# Weight-neutral.
#http://care.diabetesjournals.org/content/34/Supplement_2/S276
sglt_2_inhibitors <- c("SGLT2","Invokana","Farxiga","Jardiance",
                       "canaglifozin","dapaglifozin","empaglifozin")
# Possibly associated with weight loss.
#http://care.diabetesjournals.org/content/38/3/352
alpha_glucosidase_inhibitors <- c("Precose","Glyset")
# These last two aren't that big anyways.
bile_acid_sequestrants <- c("Welchol")
# Placeholder only; no brands collected yet.
oral_combination_therapy <- NA
# Insulin is a therapy rather than a single drug; it is associated with
# weight gain.  Note "Regular Insulin" intentionally appears twice (harmless
# for grep-based matching).
#http://www.nytimes.com/health/guides/disease/type-2-diabetes/medications.html
insulin <- c("Glulisine" ,"(Apidra)",
             "Detemir" ,"(Levemir)",
             "Glargine", "(Lantus)",
             "Aspart", "(Novolog)",
             "Lispro", "(Humalog)",
             "Humulin",
             "Novolin",
             "Regular Insulin",
             "NPH",
             "Ultralente", # fixed: removed stray non-ASCII character that broke parsing
             "U-500 concentrate",
             "U-300 glargine" ,"(Toujeo)",
             "Degludec" ,"(Tresiba)",
             "U-200 Degludec", "(Tresiba)",
             "U-200 lispro", "(Humalog)",
             "Regular Insulin",
             "70/30 Insulin",
             "75/25 Insulin",
             "50/50 Insulin",
             "Inhaled insulin", "(Afrezza)")
# Prescription data kept from earlier preprocessing.
# NOTE(review): machine-specific absolute path; parameterize before sharing.
rx <- read.csv("C:\\Users\\Nathaniel Brown\\workspace\\BECR\\rx_keep.csv")
# Panel IDs on diabetes medication, split by household size (1 vs 2).
Diabetes_IDs_1 <- getNewIDs(sulfonylureas, biguanides, meglitinides, thiazolidinediones,
                            dpp_4_inhibitors, sglt_2_inhibitors, alpha_glucosidase_inhibitors,
                            bile_acid_sequestrants, oral_combination_therapy,insulin,rx=rx,HHSizes = 1)
Diabetes_IDs_2 <- getNewIDs(sulfonylureas, biguanides, meglitinides, thiazolidinediones,
                            dpp_4_inhibitors, sglt_2_inhibitors, alpha_glucosidase_inhibitors,
                            bile_acid_sequestrants, oral_combination_therapy,insulin,rx=rx,HHSizes = 2)
# Fixed: the original referenced the undefined `Diabetes_IDs`; the
# single-person-household result (renamed Diabetes_IDs_1 above) was
# presumably intended -- TODO confirm.
length(Diabetes_IDs_1[[1]][[1]])
#Does every RxID appear in purchase data?
#des08 <- sqlFetch(ch, "BAK_DES08")
#mean(panelids[[1]] %in% (des08$panelid)) #76%
#mean(panelids[[2]] %in% (des08$panelid)) #78%
|
7eb4a11b031f65e1031df70ed5102444d5f66373 | e33778b8ba153ed995589a63afc85c3d78e4cf29 | /scripts/undeveloped/database.R | 6089feb0e6336ebef3097ed1168dd7e3c10594c2 | [
"MIT"
] | permissive | bransonf/stlvacancy | 3728337d726f4ec0e5cc6f4a8549c3f6da19201c | 357e0c6856f581a97fc85591ff4253eb73d17634 | refs/heads/master | 2020-08-23T11:36:18.518321 | 2019-12-19T19:36:29 | 2019-12-19T19:36:29 | 216,606,662 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 369 | r | database.R | # Implement the Final Dataset
# NOTE(review): unfinished stub (lives under scripts/undeveloped/). The
# decryption key and every connection parameter below are blank, and a comma
# is missing between the `user` and `password` arguments, so this file does
# not yet parse or run. Kept as-is pending development.
library(RPostgreSQL)
library(cyphr); library(yaml)
# Credentials for Admin Access are Encrypted
# TODO: supply the decryption key as the second argument of decrypt_file().
cyphr::decrypt_file('creds.yaml', )
drvr <- dbDriver('PostgreSQL')
# TODO: fill in dbname/host/port/user/password from the decrypted
# credentials (and add the missing comma after the `user` argument).
connection <- dbConnect(drvr, dbname = ,
                        host = ,
                        port = ,
                        user = 
                        password = )
71c8f49b4c09333c936a54d0e2c842b94991d974 | c771e1a0d82be152def79aa1fc3e8f0c5e8ef0f4 | /scripts/r/Preprocesamiento/Muestreo.R | a4bea90dfd2a1db3ab8ff72ad8346bbc21c31dd0 | [] | no_license | jstamayor/DataMining-USAccidents | bc497a728e9e7a7498ce4481215e4d348ff6f978 | 9684734be2598d9da5f2a01ef6c47846bfc56d2a | refs/heads/main | 2023-04-09T17:40:22.677685 | 2021-04-13T23:18:58 | 2021-04-13T23:18:58 | 347,643,914 | 0 | 0 | null | null | null | null | ISO-8859-3 | R | false | false | 1,971 | r | Muestreo.R |
# Load the preprocessed dataset with imputed climate variables
# (creates `US_var_selec` in the global environment).
load("C:/Users/Daniela/Documents/Maestria/Semestre 1/Mineria de datos/Proyecto final/Datos_preproc.RData")
# Select the variables required for the sampling
subs_clim_var<-data.frame(US_var_selec$ID,US_var_selec$`Temperature(F)`,US_var_selec$`Humidity(%)`,
                          US_var_selec$`Pressure(in)`,US_var_selec$`Visibility(mi)`,
                          US_var_selec$`Wind_Speed(mph)`,US_var_selec$Severity,
                          US_var_selec$State)
# Create the `year` variable (first 4 characters of the start timestamp)
subs_clim_var$year<-substr(subs_clim_var$US_var_selec.Start_Time,1,4)
# Drop rows with missing information
clim_var_lim<-na.omit(subs_clim_var)
# Check that the proportions of `year` and `Severity` match the original data
table(clim_var_lim$year)/nrow(clim_var_lim)
table(clim_var_lim$US_var_selec.Severity)/nrow(clim_var_lim)
##### Sampling proportional to state #####
# Share of the records contributed by each state
prop_est<-data.frame(prop.table(table(clim_var_lim$US_var_selec.State)))
names(prop_est)<-c("Estado","Probabilidad")
# Draw the sample of states (20 states, probability proportional to size)
set.seed(123)
sample_accidents<-sample(seq(1:nrow(prop_est)), 20, replace = F, prob = prop_est$Probabilidad)
sample_states<-data.frame(prop_est[sample_accidents,])
# Keep only the records belonging to the sampled states
US_acc_sample<-clim_var_lim[which(clim_var_lim$US_var_selec.State%in%sample_states$Estado),]
# Sample within each stratum (state): 70% of the rows per state
set.seed(123)
Stratified_sampling <- splitstackshape::stratified(US_acc_sample, "US_var_selec.State", .7)
# Check that the proportions are preserved with respect to the population
prop.table(table(Stratified_sampling$US_var_selec.Severity))
prop.table(table(Stratified_sampling$year))
# Final file with the sampled rows and climate variables
write.csv(Stratified_sampling, file = "Muestra_variables_climaticas.csv",row.names = F)
|
53e3c939ee5f62a103ff7317db3aab0c31c84897 | cd4673aea4e41a212ff5c57e2b1d99a7ade1fbf1 | /R/sqlp_base.R | 28016c43f389bf84af0bf86a44d04c1bf586c49c | [] | no_license | cran/sdpt3r | 62765e36a08557f846fe50312a2ced20debdf153 | 6668a2744cfbe777717e3175d0a79ac542823e2c | refs/heads/master | 2021-07-15T04:50:22.141609 | 2019-02-11T07:50:03 | 2019-02-11T07:50:03 | 102,360,434 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,415 | r | sqlp_base.R | sqlp_base <- function(blk=NULL, At=NULL, C=NULL, b=NULL, OPTIONS=NULL, X0=NULL, y0=NULL, Z0=NULL){
if((is.null(blk) | is.null(At) | is.null(C) | is.null(b))){
stop("Error: Improper input methods")
}
b <- as.matrix(b)
##########################################
######## Define Local Variables #########
##########################################
#Replace .GlobalEnv with sys.frame(which = ) for each
idxdenAl <- numeric(0)
idxdenAq <- numeric(0)
nnzschur_qblk <- numeric(0)
nnzschur <- numeric(0)
nzlistschur <- numeric(0)
schurfun <- numeric(0)
schurfun_par <- numeric(0)
diagR <- numeric(0)
diagRold <- numeric(0)
exist_analytic_term <- numeric(0)
existlowrank <- numeric(0)
matfct_options <- numeric(0)
matfct_options_old <- numeric(0)
nnzmat <- numeric(0)
nnzmatold <- numeric(0)
numpertdiashschur <- numeric(0)
printlevel <- numeric(0)
smallblkdim <- numeric(0)
solve_ok <- numeric(0)
spdensity <- numeric(0)
use_LU <- numeric(0)
##################################
isemptyAtb <- 0
if(is.null(At) & is.null(b)){
#Add Redundant Constraint
b <- 0
At <- ops(ops(blk, "identity"), "*", -1)
numblk <- nrow(blk)
blk[[numblk+1, 1]] <- "l"
blk[[numblk+1, 2]] <- 1
At[[numblk+1,1]] <- 1
C[[numblk+1,1]] <- 0
isemptyAtb <- 1
}
#Set default parameters from sqlparameters (OPTIONS input not used)
par <- list(vers = 0,
gam = 0,
predcorr = 1,
expon = 1,
gaptol = 1e-8,
inftol = 1e-8,
steptol = 1e-6,
maxit = 100,
printlevel = 3,
stoplevel = 1,
scale_data = 0,
spdensity = 0.4,
rmdepconstr = 0,
smallblkdim = 50,
parbarrier = c(),
schurfun = matrix(list(),nrow=nrow(blk),ncol=1),
schurfun_par = matrix(list(),nrow=nrow(blk),ncol=1),
blkdim = c(),
ublksize = c(),
depconstr = c(),
AAt = c(),
normAAt = c(),
numcolAt = c(),
permA = c(),
permZ = c(),
isspA = c(),
nzlist = c(),
nzlistAsum = c(),
isspAy = c(),
nzlistAy = c(),
iter = c(),
obj = c(),
relgap = c(),
pinfeas = c(),
dinfeas = c(),
rp = c(),
y = c(),
dy = c(),
normX = c(),
ZpATynorm = c())
##
parbarrier <- matrix(list(),nrow=nrow(blk),ncol=1)
for(p in 1:nrow(blk)){
pblk <- blk[[p,1]]
if(pblk == "s" | pblk == "q"){
parbarrier[[p,1]] <- matrix(0, nrow=1, ncol=length(blk[[p,2]]))
}else if(pblk == "l" | pblk == "u"){
parbarrier[[p,1]] <- matrix(0, nrow=1, ncol=sum(blk[[p,2]]))
}
}
parbarrier_0 <- parbarrier
if(!is.null(OPTIONS) | length(OPTIONS) > 0){
if(!is.null(OPTIONS$vers)) par$vers <- OPTIONS$vers
if(!is.null(OPTIONS$predcorr)) par$predcorr <- OPTIONS$predcorr
if(!is.null(OPTIONS$gam)) par$gam <- OPTIONS$gam
if(!is.null(OPTIONS$expon)) par$expon <- OPTIONS$expon
if(!is.null(OPTIONS$gaptol)) par$gaptol <- OPTIONS$gaptol
if(!is.null(OPTIONS$inftol)) par$inftol <- OPTIONS$inftol
if(!is.null(OPTIONS$steptol)) par$steptol <- OPTIONS$steptol
if(!is.null(OPTIONS$maxit)) par$maxit <- OPTIONS$maxit
if(!is.null(OPTIONS$printlevel)) par$printlevel <- OPTIONS$printlevel
if(!is.null(OPTIONS$stoplevel)) par$stoplevel <- OPTIONS$stoplevel
if(!is.null(OPTIONS$scale_data)) par$scale_data <- OPTIONS$scale_data
if(!is.null(OPTIONS$spdensity)) par$spedensity <- OPTIONS$spdensity
if(!is.null(OPTIONS$rmdepconstr)) par$rmdepconstr <- OPTIONS$rmdepconstr
if(!is.null(OPTIONS$smallblkdim)) par$smallblkdim <- OPTIONS$smallblkdim
if(!is.null(OPTIONS$parbarrier)){
parbarrier <- OPTIONS$parbarrier
if(is.null(parbarrier)) parbarrier <- parbarrier_0
if(!is.list(parbarrier)){
tmp <- parbarrier
parbarrier <- matrix(list(),1,1)
parbarrier[[1]] <- tmp
}
if(max(dim(as.matrix(parbarrier))) < nrow(blk)){
len <- max(dim(as.matrix(parbarrier)))
parbarrier_1 <- matrix(list(),nrow(blk),1)
for(i in 1:len){
parbarrier_1[[i]] <- parbarrier[[i]]
}
for(i in (len+1):nrow(blk)){
parbarrier_1[[i]] <- parbarrier_0[[i]]
}
parbarrier <- parbarrier_1
}
}
}
if(ncol(blk) > 2){
par$smallblkdim <- 0
}
######################
##Validate SQLP data##
######################
out <- validate(blk,At,C,b,par,parbarrier)
blk <- out$blk
At <- out$At
C <- out$C
b <- out$b
blkdim <- out$dim
numblk <- out$nnblk
parbarrier <- out$parbarrier
out <- convertcmpsdp(blk, At, C, b)
blk <- out$bblk
At <- out$AAt
C <- out$CC
b <- out$bb
iscmp <- out$iscmp
if(is.null(X0) | is.null(y0) | is.null(Z0)){
#create a starting point
out <- infeaspt(blk, At, C, b)
X0 <- out$X0
y0 <- out$y0
Z0 <- out$Z0
par$startpoint <- 1
}else{
par$startpoint <- 2
out <- validate_startpoint(blk, X0,Z0,par$spdensity,iscmp)
X0 <- out$X
Z0 <- out$Z
}
##############################
##DETECT UNRESTRICTED BLOCKS##
##############################
user_supplied_schurfun <- 0
for(p in 1:nrow(blk)){
if(!is.null(par$schurfun[[p]])){
user_supplied_schurfun <- 1
}
}
if(user_supplied_schurfun == 0){
out <- detect_ublk(blk,At,C,parbarrier,X0,Z0)
blk2 <- out$blk2
At2 <- out$At2
C2 <- out$C2
ublkinfo <- out$ublkinfo
parbarrier2 <- out$parbarrier2
X02 <- out$X2
Z02 <- out$Z2
}else{
blk2 <- blk
At2 <- At
C2 <- C
parbarrier2 <- parbarrier
X02 <- X0
Z02 <- Z0
ublkinfo <- matrix(list(), nrow(blk3), 1)
}
ublksize <- blkdim[4]
for(p in 1:nrow(ublkinfo)){
if(!is.null(ublkinfo[[p,1]])){
ublksize <- ublksize + max(dim(ublkinfo[[p,1]]))
}
}
################################
#####Detect diagonal blocks#####
################################
if(user_supplied_schurfun == 0){
out <- detect_lblk(blk2,At2,C2,b,parbarrier2,X02,Z02)
blk3 <- as.matrix(out$blk)
At3 <- as.matrix(out$At)
C3 <- as.matrix(out$C)
diagblkinfo <- out$diagblkinfo
diagblkchange <- out$blockchange
parbarrier3 <- as.matrix(out$parbarrier)
X03 <- as.matrix(out$X)
Z03 <- as.matrix(out$Z)
}else{
blk3 <- blk2
At3 <- At2
C3 <- C2
parbarrier3 <- parbarrier2
X03 <- X02
Z03 <- Z02
diagblkchange <- 0
diagblkinfo <- matrix(list(), nrow(blk3), 1)
}
#################################
######### MAIN SOLVER ###########
#################################
exist_analytic_term <- 0
for(p in 1:nrow(blk3)){
idx <- which(parbarrier3[[p,1]] > 0)
if(length(idx) > 0){
exist_analytic_term <- 1
}
}
if(par$vers == 0){
if(blkdim[1]){
par$vers <- 1
}else{
par$vers <- 2
}
}
par$blkdim <- blkdim
par$ublksize <- ublksize
out <- sqlp_main(blk3, At3, C3, b, par, parbarrier3, X03, y0, Z03)
obj <- out$obj
X3 <- out$X
y <- out$y
Z3 <- out$Z
info <- out$info
runhist <- out$runhist
pobj <- info$obj[1]
dobj <- info$obj[2]
################################################
#Recover Semidefinite Blocks from Linear Blocks#
################################################
if(any(diagblkchange == 1)){
X2 <- matrix(list(),nrow(blk),1)
Z2 <- matrix(list(),nrow(blk),1)
count <- 0
for(p in 1:nrow(blk)){
n <- sum(blk[[p,2]])
blkno <- diagblkinfo[[p,1]]
idxdiag <- diagblkinfo[[p,2]]
idxnondiag <- diagblkinfo[[p,3]]
if(length(idxdiag) > 0){
len <- length(idxdiag)
Xtmp <- rbind(cbind(idxdiag,idxdiag,X3[[nrow(X3)]][count+c(1:len)]))
Ztmp <- rbind(cbind(idxdiag,idxdiag,Z3[[nrow(Z3)]][count+c(1:len)]))
if(length(idxnondiag) > 0){
tmp <- which(X3[[blkno]] != 0, arr.ind=TRUE)
ii <- tmp[,1]
jj <- tmp[,2]
vv <- X3[[blkno]][ which(X3[[blkno]] != 0)]
Xtmp <- rbind(Xtmp,cbind(idxnondiag[ii],idxnondiag[jj],vv))
tmp <- which(Z3[[blkno]] != 0, arr.ind=TRUE)
ii <- tmp[,1]
jj <- tmp[,2]
vv <- Z3[[blkno]][ which(Z3[[blkno]] != 0)]
Ztmp <- rbind(Ztmp,cbind(idxnondiag[ii],idxnondiag[jj],vv))
}
X2[[p]] <- matrix(0,n,n)
for(i in 1:nrow(Xtmp)){
X2[[p]][Xtmp[i,1],Xtmp[i,2]] <- Xtmp[i,3]
}
Z2[[p]] <- matrix(0,n,n)
for(i in 1:nrow(Ztmp)){
Z2[[p]][Ztmp[i,1],Ztmp[i,2]] <- Ztmp[i,3]
}
count <- count + len
}else{
X2[[p]] <- X3[[blkno]]
Z2[[p]] <- Z3[[blkno]]
}
}
}else{
X2 <- X3
Z2 <- Z3
}
################################################
# Recover linear block from unrestricted block #
################################################
numblk <- nrow(blk)
numblknew <- numblk
X <- matrix(list(),numblk,1)
Z <- matrix(list(),numblk,1)
for(p in 1:numblk){
n <- blk[[p,2]]
if(is.null(ublkinfo[[p,1]])){
X[[p]] <- X2[[p]]
Z[[p]] <- Z2[[p]]
}else{
Xtmp <- matrix(0,n,1)
Ztmp <- matrix(0,n,1)
Xtmp[ublkinfo[[p,1]]] <- pmax(0,X2[[p]])
Xtmp[ublkinfo[[p,2]]] <- pmax(0,-X2[[p]])
Ztmp[ublkinfo[[p,1]]] <- pmax(0,Z2[[p]])
Ztmp[ublkinfo[[p,2]]] <- pmax(0,-Z2[[p]])
if(!is.null(ublkinfo[[p,3]])){
numblknew <- numblknew + 1
Xtmp[ublkinfo[[p,3]]] <- X2[[numblknew]]
Ztmp[ublkinfo[[p,3]]] <- Z2[[numblknew]]
}
X[[p]] <- Xtmp
Z[[p]] <- Ztmp
}
}
output <- list(X=X, y=y, Z=Z, pobj=pobj, dobj=dobj)
return(output)
} |
a22ad107c17dd593b007701b8a118cc43334db3c | 4459eb5432916b4ad6c5c5d911b50c9d2fec1ad5 | /R/ActivePremium.R | 837c2702e0eaf200684ba83d20fc94af09afc31f | [] | no_license | braverock/PerformanceAnalytics | 057af55b0a4ddeb4befcc02e36a85f582406b95c | 49a93f1ed6e2e159b63bf346672575f3634ed370 | refs/heads/master | 2023-08-03T10:18:27.115592 | 2023-03-29T09:23:17 | 2023-03-29T09:23:17 | 58,736,268 | 209 | 113 | null | 2023-05-23T17:46:08 | 2016-05-13T12:02:42 | R | UTF-8 | R | false | false | 2,985 | r | ActivePremium.R | #' Active Premium or Active Return
#'
#' The return on an investment's annualized return minus the benchmark's
#' annualized return.
#'
#' Active Premium = Investment's annualized return - Benchmark's annualized
#' return
#'
#' Also commonly referred to as 'active return'.
#'
#' @param Ra return vector of the portfolio
#' @param Rb return vector of the benchmark asset
#' @param scale number of periods in a year
#' (daily scale = 252, monthly scale = 12, quarterly scale = 4)
#' @param ... any other passthru parameters to Return.annualized
#' (e.g., \code{geometric=FALSE})
#' @author Peter Carl
#' @seealso \code{\link{InformationRatio}} \code{\link{TrackingError}}
#' \code{\link{Return.annualized}}
#' @references Sharpe, W.F. The Sharpe Ratio,\emph{Journal of Portfolio
#' Management}, Fall 1994, 49-58.
###keywords ts multivariate distribution models
#' @examples
#'
#' data(managers)
#' ActivePremium(managers[, "HAM1", drop=FALSE], managers[, "SP500 TR", drop=FALSE])
#' ActivePremium(managers[,1,drop=FALSE], managers[,8,drop=FALSE])
#' ActivePremium(managers[,1:6], managers[,8,drop=FALSE])
#' ActivePremium(managers[,1:6], managers[,8:7,drop=FALSE])
#' @rdname ActivePremium
#' @aliases
#' ActivePremium
#' ActiveReturn
#' @export ActiveReturn ActivePremium
ActiveReturn <- ActivePremium <- function (Ra, Rb, scale = NA, ...)
{ # @author Peter Carl
    # FUNCTION
    # Coerce both inputs into the time-series form used by the package.
    Ra = checkData(Ra)
    Rb = checkData(Rb)
    # Enumerate every portfolio-column / benchmark-column pairing.
    Ra.ncols = NCOL(Ra)
    Rb.ncols = NCOL(Rb)
    pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols)
    # If no scale was given, infer periods-per-year from the data frequency;
    # sub-daily data is rejected because annualization is not meaningful.
    if(is.na(scale)) {
        freq = periodicity(Ra)
        switch(freq$scale,
            minute = {stop("Data periodicity too high")},
            hourly = {stop("Data periodicity too high")},
            daily = {scale = 252},
            weekly = {scale = 52},
            monthly = {scale = 12},
            quarterly = {scale = 4},
            yearly = {scale = 1}
        )
    }
    # Active premium for one asset/benchmark pair: the difference of the
    # annualized returns over their overlapping (non-NA) observations.
    ap <- function (Ra, Rb, scale)
    {
        merged = na.omit(merge(Ra, Rb)) # align
        ap = (Return.annualized(merged[,1], scale = scale, ...)
              - Return.annualized(merged[,2], scale = scale, ...))
        ap
    }
    # Apply ap() to each (asset, benchmark) index pair.
    result = apply(pairs, 1, FUN = function(n, Ra, Rb, scale) ap(Ra[,n[1]], Rb[,n[2]], scale), Ra = Ra, Rb = Rb, scale = scale)
    if(length(result) == 1)
        return(result)
    else {
        # Multiple pairings: reshape to assets x benchmarks, then transpose so
        # rows are labelled "Active Premium: <benchmark>" and columns are assets.
        dim(result) = c(Ra.ncols, Rb.ncols)
        colnames(result) = paste("Active Premium:", colnames(Rb))
        rownames(result) = colnames(Ra)
        return(t(result))
    }
}
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2020 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
|
d249e4ec8590ee22ab6048489061bebfd48de859 | caa801ed1f7212b51e705be70f0cb2609036e4ab | /inst/tinytest/test_lamW.R | 7538e68a32f8f31e557da21be9a8ecb64e6832ec | [
"LicenseRef-scancode-dco-1.1"
] | no_license | aadler/lamW | a57fa45e872e45b81b869f2e20b50d18fbf609cc | aed91c4b7cbb130dc35b68c5e28e879cd9bb8122 | refs/heads/master | 2023-08-29T07:00:05.471058 | 2023-08-07T16:19:42 | 2023-08-07T16:19:42 | 275,083,232 | 1 | 1 | NOASSERTION | 2023-08-06T20:26:26 | 2020-06-26T05:47:21 | C++ | UTF-8 | R | false | false | 2,325 | r | test_lamW.R | # Copyright (c) 2015, Avraham Adler All rights reserved
# SPDX-License-Identifier: BSD-2-Clause
# tinytest unit tests for lambertW0() (principal branch) and lambertWm1()
# (secondary branch). Most tests exercise the inverse identity
# W(x) * exp(W(x)) == x at random and boundary inputs.
tol <- sqrt(.Machine$double.eps)
# Test that functions return proper values
principleBranchAnswers <- runif(5000, min = -1, max = 703.22703310477016)
principleBranchTests <- principleBranchAnswers * exp(principleBranchAnswers)
secondaryBranchAnswers <- runif(5000, min = -714.96865723796657, max = -1)
secondaryBranchTests <- secondaryBranchAnswers * exp(secondaryBranchAnswers)
# Test that function works properly in general
expect_equal(lambertW0(principleBranchTests), principleBranchAnswers,
             tolerance = tol)
expect_equal(lambertWm1(secondaryBranchTests), secondaryBranchAnswers,
             tolerance = tol)
# Test that function works properly for larger numbers
expect_equal(lambertW0(1000) * exp(lambertW0(1000)), 1000, tolerance = tol)
# Test that function behaves properly near 0
V0 <- seq(-2e-2, 2e-2, 2e-6)
V0E <- V0 * exp(V0)
expect_equal(lambertW0(V0E), V0, tolerance = tol)
# Test that W0 behaves properly VERY close to 0 (where W0(x) ~ x)
expect_identical(lambertW0(1e-275), 1e-275)
expect_identical(lambertW0(7e-48), 7e-48)
expect_identical(lambertW0(-3.81e-71), -3.81e-71)
# Test that function behaves properly near -1/e (the branch point W = -1)
expect_identical(lambertW0(-1 / exp(1)), -1)
expect_identical(lambertWm1(-1 / exp(1)), -1)
# Test that function behaves properly near its asymptotes
L <- seq(1e-6 - exp(-1), -0.25, 3e-6)
V0 <- lambertW0(L)
vm1 <- lambertWm1(L)
expect_equal(V0 * exp(V0), L, tolerance = tol)
expect_equal(vm1 * exp(vm1), L, tolerance = tol)
vm1 <- seq(-714, -714.96865, -3e-5)
vm1E <- vm1 * exp(vm1)
expect_equal(lambertWm1(vm1E), vm1, tolerance = tol)
# Test that function behaves properly at its asymptotes
expect_identical(lambertW0(Inf), Inf)
expect_identical(lambertWm1(0), -Inf)
# Test that NaNs are returned for values outside domain
expect_true(is.nan(lambertW0(-Inf)))
expect_true(is.nan(lambertW0(-1)))
expect_true(is.nan(lambertW0(c(1, -1)))[[2]])
expect_true(is.nan(lambertWm1(-Inf)))
expect_true(is.nan(lambertWm1(Inf)))
expect_true(is.nan(lambertWm1(-0.5))) # x < -M_1_E
expect_true(is.nan(lambertWm1(1.2))) # x > 0
# Test that integers are converted to reals for principal branch
expect_identical(lambertW0(c(-1L, 0L, 1L, 2L, 3L, 4L)),
                 lambertW0(c(-1, 0, 1, 2, 3, 4)))
|
412a8b3ce747cb74dffb5b0b0a39767c059dfe2d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/diffpriv/examples/DPMechNumeric-class.Rd.R | 88450fa89f90b0b5a2f03e8666a5b2ccdf0f8cd5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 643 | r | DPMechNumeric-class.Rd.R | library(diffpriv)
### Name: DPMechNumeric-class
### Title: A virtual S4 class for differentially-private numeric
### mechanisms.
### Aliases: DPMechNumeric-class DPMechNumeric show,DPMechNumeric-method
### sensitivityNorm,DPMechNumeric-method
### releaseResponse,DPMechNumeric,DPParamsEps-method
### ** Examples
f <- function(xs) mean(xs)
n <- 100
m <- DPMechLaplace(sensitivity = 1/n, target = f, dims = 1)
X1 <- runif(n)
X2 <- runif(n)
sensitivityNorm(m, X1, X2)
f <- function(xs) mean(xs)
n <- 100
m <- DPMechLaplace(sensitivity = 1/n, target = f, dims = 1)
X <- runif(n)
p <- DPParamsEps(epsilon = 1)
releaseResponse(m, p, X)
|
7719e9d2abbee28485dce7d2e5351be4f2244c37 | becd75b18af72191be27f00ce22ba464d2e91b84 | /man/g_sex.Rd | 862616666487f77bfb908aa3c8bf7c7562c0fc6a | [
"MIT"
] | permissive | fidelmorla/drcovidplots | 7247f8d4ddc961034be903725f34d7dceed2ebed | a44fe6338be064607cd846089dcc02a76a3f139f | refs/heads/master | 2022-11-16T08:49:23.898762 | 2020-07-09T19:18:56 | 2020-07-09T19:18:56 | 260,526,704 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 756 | rd | g_sex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g_sex.R
\name{g_sex}
\alias{g_sex}
\title{Distribution by sex of COVID19 positives in the Dominican Republic}
\usage{
g_sex(saveplot = FALSE, savepng = FALSE)
}
\arguments{
\item{saveplot}{Logical. Should save the ggplot objet to the \code{.GlobalEnv}? Default \code{FALSE}.}
\item{savepng}{Logical. Should save a png version of the plot? Default \code{FALSE}.}
}
\value{
A graph of the distribution of COVID19 positives by sex; a copy is also
saved in png format to the working directory defined in \code{setwd()}.
}
}
\description{
This function graphs the distribution according to the sex of the positives.
}
\examples{
g_sex()
g_sex(saveplot = FALSE, savepng = TRUE)
}
|
a638bd0165f795d39846bb7265949e93e992f2b5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/geoCount/examples/rhoPowerExp.Rd.R | 1ccc7cf1b18f92f1ac729ddb2387419874eb7a25 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 248 | r | rhoPowerExp.Rd.R | library(geoCount)
### Name: rhoPowerExp
### Title: Powered Exponential Correlation Function
### Aliases: rhoPowerExp
### Keywords: Correlation
### ** Examples
## Not run:
##D rhoPowerExp(0.3, a=0.1, k=1)
## End(Not run)
|
424042a68d1b0e553c8f945683596f631c592d0c | fea57649b5ca68431d41a49fc9e73dd018e5007d | /test/server.R | a85fcfff826cc6789069cf4d4b99628acef0bf4e | [] | no_license | ThePaulTanner/R | 366fd6e6445c45f9efe393f2067b2c640d815375 | 7eeafdf7c53920628f07dc03824e429c4521521c | refs/heads/master | 2021-07-11T07:22:17.307185 | 2017-10-07T21:47:33 | 2017-10-07T21:47:33 | 106,131,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 285 | r | server.R | shinyServer(function(input, output) {
x <- 1:10
y <- x^2
output$main_plot <- renderPlot({
plot(x, y)}, height = 200, width = 300)
output$main_plot2 <- renderPlot({
plot(x, y, cex=input$opt.cex, cex.lab=input$opt.cexaxis) }, height = 400, width = 600 )
} ) |
7cb8a4cd4ba2945306cff4c09087353be9cc5b0d | 02b5ba32a64d0be0e09d1f1c463239999e5670ed | /tests/testthat/test-parameters.R | 40cdfdd78e647310f7cece9449241aa622cfbe27 | [
"MIT"
] | permissive | mrc-ide/sircovid | 2b4c169eaeca2d59cb684537d284d7b2942a2c08 | 8df142d85859deebf96eae3c69524aace3dbd5cd | refs/heads/master | 2023-07-09T23:15:06.853906 | 2023-06-06T11:21:50 | 2023-06-06T11:21:50 | 252,092,864 | 30 | 6 | NOASSERTION | 2023-06-21T10:22:45 | 2020-04-01T06:38:31 | C++ | UTF-8 | R | false | false | 7,607 | r | test-parameters.R | context("parameters")
# Tests for the sircovid parameter helpers: piecewise-linear and
# piecewise-constant beta schedules, severity parameter loading, shared
# parameter assembly, and step expansion.
test_that("single piecewise linear value", {
  expect_identical(sircovid_parameters_piecewise_linear(NULL, pi, 0.1), pi)
  expect_identical(sircovid_parameters_piecewise_linear(NULL,
                                                        matrix(c(1, pi),
                                                               nrow = 1),
                                                        0.1),
                   matrix(c(1, pi), nrow = 1))
  expect_error(sircovid_parameters_piecewise_linear(NULL, numeric(0), 0.1),
               "As 'date' is NULL, expected single value")
  expect_error(sircovid_parameters_piecewise_linear(NULL, 1:5, 0.1),
               "As 'date' is NULL, expected single value")
})
test_that("varying piecewise linear value", {
  ## TODO: this should get a better set of tests, as it's complex
  ## enough
  date <- as_sircovid_date(c("2020-02-01", "2020-02-10", "2020-02-29"))
  beta <- sircovid_parameters_piecewise_linear(date, 1:3, 0.5)
  expect_equal(
    beta,
    c(rep(1, 64),
      seq(1, 2, length.out = 19),
      seq(2, 3, length.out = 39)[-1]))
  # Matrix-valued input: each column follows the same interpolation.
  date <- as_sircovid_date(c("2020-02-01", "2020-02-10", "2020-02-29"))
  beta <- sircovid_parameters_piecewise_linear(date, matrix(1:3, 3, 2), 0.5)
  expect_equal(
    beta,
    matrix(c(rep(1, 64),
             seq(1, 2, length.out = 19),
             seq(2, 3, length.out = 39)[-1]), 121, 2))
})
test_that("piecewise linear date and value have to be the same length", {
  date <- c(32, 41, 60)
  expect_error(
    sircovid_parameters_piecewise_linear(date, 1:2, 0.5),
    "'date' and 'value' must have the same length")
  expect_error(
    sircovid_parameters_piecewise_linear(date, 1:4, 0.5),
    "'date' and 'value' must have the same length")
})
test_that("can't use a single piecewise linear date/value", {
  expect_error(
    sircovid_parameters_piecewise_linear(32, 1, 0.5),
    "Need at least two dates and values for a varying piecewise linear")
})
test_that("piecewise linear dates must be increasing", {
  expect_error(
    sircovid_parameters_piecewise_linear(c(32, 41, 41, 64), 1:4, 0.5),
    "'date' must be strictly increasing")
})
test_that("piecewise linear dates must be sircovid_dates", {
  expect_error(
    sircovid_parameters_piecewise_linear(as_date(c("2020-02-01", "2020-02-10")),
                                         1:2, 0.5),
    "'date' must be numeric - did you forget sircovid_date()?")
  expect_error(
    sircovid_parameters_piecewise_linear(c(-10, 41, 60), 1:3, 0.5),
    "Negative dates, sircovid_date likely applied twice")
})
test_that("single piecewise constant value", {
  expect_identical(sircovid_parameters_piecewise_constant(NULL, pi, 0.1), pi)
  expect_error(sircovid_parameters_piecewise_constant(NULL, numeric(0), 0.1),
               "As 'date' is NULL, expected single value")
  expect_error(sircovid_parameters_piecewise_constant(NULL, 1:5, 0.1),
               "As 'date' is NULL, expected single value")
})
test_that("varying piecewise constant value", {
  date <- as_sircovid_date(c("2019-12-31", "2020-02-10", "2020-02-29"))
  y <- sircovid_parameters_piecewise_constant(date, 1:3, 0.5)
  expect_equal(
    y,
    c(rep(1, 82),
      rep(2, 38),
      3))
})
test_that("piecewise constant date and value have to be the same length", {
  date <- c(0, 41, 60)
  expect_error(
    sircovid_parameters_piecewise_constant(date, 1:2, 0.5),
    "'date' and 'value' must have the same length")
  expect_error(
    sircovid_parameters_piecewise_constant(date, 1:4, 0.5),
    "'date' and 'value' must have the same length")
})
test_that("piecewise constant first date must be 0", {
  expect_error(
    sircovid_parameters_piecewise_constant(c(20, 31, 41, 64), 1:4, 0.5),
    "As 'date' is not NULL, first date should be 0")
})
test_that("piecewise constant dates must be increasing", {
  expect_error(
    sircovid_parameters_piecewise_constant(c(0, 41, 41, 64), 1:4, 0.5),
    "'date' must be strictly increasing")
})
test_that("piecewise constant dates must be sircovid_dates", {
  expect_error(
    sircovid_parameters_piecewise_constant(
      as_date(c("2020-02-01", "2020-02-10")), 1:2, 0.5),
    "'date' must be numeric - did you forget sircovid_date()?")
  expect_error(
    sircovid_parameters_piecewise_constant(c(-10, 41, 60), 1:3, 0.5),
    "Negative dates, sircovid_date likely applied twice")
})
test_that("can read the default severity file", {
  data <- sircovid_parameters_severity(NULL)
  expect_identical(
    sircovid_parameters_severity(severity_default()),
    data)
  expect_vector_equal(lengths(data), 17)
  expect_setequal(
    names(data),
    c("p_star", "p_C", "p_G_D", "p_H_D",
      "p_ICU_D", "p_W_D", "p_ICU", "p_R",
      "p_sero_pos_1", "p_sero_pos_2", "p_H"))
  # NOTE(review): "p_serocoversion" is not among the expected names above, so
  # data$p_serocoversion is presumably NULL here - confirm this line still
  # checks what was intended (seroconversion?).
  expect_vector_equal(data$p_serocoversion, data$p_serocoversion[[1]])
  expect_equal(
    data$p_G_D, rep(0.05, 17))
  expect_equal(
    data$p_star, rep(0.2, 17))
})
test_that("can validate a severity input", {
  d <- severity_default()
  expect_error(
    sircovid_parameters_severity(d[-1, ]),
    "Elements missing from 'data': 'p_C'")
})
test_that("can reprocess severity", {
  # Passing an already-processed severity object through is a no-op.
  s <- sircovid_parameters_severity(NULL)
  expect_identical(
    sircovid_parameters_severity(s),
    s)
  expect_error(
    sircovid_parameters_severity(s[-1]),
    "Elements missing from 'params': 'p_star'")
})
test_that("shared parameters accepts a beta vector", {
  date <- sircovid_date("2020-02-01")
  beta_date <- sircovid_date(c("2020-02-01", "2020-02-14", "2020-03-15"))
  beta_value <- c(3, 1, 2)
  pars <- sircovid_parameters_shared(date, "england", beta_date, beta_value,
                                     "piecewise-linear", NULL, 1, 10)
  expect_equal(
    pars$beta_step,
    sircovid_parameters_piecewise_linear(beta_date, beta_value, 0.25))
  beta_date <- sircovid_date(c("2019-12-31", "2020-02-14", "2020-03-15"))
  beta_value <- c(3, 1, 2)
  pars <- sircovid_parameters_shared(date, "england", beta_date, beta_value,
                                     "piecewise-constant", NULL, 1, 10)
  expect_equal(
    pars$beta_step,
    sircovid_parameters_piecewise_constant(beta_date, beta_value, 0.25))
  expect_error(pars <- sircovid_parameters_shared(date, "england", beta_date,
                                                  beta_value,
                                                  "quadratic", NULL, 1, 10),
               "'beta_type' must be 'piecewise-linear' or 'piecewise-constant'")
})
test_that("shared parameters", {
  date <- sircovid_date("2020-02-01")
  pars <- sircovid_parameters_shared(date, "england", NULL, 0.1,
                                     "piecewise-linear", NULL, 1, 10)
  expect_setequal(
    names(pars),
    c("hosp_transmission", "ICU_transmission", "G_D_transmission",
      "dt", "steps_per_day", "n_age_groups",
      "beta_step", "population", "seed_step_start", "seed_value"))
  expect_equal(pars$beta_step, 0.1)
})
test_that("can expand beta", {
  date <- sircovid_date(c("2020-02-01", "2020-02-14", "2020-03-15"))
  value <- c(3, 1, 2)
  beta <- sircovid_parameters_piecewise_linear(date, value, 1)
  # The implied time series looks like this (res1 is illustrative only; it is
  # not asserted against):
  t1 <- seq(0, date[[3]])
  res1 <- cbind(t1, beta, deparse.level = 0)
  expect_equal(sircovid_parameters_expand_step(t1, beta), beta)
  # Times beyond the schedule hold the final beta value.
  t2 <- seq(0, 100, by = 1)
  beta2 <- sircovid_parameters_expand_step(t2, beta)
  expect_equal(beta2[seq_along(beta)], beta)
  expect_equal(beta2[-seq_along(beta)], rep(beta[length(beta)], 25))
  # Times covering a prefix of the schedule return the matching prefix.
  t3 <- t2[1:65]
  beta3 <- sircovid_parameters_expand_step(t3, beta)
  expect_equal(beta3, beta[1:65])
})
|
7c85930842a6aabe9235b73e517ed3faba5f1d6c | 2b73cb9ae681bc43be9c1d53eee9e6116a1af173 | /R/wt.R | 6e4b19663d0ef4f6caea7a2a65f55552aabd0de5 | [] | no_license | cran/wsyn | 8a161b1c239875cddb0a802e8393b4a1993725e9 | 72e97f83500ebc44fb1a15d968c426ad478d4f9f | refs/heads/master | 2021-08-22T22:28:55.339113 | 2021-06-18T20:10:02 | 2021-06-18T20:10:02 | 167,043,420 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,107 | r | wt.R | #' Computes the wavelet transform of a timeseries. Also the creator function for the
#' \code{wt} class.
#'
#' Computes the wavelet transform of a timeseries. Also the creator function for the
#' \code{wt} class. The \code{wt} class inherits from the \code{tts} class, which
#' inherits from the \code{list} class.
#'
#' @param t.series A timeseries of real values
#' @param times A vector of time step values (e.g., years), spacing 1
#' @param scale.min The smallest scale of fluctuation that will be examined. At least 2.
#' @param scale.max.input The largest scale of fluctuation that is guaranteed to be examined
#' @param sigma The ratio of each time scale examined relative to the next timescale. Should be greater than 1.
#' @param f0 The ratio of the period of fluctuation to the width of the envelope. Defaults to 1.
#'
#' @return \code{wt} returns an object of class \code{wt}. Slots are:
#' \item{values}{A matrix of complex numbers, of dimensions \code{length(t.series)} by the number of timescales. Entries not considered reliable (longer timescales, near the edges of the time span) are set to NA.}
#' \item{times}{The time steps specified (e.g. years)}
#' \item{wtopt}{The inputted wavelet transform options scale.min, scale.max.input, sigma, f0 in a list}
#' \item{timescales}{The timescales (1/frequency) computed for the wavelet transform}
#' \item{dat}{The data vector from which the transform was computed}
#'
#' @note Important for interpreting the phase: the phases grow through time, i.e., they turn anti-clockwise.
#'
#' @author Lawrence Sheppard \email{lwsheppard@@ku.edu}, Jonathan Walter
#' \email{jaw3es@@virginia.edu}, Daniel Reuman \email{reuman@@ku.edu}
#'
#' @seealso \code{\link{wt_methods}}, \code{\link{tts}}, \code{\link{plotmag}}, \code{\link{plotphase}},
#' \code{browseVignettes("wsyn")}
#'
#' @examples
#' time1<-1:100
#' time2<-101:200
#' ts1p1<-sin(2*pi*time1/15)
#' ts1p2<-0*time1
#' ts2p1<-0*time2
#' ts2p2<-sin(2*pi*time2/8)
#' ts1<-ts1p1+ts1p2
#' ts2<-ts2p1+ts2p2
#' ts<-c(ts1,ts2)
#' ra<-rnorm(200,mean=0,sd=0.5)
#' t.series<-ts+ra
#' t.series<-t.series-mean(t.series)
#' times<-c(time1,time2)
#' res<-wt(t.series, times)
#'
#' @export
#' @importFrom stats fft
wt <- function(t.series, times, scale.min=2, scale.max.input=NULL, sigma=1.05, f0=1)
{
  # Continuous wavelet transform of a single (mean-zero) time series,
  # computed in the frequency domain via FFT convolution with a complex
  # Morlet-style wavelet. Returns a "wt"/"tts" object (see roxygen above).

  #error checking (helpers defined elsewhere in the package)
  errcheck_tsdat(times,t.series,"wt")
  errcheck_wavparam(scale.min,scale.max.input,sigma,f0,times,"wt")

  # If no maximum scale was supplied, use the full series length.
  if(is.null(scale.max.input)){
    scale.max<-length(t.series)
  }
  else{
    scale.max<-scale.max.input
  }
  # A one-row/one-column matrix input is flattened to a plain vector.
  if (is.matrix(t.series))
  {
    t.series<-as.vector(t.series)
  }

  #for return: record the transform options exactly as supplied
  wtopt<-list(scale.min=scale.min,scale.max.input=scale.max.input,
              sigma=sigma,f0=f0)

  #determine how many frequencies are in the range and make receptacle for results
  scale.min <- f0*scale.min
  scale.max <- f0*scale.max
  m.max <- floor(log(scale.max/scale.min)/log(sigma))+1 #number of timescales
  s2 <- scale.min*sigma^seq(from=0, by=1, to=m.max) #widths of wavelet envelopes
  # margin2[k]: half-width at which envelope k has decayed to 0.5 —
  # values closer than this to the series edges are deemed unreliable.
  margin2 <- ceiling(sqrt(-(2*s2*s2)*log(0.5)))
  translength <- length(t.series)
  # m.last: largest timescale whose reliable region is non-empty.
  m.last <- max(which(margin2<0.5*translength))
  result <- matrix(NA, nrow=translength, ncol=m.max+1)

  #wavsize determines the size of the calculated wavelet
  # (half-width where the widest usable envelope decays to 0.001;
  # the series is zero-padded by 2*wavsize before the FFT).
  wavsize <- ceiling(sqrt(-(2*s2[m.last]*s2[m.last])*log(0.001)));

  #preparations for finding components
  Y <- stats::fft(c(t.series,rep(0,2*wavsize)))
  lenY<-length(Y)
  # freqs: non-negative FFT bin frequencies; freqs2: signed (wrapped) bins.
  freqs<-seq(from=0, by=1, to=lenY-1)/lenY;
  freqs2<-c(seq(from=0, by=1, to=floor(lenY/2)), seq(from=-(ceiling(lenY/2)-1),
                                                     by=1, to=-1))/lenY;

  #find transform components using wavelets of each frequency
  for (stage in 1 : m.last)
  {
    s.scale<-s2[stage];
    #begin calculating wavelet
    #margin determines how close large wavelets can come to the edges of the timeseries
    margin<-margin2[stage];
    #perform convolution: XX is the frequency-domain wavelet (with an
    # admissibility correction term), phase-shifted by wavsize so that the
    # inverse FFT is aligned with the original (unpadded) series.
    XX <- (2*pi*s.scale)^(0.5)*(exp(-s.scale^2*(2*pi*(freqs-((f0/s.scale))))^2/2) -
                                  (exp(-s.scale^2*(2*pi*(freqs2))^2/2))*
                                  (exp(-0.5*(2*pi*f0)^2)))*exp(-1i*2*pi*wavsize*freqs);
    con <- stats::fft((XX*Y),inverse=TRUE)
    con <- con/length(con)
    #fit result into transform, keeping only the edge-reliable region
    result[(margin+1):(translength-margin),stage] <-
      con[(wavsize + margin + 1):(translength + wavsize - margin)];
  }

  # Assemble the return object. When scale.max.input was NULL, columns
  # beyond m.last were never filled, so they are dropped; otherwise all
  # requested timescales are returned (unreliable entries stay NA).
  if(is.null(scale.max.input)){
    result<-result[,1:m.last]
    timescales<-s2[1:m.last]/f0
    errcheck_tts(times,timescales,result,"wt")
    result<-list(values=result, times=times, wtopt=wtopt, timescales=timescales, dat=t.series)
    class(result)<-c("wt","tts","list")
    return(result)
  }
  else{
    timescales<-s2/f0
    errcheck_tts(times,timescales,result,"wt")
    result<-list(values=result, times = times, wtopt=wtopt, timescales=timescales, dat=t.series)
    class(result)<-c("wt","tts","list")
    return(result)
  }
}
|
c62a4fc4d882fcba5914c483b00869ea70b0d070 | 138c2843cd7f8aff3bd326f669961260a2c77b8f | /R/coco_loader.R | 92564fd76bc83f3cd2a7b7c372b41c82b5fc5cc1 | [] | no_license | leslie-arch/R-vqa | 30a27c39c0b562ddbcaf31cca85446bffcc6469b | 6bec4b883c40583326cbff64afc02932524834b3 | refs/heads/main | 2023-08-08T04:48:08.471098 | 2021-09-23T03:10:07 | 2021-09-23T03:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,550 | r | coco_loader.R |
load_train_batch <- function(config)
{
  # Load a COCO-style annotation file and resolve the image directory.
  #
  # config is a list with at least:
  #   input_json - path to the annotation JSON file
  #   image_path - (optional) image directory; derived from input_json if empty
  #   type, subtype - used to build the directory name, e.g. "train" + "2014"
  #
  # Returns a list with the parsed annotations and the resolved image_path.
  anno_path <- config$input_json
  annotations <- fromJSON(file = anno_path)

  image_path <- config$image_path
  if (length(image_path) <= 0)
  {
    # Derive the image directory from the annotation path: drop the file
    # name and its immediate parent directory, then append "<type><subtype>".
    tmp <- strsplit(anno_path, split = '/')
    lentmp <- length(tmp[[1]])
    list_tmp <- tmp[[1]][-c(lentmp, lentmp - 1)]
    # BUG FIX: the original used paste(list_tmp[[1]], collapse = "/"),
    # which pastes only the FIRST path component (collapse was a no-op),
    # so the reconstructed parent path was wrong for any nested path.
    anno_parent <- paste(list_tmp, collapse = "/")
    image_path <- sprintf("%s/%s%s", anno_parent, config$type, config$subtype)
  }

  # Return both results explicitly: the original function's value was the
  # bare `if` expression, i.e. NULL whenever image_path was supplied.
  list(annotations = annotations, image_path = image_path)
}
#img <- image_draw(img_magick)
#rect(20, 20, 200, 100, border = "red", lty = "dashed", lwd = 5)
#abline(h = 300, col = 'blue', lwd = '10', lty = "dotted")
#text(30, 250, "Hoiven-Glaven", family = "monospace", cex = 4, srt = 90)
#palette(rainbow(11, end = 0.9))
#symbols(rep(200, 11), seq(0, 400, 40), circles = runif(11, 5, 35),
# bg = 1:11, inches = FALSE, add = TRUE)
#dev.off()
#
#image <- ocv_read(full_path)
#plot(image)
#library(keras)
# Train a small CNN on CIFAR-10 using the keras package.
# NOTE(review): assumes the keras package is attached and a backend is
# configured — confirm before calling; training runs for 200 epochs.
cifar_demo <- function(){

  # Parameters --------------------------------------------------------------

  batch_size <- 32
  epochs <- 200
  data_augmentation <- TRUE  # switch between plain fit() and augmented training


  # Data Preparation --------------------------------------------------------

  # See ?dataset_cifar10 for more info
  cifar10 <- dataset_cifar10()

  # Feature scale RGB values in test and train inputs (bytes -> [0, 1])
  x_train <- cifar10$train$x/255
  x_test <- cifar10$test$x/255
  # One-hot encode the 10 class labels.
  y_train <- to_categorical(cifar10$train$y, num_classes = 10)
  y_test <- to_categorical(cifar10$test$y, num_classes = 10)


  # Defining Model ----------------------------------------------------------

  # Initialize sequential model
  model <- keras_model_sequential()

  model %>%

    # Start with hidden 2D convolutional layer being fed 32x32 pixel images
    layer_conv_2d(
      filter = 32, kernel_size = c(3,3), padding = "same",
      input_shape = c(32, 32, 3)
    ) %>%
    layer_activation("relu") %>%

    # Second hidden layer
    layer_conv_2d(filter = 32, kernel_size = c(3,3)) %>%
    layer_activation("relu") %>%

    # Use max pooling
    layer_max_pooling_2d(pool_size = c(2,2)) %>%
    layer_dropout(0.25) %>%

    # 2 additional hidden 2D convolutional layers
    layer_conv_2d(filter = 32, kernel_size = c(3,3), padding = "same") %>%
    layer_activation("relu") %>%
    layer_conv_2d(filter = 32, kernel_size = c(3,3)) %>%
    layer_activation("relu") %>%

    # Use max pooling once more
    layer_max_pooling_2d(pool_size = c(2,2)) %>%
    layer_dropout(0.25) %>%

    # Flatten max filtered output into feature vector
    # and feed into dense layer
    layer_flatten() %>%
    layer_dense(512) %>%
    layer_activation("relu") %>%
    layer_dropout(0.5) %>%

    # Outputs from dense layer are projected onto 10 unit output layer
    layer_dense(10) %>%
    layer_activation("softmax")

  # NOTE(review): `lr` is the legacy argument name (newer keras uses
  # `learning_rate`) — kept as-is for the keras version this was written for.
  opt <- optimizer_rmsprop(lr = 0.0001, decay = 1e-6)

  model %>% compile(
    loss = "categorical_crossentropy",
    optimizer = opt,
    metrics = "accuracy"
  )


  # Training ----------------------------------------------------------------

  if(!data_augmentation){
    # Plain training on the raw arrays.
    model %>% fit(
      x_train, y_train,
      batch_size = batch_size,
      epochs = epochs,
      validation_data = list(x_test, y_test),
      shuffle = TRUE
    )
  } else {
    # Train on randomly rotated/shifted/flipped copies of the images.
    datagen <- image_data_generator(
      rotation_range = 20,
      width_shift_range = 0.2,
      height_shift_range = 0.2,
      horizontal_flip = TRUE
    )

    datagen %>% fit_image_data_generator(x_train)

    model %>% fit_generator(
      flow_images_from_data(x_train, y_train, datagen, batch_size = batch_size),
      steps_per_epoch = as.integer(50000/batch_size),
      epochs = epochs,
      validation_data = list(x_test, y_test)
    )
  }
}
|
c8cfb2d89d8ec6c5d85eac6127dd9849242cc2b9 | 74797ca0c5961bdd78c025ba55c2339169fbe7dc | /my packages/s.FE/man/get_num_statis.Rd | e56a9bdff93957dacb9436259d87c103dca0e763 | [] | no_license | fangju2013/RProj | af895d87bdcef0425ea5bcad7c4d38d4d63b5533 | e7eb4983966794bd5a59229b5a45973ea3e3e8ab | refs/heads/master | 2021-01-11T15:36:43.591629 | 2017-01-24T09:23:24 | 2017-01-24T09:23:24 | 79,896,894 | 0 | 0 | null | 2017-01-24T09:26:39 | 2017-01-24T09:26:39 | null | UTF-8 | R | false | true | 606 | rd | get_num_statis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nums_handle.R
\name{get_num_statis}
\alias{get_num_statis}
\title{get numeric variables' statistics index}
\usage{
get_num_statis(df, key.vec, varname, targ.vec = NULL)
}
\arguments{
\item{df}{the data frame to be discretized by best separation}
\item{key.vec}{the vars need to change}
\item{varname}{the label variable}
\item{reordervar}{whether reorder the dataframe by media}
}
\description{
This function will get numeric variables' statistics index
}
\examples{
get_num_statis(alldata,alldata[,'Idx'],'varname',reordervar=F)
}
|
432e6526989723dc2f91ebe32e39686cdfd69137 | 291a6b11be20e97e0160100ee94b9d7cfaf06fc1 | /R/helpers.R | 3e25748503a2c3e54346ea808df1f6e3c65c6960 | [] | no_license | IvanVoronin/mlth.data.frame | 992302d7911b0e5a966c3c14f6bb17ca3e99bd02 | d9cfd47e0c3ed52d81923c169bad2fa0f4db1bb1 | refs/heads/master | 2023-07-08T06:31:30.465193 | 2023-06-27T15:48:17 | 2023-06-27T15:48:17 | 80,029,460 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 17,072 | r | helpers.R | #' @export
# S3 generic: an extended knitr::kable() that additionally supports
# footnotes, alignment overrides for leading columns, registration of the
# table in the global OUTPUT list, and multi-level headers (see methods).
kable2 <- function(x, ...) UseMethod('kable2')
#' @export
#' Default method for kable2().
#'
#' Accepts all knitr::kable() arguments plus:
#'   align_first     - character vector of alignments for the first columns,
#'                     overriding whatever `align` would otherwise be
#'   footnote        - list of arguments for kableExtra::footnote(); its
#'                     first element is the footnote text
#'   register_output - if TRUE, register the table in the global OUTPUT list
#'   name            - name under which to register the table
#' Returns the rendered kable object (with footnote attached if requested).
kable2.default <- function(...) {
  dots <- list(...)
  if (length(dots$register_output) > 0 && dots$register_output) {
    dots <- register_output_internal(...)
  }

  # The table itself: either the named `x` argument or the first positional.
  if (length(dots$x) > 0)
    x <- dots$x
  else x <- dots[[1]]

  if (length(dots$align_first) > 0) {
    if (length(dots$align) == 0) {
      # BUG FIX: the original used apply(x, 2, is.numeric), which coerces
      # a data.frame to a matrix first, so mixed-type tables reported every
      # column as non-numeric. Inspect columns directly instead.
      if (is.data.frame(x)) {
        isn <- vapply(x, is.numeric, logical(1))
      } else {
        # A matrix has a single storage type shared by all columns.
        isn <- rep(is.numeric(x), ncol(x))
      }
      align <- ifelse(isn, 'r', 'l')
    } else if (length(dots$align) == 1) {
      align <- rep(dots$align, ncol(x))
    } else {
      # BUG FIX: a full-length align vector previously left `align`
      # undefined, crashing on the assignment below.
      align <- dots$align
    }
    # Override the alignment of the leading columns.
    align[1:length(dots$align_first)] <- dots$align_first
    dots <- list(...)
    dots$align_first <- NULL
    dots$align <- align
  }

  # Split off the footnote arguments; kable() does not understand them.
  if (length(dots$footnote) > 0) {
    dots_note <- dots$footnote
    note <- dots_note[[1]]
    dots$footnote <- NULL
  } else {
    note <- NULL
  }

  # Strip the kable2-only arguments before delegating to knitr::kable().
  dots$name <- NULL
  dots$register_output <- NULL
  outp <- do.call(knitr::kable, dots)
  if (length(note) > 0) {
    outp <- do.call(
      'footnote',
      c(
        list(outp),
        dots_note
      )
    )
  }
  outp
}
#' @export
#' kable2() method for lists: renders a table with grouped ("packed") rows.
#' Each list element becomes one group of rows, labelled with its name via
#' kableExtra::pack_rows(). Extra arguments are as in kable2.default().
kable2.list <- function(l, ...) {
  # l is a list of data.frames or matrices
  dots <- list(...)
  if (length(dots$register_output) > 0 && dots$register_output) {
    dots <- register_output_internal(l, ...)
    # Drop the positional copy of `l` that was forwarded above.
    dots[[1]] <- NULL
  }
  # l <- lapply(l, as.data.frame)

  # Collect row names into an explicit first column when row.names is TRUE,
  # or (when unset/NA) when any element has non-default row names.
  rn <- character(0)
  if (length(dots$row.names) == 0 || is.na(dots$row.names)) {
    if (
      !all(
        sapply(
          l,
          function(x)
            identical(
              row.names(x),
              as.character(1:nrow(x))
            )
        )
      )
    ) {
      rn <- Reduce('c', lapply(l, row.names))
    }
  } else if (dots$row.names) {
    rn <- Reduce('c', lapply(l, row.names))
  }

  # Stack all groups into one table, prepending the row-name column if any.
  tab <- do.call('rbind', l)
  if (length(rn) > 0)
    if (is.mlth.data.frame(tab)) {
      tab <- cbind(mlth.data.frame(' ' = rn), tab)
    } else
      tab <- cbind(' ' = rn, tab)

  # Row names were materialised as a column above; suppress kable's own.
  dots$row.names <- FALSE
  # Render, then label each original list element as a packed row group.
  kableExtra::pack_rows(
    do.call('kable2', c(list(tab), dots)),
    index = setNames(
      sapply(l, nrow),
      names(l)
    )
  )
}
#' @export
#' kable2() method for mlth.data.frame: renders the flat body via behead()
#' and then reattaches the multi-level header with add_complex_header_above().
kable2.mlth.data.frame <- function(x, ...) {
  dots <- list(...)
  if (length(dots$register_output) > 0 && dots$register_output) {
    dots <- register_output_internal(x, ...)
    # Drop the positional copy of `x` that was forwarded above.
    dots[[1]] <- NULL
  }
  # Render the beheaded (flat) table body with the remaining arguments.
  outp <- do.call(
    'kable2',
    c(
      list(behead(x)),
      dots
    )
  )
  # Stack the multi-level header rows back on top of the rendered table.
  add_complex_header_above(
    outp, x,
    row.names = dots$row.names
  )
}
#' @title Separate table header and table body
#' @description
#' Separate table header and table body to write it to a spreadsheet or to html or to whatever.
#' It returns a data frame with the header stored as \code{attr(tbl, 'header')}.
#' @param tbl is a `mlth.data.frame` or `data.frame`. If `tbl` is a `data.frame`, the function returns it unchanged.
#'
#' @details Also see `unpivotr::behead()``
#'
#' @export
# S3 generic: see roxygen above — separates a (possibly multi-level)
# table header from the table body.
behead <- function(tbl) UseMethod('behead', tbl)
#' @export
# Default behead(): plain tables have no multi-level header, so the result
# is just the data.frame with an empty 'header' attribute. The 'caption'
# and 'note' attributes are carried over from the input (absent ones stay
# absent, since assigning a NULL attribute is a no-op removal).
behead.default <- function(tbl) {
  beheaded <- as.data.frame(tbl)
  attr(beheaded, 'header') <- list()
  for (a in c('caption', 'note')) {
    attr(beheaded, a) <- attr(tbl, a)
  }
  beheaded
}
#' @export
#' behead() method for mlth.data.frame.
#'
#' Converts the nested column structure into (a) a flat data.frame whose
#' column names are the leaf names, and (b) a 'header' attribute: a list of
#' named integer vectors, one per header level above the leaves (bottom
#' level first), where each value is the number of leaf columns the label
#' spans. Blank spans are named ' '. 'caption'/'note' attributes are kept.
behead.mlth.data.frame <- function(tbl) {
  # header <- list()
  # if (!is.mlth.data.frame(tbl)) {
  #   attr(tbl, 'header') <- list()
  #   return(tbl)
  # }

  # if (is.mlth.data.frame(tbl)) {
  # make_header_tree <- function(x) {
  #   if (isAtomic(x))
  #     return(1)

  #   lapply(x, make_header_tree)
  # }

  # Flatten the current tree level into a named vector of column spans.
  collect_leaves <- function(tree) {
    pile <- numeric(0)
    for (i in 1:length(tree)) {
      if (is.numeric(tree[[i]])) {
        leaf <- tree[[i]]
        names(leaf) <- names(tree)[i]
        pile <- c(pile, leaf)
      } else {
        pile <- c(pile, collect_leaves(tree[[i]]))
      }
    }
    pile
  }

  # Remove the deepest level of the tree, merging runs of leaf counts
  # that sit between sublists into unnamed (' ') spans.
  trim_tree <- function(tree) {
    chop = 0
    trimmed = list()
    nm <- names(tree)

    # A tree with no sublists collapses to its total leaf count.
    if (!any(sapply(tree, is.list)))
      return(sum(unlist(tree)))

    for (i in 1:length(tree)) {
      if (is.list(tree[[i]])) {
        if (chop > 0) {
          trimmed <- c(trimmed, list(' ' = chop))
          chop <- 0
        }
        l <- list(trim_tree(tree[[i]]))
        names(l) <- nm[i]
        trimmed <- c(trimmed, l)
      } else {
        chop <- chop + tree[[i]]
      }
    }
    # Flush a trailing run of accumulated leaves.
    if (chop > 0) {
      trimmed <- c(trimmed, list(' ' = chop))
      chop <- 0
    }
    trimmed
  }

  cap <- attr(tbl, 'caption')
  note <- attr(tbl, 'note')

  header <- list()
  # ht mirrors the column tree with every leaf replaced by the count 1.
  ht <- rapply(tbl, function(x) return(1), how = 'list')
  # Peel one level per iteration until only unnamed spans remain.
  while (any(names(ht) != ' ')) {
    header <- c(header, list(collect_leaves(ht)))
    ht <- trim_tree(ht)
  }

  # The first collected level holds the leaf names -> flat column names;
  # the remaining levels become the 'header' attribute (bottom-up).
  tbl <- setNames(as.data.frame(tbl), names(header[[1]]))
  header <- header[-1]
  attr(tbl, 'header') <- header
  attr(tbl, 'caption') <- cap
  attr(tbl, 'note') <- note
  tbl
}
#' @export
# behead() method for lists: behead each element, preserving the list-level
# 'caption' and 'note' attributes (lapply() drops them, so they are saved
# first and reattached afterwards).
behead.list <- function(tbl) {
  saved <- list(caption = attr(tbl, 'caption'), note = attr(tbl, 'note'))
  beheaded <- lapply(tbl, behead)
  attr(beheaded, 'caption') <- saved$caption
  attr(beheaded, 'note') <- saved$note
  beheaded
}
#' @title Add complex header above the kable table
#' @description
#' Add complex header above the kable table. It is supposed to be a part of `knitr - kable - kableExtra` pipeline. Relies on `kableExtra::add_header_above` when `behead` returns a table with complex header.
#' (E.g., when the table is `mlth.data.frame`.)
#' @param kable_input is whatever kable input.
#' @param tbl is the initial table.
#' @param row.names shoul we include `row.names`?
#' @export
add_complex_header_above <- function(
  kable_input,
  tbl,
  row.names = NA
) {
  # adapted from knitr code
  # https://github.com/yihui/knitr/blob/1b40794a1a93162d87252e9aa9a65876933d729b/R/table.R
  # TRUE when x has row names other than the default "1", "2", ...
  has_rownames = function(x) {
    !is.null(row.names(x)) &&
      !identical(
        row.names(x),
        as.character(seq_len(NROW(x)))
      )
  }

  outp <- kable_input
  # Header levels (bottom-up) as produced by behead(); empty for flat tables.
  header <- attr(behead(tbl), 'header')
  # When a row-name column is (or will be) rendered, pad every header level
  # with a one-column blank span so the labels line up with the data columns.
  if (length(row.names) == 0 || is.na(row.names)) {
    if (length(header) > 0 && has_rownames(tbl))
      header <- lapply(header, function(x) c(' ' = 1, x))
  } else if (row.names) {
    header <- lapply(header, function(x) c(' ' = 1, x))
  }

  # Stack the levels on top of the table, bottom level first.
  for (i in header)
    outp <- add_header_above(outp, i)

  return(outp)
}
#' @title Render a table with layered rows
#' @description
#' Render a table with layered rows using kable.
#' It is supposed to be a list of tables that define the pieces of the output table.
#' @param l is a list of tables.
#' @param ... are parameters passed to kable.
#' @export
kable_collapse_rows <- function(l, ...) {
  # l is a list of data.frames or matrices
  # NOTE(review): this duplicates kable2.list() almost verbatim, except it
  # delegates to knitr::kable() instead of kable2() and does not support
  # output registration — consider unifying the two.
  dots <- list(...)
  # l <- lapply(l, as.data.frame)

  # Collect row names into an explicit first column when row.names is TRUE,
  # or (when unset/NA) when any element has non-default row names.
  rn <- character(0)
  if (length(dots$row.names) == 0 || is.na(dots$row.names)) {
    if (
      !all(
        sapply(
          l,
          function(x)
            identical(
              row.names(x),
              as.character(1:nrow(x))
            )
        )
      )
    ) {
      rn <- Reduce('c', lapply(l, row.names))
    }
  } else if (dots$row.names) {
    rn <- Reduce('c', lapply(l, row.names))
  }

  # Stack all groups into one table, prepending the row-name column if any.
  tab <- do.call('rbind', l)
  if (length(rn) > 0)
    if (is.mlth.data.frame(tab)) {
      tab <- cbind(mlth.data.frame(' ' = rn), tab)
    } else
      tab <- cbind(' ' = rn, tab)

  # Row names were materialised as a column above; suppress kable's own.
  dots$row.names <- FALSE
  # Render, then label each original list element as a packed row group.
  kableExtra::pack_rows(
    do.call('kable', c(list(tab), dots)),
    index = setNames(
      sapply(l, nrow),
      names(l)
    )
  )
}
#' @title Register table for the output
#' @description Save the table into a global `OUTPUT` list to write is as an output spreadsheet later.
#' @param tbl is a `data.frame` or `mlth.data.frame` or any other input supported by `\link{write.xlsx.output}`.
#' @param name is table name in the `OUTPUT` list. Can be empty.
#' @param caption is table caption as a merged cell above the table.
#' @param note is table footnote as a merged cell below the table.
#'
#' @return `tbl` with 'caption' and 'note' attributes
#'
#' @export
register_output <- function(tbl, name = NULL, caption = NULL, note = NULL) {
  # Fetch (or create) the global OUTPUT list. inherits = FALSE restricts
  # both the check and the fetch to the global environment itself, so an
  # object called OUTPUT further along the search path (e.g. in an
  # attached package) can never be picked up by mistake — the previous
  # exists()/get() calls inherited down the search path, which is the
  # likely cause of the old "strange behavior when called from a loop".
  if (!exists('OUTPUT', envir = globalenv(), inherits = FALSE)) {
    OUTPUT <- list()
  } else {
    OUTPUT <- get('OUTPUT', envir = globalenv(), inherits = FALSE)
  }

  # Stash the caption/note on the table so the writers can render them.
  attr(tbl, 'caption') <- caption
  attr(tbl, 'note') <- note

  # Unnamed tables are appended; named tables are inserted/overwritten.
  if (length(name) == 0) {
    OUTPUT <- c(OUTPUT, list(tbl))
  } else {
    OUTPUT[[name]] <- tbl
  }

  assign(
    'OUTPUT',
    OUTPUT,
    envir = globalenv()
  )

  return(tbl)
}
# Shared helper for the kable2() methods: it registers the table in the
# global OUTPUT list when register_output = TRUE, then returns the argument
# list with the register_output flag stripped (the callers remove `name`
# and `footnote` themselves before delegating to knitr::kable()).
register_output_internal <- function(...) {
  dots <- list(...)
  # BUG FIX: the original condition was
  #   length(dots$register_output > 0) && dots$register_output
  # with the comparison misplaced *inside* length(), so it only measured
  # the vector's length. isTRUE() expresses the intent directly and is
  # also robust to NA or non-scalar values.
  if (isTRUE(dots$register_output)) {
    # The table itself: the named `x` argument or the first positional.
    if (length(dots$x) > 0)
      x <- dots$x
    else x <- dots[[1]]

    # The footnote text is the first element of the footnote argument list.
    if (length(dots$footnote) > 0) {
      note <- dots$footnote[[1]]
    } else {
      note <- NULL
    }

    register_output(
      x,
      name = dots$name,
      caption = dots$caption,
      note = note
    )
  }
  dots$register_output <- NULL
  dots
}
#' @title Write tables to xlsx file
#' @description These are the writers to use for writing the tables to an xlsx file.
#' Different writers can rely on different packages, like `openxlsx` or `xlsx`.
#' My current package of choice is `openxlsx`.
#
#' @param tblList is a list of `data.frame`s. It is assumed that the input table can have `caption` and `note` attributes
#' and may accept beheaded `mlth.data.frame` (attribute `header`).
#' @param file is the name of xlsx file.
#' @param overwrite should we overwrite the file?
#' @details
#' It is important that tblList is a true list! `data.frame` is also a list and
#' the function will throw an error if `tblList` is `data.frame`.
#'
#' @export
xlsx.writer.openxlsx <- function(tblList, file, overwrite) {
  # Write a list of tables to an xlsx workbook (one sheet per table) using
  # the openxlsx package. Each table may carry 'caption', 'note' and
  # (from behead()) 'header' attributes; a table may also be a plain list
  # of data.frames, which is written as packed row groups.
  if (is.data.frame(tblList))
    stop('tblList must be a true list, not data.frame or mlth.data.frame')

  require('openxlsx')

  wb <- openxlsx::createWorkbook()

  # Ensure every sheet has a non-empty name.
  if (length(names(tblList)) == 0)
    names(tblList) <- paste('Sheet', seq_along(tblList))

  empty_names <- which(names(tblList) == '')
  if (length(empty_names) > 0)
    names(tblList)[empty_names] <- paste0('Sheet', seq_along(empty_names))

  for (sheet in names(tblList)) {
    curTbl <- tblList[[sheet]]

    # Is this a plain list of tables (packed-rows layout)?
    this_is_list <-
      is.list(curTbl) &&
      !is.data.frame(curTbl) &&
      !is.mlth.data.frame(curTbl)

    nc <- ncol(curTbl)
    if (length(nc) == 0)
      nc <- ncol(curTbl[[1]])
    if (length(nc) == 0)
      stop('something is wrong with the table: failed to compute number of columns')

    # BUG FIX: the original read
    #   has_rn <- length(row.names(curTbl) > 0)
    # with the comparison misplaced inside length(), which made has_rn the
    # ROW COUNT instead of a logical, so caption/note merges below spanned
    # nc + nrow columns instead of nc + 1.
    has_rn <- length(row.names(curTbl)) > 0

    addWorksheet(wb, sheet)
    startRow <- 1

    # Write caption ------------------------------------------------------------
    # The caption occupies a single merged cell above the table.
    if (length(attr(curTbl, 'caption')) > 0) {
      mergeCells(
        wb, sheet,
        cols = c(1, nc + as.numeric(has_rn)),
        rows = startRow
      )
      writeData(
        wb, sheet,
        as.character(attr(curTbl, 'caption')),
        startCol = 1, startRow = startRow
      )
      startRow <- startRow + 1
    }

    # Write header -------------------------------------------------------------
    # Multi-level header produced by behead() on an mlth.data.frame:
    # a list of named span vectors, bottom level first.
    header <- attr(curTbl, 'header')
    startRow <- startRow + length(header)
    if (length(header) > 0) {
      for (i in seq_along(header)) {
        currCol <- 1
        for (j in seq_along(header[[i]])) {
          # Merge the cells the label spans, then write the label.
          mergeCells(
            wb, sheet,
            cols = 1:header[[i]][j] + currCol,
            rows = startRow - i
          )
          writeData(
            wb, sheet,
            names(header[[i]])[j],
            startCol = currCol + 1,
            startRow = startRow - i
          )
          currCol <- currCol + header[[i]][j]
        }
      }
    }

    # Bold all header rows (column names plus any extra header levels).
    addStyle(
      wb, sheet,
      createStyle(textDecoration = 'bold'),
      rows = 1:startRow,
      cols = 1 + 1:nc,
      gridExpand = TRUE
    )

    # Write body ---------------------------------------------------------------
    if (!this_is_list) {
      # Simple table: data in column 2 onwards, row names (if any) in column 1.
      writeData(
        wb, sheet,
        curTbl,
        startCol = 2,
        startRow = startRow
      )
      if (has_rn) {
        # +1 skips the column-name row that writeData() emitted above.
        writeData(
          wb, sheet,
          row.names(curTbl),
          startCol = 1,
          startRow = startRow + 1
        )
      }
      startRow <- startRow + nrow(curTbl) + 1
    } else {
      # Packed-rows layout: shared column names once, then one bold merged
      # group label per list element followed by that element's rows.
      writeData(
        wb, sheet,
        as.data.frame(t(names(curTbl[[1]]))),
        startCol = 2,
        startRow = startRow,
        colNames = FALSE
      )
      startRow <- startRow + 1
      for (i in seq_along(curTbl)) {
        mergeCells(
          wb, sheet,
          cols = 1:(nc + 1),
          rows = startRow
        )
        addStyle(
          wb, sheet,
          createStyle(textDecoration = 'bold'),
          cols = 1,
          rows = startRow,
          gridExpand = TRUE
        )
        writeData(
          wb, sheet,
          names(curTbl)[i],
          startCol = 1,
          startRow = startRow
        )
        startRow <- startRow + 1
        writeData(
          wb, sheet,
          curTbl[[i]],
          startCol = 2,
          startRow = startRow,
          colNames = FALSE
        )
        if (length(row.names(curTbl[[i]])) > 0) {
          writeData(
            wb, sheet,
            row.names(curTbl[[i]]),
            startCol = 1,
            startRow = startRow
          )
        }
        startRow <- startRow + nrow(curTbl[[i]])
      }
    }

    # Write note ---------------------------------------------------------------
    # The footnote occupies a single merged cell below the table.
    if (length(attr(curTbl, 'note')) > 0) {
      mergeCells(
        wb, sheet,
        cols = c(1, nc + as.numeric(has_rn)),
        rows = startRow
      )
      writeData(
        wb, sheet,
        as.character(attr(curTbl, 'note')),
        startCol = 1, startRow = startRow
      )
    }
  }

  openxlsx::saveWorkbook(wb, file, overwrite = overwrite)
}
#' @title Write registered output tables
#' @description
#' Write the contents of `OUTPUT` list to an `xlsx` file. This function is supposed to be used
#' at the very end of the analysis when all output tables are prepared.
#' @param file is the name of `xlsx` file.
#' @param overwrite should we overwrite the existing output file?
#' @param writer is the function that writes list of tables into an xlsx file.
#'
#' @export
write.xlsx.output <- function(file, overwrite = TRUE, writer = xlsx.writer.openxlsx) {
  # Write the registered OUTPUT tables to an xlsx file. Tables are
  # beheaded first so multi-level headers are written as merged cells.
  if (!exists('OUTPUT', envir = globalenv(), inherits = FALSE))
    stop('OUTPUT does not exist in globalenv, I have nothing to write')
  # Fetch OUTPUT explicitly from the global environment. The original
  # relied on plain lexical lookup (`x <- OUTPUT`), which from inside a
  # package namespace only reaches the global OUTPUT by falling through
  # the whole search path — and could silently pick up a different object.
  x <- get('OUTPUT', envir = globalenv(), inherits = FALSE)
  x <- lapply(x, behead)
  writer(
    tblList = x,
    file = file,
    overwrite = overwrite
  )
}
#' @rdname cor_helpers
#' @title Render correlation table
#' @description
#' Render correlation table either as `mlth.data.frame` or as `kable` table.
#' @param x,y are tables (`matrix`, `data.frame`).
#' @param type is type of correlation: Pearson or Spearman.
#' @details When using `kable_cors`, include the following html-code to turn on popovers:
#' `<!--html_preserve-->`
#' `<script>`
#' `$(document).ready(function(){`
#' ` $('[data-toggle="popover"]').popover();`
#' `});`
#' `</script>`
#' `<!--/html_preserve-->`
#'
#' @export
kable_cors <- function(x, y = x, type = c('pearson', 'spearman')) {
  require('kableExtra')
  require('Hmisc')

  # Render one correlation cell: the coefficient to 3 d.p., bold when
  # p < 0.05, with p and n shown in an HTML popover on hover.
  f <- function(r, p, n) {
    cell_spec(
      sprintf('%0.3f', r),
      'html', bold = p < 0.05,
      escape = FALSE,
      popover = spec_popover(
        sprintf('p = %0.3f, n = %0.0f', p, n),
        position = 'bottom')
    )
  }

  x <- as.matrix(x)
  y <- as.matrix(y)
  # Hmisc::rcorr() returns the full (x,y)-combined matrices; keep only the
  # x-variables-by-y-variables block of each ($r, $n, $P).
  cors <- rcorr(x, y, type = type)
  cors <- lapply(cors, `[`, colnames(x), colnames(y))

  # Matrix of formatted HTML cell strings, x variables in rows.
  matrix(
    Map(f, cors$r, cors$P, cors$n),
    ncol = ncol(cors$r),
    dimnames = list(
      colnames(x),
      colnames(y)
    )
  )
}
#' @rdname cor_helpers
#' @export
mlth_cors <- function(x, y = x, type = c('pearson', 'spearman')) {
  require('mlth.data.frame')
  require('Hmisc')

  x <- as.matrix(x)
  y <- as.matrix(y)
  # Hmisc::rcorr() returns the full (x,y)-combined matrices; keep only the
  # x-variables-by-y-variables block of each ($r, $n, $P).
  cors <- rcorr(x, y, type = type)
  cors <- lapply(cors, `[`, colnames(x), colnames(y))

  # One (r, n, p) column triple per y variable; x variables become rows.
  as.mlth.data.frame(
    Map(
      function(r, n, P) data.frame(r = r, n = n, p = P),
      asplit(cors$r, 2),
      asplit(cors$n, 2),
      asplit(cors$P, 2)
    ),
    row.names = colnames(x)
  )
}
# TODO: Write on Google Drive |
d3ac8471411b60f58cf0d86dfb5c2310556fc1b6 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.storage/man/s3_get_bucket_intelligent_tiering_configuration.Rd | 53c2120a7da59de529441f40e32154fc83f815ab | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 2,920 | rd | s3_get_bucket_intelligent_tiering_configuration.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3_operations.R
\name{s3_get_bucket_intelligent_tiering_configuration}
\alias{s3_get_bucket_intelligent_tiering_configuration}
\title{Gets the S3 Intelligent-Tiering configuration from the specified bucket}
\usage{
s3_get_bucket_intelligent_tiering_configuration(Bucket, Id)
}
\arguments{
\item{Bucket}{[required] The name of the Amazon S3 bucket whose configuration you want to modify
or retrieve.}
\item{Id}{[required] The ID used to identify the S3 Intelligent-Tiering configuration.}
}
\value{
A list with the following syntax:\preformatted{list(
IntelligentTieringConfiguration = list(
Id = "string",
Filter = list(
Prefix = "string",
Tag = list(
Key = "string",
Value = "string"
),
And = list(
Prefix = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
),
Status = "Enabled"|"Disabled",
Tierings = list(
list(
Days = 123,
AccessTier = "ARCHIVE_ACCESS"|"DEEP_ARCHIVE_ACCESS"
)
)
)
)
}
}
\description{
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage
costs by automatically moving data to the most cost-effective storage
access tier, without additional operational overhead. S3
Intelligent-Tiering delivers automatic cost savings by moving data
between access tiers, when access patterns change.
The S3 Intelligent-Tiering storage class is suitable for objects larger
than 128 KB that you plan to store for at least 30 days. If the size of
an object is less than 128 KB, it is not eligible for auto-tiering.
Smaller objects can be stored, but they are always charged at the
frequent access tier rates in the S3 Intelligent-Tiering storage class.
If you delete an object before the end of the 30-day minimum storage
duration period, you are charged for 30 days. For more information, see
\href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html#sc-dynamic-data-access}{Storage class for automatically optimizing frequently and infrequently accessed objects}.
Operations related to
\code{\link[=s3_get_bucket_intelligent_tiering_configuration]{get_bucket_intelligent_tiering_configuration}}
include:
\itemize{
\item \code{\link[=s3_delete_bucket_intelligent_tiering_configuration]{delete_bucket_intelligent_tiering_configuration}}
\item \code{\link[=s3_put_bucket_intelligent_tiering_configuration]{put_bucket_intelligent_tiering_configuration}}
\item \code{\link[=s3_list_bucket_intelligent_tiering_configurations]{list_bucket_intelligent_tiering_configurations}}
}
}
\section{Request syntax}{
\preformatted{svc$get_bucket_intelligent_tiering_configuration(
Bucket = "string",
Id = "string"
)
}
}
\keyword{internal}
|
890d48c4127324103c427b7790a065ff33bb09a1 | d45d799cbbb7be7013ae3e56f143f333e2ff4319 | /Procesamiento.R | 898a7cebbc970ed963af0ad2a8c10aa3a3dced64 | [] | no_license | nico2021-s/Miprimerrepo | f0b5b8a9a6f2e8e1073a6eb7450cd87ef17e7405 | d44457e72bcd099c00cf5e71907a31e61babea7b | refs/heads/main | 2023-04-13T12:35:49.152880 | 2021-04-25T04:13:53 | 2021-04-25T04:13:53 | 361,295,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,476 | r | Procesamiento.R | # install.packages("dslabs")
# install.packages("tidyverse")
library(dslabs)
library(tidyverse)
s = dslabs::gapminder
library(ggplot2)
ggplot(s, aes(x=continent, y=fertility))+geom_col()
unique(s$year)
length(unique(s$year))
# Agrupar por aรฑo la cantidad de observaciones que aparecen para Colombia
length(unique(s$country))
muestra = s %>% filter(country == "Colombia") %>% group_by(year) %>% summarise(conteo=n())
# Calcule el promedio para los paรญses de suramรฉrica en fertilidad y esperanza de vida
unique(s$region[s$continent=="Americas"])
unique(s$continent)
## Sudamรฉrica
muestra = s %>% filter(region == "South America") %>% group_by(year,continent) %>% summarise(promedio_f=round(mean(fertility,na.rm=TRUE),0),
promedio_ev=round(mean(life_expectancy,na.rm=TRUE),0))
ggplot(muestra, aes(x=year))+geom_line(aes(y=promedio_f,colour="fertilidad"))+geom_line(aes(y=promedio_ev,colour="esperanza_de_vida"))
ggplot(muestra, aes(x=promedio_f, y=promedio_ev))+geom_point()
## Europa
muestra = s %>% group_by(year,continent) %>% summarise(promedio_ev=round(mean(life_expectancy,na.rm=TRUE),0))
muestra = spread(muestra,continent,promedio_ev)
## Grรกfica
ggplot(muestra,aes(x=year))+geom_line(aes(y=Africa,colour="Africa"))+geom_line(aes(y=Americas,colour="Americas"))+geom_line(aes(y=Oceania,colour="Oceania"))+geom_line(aes(y=Europe,colour="Europe"))+geom_line(aes(y=Asia,colour="Asia"))
|
7b0c5fa980f2ce59107b45f02bdd64072e2a08f7 | d80e56a3c15e8156ad9ce75b54d3bc147351b78f | /R/ex6_template.R | 51bf8c7ca8b6607a5ccf85588e6a1462b6e34a6a | [
"MIT"
] | permissive | Lornebradia/c4l_workshop | e0bc8ffc08354f1da3db1bc016bda2fa72c2d25a | 62e54318de5152146180e0815162ae672f3cb71c | refs/heads/master | 2020-04-29T23:13:06.563591 | 2019-04-11T16:35:13 | 2019-04-11T16:35:13 | 176,470,559 | 0 | 0 | NOASSERTION | 2019-04-10T20:53:45 | 2019-03-19T09:08:29 | HTML | UTF-8 | R | false | false | 477 | r | ex6_template.R | #' Exercise 6 - Reporting
#'
#' Prepare the report for running
library(shiny)
# Add to plan ----------
#' We need to export the results to be able to later publish:
#' the drake cache is not available for export
# Create target to export the data that will be needed (final, seasonal_plots, aggregated data)
# Run the report as a target with
# report = rmarkdown::run(knitr_in("R/report_template.Rmd"))
# ex6_plan <-
# Config -------------
# Run --------------------
|
061fc68b7f3df8bc9a8f762a62cc62a291214a25 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ContourFunctions/examples/cf_func.Rd.R | 2bb479d33b488d75637fa0d31e51e3ddff86e7cd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 392 | r | cf_func.Rd.R | library(ContourFunctions)
### Name: cf_func
### Title: Makes filled contour plot from function without sidebar, uses
### cf_grid
### Aliases: cf_func
### ** Examples
cf_func(function(x){x[1]*x[2]})
cf_func(function(x)(exp(-(x[1]-.5)^2-5*(x[2]-.5)^2)))
cf_func(function(xx){exp(-sum((xx-.5)^2/.1))}, bar=TRUE)
cf_func(function(xx){exp(-sum((xx-.5)^2/.1))}, bar=TRUE, mainminmax=TRUE)
|
9def8b862040baef8e03ccbdcd094c04accb3941 | 4b76f1a19c6fc9a8a2263ddda140aff00ec3396a | /man/rtmvn.Rd | fc50b8e50e0a56f74a436c824b5a7c340b2ca10d | [] | no_license | cran/tmvmixnorm | db90df7285039e701fde8ce91665fd133327313a | cbb7edbc4b3c255eea3ceeaea9e77947c6422ddd | refs/heads/master | 2021-06-27T21:36:10.016413 | 2020-09-18T17:00:02 | 2020-09-18T17:00:02 | 145,902,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,241 | rd | rtmvn.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtmvn.R
\name{rtmvn}
\alias{rtmvn}
\title{Random number generation for truncated multivariate normal distribution subject to linear inequality constraints}
\usage{
rtmvn(
n,
Mean,
Sigma,
D = diag(1, length(Mean)),
lower,
upper,
int = NULL,
burn = 10,
thin = 1
)
}
\arguments{
\item{n}{number of random samples desired (sample size).}
\item{Mean}{mean vector of the underlying multivariate normal distribution.}
\item{Sigma}{positive definite covariance matrix of the underlying multivariate normal distribution.}
\item{D}{matrix or vector of coefficients of linear inequality constraints.}
\item{lower}{vector of lower bounds for truncation.}
\item{upper}{vector of upper bounds for truncation.}
\item{int}{initial value vector for Gibbs sampler (satisfying truncation), if \code{NULL} then determine automatically.}
\item{burn}{burn-in iterations discarded (default as \code{10}).}
\item{thin}{thinning lag (default as \code{1}).}
}
\value{
\code{rtmvn} returns a (\code{n*p}) matrix (or vector when \code{n=1}) containing random numbers which approximately follows truncated multivariate normal distribution.
}
\description{
\code{rtmvn} simulates truncated multivariate (p-dimensional) normal distribution subject to linear inequality constraints. The constraints should be written as a matrix (\code{D}) with \code{lower} and \code{upper} as the lower and upper bounds for those constraints respectively. Note that \code{D} can be non-full rank, which generalize many traditional methods.
}
\examples{
# Example for full rank with strong dependence
d <- 3
rho <- 0.9
Sigma <- matrix(0, nrow=d, ncol=d)
Sigma <- rho^abs(row(Sigma) - col(Sigma))
D1 <- diag(1,d) # Full rank
set.seed(1203)
ans.1 <- rtmvn(n=1000, Mean=1:d, Sigma, D=D1, lower=rep(-1,d), upper=rep(1,d),
int=rep(0,d), burn=50)
apply(ans.1, 2, summary)
# Example for non-full rank
d <- 3
rho <- 0.5
Sigma <- matrix(0, nrow=d, ncol=d)
Sigma <- rho^abs(row(Sigma) - col(Sigma))
D2 <- matrix(c(1,1,1,0,1,0,1,0,1),ncol=d)
qr(D2)$rank # 2
set.seed(1228)
ans.2 <- rtmvn(n=100, Mean=1:d, Sigma, D=D2, lower=rep(-1,d), upper=rep(1,d), burn=10)
apply(ans.2, 2, summary)
}
|
b90bb24ce740f6e8c7dbe2e4f9d2b2e894114243 | 8c67a0356c5086cfc2534727bfd2e6ff4618f112 | /man/coalesce2.Rd | 37085e64307b2b14eaf0ea6fb6ccfebf09ba6fd9 | [
"MIT"
] | permissive | PelizaC-Jacobs/SOBPredictR | a3106489e07f398ecb1bf2c70dd4778fcb834c81 | 5689656dbd2dfeda6217d92b40eab9b21b29a7bc | refs/heads/main | 2023-03-17T23:59:04.407642 | 2021-02-09T00:07:26 | 2021-02-09T00:07:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 310 | rd | coalesce2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_wrangle.R
\name{coalesce2}
\alias{coalesce2}
\title{Modified Coalesce}
\usage{
coalesce2(...)
}
\arguments{
\item{...}{vector}
}
\value{
vector
}
\description{
Coalesce two or more vectors into a single vector
}
\examples{
\dontrun{
coalesce2(x, y)
}
}
|
b65d11e4046c2c9989f1b457c65853377631fe3b | d0957837cc4dae8843c1c5037ec5f1f83e393ff1 | /man/Detergent.Rd | 7249c66fa05d385d0c2c0239bcd1438cb4080efe | [] | no_license | cran/vcdExtra | 99ade8da0accd5d400cda9489f32aee3a0a1de3f | d98d118bf59554f334f0c9782857aae663744611 | refs/heads/master | 2023-04-27T19:29:18.255703 | 2023-04-16T14:10:02 | 2023-04-16T14:10:02 | 17,700,730 | 1 | 0 | null | 2014-03-24T22:41:21 | 2014-03-13T06:43:46 | R | UTF-8 | R | false | false | 1,782 | rd | Detergent.Rd | \name{Detergent}
\Rdversion{1.1}
\alias{Detergent}
\docType{data}
\title{Detergent preference data}
\description{Cross-classification of a sample of 1008 consumers according to
(a) the softness of the laundry water used, (b) previous use of detergent Brand M, (c) the temperature of laundry water used and (d) expressed preference for Brand X or Brand M in a blind trial.}
\usage{
data(Detergent)
}
\format{
A 4-dimensional array resulting from cross-tabulating 4 variables for 1008 observations. The variable names and their levels are:
\tabular{rll}{
No \tab Name \tab Levels \cr
1\tab \code{Temperature}\tab \code{"High", "Low"}\cr
2\tab \code{M_User}\tab \code{"Yes", "No"}\cr
3\tab \code{Preference}\tab \code{"Brand X", "Brand M"}\cr
4\tab \code{Water_softness}\tab \code{"Soft", "Medium", "Hard"}\cr
}
}
%\details{ }
\source{
% \cite{Fienberg:80 [p. 71]}
Fienberg, S. E. (1980).
\emph{The Analysis of Cross-Classified Categorical Data}
Cambridge, MA: MIT Press, p. 71.
}
\references{
% \cite{RiesSmith:63}
Ries, P. N. & Smith, H. (1963).
The use of chi-square for preference testing in multidimensional problems.
\emph{Chemical Engineering Progress}, 59, 39-43.
}
%\seealso{ }
\examples{
data(Detergent)
# basic mosaic plot
mosaic(Detergent, shade=TRUE)
require(MASS)
(det.mod0 <- loglm(~ Preference + Temperature + M_User + Water_softness,
data=Detergent))
# examine addition of two-way terms
add1(det.mod0, ~ .^2, test="Chisq")
# model for Preference as a response
(det.mod1 <- loglm(~ Preference + (Temperature * M_User * Water_softness),
data=Detergent))
mosaic(det.mod0)
}
\keyword{datasets}
\concept{loglinear models}
|
4670ddb5a56f0ffec2dfe4fc58b713e91c6d4051 | 9b76f92dfecfc84e2a43da24b9e4aa678a2de356 | /bootcamp/064PipeOperator.R | abdc6850f58fea1a0f021808f9b8394de32f18a9 | [] | no_license | rathanDev/r | fa9d82582a83271b1f771c3bc9dd4348b0b28f73 | 2c4871f13de7cde82df7e0e63a253fa4a575a23b | refs/heads/master | 2022-12-13T17:56:46.669651 | 2020-09-10T12:32:49 | 2020-09-10T12:32:49 | 264,051,092 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 513 | r | 064PipeOperator.R | library(dplyr)
df <- mtcars
df
head(df)
# Approach 1: Nesting - memory-efficient (no intermediates), but the call
# must be read inside-out, which makes it hard to follow.
result <- arrange(sample_n(filter(df, mpg>20), size=5),desc(mpg))
result
# Approach 2: Multiple assignments - readable top-to-bottom, but every
# intermediate (mpg20, sample5) is kept alive, costing extra memory.
mpg20 <- filter(df, mpg>20)
sample5 <- sample_n(mpg20, size = 5)
result <- arrange(sample5, desc(mpg))
result
# Approach 3: Pipe operator (%>%) - reads left-to-right like approach 2,
# without keeping named intermediates like approach 1. Best of both.
result <- df %>% filter(mpg>20) %>% sample_n(size = 5) %>% arrange(desc(mpg))
result
|
c9d33c05011bbc798bcc1b094255786acabe35cb | df7e56686117977a8a0721a432ea6a954d11831d | /app.R | cfd1d96808d608e413270a369a44eb201083e3e3 | [] | no_license | stat133-sp19/hw-stat133-Aadiraj | 2603ae1a35d16d4826e6a154cc0787808afe5c44 | 4aba22ae80f551ee97a632ed71d98d7097f9cae8 | refs/heads/master | 2020-04-28T20:19:46.108210 | 2019-05-05T03:05:19 | 2019-05-05T03:05:19 | 175,540,912 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,025 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
# User interface: a title, one row of three 4-wide input columns, and the
# output area (timeline plot above, balance table below).
# NOTE: the label typos "Inital"/"Timlines" are kept verbatim so the
# rendered page is byte-identical to the original.

# Column 1: money inputs (initial deposit and annual contribution).
amount_inputs <- column(
  4,
  sliderInput("inital", "Inital Amount:",
              min = 0, max = 100000, step = 500, value = 1000),
  sliderInput("contrib", "Annual Contribution:",
              min = 0, max = 50000, step = 500, value = 2000)
)

# Column 2: rate inputs (annual return and contribution growth).
rate_inputs <- column(
  4,
  sliderInput("return", "Return Rate:",
              min = 0, max = 0.2, step = 0.001, value = 0.05),
  sliderInput("growth", "Growth Rate:",
              min = 0, max = 0.2, step = 0.001, value = 0.02)
)

# Column 3: horizon in years plus the facet toggle.
horizon_inputs <- column(
  4,
  sliderInput("years", "Years:",
              min = 0, max = 50, step = 1, value = 20),
  selectInput("facet", "Facet?:", choices = c("No", "Yes"))
)

ui <- fluidPage(
  titlePanel("Future Value of Investment on Different Modalities"),
  fluidRow(amount_inputs, rate_inputs, horizon_inputs),
  hr(),
  h4("Timlines"),
  br(),
  mainPanel(
    column(12, offset = 2, plotOutput("distPlot")),
    br(),
    hr(),
    h4("Balances"),
    br(),
    column(12, offset = 2, verbatimTextOutput("table"))
  )
)
# Define server logic required to draw a histogram
# Server logic.
#
# Fixes relative to the previous version:
#   * the three financial helper functions were defined (and their example
#     calls executed) inside BOTH renderPlot and renderPrint on every
#     invalidation; they are now defined once.
#   * annuity() divided by zero when rate == 0, and growing_annuity()
#     divided by zero when rate == growth; both limits are now handled.
#   * the per-year balances are computed once in a shared reactive() and
#     vectorized instead of growing vectors element-by-element in loops.
server <- function(input, output) {

  library(ggplot2)

  # Future value of a single deposit `amount` after `years` years at `rate`.
  # `years` may be a vector.
  future_value <- function(amount = 0, rate = 0, years = 0) {
    amount * (1 + rate)^years
  }

  # Future value of an ordinary annuity: `contrib` deposited at the end of
  # each year. The rate == 0 limit is simply contrib * years.
  annuity <- function(contrib = 0, rate = 0, years = 0) {
    if (rate == 0) {
      contrib * years
    } else {
      contrib * ((1 + rate)^years - 1) / rate
    }
  }

  # Future value of a growing annuity: first deposit `contrib`, deposits
  # growing at `growth` per year, invested at `rate`. The rate == growth
  # limit of ((1+r)^n - (1+g)^n)/(r-g) is n * (1+r)^(n-1).
  growing_annuity <- function(contrib = 0, rate = 0, growth = 0, years = 0) {
    if (rate == growth) {
      contrib * years * (1 + rate)^(years - 1)
    } else {
      contrib * ((1 + rate)^years - (1 + growth)^years) / (rate - growth)
    }
  }

  # Shared reactive: one row per year (0..input$years), one column per
  # investment modality. Used by both the plot and the printed table.
  modalities <- reactive({
    yrs  <- 0:input$years
    base <- future_value(input$inital, input$return, yrs)
    data.frame(
      year            = yrs,
      no_contrib      = base,
      fixed_contrib   = base + annuity(input$contrib, input$return, yrs),
      growing_contrib = base + growing_annuity(input$contrib, input$return,
                                               input$growth, yrs)
    )
  })

  output$distPlot <- renderPlot({
    wide <- modalities()
    if (input$facet == "No") {
      # Single panel: one colored line per modality.
      ggplot(data = wide) +
        geom_line(aes(x = year, y = no_contrib, color = "No Contribution")) +
        geom_line(aes(x = year, y = fixed_contrib, color = "Fixed Contribution")) +
        geom_line(aes(x = year, y = growing_contrib, color = "Growing Contribution")) +
        labs(x = "Years After Inital Investment",
             y = "Current Value of Investment",
             title = "Future Value of Investment for Different Investment Modes" )
    } else {
      # Faceted panels need long format. Labels match the originals, so the
      # (alphabetical) facet order is unchanged.
      facetted <- data.frame(
        year    = rep(wide$year, times = 3),
        balance = c(wide$no_contrib, wide$fixed_contrib, wide$growing_contrib),
        modal   = rep(c("No Contribution", "Fixed Contribution",
                        "Growing Contribution"), each = nrow(wide))
      )
      ggplot(data = facetted) +
        geom_area(aes(x = year, y = balance, color = modal, fill = modal)) +
        facet_grid(. ~ modal)
    }
  })

  # Balance table: prints the same wide data frame the plot is drawn from.
  output$table <- renderPrint({
    modalities()
  })
}
# Run the application: bind the UI definition above to the server logic.
shinyApp(ui = ui, server = server)
|
9519e27c66fd574295d7e3ba4aa57e1db9ced4e8 | 1c9e02b3e531f1dad978afff5fd863e6fe007383 | /man/rawsDF_isRawsDF.Rd | 9c4fa8e85b1c29a53e21d93344d81943f94da4f1 | [] | no_license | mgjust/RAWSmet | 95d23fdc45bba8e3cfa3974f85847e8b077ac4ac | 79bad67e4d8a13a966dc19c40144bddba44e6254 | refs/heads/master | 2023-02-18T21:39:20.503427 | 2021-01-19T00:25:55 | 2021-01-19T00:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,676 | rd | rawsDF_isRawsDF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rawsDF_utils.R
\name{rawsDF_isRawsDF}
\alias{rawsDF_isRawsDF}
\title{Test for correct structure for a \emph{rawsDF} object}
\usage{
rawsDF_isRawsDF(rawsDF = NULL)
}
\arguments{
\item{rawsDF}{\emph{rawsDF} object}
}
\value{
\code{TRUE} if \code{rawsDF} has the correct structure,
\code{FALSE} otherwise.
}
\description{
The \code{rawsDF} is checked for the presence of core data columns
Core columns include:
\itemize{
\item{\code{datetime} -- datetime of the observation}
\item{\code{temperature} -- temperature (C)}
\item{\code{humidity} -- humidity (\%)}
\item{\code{windSpeed} -- wind speed (m/s)}
\item{\code{windDirection} -- wind direction (degrees)}
\item{\code{maxGustSpeed} -- speed of max gust (m/s)}
\item{\code{maxGustDirection} -- direction of max gust (degrees)}
\item{\code{precipitation} -- precipitation (mm/h)}
\item{\code{solarRadiation} -- solar radiation (W/m^2)}
\item{\code{fuelMoisture} -- fuel moisture}
\item{\code{fuelTemperature} -- fuel temperature (C)}
\item{\code{monitorType} -- FW13 or WRCC depending on data source}
\item{\code{nwsID} -- NWS station identifier (for FW13 data)}
\item{\code{wrccID} -- WRCC station identifier (for WRCC data)}
\item{\code{siteName} -- English language station name}
\item{\code{longitude} -- decimal degrees E}
\item{\code{latitude} -- decimal degrees N}
\item{\code{timezone} -- timezone of the station}
\item{\code{elevation} -- elevation of station in m}
}
}
\examples{
\donttest{
library(RAWSmet)
rawsDF <- example_fw13SaddleMountain \%>\% raws_toRawsDF()
rawsDF_isRawsDF(rawsDF)
}
}
|
bcfd50925247bc082c83cd6c3b35b5d045962c44 | 27da76805544719c650c644a1eb2433b7ec7e3f4 | /missingness.R | 84c27237fa93ba218cbf3716554f97e5a6d89033 | [] | no_license | Joe-Hilgard/vvg-2d4d | 3428ae003b4f0d2294b53c61c5ea8c8dcfe5f40c | 1f94e83c0a7b02eef464bb6f2369ab03f2cf1224 | refs/heads/master | 2020-03-19T22:54:45.787129 | 2019-01-16T23:58:40 | 2019-01-16T23:58:40 | 136,985,930 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 473 | r | missingness.R | # Script for identifying missingness of data
# Script for identifying missingness in the aggregated data.
# Idiom fixes: `<-` instead of `=` for assignment, FALSE instead of F.
library(dplyr)

# Load the aggregated dataset (tab-separated; keep strings as character).
dat <- read.delim("./analysis/aggregated_data.txt",
                  #quote="",
                  sep = "\t",
                  stringsAsFactors = FALSE)

# The outcome column arrives as "Assignment"; downstream code expects "DV".
names(dat)[names(dat) == "Assignment"] <- "DV"

# Coerce subject IDs to numeric. Non-numeric IDs become NA with a warning,
# which is itself useful for spotting malformed rows.
dat$Subject <- as.numeric(dat$Subject)

# Rows with a missing outcome or a missing condition assignment.
dat %>%
  filter(is.na(DV) | is.na(Condition)) %>%
  select(Subject, DV, Condition) %>%
  View()

# Inspect subject 420 specifically.
dat %>%
  filter(Subject == 420) %>%
  select(Subject, DV, Condition) %>%
  View()
d1461ecdf7fc88f5f407c485471d1ac41963e960 | 63f7123c43e044985c41f89e3b5b523bf862c3ef | /BT/BT/BTvantagemsemW1delta/GSMH_BT_DIF4A.R | baac1010cf85bad776dde44675ec15853cef9c64 | [] | no_license | danilomachadopires/CodeThesis | ab18813bab64bd5a854870c1961cbac32491a101 | d9b419f684fa33dd8f5ea5f2bc8eec3e5c159950 | refs/heads/master | 2020-06-16T20:23:45.008573 | 2019-07-07T20:45:49 | 2019-07-07T20:45:49 | 195,447,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,649 | r | GSMH_BT_DIF4A.R | #-----------------------------------------------Modelo com um รบnico delta-sem w---------------------------------------
# Entra com o rating e a dif de cada jogador
# theta = c(gama1,gama2,delta1,delta2)
rm(list=ls(all=T))
source("FGSMH.R")
dados <- read.table("granprix2010a2013.txt",h=T)
attach(dados)
nomes <- levels(W)
## Jogadores de interesse TRabalho antigo (46 jogadores do live rating 15 de maio de 2013)
estimandos <- nomes[c(7,19,20,32,35,47,51,63,143,145,
204,208,232,284,292,325,401,407,433,437,
472,494,531,536,570,576,627,633,644,646,
653,658,738,757,773,843,877,908,929,931,
942,948,959,972,973,979)]
m <- length(estimandos) # nรบmero de jogadores analisados
n <- length(W) # nรบmero de jogos
deltag <- 0
###########################
# Distribuiรงรตes a priori
# Ajustadas a partir de dados histรณricos
## Gama ~ Normal(mu=2705,sigma=400)
mu0 <- 2705
sigma0 <- 400
## delta ~ Normal(mu=0,sigma=10)
mud <- 0
sigmad <- 40
###########################
# Distribuiรงรตes geradoras de candidatos
#
## Gama ~ Normal(mu=2705,sigma=400)
mugC <- rep(2705,m)
sigmagC <- rep(400,m)
## delta ~ Normal(mu=0,sigma=10)
mudC <- 0
sigmadC <- 40
################### Veriricar o banco de dados para partidas onde os participantes nรฃo serรฃo analisados
GW <- matrix(0,n,m) # matriz de 0s com linhas igual ao nรบmero de partidas e colunas igual a jogadores
GB <- matrix(0,n,m)
# aqui รฉ preenchido as matrizes com indices referentes aas partidas jogados pelos jogadores analisados
# por exemplo na primeira partida o jogador Shakhriyar jogou de brancas, logo na matriz GW cuja coluna
# รฉ referente a este jogador irรก aparecer indรญce (1) referente a ele.
for(i in 1:m){
GW[,i] <- as.integer(W==estimandos[i])
GB[,i] <- as.integer(B==estimandos[i])
}
M <- GW+GB
fora <- which(apply(M,1,sum)==0)
n-length(fora)
GW <- GW[-fora,]
GB <- GB[-fora,]
y <- y[-fora];
WR <- WR[-fora]
BR <- BR[-fora]
W <- W[-fora]
n <- length(y)
detach(dados,pos=dados)
rm(dados)
### chute inicial
gama <- mugC
delta <- mudC
cgr <- cbind(c(mugC,mudC),c(sigmagC,sigmadC))
# Substituir no vetor WR e BR os gama
# Substituir no vetor DR os delta , Aqui รฉ mantido o rating do jogador que nรฃo serรก analisado como fixo, e os ratings
# dos jogadores de interesse como a mรฉdia do torneio.
for(i in 1:m){
WR[which(GW[,i]==1)] <- gama[i]
BR[which(GB[,i]==1)] <- gama[i]
}
## Tamanho da cadeia MCCM
B <- 50000 #burn-in
J <- 20 #jump
nef <- 4000 #tamanho amostral efetivo
nsMC <- B+nef*J #tamanho total da amostra (total da cadeia)
tcgc <- 1000
Mtcgc <- matrix(0,(m+1),tcgc)
###########################################################
cont <- 0
#### Laรงo de atualizaรงรฃo dos parรขmetros (Gibbs Sampling)
while (cont <= nsMC) {
# Atualizar o gama
for(i in sample(1:m)){
indice <- which((GW[,i]+GB[,i])!=0)
# Construir a matriz Theta para o jogador
Theta <- NULL
Theta <- matrix(0,length(indice),3)
Theta <- cbind(WR[indice],
BR[indice],
rep(delta,length(indice)))
pi <- BTt.Dif(Theta)
# Valor corrente da log-Verossimilhanรงa
Y<-y[indice]
lVa <- lLpi(pi)
# Amostra candidato
cand <- rnorm(1,mugC[i],sigmagC[i])
# Valor da log-Verossimilhanรงa para o candidato
WR[which(GW[,i]==1)] <- cand
BR[which(GB[,i]==1)] <- cand
Thetac <- NULL
Thetac <- matrix(0,length(indice),3)
Thetac <- cbind(WR[indice],
BR[indice],
rep(delta,length(indice)))
pi <- BTt.Dif(Thetac)
lVc <- lLpi(pi)
lgC <- log(dnorm(cand ,mugC[i],sigmagC[i]))
lgA <- log(dnorm(gama[i],mugC[i],sigmagC[i]))
lpC <- log(dnorm(cand ,mu0,sigma0))
lpA <- log(dnorm(gama[i],mu0,sigma0))
gama[i] <- aceita(gama[i],cand,(lVc+lpC+lgA),(lVa+lpA+lgC))
WR[which(GW[,i]==1)] <- gama[i]
BR[which(GB[,i]==1)] <- gama[i]
}
# Atualizar o delta
# Construir a matriz Theta para o jogador
Theta <- NULL
Theta <- matrix(0,n,3)
Theta <- cbind(WR,
BR,
rep(delta,n))
Y<-y
pi <- BTt.Dif(Theta)
# Valor corrente da log-Verossimilhanรงa
lVa <- lLpi(pi)
# Amostra candidato
cand <- rnorm(1,mudC,sigmadC)
# Valor da log-Verossimilhanรงa para o candidato
Thetac <- Theta
Thetac[,3] <- rep(cand,n)
pi <- BTt.Dif(Thetac)
lVc <- lLpi(pi)
lgC <- log(dnorm(cand, mudC,sigmadC))
lgA <- log(dnorm(delta, mudC,sigmadC))
lpC <- log(dnorm(cand ,mud,sigmad))
lpA <- log(dnorm(delta,mud,sigmad))
delta<- aceita(delta,cand,(lVc+lpC+lgA),(lVa+lpA+lgC))
# atualizar e imprimir matriz de parรขmetros
if(cont%%J==0 && cont>B)
{
write(t(gama),"cadeia.gamaA.txt",ncol=length(gama),append=TRUE)
write(t(delta),"cadeia.deltaA.txt",ncol=1,append=TRUE)
# Valor corrente da logVerossimilhanรงa
# para o cรกlculo do fator de Bayes
Theta <- cbind(WR,BR,delta)
pi <- BTt.Dif(Theta)
Y<-y
lVm <- lLpi(pi)
write(lVm,"cadeia.lVmA.txt",ncol=1,append=TRUE)
}
Mtcgc[,cont%%tcgc] <- c(gama,delta)
if(cont%%tcgc==0){
cgr[,1] <- apply(Mtcgc,1,mean)
cgr[,2] <- apply(Mtcgc,1,sd)+0.01
mugC <- cgr[1:46,1]
sigmagC <- cgr[1:46,2]
mudC <- cgr[47,1]
sigmadC <- cgr[47,2]
}
cont <- cont + 1
}
|
671f955686a2d12cd244f1a857519bd5522bba5c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sppmix/examples/plot_CompDist.Rd.R | 74505356f621bd7a9f587e0bf4d93122648be000 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 326 | r | plot_CompDist.Rd.R | library(sppmix)
### Name: plot_CompDist
### Title: Plots for the number of components
### Aliases: plot_CompDist

### ** Examples

## No test: 
# Fit a birth-death MCMC mixture (at most m = 10 components) to the classic
# redwood point pattern, then plot the distribution of component counts.
fitBD <- est_mix_bdmcmc(spatstat::redwood, m = 10)
plot_CompDist(fitBD)
# Same workflow for the 2014 California earthquakes (Richter > 3.0).
# NOTE(review): "=" assignment here vs "<-" above -- kept verbatim.
CAfitBD=est_mix_bdmcmc(pp = CAQuakes2014.RichterOver3.0, m = 10)
plot_CompDist(CAfitBD)
## End(No test)
|
b151034ff935207f728e058f464e85bb76c83775 | 557c317f924f94019e60b40cc2a0a6b713c80543 | /OlderCode/SYV_vs_Tower.R | a75453c3cc27cd3ce18e16ebe470520544ffd1a4 | [] | no_license | bblakely/SapFlux | 23416d66479dc80cf420b1dbeabb6443976bac58 | 122973ab38ae07c20e7d80087ea67a5ab8694234 | refs/heads/master | 2021-01-10T13:25:32.786118 | 2020-05-06T19:31:37 | 2020-05-06T19:31:37 | 55,317,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,585 | r | SYV_vs_Tower.R | start=190
# Day-of-year window for matching tower and sap-flux records ("start" is
# defined just above).
end=230
# Tower data: first row is a units line (skip=1); column names are taken
# from a second, un-skipped read so the real headers are preserved.
tower.raw.syv<-read.csv('Syv_TowerData_2015.csv',skip=1, header=TRUE)
tower.names.syv<-colnames(read.csv('Syv_TowerData_2015.csv'))
names(tower.raw.syv)<-tower.names.syv
# Restrict tower records to the [start, end) decimal-day window.
tower.match.syv<-tower.raw.syv[tower.raw.syv$DTIME>=start & tower.raw.syv$DTIME<end,]
# Ustar / wind-direction filters (currently disabled).
#tower.match.syv[tower.match.syv$UST<0.2,6:45]<-(-9999)
#tower.match.syv[tower.match.syv$WD>330 | tower.match.syv$WD<90, 6:45]<-(-9999)
# -9999 is the tower file's missing-data sentinel; convert to NA.
tower.match.syv[tower.match.syv==-9999]<-NA
# Produces DAT.SYV (and the WCR objects kept below) in the global env.
source('SapProcess_simple_SYV.R')
# Drop everything except the matched tower/sap objects and the window bounds.
rm(list=setdiff(ls(), c("tower.match.syv", "sap.match.syv","DAT.SYV",
                        "tower.match.wcr", "sap.match.wcr", "DAT.WCR", "start","end")))
# Sap-flux records in the same decimal-day window, then thinned to every
# 6th record and trimmed by one row to align with the tower series.
sap.match.syv<-DAT.SYV[DAT.SYV$DecDay>=start & DAT.SYV$DecDay<end,]
sample.index<-seq(from=1, to=nrow(sap.match.syv), by=6)
sap.match.syv<-sap.match.syv[sample.index,]
sap.match.syv<-sap.match.syv[1:(nrow(sap.match.syv)-1),]
# Point colors by time of day: 06-14h green, 14-22h orange, night black.
sap.match.syv$colind<-'black'
sap.match.syv$colind[sap.match.syv$Dectime<14 & sap.match.syv$Dectime>=6]<-"green"
sap.match.syv$colind[sap.match.syv$Dectime>=14 & sap.match.syv$Dectime<22]<-"orange"
#sap.match.syv[is.na(sap.match.syv)]<-0
# NOTE(review): this bare seq() is computed and discarded (interactive
# inspection leftover), and strt/numrg are not used below -- confirm.
seq(from=1, to=nrow(sap.match.syv), by=48)
strt<-481
numrg<-strt:(strt+47)
#Treesp=c("TSCA","TSCA","TSCA","TSCA","OSVI","OSVI","BEAL", "TSCA","ACSA","TSCA","TSCA",
#         "12", "ACSA","ACSA","ACSA","ACSA", "ACSA","18","TSCA","TSCA")
# Scatter sap flux (trees in columns 4-7) against soil water content, in a
# 2x2 panel grid, colored by time of day.
par(mfrow=c(2,2))
for(i in c(2:5)){
  plot(sap.match.syv[,i+2]~tower.match.syv$SWC1, pch='*',
       col=sap.match.syv$colind, ylab="Sapflux (m/s)", xlab="SWC", main=paste("SYV",i),
       ylim=c(0,5e-05))
}
6c7e05d769c322557ae0bf01a85a8049af92e219 | a01a72a5b9ac2fa81930df2d687623956ed19b6c | /R/singlecell_network_attributes.R | ffb0636b5c3bd80906b30ab3e2e98c8f15c84ec6 | [] | no_license | sturkarslan/single-cell-analysis | 5fc12c8ba94613537410c4de405ff6856211f031 | 52380cd52f9c8b8329934993a2d5ec0b7c443518 | refs/heads/master | 2021-09-17T14:47:40.597471 | 2018-07-02T21:59:19 | 2018-07-02T21:59:19 | 108,581,692 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,506 | r | singlecell_network_attributes.R | # This script creates network attributes for single cell mutation nodes
# Load early-generation (EPD line) mutation calls; each table is tagged
# with a `line` column naming its generation.
ua3.b = read.delim("~/Google Drive/Single-Cell-Genomics/variants/EPD/UA3_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.b$line = "ua3.b"
ua3.03 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/EPD/UA3_03_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.03$line = "ua3.03"
ua3.09 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/EPD/UA3_09_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.09$line = "ua3.09"
# Generations 10 and later live in the after_300 directory.
ua3.10 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/after_300/UA3-10_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.10$line = "ua3.10"
ua3.15 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/after_300/UA3-15_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.15$line = "ua3.15"
ua3.45 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/after_300/UA3-45_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.45$line = "ua3.45"
ua3.76 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/after_300/UA3-76_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.76$line = "ua3.76"
ua3.118 = read.delim("~/Google Drive/Single-Cell-Genomics/variants/after_300/UA3-118_variants.FINAL-dvh.txt", sep="\t", header=F, stringsAsFactors = F)
ua3.118$line = "ua3.118"
# Dvh genome annotation and gene-essentiality tables for Dvh-UA3-152-03.
essential = read.delim("~/Google Drive/Portals/Snytrophy_Portal/dvh-essentiality-data.txt", header=T, sep="\t", stringsAsFactors=F)
genome = read.delim("~/Google Drive/Portals/Snytrophy_Portal/dvu_genomeInfo.txt", header=T, sep="\t", stringsAsFactors=F)
# Original single-cell mutation calls.
# NOTE(review): read without stringsAsFactors = FALSE, unlike every other
# read above -- columns are factors on R < 4.0; confirm that is intended.
dvh03.original = read.delim("~/Google Drive/Single-Cell-Genomics/variants/singlecell/dvh-03-single-cell-Final-Merged-Variant-Filtered2.txt", sep="\t", header=F)
## Build canonical mutation names for every early-generation variant.
## Name format: <locus|IG>_<position><optional plasmid suffix>, where
## "Chromosome" is stripped and "pDV..." plasmid names are shortened to
## "p..." (e.g. DVU1234_567 or IG_567p).
generation.names = list(ua3.b,ua3.03, ua3.09, ua3.10, ua3.15, ua3.45, ua3.76, ua3.118)
generations = data.frame()
for(generation in generation.names){
  line = unique(generation$line)
  for(row in 1:length(generation$V1)){
    # Empty V11 (no locus tag) means an intergenic ("IG") variant.
    if("" %in% generation[row,"V11"]){
      name = sub("Chromosome", "", paste(paste("IG", generation[row, "V2"], sep="_"), sub("pDV", "p", generation[row, "V1"] ), sep = ""))
    } else {
      name =sub("Chromosome", "", paste(sub("DVU_", "DVU", paste(generation[row, "V11"], generation[row, "V2"], sep="_")), sub("pDV", "p", generation[row, "V1"] ), sep = ""))
    }
    # NOTE(review): rbind-in-a-loop is O(n^2); acceptable at this scale.
    generations = rbind(generations, cbind(generation = line, names = name))
  }
}
# Convert the long data frame into a per-generation list of name vectors.
generation.list = list()
for(line in unique(generations$generation)){
  generation.list[[line]] = as.vector(generations[which(generations$generation == line),"names"])
}
# Append a unique mutation ID (locus_position, no plasmid suffix) to each
# row of the original single-cell calls.
for(row in 1:length(dvh03.original$V1)){
  if("" %in% dvh03.original[row,"V11"]){
    dvh03.original[row,"ID"] = paste("IG", dvh03.original[row, "V2"], sep="_")
  } else {
    dvh03.original[row,"ID"] = sub("DVU_", "DVU", paste(dvh03.original[row, "V11"], dvh03.original[row, "V2"], sep="_"))
  }
}
# Mutation BED file with the same ID scheme.
# NOTE(review): dvh03.bedfile is not referenced again in this script.
dvh03.bedfile = read.delim("/Volumes/omics4tb/sturkarslan/dvh-coculture-rnaseq/dvh-single-cells/dvh-UA3-152-03-singlecell-variants-2callers-80percent-2cells_noan-bed.txt", sep="\t", header=F)
for(row in 1:length(dvh03.bedfile$V1)){
  dvh03.bedfile[row,"ID"] = sub("DVU_", "DVU", paste(dvh03.bedfile[row,"V7"], dvh03.bedfile[row,"V2"], sep="_"))
}
# Clonal-isolate naming table (maps pair number -> isolate name).
clonal.isolate.names = read.delim("/Volumes/omics4tb/sturkarslan/clonal-isolates/clonal_isolate_names.txt", sep="\t", header=T, stringsAsFactors = F)
# Clonal-isolate mutation calls; annotate each row with its clone metadata
# parsed from the isolate name "LINE.<x>.EPD.CLONE".
dvhUA3.isolates = read.delim("/Volumes/omics4tb/sturkarslan/clonal-isolates/results/dvh/clonal-isolates_2callers-filtered-variants.txt", sep="\t", header=F, stringsAsFactors = F)
dvhUA3.isolates$cloneno = sapply(dvhUA3.isolates$V18, function(x) strsplit(x, split = "_")[[1]][1])
for(i in 1:length(dvhUA3.isolates$V1)){
  id = dvhUA3.isolates[i,"cloneno"]
  dvhUA3.isolates[i,"clonename"] = clonename = clonal.isolate.names[which(clonal.isolate.names$pair == id),"isolate"][1]
  dvhUA3.isolates[i,"line"] = strsplit(clonename, split=".", fixed=T)[[1]][1]
  dvhUA3.isolates[i,"epd"] = strsplit(clonename, split=".", fixed=T)[[1]][3]
  dvhUA3.isolates[i,"clone"] = line = strsplit(clonename, split=".", fixed=T)[[1]][4]
  # Same canonical mutation-name construction as for the generations above.
  if("" %in% dvhUA3.isolates[i,"V11"]){
    dvhUA3.isolates[i,"ID"] = sub("Chromosome", "", paste(paste("IG", dvhUA3.isolates[i, "V2"], sep="_"), sub("pDV", "p", dvhUA3.isolates[i, "V1"] ), sep = ""))
  } else {
    dvhUA3.isolates[i,"ID"] = sub("Chromosome", "", paste(sub("DVU_", "DVU", paste(dvhUA3.isolates[i, "V11"], dvhUA3.isolates[i, "V2"], sep="_")), sub("pDV", "p", dvhUA3.isolates[i, "V1"] ), sep = ""))
  }
}
# Keep only variants from the UA3 line, EPD 03.
dvh03.isolates = dvhUA3.isolates[which(dvhUA3.isolates$line == "UA3" & dvhUA3.isolates$epd == "03"),]
# Single-cell mutation matrix (cells in columns; 1 = mutation present,
# 0 = absent, 3 = missing data -- see the counting loop below).
dvh03.matrix = read.delim("/Volumes/omics4tb/sturkarslan/scite_single_cells_syntrophy/dvh-UA3-152-03_noan_mutation_counts_verified_5cells_50nas_mutation_matrix.txt", sep="\t", header=F, stringsAsFactors = F)
# Mutation names, one per row of the matrix, in matching order.
dvh03.names = read.delim("/Volumes/omics4tb/sturkarslan/scite_single_cells_syntrophy/dvh-UA3-152-03_noan_mutation_counts_verified_5cells_50nas_mutation_names.txt", sep="\t", header=F)
# Attach gene / essentiality / COG / cell-count attributes to every
# mutation node and write the network-attribute table.
#
# Fixes relative to the previous version:
#   * All per-mutation attribute variables are (re)initialized at the top
#     of each iteration. Previously they were only assigned inside the
#     `if (name.us %in% ...)` branch, so a mutation missing from
#     dvh03.original silently inherited the PREVIOUS mutation's
#     type/impact/effect/cell counts (or errored on the first iteration).
#   * The row lookup into dvh03.names/dvh03.matrix uses exact equality
#     instead of grep(), which could partial-match longer mutation names.
#   * grep() calls that remain use fixed = TRUE so "." in generation labels
#     is matched literally rather than as a regex wildcard.
#   * Rows are accumulated in a pre-allocated list and bound once at the
#     end instead of growing a data.frame with rbind() in the loop (O(n^2)).
mut.names <- dvh03.names[[1]]
feature.rows <- vector("list", length(mut.names))
for (j in seq_along(mut.names)) {
  name <- as.character(mut.names[[j]])
  cat("Now analyzing ", name, "...\n")
  cat("\n")
  locus <- strsplit(name, split = "_", fixed = TRUE)[[1]][1]
  # dvh03.original IDs carry no plasmid "p" suffix; strip it for the lookup.
  # NOTE(review): sub() removes only the FIRST "p" -- assumed safe because
  # locus tags contain no lowercase "p"; confirm if naming schemes change.
  name.us <- sub("p", "", name)
  cat(name.us, "\n")

  # Defaults -- used whenever a lookup below finds nothing.
  type <- ""; impact <- ""; effect <- ""
  gene.name <- ""; gene.desc <- ""
  moyls4.1 <- ""; mols4 <- ""
  go <- ""; cog <- ""; accession <- ""; GI <- ""
  in.cells <- NA; notin.cells <- NA; NA.cells <- NA

  if (name.us %in% dvh03.original$ID) {
    # Mutation annotation columns (V6 = type, V7 = impact, V8 = effect).
    orig.row <- which(dvh03.original$ID == name.us)
    type <- as.character(dvh03.original[orig.row, "V6"])
    impact <- as.character(dvh03.original[orig.row, "V7"])
    effect <- as.character(dvh03.original[orig.row, "V8"])

    # Gene-level annotation; intergenic ("IG") loci have none.
    if (locus != "IG") {
      # sysName is assumed unique; take the first match defensively.
      g.row <- which(genome$sysName == locus)[1]
      if (!is.na(g.row)) {
        gene.name <- genome[g.row, "name"]
        gene.desc <- genome[g.row, "desc"]
        go <- paste(genome[g.row, "GO"])
        accession <- genome[g.row, "accession"]
        GI <- as.character(genome[g.row, "GI"])
        # COG category: a multi-letter code keeps only its first two
        # letters, colon-separated (matches the original behavior).
        cog.raw <- genome[g.row, "COGFun"]
        if (length(cog.raw) == 1 && !is.na(cog.raw) && cog.raw != "") {
          cog.letters <- strsplit(cog.raw, split = "")[[1]]
          cog <- if (length(cog.letters) > 1) {
            paste(cog.letters[1], cog.letters[2], sep = ":")
          } else {
            cog.raw
          }
        }
      }
      # Gene essentiality calls in the two growth conditions.
      e.row <- which(essential$locus_tag == locus)[1]
      if (!is.na(e.row)) {
        moyls4.1 <- essential[e.row, "WT.MOYLS4.1"]
        mols4 <- essential[e.row, "WT.MOLS4"]
      }
    }

    # Cells carrying (1), lacking (0), or missing data (3) for this
    # mutation, counted from the matching matrix row(s). Exact match --
    # grep() could partial-match a longer mutation name.
    mat.row <- which(dvh03.names$V1 == name)
    in.cells <- sum(dvh03.matrix[mat.row, ] == 1)
    notin.cells <- sum(dvh03.matrix[mat.row, ] == 0)
    NA.cells <- sum(dvh03.matrix[mat.row, ] == 3)
  } else {
    cat(name.us, "Not found..\n")
  }

  ## Clonal-isolate evidence for this mutation (UA3/03 line only).
  if (name %in% dvh03.isolates$ID) {
    clones.1 <- dvh03.isolates[which(dvh03.isolates$ID == name), "clone"]
    clones <- paste(clones.1, collapse = ":", sep = "")
    clone.count <- length(clones.1)
  } else {
    cat(name, "Not found in clonal isolates..\n")
    clones <- ""
    clone.count <- 0
  }

  ## Early-generation (EPD line) occurrences of this mutation.
  # NOTE(review): this is still a substring search within each generation's
  # name vector, so a name that is a prefix of a longer name could
  # over-count -- confirm against the naming scheme.
  early.gen <- paste(names(generation.list)[grep(name, generation.list, fixed = TRUE)],
                     sep = "", collapse = ":")
  gen.names <- c("ua3.b", "ua3.03", "ua3.09", "ua3.10", "ua3.15",
                 "ua3.45", "ua3.76", "ua3.118")
  gen.counts <- vapply(gen.names,
                       function(k) length(grep(k, early.gen, fixed = TRUE)),
                       integer(1))

  feature.rows[[j]] <- cbind(
    name = name,
    locus = locus, gene.name = gene.name, gene.desc = gene.desc,
    type = type, impact = impact, effect = effect,
    moyls4 = moyls4.1, mols4 = mols4,
    go = go, cog = cog, accession = accession, GI = GI,
    in.cells = in.cells, notin.cells = notin.cells, NA.cells = NA.cells,
    clones = clones, clone.count = clone.count, early.gen = early.gen,
    count.152 = gen.counts[["ua3.b"]], count.03 = gen.counts[["ua3.03"]],
    count.09 = gen.counts[["ua3.09"]], count.10 = gen.counts[["ua3.10"]],
    count.15 = gen.counts[["ua3.15"]], count.45 = gen.counts[["ua3.45"]],
    count.76 = gen.counts[["ua3.76"]], count.118 = gen.counts[["ua3.118"]]
  )
  cat(name, "|", locus, "|", gene.name, "|", gene.desc, "|", name.us, "|",
      type, "|", impact, "|", effect, "|", mols4, "|", moyls4.1, "|", go, "|",
      cog, "|", accession, "|", GI, "|", in.cells, "|", notin.cells, "|",
      NA.cells, "\n")
}
# Bind all rows at once (character matrix -> data frame, as before).
dvh03.features <- as.data.frame(do.call(rbind, feature.rows),
                                stringsAsFactors = FALSE)

write.table(dvh03.features, file="/Volumes/omics4tb/sturkarslan/scite_single_cells_syntrophy/dvh-UA3-152-03_noan_5cells_50nas-network-attributes.txt", sep="\t", row.names=F, quote=F)
|
45f85aed5b6fd4f00a57c55d97b683dd75f90f1f | 67c56336d7d1236fa57700af759fe75e9454ed44 | /util/var_exists.R | a24c63ea5fa8d7480d92af770682e9fd039e14a6 | [] | no_license | actongender/geam-report | b9fbec753a0fca1a4854d60d0abf00a55d3ca65b | 163f5b88dacf1ead8cb0397f1ad0e795ffbcb1eb | refs/heads/master | 2023-04-28T06:14:36.966608 | 2021-05-21T06:51:58 | 2021-05-21T06:51:58 | 275,156,339 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 823 | r | var_exists.R | #' @title Check if variable name exists in data frame.
#'
#' @description Useful to avoid error messages before generating figures and tables. Performs exact match.
#'
#' @param needle String of one or more column names.
#' @param stack String of all column names
#'
#' @return logit
#'
var_exists <- function(needle, stack="", data=NULL){
  # A length-1 empty-string `stack` means "no stack supplied"; compute that
  # test once instead of repeating it in every branch, and use the scalar,
  # short-circuiting && (the original's vector `&` could pass NA to if()).
  no_stack <- length(stack) == 1 && !is.na(stack[1]) && stack[1] == ""
  # Fallback order when no stack is given (matches the original):
  # 1. globally defined column names `cnames` (set in index.Rmd),
  # 2. the column names of `data`,
  # 3. the column names of the global `df.geam` data frame.
  if (no_stack && exists("cnames")){
    stack <- cnames
  } else if (no_stack && !is.null(data)){
    stack <- names(data)
  } else if (no_stack && is.null(data) && exists("df.geam")){
    stack <- names(df.geam)
  }
  # TRUE only when every requested name has an exact match in the stack.
  # (The original stored this in a local called `exists`, shadowing
  # base::exists(); a plain return expression avoids that.)
  all(needle %in% stack)
}
8109fb7cf25078be0c6284e2eb9868fd956789c1 | c605fe0c0bcdafc69b4c1b5ba84b0c1aa20f1b4d | /longitudinal/ch2.R | a4bcf48590fa8fc54ac3f33d1ec84ab35d5333ec | [] | no_license | Privlko/datacamp | cf2edddd7e1b8ece148e198e64e91948a766e0ec | 74467d52a96c4068d4772189ee125ef5b09489a3 | refs/heads/master | 2020-04-07T02:57:41.441283 | 2019-07-23T21:30:41 | 2019-07-23T21:30:41 | 157,997,025 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,083 | r | ch2.R | library(nlme)
library(lme4)
library(tidyverse)
# Inspect the rat growth data; BodyWeight comes from the nlme package
# (attached above via library(nlme)), so it is found on the search path.
View(BodyWeight)
# Spaghetti plot: one line per rat, with an overall smoothed trend on top.
ggplot(BodyWeight, aes(x = Time, y = weight)) +
  geom_line(aes(group = Rat), alpha = 0.6) +
  geom_smooth(se = FALSE, size = 2) +
  theme_bw(base_size = 16) +
  xlab("Number of Days") +
  ylab("Weight (grams)")
# Shift Time so the first measurement day becomes 0 and the intercept
# is interpretable as starting weight.
B1 <- mutate(BodyWeight, Time = Time - 1)
# Random-intercept model: each rat gets its own baseline weight,
# all rats share a common growth slope.
body_ri <- lmer(weight ~ 1 + Time + (1 | Rat), data = B1)
summary(body_ri)
# Same Time shift, plus a readable factor label for the diet groups.
B2 <- BodyWeight %>%
  mutate(Time = Time - 1,
         diet_f = paste("Diet", Diet, sep = " "))
view(B2)
# Random-slope model: rats vary in both baseline weight and growth rate,
# with diet as a fixed effect.
body_weight <- lmer(weight ~ 1 + Time + diet_f +
                    (1 + Time | Rat), data = B2)
summary(body_weight)
# Population-level predictions (re.form = NA ignores the random effects),
# averaged within each Time x Diet combination for plotting.
bodyweight_agg <- B2 %>%
  mutate(pred_values = predict(body_weight, re.form = NA)) %>%
  group_by(Time, Diet) %>%
  summarize(mean_diet_pred = mean(pred_values))
head(B2)
head(bodyweight_agg)
# Raw observations overlaid with the model-implied mean trajectory per diet.
ggplot(bodyweight_agg, aes(x = Time, y = mean_diet_pred, color = Diet)) +
  geom_point(data = BodyWeight, aes(x = Time, y = weight)) +
  geom_line(size = 2) +
  ylab("Body Weight") +
  xlab("Time (in days)") +
  theme_bw(base_size = 16)
|
8fa5f349df494e63e0888b156ed2ce7e44f980cd | aad7ad92d65f2f81957b6f0e6928c6f3fdbb2bd5 | /code/Rintro.R | a052fc5d19d030d8bc8ea8edc175bcd38da11e45 | [] | no_license | pr453/Pratiwi-Ridwan | a24b1be4413ef473c90ea7513d2794384cf13fad | d73e2882833dcc7db7d49de215a431745beaecc9 | refs/heads/master | 2021-05-08T19:47:37.407631 | 2018-01-30T17:25:27 | 2018-01-30T17:25:27 | 119,578,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,193 | r | Rintro.R |
# make sure we are in the right directory
getwd() # get working directory
setwd("..") # go up one level
getwd()
setwd("datasets") # down one level into datasets subdirectory
getwd()
setwd("../code") # up one and back down into code
getwd()
# reading in data
iaafdata <- read.csv("../datasets/iaaf_testosterone.csv",header=TRUE)
# Everything that exists in R is an object
# Everything that happens in R is a function call.
# - John Chambers
# see what's in the data object
iaafdata
class(iaafdata) # "class" of the object
names(iaafdata) # names of columns of data frame
str(iaafdata) # more detailed info
head(iaafdata) # first six rows
dim(iaafdata) # number of rows and columns
summary(iaafdata) # statistical summary
# variable types
typeof(TRUE)
typeof(1L)
typeof(1)
typeof(pi)
typeof("I am a string")
typeof("Integer")
# access a column of the data
iaafdata$event # event variable
iaafdata[2,3] # second row, third column
iaafdata[2,] # second row
iaafdata[ ,2 ] # second column
iaafdata["result_all"]
iaafdata[1,2] # first row, second column
iaafdata$event[1] # first entry of gallons variable
# get a subset of the rows based on one of the variables
iaafdata$units == "seconds"
iaafdata[iaafdata$units == "seconds" , ]
# a different way to get a subset of the data
subset(iaafdata, units == "meters")
# for homework 1, we need to compute probabilities
# R has a number of "named" distributions built in
# the naming convention is the following:
# r<distribution> simulate from <distribution>
# d<distribution> evaluate pmf or pdf
# p<distribution> evaluate cdf
# q<distribution> evaluate quantile function (inverse cdf)
?Distributions
?rnorm # documentation
rnorm()
rnorm(7)
# some arguments have no defaults - these are required
# arguments with defaults are not required but can be changed
# these are all the same!
rnorm(n = 7,mean = 3, sd = 2)
rnorm(7,3,2)
rnorm(7, sd = 2, mean = 3)
rnorm(7, 2, mean = 3)
rnorm(n = 7, 3, 2)
rnorm(sd = 2, n = 7, 3)
pnorm(-1.96)
pnorm(-1.645)
?dbinom
x <- 0:10
p <- dbinom(x,10,1/2)
cbind(x,p)
plot(x,p)
p <- dbinom(x,10,0.1)
plot(x,p)
|
b08b6923b65dea512f5b412c10f9c885240c91ce | 8d8d1d24986dce6b8a56ed8bcb71ada4b4eeb2bd | /man/animal_15.Rd | 736a29e2763b898701e0d035e6c98a14ab174cef | [
"MIT"
] | permissive | schochastics/networkdata | edaed94b788dcd925f55ae07f8a2d8b58d45ae8e | 535987d074d35206b6804e9c90dbfa4b50768632 | refs/heads/master | 2023-01-07T07:20:41.475574 | 2023-01-05T18:54:17 | 2023-01-05T18:54:17 | 226,346,857 | 142 | 17 | null | null | null | null | UTF-8 | R | false | true | 1,219 | rd | animal_15.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-animals.R
\docType{data}
\name{animal_15}
\alias{animal_15}
\title{Asianelephants Dominance (unweighted)}
\format{
list of igraph objects
}
\source{
https: //bansallab.github.io/asnr/
}
\usage{
animal_15
}
\description{
Species: \emph{Elephas maximus}
Taxonomic class: Mammalia
Population type: free-ranging
Geographical location: Uda Walawe National Park, Sri Lanka
Data collection technique: focal sampling
Interaction type: dominance
Definition of interaction: Indicators of dominance as well as subordination was included. If a series of interactions occurred during a particular event, the winners/losers were determined only on conclusion of the event, when individuals or groups moved apart.
Edge weight type: unweighted
Total duration of data collection: 206days
Time resolution of data collection (within a day): 1sec
Time span of data collection (within a day): 5.5 hours
Note:
}
\references{
de Silva, Shermin, Volker Schmid, and George Wittemyer. "Fissionโfusion processes weaken dominance networks of female Asian elephants in a productive habitat." Behavioral Ecology (2016): arw153.
}
\keyword{datasets}
|
7835c8dc000ba52394e8012371507a9659dde22b | c63fc5e6607e2cd5d62464a72c78b06191277eb6 | /R/adaptTradeDataNames.R | af133a6686b6d18cdcfb08578093f23f4dad1b32 | [] | no_license | SWS-Methodology/faoswsTrade | 6ce400e545fc805fe1f87d5d3f9d5ba256a8a78c | 2145d71a2fda7b63d17fa7461ec297f98b40756c | refs/heads/master | 2023-02-17T08:40:21.308495 | 2023-02-09T13:53:56 | 2023-02-09T13:53:56 | 55,507,302 | 4 | 1 | null | 2020-05-15T14:45:59 | 2016-04-05T12:49:03 | R | UTF-8 | R | false | false | 1,216 | r | adaptTradeDataNames.R | #' Standardise TL or ES variable names.
#'
#' Use the same variable names in both datasets.
#'
#' TL and ES data use different variable names for reporters, partners,
#' commodities, values, quantity, and year. This function gives these
#' variables the same name in both datasets.
#'
#' @param tradedata TL or ES trade data, passed as an object named
#'   \code{tldata} or \code{esdata}; the argument's name selects the
#'   renaming scheme.
#' @return TL or ES data with common names (TL will also have "qunit").
#' @import data.table
#' @export
adaptTradeDataNames <- function(tradedata) {
  if (missing(tradedata)) stop('"tradedata" should be set.')
  # The renaming scheme is chosen from the *name* of the object passed in.
  # deparse(substitute()) is the base-R equivalent of the original
  # lazyeval::expr_text() call, so that dependency is no longer needed.
  tradedataname <- tolower(deparse(substitute(tradedata)))
  if (tradedataname == "tldata") {
    # Tariff-line (TL) source column names.
    old_common_names <- c(
      "tyear", "rep", "prt",
      "flow", "comm", "tvalue",
      "weight", "qty")
  } else if (tradedataname == "esdata") {
    # Eurostat (ES) source column names.
    old_common_names <- c(
      "period", "declarant", "partner",
      "flow", "product_nc", "value_1k_euro",
      "qty_ton", "sup_quantity")
  } else {
    # Previously an unrecognised name surfaced later as an obscure
    # "object 'old_common_names' not found" error.
    stop('"tradedata" must be passed as an object named "tldata" or "esdata".')
  }
  new_common_names <- c("year", "reporter", "partner",
                        "flow", "hs", "value",
                        "weight", "qty")
  stopifnot(length(old_common_names) ==
              length(new_common_names))
  # data.table::setnames() renames the columns by reference.
  setnames(tradedata, old_common_names, new_common_names)
}
|
c507d4940cccbb35feaf9cedced60e4298759609 | b3ef2bf029d200fb43ec8530899208d5b5887887 | /edivalo-seedlings-traits.R | 884c4163f2ab2a809c32ba28092f1b21a29a2f6e | [] | no_license | cmwerner/edivalo-seedlings | f9c3266578f7246d5746541654c9715704d3a99e | a5cfecaef4946650b41a218bff702255bb720d84 | refs/heads/master | 2021-07-04T04:18:16.051888 | 2021-07-02T21:05:19 | 2021-07-02T21:05:19 | 238,467,918 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,243 | r | edivalo-seedlings-traits.R | # condense the seedling trait data into single average values
library(tidyverse)
library(GGally)
library(ggplot2)
### Biomass and Length ---------------
# main path
seedlings.size <- read.csv("data/seedling_traits_size.csv",
                           stringsAsFactors = FALSE)[,1:7]
# change the names of the newly added species so they match the natural_regen data
seedlings.size$species[seedlings.size$species == "lampur"] <- "lamsp"
seedlings.size$species[seedlings.size$species %in% c("lychflc", "lycflc")] <- "lycsp"
# View(seedlings.size)
# calculated metrics: root-shoot length and biomass, total biomass
seedlings.size$root.shoot.length <- seedlings.size$root.length / seedlings.size$shoot.length
seedlings.size$root.shoot.bm <- seedlings.size$root.bm / seedlings.size$shoot.bm
seedlings.size$total.bm <- seedlings.size$root.bm + seedlings.size$shoot.bm
# order the treatment factor so 'none' (control) is the reference level
seedlings.size$treatment <- factor(seedlings.size$treatment,
                                   levels = c('none','fert','shade'))
# average for each species (rather than by individual)
species.size <- seedlings.size %>%
  group_by(treatment, species) %>%
  dplyr::summarize(bm.sh = mean(shoot.bm),
                   bm.rt = mean(root.bm),
                   bm.tot = mean(total.bm),
                   len.sh = mean(shoot.length),
                   rt.sh.bm = mean(root.shoot.bm))
# calculated metric: fertilizer and shade responses of shoot length and biomass
# (one row per species; the '.none' columns become the control baselines)
species.size.2 <- species.size %>%
  pivot_wider(id_cols = species,
              names_from = treatment,
              values_from = c(bm.sh, len.sh, bm.rt, bm.tot, rt.sh.bm),
              names_sep = ".") %>%
  select(species, bm.sh = bm.sh.none, bm.rt = bm.rt.none, bm.tot = bm.tot.none,
         len.sh = len.sh.none, rt.sh.bm = rt.sh.bm.none,
         bm.sh.fert, bm.sh.shade, len.sh.fert, len.sh.shade)
# responses expressed as ratios of the treatment mean to the control mean
species.size.2$fert.diff.bm <- species.size.2$bm.sh.fert / species.size.2$bm.sh
species.size.2$fert.diff.len <- species.size.2$len.sh.fert / species.size.2$len.sh
species.size.2$shade.diff.bm <- species.size.2$bm.sh.shade / species.size.2$bm.sh
species.size.2$shade.diff.len <- species.size.2$len.sh.shade / species.size.2$len.sh
# trimming down columns to only our final metrics
species.size.3 <- species.size.2 %>%
  select(species, bm.tot, bm.sh, len.sh,
         bm.rt, rt.sh.bm,
         fert.diff.bm, fert.diff.len, shade.diff.bm, shade.diff.len)
# flag species on the toothpick list (used below to color the plots)
toothpick.list <- c('plalan','crebie','galalb','medfal','diacar','daucar')
species.size.3$toothpicks <- species.size.3$species %in% toothpick.list
### SLA ----------------
# main path
seedlings.leaf <- read.csv("data/seedling_traits_SLA.csv",
                           stringsAsFactors = FALSE) %>%
  select(group:size.mm2)
# View(seedlings.leaf)
seedlings.leaf$species <- tolower(seedlings.leaf$species)
# change the names of the newly added species so they match the natural_regen data
seedlings.leaf$species[seedlings.leaf$species == "lampur"] <- "lamsp"
seedlings.leaf$species[seedlings.leaf$species %in% c("lychflc", "lycflc")] <- "lycsp"
# notes: data frame is currently sorted by individuals (10/species) and leaf
# where 1 is the first emergent leaf, 2 is the second, and 3 is the third
# in some cases, the 3rd leaf was smaller than leaf 1+2 due to collection timing
# want to check any analyses we do to make sure they're robust to using 2 or 3 leaves
# We only have mass (and therefore SLA) for forbs, not for the grasses
# specific leaf area = leaf area per unit dry mass (mm^2 / mg)
seedlings.leaf$sla.mm2 <- seedlings.leaf$size.mm2/seedlings.leaf$biomass.mg
species.sla <- seedlings.leaf %>%
  group_by(group, species) %>%
  dplyr::summarize(leaf.weight = mean(biomass.mg),
                   leaf.area = mean(size.mm2),
                   sla = mean(sla.mm2), # using all three leaves
                   sla.2 = mean(sla.mm2[leaf %in% c(1,2)])) # using only the first two leaves
## add in to main trait df
species.size.4 <- species.size.3 %>%
  left_join(species.sla, by='species')
### C:N---------------
# main path
# keep only rows that actually have a carbon measurement
seedlings.cn <- read.csv("data/seedling_trait_CN.csv", stringsAsFactors = FALSE) %>%
  select(species = sample, c.perc, n.perc) %>%
  filter(!is.na(c.perc))
# change the names of the newly added species so they match the natural_regen data
seedlings.cn$species[seedlings.cn$species == "lampur"] <- "lamsp"
seedlings.cn$species[seedlings.cn$species %in% c("lychflc", "lycflc")] <- "lycsp"
# calculate C:N ratio
seedlings.cn$c.n.ratio <- seedlings.cn$c.perc / seedlings.cn$n.perc
## add in to main trait df
species.size.5 <- species.size.4 %>%
  left_join(seedlings.cn, by='species')
### Visualization Plots-----------------
## condensed to key traits and just the forbs
species.size.6 <- species.size.5 %>%
  filter(group=='forb') %>%
  select(species, bm.tot, len.sh, rt.sh.bm,
         bm.sh, bm.rt, sla, sla.2, c.n.ratio, toothpicks)
# simple visualization plots to see the range of variation
ggcorr(species.size.6)
# root bm, shoot bm, and total bm are strongly correlated,
# should probably just use total
# double-checking sla vs sla2, yes they are tightly correlated
ggplot(species.size.6, aes(x=sla, y = sla.2)) +
  geom_text(aes(label=species, color=toothpicks),hjust=0, vjust=0) +
  scale_color_manual(values=c('black','red'))
# further filtering based on correlation plots
species.size.7 <- species.size.6 %>%
  select(species, bm.tot, len.sh, rt.sh.bm, sla.2, c.n.ratio, toothpicks)
ggpairs(species.size.7, columns = c('bm.tot', 'len.sh', 'rt.sh.bm', 'sla.2','c.n.ratio'))
# total biomass is pos.correlated with shoot length, root:shoot, and neg. with SLA
# shoot length is also pos. correlated with C:N
# the only strong (R2 > .5) correlation is between total biomass and shoot length
# UPDATE: with the new species (Lamium pur, Cerastium hol. and Lychnis flc the strong correlation is no longer present)
## seeing where our toothpick species fall on these axes
ggplot(species.size.7, aes(x=bm.tot, y = rt.sh.bm)) +
  geom_text(aes(label=species, color=toothpicks),hjust=0, vjust=0) +
  scale_color_manual(values=c('black','red')) +
  geom_smooth(method = 'lm', se = FALSE)
ggplot(species.size.7, aes(x=bm.tot, y = len.sh)) +
  geom_text(aes(label=species, color=toothpicks),hjust=0, vjust=0) +
  scale_color_manual(values=c('black','red')) +
  geom_smooth(method = 'lm', se = FALSE)
ggplot(species.size.7, aes(x=sla.2, y = c.n.ratio)) +
  geom_text(aes(label=species, color=toothpicks),hjust=0, vjust=0) +
  scale_color_manual(values=c('black','red')) +
  geom_smooth(method = 'lm', se = FALSE)
# surprisingly positive correlation between fert and shade responses
# I'm not convinced on this, I think it's possible it's an artifact of
# how the data were collected (counting germination days etc)
ggplot(species.size.3, aes(x=fert.diff.bm, y = shade.diff.bm)) +
  geom_text(aes(label=species, color=toothpicks),hjust=0, vjust=0) +
  scale_color_manual(values=c('black','red')) +
  geom_smooth(method = 'lm', se = FALSE)
# somewhat different pattern for the heights
# some respond to shade by growing taller--this is its own trait
ggplot(species.size.3, aes(x=fert.diff.len, y = shade.diff.len)) +
  geom_text(aes(label=species, color=toothpicks),hjust=0, vjust=0) +
  scale_color_manual(values=c('black','red')) +
  geom_smooth(method = 'lm', se = FALSE)
|
d2f82af12155e9280d42758d9396e394a15b7932 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613100339-test.R | 45ea488fbfac421309d89ecfcfb108903df63d01 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 123 | r | 1613100339-test.R | testlist <- list(b = c(0L, 0L, 659968L, 1376511L, 255L))
# Regression/fuzz check: feed the byte vector in `testlist` (defined above)
# to mcga's internal ByteVectorToDoubles routine and print the result's
# structure.
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result)
a59ab3bb12aff868d2ca4d78d6b0c9bcf6f72def | 4967f91b14024b361a9bd23ebad17be23775c0b3 | /TSA Crime.R | 07ca93053bde2e32236c2b8a450e66841b914212 | [] | no_license | Mrinal-P/Boston-Crime-Rate | 20234b7ac35807676dd07972c65af4d0986fbf4e | c2043ee829fc885dac93dbd48d4c2b26151ee552 | refs/heads/master | 2020-03-24T19:13:52.853617 | 2018-07-30T18:21:19 | 2018-07-30T18:21:19 | 142,911,004 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,820 | r | TSA Crime.R | # Boston Crime Rate- TIME SERIES ANALYSIS
# Data visualization
# NOTE(review): this script depends on xlsx/zoo/ggplot2 and a local
# spreadsheet path; it is meant to be run interactively, top to bottom.
library(xlsxjars)
library(xlsx)
violent <-read.xlsx("E:\\NEU\\IE 7275 Data mining\\R\\case\\violent crime.xlsx", sheetIndex = 3, header = T,stringsAsFactors = F)
str(violent)
# Parsing the date format
library(zoo)
violent$DATE1 <- as.yearmon(violent$DATE,"%Y/%m")
# rename column 5 (presumably the aggregate total -- confirm against the sheet)
colnames(violent)[5] <- "Violent.Crime"
# Line chart
library(ggplot2)
# Time series plot using ggplot
ggplot(violent, aes(x=DATE1)) +
  geom_line(aes(y=AGGRAVATED.ASSAULT, color="AGGRAVATED.ASSAULT"))+
  geom_line(aes(y=ROBBERY, color="ROBBERY"))+
  geom_line(aes(y=HOMICIDE, color="HOMICIDE"))+
  geom_line(aes(y=Violent.Crime, color="Violent.Crime"))+
  scale_color_manual(values = c("firebrick2", "black", "cornflowerblue", "orange"))+
  labs(title =" Violent Crime 2012/07~ 2017/07", x = "", y = "Counts")
# Line plot for aggregate violent crime
ggplot(violent, aes(x=DATE1)) +
  geom_line(aes(y=Violent.Crime, color="Violent.Crime"))+
  scale_color_manual(values = c("orange"))+
  labs(title =" Violent.Crime 2012/07~ 2017/07", x = "", y = "Counts")
# Line plot for aggravated assault over time
ggplot(violent, aes(x=DATE1)) +
  geom_line(aes(y=AGGRAVATED.ASSAULT, color="AGGRAVATED.ASSAULT"))+
  scale_color_manual(values = c("firebrick2"))+
  labs(title =" AGGRAVATED.ASSAULT 2012/07~ 2017/07", x = "", y = "Counts")
# Line plot for robbery over time
ggplot(violent, aes(x=DATE1)) +
  geom_line(aes(y=ROBBERY, color="ROBBERY"))+
  scale_color_manual(values = c("cornflowerblue"))+
  labs(title =" ROBBERY 2012/07~ 2017/07", x = "", y = "Counts")
# Line plot for homicide over time
ggplot(violent, aes(x=DATE1)) +
  geom_line(aes(y=HOMICIDE, color="HOMICIDE"))+
  scale_color_manual(values = c("black"))+
  labs(title =" HOMICIDE 2012/07~ 2017/07", x = "", y = "Counts")
# Bar plot for violent crime over the seasons
str(violent)
sea <- c("spring","summer","fall","winter")
# NOTE(review): `violent.season` (and its season/Violent.crime columns) is
# never created in this script, and the trailing comma in aes() leaves an
# empty argument -- this block fails unless those objects are built
# elsewhere first.
ggplot(violent.season, aes(x=season, y=Violent.crime,))+
  theme_bw()+
  geom_bar(stat = "identity",position="stack", width = 0.5)+
  scale_x_discrete(limits = sea)+
  scale_fill_manual(values=c('#fee8c8','#fdbb84','#e34a33'))+
  labs(title =" Violent Crime between seasons", x = "", y = "value")
# Bar plot for violent crime over the week
library(reshape2)
# NOTE(review): `violent.week` is also not defined in this script.
dat = melt(violent.week[-5], id.var="DAY_WEEK", variable.name="status")
week.or <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
ggplot(dat, aes(x=DAY_WEEK, y=value, fill=status))+
  theme_bw()+
  geom_bar(stat = "identity",position="stack", width = 0.5)+
  scale_x_discrete(limits = week.or)+
  scale_fill_manual(values=c("gray65","gray38","black"))+
  labs(title =" Violent Crime between Days of the week", x = "", y = "value")
# Bar plot for violent crime in districts
library(reshape2)
# NOTE(review): `district` is not defined in this script either.
dat = melt(district[-5], id.var="district", variable.name="status")
ggplot(dat, aes(x=district, y=value, fill=status))+
  theme_bw()+
  geom_bar(stat = "identity",position="stack")+
  scale_fill_manual(values=c(AGGRAVATED.ASSAULT="gray65", ROBBERY="gray38", HOMICIDE="black"))+
  labs(title =" Violent Crime between districts", x = "districts", y = "value")
# Time series plot using ts()
Violent.Crime=ts(violent$Violent.Crime)
# Data partition 60%40%
violent.ts <- ts(Violent.Crime, start = c(2012,7), end = c(2017,6), frequency = 12)
train <- window(violent.ts, start = c(2012,7), end = c(2015,6), frequency = 12)
validation <- window(violent.ts, start = c(2015,7), end = c(2017,6), frequency = 12)
# Time series plot for training set
plot.ts(train, main="Time series plot of violent crime from 201207~201506")
# ACF PACF plot
opar <- par(no.readonly = TRUE)
par(mfrow = c(1,2))
acf(train, main="ACF")
pacf(train,main="PACF")
par(opar)
# ARIMA model selection
library(forecast)
auto.arima(train)
#ARIMA(0,0,0)(1,1,0)[12]
# Fit the model suggested by auto.arima() above.
train_model <- arima0(train, order = c(0,0,0),seasonal = list(order =c(1,1,0),
                                                              period=12))
# Residual diagnostic
# NOTE(review): this assignment masks the stats::residuals() generic
# for the rest of the session.
residuals <- train_model$residuals
par(mfrow = c(2,2))
ts.plot(residuals)
abline(h=0)
qqnorm(residuals)
qqline(residuals)
acf(residuals)
pacf(residuals)
# Test for Stationarity
library(TSA)
adf.test(residuals)
pp.test(residuals)
kpss.test(residuals)
# Test for normality of residuals
shapiro.test(residuals)
# Test for independence of residuals
Box.test(residuals, type = "Box-Pierce")
Box.test(residuals, type = "Ljung-Box")
# Developing a predictive model
pred <- predict(train_model,n.ahead = 24)
pred$pred
# Side-by-side series of observed validation values and the forecasts.
pred.real.ts <- ts(data.frame(validation, pred=pred$pred),start = c(2015,7), end = c(2017,6), frequency = 12)
pred.real.ts
library(ggfortify)
# NOTE(review): `xlim = ,` is an empty argument apparently left in by mistake.
autoplot(pred.real.ts, facets = FALSE,ts.linetype = 1,xlim = , ylab = "Counts", main = "Prediction vs. real value from 201507~ 201706")
# Performance evaluation
library(Metrics)
mae(validation,pred$pred)
3006b50ead16fc62150f1c6bc4e0afc40e184643 | f6ec5794dce638094fe6a5d0de698415db12a24c | /R/ols-f-test.R | 87bcb979b323cbab69ce6d0314d1f54cc127fb6e | [
"MIT"
] | permissive | FantasticFreddy/olsrr | d4507a26a1dc4aa7ec8b0619e1c09c4147db2c89 | 5c638713d2d365623e68daedc428290c84611d61 | refs/heads/master | 2020-03-22T00:08:29.851414 | 2018-06-03T16:12:21 | 2018-06-03T16:12:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,405 | r | ols-f-test.R | #' F test
#'
#' @description
#' Test for heteroskedasticity under the assumption that the errors are
#' independent and identically distributed (i.i.d.).
#'
#' @param model An object of class \code{lm}.
#' @param fitted_values Logical; if TRUE, use fitted values of regression model.
#' @param rhs Logical; if TRUE, specifies that tests for heteroskedasticity be
#' performed for the right-hand-side (explanatory) variables of the fitted
#' regression model.
#' @param vars Variables to be used for for heteroskedasticity test.
#' @param ... Other arguments.
#'
#' @return \code{ols_test_f} returns an object of class \code{"ols_test_f"}.
#' An object of class \code{"ols_test_f"} is a list containing the
#' following components:
#'
#' \item{f}{f statistic}
#' \item{p}{p-value of \code{f}}
#' \item{fv}{fitted values of the regression model}
#' \item{rhs}{names of explanatory variables of fitted regression model}
#' \item{numdf}{numerator degrees of freedom}
#' \item{dendf}{denominator degrees of freedom}
#' \item{vars}{variables to be used for heteroskedasticity test}
#' \item{resp}{response variable}
#' \item{preds}{predictors}
#'
#' @references
#' Wooldridge, J. M. 2013. Introductory Econometrics: A Modern Approach. 5th ed. Mason, OH: South-Western.
#'
#' @section Deprecated Function:
#' \code{ols_f_test()} has been deprecated. Instead use \code{ols_test_f()}.
#'
#' @examples
#' # model
#' model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
#'
#' # using fitted values
#' ols_test_f(model)
#'
#' # using all predictors of the model
#' ols_test_f(model, rhs = TRUE)
#'
#' # using fitted values
#' ols_test_f(model, vars = c('disp', 'hp'))
#'
#' @family heteroskedasticity tests
#'
#' @importFrom stats pf
#'
#' @export
#'
# S3 generic; the actual work is done in ols_test_f.default() below.
ols_test_f <- function(model, fitted_values = TRUE, rhs = FALSE, vars = NULL, ...) UseMethod("ols_test_f")
#' @export
#'
ols_test_f.default <- function(model, fitted_values = TRUE, rhs = FALSE, vars = NULL, ...) {
  # Validate the inputs (check_* helpers are defined elsewhere in the package).
  check_model(model)
  check_logic(fitted_values)
  check_logic(rhs)
  # Supplying specific variables overrides the fitted-values variant.
  if (length(vars) > 0) {
    check_modelvars(model, vars)
    fitted_values <- FALSE
  }
  # Model frame: response in column 1, explanatory variables in the rest.
  l <- avplots_data(model)
  # Predictor names (magrittr::extract(-1) drops the response name).
  nam <-
    l %>%
    names() %>%
    extract(-1)
  # Response name (first column of the model frame).
  resp <-
    l %>%
    names() %>%
    extract(1)
  n <- nrow(l)
  # Pick the auxiliary regression: all RHS variables, the fitted values,
  # or a user-supplied subset of variables; ftest_result() turns the
  # summary.lm fstatistic into (f, numdf, dendf, p).
  if (rhs) {
    fitted_values <- FALSE
    k <- frhs(nam, model, n, l)
    result <- ftest_result(k)
  } else {
    if (fitted_values) {
      k <- ffit(model)
      result <- ftest_result(k)
    } else {
      k <- fvar(n, l, model, vars)
      result <- ftest_result(k)
    }
  }
  # Assemble the classed result object consumed by print.ols_test_f().
  out <- list(
    f = result$f,
    p = result$p,
    numdf = result$numdf,
    dendf = result$dendf,
    fv = fitted_values,
    rhs = rhs,
    vars = vars,
    resp = resp,
    preds = nam
  )
  class(out) <- "ols_test_f"
  return(out)
}
#' @export
#' @rdname ols_test_f
#' @usage NULL
#'
ols_f_test <- function(model, fitted_values = TRUE, rhs = FALSE, vars = NULL, ...) {
  .Deprecated("ols_test_f()")
  # Forward to the replacement so legacy callers still get a result;
  # previously this wrapper discarded all arguments and returned NULL.
  ols_test_f(model, fitted_values = fitted_values, rhs = rhs, vars = vars, ...)
}
#' @export
#'
# Print method for "ols_test_f" objects; formatting is delegated to the
# package-internal print_ftest() helper.
print.ols_test_f <- function(x, ...) {
  print_ftest(x)
}
# Auxiliary regression of scaled squared residuals on all explanatory
# variables of the fitted model; returns the summary.lm fstatistic
# vector c(value, numdf, dendf).
frhs <- function(nam, model, n, l) {
  # NOTE(review): with the original pipe order this evaluates to
  # (RSS / n) - 1; confirm RSS / (n - 1) was not the intent.
  var_resid <- model_rss(model) / n - 1
  # Squared residuals scaled by the residual-variance factor.
  ind <- residuals(model)^2 / var_resid
  # Attach them to the model frame and drop the response (column 1),
  # leaving only the explanatory variables as predictors.
  mdata <- cbind(l, ind)[-1]
  summary(lm(ind ~ ., data = mdata))$fstatistic
}
# Auxiliary regression of scaled squared residuals on a user-chosen
# subset of explanatory variables; returns the summary.lm fstatistic
# vector c(value, numdf, dendf).
fvar <- function(n, l, model, vars) {
  # NOTE(review): with the original pipe order this evaluates to
  # (RSS / n) - 1; confirm RSS / (n - 1) was not the intent.
  var_resid <- model_rss(model) / n - 1
  # Squared residuals scaled by the residual-variance factor.
  ind <- residuals(model)^2 / var_resid
  # Keep only the requested variables (column 1 of `l` is the response).
  dl <- l[-1][, vars]
  dk <- as.data.frame(cbind(ind, dl))
  summary(lm(ind ~ ., data = dk))$fstatistic
}
# Auxiliary regression of mean-scaled squared residuals on the model's
# fitted values; returns the summary.lm fstatistic vector
# c(value, numdf, dendf).
ffit <- function(model) {
  # Fitted values serve as the lone explanatory variable.
  pred <- fitted(model)
  # Squared residuals, scaled by their mean, form the response.
  sq_resid <- model$residuals^2
  scaled_resid <- sq_resid / (sum(sq_resid) / length(pred))
  summary(lm(scaled_resid ~ pred))$fstatistic
}
# Convert a summary.lm `fstatistic` component -- c(value, numdf, dendf) --
# into a list with the F value, both degrees of freedom, and the
# upper-tail p-value.
ftest_result <- function(k) {
  f <- k[[1]]
  numdf <- k[[2]]
  dendf <- k[[3]]
  # Use the literal FALSE rather than `F`, which is reassignable.
  p <- pf(f, numdf, dendf, lower.tail = FALSE)
  list(f = f, numdf = numdf, dendf = dendf, p = p)
}
|
c6d21796a2309d550b3f7902b2a7107e2f270a70 | a9063297b1b78391a0def226296df30c380f553d | /book/packt/Big.Data.Analytics.with.R.and.Hadoop/3282OS_05_Codes/codes/Webpages_categorization_analytics1/codes/RGA_dataextraction.R | b686a635d631646eadb2ae298a0bf78cd4001810 | [] | no_license | xenron/sandbox-da-hadoop | 4f0171b1f3277d7ff9f79c82e5a4475ed9aed183 | 2ae0092627aff6e61e82bf8d53b7ecd4977af2de | refs/heads/master | 2022-10-17T18:47:03.445080 | 2016-08-24T06:02:39 | 2016-08-24T06:02:39 | 32,766,392 | 2 | 0 | null | 2022-10-05T18:40:14 | 2015-03-24T00:22:56 | Java | UTF-8 | R | false | false | 1,903 | r | RGA_dataextraction.R | ================================================================================
# Book     - Big Data Analytics with R and Hadoop
# Book URL - https://www.packtpub.com/big-data-analytics-with-r-and-hadoop/book
# Chapter  - 4 Using Hadoop Streaming with R
# Author   - Vignesh Prajapati
# Contact  - a. email    -> vignesh2066@gmail.com
#            b. LinkedIn -> http://www.linkedin.com/in/vigneshprajapati
# ================================================================================
# Loading the RGoogleAnalytics library.
# Fix: the original called require("RGoogleAnalyics") -- the package name was
# misspelled (missing a "t"), so the package was never attached, and require()
# only warns on failure; library() fails fast instead.
library(RGoogleAnalytics)
# Step 1. Authorize your account and paste the accesstoken
query <- QueryBuilder()
access_token <- query$authorize()
# Step 2. Create a new Google Analytics API object
ga <- RGoogleAnalytics()
# To retrieve profiles from Google Analytics
ga.profiles <- ga$GetProfileData(access_token)
# List the GA profiles
ga.profiles
# Step 3. Setting up the input parameters
profile <- ga.profiles$id[3]
startdate <- "2010-01-08"
enddate <- "2013-08-23"
dimension <- "ga:date,ga:source,ga:pageTitle,ga:pagePath"
metric <- "ga:visits"
#filter <-
#segment <-
# NOTE(review): this masks base::sort for the rest of the session; it is
# only read by the commented-out `sort =` argument below.
sort <- "ga:visits"
maxresults <- 100099
# Step 4. Build the query string, use the profile by setting its index value
query$Init(start.date = startdate,
           end.date = enddate,
           dimensions = dimension,
           metrics = metric,
           #sort = sort,
           #filters="",
           #segment="",
           max.results = maxresults,
           table.id = paste("ga:",profile,sep="",collapse=","),
           access_token=access_token)
# Step 5. Make a request to get the data from the API
ga.data <- ga$GetReportData(query)
# Look at the returned data
head(ga.data)
# Writing extracted Google Analytics data to csv file
write.csv(ga.data,"webpages.csv", row.names=FALSE)
|
365d56e12841650cb759d1ba90c6f09d0df72662 | 1c74d653f86b446a9cd87435ce3920977e2cb109 | /packages/seewave/test.R | eb2ba73c7db653b1c874aa4a4ef55e9abf7266e5 | [
"Apache-2.0"
] | permissive | rstudio/shinyapps-package-dependencies | f1742d5cddf267d06bb895f97169eb29243edf44 | 8d73ce05438f49368b887de7ae00ff9d2681df38 | refs/heads/master | 2023-07-22T08:53:56.108670 | 2023-07-12T13:58:58 | 2023-07-12T13:58:58 | 22,746,486 | 81 | 76 | NOASSERTION | 2023-07-12T13:59:00 | 2014-08-08T04:57:26 | R | UTF-8 | R | false | false | 137 | r | test.R | install.packages("seewave", repos="http://cran.at.r-project.org/")
# Smoke test for the seewave package (installed by the line above):
# call a function as a test case
library(seewave)
# tico: example sound recording bundled with seewave
data(tico)
# Acoustic Complexity Index of the recording
ACI(tico)
f945f6f6f7d7dfff2f8a0f1bb46660f08ad79d7e | c13cb27a79df9353978edb94fdb2376cd6e2dd96 | /hello.R | cffd50324654fffd4d7a337f6b262cfce0cc2353 | [] | no_license | jvmurray/Rtest1 | d6a12318cc10ef792e80a5f7dff34195c5c41d5e | 9fc4c74e25c81f47e97749e4543e275036fc7ecd | refs/heads/master | 2022-12-23T05:10:52.518199 | 2020-09-04T18:34:53 | 2020-09-04T18:34:53 | 292,919,379 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 245 | r | hello.R | n=100000
# Monte Carlo estimate of P(sum of three fair dice < 5).
# (`n`, the number of simulated rolls, is defined above.)
die1 <- sample(1:6, n, replace = TRUE)
die2 <- sample(1:6, n, replace = TRUE)
die3 <- sample(1:6, n, replace = TRUE)
diesum <- die1 + die2 + die3
# Vectorized success proportion replaces the original element-wise counting
# loop (`for (i in 1:n) if (diesum[i] < 5) ...`), and `<-` replaces `=`.
prob <- mean(diesum < 5)
print(prob)
# This is a Change
ae0eb7a34f2a9b2536275a49da06306f0aeaa0ba | dd80dace8fda16b5cfee3d6d0faf48552b3b3d3b | /plot1.R | 90e4da2df1d0dc6d7599855824b3583c3c697c54 | [] | no_license | ZhPrav/ExData_Plotting1 | 38a67b90e6c7ace1dfd29b27fba42d946527ac0f | 46438eb18762a162db0dddf64bfa720477a12604 | refs/heads/master | 2020-03-09T08:44:53.507999 | 2018-04-09T01:24:04 | 2018-04-09T01:24:04 | 128,696,497 | 0 | 0 | null | 2018-04-09T01:12:31 | 2018-04-09T01:12:30 | null | UTF-8 | R | false | false | 1,247 | r | plot1.R |
## Download the household power consumption data (once) and draw a
## histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Create a directory
## NOTE(review): `|` works here on these scalar tests, but the scalar
## short-circuit operator `||` is the conventional choice inside if().
if (!file.exists("./household_power_consumption") | !file.exists("./household_power_consumption/household_power_consumption.txt") ) {
  dir.create("./household_power_consumption")
  ## download file
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "./household_power_consumption/household_power_consumption.zip")
  ## unzip
  zipF<- "./household_power_consumption/household_power_consumption.zip"
  outDir<-"./household_power_consumption"
  unzip(zipF,exdir=outDir)
}
## load into a data.frame
## NOTE(review): the `encoding="utf-8 . "` value looks like a paste accident;
## non-numeric entries in the file are handled only by the
## character -> numeric round trip below (they become NA with a warning).
df <- read.table("./household_power_consumption/household_power_consumption.txt", header=TRUE, sep=";", quote="", comment.char="", encoding="utf-8 . ")
## convert Date column to date format
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
## filter by dates
df1 <- df[df$Date %in% as.Date(c('2007-02-01', '2007-02-02')),]
## release the full table
df <- NULL
df2 <- df1[,1:3]
## Global_active_power was read as text; coerce to numeric
df2$Global_active_power <- as.character(df2$Global_active_power)
df2$Global_active_power <- as.numeric(df2$Global_active_power)
hist(df2$Global_active_power, col = "red", main = " Global Active Power", xlab = "Global Active Power (killowats)", ylab = "Frequency")
## copy the on-screen plot to a PNG file
dev.copy(png, "plot1.png")
dev.off()
|
93ea2334bd19be944693595870d58510057124b1 | 98a871f8f4c41311fec4ecd24b95cb43fdddf209 | /R/update_aqi_history.R | 39d9fc84dad9ff268e7dd35f867c3b904786bdab | [] | no_license | MPCA-air/aqi-dashboard | 90b906df62d2c06e8a1cbee0c809ccea57f2e152 | 23c65c7c49510f723d9ee6450ef359024adbcf33 | refs/heads/master | 2021-01-01T20:39:04.091594 | 2017-09-20T23:47:13 | 2017-09-20T23:47:13 | 98,908,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,133 | r | update_aqi_history.R | # Update annual AQI history charts
# Load fonts
extrafont::loadfonts(device="win")
#extrafont::font_import()
#import_roboto_condensed()
library(tidyverse)
library(hrbrthemes)
library(magick)
# AQI color functions
setwd("X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//")
source("Web/aqi-watch/R/aqi_convert.R")
# Load history results
setwd("X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//")
history <- read_csv(paste0("X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//Verification//AQI History//Archive//", Sys.Date()-1, " AQI history.csv"))
# Load yesterday's results
yesterday <- read_csv(paste0("Current forecast//", Sys.Date() - 1, "_AQI_observed", ".csv"))
# Join tables
history <- left_join(select(history, -observation_recorded), select(yesterday, -date, -air_monitor, -count_ozone_obs, -count_pm25_obs))
history$aqi_max_date <- as.Date(history$aqi_max_date, "%m/%d/%Y")
# Add new value to correct AQI color
history <- history %>%
rowwise() %>%
mutate(max_aqi = max(c(-1,
conc2aqi(obs_max_ozone_8hr_ppb, "ozone"),
conc2aqi(obs_pm25_24hr_ugm3, "pm25")), na.rm = T),
aqi_yellow = aqi_yellow + (max_aqi > 50 & max_aqi < 101),
aqi_green = aqi_green + (max_aqi > -1 & max_aqi < 51),
aqi_orange = aqi_orange + (max_aqi > 100),
aqi_max_date = ifelse(!is.na(max_aqi) & (max_aqi > aqi_max), as.character(Sys.Date()-1), as.character(aqi_max_date)),
aqi_max = max(c(aqi_max, max_aqi), na.rm = T))
# Change name of column indicating if observation was recorded
names(history)[grep("aqsid", names(history))] <- "observation_recorded"
# Drop concentration columns
history <- select(history, -max_aqi, -obs_pm25_24hr_ugm3, -obs_max_ozone_8hr_ppb)
# Replace -Inf with NAs
history[history == -Inf] <- NA
# Save table
if(T) {
write_csv(history,
"X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//Verification//AQI History//2017 AQI history.csv")
# Archive
write_csv(history,
paste0("X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//Verification//AQI History//Archive//", Sys.Date(), " AQI history.csv"))
}
# Select 24 sites
## Drop PM2.5 only sites, and duplicates
history <- filter(history, !short_name %in% c("Marshall2",
"Fond_Du_Lac2",
"Voyageurs",
"Winona_pm",
"Ramsey_Health",
"St_Louis_Park",
"Duluth_WDSE",
#"Cedar_Creek",
"Stanton"))
# Update site names
#write_csv(hist_names, "Verification/AQI History/Names for history charts.csv")
hist_names <- read_csv("Verification/AQI History/Names for history charts.csv")
history <- left_join(history, hist_names)
# Flip to tall
history <- gather(data = history, key = aqi_color, value = aqi_days, na.rm = FALSE, aqi_yellow, aqi_green, aqi_orange)
# Calculate percent of days for each color
history <- history %>%
group_by(hist_name) %>%
mutate(total_days = sum(aqi_days),
aqi_days_pct = aqi_days / sum(total_days),
aqi_label = ifelse(aqi_days < 100, aqi_days, paste(aqi_days, "days")),
aqi_pos = ifelse(aqi_color == "aqi_green", 110,
ifelse(aqi_color == "aqi_yellow", 4 + 0.3 * aqi_days + max(0, aqi_days[aqi_color == "aqi_orange"], na.rm = T),
4 + 0.3 * aqi_days)))
# Find max for chart scaling
max_aqi_days <- max(history$total_days, na.rm = T) + 15
# Save Max AQI table
max_data <- filter(history, aqi_days > 0, aqi_color == "aqi_green") %>%
arrange(desc(aqi_color), -aqi_days) %>%
select(`Air Monitor`, site_catid, aqi_max) %>%
mutate(aqi_color = aqi2color(aqi_max))
write_csv(max_data,
"X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//Verification//AQI History//2017_site_max_aqi.csv")
# Split sites into 4 groups of 6
for(i in 1:4) {
# Select next 5 sites
sub_data <- filter(history,
aqi_days > 0,
hist_name %in% arrange(filter(history, aqi_color == "aqi_yellow"), -aqi_days)$hist_name[(i*6-5):(i*6)]) %>%
arrange(desc(aqi_color), -aqi_days)
sub_data$hist_name <- factor(sub_data$hist_name, levels = rev(unique(sub_data$hist_name)))
# Plot colors
plot_colors <- c("#53BF33","#DB6B1A","#FFEE00")[c(T, "aqi_orange" %in% sub_data$aqi_color, "aqi_yellow" %in% sub_data$aqi_color)]
text_colors <- c("white","grey50","grey50")[c(T, "aqi_orange" %in% sub_data$aqi_color, "aqi_yellow" %in% sub_data$aqi_color)]
# Adjust low numbers to make room for labels
sub_data$aqi_days_bump <- sub_data$aqi_days + 4
# Create bar charts
# Days chart
day_chart <-
ggplot(sub_data, aes(hist_name, aqi_days_bump)) +
geom_bar(stat="identity", aes(fill = aqi_color), position = position_stack(reverse = F), alpha = 0.74) +
geom_text(size = 3.7, fontface = "bold",
aes(label = ifelse(aqi_color %in% c("aqi_green", "aqi_yellow"), aqi_label, ""),
y = aqi_pos, color= aqi_color)) +
coord_flip() +
theme_ipsum(grid="X", base_size = 10) +
scale_fill_manual(values = plot_colors) +
scale_color_manual(values = text_colors) +
ylim(c(0, max_aqi_days)) +
#scale_x_discrete(labels = percent_format()) +
guides(fill = F, color = F) +
labs(x = NULL, y = NULL) +
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
panel.grid.major = element_blank(),
plot.margin = unit(c(0,0,0,0.5), "lines"))
# Save to PNG image
png(paste0("X:/Agency_Files/Outcomes/Risk_Eval_Air_Mod/_Air_Risk_Evaluation/Staff Folders/Dorian/AQI/Web/aqi-dashboard/images/history", i, ".png"),
width = 1300, height = 890, res = 300)
#grid.arrange(day_chart)
print(day_chart)
dev.off()
# % chart
if(F) {
pct_chart <-
ggplot(sub_data, aes(hist_name, aqi_days_pct)) +
geom_bar(stat="identity", aes(fill = aqi_color), position = position_stack(reverse = T), alpha = 0.74) +
geom_text(size = 3, aes(label = ifelse(aqi_color == "aqi_green", paste0(sprintf("%.0f", aqi_days_pct*100),"%"),""),
y = (1 - 0.52 * aqi_days_pct)), color= "white") +
coord_flip() +
theme_ipsum(grid="X") +
scale_fill_manual(values = plot_colors) +
#scale_color_manual(values = text_colors) +
#scale_x_discrete(labels = percent_format()) +
guides(fill = F, color = F) +
labs(x = NULL, y = NULL) +
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
panel.grid.major = element_blank(),
plot.margin = unit(c(0,0,0,0.5), "lines"))
#print(pct_chart)
}
}
# Combine charts into a single GIF image
img1 <- image_read(paste0("X:/Agency_Files/Outcomes/Risk_Eval_Air_Mod/_Air_Risk_Evaluation/Staff Folders/Dorian/AQI/Web/aqi-dashboard/images/history1.png"))
img2 <- image_read(paste0("X:/Agency_Files/Outcomes/Risk_Eval_Air_Mod/_Air_Risk_Evaluation/Staff Folders/Dorian/AQI/Web/aqi-dashboard/images/history2.png"))
img3 <- image_read(paste0("X:/Agency_Files/Outcomes/Risk_Eval_Air_Mod/_Air_Risk_Evaluation/Staff Folders/Dorian/AQI/Web/aqi-dashboard/images/history3.png"))
img4 <- image_read(paste0("X:/Agency_Files/Outcomes/Risk_Eval_Air_Mod/_Air_Risk_Evaluation/Staff Folders/Dorian/AQI/Web/aqi-dashboard/images/history4.png"))
list(img1, img1, img1, img1, img1, img1, image_morph(c(img1,img2), frames=1),
img2, img2, img2, img2, img2, img2, image_morph(c(img2,img3), frames=1),
img3, img3, img3, img3, img3, img3, image_morph(c(img3,img4), frames=1),
img4, img4, img4, img4, img4, img4, image_morph(c(img4,img1), frames=1)) %>%
image_join() %>%
image_animate(fps=4) %>%
image_write(paste0("X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//Web//aqi-dashboard//images//history_fade.gif"))
list(img1, img1, img1,
img2, img2, img2,
img3, img3, img3,
img4, img4, img4) %>%
image_join() %>%
image_animate(fps=0.5) %>%
image_write(paste0("X://Agency_Files//Outcomes//Risk_Eval_Air_Mod//_Air_Risk_Evaluation//Staff Folders//Dorian//AQI//Web//aqi-dashboard//images//history.gif"))
##
|
0b5d46643fb76879d686b903cab9944835aa2bfe | c2a20a3c33a2ad3898e4192d7a07b767575cfc29 | /man/in.calc.Rd | ca73cda2580d0167859c4a58e4f07cd89ef4d8bb | [] | no_license | kkeenan02/diveRsity-dev | 401f094789c8bc3f5de85ba94d3a8af0aed5c09e | c6557b8b2c881b909baa9ed8d5f2b5be4e2f3821 | refs/heads/master | 2021-01-01T18:37:08.317050 | 2013-10-28T14:49:47 | 2013-10-28T14:49:47 | 13,863,669 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 636 | rd | in.calc.Rd | \name{in.calc}
\alias{in.calc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
A function to calculate locus informativeness for the inference of ancestry
}
\note{This function is deprecated. Please use \code{inCalc}. See \code{?inCalc} for details on its usage.}
\description{
\code{inCalc} allows the calculation of locus informativeness for ancestry (\emph{In}), (Rosenberg \emph{et al.,} 2003), both across all population samples under consideration and for all pairwise combinations of population samples. These data can be bootstrapped using the same procedure as above, to obtain 95\% confidence intervals.
}
|
b245862dd1f0300aecd5ff23908e2296dace2849 | 05c62f1af1025e734060806d73dd6a4300bbef81 | /code/1_header.R | f184d2549342126cb6b76b7533c2f6e1e6ed0fea | [] | no_license | EmilyMarkowitz-NOAA/FishR101-Models | 33d50398b10b05adbee95df1d8462c2925f4cbc7 | 245a902b7ac6a826c8ffcd88960dc1087d7a42b7 | refs/heads/main | 2023-07-26T13:33:49.924716 | 2021-02-23T09:47:27 | 2021-02-23T09:47:27 | 323,549,826 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,174 | r | 1_header.R | # Lesson 8: Modling
# Created by: Emily Markowitz
# Contact: Emily.Markowitz@noaa.gov
# Created: 2020-12-18
# Modified: 2021-02-17
# packages ----------------------------------------------------------------
library(tidyverse)
# install.packages("broom")
library(broom)
# install.packages("modeldata")
library(modeldata) # This is also loaded by the tidymodels package
# install.packages("modelr")
library(modelr)
# install.packages("tidymv")
library(tidymv)
# install.packages("recipes")
# library(recipes) # not covered here but you should check it out!
# install.packages("DAAG")
library(DAAG) # for orings dataset
library(here)
library(janitor)
# directories --------------------------------------------------------------------
source(here("functions", "file_folders.R"))
# download data --------------------------------------------------------------------
# Anscombe's Quartet of โIdenticalโ Simple Linear Regressions
#?datasets::anscombe
sim1 <- datasets::anscombe
orings<-DAAG::orings
data(crickets, package = "modeldata")
# look at your data -------------------------------------------------------
str(sim1)
str(orings)
str(crickets)
str(mtcars)
|
fa2603200f1a14a4c7f2c77aed91c5c5fb163867 | 9946c1f093399ec8d83b02324a2f0fed95cbdc19 | /man/plotgl.Rd | bed6506838f8befc5378dbe6159d104556f40bc5 | [] | no_license | cran/gld | 6a60f0fd164f6a3db98d2212440c081a27afefd3 | 8b6ac4a399deb0479737710576217e24f393d878 | refs/heads/master | 2023-04-08T03:52:28.262264 | 2022-10-23T13:25:06 | 2022-10-23T13:25:06 | 17,696,412 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,071 | rd | plotgl.Rd | \name{plotgl}
\alias{plotgl}
\alias{plotglc}
\alias{plotgld}
\title{Plots of density and distribution function for the generalised lambda
distribution}
\description{
Produces plots of density and distribution function for the generalised lambda
distribution. Although you could use \code{plot(function(x)dgl(x))} to do
this, the fact that the density and quantiles of the generalised lambda are
defined in terms of the depth, \eqn{u}, means that a seperate function that
uses the depths to produce the values to plot is more efficient
}
\usage{
plotgld(lambda1 = 0, lambda2 = NULL, lambda3 = NULL, lambda4 = NULL,
param = "fmkl", lambda5 = NULL, add = NULL, truncate = 0,
bnw = FALSE, col.or.type = 1, granularity = 10000, xlab = "x",
ylab = NULL, quant.probs = seq(0,1,.25), new.plot = NULL, ...)
plotglc(lambda1 = 0, lambda2 = NULL, lambda3 = NULL, lambda4 = NULL,
param = "fmkl", lambda5 = NULL, granularity = 10000, xlab = "x",
ylab = "cumulative probability", add = FALSE, ...)
}
\arguments{
\item{lambda1}{This can be either a single numeric value or a vector.
If it is a vector, it must be of length 4 for parameterisations
\code{fmkl} or \code{rs} and of length 5 for parameterisation \code{fm5}.
If it is a vector, it gives all the parameters of the generalised lambda
distribution (see below for details) and the other \code{lambda} arguments
must be left as NULL.
If it is a a single value, it is \eqn{\lambda_1}{lambda 1}, the location
parameter of the distribution and the other parameters are given by the
following arguments
\emph{Note that the numbering of the \eqn{\lambda}{lambda} parameters for
the fmkl parameterisation is different to that used by Freimer,
Mudholkar, Kollia and Lin.}
}
\item{lambda2}{\eqn{\lambda_2}{lambda 2} - scale parameter}
\item{lambda3}{\eqn{\lambda_3}{lambda 3} - first shape parameter}
\item{lambda4}{\eqn{\lambda_4}{lambda 4} - second shape parameter}
\item{lambda5}{\eqn{\lambda_5}{lambda 5} - a skewing parameter, in the
fm5 parameterisation}
\item{param}{choose parameterisation:
\code{fmkl} uses \emph{Freimer, Mudholkar, Kollia and Lin (1988)} (default).
\code{rs} uses \emph{Ramberg and Schmeiser (1974)}
\code{fm5} uses the 5 parameter version of the FMKL parameterisation
(paper to appear)}
\item{add}{a logical value describing whether this should add to an existing plot (using
\code{lines}) or produce a new plot (using \code{plot}). Defaults to FALSE (new plot) if
both \code{add} and \code{new.plot} are NULL.}
\item{truncate}{for \code{plotgld}, a minimum density value at which the
plot should be truncated.}
\item{bnw}{a logical value, true for a black and white plot, with different
densities identified using line type (\code{lty}), false for a colour plot,
with different
densities identified using line colour (\code{col})}
\item{col.or.type}{Colour or type of line to use}
\item{granularity}{Number of points to calculate quantiles and density at
--- see \emph{details}}
\item{xlab}{X axis label}
\item{ylab}{Y axis label}
\item{quant.probs}{Quantiles of distribution to return (see \emph{value}
below). Set to NULL to suppress this return entirely.}
\item{new.plot}{a logical value describing whether this should produce a
new plot (using \code{plot}), or add to an existing plot (using
\code{lines}). Ignored if \code{add} is set.}
\item{...}{arguments that get passed to \code{plot} if this is a new plot}
}
\details{
The generalised lambda distribution is defined in terms of its quantile
function. The density of the distribution is available explicitly as a
function of depths, \eqn{u}, but not explicitly available as a function of
\eqn{x}. This function calculates quantiles and depths as a function of
depths to produce a density plot \code{plotgld} or cumulative probability plot
\code{plotglc}.
The plot can be truncated, either by restricting the values using \code{xlim}
--- see \code{par} for details, or by the \code{truncate} argument, which
specifies a minimum density. This is recommended for graphs of densities
where the tail is very long.
}
\value{
A number of quantiles from the distribution, the default being the minimum,
maximum and quartiles.
}
\references{
Freimer, M., Mudholkar, G. S., Kollia, G. & Lin, C. T. (1988),
\emph{A study of the generalized tukey lambda family}, Communications
in Statistics - Theory and Methods \bold{17}, 3547--3567.
Ramberg, J. S. & Schmeiser, B. W. (1974), \emph{An approximate method for
generating asymmetric random variables}, Communications of the ACM \bold{17},
78--82.
Karian, Z.E. & Dudewicz, E.J. (2000), \emph{Fitting Statistical Distributions to
Data: The generalised Lambda Distribution and the Generalised Bootstrap
Methods}, CRC Press.
\url{https://github.com/newystats/gld/}
}
\author{Robert King, \email{robert.king.newcastle@gmail.com},
\url{https://github.com/newystats/}}
\seealso{\code{\link{GeneralisedLambdaDistribution}}}
\examples{
plotgld(0,1.4640474,.1349,.1349,main="Approximation to Standard Normal",
sub="But you can see this isn't on infinite support")
plotgld(1.42857143,1,.7,.3,main="The whale")
plotglc(1.42857143,1,.7,.3)
plotgld(0,-1,5,-0.3,param="rs")
plotgld(0,-1,5,-0.3,param="rs",xlim=c(1,2))
# A bizarre shape from the RS paramterisation
plotgld(0,1,5,-0.3,param="fmkl")
plotgld(10/3,1,.3,-1,truncate=1e-3)
plotgld(0,1,.0742,.0742,col.or.type=2,param="rs",
main="All distributions have the same moments",
sub="The full Range of all distributions is shown")
plotgld(0,1,6.026,6.026,col.or.type=3,new.plot=FALSE,param="rs")
plotgld(0,1,35.498,2.297,col.or.type=4,new.plot=FALSE,param="rs")
legend(0.25,3.5,lty=1,col=c(2,3,4),legend=c("(0,1,.0742,.0742)",
"(0,1,6.026,6.026)","(0,1,35.498,2.297)"),cex=0.9)
# An illustration of problems with moments as a method of characterising shape
}
\keyword{distribution}
\keyword{hplot}
\keyword{aplot}
|
56837b9a0d14e06c580a0f1af9532f3c26a1de70 | c5f7ccf06e8bce436bebc6492d6030a5ca0b54f5 | /Maps_for_SSpaper_2020.R | 79a2845ba6d7592980c998903707378956616fc7 | [] | no_license | elasmobranch23/Sperm-storage-maps | 953014366d64a6713fca14dfb7a75667367dc563 | 821e5aa1d7cd4507feb03f923e768ab4e814b957 | refs/heads/main | 2023-01-12T17:56:32.455330 | 2020-11-18T09:39:38 | 2020-11-18T09:39:38 | 313,882,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,730 | r | Maps_for_SSpaper_2020.R | library(ggplot2)
library(sf)
library(rnaturalearth)
library(readxl)
library(ggsn)
####### Code for map for the Indian Ocean
#Load data used
Indian <- read_excel("C:/Users/G00303872/OneDrive/Masters/Data Sheets/Spatial.xlsx", sheet = "Indian")
Cou <- read_excel("C:/Users/G00303872/OneDrive/Masters/Data Sheets/Spatial.xlsx", sheet = "Indian_countries")
Ocean <- read_excel("C:/Users/G00303872/OneDrive/Masters/Data Sheets/Spatial.xlsx", sheet = "Indian_label")
Cruise <- read_excel("C:/Users/G00303872/OneDrive/Masters/Data Sheets/Spatial.xlsx", sheet = "Research_Cruise")
#Crop the study area
worldmap <- ne_countries(scale = 'medium', type = 'map_units',
returnclass = 'sf')
oman_cropped <- st_crop(worldmap, xmin = 50, xmax = 65,
ymin = 15, ymax = 32.5)
# plot map
attach(Indian)
NIO <- ggplot() + geom_sf(data = oman_cropped, fill = "white") + theme(panel.grid = element_blank(), panel.background = element_rect("grey81"),
legend.text =element_text(size = 15)) +
#geom_point(data = Cou, aes(x = Lon, y = Lat), pch = 16, col = "black") +
geom_point(data = Ocean, aes(x = Lon, y = Lat), pch = 16, col = "grey") +
geom_line(data = Cruise, aes(x = Lon, y = Lat), col = "black", size = 1) +
geom_point(data = Indian, aes( x= Lon, y = Lat, shape = Site), size = 2) +
scale_shape_manual(values = c(0,16,15,11,17,18,6,8)) +
geom_text(data = Cou, aes(x = Lon, y = Lat, label = Country), size = 3) +
geom_text(data = Ocean, aes(x = Lon, y = Lat, label = Country), size = 2.5) +
labs(x = "Longtitude", y = "Latitude")
#Add North arrow
north2(NIO, x = .25, y = .80, symbol = 3)
####### Code for map for the Indian Ocean
# Load data for Atlantic Ocean
Atlantic <- read_excel("C:/Users/G00303872/OneDrive/Masters/Data Sheets/Spatial.xlsx", sheet = "Atlantic")
Atlantic_label <- read_excel("C:/Users/G00303872/OneDrive/Masters/Data Sheets/Spatial.xlsx", sheet = "Atlantic_label")
#Crop the study area
ire_cropped <- st_crop(worldmap, xmin = -13, xmax = -4, ymin = 48, ymax = 57.5)
# plot map
NEA <- ggplot() + geom_sf(data = ire_cropped, fill = "white") +theme(panel.grid = element_blank(), panel.background = element_rect("grey81"),
legend.text =element_text(size = 15)) +
geom_point(data = Atlantic, aes(x= Lon, y = Lat), pch=16, cex=1) +
geom_point(data = Atlantic_label, aes(x =Lon, y = Lat), pch =16, colour = "grey81") +
geom_text(data = Atlantic_label, aes(x =Lon, y = Lat, label =Country)) +
labs(x = "Longtitude", y = "Latitude")
#Add North arrow
north2(NEA, x=.3, y=.9, symbol=3)
|
1b3c08259751fceed7f7d865450a30444f66fd4d | 0ebb4b9fc23b67a30522ccb5b1fb248532ae810b | /code/historical_dhs.R | 0892af90fc47615240af8e73f1f0258cf825b8bc | [] | no_license | danjwalton/p20_indicator_time_trends | 64e6490cd4448ed4d60f16570cf1d3a491ed4232 | 5fb7f4c69cd2602e1b310e6db91f07d96ebb6a14 | refs/heads/master | 2020-06-04T08:06:03.888612 | 2019-06-14T13:22:31 | 2019-06-14T13:22:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,815 | r | historical_dhs.R | ####Function and setup####
list.of.packages <- c("Hmisc","plyr","foreign","data.table","varhandle","zoo","survey")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only=T)
#Taken from https://raw.githubusercontent.com/akmiller01/alexm-util/master/DevInit/R/P20/2013_tab_data2.R
if(Sys.info()[["user"]]=="alex"){
wd <- "~/git/p20_indicator_time_trends"
wd2 <- "~/git/p20_private_data/project_data/DHS auto"
} if(Sys.info()[["user"]]=="dan-w"){
wd <- "C:/Users/dan-w/Box/Gap Narrative (ITEP), June 2019/git/gap-narrative"
wd2 <- "C:/Users/dan-w/Box/Gap Narrative (ITEP), June 2019/git/gap-narrative/data"
}else{
wd <- "E:/DHSauto"
wd2 <- "~/git/p20_private_data/project_data/"
}
setwd(wd)
source("code/child_mort.R")
povcalcuts <- fread("https://raw.githubusercontent.com/ZChristensen/poverty_trends/master/data/P20incometrends.csv")
dhsmeta<- fread("https://raw.githubusercontent.com/ZChristensen/p20_indicator_time_trends/master/data/dhs_meta_data20190524.csv")
dhsmeta<- subset(dhsmeta, Recode.Structure.!="DHS-I")
dhsmeta$Country.[which(dhsmeta$Country.=="Cape Verde")]<-"Cabo Verde"
dhsmeta$Country.[which(dhsmeta$Country.=="Congo")]<-"Congo, Republic of"
dhsmeta$Country.[which(dhsmeta$Country.=="Congo Democratic Republic")]<-"Congo, Democratic Republic of"
dhsmeta$Country.[which(dhsmeta$Country.=="Egypt")]<-"Egypt, Arab Republic of"
dhsmeta$Country.[which(dhsmeta$Country.=="Gambia")]<-"Gambia, The"
dhsmeta$Country.[which(dhsmeta$Country.=="Yemen")]<-"Yemen, Republic of"
#Afghanistan, Cambodia, Equatorial Guinea and Eritrea have had DHS surveys but don't have PovcalNet data
names(dhsmeta)[which(names(dhsmeta)=="Country.")] <- "CountryName"
dhsmeta$filename=paste0(dhsmeta$dhs_cc,"HR",dhsmeta$dhs_recode_code,"DT")
dhsmeta=dhsmeta[which(!is.na(dhsmeta$dhs_cc)),]
dhsmeta2 <- unique(dhsmeta[,c("CountryName","surveyyr","filename")])
povcalyears=c(1981,1984,1987,1990,1993,1996,1999,2002,2005,2008,2010,2011,2012,2013,2015)
for(year in povcalyears){
dhsmeta2[, as.character(year)] <- abs(dhsmeta2$surveyyr - year)
}
dhsmeta2 <- melt(dhsmeta2, id.vars = c("filename","CountryName","surveyyr"))
dhsmeta2 <- dhsmeta2[dhsmeta2[, .I[value == min(value)], by=.(CountryName,variable)]$V1]
dhsmeta2 <- dhsmeta2[complete.cases(dhsmeta2)]
dhsmeta2$variable<- as.numeric(levels(dhsmeta2$variable))[dhsmeta2$variable]
names(dhsmeta2)[which(names(dhsmeta2)=="variable")] <- "RequestYear"
povcalcuts <- join(dhsmeta2,povcalcuts,by=c("CountryName","RequestYear"))
names(povcalcuts)[which(names(povcalcuts)=="RequestYear")] <- "year"
names(povcalcuts)[which(names(povcalcuts)=="CountryCode")] <- "iso3"
povcalcuts$hc<- povcalcuts$P20Headcount/100
povcalcuts$extreme <- povcalcuts$ExtPovHC/100
keep <- c("iso3","year","hc","PovGap","filename","extreme")
povcalcuts <- povcalcuts[,keep, with=F]
# povcalcuts$filename <- NA
# povcalcuts$filename[which(povcalcuts$iso3=="NPL")]<-"NPIR7HFL"
# povcalcuts$filename[which(povcalcuts$iso3=="BEN")]<-"BJIR61FL"
# Weighted percentiles of a numeric vector.
#
# For each probability in `prob`, returns the smallest value of `x` whose
# cumulative weight (after sorting by `x`) reaches `prob * sum(w)` -- the
# "lower" weighted percentile.
#
# Args:
#   x:     numeric vector of values.
#   w:     numeric vector of weights, same length as x.
#   prob:  numeric vector of probabilities in [0, 1].
#   na.rm: drop (x, w) pairs containing NA before computing? Default TRUE.
# Returns:
#   Named numeric vector with one cut point per probability; names are
#   percentage labels such as "50%".  A probability whose threshold is
#   never reached yields NA.
# Fixes vs. original: seq_along() instead of 1:length(prob) (which looped
# twice on empty input), and preallocated outputs instead of growing
# vectors with c() inside the loop.
weighted.percentile <- function(x, w, prob, na.rm = TRUE){
  df <- data.frame(x, w)
  if(na.rm){
    df <- df[complete.cases(df), ]
  }
  # Sort by value so cumulative weights are monotone in x.
  df <- df[order(df$x), ]
  sumw <- sum(df$w)
  cumw <- cumsum(df$w)
  # Preallocate one slot per requested probability.
  cutList <- numeric(length(prob))
  cutNames <- character(length(prob))
  for(i in seq_along(prob)){
    p <- prob[i]
    # First observation whose cumulative weight reaches p * sum(w).
    cutList[i] <- df$x[which(cumw >= sumw * p)[1]]
    cutNames[i] <- paste0(round(p * 100, digits = 2), "%")
  }
  names(cutList) <- cutNames
  return(cutList)
}
# Element-wise ("parallel") sum of the vectors passed in `...`, analogous
# to pmin/pmax.  Inputs are recycled to a common length by cbind(); with
# na.rm = TRUE (the default) missing values contribute zero to their row.
psum <- function(..., na.rm = TRUE) {
  combined <- cbind(...)
  rowSums(combined, na.rm = na.rm)
}
####Run function####
# Main loop: for every DHS .RData file that has a matching PovcalNet
# poverty cut, load the birth (BR) and person (PR) recodes, classify
# households as P20 (poorest 20%) or not by asset wealth, and compute
# child mortality, stunting and birth registration for each group.
# Results accumulate in dataList and are written out at the end.
setwd(wd2)
rdatas <- list.files(pattern="*.RData",ignore.case=T,full.names=TRUE)
dataList <- list()
dataIndex <- 1
# Loop through every dir
for(i in 1:length(rdatas)){
  rdata <- rdatas[i]
  # Pull some coded info out of the dir name
  # (2-char country code, 2-char recode, 2-char phase, e.g. "KEHR71")
  country <- substr(basename(rdata),1,2)
  recode <- substr(basename(rdata),3,4)
  phase <- substr(basename(rdata),5,6)
  subphase <- substr(basename(rdata),5,5)
  povcal_filename <- paste0(country,recode,phase,"dt")
  # Only process surveys that have a PovcalNet poverty cut.
  if(povcal_filename %in% tolower(povcalcuts$filename)){
    message(povcal_filename)
    povcal_subset = subset(povcalcuts,filename==toupper(povcal_filename))
    iso3 = povcal_subset$iso3
    survey_year = povcal_subset$year
    # One survey can map to several PovcalNet reference years.
    for(year in survey_year){
      message(year)
      # Load the birth recode (BR); each load() binds `data`.
      br_patha <- paste0(country,"br",phase)
      br_path <- paste0("data/",tolower(br_patha),"fl.RData")
      load(br_path)
      br <- data.frame(data)
      remove(data)
      # Load the person recode (PR).
      pr_patha <- paste0(country,"pr",phase)
      pr_path <- paste0("data/",tolower(pr_patha),"fl.RData")
      load(pr_path)
      pr <- data.frame(data)
      remove(data)
      names(pr)[which(names(pr)=="hv001")] <- "cluster"
      names(pr)[which(names(pr)=="hv002")] <- "household"
      names(pr)[which(names(pr)=="hvidx")] <- "line"
      #Rename sample.weights var
      names(pr)[which(names(pr)=="hv005")] <- "sample.weights"
      # Weights are stored scaled by 1,000,000 in the raw file.
      pr$weights <- pr$sample.weights/1000000
      #Urban/rural
      # NOTE(review): `phase` is a character string here, so `phase>1`
      # compares as strings after coercion; it works for these two-digit
      # codes but is fragile.
      if(phase>1){
        names(pr)[which(names(pr)=="hv025")] <- "urban.rural"
      }else{
        names(pr)[which(names(pr)=="v102")] <- "urban.rural"
      }
      pr$urban <- NA
      pr$urban[which(pr$urban.rural==1)] <- 1
      pr$urban[which(pr$urban.rural==2)] <- 0
      # Wealth: use hv271 (wealth index factor score, scaled by 1e5) when
      # present, otherwise merge in the separate wealth-index (WI) file;
      # skip the survey entirely if neither exists.
      if("hv271" %in% names(pr)){
        pr$hv271 <- pr$hv271/100000
        names(pr)[which(names(pr)=="hv271")] <- "wealth"
      }else{
        wi_patha <- paste0(country,"wi",phase)
        wi_path <- paste0("data/",tolower(wi_patha),"fl.RData")
        if(file.exists(wi_path)){
          load(wi_path)
          wi <- data.frame(data)
          remove(data)
        }else{
          next;
        }
        names(wi)[which(names(wi)=="whhid")] <-"hhid"
        pr<- join(pr,wi,by="hhid")
        names(pr)[which(names(pr)=="wlthindf")] <-"wealth"
      }
      # Poverty: turn the PovcalNet headcounts into wealth-index cut
      # points via the survey-weighted percentile of the wealth score.
      filename=paste0(country,recode,phase,"dt")
      povcalcuts$filename=tolower(povcalcuts$filename)
      povcalcut <- subset(povcalcuts,filename==povcal_filename)$hc
      extcut <- subset(povcalcuts,filename==povcal_filename)$extreme
      cuts <- c(povcalcut,extcut)
      povperc <- weighted.percentile(pr$wealth,pr$weights,prob=cuts)
      pr$p20 <- (pr$wealth < povperc[1])
      pr$ext <- (pr$wealth < povperc[2])
      # Education: recode numeric attainment codes to labels.
      if(phase>1){
        names(pr)[which(names(pr)=="hv109")] <- "educ"
        recode.educ <- function(x){
          if(is.na(x)){return(NA)}
          else if(x==8 | x==9){return(NA)}
          else if(x==0 | x==1){return("No education, preschool")}
          else if(x==2 | x==3 ){return("Primary")}
          else if(x==4){return("Secondary")}
          else if(x==5){return("Higher")}
          else{return(NA)}
        }
        pr$educ <- sapply(pr$educ,recode.educ)
      } else{
        names(pr)[which(names(pr)=="v106")] <- "educ"
        # NOTE(review): in this branch recode.educ is defined but never
        # applied (no sapply call), so pr$educ keeps the raw numeric
        # codes for phase<=1 surveys -- likely an oversight.
        recode.educ <- function(x){
          if(is.na(x)){return(NA)}
          else if(x==8 | x==9){return(NA)}
          else if(x==0 ){return("No education, preschool")}
          else if(x==1){return("Primary")}
          else if(x==2){return("Secondary")}
          else if(x==3){return("Higher")}
          else{return(NA)}
        }
      }
      # Age
      names(pr)[which(names(pr)=="hv105")] <- "age"
      # Sex
      names(pr)[which(names(pr)=="hv104")] <- "sex"
      # ID vars
      names(pr)[which(names(pr)=="hv001")] <- "cluster"
      names(pr)[which(names(pr)=="hv002")] <- "household"
      names(pr)[which(names(pr)=="hv024")] <- "region"
      names(pr)[which(names(pr)=="hvidx")] <- "line"
      names(pr)[which(names(pr)=="hv112")] <- "mother.line"
      # 99 is the "missing" sentinel for mother's line number.
      pr$mother.line[which(pr$mother.line==99)] <- NA
      # Head vars
      names(pr)[which(names(pr)=="hv219")] <- "head.sex"
      names(pr)[which(names(pr)=="hv220")] <- "head.age"
      # Birth certificate
      names(pr)[which(names(pr)=="hv140")] <- "birth.cert"
      #0 - neither certificate or registered
      #1 - has certificate
      #2 - registered, no certificate
      #3 - registered, no certificate
      #6 - other
      #8 - dk
      pr$birth.reg = NA
      pr$birth.reg[which(pr$birth.cert %in% c(0,6,8,9))] = 0
      pr$birth.reg[which(pr$birth.cert %in% c(1,2,3))] = 1
      # Stunting: hc70 is height-for-age z-score * 100; values > 80
      # (i.e. z > 80 after rescaling) are flag/missing codes.
      names(pr)[which(names(pr)=="hc70")] <- "child.height.age"
      if(typeof(pr$child.height.age)=="NULL"){
        pr$child.height.age <- NA
      }else{
        pr$child.height.age <- pr$child.height.age/100
      }
      pr$child.height.age[which(pr$child.height.age>80)] <- NA
      pr$stunting <- NA
      # Stunted: z-score in (-6, -2]; not stunted: z in (-2, 6).
      pr$stunting[which(pr$child.height.age > (-6) & pr$child.height.age<= (-3))] <- 1
      pr$stunting[which(pr$child.height.age > (-3) & pr$child.height.age<= (-2))] <- 1
      pr$stunting[which(pr$child.height.age > (-2) & pr$child.height.age< (6))] <- 0
      # Keep only the harmonised columns; create any that are missing
      # (filled with NA) so every survey has the same schema.
      keep <- c(
        "wealth","weights","urban","region","educ","age","sex","cluster","household","head.sex","head.age","p20","ext","birth.reg","stunting"
      )
      prNames <- names(pr)
      namesDiff <- setdiff(keep,prNames)
      if(length(namesDiff)>0){
        for(y in 1:length(namesDiff)){
          pr[namesDiff[y]] <- NA
          message(paste("Missing variable",namesDiff[y]))
        }
      }
      pr <- pr[,keep]
      # Attach household-level P20 status to the birth recode and split
      # births into P20 vs rest-of-population (u80) groups.
      names(br)[which(names(br)=="v001")] <- "cluster"
      names(br)[which(names(br)=="v002")] <- "household"
      pr.pov = data.table(pr)[,.(p20=mean(p20,na.rm=T)),by=.(cluster,household)]
      br <- as.data.table(br)
      br = merge(br,pr.pov,by=c("cluster","household"),all.x=T)
      br.p20 = subset(br,p20==T)
      br.u80 = subset(br,!p20)
      # Child mortality via mort() from code/child_mort.R; needs at least
      # two birth records to be computed.
      if(nrow(br.p20)>1){
        p20.mort.list = mort(br.p20)
        p20.mort = p20.mort.list$mortality
        p20.mort.numerator = p20.mort.list$total_morts
        p20.mort.denominator = p20.mort.list$total_survs
      }else{
        p20.mort = NA
        p20.mort.numerator = NA
        p20.mort.denominator = NA
      }
      if(nrow(br.u80)>1){
        u80.mort.list = mort(br.u80)
        u80.mort = u80.mort.list$mortality
        u80.mort.numerator = u80.mort.list$total_morts
        u80.mort.denominator = u80.mort.list$total_survs
      }else{
        u80.mort = NA
        u80.mort.numerator = NA
        u80.mort.denominator = NA
      }
      # Long-format result rows: statistic, numerator, denominator for
      # each of the two poverty groups.
      mort_dat = data.frame(
        p20=c(rep(T,3),rep(F,3)),
        variable=c(rep("mortality",6)),
        type=rep(c("statistic","numerator","denominator"),2),
        value=c(p20.mort,p20.mort.numerator,p20.mort.denominator,u80.mort,u80.mort.numerator,u80.mort.denominator)
      )
      # Survey design (weights only; no cluster/strata specified here).
      dsn = svydesign(
        data=pr
        ,ids=~1
        ,weights=~weights
      )
      # Weighted stunting rates by poverty group.
      pov.stunting.tab = svytable(~stunting+p20,dsn)
      if("TRUE" %in% colnames(pov.stunting.tab)){
        p20.stunting = pov.stunting.tab["1","TRUE"]/sum(pov.stunting.tab["0","TRUE"],pov.stunting.tab["1","TRUE"],na.rm=T)
        p20.stunting.numerator = pov.stunting.tab["1","TRUE"]
        p20.stunting.denominator = sum(pov.stunting.tab["0","TRUE"],pov.stunting.tab["1","TRUE"],na.rm=T)
      }else{
        p20.stunting = NA
        p20.stunting.numerator = NA
        p20.stunting.denominator = NA
      }
      if("FALSE" %in% colnames(pov.stunting.tab)){
        u80.stunting = pov.stunting.tab["1","FALSE"]/sum(pov.stunting.tab["0","FALSE"],pov.stunting.tab["1","FALSE"],na.rm=T)
        u80.stunting.numerator = pov.stunting.tab["1","FALSE"]
        u80.stunting.denominator = sum(pov.stunting.tab["0","FALSE"],pov.stunting.tab["1","FALSE"],na.rm=T)
      }else{
        u80.stunting = NA
        u80.stunting.numerator = NA
        u80.stunting.denominator = NA
      }
      stunt_dat = data.frame(
        p20=c(rep(T,3),rep(F,3)),
        variable=c(rep("stunting",6)),
        type=rep(c("statistic","numerator","denominator"),2),
        value=c(p20.stunting,p20.stunting.numerator,p20.stunting.denominator,u80.stunting,u80.stunting.numerator,u80.stunting.denominator)
      )
      # Weighted birth-registration rates by poverty group.
      pov.reg.tab = svytable(~birth.reg+p20,dsn)
      if("TRUE" %in% colnames(pov.reg.tab)){
        p20.reg = pov.reg.tab["1","TRUE"]/sum(pov.reg.tab["0","TRUE"],pov.reg.tab["1","TRUE"],na.rm=T)
        p20.reg.numerator = pov.reg.tab["1","TRUE"]
        p20.reg.denominator = sum(pov.reg.tab["0","TRUE"],pov.reg.tab["1","TRUE"],na.rm=T)
      }else{
        p20.reg = NA
        p20.reg.numerator = NA
        p20.reg.denominator = NA
      }
      if("FALSE" %in% colnames(pov.reg.tab)){
        u80.reg = pov.reg.tab["1","FALSE"]/sum(pov.reg.tab["0","FALSE"],pov.reg.tab["1","FALSE"],na.rm=T)
        u80.reg.numerator = pov.reg.tab["1","FALSE"]
        u80.reg.denominator = sum(pov.reg.tab["0","FALSE"],pov.reg.tab["1","FALSE"],na.rm=T)
      }else{
        u80.reg = NA
        u80.reg.numerator = NA
        u80.reg.denominator = NA
      }
      reg_dat = data.frame(
        p20=c(rep(T,3),rep(F,3)),
        variable=c(rep("registration",6)),
        type=rep(c("statistic","numerator","denominator"),2),
        value=c(p20.reg,p20.reg.numerator,p20.reg.denominator,u80.reg,u80.reg.numerator,u80.reg.denominator)
      )
      # Stack the three indicators and tag with survey identifiers.
      dat = rbind(mort_dat,stunt_dat,reg_dat)
      dat$filename <- povcal_filename
      if(length(iso3)>0){
        dat$iso3 = iso3
        dat$survey_year = year
      }else{
        dat$iso3 = NA
        dat$survey_year = NA
      }
      dataList[[dataIndex]] <- dat
      dataIndex <- dataIndex + 1
    }
  }
}
# Combine all surveys and persist both RData and CSV outputs.
data.total <- rbindlist(dataList)
save(data.total,file="historical_dhs.RData")
fwrite(data.total,"historical_dhs.csv")
|
cf36de7a92668743c8fc58ae0342f95814303d89 | 73267cef739013c42899c0cd00b261a3610dbb91 | /data_scientists/03_tidy_data/w2/install_sqldf.R | aa9221423ef22a2985e19336cec6854a7be8e6bb | [] | no_license | annie2010/coursera_2014 | 3509ea690f1dbf76285a168d18d447787ba2316d | ad64465cd1e48e948da399aa4b3a72258af4874d | refs/heads/master | 2020-04-06T06:43:12.395104 | 2014-07-15T06:12:52 | 2014-07-15T06:12:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 938 | r | install_sqldf.R | #install.packages('/tmp/Rtmp7uX5lV/downloaded_packages/RJDBC_0.2-4.tar.gz',repo=NULL) # DONE (RJDBC)
# One-off installation log for sqldf and its dependency chain.
# Dependencies were installed from locally downloaded source tarballs
# (the /tmp/Rtmp* paths are session-specific and will not exist elsewhere);
# the commented-out lines record steps that were already completed.
#install.packages('/tmp/Rtmpn14y5d/downloaded_packages/doBy_4.5-10.tar.gz',repo=NULL) # DONE (doBy)
#install.packages('/tmp/Rtmpn14y5d/downloaded_packages/quantreg_5.05.tar.gz',repo=NULL) # DONE (quantreg)
#install.packages('/tmp/Rtmpn14y5d/downloaded_packages/svUnit_0.7-12.tar.gz',repo=NULL) # DONE (svUnit)
# Originally triggered by: install.packages('gsubfn', dependencies = TRUE)
#install.packages('/tmp/Rtmp7uX5lV/downloaded_packages/gsubfn_0.6-5.tar.gz',repo=NULL) # DONE (gsubfn)
#install.packages('/tmp/Rtmp7uX5lV/downloaded_packages/RH2_0.1-2.12.tar.gz',repo=NULL) # DONE (RH2)
#install.packages('/tmp/Rtmp7uX5lV/downloaded_packages/RSQLite.extfuns_0.0.1.tar.gz',repo=NULL) # DONE (RSQLite.extfuns)
# 'repos' spelled out: the original 'repo=NULL' only worked via partial
# argument matching; repos = NULL means "install from a local source file".
install.packages('/tmp/Rtmp7uX5lV/downloaded_packages/sqldf_0.4-7.1.tar.gz', repos = NULL) # DONE (sqldf)
# Equivalent CRAN install: install.packages('sqldf', dependencies = TRUE)
|
614f0f01fa1fc38d19aec1d9f2dd65e603908eb2 | e756c6f0872f95f6893703582e183ca167d2dd85 | /analysis/reasons.R | 4b1c8983147203357661115e35cdbe32d3fc6ccc | [] | no_license | johnjosephhorton/sharing | 2a1c3d9fc8c47011f2a71c99b9f6df2f6296e507 | 524873092891cdc2e96968676a223e7d8016a39c | refs/heads/master | 2021-01-10T09:19:47.823454 | 2017-01-03T17:30:10 | 2017-01-03T17:30:10 | 52,822,774 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,578 | r | reasons.R | #!/usr/bin/env Rscript
# Why do respondents not own a good: income ("expensive") vs. usage
# ("little_use")? Summarise the stated non-ownership reasons per good and plot
# the share citing income, with binomial confidence intervals.
library(shaRing)
library(JJHmisc)

df <- shaRing::GetDF()

# Respondents who gave a non-ownership reason, excluding "space".
# (data.table() is assumed to be attached via the packages above -- TODO confirm.)
df.no.own <- data.table(subset(df, !is.na(answer.no_own_reason) & answer.no_own_reason != "space"))

# Per-good counts and shares of the two remaining reasons.
df.no.own.summary <- df.no.own[, list(
    num.obs = .N,
    num.income = sum(answer.no_own_reason == "expensive"),
    frac.income = mean(answer.no_own_reason == "expensive"),
    frac.use = mean(answer.no_own_reason == "little_use")), by = list(input.good)]

# Keep goods with enough observations and order them by income share.
df.tmp <- df.no.own.summary %>% filter(num.obs > 7)
df.tmp$input.good <- with(df.tmp, reorder(input.good, frac.income, mean))

# Exact binomial confidence interval for the income share
# (binconf adds PointEst/Lower/Upper columns).
df.tmp <- df.tmp %>% cbind(with(df.tmp, Hmisc::binconf(num.income, num.obs)) %>% as.data.frame)

# NOTE(review): an earlier scatter plot was also assigned to g.reasons but was
# immediately overwritten by the dot plot below; it has been removed as dead code.
g.reasons <- ggplot(data = df.tmp,
                    aes(y = input.good, x = frac.income)) +
    geom_point() +
    geom_errorbarh(aes(xmin = Lower, xmax = Upper), height = 0, colour = "grey") +
    theme_bw() +
    scale_x_continuous(label = scales::percent) +
    xlab("Fraction citing income = (1 - Fraction citing usage)") +
    ylab("")

JJHmisc::writeImage(g.reasons, "reasons", path = "../writeup/plots/", width = 6, height = 3)
|
1e151b20752054cf83e7ee457f5ad6840a060192 | 3976f0dd4fa56d39f1a06ac09e9fa7575573b0c8 | /R/mc-rank-test.R | e12fe208d4f05175d4af95d32b36cf283f82603c | [] | no_license | lcallot/pcvar | 309e08ac3dd3eda108c44b515c458a4fe7487b03 | cabe10cbdc2b0dea931bc0fda4fd6941d07ce2eb | refs/heads/master | 2021-01-01T17:21:40.264823 | 2015-06-10T09:03:27 | 2015-06-10T09:03:27 | 15,044,879 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,340 | r | mc-rank-test.R | #' @description bla!
#'
#' @details For each Monte Carlo iteration a panel of series is generated from
#' the specified pcvar data generating process with \code{gen.pcvar} and the
#' bootstrap panel cointegration-rank test is applied; the rank-test outcomes
#' are aggregated across iterations. Before the loop starts, one sample is
#' drawn to verify that the DGP is not explosive.
#'
#'
#' @name mc.rank.test
#' @aliases mc.rank.test
#' @title Performs a Monte Carlo experiment on the performance of the panel rank test procedure of Callot (2013) to a pcvar model.
#' @author Laurent Callot \email{l.callot@@vu.nl}
#'
#'
#'
#' @param obs An integer, the number of observations to generate.
#' @param N an integer, the number of cross section units.
#' @param nvar The number of variables for each cross section unit.
#' @param BS The number of bootstrap iterations for the test. Default: 99.
#' @param MC The number of monte carlo iterations. Default: 1.
#' @param Alpha Adjustment matrix (dimension nvar * rank) or list (length N) of parameter matrices.
#' @param Beta Co-integration matrix (dimension (2*nvar) * rank) or list (length N) of parameter matrices.
#' @param Lambda0 Contemporaneous dependency matrix (dimension nvar * nvar) or list (length N) of parameter matrices.
#' @param Gammal List (length is number of lagged first differences) of matrices (N * N) or lists (length N) of matrices.
#' @param Omega The covariance matrix.
#' @param err.dist The distribution of the innovations, _gaussian_ or _t_
#' @param t.df If the innovations are _t_ distributed, the number of degrees of freedom. Default 1.
#' @param det.type An integer indicating the type of deterministics to use, following the typology by Johansen 1988.
#' @param burn.smpl The number of burned observations used to generate the data.
#' @param res.dep Dependency of the residuals, _iid_ (default) or _garch_.
#' @param garchspec See fGarch package.
#' @param cdet.load The loadings on the deterministics.
#' @param W The weighting scheme. Default: equal.
#' @param bs.method 'resample' for iid resampling, 'wild' for gaussian wild bootstrap.
#' @param ncores The number of cores, default 1.
#'
#'
#' @return A list with the aggregated rank-test outcomes and, in
#' \code{dgproots}, the roots of the data generating process.
#'
#'
#' @export
mc.rank.test <- function(obs,N,nvar,BS=99,MC=1,W='equal',Alpha=NULL,Beta=NULL,Lambda0=NULL,Gammal=NULL,Omega,err.dist='gaussian',det.type=1,cdet.load=c(1,1),bs.method='resample',t.df=1,burn.smpl=10,res.dep='iid',garchspec=NULL,ncores=1){
	# Guard against a NULL weighting scheme passed explicitly.
	if(is.null(W)) W <- 'equal'

	# Check that the DGP is valid and not explosive before starting the MC:
	# generate one sample and inspect the companion-matrix roots.
	# (Fix: the original passed `burn.smpl=burn`, referencing an undefined
	# variable `burn` instead of the `burn.smpl` argument. The unused locals
	# `lags` and `bsmc` were removed; `bsmc$dgp.roots` was never returned.)
	Ysim <- gen.pcvar(obs=obs,N=N,nvar=nvar,W=W,Alpha=Alpha,Beta=Beta,Lambda0=Lambda0,Gammal=Gammal,Omega=Omega,err.dist=err.dist,t.df=t.df,burn.smpl=burn.smpl,res.dep=res.dep,garchspec=garchspec)
	if(sum(abs(Ysim$roots[[1]])>1.01)>0)stop(paste('The input data generating process is explosive. Max root: ',max(abs(Ysim$roots[[1]])),'.',sep=''))

	# MC loop, parallelized over iterations with mclapply.
	mcrk <- mclapply(1:MC,.mcrk.i,MC,obs,N,nvar,BS,
					 W,Alpha,Beta,Lambda0,Gammal,Omega,err.dist,
					 det.type,cdet.load,bs.method,t.df,res.dep,garchspec,burn.smpl,mc.cores=ncores)

	# Aggregate the rank-test outcomes across iterations and attach the
	# roots of the data generating process.
	aggmc <- .agg.mc(mcrk)
	aggmc$dgproots <- Ysim$roots[[1]]
	return(aggmc)
}
|
simpleLoopFill <- function(dbConnection, tbls){
  # Row-by-row insert of `tbls$regions` into the "regions" table, skipping
  # duplicate keys via "on conflict (loc) do nothing" (PostgreSQL syntax).
  #
  # Args:
  #   dbConnection: an open DBI connection.
  #   tbls: list with a `regions` data frame (columns loc, chrom, start, end).
  print("Beginning simple for loop on regions")
  for(i in seq_along(tbls$regions$loc)){
    my.query <- sprintf("insert into regions values ('%s','%s',%d,%d) on conflict (loc) do nothing;",
                        tbls$regions$loc[i],
                        tbls$regions$chrom[i],
                        tbls$regions$start[i],
                        tbls$regions$end[i])
    dbSendQuery(dbConnection,my.query)
  }
  print("Continuing with hits")
  # NOTE(review): this second loop still inserts into "regions" although the
  # message suggests the "hits" table was intended. The original sprintf had
  # four format specifiers but only three arguments, which errored at run
  # time; the chrom argument is restored so the call is valid. Replace with
  # the proper hits insert once the hits schema is known.
  for(i in seq_along(tbls$regions$loc)){
    my.query <- sprintf("insert into regions values ('%s','%s',%d,%d) on conflict (loc) do nothing;",
                        tbls$regions$loc[i],
                        tbls$regions$chrom[i],
                        tbls$regions$start[i],
                        tbls$regions$end[i])
    dbSendQuery(dbConnection,my.query)
  }
  print("Simple for loop completed")
} #simpleLoopFill
#---------------------------------------------------------------------------
tempTableFill <- function(dbConnection, tbl, ID, tableID){
  # Bulk-load `tbl` into the table `tableID` via a throwaway staging table
  # `ID`, skipping rows whose key `loc` already exists.
  #
  # Args:
  #   dbConnection: an open DBI connection (PostgreSQL syntax assumed:
  #                 "create table ... (like ...)" / "on conflict ... do nothing").
  #   tbl:          data frame whose columns match the destination schema.
  #   ID:           name for the staging table (must not already exist).
  #   tableID:      name of the destination table.

  # Create the staging table with the same schema as the destination.
  # (Fix: the original always cloned "regions", which broke for any other
  # destination table.)
  my.query <- sprintf("create table %s (like %s);", ID, tableID)
  dbSendQuery(dbConnection, my.query)
  # Fill the staging table in a single bulk write.
  dbWriteTable(dbConnection, ID, tbl, row.names = FALSE, append=TRUE)
  # Copy staged rows into the real table, ignoring key conflicts, then drop
  # the staging table.
  my.query <- sprintf("insert into %s select * from %s on conflict (loc) do nothing;",
                      tableID, ID)
  dbSendQuery(dbConnection, my.query)
  dbRemoveTable(dbConnection, name = ID)
} #tempTableFill
#---------------------------------------------------------------------------
batchFill <- function(dbConnection, tbl){
  # TODO: unimplemented stub -- intended to write the insert statements to a
  # temporary text file and execute them as one batch against dbConnection.
  # Create a temporary text file of queries
} #batchFill
5bcd56fe5fddfbcc8e1bf611ec0da29eef465e15 | 9a58513989a0ab0bdb4fb4352f17aa572c375d0e | /run_analysis.R | bd95c5087bcd20d9ad993244e40b4ffa50589f8e | [] | no_license | NestaKobe/Getting-and-cleaning-data-Week4-assignment | 22fd59a2f28e9e1474300651c0a5411101f629f0 | 8b97d9b61ce8c197dd2c11bab452176f6dd0256e | refs/heads/main | 2023-03-01T06:41:22.741452 | 2021-02-10T16:05:56 | 2021-02-10T16:05:56 | 337,706,275 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,357 | r | run_analysis.R | # Getting and Cleaning Data Project John Hopkins Coursera
#Week 4 Assignment
#The data for the project:
#https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#TASK: create one R script called run_analysis.R that does the following.
#1. Merges the training and the test sets to create one data set.
#2. Extracts only the measurements on the mean and standard deviation for each measurement.
#3. Uses descriptive activity names to name the activities in the data set
#4. Appropriately labels the data set with descriptive variable names.
#5. From the data set in step 4, creates a second, independent tidy data set with
#   the average of each variable for each activity and each subject.
library(dplyr)

# 1. Merge training and test sets ----------------------------------------
#Load packages and get the data (skip the download if the archive is
#already present, so re-runs do not re-fetch ~60 MB).
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists(file.path(path, "dataFiles.zip"))) {
  download.file(url, file.path(path, "dataFiles.zip"))
}
unzip(zipfile = "dataFiles.zip")

#Read training data
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
#Read testing data
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
#Read features (feature names for the 561 measurement columns)
features <- read.table("./UCI HAR Dataset/features.txt")
#Read activity labels
activity_labels = read.table("./UCI HAR Dataset/activity_labels.txt")

#Assigning variable names to columns
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityID"
colnames(subject_train) <- "subjectID"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityID"
colnames(subject_test) <- "subjectID"
colnames(activity_labels) <- c("activityID", "activityType")

#Merging datasets: bind the columns of each split, then stack train and test
train_all <- cbind(y_train, subject_train, x_train)
test_all <- cbind(y_test, subject_test, x_test)
dataset_merged <- rbind(train_all, test_all)
#View(dataset_merged)

# 2. Extracting measurements on the mean and SD --------------------------
#Keep columns based on column name for mean & SD (plus the ID columns)
keep <- grepl("subject|activity|mean|std", colnames(dataset_merged))
#Extracting data & reshaping file
dataset_meanSD <- dataset_merged[, keep]
#View(dataset_meanSD)  # interactive-only; commented out so Rscript runs cleanly

# 3. Descriptive activity names -------------------------------------------
#Replace activity values with named factor levels
#Turn activities and subjects into factors
dataset_meanSD$activity <- factor(dataset_meanSD$activityID,
                                  levels = activity_labels[, 1], labels = activity_labels [, 2])
dataset_meanSD$subject <- as.factor(dataset_meanSD$subjectID)

# 4. Labeling dataset ---------------------------------------------------
#done under previous steps

# 5. Create second, independent tidy data set -----------------------------
# with the average of each variable for each activity and each subject.
#Create second set
dataset_tidy <- aggregate(. ~subjectID + activityID, dataset_meanSD, mean)
#Order by the tidy set's OWN columns. (Fix: the original ordered by
#dataset_meanSD's columns, whose length differs from dataset_tidy's and
#therefore produced a wrongly indexed result.)
dataset_tidy <- dataset_tidy[order(dataset_tidy$subjectID, dataset_tidy$activityID), ]

#Write table
write.table(dataset_tidy, "tidy_dataset.txt", row.names = FALSE)
6f8a1df13aba4af5c115f4cdcf61157290030fc7 | 3478901ab27212ba5798db9b6a8813b7ef596724 | /Project_Scripts/ClassificationTree.R | 9784019cb108ba79858acc7856bc1a8516d4eeba | [] | no_license | NicSchuler/DSF_NFLDraftPrediction | 016e341a2f870d98af4bcaa21a747e224d2150ee | 301032b0b9f7053a5ac8303cf2357de496c53925 | refs/heads/master | 2020-09-07T11:59:57.337829 | 2019-12-02T22:38:33 | 2019-12-02T22:38:33 | 220,773,179 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 53,932 | r | ClassificationTree.R | # Load required packages
library(dplyr) # data wrangling
library(rpart) # performing regression trees
library(rpart.plot) # plotting regression trees
library(tidyverse)
library(rattle) # Fancy tree plot
library(RColorBrewer) # Color selection for fancy tree plot

# Load the cleaned classification data: the raw 2007-2014 set plus four
# resampled variants used to counter class imbalance.
# NOTE(review): the four resampled files are named ...2007to2013..., but the
# code below references objects named CleanClass2007to2014_3_oversampling /
# _undersampling -- confirm the object names actually stored in these .Rdata
# files, otherwise sections 2 and 3 fail with "object not found".
load("../Data/CleanData/CleanClass2007to2014_3.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_oversampling.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_undersampling.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_rose.both.Rdata")
load("../Data/CleanData/CleanClass2007to2013_3_smote.Rdata")
# Performance-measurement table: one row per sampling scheme, one column per
# confusion-matrix cell (TP/TN/FP/FN) for QB, WR, RB and all positions
# together. The counts are NA until the sections below fill them in.
# (Built in a single vectorized call instead of growing an empty data frame
# row by row, which the original did.)
ClassificationTreePerfMeas = data.frame(
  Method = "ClassificationTree",
  Sampling = c("no_sampling", "oversampling", "undersampling", "Rose_both", "Smote"),
  QB_TP = NA_integer_, QB_TN = NA_integer_, QB_FP = NA_integer_, QB_FN = NA_integer_,
  WR_TP = NA_integer_, WR_TN = NA_integer_, WR_FP = NA_integer_, WR_FN = NA_integer_,
  RB_TP = NA_integer_, RB_TN = NA_integer_, RB_FP = NA_integer_, RB_FN = NA_integer_,
  Together_TP = NA_integer_, Together_TN = NA_integer_, Together_FP = NA_integer_, Together_FN = NA_integer_,
  stringsAsFactors = FALSE)
# The next steps are repeated 5 times (e.g. "1. No Sampling" does the same thing as "2. Oversampling"), each time training the model on a different sampling of the data.
# In other words, this cross-validates the sampling methods. The steps are written out repeatedly rather than wrapped in a loop or a function
# so that the intermediate objects remain directly available for further processing.
# Part 6 will test the models on the 2014 data.
## 1. No Sampling ------------------------
# Baseline: train on the raw (class-imbalanced) 2007-2013 data.
# Each position sub-section follows the same pattern:
#   1) subset the position and drop identifier columns,
#   2) fit an rpart classification tree (rpart cross-validates internally),
#   3) compute in-sample predictions; column 3 of the cbind-ed predict()
#      output is the probability of class 1 (= drafted), thresholded at 0.5,
#   4) store the confusion-matrix counts in row 1 of
#      ClassificationTreePerfMeas and plot the tree.
# The Dtest* objects are not used in this section; presumably they feed the
# out-of-sample testing on 2014 data in part 6 -- TODO confirm.
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainNS = CleanClass2007to2014_3 %>%
  filter(Year != 2014)
DtestNS = CleanClass2007to2014_3 %>%
  filter(Year == 2014)
# QB ---------------------------
# Predicting the likelyhood of a QB being picked in the draft
DtrainQBNS = DtrainNS %>%
  filter(Position == "QB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
DtestQBNS = DtestNS %>%
  filter(Position == "QB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBNS = rpart(
  formula = Drafted ~ .,
  data = DtrainQBNS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBNS, DtrainQBNS)))
CheckListQBNS = CheckList %>%
  mutate(Y=V1) %>%
  select(-V1) %>%
  mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
  mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
  mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
  mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"QB_TP"] = sum(CheckListQBNS$QB_TP)
ClassificationTreePerfMeas[1,"QB_TN"] = sum(CheckListQBNS$QB_TN)
ClassificationTreePerfMeas[1,"QB_FP"] = sum(CheckListQBNS$QB_FP)
ClassificationTreePerfMeas[1,"QB_FN"] = sum(CheckListQBNS$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBNS, main="Classification Tree for QB's with unsampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelyhood of a WR being picked in the draft
DtrainWRNS = DtrainNS %>%
  filter(Position == "WR") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
DtestWRNS = DtestNS %>%
  filter(Position == "WR") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRNS = rpart(
  formula = Drafted ~ .,
  data = DtrainWRNS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRNS, DtrainWRNS)))
CheckListWRNS = CheckList %>%
  mutate(Y=V1) %>%
  select(-V1) %>%
  mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
  mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
  mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
  mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"WR_TP"] = sum(CheckListWRNS$WR_TP)
ClassificationTreePerfMeas[1,"WR_TN"] = sum(CheckListWRNS$WR_TN)
ClassificationTreePerfMeas[1,"WR_FP"] = sum(CheckListWRNS$WR_FP)
ClassificationTreePerfMeas[1,"WR_FN"] = sum(CheckListWRNS$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRNS, main="Classification Tree for WR's with unsampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelyhood of a RB being picked in the draft
DtrainRBNS = DtrainNS %>%
  filter(Position == "RB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
DtestRBNS = DtestNS %>%
  filter(Position == "RB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBNS = rpart(
  formula = Drafted ~ .,
  data = DtrainRBNS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBNS, DtrainRBNS)))
CheckListRBNS = CheckList %>%
  mutate(Y=V1) %>%
  select(-V1) %>%
  mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
  mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
  mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
  mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"RB_TP"] = sum(CheckListRBNS$RB_TP)
ClassificationTreePerfMeas[1,"RB_TN"] = sum(CheckListRBNS$RB_TN)
ClassificationTreePerfMeas[1,"RB_FP"] = sum(CheckListRBNS$RB_FP)
ClassificationTreePerfMeas[1,"RB_FN"] = sum(CheckListRBNS$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBNS, main="Classification Tree for RB's with unsampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelyhood of QB/RB/WR together for being picked in the draft
# (Position is kept as a predictor here, so only the identifiers are dropped.)
DtrainTogetherNS = DtrainNS %>%
  select(-c(Player.Code, Name, Class, Year))
DtestTogetherNS = DtestNS %>%
  select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherNS = rpart(
  formula = Drafted ~ .,
  data = DtrainTogetherNS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherNS, DtrainTogetherNS)))
CheckListTogetherNS = CheckList %>%
  mutate(Drafted=V1) %>%
  select(-V1) %>%
  mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
  mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
  mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
  mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[1,"Together_TP"] = sum(CheckListTogetherNS$Together_TP)
ClassificationTreePerfMeas[1,"Together_TN"] = sum(CheckListTogetherNS$Together_TN)
ClassificationTreePerfMeas[1,"Together_FP"] = sum(CheckListTogetherNS$Together_FP)
ClassificationTreePerfMeas[1,"Together_FN"] = sum(CheckListTogetherNS$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherNS, main="Classification Tree for QB/WR/RB together with unsampled data", sub="", cex=0.5)
## 2. Oversampling ------------------------
# Same pipeline as section 1, but the trees are trained on oversampled data.
# The confusion counts (row 2) are deliberately computed on the ORIGINAL,
# unsampled training data (the Dtrain*NS objects from section 1), so the
# sampling schemes are compared on a common footing.
# NOTE(review): CleanClass2007to2014_3_oversampling is referenced here, but
# the file loaded at the top is named CleanClass2007to2013_3_oversampling.Rdata
# -- confirm the object name stored in that .Rdata file.
# Splitting the data
DtrainOS = CleanClass2007to2014_3_oversampling %>%
  filter(Year != 2014)
DtestOS = CleanClass2007to2014_3_oversampling %>%
  filter(Year == 2014)
# QB ---------------------------
# Predicting the likelyhood of a QB being picked in the draft
DtrainQBOS = DtrainOS %>%
  filter(Position == "QB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
DtestQBOS = DtestOS %>%
  filter(Position == "QB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBOS = rpart(
  formula = Drafted ~ .,
  data = DtrainQBOS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBOS, DtrainQBNS)))
CheckListQBOS = CheckList %>%
  mutate(Y=V1) %>%
  select(-V1) %>%
  mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
  mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
  mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
  mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"QB_TP"] = sum(CheckListQBOS$QB_TP)
ClassificationTreePerfMeas[2,"QB_TN"] = sum(CheckListQBOS$QB_TN)
ClassificationTreePerfMeas[2,"QB_FP"] = sum(CheckListQBOS$QB_FP)
ClassificationTreePerfMeas[2,"QB_FN"] = sum(CheckListQBOS$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBOS, main="Classification Tree for QB's with oversampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelyhood of a WR being picked in the draft
DtrainWROS = DtrainOS %>%
  filter(Position == "WR") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
DtestWROS = DtestOS %>%
  filter(Position == "WR") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWROS = rpart(
  formula = Drafted ~ .,
  data = DtrainWROS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWROS, DtrainWRNS)))
CheckListWROS = CheckList %>%
  mutate(Y=V1) %>%
  select(-V1) %>%
  mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
  mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
  mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
  mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"WR_TP"] = sum(CheckListWROS$WR_TP)
ClassificationTreePerfMeas[2,"WR_TN"] = sum(CheckListWROS$WR_TN)
ClassificationTreePerfMeas[2,"WR_FP"] = sum(CheckListWROS$WR_FP)
ClassificationTreePerfMeas[2,"WR_FN"] = sum(CheckListWROS$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWROS, main="Classification Tree for WR's with oversampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelyhood of a RB being picked in the draft
DtrainRBOS = DtrainOS %>%
  filter(Position == "RB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
DtestRBOS = DtestOS %>%
  filter(Position == "RB") %>%
  select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBOS = rpart(
  formula = Drafted ~ .,
  data = DtrainRBOS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBOS, DtrainRBNS)))
CheckListRBOS = CheckList %>%
  mutate(Y=V1) %>%
  select(-V1) %>%
  mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
  mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
  mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
  mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"RB_TP"] = sum(CheckListRBOS$RB_TP)
ClassificationTreePerfMeas[2,"RB_TN"] = sum(CheckListRBOS$RB_TN)
ClassificationTreePerfMeas[2,"RB_FP"] = sum(CheckListRBOS$RB_FP)
ClassificationTreePerfMeas[2,"RB_FN"] = sum(CheckListRBOS$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBOS, main="Classification Tree for RB's with oversampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelyhood of QB/RB/WR together for being picked in the draft
DtrainTogetherOS = DtrainOS %>%
  select(-c(Player.Code, Name, Class, Year))
DtestTogetherOS = DtestOS %>%
  select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherOS = rpart(
  formula = Drafted ~ .,
  data = DtrainTogetherOS,
  method = "class")
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherOS, DtrainTogetherNS)))
CheckListTogetherOS = CheckList %>%
  mutate(Drafted=V1) %>%
  select(-V1) %>%
  mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
  mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
  mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
  mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
  mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[2,"Together_TP"] = sum(CheckListTogetherOS$Together_TP)
ClassificationTreePerfMeas[2,"Together_TN"] = sum(CheckListTogetherOS$Together_TN)
ClassificationTreePerfMeas[2,"Together_FP"] = sum(CheckListTogetherOS$Together_FP)
ClassificationTreePerfMeas[2,"Together_FN"] = sum(CheckListTogetherOS$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherOS, main="Classification Tree for QB/WR/RB together with oversampled data", sub="", cex=0.5)
## 3. Undersampling ------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainUS = CleanClass2007to2014_3_undersampling %>%
filter(Year != 2014)
DtestUS = CleanClass2007to2014_3_undersampling %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelyhood of a QB being picked in the draft
DtrainQBUS = DtrainUS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestQBUS = DtestUS %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBUS = rpart(
formula = Drafted ~ .,
data = DtrainQBUS,
method = "class")
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBUS, DtrainQBNS)))
CheckListQBUS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[3,"QB_TP"] = sum(CheckListQBUS$QB_TP)
ClassificationTreePerfMeas[3,"QB_TN"] = sum(CheckListQBUS$QB_TN)
ClassificationTreePerfMeas[3,"QB_FP"] = sum(CheckListQBUS$QB_FP)
ClassificationTreePerfMeas[3,"QB_FN"] = sum(CheckListQBUS$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBUS, main="Classification Tree for QB's with undersampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelyhood of a WR being picked in the draft
DtrainWRUS = DtrainUS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestWRUS = DtestUS %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRUS = rpart(
formula = Drafted ~ .,
data = DtrainWRUS,
method = "class")
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRUS, DtrainWRNS)))
CheckListWRUS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas[3,"WR_TP"] = sum(CheckListWRUS$WR_TP)
ClassificationTreePerfMeas[3,"WR_TN"] = sum(CheckListWRUS$WR_TN)
ClassificationTreePerfMeas[3,"WR_FP"] = sum(CheckListWRUS$WR_FP)
ClassificationTreePerfMeas[3,"WR_FN"] = sum(CheckListWRUS$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRUS, main="Classification Tree for WR's with undersampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of a RB being picked in the draft
# Build RB-only train/test sets from the undersampled split; drop identifier
# and non-predictor columns so rpart only sees the features.
DtrainRBUS = DtrainUS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestRBUS = DtestUS %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBUS = rpart(
formula = Drafted ~ .,
data = DtrainRBUS,
method = "class")
# Evaluate the undersampling-trained tree on the UNSAMPLED training data
# (DtrainRBNS) so confusion counts are comparable across sampling schemes.
# Column 1 (V1) is the observed label; the remaining columns are the class
# probabilities returned by predict().
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBUS, DtrainRBNS)))
# NOTE(review): CheckList[,3] inside the pipe references the GLOBAL CheckList
# just assigned (presumably the probability of the drafted class; the column
# order follows the factor levels of Drafted -- verify), not the piped data.
CheckListRBUS = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 3 = undersampling)
ClassificationTreePerfMeas[3,"RB_TP"] = sum(CheckListRBUS$RB_TP)
ClassificationTreePerfMeas[3,"RB_TN"] = sum(CheckListRBUS$RB_TN)
ClassificationTreePerfMeas[3,"RB_FP"] = sum(CheckListRBUS$RB_FP)
ClassificationTreePerfMeas[3,"RB_FN"] = sum(CheckListRBUS$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBUS, main="Classification Tree for RB's with undersampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together for being picked in the draft
# Position is kept as a predictor here (only identifiers are dropped).
DtrainTogetherUS = DtrainUS %>%
select(-c(Player.Code, Name, Class, Year))
DtestTogetherUS = DtestUS %>%
select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherUS = rpart(
formula = Drafted ~ .,
data = DtrainTogetherUS,
method = "class")
# Evaluate on the unsampled training data so counts are comparable across
# sampling schemes. V1 = observed label, remaining columns = predicted probs.
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherUS, DtrainTogetherNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListTogetherUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 3 = undersampling)
ClassificationTreePerfMeas[3,"Together_TP"] = sum(CheckListTogetherUS$Together_TP)
ClassificationTreePerfMeas[3,"Together_TN"] = sum(CheckListTogetherUS$Together_TN)
ClassificationTreePerfMeas[3,"Together_FP"] = sum(CheckListTogetherUS$Together_FP)
ClassificationTreePerfMeas[3,"Together_FN"] = sum(CheckListTogetherUS$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherUS, main="Classification Tree for QB/WR/RB together with undersampled data", sub="", cex=0.5)
## 4. Rose_Both------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainRO = CleanClass2007to2014_3_Rose.both %>%
filter(Year != 2014)
DtestRO = CleanClass2007to2014_3_Rose.both %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
DtrainQBRO = DtrainRO %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestQBRO = DtestRO %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBRO = rpart(
formula = Drafted ~ .,
data = DtrainQBRO,
method = "class")
# Evaluate the ROSE-trained tree on the unsampled training data (DtrainQBNS)
# so counts are comparable across sampling schemes.
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBRO, DtrainQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBRO = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 4 = Rose_both)
ClassificationTreePerfMeas[4,"QB_TP"] = sum(CheckListQBRO$QB_TP)
ClassificationTreePerfMeas[4,"QB_TN"] = sum(CheckListQBRO$QB_TN)
ClassificationTreePerfMeas[4,"QB_FP"] = sum(CheckListQBRO$QB_FP)
ClassificationTreePerfMeas[4,"QB_FN"] = sum(CheckListQBRO$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBRO, main="Classification Tree for QB's with Rose Both sampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWRRO = DtrainRO %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestWRRO = DtestRO %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRRO = rpart(
formula = Drafted ~ .,
data = DtrainWRRO,
method = "class")
# Evaluate on the unsampled training data (DtrainWRNS) for comparability.
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRRO, DtrainWRNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListWRRO = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 4 = Rose_both)
ClassificationTreePerfMeas[4,"WR_TP"] = sum(CheckListWRRO$WR_TP)
ClassificationTreePerfMeas[4,"WR_TN"] = sum(CheckListWRRO$WR_TN)
ClassificationTreePerfMeas[4,"WR_FP"] = sum(CheckListWRRO$WR_FP)
ClassificationTreePerfMeas[4,"WR_FN"] = sum(CheckListWRRO$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRRO, main="Classification Tree for WR's with Rose Both sampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of a RB being picked in the draft
DtrainRBRO = DtrainRO %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
DtestRBRO = DtestRO %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Class, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBRO = rpart(
formula = Drafted ~ .,
data = DtrainRBRO,
method = "class")
# Evaluate on the unsampled training data (DtrainRBNS) for comparability.
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBRO, DtrainRBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListRBRO = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 4 = Rose_both)
ClassificationTreePerfMeas[4,"RB_TP"] = sum(CheckListRBRO$RB_TP)
ClassificationTreePerfMeas[4,"RB_TN"] = sum(CheckListRBRO$RB_TN)
ClassificationTreePerfMeas[4,"RB_FP"] = sum(CheckListRBRO$RB_FP)
ClassificationTreePerfMeas[4,"RB_FN"] = sum(CheckListRBRO$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBRO, main="Classification Tree for RB's with Rose Both sampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together for being picked in the draft
# Position is kept as a predictor here (only identifiers are dropped).
DtrainTogetherRO = DtrainRO %>%
select(-c(Player.Code, Name, Class, Year))
DtestTogetherRO = DtestRO %>%
select(-c(Player.Code, Name, Class, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherRO = rpart(
formula = Drafted ~ .,
data = DtrainTogetherRO,
method = "class")
# Evaluate on the unsampled training data for comparability.
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherRO, DtrainTogetherNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListTogetherRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 4 = Rose_both)
ClassificationTreePerfMeas[4,"Together_TP"] = sum(CheckListTogetherRO$Together_TP)
ClassificationTreePerfMeas[4,"Together_TN"] = sum(CheckListTogetherRO$Together_TN)
ClassificationTreePerfMeas[4,"Together_FP"] = sum(CheckListTogetherRO$Together_FP)
ClassificationTreePerfMeas[4,"Together_FN"] = sum(CheckListTogetherRO$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherRO, main="Classification Tree for QB/WR/RB together with Rose Both sampled data", sub="", cex=0.5)
## 5. Smote------------------------
# Splitting the data
# We use all the available information just before the 2014 NFL-Draft, in order to train the model and then apply it on the data for 2014.
DtrainSM = cleanData_smote %>%
filter(Year != 2014)
DtestSM = cleanData_smote %>%
filter(Year == 2014)
# QB ---------------------------
# Predicting the likelihood of a QB being picked in the draft
# NOTE(review): unlike the other sampling schemes, Class is NOT dropped here
# -- presumably cleanData_smote has no Class column; verify.
DtrainQBSM = DtrainSM %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Position, Year))
DtestQBSM = DtestSM %>%
filter(Position == "QB") %>%
select(-c(Player.Code, Name, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeQBSM = rpart(
formula = Drafted ~ .,
data = DtrainQBSM,
method = "class")
# Evaluate the SMOTE-trained tree on the unsampled training data (DtrainQBNS)
# so counts are comparable across sampling schemes.
CheckList = as.data.frame(cbind(DtrainQBNS$Drafted, predict(ClassTreeQBSM, DtrainQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBSM = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Y==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Y==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Y!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Y!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 5 = Smote)
ClassificationTreePerfMeas[5,"QB_TP"] = sum(CheckListQBSM$QB_TP)
ClassificationTreePerfMeas[5,"QB_TN"] = sum(CheckListQBSM$QB_TN)
ClassificationTreePerfMeas[5,"QB_FP"] = sum(CheckListQBSM$QB_FP)
ClassificationTreePerfMeas[5,"QB_FN"] = sum(CheckListQBSM$QB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeQBSM, main="Classification Tree for QB's with smote sampled data", sub="", cex=0.5)
# WR ---------------------------
# Predicting the likelihood of a WR being picked in the draft
DtrainWRSM = DtrainSM %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Position, Year))
DtestWRSM = DtestSM %>%
filter(Position == "WR") %>%
select(-c(Player.Code, Name, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeWRSM = rpart(
formula = Drafted ~ .,
data = DtrainWRSM,
method = "class")
# Evaluate on the unsampled training data (DtrainWRNS) for comparability.
CheckList = as.data.frame(cbind(DtrainWRNS$Drafted, predict(ClassTreeWRSM, DtrainWRNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListWRSM = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Y==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Y==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Y!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Y!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 5 = Smote)
ClassificationTreePerfMeas[5,"WR_TP"] = sum(CheckListWRSM$WR_TP)
ClassificationTreePerfMeas[5,"WR_TN"] = sum(CheckListWRSM$WR_TN)
ClassificationTreePerfMeas[5,"WR_FP"] = sum(CheckListWRSM$WR_FP)
ClassificationTreePerfMeas[5,"WR_FN"] = sum(CheckListWRSM$WR_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeWRSM, main="Classification Tree for WR's with smote sampled data", sub="", cex=0.5)
# RB ---------------------------
# Predicting the likelihood of a RB being picked in the draft
DtrainRBSM = DtrainSM %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Position, Year))
DtestRBSM = DtestSM %>%
filter(Position == "RB") %>%
select(-c(Player.Code, Name, Position, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeRBSM = rpart(
formula = Drafted ~ .,
data = DtrainRBSM,
method = "class")
# Evaluate on the unsampled training data (DtrainRBNS) for comparability.
CheckList = as.data.frame(cbind(DtrainRBNS$Drafted, predict(ClassTreeRBSM, DtrainRBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListRBSM = CheckList %>%
mutate(Y=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Y==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Y==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Y!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Y!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 5 = Smote)
ClassificationTreePerfMeas[5,"RB_TP"] = sum(CheckListRBSM$RB_TP)
ClassificationTreePerfMeas[5,"RB_TN"] = sum(CheckListRBSM$RB_TN)
ClassificationTreePerfMeas[5,"RB_FP"] = sum(CheckListRBSM$RB_FP)
ClassificationTreePerfMeas[5,"RB_FN"] = sum(CheckListRBSM$RB_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeRBSM, main="Classification Tree for RB's with smote sampled data", sub="", cex=0.5)
# Together ---------------------------
# Predicting the likelihood of QB/RB/WR together for being picked in the draft
# Position is kept as a predictor here (only identifiers are dropped).
DtrainTogetherSM = DtrainSM %>%
select(-c(Player.Code, Name, Year))
DtestTogetherSM = DtestSM %>%
select(-c(Player.Code, Name, Year))
# Run a classification tree. We use the whole data for training, since the rpart-function has a built-in cross-validation. For the evaluation of the
# best model we also use the whole training set for this cross-validation.
ClassTreeTogetherSM = rpart(
formula = Drafted ~ .,
data = DtrainTogetherSM,
method = "class")
# Evaluate on the unsampled training data for comparability.
CheckList = as.data.frame(cbind(DtrainTogetherNS$Drafted, predict(ClassTreeTogetherSM, DtrainTogetherNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListTogetherSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix (row 5 = Smote)
ClassificationTreePerfMeas[5,"Together_TP"] = sum(CheckListTogetherSM$Together_TP)
ClassificationTreePerfMeas[5,"Together_TN"] = sum(CheckListTogetherSM$Together_TN)
ClassificationTreePerfMeas[5,"Together_FP"] = sum(CheckListTogetherSM$Together_FP)
ClassificationTreePerfMeas[5,"Together_FN"] = sum(CheckListTogetherSM$Together_FN)
# Plotting the Tree
fancyRpartPlot(ClassTreeTogetherSM, main="Classification Tree for QB/WR/RB together with smote sampled data", sub="", cex=0.5)
# Save the performance-measurement data frame for the training phase separately
save(ClassificationTreePerfMeas, file = "../Data/PerformanceMeasurement/ClassificationTreePerfMeas.Rdata")
# Uncomment to save a Plot of a tree (and update the name!)
# savePlotToFile(file.name = "QBtreeNS.jpg")
# 6. Predicting the 2014 NFL Draft---------------
# This is the testing phase: the models trained above are now applied to the
# held-out 2014 data. Pre-build the performance-measurement table: one row
# per sampling scheme, confusion-matrix cells (TP/TN/FP/FN) per position
# group, initialised to NA and filled in below.
sampling_schemes_14 <- c("no_sampling", "oversampling", "undersampling", "Rose_both", "Smote")
metric_cols_14 <- unlist(lapply(
  c("QB", "WR", "RB", "Together"),
  function(pos) paste(pos, c("TP", "TN", "FP", "FN"), sep = "_")
))
ClassificationTreePerfMeas14 <- data.frame(
  Method = rep("ClassificationTree", length(sampling_schemes_14)),
  Sampling = sampling_schemes_14,
  stringsAsFactors = FALSE
)
for (col in metric_cols_14) {
  ClassificationTreePerfMeas14[[col]] <- NA_integer_
}
# Unsampled 2014-----------------
# Apply the trees trained on unsampled data to the 2014 test sets. For each
# position: predict, threshold the class-1 probability at 0.5, build
# TP/TN/FP/FN indicator columns and store their sums in row 1 ("no_sampling")
# of the 2014 performance table.
# Unsampled model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBNS, DtestQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"QB_TP"] = sum(CheckListQBNS$QB_TP)
ClassificationTreePerfMeas14[1,"QB_TN"] = sum(CheckListQBNS$QB_TN)
ClassificationTreePerfMeas14[1,"QB_FP"] = sum(CheckListQBNS$QB_FP)
ClassificationTreePerfMeas14[1,"QB_FN"] = sum(CheckListQBNS$QB_FN)
# Unsampled model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRNS, DtestWRNS)))
CheckListWRNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"WR_TP"] = sum(CheckListWRNS$WR_TP)
ClassificationTreePerfMeas14[1,"WR_TN"] = sum(CheckListWRNS$WR_TN)
ClassificationTreePerfMeas14[1,"WR_FP"] = sum(CheckListWRNS$WR_FP)
ClassificationTreePerfMeas14[1,"WR_FN"] = sum(CheckListWRNS$WR_FN)
# Unsampled model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBNS, DtestRBNS)))
CheckListRBNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"RB_TP"] = sum(CheckListRBNS$RB_TP)
ClassificationTreePerfMeas14[1,"RB_TN"] = sum(CheckListRBNS$RB_TN)
ClassificationTreePerfMeas14[1,"RB_FP"] = sum(CheckListRBNS$RB_FP)
ClassificationTreePerfMeas14[1,"RB_FN"] = sum(CheckListRBNS$RB_FN)
# Unsampled model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherNS, DtestTogetherNS)))
CheckListTogetherNS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[1,"Together_TP"] = sum(CheckListTogetherNS$Together_TP)
ClassificationTreePerfMeas14[1,"Together_TN"] = sum(CheckListTogetherNS$Together_TN)
ClassificationTreePerfMeas14[1,"Together_FP"] = sum(CheckListTogetherNS$Together_FP)
ClassificationTreePerfMeas14[1,"Together_FN"] = sum(CheckListTogetherNS$Together_FN)
# Oversampled 2014-----------------
# Apply the trees trained on oversampled data to the (unsampled) 2014 test
# sets; store the confusion counts in row 2 ("oversampling").
# Oversampled model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBOS, DtestQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"QB_TP"] = sum(CheckListQBOS$QB_TP)
ClassificationTreePerfMeas14[2,"QB_TN"] = sum(CheckListQBOS$QB_TN)
ClassificationTreePerfMeas14[2,"QB_FP"] = sum(CheckListQBOS$QB_FP)
ClassificationTreePerfMeas14[2,"QB_FN"] = sum(CheckListQBOS$QB_FN)
# Oversampled model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWROS, DtestWRNS)))
CheckListWROS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"WR_TP"] = sum(CheckListWROS$WR_TP)
ClassificationTreePerfMeas14[2,"WR_TN"] = sum(CheckListWROS$WR_TN)
ClassificationTreePerfMeas14[2,"WR_FP"] = sum(CheckListWROS$WR_FP)
ClassificationTreePerfMeas14[2,"WR_FN"] = sum(CheckListWROS$WR_FN)
# Oversampled model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBOS, DtestRBNS)))
CheckListRBOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"RB_TP"] = sum(CheckListRBOS$RB_TP)
ClassificationTreePerfMeas14[2,"RB_TN"] = sum(CheckListRBOS$RB_TN)
ClassificationTreePerfMeas14[2,"RB_FP"] = sum(CheckListRBOS$RB_FP)
ClassificationTreePerfMeas14[2,"RB_FN"] = sum(CheckListRBOS$RB_FN)
# Oversampled model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherOS, DtestTogetherNS)))
CheckListTogetherOS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[2,"Together_TP"] = sum(CheckListTogetherOS$Together_TP)
ClassificationTreePerfMeas14[2,"Together_TN"] = sum(CheckListTogetherOS$Together_TN)
ClassificationTreePerfMeas14[2,"Together_FP"] = sum(CheckListTogetherOS$Together_FP)
ClassificationTreePerfMeas14[2,"Together_FN"] = sum(CheckListTogetherOS$Together_FN)
# Undersampled 2014-----------------
# Apply the trees trained on undersampled data to the (unsampled) 2014 test
# sets; store the confusion counts in row 3 ("undersampling").
# Undersampled model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBUS, DtestQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"QB_TP"] = sum(CheckListQBUS$QB_TP)
ClassificationTreePerfMeas14[3,"QB_TN"] = sum(CheckListQBUS$QB_TN)
ClassificationTreePerfMeas14[3,"QB_FP"] = sum(CheckListQBUS$QB_FP)
ClassificationTreePerfMeas14[3,"QB_FN"] = sum(CheckListQBUS$QB_FN)
# Undersampled model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRUS, DtestWRNS)))
CheckListWRUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"WR_TP"] = sum(CheckListWRUS$WR_TP)
ClassificationTreePerfMeas14[3,"WR_TN"] = sum(CheckListWRUS$WR_TN)
ClassificationTreePerfMeas14[3,"WR_FP"] = sum(CheckListWRUS$WR_FP)
ClassificationTreePerfMeas14[3,"WR_FN"] = sum(CheckListWRUS$WR_FN)
# Undersampled model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBUS, DtestRBNS)))
CheckListRBUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"RB_TP"] = sum(CheckListRBUS$RB_TP)
ClassificationTreePerfMeas14[3,"RB_TN"] = sum(CheckListRBUS$RB_TN)
ClassificationTreePerfMeas14[3,"RB_FP"] = sum(CheckListRBUS$RB_FP)
ClassificationTreePerfMeas14[3,"RB_FN"] = sum(CheckListRBUS$RB_FN)
# Undersampled model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherUS, DtestTogetherNS)))
CheckListTogetherUS = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[3,"Together_TP"] = sum(CheckListTogetherUS$Together_TP)
ClassificationTreePerfMeas14[3,"Together_TN"] = sum(CheckListTogetherUS$Together_TN)
ClassificationTreePerfMeas14[3,"Together_FP"] = sum(CheckListTogetherUS$Together_FP)
ClassificationTreePerfMeas14[3,"Together_FN"] = sum(CheckListTogetherUS$Together_FN)
# Rose Both 2014-----------------
# Apply the trees trained on ROSE-both-sampled data to the (unsampled) 2014
# test sets; store the confusion counts in row 4 ("Rose_both").
# Rose Both model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBRO, DtestQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"QB_TP"] = sum(CheckListQBRO$QB_TP)
ClassificationTreePerfMeas14[4,"QB_TN"] = sum(CheckListQBRO$QB_TN)
ClassificationTreePerfMeas14[4,"QB_FP"] = sum(CheckListQBRO$QB_FP)
ClassificationTreePerfMeas14[4,"QB_FN"] = sum(CheckListQBRO$QB_FN)
# Rose Both model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRRO, DtestWRNS)))
CheckListWRRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"WR_TP"] = sum(CheckListWRRO$WR_TP)
ClassificationTreePerfMeas14[4,"WR_TN"] = sum(CheckListWRRO$WR_TN)
ClassificationTreePerfMeas14[4,"WR_FP"] = sum(CheckListWRRO$WR_FP)
ClassificationTreePerfMeas14[4,"WR_FN"] = sum(CheckListWRRO$WR_FN)
# Rose Both model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBRO, DtestRBNS)))
CheckListRBRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"RB_TP"] = sum(CheckListRBRO$RB_TP)
ClassificationTreePerfMeas14[4,"RB_TN"] = sum(CheckListRBRO$RB_TN)
ClassificationTreePerfMeas14[4,"RB_FP"] = sum(CheckListRBRO$RB_FP)
ClassificationTreePerfMeas14[4,"RB_FN"] = sum(CheckListRBRO$RB_FN)
# Rose Both model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherRO, DtestTogetherNS)))
CheckListTogetherRO = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[4,"Together_TP"] = sum(CheckListTogetherRO$Together_TP)
ClassificationTreePerfMeas14[4,"Together_TN"] = sum(CheckListTogetherRO$Together_TN)
ClassificationTreePerfMeas14[4,"Together_FP"] = sum(CheckListTogetherRO$Together_FP)
ClassificationTreePerfMeas14[4,"Together_FN"] = sum(CheckListTogetherRO$Together_FN)
# Smote 2014-----------------
# Apply the trees trained on SMOTE-sampled data to the (unsampled) 2014 test
# sets; store the confusion counts in row 5 ("Smote"), then persist the
# finished 2014 performance table.
# Smote model / QB
CheckList = as.data.frame(cbind(DtestQBNS$Drafted, predict(ClassTreeQBSM, DtestQBNS)))
# NOTE(review): CheckList[,3] references the GLOBAL CheckList (presumably the
# probability of the drafted class -- verify), not the piped data.
CheckListQBSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(QB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(QB_TP=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_TN=ifelse(Drafted==QB_Pred,ifelse(QB_Pred==0,1,0),0)) %>%
mutate(QB_FP=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==1,1,0),0)) %>%
mutate(QB_FN=ifelse(Drafted!=QB_Pred,ifelse(QB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"QB_TP"] = sum(CheckListQBSM$QB_TP)
ClassificationTreePerfMeas14[5,"QB_TN"] = sum(CheckListQBSM$QB_TN)
ClassificationTreePerfMeas14[5,"QB_FP"] = sum(CheckListQBSM$QB_FP)
ClassificationTreePerfMeas14[5,"QB_FN"] = sum(CheckListQBSM$QB_FN)
# Smote model / WR
CheckList = as.data.frame(cbind(DtestWRNS$Drafted, predict(ClassTreeWRSM, DtestWRNS)))
CheckListWRSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(WR_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(WR_TP=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_TN=ifelse(Drafted==WR_Pred,ifelse(WR_Pred==0,1,0),0)) %>%
mutate(WR_FP=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==1,1,0),0)) %>%
mutate(WR_FN=ifelse(Drafted!=WR_Pred,ifelse(WR_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"WR_TP"] = sum(CheckListWRSM$WR_TP)
ClassificationTreePerfMeas14[5,"WR_TN"] = sum(CheckListWRSM$WR_TN)
ClassificationTreePerfMeas14[5,"WR_FP"] = sum(CheckListWRSM$WR_FP)
ClassificationTreePerfMeas14[5,"WR_FN"] = sum(CheckListWRSM$WR_FN)
# Smote model / RB
CheckList = as.data.frame(cbind(DtestRBNS$Drafted, predict(ClassTreeRBSM, DtestRBNS)))
CheckListRBSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(RB_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(RB_TP=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_TN=ifelse(Drafted==RB_Pred,ifelse(RB_Pred==0,1,0),0)) %>%
mutate(RB_FP=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==1,1,0),0)) %>%
mutate(RB_FN=ifelse(Drafted!=RB_Pred,ifelse(RB_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"RB_TP"] = sum(CheckListRBSM$RB_TP)
ClassificationTreePerfMeas14[5,"RB_TN"] = sum(CheckListRBSM$RB_TN)
ClassificationTreePerfMeas14[5,"RB_FP"] = sum(CheckListRBSM$RB_FP)
ClassificationTreePerfMeas14[5,"RB_FN"] = sum(CheckListRBSM$RB_FN)
# Smote model / Together
CheckList = as.data.frame(cbind(DtestTogetherNS$Drafted, predict(ClassTreeTogetherSM, DtestTogetherNS)))
CheckListTogetherSM = CheckList %>%
mutate(Drafted=V1) %>%
select(-V1) %>%
mutate(Together_Pred=ifelse(CheckList[,3]>0.5, 1,0)) %>%
mutate(Together_TP=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_TN=ifelse(Drafted==Together_Pred,ifelse(Together_Pred==0,1,0),0)) %>%
mutate(Together_FP=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==1,1,0),0)) %>%
mutate(Together_FN=ifelse(Drafted!=Together_Pred,ifelse(Together_Pred==0,1,0),0))
# Fill the Performance Measurement Matrix
ClassificationTreePerfMeas14[5,"Together_TP"] = sum(CheckListTogetherSM$Together_TP)
ClassificationTreePerfMeas14[5,"Together_TN"] = sum(CheckListTogetherSM$Together_TN)
ClassificationTreePerfMeas14[5,"Together_FP"] = sum(CheckListTogetherSM$Together_FP)
ClassificationTreePerfMeas14[5,"Together_FN"] = sum(CheckListTogetherSM$Together_FN)
# Persist the completed 2014 performance-measurement table.
save(ClassificationTreePerfMeas14, file = "../Data/PerformanceMeasurement/ClassificationTreePerfMeas14.Rdata")
|
79369b3d1683104f16f1e5b1446034ca7810bdf9 | 1e820fe644a039a60bfbee354e50c775af675f6b | /Sampling/sampling_from_50_straws.R | de77fcaed4e7f1d1a47343f6e8b820136ef82f63 | [] | no_license | PyRPy/stats_r | a334a58fca0e335b9b8b30720f91919b7b43d7bc | 26a3f47977773044d39f6d8ad0ac8dafb01cce3f | refs/heads/master | 2023-08-17T00:07:38.819861 | 2023-08-16T14:27:16 | 2023-08-16T14:27:16 | 171,056,838 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,834 | r | sampling_from_50_straws.R |
# Sampling from 50 Straws -------------------------------------------------
# This sampling method is for 'draw 6 x yao' to determine a 'gua' use
# 'yin' or 'yang' to represent changes;
# 'yin' in yiching is like 0 while 'yang' is 1, so the gua is like a
# six digits like 011011
# The sampling method is a fairly 'complicated' process, comprising 3 steps
# Perform one "yao" drawing round on a set of straws.
#
# The procedure: split the straws into two piles (the left pile gets roughly
# half, plus a random 1-4 extra), set one straw aside from the left pile, then
# reduce each pile to a multiple of four by discarding the division remainder
# (or a whole group of four when the pile is already a multiple of four).
#
# @param straw_start Integer vector of straw IDs still in play.
# @return Integer vector of surviving straw IDs; its length is always a
#   multiple of four.
draw_yao <- function(straw_start) {
  straw_half_left <- sample(straw_start,
                            floor(length(straw_start)/2.0) + sample(1:4, 1))
  straw_half_right <- setdiff(straw_start, straw_half_left)
  # Set one straw aside from the left pile.
  straw_half_left_lessone <- sample(straw_half_left, length(straw_half_left)-1)
  group_four <- 4
  # Reduce a pile to a multiple of `group_four` straws: discard the division
  # remainder, or a whole group of four when there is no remainder.  This
  # replaces two copy-pasted if/else blocks; the sample() calls happen in the
  # same order and with the same arguments as before, so the random-number
  # stream (and therefore the results under a given seed) is unchanged.
  reduce_pile <- function(pile) {
    draw_remainder <- length(pile) %% group_four
    n_drop <- if (draw_remainder != 0) draw_remainder else group_four
    sample(pile, length(pile) - n_drop)
  }
  straw_half_left_lessone2 <- reduce_pile(straw_half_left_lessone)
  straw_half_right2 <- reduce_pile(straw_half_right)
  # The two piles are disjoint, so union() just concatenates the survivors.
  return(union(straw_half_left_lessone2, straw_half_right2))
}
# Run three successive drawing rounds and convert the surviving straw count
# into a "yao" number (the number of groups of four straws remaining).
yao_number <- function(straw_start1) {
  group_four <- 4
  remaining <- straw_start1
  for (round_i in 1:3) {
    remaining <- draw_yao(remaining)
  }
  length(remaining) / group_four
}
# Draw the six "yao" lines that make up one gua (hexagram).
# Each round starts from 50 straws with one set aside, so 49 are sampled.
# Improvements: `gua` is preallocated instead of being grown with c() inside
# the loop, and the previously unused `size_temp` now supplies the sample
# size (it equals the hard-coded 49, so the random stream is unchanged).
gua <- numeric(6)
for (i in (1:6)) {
  straw_ID <- c(1:50)
  size_temp <- length(straw_ID)-1
  straw_start1 <- sample(straw_ID, size_temp)
  yao_temp <- yao_number(straw_start1)
  gua[i] <- yao_temp
  cat("yao", i, " ", yao_temp, "\n")
}
gua
|
b862b68b4f5e6ac5e1d90e05917859a611720dea | f9c0a474528efd3f9afb1854a577f7bd7986bae8 | /long_lat_locations.R | d6e313d5ecde87a8f7399561904c85b0cfbc375c | [] | no_license | billzkhan/Dangerous_driving_behavior_AV | 4443858d0a43bf4f245b1ee587b2ef26a0a26ded | e90a8a9511fc2b254cb35c37f9da5881f26fcab1 | refs/heads/main | 2023-05-28T14:07:41.956360 | 2021-06-09T00:38:43 | 2021-06-09T00:38:43 | 375,180,194 | 0 | 0 | null | null | null | null | UHC | R | false | false | 839 | r | long_lat_locations.R | # ggmap 2.7์ ์ค์นํฉ๋๋ค. (์์ง CRAN์ ๋ฑ๋ก๋์ด ์์ง ์์ต๋๋ค.)
# Install ggmap 2.7 from GitHub (translated from Korean: it was not yet
# available on CRAN when this was written).
devtools::install_github('dkahle/ggmap')
# Load the required packages (translated from Korean).
library(ggmap)
# NOTE(review): hard-coded Google Maps API key -- this is a credential and
# should be revoked and read from an environment variable rather than being
# committed to source control.
register_google(key = 'AIzaSyCRNk5UxmpemxrqUxQKymycSSVBT5CpYsU')
# Fetch a Google roadmap centred on 'Sejong' and render it.
# NOTE(review): the location string literal contains an embedded newline --
# presumably unintentional, preserved here; confirm it geocodes correctly.
get_map(location = '세종
',
        zoom = 14,
        maptype = 'roadmap',
        source = 'google') %>%
  ggmap()
#install.packages("googleway")
library(googleway)
# Reverse-geocode a latitude/longitude pair to a Korean administrative area
# name (administrative_area_level_5), restricted to rooftop-accuracy matches.
x<-google_reverse_geocode(location = c(36.49663,127.2573), result_type = c('administrative_area_level_5'), location_type = "rooftop",
key = "AIzaSyCRNk5UxmpemxrqUxQKymycSSVBT5CpYsU", language = "ko")
x
# Pull the structured address components and the formatted address string.
y<- x$results$address_components
z<- x$results$formatted_address
z
# Snap GPS points in `data` to the road network.
# NOTE(review): `data` is not defined in this script -- it must already exist
# in the workspace (the base function data() would not work here); confirm.
xx<-google_snapToRoads(data, lat = 'lat', lon = 'long', key = "AIzaSyCRNk5UxmpemxrqUxQKymycSSVBT5CpYsU")
xx
|
74a3f3af96a6b4a944173649982f660315ee3e80 | 844439538e4c7ddf130b791d348618374f2c2ea3 | /man/collect.Rd | 9084ce78f01b35c7fc0fdeb2276693d700ca4d80 | [
"Apache-2.0"
] | permissive | cran/narray | bdb2011851021a35a638e7d9ee97dddb92843976 | 2581863eb61fb37a6728bca79a72418f932ec731 | refs/heads/master | 2023-03-21T05:58:32.962838 | 2022-10-02T12:40:02 | 2022-10-02T12:40:02 | 75,849,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 424 | rd | collect.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collect.r
\name{collect}
\alias{collect}
\title{Converts a logical matrix to a list of character vectors}
\usage{
collect(x, along = 2)
}
\arguments{
\item{x}{A logical matrix}
\item{along}{Which axis to spread mask on}
}
\value{
A character vector or list thereof
}
\description{
Currently, this only supports \code{x} with exactly one non-zero element
}
|
c4b7475799acaa9c44edc8b3826331a373399e44 | 40ec7ab6d73f71688c4ef417f0d4850049a8af1d | /Monte_Carlo_Simulation.R | cc05a3765148ec37fc85347fa26a4be7994a537f | [] | no_license | gsatone/data-analytics | 28dfe94421e3ce882c38e10c660f63fcfbf127e0 | f4e1e0d3094d8c644900812a81eaeda6fdf48c53 | refs/heads/master | 2022-12-21T00:34:53.406468 | 2020-04-02T15:52:40 | 2020-04-02T15:52:40 | 242,031,344 | 0 | 0 | null | 2022-12-16T10:02:33 | 2020-02-21T01:48:39 | Java | UTF-8 | R | false | false | 2,965 | r | Monte_Carlo_Simulation.R | #Monte Carlo simulation
# --- Monte Carlo simulation of repeated polling -------------------------------
p <- 0.45 # unknown p to estimate (true population proportion)
N <- 1000
# simulate one poll of size N and determine x_hat (the sample proportion)
x <- sample(c(0,1), size = N, replace = TRUE, prob = c(1-p, p))
x_hat <- mean(x)
# simulate B polls of size N and collect the distribution of x_hat
B <- 10000 # number of replicates
N <- 1000 # sample size per replicate
x_hat <- replicate(B, {
  x <- sample(c(0,1), size = N, replace = TRUE, prob = c(1-p, p))
  mean(x)
})
library(tidyverse)
library(gridExtra)
# Histogram of the Monte Carlo distribution of x_hat
p1 <- data.frame(x_hat = x_hat) %>%
  ggplot(aes(x_hat)) +
  geom_histogram(binwidth = 0.005, color = "black")
# QQ plot of x_hat against a normal with matching mean/sd (CLT check)
p2 <- data.frame(x_hat = x_hat) %>%
  ggplot(aes(sample = x_hat)) +
  stat_qq(dparams = list(mean = mean(x_hat), sd = sd(x_hat))) +
  geom_abline() +
  ylab("X_hat") +
  xlab("Theoretical normal")
grid.arrange(p1, p2, nrow=1)
library(tidyverse)
# Width of the +/-2SE interval as a function of the true proportion p
N <- 100000
p <- seq(0.35, 0.65, length = 100)
SE <- sapply(p, function(x) 2*sqrt(x*(1-x)/N))
data.frame(p = p, SE = SE) %>%
  ggplot(aes(p, SE)) +
  geom_line()
# Simulate one poll: draw N respondents (1 = success) with success
# probability p and return the sample proportion of successes.
take_sample <- function(p , N){
  draws <- sample(c(0, 1), size = N, replace = TRUE, prob = c(1 - p, p))
  mean(draws)
}
# Reproducible run: study the spread of the estimation error p - X_hat.
set.seed(1)
# Define `p` as the proportion of Democrats in the population being polled
p <- 0.45
# Define `N` as the number of people polled
N <- 100
#take_sample(p , N)
# Calculate the theoretical standard error of X_hat
SE <- sqrt((p * (1 - p)/N))
SE
# Monte Carlo distribution of the estimation error over 10,000 polls
errors <- replicate(10000, {p - take_sample(p , N)})
# Calculate the standard deviation of `errors` (the root mean squared error);
# it should be close to the theoretical SE above.
print(sqrt(mean(errors^2)))
#mean(errors)
#hist(errors)
#abs(errors)
#mean(abs(errors))
#hist(abs(errors))
# Estimate X_bar from one simulated poll and compute its plug-in standard error.
set.seed(1)
# Define `p` as the proportion of Democrats in the population being polled
p <- 0.45
# Define `N` as the number of people polled
N <- 100
# Draw one poll of size N.
# BUG FIX: the sampling probabilities were `prob = c(0, 1)`, which makes every
# draw a 1 (so X_bar == 1 and SE == 0).  The intended probabilities are
# c(1 - p, p), as used everywhere else in this script.
x <- sample(c(0,1), size = N, replace = TRUE, prob = c(1-p, p))
X_bar <- mean(x)
X_bar
# Plug-in estimate of the standard error of X_bar
SE <- sqrt((X_bar * (1 - X_bar))/N)
SE
# Standard error as a function of sample size N (with p fixed at 0.5)
N <- seq(100, 5000, len = 100)
p <- 0.5
se <- sqrt(p*(1-p)/N)
se
# Define `p` as the proportion of Democrats in the population being polled
p <- 0.45
# Define `N` as the number of people polled
N <- 100
# The variable `B` specifies the number of times we want the sample to be replicated
B <- 10000
# Use the `set.seed` function to make sure your answer matches the expected result after random sampling
set.seed(1)
# Define `p` as the proportion of Democrats in the population being polled
p <- 0.45
# Define `N` as the number of people polled
N <- 100
# Calculate the probability that the estimated proportion of Democrats in the
# population is greater than 0.5, using the normal approximation
# X_hat ~ N(p, sqrt(p(1-p)/N)). Print this value to the console.
1 - pnorm(0.5, mean = p, sd = sqrt(p*(1-p)/N))
# Define `N` as the number of people polled
N <-100
# Define `X_hat` as the sample average
X_hat <- 0.51
# Define `se_hat` as the estimated standard error of the sample average
se_hat <- sqrt(X_hat*(1-X_hat)/N)
# Calculate the probability that the error is 0.01 or larger (two-tailed)
1 - pnorm(0.01, 0, se_hat) + pnorm(-0.01, 0, se_hat)
|
0ecfa5d3a463816c527d258d593c12cc58d1fa03 | 42deb94948bd04300274d517aa8e3033c8dd3df2 | /R/checkGMU.R | 04ae1d5ada7dcb2b9adca6d29289252dae7a0fda | [] | no_license | cran/pedometrics | fe0940a0af6ceb02f9910d955ce6af72da94a5dc | 7329722f174c88e86620b21a81c1affc0e42b93f | refs/heads/master | 2022-07-03T13:49:28.737562 | 2022-06-19T05:10:02 | 2022-06-19T05:10:02 | 22,748,555 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,213 | r | checkGMU.R | #' Evaluation of geostatistical models of uncertainty
#'
#' @description
#' Evaluate the local quality of a geostatistical model of uncertainty (GMU) using summary measures
#' and graphical displays.
#'
#' @param observed Vector of observed values at the validation points. See \sQuote{Details} for more
#' information.
#'
#' @param simulated Data frame or matrix with simulated values (columns) for each validation point
#' (rows). See \sQuote{Details} for more information.
#'
#' @param pi Vector defining the width of the series of probability intervals. Defaults to
#' `pi = seq(0.01, 0.99, 0.01)`. See \sQuote{Details} for more information.
#'
#' @param symmetric Logical for choosing the type of probability interval. Defaults to
#' `symmetric = TRUE`. See \sQuote{Details} for more information.
#'
#' @param plotit Logical for plotting the results. Defaults to `plotit = TRUE`.
#'
#' @details
#' There is no standard way of evaluating the local quality of a GMU. The collection of summary
#' measures and graphical displays presented here is far from being comprehensive. A few definitions
#' are given bellow.
#'
#' \subsection{Error statistics}{
#' Error statistics measure how well the GMU predicts the measured values at the validation points.
#' Four error statistics are presented:
#'
#' \describe{
#' \item{Mean error (ME)}{
#' Measures the bias of the predictions of the GMU, being defined as the mean of the differences
#' between the average of the simulated values and the observed values, i.e. the average of all
#' simulations is taken as the predicted value.
#' }
#' \item{Mean squared error (MSE)}{
#' Measures the accuracy of the predictions of the GMU, being defined as the mean of the squared
#' differences between the average of the simulated values and the observed values.
#' }
#' \item{Scaled root mean squared error (SRMSE)}{
#' Measures how well the GMU estimate of the prediction error variance (PEV) approximates the
#' observed prediction error variance, where the first is given by the variance of the simulated
#' values, while the second is given by the squared differences between the average of the simulated
#' values, i.e. the squared error (SE). The SRMSE is computed as the average of SE / PEV, where
#' SRMSE > 1 indicates underestimation, while SRMSE < 1 indicates overestimation.
#' }
#' \item{Pearson correlation coefficient}{
#' Measures how close the GMU predictions are to the observed values. A scatter plot of the observed
#' values versus the average of the simulated values can be used to check for possible unwanted
#' outliers and non-linearities. The square of the Pearson correlation coefficient measures the
#' fraction of the overall spread of observed values that is explained by the GMU, that is, the
#' amount of variance explained (AVE), also known as coefficient of determination or ratio of
#' scatter.
#' }
#' }
#' }
#' \subsection{Coverage probabilities}{
#' The coverage probability of an interval is given by the number of times that that interval
#' contains its parameter over several replications of an experiment. For example, consider the
#' interquartile range \eqn{IQR = Q3 - Q1} of a Gaussian distributed variable with mean equal to
#' zero and variance equal to one. The nominal coverage probability of the IQR is 0.5, i.e. two
#' quarters of the data fall within the IQR. Suppose we generate a Gaussian distributed
#' _random_ variable with the same mean and variance and count the number of values that fall within
#' the IQR defined above: about 0.5 of its values will fall within the IQR. If we continue
#' generating Gaussian distributed _random_ variables with the same mean and variance, on average,
#' 0.5 of the values will fall in that interval.
#'
#' Coverage probabilities are very useful to evaluate the local quality of a GMU: the closer the
#' observed coverage probabilities of a sequence of probability intervals (PI) are to the nominal
#' coverage probabilities of those PIs, the better the modeling of the local uncertainty.
#'
#' Two types of PIs can be used here: symmetric, median-centered PIs, and left-bounded PIs. Papritz
#' & Dubois (1999) recommend using left-bounded PIs because they are better at evidencing deviations
#' for both large and small PIs. The authors also point that the coverage probabilities of the
#' symmetric, median-centered PIs can be read from the coverage probability plots produced using
#' left-bounded PIs.
#'
#' In both cases, the PIs are computed at each validation location using the quantiles of the
#' conditional cumulative distribution function (ccdf) defined by the set of realizations at that
#' validation location. For a sequence of PIs of increasing width, we check which of them contains
#' the observed value at all validation locations. We then average the results over all validation
#' locations to compute the proportion of PIs (with the same width) that contains the observed
#' value: this gives the coverage probability of the PIs.
#'
#' Deutsch (1997) proposed three summary measures of the coverage probabilities to assess the local
#' _goodness_ of a GMU: accuracy ($A$), precision ($P$), and goodness ($G$). According to Deutsch
#' (1997), a GMU can be considered \dQuote{good} if it is both accurate and precise. Although easy
#' to compute, these measures seem not to have been explored by many geostatisticians, except for
#' the studies developed by Pierre Goovaerts and his later software implementation (Goovaerts,
#' 2009). Richmond (2001) suggests that they should not be used as the only measures of the local
#' quality of a GMU.
#'
#' \describe{
#' \item{Accuracy}{
#' An accurate GMU is that for which the proportion \eqn{p^*} of true values falling within the $p$
#' PI is equal to or larger than the nominal probability $p$, that is, when \eqn{p^* \geq p}. In the
#' coverage probability plot, a GMU will be more accurate when all points are on or above the 1:1
#' line. The range of $A$ goes from 0 (least accurate) to 1 (most accurate).
#' }
#' \item{Precision}{
#' The _precision_, $P$, is defined only for an accurate GMU, and measures how close \eqn{p^*} is to
#' $p$. The range of $P$ goes from 0 (least precise) to 1 (most precise). Thus, a GMU will be more
#' accurate when all points in the PI-width plot are on or above the 1:1 line.
#' }
#' \item{Goodness}{
#' The _goodness_, $G$, is a measure of the departure of the points from the 1:1 line in the
#' coverage probability plot. $G$ ranges from 0 (minimum goodness) to 1 (maximum goodness), the
#' maximum $G$ being achieved when \eqn{p^* = p}, that is, all points in both coverage probability
#' and interval width plots are exactly on the 1:1 line.
#' }
#' }
#' It is worth noting that the coverage probability and PI-width plots are relevant mainly to GMU
#' created using _conditional simulations_, that is, simulations that are locally conditioned to the
#' data observed at the validation locations. Conditioning the simulations locally serves the
#' purposes of honoring the available data and reducing the variance of the output realizations.
#' This is why one would like to find the points falling above the 1:1 line in both coverage
#' probability and PI-width plots. For _unconditional simulations_, that is, simulations that are
#' only globally conditioned to the histogram (and variogram) of the data observed at the validation
#' locations, one would expect to find that, over a large number of simulations, the whole set of
#' possible values (i.e. the global histogram) can be generated at any node of the simulation grid.
#' In other words, it is expected to find all points on the 1:1 line in both coverage probability
#' and PI-width plots. Deviations from the 1:1 line could then be used as evidence of problems in
#' the simulation.
#' }
#'
#' @return
#' A `list` of summary measures and plots of the coverage probability and width of probability
#' intervals.
#'
#' @references
#' Deutsch, C. Direct assessment of local accuracy and precision. Baafi, E. Y. & Schofield, N. A.
#' (Eds.) _Geostatistics Wollongong '96_. Dordrecht: Kluwer Academic Publishers, v. I, p. 115-125,
#' 1997.
#'
#' Papritz, A. & Dubois, J. R. Mapping heavy metals in soil by (non-)linear kriging: an empirical
#' validation. Gรณmez-Hernรกndez, J.; Soares, A. & Froidevaux, R. (Eds.) _geoENV II -- Geostatistics
#' for Environmental Applications_. Springer, p. 429-440, 1999.
#'
#' Goovaerts, P. Geostatistical modelling of uncertainty in soil science. _Geoderma_. v. 103, p.
#' 3 - 26, 2001.
#'
#' Goovaerts, P. AUTO-IK: a 2D indicator kriging program for the automated non-parametric modeling
#' of local uncertainty in earth sciences. _Computers & Geosciences_. v. 35, p. 1255-1270, 2009.
#'
#' Richmond, A. J. Maximum profitability with minimum risk and effort. Xie, H.; Wang, Y. & Jiang, Y.
#' (Eds.) _Proceedings 29th APCOM_. Lisse: A. A. Balkema, p. 45-50, 2001.
#'
#' Ripley, B. D. _Stochastic simulation_. New York: John Wiley & Sons, p. 237, 1987.
#'
#' @note Comments by Pierre Goovaerts \email{pierre.goovaerts@@biomedware.com} were important to
#' describe how to use the coverage probability and PI-width plots when a GMU is created using
#' unconditional simulations.
#'
#' @author Alessandro Samuel-Rosa \email{alessandrosamuelrosa@@gmail.com}
#'
#' @examples
#' if (interactive()) {
#' set.seed(2001)
#' observed <- round(rnorm(100), 3)
#' simulated <- t(
#' sapply(1:length(observed), function (i) round(rnorm(100), 3)))
#' resa <- checkGMU(observed, simulated, symmetric = T)
#' resb <- checkGMU(observed, simulated, symmetric = F)
#' resa$error; resb$error
#' resa$goodness; resb$goodness
#' }
# FUNCTION #########################################################################################
#' @export
checkGMU <-
  function(observed, simulated, pi = seq(0.01, 0.99, 0.01), symmetric = TRUE, plotit = TRUE) {
    # Initial settings
    n_pts <- length(observed) # number of validation points
    n_pis <- length(pi)       # number of probability intervals
    # For symmetric, median-centred intervals, compute the lower/upper tail
    # probabilities that define each interval (one column per interval)
    if (symmetric) {
      pi_bounds <- sapply(seq_along(pi), function(i) c(1 - pi[i], 1 + pi[i]) / 2)
      message("Processing ", n_pis, " symmetric probability intervals...")
    } else {
      message("Processing ", n_pis, " probability intervals...")
    }
    # Do true values fall into each of the (symmetric) probability intervals?
    fall <- matrix(nrow = n_pts, ncol = n_pis)
    width <- matrix(nrow = n_pts, ncol = n_pis)
    g_fall <- matrix(nrow = n_pts, ncol = n_pis)
    g_width <- matrix(nrow = n_pts, ncol = n_pis)
    if (symmetric) { # Deutsch (1997)
      # The 'global' interval bounds depend only on `observed` and the interval
      # index j -- not on the validation point i -- so compute them once up
      # front instead of inside the i-loop (this removes an accidental
      # O(n_pts * n_pis) quantile() hot spot; the results are identical).
      g_bounds_all <- sapply(seq_len(n_pis), function(j) {
        stats::quantile(x = observed, probs = pi_bounds[, j])
      })
      for (i in 1:n_pts) {
        x <- simulated[i, ]
        y <- observed[i]
        for (j in 1:n_pis) {
          # Local: ccdf defined by the simulated values at validation point i
          bounds <- stats::quantile(x = x, probs = pi_bounds[, j])
          fall[i, j] <- bounds[1] < y & y <= bounds[2]
          width[i, j] <- as.numeric(bounds[2] - bounds[1])
          # Global: ccdf defined by all observed values
          g_fall[i, j] <- g_bounds_all[1, j] < y & y <= g_bounds_all[2, j]
          g_width[i, j] <- as.numeric(g_bounds_all[2, j] - g_bounds_all[1, j])
        }
      }
    } else { # Papritz & Dubois (1999): left-bounded intervals
      # Global lower bound and upper bounds are constant across points, so
      # compute them once (same hoisting as in the symmetric branch)
      g_lower <- min(observed)
      g_upper_all <- stats::quantile(x = observed, probs = pi)
      for (i in 1:n_pts) {
        x <- simulated[i, ]
        y <- observed[i]
        lower <- min(x)
        for (j in 1:n_pis) {
          # Local
          upper <- stats::quantile(x = x, probs = pi[j])
          fall[i, j] <- y <= upper
          width[i, j] <- as.numeric(upper - lower)
          # Global
          g_fall[i, j] <- y <= g_upper_all[j]
          g_width[i, j] <- as.numeric(g_upper_all[j] - g_lower)
        }
      }
    }
    # Compute the proportion of true values that fall into each of the
    # (symmetric) probability intervals
    count <- apply(fall, 2, sum)
    prop <- count / n_pts
    g_count <- apply(g_fall, 2, sum)
    # Compute the average width of the (symmetric) probability intervals into
    # which the true values fall
    width <- width * fall
    width <- apply(width, 2, sum) / count
    g_width <- g_width * g_fall
    g_width <- apply(g_width, 2, sum) / g_count
    # Summary statistics: accuracy, precision, goodness (Deutsch, 1997)
    accu <- prop >= pi
    pi_idx <- which(accu)
    accu <- sum(prop >= pi) / n_pis # accuracy
    prec <- 1 - 2 * sum(prop[pi_idx] - pi[pi_idx]) / n_pis # precision
    pi_w <- ifelse(1:n_pis %in% pi_idx, 1, 2) # inaccurate intervals weigh double
    good <- 1 - (sum(pi_w * abs(prop - pi)) / n_pis) # goodness
    pred <- apply(simulated, 1, mean) # predicted value
    pred_var <- apply(simulated, 1, stats::var) # prediction variance
    err <- pred - observed # error
    me <- mean(err) # mean error
    serr <- err ^ 2 # squared error
    mse <- mean(serr) # mean squared error
    srmse <- mean(serr / pred_var) # scaled root mean squared error
    corr <- stats::cor(pred, observed) # linear correlation
    error_stats <- data.frame(me = me, mse = mse, srmse = srmse, cor = corr)
    good_meas <- data.frame(A = accu, P = prec, G = good, symmetric = symmetric)
    if (plotit) {
      # Save the current graphics settings and restore them when the function
      # exits.  BUG FIX: the previous code used `on.exit(graphics::par())`, but
      # par() called with no arguments only *queries* the settings, so nothing
      # was restored; `par(no.readonly = TRUE)` followed by `par(opar)` is the
      # documented save/restore idiom.
      opar <- graphics::par(no.readonly = TRUE)
      on.exit(graphics::par(opar), add = TRUE)
      graphics::par(mfrow = c(2, 2))
      cex <- ifelse(n_pts > 10, 0.5, 1)
      # Coverage probability plot: observed vs nominal coverage
      graphics::plot(
        0:1, 0:1, type = "n", main = "Coverage probability",
        xlab = "Probability interval", ylab = "Proportion")
      graphics::abline(a = 0, b = 1)
      graphics::points(x = pi, y = prop, cex = cex)
      if (symmetric) {
        graphics::text(x = 1, y = 0, labels = "Symmetric PIs", pos = 2)
      }
      # PI-width plot: local vs global interval widths
      lim <- range(c(width, g_width), na.rm = TRUE)
      graphics::plot(
        x = width, y = g_width, ylim = lim, xlab = "Local",
        ylab = "Global", cex = cex, xlim = lim, main = "PI width")
      graphics::abline(a = 0, b = 1)
      if (symmetric) {
        graphics::text(x = lim[2], y = lim[1], labels = "Symmetric PIs", pos = 2)
      }
      # Observed vs simulated (average) values
      lim <- range(c(observed, pred))
      graphics::plot(
        x = observed, pred, main = "Observed vs Simulated", xlab = "Observed",
        ylim = lim, xlim = lim, ylab = "Simulated (average)", cex = cex)
      graphics::abline(a = 0, b = 1)
      # Box plots of the simulated ccdf at each validation point (max 100),
      # points ordered by the rank of the observed value
      idx <- 1:n_pts
      idx <- idx[order(rank(observed))]
      if (n_pts > 100) {
        sub_idx <- round(seq(1, n_pts, length.out = 100))
        graphics::boxplot(
          t(simulated[idx[sub_idx], ]), col = "yellow", pars = list(cex = cex),
          names = idx[sub_idx])
        graphics::points(observed[idx[sub_idx]], col = "red", pch = 17, cex = cex)
        xlab <- "Validation point (max of 100)"
      } else {
        graphics::boxplot(
          t(simulated[idx, ]), col = "yellow", pars = list(cex = cex),
          names = idx, xlab = "Validation point")
        graphics::points(observed[idx], col = "red", pch = 17, cex = cex)
        xlab <- "Validation point"
      }
      graphics::title(main = "Distribution of values", xlab = xlab, ylab = "Distribution")
    }
    # Output
    res <- list(data = data.frame(pi = pi, prop = prop, width = width),
                error = error_stats, goodness = good_meas)
    return(res)
  }
2ad4eff1b315363184e6cde925a72dc800d7cbfc | de9d448132f90f073d29add688de2fcf72527a89 | /man/prepSeurat.Rd | ec177e0b878750ffef497ae1672c5390ae3a0282 | [
"MIT"
] | permissive | NMikolajewicz/scMiko | 33f137e9e3a6318fb0386506ac4666a3822463f0 | bd00724889db265817fc54d0d50b14647d32438d | refs/heads/master | 2023-06-09T05:51:30.199131 | 2023-06-04T20:23:51 | 2023-06-04T20:23:51 | 249,496,034 | 20 | 4 | null | null | null | null | UTF-8 | R | false | true | 394 | rd | prepSeurat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preparation_functions.R
\name{prepSeurat}
\alias{prepSeurat}
\title{prep Seurat}
\usage{
prepSeurat(object)
}
\arguments{
\item{object}{Seurat objects}
}
\value{
Seurat object
}
\description{
Preprocesses a Seurat object using the fixBarcodeLabel(), UpdateSeuratObject() and updateDimNames() functions.
}
\author{
Nicholas Mikolajewicz
}
|
799f82dda917186a959758a453525338820c8790 | 834d59eb3b518dd33732b92d99d4767b868cbe20 | /src/download_file.R | 22ece6244ce709876151ae31c7ffff22be7c5edb | [
"MIT"
] | permissive | UBC-MDS/DSCI522_Group413_WhatHappensInVegas | 2937624020d8788ba54e0f4525188e8b096a9ae4 | f12f231eeed3462d69c7819be4b48180dd54891c | refs/heads/master | 2020-12-14T05:33:02.647061 | 2020-02-08T21:52:08 | 2020-02-08T21:52:08 | 234,657,738 | 1 | 4 | MIT | 2020-02-08T21:52:09 | 2020-01-18T00:07:32 | R | UTF-8 | R | false | false | 607 | r | download_file.R | # authors: Arun, Bronwyn, Manish
# date: 2020-01-17
"The script downloads a file from specified url to specified location on location machine.
Usage: src/download_file.R <file_source> <destination_file>
Options:
<file_source> Takes in a link to the data (this is a required positional argument)
<destination_file> Takes in a file path (this is a required option)
" -> doc
# NOTE(review): the string above is the docopt spec parsed at runtime, so it is
# left untouched here; 'location machine' is presumably a typo for 'local
# machine', and <destination_file> is positional, not an option -- confirm.
library(tidyverse)
library(docopt)
# Parse command-line arguments against the docopt spec and echo them.
opt <- docopt(doc)
print(opt)
# Download the file at `file_source` to `destination_file` on the local disk.
main <- function(file_source, destination_file){
  download.file(file_source, destination_file)
}
main(opt$file_source, opt$destination_file) |
1334ad9f5fb4c52de9cf544357b82c5842d32889 | e7fcc7ba4ed4ecc842c13a09e44c32cdf88cff46 | /lol.R | 4fa4e5c0c32cc6fd8af884c5b63fd394e1487fd1 | [] | no_license | josephcoz/lol | 58177ad6dadfbec6a48c7f19f849d6c31a11f1d6 | b738517cf4683ef2552b2ce4fd31f84b4cbd3f12 | refs/heads/master | 2020-06-28T03:12:06.701986 | 2019-08-01T22:58:54 | 2019-08-01T22:58:54 | 200,129,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,870 | r | lol.R | # attach relevant libraries
library(httr)
library(jsonlite)
# Download Riot's public seed data (10 League of Legends matches) and parse
# the JSON response body into nested R lists/data frames.
get1<-GET("https://s3-us-west-1.amazonaws.com/riot-developer-portal/seed-data/matches10.json")
alljson1<-fromJSON(content(get1, "text", encoding = "UTF-8"))
# get 100 chunks (per-participant stats for the first 100 match entries)
# Build a per-participant stats table from the first 100 match entries.
# Each alljson1$matches$participants[[i]]$stats holds the stat vectors for all
# participants of entry i; one data.frame is built per entry and all of them
# are combined once at the end.  Using lapply() + a single do.call(rbind, ...)
# replaces the original rbind()-inside-a-loop, which copies the accumulated
# data frame on every iteration (O(n^2)); the resulting `lol` is identical.
lol_rows <- lapply(1:100, function(i) {
  stats_i <- alljson1$matches$participants[[i]]$stats
  data.frame(win = ifelse(stats_i$win == TRUE, 1, 0),
             kills = stats_i$kills,
             deaths = stats_i$deaths,
             assists = stats_i$assists,
             goldEarned = stats_i$goldEarned,
             longestTimeSpentLiving = stats_i$longestTimeSpentLiving,
             largestMultiKill = stats_i$largestMultiKill)
})
lol <- do.call(rbind, lol_rows)
# ensure data read in nicely
str(lol)
tail(lol)
# create boxplot of total 'skill score' vs wins (earlier attempt, kept for reference)
#boxplot(kills + deaths + assists + goldEarned + longestTimeSpentLiving + largestMultiKill ~ win, data = lol,
# xlab = 'Win or no win', ylab = 'Skill score', main = 'League of Legends player skill score vs. Wins')
# whoops, I was supposed to create separate boxplots...now I understand
# Exploratory plots: one boxplot per candidate predictor, split by win/loss.
boxplot(kills ~ win, data = lol, main = 'Kills vs Wins')
boxplot(deaths ~ win, data = lol, main = 'Deaths vs Wins')
boxplot(assists ~ win, data = lol, main = 'Assists vs Wins')
boxplot(goldEarned ~ win, data = lol, main = 'Gold Earned vs Wins')
boxplot(longestTimeSpentLiving ~ win, data = lol, main = 'Longest Time Spent Living vs Wins')
boxplot(largestMultiKill ~ win, data = lol, main = 'Largest Multikill vs Wins')
# remove obs 175, 322, 374, 526, 792 and any obs with Kills >=20 and Deaths >= 15 (SHOULD BE 989 OBS)
lol <- subset(lol, kills < 20)
#str(lol)
lol <- subset(lol, deaths < 15)
#str(lol)
# NOTE(review): these are *positional* indices into the already-filtered data
# frame, not the original row numbers -- confirm the five rows being dropped
# are the intended outlier observations.
lol <- lol[-c(175, 322, 374, 526, 792),]
str(lol)
# response variable win = 1, loss = 0
# explanatory variables:
# offense components: kills, gold earned
# 'error' components: deaths
# team play components: assists
# risk/reward components: longest time spent living
# 'hot hand' components: largest multikill
# create train and test datasets (700 train / 289 test, seeded for reproducibility)
set.seed(58)
train_ind <- sample(989, 700)
lol_train <- lol[train_ind,]
lol_test <- lol[-train_ind,]
# confirm train and test are similar
summary(lol_train)
summary(lol_test)
# Analysis
# FIT MODEL: logistic regression of win on all six skill components
# log( P(Win) / P(Loss))
# = beta0 + beta1 * kills + beta2 * deaths + beta3 * assists +
# beta4 * goldEarned + beta5 * longestTimesSpentLiving + beta6 * largestMultiKill
lol_out <- glm(win ~ kills + deaths + assists + goldEarned + longestTimeSpentLiving + largestMultiKill,
               data = lol_train,
               family = "binomial")
summary(lol_out)
#for some reason my nummbers are different even though I set the same seed..?
#Coeffiecients are close, though
#For each additional kill, *holding all else constant*, we estimate an
# increase of 0.06 in the log odds ratio of winning
#change in odds interpretation
#exponentiating y-intercept coefficient has no interpretation
# Exponentiate the slope coefficients to express effects as multiplicative
# changes in the odds of winning.
exp(coef(lol_out)[-1])
#the '-1' excludes the y-intercept
#***************************************************************
#Remember that with exponentiating transformations, the mu = 1.
#Therefore increasing coefficients are greater than 1, and
# and decreasing coefficients are less than 1
#***************************************************************
#Interpretation:
#For each additional kill, *holding all else constant*, we estimate a
# 6% increase in the odds of winning
#For each additional death, *holding all else constant*, we estimate a
# 53% decrease in the odds of winning
#NOW, using the coefficients table from before, we can still infer significance
#There is no statistically significant kills effect on wins (p-value=0.278)
#There is a statistically significant deaths effect on wins (p-value < 0.0001)
#Confidence Intervals
#95% CI on log odds
confint(lol_out)
#95% CI on change in odds
exp(confint(lol_out)[-1,])
#kills CI: (-3%, +17%)
#GRAPHICS of effects
# kills
par(mfrow=c(2,2))
#in terms of log odds: partial effect of each predictor on logit(Win)
x_star <- seq(0,10,length=100)
plot(x_star, coef(lol_out)[2]*x_star,type="l",
     xlab="Kills", ylab="Partial logit(Win)") #partial log odds of win
x_star<- seq(0,15,length=100)
plot(x_star, coef(lol_out)[3]*x_star,type="l",
     xlab="Deaths", ylab="Partial logit(Win)")
#par(mfrow=c(1,1))
#add for assists and gold earned
x_star <- seq(0,30,length=100)
plot(x_star, coef(lol_out)[4]*x_star,type="l",
     xlab="Assists", ylab="Partial logit(Win)")
x_star <- seq(0,25000,length=100)
plot(x_star, coef(lol_out)[5]*x_star, type="l",
     xlab="Gold Earned", ylab="Partial logit(Win)")
#I wonder if you can overlay plots using ggplot..?
#From a probability perspective
#demonstrate effect of kills
#set all other expl. vars to the median
#check medians
summary(lol)
# Prediction grid: vary goldEarned while holding the other predictors near
# their medians.
x_star <- data.frame(goldEarned = seq(5000,17500,length=100),
                     kills=5, deaths=5, assists=7,
                     longestTimeSpentLiving=600, largestMultiKill=1)
#par(mfrow)
# NOTE(review): x_star$kills is constant in this grid, so this first plot
# collapses to a single x value (the comment below says x_star had to be
# changed for it) -- confirm which grid was intended here.
plot(x_star$kills,predict(lol_out,newdata=x_star,type="response"),
     type = "l", xlab = "Kills", ylab= "P(Win)", ylim=c(0,1))
#had to change x_star above for this plot
plot(x_star$goldEarned,predict(lol_out,newdata=x_star,type="response"),
     type = "l", xlab = "Gold Earned", ylab= "P(Win)", ylim=c(0,1))
#summarize statistical significance of model factors
#use sig. tests or 95% conf intervals
summary(lol_out)
#Test Ho: no effect on winning for an aggressive strategy
# Ho: no kills or largestMultiKill or goldEarned
# Likelihood-ratio test: the reduced model drops the 'aggressive' predictors.
lol_reduced<- glm(win ~ deaths + assists + longestTimeSpentLiving,
                  data = lol_train,
                  family = "binomial")
anova(lol_reduced,lol_out,test='Chisq')
#reject Ho; there is an effect on winning for aggressive strategy
#Predict P(Win) for a player with Faker (very skilled player)-like skills
predict(lol_out,newdata=data.frame(kills=2,goldEarned=15000,deaths=2,assists=8,
                                   longestTimeSpentLiving=600, largestMultiKill=2),
        type="response")
#very high probability
#95% conf int on P(Win)
Faker_logit<-predict(lol_out,newdata=data.frame(kills=2,goldEarned=15000,deaths=2,assists=8,
longestTimeSpentLiving=600, largestMultiKill=2),
type="link",se.fit=TRUE)
Faker_logit
# 95% CI on logit(Win)
logit_L <- Faker_logit$fit - 1.96*Faker_logit$se.fit
logit_U <- Faker_logit$fit + 1.96*Faker_logit$se.fit
#transform logit to probability
Faker_phat_L <- exp(logit_L)/(1+exp(logit_L))
Faker_phat_U <- exp(logit_U)/(1+exp(logit_U))
#construct ROC (receiver operator characterisic) curve
library(ROCR)
train_pred<-prediction(predict(lol_out, type="response"), lol_train$win)
train_perf<-performance(train_pred,measure="tpr",x.measure="fpr")
plot(train_perf,xlab="1-specificity",ylab="sensitivity",main="ROC curve")
abline(0,1,col='grey')
test_pred<-prediction(predict(lol_out,newdata=lol_test,type="response"), lol_test$win)
#we expect test data to be worse; closer to 50/50 line on plot
test_perf<-performance(test_pred,measure="tpr",x.measure = "fpr")
plot(test_perf,add=TRUE,col="dodgerblue")
# AUC - area under curve
performance(train_pred,measure="auc")
performance(test_pred,measure="auc")
|
b64804002692af9e2bfdb49aeb1086f082aee08b | 4732ee568329830a646280c2267a1d9e4fd7dba3 | /Ejemplo2.R | e18f607522f9bb283513acfbb1b738d94c2539e4 | [] | no_license | OscarSalaza/BSSESION7 | 6fcf591998eb723f8cc457faaff8dedd9bc28d93 | 2629841076e3131a729720d276e4cee462d2b0ab | refs/heads/main | 2023-02-24T10:23:41.634762 | 2021-01-28T02:04:51 | 2021-01-28T02:04:51 | 333,608,453 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 42 | r | Ejemplo2.R | #otro intento
# Print descriptive statistics for every column of the built-in mtcars dataset
summary(mtcars)
# it works ("funciona")
|
819c93a0004787508d45925366c9905632d7bf42 | a349462b9abf56b84eef64eb32c34e2c5b019483 | /tests/testthat/test_data.R | 27f7b67fe440e004d517ae90220e5d88565de02d | [] | no_license | timmcm/dibbler | e040b7afdd0ebea5d2d1eea1b1befa0431c72b06 | 0703c35648271914f008c0d3a23822a01ffe4da0 | refs/heads/master | 2021-01-21T18:22:13.203421 | 2016-07-08T15:57:32 | 2016-07-08T15:57:32 | 65,034,201 | 0 | 0 | null | 2016-08-05T16:43:16 | 2016-08-05T16:43:15 | null | UTF-8 | R | false | false | 289 | r | test_data.R | context("Test input data processing")
## test data ##
test_that("test: data are processed fine", {
  ## skip on CRAN
  skip_on_cran()
  ## FIX: removed `rm(list=ls())` -- wiping the workspace inside a test is an
  ## anti-pattern; tests must be independent and must not clear external state.
  ## generate data: dibbler.data() with no arguments is expected to error
  expect_error(dibbler.data())
  ## check output shape
  ## check attributes
  ## round trip
})
|
0c3826a63e01af5c9cc6fee12fa6d6a66e3fc1f3 | 04ff194cd73de53e0172e1ce59ba8ab755c95783 | /2021-04-21 lecture/ch02.R | 89665f2f32767ae629efd22f07b56a50708baaf2 | [] | no_license | GhyunOh/R-Lecture-2021 | fb7c9e59f2a4f6c93f07e077460e559ba81ebad9 | 6505abe48030f19181c378eff3f3d93621a8f74d | refs/heads/main | 2023-04-19T19:45:13.591122 | 2021-05-18T08:54:37 | 2021-05-18T08:54:37 | 359,694,713 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,175 | r | ch02.R | women
plot(women)
str(cars)
# Select both lines below and press the 'run' button at the top to execute
# them together
a <- 2
b <- a * a
# Example: working-directory handling
# (NOTE: setwd() in scripts is generally discouraged; kept as lecture material)
getwd()
setwd('/workspace/r')
getwd()
library(dplyr)
library(ggplot2)
search()
str(iris)
iris
head(iris) # by default shows only the first 6 rows
head(iris, 30)
tail(iris) # by default shows only the last 6 rows
plot(iris)
plot(iris$Petal.Length, iris$Petal.Width, col=iris$Species, pch = 18)
# FIX: the original legend() call was a syntax error (`lty 1:2:3`) and passed
# whole data vectors as the legend position; anchor it by keyword instead and
# label the three species with the palette colors 1:3 that plot() used for
# the factor col=iris$Species.
legend("topright", legend = levels(iris$Species), col = 1:3, pch = 18, cex = 0.8)
# tips.csv download
tips <- read.csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/tips.csv')
head(tips)
str(tips)
head(tips)
# summary statistics
summary(tips)
# drawing plots with ggplot2
tips %>% ggplot(aes(size)) + geom_histogram()        # histogram
tips %>% ggplot(aes(total_bill, tip)) + geom_point() # scatter plot
tips %>% ggplot(aes(total_bill, tip)) + geom_point(aes(col=day))
tips %>% ggplot(aes(total_bill, tip)) + geom_point(aes(col=day, pch=sex), size=3)
tips %>% ggplot(aes(total_bill, tip)) + geom_point(aes(col=day, pch=time), size=3)
bae6696d975f1cb531d799099726bc51801dc386 | 3449a99c56cf3120aa02ab22c58edbd3d6286074 | /get_var.R | 9fbfe27b98e059f12af617a70563d03d6b82d0f9 | [] | no_license | martin-vasilev/Bmeta | f124f73e8d2b53ecddb41ef50ecde699b35c84e6 | 39d587df5f816c24510093a44d8d2f100729292c | refs/heads/master | 2021-01-18T02:20:32.482029 | 2016-08-02T10:53:23 | 2016-08-02T10:53:23 | 58,971,665 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64 | r | get_var.R | get_var<- function(s1, s2, n){
return((s1^2+s2^2)/n)
} |
47f507d5b41e266b82ca8e3c275e6d4fa2c7b790 | b25541ff0ed8d5a85b52137e16795c38306ca534 | /Project Analisa Klasifikasi Pinjaman untuk Sektor UMKM.R | 149610c12238402972bad1d6c321e24afc73c163 | [
"MIT"
] | permissive | mulkiah/Data-Science-with-R | a23a0097d0250780dab01f8ad60b2f0e067ce8bc | 3429fc7969d13b6d0a575a28cbd8d9e2601b6925 | refs/heads/main | 2023-08-18T11:44:29.993627 | 2021-10-20T01:25:06 | 2021-10-20T01:25:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,573 | r | Project Analisa Klasifikasi Pinjaman untuk Sektor UMKM.R | # Membaca Data External
# Load the external customer-loan dataset from the DQLab storage bucket.
data <- read.csv("https://storage.googleapis.com/dqlab-dataset/project.csv")

# Quick inspection: first rows, per-column types, descriptive statistics.
head(data)
str(data)
summary(data)

# Drop the first two columns (row-index "X" and the customer name), keeping
# a data.frame, then show the remaining column names.
data_reduce <- data[, -(1:2), drop = FALSE]
colnames(data_reduce)
# Categorical-variable selection: keep the two categorical predictors plus the
# target, then test each predictor's association with the target using
# chi-squared tests of independence.
data_kategorik = data_reduce[,c("KONDISI_USAHA","KONDISI_JAMINAN","REKOMENDASI_TINDAK_LANJUT")]
# Convert the target to factor on the main frame. data_kategorik still holds
# the pre-conversion copy; chisq.test coerces its inputs, so the tests below
# are unaffected.
data_reduce$REKOMENDASI_TINDAK_LANJUT = as.factor(data_reduce$REKOMENDASI_TINDAK_LANJUT)
chisq.test(data_kategorik$KONDISI_USAHA,data_kategorik$REKOMENDASI_TINDAK_LANJUT)
chisq.test(data_kategorik$KONDISI_JAMINAN,data_kategorik$REKOMENDASI_TINDAK_LANJUT)
# Correlation between the numeric variables (columns 8-11 of data_reduce --
# positional selection; verify these positions if the source data changes).
library(corrplot)
library(ggcorrplot)
M = data_reduce[,8:11]
# 2x2 grid: four renderings of the Pearson correlation matrix.
par(mfrow=c(2,2))
corrplot(cor(M),type = "upper",order="hclust")
corrplot(cor(M), method="square",type ="upper")
corrplot(cor(M), method="number", type = "lower")
corrplot(cor(M), method ="ellipse")
# Same four renderings using Kendall's rank correlation (rank-based,
# less sensitive to outliers).
par(mfrow=c(2,2))
corrplot(cor(M,method="kendall"), type = "upper", order="hclust")
corrplot(cor(M,method="kendall"), method="square", type="upper")
corrplot(cor(M,method="kendall"), method="number", type="lower")
corrplot(cor(M,method="kendall"), method = "ellipse")
# NOTE(review): `corr` is assigned but never used -- the ggcorrplot call below
# recomputes round(cor(M),1) inline.
corr = round(cor(M),1)
ggcorrplot(round(cor(M),1),
hc.order = TRUE,
type="lower",
lab=TRUE,
lab_size=3,
method="circle",
colors=c("tomato2","white","springgreen3"),
title="Correlogram of Data Nasabah",
ggtheme=theme_bw)
# Feature selection: keep the chosen predictors plus the target variable.
colnames(data_reduce)
data_select = data_reduce[,c("KARAKTER","KONDISI_USAHA","KONDISI_JAMINAN","STATUS","KEWAJIBAN","OSL","KOLEKTIBILITAS","REKOMENDASI_TINDAK_LANJUT")]
# Data transformation: standardize then discretize the two numeric columns.
# NOTE(review): data_non_na is assigned but never used afterwards -- NA rows
# are dropped again below on data_select_new instead.
data_non_na = na.omit(data_select)
data_select_new = data_select
# scale() returns a one-column matrix; [,1] extracts the z-score vector.
data_select_new$KEWAJIBAN = scale(data_select_new$KEWAJIBAN)[,1]
data_select_new$OSL = scale(data_select_new$OSL)[,1]
# Bin the z-scores into ordered intervals. The lowest breaks (-0.354107 and
# -0.60383) look like the observed minima of the scaled columns for THIS
# dataset -- fragile if the data changes, and cut() excludes values at or
# below the first break (they become NA and are removed by na.omit below).
data_select_new$KEWAJIBAN = cut(data_select_new$KEWAJIBAN,breaks=c(-0.354107,5,15,30))
data_select_new$KEWAJIBAN = as.factor(data_select_new$KEWAJIBAN)
data_select_new$OSL = cut(data_select_new$OSL,breaks=c(-0.60383,3,10,15))
data_select_new$OSL = as.factor(data_select_new$OSL)
data_select_new = na.omit(data_select_new)
# Train/test split: stratified 95/5 partition on the target class.
library(caret)
index = createDataPartition(data_select_new$REKOMENDASI_TINDAK_LANJUT,p=.95, list = FALSE)
train = data_select_new[index,]
test = data_select_new[-index,]
# Modelling: multinomial logistic regression on the training split.
train2 <- train
# Use "Angsuran Biasa" (regular installment) as the reference outcome level,
# so every coefficient is interpreted relative to that class.
train2$REKOMENDASI_TINDAK_LANJUT <- relevel(train2$REKOMENDASI_TINDAK_LANJUT, ref = "Angsuran Biasa")
library(nnet)  # library() errors on failure; require() only returns FALSE
multinom_model <- multinom(REKOMENDASI_TINDAK_LANJUT ~ ., data = train2)
summary(multinom_model)
# Coefficients on the odds scale (relative-risk ratios vs the reference class).
exp(coef(multinom_model))
head(round(fitted(multinom_model), 2))

# In-sample predicted class plus the winning class probability ("score").
train2$ClassPredicted <- predict(multinom_model, newdata = train2, "class")
train_prob <- predict(multinom_model, newdata = train2, "probs")
# FIX: with more than two outcome classes, predict(..., "probs") returns a
# matrix, and the original `df$max <- apply(df, 1, max)` ($<- on a matrix)
# would error. Apply the row-wise max directly instead.
train2$score <- apply(train_prob, 1, max)

test_prob <- predict(multinom_model, newdata = test, "probs")
test_max_prob <- apply(test_prob, 1, max)

# Training accuracy (%): share of the confusion-matrix diagonal.
tab_train <- table(train2$REKOMENDASI_TINDAK_LANJUT, train2$ClassPredicted)
round((sum(diag(tab_train)) / sum(tab_train)) * 100, 4)

# Test accuracy (%).
test$ClassPredicted <- predict(multinom_model, newdata = test, "class")
test$score <- test_max_prob
tab_test <- table(test$REKOMENDASI_TINDAK_LANJUT, test$ClassPredicted)
round((sum(diag(tab_test)) / sum(tab_test)) * 100, 4)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.