blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ebbf01f7180d1514778ba5f8009bb167504d1fde
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/methods/man/methods-deprecated.Rd
|
6c7ae18e849d8cfbd1f31705a1ee74354f5793b1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 938
|
rd
|
methods-deprecated.Rd
|
% File src/library/methods/man/methods-deprecated.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2009 R Core Team
% Distributed under GPL 2 or later
\name{methods-deprecated}
\alias{methods-deprecated}
%----- NOTE: ../R/methods-deprecated.R must be synchronized with this!
\title{Deprecated Functions in Package \pkg{methods}}
%----- PLEASE: put \alias{.} here for EACH ! --- Do not remove this file, even when ``empty''
%
\description{
These functions are provided for compatibility with older versions of
\R only, and may be defunct as soon as the next release.
}
\details{
The original help page for these functions is often
available at \code{help("oldName-deprecated")} (note the quotes).
Functions in packages other than the methods package are listed in
\code{help("pkg-deprecated")}.
}
\seealso{
\code{\link{Deprecated}}, \code{\link{methods-defunct}}
}
\keyword{internal}
\keyword{misc}
|
5eb71eb79ee6a80139099ff086b96db981b4922f
|
e3aedc3e0413859b86ef14b63b8ba6e44dd3c04b
|
/Code_2018.R
|
5669a0d1b84beacb770274840a927c8950212f0e
|
[] |
no_license
|
SaraEdw/Holiday_2018
|
f5bd2f134ae2882a1a482c79b6238996c17ef181
|
5261c2675864056b80b36add53cb44cf0d1eed89
|
refs/heads/master
| 2020-04-12T15:44:01.410602
| 2018-12-20T14:38:05
| 2018-12-20T14:38:05
| 162,590,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,822
|
r
|
Code_2018.R
|
# Author: Sara Edwards
# Date: December 2018
# Instructions: Simply select all & run
# (if you use R studio don't have it full screen)
# R studio Mac: Option+command+R
# PC: Control+Alt+R
# In base R Mac: Command+A then Command+Enter
# PC: Ctrl+A then Ctrl+Enter
#
# Purpose: draws a holiday greeting card as a base-R scatter plot built
# entirely from pch = 15 (filled square) "pixels".  Each X*/Y* pair below is a
# hand-digitized set of point coordinates for one motif; mirrored halves are
# produced arithmetically (e.g. -X1-2, 22-X4) rather than re-entered by hand.
X1 <- c(rep(2,4), rep(3,4), rep(4,7), rep(5,10), rep(6,6), rep(7,4), rep(8,5), rep(9,4), rep(10,5), rep(11,2), rep(12,2), 13)
Y1 <- c(3,4,5,11, 2,5,10,11, 5:11, 5:14, 5:8,12,15, 5:7,12, 5:7,13,14, 4:7, 3:7, 3,7, 2,3, 1)
X2 <- c(1, rep(2,2), rep(3,3), rep(4,5), rep(5,5), rep(6,4), rep(7,6), rep(8,6), rep(9,5), rep(10,3), rep(11,4), rep(12,7), rep(13,8), rep(14,8), rep(15,6), 16)
Y2 <- c(9, 8:9, 8:10, 7:11, 6:10, 7:10, 6:11, 5:10, 6:10, 6:8, 7,10,12,13, 7,9:14, 8:15, 9:16, 10,13:17, 14)
X3 <- c(12,12,13,13,15,15,16,16)
Y3 <- c(4,5,4,5,5,6,5,6)
X4 <- c(1, rep(2,3), 3, rep(4,6), rep(5,5), rep(6,8), rep(7,4), rep(8,3), rep(9,4), rep(10,9))
# Append the center column (x = 11) plus the reflection of the first half
# about x = 11 (22 - X4).
X4 <- c(X4, rep(11,12), 22-X4)
Y4 <- c(12, 11:13, 12, 5,7,11,13,17,19, 6,7,12,17,18, 5:7,11,13,17:19, 8,10,14,16, 9,12,15, 8,10,14,16, 3,5,7,11:13,17,19,21)
Y4 <- c(Y4, 2:4, 6,9,11,13,15,18,20:22,Y4)
X5 <- c(1, rep(2,2), rep(3,3), rep(4,6), rep(5,3), rep(6,6), rep(7,4), rep(8,2), rep(9,6), rep(10,9))
# Same mirror trick for the second snowflake-like figure.
X5 <- c(X5, rep(11,8), 22-X5)
Y5 <- c(11, 10,12, 9,11,13, 4,6,10,12,16,18, 5,11,17, 4,6,10,12,16,18, 7,9,13,15, 8,14, 3,7,9,13,15,19, 2,4,6,10:12,16,18,20)
Y5 <- c(Y5, c(1,3,5,10,12,17,19,21), Y5)
# Assemble motifs; Fig labels the color group of each point.
# NOTE(review): the c('tan4',...)[Df$Fig] indexing further down relies on Fig
# being a factor (integer codes), i.e. on the pre-R-4.0 stringsAsFactors
# default -- confirm behavior under R >= 4.0.
D <- data.frame(X=c(-X1-2, X1), Y=c(Y1,Y1), Fig='Brown')
L <- data.frame(X=c(X2/2 + 12, -X2/2 + 28), Y=c(Y2/2+8, -Y2/2+8), Fig="Green")
B <- data.frame(X=c(X3/2 + 12, -X3/2 + 28), Y=c(Y3/2+8, -Y3/2+8), Fig="Red")
Df <- rbind(D, L, B)
Sn <- data.frame(X=c(X4/1.2-10, X5/1.2+10), Y=c(Y4/1.2+81, Y5/1.2+82), Fig="Blue" )
# Fresh 8x6 device, bypassing RStudio's graphics pane.
dev.new(width=8, height=6, unit="in", noRStudioGD = T)
par(family='serif', bg='blanchedalmond', mar=c(2,0,1,0))
# Empty canvas; everything is drawn with points()/text()/abline() below.
plot(Y1~X1, xlim=c(0,120), ylim=c(0,100), type='n',axes=FALSE)
# Stamp the brown/green/red composite at three horizontal offsets.
for (i in c(12, 57, 102)){
points(Df$X+i, Df$Y, col=c('tan4','forestgreen','red')[Df$Fig], pch=15, cex=c(1.3, 0.5, 0.5, 0.7)[Df$Fig])
}
# Stamp the blue figures along the strip at four horizontal offsets.
for (i in c(0, 40, 80, 120)){
points(Sn$X+i, Sn$Y, pch=15, col='dodgerblue4', cex=0.7)
}
# Horizontal border stripes: solid lines plus dashed companions on each side.
abline(h=c(80, 102, -4, 20), col='darkred', lwd=2)
abline(h=c(81, 79, 101, 103, -3, -5, 19, 21), col='darkred', lty=6, xpd=NA)
# Build the greeting from letter indices: T1 = "Happy", T2 = "Holidays" + "!".
T1 <- c(LETTERS[8], letters[c(1,16,16,25)])
T2 <- c(LETTERS[8], letters[c(15,12,9,4,1,25,19)])
T1 <- paste(T1, collapse="")
T2 <- paste(c(T2,"!"), collapse="")
text(45, 60, T1, cex=4, col='darkred', font=3)
text(65, 45, T2, cex=4, col='darkred', font=3)
# Two extra green/red stamps placed beside the text (second pair mirrored).
points(c(X2+15), c(Y2+55), pch=15, col='forestgreen', cex=0.85)
points(c(X3+14), c(Y3+55), pch=15, col='red', cex=0.85)
points(c(-X2+104), c(Y2+45), pch=15, col='forestgreen', cex=0.85)
points(c(-X3+104), c(Y3+45), pch=15, col='red', cex=0.85)
|
3b3c19c6434058f5828930f0ec837f320f43308d
|
2b172258eb7c17c9607439b86ea51824e374ec17
|
/plot1.R
|
7f578ce080e1bde8ecdce8b4fdbe217eca160a3a
|
[] |
no_license
|
jieun0228/ExData_Plotting1
|
48b32326602d18f04533993b2758980022d81724
|
bc384e4faf864724807f3e75cef912f397921756
|
refs/heads/master
| 2021-01-21T17:03:23.873753
| 2015-04-12T22:47:59
| 2015-04-12T22:47:59
| 33,802,535
| 0
| 0
| null | 2015-04-12T04:07:45
| 2015-04-12T04:07:45
| null |
UTF-8
|
R
| false
| false
| 582
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# NOTE: the working directory below is machine-specific; adjust before running.
setwd("C:/Users/veronica/DataScience")

# Read the full household power-consumption extract (semicolon-delimited text).
power_raw <- read.csv(
  "./data/household_power_consumption.txt",
  header = TRUE,
  sep = ";",
  stringsAsFactors = FALSE,
  dec = "."
)

# Keep only the two target dates (stored as d/m/yyyy strings in the raw file).
feb_two_days <- power_raw[power_raw$Date %in% c("1/2/2007", "2/2/2007"), ]

# Measurements arrive as text; coerce to numeric before plotting.
feb_two_days$Global_active_power <- as.numeric(feb_two_days$Global_active_power)

# Draw the histogram on the screen device...
hist(
  feb_two_days$Global_active_power,
  main = "Global Active Power",
  xlab = "Global Active Power(kilowatts)",
  ylab = "Frequency",
  col = "Red"
)

# ...then copy it to a 480x480 PNG and close that device.
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
88ab88a16656d61c4385efca1a45b7325427dfaf
|
97d42d0116a38692851fbc42deac5475c761229d
|
/Code - 02 28 2018/Table Code/Items 80,81, Tables AB,AC.R
|
1ae81a7d7e8a71332fbfd0d558218c70ebce7032
|
[] |
no_license
|
casey-stevens/Cadmus-6000-2017
|
f4632518088de34541b0c6c130b3dd021f0809d1
|
ab4450e77885a9723dba6bc3890112dec8c7328f
|
refs/heads/master
| 2021-01-25T06:17:59.728730
| 2018-09-27T20:35:42
| 2018-09-27T20:35:42
| 93,548,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72,182
|
r
|
Items 80,81, Tables AB,AC.R
|
#############################################################################################
## Title: RBSA Analysis
## Author: Casey Stevens, Cadmus Group
## Created: 06/13/2017
## Updated:
## Billing Code(s):
#############################################################################################
## Clear variables
## NOTE(review): rm(list=ls()) wipes the caller's workspace; fine for a batch
## run of this script, but do not source it from an interactive session.
rm(list=ls())
# Date stamp (e.g. "13Jun17") used to locate the dated "clean" workbooks below.
rundate <- format(Sys.time(), "%d%b%y")
options(scipen=999)
## Create "Not In" operator
"%notin%" <- Negate("%in%")
# Source codes
# Project helpers used throughout (weightedData, mean_one_group,
# proportions_one_group, exportTable) and path/export-name variables
# (filepathCleanData, filepathRawData, appliances.export, mechanical.export,
# os.ind) are presumably defined in these sourced files -- confirm there.
source("Code/Table Code/SourceCode.R")
source("Code/Table Code/Weighting Implementation Functions.R")
source("Code/Sample Weighting/Weights.R")
source("Code/Table Code/Export Function.R")
# Read in clean RBSA data
rbsa.dat <- read.xlsx(xlsxFile = file.path(filepathCleanData, paste("clean.rbsa.data", rundate, ".xlsx", sep = "")))
length(unique(rbsa.dat$CK_Cadmus_ID))  # interactive sanity check of home count
# Keep site-level building records only ("site" appears in CK_Building_ID).
rbsa.dat <- rbsa.dat[grep("site", rbsa.dat$CK_Building_ID, ignore.case = T),]
#Read in data for analysis
appliances.dat <- data.frame(read.xlsx(xlsxFile = file.path(filepathRawData, appliances.export))
,stringsAsFactors = FALSE)
#clean cadmus IDs
appliances.dat$CK_Cadmus_ID <- trimws(toupper(appliances.dat$CK_Cadmus_ID))
#Read in data for analysis
mechanical.dat <- read.xlsx(mechanical.export)
#clean cadmus IDs
mechanical.dat$CK_Cadmus_ID <- trimws(toupper(mechanical.dat$CK_Cadmus_ID))
# The disabled block below merged clothes-washer loads-per-week interview data
# onto single-family homes; retained for reference.
# sites.interview.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, sites.interview.export))
# sites.interview.dat$CK_Cadmus_ID <- trimws(toupper(sites.interview.dat$CK_Cadmus_ID))
#
#
# sites.interview.dat1 <- sites.interview.dat[which(colnames(sites.interview.dat) %in% c("CK_Cadmus_ID", "INTRVW_CUST_RES_HomeandEnergyUseHome_ClothesWasherLoadsPerWeek"))]
# sites.interview.dat1 <- sites.interview.dat1[which(!is.na(sites.interview.dat1$INTRVW_CUST_RES_HomeandEnergyUseHome_ClothesWasherLoadsPerWeek)),]
#
# rbsa.dat.sf <- rbsa.dat[which(rbsa.dat$BuildingType == "Single Family"),]
#
# rbsa.merge <- left_join(rbsa.dat.sf, sites.interview.dat1)
# rbsa.merge <- rbsa.merge[which(!is.na(rbsa.merge$INTRVW_CUST_RES_HomeandEnergyUseHome_ClothesWasherLoadsPerWeek)),]
#############################################################################################
#Item 80: AVERAGE NUMBER OF APPLIANCES PER HOME BY TYPE (SF table 87, MH table 68)
#############################################################################################
# For water Heaters
# Water heaters live in the mechanical export rather than the appliance
# export, so they are counted separately and row-bound to appliance counts.
item80.mech <- mechanical.dat[grep("Water Heat", mechanical.dat$Generic),]
# Collapse all "Water Heat*" label variants to a single "Water Heater" type.
item80.mech$Generic[grep("Water Heat", item80.mech$Generic)] <- "Water Heater"
item80.mech$WaterHeaterCount <- 1
# Left join keeps every home; homes without a water heater get NA counts.
item80.mech1 <- left_join(rbsa.dat, item80.mech, by = "CK_Cadmus_ID")
item80.mech2 <- unique(item80.mech1[-grep("Multifamily", item80.mech1$BuildingType),])
which(duplicated(item80.mech2$CK_Cadmus_ID))  # interactive duplicate check
item80.mech2$WaterHeaterCount[which(is.na(item80.mech2$WaterHeaterCount))] <- 0
item80.mech2$count <- 1
#summarise by home
item80.site <- summarise(group_by(item80.mech2, CK_Cadmus_ID, Generic)
,Count = sum(WaterHeaterCount))
unique(item80.site$Count)
colnames(item80.site)[which(colnames(item80.site) == "Generic")] <- "Type"
#For everything else
item80.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
,"Type"
,"Large.Unusual.Load.Quantity"
,"Age"
,""
,""))]
item80.dat$count <- 1
# Drop the stray header row that survives in the raw export.
item80.dat0 <- item80.dat[which(item80.dat$CK_Cadmus_ID != "CK_CADMUS_ID"),]
item80.dat1 <- left_join(item80.dat0, rbsa.dat, by = "CK_Cadmus_ID")
# Missing/"N/A" quantities are treated as a single unit.
item80.dat1$Large.Unusual.Load.Quantity[which(item80.dat1$Large.Unusual.Load.Quantity %in% c("N/A",NA))] <- 1
unique(item80.dat1$Large.Unusual.Load.Quantity)
item80.dat1$Large.Unusual.Load.Quantity <- as.numeric(as.character(item80.dat1$Large.Unusual.Load.Quantity))
item80.dat1$TotalQty <- item80.dat1$Large.Unusual.Load.Quantity * item80.dat1$count
item80.sum <- summarise(group_by(item80.dat1, CK_Cadmus_ID, Type)
,Count = sum(TotalQty))
# Row bind water heater and appliance counts
item80.merge <- rbind.data.frame(item80.site, item80.sum)
item80.merge <- left_join(rbsa.dat, item80.merge) #switch RBSA.dat to rbsa.merge to get more info on washers/dryers
item80.merge <- item80.merge[which(!is.na(item80.merge$Type)),]
item80.merge$Count[which(is.na(item80.merge$Count))] <- 0
# Cast wide (one column per appliance type) so missing home/type combinations
# can be zero-filled, then melt back to long form.
item80.cast <- dcast(setDT(item80.merge)
,formula = CK_Cadmus_ID ~ Type
,value.var = c("Count"))
# item80.missing.washer <- item80.cast[which(is.na(item80.cast$Washer)),]
# item80.missing.washer <- left_join(item80.missing.washer, rbsa.dat)
# item80.washer.sf <- item80.missing.washer[which(item80.missing.washer$BuildingType == "Single Family"),]
#
# item80.washer.sf.merge <- left_join(item80.washer.sf, sites.interview.dat1)
# NOTE(review): logical-matrix index with a trailing comma on a data.table;
# confirm this zero-fills NA cells as intended -- the usual idiom is
# x[is.na(x)] <- 0.
item80.cast[is.na(item80.cast),] <- 0
item80.melt <- melt(item80.cast, id.vars = "CK_Cadmus_ID")
names(item80.melt) <- c("CK_Cadmus_ID", "Type", "Count")
item80.merge <- left_join(rbsa.dat, item80.melt)
item80.merge$Type <- as.character(item80.merge$Type)
unique(item80.merge$Type)
# Restrict to the six appliance types reported in the tables.
item80.merge <- item80.merge[which(item80.merge$Type %in% c("Dishwasher"
,"Dryer"
,"Freezer"
,"Refrigerator"
,"Washer"
,"Water Heater")),]
################################################
# Adding pop and sample sizes for weights
################################################
# weightedData() (sourced project helper) attaches population/sample sizes;
# analysis columns are removed first, then joined back on by CK_Cadmus_ID.
item80.data <- weightedData(item80.merge[-which(colnames(item80.merge) %in% c("Count"
,"Type"
,"Age"))])
item80.data <- left_join(item80.data, item80.merge[which(colnames(item80.merge) %in% c("CK_Cadmus_ID"
,"Count"
,"Type"
,"Age"))])
item80.data$count <- 1
#######################
# Weighted Analysis
#######################
item80.final <- mean_one_group(CustomerLevelData = item80.data
,valueVariable = 'Count'
,byVariable = 'Type'
,aggregateRow = "Total")
# The "Total" aggregate row is not reported for this item.
item80.final <- item80.final[which(item80.final$Type != "Total"),]
# Split by building type and drop the BuildingType column for export.
item80.final.SF <- item80.final[which(item80.final$BuildingType == "Single Family")
,-which(colnames(item80.final) %in% c("BuildingType"))]
item80.final.MH <- item80.final[which(item80.final$BuildingType == "Manufactured")
,-which(colnames(item80.final) %in% c("BuildingType"))]
# exportTable(item80.final.SF, "SF", "Table 87", weighted = TRUE)
exportTable(item80.final.MH, "MH", "Table 68", weighted = TRUE)
#######################
# Unweighted Analysis
#######################
item80.final <- mean_one_group_unweighted(CustomerLevelData = item80.data
,valueVariable = 'Count'
,byVariable = 'Type'
,aggregateRow = "Total")
item80.final <- item80.final[which(item80.final$Type != "Total"),]
item80.final <- item80.final[which(item80.final$Type %in% c("Dishwasher"
,"Dryer"
,"Freezer"
,"Refrigerator"
,"Washer"
,"Water Heater")),]
item80.final.SF <- item80.final[which(item80.final$BuildingType == "Single Family")
,-which(colnames(item80.final) %in% c("BuildingType"))]
item80.final.MH <- item80.final[which(item80.final$BuildingType == "Manufactured")
,-which(colnames(item80.final) %in% c("BuildingType"))]
# exportTable(item80.final.SF, "SF", "Table 87", weighted = FALSE)
exportTable(item80.final.MH, "MH", "Table 68", weighted = FALSE)
#############################################################################################
#Table AB: Average Age of Appliance Equipment by Type
#############################################################################################
tableAB.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
,"Type"
,"Age"))]
tableAB.dat$count <- 1
# Age arrives as text from the spreadsheet; coerce and keep positive values.
tableAB.dat$Age <- as.numeric(as.character(tableAB.dat$Age))
tableAB.dat0 <- tableAB.dat[which(tableAB.dat$Age > 0),]
tableAB.merge <- left_join(rbsa.dat, tableAB.dat0, by = "CK_Cadmus_ID")
# Site-level records only, with a valid positive Age.
tableAB.merge <- tableAB.merge[grep("site", tableAB.merge$CK_Building_ID, ignore.case = T),]
tableAB.merge <- tableAB.merge[which(tableAB.merge$Age > 0),]
unique(tableAB.merge$Type)
# Note: unlike Item 80 / Table AC, Water Heater is not in this type filter.
tableAB.merge <- tableAB.merge[which(tableAB.merge$Type %in% c("Dishwasher"
,"Dryer"
,"Freezer"
,"Refrigerator"
,"Washer")),]
################################################
# Adding pop and sample sizes for weights
################################################
# weightedData() (sourced project helper) attaches population/sample sizes;
# analysis columns are removed first, then joined back on.
tableAB.data <- weightedData(tableAB.merge[-which(colnames(tableAB.merge) %in% c("count"
,"Type"
,"Age"))])
tableAB.data <- left_join(tableAB.data, tableAB.merge[which(colnames(tableAB.merge) %in% c("CK_Cadmus_ID"
,"count"
,"Type"
,"Age"))])
tableAB.data$count <- 1
#######################
# Weighted Analysis
#######################
tableAB.final <- mean_one_group(CustomerLevelData = tableAB.data
,valueVariable = 'Age'
,byVariable = 'Type'
,aggregateRow = "Total")
tableAB.final <- tableAB.final[which(tableAB.final$Type != "Total"),]
# NOTE(review): weighted means are rounded to whole years here, but the
# unweighted results below are not rounded -- confirm that is intentional.
tableAB.final$Mean <- round(tableAB.final$Mean,0)
tableAB.final.SF <- tableAB.final[which(tableAB.final$BuildingType == "Single Family")
,-which(colnames(tableAB.final) %in% c("BuildingType"))]
tableAB.final.MH <- tableAB.final[which(tableAB.final$BuildingType == "Manufactured")
,-which(colnames(tableAB.final) %in% c("BuildingType"))]
tableAB.final.MF <- tableAB.final[which(tableAB.final$BuildingType == "Multifamily")
,-which(colnames(tableAB.final) %in% c("BuildingType"))]
# exportTable(tableAB.final.SF, "SF", "Table AB", weighted = TRUE)
exportTable(tableAB.final.MH, "MH", "Table AB", weighted = TRUE)
# exportTable(tableAB.final.MF, "MF", "Table AB", weighted = TRUE)
#######################
# Unweighted Analysis
#######################
tableAB.final <- mean_one_group_unweighted(CustomerLevelData = tableAB.data
,valueVariable = 'Age'
,byVariable = 'Type'
,aggregateRow = "Total")
tableAB.final <- tableAB.final[which(tableAB.final$Type != "Total"),]
tableAB.final.SF <- tableAB.final[which(tableAB.final$BuildingType == "Single Family")
,-which(colnames(tableAB.final) %in% c("BuildingType"))]
tableAB.final.MH <- tableAB.final[which(tableAB.final$BuildingType == "Manufactured")
,-which(colnames(tableAB.final) %in% c("BuildingType"))]
tableAB.final.MF <- tableAB.final[which(tableAB.final$BuildingType == "Multifamily")
,-which(colnames(tableAB.final) %in% c("BuildingType"))]
# exportTable(tableAB.final.SF, "SF", "Table AB", weighted = FALSE)
exportTable(tableAB.final.MH, "MH", "Table AB", weighted = FALSE)
# exportTable(tableAB.final.MF, "MF", "Table AB", weighted = FALSE)
#############################################################################################
#Table AC: Percent of Appliance Equipment above measure life by Type
#############################################################################################
# For water Heaters
# Water heaters come from the mechanical export; their "Age" is the
# DHW.Year.Manufactured column (a vintage year, see Age.Diff below).
tableAC.mech <- mechanical.dat[grep("Water Heat", mechanical.dat$Generic),]
tableAC.mech$Generic[grep("Water Heat", tableAC.mech$Generic)] <- "Water Heater"
tableAC.mech$WaterHeaterCount <- 1
tableAC.mech1 <- left_join(rbsa.dat, tableAC.mech, by = "CK_Cadmus_ID")
tableAC.mech2 <- unique(tableAC.mech1[-grep("Multifamily", tableAC.mech1$BuildingType),])
which(duplicated(tableAC.mech2$CK_Cadmus_ID))  # interactive duplicate check
tableAC.mech2$WaterHeaterCount[which(is.na(tableAC.mech2$WaterHeaterCount))] <- 0
tableAC.mech2$count <- 1
#summarise by home
tableAC.site <- summarise(group_by(tableAC.mech2, CK_Cadmus_ID, Generic, DHW.Year.Manufactured)
,count = sum(WaterHeaterCount))
unique(tableAC.site$count)
# NOTE(review): positional rename -- assumes the matched columns occur in the
# order Generic then DHW.Year.Manufactured; confirm against the data.
colnames(tableAC.site)[which(colnames(tableAC.site) %in% c("Generic", "DHW.Year.Manufactured"))] <- c("Type","Age")
tableAC.site$Age <- as.numeric(as.character(tableAC.site$Age))
tableAC.site1 <- tableAC.site[which(!is.na(tableAC.site$Age)),]
tableAC.site2 <- tableAC.site1[which(tableAC.site1$Age > 0),]
tableAC.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
,"Type"
,"Age"
,""
,""))]
tableAC.dat$count <- 1
tableAC.dat$Age <- as.numeric(as.character(tableAC.dat$Age))
tableAC.dat0 <- tableAC.dat[which(tableAC.dat$Age > 0),]
# Stack water-heater records with the appliance records, then attach site info.
tableAC.merge0 <- rbind.data.frame(tableAC.site2, tableAC.dat0)
tableAC.merge <- left_join(rbsa.dat, tableAC.merge0, by = "CK_Cadmus_ID")
tableAC.merge <- tableAC.merge[grep("site",tableAC.merge$CK_Building_ID, ignore.case = T),]
tableAC.merge <- tableAC.merge[which(tableAC.merge$Age > 0),]
unique(tableAC.merge$Type)
tableAC.merge <- tableAC.merge[which(tableAC.merge$Type %in% c("Dishwasher"
,"Dryer"
,"Freezer"
,"Refrigerator"
,"Washer"
,"Water Heater")),]
# MeasureMap: assumed measure life in years for each equipment type.
tableAC.merge$MeasureMap <- 0
tableAC.merge$MeasureMap[which(tableAC.merge$Type == "Refrigerator")] <- 15
tableAC.merge$MeasureMap[which(tableAC.merge$Type == "Freezer")] <- 22
tableAC.merge$MeasureMap[which(tableAC.merge$Type == "Washer")] <- 14
tableAC.merge$MeasureMap[which(tableAC.merge$Type == "Dryer")] <- 12
tableAC.merge$MeasureMap[which(tableAC.merge$Type == "Dishwasher")] <- 12
tableAC.merge$MeasureMap[which(tableAC.merge$Type == "Water Heater")] <- 15
# Age is a vintage year; Age.Diff is the unit's age as of the (hard-coded)
# 2017 study year.  Ind = 1 flags equipment older than its measure life.
tableAC.merge$Age.Diff <- 2017 - tableAC.merge$Age
tableAC.merge$Above.Measure.Life <- "No"
tableAC.merge$Above.Measure.Life[which(tableAC.merge$Age.Diff > tableAC.merge$MeasureMap)] <- "Yes"
tableAC.merge$Ind <- 0
tableAC.merge$Ind[which(tableAC.merge$Age.Diff > tableAC.merge$MeasureMap)] <- 1
################################################
# Adding pop and sample sizes for weights
################################################
# weightedData() (sourced project helper) attaches population/sample sizes;
# analysis columns are removed first, then joined back on.
tableAC.data <- weightedData(tableAC.merge[-which(colnames(tableAC.merge) %in% c("Type"
,"Age"
,"count"
,"MeasureMap"
,"Above.Measure.Life"
,"Age.Diff"
,"Ind"))])
tableAC.data <- left_join(tableAC.data, tableAC.merge[which(colnames(tableAC.merge) %in% c("CK_Cadmus_ID"
,"Type"
,"Age"
,"count"
,"MeasureMap"
,"Above.Measure.Life"
,"Age.Diff"
,"Ind"))])
tableAC.data$count <- 1
tableAC.data$Count <- 1
#######################
# Weighted Analysis
#######################
# Proportion of Ind (above measure life) within each Type.
tableAC.final <- proportions_one_group(CustomerLevelData = tableAC.data
,valueVariable = "Ind"
,groupingVariable = "Type"
,total.name = "Total")
tableAC.final <- tableAC.final[which(tableAC.final$Type != "Total"),]
tableAC.final.SF <- tableAC.final[which(tableAC.final$BuildingType == "Single Family")
,-which(colnames(tableAC.final) %in% c("BuildingType"))]
tableAC.final.MH <- tableAC.final[which(tableAC.final$BuildingType == "Manufactured")
,-which(colnames(tableAC.final) %in% c("BuildingType"))]
tableAC.final.MF <- tableAC.final[which(tableAC.final$BuildingType == "Multifamily")
,-which(colnames(tableAC.final) %in% c("BuildingType"))]
# exportTable(tableAC.final.SF, "SF", "Table AC", weighted = TRUE)
exportTable(tableAC.final.MH, "MH", "Table AC", weighted = TRUE)
# exportTable(tableAC.final.MF, "MF", "Table AC", weighted = TRUE)
#######################
# Unweighted Analysis
#######################
tableAC.final <- proportions_one_group(CustomerLevelData = tableAC.data
,valueVariable = "Ind"
,groupingVariable = "Type"
,total.name = "Total"
,weighted = FALSE)
tableAC.final <- tableAC.final[which(tableAC.final$Type != "Total"),]
tableAC.final.SF <- tableAC.final[which(tableAC.final$BuildingType == "Single Family")
,-which(colnames(tableAC.final) %in% c("BuildingType"))]
tableAC.final.MH <- tableAC.final[which(tableAC.final$BuildingType == "Manufactured")
,-which(colnames(tableAC.final) %in% c("BuildingType"))]
tableAC.final.MF <- tableAC.final[which(tableAC.final$BuildingType == "Multifamily")
,-which(colnames(tableAC.final) %in% c("BuildingType"))]
# exportTable(tableAC.final.SF, "SF", "Table AC", weighted = FALSE)
exportTable(tableAC.final.MH, "MH", "Table AC", weighted = FALSE)
# exportTable(tableAC.final.MF, "MF", "Table AC", weighted = FALSE)
#############################################################################################
#Item 81: DISTRIBUTION OF REFRIGERATOR/FREEZERS BY VINTAGE (SF table 88, MH table 69)
#############################################################################################
#subset to columns needed for analysis
item81.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
,"Type"
,"Age"
,""))]
item81.dat$count <- 1
# Drop the stray header row that survives in the raw export.
item81.dat0 <- item81.dat[which(item81.dat$CK_Cadmus_ID != "CK_CADMUS_ID"),]
item81.dat1 <- left_join(item81.dat0, rbsa.dat, by = "CK_Cadmus_ID")
# Site-level records only, refrigerators and freezers only.
item81.dat1 <- item81.dat1[grep("site",item81.dat1$CK_Building_ID, ignore.case = T),]
item81.dat2 <- item81.dat1[which(item81.dat1$Type %in% c("Refrigerator", "Freezer")),]
# Bin equipment vintages for items 50 and 52 (4 categories)
# FIX(review): Age comes out of the spreadsheet as text (hence the
# as.numeric(as.character(.)) cast below).  The original code compared the raw
# Age column directly against numbers (e.g. item81.dat3$Age < 1980); in R that
# coerces the number to a string and compares lexicographically, which can
# misbin any value that is not a clean 4-digit year.  Bin on the numeric
# vintage instead.
item81.dat2$EquipVintage_bins <- as.numeric(as.character(item81.dat2$Age))
item81.dat3 <- item81.dat2[which(!(is.na(item81.dat2$EquipVintage_bins))),]
# Numeric vintage year used for all bin boundaries below.
age.num <- as.numeric(as.character(item81.dat3$Age))
item81.dat3$EquipVintage_bins[which(age.num < 1980)] <- "Pre 1980"
item81.dat3$EquipVintage_bins[which(age.num >= 1980 & age.num < 1990)] <- "1980-1989"
item81.dat3$EquipVintage_bins[which(age.num >= 1990 & age.num < 1995)] <- "1990-1994"
item81.dat3$EquipVintage_bins[which(age.num >= 1995 & age.num < 2000)] <- "1995-1999"
item81.dat3$EquipVintage_bins[which(age.num >= 2000 & age.num < 2005)] <- "2000-2004"
item81.dat3$EquipVintage_bins[which(age.num >= 2005 & age.num < 2010)] <- "2005-2009"
item81.dat3$EquipVintage_bins[which(age.num >= 2010 & age.num < 2015)] <- "2010-2014"
item81.dat3$EquipVintage_bins[which(age.num >= 2015)] <- "Post 2014"
#check uniques
unique(item81.dat3$EquipVintage_bins)
# Attach the bins back onto the full site list; drop homes with no binned unit.
item81.merge <- left_join(rbsa.dat, item81.dat3)
item81.merge <- item81.merge[which(!is.na(item81.merge$EquipVintage_bins)),]
################################################
# Adding pop and sample sizes for weights
################################################
# weightedData() (sourced project helper) attaches population/sample sizes;
# analysis columns are removed first, then joined back on.
item81.data <- weightedData(item81.merge[-which(colnames(item81.merge) %in% c("count"
,"Type"
,"Age"
,"EquipVintage_bins"))])
item81.data <- left_join(item81.data, item81.merge[which(colnames(item81.merge) %in% c("CK_Cadmus_ID"
,"count"
,"Type"
,"Age"
,"EquipVintage_bins"))])
item81.data$count <- 1
#######################
# Weighted Analysis
#######################
item81.final <- proportions_one_group(CustomerLevelData = item81.data
,valueVariable = 'count'
,groupingVariable = 'EquipVintage_bins'
,total.name = 'All Vintages')
unique(item81.final$EquipVintage_bins)
# Force the vintage bins into chronological display order.
# NOTE(review): rowOrder lists "Total" but total.name above is 'All Vintages';
# that row maps to NA in the factor and sorts last -- confirm intended.
rowOrder <- c("Pre 1980"
,"1980-1989"
,"1990-1994"
,"1995-1999"
,"2000-2004"
,"2005-2009"
,"2010-2014"
,"Post 2014"
,"Total")
item81.final <- item81.final %>% mutate(EquipVintage_bins = factor(EquipVintage_bins, levels = rowOrder)) %>% arrange(EquipVintage_bins)
item81.final <- data.frame(item81.final)
item81.final.SF <- item81.final[which(item81.final$BuildingType == "Single Family")
,-which(colnames(item81.final) %in% c("BuildingType"))]
item81.final.MH <- item81.final[which(item81.final$BuildingType == "Manufactured")
,-which(colnames(item81.final) %in% c("BuildingType"))]
item81.final.MF <- item81.final[which(item81.final$BuildingType == "Multifamily")
,-which(colnames(item81.final) %in% c("BuildingType"))]
# exportTable(item81.final.SF, "SF", "Table 88", weighted = TRUE)
exportTable(item81.final.MH, "MH", "Table 69", weighted = TRUE)
# exportTable(item81.final.MF, "MF", "Table 87", weighted = TRUE)
#######################
# Unweighted Analysis
#######################
item81.final <- proportions_one_group(CustomerLevelData = item81.data
,valueVariable = 'count'
,groupingVariable = 'EquipVintage_bins'
,total.name = 'All Vintages'
,weighted = FALSE)
unique(item81.final$EquipVintage_bins)
rowOrder <- c("Pre 1980"
,"1980-1989"
,"1990-1994"
,"1995-1999"
,"2000-2004"
,"2005-2009"
,"2010-2014"
,"Post 2014"
,"Total")
item81.final <- item81.final %>% mutate(EquipVintage_bins = factor(EquipVintage_bins, levels = rowOrder)) %>% arrange(EquipVintage_bins)
item81.final <- data.frame(item81.final)
item81.final.SF <- item81.final[which(item81.final$BuildingType == "Single Family")
,-which(colnames(item81.final) %in% c("BuildingType"))]
item81.final.MH <- item81.final[which(item81.final$BuildingType == "Manufactured")
,-which(colnames(item81.final) %in% c("BuildingType"))]
item81.final.MF <- item81.final[which(item81.final$BuildingType == "Multifamily")
,-which(colnames(item81.final) %in% c("BuildingType"))]
# exportTable(item81.final.SF, "SF", "Table 88", weighted = FALSE)
exportTable(item81.final.MH, "MH", "Table 69", weighted = FALSE)
# exportTable(item81.final.MF, "MF", "Table 87", weighted = FALSE)
############################################################################################################
#
#
# OVERSAMPLE ANALYSIS
#
#
############################################################################################################
# Read in clean scl data
# Oversample data; os.ind (set in the sourced project code) names which
# oversample's clean workbook to read.
os.dat <- read.xlsx(xlsxFile = file.path(filepathCleanData, paste("clean.",os.ind,".data", rundate, ".xlsx", sep = "")))
length(unique(os.dat$CK_Cadmus_ID))  # interactive sanity check of home count
# In the oversample file the grouping lives in "Category"; expose it under the
# CK_Building_ID name used by the rest of this script.
os.dat$CK_Building_ID <- os.dat$Category
os.dat <- os.dat[which(names(os.dat) != "Category")]
names(os.dat)
#############################################################################################
#Item 80: AVERAGE NUMBER OF APPLIANCES PER HOME BY TYPE (SF table 87, MH table 68)
#############################################################################################
# For water Heaters
# Mirrors the main Item 80 build above, but joined against os.dat and grouped
# by CK_Cadmus_ID + CK_Building_ID (the oversample category).
item80.os.mech <- mechanical.dat[grep("Water Heat", mechanical.dat$Generic),]
item80.os.mech$Generic[grep("Water Heat", item80.os.mech$Generic)] <- "Water Heater"
item80.os.mech$WaterHeaterCount <- 1
item80.os.mech1 <- left_join(os.dat, item80.os.mech, by = "CK_Cadmus_ID")
# No Multifamily exclusion here, unlike the main-sample version.
item80.os.mech2 <- item80.os.mech1
which(duplicated(item80.os.mech2$CK_Cadmus_ID))  # interactive duplicate check
item80.os.mech2$WaterHeaterCount[which(is.na(item80.os.mech2$WaterHeaterCount))] <- 0
item80.os.mech2$count <- 1
#summarise by home
item80.os.site <- summarise(group_by(item80.os.mech2, CK_Cadmus_ID, CK_Building_ID, Generic)
,Count = sum(WaterHeaterCount))
unique(item80.os.site$Count)
colnames(item80.os.site)[which(colnames(item80.os.site) == "Generic")] <- "Type"
#For everything else
item80.os.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
,"Type"
,"Large.Unusual.Load.Quantity"
,"Age"
,""
,""))]
item80.os.dat$count <- 1
# Drop the stray header row that survives in the raw export.
item80.os.dat0 <- item80.os.dat[which(item80.os.dat$CK_Cadmus_ID != "CK_CADMUS_ID"),]
item80.os.dat1 <- left_join(item80.os.dat0, os.dat, by = "CK_Cadmus_ID")
# Missing/"N/A" quantities are treated as a single unit.
item80.os.dat1$Large.Unusual.Load.Quantity[which(item80.os.dat1$Large.Unusual.Load.Quantity %in% c("N/A",NA))] <- 1
unique(item80.os.dat1$Large.Unusual.Load.Quantity)
item80.os.dat1$Large.Unusual.Load.Quantity <- as.numeric(as.character(item80.os.dat1$Large.Unusual.Load.Quantity))
item80.os.dat1$TotalQty <- item80.os.dat1$Large.Unusual.Load.Quantity * item80.os.dat1$count
item80.os.sum <- summarise(group_by(item80.os.dat1, CK_Cadmus_ID, CK_Building_ID, Type)
,Count = sum(TotalQty))
# Row bind water heater and appliance counts
item80.os.merge <- rbind.data.frame(item80.os.site, item80.os.sum)
item80.os.merge <- left_join(os.dat, item80.os.merge) #switch os.dat to scl.merge to get more info on washers/dryers
item80.os.merge <- item80.os.merge[which(!is.na(item80.os.merge$Type)),]
item80.os.merge$Count[which(is.na(item80.os.merge$Count))] <- 0
# Cast wide so missing home/type combinations can be zero-filled, then melt.
item80.os.cast <- dcast(setDT(item80.os.merge)
,formula = CK_Cadmus_ID + CK_Building_ID ~ Type
,value.var = c("Count"))
# NOTE(review): logical-matrix index with a trailing comma on a data.table;
# confirm this zero-fills NA cells as intended (usual idiom: x[is.na(x)] <- 0).
item80.os.cast[is.na(item80.os.cast),] <- 0
item80.os.melt <- melt(item80.os.cast, id.vars = c("CK_Cadmus_ID", "CK_Building_ID"))
names(item80.os.melt) <- c("CK_Cadmus_ID", "CK_Building_ID", "Type", "Count")
item80.os.merge <- left_join(os.dat, item80.os.melt)
item80.os.merge$Type <- as.character(item80.os.merge$Type)
unique(item80.os.merge$Type)
# Restrict to the six reported appliance types.
item80.os.merge <- item80.os.merge[which(item80.os.merge$Type %in% c("Dishwasher"
,"Dryer"
,"Freezer"
,"Refrigerator"
,"Washer"
,"Water Heater")),]
################################################
# Adding pop and sample sizes for weights
################################################
item80.os.data <- weightedData(item80.os.merge[-which(colnames(item80.os.merge) %in% c("Count"
,"Type"
,"Age"))])
item80.os.data <- left_join(item80.os.data, unique(item80.os.merge[which(colnames(item80.os.merge) %in% c("CK_Cadmus_ID"
,"Count"
,"Type"
,"Age"))]))
item80.os.data$count <- 1
#######################
# Weighted Analysis
#######################
# Weighted mean appliance count per home, by appliance type (rows) and
# oversample group (columns).
item80.os.final <- mean_two_groups(CustomerLevelData = item80.os.data
                                   ,valueVariable = 'Count'
                                   ,byVariableColumn = "CK_Building_ID"
                                   ,byVariableRow = 'Type'
                                   ,columnAggregate = "Remove"
                                   ,rowAggregate = "Total")
item80.os.cast <- item80.os.final[which(item80.os.final$Type != "Total"),]
names(item80.os.cast)
if(os.ind == "scl"){
# FIX(review): mean_two_groups produces space-separated column names
# (e.g. `Mean_SCL GenPop`), as referenced in every parallel section of this
# script (Table AB, Table AC, Table 88). The original block referenced
# dotted names (`Mean_SCL.GenPop`, and a mix like `SE_2017.RBSA PS` vs
# `Mean_2017.RBSA.PS`); dotted references return NULL and are silently
# dropped from the exported data.frame. References normalized to the
# space-separated names; exported column names are unchanged.
item80.os.final <- data.frame("BuildingType" = item80.os.cast$BuildingType
                              ,"Type" = item80.os.cast$Type
                              ,"Mean_SCL.GenPop" = item80.os.cast$`Mean_SCL GenPop`
                              ,"SE_SCL.GenPop" = item80.os.cast$`SE_SCL GenPop`
                              ,"n_SCL.GenPop" = item80.os.cast$`n_SCL GenPop`
                              ,"Mean_SCL.LI" = item80.os.cast$`Mean_SCL LI`
                              ,"SE_SCL.LI" = item80.os.cast$`SE_SCL LI`
                              ,"n_SCL.LI" = item80.os.cast$`n_SCL LI`
                              ,"Mean_SCL.EH" = item80.os.cast$`Mean_SCL EH`
                              ,"SE_SCL.EH" = item80.os.cast$`SE_SCL EH`
                              ,"n_SCL.EH" = item80.os.cast$`n_SCL EH`
                              ,"Mean_2017.RBSA.PS" = item80.os.cast$`Mean_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item80.os.cast$`SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item80.os.cast$`n_2017 RBSA PS`
                              ,"EB_SCL.GenPop" = item80.os.cast$`EB_SCL GenPop`
                              ,"EB_SCL.LI" = item80.os.cast$`EB_SCL LI`
                              ,"EB_SCL.EH" = item80.os.cast$`EB_SCL EH`
                              ,"EB_2017.RBSA.PS" = item80.os.cast$`EB_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item80.os.final <- data.frame("BuildingType" = item80.os.cast$BuildingType
                              ,"Type" = item80.os.cast$Type
                              ,"Mean_SnoPUD" = item80.os.cast$`Mean_SnoPUD`
                              ,"SE_SnoPUD" = item80.os.cast$`SE_SnoPUD`
                              ,"n_SnoPUD" = item80.os.cast$`n_SnoPUD`
                              ,"Mean_2017.RBSA.PS" = item80.os.cast$`Mean_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item80.os.cast$`SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item80.os.cast$`n_2017 RBSA PS`
                              ,"Mean_RBSA.NW" = item80.os.cast$`Mean_2017 RBSA NW`
                              ,"SE_RBSA.NW" = item80.os.cast$`SE_2017 RBSA NW`
                              ,"n_RBSA.NW" = item80.os.cast$`n_2017 RBSA NW`
                              ,"EB_SnoPUD" = item80.os.cast$`EB_SnoPUD`
                              ,"EB_2017.RBSA.PS" = item80.os.cast$`EB_2017 RBSA PS`
                              ,"EB_RBSA.NW" = item80.os.cast$`EB_2017 RBSA NW`)
}
item80.os.final.SF <- item80.os.final[which(item80.os.final$BuildingType == "Single Family")
                                      ,-which(colnames(item80.os.final) %in% c("BuildingType"))]
exportTable(item80.os.final.SF, "SF", "Table 87", weighted = TRUE, osIndicator = export.ind, OS = T)
#######################
# Unweighted Analysis
#######################
# Unweighted version of the item 80 means, same row/column layout as the
# weighted table but without error-bound (EB) columns.
item80.os.final <- mean_two_groups_unweighted(CustomerLevelData = item80.os.data
                                              ,valueVariable = 'Count'
                                              ,byVariableColumn = "CK_Building_ID"
                                              ,byVariableRow = 'Type'
                                              ,columnAggregate = "Remove"
                                              ,rowAggregate = "Total")
item80.os.cast <- item80.os.final[which(item80.os.final$Type != "Total"),]
names(item80.os.cast)
if(os.ind == "scl"){
# Reshape into the report layout; backticked names are the space-separated
# columns produced by the helper.
item80.os.final <- data.frame("BuildingType" = item80.os.cast$BuildingType
                              ,"Type" = item80.os.cast$Type
                              ,"Mean_SCL.GenPop" = item80.os.cast$`Mean_SCL GenPop`
                              ,"SE_SCL.GenPop" = item80.os.cast$`SE_SCL GenPop`
                              ,"n_SCL.GenPop" = item80.os.cast$`n_SCL GenPop`
                              ,"Mean_SCL.LI" = item80.os.cast$`Mean_SCL LI`
                              ,"SE_SCL.LI" = item80.os.cast$`SE_SCL LI`
                              ,"n_SCL.LI" = item80.os.cast$`n_SCL LI`
                              ,"Mean_SCL.EH" = item80.os.cast$`Mean_SCL EH`
                              ,"SE_SCL.EH" = item80.os.cast$`SE_SCL EH`
                              ,"n_SCL.EH" = item80.os.cast$`n_SCL EH`
                              ,"Mean_2017.RBSA.PS" = item80.os.cast$`Mean_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item80.os.cast$`SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item80.os.cast$`n_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item80.os.final <- data.frame("BuildingType" = item80.os.cast$BuildingType
                              ,"Type" = item80.os.cast$Type
                              ,"Mean_SnoPUD" = item80.os.cast$`Mean_SnoPUD`
                              ,"SE_SnoPUD" = item80.os.cast$`SE_SnoPUD`
                              ,"n_SnoPUD" = item80.os.cast$`n_SnoPUD`
                              ,"Mean_2017.RBSA.PS" = item80.os.cast$`Mean_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item80.os.cast$`SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item80.os.cast$`n_2017 RBSA PS`
                              ,"Mean_RBSA.NW" = item80.os.cast$`Mean_2017 RBSA NW`
                              ,"SE_RBSA.NW" = item80.os.cast$`SE_2017 RBSA NW`
                              ,"n_RBSA.NW" = item80.os.cast$`n_2017 RBSA NW`)
}
item80.os.final.SF <- item80.os.final[which(item80.os.final$BuildingType == "Single Family")
                                      ,-which(colnames(item80.os.final) %in% c("BuildingType"))]
exportTable(item80.os.final.SF, "SF", "Table 87", weighted = FALSE, osIndicator = export.ind, OS = T)
#############################################################################################
#Table AB: Average Age of Appliance Equipment by Type
#############################################################################################
# Appliance ages come straight from appliances.dat; records with a missing
# or non-positive Age are dropped. Water heaters are not included here.
tableAB.os.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
                                                                       ,"Type"
                                                                       ,"Age"))]
tableAB.os.dat$count <- 1
tableAB.os.dat$Age <- as.numeric(as.character(tableAB.os.dat$Age))
tableAB.os.dat0 <- tableAB.os.dat[which(tableAB.os.dat$Age > 0),]
tableAB.os.merge <- left_join(os.dat, tableAB.os.dat0, by = "CK_Cadmus_ID")
tableAB.os.merge <- tableAB.os.merge[which(tableAB.os.merge$Age > 0),]
unique(tableAB.os.merge$Type)
# Keep only the five appliance categories reported in Table AB.
tableAB.os.merge <- tableAB.os.merge[which(tableAB.os.merge$Type %in% c("Dishwasher"
                                                                        ,"Dryer"
                                                                        ,"Freezer"
                                                                        ,"Refrigerator"
                                                                        ,"Washer")),]
################################################
# Adding pop and sample sizes for weights
################################################
tableAB.os.data <- weightedData(tableAB.os.merge[-which(colnames(tableAB.os.merge) %in% c("count"
                                                                                          ,"Type"
                                                                                          ,"Age"))])
tableAB.os.data <- left_join(tableAB.os.data, unique(tableAB.os.merge[which(colnames(tableAB.os.merge) %in% c("CK_Cadmus_ID"
                                                                                                              ,"count"
                                                                                                              ,"Type"
                                                                                                              ,"Age"))]))
tableAB.os.data$count <- 1
#######################
# Weighted Analysis
#######################
# Weighted mean appliance age, by type (rows) and oversample group (columns).
tableAB.os.final <- mean_two_groups(CustomerLevelData = tableAB.os.data
                                    ,valueVariable = 'Age'
                                    ,byVariableColumn = "CK_Building_ID"
                                    ,byVariableRow = 'Type'
                                    ,columnAggregate = "Remove"
                                    ,rowAggregate = "Total")
tableAB.os.cast <- tableAB.os.final[which(tableAB.os.final$Type != "Total"),]
names(tableAB.os.cast)
if(os.ind == "scl"){
tableAB.os.final <- data.frame("BuildingType" = tableAB.os.cast$BuildingType
                               ,"Type" = tableAB.os.cast$Type
                               ,"Mean_SCL.GenPop" = tableAB.os.cast$`Mean_SCL GenPop`
                               ,"SE_SCL.GenPop" = tableAB.os.cast$`SE_SCL GenPop`
                               ,"n_SCL.GenPop" = tableAB.os.cast$`n_SCL GenPop`
                               ,"Mean_SCL.LI" = tableAB.os.cast$`Mean_SCL LI`
                               ,"SE_SCL.LI" = tableAB.os.cast$`SE_SCL LI`
                               ,"n_SCL.LI" = tableAB.os.cast$`n_SCL LI`
                               ,"Mean_SCL.EH" = tableAB.os.cast$`Mean_SCL EH`
                               ,"SE_SCL.EH" = tableAB.os.cast$`SE_SCL EH`
                               ,"n_SCL.EH" = tableAB.os.cast$`n_SCL EH`
                               ,"Mean_2017.RBSA.PS" = tableAB.os.cast$`Mean_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAB.os.cast$`SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAB.os.cast$`n_2017 RBSA PS`
                               ,"EB_SCL.GenPop" = tableAB.os.cast$`EB_SCL GenPop`
                               ,"EB_SCL.LI" = tableAB.os.cast$`EB_SCL LI`
                               ,"EB_SCL.EH" = tableAB.os.cast$`EB_SCL EH`
                               ,"EB_2017.RBSA.PS" = tableAB.os.cast$`EB_2017 RBSA PS`)
}else if(os.ind == "snopud"){
tableAB.os.final <- data.frame("BuildingType" = tableAB.os.cast$BuildingType
                               ,"Type" = tableAB.os.cast$Type
                               ,"Mean_SnoPUD" = tableAB.os.cast$`Mean_SnoPUD`
                               ,"SE_SnoPUD" = tableAB.os.cast$`SE_SnoPUD`
                               ,"n_SnoPUD" = tableAB.os.cast$`n_SnoPUD`
                               ,"Mean_2017.RBSA.PS" = tableAB.os.cast$`Mean_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAB.os.cast$`SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAB.os.cast$`n_2017 RBSA PS`
                               ,"Mean_RBSA.NW" = tableAB.os.cast$`Mean_2017 RBSA NW`
                               ,"SE_RBSA.NW" = tableAB.os.cast$`SE_2017 RBSA NW`
                               ,"n_RBSA.NW" = tableAB.os.cast$`n_2017 RBSA NW`
                               ,"EB_SnoPUD" = tableAB.os.cast$`EB_SnoPUD`
                               ,"EB_2017.RBSA.PS" = tableAB.os.cast$`EB_2017 RBSA PS`
                               ,"EB_RBSA.NW" = tableAB.os.cast$`EB_2017 RBSA NW`)
}
tableAB.os.final.SF <- tableAB.os.final[which(tableAB.os.final$BuildingType == "Single Family")
                                        ,-which(colnames(tableAB.os.final) %in% c("BuildingType"))]
exportTable(tableAB.os.final.SF, "SF", "Table AB", weighted = TRUE, osIndicator = export.ind, OS = T)
#######################
# Unweighted Analysis
#######################
# Unweighted version of Table AB (no error-bound columns).
tableAB.os.final <- mean_two_groups_unweighted(CustomerLevelData = tableAB.os.data
                                               ,valueVariable = 'Age'
                                               ,byVariableColumn = "CK_Building_ID"
                                               ,byVariableRow = 'Type'
                                               ,columnAggregate = "Remove"
                                               ,rowAggregate = "Total")
tableAB.os.cast <- tableAB.os.final[which(tableAB.os.final$Type != "Total"),]
names(tableAB.os.cast)
if(os.ind == "scl"){
tableAB.os.final <- data.frame("BuildingType" = tableAB.os.cast$BuildingType
                               ,"Type" = tableAB.os.cast$Type
                               ,"Mean_SCL.GenPop" = tableAB.os.cast$`Mean_SCL GenPop`
                               ,"SE_SCL.GenPop" = tableAB.os.cast$`SE_SCL GenPop`
                               ,"n_SCL.GenPop" = tableAB.os.cast$`n_SCL GenPop`
                               ,"Mean_SCL.LI" = tableAB.os.cast$`Mean_SCL LI`
                               ,"SE_SCL.LI" = tableAB.os.cast$`SE_SCL LI`
                               ,"n_SCL.LI" = tableAB.os.cast$`n_SCL LI`
                               ,"Mean_SCL.EH" = tableAB.os.cast$`Mean_SCL EH`
                               ,"SE_SCL.EH" = tableAB.os.cast$`SE_SCL EH`
                               ,"n_SCL.EH" = tableAB.os.cast$`n_SCL EH`
                               ,"Mean_2017.RBSA.PS" = tableAB.os.cast$`Mean_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAB.os.cast$`SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAB.os.cast$`n_2017 RBSA PS`)
}else if(os.ind == "snopud"){
tableAB.os.final <- data.frame("BuildingType" = tableAB.os.cast$BuildingType
                               ,"Type" = tableAB.os.cast$Type
                               ,"Mean_SnoPUD" = tableAB.os.cast$`Mean_SnoPUD`
                               ,"SE_SnoPUD" = tableAB.os.cast$`SE_SnoPUD`
                               ,"n_SnoPUD" = tableAB.os.cast$`n_SnoPUD`
                               ,"Mean_2017.RBSA.PS" = tableAB.os.cast$`Mean_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAB.os.cast$`SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAB.os.cast$`n_2017 RBSA PS`
                               ,"Mean_RBSA.NW" = tableAB.os.cast$`Mean_2017 RBSA NW`
                               ,"SE_RBSA.NW" = tableAB.os.cast$`SE_2017 RBSA NW`
                               ,"n_RBSA.NW" = tableAB.os.cast$`n_2017 RBSA NW`)
}
tableAB.os.final.SF <- tableAB.os.final[which(tableAB.os.final$BuildingType == "Single Family")
                                        ,-which(colnames(tableAB.os.final) %in% c("BuildingType"))]
exportTable(tableAB.os.final.SF, "SF", "Table AB", weighted = FALSE, osIndicator = export.ind, OS = T)
#############################################################################################
#Table AC: Percent of Appliance Equipment above measure life by Type
#############################################################################################
# For water Heaters
# Water heater vintages come from mechanical.dat (DHW.Year.Manufactured);
# appliance vintages come from appliances.dat (Age). Both are combined and
# compared against per-type measure lives below.
tableAC.os.mech <- mechanical.dat[grep("Water Heat", mechanical.dat$Generic),]
tableAC.os.mech$Generic[grep("Water Heat", tableAC.os.mech$Generic)] <- "Water Heater"
tableAC.os.mech$WaterHeaterCount <- 1
tableAC.os.mech1 <- left_join(os.dat, tableAC.os.mech, by = "CK_Cadmus_ID")
tableAC.os.mech2 <- tableAC.os.mech1
which(duplicated(tableAC.os.mech2$CK_Cadmus_ID))
tableAC.os.mech2$WaterHeaterCount[which(is.na(tableAC.os.mech2$WaterHeaterCount))] <- 0
tableAC.os.mech2$count <- 1
#summarise by home
tableAC.os.site <- summarise(group_by(tableAC.os.mech2, CK_Cadmus_ID, Generic, DHW.Year.Manufactured)
                             ,count = sum(WaterHeaterCount))
unique(tableAC.os.site$count)
# NOTE(review): this two-column rename assumes Generic precedes
# DHW.Year.Manufactured in column order (which() returns positions in
# colnames order, not in %in% order) -- confirm.
colnames(tableAC.os.site)[which(colnames(tableAC.os.site) %in% c("Generic", "DHW.Year.Manufactured"))] <- c("Type","Age")
tableAC.os.site$Age <- as.numeric(as.character(tableAC.os.site$Age))
tableAC.os.site1 <- tableAC.os.site[which(!is.na(tableAC.os.site$Age)),]
tableAC.os.site2 <- tableAC.os.site1[which(tableAC.os.site1$Age > 0),]
tableAC.os.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
                                                                       ,"Type"
                                                                       ,"Age"
                                                                       ,""
                                                                       ,""))]
tableAC.os.dat$count <- 1
tableAC.os.dat$Age <- as.numeric(as.character(tableAC.os.dat$Age))
tableAC.os.dat0 <- tableAC.os.dat[which(tableAC.os.dat$Age > 0),]
# Stack water heater and appliance vintages, then join on the oversample
# frame and keep the reported categories only.
tableAC.os.merge0 <- rbind.data.frame(tableAC.os.site2, tableAC.os.dat0)
tableAC.os.merge <- left_join(os.dat, tableAC.os.merge0, by = "CK_Cadmus_ID")
tableAC.os.merge <- tableAC.os.merge[which(tableAC.os.merge$Age > 0),]
unique(tableAC.os.merge$Type)
tableAC.os.merge <- tableAC.os.merge[which(tableAC.os.merge$Type %in% c("Dishwasher"
                                                                        ,"Dryer"
                                                                        ,"Freezer"
                                                                        ,"Refrigerator"
                                                                        ,"Washer"
                                                                        ,"Water Heater")),]
# Measure life (years) per equipment type.
tableAC.os.merge$MeasureMap <- 0
tableAC.os.merge$MeasureMap[which(tableAC.os.merge$Type == "Refrigerator")] <- 15
tableAC.os.merge$MeasureMap[which(tableAC.os.merge$Type == "Freezer")] <- 22
tableAC.os.merge$MeasureMap[which(tableAC.os.merge$Type == "Washer")] <- 14
tableAC.os.merge$MeasureMap[which(tableAC.os.merge$Type == "Dryer")] <- 12
tableAC.os.merge$MeasureMap[which(tableAC.os.merge$Type == "Dishwasher")] <- 12
tableAC.os.merge$MeasureMap[which(tableAC.os.merge$Type == "Water Heater")] <- 15
# Age holds the manufacture year; Age.Diff is the equipment age in years
# as of the 2017 study. Ind = 1 flags units older than their measure life.
tableAC.os.merge$Age.Diff <- 2017 - tableAC.os.merge$Age
tableAC.os.merge$Above.Measure.Life <- "No"
tableAC.os.merge$Above.Measure.Life[which(tableAC.os.merge$Age.Diff > tableAC.os.merge$MeasureMap)] <- "Yes"
tableAC.os.merge$Ind <- 0
tableAC.os.merge$Ind[which(tableAC.os.merge$Age.Diff > tableAC.os.merge$MeasureMap)] <- 1
################################################
# Adding pop and sample sizes for weights
################################################
tableAC.os.data <- weightedData(tableAC.os.merge[-which(colnames(tableAC.os.merge) %in% c("Type"
                                                                                          ,"Age"
                                                                                          ,"count"
                                                                                          ,"MeasureMap"
                                                                                          ,"Above.Measure.Life"
                                                                                          ,"Age.Diff"
                                                                                          ,"Ind"))])
tableAC.os.data <- left_join(tableAC.os.data, unique(tableAC.os.merge[which(colnames(tableAC.os.merge) %in% c("CK_Cadmus_ID"
                                                                                                              ,"Type"
                                                                                                              ,"Age"
                                                                                                              ,"count"
                                                                                                              ,"MeasureMap"
                                                                                                              ,"Above.Measure.Life"
                                                                                                              ,"Age.Diff"
                                                                                                              ,"Ind"))]))
tableAC.os.data$count <- 1
tableAC.os.data$Count <- 1
#######################
# Weighted Analysis
#######################
# Weighted percent of equipment above its measure life, by type (rows)
# and oversample group (columns).
tableAC.os.final <- proportionRowsAndColumns1(CustomerLevelData = tableAC.os.data
                                              ,valueVariable = "Ind"
                                              ,columnVariable = "CK_Building_ID"
                                              ,rowVariable = "Type"
                                              ,aggregateColumnName = "Remove")
tableAC.os.final <- tableAC.os.final[which(tableAC.os.final$CK_Building_ID != "Remove"),]
tableAC.os.final <- tableAC.os.final[which(tableAC.os.final$Type != "Total"),]
tableAC.os.cast <- dcast(setDT(tableAC.os.final)
                         ,formula = BuildingType + Type ~ CK_Building_ID
                         ,value.var = c("w.percent", "w.SE","n", "EB"))
# FIX(review): the original printed names(tableAC.os.dat) here; every
# parallel section inspects the freshly cast table, so print the cast's
# names instead.
names(tableAC.os.cast)
if(os.ind == "scl"){
tableAC.os.final <- data.frame("BuildingType" = tableAC.os.cast$BuildingType
                               ,"Type" = tableAC.os.cast$Type
                               ,"Percent_SCL.GenPop" = tableAC.os.cast$`w.percent_SCL GenPop`
                               ,"SE_SCL.GenPop" = tableAC.os.cast$`w.SE_SCL GenPop`
                               ,"n_SCL.GenPop" = tableAC.os.cast$`n_SCL GenPop`
                               ,"Percent_SCL.LI" = tableAC.os.cast$`w.percent_SCL LI`
                               ,"SE_SCL.LI" = tableAC.os.cast$`w.SE_SCL LI`
                               ,"n_SCL.LI" = tableAC.os.cast$`n_SCL LI`
                               ,"Percent_SCL.EH" = tableAC.os.cast$`w.percent_SCL EH`
                               ,"SE_SCL.EH" = tableAC.os.cast$`w.SE_SCL EH`
                               ,"n_SCL.EH" = tableAC.os.cast$`n_SCL EH`
                               ,"Percent_2017.RBSA.PS" = tableAC.os.cast$`w.percent_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAC.os.cast$`w.SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAC.os.cast$`n_2017 RBSA PS`
                               ,"EB_SCL.GenPop" = tableAC.os.cast$`EB_SCL GenPop`
                               ,"EB_SCL.LI" = tableAC.os.cast$`EB_SCL LI`
                               ,"EB_SCL.EH" = tableAC.os.cast$`EB_SCL EH`
                               ,"EB_2017.RBSA.PS" = tableAC.os.cast$`EB_2017 RBSA PS`)
}else if(os.ind == "snopud"){
tableAC.os.final <- data.frame("BuildingType" = tableAC.os.cast$BuildingType
                               ,"Type" = tableAC.os.cast$Type
                               ,"Percent_SnoPUD" = tableAC.os.cast$`w.percent_SnoPUD`
                               ,"SE_SnoPUD" = tableAC.os.cast$`w.SE_SnoPUD`
                               ,"n_SnoPUD" = tableAC.os.cast$`n_SnoPUD`
                               ,"Percent_2017.RBSA.PS" = tableAC.os.cast$`w.percent_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAC.os.cast$`w.SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAC.os.cast$`n_2017 RBSA PS`
                               ,"Percent_RBSA.NW" = tableAC.os.cast$`w.percent_2017 RBSA NW`
                               ,"SE_RBSA.NW" = tableAC.os.cast$`w.SE_2017 RBSA NW`
                               ,"n_RBSA.NW" = tableAC.os.cast$`n_2017 RBSA NW`
                               ,"EB_SnoPUD" = tableAC.os.cast$`EB_SnoPUD`
                               ,"EB_2017.RBSA.PS" = tableAC.os.cast$`EB_2017 RBSA PS`
                               ,"EB_RBSA.NW" = tableAC.os.cast$`EB_2017 RBSA NW`)
}
tableAC.os.final.SF <- tableAC.os.final[which(tableAC.os.final$BuildingType == "Single Family")
                                        ,-which(colnames(tableAC.os.final) %in% c("BuildingType"))]
exportTable(tableAC.os.final.SF, "SF", "Table AC", weighted = TRUE, osIndicator = export.ind, OS = T)
#######################
# Unweighted Analysis
#######################
# Unweighted version of Table AC (proportions above measure life).
tableAC.os.final <- proportions_two_groups_unweighted(CustomerLevelData = tableAC.os.data
                                                      ,valueVariable = "Ind"
                                                      ,columnVariable = "CK_Building_ID"
                                                      ,rowVariable = "Type"
                                                      ,aggregateColumnName = "Remove")
tableAC.os.final <- tableAC.os.final[which(tableAC.os.final$CK_Building_ID != "Remove"),]
tableAC.os.final <- tableAC.os.final[which(tableAC.os.final$Type != "Total"),]
tableAC.os.cast <- dcast(setDT(tableAC.os.final)
                         ,formula = BuildingType + Type ~ CK_Building_ID
                         ,value.var = c("Percent", "SE","n"))
names(tableAC.os.cast)
if(os.ind == "scl"){
tableAC.os.final <- data.frame("BuildingType" = tableAC.os.cast$BuildingType
                               ,"Type" = tableAC.os.cast$Type
                               ,"Percent_SCL.GenPop" = tableAC.os.cast$`Percent_SCL GenPop`
                               ,"SE_SCL.GenPop" = tableAC.os.cast$`SE_SCL GenPop`
                               ,"n_SCL.GenPop" = tableAC.os.cast$`n_SCL GenPop`
                               ,"Percent_SCL.LI" = tableAC.os.cast$`Percent_SCL LI`
                               ,"SE_SCL.LI" = tableAC.os.cast$`SE_SCL LI`
                               ,"n_SCL.LI" = tableAC.os.cast$`n_SCL LI`
                               ,"Percent_SCL.EH" = tableAC.os.cast$`Percent_SCL EH`
                               ,"SE_SCL.EH" = tableAC.os.cast$`SE_SCL EH`
                               ,"n_SCL.EH" = tableAC.os.cast$`n_SCL EH`
                               ,"Percent_2017.RBSA.PS" = tableAC.os.cast$`Percent_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAC.os.cast$`SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAC.os.cast$`n_2017 RBSA PS`)
}else if(os.ind == "snopud"){
tableAC.os.final <- data.frame("BuildingType" = tableAC.os.cast$BuildingType
                               ,"Type" = tableAC.os.cast$Type
                               ,"Percent_SnoPUD" = tableAC.os.cast$`Percent_SnoPUD`
                               ,"SE_SnoPUD" = tableAC.os.cast$`SE_SnoPUD`
                               ,"n_SnoPUD" = tableAC.os.cast$`n_SnoPUD`
                               ,"Percent_2017.RBSA.PS" = tableAC.os.cast$`Percent_2017 RBSA PS`
                               ,"SE_2017.RBSA.PS" = tableAC.os.cast$`SE_2017 RBSA PS`
                               ,"n_2017.RBSA.PS" = tableAC.os.cast$`n_2017 RBSA PS`
                               ,"Percent_RBSA.NW" = tableAC.os.cast$`Percent_2017 RBSA NW`
                               ,"SE_RBSA.NW" = tableAC.os.cast$`SE_2017 RBSA NW`
                               ,"n_RBSA.NW" = tableAC.os.cast$`n_2017 RBSA NW`)
}
tableAC.os.final.SF <- tableAC.os.final[which(tableAC.os.final$BuildingType == "Single Family")
                                        ,-which(colnames(tableAC.os.final) %in% c("BuildingType"))]
exportTable(tableAC.os.final.SF, "SF", "Table AC", weighted = FALSE, osIndicator = export.ind, OS = T)
#############################################################################################
#Item 81: DISTRIBUTION OF REFRIGERATOR/FREEZERS BY VINTAGE (SF table 88, MH table 69)
#############################################################################################
#subset to columns needed for analysis
item81.os.dat <- appliances.dat[which(colnames(appliances.dat) %in% c("CK_Cadmus_ID"
                                                                      ,"Type"
                                                                      ,"Age"
                                                                      ,""))]
item81.os.dat$count <- 1
# Drop stray header rows, join the oversample frame, and keep only
# refrigerators and freezers.
item81.os.dat0 <- item81.os.dat[which(item81.os.dat$CK_Cadmus_ID != "CK_CADMUS_ID"),]
item81.os.dat1 <- left_join(item81.os.dat0, os.dat, by = "CK_Cadmus_ID")
item81.os.dat2 <- item81.os.dat1[which(item81.os.dat1$Type %in% c("Refrigerator", "Freezer")),]
# Bin equipment vintages for items 50 and 52 (4 categories)
# EquipVintage_bins starts as the numeric manufacture year, then each year
# is overwritten with its vintage-bin label.
# NOTE(review): the bin conditions compare $Age directly (possibly still
# character); this works for 4-digit years because character comparison is
# lexicographic, but confirm Age never carries non-year values here.
item81.os.dat2$EquipVintage_bins <- as.numeric(as.character(item81.os.dat2$Age))
item81.os.dat3 <- item81.os.dat2[which(!(is.na(item81.os.dat2$EquipVintage_bins))),]
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age < 1980)] <- "Pre 1980"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 1980 & item81.os.dat3$Age < 1990)] <- "1980-1989"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 1990 & item81.os.dat3$Age < 1995)] <- "1990-1994"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 1995 & item81.os.dat3$Age < 2000)] <- "1995-1999"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 2000 & item81.os.dat3$Age < 2005)] <- "2000-2004"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 2005 & item81.os.dat3$Age < 2010)] <- "2005-2009"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 2010 & item81.os.dat3$Age < 2015)] <- "2010-2014"
item81.os.dat3$EquipVintage_bins[which(item81.os.dat3$Age >= 2015)] <- "Post 2014"
#check uniques
unique(item81.os.dat3$EquipVintage_bins)
item81.os.merge <- left_join(os.dat, item81.os.dat3)
item81.os.merge <- item81.os.merge[which(!is.na(item81.os.merge$EquipVintage_bins)),]
################################################
# Adding pop and sample sizes for weights
################################################
item81.os.data <- weightedData(item81.os.merge[-which(colnames(item81.os.merge) %in% c("count"
                                                                                       ,"Type"
                                                                                       ,"Age"
                                                                                       ,"EquipVintage_bins"))])
item81.os.data <- left_join(item81.os.data, unique(item81.os.merge[which(colnames(item81.os.merge) %in% c("CK_Cadmus_ID"
                                                                                                          ,"count"
                                                                                                          ,"Type"
                                                                                                          ,"Age"
                                                                                                          ,"EquipVintage_bins"))]))
item81.os.data$count <- 1
#######################
# Weighted Analysis
#######################
# Weighted distribution of refrigerator/freezer vintages across the
# vintage bins, by oversample group.
item81.os.final <- proportionRowsAndColumns1(CustomerLevelData = item81.os.data
                                             ,valueVariable = "count"
                                             ,columnVariable = "CK_Building_ID"
                                             ,rowVariable = "EquipVintage_bins"
                                             ,aggregateColumnName = "Remove")
item81.os.final <- item81.os.final[which(item81.os.final$CK_Building_ID != "Remove"),]
item81.os.cast <- dcast(setDT(item81.os.final)
                        ,formula = BuildingType + EquipVintage_bins ~ CK_Building_ID
                        ,value.var = c("w.percent", "w.SE","n", "EB"))
names(item81.os.cast)
if(os.ind == "scl"){
item81.os.final <- data.frame("BuildingType" = item81.os.cast$BuildingType
                              ,"Equipment.Vintage" = item81.os.cast$EquipVintage_bins
                              ,"Percent_SCL.GenPop" = item81.os.cast$`w.percent_SCL GenPop`
                              ,"SE_SCL.GenPop" = item81.os.cast$`w.SE_SCL GenPop`
                              ,"n_SCL.GenPop" = item81.os.cast$`n_SCL GenPop`
                              ,"Percent_SCL.LI" = item81.os.cast$`w.percent_SCL LI`
                              ,"SE_SCL.LI" = item81.os.cast$`w.SE_SCL LI`
                              ,"n_SCL.LI" = item81.os.cast$`n_SCL LI`
                              ,"Percent_SCL.EH" = item81.os.cast$`w.percent_SCL EH`
                              ,"SE_SCL.EH" = item81.os.cast$`w.SE_SCL EH`
                              ,"n_SCL.EH" = item81.os.cast$`n_SCL EH`
                              ,"Percent_2017.RBSA.PS" = item81.os.cast$`w.percent_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item81.os.cast$`w.SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item81.os.cast$`n_2017 RBSA PS`
                              ,"EB_SCL.GenPop" = item81.os.cast$`EB_SCL GenPop`
                              ,"EB_SCL.LI" = item81.os.cast$`EB_SCL LI`
                              ,"EB_SCL.EH" = item81.os.cast$`EB_SCL EH`
                              ,"EB_2017.RBSA.PS" = item81.os.cast$`EB_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item81.os.final <- data.frame("BuildingType" = item81.os.cast$BuildingType
                              ,"Equipment.Vintage" = item81.os.cast$EquipVintage_bins
                              ,"Percent_SnoPUD" = item81.os.cast$`w.percent_SnoPUD`
                              ,"SE_SnoPUD" = item81.os.cast$`w.SE_SnoPUD`
                              ,"n_SnoPUD" = item81.os.cast$`n_SnoPUD`
                              ,"Percent_2017.RBSA.PS" = item81.os.cast$`w.percent_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item81.os.cast$`w.SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item81.os.cast$`n_2017 RBSA PS`
                              ,"Percent_RBSA.NW" = item81.os.cast$`w.percent_2017 RBSA NW`
                              ,"SE_RBSA.NW" = item81.os.cast$`w.SE_2017 RBSA NW`
                              ,"n_RBSA.NW" = item81.os.cast$`n_2017 RBSA NW`
                              ,"EB_SnoPUD" = item81.os.cast$`EB_SnoPUD`
                              ,"EB_2017.RBSA.PS" = item81.os.cast$`EB_2017 RBSA PS`
                              ,"EB_RBSA.NW" = item81.os.cast$`EB_2017 RBSA NW`)
}
unique(item81.os.final$Equipment.Vintage)
# Force the vintage bins into chronological display order.
rowOrder <- c("Pre 1980"
              ,"1980-1989"
              ,"1990-1994"
              ,"1995-1999"
              ,"2000-2004"
              ,"2005-2009"
              ,"2010-2014"
              ,"Post 2014"
              ,"Total")
item81.os.final <- item81.os.final %>% mutate(Equipment.Vintage = factor(Equipment.Vintage, levels = rowOrder)) %>% arrange(Equipment.Vintage)
item81.os.final <- data.frame(item81.os.final)
item81.os.final.SF <- item81.os.final[which(item81.os.final$BuildingType == "Single Family")
                                      ,-which(colnames(item81.os.final) %in% c("BuildingType"))]
exportTable(item81.os.final.SF, "SF", "Table 88", weighted = TRUE, osIndicator = export.ind, OS = T)
#######################
# Unweighted Analysis
#######################
# Unweighted version of the vintage distribution (no error-bound columns).
item81.os.final <- proportions_two_groups_unweighted(CustomerLevelData = item81.os.data
                                                     ,valueVariable = "count"
                                                     ,columnVariable = "CK_Building_ID"
                                                     ,rowVariable = "EquipVintage_bins"
                                                     ,aggregateColumnName = "Remove")
item81.os.final <- item81.os.final[which(item81.os.final$CK_Building_ID != "Remove"),]
item81.os.cast <- dcast(setDT(item81.os.final)
                        ,formula = BuildingType + EquipVintage_bins ~ CK_Building_ID
                        ,value.var = c("Percent", "SE","n"))
names(item81.os.cast)
if(os.ind == "scl"){
item81.os.final <- data.frame("BuildingType" = item81.os.cast$BuildingType
                              ,"Equipment.Vintage" = item81.os.cast$EquipVintage_bins
                              ,"Percent_SCL.GenPop" = item81.os.cast$`Percent_SCL GenPop`
                              ,"SE_SCL.GenPop" = item81.os.cast$`SE_SCL GenPop`
                              ,"n_SCL.GenPop" = item81.os.cast$`n_SCL GenPop`
                              ,"Percent_SCL.LI" = item81.os.cast$`Percent_SCL LI`
                              ,"SE_SCL.LI" = item81.os.cast$`SE_SCL LI`
                              ,"n_SCL.LI" = item81.os.cast$`n_SCL LI`
                              ,"Percent_SCL.EH" = item81.os.cast$`Percent_SCL EH`
                              ,"SE_SCL.EH" = item81.os.cast$`SE_SCL EH`
                              ,"n_SCL.EH" = item81.os.cast$`n_SCL EH`
                              ,"Percent_2017.RBSA.PS" = item81.os.cast$`Percent_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item81.os.cast$`SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item81.os.cast$`n_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item81.os.final <- data.frame("BuildingType" = item81.os.cast$BuildingType
                              ,"Equipment.Vintage" = item81.os.cast$EquipVintage_bins
                              ,"Percent_SnoPUD" = item81.os.cast$`Percent_SnoPUD`
                              ,"SE_SnoPUD" = item81.os.cast$`SE_SnoPUD`
                              ,"n_SnoPUD" = item81.os.cast$`n_SnoPUD`
                              ,"Percent_2017.RBSA.PS" = item81.os.cast$`Percent_2017 RBSA PS`
                              ,"SE_2017.RBSA.PS" = item81.os.cast$`SE_2017 RBSA PS`
                              ,"n_2017.RBSA.PS" = item81.os.cast$`n_2017 RBSA PS`
                              ,"Percent_RBSA.NW" = item81.os.cast$`Percent_2017 RBSA NW`
                              ,"SE_RBSA.NW" = item81.os.cast$`SE_2017 RBSA NW`
                              ,"n_RBSA.NW" = item81.os.cast$`n_2017 RBSA NW`)
}
unique(item81.os.final$Equipment.Vintage)
rowOrder <- c("Pre 1980"
              ,"1980-1989"
              ,"1990-1994"
              ,"1995-1999"
              ,"2000-2004"
              ,"2005-2009"
              ,"2010-2014"
              ,"Post 2014"
              ,"Total")
item81.os.final <- item81.os.final %>% mutate(Equipment.Vintage = factor(Equipment.Vintage, levels = rowOrder)) %>% arrange(Equipment.Vintage)
item81.os.final <- data.frame(item81.os.final)
item81.os.final.SF <- item81.os.final[which(item81.os.final$BuildingType == "Single Family")
                                      ,-which(colnames(item81.os.final) %in% c("BuildingType"))]
exportTable(item81.os.final.SF, "SF", "Table 88", weighted = FALSE, osIndicator = export.ind, OS = T)
|
f2fb9664b82ce454102d07b4753a402eaec23984
|
4b7f18afdadfa37c379370180ae0b0fb69d9a823
|
/07_R_Graphs/PAR1_diversity.R
|
36fce86c348e7446917bee36edb82cc64515f76c
|
[] |
no_license
|
WilsonSayresLab/PARdiversity
|
98b6d3c051b645fcbaa0b58db4cebad0626aab96
|
bedfb23d74aaa046f5ffcac443a4ddb249d58354
|
refs/heads/master
| 2021-01-17T11:01:14.362053
| 2016-03-23T17:17:23
| 2016-03-23T17:17:23
| 54,513,259
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,098
|
r
|
PAR1_diversity.R
|
# Figure: nucleotide diversity (pi) in 100 kb windows across chromosome X,
# one panel per alignment (A: humans only, B-D: filtered against panTro4,
# RheMac3 and canFam3 outgroups). Points are colored red inside the PARs
# (<= 2.699 Mb and >= 154.94 Mb), blue in the 88.19-93.19 Mb region, and
# black elsewhere.
par(mfrow = c(4, 1))
par(cex = 0.6)
par(mar = c(2, 2, 1.5, 1.5), oma = c(4, 4, 0.5, 0.5))
par(tcl = -0.25)
par(mgp = c(2, 0.6, 0))

# Read one windowed-diversity file and draw its panel.
# Expected columns: V1 = window start (bp), V2 = window end (bp), V3 = pi.
# Extra arguments (e.g. xlab/ylab) are forwarded to plot(). The local
# variables are deliberately named Position/Diversity so panels without an
# explicit xlab/ylab keep the same default axis titles as before.
plot_diversity_panel <- function(path, panel_label, ...) {
  windows <- read.delim(path, header = FALSE)
  Position <- ((windows$V1 + windows$V2) / 2) * 0.000001  # window midpoint, bp -> Mb
  Diversity <- windows$V3
  plot(Position, Diversity, pch = 20, ...,
       col = sapply(Position, function(x) {
         if (x <= 2.699) {
           "red"
         } else if (x >= 88.193855 & x <= 93.193855) {
           "blue"
         } else if (x >= 154.940559) {
           "red"
         } else {
           "black"
         }
       }))
  mtext(panel_label, side = 3, line = 0.1, adj = -0.07, cex = 0.9,
        col = "black")
}

data_dir <- "~/Projects/PAR/BrotmanCotter/PAR_Project/Codes_02/08_galaxy_data_for_graphs"

# NOTE(review): with mfrow = c(4, 1), each loop iteration draws five panels
# (one blank spacer plus A-D), so four iterations span several device
# pages. This reproduces the original behavior -- confirm the outer loop
# is intentional.
for (i in 1:4) {
  plot(1, axes = FALSE, type = "n")
  plot_diversity_panel(file.path(data_dir, "filtered_pi.All26.100kb_filtered.o.PAR1.txt"), "A")
  plot_diversity_panel(file.path(data_dir, "All26.100kb_filtered.o.panTro4_PAR1.txt"), "B")
  plot_diversity_panel(file.path(data_dir, "All26.100kb_filtered.o.RheMac3_PAR1.txt"), "C",
                       xlab = "Chromosome X Position (Mb)", ylab = "Diversity (pi)")
  plot_diversity_panel(file.path(data_dir, "All26.100kb_filtered.o.canFam3_PAR1.txt"), "D",
                       xlab = "Chromosome X Position (Mb)", ylab = "Diversity (pi)")
  box(col = "black")
}
# Shared outer-margin axis titles for the whole figure.
mtext("Chromosome X Position (Mb)", side = 1, outer = TRUE, cex = 0.9, line = 2.2,
      col = "black")
mtext("Diversity (pi)", side = 2, outer = TRUE, cex = 0.9, line = 2.2,
      col = "black")
|
2d6578d64d5ef5b7ab4f72b8330666bb48b12194
|
71f44b2dfd4b8d8c33f6b0936ff34a3cd361c484
|
/chapter01.R
|
dc97e64c0d66a4b2eb634da0e84ce03e8ee741ac
|
[] |
no_license
|
Libardo1/data-manipulation-with-R
|
6dfb1e3464b95bd741e7f6ecdab521cc85b09936
|
c5a473d3ee20007e251183459b5e3a033148b086
|
refs/heads/master
| 2021-01-15T09:28:55.792793
| 2013-12-29T11:59:25
| 2013-12-29T11:59:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
chapter01.R
|
# Worked examples transcribed from "Data Manipulation with R", chapter 1.
# The script is meant to be stepped through interactively: bare expressions
# rely on R's auto-printing, and several lines deliberately demonstrate
# warnings (e.g. recycling) rather than clean results.
rm(list=ls())
# 1.1 Modes and Classes
# NOTE(review): factor("a","b","a") is factor(x = "a", levels = "b",
# labels = "a"); the book's intended example was presumably
# factor(c("a","b","a")) -- kept verbatim as transcribed.
mylist=list(a=c(1,2,3),b=c("cat","dog","duck"),d=factor("a","b","a"))
sapply(mylist,mode)
sapply(mylist,class)
# 1.2 Data Storage in R
x=c(1,2,5,10)
x
mode(x)
# Mixing a string into a numeric vector coerces the whole vector to character.
y=c(1,2,"cat",3)
mode(y)
# TRUE is coerced to 1 in a numeric vector.
z=c(5,TRUE,3,7)
mode(z)
all=c(x,y,z)
all
x=c(one=1,two=2,three=3)
x
x=c(1,2,3)
x
names(x)=c('one','two','three')
x
str(x)
mode(x)
class(x)
# Recycling: the shorter vector is repeated; a warning is raised when its
# length does not evenly divide the longer one (nums+c(1,2,3)).
nums=1:10
nums+1
nums+c(1,2)
nums+1:2
nums+c(1,2,3)
rmat=matrix(rnorm(15),5,3,
            dimnames=list(NULL,c('A','B','C')))
rmat
rmat[,'A']
as.matrix(rmat[,'A'])
mylist=list(c(1,4,6),"dog",3,"cat",TRUE,c(9,10,11))
mylist
sapply(mylist,mode)
sapply(mylist,class)
mylist=list(first=c(1,3,5),second=c('one','three','five'),third='end')
mylist
mylist['third']
mylist=list(c(1,3,5),c('one','three','five'),'end')
names(mylist)=c('first','second','third')
mylist
# 1.3 Testing for Modes and Classes
# no code
# 1.4 Structure of R Objects
mylist=list(a=c(1,2,3),b=c('cat','dog','duck'),d=factor('a','b','a'))
summary(mylist)
nestlist=list(a=list(matrix(rnorm(10),5,2),val=3),
              b=list(sample(letters,10),values=runif(5)),
              c=list(list(1:10,1:20),list(1:5,1:10)))
summary(nestlist)
str(nestlist)
list(1:4,1:5)
# 1.5 Conversion of Lists
# table() names are character; they must be converted back to numeric
# before arithmetic (the first sum() call demonstrates the failure mode).
nums=c(12,10,8,12,10,12,8,10,12,8)
tt=table(nums)
tt
names(tt)
sum(names(tt)*tt)
sum(as.numeric(names(tt))*tt)
as.numeric("123")
# list(x) wraps the vector in a one-element list; as.list(x) splits it
# into one element per value.
x=c(1,2,3,4,5)
list(x)
as.list(x)
# 1.6 Missing Values
# no code
# 1.7 Working with Missing Values
# no code
|
993c9dbabe3319104fd004a707afde3ad30d6671
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spdep/examples/LOSH.mc.Rd.R
|
06fdbdb0834b7c3d1ff2626c5b45fe7c4071ad4f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
LOSH.mc.Rd.R
|
# Auto-extracted example code for spdep::LOSH.mc: bootstrap-based test for
# local spatial heteroscedasticity, run on the columbus crime data and
# compared against the chi-square-based LOSH.cs variant.
library(spdep)
### Name: LOSH.mc
### Title: Bootstrapping-based test for local spatial heteroscedasticity
### Aliases: LOSH.mc
### Keywords: spatial
### ** Examples
data(columbus, package="spData")
# NOTE(review): the positional arguments 2 and 100 are presumably the
# exponent a and the number of bootstrap simulations -- see ?LOSH.mc.
resLOSH_mc <- LOSH.mc(columbus$CRIME, nb2listw(col.gal.nb), 2, 100)
resLOSH_cs <- LOSH.cs(columbus$CRIME, nb2listw(col.gal.nb))
# Compare bootstrap p-values against the chi-square approximation.
plot(resLOSH_mc[,"Pr()"], resLOSH_cs[,"Pr()"])
|
3300e5d27d61d9551e5ae9561fdc8b544945637a
|
74e560e97d1e07a7fbd325165ffb61e7dbbe01c6
|
/Plot2.R
|
5fc155983e79f41c271b038a444d80638467f97e
|
[] |
no_license
|
RogerioDestro/JHExploratoryData
|
3084044f087bef9c592bf0881068111b82969aec
|
f76c1b2f15e8e08aa1d217ac24cf167c3bbdba31
|
refs/heads/master
| 2021-01-10T15:31:55.633175
| 2016-02-13T13:44:25
| 2016-02-13T13:44:25
| 51,468,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
r
|
Plot2.R
|
plot2 <- function(dados){
  # Draw the "Global Active Power over time" line chart (Plot 2 of the
  # Electric Power Consumption assignment).
  #
  # dados: data frame with columns
  #   jtime               - numeric time values for the x axis
  #   Date                - Date column, one entry per observation
  #   Global_active_power - household global active power in kilowatts
  #
  # Returns nothing; draws on the active graphics device.
  #
  # Capture the caller's LC_TIME locale and restore it on exit, so the
  # English weekday labels leave no lasting side effect on the session
  # (the original set the locale and never restored it).
  old_locale <- Sys.getlocale("LC_TIME")
  on.exit(Sys.setlocale("LC_TIME", old_locale), add = TRUE)
  # "English" is a Windows locale name (original author ran Windows 7).
  Sys.setlocale("LC_TIME", "English")
  # Plot the series as a line, suppressing the default x axis.
  plot(dados$jtime, dados$Global_active_power, type = "l", xaxt = "n",
       xlab = "", ylab = "Global Active Power (kilowatts)")
  # Label the x axis with the abbreviated weekday at the start, middle and
  # end of the series.
  # NOTE(review): days(1) comes from lubridate, which must already be
  # attached by the caller -- confirm.
  axis(1,
       at = c(1, length(dados$Date) / 2, length(dados$Date)),
       c(weekdays(dados$Date[1], abbreviate = TRUE),
         weekdays(dados$Date[length(dados$Date)], abbreviate = TRUE),
         weekdays(dados$Date[length(dados$Date)] + days(1), abbreviate = TRUE)))
}
|
8546743e0c5a879c22cc9f8d5b6f96628b58b6a1
|
627119064049fb9cf070d73e3766cceac02ab514
|
/man/mlear1.Rd
|
ed842c241fa28e639bc18af5c3621d94b6268ba8
|
[] |
no_license
|
cran/HKprocess
|
7061c227c0593010f2abde4db3fea1fd008aed31
|
b7aea6c4c497d4b822c11a460390029ab09537e7
|
refs/heads/master
| 2022-11-14T08:24:10.955085
| 2022-10-26T21:17:59
| 2022-10-26T21:17:59
| 49,682,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,928
|
rd
|
mlear1.Rd
|
\name{mlear1}
\alias{mlear1}
\title{
Maximum likelihood estimation for the AR(1) parameters.
}
\description{
The function mlear1 is used to estimate the
\ifelse{html}{\out{μ}}{\eqn{\mu}{mu}},
\ifelse{html}{\out{σ}}{\eqn{\sigma}{sigma}} and
\ifelse{html}{\out{φ}}{\eqn{\phi}{phi}} parameters of the AR(1) process as
defined in Tyralis and Koutsoyiannis (2014). The method for their estimation is
described in eqs.8-9 (Tyralis and Koutsoyiannis 2011).
}
\usage{
mlear1(data, interval = c(-0.9999, 0.9999), tol = .Machine$double.eps^0.25)
}
\arguments{
\item{data}{time series data}
\item{interval}{\ifelse{html}{\out{φ}}{\eqn{\phi}{phi}} interval
estimation}
\item{tol}{estimation error tolerance}
}
\value{
A vector whose values are the maximum likelihood estimates of
\ifelse{html}{\out{μ}}{\eqn{\mu}{mu}},
\ifelse{html}{\out{σ}}{\eqn{\sigma}{sigma}} and
\ifelse{html}{\out{φ}}{\eqn{\phi}{phi}}.
}
\note{The function likelihoodfunction.c is called from the C library of the
package. Ideas for creating this function came from McLeod et al. (2007).}
\author{Hristos Tyralis}
\references{
McLeod AI, Yu H, Krougly ZL (2007) Algorithms for linear time series
analysis: With R package. \emph{Journal of Statistical Software}
\bold{23(5)}:1--26.
\doi{10.18637/jss.v023.i05}.
Tyralis H, Koutsoyiannis D (2011) Simultaneous estimation of the parameters
of the Hurst-Kolmogorov stochastic process.
\emph{Stochastic Environmental Research & Risk Assessment} \bold{25(1)}:21--33.
\doi{10.1007/s00477-010-0408-x}.
Tyralis H, Koutsoyiannis D (2014) A Bayesian statistical model for deriving
the predictive distribution of hydroclimatic variables. \emph{Climate Dynamics}
\bold{42(11-12)}:2867--2883.
\doi{10.1007/s00382-013-1804-y}.
}
\examples{
# Estimate the parameters for the Nile time series.
mlear1(Nile)
}
\keyword{ts}
|
e1a54b1f1b9b12acb360f2a9a260f46bd94c2d57
|
659aefca294d97f0ed3df28f11448100ae9dcae8
|
/cachematrix.R
|
2abf011ef1960e61bf14451ecd22de6e5f61069e
|
[] |
no_license
|
dmanicka/ProgrammingAssignment2
|
cb25787855f6cd4af140dd8cf0cee59096fdb073
|
5b4a308cf2772904bd95cd654f7ea00620e6bfe5
|
refs/heads/master
| 2021-01-13T16:54:29.897479
| 2017-01-21T14:15:36
| 2017-01-21T14:15:36
| 79,576,390
| 0
| 0
| null | 2017-01-20T16:35:53
| 2017-01-20T16:35:53
| null |
UTF-8
|
R
| false
| false
| 1,363
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Function creates a matrix object that can cache its inverse
## Create a wrapper around a matrix that can cache its inverse.
##
## Args:
##   m: the matrix to wrap (defaults to an empty 1x1 NA matrix).
## Returns: a list of four accessor closures sharing the matrix `m`
##   and its cached inverse.
makeCacheMatrix <- function(m = matrix()) {
  ## Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL

  ## Replace the stored matrix and invalidate the cached inverse.
  set <- function(matrix) {
    m <<- matrix
    cached_inverse <<- NULL
  }

  ## Return the stored matrix.
  get <- function() m

  ## Store a computed inverse in the cache.
  setinverse <- function(inverse) cached_inverse <<- inverse

  ## Return the cached inverse (NULL if none has been stored).
  getinverse <- function() cached_inverse

  ## Expose the accessors by name.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve calculates the inverse of special matrix created by makecachematrix function
## If the inverse has been already computed the function retrieves the inverse from the cache
## Compute the inverse of the special "matrix" created by makeCacheMatrix,
## retrieving it from the cache when it has already been computed.
##
## Args:
##   x:   object returned by makeCacheMatrix.
##   ...: further arguments passed on to solve().
## Returns: the inverse of the matrix stored in x.
cacheSolve <- function(x, ...) {
  ## Return the cached inverse if it exists.
  m <- x$getinverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  ## Cache miss: fetch the matrix and invert it.
  data <- x$get()
  ## BUG FIX: the original computed `solve(data) %*% data`, which is the
  ## identity matrix, and cached that instead of the inverse.
  m <- solve(data, ...)
  ## Store the inverse in the cache for next time.
  x$setinverse(m)
  ## Return the inverse.
  m
}
|
8fe0056e8913a523ec315dc3164e7d7a22d8e863
|
6ec650d8565f4b68a3a2c23bd9c39bbad4e1006a
|
/man/sodomeetgomorrhe.Rd
|
1fd2abaf16c9de75d88664f7df71ab426e35318d
|
[] |
no_license
|
ColinFay/proustr
|
9de427aa8f69eb527b6e8d2f12b046473bc9308b
|
ff1cd4bad45701e97ae85f894dbcab1bff9f15de
|
refs/heads/master
| 2021-01-24T08:32:11.157401
| 2019-02-05T13:17:02
| 2019-02-05T13:17:02
| 93,385,515
| 26
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 492
|
rd
|
sodomeetgomorrhe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proust_novels.R
\docType{data}
\name{sodomeetgomorrhe}
\alias{sodomeetgomorrhe}
\title{Marcel Proust's novel "Sodome et Gomorrhe"}
\format{A tibble with text, book, volume, and year}
\source{
<https://fr.wikisource.org/wiki/Sodome_et_Gomorrhe>
}
\usage{
sodomeetgomorrhe
}
\description{
A dataset containing Marcel Proust's "Sodome et Gomorrhe".
This text has been downloaded from WikiSource.
}
\keyword{datasets}
|
cf6c251d12363988ff03350ef4e634a1d1962339
|
ce2496ff30f10e4c35e82d25a0db9765703713b5
|
/man/Hills.Rd
|
63681a067061e5b5ef5222bb3d5e4a81d1c66d97
|
[
"MIT"
] |
permissive
|
klauswiese/pnlt
|
1dd90476a161951d1a9169ce0542b7760dd80ba8
|
cf6c8fcaa297b6551c880d09173457d71b4019a7
|
refs/heads/main
| 2023-08-16T16:51:01.527324
| 2021-10-02T17:12:15
| 2021-10-02T17:12:15
| 383,334,757
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 721
|
rd
|
Hills.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Hills.R
\docType{data}
\name{Hills}
\alias{Hills}
\title{Data names of Hills in La Tigra National Park, Honduras}
\format{
Simple feature collection with 47 features and 3 fields:
\itemize{
\item{id} {}
\item{name} {}
\item{elevation} {}
}
}
\source{
Instituto Geográfico Nacional, escala 1:50,000.
}
\usage{
Hills
}
\description{
Data names of Hills in La Tigra National Park, Honduras
}
\examples{
if (requireNamespace("sf", quietly = TRUE)) {
library(sf)
data(Hills)
plot(Hills[2], axes=TRUE, pch=16)
}
}
\references{
\url{https://www.ign.hn/index.php}
}
\keyword{datasets}
\keyword{sf}
|
2df4034e18cf37a24d699807fb65cca5f230d0d6
|
d1d19805e21ac6305341d9815f80551f03bfb514
|
/best.r
|
fc0634e18d3806daa701960c0ef6f310e4c4615f
|
[] |
no_license
|
etaney/ProgrammingAssignment3
|
8aff7e56fdf7183d6dfb2e8fc5a4c380c2b8c691
|
51dedeb375311e03858e909e9a6cd8a61d69d572
|
refs/heads/master
| 2020-12-25T14:49:01.054996
| 2016-07-24T05:05:06
| 2016-07-24T05:05:06
| 64,049,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,298
|
r
|
best.r
|
## Find the hospital in a given state with the lowest 30-day mortality rate
## for an outcome ("heart attack", "heart failure" or "pneumonia").
##
## Reads "outcome-of-care-measures.csv" from the current working directory.
##
## Args:
##   state:   two-letter state abbreviation as it appears in column 7.
##   outcome: one of "heart attack", "heart failure", "pneumonia".
## Returns: the hospital name(s) with the minimum rate, or the strings
##   "invalid state" / "invalid outcome" for bad input (behaviour kept from
##   the original version, which returned rather than stop()ed).
best <- function(state, outcome){
  ## Suppress coercion warnings while reading, and restore the previous
  ## setting on exit.  The original set options(warn=0) after the return
  ## statements, i.e. dead code that also clobbered any non-default setting.
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)

  ## Read outcome data (everything as character; rates coerced below).
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  usedata <- data.frame(hospital = data[, 2],
                        statename = data[, 7],
                        HeartAttack = as.numeric(data[, 11]),
                        HeartFailure = as.numeric(data[, 17]),
                        Pneumonia = as.numeric(data[, 23]))
  ## BUG FIX: the original ran `usedata[outcome] <- as.numeric(usedata[outcome])`
  ## here, but no column is named after the outcome string ("heart attack",
  ## ...), so every call failed with "undefined columns selected".  The rate
  ## columns are already numeric from the construction above.

  ## Check that state and outcome are valid.
  if (sum(usedata$statename == state) == 0) {
    return("invalid state")
  }
  if (outcome != "heart failure" && outcome != "heart attack" && outcome != "pneumonia") {
    return("invalid outcome")
  }

  statedata <- usedata[usedata$statename == state, ]

  ## Map the outcome string to its rate column, drop missing rates, and
  ## return the hospital(s) with the lowest 30-day death rate (ties return
  ## every tied hospital, as in the original).
  col <- switch(outcome,
                "heart attack"  = "HeartAttack",
                "heart failure" = "HeartFailure",
                "pneumonia"     = "Pneumonia")
  statedata <- statedata[!is.na(statedata[[col]]), ]
  statedata$hospital[statedata[[col]] == min(statedata[[col]])]
}
|
dd5bb21db316cd147d754499d2e34f9c28ea7790
|
0bf451654ba419e58b139e3e7786a234484608ca
|
/tests/testthat/test-quantile.R
|
e7486666a14c52c7b6d087989463046df2817646
|
[] |
no_license
|
cran/extremeStat
|
0d57c0bba0518fd8fea07eb12b00abf2c79aa5f4
|
8a24429cbd4067c35805eadeeda5cb1124c3cf7e
|
refs/heads/master
| 2023-04-13T19:51:57.912209
| 2023-04-08T05:30:02
| 2023-04-08T05:30:02
| 58,643,208
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,628
|
r
|
test-quantile.R
|
# testthat unit tests for extremeStat::distLquantile and related fitting
# helpers.  NOTE(review): these tests require the extremeStat package and its
# suggested GPD-fitting packages (lmomco, Renext, extRemes, evir, fExtremes,
# ismev, evd) -- they cannot run with the standard library alone.
context("distLquantile")
data(annMax, package="extremeStat") # Annual Discharge Maxima (streamflow)
set.seed(007) # with other random samples, there can be warnings in q_gpd -> Renext::fGPD -> fmaxlo
# Expected number of rows in a distLquantile result matrix.
ndist <- length(lmomco::dist.list()) - 13 + 22
# 13: excluded in distLfit.R Line 149
# 22: empirical, weighted, GPD_, n, threshold, etc
test_that("distLquantile generally runs fine",{
  distLquantile(annMax)
  expect_equal(nrow(distLquantile(annMax[annMax<30])), ndist)
  expect_equal(nrow(distLquantile(annMax)), ndist)
  expect_silent(distLquantile(annMax, truncate=0.6, gpd=FALSE, time=FALSE))
  expect_message(distLquantile(annMax, selection="wak", empirical=FALSE, quiet=FALSE),
                 "distLfit execution took")
  expect_message(distLquantile(rexp(199), truncate=0.8, probs=0.7, time=FALSE, emp=FALSE, quiet=FALSE),
                 "must contain values that are larger than")
  expect_message(distLquantile(rexp(4), selection="gpa"),
                 "Note in distLquantile: sample size is too small to fit parameters (4). Returning NAs", fixed=TRUE)
  d <- distLquantile(annMax, probs=0:4/4)
})
test_that("infinite values are removed",{
  expect_message(distLextreme(c(-Inf,annMax)),
                 "1 Inf/NA was omitted from 36 data points (2.8%)", fixed=TRUE)
})
test_that("distLquantile can handle selection input",{
  dlf <- distLquantile(annMax, selection="wak", empirical=FALSE, list=TRUE)
  plotLquantile(dlf, breaks=10)
  expect_message(distLquantile(rexp(199), sel=c("wak", "gpa"), truncate=0.8, probs=c(0.7, 0.8, 0.9)),
                 "Note in q_gpd: quantiles for probs (0.7) below truncate (0.8) replaced with NAs.", fixed=TRUE)
  distLquantile(rexp(199), selection=c("wak", "gpa"))
  distLquantile(rexp(199), selection="gpa")
  # Numeric 'selection' indices were removed in 0.4.36 and must error.
  expect_error(distLquantile(rexp(199), selection=1:5, emp=FALSE), # index is a bad idea anyways
               "Since Version 0.4.36 (2015-08-31), 'selection' _must_ be a character string vector", fixed=TRUE)
  expect_error(distLquantile(rexp(199), selection=-3),
               "Since Version 0.4.36 (2015-08-31), 'selection' _must_ be a character string vector", fixed=TRUE)
  set.seed(42)
  expect_warning(dlf <- distLfit(rnorm(100))) # gam + ln3 excluded
  expect_equal(dlf$distfailed, c(gam="gam", ln3="ln3"))
  dlf <- distLfit(annMax)
  shouldbe <- c("80%"=82.002, "90%"=93.374, "99%"=122.505, "RMSE"=0.022)
  # Passing raw data and passing a prefitted dlf must agree.
  d1 <- distLquantile(annMax, selection="dummy", onlydn=FALSE)
  d2 <- distLquantile(dlf=dlf, selection="dummy", onlydn=FALSE)
  expect_equal(d1,d2)
  d1 <- distLquantile(annMax,  selection = c("dummy","revgum","wak"))
  d2 <- distLquantile(dlf=dlf, selection = c("dummy","revgum","wak"))
  expect_equal(d1,d2)
  expect_equal(round(d1[1,], 3), shouldbe)
  expect_equal(round(d2[1,], 3), shouldbe)
  dlf <- distLfit(annMax, selection=c("ln3","wak","gam", "gum"))
  expect_equal(rownames(dlf$gof), c("wak", "ln3", "gum", "gam") )
  sel <- c("dummy","gam","zzz","revgum","wak")
  d3 <- distLquantile(annMax,  selection=sel, emp=FALSE )
  d4 <- distLquantile(dlf=dlf, selection=sel, emp=FALSE )
  o3 <- distLquantile(annMax,  selection=sel, emp=FALSE, order=FALSE)
  o4 <- distLquantile(dlf=dlf, selection=sel, emp=FALSE, order=FALSE)
  expect_equal(rownames(d3)[1:5], c("wak","gam","revgum","dummy","zzz"))
  expect_equal(rownames(d4)[1:5], c("wak","gam","dummy","zzz","revgum")) # dlf does not have revgum
  expect_equal(rownames(o3)[1:5], sel)
  expect_equal(rownames(o4)[1:5], sel)
})
test_that("distLfit can handle truncate and threshold",{
  expect_message(dlf <- distLfit(annMax), "distLfit execution", all=TRUE)
  expect_message(dlf <- distLfit(annMax, truncate=0.7), "distLfit execution", all=TRUE)
  expect_message(dlf <- distLfit(annMax, threshold=50), "distLfit execution", all=TRUE)
  expect_message(dlf <- distLfit(annMax), "distLfit execution", all=TRUE)
})
test_that("distLquantile can deal with a given dlf",{
  dlf <- distLfit(annMax)
  # dlf must be passed via the named 'dlf' argument, not as 'x'.
  expect_error(distLquantile(dlf, truncate=0.7), "x must be a vector")
  distLquantile(dlf=dlf, truncate=0.7)
  expect_message(dlf <- distLfit(annMax, threshold=50), "distLfit execution")
  expect_message(dlf <- distLfit(annMax), "distLfit execution")
})
test_that("distLquantile can handle emp, truncate",{
  expect_equal(nrow(distLquantile(annMax, emp=FALSE)), ndist-19) # only distributions in lmomco
  aq <- distLquantile(annMax, truncate=0.8, probs=0.95) # POT
  #round(aq,4)
  # expected output (depending on lmomco version)
  ex <- read.table(header=TRUE, text="
                   95%   RMSE
exp              101.1631 0.0703
lap              100.5542 0.0774
gpa              103.4762 0.0778
wak              103.4762 0.0778
wei              102.7534 0.0796
pe3              102.4791 0.0806
kap              106.0260 0.0816
gno              102.1442 0.0822
ln3              102.1442 0.0822
gev              101.9731 0.0831
glo              101.4164 0.0870
pdq3             101.2073 0.0875 # added Aug 2022
gum              102.5499 0.0893
ray              103.6840 0.0971
pdq4             107.0252 0.1023 # added Aug 2022
gam              103.8951 0.1128
rice             104.2135 0.1217
nor              104.2161 0.1218
revgum           104.9992 0.1595
empirical        109.2000     NA
quantileMean     105.7259     NA
weighted1        102.9910     NA # |
weighted2        102.8478     NA # | > changed Aug 2022, ignored in test
weighted3        102.5979     NA # |
weightedc             NaN     NA
GPD_LMO_lmomco   103.4762 0.0156
GPD_LMO_extRemes  99.8417 0.0163
GPD_PWM_evir     100.9874 0.0169
GPD_PWM_fExtremes 100.7009 0.0176
GPD_MLE_extRemes  99.0965 0.0161
GPD_MLE_ismev    108.8776 0.0467
GPD_MLE_evd      108.4444 0.0454
GPD_MLE_Renext_Renouv 108.4226 0.0453
GPD_MLE_evir           NA     NA
GPD_MLE_fExtremes      NA     NA
GPD_GML_extRemes 100.9103 0.0161 # changed from 99.0965 (2022-11-16) after bug fix by Eric G.
GPD_MLE_Renext_2par 166.9137 0.0958
GPD_BAY_extRemes       NA     NA
n_full            35.0000     NA
n                  7.0000     NA
threshold         82.1469     NA")
  colnames(ex) <- colnames(aq)
  ex <- as.matrix(ex)
  # Compare only lmomco distributions and GPD/bookkeeping rows.
  tsta <- rownames(aq) %in% lmomco::dist.list() | substr(rownames(aq),1,3) %in% c("GPD","n_f","n","thr")
  tste <- rownames(ex) %in% lmomco::dist.list() | substr(rownames(ex),1,3) %in% c("GPD","n_f","n","thr")
  tsta[rownames(aq)=="GPD_GML_extRemes"] <- FALSE # excluded while extRemes is being updated
  tste[rownames(ex)=="GPD_GML_extRemes"] <- FALSE
  if(is.na(aq["GPD_MLE_Renext_Renouv",1]))
  {
    tsta[rownames(aq)=="GPD_MLE_Renext_Renouv"] <- FALSE # excluded on weird Mac CRAN check
    tste[rownames(ex)=="GPD_MLE_Renext_Renouv"] <- FALSE
  }
  expect_equal(round(aq[tsta,],1), round(ex[tste,],1))
  dd <- distLquantile(annMax, selection="gpa", weighted=FALSE, truncate=0.001)
  expect_equal(sum(is.na(dd[1:15,1:3])), 0)
  expect_equal(dd["gpa",1:3], dd["GPD_LMO_lmomco",1:3])
})
test_that("distLquantile can handle list",{
  # Compare several GPD Fitting functions:
  distLquantile(annMax, threshold=70, selection="gpa", weighted=FALSE, list=TRUE)
  expect_is(distLquantile(annMax, truncate=0.62, list=TRUE), "list")
  expect_is(distLquantile(annMax, threshold=70, list=TRUE), "list")
})
test_that("distLquantile can handle inputs with (rare) errors",{
  # invalid lmoms
  xx1 <- c(4.2, 1.1, 0.9, 5, 0.6, 5.1, 0.9, 1.2, 0.6, 0.7, 0.9, 1.1, 1.3,
           1.4, 1.4, 0.6, 3, 1.6, 0.5, 1.4, 1.1, 0.5, 1.3, 3.6, 0.5)
  expect_message(distLquantile(xx1, truncate=0.8),
                 "Note in distLfit: L-moments are not valid. No distributions are fitted.")
  # kap failed
  xx2 <- c(0.6, 1.6, 2.2, 0.6, 0.9, 3.3, 1.3, 4.7, 0.9, 0.8, 0.5, 0.8, 0.6, 0.7, 1.1, 0.9,
           5.4, 3.9, 0.9, 0.7, 0.6, 0.7, 15.1, 2.7, 0.7, 1, 0.5, 0.6, 1, 0.9, 1.4)
  dd <- distLquantile(xx2, truncate=0.8)
  expect_equal(dd["kap","RMSE"], NA_real_)
  # kap and ln3
  xx3 <- c(0.7, 1.5, 0.7, 2.6, 0.7, 0.8, 1.9, 5.4, 1.4, 1, 1.7, 0.8, 1.3, 0.8, 0.9, 0.5,
           0.5, 5.1, 0.9, 1, 1, 1.4, 1.5, 1.4, 4.9, 0.6, 4.3, 0.7, 0.7, 1.2, 0.9, 0.8)
  expect_warning(dd <- distLquantile(xx3, truncate=0.8),
                 glob2rx("in parln3(lmom, ...): L-skew is negative, try reversing the data*"))
  expect_equal(dd["kap","RMSE"], NA_real_)
  # strongly skewed (gno):
  xx4 <- c(2.4,2.7,2.3,2.5,2.2, 62.4 ,3.8,3.1)
  expect_warning(dd <- distLquantile(xx4),
                 glob2rx("in pargno(lmom, ...): L-skew is too large*"), ignore.case=TRUE)
  # kap should fail:
  xx5 <- c(2.4, 2.5, 2.6, 2.9, 4.2, 4.6, 5.7)
  distLfit(xx5)$parameter$kap
  # Helper: all kap quantiles for the given sample must come back NA.
  dfun <- function(xxx) expect_true(all(is.na(distLquantile(xxx, probs=0:10/10,
                                                            sel="kap", emp=FALSE)["kap",])))
  dfun(xx5)
  dfun(c(2.2, 2.3, 2.3, 2.3, 4.1, 8.8))
  dfun(c(2.2, 2.3, 2.4, 2.5, 3.2, 4.2, 4.5, 5.9, 6))
  dfun(c(1.8, 1.8, 2, 2, 2.6, 2.7, 3.7, 3.7))
  dfun(c(2.2, 2.2, 2.3, 2.9, 3.4, 4.4, 5.2))
  dfun(c(2.1, 2.2, 2.5, 3.2, 7.8, 16.1)) # kap has 4 distinct values here...
  # wakeby (and others) with unrealistically high values:
  xx6 <- c(0.342, 0.398, 0.415, 0.415, 0.462, 0.477, 0.491, 0.756, 0.763, 1.699)
  d6 <- distLquantile(xx6, probs=c(0.8,0.9,0.99,0.9999), list=TRUE)
  plotLfit(d6, xlim=c(0,2), nbest=10); d6$quant[1:10,] # 36!!!
  # works fine here:
  xx7 <- c(0.415, 0.415, 0.431, 0.447, 0.531, 0.544, 0.643, 0.732, 0.82, 1.134)
  d7 <- distLquantile(xx7, probs=c(0.8,0.9,0.99,0.9999), list=TRUE)
  plotLfit(d7, xlim=c(0,2), nbest=10); d7$quant[1:10,] # 4 (good)
})
|
a177311663cbc4b23a04720e02ff8285b4642606
|
15794c233b100ced4729bf8dd3f83cc312998543
|
/man/data2016.Rd
|
98bd2b90af423b5a837894934ca85d51beaffea6
|
[] |
no_license
|
alexchouraki/ProjetAlex
|
ea8613cbe10fbb038d0d4ba3a54fae05426646dc
|
1695936d8771f1ab47aa9257a49bfed2726821ac
|
refs/heads/master
| 2021-08-07T09:27:06.594665
| 2017-11-07T23:57:32
| 2017-11-07T23:57:32
| 107,143,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 362
|
rd
|
data2016.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data2016.R
\docType{data}
\name{data2016}
\alias{data2016}
\title{data2016}
\description{
data2016
}
\references{
\href{https://www.kaggle.com/unsdsn/world-happiness}{Kaggle: World Happiness Report}
}
\author{
Kaggle \email{alexandre.chouraki@hec.edu}
}
\keyword{data}
\keyword{happiness}
\keyword{world}
|
70e0ec468ec2c6fc63e3c2df98dbd36007e577ac
|
c527575648ed7911e0fd223b4f9fa256c612d73f
|
/JB_timeSeries.R
|
df7a8ab22936607f97a8d49e76647a6a0cc07f8f
|
[] |
no_license
|
doeungim/ADP-1
|
794890bd7cb4c1d6870baf5f444a4aa877733f45
|
e575dadc8cfdf7f630fa1f481fcb2f3fc8f4196c
|
refs/heads/master
| 2022-12-23T11:54:15.727525
| 2020-09-28T12:54:54
| 2020-09-28T12:54:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,706
|
r
|
JB_timeSeries.R
|
#################
## Time series analysis (study notes) ##
#################
# 1. Time series data
#    Data observed over the flow of time.
# 2. Stationarity
#    Most real-world series are non-stationary and hard to analyse directly;
#    they must first be transformed into stationary series.
#    Conditions for stationarity:
#    - the mean is constant
#      (a series with a non-constant mean is made stationary by differencing)
#    - the variance does not depend on the time point
#      (stabilised by a transformation, e.g. taking logs)
#    - the autocovariance depends only on the lag, not on the time point
# 3. Time series models
# 3.1 Autoregressive model (AR)
#    The p previous observations influence the current one;
#    the error term is a white noise process.
#    Autocorrelation function (ACF): correlation of values k periods apart.
#    Partial ACF (PACF): correlation of two time points with the influence of
#    the values in between removed.
#    For AR models the ACF decays quickly while the PACF cuts off at some lag
#    (a PACF cut-off at lag 2 indicates an AR(1) model).
# 3.2 Moving average model (MA)
#    A finite combination of white noise terms, therefore always stationary.
#    The ACF cuts off while the PACF decays quickly.
# Autoregressive integrated moving average model (ARIMA)
#    A model for non-stationary series: differencing or transformation reduces
#    it to an AR, MA or combined ARMA model.
#    ARIMA(p, d, q) - d: differencing order / p: AR order / q: MA order
# Decomposition
#    Separating the general factors that influence a series:
#    seasonal, cyclical, trend and irregular (random) components.
#
# Workflow cheat-sheet (illustrative calls, not runnable as-is):
# 1) convert raw data into a time series
#      ts(data, frequency = n, start = c(start_year, start_month))
# 2) decompose a series into observed, trend, seasonal and random parts
#      decompose(data)
# 3) simple moving average of a series
#      SMA(data, n = window_size)
# 4) difference a series
#      diff(data, differences = n_diff)
# 5) inspect ACF values and plot for a cut-off lag
#      acf(data, lag.max = n_lags)
# 6) inspect PACF values and plot for a cut-off lag
#      pacf(data, lag.max = n_lags)
# 7) select the best ARIMA model for the data automatically
#      auto.arima(data)
# 8) fit the selected ARIMA model to the data
#      arima(data, order = c(p, d, q))
# 9) forecast future values from the fitted model
#      forecast.Arima(fittedData, h = n_ahead)
# 10) plot a time series
#      plot.ts(series)
# 11) plot a forecast
#      plot.forecast(forecast_object)
##########################################
## Time series practice: age at death of English kings ##
##########################################
library(TTR)
library(forecast)
# Age at death of successive English kings (Hyndman's time series data library)
kings <- scan("http://robjhyndman.com/tsdldata/misc/kings.dat", skip = 3)
kings
kings_ts <- ts(kings)
kings_ts
plot.ts(kings_ts)
# Simple moving averages with increasing window sizes
kings_sma3 <- SMA(kings_ts, n = 3)
kings_sma8 <- SMA(kings_ts, n = 8)
kings_sma12 <- SMA(kings_ts, n = 12)
par(mfrow = c(2,2))
plot.ts(kings_ts)
plot.ts(kings_sma3)
plot.ts(kings_sma8)
plot.ts(kings_sma12)
# Make the data stationary through differencing
kings_diff1 <- diff(kings_ts, differences = 1)
kings_diff2 <- diff(kings_ts, differences = 2)
kings_diff3 <- diff(kings_ts, differences = 3)
plot.ts(kings_ts)
plot.ts(kings_diff1) # the first difference already shows a fairly stationary pattern
plot.ts(kings_diff2)
plot.ts(kings_diff3)
par(mfrow = c(1,1))
mean(kings_diff1); sd(kings_diff1)
# Identify an ARIMA model from the first-differenced data
acf(kings_diff1, lag.max = 20) # significant up to lag 2; ACF cut-off at lag 2 --> MA(1)
pacf(kings_diff1, lag.max = 20) # PACF cut-off at lag 4 --> AR(3)
# --> ARIMA(3,1,1) --> AR(3), I(1), MA(1) : (3,1,1)
# Select an ARIMA model automatically
auto.arima(kings) # --> ARIMA(0,1,1)
# Forecasting
kings_arima <- arima(kings_ts, order = c(3,1,1)) # order identified manually from the differencing/ACF/PACF above
kings_arima
# Forecast the next 5 values
kings_fcast <- forecast(kings_arima, h = 5)
kings_fcast
plot(kings_fcast)
kings_arima1 <- arima(kings_ts, order = c(0,1,1)) # order suggested by auto.arima
kings_arima1
kings_fcast1 <- forecast(kings_arima1, h = 5)
kings_fcast1
plot(kings_fcast)
plot(kings_fcast1)
############################################
## Time series practice: resort souvenir shop monthly sales ##
############################################
# Monthly sales of a beach resort souvenir shop (Hyndman's data library)
data <- scan("http://robjhyndman.com/tsdldata/data/fancy.dat")
fancy <- ts(data, frequency = 12, start = c(1987, 1))
fancy
plot.ts(fancy) # variance grows over time --> stabilise it with a log transform
fancy_log <- log(fancy)
plot.ts(fancy_log)
fancy_diff <- diff(fancy_log, differences = 1)
plot.ts(fancy_diff)
# The mean is now roughly constant, but the variance is still large at
# particular times of year --> a seasonal model is recommended over plain ARIMA
acf(fancy_diff, lag.max = 100)
pacf(fancy_diff, lag.max = 100)
auto.arima(fancy) # ARIMA(1,1,1)(0,1,1)[12]
fancy_arima <- arima(fancy, order = c(1,1,1), seasonal = list(order = c(0,1,1), period = 12))
# NOTE(review): forecast.Arima() was removed from recent 'forecast' releases;
# forecast() is the current name -- confirm the installed package version.
fancy_fcast <- forecast.Arima(fancy_arima)
plot(fancy_fcast)
|
f72bfbeb3cbcac031a97ef81baf0959a48bdf1a2
|
e407e8e724356282f85582eb8f9857c9d3d6ee8a
|
/man/split_data.Rd
|
6ee0c90f872b40979f061c004a60eb204df89823
|
[
"MIT"
] |
permissive
|
adibender/pammtools
|
c2022dd4784280881f931e13f172b0057825c5e4
|
ab4caeae41748c395772615c70a0cd5e206ebfe6
|
refs/heads/master
| 2023-08-29T17:30:30.650073
| 2023-07-19T10:30:06
| 2023-07-19T10:30:06
| 106,259,608
| 43
| 14
|
NOASSERTION
| 2023-07-19T10:30:08
| 2017-10-09T08:55:47
|
R
|
UTF-8
|
R
| false
| true
| 1,992
|
rd
|
split_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split-data.R
\name{split_data}
\alias{split_data}
\title{Function to transform data without time-dependent covariates into piece-wise
exponential data format}
\usage{
split_data(
formula,
data,
cut = NULL,
max_time = NULL,
multiple_id = FALSE,
...
)
}
\arguments{
\item{formula}{A two sided formula with a \code{\link[survival]{Surv}} object
on the left-hand-side and covariate specification on the right-hand-side (RHS).
The RHS can be an extended formula, which specifies how TDCs should be transformed
using specials \code{concurrent} and \code{cumulative}. The left hand-side can
be in start-stop-notation. This, however, is only used to create left-truncated
data and does not support the full functionality.}
\item{data}{Either an object inheriting from data frame or in case of
time-dependent covariates a list of data frames (of length 2), where the first data frame
contains the time-to-event information and static covariates while the second
(and potentially further data frames) contain information on time-dependent
covariates and the times at which they have been observed.}
\item{cut}{Split points, used to partition the follow up into intervals.
If unspecified, all unique event times will be used.}
\item{max_time}{If \code{cut} is unspecified, this will be the last
possible event time. All event times after \code{max_time}
will be administratively censored at \code{max_time}.}
\item{multiple_id}{Are occurences of same id allowed (per transition).
Defaults to \code{FALSE}, but is sometimes set to \code{TRUE}, e.g., in case of
multi-state models with back transitions.}
\item{...}{Further arguments passed to the \code{data.frame} method and
eventually to \code{\link[survival]{survSplit}}}
}
\description{
Function to transform data without time-dependent covariates into piece-wise
exponential data format
}
\seealso{
\code{\link[survival]{survSplit}}
}
\keyword{internal}
|
0c589d389a6dd4ab9f9ed1a3a6d93898df41af97
|
9ae7a61edcbc5c8e4bd83e0584538cf1bc0f7206
|
/R/hc_add_series.R
|
5d01c33411eb12de005927477441704517cd0d2e
|
[] |
no_license
|
sz-cgt/highcharter
|
266b57c891694590209338247e52ddd9ffd1f68c
|
9db6e89993bd21be4a602fe2e074b3e1fdca54be
|
refs/heads/master
| 2021-01-19T07:38:48.463846
| 2016-10-01T01:42:45
| 2016-10-01T01:42:45
| 64,556,449
| 0
| 0
| null | 2016-07-30T17:37:48
| 2016-07-30T17:37:48
| null |
UTF-8
|
R
| false
| false
| 2,421
|
r
|
hc_add_series.R
|
#' Remove series from a highchart object
#'
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param names The names of the series to delete.
#'
#' @export
hc_rm_series <- function(hc, names = NULL) {
  stopifnot(!is.null(names))

  ## Collect the name of every series currently in the chart.  `[[` returns
  ## NULL for an unnamed series and unlist() drops it silently -- the same
  ## behaviour as the purrr pipeline this replaces.
  series_names <- unlist(lapply(hc$x$hc_opts$series, function(s) s[["name"]]))

  ## Assigning NULL to list elements removes them.
  to_drop <- which(series_names %in% names)
  hc$x$hc_opts$series[to_drop] <- NULL

  hc
}
#' Adding series to highchart objects
#'
#' @param hc A \code{highchart} \code{htmlwidget} object.
#' @param ... Arguments defined in \url{http://api.highcharts.com/highcharts#chart}.
#'
#' @examples
#'
#' data("citytemp")
#'
#' hc <- highchart() %>%
#'   hc_xAxis(categories = citytemp$month) %>%
#'   hc_add_series(name = "Tokyo", data = citytemp$tokyo) %>%
#'   hc_add_series(name = "New York", data = citytemp$new_york)
#'
#' hc
#'
#' hc %>%
#'   hc_add_series(name = "London", data = citytemp$london, type = "area") %>%
#'   hc_rm_series(names = c("New York", "Tokyo"))
#'
#' @export
hc_add_series <- function(hc, ...) {
  ## Validate ... against the Highcharts API schema (project helper).
  validate_args("add_series", eval(substitute(alist(...))))

  dots <- list(...)

  ## A single numeric value must be wrapped in a list so that it serialises
  ## as a JSON array rather than a scalar.  `&&` (scalar AND) replaces the
  ## element-wise `&` of the original: both operands are length one here and
  ## && additionally short-circuits when `data` is absent.
  if (is.numeric(dots$data) && length(dots$data) == 1) {
    dots$data <- list(dots$data)
  }

  ## Append the new series definition to the chart options.
  lst <- do.call(list, dots)
  hc$x$hc_opts$series <- append(hc$x$hc_opts$series, list(lst))

  hc
}
# hc_add_series2 <- function(hc, data, ...){
# UseMethod("hc_add_series2", data)
# }
#
# hc_add_series2.default <- function(hc, data, ...) {
#
# # validate_args("add_series", eval(substitute(alist(...))))
#
# dots <- list(...)
#
# if (is.numeric(data) & length(data) == 1) {
# data <- list(data)
# }
#
# dots <- append(list(data = data), dots)
#
# lst <- do.call(list, dots)
#
# hc$x$hc_opts$series <- append(hc$x$hc_opts$series, list(lst))
#
# hc
#
# }
#
# hc_add_series2.numeric <- function(hc, data, ...) {
# print("numeric function")
# hc_add_series2.default(hc, data, ...)
# }
#
# hc_add_series2.ts <- function(hc, data, ...) {
# print("ts function")
# hc_add_series_ts(hc, ts = data, ...)
# }
#
#
# highchart() %>%
# hc_add_series2(data = c(4)) %>%
# hc_add_series2(data = rnorm(5), type = "column", color = "red", name = "asd")
#
#
# highchart() %>%
# hc_add_series2(data = AirPassengers) %>%
# hc_add_series2(data = AirPassengers + 3000, color = "red", name = "asd")
|
1884aa149f039134ccc1a3732ec5b54b50d2400d
|
97031ea4feb150557fcca7ad65c135c032ddc9ae
|
/r/ujccalc.r
|
1767ca4378b2600d94bafbd29c4056f89683ef31
|
[] |
no_license
|
adamkc/HydroRSL
|
56f2cf39c7f9bebd7971f0488564c559418f4278
|
f464480a09ee873103da89feba37578728b9a8cf
|
refs/heads/master
| 2022-04-05T06:59:42.372706
| 2020-02-08T01:00:11
| 2020-02-08T01:00:11
| 220,350,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77
|
r
|
ujccalc.r
|
## Rating-curve discharge estimate for the UJC gauge: Q = a * stage^b.
##
## Args:
##   stg: stage (gauge height); may be a single value or a numeric vector.
## Returns: the estimated discharge(s), same length as `stg`.
ujccalc <- function(stg) {
  coef_a <- 13.084
  exp_b <- 3.9429
  coef_a * stg^exp_b
}
|
edf510318f5151380bdb9b4391d6a2a15ceb97fa
|
a287696020bda3e3f3cafc15aa6c6e698d2b6425
|
/man/MergePDFs.Rd
|
d89145ac26689a984c62a13564b6bc7e22b6b5bc
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0"
] |
permissive
|
ktreinen/Trends
|
4dfa6dcddfa50148878ae6abb71931578c8f58c2
|
91c0d5cd4dd932f9ca5fd4efaf8d983508743270
|
refs/heads/master
| 2022-01-19T11:08:24.560552
| 2019-03-28T20:29:36
| 2019-03-28T20:29:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,011
|
rd
|
MergePDFs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MergePDFs.R
\name{MergePDFs}
\alias{MergePDFs}
\title{Merge PDF Files}
\usage{
MergePDFs(path, pdfs, preserve.files = FALSE, open.file = FALSE)
}
\arguments{
\item{path}{character.
Path name of the folder containing the PDF files to merge.}
\item{pdfs}{character.
Vector of file names, if missing, all PDF files under \code{path} will be merged.}
\item{preserve.files}{logical.
If true, all individual PDF files are preserved after a merge is completed.}
\item{open.file}{logical.
If true, the merged PDF file is opened using your systems default PDF viewer.}
}
\value{
Returns the name of the merged file.
}
\description{
This function combines Portable Document Format (PDF) files into a single new PDF file.
}
\details{
Names of the individual PDF files are used as bookmarks in the merged file.
The merged file is placed one directory above the \code{path} folder.
}
\note{
Requires \href{https://www.pdflabs.com/tools/pdftk-server/}{PDFtk Server},
a cross-platform command-line tool for working with PDFs.
}
\examples{
\donttest{
# Create a temporary directory
dir.create(path <- file.path(tempdir(), "merge"))
# Write three single-page PDF files to the temporary directory
pdf(file.path(path, "f1.pdf"))
plot(seq_len(10), main = "f1a")
plot(sin, -pi, 2 * pi, main = "f1b")
plot(qnorm, col = "red", main = "f1c")
dev.off()
pdf(file.path(path, "f2.pdf"))
plot(table(rpois(100, 5)), type = "h", col = "yellow", main = "f2a")
dev.off()
pdf(file.path(path, "f3.pdf"))
plot(x <- sort(rnorm(47)), type = "s", col = "green", main = "f3a")
plot(x, main = "f3b")
dev.off()
# Merge PDF files into a single file and open it in your default viewer
MergePDFs(path, open.file = TRUE)
# Remove PDF files
unlink(path, recursive = TRUE)
}
}
\seealso{
\code{\link{RunAnalysis}}, \code{\link{system}}
}
\author{
J.C. Fisher, U.S. Geological Survey, Idaho Water Science Center
}
\keyword{utilities}
|
12302d0e46639981615fee1c4891faaa3096ab1e
|
8d65868a49cf236c662bc5019ac2400381f8ce1c
|
/plot4.R
|
52c0fa8f95e1513d4472d1526fdeba14b82aeebe
|
[] |
no_license
|
flanalytics/ExData_Plotting1
|
a7c2749551621b15c097cff79d14bd58ef8b523a
|
06556c005e699fc37d08b8c01b6ae157d784e008
|
refs/heads/master
| 2021-01-18T12:49:35.360769
| 2014-09-07T20:49:36
| 2014-09-07T20:49:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,494
|
r
|
plot4.R
|
## plot4.R -- Exploratory Data Analysis course project 1.
## Reads the UCI household power consumption data, subsets two days, and
## writes a 2x2 panel of charts to plot4.png.
## NOTE(review): the hard-coded Windows path below only works on the author's
## machine; run from a directory containing household_power_consumption.txt.
setwd("E:/Exploratory Data Analysis/Course Project 1")
library(data.table)
## Read in all the data from the Text File using the data.table fread function
## into data.table call dat
dat<-fread(input="household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",stringsAsFactors=FALSE)
## Now select out records for 1st & 2nd Feb 2007 into the data.frame daydat
setkey(dat,Date) ## Index the dat datatable on the Date field
daydat<-as.data.frame(rbind(dat["1/2/2007"],dat["2/2/2007"]))
## Due to a bug in the fread function numeric columns containing a character
## NA value are converted to character before the NA conversion
## We need to ensure that columns that should be numeric are now numeric
for (col in 3:9) {daydat[,col]<-as.numeric(daydat[,col])}
## Also need to create a DateTime field from the Date and Time fields
daydat$DateTime<-as.POSIXct((strptime(paste(daydat$Date,daydat$Time,sep=""),"%d/%m/%Y %H:%M:%S")))
## Now plot 4 charts to plot4.png with size of 480x480 pixels
png(filename = "plot4.png", width = 480, height = 480,units = "px",
    pointsize = 12, bg = "white", res = NA,restoreConsole = TRUE)
par(mfrow=c(2,2))
## Panel 1: Show the Global Active Power for each data point
## Turn off default labels on Xaxis and display days of week.
plot(daydat$DateTime,daydat$Global_active_power,type="l",
     ylab="Global Active Power",xlab="")
## Panel 2: Show the Voltage for each data point
## Turn off default labels on Xaxis and display days of week.
plot(daydat$DateTime,daydat$Voltage,type="l", ylab="Voltage"
     ,xlab="datetime")
## Panel 3: Show the Power for each Energy Sub Meter for each data point
## Turn off default labels on Xaxis and display days of week.
## Place a legend in the top right to label the three lines, with no lines around the box
with ( daydat, {
  plot(DateTime,Sub_metering_1 ,type="l", ylab="Energy sub metering"
       ,xlab="",col="black")
  lines(DateTime,Sub_metering_2,type="l",col="red")
  lines(DateTime,Sub_metering_3,type="l",col="blue")
})
## Place a legend in the top right to label the three lines
legend("topright", col = c("black","red","blue"),lwd=2,bty="n",cex=.95,
       legend=(colnames(daydat)[7:9]))
## Panel 4: Show the Global Reactive Power for each data point
## Turn off default labels on Xaxis and display days of week.
plot(daydat$DateTime,daydat$Global_reactive_power,type="l", ylab="Global_reactive_power"
     ,xlab="datetime")
dev.off()
|
527b6c68614a47adf4cf45ee555861c1b9c0bd6a
|
91c247ab34db8fb2bee5592ae8977bd8b49ec0ac
|
/R/proj2_plotFun.R
|
7016394c85a3e8bfdd96c968f5ad1dc872552f60
|
[] |
no_license
|
cran/ClinicalUtilityRecal
|
d408ea107d673fcdbb65859bfd36c9ccf42508bb
|
c3bc634a596fab70462193a531c28420d27ac7a6
|
refs/heads/master
| 2022-04-15T01:53:01.885182
| 2020-04-15T10:10:02
| 2020-04-15T10:10:02
| 256,159,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,664
|
r
|
proj2_plotFun.R
|
##### Plotting Functions #######
## Declare ggplot2's ..count.. pronoun so R CMD check does not flag it as an
## undefined global in the plotting code below.
utils::globalVariables(c("..count.."))
### sNB Recalibration Curve with std error bars
# plotting snb as function of t -- still need to add other potential std error bars
##
## Plot the standardized net benefit (sNB) attainable under recalibration as a
## function of the decision threshold t, and mark the sNB achieved by the
## original risk score (red) and the standard logistic-recalibrated score
## (blue), with a horizontal line stdErrThresh standard errors below the
## maximum.  Relies on the package-internal helpers snb.t(), nb() and
## snbVar.tmax() (defined elsewhere in this package).
##
## Args:
##   p:            original risk-score predictions.
##   p.std:        standard logistic-recalibrated predictions.
##   y:            binary outcomes.
##   r:            risk threshold used in the net-benefit calculation.
##   stdErrThresh: how many standard errors below the maximum to draw the line.
##   ylim:         optional y-axis limits; derived from the data when NULL.
##   titlePlot:    main title of the plot.
##   risk.model.std: if TRUE, draw standard-error bars on the two points.
snbRecalPlot <- function(p,p.std,y,r,stdErrThresh=1,ylim=NULL,
                         titlePlot = "Potential sNB Under Recalibration",
                         risk.model.std=TRUE){
  ## sNB recalibration Plot: evaluate sNB on a fine grid of thresholds.
  t.vec <- seq(0,1,0.0005)
  sNB <- cbind(t.vec,NA)
  for(i in 1:length(t.vec)){
    pick.t <- t.vec[i]
    sNB[i,2] <- snb.t(par= pick.t,y = y,p = p,r = r)
  }
  ## Locate the threshold that maximises sNB on the grid.
  t.max <- NULL
  t.max$maximum <- sNB[which.max(sNB[,2]),1]
  t.max$objective <- sNB[which.max(sNB[,2]),2]
  ## Standard error of sNB at the maximising threshold.
  sNB.max.se <- snbVar.tmax(tVec = t.max$maximum,y = y,p = p,r = r)
  upp <- t.max$objective + stdErrThresh*sNB.max.se
  low <- t.max$objective - stdErrThresh*sNB.max.se
  ## points marking orig and std recal sNB
  snb.orig <- nb(y = y,p = p,r = r)$snb
  snb.recal <- nb(y = y,p = p.std,r = r)$snb
  if(is.null(ylim)){
    ylim = c(min(c(snb.orig,snb.recal,0)),max(c(snb.orig,snb.recal,0.8)))
  }
  plot(sNB[,1],sNB[,2],type="l",col="black",lwd=2,ylim=ylim,
       xlab="Threshold (t) for Decision Rule",ylab="sNB",
       main=titlePlot)
  ## standard error bars for points: find the grid threshold whose sNB is
  ## closest to each model's achieved sNB and compute its standard error.
  snb.t.orig <- sNB[which.min(abs(snb.orig-sNB[,2])),1]
  sNB.t.orig.se <- snbVar.tmax(tVec = snb.t.orig,y = y,p = p,r = r)
  snb.t.std <- sNB[which.min(abs(snb.recal-sNB[,2])),1]
  sNB.t.std.se <- snbVar.tmax(tVec = snb.t.std,y = y,p = p,r = r)
  points(snb.t.std,
         sNB[which.min(abs(snb.recal-sNB[,2])),2],col="blue",pch=1,cex=1.3,lwd=3)
  points(snb.t.orig,
         sNB[which.min(abs(snb.orig-sNB[,2])),2],col="red",pch=1,cex=1.2,lwd=3)
  ## Only the lower reference line is drawn; the upper one stays commented out.
  #abline(h=upp,lwd=1,col="black",lty=c(2,3,4))
  abline(h=low,lwd=1,col="black",lty=c(2,3,4))
  if(risk.model.std==TRUE){
    arrows(x0 = snb.t.std,y0 = sNB[which.min(abs(snb.recal-sNB[,2])),2] - sNB.t.std.se,
           x1 = snb.t.std,y1 = sNB[which.min(abs(snb.recal-sNB[,2])),2] + sNB.t.std.se, angle = 90,length = 0.1,lwd=1.5,code = 3,col="blue")
    arrows(x0 = snb.t.orig,y0 = sNB[which.min(abs(snb.orig-sNB[,2])),2] - sNB.t.orig.se,
           x1 = snb.t.orig,y1 = sNB[which.min(abs(snb.orig-sNB[,2])),2] + sNB.t.orig.se, angle = 90,length = 0.1,lwd=1.5,code = 3,col="red")
  }
  legend("topleft",paste("Max(sNB) =",round(t.max$objective,3)),bty="n")
  legend("topright",c("Orig Risk Model","Std. Log. Recal. Risk Model",
                      paste(stdErrThresh,"Std Err from Maximum")),
         col=c("red","blue","black"),pch=c(1,1,NA),lwd=c(1.5,1.5),lty = c(NA,NA,2),bty = "n")
}
### calibration plots and histogram
### Calibration curves (lowess smooth of outcome vs. predicted risk) for an
### original risk score and, optionally, its standard and weighted/constrained
### recalibrated versions, with optional histograms of each score underneath.
###
### Arguments:
###   y          binary outcome vector
###   p          original predicted risks
###   p.std      standard-recalibrated risks (required when stdPlot = TRUE)
###   p.recal    weighted/constrained recalibrated risks (required when recalPlot = TRUE)
###   stdPlot, recalPlot  toggle which recalibrated curves are drawn
###   xlim, ylim         axis limits for the calibration plot
###   label, label2, label3  x-axis labels for the three histograms
###   legendLab  legend labels for (original, standard, weighted) curves
###   mainTitle  calibration plot title
###   hist, ylimHist     draw histograms beneath the plot; histogram y limits
###   r, rl, ru  risk threshold and optional lower/upper bounds, drawn as
###              dotted / dot-dash vertical reference lines; a bound's line is
###              suppressed (linetype = NA) when that bound is infinite
###
### Fixes relative to the previous revision:
###   * the vertical line at `ru` tested is.infinite(abs(rl)) (copy-paste bug),
###     so its visibility was keyed on the LOWER bound; it now tests abs(ru)
###   * one bare theme() call lacked the ggplot2:: prefix used everywhere else
###     and would fail when ggplot2 is not attached
calCurvPlot <- function(y,p,p.std=NULL,p.recal=NULL,stdPlot=FALSE,recalPlot=FALSE,
                        xlim=c(0,1),ylim=c(0,1),
                        label="Original Risk Score",
                        label2 = "Standard Recalibrated Risk Score",
                        label3 = "Weighted/Constrained Recalibrated Risk Score",
                        legendLab = c("Orig.", "Std.", "Wt."),
                        mainTitle="Calibration of Risk Score",
                        hist=TRUE,ylimHist = c(0,0.5),
                        r,rl = -Inf, ru = Inf){
  ### wont work in plot null value so put something outside plotting range
  ## Empirical calibration curve: lowess smooth of y against the original risks
  orig.loess <- data.frame("x"=lowess(x = p,y = y,f = 2/3,iter = 0)$x,"y"=lowess(x = p,y = y,f = 2/3,iter = 0)$y)
  orig.loess$type <- "orig"
  hist.orig <- ggplot2::ggplot(orig.loess,ggplot2::aes(orig.loess$x)) +
    ggplot2::geom_histogram(binwidth = (xlim[2]-xlim[1])/20, ggplot2::aes(y = (..count..)/sum(..count..))) +
    ggplot2::labs(title =NULL, x = label,y="Percentage") +
    ggplot2::geom_vline(xintercept = r,linetype="dotted") +
    ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
    ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
    ggplot2::coord_cartesian(xlim=xlim, ylim=ylimHist)
  if(stdPlot==TRUE){
    ## Calibration curve + histogram for the standard-recalibrated score
    stdCal.loess <- data.frame("x"=lowess(x = p.std,y = y,f = 2/3,iter = 0)$x,"y"=lowess(x = p.std,y = y,f = 2/3,iter = 0)$y)
    stdCal.loess$type <- "std"
    hist.std <- ggplot2::ggplot(stdCal.loess,ggplot2::aes(stdCal.loess$x)) +
      ggplot2::geom_histogram(binwidth = (xlim[2]-xlim[1])/20, ggplot2::aes(y = (..count..)/sum(..count..))) +
      ggplot2::labs(title =NULL, x = label2,y="Percentage") +
      ggplot2::geom_vline(xintercept = r,linetype="dotted") +
      ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
      ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
      ggplot2::coord_cartesian(xlim=xlim, ylim=ylimHist)
  }
  else{stdCal.loess <- NULL}
  if(recalPlot==TRUE){
    ## Calibration curve + histogram for the weighted/constrained score
    wtCal.loess <- data.frame("x"=lowess(x = p.recal,y = y,f = 2/3,iter = 0)$x,"y"=lowess(x = p.recal,y = y,f = 2/3,iter = 0)$y)
    wtCal.loess$type <- "wt"
    hist.wt <- ggplot2::ggplot(wtCal.loess,ggplot2::aes(wtCal.loess$x)) +
      ggplot2::geom_histogram(binwidth = (xlim[2]-xlim[1])/20, ggplot2::aes(y = (..count..)/sum(..count..))) +
      ggplot2::labs(title =NULL, x = label3,y="Percentage") +
      ggplot2::geom_vline(xintercept = r,linetype="dotted") +
      ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
      ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
      ggplot2::coord_cartesian(xlim=xlim, ylim=ylimHist)
  }
  else {wtCal.loess <- NULL}
  ## Stack the curves; NULL components drop out of rbind
  loessDat <- as.data.frame(rbind(orig.loess,stdCal.loess,wtCal.loess))
  if(stdPlot==TRUE & recalPlot==TRUE){
    # All three curves
    plot.cal <- ggplot2::ggplot(as.data.frame(loessDat),ggplot2::aes(.data$x,y = .data$y,group=.data$type,col=.data$type)) +
      ggplot2::geom_line() + ggplot2::coord_cartesian(xlim=xlim,ylim=ylim) +
      ggplot2::geom_abline(intercept=0,slope=1,linetype="dashed") +
      ggplot2::geom_vline(xintercept = r,linetype="dotted") +
      ggplot2::geom_hline(yintercept = r,linetype="dotted") +
      ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
      ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
      ggplot2::labs(title = mainTitle, x = "Predicted Risk", y = "Observed Event Rate") +
      ggplot2::scale_colour_discrete(name="Risk Score",
                                     breaks=c("orig", "std", "wt"),
                                     labels=legendLab)
    if(hist==TRUE){
      # The invisible geom_line + scale_color_manual(values = NA) trick keeps the
      # histogram panels the same width as the legended calibration panel.
      suppressWarnings(
        cowplot::plot_grid(plot.cal, NULL,
                           hist.orig + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST"))
                           + ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           NULL,
                           hist.std + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST"))
                           + ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           NULL,
                           hist.wt + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST"))
                           + ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           align = "hv",axis=1, ncol = 1,rel_heights = c(1,-0.2,0.6,-0.2,0.6,-0.2,0.6))
      )
    }
    else{
      suppressWarnings(print(plot.cal))
    }
  }
  else if(stdPlot==TRUE & recalPlot==FALSE){
    # Original + standard-recalibrated curves only
    plot.cal <- ggplot2::ggplot(as.data.frame(subset(loessDat,subset = loessDat$type!="wt")),
                                ggplot2::aes(.data$x,y = .data$y,group=.data$type,col=.data$type)) +
      ggplot2::geom_line() + ggplot2::coord_cartesian(xlim=xlim,ylim=ylim) +
      ggplot2::geom_abline(intercept=0,slope=1,linetype="dashed") +
      ggplot2::geom_vline(xintercept = r,linetype="dotted") +
      ggplot2::geom_hline(yintercept = r,linetype="dotted") +
      ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
      ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
      ggplot2::labs(title = mainTitle, x = "Predicted Risk", y = "Observed Event Rate") +
      ggplot2::scale_colour_discrete(name="Risk Score",
                                     breaks=c("orig", "std"),
                                     labels=legendLab[1:2])
    if(hist==TRUE){
      suppressWarnings(
        cowplot::plot_grid(plot.cal,
                           NULL,
                           hist.orig + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST")) +
                             ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           NULL,
                           hist.std + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST")) +
                             ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           align = "hv",axis=1, ncol = 1,rel_heights = c(1,-0.2,0.6,-0.2,0.6))
      )
    }
    else{suppressWarnings(print(plot.cal))}
  }
  else if(stdPlot==FALSE & recalPlot==TRUE){
    # Original + weighted/constrained curves only
    plot.cal <- ggplot2::ggplot(as.data.frame(subset(loessDat,subset = loessDat$type!="std")),
                                ggplot2::aes(.data$x,y = .data$y,group=.data$type,col=.data$type) ) +
      ggplot2::geom_line() + ggplot2::coord_cartesian(xlim=xlim,ylim=ylim) +
      ggplot2::geom_abline(intercept=0,slope=1,linetype="dashed") +
      ggplot2::geom_vline(xintercept = r,linetype="dotted") +
      ggplot2::geom_hline(yintercept = r,linetype="dotted") +
      ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
      ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
      ggplot2::labs(title = mainTitle, x = "Predicted Risk", y = "Observed Event Rate") +
      ggplot2::scale_colour_discrete(name="Recalibration\nType",
                                     breaks=c("orig", "wt"),
                                     labels=legendLab[c(1,3)])
    if(hist==TRUE){
      suppressWarnings(
        cowplot::plot_grid(plot.cal,
                           NULL,
                           hist.orig + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST")) +
                             ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           NULL,
                           hist.wt + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST")) +
                             ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           align = "hv",axis=1, ncol = 1,rel_heights = c(1,-0.2,0.6,-0.2,0.6))
      )
    }
    else {suppressWarnings(print(plot.cal))}
  }
  else if(stdPlot==FALSE & recalPlot==FALSE){
    # Original curve only
    plot.cal <- ggplot2::ggplot(as.data.frame(subset(loessDat,subset = loessDat$type=="orig")),
                                ggplot2::aes(.data$x,y = .data$y,group=.data$type,col=.data$type)) +
      ggplot2::geom_line() + ggplot2::coord_cartesian(xlim=xlim,ylim=ylim) +
      ggplot2::geom_abline(intercept=0,slope=1,linetype="dashed") +
      ggplot2::geom_vline(xintercept = r,linetype="dotted") +
      ggplot2::geom_hline(yintercept = r,linetype="dotted") +
      ggplot2::geom_vline(xintercept = rl,linetype=ifelse(is.infinite(abs(rl)),NA,"dotdash")) +
      ggplot2::geom_vline(xintercept = ru,linetype=ifelse(is.infinite(abs(ru)),NA,"dotdash")) +
      ggplot2::labs(title =" Calibration of Risk Score", x = "Predicted Risk", y = "Observed Event Rate") +
      ggplot2::scale_colour_discrete(name="Recalibration\nType",
                                     breaks=c("orig"),
                                     labels=c(label))
    if(hist==TRUE){
      suppressWarnings(
        cowplot::plot_grid(plot.cal,
                           NULL,
                           hist.orig + ggplot2::geom_line(ggplot2::aes(x = p,y=y,color = "TEST")) +
                             ggplot2::scale_color_manual(values = NA) + ggplot2::theme(legend.text = ggplot2::element_blank(), legend.title = ggplot2::element_blank()),
                           align = "hv",axis=1, ncol = 1,
                           rel_heights = c(1,-0.2,0.6)))
    }
    else {suppressWarnings(print(plot.cal))}
  }
}
|
541161d24c07bb748ff5081a06c29d977b129ab6
|
f339641cefa9025ef94fe53c9d23856b4ac69933
|
/man/geomEcuador.Rd
|
0d02498f5cfadfc34bb3f7bf554d89e0da624f8a
|
[
"MIT"
] |
permissive
|
sjbeckett/localcovid19now
|
8ba7f044d8e7458cb4c8c490c8389b596624c84f
|
af7979dd7e4b1f73751617bd2bae17bdbd285184
|
refs/heads/main
| 2023-04-17T21:06:30.905493
| 2023-01-23T18:31:06
| 2023-01-23T18:31:06
| 413,489,189
| 4
| 4
|
MIT
| 2022-12-23T18:10:25
| 2021-10-04T15:53:44
|
R
|
UTF-8
|
R
| false
| true
| 331
|
rd
|
geomEcuador.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{geomEcuador}
\alias{geomEcuador}
\title{geomEcuador}
\format{
An object of class \code{sf} (inherits from \code{data.frame}) with 224 rows and 9 columns.
}
\usage{
geomEcuador
}
\description{
geomEcuador
}
\keyword{datasets}
|
d659cb151a06a8d0491a52ae243437ce54eb3bb1
|
90aacb74264c2bd3172a0ba90629c4ceeceabbed
|
/Plot_scripts/NMDS_all_samples_plots.R
|
f9027c06b059ae8a9533eb0142fc738b1eea0c82
|
[
"MIT"
] |
permissive
|
liberjul/Leaf_litter_communities
|
30b64c294285a5a8f9383add3088bf4b313dcfbf
|
fcbe167e6ff3d232c6d01a9d6b39e0e374d72c38
|
refs/heads/master
| 2021-11-27T07:28:49.257459
| 2021-11-15T20:20:00
| 2021-11-15T20:20:00
| 253,580,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,555
|
r
|
NMDS_all_samples_plots.R
|
# Setup: load plotting/ordination packages and read the rarefied OTU table
# plus the sample map (negatives already removed).
library(ggplot2)
library(ggpubr)
library(vegan)
library(patchwork)
# NOTE(review): absolute setwd() makes this script machine-specific — consider
# here::here() or an RStudio project instead.
setwd("C:/Users/julia/OneDrive - Michigan State University/Documents/MSU/Undergrad/Fall 2018/PLP 847/miseq_dat/Leaf_litter_communities")
map_wo_negs <- as.matrix(read.csv("./Data/DEM_map_wo_negs.csv", stringsAsFactors = F))
rare_otu <- as.matrix(read.csv("./Data/Rare_otu_table.csv"))
# Both matrices are keyed by sample: column names taken from the map's SampleID row.
colnames(map_wo_negs) <- map_wo_negs["SampleID",]
colnames(rare_otu) <- map_wo_negs["SampleID",]
# Generate coordinates of a covariance ellipse so ordiellipse() output can be
# drawn with ggplot2. Returns an (npoints + 1) x 2 matrix of points obtained by
# mapping the unit circle through the Cholesky factor of `cov`, then scaling
# and shifting by `center`.
veganCovEllipse <- function (cov, center = c(0, 0), scale = 1, npoints = 100)
{
  angles <- 2 * pi * (0:npoints) / npoints
  unit_circle <- cbind(cos(angles), sin(angles))
  ellipse <- unit_circle %*% chol(cov)
  t(center + scale * t(ellipse))
}
# --- NMDS on Bray-Curtis dissimilarities, with 97% standard-error ellipses ----
MDS_dat <- metaMDS(t(rare_otu)) # Calculate NMDS axes, using Bray-Curtis as default
MDS_points <- MDS_dat$points # Extract coordinates
MDS_stress <- MDS_dat$stress
MDS_dat_df <- as.data.frame(MDS_points) # Convert to a df
MDS_dat_df <- cbind(MDS_dat_df, t(map_wo_negs)) # Add map data
NMDS_bray = data.frame(MDS1 = MDS_points[,1], # Make dataframe for plotting
                       MDS2 = MDS_points[,2],
                       group=MDS_dat_df$Soil_Leaf_Litter_Leaf_swab,
                       species=MDS_dat_df$Plant_species,
                       site=MDS_dat_df$Site)
# ordiellipse() draws on the active device, so a device must exist first.
plot.new()
ord<-ordiellipse(MDS_dat, MDS_dat_df$Soil_Leaf_Litter_Leaf_swab, display = "sites", kind = "se", conf = 0.97, label = T) # Calculate ellipses
df_ell_bc <- data.frame() # Dataframe for storing ellipses
# NOTE(review): levels() returns NULL for a character column, which would make
# this loop run zero times — assumes the grouping column is a factor; verify.
for(g in levels(MDS_dat_df$Soil_Leaf_Litter_Leaf_swab)){
  df_ell_bc <- rbind(df_ell_bc, cbind(as.data.frame(with(MDS_dat_df[MDS_dat_df$Soil_Leaf_Litter_Leaf_swab==g,], # Add ellipse values
                                                         veganCovEllipse(ord[[g]]$cov,ord[[g]]$center,ord[[g]]$scale)))
                                      ,group=g))
}
NMDS_bray.mean=aggregate(NMDS_bray[,1:2],list(group=NMDS_bray$group),mean) # Calculate mean for groups
# Scatter of samples colored by substrate, shaped by site; host species shown by
# fill transparency; dashed paths are the group ellipses; stress annotated.
p_bc <- ggplot(data = NMDS_bray, aes(MDS1, MDS2)) + # Make plot
  geom_point(aes(color = group, shape = site),size=3) +
  geom_point(aes(color = group, fill = group, alpha = species, shape=site),size=3) +
  geom_path(data=df_ell_bc, aes(x=NMDS1, y=NMDS2,color=group), size=1, linetype=2) +
  labs(alpha="Host species", color="Substrate", shape="Site",
       x = "NMDS1", y = "NMDS2") +
  scale_shape_manual(values = 21:25) +
  scale_alpha_manual(values=c(0,1), guide =
                       guide_legend(label.theme = element_text(size = 10, angle = 0, face = "italic"),
                                    override.aes = list(pch = 21,
                                                        color = 1,
                                                        alpha = 1,
                                                        fill = c(NA, 1)))) +
  annotate(geom = "text", hjust = 0,
           x = min(NMDS_bray$MDS1), y = min(NMDS_bray$MDS2),
           label = paste("Stress =", round(MDS_stress, 4))) +
  theme_pubr() +
  guides(fill=FALSE) +
  # ggtitle("Bray-Curtis") +
  theme(plot.title = element_text(hjust=0.5),
        legend.position = "right",
        legend.justification = "left") +
  scale_color_discrete(labels=c("Endophytes","Epiphytes","Litter", "Soil"))
p_bc
ggsave("./Figures/bc_NDMS_all_samples.png", p_bc, width = 8, height = 6, units="in")
MDS_stress
# --- NMDS on Jaccard distances (plotting code kept commented out) -------------
# Note: this overwrites MDS_dat / MDS_points / MDS_dat_df from the Bray section.
MDS_dat <- metaMDS(t(rare_otu), distance = "jaccard") # Calculate NMDS axes, using Jaccard distance
MDS_points <- MDS_dat$points # Extract coordinates
MDS_dat_df <- as.data.frame(MDS_points) # Convert to a df
MDS_dat_df <- cbind(MDS_dat_df, t(map_wo_negs)) # Add map data
NMDS_jac = data.frame(MDS1 = MDS_points[,1], # Make dataframe for plotting
                      MDS2 = MDS_points[,2],
                      group=MDS_dat_df$Soil_Leaf_Litter_Leaf_swab,
                      species=MDS_dat_df$Plant_species,
                      site=MDS_dat_df$Site)
ord<-ordiellipse(MDS_dat, MDS_dat_df$Soil_Leaf_Litter_Leaf_swab, display = "sites", kind = "se", conf = 0.97, label = T) # Calculate ellipses
df_ell_jac <- data.frame() # Dataframe for storing ellipses
# NOTE(review): same caveat as above — levels() on a character column is NULL.
for(g in levels(MDS_dat_df$Soil_Leaf_Litter_Leaf_swab)){
  df_ell_jac <- rbind(df_ell_jac, cbind(as.data.frame(with(MDS_dat_df[MDS_dat_df$Soil_Leaf_Litter_Leaf_swab==g,], # Add ellipse values
                                                           veganCovEllipse(ord[[g]]$cov,ord[[g]]$center,ord[[g]]$scale)))
                                        ,group=g))
}
NMDS_jac.mean=aggregate(NMDS_jac[,1:2],list(group=NMDS_jac$group),mean) # Calculate mean for groups
# p_jac <- ggplot(data = NMDS_jac, aes(MDS1, MDS2)) + # Make plot
#   geom_point(aes(color = group, shape = site),size=3) +
#   geom_point(aes(color = group, fill = group, alpha = species, shape=site),size=3) +
#   geom_path(data=df_ell_jac, aes(x=NMDS1, y=NMDS2,color=group), size=1, linetype=2) +
#   labs(alpha="Host species", color="Substrate", shape="Site") +
#   scale_shape_manual(values = 21:25) +
#   scale_alpha_manual(values=c(0,1), guide =
#                        guide_legend(label.theme = element_text(size = 10, angle = 0, face = "italic"))) +
#   guides(fill=FALSE) +
#   ggtitle("Jaccard") +
#   theme_pubr() +
#   theme(plot.title = element_text(hjust=0.5),
#         legend.position = "right",
#         legend.justification = "left") +
#   scale_color_discrete(labels=c("Endophytes","Epiphytes","Litter", "Soil"))
# p_jac
# combined_NMDS <- p_bc + p_jac + plot_layout(guides="collect")
# ggsave("./Figures/Combined_NMDS_w_site.png", combined_NMDS, height=6, width=8, units="in")
# --- PCoA on Bray, Sørensen (binary Bray), and Jaccard distances --------------
bc.d = vegdist(t(rare_otu), method="bray")
sor.d = vegdist(t(rare_otu), method = "bray", binary = T)
jac.d = vegdist(t(rare_otu), method="jaccard")
# Proportion of variance explained by the first two principal coordinates
bc.pcoa = cmdscale(bc.d, eig=T)
ax1.v.bc = bc.pcoa$eig[1]/sum(bc.pcoa$eig)
ax2.v.bc = bc.pcoa$eig[2]/sum(bc.pcoa$eig)
sor.pcoa = cmdscale(sor.d, eig=T)
ax1.v.sor = sor.pcoa$eig[1]/sum(sor.pcoa$eig)
ax2.v.sor = sor.pcoa$eig[2]/sum(sor.pcoa$eig)
jac.pcoa = cmdscale(jac.d, eig=T)
ax1.v.jac = jac.pcoa$eig[1]/sum(jac.pcoa$eig)
ax2.v.jac = jac.pcoa$eig[2]/sum(jac.pcoa$eig)
# Print variance-explained values for inspection (interactive use)
ax1.v.bc
ax1.v.sor
ax1.v.jac
ax2.v.bc
ax2.v.sor
ax2.v.jac
ax1.v.bc + ax2.v.bc
ax1.v.sor + ax2.v.sor
ax1.v.jac + ax2.v.jac
# Assemble plotting frame for the Bray-Curtis PCoA; metadata columns come from
# MDS_dat_df built above (row order assumed to match the distance matrix).
bc.pcoa_df <- data.frame(ax1 = bc.pcoa$points[,1],
                         ax2 = bc.pcoa$points[,2],
                         substrate=MDS_dat_df$Substrate,
                         species=MDS_dat_df$Plant_species,
                         site=MDS_dat_df$Site)
pcoa_bc <- ggplot(data = bc.pcoa_df, aes(ax1, ax2)) + # Make plot
  geom_point(aes(color = substrate, shape = site),size=3) +
  geom_point(aes(color = substrate, fill = substrate, alpha = species, shape=site),size=3) +
  labs(alpha="Host species", color="Substrate", shape="Site",
       x = paste("PCoA1: ",100*round(ax1.v.bc,3),"% var. explained",sep=""),
       y = paste("PCoA2: ",100*round(ax2.v.bc,3),"%var. explained",sep="")) +
  scale_shape_manual(values = 21:25) +
  scale_alpha_manual(values=c(0,1), guide =
                       guide_legend(label.theme = element_text(size = 10, angle = 0, face = "italic"))) +
  guides(fill=FALSE) +
  theme_pubr() +
  theme(plot.title = element_text(hjust=0.5),
        legend.position = "right",
        legend.justification = "left") +
  scale_color_discrete(labels=c("Endophytes","Epiphytes","Litter", "Soil"))
pcoa_bc
ggsave("./Figures/bc_pcoa_all_samples.png", pcoa_bc, width = 8, height=6, units="in")
# PERMANOVA: substrate x species x site effects on Bray-Curtis community distance
ado_bray_pcoa <- adonis2(t(rare_otu) ~ substrate * species * site,
                         bc.pcoa_df, method="bray")
ado_bray_pcoa
|
e66e9a9c2b49575b8324f60a19307c8d5844e47d
|
f4836a9f9beb659ff275f3df3e198db7ccd3c8f5
|
/R/georamps.R
|
8555f11dec5c68e34223d53f1cda86765fbc3b2f
|
[] |
no_license
|
cran/ramps
|
f9d3d723d37adc8b8588fec7a28e39ec7beb2c1b
|
af67bcc81ff5271c7482c7e676bdee0ea0cbb75f
|
refs/heads/master
| 2023-04-09T06:11:34.205812
| 2023-03-13T13:30:02
| 2023-03-13T13:30:02
| 17,698,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,490
|
r
|
georamps.R
|
## Fit a Bayesian geostatistical linear model via the RAMPS MCMC algorithm.
##
## Arguments:
##   fixed       formula for the fixed-effects part of the model
##   random      optional grouping formula for random intercepts
##   correlation a spatial correlation structure (must be supplied)
##   data        data frame in which to evaluate the formulas
##   subset, weights  standard model-frame arguments
##   variance    list of one-sided formulas grouping measurement-error,
##               random-effect, and spatial variances
##   aggregate   list(grid, blockid) describing areal (block-averaged)
##               measurements; grid holds the integration points per block
##   kmat        optional user-supplied matrix mapping latent sites to data
##   control     MCMC settings from ramps.control()
##   contrasts   contrasts passed to model.matrix()
##
## Returns an object of class "ramps" holding MCMC samples and the model
## structures needed by the print/summary/prediction methods.
##
## NOTE(review): relies on package internals (ramps.control, unique.sites,
## sigma2tuning, ramps.engine, Initialize methods) and the Matrix package.
georamps <- function(fixed, random, correlation, data = sys.frame(sys.parent()),
   subset, weights, variance = list(fixed = ~ 1, random = ~ 1, spatial = ~ 1),
   aggregate = list(grid = NULL, blockid = ""), kmat = NULL,
   control = ramps.control(...), contrasts = NULL, ...)
{
   ## Create data frame containing all relevant variables
   ## Random effects are added later
   call <- match.call()
   val <- c(all.vars(fixed), all.vars(variance$fixed),
            all.vars(variance$spatial), all.vars(variance$random))
   if (!is.null(aggregate$grid)) val <- c(val, aggregate$blockid)
   spvars <- all.vars(getCovariateFormula(correlation))
   val <- reformulate(c(val, spvars))
   mfargs <- list(formula = val, data = data, weights = call[["weights"]],
                  subset = call[["subset"]], na.action = na.pass)
   mfdata <- do.call("model.frame", mfargs)
   ## Zero-out the coordinates for aggregate measurements
   # Areal observations get their coordinates from aggregate$grid instead, so
   # the per-observation coordinates are neutralized here.
   if (nchar(aggregate$blockid) > 0) {
      val <- c(aggregate$blockid, spvars)
      if (!all(val %in% colnames(aggregate$grid)))
         stop("Coordinates and 'blockid' must be given in 'grid'")
      aggregate$grid <- na.omit(aggregate$grid[,val])
      idx <- is.element(mfdata[,aggregate$blockid],
                        aggregate$grid[,aggregate$blockid])
      mfdata[idx, spvars] <- 0
   }
   ## Remove incomplete records from the data frame
   mfdata <- na.omit(mfdata)
   ## Extract weights
   weights <- model.weights(mfdata)
   ## Drop the model frame attributes that are no longer needed
   attr(mfdata, "terms") <- NULL
   # Extract response vector and main effects design matrix
   mf <- model.frame(fixed, data = mfdata, drop.unused.levels = TRUE)
   mt <- attr(mf, "terms")
   y <- model.response(mf, "numeric")
   val <- model.matrix(mt, mf, contrasts)
   # Stored sparse to keep the MCMC engine's linear algebra cheap
   xmat <- as(val, "sparseMatrix")
   attr(xmat, "contrasts") <- attr(val, "contrasts")
   ## Indices to map measurement error variances
   variance$fixed <- if (is.null(variance$fixed)) factor(rep(1, nrow(mfdata)))
                     else factor(getCovariate(mfdata, variance$fixed))
   ## Structures for latent spatial parameters
   if (missing(correlation)) {
      stop("Unspecified correlation structure")
   } else {
      ## Create matrix of unique coordinates for latent parameters
      spt <- terms(getCovariateFormula(correlation))
      attr(spt, "intercept") <- 0
      if (is.null(aggregate$grid)) {
         # Point-source data only: coordinates come straight from the data
         idx1 <- rep(TRUE, nrow(mfdata))
         val <- model.matrix(spt, mfdata)
         idx2 <- NULL
      } else {
         # Mixed point/areal data: stack point coordinates with the grid
         # coordinates of blocks that actually appear in the data
         idx1 <- !is.element(mfdata[,aggregate$blockid],
                             aggregate$grid[,aggregate$blockid])
         val <- model.matrix(spt, mfdata[idx1,,drop=FALSE])
         idx2 <- is.element(aggregate$grid[,aggregate$blockid],
                            mfdata[,aggregate$blockid])
         val <- rbind(val, model.matrix(spt, aggregate$grid[idx2,,drop=FALSE]))
      }
      sites <- unique.sites(val)
      ## Logical vector indicating z values to be monitored
      if (is.logical(control$z$monitor)) {
         control$z$monitor <- rep(control$z$monitor,
                                  length.out = nrow(sites$coords))
      } else {
         # Monitor set given as coordinates: match them to the unique sites
         idx <- colnames(sites$coords)
         if (!all(idx %in% colnames(control$z$monitor)))
            stop("Coordinate names not found in 'z' monitor")
         val <- unique.sites(control$z$monitor[,idx,drop=FALSE])
         val <- merge(cbind(sites$coords, 1:nrow(sites$coords)),
                      cbind(val$coords, 1), by=idx, all.x = TRUE)
         n <- length(idx)
         control$z$monitor <- !is.na(val[order(val[, n+1]), n+2])
      }
      ## Order latent parameters as (z$monitor == T, z$monitor == F)
      idx <- order(control$z$monitor, decreasing = TRUE)
      control$z$monitor <- control$z$monitor[idx]
      sites$coords <- sites$coords[idx,,drop=FALSE]
      sites$map <- sites$map[,idx,drop=FALSE]
      ## Initialize correlation structure
      correlation <- Initialize(correlation, data = as.data.frame(sites$coords))
      ## Matrix to map latent parameters to observed data
      if (is.null(kmat)) {
         # Build kmat: identity-like rows for point data; block rows average
         # the grid sites belonging to each areal unit
         k <- sites$map
         kmat <- Matrix(0, nrow(mfdata), nrow(sites$coords))
         kmat[idx1,] <- k[seq(length.out = sum(idx1)),]
         if (length(idx2) > 0) {
            idx <- aggregate$grid[idx2, aggregate$blockid]
            val <- sort(unique(idx))
            kmap <- Matrix(0, length(val), length(idx))
            kmap[nrow(kmap) * (seq(idx) - 1) + match(idx, val)] <- 1
            kmat[match(val, mfdata[, aggregate$blockid]),] <-
               (kmap / tabulate(idx)) %*%
               k[seq(sum(idx1) + 1, length.out = sum(idx2)),]
         }
      } else {
         # User-supplied mapping: validate dimensions and structure
         n <- c(nrow(mfdata), nrow(sites$coords))
         if (!(is(kmat, "matrix") || is(kmat, "Matrix")) || any(dim(kmat) != n))
            stop("Supplied 'kmat' must be a matrix object of dimension ",
                 n[1], " x ", n[2])
         kmat <- as(kmat, "sparseMatrix")
         k <- abs(kmat)
         if (any(rowSums(k) == 0))
            stop("Supplied 'kmat' should not contain rows of zeros")
         if (any(colSums(k) == 0))
            stop("Supplied 'kmat' should not contain columns of zeros")
      }
      ## Indices to map spatial variances
      # Each latent site must be associated with exactly one spatial variance
      # group across all measurements that reference it
      val <- if (is.null(variance$spatial)) factor(rep(1, nrow(mfdata)))
             else factor(getCovariate(mfdata, variance$spatial))
      idx <- unlist(apply(as.numeric(val) * (kmat != 0), 2, unique))
      idx <- idx[idx > 0]
      if (length(idx) != ncol(kmat))
         stop("Unsupported latent spatial structure. Different spatial",
              " variances assigned to measurements from the same site.")
      else variance$spatial <- as.factor(levels(val)[idx])
   }
   ## Structures for random effects parameters
   if (missing(random)) {
      wmat <- Matrix(numeric(0), nrow(mfdata), 0)
   } else {
      ## Matrix to map random effects to observed data
      w <- factor(getGroups(data, random)[as.numeric(rownames(mfdata))])
      wmat <- Matrix(0, length(w), nlevels(w))
      wmat[na.omit(seq(w) + nrow(wmat) * (as.numeric(w) - 1))] <- 1
      ## Indices to map random effects variances
      # Each random-effect group must use a single variance parameter
      val <- if (is.null(variance$random)) factor(rep(1, nrow(mfdata)))
             else factor(getCovariate(mfdata, variance$random))
      idx <- unlist(apply(as.numeric(val) * (wmat > 0), 2, unique))
      idx <- idx[idx > 0]
      if (length(idx) != ncol(wmat))
         stop("Unsupported random effects structure. Different random effects",
              " variances assigned to measurements within the same group.")
      else variance$random <- as.factor(levels(val)[idx])
   }
   ## Default values for weights if not supplied
   if (is.null(weights)) weights <- rowSums(as(kmat, "lsparseMatrix"))
   ## Check parameter specifications against supplied data
   if (length(control$beta) != (n <- ncol(xmat)))
      stop("'beta' parameter specification in 'ramps.control' must be of",
           " length ", n)
   if (length(control$sigma2.e) != (n <- nlevels(variance$fixed)))
      stop("'sigma2.e' parameter specification in 'ramps.control' must be of",
           " length ", n)
   if (length(control$phi) != (n <- length(correlation)))
      stop("'phi' parameter specification in 'ramps.control' must be of",
           " length ", n)
   if (length(control$sigma2.z) != (n <- nlevels(variance$spatial)))
      stop("'sigma2.z' parameter specification in 'ramps.control' must be of",
           " length ", n)
   if (length(control$sigma2.re) != (n <- nlevels(variance$random)))
      stop("'sigma2.re' parameter specification in 'ramps.control' must be of",
           " length ", n)
   ## Set a single tuning parameter for the sigma2 parameters
   val <- min(sigma2tuning(control))
   if (length(control$sigma2.e)) control$sigma2.e$tuning[] <- val
   if (length(control$sigma2.z)) control$sigma2.z$tuning[] <- val
   if (length(control$sigma2.re)) control$sigma2.re$tuning[] <- val
   ## Obtain MCMC samples from ramps engine
   val <- ramps.engine(y, xmat, kmat, wmat, correlation, variance$fixed,
                       variance$spatial, variance$random, weights, control)
   structure(
      list(params = as.mcmc(val$params), z = as.mcmc(val$z),
           loglik = val$loglik, evals = val$evals, call = call, y = y,
           xmat = xmat, terms = attr(mf, "terms"),
           xlevels = .getXlevels(mt, mf), etype = variance$fixed,
           weights = weights, kmat = kmat, correlation = correlation,
           coords = sites$coords, ztype = variance$spatial, wmat = wmat,
           retype = variance$random, control = control),
      class = "ramps")
}
## S3 print method for "ramps" fits: summarizes the call, coefficient names,
## measurement/latent-process/random-effect structure, and MCMC output size.
## Returns its argument invisibly, per print-method convention.
## NOTE(review): uses package-internal helpers params2beta, params2kappa,
## params2phi, and kappa2kappa.{e,z,re} defined elsewhere in this package.
print.ramps <- function(x, ...)
{
   cat("\nCall: ", paste(deparse(x$call), collapse = "\n"), "\n")
   params <- colnames(x$params)
   cat("\nCoefficients:\n")
   if (length(tmp <- params2beta(params, x$control)) > 0)
      print.default(tmp, print.gap = 2, quote = FALSE)
   sigma2 <- params2kappa(params, x$control)
   # Areal measurements are those mapped to more than one latent site
   n <- sum(rowSums(as(x$kmat, "lsparseMatrix")) > 1)
   cat("\nMeasurements\n",
       "  N = ", length(x$y), "\n",
       "  Point Source = ", length(x$y) - n, "\n",
       "  Areal = ", n, "\n",
       "  Error Variance: ",
       paste(kappa2kappa.e(sigma2, x$control), collapse = " "), "\n", sep = "")
   cat("\nLatent Spatial Process\n",
       "  Sites = ", ncol(x$kmat), "\n",
       "  Correlation: ", class(x$correlation)[1], "(",
       paste(params2phi(params, x$control), collapse = ", "), ")\n",
       "  Variance: ",
       paste(kappa2kappa.z(sigma2, x$control), collapse = " "), "\n", sep = "")
   if (ncol(x$wmat) > 0) {
      cat("\nRandom Effects\n",
          "  N = ", ncol(x$wmat), "\n",
          "  Variance: ",
          paste(kappa2kappa.re(sigma2, x$control), collapse = " "), "\n",
          sep = "")
   }
   # Show at most the first three and last saved sample labels
   n <- nrow(x$params)
   rn <- rownames(x$params)[1:min(n, 3)]
   if (n > 4) rn <- c(rn, "...")
   if (n > 3) rn <- c(rn, rownames(x$params)[n])
   cat("\nMCMC Output\n",
       "  Saved Samples = ", n, " (", paste(rn, collapse = ", "), ")\n",
       "  Slice Evaluations = ", sum(x$evals), "\n", sep = "")
   invisible(x)
}
## S3 summary method for "ramps" fits: delegates to the summary method of the
## stored MCMC parameter samples, forwarding any additional arguments.
summary.ramps <- function(object, ...)
{
   mcmc_samples <- object$params
   summary(mcmc_samples, ...)
}
|
b700644cb9acc2679b1c3f17199314702f9ddee3
|
1fd38c17bd8367a62c3d48be141a89f7ef2c7262
|
/source_functions/reduce_prism.R
|
71f1c68fef97c27e2cc7dd9a91f5571a8d0cfb5b
|
[] |
no_license
|
harlydurbin/angus_regions
|
648dc6792351520db2bf6e97d30178ff8d8d8425
|
ab00af1a004dce197ab02ead0f20b4317da644b0
|
refs/heads/master
| 2023-05-29T12:40:51.351189
| 2021-06-08T21:26:32
| 2021-06-08T21:26:32
| 290,271,190
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,969
|
r
|
reduce_prism.R
|
# Setup: download annual PRISM climate rasters (mean temperature and
# precipitation) for 1972-2017 and build a table of unique ZIP centroids
# (rounded to 0.1 degree to match the raster grid resolution).
library(tidyverse)
library(prism)
options(prism.path = "~/regions/data/raw_data/prism")
growth_regions <- readr::read_rds(here::here("data/derived_data/growth_regions.rds"))
yrs <- 1972:2017
# keepZip = FALSE discards the downloaded archives after extraction
get_prism_annual(type = "tmean", years = yrs, keepZip = FALSE)
get_prism_annual(type = "ppt", years = yrs, keepZip = FALSE)
zips <-
  growth_regions %>%
  #filter(region %in% c(1, 2, 5)) %>%
  distinct(zip, lat, lng) %>%
  mutate(lat = round(lat, digits = 1),
         lng = round(lng, digits = 1))
# Summarize one year's PRISM raster to per-ZIP means.
#
# Arguments:
#   file     name of the PRISM raster directory/stack for one year
#   yr       the year, recorded in the output's `year` column
#   var      unquoted name for the climate variable column (tidy-eval: captured
#            with enquo and injected with !! / :=)
#   zips_df  table of ZIP centroids with lat/lng rounded to 0.1 degree
#
# Converts the raster to points, rounds coordinates to 0.1 degree to match the
# ZIP table, joins, and averages the variable within each ZIP.
reduce_prism <-
  function(file, yr, var, zips_df) {
    var <- rlang::enquo(var)
    data <-
      prism::prism_stack(file) %>%
      raster::rasterToPoints() %>%
      as_tibble() %>%
      # Column 3 is the raster value layer; x/y are longitude/latitude
      rename(!! var := 3,
             lat = y,
             lng = x) %>%
      mutate(
        lat = round(lat, digits = 1),
        lng = round(lng, digits = 1),
        !! var := round(!! var, digits = 0)
      ) %>%
      distinct()
    # NOTE(review): left_join() without `by` joins on all shared columns
    # (lat, lng here) and prints a message; consider an explicit `by`.
    # NOTE(review): the result is returned still grouped by zip (no ungroup()).
    zips_df %>%
      left_join(data) %>%
      group_by(zip) %>%
      mutate(!! var := mean(!! var)) %>%
      distinct() %>%
      mutate(year = yr)
  }
# Reduce every year's tmean raster to per-ZIP means and stack the results.
# The stack directory names follow PRISM's stable naming convention.
prism_tmean <-
  purrr::map2_df(
    .x = purrr::map_chr(.x = yrs, ~ glue::glue("PRISM_tmean_stable_4kmM2_{.x}_bil")),
    .y = yrs,
    ~ reduce_prism(
      file = .x,
      yr = .y,
      var = tmean,
      zips_df = zips
    )
  )
# annoying
# Precipitation directory names are not uniform, so list them from disk instead
# of constructing them. NOTE(review): the hard-coded absolute path stripped here
# is machine-specific; assumes ppt_paths sorts into the same order as yrs.
ppt_paths <-
  list.dirs(path = here::here("data/raw_data/prism")) %>%
  str_subset("ppt") %>%
  str_remove("C:/Users/agiintern/Documents/regions/data/raw_data/prism/")
prism_ppt <-
  purrr::map2_df(
    .x = ppt_paths,
    .y = yrs,
    ~ reduce_prism(
      file = .x,
      yr = .y,
      var = ppt,
      zips_df = zips
    )
  )
# Sanity check: any year/ZIP combinations appearing more than once?
prism_ppt %>%
  group_by(year, zip) %>%
  filter(n() > 1)
# Combine temperature and precipitation by their shared keys and save.
prism_zip <- full_join(prism_tmean, prism_ppt)
readr::write_rds(prism_zip, here::here("data/derived_data/prism_zip.rds"))
|
0055691e1b7a4ea4dc75cceac57ff9e198608c3f
|
a5c81b7498341e8d658632c28ab3a8c6a6546cb3
|
/Labs/Lab12/BREWSTER_Lab12.R
|
d69f73453bfd83c7027a861727a62b44fe1c9524
|
[] |
no_license
|
maddenbrewster/CompBioLabsAndHomework
|
0655f148f320d1b1239949156d851fc29a137d4a
|
97efd2d42e3cf136bb644d234d1dc5c93d0d2806
|
refs/heads/master
| 2020-12-21T19:32:46.801939
| 2020-05-01T01:46:55
| 2020-05-01T01:46:55
| 236,536,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,081
|
r
|
BREWSTER_Lab12.R
|
# EBIO5420
# Lab 12
# Madden Brewster
# Sunday, April 12, 2020
# More work with ggplot2
# Problems work with the Cusack et al. dataset
# NOTE(review): setwd() makes the script machine-specific; here::here() would
# be more portable, but the original working-directory behavior is preserved.
setwd("/Users/maddenbrewster/Documents/EBIO5420/CompBioLabsAndHomework/Labs/Lab12")
cam_data <- read.csv("Cusack_et_al_data.csv", stringsAsFactors = F)
library(ggplot2)
library(dplyr)
# Problem 1: A bar plot in ggplot()
# All 41 species names go on the x-axis; the y-axis is the total number of
# observations of each species in the whole dataset.
# One way: first summarise the data to one row per species with its count.
unique_species_freq <- cam_data %>%
  group_by(Species) %>%
  summarise(species_frequency = length(Species))
# stat = "identity" is required because both x and y are supplied explicitly.
cam_barplot <- ggplot(unique_species_freq, aes(x = Species, y = species_frequency)) +
  geom_bar(stat = "identity")
cam_barplot
# Easier: let geom_bar() count the rows per species itself.
cam_barplot <- ggplot(cam_data) +
  geom_bar(aes(Species))
cam_barplot
# Problem 2: rotate the x-axis tick labels so species names are readable.
cam_barplot_2 <- ggplot(cam_data) +
  geom_bar(aes(Species)) +
  theme(axis.text.x = element_text(angle = 90))
cam_barplot_2
# Problem 3: flipped orientation, log10 scaling, and sorting by frequency.
sorted_species_data <- arrange(unique_species_freq, species_frequency)
# ggplot orders a character axis alphabetically, which defeated the sort in
# the original ("is there a way to turn off R's default alphabetization?").
# Fix: make Species a factor whose levels follow the sorted frequencies.
sorted_species_data <- mutate(sorted_species_data,
                              Species = factor(Species, levels = Species))
# Bug fix: the original call was missing aes() around x/y and had an
# unbalanced closing parenthesis (a syntax error).
cam_barplot_3 <- ggplot(sorted_species_data, aes(x = Species, y = species_frequency)) +
  geom_bar(stat = "identity") + # required because x and y are both specified
  scale_y_log10() +
  coord_flip()
cam_barplot_3
|
150716679b0d22b394efa1ec2e0c3cd7d18ea9a8
|
3bf08bebebaf7a8f7f18e6cee1fc2a472f36d2c7
|
/academic-twitter-example.R
|
8bfb41102885cc7b1532c407d01dd1161ec3fe03
|
[
"MIT"
] |
permissive
|
omiyas/s21-intro-to-data-sci-methods-in-ed
|
112960b49e59b235cc8d0557c119955f6d47dcc5
|
bcd7b52fee1db891aeb428bfcd74ecf99bfee476
|
refs/heads/main
| 2023-05-02T03:18:59.466194
| 2021-05-07T17:23:54
| 2021-05-07T17:23:54
| 365,340,793
| 0
| 0
|
MIT
| 2021-05-07T20:01:29
| 2021-05-07T20:01:28
| null |
UTF-8
|
R
| false
| false
| 3,625
|
r
|
academic-twitter-example.R
|
# devtools::install_github("cjbarrie/academictwitteR")
# install.packages("rtweet")
library(academictwitteR)
library(rtweet)
library(tidyverse)
# Build one boolean query matching #AERA19..#AERA21 and AERA2019..AERA2021.
hashtags_to_search <- c(str_c("#AERA", 19:21), str_c("AERA20", 19:21)) %>%
paste0(collapse = " OR ")
# Pull matching tweets via the academic-track API; results land in data_path.
get_hashtag_tweets(hashtags_to_search, "2010-01-01T00:00:00Z", "2021-04-17T00:00:00Z", bearer_token = Sys.getenv("bearer_token"), data_path = "twitter-data-aera-new/")
# NOTE(review): `tweets` is not defined anywhere above -- presumably meant to
# capture the return value of get_hashtag_tweets(); verify before rerunning.
tweets %>% write_rds("data/aera-tweets-unproc.rds")
tweets_proc <- tidytags::lookup_many_tweets(tweets$id)
# NOTE(review): write_rds() to a ".csv" filename still writes RDS, not CSV.
tweets_proc %>% write_rds("data/aera-tweets.csv")
library(quanteda)
d <- read_rds("~/s21-intro-to-data-sci-methods-in-ed/data/aera-tweets.rds")
# Sentiment workflow follows https://www.tidytextmining.com/sentiment.html
library(janeaustenr)
library(stringr)
library(tidytext)
# NOTE(review): austen_books() is a leftover from the tutorial; unused below.
austen_books()
# One row per (tweet, word) token.
tidy_books <- d %>%
select(created_at, text) %>%
unnest_tokens(word, text)
library(googlesheets4)
library(lubridate)
# Conference start/end dates maintained in a shared Google Sheet.
conf_dates <- read_sheet("https://docs.google.com/spreadsheets/d/1lBg1zMUtUYOwS2kWm1hAaW5dZ2iWc-R5013wTubjG_Q/edit#gid=0")
conf_dates <- janitor::clean_names(conf_dates)
# Widen each conference window by three days on both sides.
my_interval <- interval(start = conf_dates$start_date - days(3),
end = conf_dates$end_date + days(3))
my_interval
# Midpoint of a lubridate interval.
int_midpoint <- function(interval) {
int_start(interval) + (int_end(interval) - int_start(interval))/2
}
# Keep only tokens posted during a (padded) conference window.
tidy_books_f <- tidy_books %>%
filter(created_at %within% my_interval)
# Daily counts of positive/negative words (Bing lexicon).
tidy_proc <- tidy_books_f %>%
inner_join(get_sentiments("bing")) %>%
mutate(day = lubridate::round_date(created_at, "day")) %>%
count(day, sentiment)
tidy_proc
# Add calendar breakdowns for faceting/plotting.
tidy_proc_tm <- tidy_proc %>%
mutate(day = lubridate::round_date(day, "day"),
week = lubridate::round_date(day, "week"),
month = lubridate::round_date(day, "month"),
year = lubridate::year(day),
yday = lubridate::yday(day))
mid_points <- int_midpoint(my_interval)
mid_points <- tibble(year = 2014:2021,
mid_point = mid_points)
# Sentiment counts by day-of-year, one panel per conference year.
tidy_proc_tm %>%
ggplot(aes(x = yday, y = n, group = sentiment, color = sentiment)) +
geom_line() +
geom_point() +
facet_wrap("year", scales = "free_x") +
theme_minimal() +
scale_color_brewer("Sentiment", type = "qual") +
ylab("Number of Tweets") +
xlab("Day of the Year")
ggsave("aera-sentiment-by-year.png", width = 10, height = 10)
# Add the same calendar breakdowns to the raw tweet table for the
# quanteda-based analysis that follows.
d <- mutate(d, day = lubridate::round_date(created_at, "day"),
week = lubridate::round_date(created_at, "week"),
month = lubridate::round_date(created_at, "month"),
year = lubridate::round_date(created_at, "year"),
yday = lubridate::yday(created_at))
# quanteda-based sentiment ratio analysis on the tweet corpus.
# (Renamed the corpus variable from `c`, which masked base::c().)
tweet_corpus <- corpus(d, text_field = "text")
toks_news <- tokens(tweet_corpus, remove_punct = TRUE)
# select only the "negative" and "positive" categories
data_dictionary_LSD2015_pos_neg <- data_dictionary_LSD2015[1:2]
data_dictionary_LSD2015_pos_neg
toks_gov_lsd <- tokens_lookup(toks_news, dictionary = data_dictionary_LSD2015_pos_neg)
# create a document-feature matrix and group the counts by week
dfmat_gov_lsd <- dfm(toks_gov_lsd) %>%
  dfm_group(groups = c("week"))
# Per-week negative/positive ratio, restricted to April of 2015 onwards
# (the conference weeks).
tp <- dfmat_gov_lsd %>%
  as_tibble() %>%
  mutate(ratio = negative/positive) %>%
  mutate(date = lubridate::ymd(doc_id)) %>%
  mutate(yday = lubridate::yday(date)) %>%
  mutate(day = lubridate::day(date)) %>%
  mutate(week = lubridate::week(date)) %>%
  mutate(month = lubridate::month(date)) %>%
  mutate(year = lubridate::year(date)) %>%
  filter(month == 4,
         year > 2014)
tp
# Bug fix: the mutate()/ggplot() chain below had no left-hand data object
# (it started with a bare `mutate(...)`), which errors at runtime; pipe
# `tp` into it.
tp %>%
  mutate(year = as.factor(year)) %>%
  ggplot(aes(x = day, y = ratio, color = year, group = year)) +
  geom_smooth(se = FALSE) +
  scale_color_brewer()
|
85220c4c66a71fc778ffe17b3e07945188a2f892
|
2d549b99d2f77abe9f4658ffb7ef3fef6e25c286
|
/R/binaryAttributes.R
|
34cbb12a8ed7a5f18820927abccea0ac925dbf33
|
[] |
no_license
|
Hackout2/repijson
|
549b538135badb358b864be5c47e26e177de74c0
|
4ad8f9d7c33cd2225e11674f651f608bff08bc91
|
refs/heads/master
| 2020-12-29T02:36:05.984596
| 2017-02-22T22:06:34
| 2017-02-22T22:06:34
| 35,152,284
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
binaryAttributes.R
|
# Author: Thomas Finnie
###############################################################################
#' Convert a file to a base64 encoded attribute
#'
#' Read a file in binary mode and convert the bytestream into a base64 encoded
#' ejAttribute
#' @param file The file to read and convert
#' @param name The name for the attribute
#' @author Thomas Finnie (thomas.finnie@@phe.gov.uk)
#' @return An ejAttribute containing the file
#' @export
fileToAttribute <- function(file, name = as.character(file)) {
  # Encode the file contents first, then wrap them in an ejAttribute.
  encoded <- base64enc::base64encode(file)
  create_ejAttribute(name = name, type = "base64", value = encoded)
}
#' Convert a base64 attribute to a file
#'
#' Convert a base64 encoded attribute to a file.
#' @param attribute The ejAttribute to convert
#' @param file The filename to write to
#' @author Thomas Finnie (thomas.finnie@@phe.gov.uk)
#' @return invisible NULL
#' @export
attributeToFile <- function(attribute, file) {
  # inherits() is the robust class test: class() can return a vector, in
  # which case `class(x) != "ejAttribute"` produces a multi-element condition.
  if (!inherits(attribute, "ejAttribute"))
    stop("Attribute must be an ejAttribute")
  if (attribute$type != "base64")
    stop("Attribute must be a base64 encode attribute")
  # Decode and write the raw bytes; return NULL invisibly (side-effect fn).
  writeBin(base64enc::base64decode(attribute$value), file)
  invisible(NULL)
}
|
1fcea07da68986c3f1a72829ebf11279696b46bb
|
9775fa1bc36cac2a9e8f61fb25248ec9fe71f6a3
|
/R/transition-between-polygons.R
|
5658a8b25860e50cb46f19ef394a9f580cb76df6
|
[] |
no_license
|
mathiasisaksen/artKIT
|
944ea1f71aea566f25fe506e5b3c6a606064059b
|
7b539cee119a9b4fe743b78799c24d5aef912319
|
refs/heads/main
| 2023-07-01T07:46:23.925597
| 2021-07-31T13:53:54
| 2021-07-31T13:53:54
| 391,369,092
| 13
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,871
|
r
|
transition-between-polygons.R
|
#' Transition between polygons
#'
#' Function that interpolates/transitions between two polygons. This is done by
#' computing \code{(1 - time)*start.polygon + time*end.polygon}.
#'
#' @param start.polygon,end.polygon Dataframes/matrices containing the vertices of the
#' polygons to interpolate between. The coordinates of the vertices must be stored in columns named "x" and "y",
#' and the polygons must contain the same number of vertices.
#' @param time The times at which we are interested in interpolating the polygons. \code{time = 0} gives
#' \code{start.polygon}, while \code{time = 1} gives \code{end.polygon}. Can be either a single number
#' or a numeric vector. If time contains values outside [0, 1], a warning will be given.
#' @param vertex.order Determines whether and how the vertices in \code{start.polygon} and \code{end.polygon}
#' are reordered before the interpolation is computed. If \code{vertex.order = "preserve"}, no
#' reordering is applied, and the polygons are used as-is. If \code{vertex.order = "reorder"},
#' the function first ensures that the polygons have the same orientation (i.e. clockwise/counter-clockwise).
#' Then, it attempts to shift the indices of the vertices so that the corresponding vertices on
#' \code{start.polygon} and \code{end.polygon} are "aligned".
#'
#' @return A data frame that contains one row per vertex. If \code{start.polygon}
#' and \code{end.polygon} contain n vertices, and \code{time} contains m values, then
#' the returned data frame will contain n*m rows. following columns:
#' \item{x, y}{The coordinates of the vertex}
#' \item{group}{Which polygon the vertex belongs to (1 for the first value in \code{time}, 2 for the second and so on)}
#' \item{time}{The time value of the associated polygon}
#'
#' @note It is recommended to ensure that the start and end polygons have the correct
#' orientation and numbering of vertices before computing the transition, and then using
#' \code{vertex.order = "preserve"}.
#'
#' @examples
#' # Example: Transition from hexagon to square
#' # Create hexagon
#' hexagon.df = compute_regular_polygons(
#' center = c(0, 0),
#' radius = 1,
#' rotation = 0,
#' num.edges = 6
#' )
#' # Round corners slightly
#' hexagon.df = round_polygon_corners(hexagon.df, corner.radius.scale = 0.3)
#'
#' # Create square
#' square.df = compute_regular_polygons(
#' center = c(20, -20),
#' radius = 2,
#' rotation = 0,
#' num.edges = 4
#' )
#' # Round corners slightly
#' square.df = round_polygon_corners(square.df, corner.radius.scale = 0.3)
#'
#' # Resample polygons with many vertices, so that the transition becomes smooth
#' num.vertices = 1000
#' resample.time = seq(0, 1, length.out = num.vertices + 1)[-(num.vertices + 1)]
#' hexagon.resample = interpolate_polygon(hexagon.df)(resample.time)
#' square.resample = interpolate_polygon(square.df)(resample.time)
#'
#' # Show transition over 10 steps
#' num.transition = 10
#' transition.time = seq(0, 1, length.out = num.transition)
#' # Use vertex.order = "preserve" (both polygons are CCW, and have the top vertex
#' # as the first in hexagon.df and square.df)
#' transition.df = transition_between_polygons(
#' hexagon.resample,
#' square.resample,
#' transition.time,
#' "preserve")
#'
#' # Show the result:
#' library(ggplot2)
#' ggplot()+
#' geom_polygon(data = transition.df, aes(x = x, y = y, group = group), fill = NA, color = "black")+
#' coord_fixed()
#'
#' @export
#' @author Mathias Isaksen \email{mathiasleanderi@@gmail.com}
# Linear interpolation between two equally-sized polygons at the given times
# (full contract documented in the roxygen block above).
transition_between_polygons = function(start.polygon, end.polygon, time, vertex.order = "reorder") {
# Validate that both inputs are vertex data frames with x/y columns.
check_vertex_df(start.polygon, "start.polygon")
check_vertex_df(end.polygon, "end.polygon")
if (nrow(start.polygon) != nrow(end.polygon)) {
stop(paste(c("start.polygon and end.polygon must contain the same number of vertices. ",
"If you wish to interpolate between polygons of different sizes, ",
"then interpolate_polygon can be used to make them the same size.")))
}
# Extrapolation outside [0, 1] is allowed but flagged, per the documentation.
if (min(time) < 0 || max(time) > 1) {
warning("time contains values outside the interval [0, 1].")
}
n = nrow(start.polygon)
n.times = length(time)
# If orientations differ, reverse the end polygon so both wind the same way
# (only when reordering is permitted; otherwise just warn).
if (is_polygon_ccw(start.polygon) != is_polygon_ccw(end.polygon)) {
if (vertex.order != "preserve") {
end.polygon = end.polygon[n:1, ]
} else {
warning(paste(c("start.polygon and end.polygon do not have the same orientation ",
"(one is clockwise and the other is counter-clockwise.)")))
}
}
# Optionally rotate the vertex numbering so corresponding vertices align.
if (vertex.order == "reorder") {
reordered.polygons = reorder_polygons(start.polygon, end.polygon)
start.polygon = reordered.polygons$polygon.1
end.polygon = reordered.polygons$polygon.2
}
# Replicate each polygon once per time value (n vertices x n.times frames)...
start.polygon.total = start.polygon[rep(1:n, n.times), c("x", "y")]
end.polygon.total = end.polygon[rep(1:n, n.times), c("x", "y")]
# ...and pair every vertex row with its frame's time value and group id.
time.total = rep(time, each = n)
group = rep(1:n.times, each = n)
# Linear blend: (1 - t) * start + t * end, computed column-wise on x/y.
result = (1 - time.total)*start.polygon.total + time.total*end.polygon.total
result$time = time.total
result$group = group
return(result)
}
# Rotate the vertex numbering of two polygons so that their closest pair of
# centred vertices become the first vertex of each, aligning corresponding
# vertices before interpolation.
#
# polygon.1, polygon.2: data frames of vertices with columns x and y,
#                       assumed to have the same number of rows.
# Returns a list with elements polygon.1 and polygon.2, reordered.
reorder_polygons = function(polygon.1, polygon.2) {
  n = nrow(polygon.1)
  # Centre each polygon on its centroid so the vertex-pair comparison is
  # translation-invariant.
  centroid.1 = compute_polygon_centroid(polygon.1)
  centroid.2 = compute_polygon_centroid(polygon.2)
  centered.polygon.1 = polygon.1[, c("x", "y")] - centroid.1[rep(1, n), c("x", "y")]
  centered.polygon.2 = polygon.2[, c("x", "y")] - centroid.2[rep(1, n), c("x", "y")]
  closest.indices = closest_pair_of_points(centered.polygon.1, centered.polygon.2)
  # Rotate the index sequences so each polygon starts at its member of the
  # closest pair (1-based modular shift).
  reordered.indices.start = (1:n + closest.indices[1] - 2) %% n + 1
  reordered.indices.end = (1:n + closest.indices[2] - 2) %% n + 1
  reordered.polygon.1 = polygon.1[reordered.indices.start, ]
  # Bug fix: polygon.2 was previously reordered with the indices computed for
  # polygon.1 (reordered.indices.start), leaving reordered.indices.end unused
  # and misaligning the second polygon whenever the closest indices differ.
  reordered.polygon.2 = polygon.2[reordered.indices.end, ]
  return(list(polygon.1 = reordered.polygon.1, polygon.2 = reordered.polygon.2))
}
|
81cdadf567a09f8155655666595f45bcccbeeb7f
|
2bcb5fb15e9c93368891e20d4c5d4f129f36203b
|
/deseq2_morphAge.R
|
57d32e8b8747712583aea9154bbc1f2416a77e6b
|
[] |
no_license
|
soojinyilab/sparrow_WGBS_paper
|
ab13e0c395408b56c908824f5b602ed5e7a8a747
|
0cd4f49298bc15966ec8d1edd2d3adca22cc1d1c
|
refs/heads/master
| 2022-11-06T19:06:52.572373
| 2020-07-01T03:08:45
| 2020-07-01T03:08:45
| 276,264,003
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,066
|
r
|
deseq2_morphAge.R
|
library(DESeq2)
### Read data
# Analysis condition / file prefix (sex + tissue).
cond <- "female_Hyp"
cts <- as.matrix(read.csv("gene_count_matrix.csv", row.names="gene_id", check.names = F)) # raw count
coldata <- read.csv(paste(cond, ".cond", sep = ""), sep="\t", row.names=1, header=F) # sample condition file
colnames(coldata) <- c("morph", "age", "type")
# Strip the "fb" tag so sample names match the count-matrix column names.
rownames(coldata) <- sub("fb", "", rownames(coldata))
# Sanity check: every sample in coldata must have a column of counts.
print(all(rownames(coldata) %in% colnames(cts)))
# Reorder count columns to match coldata's sample order (required by DESeq2).
cts <- cts[, rownames(coldata)]
all(rownames(coldata) == colnames(cts))
### Differential expression between two morphs
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ age + morph) # corrects for age
dds <- estimateSizeFactors(dds)
dds <- DESeq(dds, fitType='local')
counts <- counts(dds, normalized=TRUE)
res <- results(dds)
# Mean normalised expression per morph level, appended alongside baseMean.
baseMeanSep <- sapply(levels(dds$morph), function(lvl) rowMeans(counts(dds,normalized=TRUE)[,dds$morph == lvl]))
res <- cbind(as.data.frame(res), baseMeanSep)
res$Gene <- rownames(res)
# Reorder columns: Gene, baseMean, per-level means, then the DESeq2 stats.
res <- res[,c(9,1,7,8,2,3,4,5,6)]
res <- cbind(res, counts)
# Sort by adjusted p-value so the top hits come first in the output file.
resOrdered <- res[order(res$padj), ]
write.table(resOrdered, file=paste(cond, ".morph.DESeq2", sep = ""), quote=F, sep="\t", row.names=F)
write.table(cbind(dds$morph, dds$sizeFactor), file=paste(cond, "_sizeFactor.txt", sep=""), quote=F, sep="\t", col.names=F)
## Differential expression between two age groups
# Same pipeline with the design terms swapped so `age` is the tested variable.
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ morph + age) # corrects for morph
dds <- estimateSizeFactors(dds)
dds <- DESeq(dds, fitType='local')
counts <- counts(dds, normalized=TRUE)
res <- results(dds)
baseMeanSep <- sapply(levels(dds$age), function(lvl) rowMeans(counts(dds,normalized=TRUE)[,dds$age == lvl]))
res <- cbind(as.data.frame(res), baseMeanSep)
res$Gene <- rownames(res)
res <- res[,c(9,1,7,8,2,3,4,5,6)]
res <- cbind(res, counts)
resOrdered <- res[order(res$padj), ]
write.table(resOrdered, file=paste(cond, ".age.DESeq2", sep = ""), quote=F, sep="\t", row.names=F)
|
e027fb9b884d2f2be1a769f7e79a370a506bee8f
|
113c20043720a2b49fbaced8c3efa7cb2f8fc8d6
|
/Code/R/RandomForest CS504.R
|
87de9696ded7c2a24b6985b09deea7dc936595fb
|
[] |
no_license
|
gturner7/Census-Income
|
da4f087ff521097149594e6daf862e8da04c9543
|
3528233eae3d667326506730e7c654ef83f9a5f1
|
refs/heads/main
| 2023-07-24T14:44:10.032727
| 2021-09-06T22:10:05
| 2021-09-06T22:10:05
| 372,581,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 562
|
r
|
RandomForest CS504.R
|
# Random-forest regression on the full dataset (outliers retained).
data<-read.csv('Final_Data_With_Outliers.csv')
library(DescTools)
library(MASS)
library(randomForest)
#partition data
# Fixed seed for a reproducible 80/20 train/test split.
set.seed(100)
trainrows<-sample(nrow(data),nrow(data)*.8, replace = FALSE)
traindata<-data[trainrows,]
testdata<-data[-trainrows,]
#run rf
# Column 41 is the response; all other columns are predictors.
rf<-randomForest(x=traindata[,-41],y=traindata[,41],ntree=100,mtry=5,do.trace=1)
varImpPlot(rf, main="Random Forest Top 10 Variables", n.var=10)
#predict
rfpredictions<-predict(rf, newdata = testdata)
#calculate statistics
# Hold-out RMSE plus a predicted-vs-observed regression as an R^2 check.
RMSE(rfpredictions,testdata[,41])
summary(lm(testdata[,41]~rfpredictions))
|
3356d8dc964bcfb20df23a16304162de56d37db6
|
6771b3be59935639b51698036e5fbbaf51feec4b
|
/man/remove_small_pols.Rd
|
6e10b2fda411ad1c8f30e6589941a3e9cc29fc07
|
[] |
no_license
|
pieterbeck/CanHeMonR
|
f1a15bc68afc77f66bb13b3e90fbfaa3e99376e3
|
94ac1171b5bb7ff88e3cbe7dee3594c31d628ff4
|
refs/heads/master
| 2020-05-21T04:42:39.673459
| 2018-05-14T09:33:09
| 2018-05-14T09:33:09
| 48,625,321
| 0
| 3
| null | 2018-05-14T09:33:10
| 2015-12-26T22:26:10
|
R
|
UTF-8
|
R
| false
| true
| 552
|
rd
|
remove_small_pols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_small_pols.r
\name{remove_small_pols}
\alias{remove_small_pols}
\title{Remove Small Polygons}
\usage{
remove_small_pols(spatpols, minsize, outname = NULL)
}
\arguments{
\item{spatpols}{A SpatialPolygons object}
\item{minsize}{numeric Any polygons below this size will be removed}
\item{outname}{character. Optional filename to write the result to}
}
\value{
A SpatialPolygons(DataFrame) object
}
\description{
Remove small polygons from a SpatialPolygons object
}
|
94218bdf8d2519dd1480cbf1a68166c55b8ef3b2
|
f1971a5cbf1829ce6fab9f5144db008d8d9a23e1
|
/packrat/lib/x86_64-pc-linux-gnu/3.2.5/metricsgraphics/doc/introductiontometricsgraphics.R
|
f16fe66492ee4eed5b708abee0d09fb36af5ca7e
|
[] |
no_license
|
harryprince/seamonster
|
cc334c87fda44d1c87a0436139d34dab310acec6
|
ddfd738999cd302c71a11aad20b3af2f4538624f
|
refs/heads/master
| 2021-01-12T03:44:33.452985
| 2016-12-22T19:17:01
| 2016-12-22T19:17:01
| 78,260,652
| 1
| 0
| null | 2017-01-07T05:30:42
| 2017-01-07T05:30:42
| null |
UTF-8
|
R
| false
| false
| 9,528
|
r
|
introductiontometricsgraphics.R
|
## ----echo=FALSE----------------------------------------------------------
suppressPackageStartupMessages(library(metricsgraphics))
suppressPackageStartupMessages(library(jsonlite))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(htmltools))
suppressPackageStartupMessages(library(dplyr))
## ------------------------------------------------------------------------
library(metricsgraphics)
library(jsonlite)
library(RColorBrewer)
library(htmltools)
library(dplyr)
# this lets us add a title to the plot since the package follows the guidance
# of the htmlwidgets authors and does not include the MetricsGraphics.js title
# option to ensure consistent div sizing.
# Helper: centre a widget in a div with a bold title above it. (Works around
# the package deliberately omitting the MetricsGraphics.js title option to
# keep div sizing consistent.)
show_plot <- function(plot_object, title) {
  heading <- strong(title)
  div(
    style = "margin:auto;text-align:center",
    heading, br(), plot_object
  )
}
## ------------------------------------------------------------------------
fake_users_1 <- fromJSON("http://metricsgraphicsjs.org/data/fake_users1.json")
fake_users_1$date <- as.Date(fake_users_1$date)
fake_users_1 %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line(area=TRUE) %>%
show_plot("Line Chart")
## ------------------------------------------------------------------------
confidence_band <- fromJSON("http://metricsgraphicsjs.org/data/confidence_band.json")
confidence_band %>%
mjs_plot(x=date, y=value, format="percentage", width=600, height=200) %>%
mjs_axis_x(xax_format="date",
show_secondary_x_label=FALSE,
extended_ticks=TRUE) %>%
mjs_line() %>%
mjs_add_confidence_band() %>%
show_plot("Confidence Band")
## ------------------------------------------------------------------------
small_range <- fromJSON("http://metricsgraphicsjs.org/data/small-range.json")
small_range %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line(interpolate="basic", area=TRUE) %>%
show_plot("Small Range of Integers")
## ------------------------------------------------------------------------
brief_1 <- fromJSON("http://metricsgraphicsjs.org/data/brief-1.json")
brief_2 <- fromJSON("http://metricsgraphicsjs.org/data/brief-2.json")
brief_1 %>%
mjs_plot(x=date, y=value, width=600, height=200, linked=TRUE) %>%
mjs_axis_x(xax_format="date", xax_count=4) %>%
mjs_line(area=TRUE) -> mjs_brief_1
brief_2 %>%
mjs_plot(x=date, y=value, width=600, height=200, linked=TRUE) %>%
mjs_axis_x(xax_format="date", xax_count=4) %>%
mjs_line() -> mjs_brief_2
div(style="margin:auto;text-align:center",
strong("Linked Graphic"), br(), mjs_brief_1,
strong("Other Linked Graphic"), br(), mjs_brief_2)
## ------------------------------------------------------------------------
solitary <- data.frame(
date=as.Date("2015-03-05"),
value=12000
)
solitary %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_point() %>%
show_plot("Singleton")
## ------------------------------------------------------------------------
fake_users2_list <- fromJSON("http://metricsgraphicsjs.org/data/fake_users2.json")
fake_users2 <- data.frame(
date=fake_users2_list[[1]]$date,
value_1=fake_users2_list[[1]]$value,
value_2=fake_users2_list[[2]]$value,
value_3=fake_users2_list[[3]]$value
)
fake_users2 %>%
mjs_plot(x=date, y=value_1, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line() %>%
mjs_add_line(value_2) %>%
mjs_add_line(value_3) %>%
mjs_add_legend(c("Line 1", "Line 2", "Line 3")) %>%
show_plot("Multi-Line Chart")
fake_users2 %>%
mjs_plot(x=date, y=value_1, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line(color="blue") %>%
mjs_add_line(value_2, color="rgb(255,100,43)") %>%
mjs_add_line(value_3, color="#ccccff") %>%
mjs_add_legend(c("Line 1", "Line 2", "Line 3")) %>%
show_plot("Multi-Line Char with Custom Colors")
## ------------------------------------------------------------------------
fake_users3_list <- fromJSON("http://metricsgraphicsjs.org/data/fake_users3.json")
fake_users3 <- data.frame(
date=fake_users3_list[[1]]$date,
value_1=fake_users3_list[[1]]$value,
value_2=fake_users3_list[[2]]$value,
value_3=fake_users3_list[[3]]$value
)
fake_users3 %>%
mjs_plot(x=date, y=value_1, width=600, height=200, right=40) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line() %>%
mjs_add_line(value_2) %>%
mjs_add_line(value_3) %>%
mjs_add_legend(c('US', 'CA', 'DE'), inline=TRUE) %>%
show_plot("Labeling Lines")
## ------------------------------------------------------------------------
xnotondate <- fromJSON("http://metricsgraphicsjs.org/data/xnotdate.json")
xnotondate %>%
mjs_plot(x=males, y=females, width=600, height=240,
left=80, right=40, bottom=50) %>%
mjs_line(animate_on_load=TRUE, area=FALSE) %>%
mjs_labs("Males", "Females") %>%
mjs_axis_y(extended_ticks=TRUE) %>%
show_plot("Axis Labels")
## ------------------------------------------------------------------------
some_percentages <- fromJSON("http://metricsgraphicsjs.org/data/some_percentage.json")
some_percentages[[1]] %>%
mjs_plot(x=date, y=value, format="percentage", width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line(area=TRUE) %>%
show_plot("Some Percentages")
## ------------------------------------------------------------------------
some_currency <- fromJSON("http://metricsgraphicsjs.org/data/some_currency.json")
some_currency %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line() %>%
mjs_axis_y(yax_units="$") %>%
show_plot("Some Currency")
## ------------------------------------------------------------------------
log_scale <- fromJSON("http://metricsgraphicsjs.org/data/log.json")
log_scale %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line(area=TRUE) %>%
mjs_axis_y(y_scale_type="log") %>%
show_plot("Log Scale")
## ------------------------------------------------------------------------
fake_users_1 <- fromJSON("http://metricsgraphicsjs.org/data/fake_users1.json")
brief_1 <- fromJSON("http://metricsgraphicsjs.org/data/brief-1.json")
fake_users_1 %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date", show=FALSE) %>%
mjs_line() -> no_x
brief_1 %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_axis_y(show=FALSE) %>%
mjs_line() -> no_y
div(style="margin:auto;text-align:center",
strong("No X Axis"), br(), no_x,
strong("No Y Axis"), br(), no_y)
## ------------------------------------------------------------------------
fake_users_1 <- fromJSON("http://metricsgraphicsjs.org/data/fake_users1.json")
fake_users_1 %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date", show=FALSE) %>%
mjs_line(color="#8c001a", area=TRUE) %>%
mjs_axis_y(rug=TRUE) %>%
show_plot("Colors!")
## ------------------------------------------------------------------------
fake_users_1 <- fromJSON("http://metricsgraphicsjs.org/data/fake_users1.json")
fake_users_1 %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date", show=FALSE) %>%
mjs_line() %>%
mjs_axis_y(rug=TRUE) %>%
show_plot("Rug Plots")
## ------------------------------------------------------------------------
some_percentages <- fromJSON("http://metricsgraphicsjs.org/data/some_percentage.json")
some_percentages[[1]] %>%
mjs_plot(x=date, y=value, format="percentage", width=600, height=200) %>%
mjs_axis_x(xax_format="date") %>%
mjs_line(area=TRUE) %>%
mjs_add_marker("2014-02-01", "1st Milestone") %>%
mjs_add_marker(as.Date("2014-03-15"), "2nd Milestone") %>%
show_plot("Markers")
## ------------------------------------------------------------------------
fake_users_1 <- fromJSON("http://metricsgraphicsjs.org/data/fake_users1.json")
fake_users_1 %>%
mjs_plot(x=date, y=value, width=600, height=200) %>%
mjs_axis_x(xax_format="date", show=FALSE) %>%
mjs_add_baseline(160000000, "a baseline") %>%
mjs_line(area=TRUE) %>%
show_plot("Baselines")
## ------------------------------------------------------------------------
points_1 <- fromJSON("http://metricsgraphicsjs.org/data/points1.json")
points_1 %>%
mjs_plot(x=x, y=y, width=600, height=460) %>%
mjs_point(y_rug=TRUE) %>%
mjs_axis_x() %>%
show_plot("Simple Scatterplot")
## ------------------------------------------------------------------------
points_1 %>%
mjs_plot(x=x, y=y, width=600, height=460) %>%
mjs_point(y_rug=TRUE, color_accessor=v, color_type="category", color_range=c("green", "orange")) %>%
mjs_axis_x() %>%
show_plot("Color mapping")
## ------------------------------------------------------------------------
points_1 %>%
mjs_plot(x=x, y=y, width=600, height=460) %>%
mjs_point(y_rug=TRUE, x_rug=TRUE, color_accessor=z, size_accessor=w, color_type="category") %>%
mjs_axis_x(rug=TRUE) %>%
show_plot("Size Too!")
## ------------------------------------------------------------------------
moar_plots <- lapply(1:7, function(x) {
mjs_plot(rbeta(10000, x, x), width="250px", height="250px", linked=TRUE) %>%
mjs_histogram(bar_margin=2) %>%
mjs_labs(x_label=sprintf("Plot %d", x))
})
mjs_grid(moar_plots, nrow=4, ncol=3, widths=c(rep(0.33, 3)))
|
557f6d0443b08c76c1402e0a9715fafe8509866d
|
2e24128ded0f064b44e8d114072e6694012f5dba
|
/code/ratings_counting.R
|
48314a98072566716d7c6238cbad88387917f003
|
[] |
no_license
|
cass-code/20346212
|
b9b7f3790e70b34c065316a9fd4f9bd1a4048b40
|
ec0f527e0b934f1f933380db4be44a2648e55290
|
refs/heads/main
| 2023-04-26T20:52:14.681909
| 2021-05-24T19:12:31
| 2021-05-24T19:12:31
| 370,134,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
ratings_counting.R
|
# Percentage of movies that critics rated above 80% on Rotten Tomatoes AND
# audiences scored above 85%.
#
# movies: data frame with numeric columns `Rotten Tomatoes %` and
#         `Audience score %` (one row per film).
# Returns a single numeric percentage in [0, 100].
#
# Bug fixes vs. the original:
#  * the original divided the filtered *data frame* by a hard-coded 74 (the
#    sample's row count), producing a table of nonsense values instead of a
#    frequency; the denominator is now nrow(movies).
#  * base-R subsetting via which() replaces dplyr::filter(), removing the
#    library(tidyverse) attach side effect (which() also drops NA rows,
#    matching filter()'s semantics).
ratings_counting <- function(movies) {
  hits <- which(movies[["Rotten Tomatoes %"]] > 80 &
                  movies[["Audience score %"]] > 85)
  100 * length(hits) / nrow(movies)
}
|
bff111870e5706a249493505e24c30c52208f761
|
3bbebea9260b8b3c1b7d07f66d91998116babf55
|
/cell_type_deconv_heatmap.r
|
736a38b8c0fe8a4a549bbfd0f15a67a8f7b642df
|
[] |
no_license
|
DataScienceGenomics/mirTarRnaSeq_Paper
|
bc6827ddf185010e7eec0f649155bcbcf9f3b3bf
|
b25318da0896c970a7690e19551c3e0f2031ff3f
|
refs/heads/main
| 2023-07-14T08:16:14.135860
| 2021-09-05T21:29:11
| 2021-09-05T21:29:11
| 403,410,406
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
cell_type_deconv_heatmap.r
|
#Paper correlationHeatmapEBV miRNA
# Script: render a heatmap of GEDIT cell-type deconvolution proportions.
library(pheatmap)
library(dplyr)
library(mirTarRnaSeq)
library(readxl)
##CellTypeDeconv
# Proportions table: rows = cell types, columns = samples (first column of
# the file supplies row names). Path is user-specific -- TODO: make relative.
CellTypeDecon<-read.table("~/Desktop/CellTypeDeconv_GEDIT.txt", as.is = TRUE, header = T, row.names = 1)
summary(CellTypeDecon)#What is max-what mean and what is min
# 2001 break points for 2000 colours: pheatmap requires length(breaks) ==
# length(col) + 1.
breaks<-seq(0,1,length.out=2001)#One element longer than the color vector
col_heat<-colorRampPalette(c("#FFFBF3","red","purple","blue"))(2000)
# log2(x + 1) compresses the proportion scale; t() puts samples on rows.
pheatmap(t(log2(CellTypeDecon+1)),breaks=breaks,col=col_heat,
fontsize_col=5,fontsize_row=5,fontsize = 6,
cellwidth=10,cellheight=10)
|
2a3b5f30a02254ac62194afb74a7e45aee73833b
|
018087b04d66f1b33ec55619ac8dbaaf18c68449
|
/plot3.R
|
71c30330946bad72b092060693f66c77dd2bd385
|
[] |
no_license
|
sajiajialong/ExData_Plotting1
|
c41f056cdd690f577b22f9a802f065d20074cc5e
|
969301adc14c983e43c6146794c17e2ef88e9bde
|
refs/heads/master
| 2021-04-06T20:04:10.204576
| 2018-03-12T20:10:39
| 2018-03-12T20:10:39
| 124,770,817
| 0
| 0
| null | 2018-03-11T15:33:09
| 2018-03-11T15:33:09
| null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
plot3.R
|
# Script: plot sub-metering energy usage for 2007-02-01/02 as line traces
# and save to plot3.png (UCI household power consumption dataset).
# NOTE(review): this dataset encodes missing values as "?"; read.table here
# does not set na.strings = "?", so the as.numeric(as.character(...)) casts
# below may emit NAs with warnings for those rows -- confirm against the data.
data<- read.table("household_power_consumption.txt",header = TRUE, sep = ";")
## turn column Date to "Date"
# Date arrives as a factor; go factor -> character -> POSIXlt -> Date.
data$Date<- as.character(data$Date)
data$Date<- strptime(data$Date,"%d/%m/%Y")
data$Date<- as.Date(data$Date)
# Keep only the two target days, then drop any incomplete rows.
target<-subset(data, Date>=as.Date("2007-02-01")& Date<=as.Date("2007-02-02"))
target<- target[complete.cases(target), ]
## turn column Sub_metering_1,2,3 into numeric
# as.character() first preserves the printed value when coercing a factor.
target$Sub_metering_1<-as.numeric(as.character(target$Sub_metering_1))
target$Sub_metering_2<-as.numeric(as.character(target$Sub_metering_2))
target$Sub_metering_3<-as.numeric(as.character(target$Sub_metering_3))
## set label on x-axis
# Tick positions: first row of each day plus the final row (row indices
# double as x coordinates because the series is plotted by index).
tick1<-min(which(target$Date==as.Date("2007-02-01")))
tick2<-min(which(target$Date==as.Date("2007-02-02")))
tick3<-max(which(target$Date==as.Date("2007-02-02")))
label<-c("Thu","Fri","Sat")
##plot
png("plot3.png")
# Draw meter 1 first with axes suppressed, then add custom axes and overlay
# meters 2 and 3 in red/blue.
plot(target$Sub_metering_1,type="l", col="black",xaxt="n",yaxt="n",xlab="",ylab = "Energy sub metering")
axis(1,at=c(tick1,tick2,tick3),labels = label)
axis(2,at=seq(0,30,10))
lines(target$Sub_metering_2,col="red")
lines(target$Sub_metering_3,col="blue")
legend("topright",legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty = c(1,1,1),col = c("black","red","blue"))
dev.off()
|
8fcff7f44aecb7b1668d7f3bb70905f943a406bc
|
200a7c74eb36dcb7b7be917e31e40f3fbd5bbe17
|
/ui.R
|
22157c05f6b7bba14de3c32856e8d9c6efc415c3
|
[] |
no_license
|
SupermercadoEmporium/Julio2014
|
9bc15ad9bb71f0eb2633664336b4fe442bebb0ca
|
f2e077fabb9cf34bdcd85ac8deb66f1414b848cf
|
refs/heads/master
| 2021-01-10T15:51:23.530492
| 2016-01-15T16:19:38
| 2016-01-15T16:19:38
| 49,674,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
ui.R
|
library(shiny)
# Define UI for application that draws a histogram
# Shiny UI for the "Emporium 2014" market-basket dashboard: two category
# pickers (antecedent/consequent), a July summary panel (confidence/lift),
# and two colour-coded "top products" panels.
# NOTE(review): `vec_aux1` is not defined in this file -- presumably created
# in global.R/server.R before the UI is built; confirm it has >= 41 elements
# since indices 3:30 and 32:41 are used below.
shinyUI(fluidPage(
# Application title
titlePanel("Emporium 2014"),
# Row of two side-by-side category selectors; element 31 of vec_aux1 is
# deliberately skipped in both choice lists.
fluidRow(
column(3,
selectInput("select", label = h3("Primera Categoria (Antecedente)", style ="color:#297418;"),
choices = c(vec_aux1[(3:30)],vec_aux1[(32:41)])), tableOutput("Julio")),
column(3,
selectInput("select2", label = h3("Segunda Categoria (Consecuente)", style = "color:#dd21d5;"),
choices =c(vec_aux1[(3:30)],vec_aux1[(32:41)])), tableOutput("Julio2"))
),
titlePanel("Julio"),
# July association-rule summary: confidence and lift tables.
sidebarLayout(
sidebarPanel( "Resumen Julio",
style = "color:#2183dd;",
tableOutput("confidenceJulio"),
tableOutput("liftJulio")
),
mainPanel()
),
# Top-5 products per selected category; green panel mirrors the first
# selector, magenta panel mirrors the second (output ids tabla*/tabla1*).
sidebarLayout(
sidebarPanel( "Productos mรกs vendidos",
textOutput("tablanamecat1Julio"),
style = "color:#297418;",
textOutput("tablaprobcat1Julio"),
textOutput("tablanamecat2Julio"),
textOutput("tablaprobcat2Julio"),
textOutput("tablanamecat3Julio"),
textOutput("tablaprobcat3Julio"),
textOutput("tablanamecat4Julio"),
textOutput("tablaprobcat4Julio"),
textOutput("tablanamecat5Julio"),
textOutput("tablaprobcat5Julio")),
sidebarPanel("Productos mรกs vendidos",
textOutput("tabla1namecat1Julio"),
style = "color:#dd21d5;",
textOutput("tabla1probcat1Julio"),
textOutput("tabla1namecat2Julio"),
textOutput("tabla1probcat2Julio"),
textOutput("tabla1namecat3Julio"),
textOutput("tabla1probcat3Julio"),
textOutput("tabla1namecat4Julio"),
textOutput("tabla1probcat4Julio"),
textOutput("tabla1namecat5Julio"),
textOutput("tabla1probcat5Julio"))
)
))
|
da6f4dfe1b0b542194315ab35865bd753ba5b7b1
|
7e0f4777f4e06b0ac72b90422ac0d9c765767755
|
/projects/websi/lc.R
|
329a9bd2e898cdf5d5ebcc0cd0814a60d8d4a612
|
[] |
no_license
|
psolymos/abmianalytics
|
edd6a040082260f85afbf4fc25c4f2726b369392
|
9e801c2c564be155124109b4888d29c80bd1340d
|
refs/heads/master
| 2023-01-30T05:00:32.776882
| 2023-01-21T05:36:23
| 2023-01-21T05:36:23
| 34,713,422
| 0
| 7
| null | 2017-01-20T19:07:59
| 2015-04-28T06:39:37
|
R
|
UTF-8
|
R
| false
| false
| 1,518
|
r
|
lc.R
|
# Script: Lorenz-curve summaries of species abundance (cure4insect data).
# First loop: compare reference vs current abundance per bird species;
# second loop: density-based Lorenz quantiles from rasterized results.
library(opticut)
library(cure4insect)
opar <- set_options(path = "w:/reports")
load_common_data()
SPP <- get_all_species("birds")
subset_common_data(id=NULL, species=SPP)
# Target coverage level: we look at the Lorenz quantile at 1 - level = 0.2.
level <- 0.8
res <- list()
for (spp in SPP) {
cat(spp, "\n")
y <- load_species_data(spp, boot=FALSE)
# Lorenz curves of row-summed reference (SA.Ref) and current (SA.Curr)
# abundance; type="L"/"p" select the L- and p-coordinates of the curve.
lc0 <- lorenz(rowSums(y$SA.Ref))
lc1 <- lorenz(rowSums(y$SA.Curr))
xt0 <- quantile(lc0, 1-level, type="L")
pt0 <- iquantile(lc0, xt0, type="p")
xt1 <- quantile(lc1, 1-level, type="L")
pt1 <- iquantile(lc1, xt1, type="p")
# Two-row summary per species: reference vs current.
res[[spp]] <- rbind(
ref= c(unclass(summary(lc0)), pt0=unname(pt0)),
curr=c(unclass(summary(lc1)), pt1=unname(pt1)))
}
pts <- t(sapply(res, function(z) z[,"pt0"]))
plot(pts, xlim=c(0.4,1), ylim=c(0.4,1))
# NOTE(review): `ind` is computed but never used below -- confirm intent.
ind <- (1-pts)/(level)
summary(t(sapply(res,function(z) z[,"G"])))
res0 <- list()
res1 <- list()
# Density fractions of the maximum at which Lorenz coordinates are read.
q <- c(0.05, 0.25, 0.5, 0.75, 0.95)
for (spp in SPP) {
cat(spp, "\n");flush.console()
y <- load_species_data(spp, boot=FALSE)
r <- rasterize_results(y)
# Current density layer ("NC") with NA cells dropped.
D <- values(r[["NC"]])
D <- D[!is.na(D)]
lc <- lorenz(D)
Dmax <- max(D)
xt <- quantile(lc, 1-level, type="L")
pt <- iquantile(lc, xt, type="p")
Lt_half <- iquantile(lc, Dmax*q, type="L")
pt_half <- iquantile(lc, Dmax*q, type="p")
tmp <- rbind(Lt=Lt_half, pt=pt_half)
colnames(tmp) <- q
res0[[spp]] <- c(unclass(summary(lc)), pt=unname(pt))
res1[[spp]] <- tmp
}
# Diagnostic plot for the *last* species processed in the loop above.
plot(lc)
abline(h=Lt_half)
abline(v=pt_half)
pc <- t(sapply(res1, function(z) z[,"0.5"]))
|
5839ff390d30fba54012e86e21c44f22f1490c95
|
453976d125156d4f98396870657e9100f7f7563f
|
/ss-es-anova-plot.R
|
a76f5c987b9976834b1e62762c3355ad75c65657
|
[] |
no_license
|
statexpert/12-002
|
190510c97c788e43e6febc69ed40eef31216f828
|
dfd8207018caa4258a7da65011fcce1a38c51f8f
|
refs/heads/master
| 2016-09-16T00:18:06.182525
| 2013-01-22T02:15:20
| 2013-01-22T02:15:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,417
|
r
|
ss-es-anova-plot.R
|
# Script: power curves for a one-way ANOVA (k = 3 groups, n = 60) as a
# function of effect size f, at two significance levels, with the effect
# sizes achieving 80% power marked on the curve.
# Relies on power.test.anova() / effect.size.anova() from functions.R.
source("functions.R")
opar <- par(no.readonly=TRUE)
# Grid of effect sizes f in [0, 1].
f <- seq.int(0, 1, length.out = 100)
size=60 # sample size used in the ANOVA power calculations
sig <- c(0.05, 0.01) # significance levels
groups <- 3
# Power matrix: rows = effect sizes, columns = significance levels.
# rep(f, each = 2) interleaves f so that sig recycles across columns.
tab.power <- matrix(
mapply(power.test.anova, f = rep(f, each = 2), sig = sig, groups = groups, n = size),
ncol=2, byrow=TRUE, dimnames=list(f, sig))
# Plot of power vs. effect size for each significance level (n = 60);
# the dashed horizontal line marks the conventional 80% power target.
colors <- rainbow(length(sig))
par(mar=c(6, 4, 4, 2) + 0.1, xpd = TRUE)
matplot(f, tab.power, type = "l", lwd = 2, lty = 1, col = colors, cex.axis = 0.8, xlab = "", ylab = "")
abline(h = 0.8, lty = "longdash", lwd = 0.5, xpd = FALSE)
title(main = paste0("ะัะฐัะธะบ ะทะฐะฒะธัะธะผะพััะธ ะผะพัะฝะพััะธ\nะพั ัะฐะทะผะตัะฐ ัััะตะบัะฐ (n=", size, ", k=", groups, ")"), xlab = "ะ ะฐะทะผะตั ัััะตะบัะฐ", ylab = "ะะพัะฝะพััั")
legend("bottom", inset=c(0, -0.45), legend = c("p=0.05", "p=0.01"), col = colors, lwd = 1, lty = 1, bty = "n", xpd = TRUE, xjust=0, yjust=0.5, ncol = 2)
# Effect sizes that reach 80% power at each significance level; mark them
# with points, vertical guides and numeric labels.
points <- sort(mapply(FUN = effect.size.anova, sig = sig, groups = groups, n = size))
points(points, rep(0.8, length(points)), pch = 20)
abline(v = points, lty = "longdash", lwd = 0.5, xpd = FALSE)
text((points + 0.03), rep(0.03, length(points)), labels = points, cex=0.7)
par(opar)
|
ba6faddf4666d03e931b1bce73a55b4ff7664707
|
fd56b6a77bbb080ac7d1e3109446b635ae8eed69
|
/man/kable_summary.Rd
|
1879a274ce6356e3125ce063635822c97f159604
|
[] |
no_license
|
philliplab/MotifBinner2
|
325c010b18d662be1abf700e6028eaf138705ad5
|
734b24c2f9d009cd6c8d3ea4a8f8085ac9d4a7dd
|
refs/heads/master
| 2021-07-01T12:36:35.369658
| 2020-09-07T15:11:15
| 2020-09-07T15:11:15
| 149,005,542
| 0
| 0
| null | 2018-09-16T14:51:06
| 2018-09-16T14:51:05
| null |
UTF-8
|
R
| false
| true
| 342
|
rd
|
kable_summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genSummary.R
\name{kable_summary}
\alias{kable_summary}
\title{formats a summary table for markdown}
\usage{
kable_summary(summary_tab)
}
\arguments{
\item{summary_tab}{A data.frame as produced by genSummary}
}
\description{
formats a summary table for markdown
}
|
656f3db91c96af4ac49f02a02798758cb740f4ef
|
2f4946c9194041457d3aeec7f53da815789fc375
|
/man/unique_pairs.Rd
|
0ef170b5b5597ea6f2fb9e2673924272276504e3
|
[] |
no_license
|
M-U-UNI-MA/tpfunctions
|
ce5ca7406caccb849cca28ed089af7d72d7cb918
|
54d484c9ab499ac99acd7889612ee073daf3c02f
|
refs/heads/master
| 2020-03-28T17:07:43.348975
| 2019-01-14T15:12:19
| 2019-01-14T15:12:19
| 148,760,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
unique_pairs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measures.R
\name{unique_pairs}
\alias{unique_pairs}
\title{Unique Combinations of all Elements within a Vector}
\usage{
unique_pairs(vec)
}
\arguments{
\item{vec}{A vector whose elements are to be combined pairwise.}
}
\value{
A Dataframe with (n^2-n)/2 observations
}
\description{
Given a vector of elements, this function calculates the unique combinations between all
elements.
}
\examples{
# unique pairs of 3 companies, a dataframe with (3^2-3)/2 = 3 observations is returned
unique_pairs(c("comp_1", "comp_2", "comp_3"))
}
|
a405af988689b8c56bb65c035ef355124d9d739d
|
a4c9ec280e70749cf4aac9d30bdda6ea0173e10e
|
/plot1.R
|
c0d0a34fe059289edbb28528b2082ef3d54f6876
|
[] |
no_license
|
charlestjpark/EnergyDataVisualization
|
7b73d731dfd2a9e46aefd640f79e6f1a2143ef20
|
047c8e942fb924a42308a8eda7394082df1a72dd
|
refs/heads/master
| 2021-01-01T05:18:04.674083
| 2016-05-27T04:32:08
| 2016-05-27T04:32:08
| 59,803,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
plot1.R
|
## Fetch the UCI household power consumption archive into a temporary file,
## read the semicolon-separated table straight out of the zip, then discard
## the download.
zip_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip_path <- tempfile()
download.file(zip_url, zip_path)
power <- read.table(unz(zip_path, "household_power_consumption.txt"), header = TRUE, sep = ";")
unlink(zip_path)
## Restrict to the observations from 2007-02-01 and 2007-02-02.
## read.table delivers Date as a factor, so convert it to Date class first.
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
feb_days <- as.Date(c("2007-02-01", "2007-02-02"))
graphdata <- subset(power, Date %in% feb_days)
## Global_active_power is also a factor; coercing via character preserves the
## printed value (a direct as.numeric would return factor level codes).
graphdata$Global_active_power <- as.numeric(as.character(graphdata$Global_active_power))
## Render the histogram directly into a PNG device.
png(filename = "plot1.png")
hist(graphdata$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency")
## Close the device so the file is flushed and viewable.
dev.off()
|
ffd28e6da45cfc4913d1f63e80a1a1e059d4091e
|
01773ed6fe41297b852dca890ade98ca2c6ee22e
|
/R/FreundlichPlot.R
|
d6f8972ab20b8effe9ca9ccb97e55d8391d8b7f3
|
[
"MIT"
] |
permissive
|
devalc/Sorption
|
c481acb0756c832a6a2406af2dfb05a169b87fda
|
b1f74aa562971e7d2b5c6a351809053b3cc2bc0a
|
refs/heads/master
| 2022-01-22T08:01:17.794215
| 2021-12-29T21:30:39
| 2021-12-29T21:30:39
| 158,130,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,160
|
r
|
FreundlichPlot.R
|
#' Plot the Freundlich model fit to the experimental data
#'
#' This function plots the Freundlich isotherm model fit: log10(Qe) is
#' regressed on log10(Ce) and displayed with its regression line, confidence
#' band, Pearson correlation (r, p-value) and the fitted-line equation.
#' @param Ce equilibrium solution concentrations in mg/l
#' @param Qe retention by solid (adsorption) mg/kg
#' @param cor_lab_x,cor_lab_y location on the plot to place pearson r and p-value
#' @param eq_lab_x,eq_lab_y location on the plot to place equation of the fitted line
#' @return plot
#' @import ggpubr
#' @import ggplot2
#' @import IDPmisc
#' @export
FreundlichPlot <- function(Ce, Qe, cor_lab_x , cor_lab_y ,
                           eq_lab_x, eq_lab_y){
  # Freundlich linearization: log10(Qe) vs log10(Ce) should be linear.
  # NOTE: the original body assigned `z` twice and fitted an unused lm()
  # (`fit`/`coeff` were never read) -- ggpubr computes the regression itself,
  # so the dead code is removed here.
  z <- data.frame(x = log10(Ce), y = log10(Qe))
  ggscatter(x = "x", y = "y", data = z, xlab = "log10 [Ce (mg/L)]", ylab = " log10 [Qe (mg/kg)]", add = "reg.line",
            conf.int = TRUE,
            add.params = list(color = "blue",
                              fill = "lightgray")) +
    stat_cor(method = "pearson", label.x = cor_lab_x, label.y = cor_lab_y) + # Add correlation coefficient
    stat_regline_equation(label.y = eq_lab_y, label.x = eq_lab_x)
}
|
03984cc068bb748b0248f51351b7439aca07589c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Sleuth3/examples/ex1715.Rd.R
|
c448c6cabb098734efd9730277f30d038eaed4ec
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 146
|
r
|
ex1715.Rd.R
|
# Auto-extracted example from the Sleuth3 help page for the `ex1715`
# dataset ("Church Distinctiveness"): attach the package and print the
# dataset's structure.
library(Sleuth3)
### Name: ex1715
### Title: Church Distinctiveness
### Aliases: ex1715
### Keywords: datasets
### ** Examples
str(ex1715)
|
0800e6a56010a1ed45efb1583320ac873ae52fd0
|
01b1446adcc5612fe9a1dd49172a87c59200882b
|
/man/gi.Rd
|
0f5cee5c4eccd5163dcbc091b163f95bded809be
|
[] |
no_license
|
davidearn/epigrowthfit
|
de5f046c123aecff7ca4b88d484e438b25e5c8cf
|
36aac5d2b33c064725434bf298ac008e3929f9d6
|
refs/heads/master
| 2022-09-30T14:35:07.931181
| 2022-09-18T21:28:28
| 2022-09-18T21:28:28
| 250,906,109
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,560
|
rd
|
gi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gi.R
\name{gi}
\alias{gi}
\alias{dgi}
\alias{pgi}
\alias{qgi}
\alias{rgi}
\title{Generation interval distribution}
\usage{
dgi(x, latent, infectious)
pgi(q, latent, infectious)
qgi(p, latent, infectious)
rgi(n, latent, infectious)
}
\arguments{
\item{x, q}{A numeric vector listing generation intervals.}
\item{latent, infectious}{Numeric vectors such that \code{latent[i]} and \code{infectious[i]}
are the probabilities that the latent and infectious periods,
respectively, are \code{i} units of time.
It is sufficient to supply probability weights, as both vectors
are divided by their sums internally.}
\item{p}{A numeric vector listing probabilities.}
\item{n}{A non-negative integer indicating a sample size.
If \code{length(n) > 1}, then \code{length(n)} is taken
to be the sample size.}
}
\value{
A numeric vector with length equal to the that of the first argument,
or length \code{n} in the case of \code{rgi}.
}
\description{
Generation interval
density function (\code{dgi}), distribution function (\code{pgi}),
quantile function (\code{qgi}), and sampling (\code{rgi}).
Results are conditional on supplied latent and infectious period
distributions. It is assumed
\itemize{
\item that the latent period and infectious waiting time are independent,
\item that infectiousness is constant over the infectious period, and
\item that the latent and infectious periods are positive and integer-valued
(in arbitrary but like units of time).
}
}
\examples{
data(plague_latent_period)
latent <- plague_latent_period$relfreq
m <- length(latent)
data(plague_infectious_period)
infectious <- plague_infectious_period$relfreq
n <- length(infectious)
## Histogram of samples
y <- rgi(1e06, latent, infectious)
hist(y, breaks = seq(0, m + n + 1), freq = FALSE, las = 1,
ylab = "relative frequency",
main = "")
## Density and distribution functions
x <- seq(0, m + n + 1, by = 0.02)
fx <- dgi(x, latent, infectious)
Fx <- pgi(x, latent, infectious)
plot(x, fx, type = "l", las = 1, # consistent with histogram
xlab = "generation interval",
ylab = "density function")
plot(x, Fx, type = "l", las = 1,
xlab = "generation interval",
ylab = "distribution function")
## Quantile function
p <- seq(0, 1, by = 0.001)
qp <- qgi(p, latent, infectious)
plot(p, qp, type = "l", las = 1,
xlab = "probability",
ylab = "quantile function")
}
\references{
Svensson, Å. A note on generation times in epidemic models.
Math Biosci. 2007;208:300--11.
}
|
1e25f497d7e2bec553213e66d7d3a2e7f0a1e46a
|
27c994854607957cfde15e552b205763e8e26f71
|
/code/reproduce.R
|
3119df8d80f6c000ddbac4cf1dfa25a08c019826
|
[] |
no_license
|
duanby/I-cube
|
f251b0e1fb6f49d5c87f1e41f06086a2cdbea50d
|
4fb8d070f974dfdaddec5f12117da664c44e615d
|
refs/heads/main
| 2023-03-08T12:58:16.346584
| 2021-02-20T00:58:58
| 2021-02-20T00:58:58
| 338,975,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,184
|
r
|
reproduce.R
|
# Reproduction script, part 1: all experiments for the "sparse_pos_bias"
# treatment. Each section loops over an effect-size (C_delta) or mismatch
# (eps) grid, runs the experiment defined in setup.R, and saves the result
# list to ../result/<name>.Rdata.
source("setup.R")
## ---- Unpaired design, C_delta in 0..5, incl. CATE/RF method variants ----
treatment_type = "sparse_pos_bias"
Cd_seq = seq(0, 5, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "methods_unpair",
value = c("Crossfit-I-cube", "MaY-I-cube", "linear-BH",
"Crossfit-I-cube-CATE", "MaY-I-cube-RF")),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_unpair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,".Rdata",sep = ""))
## ---- Paired design, C_delta in 0..2, paired vs unpaired methods ----
treatment_type = "sparse_pos_bias"
Cd_seq = seq(0, 2, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "paired", value = TRUE),
list(name = "methods_pair",
value = c("pair-Crossfit", "pair-MaY", "unpair-Crossfit", "unpair-MaY")),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_pair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/paired_", treatment_type,".Rdata",sep = ""))
## ---- Paired design with pairing mismatch eps in 0..0.5 (C_delta fixed at 2) ----
treatment_type = "sparse_pos_bias"
eps_seq = seq(0, 0.5, length.out = 6)
result = list()
for (eps in eps_seq) {
print(eps)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "paired", value = TRUE),
list(name = "eps", value = eps),
list(name = "methods_pair",
value = c("pair-Crossfit", "pair-MaY", "unpair-Crossfit", "unpair-MaY")),
list(name = "C_delta", value = 2))
result[[as.character(eps)]] = experiment_pair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/paired_", treatment_type,"_mismatch.Rdata",sep = ""))
## ---- Same as above with a larger mismatch grid eps in 0..2.5 ----
treatment_type = "sparse_pos_bias"
eps_seq = seq(0, 2.5, length.out = 6)
result = list()
for (eps in eps_seq) {
print(eps)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "paired", value = TRUE),
list(name = "eps", value = eps),
list(name = "methods_pair",
value = c("pair-Crossfit", "pair-MaY", "unpair-Crossfit", "unpair-MaY")),
list(name = "C_delta", value = 2))
result[[as.character(eps)]] = experiment_pair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/paired_", treatment_type,"_mismatch_large.Rdata",sep = ""))
# Reproduction script, part 2: unpaired experiments for the remaining
# treatment types (linear, sparse_oneside, sparse_twoside). All three use
# the same C_delta grid 0..5 and the same three unpaired methods.
## ---- Linear treatment effect ----
treatment_type = "linear"
Cd_seq = seq(0, 5, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "methods_unpair",
value = c("Crossfit-I-cube", "MaY-I-cube", "linear-BH")),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_unpair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,".Rdata",sep = ""))
## ---- Sparse one-sided treatment effect ----
treatment_type = "sparse_oneside"
Cd_seq = seq(0, 5, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "methods_unpair",
value = c("Crossfit-I-cube", "MaY-I-cube", "linear-BH")),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_unpair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,".Rdata",sep = ""))
## ---- Sparse two-sided treatment effect ----
treatment_type = "sparse_twoside"
Cd_seq = seq(0, 5, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "methods_unpair",
value = c("Crossfit-I-cube", "MaY-I-cube", "linear-BH")),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_unpair(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,".Rdata",sep = ""))
# Reproduction script, part 3: subgroup experiments. The first section is
# the unpaired subgroup_even design at n = 2000; the remaining sections run
# paired subgroup designs at n = 1000 over smaller C_delta grids.
## ---- subgroup_even, unpaired, n = 2000, C_delta in 0..5 ----
treatment_type = "subgroup_even"
Cd_seq = seq(0, 5, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "n", value = 2000),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_subgroup(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,".Rdata",sep = ""))
## ---- subgroup_even, paired, n = 1000, C_delta in 0..1 ----
treatment_type = "subgroup_even"
Cd_seq = seq(0, 1, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "n", value = 1000),
list(name = "paired", value = TRUE),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_subgroup(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,"_paired.Rdata",sep = ""))
## ---- subgroup_smooth, paired, n = 1000, C_delta in 0..1 ----
treatment_type = "subgroup_smooth"
Cd_seq = seq(0, 1, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "n", value = 1000),
list(name = "paired", value = TRUE),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_subgroup(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,"_paired.Rdata",sep = ""))
## ---- subgroup_sparse, paired, n = 1000, C_delta in 0..1.5 ----
treatment_type = "subgroup_sparse"
Cd_seq = seq(0, 1.5, length.out = 6)
result = list()
for (Cd in Cd_seq) {
print(Cd)
para_vary = list(list(name = "treatment_type", value = treatment_type),
list(name = "n", value = 1000),
list(name = "paired", value = TRUE),
list(name = "C_delta", value = Cd))
result[[as.character(Cd)]] = experiment_subgroup(para_vary)
}
save(result, file=paste(dirname(getwd()),"/result/", treatment_type,"_paired.Rdata",sep = ""))
|
12f2e76c883484721169cb592da1716be74d1643
|
d8e0425233afe5226f3ac249ecdd2446b4f19ddb
|
/NEW2HomePNDatabaseAL_R_2018-01-04_1541_growth.r
|
449f530b84ffdafd5bb11655b8fb18a14bf09562
|
[] |
no_license
|
akshithrk/Jan-4-2018-Export
|
557fccc5b3f9c4a6cb463bb945d3354f408f9907
|
3c04fbb51bef9a8d68e9624575c874010027c0a4
|
refs/heads/master
| 2021-05-14T12:59:58.451150
| 2018-02-23T18:59:13
| 2018-02-23T18:59:13
| 116,422,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,874
|
r
|
NEW2HomePNDatabaseAL_R_2018-01-04_1541_growth.r
|
# REDCap export post-processing: read the growth-data CSV, attach Hmisc
# labels, build factor versions of the coded columns, drop rows without a
# repeat instrument, parse the measurement date, and write a cleaned CSV.
#Clear existing data and graphics
# NOTE(review): rm(list=ls()) in a script is discouraged, kept for
# compatibility with how this export script is normally run.
rm(list=ls())
graphics.off()
#Load Hmisc library
library(Hmisc)
#Read Data
# data=read.csv('NEW2HomePNDatabaseAL_DATA_2018-01-04_1541_growth.csv')
# data=read.csv('NEW2HomePNDatabaseAL_DATA_2018-01-11_1032_growth.csv')
data=read.csv('NEW2HomePNDatabaseAL_DATA_2018-01-15_1421_growth_data.csv')
#Setting Labels
label(data$mrn)="BCH Medical Record Number"
label(data$redcap_repeat_instrument)="Repeat Instrument"
label(data$redcap_repeat_instance)="Repeat Instance"
label(data$growth_date)="Date of measurement"
label(data$growth_time)="Time of measurement"
label(data$growth_inpt_outpt)="Type of visit"
label(data$growth_ht_cm)="Height (cm)"
label(data$growth_wt_kg)="Weight (kg)"
label(data$growth_data_complete)="Complete?"
#Setting Units
#Setting Factors(will create new variable for factors)
data$redcap_repeat_instrument.factor = factor(data$redcap_repeat_instrument,levels=c("active_on_service","central_line","inpatient_encounters","bloodstream_infections","nutrition_intake","growth_data","liver_disease","outpatient_encounters","interventions"))
data$growth_inpt_outpt.factor = factor(data$growth_inpt_outpt,levels=c("1","2"))
data$growth_data_complete.factor = factor(data$growth_data_complete,levels=c("0","1","2"))
levels(data$redcap_repeat_instrument.factor)=c("Active On Service","Central Line","Inpatient Encounters","Bloodstream Infections","Nutrition Intake","Growth Data","Liver Disease","Outpatient Encounters","Interventions")
levels(data$growth_inpt_outpt.factor)=c("Inpatient","Outpatient")
levels(data$growth_data_complete.factor)=c("Incomplete","Unverified","Complete")
# FIX: the original called filter(), but dplyr is never attached in this
# script, so filter() resolved to stats::filter() and failed. Use base
# subsetting instead (empty strings come back as "" from read.csv, not NA).
data <- data[data$redcap_repeat_instrument != "", ]
data$growth_date <- as.Date(data$growth_date, format = "%m/%d/%Y" )
# data$growth_date <- as.Date(data$growth_date)
growth.df <- data
rm(data)
write.csv(growth.df,"growth_Jan 18.csv")
|
6bbc531424bd987f35d89462da7dadaa458df54b
|
cf606e7a3f06c0666e0ca38e32247fef9f090778
|
/test/integration/example-models/Bayesian_Cognitive_Modeling/ParameterEstimation/DataAnalysis/Correlation_1_Stan.R
|
595c177cf1dc4c8687b7a33dbe44bc55e66f74a1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
nhuurre/stanc3
|
32599a71d5f82c759fd6768b8b699fb5f2b2d072
|
5612b357c1cd5a08cf2a57db97ce0e789bb87018
|
refs/heads/master
| 2023-07-05T02:27:08.083259
| 2020-11-12T15:37:42
| 2020-11-12T15:37:42
| 222,684,189
| 0
| 0
|
BSD-3-Clause
| 2019-11-19T11:50:39
| 2019-11-19T11:50:38
| null |
UTF-8
|
R
| false
| false
| 3,416
|
r
|
Correlation_1_Stan.R
|
# Pearson correlation estimated with Stan (Bayesian Cognitive Modeling,
# "Data Analysis" chapter): bivariate normal likelihood with a uniform
# prior on r via the <lower=-1,upper=1> constraint.
# clears workspace:
rm(list=ls())
library(rstan)
#### Notes to Stan model #######################################################
## 1) Multivariate normal distribution in Stan uses covariance matrix instead of
## precision matrix.
## 2) Multivariate normal distribution can be (and is) also vectorized.
## 3) Warnings may occur during sampling, ignore them.
################################################################################
# NOTE(review): the model string uses the old Stan assignment operator `<-`
# (removed in modern Stan in favour of `=`) -- confirm the installed rstan
# version still accepts it before running.
model <- "
// Pearson Correlation
data {
int<lower=0> n;
vector[2] x[n];
}
parameters {
vector[2] mu;
vector<lower=0>[2] lambda;
real<lower=-1,upper=1> r;
}
transformed parameters {
vector<lower=0>[2] sigma;
cov_matrix[2] T;
// Reparameterization
sigma[1] <- inv_sqrt(lambda[1]);
sigma[2] <- inv_sqrt(lambda[2]);
T[1,1] <- square(sigma[1]);
T[1,2] <- r * sigma[1] * sigma[2];
T[2,1] <- r * sigma[1] * sigma[2];
T[2,2] <- square(sigma[2]);
}
model {
// Priors
mu ~ normal(0, inv_sqrt(.001));
lambda ~ gamma(.001, .001);
// Data
x ~ multi_normal(mu, T);
}"
# Choose a dataset:
dataset <- 1
# The datasets:
# Dataset 1: 11 (x, y) pairs; dataset 2 is the same 11 pairs duplicated,
# which doubles n without changing the sample correlation.
if (dataset == 1) {
x <- matrix(c( .8, 102,
1.0, 98,
.5, 100,
.9, 105,
.7, 103,
.4, 110,
1.2, 99,
1.4, 87,
.6, 113,
1.1, 89,
1.3, 93), nrow=11, ncol=2, byrow=T)
}
if (dataset == 2) {
x <- matrix(c( .8, 102,
1.0, 98,
.5, 100,
.9, 105,
.7, 103,
.4, 110,
1.2, 99,
1.4, 87,
.6, 113,
1.1, 89,
1.3, 93,
.8, 102,
1.0, 98,
.5, 100,
.9, 105,
.7, 103,
.4, 110,
1.2, 99,
1.4, 87,
.6, 113,
1.1, 89,
1.3, 93), nrow=22,ncol=2,byrow=T)
}
n <- nrow(x) # number of people/units measured
data <- list(x=x, n=n) # to be passed on to Stan
# Single chain initialized at r = 0, standard-normal-ish means, unit precisions.
myinits <- list(
list(r=0, mu=c(0, 0), lambda=c(1, 1)))
# parameters to be monitored:
parameters <- c("r", "mu", "sigma")
# The following command calls Stan with specific options.
# For a detailed description type "?rstan".
samples <- stan(model_code=model,
data=data,
init=myinits, # If not specified, gives random inits
pars=parameters,
iter=10000,
chains=1,
thin=1,
# warmup = 100, # Stands for burn-in; Default = iter/2
# seed = 123 # Setting seed; Default is random seed
)
# Posterior draws of the correlation parameter.
r <- extract(samples)$r
#Frequentist point-estimate of r:
freq.r <- cor(x[,1],x[,2])
#make the two panel plot:
windows(width=9,height=6) #this command works only under Windows!
layout(matrix(c(1,2),1,2))
layout.show(2)
#some plotting options to make things look better:
par(cex.main=1.5, mar=c(5, 6, 4, 5) + 0.1, mgp=c(3.5, 1, 0), cex.lab=1.5,
font.lab=2, cex.axis=1.3, bty = "n", las=1)
# data panel:
plot(x[,1],x[,2], type="p", pch=19, cex=1)
# correlation panel: posterior density of r with the frequentist estimate
# marked by a dashed vertical line.
plot(density(r, from=-1,to=1), main="", ylab="Posterior Density",
xlab="Correlation", lwd=2)
lines(c(freq.r, freq.r), c(0,100), lwd=2, lty=2)
|
ecdb6c71e89a62d3d494850a3b1c2e4fcd52f46d
|
ef475903010e72b4777d62f40f80f3e105240dbd
|
/R/Player_Analysis.R
|
f5efd3ad90c82e217f87db47be94a7fe43c0fde8
|
[
"MIT"
] |
permissive
|
sujoydc/DS-611-Project
|
125a2f6a861e9ad0c053f5fd7ab8b98f0676ed3b
|
65063eb74c62c7589bf528c17a2fba3e58fe60b8
|
refs/heads/master
| 2020-07-31T19:31:34.165324
| 2019-10-26T02:45:40
| 2019-10-26T02:45:40
| 210,729,420
| 1
| 1
|
MIT
| 2019-10-22T21:28:18
| 2019-09-25T01:27:38
|
R
|
UTF-8
|
R
| false
| false
| 4,873
|
r
|
Player_Analysis.R
|
# Exploratory analysis of Olympic athletes under 19 years old, sliced by
# country tier, sex and season (athlete_events.csv).
# FIX: the original file contained bare separator lines (`--`, `====...`,
# `----...`) between sections; those are R syntax errors that prevent the
# whole script from parsing. They are converted to comments below; all
# queries and plotting calls are unchanged.
library(sqldf)
library(ggplot2)
library(plyr)
# NOTE(review): setwd() in a script is discouraged -- prefer paths relative
# to the project root.
setwd("/Users/gogol/Documents/Utica/DSC-611-Z1/Module8/DS-611-Project")
olympic <- read.csv("./data/athlete_events.csv", header = TRUE)
#1. player younger than 19 years old
#USA male/female players
usa_tot <- sqldf("SELECT year,COUNT(*) olympic FROM olympic WHERE noc ='USA' and Age < 19 and year >1999 group by year order by year")
usa_male <- sqldf("SELECT year,COUNT(*) male FROM olympic WHERE noc ='USA' and Age < 19 and year >1999 and Sex = 'M' group by year order by year")
usa_female <- sqldf("SELECT year,COUNT(*) female FROM olympic WHERE noc ='USA' and Age < 19 and year >1999 and Sex ='F'group by year order by year")
year <- sqldf("SELECT distinct year FROM olympic WHERE year >1999 order by year")
# --
#2. Non top 10 Medal winning countries \n-- player under 19 (Summer Olympic)
non_top <- sqldf("SELECT year, COUNT(*) player FROM olympic WHERE noc not in ('CAN','CHN','USA','RUS','EUN','RFA','FRG','GER','GRB','JPN','KOR','ITA','NED','NOR','POR','SWE','SWZ','URS','ESP') and Age < 19 and year in ('2000','2004','2008','2012','2016') group by year order by year")
nontop_g <- ddply(non_top, c("Year", "player"))
ggplot(nontop_g, aes(x=Year, y=player, colour=player)) + geom_line() + geom_point() +
xlab('Year') +
ylab('Player')+ labs(title = "Non top 10 Medal winning countries \n-- player under 19 (Summer Olympic)",
x = "From 2000 to 2016",
y = "Player number",
fill = "blue") + scale_x_continuous("Year", breaks=seq(2000, 2016, 4))
#==================
#3. Line chart - Medal winning second tier countries - Summer olympic player under 19
Summer_2tier <- sqldf("SELECT year,COUNT(*) player FROM olympic WHERE noc in
('AUS','NED','HUN','BRA','ESP','KEN','JAM','CRO','CUB','NZL','CAN','UZB','KAZ','COL','SUI','IRI','GRE','ARG','DEN','SWE','RSA')
and Age < 19 and year in ('2000','2004','2008','2012','2016') group by year order by year")
sum_pg <- ddply(Summer_2tier, c("Year", "player"))
ggplot(sum_pg, aes(x=Year, y=player, colour=player)) + geom_line() + geom_point() +
xlab('Year') +
ylab('Player')+ labs(title = "Medal winning second tier countries \n -- player under 19 in Olympic event",
x = "From 2000 to 2016",
y = "Player",
fill = "blue") + scale_x_continuous("Year", breaks=seq(2000, 2016, 4))
# ==============================
#4. Player barchart - Medal winning second tier countries - Summer Olympic player under 19
#split male/female
Summer_2tier <- sqldf("SELECT year,sex,COUNT(*) player FROM olympic WHERE noc in
('AUS','NED','HUN','BRA','ESP','KEN','JAM','CRO','CUB','NZL','CAN','UZB','KAZ','COL','SUI','IRI','GRE','ARG','DEN','SWE','RSA')
and Age < 19 and year in ('2000','2004','2008','2012','2016') group by year,sex order by year")
sum_pdf <- as.data.frame(Summer_2tier)
sum_pg2 <- ddply(Summer_2tier, c("Year", "player", "Sex"))
spg2 <-ggplot(sum_pdf, aes(Year, player))
spg2 + geom_bar(stat = "identity", aes(fill = Sex),width=1) +theme_minimal() +
xlab('Year') +
ylab('Player')+ labs(title = "Medal winning second tier countries \n-- player under 19 (Summer Olympic)",
x = "From 2000 to 2016",
y = "Player number",
fill = "blue") + scale_x_continuous("Year", breaks=seq(2000, 2016, 4))
# ------------------------------
#5. Player barchart - Medal winning second tier countries - Winter Olympic player under 19
#Second tier medal countries - Winter olympic
Winter_2tier <- sqldf("SELECT year,sex, COUNT(*) player FROM olympic WHERE noc in
('JPN','ITA','OAR','CZE','BLR','CHN','SVK','FIN','GBR','POL','HUN','UKR','AUS','SLO','BEL','NZL','ESP','KAZ','LAT', 'LIE')
and Age < 19 and year in ('2002','2006','2010','2014','2018') group by year,sex order by year");
wdf <- as.data.frame(Winter_2tier)
wpg <- ddply(Winter_2tier, c("Year", "player", "Sex"))
wpg <-ggplot(wpg, aes(Year, player))
wpg +geom_bar(stat = "identity", aes(fill = Sex),width=1) +
labs(title = "Medal winning second tier countries \n-- player under 19 (Winter Olympic)",
x = "From 2002 to 2018",
y = "Player number") + scale_x_continuous("Year", breaks=seq(2002, 2018, 4))
# --------------------------
#6. Country barchart - Medal winning second tier countries - Winter olympic
Winter_top10_20 <- sqldf("SELECT year, noc, COUNT(*) player FROM olympic WHERE noc in
('JPN','ITA','RUS','CZE','BLR','CHN','SVK','FIN','GBR','POL')
and Age < 19 and year in ('2002','2006','2010','2014','2018') group by year,noc order by year");
wdf <- as.data.frame(Winter_top10_20)
#wp2 <- ddply(wdf, c("Year", "player", "NOC"))
wpg <-ggplot(Winter_top10_20, aes(Year, player))
wpg +geom_bar(stat = "identity", aes(fill = NOC),width=1) +
labs(title = "Medal winning second tier countries \n-- player under 19 (Winter Olympic)",
x = "From 2002 to 2018",
y = "Player number") + scale_x_continuous("Year", breaks=seq(2002, 2018, 4))
# ---------------------------
|
edbd47758fe7d9b7dddadd8c45bf22b55e5c19b3
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Gent-Rowley/Connect5/cf_5_5x6_w_/cf_5_5x6_w_.R
|
e23ba46b608573078ca88c65fa9820a0ca180c92
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
cf_5_5x6_w_.R
|
1859cc01ce95f474a4043bf6cccb6dfc cf_5_5x6_w_.qdimacs 97530 26380
|
f9a4ab59f50cffb8c3cd78e4a4e3324bc72fc801
|
b1d25fd8d0cb2e8806c9c84dd11f8f7010f0d76f
|
/tests/testthat.R
|
72547d1eb766941cd75878df7ac98969c6300a0c
|
[] |
no_license
|
bigpas/gkchestertonr
|
a16f5a587d078a154017704e2fb416cf8d49c628
|
e2e19766f7fa976b42b04aa2219989248adbb7f6
|
refs/heads/master
| 2020-05-05T02:36:11.085884
| 2019-04-07T22:32:17
| 2019-04-07T22:32:17
| 179,643,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.R
|
library(testthat)
library(gkchestertonr)
test_check("gkchestertonr")
|
ff21a6e3db2e712950672f417bcd22b51d237b2d
|
7f3cbd23be4fc8ece15b7143f4167d1290953b09
|
/man/wheeler.smith.Rd
|
dabe8ca672487ececbbb7d3434c72159044e2347
|
[] |
no_license
|
Libardo1/koRpus
|
98fbdea632cf7b816293e35922b7433bca5322d8
|
628c295c1c69e7a71711a4ae5e8229b1a4af2c73
|
refs/heads/master
| 2021-01-18T08:19:34.961113
| 2014-03-19T00:00:00
| 2014-03-19T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,759
|
rd
|
wheeler.smith.Rd
|
\name{wheeler.smith}
\alias{wheeler.smith}
\title{Readability: Wheeler-Smith Score}
\usage{
wheeler.smith(txt.file, hyphen = NULL, parameters = c(syll = 2), ...)
}
\arguments{
\item{txt.file}{Either an object of class
\code{\link[koRpus]{kRp.tagged-class}}, a character
vector which must be be a valid path to a file containing
the text to be analyzed, or a list of text features. If
the latter, calculation is done by
\code{\link[koRpus:readability.num]{readability.num}}.}
\item{hyphen}{An object of class kRp.hyphen. If
\code{NULL}, the text will be hyphenated automatically.}
\item{parameters}{A numeric vector with named magic
numbers, defining the relevant parameters for the index.}
\item{...}{Further valid options for the main function,
see \code{\link[koRpus:readability]{readability}} for
details.}
}
\value{
An object of class
\code{\link[koRpus]{kRp.readability-class}}.
}
\description{
This is just a convenient wrapper function for
\code{\link[koRpus:readability]{readability}}.
}
\details{
This function calculates the Wheeler-Smith Score. In
contrast to \code{\link[koRpus:readability]{readability}},
which by default calculates all possible indices, this
function will only calculate the index value.
If \code{parameters="de"}, the calculation stays the same,
but grade placement is done according to Bamberger &
Vanecek (1984), that is for german texts.
}
\examples{
\dontrun{
wheeler.smith(tagged.text)
}
}
\references{
Bamberger, R. & Vanecek, E. (1984).
\emph{Lesen--Verstehen--Lernen--Schreiben}. Wien: Jugend
und Volk.
Wheeler, L.R. & Smith, E.H. (1954). A practical readability
formula for the classroom teacher in the primary grades.
\emph{Elementary English}, 31, 397--399.
}
\keyword{readability}
|
4b573caa6fae04a8879aa2f4256d29432db53c9a
|
89f471a1facf26cba075e79ad778f58c1d03a175
|
/R/stem_leaf_display.R
|
18103cfd1dfbc10d3b8c276f8d08d5e794d229a6
|
[
"MIT"
] |
permissive
|
deandevl/RplotterPkg
|
a90d229946639235949483f595b9ee8c5eeab101
|
5a70e51eeb45d84685e4fddc9a9f7bd9e68f089a
|
refs/heads/main
| 2023-05-12T06:55:22.537757
| 2023-05-01T09:47:17
| 2023-05-01T09:47:17
| 230,162,174
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,066
|
r
|
stem_leaf_display.R
|
#' Function is a wrapper around aplpack::stem.leaf that provides one or more stem and leaf display(s).
#'
#' Function accepts a named list of numeric vectors from which stem and leaf displays
#' are provided.
#'
#' @param x The named list of numeric vectors from which stem and leaf displays are provided.
#' @param unit Leaf unit, as a power of 10 (e.g. 100, 0.01). The default is 1.
#' @param m Number of parts (1, 2, 5) into which each stem will be separated. The default is 1.
#' @param min_val Optional numeric that sets the smallest non-outlying value.
#' @param max_val Optional numeric that sets the largest non-outlying value.
#' @param outliers A logical which if TRUE (the default), outliers are placed on LO and HI stems
#' @param depths A logical which if TRUE (the default), print a column of "depths" to the left of the stems
#' @param col_width A numeric that sets the display column widths in cm. The default is 4, which
#' works when \code{depths} is FALSE. You may need to increase this value to avoid cutting off wide leaf values.
#' @param row_height A numeric that sets the display row height in cm. The default is 0.5. You may need to
#' decrease this value for smaller font sizes and longer stem values.
#' @param font_sz A numeric that sets the display's font size. The default is 11.
#' @param heading_color A string that sets the heading's color in name or hex. The default is "black".
#' @param display_grob A logical that if TRUE (the default) will display the TableGrob.
#'
#' @importFrom aplpack stem.leaf
#'
#' @author Rick Dean
#'
#' @return A TableGrob object if \code{display_grob} is FALSE.
#'
#' @export
stem_leaf_display <- function(
x,
unit = 1,
m = 1,
min_val = NULL,
max_val = NULL,
outliers = TRUE,
depths = FALSE,
col_width = 4,
row_height = 0.5,
font_sz = 11,
heading_color = "black",
display_grob = TRUE
) {
var_names <- names(x)
values <- unlist(x)
if(is.null(min_val)){
min_val <- min(values)
}
if(is.null(max_val)){
max_val <- max(values)
}
stem_leaf_lst <- aplpack::stem.leaf(
data = values,
unit = unit,
m = m,
Min = min_val,
Max = max_val,
trim.outliers = outliers,
depths = depths,
printresult = F
)
col_widths <- rep(col_width, length(var_names) + 1)
row_heights <- rep(row_height, length(stem_leaf_lst$stem) + 2) # adds 2 lines for info and textGrob headings
display_table <- gtable::gtable(
name = "display_table",
widths = grid::unit(x = col_widths, units = "cm"),
heights = grid::unit(x = row_heights, units = "cm")
)
# for debug: show layout
#gtable::gtable_show_layout(display_table)
# creating info textGrob
info_grob <- grid::textGrob(
label = paste(stem_leaf_lst$info[[1]], stem_leaf_lst$info[[2]], stem_leaf_lst$info[[3]], sep = " "),
just = "center",
gp = grid::gpar(col = "black", fontsize = 12, fontface = 2L)
)
# creating heading textGrobs
heading_grobs <- vector(mode = "list", length = length(var_names))
for(i in seq_along(var_names)){
heading_grobs[[i]] <- grid::textGrob(
label = var_names[[i]],
just = "left",
gp = grid::gpar(col = heading_color, fontsize = 12, fontface = 2L))
}
# create stem & leaf textGrobs
n_rows <- length(stem_leaf_lst$stem)
n_cols <- length(var_names)
n <- n_rows * n_cols
stem_leaf_grobs <- vector(mode = "list", length = n)
for(i in seq_along(var_names)){
var_stem_leaf_lst <- aplpack::stem.leaf(
data = x[[var_names[[i]]]],
unit = unit,
m = m,
Min = min_val,
Max = max_val,
trim.outliers = outliers,
depths = depths,
printresult = F
)
for(ii in seq_along(var_stem_leaf_lst$stem)){
display_str <- ""
if(depths){
display_str <- var_stem_leaf_lst$depths[[ii]]
}
display_str <- paste0(display_str, var_stem_leaf_lst$stem[[ii]], var_stem_leaf_lst$leaves[[ii]])
stem_leaf_grobs[[(i - 1) * n_rows + ii]] <- grid::textGrob(
label = display_str,
just = "left",
gp = grid::gpar(col = "black", fontsize = font_sz, fontface = 2L))
}
}
# add info textGrob to gtable
display_table <- gtable::gtable_add_grob(
x = display_table,
grobs = info_grob,
t = 1,
l = 1,
r = length(var_names)+1
)
# add heading textGrobs to gtable
for(i in seq_along(var_names)){
display_table <- gtable::gtable_add_grob(
x = display_table,
grobs = heading_grobs[[i]],
t = 2,
l = i,
r = i + 1
)
}
# add stem & leaf textGrobs to gtable
for(i in seq_along(var_names)){
for(ii in seq_along(var_stem_leaf_lst$stem)){
display_table <- gtable::gtable_add_grob(
x = display_table,
grobs = stem_leaf_grobs[[(i - 1) * n_rows + ii]],
t = ii + 2, # first two lines are info and the heading textGrobs
l = i,
r = i + 1
)
}
}
if(display_grob){
grid::grid.newpage()
grid::grid.draw(display_table)
}else{
return(display_table)
}
}
|
aae7b9b62c7074a11a9c90030f8eb8bc2260cdae
|
a2bfab36444668a0a6fcb1aea234da2b9323be07
|
/R/ultimate_upgrade.R
|
f268792d4fac51fb82f8201b840978a56125fc19
|
[] |
no_license
|
rahasayantan/springleaf
|
93643518d4a3d1bd87019d74694ed67966fc57d2
|
894a13be5b7244596bba304eac73dbdf82fdc0fb
|
refs/heads/master
| 2021-01-19T23:24:47.590137
| 2015-10-21T07:30:11
| 2015-10-21T07:30:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,563
|
r
|
ultimate_upgrade.R
|
## wd etc ####
require(Metrics)
require(caret)
require(readr)
require(doParallel)
require(stringr)
require(lubridate)
require(lme4)
## extra functions ####
# print a formatted message
msg <- function(mmm,...)
{
cat(sprintf(paste0("[%s] ",mmm),Sys.time(),...)); cat("\n")
}
auc<-function (actual, predicted) {
r <- as.numeric(rank(predicted))
n_pos <- as.numeric(sum(actual == 1))
n_neg <- as.numeric(length(actual) - n_pos)
auc <- (sum(r[actual == 1]) - n_pos * (n_pos + 1)/2)/(n_pos * n_neg)
auc
}
## read data ####
xtrain <- read_csv(file = "./input/train.csv")
id_train <- xtrain$ID; xtrain$ID <- NULL; y <- xtrain$target; xtrain$target <- NULL
xtest <- read_csv(file = "./input/test.csv")
id_test <- xtest$ID; xtest$ID <- NULL
## preliminary preparation ####
# drop columns with nothing but NA
is_missing <- colSums(is.na(xtrain))
constant_columns <- which(is_missing == nrow(xtrain))
xtrain <- xtrain[,-constant_columns]; xtest <- xtest[,-constant_columns]
rm(is_missing, constant_columns)
# drop duplicated columns
duplicate_columns <- which(duplicated(lapply(xtrain, c)))
xtrain <- xtrain[,-duplicate_columns]; xtest <- xtest[,-duplicate_columns]
rm(duplicate_columns)
# check column types
col_types <- unlist(lapply(xtrain, class))
fact_cols <- which(col_types == "character")
# separate into (what seems like) numeric and categorical
xtrain_fc <- xtrain[,fact_cols]; xtrain <- xtrain[, -fact_cols]
xtest_fc <- xtest[,fact_cols]; xtest <- xtest[, -fact_cols]
# add zipcode
xtrain_fc$zipcode <- xtrain$VAR_0212; xtest_fc$zipcode <- xtest$VAR_0212
xtrain_fc$zipcode2 <- xtrain$VAR_0241; xtest_fc$zipcode2 <- xtest$VAR_0241
## factor handling: cleanup ####
isTrain <- 1:nrow(xtrain_fc); xdat_fc <- rbind(xtrain_fc, xtest_fc); rm(xtrain_fc, xtest_fc)
xdat_fc$zipcode <- as.character(xdat_fc$zipcode)
xdat_fc$zipcode2 <- as.character(xdat_fc$zipcode2)
xdat_fc$zipcode[is.na(xdat_fc$zipcode)] <- ""
xdat_fc$zipcode2[is.na(xdat_fc$zipcode2)] <- ""
# drop timestamp columns - not needed here
time_cols <- c("VAR_0073","VAR_0075","VAR_0156","VAR_0157","VAR_0158","VAR_0159",
"VAR_0166","VAR_0167","VAR_0168","VAR_0169","VAR_0176","VAR_0177",
"VAR_0178","VAR_0179","VAR_0204","VAR_0217")
time_cols <- time_cols[time_cols %in% colnames(xdat_fc)]
xdat_fc <- xdat_fc[,-which(colnames(xdat_fc) %in% time_cols)]
# true / false cases
{
tf_columns <- c("VAR_0008","VAR_0009","VAR_0010","VAR_0011","VAR_0012",
"VAR_0043","VAR_0196","VAR_0226","VAR_0229","VAR_0230","VAR_0232","VAR_0236","VAR_0239")
tf_columns <- tf_columns[tf_columns %in% colnames(xdat_fc)]
for (ff in tf_columns)
{
x <- xdat_fc[,ff]; x[x == ""] <- "mis"
x <- factor(x); xdat_fc[,ff] <- x
msg(ff)
}
}
# location columns
{
loc_columns <- c("VAR_0237", "VAR_0274", "VAR_0200", "zipcode", "zipcode2")
for (ff in loc_columns)
{
x <- xdat_fc[,ff]; x[x == ""] <- "mis"; x[x == "-1"] <- "mis"
x <- factor(x); xdat_fc[,ff] <- x
msg(ff)
}
}
# alphanumeric generic columns
{
an_columns <- c("VAR_0001","VAR_0005", "VAR_0044", "VAR_1934", "VAR_0202", "VAR_0222",
"VAR_0216","VAR_0283","VAR_0305","VAR_0325",
"VAR_0342","VAR_0352","VAR_0353","VAR_0354","VAR_0466","VAR_0467")
for (ff in an_columns)
{
x <- xdat_fc[,ff]; x[x == ""] <- "mis"; x[x == ""] <- "mis"
x <- factor(as.integer(factor(x))); xdat_fc[,ff] <- x
msg(ff)
}
}
# job columns => for bag of words later
{
job_columns <- c("VAR_0404", "VAR_0493")
xjobs <- xdat_fc[,job_columns]; xdat_fc <- xdat_fc[,-which(colnames(xdat_fc) %in% job_columns)]
for (ff in job_columns)
{
x <- xjobs[,ff]; x[x == ""] <- "mis"; x[x == "-1"] <- "mis"
x <- factor(x); xjobs[,ff] <- x
msg(ff)
}
}
rm(xtrain, xtest, xjobs,x, ff )
## factor handling: create new ones ####
xcomb <- combn(ncol(xdat_fc),2)
for (ii in 1:ncol(xcomb))
{
xloc <- xdat_fc[,xcomb[,ii]]
xname <- paste("bi",colnames(xloc)[1] , colnames(xloc)[2],sep = "_")
xfac <- paste(xloc[,1], xloc[,2], sep = "_")
xdat_fc[,xname] <- xfac
msg(ii)
}
## factor handling: counts ####
for (ii in 1:ncol(xdat_fc))
{
xname <- colnames(xdat_fc)[ii]
xtab <- data.frame(table(xdat_fc[,ii]))
colnames(xtab)[1] <- xname
colnames(xtab)[2] <- paste("ct", xname, sep = "")
xdat_fc[,paste("ct", xname, sep = "")] <- xtab[match(xdat_fc[,ii], xtab[,1]),2]
msg(ii)
}
## add response rates for zipcode, zipcode2 and new ones ####
# drop the raw factors
drop_list <- grep("^VAR", colnames(xdat_fc))
xdat_fc <- xdat_fc[,-drop_list]
# separate the count columns
count_list <- grep("^ct", colnames(xdat_fc))
xdat_count <- xdat_fc[,count_list]
xdat_fc <- xdat_fc[,-count_list]
xtrain_count <- xdat_count[isTrain,]
xtest_count <- xdat_count[-isTrain,]
rm(xdat_count)
# separate into xtrain / xtest
xtrain <- xdat_fc[isTrain,]
xtest <- xdat_fc[-isTrain,]
rm(xdat_fc)
# setup the folds for cross-validation
xfold <- read_csv(file = "./input/xfolds.csv")
idFix <- list()
for (ii in 1:10)
{
idFix[[ii]] <- which(xfold$fold10 == ii)
}
rm(xfold,ii)
# grab factor variables
factor_vars <- colnames(xtrain)
# loop over factor variables, create a response rate version for each
for (varname in factor_vars)
{
# placeholder for the new variable values
x <- rep(NA, nrow(xtrain))
for (ii in seq(idFix))
{
# separate ~ fold
idx <- idFix[[ii]]
x0 <- xtrain[-idx, factor_vars]; x1 <- xtrain[idx, factor_vars]
y0 <- y[-idx]; y1 <- y[idx]
# take care of factor lvl mismatches
x0[,varname] <- factor(as.character(x0[,varname]))
# fit LMM model
myForm <- as.formula (paste ("y0 ~ (1|", varname, ")"))
myLME <- lmer (myForm, x0, REML=FALSE, verbose=F)
myFixEf <- fixef (myLME); myRanEf <- unlist (ranef (myLME))
# table to match to the original
myLMERDF <- data.frame (levelName = as.character(levels(x0[,varname])), myDampVal = myRanEf+myFixEf)
rownames(myLMERDF) <- NULL
x[idx] <- myLMERDF[,2][match(xtrain[idx, varname], myLMERDF[,1])]
x[idx][is.na(x[idx])] <- mean(y0)
}
rm(x0,x1,y0,y1, myLME, myLMERDF, myFixEf, myRanEf)
# add the new variable
xtrain[,paste(varname, "dmp", sep = "")] <- x
# create the same on test set
xtrain[,varname] <- factor(as.character(xtrain[,varname]))
x <- rep(NA, nrow(xtest))
# fit LMM model
myForm <- as.formula (paste ("y ~ (1|", varname, ")"))
myLME <- lmer (myForm, xtrain[,factor_vars], REML=FALSE, verbose=F)
myFixEf <- fixef (myLME); myRanEf <- unlist (ranef (myLME))
# table to match to the original
myLMERDF <- data.frame (levelName = as.character(levels(xtrain[,varname])), myDampVal = myRanEf+myFixEf)
rownames(myLMERDF) <- NULL
x <- myLMERDF[,2][match(xtest[, varname], myLMERDF[,1])]
x[is.na(x)] <- mean(y)
xtest[,paste(varname, "dmp", sep = "")] <- x
msg(varname)
}
# drop the factors
ix <- which(colnames(xtrain) %in% factor_vars)
xtrain <- xtrain[,-ix]
xtest <- xtest[,-ix]
## aggregate and store ####
xtrain <- cbind(xtrain, xtrain_count)
xtest <- cbind(xtest, xtest_count)
rm(xtrain_count, xtest_count)
xtrain$ID <- id_train; xtrain$target <- y
colnames(xtrain) <- str_replace_all(colnames(xtrain), "_", "")
write_csv(xtrain, path = "./input/xtrain_v8a.csv")
xtest$ID <- id_test
colnames(xtest) <- str_replace_all(colnames(xtest), "_", "")
write_csv(xtest, path = "./input/xtest_v8a.csv")
|
db8878a9408ddb88ec63f74d2141e55403c03bac
|
058df96499d8053fb468a27d30a2adaf787bf6fc
|
/R_scripts/barplot_percent_stacked.R
|
907b9347fa24699c76b7c775174901d655d66e62
|
[] |
no_license
|
rosaranli/My_scripts
|
da806f80da310814fb9c75d826272d208875aa89
|
f5425f1075c8852b8a09c315b826cd5158c3207f
|
refs/heads/main
| 2023-07-10T09:34:24.704904
| 2021-08-23T16:03:43
| 2021-08-23T16:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,452
|
r
|
barplot_percent_stacked.R
|
library(data.table)
library(tidyverse)
library(gridExtra)
library(grid)
############################################################
#Input format for matrix should be: colnames as "SNV, sample1, sample2.....sampleN (doesn't depend on the naming style except at the simplification step
# i.e gather function)"
#with SNV having a format like 1:10000_A>T
############################################################
setwd('../Single_cell/Bar plots/')
#Input VAF file, split snv, deselect ref and alt and simplify colnames
vaf_data <- fread('N8_3_VAF_matrix_CLEAN_10c_nExtrRmean.txt') %>% separate(SNV, into = c("chr", "loc", "ref", "alt")) %>% select(-ref, -alt)
vaf_data$loc <- as.integer(vaf_data$loc)
colnames(vaf_data) <- sapply(colnames(vaf_data), function (x) gsub("_wasp_AlignedByCoord_vW_filt_dedupped_sorted", "", x))
# Annotate the VAFs with customised gene list, order by chr and loc, create a new "formatted" location column, and rename it, keep chr column for grouping
#vaf_data <- left_join(vaf_data, gene_ref, by = "chr") %>% filter(loc >= start & loc <= end) %>% distinct() %>% select(-start, -end)
setorder(vaf_data, chr, loc)
vaf_data <- vaf_data %>% unite("loc1", chr:loc, remove = F) %>% select(-loc)
vaf_data$loc1 <- sapply(vaf_data$loc1, function (x) paste0("chr", x))
names(vaf_data)[1] <- "loc"
#Tidy the data: count for each bins, group by snvs
# If the bins have changed from <0.2, 0.2-0.4....then replace '4' with (# of bins-1)
vaf_data <- vaf_data %>% group_by(loc)
vaf_data$`<0.2` <- rowSums(vaf_data < 0.2, na.rm = TRUE)
vaf_data$`0.2-0.4` <- rowSums(vaf_data >= 0.2 & vaf_data < 0.4 , na.rm = TRUE)
vaf_data$`0.4-0.6` <- rowSums(vaf_data >= 0.4 & vaf_data < 0.6, na.rm = TRUE)
vaf_data$`0.6-0.8` <- rowSums(vaf_data >= 0.6 & vaf_data < 0.8, na.rm = TRUE)
vaf_data$`>0.8` <- rowSums(vaf_data[, 3:(ncol(vaf_data)-4)] >= 0.8, na.rm = TRUE)
# get into "tidy form" for ggplot, select only those SNVs that you want to visualize (enter the threshold value obtained from previous histogram)
# Split datasets into 2 parts: 1) where the sum of cells is above threshold and 2) where the sum is below or equal to threshold
# If the bins have changed from <0.2, 0.2-0.4....then replace '6' with (# of bins+1)
vaf_data <- vaf_data %>% select(1, 2, tail(seq_along(vaf_data), 5))
vaf_data <- vaf_data %>% gather("VAF", "counts", -loc, -chr)
# Prepare for labels (if required) and facettting, create stacked ggpot, flip coord, remove unwanted gridlines
# If the bins have changed from <0.2, 0.2-0.4....then replace all three '5's with (# of bins)
vaf_data$VAF <- factor(vaf_data$VAF, levels = unique(vaf_data$VAF)) # if the levels of factoring is weird, reorder using the levels option
vaf_data$num_label <- c(rep(c(1:(nrow(vaf_data)/5)), times=5))
vaf_data$chr <- factor(vaf_data$chr, levels = c(1:22, "X"))
# To reverse the ordering of labels just add reverse=TRUE to position_fill() as an argument
g <- ggplot(vaf_data, aes(x = num_label, y=counts, fill=VAF, width=1)) + geom_bar(position = position_fill(), stat = "identity") + coord_flip()
g + theme_classic() + ylab("percent cell count") + scale_x_continuous("", breaks = 1:(nrow(vaf_data)/5), labels = vaf_data$loc[1:(nrow(vaf_data)/5)]) + theme(axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_text(size = 14, face = "bold"), axis.title.x = element_text(size = 14, face = "bold")) + facet_wrap(~chr, scales = "free", ncol = 8)
|
b96cb419589e671aafe22b652e62250ccc6367a7
|
d12501d77b129bf5eabcd1ffa34c784fa18b2990
|
/LiuHetools/tests/testthat/test_fhw26.R
|
26a9bf65aa182100ade6ffd6e9029256d6ca8da3
|
[] |
no_license
|
3701/hw3
|
631321e898d8f1fe432c78530dae9c177d1c094b
|
b36bcbebf46347d7ccb11ad4ba91e13aa78355ea
|
refs/heads/master
| 2021-04-26T21:52:11.072514
| 2018-03-07T02:56:08
| 2018-03-07T02:56:08
| 123,729,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
test_fhw26.R
|
context("fquiz26 in my package")
test_that("if fquiz26 working",{
x<-hw26
testing<-list( apply(x,c(1,3),median),
apply(x,c(1,2),median),
apply(x,c(2,3),median),
apply(x,c(3),median),
apply(x,c(1),median),
apply(x,c(2),median))
expect_identical(fhw26(x),testing)
})
|
800f983c43d0188844fd7f3f2fec2189f156f72b
|
c54c73306f1d25cc78c346bbefd7248c7b45da60
|
/man/get.clusters.Rd
|
e12037af21f3ecf546da9a318abe5ce7d8dd2737
|
[] |
no_license
|
stuchly/MetaMass
|
7a3ad4cd01870ecf73ece41004106baec5782c39
|
2190f0d92a8dc4d14ce593c616b4fa4efa0a29b2
|
refs/heads/master
| 2020-04-17T02:30:43.937779
| 2019-11-08T11:09:44
| 2019-11-08T11:09:44
| 56,692,568
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,682
|
rd
|
get.clusters.Rd
|
\name{get.clusters}
\alias{get.clusters}
\title{get.clusters}
\description{
retrieves cluster data.frame from AnnoMass object
}
\usage{
get.clusters(AM,rID=1)
}
\arguments{
\item{AM}{AnnoMass object. Result of function \code{analyze.MSfile}}
\item{rID}{integer. Which annotation should be used to the comparison
(see annotation.component argument in \code{analyze.MSfile})}
}
\value{
data.frame. One line per each cluster with cluster info.
main_component - most abundant component in the cluster
purity_main_component - ration of main_component in the cluster
Nb_main_component - number of annotation suporting the most abundant component
assigned_location - annotation of cluster with respect predefined scheme (see details)
other columns - ratio of other components etc
}
\details{
assigned_location scheme:
Cytoplasm : (Cyt + CS (cytoskeleton)+proteasome ) >=51\%
Subcategory: cytoskeleton if more than 30\% of markers in cytoplasm
category is cytosleketon
Ribosome (no subcategory) >=51\%
Membrane: PM (Plasma membrane) + ER (endoplasmic
reticulum)+Golgi+Mitochondrion + lysosomes + ensodomes >=51\%
Subcategory: Most dominant count.
Nucleus: Nucleus + Nucleolus
Subcategory: Nucleolus if more than 25\% of nuclear markers are Nucleolus,
}
\examples{
file1<-system.file("extdata","Bileck.txt",package="MetaMass")
file2<-system.file("extdata","Andreyev.txt",package="MetaMass")
file1
file2
res<-analyze.MSfile(MSfile=c(file1,file2),Metadata=c("Christoforou","Rodriguez"),markers=c(3,4,5,6,7))
head(get.clusters(res,rID=1)) #clusters annotation with respect to the 1st annotation.component (3rd column in AnnotationFile)
}
|
82d6752770c6b7138676263c0ba0c904533773d5
|
55a5e246d1318275a5a0f1fc9b2e1b080ab26fe7
|
/tests/testthat/test-folder-dataset.R
|
80fbc09d27d63ac73143248e49daf689181fa95c
|
[
"MIT"
] |
permissive
|
mohamed-180/torchvision
|
610577f5b1dec7a628df8c047c41ec18376e35f5
|
0761c61441f838f1b0c6f3624c40542934fb24f8
|
refs/heads/main
| 2023-07-14T14:20:21.225664
| 2021-08-23T17:18:10
| 2021-08-23T17:18:10
| 399,161,368
| 0
| 0
|
NOASSERTION
| 2021-08-23T15:48:55
| 2021-08-23T15:48:54
| null |
UTF-8
|
R
| false
| false
| 512
|
r
|
test-folder-dataset.R
|
test_that("image_folder dataset", {
ds <- image_folder_dataset(
root = "assets/class",
transform = . %>% transform_to_tensor %>%
transform_resize(c(32,32))
)
expect_length(ds[1], 2)
dl <- torch::dataloader(ds, batch_size = 2, drop_last = TRUE)
coro::loop(for(batch in dl) {
expect_tensor_shape(batch[[1]], c(2, 3, 32, 32))
expect_tensor_shape(batch[[2]], 2)
expect_tensor_shape(batch$x, c(2, 3, 32, 32))
expect_tensor_shape(batch$y, 2)
})
expect_length(ds, 12)
})
|
db0b5cad223e1f1c26773ac3f7846f63d0fd9f9e
|
8d74e828c671df7f5211705befeba2f857915f07
|
/chap_6/03/app.R
|
0b17f6f8a03e6afc3b4c75460d4777b95f010d62
|
[
"MIT"
] |
permissive
|
kpivert/my_ms_book
|
1aa82e0cfabeac92ad0945f9bdcf7764ee0df507
|
d5e7c2b8d092bc56aefd7f1b61c4b77c29ed3320
|
refs/heads/main
| 2023-07-14T03:10:37.358289
| 2021-08-08T14:36:22
| 2021-08-08T14:36:22
| 380,824,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
app.R
|
library(shiny)
ui <- fluidPage(
tabsetPanel(
tabPanel("Import data",
fileInput("file", "Data", buttonLabel = "Upload..."),
textInput("delim", "Delimiter (leave blank to guess)", value = ""),
numericInput("skip", "Rows to skip", 0, min = 0),
numericInput("rows", "Rows to preview", 10, min = 1)
),
tabPanel("Set parameters"),
tabPanel("Visualize results")
)
)
server <- function(input, output, session) {
}
shinyApp(ui, server)
|
0f0e97a5579eb6209775620c669e2511f5f2ac99
|
944ad6b296718cf5ebc057064f205660609d7322
|
/shanAns3.R
|
7fddf5102e2a3cbd2c3ca7fdb725fcac02f4e226
|
[] |
no_license
|
jishupu05/funny-correlation
|
2ee54b71bbe309b25241034e63f19e7ca6d8b553
|
f9e27c5f665a5beef6f9e3e34f43763024288362
|
refs/heads/master
| 2022-12-17T23:15:23.101180
| 2020-09-23T09:22:16
| 2020-09-23T09:22:16
| 297,917,353
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 802
|
r
|
shanAns3.R
|
#find out the partial correlation
#partial correlation means when you feel that your two continuous variables are correlated with each other and any of them
#also correlated with other continuous variable.
# find partial correlation to get influence free relation between those two variables.
#load the data.
df<-sat.act
# remove all NA values from dataset by using NA.omit()
df<-na.omit(df)
#now you can use partial.r() function under psych package to get the result
partial.r(df[5:6])
#To get better summarized result use pcor.test() method under 'ppcor' package
install.packages('ppcor')
library(ppcor)
#Here your controlling variable is Education. which is defined as z
pcor.test(x=df$SATV,y=df$SATQ,z=df$education,method = 'pearson')
#run the code get the interpretation herewith
|
cd953a26c3f922c0066719d10d18244aa2df4701
|
909157178ed55cf23adbd5b835012f78f510669e
|
/data_visualisation/scatterplots.R
|
da44a86364452c62b49a299ffae5280643ff4a6c
|
[] |
no_license
|
tomscott1/R-notes
|
d7e4c0e45497c02e88b7bfe4fbbecd8cea811d58
|
d890a5432acddd3618fa7ce1b11aa45b7c0ef3d4
|
refs/heads/master
| 2021-01-12T10:52:27.142033
| 2016-12-04T21:35:53
| 2016-12-04T21:35:53
| 72,739,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
scatterplots.R
|
library(ggplot2)
df <- mtcars
# data
pl <- ggplot(df, aes(x=wt,y=mpg))
# geometry and aesthetics layer
pl2 <- pl + geom_point(aes(size=hp, shape=factor(cyl), colour=hp), alpha=0.7)
pl3 <- pl2 + scale_color_gradient(low='#90C3D4', high='red')
print(pl3)
|
1ac3c60ed7fb6eec61542feace457b744afb86f7
|
a64252a36d2d005f141b6f5bd66160c6c12ade2b
|
/Plots/ModelWithHighCorrelatingIndicators_Forecast_plot.r
|
0b73942731e5bd4b5ad43af5455d205472c16f83
|
[] |
no_license
|
clairecDS/DoingDataScience_CaseStudy2
|
ab2d1e2dd9d615f3888c07f56c883ce54d4fcdd7
|
91b13e802bf5ded858c3e0ff4d45fa1679d0f87d
|
refs/heads/master
| 2020-12-25T14:48:10.564131
| 2016-07-28T03:48:17
| 2016-07-28T03:48:17
| 63,804,714
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 236
|
r
|
ModelWithHighCorrelatingIndicators_Forecast_plot.r
|
#Creates ModelWithHighCorrelatingIndicators_Forecast plot
png("Plots/ModelWithHighCorrelatingIndicators_Forecast_plot.png")
plot(ModelWithHighCorrelatingIndicators_Forecast, main="ModelWithHighCorrelatingIndicators_Forecast")
dev.off()
|
c5f92fedbc82175543c0fcd64c97eb317b745f8b
|
2cfb6436fec534763f6f157abb20ea56f1224d56
|
/team--middle-of-the-road/meetup-gender.R
|
7376eead32f1b01f8d26d4a92549feeefc25f70c
|
[] |
no_license
|
wolass/BRUG_API
|
26ddc21ff282b9b57b1bee4eee96e71f3025d10c
|
ec6036eb9210e967b2601fb202a4a3a07e521d87
|
refs/heads/master
| 2020-12-13T23:30:06.990427
| 2017-04-06T20:25:03
| 2017-04-06T20:25:03
| 86,577,952
| 2
| 4
| null | 2017-04-06T20:25:04
| 2017-03-29T12:12:00
|
R
|
UTF-8
|
R
| false
| false
| 2,324
|
r
|
meetup-gender.R
|
# How many users of the berlin R users Group are male/female?
# Data sources: meetup.com API, wikipedia data (first names)
library(devtools)
if("meetupr" %in% rownames(installed.packages()) == FALSE) {
install_github("rladies/meetupr")
}
library(meetupr)
library(tidyverse)
library(jsonlite)
# see script 'first_names_from_wikipedia.R', snippet by Andreas Busjahn, modified
# https://www.meetup.com/Berlin-R-Users-Group/events/238289864/
# vectors with first-names
load("names.RData")
api_key <- Sys.getenv("R_meetup_api_key")
group_name <- "Berlin-R-Users-Group"
events <- get_events(group_name, api_key)
events_df <- events %>% toJSON() %>% fromJSON()
members <-get_members(group_name, api_key)
members_df <- members %>% toJSON() %>% fromJSON()
members_df$fname <- tolower(gsub(members_df$name, pattern="\\s.*$", replacement = "", perl=TRUE))
members_df <- members_df %>%
select(fname, status) %>%
mutate(status = unlist(status)) %>% # remove weird artifact from JSON conversion
mutate(is_male=fname %in% male, # perform lookup
is_female=fname %in% female,
is_ambiguous = fname %in% ambiguous) %>%
mutate(gender = ifelse(is_male==TRUE, "male",
ifelse(is_female==TRUE, "female",
ifelse(is_ambiguous == TRUE, "ambiguous", "undetermined")))) %>%
filter(gender %in% c("male", "female")) # remove ambiguous or undetermined
# how many women, fraction
(tab <- prop.table(with(members_df, table(gender))))
females_fraction <- tab[[1]]
#### How many are *expected* to show up?
# generate a simulation: during the next 1000 Berlin-R-Users Group meetings, assuming
# 30 people show up each time, how many women are among them,
# when the expected fraction of women is estimated to be 22%?
n <- 30
n_trials <- 1000
females_appeared_sim <- rbinom(n = n_trials, size = n, prob = females_fraction)
hist(females_appeared_sim)
# 2.5% quantile and 97.5% quantiles
min_appear_expected <- qbinom(size=n, prob=females_fraction, p=0.025)
max_appear_expected <- qbinom(size=n, prob=females_fraction, p=0.975)
# "95% confidence interval"
round(c(min_appear_expected, max_appear_expected) / n, 2)
# => estimate: 22% of group members are women, 95% boundaries: between 7% and 37%
|
41682c92c59ca049d47208e3567816c39d540ae2
|
e7a29b7452edb314b66ecc32dc5d53a235cb1b55
|
/R/grid-rowwise.R
|
9a8d43f9dd64ecaa7d44aea0a3137fef5dc2bcb3
|
[
"MIT"
] |
permissive
|
atusy/cssgrid
|
03dfc754a989fd3fc5bbc94580d432c423fc5767
|
9d9f255c72c663bd9476ba090bc0b707b79b6694
|
refs/heads/master
| 2020-06-07T21:07:20.669061
| 2019-06-23T06:28:03
| 2019-06-23T06:28:28
| 193,093,802
| 30
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
grid-rowwise.R
|
#' Gird layout in single row or column
#'
#' @param ...
#' Items in CSS Grid Layout and arguments passed to [`grid_layout`].
#' @param rows,cols
#' Sizes of rows and columns in a character vector.
#' If the given number of sizes are less than the number of items,
#' then `"auto"` is used for items missing sizes.
#'
#' @name grid_rowwise
#' @export
grid_rowwise <- function(..., cols = character(0L)) {
n <- n_item(...) - sum(lengths(strsplit(cols, " +")))
cols <- c(cols, rep("auto", n * (n > 0)))
grid_layout(..., cols = cols)
}
|
0a9ae04b7e33ce4566913032f564b36d6fe4529f
|
72b8a4f3d2a6666b608ca9227b8c27a540996c28
|
/R files/heart_points_analysis.R
|
9b4b807c2dcc8633901080a91ff9790cf72a5168
|
[] |
no_license
|
BenjaminDupre/tsvr
|
1eeb04617e2c981f45ac1b0cf56727b9d9172eee
|
1def2088efcf4b382cbe69132968591037cba663
|
refs/heads/main
| 2023-03-23T09:58:03.128323
| 2021-03-12T09:15:53
| 2021-03-12T09:15:53
| 310,716,326
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,888
|
r
|
heart_points_analysis.R
|
# Monday 22 February 2021
# Coding for getting insight into heart behavior.
# Author: B.D.
library(ez) # for anovas
library(plyr) # for building the rt graph and revalue function.
library(ggplot2)
library(tidyverse) # for playing around with the %>%
#library(gapminder) # sacar si no uso
library("viridis")
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x)))
}
setwd("/Users/calypso/Dropbox/My Mac (glaroam2-185-117.wireless.gla.ac.uk)/Documents/MATLAB/projects/untitled/R files")
tsvr <- read.table('responsetime1.csv',header=T,sep = ',',dec = '.') # this is the file saved from the Matlab file
# Removing specific participants & wrongly assigned data,
tsvr <- tsvr[!(tsvr$ptcp== 2 | tsvr$ptcp== 3 | tsvr$ptcp == 13 | tsvr$ptcp == 11),]
tsvr <- tsvr[!(tsvr$zyklus== "0"),]
#tsvr <- tsvr[!(tsvr$zyklus== "0" | tsvr$zyklus == "keine vibration"),]
# Arranging factors so to have base conditions in order
tsvr$stimulus <- factor(tsvr$stimulus, levels = c(3, 2, 1))
tsvr$stimulus <-revalue(tsvr$stimulus, c("3"="base", "2"="Incongruent", "1"="Congruent"))
tsvr$zyklus = factor(tsvr$zyklus,levels(tsvr$zyklus)[c(1, 3, 2, 4)]) # the factor function is different because zyklus is already recognize as a factor.
tsvr$zyklus <- droplevels(tsvr$zyklus)
#tsvr$ptcp <- factor(tsvr$ptcp)
# Normalizing response time for every participant
for (i in min(tsvr$ptcp):max(tsvr$ptcp)){
tsvr[tsvr$ptcp==i,5]=normalize(tsvr[tsvr$ptcp==i,5])
}
aggregate(tsvr[, 5], list(tsvr$zyklus), mean)
aggregate(tsvr[, 8], list(tsvr$zyklus), mean)
# ANOVA for the zyklus
output_anova = ezANOVA(data = tsvr,
dv = .(diff),
wid = .(ptcp),
within = .(zyklus),
within_covariates = .(set),
#diff = .(stimulus),
detailed = T)
print(output_anova)
# ANOVA for the stimulus.
output_anova2 = ezANOVA(data = tsvr,
dv = .(diff),
wid = .(ptcp),
within = .(stimulus),
within_covariates = .(set),
#diff = .(stimulus),
detailed = T)
print(output_anova2)
# Visual for the Zyklus.
sd = sd(df$diff)
df = tsvr %>%
#filter(ptcp != 22) %>%
#filter(zyklus %in% c("Diastole","Systole")) %>%
group_by(ptcp,zyklus) %>%
summarise(n_diff = mean(diff))
#summarise(n_diff = mean(diff))
df %>%
ggplot(aes(zyklus,n_diff, fill=zyklus)) +
geom_boxplot() +
scale_fill_brewer()+
geom_line(aes(group=ptcp, col=ptcp)) +#, position = position_dodge(0.2)) +
geom_point(aes(group=ptcp, col=ptcp)) +#, position = position_dodge(0.2)) +
scale_color_viridis(option = "D")+
theme(legend.position="none" )
boxplot(diff ~ stimulus*zyklus,sub)
normalize(tsvr$diff)
normalize(tsvr$diff)
plot(normalize(tsvr$diff))
|
f0d04a2ef47ce10f5674cc4d8a5997b62c719b67
|
7f5d90ead24483775e908a561ac8c6f1dea37767
|
/plot1.R
|
3520b5971ab297d063b6e9bf4576ceddce668f14
|
[] |
no_license
|
anirudh838/ExData_Plotting1
|
edfdb0a991619f9cb0c08f400932d66b7b88c65b
|
0483d3e75327b8c0563dd78214a8dd75666bd6d8
|
refs/heads/master
| 2020-06-01T20:19:26.614701
| 2019-06-08T18:23:57
| 2019-06-08T18:23:57
| 190,915,074
| 0
| 0
| null | 2019-06-08T17:15:57
| 2019-06-08T17:15:56
| null |
UTF-8
|
R
| false
| false
| 360
|
r
|
plot1.R
|
# Construct a histogram between Global Active Powerand save it to a PNG file with a
# width of 480 pixels and a height of 480 pixels
hist(data$Global_active_power, main = paste("Global Active Power"),
xlab = "Global Active Power(kilowatts)", ylab = "Frequency",
col = "red")
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
0187cdbb7511d69a29e7e67e95bfbb6b2bc4f1e7
|
c924b44352dc43e174e140ce75fd1fe5a1c2ee7b
|
/r5.R
|
ee13518bf56eaf93f2f8bbf4620b23e7c2894f4f
|
[] |
no_license
|
phonamnuaisuk/DAVIS
|
86fc59f8f0d7eb2b08d4621ac5f25ef686448297
|
cc085cd75ab997c82d532714b082067728e5c869
|
refs/heads/master
| 2021-08-28T17:45:34.574909
| 2021-08-14T04:06:08
| 2021-08-14T04:06:08
| 88,837,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,277
|
r
|
r5.R
|
#
# Text Mining
#
text.rt <- readLines("http://kdd.ics.uci.edu/databases/reuters21578/README.txt")
length(text.rt)
# examine
text.rt
which(text.rt == "")
# clean up
text.rt <- text.rt[- which(text.rt=="")]
length(text.rt)
text.rt <- text.rt[- which(text.rt=="}")]
length(text.rt)
#
lastind <- which(text.rt=="X. Bibliography")-1
text.rt <- text.rt[1:lastind]
#
text.rt <- gsub(pattern = "\"",replacement = "", x = text.rt, ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE)
text.rt
#
library(tm)
txt <- VectorSource(text.rt)
txt.corpus <- Corpus(txt); rm(txt)
txt.corpus
inspect(txt.corpus)
#
txt.corpus <- tm_map(txt.corpus, tolower)
txt.corpus <- tm_map(txt.corpus, removePunctuation)
txt.corpus <- tm_map(txt.corpus, removeNumbers)
txt.corpus <- tm_map(txt.corpus, removeWords, stopwords("english"))
txt.corpus <- tm_map(txt.corpus, stemDocument)
txt.corpus <- tm_map(txt.corpus, stripWhitespace)
inspect(txt.corpus)
tdm <- TermDocumentMatrix(txt.corpus)
inspect(tdm)
#
library(wordcloud)
wordcloud(txt.corpus)
#
freq.terms <- findFreqTerms(x = tdm, lowfreq = 10, highfreq = Inf)
terms.freq <- rowSums(as.matrix(tdm))
terms.freq <- subset(terms.freq, terms.freq >=20)
df <- data.frame(term=names(terms.freq), freq = terms.freq)
library(ggplot2)
ggplot(df,aes(x=term,y=freq))+ geom_bar(stat="identity")+ xlab("Terms") + ylab("Counts") + coord_flip()
#
tdm2 <- removeSparseTerms(tdm, sparse=0.95)
m2 <- as.matrix(tdm2)
distm2 <- dist(scale(m2))
fit <- hclust(distm2, method="ward.D")
plot(fit)
###############################################
library(tm)
#library(SnowballC)
reut21578 <- system.file("texts", "crude", package = "tm")
reuters <- VCorpus(DirSource(reut21578), readerControl = list(reader = readReut21578XMLasPlain))
reuters
# inspect
inspect(reuters[1:3])
inspect(reuters[[1]])
meta(reuters[[1]],"id")
#
# getwd()
#setwd("C:/Users/My HP/Desktop/05Teaching/CRC5VIS/_Lectures/R-exercises/reutersdata")
#writeCorpus(text.corpus)
#
reuters <- tm_map(reuters, removePunctuation)
inspect(reuters[[1]])
reuters <- tm_map(reuters, stripWhitespace)
inspect(reuters[[1]])
reuters <- tm_map(reuters, content_transformer(tolower))
inspect(reuters[[1]])
reuters <- tm_map(reuters, removeNumbers)
inspect(reuters[[1]])
reuters <- tm_map(reuters, removeWords, stopwords("english"))
inspect(reuters[[1]])
reuters <- tm_map(reuters, stemDocument)
inspect(reuters[[1]])
reuters
dtm <- DocumentTermMatrix(reuters)
inspect( dtm[5:10,640:650])
#
tdm <- TermDocumentMatrix(reuters)
inspect( tdm[640:650,5:10])
#
findFreqTerms(x = tdm, lowfreq = 10, highfreq = Inf)
#
findAssocs(x = tdm, term = "accord", corlimit = 0.6)
#
tdm.common.70 <- removeSparseTerms(x=tdm, sparse=0.7)
tdm.common.20 <- removeSparseTerms(x=tdm, sparse=0.2)
#
freq.terms <- findFreqTerms(x = dtm, lowfreq = 10, highfreq = Inf)
terms.freq <- rowSums(as.matrix(dtm))
terms.freq <- subset(terms.freq, terms.freq >=15)
df <- data.frame(term=names(terms.freq), freq = terms.freq)
library(ggplot2)
ggplot(df,aes(x=term,y=freq))+ geom_bar(stat="identity")+ xlab("Terms") + ylab("Counts") + coord_flip()
#
#
findAssocs(x = dtm, term = "oil", corlimit = 0.9)
#
dtm.common.70 <- removeSparseTerms(x=dtm, sparse=0.7)
dtm.common.20 <- removeSparseTerms(x=dtm, sparse=0.2)
inspect(dtm)
#
library(wordcloud)
wordcloud(reuters)
#
tdm2 <- removeSparseTerms(tdm, sparse=0.6)
m2 <- as.matrix(tdm2)
distm2 <- dist(scale(m2))
fit <- hclust(distm2, method="ward.D")
plot(fit)
#############################################
setwd("C:/Users/My HP/Desktop/05Teaching/CRC5VIS/_Lectures/R-exercises/")
text.comb <- readLines("wikipedia/textmining/_COMB.txt")
length(text.comb)
#text.comb <- gsub(pattern = "and",replacement = "", x = text.comb, ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE)
#text.comb <- gsub(pattern = "are",replacement = "", x = text.comb, ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE)
#text.comb <- gsub(pattern = "for",replacement = "", x = text.comb, ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE)
#text.comb <- gsub(pattern = "not",replacement = "", x = text.comb, ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE)
#
library(tm)
txt <- VectorSource(text.comb)
txt.corpus <- Corpus(txt); rm(txt)
txt.corpus
inspect(txt.corpus[1])
#txt.corpus <- tm_map(txt.corpus, removeWords, stopwords("english"))
#inspect(txt.corpus[1])
tdm <- TermDocumentMatrix(txt.corpus, control = list(removePunctuation = TRUE,stopwords = TRUE,tolower = TRUE,stemming = TRUE,removeNumbers = TRUE,bounds = list(global = c(3, Inf))))
inspect(tdm)
#
library(wordcloud)
wordcloud(txt.corpus)
#
freq.terms <- findFreqTerms(x = tdm, lowfreq = 10, highfreq = Inf)
terms.freq <- rowSums(as.matrix(tdm))
terms.freq <- subset(terms.freq, terms.freq >=10)
df <- data.frame(term=names(terms.freq), freq = terms.freq)
library(ggplot2)
ggplot(df,aes(x=term,y=freq))+ geom_bar(stat="identity")+ xlab("Terms") + ylab("Counts") + coord_flip()
#
#############################################
dtm <- as.DocumentTermMatrix(tdm)
library(topicmodels)
lda <- LDA(dtm,k=5)
trmtop = terms(lda,5)
trmtop
|
3b71589c2278a12d02a1ee6cbc6826e61ca7d114
|
0d5325d24dbefd4b79475bc63153b592207e050e
|
/Rexam/navercomic.R
|
b523ed2ef0f37000d5b3356c859473740083be7c
|
[] |
no_license
|
HWANG593/R_Programming
|
ec9842dcfdcb26994f7ae9f951c3de29657f3c41
|
5171b76f2a5631a9fa12007d2dfc67e141bee69c
|
refs/heads/master
| 2023-03-26T11:18:51.274623
| 2021-03-16T13:16:57
| 2021-03-16T13:16:57
| 341,129,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 846
|
r
|
navercomic.R
|
library(httr)
library(rvest)
library(XML)
comicName <- NULL
comicSummary <- NULL
comicGrade <- NULL
site<- "https://comic.naver.com/genre/bestChallenge.nhn?&page="
for (i in 1:20) {
url <- paste0(site,i,sep="")
text <- read_html(url)
vcomicName<- html_nodes(text, '.challengeTitle > a')
vcomicName <- html_text(vcomicName)
vcomicName <- gsub("[[:space:]]","",vcomicName)
comicName <- c(comicName,vcomicName)
vcomicSummary<- html_nodes(text, '.summary')
vcomicSummary <- html_text(vcomicSummary)
comicSummary <- c(comicSummary,vcomicSummary)
vcomicGrade<- html_nodes(text, '.rating_type > strong')
vcomicGrade <- html_text(vcomicGrade)
comicGrade <- c(comicGrade,vcomicGrade)
}
navercomic <- data.frame(comicName,comicSummary,comicGrade)
View(navercomic)
write.csv(navercomic, file = "output/navercomic.csv")
|
0d8401e427802cb1291e396cb40b5b862deabf94
|
43575504154d202590d6c407e900ddf796e523da
|
/code/VisaCost_Analysis.R
|
34c94f983fbf1d6402f3aaed025347cc8b6db68c
|
[] |
no_license
|
FabianFox/Visa
|
c25eb179673c91990f4b35fcc3a2e5017b4ed7d2
|
72180e0fe844dbb8c708291639223c8a92c1ae65
|
refs/heads/main
| 2023-02-12T20:23:49.922780
| 2021-01-13T12:24:50
| 2021-01-13T12:24:50
| 311,952,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
VisaCost_Analysis.R
|
# GMP: Global Visa Cost Dataset
# Data
# year: 2019
# source: https://cadmus.eui.eu/handle/1814/66583
# Load/install packages
### ------------------------------------------------------------------------###
if (!require("xfun")) install.packages("xfun")
pkg_attach2("tidyverse", "rio", "lubridate","countrycode", "states")
# Load data
### ------------------------------------------------------------------------###
visa_cost.df <- import("./data/GMP Visa Cost/GMP_GlobalVisaCostDataset_v1.0.xlsx") %>%
select(source_iso3, target_iso3, tourist_visa)
# Filter to independent states
### ------------------------------------------------------------------------###
# Independent states as defined by Gleditsch & Ward (1999)
# data: http://ksgleditsch.com/data-4.html
# Note: excluding microstates
# Custom matches, i.e. 347 (Kosovo) = XKX
custom.match <- c("260" = "DEU" ,"340" = "SRB", "347" = "RKI", "678" = "YEM")
# Data
states.df <- gwstates %>%
filter(year(end) == 9999 & microstate == FALSE) %>%
mutate(iso3c = countrycode(gwcode, "cown", "iso3c", # original ISO3 is out-of-date
custom_match = custom.match))
# Subset
visa_cost.df <- visa_cost.df %>%
filter(source_iso3 %in% states.df$iso3c &
target_iso3 %in% states.df$iso3c)
|
cc2cdeba2551676b1734c5d57a6aec958e748640
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/pangoFontDescriptionSetStyle.Rd
|
688ee6dfa81092b2a330a861a02d97500987b9ab
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 928
|
rd
|
pangoFontDescriptionSetStyle.Rd
|
\alias{pangoFontDescriptionSetStyle}
\name{pangoFontDescriptionSetStyle}
\title{pangoFontDescriptionSetStyle}
\description{Sets the style field of a \code{\link{PangoFontDescription}}. The
\code{\link{PangoStyle}} enumeration describes whether the font is slanted and
the manner in which it is slanted; it can be either
\verb{PANGO_STYLE_NORMAL}, \verb{PANGO_STYLE_ITALIC}, or \verb{PANGO_STYLE_OBLIQUE}.
Most fonts will either have a italic style or an oblique
style, but not both, and font matching in Pango will
match italic specifications with oblique fonts and vice-versa
if an exact match is not found.}
\usage{pangoFontDescriptionSetStyle(object, style)}
\arguments{
\item{\verb{object}}{[\code{\link{PangoFontDescription}}] a \code{\link{PangoFontDescription}}}
\item{\verb{style}}{[\code{\link{PangoStyle}}] the style for the font description}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
c0c84b5b84e2e7e67f900cdccd4374bcb8e039bf
|
569b144ce50f8e25e89a732cab0cf77731ed22cf
|
/Exploratory_Data_Analysis_Course_Project1/Plot2.R
|
a1e9464a91c42901a313f2c1ff88438fd863bb82
|
[] |
no_license
|
RATHsid/ExData_Plotting1
|
614709b70926831b33dc90aef59a9c9e127e5f2c
|
54aa5f1b6220dc99a5071e2a5339204ef9bb954e
|
refs/heads/master
| 2020-12-07T09:05:06.730230
| 2014-09-05T16:10:51
| 2014-09-05T16:10:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 618
|
r
|
Plot2.R
|
Plot2<-function(){
y <- read.table("household_power_consumption.txt", header=TRUE, sep = ';')
a<-(y[,1]=="1/2/2007")|(y[,1]=="2/2/2007")
x<-y[a,]
Date<-as.character(x[[1]])
Time<-as.character(x[[2]])
dt<-paste(Date,Time)
dt1<-strptime(dt, "%d/%m/%Y %H:%M:%S")
f<-x[,-1]
f[[1]]<-dt1
datetime<-f[[1]]
Global_Active_Power<-as.numeric(as.character(f[[2]]))
png("Plot2.png", width = 480, height = 480)
plot(datetime,Global_Active_Power, type = "l", ylim = c(0,6), xlab=" ", ylab="Global Active Power (kilowatts)")
dev.off()
print("Please find the Plot2.png in your working directory")
}
|
30e332880d4427efaa16015e452950a862d3e6ba
|
5b2792dbb609b5737cec65a81a56d2a20c456aa1
|
/R/problem_003.r
|
3f90139dd18b513e135ad32700e1b611bde68e01
|
[] |
no_license
|
pbcoronel/project_euler
|
ab9642ff88ccc54d82fb7821318396293d8fff3e
|
c35272d246040964eaec9ae88236beb328c1e527
|
refs/heads/master
| 2021-01-24T07:55:03.520164
| 2014-08-23T18:46:12
| 2014-08-23T18:46:12
| 9,350,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 173
|
r
|
problem_003.r
|
library(gmp)
largest_prime_factor <- function(number){
factors <- factorize(number)
return(max(factors))
}
largest_prime_factor(13195)
largest_prime_factor(600851475143)
|
3ae05ddaef90df7e7a0314f7f3f36b53a65d577d
|
4a3ce9c13a4cbe18d748d4e7858712caab705f9a
|
/COVID-19 Case Comp/Data Wrangling Files/lags.R
|
ebf6534ae7da216faa8cfea9b94ab51637f282cd
|
[] |
no_license
|
scbrock/covid_case_comp
|
e6abcb07ea0b7baca1d6047f3326c3cef8339dfd
|
e9964eda04ebb4d4586c4c1ec4a0bdf5147d292f
|
refs/heads/main
| 2023-02-17T09:24:47.656751
| 2021-01-16T22:48:42
| 2021-01-16T22:48:42
| 308,984,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,875
|
r
|
lags.R
|
library(ggplot2)
library(dplyr)
library(data.table)
df = read.csv("data_can2.csv")
df <- data.table(df)
responses <- names(df)[5:8] # similar to Samuel's code
# create new variables for "number of new.."
df[, (paste0("new_", responses)) := lapply(.SD, function(v) c(0,diff(v))),
.SDcols = responses]
df = df[, -c(9:11)] # Remove NA columns
df$date = as.Date(df$date) # change type from factor to date
# filter to just Ontario data for daily
ont_data = filter(df, key_apple_mobility == "Ontario")
# read in data frame for confirmed positive cases and estimated date of onset
pos_data = read.csv("conposcovidloc.csv", stringsAsFactors = FALSE)
onset_date = pos_data$Accurate_Episode_Date
onset_date = data.frame(table(onset_date)) # aggregate counts by date
colnames(onset_date) = c("date", "onset") #rename columns
# need to format dates
onset_date$date = strptime(onset_date$date,format="%Y-%m-%d")
onset_date$date = as.Date(onset_date$date)
# join data along date column
ont_data = inner_join(ont_data, onset_date, by = "date")
#
#
# Visualizing lag in Ontario data
#
#
# Visualized lag between estimated onset and number of new cases
ggplot(data = ont_data, aes(x = date, y = new_confirmed)) +
geom_point(color = "red") +
geom_point(aes(x = date, y = onset), color = "blue")
ggplot(data = ont_data, aes(x = date, y = new_confirmed)) +
geom_smooth(color = "red") +
geom_smooth(aes(x = date, y = onset), color = "blue")
#
#
# Estimated Lag
#
#
# format dates again
pos_data$Accurate_Episode_Date = strptime(pos_data$Accurate_Episode_Date,format="%Y-%m-%d")
pos_data$Accurate_Episode_Date = as.Date(pos_data$Accurate_Episode_Date)
pos_data$Test_Reported_Date = strptime(pos_data$Test_Reported_Date,format="%Y-%m-%d")
pos_data$Test_Reported_Date = as.Date(pos_data$Test_Reported_Date)
# create lage column: difference between reported date and date of onset
lag = pos_data$Test_Reported_Date - pos_data$Accurate_Episode_Date
lag = as.numeric(unlist(lag)) # list to numeric vector
onset = pos_data$Accurate_Episode_Date
lag_data = data.frame(onset, lag) # create a lag dataframe for plotting
# Histogram for lag between estimated onset and reported test
ggplot(data = lag_data, aes(x = lag)) +
geom_histogram(binwidth = 5)
# Summary statistics.
summary(lag)
# for each onset date, take the mean number of days to report the case
mean_lag_data = aggregate(lag ~ onset, data=lag_data, FUN = mean)
colnames(mean_lag_data) = c("date", "mean_lag")
# format dates
mean_lag_data$date = strptime(mean_lag_data$date,format="%Y-%m-%d")
mean_lag_data$date = as.Date(mean_lag_data$date)
# plot mean lags by onset date
ggplot(data = mean_lag_data, aes(x = date, y = mean_lag)) +
geom_point()
# limit the y axis to get a better look of the rightside
ggplot(data = mean_lag_data, aes(x = date, y = mean_lag)) +
geom_point() +
ylim(0, 20)
|
4d54dee7d48ff8c8e81f58d3db08bf121b35769f
|
938b3c7b167544207c36a22ac5042729cc8f3471
|
/src/simulation/compile_all_results.R
|
4d2a3772cff5edb1bc981b0876b23ddd7bbd2e63
|
[] |
no_license
|
smmakela/cluster_sampling
|
5aa9fc00b7a17173512ec783c80a7e8d26336e1f
|
c0825884c9c9d387f16ab6fb8d0203f91d42a15c
|
refs/heads/master
| 2021-01-24T03:19:02.892635
| 2017-08-11T19:01:35
| 2017-08-11T19:01:35
| 68,622,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,971
|
r
|
compile_all_results.R
|
# Author: Susanna Makela
# Date: 13 Jan 2016
# Purpose: process results from simulation
################################################################################
### Setup of directories and libraries
################################################################################
libdir <- "/vega/stats/users/smm2253/rpackages"
.libPaths(libdir)
rootdir <- "/vega/stats/users/smm2253/cluster_sampling/"
Sys.setenv(HOME = rootdir)
# set working directory, figure directory
resdir <- paste0(rootdir, "output/simulation/")
setwd(resdir)
# libraries
library(dplyr)
library(tidyr)
# print time
print(Sys.time())
today <- Sys.Date()
today <- gsub("-", "_", today)
################################################################################
# Loop through sim params
################################################################################
sp1 <- expand.grid(use_sizes = c(0, 1),
outcome_type = c("binary", "continuous"),
size_model = c("multinomial", "poisson"),
num_clusters = c(5, 10, 20, 30),
num_units = c(0.05, 0.1, 0.25, 0.5, 1, 10, 30, 60))
sp2 <- expand.grid(use_sizes = c(0, 1),
outcome_type = c("binary", "continuous"),
size_model = "ff",
num_clusters = 16,
num_units = 99)
tot_rows <- nrow(sp1) + nrow(sp2)
# model names for stan
model_list = c("bb", "cluster_inds_only", "knowsizes", "lognormal", "negbin")
# allocate space for all results (columns calculated from outputs of the
# individual compile files)
lmer_all <- data.frame(matrix(NA, nrow = 12*tot_rows, ncol = 18))
svy_all <- data.frame(matrix(NA, nrow = 2*tot_rows, ncol = 17))
stan_pars_all <- data.frame(matrix(NA, nrow = 5*7*tot_rows, ncol = 18))
stan_ybar_all <- data.frame(matrix(NA, nrow = 5*tot_rows, ncol = 17))
stan_Nj_all <- data.frame(matrix(NA, nrow = 5*8*tot_rows, ncol = 12))
# counters for rows
start_lmer <- 1
start_svy <- 1
start_pars <- 1
start_ybar <- 1
start_Nj <- 1
for (i in 1:tot_rows) {
if (i <= nrow(sp1)) {
curr_params <- sp1[i, ]
} else {
curr_params <- sp2[i - nrow(sp1), ]
}
cat("curr_params for i =", i, "\n")
print(curr_params)
use_sizes <- curr_params$use_sizes
outcome_type <- curr_params$outcome_type
size_model <- curr_params$size_model
model_name <- curr_params$model_name
num_clusters <- curr_params$num_clusters
num_units <- curr_params$num_units
if (num_units <= 1) {
nunits <- paste(100*num_units, "pct", sep = "")
} else {
nunits <- num_units
}
# concatenate to get current stubs
curr_stub <- paste0("usesizes_", use_sizes, "_", outcome_type, "_",
size_model, "_nclusters_", num_clusters, "_nunits_",
nunits, "_", today, ".rds")
cat("curr_stub:", curr_stub, "\n")
svy_stub <- paste0("compiled_svy_results_", curr_stub)
lmer_stub <- paste0("compiled_lmer_results_", curr_stub)
# load files
if (!file.exists(paste0(resdir, svy_stub))) {
cat("This file does not exist!\n")
cat(svy_stub, "\n")
next
}
curr_svy <- readRDS(paste0(resdir, svy_stub))
if (!file.exists(paste0(resdir, lmer_stub))) {
cat("This file does not exist!\n")
cat(lmer_stub, "\n")
next
}
curr_lmer <- readRDS(paste0(resdir, lmer_stub))
# rename *_all variables
names(svy_all) <- names(curr_svy)
names(lmer_all) <- names(curr_lmer)
#print("str(curr_svy)")
#print(str(curr_svy))
#print("str(curr_lmer)")
#print(str(curr_lmer))
#print("nrow(curr_svy)")
#print(nrow(curr_svy))
#print("length svy")
#print(length(start_svy:(start_svy+nrow(curr_svy)-1)))
#print("length lmer")
#print(length(start_lmer:(start_lmer+nrow(curr_lmer)-1)))
#print("nrow(curr_lmer)")
#print(nrow(curr_lmer))
#print("start_svy")
#print(start_svy)
#print("start_lmer")
#print(start_lmer)
#print("str(svy_all)")
#print(str(svy_all))
#print("str(lmer_all)")
#print(str(lmer_all))
# add to output
svy_all[start_svy:(start_svy+nrow(curr_svy)-1), ] <- curr_svy
lmer_all[start_lmer:(start_lmer+nrow(curr_lmer)-1), ] <- curr_lmer
# update start values
start_svy <- start_svy + nrow(curr_svy) + 1
start_lmer <- start_lmer + nrow(curr_lmer) + 1
# loop through models to do stan files
for (m in model_list) {
stan_stub <- paste0("compiled_stan_results_usesizes_", use_sizes, "_",
outcome_type, "_", size_model, "_", m, "_nclusters_",
num_clusters, "_nunits_", nunits, "_", today, ".rds")
if (!file.exists(paste0(resdir, stan_stub))) {
cat("This file does not exist!\n")
cat(stan_stub, "\n")
next
}
stan_res <- readRDS(paste0(resdir, stan_stub))
# pull summaries out of the list
curr_pars <- stan_res[["param_ests_summ"]]
curr_ybar <- stan_res[["ybar_ests_summ"]]
curr_Nj <- stan_res[["Nj_ests_summ"]]
#print("str(curr_pars)")
#print(str(curr_pars))
#print("str(curr_ybar)")
#print(str(curr_ybar))
#print("str(curr_Nj)")
#print(str(curr_Nj))
#print("start_pars")
#print(start_pars)
#print("start_ybar")
#print(start_ybar)
#print("start_Nj")
#print(start_Nj)
# rename *_all variables
names(stan_pars_all) <- names(curr_pars)
names(stan_ybar_all) <- names(curr_ybar)
names(stan_Nj_all) <- names(curr_Nj)
# add to output
stan_pars_all[start_pars:(start_pars+nrow(curr_pars)-1), ] <- curr_pars
stan_ybar_all[start_ybar:(start_ybar+nrow(curr_ybar)-1), ] <- curr_ybar
stan_Nj_all[start_Nj:(start_Nj+nrow(curr_Nj)-1), ] <- curr_Nj
# update start values
start_pars <- start_pars + nrow(curr_pars) + 1
start_ybar <- start_ybar + nrow(curr_ybar) + 1
start_Nj <- start_Nj + nrow(curr_Nj) + 1
} # end stan model loop
} # end sim_params loop
print("str(svy_all)")
print(str(svy_all))
print("str(lmer_all)")
print(str(lmer_all))
print("str(stan_pars_all)")
print(str(stan_pars_all))
print("str(stan_ybar_all)")
print(str(stan_ybar_all))
print("str(stan_Nj_all)")
print(str(stan_Nj_all))
################################################################################
# Get rid of remaining all-NA rows
################################################################################
count_na <- function(x) sum(is.na(x))
ncol_svy <- ncol(svy_all)
svy_all <- svy_all %>%
dplyr::mutate(num_na = apply(., 1, count_na)) %>%
dplyr::filter(!(num_na == ncol_svy)) %>%
dplyr::select(-num_na)
ncol_lmer <- ncol(lmer_all)
lmer_all <- lmer_all %>%
dplyr::mutate(num_na = apply(., 1, count_na)) %>%
dplyr::filter(!(num_na == ncol_lmer)) %>%
dplyr::select(-num_na)
ncol_pars <- ncol(stan_pars_all)
stan_pars_all <- stan_pars_all %>%
dplyr::mutate(num_na = apply(., 1, count_na)) %>%
dplyr::filter(!(num_na == ncol_pars)) %>%
dplyr::select(-num_na)
ncol_ybar <- ncol(stan_ybar_all)
stan_ybar_all <- stan_ybar_all %>%
dplyr::mutate(num_na = apply(., 1, count_na)) %>%
dplyr::filter(!(num_na == ncol_ybar)) %>%
dplyr::select(-num_na)
ncol_Nj <- ncol(stan_Nj_all)
stan_Nj_all <- stan_Nj_all %>%
dplyr::mutate(num_na = apply(., 1, count_na)) %>%
dplyr::filter(!(num_na == ncol_Nj)) %>%
dplyr::select(-num_na)
################################################################################
# SAVE
################################################################################
saveRDS(svy_all, paste0(resdir, "all_svy_results_", today, ".rds"))
saveRDS(lmer_all, paste0(resdir, "all_lmer_results_", today, ".rds"))
saveRDS(stan_pars_all, paste0(resdir, "all_pars_results_", today, ".rds"))
saveRDS(stan_ybar_all, paste0(resdir, "all_ybar_results_", today, ".rds"))
saveRDS(stan_Nj_all, paste0(resdir, "all_Nj_results_", today, ".rds"))
|
29f6a5ef0dffca373752ec4a68d0664b0e16545c
|
9f5c5897cd41ad5459574b2fc7b46b1bd26c6efd
|
/plot4.R
|
8a76123474e59a4a758552de56e6e2c09034e0ee
|
[] |
no_license
|
p234a137/ExData_Plotting2
|
6f289f24dff6604a92cd780e01d3f78587a5ad93
|
6ea753389b3d030812a1f095753ace5b116fd457
|
refs/heads/master
| 2016-09-05T12:40:15.156994
| 2015-02-14T01:07:57
| 2015-02-14T01:07:57
| 30,399,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
r
|
plot4.R
|
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip
## read in data
# This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Question 4
# Across the United States, how have emissions from coal combustion-related sources changed from 1999โ2008
# find indices for Short.Names in SCC with the string 'coal' in them
indices_comb_coal <- grep(pattern = 'Comb.*Coal|Coal*Comb', SCC$Short.Name)
# find corresponding indices in NEI
indices_nei <- which(NEI$SCC %in% SCC$SCC[indices_comb_coal])
# summarize
library('dplyr')
usa <- NEI[indices_nei,] %>%
group_by(year) %>%
summarize(sum(Emissions))
names(usa)[2] = "coal_emissions"
# plot
library(ggplot2)
g <- ggplot(usa, aes(x = year, y = coal_emissions))
g + geom_point() + geom_smooth(method = "lm", se = FALSE) +
labs(x = "Year") + labs(y = "Coal Combustion Emissions") +
labs(title = "Coal combustion emissions accross USA vs. year")
ggsave(file = "plot4.png")
|
eec5402769b675d003bb3d78ee754380c630d4e6
|
ae2678731fa0698a59f0196cf3f2d14eb8dd7778
|
/tests/testthat/test-unglue_detect.R
|
a47c796a9aa2cf1dfd13e0523e04c5daf095be94
|
[] |
no_license
|
cran/unglue
|
9e6fd2da72d1edde32d1ea6800501d9d14612931
|
ecd4a780cd03df4198b51346ecfa96912da068c9
|
refs/heads/master
| 2020-12-22T22:58:52.803649
| 2020-06-11T04:50:03
| 2020-06-11T04:50:03
| 236,956,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 164
|
r
|
test-unglue_detect.R
|
test_that("unglue_detect works", {
expect_true(unglue_detect("this and that", "{x} and {y}"))
expect_false(unglue_detect("this and that", "{x} or {y}"))
})
|
2b857eb1c8a963d5c05cdff6c0a6cc8fe7517011
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/BGGM/R/ggm_compare_confirm.R
|
95ba1469dd8df74c3966486f92e02bfa5dfbaef7
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,038
|
r
|
ggm_compare_confirm.R
|
#' GGM Compare: Confirmatory Hypothesis Testing
#'
#' @description Confirmatory hypothesis testing for comparing GGMs. Hypotheses are expressed as equality
#' and/or ineqaulity contraints on the partial correlations of interest. Here the focus is \emph{not}
#' on determining the graph (see \code{\link{explore}}) but testing specific hypotheses related to
#' the conditional (in)dependence structure. These methods were introduced in
#' \insertCite{Williams2019_bf;textual}{BGGM} and in \insertCite{williams2020comparing;textual}{BGGM}
#'
#' @name ggm_compare_confirm
#'
#' @param ... At least two matrices (or data frame) of dimensions \emph{n} (observations) by \emph{p} (nodes).
#'
#' @param hypothesis Character string. The hypothesis (or hypotheses) to be tested. See notes for futher details.
#'
#' @param formula an object of class \code{\link[stats]{formula}}. This allows for including
#' control variables in the model (i.e., \code{~ gender}).
#'
#' @param prior_sd Numeric. The scale of the prior distribution (centered at zero),
#' in reference to a beta distribtuion (defaults to 0.25).
#'
#' @param type Character string. Which type of data for \code{Y} ? The options include \code{continuous},
#' \code{binary}, \code{ordinal}, or \code{mixed}. Note that mixed can be used for data with only
#' ordinal variables. See the note for further details.
#'
#' @param mixed_type numeric vector. An indicator of length p for which varibles should be treated as ranks.
#' (1 for rank and 0 to assume normality). The default is currently (dev version) to treat all integer variables
#' as ranks when \code{type = "mixed"} and \code{NULL} otherwise. See note for further details.
#'
#' @param iter Number of iterations (posterior samples; defaults to 25,000).
#'
#' @param impute Logicial. Should the missing values (\code{NA})
#' be imputed during model fitting (defaults to \code{TRUE}) ?
#'
#' @param progress Logical. Should a progress bar be included (defaults to \code{TRUE}) ?
#'
#' @param seed An integer for the random seed.
#'
#' @references
#' \insertAllCited{}
#'
#' @return The returned object of class \code{confirm} contains a lot of information that
#' is used for printing and plotting the results. For users of \strong{BGGM}, the following
#' are the useful objects:
#'
#' \itemize{
#'
#' \item \code{out_hyp_prob} Posterior hypothesis probabilities.
#'
#' \item \code{info} An object of class \code{BF} from the R package \strong{BFpack}
#' \insertCite{mulder2019bfpack}{BGGM}
#'
#' }
#'
#' @details
#' The hypotheses can be written either with the respective column names or numbers.
#' For example, \code{g1_1--2} denotes the relation between the variables in column 1 and 2 for group 1.
#' The \code{g1_} is required and the only difference from \code{\link{confirm}} (one group).
#' Note that these must correspond to the upper triangular elements of the correlation
#' matrix. This is accomplished by ensuring that the first number is smaller than the second number.
#' This also applies when using column names (i.e,, in reference to the column number).
#'
#'
#' \strong{One Hypothesis}:
#'
#' To test whether a relation in larger in one group, while both are expected
#' to be positive, this can be written as
#'
#' \itemize{
#'
#' \item \code{hyp <- c(g1_1--2 > g2_1--2 > 0)}
#' }
#'
#' This is then compared to the complement.
#'
#' \strong{More Than One Hypothesis}:
#'
#' The above hypothesis can also be compared to, say, a null model by using ";"
#' to separate the hypotheses, for example,
#'
#' \itemize{
#'
#' \item \code{hyp <- c(g1_1--2 > g2_1--2 > 0; g1_1--2 = g2_1--2 = 0)}.
#'
#'}
#'
#' Any number of hypotheses can be compared this way.
#'
#' \strong{Using "&"}
#'
#' It is also possible to include \code{&}. This allows for testing one constraint \bold{and}
#' another constraint as one hypothesis.
#'
#' \itemize{
#'
#' \item \code{hyp <- c("g1_A1--A2 > g2_A1--A2 & g1_A1--A3 = g2_A1--A3")}
#'
#' }
#'
#' Of course, it is then possible to include additional hypotheses by separating them with ";".
#'
#' \strong{Testing Sums}
#'
#' It might also be interesting to test the sum of partial correlations. For example, that the
#' sum of specific relations in one group is larger than the sum in another group.
#'
#' \itemize{
#'
#' \item \code{hyp <- c("g1_A1--A2 + g1_A1--A3 > g2_A1--A2 + g2_A1--A3;
#' g1_A1--A2 + g1_A1--A3 = g2_A1--A2 + g2_A1--A3")}
#'
#' }
#'
#'
#' \strong{Potential Delays}:
#'
#' There is a chance for a potentially long delay from the time the progress bar finishes
#' to when the function is done running. This occurs when the hypotheses require further
#' sampling to be tested, for example, when grouping relations
#' \code{c("(g1_A1--A2, g2_A2--A3) > (g2_A1--A2, g2_A2--A3)"}.
#' This is not an error.
#'
#'
#' \strong{Controlling for Variables}:
#'
#' When controlling for variables, it is assumed that \code{Y} includes \emph{only}
#' the nodes in the GGM and the control variables. Internally, \code{only} the predictors
#' that are included in \code{formula} are removed from \code{Y}. This is not behavior of, say,
#' \code{\link{lm}}, but was adopted to ensure users do not have to write out each variable that
#' should be included in the GGM. An example is provided below.
#'
#' \strong{Mixed Type}:
#'
#' The term "mixed" is somewhat of a misnomer, because the method can be used for data including \emph{only}
#' continuous or \emph{only} discrete variables \insertCite{hoff2007extending}{BGGM}. This is based on the
#' ranked likelihood which requires sampling the ranks for each variable (i.e., the data is not merely
#' transformed to ranks). This is computationally expensive when there are many levels. For example,
#' with continuous data, there are as many ranks as data points!
#'
#' The option \code{mixed_type} allows the user to determine which variable should be treated as ranks
#' and the "emprical" distribution is used otherwise. This is accomplished by specifying an indicator
#' vector of length \emph{p}. A one indicates to use the ranks, whereas a zero indicates to "ignore"
#' that variable. By default all integer variables are handled as ranks.
#'
#' \strong{Dealing with Errors}:
#'
#' An error is most likely to arise when \code{type = "ordinal"}. The are two common errors (although still rare):
#'
#' \itemize{
#'
#' \item The first is due to sampling the thresholds, especially when the data is heavily skewed.
#' This can result in an ill-defined matrix. If this occurs, we recommend to first try
#' decreasing \code{prior_sd} (i.e., a more informative prior). If that does not work, then
#' change the data type to \code{type = mixed} which then estimates a copula GGM
#' (this method can be used for data containing \strong{only} ordinal variable). This should
#' work without a problem.
#'
#' \item The second is due to how the ordinal data are categorized. For example, if the error states
#' that the index is out of bounds, this indicates that the first category is a zero. This is not allowed, as
#' the first category must be one. This is addressed by adding one (e.g., \code{Y + 1}) to the data matrix.
#'
#' }
#'
#'
#' \strong{Imputing Missing Values}:
#'
#' Missing values are imputed with the approach described in \insertCite{hoff2009first;textual}{BGGM}.
#' The basic idea is to impute the missing values with the respective posterior predictive distribution,
#' given the observed data, as the model is being estimated. Note that the default is \code{TRUE},
#' but this ignored when there are no missing values. If set to \code{FALSE}, and there are missing
#' values, list-wise deletion is performed with \code{na.omit}.
#'
#' @note
#'
#' \strong{"Default" Prior}:
#'
#' In Bayesian statistics, a default Bayes factor needs to have several properties. I refer
#' interested users to \insertCite{@section 2.2 in @dablander2020default;textual}{BGGM}. In
#' \insertCite{Williams2019_bf;textual}{BGGM}, some of these properties were investigated (e.g.,
#' model selection consistency). That said, we would not consider this a "default" or "automatic"
#' Bayes factor and thus we encourage users to perform sensitivity analyses by varying the scale of
#' the prior distribution (\code{prior_sd}).
#'
#' Furthermore, it is important to note there is no "correct" prior and, also, there is no need
#' to entertain the possibility of a "true" model. Rather, the Bayes factor can be interpreted as
#' which hypothesis best (relative to each other) predicts the observed data
#' \insertCite{@Section 3.2 in @Kass1995}{BGGM}.
#'
#' \strong{Interpretation of Conditional (In)dependence Models for Latent Data}:
#'
#' See \code{\link{BGGM-package}} for details about interpreting GGMs based on latent data
#' (i.e, all data types besides \code{"continuous"})
#'
#'
#' @examples
#' \donttest{
#' # note: iter = 250 for demonstrative purposes
#'
#' # data
#' Y <- bfi
#'
#' ###############################
#' #### example 1: continuous ####
#' ###############################
#'
#' # males
#' Ymale <- subset(Y, gender == 1,
#' select = -c(education,
#' gender))[,1:5]
#'
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(education,
#' gender))[,1:5]
#'
#' # exhaustive
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2;
#' g1_A1--A2 < g2_A1--A2;
#' g1_A1--A2 = g2_A1--A2")
#'
#' # test hyp
#' test <- ggm_compare_confirm(Ymale, Yfemale,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print (evidence not strong)
#' test
#'
#' #########################################
#' #### example 2: sensitivity to prior ####
#' #########################################
#' # continued from example 1
#'
#' # decrease prior SD
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' prior_sd = 0.1,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#'
#' # increase prior SD
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' prior_sd = 0.5,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#'
#' ################################
#' #### example 3: mixed data #####
#' ################################
#'
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2;
#' g1_A1--A2 < g2_A1--A2;
#' g1_A1--A2 = g2_A1--A2")
#'
#' # test (1000 for example)
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' type = "mixed",
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#'
#' ##############################
#' ##### example 4: control #####
#' ##############################
#' # control for education
#'
#' # data
#' Y <- bfi
#'
#' # males
#' Ymale <- subset(Y, gender == 1,
#' select = -c(gender))[,c(1:5, 26)]
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(gender))[,c(1:5, 26)]
#'
#' # test
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' formula = ~ education,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#' # print
#' test
#'
#'
#' #####################################
#' ##### example 5: many relations #####
#' #####################################
#'
#' # data
#' Y <- bfi
#'
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 = g1_A1--A3 = g2_A1--A3")
#'
#' Ymale <- subset(Y, gender == 1,
#' select = -c(education,
#' gender))[,1:5]
#'
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(education,
#' gender))[,1:5]
#'
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#' # print
#' test
#' }
#' @export
ggm_compare_confirm <- function(...,
                                hypothesis,
                                formula = NULL,
                                type = "continuous",
                                mixed_type = NULL,
                                prior_sd = 0.25,
                                iter = 25000,
                                impute = TRUE,
                                progress = TRUE,
                                seed = 1){
  # Confirmatory hypothesis testing of partial correlations across groups.
  #
  # Workflow:
  #   1. Draw posterior samples for each group, branching on `type`
  #      (continuous / binary / ordinal / mixed) and on whether `formula`
  #      is supplied (covariates removed via remove_predictors_helper).
  #   2. Draw prior samples with the same prior settings.
  #   3. Evaluate the order/equality constraints in `hypothesis` with
  #      BFpack's BF() on both prior and posterior samples.
  #   4. Combine the two into Bayes factors and posterior hypothesis
  #      probabilities (equal prior probabilities per hypothesis).
  #
  # The sampling itself is delegated to compiled routines via .Call().
  # temporary warning until missing data is fully implemented
  if(type != "continuous"){
    warning(paste0("imputation during model fitting is\n",
                   "currently only implemented for 'continuous' data."))
  }
  # Preserve the caller's RNG state; it is restored just before returning.
  # NOTE(review): this errors if .Random.seed does not exist yet (no prior
  # RNG use in the session) -- confirm upstream guarantees it is set.
  old <- .Random.seed
  set.seed(seed)
  # prior prob (equal prior probability for each hypothesis)
  priorprob <- 1
  # delta parameter derived from the requested prior SD
  delta <- delta_solve(prior_sd)
  # the per-group data sets supplied via ...
  dat_list <- list(...)
  # combine data (per-group summary information)
  info <- Y_combine(...)
  # number of groups
  # NOTE(review): the cbind loop further down iterates 2:groups, so at
  # least two groups appear to be assumed -- confirm.
  groups <- length(info$dat)
  if(type == "continuous"){
    if(is.null(formula)){
      # ---- continuous, no covariates ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        # data (mean-centered only; scale = F keeps original units)
        Y <- as.matrix(scale(dat_list[[x]], scale = F))
        # nodes
        p <- ncol(Y)
        if(!impute){
          # na omit (list-wise deletion when imputation is disabled)
          Y <- as.matrix(na.omit(Y))
          Y_miss <- Y
        } else {
          # indicator matrix: 1 where a value is missing
          Y_miss <- ifelse(is.na(Y), 1, 0)
          # nothing missing: skip imputation in the sampler
          if(sum(Y_miss) == 0){
            impute <- FALSE
          }
          # impute means (column means as starting values for the sampler)
          for(i in 1:p){
            Y[which(is.na(Y[,i])), i] <- mean(na.omit(Y[,i]))
          }
        }
        # starting value for the precision matrix
        start <- solve(cov(Y))
        # first 50 draws are later discarded as burn-in
        .Call(
          '_BGGM_Theta_continuous',
          PACKAGE = 'BGGM',
          Y = Y,
          iter = iter + 50,
          delta = delta,
          epsilon = 0.01,
          prior_only = 0,
          explore = 1,
          start = start,
          progress = progress,
          impute = impute,
          Y_miss = Y_miss
        )
      })
      # formula
    } else {
      # ---- continuous, controlling for covariates in `formula` ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        # remove the formula predictors from Y; they enter via X instead
        control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
                                                 formula = formula)
        # data (mean-centered)
        Y <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
        # nodes
        p <- ncol(Y)
        # observations
        n <- nrow(Y)
        # model matrix
        X <- as.matrix(control_info$model_matrices[[1]])
        start <- solve(cov(Y))
        # posterior sample
        .Call(
          "_BGGM_mv_continuous",
          Y = Y,
          X = X,
          delta = delta,
          epsilon = 0.01,
          iter = iter + 50,
          start = start,
          progress = progress
        )
      })
    }
  } else if(type == "binary"){
    # intercept only
    if (is.null(formula)) {
      # ---- binary, no covariates (intercept-only model matrix) ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        # data (list-wise deletion; imputation not implemented here)
        Y <- as.matrix(na.omit(dat_list[[x]]))
        # observations
        n <- nrow(Y)
        # nodes
        p <- ncol(Y)
        X <- matrix(1, n, 1)
        start <- solve(cov(Y))
        # posterior sample (probit-style latent model; fixed cutpoints)
        .Call(
          "_BGGM_mv_binary",
          Y = Y,
          X = X,
          delta = delta,
          epsilon = 0.01,
          iter = iter + 50,
          beta_prior = 0.0001,
          cutpoints = c(-Inf, 0, Inf),
          start = start,
          progress = progress
        )
      })
    } else {
      # ---- binary, controlling for covariates ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
                                                 formula = formula)
        # data
        Y <- as.matrix(control_info$Y_groups[[1]])
        # observations
        n <- nrow(Y)
        # nodes
        p <- ncol(Y)
        # model matrix
        X <- as.matrix(control_info$model_matrices[[1]])
        start <- solve(cov(Y))
        # posterior sample
        .Call(
          "_BGGM_mv_binary",
          Y = Y,
          X = X,
          delta = delta,
          epsilon = 0.01,
          iter = iter + 50,
          beta_prior = 0.0001,
          cutpoints = c(-Inf, 0, Inf),
          start = start,
          progress = progress
        )
      })
    }
  } else if(type == "ordinal"){
    if(is.null(formula)){
      # ---- ordinal, no covariates ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        # data
        Y <- as.matrix(na.omit(dat_list[[x]]))
        # observations
        n <- nrow(Y)
        # nodes
        p <- ncol(Y)
        X <- matrix(1, n, 1)
        # categories (max over columns; categories must start at 1)
        K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
        start <- solve(cov(Y))
        # posterior sample
        # call c ++
        .Call(
          "_BGGM_mv_ordinal_albert",
          Y = Y,
          X = X,
          iter = iter + 50,
          delta = delta,
          epsilon = 0.01,
          K = K,
          start = start,
          progress = progress
        )
      })
    } else {
      # ---- ordinal, controlling for covariates ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
                                                 formula = formula)
        # data
        Y <- as.matrix(control_info$Y_groups[[1]])
        # observations
        n <- nrow(Y)
        # nodes
        p <- ncol(Y)
        # model matrix
        X <- as.matrix(control_info$model_matrices[[1]])
        # categories
        K <- max(apply(Y, 2, function(x) { length(unique(x)) } ))
        start <- solve(cov(Y))
        # posterior sample
        # call c ++
        .Call(
          "_BGGM_mv_ordinal_albert",
          Y = Y,
          X = X,
          iter = iter + 50,
          delta = delta,
          epsilon = 0.01,
          K = K,
          start = start,
          progress = progress
        )
      })
    }
  } else if(type == "mixed") {
    if(!is.null(formula)){
      # formula is accepted but not honored for the copula model
      warning("formula ignored for mixed data at this time")
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        # predictors are still stripped from Y even though X is unused
        control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[x]])),
                                                 formula = formula)
        # data
        Y <- as.matrix(control_info$Y_groups[[1]])
        Y <- na.omit(Y)
        # observations
        n <- nrow(Y)
        # nodes
        p <- ncol(Y)
        # default for ranks: treat integer-valued columns as ranks
        if(is.null(mixed_type)) {
          idx = colMeans(round(Y) == Y)
          idx = ifelse(idx == 1, 1, 0)
          # user defined
        } else {
          idx = mixed_type
        }
        # rank following hoff (2008)
        rank_vars <- rank_helper(Y)
        post_samp <- .Call(
          "_BGGM_copula",
          z0_start = rank_vars$z0_start,
          levels = rank_vars$levels,
          K = rank_vars$K,
          Sigma_start = rank_vars$Sigma_start,
          iter = iter + 50,
          delta = delta,
          epsilon = 0.01,
          idx = idx,
          progress = progress
        )
      })
    } else {
      # ---- mixed, no covariates ----
      post_samp <- lapply(1:groups, function(x) {
        if(isTRUE(progress)){
          message("BGGM: Posterior Sampling ", "(Group ", x, ")")
        }
        Y <- na.omit(dat_list[[x]])
        # observations
        n <- nrow(Y)
        # nodes
        p <- ncol(Y)
        # default for ranks: treat integer-valued columns as ranks
        if(is.null(mixed_type)) {
          idx = colMeans(round(Y) == Y)
          idx = ifelse(idx == 1, 1, 0)
          # user defined
        } else {
          idx = mixed_type
        }
        # rank following hoff (2008)
        rank_vars <- rank_helper(Y)
        post_samp <- .Call(
          "_BGGM_copula",
          z0_start = rank_vars$z0_start,
          levels = rank_vars$levels,
          K = rank_vars$K,
          Sigma_start = rank_vars$Sigma_start,
          iter = iter + 50,
          delta = delta,
          epsilon = 0.01,
          idx = idx,
          progress = progress
        )
      })
    }
  } else {
    stop("'type' not supported: must be continuous, binary, ordinal, or mixed.")
  }
  # ---- sample the prior (same for all types) ----
  # NOTE(review): the prior is drawn with a fixed 25000 iterations while the
  # prior_group matrix below is shaped by `iter` -- confirm this is intended
  # when iter != 25000.
  if(is.null(formula)){
    # prior samples use the first group's data dimensions only
    Yprior <- as.matrix(dat_list[[1]])
    prior_samp <- lapply(1:groups, function(x) {
      if(isTRUE(progress)){
        message("BGGM: Prior Sampling ", "(Group ", x, ")")
      }
      .Call(
        '_BGGM_sample_prior',
        PACKAGE = 'BGGM',
        Y = Yprior,
        iter = 25000,
        delta = delta,
        epsilon = 0.01,
        prior_only = 1,
        explore = 0,
        progress = progress
      )$fisher_z
    })
  } else {
    control_info <- remove_predictors_helper(list(as.data.frame(dat_list[[1]])),
                                             formula = formula)
    Yprior <- as.matrix(scale(control_info$Y_groups[[1]], scale = F))
    prior_samp <- lapply(1:groups, function(x) {
      if(isTRUE(progress)){
        message("BGGM: Prior Sampling ", "(Group ", x, ")")
      }
      # per-group seed so prior draws differ across groups
      set.seed(x)
      .Call(
        '_BGGM_sample_prior',
        PACKAGE = 'BGGM',
        Y = Yprior,
        iter = 25000,
        delta = delta,
        epsilon = 0.01,
        prior_only = 1,
        explore = 0,
        progress = progress
      )$fisher_z
    })
  }
  # nodes
  p <- ncol(Yprior)
  # number of pcors (upper-triangular count)
  pcors <- 0.5 * (p * (p - 1))
  # identity matrix (used only for its upper.tri() mask)
  I_p <- diag(p)
  # colnames: post samples -- relations named e.g. "gone" + "onetwo"
  col_names <- numbers2words(1:p)
  mat_names <- lapply(1:groups, function(x) paste0("g", numbers2words(x),
                                                   sapply(col_names, function(x) paste(col_names, x, sep = ""))[upper.tri(I_p)]))
  # posterior start group (one); drop the 50 burn-in slices, then extract
  # the upper-triangular entries (the p x p logical mask recycles across
  # the iteration slices of the 3-d array)
  post_group <- matrix(post_samp[[1]]$fisher_z[, , 51:(iter + 50)][upper.tri(I_p)],
                       iter, pcors, byrow = TRUE)
  # prior start group (one)
  prior_group <- matrix(prior_samp[[1]][ , ,][upper.tri(I_p)],
                        nrow = iter,
                        ncol = pcors,
                        byrow = TRUE)
  # post group: append the remaining groups column-wise
  for(j in 2:(groups)){
    post_group <- cbind(post_group,
                        matrix(post_samp[[j]]$fisher_z[, , 51:(iter+50)][upper.tri(I_p)],
                               nrow = iter, ncol = pcors,
                               byrow = TRUE))
    prior_group <- cbind(prior_group,
                         matrix(prior_samp[[j]][ , ,][upper.tri(I_p)], iter, pcors, byrow = TRUE))
  }
  posterior_samples <- post_group
  colnames(posterior_samples) <- unlist(mat_names)
  prior_samples <- prior_group
  colnames(prior_samples) <- unlist(mat_names)
  # Gaussian approximations of prior and posterior (mean + covariance)
  # passed to BFpack
  prior_mu <- colMeans(prior_samples)
  prior_cov <- cov(prior_samples)
  post_mu <- colMeans(posterior_samples)
  post_cov <- cov(posterior_samples)
  # evaluate the constraints under the prior and under the posterior
  BFprior <- BF(prior_mu,
                Sigma = prior_cov,
                hypothesis = group_hyp_helper(hypothesis, x = info$dat[[1]]),
                n = 1)
  BFpost <- BF(post_mu,
               Sigma = post_cov,
               hypothesis = group_hyp_helper(hypothesis, x = info$dat[[1]]),
               n = 1)
  # number of hypotheses
  n_hyps <- nrow(BFpost$BFtable_confirmatory)
  # BF against unconstrained: (posterior fit/complexity) / (prior fit/complexity)
  BF_tu <- NA
  for (i in seq_len(n_hyps)) {
    # BF tu
    BF_tu[i] <-
      prod(BFpost$BFtable_confirmatory[i, 3:4] / BFprior$BFtable_confirmatory[i, 3:4])
  }
  # posterior hyp probs (equal prior probabilities)
  out_hyp_prob <- (BF_tu * priorprob) / sum(BF_tu * priorprob)
  # BF matrix: pairwise Bayes factors BF[i, j] = BF_tu[i] / BF_tu[j]
  BF_matrix <- matrix(rep(BF_tu, length(BF_tu)),
                      ncol = length(BF_tu),
                      byrow = TRUE)
  # NaN can arise from 0/0; treated as no support
  BF_matrix[is.nan(BF_matrix)] <- 0
  diag(BF_matrix) <- 1
  BF_matrix <- t(BF_matrix) / (BF_matrix)
  row.names(BF_matrix) <- row.names(BFpost$BFtable_confirmatory)
  colnames(BF_matrix) <- row.names(BFpost$BFtable_confirmatory)
  if(isTRUE(progress)){
    message("BGGM: Finished")
  }
  # everything downstream print/plot methods need
  returned_object <- list(
    BF_matrix = BF_matrix,
    out_hyp_prob = out_hyp_prob,
    info = BFpost,
    groups = groups,
    info_dat = info,
    type = type,
    call = match.call(),
    hypothesis = hypothesis,
    iter = iter,
    p = p,
    posterior_samples = posterior_samples,
    post_group = post_group,
    delta = delta,
    formula = formula,
    dat_list = dat_list,
    post_samp = post_samp
  )
  # restore the caller's RNG state
  .Random.seed <<- old
  class(returned_object) <- c("BGGM",
                              "confirm",
                              "ggm_compare_confirm")
  returned_object
}
print_ggm_confirm <- function(x, ...) {
  # Console summary of a confirmatory group-comparison object: header,
  # sample sizes, model dimensions, call, hypotheses, posterior
  # probabilities, and the pairwise Bayes factor matrix.
  n_groups <- x$groups
  data_info <- x$info_dat
  divider <- "--- \n"

  cat("BGGM: Bayesian Gaussian Graphical Models \n")
  cat("Type:", x$type, "\n")
  cat(divider)

  # per-group sample sizes
  cat("Posterior Samples:", x$iter, "\n")
  for (g in seq_len(n_groups)) {
    cat(" Group", paste0(g, ":"), data_info$dat_info$n[[g]], "\n")
  }

  # number of variables
  cat("Variables (p):", x$p, "\n")
  # number of edges
  cat("Relations:", 0.5 * (x$p * (x$p - 1)), "\n")
  cat("Delta:", x$delta, "\n")
  cat(divider)

  cat("Call:\n")
  print(x$call)
  cat(divider)

  # the user-supplied hypotheses overwrite the leading entries; any
  # internally added complement at the end is kept as-is
  cat("Hypotheses: \n\n")
  user_hyps <- strsplit(x$hypothesis, ";")[[1]]
  x$info$hypotheses[seq_along(user_hyps)] <- user_hyps
  total_hyps <- length(x$info$hypotheses)
  for (h in seq_len(total_hyps)) {
    cleaned <- gsub(" ", "", gsub('[\n]', '', x$info$hypotheses[h]))
    cat(paste0("H", h, ": ", cleaned, "\n"))
  }
  cat(divider)

  cat("Posterior prob: \n\n")
  for (h in seq_len(total_hyps)) {
    cat(paste0("p(H", h, "|data) = ", round(x$out_hyp_prob[h], 3)))
    cat("\n")
  }
  cat(divider)

  cat('Bayes factor matrix: \n')
  print(round(x$BF_matrix, 3))
  cat(divider)
  cat("note: equal hypothesis prior probabilities")
}
#' @title Plot \code{confirm} objects
#'
#' @description Plot the posterior hypothesis probabilities as a pie chart, with
#' each slice corresponding to the probability of a given hypothesis.
#'
#' @param x An object of class \code{confirm}
#'
#' @param ... Currently ignored.
#'
#' @return A \code{ggplot} object.
#'
#'
#' @examples
#'
#' \donttest{
#'
#' #####################################
#' ##### example 1: many relations #####
#' #####################################
#'
#' # data
#' Y <- bfi
#'
#' hypothesis <- c("g1_A1--A2 > g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 & g1_A1--A3 = g2_A1--A3;
#' g1_A1--A2 = g2_A1--A2 = g1_A1--A3 = g2_A1--A3")
#'
#' Ymale <- subset(Y, gender == 1,
#' select = -c(education,
#' gender))[,1:5]
#'
#'
#' # females
#' Yfemale <- subset(Y, gender == 2,
#' select = -c(education,
#' gender))[,1:5]
#'
#' test <- ggm_compare_confirm(Ymale,
#' Yfemale,
#' hypothesis = hypothesis,
#' iter = 250,
#' progress = FALSE)
#'
#'
#' # plot
#' plot(test)
#' }
#' @export
plot.confirm <- function(x, ...) {
  # Pie chart of the posterior hypothesis probabilities: one slice per
  # hypothesis, labelled with its rounded probability.
  probs <- x$out_hyp_prob
  hyps_names <- paste0("p(H", seq_along(probs), "|data) = ", round(probs, 3))
  plot_dat <- data.frame(hyps_names = hyps_names,
                         hyps = probs)
  # a stacked bar in polar coordinates renders as a pie chart
  ggplot(plot_dat, aes(x = "",
                       y = probs,
                       fill = hyps_names)) +
    geom_bar(width = 1, stat = "identity") +
    coord_polar("y") +
    theme_minimal() +
    theme(axis.text = element_blank(),
          axis.ticks = element_blank(),
          panel.grid = element_blank()) +
    scale_fill_discrete("Posterior Prob") +
    ylab("") +
    xlab("")
}
|
2959de0120d6e7f093114ececf65e933286cab81
|
6eb21cdd51630e339cd645748cc528a7d868da5e
|
/Airline_Delay/SQLite.R
|
c2d01574779ee3a05e2d1791b4800ec44f3e4c8c
|
[] |
no_license
|
z357412526/Present_Projects
|
6654b05414a538d9b33ed0b035481e576ded922a
|
9ce73049f493dc27b714f58a89adae6667da9058
|
refs/heads/master
| 2021-01-09T21:55:14.142038
| 2016-03-18T02:00:29
| 2016-03-18T02:00:29
| 49,180,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,680
|
r
|
SQLite.R
|
# Build an SQLite database of the airline on-time performance data
# (1987-2008) and query departure-delay rates by carrier, route,
# month, weekday, and scheduled departure hour.
library(RSQLite)
# Install data.table only if missing; unconditional install.packages()
# in a script re-downloads the package on every run.
if (!requireNamespace("data.table", quietly = TRUE)) {
  install.packages("data.table")
}
library(data.table)
fileName <- "Airlines.db"
db <- dbConnect(SQLite(), dbname = fileName)
initExtension(db)
# Column classes for the 29 fields of the yearly CSV extracts.
tableCL <- c(rep("integer",8), "factor","integer","logical","integer",
"integer","logical","integer","integer","factor","factor","integer","logical",
"logical","integer","logical","integer",rep("logical",5))
# Decompress each yearly archive on the fly (fread runs the shell
# command) and append its rows to one combined table.
for (i in 1987:2008){
unzip_name <- paste("bunzip2 -c ", i,".csv.bz2",sep="")
tmp <- fread(unzip_name, header = TRUE, colClasses=tableCL)
dbWriteTable(conn = db, name = "Airlines", value = tmp, append=TRUE, row.names = FALSE)
print(i)
}
dbListTables(db)
#"Airlines"
dbListFields(db, "Airlines")
# [1] "Year" "Month" "DayofMonth"
# [4] "DayOfWeek" "DepTime" "CRSDepTime"
# [7] "ArrTime" "CRSArrTime" "UniqueCarrier"
# [10] "FlightNum" "TailNum" "ActualElapsedTime"
# [13] "CRSElapsedTime" "AirTime" "ArrDelay"
# [16] "DepDelay" "Origin" "Dest"
# [19] "Distance" "TaxiIn" "TaxiOut"
# [22] "Cancelled" "CancellationCode" "Diverted"
# [25] "CarrierDelay" "WeatherDelay" "NASDelay"
# [28] "SecurityDelay" "LateAircraftDelay"
file.size("Airlines.db")
#9399877632
#some about SQLite
# Reduce scheduled departure time HHMM to the hour.
db_date_new <- dbSendQuery(db, "UPDATE Airlines SET CRSDepTime = floor(CRSDepTime/100)")
db_filter <- dbSendQuery(db,
"CREATE VIEW airline_data1 AS SELECT * FROM Airlines WHERE DepDelay != 'NA'")
# Delay-rate summary without an index (timed for comparison below).
# Fix: the >180-minute fraction was previously aliased "per80",
# inconsistent with "per180" in table2 and with what it computes.
system.time(db_table <-dbSendQuery(db,
"CREATE TABLE table1 AS SELECT UniqueCarrier, Dest, Origin, Month, DayOfWeek, CRSDepTime,
sum(case when DepDelay>30 then 1 else 0 end)*1.0/count(*) AS per30,
sum(case when DepDelay>60 then 1 else 0 end)*1.0/count(*) AS per60,
sum(case when DepDelay>180 then 1 else 0 end)*1.0/count(*) AS per180,
COUNT (*) AS total
FROM airline_data1 GROUP BY UniqueCarrier, Dest, Origin, Month, DayOfWeek, CRSDepTime"))
# user system elapsed
# 674.776 32.260 742.649
##indexing here
system.time(db_index <- dbSendQuery(db,
"CREATE INDEX indices ON Airlines (UniqueCarrier, Dest, Origin, Month, DayOfWeek, CRSDepTime)"))
# user system elapsed
# 506.852 38.468 580.333
#The index is something which the optimizer picks up "automagically
#- ideally you don't need to force select an index.
system.time(db_filter2 <- dbSendQuery(db,
"CREATE VIEW airline_data2 AS SELECT * FROM Airlines WHERE DepDelay != 'NA'"))
# user system elapsed
# 0.004 0.000 0.002
#comparing with the time using in previous part, I found that the running time after indexing is
#highly improved. It's about half of previous one.
#Well, indexing process costs lots of time though. So, if there is no further selecting processes
#indexing does not make difference from non-indexing methods
system.time(db_table2 <-dbSendQuery(db,
"CREATE TABLE table2 AS SELECT UniqueCarrier, Dest, Origin, Month, DayOfWeek, CRSDepTime,
sum(case when DepDelay>30 then 1 else 0 end)*1.0/count(*) AS per30,
sum(case when DepDelay>60 then 1 else 0 end)*1.0/count(*) AS per60,
sum(case when DepDelay>180 then 1 else 0 end)*1.0/count(*) AS per180,
COUNT (*) AS total
FROM airline_data2 GROUP BY UniqueCarrier, Dest, Origin, Month, DayOfWeek, CRSDepTime"))
# user system elapsed
# 245.876 53.984 403.838
# Top routes by fraction of >30-minute delays (min. 150 flights).
system.time(top_delay30 <- dbSendQuery(db,
"SELECT * FROM table2 WHERE total >= 150 ORDER BY per30 DESC LIMIT 5"))
fetch(top_delay30, 5)
# UniqueCarrier Dest Origin Month DayOfWeek CRSDepTime per30 per60
# 1 WN HOU DAL 6 5 20 0.4125000 0.1750000
# 2 WN DAL HOU 2 5 19 0.4039735 0.1192053
# 3 WN HOU DAL 4 5 20 0.3800000 0.2066667
# 4 WN HOU DAL 6 5 21 0.3750000 0.1447368
# 5 WN DAL HOU 6 5 19 0.3680982 0.1533742
# per180 total
# 1 0.000000000 160
# 2 0.006622517 151
# 3 0.020000000 150
# 4 0.000000000 152
# 5 0.000000000 163
dbClearResult(top_delay30)
# Top routes by fraction of >60-minute delays.
top_delay60 <- dbSendQuery(db,
"SELECT * FROM table2 WHERE total >= 150 ORDER BY per60 DESC LIMIT 5")
fetch(top_delay60, 5)
# UniqueCarrier Dest Origin Month DayOfWeek CRSDepTime per30 per60
# 1 UA SFO LAX 12 5 11 0.3641975 0.2222222
# 2 WN HOU DAL 4 5 20 0.3800000 0.2066667
# 3 UA SFO LAX 10 5 16 0.3178808 0.1986755
# 4 UA SFO LAX 12 5 18 0.3375000 0.1937500
# 5 AA LAX ORD 1 4 0 0.2817680 0.1878453
# per180 total
# 1 0.00617284 162
# 2 0.02000000 150
# 3 0.00000000 151
# 4 0.01250000 160
# 5 0.03314917 181
# Fix: this result set was never cleared, leaking an open cursor.
dbClearResult(top_delay60)
# Top routes by fraction of >180-minute delays.
top_delay180 <- dbSendQuery(db,
"SELECT * FROM table2 WHERE total >= 150 ORDER BY per180 DESC LIMIT 5")
fetch(top_delay180, 5)
# UniqueCarrier Dest Origin Month DayOfWeek CRSDepTime per30 per60
# 1 AA ORD BOS 12 2 0 0.1116751 0.06598985
# 2 AA LGA ORD 12 3 0 0.2033898 0.11299435
# 3 AA DFW ORD 1 4 0 0.1907895 0.11184211
# 4 AA ORD LGA 12 3 0 0.1027027 0.06486486
# 5 AA LGA ORD 1 4 0 0.2192513 0.11229947
# per180 total
# 1 0.04568528 197
# 2 0.03954802 177
# 3 0.03947368 304
# 4 0.03783784 185
# 5 0.03743316 187
dbClearResult(top_delay180)
#[1] TRUE
# Release the database connection.
dbDisconnect(db)
|
14875a0a0a653da479b7d5a3488ba695de576d63
|
e8bd1221d5edf301183e222ae215afa7f3a4c166
|
/man/gg.polygon.Rd
|
4f86047f4b8d79c612383aad66d7bad8be38d097
|
[] |
no_license
|
dill/inlabru
|
1b9a581ae5b56246fcd748db8df051ae4ff8bfa8
|
e2c38a34d591f712b57cbe430c24bb0a82f03ae4
|
refs/heads/master
| 2021-01-22T22:53:21.963501
| 2017-03-18T09:30:08
| 2017-03-18T09:30:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 367
|
rd
|
gg.polygon.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{gg.polygon}
\alias{gg.polygon}
\title{Polygon geom for Spatial* objects}
\usage{
gg.polygon(data, crs = NULL, colour = "black", alpha = 0.1, ...)
}
\arguments{
\item{data}{A SpatialPolygon* object}
}
\value{
geom_polygon
}
\description{
Polygon geom for Spatial* objects
}
|
becc510e470f491f9285c7c8825152271545bc80
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/psychometric/examples/ICC.lme.Rd.R
|
0b40afbff52c3081f364c985deed8699e2813a23
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 315
|
r
|
ICC.lme.Rd.R
|
# Extracted example code for psychometric's ICC.lme help page (see the
# Name/Title/Aliases header below).
library(psychometric)
### Name: ICC.lme
### Title: Intraclass Correlation Coefficient from a Mixed-Effects Model
### Aliases: ICC.lme ICC1.lme ICC2.lme
### Keywords: models univar
### ** Examples
# nlme supplies the mixed-effects machinery; multilevel provides the
# bh1996 example data set
library(nlme)
library(multilevel)
data(bh1996)
# ICC1 and ICC2 for the HRS variable with grouping variable GRP
ICC1.lme(HRS, GRP, data=bh1996)
ICC2.lme(HRS, GRP, data=bh1996)
|
5cf367e63f0b94f43626cffa7fa6bd41a71c47d9
|
226b1036611f247760f8e68de857a37108db64e6
|
/man/getTrailingDays.Rd
|
5617000c32d8eee1ab4fb12ccf8b30ffee452880
|
[] |
no_license
|
sameermanek/mmisc
|
da23008391d571e11d938ae9062db86556d79cb4
|
6623251b9879db643f244d3c81f41c0465bc5725
|
refs/heads/master
| 2021-05-16T01:48:29.468519
| 2017-06-20T21:05:28
| 2017-06-20T21:05:28
| 42,786,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 517
|
rd
|
getTrailingDays.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dates.R
\name{getTrailingDays}
\alias{getTrailingDays}
\title{Get a sequence of trailing days, inclusive}
\usage{
getTrailingDays(chr.end.date, int.length)
}
\arguments{
\item{chr.end.date}{The end date (only one)}
\item{int.length}{The number of days}
}
\value{
A character vector of int.length, with the trailing series of days
}
\description{
Get a sequence of trailing days, inclusive
}
\examples{
getTrailingDays('2017-04-01', 12)
}
|
b5811445ff27a6827180485d75cec388861ba08b
|
23dfa51c5aac37ff6f7ef5d50ce9f951622ff9b7
|
/R/printCI.R
|
61bde01b68eacf67c09b116715cd3d092e7ef225
|
[] |
no_license
|
cran/dafs
|
e8bd227ee1392de9b64964fb6e80c1373696da3d
|
3288237c8c1b83744ec00a6665f557dd99782627
|
refs/heads/master
| 2022-05-14T06:43:59.958293
| 2022-04-11T08:12:33
| 2022-04-11T08:12:33
| 17,695,374
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
printCI.R
|
printCI <- function(x, fmt) {
  # Format a two-element confidence interval as "(lower, upper)".
  #
  # Args:
  #   x:   numeric vector of length 2 (lower and upper bounds).
  #   fmt: an sprintf-style format for each bound, e.g. "%.2f".
  #
  # Returns: a single string "(lower, upper)".
  #
  # Fixes vs. original: `<-` instead of `=` for assignment, braced `if`,
  # paste0() instead of paste(sep = ''), and a grammatical error message
  # ("must have be" -> "must be").
  if (length(x) != 2) {
    stop("x must be a vector of length 2")
  }
  strFmt <- paste0("(", fmt, ", ", fmt, ")")
  strResult <- sprintf(strFmt, x[1], x[2])
  return(strResult)
}
|
0cd5401fdbe70caa1c8e77336767363a1aae604a
|
0e3ccfecc18c042f5eeaed6661e26fd46ef9fcb3
|
/ui.R
|
c4d5d3ea4b25b82436357f2844d6d9ef27e2c84c
|
[] |
no_license
|
SteffenMoritz/dashboard-zukunft
|
e48231f3a650984c23ddaea915b37e66c99d1eec
|
4abc802a9a566c72f30e3227d82b24d69554be61
|
refs/heads/main
| 2023-08-14T13:07:03.717551
| 2021-09-08T04:40:47
| 2021-09-08T04:40:47
| 401,039,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,614
|
r
|
ui.R
|
#
# ui.R for the 'Dashboard Zukunft' Shiny application
# Created for the destatis KI-Hackathon
# Team C for Climate (Denis, Laura, Maria, Steffen)
# See www.destatis.de ( statistisches Bundesamt / German Federal Statistical Office)
#
# Required Libraries
library("shinyWidgets")
library("shiny")
library("plotly")
library("shinythemes")
library("DT")
library("rsconnect")
library("shinydashboard")
# Create the Dashboard
ui <-
fluidPage(
#----------------------------------------------------------------------------------------------------------
# Initial CSS and Theme settings
includeCSS(path = "AdminLTE.css"),
includeCSS(path = "shinydashboard.css"),
theme = shinytheme("readable"), # cerulean #simplex
#----------------------------------------------------------------------------------------------------------
# Browser window text and header picture
titlePanel(
windowTitle = "Dashboard Zukunft",
title = div(img(src = "header6.jpeg", height = "100px", width = "100%"))
),
#----------------------------------------------------------------------------------------------------------
# The Actual Page content
navbarPage(
title = "",
# Menu Point 1 - Dashboard Zukunft
# (includes info boxes and forecasts)
#----------------------------------------------------------------------------------------------------
tabPanel(
"Dashboard Zukunft",
fluidRow(
tags$h3("Zielerreichung Artenvielfalt Vogelbestand"),
tags$br(),
valueBox(
"100",
subtitle = "ZIEL (2030)",
color = "light-blue",
width = 3
),
valueBox(
value = textOutput("bestand_all1"),
subtitle = "AKTUELLER STAND",
color = "red",
icon = icon("frown"),
width = 3
),
tags$i("Der Indikator Artenvielfalt und Landschaftsqualitรคt
zeigt die Bestandsentwicklung fรผr 51 ausgewรคhlte Vogelarten
in Form eines Index. Er wird gemessen in erreichten Prozent
des avisierten Endziels von 100% in 2030."),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$h3("Artenvielfalt je Landschaftstyp"),
tags$br(),
valueBox(
value = textOutput("bestand_all2"),
subtitle = "Gesamtvogelbestand (2016)",
color = "maroon",
icon = icon("crow"),
width = 2
),
valueBox(
value = textOutput("bestand_agrar"),
subtitle = "Agrarland (2016)",
color = "green",
icon = icon("tractor"),
width = 2
),
valueBox(
value = textOutput("bestand_binnen"),
subtitle = "Binnengewรคsser (2016)",
color = "aqua",
icon = icon("water"),
width = 2
),
valueBox(
value = textOutput("bestand_meere"),
subtitle = "Kรผsten/Meere (2016)",
color = "light-blue",
icon = icon("ship"),
width = 2
),
valueBox(
value = textOutput("bestand_siedlungen"),
subtitle = "Siedlungen (2016)",
color = "purple",
icon = icon("home"),
width = 2
),
valueBox(
value = textOutput("bestand_wald"),
subtitle = "Wรคlder (2016)",
color = "olive",
icon = icon("tree"),
width = 2
),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$i("Gesamt gibt der Indikator die Entwicklung der Bestรคnde ausgewรคhlter Vogelarten fรผr fรผnf Landschafts- und Lebensraumtypen wieder.
Diese gehen dabei unterschiedlich gewichtet in den Indikator ein, den grรถรten Einfluss haben
Agrarland und Wรคlder, weil dies auch Flรคchenmรครig die grรถรten Flรคchen sind. Mehr zur Gewichtung auf der Hintergund Seite."),
tags$br(),
tags$br(),
tags$br(),
tags$br(),
tags$h3("Voraussichtliche Entwicklung Gesamtbestand"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern. Mit dem Feld Algorithmus
kรถnnen kann zwischen Prognosealgorithmen gewechselt werden"),
tags$br(),
tags$br(),
# Show a plot of the generated distribution
plotOutput("all_voegel_fore"),
fluidRow(
column(sliderInput("all_voegel_h",
"",
min = 1, max = 50, value = 8, width = "100%"
), width = 9),
column(selectInput("algo", label = "", choices = c("ARIMA", "ETS", "NNETAR")), width = 2)
),
tags$i("Prognosen fรผr die einzlenen Lebensrรคume sind im Menรผ auf den Detailseiten zu den jeweiligen Lebensrรคumen zu finden")
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 2 - Wirksamkeitsanalyse
# (includes side tabs with different analysis)
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Wirksamkeitsanalyse",
navlistPanel(
widths = c(3, 9),
tabPanel(
"Zeitliche Entwicklung in den Lebensrรคumen",
plotlyOutput("plotly_all")
),
tabPanel(
"Im Vergleich mit hรคufigen Klimaindikatoren",
plotlyOutput("plotly_andere"),
tags$br(),
selectInput("norm", label = "Skalierung", choices = c("absolut", "normalisiert"))
),
tabPanel(
"Helfen monetรคre Umweltschutzausgaben der Artenvielfalt",
plotlyOutput("plotly_umwelt")
),
tabPanel(
"Mรถgliche externe Einflussfaktoren",
plotlyOutput("plotly_ext")
),
tabPanel(
"Machine Learning Analyse",
tags$h3("Modellbasierte Analyse"),
tags$i("Idee ist es ein Modell zu ertellen um Einflussfaktoren auf die Vogelanzahl zu bestimmen. Insbesondere der Zusammenhang zwischen
Ausgaben fรผr den Umweltschutz war von besonderem Interesse, um die Effektivitรคt der Maรnahmen/ der Ausgaben in Bezug auf unseren
Indikator zu รผberprรผfen."),
tags$br(),
tags$br(),
tags$h4("GLM Model (Generalized Linear Model)"),
tags$br(),
tags$i("formula = `Bestand repraesentativer Vogelarten, insgesamt` ~
Umweltschutzausgaben_gesamt + `Feinstaub (PM2,5)` + `Anteil des Stroms aus erneuerbaren Energiequellen am Bruttostromverbrauch` +
Bevoelkerungsstand + `Bruttowertschoepfung in jeweiligen Preisen, insgesamt`"),
tags$br(),
tags$br(),
tags$img(src = "ML.png", width = "600px", height = "200px")
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 3 - Agrarland
# (includes more information about )
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Agrarland",
fluidRow(
tags$h3("Voraussichtliche Entwicklung Agrarland"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern."),
plotOutput("agrar_voegel_fore", width = "90%"),
sliderInput("agrar_voegel_h",
width = "90%",
"", min = 1, max = 50, value = 8
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 4 - Binnengewรคsser
# (includes more information about)
#----------------------------------------------------------------------------------------------------
tabPanel(
"Binnengewรคsser",
fluidRow(
tags$h3("Voraussichtliche Entwicklung Agrarland und Binnengewรคsser"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern."),
plotOutput("binnen_voegel_fore", width = "90%"),
sliderInput("binnen_voegel_h",
width = "90%",
"", min = 1, max = 50, value = 8
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 5 - Kรผsten/Meeren
# (includes more information about)
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Kรผsten/Meere",
fluidRow(
tags$h3("Voraussichtliche Entwicklung Meere/Kรผsten"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern."),
plotOutput("meer_voegel_fore", width = "90%"),
sliderInput("meer_voegel_h",
width = "90%",
"", min = 1, max = 50, value = 8
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 6 - Siedlungen
# (includes more information about)
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Siedlungen",
fluidRow(
tags$h3("Voraussichtliche Entwicklung Siedlungsbebiete"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern."),
plotOutput("stadt_voegel_fore", width = "90%"),
sliderInput("stadt_voegel_h",
width = "90%",
"", min = 1, max = 50, value = 8
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 7 - Wรคlder
# (includes more information about)
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Wรคlder",
fluidRow(
tags$h3("Voraussichtliche Entwicklung Wรคlder"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern."),
tags$br(),
tags$br(),
plotOutput("wald_voegel_fore"),
sliderInput("wald_voegel_h",
width = "90%",
"", min = 1, max = 50, value = 8
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 8
# (includes more information about)
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Hintergrundwissen",
navlistPanel(
widths = c(3, 9),
tabPanel(
"Zielsetzung Indikator",
tags$h3("Zielsetzung Indikator"),
tags$i("Der Indikator ist einer der zentralen Umwelt-Indikatoren des Umweltbundesamtes. Die Umweltindikatoren
sind fรผr die deutsche und internationale Umweltpolitik besonders relevant. Ziel des Indikators ist es 100%
in 2030 zu erreichen und damit entsprechend auf Bestandswerte die 1975 erreicht wurden
zurรผckzukommen."),
tags$br(),
tags$br(),
tags$img(src = "ziele.png", width = "800px", height = "600px")
),
tabPanel(
"Zusammensetzung Indikator",
tags$h3("Zusammensetzung Indikator"),
tags$i("Untenstehenden Slider nutzen, um den Prognosehorizont zu erweitern."),
tags$br(),
tags$br(),
tags$img(src = "indikator.png", width = "800px", height = "800px")
),
tabPanel(
"Linksammlung",
fluidRow(
tags$h3("Linksammlung"),
tags$h5("Datenquellen und Indikator"),
tags$i("https://www.umweltbundesamt.de/daten/umweltindikatoren"),
tags$i("https://www.umweltbundesamt.de/daten/umweltindikatoren/indikator-artenvielfalt-landschaftqualitaet"),
tags$br(),
tags$br(),
tags$h5("Bildqellen"),
tags$i("https://www.umweltbundesamt.de/daten/umweltindikatoren"),
tags$i("https://www.umweltbundesamt.de/daten/umweltindikatoren/indikator-artenvielfalt-landschaftqualitaet"),
tags$br(),
tags$br(),
tags$h5("Infos zum Dashboard"),
tags$i("https://www.destatis.de"),
tags$i("github")
)
)
)
),
#----------------------------------------------------------------------------------------------------
# Menu Point 9 - Impressum
# (includes more information about)
#----------------------------------------------------------------------------------------------------
tabPanel(
title = "Impressum", fluidPage(htmlOutput("impressum"))
)
#----------------------------------------------------------------------------------------------------
), # end navbarPage
#----------------------------------------------------------------------------------------------------------
# Adding favicon to webpage. Favicon is base64 coded to avoid adding image.
  # Browser favicon embedded as a base64 data URI so that no icon file has to
  # be shipped in www/.
  title = tags$head(tags$link(
    rel = "icon",
    # NOTE(review): the href literal below spans several source lines, so the
    # string contains embedded newlines and leading spaces inside the base64
    # payload. Browsers generally tolerate whitespace in base64 data URIs, but
    # confirm the icon actually renders in all targeted browsers.
    href = "data:image/x-icon;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQC
    AYAAAAf8/9hAAABtUlEQVQ4T62TTUiTcRjAf89sjolsIkmXTYXYIAVBOmRIUAYKK
    whqM4o86dSbIR47BIGH9CKIH9PTDokf0aWLhGHILqIEI4wCLQUvdlhBKu7VPfFuu
    l4/Bpb+b8//eZ7f8y0cfWMPqrFJKyJ1QPm++juq70lphEevP1pdJCtMhJyo9iG0g
    Pz9PxRAFWUUkQ4aJ7dNVcbQdEanQW4cy+jED50DaTAhGcB4MIJI+HTO+1aqIzyca
    hXMmvNkMXfaubCq7OlVYTw4iEi7w2bncWlt2jqR3GT2xxI/jU2uFfuocHmylGTK4
    NVaDLMZqA4JE6EvgL84v5BvgX66P7/hosNFyFvD/VgvLnsB1UXlXLDZeHYlSFc8y
    sjKzAHwqwnYAfJNQLy+F8/b9rSywxegyl1G88JgWu6pasJld9K2GLHWlMwJeOq7Q
    6XbQ3hhmKCnhk7/XW7NPmcntXsMkC1hJdDPi6UpShxuGr3XuRd7iZHaY/52NwPL0
    6xvJzB0l6Hld5YSLE18UpZZg0TyNzMbn/hlbHGzpJLLhZeyUU1gdPVDRk438cxjP
    PMincsqH0D++5isg/nHc/4Dohe5L/OvdC4AAAAASUVORK5CYII=",
    type = "image/x-icon"
  )) # end title/favicon
#----------------------------------------------------------------------------------------------------------
) # end fluidPage
|
aca595911816bcf429ac76ccfdbf602457dc3617
|
411c1f70e9e7bc543dcd28377fe4426b7e081d07
|
/HW4/HW4-1.R
|
b0f47e5d4b63653fefed23e946bde35b18cf1614
|
[] |
no_license
|
owogyx1219/CS498-df
|
b243404e95a345e5f06d6c3bd0995e679bf1280e
|
c8bae5a7910a2d221e02f1b61597369bb18958ee
|
refs/heads/master
| 2021-01-24T00:43:17.211705
| 2018-05-10T07:03:05
| 2018-05-10T07:03:05
| 122,775,003
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
r
|
HW4-1.R
|
# Hierarchical clustering of countries by employment profile, visualised as
# fan-shaped dendrograms under three different linkage criteria.
library(ape)

# header = TRUE, tab-separated (named arguments instead of positional TRUE/"\t").
rawData <- read.table("employment_data2.txt", header = TRUE, sep = "\t")
countries <- rawData$Country
row.names(rawData) <- countries
# NOTE(review): the Country column is still part of rawData here, so it also
# feeds into dist(); confirm whether it should be dropped before clustering.

# Single linkage.
hclustresult1 <- hclust(dist(rawData), method = "single")
plot(as.phylo(hclustresult1), type='fan', show.node.label = TRUE, font = 2, cex= 0.45)

# Average linkage.
hclustresult2 <- hclust(dist(rawData), method = "average")
plot(as.phylo(hclustresult2), type='fan', show.node.label = TRUE, font = 2, cex= 0.45)

# Complete linkage. (The original repeated method = "average" here, making
# hclustresult3 an exact duplicate of hclustresult2; "complete" is the
# presumed intent of a third, distinct run.)
hclustresult3 <- hclust(dist(rawData), method = "complete")
plot(as.phylo(hclustresult3), type='fan', show.node.label = TRUE, font = 2, cex= 0.45)
|
f3ce44dbc8bf14a267c9214bb4a3e17b1ce4be17
|
6c73899865d066604762aa711717c22a6d437009
|
/codes/point process beilschmiedia forest.R
|
0255831dbc63e26aa8bdd41b05827fad98e77116
|
[] |
no_license
|
benjaminsw/Spatial_Statistics
|
9ee1a07288a2ba6aaca07e5e5328c8eb7ab52674
|
628a27cfc20fa0b04ef05b3508a2d7f2e11bba16
|
refs/heads/main
| 2023-02-22T23:58:52.349832
| 2021-01-26T16:37:18
| 2021-01-26T16:37:18
| 330,215,221
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
point process beilschmiedia forest.R
|
##############################
#### Beilschmiedia forest data
##############################
#### Load the data and library
library(spatstat)
data(bei)
# Tree locations. (The original call had a trailing comma --
# `plot(bei, pch="+",)` -- which R rejects as an empty argument.)
plot(bei, pch="+")
plot(bei.extra, main="")

#### Fit inhomogeneous Poisson models with terrain covariates
fit <- ppm(bei~elev + grad, data=bei.extra)
summary(fit)
plot(fit)
# Add quadratic terms in elevation and slope.
fit2 <- ppm(bei~elev + grad + I(elev^2) + I(grad^2), data=bei.extra)
print(fit2)
plot(fit2)

#### Fit a log-Gaussian Cox process with the same covariate trend
# (Renamed to fit3: the original reused the name fit2 and silently
# overwrote the quadratic Poisson fit above.)
fit3 <- kppm(bei, trend=~elev + grad, data=bei.extra, clusters="LGCP", method="palm")
print(fit3)
plot(fit3)
summary(fit3)
# 2x2 panel: fitted model plus three simulated realisations.
par(mfrow=c(2,2))
plot(fit3)
plot(simulate(fit3))
plot(simulate(fit3))
plot(simulate(fit3), main="")
|
f149e4d288931b7e5b94063824962ad5ae0f36cf
|
5bd38d8a11271a34a4294339d5d1e578c7c84116
|
/kicking_dst_raw_stats_NFLFastR.R
|
4de0301940eecb0e97b24f4153be896ecfba419c
|
[] |
no_license
|
Jeffery-777/NFLFastR-Fantasy-DST-K-Weekly
|
cccdf20286cbe7300b2deb97c278e902a1f18c38
|
95d2f0c4a1855b7e66d2e9814d565de3a1d05eb9
|
refs/heads/main
| 2023-07-13T06:36:02.764895
| 2021-08-24T16:09:06
| 2021-08-24T16:09:06
| 399,451,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,089
|
r
|
kicking_dst_raw_stats_NFLFastR.R
|
# Kicking Points Table
library(tidyverse)
library(nflfastR)
# Per-kicker, per-game kicking stat line (weeks 1-16) from nflfastR play-by-play:
# made PATs, made FGs bucketed by distance, misses, and total made-FG yardage.
kicking <- pbp_db %>%
  filter(!is.na(kicker_player_id),
         play_type != "kickoff",
         week <= 16) %>%
  # Indicator columns: made PAT and made FGs by distance bucket (<=39, 40-49, 50+).
  mutate(pat.pts = ifelse(extra_point_result == "good", 1, 0),
         fg.30 = ifelse(field_goal_result == "made" & kick_distance <= 39, 1, 0),
         fg.40 = ifelse(field_goal_result == "made" & kick_distance >= 40 & kick_distance <= 49, 1, 0),
         fg.50p = ifelse(field_goal_result == "made" & kick_distance >= 50, 1, 0)) %>%
  # turn on for missed field goal negative points
  mutate(fg.miss = case_when(
    !is.na(field_goal_result) & field_goal_result != "made" ~ 1,
    TRUE ~ 0
  )) %>%
  # turn on for missed PAT negative points
  mutate(pat.miss = case_when(
    !is.na(extra_point_result) & extra_point_result %in% c("blocked", "failed") ~ 1,
    TRUE ~ 0
  )) %>%
  mutate(made.fg.yards = ifelse(field_goal_result == "made", kick_distance, 0)) %>%
  # NOTE(review): "fg.pts" is never created in this pipeline, so this entry of
  # the replace_na() list targets a nonexistent column -- confirm intent.
  replace_na(list(pat.pts = 0, fg.pts = 0)) %>%
  group_by(season, week, posteam, game_id, kicker_player_id, kicker_player_name) %>%
  # Collapse play-level indicators to one row per kicker-game.
  # (na.rm = T replaced by the non-reassignable TRUE.)
  summarise(pats = sum(pat.pts, na.rm = TRUE),
            fg.30 = sum(fg.30, na.rm = TRUE),
            fg.40 = sum(fg.40, na.rm = TRUE),
            fg.50p = sum(fg.50p, na.rm = TRUE),
            fg.misses = sum(fg.miss, na.rm = TRUE),
            pat.misses = sum(pat.miss, na.rm = TRUE),
            made.fg.distance = sum(made.fg.yards, na.rm = TRUE)) %>%
  arrange(desc(season), week) %>%
  as.data.frame()
# Per-defense (DST), per-game fantasy stat line: points allowed (raw and
# corrected for defensive/ST scores), blocks, sacks, interceptions, safeties,
# fumble recoveries, and return touchdowns -- all joined from pbp_db subqueries.
dst <- pbp_db %>%
  filter(play == 1) %>%
  select(game_id,season, week, play_type, posteam, defteam, home_team, away_team,
         home_score, away_score, touchdown, interception, safety,
         fumble_forced, fumble_lost) %>%
  # Pick-six / fumble-return-six indicator.
  mutate(pick.fum.6 = case_when(
    interception == 1 & touchdown == 1 ~ 1,
    fumble_lost == 1 & touchdown == 1 ~ 1,
    TRUE ~ 0
  )) %>%
  # make safety for plays not on special teams
  mutate(safety.no.spec.teams = case_when(
    play_type == "punt" & safety == 1 ~ 1,
    play_type != "punt" & safety == 1 ~ 0,
    TRUE ~ 0
  )) %>%
  group_by(game_id, season, week, posteam, defteam, home_team, away_team, home_score, away_score) %>%
  summarise(away.pts.allowed = mean(home_score),
            home.pts.allowed = mean(away_score),
            to.pts.allowed = sum(pick.fum.6) *6,
            sfty.pts.allowed = sum(safety.no.spec.teams) *2) %>%
  # NOTE(review): corrected.pts subtracts the defense's own scores from the
  # *defense's* side of the scoreboard (away defense -> away_score); verify the
  # intended orientation -- points *allowed* would normally be the opponent's score.
  mutate(corrected.pts = case_when(
    defteam == away_team ~ away_score - to.pts.allowed - sfty.pts.allowed,
    defteam == home_team ~ home_score - to.pts.allowed - sfty.pts.allowed),
    raw.pts = case_when(
      posteam == home_team ~ away_score,
      # FIX: was `home_Score` -- R is case-sensitive, and the grouped column is
      # `home_score`, so the original errored at evaluation time.
      posteam == away_team ~ home_score)) %>%
  # NOTE(review): posteam is dropped here, yet the later joins and the final
  # select reference "posteam.x" and "away_team" -- confirm these keys/columns
  # actually exist at join time with the current pbp_db schema.
  select(game_id,season, week, defteam, home_team, home_score, away_score, corrected.pts, raw.pts) %>%
  # -------- First Join kickoff and field goal return tds -------------
  left_join(
    pbp_db %>%
      mutate(dst.posteam.td = case_when(
        play_type == "kickoff" & fumble_lost == 0 & touchdown == 1 ~ 1,
        play_type == "field_goal" & fumble_lost == 0 & interception == 0 & touchdown == 1 ~ 1,
        TRUE ~ 0)) %>%
      filter(dst.posteam.td == 1) %>%
      group_by(game_id, posteam, defteam, season, week) %>%
      count() %>%
      rename(kickoff.tds = n), by = c("defteam", "game_id", "season", "week")) %>%
  # ------- Next join punt TDS -----------
  left_join(
    # Be sure to JOIN ON DEFTEAM - NOT POSTEAM for this query
    pbp_db %>%
      mutate(dst.posteam.td = case_when(
        play_type == "punt" & fumble_lost == 0 & touchdown == 1 ~ 1,
        TRUE ~ 0
      )) %>%
      filter(dst.posteam.td == 1) %>%
      group_by(game_id, season, week, posteam, defteam) %>%
      count() %>%
      rename(puntreturn.tds = n), by = c("posteam.x" = "defteam", "game_id", "season", "week")
  ) %>%
  # ------- Next join blocked FG and PUNTS -----------
  left_join(
    # Be sure to JOIN ON DEFTEAM - NOT POSTEAM for this query
    pbp_db %>%
      mutate(dst.block = case_when(
        field_goal_result == "blocked" ~ 1,
        punt_blocked == 1 ~ 1,
        TRUE ~ 0
      )) %>%
      filter(dst.block == 1) %>%
      group_by(game_id, season, week, posteam, defteam) %>%
      count() %>%
      rename(dst.blocks = n), by = c("posteam.x" = "defteam", "game_id", "season", "week")
  ) %>%
  # -------- Join sacks ints fumbles safetys etc. -----------
  left_join(
    pbp_db %>%
      filter(nchar(posteam) > 0,
             nchar(defteam) > 0) %>%
      mutate(def.td = case_when(
        (fumble_lost == 1 | interception == 1 | field_goal_result == "blocked") & touchdown == 1 ~ 1,
        TRUE ~ 0
      )) %>%
      group_by(game_id, posteam, defteam, season, week) %>%
      # (na.rm = T replaced by the non-reassignable TRUE.)
      summarise(
        tot.sacks = sum(sack, na.rm = TRUE),
        tot.ints = sum(interception, na.rm = TRUE),
        tot.sfty = sum(safety, na.rm = TRUE),
        tot.fumblerec = sum(fumble_lost, na.rm = TRUE),
        tot.tds = sum(def.td)), by = c("posteam.x" = "defteam", "game_id", "season", "week")
  ) %>%
  # ---------- Defensive yards allowed ------------------
  left_join(
    pbp_db %>%
      filter(play == 1) %>%
      group_by(game_id, season, week, posteam, defteam) %>%
      summarise(yards.allowed = sum(yards_gained, na.rm = TRUE)), by = c("posteam.x" = "defteam", "game_id", "season", "week")
  ) %>%
  as.data.frame() %>%
  # Missing join results mean "zero events", not unknown.
  mutate(
    across(everything(), ~replace_na(.x, 0))
  ) %>%
  # YARDS ALLOWED ISNT READY FYI SO TAKING OUT
  # NOTE(review): output column "points.allwed.raw" is misspelled; not renamed
  # here because downstream consumers may already depend on the exact name.
  select(player_name = posteam.x,
         game_id,
         season,
         week,
         opponent = defteam,
         home_team,
         away_team,
         points.allowed.corrected = corrected.pts,
         points.allwed.raw = raw.pts,
         blocks = dst.blocks,
         sacks = tot.sacks,
         ints = tot.ints,
         safeties = tot.sfty,
         fumble.recoveries = tot.fumblerec,
         def.tds = tot.tds,
         kickoff.return.tds = kickoff.tds,
         punt.return.tds = puntreturn.tds
  ) %>%
  as.data.frame()
|
9b9a2551ea88535cc4369f0a6b5f4557471008ed
|
9477b0d92b6cac88c715bef053be9d15536919e0
|
/src/data/match_pdss_snis.R
|
5b831343d4e793174825ccece12deaeee4840a68
|
[] |
no_license
|
BLSQ/service_availability
|
029ba3a1c4a860e7f7dda2d66cbb7331a13da574
|
4fdd37e4eccff31939d3bf9f67cfcb65029cc365
|
refs/heads/master
| 2020-03-13T21:46:13.703729
| 2018-07-24T14:48:38
| 2018-07-24T14:48:38
| 131,303,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,542
|
r
|
match_pdss_snis.R
|
# Match PDSS and SNIS organisation units by normalised facility name and
# export the id pairs of zones that appear on both sides.
library(dhisextractr)
load_env()

# Lower-case, right-trim, then strip the mis-decoded byte pair that appears in
# the source files ('é' is mojibake present in the raw data itself).
normalize_name <- function(x) {
  x <- tolower(as.character(x))
  x <- trimws(x, 'right')
  gsub('é', 'e', x)
}

pdss_org <- read.csv(paste0(pdss_data_dir, '/org_units_description.csv'))
snis_org <- read.csv(paste0(snis_data_dir, '/org_units_description.csv'))
fac_zones_snis <- read.csv('data/references/snis_fosas_zones.csv')
fac_zones_pdss <- read.csv('data/references/pdss_fosas_zones.csv')

pdss_org$name <- normalize_name(pdss_org$name)
snis_org$name <- normalize_name(snis_org$name)

# Manual spelling harmonisation on the PDSS side (known mismatches).
spelling_fixes <- list(c('gethy', 'gety'),
                       c('kiyambi', 'kiambi'),
                       c('mongbalu', 'mongbwalu'),
                       c('kisandji', 'kisanji'))
for (fix in spelling_fixes) {
  pdss_org$name <- gsub(fix[1], fix[2], pdss_org$name)
}

# Facilities whose normalised name appears in both systems.
snis_matched <- snis_org[snis_org$name %in% pdss_org$name, ]
pdss_matched <- pdss_org[pdss_org$name %in% snis_org$name, ]

# Zones referenced by the facility-zone tables but absent from the matched
# sets (kept for inspection; not part of the exported output).
missing_snis_zone <- unique(as.character(fac_zones_snis$zone[!(fac_zones_snis$zone %in% snis_matched$id)]))
missing_pdss_zone <- unique(as.character(fac_zones_pdss$zone[!(fac_zones_pdss$zone %in% pdss_matched$id)]))
missing_snis_zone <- snis_org[snis_org$id %in% missing_snis_zone, ]
missing_pdss_zone <- pdss_org[pdss_org$id %in% missing_pdss_zone, ]

# Keep only id/name for the zone-level merge.
col_to_match <- c('id', 'name')
snis_side <- snis_matched[snis_matched$id %in% fac_zones_snis$zone, col_to_match]
pdss_side <- pdss_matched[pdss_matched$id %in% fac_zones_pdss$zone, col_to_match]

out <- merge(snis_side, pdss_side, by = 'name', suffixes = c('_snis', '_pdss'))
write.csv(out, 'data/references/matched_zones.csv')
|
f805ef37b9c9fb72af3c02b5f31c83b27c557cc9
|
aa7836793561f01fa26e51f37ac13a79402a0a86
|
/analysis/dtwclust_tests.R
|
1415a42f79264bbff0243354a4ba6c92b0d1faba
|
[
"Apache-2.0"
] |
permissive
|
jmausolf/OpenFEC
|
211989de56a78555c4d973635b89abfe186467a9
|
7313b0a3ef21a60984db0668757c0032554dc453
|
refs/heads/master
| 2022-04-28T23:21:48.511435
| 2020-04-11T06:35:26
| 2020-04-11T06:35:26
| 104,601,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,547
|
r
|
dtwclust_tests.R
|
# Time-series (DTW) clustering of firm-level partisanship measures.
# NOTE(review): setwd() to an absolute user path is fragile -- it only works
# on this author's machine; consider project-relative paths instead.
setwd('~/Box Sync/Dissertation_v2/CH1_OpenFEC/OpenFEC_test_MASTER/analysis/')
library(zoo)  # na.locf / na.aggregate (the original loaded zoo twice)
source("indiv_source.R")
source("indiv_vartab_varplot_functions.R")
source("indiv_partisan_functions.R")
source("indiv_make_polarization_similarity_measures.R")
source("hca400_functions.R")

# Analysis window (election cycles) and labels; presumably also read by the
# sourced scripts via the global environment. (`=` replaced with `<-`, and the
# "Hiearchical" typo in the plot title fixed.)
y1 <- 1980
y2 <- 2018
cycle_min <- 1980
cycle_max <- 2018
gtitle <- paste("Hierarchical Cluster Model of Partisan Polarization", y1, "-", y2, "(AGNES HCA Using Ward)", sep = " ")
gfile <- paste(y1, y2, sep = "_")
# Firm x cycle x occupation-level summary of partisanship measures, restricted
# to the analysis window (df_analysis is created by the sourced scripts).
df_filtered <- df_analysis %>%
  filter(cycle >= cycle_min & cycle <= cycle_max ) %>%
  #filter(cycle >= 1980 & cycle <= 2000 ) %>%
  filter(!is.na(pid2),
         !is.na(partisan_score),
         !is.na(occ3),
         !is.na(occlevels)) %>%
  #Group by Company (Collapse Across Cycles)
  #group_by(cid_master)
  group_by(cycle, cid_master, occ3) %>%
  # Mean/median party id and partisan scores per firm-cycle-occupation cell.
  summarize(#var_pid2 = var(as.numeric(pid2), na.rm = TRUE),
            #var_ps = var(as.numeric(partisan_score), na.rm = TRUE),
            mean_pid2 = mean(as.numeric(pid2), na.rm = TRUE),
            #mean_pid3 = mean(as.numeric(pid3), na.rm = TRUE),
            #median_pid = median(as.numeric(pid), na.rm = TRUE),
            median_pid2 = median(as.numeric(pid2), na.rm = TRUE),
            #median_pid3 = median(as.numeric(pid3), na.rm = TRUE),
            mean_ps = mean(partisan_score, na.rm = TRUE),
            median_ps = median(partisan_score, na.rm = TRUE),
            mean_ps_mode = mean(as.numeric(partisan_score_mode), na.rm = TRUE),
            mean_ps_min = mean(as.numeric(partisan_score_min), na.rm = TRUE),
            mean_ps_max = mean(as.numeric(partisan_score_max), na.rm = TRUE)
            #sum_pid_count = sum(as.numeric(party_id_count))
            )
#Reverse the Other/All Conversion for Graphing Pre 2004
# Pre-2004 "ALL" rows are relabelled "OTHERS"; remaining "ALL" rows dropped.
df_polarization_prep <- df_polarization %>%
  mutate(occ3 = as.character(occ)) %>%
  mutate(occ3 = ifelse(occ3 == "ALL" & cycle < 2004, "OTHERS", as.character(occ3))) %>%
  filter(occ3 != "ALL") %>%
  mutate(occ3 = factor(occ3,
                       levels = c("CSUITE", "MANAGEMENT", "OTHERS"))) %>%
  filter(!is.na(occ3)) %>%
  select(-occ)
#Join with Polarization / Similarity Measures
# Natural join on the shared columns (cycle, cid_master, occ3).
df_pre_hca <- left_join(df_filtered, df_polarization_prep)
#df_pre_hca <- na.omit(df_pre_hca)
# Rows with any missing value, kept for inspection only.
dfna <- df_pre_hca[!complete.cases(df_pre_hca), ]
#Spread OCC Columns
# Reshape to one row per firm-cycle with occupation-specific columns
# (spread_chr is a project helper defined in the sourced scripts).
df_pre_hca <- df_pre_hca %>%
  spread_chr(key_col = "occ3",
             value_cols = tail(names(df_pre_hca), -3),
             sep = "_") %>%
  arrange(cycle)
#Extract CID MASTER
# Firm ids in sorted order, re-bound to the standardized data further below.
df_cid_master <- df_pre_hca %>%
  ungroup() %>%
  select(cid_master) %>%
  arrange(cid_master)
#Prep and Standardize Data
df <- df_pre_hca %>%
  arrange(cid_master, cycle) %>%
  ungroup() %>%
  select(-cid_master, cycle)
# NOTE(review): mixing negative and positive selection ("-cid_master, cycle")
# looks suspicious -- confirm "-cycle" was not intended. cycle is dropped
# again inside the per-firm loop below, so it does need to survive here.
# Scale columns to unit RMS without centering (keeps zeros meaningful).
df <- scale(df, center = FALSE)
#Backfill NA from Next Column
#i.e. use Manager/Other etc to fill missing exec / manager
#need to transpose first
#in this way, na fill uses relevant values from that firm-year instead of the whole dataset
dfT <- t(df)
dfT <- na.locf(dfT, fromLast = TRUE)
df <- as.data.frame(t(dfT))
#Forward-fill Any Remaining NA from Next Column
#i.e. use CSUITE/Manager/Other etc to fill missing Manager/Other
#need to transpose first
#in this way, na fill uses relevant values from that firm-year instead of the whole dataset
dfT2 <- t(df)
dfT2 <- na.locf(dfT2, fromLast = FALSE)
df <- as.data.frame(t(dfT2))
# Rows still containing NA (kept for inspection), then dropped.
dfna2 <- df[!complete.cases(df), ]
df <- na.omit(df)
#df <- as.data.frame(scale(df))
#df <- as.data.frame(sapply(df, as.numeric))
# NOTE(review): df_cid_master has one row per pre-na.omit row; if na.omit()
# dropped anything above, this bind_cols would fail or misalign -- verify.
df <- bind_cols(df_cid_master, df)
# dfna2 <- df[!complete.cases(df), ]
#
# dfinf <- df[!is.finite(df),]
# m <- data.matrix(df)
# m[!is.finite(m)] <- 0
# dfinf <- m[!rowSums(!is.finite(m)),]
#df <- bind_cols(df_cid_master, df)
# df <- scale(df)
#
# df_matrix <- as.matrix(df_filtered)
#Turn Each
# Split the standardized panel into one data frame (time series) per firm.
df_ts_matrix <- split(df, df$cid_master)
# for(i in seq_along(df_ts_matrix)){
# m <- rgr::remove.na(df_ts_matrix[[i]])
# print(m$nna)
# }
# df <- as.data.frame(df_ts_matrix[[1]])
#print(df)
# df <- as.data.frame(df) %>%
# ungroup() %>%
# select(-cid_master)
#
#
#
# #df <- scale(df)
# df <- na.aggregate(df)
#
# df <- Filter(function(x)!all(is.na(x)), df)
# df <- na.omit(df)
# table(is.na (df))
#
# rm(matrix_list)
# rm(new_mat)
#Build one numeric matrix per firm (rows = cycles, cols = standardized measures).
#Preallocated instead of growing an empty list() -- same result, avoids copies.
matrix_list <- vector("list", length(df_ts_matrix))
for(i in seq_along(df_ts_matrix)){
  #`df` is deliberately kept as the loop variable name so the script's global
  #state after the loop matches the original (sourced scripts may read it).
  #(The original wrapped the element in as.data.frame() twice; once suffices.)
  df <- as.data.frame(df_ts_matrix[[i]]) %>%
    ungroup() %>%
    select(-cid_master, -cycle)
  #zoo::na.aggregate fills remaining NAs with the within-firm column mean.
  df <- na.aggregate(df)
  #Drop all-NA columns, then any rows that still contain NAs.
  df <- Filter(function(x)!all(is.na(x)), df)
  df <- na.omit(df)
  matrix_list[[i]] <- data.matrix(df)
}
#Add Names
#Firm ids, sorted -- assumed to match split()'s ordering of df_ts_matrix.
df_get_names <- df_filtered %>%
  ungroup() %>%
  select(cid_master) %>%
  distinct() %>%
  arrange(cid_master)
names(matrix_list) <- df_get_names[["cid_master"]]
matrix_list[[2]]  # interactive spot check of the second firm's matrix
# for(i in seq_along(matrix_list)){
# m <- rgr::remove.na(matrix_list[[i]])
# print(m$nna)
# }
#
#
# # Making many repetitions
# pc.l2 <- tsclust(matrix_list, k = 3L,
# distance = "dtw", centroid = "pam",
# seed = 3247, trace = TRUE,
# control = partitional_control(nrep = 10L))
#
# # Cluster validity indices
# sapply(pc.l2, cvi)
#
# pc.l2[[1L]]@distmat
#
# pc.l2[[4L]]@cluster
#
# mvc <- tsclust(matrix_list, k = 3L, trace = TRUE,
# type = "hierarchical",
# hierarchical_control(method = "all",
# distmat = pc.l2[[6L]]@distmat))
#
# mvc
# mvc@cluster
#
#
#
# mvc <- tsclust(matrix_list, k = 4L, trace = TRUE,
# type = "hierarchical")
#
# mvc
# mvc@cluster
#
#
# require(cluster)
#
# hc.diana <- tsclust(matrix_list, type = "h", k = 4L,
# distance = "L2", trace = TRUE,
# control = hierarchical_control(method = diana))
#
# plot(hc.diana, type = "sc")
#
#
#
# # Using GAK distance
# mvc <- tsclust(matrix_list, k = 3L, distance = "gak", seed = 390,
# args = tsclust_args(dist = list(sigma = 100)))
#
# mvc
# mvc@cluster
#
# plot(mvc)
#
# dist_ts2 <- TSclust::diss(SERIES = t(matrix_list), METHOD = "DTWARP")
# dist_ts2
# Pairwise dynamic-time-warping distance between the per-firm matrices.
dist_ts <- TSclust::diss(SERIES = matrix_list, METHOD = "DTWARP")
# dist_ts <- TSclust::diss(SERIES = matrix_list_old, METHOD = "DTWARP")
#dist_ts <- TSclust::diss(SERIES = matrix_list, METHOD = "PACF")
#
#
# hca <- agnes(dist_ts, method = "ward")
# sub_grp <- cutree(as.hclust(hca), k = 3, order_clusters_as_data = FALSE)
# sub_grp_df <- as.data.frame(sub_grp)
# df_post_cluster <- post_cluster_df(df_analysis, df_get_names, hca, cycle_min, cycle_max)
# library(stats)
# Agglomerative hierarchical clustering (Ward linkage) on the DTW distances.
hca <- agnes(dist_ts, method = "ward")
hc <- as.hclust(hca)
# NOTE(review): hc1/dend1 are not used anywhere below in this script; they may
# be read by the sourced plotting script via the global environment -- confirm
# before removing.
hc1 <- as.hclust(hca)
dend1 <- as.dendrogram(hc1)
# Cut the tree into k = 3 clusters; as.data.frame() names the single column
# ".", which rename() then turns into "cluster".
df_labels <- stats::cutree(hc, k = 3) %>% # hclus <- cluster::pam(dist_ts, k = 2)$clustering has a similar result
  as.data.frame(.) %>%
  dplyr::rename(.,cluster = .) %>%
  tibble::rownames_to_column("cid_master")
# Project helpers (sourced above): attach the cluster labels back onto the
# row-level data and infer each cluster's partisan lean.
df_post_cluster <- post_cluster_df_k(df_analysis, df_labels, hc, cycle_min, cycle_max, K=3)
df_party_clusters <- infer_partisanship(df_post_cluster) %>%
  mutate(cycle_mean = as.character(cycle_mean))
# Globals -- presumably consumed by the sourced plotting script at file end.
method = "time_series_hca_ward_k3_polar"
base = TRUE
oth = TRUE
# join post cluster to df_analysis
# Attach each firm's cluster-level party label to the row-level data, then
# print overall partisanship diagnostics (interactive console output).
df_hca_all <- left_join(df_analysis, df_party_clusters,
                        by = c("cid_master" = "cid_master"))
mean(df_hca_all$partisan_score, na.rm = TRUE)
table(df_hca_all$pid2)
## join post cluster to df_analysis
# Same diagnostics restricted to the Democratic-leaning cluster.
df_hca_all_dem <- df_hca_all %>%
  filter(cluster_party == "DEM")
mean(df_hca_all_dem$partisan_score, na.rm = TRUE)
table(df_hca_all_dem$pid2)
# trans_dems <- df_hca_all_dem %>% select(cid_master, party_pat) %>% distinct()
# trans_dems
## join post cluster to df_analysis
# Republican-leaning cluster.
df_hca_all_rep <- df_hca_all %>%
  filter(cluster_party == "REP")
mean(df_hca_all_rep$partisan_score, na.rm = TRUE)
table(df_hca_all_rep$pid2)
# trans_reps <- df_hca_all_rep %>% select(cid_master, party_pat) %>% distinct()
# trans_reps
# "Other" cluster.
df_hca_all_oth <- df_hca_all %>%
  filter(cluster_party == "OTH")
mean(df_hca_all_oth$partisan_score, na.rm = TRUE)
table(df_hca_all_oth$pid2)
# trans_oth <- df_hca_all_oth %>% select(cid_master, party_pat) %>% distinct()
# trans_oth
## Make Graphs
# Sourced script presumably reads the globals set above (method/base/oth and
# the df_hca_all* objects) from the global environment.
source("indiv_mean_party_hca_loop.R")
|
65e7bc66c506cc984afa9145edd21656b1456a1d
|
f13a487d46e8dda1d04491eb346b46ab35f8caba
|
/R/kottby.R
|
33c9436d2eda40d031dea27e57582941b869d0d9
|
[] |
no_license
|
DiegoZardetto/EVER
|
99d0c3a5a90df3b94c04a2412ceb2c7780b4376e
|
f97c50d67aac9cc849b2f284b905204c1d2358a4
|
refs/heads/master
| 2023-03-28T23:25:59.972626
| 2021-03-31T11:00:14
| 2021-03-31T11:00:14
| 268,497,478
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,766
|
r
|
kottby.R
|
`kottby` <-
function (deskott, y, by = NULL, estimator = c("total", "mean"),
vartype = c("se", "cv", "cvpct", "var"), conf.int = FALSE, conf.lev = 0.95)
#######################################################################################
# Calcola (su oggetti di classe kott.design) le stime dei totali o delle medie #
# di piu' variabili ed i corrispondenti errori standard ed intervalli di confidenza #
# nelle sottopopolazioni definite dai livelli delle variabili di 'by'. #
# NOTA: A seconda che la variabile di stima y sia di tipo a) numeric, b) factor la #
# funzione calcola: #
# se estimator="total" -> a) la stima del totale di y #
# b) la stima delle frequenze assolute di y #
# se estimator="mean" -> a) la stima dela media di y #
# b) la stima delle frequenze relative di y #
# NOTA: La formula da passare per 'y' deve essere del tipo y = ~var1 + ... + varn #
# (ogni operatore nella formula verra' comunque interpretato come "+") #
# NOTA: La formula da passare per 'by' deve essere del tipo by = ~var1 : ... : varn #
# (ogni operatore nella formula verra' comunque interpretato come ":") #
# NOTA: Gli intervalli di confidenza sono calcolati usando la distribuzione t di #
# Student con nrg-1 gradi di liberta'. #
# NOTA: Il valore di ritorno della funzione puo' essere un dataframe o una lista e #
# la sua struttura dipende dalla natura dell'input. #
#######################################################################################
{
if (!inherits(deskott, "kott.design"))
stop("Object ", substitute(deskott), " must be of class kott.design")
few.obs(deskott)
if (!inherits(y, "formula"))
stop("Variables of interest must be supplied as a formula")
y.charvect <- names(model.frame(y, deskott[1, ]))
na.fail(deskott, y.charvect)
typetest <- sapply(y.charvect, function(y) is.factor(deskott[,
y]) || is.numeric(deskott[, y]))
if (!all(typetest))
stop("Variables of interest must be numeric or factor")
if (!identical(by, NULL)) {
if (!inherits(by, "formula"))
stop("'by' variables must be supplied as a formula")
by.charvect <- names(model.frame(by, deskott[1, ]))
na.fail(deskott, by.charvect)
typetest <- sapply(by.charvect, function(y) is.factor(deskott[,
y]))
if (!all(typetest))
stop("'by' variables must be factor")
few.obs(deskott, by.charvect)
}
estimator <- match.arg(estimator)
if (missing(vartype))
vartype <- "se"
vartype <- match.arg(vartype, several.ok = TRUE)
vartype <- unique(vartype)
vartype.pos <- pmatch(vartype, eval(formals(sys.function())$vartype))
if (any(is.na(vartype.pos)))
stop("Unavailable vartype")
variabilities <- function(se, cv, cvpct, var, which.one){
var.mat <- cbind(se, cv, cvpct, var)[, which.one, drop = FALSE]
colnames(var.mat) <- c("SE", "CV", "CV%", "Var")[which.one]
var.mat
}
if (!is.logical(conf.int))
stop("Parameter 'conf.int' must be logical")
if (!is.numeric(conf.lev) || conf.lev < 0 || conf.lev > 1)
stop("conf.lev must be between 0 and 1")
kottby1 <- function(deskott, y, by = NULL, estimator, vartype.pos,
conf.int, conf.lev) {
#########################################################
# Calcola (su oggetti di classe kott.design) la stima #
# del totale (o della media) di una sola variabile ed #
# il relativo errore standard (ed intervallo di #
# confidenza), nelle sottopopolazioni definite dai #
# livelli delle variabili di 'by'. #
# NOTA: 'y' e 'by' devono essere vettori character. #
# NOTA: Gli intervalli di confidenza sono calcolati #
# usando la distribuzione t di Student con #
# nrg-1 gradi di liberta'. #
#########################################################
if (is.null(by))
return(kottestim1(deskott, y, estimator, vartype.pos, conf.int,
conf.lev))
dfby <- deskott[, by]
yvect <- deskott[, y]
if (is.numeric(yvect)) {
out <- sapply(split(deskott, dfby, drop = TRUE), function(des) kottestim1(des,
y, estimator, vartype.pos, conf.int, conf.lev))
return(as.data.frame(out))
}
if (is.factor(yvect)) {
out <- lapply(split(deskott, dfby, drop = TRUE), function(des) kottestim1(des,
y, estimator, vartype.pos, conf.int, conf.lev))
return(out)
}
}
kottestim1 <- function(deskott, y, estimator, vartype.pos, conf.int, conf.lev) {
    # Compute, for an object of class kott.design, the estimate of the total or
    # of the mean of a single variable, together with its standard error and
    # (optionally) a confidence interval.
    #
    # NOTE: depending on whether the estimation variable y is a) numeric or
    #       b) factor, the function computes:
    #       estimator = "total" -> a) the estimated total of y
    #                              b) the estimated absolute frequencies of y
    #       estimator = "mean"  -> a) the estimated mean of y
    #                              b) the estimated relative frequencies of y
    # NOTE: 'y' must be a character string naming a column of deskott.
    # NOTE: confidence intervals are computed from the Student t distribution
    #       with nrg - 1 degrees of freedom (nrg = number of random groups).
    #
    # Relies on helpers `variabilities()` and `confidence()` defined elsewhere
    # in this file/package.

    # Factory: returns the point-estimator function matching `estim`.
    estim1 <- function(data, y, w, estim) {
        # Weighted total of column y (factor y -> weighted level frequencies).
        total1 <- function(data, y, w) {
            yvect <- data[, y]
            wvect <- data[, w]
            if (is.numeric(yvect))
                ty <- sum(yvect * wvect)
            if (is.factor(yvect)) {
                yvect <- factor(yvect) # drop any empty levels of yvect
                ty <- tapply(wvect, yvect, sum)
            }
            ty
        }
        # Weighted mean of column y (factor y -> weighted relative frequencies).
        mean1 <- function(data, y, w) {
            yvect <- data[, y]
            wvect <- data[, w]
            wsum <- sum(wvect)
            if (is.numeric(yvect))
                ty <- sum(yvect * wvect)
            if (is.factor(yvect)) {
                yvect <- factor(yvect) # drop any empty levels of yvect
                ty <- tapply(wvect, yvect, sum)
            }
            my <- ty/wsum
            my
        }
        switch(estim, total = total1, mean = mean1)
    }
    nrg <- attr(deskott, "nrg")          # number of random groups
    w <- attr(deskott, "weights")        # one-sided formula naming the weight column
    w.char <- names(model.frame(w, deskott[1, ]))
    yvect <- deskott[, y]
    if (is.factor(yvect)) {
        yvect <- factor(yvect) # drop any empty levels of yvect
        # Row labels of the output: "<y>.<level>" for each retained level.
        full.levname <- paste(y, levels(yvect), sep = ".")
    }
    est.fun <- estim1(deskott, y, w.char, estimator)
    # Full-sample point estimate ...
    e <- est.fun(deskott, y, w.char)
    # ... and one replicate estimate per random group, using the replicate
    # weight columns named "<w.char>1", "<w.char>2", ..., "<w.char><nrg>".
    er <- sapply(1:nrg, function(r) est.fun(deskott, y, paste(w.char,
        r, sep = "")))
    if (length(e) == 1) {
        # Scalar estimate: always the case for numeric y; for factor y only
        # when a single non-empty level remains.
        var <- ((nrg - 1)/nrg) * sum((er - e)^2)  # random-group variance
        se <- sqrt(var)
        cv <- se/e
        cvpct <- 100*cv
        vars <- rbind(variabilities(se = se, cv = cv, cvpct = cvpct, var = var, which.one = vartype.pos))
        if (!identical(conf.int, FALSE)) {
            l.conf <- confidence(estim = e, se = se, df = (nrg -
                1), alpha = conf.lev)[1]
            u.conf <- confidence(estim = e, se = se, df = (nrg -
                1), alpha = conf.lev)[2]
            out <- cbind(e, vars, l.conf, u.conf)
            l.conf.tag <- paste("l.conf(", round(100*conf.lev,1), "%)", sep="")
            u.conf.tag <- paste("u.conf(", round(100*conf.lev,1), "%)", sep="")
            dimnames(out) <- list(ifelse(!(is.factor(yvect)),
                y, full.levname), c(estimator, colnames(vars), l.conf.tag,
                u.conf.tag))
        }
        else {
            out <- cbind(e, vars)
            dimnames(out) <- list(ifelse(!(is.factor(yvect)),
                y, full.levname), c(estimator, colnames(vars)))
        }
        return(as.data.frame(out))
    }
    else {
        # Vector estimate: factor y with several levels; er is a
        # (levels x nrg) matrix of replicate estimates.
        ecol <- cbind(e)
        # Recycle the point estimates into a matrix shaped like er, so the
        # squared deviations below are computed level-by-level.
        emat <- matrix(ecol, nrow(er), ncol(er))
        var <- cbind(((nrg - 1)/nrg) * rowSums((er - emat)^2))
        se <- sqrt(var)
        cv <- se/ecol
        cvpct <- 100*cv
        vars <- variabilities(se = se, cv = cv, cvpct = cvpct, var = var, which.one = vartype.pos)
        if (!identical(conf.int, FALSE)) {
            l.conf <- confidence(estim = ecol, se = se, df = (nrg -
                1), alpha = conf.lev)[, 1]
            u.conf <- confidence(estim = ecol, se = se, df = (nrg -
                1), alpha = conf.lev)[, 2]
            out <- cbind(ecol, vars, l.conf, u.conf)
            l.conf.tag <- paste("l.conf(", round(100*conf.lev,1), "%)", sep="")
            u.conf.tag <- paste("u.conf(", round(100*conf.lev,1), "%)", sep="")
            colnames(out) <- c(estimator, colnames(vars), l.conf.tag,
                u.conf.tag)
        }
        else {
            out <- cbind(ecol, vars)
            colnames(out) <- c(estimator, colnames(vars))
        }
        rownames(out) <- full.levname
        return(as.data.frame(out))
    }
}
if (identical(by, NULL)) {
out <- lapply(y.charvect, function(y) kottby1(deskott,
y, by, estimator, vartype.pos, conf.int, conf.lev))
}
else {
out <- lapply(y.charvect, function(y) kottby1(deskott,
y, by.charvect, estimator, vartype.pos, conf.int, conf.lev))
}
names(out) <- y.charvect
if (length(out) == 1)
out <- out[[1]]
out
}
|
f68191d3af3460c97fbb1cb7985849648c2371c6
|
fb18a5404891b8cc43ec65dc861318aee3017188
|
/server.R
|
bda54d76fd84dbdd522a7d08c53f14df41a89901
|
[] |
no_license
|
cryptomanic/Twitter-Web-App
|
f8656983a328257a5ba7f1aa2b1e6fc45aa30374
|
825c6966caa2b1ff4ec1b452a29dcaca28d4258b
|
refs/heads/master
| 2021-01-10T13:16:57.060267
| 2015-12-29T18:34:02
| 2015-12-29T18:34:02
| 47,999,300
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,155
|
r
|
server.R
|
library(shiny)
library(tweeteR)

# Shiny server for a small Twitter client built on the `tweeteR` package:
# shows tweets by hashtag, a user's profile info, and a user's recent tweets.
shinyServer(function(input, output) {
  key <- readline("Enter your API Key : ")
  secret <- readline("Enter your API Secret : ")
  # FIX: the original called tweetOauth() twice in a row, performing a second,
  # redundant OAuth handshake whose result simply overwrote the first.
  token <- tweetOauth(key, secret)

  # Tweets matching a hashtag; refreshed only when the dispByHt input fires
  # (everything inside isolate() is shielded from other reactive triggers).
  output$tweets <- renderTable({
    input$dispByHt
    isolate({
      data <- hashTag(token, input$hashtag, input$count)
      # NOTE(review): `img_profile` is not defined anywhere in this file.
      # Given that cbind() below drops data's first column, it is presumably
      # that column (profile-image URLs) -- confirm against tweeteR::hashTag.
      img_urls <- vector("character")
      for (url in img_profile) {
        img_urls <- append(img_urls, as.character(img(src = url)))
      }
      cbind(pic = img_urls, data[, -1])
    })
  }, sanitize.text.function = function(x) x)

  # Profile information for a user; falls back to a default account when the
  # lookup returns no id.
  output$userinfo <- renderTable({
    data <- userInfo(token, input$username)
    if (is.null(data$id)) {
      data <- userInfo(token, "narendramodi")
    }
    # NOTE(review): `profile_pic` is also undefined here -- presumably meant to
    # come from `data` (e.g. data$profile_pic); verify against tweeteR::userInfo.
    pic <- profile_pic
    if (! is.null(pic)) cbind(pic = as.character(img(src = pic)), data.frame(data))
  }, sanitize.text.function = function(x) x)

  # A user's recent tweets, with the same default-account fallback.
  output$usertweets <- renderTable({
    data <- userTweets(token, input$username, count = input$counter)
    if (length(data) == 0) {
      data <- userTweets(token, "narendramodi")
    }
    data.frame(`Recent Tweets` = data)
  })
})
|
e623c491eb34d61a85add235e663f4adf30b4a20
|
e02b906d4d3c548085954f3832afac30c7137228
|
/man/fritillary.Rd
|
751243ee3e504ab9c049f6a8c1c04bcfe901a67a
|
[] |
no_license
|
poissonconsulting/bauw
|
151948ab0dc55649baff13b2d79a551b6fc5a49d
|
47b12dc140ba965ae8c89693c0d8d8fefa0fd7db
|
refs/heads/main
| 2023-06-15T10:56:20.506561
| 2022-12-16T20:00:03
| 2022-12-16T20:00:03
| 78,153,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 770
|
rd
|
fritillary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-fritillary.R
\docType{data}
\name{fritillary}
\alias{fritillary}
\title{Fritillary butterfly abundance data}
\format{
A data frame with 665 rows and 4 columns
}
\usage{
fritillary
}
\description{
The silver-washed fritillary (\emph{Argynnis paphia})
butterfly duplicate site counts from
Kery & Schaub (2011 p.396).
}
\details{
The variables are as follows:
\itemize{
\item \code{site} the site surveyed.
\item \code{day} the day of the survey.
\item \code{count1} the first count.
\item \code{count2} the second count.
}
}
\references{
Kery M & Schaub M (2011) Bayesian Population Analysis
using WinBUGS. Academic Press. (\url{http://www.vogelwarte.ch/bpa})
}
\keyword{datasets}
|
23c7c900e85db6ad6b2a1d09fea03dfb987b565c
|
63f53585caf576deea1eea626f3dc1099dc57fbd
|
/R/circleFun.R
|
3e2337fc8685874d4c0732b9b683c4f1fdf56c26
|
[] |
no_license
|
ThinkR-open/rusk
|
32f0f86b38f1f1167b4fd214d470034e403d42bb
|
8943d42e6cee54502a28cae323baf367996eaaa4
|
refs/heads/master
| 2021-09-25T07:53:22.859016
| 2018-10-19T19:51:09
| 2018-10-19T19:51:09
| 121,903,730
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
circleFun.R
|
# Generate `npoints` evenly spaced points on a circle, for drawing with
# ggplot2 (see https://stackoverflow.com/questions/6862742).
#
# center:  numeric length-2 vector, circle centre (x, y).
# r:       circle radius.
# npoints: number of points; first and last both lie at angle 0 == 2*pi.
#
# Returns a data.frame with columns `x` and `y`.
circleFun <- function(center = c(0,0),r = 1, npoints = 100){
  angle <- seq(0, 2 * pi, length.out = npoints)
  data.frame(
    x = center[1] + r * cos(angle),
    y = center[2] + r * sin(angle)
  )
}
|
f5093fff787fdd3bae093cb7ffe4cddc2468e574
|
ae48675555fd497b345fb2818d57b33e83a4e203
|
/Proteomics/4c_makeHeatmap_sigHitsBar.R
|
3478388592f2058eaaca9622e136f88cdd0061e9
|
[] |
no_license
|
mcclo/Mahendralingam-et-al.-Nat-Metab
|
0bfef4ab501cb2b409f1c05bbd316e92a6fe641c
|
c075e048e69c5f63463ed6d55f9e476947551903
|
refs/heads/main
| 2023-03-21T14:35:19.993821
| 2021-03-13T15:31:42
| 2021-03-13T15:31:42
| 346,809,455
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,142
|
r
|
4c_makeHeatmap_sigHitsBar.R
|
#' This script was used to make a heatmap of the metabolic proteome with an annotation bar ..
#' .. for genes showing the metabolic cell lineage signatures - after ANOVA and Tukey test (p<0.05) and logFC > 0
#'
#' Note: significant_hits object holds the final signatures
#' BC = basal cell, ML = luminal mature, LP = luminal progenitor
#'
#' Input:
#' - gene lists
#' Output:
#' - heat maps - individual for each signature, final heat map with "cluster assignment" or "signature" bar
#' - RData file
#' - heat map
# Call required libraries into environment
library(pheatmap)
library(RColorBrewer)
library(cluster)
library(scales)
# Load files from previous scripts.
# NOTE(review): these .RData files must exist in the working directory; they
# provide significant_hits, h.pint.combat.pos, the *_dist objects and pint.pheno.
load("signatures.RData")
load("h.possemato_matrices_and_dendograms.RData")
load("h.pint.patient.pheno.info.RData")
# Read in scripts with plotting functions (plot_h_heatmap2, make_human_heatmap)
source("make_human_heatmap_4c.R") # with "signature" annotation bar
source("make_human_heatmap.R")
# Row-wise z-scores: each row is centred on its own mean and divided by its
# own standard deviation ((x - mean) / sd).
# Note that apply() with MARGIN = 1 returns the result transposed (one column
# per input row); callers in this script undo that with t().
compute_z_scores <- function(matrix){
  standardize <- function(row) (row - mean(row)) / sd(row)
  apply(matrix, 1, standardize)
}
# Make z score matrix.
# t() restores the original orientation, because compute_z_scores (via
# apply(MARGIN = 1)) returns one column per input row.
exp_matrix <- h.pint.combat.pos[,1:29] # metabolic protein expression matrix
exp_matrix_z <- t(compute_z_scores(exp_matrix))
# Make a new vector that indicates which genes belong to which cell type
# (first matching signature wins: BC, then ML, then LP, else "Unassigned").
significant_hits_bar <- ifelse(h.pint.combat.pos$Gene.names %in% significant_hits$BC, "BC",
                         ifelse(h.pint.combat.pos$Gene.names %in% significant_hits$ML, "ML",
                              ifelse(h.pint.combat.pos$Gene.names %in% significant_hits$LP, "LP", "Unassigned")))
# Define cell type order
# NOTE(review): celltype_gene_assignments appears unused in this script chunk.
celltype_gene_assignments <- c("BC", "ML", "LP", "Unassigned")
# Plot final heatmap with signature/cluster vector----------------------------------
# File name is date-stamped as YYYYMMDD_FINAL_possemato_heat_map_signatures.pdf.
filename <- sprintf("%s_FINAL_possemato_heat_map_signatures.pdf", format(Sys.Date(), "%Y%m%d"))
final_heatmap_plot <- plot_h_heatmap2(exp_matrix = exp_matrix_z, hc_samples = hclust(possemato.sample_dist),
                                 hc_proteins = hclust(possemato.protein_dist), filename = filename, title = NA,
                                 pint.pheno=pint.pheno, cluster_vector = significant_hits_bar)
# a) Make one heatmap per metabolic cell-lineage signature (BC, ML, LP).
#
# cell_signatures: list of three gene-name vectors, in the order
#                  basal (BC), luminal mature (ML), luminal progenitor (LP).
# title:           suffix appended to each plot's title / file name.
#
# Relies on `exp_matrix` and `make_human_heatmap()` from the enclosing script.
# (The unused locals `celltypes` and `x` from the original have been removed.)
make_cell_signature_heatmap <- function(cell_signatures, title){
  plot_titles <- c("BC_population", "ML_population", "LP_population") #names of titles/file names
  sapply(1:3, function(i){
    # Per-signature title, e.g. "BC_population_significant_hits".
    plot_title <- paste(plot_titles[[i]], title, sep="_")
    signature <- cell_signatures[[i]]
    make_human_heatmap(exp_matrix, signature, plot_title)
  })
}
# Run function for each signature list (before, after ANOVA, and after ANOVA+Tukey)
# Here only the final (post-ANOVA+Tukey) signatures are plotted.
make_cell_signature_heatmap(significant_hits, "significant_hits") #unclustered #cluster_cols = F
|
adcf8d72de990246ce473c52d4e4dda208d0f244
|
20c64e38c5738e9bdb2f6e04e1d450ec64ec55e1
|
/AITransportation.r
|
36870bc12a307f1c4a2aa70c8a6cca4dd99f82e3
|
[] |
no_license
|
dobbytech/AITransportation
|
397d2e3c79ea6d14a2299bccebd1cec44873b95d
|
2ff9dc5419113b8dd3e1ffe10c27776605670bd0
|
refs/heads/master
| 2020-07-17T01:02:28.141778
| 2019-09-02T22:50:20
| 2019-09-02T22:50:20
| 205,909,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,700
|
r
|
AITransportation.r
|
#'=========================================================================
#'AI for Transportation
#'=========================================================================
# Libraries -------------
# NOTE(review): rm(list=ls()), dev.off() and setwd() at the top of a script
# are side-effecting anti-patterns kept here from the original workflow.
rm(list=ls())
library(tidyverse)
library(tidytext)
library(readr)
library(qdap)
library(qdapTools)
library(dplyr)
library(readxl)
library(lubridate)
library(tm)
library(ggplot2)
library(scales)
library(reshape2)
library(wordcloud)
library(tidyr)
dev.off()
setwd("E:\\University of Sussex\\Courses\\Dissertation\\Data\\AI_Transport\\data")
#------INTRODUCTION----------
#1. Plot number of journal, news and patent from 2010 to 2018 (Data source: WIPO, SCOPUS, FACTIVA) --------
pubs_data <- read_excel("publication.xlsx")
# Reshape wide publication counts into long (Publication, value) pairs.
pubsdf <- pubs_data %>%
  gather(key = "Publication", value = "value", -Year)
head(pubsdf)
pubsdf
ggplot(pubsdf, aes(x = Year, y = value)) +
  geom_line(aes(color = Publication, linetype = Publication), size=1.3) +
  geom_point() +
  labs(x = "Year", y = "Number of Publications") +
  scale_color_manual(values = c("darkred", "steelblue", "darkgreen")) +
  scale_x_discrete(limits = pubsdf$Year)
#2. Plot Monthly Active Twitter Users from 2010 to 2019 (Data Source: Statista) -------
my_data <- read_excel("twitterusers.xlsx")
# Order the Time factor by user count so the x axis follows the trend.
my_data$Time <- factor(my_data$Time, levels = my_data$Time[order(my_data$Number)])
ggplot(my_data, aes(Time, Number, group=1)) +
  geom_line(color='steelblue', size=2, stat="identity") +
  labs(x = "Time (Quarter/Year)", y = "Number of Monthly Active Twitter Users (Millions)")
#------DATA COLLECTION-------
#1. Prepare stop words -------------
load("stop_words.rda")
# Custom additions: URL fragments and bare digits (19 words, with a matching
# 19-element "custom" lexicon vector below).
add_stop_words <- c("http", "https", "pic.twitter.com", "bit.ly", "0", "1", "2",
                    "3", "4", "5", "6", "7", "8", "9", "10", "11", "buff.ly",
                    "ow.ly", "twitter.com")
add_lexicon <- c("custom", "custom", "custom", "custom", "custom", "custom",
                 "custom", "custom", "custom", "custom", "custom", "custom",
                 "custom", "custom", "custom", "custom", "custom", "custom", "custom")
custom_stop_words <- bind_rows(stop_words,
                               data_frame(word = add_stop_words, lexicon = add_lexicon))
# Keep "self" (needed for "self-driving") out of the stop list.
custom_stop_words <- subset(custom_stop_words, word!="self")
# Fraction of the vocabulary kept as "top" n-grams per keyword file.
percentage <- 0.001
#2. Parse Twitter data related to AI -------------
# One CSV per search keyword; each is parsed, tagged with its keyword and the
# tweeting user, accumulated into dfall, and mined for its most frequent
# unigrams/bigrams/trigrams (top `percentage` fraction of each vocabulary).
# NOTE(review): "intellilgenttransportation" looks like a typo, but it must
# match the actual CSV file name on disk; later filters use the correctly
# spelled "intelligenttransportation" and will not match this keyword.
filelist <- c("artificialintelligencetransportation", "autonomouscar",
              "autonomouscars", "autonomousvehicle",
              "autonomousvehicles", "intelligentautomotive",
              "intelligenttransport", "intellilgenttransportation",
              "driverless", "artificialintelligencetraffic")
i <- 1
j <- 1
singleword <- c(NULL)
twowords <- c(NULL)
threewords <- c(NULL)
df_twitter <- NULL
bigram <- NULL
trigram <- NULL
dfall <- NULL
dfkeyword <- NULL
dfkey <- NULL
dfuser <- NULL
listuser <- NULL
for (file in filelist)
{
  file1 <- paste(file, ".csv", sep = "")
  df <- read_delim(file1, delim = ";")
  #add attribute user
  # The username is the 4th path component of the tweet permalink
  # (https://twitter.com/<user>/status/<id>).
  j <- 1
  listuser <- NULL
  for (link in df$permalink) {
    user <- unlist(strsplit(link, "/", fixed = TRUE))
    listuser[[j]] <- user[4]
    j <- j+1
  }
  dfuser <- data.frame(user = listuser, df)
  #add attribute keyword
  listkeyword <- rep(file,nrow(dfuser))
  dfkey <- data.frame(keyword = listkeyword, dfuser)
  # First iteration seeds dfall; later ones append.
  # (Parentheses instead of braces on the if-branch are unconventional but valid R.)
  if (i==1) (
    dfall <- dfkey
  ) else {
    dfall <- bind_rows(dfall, dfkey)
  }
  #the most frequent single words
  df_twitter <- df %>%
    select(text, id) %>%
    unnest_tokens(word, text) %>%
    anti_join(custom_stop_words) %>%
    count(word, sort = TRUE)
  norow <- nrow(df_twitter)
  tinytop <- round(norow*percentage)
  if (i==1) {
    singleword <- df_twitter$word[1:tinytop]
  } else (
    singleword <- c(singleword, df_twitter$word[1:tinytop])
  )
  #the most frequent bigrams
  df_twitter <- df %>%
    select(text, id) %>%
    unnest_tokens(bigram, text, token = "ngrams", n = 2) %>%
    separate(bigram, c("word1", "word2"), sep = " ") %>%
    filter(!word1 %in% custom_stop_words$word,
           !word2 %in% custom_stop_words$word) %>%
    unite(word, word1, word2, sep=" ") %>%
    count(word, sort = TRUE)
  norow <- nrow(df_twitter)
  tinytop <- round(norow*percentage)
  if (i==1) {
    twowords <- df_twitter$word[1:tinytop]
  } else (
    twowords <- c(twowords, df_twitter$word[1:tinytop])
  )
  #the most frequent trigrams
  df_twitter <- df %>%
    select(text, id) %>%
    unnest_tokens(trigram, text, token = "ngrams", n = 3) %>%
    separate(trigram, c("word1", "word2", "word3"), sep = " ") %>%
    filter(!word1 %in% custom_stop_words$word,
           !word2 %in% custom_stop_words$word,
           !word3 %in% custom_stop_words$word) %>%
    unite(word, word1, word2, word3, sep=" ") %>%
    count(word, sort = TRUE)
  norow <- nrow(df_twitter)
  tinytop <- round(norow*percentage)
  if (i==1) {
    threewords <- df_twitter$word[1:tinytop]
  } else (
    threewords <- c(threewords, df_twitter$word[1:tinytop])
  )
  i <- i+1
}
dfall
#3. Get distinct most frequent ----------------
#unigrams
singleword
singleworddist <- unique(unlist(singleword))
singleworddist
write.csv(singleworddist, file = "singlewords.csv")
#bigrams
twowords
twowordsdist <- unique(unlist(twowords))
twowordsdist
write.csv(twowordsdist, file = "bigrams.csv")
#trigrams
threewords
threewordsdist <- unique(unlist(threewords))
threewordsdist
write.csv(threewordsdist, file = "trigrams.csv")
#4. Find additional keywords ---------------
# Same parse-and-append pass for keywords discovered from the n-gram lists.
newfilelist <- c("aitransportation", "machinelearningtransportation",
                 "selfdriving")
for (file in newfilelist)
{
  file1 <- paste(file, ".csv", sep = "")
  df <- read_delim(file1, delim = ";")
  #update attribute username
  j <- 1
  listuser <- NULL
  for (link in df$permalink) {
    user <- unlist(strsplit(link, "/", fixed = TRUE))
    listuser[[j]] <- user[4]
    j <- j+1
  }
  dfuser <- data.frame(user = listuser, df)
  listkeyword <- rep(file,nrow(dfuser))
  dfkey <- data.frame(keyword = listkeyword, dfuser)
  dfall <- bind_rows(dfall, dfkey)
}
#5. Save data in csv file
#all tweets (contains duplicate tweets with different keywords)
dfall <- dfall[order(dfall$date),]
write.csv(dfall, file = "MyData.csv")
#distinct tweets
dfalldis <- dfall %>% distinct(id, .keep_all = TRUE)
dfalldis <- dfalldis[order(dfalldis$date),]
write.csv(dfalldis, file = "MyDataDistinct.csv")
#--------------DATA PROCESSING-----------------
#1. Preparation --------------
#convert the date format
dfdate <- dfall %>%
  mutate(date = as.Date(date))
dfdate$date
dfdisdate <- dfalldis %>%
  mutate(date = as.Date(date))
dfdisdate$date
#separate the data into 3 groups: <1000 obs, 1000-10000 obs, >10000 obs
# NOTE(review): the filter below uses "intelligenttransportation", but the
# collection step tagged that file as "intellilgenttransportation" (typo), so
# this condition never matches any rows.
dfdatea <- dfdate[(dfdate$keyword=="artificialintelligencetransportation" |
                     dfdate$keyword=="intelligentautomotive" |
                     dfdate$keyword=="intelligenttransport" |
                     dfdate$keyword=="intelligenttransportation" |
                     dfdate$keyword=="aitransportation" |
                     dfdate$keyword=="machinelearningtransportation" |
                     dfdate$keyword=="artificialintelligencetraffic"),]
dfdateb <- dfdate[(dfdate$keyword=="autonomouscar" |
                     dfdate$keyword=="autonomouscars" |
                     dfdate$keyword=="autonomousvehicle" |
                     dfdate$keyword=="autonomousvehicles"),]
dfdatec <- dfdate[(dfdate$keyword=="selfdriving" |
                     dfdate$keyword=="driverless"),]
#2. Trends Analysis ------------------
#general trends
# Histogram of tweet volume over time (distinct tweets only).
dfdisdate$date <- as.POSIXct(dfdisdate$date)
dev.off()
p <- ggplot(dfdisdate, aes(date, ..count..)) +
  geom_histogram() +
  theme_bw() + xlab("Time (month-year)") + ylab("Number of Tweets") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   minor_breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
p
# Same trend as a daily line plot.
dfd <- dfdisdate %>%
  count(date)
ggplot(dfd, aes(date, n, group=1)) +
  geom_line(color='steelblue', size=1.3, stat="identity") +
  labs(x = "Time (month-year)", y = "Number of Tweets") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   minor_breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
#the most frequent users for all tweets
userfreq <- count(dfdisdate, user, sort = TRUE)
userfreq
write.csv(userfreq, file = "userfreq.csv")
#pie chart of AI role for transportation
# Substring counts over the concatenated tweet text, one slice per role.
tweetstostring <- paste(dfdisdate$text, collapse = " ")
t_count <- str_count(tweetstostring, pattern = "traffic")
pt_count <- str_count(tweetstostring, pattern = "public transport")
f_count <- str_count(tweetstostring, pattern = "freight")
av_count <- str_count(tweetstostring, pattern = "autonomous vehicle")
slices <- c(t_count, pt_count, f_count, av_count)
Role <- c("Traffic Management", "Public Transportation", "Freight Transport System", "Autonomous Vehicles")
dfpie <- data.frame(Role, slices)
# Add variable position (label midpoint of each cumulative slice)
dfpie <- dfpie %>%
  arrange(desc(Role)) %>%
  mutate(lab.ypos = cumsum(slices) - 0.5*slices)
dfpie
dev.off()
pie <- ggplot(dfpie, aes(x="", y=slices, fill=Role)) +
  geom_bar(width = 1, stat = "identity") +
  coord_polar("y", start=0) +
  geom_text(aes(y = lab.ypos, label = slices), color = "white") +
  theme_void()
pie
#specific keyword trends in one figure divided by grid using facet_wrap
dfdatefw <- dfdate %>%
  count(date, keyword)
dfdatefw$date <- as.POSIXct(dfdatefw$date)
fw <- ggplot(dfdatefw, aes(date, n, color = keyword), show.legend = FALSE) +
  geom_line(size = 1.3) +
  labs(x = "Time (month-year)", y = "Number of Tweets") +
  scale_x_datetime(breaks = date_breaks("6 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
fw + facet_wrap(~ keyword, ncol=2)
#trends for keywords in category 1 (<1000 obs)
keywordbytimea <- dfdatea %>%
  count(date, keyword)
keywordbytimea
keywordbytimea$date <- as.POSIXct(keywordbytimea$date)
ga <- ggplot(keywordbytimea, aes(date, n, color = keyword)) +
  geom_line(size = 1.3) +
  labs(x = "Time (month-year)", y = "Number of Tweets") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
ga
ga + facet_wrap(~ keyword, ncol=1)
#trends for keywords in category 2 (1000-10000 obs)
keywordbytimeb <- dfdateb %>%
  count(date, keyword)
keywordbytimeb
keywordbytimeb$date <- as.POSIXct(keywordbytimeb$date)
gb <- ggplot(keywordbytimeb, aes(date, n, color = keyword)) +
  geom_line(size = 1.3) +
  labs(x = "Time (month-year)", y = "Number of Tweets") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
gb
gb + facet_wrap(~ keyword, ncol=1)
#trends for keywords in category 3 (>10000 obs)
keywordbytimec <- dfdatec %>%
  count(date, keyword)
keywordbytimec
keywordbytimec$date <- as.POSIXct(keywordbytimec$date)
gc <- ggplot(keywordbytimec, aes(date, n, color = keyword)) +
  geom_line(size = 1.3) +
  labs(x = "Time (month-year)", y = "Number of Tweets") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
gc
gc + facet_wrap(~ keyword, ncol=1)
#3. Sentiment Analysis -------------
# Bing lexicon: word -> positive/negative.
bing <- get_sentiments("bing")
#sentiment analysis for all tweets
dfallwords <- dfall %>%
  select(text, id) %>%
  unnest_tokens(word, text) %>%
  count(word, sort = TRUE)
dfallwords
dfallsa <- dfall %>%
  select(text, id) %>%
  unnest_tokens(word, text) %>%
  inner_join(bing) %>%
  count(word, sentiment, sort = TRUE)
dfallsa
# NOTE(review): this second assignment overwrites dfallsa. dfallwords is
# already a (word, n) count table, so count() here counts rows, producing
# n = 1 for every word — it loses the frequencies computed just above.
dfallsa <- dfallwords %>%
  inner_join(bing) %>%
  count(word, sentiment, sort = TRUE)
dfallsa
write.csv(dfallsa, file = "SentimentAnalysis.csv")
#write.csv(dfallsa, file = "SentimentAnalysis3.csv")
# Daily net sentiment (positive minus negative word counts) per keyword.
aitransportsentiment <- dfdate %>%
  select(text, id, keyword, date) %>%
  unnest_tokens(word, text) %>%
  inner_join(bing) %>%
  count(keyword, date, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)
aitransportsentiment$date <- as.POSIXct(aitransportsentiment$date)
sp <- ggplot(aitransportsentiment, aes(date, sentiment)) +
  geom_col(show.legend = FALSE) +
  labs(x = "Time (month-year)", y = "Sentiment") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
sp
#various keywords in one plot
sp + facet_wrap(~ keyword, ncol=2)
#the most frequent users during uber self-driving car accident in 18 March 2018
dfuseracc <- dfdisdate[(dfdisdate$date>="2018-03-18" & dfdisdate$date<="2018-03-20"),]
useracc <- count(dfuseracc, user, sort = TRUE)
useracc
write.csv(useracc, file = "useracc.csv")
#with text
useracc1 <- count(dfuseracc, user, text, sort = TRUE)
useracc1
write.csv(useracc1, file = "useracc1.csv")
#focused trends
# end of June 2016 = tesla
sp <- ggplot(aitransportsentiment, aes(date, sentiment)) +
  geom_col(show.legend = FALSE) +
  labs(x = "Time (date-month-year)", y = "Sentiment") +
  scale_x_datetime(breaks = date_breaks("1 day"),
                   labels = date_format("%d-%b-%y"),
                   limits = c(as.POSIXct("2016-06-25"),
                              as.POSIXct("2016-07-05")) )
sp
# March 2017 = uber
sp <- ggplot(aitransportsentiment, aes(date, sentiment)) +
  geom_col(show.legend = FALSE) +
  labs(x = "Time (date-month-year)", y = "Sentiment") +
  scale_x_datetime(breaks = date_breaks("1 day"),
                   labels = date_format("%d-%b-%y"),
                   limits = c(as.POSIXct("2017-03-16"),
                              as.POSIXct("2017-04-01")) )
sp
#sentiment analysis without selfdriving and driverless
dfwithoutdrive <- dfdate[(dfdate$keyword!="selfdriving" &
                            dfdate$keyword!="driverless"),]
dfallsa2 <- dfwithoutdrive %>%
  select(text, id) %>%
  unnest_tokens(word, text) %>%
  inner_join(bing) %>%
  count(word, sentiment, sort = TRUE)
dfallsa2
write.csv(dfallsa2, file = "SentimentAnalysis2.csv")
aitransportsentiment2 <- dfwithoutdrive %>%
  select(text, id, keyword, date) %>%
  unnest_tokens(word, text) %>%
  inner_join(get_sentiments("bing")) %>%
  count(keyword, date, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)
aitransportsentiment2$date <- as.POSIXct(aitransportsentiment2$date)
ggplot(aitransportsentiment2, aes(date, sentiment)) +
  geom_col(show.legend = FALSE) +
  labs(x = "Time (month-year)", y = "Sentiment") +
  scale_x_datetime(breaks = date_breaks("3 months"),
                   labels = date_format("%b-%y", tz=Sys.timezone()),
                   limits = c(as.POSIXct("2014-07-01"),
                              as.POSIXct("2019-07-01")) )
#4. Word Cloud ---------------
#general word cloud for all tweets
dfallwc <- dfall %>%
  select(text, id) %>%
  unnest_tokens(word, text) %>%
  anti_join(custom_stop_words) %>%
  count(word) %>%
  with(wordcloud(word, n, max.words = 100))
#sentiment word cloud for all tweets
# acast() pivots to a word x sentiment matrix for comparison.cloud().
dev.off()
dfall %>%
  select(text, id) %>%
  unnest_tokens(word, text) %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  acast(word ~ sentiment, value.var = "n", fill = 0) %>%
  comparison.cloud(colors = c("gray20", "gray80"),
                   max.words = 100)
|
c9bf1ea73f2be5075681ef66e246a63d859e35ec
|
326537a42f5a3f128fbb26fda2ababa3e2de2576
|
/Rpub/zBLUP/blupTest2.R
|
000c98dc368783cbb1cf1d7a9772a0e77769c56a
|
[] |
no_license
|
gc5k/Notes
|
b3f01c7c89d63de565bd968c99234ab04da51273
|
981d4d6935a446f09fb788e12161288d9727faa6
|
refs/heads/master
| 2022-10-01T08:42:13.174374
| 2022-09-12T03:03:58
| 2022-09-12T03:03:58
| 38,083,965
| 2
| 2
| null | 2016-08-14T07:02:59
| 2015-06-26T01:34:57
| null |
UTF-8
|
R
| false
| false
| 954
|
r
|
blupTest2.R
|
# Toy simulation comparing additive + dominance BLUP computed two ways:
# via Henderson's mixed-model equations (MME) and via the closed-form
# GLMM/BLUP expressions. Scatter plots compare predictions to truth.
M <- 200                                  # number of markers
N <- 200                                  # number of individuals
ha <- 0.5                                 # nominal additive heritability (not used below)
hd <- 0.3                                 # nominal dominance heritability (not used below)
frq <- rep(0.5, M)                        # allele frequencies
G <- matrix(rbinom(M * N, 2, frq), N, M)  # genotype codes 0/1/2
Gd <- matrix(ifelse(G == 1, 1, 0), N, M)  # heterozygote indicator (dominance design)
a <- rnorm(M)                             # additive marker effects
d <- rnorm(M)                             # dominance marker effects
BVa <- G %*% a                            # true additive values
BVd <- Gd %*% d                           # true dominance deviations
Beta <- matrix(c(1, 2), 2, 1)             # fixed effects
X <- matrix(rbinom(2 * N, 2, 0.5), N, 2)  # fixed-effect design
vBVa <- var(BVa)[1, 1]
vBVd <- var(BVd)[1, 1]
ve <- vBVa + vBVd                         # residual variance
y <- X %*% Beta + BVa + BVd + rnorm(N, 0, sqrt(ve))

# Mixed-model equations (block layout: fixed, additive, dominance) ----
C11 <- t(X) %*% X
C12 <- t(X)
C21 <- X
C13 <- t(X)
C31 <- X
C22 <- diag(1, M) + 1.5 * diag(1, M)
C33 <- diag(1, M) + 3 * diag(1, M)
MME_1 <- cbind(C11, C12, C13)
MME_2 <- cbind(C21, C22, diag(1, M))
MME_3 <- cbind(C31, diag(1, M), C33)
MME_mat <- rbind(MME_1, MME_2, MME_3)
# RHS; the 2*M+2 length relies on M == N in this simulation.
MME_y <- matrix(c(t(X) %*% y, y, y), 2 * M + 2, 1)
MME_b <- solve(MME_mat) %*% MME_y

plot(BVa, MME_b[3:(M + 2), 1])
abline(a = 0, b = 1)
plot(BVd, MME_b[(M + 3):(nrow(MME_b)), 1])

## GLMM closed-form BLUP ----
V <- (diag(vBVa, N) + diag(vBVd, N) + diag(ve, N))
VI <- solve(V)
bEst <- solve(t(X) %*% VI %*% X) %*% t(X) %*% VI %*% y  # GLS fixed effects
uA <- vBVa * VI %*% (y - X %*% bEst)                    # additive BLUP
uD <- vBVd * VI %*% (y - X %*% bEst)                    # dominance BLUP
plot(uA, MME_b[3:(M + 2), 1])
plot(uD, MME_b[(M + 3):(nrow(MME_b)), 1])
uD2 <- (vBVd / vBVa) * uA
|
eddf2d09ff65159d464408f9b6072ceba7cb313e
|
8c88a7d5741d18d3fd5d5b3de446f864bbe747d4
|
/rubberduck_things.R
|
f6b1ade4781b85d79d5546438fae264ddd2020e6
|
[] |
no_license
|
JorgensenMart/ISOGP
|
7e764d2b38247e59b39c405395bbe32359d60538
|
43e28bf5c42d2bbb55c18c1c7f572cdd39678625
|
refs/heads/master
| 2023-02-23T19:32:04.129866
| 2021-02-03T13:09:34
| 2021-02-03T13:09:34
| 244,441,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
rubberduck_things.R
|
# Download the COIL-20 image data set and analyse the rubber-duck object
# (class "1": rows named "1_<angle>").
# NOTE(review): installing a package from inside an analysis script is a
# side effect; kept to match the original workflow.
devtools::install_github("jlmelville/coil20")
library(coil20)
coil20 <- download_coil20(verbose = TRUE)
# Select the duck images by their row-name prefix.
rubberduck_ind <- startsWith(rownames(coil20), "1_")
rubberduck <- coil20[rubberduck_ind,]
# Pairwise (Euclidean) distances between duck images.
dist_rubber <- dist(rubberduck)
# PCA of the duck images.
# FIX: the original called prcomp(x = datmat, ...), but `datmat` is never
# defined in this script; `rubberduck` is the intended input.
pca_rubber <- prcomp(x = rubberduck, center = TRUE)
|
e226f2bb8ba08d56e17ba9c745fd651e109e744c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/bigMap/man/bdm.mybdm.Rd
|
46a5be3650dfa1c518376238854aa24c51970425
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 425
|
rd
|
bdm.mybdm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bdm_env.R
\name{bdm.mybdm}
\alias{bdm.mybdm}
\title{Set/get default path for \var{mybdm}}
\usage{
bdm.mybdm(path = NULL)
}
\arguments{
\item{path}{Path to \var{mybdm}.}
}
\value{
The current path value to \var{mybdm}
}
\description{
Set/get default path for \var{mybdm}
}
\examples{
# --- set default path for \\var{mybdm}
bdm.mybdm('~/mybdm')
}
|
32523aecbb46ef5a4ce4faac6744f9b5a4bcf4a3
|
29c54143fd2cb1d7c2beb4bc94e5ce2afae1c790
|
/man/geom_recession.Rd
|
fb3fb337b8f599686ebb5e1a49e342a8d04d9164
|
[] |
no_license
|
kbrevoort/kpbtemplates
|
15677c3134d4e237c4780cf4d3b9b54198d64d16
|
9ffdb2e3ac0b2b8755cb67568ce18eed9b6538fc
|
refs/heads/master
| 2022-02-21T03:26:01.168948
| 2022-02-16T03:22:51
| 2022-02-16T03:22:51
| 148,955,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
geom_recession.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-recession.R
\name{geom_recession}
\alias{geom_recession}
\title{NBER Recession Date Geom}
\usage{
geom_recession(
mapping = NULL,
data = NULL,
position = "identity",
na.rm = FALSE,
hjust = 0,
size = 10,
inherit.aes = TRUE,
nudge_y = 0,
nudge_x = 0,
alpha = 0.1,
fill = "gray90",
...
)
}
\description{
Adds shaded areas to a time series that indicate the periods corresponding
to recessions as dated by the National Bureau of Economic Research.
}
|
6ed8e6bd7632258d8af269eeb18ebfda33026a40
|
7b79fe568308f42a8189e9b841d2f7fa269fb9c9
|
/Rscript05.R
|
53641dfc97a4eb635c93a764ef9be7ceb1d6c966
|
[] |
no_license
|
ybk2810/R_workspace
|
6c027738f6ddcf1591fa7a20698f4286ef073d7f
|
3c11abb4b70742f2df4b6b5c43032fd42d3f5026
|
refs/heads/master
| 2020-04-08T23:20:13.187627
| 2018-11-30T12:29:55
| 2018-11-30T12:29:55
| 159,820,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,842
|
r
|
Rscript05.R
|
# ๋ฉ๋ก ์์ ๊ฐ์ฌ ์ถ์ถํ๊ธฐ
install.packages("xml2")
install.packages("rvest")
library(xml2)
library(rvest)
url <- "https://music.naver.com/lyric/index.nhn?trackId=22205276"
url
song <- read_html(url)
song
download.file(url,destfile = "song.html", quiet = T)
song <- read_html("song.html")
song
songNode <- html_node(song,"#lyricText")
songNode
Lyrics <- html_text(songNode)
Lyrics
# ---------------------------------------------------------------------
#
kbreport <- read_html("http://www.kbreport.com/player/detail/456")
kbreport
str(kbreport)
# ๊ฒฝ๊ธฐ : #p1 > div > div.scrollable > table > tbody > tr:nth-child(2) > td:nth-child(3)
# ์ ์ฒด ๋
๋ ๊ฒฝ๊ธฐ์
play_cnt_nodes <- html_nodes(kbreport,'#p1 td:nth-child(3)')
play_cnt_nodes
play_cnt <- html_text(play_cnt_nodes)
play_cnt
play_cnt <- as.numeric(play_cnt)
play_cnt
# ์ฐ๋๊ฐ
# #p1 td:nth-child(1)
season_nodes <- html_nodes(kbreport,'#p1 td:nth-child(1)')
season <- html_text(season_nodes)
season
season <- as.numeric(season)
season
df <- data.frame(season, play_cnt)
df
# ๊ฐ๋จํ ์๊ฐํ
plot(df$season,df$play_cnt)
# ops ๊ฐ์ ธ์ค๊ธฐ
ops_nodes <- html_nodes(kbreport,'#p1 td:nth-child(17)')
ops <- html_text(ops_nodes)
ops
ops <- as.numeric(ops)
ops
# df์ ์ถ๊ฐ
df$ops <- ops
df
# ------------------------------------------------------------------------
# ๋ฐ์ดํฐ ๋ง์ด๋
# ๋๋์ ๋ฐ์ดํฐ๋ก ๋ถํฐ ๊ณผ๊ฑฐ์ ์๋ ค์ ธ ์์ง ์์ ์ ์ฉํ ์ ๋ณด๋ฅผ ๋ฐ๊ฒฌํ๋ ๊ธฐ์
install.packages("rJava")
update.packages("rJava")
install.packages(c("KoNLP","tm","wordcloud"))
library(rJava)
library(KoNLP)
library(NLP)
library(tm)
library(RColorBrewer)
library(wordcloud)
useSejongDic()
# ์ฌ์ ์ ์ถ๊ฐํ ๋จ์ด
mergeUserDic(data.frame("๋ํ๋ฏผ๊ตญ","ncn"))
txt <- readLines("815.txt")
txt
nouns <- sapply(txt,extractNoun, USE.NAMES = F)
nouns
str(nouns)
txt2 <- "์๋ฒ์ง๊ฐ ๋ฐฉ์ ๋นจ๋ฆฌ ๋นจ๋ฆฌ ๋ค์ด๊ฐ์ ๋ค"
txt2
# ์ด ํจ์ ํ๋๋ก ์ฌ์ ์ ๋จ์ด์ ๋น๊ตํ์ฌ ๋ช
์ฌ๋ง ์ถ์ถํ๋ค.
extractNoun(txt2)
# ---------------------------------------------------------------------
head(unlist(nouns),20)
# ์์ ๊ฒฐ๊ณผ์์ ์๋ฏธ์๋ ๋จ์ด๋ค์ ๋ถ๋ฅ
nouns <- unlist(nouns)
# 2๊ธ์ ์ด์์ ๋จ์ด๋ง ์ ๋ณ
# ํ ๊ฒ ์ ๋ณ ํด ๋ฑ๋ฑ..
nouns<-gsub("\\d+",'', nouns)
nouns<-gsub("์ํ",'', nouns)
nouns<-gsub(" ",'', nouns)
nouns<-gsub("",'', nouns)
nouns<-gsub("๊ฒ",'', nouns)
nouns<-gsub("๋๋ฌธ",'', nouns)
nouns[nchar(nouns)>=2]
# ํ์ผ์ ์ฅ
write(unlist(nouns),'new815.txt')
data<-read.table("new815.txt")
data
str(data)
nrow(data)
wordcount <- table(data)
wordcount
wordcount <- head(sort(wordcount,decreasing = T),50)
# ์์ํ
library(RColorBrewer)
pal <- brewer.pal(12,"Paired")
wordcloud(names(wordcount),
freq=wordcount,
colors=pal,
min.freq = 3,
rot.per = 0.1,
random.order = F)
# ---------------------------------------------------------------------
# ์ค๋ผํด์ ์ฐ๊ฒฐํ๊ธฐ
# RJDBC
sessionInfo()
Sys.getenv()
install.packages("RJDBC")
library(RJDBC)
# ๋๋ผ์ด๋ฒ ํด๋์ค๋ช
๊ณผ driver ์์น๋ฅผ ์ง์
drv <- JDBC("oracle.jdbc.driver.OracleDriver",
classPath <- "C:/app/acorn/product/11.2.0/dbhome_1/jdbc/lib/ojdbc6.jar")
# \\ ์ผ๋ก ์ฐ๋์ง / ๋ก ์จ์ผ ์ธ์๊ฐ๋ฅ!
url <- "jdbc:oracle:thin:@192.168.0.206:1521:orcl"
username <- "scott"
password <- "tiger"
conn <- dbConnect(drv,url,username,password)
conn
d <- dbReadTable(conn,"DEPT")
d
str(d)
# ๋ค๋ฅธ ํ
์ด๋ธ๋ก ์ ์ฅ
dbWriteTable(conn,"DEPT89",d)
# d ==> db.csv ์ ์ฅ
write.csv(d, file="db.csv", fileEncoding = "UTF-8", row.names = F)
# 10 ๋ฒ
dept10 <- dbGetQuery(conn,"SELECT * FROM dept WHERE DEPTNO = 10")
dept10
str(d)
dbDisconnect(conn)
# Average salary per department number and job:
# build a data frame, store it in the DB as table df89, and export df.csv.
drv <- JDBC("oracle.jdbc.driver.OracleDriver",
            classPath = "C:/app/acorn/product/11.2.0/dbhome_1/jdbc/lib/ojdbc6.jar")
url <- "jdbc:oracle:thin:@192.168.0.118:1521:orcl"
url <- "jdbc:oracle:thin:@localhost:1521:orcl"  # second assignment wins (localhost)
username <- "scott"
password <- "tiger"
conn <- dbConnect(drv, url, username, password)
conn

emp <- dbReadTable(conn, "EMP")
emp

# Aggregate in SQL: average salary grouped by department and job.
df <- dbGetQuery(conn, "SELECT DEPTNO, JOB, AVG(SAL) avgsal FROM emp GROUP by deptno, job")
df
str(df)

# Persist to the database and to a CSV file.
dbWriteTable(conn, "df89", df)
write.csv(df, file = "df.csv", fileEncoding = "UTF-8", row.names = FALSE)
dbDisconnect(conn)
# -------------------------------------------------------------------------------------
# hw1: store the iris data set in the database
#   - df2 : data.frame copy of iris
#   - pie chart of the species frequencies
# -------------------------------------------------------------------------------------
library(psych)
head(iris)

drv <- JDBC("oracle.jdbc.driver.OracleDriver",
            classPath = "C:/app/acorn/product/11.2.0/dbhome_1/jdbc/lib/ojdbc6.jar")
url <- "jdbc:oracle:thin:@192.168.0.118:1521:orcl"
url <- "jdbc:oracle:thin:@localhost:1521:orcl"  # second assignment wins (localhost)
username <- "scott"
password <- "tiger"
conn <- dbConnect(drv, url, username, password)
conn

# Rename the columns to drop the dots (e.g. "Sepal.Length"), which are
# awkward as Oracle column identifiers.
df2 <- as.data.frame(iris)
names(df2) <- c("SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species")
dbWriteTable(conn, "df2", df2)

# Pie chart of how many rows each species has (50 each in iris).
pie(table(df2$Species))
# Connect to a MySQL database via RJDBC.
# Driver class name and the Connector/J jar location on disk.
drv <- JDBC("com.mysql.jdbc.Driver",
            "C:/libs/mysql-connector-java-5.1.47-bin.jar")
# JDBC URL: host 192.168.0.206, database "testdb".
url <- "jdbc:mysql://192.168.0.206/testdb"
username <- "scott"
password <- "tiger"
# Kept in conn2, a separate handle from the Oracle conn used above.
conn2 <- dbConnect(drv,url,username,password)
conn2
# Read the whole dept table through a query and print it.
k <- dbGetQuery(conn2,"SELECT * FROM dept ")
k
#-----------------------------------------------------------------------------------------
# Text mining on song lyrics / article text saved in mmac.txt:
# keep the 20 most frequent words, draw a word cloud, then a donut chart.
mmac <- readLines("mmac.txt")
mmac

# Noun extraction per line.
nouns2 <- sapply(mmac, extractNoun, USE.NAMES = FALSE)
nouns2
str(nouns2)
extractNoun(mmac)
head(unlist(nouns2), 20)

# Flatten and keep words of length >= 2 only.
# BUG FIX: the original computed this filter but never assigned it.
nouns2 <- unlist(nouns2)
nouns2 <- nouns2[nchar(nouns2) >= 2]
write(unlist(nouns2), "newmmac.txt")

data2 <- read.table("newmmac.txt")
data2
str(data2)
nrow(data2)

# Top-20 word frequencies.
wordcount2 <- table(data2)
wordcount2
wordcount2 <- head(sort(wordcount2, decreasing = TRUE), 20)

library(RColorBrewer)
pal <- brewer.pal(12, "Paired")
# BUG FIX: freq must come from wordcount2 (this data set); the original
# passed wordcount, the frequencies of the *previous* (815.txt) analysis.
wordcloud(names(wordcount2),
          freq = wordcount2,
          colors = pal,
          min.freq = 3,
          rot.per = 0.1,
          random.order = FALSE)

word_df2 <- data.frame(wordcount2)
word_df2

# Sort by descending frequency and keep the top 10.
library(dplyr)
word_df2 <- word_df2 %>% arrange(desc(Freq))
word_df2
topword2 <- head(word_df2, 10)
pie(topword2$Freq, topword2$data2,
    col = rainbow(10), radius = 1)

# Convert frequencies to percentages for the labels.
ptc2 <- round(topword2$Freq / sum(topword2$Freq) * 100, 1)
ptc2

# Combine word and percentage into one label, e.g. "์ฐ๋ฆฌ\n21.6 %".
lab <- paste(topword2$data2, "\n", ptc2, "%")
pie(topword2$Freq, col = rainbow(10),
    cex = 0.8, main = "์ด๋์๋ ๊ฐ์ฌ", labels = lab)

# Overlay a smaller white pie to turn the chart into a donut.
par(new = TRUE)
pie(topword2$Freq,
    radius = 0.6,
    col = "white",
    labels = NA, border = NA)
#-----------------------------------------------------------------------------------------
# Top-10 topics from the 8.15 speech word counts (wordcount built above).
str(wordcount)
word_df <- data.frame(wordcount)
word_df

# Sort by descending frequency.
library(dplyr)
word_df <- word_df %>% arrange(desc(Freq))
word_df

# Take 11 rows and drop the first one (an unwanted token),
# leaving the real top 10.
topword <- head(word_df, 11)
topword <- topword[-1, ]
topword

pie(topword$Freq, topword$data,
    col = rainbow(10), radius = 1)

# Convert frequencies to percentages for the labels.
ptc <- round(topword$Freq / sum(topword$Freq) * 100, 1)
ptc

# Combine word and percentage into one label, e.g. "์ฐ๋ฆฌ\n21.6 %".
lab <- paste(topword$data, "\n", ptc, "%")
pie(topword$Freq, col = rainbow(10),
    cex = 0.8, main = "8.15 ๊ฒฝ์ถ์ฌ", labels = lab)

# Overlay a smaller white pie to turn the chart into a donut.
par(new = TRUE)
pie(topword$Freq,
    radius = 0.6,
    col = "white",
    labels = NA, border = NA)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.