blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56cd7f3445e947cd437cd7432d714dc197b16c78
|
d85eb408d71b710a1eeef67042020145382d0d00
|
/man/sumRaster.Rd
|
b3ac1cb1304ae4ea1b90b410990798762e213546
|
[] |
no_license
|
jcarlis3/umbrella
|
8e74e3c7e6391e77c9466b6a731176cf9fdafd95
|
62ef91c91ad1b3952c55c373b5741fb36d14c51f
|
refs/heads/master
| 2022-05-22T02:08:16.832137
| 2022-03-25T16:44:05
| 2022-03-25T16:44:05
| 37,620,665
| 1
| 1
| null | 2017-11-08T00:13:39
| 2015-06-17T21:01:40
|
R
|
UTF-8
|
R
| false
| true
| 898
|
rd
|
sumRaster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sumRaster.R
\name{sumRaster}
\alias{sumRaster}
\title{Sum of raster within polygon}
\usage{
sumRaster(rast, poly)
}
\arguments{
\item{rast}{A raster object of class RasterLayer.}
\item{poly}{A spatial polygon object of class SpatialPolygons.}
}
\value{
Numeric, sum of raster cells within a polygon.
}
\description{
Returns the sum of raster cells within a polygon. When the raster is coded as a typical SDM (where 1 = suitable habitat
and 0 = non-suitable habitat), returns the number of suitable cells within the polygon.
}
\details{
The same process can be done with \code{raster::extract}; however, this
function (which uses \code{raster::mask} and \code{raster::cellStats}) performed faster in a local test using large raster objects.
}
\author{
Jason D. Carlisle, University of Wyoming, <jason.d.carlisle@gmail.com>
}
|
199e2e9b6d7664f576ce4f264f89bcce012d5a65
|
acb0fffc554ae76533ba600f04e4628315b1cd95
|
/R/CompilePhytos.R
|
40115ae66628318b974c5f02d12e2aab19ca8eb9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
lukeloken/USBRDelta
|
83826e12a5b5a2e81adeb2119e9c2599a5f8b870
|
fd6569385776d4579748b6422b5153e64606e0ba
|
refs/heads/master
| 2021-06-09T19:08:01.976985
| 2020-05-28T21:51:10
| 2020-05-28T21:51:10
| 145,152,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,508
|
r
|
CompilePhytos.R
|
# Consolidate phytoplankton count data from per-sample Excel workbooks
library(readxl)
library(plyr)
library(lubridate)
source('R/read_excel_allsheets.R')
source('R/g_legend.R')
# Project folder where outputs are stored
dropbox_dir <- 'C:/Dropbox/USBR Delta Project'
# Where data come from
google_dir <- 'C:/GoogleDrive/DeltaNutrientExperiment'
# Phyto workbooks; escape the dot so '.xls' is matched literally (covers .xlsx too)
PhytoFiles <- list.files(file.path(google_dir, 'Data', 'Phyto'))
PhytoFiles <- PhytoFiles[grep('\\.xls', PhytoFiles)]
# Columns retained from each workbook (when present)
KeepNames <- c('STATION', 'SAMPLE', 'GENUS', 'DIVISION', 'TALLY', 'DENSITY', 'TOTAL BV', 'DENSITY (cells/L)', 'NOTES')
Phyto_list <- list()
for (File_i in seq_along(PhytoFiles)) {
  phyto_path <- file.path(google_dir, 'Data', 'Phyto', PhytoFiles[File_i])
  # Read the data with skip=1; the desired column names sit on the first
  # spreadsheet row, so fetch them separately with skip=0 and reapply.
  Phyto_list[[File_i]] <- read_excel(phyto_path, skip=1)
  PhytoNames <- names(read_excel(phyto_path, skip=0))
  # Standardize the density column name
  PhytoNames[which(PhytoNames=="DENSITY (cells/L)")] <- "DENSITY"
  names(Phyto_list[[File_i]]) <- PhytoNames
  Phyto_list[[File_i]] <- Phyto_list[[File_i]][, intersect(KeepNames, PhytoNames)]
}
Phyto_df <- ldply(Phyto_list, data.frame)
head(Phyto_df)
head(Phyto_list[[1]])
# SAMPLE holds the sample date
Phyto_df$DATE <- as.Date(Phyto_df$SAMPLE)
# Keep rows that have at least a station or a date
Phyto_df <- Phyto_df[which(!is.na(Phyto_df$STATION) | !is.na(Phyto_df$DATE)), ]
# Zooplankton counts
ZooFiles <- list.files(file.path(google_dir, 'Data', 'Zoops'))
# Escape the dot so '.xls' is matched literally (also matches .xlsx)
ZooFiles <- ZooFiles[grep('\\.xls', ZooFiles)]
# Exclude the date-comparison workbook. Guard the negative index: if the
# pattern matched nothing, ZooFiles[-integer(0)] would drop EVERY file.
drop_i <- grep('SSC Zoops Date comparison', ZooFiles, fixed=TRUE)
if (length(drop_i) > 0) {
  ZooFiles <- ZooFiles[-drop_i]
}
# Columns retained from each workbook (when present)
ZooKeepNames<-c('bottle ID', 'date', 'genus', 'species', 'division', 'notes', "tow length (m)", "net radius (cm)", "tow volume filtered (L)", "total sample volume (ml)", "aliquot (ml)", "count factor", "#individuals counted", "# / L" , "biomass factor", "species biomass (µg d.w./L)")
Zoo_list <- list()
for (File_i in seq_along(ZooFiles)) {
  zoo_path <- file.path(google_dir, 'Data', 'Zoops', ZooFiles[File_i])
  # The header row position varies between files; locate the row whose first
  # column reads 'bottle ID' and skip down to it (plain read when absent).
  col1 <- read_excel(zoo_path, skip=0)[,1]
  headerrow <- which(col1=='bottle ID')
  if (length(headerrow) == 0) {
    zoo_i <- read_excel(zoo_path)
  } else {
    zoo_i <- read_excel(zoo_path, skip=(headerrow))
  }
  zoo_i <- zoo_i[, intersect(ZooKeepNames, names(zoo_i))]
  # Drop rows with neither a bottle ID nor a date
  zoo_i <- zoo_i[which(!is.na(zoo_i$`bottle ID`) | !is.na(zoo_i$date)), ]
  zoo_i$date <- as.Date(zoo_i$date, tryFormats=c('%m/%d/%Y'))
  Zoo_list[[File_i]] <- zoo_i
}
Zoo_df <- ldply(Zoo_list, data.frame)
# Biomass columns can import as character; coerce to numeric
Zoo_df$species.biomass..µg.d.w..L. <- as.numeric(Zoo_df$species.biomass..µg.d.w..L.)
Zoo_df$biomass.factor <- as.numeric(Zoo_df$biomass.factor)
# Rename phyto and zooplankton stations to a common set of clean station codes
AllStations <- unique(c(Phyto_df$STATION, Zoo_df$bottle.ID))
# Numeric stations: each synonym list is the clean code followed by every raw
# name containing those digits (c() coerces the number to character)
numeric_stations <- c(16, 34, 44, 56, 62, 64, 66, 70, 74, 76, 84)
names_list <- lapply(numeric_stations, function(s) {
  c(s, AllStations[grep(as.character(s), AllStations)])
})
# Named stations: hand-curated synonym lists for Prospect and West Sac Port
namesPro <- c("Pro", "Prospect", "Prospect-1/PS" , "PSL", "Prospect/Stair Steps", "Prospect 1", "Prospect 51", "Prospect-1", "Prospect -1", "Prospect AM")
namesWSP <- c("WSP","COE West Sac", "COE Gate/W. Sac", "West Sac Port", "WS-Port", "COE Gate / W. Sac. Port", "W. Sac. Port","West Sac.", "W. Sac PM", "W. Sac AM", "West Sac", "W. Sac", "W.S.P.", "West Sacs", "COE Gate W. Sac Port")
names_list <- c(names_list, list(namesPro, namesWSP))
# The first element of each synonym list is the clean code written back
Phyto_df$STATIONclean <- NA
Zoo_df$STATIONclean <- NA
for (station in seq_along(names_list)) {
  Phyto_df$STATIONclean[which(Phyto_df$STATION %in% names_list[[station]])] <- names_list[[station]][1]
  Zoo_df$STATIONclean[which(Zoo_df$bottle.ID %in% names_list[[station]])] <- names_list[[station]][1]
}
head(Phyto_df)
head(Zoo_df)
# Raw station names that failed to map (inspect; extend the lists above)
unique(Phyto_df$STATION[is.na(Phyto_df$STATIONclean)])
unique(Zoo_df$bottle.ID[is.na(Zoo_df$STATIONclean)])
write.csv(Phyto_df, file=file.path(dropbox_dir, 'Data', 'Phyto', 'PhytoCountsAll.csv'), row.names=FALSE)
write.csv(Zoo_df, file=file.path(dropbox_dir, 'Data', 'Zoops', 'ZoopsCountsAll.csv'), row.names=FALSE)
|
c859944041971e5783db3b52e10cdccc92f708bb
|
271078c2ead58ca0446d0b05194c70cb1c8dc172
|
/plot4.R
|
3eef07ce8e3158fccf6e395fee6127f0e8e7d43e
|
[] |
no_license
|
pocketni/ExData_Plotting1
|
48bc013ab335d5a86111d938da3e300a264f1fe0
|
3ce60121db1300d94790ec8baeddfc25c08b941c
|
refs/heads/master
| 2021-01-22T18:57:42.333688
| 2015-01-11T17:46:57
| 2015-01-11T17:46:57
| 29,093,856
| 0
| 0
| null | 2015-01-11T14:14:42
| 2015-01-11T14:14:42
| null |
UTF-8
|
R
| false
| false
| 1,162
|
r
|
plot4.R
|
# Exploratory data analysis plot4.R: 2x2 panel of household power plots.
# "?" encodes missing values; keep Date/Time as character (as.is).
power <- read.csv("household_power_consumption.txt", sep=";", na.strings="?", as.is=c("Date","Time"))
# Restrict to the two study days (d/m/Y format)
datea <- "1/2/2007"
dateb <- "2/2/2007"
powerseta <- subset(power, Date == datea)
powersetb <- subset(power, Date == dateb)
powerset <- rbind(powerseta, powersetb)
# (attach(powerset) removed: it was never detached and is redundant --
# all column references below are already scoped through with())
####
# problem 4
png(file="plot4.png", width=480, height=480)
par(mfrow=c(2,2))
with(powerset, {
  # x-axis ticks at row positions 0/1440/2880, labelled Thu/Fri/Sat
  plot(Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="", xaxt="n")
  axis(1, at=c(0,1440,2880), labels=c("Thu","Fri","Sat"))
  plot(Voltage, type="l", ylab="Voltage", xlab="datetime", xaxt="n")
  axis(1, at=c(0,1440,2880), labels=c("Thu","Fri","Sat"))
  # Three sub-metering series overlaid on one panel
  plot(Sub_metering_1, type="l", ylab="Energy Sub Metering", xlab="", xaxt="n")
  lines(Sub_metering_2, type="l", col="red")
  lines(Sub_metering_3, type="l", col="blue")
  axis(1, at=c(0,1440,2880), labels=c("Thu","Fri","Sat"))
  legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), bty="n", lty=c(1,1,1), col=c("black","red","blue"))
  plot(Global_reactive_power, type="l", xlab="datetime", xaxt="n", ylim=c(0,0.5))
  axis(1, at=c(0,1440,2880), labels=c("Thu","Fri","Sat"))
})
dev.off()
|
d1ac3a8cb2d11171026d9415f12394d061eba38f
|
4804e4a4166a33faf98e9ad3df60757d94a0f1d9
|
/R/konfigurujKnitr.R
|
b84cf6d93383f7baf28890393128bd67a3079f15
|
[
"MIT"
] |
permissive
|
zozlak/MLAK
|
958cb673939b684657ff88f141145f038ed2d89a
|
89e88050814b2ff2594669eb38ad198163e13b87
|
refs/heads/master
| 2021-06-01T11:34:57.797493
| 2020-07-09T08:51:11
| 2020-07-09T08:51:11
| 23,737,268
| 2
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
konfigurujKnitr.R
|
# Configures knitr options so its output better matches our needs.
#' @import knitr
konfigurujKnitr = function() {
  # Hide code, warnings and messages; emit results verbatim ("asis").
  opts_chunk$set(
    'error' = FALSE, 'warnings' = FALSE, 'message' = FALSE,
    'echo' = FALSE, 'results' = 'asis'
  )
  pandocTarget = opts_knit$get('rmarkdown.pandoc.to')
  if (!is.null(pandocTarget) && pandocTarget == 'latex') {
    # Prefer the cairo PDF device when this build of R supports it
    # (%in% TRUE is robust to an NA capability flag).
    if (capabilities()['cairo'] %in% TRUE) {
      opts_chunk$set('dev' = 'cairo_pdf')
    }
    # Without this, on macOS correct plots are eventually produced, but only
    # after a litany of errors (as if pdf() were tried first and cairo_pdf()
    # used only after the failure, despite cairo_pdf() being requested).
    grDevices::pdf.options(encoding = 'CP1250')
  }
}
|
63e79588523cbf20290fc1d924c1df1572f54c7f
|
034a190b9920e3cb8ee16f62007845c916eb6338
|
/man/cnaPanCO.Rd
|
aae1a4d076846c55f9e972570b6faae80bb99434
|
[] |
no_license
|
KalariRKLab-Mayo/panoply
|
04c67a09fc9e676f6231d6af45d25fe744064505
|
b49c4123d5b3750016bed3c920d542d5aefe1024
|
refs/heads/master
| 2023-08-07T15:16:47.121134
| 2019-08-07T17:52:54
| 2019-08-07T17:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
rd
|
cnaPanCO.Rd
|
\name{cnaPanCO}
\alias{cnaPanCO}
\docType{data}
\title{
DNA CNV (germline) and CNA (tumor) data for the TCGA Colon Cancer Subjects
}
\description{
Per-Gene DNA Copy Number Variation (germline) and Copy Number Alteration
(tumor) data for the TCGA Colon Cancer Subjects
}
\usage{data("cnaPanCO")}
\format{
A data frame with 6 subjects: one non-responder and 5 matched responders.
\describe{
\item{\code{CHROM}}{chromosome of gene}
\item{\code{START}}{gene start position}
\item{\code{STOP}}{gene stop position}
\item{\code{Gene.Symbol}}{gene symbol, NCBI}
\item{\code{TCGA-DM-A0XD}}{patient identifier}
}
}
\details{
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\examples{
data(cnaPanCO)
str(cnaPanCO)
}
\keyword{datasets}
|
d6e6b3c60cbdc0fdfcde7d250693846b763abcbb
|
874be2b31a5838cb274dbf959cead0d6285714d2
|
/Suppl_Figure_SAAV_chapter_OmicCircos.R
|
3491816bc6c65bf901d81783417824312219a7e6
|
[
"MIT"
] |
permissive
|
bszeitz/MM_Segundo
|
d234a616f9df65c7898a422efdd89089addd4b71
|
7504491733d60fa61f3ab658dfea4f8910c36769
|
refs/heads/main
| 2023-08-21T23:09:46.039171
| 2023-08-12T12:00:35
| 2023-08-12T12:00:35
| 378,593,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,518
|
r
|
Suppl_Figure_SAAV_chapter_OmicCircos.R
|
#### Suppl.Figure for SAAV chapter (OmicCircos plot)
# Builds a circular (OmicCircos) overview of verified single-amino-acid
# variants (SAAVs): one circle segment per batch count (1..15), with value
# tracks (PSM sums, PSM ratios, database matches) added further below.
options(stringsAsFactors = F)
library(OmicCircos)
library(readxl)
setwd("C:/Users/User/PhD/MM_segundo/")
excelfile <- "SAAV_chapter_SupplTable_final.xlsx"
# Sheet 2 of the supplementary table holds the verified SAAV list
Verified.SAAVs <- as.data.frame(read_xlsx(excelfile, sheet=2))
# Unique row key: protein accession + mutation
Verified.SAAVs$Name <- paste(Verified.SAAVs$Master.Protein.Accession, Verified.SAAVs$Mutation.in.isoform, sep="_")
## seg.f.mydata: the segment data, which is used to draw the outmost anchor track
## column 1: segment names, 2 and 3: start and end positions, 4: additional info of the segments (optional)
seg.f.mydata <- matrix(nrow=0, ncol=4)
colnames(seg.f.mydata) <- c("No.of.Batches", "Start", "End", "Name")
#no.seq <- c(1,4,7,10,13, 16)
# One segment per possible batch count (1..15)
no.seq <- base::seq(1,15,1)
#cat.names <- c("One-Three", "Four-Seven", "Eight-Ten", "Eleven-Thirteen", "Sixteen")
Verified.SAAVs <- Verified.SAAVs[order(Verified.SAAVs$Master.Protein.Accession),]
# For each batch count, give its SAAVs consecutive unit-width positions
# (Start = 0..n-1, End = 1..n) and append to the segment table
for (i in 1:length(no.seq)) {
table.sub <- Verified.SAAVs[which( Verified.SAAVs$No.of.Batches == no.seq[i] ),c("Name", "No.of.Batches")]
table.sub <- table.sub[order(table.sub$No.of.Batches),]
table.sub[, "Start"] <- seq(0, nrow(table.sub)-1, 1)
table.sub[, "End"] <- seq(1, nrow(table.sub), 1)
table.sub <- table.sub[,c("No.of.Batches", "Start", "End", "Name")]
seg.f.mydata <- rbind(seg.f.mydata, table.sub)
}
## seg.v.mydata: the mapping data, a data frame, which includes the values to be drawn in the graph
## column 1: segment names, 2: position, 3 and beyond: values and names (optional)
seg.v.mydata <- seg.f.mydata[,c("No.of.Batches", "End", "Name")]
# Convert database evidence to 0/10 indicator values (10 = found) so they can
# be drawn as presence/absence tracks. PeptideAtlas counts come as strings
# like "3 + 2"; split on " + " and sum the parts.
for (i in 1:nrow(Verified.SAAVs)){
Verified.SAAVs$Cosmic.Found[i] <- ifelse(Verified.SAAVs$Cosmic_ID[i] == "Not Found", 0, 10)
peptatlas.match <- sum(as.numeric(strsplit(Verified.SAAVs$No.PeptideAtlas.ExactMatch[i], split=" + ", fixed = T)[[1]])) +
sum(as.numeric(strsplit(Verified.SAAVs$No.PeptideAtlas.PartialMatch[i], split=" + ", fixed = T)[[1]]))
Verified.SAAVs$PeptideAtlas.Found[i] <- ifelse(peptatlas.match == 0, 0, 10)
Verified.SAAVs$cs.IDs[i] <- ifelse(Verified.SAAVs$cs.IDs[i] == "Not Found", 0, 10)
}
# Attach the plotted values to the mapping table, then reorder columns into
# circos order: segment, position, name, values
Verified.SAAVs.short <- Verified.SAAVs[,c("Name", "SAAV.PSM.sum", "PSM.ratio",
"PeptideAtlas.Found", "cs.IDs")]
seg.v.mydata <- merge(seg.v.mydata, Verified.SAAVs.short, by="Name")
seg.v.mydata <- seg.v.mydata[,c(2,3,1,4:(ncol(seg.v.mydata)))]
seg.v.mydata$cs.IDs <- as.numeric(as.character(seg.v.mydata$cs.IDs))
sapply(seg.v.mydata, class)
# Rename segment-table columns for segAnglePo() and label segments B1..B15
colnames(seg.f.mydata) <- c("seg.name", "seg.Start", "seg.End", "the.v")
seg.f.mydata$NO <- NA
seg.num <- length(unique(seg.f.mydata[,1]))
seg.name <- paste("B", 1:seg.num, sep="")
seg.f.mydata[,1] <- paste("B", seg.f.mydata[,1], sep="")
#new
seg.v.mydata <- seg.v.mydata[order(seg.v.mydata$No.of.Batches),]
summary(seg.v.mydata$PSM.ratio)
#colors <- rainbow(seg.num, alpha=0.5)
# Sentinel: missing PSM ratios become -0.3 (below the real data range)
seg.v.mydata[is.na(seg.v.mydata$PSM.ratio),"PSM.ratio"] <- -0.3
# Point colors by ratio threshold; the is.na() branch is effectively inert
# after the sentinel fill above
col.ratio <- vector()
for (i in 1:nrow(seg.v.mydata)) {
if (is.na(seg.v.mydata[i,"PSM.ratio"])) { col.ratio[i] <- NA
} else if (seg.v.mydata[i,"PSM.ratio"] >= 0.50) { col.ratio[i] <- "#E31A1C" #red
} else if (seg.v.mydata[i,"PSM.ratio"] >= 0.30) { col.ratio[i] <- "#FF7F00" #orange
} else if (seg.v.mydata[i,"PSM.ratio"] >= 0.002) { col.ratio[i] <- "#1F78B4" #blue
} else { col.ratio[i] <- "grey"}
}
names(col.ratio) <- seg.v.mydata[,3]
seg.v.mydata$No.of.Batches <- paste("B", seg.v.mydata$No.of.Batches, sep="")
# PSM sums are drawn on a log10 scale
seg.v.mydata$SAAV.PSM.sum <- log10(seg.v.mydata$SAAV.PSM.sum)
# Reorder mapping rows to match the segment-table row order (whole-word match
# on Name); the all() check afterwards should print TRUE
name.order <- seg.f.mydata$the.v
name.row.nos <- vector()
for (i in 1:nrow(seg.v.mydata)){
name.row.nos <- c(name.row.nos, grep(paste0("\\b", seg.v.mydata$Name[i], "\\b"), name.order))
}
row.names(seg.v.mydata) <- seq(1, nrow(seg.v.mydata), 1)
seg.v.mydata <- seg.v.mydata[name.row.nos,]
all(seg.f.mydata$the.v == seg.v.mydata$Name)
# Dummy columns 6 and 7 pin rows 1 and 1015 to the data min/max so the range
# ('lh') track drawn below spans the full scale. NOTE(review): the row
# indices 1/1015/2:1014 are hard-coded to this dataset's row count --
# confirm if the input data ever change.
seg.v.mydata.short <- seg.v.mydata[,c("No.of.Batches", "End", "Name", "SAAV.PSM.sum", "PSM.ratio")]
min(seg.v.mydata.short$PSM.ratio)
max(seg.v.mydata.short$PSM.ratio)
seg.v.mydata.short[1,6] <- -0.3
seg.v.mydata.short[1015,6] <- max(seg.v.mydata.short$PSM.ratio)
seg.v.mydata.short[2:1014,6] <- 0.5
min(seg.v.mydata.short$SAAV.PSM.sum)
max(seg.v.mydata.short$SAAV.PSM.sum)
seg.v.mydata.short[1,7] <- min(seg.v.mydata.short$SAAV.PSM.sum)
seg.v.mydata.short[1015,7] <- max(seg.v.mydata.short$SAAV.PSM.sum)
seg.v.mydata.short[2:1014,7] <- log10(2)
# color the batch segments
# Three shades of green group batch counts 1-5, 6-10 and 11-15
colors.start <- RColorBrewer::brewer.pal(5, "Greens")
colors <- vector()
colors[1:5] <- colors.start[2]
colors[6:10] <- colors.start[3]
colors[11:15] <- colors.start[5]
# color the PSM sum
# Spectral palette binned by PSM-sum thresholds (>20, >10, >5, >2, rest);
# rows with End == 1 (first position of a segment) are skipped, leaving NA
colors.start.PSM.sum <- RColorBrewer::brewer.pal(11, "Spectral")
colors.PSM <- vector()
for (i in 1:nrow(seg.v.mydata)) {
if (seg.v.mydata[i,"End"] ==1) {
#colors.PSM[i] <- "grey"
next
}
if (seg.v.mydata[i,"SAAV.PSM.sum"] > log10(20) ) { colors.PSM[i] <- colors.start.PSM.sum[1]
} else if (seg.v.mydata[i,"SAAV.PSM.sum"] > log10(10) ) { colors.PSM[i] <- colors.start.PSM.sum[3]
} else if (seg.v.mydata[i,"SAAV.PSM.sum"] > log10(5)) { colors.PSM[i] <- colors.start.PSM.sum[4]
} else if (seg.v.mydata[i,"SAAV.PSM.sum"] > log10(2)) { colors.PSM[i] <- colors.start.PSM.sum[5]
} else { colors.PSM[i] <- colors.start.PSM.sum[6] }
}
# color the PSM ratios (same thresholds as col.ratio above)
colors.start.ratio <- RColorBrewer::brewer.pal(9, "Set1")
colors.ratio <- vector()
for (i in 1:nrow(seg.v.mydata)) {
if (seg.v.mydata[i,"End"] ==1) {
#colors.ratio[i] <- "grey"
next
}
if (seg.v.mydata[i,"PSM.ratio"] > 0.50 ) { colors.ratio[i] <- colors.start.ratio[1]
} else if (seg.v.mydata[i,"PSM.ratio"] > 0.30 ) { colors.ratio[i] <- colors.start.ratio[5]
} else if (seg.v.mydata[i,"PSM.ratio"] > 0.002 ) { colors.ratio[i] <- colors.start.ratio[2]
} else { colors.ratio[i] <- "grey" }
}
# Compute segment angles (circle spans 90..360 degrees) and draw the tracks
# from outside in: anchor ring (R=400), PSM-sum histogram (R=300), PSM-ratio
# scatter plus black range guide (R=180), PeptideAtlas indicator (R=140),
# cs.IDs indicator (R=120)
db <- segAnglePo(seg.f.mydata, seg=seg.name, angle.start = 90, angle.end = 360)
par(mar=c(2,2,2,2))
plot(c(1,800), c(1,800), type="n", axes=FALSE, xlab="", ylab="", main="")
circos(R=400, cir=db, type="chr", col=colors, print.chr.lab=F, W=75, scale = F)
circos(R=300, cir=db, W=100, mapping=seg.v.mydata, col.v=grep("SAAV.PSM.sum", colnames(seg.v.mydata)),
type="h", B=T , col=na.omit(colors.PSM), lwd=0.1, scale=F)
circos(R=180, cir=db, W=100, mapping=seg.v.mydata, col.v=grep("PSM.ratio", colnames(seg.v.mydata)),
type="s", B=T , col=colors.ratio, lwd=1, scale=F, cex=0.6)
circos(R=180, cir=db, W=100, mapping=seg.v.mydata.short, col.v=grep("V6", colnames(seg.v.mydata.short)),
type="lh", B=F , col="black", lwd=1, scale=F)
circos(R=140, cir=db, W=20, mapping=seg.v.mydata, col.v=grep("PeptideAtlas.Found", colnames(seg.v.mydata)),
type="h", col = "#A6761D",lwd=0.05, col.bar=TRUE, cluster=F)
circos(R=120, cir=db, W=20, mapping=seg.v.mydata, col.v=grep("cs.IDs", colnames(seg.v.mydata)),
type="h", col = "#6A3D9A",lwd=0.05, col.bar=TRUE, cluster=F)
# Sanity checks: mapping rows still aligned with segment rows (expect TRUE)
all(seg.f.mydata$the.v == seg.v.mydata$Name)
all(seg.f.mydata$seg.End == seg.v.mydata$End)
#### Suppl.Figure for correlation analysis
# Spearman correlation between PSM ratio and European alternative allele frequency
cor.test(as.numeric(Verified.SAAVs$PSM.ratio),
as.numeric(Verified.SAAVs$Alt.AF.EU),
method = "spearman", use="pairwise.complete.obs")
library(ggplot2)
# Scatter of allele frequency (AAF) vs PSM ratio (PSMr)
ggplot(Verified.SAAVs, aes(x=as.numeric(Alt.AF.EU), y=as.numeric(PSM.ratio))) +
geom_point() + theme_bw() + ylab("PSMr") + xlab("AAF")#+
#annotate("text", x = 0.3, y = 0.98,
#label = "Spearman's rank correlation coefficient = 0.75\np-value < 2.2e-16")
|
d1ad43618e527db6e80bd7e5dc4fe2249b22159c
|
724167e44c800d9e8da1f85a8d412ab9d16333c5
|
/man/seq_read_write.Rd
|
f5c3c5140d285036cfdab45b1962e6523f822549
|
[] |
no_license
|
cran/krm
|
4186963ea959c2c747d852dc68ad247ba9bd94af
|
96e5c5c18e6e0d200c568863888587694c71619b
|
refs/heads/master
| 2022-11-07T16:38:22.194183
| 2022-10-18T06:40:11
| 2022-10-18T06:40:11
| 17,696,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,762
|
rd
|
seq_read_write.Rd
|
\name{readFastaFile}
\alias{readFastaFile}
\alias{writeFastaFile}
\alias{aa2arabic}
\alias{string2arabic}
\alias{fastaFile2arabicFile}
\alias{selexFile2arabicFile}
\alias{stringList2arabicFile}
\alias{arabic2arabicFile}
\alias{readSelexFile}
\alias{readSelexAsMatrix}
\alias{arabic2fastaFile}
\alias{readArabicFile}
\alias{readBlockFile}
\title{
Read a Fasta Sequence File
}
\description{
Read a Fasta Sequence File
}
\usage{
readFastaFile(fileName, sep = " ")
writeFastaFile (seqList, fileName)
aa2arabic (seq1)
string2arabic (seqList)
fastaFile2arabicFile (fastaFile, arabicFile, removeGapMajor=FALSE)
selexFile2arabicFile (selexFile, arabicFile, removeGapMajor=FALSE)
stringList2arabicFile (seqList, arabicFile, removeGapMajor=FALSE)
arabic2arabicFile (alignment, arabicFile)
readSelexFile (fileName)
readSelexAsMatrix (fileName)
arabic2fastaFile (alignment, fileName)
readArabicFile (fileName)
readBlockFile (fileName)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fileName}{string}
\item{fastaFile}{string}
\item{arabicFile}{string}
\item{selexFile}{string}
\item{sep}{string}
\item{seq1}{string. A string of amino acids}
\item{seqList}{list of string.}
\item{removeGapMajor}{Boolean}
\item{alignment}{matrix of arabic representation of sequences (1 based)}
}
\value{
string2arabic returns a matrix of arabic numbers representing aa.
readSelexFile returns a list of strings.
readArabicFile returns an n by p alignment matrix.
}
\examples{
library(RUnit)
fileName=paste(system.file(package="krm")[1],'/misc/SETpfamseed_aligned_for_testing.fasta', sep="")
seqs = readFastaFile (fileName, sep=" ")
checkEquals(length(seqs),11)
}
|
7a55b443b33d3960221f575bdbd02b5430362d1b
|
c86e9c80957dffc72d9facd1614376c3d0c9d322
|
/LexisNexis/Review/LNDataReview-Set-2019-12-03-CourtCaseTypeOpinion-B.r
|
052175d0dc426124c2479a3937f42e1ac01dc593
|
[] |
no_license
|
tbalmat/Duke-Law
|
ca4ae8c10235a3fa72b2c14a945dba712de4f8e2
|
be82053c63070f67fb88d961842a26ea6c6de3f1
|
refs/heads/master
| 2022-10-06T19:28:37.270716
| 2020-06-05T16:37:38
| 2020-06-05T16:37:38
| 261,865,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63,572
|
r
|
LNDataReview-Set-2019-12-03-CourtCaseTypeOpinion-B.r
|
# Duke University Law Appeals Analysis
# Review of 2019-12-03 LexisNexis Data
# Followup issues to report, resulting from 2019-04-16 team meeting
#
# Interactive review script: compares the March 2019 and December 2019
# LexisNexis extracts held in two local MySQL databases and plots summary
# figures. Bare expressions (dbGetQuery, unique, head, ...) are meant to
# print when run interactively.
options(max.print=1000) # number of elements, not rows
options(stringsAsFactors=F)
options(scipen=999999)
options(device="windows")
library(ggplot2)
#library(xtable)
library(DBI)
library(RMySQL)
#######################################################################################
# Set directories
#######################################################################################
setwd("C:\\Projects\\Duke\\Law\\LexisNexisCaseAnalysis\\MySQL\\Review\\2020-04-20")
lnsourcedir1 <- "C:\\Projects\\Duke\\Law\\LexisNexisCaseAnalysis\\LexisNexisData-2019-03-13"
lnsourcedir2 <- "C:\\Projects\\Duke\\Law\\LexisNexisCaseAnalysis\\LexisNexisData-2019-12-03"
imgdir <- "C:\\Projects\\Duke\\Law\\LexisNexisCaseAnalysis\\MySQL\\Review\\2020-04-20"
#######################################################################################
# Connect to Appeals databases
# db1 for data set 1, March 2019
# db2 for data set 2, December 2019
#######################################################################################
usr <- "tjb48"
# Uncomment when reconnecting within one session
#dbDisconnect(db1)
#dbDisconnect(db2)
# Password is prompted for interactively (never stored in the script)
db1 <- dbConnect(MySQL(), host="127.0.0.1", port=3306, dbname="Appeals", user=usr, password=rstudioapi::askForPassword("Password: "))
db2 <- dbConnect(MySQL(), host="127.0.0.1", port=3306, dbname="Appeals2", user=usr, password=rstudioapi::askForPassword("Password: "))
#######################################################################################
# List table structures
#######################################################################################
# Side-by-side schema inspection of both extracts
dbGetQuery(db1, "show tables")
dbGetQuery(db2, "show tables")
dbGetQuery(db1, "describe CaseHeader")
dbGetQuery(db2, "describe CaseHeader")
dbGetQuery(db2, "describe CaseHeaderExt")
dbGetQuery(db1, "describe CaseType")
dbGetQuery(db2, "describe CaseType")
dbGetQuery(db1, "describe Court")
dbGetQuery(db2, "describe Court")
dbGetQuery(db1, "describe CaseOutcomeType")
dbGetQuery(db2, "describe CaseOutcomeType")
dbGetQuery(db1, "describe Opinion")
dbGetQuery(db2, "describe Opinion")
dbGetQuery(db2, "select distinct opiniontype from Opinion")
dbGetQuery(db2, "select * from Court")
dbGetQuery(db2, "describe CaseLegalTopics")
dbGetQuery(db2, "select distinct pubstatus from CaseHeader")
######################################################################################
# Table and field examination
#######################################################################################
# Verify one-one case header extension existence
dbGetQuery(db2, "select count(1) from CaseHeader where lni not in(select lni from CaseHeaderExt)")
# Evaluate char length of null (returns null)
dbGetQuery(db2, "select character_length(ifnull(null, ''))")
#######################################################################################
# Plot the distributions of opinion text length for 4th and 11th circuits
#######################################################################################
# Accumulate text lengths by opinion type
# Note that each case header case has a corresponding extension record
# no/nc/nd = character lengths of the opinion-by, concur-by and dissent-by
# text fields (0 when the field is null, via ifnull(..., ''))
x <- dbGetQuery(db2, "select c.ShortName as court,
year(a.DecisionDate) as year,
character_length(ifnull(b.OpinionByText, '')) as no,
character_length(ifnull(b.ConcurByText, '')) as nc,
character_length(ifnull(b.DissentByText, '')) as nd
from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
join Court c on a.CourtID=c.ID
where c.ShortName in('4th Circuit Court of Appeals',
'11th Circuit Court of Appeals')")
# Abbreviate court names
unique(x[,"court"])
x[,"court"] <- sub("Circuit Court of Appeals", "Circuit", x[,"court"])
#x[,"court"] <- factor(x[,"court"], levels=c("4th Circuit", "11th Circuit"))
# Plot annual panels of distributions by court
# Choose one court per run by switching the index ([1] or [2])
ct <- c("4th Circuit", "11th Circuit")[2]
if(ct=="4th Circuit") {
f <- paste(imgdir, "\\images\\Dist-4th-OpText-Length.png", sep="")
} else {
f <- paste(imgdir, "\\images\\Dist-11th-OpText-Length.png", sep="")
}
# Filter by court and text length (non-empty fields up to nlim characters)
nlim <- 50
ko <- which(x[,"court"]==ct & x[,"no"]>0 & x[,"no"]<=nlim)
kc <- which(x[,"court"]==ct & x[,"nc"]>0 & x[,"nc"]<=nlim)
kd <- which(x[,"court"]==ct & x[,"nd"]>0 & x[,"nd"]<=nlim)
nbins <- c(30)[1]
# Long format: one row per (type, year, length) for stacked histograms
gdat <- rbind(data.frame("type"="opinion", "year"=x[ko,"year"], "n"=x[ko, "no"]),
data.frame("type"="concur", "year"=x[kc,"year"], "n"=x[kc, "nc"]),
data.frame("type"="dissent", "year"=x[kd,"year"], "n"=x[kd, "nd"]))
gdat[,"type"] <- factor(gdat[,"type"], levels=c("dissent", "concur", "opinion"))
png(f, res=300, width=2400, height=2400)
ggplot() +
geom_histogram(data=gdat, aes(x=n, fill=type, group=type), alpha=1, position="stack", bins=nbins) +
scale_fill_manual(values=c("opinion"="green", "concur"="blue", "dissent"="red")) +
#scale_x_continuous(breaks=log(seq(10, 100, 10))/log(10), labels=seq(10, 100, 10)) +
#scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
scale_y_log10(labels=function(x) format(x, big.mark=",")) +
facet_wrap(~year) +
theme(plot.title=element_text(size=12, hjust=0.5),
plot.subtitle=element_text(size=10, hjust=0.5),
plot.caption=element_text(size=10, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
#panel.spacing=unit(-0.2, "lines"),
axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12),
axis.text.x=element_text(size=10, angle=90, hjust=0, vjust=0.5),
axis.text.y=element_text(size=10),
#axis.ticks=element_blank(),
strip.text=element_text(size=8),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color=NA),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=10),
legend.title=element_text(size=10)) +
labs(x="\ntext length", y="number of cases\n")
dev.off()
# Review opinion bys of length between 5 and 15 chars in years 1990-1994
# (suspiciously short author fields; inspect y interactively)
y <- dbGetQuery(db2, "select c.ShortName as court,
year(a.DecisionDate) as year,
b.OpinionByText,
character_length(ifnull(b.OpinionByText, '')) as no
from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
join Court c on a.CourtID=c.ID
where c.ShortName in('4th Circuit Court of Appeals',
'11th Circuit Court of Appeals')
and year(a.DecisionDate) between 1990 and 1994
and character_length(ifnull(b.OpinionByText, '')) between 5 and 15")
# Plot proportion of opinion-by text fields containing the exact text "per curiam"
# Panel by court and year
z <- dbGetQuery(db2, "select c.ShortName as court, year(a.DecisionDate) as year,
sum(case when(lower(b.OpinionByText)='per curiam')then 1 else 0 end)*1./count(1) as p
from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
join Court c on a.CourtID=c.ID
group by c.ShortName, year(a.DecisionDate)")
# Abbreviate court names
# Fix: inspect z (the frame being relabeled), not x from the previous section;
# also dropped a duplicated Bankruptcy substitution
unique(z[,"court"])
z[,"court"] <- sub("Circuit Court of Appeals", "Circ", z[,"court"])
z[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", z[,"court"])
z[,"court"] <- sub("Court of Federal Claims", "Fed Claims", z[,"court"])
z[,"court"] <- sub("Judicial Conference, Committee on Judicial Conduct", "Jud Conduct", z[,"court"])
z[,"court"] <- sub("Temporary Emergency Court of Appeals", "Temp Emergency", z[,"court"])
z[,"court"] <- sub("Tennessee Eastern District Court", "Tenn E Dist", z[,"court"])
z[,"court"] <- sub("Texas Southern District Court", "Tex S Dist", z[,"court"])
sort(unique(z[,"court"]))
# Order the panels: numbered circuits first, then specialty courts
z[,"court"] <- factor(z[,"court"], levels=c("", "1st Circ", "2nd Circ", "3rd Circ", "4th Circ", "5th Circ",
  "6th Circ", "6th Circ Bkruptcy", "7th Circ", "8th Circ",
  "9th Circ", "10th Circ", "11th Circ", "DC Circ", "Fed Claims",
  "Federal Circ", "Jud Conduct", "Temp Emergency", "Tenn E Dist",
  "Tex S Dist"))
png(paste(imgdir, "\\images\\Proportion-Per-Curiam-Opinions-By-Author.png", sep=""), res=300, width=2400, height=2400)
ggplot() +
  # Dashed guides mark the 1990-1994 window reviewed above
  geom_vline(xintercept=1990, color="gray75", linetype="dashed") +
  geom_vline(xintercept=1994, color="gray75", linetype="dashed") +
  geom_line(data=z, aes(x=year, y=p)) +
  scale_x_continuous(breaks=seq(1974, 2018, 4)) +
  facet_wrap(~court) +
  theme(plot.title=element_text(size=12, hjust=0.5),
        plot.subtitle=element_text(size=10, hjust=0.5),
        plot.caption=element_text(size=10, hjust=0.5),
        panel.background=element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.major.y=element_blank(),
        panel.grid.minor=element_blank(),
        panel.border=element_rect(fill=NA, color="gray75"),
        axis.title.x=element_text(size=12),
        axis.title.y=element_text(size=12),
        axis.text.x=element_text(size=10, angle=90, hjust=0, vjust=0.5),
        axis.text.y=element_text(size=10),
        strip.text=element_text(size=8),
        strip.background=element_blank(),
        legend.position="bottom",
        legend.background=element_rect(color=NA),
        legend.key=element_rect(fill="white"),
        legend.box="horizontal",
        legend.text=element_text(size=10),
        legend.title=element_text(size=10)) +
  labs(x="\nyear", y="proportion \"per curiam\" opinions\n")
dev.off()
# Compare proportion per curiam cases with that of cases containing "per curiam" in opinion author field
# p1 = share whose author text is exactly 'per curiam';
# p2 = share with the structured PerCuriam indicator set
z <- dbGetQuery(db2, "select c.ShortName as court, year(a.DecisionDate) as year,
sum(case when(lower(b.OpinionByText)='per curiam')then 1 else 0 end)*1./count(1) as p1,
sum(case when(a.PerCuriam=1)then 1 else 0 end)*1./count(1) as p2
from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
join Court c on a.CourtID=c.ID
group by c.ShortName, year(a.DecisionDate)")
# Abbreviate names
unique(z[,"court"])
z[,"court"] <- sub("Circuit Court of Appeals", "Circ", z[,"court"])
z[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", z[,"court"])
z[,"court"] <- sub("Court of Federal Claims", "Fed Claims", z[,"court"])
# NOTE(review): duplicate of the Bankruptcy substitution above (harmless no-op)
z[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", z[,"court"])
z[,"court"] <- sub("Judicial Conference, Committee on Judicial Conduct", "Jud Conduct", z[,"court"])
z[,"court"] <- sub("Temporary Emergency Court of Appeals", "Temp Emergency", z[,"court"])
z[,"court"] <- sub("Tennessee Eastern District Court", "Tenn E Dist", z[,"court"])
z[,"court"] <- sub("Texas Southern District Court", "Tex S Dist", z[,"court"])
sort(unique(z[,"court"]))
# Same panel ordering as the previous figure
z[,"court"] <- factor(z[,"court"], levels=c("", "1st Circ", "2nd Circ", "3rd Circ", "4th Circ", "5th Circ",
"6th Circ", "6th Circ Bkruptcy", "7th Circ", "8th Circ",
"9th Circ", "10th Circ", "11th Circ", "DC Circ", "Fed Claims",
"Federal Circ", "Jud Conduct", "Temp Emergency", "Tenn E Dist",
"Tex S Dist"))
png(paste(imgdir, "\\images\\Proportion-Per-Curiam-Opinions-By-PerCuriam.png", sep=""), res=300, width=2400, height=2400)
ggplot() +
geom_vline(xintercept=1990, color="gray75", linetype="dashed") +
geom_vline(xintercept=1994, color="gray75", linetype="dashed") +
geom_line(data=z, aes(x=year, y=p1, linetype="author")) +
geom_line(data=z, aes(x=year, y=p2, linetype="per-curiam-indicator")) +
scale_linetype_manual(name="method", values=c("author"="solid", "per-curiam-indicator"="dashed")) +
scale_x_continuous(breaks=seq(1974, 2018, 4)) +
facet_wrap(~court) +
theme(plot.title=element_text(size=12, hjust=0.5),
plot.subtitle=element_text(size=10, hjust=0.5),
plot.caption=element_text(size=10, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
#panel.spacing=unit(-0.2, "lines"),
axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12),
axis.text.x=element_text(size=10, angle=90, hjust=0, vjust=0.5),
axis.text.y=element_text(size=10),
#axis.ticks=element_blank(),
strip.text=element_text(size=8),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color=NA),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=10),
legend.title=element_text(size=10)) +
labs(x="\nyear", y="proportion \"per curiam\" opinions\n")
dev.off()
#######################################################################################
# Distribution of proportion cases with outcome type of "other" by court and year
#######################################################################################
# Verify that each case has an outcome type record
dbGetQuery(db2, "select count(1) from CaseHeader where lni not in(select lni from CaseOutcomeType)")
# Inspect outcome type values
dbGetQuery(db2, "select distinct outcometype from CaseOutcomeType")
# Test for null outcome text
dbGetQuery(db2, "select count(1) from CaseHeader where outcome is null")
# Compute proportion of "other" cases and proportion "other" cases with empty outcome fields
# p1 = share of a court-year's cases typed 'other'
# p2 = among that court-year's 'other' cases, the share with a null outcome text
# p3 = the court-year's case count as a share of that year's total cases (from subquery d)
x <- dbGetQuery(db2, "select c.ShortName as court, year(a.DecisionDate) as year,
                             sum(case when(b.outcometype='other')then 1 else 0 end)*1./count(1) as p1,
                             sum(case when(a.outcome is null and b.outcometype='other')then 1 else 0 end)*1./
                             sum(case when(b.outcometype='other')then 1 else 0 end) as p2,
                             count(1)*1./d.n as p3
                      from CaseHeader a join CaseOutcomeType b on a.lni=b.lni
                           join Court c on a.CourtID=c.ID
                           join( select year(decisiondate) as year, count(1) as n
                                 from CaseHeader
                                 group by year(decisiondate)
                               ) d on year(a.decisiondate)=d.year
                      group by c.ShortName, year(a.DecisionDate)")
# Abbreviate court names so they fit in facet strip labels.
# Review the raw names before substitution.
unique(x[,"court"])
# Pattern/replacement pairs applied in order with sub() (first occurrence only).
# The previous version repeated the Bankruptcy substitution twice; the
# duplicate has been removed here (it was harmless but redundant).
court.abbrev <- matrix(c(
  "Circuit Court of Appeals", "Circ",
  "Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy",
  "Court of Federal Claims", "Fed Claims",
  "Judicial Conference, Committee on Judicial Conduct", "Jud Conduct",
  "Temporary Emergency Court of Appeals", "Temp Emergency",
  "Tennessee Eastern District Court", "Tenn E Dist",
  "Texas Southern District Court", "Tex S Dist"),
  ncol=2, byrow=TRUE)
for(i in seq_len(nrow(court.abbrev)))
  x[,"court"] <- sub(court.abbrev[i,1], court.abbrev[i,2], x[,"court"])
# Review the abbreviated names
sort(unique(x[,"court"]))
# Fix facet order: numbered circuits first, then the specialty courts;
# the empty level accommodates blank court names.
x[,"court"] <- factor(x[,"court"], levels=c("", "1st Circ", "2nd Circ", "3rd Circ", "4th Circ", "5th Circ",
                                            "6th Circ", "6th Circ Bkruptcy", "7th Circ", "8th Circ",
                                            "9th Circ", "10th Circ", "11th Circ", "DC Circ", "Fed Claims",
                                            "Federal Circ", "Jud Conduct", "Temp Emergency", "Tenn E Dist",
                                            "Tex S Dist"))
# Toggle: TRUE plots the proportions directly; FALSE plots their complements
# (1-p) and writes a differently named image. Flip the condition by hand to
# produce the alternate figure. (Was `if(T)`; T is a reassignable binding,
# so the literal TRUE is used instead.)
if(TRUE) {
  x[,"y1"] <- x[,"p1"]
  x[,"y2"] <- x[,"p2"]
  # The space in " proportion-total...." is intentional - it places that category first in the legend
  ltlab <- c(" proportion-total-cases-other", "proportion-other-cases-empty")
  png(paste(imgdir, "\\images\\Proportion-Outcome-Other-Empty-Text.png", sep=""), res=300, width=2400, height=2400)
} else {
  x[,"y1"] <- 1-x[,"p1"]
  x[,"y2"] <- 1-x[,"p2"]
  # The space in " proportion-total...." is intentional - it places that category first in the legend
  ltlab <- c(" proportion-total-cases-non-other", "proportion-other-cases-non-empty")
  png(paste(imgdir, "\\images\\Proportion-Outcome-Other-Empty-Text-Neg.png", sep=""), res=300, width=2400, height=2400)
}
# Lines show the two proportions; hollow blue bars show each court-year's
# share of the year's total cases (p3), indicating how much data backs each point.
ggplot() +
  geom_line(data=x, aes(x=year, y=y1, linetype=ltlab[1])) +
  geom_line(data=x, aes(x=year, y=y2, linetype=ltlab[2])) +
  geom_bar(data=x, aes(x=year, y=p3), stat="identity", color="blue3", fill=NA) +
  scale_linetype_manual(name="", values=setNames(c("solid", "dashed"), ltlab)) +
  scale_x_continuous(breaks=seq(1974, 2018, 4)) +
  facet_wrap(~court) +
  theme(plot.title=element_text(size=12, hjust=0.5),
        plot.subtitle=element_text(size=10, hjust=0.5),
        plot.caption=element_text(size=10, hjust=0.5),
        panel.background=element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.major.y=element_blank(),
        panel.grid.minor=element_blank(),
        panel.border=element_rect(fill=NA, color="gray75"),
        #panel.spacing=unit(-0.2, "lines"),
        axis.title.x=element_text(size=12),
        axis.title.y=element_text(size=12),
        axis.text.x=element_text(size=10, angle=90, hjust=0, vjust=0.5),
        axis.text.y=element_text(size=10),
        #axis.ticks=element_blank(),
        strip.text=element_text(size=8),
        strip.background=element_blank(),
        legend.position="bottom",
        legend.background=element_rect(color=NA),
        legend.key=element_rect(fill="white"),
        legend.box="horizontal",
        legend.text=element_text(size=10),
        legend.title=element_text(size=10)) +
  labs(x="\nyear", y="proportion\n")
dev.off()
# Randomly sample outcome text
# Retrieve court, year, and raw outcome text for all cases typed 'other'
x <- dbGetQuery(db2, "select b.ShortName as court, year(a.decisiondate) as year, a.outcome
                      from CaseHeader a join Court b on a.CourtID=b.ID
                           join CaseOutcomeType c on a.lni=c.lni
                      where c.outcometype='other'")
x[,"court"] <- sub("Circuit Court of Appeals", "Circ", x[,"court"])
x[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", x[,"court"])
x[,"court"] <- sub("Court of Federal Claims", "Fed Claims", x[,"court"])
# NOTE(review): duplicate of the Bankruptcy substitution above; harmless but redundant
x[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", x[,"court"])
x[,"court"] <- sub("Judicial Conference, Committee on Judicial Conduct", "Jud Conduct", x[,"court"])
x[,"court"] <- sub("Temporary Emergency Court of Appeals", "Temp Emergency", x[,"court"])
x[,"court"] <- sub("Tennessee Eastern District Court", "Tenn E Dist", x[,"court"])
x[,"court"] <- sub("Texas Southern District Court", "Tex S Dist", x[,"court"])
# Text containing "dismiss"
# Sample 50, order by year, render as Latex rows ($ escaped for Latex)
k <- sample(grep("dismiss", x[,"outcome"]), 50, replace=F)
k <- k[order(x[k,"year"])]
writeLines(paste(x[k,"court"], " & ", x[k,"year"], " & ", gsub("\\$", "\\\\$", x[k,"outcome"]), "\\\\[4pt]", sep=""))
# 1993-2000, text does not contain "dismiss"
k <- sample(intersect(which(x[,"year"]>1992 & x[,"year"]<2001 & !is.na(x[,"outcome"])),
                      grep("dismiss", x[,"outcome"], invert=T)),
            100, replace=F)
k <- k[order(x[k,"year"])]
writeLines(paste(x[k,"court"], " & ", x[k,"year"], " & ", gsub("\\$", "\\\\$", x[k,"outcome"]), "\\\\[4pt]", sep=""))
#######################################################################################
# Identify duplicate records for cases with alternative spellings of titles in their names
#######################################################################################
# Sample short, long, and LN case titles
x <- dbGetQuery(db2, "select casetitleshort, casetitlelong, casetitlelexisnexis from CaseHeader")
# Render a sample of titles in Latex
# Each title has & and $ escaped for Latex; a spacer row follows each sample row
a <- ""
for(i in sample(1:nrow(x), 10, replace=F))
  a <- c(a,
         paste(gsub("&", "\\&", gsub("$", "\\$", x[i,"casetitleshort"], fixed=T), fixed=T), " & ",
               gsub("&", "\\&", gsub("$", "\\$", x[i,"casetitlelong"], fixed=T), fixed=T), " & ",
               gsub("&", "\\&", gsub("$", "\\$", x[i,"casetitlelexisnexis"], fixed=T), fixed=T), " &\\\\", sep=""),
         "& & &\\\\[-4pt]")
writeLines(a)
# Retrieve case data
# Working set for the title-normalization steps that follow
x <- dbGetQuery(db2, "select a.lni, a.casetitleshort, year(a.decisiondate) as year, b.shortname as court
                      from CaseHeader a join Court b on a.courtid=b.id")
# Compose abbreviation substitutions
# Note that the following "words" should be surrounded by spaces when searching text for accurate delimiting
# Column 1 is the literal pattern, column 2 its expansion; the pairs are later
# applied in order with gsub(..., fixed=T) against lower-cased, space-padded,
# punctuation-stripped titles, with a single space added on each side of the
# pattern and the replacement.
# Fixes relative to the previous version (each defeated the duplicate-title
# canonicalization this table drives):
#  - "NLRB" lower-cased to "nlrb": patterns are matched against lower-cased
#    titles, so an upper-case pattern could never match
#  - "usa " and " va " had embedded spaces; with the delimiting spaces added
#    at application time they produced double-space patterns that could never
#    match (repeated spaces are collapsed before substitution)
#  - replacement typos corrected: "advertizing" -> "advertising",
#    "lousiana" -> "louisiana", "social secutity" -> "social security"
tsub <- matrix(c(
  "u.s.", "united states",
  "usa", "united states",
  "us", "united states",
  "nlrb", "national labor relations board",
  "e.e.o.c.", "equal employment opportunity commission",
  "o.w.c.p.", "office of workers' compensation programs",
  "sec. dep't of", "secretary department of",
  "sec., dep't of", "secretary, department of",
  "social sec.", "social security",
  "employment sec.", "employment security",
  "nat'l comm.", "national committee",
  "aclu", "american civil liberties union",
  "afscme", "american federation of state, county and municipal employees",
  "batf", "bureau of alcohol tobacco and firearms",
  "cia", "central intelligence agency",
  "faa", "federal aviation administration",
  "fbi", "federal bureau of investigation",
  "fdic", "federal deposit insurance corporation",
  "fha", "federal housing authority",
  "frb", "federal reserve board",
  "hud", "housing and urban development",
  "ins", "immigration and naturalization service",
  "irs", "internal revenue service",
  "naacp", "national association for the advancement of colored people",
  "nra", "national rifle association",
  "nrc", "nuclear regulatory commission",
  "nrdc", "natural resources defense council",
  "ntsb", "national transportation safety council",
  "nyse", "new york stock exchange",
  "omb", "office of management and budget",
  "opm", "office of personnel management",
  "osha", "occupational safety and health administration",
  "pbs", "public broadcasting service",
  "sba", "small business administration",
  "ssa", "social security administration",
  "stb", "surface transportation board",
  "uaw", "united auto workers",
  "ufcw", "united food and commercial workers",
  "ufw", "united farm workers",
  "ups", "united parcel service",
  "usda", "united states department of agriculture",
  "usps", "united states postal service",
  "va", "united states department of veterans affairs",
  "ag's", "attorney general's",
  "admin'r", "administrator",
  "adm'r", "administrator",
  "ass'n", "association",
  "assn's", "associations",
  "att'y", "attorney",
  "atty's", "attorneys",
  "c'mmr", "commissioner",
  "comm'n", "commission",
  "comm'r", "commissioner",
  "com'n", "commission",
  #"comn'n", "commonwealth",
  "com'r", "commissioner",
  "commr's", "commissioners",
  "comn'r", "commissioner",
  "comr's", "commissioners",
  "cont'l", "continental",
  "da's", "district attorney's",
  "dep't", "department",
  "enf't", "enforcement",
  "emplr's'.", "employers'",
  "emples'.", "employees'",
  "emples.'", "employees'",
  "eng'g", "engineering",
  "eng'r", "engineer",
  "entm't", "entertainment",
  "env't", "environment",
  "exam'r", "examiner",
  "ex'r", "examiner",
  "examr's", "examiner's",
  "fed'n", "federation",
  "fla.'s", "florida's",
  "gen'l", "general",
  "gen's", "general's",
  "gen.'s", "general's",
  "gov't", "government",
  "govn't", "government",
  "indp't", "independent",
  "inter'l", "international",
  "int'l", "international",
  "intern'l", "international",
  "intern'l.", "international",
  "intrn'l", "international",
  "inv'rs", "investors",
  "mem'l", "memorial",
  "mem'l.", "memorial",
  "mfr.'s", "manufacturer's",
  "na'l", "national",
  "nat'l", "national",
  "nt'l", "national",
  "p'ship", "partnership",
  "p'shp", "partnership",
  "p'shp.", "partnership",
  "prof'l", "professional",
  "publ'g", "publishing",
  "publ'n", "publishing",
  "publ'ns.", "publications",
  "publ'ns", "publications",
  "publ'rs", "publishers",
  "reg'l", "regional",
  "sec't", "secretary",
  "sec'y", "secretary",
  "s'holders", "shareholders",
  "sup'r", "supervisor",
  "soc'y", "society",
  "acc.", "accident",
  "acci.", "accident",
  "admin.", "administration",
  "adver.", "advertising",
  "agric.", "agriculture",
  "ala.", "alabama",
  "am.", "american",
  "appt.", "apartment",
  "ariz.", "arizona",
  "ark.", "arkansas",
  "assn.", "association",
  "asso.", "association",
  "assoc.", "association",
  "assocs.", "associations",
  "assur.", "assurance",
  "atty.", "attorney",
  "attys.", "attorneys",
  "auth.", "authority",
  "auto.", "automotive",
  "ave.", "avenue",
  "balt.", "baltimore",
  "bankr.", "bankruptcy",
  "bhd.", "brotherhood",
  "bldg.", "building",
  "bldgs.", "buildings",
  "bros.", "brothers",
  "broth.", "brothers",
  "bus.", "business",
  "cal.", "california",
  "chem.", "chemical",
  "chems.", "chemicals",
  "chgo.", "chicago",
  "chi.", "chicago",
  "civ.", "civil",
  "cmty.", "community",
  "cnty.", "county",
  "co.", "company",
  "cos.", "companies",
  "colo.", "colorado",
  "com.", "commission",
  "commer.", "commercial",
  "commn.", "commission",
  "commun.", "communication",
  "communs.", "communications",
  "comp.", "compensation",
  "condo.", "condominium",
  "conn.", "connecticut",
  "consol.", "consolidated",
  "const.", "construction",
  "constr.", "construction",
  "contr.", "contractor",
  "contrs.", "contractors",
  "coop.", "cooperative",
  "coops.", "cooperatives",
  "corp.", "corporation",
  "corr.", "correction",
  "crim.", "criminal",
  "ctr.", "center",
  "ctrs.", "centers",
  "cty.", "city",
  "def.", "defense",
  "del.", "delaware",
  "dept.", "department",
  "dev.", "development",
  "det.", "detention",
  "dir.", "director",
  "disc.", "discipline",
  "discrim.", "discrimination",
  "dist.", "district",
  "distrib.", "distribution",
  "distribs.", "distributors",
  "div.", "division",
  "econ.", "economic",
  "educ.", "education",
  "elec.", "electric",
  "elecs.", "electronics",
  "emples.", "employees",
  "emplr.", "employer",
  "emplrs.", "employers",
  "enter.", "enterprise",
  "enters.", "enterprises",
  "envtl.", "environmental",
  "equal.", "equality",
  "equip.", "equipment",
  "exch.", "exchange",
  "exec.", "executive",
  "exp.", "export",
  "fed.", "federal",
  "fedn.", "federation",
  "fid.", "fidelity",
  "fin.", "finance",
  "fla.", "florida",
  "found.", "foundation",
  "ga.", "georgia",
  "gen.", "general",
  "grp.", "group",
  "guar.", "guarantee",
  "hon.", "honorable",
  "hosp.", "hospital",
  "hosps.", "hospitals",
  # NOTE(review): "hous." may more often abbreviate "housing" in case names
  # (e.g. housing authorities) than "houston" -- confirm against the data
  "hous.", "houston",
  "ill.", "illinois",
  "imp.", "import",
  "imps.", "importers",
  "inc.", "incorporated",
  "indem.", "indemnity",
  "indus.", "industry",
  "info.", "information",
  "ins.", "insurance",
  "inst.", "institute",
  "intern.", "international",
  "intl.", "international",
  "inv.", "investment",
  "invest.", "investment",
  "invs.", "investments",
  "kan.", "kansas",
  "ky.", "kentucky",
  "la.", "louisiana",
  "lab.", "laboratory",
  "labs.", "laboratories",
  "liab.", "liability",
  "litig.", "litigation",
  "ltd.", "limited",
  "mach.", "machine",
  "maint.", "maintenance",
  "md.", "maryland",
  "me.", "maine",
  "mech.", "mechanical",
  "med.", "medical",
  "mem.", "memorial",
  "merch.", "merchant",
  "metro.", "metropolitan",
  "mfg.", "manufacturing",
  "mfrs.", "manufacturers",
  "mgmt.", "management",
  "mich.", "michigan",
  "minn.", "minnesota",
  "miss.", "mississippi",
  "mkt.", "market",
  "mktg.", "marketing",
  "mkts.", "markets",
  "mo.", "missouri",
  "mont.", "montana",
  "mortg.", "mortgage",
  "mr.", "mister",
  "mun.", "municipal",
  "mut.", "mutual",
  "n.c.", "north carolina",
  "n.h.", "new hampshire",
  "n.j.", "new jersey",
  "n.m.", "new mexico",
  "n.y.", "new york",
  "natl.", "national",
  "nev.", "nevada",
  "no.", "number",
  "new eng.", "new england",
  "ofc.", "office",
  "off.", "office",
  "okla.", "oklahoma",
  "or.", "oregon",
  "org.", "organization",
  "pa.", "pennsylvania",
  "pac.", "pacific",
  "par.", "parish",
  "pers.", "personnel",
  "pharm.", "pharmaceutical",
  "pharms.", "pharmaceuticals",
  "phila.", "philadelphia",
  "reprod.", "reproductive",
  "prod.", "product",
  "prods.", "products",
  "prop.", "property",
  "props.", "properties",
  "prot.", "protection",
  "pshp.", "partnership",
  "pub.", "public",
  "publ.", "publishing",
  "publs.", "publishers",
  "r.i.", "rhode island",
  "rd.", "road",
  "rds.", "roads",
  "rec.", "recreation",
  "rehab.", "rehabilitation",
  "rels.", "relations",
  #"res.", "resources",
  "rest.", "restaurant",
  "rests.", "restaurants",
  "ret.", "retirement",
  "rev.", "revenue",
  "ry.", "railway",
  "s.c.", "south carolina",
  "s.d.", "south dakota",
  "sch.", "school",
  "schs.", "schools",
  "soc. sec.", "social security",
  "homeland sec.", "homeland security",
  "sec. for", "secretary for",
  "sec. of", "secretary of",
  "serv.", "service",
  "servs.", "services",
  "std.", "standard",
  "sys.", "system",
  "tel.", "telephone",
  "tenn.", "tennessee",
  "tex.", "texas",
  "transp.", "transportation",
  "twp.", "township",
  "univ.", "university",
  "va.", "virginia",
  "wash.", "washington"
  ),
  ncol=2, byrow=TRUE)
# Compose Latex table with wrapped columns of text substitution pairs.
# npair = number of pattern/replacement pairs rendered per Latex table row
# (renamed from "ncol", which masked base::ncol()).
npair <- 2
a <- ""
for(i in seq(1, nrow(tsub), npair)) {
  # j indexes the last pair on this table row (handles a short final row)
  j <- min(i+npair-1, nrow(tsub))
  b <- paste(tsub[i:j,1], " & ", tsub[i:j,2], sep="")
  a <- c(a,
         paste(paste(b, collapse=" & & ", sep=""), " &\\\\", sep=""),
         # Spacer row: one "&" per Latex column plus negative vertical space
         paste(paste(rep(" & ", 3*npair-1), collapse="", sep=""), "\\\\[-6pt]", sep=""))
}
writeLines(a)
# Identify words in case titles that contain a special character(s) (apostrophe, period), but are not in the tsub vector
# schar selects the character class scanned for: [1] = apostrophe, [2] = period
schar <- c("'", "\\.")[2]
# NOTE(review): if no title matched any tsub pattern, the negative index below
# would be integer(0) and select ZERO rows rather than all rows -- in practice
# matches always exist, but worth confirming
y <- sort(unique(
       unlist(lapply(
         # Extract case titles that do not contain words in tsub
         x[-unique(
             unlist(
               lapply(1:nrow(tsub),
                      function(i)
                        grep(tsub[i,1],
                             gsub(" v. ", "", tolower(x[,"casetitleshort"])), fixed=T)
               ))),"casetitleshort"],
         function(a) {
           # Identify words containing an special character(s)
           b <- strsplit(a, " ")[[1]]
           b[grep(schar, b)]
         }))
     ))
# Examine words with specified leading character(s)
writeLines(y[which(tolower(substring(y, 1, 1))=="s")])
# Examine specific strings
x[grep("b.a.s.i.c.", tolower(x[,"casetitleshort"]), fixed=T),]
# Titles containing "sec." in a context not already covered by the tsub entries
x[setdiff(setdiff(setdiff(setdiff(
    grep("sec\\.", tolower(x[,"casetitleshort"])),
    grep("soc\\. sec\\.", tolower(x[,"casetitleshort"]))),
    grep("homeland sec\\.", tolower(x[,"casetitleshort"]))),
    grep("sec\\. for", tolower(x[,"casetitleshort"]))),
    grep("sec\\. of", tolower(x[,"casetitleshort"]))),]
# Examine upper case words in titles
# A word qualifies when every one of its characters matches [A-Z]
z <- sort(unique(unlist(
       lapply(x[,"casetitleshort"],
              function(a) {
                lapply(strsplit(a, " ")[[1]],
                       function(b)
                         if(nchar(b)==length(grep("[A-Z]", strsplit(b, "")[[1]]))) {
                           return(b)
                         } else {
                           return(NULL)
                         })
              }))))
writeLines(z[which(tolower(substring(z, 1, 1))=="v")])
####
# Convert title text to lower case
# Surround with spaces so that leading and trailing words are delimited on each side
# Replace commas, colons, and semicolons with a single space (to avoid, for instance, "int'l,")
# Omit repeated spaces
####
ttl <- tolower(
         # Surround with spaces first so that leading and trailing spaces become repeats to be collapsed
         # Convert punctuation symbols to a space then convert repeating spaces
         # Note that the comma is a control character within [], so escape it
         # \s+ locates repeated whitespace, tabs, new line, cr, vert tab
         gsub("\\s+", " ",
              gsub("[\\,;:]", " ",
                   paste(" ", x[,"casetitleshort"], " ", sep=""))))
# Verify absence of punctuation and repeated spaces
grep(",", ttl)
grep(";", ttl)
grep(":", ttl)
ttl[grep("  ", ttl)]
# Substitute text in titles
# Include delimiting spaces so that "words" are isolated
# fixed=T: tsub patterns are literal strings, not regular expressions
for(i in 1:nrow(tsub))
  ttl <- gsub(paste(" ", tsub[i,1], " ", sep=""), paste(" ", tsub[i,2], " ", sep=""), ttl, fixed=T)
gc()
# Omit surrounding spaces in substituted titles
# Both checks should return integer(0): every title still has its pad spaces
which(substring(ttl, 1, 1) != " ")
which(substring(ttl, nchar(ttl), nchar(ttl)) != " ")
ttl <- substring(ttl, 2, nchar(ttl)-1)
# Compare initial and text-substituted titles
w <- cbind(x[,"casetitleshort"], ttl, "")
# Upload text-substituted titles to database
dbGetQuery(db2, "create table CaseAltTitle(LNI varchar(50) primary key, AltTitle varchar(500))")
dbGetQuery(db2, "truncate table CaseAltTitle")
# Insert in chunks of nt rows; single quotes in titles are doubled for SQL
nt <- 10000
for(i in seq(1, nrow(x), nt)) {
  k <- i:min(i+nt-1, nrow(x))
  query <- paste("insert into CaseAltTitle(lni, alttitle) values(",
                 paste(paste("'", x[k,"lni"], "', '", gsub("'", "''", ttl[k], fixed=T), "'", sep=""),
                       collapse="), (", sep=""),
                 ")", sep="")
  dbGetQuery(db2, query)
}
gc()
# Verify case counts
dbGetQuery(db2, "select count(1) from CaseHeader")
dbGetQuery(db2, "select count(1) from CaseAltTitle")
dbGetQuery(db2, "select count(1) from CaseHeader where lni not in(select lni from CaseAltTitle)")
# Enumerate distinct case titles, courts and dates
dbGetQuery(db2, "select count(distinct courtid, decisiondate, casetitleshort) from CaseHeader")
dbGetQuery(db2, "select count(distinct a.courtid, a.decisiondate, b.alttitle) from CaseHeader a join CaseAltTitle b on a.lni=b.lni")
# Verify absence of double spaces in substituted titles
dbGetQuery(db2, "select alttitle from CaseAltTitle where alttitle like '%  %'")
# Identify cases with identical court and decision date, unequal titles, but equal alternate titles
w0 <- dbGetQuery(db2, "select a.lni
                       from CaseHeader a join CaseHeader b on a.courtid=b.courtid and a.decisiondate=b.decisiondate
                            join Court c on a.courtid=c.id
                            join CaseAltTitle d on a.lni=d.lni
                            join CaseAltTitle e on b.lni=e.lni
                       where a.casetitleshort<>b.casetitleshort
                             and d.AltTitle=e.AltTitle")
# Tabulate case title duplication by court and year, using original title
# Subquery c keeps one representative lni per duplicated (court, date, title) group
z <- dbGetQuery(db2, "select a.lni, a.courtid, b.shortname as court, a.decisiondate, a.casetitleshort, c.n
                      from CaseHeader a join Court b on a.courtid=b.id
                           join ( select min(lni) as lni, count(1) as n
                                  from CaseHeader
                                  group by courtid, decisiondate, casetitleshort
                                  having count(1)>1
                                ) c on a.lni=c.lni")
table(z[,"n"])
# Tabulate case title duplication
# Same tabulation but grouping on the normalized alternate title
z2 <- dbGetQuery(db2, "select a.lni, a.courtid, b.shortname as court, a.decisiondate, a.casetitleshort, c.n
                       from CaseHeader a join Court b on a.courtid=b.id
                            join ( select min(a.lni) as lni, count(1) as n
                                   from CaseHeader a join CaseAltTitle b on a.lni=b.lni
                                   group by a.courtid, a.decisiondate, b.alttitle
                                   having count(1)>1
                                 ) c on a.lni=c.lni")
table(z2[,"n"])
# Render Latex table to compare distribution of case duplication frequency
z3 <- merge(data.frame(table(z[,"n"])), data.frame(table(z2[,"n"])), by="Var1", all=T)
z3 <- z3[order(as.integer(z3[,"Var1"])),]
writeLines(paste(z3[,1], " & ", format(z3[,2], big.mark=","), " & ", format(z3[,3], big.mark=","), "\\\\", sep=""))
# Abbreviate court names
z2[,"court"] <- sub("Circuit Court of Appeals", "Circ", z2[,"court"])
z2[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", z2[,"court"])
z2[,"court"] <- sub("Court of Federal Claims", "Fed Claims", z2[,"court"])
# NOTE(review): duplicate of the Bankruptcy substitution above; harmless but redundant
z2[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", z2[,"court"])
z2[,"court"] <- sub("Judicial Conference, Committee on Judicial Conduct", "Jud Conduct", z2[,"court"])
z2[,"court"] <- sub("Temporary Emergency Court of Appeals", "Temp Emergency", z2[,"court"])
z2[,"court"] <- sub("Tennessee Eastern District Court", "Tenn E Dist", z2[,"court"])
z2[,"court"] <- sub("Texas Southern District Court", "Tex S Dist", z2[,"court"])
z2[,"court"] <- factor(z2[,"court"], levels=c("", "1st Circ", "2nd Circ", "3rd Circ", "4th Circ", "5th Circ",
                                              "6th Circ", "6th Circ Bkruptcy", "7th Circ", "8th Circ",
                                              "9th Circ", "10th Circ", "11th Circ", "DC Circ", "Fed Claims",
                                              "Federal Circ", "Jud Conduct", "Temp Emergency", "Tenn E Dist",
                                              "Tex S Dist"))
# Generate heat map indicating duplicate case frequencies by year and court
# Tabulate duplicate cases by court and year
gdat <- aggregate(z2[,"n"], by=list(z2[,"court"], substring(z2[,"decisiondate"], 1, 4)), sum)
colnames(gdat) <- c("court", "year", "n")
png(paste(imgdir, "\\images\\CaseTitleDuplicateCourtYearHeatMap.png", sep=""), res=300, width=2400, height=2400)
ggplot() +
  geom_tile(data=gdat, aes(x=court, y=year, fill=n)) +
  # Panel background matches the gradient's low color so missing cells read as zero
  scale_fill_gradient(name="short case name duplicates  ", limits=c(0, 8000), low="#0000b0", high="yellow",
                      labels=function(x) format(x, big.mark=",")) +
  scale_y_discrete(breaks=seq(1974, 2018, 4)) +
  theme(plot.title=element_text(size=12, hjust=0.5),
        plot.subtitle=element_text(size=10, hjust=0.5),
        plot.caption=element_text(size=10, hjust=0.5),
        panel.background=element_rect(fill="#0000b0"),
        panel.grid.major.x=element_blank(),
        panel.grid.major.y=element_blank(),
        panel.grid.minor=element_blank(),
        panel.border=element_rect(fill=NA, color="gray75"),
        #panel.spacing=unit(-0.2, "lines"),
        axis.title.x=element_text(size=12),
        axis.title.y=element_text(size=12),
        axis.text.x=element_text(size=10, angle=90, hjust=0, vjust=0.5),
        axis.text.y=element_text(size=10),
        #axis.ticks=element_blank(),
        strip.text=element_text(size=8),
        strip.background=element_blank(),
        legend.position="bottom",
        legend.background=element_rect(color=NA),
        legend.key=element_rect(fill="white"),
        legend.box="horizontal",
        legend.text=element_text(size=10, angle=90, hjust=0.5, vjust=0.5),
        legend.title=element_text(size=10)) +
  labs(x="\ncourt", y="year\n")
dev.off()
# List cases with identical titles after text substitution
# Restrict to 4th Circuit, 1996, duplicates found ONLY via alternate titles
# (z holds original-title duplicates; excluding its lni values isolates pairs
# the normalization newly matched)
k <- which(z2[,"court"]=="4th Circ" & substring(z2[,"decisiondate"], 1, 4)=="1996" & !z2[,"lni"] %in% z[,"lni"])
k <- k[order(z2[k,"decisiondate"], z2[k,"casetitleshort"])]
w <- z2[k,c("decisiondate", "casetitleshort")]
rownames(w) <- NULL
# Inspect long names and LN names for selected courts and dates (with duplicated short case names)
# k2 selects which duplicate group (row of k) to examine
k2 <-1
w2 <- dbGetQuery(db2, paste(" select casetitleshort, casetitlelong, casetitlelexisnexis
                              from CaseHeader
                              where courtid=", z2[k[k2],"courtid"], " and decisiondate='", z2[k[k2],"decisiondate"], "'",
                            " order by casetitleshort", sep=""))
write.table(w2, "TitleEval\\CaseTitle-4th-1996.csv", row.names=F, col.names=T, sep=", ", quote=T)
# Identify cases with duplicated name and differing values in outcome, per curiam, pubstatus, or authors
# One row per (court, date, alternate title) group with distinct-value counts per field
x <- dbGetQuery(db2, "select min(a.lni) as lni, a.courtid, c.shortname as court, a.decisiondate, d.alttitle,
                             count(distinct a.outcome) as noutcome,
                             count(distinct a.percuriam) as npercuriam,
                             count(distinct a.pubstatus) as npubstatus,
                             count(distinct b.paneltext) as npanel,
                             count(distinct b.opinionbytext) as nopinion,
                             count(distinct b.concurbytext) as nconcur,
                             count(distinct b.dissentbytext) as ndissent,
                             count(1) as ncase
                      from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
                           join Court c on a.courtid=c.id
                           join CaseAltTitle d on a.lni=d.lni
                      group by a.courtid, c.shortname, a.decisiondate, d.AltTitle")
# Enumerate cases
sum(x[,"ncase"])
# Enumerate distinct alternate titles
nrow(x)
# Enumerate alternate titles with multiple cases (court and date), but single outcomes, per curiam, pub status, and authors
k <- which(x[,"ncase"]>1 & x[,"noutcome"]<2 & x[,"npercuriam"]<2 & x[,"npubstatus"]<2 & x[,"npanel"]<2 & x[,"nopinion"]<2 & x[,"nconcur"]<2 & x[,"ndissent"]<2)
length(k)
# Enumerate alternate titles with multiple cases and different outcome, ... values
k <- which(x[,"noutcome"]>1 | x[,"npercuriam"]>1 | x[,"npubstatus"]>1 | x[,"npanel"]>1 | x[,"nopinion"]>1 | x[,"nconcur"]>1 | x[,"ndissent"]>1)
length(k)
w <- x[k,]
rownames(w) <- NULL
# Tabulate frequency of differences by variable
table(w[,"court"])
# Per-court fraction of title groups with more than one distinct outcome
aggregate(1:nrow(x), by=list(x[,"court"]), function(k) length(which(x[k,"noutcome"]>1))/length(k))
table(substring(w[,"decisiondate"], 1, 4))
# Per-year fraction of title groups with more than one distinct outcome
aggregate(1:nrow(x), by=list(substring(x[,"decisiondate"], 1, 4)), function(k) length(which(x[k,"noutcome"]>1))/length(k))
table(x[,"ncase"])
table(x[,"noutcome"])
sum(table(x[which(x[,"ncase"]>3),"noutcome"]))
table(x[,"npercuriam"])
table(x[,"npubstatus"])
table(x[,"npanel"])
table(x[,"nopinion"])
table(x[,"nconcur"])
table(x[,"ndissent"])
# Render Latex table of n values by variable
# Limit to duplicated cases
y <- data.frame("n"=integer())
for(v in c("noutcome", "npanel", "nopinion", "nconcur", "ndissent", "npercuriam", "npubstatus"))
  y <- merge(y, setNames(data.frame(table(x[which(x[,"ncase"]>1),v])), c("n", v)), by="n", all=T)
# Replace NA cells (counts absent for a given n) with zero
for(j in 1:ncol(y))
  y[which(is.na(y[,j])),j] <- 0
y <- y[order(y[,"n"]),]
a <- paste(paste(colnames(y), collapse=" & ", sep=""), "\\\\", sep="")
for(i in 1:nrow(y))
  a <- c(a, paste(paste(format(y[i,], big.mark=","), collapse=" & ", sep=""), "\\\\", sep=""))
writeLines(a)
# Inspect cases with differences in comparison fields
j <- which(x[,"ncase"]>1 & x[,"nopinion"]>1 & x[,"nconcur"]>1 & x[,"ndissent"]>1)
k <- j[2]
y <- dbGetQuery(db2, paste("select a.courtid, a.decisiondate, a.casetitlelexisnexis, a.outcome,
                                   b.paneltext, b.opinionbytext, b.concurbytext, b.dissentbytext
                            from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
                                 join CaseAltTitle c on a.lni=c.lni
                            where c.alttitle='", gsub("'", "''", x[k[1],"alttitle"], fixed=T), "' and a.courtid=", x[k[1],"courtid"], " and a.decisiondate='", x[k[1],"decisiondate"], "'
                            order by a.courtid, a.decisiondate, a.casetitlelexisnexis", sep=""))
y[,"outcome"]
y[,"paneltext"]
dbGetQuery(db2, "select casetitleshort, outcome from CaseHeader where courtid=1 and decisiondate='1979-07-18' order by casetitleshort")
w <- dbGetQuery(db2, "select * from CaseHeader where courtid=1 and decisiondate='1979-09-05' order by casetitleshort")
# NOTE(review): output file name says 1975-09-05 but the query above filters
# on decisiondate 1979-09-05 -- confirm which date was intended
write.table(w, "TitleEval\\CaseTitle-9th-1975-09-05.csv", row.names=F, col.names=T, sep=",", quote=T)
# Enumerate alternate titles with multiple cases, outcomes, and panels
k <- which(x[,"ncase"]>1 & x[,"noutcome"]>1 & x[,"npanel"]>1)
length(k)
# Enumerate alternate titles with multiple cases, outcomes, and opinion authors
k <- which(x[,"ncase"]>1 & x[,"noutcome"]>1 & x[,"nopinion"]>1)
length(k)
# Enumerate alternate titles with multiple cases, outcomes, and concurring authors
k <- which(x[,"ncase"]>1 & x[,"noutcome"]>1 & x[,"nconcur"]>1)
length(k)
# Enumerate alternate titles with multiple cases, outcomes, and dissenting authors
k <- which(x[,"ncase"]>1 & x[,"noutcome"]>1 & x[,"ndissent"]>1)
length(k)
# Sample short, long, and LN titles for cases with duplicated short titles
# Render in Latex
# Limit to groups of 3-5 duplicates for readable tables
k <- which(x[,"ncase"]>2 & x[,"ncase"]<6)
length(k)
a <- ""
for(i in sample(k, 10, replace=F)) {
  w2 <- dbGetQuery(db2, paste("select a.casetitleshort, a.casetitlelong, a.casetitlelexisnexis
                               from CaseHeader a join CaseAltTitle b on a.lni=b.lni
                               where b.alttitle='", gsub("'", "''", x[i,"alttitle"], fixed=T), "'
                                     and a.courtid=", x[i,"courtid"], "
                                     and a.decisiondate='", x[i,"decisiondate"], "'", sep=""))
  for(j in 1:nrow(w2))
    a <- c(a,
           paste(gsub("&", "\\&", gsub("$", "\\$", w2[j,"casetitleshort"], fixed=T), fixed=T), " & ",
                 gsub("&", "\\&", gsub("$", "\\$", w2[j,"casetitlelong"], fixed=T), fixed=T), " & ",
                 gsub("&", "\\&", gsub("$", "\\$", w2[j,"casetitlelexisnexis"], fixed=T), fixed=T), " &\\\\", sep=""),
           "& & &\\\\[-4pt]")
  a <- c(a, "\\hline\\\\[-4pt]")
}
writeLines(a)
# Examine individual cases for demonstration
# NOTE(review): alttitle values are lower-cased, so the two "like" patterns
# containing capital letters below can never match -- verify whether the
# capitalization (and the leading "X" on the rutherford pattern) is intentional
w <- dbGetQuery(db2, "select a.courtid, a.decisiondate, a.casetitlelexisnexis, a.outcome,
                             b.paneltext, b.opinionbytext, b.concurbytext, b.dissentbytext
                      from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
                           join CaseAltTitle c on a.lni=c.lni
                      where c.alttitle='guam v. ibanez' and a.decisiondate='1993-04-13'
                            or c.alttitle='norman v. lynaugh' and a.decisiondate='1988-07-05'
                            or c.alttitle='united states v. tolliver' and a.decisiondate='1995-10-19'
                            or c.alttitle like 'in re Southwest Restaurant Systems%' and a.decisiondate='1979-09-05'
                            or c.alttitle like 'Xrutherford v. bd pardon%' and a.decisiondate='2003-04-23'
                            or c.alttitle like 'r.e. serv%'
                      order by a.courtid, a.decisiondate, a.casetitlelexisnexis")
#######################################################################################
# Reproduce figure 3 using unique cases
#######################################################################################
# Identify cases with duplicated name and differing values in outcome, per curiam, pubstatus, or authors
# One row per (court, decision date, alternate title); n* columns count distinct values.
x <- dbGetQuery(db2, "select min(a.lni) as lni, a.courtid, c.shortname as court, a.decisiondate, d.alttitle,
count(distinct a.outcome) as noutcome,
count(distinct a.percuriam) as npercuriam,
count(distinct a.pubstatus) as npubstatus,
count(distinct b.paneltext) as npanel,
count(distinct b.opinionbytext) as nopinion,
count(distinct b.concurbytext) as nconcur,
count(distinct b.dissentbytext) as ndissent,
count(1) as ncase
from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
join Court c on a.courtid=c.id
join CaseAltTitle d on a.lni=d.lni
group by a.courtid, c.shortname, a.decisiondate, d.AltTitle")
# Inspect cases with differences in author fields
j <- which(x[,"nopinion"]>1 & (x[,"nconcur"]>1 | x[,"ndissent"]>1))
# For each flagged group, pull the full duplicate set (one query per index in j);
# y becomes a list of data frames.
y <- apply(as.matrix(j), 1,
function(k) dbGetQuery(db2,
paste("select a.courtid, a.decisiondate, a.casetitlelexisnexis, a.outcome,
b.paneltext, b.opinionbytext, b.concurbytext, b.dissentbytext
from CaseHeader a join CaseHeaderExt b on a.lni=b.lni
join CaseAltTitle c on a.lni=c.lni
where c.alttitle='", gsub("'", "''", x[k,"alttitle"], fixed=T), "'
and a.courtid=", x[k,"courtid"], "
and a.decisiondate='", x[k,"decisiondate"], "'
order by a.courtid, a.decisiondate, a.casetitlelexisnexis", sep="")))
# Render Latex table containing select cases (gray rule between duplicates, black between groups)
a <- ""
for(i in 1:length(y)) {
for(j in 1:nrow(y[[i]]))
a <- c(a, paste("Title: & ", gsub("&", "\\&", y[[i]][j,"casetitlelexisnexis"], fixed=T), "\\\\[2pt]", sep=""),
paste("Outcome: & ", gsub("&", "\\&", y[[i]][j,"outcome"], fixed=T), "\\\\[2pt]", sep=""),
paste("Panel: & ", y[[i]][j,"paneltext"], "\\\\[2pt]", sep=""),
paste("Op. by: & ", y[[i]][j,"opinionbytext"], "\\\\[2pt]", sep=""),
paste("Conc. by: & ", y[[i]][j,"concurbytext"], "\\\\[2pt]", sep=""),
paste("Diss. by: & ", y[[i]][j,"dissentbytext"], "\\\\[2pt]", sep=""),
ifelse(j<nrow(y[[i]]), "\\arrayrulecolor{gray}\\hline\\\\[-4pt]", ""))
a <- c(a, "\\arrayrulecolor{black}\\hline\\\\[-4pt]")
}
writeLines(a)
####
# Distribution of opinion, concurring, and dissenting authors by year and court
####
# Distribution of cases by author combination, both data sets.
# The inner subquery collapses duplicate-title cases to one row (min(lni)) and flags
# whether each opinion type has a non-empty JudgeID; the outer query counts cases per
# court/year and 3-character pattern (e.g. "o-d" = opinion + dissent, no concurrence).
x <- dbGetQuery(db2, "select b.ShortName as court, year(a.DecisionDate) as year,
concat(case when(c.o=1)then 'o' else '-' end,
concat(case when(c.c=1)then 'c' else '-' end,
case when(c.d=1)then 'd' else '-' end)) as pattern,
count(1) as n
from CaseHeader a join Court b on a.CourtID=b.ID
join ( select min(a.lni) as lni,
max(case when(OpinionType='Opinion' and char_length(JudgeID)>0)then 1 else 0 end) as o,
max(case when(OpinionType='Concur' and char_length(JudgeID)>0)then 1 else 0 end) as c,
max(case when(OpinionType='Dissent' and char_length(JudgeID)>0)then 1 else 0 end) as d
from CaseHeader a join Opinion b on a.lni=b.lni
join CaseAltTitle c on a.lni=c.lni
group by a.courtid, a.decisiondate, c.alttitle
) c on a.lni=c.lni
group by b.ShortName, year(a.DecisionDate),
concat(case when(c.o=1)then 'o' else '-' end,
concat(case when(c.c=1)then 'c' else '-' end,
case when(c.d=1)then 'd' else '-' end))")
# Abbreviate court names so they fit as facet labels, then order them as a factor
# so facets appear in circuit order rather than alphabetically.
unique(x[,"court"])
x[,"court"] <- sub("Circuit Court of Appeals", "Circ", x[,"court"])
x[,"court"] <- sub("Circuit Bankruptcy Appellate Panel", "Circ Bkruptcy", x[,"court"])
x[,"court"] <- sub("Court of Federal Claims", "Fed Claims", x[,"court"])
# Fix: removed a second, identical "Circuit Bankruptcy Appellate Panel" substitution
# that followed here; it could never match again after the first pass (no behavior change).
x[,"court"] <- sub("Judicial Conference, Committee on Judicial Conduct", "Jud Conduct", x[,"court"])
x[,"court"] <- sub("Temporary Emergency Court of Appeals", "Temp Emergency", x[,"court"])
x[,"court"] <- sub("Tennessee Eastern District Court", "Tenn E Dist", x[,"court"])
x[,"court"] <- sub("Texas Southern District Court", "Tex S Dist", x[,"court"])
# Sanity check: list the abbreviated names before fixing the level order
sort(unique(x[,"court"]))
x[,"court"] <- factor(x[,"court"], levels=c("", "1st Circ", "2nd Circ", "3rd Circ", "4th Circ", "5th Circ",
                                            "6th Circ", "6th Circ Bkruptcy", "7th Circ", "8th Circ",
                                            "9th Circ", "10th Circ", "11th Circ", "DC Circ", "Fed Claims",
                                            "Federal Circ", "Jud Conduct", "Temp Emergency", "Tenn E Dist",
                                            "Tex S Dist"))
# Plot: stacked bars of case counts per year, colored by author pattern, faceted by court.
# NOTE(review): assumes `imgdir` is defined earlier in the file -- confirm before running standalone.
png(paste(imgdir, "\\images\\Fig3-Dups-Omitted.png", sep=""), res=300, width=2400, height=2400)
ggplot() +
geom_bar(data=x, aes(x=year-0.25, y=n, fill=pattern), position="stack", stat="identity") +
scale_fill_manual(name="o=opinion, c=concur, d=dissent",
values=c("ocd"="#984EA3", "oc-"="#F781BF", "o-d"="#4DAF4A", "o--"="#E41A1C",
"-cd"="#FF7F00", "-c-"="#FFFF33", "--d"="#A65628", "---"="#377EB8")) +
#geom_vline(data=data.frame("x"=seq(1973.5, 2018.5, 1)), aes(xintercept=x), color="gray85", size=0.1) +
scale_x_continuous(breaks=seq(1974, 2018, 4)) +
scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
facet_wrap(~court) +
theme(plot.title=element_text(size=12, hjust=0.5),
plot.subtitle=element_text(size=10, hjust=0.5),
plot.caption=element_text(size=10, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
#panel.spacing=unit(-0.2, "lines"),
axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12),
axis.text.x=element_text(size=10, angle=90, hjust=0, vjust=0.5),
axis.text.y=element_text(size=10),
#axis.ticks=element_blank(),
strip.text=element_text(size=8),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color=NA),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=10),
legend.title=element_text(size=10)) +
labs(x="\nyear", y="cases\n")
dev.off()
# Compute average outcome length for duplicated cases
# Distribution of cases by author combination, both data sets
# NOTE(review): the expression below actually computes the FRACTION of duplicated-title
# cases whose outcome is NA, not an average length -- the two comments above look stale.
x <- dbGetQuery(db2, "select a.outcome
from CaseHeader a join CaseAltTitle b on a.lni=b.lni
join ( select a.courtid, a.decisiondate, b.alttitle
from CaseHeader a join CaseAltTitle b on a.lni=b.lni
group by a.courtid, a.decisiondate, b.alttitle
having count(1)>1
) c on a.courtid=c.courtid and a.decisiondate=c.decisiondate and b.alttitle=c.alttitle")
# Proportion of missing outcomes among cases that share a (court, date, alt-title) key
length(which(is.na(x[,"outcome"])))/nrow(x)
#######################################################################################
# Sample cases with largest difference in number of legal topics between data sets A and B
#######################################################################################
# Per-case topic counts from both schemas (db1 and Appeals2), widest differences first
x <- dbGetQuery(db1, "select a.lni, b.lni, ifnull(t1.n, 0) as n1, ifnull(t2.n, 0) as n2,
c.shortname as court, a.decisiondate, a.casetitleshort
from CaseHeader a left join Appeals2.CaseHeader b on a.lni=b.lni
left join ( select lni, count(1) as n
from CaseLegalTopics
group by lni
) t1 on a.lni=t1.lni
left join ( select lni, count(1) as n
from Appeals2.CaseLegalTopics
group by lni
) t2 on a.lni=t2.lni
join Court c on a.courtid=c.id
where ifnull(t1.n, 0)<>ifnull(t2.n, 0)
order by abs(ifnull(t1.n, 0)-ifnull(t2.n, 0)) desc")
x[,"court"] <- gsub(" Court of Appeals", "", x[,"court"], fixed=T)
# Render the top 100 differences as LaTeX table rows
a <- ""
for(i in 1:100)
a <- c(a,
paste(paste(gsub("&", "\\&", x[i,"casetitleshort"], fixed=T), " & ", x[i,"court"], " & ",
x[i,"decisiondate"], " & ", x[i,"n1"], " & ", x[i,"n2"], sep=""),
"\\\\", sep=""),
"& & & &\\\\[-6pt]")
writeLines(a)
# Verify with instructions from legal topics script
x2 <- dbGetQuery(db1, "select LNI, count(1) from CaseLegalTopics group by LNI")
y2 <- dbGetQuery(db2, "select LNI, count(1) from CaseLegalTopics group by LNI")
# Merge March and December counts by case
# Retain cases that do not appear in the alternate data set (all=T keeps non-matches)
z2 <- merge(x2, y2, by="LNI", all=T)
colnames(z2) <- c("LNI", "n1", "n2")
# Convert counts to 0 for cases missing in one data set
z2[which(is.na(z2[,"n1"])),"n1"] <- 0
z2[which(is.na(z2[,"n2"])),"n2"] <- 0
# Compute the difference in counts, between data sets, by case
z2[,"nDiff"] <- z2[,"n2"]-z2[,"n1"]
# Inspect maximum frequencies
max(z2[,"n1"])
max(z2[,"n2"])
# Pull a single case from both data sets for manual comparison ([2] selects the second LNI)
lni <- c('4895-3050-0038-X013-00000-00', '3TRW-C880-0038-X28B-00000-00')[2]
dbGetQuery(db1, paste("select * from CaseHeader where lni='", lni, "'", sep=""))
dbGetQuery(db2, paste("select * from CaseHeader where lni='", lni, "'", sep=""))
#######################################################################################
# Sample 4th or 11th circuit cases with each author combination in "spike' period of 1990-1994
#######################################################################################
dbGetQuery(db2, "select * from Opinion limit 20")
# All eight opinion/concur/dissent presence patterns; [2] below selects the 11th Circuit
ocd <- c("ocd", "oc-", "o-d", "o--", "-cd", "-c-", "--d", "---")
csn <- c("4th Circuit Court of Appeals", "11th Circuit Court of Appeals")[2]
# One query per pattern: "o"/"c"/"d" present in the pattern requires a non-empty judge id
# for that opinion type (<>''), absent requires an empty one (='')
x <- lapply(ocd,
function(p) {
authpattern <- strsplit(p, "")[[1]]
print(authpattern)
dbGetQuery(db2, paste("select a.decisiondate, a.casetitleshort,
ifnull(opo.judgeid, '') as opo,
ifnull(opc.judgeid, '') as opc,
ifnull(opd.judgeid, '') as opd
from CaseHeader a join Court c on a.courtid=c.id
left join Opinion opo on a.lni=opo.lni and opo.opiniontype='opinion'
left join Opinion opc on a.lni=opc.lni and opc.opiniontype='concur'
left join Opinion opd on a.lni=opd.lni and opd.opiniontype='dissent'
where year(a.decisiondate) between 1990 and 1994
and c.shortname='", csn, "'
and ifnull(opo.judgeid, '')", ifelse("o" %in% authpattern, "<>", "="), "''
and ifnull(opc.judgeid, '')", ifelse("c" %in% authpattern, "<>", "="), "''
and ifnull(opd.judgeid, '')", ifelse("d" %in% authpattern, "<>", "="), "''", sep=""))
})
# Render Latex table
# ns[i] = number of cases to sample for pattern ocd[i]; i0 selects which patterns to render
ns <- c(20, 20, 20, 100, 10, 10, 10, 20)
a <- ""
i0 <- 4
#i0 <- c(1, 2, 3, 8)
for(i in i0) {
k <- sample(1:nrow(x[[i]]), ns[i], replace=F)
k <- k[order(x[[i]][k,"decisiondate"])]
for(j in k)
a <- c(a, paste(ocd[i], " & ",
x[[i]][j,"decisiondate"], " & ",
gsub("&", "\\&", x[[i]][j,"casetitleshort"], fixed=T), " & ",
gsub("[\\~0-9]", "", gsub("urn:entity:jud-", "", x[[i]][j,"opo"], fixed=T), fixed=F), " & ",
gsub("[\\~0-9]", "", gsub("urn:entity:jud-", "", x[[i]][j,"opc"], fixed=T), fixed=F), " & ",
gsub("[\\~0-9]", "", gsub("urn:entity:jud-", "", x[[i]][j,"opd"], fixed=T), fixed=F), " & ",
"\\\\", sep=""),
"& & & & & &\\\\[-4pt]")
}
writeLines(a)
# Spot check cases: i indexes parallel title/date vectors below (i <- 11 picks the last pair)
i <- 11
ttl <- c("United States v. Van Dyke", "UNITED STATES v. CROCKETT", "Slattery v. Rizzo",
"Fant v. United States Marshal Serv.", "Elmore v. Cone Mills Corp.",
"Zady Natey, Inc. v. United Food & Commercial Workers Int''l Union, Local No. 27",
"Shaw v. Stroud", "Republican Party v. Hunt", "UNITED STATES v. BOARD",
"Hutchinson v. Town of Elkton",
"United States ex rel. Barber-Colman Co. v. United States Fidelity & Guar. Co.")[i]
dt <- c("1990-02-12", "1990-11-13", "1991-07-25", "1993-10-27", "1994-05-06", "1993-06-01",
"1994-01-06", "1993-04-27", "1991-04-05", "1990-05-24", "1994-03-21")[i]
dbGetQuery(db2, paste("select b.*
from CaseHeader a join Opinion b on a.lni=b.lni
where courtid=13
and a.casetitleshort='", ttl, "'
and decisiondate='", dt, "'", sep=""))
#######################################################################################
# Dirt
#######################################################################################
# Ad-hoc check: pull all cases whose short title mentions "trump" and inspect
# the outcomes of those decided before 2017 (string comparison on the year prefix).
z <- dbGetQuery(db2, "select decisiondate, casetitleshort, outcome from CaseHeader where casetitleshort like '%trump%' order by decisiondate")
z[which(substring(z[,"decisiondate"], 1, 4)<"2017"),"outcome"]
|
7831c0f460709ead70ac3dabdecbf8e25b83d0fb
|
6519f4b85c9ac0597e1b00716adf3f2ae7641121
|
/figureS4_credible_interval/BEST/BESTexamplePower.R
|
d0a67974a09c369aafe5cca468c4afecbe98c071
|
[
"MIT"
] |
permissive
|
flu-crew/n2-diversity
|
0a9409e31c730c87561b81a6c0265f8836e573af
|
a0e164fc241b6c4ffab7b1c0f71e11facd8c7706
|
refs/heads/master
| 2023-08-02T06:32:29.027201
| 2021-09-14T16:06:25
| 2021-09-14T16:06:25
| 267,682,821
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,904
|
r
|
BESTexamplePower.R
|
# Version of May 26, 2012. Re-checked on 2015 May 08.
# John K. Kruschke
# johnkruschke@gmail.com
# http://www.indiana.edu/~kruschke/BEST/
#
# This program is believed to be free of errors, but it comes with no guarantee!
# The user bears all responsibility for interpreting the results.
# Please check the webpage above for updates or corrections.
#
### ***************************************************************
### ******** SEE FILE BESTexample.R FOR INSTRUCTIONS **************
### ***************************************************************
# NOTE(review): this script expects BEST.R and BESTexampleMCMC.Rdata in the current
# working directory; run BESTexample.R first to create the .Rdata file.
# OPTIONAL: Clear R's memory and graphics:
rm(list=ls()) # Careful! This clears all of R's memory!
graphics.off() # This closes all of R's graphics windows.
# Get the functions loaded into R's working memory:
source("BEST.R")
#-------------------------------------------------------------------------------
# RETROSPECTIVE POWER ANALYSIS.
# !! This section assumes you have already run BESTexample.R !!
# Re-load the saved data and MCMC chain from the previously conducted
# Bayesian analysis. This re-loads the variables y1, y2, mcmcChain, etc.
load( "BESTexampleMCMC.Rdata" )
# Estimate power at the observed sample sizes; results are saved to the .Rdata file named below.
power = BESTpower( mcmcChain , N1=length(y1) , N2=length(y2) ,
                   ROPEm=c(-0.1,0.1) , ROPEsd=c(-0.1,0.1) , ROPEeff=c(-0.1,0.1) ,
                   maxHDIWm=2.0 , maxHDIWsd=2.0 , maxHDIWeff=0.2 , nRep=1000 ,
                   mcmcLength=10000 , saveName = "BESTexampleRetroPower.Rdata" )
#-------------------------------------------------------------------------------
# PROSPECTIVE POWER ANALYSIS, using fictitious strong data.
# Generate large fictitious data set that expresses hypothesis:
# (rnd.seed=NULL means the simulated data differ on every run)
prospectData = makeData( mu1=108, sd1=17, mu2=100, sd2=15, nPerGrp=1000,
                         pcntOut=10, sdOutMult=2.0, rnd.seed=NULL )
y1pro = prospectData$y1 # Merely renames simulated data for convenience below.
y2pro = prospectData$y2 # Merely renames simulated data for convenience below.
# Generate Bayesian posterior distribution from fictitious data:
# (uses fewer than usual MCMC steps because it only needs nRep credible
# parameter combinations, not a high-resolution representation)
mcmcChainPro = BESTmcmc( y1pro , y2pro , numSavedSteps=2000 )
postInfoPro = BESTplot( y1pro , y2pro , mcmcChainPro , pairsPlot=TRUE )
save( y1pro, y2pro, mcmcChainPro, postInfoPro,
      file="BESTexampleProPowerMCMC.Rdata" )
# Now compute the prospective power for planned sample sizes:
N1plan = N2plan = 50 # specify planned sample size
powerPro = BESTpower( mcmcChainPro , N1=N1plan , N2=N2plan , showFirstNrep=5 ,
                      ROPEm=c(-1.5,1.5) , ROPEsd=c(-0.0,0.0) , ROPEeff=c(-0.0,0.0) ,
                      maxHDIWm=15.0 , maxHDIWsd=10.0 , maxHDIWeff=1.0 , nRep=1000 ,
                      mcmcLength=10000 , saveName = "BESTexampleProPower.Rdata" )
#-------------------------------------------------------------------------------
|
3f991add69b843ec502d4b6ace01320605698f94
|
27652814ed58788adc7c07e327825aaca1ea4034
|
/DMC_KT.r
|
086ae5a55c9fd8368398da107a5111c1a8719027
|
[] |
no_license
|
Libardo1/CAPSTONE-1
|
33c0496f014e5ec458b592ed08f06be93d5c4622
|
dc909977bd71b294a72cffdcb72f7138cac2bd09
|
refs/heads/master
| 2020-12-25T08:42:55.344999
| 2014-06-06T16:46:38
| 2014-06-06T16:46:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,998
|
r
|
DMC_KT.r
|
# Required libraries
library(lubridate)
library(beanplot)
library(doBy)
library(modeest)
library(plyr)
library(psych)
# We can each add in our working directories here - just un# and # as you check code out and back in
# setwd("C:/Users/Jim Braun/My Documents/Predict 498 Capstone/Data Mining Cup")
#
#
#
# Read in data from Google Drive
# Need to update path
# orders.train <- read.table("C:/Users/Katie/Google Drive/Predict 498 Capstone/orders_train.txt", header = TRUE, sep = ";")
# Jim's path
# orders.train <- read.table("C:/Users/Jim Braun/My Documents/Predict 498 Capstone/Data Mining Cup/orders_train.txt", header = TRUE, sep = ";")
# Fix: stray "7" characters around this call (7library(tseries)7) were a syntax error
# that prevented the entire file from parsing.
library(tseries)
library(forecast)
# Read in data from Google Drive
# Added the
orders.train <- read.table("orders_train.txt", header = TRUE, sep = ";")
#orders.train <- read.table("C:/Users/Katie/Google Drive/Predict 498 Capstone/orders_train.txt", header = TRUE, sep = ";")
# orders.train <- read.table("C:/Users/Jim Braun/My Documents/Predict 498 Capstone/Data Mining Cup/orders_train.txt", header = TRUE, sep = ";")
str(orders.train)
# Update date fields to date type instead of factors
orders.train$orderDate <- as.Date(orders.train$orderDate, format = "%Y-%m-%d")
orders.train$deliveryDate <- as.Date(orders.train$deliveryDate, format = "%Y-%m-%d")
orders.train$dateOfBirth <- as.Date(orders.train$dateOfBirth, format = "%Y-%m-%d")
orders.train$creationDate <- as.Date(orders.train$creationDate, format = "%Y-%m-%d")
str(orders.train)
summary(orders.train)
# Add date diff variables (ages converted from weeks to years via /52.25)
# NOTE(review): `unit=` relies on partial matching of difftime's `units` argument.
orders.train$timeToDeliver <- as.numeric(difftime(orders.train$deliveryDate,orders.train$orderDate,unit="days"))
orders.train$accountAge <- as.numeric(difftime(orders.train$orderDate,orders.train$creationDate,unit="weeks"))/52.25
orders.train$customerAge <- as.numeric(difftime(orders.train$orderDate,orders.train$dateOfBirth,unit="weeks"))/52.25
# Check (columns 15:17 are the three derived variables just added -- positional, so fragile)
summary(orders.train[15:17])
# timeToDeliver should never be negative, and age should never be negative
# call unreal values N/A as if a missing value
# without access to management, we need to deal with these values another way
# perhaps through imputation
orders.train$timeToDeliver <- ifelse(orders.train$timeToDeliver<0,NA,orders.train$timeToDeliver)
orders.train$customerAge <- ifelse(orders.train$customerAge<0,NA,orders.train$customerAge)
# age should also probably not be > 100 - what should we use for the cut-off?
orders.train$customerAge <- ifelse(orders.train$customerAge>100,NA,orders.train$customerAge)
# Recheck
summary(orders.train[15:17])
# Look at PDF of numeric variables given response
# Note that we're just using a random sample due to processing time for graphics
# (seed fixed so the 1000-row sample is reproducible)
set.seed(498)
sample_ind <- sample(seq_len(nrow(orders.train)), size = 1000)
orders.sample <- orders.train [sample_ind, ]
pdf(file = "bean_plots.pdf", width = 11, height = 8.5) ##/\open pdf/\##
beanplot(customerAge ~ returnShipment, orders.sample, side = "b", col = list("yellow", "orange"), border = c("yellow2","darkorange"), main = "Customer Age Distribution", ylab = "Age in Years", xaxt="n")
legend("topleft", bty="n",c("Not Returned", "Returned"), fill = c("yellow", "orange"))
beanplot(accountAge ~ returnShipment, orders.sample, side = "b", col = list("yellow", "orange"), border = c("yellow2","darkorange"), main = "Account Age Distribution", ylab = "Age in Years", xaxt="n")
legend("topleft", bty="n",c("Not Returned", "Returned"), fill = c("yellow", "orange"))
beanplot(timeToDeliver ~ returnShipment, orders.sample, side = "b", col = list("yellow", "orange"), border = c("yellow2","darkorange"), main = "Delivery Time Distribution", ylab = "Time in Days", xaxt="n")
legend("topleft", bty="n",c("Not Returned", "Returned"), fill = c("yellow", "orange"))
beanplot(price ~ returnShipment, orders.sample, side = "b", col = list("yellow", "orange"), border = c("yellow2","darkorange"), main = "Price Distribution", xaxt="n")
legend("topleft", bty="n",c("Not Returned", "Returned"), fill = c("yellow", "orange"))
dev.off() ##\/close pdf\/##
# Mean & count of response given nominal vars
# Only doing ones with few possible values- salutation & state
summaryBy(returnShipment ~ salutation, orders.train, FUN=c(length,mean))
summaryBy(returnShipment ~ state, orders.train, FUN=c(length,mean))
# More EDA - a breakout of stats by returnShipment
describeBy(orders.train, group=orders.train$returnShipment, mat=FALSE, type=3, digits=6)
# quick X vs Y plot (pairs plot of the 1000-row sample)
plot(orders.sample, cex=0.1)
#--------------------------#
# DEAL WITH MISSING VALUES #
#--------------------------#
# using mi package - get visual plot of missing obs
library(mi)
# Hmmm, too big to run. Any ideas guys?
pdf(file = "missing_obs_plots.pdf", width = 11, height = 8.5) ##/\open pdf/\##
missing.pattern.plot(orders.train, gray.scale = TRUE)
dev.off() ##\/close pdf\/##
# One method to check how many observations for each variable have missing values
sum(is.na(orders.train$orderItemID))
sum(is.na(orders.train$orderDate))
sum(is.na(orders.train$deliveryDate))
# No need to do rest, since this is also covered by summary command
#--------------------------#
# Imputation??? #
#--------------------------#
# need to decide on imputation method: mice?,
# NOTE(review): mice is loaded here but never used below -- imputation is still TODO.
library(mice)
# NOTE(review): everything between the DELETION markers below is commented-out
# exploratory code the authors already flagged for removal; kept verbatim for history.
#---for future DELETION-------#
# calculate customer's preferred size
# this was WAY more complicated than necessary...
# mvf = most frequent value (a.k.a mode), requires Modeest package and library
# have to make # obs match orders.sample
# also, why does this create 3 variables instead of 1?
# custMode <- summaryBy(size ~ customerID, data=orders.sample, FUN = function (x) {c(m=mfv(x))})
# custMode
# custMode <- customer
# sorting orders by customerID to cbind customer Mode to right observation
# r <- order(orders.sample$customerID)
# r
# sortID <- orders.sample[r,]
# sortID
# cbind(sortID,custMode[,2])
# Add column to denote whether the order size was not the customer's usual order (size mode)
# had to use custMode column instead of one cbinded in. Not sure why, but this works
# sortID$OrdNotMode <- ifelse((sortID$size != custMode[,2]),0,1)
# sortID$OrdNotMode
# beanplot(sortID$OrdNotMode ~ returnShipment, sortID, side = "b", col = list("yellow", "orange"), border = c("yellow2","darkorange"), main = "Unusual Size?", xaxt="n")
# legend("topleft", bty="n",c("Not Returned", "Returned"), fill = c("yellow", "orange"))
# let's try this again...
#nope
# mfv(orders.sample$size, group=orders.sample$customerID)
# mfv(orders.sample$size)
#nope
# myfun<-function(x){mfv(x)}
# summaryBy(orders.sample$size~orders.sample$customerID, data=orders.sample, FUN=myfun)
#nope
# OB <- orderBy(~orders.sample$customerID+orders.sample$size, data=orders.sample)
# OM <- function(d){c(NA,mfv(orders.sample$size)}
# v<-lapplyBy(~orders.sample$customerID, data=orders.sample, OM)
# orders.sample$OM <-unlist(v)
#-----END DELETION-----#
# Try this one for modes- but do we need to get a numeric and s/m/l?
# First convert from a factor to a string, standardizing case
orders.train$revSize <- toupper(as.character(orders.train$size))
# Add mode function - note that this only gives one mode if there is more than one
# Statistical mode of a vector, returned as a character string.
# Values are coerced to character before tabulating; when several values tie
# for the highest count, only the first after ordering by descending
# frequency is returned.
mymode <- function(x) {
  freq <- table(as.character(x))
  by_desc_count <- sort(-freq)
  names(by_desc_count)[1]
}
# Per-customer modal size (most frequent revSize value per customerID)
custMode <- summaryBy(revSize ~ customerID, orders.train, FUN=mymode)
# Time-series data - taking the mean of return aggregated by order date
# NOTE- it's been awhile since I've done a TS analysis, so really I was just looking at the plots & packages here. It will likely need a fair bit of revisions.
avgReturnByDay <- summaryBy(returnShipment ~ orderDate, orders.train, FUN=mean)
# Daily series starting at period 4 of 2012 with an annual (365-day) frequency
ts.orders <- ts(avgReturnByDay$returnShipment.mean, start=c(2012,4), frequency=365)
plot(ts.orders)
# ACF/PACF of the raw and first-differenced series (second argument is the max lag)
acf(ts.orders,20)
pacf(ts.orders,20)
lag.plot(ts.orders,9,do.lines=F)
plot(diff(ts.orders))
acf(diff(ts.orders),20)
pacf(diff(ts.orders),20)
# Stationarity test (tseries) and automatic ARIMA order selection (forecast)
adf.test(ts.orders)
auto.arima(ts.orders)
#list variables for cut and paste within code
# orderItemID : int 1 2 3 4 5 6 7 8 9 10 ...
# orderDate : Factor w/ 365 levels "2012-04-01","2012-04-02",..: 1 1 1 2 2 2 2 2 2 2 ...
# deliveryDate : Factor w/ 328 levels "?","1990-12-31",..: 3 3 3 1 2 2 2 3 3 3 ...
# itemID : int 186 71 71 22 151 598 15 32 32 57 ...
# size : Factor w/ 122 levels "1","10","10+",..: 110 103 103 110 60 119 60 119 119 119 ...
# color : Factor w/ 88 levels "?","almond","amethyst",..: 44 70 37 51 19 24 19 24 80 51 ...
# manufacturerID: int 25 21 21 14 53 87 1 3 3 3 ...
# price : num 69.9 70 70 39.9 29.9 ...
# customerID : int 794 794 794 808 825 825 825 850 850 850 ...
# salutation : Factor w/ 5 levels "Company","Family",..: 4 4 4 4 4 4 4 4 4 4 ...
# dateOfBirth : Factor w/ 14309 levels "?","1655-04-19",..: 7074 7074 7074 5195 6896 6896 6896 1446 1446 1446 ...
# state : Factor w/ 16 levels "Baden-Wuerttemberg",..: 1 1 1 13 11 11 11 10 10 10 ...
# creationDate : Factor w/ 775 levels "2011-02-16","2011-02-17",..: 69 69 69 323 1 1 1 1 1 1 ...
# returnShipment: int 0 1 1 0 0 0 0 1 1 1 ...
# timeToDeliver
# accountAge
# customerAge
#------------#
# t-tests #
#------------#
# We should add simple t-tests for any binary variables - can use for high risk indicators
# independent 2-group t-test
# NOTE(review): template placeholder -- `y` and `x` are not defined in this script,
# so this line errors if the file is sourced as-is.
t.test(y~x) # where y is numeric and x is a binary factor
# Plot Histograms for all variables by class
# will need to sub in our data names #
# I can't remember what MMST is for, but it was in a lot of my EDA code
library(MMST)
# NOTE(review): `wine` and `class` below are leftovers from a different data set;
# substitute orders.train columns before use.
pdf(file = "hist_plots.pdf", width = 11, height = 8.5)
nm <- names(wine)[1:13]
for (i in seq(along = nm)) {
hist.plot <- ggplot(wine,aes(x = eval(parse(text = paste("wine$", nm[i], sep=""))),
fill=factor(class))) + geom_histogram(alpha = 0.5)+xlab(nm[i])
print(hist.plot)
}
dev.off()
#-------------------------#
# Density Plots by class #
#-------------------------#
# includes a loop with output routed to a pdf file
# will need to sub in our data names #
# NOTE(review): `wine`, `diamonds`, `class`, `sqrtprice`, `channel`, `cut` in this
# section are template data from other projects, not defined in this script.
library(ggplot2)
pdf(file = "my_plots.pdf", width = 11, height = 8.5)
nm <- names(wine)[1:13]
for (i in seq(along = nm)) {
this.plot <- ggplot(wine,aes(x = eval(parse(text = paste("wine$", nm[i], sep=""))),
fill=factor(class))) + geom_density(alpha = 0.5)+xlab(nm[i])
print(this.plot)
}
dev.off()
#------------------------------------#
# To illustrate clustering by class #
# XY Plot by class #
#------------------------------------#
# lattice plots for key explanatory variables
# Shows X&Y relationship by class - Can use for EDA or after algorithm returns top vars
# But I think this may help identify interaction effects
library(lattice) # required for the xyplot() function
# this is just a template for integration #
xyplot(Flav ~ Color | class,
data = wine,
layout = c(6, 1),
aspect=1,
strip=function(...) strip.default(..., style=1),
xlab = "Flavanoids",
ylab = "Color Intensity")
# Along same lines, we can look at scatterplots
# The larger graphs with the overlay
# make the relationships a bit more visible
library(car)
# this is by class
scatterplot(Flav ~ Color | class, data=wine, boxplots=FALSE,
span=0.75, col=gray(c(0,0.5,0.7)),id.n=0)
# this is just X vs. Y. We can adjust for any specific variable comparisons we want to look at
scatterplot(carat ~ price, data=diamonds, boxplots=FALSE,
span=0.75,id.n=0)
#------------------------------------------#
# Conditioned XY Plots - to look in panels #
#------------------------------------------#
# this was a handy XYplot tool to look at the relationship between 2 variables, conditioned by other variables
# this was borrowed from our diamonds data set program
# showing the relationship between price and carat, while conditioning
# on cut and channel provides a convenient view of the diamonds data
# in addition, we jitter to show all points in the data frame
xyplot(jitter(sqrtprice) ~ jitter(carat) | channel + cut,
data = diamonds,
aspect = 1,
layout = c(3, 2),
strip=function(...) strip.default(..., style=1),
xlab = "Size or Weight of Diamond (carats)",
ylab = "Price")
#------------------------------------------------#
# to run some Weka algorithms - good for EDA too #
#------------------------------------------------#
library(RWeka)
# May need to add pruning rules for j48 and JRip #
# to run j48 in RWeka
# NOTE(review): orders.train has no "class" column (the response is returnShipment,
# per the variable list above) -- confirm the formula before running.
returns_j48 <- J48(class ~., data = orders.train)
returns_j48
# Fix: summarize the model fitted above (was summary(wine_j48), a leftover object
# name from template code that does not exist in this script)
summary(returns_j48)
# to add a 10-folds cross-validation (does it help?)
eval_j48 <- evaluate_Weka_classifier(returns_j48, numFolds = 10, complexity = FALSE,
                                     seed = 1, class = TRUE)
eval_j48
# To run JRip - Recall this shows rules - will not plot a tree
returns_JRip <- JRip(class ~., data = orders.train)
returns_JRip
summary(returns_JRip)
|
5a59d6386f8b97b2f5701f9ac26df9e73f1badf0
|
876ce11ab6150c9bd312a53b39ae4b4918eb3520
|
/HW7.R
|
82a2eb705aa3738423899f14cba8c3bfaa64936b
|
[] |
no_license
|
Muu24/stat744
|
2138778dce07e6e2ed6c99e5f7e806147ac01a32
|
8e1882a4e165165b080e4f7d59b657e668f2e219
|
refs/heads/master
| 2021-05-13T13:57:09.738598
| 2018-05-01T19:07:09
| 2018-05-01T19:07:09
| 116,723,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,315
|
r
|
HW7.R
|
# Leaflet demo: plot major Chinese cities as markers, then as circles scaled by population.
library(leaflet)
m <- leaflet()
dat <- addTiles(m)
##Test Beijing
addMarkers(dat,lng=116.391, lat=39.912, popup="Beijing")
##Test Major cities in China
##https://zh.wikipedia.org/wiki/%E4%B8%AD%E8%8F%AF%E4%BA%BA%E6%B0%91%E5%85%B1%E5%92%8C%E5%9C%8B%E5%9F%8E%E5%B8%82%E4%BA%BA%E5%8F%A3%E6%8E%92%E5%90%8D
# Inline CSV: city name, longitude, latitude, population (from the wiki page above)
dat1 <- read.csv(text = "City,lng,lat,pop
Beijing,116.4666667,39.9,19612368
Shanghai,121.4833333,31.23333333,23019196
Tianjin,117.1833333,39.15,12938693
Chongqing,106.5333333,29.53333333,16044027
Haerbin,126.6833333,45.75,9413359
Changchun,125.3166667,43.86666667,6421956
Shenyang,123.4,41.83333333,8106171
Huhehaote,111.8,40.81666667,2866615
Shijianzhuang,114.4666667,38.03333333,2921433
Taiyuan,112.5666667,37.86666667,4201592
Jinan,117,36.63333333,1064210
Zhengzhou,113.7,34.8,8627089
Xian,108.9,34.26666667,8467838
Lanzhou,103.8166667,36.05,3142523
Yinchuan,106.2666667,38.33333333,840869
Xining,101.75,36.63333333,1087192
Wulumuqi,87.6,43.8,1384349
Hefei,117.3,31.85,5702466
Nanjing,118.8333333,32.03333333,8003744
Hangzhou,120.15,30.23333333,8700373
Changsha,113,28.18333333,1279469
Nanchang,115.8666667,28.68333333,4331668
Wuhan,114.35,30.61666667,8312700
Chengdu,104.0833333,30.65,11108534
Guiyang,106.7,26.58333333,4322611
Fuzhou,119.3,26.08333333,1660688
Taibei,121.5166667,25.05,2705000
Guangzhou,113.25,23.13333333,5630733
Haikou,110.3333333,20.03333333,830192
Nanning,108.3333333,22.8,2608571
Kunming,102.6833333,25,1995438
Lasa,91.16666667,29.66666667,373946
Hongkong,114.1666667,22.3,7071576
Macao,113.5,22.2,552503")
addMarkers(dat,lng=dat1$lng,lat=dat1$lat,popup=dat1$City)
##add Populations as circle
# Circle radius is population in millions; %>% comes with the leaflet import
dat1%>%leaflet()%>%addTiles()%>%
addCircleMarkers(lng=~lng,lat=~lat,radius = ~pop/1000000)
#addCircleMarkers(dat,lat=dat1$lat,lng=dat1$lng,radius = dat1$pop)
|
76553961d97f370e62295a65217274d71d28f131
|
9dcc1b98baf0d4df40ef9470330993660d725bca
|
/man/new_classification.Rd
|
1d850d731a8801f28835c6d99c58b7c16ac2bd0e
|
[
"MIT"
] |
permissive
|
ropensci/taxa
|
b1aa00a0d8256916cdccf5b6a8f39e96e6d5ea9c
|
ed9b38ca95b6dd78ef6e855a1bb8f4a25c14b8fd
|
refs/heads/master
| 2022-04-30T23:28:44.735975
| 2022-04-12T05:10:10
| 2022-04-12T05:10:10
| 53,763,679
| 40
| 9
|
NOASSERTION
| 2021-07-08T18:11:32
| 2016-03-13T02:27:40
|
HTML
|
UTF-8
|
R
| false
| true
| 671
|
rd
|
new_classification.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classification.R
\name{new_classification}
\alias{new_classification}
\title{Minimal classification constructor}
\usage{
new_classification(taxonomy = taxonomy(), instances = integer())
}
\arguments{
\item{taxonomy}{A \code{\link[=taxonomy]{taxonomy()}} object.}
\item{instances}{The indexes of each instance of a taxon in the taxonomy. Can be any length.}
}
\value{
An \code{S3} object of class \code{taxa_classification}
}
\description{
Minimal classification constructor for internal use. Only use when the input is known to be valid
since few validity checks are done.
}
\keyword{internal}
|
748ea28c7c477e07e43d0477fdd8e216eb20b3d9
|
d0257bb73f8ea868b66500f48abbb9463a2b3629
|
/man/test_goodness_of_fit.Rd
|
feb9542e848fa77f749f4aa93279944a87de70d1
|
[] |
no_license
|
adamtclark/gauseR
|
014b31f6f9ff89e80f80d491ee6a8c87f855918a
|
c3bb249c4253851b6bf5c5180dbd9d0ba1cdc3ba
|
refs/heads/master
| 2021-11-30T22:20:21.132519
| 2021-11-28T12:08:19
| 2021-11-28T12:08:19
| 247,702,098
| 5
| 2
| null | 2020-03-16T12:56:57
| 2020-03-16T12:55:31
|
R
|
UTF-8
|
R
| false
| true
| 1,900
|
rd
|
test_goodness_of_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_goodness_of_fit.R
\name{test_goodness_of_fit}
\alias{test_goodness_of_fit}
\title{Test goodness of fit}
\usage{
test_goodness_of_fit(observed, predicted, bycolumn = FALSE, droptimecol = TRUE)
}
\arguments{
\item{observed}{A vector or matrix of observed values.}
\item{predicted}{A vector or matrix of predicted values.}
\item{bycolumn}{If TRUE, then separate values are calculated for each column in observed and predicted.}
\item{droptimecol}{If TRUE, will automatically remove the column labeled "time"
in the predicted variable. This is useful for dealing with the default output of the gause_wrapper function.
Defaults to TRUE.}
}
\description{
Tests goodness of fit for predictions vs. observations.
This statistic can be thought of in the same way as a classic "R2",
except that it measures scatter around the 1-1 line, rather than
around a fitted regression line of observed vs. predicted values.
Values close to 1 indicate that predictions match observations closely.
Values at or below zero indicate that predictions do not match observations
any better than the grand mean taken across all observations.
}
\examples{
#load competition data
data("gause_1934_science_f02_03")
#subset out data from species grown in mixture
mixturedat<-gause_1934_science_f02_03[gause_1934_science_f02_03$Treatment=="Mixture",]
#extract time and species data
time<-mixturedat$Day
species<-data.frame(mixturedat$Volume_Species1, mixturedat$Volume_Species2)
colnames(species)<-c("P_caudatum", "P_aurelia")
#run wrapper
#note - keeptimes=TRUE is needed, so that predicted time steps match
#observed time steps
gause_out<-gause_wrapper(time=time, species=species, keeptimes = TRUE)
#calculate goodness of fit
test_goodness_of_fit(observed=species, predicted=gause_out)
# > 0.9 for both time series - these are good fits!
}
|
5b7e1c71a34441acef1a7864512c44c4632590b4
|
c16e9ad8ca7ac5da16b51a8ef93cb7eb333c98db
|
/barPlot.r
|
efdd750ea45604fca44083e5827e5c4123a5bfd1
|
[
"MIT"
] |
permissive
|
Jefftopia/r_scripts
|
74f6d67dbce225d46e444483afcd7d1b8255066b
|
8578e1b5726a29ad8121b31ba731b7cff4286eb5
|
refs/heads/master
| 2016-09-06T18:31:26.658806
| 2014-05-30T16:00:13
| 2014-05-30T16:00:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,159
|
r
|
barPlot.r
|
#' Draw a bar chart of one (optionally faceted) categorical variable.
#'
#' Numeric columns are binned into quartile factors before plotting. The plot
#' is written to "bar_plot.svg" and also returned as a ggplot object.
#'
#' @param data A data.frame.
#' @param x Column name (string) for the x axis / facet variable.
#' @param title Plot title.
#' @param y Column name (string) mapped to the fill aesthetic; defaults to x.
#' @param h,w Height and width (inches) of the saved SVG.
#' @param facet If TRUE, facet by x and plot the distribution of y per panel.
#' @param professional If TRUE, apply ggthemes::theme_tufte().
#' @param color Unused; kept for backward compatibility with existing callers.
#' @return The ggplot object (also saved to "bar_plot.svg").
plotBarChart <- function(data, x, title, y = x, h = 5, w = 5,
                         facet = FALSE, professional = FALSE, color = FALSE) {
  # library() errors loudly when a package is missing; require() only warns.
  library(ggplot2)
  library(ggthemes)

  # Bin numeric columns into quartile factors so geom_bar() can count them.
  cut_to_quartiles <- function(v) {
    if (is.numeric(v)) {
      factor(cut(v, quantile(v)))
    } else {
      factor(v)
    }
  }
  data[[x]] <- cut_to_quartiles(data[[x]])
  data[[y]] <- cut_to_quartiles(data[[y]])

  if (facet) {
    # Facet by x; within each panel show the distribution of y.
    graph <- ggplot(data, aes(x = .data[[y]], fill = .data[[y]])) +
      geom_bar() +
      facet_wrap(as.formula(paste("~", x))) +
      theme(axis.text.x = element_text(angle = 90)) +
      ggtitle(title)
  } else {
    # BUG FIX: the fill colour must be *mapped* to the y column inside aes();
    # the original passed the column-name string as a literal colour via
    # geom_bar(fill = y), which overrode the mapping and errors for values
    # that are not valid colour names.
    graph <- ggplot(data, aes(x = .data[[x]], fill = .data[[y]])) +
      geom_bar() +
      ggtitle(title)
  }
  if (professional) {
    graph <- graph + theme_tufte()
  }
  ggsave(filename = "bar_plot.svg", plot = graph, height = h, width = w)
  return(graph)
}
plotBarChart(data = diamonds, x = "color", title = "My Plot", professional = TRUE)
|
1fc9f84314991b911817b28eef2eada91af8f092
|
f317887c7d83e62235ba2cf19065dcef9244f645
|
/man/textTable.ftable.Rd
|
77a5ed89a3239359637b573190687da7c50faf1f
|
[] |
no_license
|
rrprf/tablesgg
|
3fec64842266f8a7f28e29899d31c673b5dad09c
|
1a60f894869326b34eff1804c9378a1c05e78a79
|
refs/heads/master
| 2023-05-07T14:12:05.102317
| 2021-06-03T14:45:34
| 2021-06-03T14:45:34
| 318,291,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,485
|
rd
|
textTable.ftable.Rd
|
% Auto-generated documentation for function textTable.ftable
% 2021-06-02 11:12:19
\name{textTable.ftable}
\alias{textTable.ftable}
\title{Create a \code{texttable} from an \code{ftable} }
\description{
Create a \code{textTable} object representing a flattened multiway
contingency table.
}
\usage{
\method{textTable}{ftable}(x, colheadLabels=c("layers", "none", "paste"), sep=": ",
title=character(0), subtitle=character(0), foot=character(0), ...)
}
\arguments{
\item{x}{An \code{ftable} object, as produced by R's \code{ftable} function,
representing a flattened multiway contingency table.
}
\item{colheadLabels}{Character scalar; how to display names of column header variables. "none"
means to not display them. "layers" (the default) means to display them
as additional column header layers (so each header variable occupies two
rows instead of one). "paste" means to paste the variable name in front
of each of its values, separated by \code{sep}.
}
\item{sep}{Character scalar; string that separates a variable name from its values
when \code{colheadLabels} is "paste".
}
\item{title, subtitle, foot}{Optional character vectors providing annotation for the table. May be
empty (i.e., \code{character(0)}, the default).
}
\item{...}{Ignored, with a warning. (Included for compatibility with the generic.)
}
}
\value{
An object with S3 class \code{textTable}. See the documentation for the
generic for details about its structure.
}
\seealso{
\code{ftable}, \code{format.ftable}
}
\examples{
# From examples in '?ftable':
data(Titanic, package="datasets")
ft <- ftable(Titanic, row.vars = 1:2, col.vars = "Survived")
ttbl <- textTable(ft, title="Plotting an 'ftable'")
plot(ttbl)
data(mtcars, package="datasets")
ft <- ftable(mtcars$cyl, mtcars$vs, mtcars$am, mtcars$gear, row.vars = c(2, 4),
dnn = c("Cylinders", "V/S", "Transmission", "Gears"))
ttbl <- textTable(ft, colheadLabels="none")
plt1 <- plot(ttbl, title="Plotting an 'ftable'",
subtitle="No colheadLabels")
ttbl <- textTable(ft, colheadLabels="layers")
plt2 <- plot(ttbl, title="Plotting an 'ftable'",
subtitle="colheadLabels = 'layers'")
ttbl <- textTable(ft, colheadLabels="paste")
plt3 <- plot(ttbl, title="Plotting an 'ftable'",
subtitle="colheadLabels = 'paste'")
print(plt1, position=c("left", "top"))
print(plt2, position=c("left", "center"), newpage=FALSE)
print(plt3, position=c("left", "bottom"), newpage=FALSE)
}
|
f6b171bad267008fa62688b6030351c7122883e8
|
aacce0404d82b281d2e08c475066bde0c0088c5c
|
/man/parse_catch_legacy.Rd
|
f25078dc9d72272aa5ca8fd3746cba3e1153b7dd
|
[] |
no_license
|
Rindrics/gyokaikyor
|
ec4a152cbc8fe590b6c103bfe748d1c40436c869
|
72cec892eb921e6202464c03fff506b3d689525f
|
refs/heads/master
| 2022-10-02T00:58:33.036355
| 2020-06-05T02:28:49
| 2020-06-05T02:28:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 385
|
rd
|
parse_catch_legacy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_legacy.R
\name{parse_catch_legacy}
\alias{parse_catch_legacy}
\title{Parse legacy Excel data into data frame}
\usage{
parse_catch_legacy(legacy)
}
\arguments{
\item{legacy}{List which contains
\itemize{
\item{fname}
\item{spcs}
\item{year}
}}
}
\description{
Parse legacy Excel data into data frame
}
|
47e1991cc3f9991adff97d618e101c70b954fb83
|
cb3371ecfa7ae3706e09355e381d1c79c9d6f859
|
/tests/testthat/test_tau.R
|
d93dbb759bcfb00de0c1783f2cfdf9fa93b6ec68
|
[] |
no_license
|
cran/ircor
|
b046d2504f1a5552d1bf6fe2efb957b19a6f9b38
|
4680cdc3390308121faa0b8b6f1e8df74249bd48
|
refs/heads/master
| 2021-01-19T14:52:36.698551
| 2017-08-21T08:07:29
| 2017-08-21T08:07:29
| 100,934,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
test_tau.R
|
context("tau")
test_that("tau", {
  # Expected values for the three example data sets; tau is checked in
  # both argument orders because it must be symmetric.
  cases <- list(list(d = set1, expected = .9111),
                list(d = set2, expected = .2889),
                list(d = set3, expected = -.6889))
  for (cs in cases) {
    expect_equal(tau(cs$d$x, cs$d$y), cs$expected, tolerance = 5e-5)
    expect_equal(tau(cs$d$y, cs$d$x), cs$expected, tolerance = 5e-5)
  }
})
test_that("tau_a", {
  sets <- list(set1, set2, set3)
  # Without ties tau_a coincides with tau, and is symmetric in its arguments.
  no_tie_expected <- c(.9111, .2889, -.6889)
  for (i in seq_along(sets)) {
    expect_equal(tau_a(sets[[i]]$x, sets[[i]]$y), no_tie_expected[i], tolerance = 5e-5)
    expect_equal(tau_a(sets[[i]]$y, sets[[i]]$x), no_tie_expected[i], tolerance = 5e-5)
  }
  # With ties in y the expected values change (checked in one direction only,
  # as in the original test).
  tie_expected <- c(.8889, .3333, -.6222)
  for (i in seq_along(sets)) {
    expect_equal(tau_a(sets[[i]]$x, sets[[i]]$y.ties), tie_expected[i], tolerance = 5e-5)
  }
})
test_that("tau_b", {
  sets <- list(set1, set2, set3)
  # Without ties tau_b coincides with tau, and is symmetric.
  no_tie_expected <- c(.9111, .2889, -.6889)
  for (i in seq_along(sets)) {
    expect_equal(tau_b(sets[[i]]$x, sets[[i]]$y), no_tie_expected[i], tolerance = 5e-5)
    expect_equal(tau_b(sets[[i]]$y, sets[[i]]$x), no_tie_expected[i], tolerance = 5e-5)
  }
  # tau_b corrects for ties in both variables and stays symmetric.
  tie_expected <- c(.9398, .3765, -.6510)
  for (i in seq_along(sets)) {
    expect_equal(tau_b(sets[[i]]$x.ties, sets[[i]]$y.ties), tie_expected[i], tolerance = 5e-5)
    expect_equal(tau_b(sets[[i]]$y.ties, sets[[i]]$x.ties), tie_expected[i], tolerance = 5e-5)
  }
})
|
62187c45ad54f2bd4be8bc5797fb8d3fdf8ac65c
|
034b1554dfb45410ea0a41e989aab21fb845e4a4
|
/R/fixDates.R
|
383df2bd201d89d6799efaac535de2869e85db40
|
[
"MIT"
] |
permissive
|
tomjemmett/sqlhelpers
|
6139f817681eec7f85cc9198104720f611839e31
|
eea4232ca66d711134681158a716436f0555b5e7
|
refs/heads/master
| 2020-04-01T02:08:04.786711
| 2019-07-22T13:43:13
| 2019-07-22T13:43:13
| 152,766,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
fixDates.R
|
#' @importFrom purrr map
fixDates <- function(...) {
  args <- list(...)
  # A single list argument is treated as the collection itself rather than
  # as a one-element collection.
  if (length(args) == 1 && is.list(args[[1]])) {
    args <- args[[1]]
  }
  # Apply the converter to every element; returns a list.
  purrr::map(args, convertDateToString)
}
|
9b25f0c8a22bdfc59faadd781880d441cb16430e
|
cb89a6a2391a80411254649e5845df77c0309f56
|
/R/optVoicing.R
|
658096955b3923aea8009d9a9ac4c75cfee51cdf
|
[] |
no_license
|
simphon/PP2020
|
5fd8f9ca193ede57afc16ee6ea6b2c9a1a0d1705
|
0fabae638786ab175c1cc15fbfd5d173c17298bf
|
refs/heads/master
| 2022-12-19T14:01:27.030766
| 2020-09-14T10:55:47
| 2020-09-14T10:55:47
| 293,513,254
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,663
|
r
|
optVoicing.R
|
# optVoicing.R
# =============================================================================
# Daniel Duran
# Albert-Ludwigs-Universität Freiburg, Germany
# daniel.duran@germanistik.uni-freiburg.de
# http://simphon.net/
#
# =============================================================================
# INITIALIZATION
# =============================================================================
# NOTE(review): clearing the whole workspace is destructive for interactive
# users and is generally discouraged in scripts — consider removing.
rm(list = ls())
library('tidyverse')
library('textgRid')
library('lme4')
# Fixed seed so the random train/test split and random start are reproducible.
set.seed(42)
# Central configuration object. All *_range entries are c(min, max, span)
# and are consumed by min_max_norm() to map Praat parameters to/from [0, 1].
KONFIG <- list(isWindows = str_detect(Sys.getenv('OS'), pattern = "(W|w)indows") | str_detect(Sys.getenv('SystemRoot'), pattern = "(W|w)indows"),
               trainTestRatio = 0.85, # SET TO NA IN ORDER TO USE ALL DATA FOR OPTIMIZATION
               trainMinimum = 5, # THE ABSOLUTE MINIMUM OF REQUIRED TOKENS PER LABEL (CURRENTLY NOT IMPLEMENTED)
               runTest = FALSE, # ONLY IF trainTestRatio IS NOT NA
               # SKIPPING THE TEST SET MAY BE USEFUL IF YOU WANT/HAVE TO RESTART THE OPTIMIZATION REPEATEDLY
               # PRAAT PARAMETERS:
               pitch_floor_range = c( 30, 180, 180-30),
               pitch_ceiling_range = c(350, 950, 950-350),
               silence_threshold_range = c(0.01, 0.75, 0.75-0.01),
               voicing_threshold_range = c(0.20, 0.9, 0.9-0.2),
               voiced_unvoiced_cost_range = c(0.01, 0.55, 0.55-0.01),
               phone_tier = "PHO",
               phone_segments = "p,t,k,b,d,g,<V>",
               # SEARCH PARAMETERS:
               start = "random",
               # AVAILABLE OPTIONS FOR "start": "default", "random", "previous"
               # -- OPTION "previous" LOADS PREVIOUSLY TESTED PARAMETER SETTINGS (IF ANY, OTHERWISE THE DEFAULT SETTINGS ARE USED)
               # -- OPTION "random" OVERRIDES PREVIOUS RESULTS
               # -- OPTION "default" OVERRIDES RANDOM START AND PREVIOUS RESULTS
               RANDOM_LOW = 1e-8,
               RANDOM_HIGH = 1 - 1e-8,
               ROUND_HZ_PARAMETERS = TRUE,
               # Maps parameter names to their position in the Nelder-Mead vector x.
               parameterName_index = list(pitch_floor=1, pitch_ceiling=2, silence_threshold=3, voicing_threshold=4, voiced_unvoiced_cost=5),
               # Penalty added per segment when Praat produces no usable result
               # (see evaluate_voice_2()).
               DEFAULT_ERROR = 30,
               verbose = FALSE,
               # SETTINGS FOR THE NELDER-MEAD FUNCTION:
               # DEFAULTS -> SEE PACKAGE DOCUMENTATION FOR lme4
               # - maxfun (default 10000) maximum number of function evaluations (you may want to set this to a very small number first to test the script!)
               # - FtolAbs (default 1e-5) absolute tolerance on change in function values
               # - XtolRel (default 1e-7) relative tolerance on change in parameter values
               NelderMead = list(verbose = 2, ftolabs = 1e-5, xtorel = 1e-7, max_fun = 999),
               NM_RESULT_PREFIX = "nm_result",
               # ADJUST THESE PATHS:
               PRAAT.EXE = '/usr/bin/praat',
               PRAAT.SCRIPT = '/path/to/PP2020.git/Praat/Voicing/voice-Advanced.praat',
               outputDir = '/home/dd/Scratch/Optimization/Nelder-Mead',
               files.csv = '/home/dd/Scratch/Optimization/files.csv'
)
# -----------------------------------------------------------------------------
# CHECKING CONFIGURATION:
# Decide whether a train/test split is requested: a ratio of NA disables it,
# a ratio outside (0, 1) is a configuration error.
if (is.na(KONFIG$trainTestRatio)) {
  KONFIG$doTrainTest <- FALSE
} else if (KONFIG$trainTestRatio <= 0 || KONFIG$trainTestRatio >= 1) {
  stop("trainTestRatio must be in range (0,1)")
} else {
  KONFIG$doTrainTest <- TRUE
}
# =============================================================================
# TEXTGRID HELPER FUNCTIONS
# =============================================================================
#' Import a Praat TextGrid into a tibble (one row per interval or point).
#'
#' @param textGridFile Path to a TextGrid file readable by textgRid::TextGrid.
#' @param add.interval.number Add an `interval` column numbering labels within
#'   each tier (0 for the optional file-level row).
#' @param fix.encoding Round-trip the text through Windows-1252 to repair
#'   mis-encoded characters (needed on some Windows systems).
#' @param add.global.time Prepend one row spanning the whole file
#'   (tier = NA, text = NA).
#' @return A tibble with columns tmin, tmax, text, tier (and optionally
#'   interval), or NULL if the file could not be parsed.
importTextGridToTibble <- function(textGridFile, add.interval.number=TRUE, fix.encoding=TRUE, add.global.time=TRUE) {
  stopifnot(file.exists(textGridFile))
  # Any parse error/warning is reported and turned into a NULL return value.
  tg <- tryCatch(
    TextGrid(textGridFile),
    error=function(cond) {
      warning(gettextf("[import: %s]\n  %s", textGridFile, cond))
      return(NULL)
    },
    warning=function(cond) {
      warning(gettextf("[import: %s]\n  %s", textGridFile, cond))
      return(NULL)
    }
  )
  if(is.null(tg)) {
    return(NULL)
  }
  # Collect per-tier tibbles in a list and bind once at the end
  # (BUG FIX: the previous version grew the result with rbind() inside the
  # loop, which is O(n^2) in the number of tiers).
  parts <- list()
  if(add.global.time) {
    global.tbl <- tibble(tmin=tg@startTime,
                         tmax=tg@endTime,
                         text=NA_character_,
                         tier=NA_character_)
    if(add.interval.number){
      global.tbl$interval <- 0
    }
    parts[[length(parts) + 1]] <- global.tbl
  }
  for(tx in names(tg)) {
    tier <- tg[[tx]]
    N <- length(tier@labels)
    if(N>0){
      # inherits() is safer than comparing class(tier) == "IntervalTier"
      # (robust against subclasses / multi-element class vectors).
      if( inherits(tier, "IntervalTier") ){
        tmp.tbl <- tibble(tmin=tier@startTimes,
                          tmax=tier@endTimes,
                          text=tier@labels,
                          tier=tier@name)
      } else {
        # Point tier: a single time stamp per label, no end time.
        tmp.tbl <- tibble(tmin=tier@times,
                          tmax=NA,
                          text=tier@labels,
                          tier=tier@name)
      }
      if(add.interval.number){
        tmp.tbl$interval <- seq_len(N)
      }
      parts[[length(parts) + 1]] <- tmp.tbl
    }
  }
  tg.tbl <- bind_rows(parts)
  if(fix.encoding) {
    # Round-trip through Windows-1252 to repair mojibake produced on Windows.
    tg.tbl$text <- iconv(iconv(tg.tbl$text, from="UTF-8", to = "Windows-1252"), to="UTF-8")
  }
  return(tg.tbl)
}
# -----------------------------------------------------------------------------
# RETURNS A NAMED LIST
# Look up one interval in a TextGrid tibble, either by interval number
# (i.num) or by a time point t falling inside [tmin, tmax).
# Returns the matching row(s) as a named list of column vectors.
get_interval <- function(tg, tier.name, t=NA, i.num=NA) {
  hit <- if (is.na(t)) {
    filter(tg, tier == tier.name, interval == i.num)
  } else {
    filter(tg, tier == tier.name, tmin <= t & tmax > t)
  }
  as.list(hit)
}
# -----------------------------------------------------------------------------
# Fetch the point annotation located exactly at time t on a point tier,
# as a named list of column vectors (possibly empty).
get_point_at <- function(tg, point.tier, t) {
  as.list(filter(tg, tier == point.tier, tmin == t))
}
# =============================================================================
#
# =============================================================================
#' Load gold-standard voicing annotations for all files and compute, per
#' phone segment, the proportion of voiced/unvoiced duration together with
#' duration-weighted annotator-confidence scores.
#'
#' @param gold.files.tbl A tibble holding the data from the file <KONFIG$files.csv>
#'   (one row per recording; must contain column gold.voice.file).
#' @param target.tier Name of the phone tier to evaluate.
#' @param target.segments Phone labels to include.
#' @param voice.tier Interval tier with voiced/unvoiced labels.
#' @param confidence.tier Point tier with per-interval confidence values.
#' @param voiced.label Label marking voiced intervals on voice.tier.
#' @param squared.confidence Square the confidence weights before returning.
#' @param train.test.ratio Fraction assigned to the training set per room and
#'   label; NA marks everything as training data.
#' @return Tibble: index, interval, label, duration, voiced, unvoiced,
#'   conf.v, conf.u, train.
#' NOTE(review): the train/test split reads the global DATAFILES table
#' (rooms per file index) — confirm it is defined before calling.
load_gold_voice_annotations <- function(gold.files.tbl,
                                        target.tier = "PHO",
                                        target.segments = c("p","t","k","b","d","g","<V>"),
                                        voice.tier="voice", confidence.tier="confidence",
                                        voiced.label="V", squared.confidence = TRUE,
                                        train.test.ratio = NA
) {
  gold <- tibble(index = integer(),
                 interval = integer(),
                 label = character(),
                 duration = double(),
                 voiced = double(),
                 unvoiced = double(),
                 conf.v = double(),
                 conf.u = double() )
  for(iRow in 1:nrow(gold.files.tbl)){
    goldTG <- importTextGridToTibble(gold.files.tbl[iRow,]$gold.voice.file)
    # Only the phone segments of interest on the target tier are evaluated.
    gold.pho <- filter(goldTG, tier==target.tier, text %in% target.segments)
    for(tx in 1:nrow(gold.pho)){
      tMin <- gold.pho[tx,]$tmin
      intervalGold <- get_interval(tg = goldTG, tier.name=voice.tier, t = tMin)
      if(is.na(intervalGold$text) || str_length(intervalGold$text)==0){
        # No voicing annotation covering this phone: skip it.
        next()
      } else {
        tMax <- gold.pho[tx,]$tmax
        dur <- tMax - tMin
        v_dur <- 0.0
        subDurations <- c()
        subConfidences <- c()
        subVoiced <- c()
        currentStart <- tMin
        # Walk all voicing sub-intervals that end inside the phone segment,
        # accumulating voiced duration and per-sub-interval confidences.
        while(intervalGold$tmax <= tMax) {
          subDur <- intervalGold$tmax - currentStart
          isVoice <- FALSE
          if(intervalGold$text==voiced.label) {
            v_dur <- v_dur + subDur
            isVoice <- TRUE
          }
          subDurations <- c(subDurations, subDur)
          subVoiced <- c(subVoiced, isVoice)
          # Confidence points are anchored at the sub-interval start; missing
          # points yield NA.
          konf <- get_point_at(tg=goldTG, point.tier = confidence.tier, t=currentStart)$text
          if(length(konf) > 0){
            subConfidences <- c(subConfidences, as.numeric(konf) )
          } else {
            subConfidences <- c(subConfidences, NA )
          }
          intervalGold <- get_interval(tg = goldTG, tier.name=voice.tier, t = intervalGold$tmax)
          currentStart <- intervalGold$tmin
        }
        if(currentStart < tMax) {
          # The last voicing interval extends beyond the phone boundary:
          # count only the part inside [tMin, tMax].
          warning(gettextf("Voice label extending beyond phone segment? [%s]: [%s] %.4f > %.4f in %s\n",
                           gold.pho[tx,]$text, intervalGold$text, intervalGold$tmax, tMax,
                           gold.files.tbl[iRow,]$gold.voice.file ))
          subDur <- tMax - currentStart
          if(intervalGold$text==voiced.label) {
            v_dur <- v_dur + subDur
          }
          subDurations <- c(subDurations, subDur)
          konf <- get_point_at(tg=goldTG, point.tier = confidence.tier, t=currentStart)$text
          if(length(konf) > 0){
            subConfidences <- c(subConfidences, as.numeric(konf) )
          } else {
            subConfidences <- c(subConfidences, NA )
          }
        }
        if(v_dur>0) {
          v_perc <- v_dur / dur
        } else {
          v_perc <- 0.0
        }
        # Duration-weighted mean confidence, separately for voiced and
        # unvoiced sub-intervals; NA when a class does not occur.
        subConf_v <- subConfidences[subVoiced]
        subConf_u <- subConfidences[!subVoiced]
        subDur_v <- subDurations[subVoiced]
        subDur_u <- subDurations[!subVoiced]
        if(length(subConf_v)>0) {
          conf_v <- weighted.mean(x=subConf_v, w=(subDur_v / dur))
        } else {
          conf_v <- NA
        }
        if(length(subConf_u)>0) {
          conf_u <- weighted.mean(x=subConf_u, w=(subDur_u / dur))
        } else {
          conf_u <- NA
        }
        gold <- add_row(gold,
                        index = iRow,
                        interval = gold.pho[tx,]$interval,
                        label = gold.pho[tx,]$text,
                        duration = dur,
                        voiced = v_perc,
                        unvoiced = 1.0-v_perc,
                        conf.v = conf_v,
                        conf.u = conf_u )
      }# ENDIF
    }#ENDFOR tx
  }#ENDFOR iRow
  if(squared.confidence) {
    # USE SQUARED CONFIDENCE VALUES:
    gold$conf.v <- gold$conf.v ^2
    gold$conf.u <- gold$conf.u ^2
  }
  if(is.na(train.test.ratio)){
    gold$train <- TRUE
  } else {
    # Stratified split: per room and per phone label, mark roughly
    # train.test.ratio of the tokens as training data. When rounding up would
    # consume all tokens, one token is held back for the test set.
    gold$train <- FALSE
    allRooms <- sort(unique(DATAFILES$room))
    allLabels <- sort(unique(gold$label))
    for(xRoom in allRooms) {
      roomIndex <- which(DATAFILES$room == xRoom)
      for(lx in allLabels){
        labelIndex <- which(gold$label == lx & gold$index %in% roomIndex)
        M <- length(labelIndex)
        if(M==0){
          warning(gettextf("No [%s] labels for room <%s>", lx, xRoom))
          next()
        }
        N <- ceiling(M*train.test.ratio)
        gold[ sample(labelIndex, size = if_else(N==M, N-1, N)), ]$train <- TRUE
      }
    }
  }
  return(gold)
}
# =============================================================================
# PRAAT AND SEARCH HELPERS
# =============================================================================
# Default argument set for the "voice-Advanced" Praat script (mirrors the
# defaults of praat_voiceAdvanced() below). The values appear to follow
# Praat's standard pitch-analysis settings — confirm against the Praat
# documentation before changing.
DEFAULT_VOICEADVANCED <- list(time.step = 0.0,
                              pitch.floor = 75,
                              max.candidates = 15,
                              very.accurate = TRUE,
                              silence.threshold = 0.03,
                              voicing.threshold = 0.45,
                              octave.cost = 0.01,
                              octave.jump.cost = 0.35,
                              voiced.unvoiced.cost = 0.14,
                              pitch.ceiling = 600,
                              max.period.factor = 1.3,
                              max.amplitude.factor = 1.6,
                              verbose = FALSE,
                              write.log.file = TRUE )
# -----------------------------------------------------------------------------
# SCALING: map `val` from [min, max] to [0, 1], or back again when
# reverse = TRUE. `val.range` is c(min, max, span) as stored in KONFIG.
min_max_norm <- function(val, val.range, reverse=FALSE) {
  lo   <- val.range[1]
  span <- val.range[3]
  if (reverse) {
    val * span + lo
  } else {
    (val - lo) / span
  }
}
# -----------------------------------------------------------------------------
# HELPER FUNCTION FOR FILE IO
# Make a room name filesystem-safe: collapse runs of whitespace and runs of
# dots into single underscores.
format_room <- function(room) {
  room <- gsub("\\s+", "_", room)
  gsub("\\.+", "_", room)
}
# -----------------------------------------------------------------------------
#' For each room, scan its result directory for saved Nelder-Mead runs and
#' return the parameter vector with the smallest objective value.
#'
#' @param result.dir Root directory containing one sub-directory per room
#'   (named via format_room()).
#' @param all.rooms Character vector of room names.
#' @param default.p0 Fallback start parameters when a room has no results.
#' @param file.pattern Regex matching the saved Nelder-Mead .rds files.
#' @return Named list (one entry per room) of parameter vectors.
find_previous_optima <- function(result.dir, all.rooms, default.p0, file.pattern) {
  p0List <- vector("list", length = length(all.rooms))
  names(p0List) <- all.rooms
  for (xRoom in all.rooms) {
    roomDir <- file.path(result.dir, format_room(xRoom))
    allNMresults <- dir(roomDir, full.names = TRUE, pattern = file.pattern)
    if (length(allNMresults) == 0) {
      warning(gettextf("No Nelder-Mead results for room <%s> - Using default parameters", xRoom))
      p0List[[xRoom]] <- default.p0
      next()
    }
    roomOpt <- Inf
    optPar <- NA
    for (iNM in seq_along(allNMresults)) {
      nm.result <- readRDS(allNMresults[iNM])
      # BUG FIX: the original tested is.null("nm.result") — a string literal,
      # which is never NULL — so empty/corrupt result files were not skipped.
      if (is.null(nm.result)) {
        warning(gettextf("No Nelder-Mead results found in [%s]", allNMresults[iNM]))
        next()
      }
      # Keep the parameters of the best (smallest) objective value seen.
      if (nm.result$fval < roomOpt) {
        roomOpt <- nm.result$fval
        optPar <- nm.result$par
      }
      rm(nm.result)
    }
    p0List[[xRoom]] <- optPar
  }
  return(p0List)
}
# -----------------------------------------------------------------------------
# THIS IS BAD PROGRAMMING STYLE (USING GLOBAL VARIABLES)
# see: opt_fun
#
# Map a Nelder-Mead parameter vector (all components scaled to [0, 1]) back
# to the Praat parameter ranges configured in the global KONFIG. When
# do.round is TRUE the two Hz-valued parameters (pitch floor/ceiling) are
# rounded to whole Hz, matching the integer values Praat is called with.
get_parameters_from_NMx <- function(nm.x, do.round = KONFIG$ROUND_HZ_PARAMETERS) {
  # De-duplicated: un-scale all five parameters once, then round the two
  # Hz-valued ones on demand (the original repeated the whole list in both
  # branches).
  p <- list(pitch_floor          = min_max_norm(nm.x[KONFIG$parameterName_index$pitch_floor], KONFIG$pitch_floor_range, reverse=TRUE),
            pitch_ceiling        = min_max_norm(nm.x[KONFIG$parameterName_index$pitch_ceiling], KONFIG$pitch_ceiling_range, reverse=TRUE),
            silence_threshold    = min_max_norm(nm.x[KONFIG$parameterName_index$silence_threshold], KONFIG$silence_threshold_range, reverse=TRUE),
            voicing_threshold    = min_max_norm(nm.x[KONFIG$parameterName_index$voicing_threshold], KONFIG$voicing_threshold_range, reverse=TRUE),
            voiced_unvoiced_cost = min_max_norm(nm.x[KONFIG$parameterName_index$voiced_unvoiced_cost], KONFIG$voiced_unvoiced_cost_range, reverse=TRUE) )
  if (do.round) {
    p$pitch_floor   <- round(p$pitch_floor)
    p$pitch_ceiling <- round(p$pitch_ceiling)
  }
  return(p)
}
# Convenience wrapper: extract the Praat parameters from a complete
# Nelder-Mead result object (uses its $par vector).
get_parameters_from_NMresult <- function(nm.result) {
  get_parameters_from_NMx(nm.result$par)
}
# -----------------------------------------------------------------------------
#' Load previously evaluated Praat parameter combinations from .rds caches.
#'
#' Scans `data.dir` recursively for cache files, concatenates them and
#' de-duplicates rows describing the same parameter combination (summing
#' their call counts). Combinations whose cached error terms disagree are
#' discarded.
#'
#' @param data.dir Directory searched recursively for cache .rds files.
#' @param file.pattern Regex selecting the cache files.
#' @param show.warnings Emit a warning for inconsistent duplicate entries.
#' @return Tibble with columns pf, pc, st, vt, vc, fval, num.calls.
load_previous_parameters_to_tbl <- function(data.dir, file.pattern, show.warnings=TRUE) {
  prev.tbl <- tibble(pf=double(), pc=double(), st=double(), vt=double(), vc=double(), fval=double(), num.calls=integer())
  prevFiles <- dir(data.dir, recursive = TRUE, full.names = TRUE, pattern = file.pattern)
  N <- length(prevFiles)
  if(N > 0) {
    # THERE ARE rds-FILES WITH PREVIOUSLY EVALUATED PARAMETER COMBINATIONS:
    # WE IMPORT THEM IN ORDER TO SPEED UP THE COMPUTATIONS WITH NELDER-MEAD
    for(ix in seq_len(N)) {
      prev.tbl <- rbind(prev.tbl, readRDS(prevFiles[ix]))
    }
    # Composite key per parameter combination, used for de-duplication.
    prev.tbl <- mutate(prev.tbl, x = paste(pf, pc, st, vt, vc, sep = "#"), use = FALSE)
    schluessel <- unique(prev.tbl$x)
    N <- length(schluessel)
    if(N>0 && N < nrow(prev.tbl)) {
      for(xS in schluessel) {
        kIndex <- which(prev.tbl$x == xS)
        if(length(kIndex)==1) {
          # THERE IS ONLY ONE UNIQUE ROW OF PREVIOUS EVALUATIONS FOR THIS PARAMETER COMBINATION:
          prev.tbl[kIndex,]$use <- TRUE
        } else if (length(kIndex)>1) {
          # WE NEED TO COMBINE MULTIPLE ROWS (FROM DIFFERENT rds-FILES):
          tmp.tbl <- prev.tbl[kIndex,]
          if(length(unique(tmp.tbl$fval))>1){
            # Inconsistent cached error terms: drop the combination.
            # BUG FIX: show.warnings was accepted but ignored; it now
            # actually controls this warning.
            if(show.warnings) {
              warning(gettextf("Found %d different error terms for p=[%f, %f, %f, %f, %f] -> discarding parameter combination!",
                               length(unique(tmp.tbl$fval)), tmp.tbl[1,]$pf, tmp.tbl[1,]$pc, tmp.tbl[1,]$st, tmp.tbl[1,]$vt, tmp.tbl[1,]$vc ),
                      immediate. = TRUE)
            }
          } else {
            aufrufe <- sum(tmp.tbl$num.calls)
            prev.tbl[kIndex[1],]$num.calls <- aufrufe
            prev.tbl[kIndex[1],]$use <- TRUE
          }
        }
      }
    } else {
      # BUG FIX: when no duplicates exist (every key unique), the original
      # left all `use` flags FALSE and the filter below discarded the entire
      # cache. With no duplicates, every row is kept.
      prev.tbl$use <- TRUE
    }
    prev.tbl <- filter(prev.tbl, use == TRUE) %>% select(-x, -use)
    message(gettextf("Imported %d previously evaluated parameter combinations.", nrow(prev.tbl)))
  }
  return(prev.tbl)
}
# -----------------------------------------------------------------------------
# FUNCTION "praat_voiceAdvanced"
# RUNS PRAAT SCRIPT "Voicing/voice-Advanced.praat" AND RETURNS THE VOICING TABLE
# Runs the Praat script "Voicing/voice-Advanced.praat" on one sound file /
# TextGrid pair and returns Praat's voicing table (read from output.tsv.file),
# or NULL when Praat produced no output.
#
# A temporary wrapper script with all arguments baked in is generated because
# Windows' shell() does not reliably pass script arguments to Praat; the
# wrapper is executed without arguments and deleted afterwards.
praat_voiceAdvanced <- function (sound.file, textGrid.file, output.tsv.file,
                                 praat.exe = KONFIG$PRAAT.EXE,
                                 praat.script = KONFIG$PRAAT.SCRIPT,
                                 is.Windows = KONFIG$isWindows,
                                 # ARGUMENTS FOR THE PRAAT SCRIPT:
                                 target.tier = KONFIG$phone_tier,
                                 target.segments = KONFIG$phone_segments,
                                 time.step = 0.0,
                                 pitch.floor = 75,
                                 max.candidates = 15,
                                 very.accurate = TRUE,
                                 silence.threshold = 0.03,
                                 voicing.threshold = 0.45,
                                 octave.cost = 0.01,
                                 octave.jump.cost = 0.35,
                                 voiced.unvoiced.cost = 0.14,
                                 pitch.ceiling = 600,
                                 max.period.factor = 1.3,
                                 max.amplitude.factor = 1.6,
                                 verbose = FALSE,
                                 write.log.file = FALSE )
{
  stopifnot(file.exists(praat.exe),
            file.exists(praat.script),
            file.exists(sound.file),
            file.exists(textGrid.file) )
  # Praat expects integer values for these three parameters; warn when the
  # caller supplied a fractional value that will be rounded below.
  if(round(pitch.floor)!=pitch.floor) {
    warning(gettextf("using rounded (integer) value for pitch.floor=%f with %s", pitch.floor, sound.file))
  }
  if(round(max.candidates)!=max.candidates) {
    warning(gettextf("using rounded (integer) value for max.candidates=%f with %s", max.candidates, sound.file))
  }
  if(round(pitch.ceiling)!=pitch.ceiling) {
    warning(gettextf("using rounded (integer) value for pitch.ceiling=%f with %s", pitch.ceiling, sound.file))
  }
  # CREATE A TEMPORARY PRAAT SCRIPT WHICH WILL RUN THE VOICING EXTRACTION SCRIPT WITH THE PROVIDED PARAMETERS:
  # (BECAUSE WINDOWS DOES NOT LIKE THE EXECUTION OF shell() WITH ARGUMENTS FOR THE SCRIPT;
  # AS A WORKAROUND WE WRITE ALL ARGUMENTS TO A TEMPORARY SCRIPT, WHICH IS THEN CALLED
  # WITHOUT ANY ADDITIONALY ARGUMENTS AND DELETED AFTERWARDS)
  praatScriptArgs <- gettextf("\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", 0, %f, %d, %d, %d, %f, %f, %f, %f, %f, %d, %f, %f, %d, %d",
                              normalizePath(sound.file), normalizePath(textGrid.file),
                              target.tier, target.segments,
                              normalizePath(output.tsv.file, mustWork = FALSE),
                              time.step, round(pitch.floor), round(max.candidates), as.integer(very.accurate),
                              silence.threshold, voicing.threshold, octave.cost, octave.jump.cost, voiced.unvoiced.cost, round(pitch.ceiling),
                              max.period.factor, max.amplitude.factor, as.integer(verbose), as.integer(write.log.file) )
  tmp.praat.script <- tempfile(fileext = ".praat")
  tmp.praat.script.content <- c("# TEMPORARY PRAAT SCRIPT")
  tmp.praat.script.content <- c(tmp.praat.script.content,
                                gettextf("runScript: \"%s\", %s", normalizePath(praat.script), praatScriptArgs) )
  dateiVerbindung <- file(tmp.praat.script)
  writeLines(tmp.praat.script.content, dateiVerbindung)
  close(dateiVerbindung)
  if(!file.exists(tmp.praat.script)) {
    warning(gettextf("Could not create temp script %s", tmp.praat.script), immediate. = TRUE)
  }
  # CHANGE TO TEMPORARY DIRECTORY:
  currentWD <- getwd()
  setwd(tempdir())
  # BUG FIX: restore the working directory and delete the wrapper script even
  # if running Praat or reading its output fails (on.exit instead of trailing
  # cleanup statements that an error would skip).
  on.exit({
    setwd(currentWD)
    if (file.exists(tmp.praat.script)) file.remove(tmp.praat.script)
  }, add = TRUE)
  # RUN PRAAT SCRIPT:
  kom <- gettextf("\"%s\" --run %s", normalizePath(praat.exe), basename(tmp.praat.script))
  if(is.Windows) {
    shell(kom)
  } else {
    system(kom)
  }
  if(file.exists(output.tsv.file)) {
    suppressMessages( voice.tbl <- read_tsv(output.tsv.file) )
  } else {
    warning(gettextf("No Praat output found at %s", output.tsv.file), immediate. = TRUE)
    voice.tbl <- NULL
  }
  return(voice.tbl)
}
# =============================================================================
# EVALUATION OF PRAAT MEASUREMENTS
# =============================================================================
# Errors on voiced and unvoiced parts are actually complementary, but there are
# probably different confidences associated with voiced and unvoiced (and reverberation)
# annotations, so we compute them separately:
#
# =============================================================================
# Compare Praat's predicted voiced/unvoiced proportions against the gold
# annotations, weighting each segment's absolute error by the annotator
# confidence (conf.v / conf.u). Segments with no Praat result (missing row
# or NA prediction) contribute `default.error` to BOTH error components.
# Returns the scalar sum of the voiced and unvoiced error terms (optionally
# normalized to [0, ~2] by the worst-case total).
evaluate_voice_2 <- function(praat.tbl, # table with Praat output
                             gold.tbl, # table with reference annotations
                             default.error = KONFIG$DEFAULT_ERROR,
                             normalize.error=FALSE ) {
  fehlerV <- 0
  fehlerU <- 0
  for(rx in 1:nrow(gold.tbl)) {# rx=1
    # Match prediction and reference by interval number; only the first
    # matching prediction row is used.
    pred <- filter(praat.tbl, interval==gold.tbl[rx,]$interval)
    if(nrow(pred)>0) {
      if(is.na(pred[1,]$voiced)){
        #warning(gettextf("PRAAT RESULT IS NA FOR %s: %.0f [%s]", DATAFILES[gold[rx,]$index,]$gold.voice.file, gold[rx,]$interval, gold[rx,]$label), immediate. = TRUE)
        fv <- default.error
        fu <- default.error
      } else {
        # NA confidence means "no annotation of that class" — contributes 0.
        if(is.na(gold.tbl[rx,]$conf.v)){
          fv <- 0
        } else {
          fv <- (abs(gold.tbl[rx,]$voiced - pred[1,]$voiced) * gold.tbl[rx,]$conf.v)
        }
        if(is.na(gold.tbl[rx,]$conf.u)){
          fu <- 0
        } else {
          fu <- (abs(gold.tbl[rx,]$unvoiced - pred[1,]$unvoiced) * gold.tbl[rx,]$conf.u)
        }
      }
      fehlerV <- fehlerV + fv
      fehlerU <- fehlerU + fu
    } else {
      #warning(gettextf("NO PRAAT RESULT FOR %s: %.0f [%s]", DATAFILES[gold[rx,]$index,]$gold.voice.file, gold[rx,]$interval, gold[rx,]$label), immediate. = TRUE)
      fehlerV <- fehlerV + default.error
      fehlerU <- fehlerU + default.error
    }
  }
  if(normalize.error) {
    # Divide by the worst case (every segment at default.error).
    fehlerV <- fehlerV / (nrow(gold.tbl) * default.error)
    fehlerU <- fehlerU / (nrow(gold.tbl) * default.error)
  }
  return( fehlerV + fehlerU )
}
# =============================================================================
# FUNCTION TO BE OPTIMIZED
# =============================================================================
# THIS IS BAD PROGRAMMING STYLE (USING GLOBAL VARIABLES)
# EVERYTHING NEEDS TO HAPPEN INSIDE THIS FUNCTION
# INPUT: A NUMERIC VECTOR (THE PARAMETERS FOR PRAAT; MAPPED TO 0...1)
# OUTPUT: A SCALAR (THE ERROR TERM, THIS SHOULD BE MINIMIZED)
# Objective function minimized by Nelder-Mead. The input/reference data and
# the result cache are smuggled in as attributes on the parameter vector x
# ("input", "cache", "gold") because the optimizer only passes x itself.
# For each input file, Praat is run with the de-normalized parameters and the
# voicing error is accumulated; previously evaluated parameter combinations
# are answered from the cache without re-running Praat.
# NOTE(review): new evaluations are not written back into the cache here —
# presumably that happens in the caller; confirm.
opt_fun_2 <- function(x) {# x<-p0
  stopifnot(!is.null(inputFiles.tbl <- attr(x, "input")),
            nrow(inputFiles.tbl) > 0,
            !is.null(prev.tbl <- attr(x, "cache")),
            !is.null(gold.tbl <- attr(x, "gold")))
  # DETERMINE PARAMETER VALUES FOR PRAAT
  praatPar <- get_parameters_from_NMx(x)
  prevX <- which(prev.tbl$pf == praatPar$pitch_floor &
                   prev.tbl$pc == praatPar$pitch_ceiling &
                   prev.tbl$st == praatPar$silence_threshold &
                   prev.tbl$vt == praatPar$voicing_threshold &
                   prev.tbl$vc == praatPar$voiced_unvoiced_cost )
  if(length(prevX) > 0) {
    # THE CURRENT INPUT VECTOR x HAS ALREADY BEEN EVALUATED
    # RETURN THE CACHED VALUE:
    return(prev.tbl[prevX,]$fval)
  }
  fehler <- 0.0
  # RUN PRAAT
  for(ix in 1:nrow(inputFiles.tbl)){# ix=1
    #if(verbose) cat('+')
    iFile <- inputFiles.tbl[ix,]$gold.voice.file
    wavFile <- inputFiles.tbl[ix,]$audio.file
    # Praat writes its voicing table to a per-file temporary TSV.
    tmp.tsv <- tempfile(fileext = ".tsv")
    tmp.tbl <- NULL
    suppressWarnings (
      tmp.tbl <- praat_voiceAdvanced(sound.file = wavFile,
                                     textGrid.file = iFile,
                                     output.tsv.file = tmp.tsv,
                                     #target.tier = config$phone_tier,
                                     #target.segments = config$phone_segments,
                                     pitch.floor = praatPar$pitch_floor,
                                     pitch.ceiling = praatPar$pitch_ceiling,
                                     silence.threshold = praatPar$silence_threshold,
                                     voicing.threshold = praatPar$voicing_threshold,
                                     voiced.unvoiced.cost = praatPar$voiced_unvoiced_cost
                                     #write.log.file = FALSE,
                                     #praat.exe = config$PRAAT.EXE,
                                     #praat.script = config$PRAAT.SCRIPT,
                                     #is.Windows = config$isWindows
      )
    )
    if(is.null(tmp.tbl) || nrow(tmp.tbl)==0) {
      warning(gettextf("No Praat results for %s!", iFile))
      #TODO SOME DEFAULT ERROR VALUE SHOULD BE ADDED HERE (DEPENDING ON THE NUMBER OF DATA POINTS)
    } else {
      #if(verbose) cat('.')
      # EVALUATE ACCURACY OF PRAAT'S VOICING ANALYSIS
      f <- evaluate_voice_2(praat.tbl = tmp.tbl, gold.tbl = gold.tbl )
      if(is.na(f)) {
        warning(gettextf("ERROR TERM IS NA FOR %s", iFile), immediate.=TRUE)
        f <- 0 # THIS SHOULD ACTUALLY BE A LARGE VALUE, SINCE WE ARE MINIMIZING FOR f!
      }
      fehler <- fehler + f
    }
    file.remove(tmp.tsv)
  }
  # RETURN ERROR
  #if(OPT_VERBOSE) cat(gettextf(" VALUE (VOICE ERROR) = %.6e\n", ausgabe$fehler))
  return(fehler)
}
# =============================================================================
#
# =============================================================================
#' Print a console summary of a Nelder-Mead run and persist it to disk.
#'
#' @param nm.result Result object with $par, $fval, $convergence, $message.
#' @param outputDir Directory the .rds summary is written to.
#' @param room Room label (used only in the console output).
#' @param prev_pars.tbl Cache table of evaluated parameter combinations
#'   (columns fval, num.calls) used for the closing statistics.
#' @param time.stamp String embedded in the output file name.
plot_and_save_results <- function(nm.result, outputDir, room, prev_pars.tbl, time.stamp)
{
  outFile <- file.path(outputDir, paste0("nm_results_", time.stamp, ".rds"))
  cat(gettextf(" NELDER-MEAD RESULTS (ROOM %s), %s:\n", room, time.stamp))
  saveRDS(object = nm.result, file = outFile)
  cat(gettextf(" - OPTIMAL x=[%f, %f, %f, %f, %f]\n", nm.result$par[1], nm.result$par[2], nm.result$par[3], nm.result$par[4], nm.result$par[5] ))
  cat(gettextf(" - OPTIMAL fval=%f\n", nm.result$fval))
  cat(gettextf(" - CONVERGENCE =(%d) %s\n", nm.result$convergence, nm.result$message))
  optimalParameters <- get_parameters_from_NMresult(nm.result)
  # BUG FIX: get_parameters_from_NMresult() returns a *list*; indexing it
  # with single brackets (optimalParameters[1]) yields one-element sub-lists,
  # which gettextf's %f cannot format ("unsupported type" error). Access the
  # named elements instead.
  cat(gettextf(" - PARAMETERS  =[%8.6f, %8.6f, %8.6f, %8.6f, %8.6f]\n",
               optimalParameters$pitch_floor,
               optimalParameters$pitch_ceiling,
               optimalParameters$silence_threshold,
               optimalParameters$voicing_threshold,
               optimalParameters$voiced_unvoiced_cost))
  cat(gettextf(" - Number of parameter combinations: %4d\n - Total number of function calls:   %4d\n", nrow(prev_pars.tbl), sum(prev_pars.tbl$num.calls) ))
}
# -----------------------------------------------------------------------------
# Create a backup copy of a file, inserting a suffix before its extension.
#
# f      : path of the file to back up.
# suffix : string inserted before the file extension of the copy; defaults
#          to the file's modification time formatted "YYYY-mm-dd+HHMMSS".
# warn   : emit a warning when `f` does not exist?
#
# Returns TRUE when the copy succeeded, FALSE otherwise (missing source
# file, or file.copy() failure).
#
# BUGFIX: the original used str_locate(f, "\\.") which finds the FIRST dot
# in the WHOLE path, so a dot in a directory name ("out.dir/x.rds")
# produced a corrupted target path, and a file without any dot crashed on
# the NA match position. The suffix is now inserted before the LAST dot of
# the basename; extension-less names simply get the suffix appended.
backup_File <- function(f, suffix=NULL, warn=TRUE) {
  if (!file.exists(f)) {
    if (warn) {
      warning(gettextf("File not found: %s", f))
    }
    return(FALSE)
  }
  if (is.null(suffix)) {
    suffix <- format(file.mtime(f), format="%Y-%m-%d+%H%M%S")
  }
  base <- basename(f)
  extPos <- regexpr("\\.[^.]*$", base)  # position of the last dot, -1 if none
  if (extPos > 0) {
    newBase <- paste0(substr(base, 1, extPos - 1), "_", suffix, substring(base, extPos))
  } else {
    newBase <- paste0(base, "_", suffix)
  }
  newFile <- file.path(dirname(f), newBase)
  ok <- file.copy(from=f, to=newFile, copy.date = TRUE)
  return(ok)
}
# =============================================================================
# NELDER-MEAD
# =============================================================================
# THIS RE-IMPLEMENTS THE FUNCTION lme4::Nelder_Mead IN ORDER TO USE ADDITIONAL
# ATTRIBUTED ON THE INPUT AND OUTPUT ARGUMENTS OF THIS FUNCTION AND THE FUNCTION
# fn = opt_fun_2
# SEE THE DOCUMENTATION OF THE lme4 PACKAGE FOR DETAILS ON THE ORIGINAL
# IMPLEMENTATION!
#
# Drop-in re-implementation of lme4::Nelder_Mead (see the lme4 docs for the
# original). Functional differences to the lme4 version:
#   * the start vector `param0` carries three attributes ("input", "cache",
#     "gold") which are re-attached to every candidate point before calling
#     `fn`, so the objective function can reach its data;
#   * every function evaluation is recorded via update_NM_cache_tbl(), and
#     the resulting cache is returned in the extra list element `DD.cache`;
#   * the optimum is returned under the name `param0` (lme4 uses `par`).
#
# fn      : objective function of exactly ONE argument (here: opt_fun_2).
# param0  : numeric start vector (normalized parameters), with the
#           attributes described above.
# lower, upper : box constraints, same length as param0.
# control : optimizer controls (xst, xt, verbose, iprint, maxfun, FtolAbs,
#           FtolRel, XtolRel, MinfMax, warnOnly) — semantics as in lme4.
#
# Returns a list: fval, param0 (optimum), convergence (<= 0), NM.result,
# message, control, feval, DD.cache.
my_Nelder_Mead <- function (fn, param0, lower = rep.int(-Inf, n), upper = rep.int(Inf, n), control = list())
{
  ## DD>>
  # Detach the custom data attributes; they are re-attached to each
  # candidate point inside the optimization loop below.
  cache.tbl <- attr(param0, "cache")
  dateien <- attr(param0, "input")
  gold.tbl <- attr(param0, "gold")
  ## <<DD
  n <- length(param0)
  # Default initial simplex step (xst) and convergence tolerance (xt),
  # exactly as in the lme4 original.
  if (is.null(xst <- control[["xst"]]))
    xst <- rep.int(0.02, n)
  if (is.null(xt <- control[["xt"]]))
    xt <- xst * 5e-04
  control[["xst"]] <- control[["xt"]] <- NULL
  if (is.null(verbose <- control[["verbose"]]))
    verbose <- 0
  control[["verbose"]] <- NULL
  # Map the verbosity level (0..3) to the NelderMead print interval.
  if (is.null(control[["iprint"]])) {
    control[["iprint"]] <- switch(as.character(min(as.numeric(verbose),3L)), `0` = 0, `1` = 20, `2` = 10, `3` = 1)
  }
  stopifnot(is.function(fn), length(formals(fn)) == 1L,
            (n <- length(param0 <- as.numeric(param0))) == length(lower <- as.numeric(lower)),
            length(upper <- as.numeric(upper)) == n,
            length(xst <- as.numeric(xst)) == n, all(xst != 0),
            length(xt <- as.numeric(xt)) == n )
  # Mutable reference-class optimizer object (from lme4).
  nM <- NelderMead$new(lower = lower, upper = upper, x0 = param0, xst = xst, xt = xt)
  # Merge user controls with defaults; unknown entries only warn.
  cc <- do.call(
    function(iprint = 0L, maxfun = 10000L, FtolAbs = 1e-05, FtolRel = 1e-15, XtolRel = 1e-07, MinfMax = -.Machine$double.xmax, warnOnly = FALSE, ...)
    {
      if (length(list(...)) > 0)
        warning("unused control arguments ignored")
      list(iprint = iprint, maxfun = maxfun, FtolAbs = FtolAbs,
           FtolRel = FtolRel, XtolRel = XtolRel, MinfMax = MinfMax,
           warnOnly = warnOnly)
    },
    control)
  nM$setFtolAbs(cc$FtolAbs)
  nM$setFtolRel(cc$FtolRel)
  nM$setIprint(cc$iprint)
  nM$setMaxeval(cc$maxfun)
  nM$setMinfMax(cc$MinfMax)
  it <- 0
  # Ask/tell loop: xeval() proposes the next point, newf() feeds back its
  # objective value; a non-zero status code terminates the loop.
  repeat {
    it <- it + 1
    eingabe <- nM$xeval()
    # Re-attach the data opt_fun_2 needs (input file table, evaluation
    # cache, gold-standard annotations).
    attr(eingabe, "input") <- dateien
    attr(eingabe, "cache") <- cache.tbl
    attr(eingabe, "gold") <- gold.tbl
    wert <- fn(eingabe)
    nMres <- nM$newf(wert)
    # Record this evaluation so repeated parameter sets can be skipped.
    cache.tbl <- update_NM_cache_tbl(x = eingabe, f.value = wert, cache.tbl = cache.tbl)
    #nMres <- nM$newf(fn(nM$xeval()))
    if (nMres != 0L)
      break
  }
  cmsg <- "reached max evaluations"
  if (nMres == -4) {
    cmsg <- warning(sprintf("failure to converge in %d evaluations",
                            cc$maxfun))
    nMres <- 4
  }
  # Status messages indexed by nMres + 4 (nMres ranges over -4..4).
  msgvec <- c("nm_forced", "cannot generate a feasible simplex",
              "initial x is not feasible", "active", "objective function went below allowed minimum",
              "objective function values converged to within tolerance",
              "parameter values converged to within tolerance",
              cmsg)
  # Negative status codes are failures: warn or stop per control$warnOnly.
  if (nMres < 0) {
    (if (cc$warnOnly)
      warning
    else stop)(msgvec[nMres + 4])
  }
  list(fval = nM$value(), param0 = nM$xpos(), convergence = pmin(0, nMres),
       NM.result = nMres, message = msgvec[nMres + 4],
       control = c(cc, xst = xst, xt = xt), feval = it,
       DD.cache = cache.tbl )
}
# -----------------------------------------------------------------------------
# Record one objective-function evaluation in the Nelder-Mead cache.
#
# x        : normalized parameter vector of length 5 (NM coordinates).
# f.value  : objective value computed for x.
# cache.tbl: tibble with one row per distinct Praat parameter combination;
#            columns pf, pc, st, vt, vc (parameters), fval, num.calls.
#
# Returns the updated cache table: a new row is appended for an unseen
# combination, otherwise only its call counter is incremented. More than
# one matching row indicates a corrupted cache and triggers a warning.
update_NM_cache_tbl <- function(x, f.value, cache.tbl) {
  stopifnot(length(x)==5)
  praatX <- get_parameters_from_NMx(x)
  # Exact equality is intentional: identical NM coordinates always map to
  # bit-identical Praat parameters, so a cache hit is an exact match.
  ix <- which(cache.tbl$pf == praatX$pitch_floor &
                cache.tbl$pc == praatX$pitch_ceiling &
                cache.tbl$st == praatX$silence_threshold &
                cache.tbl$vt == praatX$voicing_threshold &
                cache.tbl$vc == praatX$voiced_unvoiced_cost )
  if(length(ix)==0) {
    # First evaluation of this combination: append a fresh cache row.
    cache.tbl <- add_row(cache.tbl,
                         pf=praatX$pitch_floor, pc=praatX$pitch_ceiling, st=praatX$silence_threshold, vt=praatX$voicing_threshold, vc=praatX$voiced_unvoiced_cost,
                         fval=f.value, num.calls=1L)
  } else if(length(ix)==1) {
    # Seen before: just count the repeated evaluation. Column-wise update
    # is clearer than the original `cache.tbl[ix,]$num.calls <- ...` form.
    cache.tbl$num.calls[ix] <- cache.tbl$num.calls[ix] + 1L
  } else {
    # Duplicate rows should be impossible; typo "Inconsitend" fixed.
    warning(gettextf("Inconsistent NM_cache for Praat p=[%f,%f,%f,%f,%f]; value=%f -- found %d rows in table!",
                     praatX$pitch_floor, praatX$pitch_ceiling, praatX$silence_threshold, praatX$voicing_threshold, praatX$voiced_unvoiced_cost,
                     f.value, length(ix)))
  }
  return(cache.tbl)
}
# =============================================================================
# Byte-compile the hot-path functions (compiler::cmpfun): these are called
# once per Nelder-Mead objective evaluation, so compilation pays off.
# NOTE(review): cmpfun is presumably brought into scope earlier in this
# file (library(compiler)) — confirm against the file header.
get_parameters_from_NMx <- cmpfun(get_parameters_from_NMx)
update_NM_cache_tbl <- cmpfun(update_NM_cache_tbl)
evaluate_voice_2 <- cmpfun(evaluate_voice_2)
opt_fun_2 <- cmpfun(opt_fun_2)
my_Nelder_Mead <- cmpfun(my_Nelder_Mead)
# =============================================================================
#
# =============================================================================
# --- Script phase 1: load inputs and build the initial parameter vector ---
timestamp()
cat("PREPARING GOLD DATA...\n")
# DATAFILES: one row per recording (audio file + gold TextGrid + room).
suppressMessages( DATAFILES <- read_csv(KONFIG$files.csv) )
# GOLDVOICE: gold-standard voicing annotations; optionally split into
# train/test according to the configured ratio (NA disables the split).
GOLDVOICE <- load_gold_voice_annotations(gold.files.tbl = DATAFILES, train.test.ratio = ifelse(KONFIG$doTrainTest, KONFIG$trainTestRatio, NA))
cat("INITIALIZING OUTPUT DIRECTORIES...\n")
allRooms <- sort(unique(DATAFILES$room))
# One output subdirectory per room.
for(xRoom in allRooms) {
  dir.create(path = file.path(KONFIG$outputDir, format_room(xRoom)), recursive = TRUE, showWarnings = FALSE)
}
cat("INITIALIZING VECTOR x0...\n")
# INITIAL PARAMETERS = PRAAT DEFAULTS, min-max-normalized into [0,1] so the
# Nelder-Mead search operates on a common scale for all five parameters.
praat_p0 <- c(min_max_norm(DEFAULT_VOICEADVANCED$pitch.floor, KONFIG$pitch_floor_range),
              min_max_norm(DEFAULT_VOICEADVANCED$pitch.ceiling, KONFIG$pitch_ceiling_range),
              min_max_norm(DEFAULT_VOICEADVANCED$silence.threshold, KONFIG$silence_threshold_range),
              min_max_norm(DEFAULT_VOICEADVANCED$voicing.threshold, KONFIG$voicing_threshold_range),
              min_max_norm(DEFAULT_VOICEADVANCED$voiced.unvoiced.cost, KONFIG$voiced_unvoiced_cost_range) )
# Choose the start point: Praat defaults, a random point, or per-room
# optima recovered from a previous run ("previous" leaves p0 NULL and
# fills it per room inside the optimization loop below).
if(KONFIG$start == "default") {
  cat("STARTING WITH DEFAULT VECTOR x0\n")
  p0 <- praat_p0
  p0List <- NULL
} else if(KONFIG$start == "random") {
  cat("STARTING WITH RANDOM VECTOR x0\n")
  p0 <- runif(length(praat_p0), min = KONFIG$RANDOM_LOW, max = KONFIG$RANDOM_HIGH)
  p0List <- NULL
} else if(KONFIG$start == "previous") {
  cat("STARTING WITH PREVIOUSLY GENERATED VECTOR x0\n")
  p0 <- NULL
  p0List <- find_previous_optima(KONFIG$outputDir, all.rooms=allRooms, default.p0=praat_p0, file.pattern = paste0(KONFIG$NM_RESULT_PREFIX, ".+\\.rds"))
} else {
  stop(gettextf("Unknown parameter in KONFIG$start: \"%s\"", KONFIG$start))
}
# -----------------------------------------------------------------------------
# RUN NELDER-MEAD OPTIMIZATION
# -----------------------------------------------------------------------------
# --- Script phase 2: per-room Nelder-Mead optimization ---
# max_fun < 1 is the configured way to skip the (expensive) optimization.
if(KONFIG$NelderMead$max_fun < 1) {
  warning("SKIPPING NELDER-MEAD (max_fun < 1)!", immediate. = TRUE)
} else {
  cat("RUNNING NELDER-MEAD OPTIMIZATION FOR ALL ROOMS...\n")
  # NOTE(review): 1:length(allRooms) misbehaves for an empty vector;
  # seq_along(allRooms) would be the robust idiom here.
  for(iRoom in 1:length(allRooms)) {# iRoom = 1
    timestamp()
    cat(gettextf("OPTIMIZING FOR ROOM: <%s>\n", allRooms[iRoom]))
    indexRooms <- which(DATAFILES$room == allRooms[iRoom])
    if(length(indexRooms)==0) {
      warning(gettextf("No input data for room <%s>?", allRooms[iRoom]))
      next()
    }
    if(!is.null(p0List)) {
      p0 <- p0List[[ allRooms[iRoom] ]]
    }# ELSE all rooms use the same p0 (either default or random)
    outDir <- file.path(KONFIG$outputDir, format_room(allRooms[iRoom]))
    # Attach the room's data to p0; my_Nelder_Mead forwards these
    # attributes to opt_fun_2 on every candidate point.
    attr(p0, "input") <- DATAFILES[indexRooms,]
    attr(p0, "cache") <- load_previous_parameters_to_tbl(data.dir = outDir, file.pattern = "prev.+\\.rds$")
    attr(p0, "gold") <- filter(GOLDVOICE, index %in% indexRooms, train == TRUE)
    cat(gettextf("   Running Nelder-Mead with p0=[%f, %f, %f, %f, %f]\n", p0[1], p0[2], p0[3], p0[4], p0[5] ))
    # Search box is [0,1]^5: parameters are min-max-normalized.
    nm.result <- my_Nelder_Mead(opt_fun_2, p0,
                                lower=rep(0,length(p0)), upper=rep(1,length(p0)),
                                control=list(maxfun=KONFIG$NelderMead$max_fun, verbose=KONFIG$NelderMead$verbose, FtolAbs=KONFIG$NelderMead$ftolabs, XtolRel=KONFIG$NelderMead$xtorel))
    # Persist the evaluation cache so later runs can warm-start from it.
    saveRDS(nm.result$DD.cache, file = file.path(outDir, paste0("prev_", KONFIG$timeStamp, ".rds")))
    plot_and_save_results(nm.result, outputDir=outDir, room=allRooms[iRoom], prev_pars.tbl = nm.result$DD.cache, time.stamp = KONFIG$timeStamp)
  }#ENDFOR iRoom
}
# -----------------------------------------------------------------------------
# FINALLY: COLLECT OPTIMAL PARAMETERS
# --- Script phase 3: gather per-room optima and export them as CSV ---
optimalParFile <- file.path(KONFIG$outputDir, "p0List.rds")
cat(gettextf("SAVING OPTIMAL PARAMETERS IN OBJECT p0List TO FILE %s\n", optimalParFile))
# Keep a timestamped backup of any previous result file before overwriting.
if(file.exists(optimalParFile)) {
  backup_File(optimalParFile)
}
# Re-scan the output directory so optima from THIS run (and earlier runs)
# are all picked up; rooms without results fall back to the Praat defaults.
p0List <- find_previous_optima(KONFIG$outputDir, all.rooms=allRooms, default.p0=praat_p0, file.pattern = paste0(KONFIG$NM_RESULT_PREFIX, ".+\\.rds"))
saveRDS(p0List, file=optimalParFile)
cat("EXPORTING OPTIMAL PARAMETERS TO CSV FILE...\n")
# One row per room, de-normalized back to real Praat parameter values.
praatParams.tbl <- tibble(room = character(),
                          pitch_floor = double(),
                          pitch_ceiling = double(),
                          silence_threshold = double(),
                          voicing_threshold = double(),
                          voiced_unvoiced_cost = double() )
rooms <- names(p0List)
for(ix in 1:length(p0List)){
  p0 <- get_parameters_from_NMx(p0List[[ix]])
  praatParams.tbl <- add_row(praatParams.tbl,
                             room = rooms[ix],
                             pitch_floor = p0$pitch_floor,
                             pitch_ceiling = p0$pitch_ceiling,
                             silence_threshold = p0$silence_threshold,
                             voicing_threshold = p0$voicing_threshold,
                             voiced_unvoiced_cost = p0$voiced_unvoiced_cost
  )
}
# NOTE(review): readr deprecated `path =` in favor of `file =`; keep in
# mind when upgrading readr.
write_csv(praatParams.tbl, path = file.path(KONFIG$outputDir, "praatParams.csv"))
# =============================================================================
#
# =============================================================================
# --- Script phase 4: evaluate optimized vs. default Praat parameters on
# the held-out test portion of the gold annotations ---
if(KONFIG$doTrainTest) {
  if(KONFIG$runTest) {
    timestamp()
    cat("RUNNING PRAAT ON TEST SET...\n")
    # Per-file results: number of test items, error with optimized
    # parameters, error with Praat defaults.
    DATAFILES$test.N <- NA_integer_
    DATAFILES$error.opt <- NA_real_
    DATAFILES$error.def <- NA_real_
    for(ix in 1:nrow(DATAFILES)) {
      xRoom <- DATAFILES[ix,]$room
      GOLD <- filter(GOLDVOICE, index == ix, train == FALSE)
      DATAFILES[ix,]$test.N <- nrow(GOLD)
      if(nrow(GOLD)==0) {
        warning(gettextf("No \"test\" items in %s?", DATAFILES[ix,]$gold.voice.file))
        rm(GOLD)
        next()
      }
      # Look up the optimized parameters for this file's room.
      praatPar <- as.list( praatParams.tbl[praatParams.tbl$room == xRoom,] )
      iFile <- DATAFILES[ix,]$gold.voice.file
      wavFile <- DATAFILES[ix,]$audio.file
      # USE OPTIMIZED PARAMETERS:
      tmp.tsv <- tempfile(fileext = ".tsv")
      suppressWarnings (
        tmp.tbl <- praat_voiceAdvanced(sound.file = wavFile, textGrid.file = iFile, output.tsv.file = tmp.tsv,
                                       pitch.floor = praatPar$pitch_floor,
                                       pitch.ceiling = praatPar$pitch_ceiling,
                                       silence.threshold = praatPar$silence_threshold,
                                       voicing.threshold = praatPar$voicing_threshold,
                                       voiced.unvoiced.cost = praatPar$voiced_unvoiced_cost
        )
      )
      DATAFILES[ix,]$error.opt <- evaluate_voice_2(praat.tbl = tmp.tbl, gold.tbl = GOLD)
      # NOTE(review): tmp.tsv is rm()'d from the workspace but the file on
      # disk is not deleted here (unlike the file.remove() in opt_fun_2).
      rm(tmp.tsv, tmp.tbl)
      # USE DEFAULT PARAMETERS:
      tmp.tsv <- tempfile(fileext = ".tsv")
      suppressWarnings (
        tmp.tbl <- praat_voiceAdvanced(sound.file = wavFile, textGrid.file = iFile, output.tsv.file = tmp.tsv,
                                       pitch.floor = DEFAULT_VOICEADVANCED$pitch.floor,
                                       pitch.ceiling = DEFAULT_VOICEADVANCED$pitch.ceiling,
                                       silence.threshold = DEFAULT_VOICEADVANCED$silence.threshold,
                                       voicing.threshold = DEFAULT_VOICEADVANCED$voicing.threshold,
                                       voiced.unvoiced.cost = DEFAULT_VOICEADVANCED$voiced.unvoiced.cost
        )
      )
      DATAFILES[ix,]$error.def <- evaluate_voice_2(praat.tbl = tmp.tbl, gold.tbl = GOLD)
      rm(tmp.tsv, tmp.tbl)
    }
    cat("EXPORTING TEST RESULTS...\n")
    outFile <- file.path(KONFIG$outputDir, paste0("test_results_tbl_", KONFIG$timeStamp, ".csv"))
    write_csv(select(DATAFILES, gold.voice.file, room, test.N, error.opt, error.def), path = outFile)
  } else {
    warning("SKIPPING EVALUATION ON TEST SET!", immediate. = TRUE)
  }
}
# =============================================================================
# Final status output before the script ends.
timestamp()
cat("ALL DONE. BYE!\n")
|
8f35601d5613b830ea425a2eb4c8cfa408a76d05
|
196d6a47ef55f1c68584ef443dcc611d1e21835d
|
/data-raw/data_funnel.R
|
379d3970dccb50731572c3d3fd32c1c325301e74
|
[] |
no_license
|
datastorm-open/rAmCharts
|
452083b44658b99423ae790f176c856eae24f4c5
|
5f61ad704aa77fbe2af7fc6b90d96e7873615d69
|
refs/heads/master
| 2022-10-06T04:54:36.784313
| 2022-09-29T07:47:42
| 2022-09-29T07:47:42
| 38,245,790
| 42
| 18
| null | 2020-11-19T11:11:13
| 2015-06-29T12:10:46
|
JavaScript
|
UTF-8
|
R
| false
| false
| 553
|
r
|
data_funnel.R
|
{
  # data-raw script: builds the example funnel dataset shipped with the
  # package. The braces group the temporary objects into one evaluation
  # unit; rm() below cleans up after the data has been serialized.
  data_funnel <- data.frame(description = c("Website visits", "Downloads",
                                            "Requested price list",
                                            "Contaced for more info",
                                            "Purchased", "Contacted for support",
                                            "Purchased additional products"),
                            value = c(300, 123, 98, 72, 80, 15, 8),
                            stringsAsFactors = FALSE)
  # Serialize to data/data_funnel.rda for lazy loading by the package.
  # NOTE(review): devtools::use_data() is deprecated in current devtools;
  # usethis::use_data() is its replacement — confirm before upgrading.
  devtools::use_data(data_funnel, overwrite = TRUE)
  rm(list = ls())
}
|
b9a75dc49f972bad14917480b895cb5ffd64383c
|
59f81a70f64033fd369621255d91ad623f3c2588
|
/R/createLocalList.R
|
20c9118fdf07197702ed0d36c931e5d28663013e
|
[] |
no_license
|
cran/clampSeg
|
2cc1699d18534579f359daf39e03262225328314
|
6a91363844f0090bbadc8529a7d76d1baf23826f
|
refs/heads/master
| 2022-02-09T01:47:20.547322
| 2022-01-27T22:10:06
| 2022-01-27T22:10:06
| 93,599,323
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,932
|
r
|
createLocalList.R
|
# Precompute per-length quantities ("local tests") for the given lowpass
# filter, used by the local deconvolution/testing methods.
#
# filter : object of class 'lowpassFilter'; provides $len, $sr, $acf and
#          the functions $truncatedStepfun, $acAntiderivative, $acfun.
# method : "2Param" (variance components for heterogeneous noise) or
#          "LR" (likelihood-ratio: solves a correlation system per length).
# lengths: strictly increasing positive integer vector of event lengths;
#          defaults depend on the method. Non-integers are truncated
#          (with a small tolerance), unsorted input is sorted, duplicates
#          are removed with a warning.
#
# Returns a list with one precomputation entry per length, of class
# 'localList', carrying attributes "method", "filter" and "lengths".
createLocalList <- function(filter, method = c("2Param", "LR"),
                            lengths = if (method == "LR") 1:20 else 1:65) {
  if (!is(filter, "lowpassFilter")) {
    stop("filter must be an object of class 'lowpassFilter'")
  }
  method <- match.arg(method)
  if (!is.numeric(lengths) || any(!is.finite(lengths)) || any(lengths < 1)) {
    stop("lengths must be an integer vector containing finite positive values")
  }
  if (any(!is.integer(lengths))) {
    # Tolerate doubles that are numerically integers (e.g. 3 vs 3L).
    lengths <- as.integer(lengths + 1e-6)
  }
  if (is.unsorted(lengths, strictly = TRUE)) {
    lengths <- sort(lengths)
    if (is.unsorted(lengths, strictly = TRUE)) {
      # Still not strictly increasing after sorting => duplicates present.
      warning("lengths contains duplicated values, they will be removed")
      lengths <- unique(lengths)
    }
  }
  localList <- list()
  if (method == "2Param") {
    for (indexLen in seq_along(lengths)) {
      len <- lengths[indexLen]
      # Observation grid covering the event plus the filter response.
      time <- 1:(len + filter$len - 1) / filter$sr
      cpLeft <- 0
      cpRight <- len / filter$sr
      Fleft <- filter$truncatedStepfun(time - cpLeft)
      Fright <- filter$truncatedStepfun(time - cpRight)
      v <- Fleft - Fright
      sumv2 <- sum(v^2)
      # Covariance contributions of the left/right change points.
      Fleft <- outer(time, time, function(i, j) filter$acAntiderivative(pmin(i, j) - cpLeft, abs(j - i)))
      Fright <- outer(time, time, function(i, j) filter$acAntiderivative(pmin(i, j) - cpRight, abs(j - i)))
      cor <- outer(time, time, function(i, j) filter$acfun(abs(j - i)))
      w <- Fleft - Fright
      sigmaL <- (cor - Fleft)
      sigmaR <- Fright
      vv <- outer(seq_along(time), seq_along(time), function(i, j) v[i] * v[j] / sum(v^2))
      diagW <- diag(w)
      matrixDiagW <- matrix(rep(diagW, length(diagW)), length(diagW))
      # Scalar summaries used by the variance estimation.
      AL <- sum(diag(sigmaL) * diagW) - sum(vv * sigmaL * matrixDiagW)
      AR <- sum(diag(sigmaR) * diagW) - sum(vv * sigmaR * matrixDiagW)
      B <- sum(diagW^2) - sum(vv * w * matrixDiagW)
      # Only the diagonals are needed downstream.
      w <- diagW
      sigmaL <- diag(sigmaL)
      sigmaR <- diag(sigmaR)
      Fleft <- 1 - filter$truncatedStepfun(time - cpLeft)
      Fright <- filter$truncatedStepfun(time - cpRight)
      localList[[indexLen]] <- list(len = len, Fleft = Fleft, Fright = Fright, v = v, sumv2 = sumv2,
                                    sumSigmaL = AL, sumSigmaR = AR, sumW = B, w = w,
                                    sigmaL = sigmaL, sigmaR = sigmaR)
    }
  } else {
    # "LR": build the banded correlation matrix A and solve A x = v once
    # per length; only the solution and the quadratic form are stored.
    correlations <- filter$acf
    correlations[1] <- correlations[1] + 1
    for (indexLen in seq_along(lengths)) {
      len <- lengths[indexLen]
      time <- 1:(len + filter$len - 1) / filter$sr
      cpLeft <- 0
      cpRight <- len / filter$sr
      # Band width: correlations beyond the acf support are zero.
      m <- min(len + filter$len - 1, length(correlations) - 1L)
      A <- matrix(0, len + filter$len - 1, len + filter$len - 1)
      for (i in 1:(len + filter$len - 2)) {
        A[i, i] <- correlations[1]
        A[i, i + 1:min(m, len + filter$len - 1 - i)] <- correlations[2:min(m + 1, len + filter$len - 1 - i + 1)]
        A[i + 1:min(m, len + filter$len - 1 - i), i] <- correlations[2:min(m + 1, len + filter$len - 1 - i + 1)]
      }
      A[len + filter$len - 1, len + filter$len - 1] <- correlations[1]
      Fleft <- filter$truncatedStepfun(time - cpLeft)
      Fright <- filter$truncatedStepfun(time - cpRight)
      v <- Fleft - Fright
      sol <- solve(A, v)
      vtAv <- sum(v * sol)
      Fleft <- 1 - Fleft
      localList[[indexLen]] <- list(len = len, Fleft = Fleft, Fright = Fright, v = v, sol = sol, vtAv = vtAv)
    }
  }
  # The class and attributes are identical for both methods; set them once
  # here instead of duplicating the four assignments in each branch.
  class(localList) <- c("localList", class(localList))
  attr(localList, "method") <- method
  attr(localList, "filter") <- filter
  attr(localList, "lengths") <- lengths
  localList
}
|
908b32f15922cead4ce09348c66926f3036918e3
|
cdb4a5d68b7a37b58b38145a559ec5d25250b183
|
/code.R
|
8cdcebe4c80ad42ea74aae7d0967f5b0bc659c63
|
[] |
no_license
|
jQSfire125/MovieLens
|
3384025c52fdc95d800314b7a135d9a36c44f8d8
|
a4bc8f1b495cc4618581c21b3f847ac1de34d972
|
refs/heads/master
| 2023-06-06T07:28:08.115866
| 2021-06-25T19:42:17
| 2021-06-25T19:42:17
| 372,559,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,656
|
r
|
code.R
|
# 1 Overview and Introduction
# 1.1 Library imports
# Make sure the user has the required packages
# NOTE(review): require() inside if() is used here only as an install
# check; library() below does the actual loading.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org")
if(!require(lubridate)) install.packages("lubridate", repos = "http://cran.us.r-project.org")
# Library imports
library(tidyverse)
library(caret)
library(data.table)
# 1.2 Download the raw dataset
# Note: this process could take a couple of minutes
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# Download the file and read it
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as separator; rewrite to tabs so fread can parse it.
ratings <- fread(text = gsub("::", "\t",
                             readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
# 1.3 Build the base data set and split into main (edx) and validation set (final hold-out test set)
# Build the data set
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")),
                          "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
# sample.kind="Rounding" reproduces the pre-R-3.6 sampling behavior.
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = movielens$rating,
                                  times = 1, p = 0.1,
                                  list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Cleanup and remove temporary files
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# 2 Analysis
# 2.1 Initial Exploratory Analysis
# Look at the file
head(edx)
glimpse(edx)
# Summary statistics
summary(edx)
# Number of unique users and movies
edx %>% summarize(unique_users = n_distinct(userId), unique_movies = n_distinct(movieId))
# Top 10 movies by ratings (movieIds with the most ratings)
top_10 <- edx %>%
  dplyr::count(movieId) %>%
  top_n(10) %>%
  pull(movieId)
edx %>% filter(movieId %in% top_10) %>%
  group_by(title) %>%
  summarize(n_reviews = n()) %>%
  arrange(desc(n_reviews)) %>%
  knitr::kable()
# Import Library for formatting graphs (comma for axis labels)
library(scales)
# Number of ratings per Movie
# Blockbusters on the right, obscure movies on the left
edx %>%
  count(movieId) %>%
  ggplot(aes(n)) +
  geom_histogram(bins= 30, fill= 'gray', color= 'black') +
  scale_x_log10() +
  labs(title= "Number of ratings per movie",
       x= "Number of ratings",
       y= "Number of movies")
# Number of ratings per User
# Most users rate between 30 and 100 movies
edx %>%
  count(userId) %>%
  ggplot(aes(n)) +
  geom_histogram(bins=30, fill= 'gray', color= 'black') +
  scale_x_log10() +
  scale_y_continuous(label= comma) +
  labs(title= "Number of ratings per user",
       x= "Number of ratings",
       y= "Number of users")
# Ratings Distribution
# We can see that the most common rating is 4
edx %>%
  ggplot(aes(rating)) +
  geom_histogram(binwidth= 0.25, fill= 'gray', color= 'black') +
  scale_x_continuous(breaks= seq(0.5,5.0,0.5)) +
  scale_y_continuous(label= comma, breaks= seq(0, 2500000,500000)) +
  labs(title= "Ratings Distribution",
       x= "Rating",
       y= "Number of ratings")
# Mean movie rating (overall average across all ratings)
edx %>% summarize(mean_rating = mean(rating))
# 2.2 Data Cleaning and feature engineering
# Remove the year from the title and add it to year column
# Titles end in " (YYYY)": characters -5..-2 are the year digits, and
# dropping the last 7 characters removes " (YYYY)" from the title.
edx <- edx %>% mutate(year = as.numeric(str_sub(title, -5,-2)))
edx <- edx %>% mutate(title = str_sub(title, 1, -8))
validation <- validation %>% mutate(year = as.numeric(str_sub(title, -5,-2)))
validation <- validation %>% mutate(title = str_sub(title, 1, -8))
# Create Age of movie column. We will use 2020 as the base year.
edx <- edx %>% mutate(movie_age= 2020 - year)
validation <- validation %>% mutate(movie_age= 2020 - year)
# Create a version with genres
# This step takes a long time. For now I will only split the edx dataset
# (one row per rating x genre after splitting on "|").
edx_split_genres <- edx %>% separate_rows(genres, sep = "\\|")
# Extract year rated from timestamp column
# Install library for handling dates
library(lubridate)
# The timestamp shows the number of seconds elapsed since January 1st, 1970
edx <- edx %>%
  mutate(year_rated= year(as.Date(as.POSIXct(timestamp,
                                             origin= "1970-01-01"))))
validation <- validation %>%
  mutate(year_rated= year(as.Date(as.POSIXct(timestamp,
                                             origin= "1970-01-01"))))
# 2.3 Further visual exploration
# Mean rating by movie age
edx %>% group_by(movie_age) %>%
  summarize(mean_rating = mean(rating)) %>%
  ggplot(aes(movie_age, mean_rating)) +
  geom_point() +
  geom_smooth() +
  labs(title= "Mean rating by movie age",
       x= "Age",
       y= "Rating")
# Mean rating by rated_year
edx %>% group_by(year_rated) %>%
  summarize(mean_rating= mean(rating)) %>%
  ggplot(aes(year_rated, mean_rating)) +
  geom_point() +
  geom_smooth() +
  labs(title= "Mean rating by year movie was rated",
       x= "Year rated",
       y= "Rating")
# Boxplots of movie ratings per year (release year)
edx %>% group_by(movieId) %>%
  summarize(n= n(), year= as.character(first(year))) %>%
  ggplot(aes(year, n)) +
  geom_boxplot() +
  coord_trans(y= "sqrt") +
  theme(axis.text.x= element_text(angle= 90, hjust= 1, size= 5)) +
  labs(title= "Boxplots of movie ratings per year",
       x= "Year",
       y= "Number of ratings")
# Mean rating by genre (uses the genre-exploded edx copy)
edx_split_genres %>% group_by(genres) %>%
  summarize(mean_rating= mean(rating)) %>%
  ggplot(aes(reorder(genres, mean_rating), mean_rating)) +
  geom_point() +
  coord_flip() +
  labs(title= "Mean rating by genre",
       x= "Mean rating",
       y= "Genre")
# Movies that get rated more often have better ratings
# Popular movies get better ratings
edx %>%
  filter(year>= 1990) %>%
  group_by(movieId) %>%
  summarize(n= n(),
            age= movie_age[1],
            title= title[1],
            mean_rating= mean(rating)) %>%
  mutate(rate= n/age) %>%  # ratings per year of movie age
  ggplot(aes(rate, mean_rating)) +
  geom_point() +
  geom_smooth() +
  scale_x_continuous(label= comma) +
  labs(title= "Frequency of rating and mean rating",
       x= "Frequency of rating",
       y= "Mean rating")
# 2.4 Modeling Approach
# Split the edx dataset into train and test sets (80/20); the validation
# set from above is reserved for the final model only.
set.seed(157, sample.kind= "Rounding")
test_index <- createDataPartition(y= edx$rating, times= 1,
                                  p= 0.2, list= FALSE)
edx_train <- edx[-test_index,]
edx_test <- edx[test_index,]
# Make sure we don't use users and movies on the test set that do not appear on the train set
edx_test <- edx_test %>%
  semi_join(edx_train, by= "movieId") %>%
  semi_join(edx_train, by= "userId")
# Loss function: root mean squared error (RMSE) between the observed and
# the predicted ratings. Lower is better; every model below is compared
# on this metric.
RMSE <- function(true_ratings, predicted_ratings) {
  squared_errors <- (true_ratings - predicted_ratings)^2
  sqrt(mean(squared_errors))
}
# Let's build the simplest model possible
# 2.4.1 Model 1. Avg movie rating model
# This model estimates each new rating at the average of all ratings
mu <- mean(edx_train$rating)
mu
# Calculate the RMSE for this model
m_1_rmse <- RMSE(edx_test$rating, mu)
m_1_rmse
# Create table with RMSE results (one row appended per model below)
rmses_table <- tibble(Model = "Using just the average", RMSE = m_1_rmse)
rmses_table %>% knitr::kable()
# 2.4.2 Model 2. Movie effect model
# Lets add to our estimate the effect of the average rating for each movie
# (m_e = per-movie mean deviation from the global average mu).
movie_effect <- edx_train %>%
  group_by(movieId) %>%
  summarize(m_e= mean(rating - mu))
# Visualize this effect
movie_effect %>%
  ggplot(aes(m_e)) +
  geom_histogram(bins= 20, fill= 'gray', color= 'black') +
  scale_x_continuous(breaks= seq(-3,1.5,0.5)) +
  scale_y_continuous(label= comma) +
  labs(title= "Movie Effect",
       x= "Movie effect",
       y= "Number of ratings")
# Model with average and movie effect: prediction = mu + m_e
predicted_ratings <- edx_test %>%
  left_join(movie_effect, by= "movieId") %>%
  mutate(pred= mu + m_e) %>%
  .$pred
# Calculate the RMSE for this model
m_2_rmse <- RMSE(edx_test$rating, predicted_ratings)
m_2_rmse
# Update summary table
rmses_table <- rmses_table %>% add_row(Model= "Movie effect model", RMSE= m_2_rmse)
rmses_table %>% knitr::kable()
# 2.4.3 Model 3. Movie and user effect model
# Mean rating by user
edx %>%
  group_by(userId) %>%
  summarise(m_r_u = mean(rating)) %>%
  ggplot(aes(m_r_u)) +
  geom_histogram(bins= 30, fill= 'gray', color= 'black') +
  scale_x_continuous(breaks= seq(0.5,5.0,0.5)) +
  scale_y_continuous(label= comma) +
  labs(title= "Mean Rating by user",
       x= "Mean rating",
       y= "Number of users")
# Now lets add the effect of the user bias
# (u_e = per-user mean residual after removing mu and the movie effect).
user_effect <- edx_train %>%
  left_join(movie_effect, by= "movieId") %>%
  group_by(userId) %>%
  summarize(u_e= mean(rating - mu - m_e))
# Visualize this effect
user_effect %>%
  ggplot(aes(u_e)) +
  geom_histogram(bins= 20, fill= 'gray', color= 'black') +
  scale_x_continuous(breaks= seq(-3.0,2.0,0.5)) +
  scale_y_continuous(label= comma) +
  labs(title= "User Effect",
       x= "User effect",
       y= "Number of ratings")
# Model with average, movie and user effects: prediction = mu + m_e + u_e
predicted_ratings <- edx_test %>%
  left_join(movie_effect, by= "movieId") %>%
  left_join(user_effect, by= "userId") %>%
  mutate(pred= mu + m_e + u_e) %>%
  .$pred
# Calculate the RMSE for this model
m_3_rmse <- RMSE(edx_test$rating, predicted_ratings)
m_3_rmse
# Update summary table
rmses_table <- rmses_table %>% add_row(Model= "Movie + user effect model", RMSE= m_3_rmse)
rmses_table %>% knitr::kable()
# 2.4.4 Model 4. Year effect
# Mean rating by year
edx %>% group_by(year) %>%
  summarize(mean_rating = mean(rating)) %>%
  ggplot(aes(year, mean_rating)) +
  geom_point() +
  geom_smooth() +
  labs(title= "Mean rating by year",
       x= "Year",
       y= "Rating")
# Now let's add the effect of the year bias
# (y_e = per-release-year mean residual after mu, movie and user effects).
year_effect <- edx_train %>%
  left_join(movie_effect, by= "movieId") %>%
  left_join(user_effect, by= "userId") %>%
  group_by(year) %>%
  summarize(y_e= mean(rating - mu - m_e - u_e))
# Visualize this effect
# Very small effect
year_effect %>%
  ggplot(aes(y_e)) +
  geom_histogram(bins= 20, fill= 'gray', color= 'black') +
  labs(title= "Year Effect",
       x= "Year effect",
       y= "Number of years")
# Model with average, movie, user and year effects:
predicted_ratings <- edx_test %>%
  left_join(movie_effect, by= "movieId") %>%
  left_join(user_effect, by= "userId") %>%
  left_join(year_effect, by= "year") %>%
  mutate(pred= mu + m_e + u_e + y_e) %>%
  .$pred
# Calculate the RMSE for this model
m_4_rmse <- RMSE(edx_test$rating, predicted_ratings)
m_4_rmse
# Update the summary table
rmses_table <- rmses_table %>% add_row(Model= "Movie + user + year effect model", RMSE= m_4_rmse)
rmses_table %>% knitr::kable()
# 2.4.5 Model 5. Regularized movie + user effect model
# Looking at just the movie effect, the biggest errors are for movies with very few ratings
movie_titles <- edx %>%
  select(movieId, title) %>%
  distinct()
# Biggest positive errors
# We can see that they are all obscure movies with a hanful of ratings
edx_train %>% dplyr::count(movieId) %>%
  left_join(movie_effect) %>%
  left_join(movie_titles, by="movieId") %>%
  arrange(desc(m_e)) %>%
  select(title, m_e, n) %>%
  slice(1:10) %>%
  knitr::kable()
# Biggest negative errors
edx_train %>% dplyr::count(movieId) %>%
  left_join(movie_effect) %>%
  left_join(movie_titles, by="movieId") %>%
  arrange(m_e) %>%
  select(title, m_e, n) %>%
  slice(1:10) %>%
  knitr::kable()
# In order to fix the impact of scarcely reviewed movies, we can use regularization
# lambda is the regularization strength
# We need to find the correct lambda though iteration
lambdas <- seq(0, 10, 0.25)
# We will calculate the RMSE with a sequence of different lambdas
# This process can take a few minutes
# The sapply function iterates over the sequence of lambdas
# At each iteration, it calculates the mean (mu), the movie effect (m_e)
# the user effect (u_e), creates predictions and evaluates the rmse
# Note: sum(...)/(n()+l) is the regularized (shrunken) mean.
rmses <- sapply(lambdas, function(l) {
  mu <- mean(edx_train$rating)
  m_e <- edx_train %>%
    group_by(movieId) %>%
    summarize(m_e= sum(rating - mu)/(n()+l))
  u_e <- edx_train %>%
    left_join(m_e, by= "movieId") %>%
    group_by(userId) %>%
    summarize(u_e= sum(rating - m_e - mu)/(n()+l))
  predicted_ratings <- edx_test %>%
    left_join(m_e, by= "movieId") %>%
    left_join(u_e, by= "userId") %>%
    mutate(pred= mu + m_e + u_e) %>%
    .$pred
  return(RMSE(edx_test$rating, predicted_ratings))
})
# We pick the lambda that minimises the rmse
qplot(lambdas, rmses) +
  labs(title= "Best lambda for regularized movie + user effect model",
       x= "Lambda",
       y= "RMSE")
lambda <- lambdas[which.min(rmses)]
lambda
# This is the RMSE for this model
min(rmses)
# Update the summary table
rmses_table <- rmses_table %>% add_row(Model= "Regularized movie + user effect model", RMSE= min(rmses))
rmses_table %>% knitr::kable()
# 2.4.6 Model 6. Regularized movie + user + year model
# We are going to add the year effect to improve our model
# This process can take a few minutes
# The sapply function iterates over the sequence of lambdas
# At each iteration, it calculates the mean (mu), the movie effect (m_e)
# the user effect (u_e), year effect (y_e), creates predictions and evaluates the rmse
# Note: a single lambda is shared by all three regularized effects.
rmses_2 <- sapply(lambdas, function(l) {
  mu <- mean(edx_train$rating)
  m_e <- edx_train %>%
    group_by(movieId) %>%
    summarize(m_e= sum(rating - mu)/(n()+l))
  u_e <- edx_train %>%
    left_join(m_e, by= "movieId") %>%
    group_by(userId) %>%
    summarize(u_e= sum(rating - m_e - mu)/(n()+l))
  y_e <- edx_train %>%
    left_join(m_e, by= "movieId") %>%
    left_join(u_e, by= "userId") %>%
    group_by(year) %>%
    summarize(y_e= sum(rating - m_e - u_e - mu)/(n()+l))
  predicted_ratings <- edx_test %>%
    left_join(m_e, by= "movieId") %>%
    left_join(u_e, by= "userId") %>%
    left_join(y_e, by= "year") %>%
    mutate(pred= mu + m_e + u_e + y_e) %>%
    .$pred
  return(RMSE(edx_test$rating, predicted_ratings))
})
# We pick the lambda that minimises the rmse
qplot(lambdas, rmses_2) +
  labs(title= "Best lambda for regularized movie + user + year effect model",
       x= "Lambda",
       y= "RMSE")
lambda_2 <- lambdas[which.min(rmses_2)]
lambda_2
# This is the RMSE for this model
min(rmses_2)
# Update the summary table
rmses_table <- rmses_table %>% add_row(Model= "Regularized movie + user + year effect model", RMSE= min(rmses_2))
rmses_table %>% knitr::kable()
# 3 Results
# Final model
# Calculate the RMSE training on the full edx set and testing on the validation set
# We will use the lambda estimated in model 6 for regularization
mu <- mean(edx$rating)
# Regularized effects are re-estimated on the FULL edx set (train + test).
movie_effect_final <- edx %>%
  group_by(movieId) %>%
  summarize(m_e= sum(rating - mu)/(n()+lambda_2))
user_effect_final <- edx %>%
  left_join(movie_effect_final, by= "movieId") %>%
  group_by(userId) %>%
  summarize(u_e= sum(rating - mu - m_e)/(n()+lambda_2))
year_effect_final <- edx %>%
  left_join(movie_effect_final, by= "movieId") %>%
  left_join(user_effect_final, by= "userId") %>%
  group_by(year) %>%
  summarize(y_e= sum(rating - mu - m_e - u_e)/(n()+lambda_2))
# Final prediction on the held-out validation set: mu + m_e + u_e + y_e.
predicted_ratings <- validation %>%
  left_join(movie_effect_final, by= "movieId") %>%
  left_join(user_effect_final, by= "userId") %>%
  left_join(year_effect_final, by= "year") %>%
  mutate(pred= mu + m_e + u_e + y_e) %>%
  .$pred
# Calculate the RMSE for this model
# We have improved 18.5% on the baseline model!
m_7_rmse <- RMSE(validation$rating, predicted_ratings)
m_7_rmse
# Update the summary table
rmses_table <- rmses_table %>% add_row(Model= "Final RMSE (validation set, regularized movie + user + year effect model)", RMSE= m_7_rmse)
# Final Summary Table
# This is the summary of all our models
rmses_table %>% knitr::kable()
# 4 Conclusion
|
48831840f96b47cbbb48e79d9f3a7a10292d317a
|
74949d1ef5c649def97ea32b64e09d228a74d67d
|
/plot1.R
|
35bc733adb56f9afe51fce51654946520c21f7bf
|
[] |
no_license
|
homeupnorth/exploredata1
|
b212e808622b876818f43b230eed3e074bbd8811
|
74e083f443b50a6c34633e17eaf7a4e468689cdf
|
refs/heads/master
| 2020-06-01T11:48:07.985762
| 2015-03-08T15:17:42
| 2015-03-08T15:17:42
| 31,853,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,221
|
r
|
plot1.R
|
# Exploratory Data Analysis
# Project 1
# March 4, 2015
#
# NOTE(review): setwd() in a shared script is discouraged (machine-specific
# path); kept as-is since this is a documentation-only pass.
setwd("~/Documents/Coursera/Exploratory Data Analysis")
# download data from https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# unzip in my working directory
# read into R (semicolon-separated file)
powerdata <- read.csv("~/Documents/Coursera/Exploratory Data Analysis/household_power_consumption.txt", sep=";")
# Data data is from the UC Irvine Machine Learning Repository,
# “Individual household electric power consumption Data Set”
# ===============================================================
#
# I called my data 'powerdata', if you do too, then
# the rest of the code you can run on your data
#
# ================================================================
head(powerdata) # see quick view of data
length(powerdata) # see how many components
str(powerdata) # see structure, looking for data types
class(powerdata) # see class making sure it is a data frame
names(powerdata) # see names of fields to get spelling right
#
# Convert factors to numbers and date/time columns to date time field
# NOTE(review): non-numeric placeholders in these columns (presumably "?"
# missing-value markers -- confirm against the raw file) become NA here,
# with coercion warnings.
#
powerdata$Global_active_power<-as.numeric(as.character(powerdata$Global_active_power))
powerdata$Global_reactive_power<-as.numeric(as.character(powerdata$Global_reactive_power))
powerdata$Voltage<-as.numeric(as.character(powerdata$Voltage))
powerdata$Global_intensity<-as.numeric(as.character(powerdata$Global_intensity))
powerdata$Sub_metering_1<-as.numeric(as.character(powerdata$Sub_metering_1))
powerdata$Sub_metering_2<-as.numeric(as.character(powerdata$Sub_metering_2))
# Combine Date and Time into a single POSIXlt timestamp (format dd/mm/yyyy HH:MM:SS)
powerdata$DateTime <- paste(powerdata$Date, powerdata$Time)
powerdata$DateTime<-strptime(powerdata$DateTime, "%d/%m/%Y %H:%M:%S")
#
# Check our conversions
#
summary(powerdata)
head(powerdata)
str(powerdata)
#
# Pull subset for study and check our work
# (two specific days, matched as the raw Date strings)
#
pdsubset<-subset(powerdata, powerdata$Date=="1/2/2007"|powerdata$Date=="2/2/2007")
head(pdsubset)
str(pdsubset)
dim(pdsubset)
#
# Exploratory Graphs
#
par(mfrow=c(1,1))
# Plot 1: histogram drawn on screen, then copied to plot1.png and the
# PNG device closed
hist(pdsubset$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (Kilowatts)", ylab="Frequency")
dev.copy(png,filename="plot1.png");
dev.off()
par(mfrow=c(1,1))
#
#
# All Done
|
4214d7e700b69cde62c1f48c3579a69ee643aa32
|
257b39265a6b796d54e0e861825984e7e205bbd8
|
/man/createTidyFromMatrix.Rd
|
68198d167fe242fabe76df0a8e1d5bc83af01715
|
[] |
no_license
|
yaoguodong/zFactor-1
|
230c8576f004efb6bde669c60e249fd36134ca4f
|
66d6f0732e35c8e84bcd98d28251a0badc7fe423
|
refs/heads/master
| 2020-04-20T04:26:18.046950
| 2017-10-23T06:22:46
| 2017-10-23T06:22:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
createTidyFromMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{createTidyFromMatrix}
\alias{createTidyFromMatrix}
\title{Create a tidy table from Ppr and Tpr vectors}
\usage{
createTidyFromMatrix(ppr_vector, tpr_vector, correlation)
}
\arguments{
\item{ppr_vector}{a pseudo-reduced pressure vector}
\item{tpr_vector}{a pseudo-reduced temperature vector}
\item{correlation}{a z-factor correlation}
}
\description{
Create a tidy table from Ppr and Tpr vectors
}
\examples{
ppr <- c(0.5, 1.5, 2.5, 3.5)
tpr <- c(1.05, 1.1, 1.2)
createTidyFromMatrix(ppr, tpr, correlation = "DAK")
createTidyFromMatrix(ppr, tpr, correlation = "BB")
}
|
1bc01cbb1b55a3adfa9f91ad092869fa20db07e4
|
110fed67a467812e9b7642bca98168883926d710
|
/src/jd_nb_script.r
|
b23121bf8b17cff94bfcea8098e852b76bd71f5a
|
[] |
no_license
|
tjbencomo/Titanic
|
306291ece3b8bd44540c772c97ef19d0d91cd043
|
b37387c95452900b4be234ba62d73aa7f7c4be75
|
refs/heads/master
| 2021-01-01T16:14:55.518623
| 2017-10-20T23:42:41
| 2017-10-20T23:42:41
| 97,794,465
| 1
| 1
| null | 2017-10-20T23:42:42
| 2017-07-20T05:36:31
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 992
|
r
|
jd_nb_script.r
|
#The data used for analysis in this notebook is from the dataframe train_edited.csv which was cleaned by Tomas Bencomo.
# Further description about how the file was cleaned can be found in his notebook.
# Import necessary libraries
library(ggplot2)
# import the data
df <- read.csv('datasets/train_edited.csv')
head(df)
str(df)
# Lets begin by analyzing the relationship between age and fare.
# We looked at the top 38 most expensive fares and tried to see if there was any correlation with age.
# To find these fares, we only analyzed the fares that were 2 standard deviations above the mean.
# The amount of tickets that matched this description was 38 tickets.
standard_dev <- sd(df$Fare)
two_sd_over <- mean(df$Fare) + 2*standard_dev
# Fares (and the corresponding ages) more than two SDs above the mean fare
fares <- df$Fare[df$Fare > two_sd_over]
ages <- df$Age[df$Fare > two_sd_over]
length(fares)
qplot(Age, Fare, data = df, color = Sex)
# FIX: with the default use = "everything", cor() returns NA as soon as Age
# (or Fare) contains any missing value. Restrict the computation to complete
# pairs; this is a no-op when the columns have no NAs.
cor(df$Age, df$Fare, use = "complete.obs")
survived_ages <- df$Age[df$Survived == 1]
qplot(survived_ages, geom="histogram", bins = 30)
|
3e95bcca63cb969c40909a4976cb6d85bee6c713
|
19a29f667faf1a30b4aaadc84c6007c6d5df150f
|
/processed_real_data_models/cr_card_models.R
|
fb37e626ba874e8b3c58bf662abb1e1a28470595
|
[] |
no_license
|
yordanivanov92/MT-Fin_Fraud_Detection
|
71a96e0c2966a799a04f572543036c5c70913c00
|
137f828447c254e7fd767c8baa6f2a0a13d2db34
|
refs/heads/master
| 2021-05-15T18:11:36.209834
| 2018-05-06T15:51:26
| 2018-05-06T15:51:26
| 106,425,028
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,089
|
r
|
cr_card_models.R
|
# Using real credit card data from Dal Pozzlo
library(dplyr)
library(caret)
library(DMwR) #SMOTE
library(purrr)
library(pROC)
library(gbm)
library(PRROC)
library(caTools)
set.seed(2142)
###########################################################################
############################### BankSim data ##############################
###########################################################################
# Load the (highly imbalanced) credit-card fraud dataset; Class is the
# fraud indicator.
credit_card_data <- read.csv(file = "C:/Users/Yordan Ivanov/Desktop/Master Thesis Project/data/dal_pozzlo_real_data_PCA/creditcard.csv",
                             header = TRUE,
                             sep = ",")
#cc_data <- credit_card_data[sample(nrow(credit_card_data), 100000), ]
cc_data <- credit_card_data
# Removing time column
cc_data <- cc_data[, -1]
# Stratified 60/40 train/test split on the class label
split = sample.split(cc_data$Class, SplitRatio = 0.6)
cc_data_train = subset(cc_data, split == TRUE)
cc_data_test = subset(cc_data, split == FALSE)
# Verify the class proportions are preserved in both splits
prop.table(table(cc_data_train$Class))
prop.table(table(cc_data_test$Class))
# 10-fold CV repeated 5 times, optimizing ROC AUC (needs class probabilities)
ctrl_ccard <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 5,
                           summaryFunction = twoClassSummary,
                           classProbs = TRUE,
                           verboseIter = TRUE)
# caret's twoClassSummary needs factor levels with valid R names,
# so recode 0/1 to clean/fraud
cc_data_train$Class <- ifelse(cc_data_train$Class == 1, "fraud", "clean")
cc_data_train$Class <- as.factor(cc_data_train$Class)
cc_data_test$Class <- ifelse(cc_data_test$Class == 1, "fraud", "clean")
cc_data_test$Class <- as.factor(cc_data_test$Class)
# NOTE(review): makeCluster/registerDoParallel require the parallel and
# doParallel packages, which are not in the library() calls above -- confirm
# they are attached elsewhere.
cluster <- makeCluster(detectCores() - 1) # convention to leave 1 core for OS
registerDoParallel(cluster)
# Baseline GBM on the untouched (imbalanced) training data
cc_orig <- train(Class ~ .,
                 data = cc_data_train,
                 method = "gbm",
                 verbose = FALSE,
                 metric = "ROC",
                 trControl = ctrl_ccard)
stopCluster(cluster)
registerDoSEQ()
test_results <- predict(cc_orig, newdata = cc_data_test)
confusionMatrix(test_results, cc_data_test$Class)
# Build a ROC curve (pROC::roc) for a fitted caret model evaluated on `data`,
# scoring each observation by its predicted probability of the "fraud" class.
cc_test_roc <- function(model, data) {
  fraud_prob <- predict(model, data, type = "prob")[, "fraud"]
  roc(data$Class, fraud_prob)
}
# ROC AUC of the baseline model on the test split
cc_orig %>%
  cc_test_roc(data = cc_data_test) %>%
  auc()
# Handling class imbalance with weighted or sampling methods
# Case weights: each class contributes half the total weight
cc_data_weights <- ifelse(cc_data_train$Class == "clean",
                          (1/table(cc_data_train$Class)[1]) * 0.5,
                          (1/table(cc_data_train$Class)[2]) * 0.5)
# Reuse the baseline model's CV seeds so all models see identical folds
ctrl_ccard$seeds <- cc_orig$control$seeds
#weighted model
cc_weights <- train(Class ~ .,
                    data = cc_data_train,
                    method = "gbm",
                    verbose = FALSE,
                    weights = cc_data_weights,
                    metric = "ROC",
                    trControl = ctrl_ccard)
#sampled-down model
ctrl_ccard$sampling <- "down"
cc_down <- train(Class ~ .,
                 data = cc_data_train,
                 method = "gbm",
                 verbose = FALSE,
                 metric = "ROC",
                 trControl = ctrl_ccard)
#sampled-up
ctrl_ccard$sampling <- "up"
cc_up <- train(Class ~ .,
               data = cc_data_train,
               method = "gbm",
               verbose = FALSE,
               metric = "ROC",
               trControl = ctrl_ccard)
#SMOTE
ctrl_ccard$sampling <- "smote"
cc_smote <- train(Class ~ .,
                  data = cc_data_train,
                  method = "gbm",
                  verbose = FALSE,
                  metric = "ROC",
                  trControl = ctrl_ccard)
# Collect all five fitted models for joint evaluation
cc_model_list <- list(original = cc_orig,
                      weighted = cc_weights,
                      down = cc_down,
                      up = cc_up,
                      SMOTE = cc_smote)
# NOTE(review): ROC curves here are computed on the TRAINING split
# (data = cc_data_train) while the confusion matrices below use the test
# split -- confirm this asymmetry is intentional.
cc_model_list_roc <- cc_model_list %>%
  map(cc_test_roc, data = cc_data_train)
cc_model_list_roc %>%
  map(auc)
# Flatten each ROC object into a (tpr, fpr, model) data frame for plotting
cc_results_list_roc <- list(NA)
num_mod <- 1
for(the_roc in cc_model_list_roc){
  cc_results_list_roc[[num_mod]] <-
    data_frame(tpr = the_roc$sensitivities,
               fpr = 1 - the_roc$specificities,
               model = names(cc_model_list)[num_mod])
  num_mod <- num_mod + 1
}
cc_model_list_roc_df <- bind_rows(cc_results_list_roc)
custom_col <- c("#000000", "#009E73", "#0072B2", "#D55e00", "#CC79A7")
# Overlaid ROC curves; the gray diagonal is the no-skill reference
ggplot(aes(x = fpr, y = tpr, group = model), data = cc_model_list_roc_df) +
  geom_line(aes(color = model), size = 1) +
  scale_color_manual(values = custom_col) +
  geom_abline(intercept = 0, slope = 1, color = "gray", size = 1) +
  theme_bw(base_size = 18)
##### the test_results_model do not give probabilities, as the type = "prob" is omitted
#### the predict() gives us directly predictions at a cutoff at 0.5
#### a thing to try is to create confusion matrices at different cutoffs
test_results_orig <- predict(cc_orig, newdata = cc_data_test)
confusionMatrix(test_results_orig, cc_data_test$Class)
test_results_weight <- predict(cc_weights, newdata = cc_data_test)
confusionMatrix(test_results_weight, cc_data_test$Class)
test_results_up <- predict(cc_up, newdata = cc_data_test)
confusionMatrix(test_results_up, cc_data_test$Class)
test_results_down <- predict(cc_down, newdata = cc_data_test)
confusionMatrix(test_results_down, cc_data_test$Class)
test_results_smote <- predict(cc_smote, newdata = cc_data_test)
confusionMatrix(test_results_smote, cc_data_test$Class)
#### second part - more detailed metrics
# Precision-recall curve (PRROC::pr.curve) for a fitted caret model on `data`:
# scores.class0 are fraud-class probabilities of the true frauds,
# scores.class1 are fraud-class probabilities of the true cleans.
cc_calc_auprc <- function(model, data) {
  fraud_scores <- predict(model, data, type = "prob")$fraud
  is_fraud <- data$Class == "fraud"
  is_clean <- data$Class == "clean"
  pr.curve(fraud_scores[is_fraud], fraud_scores[is_clean], curve = TRUE)
}
# Precision-recall curve for every model on the test split
cc_model_list_pr <- cc_model_list %>%
  map(cc_calc_auprc, data = cc_data_test)
# Area under each PR curve
cc_model_list_pr %>%
  map(function(the_mod) the_mod$auc.integral)
# Flatten each PR curve into a (recall, precision, model) data frame
# NOTE(review): data_frame() is deprecated in recent tibble versions;
# tibble() is the modern equivalent.
cc_results_list_pr <- list(NA)
num_mod <- 1
for (the_pr in cc_model_list_pr) {
  cc_results_list_pr[[num_mod]] <-
    data_frame(recall = the_pr$curve[, 1],
               precision = the_pr$curve[, 2],
               model = names(cc_model_list_pr)[num_mod])
  num_mod <- num_mod + 1
}
cc_results_df_pr <- bind_rows(cc_results_list_pr)
# Overlaid PR curves; the gray horizontal line is the no-skill baseline
# (test-set fraud prevalence)
ggplot(aes(x = recall, y = precision, group = model), data = cc_results_df_pr) +
  geom_line(aes(color = model), size = 1) +
  scale_color_manual(values = custom_col) +
  geom_abline(intercept = sum(cc_data_test$Class == "fraud")/nrow(cc_data_test),slope = 0, color = "gray", size = 1)
# caret summaryFunction that scores resamples by area under the
# precision-recall curve. `data` is caret's resample frame with an `obs`
# column and one probability column per class level; `lev` and `model` are
# unused but required by the summaryFunction signature.
cc_auprcSummary <- function(data, lev = NULL, model = NULL){
  fraud_scores <- data$fraud[data$obs == "fraud"]
  clean_scores <- data$fraud[data$obs == "clean"]
  auprc <- pr.curve(fraud_scores, clean_scores, curve = FALSE)$auc.integral
  c(AUPRC = auprc)
}
#Re-initialize control function to remove smote and
# include our new summary function
# FIX: this section was adapted from a tutorial and still referenced names
# that do not exist in this script (auprcSummary, orig_fit, calc_auprc,
# imbal_test) and passed ctrl_ccard (which at this point has SMOTE sampling
# and the ROC summary) instead of the freshly built ctrl. All references are
# corrected to the cc_-prefixed objects defined above.
ctrl <- trainControl(method = "repeatedcv",
                     number = 10,
                     repeats = 5,
                     summaryFunction = cc_auprcSummary,
                     classProbs = TRUE,
                     seeds = cc_orig$control$seeds)
# Refit the baseline GBM, this time selecting hyperparameters by AUPRC
orig_pr <- train(Class ~ .,
                 data = cc_data_train,
                 method = "gbm",
                 verbose = FALSE,
                 metric = "AUPRC",
                 trControl = ctrl)
# Get results for auprc on the test set
orig_fit_test <- cc_orig %>%
  cc_calc_auprc(data = cc_data_test) %>%
  (function(the_mod) the_mod$auc.integral)
orig_pr_test <- orig_pr %>%
  cc_calc_auprc(data = cc_data_test) %>%
  (function(the_mod) the_mod$auc.integral)
# The test errors are the same
identical(orig_fit_test,
          orig_pr_test)
## [1] TRUE
# Because both chose the same
# hyperparameter combination
identical(cc_orig$bestTune,
          orig_pr$bestTune)
|
eea2ea6f76069f0a63218d4d68fa5991f45ec109
|
766555a2ce29b4c79602bc10b404782ddeaf1eb5
|
/man/tsal-boot.Rd
|
d1c398b4220f813198b5dfbaeb889ef781b92e0e
|
[] |
no_license
|
cran/tsallisqexp
|
2b5c5356d70b23684b096c94ec066eeb375cc876
|
70af02c7d313e10da43ba9a03a402d6ee5aeec5d
|
refs/heads/master
| 2021-07-16T17:51:53.501020
| 2021-02-10T04:50:02
| 2021-02-10T04:50:02
| 32,381,379
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,886
|
rd
|
tsal-boot.Rd
|
\name{tsal.boot}
\alias{tsal.boot}
\alias{tsal.bootstrap.errors}
\alias{tsal.total.magnitude}
\title{Bootstrap methods for Tsallis distributions}
\description{
Bootstrap functions.
}
\usage{
tsal.bootstrap.errors(dist=NULL, reps=500, confidence=0.95,
n=if(is.null(dist)) 1 else dist$n,
shape=if(is.null(dist)) 1 else dist$shape,
scale=if(is.null(dist)) 1 else dist$scale,
q = if(is.null(dist)) tsal.q.from.shape(shape) else dist$q,
kappa = if(is.null(dist)) tsal.kappa.from.ss(shape,scale) else dist$kappa,
method = if(is.null(dist)) "mle.equation" else dist$method,
xmin = if(is.null(dist)) 0 else dist$xmin)
tsal.total.magnitude(dist=NULL, n=if(is.null(dist)) 1 else dist$n,
shape=if(is.null(dist)) 1 else dist$shape,
scale=if(is.null(dist)) 1 else dist$scale,
q = if(is.null(dist)) tsal.q.from.shape(shape) else dist$q,
kappa = if(is.null(dist)) tsal.kappa.from.ss(shape,scale) else dist$kappa,
xmin = if(is.null(dist)) 0 else dist$xmin,
mult = 1)
}
\arguments{
\item{dist}{distribution (as a list of the sort produced by tsal.fit)}
\item{reps}{number of bootstrap replicates.}
\item{confidence}{confidence level for confidence intervals.}
\item{n}{original sample size.}
\item{shape, q}{shape parameters (over-riding those of the distribution, if
one was given).}
\item{scale, kappa}{scale parameters (over-riding those of the distribution, if
one was given).}
\item{method}{fitting method (over-riding that used in the original
fit, if one was given), see \code{\link{tsal.fit}}.}
\item{xmin}{minimum x-value (left-censoring threshold).}
\item{mult}{multiplier of size (if the base units of the data are not real units).}
}
\details{
\code{tsal.bootstrap.errors} finds biases and standard errors for parameter estimates by parametric
bootstrapping, and simple confidence intervals
Simulate, many times, drawing samples from the estimated distribution, of
the same size as the original data; re-estimate the parameters on the
simulated data. The distribution of the re-estimates around the estimated
parameters is approximately the same as the distribution of the estimate
around the true parameters.
This function invokes the estimating-equation MLE, but it would be easy to modify to
use other methods.
Confidence intervals (CI) are calculated for each parameter separately, using
a simple pivotal interval (see, e.g., Wasserman, _All of Statistics_, Section
8.3). Confidence regions for combinations of parameters would be a tedious,
but straightforward, extension.
\code{tsal.total.magnitude}
estimates the total magnitude of a tail-sampled population
given that we have n samples from the tail of a distribution, i.e., only
values >= xmin were retained, provide an estimate of the total magnitude
(summed values) of the population.
Then it estimates the number of objects, observed and un-observed, as n/pr(X >= xmin)
and then multiply by the mean.
}
\value{
\code{tsal.bootstrap.errors} returns a structured list, containing the actual parameter settings used,
the estimated biases, the estimated standard errors, the lower
confidence limits, the upper confidence limits, the sample size, the
number of replicates, the confidence level, and the fitting method.
\code{tsal.total.magnitude} returns a list, giving estimated total magnitude
and estimated total population size.
}
\references{
\emph{Maximum Likelihood Estimation for q-Exponential (Tsallis) Distributions},
\url{http://bactra.org/research/tsallis-MLE/} and \url{https://arxiv.org/abs/math/0701854}.
}
\author{
Cosma Shalizi (original R code),
Christophe Dutang (R packaging)
}
\examples{
#####
# (1) fit
x <- rtsal(20, 1/2, 1/4)
tsal.loglik(x, 1/2, 1/4)
tsal.fit(x, method="mle.equation")
tsal.fit(x, method="mle.direct")
tsal.fit(x, method="leastsquares")
}
\keyword{distribution}
|
782116abf4acb3df89569f4b7851f5f5a3c44422
|
495408837d7bc870a07c9f59a66900959ae3bed5
|
/test-triang.R
|
b1314ef2975fdef1b34a7ee48253c2e899f5a77c
|
[] |
no_license
|
bogdanoancea/pop_test
|
5099204970c5f7082570370115f26eca94513d0f
|
05a5ffe9ecad01d495273dd457c5efe3f557c6c7
|
refs/heads/master
| 2021-09-13T22:33:03.608104
| 2018-05-05T09:47:25
| 2018-05-05T09:47:25
| 119,249,914
| 0
| 0
| null | 2018-01-28T19:42:39
| 2018-01-28T10:52:43
|
R
|
UTF-8
|
R
| false
| false
| 2,772
|
r
|
test-triang.R
|
library(rbenchmark)
library(pestim)
# Density of the triangular distribution on [xMin, xMax] with mode xMode,
# evaluated at each element of x. The three parameters may be scalars
# (recycled to length(x)) or vectors of the same length as x.
# The density is 0 at and outside the endpoints.
dtriang2 <- function(x, xMin, xMax, xMode){
  if (any(xMin > xMax)) stop('xMax must be greater than xMin.')
  if (!(all(xMode >= xMin) & all(xMode <= xMax))) stop('xMode must be between xMin and xMax.')
  n <- length(x)
  if (length(xMin) == 1) xMin <- rep(xMin, n)
  if (length(xMax) == 1) xMax <- rep(xMax, n)
  if (length(xMode) == 1) xMode <- rep(xMode, n)
  dens <- x
  dens[x <= xMin | x >= xMax] <- 0
  # Rising edge: linear from 0 at xMin up to 2/(xMax - xMin) at the mode
  rising <- x > xMin & x <= xMode
  dens[rising] <- 2 * (x[rising] - xMin[rising]) /
    ((xMax[rising] - xMin[rising]) * (xMode[rising] - xMin[rising]))
  # Falling edge: linear back down to 0 at xMax (also covers x == xMode)
  falling <- x >= xMode & x < xMax
  dens[falling] <- 2 * (xMax[falling] - x[falling]) /
    ((xMax[falling] - xMin[falling]) * (xMax[falling] - xMode[falling]))
  dens
}
# CDF of the triangular distribution on [xMin, xMax] with mode xMode,
# evaluated at each element of q. Parameters may be scalars (recycled) or
# vectors of the same length as q. Returns 0 below xMin and 1 above xMax.
ptriang2 <- function(q, xMin, xMax, xMode){
  if (any(xMin > xMax)) stop('xMax must be greater than xMin.')
  if (!(all(xMode >= xMin) & all(xMode <= xMax))) stop('xMode must be between xMin and xMax.')
  n <- length(q)
  if (length(xMin) == 1) xMin <- rep(xMin, n)
  if (length(xMax) == 1) xMax <- rep(xMax, n)
  if (length(xMode) == 1) xMode <- rep(xMode, n)
  prob <- q
  prob[q <= xMin] <- 0
  prob[q >= xMax] <- 1
  # Below (or at) the mode: quadratic rise from 0
  lower <- q > xMin & q <= xMode
  prob[lower] <- (prob[lower] - xMin[lower])^2 /
    ((xMax[lower] - xMin[lower]) * (xMode[lower] - xMin[lower]))
  # Above the mode: quadratic approach to 1
  upper <- q > xMode & q < xMax
  prob[upper] <- 1 - (prob[upper] - xMax[upper])^2 /
    ((xMax[upper] - xMin[upper]) * (xMax[upper] - xMode[upper]))
  prob
}
# Draw n variates from the triangular distribution on [xMin, xMax] with mode
# xMode, by inverse-transform sampling: push n uniforms through qtriang2.
# (The original achieved the same call via a match.call() rewrite; a direct
# call to qtriang2 with the same arguments is equivalent.)
rtriang2 <- function(n, xMin, xMax, xMode){
  if (any(xMin > xMax)) stop('xMax must be greater than xMin.')
  if (!(all(xMode >= xMin) & all(xMode <= xMax))) stop('xMode must be between xMin and xMax.')
  draws <- runif(n)
  qtriang2(q = draws, xMin = xMin, xMax = xMax, xMode = xMode)
}
# Quantile function of the triangular distribution on [xMin, xMax] with mode
# xMode. For probabilities up to F(xMode) = (xMode - xMin)/(xMax - xMin) the
# quantile lies on the rising edge; above it, on the falling edge.
# Parameters are expected to be scalars (they are used in scalar arithmetic).
qtriang2 <- function(q, xMin, xMax, xMode){
  if (any(xMin > xMax)) stop('xMax must be greater than xMin.')
  if (!(all(xMode >= xMin) & all(xMode <= xMax))) stop('xMode must be between xMin and xMax.')
  n <- length(q)
  output <- q
  # BUG FIX: the original used a strict '<' here and a strict '>' below, so a
  # probability exactly equal to F(xMode) matched neither branch and was
  # returned unchanged instead of being mapped to xMode. '<=' closes the gap
  # (the rising-edge formula yields exactly xMode at that point).
  range1 <- (q <= (xMode - xMin) / (xMax - xMin))
  output[range1] <- xMin + sqrt(q[range1] * (xMax - xMin) * (xMode - xMin))
  range2 <- ( q > (xMode - xMin) / (xMax - xMin))
  output[range2] <- xMax - sqrt((1 - q[range2]) * (xMax - xMin) * (xMax - xMode))
  return(output)
}
# Reference draw: 1e6 triangular variates from the pestim package
# implementation, under a fixed seed (for comparison against rtriang2).
x <- function() {
  set.seed(1)
  pestim::rtriang(1e6, 0, 3, 1)
}
# Comparison draw: 1e6 triangular variates from the local rtriang2, under
# the same fixed seed as x().
y <- function() {
  set.seed(1)
  rtriang2(1e6, 0, 3, 1)
}
# Sanity check: the package implementation and the local rtriang2 must
# produce identical draws from the same seed.
stopifnot(identical(x(),y()))
set.seed(1)
# NOTE(review): 1e10 draws looks like a typo for 1e6 (the sample size used
# everywhere else in this script) -- as written it would attempt to allocate
# tens of gigabytes. Confirm before running.
hist(pestim::rtriang(1e10, 0, 3, 1))
#set.seed(1)
rtriang2(1e6, 0, 3, 1)
# Benchmark the package implementation against the local rewrite
res <- benchmark(pestim::rtriang(1e6, 0, 3, 1), rtriang2(1e6, 0, 3, 1), order="relative")
res[, 1:4]
|
bc8b80e5bd18d04ec7765796963f7a3e8bf07349
|
67be68eee5fa348fbff1cf5c435d7347a255a677
|
/cachematrix.R
|
fdc774c1a207d9d415b1fca31eacd7bc58cd6c1e
|
[] |
no_license
|
gvtorres/ProgrammingAssignment2
|
de2aba6533855af07878f28e32396c4972ac6594
|
2794df7d04387366972ac5af31d072a26ccb9e5a
|
refs/heads/master
| 2021-01-22T09:27:42.936575
| 2015-04-23T11:33:25
| 2015-04-23T11:33:25
| 34,422,763
| 0
| 0
| null | 2015-04-22T23:55:52
| 2015-04-22T23:55:51
| null |
UTF-8
|
R
| false
| false
| 965
|
r
|
cachematrix.R
|
# This R file contains the functions corresponding to Programming Assignment
# 02 from the R Programming course.
# The main goal is to learn about the <<- operator.
# This function creates a special kind of matrix that can
# cache its inverse and recall it in order to avoid unnecessary
# computations
# Create a matrix wrapper that can cache its inverse.
# Returns a list of four accessors:
#   set(y)    - replace the stored matrix and invalidate any cached inverse
#   get()     - return the stored matrix
#   setInv(v) - store a computed inverse in the cache
#   getInv()  - return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix())
{
  inv_cache <- NULL
  set <- function(y)
  {
    # <<- writes into the enclosing environment, so the new matrix and the
    # cleared cache persist across calls
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() x
  setInv <- function(solve) inv_cache <<- solve
  getInv <- function() inv_cache
  list(set = set, get = get,
       setInv = setInv, getInv = getInv)
}
## This function actually computes the matrix inverse
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
# On a cache hit the stored inverse is returned with a message; otherwise the
# inverse is computed with solve(), cached via x$setInv, and returned.
# Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x,...)
{
  cached <- x$getInv()
  if (is.null(cached)) {
    # cache miss: compute, store, return
    cached <- solve(x$get(), ...)
    x$setInv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
87390786826cd3d3c4c641b7a662c07a7ff1839c
|
6f41667e796a7a27e6d73707ae781caf28c581ff
|
/cachematrix.R
|
4e0090b6f556fafb8261504876bd369b23e1929c
|
[] |
no_license
|
Laffite-Buffon/ProgrammingAssignment2
|
ade5ae9da65a85d3a1c85ef3a89ce84626d963ca
|
67fdd4ce84225a5216707f8dab7ca5785f2edeeb
|
refs/heads/master
| 2021-01-18T14:53:55.193698
| 2015-10-25T20:24:35
| 2015-10-25T20:24:35
| 44,774,557
| 0
| 0
| null | 2015-10-22T21:33:09
| 2015-10-22T21:33:09
| null |
UTF-8
|
R
| false
| false
| 2,634
|
r
|
cachematrix.R
|
## makeCacheMatrix is a function that creates a special object able to
## return a solved (inverted) matrix from its cache. To do that, the
## cacheSolve function below must be applied afterwards.
## The function has several parts:
## First, it assigns the value NULL to "slv".
## Second, it creates a function named "set", which assigns "y" to "x"
## (allowing a new value to be set for the matrix) and NULL to "slv",
## in the enclosing environment as well as the current environment.
## The third part creates the "get" function, which returns the "x" value.
## The fourth part, "set.slv", creates a function that stores the solved
## matrix in "slv" in the enclosing environment.
## The "get.slv" function returns the "slv" value assigned by set.slv.
## The last expression creates a list of all these functions, each under
## its own name, so a function (or value) can be extracted from the list
## with "$" and its name.
# Build a caching wrapper around a matrix. The returned list exposes:
#   set(y)     - replace the stored matrix and drop any cached solution
#   get()      - return the stored matrix
#   set.slv(v) - store a solved (inverted) matrix in the cache
#   get.slv()  - return the cached solution, or NULL if none stored yet
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # superassignment keeps the new matrix / cleared cache across calls
    x <<- y
    cached <<- NULL
  }
  get <- function() x
  set.slv <- function(solve) {
    cached <<- solve
  }
  get.slv <- function() cached
  list(set = set, get = get,
       set.slv = set.slv,
       get.slv = get.slv)
}
## The cacheSolve function solves (inverts) the matrix stored in "x".
## First, the result of x$get.slv() is assigned to "slv". This value is
## NULL if no solved matrix has been stored via set.slv yet.
## Next comes an "if": when "slv" is not NULL (cacheSolve has been called
## before), the cached "slv" value is returned along with the message
## "getting cached data".
## Otherwise, the result of x$get() (the stored matrix) is assigned to
## "data", the solve() function is applied to "data", and the result is
## assigned to "slv".
## Then x$set.slv is applied to "slv" (the solved matrix), caching it so
## that "slv" is no longer NULL and the cached branch will trigger on the
## next call.
## Finally, the "slv" value (the solved matrix) is returned.
# Solve (invert) the matrix held by `x`, a makeCacheMatrix wrapper.
# A previously cached solution is returned immediately (with a message);
# otherwise the inverse is computed, stored via x$set.slv, and returned.
# Extra arguments in ... are passed through to solve().
cacheSolve <- function(x, ...) {
  result <- x$get.slv()
  if (!is.null(result)) {
    message("getting cached data")
  } else {
    result <- solve(x$get(), ...)
    x$set.slv(result)
  }
  result
}
## examples
## m <- matrix(rnorm(16), 4, 4)
## hilbert <- function(n) { i <- 1:n; 1 / outer(i - 1, i, "+") }
## h8 <- hilbert(8); h8
## #"an inner example" sh8 <- solve(h8)
## #"an inner example" round(sh8 %*% h8, 3)
## execution:
## hilbert <- function(n) { i <- 1:n; 1 / outer(i - 1, i, "+") }
## h8 <- hilbert(8); h8
## t_matrix <- makeCacheMatrix(h8)
## cacheSolve(t_matrix)
## m <- matrix(rnorm(16), 4, 4)
## m_matrix <- makeCacheMatrix(x)
## cacheSolve(m_matrix)
|
43bc00dea681ecf1b941a736aa8295403ecc4220
|
4dbfcd501cf1acd63a8f956a40c90993aef00ce3
|
/R/All_new_functions.r
|
7beaa552bab46a5471820d0611da2f9453f46c22
|
[
"MIT"
] |
permissive
|
changwn/ICTD
|
11a21902439f534d364bf518aef510d8814133cf
|
acb0d5c2c859b4c756e1ff50e6624046a2f68d36
|
refs/heads/master
| 2021-06-24T14:06:40.249973
| 2021-03-30T22:44:39
| 2021-03-30T22:44:39
| 224,000,547
| 3
| 0
| null | 2019-11-25T17:06:23
| 2019-11-25T17:06:22
| null |
UTF-8
|
R
| false
| false
| 14,435
|
r
|
All_new_functions.r
|
# For every gene (row) of the mutual-rank matrix carried in list_c, scan its
# co-expression neighbours in increasing mutual-rank order and keep the
# leading stretch whose growth rate (via the external helper
# calculate_growth_rate2 -- defined elsewhere) stays below the threshold in
# list_c[[3]] AND whose correlation with the gene exceeds 0.7.
# list_c is expected to hold: [[1]] mutual-rank matrix, [[2]] correlation
# matrix, [[3]] growth-rate cutoff.
# Returns a named list (one entry per gene) of (stats matrix, kept indices);
# the stats matrix is "" when fewer than two neighbours qualify.
# NOTE(review): data_CORS_cancer, IM_id_list and immune_cell_uni_table are
# accepted but never used in this body -- presumably kept for interface
# compatibility; confirm.
MRHCA_IM_compute_full_pub_new<-function(data_CORS_cancer,list_c,IM_id_list,immune_cell_uni_table=marker_stats20_uni,step_size0=20)
{
  MR_M<-list_c[[1]]
  cor_c<-list_c[[2]]
  down_lim<-list_c[[3]]
  print("Compute MR IM genes")
  MR_IM_result_c<-list()
  print(nrow(MR_M))
  step_size<-step_size0
  IM_id_list0<-IM_id_list
  MR_IM_result_c<-c()
  for(ii in 1:nrow(list_c[[1]]))
  {
    tg_gene<-rownames(MR_M)[ii]
    # sqrt of sorted mutual ranks for this gene
    x0<-sqrt(sort(MR_M[tg_gene,]))
    tg_growth_rate<-calculate_growth_rate2(x0,step=step_size)
    # neighbours whose growth rate stays under the cutoff
    tg_ccc1<-which(tg_growth_rate<down_lim)
    aaa<-sqrt(sort(MR_M[tg_gene,]))
    bbb3<-aaa/(1:length(aaa))
    # correlations re-ordered to match the mutual-rank ordering
    bbb4<-cor_c[tg_gene,names(sort(MR_M[tg_gene,]))]
    ddd<-cbind(1:length(tg_growth_rate),tg_growth_rate,bbb3,bbb4)
    colnames(ddd)[1:4]<-c("MR_order_ID","growth_rate","sorted_MR","Corr")
    # keep neighbours passing both the growth-rate and correlation (>0.7) filters
    tg_ccc3<-intersect(tg_ccc1,which(bbb4>0.7))
    fff<-""
    if(length(tg_ccc3)>1)
    {
      fff<-as.matrix(ddd[1:max(tg_ccc3),])
    }
    # progress report every 500 genes
    if(ii%%500==1)
    {
      print(ii)
    }
    MR_IM_result_c[[ii]]<-list(fff,tg_ccc3)
  }
  names(MR_IM_result_c)<-rownames(MR_M)[1:length(MR_IM_result_c)]
  return(MR_IM_result_c)
}
# Trim gene modules down to rank-1 cores.
# Modules already of rank 1 (module_rank[i] == 1) are kept verbatim. For
# higher-rank modules, the leading genes are grown from st0 upward while the
# external BCV_ttest2 test (defined elsewhere) still reports rank 1
# (exactly one p-value < 0.001); the last gene added before the test fails
# is dropped. Modules whose surviving core is not longer than st0 are
# discarded. Returns a named list of the kept gene vectors.
clean_rank1_module_new<-function(data_c,module_info,module_rank,st0=8,RR=50)
{
  N<-0
  nc<-c()
  module_new<-list()
  for(i in seq_along(module_info))
  {
    if(module_rank[i]==1)
    {
      N<-N+1
      nc<-c(nc,names(module_info)[i])
      module_new[[N]]<-module_info[[i]]
    }
    if(module_rank[i]>1)
    {
      ccc<-module_info[[i]]
      st<-st0
      rr<-1
      # BUG FIX: tg_genes must be reset for every module. In the original it
      # survived from the previous loop iteration, so a module shorter than
      # st0 (whose while-loop never runs) silently reused the PREVIOUS
      # module's genes -- or errored on the very first such module.
      tg_genes<-character(0)
      # '&&' instead of '&': both operands are scalars, so short-circuit
      # evaluation is the idiomatic (and since R 4.3, required-safe) form.
      while((rr==1)&&(st<=length(ccc)))
      {
        tg_genes<-c(ccc[1:st])
        pp<-BCV_ttest2(data_c[tg_genes,],rounds=RR,maxrank0=5)
        rr<-sum(pp<0.001)
      st<-st+1
      }
      # drop the last-added gene (the one that broke the rank-1 test)
      tg_genes<-tg_genes[-length(tg_genes)]
      if(length(tg_genes)>st0)
      {
        N<-N+1
        nc<-c(nc,names(module_info)[i])
        module_new[[N]]<-tg_genes
      }
    }
  }
  names(module_new)<-nc
  return(module_new)
}
# First filtering pass over candidate rank-1 marker lists.
# Pipeline: (1) build gene lists from list_c2 and keep rank-1 cores via
# BCV_ttest2 / clean_rank1_module_new / cut_modules; (2) merge lists whose
# pairwise Jaccard similarity (compute_min_jaccard) exceeds cut10;
# (3) re-sort and re-trim the merged modules; (4) label each surviving
# module with its most-enriched immune cell type.
# Relies on many externally defined helpers (RMSE_row, BCV_ttest2,
# BCV_ttest, cut_modules, clean_rank1_module(_new), compute_min_jaccard,
# rank_based_module_sorting) -- all defined elsewhere in the package.
# NOTE(review): max_cut is accepted but never used in this body.
R1_list_filtering_step1_new<-function(list_c2,data_CORS_cancer,max_cut=20,cutn0=20,cut10=0.8,IM_id_list,immune_cell_uni_table=immune_cell_uni_table0_GPL570)
{
  tg_1_rank_markers0<-list_c2[[1]]
  tg_m_names0<-list_c2[[2]]
  RMSE_on_CORS_cancer_c<-RMSE_row(data_CORS_cancer)
  # one gene list per candidate: the seed gene plus its retained neighbours
  tg_marker_lists<-list()
  for(i in 1:length(list_c2[[1]]))
  {
    ccc<-c(names(list_c2[[1]])[i],rownames(list_c2[[1]][[i]]))
    tg_marker_lists[[i]]<-ccc
  }
  names(tg_marker_lists)<-names(list_c2[[1]])
  # count significant BCV p-values per list (rank estimate)
  pp_all<-c()
  for(i in 1:length(tg_marker_lists))
  {
    pp<-sum(BCV_ttest2(data_CORS_cancer[tg_marker_lists[[i]],],maxrank0=20)<0.001, na.rm=T)
    pp_all<-c(pp_all,pp)
  }
  pp_R1_marker_list_f1<-clean_rank1_module_new(data_CORS_cancer,tg_marker_lists,pp_all,st0=6)
  pp_R1_marker_list_f1.5<-cut_modules(pp_R1_marker_list_f1,cutn=cutn0)
  # per-module stats for the retained genes (seed gene excluded via [-1])
  stat_p1<-list()
  for(i in 1:length(pp_R1_marker_list_f1.5))
  {
    tg_gene_c<-pp_R1_marker_list_f1.5[[i]][-1]
    stat_p1[[i]]<-tg_1_rank_markers0[[names(pp_R1_marker_list_f1.5)[i]]][tg_gene_c,]
  }
  # NOTE(review): renaming tg_marker_lists here (rather than stat_p1) looks
  # like a copy-paste slip -- confirm the intent.
  names(tg_marker_lists)<-names(pp_R1_marker_list_f1.5)
  tg_genes_all<-c()
  for(i in 1:length(pp_R1_marker_list_f1.5))
  {
    tg_genes_all<-c(tg_genes_all,pp_R1_marker_list_f1.5[[i]])
  }
  tg_genes_all<-unique(sort(tg_genes_all))
  print("filter 1 and stat done!")
  #ccc<-MAP_GPL570_genes2(R1_markers_f1)
  #names(ccc)<-names(Top_cell_proportion)
  #table(names(Top_cell_proportion))
  R1_markers_f1<-pp_R1_marker_list_f1.5
  cut1<-cut10
  # union-find-style merge of modules with Jaccard similarity above cut1:
  # each group is relabelled to the largest member index involved
  ccc<-compute_min_jaccard(R1_markers_f1)
  ccc0<-ccc>cut1
  stat_cc<-c(1:nrow(ccc0))
  names(stat_cc)<-1:nrow(ccc0)
  for(i in 1:nrow(ccc0))
  {
    for(j in 1:ncol(ccc0))
    {
      if((i<j)&(ccc0[i,j]>0))
      {
        nn<-max(i,j)
        stat_cc[which(stat_cc==i)]<-nn
        stat_cc[which(stat_cc==j)]<-nn
      }
    }
  }
  table(stat_cc)
  tg_ccc<-unique(stat_cc)
  # collapse each merged group into the union of its gene lists
  R1_marker_list_f2<-list()
  N<-0
  for(i in 1:length(tg_ccc))
  {
    N<-N+1
    tg_ids<-as.numeric(names(stat_cc)[which(stat_cc==tg_ccc[i])])
    ccc<-c()
    for(j in 1:length(tg_ids))
    {
      ccc<-c(ccc,R1_markers_f1[[tg_ids[j]]])
    }
    ccc<-unique(ccc)
    R1_marker_list_f2[[N]]<-ccc
  }
  R1_marker_list_f2.5_stat<-rank_based_module_sorting(data_CORS_cancer,R1_marker_list_f2,IM_id_list,immune_cell_uni_table=immune_cell_uni_table)
  R1_marker_list_f2.5<-R1_marker_list_f2.5_stat[[1]]
  # second rank check on the merged modules
  # NOTE(review): this pass uses BCV_ttest / clean_rank1_module while the
  # first pass used BCV_ttest2 / clean_rank1_module_new -- confirm the mix
  # of old/new helpers is intentional.
  pp_all<-c()
  for(i in 1:length(R1_marker_list_f2.5))
  {
    pp<-sum(BCV_ttest(data_CORS_cancer[R1_marker_list_f2.5[[i]],],maxrank0=20)<0.001, na.rm=T)
    pp_all<-c(pp_all,pp)
  }
  pp_R1_marker_list_f3<-clean_rank1_module(data_CORS_cancer,R1_marker_list_f2.5,pp_all,st0=6)
  pp_R1_marker_list_f3.5<-cut_modules(pp_R1_marker_list_f3,cutn=cutn0)
  R1_marker_list_f3.5_stat<-rank_based_module_sorting(data_CORS_cancer,pp_R1_marker_list_f3.5,IM_id_list,immune_cell_uni_table=immune_cell_uni_table)
  print("filter 2 done!")
  # per-module mean enrichment per immune cell type; each module is named
  # after its top-scoring cell type
  ccc<-c()
  nn<-c()
  for(i in 1:length(pp_R1_marker_list_f3.5))
  {
    ccc0<-c()
    for(j in 1:length(IM_id_list))
    {
      if(length(IM_id_list[[j]])>1)
      {
        cc0<-apply(immune_cell_uni_table[pp_R1_marker_list_f3.5[[i]],IM_id_list[[j]]],1,sum)/sum((1/(1:length(IM_id_list[[j]]))))
      }
      else
      {
        cc0<-immune_cell_uni_table[pp_R1_marker_list_f3.5[[i]],IM_id_list[[j]]]
      }
      ccc0<-cbind(ccc0,cc0)
    }
    colnames(ccc0)<-names(IM_id_list)
    ddd<-apply(ccc0,2,mean)
    ccc<-rbind(ccc,ddd)
    nn<-c(nn,colnames(ccc0)[which(ddd==max(ddd))[1]])
  }
  rownames(ccc)<-nn
  cell_enrich_stat<-ccc
  names(pp_R1_marker_list_f3.5)<-nn
  rrr<-rep(1,length(pp_R1_marker_list_f3.5))
  # bundle the intermediate and final lists for downstream steps
  Filter_1_result_list<-list(pp_R1_marker_list_f1,R1_marker_list_f2,R1_marker_list_f3.5_stat,pp_R1_marker_list_f3.5,rrr,cell_enrich_stat)
  names(Filter_1_result_list)<-c("R1_marker_list_f1","R1_marker_list_f2","R1_marker_list_f3.5_stat","R1_marker_list_f3.5","R1_marker_list_rank","R1_marker_list_f3.5_cell_enrich_stat")
  return(Filter_1_result_list)
}
# Re-order each gene module by its correlation with the module's first
# singular vector, and compute cumulative immune-cell enrichment profiles.
# For every module in tg_list: take the first right singular vector of the
# module's expression submatrix (sign-flipped so the mean gene correlation
# is positive), sort genes by decreasing correlation with it, then for each
# cell type in IM_id_list build a running (cumulative) enrichment score
# over the sorted genes from immune_cell_uni_table.
# Returns list(sorted gene lists, per-module enrichment matrices).
rank_based_module_sorting<-function(data_c,tg_list,IM_id_list,immune_cell_uni_table=marker_stats20_uni)
{
  tg_list_new<-list()
  tg_list_stat<-list()
  for(i in 1:length(tg_list))
  {
    # leading expression pattern of the module (first right singular vector)
    rr<-svd(data_c[tg_list[[i]],])$v[,1]
    # SVD sign is arbitrary: orient so genes correlate positively on average
    if(mean(cor(t(data_c[tg_list[[i]],]),rr))<0)
    {
      rr<--rr
    }
    tg_genes_cc<-tg_list[[i]][order(-cor(t(data_c[tg_list[[i]],]),rr))]
    # sqrt-transformed enrichment scores for the sorted genes
    ccc<-immune_cell_uni_table[tg_genes_cc,]^(1/2)
    ccc0<-c()
    for(j in 1:length(IM_id_list))
    {
      if(length(IM_id_list[[j]])>1)
      {
        # normalizer for multi-column cell types (harmonic-like weights)
        xx<-sum((1/(1:length(IM_id_list[[j]])))^(1/2))
        cc0<-(cumsum(apply(ccc[,IM_id_list[[j]]],1,sum))/xx/c(1:nrow(ccc)))
      }
      else
      {
        cc0<-(cumsum(ccc[,IM_id_list[[j]]])/c(1:nrow(ccc)))
      }
      ccc0<-cbind(ccc0,cc0)
    }
    colnames(ccc0)<-names(IM_id_list)
    tg_list_new[[i]]<-tg_genes_cc
    tg_list_stat[[i]]<-ccc0
  }
  return(list(tg_list_new,tg_list_stat))
}
# Select rank-1 marker candidates from MRHCA results.
# For each gene's result (stats matrix, kept indices), apply a cascade of
# filters: enough kept neighbours (num_cut), a long-enough consecutive
# stretch via the external scoring_MR_order helper, enough neighbours with
# correlation above cor_cut0, dominant cell-type enrichment above
# cell_type_enrich_cut, and at least num_cut2 genes strongly enriched for
# that cell type. Survivors are returned as
# list(per-gene stats matrices, gene names).
# NOTE(review): tg_key_c is only used by the commented-out PDF plotting code
# below; it is otherwise unused.
Process_MR_IM_result_new<-function(MR_IM_result_c=MR_IM_result_c,tg_key_c=tg_key_c,cor_cut0=0.7,cell_type_enrich_cut=0.5,num_cut=5,num_cut2=5,IM_id_list,immune_cell_uni_table=immune_cell_uni_table0_GPL570)
{
  tg_1_rank_markers<-list()
  tg_m_names<-c()
  N<-0
  print("Select Marker!")
  for(i in 1:length(MR_IM_result_c))
  {
    # filter 1: enough retained neighbours
    if(length(MR_IM_result_c[[i]][[2]])>=num_cut)
    {
      ss<-scoring_MR_order(MR_IM_result_c[[i]][[2]])
      # filter 2: leading consecutive stretch long enough
      if(ss>=num_cut)
      {
        ccc<-MR_IM_result_c[[i]][[1]][1:ss,]
        # filter 3: enough high-correlation neighbours (column 4 = "Corr")
        if(sum(ccc[,4]>cor_cut0)>=num_cut)
        {
          tg_ccc<-names(which(ccc[,4]>cor_cut0))
          if(length(tg_ccc)>=num_cut)
          {
            ccc1<-ccc[order(-ccc[,4]),]
            tg_gene<-rownames(ccc1)
            # sqrt-transformed enrichment scores, then cumulative means
            # per cell type over the correlation-sorted genes
            ccc<-immune_cell_uni_table[tg_gene,]^(1/2)
            ccc0<-c()
            for(j in 1:length(IM_id_list))
            {
              if(length(IM_id_list[[j]])>1)
              {
                xx<-sum((1/(1:length(IM_id_list[[j]])))^(1/2))
                cc0<-(cumsum(apply(ccc[,IM_id_list[[j]]],1,sum))/xx/c(1:nrow(ccc)))
              }
              else
              {
                cc0<-(cumsum(ccc[,IM_id_list[[j]]])/c(1:nrow(ccc)))
              }
              ccc0<-cbind(ccc0,cc0)
            }
            colnames(ccc0)<-names(IM_id_list)
            ccc3<-cbind(ccc1,ccc0)
            ddd<-apply(ccc0,2,mean)
            # filter 4: the dominant cell type must be enriched enough
            if(max(ddd)>cell_type_enrich_cut)
            {
              tg_c_ids<-names(which(ddd==max(ddd)))[1]
              tg_c_id0<-IM_id_list[[tg_c_ids]]
              eee<-ccc[,tg_c_id0]
              if(length(tg_c_id0)>1)
              {
                eee<-apply(ccc[,tg_c_id0],1,mean)
              }
              # filter 5: enough individual genes strongly enriched for
              # the dominant cell type
              if(sum(eee>0.5)>=num_cut2)
              {
                N<-N+1
                tg_1_rank_markers[[N]]<-ccc3[which(ccc3[,4]>cor_cut0),]
                tg_m_names<-c(tg_m_names,names(MR_IM_result_c)[i])
              }
            }
          }
        }
      }
    }
  }
  print("Select Marker Done!")
  # Optional diagnostic heatmaps (disabled): one page per selected marker
  #tg_RF2<-paste(tg_key_c,"_1rankmarker_cell_type_consistency.pdf",sep="")
  ##library(gplots)
  #colors = c(0:100)/100
  #my_palette <- grDevices::colorRampPalette(c("white","white", "blue"))(n =100)
  #pdf(tg_RF2)
  #for(i in 1:length(tg_1_rank_markers))
  #{
  # aaa<-tg_1_rank_markers[[i]][,-c(1:4)]
  # heatmap.2(aaa,Rowv=F,Colv =F,scale="none",main=tg_m_names[i],
  # col=my_palette,breaks=colors,density.info="none",dendrogram="both",
  # trace="none",margin=c(10,10),cexRow=0.5,cexCol=1)
  #}
  #dev.off()
  names(tg_1_rank_markers)<-tg_m_names
  list_cc<-list(tg_1_rank_markers,tg_m_names)
  return(list_cc)
}
# GPL570 (probe-level) variant of Process_MR_IM_result_new: select first-rank
# marker probes and keep only modules whose top correlated probes are
# consistently enriched in a single immune cell type.
#
# Bug fix versus the previous revision: the final `sum(eee > 0.5) >= num_cut2`
# check was placed OUTSIDE the `max(ddd) > cell_type_enrich_cut` block, so it
# used an `eee` that was undefined (first iteration) or stale from an earlier
# marker whenever the enrichment check failed. It now nests inside that block,
# matching the sibling function Process_MR_IM_result_new.
#
# Arguments mirror Process_MR_IM_result_new; additionally the probe ids are
# mapped to gene symbols via GPL570_id_symbol / GPL570_id_symbol0 (defined
# elsewhere) before the num_cut size check.
# Returns list(markers, names).
Process_MR_IM_result_GPL570_new<-function(MR_IM_result_c=MR_IM_result_c,tg_key_c=tg_key_c,cor_cut0=0.7,cell_type_enrich_cut=0.5,num_cut=5,num_cut2=5,IM_id_list,immune_cell_uni_table=immune_cell_uni_table0_GPL570)
{
  tg_1_rank_markers<-list()
  tg_m_names<-c()
  # N counts accepted markers; used to index tg_1_rank_markers.
  N<-0
  print("Select Marker!")
  for(i in 1:length(MR_IM_result_c))
  {
    if(length(MR_IM_result_c[[i]][[2]])>=num_cut)
    {
      # scoring_MR_order() (defined elsewhere) returns how many top rows to keep.
      ss<-scoring_MR_order(MR_IM_result_c[[i]][[2]])
      if(ss>=num_cut)
      {
        ccc<-MR_IM_result_c[[i]][[1]][1:ss,]
        if(sum(ccc[,4]>cor_cut0)>=num_cut)
        {
          tg_ccc<-names(which(ccc[,4]>cor_cut0))
          # Map probe ids to unique gene symbols before the size check.
          # NOTE(review): indexes GPL570_id_symbol0 with ids intersected against
          # GPL570_id_symbol[,1] -- confirm the two tables share row ids.
          tg_ccc2<-unique(GPL570_id_symbol0[intersect(GPL570_id_symbol[,1],c(names(MR_IM_result_c)[i],tg_ccc)),2])
          if(length(tg_ccc2)>=num_cut)
          {
            # Rank probes by decreasing correlation; `ccc` is then reused to
            # hold sqrt-scaled reference expression for the ranked probes.
            ccc1<-ccc[order(-ccc[,4]),]
            tg_gene<-rownames(ccc1)
            ccc<-immune_cell_uni_table[tg_gene,]^(1/2)
            ccc0<-c()
            # Running (cumulative mean) enrichment per cell type down the list.
            for(j in 1:length(IM_id_list))
            {
              if(length(IM_id_list[[j]])>1)
              {
                xx<-sum((1/(1:length(IM_id_list[[j]])))^(1/2))
                cc0<-(cumsum(apply(ccc[,IM_id_list[[j]]],1,sum))/xx/c(1:nrow(ccc)))
              }
              else
              {
                cc0<-(cumsum(ccc[,IM_id_list[[j]]])/c(1:nrow(ccc)))
              }
              ccc0<-cbind(ccc0,cc0)
            }
            colnames(ccc0)<-names(IM_id_list)
            ccc3<-cbind(ccc1,ccc0)
            ddd<-apply(ccc0,2,mean)
            if(max(ddd)>cell_type_enrich_cut)
            {
              # Best-matching cell type; eee = per-probe enrichment in it.
              tg_c_ids<-names(which(ddd==max(ddd)))[1]
              tg_c_id0<-IM_id_list[[tg_c_ids]]
              eee<-ccc[,tg_c_id0]
              if(length(tg_c_id0)>1)
              {
                eee<-apply(ccc[,tg_c_id0],1,mean)
              }
              # FIX: this check now runs only when eee was computed above.
              if(sum(eee>0.5)>=num_cut2)
              {
                N<-N+1
                tg_1_rank_markers[[N]]<-ccc3[which(ccc3[,4]>cor_cut0),]
                tg_m_names<-c(tg_m_names,names(MR_IM_result_c)[i])
              }
            }
          }
        }
      }
    }
  }
  print("Select Marker Done!")
  # Optional per-marker consistency heatmaps (left disabled by the author).
  #tg_RF2<-paste(tg_key_c,"_1rankmarker_cell_type_consistency.pdf",sep="")
  ##library(gplots)
  #colors = c(0:100)/100
  #my_palette <- grDevices::colorRampPalette(c("white","white", "blue"))(n =100)
  #pdf(tg_RF2)
  #for(i in 1:length(tg_1_rank_markers))
  #{
  #	aaa<-tg_1_rank_markers[[i]][,-c(1:4)]
  #	heatmap.2(aaa,Rowv=F,Colv =F,scale="none",main=tg_m_names[i],
  #          col=my_palette,breaks=colors,density.info="none",dendrogram="both",
  #          trace="none",margin=c(10,10),cexRow=0.5,cexCol=1)
  #}
  #dev.off()
  names(tg_1_rank_markers)<-tg_m_names
  list_cc<-list(tg_1_rank_markers,tg_m_names)
  return(list_cc)
}
|
658a5c4e1d7ddc86f1772334c598253e27a821d8
|
13110ac3fe1f3de135975f586e3b995ecb4588d2
|
/R/upm.R
|
cba0699a836e4491b8e88121fcc2e32ef4fce15e
|
[] |
no_license
|
biostata/tpidesigns
|
e933b32cd99cc522e9afdbdbf09210e1cc5e439b
|
215a886f48d0dc7dd3ebd838e3f32fa1e1c73fa1
|
refs/heads/master
| 2022-03-15T23:15:50.532759
| 2019-12-04T04:07:13
| 2019-12-04T04:07:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,775
|
r
|
upm.R
|
#' Calculation of Unit Probability Mass
#'
#' \code{UPM} calculates Unit Probability Mass for an interval (a, b) when the Underlying distribution is beta or mixture of two beta distributions.
#' @import stats
#' @param a,b Range Parameters between which UPM is needed to be calculated.
#' @inheritParams weights_formulate
#'
#' @details
#' Unit Probability MASS or UPM(a,b) = \eqn{(F(b) - F(a))/(b - a)}, defined for an interval (a,b), when X~F().
#' In this function, F() is assumed to be Cumulative Beta distribution function or mixture of two cumulative Beta distribution functions.
#' @details
#' Hence, \eqn{F(x) = w * pbeta(x, a1, b1) + (1 - w) * pbeta(x, a2, b2)}, pbeta is cumulative Beta distribution.
#' @details
#' If F() consists of a single Beta distribution, and not a mixture, then the convention here assumed is
#' to input \eqn{w = 1} and a1, b1 , or \eqn{w = 0} and a2,b2
#' @return Unit Probability Mass value or the UPM value for the interval (a, b)
#' @seealso
#' \code{\link{weights_formulate}}, \code{\link[stats]{Beta}}
#' @export
#'
#' @examples UPM(w = 1, a = 0.3, b = 0.4, a1 = 2, b1 = 5)
#' @examples UPM(w = 0, a = 0.3, b = 0.4, a2 = 2, b2 = 5)
#' @examples UPM(w = 0.3, a = 0.3, b = 0.4, a1 = 3, b1 = 6, a2 = 2, b2 = 5)
#' @examples UPM(w = 1, a = 0.3, b = 0.4, a1 = 2, b1 = 5, a2 = 7, b2 = 8) #will give warning
UPM <- function(w, a = 0, b = 1, a1 = NULL, b1 = NULL, a2 = NULL, b2 = NULL)
{
  # w is the mixture weight on the first (informative) prior; must lie in [0, 1].
  if(isTRUE(w < 0 || w > 1))
  {
    stop("w is weight taken on first prior (informative), which can lie between 0 and 1")
  }
  # Clamp (a, b) to the support of the Beta distribution, warning the caller.
  if(isTRUE(a < 0))
  {
    a = 0
    warning("Domain of Beta distribution is (0,1), changing a to 0")
  }
  if(isTRUE(b > 1))
  {
    b = 1
    # Fixed message: it is b (not a) that is reset to 1 here.
    warning("Domain of Beta distribution is (0,1), changing b to 1")
  }
  # Fixed message: closed the unbalanced "(0,1" parenthesis.
  if(isTRUE(a >= b)) stop("a must be less than b and both should lie within (0,1)")
  # Feasibility of the prior parameters: count how many are missing.
  a1_null = is.null(a1)
  b1_null = is.null(b1)
  a2_null = is.null(a2)
  b2_null = is.null(b2)
  total_null = a1_null + b1_null + a2_null + b2_null
  if (isTRUE(total_null == 4))
  {
    stop("Please input a1, a2, b1, b2 properly. ")
  }
  # Degenerate mixture (w == 0 or 1): exactly one complete (ai, bi) pair is
  # required. If only (a2, b2) was supplied, copy it into (a1, b1), which is
  # the pair actually used in the computation below.
  if(w %in% c(0, 1))
  {
    if (isTRUE(total_null == 2))
    {
      if(isTRUE((a2_null + b2_null) == 1))
      {
        stop("Please input either both a1 and b1, or both a2 and b2, (ai,bi) is the pair of parameters. For Uniform Distribution, either put a1 = 1, b1 = 1, or put, a2 = 1 and b2 = 1")
      }
      else if (isTRUE((a2_null + b2_null) == 0))
      {
        a1 = a2
        b1 = b2
        warning("You should put the parameter values for a1 and b1 instead of a2 and b2")
      }
    }
    else if (total_null %in% c(1,3))
    {
      stop("Please input a1, b1, a2, b2 properly, (ai,bi) is the pair of parameters. For Uniform Distribution, either put a1 = 1, b1 = 1, or put, a2 = 1 and b2 = 1")
    }
    else if (isTRUE(total_null == 0))
    {
      warning("Check inputs for prior parameters, taking a1 and b1 as original parameters")
    }
  }
  else
  {
    # Proper mixture: all four parameters are required.
    if (isTRUE(total_null > 0))
    {
      stop("Please input model parameters for both priors properly")
    }
  }
  # UPM(a, b) = (F(b) - F(a)) / (b - a), with F a Beta CDF or a two-component mixture.
  if (w %in% c(0,1))
  {
    val = (pbeta(b, shape1 = a1, shape2 = b1) - pbeta(a, shape1 = a1, shape2 = b1)) / (b - a)
  }
  else
  {
    val = w * ((pbeta(b, shape1 = a1, shape2 = b1) - pbeta(a, shape1 = a1, shape2 = b1)) / (b - a)) +
      (1 - w) * ((pbeta(b, shape1 = a2, shape2 = b2) - pbeta(a, shape1 = a2, shape2 = b2)) / (b - a))
  }
  return(val)
}
#' Graphical plot of Unit Probability MASS
#'
#' \code{upmplot} Produces a graphical plot of Unit Probability Mass for a given set of parameters.
#' @import ggplot2
#' @inheritParams weights_formulate
#' @param pt Target toxicity proportion to achieve in current Dose Level (Less Toxicity means under- dosing, where as more toxicity means over - dosing)
#' @param e1 Amount of variation that can be allowed to the left of the pt value to conclude that target toxicity has been achieved.
#' Default value is 0.05. This means, that if a Posterior Toxicity (DLT) mean takes a value within the range (pt - e1, pt), toxicity for the cohort (of size >= 3) will be achieved.
#' @param e2 Amount of variation that can be allowed to the right of the pt value to conclude that target toxicity has been achieved.
#' Default value is 0.05. This means, that if a Posterior Toxicity (DLT) mean takes a value within the range (pt, pt + e2), toxicity for the cohort (of size >= 3) will be achieved.
#' @param design The Design that is implemented in the trials. This arguement includes values "mtpi" and "mmtpi"
#'
#' @return A graph that includes Probability Distributions of the Dose Limiting Toxocity Rate and value of Unit Probability Mass at corresponding intervals.
#' @inherit UPM details
#' @section Decision Making Based on UPM values:
#' For modified Toxicity Probability Interval (mTPI) Design, the toxicity range (0,1) is divided into
#' three ranges, (1) Under-Dosing Interval [0, pt - e1), (2) Target-Toxicity Interval [pt - e1, pt - e2], (3) Over-Dosing Interval (pt + e2, 1].
#' UPM is calculated for the the above intervals and Decision is taking accordingly,\cr if the UPM is maximum for interval (1),
#' then the strength of the current Dosage is escalated,\cr if its maximum for Interval (2), then more patients are administered with
#' current dose,\cr if the UPM is maximum in interval (3), then strength of the current Dose is de-escalated.\cr For Modified Toxicity Interval Design-2 (mTPI -2, encoded as "mmtpi")
#' the intervals (1) and (3) are again divided into another sub- intervals and same steps are followed.\cr But, before that, we must ensure that the Dose is not severely toxic
#' and hence it is advised to run the \code{\link{decisiontpi}} function to know about the severity of current Dose.The graphical display will be meaningful only if \code{\link{decisiontpi}} does not return the value "DU"
#' @seealso
#' \code{\link{UPM}}, \code{\link{weights_formulate}}
#' @export
#'
#' @examples require(ggplot2)
#' @examples n = 13 #must be a value >= 3
#' @examples x = sample.int(n, 1)
#' @examples upmplot(x = 5, n = 7, pt = 0.3, design = "mmtpi", w = 0.1, a1 = 1, a2 = 1, b1 = 4, b2 = 6)
upmplot <- function(x , n , pt, e1 = 0.05, e2 = 0.05, design = c("mtpi", "mmtpi"), w, a1 = NULL, b1 = NULL, a2 = NULL, b2 = NULL)
{
  # Resolve the design choice up front. Fix: previously the unmodified default
  # (a length-2 vector) reached `design == "mtpi"`, giving a length > 1
  # condition (an error in modern R). match.arg() selects "mtpi" by default and
  # validates user-supplied values.
  design <- match.arg(design)
  if(isTRUE(pt > 1 || pt < 0))
  {
    stop("Target toxicity Probability should take values between 0 and 1")
  }
  if(isTRUE(pt - e1 < 0 || pt + e2 > 1))
  {
    stop ("e1 and e2, two thresholds should be small compared to the target probability pt")
  }
  if (isTRUE(w > 1))
  {
    stop("Weight on informative prior can be at most 1")
  }
  else if (isTRUE(w < 0))
  {
    stop("Weight on a prior can not be negative")
  }
  # Beta shape parameters must be strictly positive.
  if (isTRUE(any(c(a1, b1, a2, b2) <= 0) == TRUE))
  {
    stop("Beta parameters must be non-negative")
  }
  # x = number of DLT events observed out of n patients treated.
  if (isTRUE(n < 1))
  {
    stop("The trial size must be at least 1")
  }
  if(isTRUE(x > n))
  {
    stop("Number of successes for the event (i.e. experiencing DLT 's) must be lower than total number of trials (i.e. patients treated)")
  }
  # Feasibility of the prior parameters (same convention as UPM()).
  a1_null = is.null(a1)
  b1_null = is.null(b1)
  a2_null = is.null(a2)
  b2_null = is.null(b2)
  total_null = a1_null + b1_null + a2_null + b2_null
  if (isTRUE(total_null == 4))
  {
    stop("Please input a1, a2, b1, b2 properly. ")
  }
  if(w %in% c(0, 1))
  {
    if (isTRUE(total_null == 2))
    {
      if(isTRUE((a2_null + b2_null) == 1))
      {
        stop("Please input either both a1 and b1, or both a2 and b2, (ai,bi) is the pair of parameters. For Uniform Distribution, either put a1 = 1, b1 = 1, or put, a2 = 1 and b2 = 1")
      }
      else if (isTRUE((a2_null + b2_null) == 0))
      {
        a1 = a2
        b1 = b2
        warning("You should put the parameter values for a1 and b1 instead of a2 and b2")
      }
    }
    else if (total_null %in% c(1,3))
    {
      stop("Please input a1, b1, a2, b2 properly, (ai,bi) is the pair of parameters. For Uniform Distribution, either put a1 = 1, b1 = 1, or put, a2 = 1 and b2 = 1")
    }
    else if (isTRUE(total_null == 0))
    {
      warning("Check inputs for prior parameters, taking a1 and b1 as original parameters")
    }
  }
  else
  {
    if (isTRUE(total_null > 0) )
    {
      stop("Please input model parameters for both priors properly")
    }
  }
  # Build the interval boundaries for the chosen design.
  if (design == "mtpi")
  {
    interval = c(0, pt - e1, pt + e2, 1)
    length_interval = length(interval)
  }
  else if (design == "mmtpi")   # fix: branch tested "mtpi" too, already handled above
  {
    # mTPI-2: subdivide the under- and over-dosing regions in 0.1-wide steps.
    breaks_lower = floor((pt - e1) / 0.1)
    breaks_upper = floor((1 - pt - e2) / 0.1)
    interval = c(0, pt - e1 - 0.1 * (breaks_lower : 0) , pt + e2 + 0.1 * (0 : breaks_upper) , 1)
    length_interval = length(interval)
  }
  else
  {
    # Unreachable after match.arg(); kept as a defensive guard.
    stop("Please input one input among the designs: mtpi, mmtpi")
  }
  # Posterior parameters / weight from the observed data (project helper).
  params = weights_formulate(w = w, x = x, n = n, a1 = a1, b1 = b1, a2 = a2, b2 = b2)
  w = params$weight
  a1 = params$param_inform[1]
  b1 = params$param_inform[2]
  a2 = params$param_noninform[1]
  b2 = params$param_noninform[2]
  # One UPM value per interval.
  upm_array= rep(0, length_interval - 1)
  for(i in 1: (length_interval - 1))
  {
    upm_array[i] = UPM(w = w, a = interval [i], b = interval [i + 1], a1 = a1, b1 = b1, a2 = a2, b2 = b2)
  }
  # Posterior DLT density (single Beta or mixture).
  if (w %in% c(0,1))
  {
    plotupm = ggplot(data.frame(x=seq(0.01,1,0.01)), aes(x)) +
      stat_function(fun=function(x) dbeta(x, shape1 = a1, shape2 = b1))
  }
  else
  {
    plotupm = ggplot(data.frame(x=seq(0.01,1,0.01)), aes(x)) +
      stat_function(fun=function(x) w * dbeta(x, shape1 = a1, shape2 = b1) + (1 - w) * dbeta(x, shape1 = a2, shape2 = b2))
  }
  # Dashed verticals at interval boundaries; horizontal segments at UPM heights.
  plotupm_addY <- plotupm + geom_vline(xintercept = interval, linetype="dashed", color = "steelblue", size = 0.7)
  segment_x <- interval[-length_interval]
  segment_xend <- interval[-1]
  segment_y <- segment_yend <- upm_array
  segment_data <- data.frame(x = segment_x, y = segment_y, x_end = segment_xend, y_end = segment_yend)
  # Fix: map columns of the layer data directly instead of `segment_data$...`
  # inside aes(), which bypasses ggplot's data masking.
  plotupm_addsegments <- plotupm_addY +
    geom_segment(data = segment_data, mapping = aes(x = x, xend = x_end, y = y, yend = y_end))
  plotupm_addfootnote <- plotupm_addsegments +
    labs(title = " Plotting of UPM values and Posterior DLT distribution", x = "DLT Rate", y = "Unit Probability Mass (UPM)",
         caption = "The Dashed lines represent the intervals and the Horizontal lines represent the UPM")
  return(plotupm_addfootnote)
}
|
dc42810662989a31a39f0af384e1632d39fb4ec5
|
96ddc8c398f162250ebaef2ab76436ab5a60c3bb
|
/pre_process/optimize.R
|
c4d1c360d7ea90860ed72f791473986f9d473407
|
[] |
no_license
|
kylemonper/EDF-forest-mgmt
|
670ebb91547420d5d1379ea8daabdeb92709cb89
|
216d23f78648eac2c97a716b01d1e7c1514435cb
|
refs/heads/master
| 2022-12-25T10:05:10.522874
| 2020-09-30T16:05:18
| 2020-09-30T16:05:18
| 272,543,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,353
|
r
|
optimize.R
|
#####################
##### Optimize #####
###################
# Select, for every plot (ID), the treatment package with the best carbon value
# at a fixed carbon price, then plot marginal cost curves (MCC) of cumulative
# carbon vs $/ton. Input comes from the pre-processing step's CSV.
library(tidyverse)
### final step:
## we now have final discounted values for each package for this plot, now select the package with the lowest CPU
relative_carb <- read_csv("output_data/relative_carb_og_05.csv")
# Carbon price ($/ton) used to value each package.
price <- 200
## new method for selecting optimal (based on value of carbon)
# Per plot: value = price * carbon - cost; keep only positive-value packages
# and, of those, the maximum-value one(s). Package "031" is excluded.
optimal <- relative_carb %>%
  filter(total_carbon > 0 & rxpackage != "031") %>%
  mutate(value = (price * total_carbon) - total_cost) %>%
  group_by(ID) %>%
  filter(value > 0 &
           value == max(value))
# Break value ties by picking one package at random per plot.
# NOTE(review): sample_n() is unseeded here, so results are not reproducible.
opt_tie_break <- optimal %>%
  group_by(ID) %>%
  sample_n(1) %>%
  ungroup()
# Same selection excluding the clear-cut packages "032"/"033".
optimal_noCC <- relative_carb %>%
  filter(total_carbon > 0 ) %>%
  filter(!rxpackage %in% c("032", "033")) %>%
  mutate(value = (price * total_carbon) - total_cost) %>%
  group_by(ID) %>%
  filter(value > 0 &
           value == max(value))
opt_tie_break_nocc <- optimal_noCC %>%
  group_by(ID) %>%
  sample_n(1) %>%
  ungroup()
###################################
############ RESULTS ##############
###################################
###### MCC #######
## get cumsum
# NOTE(review): the variable name `cumsum` shadows base::cumsum; it still works
# because R resolves the function call separately, but renaming would be clearer.
cumsum <- opt_tie_break %>%
  arrange(cpu) %>%
  filter(cpu > -100 & cpu < 200) %>%
  mutate(cumsum_carb = cumsum(total_carbon))
cumsum_noCC <- opt_tie_break_nocc %>%
  arrange(cpu) %>%
  filter(cpu > -100 & cpu < 200) %>%
  mutate(cumsum_carb = cumsum(total_carbon))
library(scales) # for comma in x axis
# MCC including clear-cut packages.
ggplot(cumsum, aes(cumsum_carb, cpu)) +
  geom_point(aes(color = cpu)) +
  scale_colour_gradient2(low = "forestgreen", mid = "yellow", high = "red", midpoint = 50) +
  scale_x_continuous(limits = c(0, 60000000),label=comma) +
  scale_y_continuous(limits = c(-150,220), expand = c(0,0)) +
  theme_minimal(base_size = 24) +
  theme(legend.position = "none") +
  labs(
    x = "Tons of Carbon",
    y = "$/Ton of Carbon",
    title = "MCC (w/ CC)"
  )
# MCC without clear-cut packages.
ggplot(cumsum_noCC, aes(cumsum_carb, cpu)) +
  geom_point(aes(color = cpu)) +
  scale_colour_gradient2(low = "forestgreen", mid = "yellow", high = "red", midpoint = 50) +
  scale_x_continuous(limits = c(0, 60000000),label=comma) +
  scale_y_continuous(limits = c(-150,220), expand = c(0,0)) +
  theme_minimal(base_size = 24) +
  theme(legend.position = "none") +
  labs(
    x = "Tons of Carbon",
    y = "$/Ton of Carbon",
    title = "MCC (w/o CC)"
  )
### repeat for using rev
# Same selection but netting harvest revenue (total_val) out of cost.
optimal_rev <- relative_carb %>%
  filter(total_carbon > 0 & !rxpackage %in% c("031", "032", "033")) %>%
  mutate(value = (price * total_carbon) - (total_cost - total_val)) %>%
  group_by(ID) %>%
  filter(value > 0 &
           value == max(value))
opt_tie_break_rev <- optimal_rev %>%
  group_by(ID) %>%
  sample_n(1) %>%
  ungroup()
cumsum_rev <- opt_tie_break_rev %>%
  arrange(cpu_rev) %>%
  mutate(cumsum_rev = cumsum(total_carbon))
# NOTE(review): `test` is computed but never used below.
test <- opt_tie_break_rev %>%
  filter(owngrpcd == 40)
ggplot(cumsum_rev, aes(cumsum_rev, cpu_rev)) +
  geom_point() +
  #scale_colour_gradient2(low = "forestgreen", mid = "yellow", high = "red") +
  scale_x_continuous(limits = c(-1000, 95000000), expand = c(0,0),label=comma) +
  scale_y_continuous(limits = c(-1200,400), expand = c(0,0)) +
  geom_hline(yintercept = 0) +
  theme_minimal(base_size = 24) +
  theme(legend.position = "none") +
  labs(
    x = "Tons of Carbon",
    y = "$/Ton of Carbon"
  )
|
68d6a75ccc888c69955546a70763f009352547a3
|
0b4a31d79b6a5258c7e681a74a6b13c71576f853
|
/notebooks/CoxRegression.R
|
b8cfe95515a01fab870f1b8a022846d84300304d
|
[] |
no_license
|
liacov/OPTproj
|
10c7c6f3ad324f1ad3a39066d733c6578376b433
|
a5c06347a454e0c206125d146ab43d17bffa003a
|
refs/heads/master
| 2023-03-30T13:09:43.668082
| 2021-04-08T09:22:54
| 2021-04-08T09:22:54
| 265,876,095
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,441
|
r
|
CoxRegression.R
|
# Load TCGA KIRC RNA-seq and clinical data for Cox regression preparation.
# set directory to where the RNA and Clinical folders are
# NOTE(review): hard-coded setwd() to a personal path makes this non-portable.
setwd("/Users/federicomatteo/Downloads/")
library(survival)
# read RNA file (RSEM-normalized gene expression, genes x samples)
rna <- read.table('RNA/KIRC.rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt',nrows=20533, header=T,row.names=1,sep='\t')
# drop the first row (a second header line we don't need)
rna <- rna[-1,]
# read the Clinical file, transposed so clinical feature titles become column names
clinical <- t(read.table('/Users/federicomatteo/Desktop/OPTproj/Data/Clinical/KIRC.merged_only_clinical_clin_format.txt', header=T, row.names=1, sep='\t'))
clinical = as.data.frame(clinical)
# NOTE(review): `df` is not defined at this point, so this line errors when the
# script is sourced top-to-bottom; presumably `names(clinical)` was intended.
names(df)
# first remove genes whose expression is <= 275.735 in more than 50% of the samples:
# Identify rows (genes) whose expression is <= 275.735 in more than half of
# the samples; returns the integer row indices to remove.
rem <- function(x) {
  # Coerce each row to numeric (columns may not be numeric after read.table),
  # rebuilding a genes-x-samples numeric matrix.
  mat <- t(apply(as.matrix(x), 1, as.numeric))
  n_samples <- dim(mat)[2]
  # Per-gene count of low-expression samples (NA propagates, as before).
  low_per_gene <- as.numeric(apply(mat, 1, function(row) sum(row <= 275.735)))
  which(low_per_gene > n_samples * 0.5)
}
# sum of the gene smaller than 275.735 gives a dimension d = 9375 (paper = 9376)
remove <- rem(rna)
rna <- rna[-remove,]
dim(rna)
# TCGA barcode character 14 encodes sample type (0 = tumor, 1 = normal).
table(substr(colnames(rna),14,14))
dim(rna)
write.table(rna, "/Users/federicomatteo/Desktop/OPTproj/Data/mydata.txt", sep=";")
# NOTE(review): `new` is not defined anywhere in this script; these two lines
# (and their duplicates below) error as written -- probably meant `rna`.
n_index <- which(substr(colnames(new), 14, 14) == '1')
t_index <- which(substr(colnames(new), 14, 14) == '0')
########
# check which patients of rna are in clinical (done in python at the end)
# NOTE(review): the sapply() result is discarded; it has no effect as written.
sapply(rownames(rna), function(x) unlist(strsplit(x,'\\|'))[[1]])
# Truncate column names to the 12-char patient barcode, dots -> dashes.
colnames(rna) <- gsub('\\.','-',substr(colnames(rna),1,12))
View(rna)
clinical$IDs <- toupper(clinical$patient.bcr_patient_barcode)
# NOTE(review): `df` is undefined here too; presumably `clinical$IDs` was meant.
sum(df$IDs %in% colnames(rna))
n_index <- which(substr(colnames(new),14,14) == '1')
t_index <- which(substr(colnames(new),14,14) == '0')
rna[ , n_index & t_index & df$IDs]
# Event indicator: 1 = dead, 0 = alive.
clinical$death_event <- ifelse(clinical$patient.vital_status == 'alive', 0,1)
#########
# select day of death: collapse the several days_to_death columns to their
# per-patient maximum (NA rows become the string 'NA').
# NOTE(review): appending the *string* 'NA' (not real NA) forces the vector to
# character; the as.numeric(as.character(...)) coercion below relies on that.
ind_keep <- grep('days_to_death',colnames(clinical))
death <- as.matrix(clinical[,ind_keep])
death_collapsed <- c()
for (i in 1:dim(death)[1]){
  if ( sum ( is.na(death[i,])) < dim(death)[2]){
    m <- max(death[i,],na.rm=T)
    death_collapsed <- c(death_collapsed,m)
  } else {
    death_collapsed <- c(death_collapsed,'NA')
  }
}
clinical$death_days = death_collapsed
# select day of last follow up (last time in which patient has been observed, from this
# day on we do not have any information about the patient)
ind_keep <- grep('days_to_last_followup',colnames(clinical))
fl <- as.matrix(clinical[,ind_keep])
fl_collapsed <- c()
for (i in 1:dim(fl)[1]){
  if ( sum(is.na(fl[i,])) < dim(fl)[2]){
    m <- max(fl[i,],na.rm=T)
    fl_collapsed <- c(fl_collapsed,m)
  } else {
    fl_collapsed <- c(fl_collapsed,'NA')
  }
}
clinical$followUp_days = fl_collapsed
# combine follow up and death info to create a unique vector regarding time observations
# (observed death time when available, otherwise censoring time).
clinical$new_death <- c()
for (i in 1:length(as.numeric(as.character(clinical$death_days)))){
  clinical$new_death[i] <- ifelse (is.na(as.numeric(as.character(clinical$death_days))[i]),
                                   as.numeric(as.character(clinical$followUp_days))[i],as.numeric(as.character(clinical$death_days))[i])
}
clinical$new_death # time
clinical$death_event # y
# Export the minimal survival table (time, event, patient ID).
new_clinical = clinical[,c("new_death","death_event","IDs")]
plot(new_clinical$new_death, new_clinical$death_event)
write.table(new_clinical, "/Users/federicomatteo/Desktop/OPTproj/Data/SurvivalTimes.txt", sep=";")
|
9fe3300425a4feac08231e038912051a63c1ad63
|
b0b4891b6df683a8b754e4469eb40d2694cb18e0
|
/Examens/Examen2017.R
|
a6f777534736971c6cadb5e8c7d0ba32c2035483
|
[] |
no_license
|
JulienNique/M2DM-PC4DS
|
79c5c816e414fbd1798ca36bbdef57152e9428ed
|
ceda6f989b30000827fef6cb3c9271f69c50f31d
|
refs/heads/main
| 2023-01-03T09:12:03.596313
| 2020-10-22T16:50:41
| 2020-10-22T16:50:41
| 302,307,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,335
|
r
|
Examen2017.R
|
## Fichier: 2017pc4ds-trump.r
## Etudian : D. Trump
## Description : Rendu de l'examen du PC4DS 2017
## Date : 26 janvier 2017
# NOTE(review): exam template -- rm(list = ls()) is normally an anti-pattern,
# kept here because the exam expects a clean workspace at the top of the script.
rm(list = ls())
################################################################################
####                                                                        ####
####                      E X E R C I C E   1                               ####
####                                                                        ####
################################################################################
## Un collègue qui travaille sur le test d'indépedance Chi-2 vous transmet le
## code R ci dessous :
# NOTE(review): reference implementation supplied by the exam statement. It
# counts the number of NA values per row of matrix `a`. It is intentionally
# non-idiomatic (res grown with c() inside nested scalar loops); questions
# 2-5 below ask for readable / vectorised / parallel versions, so the code is
# deliberately left unchanged here.
myfun<-function(a){da <- dim(a)
n <- da[1]; m <- da[2]; res <- c()
for(i in seq(n)){res<-c(res,0)
for(j in seq(m)){if(is.na(a[i,j])) res[i]<-res[i]+1}}
return(res)}
# test
# Build a 1e4-row matrix where roughly 80% of the 5e5 entries are NA,
# then count NAs per row with the reference implementation.
nas <- matrix(ifelse(runif(5e5) > 0.2, NA, 1), 1e4)
a <- matrix(rnorm(5e5), 1e4) * nas
myfun(a)
# Il vous demande de l'aide pour améliorer le temps de calcul.
# __ 1. Rajoutez une description du code ####
# en suivant le canevas ci dessous
# Description : ...
# Entrée : ...
# Sortie : ...
# __ 2. Écrivez une version plus facile à lire de myfun ####
# Corrigez la mise en format du code, le noms des objets intermédiaires
# et commentez-le si besoin.
# __ 3. Écrivez une version vectorisée de la fonction dans 2. ####
# __ 4. Écrivez une première version en parallèle de la fonction dans 2. ####
# en utilisant la librarie foreach (parallélisme implicite) et 2 noeuds de
# calcul.
# __ 5. Écrivez une deuxièeme version en parallèle de myfun2 ####
# en utilisant la librarie parallel (parallélisme explicite) et 2 noeuds de
# calcul.
# __ 6. Obtenez les temps d'exécution ####
# de toutes les versions de la fonction myfun que vous avez écrit.
# Quelle est la plus performant?
################################################################################
#### ####
#### E X E R C I C E 2 ####
#### ####
################################################################################
# Obtenir une version plus performante del fonction main (cf. fichier
# simulate_multivariate.r)
# Vous serez notés en fonction du gain obtenu. Ne modifiez pas les valeurs
# du point 1, UNIQUEMENT la fonction main.
# Astuce : n'essayez pas de rentrer dans le détail du code (assez long et )
#library(CDVine)
library(energy)
library(mvtnorm)
# main() and its helpers are defined in this sourced file.
source('simulate_exam.r')
## 1. Simulation settings (do not modify; the exam grades only main()) ####
corel <- seq(0.5, 0.95, by = 0.15) # correlations
nb_data <- seq(20, 200, length.out = 4) # real data sample sizes
nb_var <- 4 # number of variables
n_simu <- nb_data # simulated data sample sizes
nb.iter <- 2 # number of iterations
nb.test <- 5 # number of tests
method.vect <- c("indep", "indepPCA") # simulation methods
## 2. Run the simulations and time them ####
system.time(
  multitest <- main(method.vect, corel, nb_data, nb_var,
                    n_simu, nb.iter, nb.test)
)
|
bf4fd9b52deb2ce807809d718fcbc1ba3506239b
|
973c2d68485cfc1c6fde427effb95fed8442ca5d
|
/analysis/switchde/3_parse_switchde_results.R
|
fa41a6759d54c0f92c054ffe2862ef25ee539ee6
|
[] |
no_license
|
kieranrcampbell/ouija-paper
|
89bb0965a645901ded92c02515143a40bafbc256
|
02f50ffa002af1cb12dfea6a8196adb803c60845
|
refs/heads/master
| 2020-11-30T00:34:11.735315
| 2018-02-03T19:33:27
| 2018-02-03T19:33:27
| 95,869,625
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,390
|
r
|
3_parse_switchde_results.R
|
suppressPackageStartupMessages(library(scater))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(readr))
suppressPackageStartupMessages(library(aargh))
suppressPackageStartupMessages(library(purrr))
suppressPackageStartupMessages(library(cowplot))
# Parse switchde posterior trace CSVs, summarise switch-strength (k) and
# switch-time (t0) posteriors for genes significant (qval < 0.05) in every
# trace, and save a two-panel figure of 100 sampled genes.
#
# input_directory: directory of per-trace CSVs with columns gene, gsn, qval, k, t0.
# output_png:      path the combined figure is written to.
plot_switchde_results <- function(input_directory = "../../data/switchde/sde",
                                  output_png = "../../figs/switchde/fig_switchde.png") {
  trace_csvs <- dir(input_directory, full.names = TRUE)
  df <- map_df(trace_csvs, read_csv)
  # Genes significant in every trace file.
  signif_genes <- group_by(df, gene, gsn) %>%
    summarise(all_signif = all(qval < 0.05)) %>%
    filter(all_signif) %>%
    .$gene
  dfs <- filter(df, gene %in% signif_genes)
  # Posterior medians and 90% credible intervals for k and t0 per gene.
  dfg <- group_by(dfs, gene, gsn) %>%
    summarise(k_median = median(k),
              k_lower = quantile(k, probs = 0.05),
              k_upper = quantile(k, probs = 0.95),
              t0_median = median(t0),
              t0_lower = quantile(t0, probs = 0.05),
              t0_upper = quantile(t0, probs = 0.95)) %>%
    ungroup()
  # Drop genes with extreme / unstable posteriors before plotting.
  dfgr <- filter(dfg,
                 abs(k_median) < 10,
                 abs(t0_median) < 10,
                 abs(k_lower) < 20,
                 abs(k_upper) < 20,
                 abs(t0_lower) < 20,
                 abs(t0_upper) < 20)
  # Seeded so the 100-gene sample is reproducible.
  set.seed(123L)
  df_sample_k <- df_sample_t0 <- sample_n(dfgr, 100)
  # Panel A: switch strength, ordered by posterior median.
  df_sample_k$gsn <- factor(df_sample_k$gsn,
                            levels = df_sample_k$gsn[order(df_sample_k$k_median)])
  pltk <- ggplot(df_sample_k, aes(x = gsn, y = k_median)) +
    geom_errorbar(aes(ymin = k_lower, ymax = k_upper)) +
    geom_point() +
    coord_flip() +
    labs(x = "Gene", y = "Switch strength") +
    theme(axis.text.y = element_text(size = 5))
  # Panel B: switch time, ordered by posterior median.
  df_sample_t0$gsn <- factor(df_sample_t0$gsn,
                             levels = df_sample_t0$gsn[order(df_sample_t0$t0_median)])
  pltt0 <- ggplot(df_sample_t0, aes(x = gsn, y = t0_median)) +
    geom_errorbar(aes(ymin = t0_lower, ymax = t0_upper)) +
    geom_point() +
    coord_flip() +
    labs(x = "Gene", y = "Switch time") +
    theme(axis.text.y = element_text(size = 5))
  pltg <- plot_grid(pltk, pltt0, ncol = 2, labels = "AUTO")
  # Fix: pass the combined grid explicitly. Without `plot =`, ggsave() saves
  # the last displayed ggplot (not the cowplot grid), and pltg went unused.
  ggsave(output_png, plot = pltg, width = 6, height = 9)
}
# Expose the function as a command-line interface via aargh.
aargh(plot_switchde_results)
|
7da8d79183b177bb23546b2f3b6c29035e9d2d95
|
d9d872e10dfd029fc3fd7f4badf5c384576a0b3a
|
/Figures/NewVersion.R
|
e6465e155b6fc908307ee9f2fb9258490d0ee1d5
|
[] |
no_license
|
DustinRoten/STACK-project
|
7cfebfa3a9bf99d86a1fe11fde4e003584d4e00b
|
2d011106deaae5fa2f3161df90ca300824cd86ea
|
refs/heads/master
| 2021-09-14T17:21:14.679431
| 2018-05-16T15:24:40
| 2018-05-16T15:24:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,249
|
r
|
NewVersion.R
|
# Compare a baseline dispersion field against four perturbation scenarios
# (shift, rotation, radial dilation, angular stretch), computing four
# similarity metrics per perturbation step.
# NOTE(review): the eval(parse(text = paste(...))) pattern used throughout is
# an anti-pattern -- a named list of data frames (e.g. results[[MetricFrames[i]]])
# would be safer and clearer. Left unchanged here to avoid behavior changes.
library(ggplot2)
library(reshape)
# Helper functions (ShiftToOrigin, GridDispersions, COMMeasure, COMAngle,
# RotateToAxis, and the four scenario functions) come from this file.
source("TEST-DEMOFunctions.R")
PlantLAT <- 39.28682
PlantLON <- -96.1172
Dispersion <- read.delim("JEC-10000m2.txt", header = TRUE, sep = "")
Origin_Dispersion <- ShiftToOrigin("S", Dispersion, PlantLAT, PlantLON)
# Parallel vectors: scenario function names, output object names, run counts.
Functions <- c("ShiftDispersion", "RotateDispersion", "RadialDilation", "AngularStretch")
Scenarios <- c("ShiftedDispersion", "RotatedDispersion", "RadialStretchDispersion", "AngularStretchDispersion")
NumOfRuns <- c(50, 200, 100, 100)
MetricFrames <- c("Metrics_ShiftedDispersion", "Metrics_RotatedDispersion", "Metrics_RadialStretchDispersion", "Metrics_AngularStretchDispersion")
Names_Metrics <- c("MRSMeasure", "MeanAngleMeasure", "STDAngleMeasure", "COMMeasure")
# Create empty data frames to store metric values in
for (a in 1:4) {eval(parse(text = paste(MetricFrames[a], " <- ", "data.frame()", sep = "")))}
# Run all 4 metrics here
for (i in 1:4) {
  for (j in 0:NumOfRuns[i]) { # Begin individual iterations here. Matrices 1 & 2 are filled
    # Apply scenario i at step j, then grid both baseline and perturbed fields.
    eval(parse(text = paste(Scenarios[i], " <- ", Functions[i], "(Origin_Dispersion,", j, ")")))
    eval(parse(text = paste("Matrix_Model1 <- GridDispersions(Origin_Dispersion, ", Scenarios[i], ", 1)", sep = "")))
    eval(parse(text = paste("Matrix_Model2 <- GridDispersions(Origin_Dispersion, ", Scenarios[i], ", 2)", sep = "")))
    # Both matrices are reformatted here (long format: LAT, LON, CO2)
    Melted_Matrix_Model1 <- melt(Matrix_Model1)
    Melted_Matrix_Model2 <- melt(Matrix_Model2)
    names(Melted_Matrix_Model1) <- c("LAT", "LON", "CO2")
    names(Melted_Matrix_Model2) <- c("LAT", "LON", "CO2")
    # Each MRS metric value is calculated here (normalized absolute difference)
    MRS_Value <- (1/sum(Matrix_Model1))*sum(abs(Matrix_Model1 - Matrix_Model2))
    # The Difference in COM (center of mass) is calculated here
    COM_Value <- COMMeasure(Melted_Matrix_Model1, Melted_Matrix_Model2)
    # Mean angle difference is calculated here (wrapped into [0, 180])
    Angle1 <- COMAngle(Melted_Matrix_Model1)
    Angle2 <- COMAngle(Melted_Matrix_Model2)
    MeanAngle_Value <- if(abs(Angle1 - Angle2) <= 180) {abs(Angle1 - Angle2)} else {360 - abs(Angle1 - Angle2)}
    # Calculating the standard deviation of dispersion around each COM axis
    Rotated_Dispersion1 <- RotateToAxis(Melted_Matrix_Model1, Angle1)
    Rotated_Dispersion2 <- RotateToAxis(Melted_Matrix_Model2, Angle2)
    STDAngles1 <- sd((180/pi)*atan2(Rotated_Dispersion1$LAT, Rotated_Dispersion1$LON))
    STDAngles2 <- sd((180/pi)*atan2(Rotated_Dispersion2$LAT, Rotated_Dispersion2$LON))
    STDAngle_Value <- abs(STDAngles1-STDAngles2)
    # Row j+1 of the scenario's metric frame: MRS, COM, mean angle, std angle.
    eval(parse(text = paste(MetricFrames[i], "[j+1,1] <- MRS_Value", sep = "")))
    eval(parse(text = paste(MetricFrames[i], "[j+1,2] <- COM_Value", sep = "")))
    eval(parse(text = paste(MetricFrames[i], "[j+1,3] <- MeanAngle_Value", sep = "")))
    eval(parse(text = paste(MetricFrames[i], "[j+1,4] <- STDAngle_Value", sep = "")))
  } # End individual iterations here. Matrices 1 & 2 have been filled
}
# Name the metric columns of each frame.
for (i in 1:4) {eval(parse(text = paste("names(", MetricFrames[i], ")", " <- ", "c('MRSValue', 'COMValue', 'MeanAngleValue', 'STDAngleValue')", sep = "")))}
|
c398776df99c90312c0af0e832953cbf036f4d2d
|
fb6f5e9fa22093856c7628acd0e937b6090767de
|
/code/figure5.R
|
228c56cfb6e696341afaacfde59c6c6e5f1117df
|
[] |
no_license
|
eafyounian/hichip
|
1987b59ebe36239680f1318cdb710bac7715d066
|
d9fc3dc663c40b9b8aee233d86cfd5194b525acc
|
refs/heads/main
| 2023-09-03T18:58:37.475840
| 2021-11-17T21:45:30
| 2021-11-17T21:45:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,019
|
r
|
figure5.R
|
# Figure 5 and analysis
# meta_analysis_functional_data.R
#######################################
# functions and libraries
library(metafor)
library(dplyr)
library(ggplot2) # plot
library(ggpubr) # plot
# https://stats.stackexchange.com/questions/30394/how-to-perform-two-sample-t-tests-in-r-by-inputting-sample-statistics-rather-tha
# equal.variance: whether or not to assume equal variance. Default is FALSE.
# Two-sample t-test computed from summary statistics instead of raw samples.
# (From: https://stats.stackexchange.com/questions/30394/)
# m1, m2         : sample means
# s1, s2         : sample standard deviations
# n1, n2         : sample sizes
# m0             : null-hypothesis difference of means (default 0)
# equal.variance : whether or not to assume equal variance. Default is FALSE.
# Returns a named numeric vector: difference of means, its standard error,
# the t statistic, and the two-sided p-value.
# NOTE(review): renamed to t.test2 -- the analysis below calls t.test2(), and
# the previous name (t.test) masked stats::t.test().
t.test2 <- function(m1,m2,s1,s2,n1,n2,m0=0,equal.variance=FALSE)
{
  if( equal.variance==FALSE )
  {
    se <- sqrt( (s1^2/n1) + (s2^2/n2) )
    # Welch-Satterthwaite degrees of freedom
    df <- ( (s1^2/n1 + s2^2/n2)^2 )/( (s1^2/n1)^2/(n1-1) + (s2^2/n2)^2/(n2-1) )
  } else
  {
    # pooled standard deviation, scaled by the sample sizes
    se <- sqrt( (1/n1 + 1/n2) * ((n1-1)*s1^2 + (n2-1)*s2^2)/(n1+n2-2) )
    df <- n1+n2-2
  }
  t <- (m1-m2-m0)/se
  dat <- c(m1-m2, se, t, 2*pt(-abs(t),df))
  names(dat) <- c("Difference of means", "Std Error", "t", "p-value")
  return(dat)
}
#######################################
#######################################
# This is from Sandor's raw data "HiChip_KRAB_validation_raw_data.xlsx", Expression_AR for each of the Replicates 1,2,3 (T,U,V) columns.
#######################################
# AR expression odds ratios: control plus four guide RNAs, three technical
# replicates each, for biological replicate 1 and 2.
ar1 = data.frame(OR = c(0.902, 1.031, 1.067, 0.649, 0.553, 0.509, 0.093, 0.025, 0.022, 0.033, 0.016, 0.009, 0.025, 0.011, 0.005), studlab = c(rep("CTRL", 3), rep("gRNA1",3), rep("gRNA2",3), rep("gRNA3",3), rep("gRNA4",3)), tec_rep = rep(c("Rep1", "Rep2", "Rep3"), 5), biol_rep = "Rep1")
ar2 = data.frame(OR = c(0.99, 0.95, 1.06, 0.924, 0.936, 0.919, 0.701, 0.673, 0.678, 0.706, 0.743, 0.762, 0.798, 0.745, 0.711), studlab = c(rep("CTRL", 3), rep("gRNA1",3), rep("gRNA2",3), rep("gRNA3",3), rep("gRNA4",3)), tec_rep = rep(c("Rep1", "Rep2", "Rep3"), 5), biol_rep = "Rep2")
ar = rbind.data.frame(ar1, ar2)
# Work on the log scale so the effect is symmetric around 0.
ar$logOR = log(ar$OR)
# Per (condition, biological replicate): mean log-OR (yi) and variance (vi)
# across the three technical replicates.
# NOTE(review): vi is the plain replicate variance sd^2, not the variance of
# the mean (sd^2 / 3) — confirm this is the intended sampling variance for rma().
data = ar %>%
group_by(studlab, biol_rep) %>%
summarize(yi=mean(logOR), vi = (sd(logOR))^2) %>% data.frame
# Fixed-effect meta-analysis (metafor::rma, method = "FE") pooling the two
# biological replicates, separately for each condition (CTRL, gRNA1..gRNA4).
res = data.frame()
reps = names(table(data$studlab))
for (r in reps) {
x = data[data$studlab==r,]
m = rma(yi, vi, data=x, method="FE")
m_df = data.frame(study = r, TE.fixed = m$beta, seTE.fixed = m$se, upper.fixed= m$ci.ub, lower.fixed=m$ci.lb, zval.fixed = m$zval, pval.fixed=m$pval)
res = rbind.data.frame(res, m_df)
}
# Back-transform the pooled estimate and its confidence bounds to the OR scale.
res$TE.fixed= exp(res$TE.fixed)
res$upper.fixed = exp(res$upper.fixed)
res$lower.fixed = exp(res$lower.fixed)
# In the data frame "res": These are the results from meta-analyzing across two biological samples each of the CNTRL, gRNA1, gRNA2, gRNA3, gRNA4 that we report in Supplementary Table S11
#######################################
# We run Student's t-test pairwise between CNTRL and gRNA with equal.variance
# Add to the results in Table S11
#######################################
df = data.frame()
# Compare every gRNA condition against the control.
groups = as.character(res$study)
groups = groups[!(groups %in% "CTRL")]
# Pooled (meta-analyzed) effect and standard error for the control arm.
eff_ctrl = res$TE.fixed[res$study=="CTRL"]
se_ctrl = res$seTE.fixed[res$study=="CTRL"]
stat.test = data.frame()
for (i in groups) {
eff_i = res$TE.fixed[res$study==i]
se_i = res$seTE.fixed[res$study==i]
# Record the inputs of this comparison; se * sqrt(2) converts the standard
# error of the mean (n = 2 biological replicates) back into an SD.
t= c(eff_ctrl, eff_i, (se_ctrl*sqrt(2)), (se_i*sqrt(2)), "CTRL", i)
print(t)
stat.test = rbind.data.frame(stat.test, t(t))
# Two-sample t-test from summary statistics (helper defined at the top of
# this script; confirm its name matches this call — TODO).
x = t.test2( eff_ctrl, eff_i, (se_ctrl*sqrt(2)), (se_i*sqrt(2)), 2, 2, equal.variance=T)
# data.frame() sanitizes the "p-value" name to "p.value" below.
x = data.frame(t(x))
df = rbind.data.frame(df, x)
}
stat.test$p.value = df$p.value
# Round p-values to one significant digit for display.
stat.test$p.value = signif(stat.test$p.value,1)
names(stat.test)[5] = "group1"
names(stat.test)[6] = "group2"
# Prepend an all-NA row so df aligns with res (which includes the CTRL row).
new.row = df[1,]
new.row[]=NA
df = rbind.data.frame(new.row, df)
res_all = cbind.data.frame(res, df)
#######################################
# Plot
#######################################
# g = ggplot(res_all, aes(x = study, y=TE.fixed)) + geom_bar(position=position_dodge(), width=0.4, stat="identity",colour="black", size=.3) + geom_errorbar(aes(ymin=TE.fixed-seTE.fixed, ymax=TE.fixed+seTE.fixed),size=.3, width=.2, position=position_dodge(.9)) + xlab("") + ylab("") + theme_bw()
# NOTE(review): the ggplot object `g` is only created by the commented-out
# line above; as written, the `g + ...` calls below fail with
# "object 'g' not found". Uncomment (or recreate) `g` before running this.
pad = 0.01
# Position significance labels just above each bar (top of error bar + pad).
label.df <- data.frame(study = res_all$study,
TE.fixed = res_all$TE.fixed+res_all$seTE.fixed+pad, pval = res_all$p.value)
label.df$pval = signif(label.df$pval, 1)
# Map p-values to conventional star annotations.
label.df$sig = cut(label.df$pval,breaks = c(-0.1, 0.0001, 0.001, 0.01, 0.05, 1),labels = c("****", "***", "**", "*", ""))
label.df$sig = as.character(label.df$sig)
label.df$sig[is.na(label.df$sig)] = ""
g1 = g + geom_text(data = label.df, label = label.df$sig)
library(ggpubr)
my_comparisons = list( c("CTRL", "gRNA1"), c("CTRL", "gRNA2"), c("CTRL", "gRNA3"), c("CTRL", "gRNA4") )
# Attach the star labels (drop the CTRL row, which has no comparison).
stat.test$sig = label.df$sig[-1]
stat.test$p.value_sig = paste(stat.test$p.value, stat.test$sig, sep="")
# Draw the pairwise p-value brackets at staggered heights.
p = g + stat_pvalue_manual(
data = stat.test, label = "p.value_sig", #"p.value",
y.position = c(1.1,1.2,1.3,1.4)
)
# Strip grid and border for the final figure.
p1 = p + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
|
35ed0eac96f7c7c4f3df56d708d6ac7176fb90eb
|
f2a0a8fda06fc7c1a7602472aab8569df5101d48
|
/man/resettestFrontier.Rd
|
6ca8e93247ea95bed2fdf6fcde32b199e8cdb63e
|
[] |
no_license
|
cran/frontier
|
833b64b32ae93e7f5c8333ccbbd3670f0fa12182
|
91725b1e6bb2df9b47c3d9eda2d545996a0f0c54
|
refs/heads/master
| 2021-01-01T19:07:23.783127
| 2020-04-17T15:10:03
| 2020-04-17T15:10:03
| 17,696,150
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,363
|
rd
|
resettestFrontier.Rd
|
\name{resettestFrontier}
\alias{resettestFrontier}
\title{RESET test for Stochastic Frontier Models}
\description{
Generalized Ramsey's RESET test (REgression Specification Error Test)
for misspecification of the functional form
based on a Likelihood Ratio test.
}
\usage{
resettestFrontier( object, power = 2:3 )
}
\arguments{
\item{object}{a fitted model object of class \code{frontier}.}
\item{power}{a vector indicating the powers of the fitted variables
that should be included as additional explanatory variables.
By default, the test is for quadratic or cubic influence
of the fitted response.}
}
\value{
An object of class \code{anova}
as returned by \code{\link{lrtest.frontier}}.
}
\references{
Ramsey, J.B. (1969), Tests for Specification Error
in Classical Linear Least Squares Regression Analysis.
\emph{Journal of the Royal Statistical Society, Series B} 31, 350-371.
}
\author{Arne Henningsen}
\seealso{
\code{\link{sfa}}, \code{\link[lmtest]{resettest}}, and
\code{\link{lrtest.frontier}}
}
\examples{
# load data set
data( front41Data )
# estimate a Cobb-Douglas production frontier
cobbDouglas <- sfa( log( output ) ~ log( capital ) + log( labour ),
data = front41Data )
# conduct the RESET test
resettestFrontier( cobbDouglas )
}
\keyword{models}
|
427449fee0eb286d187866d4e5eaf6047344de93
|
0fda90787687660b41d4fbf320e4b56372a46b8b
|
/README.RD
|
31ecc2126268f08bb1b4983e121e5b559d4ed756
|
[] |
no_license
|
zxcchen/blog
|
23ce220e667763738d3001a33ea875e2c6d20952
|
430049c8e9fe6fe098d25c1e015c69a0e67248ab
|
refs/heads/master
| 2021-01-23T03:59:34.939564
| 2018-05-16T08:27:30
| 2018-05-16T08:27:30
| 86,142,401
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,099
|
rd
|
README.RD
|
在线预览
http://qiqi.bling.ink
博客系统
为了熟悉Node.js,熟悉现代前端开发使用的各种工作流工具,我开发了这个博客系统。
工程目录结构:
├── README.RD
├── client
│ ├── blogpost.js
│ ├── main.js
│ └── utils.js
├── common
│ ├── common.js
│ └── config.js
├── error.html
├── gulpfile.js
├── jsconfig.json
├── package.json
├── resources
│ ├── css
│ ├── img
│ └── js
├── server
│ ├── cache.js
│ ├── db.js
│ ├── server.js
│ ├── session.js
│ ├── site.config.js
│ └── utils.js
├── template
│ ├── 404.ejs
│ ├── blogpost.ejs
│ ├── error.ejs
│ ├── header.ejs
│ ├── index.ejs
│ ├── login.ejs
│ └── sidebar.ejs
└── webpack.config.js
其中server为服务端代码目录,client为浏览器端代码目录,common为前后端公共代码,template为ejs模板文件目录,resources为资源(scss/css/images等)目录。
webpack.config.js为webpack的配置脚本,gulpfile.js为gulp的构建脚本。
开发环境构建,运行命令 npm run-script build:dev 或 gulp
线上构建,运行命令 npm run-script build:prod 或 NODE_ENV=production gulp
构建完毕后,生成deploy目录,此为客户端需要部署的资源,由于没有专用的cdn,此目录即为cdn目录。
使用webpack之后,原来不到1k的javascript代码文件膨胀到了好几k,原因是webpack会将require的模块进行bundle,同时加入webpack模块化实现代码。说明使用npm 模块来加快客户端的开发效率将折衷部署的文件体积,webpack等打包工具只能尽可能地通过拆分代码再打包来优化部署性能。
总结就是webpack等模块化打包工具的使用场景,最好应该是项目代码足够大,有模块化可能性。
关于打包体积的优化,参考https://github.com/youngwind/blog/issues/65
|
a1151572ced0575f53ae9a5c8212c80db0eb1a69
|
b4521cfd5b2f3cc2373a8c2087a4a11037f54ef6
|
/ui.R
|
57c78ba112148c30b1729e6057f2b420df3c2cb2
|
[] |
no_license
|
Dark-angel2019/digital-product-coursework4
|
8021eb72f5b198f782fc5c045e0b379edd6f991e
|
58e5e505faeb96b5516e16441a2123b47ca7837c
|
refs/heads/main
| 2023-01-30T20:21:11.947132
| 2020-12-14T02:25:14
| 2020-12-14T02:25:14
| 321,208,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
## An app to calculate the length of the hypotenuse of an isosceles right
## triangle based on the length of the adjacent provided.
library(shiny)
## UI: a slider for the adjacent length plus a Submit button; the main panel
## shows a plot ("plot1"), the computed hypotenuse length ("H1"), and usage
## notes. The corresponding outputs are produced by server.R.
shinyUI(fluidPage(
titlePanel("Calculating the length of hypotenuse"),
sidebarLayout(
sidebarPanel(
# Adjacent length in [1, 100], defaulting to 50.
sliderInput("Adjacent", "Input length of adjacent", 1, 100, value = 50),
# Recompute only when the user presses Submit.
submitButton("Submit")
),
mainPanel(
plotOutput("plot1"),
h2 ("Length of Hypotenuse"),
textOutput("H1"),
h6 ("This is the documentation for the Length-of-hypotenuse app."),
h6 ("This app calculates the length of the hypotenuse of a isosceles right triangle based on the length of the two adjacents."),
h6 ("To use the app, simply select the length of the adjacents by using the slider."),
h6 ("The length of the hypotenuse is then calculated and displayed ")
)
)
))
|
d29d8d008f6f3f9154b2feb471976e64237af984
|
2bb56a3bc4869feca3561351ca8a26e61ed16fb4
|
/session3/session3.r
|
22ec6cbabee519243983ff6559bcaa8ae979a41e
|
[] |
no_license
|
mvogel78/2015kurs2
|
e42aed33d5cbd587c51c249e874d5aa7505e800c
|
078350cf6fb80d34d00d43dd2f5fcdbe9a5206d8
|
refs/heads/master
| 2021-01-22T04:41:39.737535
| 2015-08-30T18:16:18
| 2015-08-30T18:16:18
| 38,581,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,892
|
r
|
session3.r
|
require(Hmisc)
# Load the survey data set (SPSS format) into x; a Stata reader is shown
# for reference.
x <- spss.get("session3/ZA5240_v2-0-0.sav")
## y <- stata.get("data/ZA5240_v2-0-0.dta")
## the column V417 contains the net income,
## calculate the mean using the mean() function!
## What is the problem? (missing values: mean() returns NA unless na.rm = TRUE)
nrow(x)
ncol(x)
mean(x$V417,na.rm = T)
# Trimmed mean: drop the lowest and highest 5% before averaging.
mean(x$V417,trim=0.05,na.rm = T)
## summarize the net income using summary(), quantile() and fivenum()!
summary(x$V417)
summary(x$V81)
quantile(x$V417,na.rm = T,probs = c(0,0.1,0.2,0.3,1))
quantile(x$V417,na.rm = T,probs = seq(0,1,by=0.05))
fivenum(x$V417)
## make a boxplot by using the following syntax!
require(ggplot2)
ggplot(x, aes(x=V86, y=V417)) +
geom_boxplot()
# Same plot, additionally split by V81.
ggplot(x, aes(x=V86, y=V417, fill=V81)) +
geom_boxplot()
######################################################################
####################### T-Test #######################################
######################################################################
## one sample
set.seed(1)
# NOTE(review): this overwrites the survey data frame x loaded above;
# the exercises further down that use x$V417 etc. will then fail unless
# the survey is reloaded first.
x <- rnorm(12)
t.test(x,mu=0)
t.test(x,mu=1)
## two sample Welch or Satterthwaite test
set.seed(1)
x <- rnorm(12)
y <- rnorm(12)
g <- sample(c("A","B"),12,replace = T)
t.test(x, y)
t.test(x ~ g)
# Classic Student t-test assuming equal variances.
t.test(x, y, var.equal = T)
######################################################################
####################### Exercises ###################################
######################################################################
## do a t-test of income (V417): male against female (V81)!
t.test(x$V417 ~ x$V81)
## and compare the bmi (V279) in smokers and non-smokers (V272)
## and between the people with high and normal blood pressure (V242)
t.test(x$V279 ~ x$V272)
t.test(x$V279 ~ x$V242)
summary(x$V242)
## bmi by smokers/ non smokers
ggplot(x, aes(x=V272, y=V279, fill=V81)) +
geom_boxplot() +
facet_wrap( ~ V86)
summary(x$V272)
## bmi by high/normal blood pressure
## Alter (age)
summary(x$V84)
|
d13b4ddec23fa92864ad1aec5f1062bbe580a3c3
|
b136b4bfef2449633275481a5aa62c60e32f07bd
|
/man/cdf.Z.Rd
|
b0d8b5b406a1a6d4beacef47cfece4377e7cd218
|
[] |
no_license
|
cran/MHTcop
|
cba5339e5c2875ee8d9dfc318aeb132c8e89dcae
|
496ee271b9e68adff69523e19dee05c469678ee4
|
refs/heads/master
| 2020-03-08T17:36:15.334807
| 2019-01-21T15:10:03
| 2019-01-21T15:10:03
| 128,273,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 569
|
rd
|
cdf.Z.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cdf.Z.R
\name{cdf.Z}
\alias{cdf.Z}
\title{Evaluate the inverse Laplace-Stieltjes transform of a copula's generator}
\usage{
cdf.Z(cop, z)
}
\arguments{
\item{cop}{The copula}
\item{z}{Argument to the inverse Laplace-Stieltjes transform of the copula's generator}
}
\description{
\code{cdf.Z} evaluates the inverse Laplace-Stieltjes transform of the generator of the copula \code{cop} at \code{z}. Note: The evaluated mapping is a distribution function.
}
\keyword{internal}
|
b5db7bb72331eb35803ef7facbfb99ed49020429
|
ed6d7dbac0c32cce7e784b712466878d817f97f0
|
/plot4.R
|
cd86fb04d7ad2e131634e671533628a702b6f314
|
[] |
no_license
|
nsdfxela/ExData_Plotting1
|
a1eabdccd3f7ac9e9b122c3c222e63fcfa67abdb
|
45420d6a4fc041f7af5ef337dabd4764df545cfe
|
refs/heads/master
| 2021-01-21T06:14:14.980633
| 2014-05-11T16:51:52
| 2014-05-11T16:51:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,684
|
r
|
plot4.R
|
# Read the household power consumption file and keep only the observations
# for 2007-02-01 and 2007-02-02.
# NOTE(review): read.csv is called without na.strings; if the file encodes
# missing values as "?" (common for this data set — verify), the numeric
# columns arrive as character/factor, which is presumably why downstream code
# wraps them in as.numeric(as.vector(...)).
getData <- function(){
dataPlace <- read.csv("./data/household_power_consumption.txt", sep=";", header=TRUE)
# Parse the Date column (day/month/year) so the two target days can be selected.
dataPlace["Date"] <- as.Date(strptime(dataPlace[,"Date"], format="%d/%m/%Y"))
# Date comparison with a character literal coerces the literal to Date.
dataPlace <- dataPlace[dataPlace[,"Date"]<="2007-02-02" & dataPlace[,"Date"]>="2007-02-01",]
# Convert the clock time into a POSIXct value (the date part is filled in
# with today's date here and replaced with the real Date by the caller).
dataPlace$Time <-as.POSIXct( strptime(dataPlace[,"Time"],format="%H:%M:%S"), format = "%H:%M:%S")
dataPlace
}
dataPlace<-getData()
# Combine the Date and Time columns into a single POSIXct timestamp.
dataPlace$Time <- as.POSIXct(strptime (paste (as.character(dataPlace$Date, format = "%Y-%m-%d"), as.character(dataPlace$Time, format = "%H:%M:%S")), format ="%Y-%m-%d %H:%M:%S"))
# Render the four panels (2x2, filled column-wise) into plot4.png.
png(file = "plot4.png", width = 480, height = 480)
par(mfcol = c(2,2), mar=c(4,4,1,1), bg="transparent")
with(dataPlace,{
##1 Global active power over time
plot(dataPlace$Time, as.numeric(as.vector(dataPlace$Global_active_power)), type ="l", ylab="Global Active Power", xlab="")
##2 Three sub-metering series overlaid, with a legend
plot(dataPlace$Time, as.numeric(as.vector(dataPlace$Sub_metering_1)), type ="n", ylab="Energy sub metering", xlab="")
lines(dataPlace$Time, as.numeric(as.vector(dataPlace$Sub_metering_1)), col="black")
lines(dataPlace$Time, as.numeric(as.vector(dataPlace$Sub_metering_2)), col="red")
lines(dataPlace$Time, as.numeric(as.vector(dataPlace$Sub_metering_3)), col="blue")
legend("topright", lty=1, col=c("black", "red", "blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex=0.8, bty="n")
##3 Voltage over time
plot(dataPlace$Time, as.numeric(as.vector(dataPlace$Voltage)), type ="l", ylab="Voltage", xlab="datetime")
##4 Global reactive power over time
plot(dataPlace$Time, as.numeric(as.vector(dataPlace$Global_reactive_power)), type ="l", ylab="Global_reactive_power", xlab="datetime")
})
dev.off()
|
0295a4ae564d807b3db3f82bb3a7cc2574104514
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/modules/assim.sequential/R/met_filtering_helpers.R
|
523d75bb685ddf92cf025fd69a89e486e3fc8b2b
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| false
| 2,306
|
r
|
met_filtering_helpers.R
|
##' Sample meteorological ensembles
##'
##' Randomly samples \code{nens} members from the meteorological ensemble
##' files found under the met path configured in \code{settings}.
##'
##' @param settings PEcAn settings list
##' @param nens number of ensemble members to be sampled
##'
##' @return a named list of length \code{nens} (all elements named "met");
##'   each element is a list whose \code{met$path} points at one sampled
##'   ensemble member file
##' @export
sample_met <- function(settings, nens=1){
  # path where ensemble met folders are
  if(length(settings$run$inputs$met[["path"]]) == 1){
    path <- settings$run$inputs$met[["path"]]
  }else if(!is.null(settings$run$inputs$met[["path"]])){ # this function will be deprecated soon anyway
    path <- settings$run$inputs$met[["path"]][[1]]
  }else{
    PEcAn.logger::logger.error("Met path not found in settings.")
  }
  if(settings$host$name == "localhost"){
    ens_members <- list.files(path, recursive = TRUE)
  }else{
    # remote host: list the ensemble files over the connection described in
    # the settings.
    # FIX: the original referenced an undefined variable `host` here; the
    # host description lives in settings$host.
    ens_members <- PEcAn.remote::remote.execute.cmd(settings$host, paste0('ls -d -1 ', path, "/*.*"))
  }
  start_date <- as.POSIXlt((settings$run$site$met.start))
  end_date <- as.POSIXlt((settings$run$site$met.end))
  #start_date <- as.POSIXlt(strptime(settings$run$site$met.start, "%Y/%m/%d"))
  #end_date <- as.POSIXlt(strptime(settings$run$site$met.end, "%Y/%m/%d"))
  start_date$zone <- end_date$zone <- NULL
  # only the original (not-splitted) file has start and end date only:
  # strip those date suffixes so member directories can be matched by name.
  tmp_members <- gsub(paste0(".", start_date), "", ens_members)
  tmp_members <- gsub(paste0(".", end_date), "", tmp_members)
  member_names <- unique(dirname(ens_members))
  # this will change from model to model, generalize later
  # This function is temporary but if we will continue to use this approach for met ensembles (instead of met process workflow)
  # it might not be a bad idea to have sample_met.model
  if(settings$model$type == "ED2"){
    # TODO : it doesn't have to be called ED_MET_DRIVER_HEADER
    ens_members <- file.path(basename(ens_members), "ED_MET_DRIVER_HEADER")
    ens_ind <- seq_along(ens_members)
  }else if(settings$model$type == "SIPNET"){
    # SIPNET members are the .clim files matching each member directory name.
    ens_ind <- unlist(sapply(paste0(member_names, ".clim"), grep, tmp_members))
  }else if(settings$model$type == "LINKAGES"){
    ens_ind <- seq_along(ens_members)
  }
  # Draw nens members (with replacement across draws) and wrap each in the
  # list shape expected downstream: ens_input[[i]]$met$path.
  ens_input <- list()
  for(i in seq_len(nens)){
    ens_input[[i]] <- list(met=NULL)
    ens_input[[i]]$met$path <- file.path(path, ens_members[sample(ens_ind, 1)])
  }
  names(ens_input) <- rep("met",length=nens)
  return(ens_input)
}
|
9fcb9b79752eb44177b74e36e49e5a2c3d82be32
|
58a1c70f5695d29f54232a261734aae2a675838e
|
/R/df_점수형.R
|
7578069ffc35b009e0dc3f0dfb48201ca5188b51
|
[] |
no_license
|
wpsl94/DataMining_Mid-2021
|
090b7e8f5e8d51708dba8067c92976e98e8595c6
|
6718fa74a4da91bdb318ad4f8ed245404bc6d650
|
refs/heads/main
| 2023-04-15T14:37:19.081979
| 2021-04-28T11:12:07
| 2021-04-28T11:12:07
| 362,017,970
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 7,161
|
r
|
df_점수형.R
|
library(foreign)
library(MASS)
library(dplyr)
library(ggplot2)
library(readxl)

### Data preparation: build one tidy data frame per survey wave (2015-2019).
###
### Each raw wave (raw_welfare.2015 .. raw_welfare.2019) uses wave-specific
### column prefixes (h10_ .. h14_). The original script repeated the same
### rename / coalesce / subset logic five times; it is factored into a
### single helper here.

#' Build a per-year welfare data frame from one raw survey wave.
#'
#' @param raw raw wave data frame (e.g. raw_welfare.2015)
#' @param wave_prefix wave column prefix, e.g. "h10" for the 2015 wave
#' @param year survey year recorded in the output's `year` column
#' @return data.frame with columns year, sex, area, birth, edu, religion,
#'   dis, marriage, health, code_job, income; remaining NAs replaced by 0.
build_welfare_df <- function(raw, wave_prefix, year) {
  # Demographic columns: output name -> wave-specific column suffix.
  demo_suffixes <- c(sex      = "g3",    # sex
                     area     = "reg7",  # region code
                     birth    = "g4",    # birth year
                     edu      = "g6",    # education level
                     religion = "g11",   # religion
                     dis      = "g9",    # disability level
                     marriage = "g10",   # marital status
                     health   = "med2",  # health level
                     code_job = "eco9")  # occupation code
  out <- data.frame(lapply(demo_suffixes,
                           function(sfx) raw[[paste0(wave_prefix, "_", sfx)]]))

  # Personal income: first non-missing among the five income sources
  # (regular worker, day laborer, self-employed excl. agriculture/forestry/
  # fishery, side job, agriculture/forestry/fishery), matching the original
  # coalesce() over the five columns.
  income_sources <- lapply(paste0(wave_prefix, "_pers_income", 1:5),
                           function(cn) raw[[cn]])
  out$income <- Reduce(dplyr::coalesce, income_sources)

  # Replace remaining missing values with 0, as in the original script.
  out[is.na(out)] <- 0

  data.frame(year = year, out)
}

## Waves 2015-2019 use column prefixes h10-h14, respectively.
df.wr.2015 <- build_welfare_df(raw_welfare.2015, "h10", 2015)
df.wr.2016 <- build_welfare_df(raw_welfare.2016, "h11", 2016)
df.wr.2017 <- build_welfare_df(raw_welfare.2017, "h12", 2017)
df.wr.2018 <- build_welfare_df(raw_welfare.2018, "h13", 2018)
df.wr.2019 <- build_welfare_df(raw_welfare.2019, "h14", 2019)
|
9cbf9647222bc736df38bc8477cb80c410980892
|
d354983f75228b3aa82eea518f42de95e3fa32f7
|
/functions/colorRampPalette/red-to-blue.R
|
eb5c226f013198f41b64225767bf5b939e91a564
|
[] |
no_license
|
ReneNyffenegger/about-r
|
f1f1d1f6c52f0446207978e78436ccbd91b89d20
|
ae511ae632e1f8827cab91d4e36c1a9349fda5ab
|
refs/heads/master
| 2022-01-14T10:19:45.230836
| 2021-12-27T19:50:37
| 2021-12-27T19:50:37
| 22,269,629
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 110
|
r
|
red-to-blue.R
|
# Build an 8-step color gradient from red to blue and display it as a
# bar chart (one bar per gradient step).
paletteFunc <- colorRampPalette(c("red", "blue"))
palette <- paletteFunc(8)
barplot(1:8, col = palette)
|
536066e33304eeabb5770000832d8712b62203cb
|
db8eeb68541dba916fa0ab9567fe9199d95bdb6a
|
/man/chargeCalculationGlobal.Rd
|
3043d4277bc76c561fea51d54b9ec72120313a94
|
[] |
no_license
|
alptaciroglu/idpr
|
f26544ffe869854a0fd636fcf7a2fa85a41efeed
|
e5f7838d27fb9ada1b10d6a3f0261a5fa8588908
|
refs/heads/master
| 2023-01-27T20:40:33.186127
| 2020-12-05T21:57:55
| 2020-12-05T21:57:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,140
|
rd
|
chargeCalculationGlobal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chargeCalculations.R
\name{chargeCalculationGlobal}
\alias{chargeCalculationGlobal}
\title{Protein Charge Calculation, Globally}
\usage{
chargeCalculationGlobal(
sequence,
pKaSet = "IPC_protein",
pH = 7,
plotResults = FALSE,
includeTermini = TRUE,
sumTermini = TRUE,
proteinName = NA,
printCitation = FALSE,
...
)
}
\arguments{
\item{sequence}{amino acid sequence as a character string or vector of
individual residues. alternatively, a character string of the path to a
.fasta / .fa file}
\item{pKaSet}{A character string or data frame. "IPC_protein" by default.
Character string to load specific, preloaded pKa sets.
c("EMBOSS", "DTASelect", "Solomons", "Sillero", "Rodwell",
"Lehninger", "Toseland", "Thurlkill", "Nozaki", "Dawson",
"Bjellqvist", "ProMoST", "Vollhardt", "IPC_protein", "IPC_peptide")
Alternatively, the user may supply a custom pKa dataset.
The format must be a data frame where:
Column 1 must be a character vector of residues named "AA" AND
Column 2 must be a numeric vector of pKa values.}
\item{pH}{numeric value, 7.0 by default.
The environmental pH used to calculate residue charge.}
\item{plotResults}{logical value, FALSE by default.
This determines what is returned. If \code{plotResults = FALSE}, a
data frame is returned with the position, residue, and charge (-1 to +1).
If \code{plotResults = TRUE}, a graphical output is returned (ggplot)
showing the charge distribution.}
\item{includeTermini, sumTermini}{Logical values, both TRUE by default. This
determines how the calculation handles the N- and C- terminus.
includeTermini determines if the calculation will use the charge of the
amine and carboxyl groups at the ends of the peptide (When TRUE). These
charges are ignored when \code{includeTermini = FALSE}. sumTermini
determines if the charge of the first (likely Met, therefore uncharged),
and final residue (varies) will be added to the termini charges, or if the
N and C terminus will be returned as separate residues.
When \code{sumTermini = TRUE}, charges are summed. When
\code{sumTermini = FALSE}, the N and C terminus are added as a unique
residue in the DF. This will impact averages by increasing the sequence
length by 2. sumTermini is ignored if \code{includeTermini = FALSE}.}
\item{proteinName}{character string with length = 1.
optional setting to include the name in the plot title.}
\item{printCitation}{Logical value. FALSE by default.
When \code{printCitation = TRUE} the citation for the pKa set is printed.
This allows for the user to easily obtain the dataset citation.
Will not print if there is a custom dataset.}
\item{...}{any additional parameters, especially those for plotting.}
}
\value{
If \code{plotResults = FALSE}, a data frame
is returned with the position, residue, and charge (-1 to +1). If
\code{plotResults = TRUE}, a graphical output is returned (ggplot) showing
the charge distribution.
}
\description{
This function will determine the charge of a peptide using the
Henderson-Hasselbalch Equation. The output is a data frame (default) or a
plot of charge calculations along the peptide sequence. Charges are
determined globally, or along the entire chain.
}
\section{Plot Colors}{
For users who wish to keep a common aesthetic, the following colors are
used when plotResults = TRUE. \cr
\itemize{
\item Dynamic line colors: \itemize{
\item Close to -1 = "#92140C"
\item Close to +1 = "#348AA7"
\item Close to 0 (midpoint) = "grey65" or "#A6A6A6"}}
}
\examples{
#Amino acid sequences can be character strings
aaString <- "ACDEFGHIKLMNPQRSTVWY"
#Amino acid sequences can also be character vectors
aaVector <- c("A", "C", "D", "E", "F",
"G", "H", "I", "K", "L",
"M", "N", "P", "Q", "R",
"S", "T", "V", "W", "Y")
#Alternatively, .fasta files can also be used by providing
#a character string of the path to the file.
exampleDF <- chargeCalculationGlobal(aaString)
head(exampleDF)
exampleDF <- chargeCalculationGlobal(aaVector)
head(exampleDF)
#Changing pKa set or pH used for calculations
exampleDF_pH5 <- chargeCalculationGlobal(aaString,
pH = 5)
head(exampleDF_pH5)
exampleDF_pH7 <- chargeCalculationGlobal(aaString,
pH = 7)
head(exampleDF_pH7)
exampleDF_EMBOSS <- chargeCalculationGlobal(aaString,
pH = 7,
pKa = "EMBOSS")
head(exampleDF_EMBOSS)
#If the termini charge should not be included with includeTermini = F
exampleDF_NoTermini <- chargeCalculationGlobal(aaString,
includeTermini = FALSE)
head(exampleDF_NoTermini)
#and how the termini should be handled with sumTermini
exampleDF_SumTermini <- chargeCalculationGlobal(aaString,
sumTermini = TRUE)
head(exampleDF_SumTermini)
exampleDF_SepTermini <- chargeCalculationGlobal(aaString,
sumTermini = FALSE)
head(exampleDF_SepTermini)
#plotResults = TRUE will output a ggplot as a line plot
chargeCalculationGlobal(aaString,
plot = TRUE)
#since it is a ggplot, you can change or annotate the plot
gg <- chargeCalculationGlobal(aaVector,
window = 3,
plot = TRUE)
gg <- gg + ggplot2::ylab("Residue Charge")
gg <- gg + ggplot2::geom_text(data = exampleDF,
ggplot2::aes(label = AA,
y = Charge + 0.1))
plot(gg)
#alternativly, you can pass the data frame to sequenceMap()
sequenceMap(sequence = exampleDF$AA,
property = exampleDF$Charge)
}
\seealso{
\code{\link{pKaData}} for residue pKa values and
\code{\link{hendersonHasselbalch}} for charge calculations.
Other charge functions:
\code{\link{chargeCalculationLocal}()},
\code{\link{hendersonHasselbalch}()},
\code{\link{netCharge}()}
}
\concept{charge functions}
|
fd5673e7faf17b89b9b2b5c0b9de046b8262a493
|
875c89121e065a01ffe24d865f549d98463532f8
|
/tests/testthat/test_collectStrays.R
|
a3e63cb38dd6025ee63526c3783b2910f3eb0a0c
|
[] |
no_license
|
hugomflavio/actel
|
ba414a4b16a9c5b4ab61e85d040ec790983fda63
|
2398a01d71c37e615e04607cc538a7c154b79855
|
refs/heads/master
| 2023-05-12T00:09:57.106062
| 2023-05-07T01:30:19
| 2023-05-07T01:30:19
| 190,181,871
| 25
| 6
| null | 2021-03-31T01:47:24
| 2019-06-04T10:42:27
|
R
|
UTF-8
|
R
| false
| false
| 694
|
r
|
test_collectStrays.R
|
# These tests write files to the working directory, so skip them on CRAN.
skip_on_cran()
# Remember the original working directory and run inside tempdir() so the
# temp/result CSVs created by collectStrays()/storeStrays() don't pollute
# the test tree; restored at the bottom of this file.
tests.home <- getwd()
setwd(tempdir())
test_that("collectStrays work as expected", {
# Minimal one-array input: five example detections, with the first column
# renamed to the "Timestamp" name collectStrays() expects.
xdet <- list(Test = example.detections[1:5, ])
colnames(xdet[[1]])[1] <- "Timestamp"
# First call creates temp_strays.csv with one row...
collectStrays(input = xdet)
expect_true(file.exists("temp_strays.csv"))
output <- read.csv("temp_strays.csv")
expect_equal(nrow(output), 1)
# ...and a second call appends another row.
collectStrays(input = xdet)
output <- read.csv("temp_strays.csv")
expect_equal(nrow(output), 2)
# storeStrays() converts the temp file into stray_tags.csv; a repeat run
# must not overwrite it but create a numbered sibling instead.
storeStrays()
expect_true(file.exists("stray_tags.csv"))
collectStrays(input = xdet)
storeStrays()
expect_true(file.exists("stray_tags.1.csv"))
# Clean up everything this test wrote.
file.remove(list.files(pattern = "stray_tags"))
file.remove("temp_strays.csv")
})
# y
# y
# Restore the working directory changed at the top of this file.
setwd(tests.home)
|
0169a227470e89a6f2b6312490a5e25e25b61830
|
83ce22426dd1f7e2cd620b9ba33f13f636b1baec
|
/ODETest.R
|
e06ddc780238aa4b4ad0b5ca12ceacaf022d1a84
|
[] |
no_license
|
rslasater82/weightloss
|
cf17133e29e3a1d0dd9b1d88c06d14bb4d389180
|
02176204c58be8cdc0cb52d0ec9aef223c42348f
|
refs/heads/main
| 2023-01-12T23:01:17.590530
| 2020-11-20T20:15:15
| 2020-11-20T20:15:15
| 300,404,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
ODETest.R
|
library(deSolve)
# Parameters of the Lorenz system (passed to the derivative function below).
parameters <- c(a = -8/3,
b = -10,
c = 28)
# Initial values of the three state variables.
state <- c(X = 1,
Y = 1,
Z = 1)
# Right-hand side of the Lorenz system, in the signature deSolve::ode()
# expects: f(t, state, parameters) -> list of derivatives.
# state carries X, Y, Z; parameters carries a, b, c. t is unused because
# the system is autonomous.
Lorenz <- function(t, state, parameters) {
  vals <- as.list(c(state, parameters))
  dX <- vals$a * vals$X + vals$Y * vals$Z
  dY <- vals$b * (vals$Y - vals$Z)
  dZ <- -vals$X * vals$Y + vals$c * vals$Y - vals$Z
  # deSolve requires the derivatives wrapped in a list.
  list(c(dX, dY, dZ))
}
# Integrate the Lorenz system from t = 0 to t = 100 with step 0.01.
times <- seq(0,100, by = 0.01)
out <- ode(y = state,
times = times,
func = Lorenz,
parms = parameters)
head(out)
# Plot each state variable against time, then the X-Z phase portrait.
par(oma = c(0,0,3,0))
plot(out, xlab = "time", ylab = "-")
plot(out[, "X"], out[,"Z"], pch = ".")
mtext(outer=TRUE, side = 3, "Lorenz model", cex = 1.5)
#solve(FFM0 = 10.4*log(F0/x))
# NOTE(review): C() references F0 and FFM0, which are not defined anywhere
# in this script (the line above that hints at them is commented out), so
# the uniroot() call below fails with "object 'F0' not found". Define F0
# and FFM0 (or parameterize C) before running this section.
C <- function(z){
10.4*log(F0/z)-FFM0
}
z = NULL
# f is defined but never used (uniroot is called on C, not f).
f <- function(x) x^2 - 4
uniroot(C, c(0,100))
|
62e9a6781beaaf66e3c9a10fafaae00f7e7795a8
|
c7638d2d2cb0266caa7d13f137018cc9428c6536
|
/R/files.R
|
809148c2c1750ce1c27c7bda182b1feb9365b03f
|
[
"MIT"
] |
permissive
|
cran/batchr
|
74d6ff7646c9a72ea71d5544f3f8fab4b517339c
|
3c9e7fb1098b2f744959432e941e0f042a406d67
|
refs/heads/master
| 2023-08-12T17:21:26.377485
| 2021-10-03T03:10:02
| 2021-10-03T03:10:02
| 340,015,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,180
|
r
|
files.R
|
#' Batch Files
#'
#' Gets the names of the files that are remaining to be processed by
#' [batch_run()].
#'
#' [batch_completed()] can be used to test if there are any
#' files remaining.
#'
#' @inheritParams batch_config
#' @inheritParams batch_run
#' @return A character vector of the names of the remaining files.
#' @seealso [batch_process()] and [batch_run()]
#' @export
#' @examples
#' path <- tempdir()
#' write.csv(mtcars, file.path(path, "file1.csv"))
#' batch_config(function(x) TRUE, path, regexp = "[.]csv$")
#' batch_files_remaining(path)
#' batch_run(path, ask = FALSE)
#' batch_files_remaining(path)
#' batch_cleanup(path)
#' unlink(file.path(path, "file1.csv"))
batch_files_remaining <- function(path, failed = FALSE) {
  chk_lgl(failed)
  config <- batch_config_read(path)

  # Candidate files: those matching the configured pattern whose
  # modification time does not postdate the configuration timestamp.
  candidates <- list.files(path, pattern = config$regexp,
                           recursive = config$recurse)
  candidates <- candidates[file_time(path, candidates) <= config$time]

  # With no candidates, or failed = NA, return all candidates unfiltered.
  if (length(candidates) == 0 || is.na(failed)) {
    return(candidates)
  }

  # Candidates that previously failed processing.
  failed_set <- intersect(failed_files(path), candidates)
  if (isTRUE(failed)) {
    return(failed_set)
  }
  # failed = FALSE: candidates that have not failed.
  setdiff(candidates, failed_set)
}
|
5d0d88c7b0afc731b6b342d5c858c3d630715b64
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/breathtestcore/examples/read_breathid.Rd.R
|
1f2d228d65d401c6d925b6698da343a4d9ea6ea9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
read_breathid.Rd.R
|
library(breathtestcore)
### Name: read_breathid
### Title: Read BreathID file
### Aliases: read_breathid
### ** Examples
# Locate the example BreathID file shipped with the package.
filename = btcore_file("350_20043_0_GER.txt")
# Show first lines
cat(readLines(filename, n = 10), sep="\n")
#
# Parse the file and inspect the resulting object's structure.
bid = read_breathid(filename)
str(bid)
|
9ca984b295c772b49d92f1deff2bf46998054d7c
|
e55ffb2edab5f9658f23c46a23b84c78348b99eb
|
/rstudio-ws/Visualizing-of-StationGrid-2014/visualizing-of-ClusterCenters.R
|
53e7510f7e177cab89662cab04ee98331bd39e02
|
[] |
no_license
|
un-knower/hadoop-ws
|
6689dd20fd8818f18cfef7c7aae329017a01b8a9
|
913bbe328a6b2c9c79588f278ed906138d0341eb
|
refs/heads/master
| 2020-03-17T23:15:21.854515
| 2015-04-25T08:09:03
| 2015-04-25T08:09:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,438
|
r
|
visualizing-of-ClusterCenters.R
|
# -----------------------------------------------------------------------------
# 基本图形化展现
# ClusterCenters
# -----------------------------------------------------------------------------
# 运行方法: 在R环境中,使用下面语句
# 修改 中的这两个语句
# dataSetID <- "s01" # s98
# 创建图形输出目录 s01_ClusterCenters
# 执行 - linux版本
# source("~/workspace_github/hadoop-ws/rstudio-ws/Visualizing-of-StationGrid-2014/read-data-of-ClusterCenters.R")
# source("~/workspace_github/hadoop-ws/rstudio-ws/Visualizing-of-StationGrid-2014/visualizing-of-ClusterCenters.R")
# 执行 - windows版本
# source("J:/home/hadoop/workspace_github/hadoop-ws/rstudio-ws/Visualizing-of-StationGrid-2014/read-data-of-ClusterCenters.R")
# source("J:/home/hadoop/workspace_github/hadoop-ws/Visualizing-of-StationGrid-2014/visualizing-of-ClusterCenters.R")
# -----------------------------------------------------------------------------
# 加载包
library(ggplot2)
# -----------------------------------------------------------------------------
# 当前任务名称 , curTaskName
# *****************************************************************************
# 函数定义
# *****************************************************************************
# -----------------------------------------------------------------------------
# Build the output path for a plot image:
#   output_ClusterCenters_<dataSetID>/<curTaskName>.<desc>.<filetype>
#
# desc:        short description of the plot, embedded in the file name
# curTaskName: current task name, used as the file-name prefix
# filetype:    image file extension (default "pdf")
# subdir:      kept for backward compatibility; currently unused — the
#              directory is always derived from the global `dataSetID`
#
# NOTE: reads the global `dataSetID` set elsewhere in this script. The
# original mixed stringr::str_c() with paste(); base paste0() is equivalent
# here and removes the stringr dependency. The last expression is returned,
# so no explicit return() (or parentheses around it) is needed.
getImageFile <- function(desc, curTaskName, filetype = "pdf", subdir = dataSetID) {
  rootFilePathOfImage <- paste0("output_ClusterCenters_", dataSetID, "/")
  fileHead <- paste0(rootFilePathOfImage, curTaskName)
  paste(fileHead, desc, filetype, sep = ".")
}
# 对 ClusterCenters 进行图形化展现
# Visualize cluster centers: writes a set of bar charts, line charts and box
# plots (via ggsave) for one clustering result.
#
# Args:
#   fileDataOfClusterCenters: a list of two data frames --
#     [[1]] wide: one row per cluster with columns clusterID, counter, and one
#           column per month; [[2]] long: one row per (clusterID, ym) pair with
#           a `value` column.  (Inferred from the code below -- TODO confirm.)
#   curTaskName: task name used by getImageFile() when building output paths.
#
# Side effects: saves multiple image files; prints progress banners via cat().
#
# NOTE(review): inside a function, expressions like `p + geom_line(...)` are
# NOT auto-printed, so ggsave() without an explicit `plot =` argument may not
# save the plot built just above it -- verify the generated images.
# NOTE(review): melt(..., variable_name = ...) is the reshape (v1) spelling;
# reshape2 uses `variable.name`, and neither package is loaded in this
# script's visible header -- confirm what is attached at run time.
visualizingClusterCenters <- function(fileDataOfClusterCenters, curTaskName) {
cat("-----------------------------------------------------------------------------\n")
cat("\t >>>>> 对ClusterCenters 进行图形化展现 \n")
# -----------------------------------------------------------------------------
curdata <- fileDataOfClusterCenters
#str(curdata)
# -----------------------------------------------------------------------------
# clusterID and its observation count (overwrites `curdata` set above).
org <- fileDataOfClusterCenters[[1]]
curdata <- org[c("clusterID", "counter")]
curdata$clusterID <- as.factor(curdata$clusterID)
#curdata
# ---------------------------
# One line chart per cluster (file suffix "_c<cluster index>").
rownum <- nrow(org)
for(r in 1:rownum) {
one <- org[r,]
# Reshape this single wide row to long format (month column becomes `ym`).
one.v <- melt(one, id = c("clusterID", "counter"), variable_name = "ym")
p <- ggplot(one.v, aes(x=ym, y=value, group=clusterID))
p <- p + xlab("month") + ylab("relative volume per month")
p + geom_line(aes(colour = clusterID))
# Cluster indices in file names are zero-based, hence r-1.
namePostfix <- paste(curTaskName, r-1, sep="_c")
ggsave(getImageFile("(2.1)簇中心折线图", namePostfix), width = 10, height = 8)
}
# ---------------------------
# ---------------------------
# Bar charts of per-cluster counts, raw and sqrt / double-sqrt compressed.
p <- ggplot(curdata, aes(x=clusterID, y=counter))
p+ geom_bar(stat="identity")
#ggsave("draw-graphys-ggplot2/graphys/s98_m1_k19.geom_bar_counter.pdf", width = 7, height = 6.99)
ggsave(getImageFile("(1.1)geom_bar_counter", curTaskName), width = 10, height = 8)
p <- ggplot(curdata, aes(x=clusterID, y=sqrt(counter)))
p+ geom_bar(stat="identity")
ggsave(getImageFile("(1.2)geom_bar_counter_sqrt", curTaskName), width = 10, height = 8)
p <- ggplot(curdata, aes(x=clusterID, y=sqrt(sqrt(counter))))
p+ geom_bar(stat="identity")
ggsave(getImageFile("(1.3)geom_bart_counter_sqrtsqr", curTaskName), width = 10, height = 8)
# ---------------------------
# Count-based bars (no y aesthetic allowed with stat="bin"); never saved.
p <- ggplot(curdata, aes(x=clusterID))
p+ geom_bar(stat="bin") # p+ geom_bar()
# -----------------------------------------------------------------------------
# Relative monthly volume of each cluster center (long-format table [[2]]).
vpm.v <- fileDataOfClusterCenters[[2]]
curdata <- vpm.v
curdata$clusterID <- as.factor(curdata$clusterID)
curdata$ym <- ordered(curdata$ym)
#str(curdata)
curdata[c("clusterID","value")]
data.frame(curdata$clusterID, curdata$ym, sqrt(curdata$value))
# ---------------------------
# Line chart of all cluster centers over months.
p <- ggplot(curdata, aes(x=ym, y=value, group=clusterID))
#p <- p + xlab("年月") + ylab("簇中心的用电量相对比例") # 中文有问题
p <- p + xlab("month") + ylab("relative volume per month")
p + geom_line()
p + geom_line(aes(colour = clusterID))
#p + geom_line(aes(colour = clusterID, size=clusterID))
#p + geom_line(aes(colour = clusterID, size= as.integer(clusterID) %% 5))
ggsave(getImageFile("(2.1)簇中心折线图", curTaskName), width = 10, height = 8)
p <- ggplot(curdata, aes(x=ym, y=sqrt(value), group=clusterID))
#p <- p + xlab("年月") + ylab("簇中心的用电量") # 中文有问题
p <- p + xlab("month") + ylab("relative volume per month")
p + geom_line(aes(colour = clusterID))
ggsave(getImageFile("(2.2)簇中心折线图_sqrt", curTaskName), width = 10, height = 8)
# ---------------------------
# Box plots of per-month values across clusters, raw and sqrt-compressed.
p <- ggplot(curdata, aes(factor(ym), value))
p <- p + xlab("年月") + ylab("簇中心的用电量")
p + geom_boxplot()
ggsave(getImageFile("(3.1)geom_boxplot", curTaskName), width = 10, height = 8)
p <- ggplot(curdata, aes(factor(ym), sqrt(value)))
p <- p + xlab("年月") + ylab("簇中心的用电量")
p + geom_boxplot()
ggsave(getImageFile("(3.2)geom_boxplot_sqrt", curTaskName), width = 10, height = 8)
cat("\t 对 对ClusterCenters 进行图形化展现 <<<<< \n")
cat("-----------------------------------------------------------------------------\n")
}
# *****************************************************************************
# 执行可视化
# *****************************************************************************
#library(foreach)
#datasets <- foreach(curdata=datasets, filename=filesVector) %do% {
# onlyname <- strsplit(filename, "\\.")[[1]][1]
#
# onlyname
# visualizingClusterCenters(curdata[[1]],onlyname)
#}
# Run the visualization for every ClusterCenters file that was read in.
# Fixes: `1:length(filesVector)` yields c(1, 0) when the vector is empty --
# seq_along() is safe; the `datasets[i]` / `dataitem[[1]]` indirection is
# simplified to the equivalent `datasets[[i]]`; a no-op bare `i` statement
# (which does not auto-print inside a loop) is removed.
for (i in seq_along(filesVector)) {
  # Strip the file extension to get the task name used in output file names.
  onlyname <- strsplit(filesVector[i], "\\.")[[1]][1]
  visualizingClusterCenters(datasets[[i]], onlyname)
}
|
f636dba9a891b0f50a9edb4e83e4b217b6efc280
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GpGp/tests/test_loglik.R
|
a5de871d47e45d1494d0108bf93ae257f7f71d89
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,372
|
r
|
test_loglik.R
|
context("Likelihood Functions")

# When each observation conditions on ALL previous observations (m = n-1),
# the Vecchia approximation is no longer an approximation: both the
# log-determinant and the quadratic form recovered from the approximate
# loglikelihood must equal the values from a dense Cholesky factorization.
# Relies on the GpGp package functions (order_maxmin, find_ordered_nn, ...).
test_that("likelihood approximations are exact when m = n-1", {

    # Regular 12 x 12 grid of spatial locations, maxmin-ordered.
    n1 <- 12
    n2 <- 12
    n <- n1*n2
    locs <- as.matrix( expand.grid( 1:n1, 1:n2 ) )
    ord <- order_maxmin(locs)
    locsord <- locs[ord,]
    # Condition on every previous point -> exact likelihood.
    m <- n-1
    NNarray <- find_ordered_nn(locsord,m=m)
    NNlist <- group_obs(NNarray)
    # Matern covariance parameters: (variance, range, smoothness, nugget).
    covparms <- c(2,40,0.8,0.01)
    # Simulate one realization (no seed: exactness holds for any y).
    y <- fast_Gp_sim(covparms,"matern_isotropic",locsord)

    # Exact quantities from the dense covariance: logdet via the Cholesky
    # diagonal, quadratic form via a forward solve.
    covmat <- matern_isotropic(covparms,locsord)
    cholmat <- t(chol(covmat))
    logdet_exact <- 2*sum(log(diag(cholmat)))
    z <- forwardsolve(cholmat,y)
    quadform_exact <- c(crossprod(z))

    # Ungrouped Vecchia: ll(y=0) isolates the logdet term, the difference
    # ll(y) - ll(0) isolates the quadratic form.
    ll0 <- vecchia_loglik(covparms,"matern_isotropic",rep(0,n),locsord,NNarray)
    logdet_approx <- -2*( ll0 + n/2*log(2*pi) )
    ll1 <- vecchia_loglik(covparms,"matern_isotropic",y,locsord,NNarray)
    quadform_approx <- -2*( ll1 - ll0 )
    expect_equal( logdet_exact, logdet_approx )
    expect_equal( quadform_exact, quadform_approx )

    # Grouped Vecchia: same decomposition, using grouped neighbor lists.
    ll0 <- vecchia_loglik_grouped(covparms,"matern_isotropic",rep(0,n),locsord,NNlist)
    logdet_approx <- -2*( ll0 + n/2*log(2*pi) )
    ll1 <- vecchia_loglik_grouped(covparms,"matern_isotropic",y,locsord,NNlist)
    quadform_approx <- -2*( ll1 - ll0 )
    expect_equal( logdet_exact, logdet_approx )
    expect_equal( quadform_exact, quadform_approx )

})
|
89817f35b183944f07604d0088044f4c5be327a0
|
2e4675b463ed76047b6875e41e1abd34b48d3692
|
/lib/gdh.db.in/man/getSites.Rd
|
95bda63a7f177b7cf2899453c87b7fefaf3fc589
|
[] |
no_license
|
cerobpm/gdh
|
a8787d479f88c8125a7af233979b2f4d1db10ead
|
57dc8e89771cfe1ce9c151a563aa107068474fc5
|
refs/heads/master
| 2020-03-21T01:42:31.461283
| 2018-06-21T00:35:20
| 2018-06-21T00:35:20
| 137,957,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,543
|
rd
|
getSites.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdhFunctions.R
\name{getSites}
\alias{getSites}
\title{getSites (gdh.db.in package)}
\usage{
getSites(con, siteCode = NULL, featureType = NULL, north = NULL,
south = NULL, east = NULL, west = NULL)
}
\arguments{
\item{con}{Conexión a base de datos (creada con dbConnect)}
\item{siteCode}{Código del sitio (int, opcional)}
\item{featureType}{tipo de objeto espacial ('point' o 'area', opcional)}
\item{north}{coordenada norte del recuadro (opcional)}
\item{south}{coordenada sur del recuadro (opcional)}
\item{east}{coordenada este del recuadro (opcional)}
\item{west}{coordenada oeste del recuadro (opcional)}
}
\description{
Esta función descarga los sitios (features) de la base de datos hidrometeorológica, junto con las
definiciones de series temporales asociados a los mismos. Incluye puntos (featureType='point') y
areas (featureType='area'). Devuelve un data.frame cuyas filas corresponden a los sitios y las
columnas a las propiedades, el cual sirve como parámetro de entrada para la función extractSeriesCatalog
(la propiedad seriesCatalog es un JSON con la definición de las series temporales). Se puede obtener el listado
completo o filtrar por siteCode, featureType o recuadro espacial (north,south,east,west)
}
\examples{
drv<-dbDriver("PostgreSQL")
con<-dbConnect(drv, user="sololectura",host='10.10.9.14',dbname='meteorology')
getSites(con,featureType='point',north=-20,south=-25,east=-55,west=-60)
}
\keyword{getSite}
\keyword{sites}
|
8ee3b7056e7450916aba4ce0dbe58ea94935ec33
|
5ce68155b082c4298bf68c77bbbb668460f16a93
|
/run_analysis.R
|
84b57ebc844ce3ce3028b5247e86287ef7a83736
|
[] |
no_license
|
philiprad/GettingAndCleaningDataCourseProject
|
3d73d484c8bcb6da27cdb41823fd07ea5eea45ed
|
ee58cadc14f9677295b05f754187295508b7ce07
|
refs/heads/master
| 2016-09-05T11:01:44.199744
| 2014-05-25T17:46:17
| 2014-05-25T17:46:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,622
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project (UCI HAR dataset).
# Reads the raw test/train files, merges them, keeps the mean/std features,
# labels activities, and writes a tidy data set of per-(subject, activity)
# feature means.
# Fixes: `read.ta0ble` was a typo for `read.table` (the script aborted when
# loading y_train.txt); the partially-matched argument `stringsAsFactor` is
# spelled out as `stringsAsFactors`.

# Load test data: features (X), activity codes (y), subject ids.
X_test <- read.table("X_test.txt", sep = "")
y_test <- read.table("y_test.txt", sep = "")
subject_test <- read.table("subject_test.txt", sep = "")
test.data.set <- cbind(subject_test, X_test, y_test)

# Load training data.
X_train <- read.table("X_train.txt", sep = "")
y_train <- read.table("y_train.txt", sep = "")
subject_train <- read.table("subject_train.txt", sep = "")
train.data.set <- cbind(subject_train, X_train, y_train)

# Feature (column) names.
features <- read.table("features.txt", sep = "", stringsAsFactors = FALSE)

# Merge train and test rows into one data set.
data.set <- rbind(test.data.set, train.data.set)

# Name the columns: subject | 561 feature columns | activity code.
names(data.set)[2:562] <- features[, 2]
names(data.set)[1] <- "subject"
names(data.set)[563] <- "activity"

# Keep only the mean(), std() and meanFreq() features.
tidy.data.set <- data.set[grepl("mean\\(\\)|std\\(\\)|meanFreq", colnames(data.set))]

# Put the id columns back in front.
tidy.data.set <- cbind(data.set[, 1], data.set[, 563], tidy.data.set)
names(tidy.data.set)[1] <- "subject"
names(tidy.data.set)[2] <- "activity"

# Replace numeric activity codes with descriptive labels.
act_labels <- read.table("activity_labels.txt", sep = "", stringsAsFactors = FALSE)
tidy.data.set$activity <- factor(tidy.data.set$activity,
                                 levels = act_labels$V1,
                                 labels = act_labels$V2)

library(reshape2)
# Melt to long form and average every variable per (subject, activity).
tidy.melt <- melt(tidy.data.set, id = c("subject", "activity"),
                  measure.vars = names(tidy.data.set[, 3:ncol(tidy.data.set)]))
final.tidy.data <- dcast(tidy.melt, subject + activity ~ variable, mean)
write.table(final.tidy.data, file = "tidyDataSet.txt", sep = " ",
            col.names = TRUE, row.names = FALSE)
|
7a530d59281d36d4367414cae7501c3f8f877bd8
|
fa54716d6e66e4c1b7559a8d3164406bdde5eb0f
|
/kaggle.R
|
50fb7e4490e1aaad9b05c1fa7427423a3abf770b
|
[] |
no_license
|
shrilekha17/Titanic_data_set
|
8100c1085e637ba83a7512be567ff57c7456bac8
|
df2c2d75b93848af4c1705dd2cb45bd50056a61a
|
refs/heads/master
| 2020-03-20T22:38:29.781446
| 2018-06-18T21:23:21
| 2018-06-18T21:23:21
| 137,807,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,121
|
r
|
kaggle.R
|
# --- Load the Titanic data and required packages ----------------------------
# Fixes: `install.library()` does not exist (library() is the loader);
# the ggplot2 package is named "ggplot2", not "ggplot"; a duplicated
# read.csv() whose result was discarded has been removed; unconditional
# install.packages() calls are left as a comment to run once if needed.
train <- read.table("/Users/shrilekha/Desktop/Kaggle/Titanic/train.csv",
                    header = TRUE, sep = ",")
test <- read.table("/Users/shrilekha/Desktop/Kaggle/Titanic/test.csv",
                   header = TRUE, sep = ",")
head(train)
str(train)

# install.packages(c("ggplot2", "dplyr", "ggthemes"))  # run once if missing
library(ggplot2)
library(dplyr)
library(ggthemes)
library(scales)
# --- Feature engineering on the combined train + test data ------------------
# Variable key:
#pclass = ticketclass: 1, 2 and 3 class
#SibSp = # of siblings / spouses aboard the Titanic
#Parch = # of parents/children aboard the Titanic, if parch = 0 then those children travel with nanny only
# Cabin = cabin number
#Port of Embarkation; key:C = Cherbourg,Q = Queenstown,S = Southampton
dim(train)
dim(test)
# NOTE(review): attach() immediately followed by detach() is a no-op; the
# bare `Surname` reference further below relies on an attached frame and
# will error as written -- confirm intent.
attach(train)
detach(train)
# Stack train and test; test rows get Survived = NA.
combine = bind_rows(train, test)
combine
# NOTE(review): md.pattern() is from the mice package, which is not loaded
# until later in this script.
md.pattern(combine)
# NOTE(review): bare `bind_rows` prints the function's source -- likely an
# accidental leftover.
bind_rows
dim(combine)
head(combine)
dim(combine)
combine[891:1309,]
##R has subsituted all unavailable survival value as NA in Combine table
str(combine)
## Feature Engineering
## We can use families surname in predition. It will help us to know that whether the family members
##have survived together or not. Or were they together during the accident time.
combine$Name
# Extract the title: drop everything before ", " and everything after ".".
combine$Title <- gsub('(.*, )|(\\..*)', '', combine$Name) # regex keeps only the title between ", " and "."
combine$Title
table(combine$Sex, combine$Title)
# Collapse uncommon titles into a single "Rare Title" level.
Rare = c("Capt", "Col", "Don", "Dona", "Dr", "Jonkheer", "Lady", "Major", "Rev", "Sir", "the Countess")
Rare
combine$Title[combine$Title == 'Mlle'] <- 'Miss'
combine$Title[combine$Title == 'Ms'] <- 'Miss'
combine$Title[combine$Title == 'Mme'] <- 'Mrs'
combine$Title[combine$Title %in% Rare] <- 'Rare Title'
table(combine$Sex, combine$Title)
# Surname = text before the first comma or period in Name.
combine$Surname <- sapply(combine$Name,
                          function(x) strsplit(x, split = '[,.]')[[1]][1])
combine$Surname
# NOTE(review): `Surname` is unqualified here (train was detached above);
# this line will fail unless a frame providing Surname is attached.
cat(paste( unique(Surname),nlevels(factor(Surname))))
# Family size = siblings/spouses + parents/children + self.
combine$Family_size <- combine$SibSp + combine$Parch + 1
combine$Family_size
combine$Family <- paste(combine$Surname, combine$Family_size, sep='_')
combine$Family
# NOTE(review): the leading `+labs(...)` on the next line is evaluated as a
# separate expression, so the x-axis label is never applied to this plot.
ggplot(combine[1:891,], aes(x= Family_size, fill = factor(Survived))) + geom_bar(stat = 'count', position = 'dodge')
+labs(x ='Family Size')
## Discretized family size: single / medium (2-3) / large (4+).
combine$Family_sizeD[combine$Family_size==1] = 'single'
combine$Family_sizeD[combine$Family_size < 4 & combine$Family_size >1] = 'medium'
combine$Family_sizeD[combine$Family_size >= 4] = 'large'
combine$Family_sizeD
mosaicplot(table(combine$Family_sizeD, combine$Survived), main='Family Size by Survival', shade=TRUE)
## Small families survived compared to the single and large families
combine$Cabin[1:28]
strsplit(combine$Cabin[2], NULL)[[1]]
## Creation of Deck variable (first character of the cabin code):
# NOTE(review): `strsplit(x,NULL)[1][1]` keeps the whole character list, not
# the first letter -- it should be `strsplit(x, NULL)[[1]][1]`.
combine$Deck = sapply(combine$Cabin, function(x) strsplit(x,NULL)[1][1])
combine[28,]
## Value imputation: passengers 62 and 830 are missing Embarked.
cat(paste(combine[c(62, 830), 'Fare'][[1]][1], combine[c(62,830),'Fare'][[1]][2],
          combine[c(62, 830), 'Pclass'][[1]][1], combine[c(62,830),'Pclass'][[1]][2]))
embark_fare = combine%>%
  filter(combine$PassengerId != 62 & combine$PassengerId != 830)
embark_fare
head(embark_fare)
# Both paid a first-class fare consistent with Cherbourg departures.
combine$Embarked[c(62, 830)]= 'C'
# Passenger 1044 is missing Fare; inspect, then impute the class-3/"S" median.
combine[1044,]
ggplot(combine[combine$Pclass == '3' & combine$Embarked == 'S', ],
       aes(x = Fare)) +
  geom_density(fill = 'pink') +
  geom_vline(aes(xintercept=median(Fare, na.rm=T)),
             colour='red', linetype='dashed', lwd=0.5)
combine$Fare[1044]
combine$Fare[1044] <- median(combine[combine$Pclass=='3' & combine$Embarked=='S',]$Fare, na.rm = TRUE)
combine$Fare[1044]
# Age still has missing values (imputed in the next section).
table(is.na(combine$Age))
# Convert id/categorical bookkeeping columns to factors.
factor_vars <- c('PassengerId','Pclass','Sex','Embarked',
                 'Title','Surname','Family','Family_sizeD')
combine[factor_vars] <- lapply(combine[factor_vars], function(x) as.factor(x))
combine[factor_vars]
names(combine[factor_vars])
names(combine)
# Ad-hoc column subset (drops ids, names, tickets, etc. by position).
ss = combine[,-c(1,3,5,12,13,14,16,17,4,9,11,18,15)]
ss
# --- Imputation and logistic-regression modeling ----------------------------
# Fixes in this section:
#  * install.library() does not exist -> library()
#  * an unbalanced "geom_histogram(" made the script a syntax error
#  * predicted_log_fit was printed before it was created
#  * predict.glm() takes `newdata =`, not `data =` (the old calls silently
#    predicted on the model's training data again)
#  * refitting glm() on the test rows cannot work (Survived is all NA there);
#    the training model is used to predict on the test set instead.
library(rpart)
# install.packages(c("mice", "Hmisc"))  # run once if missing
library(mice)
library(Hmisc)
set.seed(200)
factor(combine$Survived)
is.factor(combine$Survived)
# Impute missing values with random forests, excluding id/bookkeeping columns.
mice_mod <- mice(combine[, !names(combine) %in% c('PassengerId', 'Pclass', 'Sex', 'Embarked', 'Title', 'Surname', 'Family', 'Family_sizeD')], method='rf')
output <- complete(mice_mod)
output
# Compare the Age distribution before and after imputation.
par(mfrow=c(1,2))
hist(combine$Age, freq=F, main='Age: Original Data',
     col='pink', ylim=c(0,0.04))
hist(output$Age, freq=F, main='Age: MICE Output',
     col='lightgreen', ylim=c(0,0.04))
summary(output$Age)
summary(combine$Age)
# Adopt the imputed ages.
combine$Age <- output$Age
summary(combine$Age)
sum(is.na(combine$Age))
# install.packages(c("stringi", "devtools"))  # run once if missing
library(stringi)
library(devtools)
# Relationship between age & survival in the training rows.
ggplot(combine[1:891,], aes(Age, fill = factor(Survived))) +
  geom_histogram() +
  theme_classic()
# Child / adult / mother indicators (NA where the condition does not hold).
combine$child[combine$Age < 18] <- 'child'
combine$adult[combine$Age >= 18] <- 'adult'
table(combine$child, combine$Survived)
table(combine$adult, combine$Survived)
combine$Mother[combine$Sex =='female' & combine$Parch > 0 & combine$Age > 18 & combine$Title !='Miss'] <- 'Mother'
table(combine$Survived, combine$Mother)
combine$child <- factor(combine$child)
combine$Mother <- factor(combine$Mother)
is.factor(combine$Mother)
md.pattern(combine)
# Split back into the original train / test partitions.
train_titanic <- combine[1:891,]
dim(train)
test_titanic <- combine[892:1309,]
# install.packages("e1071")  # run once if missing
library(e1071)
combine$Survived <- factor(combine$Survived)
is.factor(combine$Survived)
contrasts(combine$Survived)
# Logistic regression for survival on the training rows.
log_fit <- glm(Survived ~ Sex + Age + Parch + Title + Embarked + SibSp + Pclass,
               data = train_titanic, family = "binomial")
log_fit
# In-sample predicted probabilities, thresholded at 0.5.
predicted_log_prob <- predict(log_fit, newdata = train_titanic, type = "response")
predicted_log_fit <- ifelse(predicted_log_prob > 0.5, "1", "0")
predicted_log_fit
# Fraction of training rows correctly predicted, and the error rate.
mean(train_titanic$Survived == predicted_log_fit)
1 - mean(train_titanic$Survived == predicted_log_fit)
# Test-set predictions come from the training-fitted model: Survived is NA
# for rows 892:1309, so a glm() refit on test_titanic is impossible.
predicted_log_prob_test <- predict(log_fit, newdata = test_titanic, type = "response")
predicted_log_prob_test
predicted_log_fit_test <- ifelse(predicted_log_prob_test > 0.5, "1", "0")
predicted_log_fit_test
|
5a305ee4064bef1b758f15a39b412528ea110683
|
6cc2ba52d7fc77cb9c105397d85b32b8ca90e00a
|
/Tecan/helpers/plates_helpers.R
|
57d428574984de4c8f12a33f587829b0b3db6946
|
[] |
no_license
|
Ploulack/HB
|
dd8abea825a1fc653d14062ab8481d3ed9eaca1e
|
9f8fb6fcbdad2b341bcd39bd9256d5ed5e2ab4b2
|
refs/heads/master
| 2021-09-15T09:04:23.999913
| 2018-04-06T16:15:44
| 2018-04-06T16:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
plates_helpers.R
|
# Expand each part key by its repeat count and flatten the result into a
# single atomic vector.  Equivalent to the original
# map2(~ rep(.x, .y)) %>% as_vector() pipeline.
repeat_parts <- function(part_keys, n) {
  expanded <- map2(part_keys, n, function(key, times) rep(key, times))
  as_vector(expanded)
}
# Repeat each part's key by its PCR count, flattened to one vector.
parts_to_vector <- function(parts) {
  repeat_parts(parts[["part"]], parts[["n_pcr"]])
}
# Repeat the left primers, then the right primers, each by the PCR count,
# and concatenate them (all left primers first, then all right primers).
primers_to_vector_repeated <- function(parts) {
  left <- repeat_parts(parts[["l_primer"]], parts[["n_pcr"]])
  right <- repeat_parts(parts[["r_primer"]], parts[["n_pcr"]])
  c(left, right)
}
# Arrange `elements` into an 8-row x 12-column plate layout, filling
# column-by-column.  Unused wells are padded with "".  Elements beyond the
# 96th are ignored, matching the original nested-loop behavior.
#
# Improvement: the cell-by-cell double loop is replaced with a vectorized
# pad-then-matrix() fill; matrix() fills column-major, which is exactly the
# original (j inner, i outer) traversal order.
#
# Returns: a tibble with 8 rows and 12 columns.
build_plate <- function(elements) {
  n_wells <- 8 * 12
  if (length(elements) >= n_wells) {
    slots <- elements[seq_len(n_wells)]
  } else {
    # Pad short inputs with empty strings so unused wells render as "".
    slots <- c(elements, rep("", n_wells - length(elements)))
  }
  plate <- matrix(slots, nrow = 8, ncol = 12)
  return(plate %>% as_tibble())
}
# Build the two plate layouts for a parts table: one holding the template
# keys and one holding the left + right primers (left primers first).
generate_plates <- function(parts) {
  template_plate <- build_plate(parts$key)
  primer_plate <- build_plate(c(parts$l_primer, parts$r_primer))
  list(Templates = template_plate,
       Primers = primer_plate)
}
# Generate the 96 well positions of a plate in column-major order:
# "A1","B1",...,"H1","A2",...,"H12".
#
# Improvement: base rep()/paste0() replace the purrr map()/unlist() pipeline,
# removing the purrr dependency while producing the identical vector.
generate_96_pos <- function() {
  paste0(rep(LETTERS[1:8], times = 12),
         rep(1:12, each = 8))
}
# Generate the 48 positions of a half plate in number-then-letter form:
# "1A","2A",...,"8A","1B",...,"8F" (numbers 1-8 within letters A-F).
#
# Improvement: base rep()/paste0() replace the purrr map()/map_chr()/unlist()
# pipeline, removing the purrr dependency while producing the identical vector.
generate_48_pos <- function() {
  paste0(rep(1:8, times = 6),
         rep(LETTERS[1:6], each = 8))
}
# Map a letter index ("A"-"Z", "AA"-"AZ", ...) onto a 96-well plate position.
# Only the LAST character and the string length are used: "A" -> slot 1,
# "Z" -> slot 26, "AA" -> slot 27 (i.e. letter rank + 26 * (nchar - 1)).
#
# Improvement: base nchar()/substr() replace stringr::str_length/str_sub,
# dropping the stringr dependency while preserving behavior (including the
# empty result for a non-letter final character).
letter_to_96_pos <- function(letter) {
  n_chars <- nchar(letter)
  last_char <- substr(letter, n_chars, n_chars)
  pos <- which(LETTERS == last_char) + 26 * (n_chars - 1)
  generate_96_pos()[pos]
}
# Sort a plate data frame by its Sample codes: first by the numeric part of
# the code (as an integer, so "10" sorts after "2"), then by the first
# capital letter.  Requires a "Sample" column.
# NOTE(review): assert_all_are_true() comes from the assertive package and
# str_extract() from stringr -- both must be attached by the caller.
plate_sort_sample <- function(plate) {
  assert_all_are_true("Sample" %in% names(plate))
  plate %>%
    arrange(as.integer(str_extract(Sample, "\\d+")),
            str_extract(Sample, "[A-Z]")
    )
}
|
6c2495135fab06f260cd3c980d58566708630290
|
aa2a544ee1dbdc89b96ea937b3370884e604f7bd
|
/man/lookup.enm.Rd
|
d441479e2b22414f1481d124c25fe7d38c33ff37
|
[] |
no_license
|
jamiemkass/ENMeval
|
dae21510cf7978ff7a6c446b98db310a86afa2a8
|
199bf0181716b25ea5033be16ed8c6efadcfbd95
|
refs/heads/master
| 2023-08-15T03:42:15.250740
| 2023-01-09T10:47:05
| 2023-01-09T10:47:05
| 29,864,043
| 16
| 13
| null | 2023-06-21T14:31:07
| 2015-01-26T14:18:11
|
R
|
UTF-8
|
R
| false
| true
| 359
|
rd
|
lookup.enm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{lookup.enm}
\alias{lookup.enm}
\title{Look up ENMdetails object}
\usage{
lookup.enm(algorithm)
}
\arguments{
\item{algorithm}{character: algorithm name (must be implemented as ENMdetails object)}
}
\description{
Internal function to look up ENMdetails objects.
}
|
3c60b8866727e7f5512da8b983f765362f40dcaf
|
eeb5ad87d1cdd86dfedbb969a67b58d46dd05098
|
/raw_please_ignore_this_folder/59.R
|
8eae2c9606a1a99fed39cc8b93f7f7a70fe74b26
|
[
"MIT"
] |
permissive
|
KathrinBusch/16S-AmpliconCorePipeline
|
a222942e542639fa961face68da1f4687d700392
|
6cda7a2dafb4ad5de03894c8a7cbd0dcf0c8d11a
|
refs/heads/master
| 2023-04-07T05:13:52.250618
| 2022-09-03T19:39:43
| 2022-09-03T19:39:43
| 292,565,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
59.R
|
# Split phylum relative-abundance table into one file per column.
# Fixes: the file began with a stray "R" line (likely left over from a shell
# heredoc) that errors when sourced -- removed; T/F abbreviations expanded to
# TRUE/FALSE; `1:ncol(phy)` replaced with seq_len() (safe for zero columns).
phy <- read.table(file = "phylum_relabund.txt", header = FALSE, row.names = 1)
# Use the first data row (minus the "phylum" label cell) as column names.
names(phy) <- phy[1, ][phy[1, ] != "phylum"]
# Write each column to "<column-name>.txt", keeping the row names.
for (i in seq_len(ncol(phy))) {
  write.table(phy[, i], row.names = row.names(phy), col.names = FALSE,
              file = paste0(names(phy)[i], ".txt"))
}
quit("no")
|
990d28b0a68de61b78dd007c4d996e70ae2a681e
|
b35ad89464853d9b34fe6837aee196a70c943fd8
|
/Revision V01V02 Combined Redo.R
|
c90dba6abedc0ada1504a00234eebc8549c00b58
|
[] |
no_license
|
dijunrui/ScratchSleepMethodPaperQC
|
6f36bcf36ecbd2cb2173b37083d0e7c2d9446bf1
|
46429a73c9812a7c91c1ce9c0cd836e54de2f008
|
refs/heads/master
| 2023-01-08T16:31:12.939479
| 2020-11-04T00:39:07
| 2020-11-04T00:39:07
| 288,811,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,841
|
r
|
Revision V01V02 Combined Redo.R
|
#######################################################
## Amendment to the analyses for understanding ##
## correlation between prediction and video ##
## for both V01 and V02 ##
## Di, Junrui 10/23/2020 ##
#######################################################
## Originally, when comparing scratchpy prediction v.s. videography ground truth
## for both V01 V02, observations were treated independently. This needs to be
## fixed to appropriately address the within-subject correlation.
## 1. For correlation, a MMRM was fitted and r.squaredGLMM() were used
## 2. For SD of difference (limits for BA plot), a MMRM was used
rm(list = ls())
library(sas7bdat)
library(nlme)
library(MuMIn)
setwd("~/Pfizer/DMTI CT-44 Scratch Sleep Team - Scratch and Sleep Methods Paper/QCed Plots/") ## Location where created plots are saved
source("scripts/Utils Function.R")
files.dir = "~/OneDrive - Pfizer/SQUAD Study Programming Analysis Tables Quanticate/Quanticate Derived Datasets and Documentation/Training Set Output Datasets and Documentation/"
## We create a dictionary here to loop over file names with corresponding variable names,
## wrists, and figure main titles, log transformation indicator, and where to round.
# Dictionary of input file stems and figure titles for the two endpoints
# (scratch events / scratch duration), both log transformed.
data_dictionary = tibble(
  scatterfiles = paste0("f_15_2_7_2_8_",3:4),
  bafiles = paste0("f_15_2_7_2_7_",3:4),
  titles = c("Scratch Events (Log Transformed)","Scratch Duration (Log Transformed)")
)
FigNames = NULL
# For each endpoint: merge the scatter and Bland-Altman SAS datasets, fit
# mixed models to account for repeated measures, and write a two-panel PNG
# (scatter with identity/regression lines + BA plot).
for(i in 1:2){
  # Scatter data: ScratchPy prediction (acc) vs video annotation (vid).
  dat1 = read.sas7bdat(paste0(files.dir, data_dictionary$scatterfiles[i],".sas7bdat")) %>%
    na.omit() %>% select(subject, avisitn, acc, vid)
  # BA data: per-observation difference and mean.
  dat2 = read.sas7bdat(paste0(files.dir, data_dictionary$bafiles[i],".sas7bdat")) %>%
    na.omit() %>% select(subject, avisitn, diff,mean)
  dat = merge(x = dat1, y = dat2) %>% na.omit() %>% mutate(avisitn = as.factor(avisitn))
  # Common axis limits; colors/symbols distinguish visits V01/V02.
  lo = round_any(min(c(dat$acc,dat$vid), na.rm = T),1,f = floor)
  up = round_any(max(c(dat$acc,dat$vid), na.rm = T),1, f = ceiling)
  cl = c(alphablend("black",0.4), alphablend("brown",0.4))[as.factor(dat$avisitn)]
  pc = c(20,18)[as.factor(dat$avisitn)]
  ## Limits-of-agreement SD for the BA plot from a random-intercept MMRM:
  ## total variance = between-subject (intercept) + residual variance.
  mixd_model1 = lme(diff ~ 1, random = ~1|subject, data = dat)
  var_intercept = as.numeric(getVarCov(mixd_model1))
  var_residual = (mixd_model1$sigma)^2
  sd_adjust = sqrt(var_intercept + var_residual)
  mean_adjust = mixd_model1$coefficients$fixed[1]
  print(data_dictionary$titles[i])
  print(paste0("Variance of Intercept: ", var_intercept, ", Variance of Residual: ", var_residual, ", Adjusted SD: ", sd_adjust,
               "Adjusted Mean: ", mean_adjust))
  ## Correlation accounting for repeated measures: marginal R^2 of an MMRM
  ## with a random visit effect per subject.
  mixed_model2 = lme(acc ~ vid + avisitn, random = ~avisitn|subject, data = dat)
  r = sqrt(r.squaredGLMM(mixed_model2)[1]) ## marginal r^2 from mixed effects model
  pvalue = anova(mixed_model2)[2,4] ## p-value of the vid fixed effect
  # Significance stars for the legend.
  sig_inf = symnum(pvalue, corr = FALSE, na = FALSE,
                   cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***",
                                                                            "**", "*", ".", " "))
  r_sig = paste0("r = ", round(r,2), " ", sig_inf)
  n_size = paste(paste0(c("V01: n = ", "V02: n = "),table(dat$avisitn)), collapse = ",")
  print(paste0("Model Adjusted GLMM R: ", r, ", P values = ", pvalue))
  print("____________________________________________________")
  # Two-panel figure: scatter (left) + Bland-Altman (right).
  png(file = paste0("results/revision/ScratchValidation_", data_dictionary$titles[i],".png"),
      width = 18, height = 7,units = "in", res = 300)
  par(mfrow = c(1,2))
  par(mar = c(4,6,4,4))
  plot(dat$vid, dat$acc, main = data_dictionary$titles[i], xlab = "Video Annotation",ylab = "ScratchPy Prediction",
       xlim = c(lo, up), ylim = c(lo, up), cex = 4, col = cl,
       pch = pc, cex.lab = 2, cex.axis = 1.8, cex.main = 2)
  legend("topleft", legend = paste0(r_sig, "\n", n_size), cex = 2, bty = "n")
  legend("bottomright",legend = c("Identity","Regression"),lty = c(1,1), bty = "n",
         col = c("black","red"), lwd = c(2,3),cex = 1.8)
  abline(lm(acc~vid, data = dat), col = "red", lty = 1, lwd = 3)
  abline(a = 0, b = 1, lty = 1, lwd = 2)
  legend("bottomleft",legend = c("V01","V02"),pch = c(20,18), bty = "n",
         col = c("#00000066", "#A52A2A66"),cex = 2.8)
  # BA plot with the MMRM-adjusted mean/SD limits (BAplot2 from Utils).
  BAplot2(ave = dat$mean, dif = dat$diff, mean.diffs = mean_adjust, sd_diff = sd_adjust,var1 = "ScratchPy Prediction",
          var2 = "Video Annotation",title = data_dictionary$titles[i],bar = 1,
          group = dat$avisitn)
  dev.off()
  # NOTE(review): `data_dictionary$types` is not defined (the tibble has only
  # scatterfiles/bafiles/titles), so this appends "...__NA.png"; the recorded
  # path also lacks the "revision/" subdirectory used by png() above.
  FigNames = c(FigNames,paste0("results/ScratchValidation_", data_dictionary$titles[i],"_",data_dictionary$types[i],".png"))
}
|
ab567cfe6784426b3db9e80657c3e9da7c3c9258
|
b4cf1178b97f1f747f1c6cd8469bb24e1fd8c6e5
|
/man/app_vistributions.Rd
|
07adb3c4f2d7f225e3e35b98e4defedb306e22ec
|
[
"MIT"
] |
permissive
|
benitezrcamilo/xplorerr
|
c0ddc8bd984354db6b6bd3c2f4074a25cb9640d4
|
37c2a74b52760cc5d383d9b7b64f9175a04c566f
|
refs/heads/master
| 2023-04-29T06:18:03.612353
| 2021-05-21T08:36:50
| 2021-05-21T08:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 315
|
rd
|
app_vistributions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/launch.R
\name{app_vistributions}
\alias{app_vistributions}
\title{Visualize distributions}
\usage{
app_vistributions()
}
\description{
Launches app for visualizing probability distributions.
}
\examples{
\dontrun{
app_vistributions()
}
}
|
99e9de16bd81e4c6454d6efcc30f339c28b2f860
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/PolyPatEx/R/potentialFatherCounts.R
|
0cd519d7fe00523b18b13ac8872b5c5de4f0fd98
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,062
|
r
|
potentialFatherCounts.R
|
## Function potentialFatherCounts
##
##' Count the number of potential fathers detected for each progeny.
##'
##' Given the output from \code{\link{genotPPE}} or
##' \code{\link{phenotPPE}}, \code{potentialFatherCounts} returns,
##' for each progeny, the number of candidates that are identified as
##' potential fathers.
##'
##' To decide whether a given candidate is a potential father to a
##' given progeny, \code{potentialFatherCounts} uses the quantities
##' FLCount (the number of loci at which a candidate can provide a
##' gamete compatible with the progeny) and VLTotal (the number of
##' loci at which a valid comparison was possible - \sQuote{valid}
##' loci) that are returned by \code{\link{genotPPE}} or
##' \code{\link{phenotPPE}}.
##'
##' For a candidate to be identified as a potential father of a
##' progeny, there are two criteria to be met:
##' \enumerate{
##' \item \code{VLTotal >= max(VLTMin,mismatches+1)},
##' \item \code{FLCount >= VLTotal-mismatches}.
##' }
##' Here, \code{VLTmin} and \code{mismatches} are user-specified
##' parameters. \code{VLTmin} allows the user to ensure that a
##' candidate is only considered for potential fatherhood if a
##' sufficient number of valid loci were available for comparison.
##' \code{mismatches} allows the user to specify a maximum number of
##' allowed mismatching loci between progeny and candidate, before
##' the candidate is rejected as a potential father. Hence the user
##' may wish to relax the condition that ALL valid loci must match for
##' a candidate to be regarded as a potential father to a progeny.
##'
##' @title Count potential fathers
##' @param dataset list: a list structure previously output from
##' \code{\link{genotPPE}} or \code{\link{phenotPPE}}.
##' @param mismatches integer: the maximum allowed number of
##' mismatching loci between candidate and progeny, before the
##' candidate is rejected as a potential father.
##' @param VLTMin integer: the minimum number of \sQuote{valid} loci
##' (loci at which a valid progeny-candidate comparison was possible)
##' required for a candidate to be considered as a potential father.
##' @return A data frame, containing columns \code{Progeny} (progeny
##' id), \code{Mother} (id of the progeny's mother) and
##' \code{potentialFatherCount} (the number of potential fathers found
##' for the given progeny, given the criteria described above).
##' @author Alexander Zwart (alec.zwart at csiro.au)
##' @export
##' @examples
##'
##' ## Using the example dataset 'FR_Genotype':
##' data(FR_Genotype)
##'
##' ## Since we did not load this dataset using inputData(), we must
##' ## first process it with preprocessData() before doing anything
##' ## else:
##' gData <- preprocessData(FR_Genotype,
##' numLoci=7,
##' ploidy=4,
##' dataType="genotype",
##' dioecious=TRUE,
##' mothersOnly=TRUE)
##'
##' head(gData) ## Checked and Cleaned version of FR_Genotype
##'
##' gPPE <- genotPPE(gData) ## Perform the exclusion analyses
##'
##' ## Obtain counts of potential fathers of each seedling, allowing a
##' ## single allele mismatch:
##' pFC <- potentialFatherCounts(gPPE,mismatches=1,VLTMin=2)
##'
##' ## pFC can be viewed or written to file via, e.g. write.csv()
##'
potentialFatherCounts <- function(dataset,mismatches=0,VLTMin=1) {
  ## Validate that `dataset` came from genotPPE()/phenotPPE().
  checkForValidPPEOutputObj(dataset)
  ## Mother of each progeny, stored as an attribute on the status table.
  progenyMothers <- attr(dataset$progenyTables$progenyStatusTable,
                         "progenyMothers")
  ## A candidate is a potential father of a progeny when enough valid loci
  ## were compared (at least VLTMin, and more than `mismatches`) AND no more
  ## than `mismatches` of those loci failed to match.
  compatible <- with(dataset$adultTables,
                     VLTotal >= max(VLTMin, mismatches + 1) &
                       (FLCount >= VLTotal - mismatches))
  ## Count potential fathers per progeny (row); NA comparisons don't count.
  pFC <- apply(compatible, 1, function(flags) sum(flags, na.rm = TRUE))
  return(data.frame(Progeny=rownames(dataset$progenyTables$progenyStatusTable),
                    Mother=progenyMothers,
                    potentialFatherCount=pFC))
}
|
ccedafe76fb6b5642c9a1465355935a191ebe417
|
fdc70ccba8006e91d9d39b30eaa028c0778055af
|
/coronaVis.R
|
e4a8cab6c37a5b09600e8156885688deea4d7776
|
[] |
no_license
|
muammara/corona
|
56cbfc42253c303fcd53ebe44b5918b070f70300
|
d3baac52930c6f3171d8d236019c2bf8f73e835e
|
refs/heads/master
| 2021-04-22T01:15:41.568104
| 2020-03-24T23:52:43
| 2020-03-24T23:52:43
| 249,838,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 740
|
r
|
coronaVis.R
|
# Build an interactive googleVis motion chart of COVID-19 total cases vs
# total deaths per country over time, and save its HTML fragment.
# NOTE(review): unconditional install.packages() on every run is usually
# unwanted -- consider commenting it out after the first install.
install.packages("googleVis")
library(googleVis)
wd='C:/Users/muamma/Documents/python/Coronavirus'
setwd(wd)
flagfilename<-'coronafilename' # File holding the name of the latest data file.
# Read the entire flag file as a single string = path of the latest CSV.
filename<- readChar(flagfilename, file.info(flagfilename)$size)
print(filename)
# Column classes for the 11 expected CSV columns (order must match the file).
fieldsformat=c("numeric", "character","factor","factor","factor","numeric","numeric","factor","factor","numeric","numeric")
C<-read.csv(filename,sep=",",colClasses=fieldsformat)
# Parse the reporting date so it can drive the motion chart's time axis.
C$DateRep <- as.Date(C$DateRep,format="%Y-%m-%d")
# Motion chart: x = deaths, y = cases, bubble size = deaths, one bubble per country.
C2 <-gvisMotionChart(data=C,idvar="Countries.and.territories" ,xvar = "TotalDeath", yvar = "TotalCases",
                     sizevar = "TotalDeath",timevar="DateRep",options=list(width=1200,height=600))
plot(C2)
# Persist just the chart's HTML fragment for embedding elsewhere.
cat(C2$html$chart, file="tmp.html")
|
13278fc2ffffee8c7722a91a1e1727b6df82085c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/radiant.model/examples/predict.crtree.Rd.R
|
79bf7224b940856d137728748cc79250492fa03f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
predict.crtree.Rd.R
|
# Auto-extracted example code for radiant.model::predict.crtree (taken from
# the package's Rd documentation); exercises crtree predictions on the
# bundled titanic data.
library(radiant.model)
### Name: predict.crtree
### Title: Predict method for the crtree function
### Aliases: predict.crtree

### ** Examples

# Fit a classification tree for survival and predict across pclass levels.
result <- crtree(titanic, "survived", c("pclass", "sex"), lev = "Yes")
predict(result, pred_cmd = "pclass = levels(pclass)")
# Refit with pclass only and predict for every row of the original data.
result <- crtree(titanic, "survived", "pclass", lev = "Yes")
predict(result, pred_data = titanic) %>% head()
|
fa789fb1fbfb70fa19cffc0994bbab57ce54b6d5
|
91294be1f45be0ebe4e588866decab350e7e59a7
|
/RemoteSensingScripts/CrappyTerainaScav.R
|
217ea31bcc8627c57cdc3986d3854c0f4da12eb8
|
[] |
no_license
|
Zheng261/CrabitatResearch
|
6530f5bbc9df8b6406addcbbf48ed7b798c025fd
|
769c00061088638a9b8d581311eb4e0db7b79ff6
|
refs/heads/master
| 2021-06-24T02:27:57.075776
| 2019-05-25T10:52:57
| 2019-05-25T10:52:57
| 140,462,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,189
|
r
|
CrappyTerainaScav.R
|
# Working directory holding the PlanetScope imagery for Teraina.
setwd("/volumes/Seagate 4tb/Pacific-islands-planet-imagery")
library(glcm)
library(imager)
library(tmap)
# The script below also calls brick()/mask() (raster), readOGR() (rgdal,
# which attaches sp), and randomForest(); load them explicitly so the script
# runs top-to-bottom instead of relying on them being attached already.
library(raster)
library(rgdal)
library(randomForest)
# Four-band PlanetScope clip of Teraina; bands arrive as Blue, Green, Red, IR.
testimg <- brick("TerainaClipped.tif")
plot(subset(testimg, 4))
names(testimg) <- c("Blue", "Green", "Red", "IR")
# Reorder layers to Red, Green, Blue, IR for convenient RGB plotting.
testimg <- subset(testimg, order(c(3, 2, 1, 4)))
plotRGB(testimg, stretch = "lin")
#palmyraimg
####### GLCM TEXTURE LAYERS ########
# (Grayscale variant kept for reference; per-band GLCM below superseded it.)
#testimgbw <- (testimg$Red + testimg$Green + testimg$Blue ) / 3
#glcmtestimg <- glcm(testimgbw ,window=c(5,5))
#names(glcmtestimg) <- paste("GrayScale.",names(glcmtestimg))
#glcmtestimg <- dropLayer(glcmtestimg,8)
#testimg <- addLayer(testimg,glcmtestimg)
# Compute 11x11-window GLCM texture statistics for each spectral band,
# prefix the layer names with the band, drop the 8th GLCM layer
# (correlation, the last of glcm's default statistics), and append the
# texture layers to the image stack.  This replaces four copy-pasted
# per-band sections with one loop; results are identical.
for (band in c("Red", "Green", "Blue", "IR")) {
  textures <- glcm(testimg[[band]], window = c(11, 11))
  names(textures) <- paste(paste0(band, "."), names(textures))
  textures <- dropLayer(textures, 8)
  testimg <- addLayer(testimg, textures)
}
#writeRaster(testimg,"8-24-11x11wateryTeraina.tif",overwrite=TRUE)
############# WATER MASKING ###############
# Hand-digitized training points for land-vs-water classification.
crappyTrainingData <- readOGR(dsn = "TerainaTrainingScav.shp", layer = "TerainaTrainingScav")
#This shapefile for some reason was saved with the wrong CRS refernece - QGIS for some reason didn't convert any points before setting the CRS.
proj4string(crappyTrainingData) <- CRS("+proj=utm +zone=4 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")
#crappyTrainingData = spTransform(crappyTrainingData,"+proj=utm +zone=4 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")
#plot(crappyTrainingData,add=TRUE,col="red")
#You get a warning here for some NA points. I'm not sure why this happens but I just remove them and then it works fine. Perhaps one of the points
#was accidentally classified as water and removed.
# Sample every band/texture layer under each training point.
dataSet <- as.data.frame(extract(testimg, crappyTrainingData))
# Append a logical isWater column; assumes the attribute table holds a single
# class column where the value "4" denotes water -- NOTE(review): confirm
# against the shapefile's attribute table.
crappyTrainingData@data = cbind(crappyTrainingData@data,crappyTrainingData@data=="4")
colnames(crappyTrainingData@data) <- c("Class","isWater")
# Attach the extracted raster values, matching rows by row name.
crappyTrainingData@data = data.frame(crappyTrainingData@data, dataSet[match(rownames(crappyTrainingData@data), rownames(dataSet)),])
## Removes NAs
crappyTrainingData@data = crappyTrainingData@data[complete.cases(crappyTrainingData@data),]
### Classify based on bands: RGB, IR,
#rf.mdl.mask <- randomForest(x=crappyTrainingData@data[,c(3:6,28:34)], y=as.factor(crappyTrainingData@data[,"isWater"]), ntree=200, importance=TRUE, progress="window")
# Classify the image with the above RF model that targets only LAND vs WATER
#crappyLandvWater = predict(testimg, rf.mdl.mask, filename="8.20-MaskForTeraina.tif", type="response", index=1, na.rm=TRUE, progress="window", overwrite=TRUE)
#plot(crappyLandvWater)
#varImpPlot(rf.mdl.mask, sort=TRUE, type=2, scale=TRUE)
#View(importance(rf.mdl))
# Reload the previously computed land/water mask instead of refitting it.
crappyLandvWater = raster("8.20-MaskForTeraina.tif")
#filename="8.20-WaterMaskedTeraina.tif"
#This kind of takes forever and idk why
# Set water pixels (mask value 2) to NA so only land enters classification.
crappyLandOnly = raster::mask(testimg,crappyLandvWater,filename="8.22-11x11WaterTrimmedTeraina.tif",maskvalue=2,updatevalue=NA,overwrite=TRUE)
names(crappyLandOnly) <- names(testimg)
plotRGB(crappyLandOnly,r=1,b=2,g=3,stretch="hist")
#subset(crappyLandOnly,1)
#dev.off()
############# START TRAINING FOREST TYPES ###############
#trainingData <- readOGR(dsn = "/volumes/Seagate 4tb/Palmyra Remote Sensing/palmyra-2016-truthing-points-v2.shp", layer = "palmyra-2016-truthing-points-v2")
#trainingData <- trainingData[,-1] #removing some extra coordinate columns...
#trainingData <- trainingData[,-1]
#trainingData = spTransform(trainingData,"+proj=utm +zone=3 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Land-only training points (water points flagged above are excluded).
crappyTrainingData.2 <- subset(crappyTrainingData, isWater == FALSE)
#plot(crappyTrainingData.2,add=TRUE,cex=0.3)
#subset(trainingData.2@data,landcover==5)
#Reads in info containing how important each band is in determining accuracy
bandOrderInfo <- read.csv("8.20OrderOfImportanceTerainaBands.csv")
#Try out different mtry values?
# Random forest on the top-24 band/texture features as ranked by the CSV
# produced in an earlier run (see the var.score export below).
rf.mdl <-randomForest(x=crappyTrainingData.2@data[,as.character(bandOrderInfo[c(1:24),1])],y=as.factor(droplevels(crappyTrainingData.2@data[,"Class"])),ntree=2000,na.action=na.omit, importance=TRUE, progress="window")
# Check error convergence. These "Out of bag" errors are a built in feature of random forest that tells you roughly how well your algorithm is doing
plot(rf.mdl, main="Out-of-bag errors for 16-feature RF model")#, xlab="Number of trees grown", ylab="OOB error")
# Predict a vegetation class for every land pixel and write the classified map.
crappyForestPred <- predict(crappyLandOnly , rf.mdl, type="response", filename="8.21-25x25ClassifiedTerainaScaevola.tif",index=1, na.rm=TRUE, progress="window", overwrite=TRUE)
plot(crappyForestPred)
varImpPlot(rf.mdl, sort=TRUE, type=2, scale=TRUE)
#View(importance(rf.mdl))
# Here I like to average together the MDA and MDG accuracy scores and use that ranking as my new basis for feature selection
# Columns 5/6 of importance() are presumably MeanDecreaseAccuracy and
# MeanDecreaseGini for this 4-class model -- NOTE(review): those column
# positions depend on the number of classes; confirm if classes change.
var.score <- data.frame(importance(rf.mdl)[,5],importance(rf.mdl)[,6]) # make new dataframe to combine mda and mdg scores
var.score$mdarank <- rank(var.score$importance.rf.mdl....5.)
var.score$mdgrank <- rank(var.score$importance.rf.mdl....6.)
var.score$avgrank <- ( var.score$mdarank + var.score$mdgrank ) / 2
var.score = var.score[order(var.score$avgrank,decreasing=TRUE),]
View(var.score) # Higher ranking is better
#Checks how the important bands change as we expand GLCM radius
#rownames(var.score)[1:20]%in%as.character(bandOrderInfo[,1])[1:20]
#write.csv(var.score,"8.20OrderOfImportanceTerainaBands.csv")
##### Calculates confusion matrix - OOB #######
# rf.mdl$confusion holds out-of-bag counts: rows = reference class, columns
# 1..nvariables = predicted class, plus randomForest's class.error column.
nvariables = 4
conf <- data.frame(rf.mdl$confusion)
conf$Accuracy <- 0
conf$Precision <- 0
colnames(conf) = c("Cocos","Native Trees","Scaevola","Sand/Infrastructure", "Error", "Accuracy", "Precision")
rownames(conf) = c("Cocos","Native Trees","Scaevola","Sand/Infrastructure")
for (i in seq_len(nrow(conf))) {
  # Row total: OOB samples whose reference class is i (counts columns only).
  class_total <- sum(conf[i, seq_len(nvariables)])
  # "Accuracy" here is per-class recall (producer's accuracy): correct / all
  # samples truly of class i.
  conf$Accuracy[i] <- conf[i, i] / class_total
  # Precision (user's accuracy): correct / all samples predicted as class i.
  conf$Precision[i] <- conf[i, i] / sum(conf[, i])
}
View(conf)
mean(conf$Accuracy)
mean(conf$Precision)
## PlanetScope Data ##
# One-row table of island-wide habitat availability: the fraction of
# classified pixels falling in each cover class.
allLocations <- data.frame(matrix(ncol = 4, nrow = 1))
colnames(allLocations) <- c("Cocos", "Natives", "Scaevola", "Sand")
rownames(allLocations) <- c("Teraina")
allLocationsPS <- allLocations
# Tabulate the predicted class code (third column) over every classified pixel.
terainapts <- rasterToPoints(crappyForestPred)
totalavailhab <- table(terainapts[, 3])
npixels <- sum(totalavailhab)
allLocationsPS[1, "Cocos"] <- totalavailhab[1] / npixels
allLocationsPS[1, "Natives"] <- totalavailhab[2] / npixels
allLocationsPS[1, "Scaevola"] <- totalavailhab[3] / npixels
allLocationsPS[1, "Sand"] <- totalavailhab[4] / npixels
allLocationsPS
|
7683a1194750eb9bb7985b455db308c7e0e60a19
|
eda93cdf31b1342dc43dd19d663994cb0103459f
|
/DoFiles/KV_balanced.R
|
95a558a332df20286d5e633e3d67370868da10ed
|
[] |
no_license
|
racheljoyforshaw/hetCyclicalMPCS
|
509ac5d5ca923b1489f8814e46161558993f22b5
|
c43a325ac3827e6b091649c29264343c1b8c47c2
|
refs/heads/master
| 2023-05-06T05:39:44.279845
| 2021-05-28T10:39:16
| 2021-05-28T10:39:16
| 371,655,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,781
|
r
|
KV_balanced.R
|
# Kaplan Violante - consumption - IV, weighted
# need min of 3 periods for identification, so pre-recession sample is 2003,5,7; post-recession is 2009,11,13
# Build the balanced 2003-2013 panel: take the 2009-13 family file, drop its
# survey-design columns (the 2003-07 file's copies are kept instead), and
# inner-join on the household id so only families present in both remain.
f0913_balanced <- f0913
f0913_balanced <- f0913_balanced[, -grep("primarySamplingUnit", colnames(f0913_balanced))]
f0913_balanced <- f0913_balanced[, -grep("stratification", colnames(f0913_balanced))]
f0313_KV_balanced <- merge(f0307,f0913_balanced, by=c('uniqueID'),all=FALSE)
rm(f0913_balanced)
##### CREATE VARIABLES ########
# Wave-year indicator columns plus logged consumption and income.  (Year of
# birth is derived later, after the wide-to-long reshape.)  Both logs are
# taken after shifting every series by a common constant so the smallest
# value observed across ALL consumption/income columns maps to 1 (log = 0),
# guaranteeing strictly positive arguments.  This loop replaces 24 near-
# duplicate statements; column values are identical, and later code selects
# columns by name so creation order is immaterial.
waves <- c("03", "05", "07", "09", "11", "13")
for (w in waves) {
  f0313_KV_balanced[[paste0("year", w)]] <- as.numeric(paste0("20", w))
}
# Global minimum over the 12 consumption/income columns (NA-safe).
level_cols <- c(paste0("consumption", waves), paste0("income", waves))
min_all <- min(sapply(f0313_KV_balanced[, level_cols], min, na.rm = TRUE))
shift <- abs(min_all) + 1
for (w in waves) {
  f0313_KV_balanced[[paste0("logconsumption", w)]] <-
    log(f0313_KV_balanced[[paste0("consumption", w)]] + shift)
  f0313_KV_balanced[[paste0("logy", w)]] <-
    log(f0313_KV_balanced[[paste0("income", w)]] + shift)
}
rm(min_all, shift, level_cols, waves, w)
# Keep identifiers, survey-design variables, and the analysis variables for
# every wave; commented-out entries were considered and dropped.  The order
# of this vector also fixes the final column order, which the grep-based
# reshape below depends on.
f0313_KV_balanced_keeps <- c("uniqueID","primarySamplingUnit","stratification",#"black03","black05","black07",
                    "educ03","educ05","educ07",
                    "employed03","employed05","employed07",
                    "exIncome03","exIncome05","exIncome07",
                    "famSize03","famSize05","famSize07",
                    "kids03","kids05","kids07",
                    "kidsOut03","kidsOut05","kidsOut07",
                    "longWeight03","longWeight05","longWeight07",
                    #"other03","other05","other07",
                    "logy03","logy05","logy07",
                    "region03","region05","region07",
                    #"retired03","retired05","retired07",
                    #"unemployed03","unemployed05","unemployed07",
                    #"white03","white05","white07",
                    "year03","year05","year07",
                    "logconsumption03","logconsumption05","logconsumption07",
                    "age03","age05","age07",
                    "income03","income05","income07",
                    "poorHTM03","poorHTM05","poorHTM07",
                    #"richHTM03","richHTM05","richHTM07",
                    "totalWealth03","totalWealth05","totalWealth07",
                    "race03","race05","race07",
                    #"black09","black11","black13",
                    "educ09","educ11","educ13",
                    "employed09","employed11","employed13",
                    "exIncome09","exIncome11","exIncome13",
                    "famSize09","famSize11","famSize13",
                    "kids09","kids11","kids13",
                    "kidsOut09","kidsOut11","kidsOut13",
                    "longWeight09","longWeight11","longWeight13",
                    #"other09","other11","other13",
                    "logy09","logy11","logy13",
                    "region09","region11","region13",
                    #"retired09","retired11","retired13",
                    #"unemployed09","unemployed11","unemployed13",
                    #"white09","white11","white13",
                    "year09","year11","year13",
                    "logconsumption09","logconsumption11","logconsumption13",
                    "age09","age11","age13",
                    "income09","income11","income13",
                    "poorHTM09","poorHTM11","poorHTM13",
                    #"richHTM09","richHTM11","richHTM13",
                    "totalWealth09","totalWealth11","totalWealth13",
                    "race09","race11","race13"
                    )
f0313_KV_balanced <- f0313_KV_balanced[,f0313_KV_balanced_keeps]
rm(f0313_KV_balanced_keeps)
# drop NA
# Complete-case restriction; the quintile code below relies on this.
f0313_KV_balanced <- na.omit(f0313_KV_balanced)
# quintiles
# Wave-specific income quintile assignment (1 = bottom fifth, 5 = top).
# The commented-out svydesign blocks in the original flagged that these cut
# points are UNWEIGHTED sample quantiles, not survey-weighted ones.
#
# FIX: the original called quantile(..., NA.rm = TRUE); the argument is
# spelled na.rm, so the misspelling was silently swallowed by `...` and
# ignored.  It only worked because the frame was na.omit()-ed above.
quintile_breaks <- function(x) quantile(x, seq(0, 1, 0.2), na.rm = TRUE)
# Map incomes onto 1..5 using the four interior cut points.  findInterval
# reproduces the original ifelse() chain exactly: left-closed (>=) bins,
# everything at or above the 80th percentile -> 5, NA incomes stay NA.
to_quintile <- function(income, breaks) findInterval(income, breaks[2:5]) + 1
for (w in c("03", "05", "07", "09", "11", "13")) {
  inc <- f0313_KV_balanced[[paste0("income", w)]]
  f0313_KV_balanced[[paste0("quintile", w)]] <- to_quintile(inc, quintile_breaks(inc))
}
rm(w, inc)
# (Commented-out survey designs kept from earlier experiments with
# wave-specific longitudinal weights.)
# familyPanelSurvey0313 <- svydesign(id=~primarySamplingUnit,
#                           strat=~stratification,
#                          weights=~longWeight03,
#                          data=f0313_KV_balanced,
#                          nest=TRUE)
#
# familyPanelSurvey0313 <- svydesign(id=~primarySamplingUnit,
#                                    strat=~stratification,
#                                    weights=~longWeight09,
#                                    data=f0313_KV_balanced,
#                                    nest=TRUE)
# drop income
# Raw income levels are no longer needed once the quintiles are assigned.
f0313_KV_balanced <- select(f0313_KV_balanced,-c("income03","income05","income07"))
f0313_KV_balanced <- select(f0313_KV_balanced,-c("income09","income11","income13"))
# wide to long
# Reshape one-row-per-household (wide, wave-suffixed 03..13) into one row
# per household-wave.  Each varying= entry gathers a variable's six wave
# columns by grep on the column name; v.names supplies the long-format
# names and times= the wave suffixes.  NOTE(review): this relies on grep
# returning wave columns in 03..13 order, which holds because the keeps
# vector above fixed the column order; `kids[^Out]` deliberately excludes
# the kidsOut columns and `^employed` avoids matching "unemployed".
KVC_balanced_regressionData_0313 <- reshape(f0313_KV_balanced, idvar=c("uniqueID","primarySamplingUnit","stratification"), direction="long",
                         varying=list(#black=c(grep("black", colnames(f0313_KV_balanced))),
                                      educ=c(grep("educ", colnames(f0313_KV_balanced))),
                                      employed=c(grep("^employed", colnames(f0313_KV_balanced))),
                                      exIncome=c(grep("exIncome", colnames(f0313_KV_balanced))),
                                      famSize=c(grep("famSize", colnames(f0313_KV_balanced))),
                                      kids=c(grep("kids[^Out]", colnames(f0313_KV_balanced))),
                                      kidsOut=c(grep("kidsOut", colnames(f0313_KV_balanced))),
                                      longWeight=c(grep("longWeight", colnames(f0313_KV_balanced))),
                                      #other=c(grep("other", colnames(f0313_KV_balanced))),
                                      logy=c(grep("logy",colnames(f0313_KV_balanced))),
                                      region=c(grep("region", colnames(f0313_KV_balanced))),
                                      #retired=c(grep("retired",colnames(f0313_KV_balanced))),
                                      #unemployed=c(grep("unemployed", colnames(f0313_KV_balanced))),
                                      #white=c(grep("white", colnames(f0313_KV_balanced))),
                                      year=c(grep("year", colnames(f0313_KV_balanced))),
                                      logconsumption=c(grep("logconsumption",colnames(f0313_KV_balanced))),
                                      age=c(grep("age",colnames(f0313_KV_balanced))),
                                      poorHTM=c(grep("poorHTM",colnames(f0313_KV_balanced))),
                                      #richHTM=c(grep("richHTM",colnames(f0313_KV_balanced))),
                                      quintile=c(grep("quintile",colnames(f0313_KV_balanced))),
                                      totalWealth=c(grep("totalWealth",colnames(f0313_KV_balanced))),
                                      race=c(grep("race",colnames(f0313_KV_balanced)))),
                         v.names = c("educ","employed",
                                     "exIncome","famSize","kids","kidsOut","longWeight",
                                     "logy","region","year","logconsumption","age","poorHTM","quintile","totalWealth","race"), #,"bigCity"),
                         times=c("03", "05","07","09", "11","13"))
# create year of birth variable
KVC_balanced_regressionData_0313$yob <- KVC_balanced_regressionData_0313$year - KVC_balanced_regressionData_0313$age
# create factors
KVC_balanced_regressionData_0313$year <- factor(KVC_balanced_regressionData_0313$year)
KVC_balanced_regressionData_0313$yob <- factor(KVC_balanced_regressionData_0313$yob)
KVC_balanced_regressionData_0313$educ <- factor(KVC_balanced_regressionData_0313$educ)
KVC_balanced_regressionData_0313$race <- factor(KVC_balanced_regressionData_0313$race)
KVC_balanced_regressionData_0313$employed <- factor(KVC_balanced_regressionData_0313$employed)
KVC_balanced_regressionData_0313$exIncome <- factor(KVC_balanced_regressionData_0313$exIncome)
KVC_balanced_regressionData_0313$region <- factor(KVC_balanced_regressionData_0313$region)
KVC_balanced_regressionData_0313$kidsOut <- factor(KVC_balanced_regressionData_0313$kidsOut)
KVC_balanced_regressionData_0313$poorHTM <- factor(KVC_balanced_regressionData_0313$poorHTM)
# divide wealth by 10000
KVC_balanced_regressionData_0313$totalWealth <- KVC_balanced_regressionData_0313$totalWealth/10000
# survey design
# Survey-weighted design using the panel's longitudinal weight, with PSU
# clustering nested within strata.
familyPanelSurvey0313 <- svydesign(id=~primarySamplingUnit,
                          strat=~stratification,
                          weights=~longWeight,
                          data=KVC_balanced_regressionData_0313,
                          nest=TRUE)
# do the regressions
# we first regress log income and log consumption expenditures on
#year and cohort dummies, education, race, family structure, employment, geographic
#variables, and interactions of year dummies with education, race, employment, and
#region. We then construct the first-differenced residuals of log consumption d(cit) and
#log income d(yit).
# (Commented-out regressors are alternatives/interactions that were tried
# and dropped; the interaction terms described above are currently off.)
KVC_balanced_income_0313.lm = svyglm(logy ~ year +
                             yob +
                             #age +
                             educ +
                             race +
                             #white +
                             #black +
                             #other +
                             famSize +
                             kids +
                             employed +
                             #unemployed +
                             #retired +
                             exIncome +
                             region +
                             kidsOut +
                             poorHTM +
                             #richHTM +
                             #educ*year +
                             #white*year +
                             #black*year +
                             #other*year +
                             #employed*year +
                             #unemployed*year +
                             #retired*year +
                             #region*year +
                             totalWealth #+
                           #interestRate
                           ,familyPanelSurvey0313)
# Residual log income (the y-hat residuals used for first-differencing).
KVC_balanced_regressionData_0313$resIncome <-KVC_balanced_income_0313.lm$residuals
# Same specification for log consumption; residuals feed the MPC estimation.
KVC_balanced_consumption_0313.lm = svyglm(logconsumption ~ year +
                                  yob +
                                  #age +
                                  educ +
                                  race +
                                  #white +
                                  #black +
                                  #other +
                                  famSize +
                                  kids +
                                  employed +
                                  #unemployed +
                                  #retired +
                                  exIncome +
                                  region +
                                  kidsOut +
                                  poorHTM +
                                  #richHTM +
                                  # educ*year +
                                  # white*year +
                                  # black*year +
                                  # other*year +
                                  # employed*year +
                                  # unemployed*year +
                                  # retired*year +
                                  # region*year +
                                  totalWealth #+
                                #interestRate
                                , familyPanelSurvey0313)
KVC_balanced_regressionData_0313$resconsumption <- KVC_balanced_consumption_0313.lm$residuals
# Compact frame holding only what the IV step needs.
KVC_balanced_covData_0313 = KVC_balanced_regressionData_0313[ , c("uniqueID","quintile","year","resconsumption","resIncome")]
# deal with outliers - take off bottom 0.1%
# Trim both residual series to their (0.1%, 99.9%) range.
resconsumption_q_0313 <- quantile(KVC_balanced_covData_0313$resconsumption,seq(0,1,0.001),na.rm=TRUE)
KVC_balanced_covData_0313 <- subset(KVC_balanced_covData_0313,KVC_balanced_covData_0313$resconsumption>resconsumption_q_0313[2] & KVC_balanced_covData_0313$resconsumption<resconsumption_q_0313[1000])
resIncome_q_0313 <- quantile(KVC_balanced_covData_0313$resIncome,seq(0,1,0.001),na.rm=TRUE)
KVC_balanced_covData_0313 <- subset(KVC_balanced_covData_0313,KVC_balanced_covData_0313$resIncome>resIncome_q_0313[2] & KVC_balanced_covData_0313$resIncome<resIncome_q_0313[1000])
#plot residuals
plot(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$resconsumption)
# make sure have observations for every year
# Keep only households observed in all 6 waves (balanced panel).
KVC_balanced_covData_0313 <- KVC_balanced_covData_0313[KVC_balanced_covData_0313$uniqueID %in% names(which(table(KVC_balanced_covData_0313$uniqueID)==6)), ]
# dct / dyt: two-year changes in the consumption / income residuals anchored
# at 2003 (pre-recession) and 2009 (post-recession); dytplus1 is the
# FOLLOWING two-year income change, used as the instrument.
# NOTE(review): ifelse() recycles the shorter subset() vectors across the
# full-length test vector, so household alignment is only correct if rows
# are sorted identically within each wave -- verify this ordering
# assumption before trusting the differences.
KVC_balanced_covData_0313$dct <- ifelse(KVC_balanced_covData_0313$year==2003,subset(KVC_balanced_covData_0313$resconsumption,KVC_balanced_covData_0313$year==2005) - subset(KVC_balanced_covData_0313$resconsumption,KVC_balanced_covData_0313$year==2003),
                             ifelse(KVC_balanced_covData_0313$year==2005,NA,
                                    ifelse(KVC_balanced_covData_0313$year==2007,NA,
                                           ifelse(KVC_balanced_covData_0313$year==2009,subset(KVC_balanced_covData_0313$resconsumption,KVC_balanced_covData_0313$year==2011) - subset(KVC_balanced_covData_0313$resconsumption,KVC_balanced_covData_0313$year==2009),
                                                  ifelse(KVC_balanced_covData_0313$year==2011,NA,
                                                         ifelse(KVC_balanced_covData_0313$year==2013,NA,NA))))))
KVC_balanced_covData_0313$dyt <- ifelse(KVC_balanced_covData_0313$year==2003,subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2005) - subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2003),
                             ifelse(KVC_balanced_covData_0313$year==2005,NA ,
                                    ifelse(KVC_balanced_covData_0313$year==2007,NA,
                                           ifelse(KVC_balanced_covData_0313$year==2009,subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2011) - subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2009),
                                                  ifelse(KVC_balanced_covData_0313$year==2011,NA,
                                                         ifelse(KVC_balanced_covData_0313$year==2013,NA,NA))))))
KVC_balanced_covData_0313$dytplus1 <- ifelse(KVC_balanced_covData_0313$year==2003,subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2007) - subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2005) ,
                                  ifelse(KVC_balanced_covData_0313$year==2005,NA,
                                         ifelse(KVC_balanced_covData_0313$year==2007,NA,
                                                ifelse(KVC_balanced_covData_0313$year==2009,subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2013) - subset(KVC_balanced_covData_0313$resIncome,KVC_balanced_covData_0313$year==2011),
                                                       ifelse(KVC_balanced_covData_0313$year==2011,NA,
                                                              ifelse(KVC_balanced_covData_0313$year==2013,NA,NA))))))
#KVC_balanced_covData_0313 <- KVC_balanced_covData_0313[,c("uniqueID","quintile","dct","dyt","dytplus1","year")]
#KVC_balanced_covData_0313 <- reshape(KVC_balanced_covData_0313,idvar="uniqueID",direction="wide",v.names=c("quintile","dct","dyt","dytplus1"),timevar="year")
# quintile.2003 actually holds the BASE-YEAR quintile: the 2003-wave
# quintile on 2003 rows and the 2009-wave quintile on 2009 rows; all other
# waves get NA, so the na.omit() below keeps only base-year rows.
KVC_balanced_covData_0313$quintile.2003 <- ifelse(KVC_balanced_covData_0313$year==2003,subset(KVC_balanced_covData_0313$quintile,KVC_balanced_covData_0313$year==2003),
                                       ifelse(KVC_balanced_covData_0313$year==2005,NA,
                                              ifelse(KVC_balanced_covData_0313$year==2007,NA,
                                                     ifelse(KVC_balanced_covData_0313$year==2009,subset(KVC_balanced_covData_0313$quintile,KVC_balanced_covData_0313$year==2009),
                                                            ifelse(KVC_balanced_covData_0313$year==2011,NA,
                                                                   ifelse(KVC_balanced_covData_0313$year==2013,NA,NA))))))
KVC_balanced_covData_0313 <- na.omit(KVC_balanced_covData_0313)
# MPC_balanced
# Per-quintile MPC: IV regression of the consumption change on the income
# change, instrumented by the next period's income change (Kaplan-Violante
# style).  Results and Anderson-Rubin CIs are stashed in named globals via
# assign(); the CI strings returned by anderson.rubin.ci() are parsed by
# splitting on " , " and stripping the bracket characters.
print("MPC_balanced")
for (i in c(1,2,3,4,5)){
MPC_balanced_mdl_0307 <- ivreg(dct ~ dyt, ~ dytplus1, x=TRUE, data=KVC_balanced_covData_0313, subset=quintile.2003==i)
assign(paste0("MPC_balanced_mdl_0307_q",i),
       MPC_balanced_mdl_0307)
assign(paste0("MPC_balanced_0307_q",i),
       MPC_balanced_mdl_0307$coefficients[2]
)
temp <- anderson.rubin.ci(MPC_balanced_mdl_0307)
assign(paste0("MPC_balanced_CI_low_07_q",i),
       as.numeric(substr(unlist(strsplit(temp$confidence.interval, split=" , "))[1],3,nchar(unlist(strsplit(temp$confidence.interval, split=" , "))[1]))))
assign(paste0("MPC_balanced_CI_up_07_q",i),
       as.numeric(substr(unlist(strsplit(temp$confidence.interval, split=" , "))[2],1,nchar(unlist(strsplit(temp$confidence.interval, split=" , "))[2])-3)))
print(paste0("CI low q",i,":",unname(eval(as.name(paste0("MPC_balanced_CI_low_07_q",i))))))
print(paste0("q",i,":",unname(eval(as.name(paste0("MPC_balanced_0307_q",i))))))
print(paste0("CI high q",i,":",unname(eval(as.name(paste0("MPC_balanced_CI_up_07_q",i))))))
}
# NOTE(review): "quantile" below has 10 entries while the MPC columns have
# 5; data.frame() recycles the shorter columns, duplicating each estimate --
# looks like a leftover from when both windows were plotted. Confirm.
out_MPC_balanced <- data.frame("quantile" = c(1,2,3,4,5,1,2,3,4,5),
                      #"year" = c("2007","2007","2007","2007","2007"),
                      "MPC_balanced" = c(MPC_balanced_0307_q1,
                                MPC_balanced_0307_q2,
                                MPC_balanced_0307_q3,
                                MPC_balanced_0307_q4,
                                MPC_balanced_0307_q5),
                      "MPC_balanced_CI_lower" = c(MPC_balanced_CI_low_07_q1,
                                         MPC_balanced_CI_low_07_q2,
                                         MPC_balanced_CI_low_07_q3,
                                         MPC_balanced_CI_low_07_q4,
                                         MPC_balanced_CI_low_07_q5),
                      "MPC_balanced_CI_upper" = c(MPC_balanced_CI_up_07_q1,
                                         MPC_balanced_CI_up_07_q2,
                                         MPC_balanced_CI_up_07_q3,
                                         MPC_balanced_CI_up_07_q4,
                                         MPC_balanced_CI_up_07_q5))
# MPC-by-quintile plot with Anderson-Rubin confidence ribbon.
pdf(file=paste0(getwd(),"/Results/MPC_balanced_whole.pdf"))
ggplot(data = out_MPC_balanced, aes(x = quantile, y = MPC_balanced)) +
  geom_line() + geom_point()+
  scale_color_grey() +
  geom_ribbon(data= out_MPC_balanced,aes(ymin= MPC_balanced_CI_lower,ymax= MPC_balanced_CI_upper),alpha=0.3) +
  xlab("Income Quintile") +
  ylab("MPC (balanced panel)") +
  theme_pubr()
dev.off()
# whole sample MPC_balanced
# NOTE(review): after the na.omit() above the frame appears to contain only
# base-year (2003/2009) rows, so subset=(year==2005) may select nothing --
# confirm whether 2003 was intended here.
MPC_balanced_mdl_0307_whole = ivreg(dct ~ dyt, ~ dytplus1, x=TRUE, data=KVC_balanced_covData_0313,subset=(year==2005))
MPC_balanced_mdl_0307_whole$coefficients[2]
anderson.rubin.ci(MPC_balanced_mdl_0307_whole)
# write to text files
# Export coefficient / SE / significance stars / N as individual text files
# for inclusion in the paper (pre-recession window).
# coefficient
write(toString(round(MPC_balanced_mdl_0307_whole$coefficients[2],3)),file=paste0(getwd(),"/Results/MPC_balanced_0313.txt"))
#standard error
write(toString(round(coef(summary(MPC_balanced_mdl_0307_whole))[2,2],3)),file=paste0(getwd(),"/Results/MPC_balanced_0313_stdErr.txt"))
# stars
write(toString(stars.pval(coef(summary(MPC_balanced_mdl_0307_whole))[2,4])),file=paste0(getwd(),"/Results/MPC_balanced_0313_stars.txt"))
#N
write(toString(MPC_balanced_mdl_0307_whole$nobs),file=paste0(getwd(),"/Results/MPC_balanced_0313_N.txt"))
# Post-recession whole-sample MPC (base year 2009) and its exports.
MPC_balanced_mdl_0913_whole = ivreg(dct ~ dyt, ~ dytplus1, x=TRUE, data=KVC_balanced_covData_0313,subset=(year==2009))
MPC_balanced_mdl_0913_whole$coefficients[2]
anderson.rubin.ci(MPC_balanced_mdl_0913_whole)
# write to text files
# coefficient
write(toString(round(MPC_balanced_mdl_0913_whole$coefficients[2],3)),file=paste0(getwd(),"/Results/MPC_balanced_0913.txt"))
#standard error
write(toString(round(coef(summary(MPC_balanced_mdl_0913_whole))[2,2],3)),file=paste0(getwd(),"/Results/MPC_balanced_0913_stdErr.txt"))
# stars
write(toString(stars.pval(coef(summary(MPC_balanced_mdl_0913_whole))[2,4])),file=paste0(getwd(),"/Results/MPC_balanced_0913_stars.txt"))
#N
write(toString(MPC_balanced_mdl_0913_whole$nobs),file=paste0(getwd(),"/Results/MPC_balanced_0913_N.txt"))
# Clean up loop temporaries and pattern-matched working objects.
rm(i,temp)#,KVC_balanced_covData_0313, KVC_balanced_covData_0313)
rm(list=ls(pattern="quintiles_"))
rm(list=ls(pattern="quantiles_"))
rm(list=ls(pattern="familyPanelSurvey"))
# Pool the pre- (2003-07) and post-recession (2009-13) first differences and
# test equality of the two MPC slopes with a two-equation 2SLS system,
# instrumenting the contemporaneous income change with the next period's.
KVC_balanced_covData_0313_PE <- KVC_balanced_covData_0313[,c("uniqueID","dct","dyt","dytplus1","year")]
# (The original repeated the assignment above twice in a row; the duplicate
# was redundant and has been removed.)
# Window-specific copies: *_0307 is populated on base-year-2003 rows,
# *_0913 on base-year-2009 rows; the other window's columns are NA.
# NOTE(review): every row is NA in one window's columns, so the na.omit()
# below appears to drop ALL rows -- the intent was probably to merge the
# 2003 and 2009 rows by uniqueID into one row per household. Left as-is
# pending confirmation.
KVC_balanced_covData_0313_PE$dct_0307 <-ifelse(KVC_balanced_covData_0313_PE$year==2003,
                                               KVC_balanced_covData_0313_PE$dct,NA)
KVC_balanced_covData_0313_PE$dyt_0307 <-ifelse(KVC_balanced_covData_0313_PE$year==2003,
                                               KVC_balanced_covData_0313_PE$dyt,NA)
KVC_balanced_covData_0313_PE$dytplus1_0307 <-ifelse(KVC_balanced_covData_0313_PE$year==2003,
                                                    KVC_balanced_covData_0313_PE$dytplus1,NA)
KVC_balanced_covData_0313_PE$dct_0913 <-ifelse(KVC_balanced_covData_0313_PE$year==2009,
                                               KVC_balanced_covData_0313_PE$dct,NA)
KVC_balanced_covData_0313_PE$dyt_0913 <-ifelse(KVC_balanced_covData_0313_PE$year==2009,
                                               KVC_balanced_covData_0313_PE$dyt,NA)
KVC_balanced_covData_0313_PE$dytplus1_0913 <-ifelse(KVC_balanced_covData_0313_PE$year==2009,
                                                    KVC_balanced_covData_0313_PE$dytplus1,NA)
KVC_balanced_covData_0313_PE=na.omit(KVC_balanced_covData_0313_PE)
# Per-window regressions through the origin; dyt instrumented by dytplus1.
eqn_0307 <- dct_0307 ~ -1 + dyt_0307
eqn_0913 <- dct_0913 ~ -1 + dyt_0913
system <- list(eqn_0307,eqn_0913)
inst1 <- ~ dytplus1_0307
inst2 <- ~ dytplus1_0913
instlist <- list( inst1, inst2 )
fit2sls2 <- systemfit( system, "2SLS", inst = instlist, data = KVC_balanced_covData_0313_PE )
print(fit2sls2)
# Wald test: are the pre- and post-recession MPC slopes equal?
linearHypothesis(fit2sls2,"eq1_dyt_0307=eq2_dyt_0913")
# latex
# Stargazer table for the two first-stage residual regressions.
# NOTE(review): the covariate label says "$1000s" while totalWealth was
# divided by 10000 above -- confirm the intended units.
writeLines(capture.output(stargazer(KVC_balanced_consumption_0313.lm,KVC_balanced_income_0313.lm,
                                    omit=c("yob"),omit.labels = ("Year of Birth"),
                                    omit.stat =c("ll","rsq","aic"),
                                    column.labels = c("2002-2012","2002-2012"),
                                    covariate.labels = c("Year=2004","Year=2006","Year=2008","Year=2010","Year=2012",
                                                         "Education=Medium","Education=High",
                                                         "Race=Black","Race=Other","Family Size","Number of Kids",
                                                         "Status=Unemployed","Status=Retired","Status=Inactive",
                                                         "Extra Family Income",
                                                         "Region=Midwest", "Region=South","Region=West",
                                                         "Kids outside Family Unit",
                                                         "Poor-HtM","Rich-HtM",
                                                         "Total Wealth (\\$1000s)",
                                                         "Constant"),
                                    dep.var.labels = c("log($\\widehat{c_{it}}$)","log($\\widehat{y_{it}}$)"),
                                    dep.var.caption="",
                                    float=FALSE, align=TRUE,style = "qje",no.space=TRUE)),
           paste0(getwd(),"/Results/KVC_balanced_consumption_0307.tex"))
#table.layout = "=d#-t=n"
#print(xtable(KVC_balanced_consumption_0307.lm, type = "latex"),file=paste0(getwd(),"/Results/KVC_balanced_consumption_0307.tex"),floating=FALSE)
#print(xtable(KVC_balanced_consumption_0913.lm, type = "latex"),file=paste0(getwd(),"/Results/KVC_balanced_consumption_0913.tex"),floating=FALSE)
#print(xtable(KVC_balanced_income_0307.lm, type = "latex"),file=paste0(getwd(),"/Results/KVC_balanced_income_0307.tex"),floating=FALSE)
#print(xtable(KVC_balanced_income_0913.lm, type = "latex"),file=paste0(getwd(),"/Results/KVC_balanced_income_0913.tex"),floating=FALSE)
# Per-quintile IV tables for each window.
writeLines(capture.output(stargazer(MPC_balanced_mdl_0307_q1, MPC_balanced_mdl_0307_q2,MPC_balanced_mdl_0307_q3, MPC_balanced_mdl_0307_q4,MPC_balanced_mdl_0307_q5,
                                    float=FALSE, align=TRUE,dep.var.caption="",dep.var.labels = c("$\\Delta \\widehat{c_{i,t}}$"),
                                    omit=c("Constant"), covariate.labels = c("$\\Delta \\widehat{y_{i,t}}$"),
                                    omit.stat =c("adj.rsq"))),paste0(getwd(),"/Results/MPC_balanced_mdl_0307.tex"))
# NOTE(review): MPC_balanced_mdl_0913_q1..q5 are never created by the loop
# above (it only assigns *_0307_q* models), so this call would fail with
# "object not found" -- confirm whether a second per-window loop is missing.
writeLines(capture.output(stargazer(MPC_balanced_mdl_0913_q1, MPC_balanced_mdl_0913_q2,MPC_balanced_mdl_0913_q3, MPC_balanced_mdl_0913_q4,MPC_balanced_mdl_0913_q5,
                                    float=FALSE, align=TRUE,dep.var.caption="",dep.var.labels = c("$\\Delta \\widehat{c_{i,t}}$"),
                                    omit=c("Constant"), covariate.labels = c("$\\Delta \\widehat{y_{i,t}}$"),
                                    omit.stat =c("adj.rsq"))),paste0(getwd(),"/Results/MPC_balanced_mdl_0913.tex"))
|
361a713083c9b4ecf923000dceda3b634b8a62ac
|
4fc3c300ebc5318c49268c52aa7842795cda91fc
|
/man/CoupledPF-package.Rd
|
7953b045917b063a7a006836ff507cf24d9aac59
|
[] |
no_license
|
pierrejacob/CoupledPF
|
a10699de0ee195620bf6f24be7b02ebceb7fed80
|
6a349d0b6d51bd54b656f35ba65877dc34cbc503
|
refs/heads/master
| 2021-01-20T20:32:56.090492
| 2016-06-04T21:56:18
| 2016-06-04T21:56:18
| 60,315,237
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 315
|
rd
|
CoupledPF-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CoupledPF-package.R
\docType{package}
\name{CoupledPF-package}
\alias{CoupledPF}
\alias{CoupledPF-package}
\title{CoupledPF}
\description{
...
}
\details{
...
}
\author{
Pierre E. Jacob <pierre.jacob.work@gmail.com>
}
\keyword{package}
|
d6ef908b0c763f081d293a5e072e6cd34ff70e52
|
a8adeffe3d9f17976e02ef3ec82914e29b865c2c
|
/hw1.R
|
2fd5856c185122371d82003f393c334edb53805b
|
[] |
no_license
|
mjschaub/naive-bayes
|
483917ccd17d6b389b1ee25fc9b0a0d787b893ba
|
b58b31621f1a940336bdcddf66217ab7789e0836
|
refs/heads/master
| 2020-03-26T20:50:50.754671
| 2018-08-20T01:19:46
| 2018-08-20T01:19:46
| 145,349,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,164
|
r
|
hw1.R
|
# Gaussian naive Bayes on the Pima Indians diabetes data (CS498 HW1).
library(caret)
library(klaR)
#load diabetes data
setwd('D:/CS498/HW1 - naive bayes/')
# Columns 1-8 are the numeric features; column 9 is the 0/1 diabetes label.
raw_data<-read.csv('pima-indians-diabetes.txt', header=FALSE)
x_data <- raw_data[-c(9)]
labels <- raw_data[,9]
#1a
training_score<-array(dim=10)
testing_score<-array(dim=10)
for (wi in 1:10){
data_partition <- createDataPartition(y=labels, p=.8, list=FALSE)
x_test <- x_data[-data_partition,]
y_test <- labels[-data_partition]
x_train <- x_data[data_partition,]
y_train <- labels[data_partition]
trposflag<-y_train>0
ptregs <- x_train[trposflag, ]
ntregs <- x_train[!trposflag,]
ptrmean<-sapply(ptregs, mean, na.rm=TRUE)
ntrmean<-sapply(ntregs, mean, na.rm=TRUE)
ptrsd<-sapply(ptregs, sd, na.rm=TRUE)
ntrsd<-sapply(ntregs, sd, na.rm=TRUE)
#training set
ptroffsets<-t(t(x_train)-ptrmean)
ptrscales<-t(t(ptroffsets)/ptrsd)
ptrlogs<--(1/2)*rowSums(apply(ptrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
ntroffsets<-t(t(x_train)-ntrmean)
ntrscales<-t(t(ntroffsets)/ntrsd)
ntrlogs<--(1/2)*rowSums(apply(ntrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
num_pos_greater_neg_train<-ptrlogs>ntrlogs
num_correct_train<-num_pos_greater_neg_train==y_train
training_score[wi]<-sum(num_correct_train)/(sum(num_correct_train)+sum(!num_correct_train))
#test set
pteoffsets<-t(t(x_test)-ptrmean)
ptescales<-t(t(pteoffsets)/ptrsd)
ptelogs<--(1/2)*rowSums(apply(ptescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
nteoffsets<-t(t(x_test)-ntrmean)
ntescales<-t(t(nteoffsets)/ntrsd)
ntelogs<--(1/2)*rowSums(apply(ntescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
num_pos_greater_neg<-ptelogs>ntelogs
num_correct_test<-num_pos_greater_neg==y_test
testing_score[wi]<-sum(num_correct_test)/(sum(num_correct_test)+sum(!num_correct_test))
}
accuracy_train <- sum(training_score) / length(training_score)
accuracy_test <- sum(testing_score) / length(testing_score)
accuracy_train
accuracy_test
#1b
#replace 0's with NA
# Columns 3, 4, 6 and 8 use 0 to mean "missing"; recode those zeros as NA
# so the na.rm statistics below skip them.
# NOTE(review): assumed to be blood pressure, skin thickness, BMI and age
# in the Pima data -- confirm against the data dictionary.
x_data_two <- x_data
for (i in c(3, 4, 6, 8)){
  nan_vals <- x_data[, i]==0
  x_data_two[nan_vals, i]=NA
}
# Same hand-rolled Gaussian naive Bayes as 1a, over 10 random stratified
# 80/20 splits, but on the NA-recoded features.
training_score<-array(dim=10)
testing_score<-array(dim=10)
for (wi in 1:10){
  data_partition <- createDataPartition(y=labels, p=.8, list=FALSE)
  x_train <- x_data_two[data_partition,]
  y_train <- labels[data_partition]
  x_test <- x_data_two[-data_partition,]
  y_test <- labels[-data_partition]
  # Split the training rows by class (positive outcome = label > 0).
  trposflag<-y_train>0
  ptregs <- x_train[trposflag, ]
  ntregs <- x_train[!trposflag,]
  # Class-conditional per-feature means and sds, with NAs removed.
  ptrmean<-sapply(ptregs, mean, na.rm=TRUE)
  ntrmean<-sapply(ntregs, mean, na.rm=TRUE)
  ptrsd<-sapply(ptregs, sd, na.rm=TRUE)
  ntrsd<-sapply(ntregs, sd, na.rm=TRUE)
  #training stuff
  # Log-likelihood of each row under the positive / negative Gaussians
  # (constant terms dropped): sum of -z^2/2 minus sum(log(sd)).
  ptroffsets<-t(t(x_train)-ptrmean)
  ptrscales<-t(t(ptroffsets)/ptrsd)
  ptrlogs<--(1/2)*rowSums(apply(ptrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
  ntroffsets<-t(t(x_train)-ntrmean)
  ntrscales<-t(t(ntroffsets)/ntrsd)
  ntrlogs<--(1/2)*rowSums(apply(ntrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
  # Predict positive when the positive-class likelihood wins.
  num_pos_greater_neg_train<-ptrlogs>ntrlogs
  num_correct_train<-num_pos_greater_neg_train==y_train
  training_score[wi]<-sum(num_correct_train)/(sum(num_correct_train)+sum(!num_correct_train))
  #testing stuff
  pteoffsets<-t(t(x_test)-ptrmean)
  ptescales<-t(t(pteoffsets)/ptrsd)
  ptelogs<--(1/2)*rowSums(apply(ptescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
  nteoffsets<-t(t(x_test)-ntrmean)
  ntescales<-t(t(nteoffsets)/ntrsd)
  ntelogs<--(1/2)*rowSums(apply(ntescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
  num_pos_greater_neg<-ptelogs>ntelogs
  num_correct_test<-num_pos_greater_neg==y_test
  testing_score[wi]<-sum(num_correct_test)/(sum(num_correct_test)+sum(!num_correct_test))
}
# Mean accuracy over the 10 random splits.
accuracy_nan_train <- sum(training_score) / length(training_score)
accuracy_nan_test <- sum(testing_score) / length(testing_score)
accuracy_nan_train
accuracy_nan_test
#1c
# Naive Bayes via caret/klaR: 10-fold cross-validation on the 80%
# training portion, then a single evaluation on the held-out 20%.
data_partition <- createDataPartition(y=labels, p=.8, list=FALSE)
x_train <- x_data[data_partition,]
y_train <- labels[data_partition]
x_test <- x_data[-data_partition,]
y_test <- labels[-data_partition]
tr <- trainControl(method='cv' , number=10)
model <- train (x_train , factor(y_train) , 'nb' , trControl=tr)
predictions <- predict(model, newdata=x_test)
cf <- confusionMatrix(data=predictions, y_test)
# Held-out accuracy = fraction of matching predictions.
correct <- length(y_test[y_test == predictions])
wrong <- length(y_test[y_test != predictions])
accuracy <- correct / (correct + wrong)
testing_accuracy <- accuracy
accuracy_cv <- sum(testing_accuracy)/length(testing_accuracy)
cf
accuracy_cv
#1d
# SVM benchmark on the same Pima features, via klaR's svmlight wrapper,
# on a single stratified 80/20 split.
data_partition<-createDataPartition(y=labels, p=.8, list=FALSE)
x_train <- x_data[data_partition,]
y_train <- labels[data_partition]
x_test <- x_data[-data_partition,]
y_test <- labels[-data_partition]
#svm stuff
svm <- svmlight(x_train, factor(y_train))
# BUG FIX: the predictions were previously stored in `labels`, clobbering
# the global vector of diabetes outcomes used by every other section.
svm_pred <- predict(svm, x_test)
results <- svm_pred$class
# Held-out accuracy = fraction of matching predictions.
correct <- sum(results == y_test)
wrong <- sum(results != y_test)
accuracy_svm <- correct / (correct + wrong)
accuracy_svm
#Problem 2
# MNIST digit classification with naive Bayes: Gaussian first, then
# Bernoulli on binarized pixels.
library(readr)
library(data.table)
setwd('D:/CS498/HW1 - naive bayes/')
raw_train_data_two <- as.data.frame(read.csv("MNIST_train.csv",header=TRUE,check.names=FALSE))
library(caret)
library(klaR)
library(e1071)
# Digit labels come from the 'label' column of the MNIST csv.
y_labels <- (raw_train_data_two$label)
# BUG FIX: exclude the label column from the feature matrix -- it was
# previously left in, so the classifiers trained on their own answer.
x_data_mnist <- subset(raw_train_data_two, select = -label)
data_partition <- createDataPartition(y=y_labels, p=.8, list=FALSE)
x_train <- x_data_mnist[data_partition,]
y_train <- y_labels[data_partition]
x_test <- x_data_mnist[-data_partition,]
y_test <- y_labels[-data_partition]
#train naive bayes model using e1071 (Gaussian per pixel)
model <- naiveBayes(x_train,factor(y_train))
#prediction
predictions <- predict(model, newdata=x_test)
cf <- confusionMatrix(data=predictions, y_test)
correct <- length(y_test[y_test == predictions])
wrong <- length(y_test[y_test != predictions])
accuracy <- correct / (correct + wrong)
testing_accuracy_gaussian_untouched <- accuracy
accuracy_gaussian <- sum(testing_accuracy_gaussian_untouched)/length(testing_accuracy_gaussian_untouched)
cf
accuracy_gaussian
library(quanteda)
library(naivebayes)
# Binarize pixel intensities at 127 for the Bernoulli model.
thresh = 127
thresh_x_train <- x_train
thresh_x_train[x_train < thresh] <- 0
thresh_x_train[x_train >= thresh] <- 1
thresh_x_test <- x_test
thresh_x_test[x_test < thresh] <- 0
thresh_x_test[x_test >= thresh] <- 1
head(thresh_x_train)
# BUG FIX: factor() applied to a whole data frame collapses it to one long
# vector; convert each 0/1 pixel column to a factor instead, so
# naive_bayes() fits a Bernoulli (categorical) distribution per feature.
thresh_x_train_fct <- as.data.frame(lapply(thresh_x_train, factor, levels = c(0, 1)))
thresh_x_test_fct <- as.data.frame(lapply(thresh_x_test, factor, levels = c(0, 1)))
model_bernoulli <- naive_bayes(x = thresh_x_train_fct, y = factor(y_train), laplace = 1)
model_bernoulli
predictions_b <- predict(model_bernoulli, newdata = thresh_x_test_fct)
cf_b <- confusionMatrix(data=predictions_b, y_test)
correct <- length(y_test[y_test == predictions_b])
wrong <- length(y_test[y_test != predictions_b])
accuracy <- correct / (correct + wrong)
testing_accuracy_bernoulli_untouched <- accuracy
accuracy_bernoulli <- sum(testing_accuracy_bernoulli_untouched)/length(testing_accuracy_bernoulli_untouched)
#accuracy after doing bernoulli naive bayes on MNIST
cf_b
accuracy_bernoulli
# Helper: rotate a matrix (transpose of the column-reversed matrix), used
# to orient each flattened MNIST digit correctly after reshaping.
rotate_matrix <- function(x) t(apply(x, 2, rev))
library(naivebayes)
# Build a 42000 x 401 table: column 1 = digit label, columns 2:401 = the
# binarized central 20x20 crop of each 28x28 image.
bounded_m_data_matrix <- matrix(NA,nrow=42000,ncol=401)
bounded_m_data_matrix[1:42000,1] <- raw_train_data_two[1:42000,1]
thresh <- 127
for(x in 1:42000)
{
  curr_m <- rotate_matrix(matrix(unlist(raw_train_data_two[x,-1]),nrow = 28,byrow = T))
  # Binarize pixel intensities at the threshold.
  thresh_m <- curr_m
  thresh_m[curr_m < thresh] <- 0
  thresh_m[curr_m >= thresh] <- 1
  # Crop to the central 20x20 window (drops the mostly-empty border).
  curr_bounded_m <- thresh_m[4:23,4:23]
  bounded_m_data_matrix[x,-1] <- as.vector(curr_bounded_m)
}
bounded_m_data <- data.frame(bounded_m_data_matrix)
data_partition <- createDataPartition(y=y_labels, p=.8, list=FALSE)
# BUG FIX: column 1 of bounded_m_data is the label itself; it was
# previously left in the feature set, leaking the answer to the model.
x_train <- bounded_m_data[data_partition, -1]
y_train <- y_labels[data_partition]
x_test <- bounded_m_data[-data_partition, -1]
y_test <- y_labels[-data_partition]
#train Gaussian naive bayes model using e1071 on the bounded data
model <- naiveBayes(x_train,factor(y_train))
#prediction
predictions <- predict(model, newdata=x_test)
cf_bounded <- confusionMatrix(data=predictions, y_test)
correct <- length(y_test[y_test == predictions])
wrong <- length(y_test[y_test != predictions])
accuracy <- correct / (correct + wrong)
testing_accuracy_gaussian_bounded <- accuracy
accuracy_gaussian_bounded <- sum(testing_accuracy_gaussian_bounded)/length(testing_accuracy_gaussian_bounded)
cf_bounded
accuracy_gaussian_bounded
# Bernoulli naive Bayes on the bounded data.
# BUG FIX: factor() on a whole data frame collapses it to one long vector;
# convert each 0/1 pixel column to a factor so naive_bayes() fits a
# Bernoulli (categorical) distribution per feature.
x_train_fct <- as.data.frame(lapply(x_train, factor, levels = c(0, 1)))
x_test_fct <- as.data.frame(lapply(x_test, factor, levels = c(0, 1)))
model_bernoulli_bounded <- naive_bayes(x = x_train_fct, y = factor(y_train), laplace = 1)
model_bernoulli_bounded
# BUG FIX: predict on the bounded test set (was thresh_x_test, the
# 784-column data from the previous, unbounded section).
predictions_b_bounded <- predict(model_bernoulli_bounded, newdata = x_test_fct)
# BUG FIX: score the bounded predictions (was predictions_b, a copy-paste
# left-over from the unbounded section).
cf_b_bounded <- confusionMatrix(data=predictions_b_bounded, y_test)
correct <- length(y_test[y_test == predictions_b_bounded])
wrong <- length(y_test[y_test != predictions_b_bounded])
accuracy <- correct / (correct + wrong)
testing_accuracy_bernoulli_bounded <- accuracy
accuracy_bernoulli_bounded <- sum(testing_accuracy_bernoulli_bounded)/length(testing_accuracy_bernoulli_bounded)
#accuracy after doing bernoulli naive bayes on bounded MNIST
cf_b_bounded
accuracy_bernoulli_bounded
#decision forest section
library(party)
library(randomForest)
head(raw_train_data_two)
# BUG FIX: randomForest fits a *regression* forest when y is numeric; the
# digit label must be a factor for classification.
output_forest <- randomForest(factor(label) ~ ., data = raw_train_data_two, ntree=30, maxnodes=65536)
output_forest
# BUG FIX: as above, and drop column 1 of bounded_m_data (the label) from
# the predictors so the forest is not trained on the answer.
output_forest_bounded <- randomForest(x=bounded_m_data[, -1], y=factor(y_labels), ntree=10, maxnodes=65536)
output_forest_bounded
#maxnodes = 2^depth
#depth 16 - 65536 nodes
#depth 8 - 256 nodes
#depth 4 - 16 nodes
|
42e6c018081695798913007bb4ad4466794cf481
|
8a208c7405ba0ec615145958c34c73dcb30822c3
|
/Titanic-1st-Tutorial.r
|
5479a857bb3cfb6b60d78a79fb981886da8196b3
|
[] |
no_license
|
giffen-n/titanic-test
|
368c668a84207984980f9a413a793994e023511d
|
e58eadb215d4278f9a7a7b13a7795f2f24a7c951
|
refs/heads/master
| 2020-12-24T16:43:05.711513
| 2014-12-24T05:13:10
| 2014-12-24T05:13:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 434
|
r
|
Titanic-1st-Tutorial.r
|
# Nicholas Giffen - 23 Dec 2014
# Titanic Tutorial 1 by Trevor Stephens
# Full guide available at: http://trevorstephens.com/
# Set Working Directory and load data sets
setwd("~/titanic")
train <- read.csv("~/titanic/train.csv")
test <- read.csv("~/titanic/test.csv")
# Observe structure of training dataframe
str(train)
# Create table of number and proportion of survived
# (prop.table converts the raw counts into fractions of passengers)
table(train$Survived)
prop.table(table(train$Survived))
|
7eb9b75d0215041216b47a4cc210cb74342bc5dd
|
fb5d577ce7e37678f7612590b1e78df887b42d7f
|
/treat_blocks_lines_output.R
|
b3dc1af18e89962dc0c731b4345f104be5d7ab04
|
[] |
no_license
|
S-Homayounpour/wildfire_risk_shiny
|
86d1a52927fb98c791169a2654a490a4a5548275
|
cf701d1ba355c1e6c35f7a421d03cd55cd3d256b
|
refs/heads/master
| 2021-09-15T19:29:20.673973
| 2021-09-06T02:53:28
| 2021-09-06T02:53:28
| 174,888,654
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,542
|
r
|
treat_blocks_lines_output.R
|
library(WildfireRisk)
library(RSQLite)

# Simulate fuel "treatment" of planning blocks: for each block intersected
# by scan lines, set the time-since-fire (tsf) of the sample points inside
# the block to zero, recompute the per-line risk probabilities, and return
# one row per original line keeping the lower of the initial vs treated
# pobs value.
#
# block.risk - sf polygons with per-block risk summaries (pobs_mean, ...)
# line.risk  - sf scan lines; attr "dbname" names the SQLite file whose
#              "pointdata" table holds the per-line sample points
do_treat_blocks_line <- function(block.risk, line.risk) {
  # load point data from database
  con <- dbConnect(SQLite(), dbname = attr(line.risk, "dbname"))
  pointdata <- dbReadTable(con, "pointdata")
  dbDisconnect(con)
  # get indices of intersecting scan lines for each block
  intersections <- lines_in_blocks(block.risk, line.risk, by = "block")
  # keep a copy of the untreated line risks for the final comparison
  initials <-line.risk
  print("initial line risks are")
  print(initials)
  # reduce line.risk to a plain data frame with IDs and the variables
  # needed for risk calculation
  line.risk <- line.risk %>%
    as.data.frame() %>%
    select(locationid, lineid, forest_p, distance, is_west) %>%
    mutate(locationid = as.character(locationid))
  blocks.with.lines <- which(sapply(intersections, length) > 0)
  if (length(blocks.with.lines) == 0) {
    warning("No intersecting scan lines found for any block.\n")
    block.risk$ptreat_mean <- block.risk$pobs_mean
    # NOTE(review): this early return hands back the *reduced* data frame
    # (no geometry, no pobs/pmax columns), unlike the normal return path;
    # confirm callers handle that shape.  The block.risk assignment above
    # is also local and discarded.
    return(line.risk)
  }
  # For each block intersected by scan lines, identify sample points
  # within the block, set time since fire of those points to zero,
  # re-calculate line risk values, and summarize for the block.
  block.risk$ptreat_mean <- block.risk$pobs_mean
  k <- 0
  pb <- txtProgressBar(0, length(blocks.with.lines), style = 3)
  treated_lines <- data.frame()
  for (iblock in blocks.with.lines) {
    ii <- intersections[[iblock]]
    # sample points belonging to the lines that touch this block
    pdat <- line.risk[ii, ] %>%
      select(locationid, lineid) %>%
      left_join(pointdata, by = c("locationid", "lineid"))
    # Set time since fire of points within the block
    # to zero
    pts <- lapply(1:nrow(pdat), function(i) st_point(c(pdat$x[i], pdat$y[i])) )
    pts <- st_sfc(pts, crs = st_crs(block.risk))
    ii <- st_intersects(block.risk[iblock, ], pts)[[1]]
    # It is possible that a scan line that intersects the block
    # has no sample points in the block (so nothing to do).
    #
    if (length(ii) > 0) {
      pdat$tsf[ii] <- 0
      # Calculate updated line risk values: mean tsf per line after the
      # treatment, then recompute pobs (and pmax at tsf = 50).
      ldat <- pdat %>%
        group_by(locationid, lineid) %>%
        summarize(tsf_mean_treated = mean(tsf, na.rm = TRUE)) %>%
        ungroup() %>%
        left_join(line.risk, by = c("locationid", "lineid")) %>%
        mutate(pobs = calculate_line_risk(tsf_mean = tsf_mean_treated,
                                          forest_p = forest_p,
                                          distance = distance,
                                          is_west = is_west)) %>%
        mutate(pmax = calculate_line_risk(tsf_mean = 50,
                                          forest_p = forest_p,
                                          distance = distance,
                                          is_west = is_west) )
      print(ldat)
      treated_lines <- rbind(treated_lines,ldat)
    }
    k <- k + 1
    setTxtProgressBar(pb, k)
  }
  close(pb)
  ## drop duplicated lines keeping the smaller pobs value: setting tsf to
  ## zero can only reduce pobs, or leave it unchanged when the treated
  ## points do not contribute significantly to the line's overall
  ## probability
  # Rename so treated rows line up with the initial rows for rbind below.
  names(treated_lines)[3] <- "tsf_mean"
  print(treated_lines)
  trimmed <- initials %>% select(locationid,lineid)
  result <- rbind(as.data.frame(initials) %>% select(-geometry),treated_lines)%>%
    inner_join(trimmed,by = c("locationid","lineid")) %>%
    group_by(locationid,lineid) %>%
    filter(pobs == min(pobs)) %>%
    ungroup() %>%
    distinct(locationid,lineid,.keep_all = TRUE)
  print("result is")
  print(result)
  result
}
### noDB variant of summarize_location_risk: summarises per-location risk
### straight from the line-level table, ignoring line risk-class attributes.
summarize_location_risk_noDB <- function(line.risk, quantiles = c(0.25, 0.75)) {
  # Pull the first coordinate of a set of scan lines -- the central point
  # the lines radiate from.
  anchor_point <- function(lines) {
    xy <- st_coordinates(lines)
    data.frame(x = xy[1, 1], y = xy[1, 2])
  }

  use.quantiles <- !is.null(quantiles) & length(quantiles) > 0
  if (use.quantiles) {
    # e.g. c(0.25, 0.75) -> "25", "75" (suffixes for the output columns)
    quantile.labels <- stringr::str_replace(names(quantile(1, quantiles)), "\\%", "")
  }

  # One anchor point per location.
  anchors <- line.risk %>%
    group_by(locationid) %>%
    do(anchor_point(.$geometry))

  # Mean (and optional quantiles) of a vector, returned as a one-row data
  # frame with columns "<varname>_mean" and "<varname>_<q>".
  summarize_var <- function(values, varname) {
    res <- data.frame(mu = mean(values, na.rm = TRUE))
    colnames(res) <- paste(varname, "mean", sep = "_")
    if (use.quantiles) {
      qmat <- t(quantile(values, probs = quantiles, na.rm = TRUE))
      colnames(qmat) <- paste(varname, quantile.labels, sep = "_")
      res <- cbind(res, qmat)
    }
    res
  }

  # Per-location summary statistics for observed (pobs) and maximum (pmax)
  # probabilities.
  out <- line.risk %>%
    # drop the scan-line geometry
    as.data.frame() %>%
    group_by(locationid) %>%
    do(cbind(summarize_var(.$pobs, "pobs"), summarize_var(.$pmax, "pmax"))) %>%
    ungroup() %>%
    # attach the central point of each location
    left_join(anchors, by = "locationid") %>%
    # promote to a spatial (sf) object with point geometry
    st_as_sf(coords = c("x", "y"))

  # Carry over the input's coordinate reference system.
  st_crs(out) <- st_crs(line.risk)
  out
}
|
c82edb78bc525766fa144c65dbb29859962a9fcf
|
fc77f67a23c359160a60ce135182b94ba697cd2c
|
/lab3/lab3.R
|
ea1a32393c15f61a5bd6a2e339e5b396cd8569df
|
[] |
no_license
|
edlinguerra/LCA-ME
|
e4ebd9869dc678405c3aae2196fa4a90946916e1
|
b84108c42bf4a828649449f5d8f575aee338c29c
|
refs/heads/master
| 2023-02-23T23:28:49.471949
| 2023-02-14T22:16:06
| 2023-02-14T22:16:06
| 238,227,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,629
|
r
|
lab3.R
|
#Script del laboratorio 3
#Nombre y Apellido:
#Preguntas:
# 1. Importa los datos en R, y verifica sus características y estructura. ¿Cuántas dimensiones tiene la
#tabla que importaste? ¿En qué difiere esta de aquellas usada en las pruebas de *t* para dos muestras?
# Install both packages first if they are not already available.
library(dplyr)
library(tidyr)
# Reshape the raw table to long format: stack the four locality columns
# into LOCALIDADES / DBO pairs, drop the old column and sort.
# NOTE(review): `datos_originales` must be imported (step 1) before this
# chunk runs -- it is not defined in this file.
datos <- datos_originales %>% # Ctrl+Shift+M inserts the pipe
  pivot_longer(cols = 2:5, # columns 2-5 hold the localities
               names_to = "LOCALIDADES", # stack locality names into a LOCALIDADES column
               values_to = "DBO") %>% # DBO readings go to a DBO column
  select(-localidades) %>% # drop the old localidades column (not informative)
  arrange(LOCALIDADES) # order by locality
boxplot(formula = DBO ~ LOCALIDADES, data = datos)
#
#
# 2. Lee el problema con atención y responde a las siguientes preguntas:
# a) Identifica la relación causa-efecto que se pretende corroborar con este experimento.
# b) Identifica la variable de respuesta, y las unidades en que ésta fue medida. ¿Es una variable
#continua o discreta?
# c) Identifica la variable que explicaría la respuesta si se corrobora la relación causa-efecto que
#fue propuesta. ¿Es una variable continua o categórica?
# d) Identifica la unidad experimental. ¿Cuál es el valor de n (i.e., réplicas por nivel)?
# f) Formula la hipótesis nula de este análisis en un enunciado simple.
#
# 3. Lleva a cabo una breve exploración gráfica y numérica de los datos que te permita responder a las
#siguientes preguntas. Para esto usa las funciones `boxplot` y `aggregate`. Este último para calcular
#las medias y desviaciones estándar de cada localidad con las funciones `mean` y `sd`, respectivamente.
#Note que no podrá estimar ambos parámteros simultáneamente, por lo que deberá calcularlos separadamente,
#y luego combinar resultados para tener una sola tabla. Recomiendo que para ambas funciones (`boxplot` y
#`aggregate`) use el método *S3 method for class 'formula'*.
#
##Completa los argumentos para la construcción de el gráfico de cajas
boxplot()
##Completa los argumentos para la estimación de promedio y desviación estándar
promedio<-aggregate()
promedio
desv.est<-aggregate()
desv.est
# Combinar ambos resultados en una tabla
tabla1<-data.frame("localidades" = promedio$localidades, "DBO" = c(NA,NA,NA,NA), "Desv.Est" = c(NA,NA,NA,NA))
tabla1[,2]<-promedio[,2]
tabla1[,3]<-desv.est[,2]
tabla1
#
# Responde a las siguientes preguntas:
# a) ¿Son similares o diferentes los valores promedios de las 4 localidades?
# b) ¿Son similares o diferentes las dispersiones de las 4 localidades?
# c) ¿Cómo es la distribución de la variable de respuesta?
# d) ¿Es esta distribución similar entre los distintos niveles?
#
# 4. Aplica un **ANOVA** a los datos. Para ello se requiere primero obtener un modelo lineal usando la función
#`lm`. Esta función ajusta un modelo lineal de la variable de respuesta en función de la variable explicativa.
#Como en este caso la variable explicativa es un factor (categórico), es conveniente hacerlo explícito. Puedes
#ajustar el modelo usando la *localidad* como variable explicativa. Copia el siguiente comando y analiza la respuesta
#que R devuelve (PISTA: la primera linea de la respuesta es el modelo).
## Declare localities as an explanatory variable (factor).
# NOTE(review): the reshaped `datos` built above has the column named
# `LOCALIDADES` (upper case), so `datos$localidades` may be NULL here --
# confirm the intended column name.
datos$localidades<-as.factor(datos$localidades)
#
# Check that localities are now recognized as a factor in R.
is.factor(datos$localidades)
#
# Linear model: one mean per locality (0 + ... drops the global intercept).
lm(DBO ~ 0 + localidades, data = datos)
#
# Responde a las siguientes preguntas:
# a) ¿Reconoces alguno de los valores bajo el título de *Coefficients*? ¿Qué crees que son éstos valores?
# b) ¿qué representa el primer coeficiente generado por `lm`?
#
#
# 5. Guarda el modelo que acabas de ajustar bajo un objeto con el nombre *mod1*, y aplica la función `anova` a dicho objeto.
# Responde a las siguientes preguntas:
# a) ¿Qué hace la función `anova`?
# b) ¿Qué es la *Sum Sq* correspondiente al factor *localidades* y a los residuales?¿qué es *Df*?
# c) ¿Cuánto vale la *Sum Sq* total?
# d) ¿Corresponden los valores de *Sum Sq* y *Df* que aparecen en la consola con aquéllos calculados en clase?
# e) ¿Qué es la *Mean Sq*?
# f) ¿Qué representa el valor de *F* de la tabla? ¿Es un valor grande o pequeño? ¿Cómo lo sabes?
# g) ¿Qué representa el valor de probabilidad? ¿Es un valor grande o pequeño? ¿Cómo lo sabes?
# h) Calcula la proporción de la variación total de la variable *DBO* que es debida al factor *localidades*.
# i) ¿Es grande o pequeña esta proporción? ¿Cómo lo sabes?
# j) A partir de este resultado, concluye si tienes evidencias suficientes para rechazar la Ho que formulaste antes.
# k) ¿Cuál es la probabilidad de equivocarte en esta aseveración?
#
# 6. Utilizando la función `qf` obtén el valor crítico de *F* bajo la hipótesis nula. Los argumentos de la función están
#en el 'help'. Busca valores de los grados de libertad para el numerador y el denominador en la tabla anterior, y considera
#un valor de $\alpha = 0.05$. ¿Qué representa este valor?
##Completa los argumentos de la función
qf(p = , df1 =, df2 =, lower.tail=F)
#
#
#
#7. Intenta predecir lo que sucedería con el valor crítico de *F* bajo las siguientes situaciones. Después modifica el
# comando que escribiste en el inciso 6 para corroborar tus predicciones.
# a) si se aumenta el valor de $\alpha = 0.10$ (uno en diez chances de equivocarme).
# b) si se disminuye el valor de alfa a $\alpha = 0.001$ (uno en mil chances de equivocarme).
# c) si aumentas el número de réplicas en este experimento a *n* = 30 réplicas por cada nivel del factor, manteniendo $\alpha = 0.05$.
#
# 8. Aplica la función `summary` al modelo lineal que ajustaste, y responde a las siguientes preguntas:
# #Completa los argumentos de la función
summary()
#
# Responde a las siguientes preguntas:
# a) ¿Reconoces algún valor ya obtenido o calculado en el resultado que R devuelve?
# b) ¿Qué crees que sea el valor dado en 'Residual Standard Error'?
# 9. Aplica la función `fitted` al modelo lineal que ajustaste. ¿Qué hace la función `fitted`?
# ¿Qué pasa si aplicas la función `predict` al modelo lineal? ¿Cuántos hay?
#Completa los argumentos de la función
fitted()
predict()
#
# 10. Para obtener una visualización prolija del modelo con los datos observados, copia los siguientes
#códigos del paquete `ggplot2`. Estos códigos representarán los valores por localidad, los promedios
#y desviaciones estándar. Explora cada uno y trata de identificar qué se va ganando a medida que agregas capas.
library(ggplot2)
#
# #Figura básica
# fig1 <- ggplot(datos, aes(y=DBO, x=localidades))+
# geom_point()
# fig1
#
# #Figura básica con los promedios
# fig1.1 <- fig1 +
# geom_point(data=tabla1, aes(x=localidades, y=DBO, col=localidades), size=3)
#
# fig1.1
# #Figura básica con promedios y barras de desviación estándar
# fig1.2 <- fig1.1 +
# geom_point(data=tabla1, aes(x=localidades, y=DBO, col=localidades), size=3) +
# geom_errorbar(data = tabla1, aes(x= localidades, ymin = DBO - Desv.Est, ymax = DBO + Desv.Est), width = 0.2)
# fig1.2
#
# #figura básica con promedios, barras de desviación estándar y cambios en la estética de la figura
#
# fig1.3 <- fig1.2 +
# theme_bw() +
# ylab(expression(paste("DBO ", "(mg ", O[2], "/l/d)")))+
# xlab("Localidades")
#
# #figura sólo con promedios, barras de desviación estándar y cambios en la estética de la figura
# fig1.4 <- ggplot(data = tabla1, aes(y=DBO, x=localidades)) +
# geom_errorbar(data = tabla1, aes(x= localidades, ymin = DBO - Desv.Est, ymax = DBO + Desv.Est), width = 0.2) +
# geom_point(aes(col = localidades), size = 3)+
# theme_bw() +
# ylab(expression(paste("DBO ", "(mg ", O[2], "/l/d)")))+
# xlab("Localidades")
#
# ```
# Responde a las siguientes preguntas:
# a) ¿Qué representan los puntos de color?
# b) ¿Qué representan los puntos negros?
# c) ¿Qué representan las barras?
# d) Desde el punto de vista gráfico ¿qué se gana al pasar de fig1 a fig1.1, luego a fig1.2, a fig1.3 y fig1.4?
# e) En el contexto del seguimiento ambiental ¿qué sugiere el resultado?
# f) ¿existen diferencias significativas entre loc1 con loc2? ¿y entre loc1 con loc3? ¿loc3 respecto loc4?
#
# Por ahora es suficiente. Salve el proyecto con el nombre "laboratorio 3".
|
9f437078ae108eaf2ea8332aa04dc2511175f78c
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612988963-test.R
|
361a77311b910b1b678458c29e2e16dc615436e8
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
1612988963-test.R
|
# Fuzzer-generated regression input for metafolio:::est_beta_params: call
# it with a negative, denormal-magnitude mean and variance and dump the
# structure of the result (used for valgrind/libFuzzer comparison runs).
testlist <- list(mu = -2.7226523566839e-40, var = -2.72265235668397e-40)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
6f7253d0c9de97d1f164866ccc91dd1eb0feb630
|
02f363c8bfc69406f63a0441726c12cce970d093
|
/code/R/09_mod_start.R
|
d8ab56bf78284b3df7dcf2cde7890fb169251051
|
[] |
no_license
|
wgar84/Primaset
|
4d07e290cae9a3f3b502a1b7a08db7b4c89e97be
|
2367871dd798e1144b64045e7854836c91cdb91d
|
refs/heads/master
| 2020-06-27T08:25:09.859983
| 2019-01-18T14:39:44
| 2019-01-18T14:39:44
| 94,248,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,873
|
r
|
09_mod_start.R
|
require(geomorph)
require(shapes)
require(evolqg)
require(plotrix)
# Load the auxiliary hypotheses plus the symmetrized primate landmark data.
load('../Raw Data/Aux.RData')
load('Primates/Sym.RData')
load('Primates/Info.RData')
dim(prima.sym$coord)
# Subset the landmark configurations to Saguinus geoffroyi specimens.
sag.sym <- prima.sym $ coord [, , prima.info $ GSP == 'Saguinus_geoffroyi']
# GPA without scaling, so size remains in the aligned ("size-shape") data.
sag.sizeshape.gpa <- procGPA(sag.sym, scale = FALSE)
sag.ss.tan <- sag.sizeshape.gpa $ tan
# Reshape the tangent coordinates to landmarks x dims x specimens.
dim(sag.ss.tan) <- c(36, 3, 109)
dimnames(sag.ss.tan)[1:2] <- dimnames(prima.sym $ coord)[1:2]
# Keep the first 22 landmarks and strip the '-E' suffix from their names.
sag.ss.tan <- sag.ss.tan [1:22, , ]
dimnames(sag.ss.tan) [[1]] <- gsub('-E', '', dimnames(sag.ss.tan) [[1]])
dimnames(sag.ss.tan) [[2]] <- c('X', 'Y', 'Z')
# Flatten to a 66-variable (X/Y/Z per landmark) x 109-specimen matrix,
# then transpose to specimens x variables.
coord.names <- paste(rep(dimnames(sag.ss.tan) [[1]], each = 3),
rep(dimnames(sag.ss.tan) [[2]], times = 22), sep = '.')
sag.ss.tan <- aperm(sag.ss.tan, c(2, 1, 3))
dim(sag.ss.tan) <- c(66, 109)
dimnames(sag.ss.tan) <- list(coord.names,
prima.info $ ID [prima.info $ GSP == 'Saguinus_geoffroyi'])
sag.ss.tan <- t(sag.ss.tan)
# Quick look: correlation structure heat map and eigenvalue scree plot.
par(mfrow = c(1, 2))
color2D.matplot(cor(sag.ss.tan))
plot(eigen(var(sag.ss.tan)) $ values)
# Build the neuro/face modularity hypothesis matrix for the 66 variables,
# aligning hypothesis row names with the data's column names.
rownames(Aux $ sym.hyp [[1]])
sym.hyps <- Aux $ sym.hyp [[1]] [1:66, ]
rownames(sym.hyps) <- gsub('-D', '', rownames(sym.hyps))
sym.hyps <- sym.hyps [match(colnames(sag.ss.tan), rownames(sym.hyps)), ]
neuroface <-
sym.hyps [, 'Neuro'] %*% t(sym.hyps [, 'Neuro']) +
sym.hyps [, 'Face'] %*% t(sym.hyps [, 'Face'])
color2D.matplot(neuroface)
# Mantel test of the modularity hypothesis against the correlation matrix.
MantelModTest(neuroface, cor(sag.ss.tan), landmark.dim = 3,
withinLandmark = FALSE, MHI = TRUE)
sag.cormat <- cor(sag.ss.tan)
sag.vcv <- var(sag.ss.tan)
# First three eigenvectors of the covariance matrix, labelled by variable.
sag.evec3 <- eigen(sag.vcv) $ vectors [, 1:3]
rownames(sag.evec3) <- colnames(sag.ss.tan)
# Compare within-neuro vs neuro-face absolute correlation distributions.
hist(abs(sag.cormat) [which(sym.hyps [, 'Neuro'] == 1), which(sym.hyps [, 'Neuro'] == 1)])
hist(abs(sag.cormat) [which(sym.hyps [, 'Neuro'] == 1), which(sym.hyps [, 'Face'] == 1)])
|
5b26acdd2370aa7e6256209090b8790903dcbe9b
|
f3328142651592d3ddf06f333153c50158ac87df
|
/ChicagoCrime.R
|
656bf0e0db8f813d2cea5eb0a1b6c47ea1f9db96
|
[] |
no_license
|
rachaelbardell/Chicago_Crime_Map
|
41faaccd7fd1477e5119eeafbe603800763fdd36
|
bb4adfd5d3c9869828bec37815fa65360a25af8c
|
refs/heads/master
| 2016-09-06T21:47:34.189589
| 2013-08-22T16:10:48
| 2013-08-22T16:10:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
ChicagoCrime.R
|
library(ggplot2)
# Pull the Chicago crime extract directly from the city data portal.
data <- read.csv("http://data.cityofchicago.org/views/x2n5-8w5q/rows.csv", stringsAsFactors = F)
names(data) <- tolower(names(data))
# Drop rows without coordinates so every point can be mapped.
data <- subset(data, !is.na(longitude) & !is.na(latitude))
ggplot(data)+geom_point(aes(x=longitude,y=latitude), alpha = .2, size =.4)
#highest crimes
# Keep only crime types with more than 100 reported incidents.
crimes <- table(data$primary.description) # 31 crimes
crimes <- as.list(names(crimes[crimes > 100])) # only 25 with over 100 observations
data_new <- subset(data, primary.description %in% crimes)
ggplot(data_new)+geom_bar(aes(x=primary.description, fill = primary.description))
#by month
month <- substring(data$date..of.occurrence, 1, 2)
data_new$month <- substring(data_new$date..of.occurrence, 1, 2)
table(data_new$month)
ggplot(data_new, aes(x=primary.description, fill = month))+geom_bar(position="stack")
#by hour
data$hour <- substring(data$date..of.occurrence, 21, 22)
ggplot(data, aes(x=primary.description, fill = hour))+geom_bar(position="stack")
ggplot(data_new, aes(x=primary.description, fill = hour))+geom_bar(position="dodge")
# create a column to categorize crimes as violent or not violent
# BUG FIX: "ASSUALT" was misspelled, so assaults were never classed violent.
violent <- c("ASSAULT", "BATTERY", "CRIM SEXUAL ASSAULT", "KIDNAPPING", "SEX OFFENSE", "HOMICIDE", "INTIMIDATION")
# 1. Return TRUE or FALSE
data$violent <- data$primary.description %in% violent
# 2. make a column with all not violent and then overwrite the violent ones
# BUG FIX: the original compared the logical `violent` column against the
# string "not violent" (never TRUE); assign the final labels directly.
data$violent_str <- rep("Not Violent", nrow(data))
data$violent_str[data$primary.description %in% violent] <- "Violent"
# 3. if-else statement doing the same thing
# BUG FIX: the test must look at primary.description, not at the already
# recoded violent_str column (which never matches crime names).
data$violent_str <- ifelse(data$primary.description %in% violent, "Violent", "Not Violent")
# filter data by violent crime
4772be0912db6f88428639344f0929cb9907c1d4
|
741ee389d11bd329b79075c6a2e6b4eda11b6b23
|
/E4/droslong.r
|
dd8451169f9bd16ec0ffacb7401a8e4b9b8f4ca7
|
[] |
no_license
|
thanhan/SM2
|
f30b1c04104e01d1261122708b6b31e7662002e1
|
50a6e37683d2aa9677745682c905398bee1128bc
|
refs/heads/master
| 2021-01-11T17:22:48.515232
| 2017-04-26T05:03:00
| 2017-04-26T05:03:00
| 79,769,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,463
|
r
|
droslong.r
|
# Hierarchical Bayesian regression (Gibbs sampler) for the droslong gene
# expression data.
# Model: log2exp = a[group] + b1[gene] + b2[gene]*time + b3[gene]*time^2 + e
#   e ~ N(0, precision p)
#   a[group] ~ N(ma, precision pa)
#   (b1,b2,b3)[gene] ~ N(mb, Sigma = sb), with (mb, sb) ~ NIW(mu0, k0, L0, nu0)
library(lattice)
library(mvtnorm)
library(MCMCpack)
d = read.csv('droslong.csv')
xyplot(log2exp~time | gene, data=d)
xyplot(log2exp~time | group, data=d)
# Numeric gene / group identifiers.
d = transform(d,geid=as.numeric(factor(gene)))
d = transform(d,grid=as.numeric(factor(group)))
ni = c(180, 180, 144)   # observations per group
n = nrow(d)
n_gr = 3                # number of groups
n_ge = 14               # number of genes
# Initial values for the chain.
a = vector("numeric", n_gr)
b = matrix(nrow = 3, ncol = n_ge, 0)
p = 1
mb = c(0, 0, 0)
sb = diag(3)
# NIW prior for (mb, sb).
mu0 = c(0, 0, 0)
k0 = 1
L0 = diag(3)
nu0 = 4
# Prior for the group intercepts a.
ma = 0
pa = 1
# Precompute sum of x x^T per gene, with design vector x = (1, t, t^2).
sum_xx = array(dim = c(n_ge, 3, 3), 0)
for (i in 1:n){
  ge = d$geid[i]
  t = d$time[i]
  # BUG FIX: the outer product must use the design vector on both sides;
  # the original multiplied by t(c(1,2,3)).
  sum_xx[ge,,] = sum_xx[ge,,] + c(1, t, t*t) %*% t(c(1, t, t*t))
}
NMC = 1000
for (it in 1:NMC){
  if (it %% 100 == 0) print(it)
  # --- sample a (group intercepts) ---
  # BUG FIX: residuals must be accumulated per group; the original added a
  # scalar residual to the whole vector s on every iteration.
  s = vector("numeric", n_gr)
  for (i in 1:n){
    ge = d$geid[i]
    gr = d$grid[i]
    t = d$time[i]
    s[gr] = s[gr] + d$log2exp[i] - b[1, ge] - b[2, ge] * t - b[3, ge] * t*t
  }
  for (i in 1:n_gr){
    new_p = ni[i] * p + pa
    new_mu = (p * s[i] + ma * pa) / new_p
    a[i] = rnorm(1, new_mu, sqrt(1/new_p))
  }
  # --- sample b (per-gene regression coefficients) ---
  sum_xlma = matrix(nrow = n_ge, ncol = 3, 0)
  for (i in 1:n){
    ge = d$geid[i]
    gr = d$grid[i]
    t = d$time[i]
    sum_xlma[ge,] = sum_xlma[ge,] + c(1, t, t*t) * (d$log2exp[i] - a[gr])
  }
  sb_inv = solve(sb)
  for (i in 1:n_ge){
    # BUG FIX: index the full slice / row (sum_xx[i,,], sum_xlma[i,]);
    # sum_xx[i] picked a single element out of the 3-d array.
    b_pre = sb_inv + p * sum_xx[i,,]
    b_var = solve(b_pre)
    b_m = b_var %*% (sb_inv %*% mb + p * sum_xlma[i,])
    b[,i] = rmvnorm(1, mean = b_m, sigma = b_var)
  }
  # --- sample (mb, sb) from the normal-inverse-Wishart full conditional ---
  b_bar = rowMeans(b)
  b_S = matrix(nrow = 3, ncol = 3, 0)
  for (i in 1:n_ge){
    b_S = b_S + (b[,i] - b_bar) %*% t(b[,i] - b_bar)
  }
  k1 = k0 + n_ge
  mu1 = (k0 / k1) * mu0 + (n_ge / k1) * b_bar
  L1 = L0 + b_S + k0 * n_ge / (k0 + n_ge) * (b_bar - mu0) %*% t(b_bar - mu0)
  nu1 = nu0 + n_ge
  sb = riwish(nu1, L1)
  mb = t(rmvnorm(1, mean = mu1, sb / k1))
  # --- sample p (residual precision) ---
  # BUG FIX: use the i-th observation and re-read t inside the loop; the
  # original summed whole-vector residuals and reused a stale t.
  sum_lmp = 0
  for (i in 1:n){
    ge = d$geid[i]
    gr = d$grid[i]
    t = d$time[i]
    sum_lmp = sum_lmp + (d$log2exp[i] - a[gr] - b[1, ge] - b[2, ge] * t - b[3, ge] * t*t)^2
  }
  # NOTE(review): shape = 1.5 looks like a fixed prior choice carried over
  # from the original; confirm the intended posterior shape (e.g. n/2 + a0).
  p = rgamma(1, shape = 1.5, rate = sum_lmp/2)
  # --- sample ma, pa (hyperparameters of the group intercepts) ---
  sum_ama = (a[1] - ma)^2 + (a[2] - ma)^2 + (a[3] - ma)^2
  pa = rgamma(1, shape = 1.5, rate = sum_ama / 2)
  ma= rnorm(1, mean(a), 1/ sqrt(3 * pa))
}
|
90ce4d28a6335b161722240c7164eb268831ef9f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tensorflow/examples/sub-.tensorflow.tensor.Rd.R
|
fb77d59e2c53b85f0b71937e472bed9d74a35c9b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,080
|
r
|
sub-.tensorflow.tensor.Rd.R
|
# Extracted example script for the tensorflow R package's `[` tensor method.
# The example body below (the ##D "Not run" lines) is commented out because
# it requires a live TensorFlow session.
library(tensorflow)
### Name: [.tensorflow.tensor
### Title: Subset tensors with '['
### Aliases: [.tensorflow.tensor
### ** Examples
## Not run:
##D sess <- tf$Session()
##D
##D x <- tf$constant(1:15, shape = c(3, 5))
##D sess$run(x)
##D # by default, numerics supplied to `...` are interperted R style
##D sess$run( x[,1] )# first column
##D sess$run( x[1:2,] ) # first two rows
##D sess$run( x[,1, drop = FALSE] )
##D
##D # strided steps can be specified in R syntax or python syntax
##D sess$run( x[, seq(1, 5, by = 2)] )
##D sess$run( x[, 1:5:2] )
##D # if you are unfamiliar with python-style strided steps, see:
##D # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#basic-slicing-and-indexing
##D
##D # missing arguments for python syntax are valid, but they must by backticked
##D # or supplied as NULL
##D sess$run( x[, `::2`] )
##D sess$run( x[, NULL:NULL:2] )
##D sess$run( x[, `2:`] )
##D
##D # Another python features that is available is a python style ellipsis `...`
##D # (not to be confused with R dots `...`)
##D # a all_dims() expands to the shape of the tensor
##D y <- tf$constant(1:(3^5), shape = c(3,3,3,3,3))
##D identical(
##D sess$run( y[all_dims(), 1] ),
##D sess$run( y[,,,,1] )
##D )
##D
##D # tf$newaxis are valid
##D sess$run( x[,, tf$newaxis] )
##D
##D # negative numbers are always interperted python style
##D # The first time a negative number is supplied to `[`, a warning is issued
##D # about the non-standard behavior.
##D sess$run( x[-1,] ) # last row, with a warning
##D sess$run( x[-1,] )# the warning is only issued once
##D
##D # specifying `style = 'python'` changes the following:
##D # + zero-based indexing is used
##D # + slice sequences in the form of `start:stop` do not include `stop`
##D # in the returned value
##D # + out-of-bounds indices in a slice are valid
##D
##D # The style argument can be supplied to individual calls of `[` or set
##D # as a global option
##D
##D # example of zero based indexing
##D sess$run( x[0, , style = 'python'] ) # first row
##D sess$run( x[1, , style = 'python'] ) # second row
##D
##D # example of slices with exclusive stop
##D options(tensorflow.extract.style = 'python')
##D sess$run( x[, 0:1] ) # just the first column
##D sess$run( x[, 0:2] ) # first and second column
##D
##D # example of out-of-bounds index
##D sess$run( x[, 0:10] )
##D options(tensorflow.extract.style = NULL)
##D
##D # slicing with tensors is valid too, but note, tensors are never
##D # translated and are always interperted python-style.
##D # A warning is issued the first time a tensor is passed to `[`
##D sess$run( x[, tf$constant(0L):tf$constant(2L)] )
##D # just as in python, only scalar tensors are valid
##D # https://www.tensorflow.org/api_docs/python/tf/Tensor#__getitem__
##D
##D # To silence the warnings about tensors being passed as-is and negative numbers
##D # being interperted python-style, set
##D options(tensorflow.extract.style = 'R')
##D
##D # clean up from examples
##D options(tensorflow.extract.style = NULL)
## End(Not run)
|
df683d5e7d989c057c21c959ca15f8fbd3942153
|
12b677e6782d44285a78a5dec7cc65b869ed7ea0
|
/src/Optimus/OptimusBundle/Servicios/Util/EnergySource/OLD/simulateData.R
|
ea3f11eb1d6ce2e93a70c6ea615830e46288b2cc
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
epu-ntua/optimusdss.symfony2app
|
128aad7820a503534e16c0ee74c2379fbba30585
|
0e2b2d8d445d4e6b8f5c1cb8183c7c4e1b9c28b6
|
refs/heads/master
| 2021-05-01T03:41:26.979434
| 2017-02-08T16:46:55
| 2017-02-08T16:46:55
| 58,220,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,149
|
r
|
simulateData.R
|
# Simulate battery storage and grid exchange for a load/generation series.
#
# `data` must contain numeric columns Load, PV and CHP (one row per step).
# Returns a copy of the input with three extra columns:
#   Storage  - energy moved into (+) or out of (-) the battery each step
#   Grid     - energy drawn from (+) or fed into (-) the grid
#   Capacity - battery state of charge after each step
simulateData <- function(data){
  soc.start <- 0   # initial state of charge
  soc.max <- 50    # upper capacity limit
  soc.min <- 4     # lower capacity limit (battery counts as empty here)
  out <- data
  out$Storage <- 0; out$Grid <- 0
  out$Capacity <- 0; out$Capacity[1] <- soc.start
  # First step: battery untouched, the grid covers the residual load.
  out$Grid[1] <- out$Load[1] - out$PV[1] + out$CHP[1]
  for (step in 2:length(out$Load)) {
    soc <- out$Capacity[step - 1]          # state of charge entering the step
    res <- out$PV[step] - out$CHP[step]    # net renewable generation
    demand <- out$Load[step]
    if (res > demand) {
      # Generation surplus.
      if (soc < soc.max) {
        if ((res - demand) >= (soc.max - soc)) {
          # Surplus fills the battery completely; the remainder is exported.
          out$Storage[step] <- soc.max - soc
          out$Capacity[step] <- soc.max
          out$Grid[step] <- (-1) * (res - demand - (soc.max - soc))
        } else {
          # The whole surplus fits into the battery.
          out$Storage[step] <- res - demand
          out$Capacity[step] <- soc + res - demand
          out$Grid[step] <- 0
        }
      } else if (soc == soc.max) {
        # Battery already full: export the whole surplus.
        out$Storage[step] <- 0
        out$Capacity[step] <- soc.max
        out$Grid[step] <- (-1) * (res - demand)
      }
    } else {
      # Generation deficit.
      if (soc == soc.min) {
        # Battery at its floor: the grid covers the deficit.
        out$Storage[step] <- 0
        out$Capacity[step] <- soc.min
        out$Grid[step] <- demand - res
      } else {
        if (soc >= (demand - res)) {
          # Battery alone covers the deficit.
          out$Storage[step] <- (-1) * (demand - res)
          out$Capacity[step] <- soc - (demand - res)
          out$Grid[step] <- 0
        } else {
          # Battery drains to its floor; the grid covers the rest.
          out$Storage[step] <- (-1) * (soc - soc.min)
          out$Capacity[step] <- soc.min
          out$Grid[step] <- demand - res - (soc - soc.min)
        }
      }
    }
  }
  return(out)
}
|
f35aba4c53b2fd667e658f4ab106280c64db658a
|
b05ff0cb36e1be4f7808b956a0743acc9e0a5d93
|
/R/predDensities2000_2010.R
|
5c8cb8f5be896b605b7dbbecc63b79b618da2818
|
[
"CC0-1.0"
] |
permissive
|
dongmeic/climate-space
|
b649a7a8e6b8d76048418c6d37f0b1dd50512be7
|
7e800974e92533d3818967b6281bc7f0e10c3264
|
refs/heads/master
| 2021-01-20T02:13:12.143683
| 2020-04-03T16:47:56
| 2020-04-03T16:47:56
| 89,385,878
| 0
| 0
| null | 2020-04-03T16:47:57
| 2017-04-25T17:01:45
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,281
|
r
|
predDensities2000_2010.R
|
# Compare predictor densities between the 2000 and 2010 beetle observations.
DATA_DIR <- '~/dongmei/sdm/data/cluster/year/'
# Load the SDM train/test split (predictors X, labels y).
dat.early <- read.csv(paste(DATA_DIR, 'X_test.csv', sep=''))
y.early <- read.csv(paste(DATA_DIR, 'y_test.csv', sep=''))
dat.late <- read.csv(paste(DATA_DIR, 'X_train.csv', sep=''))
y.late <- read.csv(paste(DATA_DIR, 'y_train.csv', sep=''))
# Recombine the split, then keep only presence rows (beetle == 1).
early <- cbind(y.early, dat.early)
late <- cbind(y.late, dat.late)
data <- rbind(early, late)
data <- subset(data, beetle == 1)
# Pull one predictor column from the global `data` frame (built above),
# restricted to rows recorded in the given year.
get.yearly.data <- function(field, year) {
  keep <- data$year == year
  data[keep, field]
}
# Overlay the kernel density estimates of two samples on one plot.
# `xs` is a two-element list of numeric vectors; `colors` gives one line
# colour per sample.  Extra arguments are forwarded to plot().
density.plot <- function(xs, colors, ...) {
  dens.a <- density(xs[[1]])
  dens.b <- density(xs[[2]])
  # Shared y-range so neither curve is clipped.
  y.lim <- range(c(dens.a$y, dens.b$y), na.rm = TRUE)
  plot(dens.a, col = colors[1], ylim = y.lim, ...)
  lines(dens.b, col = colors[2])
}
names(data)
# Drop identifier/coordinate columns; everything remaining is a predictor.
exclude <- c('beetle', 'year', 'vegetation', 'studyArea', 'x', 'y')
predictors <- names(data)[-which(names(data) %in% exclude)]
length(predictors)
# One small density panel per predictor: 2000 (red, col 2) vs 2010 (blue, col 4).
par(mfrow=c(4, 5))
par(mar=c(0, 0, 2, 0))
for (p in predictors) {
x2000 <- get.yearly.data(p, 2000)
x2010 <- get.yearly.data(p, 2010)
density.plot(list(x2000, x2010),
colors=c(2, 4),
main=p,
xaxt='n',
yaxt='n',
cex.main=0.7)
# Draw the legend only once, on the first panel.
if (p == predictors[1]) {
legend('topright', lty=1, col=c(2, 4), legend=c(2000, 2010), bty='n')
}
}
|
572d97134f0e20b383ca8b009b97d0581b9cc0a5
|
8b07efbe40855dea61fe4018519fb658bad5ab05
|
/Consumption_Trend_Analysis/code/code_20201015_시각화추출.R
|
92d5095051e6c7e45347c115273e704e9fb0915b
|
[] |
no_license
|
ne-choi/project
|
02d605d59c753d331174317c09e6d2f5057bc7cd
|
ca6fcbb36b236065ec3568ee88281863740a91b7
|
refs/heads/main
| 2023-08-23T01:11:53.001104
| 2021-10-21T14:21:43
| 2021-10-21T14:21:43
| 302,356,308
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,539
|
r
|
code_20201015_시각화추출.R
|
# 2020-10-15 revision notes (original header was in Korean).
# BUG FIX: a stray R Markdown "---" delimiter followed the header; sourcing
# the file errored there (unary minus applied to library()'s return value).
# It is commented out below.
# ---
library(readxl)
library(dplyr)
library(tidyr)
library(reshape2)
library(ggplot2)
# 1. Data preparation
# 1) Combine the 64 Mcorporation spreadsheets
# Read every .xlsx in the folder and stack them, tagging each row with the
# source file via .id.
files <- list.files(path = "sample/Mcorporation/상품 카테고리 데이터_KDX 시각화 경진대회 Only/use", pattern = "*.xlsx", full.names = T)
products <- sapply(files, read_excel, simplify = FALSE) %>%
  bind_rows(.id = "id")
glimpse(products)
# Split the purchase date into year-month (first 6 chars) and a discarded
# day part, then keep only the analysis columns.
# (Korean column names: category, purchase year-month, gender, age,
# purchase amount, purchase count.)
filter_products <- group_by(products, 카테고리명, 구매날짜, 고객성별, 고객나이, 구매금액, 구매수) %>%
  separate(구매날짜, into = c("구매연월", "삭제(일자)"), sep = 6) %>%
  select(카테고리명, 구매연월, 고객성별, 고객나이, 구매금액, 구매수)
head(filter_products, 2)
# Remove demographic missing values: keep gender F/M and positive ages only.
nomiss_products <- filter_products %>%
  filter(!is.na(고객성별) & !is.na(고객나이)) %>%
  filter((고객성별 %in% c("F", "M")), 고객나이 > 0)
head(nomiss_products)
# 2) Slice out the categories of interest
# Colour cosmetics (category "메이크업 용품").
cosmetics <- filter(nomiss_products, 카테고리명 == "메이크업 용품")
cosmetics
# Monthly spend totals by gender - colour cosmetics.
summarise_cosmetics <- cosmetics %>%
group_by(구매연월, 고객성별) %>%
summarise(금액합계 = sum(구매금액))
summarise_cosmetics
# Skincare (category "스킨케어").
skincare <- filter(nomiss_products, 카테고리명 == "스킨케어")
skincare
# Monthly spend totals by gender - skincare.
summarise_skincare <- skincare %>%
group_by(구매연월, 고객성별) %>%
summarise(금액합계 = sum(구매금액))
summarise_skincare
# 3) Visualisation helpers
# Axis-label formatter: express amounts in units of 100 million KRW,
# suffixed with the Korean unit character "억".
label_ko_num = function(num){
  # Integer-divide one value by 1e8 and append the unit suffix.
  fmt_one = function(v){
    paste0(v %/% 100000000, '억')
  }
  sapply(num, fmt_one)
}
# Colour cosmetics: monthly spend by gender, y axis in 100M-KRW units.
library(ggplot2)
graph_cosmetics <- ggplot(summarise_cosmetics, aes(x = 구매연월, y = 금액합계, color = 고객성별)) +
geom_point() +
scale_y_continuous(labels = label_ko_num) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
theme(legend.position = "bottom")
graph_cosmetics
# Skincare: same plot for the skincare category.
graph_skincare <- ggplot(summarise_skincare, aes(x = 구매연월, y = 금액합계, color = 고객성별)) +
geom_point() +
scale_y_continuous(labels = label_ko_num) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
theme(legend.position = "bottom")
graph_skincare
|
a39a7c7f82dea38b78e9d5ece13ae41eb5928b9c
|
59aae554beba38bf265dd19fb4071784fdddce27
|
/compile_gitbook.R
|
820d1805ff953a35e77a6ef29925ba55ce340d22
|
[] |
no_license
|
RemkoDuursma/prcr
|
0a8276b9afa3d6c88c95ffe2f10503d175dce9de
|
392b3e17fc4bb8128a3c68fa64b8d85080509f63
|
refs/heads/master
| 2021-06-08T18:18:44.051004
| 2021-05-19T08:37:00
| 2021-05-19T08:37:00
| 165,263,332
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 118
|
r
|
compile_gitbook.R
|
# Render the book starting from index.Rmd into a bookdown gitbook,
# using a fresh R session per chapter and cleaning intermediate files.
bookdown::render_book(input="index.Rmd",
'bookdown::gitbook', new_session = TRUE, clean=TRUE)
|
6769fcc5b2f4ff65a3855beb686dc4c8fd998daf
|
4dfcc827ed8501d3a2274ff922c0bc1112cf293e
|
/man/reach_style_color_reds.Rd
|
12f259450e7d555854ce5bc5f88aa50ca17f6806
|
[] |
no_license
|
mabafaba/reachR2
|
5324e7ff1e413fe2e8e7bf5d39cd6736cc53d783
|
0ecbb831a637369718c759286720bbbf08966725
|
refs/heads/master
| 2020-03-19T01:34:57.913274
| 2018-06-13T13:17:24
| 2018-06-13T13:17:24
| 135,556,190
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 329
|
rd
|
reach_style_color_reds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colours.R
\name{reach_style_color_reds}
\alias{reach_style_color_reds}
\title{Reach brand reds triples}
\usage{
reach_style_color_reds(transparent = F)
}
\description{
Reach brand reds triples
}
\examples{
}
\seealso{
The other \code{reach_style_color_*} palette helpers in this package.
}
|
8d071f5c5595e120ae5ca676388f394049208f75
|
10e518cfefb3e44d245fa2ca35b809d4f0da9b38
|
/man/load_pqt.Rd
|
0e61ea52d96bdc34fc00c46ebc6622b0556f9fd1
|
[
"MIT"
] |
permissive
|
bryanwhiting/bonds
|
e07048449ada74e40139c713209abd5f9f16913a
|
0805a87b89b0554c811dd9cb8d7aed73ffc99254
|
refs/heads/main
| 2023-08-22T07:39:12.987012
| 2021-10-29T07:02:45
| 2021-10-29T07:02:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 401
|
rd
|
load_pqt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{load_pqt}
\alias{load_pqt}
\title{Read in a multipart parquet file using a given file range}
\usage{
load_pqt(dir_root, tab, range = NULL)
}
\arguments{
\item{range}{}
}
\value{
}
\description{
Read in a multipart parquet file using a given file range
}
\examples{
load_pqt("files", range = 1:5)
}
|
8b66a24ff40ebeb13f90565017a54609ef18d243
|
cc870392539610a12db89082d4fd5855c5b3c718
|
/workshop3glm_slides.R
|
337117c058450b4b71a62d149c6e777b2bf29d3f
|
[
"MIT"
] |
permissive
|
rosemm/workshops
|
438b88ac15459009952134d4a5af624828a12ea0
|
8a96a380fa3119e965ff74fa4a86c35b0f58097d
|
refs/heads/master
| 2020-12-03T04:03:08.418841
| 2017-06-29T18:51:45
| 2017-06-29T18:51:45
| 95,809,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,201
|
r
|
workshop3glm_slides.R
|
### R code from vignette source 'workshop3glm_slides.Rnw'
### Sweave-extracted workshop script: logistic and Poisson GLMs on the
### OSF badges dataset.
###################################################
### code chunk number 2: workshop3glm_slides.Rnw:115-120
###################################################
# Install and attach plotting / data-manipulation packages.
install.packages("ggplot2")
install.packages("dplyr")
library("ggplot2")
library("dplyr")
###################################################
### code chunk number 3: workshop3glm_slides.Rnw:176-178
###################################################
install.packages("knitr")
library("knitr")
###################################################
### code chunk number 4: workshop3glm_slides.Rnw:202-203
###################################################
install.packages("pander")
###################################################
### code chunk number 5: getwd
###################################################
# Confirm the working directory so the relative data path below resolves.
getwd()
###################################################
### code chunk number 6: workshop3glm_slides.Rnw:248-249
###################################################
# Load the Open Science Framework badges dataset.
osf <- read.csv("data/OSF_badges.csv")
###################################################
### code chunk number 7: workshop3glm_slides.Rnw:261-264
###################################################
# Inspect structure and summaries before modelling.
str(osf)
head(osf)
summary(osf)
###################################################
### code chunk number 8: workshop3glm_slides.Rnw:267-269
###################################################
# Convert the publication date from character to Date.
osf$date <- as.Date(osf$date)
str(osf)
###################################################
### code chunk number 9: workshop3glm_slides.Rnw:282-283
###################################################
?na.omit
###################################################
### code chunk number 10: workshop3glm_slides.Rnw:287-293
###################################################
# Analysis frame: keep the outcome, date and journal; drop one journal
# and all rows with missing values.
osf.lgt <- osf %>%
select(statement.included, date, Journal) %>%
filter(Journal != "Infant behavior & development") %>%
na.omit()
summary(osf.lgt)
###################################################
### code chunk number 11: workshop3glm_slides.Rnw:314-315
###################################################
?glm
###################################################
### code chunk number 12: workshop3glm_slides.Rnw:330-333
###################################################
# Logistic regression: probability of including a data statement vs date.
glm(statement.included ~ date, data=osf.lgt,
family=binomial(link="logit"),
na.action=na.exclude)
###################################################
### code chunk number 13: links
###################################################
?binomial
###################################################
### code chunk number 14: model
###################################################
# Same model, stored for later comparison and prediction.
logit.model1 <- glm(statement.included ~ date, data=osf.lgt,
family=binomial(link="logit"),
na.action=na.exclude)
###################################################
### code chunk number 15: workshop3glm_slides.Rnw:366-369
###################################################
logit.model1
str(logit.model1)
###################################################
### code chunk number 16: workshop3glm_slides.Rnw:390-391
###################################################
summary(logit.model1)
###################################################
### code chunk number 17: workshop3glm_slides.Rnw:397-398
###################################################
# Check the factor level order (determines which level is the "success").
levels(osf.lgt$statement.included)
###################################################
### code chunk number 18: workshop3glm_slides.Rnw:407-409
###################################################
# Nicely formatted model summary table via pander.
model.sum <- summary(logit.model1)
pander(model.sum)
###################################################
### code chunk number 19: workshop3glm_slides.Rnw:421-424
###################################################
# Intercept-only null model for comparison.
logit.model0 <- glm(statement.included ~ 1, data=osf.lgt,
family=binomial(link="logit"),
na.action=na.exclude)
###################################################
### code chunk number 20: workshop3glm_slides.Rnw:429-430
###################################################
# Likelihood-ratio test: does adding date improve on the null model?
anova(logit.model0, logit.model1, test="Chisq")
###################################################
### code chunk number 21: predict
###################################################
# Fitted probabilities from the date model (pred1) and null model (pred0).
osf.lgt$pred1 <- predict(logit.model1,
osf.lgt,
type="response")
osf.lgt$pred0 <- predict(logit.model0,
osf.lgt,
type="response")
###################################################
### code chunk number 22: clas
###################################################
# Classify at the 0.5 probability threshold (NA probabilities stay NA).
osf.lgt$clas0 <- ifelse(osf.lgt$pred0 >= .5, 1,
ifelse(osf.lgt$pred0 < .5, 0,
NA))
osf.lgt$clas1 <- ifelse(osf.lgt$pred1 >= .5, 1,
ifelse(osf.lgt$pred1 < .5, 0,
NA))
###################################################
### code chunk number 23: clas_factor
###################################################
# Label the 1/0 classifications so they match the observed factor levels.
osf.lgt$clas0 <- factor(osf.lgt$clas0,
levels=c(1,0),
labels=c("yes", "no"))
osf.lgt$clas1 <- factor(osf.lgt$clas1,
levels=c(1,0),
labels=c("yes", "no"))
###################################################
### code chunk number 24: crosstabs
###################################################
# Confusion tables: observed outcome vs each model's classification.
xtabs(~ statement.included + clas0, data=osf.lgt)
xtabs(~ statement.included + clas1, data=osf.lgt)
###################################################
### code chunk number 25: workshop3glm_slides.Rnw:510-512
###################################################
# Raw outcome-vs-date scatter.
ggplot(osf.lgt, aes(x=date, y=statement.included)) +
geom_point(alpha=.3)
###################################################
### code chunk number 26: plot_predict
###################################################
# Observed 0/1 outcomes with the fitted probability curve from model 1.
# BUG FIX: the fitted values were stored in column `pred1` (chunk 21);
# the original referenced a non-existent column `pred`.
ggplot(osf.lgt, aes(x=date, y=as.numeric(statement.included)-1)) +
  geom_point( alpha=.3 ) +
  geom_line( aes(y=pred1, x=date) ) +
  labs(y="Probability of providing a data statement")
###################################################
### code chunk number 27: model2
###################################################
# Add journal and its interaction with date.
logit.model2 <- glm(statement.included ~ date*Journal, data=osf.lgt,
family=binomial(link="logit"),
na.action=na.exclude)
###################################################
### code chunk number 28: workshop3glm_slides.Rnw:560-561
###################################################
# Sequential likelihood-ratio tests across the three nested models.
anova(logit.model0, logit.model1, logit.model2, test="Chisq")
###################################################
### code chunk number 29: workshop3glm_slides.Rnw:568-579
###################################################
# Classify from model 2 and cross-tabulate against the observed outcome.
osf.lgt$pred2 <- predict(logit.model2,
osf.lgt,
type="response")
osf.lgt$clas2 <- ifelse(osf.lgt$pred2 >= .5, 1,
ifelse(osf.lgt$pred2 < .5, 0,
NA))
osf.lgt$clas2 <- factor(osf.lgt$clas2,
levels=c(1,0),
labels=c("yes", "no"))
xtabs(~ statement.included + clas2, data=osf.lgt)
###################################################
### code chunk number 30: plot_predict2
###################################################
# Per-journal fitted probability curves over the observed outcomes.
ggplot(osf.lgt, aes(x=date, y=as.numeric(statement.included)-1,
color=Journal)) +
geom_point( alpha=.3 ) +
geom_line( aes(y=pred2, x=date) ) +
labs(y="Probability of providing a data statement")
###################################################
### code chunk number 31: workshop3glm_slides.Rnw:604-607
###################################################
# Count outcome for the Poisson example: number of experiments per paper.
summary(osf$Number.of.experiments)
hist(osf$Number.of.experiments)
###################################################
### code chunk number 32: workshop3glm_slides.Rnw:612-616
###################################################
# Poisson analysis frame: counts and journal, complete cases only.
osf.pois <- osf %>%
select(Number.of.experiments, Journal) %>%
filter(Journal != "Infant behavior & development") %>%
na.omit()
###################################################
### code chunk number 33: pos_model
###################################################
# Poisson regression of experiment counts on journal.
pois.model <- glm(Number.of.experiments ~ Journal, data=osf.pois,
family=poisson(link = "log"),
na.action=na.exclude)
###################################################
### code chunk number 34: workshop3glm_slides.Rnw:638-640
###################################################
ggplot(osf.pois, aes(x=Number.of.experiments)) +
geom_histogram()
###################################################
### code chunk number 35: workshop3glm_slides.Rnw:644-649
###################################################
# Count distributions by journal: stacked histogram and smoothed densities.
ggplot(osf.pois, aes(x=Number.of.experiments, fill=Journal)) +
geom_histogram()
ggplot(osf.pois, aes(x=Number.of.experiments, fill=Journal)) +
geom_density(alpha=.3, adjust=2)
|
45b62e8045616f6f3dff005585c743115b853adc
|
d7b1f6f13781ebf0daa817ac6e328513813db7e6
|
/scripts/db_complete_missing_task_logs.R
|
9806fa0ca6cfaecffbcb306ac126d135506a9c13
|
[] |
no_license
|
petermeissner/wikipediadumbs
|
2d381d09d1925c921f753b371b21236177b051f5
|
f8565d9796ee0273efede8f662809df251bafbf7
|
refs/heads/master
| 2020-08-06T11:10:49.340760
| 2018-11-20T09:07:50
| 2018-11-20T09:07:50
| 212,954,772
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
db_complete_missing_task_logs.R
|
library(wpd)
library(ggplot2)
library(data.table)
library(dplyr)
# Fetch the full upload-task table from the master database
# (wpd_get_query_master is from the project-local wpd package).
tasks <-
wpd_get_query_master("select * from upload_tasks")$return
# Progress plot: number of finished ("done") tasks per task date.
tasks %>%
group_by(task_date) %>%
summarise(
task_status = sum(task_status == "done")
) %>%
ggplot(aes(x = task_date, y = task_status)) +
geom_col() +
theme_bw() +
geom_hline(data = data.frame(y=0), aes(yintercept=y))
#
# Per-date summaries for dates whose single task is already done (2014
# excluded), used below to backfill the missing task logs.
# NOTE(review): the /20 rescaling is carried over from the original; its
# meaning is not evident from this file -- confirm (per-shard units?).
tasks_done <-
  tasks %>%
  group_by(task_date) %>%
  summarise(
    sum_done = sum(task_status == 'done')
  ) %>%
  filter(
    sum_done == 1,
    substring(task_date, 1, 4) != "2014"
  ) %>%
  left_join(
    tasks, by="task_date"
  ) %>%
  group_by(task_date) %>%
  summarise(
    sum_progress = sum(task_volume, na.rm = TRUE)/20,
    sum_duration = sum(task_duration, na.rm = TRUE)/20,
    ts_update = max(task_status_ts)
  ) %>%
  left_join(
    # BUG FIX: the lookup table is `tasks`; the original referenced an
    # undefined object `task`, which aborted the script here.
    (tasks %>% select(task_date, task_id)), by = "task_date"
  )
# Push the reconstructed status, duration and volume back to the task
# database (one vectorised call covering all completed dates).
update <-
wpd_task_update(
task_id = tasks_done$task_id,
task_status = "done",
task_duration = tasks_done$sum_duration,
task_volume = tasks_done$sum_progress,
task_status_ts = tasks_done$ts_update
)
|
944182bbe8359f196f6624ff10d2f24dd38d651a
|
5a5bc9e1b0d59859b4e213b092e19afe232819e1
|
/R/coast/calc_ldif_block.R
|
7077b8a90c9dd099e4eb8a1565c687b64445e6cd
|
[] |
no_license
|
jrmosedale/microclimates
|
bf469e07b688e9342c0a8d767db84ee428e778f3
|
ae2e61969631506c523bd618c9106a61b00355dd
|
refs/heads/master
| 2021-04-30T15:18:19.091728
| 2018-02-12T11:31:16
| 2018-02-12T11:31:16
| 121,236,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 456
|
r
|
calc_ldif_block.R
|
# Select, for each cell, the land-distance (ldif) layer that matches the
# cell's wind direction.
#
# Args:
#   ldif.stack: RasterStack with one ldif layer per wind-direction bin.
#   wdir.block: Raster of wind directions (degrees) for the block.
#   interval:   width in degrees of each direction bin (default 10).
# Returns: a single Raster holding the direction-appropriate ldif per cell.
calc.ldif.block<-function(ldif.stack,wdir.block,interval=10){
  # Map each cell's direction to the 1-based layer index within the stack.
  # (Removed the original's dead `ldif.block<-raster` line, which assigned
  # the raster() function itself and was immediately overwritten.)
  wdir.layer<-round(wdir.block/interval)+1
  # Cell-by-cell layer pick (raster::stackSelect).
  ldif.block<-stackSelect(ldif.stack,wdir.layer)
  return(ldif.block)
} # end function
|
f1d6cc6674868360b4cb8fbe50c5f05e85b3dd42
|
60e8d991a5c569c80c50c31fda9722589cd867ed
|
/DrugAbuse_and_Psych.R
|
8ee41f3845e9feead55e9755b5f02e38da1c485b
|
[
"MIT"
] |
permissive
|
asulovar/DeepPhenoVIZ
|
7b4932bac7c5ababad46152d1e9e49dfa4b3e1ed
|
0427e877afa255b3a994c6db43e435ed5f5d9902
|
refs/heads/master
| 2020-03-09T14:51:53.212791
| 2018-04-09T23:27:02
| 2018-04-09T23:27:02
| 128,845,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,599
|
r
|
DrugAbuse_and_Psych.R
|
#Author: Arvis Sulovari, PhD
##1) What psychiatric disorders are developed as consequences of substance abuse?
##2) What genetic mechanisms lead to drug abuse, followed by comorbid psychiatric disorders
#Discovery datasets (SSADDA interview data; paths redacted).
ssadda <- read.delim("C:/Users/arvis/[...].txt")
# Ancestry subsets by self-reported race codes (4/5 = AA, 6/7 = EA).
ssadda_AA <- ssadda[which(ssadda$A8a_RACE==4 | ssadda$A8a_RACE==5),]
ssadda_EA <- ssadda[which(ssadda$A8a_RACE==6 | ssadda$A8a_RACE==7),]
#Replication datasets
ssadda_rep_AA <- read.delim("C:/Users/arvis/[...].txt")
ssadda_rep_EA <- read.delim("C:/Users/arvis/[...].txt")
ssadda_rep <- rbind(ssadda_rep_EA,ssadda_rep_AA)
ssadda_rep_AA <- ssadda_rep[which(ssadda_rep$A8a_RACE==4 | ssadda_rep$A8a_RACE==5),]
ssadda_rep_EA <- ssadda_rep[which(ssadda_rep$A8a_RACE==6 | ssadda_rep$A8a_RACE==7),]
#########################################################################################
#                                                                                       #
#                           INDIVIDUAL-LEVEL REPLICATIONS                               #
#                                                                                       #
#########################################################################################
# Column indices of the age-of-onset ("*AgeO") variables in each dataset.
ao_col_indeces_AA <- grep("AgeO$",colnames(ssadda_AA))
ao_col_indeces_rep_AA <- grep("AgeO$",colnames(ssadda_rep_AA))
ao_col_indeces_EA <- grep("AgeO$",colnames(ssadda_EA))
ao_col_indeces_rep_EA <- grep("AgeO$",colnames(ssadda_rep_EA))
#### Clean up non-age values from the age-of-onset columns: ages > 70 or 0
#### are replaced with the string sentinel "UC" (uncodable).
# NOTE(review): writing "UC" coerces the columns to character, and the first
# two loops apply the EA-derived indices to the full ssadda/ssadda_rep
# frames -- valid only because the subsets share identical columns; confirm.
for(i in 1:length(ao_col_indeces_EA)){ssadda[which(as.numeric(ssadda[,ao_col_indeces_EA[i]])>70 | as.numeric(ssadda[,ao_col_indeces_EA[i]])==0),ao_col_indeces_EA[i]] <- c("UC")}
for(i in 1:length(ao_col_indeces_rep_EA)){ssadda_rep[which(as.numeric(ssadda_rep[,ao_col_indeces_rep_EA[i]])>70 | as.numeric(ssadda_rep[,ao_col_indeces_rep_EA[i]])==0),ao_col_indeces_rep_EA[i]] <- c("UC")}
for(i in 1:length(ao_col_indeces_AA)){ssadda_AA[which(as.numeric(ssadda_AA[,ao_col_indeces_AA[i]])>70 | as.numeric(ssadda_AA[,ao_col_indeces_AA[i]])==0),ao_col_indeces_AA[i]] <- c("UC")}
for(i in 1:length(ao_col_indeces_EA)){ssadda_EA[which(as.numeric(ssadda_EA[,ao_col_indeces_EA[i]])>70 | as.numeric(ssadda_EA[,ao_col_indeces_EA[i]])==0),ao_col_indeces_EA[i]] <- c("UC")}
for(i in 1:length(ao_col_indeces_rep_AA)){ssadda_rep_AA[which(as.numeric(ssadda_rep_AA[,ao_col_indeces_rep_AA[i]])>70 | as.numeric(ssadda_rep_AA[,ao_col_indeces_rep_AA[i]])==0),ao_col_indeces_rep_AA[i]] <- c("UC")}
for(i in 1:length(ao_col_indeces_rep_EA)){ssadda_rep_EA[which(as.numeric(ssadda_rep_EA[,ao_col_indeces_rep_EA[i]])>70 | as.numeric(ssadda_rep_EA[,ao_col_indeces_rep_EA[i]])==0),ao_col_indeces_rep_EA[i]] <- c("UC")}
#Function for extraction of age-of-onset variables
# Derive the population-level ordering of age-of-onset (AO) events.
#
# For every subject, the "*AgeO" columns of `ssadda_in` are ranked by age of
# onset (missing / non-numeric onsets get the sentinel rank 999).  Events are
# then sorted by their mean rank across subjects, and for each consecutive
# pair (A, B) in that ordering the number of subjects in which A occurred
# strictly before B is counted.
#
# Args:
#   ssadda_in: data frame containing age-of-onset columns named "...AgeO".
#   out_mx:    unused; retained for backward compatibility with old callers.
# Returns: character matrix, one row per AO event ordered by mean rank:
#   col 1 = event name; col 2 = mean rank over subjects with a usable onset;
#   col 3 = count of subjects supporting the link to the next event (NA in
#   the final row).
AO_order_fun <- function(ssadda_in = ssadda, out_mx = NULL){
  ao_col_indeces <- grep("AgeO$", colnames(ssadda_in))
  ssadda_ageOfOnset <- ssadda_in[, ao_col_indeces]
  n_vars <- length(ao_col_indeces)   # generalised: no hard-coded 135/134
  # Per-subject rank of every AO event; 999 marks a missing onset.
  out_mx <- array(NA, dim = c(nrow(ssadda_ageOfOnset), n_vars))
  colnames(out_mx) <- colnames(ssadda_ageOfOnset)
  for (i in seq_len(nrow(ssadda_ageOfOnset))) {
    # BUG FIX: order() on a one-row data frame errors in modern R and the
    # "UC" sentinels are character, so rank on a numeric vector instead;
    # non-numeric entries ("UC") coerce to NA and get the 999 sentinel,
    # which is what the is.na() check always intended.
    ages <- suppressWarnings(as.numeric(unlist(ssadda_ageOfOnset[i, ])))
    # match() against the age-sorted names yields each event's rank.
    out_mx[i, ] <- match(colnames(ssadda_ageOfOnset),
                         colnames(ssadda_ageOfOnset)[order(ages)])
    out_mx[i, which(is.na(ages))] <- 999
    if (i %% 100 == 0) {
      per_comp <- round((i / nrow(ssadda_ageOfOnset)) * 100, 3)
      print(paste0(per_comp, " %"))
    }
  }
  # Mean rank per event, ignoring the 999 sentinel.
  mx_overall_order <- array(NA, dim = c(n_vars, 3))
  mx_overall_order[, 1] <- colnames(ssadda_ageOfOnset)
  for (i in seq_len(n_vars)) {
    mx_overall_order[i, 2] <- mean(out_mx[which(out_mx[, i] < 999), i])
  }
  # Sort events by mean rank, then count how many subjects support each
  # consecutive "A happened before B" link.
  ordered_out_mx <- mx_overall_order[order(as.numeric(mx_overall_order[, 2])), ]
  for (i in seq_len(n_vars - 1)) {
    factor_one <- ordered_out_mx[i, 1]
    index_one <- which(colnames(ssadda_ageOfOnset) == factor_one)
    factor_two <- ordered_out_mx[(i + 1), 1]
    index_two <- which(colnames(ssadda_ageOfOnset) == factor_two)
    # NOTE: 999 sentinels make "A present, B missing" count as support,
    # mirroring the original behaviour.
    ordered_out_mx[i, 3] <- length(which(out_mx[, index_one] < out_mx[, index_two]))
  }
  return(ordered_out_mx)
}
#Run AO_order_fun() to replicate ordered events
# Event orderings for the discovery cohorts (pooled, AA, EA).
ALL_ordered_events <- AO_order_fun(ssadda_in = ssadda)
AA_ordered_events <- AO_order_fun(ssadda_in = ssadda_AA)
EA_ordered_events <- AO_order_fun(ssadda_in = ssadda_EA)
##Reformat
# Each *_v2 matrix has one row per consecutive link of the ordering:
# from-event, to-event, mean rank of the from-event, supporting sample
# count, and a concatenated pair label used later as a merge key.
# NOTE(review): the hard-coded 135 assumes exactly 135 AgeO columns — confirm.
ALL_ordered_events_v2 <- cbind(ALL_ordered_events[-135,1],ALL_ordered_events[-1,1],ALL_ordered_events[-135,2],
                               ALL_ordered_events[-135,3],paste0(ALL_ordered_events[-135,1],ALL_ordered_events[-1,1]))
AA_ordered_events_v2 <- cbind(AA_ordered_events[-135,1],AA_ordered_events[-1,1],AA_ordered_events[-135,2],AA_ordered_events[-135,3],
                              paste0(AA_ordered_events[-135,1],AA_ordered_events[-1,1]))
EA_ordered_events_v2 <- cbind(EA_ordered_events[-135,1],EA_ordered_events[-1,1],EA_ordered_events[-135,2],EA_ordered_events[-135,3],
                              paste0(EA_ordered_events[-135,1],EA_ordered_events[-1,1]))
##
# Same derivation for the replication cohorts.
ALL_rep_ordered_events <- AO_order_fun(ssadda_in = ssadda_rep)
AA_rep_ordered_events <- AO_order_fun(ssadda_in = ssadda_rep_AA)
EA_rep_ordered_events <- AO_order_fun(ssadda_in = ssadda_rep_EA)
##Reformat
ALL_rep_ordered_events_v2 <- cbind(ALL_rep_ordered_events[-135,1],ALL_rep_ordered_events[-1,1],ALL_rep_ordered_events[-135,2],ALL_rep_ordered_events[-135,3],
                                   paste0(ALL_rep_ordered_events[-135,1],ALL_rep_ordered_events[-1,1]))
AA_rep_ordered_events_v2 <- cbind(AA_rep_ordered_events[-135,1],AA_rep_ordered_events[-1,1],AA_rep_ordered_events[-135,2],AA_rep_ordered_events[-135,3],
                                  paste0(AA_rep_ordered_events[-135,1],AA_rep_ordered_events[-1,1]))
EA_rep_ordered_events_v2 <- cbind(EA_rep_ordered_events[-135,1],EA_rep_ordered_events[-1,1],EA_rep_ordered_events[-135,2],EA_rep_ordered_events[-135,3],
                                  paste0(EA_rep_ordered_events[-135,1],EA_rep_ordered_events[-1,1]))
###
#FIND THE OVERLP
##Allow for "skipping" when calculating the overlap
##NON-NETWORK SOLUTION
# For each consecutive pair in an ordering, keep it only if both events are
# present and in the same relative order in all four cohort orderings.
AA_ordered_events_v2[,1]
AA_rep_ordered_events_v2[,1]
EA_ordered_events_v2[,1]
EA_rep_ordered_events_v2[,1]
#Check if any of the paths (A->B) are replicated in all 4 datasets, allowing for "skipping"
replicated_pairs <- array(NA,dim=c(300,4))
#ONLY for the first (out of 4) loopings
counter <- 0
#NOTE(review): with k in 1:3 the k==4 (EA replication) branch below is dead
#code — confirm whether the bound should be 1:4.
for(k in 1:3) {
  #NOTE(review): the empty error handler silently swallows any failure
  #inside this block (e.g. out-of-range indexing).
  tryCatch({
    for(i in 1:133) {
      for(j in (i+1)) {
        if(k==1){
          current_pair <- c(AA_ordered_events_v2[i,1],AA_ordered_events_v2[j,1])
          current_ordered_events <- AA_ordered_events_v2
          thresh <- nrow(ssadda_AA)*0.2
        }
        else if(k==2) {
          current_pair <- c(AA_rep_ordered_events_v2[i,1],AA_rep_ordered_events_v2[j,1])
          current_ordered_events <- AA_rep_ordered_events_v2
          thresh <- nrow(ssadda_rep_AA)*0.2
        }
        else if(k==3) {
          current_pair <- c(EA_ordered_events_v2[i,1],EA_ordered_events_v2[j,1])
          current_ordered_events <- EA_ordered_events_v2
          thresh <- nrow(ssadda_EA)*0.2
        }
        else if(k==4) {
          current_pair <- c(EA_rep_ordered_events_v2[i,1],EA_rep_ordered_events_v2[j,1])
          current_ordered_events <- EA_rep_ordered_events_v2
          thresh <- nrow(ssadda_rep_EA)*0.2
        }
        # Keep only links supported by at least 20% of the cohort.
        current_ordered_events <- current_ordered_events[which(as.numeric(as.character(current_ordered_events[,4]))>=thresh),]
        # Both events of the pair must appear in all four orderings.
        if(length(which(AA_ordered_events_v2[,1]==current_pair[1])) +
           length(which(AA_rep_ordered_events_v2[,1]==current_pair[1])) +
           length(which(EA_ordered_events_v2[,1]==current_pair[1])) +
           length(which(EA_rep_ordered_events_v2[,1]==current_pair[1])) +
           length(which(AA_ordered_events_v2[,1]==current_pair[2])) +
           length(which(AA_rep_ordered_events_v2[,1]==current_pair[2])) +
           length(which(EA_ordered_events_v2[,1]==current_pair[2])) +
           length(which(EA_rep_ordered_events_v2[,1]==current_pair[2])) == 8) {
          aa_status <- (which(AA_ordered_events_v2[,1]==current_pair[1]) <
                          which(AA_ordered_events_v2[,1]==current_pair[2]))
          aa_rep_status <- (which(AA_rep_ordered_events_v2[,1]==current_pair[1]) <
                              which(AA_rep_ordered_events_v2[,1]==current_pair[2]))
          # Fix: use the *_v2 ordering for consistency with the other three
          # cohorts (positions of the 134 linked events are identical).
          ea_status <- (which(EA_ordered_events_v2[,1]==current_pair[1]) <
                          which(EA_ordered_events_v2[,1]==current_pair[2]))
          # Fix: the EA replication status must come from the EA replication
          # ordering; the original reused the EA discovery ordering here, so
          # EA replication was never actually checked.
          ea_rep_status <- (which(EA_rep_ordered_events_v2[,1]==current_pair[1]) <
                              which(EA_rep_ordered_events_v2[,1]==current_pair[2]))
          if(aa_status==T & aa_rep_status==T & ea_status == T & ea_rep_status == T){
            print(c(i,j))
            counter <- counter+1
            # Store event names plus their column indices in ssadda_ageOfOnset.
            replicated_pairs[counter,1] <- current_ordered_events[i,1]
            replicated_pairs[counter,2] <- current_ordered_events[j,1]
            replicated_pairs[counter,3] <- which((colnames(ssadda_ageOfOnset))==current_ordered_events[i,1])
            replicated_pairs[counter,4] <- which((colnames(ssadda_ageOfOnset))==current_ordered_events[j,1])
          }
        }
      }
    }
  }, error=function(e){})
}
replicated_pairs_vClean <- (cbind(replicated_pairs[,1],replicated_pairs[,2],as.numeric(replicated_pairs[,3]),as.numeric(replicated_pairs[,4])))
write.csv(replicated_pairs,"Final&clean/replicated_pairs_FINAL_dec9.csv")
#########################################################################################
#                               GRAPH-BASED SOLUTIONS                                   #
#########################################################################################
#Need to use Graph Theory.
##Build adjacency matrix with info on each network
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(igraph)
require(graph)
require(networkD3)
# 135 x 135 directed adjacency matrix over the AgeO variables; despite the
# "AA_" name it is filled from all four cohorts below.
AA_adj_mx <- matrix(0,nr=135,nc=135)
#colnames(ssadda_ageOfOnset)
# Translate event names of consecutive links into column indices of
# ssadda_ageOfOnset (AA discovery only, then all four cohorts stacked).
combined_mx_num <- cbind(match(AA_ordered_events_v2[,1],colnames(ssadda_ageOfOnset)),match(AA_ordered_events_v2[,2],colnames(ssadda_ageOfOnset)))
combined_mx_num_ALL <- cbind(c(match(AA_ordered_events_v2[,1],colnames(ssadda_ageOfOnset)),match(AA_rep_ordered_events_v2[,1],colnames(ssadda_ageOfOnset)),
                               match(EA_ordered_events_v2[,1],colnames(ssadda_ageOfOnset)),match(EA_rep_ordered_events_v2[,1],colnames(ssadda_ageOfOnset))),
                             c(match(AA_ordered_events_v2[,2],colnames(ssadda_ageOfOnset)),match(AA_rep_ordered_events_v2[,2],colnames(ssadda_ageOfOnset)),
                               match(EA_ordered_events_v2[,2],colnames(ssadda_ageOfOnset)),match(EA_rep_ordered_events_v2[,2],colnames(ssadda_ageOfOnset)))
)
# Mark each observed link with a 1 (duplicate links collapse onto one cell).
for(i in 1:nrow(combined_mx_num_ALL)) {
  #(combined_mx_num[,1]==i & combined_mx_num[,2]==j)
  AA_adj_mx[combined_mx_num_ALL[i,1],combined_mx_num_ALL[i,2]] <- 1
}
#Plot graph of the adacency matrix
#adj.mat <- matrix(sample(c(0,1), 9, replace=TRUE), nr=3)
g <- graph.adjacency(AA_adj_mx)
plot(g)
#BEST SOLUTION
# Quick D3 renderings of the consecutive-link lists.
network_data <- data.frame(combined_mx_num)
network_data_AA <- data.frame(cbind(AA_ordered_events_v2[,1],AA_ordered_events_v2[,2]))
network_data_EA <- data.frame(cbind(EA_ordered_events_v2[,1],EA_ordered_events_v2[,2]))
network_data_AA_combined <- data.frame(rbind(cbind(AA_ordered_events_v2[,1],AA_ordered_events_v2[,2]),cbind(AA_rep_ordered_events_v2[,1],AA_rep_ordered_events_v2[,2])))
simpleNetwork(network_data_AA,fontSize = 10)
simpleNetwork(network_data_EA,fontSize = 10)
simpleNetwork(network_data_AA_combined,fontSize = 10)
#More complex networks
##AA
# Force-directed D3 network for AA (discovery + replication); link value is
# the supporting sample count, node group splits drug (first 81) vs
# psychiatric (last 54) variables.
AA_network_data_forced <- data.frame(rbind(cbind(AA_ordered_events_v2[,1],AA_ordered_events_v2[,2],AA_ordered_events_v2[,4]),
                                           cbind(AA_rep_ordered_events_v2[,1],AA_rep_ordered_events_v2[,2],AA_rep_ordered_events_v2[,4])))
AA_links_data <- data.frame(cbind(match(AA_network_data_forced$X1,colnames(ssadda_ageOfOnset)),
                                  match(AA_network_data_forced$X2,colnames(ssadda_ageOfOnset)),
                                  AA_network_data_forced$X3))
colnames(AA_links_data) <- c("source","target","value")
#RUN ONLY ONCE:
# networkD3 expects zero-based node indices, hence the manual -1 shifts.
#AA_links_data$source <- AA_links_data$source-1
#AA_links_data$target <- AA_links_data$target-1
AA_nodes_data <- data.frame(cbind(colnames(ssadda_ageOfOnset),c(rep(5,81),rep(2,54)),rep(20,135)))
colnames(AA_nodes_data) <- c("name","group","size")
##AA
forceNetwork(Links = AA_links_data,Nodes = AA_nodes_data,Source = "source",Target = "target",Value = "value",NodeID = "name",Group = "group",Nodesize = "size",
             fontSize = 20,charge = -400,zoom=T)
##EA
# Same construction for the EA cohorts.
EA_network_data_forced <- data.frame(rbind(cbind(EA_ordered_events_v2[,1],EA_ordered_events_v2[,2],EA_ordered_events_v2[,4]),
                                           cbind(EA_rep_ordered_events_v2[,1],EA_rep_ordered_events_v2[,2],EA_rep_ordered_events_v2[,4])))
EA_links_data <- data.frame(cbind(match(EA_network_data_forced$X1,colnames(ssadda_ageOfOnset)),
                                  match(EA_network_data_forced$X2,colnames(ssadda_ageOfOnset)),
                                  EA_network_data_forced$X3))
colnames(EA_links_data) <- c("source","target","value")
#RUN ONLY ONCE:
#EA_links_data$source <- EA_links_data$source-1
#EA_links_data$target <- EA_links_data$target-1
EA_nodes_data <- data.frame(cbind(colnames(ssadda_ageOfOnset),c(rep(5,81),rep(2,54)),rep(20,135)))
colnames(EA_nodes_data) <- c("name","group","size")
##EA
forceNetwork(Links = EA_links_data,Nodes = EA_nodes_data,Source = "source",Target = "target",Value = "value",NodeID = "name",Group = "group",Nodesize = "size",
             fontSize = 20,charge = -400,zoom=T)
##AA+EA
# Pooled network over all four cohorts.
BOTH_network_data_forced <- data.frame(rbind(cbind(EA_ordered_events_v2[,1],EA_ordered_events_v2[,2],EA_ordered_events_v2[,4]),
                                             cbind(EA_rep_ordered_events_v2[,1],EA_rep_ordered_events_v2[,2],EA_rep_ordered_events_v2[,4]),
                                             cbind(AA_ordered_events_v2[,1],AA_ordered_events_v2[,2],AA_ordered_events_v2[,4]),
                                             cbind(AA_rep_ordered_events_v2[,1],AA_rep_ordered_events_v2[,2],AA_rep_ordered_events_v2[,4])))
BOTH_links_data <- data.frame(cbind(match(BOTH_network_data_forced$X1,colnames(ssadda_ageOfOnset)),
                                    match(BOTH_network_data_forced$X2,colnames(ssadda_ageOfOnset)),
                                    BOTH_network_data_forced$X3))
#NOTE(review): "target" and "source" are swapped here relative to the AA/EA
#blocks above, reversing edge direction — confirm this is intentional.
colnames(BOTH_links_data) <- c("target","source","value")
#RUN ONLY ONCE:
#BOTH_links_data$source <- BOTH_links_data$source-1
#BOTH_links_data$target <- BOTH_links_data$target-1
BOTH_nodes_data <- data.frame(cbind(colnames(ssadda_ageOfOnset),c(rep(5,81),rep(2,54)),rep(4,135)))
colnames(BOTH_nodes_data) <- c("name","group","size")
##BOTH
forceNetwork(Links = BOTH_links_data,Nodes = BOTH_nodes_data,Source = "source",Target = "target",Value = "value",NodeID = "name",Group = "group",Nodesize = "size",
             fontSize = 40,charge = -1000,zoom=T)
#TEST NETWORK
BOTH_links_data_tmp <- BOTH_links_data
BOTH_nodes_data_tmp <- BOTH_nodes_data
BOTH_nodes_data_tmp$size <- as.numeric(as.character(BOTH_nodes_data_tmp$size))+11
#IMPORTANT!!! Revert node IDs to +1 (not zero indexed)
##RUN ONLY ONCE!
#BOTH_links_data_tmp$target <- BOTH_links_data_tmp$target+1
#BOTH_links_data_tmp$source <- BOTH_links_data_tmp$source+1
# Unique replicated (from, to) column-index pairs; dummy link value 100.
# NOTE(review): rep(100,123) hard-codes 123 replicated pairs — confirm.
replicated_nodes_clean <- unique(cbind(na.omit(as.numeric(replicated_pairs[,3])),na.omit(as.numeric(replicated_pairs[,4]))))
replicated_nodes_clean_v2 <- cbind(replicated_nodes_clean,rep(100,123))
replicated_nodes_clean_v2 <- as.data.frame(replicated_nodes_clean_v2)
colnames(replicated_nodes_clean_v2) <- colnames(BOTH_links_data_tmp)
#Zero Index the Links df
#RUN ONCE ONLY
replicated_nodes_clean_v2$target <- replicated_nodes_clean_v2$target-1
replicated_nodes_clean_v2$source <- replicated_nodes_clean_v2$source-1
#
#for(i in 1:123){
#  indeks <- which(BOTH_links_data_tmp[,1]==replicated_nodes_clean[i,1] & BOTH_links_data_tmp[,2]==replicated_nodes_clean[i,2])
#  BOTH_links_data_tmp[indeks,3] <- 100
#}
#BOTH_links_data_tmp[which(BOTH_links_data_tmp[,3]!=100),3] <- rep(0,184)
forceNetwork(Links = replicated_nodes_clean_v2,Nodes = BOTH_nodes_data_tmp,Source = "source",Target = "target",Value = "value",NodeID = "name",Group = "group",Nodesize = "size",
             fontSize = 40,charge = -200,zoom=T)
sankeyNetwork(Links = replicated_nodes_clean_v2,Nodes = BOTH_nodes_data_tmp,Source = "source",Target = "target",Value = "value",NodeID = "name",fontSize = 40)
###iGraph version of the D3 graph
# Adjacency matrix over the unique events appearing in replicated pairs.
# NOTE(review): 122 nodes / 123 pairs are hard-coded — confirm they match
# nrow(replicated_nodes_clean).
all_adj_mx <- array(0,dim=c(122,122))
colnames(all_adj_mx) <- unique(c(replicated_nodes_clean[,1],replicated_nodes_clean[,2]))
rownames(all_adj_mx) <- unique(c(replicated_nodes_clean[,1],replicated_nodes_clean[,2]))
#Populate adjacency matrix
for(i in 1:123) {
  row_n <- which(rownames(all_adj_mx)==replicated_nodes_clean[i,1])
  col_n <- which(colnames(all_adj_mx)==replicated_nodes_clean[i,2])
  all_adj_mx[row_n,col_n] <- 1
}
#Cleaning-up
# Keep only the AgeO columns that take part in a replicated pair (the
# dimnames of all_adj_mx are numeric column indices at this point).
ssadda_ao_replicated <- ssadda_ageOfOnset[,as.numeric(as.character(unique(colnames(all_adj_mx))))]
#Re-name columns and rows of adj matrix according to actual SSADDA header name
colnames(all_adj_mx) <- colnames(ssadda_ageOfOnset)[as.numeric(as.character(colnames(all_adj_mx)))]
rownames(all_adj_mx) <- colnames(ssadda_ageOfOnset)[as.numeric(as.character(rownames(all_adj_mx)))]
#CLEAN-UP the age of onset mx
# Implausible ages (>80 or <1) become the string "NA"; this turns the column
# character, and downstream code re-coerces with as.numeric().
for(i in 1:ncol(ssadda_ao_replicated)){p <- which(ssadda_ao_replicated[,i]>80 | ssadda_ao_replicated[,i]<1); ssadda_ao_replicated[p,i] <- "NA"}
# Mean age of onset per retained event.
avg_age_arr <- array(NA,dim=c(ncol(ssadda_ao_replicated),2))
avg_age_arr[,1] <- colnames(ssadda_ao_replicated)
for(i in 1:ncol(ssadda_ao_replicated)) {
  avg_val <- mean(na.omit(as.numeric(as.character(ssadda_ao_replicated[,i]))))
  avg_age_arr[i,2] <- avg_val
}
#Order chronologically the age of onset events
avg_age_arr <- avg_age_arr[order(as.numeric(as.character(avg_age_arr[,2]))),]
##
#Remove E connecting V that are in the wrong order (according to avg_age_arr[,2])
#Working version Below!
##IMPORTANT: BEFORE RUNNING CODE BELOW, colnames(all-adj_mx) MUST BE FULL CHARACTER_TYPE NAMES
# Drop every edge whose direction contradicts the chronological order of the
# mean ages of onset in avg_age_arr.
for(i in 1:ncol(all_adj_mx)) {
  running_node <- colnames(all_adj_mx)[i]
  next_node <- colnames(all_adj_mx)[(which(all_adj_mx[i,]==1))]
  origin_node <- colnames(all_adj_mx)[(which(all_adj_mx[,i]==1))]
  #Running node is the origin: remove running -> successor edges whose
  #successor occurs earlier on average.
  if(length(next_node)!=0){
    for(j in 1:length(next_node)) {
      if((which(avg_age_arr[,1]==toString(next_node[j]))) < (which(avg_age_arr[,1]==toString(running_node)))){
        # Fix: zero only the offending edge; the original zeroed ALL
        # running -> next_node edges as soon as one successor was out of order.
        all_adj_mx[running_node,next_node[j]] <- 0
      }
    }
  }
  else {}
  #Running node is the destination: remove predecessor -> running edges whose
  #predecessor occurs later on average.
  if(length(origin_node)!=0){
    for(k in 1:length(origin_node)){
      if((which(avg_age_arr[,1]==toString(origin_node[k]))) > (which(avg_age_arr[,1]==toString(running_node)))){
        # Fix: the original wrote all_adj_mx[running_node,next_node] here,
        # touching the wrong cells (or nothing at all) instead of the
        # origin -> running edge actually being tested.
        all_adj_mx[origin_node[k],running_node] <- 0
      }
    }
  }
  else {}
}
View(all_adj_mx)
###
#Add an extra column for the order in which the vertices occur (average age of onset)
# Rename nodes to "<name>_<chronological rank>" with the "AgeO" suffix removed.
renaming_mx <- cbind(colnames(all_adj_mx),match(colnames(all_adj_mx),avg_age_arr[,1]))
gsub("AgeO","",paste0(renaming_mx[,1],"_",renaming_mx[,2]))
colnames(all_adj_mx) <- gsub("AgeO","",paste0(renaming_mx[,1],"_",renaming_mx[,2]))
rownames(all_adj_mx) <- gsub("AgeO","",paste0(renaming_mx[,1],"_",renaming_mx[,2]))
#unique(c(replicated_nodes_clean[,1],replicated_nodes_clean[,2]))
#rownames(all_adj_mx) <- unique(c(replicated_nodes_clean[,1],replicated_nodes_clean[,2]))
g <- graph.adjacency(all_adj_mx)
# NOTE(review): ALL_ADJ is not defined anywhere in this file, and this call
# overwrites g from the line above — confirm which matrix is intended.
g <- graph.adjacency(ALL_ADJ)
plot(g,layout=layout_components)
tkplot(g)
tkigraph()
############################END OF D3 NETWORKS##############################
#########################GGPLOT2 Networks(requires all_adj_mx from above)###################################
# Fix: install.packages()'s second positional argument is the *library path*,
# not another package, so the original install.packages("network","sna")
# would have installed "network" into a directory literally named "sna".
# Pass a character vector of package names instead.
install.packages(c("network", "sna"))
install.packages("ggnetwork")
install.packages("ggrepel")
library(GGally)
library(network)
library(sna)
library(ggplot2)
library(intergraph)
library(ggnetwork)
#Random network
# Toy example demonstrating ggnet2 styling before plotting the real network.
net = rgraph(10, mode = "graph", tprob = 0.5)
net = network(net, directed = FALSE)
network.vertex.names(net) = letters[1:10]
ggnet2(net,mode="circle")
#Node colors
net %v% "phono" = ifelse(letters[1:10] %in% c("a", "e", "i"), "vowel", "consonant")
ggnet2(net, color = "phono",mode="circle",size="degree",label=1:10,directed=T) +
  theme(panel.background = element_rect(fill = "grey90"))
#Layout vriable in ggnet2 will accept 2 columns with coordinates data. It'll have as many rows as the number of nodes(i.e.122).
# x-coordinate = mean age of onset, y fixed, so nodes spread chronologically.
mynet = network(all_adj_mx,directed=T)
mycoords <- as.data.frame(cbind(as.numeric(avg_age_arr[,2]),rep(1,122)))
# NOTE(review): label = 1:123 but the network has 122 nodes — confirm.
ggnet2(mynet,layout.par = mycoords,arrow.size = 12,arrow.gap = 0.01,edge.size = 1,edge.color = "black",label = 1:123,label.size = 12,label.color = "black")
########################End of GGPLOT2 Networks##############################
##################################ALLUVIAL NETWORK##################################
require(alluvial)
#Use ssadda_ageOfOnset
# Exploratory pass: for each consecutive pair of replicated events, record
# which subjects obey the ordering. alluvial_arr itself is never filled in.
alluvial_arr <- array(NA,dim=c(13000,135))
indeks_arr <- (replicated_nodes_clean_v2$target+1)
out_arr <- array(NA,dim=c(1,2))
for(i in 1:122) {
  # Subjects whose onset of event i precedes (or ties) that of event i+1.
  arr <- which(ssadda_ageOfOnset[,indeks_arr[i]] <= ssadda_ageOfOnset[,indeks_arr[i+1]])
  slice_arr <- cbind(rep(i,length(arr)),arr)
  # NOTE(review): rbind in a loop grows out_arr quadratically — fine for
  # exploration, but preallocate if this is ever run at scale.
  out_arr <- rbind(slice_arr,out_arr)
  #plot(x = slice_arr[,1],y = slice_arr[,2])
  #out_arr <- intersect(arr,out_arr)
  #print(length(out_arr))
  #which(ssadda_ageOfOnset[,indeks_arr[i+1]] < ssadda_ageOfOnset[,indeks_arr[i+2]])
  #which(ssadda_ageOfOnset[,indeks_arr[i+2]] < ssadda_ageOfOnset[,indeks_arr[i+3]])
}
##############################END OF ALLUVIAL NETWORK###############################
#########################################################################################
#                              PAIRWISE CORRELATIONS                                    #
#########################################################################################
###Simple merge
# Inner joins on the concatenated pair label (column V5) show which
# consecutive links are shared between discovery and replication orderings.
merge(AA_ordered_events_v2,AA_rep_ordered_events_v2,by="V5")
merge(ALL_ordered_events_v2,ALL_rep_ordered_events_v2,by="V5")
#Write into CSV files
# Persist the raw (unlinked) orderings for each discovery cohort.
write.csv(ALL_ordered_events,"ALL_ordered_events.csv",row.names = F)
write.csv(AA_ordered_events,"AA_ordered_events.csv",row.names = F)
write.csv(EA_ordered_events,"EA_ordered_events.csv",row.names = F)
#Define Function that takes ssadda file and does pairwise correlations for all Age of Onset phenotypes
#' Pairwise Pearson correlations between all age-of-onset ("*AgeO") columns.
#'
#' @param ssadda_input Matrix or data frame containing "*AgeO" columns.
#' @param pairwise_cor_output Output file name stem; results are written to
#'   "<stem>.csv" in the working directory.
#' @param direction "both" uses every subject with valid ages for the pair;
#'   "d2p" keeps only subjects with AO(i) < AO(j); "p2d" only AO(i) > AO(j).
#' @return Invisibly, the summary matrix written to disk (one row per pair:
#'   names, r, p-value, % usable data, Drugs/Psych flags). The original
#'   returned NULL; returning the matrix invisibly is backward compatible.
pairwise_corr_fun <- function(ssadda_input=ssadda,pairwise_cor_output="Pairwise_summary",direction="d2p") {
  #Force correct variable type
  pairwise_cor_output <- as.character(pairwise_cor_output)
  direction <- as.character(direction)
  # Fix: fail fast on a bad direction. The original only printed a message
  # and then crashed later on the undefined 'arr_all'.
  if (!direction %in% c("both", "d2p", "p2d")) {
    stop("Unknown value for direction variable: ", direction)
  }
  #Let's pick up all "Age of Onset" columns
  ssadda_ageOfOnset <- ssadda_input[,grep("AgeO$",colnames(ssadda_input))]
  ageO_n <- ncol(ssadda_ageOfOnset)
  summary_array <- array(NA,dim=c(choose(ageO_n,2),7))
  # Index of the last drug-related ("H2*") header; columns past it are
  # treated as psychiatric phenotypes. Fix: guard against no match —
  # max(integer(0)) would return -Inf with a warning.
  h2_idx <- grep("H2",colnames(ssadda_ageOfOnset),ignore.case = FALSE)
  lim_col_n <- if (length(h2_idx) > 0) max(h2_idx) else 0
  #Nested loop for pairwise correlations
  k <- 0
  for(i in seq_len(ageO_n - 1)) {
    for(j in (i+1):ageO_n) {
      k <- k+1
      # Rows with missing/zero/implausible (>80) ages are always excluded;
      # directional modes additionally drop rows violating the ordering.
      arr_1 <- which(is.na(ssadda_ageOfOnset[,i]) | ssadda_ageOfOnset[,i]==0 | ssadda_ageOfOnset[,i] >80)
      arr_2 <- which(is.na(ssadda_ageOfOnset[,j]) | ssadda_ageOfOnset[,j]==0 | ssadda_ageOfOnset[,j] >80)
      if(direction=="both") {
        arr_all <- unique(c(arr_1,arr_2))
      }
      else if(direction=="d2p") {
        not_d2p <- which(ssadda_ageOfOnset[,i]>=ssadda_ageOfOnset[,j])
        arr_all <- unique(c(arr_1,arr_2,not_d2p))
      }
      else {  # "p2d"
        not_p2d <- which(ssadda_ageOfOnset[,i]<=ssadda_ageOfOnset[,j])
        arr_all <- unique(c(arr_1,arr_2,not_p2d))
      }
      missing_per <- ((length(arr_all)/nrow(ssadda_ageOfOnset))*100)
      # Skip pairs with almost no usable data (their summary row stays NA).
      if(missing_per>=99) next
      # Fix: x[-integer(0)] selects NOTHING in R, so the original crashed
      # whenever no rows had to be excluded; keep all rows in that case.
      keep <- if (length(arr_all) > 0) -arr_all else seq_len(nrow(ssadda_ageOfOnset))
      test_sum <- cor.test(ssadda_ageOfOnset[keep,i],ssadda_ageOfOnset[keep,j])
      summary_array[k,1] <- colnames(ssadda_ageOfOnset)[i]
      summary_array[k,2] <- colnames(ssadda_ageOfOnset)[j]
      summary_array[k,3] <- test_sum$estimate
      summary_array[k,4] <- test_sum$p.value
      summary_array[k,5] <- 100-missing_per
      summary_array[k,6] <- if (i <= lim_col_n) "Drugs" else "Other"
      summary_array[k,7] <- if (j > lim_col_n) "Psych" else "Other"
    }
    # Progress indicator, once per outer-loop column.
    per_comp <- round((k/choose(ageO_n,2)*100),3)
    print(paste0(per_comp," %"))
  }
  #Label summary array columns
  colnames(summary_array) <- c("Age_of_onset_1","Age_of_onset_2","Correlation_coefficient","Correlation_Pvalue","Available_data_%","Drugs_Age_of_onset","Psych_Age_of_onset")
  write.csv(summary_array,paste0(pairwise_cor_output,".csv"))
  invisible(summary_array)
}
####End of Function
#Now run the function 'pairwise_corr_fun' on any ssadda files (with both)!
# Undirected ("both") correlations for every cohort.
pairwise_corr_fun(ssadda_input = ssadda,pairwise_cor_output = "Pairwise_summary",direction="both")
pairwise_corr_fun(ssadda_input = ssadda_EA,pairwise_cor_output = "Pairwise_summary_EA",direction="both")
pairwise_corr_fun(ssadda_input = ssadda_AA,pairwise_cor_output = "Pairwise_summary_AA",direction="both")
pairwise_corr_fun(ssadda_input = ssadda_rep,pairwise_cor_output = "Pairwise_summary_rep",direction="both")
pairwise_corr_fun(ssadda_input = ssadda_rep_EA,pairwise_cor_output = "Pairwise_summary_rep_EA",direction="both")
pairwise_corr_fun(ssadda_input = ssadda_rep_AA,pairwise_cor_output = "Pairwise_summary_rep_AA",direction="both")
#D2P
# Drug-onset-precedes-psychiatric-onset subsets.
pairwise_corr_fun(ssadda_input = ssadda,pairwise_cor_output = "d2p_Pairwise_summary",direction="d2p")
pairwise_corr_fun(ssadda_input = ssadda_EA,pairwise_cor_output = "d2p_Pairwise_summary_EA",direction="d2p")
pairwise_corr_fun(ssadda_input = ssadda_AA,pairwise_cor_output = "d2p_Pairwise_summary_AA",direction="d2p")
pairwise_corr_fun(ssadda_input = ssadda_rep,pairwise_cor_output = "d2p_Pairwise_summary_rep",direction="d2p")
pairwise_corr_fun(ssadda_input = ssadda_rep_EA,pairwise_cor_output = "d2p_Pairwise_summary_rep_EA",direction="d2p")
pairwise_corr_fun(ssadda_input = ssadda_rep_AA,pairwise_cor_output = "d2p_Pairwise_summary_rep_AA",direction="d2p")
#P2D
# Psychiatric-onset-precedes-drug-onset subsets.
pairwise_corr_fun(ssadda_input = ssadda,pairwise_cor_output = "p2d_Pairwise_summary",direction="p2d")
pairwise_corr_fun(ssadda_input = ssadda_EA,pairwise_cor_output = "p2d_Pairwise_summary_EA",direction="p2d")
pairwise_corr_fun(ssadda_input = ssadda_AA,pairwise_cor_output = "p2d_Pairwise_summary_AA",direction="p2d")
pairwise_corr_fun(ssadda_input = ssadda_rep,pairwise_cor_output = "p2d_Pairwise_summary_rep",direction="p2d")
pairwise_corr_fun(ssadda_input = ssadda_rep_EA,pairwise_cor_output = "p2d_Pairwise_summary_rep_EA",direction="p2d")
pairwise_corr_fun(ssadda_input = ssadda_rep_AA,pairwise_cor_output = "p2d_Pairwise_summary_rep_AA",direction="p2d")
#########################################################################################
#                                       PLOTS                                           #
#########################################################################################
# Cocaine age of onset by ASPD status, one panel per cohort.
# NOTE(review): aspd is assumed coded 1 = negative, 2 = positive — confirm.
par(mfrow=c(2,2))
boxplot(as.numeric(ssadda_AA[which(ssadda_AA$aspd==1),]$F2_CocUseAgeO),as.numeric(ssadda_AA[which(ssadda_AA$aspd==2),]$F2_CocUseAgeO),ylab="Coc age of onset", xlab="ASPD(-) and ASPD(+)",main="AA discovery")
boxplot(as.numeric(ssadda_rep_AA[which(ssadda_rep_AA$aspd==1),]$F2_CocUseAgeO),as.numeric(ssadda_rep_AA[which(ssadda_rep_AA$aspd==2),]$F2_CocUseAgeO),ylab="Coc age of onset", xlab="ASPD(-) and ASPD(+)",main="AA replication")
boxplot(as.numeric(ssadda_EA[which(ssadda_EA$aspd==1),]$F2_CocUseAgeO),as.numeric(ssadda_EA[which(ssadda_EA$aspd==2),]$F2_CocUseAgeO),ylab="Coc age of onset", xlab="ASPD(-) and ASPD(+)",main="EA discovery")
boxplot(as.numeric(ssadda_rep_EA[which(ssadda_rep_EA$aspd==1),]$F2_CocUseAgeO),as.numeric(ssadda_rep_EA[which(ssadda_rep_EA$aspd==2),]$F2_CocUseAgeO),ylab="Coc age of onset", xlab="ASPD(-) and ASPD(+)",main="EA replication")
#Gap in age of onsets
# Distribution of the onset-age gap for selected replicated event pairs,
# one 2x2 panel set per pair across the four cohorts.
hist(as.numeric(as.character(ssadda_AA$I11C_1_TkeAdvAgeO)) - as.numeric(as.character(ssadda_AA$D4D_CigAgeO)))
par(mfrow=c(2,2))
barplot(table(as.numeric(as.character(ssadda_AA$I11C_1_TkeAdvAgeO)) - as.numeric(as.character(ssadda_AA$D4D_CigAgeO))),ylab="Samples",xlab="AA age gap",main="I11C_1_TkeAdv -> D4D_CigAgeO")
barplot(table(as.numeric(as.character(ssadda_rep_AA$I11C_1_TkeAdvAgeO)) - as.numeric(as.character(ssadda_rep_AA$D4D_CigAgeO))),ylab="Samples",xlab="AA(rep) age gap")
barplot(table(as.numeric(as.character(ssadda_EA$I11C_1_TkeAdvAgeO)) - as.numeric(as.character(ssadda_EA$D4D_CigAgeO))),ylab="Samples",xlab="EA age gap")
barplot(table(as.numeric(as.character(ssadda_rep_EA$I11C_1_TkeAdvAgeO)) - as.numeric(as.character(ssadda_rep_EA$D4D_CigAgeO))),ylab="Samples",xlab="EA(rep) age gap")
par(mfrow=c(2,2))
barplot(table(as.numeric(as.character(ssadda_AA$I42A_1_DebtAgeO)) - as.numeric(as.character(ssadda_AA$F4B_CocHghDyAgeO))),ylab="Samples",xlab="AA age gap",main="I42A_1_Debt -> F4B_CocHghDy")
barplot(table(as.numeric(as.character(ssadda_rep_AA$I42A_1_DebtAgeO)) - as.numeric(as.character(ssadda_rep_AA$F4B_CocHghDyAgeO))),ylab="Samples",xlab="AA(rep) age gap")
barplot(table(as.numeric(as.character(ssadda_EA$I42A_1_DebtAgeO)) - as.numeric(as.character(ssadda_EA$F4B_CocHghDyAgeO))),ylab="Samples",xlab="EA age gap")
barplot(table(as.numeric(as.character(ssadda_rep_EA$I42A_1_DebtAgeO)) - as.numeric(as.character(ssadda_rep_EA$F4B_CocHghDyAgeO))),ylab="Samples",xlab="EA(rep) age gap")
par(mfrow=c(2,2))
barplot(table(as.numeric(as.character(ssadda_AA$I18A_2_VandalAgeO)) - as.numeric(as.character(ssadda_AA$F20C_Exp2BxAgeO))),ylab="Samples",xlab="AA age gap",main="I18A_2_Vandal -> F20C_Exp2BxAgeO")
barplot(table(as.numeric(as.character(ssadda_rep_AA$I18A_2_VandalAgeO)) - as.numeric(as.character(ssadda_rep_AA$F20C_Exp2BxAgeO))),ylab="Samples",xlab="AA(rep) age gap")
barplot(table(as.numeric(as.character(ssadda_EA$I18A_2_VandalAgeO)) - as.numeric(as.character(ssadda_EA$F20C_Exp2BxAgeO))),ylab="Samples",xlab="EA age gap")
barplot(table(as.numeric(as.character(ssadda_rep_EA$I18A_2_VandalAgeO)) - as.numeric(as.character(ssadda_rep_EA$F20C_Exp2BxAgeO))),ylab="Samples",xlab="EA(rep) age gap")
#########################################################################################
#                                       STATS                                           #
#########################################################################################
#Find Odds ratios for each of 4 datasets in the Left -> Right direction
# Fisher's exact test of ASPD status vs. whether challenging authority
# preceded playing hooky, per cohort. 2x2 cells are built from the aspd
# table; NOTE(review): t[2]/t[3] index the table positionally, which assumes
# aspd takes values {0,1,2} in every subset — confirm.
t <- table(ssadda_AA[which(as.numeric(as.character(ssadda_AA$I7A_ChlngAuthAgeO))<as.numeric(as.character(ssadda_AA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_AA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_AA$aspd)[2]-as.numeric(t[2]))),nrow=2))
t <- table(ssadda_rep_AA[which(as.numeric(as.character(ssadda_rep_AA$I7A_ChlngAuthAgeO))<as.numeric(as.character(ssadda_rep_AA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_rep_AA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_rep_AA$aspd)[2]-as.numeric(t[2]))),nrow=2))
t <- table(ssadda_EA[which(as.numeric(as.character(ssadda_EA$I7A_ChlngAuthAgeO))<as.numeric(as.character(ssadda_EA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_EA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_EA$aspd)[2]-as.numeric(t[2]))),nrow=2))
t <- table(ssadda_rep_EA[which(as.numeric(as.character(ssadda_rep_EA$I7A_ChlngAuthAgeO))<as.numeric(as.character(ssadda_rep_EA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_rep_EA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_rep_EA$aspd)[2]-as.numeric(t[2]))),nrow=2))
#Find Odds ratios for each of 4 datasets i nthe Left <- Right direction
t <- table(ssadda_AA[which(as.numeric(as.character(ssadda_AA$I7A_ChlngAuthAgeO))>as.numeric(as.character(ssadda_AA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_AA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_AA$aspd)[2]-as.numeric(t[2]))),nrow=2))
t <- table(ssadda_rep_AA[which(as.numeric(as.character(ssadda_rep_AA$I7A_ChlngAuthAgeO))>as.numeric(as.character(ssadda_rep_AA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_rep_AA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_rep_AA$aspd)[2]-as.numeric(t[2]))),nrow=2))
t <- table(ssadda_EA[which(as.numeric(as.character(ssadda_EA$I7A_ChlngAuthAgeO))>as.numeric(as.character(ssadda_EA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_EA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_EA$aspd)[2]-as.numeric(t[2]))),nrow=2))
t <- table(ssadda_rep_EA[which(as.numeric(as.character(ssadda_rep_EA$I7A_ChlngAuthAgeO))>as.numeric(as.character(ssadda_rep_EA$I1B_HookyAgeO))),]$aspd)
fisher.test(matrix(c(as.numeric(t[3]),(table(ssadda_rep_EA$aspd)[3]-as.numeric(t[3])),as.numeric(t[2]),(table(ssadda_rep_EA$aspd)[2]-as.numeric(t[2]))),nrow=2))
###Log Rge for ASPD~age-of-onset
# Logistic regression of ASPD (recoded 1/2 -> 0/1) on a single age of onset.
t <- na.omit(as.data.frame((cbind(as.character(ssadda_AA$I23C_1_IlglAgeO),as.character(ssadda_AA$aspd)))))
t <- t[which(t$V1!="UC"),]
t <- t[which(t$V2!=0),]
t <- cbind(t,as.numeric(as.character(t$V2))-rep(1,nrow(t)))
mod <- glm(t[,3]~as.numeric(as.character(t[,1])),family="binomial")
summary(mod)
# Odds ratios from the logit coefficients.
exp(coef(mod))
#########################################################################################
# #
# EXTRA #
# #
#########################################################################################
#Count samples with and without progression
length(which(ssadda_AA$I7_ChlngAuth==5 & ssadda_AA$I1_Hooky == 1))
length(which(ssadda_rep_AA$I7_ChlngAuth==5 & ssadda_rep_AA$I1_Hooky == 1))
length(which(ssadda_EA$I7_ChlngAuth==5 & ssadda_EA$I1_Hooky == 1))
length(which(ssadda_rep_EA$I7_ChlngAuth==5 & ssadda_rep_EA$I1_Hooky == 1))
length(which(ssadda_AA$I2_Expell==5 & ssadda_AA$I1_Hooky == 1))
length(which(ssadda_rep_AA$I2_Expell==5 & ssadda_rep_AA$I1_Hooky == 1))
length(which(ssadda_EA$I2_Expell==5 & ssadda_EA$I1_Hooky == 1))
length(which(ssadda_rep_EA$I2_Expell==5 & ssadda_rep_EA$I1_Hooky == 1))
length(which(ssadda_AA$I7_ChlngAuth==5 & ssadda_AA$I4_StyOut == 1))
length(which(ssadda_rep_AA$I7_ChlngAuth==5 & ssadda_rep_AA$I4_StyOut == 1))
length(which(ssadda_EA$I7_ChlngAuth==5 & ssadda_EA$I4_StyOut == 1))
length(which(ssadda_rep_EA$I7_ChlngAuth==5 & ssadda_rep_EA$I4_StyOut == 1))
length(which(ssadda_AA$H10B_MJ2Prb==5 & ssadda_AA$F3_CocDaily == 1))
# Cross-tabulation counts: number of respondents meeting pairs of SSADDA item
# conditions, computed for the AA/EA samples and their replication ("rep")
# counterparts. length(which(cond)) counts rows where cond is TRUE; rows with
# NA in either item are excluded from the count.
length(which(ssadda_rep_AA$H10B_MJ2Prb==5 & ssadda_rep_AA$F3_CocDaily == 1))
length(which(ssadda_EA$H10B_MJ2Prb==5 & ssadda_EA$F3_CocDaily == 1))
length(which(ssadda_rep_EA$H10B_MJ2Prb==5 & ssadda_rep_EA$F3_CocDaily == 1))
length(which(ssadda_AA$H10B_MJ2Prb==5 & ssadda_AA$F3_CocDaily == 1))
# NOTE(review): the next three lines repeat the H10B_MJ2Prb/F3_CocDaily
# queries above -- possibly an accidental duplicate.
length(which(ssadda_rep_AA$H10B_MJ2Prb==5 & ssadda_rep_AA$F3_CocDaily == 1))
length(which(ssadda_EA$H10B_MJ2Prb==5 & ssadda_EA$F3_CocDaily == 1))
length(which(ssadda_rep_EA$H10B_MJ2Prb==5 & ssadda_rep_EA$F3_CocDaily == 1))
length(which(ssadda_AA$G1_OpiEver==5 & ssadda_AA$F1_CocEver == 1))
length(which(ssadda_rep_AA$G1_OpiEver==5 & ssadda_rep_AA$F1_CocEver == 1))
length(which(ssadda_EA$G1_OpiEver==5 & ssadda_EA$F1_CocEver == 1))
length(which(ssadda_rep_EA$G1_OpiEver==5 & ssadda_rep_EA$F1_CocEver == 1))
length(which(ssadda_AA$I18_Vandal!=1 & ssadda_AA$F20A_CocExp3 == 1))
length(which(ssadda_rep_AA$I18_Vandal!=1 & ssadda_rep_AA$F20A_CocExp3 == 1))
length(which(ssadda_EA$I18_Vandal!=1 & ssadda_EA$F20A_CocExp3 == 1))
length(which(ssadda_rep_EA$I18_Vandal!=1 & ssadda_rep_EA$F20A_CocExp3 == 1))
length(which(ssadda_AA$F20A_CocExp3==5 & ssadda_AA$E33_TrtmtProg == 1))
length(which(ssadda_rep_AA$F20A_CocExp3==5 & ssadda_rep_AA$E33_TrtmtProg == 1))
length(which(ssadda_EA$F20A_CocExp3==5 & ssadda_EA$E33_TrtmtProg == 1))
length(which(ssadda_rep_EA$F20A_CocExp3==5 & ssadda_rep_EA$E33_TrtmtProg == 1))
####
length(which(ssadda_AA$E26H_WDSymDrnk==5 & ssadda_AA$E26C_WDSym2 == 1))
length(which(ssadda_rep_AA$E26H_WDSymDrnk==5 & ssadda_rep_AA$E26C_WDSym2 == 1))
length(which(ssadda_EA$E26H_WDSymDrnk==5 & ssadda_EA$E26C_WDSym2 == 1))
length(which(ssadda_rep_EA$E26H_WDSymDrnk==5 & ssadda_rep_EA$E26C_WDSym2 == 1))
length(which(ssadda_AA$E32_SlfHlp==5 & ssadda_AA$E33_TrtmtProg == 1))
length(which(ssadda_rep_AA$E32_SlfHlp==5 & ssadda_rep_AA$E33_TrtmtProg == 1))
length(which(ssadda_EA$E32_SlfHlp==5 & ssadda_EA$E33_TrtmtProg == 1))
length(which(ssadda_rep_EA$E32_SlfHlp==5 & ssadda_rep_EA$E33_TrtmtProg == 1))
#I23C_1_IlglAgeO
# Respondents answering 5 to all four I23 illegal-activity items but not 5
# to the traffic-violation item.
length(which(ssadda_AA$I23_1_BadChk==5 & ssadda_AA$I23_2_StolnGood==5 & ssadda_AA$I23_3_PdSex==5 & ssadda_AA$I23_4_Pimp==5 & ssadda_AA$I28_TrfkViol != 5))
length(which(ssadda_rep_AA$I23_1_BadChk==5 & ssadda_rep_AA$I23_2_StolnGood==5 & ssadda_rep_AA$I23_3_PdSex==5 & ssadda_rep_AA$I23_4_Pimp==5 & ssadda_rep_AA$I28_TrfkViol != 5))
length(which(ssadda_EA$I23_1_BadChk==5 & ssadda_EA$I23_2_StolnGood==5 & ssadda_EA$I23_3_PdSex==5 & ssadda_EA$I23_4_Pimp==5 & ssadda_EA$I28_TrfkViol != 5))
length(which(ssadda_rep_EA$I23_1_BadChk==5 & ssadda_rep_EA$I23_2_StolnGood==5 & ssadda_rep_EA$I23_3_PdSex==5 & ssadda_rep_EA$I23_4_Pimp==5 & ssadda_rep_EA$I28_TrfkViol != 5))
length(which(ssadda_AA$F22_Exp3Bx==5 & ssadda_AA$F5_CocDes == 1))
length(which(ssadda_rep_AA$F22_Exp3Bx==5 & ssadda_rep_AA$F5_CocDes == 1))
length(which(ssadda_EA$F22_Exp3Bx==5 & ssadda_EA$F5_CocDes == 1))
length(which(ssadda_rep_EA$F22_Exp3Bx==5 & ssadda_rep_EA$F5_CocDes == 1))
length(which(ssadda_AA$F5_CocDes==5 & ssadda_AA$E31_AlcPro == 1))
length(which(ssadda_rep_AA$F5_CocDes==5 & ssadda_rep_AA$E31_AlcPro == 1))
length(which(ssadda_EA$F5_CocDes==5 & ssadda_EA$E31_AlcPro == 1))
length(which(ssadda_rep_EA$F5_CocDes==5 & ssadda_rep_EA$E31_AlcPro == 1))
#Run pairwise correlations and calculate p-values
# NOTE(review): require() only warns (and returns FALSE) when corrplot is
# missing; library(corrplot) would fail fast. TODO confirm intent.
require(corrplot)
# Pairwise correlation-test p-values for the columns of a matrix.
#
# Args:
#   mat: numeric matrix or data frame; columns are the variables.
#   ...: extra arguments forwarded to cor.test() (e.g. method, alternative).
# Returns:
#   A symmetric n x n matrix of p-values with a zero diagonal; row and
#   column names are taken from the columns of `mat`.
#
# Fixes: for a single-column input, the original `1:(n - 1)` evaluated to
# c(1, 0) and the inner `(i + 1):n` ran backwards, so the loops indexed out
# of range; seq_len() plus an explicit guard handles n < 2 safely.
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n)
  diag(p.mat) <- 0
  if (n >= 2) {
    for (i in seq_len(n - 1)) {
      for (j in (i + 1):n) {
        tmp <- cor.test(mat[, i], mat[, j], ...)
        # p-value matrix is symmetric: fill both triangles at once
        p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
      }
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  p.mat
}
# P-value matrix for the age-of-onset variables; NA p-values are set to 0
# so corrplot does not blank those cells out.
p_mat <- cor.mtest(ssadda_ageOfOnset)
p_mat[is.na(p_mat)] <- 0
cor_mat <- cor(ssadda_ageOfOnset)
# Bonferroni-corrected threshold (0.05 / 136 pairwise tests); cells failing
# the test are left blank.
corrplot(cor_mat,method = "square",outline = F,p.mat = p_mat,sig.level = 0.05/136,order = "original",insig = "blank",pch = ".",pch.cex = 1.5)
|
c110b1484012b5260bd0d390588da36d1fcd6e39
|
68f8aa9d06a5b60bb12a206c75ca903cdaa000ac
|
/file1.R
|
755fe8b6b4926e6739dac1f6a3d185ddd64e6d15
|
[] |
no_license
|
binayak91/analytics
|
f382fc3a83d9698def51ca476eec5e4249db2e82
|
b3fcf41e01b45ed5238001c38dbe6cce4286847c
|
refs/heads/master
| 2020-03-30T06:38:16.190691
| 2018-10-02T13:31:18
| 2018-10-02T13:31:18
| 150,876,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
file1.R
|
women
git config --global user.email "um18280@stu.ximb.ac.in"
git config --global user.name "binayak91"
|
d645aa12ccbccd2fb32aef522ac747b9d5e1b73d
|
b23983ee89e5b116c99f766d6f09c1d9cfe9c55a
|
/workout03/BInomial/man/bin_variance.Rd
|
28ac1333adff58a92b1fb210238b2a867cedd16d
|
[] |
no_license
|
stat133-sp19/hw-stat133-dbian17
|
503ee167b1345b3baae87a5cf13b7ecb69fb8584
|
b8d058673b37c10fafb655cd734f39e417bd8a15
|
refs/heads/master
| 2020-04-28T08:59:04.197050
| 2019-05-04T06:05:17
| 2019-05-04T06:05:17
| 175,149,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 441
|
rd
|
bin_variance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Binomial.R
\name{bin_variance}
\alias{bin_variance}
\title{bin_variance}
\usage{
bin_variance(trials, prob)
}
\arguments{
\item{trials}{number of trials}

\item{prob}{probability of success on each trial}
}
\value{
the variance of the given binomial distribution with "trials" trials and probability "prob" of success
}
\description{
finds the variance of a binomial distribution with "trials" and probability "prob"
}
|
eb65a9afa41a90b77b7bb2aff9cbbaa28fb58f1e
|
f8050f1ad4950555990d4dd4552240ee86575b50
|
/R/autojags.R
|
2444a4c8173b5c80d6b3c98b9d2dd20956b5c093
|
[] |
no_license
|
SMandujanoR/jagsUI
|
aac45c6ae26f51ae46d701278aed8591b9f58843
|
f4838f60c1f6f4a358d4adacde51f8f080c32cf9
|
refs/heads/master
| 2021-03-06T01:27:21.806892
| 2020-03-09T20:35:15
| 2020-03-09T20:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,146
|
r
|
autojags.R
|
# Run JAGS repeatedly, extending the chains until convergence (every
# monitored node's Rhat below Rhat.limit) or until max.iter total iterations
# have been run.
#
# Arguments mirror jagsUI::jags(); additionally:
#   iter.increment  iterations added per update cycle
#   save.all.iter   keep samples from every update, not just the last cycle
#   Rhat.limit      convergence threshold checked by test_Rhat()
#   max.iter        hard cap on the total number of iterations
# Returns an object of class 'jagsUI'.
autojags <- function(data, inits=NULL, parameters.to.save, model.file,
                     n.chains, n.adapt=1000, iter.increment=1000, n.burnin=0,
                     n.thin=1, save.all.iter=FALSE, modules=c('glm'),
                     factories=NULL, parallel=FALSE, n.cores=NULL, DIC=TRUE,
                     no.stats=NULL, Rhat.limit=1.1, max.iter=100000, quiet=FALSE){
  #Save overall start time
  start.time <- Sys.time()
  #Initial model run: burn-in plus the first increment
  params <- check_params(parameters.to.save, DIC)
  cur_iter <- iter.increment + n.burnin
  if(!quiet) cat('Burn-in + Update 1',' (',cur_iter,')\n',sep="")
  out <- jags(data, inits, parameters.to.save, model.file, n.chains,
              n.adapt, cur_iter, n.burnin, n.thin, modules, factories,
              parallel, n.cores, DIC, no.stats=params, quiet=TRUE)
  # Convergence / stopping tests after the initial run
  Rhat_fail <- test_Rhat(out$samples, Rhat.limit)
  reach_max <- cur_iter >= max.iter
  index <- 0
  new_burnin = n.burnin  # NOTE(review): '=' assignment; '<-' is used elsewhere
  # Keep updating until Rhat passes everywhere or the iteration cap is hit
  while(Rhat_fail$result & !reach_max){
    index <- index + 1
    new_burnin <- cur_iter  # everything before this update counts as burn-in
    cur_iter <- cur_iter + iter.increment
    if(!quiet) cat('Update ',index,' (',cur_iter,')\n',sep="")
    if(save.all.iter) old_samples <- out$samples
    out <- stats::update(out, n.adapt=n.adapt, n.iter=iter.increment,
                         no.stats = params, quiet=TRUE)
    # When keeping all iterations, splice the previous samples back in
    if(save.all.iter) out$samples <- comb_mcmc_list(old_samples, out$samples)
    #Tests
    Rhat_fail <- test_Rhat(out$samples, Rhat.limit)
    reach_max <- cur_iter >= max.iter
  }
  if(!quiet & reach_max) cat('\nMaximum iterations reached.\n\n')
  #Update MCMC info with final results
  out$run.info$start.time <- start.time
  out$run.info$end.time <- Sys.time()
  out$mcmc.info$n.iter <- cur_iter
  out$mcmc.info$n.burnin <- new_burnin
  if(save.all.iter){
    # All post-burn-in draws were kept, so report the original burn-in
    out$mcmc.info$n.burnin <- n.burnin
    out$mcmc.info$n.draws <- nrow(out$samples[[1]]) * out$mcmc.info$n.chains
  }
  #Process output (summary statistics per monitored parameter)
  stats <- process_output(out$samples, exclude_params=no.stats)
  #Build jagsUI object: replace stale stats slots with freshly computed ones
  out[c("sims.list","pD","DIC","summary")] <- NULL
  out <- c(stats, out)
  class(out) <- 'jagsUI'
  out
}
|
544738e9502ba135cf6f66a45298fb84b116a90d
|
b43fd480c7bc8d424a07db8faf5f4c164a48f8d6
|
/5th_Titanic_campaign/hw5_106356013.R
|
a6e29733f0aa4296c6a7b9a366146032bda7b00a
|
[] |
no_license
|
YuTaNCCU/2017_DataSciencePractice
|
ecf66749a4706056b6789b8519a4b6bf8167d784
|
082dc44a4de09256f0fdf228ee8da6f238323d62
|
refs/heads/master
| 2021-09-27T02:06:09.961489
| 2018-11-05T14:53:51
| 2018-11-05T14:53:51
| 104,413,903
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,038
|
r
|
hw5_106356013.R
|
#setwd("~/Desktop/hw5-YuTaNCCU")
#system("Rscript hw5_106356013.R -fold 5 –out performance.csv")
################
### Parse command-line flags: -fold <k> and –out <file> ###
print('(1/7)讀取指令')
################
# NOTE(review): the "–out" flag is an en dash, not a plain hyphen; callers
# must pass exactly that character (see the example invocation above).
args <- commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
  # Fixed: the usage message referred to hw4 although this script is hw5.
  stop("USAGE: Rscript hw5_106356013.R -fold 5 –out performance.csv", call.=FALSE)
}
i <- 1
while(i < length(args)){
  if(args[i] == "-fold"){
    fold <- as.numeric(args[i+1])   # number of cross-validation folds
    i <- i+1
  }else if(args[i] == "–out"){
    files <- args[i+1]              # output csv path for the performance table
    i <- i+1
  }else{
    stop(paste("Unknown flag", args[i]), call.=FALSE)
  }
  i <- i+1
}
################
### Read and preprocess the Titanic train / test data ###
print('(2/7)讀取檔案')
################
library(dplyr)
# Training data: keep the outcome plus six predictors.
d <- read.csv("Titanic_Data/train.csv", header = T) %>%
  select(Survived, Pclass, Sex, Age, SibSp, Parch,Fare )
# Impute missing Age with the overall mean, then standardise numeric columns.
d$Age = ifelse(is.na(d$Age),
               ave(d$Age, FUN = function(x) mean(x, na.rm = TRUE)),
               d$Age)
d$Sex = ifelse(d$Sex == 'male', 1, 0)  # encode sex numerically (male = 1)
d$Age = scale(d$Age) # Feature Scaling
d$Fare = scale(d$Fare) # Feature Scaling
# Test data: same preprocessing, keeping PassengerId for the submission file.
test <- read.csv("Titanic_Data/test.csv", header = T) %>%
  select(PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare )
test$Age = ifelse(is.na(test$Age),
                  ave(test$Age, FUN = function(x) mean(x, na.rm = TRUE)),
                  test$Age)
test$Sex = ifelse(test$Sex == 'male', 1, 0)
test$Age = scale(test$Age) # Feature Scaling
test$Fare = scale(test$Fare) # Feature Scaling
# The Kaggle test set has one missing Fare; impute it after scaling.
test$Fare = ifelse(is.na(test$Fare),
                   ave(test$Fare, FUN = function(x) mean(x, na.rm = TRUE)),
                   test$Fare)
##################
### Split the training data into k folds ###
print('(3/7)分割資料')
##################
#install.packages('caret')
# NOTE(review): require() only warns if caret is missing; library(caret)
# would fail fast. TODO confirm intent.
require(caret)
#fold=5
# Partition the row indices of d into `fold` disjoint folds, stratified on
# the Survived outcome. The helper below picks which folds play the roles
# of test / calibration / training sets in each CV iteration.
flds <- createFolds( y=d$Survived, k = fold, list = TRUE, returnTrain = FALSE)
# Select the test / calibration / training partitions for CV iteration i.
#
# Fold i is the test set, fold i+1 (wrapping around past `fold`) is the
# calibration set, and the remaining fold-2 folds form the training set.
# Requires fold >= 3. Relies on the globals d, flds and fold.
#
# Bug fixed: the original loop appended fold i+3 repeatedly (fold-3 times)
# and never included folds i+4 ... i+fold-1, so the training set both
# duplicated one fold's rows and dropped entire folds; its wrap-around
# arithmetic also only handled a single overflow past `fold`.
#
# @param i index of the current CV iteration (1..fold)
# @return list(d_test, d_calib, d_train)
nfcv <- function(i){
  wrap <- function(k) ((k - 1) %% fold) + 1   # 1-based circular fold index
  d_test  <- d[flds[[wrap(i)]], ]
  d_calib <- d[flds[[wrap(i + 1)]], ]
  train_folds <- wrap(i + 2:(fold - 1))       # the other fold-2 folds
  d_train <- d[unlist(flds[train_folds], use.names = FALSE), ]
  return( list(d_test, d_calib, d_train) )
}
# Placeholder for a model-fitting helper; currently empty and unused.
ModelFit <- function(fold_i){
}
#################
### Run the n-fold cross-validation ###
print('(4/7)跑n-fold')
#################
# Accuracy accumulators across all folds (and, for KNN, across all k).
trainningAccuracy <- c()
calibrationAccuracy <- c()
testAccuracy <- c()
for (fold_i in 1:fold ){
  data <- nfcv(fold_i)
  d_test <- data[[1]]
  d_calib <- data[[2]]
  d_train <- data[[3]]
  ########################
  ### KNN with k = 1..10 ###
  if(fold_i==1)print('(5/7)使用不同k值的KNN')
  ########################
  # Calibration accuracy per k, used below to pick the best k.
  # (Fixed: this vector was initialised twice in a row.)
  ValidationValue <- c()
  for(i_knn in 1:10){
    #train
    library(class)
    y_pred = knn(train = d_train[,2:7],
                 test = d_train[,2:7],
                 cl = d_train[, 1],
                 k = i_knn,
                 prob = TRUE)
    cm = table(d_train[, 1], y_pred)
    trainningAccuracy <- c(trainningAccuracy, (cm[1,1]+cm[2,2])/sum(cm) )
    #validate
    y_pred = knn(train = d_train[,2:7],
                 test = d_calib[,2:7],
                 cl = d_train[, 1],
                 k = i_knn)
    cm = table(d_calib[, 1], y_pred)
    calibrationAccuracy <- c(calibrationAccuracy, (cm[1,1]+cm[2,2])/sum(cm) )
    ValidationValue <- c(ValidationValue, (cm[1,1]+cm[2,2])/sum(cm) )
  }
  ########################
  ### Random forest ###
  if(fold_i==1)print('(6/7)Ramdom forests')
  ########################
  #train
  library(randomForest)
  set.seed(123)
  fmodel <- randomForest(x=d_train[,2:7], y=d_train[,1], ntree=100, nodesize=7, importance=T)
  y_pred <- ifelse(predict(fmodel, newdata=d_train[,2:7]) >0.5,1,0)
  cm = table(d_train[, 1], y_pred)
  trainningAccuracy <- c(trainningAccuracy, (cm[1,1]+cm[2,2])/sum(cm) )
  #validate
  y_pred <- ifelse(predict(fmodel, newdata=d_calib[,2:7]) >0.5,1,0)
  cm = table(d_calib[, 1],y_pred)
  calibrationAccuracy <- c(calibrationAccuracy, (cm[1,1]+cm[2,2])/sum(cm) )
  # Choose whichever model validated best (best KNN vs random forest), then
  # score the held-out test fold and refresh the Kaggle predictions.
  if(max(ValidationValue) > (cm[1,1]+cm[2,2])/sum(cm) ){
    #test
    y_pred = knn(train = d_train[,2:7],
                 test = d_test[,2:7],
                 cl = d_train[, 1],
                 k = which.max(ValidationValue),
                 prob = TRUE)
    cm = table(d_test[,1], y_pred)
    testAccuracy <- c(testAccuracy, (cm[1,1]+cm[2,2])/sum(cm) )
    #kaggle
    y_pred_kaggle <- knn(train = d[,2:7],
                         test = test[,2:7],
                         cl = d[, 1],
                         k = which.max(ValidationValue),
                         prob = TRUE)
  }else{
    y_pred <- ifelse(predict(fmodel, newdata=d_test[,2:7]) >0.5,1,0)
    cm = table(d_test[, 1], y_pred)
    # Fixed: this branch appended to trainningAccuracy instead of
    # testAccuracy, so the test vector was inflated with every training
    # accuracy recorded so far.
    testAccuracy <- c(testAccuracy, (cm[1,1]+cm[2,2])/sum(cm) )
    #kaggle
    y_pred_kaggle <- ifelse(predict(fmodel, newdata=test[,2:7]) > 0.5, 1, 0)
  }
  write.csv(test,'1.csv')  # NOTE(review): debug dump, rewritten every fold
}
##################
### Print and export the results ###
print('(7/7)print+匯出')
##################
# Console summary of mean accuracies across folds.
print('set,accuracy', quote = FALSE)
print(paste('trainning,', round(mean(trainningAccuracy),2 ), sep=''), quote = FALSE )
print(paste('calibration,', round(mean(calibrationAccuracy),2 ), sep=''), quote = FALSE)
print(paste('test,', round(mean(testAccuracy),2 ), sep=''), quote = FALSE)
# Performance table written to the path supplied via the –out flag.
out_data <- data.frame(set=c('trainning', 'calibration', 'test'),
                       accuracy=c(round( mean(trainningAccuracy), 2 ),
                                  round( mean(calibrationAccuracy), 2 ),
                                  round( mean(testAccuracy), 2 )
                       )
)
write.csv(out_data, file=files, row.names = F, quote = F)
# Kaggle submission: predictions from the final fold's chosen model.
out_data_kaggle <- data.frame(PassengerId=test[,1],
                              Survived= y_pred_kaggle
)
write.csv(out_data_kaggle, file='yuta_ds.csv', row.names = F, quote = F)
|
d770ef426508142cc441da6946f5fda879d2fc7b
|
b6fe639016db185ea6dc74c65e7aee63d62699c8
|
/load_data.r
|
7333396c1f649cd14d04a5109b78b8b0242c646b
|
[] |
no_license
|
aluuu/ExData_CourseProject2
|
580ff072d54a39834ee965e2bffe3218e08997d4
|
c5c984ac79b51072ee651b4a944e870df49c8fa0
|
refs/heads/master
| 2020-12-25T14:23:48.576273
| 2016-09-03T09:34:54
| 2016-09-03T09:34:54
| 67,282,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
load_data.r
|
# Ensure the NEI dataset is present locally -- downloading and extracting it
# on first run -- then load the PM2.5 summary into `NEI`.
library(dplyr, warn.conflicts = FALSE)

# Create the data directory on first use.
if (!dir.exists("./data")) {
  dir.create("./data")
}

dataset_archive <- "./data/dataset.zip"

# Fetch and unpack the archive only when it has not been downloaded yet.
if (!file.exists(dataset_archive)) {
  download.file(
    url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
    destfile = dataset_archive
  )
  unzip(zipfile = dataset_archive, exdir = "./data/")
}

NEI <- readRDS("./data/summarySCC_PM25.rds")
|
7ba78260b268a886504d60a4a798fe57a6ab70ac
|
f5285634e415e8156a6a358aff499378cc25460c
|
/rnaseqVis/server.R
|
c547fa155a706adf293d560c6577a8aba6102e36
|
[
"MIT"
] |
permissive
|
woodhaha/shinyApps
|
4176d1113af37997de50014f88b0fc0618b31ca9
|
743d4ad13728bccab58a15e6a636071c9a1fab98
|
refs/heads/master
| 2020-12-30T15:08:11.512283
| 2016-08-23T14:35:14
| 2016-08-23T14:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 894
|
r
|
server.R
|
# server.R
# load libraries
library(ggplot2)
library(dplyr)
# Load the normalised expression table once, when the app is launched.
data <- read.delim("data/20accessions/normalized_expression.txt")
# Shiny server: echoes the selected dataset name and is meant to render a
# boxplot of the expression data.
server <- function(input, output) {
  output$text1 <- renderText({
    paste("You have selected the",input$dataset,"dataset")
  })
  # NOTE(review): renderPlot is assigned without being called, so output$plot
  # receives the renderPlot function itself rather than a render expression
  # and no plot is produced. Likely meant renderPlot({ ... }). TODO confirm.
  output$plot <- renderPlot
}
# Expected shape of the expression data (sample rows):
# head(df)
# target_id sample est_counts tpm eff_len len accession
# 1 PHS1 C32_1 18800.3322 304.237559 2158 2337 C32
# 2 PHS1 C32_2 110292.4765 1726.890063 2158 2337 C32
# 3 PHS1 C32_3 120180.4201 1853.264232 2158 2337 C32
# 4 PHS1 C32_4 60850.3883 961.701285 2158 2337 C32
# 5 PHS1 LA0407_1 897.0589 13.954113 2158 2337 LA0407
# 6 PHS1 LA0407_2 122.6336 1.973085 2158 2337 LA0407
|
d7f79bac38f3ea0f4550d5f4bc4ea2ca3ce0412d
|
397e6d164d5d37ba2fa12a6d0ea6faa4f8b1cb62
|
/AirlineAnalysis.R
|
edc21a8cb64feeeeca9a27b2202543c98ab4756b
|
[] |
no_license
|
acummings2020/Airline-Insights
|
8a3ea8c9888446e8a66cc627cfcc9f4cff9de41a
|
22e8df51e86ac394bc9a3c2e6f06c0431d538854
|
refs/heads/main
| 2023-02-01T11:52:38.082143
| 2020-12-15T17:39:42
| 2020-12-15T17:39:42
| 321,742,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,091
|
r
|
AirlineAnalysis.R
|
#The following URL is the location of the survey datafile
#NPS = %promoters - %detractors
library(arulesViz)
library(car)
library(carData)
library(caret)
library(ggmap)
library(readr)
library(tidyverse)
library(rjson)
library(jsonlite)
library(ggplot2)
library(ggmap)
library(maps)
library(mapproj)
library(mapdata)
library(dplyr)
library(corrplot)
# NOTE(review): hard-coded personal path; scripts should avoid setwd().
setwd("C:/Users/acumm/Downloads")
# https://drive.google.com/file/d/1G7f3LiSW-NmqsiBENwYd-nEfh4_9eV7D/view?usp=sharing
# Download the survey JSON from Google Drive (requires the gdown CLI).
system("gdown --id 1G7f3LiSW-NmqsiBENwYd-nEfh4_9eV7D")
#we can now read in the datafile - which is in JSON format
mydata.list <- jsonlite::fromJSON("completeSurvey.json")
survey <- data.frame(mydata.list)
surveyOriginal=survey
summary(survey$Loyalty)
#Data Validation: look for duplicated survey rows
count(unique(surveyOriginal, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE))
uniqueCount=distinct(surveyOriginal,.keep_all = FALSE)
repeats=duplicated(surveyOriginal)
range(survey)
# Derived columns: day of week, total delay, airport spend, detractor flag.
survey$weekday=as.factor(weekdays(as.Date(survey$Flight.date,'%m/%d/%Y')))
survey$DelayTotal=survey$Departure.Delay.in.Minutes+survey$Arrival.Delay.in.Minutes
survey$AirportExp=survey$Eating.and.Drinking.at.Airport+survey$Shopping.Amount.at.Airport
survey$Detractor=survey$Likelihood.to.recommend<8
survey$DelayGreaterThan5Mins=survey$DelayTotal>5
# Split respondents by likelihood-to-recommend: promoters (>8),
# passives (7 or 8), detractors (<7).
highRec=survey[survey$Likelihood.to.recommend>8,]
medRec=survey[survey$Likelihood.to.recommend==7,]
medRec=rbind(medRec,survey[survey$Likelihood.to.recommend==8,])
lowRec=survey[survey$Likelihood.to.recommend<7,]
survey$Detractor<-as.factor(survey$Detractor)
survey$weekdayNum <- recode(survey$weekday,
"Sunday"=0,
"Monday"=1,
"Tuesday"=2,
"Wednesday"=3,
"Thursday"=4,
"Friday"=5,
"Saturday"=6)
# Convert all to numeric
survey$DetractorL=as.logical(survey$Detractor)
survey$DetractorL=as.numeric(survey$DetractorL)
# Compare loyalty across the full sample and the low/high recommender groups.
# NOTE(review): survey, lowRec and highRec have different row counts, so
# cbind() recycles the shorter vectors here -- the group means below are
# therefore unreliable. TODO confirm intent.
dat <- cbind(var1=survey$Loyalty,var2=lowRec$Loyalty,var3=highRec$Loyalty)
dat <- as.data.frame(dat) # get this into a data frame as early as possible
barplot(sapply(dat,mean))
# Airport spend (food + shopping) per recommender group.
lowRec$AirportExp=lowRec$Eating.and.Drinking.at.Airport+lowRec$Shopping.Amount.at.Airport
medRec$AirportExp=medRec$Eating.and.Drinking.at.Airport+medRec$Shopping.Amount.at.Airport
highRec$AirportExp=highRec$Eating.and.Drinking.at.Airport+highRec$Shopping.Amount.at.Airport
hist(summary(lowRec$AirportExp))
ggplot(lowRec, aes(x=AirportExp)) + geom_histogram(binwidth=5)
boxplot(lowRec$AirportExp)$out
ggplot(lowRec, aes(x=AirportExp)) + geom_histogram(binwidth=5)
summary(highRec$AirportExp)
##potential factors so far, loyalty, delay, flights per year
summary(lowRec$Airline.Status)
View(lowRec$Airline.Status)
summary(lowRec$Class)
View(lowRec$Class)
# NOTE(review): the empty x aesthetic (aes(x=, ...)) errors at run time.
ggplot(lowRec,aes(x=,y=Likelihood.to.recommend))+geom_boxplot()#good tool!!
ggplot(survey,aes(x=survey$Class,y=Likelihood.to.recommend))+geom_boxplot()
ggplot(survey,aes(x=interaction(Gender,Class),y=Likelihood.to.recommend))+geom_histogram()#good tool!!
ggplot(survey,aes(x=interaction(Type.of.Travel,Class),y=Likelihood.to.recommend))+geom_boxplot()
# NOTE(review): x1..x11 are only defined further down, so running the script
# top-to-bottom fails here; the repeated positional filtering also
# desynchronises survey from the x vectors after the first step.
survey=survey[!is.na(x1),]
survey=survey[!is.na(x2),]
survey=survey[!is.na(x3),]
survey=survey[!is.na(x4),]
survey=survey[!is.na(x5),]
survey=survey[!is.na(x6),]
survey=survey[!is.na(x7),]#NA values
survey=survey[!is.na(x8),]#NA values
survey=survey[!is.na(x9),]
survey=survey[!is.na(x10),]#NA values
survey=survey[!is.na(x11),]
summary(survey$Flight.Distance)
# NOTE(review): medNPS is never defined in this file.
plot(medNPS$Loyalty~medNPS$Flight.Distance)
lowMiles=survey[survey$Flight.Distance<1000,]
summary(lowMiles$Loyalty)
# Shorthand variables for the regression models.
# NOTE(review): survey1 is only created below (after these lines).
y=survey1$Likelihood.to.recommend
x1=survey1$Age
x2=survey1$Gender
x3=survey1$Price.Sensitivity
x4=survey1$Loyalty
x5=survey1$Flights.Per.Year
x6=survey1$Class
x7=survey1$Departure.Delay.in.Minutes
x8=survey1$Arrival.Delay.in.Minutes
x9=survey1$Flight.Distance
x10=survey1$Flight.time.in.minutes
x11=survey1$Flight.cancelled
# NOTE(review): gdata / waittime / height / duration belong to a different
# (geyser) example and are undefined here.
fit1=lm(waittime~height+duration, data=gdata)
fit2=lm(waittime~1, data=gdata)
app.step.fw=step(fit2, direction="forward", scope=list(upper=fit1, lower=fit2))
surveyNumerics=cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,y)
surveyNumerics=surveyNumerics[complete.cases(surveyNumerics)]
# NOTE(review): 'final' is undefined; complete.cases() on a matrix should be
# used as a row filter, i.e. x[complete.cases(x), ].
final[complete.cases(final), ]
# Forward stepwise selection of likelihood-to-recommend predictors.
fit1=lm(y~x1+x2+x3+x4+x5+x6+x7+x8+x9+x10, data=survey1)
fit2=lm(y~1, data=survey1)
app.step.fw=step(fit2, direction="forward", scope=list(upper=fit1, lower=fit2))
# NOTE(review): chained assignment drops the free-text column into both
# survey and survey1 in a single statement.
survey1=survey<- subset (survey, select = -freeText)
survey1=survey1[complete.cases(survey1),]
# All-subsets regression diagnostics (leaps / car).
leapssubsets=regsubsets(y~x1+x2+x3+x4+x5+x6+x7+x8+x9+x10, data=survey1)
plot(leapssubsets, scale="adjr2")
# NOTE(review): 'False' is not an R literal (should be FALSE); the next call errors.
subsets(leapssubsets, statistic="adjr2", main="Adjusted R^2 plot" , legend= False, min.size=1)
subsets(leapssubsets, statistic="cp", main="Cp plot for all subset regression", legend=FALSE, min.size=1)
subsets(leapssubsets, statistic="adjr2", main="Adjusted R^2 plot" , legend= FALSE, min.size=1)
subsets(leapssubsets, statistic="cp", main="Cp plot for all subset regression", legend=FALSE, min.size=1)
##map data
################## Origin-state choropleth ###########################
survey$Origin.State <- tolower(survey$Origin.State)
US <- map_data("state")
map1 <- ggplot(survey,aes(map_id=survey$Origin.State))
map1 <- map1 + geom_map(map=US,aes(fill=survey$Likelihood.to.recommend))
map1 <- map1 +expand_limits(x=US$long,y=US$lat)
map1 <- map1 + coord_map() + ggtitle("Likelihood to recommend based on Origin state")
map1
#origin states of interest low NPS-OHIO,CONN,MARYLAND, South Dakota, Nevada
#origin states of interest high NPS-Cal,PA,Vermnont,MiSS
############ CORRELATION ##############
NumSurvey = select_if(survey,is.numeric)
round(cor(NumSurvey,use= "complete.obs"),2)
NumSurvey$Likelihood.to.recommend
library(ggplot2)
# NOTE(review): the dangling '+geom_' leaves an incomplete ggplot; the
# aes()/geom_point()/theme_minimal() chain that follows is a separate,
# free-standing expression that errors at run time.
ggplot(NumSurvey) +geom_
aes(x =Likelihood.to.recommend, y =Loyalty ) +
geom_point(colour = "#0c4c8a") +
theme_minimal()
# multiple scatterplots
pairs(dat[, c(1, 4, 6)])
# improved correlation matrix
corrplot(cor(NumSurvey,use="complete.obs"),
#method = "number",
type = "upper" # show only upper side
)
# NOTE(review): corrplot() has no 'data'/'method="pearson"'/'sig.level'
# interface of this shape -- this call looks copied from a correlation-test
# helper and will error.
corrplot(
data = NumSurvey,
method = "pearson",
sig.level = 0.05,
order = "original",
diag = FALSE,
type = "upper",
tl.srt = 75
)
################# Destination-state choropleth #########################
survey$Destination.State <- tolower(survey$Destination.State)
US <- map_data("state")
US$region=tolower(US$region)
map2 <- ggplot(survey,aes(map_id=survey$Destination.State))
map2 <- map2 + geom_map(map=US,aes(fill=survey$Likelihood.to.recommend))
map2 <- map2 + expand_limits(x=US$long,y=US$lat)
# NOTE(review): missing '+' before ggtitle() -- this line is a parse error,
# so the file cannot be sourced as-is.
map2 <- map2 +coord_map() ggtitle("Likelihood to recommend based on Destination State")
map2=map2+geom_label(aes(x=survey$dlong,y=survey$dlat),label=survey$Destination.State)
#coord_map()
# Mean label position per destination state for the state-name annotations.
snames <- aggregate(cbind(survey$dlong,survey$dlat) ~ survey$Destination.State, data=survey,FUN=function(x)mean(range(x)))
map2<-map2+coord_map()+geom_text(data=snames, aes(US$long, US$lat, label =US$region), size=1)
#destination states of interest low NPS,texas,PA,TENN, South carolina
#destination states of interest high NPS, WASH, Oregon Wisconsion Georgia
map2
######################### weekday analysis
# NOTE(review): stray symbol 'home' -- errors when sourced; probably leftover.
home
#WN MQ EV AS OO B6 OU AA DL HA US  (partner airline codes)
ggplot(survey[survey$Partner.Code=="US",],aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot(fill="lightblue")+
theme(axis.text.x=element_text( angle=60, hjust=1),text = element_text(size = 16))+labs(title="Weekday vs LTR with AIRLINE CODE=US",x="Weekday")
ggplot(survey[survey$Partner.Code=="US",],aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot()##same across everyday
ggplot(lowRec,aes(x=weekday,y=Loyalty))+geom_boxplot()##same across everyday
ggplot(survey[survey$Class=="Business",],aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot()##same across everyday
ggplot(lowRec,aes(x=weekday,y=Loyalty))+geom_boxplot()##same across everyday
ggplot(survey[survey$Price.Sensitivity==4,],aes(x=weekday,y=Likelihood.to.recommend))+geom_bar(fill="lightblue")+labs(title="Weekday vs LTR with Price Sen.=4",x="Weekday")
ggplot(survey[survey$Price.Sensitivity==4,],aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot(fill="lightblue")+theme(axis.text.x=element_text( angle=60, hjust=1),text = element_text(size = 16))+labs(title="Weekday vs LTR with Price Sen.=4",x="Weekday")
summary(survey[(survey$Price.Sensitivity==2) &(survey$weekday=="Tuesday"),])
summary(survey[(survey$Price.Sensitivity==2) &(survey$weekday=="Wednesday"),])
##same across everyday
ggplot(survey[survey$Price.Sensitivity==3,],aes(x=weekday,y=Loyalty))+geom_boxplot()##same across everyday
##when price sensitivity is lower, sunday seems like the day to encourage flying
##Price sensitivity almost a whole point higher on tuesday in terms of likelihood to recommend
ggplot(survey[survey$Price.Sensitivity==3,],aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot()##same across everyday
# Mean likelihood-to-recommend at each loyalty level.
ggplot(survey,aes(x=Loyalty,y=Likelihood.to.recommend))+
stat_summary(aes(y =survey$Likelihood.to.recommend ,group=1), fun.y=mean, colour="blue", geom="point",group=1)
#show trend in loyalty with likelihood to reccomend
ggplot(survey[survey$Airline.Status=="Platinum",],aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot(fill="lightblue")+labs(title="Weekday vs LTR with Airline Status=Platinum",x="Weekday")
summary(survey[(survey$Airline.Status=="Platinum") &(survey$weekday=="Monday"),])
summary(survey[(survey$Airline.Status=="Platinum") &(survey$weekday=="Wednesday"),])
ggplot(lowRec,aes(x=weekday,y=Loyalty))+geom_boxplot()##same across everyday
ggplot(medRec,aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot()##same across everyday
ggplot(medRec,aes(x=weekday,y=Loyalty))+geom_boxplot()##same across everyday
ggplot(highRec,aes(x=weekday,y=Likelihood.to.recommend))+geom_boxplot()##same across everyday
ggplot(highRec,aes(x=weekday,y=Loyalty))+geom_boxplot()##same across everyday
# NOTE(review): free-standing string literal; evaluated and discarded.
"Southeast Airlines Co."
#Destination.City Origin.City Airline.Status Age Gender
#[6] Price.Sensitivity Year.of.First.Flight Flights.Per.Year Loyalty Type.of.Travel
#[11] Total.Freq.Flyer.Accts Shopping.Amount.at.Airport Eating.and.Drinking.at.Airport Class Day.of.Month
#[16] Flight.date Partner.Code Partner.Name Origin.State Destination.State
#[21] Scheduled.Departure.Hour Departure.Delay.in.Minutes Arrival.Delay.in.Minutes Flight.cancelled Flight.time.in.minutes
#[26] Flight.Distance Likelihood.to.recommend olong olat dlong
#[31] dlat freeText weekday
survey$Flight.cancelled=as.factor(survey$Flight.cancelled)
####lm modeling p2
# NOTE(review): hard-coded row ranges for the train/test split.
train=survey[1:70000,]
test=survey[70001:88100,]
# NOTE(review): Flights.Per.Year appears twice in several formulas below.
lm6<-lm(survey$Detractor~Flight.time.in.minutes+Flights.Per.Year+Age+Gender+AirportExp+Price.Sensitivity+Airline.Status+Loyalty+Flight.Distance+DelayTotal+Flights.Per.Year+Type.of.Travel+Class,data=survey)
summary(lm6)
lm1<-lm(survey$Likelihood.to.recommend~Airline.Status+Price.Sensitivity+weekday+Flight.Distance+Scheduled.Departure.Hour+Class+DelayTotal+Loyalty+Age+Origin.State+Origin.City+Destination.City+Destination.State,data=survey)
lm2<-lm(survey$Likelihood.to.recommend~Age+Airline.Status+Price.Sensitivity+weekday+Flight.Distance+Scheduled.Departure.Hour+DelayTotal+Flights.Per.Year+Type.of.Travel,data=survey)
lm3<-lm(survey$Likelihood.to.recommend~Age+Airline.Status+Price.Sensitivity+Loyalty+Flight.Distance+DelayTotal+Flights.Per.Year+Type.of.Travel,data=survey)
lm4<-lm(survey$Likelihood.to.recommend~Flight.time.in.minutes+Flights.Per.Year+Age+Gender+AirportExp+Price.Sensitivity+Airline.Status+Loyalty+Flight.Distance+DelayTotal+Flights.Per.Year+Type.of.Travel+Class,data=survey)
#lm4<-lm(survey$DetractorL~Flight.time.in.minutes+Flights.Per.Year+Age+Gender+AirportExp+Price.Sensitivity+Airline.Status+Loyalty+Flight.Distance+DelayTotal+Flights.Per.Year+Type.of.Travel+Class,data=survey)
summary(lm1)
# Logistic regression for the detractor flag.
# NOTE(review): lgm2 fits a binomial family to the 0-10 likelihood score,
# which is not a binary response -- this warns/misfits.
lgm<-glm(survey$Detractor~Flight.time.in.minutes+Flights.Per.Year+Age+Gender+AirportExp+Price.Sensitivity+Airline.Status+Loyalty+Flight.Distance+DelayTotal+Flights.Per.Year+Type.of.Travel+Class,data=survey,family=binomial(link='logit'))
lgm2<-glm(survey$Likelihood.to.recommend~Flight.time.in.minutes+Flights.Per.Year+Age+Gender+AirportExp+Price.Sensitivity+Airline.Status+Loyalty+Flight.Distance+DelayTotal+Flights.Per.Year+Type.of.Travel+Class,data=survey,family=binomial(link='logit'))
summary(lgm)
confint(lgm)
# NOTE(review): wald.test() comes from the aod package, which is not loaded.
wald.test(b = coef(lgm), Sigma = vcov(lgm),Terms=1:16)
# NOTE(review): the next five lines are copied from a Titanic tutorial --
# 'model' is undefined, the second line is a parse error (presumably
# ifelse(fitted.results > 0.5, 1, 0)), and the quoted accuracy string is a
# free-standing literal.
fitted.results <- predict(model,newdata=subset(test,select=c(2,3,4,5,6,7,8)),type='response')
fitted.results 0.5,1,0)
misClasificError <- mean(fitted.results != test$Survived)
print(paste('Accuracy',1-misClasificError))
"Accuracy 0.842696629213483"
###
###Airline.StatusGold 1.090e+00 2.595e-02 41.982 < 2e-16 ***
#Airline.StatusPlatinum 5.786e-01 3.984e-02 14.524 < 2e-16 ***
# Airline.StatusSilver 1.706e+00 1.807e-02 94.393 < 2e-16 ***
#Price.Sensitivity -2.776e-01 1.305e-02 -21.265 < 2e-16 ***
####Destination.StatePennsylvania 4.264e-01 9.747e-02 4.374 1.22e-05 ***
#####Destination.StateUtah 5.223e-01 9.736e-02 5.364 8.14e-08 ***
###last thing associative rule mining with predicting detractors
# Prepare a factor-only copy of the survey for arules transactions.
surveyA=survey
surveyA=subset(surveyA,select=-c(Likelihood.to.recommend,DetractorL,Partner.Code,Destination.State,Origin.State))
prop.table(table(survey$Detractor,survey$Airline.Status))
surveyA$Origin.City<-as.factor(survey$Origin.City)
surveyA$Destination.City<-as.factor(survey$Destination.City)
surveyA$Type.of.Travel<-as.factor(survey$Type.of.Travel)
surveyA$DelayGreaterThan5Mins=as.factor(survey$DelayGreaterThan5Mins)
surveyA$weekday<-as.factor(survey$weekday)
surveyA$Gender<-as.factor(survey$Gender)
surveyA$Price.Sensitivity<-as.factor(survey$Price.Sensitivity)
surveyA$Class<-as.factor(survey$Class)
surveyA$Flight.cancelled<-as.factor(survey$Flight.cancelled)
surveyX<-as(surveyA,"transactions")
inspect(surveyX)
itemFrequency(surveyX)
itemFrequencyPlot(surveyX,support=.1)
View(surveyX)
# The transactions coercion one-hot encodes each factor level into its own
# sparse-matrix column (0 for absent levels).
# Mine rules predicting detractor status (TRUE and FALSE consequents).
rules1 <- apriori(surveyX,parameter=list(supp=0.008,conf=0.55),control=list(verbose=F),appearance=list(default="lhs",rhs=("Detractor=TRUE")))
rules2 <- apriori(surveyX,parameter=list(supp=0.008,conf=0.55),control=list(verbose=F),appearance=list(default="lhs",rhs=("Detractor=FALSE")))
inspectDT(rules1[1:15])
# NOTE(review): the three sorted rule sets are inspected here but only
# defined below; 'rules1sorted' (used in the first plot) is never defined.
inspectDT(rules1sortedLift[1:25])
inspectDT(rules1sortedConfidence[1:25])
inspectDT(rules1sprtedSupport[1:25])
rules1sortedLift <- sort(rules1, by="lift")
rules1sortedConfidence<-sort(rules1,by="confidence")
rules1sprtedSupport<-sort(rules1,by="support")
inspect(rules2[1:20])
plot(rules1sorted[1:6], method="graph", control=list(type="items"))
plot(rules1[1:20], method="paracoord", control=list(reorder=TRUE))
|
2f9b0114a1d9574c1f185d2c5dfc4c72e1a1d872
|
e1b8c775c04daac6026fde92c258af558fbefcdf
|
/shiny_data_update.R
|
1ad15021f01d7a9156dc272ba0ffc6b3621ff5c9
|
[] |
no_license
|
jansodoge/shiny_bbl_lineups
|
8c3cd7661e9cbd121de2129fc6bf94e37c6a58e4
|
01df18ec3843180f6b5fc3a2ccfc145041782023
|
refs/heads/master
| 2021-02-22T17:51:42.162706
| 2020-03-06T09:48:27
| 2020-03-06T09:48:27
| 245,381,865
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,910
|
r
|
shiny_data_update.R
|
# Pull in the helper definitions used below (metrics, play-by-play log
# parsing, 5-man lineup aggregation, player-level analysis).
source('basketball_metrics.R')
source('pbp_log_editing.R')
source('lineups_level_5_men.R')
source('player_level_analysis.R')
# Run this script to update the data the shiny app uses for lineups
### merge the files downloaded within the respective file (the file needs to be updated
### with downloading game files prior to this)
# Rebuild lineup_data.csv for the shiny app from the downloaded game logs.
#
# Reads every play-by-play csv in bbl_lineups/reg_season_batch_19_20,
# normalises column types, derives player/team/on-court information via the
# sourced helpers, and writes the aggregated 5-man lineup table.
# Game files must have been downloaded beforehand.
#
# Improvement: the original grew a data frame with bind_rows() inside the
# loop (quadratic copying); all logs are now read first and bound once.
update_dataset <- function(){
  files <- list.files("bbl_lineups/reg_season_batch_19_20", full.names = TRUE)
  logs <- lapply(files, function(log){
    data <- read.csv(log)
    # Normalise column types so the per-game frames bind cleanly.
    data$X7 <- as.integer(data$X7)
    data$team <- as.character(data$team)
    data$contrary_team <- as.character(data$contrary_team)
    data
  })
  data <- dplyr::bind_rows(logs)
  # to calculate the teams and players we take account of
  players <- players_vec(data)
  teams <- teams_vec(data)
  player_team_frame <- player_team_links(data)
  data <- players_on_court(data)
  #finally calculate data
  lineup_data <- lineups_query_5_men(data)
  write.csv(lineup_data, "lineup_data.csv")
}
###returns a dataframe with unique game_id per team and filters exceptions
get_games_per_team <- function(batch){
games_played <- batch %>%
count(game_id, team) %>%
select(game_id, team) %>%
count(team) %>%
filter(n > 10 & n < 100)
return(games_played)
}
# Total possessions per team (offensive + defensive), keeping only teams
# with more than 500 combined possessions.
#
# @param lineup_data data frame with Team, Offensive.Poss, Defensive.Poss
# @return data frame: Team, Defensive.Poss, Offensive.Poss, poss
#
# Improvement: summarize_at()/funs() are deprecated in current dplyr; the
# two separate summaries plus merge() are replaced by a single grouped
# summarise, preserving the same columns and Team-sorted order.
get_possesions_per_team <- function(lineup_data){
  poss <- lineup_data %>%
    group_by(Team) %>%
    summarize(`Defensive.Poss` = sum(`Defensive.Poss`),
              `Offensive.Poss` = sum(`Offensive.Poss`)) %>%
    ungroup() %>%
    mutate(poss = `Defensive.Poss` + `Offensive.Poss`) %>%
    filter(poss > 500)
  return(poss)
}
|
17a0bc6b5244a8d58fa3e393c72752e5f3f23962
|
c526975e8e3be6a12b329d8edb170dee18e8068a
|
/ChIP-SEQ_SOP/含帮助文档的代码示例/2_差异比较diffBind.r
|
89c2f51f1595bfc77e585e85cee398771693e116
|
[] |
no_license
|
wangqing207/SOP
|
13536407dc89a3cc223f8abf85075a1aab0efb00
|
8c383d8de61105f599eb8c348012b35214199dc7
|
refs/heads/master
| 2021-05-15T08:12:10.948946
| 2017-10-23T05:09:56
| 2017-10-23T05:09:56
| 107,934,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,423
|
r
|
2_差异比较diffBind.r
|
# Command line: exactly one argument, the DiffBind sample-sheet csv.
args <- commandArgs(T)
f_name<- args[1] ####csvFile of the information
# Fixed: the original test was `!length(args==1)`, which only quit when NO
# arguments were supplied (`args==1` is an elementwise comparison, and
# `!length(...)` is TRUE only for a zero-length vector), so extra arguments
# were silently ignored.
if (length(args) != 1) {
  q()
}
library(DiffBind)
#group1 <- 1
#group2 <- 2
######################################################################
# Build the DiffBind object from the MACS peak sample sheet.
tamoxifen = dba(sampleSheet=f_name,peakCaller="macs")
# tamoxifen = dba.count(tamoxifen,peaks="group_comparison_macs_peaks.xls",bCalledMasks=TRUE,minOverlap=1)
# Count reads in consensus peaks (peaks present in at least 40% of samples).
tamoxifen = dba.count(tamoxifen,minOverlap=0.4)
# tamoxifen = dba.contrast(tamoxifen, categories=3)
#tamoxifen = dba.contrast(tamoxifen, group1=group1, group2=group2)
# Contrast treatment vs control samples, then test with DESeq.
tamoxifen = dba.contrast(tamoxifen, tamoxifen$masks$treatment, tamoxifen$masks$control)
tamoxifen = dba.analyze(tamoxifen,method=DBA_DESEQ)
# Report every site (th=1, no fold cutoff) with normalised counts, as a
# plain data frame rather than a GRanges object.
tamoxifen.DB = dba.report(tamoxifen,method=DBA_DESEQ
,th=1, bUsePval=FALSE, fold=0,bNormalized=TRUE
,bCalled=T, bCounts=T,bCalledDetail=F
,DataType=DBA_DATA_FRAME )
#######
#group_comp_tb <- read.table("group_comparison_macs_peaks.xls",sep="\t",header=T,stringsAsFactors=F)
#
#tamoxifen.DB <- merge(tamoxifen.DB,group_comp_tb[,c("chr","start","end","embDNA1","endDNA1")]
# ,by.x=c("chr","start","end")
# ,by.y=c("chr","start","end"),all=T,sort=F)
#
# Sort by chromosome, then split into all / differential / hyper / hypo
# subsets (p <= 0.05; Fold > 0 = hyper, Fold < 0 = hypo).
tamoxifen.DB <- tamoxifen.DB[order(tamoxifen.DB[,"Chr"]),]
tamoxifen.DB_diff <- tamoxifen.DB[tamoxifen.DB[,"p-value"]<=0.05,]
tamoxifen.DB_hyper <- tamoxifen.DB[tamoxifen.DB[,"p-value"]<=0.05 & tamoxifen.DB[,"Fold"]>0,]
tamoxifen.DB_hypo <- tamoxifen.DB[tamoxifen.DB[,"p-value"]<=0.05 & tamoxifen.DB[,"Fold"]<0,]
# Rename coordinate columns to lower-case chr/start/end for the output files.
colnames(tamoxifen.DB)[1:3] <- c("chr","start","end")
colnames(tamoxifen.DB_diff)[1:3] <- c("chr","start","end")
colnames(tamoxifen.DB_hyper)[1:3] <- c("chr","start","end")
colnames(tamoxifen.DB_hypo)[1:3] <- c("chr","start","end")
# Write one tab-separated .xls per subset, named after the sample sheet.
write.table(tamoxifen.DB,file=paste("total_DiffBind_",gsub(".csv","",f_name),".xls",sep=""),sep="\t",col.names=T,row.names=F,quote=F)
write.table(tamoxifen.DB_diff,file=paste("diff_DiffBind_",gsub(".csv","",f_name),".xls",sep=""),sep="\t",col.names=T,row.names=F,quote=F)
write.table(tamoxifen.DB_hyper,file=paste("hyper_DiffBind_",gsub(".csv","",f_name),".xls",sep=""),sep="\t",col.names=T,row.names=F,quote=F)
write.table(tamoxifen.DB_hypo,file=paste("hypo_DiffBind_",gsub(".csv","",f_name),".xls",sep=""),sep="\t",col.names=T,row.names=F,quote=F)
|
4a3a0e82d638528d52b7680a44643146b2d86afa
|
3f02cb4dfd2e35fb7346830341e29df511f0137e
|
/R/derive_var_atirel.R
|
e6e00845ba08edd3189b134d6e57e91abdc59ed1
|
[] |
no_license
|
rajkboddu/admiral
|
ce08cb2698b62ca45ba6c0e8ed2ac5095f41b932
|
ffbf10d7ffdda1c997f431d4f019c072217188b1
|
refs/heads/master
| 2023-08-11T11:14:44.016519
| 2021-09-08T10:24:45
| 2021-09-08T10:24:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,921
|
r
|
derive_var_atirel.R
|
#' Derive Time Relative to Reference
#'
#' Derives the variable `ATIREL` to CONCOMITANT, PRIOR, PRIOR_CONCOMITANT or NULL
#' based on the relationship of cm Analysis start/end date/times to treatment start date/time
#'
#' @param dataset Input dataset
#' The variables `TRTSDTM`, `ASTDTM`, `AENDTM` are expected
#' @param flag_var Name of the variable with Analysis Start Date Imputation Flag
#' @param new_var Name of variable to create
#'
#' @details `ATIREL` is set to:
#' - null, if Datetime of First Exposure to Treatment is missing,
#' - "CONCOMITANT", if the Analysis Start Date/Time is greater than or equal to Datetime of
#' First Exposure to Treatment,
#' - "PRIOR", if the Analysis End Date/Time is not missing and less than
#' the Datetime of First Exposure to Treatment,
#' - "CONCOMITANT" if the date part of Analysis Start Date/Time is equal to
#' the date part of Datetime of First Exposure to Treatment and
#' the Analysis Start Time Imputation Flag is 'H' or 'M',
#' - otherwise it is set to "PRIOR_CONCOMITANT".
#'
#' @author Teckla Akinyi
#'
#' @return A dataset containing all observations and variables of the input
#' dataset and additionally the variable specified by the `new_var` parameter.
#'
#' @keywords ADaM Relationship Var ATIREL
#'
#' @export
#'
#' @examples
#' library(dplyr, warn.conflicts = FALSE)
#' adcm <- tibble::tribble(
#'   ~STUDYID, ~USUBJID, ~TRTSDTM, ~ASTDTM, ~AENDTM, ~ASTTMF,
#'   "TEST01", "PAT01", "2012-02-25 23:00:00", "2012-02-28 19:00:00", "2012-02-25 23:00:00", "",
#'   "TEST01", "PAT01", "", "2012-02-28 19:00:00", "", "",
#'   "TEST01", "PAT01", "2017-02-25 23:00:00", "2013-02-25 19:00:00", "2014-02-25 19:00:00", "",
#'   "TEST01", "PAT01", "2017-02-25 16:00:00", "2017-02-25 14:00:00", "2017-03-25 23:00:00", "m",
#'   "TEST01", "PAT01", "2017-02-25 16:00:00", "2017-02-25 14:00:00", "2017-04-29 14:00:00", ""
#' ) %>% dplyr::mutate(
#'   TRTSDTM = lubridate::as_datetime(TRTSDTM),
#'   ASTDTM = lubridate::as_datetime(ASTDTM),
#'   AENDTM = lubridate::as_datetime(AENDTM)
#' )
#'
#' derive_var_atirel(
#'   dataset = adcm,
#'   flag_var = ASTTMF,
#'   new_var = ATIREL
#' )
#'
derive_var_atirel <- function(dataset,
                              flag_var,
                              new_var) {
  # Input checks: both variable arguments must be unquoted symbols (captured
  # as quosures before the data-frame check so they can be listed as required
  # variables), and the new variable must not already exist in the dataset.
  flag_var <- assert_symbol(enquo(flag_var))
  assert_data_frame(dataset,
    required_vars = vars(STUDYID, USUBJID, TRTSDTM, ASTDTM, AENDTM, !!flag_var)
  )
  new_var <- assert_symbol(enquo(new_var))
  warn_if_vars_exist(dataset, quo_text(new_var))
  # Derive ATIREL. case_when() is evaluated top-down, so the order of the
  # branches implements the precedence documented in @details: missing
  # treatment start -> NA; start on/after treatment -> CONCOMITANT; ended
  # before treatment -> PRIOR; same start date with an H/M time-imputation
  # flag -> CONCOMITANT; everything else -> PRIOR_CONCOMITANT.
  dataset %>%
    mutate(!!new_var :=
             case_when(
               is.na(TRTSDTM) ~ NA_character_,
               ASTDTM >= TRTSDTM ~ "CONCOMITANT",
               !is.na(AENDTM) & AENDTM < TRTSDTM ~ "PRIOR",
               date(ASTDTM) == date(TRTSDTM) & toupper(!!flag_var) %in% c("H", "M") ~ "CONCOMITANT",
               TRUE ~ "PRIOR_CONCOMITANT"
             ))
}
|
c5287060ac140b773fe73eae347db2bbd0cf437f
|
9327a27882b8ff73337462e9ecb8897006efab42
|
/GenerateToyDataSet.R
|
77f01c75ac12b3876a4b562f85ed6151b5a2ce2c
|
[] |
no_license
|
RJHKnight/ACAlpha
|
92cee9c2616c61e81af54a1ac461e8d8263c69a7
|
30e0942710a9340b08bfec87b081163457e07479
|
refs/heads/master
| 2021-04-28T16:49:37.179391
| 2020-03-15T06:35:31
| 2020-03-15T06:35:31
| 120,616,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,876
|
r
|
GenerateToyDataSet.R
|
library(dplyr)
# n hidden core factors, all following a random path of t_max steps
# m stocks, each following a path determined by
#   beta_n * n + epsilon
# Initially make beta_n time invariant, then we can look at time-varying betas.
# For simplicity, we will not introduce any scale to the n_t.
# beta_n is randomly selected i.i.d. on [-2, 2].
# Variance of the n random walks (sigma_n) grows with the factor index.
#
# n     = number of hidden factors
# m     = number of stocks
# t_max = number of timesteps
#
# Returns a t_max x m data.frame of stock returns.
generateToyDataSet <- function(n = 5, m = 200, t_max = 1000) {
  # Calibration.
  # BUG FIX: sigma_n was hard-coded to c(1,2,3,4,5), so any n != 5 produced a
  # dimension mismatch between factorReturns (5 columns) and betas (n columns).
  # seq_len(n) reproduces the old behaviour exactly for the default n = 5.
  n_init <- 100
  sigma_n <- seq_len(n)

  # One random-walk level path per hidden factor
  # (t_max + 1 levels -> t_max returns after differencing).
  factorLevels <- sapply(sigma_n, function(x) generateRandomWalk(num_steps = t_max + 1,
                                                                 n_init = n_init,
                                                                 sigma = x))
  factorReturns <- apply(factorLevels, 2, getReturn)

  # Time-invariant factor loadings, i.i.d. uniform on [-2, 2]; one row per stock.
  betas <- matrix(runif(m * n, -2, 2), ncol = n)

  # Stock return at each timestep = loadings . factor returns (+ noise).
  allReturns <- matrix(rep(NA, t_max * m), ncol = m)
  for (i in seq_len(t_max)) {
    thisFactorReturn <- factorReturns[i, ]
    allReturns[i, ] <- apply(betas, 1, function(x) getOneReturn(thisFactorReturn, x))
  }

  allReturns <- data.frame(allReturns)
  colnames(allReturns) <- seq_along(allReturns)
  return(allReturns)
}
# TODO: Ensure this doesn't go below 0?
# Random-walk level path of `num_steps` points starting at `n_init`.
# Increments are i.i.d.: uniform on [-sigma, sigma] by default, or
# normal with sd `sigma` when uniform = FALSE.
generateRandomWalk <- function(num_steps, n_init = 100, sigma = 1, uniform = TRUE) {
  if (uniform) {
    increments <- runif(num_steps, -sigma, sigma)
  } else {
    increments <- rnorm(num_steps, 0, sigma)
  }
  n_init + cumsum(increments)
}
# Simple one-period percentage returns from a level series; the leading NA
# produced by the shift is dropped, so the result has length(x) - 1.
getReturn <- function(x) {
  prev <- lag(x)  # dplyr::lag (dplyr is attached at the top of this file)
  pct_change <- (x - prev) / prev
  pct_change[-1]
}
# One stock's single-period return: dot product of loadings with the factor
# returns, plus a single Gaussian noise draw with standard deviation `noise`.
# BUG FIX: the original called rnorm(0, sd = noise) — an *empty* draw (the
# first argument of rnorm is n, not the mean) — and then added the constant
# `noise` instead of the unused `noiseTerm`, so every return was shifted by a
# deterministic +0.01 rather than perturbed by random noise.
getOneReturn <- function(betas, factorLevel, noise = 0.01) {
  noiseTerm <- rnorm(1, mean = 0, sd = noise)
  sum(betas * factorLevel) + noiseTerm
}
|
8a8377a751d9819891751b0ca0c49cf5ac108c07
|
d1de1a007fd386b28c6ea2fbf82785265d3ce292
|
/Week-4/Programming Assignment 3/ProgrammingAssignment-3-Quiz.R
|
9ec69b1231fa6203c34c1da0bb4e67968e918947
|
[
"MIT"
] |
permissive
|
JohamSMC/R-Programming-Course
|
db7e415b6cd970aac990a2fd90bb97919b81d41b
|
79eacac46c69b2b3398ebcdb5665203820b40599
|
refs/heads/main
| 2023-03-07T15:09:43.555333
| 2021-02-19T18:01:58
| 2021-02-19T18:01:58
| 340,447,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
ProgrammingAssignment-3-Quiz.R
|
# Quiz answers for Programming Assignment 3 (R Programming, week 4).
# Sources the three assignment functions, then runs each quiz query; the
# comment block after each call records the expected console output.
source('../best.R')
source('../rankall.R')
source('../rankhospital.R')

# Best hospital per state for a given outcome.
best("SC", "heart attack")
# > best("SC", "heart attack")
# [1] "MUSC MEDICAL CENTER"
best("NY", "pneumonia")
# > best("NY", "pneumonia")
# [1] "MAIMONIDES MEDICAL CENTER"
best("AK", "pneumonia")
# > best("AK", "pneumonia")
# [1] "YUKON KUSKOKWIM DELTA REG HOSPITAL"

# Hospital at a given rank ("worst" or a numeric rank) within one state.
rankhospital("NC", "heart attack", "worst")
# > rankhospital("NC", "heart attack", "worst")
# [1] "WAYNE MEMORIAL HOSPITAL"
rankhospital("WA", "heart attack", 7)
# > rankhospital("WA", "heart attack", 7)
# [1] "YAKIMA VALLEY MEMORIAL HOSPITAL"
rankhospital("TX", "pneumonia", 10)
# > rankhospital("TX", "pneumonia", 10)
# [1] "SETON SMITHVILLE REGIONAL HOSPITAL"
rankhospital("NY", "heart attack", 7)
# > rankhospital("NY", "heart attack", 7)
# [1] "BELLEVUE HOSPITAL CENTER"

# Rank across all states, then pick out the row for one state.
r <- rankall("heart attack", 4)
as.character(subset(r, state == "HI")$hospital)
# > r <- rankall("heart attack", 4)
# > as.character(subset(r, state == "HI")$hospital)
# [1] "CASTLE MEDICAL CENTER"
r <- rankall("pneumonia", "worst")
as.character(subset(r, state == "NJ")$hospital)
# > r <- rankall("pneumonia", "worst")
# > as.character(subset(r, state == "NJ")$hospital)
# [1] "BERGEN REGIONAL MEDICAL CENTER"
r <- rankall("heart failure", 10)
as.character(subset(r, state == "NV")$hospital)
# > r <- rankall("heart failure", 10)
# > as.character(subset(r, state == "NV")$hospital)
# [1] "RENOWN SOUTH MEADOWS MEDICAL CENTER"
|
1414744bf8a7f17750c09d74df408cb99b67931f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mets/R/pmvn.R
|
70dd30f6f2a0e999fcb2eee6d8b442ba5d0f5bc9
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,515
|
r
|
pmvn.R
|
##' Bivariate normal CDF P(X1 <= upper[1], X2 <= upper[2]) with correlation
##' `rho`. When a covariance matrix `sigma` is supplied instead, the
##' correlation is extracted from it and the upper limits are standardized by
##' the marginal standard deviations.
##' @export
pbvn <- function(upper, rho, sigma) {
  if (!missing(sigma)) {
    rho <- cov2cor(sigma)[1, 2]
    upper <- upper / sqrt(diag(sigma))
  }
  # Delegates to the compiled bivariate-normal CDF shipped with the
  # mets package.
  .Call("bvncdf", a = upper[1], b = upper[2], r = rho, PACKAGE = "mets")
}
## lower <- rbind(c(0,-Inf),c(-Inf,0))
## upper <- rbind(c(Inf,0),c(0,Inf))
## mu <- rbind(c(1,1),c(-1,1))
## sigma <- diag(2)+1
## pmvn(lower=lower,upper=upper,mu=mu,sigma=sigma)
##' Multivariate normal rectangle probabilities. Each row of `lower`/`upper`
##' (and of `mu`/`sigma` when given row-wise) defines one integration problem;
##' missing bounds default to -Inf/Inf. `sigma` may hold full covariance rows
##' (p^2 entries per row once bound) or just the p*(p-1)/2 correlations when
##' `cor = TRUE`.
##' @export
pmvn <- function(lower, upper, mu, sigma, cor = FALSE) {
  if (missing(sigma)) stop("Specify variance matrix 'sigma'")
  # Fill in whichever bound is absent with an infinite one of matching shape.
  if (missing(lower)) {
    if (missing(upper)) stop("Lower or upper integration bounds needed")
    lower <- upper
    lower[] <- -Inf
  }
  p <- ncol(rbind(lower))
  if (missing(upper)) {
    upper <- lower
    upper[] <- Inf
  }
  if (missing(mu)) mu <- rep(0, p)
  sigma <- rbind(sigma)
  # A sigma row may carry either p columns or the p*(p-1)/2 correlations.
  ncor <- p * (p - 1) / 2
  if (ncol(sigma) != p && ncol(sigma) != ncor)
    stop("Incompatible dimensions of mean and variance")
  if (ncol(rbind(lower)) != p || ncol(rbind(upper)) != p)
    stop("Incompatible integration bounds")
  # Compiled routine in the mets package does the actual integration.
  res <- .Call("pmvn",
               lower = rbind(lower),
               upper = rbind(upper),
               mu = rbind(mu),
               sigma = rbind(sigma),
               cor = as.logical(cor[1]))
  as.vector(res)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.