blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
816b827c308cc1687088551352c258213188b85b
|
f9f328aaccae52925561d4dc48f419f7829da057
|
/Day2/2c_Uncertainty/likeprof/agestructured/AS.R
|
5895056440c023218877c8ba187c8d6786a48a81
|
[] |
no_license
|
commfish/ADMB_course
|
70d4fb9a61aefd116b48b90e2f05cd93a49b3c6e
|
20e96eccddea6093b438a253ad94052e231e8071
|
refs/heads/master
| 2021-04-15T04:42:59.179600
| 2018-03-19T18:46:59
| 2018-03-19T18:46:59
| 126,224,729
| 0
| 1
| null | 2018-03-21T18:39:37
| 2018-03-21T18:39:36
| null |
UTF-8
|
R
| false
| false
| 504
|
r
|
AS.R
|
## Likelihood-profile demo for the age-structured ADMB model "AS1".
## NOTE(review): hard-coded Windows path — adjust `wd` for other machines.
wd <- "C:\\merrill\\ADMB_course"
project_dir <- file.path(wd, "Day2", "2c_Uncertainty", "likeprof", "agestructured")
setwd(project_dir)
### read in results from report file using code in tools.R
## `readMat()` used below comes from tools.R, not from a package.
source(file.path(wd, "tools.R"))
library(R2admb)
## build the ADMB executable, then run it with likelihood profiling on
compile_admb("AS1")
run_admb("AS1", extra.args="-lprof")
## read in likelihood profile
## column 1 = profiled parameter (log q), column 2 = profile likelihood
prof <- readMat(string="Profile likelihood", file="logq_pro.plt", nrow=74)
logq_vec <- prof[,1]
like_vec <- prof[,2]
## convert likelihood to negative log-likelihood for plotting
nll_vec <- -log(like_vec)
plot(logq_vec, nll_vec)
|
fcb7731b0f40b95e44ea8a0cff928af05a7d4fa4
|
8ec8caa9c6fb399082090da94d7fa948b6764417
|
/man/updateModel.Rd
|
c66ca7135eaa736eec368fc4d1495aab684804f9
|
[] |
no_license
|
albert3858/mlr
|
05f910fb35f4c9467a91d0732c86a687faa00eee
|
e410f6ec1d33926b5e4bf1d14552694337fab5c9
|
refs/heads/master
| 2021-01-22T02:28:28.186539
| 2016-11-03T05:32:54
| 2016-11-03T05:32:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,651
|
rd
|
updateModel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updateModel.R
\name{updateModel}
\alias{updateModel}
\title{Update a model}
\usage{
updateModel(object, task, newdata, subset, weights = NULL, ...)
}
\arguments{
\item{object}{[\code{\link{WrappedModel}}]\cr
Wrapped model, result of \code{\link{train}}.}
\item{task}{[\code{\link{Task}}]\cr
The task.}
\item{newdata}{[\code{data.frame}]\cr
New observations with which to update the model.}
\item{subset}{[\code{integer} | \code{logical}]\cr
Selected cases. Either a logical or an index vector.
By default all observations are used.}
\item{weights}{[\code{numeric}]\cr
Optional, non-negative case weight vector to be used during fitting.
If given, must be of same length as \code{subset} and in corresponding order.
By default \code{NULL} which means no weights are used unless specified in the task (\code{\link{Task}}).
Weights from the task will be overwritten.}
\item{...}{[any]\cr
Currently ignored.}
}
\value{
[\code{\link{WrappedModel}}].
}
\description{
Update a fitted model with new data.
The row names of the input \code{task} or \code{newdata} are preserved in the output.
}
\examples{
dat <- arima.sim(model = list(ar = c(.5,.2), ma = c(.4), order = c(2,0,1)), n = 100)
times <- (as.POSIXlt("1992-01-14")) + lubridate::days(1:100)
dat <- xts::xts(dat,order.by = times, frequency = 1L)
colnames(dat) <- c("arma_test")
Timeregr.task = makeForecastRegrTask(id = "test", data = dat,
target = "arma_test", frequency = 1L)
arm = makeLearner("fcregr.Arima", h = 1)
trn = train(arm,Timeregr.task, subset = 1:99)
armNew =updateModel(trn, Timeregr.task, newdata = dat[100,])
}
|
ccbb27a4d0b85f3d542d8dd80fc9c66789e849ba
|
d2d436a02b15ad5e529c58f658d644d3f4b04346
|
/hospitaldata/best.R
|
39977b7d0b6c0891be1cff73f97b0ed42a5b147e
|
[] |
no_license
|
unullmass/RProgramming
|
8bcfd2257752b2ca017905f4c9917fec12619df4
|
54966a9f02a88868fb4b0cba66124581f90c9ded
|
refs/heads/master
| 2021-01-10T02:06:37.338850
| 2015-10-01T04:39:41
| 2015-10-01T04:39:41
| 43,478,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,706
|
r
|
best.R
|
# used to read data from a file containing information on the mortality rate of hospitals across the US
# and return the hosiptal in a chosen state with the lowest mortality rate with respect to a certain
# disease
# Find the hospital in `state` with the lowest 30-day mortality rate for the
# given `outcome` (e.g. "heart attack"), using the CMS outcome-of-care data.
# Errors with "invalid state" / "invalid outcome" on bad input.
best <- function(state, outcome) {
  ## Read outcome data (fixed: `read()` is not an R function — use read.csv)
  outcome_data <-
    read.csv(
      '~/Documents/Coursera/RProgramming/Assignment3/rprog_data_ProgAssignment3-data/outcome-of-care-measures.csv'
    )
  ## Read hospital data
  hospital_data <-
    read.csv(
      '~/Documents/Coursera/RProgramming/Assignment3/rprog_data_ProgAssignment3-data/hospital-data.csv'
    )
  ## Check that state is valid
  ## (fixed: referenced undefined objects `hospcare` and `outcome` — the
  ## tables read above are `hospital_data` and `outcome_data`)
  all_states <-
    sort(unique(c(
      as.character(unique(hospital_data$State)),
      as.character(unique(outcome_data$State))
    )))
  if (!(state %in% all_states)) {
    stop("invalid state")
  }
  ## Build the list of valid outcomes from column headers 11 onward;
  ## headers look like "...Rates.from.<Outcome>", keep the part after ".from."
  all_outcomes <- character()
  for (ocol in names(outcome_data)[seq(11, ncol(outcome_data))]) {
    outcome_col <- unlist(strsplit(x = ocol, split = ".from."))[2]
    all_outcomes <- c(all_outcomes, outcome_col)
  }
  all_outcomes <- unique(all_outcomes)
  if (!(outcome %in% tolower(all_outcomes))) {
    stop("invalid outcome")
  }
  ## Find which header the requested outcome matches (case/period-insensitive)
  matched_outcome <- character()
  for (a in all_outcomes) {
    orig_a <- a
    # headers use periods where the user types spaces
    a <- tolower(gsub(a, pattern = "\\.", replacement = " "))
    if (tolower(outcome) %in% a) {
      matched_outcome <- orig_a
    }
  }
  ## Keep state, hospital name, and all 30-day mortality columns
  ## (fixed: subsetted the undefined `outcome` object instead of outcome_data)
  mortality_data <-
    subset.data.frame(
      x = outcome_data,
      select = c('State', 'Hospital.Name',
                 names(outcome_data)[grepl(
                   pattern = "^Hospital.30.Day.Death..Mortality..Rates.from.",
                   x = names(outcome_data))])
    )
  ## Split the data by state and pull out the chosen one
  ## (fixed: `mort_statewise$state` looked up a literal column named "state";
  ## use [[ ]] to index by the value of the `state` argument)
  mort_statewise <- split(mortality_data, mortality_data$State)
  mort_for_state <- mort_statewise[[state]]
  ## Column header holding the matched outcome's rate
  matched_mort_col <-
    paste("Hospital.30.Day.Death..Mortality..Rates.from.", matched_outcome, sep = "")
  ## Drop rows without numeric data
  ## (fixed: `$matched_mort_col` was a literal-name lookup returning NULL)
  rates <- suppressWarnings(as.numeric(as.character(
    mort_for_state[[matched_mort_col]]
  )))
  mort_for_state <- mort_for_state[!is.na(rates), ]
  rates <- rates[!is.na(rates)]
  ## Sort numerically by rate (ascending), breaking ties alphabetically
  ## (fixed: ordering the raw character column sorted "10.2" before "9.1")
  top_hospital <-
    mort_for_state[order(rates, mort_for_state$Hospital.Name), ][1, 'Hospital.Name']
  top_hospital
}
|
06e5fbfe2052eff8fe796d63bba0ac7e5a1d2051
|
f1ebd5faf1ec955dcc31dc416a4785254f4646f4
|
/src/RDSTK/R/functions.R
|
9c4803d40e0ad101daea29a8eb3432e353d5fafa
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
andrewheiss/RDSTK
|
f3da4f0c63f6782ae4be9f0b0fe18b00e6261fc3
|
4cc9fadaf115eb13c25c8b4bf94e7d5e86bf7f1d
|
refs/heads/master
| 2021-01-20T23:40:49.501360
| 2013-02-01T02:23:27
| 2013-02-01T02:23:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,415
|
r
|
functions.R
|
## Andrew Heiss
## Date: 31 January 2013
## Include project-specific functions in this file
# Package load hook: seed the API base-URL option unless the user set it already.
.onLoad <- function(libname, pkgname) {
  if (is.null(getOption("RDSTK_api_base"))) {
    options(RDSTK_api_base = "http://www.datasciencetoolkit.org")
  }
}
# Geocode a street address via the DSTK /street2coordinates endpoint and
# return the parsed result as a data.frame (first column = full.address).
street2coordinates <- function(address, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/street2coordinates/")
  response <- getURL(paste0(endpoint, URLencode(address)), curl = session)
  parsed <- ldply(fromJSON(response), data.frame)
  names(parsed)[1] <- "full.address"
  return(parsed)
}
# Geolocate an IP address via the DSTK /ip2coordinates endpoint and return
# the parsed result as a data.frame (first column = ip.address).
ip2coordinates <- function(ip, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/ip2coordinates/")
  response <- getURL(paste0(endpoint, URLencode(ip)), curl = session)
  parsed <- ldply(fromJSON(response), data.frame)
  names(parsed)[1] <- "ip.address"
  return(parsed)
}
# Look up political boundaries for a latitude/longitude pair.
# Returns the raw JSON response string (not parsed).
coordinates2politics <- function(latitude, longitude, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/coordinates2politics/")
  # "%2c" is the URL-encoded comma separating the two coordinates
  return(getURL(paste0(endpoint, latitude, "%2c", longitude), curl = session))
}
# Split free text into sentences via the DSTK /text2sentences endpoint.
# The text is sent as a POST body; returns the parsed JSON response.
text2sentences <- function(text, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/text2sentences/")
  reader <- dynCurlReader()
  curlPerform(postfields = text, url = endpoint, post = 1L,
              writefunction = reader$update, curl = session)
  return(fromJSON(reader$value()))
}
# Extract people mentions from free text via the DSTK /text2people endpoint.
# The text is sent as a POST body; returns the parsed response as a data.frame.
text2people <- function(text, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/text2people/")
  reader <- dynCurlReader()
  curlPerform(postfields = text, url = endpoint, post = 1L,
              writefunction = reader$update, curl = session)
  return(ldply(fromJSON(reader$value()), data.frame))
}
# Strip markup from an HTML document via the DSTK /html2text endpoint.
# The HTML is sent as a POST body; returns the parsed JSON response.
html2text <- function(html, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/html2text/")
  reader <- dynCurlReader()
  curlPerform(postfields = html, url = endpoint, post = 1L,
              writefunction = reader$update, curl = session)
  return(fromJSON(reader$value()))
}
# Extract time expressions from free text via the DSTK /text2times endpoint.
# The text is sent as a POST body; returns the parsed response as a data.frame.
text2times <- function(text, session=getCurlHandle()) {
  endpoint <- paste0(getOption("RDSTK_api_base"), "/text2times/")
  reader <- dynCurlReader()
  curlPerform(postfields = text, url = endpoint, post = 1L,
              writefunction = reader$update, curl = session)
  return(ldply(fromJSON(reader$value()), data.frame))
}
|
29ae8992a46b5f4a13de78568603538708192a2e
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/det_downdate-test.R
|
efb43c455affe56dcd811a007d16f666202e4d85
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
det_downdate-test.R
|
# RcppDeepState capture wrapper: logs each (A, v, det) argument set into the
# shared "data.env" environment, then forwards the call to the compiled
# routine in the Benchmarking package.
function (A, v, det)
{
# fetch the logging environment created elsewhere by the test harness
e <- get("data.env", .GlobalEnv)
# append this call's arguments to the end of the "det_downdate" log list
e[["det_downdate"]][[length(e[["det_downdate"]]) + 1]] <- list(A = A,
v = v, det = det)
# forward to the native implementation; its result is the return value
.Call("_Benchmarking_det_downdate", A, v, det)
}
|
4a27e6a02632ba6d7b5acfce5a07994020a484b9
|
be7a874c45d83dd27c0754cecc8f89c41219ca11
|
/Test.R
|
4c53df14d247358889887fc3db0c80a4ad68384a
|
[] |
no_license
|
Prerna2602/R-Lab
|
6254b342a038efbb41ea56a23e64f5ef87222723
|
8dc0c6b27a5fe5de4594af2301ef4848162a0192
|
refs/heads/master
| 2023-04-21T19:19:54.649237
| 2021-05-09T06:05:20
| 2021-05-09T06:05:20
| 365,680,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,226
|
r
|
Test.R
|
# Reading the dataframe
data <- read.csv("C:\\Users\\KIIT\\Documents\\RLab\\BostonHousing.csv")
data
# Removing rows with missing or placeholder values
is.na(data)
which(is.na(data))
data[data == "null"] <- NA
data <- na.omit(data)
data
# Min-max normalisation to [0, 1]
nor <- function(x) {
  return ((x - min(x)) / (max(x) - min(x)))
}
data$age_norm <- nor(data$age)
data$dis_norm <- nor(data$dis)
# fixed: was `datarad_norm <- nor(data$rad)` — the missing `$` created a
# stray global variable instead of adding the rad_norm column
data$rad_norm <- nor(data$rad)
data$tax_norm <- nor(data$tax)
data$ptratio_norm <- nor(data$ptratio)
data$lstat_norm <- nor(data$lstat)
data$medv_norm <- nor(data$medv)
data$crim_norm <- nor(data$crim)
data$zn_norm <- nor(data$zn)
data$indus_norm <- nor(data$indus)
data$chas_norm <- nor(data$chas)
data$nox_norm <- nor(data$nox)
data$rm_norm <- nor(data$rm)
# Keep only the columns after the first ten raw ones
dataframe <- data[ -c(1,2:10) ]
print(dataframe)
# CorrelationMatrix
# NOTE(review): cor() requires all-numeric columns — if the "null"
# placeholders coerced any column to character on import, convert it first.
cor_mat <- cor(data)
print(cor_mat)
# ScatterMatrix
png(file = "scatterMatrix.jpg")
pairs(~crim+zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+b+lstat+medv, data=data, main="ScatterMatrix")
dev.off()
# Using Correlation and Scatter matrix identify the variables which has clear dependency with MEDV attribute.
# NOTE(review): passing the correlation matrix as `data` plots correlation
# values against each other rather than raw observations — confirm intended.
png(file ="scatterandcorelation.jpg")
pairs(~crim+zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+b+lstat+medv, data=cor_mat, main="ScatterMatrix/CorrMatrix")
dev.off()
|
e9c7f7c434efa92ba8e5aaabb525ea0f234ab282
|
e1a2ea17ed065da7b63411eb1a277301ad811f89
|
/DataDigestion.R
|
217f29accb33e46ec94d8992889411113eba0f1c
|
[] |
no_license
|
aribcarter/StopAndFrisk
|
bfd5a24491068c0d01c43f96db937902cd408e8a
|
6964cfca15862a9f6a0114975cd2f54fee722c40
|
refs/heads/master
| 2022-11-19T08:45:24.190564
| 2020-07-24T03:38:14
| 2020-07-24T03:38:14
| 276,275,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,728
|
r
|
DataDigestion.R
|
library(readxl)
library(dplyr)
# Download a year's stop-question-frisk data published as a zipped CSV
# (the 2003-2014 format) and return it as a data.frame.
GetZippedCsvFromNypd <- function(year) {
  url <- paste0("https://www1.nyc.gov/assets/nypd/downloads/zip/analysis_and_planning/stop-question-frisk/sqf-",
                year, "-csv.zip")
  csvName <- paste0(year, ".csv")
  zipPath <- tempfile()
  download.file(url, zipPath)
  result <- read.table(unz(zipPath, csvName), header = TRUE, sep = ",", fill = TRUE)
  unlink(zipPath)
  result
}
# Download a year's stop-question-frisk data published as a plain CSV
# (the 2015-2016 format) and return it as a data.frame.
GetCsvFromNypd <- function(year) {
  url <- paste0("https://www1.nyc.gov/assets/nypd/downloads/excel/analysis_and_planning/stop-question-frisk/sqf-",
                year, ".csv")
  tmp <- tempfile()
  download.file(url, tmp)
  result <- read.table(tmp, header = TRUE, sep = ",", fill = TRUE)
  unlink(tmp)
  result
}
# Download a year's stop-question-frisk data published as an Excel workbook
# (the 2017-2019 format) and return it as read by readxl.
GetExcelFromNypd <- function(year) {
  url <- paste0("https://www1.nyc.gov/assets/nypd/downloads/excel/analysis_and_planning/stop-question-frisk/sqf-",
                year, ".xlsx")
  # the 2017 workbook lives at a one-off corrected URL
  if (year == 2017) {
    url <- "https://www1.nyc.gov/assets/nypd/downloads/excel/analysis_and_planning/stop-question-frisk/csi-2947-2019-sqf-cy2017-updated1-9-2020.xlsx"
  }
  tmp <- tempfile()
  download.file(url, tmp)
  result <- read_excel(tmp)
  unlink(tmp)
  result
}
# Fetch one year (2003-2019) of stop-question-frisk data in whichever format
# that year was published, apply the year-specific cleanups, and return the
# filtered table. Stops with an error for unsupported years.
ReturnTable <- function(year) {
  if (year >= 2003 & year <= 2014)
    table <- GetZippedCsvFromNypd(year)
  else if (year > 2014 & year < 2017)
    table <- GetCsvFromNypd(year)
  else if (year > 2016 & year <= 2019)
    table <- GetExcelFromNypd(year)
  else
    # fixed: was print("wrong year"), which fell through and later crashed
    # on the undefined `table`; fail fast with a clear error instead
    stop("wrong year")
  if (year == 2006)
    table <- Fix2006(table)        # 2006 release has typo'd column names
  if (year >= 2011)
    table <- RemoveForceUse(table) # forceuse column only exists from 2011 on
  if (year >= 2013)
    table <- Lowercm(table)        # 2013-2016 capitalise the *cm columns
  if (year > 2016)
    table <- FilterTable(table, "Ex")
  else
    table <- FilterTable(table, "Csv")
  table
}
# Sanity check: load every year 2003-2019 and report whether each consecutive
# pair of years exposes the same sorted set of column names after cleaning.
CheckMatching <- function() {
  years <- c(2003:2019)
  # fixed: was `prev <- NA` with `if(!is.na(prev))` — once prev holds the
  # character vector of column names, is.na() returns a vector and the `if`
  # errors (R >= 4.2) or silently tests only the first element
  prev <- NULL
  for (year in years) {
    yearTable <- ReturnTable(year)
    if (!is.null(prev)) {
      truthVector <- sort(colnames(yearTable)) == prev
      if (FALSE %in% truthVector)
        print(paste(year - 1, "and", year, "don't match"))
      else
        print(paste(year - 1, "and", year, "match"))
    }
    print(yearTable$Year[[1]])
    prev <- sort(colnames(yearTable))
  }
}
# 2006 labeling is riddled with typos and unclear documentation.
# Rename the misspelled 2006 headers to the standard names used by other
# years, and drop the wepfound column.
Fix2006 <- function(badTable) {
  old_names <- c("adrnum", "adrpct", "dettyp_c", "details_", "detail1_",
                 "premtyp", "prenam", "rescod", "strintr", "strname")
  new_names <- c("addrnum", "addrpct", "dettypcm", "detailcm", "linecm",
                 "premtype", "premname", "rescode", "stinter", "stname")
  badTable <- renameCols(badTable, old_names, new_names)
  badTable$wepfound <- NULL
  badTable
}
# use for 2011 on when this variable started being used
# I don't need this for what I'm doing, but other option is adding column
# with NA for years without
RemoveForceUse <- function(table) {
  table[["forceuse"]] <- NULL  # assigning NULL drops the column entirely
  table
}
# 2013-2016 capitalize their cm notation for some reason
# Normalise the capitalised *CM column names back to lower case.
Lowercm <- function(table) {
  renameCols(table,
             c("lineCM", "dettypCM", "detailCM"),
             c("linecm", "dettypcm", "detailcm"))
}
# Rename the columns listed in oldName to the matching entries of newName.
# Renames apply sequentially, so later renames see earlier results.
# Unequal-length vectors only trigger a printed message, as before.
renameCols <- function(table, oldName, newName) {
  if (length(oldName) != length(newName))
    print("Need same size rename vectors")
  for (i in seq_along(oldName)) {
    hits <- which(colnames(table) == oldName[i])
    colnames(table)[hits] <- newName[i]
  }
  table
}
# Subset and rename a raw SQF table according to the FilterCodes.csv mapping
# file in the working directory. `fileType` ("Csv" or "Ex") selects the
# column pair used: <fileType>Codes = raw column names to keep,
# <fileType>Renames = optional replacement names for those columns.
# All values are coerced to character and embedded spaces are stripped.
FilterTable<-function(table, fileType="Csv"){
codes <- paste(fileType, "Codes", sep="")
rn <- paste(fileType, "Renames", sep="")
# mapping file; empty strings are read as NA
filterTable <- read.csv("FilterCodes.csv", header = T, na.strings=c("","NA"))
filterTable <- mutate_all(filterTable, as.character)
# raw columns to keep (NA entries in the mapping are ignored)
filter <- filterTable[,codes]
filter <- filter[!is.na(filter)]
table <- table[,filter]
# apply renames only where a replacement name is given
renames <- filterTable[,c(codes, rn)]
colnames(renames) <- c("Codes", "Renames")
renames <- renames[!is.na(renames$Renames),]
table <- renameCols(table, renames$Codes, renames$Renames)
# normalise every cell to character and strip spaces
table <- mutate_all(table, as.character)
table[,colnames(table)] <- lapply(table[,colnames(table)], function(x) gsub(" ", "", x))
table
}
# Flag each stop as Armed (any weapon column == "Y") and count the stops
# where an officer drew a gun on an unarmed subject. The weapon column
# names differ before/after the 2017 format change, hence the year switch.
CompressForceOnUnarmed <- function(table) {
  if (as.numeric(table$Year[[1]]) < 2017)
    table$Armed <- table$pistol == "Y" | table$riflshot == "Y" | table$asltweap == "Y" |
      table$machgun == "Y" | table$Knife == "Y" | table$othrweap == "Y"
  else
    table$Armed <- (table$Gun == "Y" | table$Knife == "Y" | table$OtherWeapon == "Y")
  # fixed: removed leftover debug View(table) call — it requires an
  # interactive session and fails when run under Rscript
  nrow(table[which(table$Armed == FALSE & table$GunDrawn == "Y"), ])
}
# Count stops that produced neither an arrest nor a summons.
CompressInnocence <- function(table) {
  innocent <- table$Arrested == "N" & table$Summoned == "N"
  sum(innocent, na.rm = TRUE)
}
# to be used for either annual or precinct data
# Summarise a stop table into total stop counts plus per-race counts of
# stops, innocent outcomes, and guns drawn on unarmed subjects.
GenerateTable <- function(table) {
  black_rows <- table[which(table$Race == "B" | table$Race == "BLACK"), ]
  white_rows <- table[which(table$Race == "W" | table$Race == "WHITE"), ]
  data.frame(
    "TotalStops" = nrow(table),
    "BlackStops" = nrow(black_rows),
    "BlackInnocents" = CompressInnocence(black_rows),
    "BlackUnarmed" = CompressForceOnUnarmed(black_rows),
    "WhiteStops" = nrow(white_rows),
    "WhiteInnocents" = CompressInnocence(white_rows),
    "WhiteUnarmed" = CompressForceOnUnarmed(white_rows)
  )
}
# Want to get instance of black and white splits for:
# Innocence, stops, guns drawn while unarmed
# Also have total # of stops per year
# Downloads and summarises each year 2003-2019 and writes the combined
# table to StopAndFriskByYear.csv.
GenerateBreakData <- function(table) {
  AnnualSFData <- data.frame()
  for (year in 2003:2019) {
    year_summary <- GenerateTable(ReturnTable(year))
    AnnualSFData <- rbind(AnnualSFData,
                          cbind(data.frame("Year" = year), year_summary))
    print(paste(year, "loaded"))
  }
  write.csv(AnnualSFData, "StopAndFriskByYear.csv", row.names = FALSE)
}
|
64c2183d4efa5e2db3fe796bdc1ae9aaaaa5a2fb
|
8dd6ff7b87247e373e8b24e1cdeac2e6b89031f3
|
/dev/read_shape_file.R
|
aa47af7bb97367a407c38790b5734209611261f4
|
[
"MIT"
] |
permissive
|
ecoquants/WhaleMap
|
7b76d03dcf149ca45032c336e31821ee5df5403b
|
6a3878974f8a73376550c074dce4c83761ab4dda
|
refs/heads/master
| 2020-04-21T13:44:56.070816
| 2019-02-06T21:13:35
| 2019-02-06T21:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
read_shape_file.R
|
# Read one NARW survey-track shapefile and print its summary.
library(rgdal)
# NOTE(review): this first path is dead — it is overwritten by the next
# assignment before being used; presumably kept as a quick way to switch files.
ifile = 'data/raw/to_add/NARW sightings 2017 Exp GoSL/Shapefiles ODY vessel track/ODY_vessel_track_clean_line.shp'
ifile = 'data/raw/to_add/NARW sightings 2017 Exp GoSL/Shapefiles ODY vessel track/Survey tracks/ODY_Oceana_pelagic_survey_2017AUG27_1.shp'
# load the shapefile as a Spatial* object via rgdal
shape <- readOGR(dsn = ifile)
summary(shape)
|
0431351203ad6d776b7f19aa566ad5df02159e0e
|
010cdc330fbbd95423de66a264d13853606477a5
|
/man/Votes.Rd
|
1b51d71e223fd0b6744d170e03e758aa98aeaa24
|
[] |
no_license
|
cran/cba
|
1cd3ed427fded73783bf98ca346dddcd62baaf2a
|
d16e229b75fa69ff5b5484176bf5f6428073837c
|
refs/heads/master
| 2022-12-23T00:51:52.160556
| 2022-12-07T08:48:43
| 2022-12-07T08:48:43
| 17,694,994
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,901
|
rd
|
Votes.Rd
|
\name{Votes}
\alias{Votes}
\docType{data}
\title{Congressional Votes 1984 Data Set}
\description{
This data set includes votes for each of the U.S. House of
Representatives Congressmen on the 16 key votes identified by the
CQA. The CQA lists nine different types of votes: voted for, paired
for, and announced for (these three simplified to yea), voted
against, paired against, and announced against (these three
simplified to nay), voted present, voted present to avoid conflict
of interest, and did not vote or otherwise make a position known
(these three simplified to an unknown disposition).
}
\usage{data(Votes)}
\format{
A data frame with 435 observations on the following 17 variables.
\describe{
\item{\code{handicapped-infants}}{a factor with levels \code{n} and \code{y}}
\item{\code{water-project-cost-sharing}}{a factor with levels \code{n} and \code{y}}
\item{\code{adoption-of-the-budget-resolution}}{a factor with levels \code{n} and \code{y}}
\item{\code{physician-fee-freeze}}{a factor with levels \code{n} and \code{y}}
\item{\code{el-salvador-aid}}{a factor with levels \code{n} and \code{y}}
\item{\code{religious-groups-in-schools}}{a factor with levels \code{n} and \code{y}}
\item{\code{anti-satellite-test-ban}}{a factor with levels \code{n} and \code{y}}
\item{\code{aid-to-nicaraguan-contras}}{a factor with levels \code{n} and \code{y}}
\item{\code{mx-missile}}{a factor with levels \code{n} and \code{y}}
\item{\code{immigration}}{a factor with levels \code{n} and \code{y}}
\item{\code{synfuels-corporation-cutback}}{a factor with levels \code{n} and \code{y}}
\item{\code{education-spending}}{a factor with levels \code{n} and \code{y}}
\item{\code{superfund-right-to-sue}}{a factor with levels \code{n} and \code{y}}
\item{\code{crime}}{a factor with levels \code{n} and \code{y}}
\item{\code{duty-free-exports}}{a factor with levels \code{n} and \code{y}}
\item{\code{export-administration-act-south-africa}}{a factor with levels \code{n} and \code{y}}
\item{\code{Class}}{a factor with levels \code{democrat} and \code{republican}}
}
}
\details{
The records are drawn from:
\emph{Congressional Quarterly Almanac}, 98th Congress,
2nd session 1984, Volume XL: Congressional Quarterly Inc.
Washington, D.C., 1985.
It is important to recognize that \code{NA} in this database does
not mean that the value of the attribute is unknown. It
means simply, that the value is not "yea" or "nay" (see above).
}
\source{
\url{http://www.ics.uci.edu/~mlearn/MLRepository.html}
}
\references{
Blake, C.L. & Merz, C.J. (1998).
UCI Repository of Machine Learning Databases.
Irvine, CA: University of California, Department of Information and
Computer Science.
}
\examples{
data(Votes)
summary(Votes)
## maybe str(Votes) ; plot(Votes) ...
}
\keyword{datasets}
|
812faf91bc16f8b33f844a0397ba36e97960db4e
|
2ff56d5b52f5679d74c28eb34843766f2d1c7ac4
|
/R/addRoadMap.R
|
bdd1c1522259f5356cfd6b104e7cffd545f2fa71
|
[] |
no_license
|
danzhuibing/RAmap
|
89cda75ed4f6001634dd2ff0ff87fcc7eeb05c93
|
02f81ebd6ab50b7e7c93319ba5751045f071e46f
|
refs/heads/master
| 2020-04-06T06:59:07.269675
| 2016-08-30T02:24:27
| 2016-08-30T02:24:27
| 65,667,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 977
|
r
|
addRoadMap.R
|
#' Add a layer of road map to a Leaflet map widget
#'
#' @param map a leaflet map widget
#' @param center a numeric vector defining the center point of the map widget,
#' default set to Beijing Tiananmen. Notice that the longitude and latitude
#' should follow the Chinese standard coordinate GCJ-02
#' @param zoom zoom level of the map widget, default set to 10
#' @param layerId layerId in leaflet
#' @param group group in leaflet
#' @return a leaflet map widget
#' @export
#' @examples
#' leaflet() %>% addRoadMap()
addRoadMap <- function(map, center=c(116.40, 39.90), zoom=10, layerId=NULL, group=NULL) {
map <- map %>% addTiles(
# AutoNavi (Gaode) road-tile URL template; {x}/{y}/{z} are filled by leaflet
'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=8&x={x}&y={y}&z={z}',
tileOptions(tileSize=256, minZoom=3, maxZoom=18),
attribution = '© <a href="http://ditu.amap.com/">高德地图</a>',
layerId = layerId,
group = group
) %>%
# center the view on the requested GCJ-02 coordinate at the given zoom
setView(center[1], center[2], zoom=zoom)
return(map)
}
|
c169cff385435bd4395c5a3a071cb0a632af6b72
|
5fd118151180b21a1f87ba7821097972b73e30fd
|
/R/deg_analysis.R
|
d48bb51c7e33f4a57d7087f63cd09c90c26abcf0
|
[
"CC0-1.0"
] |
permissive
|
uruloki85/tfm
|
8f90a78df30a4b62107d14cefc48f08d79abc936
|
cd72556eae3f985d10cbb38f060cc7bfcdd62268
|
refs/heads/main
| 2023-05-07T17:55:28.611979
| 2021-05-31T16:11:43
| 2021-05-31T16:11:43
| 356,537,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,668
|
r
|
deg_analysis.R
|
library(dplyr)
library(readr)
require(GEOquery)
library(limma)
library(ggplot2)
########################
# Treatment vs Outcome #
########################
# GSE112282, GSE45757, GSE14426
##############################
# Gene expression vs Outcome #
##############################
# GSE21501, GSE28735, GSE62165, GSE71729, GSE56560
###############################################################################
## Get the data ##
###############################################################################
seriesName <- "GSE71729"
do_volcano_plots <- FALSE
gse <- getGEO(seriesName, GSEMatrix=TRUE, getGPL=TRUE)
gse <- gse[[1]]
# show(gse)
######################
## Explore the data ##
######################
## exprs get the expression levels as a data frame and get the distribution
summary(exprs(gse))
# exprs(gse)
# if data is log2, will be between 0 and 16
if (seriesName == "GSE45757"
|| seriesName == "GSE112282"
|| seriesName == "GSE14426"
|| seriesName == "GSE56560") {
exprs(gse) <- log2(exprs(gse))
}
# Verify data has been normalized
boxplot(exprs(gse),
outline=FALSE,
las=2,
boxwex=0.6,
cex.axis = 0.7)
###############################################################################
## Metadata ##
###############################################################################
#####################################
# Conversion from survival to stage #
#####################################
stages_mst <- data.frame(c(21.44,11.84,8,3),
c(0, 11.84+11.84*.4, 8+8*.4, 3+3*.4))
rownames(stages_mst) <- c('I','II','III','IV')
colnames(stages_mst) <- c('MST','MST+40%')
################################################
## Get the relevant metadata from the samples ##
################################################
# Get sample info
sampleInfo <- pData(gse)
# View(sampleInfo)
# Select some columns
if(seriesName == "GSE112282") { #x
sampleInfo <- dplyr::select(sampleInfo,
"cell line:ch1",
"replicate info:ch1",
"treatment:ch1")
sampleInfo <- dplyr::rename(sampleInfo,
line="cell line:ch1",
replicate="replicate info:ch1",
treatment="treatment:ch1")
} else if (seriesName == "GSE45757") { # x
sampleInfo <- dplyr::select(sampleInfo,
"treated with:ch1",
"cell line:ch1")
sampleInfo <- dplyr::rename(sampleInfo,
treated="treated with:ch1",
line="cell line:ch1")
# Fix typo: replace Mpanc96 with MPanc96
sampleInfo$line[sampleInfo$line == "Mpanc-96"] <- "MPanc-96"
} else if (seriesName == "GSE14426") { # x
# Select only samples for 24h and 168h
library("stringr")
sampleInfoSubset <- sampleInfo[str_detect(sampleInfo$source_name_ch1,
"24hr|168hr"), ]
sampleInfo <- dplyr::select(sampleInfoSubset,
"source_name_ch1")
sampleInfo <- dplyr::rename(sampleInfo,
source="source_name_ch1")
samples_to_keep <- row.names(sampleInfo)
} else if (seriesName == "GSE71729") { # x
# sampleInfo <- dplyr::select(sampleInfo,
# "tissue type:ch2")
# sampleInfo <- rename(sampleInfo,
# tissue="tissue type:ch2")
# sampleInfo <- na.omit(sampleInfo, "tissue")
sampleInfo <- dplyr::select(sampleInfo,
"survival_months:ch2")
sampleInfo <- dplyr::rename(sampleInfo,
OS="survival_months:ch2")
sampleInfo$OS <- as.numeric(sampleInfo$OS)
sampleInfo <- na.omit(sampleInfo, "OS")
samples_to_keep <- row.names(sampleInfo)
# sampleInfo$stage[sampleInfo$OS <= stages_mst["IV","MST+40%"]] <- 'IV'
# sampleInfo$stage[sampleInfo$OS <= stages_mst["III","MST+40%"]
# & sampleInfo$OS > stages_mst["IV","MST+40%"]] <- 'III'
# sampleInfo$stage[sampleInfo$OS > stages_mst["III","MST+40%"]] <- 'I-II'
sampleInfo$stage[sampleInfo$OS <= stages_mst["III","MST+40%"]] <- 'Advanced'
sampleInfo$stage[sampleInfo$OS > stages_mst["III","MST+40%"]] <- 'Early'
} else if (seriesName == "GSE56560") { # x
sampleInfo <- dplyr::select(sampleInfo,
"grading:ch1")
sampleInfo <- dplyr::rename(sampleInfo,
grading="grading:ch1")
sampleInfo$grading[sampleInfo$grading == "N/A"] <- NA
sampleInfo <- na.omit(sampleInfo, "grading")
samples_to_keep <- row.names(sampleInfo)
} else if (seriesName == "GSE28735") { # x
sampleInfo <- dplyr::select(sampleInfo,
"survival_month:ch1",
"tissue:ch1")
sampleInfo <- dplyr::rename(sampleInfo,
OS="survival_month:ch1",
tissue="tissue:ch1")
sampleInfo$OS <- as.numeric(sampleInfo$OS)
sampleInfo <- na.omit(sampleInfo, "OS")
# Select only Tumor samples
sampleInfo <- sampleInfo[sampleInfo$tissue == "T", ]
samples_to_keep <- row.names(sampleInfo)
# sampleInfo$stage[sampleInfo$OS <= stages_mst["IV","MST+40%"]] <- 'IV'
# sampleInfo$stage[sampleInfo$OS <= stages_mst["III","MST+40%"]
# & sampleInfo$OS > stages_mst["IV","MST+40%"]] <- 'III'
# sampleInfo$stage[sampleInfo$OS > stages_mst["III","MST+40%"]] <- 'I-II'
sampleInfo$stage[sampleInfo$OS <= stages_mst["III","MST+40%"]] <- 'Advanced'
sampleInfo$stage[sampleInfo$OS > stages_mst["III","MST+40%"]] <- 'Early'
} else if (seriesName == "GSE21501") { # x
sampleInfo <- dplyr::select(sampleInfo,
"characteristics_ch2.5",
"characteristics_ch2.6")
sampleInfo <- dplyr::rename(sampleInfo,
risk="characteristics_ch2.5",
risk2="characteristics_ch2.6")
# Information is misplaced in these samples
sampleInfo["GSM536946","risk"] <- sampleInfo["GSM536946","risk2"]
sampleInfo["GSM536892","risk"] <- sampleInfo["GSM536892","risk2"]
sampleInfo <- dplyr::select(sampleInfo, risk)
# Remove samples with empty value
sampleInfo[sampleInfo == ""] <- NA
sampleInfo <- na.omit(sampleInfo, "risk")
samples_to_keep <- row.names(sampleInfo)
# samples_to_keep
length(unique(sampleInfo$risk)) # 2 unique values
} else if (seriesName == "GSE62165") { # x
# sampleInfo <- select(sampleInfo,
# "grouped stage:ch1")
# sampleInfo <- rename(sampleInfo,
# stage="grouped stage:ch1")
sampleInfo <- dplyr::select(sampleInfo,
"Stage:ch1", "tissue:ch1")
sampleInfo <- dplyr::rename(sampleInfo,
stage="Stage:ch1",
tissue= "tissue:ch1")
sampleInfo <- sampleInfo[sampleInfo$tissue == "pancreatic tumor", ]
# Remove samples with NA value
# sampleInfo[sampleInfo == "NA"] <- NA
# sampleInfo <- na.omit(sampleInfo, "stage")
# Convert to 4 groups: Early (1,2) & Advanced (3,4) stages
sampleInfo$stage[sampleInfo$stage == "1a"] <- "Early"
sampleInfo$stage[sampleInfo$stage == "1b"] <- "Early"
sampleInfo$stage[sampleInfo$stage == "2a"] <- "Early"
sampleInfo$stage[sampleInfo$stage == "2b"] <- "Early"
sampleInfo$stage[sampleInfo$stage == "3"] <- "Advanced"
sampleInfo$stage[sampleInfo$stage == "4"] <- "Advanced"
# sampleInfo$stage[sampleInfo$stage == "1a"] <- "1"
# sampleInfo$stage[sampleInfo$stage == "1b"] <- "1"
# sampleInfo$stage[sampleInfo$stage == "2a"] <- "2"
# sampleInfo$stage[sampleInfo$stage == "2b"] <- "2"
# sampleInfo$stage[sampleInfo$stage == "3"] <- "3"
# sampleInfo$stage[sampleInfo$stage == "4"] <- "4"
# sampleInfo$stage[sampleInfo$stage == "1a"] <- "1"
# sampleInfo$stage[sampleInfo$stage == "1b"] <- "1"
# sampleInfo$stage[sampleInfo$stage == "2a"] <- "2"
# sampleInfo$stage[sampleInfo$stage == "2b"] <- "2"
# sampleInfo$stage[sampleInfo$stage == "3"] <- "3"
# sampleInfo$stage[sampleInfo$stage == "4"] <- "3"
samples_to_keep <- row.names(sampleInfo)
# samples_to_keep
length(unique(sampleInfo$stage)) # 2 unique values
}
#################################################
## Get the relevant metadata from the platform ##
#################################################
# `features` holds the platform (GPL) probe annotation for the loaded series.
features <- fData(gse)
# View(features)
# We keep the probe ID and the gene accession.
# The accession column name differs per platform, so it is picked per series.
if(seriesName == "GSE112282" #x
   || seriesName == "GSE45757" # x
   || seriesName == "GSE21501") { # x
  features <- dplyr::select(features, ID, GB_ACC)
} else if (seriesName == "GSE14426") { # x
  features <- dplyr::select(features, ID, Accession)
} else if (seriesName == "GSE28735" # x
           || seriesName == "GSE62165" # x
           || seriesName == "GSE56560") { # x
  # GB_LIST holds a delimiter-separated list of accessions per probe;
  # it is split into one row per accession further down.
  features <- dplyr::select(features, ID, GB_LIST)
} else if (seriesName == "GSE71729") { # x
  # This platform is indexed by gene symbol directly; only the ID is kept.
  features <- dplyr::select(features, ID)
}
###############################################################################
## Design                                                                    ##
###############################################################################
############################################
## Define the design for the DEG analysis ##
############################################
# Each branch builds a no-intercept (~0+) design matrix from the sample
# metadata of one GEO series and the contrasts of interest for limma.
# The commented-out blocks are the author's record of alternative stage
# groupings that were explored; they are kept for reference.
if(seriesName == "GSE112282") { #x
  design_colnames <- c("BET","BETMEK","MEK","VEHICLE","COLO201",
                       "HPAFII","NCIH510","RKO","Replicate2")
  # Treatment + cell line + replicate covariates.
  design <- model.matrix(~0+sampleInfo$treatment
                         +sampleInfo$line
                         +sampleInfo$replicate)
  colnames(design) <- design_colnames
  # Each treatment compared against vehicle control.
  contrasts <- makeContrasts(BET - VEHICLE,
                             BETMEK - VEHICLE,
                             MEK - VEHICLE,
                             levels=design)
} else if (seriesName == "GSE45757") { # x
  design_colnames <- c("Treated","Untreated","Capan2","CFPAC1","COLO357",
                       "HPAFII","Hs766T","L33","L36pl","L36sl","MIAPaCa2",
                       "MPanc96","Panc1","Panc1005","Panc203","Panc213",
                       "Panc327","Panc504","Panc603","Panc813","PL45",
                       "SU8686","SW1990")
  design <- model.matrix(~0+sampleInfo$treated
                         +sampleInfo$line)
  colnames(design) <- design_colnames
  contrasts <- makeContrasts(Untreated - Treated,
                             levels=design)
} else if (seriesName == "GSE14426") { # x
  design <- model.matrix(~0+sampleInfo$source)
  design_colnames <- c("ATRA168h","ATRA24h","Vehicle168h","Vehicle24h")
  colnames(design) <- design_colnames
  # Vehicle vs ATRA treatment at matching time points.
  contrasts <- makeContrasts(Vehicle168h - ATRA168h,
                             Vehicle24h - ATRA24h,
                             levels=design)
} else if (seriesName == "GSE21501") { # x
  design <- model.matrix(~0+sampleInfo$risk)
  design_colnames <- c("HighRisk","LowRisk")
  colnames(design) <- design_colnames
  contrasts <- makeContrasts(LowRisk - HighRisk,
                             levels=design)
} else if (seriesName == "GSE62165") { # x
  # `stage` was collapsed earlier to two groups: Early (1a-2b) / Advanced (3-4).
  design <- model.matrix(~0+sampleInfo$stage)
  # design_colnames <- c("g1a","g1b","g2a","g2b","g3","g4")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts("g1b - g1a",
  #                            "g2a - g1a",
  #                            "g2b - g1a",
  #                            "g3 - g1a",
  #                            "g4 - g1a",
  #                            "g2a - g1b",
  #                            "g2b - g1b",
  #                            "g3 - g1b",
  #                            "g4 - g1b",
  #                            "g2b - g2a",
  #                            "g3 - g2a",
  #                            "g4 - g2a",
  #                            "g3 - g2b",
  #                            "g4 - g2b",
  #                            "g4 - g3",
  #                            levels=design)
  # design_colnames <- c("g1","g2","g3","g4")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts("g3 - g1",
  #                            "g3 - g2",
  #                            "g4 - g1",
  #                            "g4 - g2",
  #                            levels=design)
  # design_colnames <- c("Advanced","Early","LNM")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts(Early - Advanced,
  #                            LNM - Advanced,
  #                            LNM - Early,
  #                            levels=design)
  design_colnames <- c("Advanced","Early")
  colnames(design) <- design_colnames
  contrasts <- makeContrasts(Early - Advanced,
                             levels=design)
  # design_colnames <- c("g1","g2","g3")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts("g2 - g1",
  #                            "g3 - g1",
  #                            "g3 - g2",
  #                            levels=design)
} else if (seriesName == "GSE71729") { # x
  # design <- model.matrix(~0+sampleInfo$tissue)
  # design_colnames <- c("Metastasis", "Normal", "Primary")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts(Normal - Metastasis,
  #                            Primary - Metastasis,
  #                            Primary - Normal,
  #                            levels=design)
  design <- model.matrix(~0+sampleInfo$stage)
  # colnames(design) <- c("I","III","IV")
  # contrasts <- makeContrasts(IV - III,
  #                            IV - I,
  #                            III - I,
  #                            levels=design)
  colnames(design) <- c("Advanced", "Early")
  contrasts <- makeContrasts(Early - Advanced,
                             levels=design)
} else if(seriesName == "GSE28735") { # x
  # design <- model.matrix(~0+sampleInfo$tissue)
  # design_colnames <- c("Metastasis", "Normal", "Primary")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts(Normal - Metastasis,
  #                            Primary - Metastasis,
  #                            Primary - Normal,
  #                            levels=design)
  # design <- model.matrix(~0+sampleInfo$stage
  #                        +sampleInfo$tissue)
  # design_colnames <- c("Advanced", "Early", "Tumor")
  # design_colnames <- c("High", "Low","Medium","Tumor")
  design <- model.matrix(~0+sampleInfo$stage)
  # design_colnames <- c("I","III","IV")
  # colnames(design) <- design_colnames
  # contrasts <- makeContrasts(IV - III,
  #                            IV - I,
  #                            III - I,
  #                            levels=design)
  # contrasts <- makeContrasts(Low - High,
  #                            Medium - Low,
  #                            Medium - High,
  #                            levels=design)
  colnames(design) <- c("Advanced","Early")
  contrasts <- makeContrasts(Early - Advanced,
                             levels=design)
} else if (seriesName == "GSE56560") { # x
  # design <- model.matrix(~0+sampleInfo$tissue)
  # design_colnames <- c("Adjacent", "Normal","PDAC")
  design <- model.matrix(~0+sampleInfo$grading)
  design_colnames <- c("G2", "G3")
  colnames(design) <- design_colnames
  # contrasts <- makeContrasts(PDAC - Adjacent,
  #                            PDAC - Normal,
  #                            levels=design)
  contrasts <- makeContrasts(G3 - G2,
                             levels=design)
}
# Print the design matrix for interactive inspection.
design
# Raw expression matrix: probes x samples.
eset <- exprs(gse)
##############################################
## Prepare the expression data if necessary ##
##############################################
# For the GB_LIST platforms, one probe can map to several accessions.
# Split each probe row into one row per accession, then average replicate
# rows per accession with limma::avereps.
if (seriesName == "GSE28735" # x
    || seriesName == "GSE62165" # x
    || seriesName == "GSE56560") { # x
  library(tidyr)
  # Split rows by Gene in GB_LIST duplicating the information
  eset_df <- as.data.frame(exprs(gse))
  # Add a new column with the GB_LIST
  eset_df <- cbind(features$GB_LIST,eset_df)
  # Rename the column (cbind named it after the expression).
  eset_df <- eset_df %>%
    dplyr::rename(GB_LIST = "features$GB_LIST")
  # eset_df$GB_LIST
  # Check if there are repetitions among the probe IDs (interactive check;
  # the two lengths should match).
  length(rownames(eset_df))
  length(unique(rownames(eset_df)))
  delimiter <- ","
  # NOTE(review): GSE57495 is not in the outer condition above, so this
  # branch is currently unreachable — confirm whether it is still needed.
  if (seriesName == "GSE57495") {
    delimiter <- " "
  }
  # Duplicate each row as many times as elements are present in GB_LIST
  eset_df2 <-
    eset_df %>%
    mutate(GB_LIST = strsplit(as.character(GB_LIST), delimiter)) %>%
    unnest(cols = c(GB_LIST)) %>%
    filter(GB_LIST != "")%>%
    dplyr::rename(GB_ACC = GB_LIST) #%>%
  # select(V1, 1:-1)
  # Check if there is any missing value in this column
  which(is.na(eset_df2$GB_ACC))
  # Average rows sharing the same accession; column 1 is GB_ACC itself.
  eset2 <- limma::avereps(eset_df2[,2:ncol(eset_df2)], eset_df2$GB_ACC)
  ncol(eset2)
  nrow(eset2)
  eset <- eset2
  # eset["AB001736","GSM1527105"]
  # eset["AK123548","GSM1527105"]
}
##################################
## Discard samples if necessary ##
##################################
# Drop samples that cannot be used with the design matrix built above.
if (seriesName == "GSE45757") { # x
  # These GSM columns lack the relevant treatment/line metadata; remove
  # them from the expression matrix by name.
  unusable_samples <- c("GSM1113671", "GSM1113672", "GSM1113673",
                        "GSM1113674", "GSM1113675", "GSM1113676",
                        "GSM1113809", "GSM1113810", "GSM1113811")
  eset <- eset[, setdiff(colnames(eset), unusable_samples)]
} else if (seriesName %in% c("GSE14426", "GSE21501", "GSE62165",
                             "GSE71729", "GSE28735", "GSE56560")) {
  # Keep only the samples retained during the metadata filtering step.
  eset <- eset[, samples_to_keep]
}
###############################################################################
## Run DEG analysis with limma                                               ##
###############################################################################
# Sanity check: number of expression columns must equal design rows.
ncol(eset)
nrow(design)
# Fit one linear model per gene.
fit <- lmFit(eset, design)
# head(fit$coefficients)
contrasts
fit2 <- contrasts.fit(fit, contrasts)
# Get differential expression statistics and p-values with empirical Bayes
fit2 <- eBayes(fit2)
# Results by contrast
topTable(fit2)
# topTable(fit2, coef=1)
# topTable(fit2, coef=2)
# topTable(fit2, coef=3)
# topTable(fit2, coef=4)
# Find differentially-expressed genes.
# NOTE(review): several branches relax p.value far beyond 0.05 (up to 0.99),
# which effectively disables FDR control — these thresholds were tuned to
# obtain a non-empty gene list (see the tried-value comments below).
if (seriesName == "GSE62165") { # x
  # results <- decideTests(fit2, p.value = 0.6)
  results <- decideTests(fit2, p.value = 0.8)
  # results <- decideTests(fit2, p.value = 0.4)
} else if (seriesName == "GSE28735") { # x
  # 3 groups: 0.8 (177 DEG), 0.85 (219), 0.9 (2560), 1.0 (all are detected as DEG)
  # 2 groups: 0.05 (8 DEG), 0.1 (8), 0.2 (103), 0.25 (317), 0.3 (1773)
  # results <- decideTests(fit2, p.value = 0.25)
  results <- decideTests(fit2, p.value = 0.4)
  # results <- decideTests(fit2)
} else if (seriesName == "GSE71729") { # x
  # tried: 0.5 (26), 0.65 (235)
  # 3 groups: no DEG
  # 2 groups: 0.05 (0), 0.4 (29), 0.5 (42)
  # results <- decideTests(fit2, p.value = 0.6)
  results <- decideTests(fit2, p.value = 0.7)
} else if (seriesName == "GSE56560") { # x
  # tried: 0.5 (40), 0.6 (43), 0.65 (151), 0.7 (242)
  # results <- decideTests(fit2, p.value = 0.65)
  results <- decideTests(fit2, p.value = 0.99)
} else if (seriesName == "GSE14426") { # x
  # results <- decideTests(fit2, p.value = 0.3)
  results <- decideTests(fit2, p.value = 0.99)
} else {
  # Default limma threshold (p.value = 0.05).
  results <- decideTests(fit2)
}
# results <- decideTests(fit2, p.value = 0.9)
# How many genes are differentially-expressed
table(results)
##################
## Venn Diagram ##
##################
# Overlap of up/down calls across the contrasts.
vennDiagram(results)
####################
## Add gene names ##
####################
# Attach a gene identifier to each fit row so topTable output is readable.
# Print top 10 DEG with meaningful name
if (seriesName != "GSE28735" # x
    && seriesName != "GSE62165" # x
    && seriesName != "GSE56560" # x
    && seriesName != "GSE71729") { # x
  # Platforms annotated via fData: pick the per-platform accession column.
  gene_accession <- "GB_ACC"
  if (seriesName == "GSE14426") { # x
    gene_accession <- "Accession"
  }
  anno <- fData(gse)
  # anno
  anno <- dplyr::select(anno,all_of(gene_accession))
  fit2$genes <- anno
} else if (seriesName == "GSE71729") { # x
  # This platform is indexed by gene symbol already.
  fit2$genes <- rownames(eset)
  # fit2$genes
} else {
  # GB_LIST platforms: accessions became the rownames of eset2 above.
  eset2_df <- as.data.frame(eset2)
  eset2_df$GB_ACC <- rownames(eset2_df)
  # eset2_df$GB_ACC
  fit2$genes <- eset2_df$GB_ACC
}
topTable(fit2)
topTable(fit2)
######################
## Get common genes ##
######################
# results[,0]
# results[,1]
# results[,2]
# results[,3]
# results[,4]
# make a boolean index vector based on criteria
if (seriesName == "GSE14426") {
iv <- results[,1] != 0 & results[,2] != 0
} else if (seriesName == "GSE112282"
# || seriesName == "GSE28735"
) {
iv <- results[,1] != 0 & results[,2] != 0 & results[,3] != 0
} else if (seriesName == "GSE45757"
|| seriesName == "GSE28735"
|| seriesName == "GSE21501"
|| seriesName == "GSE62165"
|| seriesName == "GSE71729"
|| seriesName == "GSE56560") {
iv <- results[,1] != 0
}
# use it to extract gene names
deg <- fit2$genes[iv]
length(deg)
# deg
###############
# Get top DEG #
###############
do_top_x = FALSE
if (seriesName == "GSE112282") {
res_df_GSE112282 <- data.frame(fit2$genes[iv], fit2$F.p.value[iv])
do_top_x = TRUE
top_value <- 150
} else if (seriesName == "GSE45757") {
res_df_GSE45757 <- data.frame(fit2$genes[iv], fit2$F.p.value[iv])
do_top_x = TRUE
top_value <-300
}
do_top_x = FALSE
if (do_top_x) {
# Get genes and p.values
res_df <- data.frame(fit2$genes[iv], fit2$F.p.value[iv])
# res_df <- res_df_GSE112282
# res_df <- res_df_GSE45757
colnames(res_df) <- c("Gene","F.p.value")
res_df$Gene[res_df$Gene == ""] <- NA
res_df <- na.omit(res_df, "Gene")
# Order by p.value and get top 100
top_x <- res_df[order(res_df$F.p.value),][0:top_value,]
deg <- top_x$Gene
# topTable(fit2, number=30)
}
#######################
# Convert GenBank IDs #
#######################
# Map GenBank accessions -> Entrez IDs -> HGNC symbols, then write the list.
if (seriesName == "GSE71729") { # x
  # Already gene symbols; no conversion needed.
  length(unique(deg))
  mapped_to_hgnc <- deg
} else {
  library(org.Hs.eg.db)
  library(biomaRt)
  # gene_list <- unlist(fit2$genes)
  gene_list <- deg
  length(unique(gene_list))
  # 2842
  # NOTE: after library(org.Hs.eg.db) this `select` resolves to
  # AnnotationDbi::select (it masks dplyr::select), which is what the
  # (keys, columns, keytype) argument order here requires.
  mapped <- select(org.Hs.eg.db, gene_list, c("ENTREZID","SYMBOL"), "ACCNUM")
  length(unique(mapped$ACCNUM))
  # 2842
  length(unique(mapped$ENTREZID))
  # 2288
  # Query Ensembl for HGNC symbols of the mapped Entrez IDs (network call).
  mart <- useMart(biomart="ensembl", dataset="hsapiens_gene_ensembl")
  # filters <- listFilters(mart)
  mapped_to_hgnc <- getBM(attributes=c('hgnc_symbol'),
                          filters = 'entrezgene_id',
                          mart = mart,
                          values = mapped$ENTREZID)
  length(unique(mapped_to_hgnc$hgnc_symbol))
  #mapped_to_hgnc
}
##################
# Save GENE list #
##################
# File name records whether the top-X narrowing was applied.
# seriesName = "GSE112282"
# seriesName = "GSE45757"
if (do_top_x) {
  write.table(unique(mapped_to_hgnc),
              file=paste(seriesName,"narrowed_common_genes.csv", sep="_"),
              col.names = "HGNC",
              row.names = FALSE,
              quote = FALSE)
} else {
  write.table(unique(mapped_to_hgnc),
              file=paste(seriesName,"common_genes.csv", sep="_"),
              col.names = "HGNC",
              row.names = FALSE,
              quote = FALSE)
}
###############################################################################
## Volcano plot                                                              #
###############################################################################
# Volcano plot of the first contrast: log fold-change vs log-odds (B).
if (do_volcano_plots) {
  full_results <- topTable(fit2, coef=1, number=Inf)
  # full_results <- tibble::rownames_to_column(full_results,"ID")
  ggplot(full_results,aes(x = logFC, y=B)) + geom_point()
  ## change according to your needs
  p_cutoff <- 0.05
  if (seriesName == "GSE62165") { # x
    p_cutoff <- 0.3
  }
  fc_cutoff <- 1
  # BUG FIX: the original passed `abs(logFC) > fc_cutoff` as a *second,
  # unnamed* expression to mutate(), so `Significant` only tested the
  # adjusted p-value. Both conditions must be combined with `&`.
  full_results %>%
    mutate(Significant = adj.P.Val < p_cutoff & abs(logFC) > fc_cutoff) %>%
    ggplot(aes(x = logFC, y = B, col=Significant)) + geom_point()
  # Label some genes
  # library(ggrepel)
  #
  # p_cutoff <- 0.05
  # fc_cutoff <- 1
  # topN <- 20
  #
  # full_results %>%
  #   mutate(Significant = adj.P.Val < p_cutoff & abs(logFC) > fc_cutoff) %>%
  #   mutate(Rank = 1:n(), Label = ifelse(Rank < topN, GB_ACC,"")) %>%
  #   ggplot(aes(x = logFC, y = B, col=Significant,label=Label)) + geom_point() + geom_text_repel(col="black")
}
# Save as CSV
# write_csv(harmonized_exp_data,file=paste(seriesName,"exp_data_harmonized.csv", sep="_"))
# read.table("test.txt",header=TRUE,row.names=1) # says first column are rownames
|
c1d5dd046ffed56044f9503be17e34c9b12e61de
|
fe289741ad1a69158b67cddc97d392bf622c89ad
|
/plot4.R
|
8c391df18f163126292a73a942eece1e9a33e596
|
[] |
no_license
|
Hopellopo/Exploratory-Analysis-With-Base-Plotting-Test
|
5fef069661c5d01195d50b65356bf2a082ab8691
|
b63479d3cb95cffd5c05031341cc185c6dcb3653
|
refs/heads/master
| 2021-01-19T10:42:04.712140
| 2017-02-16T16:35:48
| 2017-02-16T16:35:48
| 82,199,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
plot4.R
|
# Draw the 2x2 panel figure: plot2 output, voltage over time, plot3 output,
# and global reactive power over time.
#
# Reads the data frame `d` from RelevantData.Rdata (must contain Date, Time,
# Voltage and Global_reactive_power columns) and sources plot2()/plot3().
plot4 <- function(){
  # Silence warnings locally and restore the previous setting on exit
  # instead of leaving the global option changed.
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)
  load("RelevantData.Rdata") # creates `d` in the function environment
  source('C:/Users/jloum_000/Desktop/Data Analysis Test/plot2.R')
  source('C:/Users/jloum_000/Desktop/Data Analysis Test/plot3.R')
  # Combine date and time into a single timestamp string.
  # NOTE(review): assumes d$Date parses with as.Date()'s default format —
  # confirm against the saved data.
  d["DateTime"] <- paste(as.Date(d$Date), d$Time)
  # Restore the panel layout when done.
  old_par <- par(mfrow = c(2, 2))
  on.exit(par(old_par), add = TRUE)
  plot2()
  # BUG FIX: both panels below were titled "Plot 2" (copy-paste error);
  # label each panel after the variable it actually shows.
  plot(as.POSIXlt(d$DateTime), as.numeric(d$Voltage), type = "l",
       main = "Voltage", ylab = "Voltage", xlab = "")
  plot3()
  plot(as.POSIXlt(d$DateTime), as.numeric(d$Global_reactive_power), type = "l",
       main = "Global reactive power", ylab = "Global reactive power", xlab = "")
}
|
7a02da897054e6a1280080404e213d92760df7ba
|
c2982f1f6360328c8b143e1041f2f3dbf91f39bb
|
/Class_Code/W3_Text_Mining_II/W3 Blog.R
|
40f11b70f1d496c30639b2f3e26c3655f1503bc1
|
[] |
no_license
|
Ironaki/Digital-Humanities-582A
|
b78e71668a77b3eee188cce8e9cb35fdcdea69af
|
2d5f6da4ab2fb8d33b331e5c9214b23318ebe434
|
refs/heads/master
| 2021-08-14T08:58:00.626991
| 2017-11-15T06:45:13
| 2017-11-15T06:45:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,636
|
r
|
W3 Blog.R
|
#####A bunch of useful code
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace whenever
# this script is sourced — discouraged in shared scripts.
rm(list=ls())
library(stringr)
library(ggplot2)
library(tidyr)
library(plotly)
##### PART I: DIVIDE INTO CHAPTERS
### Function to read the text
# Read a plain-text file (one element per line) and return it as a
# single-column data.frame of character lines.
#
# x   - file name, e.g. "Pride.txt"
# dir - directory containing the file (with trailing slash); defaults to the
#       original hard-coded course folder so existing calls keep working.
my.scan <- function(x, dir = "/Users/Armstrong/Google_Drive/Learning_Material/HIST582A/Class_Code/W3/Raw Text/"){
  Raw_Text.scan <- scan(paste(dir, x, sep = ""), what = "character", sep = "\n")
  # Return the lines visibly (the original ended in an assignment, which
  # returned the same value invisibly).
  data.frame(Raw_Text.scan, stringsAsFactors = FALSE)
}
# Read the novel and keep lines 16-10734 (presumably trimming the Project
# Gutenberg header/footer — confirm against the raw file).
Pride.raw <- my.scan("Pride.txt")
Pride.cut <- Pride.raw[c(16:10734),]
### Find the start and the end line
# Chapter headings look like "Chapter 1"; each chapter body runs from the
# line after a heading to the line before the next heading (the last
# chapter runs to the end of the text).
chapter.line <- grep("^Chapter\\s[[:digit:]]+", Pride.cut)
start.line <- chapter.line + 1
end.line <- chapter.line[2:length(chapter.line)] - 1
end.line <- c(end.line, length(Pride.cut))
Pride.df <- data.frame("Start" = start.line,"End" = end.line, "Text" = NA)
i <- 1
# Collapse each chapter's lines into a single string per data-frame row.
for (i in 1:length(Pride.df$End)){
  Pride.df$Text[i] <- paste(Pride.cut[Pride.df$Start[i]:Pride.df$End[i]], collapse = " ")
}
### Now the text is nicely cut in chapters
##### PART II: SCATTERPLOT of USE OF THE WORD IN EACH CHAPTER
# Count occurrences of letter/letters/Letter/Letters in each chapter.
# BUG FIX: the original pattern "\\bletter\\b | \\bletters\\b | ..." had
# literal spaces inside the alternation, so each branch only matched the
# word when flanked by spaces (occurrences like "letter." were missed).
Pride.df$letter <- str_count(Pride.df$Text, "\\b[Ll]etters?\\b")
Pride.df$count <- str_count(Pride.df$Text,"[[:alpha:]]+")
Pride.df$Chapter <- 1:61
# BUG FIX: the original mapped colour to `non.null`, a variable that is
# never defined in this script (the workspace is cleared at the top), so
# rendering the plot errored. The points are coloured in geom_point().
ggplot(Pride.df, aes(Chapter, letter)) +
  geom_point(color = "orange") +
  xlab("Chapter Number") +
  ylab("Use of 'letter(s)'")
# Normalise by chapter length (percentage of all words).
Pride.df$letter.per <- Pride.df$letter/Pride.df$count*100
### Another graph in percentage
ggplot(Pride.df, aes(Chapter, letter.per)) +
  geom_point(color = "orange") +
  xlab("Chapter Number") +
  ylab("Use of 'letter(s)' in percentage")
##### PART3: KWIC OF THE WORD LETTER
# Build a keyword-in-context (KWIC) table: every occurrence of
# letter/letters/Letter/Letters with 5 words of context on each side.
Pride.kwic <- paste(Pride.cut, collapse = " ")
# Split on non-word characters into individual tokens.
Pride.kwic.fine <- unlist(str_split(Pride.kwic , "\\W"))
location.kwic <- which(Pride.kwic.fine == "letter" | Pride.kwic.fine == "letters"| Pride.kwic.fine == "Letters" | Pride.kwic.fine == "Letter")
start.kwic <- location.kwic - 5 ## Change 5 to any numbers
end.kwic <- location.kwic + 5 ## Change 5 to any numbers
# Clamp the context windows to the token vector's bounds (a 0 lower bound
# is harmless: index 0 is silently dropped by R subsetting).
start.kwic <- ifelse(start.kwic > 0, start.kwic, 0)
end.kwic <- ifelse(end.kwic < length(Pride.kwic.fine), end.kwic, length(Pride.kwic.fine))
KWIC.letter.df <- data.frame("Start" = start.kwic, "End" = end.kwic, "Text" =NA)
k <- 1
# Assemble each context window back into a readable string.
for(k in 1:length(KWIC.letter.df$End)){
  text <- Pride.kwic.fine[KWIC.letter.df$Start[k]:KWIC.letter.df$End[k]]
  KWIC.letter.df$Text [k] <- paste(text, collapse = " ")
}
# Write the KWIC table as a tab-separated file (hard-coded absolute path).
write.table(KWIC.letter.df,"/Users/Armstrong/Google_Drive/Learning_Material/HIST582A/Class_Code/W3/KWIC_letter.txt",sep = "\t")
|
8c437a033ba592dcae66e3b176ce6ff7788f6e9f
|
3582145ad133e03e0bb735eecbcb30e6aef5d07a
|
/man/mcparam.sample.Rd
|
8925aee633708605ed965a23dcaf375dc6b15ffd
|
[
"BSD-2-Clause"
] |
permissive
|
JGCRI/ambrosia
|
49b635e659507f5cb5014e7f9bca616fbbb075f7
|
bf90ee7024ebee084b22679a76b3d86f9f6f808d
|
refs/heads/master
| 2023-04-12T21:26:21.495659
| 2022-05-21T20:04:07
| 2022-05-21T20:04:07
| 69,679,416
| 6
| 0
|
NOASSERTION
| 2022-05-21T20:04:08
| 2016-09-30T15:40:30
|
R
|
UTF-8
|
R
| false
| true
| 813
|
rd
|
mcparam.sample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcpar-analysis.R
\name{mcparam.sample}
\alias{mcparam.sample}
\title{Sample the MC results using bootstrap sampling.}
\usage{
mcparam.sample(mc.data, nsamp = 100, func = NULL)
}
\arguments{
\item{mc.data}{Data frame of Monte Carlo output}
\item{nsamp}{Number of samples to draw}
\item{func}{Optional function to apply to the samples drawn from the MC
distribution.}
}
\value{
Data frame or list (see details)
}
\description{
Optionally, apply a function to the sampled values.
}
\details{
If \code{func} is \code{NULL}, the return value will be a data frame of
sampled parameter values. Otherwise the return value will be a list with the
data frame just described in the first element and the output of \code{func}
in the second.
}
|
306c1b451a0f375d14a214194f4096715e855ed1
|
43e699fbe39b402e7a4827c104c511c41e6af059
|
/Term3_Group4.R
|
6cb4cc9a4a0b409282947735cb2c2cc2c98fc1ef
|
[] |
no_license
|
paulforst/BU_MSA_T3_G4
|
b373d49010c162e063d63456fcf43b03031594c5
|
29ba36fe6a3607ce0f9d8a49b3a9f4a276778816
|
refs/heads/master
| 2021-03-27T10:11:19.113020
| 2017-12-12T17:11:26
| 2017-12-12T17:11:26
| 109,999,195
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,062
|
r
|
Term3_Group4.R
|
# ============================================================================
# Term 3 - Group 4 Group Project
# ============================================================================
# Purpose: This script contains functions that will aid in the process of pulling
# NY Times articles and converting them into a data frame for modeling purposes.
# ============================================================================
# Created: 11/29/2017
# Members: Tammy Hang, Jay Bektasevic, Andrew Brill, Paul Forst
# Bellarmine University
# ----------------------------------------------------------------------------
# ____________________________________________________________________________
# Load Required Packages and Files ####
# Check that necessary packages are installed
packages <- c("tidyverse", "tm", "RMySQL", "jsonlite", "lubridate", "RCurl", "gtools", "XML", "koRpus",
"tidytext", "ngram", "stringr")
new.packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# Load Neccessary Packages
sapply(packages, require, character.only = TRUE)
# Options
options(stringsAsFactors = FALSE)
set.seed(2017)
# Load source files
# Credentials for API keys and DB conenctions
source("credentials.R")
# Corpus functions
source("corpus_functions.R")
# NY Times functions
source("get_nyt_data.R")
# ____________________________________________________________________________
# NY Times Data ####
# Call the general function to generate the data from the NYT API.
# Creates the global variables nyt_articles and nyt_keywords.
get_nyt_data()
# Randomly select 5,000 articles to use for modeling
# nyt_sample <- nyt_articles[sample(1:nrow(nyt_articles), 5000, replace=FALSE),]
# Scrape the body text for every article URL.
# Preallocate the result list instead of growing a NULL object in the loop.
n_articles <- length(nyt_articles[[1]])
body_container <- vector("list", n_articles)
for (i in seq_len(n_articles)) {
  # tryCatch() ignores scraping errors (NULL body) and continues the loop.
  body_container[[i]] <- tryCatch(get_nyt_body(nyt_articles[[1]][i]), error = function(e) NULL)
  print(paste0("Scraping article # ", i, " of ", n_articles)) # progress indicator
}
# Save as Rdata
save(body_container, file = "nyt_body.Rdata")
# Bind the article body with the rest of the metadata
nyt_articles_with_body <- cbind(nyt_articles, body_container)
# Select features
features <- c("web_url", "main", "body_container", "section_name", "pub_date")
nyt_final_data <- nyt_articles_with_body[features]
# Match the column names of both datasets
colnames(nyt_final_data)[] <- colnames(final_data)
# Remove records with no body text
nyt_final_data <- nyt_final_data[!(is.na(nyt_final_data$body) | nyt_final_data$body==""), ]
# Add a column to label the source
nyt_final_data$source <- "NY Times"
# Combine the datasets
combined_data <- rbind(final_data, nyt_final_data)
# Word count of each article.
# BUG FIX: the original looped over 1:nrow(combined_data[,1]) — nrow() of a
# column vector is NULL, so 1:NULL errors. Use a type-safe vapply instead.
combined_data$word_count <- vapply(combined_data$body,
                                   function(b) as.numeric(wordcount(b)),
                                   numeric(1), USE.NAMES = FALSE)
# Plot the density
hist(combined_data$word_count, prob=TRUE, col="grey")
lines(density(combined_data$word_count), col="blue", lwd=2)
# Remove articles with <200 and >2,000 words.
# BUG FIX: the original omitted the row comma (combined_data[cond]), which
# indexes *columns* of a data frame, not rows.
combined_data <- combined_data[combined_data$word_count >= 200 & combined_data$word_count <= 2000, ]
hist(combined_data$word_count, prob = TRUE, col="grey")
lines(density(combined_data$word_count), col="blue", lwd=2)
table(combined_data$section)
table(combined_data$source)
# Harmonise the world news section name across sources
combined_data$section <- str_replace(combined_data$section, "World news", "World")
table(combined_data$section)
save(combined_data, file = "combined_final_data.Rdata")
# ____________________________________________________________________________
# Guardian Data ####
# ----------------------> See get_guardian_data.R <---------------------------
# In order to avoid the script from burgeoning in size, we will merge the two
# scripts at a later phase of the project.
# ____________________________________________________________________________
# Create Corpus ####
# clean.corpus() comes from corpus_functions.R sourced above.
nytCorpus <- clean.corpus(body_container)
# ____________________________________________________________________________
# Database Connection ####
# Write metadata of articles to database (credentials come from credentials.R).
mydb = dbConnect(MySQL(), user=db_user, password=db_password, dbname= db_name, host= db_host)
# Write information to table.
# NOTE(review): append = 'FALSE' passes the *string* "FALSE", not the logical
# FALSE — confirm dbWriteTable treats it as intended; also no dbDisconnect()
# is ever called on mydb.
dbWriteTable(mydb, "nyt_articles", nyt_articles, append = 'FALSE', row.names = FALSE)
dbWriteTable(mydb, "nyt_keywords", nyt_keywords, append = 'FALSE', row.names = FALSE)
dbWriteTable(mydb, "guardian_articles", final_data, append = 'FALSE', row.names = FALSE)
# Lexical Diversity - Paul
# Compute, for every article body, the Measure of Textual Lexical Diversity
# (MTLD) and the Flesch-Kincaid reading age with koRpus. Both metrics are
# very slow (MTLD ~1h, readability ~12h on the full data set).
# Pull only the text body from the data set
text_body <- combined_data$body
n_docs <- length(text_body)
# Preallocate the result vectors instead of growing NULL objects.
lexdiv <- numeric(n_docs)
read_age <- numeric(n_docs)
# Create progress bar (winProgressBar is Windows-only).
pb <- winProgressBar(title = "Calculating Lexical Diversity", min = 0, max = n_docs, width = 400)
# Loop through each article body; system.time() reports the total runtime.
system.time(for (i in seq_len(n_docs)) {
  print(i)
  token <- tokenize(text_body[i], format = "obj", lang = "en")
  # Use Measure of Textual Lexical Diversity
  temp <- koRpus::MTLD(token, quiet = TRUE)
  lexdiv[i] <- temp@MTLD$MTLD
  # Use Flesch-Kincaid index to evaluate readability
  temp <- readability(token, index = c("Flesch.Kincaid"), quiet = TRUE)
  read_age[i] <- temp@Flesch.Kincaid$age
  # Update the progress bar
  setWinProgressBar(pb, i, title=paste("Calculating Lexical Diversity: ",round(i/n_docs*100, 1),"% done"))
})
close(pb)
# Join MTLD and Flesch-Kincaid to main data set
combined_data <- cbind(combined_data, lexdiv, read_age)
# Save new data set to prevent the need to run loop in future
save(combined_data, file = "combined_final_with_lexdiv.Rdata")
# Clean-up temp variables
rm(pb)
rm(temp)
rm(token)
rm(lexdiv)
rm(read_age)
rm(text_body)
rm(n_docs)
# Plot the MTLD and Reading Levels for both sources.
# NOTE(review): bare ggplot objects are auto-printed only at the interactive
# top level — wrap them in print() if this script is run via source().
ggplot(combined_data, aes(x=source, y=lexdiv)) +
  geom_boxplot()
# The Guardian tends to have a higher diversity but there are numerous outliers
ggplot(combined_data, aes(x=source, y=read_age)) +
  geom_boxplot()
# Small spread between the two sources that generally overlap. Likely not a good differentiator
# Plot by source and section
# Per source/section summary of both metrics (mean, min, max).
source_section <- combined_data %>%
  group_by(source, section) %>%
  summarize(avg_lexdiv = mean(lexdiv),
            max_lexdiv = max(lexdiv),
            min_lexdiv = min(lexdiv),
            avg_read = mean(read_age),
            max_read = max(read_age),
            min_read = min(read_age)
  )
ggplot(combined_data) +
  aes(x = section, y = lexdiv, color = source) +
  geom_boxplot() +
  labs(x = "Section", y = "Measure of Textual Lexical Diversity", colour="Source") +
  theme_bw(base_size = 20)
ggplot(combined_data) +
  aes(x = section, y = read_age, color = source) +
  geom_boxplot() +
  labs(x = "Section", y = "Flesch-Kincaid Reading Age Index", colour="Source") +
  coord_cartesian(ylim = c(0, 30)) +
  theme_bw(base_size = 20)
# ____________________________________________________________________________
# SVM and Naive Bayes ####
# ----------------------> See data_prep_modeling.R <--------------------------
# ____________________________________________________________________________
# Topic Modeling ####
# ----------------------> See TopicModeling.R <---------------------------
# ____________________________________________________________________________
# Word2Vec ####
# ----------------------> See <---------------------------
ade45294e6b53f2bf0fe5114e90fe7356ac16367
|
e1eb7dc6a1ce2f54c4ba5ee95f59ee9da2e7965f
|
/man/newstart.Rd
|
47998f7a8fdd93deb5c4fac70f405714bd7d2304
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
DanielaGawehns/fluiditypilot
|
eaba5f0397fd3b69d3ef604fffde7a44e468b9f8
|
2167dee1bc807cde9e1cfc5f872e42a38f2880ef
|
refs/heads/master
| 2022-12-24T18:21:10.913454
| 2020-09-04T15:29:40
| 2020-09-04T15:29:40
| 291,978,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 845
|
rd
|
newstart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process-data-to-txt.R
\name{newstart}
\alias{newstart}
\title{Find all newstart moments
If tapplied, it accounts for newstart moments depending on tagY (or whichever tapply dependence)}
\usage{
newstart(x, minlength, minlag)
}
\arguments{
\item{x}{Depends on how the function is tapplied, whichever subset of data is fed. Data needs to be formatted as the output from the binningBeaconData function}
\item{minlength}{indicates the minimum length of an interaction}
\item{minlag}{indicates how much lag between interactions is needed/wanted}
}
\value{
filtered data, where minlength and minlag are applied on all connections
}
\description{
Find all newstart moments
If tapplied, it accounts for newstart moments depending on tagY (or whichever tapply dependence)
}
|
b592507901b2e28064ad4c21552b54eead042735
|
d27bf90f117ae504900467fb547832eebb4cc33d
|
/processing.R
|
22afd5a7dcfc205e7d4164d73f1193509f70a922
|
[] |
no_license
|
DmytroRybalko/RadioInUA
|
e65aabae07e78539352d837147de797bda0750ad
|
4025857eba9b4b76535eb9b7b0daf9f20828a5c4
|
refs/heads/master
| 2021-09-08T00:42:07.351483
| 2017-11-23T14:18:26
| 2017-11-23T14:18:26
| 108,746,795
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
processing.R
|
# This is the main workflow for analysing and visualizing playlist data from
# Ukrainian radio stations.
|
63ff2263e5d57f4eb3856bd8992b941eb125fecc
|
ef9cb3d6688c20ae5ff1440208801c0e9e7c9d58
|
/man/get_artist.Rd
|
f13922bc16937c86e3bf76d501cd2e2dce972787
|
[] |
no_license
|
kraigrs/musixmatch
|
f95596cfdc4fd343fe31b74e507f4208da5c0529
|
7d154de164178bf436b0859fa7675621a533c8c0
|
refs/heads/master
| 2021-01-01T05:35:48.762835
| 2015-09-17T13:16:22
| 2015-09-17T13:16:22
| 42,250,047
| 2
| 1
| null | 2015-09-17T13:16:22
| 2015-09-10T14:36:02
|
R
|
UTF-8
|
R
| false
| false
| 710
|
rd
|
get_artist.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/api.R
\name{get_artist}
\alias{get_artist}
\title{Search for artists in the database}
\usage{
get_artist(artist, simplify = TRUE, ...)
}
\arguments{
\item{artist}{string of the artist to search for, e.g. "Slayer"}
\item{...}{other API parameters, e.g. artist_id, artist_mbid.
'data.frame' returns a data.frame of common elements of interest.
'list' will return the full XML results as a list}
\item{type}{type of object to return.}
}
\value{
a data.frame or list containing the data from the API call
}
\description{
Search for artists in the database
}
\examples{
get_artist('slayer')
get_artist('slayer',type='list')
}
|
14f94934f1325bc47a3016e7c2cb371f06c55a95
|
28e16491dbddefeef482f515bb84f8cbf619929a
|
/tools/scde_pathprint/similar-experiments.r
|
88a88da486ff92dddb4dc7971af236bd97faa79b
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hidelab/galaxy-central-hpc
|
a9e469829cdcfa4adf8fcbcc178534d5e2dccf0b
|
75539db90abe90377db95718f83cafa7cfa43301
|
refs/heads/master
| 2021-01-23T10:20:57.837042
| 2017-09-06T14:15:46
| 2017-09-06T14:15:46
| 102,609,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,000
|
r
|
similar-experiments.r
|
#!/usr/bin/env Rscript
# Compute a consensus pathprint fingerprint for the input experiment and
# report the GEO experiments closest to it.
#sink(file("/dev/null", "w"), type = "message");
library(pathprint)
# Figure out the relative path to the galaxy-pathprint.r library.
script.args <- commandArgs(trailingOnly = FALSE)
script.name <- sub("--file=", "", script.args[grep("--file=", script.args)])
script.base <- dirname(script.name)
library.path <- file.path(script.base, "galaxy-pathprint.r")
source(library.path)
# Print the expected invocation and abort.
usage <- function() {
    stop("Usage: similar_experiments.r <input> <output>", call. = FALSE)
}
## Get the command line arguments.
# BUG FIX: the original used scalar ifelse(!is.na(args[i]), args[i], usage())
# — ifelse() is vectorised and unsuitable for control flow; validate the
# argument count explicitly instead.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2 || any(is.na(args[1:2]))) {
    usage()
}
input <- args[1]
output <- args[2]
# Fingerprint agreement threshold used for the consensus.
threshold <- 0.8
fingerprint <- loadFingerprint(input)
consensus <- consensusFingerprint(fingerprint, threshold)
distance <- calculateDistanceToGEO(consensus)
# Display only the first 50 closest experiments
#distance <- distance[1:50,]
generateSimilarExperiments(distance, output)
quit("no", 0)
|
0199e70f6384966e6f46e3f258f7c62952d78aed
|
d0ed7bfa76308345e7e7cbc863267d3842637145
|
/exec/Rpackages_install
|
f0764db8d215e666b80c7f6163254735abccc491
|
[] |
no_license
|
lbraglia/lbmisc
|
b392fd9cf40bd7682eedc21c0beb9ca75d4332ec
|
f1d536507e4e83d0b93f997e1ee693f868383b67
|
refs/heads/master
| 2023-08-11T11:11:11.351492
| 2023-07-29T13:33:14
| 2023-07-29T13:33:14
| 49,871,637
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
Rpackages_install
|
#!/usr/bin/Rscript --vanilla --quiet
# usage: Rpackages_install pkg1 pkg2 ..
# Install the packages named on the command line into the per-user library
# for the running R version.
pkgs <- commandArgs(trailingOnly = TRUE)
if (length(pkgs) == 0L) {
  stop("usage: Rpackages_install pkg1 pkg2 ..", call. = FALSE)
}
# Build "major.minor" from the version components.
# BUG FIX: the original strtrim(paste(major, minor, sep = '.'), 3) breaks
# once the minor version has two digits (e.g. "3.10.1" would become "3.1").
R_vrs <- with(R.Version(),
              paste(major, strsplit(minor, ".", fixed = TRUE)[[1]][1], sep = "."))
R_userdir <- file.path("~/R/x86_64-pc-linux-gnu-library", R_vrs)
install.packages(pkgs = pkgs,
                 ## repos = 'http://cran.rstudio.com',
                 repos = "http://cloud.r-project.org",
                 lib = R_userdir)
|
|
99e6d41a67f4ea3dc4c196428a51f4c17cea386f
|
a693cd98dd57799c39a48c3b2433d91dbc7d1217
|
/R/invoicer.R
|
efe1a11f77026acb3b16f214808648128efb2d00
|
[] |
no_license
|
anthonypileggi/invoicer
|
c9bf75904fa30f9f66ff098b794e6391e01c40ed
|
5bfbf876a7bc03fb6e82055f00427e3d46f4dffb
|
refs/heads/master
| 2020-03-29T06:34:17.966329
| 2018-11-12T14:22:06
| 2018-11-12T14:22:06
| 149,630,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,124
|
r
|
invoicer.R
|
#' Generate an invoice
#'
#' Pulls time-tracking data from a Google Sheet, filters it to one client and
#' date range, renders an invoice (HTML, then a PDF screenshot via webshot),
#' uploads the PDF to Google Drive, and records a summary row back into the
#' "invoices" worksheet of the sheet.
#'
#' @param client client name (character/scalar)
#' @param start_date first day of work (Date/scalar)
#' @param end_date last day of work (Date/scalar)
#' @param due_date date the invoice is due (Date/scalar)
#' @param include_dates include a Date column in the invoice (logical/scalar)
#' @param aggregate aggregate projects across dates (logical/scalar)
#' @param key googlesheets key
#' @return A one-row summary data frame with \code{start_date}/\code{end_date}
#'   converted to \code{Date}; or the string "No invoice will be generated."
#'   when the filtered data yields nothing billable.
#' @export
invoicer <- function(client,
                     start_date,
                     end_date,
                     due_date = Sys.Date() + 14,
                     include_dates = TRUE,
                     aggregate = TRUE,
                     key = Sys.getenv("INVOICER_GS_KEY")) {
  # load/prepare data based on params
  x <- invoicer_get_data(key)
  xs <- invoicer_filter_data(x, client = client, start_date = start_date, end_date = end_date)
  # check if an invoice should be created; bail out early if not
  if (!invoicer_check_data(xs))
    return("No invoice will be generated.")
  # save data (to pass to invoice); the temp .rds is the hand-off to the
  # report renderer and is deleted right after rendering
  rds_file <- tempfile(fileext = ".rds")
  saveRDS(xs, rds_file)
  # generate a new invoice (in current working directory)
  html_file <- invoicer_create(rds_file, due_date, include_dates, aggregate)
  unlink(rds_file)
  # generate a 'pdf' (via screenshot of the rendered HTML)
  pdf_file <- stringr::str_replace(html_file, ".html", ".pdf")
  webshot::webshot(html_file, pdf_file)
  # store invoice_{#}.pdf on googledrive (interactive auth may be required)
  googledrive::drive_auth()
  gd <- googledrive::drive_upload(pdf_file)
  # file cleanup (intentionally disabled: local copies are kept)
  #unlink(html_file)
  #unlink(pdf_file)
  # generate invoice summary, annotated with the Drive id and download link
  invoice_summary <-
    dplyr::mutate(
      invoicer_summary(xs),
      drive_id = gd$id,
      file = gd$drive_resource[[1]]$webContentLink
    )
  # write invoice summary to 'invoice' tab of google sheet
  invoicer_record(key = key, ws = "invoices", x = invoice_summary)
  # create user message
  msg <- dplyr::mutate(invoice_summary, msg = paste0("Generated an invoice for ", client, " for $", total, "."))$msg
  message(msg)
  # return the summary with date columns parsed back to Date
  dplyr::mutate_at(invoice_summary, c("start_date", "end_date"), as.Date, format = c("%m/%d/%Y"))
}
|
73a4a7a5353b93429d07daea83d32489a01f153a
|
1e4e481248ec82522ba86875795f778b32402fa2
|
/cachematrix.R
|
eb974459c1f567e844dae1b506a19c7c309bba72
|
[] |
no_license
|
carnunez/ProgrammingAssignment2
|
373e62763efb09953d393499eaf5e4df3b896225
|
1caaf6706a049e40e8c2c5cb366b973782c35b99
|
refs/heads/master
| 2021-01-17T18:57:35.157026
| 2015-01-19T21:13:50
| 2015-01-19T21:13:50
| 29,484,150
| 0
| 0
| null | 2015-01-19T18:07:49
| 2015-01-19T18:07:46
| null |
UTF-8
|
R
| false
| false
| 1,958
|
r
|
cachematrix.R
|
## The first function is used to set up a cache variable = inv by using (<<-). It returns a list of
## four functions
## The second function uses three functions of these four (getinverse, get and setinverse).
## * The first one is to check whether "inv" was previously calculated. If TRUE, then return
## the cached value.
## * Otherwise uses function (get) to get matrix data and (setinverse) to calculate "inv" and
## cache its value
## FUNCTION1: Creates a special "matrix" object that can cache its inverse.
# Build a caching wrapper around a matrix. The returned accessor functions
# all close over this call's environment, where the cached inverse lives.
# Note: `set` is defined but deliberately not exported in the result list.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    # replacing the matrix invalidates any previously cached inverse
    x <<- value
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## *************************************************************************************************
## FUNCTION2: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve
## should retrieve the inverse from the cache.
# Return the inverse of the special "matrix" created by makeCacheMatrix.
# Serves the cached inverse when available (announcing the cache hit);
# otherwise computes it with solve() and stores it for next time.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## *************************************************************************************************
## TEST:
#> a<-matrix(1:4,2,2)
#> a
# [,1] [,2]
#[1,] 1 3
#[2,] 2 4
#> b<-makeCacheMatrix(matrix(1:4,2,2))
#> cacheSolve(b)
# [,1] [,2]
#[1,] -2 1.5
#[2,] 1 -0.5
#> cacheSolve(b)
#getting cached data
# [,1] [,2]
#[1,] -2 1.5
#[2,] 1 -0.5
|
1c6afd85d470b24fad3ebdfc2c6bb6db858682f1
|
4d4b524b7d5e1cbb1be26e391144cfa6cbce72f4
|
/man/wtInputFileLines.Rd
|
58b90276563c5a910dceecca8576c179fa2c0b9d
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.wtaq
|
e6afc2455e5a828e8d5c98c19868df2d13c505a8
|
f05c8cb30c48051c7f4b856c40571bae08facc27
|
refs/heads/master
| 2022-06-28T07:47:09.950237
| 2022-06-12T08:31:16
| 2022-06-12T08:31:16
| 60,534,495
| 3
| 0
|
MIT
| 2022-06-12T08:29:57
| 2016-06-06T14:26:52
|
Fortran
|
UTF-8
|
R
| false
| false
| 906
|
rd
|
wtInputFileLines.Rd
|
\name{wtInputFileLines}
\alias{wtInputFileLines}
\title{text lines for WTAQ input file}
\description{This function transforms a WTAQ configuration as generated with
\code{\link{wtConfigure}} into a vecotor of text lines. These text lines,
written to a file, can be used as input file to the WTAQ drawdown modelling
software.}
\usage{wtInputFileLines(configuration = wtConfigure(), sep = "\\t\\t",
dbg = FALSE)}
\arguments{
\item{configuration}{WTAQ configuration as generated by \code{\link{wtConfigure}}.}
\item{sep}{Separator to be placed between parameter values and parameter names.
Default: two tab characters. }
\item{dbg}{if TRUE, debug message are shown, else not. Default: FALSE}
}
\value{character vector with each element representing one row of the input file.}
\author{Hauke Sonnenberg}
\seealso{\code{\link{wtReadInputFile}}}
|
007e3da0d81212d63b6a4f861b9f637f84ab8346
|
8101e32331307177ed0c6db631c2b4603e813af7
|
/plot2.R
|
44180b2ca6400c82d5fcf14f59b1380b00ea0ef5
|
[] |
no_license
|
xuemike/Exploratory-data-Analysis
|
4c70d2de36baed6e670169aa65cfe722beecd1a8
|
e222f508ca4aaa24c19790564bb122c7f1855065
|
refs/heads/master
| 2021-01-21T12:59:21.229416
| 2016-04-25T00:36:45
| 2016-04-25T00:36:45
| 52,212,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
plot2.R
|
# Exploratory Data Analysis Course Project 1 -- plot2.R
# Builds plot2.png: Global Active Power (kilowatts) over the two analysis days.
# FIX: the original saved to "plot1.png" even though this is plot2.R.

# Read the raw power-consumption data (semicolon-separated text file).
data <- read.csv("./household_power_consumption.txt", sep=";", stringsAsFactors=TRUE)

# Keep only the two days of interest.
subData <- data[data$Date %in% c("1/7/2007","2/7/2007") ,]

# Combine Date + Time into a single POSIXct-compatible timestamp for the x-axis.
subData$timestamp <- strptime(paste(subData$Date, subData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
subData$globalActivePower <- as.numeric(subData$Global_active_power)

# Draw directly to the PNG device (more faithful sizing than plotting to the
# screen and dev.copy()ing), then close the device.
png("plot2.png", width=480, height=480)
plot(subData$timestamp, subData$globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
1af4ee257339adb2ee355e56017f32d541be6943
|
a6214d7ecd758270d27592c6affe5df3bfd316a2
|
/ledgerplots/man/quarterly.price.Rd
|
ba280379077cf73571dbe111ca99928889602b5a
|
[
"MIT"
] |
permissive
|
RastiGiG/ledger-plots
|
3c56fa0a98f0f347ad4a2045742f3d70f76913ff
|
b8ddb3bf32d51f9ad01ec60cb259fe9815495d38
|
refs/heads/master
| 2023-03-18T00:09:24.522680
| 2018-11-26T20:38:41
| 2018-11-26T20:38:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 297
|
rd
|
quarterly.price.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistic-functions.R
\name{quarterly.price}
\alias{quarterly.price}
\title{Quarterly average price}
\usage{
quarterly.price(x)
}
\arguments{
\item{x}{a numeric vector}
}
\description{
calculate the quarterly average price
}
|
1e9771179ad3f325b3a81f02f26b26f28be44b0f
|
55544bfa2ef73067d9af918c0da41cec66ff368e
|
/man/gameprice.Rd
|
71d02e592714a988556b1c6f4ba4c6d64e2be182
|
[] |
no_license
|
drewlake/steamR
|
c65fb79417fcb37394f54125dd7120e96ef83c63
|
29e31d537ed66d24e689788557a49dd1233e2ea9
|
refs/heads/master
| 2016-08-11T10:02:50.702583
| 2016-04-04T10:59:53
| 2016-04-04T10:59:53
| 45,043,830
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 291
|
rd
|
gameprice.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gameprice.R
\name{gameprice}
\alias{gameprice}
\title{Game Price}
\usage{
gameprice(gameid = "220")
}
\arguments{
\item{gameid}{Steam game id}
}
\description{
Price of a game
}
\note{
Maximum of 200 per 5 mins
}
|
d4750a28cf3959c759238cfb34b6339d3b2e8966
|
7047bc16c6043d7f8866e11ce0d2843840d2aef7
|
/man/randomizationInference-package.Rd
|
871f5241dc66f9aba6b4eb06824a6a44d2cedc93
|
[] |
no_license
|
cran/randomizationInference
|
0c2b78ae6371f7d44796936a7d41bbb4467304ad
|
7abaf184368540e342625c79baca80a6e5952224
|
refs/heads/master
| 2022-05-30T18:32:09.780295
| 2022-05-17T19:00:02
| 2022-05-17T19:00:02
| 17,699,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,835
|
rd
|
randomizationInference-package.Rd
|
\name{randomizationInference}
\alias{randomizationInference-package}
\alias{randomizationInference}
\docType{package}
\title{Flexible Randomization-Based Inference}
\description{Randomization-based p-values and null intervals for a wide variety of experimental scenarios, with corresponding visualizations.}
\details{
\tabular{ll}{
Package: \tab randomizationInference\cr
Type: \tab Package\cr
Version: \tab 1.0.4\cr
Date: \tab 2022-05-17\cr
License: \tab GPL-2\cr
Main functions: \tab \code{\link{randTest}}, \code{\link{randPlot}}, \code{\link{randInterval}}\cr
}
The \code{randomizationInference} package conducts randomization-based inference for a wide variety
of experimental scenarios. The package leverages a potential outcomes framework to output
randomization-based p-values and null intervals for test statistics geared toward any estimands of
interest, according to the specified null and alternative hypotheses. Users can define custom
randomization schemes so that the randomization distributions are accurate for their experimental
settings. The package also creates visualizations of randomization distributions and can test
multiple test statistics simultaneously.
}
\author{
Joseph J. Lee and Tirthankar Dasgupta
Maintainer: Joseph J. Lee <joseph.j.lee@post.harvard.edu>
}
\references{
Wu, C. F. J. and Hamada, M. (2009) Experiments, Planning, Analysis and Optimization (2nd ed), Wiley.
Moore, David S., and George P. McCabe (1989). Introduction to the Practice of Statistics.
}
\examples{
# Completely randomized design example
# with one treatment factor at two levels
w <- c(rep(0, 5), rep(1, 5))
y <- rnorm(10, mean = 0, sd = 1)
# Two-sided test
twoSidedTest <- randTest(y, w, nrand = 50, calcTestStat = diffMeans)
randInterval(twoSidedTest)
randPlot(twoSidedTest)
# One-sided test
oneSidedTest <- randTest(
y, w,
nrand = 50,
calcTestStat = diffMeans,
alternative = "greater"
)
# Two-sided test with non-zero null hypothesis
nonZeroTest <- randTest(
y,
w,
nrand = 50,
calcTestStat = diffMeans,
calcPO = constEffect,
poOptions = list(tau = 2),
null = 2
)
# Randomized block design example
# with one treatment factor at three levels
x <- rep(1:3, 4)
w_block <- rep(1:4, 3)
y_block <- rnorm(12, mean = x, sd = 1)
blockTest <- randTest(
y_block,
w_block,
nrand = 50,
calcTestStat = anovaF,
calcOptions = list(block = x),
randOptions = list(type = "block", block = x)
)
randInterval(blockTest)
randPlot(blockTest)
# 4x4 Latin square example (from the Wu/Hamada reference)
row <- rep(1:4, 4)
col <- c(rep(1, 4), rep(2, 4), rep(3, 4), rep(4, 4))
w_latin <- c(
"C", "D", "B", "A", "A", "B", "D", "C",
"D", "C", "A", "B", "B", "A", "C", "D"
)
y_latin <- c(
235, 236, 218, 268, 251, 241, 227, 229,
234, 273, 274, 226, 195, 270, 230, 225
)
latinTest <- randTest(
y_latin,
w_latin,
nrand = 50,
calcTestStat = anovaF,
calcOptions = list(row = row, col = col),
randOptions = list(type = "Latin", row = row, col = col)
)
randInterval(latinTest)
randPlot(latinTest)
# User-defined randomization example
# Partial randomization: first four assignments are fixed
# Due to physical limitations
# User-defined randomization function
# Input: number of random assignments, function options
# Output: list of random assignments
myRand <- function(nrand, userOptions = NULL){
w_fixed = c(0, 0, 1, 1)
lapply(1:nrand, function(i) c(w_fixed, sample(rep(0:1, 5))))
}
w_user <- c(c(0, 0, 1, 1), c(0, 1, 1, 0, 0, 0, 1, 1, 0, 1)) # observed assignment
y_user <- rnorm(14, mean = 0, sd = 1)
userTest <- randTest(
y_user,
w_user,
nrand = 50,
calcTestStat = diffMeans,
randOptions = list(type = "user.defined"),
userRand = myRand
)
randInterval(userTest)
randPlot(userTest)
# 2^3 factorial design example
# three treatment factors (OT, CP, and ST) at two levels each
OT <- c(-1, -1, -1, -1, 1, 1, 1, 1)
CP <- c(-1, -1, 1, 1, -1, -1, 1, 1)
ST <- rep(c(-1, 1), 4)
w_fac <- cbind(OT, CP, ST)
y_fac <- c(67, 79, 61, 75, 59, 90, 52, 87)
# Testing the main effect of factor "OT"
facTest1 <- randTest(
y_fac,
w_fac,
nrand = 50,
calcTestStat = diffMeans,
calcOptions = list(factor = 1, pair = c(-1, 1))
)
# Testing all three main effects simultaneously
facTest2 <- randTest(
y_fac,
w_fac,
nrand = 50,
calcTestStat = diffMeansVector,
calcOptions = list(
factors = 1:3,
pairs = matrix(rep(c(-1, 1), 3), ncol = 2, byrow = TRUE)
)
)
# Testing all contrasts simultaneously
w_facNew <- cbind(OT, CP, ST, OT*CP, OT * ST, CP * ST, OT * CP * ST)
facTest3 <- randTest(
y_fac,
w_facNew,
nrand = 50,
calcTestStat = diffMeansVector,
calcOptions = list(
factors = 1:7,
pairs = matrix(rep(c(-1, 1), 7), ncol = 2, byrow = TRUE)
)
)
randInterval(facTest3)
randPlot(facTest3, plotDim = c(2, 4))
# Reading comprehension pre- and post-test example
data(reading)
# Ignoring blocks
readingTest1 <- randTest(
y = reading$Diff1,
w = reading$Group,
nrand = 50,
calcTestStat = anovaF
)
# Testing within-block pairwise effects
readingTest2 <- randTest(
y = reading$Diff1,
w = reading$Group,
nrand = 50,
calcTestStat = withinBlockEffects,
calcOptions = list(
block = reading$Block,
pairs = rbind(
c("Basal", "DRTA"),
c("Basal", "Strat"),
c("DRTA", "Strat"),
c("Basal", "DRTA"),
c("Basal", "Strat"),
c("DRTA", "Strat")
),
blockindex = c(rep(1, 3), rep(2, 3))
),
randOptions = list(type = "block", block = reading$Block)
)
randInterval(readingTest2)
randPlot(readingTest2, plotDim = c(2, 3))
}
\keyword{package}
|
cd3114e27b5d9bbc6bb1708b800f57980dbbe0e9
|
a80a8065bc607f04dff519a034aa222177791f55
|
/Plot6.R
|
5e28c8ce6341c5dbba57862659e26b8354e7d47e
|
[] |
no_license
|
rhllnk/Exploratory-Data-Analysis---Project-2
|
77c5a0563735853530bb7b890fffa8759c596d2c
|
f6ca2439f85c8f9d7b7c6a0a1b9595a35315d722
|
refs/heads/master
| 2020-12-25T14:24:10.946427
| 2016-09-04T21:14:21
| 2016-09-04T21:14:21
| 67,366,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,693
|
r
|
Plot6.R
|
# Exploratory Data Analysis - Project 2, Plot 6
# Compares motor-vehicle PM2.5 emissions in Baltimore City vs Los Angeles County.
# FIX: the original called qplot() without loading ggplot2.

library(ggplot2)  # qplot() lives here; was missing in the original script

# Read the data (pre-step: working directory contains the two .rds files).
Nei <- readRDS("summarySCC_PM25.rds")
Scc <- readRDS("Source_Classification_Code.rds")

# Baltimore City (fips 24510) and Los Angeles County (fips 06037).
BaltimoreData <- subset(Nei, fips == "24510")
LAdata <- subset(Nei, fips == "06037")

# Motor-vehicle sources = on-road mobile sources in the SCC classification.
MobileData <- Scc[grepl("Mobile - On-Road", Scc$EI.Sector, ignore.case = TRUE), ]

# Restrict each county to motor-vehicle records, then stack them.
BaltimoreMbldata <- subset(BaltimoreData, SCC %in% MobileData$SCC)
LAMbldata <- subset(LAdata, SCC %in% MobileData$SCC)
CombinedData <- rbind(BaltimoreMbldata, LAMbldata)

# Total emissions per year and county.
plotdata <- aggregate(Emissions ~ year + fips, CombinedData, sum)

# Human-readable county labels for the plot legend.
plotdata$County <- NA
plotdata$County[plotdata$fips == "06037"] <- "LA County"
plotdata$County[plotdata$fips == "24510"] <- "Baltimore"

# Render to PNG; print() forces the ggplot to draw when the script is source()d.
png("plot6.png", width=480, height=480)
print(qplot(year, Emissions, data = plotdata, geom = "line", color = County,
            main = "Baltimore vs Los Angeles County", xlab = "year",
            ylab = "Motor Vehicle" ~ PM[2.5] ~ "(tons)"))
dev.off()
|
bf353be8f23cbe7311439745b1396fa4809929ae
|
aece010c3572eaf59a791569ae60fec62a260ee6
|
/man/updatepars.msm.Rd
|
9468c25d505294a638e47e9981efbbac3b631fda
|
[] |
no_license
|
cran/msm
|
edb92247a14b77f5a6726a80623884f29cce20e2
|
fa420503596991f9e0c5e903474c1e24954c9451
|
refs/heads/master
| 2022-12-03T03:59:27.043063
| 2022-11-28T16:30:02
| 2022-11-28T16:30:02
| 17,697,695
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
rd
|
updatepars.msm.Rd
|
\name{updatepars.msm}
\alias{updatepars.msm}
\title{updatepars.msm}
\description{
Update the maximum likelihood estimates in a fitted model object. Developer use only.
}
\usage{
updatepars.msm(x, pars)
}
\arguments{
\item{x}{A fitted multi-state model object, as returned by
\code{\link{msm}}.}
\item{pars}{Vector of new parameters, in their untransformed real-line
parameterisations, to substitute for the maximum likelihood
estimates corresponding to those in the \code{estimates} component
of the fitted model object (\code{\link{msm.object}}). The
order of the parameters is documented in \code{\link{msm}},
argument \code{fixedpars}.}
}
\value{
An updated \code{\link{msm}} model object with the updated maximum likelihood
estimates, but with the covariances / standard errors unchanged.
Point estimates from output functions such as \code{\link{qmatrix.msm}},
\code{\link{pmatrix.msm}}, or any related function, can then be evaluated
with the new parameters, and at arbitrary covariate values.
This function is used, for example, when computing confidence
intervals from \code{\link{pmatrix.msm}}, and related functions, using
the \code{ci="normal"} method.
}
\author{C. H. Jackson \email{chris.jackson@mrc-bsu.cam.ac.uk}}
\keyword{models}
|
8d7df25e0545d292480a6bc02e8ba7fa25019a59
|
6b769ade12829c97f7aa9930265418bede361967
|
/R/table10_15.R
|
6cdd654bb73a1deb743deedb96d45d7a6d9eb01d
|
[] |
no_license
|
brunoruas2/gujarati
|
67b579a4fde34ae1d57f4a4fd44e5142285663c1
|
a532f2735f9fa8d7cd6958e0fc54e14720052fd4
|
refs/heads/master
| 2023-04-07T01:15:45.919702
| 2021-04-04T15:10:58
| 2021-04-04T15:10:58
| 284,496,240
| 14
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
table10_15.R
|
#' Table 10_15
#'
#' Hours of Work and Other Data for 35 Groups
#' Source: D. H. Greenberg and M. Kosters, Income Guarantees and the Working Poor, Rand Corporation, R-579-OEO, December 1970
#'
#' @docType data
#' @usage gujarati::Table10_15
#' @format
#'
#' \itemize{
#' \item \strong{obs}
#' \item \strong{HRS: }average hours worked during the year
#' \item \strong{RATE: }average hourly wage (dollars)
#' \item \strong{ERSP: }average yearly earnings of spouse (dollars).
#' \item \strong{ERNO: } average yearly earnings of other family members (dollars).
#' \item \strong{NEIN: }average yearly nonearned income
#' \item \strong{ASSET: }average family asset holdings (bank account, etc.) (dollars)
#' \item \strong{AGE: }average age of respondent
#' \item \strong{DEP: } average number of dependents
#' \item \strong{SCHOOL: }average highest grade of school completed.
#' }
'Table10_15'
|
fe0e23b033c99b69b2b0267405b8d56f6fee1368
|
26eb22e908b0c93d0ddcb17cd0eddd776ac37cbb
|
/Scrap EPEX SPOT/EPEXScraperFunction.R
|
1f695b8c788749e9dfe6ecda9820ba36b784c549
|
[] |
no_license
|
trunghlt/YENCHUN-L
|
2b2fc20c62582ef761d25fe31b2ac34620b1babb
|
60ced89ba63b3cae665c1ada7c21843eb928da68
|
refs/heads/master
| 2023-03-22T13:48:11.141690
| 2020-11-26T15:41:38
| 2020-11-26T15:41:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,520
|
r
|
EPEXScraperFunction.R
|
# https://github.com/wagnertimo/emarketcrawlR
# This R package provides functions to crawl the EPEX SPOT Market
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
#' Toggle console logging for the crawler functions
#'
#' Stores the flag in the global option "logging"; the crawler functions
#' consult it via getOption("logging") before printing progress output.
#'
#' @param logger logical scalar; TRUE enables console outputs/logs
#' @return the logger flag, invisibly
#' @export
setLogging <- function(logger) {
  options(logging = logger)
  # Plain if/else instead of scalar ifelse(); isTRUE() also guards against
  # NA or non-logical input (the old `logger == TRUE` could yield NA).
  if (isTRUE(logger)) {
    print("Outputs/logs will be displayed!")
  } else {
    print("No console outputs/logs will be displayed!")
  }
  invisible(logger)
}
#' @title getIntradayContinuousEPEXSPOT
#'
#' @description Returns the price data of the EPEX SPOT Continuous Intraday
#' Trading for a time period. The EPEX website shows two days per table page
#' and only accepts single-date requests (no period option), so this function
#' walks backwards from the end date in steps of two days and parses each page.
#' Product availability: Germany has 60/30/15-min products (15-min since
#' Dec 2011), Switzerland has 15-min since Jun 2013, France has only 60/30-min.
#' Example link for 2017-05-25, German/Austrian market:
#' https://www.epexspot.com/en/market-data/intradaycontinuous/intraday-table/2017-05-25/DE
#'
#' @param startDate - Set the start date for the price data period ("YYYY-MM-DD")
#' @param endDate - Set the end date for the price data period ("YYYY-MM-DD")
#' @param product - Which product to crawl: hourly ("60"), 30min ("30") or 15min ("15"). Default "60".
#' @param country - Country to crawl: "DE" (default), "FR" (France) or "CH" (Swiss).
#'
#' @return a data.frame with DateTime as POSIXct object and the continuous
#'   intraday trading prices, trimmed to start at `startDate`.
#'
#' @examples
#' h <- getIntradayContinuousEPEXSPOT("2017-05-20", "2017-05-26", "60")
#'
#' @export
#'
getIntradayContinuousEPEXSPOT <- function(startDate, endDate, product = "60", country = "DE") {
  library(logging)
  library(httr)
  library(XML)
  library(dplyr)

  # France has no 15-min products --> silently fall back to the hourly default.
  if(country == "FR" & product == "15"){
    product = "60"
    print("There are no 15min products for France --> Changed to default: 60min")
  }

  # Setup the logger and handlers
  # (level: debug(10), info(20), warn(30), critical(40) // setLevel())
  basicConfig(level="DEBUG")

  sdate <- as.Date(startDate, "%Y-%m-%d", tz = "Europe/Berlin")
  edate <- as.Date(endDate, "%Y-%m-%d", tz = "Europe/Berlin")

  # One request covers two days, so iterate the date range in steps of two,
  # starting at the LAST date: the site table then also shows the day before.
  dates_array = seq(sdate, edate, by = "days")

  r = data.frame()

  # Init progress bar (max is the number of iterations, not pages).
  if(getOption("logging")) pb <- txtProgressBar(min = 0, max = ifelse(length(dates_array) > 1, length(dates_array) - 1, length(dates_array)), style = 3)

  for(i in seq(length(dates_array), 1, -2)) {
    if(getOption("logging")) loginfo(paste("getIntradayContinuousEPEXSPOT - Call for: ", dates_array[i], " and ", dates_array[i-1], " | REMEBER 2 dates on site!"))

    url = paste("https://www.epexspot.com/en/market-data/intradaycontinuous/intraday-table/", dates_array[i], "/", country, sep="")
    payload = list();
    # Empty form POST; the date/country are encoded in the URL path.
    postResponse <- POST(url, body = payload, encode = "form")
    parsedHtml <- htmlParse(content(postResponse, "text", encoding = "UTF-8"))

    # Prepend so rows stay in chronological order (we iterate backwards).
    r <- rbind(parseICEPEXSPOT(parsedHtml, product, country), r)

    # update progress bar
    if(getOption("logging")) setTxtProgressBar(pb, length(dates_array) - i + 1)
  }

  # Close the progress bar
  if(getOption("logging")) close(pb)

  # Drop the possible extra leading day pulled in by the 2-days-per-page table.
  # Do not re-arrange(): rows are already ordered, and sorting would break DST days.
  r <- r %>% filter(format(DateTime, "%Y-%m-%d") >= sdate)

  if(getOption("logging")) loginfo(paste("getIntradayContinuousEPEXSPOT - DONE"))

  return(r)
}
# Helper function for @seealso getIntradayContinuousEPEXSPOT
#
# Parses one EPEX table page, which carries data for TWO dates side by side.
# CAUTION: assumes exactly two dates are displayed (which the site appears to
# always do). Returns a data.frame with both days stacked chronologically.
parseICEPEXSPOT <- function(htmlDoc, product, country) {
  library(logging)
  library(XML)

  if(getOption("logging")) loginfo(paste("parseICEPEXSPOT - Parsing Continuous Intraday EPEX website with 2 dates"))

  # The two dates shown in the table header (dd/mm/yyyy).
  date_list <- as.Date(xpathSApply(htmlDoc, "id('content')/div/table/tbody/tr[1]/th[contains(@class, 'date')]/text()", saveXML), "%d/%m/%Y", tz = "Europe/Berlin")

  # Base and Peak index prices for both dates live in header cells 3..6,
  # formatted like "Base: 12.34" -> strip whitespace/newlines, split on ":".
  index_price_list <- xpathSApply(htmlDoc, "id('content')/div/table/tbody/tr/th[contains(@class, 'date')]/text()", saveXML)
  base_1 <- sapply(strsplit(gsub("\n", "", gsub(" ", "", index_price_list[3:6], fixed = TRUE)), ":"), function(x) as.numeric(x[2]))[1]
  peak_1 <- sapply(strsplit(gsub("\n", "", gsub(" ", "", index_price_list[3:6], fixed = TRUE)), ":"), function(x) as.numeric(x[2]))[2]
  base_2 <- sapply(strsplit(gsub("\n", "", gsub(" ", "", index_price_list[3:6], fixed = TRUE)), ":"), function(x) as.numeric(x[2]))[3]
  peak_2 <- sapply(strsplit(gsub("\n", "", gsub(" ", "", index_price_list[3:6], fixed = TRUE)), ":"), function(x) as.numeric(x[2]))[4]

  # Time slot labels, e.g. "00 - 01"; only the start ("00") is needed.
  times_list <- xpathSApply(htmlDoc, "id('content')/div/table/tbody/tr/td[contains(@class, 'title')]/text()", saveXML)
  # Clean the strings: remove newlines/whitespace and, on the DST+1 day,
  # the "a"/"b" suffixes of the duplicated 02a/02b hours.
  times_list <- gsub("a|b", "", gsub("\n", "", gsub(" ", "", times_list, fixed = TRUE)))

  # Pick out only the rows belonging to the requested product. The time-slot
  # list interleaves 60/30/15-min rows, so the index pattern differs per
  # product and per country (France has no 15-min rows, shifting the pattern).
  if(product == "60") {
    # 1h pattern: DE every 7th entry (1, 8, 15, ...); FR every 3rd (1, 4, 7, ...).
    start_freq <- if(country == "FR") seq(1, length(times_list)-1, 3) else seq(1, length(times_list)-1, 7)
    # Set the xpath expression to get the right td elements
    xpath <- "id('content')/div/table/tbody/tr/td[contains(@class, 'toggle_30min_info_closed')]/../td/text()"
  }
  else if(product == "30") {
    # 30min pattern: DE +3 then +4 (2, 5, 9, 12, ...); FR +1 then +2 (2, 3, 5, 6, ...).
    start_freq <- if(country == "FR") cseq(2, length(times_list)-1, c(1,2)) else cseq(2, length(times_list)-1, c(3,4))
    # France needs a different xpath because it has no 15-min sub-rows.
    xpath <- if(country == "FR") "id('content')/div/table/tbody/tr[contains(@id, 'intra_30')]/td/text()" else "id('content')/div/table/tbody/tr/td[contains(@class, 'toggle_15min_info_closed')]/../td/text()"
  }
  else if(product == "15") {
    # 15min pattern: +1 +2 +1 +3 cycling (3, 4, 6, 7, 10, 11, 13, 14, 17, ...).
    start_freq <- cseq(3, length(times_list)-1, c(1,2,1,3))
    # Set the xpath expression to get the right td elements
    xpath <- "id('content')/div/table/tbody/tr[contains(@id, 'intra_15')]/td/text()"
  }
  else{
    print("WRONG PRODUCT CODE - CHOOSE 60, 30 or 15 as character input!")
  }

  # Keep only the slot start times ("00-01" -> "00").
  times_list <- sapply(strsplit(times_list[start_freq], "-"), function(x) x[1])
  # Hourly labels get ":00" appended for a nice "00:00" format.
  times_list <- if(product == "60") paste(times_list, ":00", sep="") else paste(times_list, "", sep="")

  # All data cells of the selected product rows (prices, volumes, times, ...).
  tds_list <- xpathSApply(htmlDoc, xpath, saveXML)

  # One data.frame per displayed date, keyed by DateTime.
  df1 <- data.frame(DateTime = as.POSIXct(c(paste(date_list[1], times_list)), format = "%Y-%m-%d %H:%M", tz = "Europe/Berlin"))
  df2 <- data.frame(DateTime = as.POSIXct(c(paste(date_list[2], times_list)), format = "%Y-%m-%d %H:%M", tz = "Europe/Berlin"))

  # DE rows have 19 cells (two extra columns vs FR/CH), hence the offsets.
  end <- if(country == "DE") 19 else 17
  shift1 <- if(country == "DE") 0 else 1
  shift2 <- if(country == "DE") 0 else 2

  # Column-by-column extraction; columns up to index 10 belong to date 1,
  # the rest to date 2.
  for(i in 2:end) {
    # after 10 the next date starts
    if(i < (11 - shift1)) {
      # Buy/sell volume cells use "," as thousands separator -> strip it.
      if(i == (9 - shift1) | i == (10 - shift1)) column <- as.numeric(gsub(",", "", tds_list[seq(i, length(tds_list), (19 - shift2))])) else column <- as.numeric(tds_list[seq(i, length(tds_list)-1, (19 - shift2))])
      df1 <- cbind(df1, i = column)
    }
    else {
      # Buy/sell volume cells use "," as thousands separator -> strip it.
      if(i == (18 - shift2) | i == (19 - shift2)) column <- as.numeric(gsub(",", "", tds_list[seq(i, length(tds_list), (19 - shift2))])) else column <- as.numeric(tds_list[seq(i, length(tds_list)-1, (19 - shift2))])
      df2 <- cbind(df2, i = column)
    }
  }

  # Attach the daily Base/Peak index prices as constant columns.
  df1 <- cbind(df1, Index_Base = base_1, Index_Peak = peak_1)
  df2 <- cbind(df2, Index_Base = base_2, Index_Peak = peak_2)

  # Drop the spurious duplicated 2am rows on days adjacent to the DST switch.
  df1 = deleteExtraDSTHour(df1, product)
  df2 = deleteExtraDSTHour(df2, product)

  r <- rbind(df1, df2)

  # DE has the extra ID1 column; FR/CH do not.
  colnames(r) <- if(country == "DE") c("DateTime","Low","High","Last","Weighted_Avg","Idx","ID3", "ID1","Buy_Vol","Sell_Vol","Index_Base","Index_Peak") else c("DateTime","Low","High","Last","Weighted_Avg","Idx","ID3","Buy_Vol","Sell_Vol","Index_Base","Index_Peak")

  # Get rid of NA columns when there is DST+1
  # There are always two days to be crawled --> so either the day before or after DST has an extra 2 hour obs --> get rid off
  # if ((as.Date(dst_date, tz = "Europe/Berlin") - 1) == as.Date(r$DateTime, tz = "Europe/Berlin") |
  #     (as.Date(dst_date, tz = "Europe/Berlin") + 1) == as.Date(r$DateTime, tz = "Europe/Berlin")){
  #   r = r[!(hour(r$DateTime) == 2 & is.na(r$Low) & is.na(r$High) & is.na(r$Last)), ]
  # }
  # Get rid of NA columns when there is DST-1
  #r = r[!(hour(r$DateTime) == 1 & is.na(r$Low) & is.na(r$High) & is.na(r$Last)), ]

  return(r)
}
# Helper: integer sequence with a cyclic, possibly irregular step pattern.
# Starts at `from`, repeatedly adds the increments in `by` (cycled in order),
# and keeps every value that does not exceed `to`.
# e.g. cseq(2, 20, c(3, 4)) -> 2 5 9 12 16 19
cseq <- function(from, to, by){
  full_cycles <- (to - from) %/% sum(by)
  # one spare cycle of increments guarantees we overshoot `to`, then trim
  candidates <- cumsum(c(from, rep(by, full_cycles + 1)))
  candidates[candidates <= to]
}
# TRUE when `date` is the last Sunday of October, i.e. the day the clocks
# switch back (CEST -> CET) at the duplicated 2am hour.
isDSTDateInOctober <- function(date) {
  library(lubridate)
  dst_sunday <- as.Date(lastDayOfMonth(1, 10, year(date)), tz = "Europe/Berlin")
  dst_sunday == date
}
# Returns the date of the last occurrence of a given weekday in a month.
# `day` follows R's wday() convention: 1 = Sunday, 2 = Monday, ..., 7 = Saturday.
lastDayOfMonth <- function(day, month, year){
  library(lubridate)
  library(zoo)
  # Last calendar day of the requested month (frac = 1 -> end of month).
  month_end = as.Date(zoo::as.yearmon(paste(year,"-",month,"-01",sep = "")), frac = 1, tz = "Europe/Berlin")
  # How many days to step back from the month end to land on the target
  # weekday. The modulo collapses all three cases of the naive diff logic:
  # diff == 0 -> 0, diff > 0 -> diff, diff < 0 -> 7 + diff.
  step_back = (wday(month_end) - day) %% 7
  month_end - step_back
}
# Helper function
# Deletes the spurious extra 2am observations at the DST switch (CEST -> CET).
# Cause: the site table shows multiple dates at once; when one of them is the
# DST date (last Sunday in October) the OTHER dates on the page also get the
# duplicated 02a/02b hour rendered, which shows up here as extra 2am rows
# full of NA data. Only days directly before/after the DST date are affected
# (the table in e.g. @seealso getIntradayContinuousEPEXSPOT has 2 date columns).
#
# df   - one day of parsed data with a DateTime column
# time - product resolution as character: "15", "30" or "60"
deleteExtraDSTHour <- function(df, time) {
  library(dplyr)
  library(lubridate)

  # DST switch date (last Sunday of October) for this day's year.
  dst_date = lastDayOfMonth(1,10, year(as.Date(df$DateTime, tz = "Europe/Berlin")))
  attr(dst_date, "tzone") = "Europe/Berlin"

  # Only the day before or after the DST date can carry the phantom extra hour.
  if ((as.Date(dst_date, tz = "Europe/Berlin") - 1) == as.Date(df$DateTime, tz = "Europe/Berlin") |
      (as.Date(dst_date, tz = "Europe/Berlin") + 1) == as.Date(df$DateTime, tz = "Europe/Berlin")){

    # All rows falling in the 2am hour; a normal day has 4 (15min) / 2 (30min) / 1 (60min).
    d = df[hour(df$DateTime) == 2,]
    rows = 0

    if(time == "15") {
      # extra 2am hour occurred: more than the regular four 15-min rows
      if(nrow(d) > 4) {
        # keep the first four 2am rows, drop the duplicated last four
        rows = which(hour(df$DateTime) == 2)
        rows = rows[5:8]
        df = df[-rows,]
      }
    }
    else if(time == "30"){
      # NOTE(review): a 30-min day with the extra hour has 4 rows at 2am,
      # so `nrow(d) > 4` can seemingly never trigger — possibly should be
      # `> 2`. TODO confirm against a real DST-adjacent 30-min page.
      if(nrow(d) > 4) {
        # keep the first two 2am rows, drop the duplicated pair
        rows = which(hour(df$DateTime) == 2)
        rows = rows[3:4]
        df = df[-rows,]
      }
    }
    else if(time == "60") {
      # extra 2am hour occurred: more than the single regular hourly row
      if(nrow(d) > 1) {
        # keep the first 2am row, drop the duplicate
        rows = which(hour(df$DateTime) == 2)
        rows = rows[2]
        df = df[-rows,]
      }
    }
  }
  return(df)
}
#' @title getIntradayAuctionEPEXSPOT
#'
#' @description Returns the price data of the EPEX SPOT Intraday Auction for
#' a time period. German market only; 15-min data plus block prices with
#' base and peak. The site (https://www.epexspot.com/en/market-data/intradayauction)
#' always shows 7 days per page, where the date in the request link is the
#' LATEST date of the page — so this function walks backwards from the end
#' date in steps of seven days.
#'
#' @param startDate - Set the start date for the price data period ("YYYY-MM-DD")
#' @param endDate - Set the end date for the price data period ("YYYY-MM-DD")
#'
#' @return a data.frame with DateTime as POSIXct object and intraday auction
#'   price data: the 15min prices and volume plus daily OffPeak, OffPeak1,
#'   SunPeak, OffPeak2, BasePrice, BaseVolume, PeakPrice, PeakVolume columns.
#'
#' @examples
#' h <- getIntradayAuctionEPEXSPOT("2017-05-20", "2017-05-26")
#'
#' @export
#'
getIntradayAuctionEPEXSPOT <- function(startDate, endDate) {
  library(logging)
  library(httr)
  library(XML)
  library(dplyr)

  # Setup the logger and handlers
  # (level: debug(10), info(20), warn(30), critical(40) // setLevel())
  basicConfig(level="DEBUG")

  sdate <- as.Date(startDate, "%Y-%m-%d", tz = "Europe/Berlin")
  edate <- as.Date(endDate, "%Y-%m-%d", tz = "Europe/Berlin")

  # One request covers seven days, so iterate backwards from the last date
  # in steps of seven.
  dates_array = seq(sdate, edate, by="days")

  r = data.frame()

  # Init progress bar (max is the number of iterations, not pages).
  if(getOption("logging")) pb <- txtProgressBar(min = 0, max = ifelse(length(dates_array) > 1, length(dates_array) - 1, length(dates_array)), style = 3)

  for(i in seq(length(dates_array), 1, -7)) {
    if(getOption("logging")) loginfo(paste("getIntradayAuctionEPEXSPOT - Call for: ", dates_array[i], " - ", dates_array[i-6], " | REMEBER 7 dates on site!"))

    url = paste("https://www.epexspot.com/en/market-data/intradayauction/quarter-auction-table/", dates_array[i], "/DE", sep="")
    payload = list();
    # Empty form POST; date is encoded in the URL path (German market only).
    postResponse <- POST(url, body = payload, encode = "form")
    parsedHtml <- htmlParse(content(postResponse, "text", encoding = "UTF-8"))

    # Prepend so rows stay in chronological order (we iterate backwards).
    r <- rbind(parseIAEPEXSPOT(parsedHtml, dates_array[i]), r)

    # update progress bar
    if(getOption("logging")) setTxtProgressBar(pb, length(dates_array) - i + 1)
  }

  # Close the progress bar
  if(getOption("logging")) close(pb)

  # Drop the extra leading days pulled in by the 7-days-per-page table.
  r <- r %>% filter(format(DateTime, "%Y-%m-%d") >= sdate)

  if(getOption("logging")) loginfo(paste("getIntradayAuctionEPEXSPOT - DONE"))

  return(r)
}
#' Helper function for @seealso getIntradayAuctionEPEXSPOT
#'
parseIAEPEXSPOT <- function(htmlDoc, latestDate) {
# Parses one 7-day EPEX intraday-auction HTML page into a data.frame with
# 15-minute DateTime rows plus per-day block columns.
# NOTE(review): the latestDate parameter is never used in this function body.
# Get the hours --> check if length is 24 or in DST+1 (CEST-->CET) 25 // For DST-1 it is still 24 since 7 days are displayed
# --> FIRST read out the dates
# --> THEN read out the starting hours and build the total date string ---> !! For DST+1 there is 02a (- 02b) and 02b (- 03)
# Read out the dates --> id('tab_de')//span[contains(@class, 'date')]/text() # retrieves date range
dateRange = xpathSApply(htmlDoc, paste("id('tab_de')/div[1]/div/span/text()", sep = ""), saveXML)
# Delete the line break and the whitespaces and then split the date range on "-" to get the start and end date
dateRange = gsub(" ", "", gsub("\n", "", dateRange))
sd = strsplit(dateRange, "-")[[1]][1]
ed = strsplit(dateRange, "-")[[1]][2]
# Read Hours -->
hourRange = xpathSApply(htmlDoc, paste("//div[contains(@class, 'quarter_auction_hours')]/div//td/text()", sep = ""), saveXML)
hourRange = gsub(" ", "", gsub("\n", "", hourRange))
# Every third cell of the hour table is the starting hour of a row
hourVector = hourRange[seq(1,length(hourRange), 3)]
# Build DateTime List
# --> 02a and 02b are converted to 02 --> there will be two 02 hours
hourVector = paste(gsub("a|b", "", hourVector), ":00:00", sep="")
# adds the 15mins to the hours
hourVector = addQuartersToHourVector(hourVector)
# Get date range
dateRange = seq.Date(as.Date(sd, "%d/%m/%Y", tz = "Europe/Berlin"), as.Date(ed, "%d/%m/%Y", tz = "Europe/Berlin"), by = "1 day")
# Combine with all combinations the dates with the hours --> In cas of DST+1, dates will have also two 2hours --> those have to be removed after the values "-" are added
timeList = apply(expand.grid(hourVector, dateRange), 1, function(x) paste(x[2], x[1]))
# init the data.frame with the DateTimes of the seven days with 15minute interval
df1 <- data.frame(DateTime = timeList)
# xpath for 15mins (product)
x <- xpathSApply(htmlDoc, "id('quarter_auction_table_wrapp')/table/tbody/tr[contains(@class, 'hour')]/td/text()", saveXML)
# price is every ... entry. It starts with the earliest date till the latest date
# price starts with 4, vol with 12
# Each table row holds 18 cells (7 day columns twice plus labels); the day
# offset selects one day's column out of each row.
prices <- c()
vols <- c()
for(day in 0:6) {
prices <- c(prices, x[seq(4 + day, length(x), 18)])
vols <- c(vols, x[seq(12 + day, length(x), 18)])
}
df1 <- cbind(df1, Prices = prices, Volume = vols)
#xpath for block prices (product)
# id('tab_de')/x:table[2]/tbody/tr/td[contains(@class, 'title')]/../td
y <- xpathSApply(htmlDoc, "id('tab_de')/table[2]/tbody/tr/td[contains(@class, 'title')]/../td/text()", saveXML)
offPeak <- c()
offPeak1 <- c()
sunPeak <- c()
offPeak2 <- c()
# set the day offset in 15min sections: 96 * 15min = 24h in normal case and in DST+1 case 100 * 15min = 25h (4*15min = 60min = +1h)
# for 7days --> 672 15min times (96*7) with DST+1 there are 700 (additional 4*7=28)
dayoffset = ifelse(nrow(df1) > 672, 100, 96)
# Daily block values are repeated once per 15-minute slot of the day
for(day in 0:6) {
offPeak <- c(offPeak, rep(y[2 + day],dayoffset))
offPeak1 <- c(offPeak1, rep(y[18 + day],dayoffset))
sunPeak <- c(sunPeak, rep(y[34 + day],dayoffset))
offPeak2 <- c(offPeak2, rep(y[42 + day],dayoffset))
}
df1 <- cbind(df1, OffPeak = offPeak, OffPeak1 = offPeak1, SunPeak = sunPeak, OffPeak2 = offPeak2)
# xpath for base and peak loads prices (also in block prices xpath) AND VOLUME (product)
# id('tab_de')/x:table[1]/tbody/tr/td
z <- xpathSApply(htmlDoc, "id('tab_de')/table[1]/tbody/tr/td/text()", saveXML)
base_price <- c()
base_vol <- c()
peak_price <- c()
peak_load <- c()
for(day in 0:6) {
base_price <- c(base_price, rep(z[2 + day],dayoffset))
base_vol <- c(base_vol, rep(z[10 + day],dayoffset))
peak_price <- c(peak_price, rep(z[25 + day],dayoffset))
peak_load <- c(peak_load, rep(z[33 + day],dayoffset))
}
df1 <- cbind(df1, BasePrice = base_price, BaseVolume = base_vol, PeakPrice = peak_price, PeakVolume = peak_load)
#Format DataSet
# The scraped cells are character/factor; the levels-index trick converts a
# factor to numeric. NOTE(review): this assumes pre-R-4.0 stringsAsFactors
# behavior in data.frame()/cbind() -- confirm under the R version in use.
df1$DateTime = as.POSIXct(df1$DateTime, format = "%Y-%m-%d %H:%M:%S", tz = "Europe/Berlin")
df1$Prices = as.numeric(levels(df1$Prices))[df1$Prices]
df1$Volume = as.numeric(gsub(",", "", df1$Volume))
df1$OffPeak = as.numeric(levels(df1$OffPeak))[df1$OffPeak]
df1$OffPeak1 = as.numeric(levels(df1$OffPeak1))[df1$OffPeak1]
df1$SunPeak = as.numeric(levels(df1$SunPeak))[df1$SunPeak]
df1$OffPeak2 = as.numeric(levels(df1$OffPeak2))[df1$OffPeak2]
df1$BasePrice = as.numeric(levels(df1$BasePrice))[df1$BasePrice]
df1$BaseVolume = as.numeric(gsub(",", "", df1$BaseVolume))
df1$PeakPrice = as.numeric(levels(df1$PeakPrice))[df1$PeakPrice]
df1$PeakVolume = as.numeric(gsub(",", "", df1$PeakVolume))
# Get rid of NA columns when there is DST+1
# if (isDSTDateInOctober(as.Date(df1$DateTime, tz = "Europe/Berlin"))){
#   df1 = df1[!(hour(df1$DateTime) == 2 & is.na(df1$Low) & is.na(df1$High) & is.na(df1$Last)), ]
# }
# Get rid of NA columns when there is DST-1
#df1 = df1[!(hour(df1$DateTime) == 1 & is.na(df1$Prices) & is.na(df1$Volume)), ]
return(df1)
}
# Helper function in @seealso parseIAEPEXSPOT()
#
# Expand each hour string "XX:00:00" into its four quarter-hour strings:
# "XX:00:00", "XX:15:00", "XX:30:00", "XX:45:00".
# The first element of each group is the input string itself, so in the DST+1
# case the two consecutive 2am hours are expanded independently and both kept.
#
# Fixes vs. the original:
#  * result is preallocated instead of grown with c() in a loop (O(n), not O(n^2))
#  * seq_along() instead of 1:length(hours), so an empty input returns
#    character(0) instead of fabricating "NA:15:00"-style entries
addQuartersToHourVector <- function(hours) {
  # hour prefix before the first ":" (e.g. "02" from "02:00:00")
  prefixes <- vapply(strsplit(hours, ":"), function(p) p[1], character(1))
  res <- character(4 * length(hours))
  for (i in seq_along(hours)) {
    j <- (i - 1) * 4
    res[j + 1] <- hours[i]
    res[j + 2] <- paste0(prefixes[i], ":15:00")
    res[j + 3] <- paste0(prefixes[i], ":30:00")
    res[j + 4] <- paste0(prefixes[i], ":45:00")
  }
  return(res)
}
#' @title getDayAheadAuctionEPEXSPOT
#'
#' @description This function returns the price data of the EPEX SPOT Day-Ahead-Auction for a time period.
#' For french, german (Phelix) and swiss (swissix) --> MCC = Market Coupled Contracts??
#' https://www.epexspot.com/en/market-data/dayaheadauction
#'
#' Always get 7 days on one website (request). Date in request link is the latest date.
#'
#' @param startDate - Set the start date for the price data period
#' @param endDate - Set the end date for the price data period
#' @param country - Defines the country from which the data should be crawled. Default value is "DE". There is also "FR" (France) and "CH" (Swiss)
#'
#' @return a data.frame with DateTime as POSIXct object and Last prices of hourly data.
#'
#' @examples
#' h <- getDayAheadAuctionEPEXSPOT("2017-05-20", "2017-05-26", "60")
#'
#' @export
#'
getDayAheadAuctionEPEXSPOT <- function(startDate, endDate, country = "DE") {
library(logging)
library(httr)
library(XML)
library(dplyr)
# Setup the logger and handlers
basicConfig(level="DEBUG") # parameter level = x, with x = debug(10), info(20), warn(30), critical(40) // setLevel()
#nameLogFile <- paste("getReserveNeeds_", Sys.time(), ".txt", sep="")
#addHandler(writeToFile, file=nameLogFile, level='DEBUG')
# Parse the requested date range ("YYYY-MM-DD" character -> Date)
sdate <- as.Date(startDate, "%Y-%m-%d", tz = "Europe/Berlin")
edate <- as.Date(endDate, "%Y-%m-%d", tz = "Europe/Berlin")
# calls for every day in dates array --> !! maybe every two days, depends if always two dates for one date request are shown in table
# Therefore it is good to start with the loop at the last date, then the day before the last date can be also on the table
dates_array = seq(sdate, edate, by="days")
r = data.frame()
# Init progress bar
if(getOption("logging")) pb <- txtProgressBar(min = 0, max = ifelse(length(dates_array) > 1, length(dates_array) - 1, length(dates_array)), style = 3)
# Walk backwards in steps of 7: each POST returns a table covering the request
# date and the 6 days before it (see parseDAAEPEXSPOT).
for(i in seq(length(dates_array), 1, -7)) {
# NOTE(review): dates_array[i-6] is NA when fewer than 7 days remain; this only
# affects the log text, not the scraped data.
if(getOption("logging")) loginfo(paste("getDayAheadAuctionEPEXSPOT - Call for: ", dates_array[i], " - ", dates_array[i-6], " | REMEBER 7 dates on site!"))
url = paste("https://www.epexspot.com/en/market-data/dayaheadauction/auction-table/", dates_array[i], "/", country, sep="")
payload = list();
postResponse <- POST(url, body = payload, encode = "form")
parsedHtml <- htmlParse(content(postResponse, "text", encoding = "UTF-8"))
# Older chunks are prepended so the result stays in chronological order
r <- rbind(parseDAAEPEXSPOT(parsedHtml, country, dates_array[i]), r)
# update progress bar
if(getOption("logging")) setTxtProgressBar(pb, length(dates_array) - i + 1)
}
# CLose the progress bar
if(getOption("logging")) close(pb)
# subset the data to the appropriate input date range
r <- r %>% filter(format(DateTime, "%Y-%m-%d") >= sdate)
if(getOption("logging")) loginfo(paste("getDayAheadAuctionEPEXSPOT - DONE"))
return(r)
}
#' Helper function for @seealso getDayAheadAuctionEPEXSPOT
#'
parseDAAEPEXSPOT <- function(htmlDoc, country, latestDate) {
# Parses one 7-day EPEX day-ahead-auction HTML page into a data.frame with
# hourly DateTime rows plus per-day block/base/peak columns.
# NOTE(review): the latestDate parameter is never used in this function body.
# Get the hours --> check if length is 24 or in DST+1 (CEST-->CET) 25 // For DST-1 it is still 24 since 7 days are displayed
# --> FIRST read out the dates
# --> THEN read out the starting hours and build the total date string ---> !! For DST+1 there is 02a (- 02b) and 02b (- 03)
# Read out the dates --> id('tab_de')//span[contains(@class, 'date')]/text() # retrieves date range
dateRange = xpathSApply(htmlDoc, paste("id('tab_", tolower(country), "')//span[contains(@class, 'date')]/text()", sep = ""), saveXML)
# Delete the line break and the whitespaces and then split the date range on "-" to get the start and end date
dateRange = gsub(" ", "", gsub("\n", "", dateRange))
sd = strsplit(dateRange, "-")[[1]][1]
ed = strsplit(dateRange, "-")[[1]][2]
# Read Hours --> id('tab_de')/table[3]/tbody/tr/td[contains(@class, 'title')]
hourRange = xpathSApply(htmlDoc, paste("id('tab_", tolower(country), "')/table[3]/tbody/tr/td[contains(@class, 'title')]/text()", sep = ""), saveXML)
hourRange = gsub(" ", "", gsub("\n", "", hourRange))
# Keep only the starting hour of each "HH - HH" range
hourVector = unlist(lapply(strsplit(hourRange, "-"), function(x) x[1]))
# Build DateTime List
# --> 02a and 02b are converted to 02 --> there will be two 02 hours
hourVector = paste(gsub("a|b", "", hourVector), ":00:00", sep="")
# Get date range
dateRange = seq.Date(as.Date(sd, "%d/%m/%Y", tz = "Europe/Berlin"), as.Date(ed, "%d/%m/%Y", tz = "Europe/Berlin"), by = "1 day")
# Combine with all combinations the dates with the hours --> In cas of DST+1, dates will have also two 2hours --> those have to be removed after the values "-" are added
timeList = apply(expand.grid(hourVector, dateRange), 1, function(x) paste(x[2], x[1]))
# init the data.frame with the read out and build DateTimes
df1 <- data.frame(DateTime = timeList)
# xpath for 60mins (product)
x <- xpathSApply(htmlDoc, paste("id('tab_", tolower(country), "')/table[3]/tbody/tr/td/text()", sep = ""), saveXML)
# Each table row holds 18 cells; price starts at offset 3, volume at 12, and
# the day offset selects one day's column out of each row.
prices <- c()
vols <- c()
for(day in 0:6) {
prices <- c(prices, x[seq(3 + day, length(x), 18)])
vols <- c(vols, x[seq(12 + day, length(x), 18)])
}
df1 <- cbind(df1, Prices = prices, Volume = vols)
# xpath for block prices (product)
y <- xpathSApply(htmlDoc, paste("id('tab_", tolower(country), "')/table[2]/tbody/tr/td[contains(@class, 'title')]/../td/text()", sep = ""), saveXML)
middleNight <- c()
earlyMorning <- c()
lateMorning <- c()
earlyAfternoon <- c()
rushHour <- c()
offPeak2 <- c()
night <- c()
offPeak1 <- c()
business <- c()
offPeak <- c()
morning <- c()
highNoon <- c()
afternoon <- c()
evening <- c()
sunPeak <- c()
# set the day offset in hours: 24h in normal case (24*7 = 168 entries) and in DST+1 case 25h (25*7 = 175 entries)
dayoffset = ifelse(nrow(df1) > 168, 25, 24)
# Daily block values are repeated once per hour of the day
for(day in 0:6) {
middleNight <- c(middleNight, rep(y[2 + day],dayoffset))
earlyMorning <- c(earlyMorning, rep(y[10 + day],dayoffset))
lateMorning <- c(lateMorning, rep(y[18 + day],dayoffset))
earlyAfternoon <- c(earlyAfternoon, rep(y[26 + day],dayoffset))
rushHour <- c(rushHour, rep(y[34 + day],dayoffset))
offPeak2 <- c(offPeak2, rep(y[42 + day],dayoffset))
night <- c(night, rep(y[50 + day],dayoffset))
offPeak1 <- c(offPeak1, rep(y[58 + day],dayoffset))
business <- c(business, rep(y[66 + day],dayoffset))
offPeak <- c(offPeak, rep(y[74 + day],dayoffset))
morning <- c(morning, rep(y[82 + day],dayoffset))
highNoon <- c(highNoon, rep(y[90 + day],dayoffset))
afternoon <- c(afternoon, rep(y[98 + day],dayoffset))
evening <- c(evening, rep(y[106 + day],dayoffset))
sunPeak <- c(sunPeak, rep(y[114 + day],dayoffset))
}
df1 <- cbind(df1, MiddleNight = middleNight,
EarlyMorning = earlyMorning,
LateMorning = lateMorning,
EarlyAfternoon = earlyAfternoon,
RushHour = rushHour,
OffPeak2 = offPeak2,
Night = night,
OffPeak1 = offPeak1,
Business = business,
OffPeak = offPeak,
Morning = morning,
HighNoon = highNoon,
Afternoon = afternoon,
Evening = evening,
SunPeak = sunPeak)
# xpath for base and peak loads prices (also in block prices xpath) AND VOLUME (product)
#paste("id('tab_", tolower(country), "')/table[1]/tbody/tr/td", sep = "")
z <- xpathSApply(htmlDoc, paste("id('tab_", tolower(country), "')/table[1]/tbody/tr/td/text()", sep = ""), saveXML)
base_price <- c()
base_vol <- c()
peak_price <- c()
peak_load <- c()
for(day in 0:6) {
base_price <- c(base_price, rep(z[2 + day],dayoffset))
base_vol <- c(base_vol, rep(z[10 + day],dayoffset))
peak_price <- c(peak_price, rep(z[25 + day],dayoffset))
peak_load <- c(peak_load, rep(z[33 + day],dayoffset))
}
df1 <- cbind(df1, BasePrice = base_price, BaseVolume = base_vol, PeakPrice = peak_price, PeakVolume = peak_load)
# Format DataSet
# The scraped cells are character/factor; the levels-index trick converts a
# factor to numeric. NOTE(review): this assumes pre-R-4.0 stringsAsFactors
# behavior in data.frame()/cbind() -- confirm under the R version in use.
df1$DateTime = as.POSIXct(df1$DateTime, format = "%Y-%m-%d %H:%M:%S", tz = "Europe/Berlin")
df1$Prices = as.numeric(levels(df1$Prices))[df1$Prices]
df1$Volume = as.numeric(gsub(",", "", df1$Volume))
df1$OffPeak = as.numeric(levels(df1$OffPeak))[df1$OffPeak]
df1$OffPeak1 = as.numeric(levels(df1$OffPeak1))[df1$OffPeak1]
df1$SunPeak = as.numeric(levels(df1$SunPeak))[df1$SunPeak]
df1$OffPeak2 = as.numeric(levels(df1$OffPeak2))[df1$OffPeak2]
df1$MiddleNight = as.numeric(levels(df1$MiddleNight))[df1$MiddleNight]
df1$EarlyMorning = as.numeric(levels(df1$EarlyMorning))[df1$EarlyMorning]
df1$LateMorning = as.numeric(levels(df1$LateMorning))[df1$LateMorning]
df1$EarlyAfternoon = as.numeric(levels(df1$EarlyAfternoon))[df1$EarlyAfternoon]
df1$Night = as.numeric(levels(df1$Night))[df1$Night]
df1$Business = as.numeric(levels(df1$Business))[df1$Business]
df1$Morning = as.numeric(levels(df1$Morning))[df1$Morning]
df1$HighNoon = as.numeric(levels(df1$HighNoon))[df1$HighNoon]
df1$Afternoon = as.numeric(levels(df1$Afternoon))[df1$Afternoon]
df1$Evening = as.numeric(levels(df1$Evening))[df1$Evening]
# NOTE(review): RushHour is the only block column that is never converted to
# numeric here and stays a factor -- confirm whether that is intentional.
df1$BasePrice = as.numeric(levels(df1$BasePrice))[df1$BasePrice]
df1$BaseVolume = as.numeric(gsub(",", "", df1$BaseVolume))
df1$PeakPrice = as.numeric(levels(df1$PeakPrice))[df1$PeakPrice]
df1$PeakVolume = as.numeric(gsub(",", "", df1$PeakVolume))
# Get rid of NA columns when there is DST+1
# if (isDSTDateInOctober(as.Date(r$DateTime, tz = "Europe/Berlin"))){
#   r = r[!(hour(r$DateTime) == 2 & is.na(r$Low) & is.na(r$High) & is.na(r$Last)), ]
# }
# Get rid of NA columns when there is DST-1
#df1 = df1[!(hour(df1$DateTime) == 1 & is.na(df1$Prices) & is.na(df1$Volume)), ]
# And delete the "empty" 2 hour in DST-1 --> the whole row is filled with NA also the index number (rowname) is NA (but as character "NA")
df1 = df1[rownames(df1)[rownames(df1) != "NA"] , ]
return(df1)
}
#' @title getPHELIXDEFuturesEEX
#'
#' @description This function returns the price data of the EEX Phelix DE Futures as seen at https://www.eex.com/en/market-data/power/futures/phelix-de-futures
#' Prices are in EUR/MWh and Volume in MWh. The returned data.frame mimics the table at the EEX website (+ BestBid/BestAsk volume).
#' The name column contains no Date or DateTime object (simply a factor).
#' The data is retrieved by sequentially scraping the data of each day. Therefore the data of the next day (regarding input date) is retrieved.
#' For the Weekend saturday and sunday data is retrieved (at friday dates).
#' Since the next data observation is retrieved (because tables are showing a lot of null values) the function is optimized for the "Day" product.
#' For other products like Week or Year, you will get only the next week or year data of the input date.
#' Note that the website only provides a limited history of the price data! This also depends on the product (Day, Week etc.)
#'
#' @param startDate - Set the start date for the price data period ("YYYY-MM-DD", character)
#' @param endDate - Set the end date for the price data period ("YYYY-MM-DD", character)
#' @param product - Set the product type == Day, Weekend, Week, Month, Quarter or Year --> IT IS RECOMMENDED TO ONLY USE "DAY"
#'
#' @return a data.frame with the columns of the table seen on the EEX website. The name column identifies the product. It is not of type Date or DateTime!
#'
#' @examples
#' df = getPHELIXDEFuturesEEX("2017-08-02", "2017-08-04", "Day")
#'
#' @export
#'
getPHELIXDEFuturesEEX <- function(startDate, endDate, product) {
  library(logging)
  library(rjson)
  library(purrr)
  library(lubridate)
  # Setup the logger and handlers
  basicConfig(level="DEBUG") # parameter level = x, with x = debug(10), info(20), warn(30), critical(40) // setLevel()
  startDate = as.Date(startDate)
  endDate = as.Date(endDate)
  # Start one day early: the site reports the data of the day AFTER the request
  # date, so requesting (startDate - 1) yields the observations for startDate.
  dateSeq = seq.Date(startDate - 1, endDate, 1)
  res = data.frame()
  if(getOption("logging")) pb <- txtProgressBar(min = 0, max = length(dateSeq), style = 3)
  # seq_along() instead of 1:length() so a (degenerate) empty sequence loops zero times
  for(i in seq_along(dateSeq)) {
    if(getOption("logging")) loginfo(paste("getPHELIXDEFutures - Call PHELIX DE FUTURE price data: ", product ," - ", dateSeq[i]))
    df = getPHELIXDEFuturesForADate(dateSeq[i], product)
    res = rbind(res, df)
    # update progress bar
    # (Fixed: was setTxtProgressBar(pb, length(dateSeq) + i), which always
    # exceeded the bar maximum and showed 100% from the first iteration.)
    if(getOption("logging")) setTxtProgressBar(pb, i)
  }
  if(getOption("logging")) close(pb)
  if(getOption("logging")) loginfo(paste("getPHELIXDEFutures - DONE"))
  return(res)
}
#' Helper function for @seealso getPHELIXDEFutures
#'
# NOTE: function is optimized for product Day!
# --> it gets the values of the next day (regarding input date)
# --> And for a (input) friday, it gets the values of the two following days (saturday and sunday)
#
# Tables at https://www.eex.com/en/market-data/power/futures/phelix-de-futures are strangely organized
#
# date has to be of class Date, product a character
#
# Fixes vs. the original:
#  * `r` is now reset before each Base/Peak search; previously a missing
#    identifier either raised "object 'r' not found" (first iteration) or
#    silently reused the previous block's data (later iterations)
#  * removed a dead assignment (LastVolume was first set from
#    d$openInterestVolume and immediately overwritten from d$lastTradeVolume)
getPHELIXDEFuturesForADate <- function(date, product) {
  date_parts = strsplit(as.character(date), "-")[[1]]
  year = date_parts[1]
  month = date_parts[2]
  day = date_parts[3]
  # url for Phelix DE Futures: P-Power-F-DE-Base-XX --> XX Day, Weekend, Week, Month, Quarter, Year
  url = paste("https://www.eex.com/data//view/data/detail/ws-power-futures-german_v4/", year, "/", month, ".", day,".json", sep = "")
  h = rjson::fromJSON(file = url)
  # Base - Day, Weekend, Week, Month, Quarter, Year // Peak - Day, Weekend, Week, Month, Quarter, Year
  # 12 list elements
  base = h[[1]]
  # Return x, or NA when the JSON field is absent (rjson drops null fields)
  field = function(x) if (is.null(x)) NA else x
  res = data.frame()
  for(block in c("Base", "Peak")) {
    # get the right product --> Base or Peak and Day or Week or Weekend or.....
    id = paste("P-Power-F-DE-", block, "-", product, sep = "")
    # search through all products and keep the (last) matching entry
    r = NULL
    for(i in seq_along(base)){
      if(base[[i]]$identifier == id) {
        r = base[[i]]
      }
    }
    # check if the product was found and has data -> if not, skip this block
    if(!is.null(r) && length(r$rows) != 0) {
      # if friday then add 2 days (saturday and sunday) to the data.frame -> end == 3 (2,3) else get next day info (2)
      end = ifelse(lubridate::wday(date) == 6, 3, 2) # 6 == friday
      df = data.frame()
      for(i in 2:end) {
        # Get the 2nd/3rd entry of the table --> for the request date always get the data of the next day(s)
        k = r$rows[[i]]
        d = k$data
        OpenInterest = field(d$openInterestNoOfContracts)      # Open Interest Prev. Day
        LastPrice = field(d$lastTradePrice)                    # Last Price
        HighPrice = field(d$highPrice)                         # High Price (not included in the output; kept for reference)
        AbsChange = field(d$lastTradeDifference)               # Abs. Change
        VolumeExchange = field(d$volumeExchange)               # Vol. Exchange
        LastVolume = field(d$lastTradeVolume)                  # last traded volume
        VolumeTradeRegister = field(d$volumeOtc)               # Vol. Trade Registration
        NoContracts = field(d$noOfTradedContractsTotal)        # No. of Contracts
        BestAsk = field(d$bestAskPrice)                        # Best Ask
        BestAskVolume = field(d$bestAskVolume)                 # Best Ask Volume
        BestBid = field(d$bestBidPrice)                        # Best Bid
        BestBidVolume = field(d$bestBidVolume)                 # Best Bid Volume
        # Last Time: keep only the HH:MM part of the timestamp
        LastTime = if (is.null(d$lastTradeTime)) NA else substr(d$lastTradeTime, 12, 16)
        SettlementPrice = field(d$settlementPrice)             # Settl. Price
        # Name: last "-"-separated token of the contract identifier, e.g.
        # C-Power-F-DE-Base-Day-2017.08.02      --> "2017.08.02"
        # C-Power-F-DE-Base-Week-2017W31        --> "2017W31"
        # C-Power-F-DE-Peak-Weekend-2017WE47    --> "2017WE47"
        # C-Power-F-DE-Peak-Month-2017.10       --> "2017.10"
        # C-Power-F-DE-Peak-Quarter-2017Q4      --> "2017Q4"
        # C-Power-F-DE-Peak-Year-2018           --> "2018"
        id_parts = strsplit(k$contractIdentifier, "-")[[1]]
        Name = id_parts[length(id_parts)]
        df2 = data.frame(Name = Name,
                         Block = block,
                         Product = product,
                         BestBid = BestBid,
                         BestBidVolume = BestBidVolume,
                         BestAsk = BestAsk,
                         BestAskVolume = BestAskVolume,
                         NoContracts = NoContracts,
                         LastPrice = LastPrice,
                         AbsChange = AbsChange,
                         LastTime = LastTime,
                         LastVolume = LastVolume,
                         SettlementPrice = SettlementPrice,
                         VolumeExchange = VolumeExchange,
                         VolumeTradeRegister = VolumeTradeRegister,
                         OpenInterest = OpenInterest
        )
        df = rbind(df, df2)
      }
      res = rbind(res, df)
    }
  }
  return(res)
}
|
0cac6e3c4e2fb63900f2bd17c16b7ae388e5dcd5
|
95f46683600a82b5d2ca017c4317e99e7ceca840
|
/merge.R
|
9f6b846cffd230837e7f9975247d9a23a5af99de
|
[] |
no_license
|
salleuska/IR_Rstuff
|
835f3d186dcad022aef99fe6791e92d010630d75
|
0840c8d4bc7b74ddcdad782742e8339859b4d470
|
refs/heads/master
| 2021-01-23T20:44:57.628241
| 2014-05-29T16:02:52
| 2014-05-29T16:02:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,345
|
r
|
merge.R
|
# Merge script: joins HeidelTime detail output with date-precision (granularity)
# information and writes one combined table.
#heidel_details <- read.delim("~/Scrivania/TipsterData/esiti tutti/heidel_details.txt", header=F)
#heidel_precision <- read.delim("~/Scrivania/TipsterData/esiti tutti/heidel_datePrecision.txt", header=F)
# setwd("/home/alan/Documents/Parser_R_txt/")
# NOTE(review): hardcoded user-specific path -- this script only runs on the
# author's machine as-is.
source("/home/alan/Documents/GIT/Rstuff/configurazione.R")
config <- set.config(user = "alan")
setwd(config[3])
heidel_details <- read.delim("heidel_details.txt", header=F)
heidel_precision <- read.delim("heidel_datePrecision.txt", header=F)
colnames(heidel_details) <- c("id", "type", "value", "term", "creation")
colnames(heidel_precision) <- c("id", "value", "gran")
#---- check dimensions ----#
# Sanity check: number of distinct precision values should match the number of
# distinct DATE values in the details file.
length(levels(heidel_precision$value))
d <- heidel_details[which(heidel_details$type == "DATE"), ]
length(unique(d$value))
#--------------------------#
# Non-DATE rows get no granularity (NA for now, "undefined" below)
notd <- heidel_details[which(heidel_details$type != "DATE"), ]
notd$gran <- NA
# NOTE(review): this cbind assumes heidel_precision rows align 1:1 with the
# DATE rows of heidel_details in the same order -- verify upstream.
tmp <- cbind(d, heidel_precision$gran)
colnames(tmp) <- c("id", "type", "value", "term", "creation", "gran")
str(tmp)
str(notd)
heidel <- rbind(tmp, notd)
str(heidel)
length(which(is.na(heidel$gran))) #ok
heidel$gran[which(is.na(heidel$gran))] <- "undefined"
#check: "undefined" granularity should only occur for non-DATE rows
heidel[which((heidel$gran =="undefined")&(heidel$type != "DATE"))[1:100], ]
# Reorder by id before writing out
heidel <- heidel[order(heidel$id), ]
write.table(heidel, file = "heidel_details&precision.txt")
|
c7824d624b2cebec27bc9036a03c7caf51b7b8ec
|
85c772f3db8c3fa6928341bfd97b5a57c9bcc961
|
/misc/projmgr.R
|
12edb38d4ea0c9663fd41ccad14b42dcea908696
|
[] |
no_license
|
DUanalytics/BARtaxila
|
e0efe470fd7a0eeb9a5dedc0f2254cd83efb6075
|
0c76fad59a2e6f565277f52d080b48561a09eed6
|
refs/heads/master
| 2020-07-26T17:04:30.853751
| 2020-02-16T12:02:17
| 2020-02-16T12:02:17
| 208,712,611
| 77
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 308
|
r
|
projmgr.R
|
#Git and R
#https://cran.r-project.org/web/packages/projmgr/projmgr.pdf
# Scratch script exploring the projmgr package (GitHub project management from R).
# NOTE(review): install.packages() at the top of a script re-installs the
# package on every run; usually done once interactively instead.
install.packages('projmgr')
library(projmgr)
browse_docs('get', 'milestone')
# NOTE(review): `credentials.False` is not defined anywhere in this script, so
# this line errors unless such an object exists in the session (possibly a
# Python-style `credentials = FALSE` was intended -- verify against projmgr docs).
experigit <- create_repo_ref(credentials.False)
check_rate_limit(experigit)
my_repo <- create_repo_ref("dupadhyaya", "rAnalytics")
browse_issues(my_repo)
|
11c03e3f8263dd445ea12beedc3b5844bdc35a69
|
2fbe25472126dab0a3999b4e044e6838a0f9d7c8
|
/models/spy_technique.R
|
5097953c033b6172b095c1a2c5d47c4e3a499206
|
[] |
no_license
|
Ginny15/pulearning
|
710dbc755fff534fe31f7dc0cc4c07b4bef9f7e3
|
d104b9b7a9ab9f61d2ce00f69853ebb1b1e26bc7
|
refs/heads/master
| 2020-04-19T09:45:17.579686
| 2016-09-17T15:06:36
| 2016-09-17T15:06:36
| 67,410,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,413
|
r
|
spy_technique.R
|
## preparations ####
# PU-learning experiment: "spy technique" to find reliable negatives (RN) among
# unlabeled documents, then retrain an xgboost classifier on P + RN.
# NOTE(review): this script depends on session objects not created here
# (`data.1`, `tf.idf`) and on a local Solr instance -- it is not standalone.
LID.1548 = read.table("/Users/xhu/Documents/1548-links.txt")[,2]
library(solr);library(pracma);library(permute);library(data.table);library(caret);library(dummies);library(BBmisc)
url = 'http://localhost:8987/solr/LSH/afts'
# Pull relevance scores for fingerprint 1548 from Solr
score.search = solr_all(q= 'FINGERPRINT:1548', base=url, fl='*,DBID,score', verbose=FALSE, rows=99999)
score.df = data.frame(DBID = score.search$DBID, score = score.search$score)
data = merge(x = data.1[,c(1:10,24)], y = score.df, by = "DBID", all.x = TRUE)
# Documents without a Solr score get 0
data$score[which(is.na(data$score))] = 0
data$Size <- NULL;
tf.idf.sum = rowSums(tf.idf)
data = cbind(data,tf.idf.sum)
# 75/25 train/test split
trainIDX = sample(nrow(data),nrow(data)*0.75,replace = FALSE)
train = data[trainIDX,]
test = data[-trainIDX,]
# P = positive (labeled Y), U = unlabeled (labeled N)
P = train[train$label=='Y',]
U = train[train$label=='N',]
### spy technique ######
#########################
control = trainControl(method="cv", number=3, classProbs = TRUE)
RNlist = NULL
# 10 spy iterations: hide 15% of P ("spies") inside U, train, and mark as
# reliable negatives the unlabeled docs scoring below the lowest spy.
for(itr in 1:10){
RN = NULL
idx.S = sample(nrow(P),0.15*nrow(P),replace = FALSE)
S = P[idx.S,]
Us = rbind(U,S);Us = Us[shuffle(nrow(Us)),];Ps = P[-idx.S,]
Ps$label = 'Y';Us$label = 'N'
PsUs = rbind(Ps,Us)
PsUs = PsUs[shuffle(nrow(PsUs)),];PsUs$label =as.factor(PsUs$label)
fit.xgb <- train(label~., data = PsUs[,-1], method='xgbTree',trControl=control)
pred.Us = predict(fit.xgb,Us[,-1],type = 'prob')$Y
pred.S = predict(fit.xgb,S[,-1],type='prob')$Y
# threshold t = minimum predicted probability among the spies
t = min(pred.S);j=1
for (i in 1: length(pred.Us)){
if (pred.Us[i]<t){
RN[j] = Us[i,1];j=j+1
}
}
RNlist[[itr]] = RN
}
# Keep only documents flagged as reliable negatives in ALL 10 iterations
insecRN = RNlist[[1]]
for (i in 2:10){
insecRN = intersect(insecRN,RNlist[[i]])
}
# insecRN +P
Us[,1] =as.numeric(Us[,1])
RN = Us[which(Us[,1] %in% insecRN),]
# Step 2: retrain on reliable negatives + all positives
data.step.2 = rbind(RN,P); data.step.2=data.step.2[shuffle(nrow(data.step.2)),]
fit.xgb <- train(label~., data = data.step.2[,-1], method='xgbTree',trControl=control)
confusionMatrix(predict(fit.xgb,test[,-1]),test$label)
# Reference
# Prediction N Y
# N 17758 0
# Y 4705 893
confusionMatrix(predict(fit.xgb,train[,-1]),train$label)
# Reference
# Prediction N Y
# N 53079 4
# Y 14308 2674
# PU evaluation metric: recall^2 / P(f(x)=1) (Lee & Liu style)
prob.r = 892/893
# NOTE(review): `pred.test` is never defined in this script -- presumably
# predict(fit.xgb, test[,-1]) was intended; verify before rerunning.
prob.fx.1 = length(pred.test[which(pred.test=='Y')])/length(pred.test)
metric = prob.r^2/prob.fx.1 #7.04
# NOTE(review): kmeans() with no arguments errors -- leftover scratch line.
kmeans()
write.csv(cbind(test,data.frame(test,pred=predict(fit.xgb,test[,-1]),prob = predict(fit.xgb,test[,-1],type = 'prob'))),'results_all_catego.0817.testset.csv')
|
5ae124be449ba2498b19721dee9ccce990b731a0
|
7a0cc1a29da34a761327a45f506c4b097cd33bd8
|
/man/AdjustCounts.Rd
|
f9507c05d8e50f12c592e050c083e7b5409a809a
|
[
"CC0-1.0"
] |
permissive
|
USFWS/AKaerial
|
3c4d2ce91e5fac465a38077406716dd94c587fc8
|
407ccc5bf415d8d5ed0d533a5148693926306d27
|
refs/heads/master
| 2023-07-19T22:04:12.470935
| 2023-07-14T19:50:31
| 2023-07-14T19:50:31
| 254,190,750
| 0
| 1
|
CC0-1.0
| 2023-07-14T19:50:33
| 2020-04-08T20:14:41
|
R
|
UTF-8
|
R
| false
| true
| 1,366
|
rd
|
AdjustCounts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{AdjustCounts}
\alias{AdjustCounts}
\title{Create index columns based on Num and Obs_Type}
\usage{
AdjustCounts(full.data)
}
\arguments{
\item{full.data}{A clean (greenlight) file containing observation history}
}
\value{
data frame of original data and 5 new index columns
}
\description{
AdjustCounts will create new columns for 5 indices (itotal, ibb, total, sing1pair2, and flock) and compute the values based on Num and Obs_Type
}
\details{
AdjustCounts will take a new observation file and augment the Num category to create what a particular observation means to an index.
Currently there are 5 indices that are created: \enumerate{
\item itotal - Indicated total. Singles doubled, pairs doubled, opens added, flkdrake 1-4 doubled, flkdrake 5+ added.
\item ibb - Indicated breeding birds. Singles doubled, pairs doubled, opens removed, flkdrake 1-4 doubled, flkdrake 5+ removed.
\item total - Total birds. Singles added, pairs doubled, opens added, flkdrake added.
\item sing1pair2 - Singles and pairs. Singles added, pairs doubled, opens removed, flkdrake removed.
\item flock - Flocks. Singles removed, pairs removed, opens added, flkdrake added.
}
}
\references{
\url{https://github.com/USFWS/AKaerial}
}
\author{
Charles Frost, \email{charles_frost@fws.gov}
}
|
54cdd21b2f5dc49789c52b79d980d21f68afab62
|
93a27498daad1bd00d16aefa1cfb6c37b7e0911a
|
/project2_123a.R
|
74aedf2b6c379eb34fa21b1e437427564eb3069d
|
[] |
no_license
|
catherinewang1/Stat154-Project2
|
101cca234de8a978408bf3571c62d399fed36f0a
|
471a7e79ccd9cf08b5042e72db4a2c1132ee539e
|
refs/heads/master
| 2020-05-17T20:14:42.953499
| 2019-05-04T05:56:25
| 2019-05-04T05:56:25
| 183,936,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,509
|
r
|
project2_123a.R
|
#Stat 154 Project 2
# Setup: load libraries, read the three raw MISR image files, label the
# columns, tag each row with its image number, and write out CSV copies.
library(ggplot2)
library(dplyr)
library(gridExtra)
library(GGally)
library(caret)
library(hash)
library(MASS)
library(pROC)
library(e1071)
library(class)
library(car)
library(tidyr)
library(ggpubr)
library(WVPlots)
#read in the raw data (whitespace-separated, no header row)
image1 = read.table("image_data/image1.txt", sep = "", header = FALSE)
image2 = read.table("image_data/image2.txt", sep = "", header = FALSE)
image3 = read.table("image_data/image3.txt", sep = "", header = FALSE)
#label the column names
# expert_label: -1 = not cloud, 0 = unlabeled, 1 = cloud (per the class
# summaries built below) -- TODO confirm against the assignment handout
column_names = c("y", "x", "expert_label", "NDAI", "SD", "CORR",
"RadianceAngleDF", "RadianceAngleCF", "RadianceAngleBF", "RadianceAngleAF", "RadianceAngleAN")
colnames(image1) = column_names
colnames(image2) = column_names
colnames(image3) = column_names
# Tag each row with its source image before stacking
image1 = cbind(imageNum=1, image1)
image2 = cbind(imageNum=2, image2)
image3 = cbind(imageNum=3, image3)
image_all = rbind(image1, image2, image3)
# Persist CSV copies for later scripts
write.csv(image1, "data/image1.csv", row.names = FALSE)
write.csv(image2, "data/image2.csv", row.names = FALSE)
write.csv(image3, "data/image3.csv", row.names = FALSE)
write.csv(image_all, "data/image_all.csv", row.names = FALSE)
#----------------------------------------------------------------------------
#-------------------------- Question 1b -------------------------------------
#----------------------------------------------------------------------------
#--------- Summarise the data (ie % points for the different classes)
# Per-image and overall counts/proportions of each expert label (-1, 0, 1).
summary1 = image1 %>% group_by(expert_label) %>% summarise(Image1_count = n(), Image1_prop = n() / nrow(image1))
summary2 = image2 %>% group_by(expert_label) %>% summarise(Image2_count = n(), Image2_prop = n() / nrow(image2))
summary3 = image3 %>% group_by(expert_label) %>% summarise(Image3_count = n(), Image3_prop = n() / nrow(image3))
summary_total = image_all %>% group_by(expert_label) %>% summarise(Total_count = n(), Total_prop = n() / nrow(image_all))
# Bind the label column and the rounded count/proportion pairs into one table.
# (Fixed: summary1[,c(2,3), 2] carried a stray third subscript silently
# consumed as `drop`; summary_total[c(2,3)] normalized to matrix-style
# indexing to match its siblings. Also removed a dead
# `rownames(summary_table) = NULL` that was immediately overwritten.)
summary_table = cbind(summary1[,1], round(summary1[,c(2,3)], 2), round(summary2[,c(2,3)], 2), round(summary3[,c(2,3)], 2), round(summary_total[,c(2,3)], 2))
rownames(summary_table) = c("Not Cloud", "Unlabeled", "Cloud")  # rows are labels -1, 0, 1
pdf("imgs/Fig1b1.pdf", width = 28, height = 18 )
grid.table(summary_table)
dev.off()
#--------- Plot well-labeled beautiful maps
colors_cloud = c("skyblue4","black","ghostwhite")  # -1 / 0 / 1 -> blue / black / white
#plot image 1
q1b_image1 <- ggplot(image1, aes(x = x, y = y)) + geom_raster(aes(fill=expert_label)) +
  scale_fill_gradientn(colours=colors_cloud) +
  labs(title="Image 1: Cloud Label in Coordinate Space", x = "x-Coord", y = "y-Coord") +
  theme_classic()
#plot image 2
q1b_image2 <- ggplot(image2, aes(x = x, y = y)) + geom_raster(aes(fill=expert_label)) +
  scale_fill_gradientn(colours=colors_cloud) +
  labs(title="Image 2: Cloud Label in Coordinate Space", x = "x-Coord", y = "y-Coord") +
  theme_classic()
#plot image 3
q1b_image3 <- ggplot(image3, aes(x = x, y = y)) + geom_raster(aes(fill=expert_label)) +
  scale_fill_gradientn(colours=colors_cloud) +
  labs(title="Image 3: Cloud Label in Coordinate Space", x = "x-Coord", y = "y-Coord") +
  theme_classic()
# Build a dummy discrete-scale plot on a 1000-pixel sample solely to harvest
# a shared legend for the three maps above.
# (Fixed: removed an earlier q1b_legendplot assignment that was immediately
# overwritten by this tile-based version.)
set.seed(154)
dat_temp = image1[sample(1:nrow(image1), 1000), ]
q1b_legendplot <- ggplot(dat_temp, aes(x=x, y=y)) + geom_tile(aes(fill=factor(expert_label)), color=colors_cloud[dat_temp$expert_label + 2]) +
  scale_fill_manual(values=colors_cloud) + theme(legend.direction = "horizontal") + labs(fill = "Cloud Label") +
  theme(legend.background = element_rect(fill = 'white', size = 3)) +
  theme(legend.title=element_text(size=40), legend.text=element_text(size=40))
#extract legend: https://stackoverflow.com/questions/13649473/add-a-common-legend-for-combined-ggplots
#https://github.com/hadley/ggplot2/wiki/Share-a-legend-between-two-ggplot2-graphs
# Pull the legend grob out of a built ggplot.
g_legend<-function(a.gplot){
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
  legend <- tmp$grobs[[leg]]
  return(legend)}
mylegend<-g_legend(q1b_legendplot)
png(filename="imgs/Fig1b2.png", height=1080, width = 1920)
q1b_finalplot <- grid.arrange(arrangeGrob(q1b_image1 + theme(legend.position="none", plot.title = element_text(size = 30, face = "bold")),
                                          q1b_image2 + theme(legend.position="none", plot.title = element_text(size = 30, face = "bold")),
                                          q1b_image3 + theme(legend.position="none", plot.title = element_text(size = 30, face = "bold")),
                                          nrow=1),
                              mylegend, nrow=2,heights=c(10, 1))
dev.off()
# PDF copy of the same arrangement.
# (Fixed: pdf() has no `filename` formal -- the original pdf(filename=...)
# call was swallowed by `...`, so the figure silently went to Rplots.pdf.
# The device file is its first positional argument, `file`.)
pdf("imgs/Fig1b2.pdf")
grid.arrange(arrangeGrob(q1b_image1 + theme(legend.position="none") + theme(plot.title = element_text(size = 40, face = "bold")),
                         q1b_image2 + theme(legend.position="none"),
                         q1b_image3 + theme(legend.position="none"),
                         nrow=1),
             mylegend, nrow=2,heights=c(10, 1))
dev.off()
#----------------------------------------------------------------------------
#-------------------------- Question 1c -------------------------------------
#----------------------------------------------------------------------------
set.seed(154)
# Pairwise scatter/density plot of all features for a random sample of 100
# pixels, colored by expert label.
png(filename="imgs/Fig1c1.png", width = 1920, height = 1080, units = "px", pointsize = 12)
# shorter column names so the pair-plot panel strips stay readable
column_names_short = c("imageNum", "y", "x", "expert_label", "NDAI", "SD", "CORR",
                       "RadAng DF", "RadAng CF", "RadAng BF", "RadAng AF", "RadAng AN")
# NOTE(review): `sample` shadows base::sample from here on; it works, but a
# different name (e.g. sample_df) would be safer.
sample = image_all[sample(1:nrow(image_all), 100),]
colnames(sample) = column_names_short
ggpairs(sample, aes(colour = factor(expert_label), alpha = 0.4), title="Pairplot for All Images")+
  theme(plot.title = element_text(size = 40, face = "bold"))
dev.off()
#plot for just each image individually
#ggpairs(image1[sample(1:nrow(image2), 100),], aes(colour = factor(expert_label), alpha = 0.4), title="Pairplot for Image 1")
#ggpairs(image2[sample(1:nrow(image2), 100),], aes(colour = factor(expert_label), alpha = 0.4), title="Pairplot for Image 2")
#ggpairs(image3[sample(1:nrow(image3), 100),], aes(colour = factor(expert_label), alpha = 0.4), title="Pairplot for Image 3")
# per-label feature summaries, printed to the console
print(by(image_all, image_all$expert_label, summary))
####### QUESTION 2 ########
#----------------------------------------------------------------------------
#-------------------------- Question 2a -------------------------------------
#----------------------------------------------------------------------------
set.seed(154)

# Gather the pixels of `image_df` that fall inside the 40x40 blocks listed in
# `blocks` (data frame with columns x, y = block corner coordinates), in block
# order. `x_block`/`y_block` hold each pixel's block corner for `image_df`.
collect_blocks <- function(image_df, blocks, x_block, y_block) {
  picked <- lapply(seq_len(nrow(blocks)), function(i) {
    image_df[x_block == blocks$x[i] & y_block == blocks$y[i], ]
  })
  do.call(rbind, picked)
}

# Split one image into train (70%) / validation (15%) / test (15%) by sampling
# whole 40x40 pixel blocks, so spatially correlated pixels land in the same
# split. Returns list(train, validation, test) of data frames.
# (Refactor: replaces three copy-pasted per-image sections that grew data
# frames with rbind() inside for loops; the RNG draw sequence and the
# resulting row order are unchanged.)
split_image <- function(image_df) {
  x_block = floor(image_df$x / 40)*40
  y_block = floor(image_df$y / 40)*40
  squares = expand.grid(x = unique(x_block), y = unique(y_block))
  n = nrow(squares)
  indices_randomized = sample(1:n)
  train_indices = indices_randomized[1:(floor(.7*n))]
  validation_indices = indices_randomized[((floor(.7*n))+1):(floor(.85*n))]
  test_indices = indices_randomized[((floor(.85*n))+1):n]
  list(train = collect_blocks(image_df, squares[train_indices, ], x_block, y_block),
       validation = collect_blocks(image_df, squares[validation_indices, ], x_block, y_block),
       test = collect_blocks(image_df, squares[test_indices, ], x_block, y_block))
}

#image 1:
split1 = split_image(image1)
train_1 = split1$train
validation_1 = split1$validation
test_1 = split1$test
write.csv(train_1, "data/train1.csv", row.names = FALSE)
write.csv(validation_1, "data/validation1.csv", row.names = FALSE)
write.csv(test_1, "data/test1.csv", row.names = FALSE)
#image 2:
split2 = split_image(image2)
train_2 = split2$train
validation_2 = split2$validation
test_2 = split2$test
write.csv(train_2, "data/train2.csv", row.names = FALSE)
write.csv(validation_2, "data/validation2.csv", row.names = FALSE)
write.csv(test_2, "data/test2.csv", row.names = FALSE)
#image 3:
split3 = split_image(image3)
train_3 = split3$train
validation_3 = split3$validation
test_3 = split3$test
write.csv(train_3, "data/train3.csv", row.names = FALSE)
write.csv(validation_3, "data/validation3.csv", row.names = FALSE)
write.csv(test_3, "data/test3.csv", row.names = FALSE)
#total dataset (METHOD 1: pool the per-image splits)
train = rbind(train_1, train_2, train_3)
validation = rbind(validation_1, validation_2, validation_3)
test = rbind(test_1, test_2, test_3)
write.csv(train, "data/train.csv", row.names = FALSE)
write.csv(validation, "data/validation.csv", row.names = FALSE)
write.csv(test, "data/test.csv", row.names = FALSE)
#sanity checks
## plot the map of the training data (put into a if statement so it can be easily compressed)
if(TRUE) {
  colors_cloud = c("skyblue4","black","ghostwhite")  # -1 / 0 / 1
  #plot image 1
  q1b_image1 <- ggplot(train_1, aes(x = x, y = y)) + geom_raster(aes(fill=expert_label)) +
    scale_fill_gradientn(colours=colors_cloud) +
    labs(title="Image 1 Train: Cloud/Not Cloud in Coordinate Space", x = "x-Coord", y = "y-Coord") +
    theme_classic()
  #plot image 2
  q1b_image2 <- ggplot(train_2, aes(x = x, y = y)) + geom_raster(aes(fill=expert_label)) +
    scale_fill_gradientn(colours=colors_cloud) +
    labs(title="Image 2 Train: Cloud/Not Cloud in Coordinate Space", x = "x-Coord", y = "y-Coord") +
    theme_classic()
  #plot image 3
  q1b_image3 <- ggplot(train_3, aes(x = x, y = y)) + geom_raster(aes(fill=expert_label)) +
    scale_fill_gradientn(colours=colors_cloud) +
    labs(title="Image 3 Train: Cloud/Not Cloud in Coordinate Space", x = "x-Coord", y = "y-Coord") +
    theme_classic()
  #get appropriate legend
  q1b_legendplot <- ggplot(image1, aes(x=x, y=y)) + geom_point(aes(colour=factor(expert_label))) +
    scale_colour_manual(values = colors_cloud) + theme(legend.direction = "horizontal") +
    #scale_colour_discrete(name = "Cloud Label", labels = c("Not Cloud", "Unlabeled", "Cloud")) +
    labs(colour = "Cloud Label")
  #extract legend: https://stackoverflow.com/questions/13649473/add-a-common-legend-for-combined-ggplots
  #https://github.com/hadley/ggplot2/wiki/Share-a-legend-between-two-ggplot2-graphs
  # pull the legend grob from a built ggplot (same helper as in Q1b)
  g_legend<-function(a.gplot){
    tmp <- ggplot_gtable(ggplot_build(a.gplot))
    leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
    legend <- tmp$grobs[[leg]]
    return(legend)}
  mylegend<-g_legend(q1b_legendplot)
  png(filename="imgs/Fig2a1.png", width = 1920, height = 1080, units = "px", pointsize = 12)
  q1b_finalplot <- grid.arrange(arrangeGrob(q1b_image1 + theme(legend.position="none"),
                                            q1b_image2 + theme(legend.position="none"),
                                            q1b_image3 + theme(legend.position="none"),
                                            nrow=1),
                                mylegend, nrow=2,heights=c(10, 1))
  dev.off()
}
#check dimensions of the training and test against original
# NOTE(review): inside if(TRUE){...} these dim()/head() calls are NOT
# auto-printed at top level (only the value of the whole `if` is); wrap them
# in print() to actually see the output.
if(TRUE) {
  dim(train_1)
  dim(validation_1)
  dim(test_1)
  dim(image1)
  head(train_1)
  dim(train_2)
  dim(validation_2)
  dim(test_2)
  dim(image2)
  head(train_2)
  dim(train_3)
  dim(validation_3)
  dim(test_3)
  dim(image3)
  head(train_3)
}
##METHOD 2: CHOOSE 1 image to be test, split other 2 into training and validation
# With this seed the single draw below selects image 1 as the held-out test
# image; images 2 and 3 provide training/validation from their block splits.
set.seed(154039827)
image_test_num = sample(c(1, 2, 3), 1) # = 1
method2_test = image1
method2_train = rbind(train_2, train_3)
method2_validation = rbind(test_2, validation_2, test_3, validation_3)
#----------------------------------------------------------------------------
#-------------------------- Question 2b -------------------------------------
#----------------------------------------------------------------------------
#classify all the points in validation and test set as -1 (cloud free). This is a `trivial` classifier
# Accuracy is computed two ways: over ALL pixels (labels {-1, 0, 1}, so every
# unlabeled pixel counts as wrong) and over expert-labeled pixels only
# (labels {-1, 1}).
validation_labeled = validation[validation$expert_label != 0, ]
test_labeled = test[test$expert_label != 0, ]
temp_labeled = rbind(validation_labeled, test_labeled)
temp = rbind(validation, test)
accuracy_table = data.frame(dataset = c("Validation", "Test", "Val&Test Combined"),
                            accuracy = c(mean(validation$expert_label == -1), mean(test$expert_label == -1), mean(temp$expert_label == -1)),
                            accuracy_labeled = c(mean(validation_labeled$expert_label == -1),
                                                 mean(test_labeled$expert_label == -1),
                                                 mean(temp_labeled$expert_label == -1)))
# (Fixed: the two headers were swapped -- the second column is the accuracy
# over all pixels {-1, 0, 1}, the third is over labeled pixels {-1, 1}.)
colnames(accuracy_table) = c("dataset", "Prop Correct {-1, 0, 1}", "Prop Correct {-1,1}")
pdf("imgs/Fig2b1.pdf")
grid.table(accuracy_table)
dev.off()
#----------------------------------------------------------------------------
#-------------------------- Question 2d -------------------------------------
#----------------------------------------------------------------------------
set.seed(154)
# Generic K-fold cross-validation over arbitrary feature/label matrices.
#
# generic_classifier(X, y): fits a model from features X and labels y; the
#   returned object must work with predict(fit, newdata = ...). Trainers that
#   only accept a formula can be wrapped in a small adapter, e.g.
#   generic1 <- function(X, y) {return(train(y ~ ., data = X, method="glm", family="binomial"))}
#   Taking (X, y) instead of a formula lets callers bake in extra options
#   such as method, family, etc.
# lossFunction(yhat, y): scalar loss of predictions yhat against truth y.
#
# Returns a numeric vector of length K: the loss on each held-out fold.
#
# Note: CV should be split via part 2a's scheme, but that depends on which
# splitting method was used and cannot be reproduced generically -- it is the
# caller's responsibility to pass appropriately prepared matrices.
CVgeneric_genericMatrix <- function(generic_classifier, training_features, training_labels, K, lossFunction) {
  folds = createFolds(training_labels, k=K)
  # loss on one fold: fit on everything outside `held_out`, score inside it
  fold_loss <- function(held_out) {
    fit <- generic_classifier(training_features[-held_out, ], training_labels[-held_out])
    preds <- predict(fit, newdata = training_features[held_out, ])
    lossFunction(preds, training_labels[held_out])
  }
  unname(vapply(folds, fold_loss, numeric(1)))
}
# CVgeneric: the same generic CV, but specific to our spatial-block splitting.
# The extra argument `train_whole` must contain the same rows as
# training_features, plus imageNum/x/y columns, so rows can be grouped into
# 40x40 blocks and whole blocks assigned to CV folds (this keeps spatially
# correlated pixels together, as in part 2a).
#
# generic_classifier(X, y) -> model usable with predict(fit, newdata = ...)
# lossFunction(yhat, y)    -> scalar loss
# Returns a numeric vector of length K with the per-fold losses.
CVgeneric <- function(generic_classifier, training_features, training_labels, K, lossFunction, train_whole) {
  # assign every pixel to its 40x40 block corner
  dat = train_whole
  dat$x10 = floor(dat$x / 40)* 40
  dat$y10 = floor(dat$y / 40)* 40
  d = dat %>% group_by(imageNum, x10, y10) %>% summarise(count = n())
  # mapIndices[[i]]: row indices of dat belonging to block i
  # (removed unused locals `z` and `key` from the original loop)
  mapIndices <- vector("list", nrow(d))
  for(i in seq_len(nrow(d))) {
    mapIndices[[i]] <- which(dat$imageNum == d$imageNum[i] &
                             dat$x10 == d$x10[i] &
                             dat$y10 == d$y10[i])
  }
  # fold over BLOCKS, then expand each fold back to pixel-level indices
  losses = numeric(K)  # preallocated (was grown with c() in a loop)
  folds = createFolds(1:nrow(d), k=K)
  for(k in seq_len(K)) {
    CVtrain = unlist(mapIndices[-folds[[k]]])
    CVvalid = unlist(mapIndices[folds[[k]]])
    mod_fit = generic_classifier(training_features[CVtrain, ], training_labels[CVtrain])
    predicted = predict(mod_fit, newdata=training_features[CVvalid, ])
    losses[k] = lossFunction(predicted, training_labels[CVvalid])
  }
  losses
}
#An example of how to run CV K=10 with logisitic regression on the entire training data set
# Note: we assume that generic_classifier takes in TrainingData (X), and Labels (y), so it
#       is up to the user to wrap R training functions to include other options such as
#       method="glm", family="binomial (How to wrap it is shown below in generic1)
# Note: we also added an input train_whole which requires the rows to be the same data points as in
#       training_features and training_labels, but requires the x, y coordinat data. This is because
#       we split the data through squares, so we need to select squares (not just pixels)
## ----------------------------------------------------------------------------
## Example on how to use CVgeneric
## ----------------------------------------------------------------------------
dat = train[(train$expert_label != 0), ]  # labeled pixels only
rownames(dat) = NULL
# adapter: caret logistic regression from (X, y) matrices
generic1 <- function(X, y) {
  total_dat = cbind(X, y)
  return(train(y ~ ., data = total_dat, method="glm", family="binomial"))
}
# adapter: MASS::lda from (X, y) matrices
generic2 <- function(X, y) {
  total_dat = cbind(X, y)
  return(lda(y ~ ., data = total_dat))
}
generic_classifier = generic2 #lm(method="glm", family = "binomial")
# NOTE(review): 5:11 stops at RadianceAngleAF; the model sections below use
# 5:12 (including RadianceAngleAN) -- confirm the omission is intentional.
training_features = dat[,5:11]
training_labels = as.factor(dat$expert_label)
K = 10
# NOTE(review): predict.lda returns a list with a $class component, while
# loss1 compares the prediction object directly -- verify the intended loss
# (the later sections use class-aware loss functions instead).
loss1 <- function(x,y) {mean(x != y)}  # misclassification rate
lossFunction = loss1
train_whole = dat
CVgeneric(generic_classifier, training_features, training_labels, K, lossFunction, train_whole)
# Question 3
#----------------------------------------------------------------------------
#-------------------------- Question 3a -------------------------------------
#----------------------------------------------------------------------------
## ----------------------------------------------------------------------------
## Some optimization
## ----------------------------------------------------------------------------
## CVgeneric spends much of its time re-deriving the 40x40 blocks on every
## call, so the block -> row-index lists for both splitting METHODs are
## precomputed once into Method1MapIndices / Method2MapIndices.
if(TRUE) {
  method1_train = read.csv("data/train.csv")
  method1_train = method1_train[(method1_train$expert_label) != 0, ]
  method1_val = read.csv("data/validation.csv")
  method1_val = method1_val[(method1_val$expert_label) != 0, ]
  method1_test = read.csv("data/test.csv")
  method1_test = method1_test[(method1_test$expert_label) != 0, ]
  method2_train = rbind(read.csv("data/train2.csv"), read.csv("data/train3.csv"))
  method2_train = method2_train[(method2_train$expert_label != 0), ]
  method2_val = rbind(read.csv("data/validation2.csv"), read.csv("data/validation3.csv"))
  # Fixed: this assignment originally went to a typo'd name `metho2_val`,
  # leaving the unlabeled (expert_label == 0) pixels inside method2_val and
  # therefore inside the method-2 training data used below.
  method2_val = method2_val[(method2_val$expert_label != 0), ]
  method2_test = read.csv("data/image1.csv")
  method2_test = method2_test[(method2_test$expert_label != 0), ]
  # helper: list of row-index vectors, one per (imageNum, 40x40 block) of dat.
  # dat must already carry x10/y10 block-corner columns.
  build_map_indices <- function(dat) {
    d = dat %>% group_by(imageNum, x10, y10) %>% summarise(count = n())
    mapIndices <- vector("list", nrow(d))
    for(i in seq_len(nrow(d))) {
      mapIndices[[i]] <- which(dat$imageNum == d$imageNum[i] &
                               dat$x10 == d$x10[i] &
                               dat$y10 == d$y10[i])
    }
    mapIndices
  }
  # METHOD 1: pool train + validation (CV supplies its own validation folds)
  train = read.csv("data/train.csv")
  validation = read.csv("data/validation.csv")
  test = read.csv("data/test.csv")
  method1_train = rbind(method1_train, method1_val)
  rownames(method1_train) = NULL
  dat = method1_train[(method1_train$expert_label != 0), ]
  dat$x10 = floor(dat$x / 40)* 40
  dat$y10 = floor(dat$y / 40)* 40
  Method1MapIndices = build_map_indices(dat)
  # METHOD 2: same precomputation for the image-2/3 data
  method2_train = rbind(method2_train, method2_val)
  rownames(method2_train) = NULL
  dat = method2_train[(method2_train$expert_label != 0), ]
  dat$x10 = floor(dat$x / 40)* 40
  dat$y10 = floor(dat$y / 40)* 40
  Method2MapIndices = build_map_indices(dat)
  rownames(method1_train) = NULL
  rownames(method2_train) = NULL
}
# CV driver used by every model below: identical to CVgeneric, but takes the
# precomputed block -> row-index lists (mapIndices) instead of re-deriving
# the 40x40 blocks on each call.
# Returns the numeric vector of per-fold losses (length K).
CVgeneric_Optimized <- function(generic_classifier, training_features, training_labels, K, lossFunction, mapIndices) {
  set.seed(154)  # same fold assignment on every call, so models are comparable
  losses = numeric(K)  # preallocated (was grown with c() in a loop)
  folds = createFolds(1:length(mapIndices), k=K)
  for(k in seq_len(K)) {
    # expand block-level folds back to pixel-level row indices
    CVtrain = unlist(mapIndices[-folds[[k]]])
    CVvalid = unlist(mapIndices[folds[[k]]])
    mod_fit = generic_classifier(training_features[CVtrain, ], training_labels[CVtrain])
    predicted = predict(mod_fit, newdata=training_features[CVvalid, ])
    losses[k] = lossFunction(predicted, training_labels[CVvalid])
  }
  losses
}
K = 10  # number of CV folds used for every model below
## ----------------------------------------------------------------------------
## LOGISTIC REGRESSION
## ----------------------------------------------------------------------------
# adapter: fit caret logistic regression from (X, y) matrices
generic_logistic <- function(X, y) {
  total_dat = cbind(X, y)
  return(train(y ~ ., data = total_dat, method="glm", family="binomial"))
}
# misclassification rate; tolerates both plain label vectors and objects that
# carry predictions in a $class component (lda/qda-style)
# NOTE(review): name is a typo for "loss_logistic"; kept as-is since sourced
# scripts (e.g. code/KNN-CV.R) may reference it.
loss_logisitc <- function(yhat, y) {
  if("class" %in% names(yhat)) {
    return(mean(yhat$class != y))
  } else {
    return(mean(yhat != y))
  }
}
#function to get CV for logistic regression
getCVLogistic <- function(dataInput, mapIndices) {
  dat = dataInput[(dataInput$expert_label != 0), ]  # drop unlabeled pixels
  rownames(dat) = NULL
  training_features = dat[,5:12]  # NDAI, SD, CORR + the 5 radiance angles
  training_labels = as.factor(dat$expert_label)
  CVgeneric_Optimized(generic_logistic, training_features, training_labels, K, loss_logisitc,
                      mapIndices)
}
#get Logistic CV Folds losses (inaccuracy)
ptm <- proc.time()
LogisticLossesMethod1 = getCVLogistic(method1_train, Method1MapIndices)
LogisticLossesMethod2 = getCVLogistic(method2_train, Method2MapIndices)
logistic_ptm = proc.time() - ptm  # wall-clock timing of the two CV runs
#get Logistic Test loss (inaccuracy): refit on all training data, score held-out test set
logistic_testMod = train(as.factor(expert_label) ~ ., data = method1_train[,4:12], method="glm", family="binomial")
logistic_testLoss1 = mean(predict(logistic_testMod, newdata = method1_test[,5:12]) != method1_test$expert_label)
logistic_testMod = train(as.factor(expert_label) ~ ., data = method2_train[,4:12], method="glm", family="binomial")
logistic_testLoss2 = mean(predict(logistic_testMod, newdata = method2_test[,5:12]) != method2_test$expert_label)
#total CV results: test loss, fold average, then the 10 individual folds
CVresultsLogistic = data.frame(CVFold = c("Test", "Average Folds", 1:K),
                               Logisticregression1 = c(logistic_testLoss1, mean(LogisticLossesMethod1), LogisticLossesMethod1),
                               Logisticregression2 = c(logistic_testLoss2, mean(LogisticLossesMethod2), LogisticLossesMethod2))
names(CVresultsLogistic) = c("Data/CV Fold", "Logistic (Cutoff .5) Method 1", "Logistic (Cutoff .5) Method 2")
write.csv(CVresultsLogistic, "CVresults/CVLogistic.csv", row.names = FALSE)
## ----------------------------------------------------------------------------
## LDA
## ----------------------------------------------------------------------------
# adapter: fit MASS::lda from (X, y) matrices
generic_lda <- function(X, y) {
  total_dat = cbind(X, y)
  return(lda(y ~ ., data = total_dat))
}
# misclassification rate; predict.lda returns a list whose $class holds labels
loss_lda <- function(yhat, y) {
  if("class" %in% names(yhat)) {
    return(mean(yhat$class != y))
  } else {
    return(mean(yhat != y))
  }
}
#function to get CV for lda classification
getCVlda <- function(dataInput, mapIndices) {
  dat = dataInput[(dataInput$expert_label != 0), ]  # drop unlabeled pixels
  rownames(dat) = NULL
  training_features = dat[,5:12]  # NDAI, SD, CORR + the 5 radiance angles
  training_labels = as.factor(dat$expert_label)
  CVgeneric_Optimized(generic_lda, training_features, training_labels, K, loss_lda,
                      mapIndices)
}
#get lda CV Folds losses (inaccuracy)
ptm <- proc.time()
ldaLossesMethod1 = getCVlda(method1_train, Method1MapIndices)
ldaLossesMethod2 = getCVlda(method2_train, Method2MapIndices)
lda_ptm = proc.time() - ptm  # wall-clock timing of the two CV runs
#get lda Test loss (inaccuracy): refit on all training data, score the test set
lda_testMod = lda(as.factor(expert_label) ~ ., data = method1_train[,4:12])
lda_testLoss1 = mean(predict(lda_testMod, newdata=method1_test[,5:12])$class != method1_test$expert_label)
lda_testMod = lda(as.factor(expert_label) ~ ., data = method2_train[,4:12])
lda_testLoss2 = mean(predict(lda_testMod, newdata=method2_test[,5:12])$class != method2_test$expert_label)
#total CV results: test loss, fold average, then the 10 individual folds
CVresultsLDA = data.frame(CVFold = c("Test","Average Folds", 1:K),
                          lda1 = c(lda_testLoss1, mean(ldaLossesMethod1), ldaLossesMethod1),
                          lda2 = c(lda_testLoss2, mean(ldaLossesMethod2), ldaLossesMethod2))
names(CVresultsLDA) = c("Data/CV Fold", "LDA Method 1", "LDA Method 2")
write.csv(CVresultsLDA, "CVresults/CVlda.csv", row.names = FALSE)
## ----------------------------------------------------------------------------
## QDA
## ----------------------------------------------------------------------------
# adapter: fit MASS::qda from (X, y) matrices
generic_qda <- function(X, y) {
  total_dat = cbind(X, y)
  return(qda(y ~ ., data = total_dat))
}
# misclassification rate; predict.qda returns a list whose $class holds labels
loss_qda <- function(yhat, y) {
  if("class" %in% names(yhat)) {
    return(mean(yhat$class != y))
  } else {
    return(mean(yhat != y))
  }
}
#function to get CV for qda classification
getCVqda <- function(dataInput, mapIndices) {
  dat = dataInput[(dataInput$expert_label != 0), ]  # drop unlabeled pixels
  rownames(dat) = NULL
  training_features = dat[,5:12]  # NDAI, SD, CORR + the 5 radiance angles
  training_labels = as.factor(dat$expert_label)
  CVgeneric_Optimized(generic_qda, training_features, training_labels, K, loss_qda,
                      mapIndices)
}
#get qda CV Folds losses (inaccuracy)
ptm <- proc.time()
qdaLossesMethod1 = getCVqda(method1_train, Method1MapIndices)
qdaLossesMethod2 = getCVqda(method2_train, Method2MapIndices)
qda_ptm = proc.time() - ptm  # wall-clock timing of the two CV runs
#get qda Test loss (inaccuracy): refit on all training data, score the test set
qda_testMod = qda(as.factor(expert_label) ~ ., data = method1_train[,4:12])
qda_testLoss1 = mean(predict(qda_testMod, newdata = method1_test[,5:12])$class != method1_test$expert_label)
qda_testMod = qda(as.factor(expert_label) ~ ., data = method2_train[,4:12])
qda_testLoss2 = mean(predict(qda_testMod, newdata = method2_test[,5:12])$class != method2_test$expert_label)
#total CV results: test loss, fold average, then the 10 individual folds
CVresultsQDA = data.frame(CVFold = c("Test","Average Folds", 1:K),
                          qda1 = c(qda_testLoss1, mean(qdaLossesMethod1), qdaLossesMethod1),
                          qda2 = c(qda_testLoss2, mean(qdaLossesMethod2), qdaLossesMethod2))
names(CVresultsQDA) = c("Data/CV Fold", "QDA Method 1", "QDA Method 2")
write.csv(CVresultsQDA, "CVresults/CVqda.csv", row.names = FALSE)
## ----------------------------------------------------------------------------
## SVM
## ----------------------------------------------------------------------------
# Reload every split fresh from disk (the Q3a section above mutated
# method1_train/method2_train by appending the validation sets) and drop
# unlabeled pixels.
method1_train = read.csv("data/train.csv")
method1_train = method1_train[(method1_train$expert_label) != 0, ]
method1_val = read.csv("data/validation.csv")
method1_val = method1_val[(method1_val$expert_label) != 0, ]
method1_test = read.csv("data/test.csv")
method1_test = method1_test[(method1_test$expert_label) != 0, ]
method2_train = rbind(read.csv("data/train2.csv"), read.csv("data/train3.csv"))
method2_train = method2_train[(method2_train$expert_label != 0), ]
method2_val = rbind(read.csv("data/validation2.csv"), read.csv("data/validation3.csv"))
method2_val = method2_val[(method2_val$expert_label != 0), ]
method2_test = read.csv("data/image1.csv")
method2_test = method2_test[(method2_test$expert_label != 0), ]
#get mode function from: https://www.tutorialspoint.com/r/r_mean_median_mode.htm
# Most frequent value in `v`; ties are broken by first appearance in `v`.
getmode <- function(v) {
  candidates <- unique(v)
  freq <- tabulate(match(v, candidates))
  candidates[which.max(freq)]
}
## Make the data smaller
# SVM training is expensive at the pixel level, so coarsen each 10x10 pixel
# cell to one row: majority expert label + mean of every feature.
method1 = rbind(method1_train, method1_val)
method1$x10 = floor(method1$x / 10) * 10
method1$y10 = floor(method1$y / 10) * 10
method1_small_train = method1 %>% group_by(imageNum, x10, y10) %>%
  summarise(expert_label = getmode(expert_label), NDAI = mean(NDAI), SD = mean(SD), CORR = mean(CORR),
            RadianceAngleDF = mean(RadianceAngleDF), RadianceAngleCF = mean(RadianceAngleCF),
            RadianceAngleBF = mean(RadianceAngleBF), RadianceAngleAF = mean(RadianceAngleAF),
            RadianceAngleAN = mean(RadianceAngleAN))
# back to a plain data.frame (columns: imageNum, x10, y10, expert_label, features)
method1_small_train = data.frame(method1_small_train)
method2 = rbind(method2_train, method2_val)
method2$x10 = floor(method2$x / 10) * 10
method2$y10 = floor(method2$y / 10) * 10
method2_small_train = method2 %>% group_by(imageNum, x10, y10) %>%
  summarise(expert_label = getmode(expert_label), NDAI = mean(NDAI), SD = mean(SD), CORR = mean(CORR),
            RadianceAngleDF = mean(RadianceAngleDF), RadianceAngleCF = mean(RadianceAngleCF),
            RadianceAngleBF = mean(RadianceAngleBF), RadianceAngleAF = mean(RadianceAngleAF),
            RadianceAngleAN = mean(RadianceAngleAN))
method2_small_train = data.frame(method2_small_train)
#svm_testMod = svm(as.factor(expert_label) ~ ., data = method1_small_train[,4:12], cost = .001)
#svm_testLoss1 = mean(predict(svm_testMod, newdata = method1_test[,5:12]) != method1_test$expert_label)
## SVM CV with K=10
# Rebuild the block -> row-index lists for the coarsened (10x10-cell) data,
# regrouping the cells into 40x40 blocks for the spatial CV folds.
if(TRUE) {
  dat = method1_small_train
  dat = dat[(dat$expert_label != 0), ]
  dat$x10 = floor(dat$x10 / 40)* 40
  dat$y10 = floor(dat$y10 / 40)* 40
  d = dat %>% group_by(imageNum, x10, y10) %>% summarise(count = n())
  # (removed unused locals `z` and `key` from both index-building loops)
  mapIndices <- vector("list", nrow(d))
  for(i in seq_len(nrow(d))) {
    mapIndices[[i]] <- which(dat$imageNum == d$imageNum[i] & dat$x10 == d$x10[i] & dat$y10 == d$y10[i])
  }
  Method1MapIndices = mapIndices
  # METHOD 2: same regrouping for the image-2/3 coarsened data
  dat = method2_small_train
  dat = dat[(dat$expert_label != 0), ]
  # Fixed: these two lines originally read dat$x and dat$y, which only worked
  # through data.frame `$` partial matching (the coarsened data has no x/y
  # columns, only x10/y10); reference the real columns explicitly.
  dat$x10 = floor(dat$x10 / 40)* 40
  dat$y10 = floor(dat$y10 / 40)* 40
  d = dat %>% group_by(imageNum, x10, y10) %>% summarise(count = n())
  mapIndices <- vector("list", nrow(d))
  for(i in seq_len(nrow(d))) {
    mapIndices[[i]] <- which(dat$imageNum == d$imageNum[i] & dat$x10 == d$x10[i] & dat$y10 == d$y10[i])
  }
  Method2MapIndices = mapIndices
}
generic_svm <- function(X, y) {
total_dat = cbind(X, y)
return(svm(y ~ ., data = total_dat, kernel = "linear", cost = .01, scale = TRUE))
}
loss_svm <- function(yhat, y) {
if("class" %in% names(yhat)) {
return(mean(yhat$class != y))
} else {
return(mean(yhat != y))
}
}
#function to get CV for qda classification
getCVsvm <- function(dataInput, mapIndices) {
dat = dataInput[(dataInput$expert_label != 0), ]
rownames(dat) = NULL
training_features = dat[,5:12]
training_labels = as.factor(dat$expert_label)
CVgeneric_Optimized(generic_svm, training_features, training_labels, K, loss_svm,
mapIndices)
}
#get svm CV Folds losses (inaccuracy)
# Time the two blocked-CV runs (one per splitting method).
ptm <- proc.time()
svmLossesMethod1 = getCVsvm(method1_small_train, Method1MapIndices)
svmLossesMethod2 = getCVsvm(method2_small_train, Method2MapIndices)
svm_ptm = proc.time() - ptm
#get svm Test loss (inaccuracy)
# Refit on the full small-train sets and evaluate on the held-out test sets.
svm_testMod = svm(as.factor(expert_label) ~ ., data = method1_small_train[,4:12], cost=.01)
svm_testLoss1 = mean(predict(svm_testMod, newdata = method1_test[,5:12]) != method1_test$expert_label)
svm_testMod = svm(as.factor(expert_label) ~ ., data = method2_small_train[,4:12], cost=.01)
svm_testLoss2 = mean(predict(svm_testMod, newdata = method2_test[,5:12]) != method2_test$expert_label)
#total CV results
# Rows: test loss, mean across folds, then the K per-fold losses.
CVresultsSVM = data.frame(CVFold = c("Test","Average Folds", 1:K),
svm1 = c(svm_testLoss1, mean(svmLossesMethod1), svmLossesMethod1),
svm2 = c(svm_testLoss2, mean(svmLossesMethod2), svmLossesMethod2))
names(CVresultsSVM) = c("Data/CV Fold", "SVM Method 1", "SVM Method 2")
write.csv(CVresultsSVM, "CVresults/CVsvm.csv", row.names = FALSE)
## ----------------------------------------------------------------------------
## KNN
## ----------------------------------------------------------------------------
# done in another file
source("./code/KNN-CV.R")
CVresultsKNN = read.csv("CVresults/CVknn.csv")
## ----------------------------------------------------------------------------
## CV results
## ----------------------------------------------------------------------------
#The CV results
# Assemble all classifiers' per-fold losses side by side; the first column of
# each component table is dropped except for the leading Logistic one.
CVresultsInaccuracy = cbind(CVresultsLogistic, CVresultsLDA[,c(2,3)], CVresultsQDA[,c(2,3)],
CVresultsSVM[,c(2,3)], CVresultsKNN[,c(2,3)])
write.csv(CVresultsInaccuracy, "CVresults/CVresultsInaccuracy.csv")
# Accuracy = 1 - loss, column by column.
CVresultsAccuracy = cbind("Data/CV Fold" = CVresultsLogistic[,1] ,1-CVresultsLogistic[,c(2,3)],
1-CVresultsLDA[,c(2,3)], 1-CVresultsQDA[,c(2,3)], 1-CVresultsSVM[,c(2,3)], 1-CVresultsKNN[,c(2,3)])
write.csv(CVresultsAccuracy, "CVresults/CVresultsAccuracy.csv")
# Render the accuracy table as a figure.
png("imgs/Fig3a1.png", height=500, width=1300)
grid.table(CVresultsAccuracy)
dev.off()
## Question 3 a (Analysis of SVM Runtime)
## ----------------------------------------------------------------------------
# Benchmark SVM vs KNN wall-clock time over increasing dataset sizes.
image = read.csv("data/image_all.csv")
image = image[(image$expert_label != 0), ]
rownames(image) = 1:nrow(image)
# Two fixed rows guarantee both classes are present at every size.
basicDat = image[c(1, 12),]
sizes = c(10, 50, 100, 150, seq(200, 2000, 100), seq(2500, 5000, 500), seq(6000, 20000, 1000))
SVMtimes = c()
KNNtimes = c()
for(i in 1:length(sizes)) {
set.seed(154)
# NOTE(review): sample(1:sizes[i]) permutes the FIRST sizes[i] row indices
# rather than drawing sizes[i] rows at random from the whole image;
# possibly sample(1:nrow(image), sizes[i]) was intended -- confirm.
dat = rbind(basicDat, image[(sample(1:sizes[i])),])
dattest = rbind(basicDat, image[(sample(1:sizes[i])),])
# Time one SVM fit + predict cycle at this size.
ptm <- proc.time()
svmModel = svm(as.factor(expert_label) ~ ., data = dat[,4:12], probability=TRUE)
svmPredicted = predict(svmModel, newdata = dattest, probability = TRUE)
svm_ptm = proc.time() - ptm
SVMtimes = c(SVMtimes, unname(svm_ptm["elapsed"]))
# Time one KNN (k = 4) classification at this size.
ptm <- proc.time()
knnPredicted = knn(dat[, 5:12], dattest[, 5:12], dat$expert_label, 4)
knn_ptm = proc.time() - ptm
KNNtimes = c(KNNtimes, unname(knn_ptm["elapsed"]))
}
SVM_RuntimeDat = data.frame(sizes = sizes, times = SVMtimes)
KNN_RuntimeDat = data.frame(sizes = sizes, times = KNNtimes)
# Plot raw and log runtimes for both classifiers on one figure.
png("imgs/Q3a_SVMRuntime.png", height=1000, width=1000)
g1 = ggplot(SVM_RuntimeDat, aes(x = sizes, y = times)) + geom_line() +
labs(title="Run Time (seconds) of SVM by size of Training and Testing Dataset",
x = "Size of Test and Train Datset (#rows)",
y = "Run Time of SVM (seconds)")
g2 = ggplot(SVM_RuntimeDat, aes(x = sizes, y = log(times))) + geom_line() +
labs(title="Run Time (seconds) of SVM by size of Training and Testing Dataset",
x = "Size of Test and Train Datset (#rows)",
y = "Log Run Time of SVM (log seconds)")
g3 = ggplot(KNN_RuntimeDat, aes(x = sizes, y = times)) + geom_line() +
labs(title="Run Time (seconds) of KNN by size of Training and Testing Dataset",
x = "Size of Test and Train Datset (#rows)",
y = "Run Time of KNN (seconds)")
g4 = ggplot(KNN_RuntimeDat, aes(x = sizes, y = log(times))) + geom_line() +
labs(title="Run Time (seconds) of KNN by size of Training and Testing Dataset",
x = "Size of Test and Train Datset (#rows)",
y = "Log Run Time of KNN (log seconds)")
grid.arrange(g1, g2, g3, g4)
dev.off()
## ----------------------------------------------------------------------------
## Question 3 a (Checking Assumptions)
## ----------------------------------------------------------------------------
# Diagnostics for LDA/QDA (equal variance, normality), logistic regression
# (multicollinearity, linearity in the logit), and a pairwise feature plot.
image = read.csv("data/image_all.csv")
image$expert_label = as.factor(image$expert_label)
set.seed(154)
dat = image
# A 200-row subsample keeps the QQ and pair plots readable.
datsample = image[(sample(1:nrow(image), 200)), ]
#based on variance checking plotting at http://thatdatatho.com/2018/02/19/assumption-checking-lda-vs-qda-r-tutorial-2/
#Check constant variance
# Boxplots of each feature split by class: similar spreads support LDA's
# shared-covariance assumption.
plot = list()
box_variables <- c("NDAI", "SD", "CORR", "RadianceAngleDF", "RadianceAngleCF", "RadianceAngleBF", "RadianceAngleAF", "RadianceAngleAN")
for(i in box_variables) {
plot[[i]] <- ggplot(dat, aes_string(x = "expert_label", y = i, col = "expert_label", fill = "expert_label")) +
geom_boxplot(alpha = 0.2) +
theme(legend.position = "none") +
#scale_color_manual(values = c("blue", "red", "green"))
scale_fill_manual(values = c( "red", "green", "blue"))
}
png("imgs/Q3_checkConstantVar.png", height = 1080, width = 1920)
do.call(grid.arrange, c(plot, nrow = 1))
dev.off()
#Check Normality
# Per-class QQ plots of each feature against the normal distribution.
plot = list()
box_variables <- c("NDAI", "SD", "CORR", "RadianceAngleDF", "RadianceAngleCF", "RadianceAngleBF", "RadianceAngleAF", "RadianceAngleAN")
for(i in box_variables) {#For Cloudy
plot[[paste0(i, "cloudy")]] <- ggqqplot(datsample[(datsample$expert_label == 1), i]) + labs(title = paste0("Cloudy: ", i), ylab="Sample Quantiles", xlab="Theoretical Quantiles")
}
for(i in box_variables) {#For Non-Cloudy
plot[[paste0(i, "noncloudy")]] <- ggqqplot(datsample[(datsample$expert_label == -1), i]) + labs(title = paste0("Non-Cloudy: ", i), ylab="Sample Quantiles", xlab="Theoretical Quantiles")
}
png("imgs/Q3a_checkNormality.png", height = 1080, width = 1080)
do.call(grid.arrange, c(plot, nrow = 4))
dev.off()
#Logistic Regression Assumptions: check variance inflation factors
image = read.csv("data/image_all.csv")
dat = image[(image$expert_label != 0), 4:12]
#log_model = train(as.factor(expert_label) ~ ., data = dat, method="glm", family="binomial")
log_model = glm(as.factor(expert_label) ~., data = dat, family = binomial)
# Render VIFs as a table figure (high VIF -> multicollinearity concern).
png(filename="imgs/Fig3a_vif.png", height=400, width=350)
vif_df = data.frame("Variance Inflaction Factors" = round(vif(log_model), 2))
colnames(vif_df) = c("Variance Inflaction Factors")
grid.table(vif_df)
dev.off()
#Logistic Regression Assumptions: check linearity relationship
# http://www.sthda.com/english/articles/36-classification-methods-essentials/148-logistic-regression-assumptions-and-diagnostics-in-r/
# Predict the probability (p) of diabete positivity
probabilities <- predict(log_model, type = "response")
predicted.classes <- ifelse(probabilities > 0.5, "1", "-1")
# Long format: one (logit, predictor value) pair per feature, for faceting.
mydata <- dat %>%
mutate(logit = log(probabilities/(1-probabilities))) %>%
gather(key = "predictors", value = "predictor.value", -logit)
set.seed(154)
mydata = mydata[sample(1:nrow(mydata), 2000), ]
predictors <- colnames(mydata)
pdf("imgs/Fig3a_loglinearassumption.pdf")
ggplot(mydata, aes(logit, predictor.value))+
geom_point(size = 0.5, alpha = 0.5) +
geom_smooth(method = "loess") +
theme_bw() +
facet_wrap(~predictors, scales = "free_y")
dev.off()
# Pairwise feature scatter plots colored by class (class separability check).
png("imgs/Fig3a_checkSVM.png", height=1080, width=1080)
datsample= datsample[(datsample$expert_label != 0),]
PairPlot(datsample, colnames(datsample)[5:12], "Pair Wise plots", group_var = "expert_label") + aes(alpha=.3) + scale_alpha(guide = 'none')
dev.off()
|
28ef032236c9f0d68476f6676eb8d3a29d168027
|
07a74984bf59ce4486e1bcaefafb8ce692b50d5a
|
/man/mapdeck_view.Rd
|
41b5eb809be761e62130fa38047b786946230a9b
|
[] |
no_license
|
SymbolixAU/mapdeck
|
c3bc3a61b8d8ade69b9b67fa69a00f9294281630
|
6138c6845e37ab3479e4ff65d9b0fff29e20f070
|
refs/heads/master
| 2023-09-03T22:34:43.418728
| 2023-08-24T22:14:59
| 2023-08-24T22:14:59
| 141,350,341
| 344
| 50
| null | 2023-08-09T22:22:59
| 2018-07-17T22:06:34
|
HTML
|
UTF-8
|
R
| false
| true
| 717
|
rd
|
mapdeck_view.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapdeck_map.R
\name{mapdeck_view}
\alias{mapdeck_view}
\title{Mapdeck view}
\usage{
mapdeck_view(
map,
location = NULL,
zoom = NULL,
pitch = NULL,
bearing = NULL,
duration = NULL,
transition = c("linear", "fly")
)
}
\arguments{
\item{map}{a \code{mapdeck} map object}
\item{location}{unnamed vector of lon and lat coordinates (in that order)}
\item{zoom}{zoom level of the map}
\item{pitch}{the pitch angle of the map}
\item{bearing}{bearing of the map between 0 and 360}
\item{duration}{time in milliseconds of the transition}
\item{transition}{type of transition}
}
\description{
Changes the view of the map
}
|
b5461f0b66bf72c705083183620145dadfd16f46
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PMCMRplus/examples/hartleyTest.Rd.R
|
521a3f6ffcb7d5c36b7d79186daadf96dc54d9e4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 268
|
r
|
hartleyTest.Rd.R
|
library(PMCMRplus)
### Name: hartleyTest
### Title: Hartley's Maximum F-Ratio Test of Homogeneity of Variances
### Aliases: hartleyTest hartleyTest.default hartleyTest.formula
### Keywords: htest
### ** Examples
# Extracted package example: test whether insect counts have equal variance
# across spray treatments in the built-in InsectSprays dataset.
hartleyTest(count ~ spray, data = InsectSprays)
|
91765dd228eae655bc8e7098da35f7dffc732c60
|
bf9798f3936dc369176f967b96144a6d8e887f8b
|
/Week3/Code/TreeHeight.R
|
9879e031d8c21fefa665e1d3bb249e2c65e30d93
|
[] |
no_license
|
cupofteaandcake/CMEECourseWork
|
cf1cf96ff2168973cf1056c3ff8b96ef318fce79
|
42605aa1934bfb6668fc341dcd7c1ef4afa7edf2
|
refs/heads/master
| 2020-03-30T16:02:21.234538
| 2019-04-05T14:45:47
| 2019-04-05T14:45:47
| 151,390,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
r
|
TreeHeight.R
|
#!/usr/bin/env Rscript
#
#This function calculates heights of trees given distance of each tree from its base and angle to its top, using the trigonometric formula
#
#height = distance * tan(radians)
#
#ARGUMENTS
#degrees: The angle of elevation of tree
#distance: The distance from base of tree (e.g. metres)
#
#OUTPUT
#The heights of the tree, same units as "distance"
# __appname__ = TreeHeight.R
# __author__ = Talia Al-Mushadani
# __version__ = 0.0.1
# __license__ = license for this code
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable for a
# standalone coursework script but should not be sourced from other code.
rm(list = ls())
graphics.off()
# Compute tree height from the elevation angle (degrees) and the horizontal
# distance to the base, via height = distance * tan(angle).  Vectorized over
# both arguments; prints each height and returns the numeric result.
TreeHeight <- function(degrees, distance){
  angle_rad <- degrees * pi / 180
  height <- distance * tan(angle_rad)
  print(paste("Tree height is:", height))
  return (height)
}
# Read the tree survey (Distance.m, Angle.degrees columns), compute heights,
# append them as a new column, and write the result alongside the inputs.
TreeData <- read.csv("../Data/trees.csv")
Tree.height.m <- TreeHeight(TreeData$Angle.degrees, TreeData$Distance.m)
TreeDataResult = cbind(TreeData, Tree.height.m)
write.csv(TreeDataResult, "../Results/TreeHts.csv")
|
c3964283e23642c6d01501b163b69a8b4c138076
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/modelfree/examples/logit_link_private.Rd.R
|
acd440eae60458cbda6c6f0dde6d450e44c74160
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 537
|
r
|
logit_link_private.Rd.R
|
library(modelfree)
### Name: logit_link_private
### Title: Logit link function with guessing and lapsing rates
### Aliases: logit_link_private
### Keywords: nonparametric models regression nonlinear
### ** Examples
# Extracted package example: fit a psychometric function with a logit link
# adjusted for 10% guessing and 10% lapsing rates.
data( "01_Miranda" )
x <- example01$x
r <- example01$r
m <- example01$m
# resp = proportion correct, m = trials per stimulus level x.
glmdata <- data.frame( cbind( r/m ,m , x ) )
names( glmdata ) <- c( "resp", "m", "x" )
glmformula <- c( "resp ~ x" )
userlink<-logit_link_private( 0.1, 0.1 )
fit <- glm( glmformula, data = glmdata, weights = m, family = binomial( userlink ) )
|
2f707d63babe01f10ae547fbfe3b715cd66c969b
|
7036298abe6ae8076189bcbb864d5b4d4dedc8f8
|
/data_wrangling.R
|
06f74d66cc5175ce05d9e8ca2c8570c786c4c095
|
[] |
no_license
|
gordy2x/mining
|
f6a6ee76725e225a51e527cd002456fddf6122c1
|
ed86f6e9d87ef8a04133d4a47bad17bb3fedc7ca
|
refs/heads/master
| 2020-08-29T19:59:07.424792
| 2019-10-28T22:36:11
| 2019-10-28T22:36:11
| 218,157,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,463
|
r
|
data_wrangling.R
|
# For every soil-moisture probe, detect rain events (daily rainfall > 30.5)
# and record, per event, the day before rain (dbr), the last day of rain
# (ldr), and the day before the next rain (dbnr).  One CSV is written per
# probe into probes/.
rain_moist=read.csv("all_probes_2012_2017.csv", header=TRUE)
library("ggplot2")
library(cowplot)
library(dplyr)
library(coxme)
library(rms)
library(gridExtra)
library(tidyr)
#define probe
# A probe is uniquely identified by the concatenation of these five fields.
rain_moist$Probe=with(rain_moist,paste0(Swamp,Probe_no,Probe_depth,veg_type,Impact))
probes=unique(rain_moist$Probe)
for(probe in probes){
#which rows of data correspond to this probe
rel.rows=rain_moist$Probe==probe
#extract just those rows
dat.loc=rain_moist[rel.rows,]
#when is the rain greater than 30.5
rain_event=dat.loc$rainfall_daily_total>30.5
#this code extracts the start of a rain event
# diff() == 1 marks a FALSE->TRUE transition, i.e. the day before rain starts.
events=diff(rain_event)
day_before_rain=which(events==1)
#if this probe has no rain events, write a data frame with one row
if(length(day_before_rain)==0){
event=data.frame(counted=FALSE,reason="No rain events",rel=NA,
date=NA,
dbr=NA,ldr=NA,dbnr=NA,
Swamp=unique(dat.loc$Swamp),Probe_no=unique(dat.loc$Probe_no),
Probe_depth=unique(dat.loc$Probe_depth),
veg_type=unique(dat.loc$veg_type),
Impact=unique(dat.loc$Impact),
soil_moist_day_before=NA,days.above.50pcSM=NA,
days.above.25pcSM=NA,days.above.75pcSM=NA,
da50C=FALSE,da25C=FALSE,da75C=FALSE,
n.days.rain=NA,n.days.no.rain=NA,average.days.no.rain=NA,total.rain.volume=NA)
write.csv(event,paste0("probes/",probe,".csv"))
next
}
#otherwise create dataset of this probe
# One row per detected rain event; da*C columns start TRUE (censored) and are
# flipped to FALSE later if the moisture threshold is crossed.
event=data.frame(counted=TRUE,reason=NA,rel=NA,
date=dat.loc$date[day_before_rain],
dbr=day_before_rain,ldr=NA,dbnr=NA,
Swamp=unique(dat.loc$Swamp),Probe_no=unique(dat.loc$Probe_no),
Probe_depth=unique(dat.loc$Probe_depth),
veg_type=unique(dat.loc$veg_type),
Impact=unique(dat.loc$Impact),
soil_moist_day_before=NA,days.above.50pcSM=NA,
days.above.25pcSM=NA,days.above.75pcSM=NA,
da50C=TRUE,da25C=TRUE,da75C=TRUE,
n.days.rain=NA,n.days.no.rain=NA,average.days.no.rain=NA,total.rain.volume=NA)
# First row index of this probe within the full dataset.
event$rel=which(rel.rows==T)[1]
#now look inside this probe
#for each rain event
for(i in 1:length(day_before_rain)){
#cycle though data
dbr=day_before_rain[i]
dontstop=T
k=dbr
# Walk forward day by day until we hit missing data, the end of the series,
# or the day before the next rain event.
while(dontstop){
k=k+1
#if missing rain data found, stop, and record not to count this event
if(is.na(events[k])){
event$counted[i]=FALSE
event$reason[i]="Missing rain data"
dontstop=F
#if we reach the end of the data, stop
}else if(k==length(events)){
event$counted[i]=FALSE
event$reason[i]="End of data"
dontstop=F
#if events is -1, this means this is the last day of rain
}else if(events[k]==(-1)){
event$ldr[i]=k #extract last day of rain
#if events is 1 then this is the day before the next rain
}else if(events[k]==1){
event$dbnr[i]=k #extract day before new rain
dontstop=F
}
}
}
#save
write.csv(event,paste0("probes/",probe,".csv"))
}
#for all those probes datasets saved above
# Second pass: for each counted rain event, compute how many consecutive days
# after the rain the soil moisture stays above 25%/50%/75%, plus rain-event
# summary statistics; the da*C flags become FALSE (event "alive" / uncensored
# threshold never crossed) when moisture never drops below the threshold.
for(probe in probes){
#read in data
rel.rows=rain_moist$Probe==probe
dat.loc=rain_moist[rel.rows,]
event=read.csv(paste0("probes/",probe,".csv"),stringsAsFactors=FALSE)
for(i in 1:dim(event)[1]){
#if event is counted
if(event$counted[i]==TRUE){
#extract soil moisture the day before rain
event$soil_moist_day_before[i]=dat.loc$soil_moisture[event$dbr[i]]
# Window between the last day of rain and the day before the next rain.
between_rain=(event$ldr[i]+1):(event$dbnr[i])
#extract moisture between rain events
moisture_loc=dat.loc$soil_moisture[between_rain]
#if any missing data, record as not counted event
if(any(is.na(moisture_loc))){
event$counted[i]=FALSE
event$reason[i]="Missing soil moisture"
next
}
#count number of days above 50% , 25% and 75%
mg=moisture_loc>50
if(sum(mg)==length(mg)){
event$days.above.50pcSM[i]=sum(mg)
event$da50C[i]=FALSE #false is alive
} else{
# Days until the first drop below threshold.
event$days.above.50pcSM[i]=min(which(mg==FALSE)-1)
}
mg=moisture_loc>25
if(sum(mg)==length(mg)){
event$days.above.25pcSM[i]=sum(mg)
event$da25C[i]=FALSE
} else{
event$days.above.25pcSM[i]=min(which(mg==FALSE)-1)
}
mg=moisture_loc>75
if(sum(mg)==length(mg)){
event$days.above.75pcSM[i]=sum(mg)
event$da75C[i]=FALSE
} else{
event$days.above.75pcSM[i]=min(which(mg==FALSE)-1)
}
#record number of days of rain, no rain and rain volume
event$n.days.rain[i]=event$ldr[i]-event$dbr[i]
event$n.days.no.rain[i]=event$dbnr[i]-event$ldr[i]
event$total.rain.volume[i]=sum(dat.loc$rainfall_daily_total[(event$dbr[i]+1):event$ldr[i]])
}
# Recomputed each iteration (redundant but harmless): probe-level mean of
# the inter-rain durations, broadcast to every event row.
event$average.days.no.rain=mean(event$n.days.no.rain,na.rm=T)
}
write.csv(event,paste0("probes/",probe,".csv"))
}
#combine into one dataset
# Stack all per-probe event CSVs, then attach NARCLIM climate projections,
# swamp/catchment areas, and distance-from-escarpment covariates by key joins.
all_probes=event[0,]
for(probe in probes){
event=read.csv(paste0("probes/",probe,".csv"),stringsAsFactors=FALSE)
all_probes=rbind(all_probes,event)
}
#add NARCLIM data
NARCLIM=read.csv("NARCLIM.csv",stringsAsFactors=F)
colnames(NARCLIM)[3]="Probe_no"
# Harmonize swamp names with those used in the probe data before joining.
NARCLIM$Swamp[NARCLIM$Swamp=="Leech"]="leech"
NARCLIM$Swamp[NARCLIM$Swamp=="1a"]="den_1a"
NARCLIM$Swamp[NARCLIM$Swamp=="5"]="den_5"
NARCLIM$Swamp=factor(NARCLIM$Swamp)
probes=left_join(all_probes,NARCLIM)
#add swamp and catchment area data
area=read.csv("area.csv")
swamp_area=area[area$Type=="Swamp",-2]
colnames(swamp_area)[1]="swamp_area"
catchment_area=area[area$Type=="Catchment",-2]
colnames(catchment_area)[1]="catchment_area"
probes=left_join(probes,swamp_area)
probes=left_join(probes,catchment_area)
#add dist from escarpment
Scarpdist=read.csv("Probe_Scarp_Dist.csv",stringsAsFactors = FALSE)
Scarpdist=dplyr::select(Scarpdist,Swamp,Probechann,scarpdist)
colnames(Scarpdist)[2]="Probe_no"
Scarpdist$Swamp[Scarpdist$Swamp=="Leech"]="leech"
Scarpdist$Swamp[Scarpdist$Swamp=="1a"]="den_1a"
Scarpdist$Swamp[Scarpdist$Swamp=="5"]="den_5"
# Quick visual check that the join keys line up.
unique(Scarpdist$Swamp)
unique(probes$Swamp)
probes=left_join(probes,Scarpdist)
# Drop the two leading bookkeeping columns inherited from read.csv.
probes=probes[,-c(1:2)]
write.csv(probes,paste0("probes/all_probes.csv"))
|
eb46642e8ff2f3bdfd9085fc8a96c31e9e53c99b
|
6317e8f55ac477c0648049f27d246d125d4e0bd8
|
/Create Data Sets/R/extra/truncation filter.R
|
91d37dddcf31fafe44f97eac7a7745e8f6373410
|
[
"MIT"
] |
permissive
|
Allisterh/Double-Q-Survey-Project
|
91cf5ab7941bcd8b4b2d3f432a15f62a660c62f8
|
9ad38ac8f6a25e2092f0a4e12ad4c8208f849a8f
|
refs/heads/master
| 2022-01-16T21:14:08.294573
| 2018-01-12T17:28:32
| 2018-01-12T17:28:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,690
|
r
|
truncation filter.R
|
# Flag implausible survey price forecasts for truncation.  For each asset
# class, rows failing the plausibility checks have their forecast, expectation
# and b/c columns overwritten with the sentinel string "TR" (truncated);
# the sentinel is converted to NA further down the script.
data<-panel.u
length(which(is.na(data$e2_1month)))
# equity prices
data$keep.e<-"TRUE"
# Truncate if ANY horizon exceeds 4000, or ALL horizons exceed 2000, or ALL
# horizons fall below 100.
data$keep.e[data$e2_1month>4000|data$e2_3month>4000|data$e2_1year>4000]<-"FALSE"
data$keep.e[data$e2_1month>2000 & data$e2_3month>2000 & data$e2_1year>2000]<-"FALSE"
data$keep.e[data$e2_1month<100 & data$e2_3month<100 & data$e2_1year<100]<-"FALSE"
for(col in c("e2_1month","e2_3month","e2_1year","pi.e.1m.e","pi.e.3m.e","pi.e.1y.e","b.1m.e","c.1m.e","b.3m.e","c.3m.e","b.1y.e","c.1y.e")){
data[[col]][data$keep.e=="FALSE"]<-"TR"
}
# test<-subset(data,select=c("keep.e","e2_1month","e2_3month","e2_1year","b.1m.e","c.1m.e","b.3m.e","c.3m.e","b.1y.e","c.1y.e"))
# View(test)
# length(which(data$e2_1month=="TR"))
# table(data$keep.e)
# gold prices
# Same thresholds as equities.
data$keep.g<-"TRUE"
data$keep.g[data$g2_1month>4000|data$g2_3month>4000|data$g2_1year>4000]<-"FALSE"
data$keep.g[data$g2_1month>2000 & data$g2_3month>2000 & data$g2_1year>2000]<-"FALSE"
data$keep.g[data$g2_1month<100 & data$g2_3month<100 & data$g2_1year<100]<-"FALSE"
for(col in c("g2_1month","g2_3month","g2_1year","pi.e.1m.g","pi.e.3m.g","pi.e.1y.g","b.1m.g","c.1m.g","b.3m.g","c.3m.g","b.1y.g","c.1y.g")){
data[[col]][data$keep.g=="FALSE"]<-"TR"
}
# house prices
# Truncate if any horizon's forecast departs from the respondent's reported
# local housing price by more than a factor of 2 in either direction.
data$keep.h<-"TRUE"
data$keep2.h<-"TRUE" # less restrictive
data$keep.h[data$h2_1month>2*data$housing.price|data$h2_1month<0.5*data$housing.price|
data$h2_3month>2*data$housing.price|data$h2_3month<0.5*data$housing.price|
data$h2_1year>2*data$housing.price|data$h2_1year<0.5*data$housing.price]<-"FALSE"
data$keep2.h[data$h2_1month>2*data$housing.price & data$h2_3month>2*data$housing.price & data$h2_1year>2*data$housing.price]<-"FALSE"
data$keep2.h[data$h2_1month<0.5*data$housing.price & data$h2_3month<0.5*data$housing.price & data$h2_1year<0.5*data$housing.price]<-"FALSE"
# NOTE(review): keep2.h is computed but never used to apply truncation below;
# only keep.h drives the "TR" assignment -- confirm this is intentional.
for(col in c("h2_1month","h2_3month","h2_1year","pi.e.1m.h","pi.e.3m.h","pi.e.1y.h","b.1m.h","c.1m.h","b.3m.h","c.3m.h","b.1y.h","c.1y.h")){
data[[col]][data$keep.h=="FALSE"]<-"TR"
}
### truncation statistics -----------
# Build, for each asset class, a 3x3 matrix (rows c1/c2/c3 = 1-month, 3-month,
# 1-year horizons) holding: number of non-missing responses (n), number of
# responses flagged as truncated ("TR"), and the truncation percentage.
# BUG FIX: the original code reused n.tr1 (the 1-month truncation count) in
# the 3-month and 1-year rows (c2 and c3) for every asset; those rows now use
# n.tr3 and n.tr12 respectively.
# NOTE(review): na and na2 are computed from the same column, so (na - na2)
# is always 0 here; kept for fidelity with the original bookkeeping.
# house prices
n<-length(which(!is.na(data$h2_1month)))
na<-length(which(is.na(data$h2_1month)))
na2<-length(which(is.na(data$h2_1month)))
n.tr1<-length(which(data$h2_1month=="TR"))-(na-na2)
c1<-c(n,n.tr1,n.tr1/n*100)
n<-length(which(!is.na(data$h2_3month)))
na<-length(which(is.na(data$h2_3month)))
na2<-length(which(is.na(data$h2_3month)))
n.tr3<-length(which(data$h2_3month=="TR"))-(na-na2)
c2<-c(n,n.tr3,n.tr3/n*100)
n<-length(which(!is.na(data$h2_1year)))
na<-length(which(is.na(data$h2_1year)))
na2<-length(which(is.na(data$h2_1year)))
n.tr12<-length(which(data$h2_1year=="TR"))-(na-na2)
c3<-c(n,n.tr12,n.tr12/n*100)
house.tr<-rbind(c1,c2,c3)
# equity prices
n<-length(which(!is.na(data$e2_1month)))
na<-length(which(is.na(data$e2_1month)))
na2<-length(which(is.na(data$e2_1month)))
n.tr1<-length(which(data$e2_1month=="TR"))-(na-na2)
c1<-c(n,n.tr1,n.tr1/n*100)
n<-length(which(!is.na(data$e2_3month)))
na<-length(which(is.na(data$e2_3month)))
na2<-length(which(is.na(data$e2_3month)))
n.tr3<-length(which(data$e2_3month=="TR"))-(na-na2)
c2<-c(n,n.tr3,n.tr3/n*100)
n<-length(which(!is.na(data$e2_1year)))
na<-length(which(is.na(data$e2_1year)))
na2<-length(which(is.na(data$e2_1year)))
n.tr12<-length(which(data$e2_1year=="TR"))-(na-na2)
c3<-c(n,n.tr12,n.tr12/n*100)
equity.tr<-rbind(c1,c2,c3)
# gold prices
n<-length(which(!is.na(data$g2_1month)))
na<-length(which(is.na(data$g2_1month)))
na2<-length(which(is.na(data$g2_1month)))
n.tr1<-length(which(data$g2_1month=="TR"))-(na-na2)
c1<-c(n,n.tr1,n.tr1/n*100)
n<-length(which(!is.na(data$g2_3month)))
na<-length(which(is.na(data$g2_3month)))
na2<-length(which(is.na(data$g2_3month)))
n.tr3<-length(which(data$g2_3month=="TR"))-(na-na2)
c2<-c(n,n.tr3,n.tr3/n*100)
n<-length(which(!is.na(data$g2_1year)))
na<-length(which(is.na(data$g2_1year)))
na2<-length(which(is.na(data$g2_1year)))
n.tr12<-length(which(data$g2_1year=="TR"))-(na-na2)
c3<-c(n,n.tr12,n.tr12/n*100)
gold.tr<-rbind(c1,c2,c3)
# Combined summary: equities, gold, then housing.
tr<-rbind(equity.tr,gold.tr,house.tr)
######## -----------
# Convert the "TR" sentinel back to NA and coerce the affected columns to
# numeric (they became character when "TR" was inserted).
for(col in c("e2_1month","e2_3month","e2_1year","pi.e.1m.e","pi.e.3m.e","pi.e.1y.e","b.1m.e","c.1m.e","b.3m.e","c.3m.e","b.1y.e","c.1y.e",
"g2_1month","g2_3month","g2_1year","pi.e.1m.g","pi.e.3m.g","pi.e.1y.g","b.1m.g","c.1m.g","b.3m.g","c.3m.g","b.1y.g","c.1y.g",
"h2_1month","h2_3month","h2_1year","pi.e.1m.h","pi.e.3m.h","pi.e.1y.h","b.1m.h","c.1m.h","b.3m.h","c.3m.h","b.1y.h","c.1y.h")){
data[[col]][data[[col]]=="TR"]<-NA
data[[col]]<-as.numeric(data[[col]])
}
# Keep only complete cases across every analysis column (listwise deletion).
for(col in c("msa","v.e","e2_1month","e2_3month","e2_1year","pi.e.1m.e","pi.e.3m.e","pi.e.1y.e","b.1m.e","c.1m.e","b.3m.e","c.3m.e","b.1y.e","c.1y.e",
"v.g","g2_1month","g2_3month","g2_1year","pi.e.1m.g","pi.e.3m.g","pi.e.1y.g","b.1m.g","c.1m.g","b.3m.g","c.3m.g","b.1y.g","c.1y.g",
"v.h","h2_1month","h2_3month","h2_1year","pi.e.1m.h","pi.e.3m.h","pi.e.1y.h","b.1m.h","c.1m.h","b.3m.h","c.3m.h","b.1y.h","c.1y.h")){
data<-data[!is.na(data[[col]]),]
}
#use these lines to replicate 2015 results, comment out
#source("/Users/manguito/Dropbox/Double Survey Q Project (2)/econometric analysis ida 2016/R/extra/prepare time invariant variables.R")
# in prepare individual panel and MSA
# and use
#source("~/Dropbox/Double Survey Q Project (2)/econometric analysis ida 2016/R/extra/delete obs with different house price same city.R")
# in prepare panel.R
# use the following lines instead of the for-loop above
# for(col in c("e2_1month","e2_3month","v.e",
# "g2_1month","g2_3month","g2_1year","v.g",
# "h2_1month","h2_3month","v.h")){
# data<-data[!is.na(data[[col]]),]
# }
|
2c6ceab7a01e4bd42b664f3567fbc627d53ec571
|
7d5d8492c2d88b88bdc57e3c32db038a7e7e7924
|
/bid-cc-agricultural-sector/Output_analysis_final/runDSSAT/Rice_secano.r
|
e8dcc27372e6b15da5bed9e1e316c42c9d79c41c
|
[] |
no_license
|
CIAT-DAPA/dapa-climate-change
|
80ab6318d660a010efcd4ad942664c57431c8cce
|
2480332e9d61a862fe5aeacf6f82ef0a1febe8d4
|
refs/heads/master
| 2023-08-17T04:14:49.626909
| 2023-08-15T00:39:58
| 2023-08-15T00:39:58
| 39,960,256
| 15
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,135
|
r
|
Rice_secano.r
|
############### Parallel DSSAT ############################
# Run the DSSAT crop model in parallel over every rainfed ("secano") rice grid
# cell for one cultivar and one future-climate GCM, then save the results.
########### Load functions necessary
path_functions <- "/mnt/workspace_cluster_3/bid-cc-agricultural-sector/_scripts/DSSAT-R/"
path_project <- "/mnt/workspace_cluster_3/bid-cc-agricultural-sector/"
# Load the DSSAT input data frame (soil objects).
load(paste0(path_project, "14-ObjectsR/Soil.RData"))
# Clear everything except the soil helpers and path variables just loaded.
rm(list=setdiff(ls(), c("Extraer.SoilDSSAT", "values", "Soil_profile", "Cod_Ref_and_Position_Generic", "make_soilfile"
, "Soil_Generic", "wise", "in_data", "read_oneSoilFile", "path_functions", "path_project", "Cod_Ref_and_Position")))
load(paste0(path_project, "/08-Cells_toRun/matrices_cultivo/Rice_secano.Rdat"))
cultivar = c('IB0023','LOW.TEMP.TOL') # set the cultivar (DSSAT code, label) here
#cultivar = c('IB0118','IR72')
#cultivar = c('IB0001','IR8')
source(paste0(path_functions, "main_functions.R")) ## Load main helper functions
source(paste0(path_functions, "make_xfile.R")) ## Load the function that writes the DSSAT X-file
source(paste0(path_functions, "make_wth.R"))
source(paste0(path_functions, "dssat_batch.R"))
source(paste0(path_functions, "DSSAT_run.R"))
# Nitrogen applications: one dose at planting (day 0) and one at day 30,
# taken per grid cell from the crop matrix.
day0 <- crop_secano$N.app.0d
day_aplication0 <- rep(0, length(day0))
day30 <- crop_secano$N.app.30d
day_aplication30 <- rep(30, length(day30))
amount <- data.frame(day0, day30)
day_app <- data.frame(day_aplication0, day_aplication30)
## Experimental-file (X-file) configuration for rainfed rice
years <- 69:97
data_xfile <- list()
data_xfile$crop <- "RICE"
data_xfile$exp_details <- "*EXP.DETAILS: BID17101RZ RICE LAC"
data_xfile$name <- "./JBID.RIX"
data_xfile$CR <- "RI"
data_xfile$INGENO <- rep(cultivar[1], length(crop_secano[, "variedad.1"]))
data_xfile$CNAME <- "IRNA"
# Planting window start/end from the MIRCA calendar.
data_xfile$initation <- crop_secano$mirca.start
data_xfile$final <- crop_secano$mirca.end
data_xfile$system <- "rainfed" ## Irrigation or rainfed, if is irrigation then automatic irrigation
data_xfile$year <- years[1]
data_xfile$nitrogen_aplication <- list(amount = amount, day_app = day_app)
data_xfile$smodel <- "RIXCER" ## model code (original comment: "Fin Model")
data_xfile$bname <- "DSSBatch.v45"
data_xfile$PPOP <- 200
data_xfile$PPOE <- 175
data_xfile$PLME <- "S" ## for rice: S = seed, T = transplanting
data_xfile$PLDS <- "B"
data_xfile$PLRD <- 0 ## TODO: investigate this parameter further
data_xfile$PLDP <- 2 ## TODO: investigate this parameter further
# Candidate GCMs; only modelos[i] (i = 4, gfld_esm2g) is run below.
modelos <- c("bcc_csm1_1", "bnu_esm", "cccma_canesm2", "gfld_esm2g", "inm_cm4", "ipsl_cm5a_lr", "miroc_miroc5",
"mpi_esm_mr", "ncc_noresm1_m")
i <- 4
# NOTE(review): the path repeats "14-ObjectsR/14-ObjectsR" -- confirm this is
# the real directory layout on the cluster and not a typo.
gcm <- paste0("/mnt/workspace_cluster_3/bid-cc-agricultural-sector/14-ObjectsR/14-ObjectsR/", modelos[i],"/Futuro/")
load(paste0(gcm, "Precipitation.RDat"))
load(paste0(gcm, "Srad.Rdat"))
load(paste0(gcm, "Temperatura_2.Rdat"))
# Climate Data Set for WFD or global model of climate change
climate_data <- list()
climate_data$year <- 69:97
climate_data$Srad <- Srad
climate_data$Tmax <- Tmax
climate_data$Tmin <- Tmin
climate_data$Prec <- Prec
climate_data$lat <- crop_secano[,"y"]
climate_data$long <- crop_secano[, "x"]
climate_data$wfd <- "model"
climate_data$id <- crop_secano[, "Coincidencias"]
## Inputs for the DSSAT runs
input_data <- list()
input_data$xfile <- data_xfile
input_data$climate <- climate_data
dir_dssat <- "/home/jmesa/csm45_1_23_bin_ifort/"
dir_base <- "/home/jmesa/Scratch"
## libraries for parallel execution
library(foreach)
library(doMC)
## number of worker processes on this server
registerDoMC(8)
# One DSSAT run per grid cell, executed in parallel.
Run <- foreach(i = 1:dim(crop_secano)[1]) %dopar% {
run_dssat(input_data, i, dir_dssat, dir_base)
}
# NOTE(review): the foreach index reuses `i`, shadowing the GCM index above;
# after the loop, modelos[i] in the save() filename may not name the GCM that
# was actually run -- confirm.
tipo <- "Secano_"
cultivo <- "Arroz_"
cultivar.n <- cultivar[2] ## cultivar name
save(Run, file = paste("/home/jmesa/", "_", cultivo, tipo, cultivar.n, "_",modelos[i], "_", ".RDat",sep=""))
|
74f6831d96a331d3706739143f76614d7fb4e869
|
12292a3ea0df7e1ca422d778f8f3ea812069cb83
|
/R/AltSplicingJunctionSupported.R
|
93facf782b55ac27c4749627ce12f3740bed9564
|
[] |
no_license
|
federicomarini/GeneStructureTools
|
bd8be331d1bc29d849452329bcbb77705e6868dd
|
ed946d997fc62ac70b1f34e5ce631aefe60d2dca
|
refs/heads/master
| 2020-03-13T21:15:32.154189
| 2018-04-27T12:22:17
| 2018-04-27T12:22:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,452
|
r
|
AltSplicingJunctionSupported.R
|
#' Find alternative junctions for Whippet alternative splicing events
#'
#' Find junctions that pair with each end of an AA (alt. acceptor) or
#' AD (alt. donor) whippet range
#' Find junctions that pair with the upsteam/downstream exon of an
#' AF (alt. first exon) or an AL (alt. last exon)
#' @param whippetDataSet whippetDataSet generated from \code{readWhippetDataSet()}
#' @param jncCoords GRanges object with Whippet junctions. Generated by readWhippetJNCfiles()
#' @param type type of Whippet event (AA/AD/AF/AL).
#' Note only one event type should be processed at a time.
#' @return GRanges object with alternative junctions.
#' Each event should have a set of X (for which the psi measurement is reported) junctions,
#' and alternative Y junctions.
#' @export
#' @import GenomicRanges
#' @family whippet splicing isoform creation
#' @author Beth Signal
#' @examples
#' whippetFiles <- system.file("extdata","whippet/",
#' package = "GeneStructureTools")
#' wds <- readWhippetDataSet(whippetFiles)
#' wds <- filterWhippetEvents(wds)
#'
#' gtf <- rtracklayer::import(system.file("extdata","example_gtf.gtf",
#' package = "GeneStructureTools"))
#' exons <- gtf[gtf$type=="exon"]
#' transcripts <- gtf[gtf$type=="transcript"]
#' g <- BSgenome.Mmusculus.UCSC.mm10::BSgenome.Mmusculus.UCSC.mm10
#'
#' wds.altAce <- filterWhippetEvents(wds, eventTypes="AA")
#' jncPairs.altAce <- findJunctionPairs(wds.altAce, type="AA")
#'
#' wds.altDon <- filterWhippetEvents(wds, eventTypes="AD")
#' jncPairs.altDon <- findJunctionPairs(wds.altDon, type="AD")
#'
#' wds.altFirst <- filterWhippetEvents(wds, eventTypes="AF", psiDelta=0.2)
#' jncPairs.altFirst <- findJunctionPairs(wds.altFirst, type="AF")
#'
#' wds.altLast <- filterWhippetEvents(wds, eventTypes="AL", psiDelta=0.2)
#' jncPairs.altLast <- findJunctionPairs(wds.altLast, type="AL")
findJunctionPairs <- function(whippetDataSet, jncCoords, type=NA){
    # Finds pairs/sets of splice junctions supporting a Whippet alternative
    # splicing event (AA/AD/AF/AL) by overlapping event coordinates with the
    # junction coordinates stored in the whippetDataSet.
    # NOTE(review): the `jncCoords` argument is immediately overwritten below
    # (jncCoords <- junctions(whippetDataSet)) and is therefore never used;
    # the documented examples call this function without it. Confirm whether
    # the parameter can be removed.
    # Re-filter with zero thresholds so only events of the requested type remain.
    whippetDataSet <- filterWhippetEvents(whippetDataSet,
                                          probability = 0,
                                          psiDelta = 0,
                                          eventTypes=type)
    eventCoords <- coordinates(whippetDataSet)
    jncCoords <- junctions(whippetDataSet)
    eventCoords$type <- type
    # search for alternatives to the left or the right?
    # Direction depends on event type and strand (e.g. AA on '+' looks left).
    eventCoords$search <- "right"
    eventCoords$search[(eventCoords$type=="AA" &
                            as.logical(strand(eventCoords) == '+'))|
                           (eventCoords$type=="AD" &
                                as.logical(strand(eventCoords) == '-'))|
                           (eventCoords$type=="AF" &
                                as.logical(strand(eventCoords) == '-'))|
                           (eventCoords$type=="AL" &
                                as.logical(strand(eventCoords) == '+'))] <-
        "left"
    junctionSJA.right <- eventCoords[eventCoords$search=="right"]
    junctionSJA.left <- eventCoords[eventCoords$search=="left"]
    # right if AA&-
    # left if AA&+
    # Set "A": junctions anchored at the event boundary on the search side.
    olA.from <- vector()
    if(length(junctionSJA.right) > 0){
        if(type %in% c("AA", "AD")){
            start(junctionSJA.right) <- start(junctionSJA.right) -1
            end(junctionSJA.right) <- start(junctionSJA.right)
        }else{
            start(junctionSJA.right) <- end(junctionSJA.right)
        }
        olA.right <- findOverlaps(junctionSJA.right, jncCoords, type="start")
        olA.from <- append(olA.from,
                           as.character(junctionSJA.right$id[olA.right@from]))
        junctionsA <- jncCoords[olA.right@to]
    }
    if(length(junctionSJA.left) > 0){
        end(junctionSJA.left) <- start(junctionSJA.left)
        olA.left <- findOverlaps(junctionSJA.left, jncCoords, type="end")
        olA.from <- append(olA.from,
                           as.character(junctionSJA.left$id[olA.left@from]))
        if(exists("junctionsA")){
            junctionsA <- c(junctionsA, jncCoords[olA.left@to])
        }else{
            junctionsA <- jncCoords[olA.left@to]
        }
    }
    junctionsA$whippet_id <- olA.from
    junctionsA$search <- eventCoords$search[match(junctionsA$whippet_id,
                                                  eventCoords$id)]
    junctionsA$set <- "A"
    if(type %in% c("AA","AD")){
        # junction B only required if AA/AD
        junctionSJB.right <- eventCoords[eventCoords$search=="right"]
        junctionSJB.left <- eventCoords[eventCoords$search=="left"]
        # same for B junctions
        olB.from <- vector()
        if(length(junctionSJB.right) > 0){
            start(junctionSJB.right) <- end(junctionSJB.right)
            olB.right <- findOverlaps(junctionSJB.right, jncCoords,
                                      type="start")
            olB.from <- append(olB.from,
                               as.character(
                                   junctionSJB.right$id[olB.right@from]))
            junctionsB <- jncCoords[olB.right@to]
        }
        if(length(junctionSJB.left) > 0){
            end(junctionSJB.left) <- end(junctionSJB.left) +1
            start(junctionSJB.left) <- end(junctionSJB.left)
            olB.left <- findOverlaps(junctionSJB.left, jncCoords, type="end")
            olB.from <- append(olB.from,
                               as.character(junctionSJB.left$id[olB.left@from]))
            if(exists("junctionsB")){
                junctionsB <- c(junctionsB, jncCoords[olB.left@to])
            }else{
                junctionsB <- jncCoords[olB.left@to]
            }
        }
        junctionsB$whippet_id <- olB.from
        junctionsB$search <- eventCoords$search[match(junctionsB$whippet_id,
                                                      eventCoords$id)]
        junctionsB$set <- "B"
        junctions <- c(junctionsA, junctionsB)
    }
    if(type %in% c("AF","AL")){
        # Set "C": junctions sharing a boundary with a set-A junction but not
        # structurally identical to any set-A junction (equal overlaps removed).
        junctionsA.left <- junctionsA[junctionsA$search=="left"]
        junctionsA.right <- junctionsA[junctionsA$search=="right"]
        if(length(junctionsA.left) > 0){
            end(junctionsA.left) <- start(junctionsA.left)
            olC.left <- findOverlaps(junctionsA.left, jncCoords, type="start")
            junctionsC.left <- jncCoords[olC.left@to]
            junctionsC.left$whippet_id <-
                junctionsA.left$whippet_id[olC.left@from]
            junctionsC.left$search <- junctionsA.left$search[olC.left@from]
            ol <- findOverlaps(junctionsC.left, junctionsA, type="equal")
            if(length(ol) > 0){
                junctionsC.left <- junctionsC.left[-ol@from]
            }
            junctionsC <- junctionsC.left
        }
        if(length(junctionsA.right) > 0){
            start(junctionsA.right) <- end(junctionsA.right)
            olC.right <- findOverlaps(junctionsA.right, jncCoords, type="end")
            junctionsC.right <- jncCoords[olC.right@to]
            junctionsC.right$whippet_id <-
                junctionsA.right$whippet_id[olC.right@from]
            junctionsC.right$search <- junctionsA.right$search[olC.right@from]
            ol <- findOverlaps(junctionsC.right, junctionsA, type="equal")
            if(length(ol) > 0){
                junctionsC.right <- junctionsC.right[-ol@from]
            }
            if(exists("junctionsC")){
                junctionsC <- c(junctionsC, junctionsC.right)
            }else{
                junctionsC <- junctionsC.right
            }
        }
        junctionsC$set <- "C"
        junctions <- c(junctionsA, junctionsC)
    }
    # Drop very short junctions (width <= 2).
    keep <- which(width(junctions) > 2)
    # replace junction codes
    # Recode the A/B (or A/C) sets to strand-aware X/Y labels.
    if(type %in% c("AA", "AD")){
        junctions$set[which((junctions$set=="A" &
                                 as.logical(strand(junctions) == "+")) |
                                (junctions$set=="B" &
                                     as.logical(strand(junctions) == "-")))] <- "X"
        junctions$set[which((junctions$set=="A" &
                                 as.logical(strand(junctions) == "-")) |
                                (junctions$set=="B" &
                                     as.logical(strand(junctions) == "+")))] <- "Y"
    }
    if(type %in% c("AF", "AL")){
        junctions$set[which(junctions$set=="A")] <- "X"
        junctions$set[which(junctions$set=="C")] <- "Y"
    }
    junctions <- junctions[keep]
    return(junctions)
}
#' Find transcripts containing/overlapping junctions and replace them with alternative junctions
#'
#' @param whippetDataSet whippetDataSet generated from \code{readWhippetDataSet()}
#' @param junctionPairs GRanges object with alternative Whippet junctions.
#' Generated by findJunctionPairs()
#' @param exons GRanges object made from a GTF containing exon coordinates
#' @param type type of Whippet event (AA/AD/AF/AL).
#' Note only one event type should be processed at a time.
#' @return GRanges object with transcripts containing alternative junctions.
#' @export
#' @importFrom rtracklayer import
#' @import GenomicRanges
#' @family whippet splicing isoform creation
#' @author Beth Signal
#' @examples
#' whippetFiles <- system.file("extdata","whippet/",
#' package = "GeneStructureTools")
#' wds <- readWhippetDataSet(whippetFiles)
#' wds <- filterWhippetEvents(wds)
#'
#' gtf <- rtracklayer::import(system.file("extdata","example_gtf.gtf",
#' package = "GeneStructureTools"))
#' exons <- gtf[gtf$type=="exon"]
#' transcripts <- gtf[gtf$type=="transcript"]
#' g <- BSgenome.Mmusculus.UCSC.mm10::BSgenome.Mmusculus.UCSC.mm10
#'
#' wds.altAce <- filterWhippetEvents(wds, eventTypes="AA")
#' jncPairs.altAce <- findJunctionPairs(wds.altAce, type="AA")
#' transcripts.altAce <- replaceJunction(wds.altAce, jncPairs.altAce, exons, type="AA")
#'
#' wds.altDon <- filterWhippetEvents(wds, eventTypes="AD")
#' jncPairs.altDon <- findJunctionPairs(wds.altDon, type="AD")
#' transcripts.altDon <- replaceJunction(wds.altDon, jncPairs.altDon, exons, type="AD")
#'
#' wds.altFirst <- filterWhippetEvents(wds, eventTypes="AF", psiDelta=0.2)
#' jncPairs.altFirst <- findJunctionPairs(wds.altFirst, type="AF")
#' transcripts.altFirst <- replaceJunction(wds.altFirst, jncPairs.altFirst, exons, type="AF")
#'
#' wds.altLast <- filterWhippetEvents(wds, eventTypes="AL", psiDelta=0.2)
#' jncPairs.altLast <- findJunctionPairs(wds.altLast, type="AL")
#' transcripts.altLast <- replaceJunction(wds.altLast, jncPairs.altLast, exons, type="AL")
replaceJunction <- function(whippetDataSet, junctionPairs, exons, type=NA){
    # Builds alternative transcript structures by swapping junctions.
    # The first branch handles AA/AD events; the second handles AF/AL events.
    # NOTE(review): if `type` is anything other than AA/AD/AF/AL,
    # gtfTranscripts.altered is never created and the final mcols() call
    # below errors — confirm callers always pass a valid type.
    junctionPairs$type <- type
    range <- junctionPairs
    eventCoords <- coordinates(whippetDataSet)
    eventCoords <- eventCoords[eventCoords$id %in% junctionPairs$whippet_id]
    if(type %in% c("AA", "AD")){
        ## find exons that use/overlap the junction - at the side where it's alternative
        # Collapse each junction range to a single point on its "alternative" side.
        end(range)[which(range$search=="right")] <-
            start(range)[which(range$search=="right")]
        start(range)[which(range$search=="left")] <-
            end(range)[which(range$search=="left")]
        ol.junction <- findOverlaps(range, exons)
        ol.junction <- cbind(as.data.frame(ol.junction),
                             transcript_id=exons$transcript_id[ol.junction@to])
        ## table of transcripts overlapping the junction
        # tid: transcript id
        tidTable <- as.data.frame(table(ol.junction$queryHits,
                                        ol.junction$transcript_id))
        tidTable <- tidTable[tidTable$Freq > 0,]
        colnames(tidTable)[1:2] <- c("from_index","to_transcript_id")
        tids <- unique(tidTable$to_transcript_id)
        #all combinations of transcripts + junctions
        tidTable <- data.frame(from_index=rep(1:length(junctionPairs),
                                              each=length(tids)),
                               to_transcript_id=rep(tids,
                                                    length(junctionPairs)),
                               Freq=1)
        tidTable$junction_id <- range$id[tidTable$from_index]
        ## new transcript id:
        ## unique if different junctions are going to be used in same transcript base
        tidTable$new_transcript_id <- paste0(tidTable$to_transcript_id,"+AS",
                                             " ",tidTable$junction_id)
        ## all transcripts for structural altercations
        gtfTranscripts <- exons[exons$transcript_id %in% tids]
        mcols(gtfTranscripts) <-
            mcols(gtfTranscripts)[,c('gene_id','transcript_id',
                                     'transcript_type','exon_id',
                                     'exon_number')]
        m <- match(gtfTranscripts$transcript_id, tidTable$to_transcript_id)
        # add new transcript id
        gtfTranscripts$new_transcript_id <-
            paste0(gtfTranscripts$transcript_id,"+AS ",
                   range$id[tidTable$from_index[m]])
        gtfTranscripts$new_transcript_id_exnum <-
            paste0(gtfTranscripts$new_transcript_id,"_",
                   as.numeric(gtfTranscripts$exon_number))
        # duplicate core transcripts if needed
        # (loop until every transcript/junction combination has its own copy)
        needsDuplicated <- which(!(tidTable$new_transcript_id %in%
                                       gtfTranscripts$new_transcript_id))
        if(length(needsDuplicated) > 0){
            gtfTranscripts.add <- gtfTranscripts[
                gtfTranscripts$transcript_id %in%
                    tidTable$to_transcript_id[needsDuplicated]]
        }
        while(length(needsDuplicated) > 0){
            gtfTranscripts.add <- gtfTranscripts.add[
                gtfTranscripts.add$transcript_id %in%
                    tidTable$to_transcript_id[needsDuplicated]]
            m <- match(gtfTranscripts.add$transcript_id,
                       tidTable$to_transcript_id[needsDuplicated])
            gtfTranscripts.add$new_transcript_id <-
                paste0(gtfTranscripts.add$transcript_id,"+AS ",
                       tidTable$junction_id[needsDuplicated][m])
            gtfTranscripts <- c(gtfTranscripts, gtfTranscripts.add)
            needsDuplicated <- which(!(tidTable$new_transcript_id %in%
                                           gtfTranscripts$new_transcript_id))
        }
        # `from` = the junction id embedded in the new transcript id.
        gtfTranscripts$from <- unlist(lapply(str_split(
            gtfTranscripts$new_transcript_id, "AS "),"[[",2))
        gtfTranscripts <- gtfTranscripts[order(gtfTranscripts$transcript_id,
                                               start(gtfTranscripts))]
        ## alter exons hitting the junctions so they all break at the same place
        # range is at the alt. points defined in eventCoords
        range <- junctionPairs
        start(range) <- min(start(junctionPairs))
        end(range) <- max(start(junctionPairs))
        ol.left <- as.data.frame(findOverlaps(range, gtfTranscripts))
        ol.left$from_id <- range$id[ol.left$queryHits]
        ol.left$to_id <- gtfTranscripts$from[ol.left$subjectHits]
        ol.left <- ol.left[ol.left$from_id == ol.left$to_id,]
        # fix the end of the left transcript exons
        exons.left <- gtfTranscripts[ol.left$subjectHits]
        keep <- which(start(exons.left) <
                          start(junctionPairs[ol.left$queryHits]))
        end(exons.left)[keep] <- start(junctionPairs[ol.left$queryHits])[keep]
        exons.left <- exons.left[keep]
        # now the right side
        range <- junctionPairs
        end(range) <- max(end(junctionPairs))
        start(range) <- min(end(junctionPairs))
        ol.right <- as.data.frame(findOverlaps(range, gtfTranscripts))
        ol.right$from_id <- range$id[ol.right$queryHits]
        ol.right$to_id <- gtfTranscripts$from[ol.right$subjectHits]
        ol.right <- ol.right[ol.right$from_id == ol.right$to_id,]
        # fix the start of the right exons
        exons.right <- gtfTranscripts[ol.right$subjectHits]
        keep <- which(end(exons.right) > end(junctionPairs[ol.right$queryHits]))
        start(exons.right)[keep] <- end(junctionPairs[ol.right$queryHits])[keep]
        exons.right <- exons.right[keep]
        # Pair left/right exons belonging to the same new transcript.
        m <- match(exons.left$new_transcript_id, exons.right$new_transcript_id)
        exons.left <- exons.left[which(!is.na(m))]
        exons.right <- exons.right[m[which(!is.na(m))]]
        exons.glued <- exons.left
        end(exons.glued) <- end(exons.right)
        # replacement exon pairs
        gtfTranscripts.replacement <- c(exons.left,exons.right)
        # remove replaced exons from gtf
        gtfTranscripts.altered <-
            gtfTranscripts[gtfTranscripts$new_transcript_id %in%
                               gtfTranscripts.replacement$new_transcript_id]
        ol <- as.data.frame(findOverlaps(exons.glued, gtfTranscripts.altered))
        ol$from_id <- exons.glued$new_transcript_id[ol$queryHits]
        ol$to_id <- gtfTranscripts.altered$new_transcript_id[ol$subjectHits]
        ol <- ol[ol$from_id == ol$to_id,]
        gtfTranscripts.altered <- gtfTranscripts.altered[-unique(ol$subjectHits)]
        # add together
        gtfTranscripts.altered <- c(gtfTranscripts.altered,
                                    gtfTranscripts.replacement)
        gtfTranscripts.altered <- gtfTranscripts.altered[order(
            gtfTranscripts.altered$new_transcript_id,
            start(gtfTranscripts.altered))]
        gtfTranscripts.altered$set <-
            range$set[match(gtfTranscripts.altered$from, range$id)]
        gtfTranscripts.altered$whippet_id <- junctionPairs$whippet_id[
            match(gtfTranscripts.altered$from, junctionPairs$id)]
        gtfTranscripts.altered$transcript_id <-
            paste0(gtfTranscripts.altered$transcript_id,
                   "+AS",type,gtfTranscripts.altered$set," ",
                   gtfTranscripts.altered$whippet_id)
        gtfTranscripts.altered$set <- paste0(type, "_",
                                             gtfTranscripts.altered$set)
    }else if(type %in% c("AF", "AL")){
        # Collapse junctions to a point and find matching first/last exons.
        end(range)[which(range$search=="right")] <-
            start(range)[which(range$search=="right")]
        start(range)[which(range$search=="left")] <-
            end(range)[which(range$search=="left")]
        olFirstLast.left <- findOverlaps(range, exons, type="start")
        olFirstLast.right <- findOverlaps(range, exons, type="end")
        olFirstLast.left <-
            cbind(as.data.frame(olFirstLast.left),
                  transcript_id=exons$transcript_id[olFirstLast.left@to])
        olFirstLast.left <- olFirstLast.left[
            which(range$search[olFirstLast.left$queryHits] == "left"),]
        olFirstLast.right <-
            cbind(as.data.frame(olFirstLast.right),
                  transcript_id=exons$transcript_id[olFirstLast.right@to])
        olFirstLast.right <- olFirstLast.right[
            which(range$search[olFirstLast.right$queryHits] == "right"),]
        olFirstLast <- rbind(olFirstLast.left, olFirstLast.right)
        olFirstLast$search <- range$search[olFirstLast$queryHits]
        exonsFirstLast <- exons[olFirstLast$subjectHits]
        exonsFirstLast$set <- range$set[olFirstLast$queryHits]
        exonsFirstLast$search <- range$search[olFirstLast$queryHits]
        exonsFirstLast$junction_id <- range$id[olFirstLast$queryHits]
        # Compose a new event id of the form "chr:jncStart-jncEnd+exonBoundary".
        newId.left <- paste0(seqnames(exonsFirstLast),":",
                             start(junctionPairs)[olFirstLast$queryHits],"-",
                             end(junctionPairs)[olFirstLast$queryHits],"+",
                             end(exonsFirstLast))
        newId.right <- paste0(seqnames(exonsFirstLast),":",
                              start(junctionPairs)[olFirstLast$queryHits],"-",
                              end(junctionPairs)[olFirstLast$queryHits],"+",
                              start(exonsFirstLast))
        exonsFirstLast$new_id <- NA
        exonsFirstLast$new_id[which(exonsFirstLast$search=="left")] <-
            newId.left[which(exonsFirstLast$search=="left")]
        exonsFirstLast$new_id[which(exonsFirstLast$search=="right")] <-
            newId.right[which(exonsFirstLast$search=="right")]
        m <- match(exonsFirstLast$junction_id, junctionPairs$id)
        junctionPairs <- junctionPairs[m]
        range <- junctionPairs
        range$id <- exonsFirstLast$new_id
        # Collapse to the opposite (fixed) side to find host transcripts.
        end(range)[which(range$search=="left")] <-
            start(range)[which(range$search=="left")]
        start(range)[which(range$search=="right")] <-
            end(range)[which(range$search=="right")]
        ol.junction <- findOverlaps(range, exons)
        ol.junction <- cbind(as.data.frame(ol.junction),
                             transcript_id=exons$transcript_id[ol.junction@to])
        ## table of transcripts overlapping the junction
        # tid: transcript id
        tidTable <- as.data.frame(table(ol.junction$queryHits,
                                        ol.junction$transcript_id))
        tidTable <- tidTable[tidTable$Freq > 0,]
        colnames(tidTable)[1:2] <- c("from_index","to_transcript_id")
        tids <- unique(tidTable$to_transcript_id)
        tidTable$junction_id <- range$id[tidTable$from_index]
        ## new transcript id --
        ## unique if different junctions are going to be used in same transcript base
        tidTable$new_transcript_id <- paste0(tidTable$to_transcript_id,"+AS ",
                                             tidTable$junction_id)
        ## all transcripts for structural altercations
        gtfTranscripts <- exons[exons$transcript_id %in% tids]
        mcols(gtfTranscripts) <-
            mcols(gtfTranscripts)[,c('gene_id','transcript_id',
                                     'transcript_type','exon_id',
                                     'exon_number')]
        m <- match(gtfTranscripts$transcript_id, tidTable$to_transcript_id)
        # add new transcript id
        gtfTranscripts$new_transcript_id <-
            paste0(gtfTranscripts$transcript_id,"+AS ",
                   range$id[tidTable$from_index[m]])
        gtfTranscripts$new_transcript_id_exnum <-
            paste0(gtfTranscripts$new_transcript_id,
                   "_",
                   as.numeric(gtfTranscripts$exon_number))
        # duplicate core transcripts if needed
        needsDuplicated <- which(!(tidTable$new_transcript_id %in%
                                       gtfTranscripts$new_transcript_id))
        if(length(needsDuplicated) > 0){
            gtfTranscripts.add <-
                gtfTranscripts[gtfTranscripts$transcript_id %in%
                                   tidTable$to_transcript_id[needsDuplicated]]
        }
        while(length(needsDuplicated) > 0){
            gtfTranscripts.add <- gtfTranscripts.add[
                gtfTranscripts.add$transcript_id %in%
                    tidTable$to_transcript_id[needsDuplicated]]
            m <- match(gtfTranscripts.add$transcript_id,
                       tidTable$to_transcript_id[needsDuplicated])
            gtfTranscripts.add$new_transcript_id <-
                paste0(gtfTranscripts.add$transcript_id,"+AS ",
                       tidTable$junction_id[needsDuplicated][m])
            gtfTranscripts <- c(gtfTranscripts, gtfTranscripts.add)
            needsDuplicated <- which(!(tidTable$new_transcript_id %in%
                                           gtfTranscripts$new_transcript_id))
        }
        gtfTranscripts$from <- unlist(lapply(str_split(
            gtfTranscripts$new_transcript_id, "AS "),"[[",2))
        gtfTranscripts <- gtfTranscripts[order(gtfTranscripts$transcript_id,
                                               start(gtfTranscripts))]
        gtfTranscripts <- gtfTranscripts[gtfTranscripts$transcript_id %in%
                                             exonsFirstLast$transcript_id]
        gtfTranscripts$new_transcript_id_exnum <-
            paste0(gtfTranscripts$new_transcript_id, "_",
                   as.numeric(gtfTranscripts$exon_number))
        range <- junctionPairs
        range$id <- exonsFirstLast$new_id
        ## find exons that use/overlap the junction - at the side where it's alternative
        end(range)[which(range$search=="left")] <-
            start(range)[which(range$search=="left")]
        start(range)[which(range$search=="right")] <-
            end(range)[which(range$search=="right")]
        ### Same used junction replacement
        ol.left <- as.data.frame(findOverlaps(range, gtfTranscripts))
        ol.left$from_id <- range$id[ol.left$queryHits]
        ol.left$to_id <- gtfTranscripts$from[ol.left$subjectHits]
        ol.left <- ol.left[ol.left$from_id == ol.left$to_id,]
        ol.left <- ol.left[which(range$search[ol.left$queryHits] == "left"),]
        # fix the end of the left transcript exons
        exons.left <- gtfTranscripts[ol.left$subjectHits]
        end(exons.left) <- end(range[ol.left$queryHits])
        ol.right <- as.data.frame(findOverlaps(range, gtfTranscripts))
        ol.right$from_id <- range$id[ol.right$queryHits]
        ol.right$to_id <- gtfTranscripts$from[ol.right$subjectHits]
        ol.right <- ol.right[ol.right$from_id == ol.right$to_id,]
        ol.right <-
            ol.right[which(range$search[ol.right$queryHits] == "right"),]
        # fix the end of the right transcript exons
        exons.right <- gtfTranscripts[ol.right$subjectHits]
        start(exons.right) <- start(range[ol.right$queryHits])
        junctionReplacementExons <- c(exons.left, exons.right)
        junctionReplacementExons$set <-
            range$set[match(junctionReplacementExons$from, range$id)]
        keep <- which(gtfTranscripts$new_transcript_id %in%
                          junctionReplacementExons$new_transcript_id)
        gtfTranscripts.altered <- gtfTranscripts[keep]
        gtfTranscripts.altered$set <-
            range$set[match(gtfTranscripts.altered$from, range$id)]
        ### First/last exon replacement
        m <- match(junctionReplacementExons$from, exonsFirstLast$new_id)
        replacementExonsFirstLast <- junctionReplacementExons
        ranges(replacementExonsFirstLast) <- ranges(exonsFirstLast[m])
        # remove anything after first/last
        # AF: walk exon numbers downwards and drop preceding exons.
        if(type=="AF"){
            back <- 0
            n <- which(gtfTranscripts.altered$new_transcript_id_exnum %in%
                           junctionReplacementExons$new_transcript_id_exnum)
            ids <- gtfTranscripts.altered$new_transcript_id[n]
            exonNumbers <- as.numeric(gtfTranscripts.altered$exon_number[n])
            altTidExNum <- paste0(ids, "_", exonNumbers-back)
            m <- match(altTidExNum,
                       gtfTranscripts.altered$new_transcript_id_exnum)
            m <- m[!is.na(m)]
            while(length(m) > 0){
                gtfTranscripts.altered <- gtfTranscripts.altered[-m]
                back <- back + 1
                altTidExNum <- paste0(ids, "_", exonNumbers-back)
                m <- match(altTidExNum,
                           gtfTranscripts.altered$new_transcript_id_exnum)
                m <- m[!is.na(m)]
            }
        }
        # AL: walk exon numbers upwards and drop following exons.
        if(type=="AL"){
            fwd <- 0
            n <- which(gtfTranscripts.altered$new_transcript_id_exnum %in%
                           junctionReplacementExons$new_transcript_id_exnum)
            ids <- gtfTranscripts.altered$new_transcript_id[n]
            exonNumbers <- as.numeric(gtfTranscripts.altered$exon_number[n])
            altTidExNum <- paste0(ids, "_", exonNumbers+fwd)
            m <- match(altTidExNum,
                       gtfTranscripts.altered$new_transcript_id_exnum)
            m <- m[!is.na(m)]
            while(length(m) > 0){
                gtfTranscripts.altered <- gtfTranscripts.altered[-m]
                fwd <- fwd + 1
                altTidExNum <- paste0(ids, "_", exonNumbers+fwd)
                m <- match(altTidExNum,
                           gtfTranscripts.altered$new_transcript_id_exnum)
                m <- m[!is.na(m)]
            }
        }
        # remove overlapping exons
        longRange <- replacementExonsFirstLast
        rangeDF <- data.frame(start_1 = start(replacementExonsFirstLast),
                              start_2 = start(junctionReplacementExons),
                              end_1 = end(replacementExonsFirstLast),
                              end_2 = end(junctionReplacementExons))
        start(longRange) <- apply(rangeDF[,1:2], 1, min)
        end(longRange) <- apply(rangeDF[,3:4], 1, max)
        ol <- as.data.frame(findOverlaps(longRange, gtfTranscripts.altered))
        ol$from_id <- longRange$new_transcript_id[ol$queryHits]
        ol$to_id <- gtfTranscripts.altered$new_transcript_id[ol$subjectHits]
        ol <- ol[which(ol$from_id == ol$to_id),]
        if(dim(ol)[1] > 0){
            gtfTranscripts.altered <-
                (gtfTranscripts.altered[-unique(ol$subjectHits)])
        }
        gtfTranscripts.altered <- c(gtfTranscripts.altered,
                                    junctionReplacementExons,
                                    replacementExonsFirstLast)
        #redo exon numbering
        # (descending for '-' strand transcripts, ascending for '+')
        gtfTranscripts.altered <- gtfTranscripts.altered[
            order(gtfTranscripts.altered$new_transcript_id,
                  start(gtfTranscripts.altered))]
        tab <- as.data.frame(table(gtfTranscripts.altered$new_transcript_id))
        tab$strand <- as.character(strand(gtfTranscripts.altered[
            match(tab$Var1,gtfTranscripts.altered$new_transcript_id)]))
        gtfTranscripts.altered$exon_number <-
            unlist(apply(tab, 1, function(x)
                if(x[3] == "-"){c(x[2]:1)}else{c(1:x[2])}))
        gtfTranscripts.altered <- gtfTranscripts.altered[
            order(gtfTranscripts.altered$new_transcript_id,
                  start(gtfTranscripts.altered))]
        gtfTranscripts.altered$set <- range$set[
            match(gtfTranscripts.altered$from, range$id)]
        mcols(gtfTranscripts.altered) <- mcols(gtfTranscripts.altered)[
            ,match(c('gene_id','transcript_id',
                     'transcript_type','exon_id',
                     'exon_number','from','set'),
                   colnames(mcols(gtfTranscripts.altered)))]
        colnames(mcols(gtfTranscripts.altered))[6] <- "new_event_id"
        gtfTranscripts.altered$whippet_id <- range$whippet_id[
            match(gtfTranscripts.altered$new_event_id, range$id)]
        gtfTranscripts.altered$transcript_id <- paste0(
            gtfTranscripts.altered$transcript_id,
            "+AS",type,gtfTranscripts.altered$set," ",
            gtfTranscripts.altered$whippet_id)
        gtfTranscripts.altered$set <- paste0(type, "_",
                                             gtfTranscripts.altered$set)
    }
    # Final tidy: keep a fixed metadata column set and drop structural duplicates.
    mcols(gtfTranscripts.altered) <-
        mcols(gtfTranscripts.altered)[,c('gene_id','transcript_id',
                                         'transcript_type','exon_id',
                                         'exon_number',
                                         'set','whippet_id')]
    gtfTranscripts.altered <- removeDuplicateTranscripts(gtfTranscripts.altered)
    return(gtfTranscripts.altered)
}
#' Remove transcript duplicates
#'
#' Removes structural duplicates of transcripts in a GRanges object.
#' Note that duplicates must have different transcript ids.
#' @param transcripts GRanges object with transcript structures in exon form
#' @return GRanges object with unique transcript structures in exon form
#' @export
#' @import GenomicRanges
#' @importFrom rtracklayer import
#' @family gtf manipulation
#' @author Beth Signal
#' @examples
#' gtf <- rtracklayer::import(system.file("extdata","example_gtf.gtf",
#' package = "GeneStructureTools"))
#' exons <- gtf[gtf$type=="exon"]
#' exons.altName <- exons
#' exons.altName$transcript_id <- paste(exons.altName$transcript_id, "duplicated", sep="_")
#' exons.duplicated <- c(exons, exons.altName)
#' length(exons.duplicated)
#' exons.deduplicated <- removeDuplicateTranscripts(exons.duplicated)
#' length(exons.deduplicated)
removeDuplicateTranscripts <- function(transcripts) {
    # Build a per-exon "start-end" signature, then concatenate the signatures
    # of each transcript (in exon order) into one structure string.
    exonDF <- as.data.frame(transcripts)
    exonSig <- paste0(exonDF$start, "-", exonDF$end)
    structureSig <- vapply(split(exonSig, as.character(exonDF$transcript_id)),
                           paste0, character(1), collapse = "+")
    # Keep the first transcript id seen for each unique structure string.
    keepIds <- names(structureSig)[!duplicated(structureSig)]
    transcripts[transcripts$transcript_id %in% keepIds]
}
|
94773516ee1152ede5f6e83861924dacbcdc980e
|
2fa2bdd966ff1f5cf1704a4971b13b9a55805d24
|
/CAPM.R
|
46a2831cb08351b03fbda2f9a605a6754045895f
|
[] |
no_license
|
davidht296/CAPM
|
12787c9b435d6b214f1ae6ff929c193551bbc821
|
cd6c2eaf80bda3ec6a182b285549dff86f6c3c52
|
refs/heads/master
| 2022-12-03T07:06:31.748464
| 2020-08-25T04:54:55
| 2020-08-25T04:54:55
| 290,115,914
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,953
|
r
|
CAPM.R
|
# File: CAPM Estimation - ASX (5yr, Monthly)
# Author: David Harris
# Date: 15-04-2020
# INSTALL AND LOAD PACKAGES ################################
# Installs pacman ("package manager") if needed
if (!require("pacman")) install.packages("pacman")
# Use pacman to load add-on packages as desired
pacman::p_load(pacman, aplot, boot, car, caret, expss, GGally,
ggthemes, ggvis, httr, huxtable, jtools, lars,
lmtest, lubridate,
MASS, olsrr, PerformanceAnalytics, plotly, plyr,
psych, quantmod, remotes, rio, rmarkdown, sandwich, shiny, sur,
tidyquant, tidyverse, xts)
# LOAD t-TEST FUNCTION #####################################
# Two-sided t-test of H0: coefficient number `coefnum` of model `reg`
# equals `val`. Returns the p-value.
ttest <- function(reg, coefnum, val) {
  smry <- coef(summary(reg))
  tval <- (smry[coefnum, 1] - val) / smry[coefnum, 2]
  pt(abs(tval), reg$df.residual, lower.tail = FALSE) * 2
}
# Set Working Directory ####################################
getwd()
setwd("C:/Users/dtunks/OneDrive - KPMG/Desktop/PD & Personal/R data analysis/CAPM") #*remember to change \ to /
# ASSET LIST ###############################################
# Download the ASX200 constituent list and keep code/company/sector/cap.
asx200 <- read.csv("https://www.asx200list.com/uploads/csv/20200401-asx200.csv", header = TRUE) ## from ASX200 list - https://www.asx200list.com/
n <- nrow(asx200)
asx200 <- asx200[c(2:n), 1:4]
asx.colnames <- c("Code", "Company", "Sector", "Market Cap")
names(asx200) <- asx.colnames
ticker <- as.character(asx200[,1])
row.names(asx200) <- ticker
# RISK-FREE RATE DATA ######################################
Rf <- import("f2.1-data.csv") ## from RBA - https://www.rba.gov.au/statistics/tables/#interest-rates
## need to manually format dates to YYYY-MM-DD in Excel
n <- nrow(Rf)
Rf <- Rf[c(12:n), c(1, 4)]
Rf <- Rf[!apply(Rf == "", 1, all),]
Rf$V1 <- as.Date(Rf$V1)
Rf$V4 <- as.numeric(Rf$V4)
# Convert the annual yield (percent) to an equivalent monthly rate (percent).
Rf$V4 <- 100*((1+(Rf$V4/100))^(1/12)-1)
Rf <- xts(Rf$V4, order.by = Rf$V1)
names(Rf) <- c("Rf")
# BENCHMARK DATA ###########################################
Rb <- read.csv("https://query1.finance.yahoo.com/v7/finance/download/VAS.AX?period1=1430092800&period2=1587945600&interval=1mo&events=history") ## from Yahoo
n <- nrow(Rb)
# NOTE(review): `c(1:n-1)` parses as (1:n)-1 = 0:(n-1); the 0 index is
# silently dropped, so this keeps rows 1..(n-1). Likely intended as 1:(n-1).
Rb <- Rb[c(1:n-1), c(1,6)]
Rb$Date <- as.Date(Rb[, 1])
Rb <- xts(Rb$`Adj.Close`, order.by = Rb$Date)
names(Rb) <- c("Rb")
# Monthly log returns of the benchmark.
Rb$Rb <- Return.calculate(Rb$Rb, method = "log")
# GET ASSET DATA ###########################################
# Download monthly prices for each ASX200 ticker and merge the log returns
# into one xts alongside Rf and Rb.
url_f <- "https://query1.finance.yahoo.com/v7/finance/download/"
url_e <- ".AX?period1=1430092800&period2=1587945600&interval=1mo&events=history"
n <- nrow(asx200)
data <- merge(Rf, Rb)
for(i in 1:n){
  url_temp_ch <- as.character(asx200[i,1])
  url_temp <- paste(url_f, url_temp_ch, url_e, sep = "")
  Ra_temp <- read.csv(url_temp)
  n_temp <- nrow(Ra_temp)
  # Same 1:n-1 quirk as above: keeps rows 1..(n_temp-1).
  Ra_temp <- Ra_temp[c(1:n_temp-1), c(1,6)]
  Ra_temp$Date <- as.Date(Ra_temp[, 1])
  Ra_temp <- xts(Ra_temp$`Adj.Close`, order.by = Ra_temp$Date)
  header <- as.character(asx200[i,1])
  header <- paste(header, ".Ra", sep = "")
  names(Ra_temp) <- header
  Ra_temp[, 1] <- Return.calculate(Ra_temp[, 1], method = "log")
  data <- merge(data, Ra_temp)
}
# TRIM DATA ################################################
# Keep only months where both Rf and Rb are observed.
data <- data[complete.cases(data[, c(1, 2)]),]
# GENERATE CAPM VARIABLES ##################################
# mrp = market risk premium; *.Er = excess return of each asset over Rf.
n <- ncol(data)
capm.data <- as.xts(data$Rb-data$Rf)
names(capm.data) <- "mrp"
for(i in 3:n){
  Ra.Er_temp <- as.xts(data[, i]-data$Rf)
  header <- as.character(asx200[i-2,1])
  header <- paste(header, ".Er", sep = "")
  names(Ra.Er_temp) <- header
  capm.data <- merge(capm.data, Ra.Er_temp)
}
# CALCULATE PARAMETERS #####################################
# Fit excess return ~ mrp per asset; test alpha = 0 and beta = 1.
# try(..., silent = T) keeps the loop running when a regression fails.
n <- ncol(capm.data)
capm.para <- data.frame()
for(i in 2:n){
  try(
    capm <- lm(capm.data[, i] ~ capm.data$mrp)
    , silent = T)
  para.temp <- data.frame(rep(0, 4))
  try(para.temp <- capm$coefficients, silent = T)
  para.temp <- as.data.frame(para.temp)
  para.temp <- as.data.frame(transpose(para.temp))
  try(para.temp[1, 3] <- ttest(capm, 1, 0), silent = T)
  try(para.temp[1, 4] <- ttest(capm, 2, 1), silent = T)
  names(para.temp) <- c("alpha", "beta", "alpha(0) ~ Pr(>|t|)", "beta(1) ~ Pr(>|t|)")
  row.names(para.temp) <- as.character(asx200[i-1,1])
  capm.para[i-1, 1:4] <- para.temp[1, 1:4]
  try(rm(capm), silent = T)
  rm(para.temp)
}
# Join estimates back onto the constituent table and export.
asx200 <- merge(asx200, capm.para, by = 0, all.x = TRUE, all.y = TRUE)
n <- ncol(asx200)
asx200 <- asx200[, 2:n]
write.table(asx200, file = "ASX200_CAPM.csv", row.names = F, sep = ",")
# FURTHER IDEAS ############################################
# These two lines were bare prose (not comments), which made the whole
# script fail to parse; they are kept here as TODO comments instead.
# TODO: CUSUM test on alpha and beta to detect structural breaks?
# TODO: Run on All Ords companies 300-500??
# CLEAN UP #################################################
# Clear environment
rm(list = ls())
# Clear packages
p_unload(all) # Remove all add-ons
detach("package:datasets", unload = TRUE)  # For base
# Clear plots — dev.off() errors when only the null device is open,
# so guard it (dev.cur() returns 1 when no plot device exists).
if (dev.cur() > 1) dev.off()
# Clear console
cat("\014")  # ctrl+L
# Clear mind :)
|
bd4f6e485a4826190643459260c40d1aff8e238a
|
9a59a94c2752f411d95924b070a7f1e848ed18f9
|
/R/RecipeSteps.R
|
87bbb29743dbc6746b303a33b21a3f920efd8679
|
[
"MIT"
] |
permissive
|
TatsumotoH/addstepr
|
1065710352f2cfc8299793c39b247bf6f09b89d3
|
701dea4908e689b99b84c13d73b68fa0b0bce309
|
refs/heads/master
| 2023-07-01T01:22:54.692736
| 2021-08-01T08:15:31
| 2021-08-01T08:15:31
| 376,506,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,923
|
r
|
RecipeSteps.R
|
# add new step functions for recipes and tunes
# step_dbscan_fpc ----
#
# Internal constructor: wraps the step fields in a recipes `step` object
# with subclass "dbscan_fpc". `model` and `data` are NULL until prep() runs.
step_dbscan_fpc_new <- function(terms, role, trained, skip, id, eps, minPts, retain, model, data) {
  step(
    subclass = "dbscan_fpc",
    terms = terms,
    role = role,
    trained = trained,
    skip = skip,
    id = id,
    eps = eps,
    minPts = minPts,
    retain = retain,
    model = model,
    data = data
  )
}
#' step_dbscan_fpc: a recipe step for clustering by dbscan.
#' A step function based on the dbscan() function from the fpc package.
#'
#' @param recipe A recipe object the step will be added to.
#' @param ... One or more selector functions choosing the variables to cluster on.
#' @param role For model terms created by this step, what analysis role should
#'  they be assigned?
#' @param trained A logical to indicate if the quantities for preprocessing
#'  have been estimated.
#' @param skip A logical. Should the step be skipped when the recipe is baked
#'  by [recipes::bake.recipe()]?
#' @param eps The parameter eps defines radius of neighborhood around a point x.
#' @param minPts The parameter minPts is the minimum number of neighbors within "eps" radius.
#'  When NULL, prep() defaults it to the number of predictors + 1.
#' @param retain A logical to specify whether the original predictors should be retained along with the new embedding
#' variables.
#' @param id A character string that is unique to this step to identify it.
#' @export
step_dbscan_fpc = function(recipe, ..., role = "predictor", trained = FALSE, skip = FALSE,
                           eps = NULL, minPts = NULL, retain = FALSE, id = rand_id("dbscan_fpc")) {
  # eps has no sensible default; fail early if the caller omitted it.
  if (is.null(eps)) stop("eps value is not defined")
  add_step(recipe,
           step_dbscan_fpc_new(terms = recipes::ellipse_check(...),
                               role = role,
                               trained = trained,
                               skip = skip,
                               id = id,
                               eps = eps,
                               minPts = minPts,
                               retain = retain,
                               model = NULL,
                               data = NULL
           )
  )
}
#' prep method for step_dbscan_fpc: fits the fpc::dbscan model on training data
#'
#' @param x A `step_dbscan_fpc` object.
#' @param training A data frame/tibble containing the training data.
#' @param info A tibble describing the current set of variables.
#' @param ... Not currently used.
#' @return A trained `step_dbscan_fpc` object carrying the fitted model and
#'  the (NA-free) training data used to fit it.
#' @export
prep.step_dbscan_fpc = function(x, training, info = NULL, ...) {
  dat = training[, recipes::terms_select(x$terms, info = info), drop = FALSE]
  # dbscan cannot handle missing values: warn and drop incomplete rows.
  if (sum(is.na(dat)) > 0) {
    warning("Missing values were present")
    dat <- na.omit(dat)
    if (nrow(dat) == 0) stop("No rows remain in dataset after missing values rows omitted")
  }
  if (ncol(dat) == 0) stop("Clusters not created as no numeric columns were found")
  # Default minPts: number of predictor columns + 1.
  if (is.null(x$minPts)) {
    minPts <- ncol(dat) + 1
  } else {
    minPts <- x$minPts
  }
  mod = fpc::dbscan(data = dat, eps = x$eps, MinPts = minPts, scale = FALSE, method = "hybrid", seeds = TRUE)
  # With seeds = TRUE, a fit that found clusters carries an `isseed` vector.
  if (is.null(mod$isseed)) {
    stop(paste("No clusters can be detected using MinPts = ", minPts, "and eps = ", x$eps))
  }
  # BUG FIX: the constructor parameter is `minPts` (lower-case m);
  # the previous call passed `MinPts =`, which step_dbscan_fpc_new() does not
  # accept (no `...`), so prep() failed with an "unused argument" error.
  step_dbscan_fpc_new(terms = x$terms,
                      role = x$role,
                      trained = TRUE,
                      skip = x$skip,
                      id = x$id,
                      eps = x$eps,
                      minPts = x$minPts,
                      retain = x$retain,
                      model = mod,
                      data = dat)
}
#' bake (juice) method for step_dbscan_fpc: assigns dbscan cluster labels
#' to new data, optionally dropping the original predictor columns.
#' @export
bake.step_dbscan_fpc <- function(object, new_data, ...) {
  # Predict cluster membership for the new rows from the trained fpc model,
  # using the training data the model was fitted on.
  predictorCols <- names(object$data)
  clusterIds <- predict(object$model, data = object$data,
                        newdata = new_data[, predictorCols])
  # Append the cluster labels ("C0", "C1", ...) as a factor column.
  new_data <- cbind(new_data, cluster = as.factor(paste0("C", clusterIds)))
  # Drop the raw predictors unless the step was asked to retain them.
  if (!object$retain) {
    new_data[, predictorCols] <- NULL
  }
  as_tibble(new_data)
}
#' print method for step_dbscan_fpc
#' @export
print.step_dbscan_fpc = function(x, width = max(20, options()$width - 30), ...) {
  if (x$trained) {
    # Use the full field name `model`: the previous `x$mod` only resolved via
    # `$` partial matching, which is fragile if fields are added later.
    # NOTE(review): the "- 1" assumes a noise cluster (id 0) is present in
    # the fpc::dbscan result — confirm for fits with no noise points.
    cat(paste0("dbscan step found ", length(unique(x$model$cluster)) - 1," clusters using "))
    cat(recipes::format_selectors(x$terms, width = width))
    cat(" [trained]\n")
  } else {
    cat("dbscan step for ")
    cat(recipes::format_selectors(x$terms, width = width))
    cat("\n")
  }
  invisible(x)
}
#' tidy method for step_dbscan_fpc
#'
#' Returns a tibble with one row per selected term plus the step id.
#' NOTE(review): relies on the unexported recipes:::simple_terms(), which may
#' break across recipes versions.
#' @export
tidy.step_dbscan_fpc = function(x, ...) {
  res <- recipes:::simple_terms(x, ...)
  res$id <- x$id
  res
}
# Code to make step_dbscan_fpc tunable — start
# Reference:
# https://qiita.com/takechanman1228/items/c7f23873c087630bab18
# Supplies a tuning range for the dbscan `eps` parameter (neighbourhood radius).
#' @export
eps <- function(range = c(0.1, 3), trans = NULL) {
  new_quant_param(
    type = "double",
    range = range,
    inclusive = c(TRUE, TRUE), # both endpoints are valid values
    trans = trans,
    label = c(eps = "eps"),
    finalize = NULL
  )
}
# Supplies a tuning range for the dbscan `minPts` parameter (minimum neighbours).
#' @export
minPts <- function(range = c(1L, 20L), trans = NULL) {
  new_quant_param(
    type = "integer",
    range = range,
    inclusive = c(TRUE, TRUE),
    trans = trans,
    label = c(minPts = "minPts"),
    finalize = NULL # hook called when the data are finalized; used for data-dependent parameter ranges
  )
}
# Register the step_dbscan_fpc parameters with the tunable() generic
#' @export
tunable.step_dbscan_fpc = function(x, ...) {
  tibble::tibble(
    name = c("eps", "minPts"),
    call_info = list(
      list(pkg = "addstepr", fun = "eps", range=c(0.1,3)), ## parameter range for the first name, eps
      list(pkg = "addstepr", fun = "minPts", range=c(1,20)) ## parameter range for the second name, minPts
    ),
    source = "recipe", #recipe or model_spec
    component = "step_dbscan_fpc",
    component_id = x$id
  )
}
# step_dbscanをtunableにするためのコード ここまで
# step_dbscan ----
# dbscan using the original algorithm, backed by dbscan::dbscan().
# Low-level constructor for the step object; not user facing.
step_dbscan_new = function(terms, role, trained, skip, id, eps, minPts, retain, model, data) {
  step(
    subclass = "dbscan",
    terms = terms, role = role, trained = trained, skip = skip, id = id,
    eps = eps, minPts = minPts, retain = retain,
    model = model, data = data
  )
}
#' step_dbscan: a recipe step for clustering by dbscan (original algorithm),
#' backed by dbscan::dbscan().
#'
#' @param role For model terms created by this step, what analysis role should
#'  they be assigned?
#' @param trained A logical to indicate if the quantities for preprocessing
#'  have been estimated.
#' @param skip A logical. Should the step be skipped when the recipe is baked
#'  by [recipes::bake.recipe()]?
#' @param eps The parameter eps defines radius of neighborhood around a point x.
#' @param minPts The parameter minPts is the minimum number of neighbors within "eps" radius.
#' @param retain A logical to specify whether the original predictors should be
#'  retained along with the new cluster column.
#' @param id A character string that is unique to this step to identify it.
#' @export
step_dbscan = function(recipe, ..., role = "predictor", trained = FALSE, skip = FALSE,
                       eps = NULL, minPts = NULL, retain = FALSE, id = rand_id("dbscan")) {
  # eps has no sensible default, so require it up front.
  if (is.null(eps)) stop("eps value is not defined")
  new_step <- step_dbscan_new(
    terms = recipes::ellipse_check(...),
    role = role, trained = trained, skip = skip, id = id,
    eps = eps, minPts = minPts, retain = retain,
    model = NULL, data = NULL
  )
  add_step(recipe, new_step)
}
#' prep for step_dbscan
#'
#' Selects the modelling columns from the training data, drops rows with
#' missing values, resolves the minPts default, fits dbscan::dbscan() and
#' stores the fitted model plus the training predictors in a trained step.
#' @export
prep.step_dbscan = function(x, training, info = NULL, ...) {
  dat = training[, recipes::terms_select(x$terms, info = info), drop = FALSE]
  if (sum(is.na(dat)) > 0) {
    warning("Missing values were present")
    dat <- na.omit(dat)
    if (nrow(dat) == 0) stop("No rows remain in dataset after missing values rows omitted")
  }
  if (ncol(dat) == 0) stop("Clusters not created as no numeric columns were found")
  # Default minPts when none was supplied: number of predictors + 1.
  if (is.null(x$minPts)) {
    minPts <- ncol(dat) + 1
  } else {
    minPts <- x$minPts
  }
  mod = dbscan::dbscan(x = dat, eps = x$eps, minPts = minPts, weights = NULL, borderPoints = TRUE)
  # Cluster label 0 marks noise; if the labels sum to zero, every point was
  # classed as noise and no cluster was found.
  if (0 == sum(mod$cluster)) {
    stop(paste("No clusters can be detected using minPts = ", minPts, "and eps = ", x$eps))
  }
  step_dbscan_new(terms = x$terms,
                  role = x$role,
                  trained = TRUE,
                  skip = x$skip,
                  id = x$id,
                  eps = x$eps,
                  minPts = x$minPts,
                  retain = x$retain,
                  model = mod,
                  data = dat)
}
#' bake(juice) for step_dbscan
#'
#' Assigns each row of `new_data` to a fitted cluster and appends the
#' assignment as a factor column named `cluster` (labels "C<id>").
#' When `retain` is FALSE the original predictor columns are dropped.
#' @export
bake.step_dbscan = function(object, new_data, ...) {
  predictor_cols <- names(object$data)
  clus <- predict(object$model, data = object$data, newdata = new_data[, predictor_cols])
  new_data <- cbind(new_data, cluster = as.factor(paste0("C", clus)))
  if (!object$retain) {
    new_data[, predictor_cols] <- NULL
  }
  as_tibble(new_data)
}
#' print for step_dbscan
#'
#' @param x A trained or untrained `step_dbscan` object.
#' @param width Maximum width used when printing the selected terms.
#' @return `x`, invisibly.
#' @export
print.step_dbscan = function(x, width = max(20, options()$width - 30), ...) {
  if (x$trained) {
    # Cluster id 0 marks noise points in dbscan output, so count only the
    # non-zero labels.  BUG FIX: use the full element name `model` -- the
    # original `x$mod` only resolved through `$` partial matching.  The
    # original `length(unique(...)) - 1` also under-counted by one whenever
    # no noise points (label 0) were present.
    n_found <- length(setdiff(unique(x$model$cluster), 0))
    cat(paste0("dbscan step found ", n_found, " clusters using "))
    cat(recipes::format_selectors(x$terms, width = width))
    cat(" [trained]\n")
  } else {
    cat("dbscan step for ")
    cat(recipes::format_selectors(x$terms, width = width))
    cat("\n")
  }
  invisible(x)
}
#' tidy for step_dbscan
#'
#' Returns the standard recipes tidy tibble for this step, with the step id
#' attached.  (The original roxygen title wrongly said "print".)
#' @export
tidy.step_dbscan = function(x, ...) {
  res <- recipes:::simple_terms(x, ...)
  res$id <- x$id
  res
}
# Functions that make step_dbscan's parameters tunable
#' (Dead code, kept for reference: an earlier eps2 range definition.)
#' @export
#' eps2 = function(range = c(0.1, 3), trans = NULL) {
#' new_quant_param(
#' type = "double",
#' range = range,
#' inclusive = c(TRUE, TRUE),
#' trans = trans,
#' label = c(eps2 = "eps"),
#' finalize = NULL
#' )
#' }
#'
#' #' (Dead code, kept for reference: an earlier minPts2 range definition.)
#' #' @export
#' minPts2 = function(range = c(1L, 20L), trans = NULL) {
#' new_quant_param(
#' type = "integer",
#' range = range,
#' inclusive = c(TRUE, TRUE),
#' trans = trans,
#' label = c(minPts2 = "minPts"),
#' finalize = NULL # hook called when the data are finalized; used for data-dependent parameter ranges
#' )
#' }
#' Register the step_dbscan parameters with the tunable() generic
#' @export
tunable.step_dbscan = function(x, ...) {
  tibble::tibble(
    name = c("eps", "minPts"),
    call_info = list(
      list(pkg = "addstepr", fun = "eps", range=c(0.1,3)), ## parameter range for the first name, eps
      list(pkg = "addstepr", fun = "minPts", range=c(1,20)) ## parameter range for the second name, minPts
    ),
    source = "recipe", #recipe or model_spec
    component = "step_dbscan",
    component_id = x$id
  )
}
# ---- end of step_dbscan ----
# step_kmeans ----
# Low-level constructor for the kmeans step object; not user facing.
step_kmeans_new = function(terms, role, trained, skip, id, num_k, retain, model, data) {
  step(
    subclass = "kmeans",
    terms = terms, role = role, trained = trained, skip = skip, id = id,
    num_k = num_k, retain = retain,
    model = model, data = data
  )
}
#' step_kmeans: a recipe step for clustering by kmeans.
#'
#' @param role For model terms created by this step, what analysis role should
#'  they be assigned?
#' @param trained A logical to indicate if the quantities for preprocessing
#'  have been estimated.
#' @param skip A logical. Should the step be skipped when the recipe is baked
#'  by [recipes::bake.recipe()]?
#' @param num_k The parameter num_k is passed to the parameter centers in kmeans function.
#' @param retain A logical to specify whether the original predictors should be
#'  retained along with the new cluster column.
#' @param id A character string that is unique to this step to identify it.
#' @export
step_kmeans = function(recipe, ..., role = "predictor", trained = FALSE, skip = FALSE,
                       num_k = NULL, retain = FALSE, id = rand_id("kmeans")) {
  # The number of centers has no default here, so require it up front.
  if (is.null(num_k)) stop("num_k value is not defined")
  new_step <- step_kmeans_new(
    terms = recipes::ellipse_check(...),
    role = role, trained = trained, skip = skip, id = id,
    num_k = num_k, retain = retain,
    model = NULL, data = NULL
  )
  add_step(recipe, new_step)
}
#' prep for step_kmeans
#'
#' Selects the modelling columns from the training data, drops rows with
#' missing values, resolves the number of centers, fits stats::kmeans() and
#' stores the fitted model plus the training predictors in a trained step.
#' @export
prep.step_kmeans = function(x, training, info = NULL, ...) {
  dat = training[, recipes::terms_select(x$terms, info = info), drop = FALSE]
  if (sum(is.na(dat)) > 0) {
    warning("Missing values were present")
    dat <- na.omit(dat)
    if (nrow(dat) == 0) stop("No rows remain in dataset after missing values rows omitted")
  }
  if (ncol(dat) == 0) stop("Clusters not created as no numeric columns were found")
  # Resolve the number of centers; fall back to ncol - 1 when unset.
  if (is.null(x$num_k)) {
    num_k <- ncol(dat) - 1
  } else {
    num_k <- x$num_k
  }
  # BUG FIX: pass the locally resolved `num_k`.  The original passed `x$num_k`,
  # which silently ignored the fallback above (and is NULL in that case).
  mod = kmeans(x = dat, centers = num_k)
  if (0 == sum(mod$cluster)) {
    # BUG FIX: the original message referenced `minPts` and `x$eps`, which do
    # not exist for the kmeans step and would themselves raise an error here.
    stop(paste("No clusters can be detected using num_k =", num_k))
  }
  step_kmeans_new(terms = x$terms,
                  role = x$role,
                  trained = TRUE,
                  skip = x$skip,
                  id = x$id,
                  num_k = x$num_k,
                  retain = x$retain,
                  model = mod,
                  data = dat)
}
#' bake for kmeans
#'
#' Assigns each row of `new_data` to the nearest fitted centroid and appends
#' the assignment as a factor column named `cluster` (labels "C<id>").
#' stats::kmeans() ships no predict() method, so the nearest centre is
#' computed by hand from squared Euclidean distances.
#' @export
bake.step_kmeans = function(object, new_data, ...) {
  # For each fitted centre, squared distance to every row of `newdata`;
  # rows of `d2` index observations, columns index centres.
  nearest_center <- function(fit, newdata) {
    d2 <- apply(fit$centers, 1, function(ctr) {
      colSums((t(newdata) - ctr) ^ 2)
    })
    apply(d2, 1, which.min)
  }
  predictor_cols <- names(object$data)
  clus <- nearest_center(object$model, new_data[, predictor_cols])
  new_data <- cbind(new_data, cluster = as.factor(paste0("C", clus)))
  if (!object$retain) {
    new_data[, predictor_cols] <- NULL
  }
  as_tibble(new_data)
}
#' print for step_kmeans
#'
#' @param x A trained or untrained `step_kmeans` object.
#' @param width Maximum width used when printing the selected terms.
#' @return `x`, invisibly.
#' @export
print.step_kmeans = function(x, width = max(20, options()$width - 30), ...) {
  if (x$trained) {
    # kmeans labels clusters 1..k with no noise label, so the number of
    # clusters is simply the number of distinct labels.  BUG FIX: the original
    # subtracted 1 (copied from the dbscan step, where label 0 means noise)
    # and accessed the model via `x$mod`, which only worked through `$`
    # partial matching on the `model` element.
    cat(paste0("kmeans step found ", length(unique(x$model$cluster)), " clusters using "))
    cat(recipes::format_selectors(x$terms, width = width))
    cat(" [trained]\n")
  } else {
    cat("kmeans step for ")
    cat(recipes::format_selectors(x$terms, width = width))
    cat("\n")
  }
  invisible(x)
}
#' tidy for step_kmeans
#'
#' Returns the standard recipes tidy tibble for this step, with the step id
#' attached.  (The original roxygen title wrongly said "print".)
#' @export
tidy.step_kmeans = function(x, ...) {
  res <- recipes:::simple_terms(x, ...)
  res$id <- x$id
  res
}
# tunable function for step_kmeans
#' (Dead code, kept for reference: an earlier eps2 range definition.)
#' @export
#' eps2 = function(range = c(0.1, 3), trans = NULL) {
#' new_quant_param(
#' type = "double",
#' range = range,
#' inclusive = c(TRUE, TRUE),
#' trans = trans,
#' label = c(eps2 = "eps"),
#' finalize = NULL
#' )
#' }
#'
#' #' (Dead code, kept for reference: an earlier minPts2 range definition.)
#' #' @export
#' minPts2 = function(range = c(1L, 20L), trans = NULL) {
#' new_quant_param(
#' type = "integer",
#' range = range,
#' inclusive = c(TRUE, TRUE),
#' trans = trans,
#' label = c(minPts2 = "minPts"),
#' finalize = NULL # hook called when the data are finalized; used for data-dependent parameter ranges
#' )
#' }
# Tuning-parameter definition giving a range to the `num_k` parameter
# (the original header wrongly said "MinPts")
#' @export
num_k <- function(range = c(1L, 10L), trans = NULL) {
  new_quant_param(
    type = "integer",
    range = range,
    inclusive = c(TRUE, TRUE),
    trans = trans,
    label = c(num_k = "num_k"),
    finalize = NULL #hook called when the data are finalized; used for data-dependent parameter ranges
  )
}
#' Register the step_kmeans parameters with the tunable() generic
#' (the original header wrongly referred to step_dbscan)
#' @export
tunable.step_kmeans = function(x, ...) {
  tibble::tibble(
    name = c("num_k"),
    call_info = list(
      list(pkg = "addstepr", fun = "num_k", range=c(1L,10L)) ## parameter range for the first name, num_k
    ),
    source = "recipe", #recipe or model_spec
    component = "step_kmeans",
    component_id = x$id
  )
}
|
30de2111bff0ec6e7377324a221713041d87d9cc
|
f2d6b4b264bf3739e91b69bcb343a21b0aa4a43d
|
/practice_2.R
|
503fb4ad5b278c4fcd245dc90c1a060229751735
|
[] |
no_license
|
Kimdumchit/R_practice
|
2a69ae8597fc49e46282e585718ccf05fd3535c0
|
6504bba064378981e6638fa9808171fa93fe9a27
|
refs/heads/master
| 2020-05-07T08:50:43.603920
| 2019-04-14T09:10:00
| 2019-04-14T09:10:00
| 180,349,023
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
practice_2.R
|
# Creating a data frame
# 1. Create the variables
english <-c(90,80,60,70)
english
math<-c(50,60,100,20)
math
# 2. Build the data frame
df_midterm<-data.frame(english,math)
df_midterm
class<-c(1,1,2,2)
class
df_midterm<-data.frame(english,math,class)
df_midterm
# Analysis (the $ operator selects a variable inside a data frame)
mean(df_midterm$english)
mean(df_midterm$math)
# Build the data frame in a single call
df_midterm <-data.frame(english =c(90,80,60,70),math=c(50,60,100,20),class=c(1,1,2,2))
df_midterm
# Exercises
# Q1: Combine data.frame() and c() to turn the table contents into a data
# frame and print it (columns: product, price, sales volume -- in Korean)
df_fruit<-data.frame(제품=c("사과","딸기","수박"),가격=c(1800,1500,3000),판매량=c(24,38,13))
df_fruit
# Q2: Using the data frame above, compute the mean price and mean sales volume
mean(df_fruit$가격)
mean(df_fruit$판매량)
# Importing external data - using the readxl package
install.packages("readxl")
library(readxl)
# Read an external Excel file (store the read_excel() result in a variable)
df_exam<-read_excel("excel_exam.xlsx")
df_exam
# Analysis
mean(df_exam$english)
mean(df_exam$science)
|
82fb04729d601df6b431eba92ca3c1b7602e304e
|
fb4f18c9816cff08debc4d7c4d0ca035920952ea
|
/Ex_4/q7_wind_forecast.R
|
d155da804880821f66b3b609c9b895205f2de5fe
|
[] |
no_license
|
marcohv/exercises_ts_dtu
|
cdd880768df7bbb7d12a92feb5738249777fe6e7
|
cdf5818a51447b4adce742024b68e387901ea39a
|
refs/heads/master
| 2020-03-27T19:39:13.452140
| 2018-09-18T13:43:11
| 2018-09-18T13:43:11
| 147,003,352
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,205
|
r
|
q7_wind_forecast.R
|
##----------------------------------------------------------------
## Wind power forecasting exercise: fit linear, spline, RLS-spline and
## kernel forecast models to the Soenderborg data and compare RMSE.
## NOTE(review): helper functions (lag_vector, pst, per, plotmulti, rmse,
## rls, obj_kernel) come from the sourced "functions" directory -- their
## exact semantics are not visible here.
## Init by deleting all variables and functions
rm(list=ls())
## Set the working directory
setwd(".")
## Packages used
require(splines)
## Source functions
sapply(dir("functions",full.names=TRUE), source)
## Load the data
Data <- readRDS("data_soenderborg.RDS")
##----------------------------------------------------------------
##----------------------------------------------------------------
## Make a data.frame with synced observations and NWPs
## (k is the forecast horizon; the NWP wind speed is lagged k steps so it
## lines up with the observation it predicts)
k <- 24
X <- data.frame(t=Data$t, Pw=Data$Pw, Ws=lag_vector(Data$Wsnwp[ ,pst("k",k)],k))
## Divide into a training set and a test set
## Just keep the indexes
tstart <- "2010-12-01"
tstart_train <- "2011-03-01"
tend <- "2011-04-01"
X <- X[per(tstart,X$t,tend), ]
itrain <- which(per(tstart,X$t,tstart_train))
itest <- which(per(tstart_train,X$t,tend))
## Plot
plotmulti(X, c("Pw|Ws"))
## See the scatter plot
plot(X$Ws, X$Pw)
## Fit a linear regression model
fit <- lm(Pw ~ Ws, X[itrain, ])
abline(fit)
## RMSE on test set
X$Pw_hat_lm <- predict(fit, X)
## Chop below 0 (power cannot be negative)
X$Pw_hat_lm[X$Pw_hat_lm < 0] <- 0
X$residuals_lm <- X$Pw - X$Pw_hat_lm
## The score
rmse(X$residuals_lm[itest])
plotmulti(X[itest, ], c("Pw$|Pw_hat_lm"))
##----------------------------------------------------------------
##----------------------------------------------------------------
## Use a spline model to make it non-linear in the wind speed
fit_bslm <- lm(Pw ~ bs(Ws, df=8), X[itrain, ])
X$Pw_hat_bslm <- predict(fit_bslm, X)
X$residuals_bslm <- X$Pw - X$Pw_hat_bslm
## Improvements?
rmse(X$residuals_bslm[itest])
rmse(X$residuals_lm[itest])
## See the forecasts
plotmulti(X[itest, ], c("Pw$|Pw_hat"))
##----------------------------------------------------------------
##----------------------------------------------------------------
## NOTE(review): this section repeats the spline-model section above
## verbatim -- it looks like an accidental copy-paste duplicate.
## Use a spline model to make it non-linear in the wind speed
fit_bslm <- lm(Pw ~ bs(Ws, df=8), X[itrain, ])
X$Pw_hat_bslm <- predict(fit_bslm, X)
X$residuals_bslm <- X$Pw - X$Pw_hat_bslm
## Improvements?
rmse(X$residuals_bslm[itest])
rmse(X$residuals_lm[itest])
## See the forecasts
plotmulti(X[itest, ], c("Pw$|Pw_hat"))
##----------------------------------------------------------------
##----------------------------------------------------------------
## Base spline model with rls
## Objective: RMSE of the recursive-least-squares fit as a function of the
## forgetting factor lambda (prm[1]); minimized by optimize() below.
obj <- function(prm, frml, data, k, ieval = 1:nrow(data)) {
  print(prm)
  ## Apply a low-pass filter on the input
  lambda <- prm[1]
  fit <- rls(as.formula(frml), lambda, data, k)
  ## Evaluate only on the ieval rows
  print(score <- rmse(fit$residuals[ieval]))
  return(score)
}
frml <- "Pw ~ bs(Ws, df=8)"
## To have a "burn-in" period, then set ieval (here remove the first 14 days
ieval <- itrain[-1:-(24*14)]
result <- optimize(obj, lower = 0.95, upper = 1, frml = frml, data = X[itrain, ], k = k, ieval = ieval)
result$minimum
## Calculate the forecasts
fit <- rls(as.formula(frml), lambda = result$minimum, X, k)
X$residuals_bsrls <- fit$residuals
## Plot
tmp <- X[itest, ]
plot(tmp$Pw, type = "l")
lines(tmp$Pw - tmp$residuals_bsrls, col = 2)
lines(tmp$Pw - tmp$residuals_bslm, col = 3)
## Improvements
rmse(X$residuals_bsrls[itest])
rmse(X$residuals_bslm[itest])
##----------------------------------------------------------------
##----------------------------------------------------------------
## What about a kernel model?
## Use this somewhat generalized function (see functions/obj_kernel.R)
obj_kernel
## Define model
frml <- "Pw ~ Ws"
h <- c(Ws=3)
ieval <- itrain[-1:-(24*14)]
obj_kernel(h, frml, X, k, ieval)
## Optimize the kernel bandwidth h
result <- optim(h, obj_kernel, lower = 2, upper = 8, frml = frml, data = X[itrain, ], ieval = ieval, k = k)
result
## Calculate forecasts
Pw_hat <- obj_kernel(h=result$par, frml, X, k, return_yhat = TRUE)
X$residuals_kn <- X$Pw - Pw_hat
## Compare
tmp <- X[itest, ]
plot(tmp$Pw, type = "l")
lines(tmp$Pw - tmp$residuals_bsrls, col = 2)
lines(tmp$Pw - tmp$residuals_bslm, col = 3)
lines(tmp$Pw - tmp$residuals_kn, col = 4)
## Improvements?
rmse(X$residuals_kn[itest])
rmse(X$residuals_bslm[itest])
rmse(X$residuals_bsrls[itest])
##----------------------------------------------------------------
|
de2145c793ff58f963108a65ae17f9dd0db26696
|
d6464bc95946ec867ff935c889c30a3753e88008
|
/test/ui_2.R
|
d466bc2f753d702bc6cab960d3c98475d905fd2d
|
[] |
no_license
|
georgeannie/Kellog_northwestern_university_sinter
|
b83b5d8cb9b4e984c6d41c62f2bda3b41f1242a0
|
858b668900a69415db200470ac8683a76f97b1b8
|
refs/heads/master
| 2020-05-26T05:49:42.317690
| 2019-08-20T20:41:39
| 2019-08-20T20:41:39
| 188,126,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,014
|
r
|
ui_2.R
|
# Shiny dashboard UI for the sinter plant project: builds plots, controls
# and tab layouts, then assembles them into a dashboardPage.
# NOTE(review): read_rename_sinter() is defined in sinter_function.R, which
# is not visible here.
library(shiny)
library(shinydashboard)
library(ggplot2)
library(dplyr)
library(tidyr)
library(stringr)
source("sinter_function.R")
sinter= read_rename_sinter()
#PLOTS
#Correlation plot for dependent variables
corr_plot_dep= plotOutput('correlation_dependent', height="400px", width = 'auto')
#Time series
ts_dep = box(title=tags$a(class="primary-title",
                          style='margin-left:15px; font-type:bold; height:30px; color:black',
                          "Variation in Potential Dependent variables by time"),
             width = NULL,
             solidHeader = TRUE,
             plotOutput('ts_dependent', height="215px", width = 'auto')
)
#Histogram dependent
hist_dep = plotOutput('hist_dependent', width = "auto", height="350px")
#Scatter plot - dependent vs kpi
scatter_dep =plotOutput('scatter_dependent', width = "auto", height="350px")
#radio button potential dependent
#1.List only required independent variables
dep_var=names(sinter)[c(3, 5, 7:9)]
choice_dep = c("All", dep_var[-1])
#2. choice of dependent var
radio_dep = radioButtons("radio_dep_choice", "Potential Dependent Variables",
                         choices = list("Histogram" = "1",
                                        "Correlation matrix" ="2",
                                        "Summary" = "3",
                                        "Data" ="4"),
                         selected = "1")
#3. Box for radio button
text_potential_dep = box(title=tags$h5("Potential Dependent Variables",
                                       style='font-weight:bold'),
                         solidHeader=TRUE, width = NULL,
                         radio_dep)
#Tab layouts
#1. NavBar Menu= Dependent variables
#a. Stationary layout
dependent_layout_row1 = fluidRow(
  column(width=2,
         wellPanel(radio_dep)
  ),
  column(width=10,
         plotOutput("dependent_plot", height=400, width=970)
         # conditionalPanel(
         #   condition="input.taba.radio_dep_choice == 'Correlation matrix'",
         #  fluidRow( tags$div(style = "height:300px;",
         #         column(6, plotOutput("correlation_matrix")) ,
         #        column(6, plotOutput("scatter_matrix"))
         # )))
  )
)
#2. Navbar - Independent variables
#a. List only required independent variables
ind_var=names(sinter)[c(2:3, 11:19)]
#b. choice of independent var
radio_but_indep_var = radioButtons("radio_choice", "Potential Dependent Variables",
                                   choices = ind_var,
                                   selected = ind_var[1])
#choice of plots
plot_type=selectInput("select_plot", "Type of Plot",
                      choices = c("Correlation Plot/Histogram", "Scatter Plot (numerical variables)",
                                  "Box Plot (categorical variables)",
                                  "Box plot"))
#tab2 - independent variables layout
independent_layout = fluidRow(
  column(width=3,
         box(title="Independent Variables", width = NULL,
             column(12,
                    radio_but_indep_var
             ))),
  column(width=9,
         fluidRow(
           plot_type
         ),
         conditionalPanel(
           condition="input.select_plot == 'Correlation Plot/Histogram'",
           fluidRow( tags$div(style = "height:300px;",
                              column(6, plotOutput("hist")) ,
                              column(6, plotOutput("corr_ind"))
           ))),
         conditionalPanel(
           condition="!input.select_plot == 'Correlation Plot/Histogram'",
           fluidRow(
             plotOutput("plot_choice")
           ))
  )
)
model_layout = fluidRow()
#List the tabs
tab_dependent = tabPanel(id="taba", "Exploratory Analysis",
                         dependent_layout_row1)
tab_model = tabPanel("Model and Recommendation",
                     model_layout)
#Change background color of header
title_color=tags$head(tags$style(HTML('
                                      /* logo */
                                      .skin-black .main-header .logo {
                                      background-color: #4B0082;},
                                      /* logo when hovered */
                                      .skin-black .main-header .logo:hover {
                                      background-color: #4B0082;}
                                      ')))
#Increase height of header
header_height=tags$li(class = "dropdown",
                      tags$style(".main-header {max-height: 580px}"),
                      tags$style(".main-header .logo {height: 70px}")
)
#Add logo to header
title_logo=span(column(1, tags$img(src='logo.jpg', height='73', width='220', border='0',
                                   style='margin-left:-2px; padding:0; margin:0; display:block; font-size:0')),
                column(8, class="title-box",
                       tags$h2(class="primary-title", style='margin-top:20px; margin-left:20px; color:white;
                               font-size:8',
                               "SINTER PLANT PROJECT")
                )
)
#DASHBOARDING
#Assemble the final UI: header with logo, no sidebar, navbar with tabs
shinyUI(dashboardPage(skin="black",
                      dashboardHeader(header_height, title = title_logo,
                                      titleWidth='100%'),
                      dashboardSidebar(disable = TRUE),
                      dashboardBody(
                        title_color,
                        navbarPage(tags$h4("Sinter plant Viz",
                                           style='margin-top:2px; margin-left:10px; color:black;
                                           font-size:10; font-weight:bold'),
                                   tab_dependent,
                                   #tab_independent,
                                   tab_model
                        )
                      )
))
|
47bfafa02e99e49abb682cd64ebf741997bd8ba4
|
c2bb1193014c1c5974dfff227a3cdc847260c18d
|
/tests/testthat.R
|
33aec9c141c332181c88c92ae1d76626641c0065
|
[] |
no_license
|
domjarkey/relatable
|
00a2289174c85cbaa7e0fdf5b68276bac8fd795c
|
d02569e5d7b3664b97b04d06942f02833a974691
|
refs/heads/master
| 2021-05-09T10:34:56.719985
| 2018-02-01T07:37:56
| 2018-02-01T07:37:56
| 118,967,880
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
# Standard testthat entry point: discover and run all tests for the
# relatable package.
library(testthat)
library(relatable)
test_check("relatable")
|
7cb371dd1ce6b6b55299dfb72fe9c0b222e07205
|
fc9087208656d27a4b3cc1d20620e6031cc1b02e
|
/ensemble.R
|
8f794cd6e696ad96c024cc0b38ad535da1b68a58
|
[] |
no_license
|
varunpurohit76/mmlm
|
32ee97d77012afda809fa1f160a51be30be8e034
|
b5d0e176acf1f00bf5bbace0d80f2f35960b5df3
|
refs/heads/master
| 2021-01-10T02:12:09.547334
| 2016-03-03T14:58:19
| 2016-03-03T14:58:19
| 52,752,926
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
ensemble.R
|
# Blend two Kaggle submission files by averaging their Pred columns,
# keeping all other columns from the second file, and write the result.
pred_a <- read.csv("C:/Users/Varun/Desktop/ds/mmlm/ensemble/0.265627.csv")
pred_b <- read.csv("C:/Users/Varun/Desktop/ds/mmlm/ensemble/0.266693.csv")
blend <- pred_b
blend$Pred <- (pred_a$Pred + pred_b$Pred)/2
write.csv(blend,"C:/Users/Varun/Desktop/ds/mmlm/ensemble/predict2.csv", row.names = FALSE)
|
d6b8ee2cb60d9a6d90aafb85c74ba7cc55e8c6b5
|
73eec22a33e4f2f08a61cc3e5c8c5a2883009d73
|
/man/as.data.frame.sparsebnData.Rd
|
41e8ed5302d629f6705ae8fbd215cb542824f14b
|
[] |
no_license
|
itsrainingdata/sparsebnUtils
|
958ec179724d75728dfd03a40bbde718f68cc0cc
|
a762b74dda916956d16e2654463736e55b57be0b
|
refs/heads/master
| 2020-04-06T06:36:08.237032
| 2019-05-29T11:04:07
| 2019-05-29T11:04:07
| 50,886,867
| 3
| 2
| null | 2017-04-10T22:45:30
| 2016-02-02T02:05:24
|
R
|
UTF-8
|
R
| false
| true
| 476
|
rd
|
as.data.frame.sparsebnData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3-sparsebnData.R
\name{as.data.frame.sparsebnData}
\alias{as.data.frame.sparsebnData}
\title{Convert a sparsebnData object back to a data.frame}
\usage{
\method{as.data.frame}{sparsebnData}(x, ...)
}
\arguments{
\item{x}{a \code{\link{sparsebnData}} object.}
\item{...}{(optional) additional argument to \code{as.data.frame}.}
}
\description{
Convert a sparsebnData object back to a data.frame
}
|
036a172c575923fc5f93483ef2d06940f02b4ead
|
bc6fc11685a58ac09ae8cfc286ec3eee68729324
|
/020-models/docs/GAM/mack.r
|
6786aefa73b24c898976ead7de6626b30903f180
|
[] |
no_license
|
quantide/qtraining
|
2803fe93446931c43b70ecd7ed644f01c79ece85
|
846043fcc8207da7b5dbf5bd9a7d4b94111d5132
|
refs/heads/master
| 2021-09-15T10:57:47.797783
| 2018-05-30T09:13:43
| 2018-05-30T09:13:43
| 56,151,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,354
|
r
|
mack.r
|
## mackerel survey example. Run the code here and expand it to
## produce a reasonable model for these survey data, and some
## simple predictions from it....
library(mgcv)
library(gamair)
data(mack)
data(coast)
## NOTE(review): data(mack) is loaded a second time here -- apparently
## redundant with the call two lines above.
data(mack)
## plot the egg densities against location
plot(mack$lon,mack$lat,cex=0.2+mack$egg.dens/150,col="red")
lines(coast)
## rename column 12 and add log net area as a model offset
names(mack)[12] <- "net.area"
mack$log.net.area <- log(mack$net.area)
## The following fits an initial model in which egg density
## follows a Tweedie distribution, with log mean given by a sum
## of smooth functions of covariates + log sample net area.
gm <- gam(egg.count~s(lon,lat)+s(I(b.depth^.5))+ s(c.dist) +
s(salinity) + s(temp.surf) + s(temp.20m)+offset(log.net.area),
data=mack,family=Tweedie(1.3),select=TRUE,method="REML")
plot(fitted(gm),residuals(gm))
par(mfrow=c(2,3))
plot(gm)
## now refit without salinity (lots of NA's for this, so must drop on its own)
## check k for s(lon,lat) and adjust if needed...
## continue with model selection...
op<-par()
layout(matrix(c(1,1,2,3),2,2))
## use plot.gam's `select' argument to produce pretty pictures
## of spatial smooth with coast line + remaining smooths in nice
## layout...
## Use posterior simulation to obtain a CI for the average egg density, over the sample stations.
|
12d74bf6ca3e0d5f923107e497961360d4b3c650
|
de4a5ef486d3de3c45bf75ba75824323f9b395c4
|
/Analysis/for_paper/Fig2/steps_compare_3_n_paper.R
|
9c25d3a8138d6e586d5552d2ed7c30bf61e8c214
|
[] |
no_license
|
algyz200/R_portfolio
|
e0f937cd8f3066d9df5074dbe31c205bd9fceb0a
|
1d8b9c0a42a8a111753e603fb984e9147ea1f55d
|
refs/heads/master
| 2020-12-06T03:41:24.306181
| 2020-01-22T16:26:31
| 2020-01-22T16:26:31
| 232,330,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,480
|
r
|
steps_compare_3_n_paper.R
|
# Step/dwell analysis: bin step events by force, classify them as foresteps,
# small backsteps, large backsteps or detachments, and plot probabilities
# with event counts.
# NOTE(review): the input objects `sub`/`d_sub` and the column names used on
# `bthr3` (time, tscore, force, amplitude, dwell, mean1) come from the
# sourced script "steps_and_dirs_compiled.R" and are not visible here.
setwd("C:/Users/algis/Desktop/Algis/Algis_UoW/analysis")
source("steps_and_dirs_compiled.R")
filelist <- c(sub)
directory <- c(d_sub)
title <- strsplit(directory,"/"); title <- unlist(title); title <- title[length(title)]
coll=1
#par(new=TRUE) #allows overlaying plots
#exclude false dwells
setwd("C:/Users/algis/Desktop/Algis/Algis_UoW/analysis/")
falsedwells <- read.table("false_dwells.txt")
falsesteps <- read.table("false_steps.txt")
#
setwd(directory)
slopes <- c(); intercepts <- c(); c <- list(); dffit2 <- c()
minback <- -12
interval <- 1
ffrom <- 2
fto <- 9
d_lim <- 20 # how close the detachment has to be to zero position to count as detachment and not backslip
# read in the first file, then append the remaining files
bthr3 <- read.table(filelist[1], skip = 6,sep=" ")
bthr3$file <- rep(as.character(read.table(filelist[1],nrow=1)[1,1]),nrow(bthr3))
for (i in filelist[2:length(filelist)]) {
bthr3_temp <- read.table(i, skip = 6,sep=" ")
bthr3_temp$file <- rep(as.character(read.table(i,nrow=1)[1,1]),nrow(bthr3_temp))
bthr3 <- rbind(bthr3, bthr3_temp)
}
# drop events flagged as false dwells / false steps (matched on time + tscore)
bthr3 <- bthr3[!((bthr3$time %in% falsedwells$time) & (bthr3$tscore %in% falsedwells$tscore)),]
bthr3 <- bthr3[!((bthr3$time %in% falsesteps$time) & (bthr3$tscore %in% falsesteps$tscore)),] #remove false steps
##### binning: for each force bin compute mean dwell/amplitude and event counts
avF <- c(); avD <- c(); avA <- c(); nfor <- c(); nback <- c(); ndrop <- c();
se <- c(); vel <- c(); avDf <- c(); avDb <- c(); avDd <- c(); nbackall <- c();nslip <- c();
for (i in seq(ffrom,fto,by=interval)) {
p <- bthr3$amplitude> minback & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 #back and for
a <- bthr3$amplitude>0 & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 #for
b <- bthr3$amplitude<0 & bthr3$amplitude> minback & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 #back
#d <- bthr3$amplitude < minback & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 #drop
d <- bthr3$mean1 < d_lim & bthr3$amplitude < 0 & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 # Detachments
bb <- bthr3$mean1 > d_lim & bthr3$amplitude < 0 & bthr3$amplitude > -48 & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 #all backsteps without detachment
bs <- bthr3$mean1 > d_lim & bthr3$amplitude < 0 & bthr3$amplitude < -12 & bthr3$force >= i-interval/2 & bthr3$force < i+interval/2 # more than -12 but no detachments
#avF <- c(avF, mean(round(bthr3$force[p]), na.rm=TRUE) )
avF <- c(avF, i)
dwell <- bthr3$dwell[p]; dwell <- dwell[!is.na(dwell)]
avD <- c(avD, mean(bthr3$dwell[p], na.rm=TRUE) )
avDf <- c(avDf, mean(bthr3$dwell[a], na.rm=TRUE) )
avDb <- c(avDb, mean(bthr3$dwell[b], na.rm=TRUE) )
avDd <- c(avDd, mean(bthr3$dwell[d], na.rm=TRUE) )
#se <- c(se, sd(bthr3$dwell[p], na.rm=TRUE)/sqrt(length(bthr3$dwell[p])))
avA <- c(avA, mean(bthr3$amplitude[p], na.rm=TRUE) )
se <- c(se, sd( bthr3$amplitude[p]/bthr3$dwell[p], na.rm=TRUE ) / sqrt(length(dwell)))
nfor <- c( nfor, length(bthr3$amplitude[a]) )
nback <- c( nback, length(bthr3$amplitude[b]) )
nbackall <- c( nbackall, length(bthr3$amplitude[bb]) )
ndrop <- c( ndrop, length(bthr3$amplitude[d]) )
nslip <- c( nslip, length(bthr3$amplitude[bs]) )
}
#####
####################### plotting: stacked probabilities of each event class
#plot(avF,avD, type="p",col="red")
#plot(avF, avA)
#plot(avF, avA/avD, xlim=c(3,9), ylim=c(-50,250), xlab="Force, pN",
#ylab="Velocity, nm/s")
#plot(avF, nfor/nback, type="p", col = coll, xlim=c(ffrom,fto), log="y",ylim=c(0.1,200),
#xlab="Force, pN", ylab="Forestep / Backstep ratio", xaxt="n")
#axis(1,seq(0,10,1))
#plot(avF, nfor/(nback+nfor+ndrop), type="b", col = "black", xlim=c(ffrom,fto),ylim=c(0,1),
#xlab="Force, pN", ylab="Probability",lty=2)
all <- nfor+nback+nslip+ndrop
t <- matrix(nfor/all ,byrow=F,nrow=1,ncol=fto-ffrom+1)
t <- rbind(t,nback/all)
t <- rbind(t,nslip/all)
t <- rbind(t,ndrop/all)
par(mar=c(5,10,5,2))
layout(matrix(c(1,1,2), 3, 1, byrow=T))
barplot(t,names.arg=avF,xlab="Force, pN",ylab="Probability",
legend.text=c("Foresteps","Backsteps smaller than 12 nm","Backsteps bigger than 12 nm","Detachments"),
args.legend = list(x = 10, y=1.2,cex=1,border=F,bty="n"),
main=title,col=c(1,"orange","cyan","grey"),
border=F,cex.lab=2,cex.axis=2,cex.names=2)
################################## write out number of steps
par(mar=c(10,10,3,2))
a1 <- 2.5
a2 <- 8.5
nn <- 7
plot(avF,c(1,2,3,4,5,12,1,2), axes=F, xlab="",ylab="",col="white")
#plot(1:10, axes=F, xlab="",ylab="",col="white")
mmm <- matrix(c(seq(a1,a2,by=(a2-a1)/nn),rep(11,length(avF))),length(avF),2)
text(mmm,as.character(ndrop),col=1,adj=1)
mmm <- matrix(c(seq(a1,a2,by=(a2-a1)/nn),rep(8,length(avF))),length(avF),2)
text(mmm,as.character(nslip),col=1,adj=1)
mmm <- matrix(c(seq(a1,a2,by=(a2-a1)/nn),rep(5,length(avF))),length(avF),2)
text(mmm,as.character(nback),col=1,adj=1)
mmm <- matrix(c(seq(a1,a2,by=(a2-a1)/nn),rep(2,length(avF))),length(avF),2)
text(mmm,as.character(nfor),col=1,adj=1)
mmm <- matrix( c(rep(-0.1,4),2,5,8,11),4,2)
text(mmm, c("Foresteps","Backsteps smaller than 12 nm","Backsteps bigger than 12 nm","Detachments"),xpd=NA,adj=0)
####################################
#lines(avF, nfor/(nback+nfor+ndrop), type="l", lty=2, col = "black", xlim=c(3,8),ylim=c(0,1), main="Pombe")
#lines(avF, nback/(nback+nfor+ndrop), type="l",lty=2, col = "red", xlim=c(3,8),ylim=c(0,1))
#lines(avF, ndrop/(nback+nfor+ndrop), type="l",lty=2, col = "blue", xlim=c(3,8),ylim=c(0,1))
#plot(avF,nfor,type="l")
#lines(avF,nback, col="blue")
#plot(bthr3$force, bthr3$amplitude, ylim=c(-200,50),xlim=c(ffrom,fto),pch=16,cex=0.4,col=coll)
#plot(bthr3$force, bthr3$dwell, ylim=c(0.001,10),xlim=c(ffrom,fto),log="y",col=coll)
#plot(avF,avDf,type="p",col=coll,lwd=6)
#plot(avF,avDf, ylim=c(0.001,10),xlim=c(ffrom,fto),log="y",col=coll)
#######################
directory2 <- "C:/Users/algis/Desktop/Algis/Algis_UoW/R/functions"; setwd(directory2); source("fit_log.R");
source("fit_log_w.R")
#fit_log(avF, nfor/nback, coll)
#fit_log_w(avF, nfor/nback, "pink",w) # w <- r/error
#fit_log(avF, nfor/(nback+ndrop), coll)
#text(6.5, 55, "Pig MT + GMPCPP + epothilone",col=1)
#text(6.5, 50, "round - F/B",col=coll)
#text(6.5, 30, "square - F/(B+D)",col=coll)
#lines(1:10,rep(1,10),col="grey")
################# tony's type of erros bars
# Poisson-style error bars for the forestep/backstep ratio
r <- nfor/nback
error <- sqrt( r*(1+r)/nback )
lower <- r-error
upper <- r+error
#arrows(ffrom:fto, lower, ffrom:fto, upper, length=0.05, angle=90, code=3,
#col=coll)
setwd("C:/Users/algis/Desktop/Algis/Algis_UoW/analysis/compare/cumultative_histograms/with numbers")
|
d10cf99d72b05bd9e3d9ca0ef7b006c39ef36e11
|
8534e1345f43e216936f14353ea413b4eb8fb1cf
|
/run_analysis.R
|
90a51263adf1cd55adadfbd4fadff39c69a25d50
|
[] |
no_license
|
JogikInt/GettingAndCleaningData
|
d042ea8a71b1acee6acbf43704937ce6e4c1b648
|
b81b7f0a7780518e969fe13ca0126abc8b60b7b2
|
refs/heads/master
| 2021-01-01T04:34:28.702923
| 2016-05-15T15:35:14
| 2016-05-15T15:35:14
| 58,863,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,395
|
r
|
run_analysis.R
|
# run_analysis.R -- builds a tidy summary of a wearable-sensor data set
# (presumably the UCI HAR dataset, given the file layout -- confirm):
# merges the test/train splits, keeps only mean()/std() features, attaches
# readable activity labels, and writes per-subject/activity averages.
library(data.table)
## Steps 1-4
# loading labels
activityLabels <- fread("activity_labels.txt")
featureLabels <- fread("features.txt")
# loading and binding train and test files
subjects <- rbind(fread("test/subject_test.txt"), fread("train/subject_train.txt"))
featureMeasurements <- rbind(fread("test/X_test.txt"), fread("train/X_train.txt"))
activities <- rbind(fread("test/y_test.txt"), fread("train/y_train.txt"))
# extracting indices of required features: mean() and std()
featureIndices <- grep(x = featureLabels$V2, pattern = "(mean\\(\\)|std\\(\\))")
# subsetting feature labels and measurements in accordance with the indices
neededFeatures <- featureLabels[featureIndices, ]
neededMeasurements <- featureMeasurements[,featureIndices, with = FALSE]
# renaming variables
setnames(neededMeasurements, tolower(neededFeatures[[2]]))
# adding subject numbers and activity labels
neededMeasurements[, subject:= subjects]
# sort = FALSE keeps the merge aligned with the original row order so
# activity labels stay matched row-for-row with the measurements.
neededMeasurements[, activity:= tolower(merge(activities, activityLabels, by = "V1",
sort = FALSE)$V2)]
## Step 5
# averaging columns subsetted by subject and activity and
finalTable <- neededMeasurements[, lapply(.SD, mean), by=list(subject, activity)]
# reordering in accordance with subject number
setorder(finalTable, subject)
# saving
write.table(x = finalTable, file = "finalTable.txt", row.names = FALSE)
|
00a86b74c3d648f6b4349a600eded18bd75f3a78
|
cf9abceb276e692ac8783bc3176f14be11bc5174
|
/man/notes.Rd
|
855186feda5c87789bb7992c4f8cb3dbcb299bee
|
[] |
no_license
|
crhisto/Biobase
|
923b9a7abcb4863168eda083adcb632b8f6b8721
|
8b88109584a30df554196ad1eb5f0324c59ce37e
|
refs/heads/master
| 2022-12-16T14:57:12.821906
| 2020-10-02T18:10:23
| 2020-10-02T18:10:23
| 257,328,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
rd
|
notes.Rd
|
\name{notes}
\alias{notes}
\alias{notes<-}
\title{Retrieve and set eSet notes.}
\description{
These generic functions access notes (unstructured descriptive data)
associated \code{\link{eSet-class}}.
\code{notes(<ExpressionSet>) <- <character>} is unusual, in that the
character vector is appended to the list of notes; use
\code{notes(<ExpressionSet>) <- <list>} to entirely replace the list.
}
\usage{
notes(object)
notes(object) <- value
}
\arguments{
\item{object}{Object, possibly derived from class \code{eSet-class}.}
  \item{value}{Character vector containing unstructured information describing the experiment.}
}
\value{
\code{notes} returns a list.
}
\author{Biocore}
\seealso{\code{\link{ExpressionSet-class}}, \code{\link{SnpSet-class}}}
\keyword{manip}
|
8e40ee8ae9cd0aadc562fc2771dd149262ef639e
|
8512a8248589ed7de11b908d307cda001a87df43
|
/R/plot1.R
|
637cfbbb2eeabeb6346ef56c2b2b6781388b6d74
|
[] |
no_license
|
kennchin/shiny_app_monitor
|
e232ffcb02ce727d0719f77849b150b9066afd7e
|
669409c060fa66d55540600bcfd12182a203a6ed
|
refs/heads/master
| 2020-04-07T11:45:42.870393
| 2018-11-20T07:34:52
| 2018-11-20T07:34:52
| 158,339,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
r
|
plot1.R
|
# Bar chart of frequencies for a small table (used when the input has
# fewer than 4 rows to plot).
#   dataset - table whose TRANSPOSE supplies categories (col 1) and
#             frequencies (col 2)
#   rows    - row indices (after transposing) to keep
#   nx, ny  - names used for the x and y aesthetics; the frequency column
#             is renamed "Freq", so callers presumably pass ny = "Freq"
#             -- TODO confirm
# Requires ggplot2 to be attached by the caller; returns a ggplot object.
plot1 <-function(dataset,rows,nx,ny)
{
  eth_trans =as.data.frame(t(dataset))
  eth_cols = eth_trans[,1:2]
  eth_rows = eth_cols[rows,]
  names(eth_rows)<-c(nx,"Freq")
  # Freq arrives as a factor after t(); map the factor codes back to the
  # numeric values stored in its levels.
  eth_rows$Freq = as.numeric(levels(eth_rows$Freq))[eth_rows$Freq]
  ggplot(data=eth_rows, aes_string(x=nx, y=ny,fill=nx)) +
    geom_bar(stat="identity")+scale_x_discrete(limits=eth_rows[,nx])+scale_y_continuous(breaks=round(seq(0, max(eth_rows[,ny]),1),1))+geom_text(aes(label = eth_rows$Freq ,group=nx), position = position_dodge(width = 1),vjust = -0.5, size = 3,color="black")
}
|
3d1c18ddb87bb36bb59e6106ce24b8d04acb52c2
|
1c7b24f44c1332979a235ded510a68583479664a
|
/man/hist.grouped.data.Rd
|
a7c404e0425f2e83561ead57ebe107a7bdbb00f5
|
[] |
no_license
|
cran/actuar
|
a9c39df86bbca3a19e5914aa726dd5cea29b1562
|
b82d1bc41eed62cce17ecc1466f95f6b31c0225b
|
refs/heads/master
| 2023-02-23T07:52:16.126575
| 2023-02-07T03:12:31
| 2023-02-07T03:12:31
| 17,694,238
| 10
| 8
| null | 2018-04-24T13:17:50
| 2014-03-13T03:51:17
|
R
|
UTF-8
|
R
| false
| false
| 3,845
|
rd
|
hist.grouped.data.Rd
|
\name{hist.grouped.data}
\alias{hist.grouped.data}
\title{Histogram for Grouped Data}
\description{
This method for the generic function \code{\link{hist}} is mainly
useful to plot the histogram of grouped data. If \code{plot = FALSE},
the resulting object of class \code{"histogram"} is returned for
compatibility with \code{\link{hist.default}}, but does not contain
much information not already in \code{x}.
}
\usage{
\method{hist}{grouped.data}(x, freq = NULL, probability = !freq,
density = NULL, angle = 45, col = NULL, border = NULL,
main = paste("Histogram of" , xname),
xlim = range(x), ylim = NULL, xlab = xname, ylab,
axes = TRUE, plot = TRUE, labels = FALSE, \dots)
}
\arguments{
\item{x}{an object of class \code{"grouped.data"}; only the first
column of frequencies is used.}
\item{freq}{logical; if \code{TRUE}, the histogram graphic is a
representation of frequencies, the \code{counts} component of
the result; if \code{FALSE}, probability densities, component
\code{density}, are plotted (so that the histogram has a total area
of one). Defaults to \code{TRUE} \emph{iff} group boundaries are
equidistant (and \code{probability} is not specified).}
\item{probability}{an \emph{alias} for \code{!freq}, for S compatibility.}
\item{density}{the density of shading lines, in lines per inch.
The default value of \code{NULL} means that no shading lines
are drawn. Non-positive values of \code{density} also inhibit the
drawing of shading lines.}
\item{angle}{the slope of shading lines, given as an angle in
degrees (counter-clockwise).}
\item{col}{a colour to be used to fill the bars.
The default of \code{NULL} yields unfilled bars.}
\item{border}{the color of the border around the bars. The default
is to use the standard foreground color.}
\item{main, xlab, ylab}{these arguments to \code{title} have useful
defaults here.}
\item{xlim, ylim}{the range of x and y values with sensible defaults.
Note that \code{xlim} is \emph{not} used to define the histogram
(breaks), but only for plotting (when \code{plot = TRUE}).}
\item{axes}{logical. If \code{TRUE} (default), axes are draw if the
plot is drawn.}
\item{plot}{logical. If \code{TRUE} (default), a histogram is
plotted, otherwise a list of breaks and counts is returned.}
\item{labels}{logical or character. Additionally draw labels on top
of bars, if not \code{FALSE}; see \code{\link{plot.histogram}}.}
  \item{\dots}{further graphical parameters passed to
    \code{\link{plot.histogram}} and then to \code{\link{title}} and
    \code{\link{axis}} (if \code{plot=TRUE}).}
}
\value{
An object of class \code{"histogram"} which is a list with components:
\item{breaks}{the \eqn{r + 1} group boundaries.}
\item{counts}{\eqn{r} integers; the frequency within each group.}
\item{density}{the relative frequencies within each group
\eqn{n_j/n}{n[j]/n}, where \eqn{n_j}{n[j]} = \code{counts[j]}.}
\item{intensities}{same as \code{density}. Deprecated, but retained
for compatibility.}
\item{mids}{the \eqn{r} group midpoints.}
\item{xname}{a character string with the actual \code{x} argument name.}
\item{equidist}{logical, indicating if the distances between
\code{breaks} are all the same.}
}
\note{
The resulting value does \emph{not} depend on the values of
the arguments \code{freq} (or \code{probability})
or \code{plot}. This is intentionally different from S.
}
\seealso{
\code{\link{hist}} and \code{\link{hist.default}} for histograms of
individual data and fancy examples.
}
\references{
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998),
\emph{Loss Models, From Data to Decisions}, Wiley.
}
\examples{
data(gdental)
hist(gdental)
}
\keyword{dplot}
\keyword{hplot}
\keyword{distribution}
|
b1ae1286625fb6655b30f6f7843afad966520c46
|
8e8982116616d5efb35d5acd5c3dfe12339f7033
|
/subSamples/errorVsCoverage.r
|
0785d04e18e0746fb8df55e288b41a0f85af1b2e
|
[] |
no_license
|
DEploid-dev/dEploidPaper
|
ffc167ede5762df4f7b1be21ed6ab3f12e0055aa
|
39434269d27fcedb50c6a77180f9a2baeb342286
|
refs/heads/master
| 2021-06-20T00:49:01.331168
| 2017-07-20T17:57:59
| 2017-07-20T17:57:59
| 67,420,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,732
|
r
|
errorVsCoverage.r
|
rm(list=ls())
source("common.r")
# Decide how the two haplotype columns of `mixedSample` line up with the
# two reference haplotypes ref1/ref2.  Returns c(1, 2) when keeping the
# columns as-is gives strictly fewer mismatches than swapping them, and
# c(2, 1) otherwise (ties favour the swapped order, as before).
getIndex <- function (mixedSample, ref1, ref2){
    mismatch_as_is   <- sum(mixedSample[, 1] != ref1) + sum(mixedSample[, 2] != ref2)
    mismatch_swapped <- sum(mixedSample[, 2] != ref1) + sum(mixedSample[, 1] != ref2)
    if (mismatch_as_is < mismatch_swapped) c(1, 2) else c(2, 1)
}
# Break positions 1..hapLength into segment breakpoints spaced `by` apart,
# appending hapLength itself as a final breakpoint when it does not fall
# exactly on the grid (i.e. when (hapLength - 1) is not a multiple of by).
fun.divide.to.seg <- function(hapLength, by = 50){
    breaks <- seq(from = 1, to = hapLength, by = by)
    lands_on_grid <- (hapLength - 1) %% by == 0
    if (!lands_on_grid) {
        breaks <- c(breaks, hapLength)
    }
    breaks
}
#dataDir = "./"
panel = read.table(paste("labStrains.eg.panel.txt",sep=""),header=T)
endAt = cumsum(table(panel[,1]))
beginAt = c(1, 1+endAt[-length(endAt)])
chromLength = (endAt - beginAt+1)
Ref1 = panel[,5] # HB3
Ref1Name = "HB3"
Ref2 = panel[,6] # 7G8
Ref2Name = "7G8"
plotALT = FALSE
for ( sample in c("PG0402-C", "PG0406-C")){
for ( suffix in c("asiaAfirca", "lab")){
#sample = "PG0406-C"
#suffix = "lab"
#suffix = "asiaAfirca"
expectedCovs = c(80)
#subSamples = c(50)
for ( expectedCov in expectedCovs ) {
vcfPrefix = paste(sample, ".subSample.expectedCov", expectedCov, sep="")
prefix = vcfPrefix
vcfName = paste(vcfPrefix,".vcf.gz", sep="")
coverage = fun.extract.vcf(vcfName)
totalCov = coverage$refCount + coverage$altCount
if (plotALT) {
totalCov = coverage$altCount
}
spacing = ceiling(max(totalCov)/16)
mybins = seq(0, ceiling(max(totalCov)/spacing)*spacing, by = spacing)
nBins = length(mybins)
#mybins = quantile(c(totalCov, max(totalCov)+1), prob=(1:nBins)/nBins)
binIndex = findInterval(totalCov, mybins)
eventsType = c("0/0", "0/1", "1/0", "1/1")
eventCountMat = c()#data.frame ()
eventSumMat = c()
eventArray = c()
#prefix = paste(vcfPrefix,".asia.out", sep="")
for ( seed in 1:15){
outprefix = paste("repeats/", sample, ".seed", seed, ".subSample.expectedCov", expectedCov,".", suffix, ".out", sep="")
# tmpProp = read.table(paste(prefix,".prop",sep=""), header=F)
# prop = as.numeric(tmpProp[dim(tmpProp)[1],])
hap.corrected = as.matrix(read.table(paste(outprefix,".hap",sep=""), header=T)[,c(-1,-2)])
for ( chrom in 1:length(beginAt)){
tmpHap = hap.corrected[beginAt[chrom]:endAt[chrom],,drop=FALSE]
# tmpProp = prop.corrected
tmpRef1 = Ref1[beginAt[chrom]:endAt[chrom]]
tmpRef2 = Ref2[beginAt[chrom]:endAt[chrom]]
# rearranged.Index = getIndex(tmpHap, tmpRef1, tmpRef2)
# tmpHap = tmpHap[,rearranged.Index,drop=FALSE]
# tmpProp = tmpProp[rearranged.Index]
haplength = dim(tmpHap)[1]
index.of.seg = fun.divide.to.seg(haplength)
truth = c()
infered = c()
for ( i in 1:(length(index.of.seg)-1) ){
tmpIndex = c(index.of.seg[i]:index.of.seg[i+1])
tmptmpHap = tmpHap[tmpIndex,]
tmptmpRef1 = tmpRef1[tmpIndex]
tmptmpRef2 = tmpRef2[tmpIndex]
rearranged.Index = getIndex(tmptmpHap, tmptmpRef1, tmptmpRef2)
tmptmpHap = tmptmpHap[,rearranged.Index,drop=FALSE]
truth = c(truth, paste(tmptmpRef1, "/", tmptmpRef2, sep=""))
infered = c(infered, paste(tmptmpHap[,1], "/", tmptmpHap[,2], sep=""))
}
for ( event in eventsType ){
eventCount = rep(0, nBins)
eventSum = rep(0, nBins)
eventIndex = which(truth==event)
tmpevent = (truth[eventIndex]!=infered[eventIndex])
# cat("length(eventIndex) ",length(eventIndex), " length(tmpevent)", length(tmpevent), "\n")
for ( i in 1:nBins ){
tmpIndex = which(binIndex[eventIndex]==(i))
# cat(length(tmpIndex),"\n")
eventSum[i] = sum(tmpevent[tmpIndex]*1)
eventCount[i] = length(tmpIndex)
}
# cat(sum(eventCount),"\n")
# eventSum[eventCount<5] = 0
eventCountMat = rbind(eventCountMat, eventCount)
eventSumMat = rbind(eventSumMat, eventSum)
eventArray = c(eventArray, event)
}
}
}
# mytitle = paste(prefix, Ref1Name, round(tmpProp[1], digits=3), "/", Ref2Name, round(tmpProp[2],digits=3))
imageFileName = paste(prefix, ".", suffix, ".errorVsTotalCoverageBox.png",sep="")
if (plotALT){
imageFileName = paste(prefix, ".", suffix, ".errorVsAlt.png",sep="")
}
png(imageFileName, width=600, height=600)
#par(mfrow=c(1,2))
layout(matrix(c(1,1,1,1,2,2), 3, 2, byrow = TRUE))
# plot(c(min(mybins),max(mybins)),c(0, 1), type="n", ylab="# of sites was wrongly inferred", xlab="Total coverage")
case = 1
colors = c(rgb(1,0,0,0.3), rgb(1,1,0,.3), rgb(0,1,0,0.3), rgb(0,0,1,0.3))
# colors = c(rgb(1,0,0,1), rgb(1,1,0,1), rgb(0,1,0,1), rgb(0,0,1,1))
for ( event in eventsType ){
# tmpCount = colSums(eventCountMat[eventArray == event,])
# tmpSum = colSums(eventSumMat[eventArray == event,])
# tmpSum[tmpCount<500] = 0
# lines(mybins,tmpSum/(tmpCount+0.00000001), col=color)
tmpCount = eventCountMat[eventArray == event,]
tmpSum = eventSumMat[eventArray == event,]
tmpMat = c()
for ( i in 1:dim(tmpCount)[1]){
tmpCountRow = tmpCount[i,]
tmpSumRow = tmpSum[i,]
# tmpSumRow[tmpCountRow<5] = 0
# points(jitter(mybins), tmpSumRow/(tmpCountRow+0.00000001), col=color)
tmpMat = rbind(tmpMat, tmpSumRow/(tmpCountRow+0.00000001))
}
colnames(tmpMat) = as.character(mybins)
if (case==1){
boxplot(as.data.frame(tmpMat), col=colors[case],ylim=c(0,0.5), main="Error rate when wrongly infer genotype */*")
} else {
boxplot(as.data.frame(tmpMat), col=colors[case], add=T, axes =F)
}
# plottingObj = colMeans(tmpMat)
# plottingObj[plottingObj==0] = NaN
# if (case==1){
# plot( mybins, plottingObj, col=colors[case],ylim=c(0,0.5), main="Error rate when wrongly infer genotype */*", type="l")
# } else {
# lines(mybins, plottingObj, col=colors[case])
# }
case = case+1
}
legend("topright", legend=c("HB3/7G8",eventsType), fill=c(rgb(0,0,0,0),colors), border=c("white", rep("black",4)), cex=1.5)
# case = 1
# colors = c(rgb(1,0,0,0.3), rgb(1,1,0,.3), rgb(0,1,0,0.3), rgb(0,0,1,0.3))
# for ( event in eventsType ){
# tmpCount = eventCountMat[eventArray == event,]
# tmpMat = c()
# for ( i in 1:dim(tmpCount)[1]){
# tmpCountRow = tmpCount[i,]
# tmpMat = rbind(tmpMat, tmpCountRow+1)
# }
# colnames(tmpMat) = as.character(mybins)
# if (case==1){
# boxplot(as.data.frame(tmpMat), col=colors[case], log="y", main="Error rate when wrongly infer genotype */*")
# } else {
# boxplot(as.data.frame(tmpMat), col=colors[case], log="y", add=T, axes =F)
# }
# case = case+1
# }
# legend("topright", legend=c("HB3/7G8",eventsType), fill=c(rgb(0,0,0,0),colors), border=c("white", rep("black",4)), cex=1.5)
hist(totalCov, breaks=mybins, ylim = c(0, 3500), col = rgb(1,0,0,0.5), xlab = "Total coverage", main="Histogram of coverage" )
#obj = mpileAlt[,3][mpileAlt[,3]>0]
# hist(coverage$altCount, breaks=seq(0, ceiling(max(coverage$altCount)/spacing)*spacing, by = spacing), add = T, col = rgb(0,0,1,0.5))
# legend("topright", c("Total coverage", "Alt count"), fill=c(rgb(1,0,0,0.5), rgb(0,0,1,0.5)), cex=1.5)
dev.off()
}
}
}
|
485cd99929c393a3c903d61dc2d21ca0d7fc30fa
|
ae6bf5c9facfbfc14ad6438cc238db86c41557e8
|
/return_est.R
|
b25283098a0f2f2d0666897d1a729f8bee7e6f59
|
[] |
no_license
|
kecolson/simulator_functions_current
|
c7e3dfc05937b65285492dbb5386e6d8626a13a5
|
36582cb59d6ac6e619bdcce4d88b39c43fcbe8c7
|
refs/heads/master
| 2021-03-12T21:20:04.363864
| 2015-03-12T22:06:54
| 2015-03-12T22:06:54
| 32,105,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 525
|
r
|
return_est.R
|
# return_est.R
# Function: return_est
# Input:  Risk of outcome in exposed (EY1)
#         Risk of outcome in unexposed (EY0)
#         Desired metric (metric): "rd", "rr" or "or"
# Output: The requested effect estimate.  An unrecognised metric yields
#         (invisible) NULL, matching the original if/else chain.
return_est <- function(EY1, EY0, metric) {
  # Odds corresponding to a risk p.
  odds <- function(p) p / (1 - p)
  switch(metric,
         rd = EY1 - EY0,              # risk difference
         rr = EY1 / EY0,              # relative risk
         or = odds(EY1) / odds(EY0))  # odds ratio
}
|
7ce105fb57e864839a262629fceb1a102169c01d
|
436a5912af879ae72490d2009010aecf8c2406bf
|
/plot4.R
|
5774cb70b8a0ddc1ea28370690478d2f69fe69e5
|
[] |
no_license
|
houlad/ExData_Plotting1
|
1e915b6e8f5edb1c3f145c5a7300d0b53e629b43
|
ad323913cebb4b34afcd8aeb9665acb142797ef5
|
refs/heads/master
| 2021-05-26T18:43:53.773419
| 2014-06-04T14:24:35
| 2014-06-04T14:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
plot4.R
|
##Reading full dataset into R
# Household power consumption file: semicolon-separated, "?" marks NA.
fullpower <- read.table(file = "./household_power_consumption.txt", sep = ";",
                        header = TRUE, na.strings = "?", stringsAsFactors = FALSE)
# Build a POSIX datetime from the separate Date (d/m/Y) and Time columns.
fullpower$datetime <- paste(fullpower$Date, fullpower$Time)
fullpower$datetime <- strptime(fullpower$datetime, format = "%d/%m/%Y %H:%M:%S")
# Keep the two days 2007-02-01 and 2007-02-02.
subsetpower <- subset(fullpower, datetime >=as.POSIXlt("2007-02-01 00:00:00") &
                        datetime < as.POSIXlt("2007-02-03 00:00:00"))
##Creating plot 4 and saving in file
png(file="plot4.png")
# 2x2 panel: active power, voltage, sub-metering, reactive power.
par(mfrow = c(2, 2))
with(subsetpower, {
  plot(subsetpower$datetime, subsetpower$Global_active_power, type = "l", xlab="", ylab = "Global Active Power")
  plot(subsetpower$datetime, subsetpower$Voltage, type = "l", xlab= "datetime", ylab = "Voltage")
  with(subsetpower, plot(subsetpower$datetime, subsetpower$Sub_metering_1, type ="l", xlab= "", ylab="Energy sub metering"))
  lines(subsetpower$datetime, subsetpower$Sub_metering_2, col = "red")
  lines(subsetpower$datetime, subsetpower$Sub_metering_3, col = "blue")
  legend("topright", bty = "n", lty=c(1, 1, 1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(subsetpower$datetime, subsetpower$Global_reactive_power, type = "l", xlab= "datetime", ylab= "Global_reactive_power")
})
dev.off()
|
689247c30c584993ac9796d8a1c233f0806eaf73
|
d7393207913e0e55f4d3543ce9a4fac9eba72a68
|
/tests/testthat/test-sitrep.R
|
f359d54407e505ed8c4f8fbbeed535538c4f99f6
|
[
"MIT"
] |
permissive
|
r-lib/devtools
|
52b2dfccac32a13e6e80c2f6b4a745d6f3c23032
|
163c3f24ef2b2828b738c281b89d66e756f048f7
|
refs/heads/main
| 2023-08-08T21:15:00.328365
| 2023-08-07T16:26:14
| 2023-08-07T16:26:14
| 643,909
| 850
| 366
|
NOASSERTION
| 2023-08-07T16:26:16
| 2010-05-03T04:08:49
|
R
|
UTF-8
|
R
| false
| false
| 1,721
|
r
|
test-sitrep.R
|
# Network-dependent test of check_for_rstudio_updates(): it contacts the
# RStudio update service, so it is skipped when offline or on CRAN.
test_that("check_for_rstudio_updates", {
  skip_if_offline()
  skip_on_cran()
  # the IDE ends up calling this with `os = "mac"` on macOS, but we would send
  # "darwin" in that case, so I test with "darwin"
  # also mix in some "windows"
  # returns nothing if rstudio is not available
  expect_null(check_for_rstudio_updates("darwin", "1.0.0", FALSE))
  # returns nothing if the version is ahead of the current version
  expect_null(check_for_rstudio_updates("windows", "2030.12.0+123", TRUE))
  # returns something if ...
  local_edition(3)
  # Replace the live version number in service replies with "{VERSION}" so
  # snapshots stay stable as new RStudio releases come out.
  scrub_current_version <- function(message) {
    sub("(?<=^RStudio )[0-9\\.\\+]+", "{VERSION}", message, perl = TRUE)
  }
  # version is not understood by the service
  expect_snapshot(
    writeLines(check_for_rstudio_updates("windows", "haha-no-wut", TRUE))
  )
  # version is behind the current version
  # truly ancient
  expect_snapshot(
    writeLines(check_for_rstudio_updates("darwin", "0.0.1", TRUE)),
    transform = scrub_current_version
  )
  # Juliet Rose, does not have long_version, last before numbering changed
  expect_snapshot(
    writeLines(check_for_rstudio_updates("windows", "1.4.1717", TRUE)),
    transform = scrub_current_version
  )
  # new scheme, introduced 2021-08
  # YYYY.MM.<patch>[-(daily|preview)]+<build number>[.pro<pro suffix>]
  # YYYY.MM is the expected date of release for dailies and previews
  # an out-of-date preview
  expect_snapshot(
    writeLines(check_for_rstudio_updates("darwin", "2021.09.1+372", TRUE)),
    transform = scrub_current_version
  )
  # an out-of-date daily
  expect_snapshot(
    writeLines(check_for_rstudio_updates("windows", "2021.09.0-daily+328", TRUE)),
    transform = scrub_current_version
  )
})
|
d8a8c04f8d803a23f50722a37268e0defae38588
|
4a292e34ae6a0fdca63ab07b7b0ccc3f57bdc566
|
/man/col_elev.Rd
|
ddc126ba3faa332caefd15c314bea910a26ccac8
|
[
"MIT"
] |
permissive
|
mikejohnson51/HydroData
|
c122ba9d559e46bf2c58db609cff43411d7922d9
|
6b7f08656d7299bca3a544652cb69ae064049d41
|
refs/heads/master
| 2021-07-09T13:51:37.180796
| 2019-01-17T06:36:21
| 2019-01-17T06:36:21
| 112,221,493
| 38
| 9
|
MIT
| 2018-11-29T14:50:59
| 2017-11-27T16:39:08
|
R
|
UTF-8
|
R
| false
| true
| 407
|
rd
|
col_elev.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color.palletes.R
\docType{data}
\name{col_elev}
\alias{col_elev}
\title{Color Palettes Useful for NLCD, CDL and NED plotting}
\format{An object of class \code{character} of length 23.}
\usage{
col_elev
}
\description{
Color palettes for visualizing land use and elevation raster data
}
\author{
Mike Johnson
}
\keyword{datasets}
|
efc1bbf8a8416865c921b7aa40b468d13a4b0710
|
8a76480b897eeac91a863f1a1ca17ee5f9c86f9f
|
/transect/scripts/Fig_3_explain_var/permanova_site_year_panel_b.R
|
148e1894e7f3eca437ac0563c1d341491a0caed5
|
[] |
no_license
|
eot3/European-Root-Suppl
|
687c1e31d8dfd01b989ee0aaa78bece6374f789e
|
eeb9a13f42329454812e0135558219ae962660dc
|
refs/heads/master
| 2022-03-31T10:27:12.400806
| 2020-01-06T09:48:13
| 2020-01-06T09:48:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,477
|
r
|
permanova_site_year_panel_b.R
|
###
### needs to be called separately for each dataset (bacteria, fungi and oomycetes)
###
### need library("vegan")
library("vegan")
### Load data ###
# Bray-Curtis distance matrix (samples x samples) plus sample metadata;
# rownames are sample ids in both tables.
mat.dist=read.table("../../oomycetal_data/bray_curtis_otu_table_norm.txt", h=T, sep="\t")
metadata=read.table("oomycete_table.txt", h=T)
### looping on the fractions to test the effect of site and year
fraction<- c("Soil","RS","RP", "Root")
for (f in 1:4){
  fac=fraction[f]
  # result: 5 PERMANOVA table rows x 4 statistics (Df, F, Pr(>F), R2)
  result<-matrix(ncol=4,nrow=5)
  tab<-paste("t", fac, sep="")
  # regex matching sample names belonging to this fraction
  pat<-paste(fac, "(1|2|3|4|)(P|S|)", sep = ".")
  aa_pat=grep(pat,rownames(mat.dist))
  # keep only samples present in both the distance matrix and the metadata
  aa_inter=intersect(rownames(mat.dist[aa_pat,]),rownames(metadata))
  # PERMANOVA on distance ~ Site * Experiment with 9999 permutations
  # (column "Experiment" apparently encodes the sampling year -- confirm).
  res=adonis(formula= mat.dist[aa_inter,aa_inter] ~ as.vector(metadata[aa_inter,"Site"])*as.vector(metadata[aa_inter,"Experiment"]), data=as.data.frame(metadata[aa_inter,]), permutations = 9999)
  for (e in 1:5){
    result[e,1]<-res$aov.tab$Df[e]
    result[e,2]<-res$aov.tab$F.Model[e]
    result[e,3]<-res$aov.tab$`Pr(>F)`[e]
    result[e,4]<-res$aov.tab$R2[e]
  }
  # stash the per-fraction table as tSoil / tRS / tRP / tRoot
  assign(tab, result)
}
# bind the four per-fraction tables side by side
result_bac=cbind(tSoil,tRS,tRP,tRoot)
colnames(result_bac)=c("Soil_Df","Soil_F","Soil_Pr","Soil_R2","RS_Df", "RS_F","RS_Pr", "RS_R2", "RP_Df","RP_F", "RP_Pr","RP_R2","Root_Df","Root_F","Root_Pr","Root_R2")
row.names(result_bac)=c("Site", "Year", "Site:Year", "Residuals", "Total")
result_bac
###
###write.table(result_bac,"year_site_perFraction_oomyc_zotus.txt")
|
a3f6951d1e4fa98531fea7b7e8aa059150b50bd5
|
28188f0e2c1458c511476770b2e46feec3d4ea90
|
/system/initialization.R
|
c4b7864f0bb20c0f25e813034cc3e3b19da6ce29
|
[] |
no_license
|
complexitycbs/complex_network_analysis
|
9ac72a34f4d465215d16eba209dd240bce0aa3b3
|
8862bb99b8f40f9889309671e4afd4fbc7955331
|
refs/heads/master
| 2020-03-28T01:36:27.747915
| 2018-08-24T10:42:25
| 2018-08-24T10:42:25
| 147,516,692
| 0
| 0
| null | 2018-09-05T12:48:20
| 2018-09-05T12:48:20
| null |
UTF-8
|
R
| false
| false
| 480
|
r
|
initialization.R
|
# Used packages
suppressPackageStartupMessages({
# Graph calculations and parallel computing
library(igraph)
library(parallel)
library(doParallel)
# Data manipulation and mutating into usable node- and edgelists
library(tidyverse)
library(magrittr)
# Import of data from the internet and basic cleaning
library(stringdist)
library(rvest)
library(xml2)
library(tnet)
# Plotting some networks on maps
library(ggmap)
library(mapproj)
})
|
79b188883f187a5f3ee743f7c6de30fc084e0c98
|
747747c3914bf23746edfe29d7f1db4b481d2b54
|
/print.R
|
f04de67d92573a69a67716a682ca57bf5005c90a
|
[] |
no_license
|
beoing/MATH345-Metapopulations
|
52f1227f3dec585c89c98e9985757d34441d8350
|
70f8a521e04f7960f54c1593303ef98882337e11
|
refs/heads/master
| 2021-03-08T19:24:21.664228
| 2016-04-29T23:42:44
| 2016-04-29T23:42:44
| 57,005,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,574
|
r
|
print.R
|
# Print an ASCII-art picture of the 5-patch metapopulation at time step t.
#
# sm : state matrix with at least 10 rows and t+1 columns.  Each patch box
#      shows two stacked values: rows 1-5 supply the top value of patches
#      1-5 and rows 6-10 the matching bottom value (patch i pairs row i
#      with row i + 5).  Column t+1 holds the state at time t.
# t  : time index (0-based); selects column t+1 and appears in the title.
#
# Values are trimmed to 6 characters with strtrim() so they fit the boxes.
# Called for its printed side effect only.
print_model <- function(sm, t){
  print(sprintf("The model at time t = %d is: ", t))
  print(sprintf("          --------              --------"))
  print(sprintf("          |%06s|              |%06s|", strtrim(sm[4, t+1],6), strtrim(sm[5, t+1],6)))
  print(sprintf("          |------|=========|------|"))
  print(sprintf("          |%06s|              |%06s|", strtrim(sm[9, t+1],6), strtrim(sm[10, t+1],6)))
  print(sprintf("          --------              --------"))
  print(sprintf("            //                  ||     "))
  print(sprintf("           //      ^  ^         ||     "))
  print(sprintf("          //      ^    ^        ||     "))
  print(sprintf("         //      ^  ^   ^       ||     "))
  print(sprintf("        //      ^  ^  ^         ||     "))
  print(sprintf("--------       ^  ^  ^          ||     "))
  print(sprintf("|%06s|      ^  ^  ^           ||     ", strtrim(sm[2, t+1],6)))
  print(sprintf("|------|     ^  ^  ^            ||     "))
  print(sprintf("|%06s|    ^  ^  ^             ||     ", strtrim(sm[7, t+1],6)))
  print(sprintf("--------   ^  ^  ^              ||     "))
  print(sprintf("   ||     ^  ^                  ||     "))
  print(sprintf("   ||    ^  ^                   ||     "))
  print(sprintf("   ||       ^                   ||     "))
  print(sprintf("   ||      ^  ^                 ||     "))
  print(sprintf("   ||     ^                     ||     "))
  print(sprintf("--------              --------"))
  print(sprintf("|%06s|              |%06s|", strtrim(sm[1, t+1],6), strtrim(sm[3, t+1],6)))
  print(sprintf("|------|==============|------|"))
  # BUG FIX: the bottom value of the lower-right patch was sm[7, t+1],
  # duplicating the left patch's bottom value and never showing row 8;
  # patch 3 pairs with row 8 (3 + 5), matching every other box.
  print(sprintf("|%06s|              |%06s|", strtrim(sm[6, t+1],6), strtrim(sm[8, t+1],6)))
  print(sprintf("--------              --------"))
}
|
a41db05034ef6d71e432b6d93c92a8783d62592d
|
faf4e2a563162e96b93800dac3bb1ff2e4c86e29
|
/R/packages/GloCR/man/getWthDB.Rd
|
0e4122e0ae7d07340f7f07685da76484ba507ac0
|
[] |
no_license
|
jasonjb82/GloC
|
4f600bbb82f4988ec8132301fdac9d9039e28377
|
b8b6fd54336be663ac6614fec14170894becfec0
|
refs/heads/master
| 2020-04-15T21:11:48.508428
| 2015-01-27T03:36:52
| 2015-01-27T03:36:52
| 32,000,110
| 1
| 1
| null | 2015-03-11T05:31:05
| 2015-03-11T05:31:04
|
R
|
UTF-8
|
R
| false
| false
| 996
|
rd
|
getWthDB.Rd
|
\name{Get weather data}
\alias{AccessGetWthXY}
\alias{AccessGetWthCell}
\alias{DBgetWthXY}
\alias{DBgetWthCell}
\alias{DBgetWthCellNoDSN}
\title{ Get weather data }
\description{
Get weather data from a database
}
\usage{
AccessGetWthXY(database, table, lon, lat, rst=raster())
AccessGetWthCell(database, table, cell)
DBgetWthXY(database, table, lon, lat, rst=raster())
DBgetWthCell(database, table, cell, verbose=FALSE)
DBgetWthCellNoDSN(table, cell, user, pwd, driver="MySQL ODBC 5.1 Driver", server="geo.irri.org", database="nasa")
}
\arguments{
  \item{lon}{ Longitude }
  \item{lat}{ Latitude }
\item{start}{ First date }
\item{end}{ Last date }
\item{database}{ }
\item{table}{ }
\item{rst}{ }
\item{cell}{ }
\item{user}{ }
\item{pwd}{ }
\item{driver}{ }
\item{server}{ }
\item{verbose}{ }
}
\value{
An object of class 'weather'
}
\author{ Robert J. Hijmans and Jorrel Aunario }
|
c5af07966752eb24e116844b0dd375b75e685766
|
495ef44f953ccc4cc66eb9882b83cc60b8b61682
|
/cachematrix.R
|
a345077c687ff7951329eab1e46644cf07dd5964
|
[] |
no_license
|
bernardnk/ProgrammingAssignment2
|
484db1c698433e9b26926de77a14704458511ece
|
04f01acaa61f372005692219cc0e6e1dd35a8d30
|
refs/heads/master
| 2021-01-21T20:17:04.258971
| 2014-11-17T03:42:43
| 2014-11-17T03:42:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,167
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## These functions display the basis to build a "dictionary" to provide
## fast lookups based on "keys" (the value used to search) to obtain
## the output values (here, a computed inverse matrix) which is only
## computed once. Dictionary are commonly used in other object oriented
## languages to look up data in a cache rather than recompute them.
## Write a short comment describing this function
## makeCacheMatrix creates an object to store:
## (1) the key, here matrix x,
## (2) the computed inverse matrix s
## (3) access functions to get and set matrix x and inverse matrix s
## Note that value x and s will be manipulated outside of the current
## environment with the help of the <<- operator.
# Wrap a matrix together with a cache slot for its inverse.
# Returns a list of four accessors:
#   set(y)        - replace the stored matrix and clear the cached inverse
#   get()         - return the stored matrix
#   setsolve(inv) - store a computed inverse in the cache
#   getsolve()    - return the cached inverse, or NULL if not yet set
# The matrix and cache live in this function's enclosing environment,
# updated via <<- from the accessor closures.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    # Any previously cached inverse belongs to the old matrix; drop it.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(inverse) {
    cached_inverse <<- inverse
  }
  getsolve <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Write a short comment describing this function
## cacheSolve is a function that uses the object created by makeCacheMatrix
## and determines if the computed value s (the inverse of matrix x) is cached,
## or needs to be computed. The output of function cacheSolve is the the
## inverse of matrix x.
# Return the inverse of the matrix wrapped by a makeCacheMatrix-style
# object `x`, computing it with solve() only on the first call; later
# calls emit a "getting cached data" message and reuse the cached result.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Sample test code
## x <- matrix(c(2, 0, 4, 1, 3, 1, 1, -1, 2), nrow=3, ncol=3) ##create matrix x
## z <- makeCacheMatrix(x) ## store matrix x in the cache
## z$get() ## read matrix x, just to see that it is cached.
## cacheSolve(z) ## first time, inverse matrix is computed and stored in cache.
## cacheSolve(z) ## second time, the inverse matrix is read from the cache.
|
dca1bc419a8d72385a2a1e551cd48771cf4fda9a
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/fpu/fpu-01Xh-error02-uniform-depth-7/fpu-01Xh-error02-uniform-depth-7.R
|
2e229b462e32fccfcc4c7f60626ff3eafc815b0d
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 87
|
r
|
fpu-01Xh-error02-uniform-depth-7.R
|
a0dfb5e02dfde29a8f4fa460c144939e fpu-01Xh-error02-uniform-depth-7.qdimacs 205681 547528
|
f23fcd175666b5517ff71ffb6a94927b0bd4e751
|
6f66fb70a11cb88629f227dba8a913ba3ffccdaa
|
/calculations.R
|
4a7b45cf86c2cac191757bb7a42ab3942eb04117
|
[] |
no_license
|
grothjd/RepData_PeerAssessment1
|
5945002cc7538cd5b2a5f552a9410acd5d9a870b
|
98331ad79f9cb5e137a0e4f9a3a5f7e0ee9f3c8f
|
refs/heads/master
| 2021-01-15T09:53:00.908831
| 2015-07-19T23:22:52
| 2015-07-19T23:22:52
| 39,325,644
| 0
| 0
| null | 2015-07-19T07:08:31
| 2015-07-19T07:08:31
| null |
UTF-8
|
R
| false
| false
| 1,536
|
r
|
calculations.R
|
# calculations.R -- exploratory analysis of "activity.csv" step-count data
# (one row per 5-minute interval: steps, date, interval).
activity <- read.csv("activity.csv")
day_of_week <- weekdays(as.Date(activity$date))
# Total steps per day, ignoring missing values.
number_of_steps <- sapply(split(activity$steps, activity$date), sum, na.rm=TRUE)
hist(number_of_steps)
mean_number_of_steps <- mean(number_of_steps)
median_number_of_steps <- median(number_of_steps)
print("mean number of steps per day:")
print(mean_number_of_steps)
print("median number of steps per day:")
print(median_number_of_steps)
# Average daily activity pattern: mean steps for each 5-minute interval.
y <- sapply(split(activity$steps, activity$interval), mean, na.rm=TRUE)
x <- names(y)
plot(x,y, type="l")
title(main="Average steps per 5min interval",xlab="Interval number", ylab="Average number of steps")
x[y==max(y)]  # interval with the highest average step count
# Impute missing step counts with the median of the matching interval.
NA_index <- is.na(activity$steps)
num_NA <- sum(NA_index)
median_steps <- sapply(split(activity$steps, activity$interval), median, na.rm=TRUE)
# BUG FIX: activity_noNA was never initialised (element assignment to a
# missing object errors) and the loop referenced the undefined `min_steps`
# instead of `median_steps`.
activity_noNA <- numeric(length(activity$steps))
for(i in seq_along(activity$steps)){
  if(NA_index[i]){
    activity_noNA[i] <- unname(median_steps[activity$interval[i]==names(median_steps)])
  }else{
    activity_noNA[i] <- activity$steps[i]
  }
}
# Re-compute daily totals on the imputed series.
y <- sapply(split(activity_noNA, activity$date), sum, na.rm=TRUE)
hist(y)
title(main="Histogram of total number of steps taken in a day", xlab="total number of steps", ylab="counts")
mean_steps <- mean(y)
# BUG FIX: was misspelt `meadian_steps`, so the print below showed the
# per-interval medians instead of the daily median.
median_total_steps <- median(y)
print("mean number of steps per day:")
print(mean_steps)
print("median number of steps per day:")
print(median_total_steps)
# Vectorised weekday test: TRUE for weekday names, FALSE for "Saturday"
# and "Sunday".  Replaces the original element-by-element loop that grew
# the result with c() (quadratic) and returned NULL for empty input; this
# version is O(n) and returns logical(0) for empty input.
is.weekday <- function(x){
  !(x %in% c("Saturday", "Sunday"))
}
z <- is.weekday(day_of_week)
|
8df3c689b8248f4c1b486322ce3577ddd746cc8a
|
e5fc120f866933943a29c796c7c607dc2690cab3
|
/analysis/get_climate/climate_anomaly_in_degreesC.R
|
a0470091f69fabb38f926fdd610021345154db2e
|
[] |
no_license
|
AldoCompagnoni/lupine
|
e07054e7e382590d5fa022a23e024dfec80c80b2
|
afc41a2b66c785957db25583f25431bb519dc7ec
|
refs/heads/master
| 2021-06-23T04:50:30.617943
| 2021-06-11T13:00:59
| 2021-06-11T13:00:59
| 185,047,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 751
|
r
|
climate_anomaly_in_degreesC.R
|
# What is climate anomaly in terms of degrees C?
# Converts yearly mean temperatures into standardized (z-score) and
# absolute (degrees C) anomalies, to relate the two scales.
rm(list=ls())
source("analysis/format_data/format_functions.R")
options(stringsAsFactors = F)
library(dplyr)
library(tidyr)

# get climate data
clim <- read.csv("data/prism_point_reyes_87_18.csv")

# calculate ABSOLUTE Celsius yearly temperatures
# NOTE(review): `tmp_mat` is not created anywhere in this script — presumably
# it is produced from `clim` by a helper in format_functions.R; confirm,
# otherwise this pipeline fails with "object 'tmp_mat' not found".
mean_t_df <- tmp_mat %>%
  group_by( year) %>%
  summarise( mean_t = mean(clim_value) ) %>%
  ungroup

# calculate standardized (z) and absolute anomalies
# t_abs:    yearly mean temperature (deg C)
# t_anom_z: z-scored anomaly (scale() centers and divides by sd)
# t_anom_a: absolute anomaly, deviation from the long-run mean (deg C)
t_anom_df <- data.frame( t_abs = mean_t_df[,2,drop=T],
                         t_anom_z = mean_t_df[,2,drop=T] %>% scale %>% .[,1] ) %>%
  mutate( t_anom_a = t_abs - mean(mean_t_df[,2,drop=T]) )

# verdict: anomaly of 1 == 0.6 Celsius!
|
1b32dee9af25fa2164c65cacd4cb23f149c7da91
|
9be405f871051b5e7f2269128750c8d7726d4d84
|
/shiny/textInput/ui.R
|
df70f7120aa4960777418d789fc84fd4cccc99d5
|
[] |
no_license
|
ChanningC12/Machine-Learning-with-R
|
aeee0b02566073a1f27a7368af33c95c0efedbe8
|
b1edfb60e16d95956194988f0c299d44b534b7aa
|
refs/heads/master
| 2020-04-15T12:47:02.641542
| 2016-12-02T03:28:57
| 2016-12-02T03:28:57
| 64,176,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 627
|
r
|
ui.R
|
library(shiny)

# Shiny UI: a demo of the textInput widget. The sidebar collects a name
# and an age; the main panel echoes them back via textOutput slots
# ("myname"/"myage") filled in by the matching server.
input_panel <- sidebarPanel(
  ("Enter the personal information"),
  textInput("name", "Enter your name", ""),
  textInput("age", "Enter your age", "")
)

display_panel <- mainPanel(
  ("Personal Information"),
  textOutput("myname"),
  textOutput("myage")
)

shinyUI(fluidPage(
  titlePanel(title = "Demonstration of textInput widget in shiny"),
  sidebarLayout(input_panel, display_panel)
))
|
aaeb55ca0dfa52a4613638013fc64e9efeecf5f2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/popEpi/examples/ltable.Rd.R
|
8f7ad8eedb92f2ec77dbea214e54e510a4a89d6e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,246
|
r
|
ltable.Rd.R
|
# Extracted examples for popEpi::ltable and popEpi::expr.by.cj:
# tabulate counts (and arbitrary expressions) by grouping variables
# into a long-format table.
library(popEpi)
### Name: ltable
### Title: Tabulate Counts and Other Functions by Multiple Variables into a
### Long-Format Table
### Aliases: ltable expr.by.cj
### ** Examples

# `sire` is an example patient cohort bundled with popEpi; bin age at
# diagnosis (dg_age) into broad age groups for tabulation.
data("sire", package = "popEpi")
sr <- sire
sr$agegroup <- cut(sr$dg_age, breaks=c(0,45,60,75,85,Inf))

## counts by default
ltable(sr, "agegroup")

## any expression can be given
ltable(sr, "agegroup", list(mage = mean(dg_age)))
ltable(sr, "agegroup", list(mage = mean(dg_age), vage = var(dg_age)))

## also returns levels where there are zero rows (expressions as NA)
ltable(sr, "agegroup", list(obs = .N,
                            minage = min(dg_age),
                            maxage = max(dg_age)),
       subset = dg_age < 85)

#### expr.by.cj
expr.by.cj(sr, "agegroup")

## any arbitrary expression can be given
expr.by.cj(sr, "agegroup", list(mage = mean(dg_age)))
expr.by.cj(sr, "agegroup", list(mage = mean(dg_age), vage = var(dg_age)))

## only uses levels of by.vars present in data
expr.by.cj(sr, "agegroup", list(mage = mean(dg_age), vage = var(dg_age)),
           subset = dg_age < 70)

## .SDcols trick
expr.by.cj(sr, "agegroup", lapply(.SD, mean),
           subset = dg_age < 70, .SDcols = c("dg_age", "status"))
|
4eb6a6c5eeb8f1e8b79128fc31394d7905ea6b0a
|
1c5577882d16d88bbd18a4fb0819d987ac46ba5d
|
/Session1_HW/Hello world.R
|
1a043a1172b235ae87e7400a90b5303f32e8cc16
|
[] |
no_license
|
paulpobouche/Github_paul
|
a67fa053307f323831a627ed7018d5e0bbc593f6
|
452b359e68030f9b612e31c737aec98dc9e841f1
|
refs/heads/master
| 2020-04-15T09:41:24.538930
| 2016-10-23T16:00:14
| 2016-10-23T16:00:14
| 68,100,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
Hello world.R
|
# Hello World
# Paul Bouche, 14/09/2016
# There are several ways to print "Hello world!"

# Option 1: the basic R function "print".
print("Hello World !")

# Option 2: a tiny helper that glues together any three chains of
# characters (space-separated, the `paste` default) and prints the result,
# returning the combined string invisibly.
fun <- function(a, b, char) {
  print(paste(a, b, char))
}
fun("Hello", "World", "!")
|
c1e6bc6b19a34fdb3f0db58255f75e9cb11826ba
|
77060598839a1d34c29025cbfd5d1c2e50eef3b0
|
/GLM 1 Blank/5 Linear Regression Diagnostics/5_Linear Regression Diagnostics/5_Linear_Regression_Diagnostics.R
|
c96d3058280d3f5ff7488b712f28cda819232765
|
[] |
no_license
|
seanxduan/GLM-1-Fall-2020
|
d34ec5a607a19a37cef9b1009360c0e8f8d1f88b
|
0d25ce697570c9436e3d2fca0b7f9ccf3d156660
|
refs/heads/master
| 2023-01-19T18:25:35.915302
| 2020-11-30T23:20:44
| 2020-11-30T23:20:44
| 289,981,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,900
|
r
|
5_Linear_Regression_Diagnostics.R
|
###################################
## Linear Regression Diagnostics ##
## Kyle Ripley ##
## 9/6/19 ##
###################################

# Packages needed:
# BUG FIX: install olsrr only when missing, instead of unconditionally
# reinstalling (and possibly prompting) on every run of the script.
if (!requireNamespace("olsrr", quietly = TRUE)) {
  install.packages("olsrr")
}
library(olsrr)
library(tidyverse)
library(ggplot2)

# Data used:
dat <- read_tsv("https://raw.githubusercontent.com/RipleyKyleR/public_data_files/master/Album%20Sales%202.dat")

# In this dataset, each row represents an album with the following variables:
# adverts - amount (in thousands of dollars) spent promoting the album before release
# sales - sales of the album (in thousands) the week after release
# airplay - number of times songs from the album played on the radio the week prior to release
# attract - attractiveness of the band on a scale from 0 (hideous potato-heads) to 10 (gorgeous sex objects)

#############
# Our Model #
#############

# Because we're focusing on simple one predictor regressions right now,
# we'll create a model that has album sales as the outcome and money
# spent on advertisements as the predictor.
mAdvert <- lm(sales ~ adverts, data = dat)
summary(mAdvert)

############
# Outliers #
############

# You've already learned some ways to identify outliers in your
# data during the lesson Tidying Data. However, now we will look
# at how to identify our outliers visually through scatterplots
# - which may be a bit easier to do.

# I'll also take this opportunity to gently introduce you to the
# ggplot2 package from the tidyverse.
?ggplot
ggplot(data = dat, mapping = aes(x = adverts, y = sales)) +
  geom_point() +
  geom_smooth(method = "lm", se = TRUE)

# Do you see any points that you think might be outliers?

############
# Leverage #
############

# Leverage values are useful for identifying aberrant values,
# and we have a few ways to easily obtain and visualize
# those values in R.

# First, let's create a new variable in our data frame
# that contains the leverage value for each album.
dat$lev <- hat(model.matrix(mAdvert))

# We can then make a simple plot of these lever values
# to see if any observations have a much larger value
# than the other observations.
plot(dat$lev)

# Looking at the plot, we can see that we have a few
# observations above .025 (a subjective value for this
# data that doesn't translate to other data). Let's
# find out which observations those are.
ind <- which(dat$lev > 0.025)
dat[ind,]

#################################
# Studentized Deleted Residuals #
#################################

# We can also use studentized deleted residuals to get an
# idea of influential observations in our data.
dat$stud_del_resid <- rstudent(mAdvert)

# We can get a handy plot of these values with the
# `ols_plot_resid_stud_fit()` function.
# BUG FIX: the comment previously named `ols_plot_resid_stud()`,
# which is a different olsrr plot than the one actually called below.
ols_plot_resid_stud_fit(mAdvert)

# If we wish to make this plot without the default diagnostics,
# we can pull the necessary values and plot it ourselves.
# NOTE: we already pulled the studentized deleted residuals
dat$predicted <- predict(mAdvert)
plot(dat$predicted,dat$stud_del_resid)

#################
# A Useful Plot #
#################

# We also have a plot that will look at both leverage
# and studentized deleted residuals.
ols_plot_resid_lev(mAdvert)

###################
# Cook's Distance #
###################

# Cook's distance is also a useful diagnostic for our data.
# We can use these values much like we've used the previous.
dat$cooks_d <- cooks.distance(mAdvert)
plot(dat$cooks_d, ylab = "Cook's Distance")

# But we also have more useful plots.
ols_plot_cooksd_bar(mAdvert)

#################################
# Non-Normal Error Distribution #
#################################

# It's also a good idea to get an idea of the structure
# of your distribution of errors. We can do this easily
# by making a qq plot.
qqnorm(mAdvert$res)
qqline(mAdvert$res)

# Ideally, you want the errors to fall on the line.

#################################
# Homogeneity of Error Variance #
#################################

# You also want to check that the error variances
# of your data are homogeneous.

# Will again go back to using ggplot2 for this graph.
ggplot(data = dat, mapping = aes(x = predicted, y = stud_del_resid)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)

############################
# Removing Aberrant Values #
############################

# You very rarely ever want to TRULY delete data. We can get around
# this by just creating a new dataframe that doesn't include the
# cases we choose to delete.

# This can be helpful in determining if removing these values
# has a significant impact on your analyses.
dat_no_infl <- dat[-c(169, 184),]

###############################
# Bonus Material for HW/Test? #
###############################
plot(dat$attract, jitter(dat$sales, 3) )
dat$adverts
summary(dat$adverts)
dat$meanc_adverts <- dat$adverts-mean(dat$adverts)
|
9436bbc1c66b7b4f30b5f89e70af7617695edfab
|
f19c893ff20f4595d0a255d37e4d5dac9452f13a
|
/scratch.R
|
81697d0e3ce10e697fe790c4a7936fe509eb2ae5
|
[] |
no_license
|
ramnathv/dataviz
|
80d3ab1aa68d92181902cd3058cc45abb850af58
|
30115b1fb589e8fa4a62a978e728cde85670875e
|
refs/heads/master
| 2021-01-18T07:44:59.858007
| 2011-03-10T21:43:47
| 2011-03-10T21:43:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,143
|
r
|
scratch.R
|
# Exploratory scratch script over US budget/receipt API data.
# All get*() helpers come from queryAPI.R; scratch.RData holds cached
# objects from earlier sessions.
source("queryAPI.R")
library(plyr)
library(ggplot2)
load("scratch.RData")

# Scratchwork just to play with the data
# Data ranges from 1984-2010

# Most granular level of data
budgetAcct <- getBudgetAccount()
# Aggregate spending levels
budgetAgg <- getBudgetAggregate()
receiptAcct <- getReceiptAccount()
receiptAgg <- getReceiptAggregate()

# Find uniques:
# account, agency, bureau
acct.unique <- unique(budgetAcct["account"]) # 1580 unique vals
agency.unique <- unique(budgetAcct["agency"]) # 26 unique vals
bureau.unique <- unique(budgetAcct["bureau"]) # 326 unique vals
# function, subfunction
fn.unique <- unique(budgetAcct["function"])
subfn.unique <- unique(budgetAcct["subfunction"])
# accountID, agencyID, bureauID, functionID, subfunctionID
acct.id.unique <- unique(budgetAcct["accountID"])
agency.id.unique <- unique(budgetAcct["agencyID"])
bureau.id.unique <- unique(budgetAcct["bureauID"])
# NOTE(review): the next two lines read "function"/"subfunction" again, not
# "functionID"/"subfunctionID" as the comment above suggests — confirm intent.
fn.id.unique <- unique(budgetAcct["function"])
subfn.id.unique <- unique(budgetAcct["subfunction"])
# category, subcategory
cat.unique <- unique(receiptAcct["category"])
subcat.unique <- unique(receiptAcct["subcategory"])

# Get pop/GDP/debt/inflation
population <- getPopulation(startYear=1984, endYear=2010)
GDP <- getGDP(startYear=1984, endYear=2010)
debt <- getDebt(startYear=1984, endYear=2010)
inflation <- getInflation(startYear=1984, endYear=2010)

# getTaxRates() function is broken. Need to split it into 2 parts.
# 84-93 has 11 columns, 94-10 has 10 columns
# Each block stacks the four tax-rate types (type = 0..3) for its era.
taxRates.84.93 <- getTaxRates(startYear=1984, endYear=1993, type=0)
taxRates.84.93 <- rbind(taxRates.84.93,getTaxRates(startYear=1984,endYear=1993,type=1))
taxRates.84.93 <- rbind(taxRates.84.93,getTaxRates(startYear=1984,endYear=1993,type=2))
taxRates.84.93 <- rbind(taxRates.84.93,getTaxRates(startYear=1984,endYear=1993,type=3))
taxRates.94.10 <- getTaxRates(startYear=1994, endYear=2010, type=0)
taxRates.94.10 <- rbind(taxRates.94.10,getTaxRates(startYear=1994,endYear=2010,type=1))
taxRates.94.10 <- rbind(taxRates.94.10,getTaxRates(startYear=1994,endYear=2010,type=2))
taxRates.94.10 <- rbind(taxRates.94.10,getTaxRates(startYear=1994,endYear=2010,type=3))
|
7a0b64bf54b9aca5d001b511308de30259418710
|
0926205326d5c3654e7e49069591b592fcd42154
|
/cachematrix.R
|
703f0589435209c6fbb90d2c331af160d0a2021d
|
[] |
no_license
|
DevaranoroZ/ProgrammingAssignment2
|
ceca8559eb111d6f36078d57b9620a6e3827fc5b
|
f70cb0d104c7a0ee5c9e2111632116932f9bf227
|
refs/heads/master
| 2021-01-21T20:52:51.957080
| 2016-10-05T05:01:51
| 2016-10-05T05:01:51
| 70,026,279
| 0
| 0
| null | 2016-10-05T05:01:51
| 2016-10-05T03:09:23
|
R
|
UTF-8
|
R
| false
| false
| 1,216
|
r
|
cachematrix.R
|
## Computing the inverse of a matrix and storing it for later use to save
## computational effort and time.
## makeCacheMatrix - Creates a list of functions to set and get a matrix and set
## the inverse to a matrix and get the cached value of the inverse, if already
## computed
## Build a caching wrapper around matrix `x`. The cached inverse starts as
## an all-NA matrix shaped like `x`; cacheSolve() treats any NA entry as
## "not yet computed". set() replaces the matrix and resets the cache.
makeCacheMatrix <- function(x = matrix()) {
  blank_cache <- function(m) matrix(nrow = nrow(m), ncol = ncol(m))
  inv <- blank_cache(x)
  list(
    set = function(y) {
      x <<- y
      inv <<- blank_cache(y)  # invalidate the cache for the new matrix
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## cacheSolve - Gets the inverse of amtrix using the list from makeCacheMatrix.
## It first checks if inverse is already computed in which case the cached value
## of the iverse is called. Else, inverse is computed and stores that value in
## cache.
## Return a matrix that is the inverse of the matrix held in `x` (a cache
## object from makeCacheMatrix). A cached inverse containing any NA is the
## "unset" placeholder; otherwise the stored value is reused.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (anyNA(cached)) {
    # Cache miss: compute the inverse, store it, and return it.
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
    inv
  } else {
    message("getting cached data")
    cached
  }
}
|
fe4c4f956914eed2bced1a11ff15538d240bc482
|
cb0c33baa61edcbc6bfd8610d65a1e4d43bff6d1
|
/R/method.R
|
344c7a9f8275ca9e099e9fa4d9f99465e8d5632d
|
[
"MIT"
] |
permissive
|
mustafaascha/OOP-WG
|
3e5aef8b0edacbcf0549b501b3c7776fa9a65df3
|
6dc2c4ee9519dc0041ade6fa66849aed662ec651
|
refs/heads/master
| 2023-04-02T00:40:41.395196
| 2021-03-23T19:25:36
| 2021-03-23T19:25:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,138
|
r
|
method.R
|
#' Retrieve or register an R7 method for a generic
#'
#' @param generic The generic to retrieve or register
#' @param signature The method signature
#' @param method,value The new function to use as the method.
#' @return The matching method function; errors if none is found.
#' @importFrom utils getS3method
#' @export
method <- function(generic, signature) {
  # Public lookup: dispatch with no methods excluded from the search.
  method_impl(generic, signature, ignore = NULL)
}
# Core dispatch: find the method for `generic` matching `signature`,
# skipping any functions in `ignore` (used by next_method()). Tries the
# C-level R7 method table first, then falls back to registered S3 methods,
# and finally errors with a description of the unmatched signature.
method_impl <- function(generic, signature, ignore) {
  out <- .Call(method_, generic, signature, ignore)
  if (is.null(out)) {
    # If no R7 method is found, see if there are any S3 methods registered
    if (inherits(generic, "R7_generic")) {
      args <- generic@signature
      generic <- generic@name
    } else {
      # Capture the function before `generic` is replaced by its name.
      fun <- generic
      generic <- find_function_name(fun, topenv(environment(fun)))
      # BUG FIX: was `args(formals(generic))` — args() of a pairlist is NULL,
      # so the error message below lost the argument names. formals(args(fun))
      # retrieves the formals even for primitives.
      args <- formals(args(fun))
    }
    args <- args[names(args) != "..."]
    out <- getS3method(generic, signature[[1]][[1]], optional = TRUE)
    # BUG FIX: only fall back to the default method when the class-specific
    # lookup failed; previously this assignment unconditionally clobbered a
    # successfully found S3 method.
    if (is.null(out)) {
      out <- getS3method(generic, "default", optional = TRUE)
    }
  }
  if (is.null(out)) {
    # BUG FIX: `call. = FALSE` was passed to sprintf() instead of stop().
    stop(sprintf("Can't find method for generic '%s' with arguments of type:\n%s", generic, paste0("- ", names(args), ": ", vcapply(signature, paste0, collapse = ", "), collapse = "\n")), call. = FALSE)
  }
  out
}
# Scan every binding in `env` (including dot-prefixed names) and return the
# first name bound to exactly the function `x`; NULL when no binding matches.
find_function_name <- function(x, env) {
  candidates <- ls(env, all.names = TRUE, sorted = FALSE)
  is_binding_for_x <- function(nm) {
    # mode = "function" skips non-function bindings with the same name.
    identical(get0(nm, envir = env, mode = "function", inherits = FALSE), x)
  }
  for (nm in candidates) {
    if (is_binding_for_x(nm)) {
      return(nm)
    }
  }
  NULL
}
#' Retrieve the next applicable method after the current one
#'
#' @return The next matching method for the currently executing generic,
#'   excluding every method already on the call stack.
#' @export
next_method <- function() {
  # Walk down the call stack, collecting the chain of R7 methods that are
  # currently executing, until the generic itself is reached.
  current_method <- sys.function(sys.parent(1))
  methods <- list()
  i <- 1
  while (!inherits(current_method, "R7_generic")) {
    methods <- c(methods, current_method)
    i <- i + 1
    current_method <- sys.function(sys.parent(i))
  }
  generic <- current_method
  # Re-evaluate the dispatch signature in the caller's frame.
  # NOTE(review): generic_generate_signature_call() is defined elsewhere in
  # the package — presumably it builds the call that computes the signature
  # from the generic's arguments; confirm.
  signature <- eval(generic_generate_signature_call(generic@signature), parent.frame())
  # Dispatch again, skipping the methods already run.
  method_impl(generic, signature, ignore = methods)
}
#' Register R7 methods
#'
#' When registering methods for R7 generics defined in other packages you must
#' put `method_register()` in your packages [.onLoad] function.
#'
#' @importFrom utils getFromNamespace packageName
#' @export
method_register <- function() {
  # Methods for foreign generics are queued by new_method() in the calling
  # package's S3 methods table under the ".R7_methods" key.
  package <- packageName(parent.frame())
  tbl <- asNamespace(package)[[".__S3MethodsTable__."]][[".R7_methods"]]
  for (x in tbl) {
    if (isNamespaceLoaded(x$package)) {
      # Target package already loaded: register immediately.
      ns <- asNamespace(x$package)
      new_method(getFromNamespace(x$generic, ns), x$signature, x$method)
    } else {
      # Defer registration until the target package loads; local() captures
      # this iteration's `x` so each hook closes over its own entry.
      setHook(packageEvent(x$package, "onLoad"),
        local({
          x <- x
          function(...) {
            ns <- asNamespace(x$package)
            # Respect an optional minimum-version requirement on the entry.
            if (is.null(x$version) || getNamespaceVersion(ns) >= x$version) {
              new_method(getFromNamespace(x$generic, ns), x$signature, x$method)
            }
          }
        })
      )
    }
  }
}
# Describe a length-one slice of a formals() pairlist for error messages.
# A missing (NA) name means the argument does not exist at that position.
arg_to_string <- function(arg) {
  arg_name <- names(arg)[[1]]
  if (is.na(arg_name)) {
    "does not exist"
  } else {
    sprintf("is `%s = %s`", arg_name, deparse(arg[[1]]))
  }
}
# Validate that `method`'s formals are consistent with the generic's:
# positional arguments before the last must match by name and default, and
# `...` must be present in both or in neither. Errors with a descriptive
# message on mismatch; returns NULL early for formal-less primitives such
# as `[`, TRUE otherwise.
method_compatible <- function(method, generic) {
  generic_formals <- suppressWarnings(formals(args(generic)))
  method_formals <- formals(method)
  # This can happen for some primitive functions such as `[`
  if (length(generic_formals) == 0) {
    return()
  }
  # Compare every generic argument except the last (the dispatch slot).
  for (i in seq_len(length(generic_formals) - 1)) {
    if (!identical(generic_formals[i], method_formals[i])) {
      stop(sprintf("`method` must be consistent with <R7_generic> %s.\n- Argument %i in generic %s\n- Argument %i in method %s", generic@name, i, arg_to_string(generic_formals[i]), i, arg_to_string(method_formals[i])), call. = FALSE)
    }
  }
  # `...` must appear in both signatures or in neither.
  if ("..." %in% names(generic_formals) && !"..." %in% names(method_formals)) {
    stop(sprintf("`method` must be consistent with <R7_generic> %s.\n- `generic` has `...`\n- `method` does not have `...`", generic@name), call. = FALSE)
  }
  if (!"..." %in% names(generic_formals) && "..." %in% names(method_formals)) {
    stop(sprintf("`method` must be consistent with <R7_generic> %s.\n- `generic` does not have `...`\n- `method` has `...`", generic@name), call. = FALSE)
  }
  TRUE
}
#' @rdname method
#' @param package The package to register the method in, only used for soft
#'   dependencies. The default `NULL` looks up the package based on the parent
#'   frame.
#' @export
new_method <- function(generic, signature, method, package = NULL) {
  if (inherits(generic, "R7_external_generic")) {
    # Get current package, if any
    if (!is.null(package)) {
      # Soft dependency: queue the registration in this package's S3 methods
      # table; method_register() performs it when the target package loads.
      tbl <- asNamespace(package)[[".__S3MethodsTable__."]]
      if (is.null(tbl[[".R7_methods"]])) {
        tbl[[".R7_methods"]] <- list()
      }
      tbl[[".R7_methods"]] <- append(tbl[[".R7_methods"]], list(list(generic = generic$generic, package = generic$package, signature = signature, method = method, version = generic$version)))
      return(invisible())
    }
    # Hard dependency: resolve the real generic object right away.
    generic <- getFromNamespace(generic$generic, asNamespace(generic$package))
  }
  # Normalize a bare class (or class name) into a one-element signature list.
  if (!is.character(signature) && !inherits(signature, "list")) {
    signature <- list(signature)
  }
  generic <- as_generic(generic)
  # Errors if the method's formals don't line up with the generic's.
  method_compatible(method, generic)
  if (!inherits(method, "R7_method")) {
    # NOTE(review): R7_method() is a constructor defined elsewhere in the package.
    method <- R7_method(generic, signature, method)
  }
  if (inherits(generic, "S3_generic")) {
    # Plain S3 generic: register through the regular S3 machinery.
    if (inherits(signature[[1]], "R7_class")) {
      signature[[1]] <- signature[[1]]@name
    }
    registerS3method(attr(generic, "name"), signature[[1]], method, envir = parent.frame())
    return(invisible(generic))
  }
  # NOTE(review): `generic_name` is assigned but never used below.
  generic_name <- generic@name
  # Insert into the generic's nested method table: one level of environments
  # per signature position, keyed by class name.
  p_tbl <- generic@methods
  for (i in seq_along(signature)) {
    if (inherits(signature[[i]], "R7_union")) {
      # A union expands into one registration per member class.
      for (class in signature[[i]]@classes) {
        new_method(generic, c(signature[seq_len(i - 1)], class@name), method)
      }
      return(invisible(generic))
    } else if (inherits(signature[[i]], "R7_class")) {
      signature[[i]] <- signature[[i]]@name
    }
    if (i == length(signature)) {
      # Leaf level: store the method itself.
      p_tbl[[signature[[i]]]] <- method
    } else {
      # Interior level: descend into (creating if needed) the next table.
      tbl <- p_tbl[[signature[[i]]]]
      if (is.null(tbl)) {
        tbl <- new.env(hash = TRUE, parent = emptyenv())
        p_tbl[[signature[[i]]]] <- tbl
      }
      p_tbl <- tbl
    }
  }
  invisible(generic)
}
#' @rdname method
#' @export
`method<-` <- function(generic, signature, value) {
  # Replacement-function form of new_method(); the registering package is
  # inferred from the caller's namespace (NULL outside a package).
  new_method(generic, signature, value, package = packageName(parent.frame()))
}
# Find the name under which `generic` is bound in its own environment
# (baseenv() for primitives, whose environment() is NULL). Returns the
# first matching name, or NULL (invisibly, via the for loop) when none.
find_generic_name <- function(generic) {
  env <- environment(generic) %||% baseenv()
  for (nme in names(env)) {
    if (identical(generic, env[[nme]])) {
      return(nme)
    }
  }
}
# Coerce `generic` into a dispatchable form: resolve a function given by
# name (a single string), and tag plain functions as "S3_generic" with a
# "name" attribute; R7 generics pass through unchanged.
as_generic <- function(generic) {
  if (length(generic) == 1 && is.character(generic)) {
    fun <- match.fun(generic)
    generic <- fun
  }
  if (!inherits(generic, "R7_generic")) {
    attr(generic, "name") <- find_generic_name(generic)
    class(generic) <- "S3_generic"
  }
  generic
}
#' Lookup the R7 method for the current generic and call it.
#' @export
method_call <- function() {
  # Delegates to the C implementation, passing the call, function, and
  # evaluation frame of the generic one frame up the stack.
  .Call(method_call_, sys.call(-1), sys.function(-1), sys.frame(-1))
}
|
7d40bbd3c7a23a8b0db681968b453d59b9c05547
|
8113d59af6d145892922bf13069d67c335908930
|
/R/data.R
|
bfa8fc47fdc449c0ed655fe273f2e5e7bbb58cfc
|
[] |
no_license
|
tjburns08/nnvis
|
04a936f6fe232e8e6b1262a73a53b8874fad7730
|
7892d404f648f42a5682cb5d7bea0d7f81d86429
|
refs/heads/master
| 2020-03-29T07:26:06.864933
| 2020-02-19T14:20:10
| 2020-02-19T14:20:10
| 149,665,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 686
|
r
|
data.R
|
#' Publicly available dataset from Nikolay Samusik of the Nolan Lab,
#' consisting of mouse bone marrow. Original data is 86,864 cells, but here
#' it is subsampled down to 10,000. The data were asinh transformed. The
#' Flow Repository ID FR-FCM-ZZPH. File is called Samusik_01.fcs.
#' @format a tibble of 10000 cells by 54 features
"samusik_cells"
#' Selected surface markers from the samusik_cells dataset to be used for dim
#' reduction analysis
#' @format a vector of 38 features
"samusik_surface_markers"
#' The output of t-SNE results from the samusik_cells dataset with the
#' samusik_surface_markers used as input.
#' @format a tibble of 10000 cells by 2 features
"samusik_tsne"
|
54df6818789f0c0e2588990b79e51b2f9a78f14f
|
ce2435ac0d405cc80cfaddc02bb709ea7491a5d5
|
/section1/MyRCode.R
|
f114bf25c1d0e9cc07729c454610667e0c5707f0
|
[
"CC0-1.0"
] |
permissive
|
pauEscarcia/BigData-Zacatecas
|
b9e4014ee1242522c04a46a8fd40badd809cfe7c
|
6ed59608d4583f8d0bdb5caa55c80f41a1c3844a
|
refs/heads/master
| 2021-01-10T05:25:26.429723
| 2016-03-14T03:18:03
| 2016-03-14T03:18:03
| 43,478,578
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
MyRCode.R
|
# Frequency of reasons for lateness, as a named numeric vector and then a
# one-column data frame whose row names are the reasons.
defect.counts <- c(
  Weather = 12, Overslept = 29, "Alarm Failure" = 18,
  "Time Change" = 3, Traffic = 34, Other = 4
)
df.defects <- data.frame(defect.counts)
df.defects
|
8fae73c840c23f6df55b011fcdb545e807989eb7
|
7160c389f135c1ee946d58bd3ce57368d746bc41
|
/man/get_weather.Rd
|
7d4d733dbaeba47127c3ad2844f72f2acdb32d38
|
[
"OGL-Canada-2.0"
] |
permissive
|
jdallmann/winnipegr
|
0e0fa846f72e2c62a0b61082048cd241278aacfb
|
343f342aa32a010723524b140dc67c324b0283f2
|
refs/heads/master
| 2020-04-23T12:50:47.955301
| 2020-01-16T22:02:06
| 2020-01-16T22:02:06
| 171,182,527
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,050
|
rd
|
get_weather.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wpg_weather.R
\name{get_weather}
\alias{get_weather}
\title{Winnipeg Weather}
\usage{
get_weather(
station = c("wpg", "forks", "airport"),
start = as.character(Sys.Date() - 365),
end = as.character(Sys.Date()),
metric = TRUE
)
}
\arguments{
\item{station}{a Winnipeg weather station name in English}
\item{start}{the start date of the weather period}
\item{end}{the end date of the weather period}
\item{metric}{whether the output should be in metric}
}
\value{
hourly Winnipeg weather data set including precipitation
and temperature
}
\description{
This function uses the \code{riem} package to load Winnipeg
weather, abstracting away station names. Measurements can be
converted to metric using \code{weathermetrics}.
}
\examples{
\dontrun{
get_weather(station = "forks",
start = '2018-01-01',
end = '2018-02-28',
metric = TRUE)
get_weather(station = "airport",
start = '2018-01-01')
}
}
|
06ce3587415e1b7f6a74bbb5d89fd70a3ef9777a
|
ef572bd2b0515892d1f59a073b8bf99f81d6a734
|
/man/MessageQueue.Rd
|
ee3687dcb678975de227ffc4350abe556b34d9b1
|
[
"CC0-1.0"
] |
permissive
|
pepfar-datim/datapackr
|
5bc604caa1ae001b6c04e1d934c0c613c59df1e6
|
9275632673e45948db6846513a53c1436cfc0e47
|
refs/heads/master
| 2023-08-30T23:26:48.454382
| 2023-08-11T13:01:57
| 2023-08-11T13:01:57
| 170,350,211
| 9
| 7
|
CC0-1.0
| 2023-09-11T21:53:24
| 2019-02-12T16:19:47
|
R
|
UTF-8
|
R
| false
| true
| 479
|
rd
|
MessageQueue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/messageQueue.R
\name{MessageQueue}
\alias{MessageQueue}
\title{MessageQueue}
\usage{
MessageQueue(message = character(), level = character())
}
\arguments{
\item{message}{One or more character strings}
\item{level}{One of ERROR, WARNING, INFO}
}
\value{
Object of class data.frame and Message queue.
}
\description{
A simple S3 object to deal with messages created during
DataPack processing
}
|
63814c2d560fe5de299390691e4df2e239dc129a
|
12fff55fc23481f867e4be0ea2c37a25ee88aafd
|
/cachematrix.R
|
4ba41d20583172bef928ad2651308a81f8476fad
|
[] |
no_license
|
mgperry1/ProgrammingAssignment2
|
a2b4aaeec319f1300038f0dba69c20dc74e0707f
|
d6c9e37d9d81b061ca3a91e9d067eac5e1f0b501
|
refs/heads/master
| 2021-01-21T00:05:44.551533
| 2014-08-21T14:31:06
| 2014-08-21T14:31:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
cachematrix.R
|
## Cache the inverse of a matrix so repeated requests avoid recomputation.
## Usage:
##   t <- makeCacheMatrix()
##   t$set(matrix(1:4, 2, 2))  # store a matrix
##   cacheSolve(t)             # compute (or fetch the cached) inverse
## Setup the Matrix object
## Build a caching wrapper around matrix `x`: set/get access the matrix,
## setmatrix/getmatrix access the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the cache for the new matrix
    },
    get = function() x,
    setmatrix = function(solve) cached_inverse <<- solve,
    getmatrix = function() cached_inverse
  )
}
## Return a matrix that is the inverse of 'x' (a cache object from
## makeCacheMatrix): reuse the cached inverse when present, otherwise
## compute it with solve() and store it for next time.
cacheSolve <- function(x = matrix(), ...) {
  inv <- x$getmatrix()
  if (is.null(inv)) {
    # Cache miss: compute and remember the inverse.
    inv <- solve(x$get(), ...)
    x$setmatrix(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
99110f7dd75b743f90d48d2c99c66e112082c540
|
407af0f4f7df21f6adc02b5f9c9171c1c7fc3dee
|
/plot_1.R
|
8d7562f96f4f9fbe24ddcd3fb5d10e2c84b02944
|
[] |
no_license
|
komoroka/Exploratory-Data-Analysis-Course-Project
|
e3288839ff894db88c137d5a55be8b0c50d3fd08
|
ba2efbe4eb5fd165feb4ef674cf2f372cfede677
|
refs/heads/master
| 2022-06-22T23:10:06.256276
| 2020-05-09T22:52:31
| 2020-05-09T22:52:31
| 261,964,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,117
|
r
|
plot_1.R
|
# download and unzip the data (only done for the first plot)
filename <- 'Data for Peer Assessment.zip'
download.file(url = 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip',
              destfile = filename)
unzip(filename)
NEI <- readRDS('summarySCC_PM25.rds')
SCC <- readRDS('Source_Classification_Code.rds')

# Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Using the base plotting system, make a plot showing the total PM2.5 emission from
# all sources for each of the years 1999, 2002, 2005, and 2008.

# Sum emissions within each year present in the data.
yrs <- split(NEI, factor(NEI$year))
total.emissions <- unlist(lapply(yrs, function(x) sum(x$Emissions, na.rm = TRUE)))

png('plot_1.png')
# NOTE(review): as.Date("1999", '%Y') appears to fill the unspecified
# month/day from the current date — only the year spacing matters for this
# axis, but confirm the behavior is acceptable.
plot(as.Date(names(total.emissions),'%Y'), total.emissions, pch = 19,
     xlab = 'Years', ylab = 'Total Emissions (Tons)', main = 'Total U.S. PM 2.5 Emissions By Year')
# Connect consecutive yearly points with line segments.
# BUG FIX: removed an unused `seg` data.frame that was built and discarded.
segments(as.Date(names(total.emissions)[1:3],'%Y'), total.emissions[1:3],
         as.Date(names(total.emissions)[2:4],'%Y'), total.emissions[2:4])
dev.off()
|
2f044c528d0807f19124d1e9973d5f12d5609fbf
|
9456677b348c919542cc370dfea0b56d06ca767a
|
/Analysis/Disparity_calculations/Disparity_per_matrix.R
|
f2d51d0fead0ab9b380d9162f98a3cf1a16c6b7c
|
[] |
no_license
|
yassato/SpatioTemporal_Disparity
|
97befea49605279788ebb2da251ade9c08e75aa8
|
0e2b9dd29f51e94189b50767aee863592cd85d8a
|
refs/heads/master
| 2022-04-25T00:36:19.657712
| 2018-01-05T11:30:37
| 2018-01-05T11:30:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,155
|
r
|
Disparity_per_matrix.R
|
#Setwd
if(length(grep("TGuillerme", getwd()))) {
setwd('~/PhD/Projects/SpatioTemporal_Disparity/Analysis')
} else {
warning("You might have to change the directory!")
}
if(length(grep("SpatioTemporal_Disparity/Analysis", getwd()))==0) {
if(length(grep("SpatioTemporal_Disparity-master/Analysis", getwd()))==0) {
stop("Wrong directory!\nThe current directory must be:\nSpatioTemporal_Disparity/Analysis/ OR SpatioTemporal_Disparity-master/Analysis/\nYou can clone the whole repository from:\nhttps://github.com/TGuillerme/SpatioTemporal_Disparity")
}
}
#Load the functions and the packages
source("functions.R")
#After running the Data_setup script, load the different results
######################
#Select the data
######################
#Selecting the file
#SLATER 2013 MEE
chain_name<-"Slater2013"
data_path<-"../Data/"
file_matrix<-"../Data/2013-Slater-MEE-matrix-morpho.nex"
file_tree<-"../Data/2013-Slater-MEE-TEM.tre"
int_breaks<-rev(seq(from=0, to=250, by=32.5))
slices<-rev(seq(from=0, to=250, by=20))+5
slices[length(slices)]<-0
KT_bin=5.5
KT_sli=10
#BECK 2014 ProcB
#chain_name<-"Beck2014"
#data_path<-"../Data/"
#file_matrix<-"../Data/2014-Beck-ProcB-matrix-morpho.nex"
#file_tree<-"../Data/2014-Beck-ProcB-TEM.tre"
#int_breaks<-rev(seq(from=0, to=150, by=20))+5
#int_breaks[length(int_breaks)]<-0
#slices<-rev(seq(from=0, to=150, by=10))
#KT_bin=4.5
#KT_sli=9.5
######################
#Tree and matrix
######################
# Read the discrete morphological character matrix (Claddis NEXUS format)
#matrix
Nexus_data<-ReadMorphNexus(file_matrix)
Nexus_matrix<-Nexus_data$matrix
#tree
Tree_data<-read.nexus(file_tree)
#Cleaning the matrices and the trees
#Remove species with only missing data before hand
# A taxon whose only observed factor level is "?" has no coded characters
if (any(apply(as.matrix(Nexus_matrix), 1, function(x) levels(as.factor((x)))) == "?")) {
Nexus_matrix<-Nexus_matrix[-c(as.vector(which(apply(as.matrix(Nexus_matrix), 1, function(x) levels(as.factor(x))) == "?"))),]
}
#Cleaning the tree and the table
#making the saving folder
# clean.tree()/clean.table() keep only taxa shared by tree and matrix
tree<-clean.tree(Tree_data, Nexus_matrix)
# NOTE(review): 'table' masks base::table for the rest of this script
table<-clean.table(Nexus_matrix, Tree_data)
Nexus_data$matrix<-table
#Forcing the tree to be binary
tree<-bin.tree(tree)
#Adding node labels to the tree
# NOTE(review): seq(1:Nnode(tree)) is redundant -- 1:Nnode(tree) is equivalent
tree$node.label<-paste("n",seq(1:Nnode(tree)), sep="")
#Setting the tree root age
# root.time = oldest taxon/node age returned by tree.age()
tree$root.time<-max(tree.age(tree)[,1])
#Tree ages (useless?)
#Tree_data$root.time<-max(tree.age(Tree_data)$age)
#Plot the tree
#geoscalePhylo(ladderize(Tree_data), cex.age=0.6, cex.ts=0.8, cex.tip=0.5)
######################
#FADLAD file
######################
#Load the F/LAD for Beck
# First/Last Appearance Datum table, one row per taxon (row names = taxa)
FADLAD<-read.csv(paste(data_path, chain_name, "_FADLAD.csv", sep=""), row.names=1)
######################
#Ancestral states reconstruction files
######################
load(paste(data_path, chain_name, "/", chain_name, "_ancestral_states-claddis.Rda", sep="")) #anc_states
######################
#Distance matrices
######################
# Pre-computed morphological distance matrices: tips only, tips+nodes, and
# tips+nodes restricted to >=95% reconstruction confidence
load(paste(data_path, chain_name, "/", chain_name, "_distance-tips.Rda", sep="")) #dist_tips
load(paste(data_path, chain_name, "/", chain_name, "_distance-nodes.Rda", sep="")) #dist_nodes
load(paste(data_path, chain_name, "/", chain_name, "_distance-nodes95.Rda", sep="")) #dist_nodes95
#Remove the inapplicable characters
trimmed_max_data_tips<-TrimMorphDistMatrix(dist_tips$max.dist.matrix)
trimmed_max_data_nodes<-TrimMorphDistMatrix(dist_nodes$max.dist.matrix)
trimmed_max_data_nodes95<-TrimMorphDistMatrix(dist_nodes95$max.dist.matrix)
#Remove the dropped taxa from the tree ; and resetting the root age
tree_tips<-drop.tip(tree, trimmed_max_data_tips$removed.taxa) ; tree_tips$root.time<-max(tree.age(tree_tips)[,1])
tree_nodes<-drop.tip(tree, trimmed_max_data_nodes$removed.taxa) ; tree_nodes$root.time<-max(tree.age(tree_nodes)[,1])
tree_nodes95<-drop.tip(tree, trimmed_max_data_nodes95$removed.taxa) ; tree_nodes95$root.time<-max(tree.age(tree_nodes95)[,1])
#Remove the eventual inapplicable nodes
# NOTE(review): the second line below indexes ROWS with tree_nodes labels but
# COLUMNS with tree_nodes95 labels -- this looks like a copy-paste slip; the
# rows should presumably also use tree_nodes95. Confirm before relying on it.
trimmed_max_data_nodes$dist.matrix<-trimmed_max_data_nodes$dist.matrix[c(tree_nodes$tip.label, tree_nodes$node.label),c(tree_nodes$tip.label, tree_nodes$node.label)]
trimmed_max_data_nodes95$dist.matrix<-trimmed_max_data_nodes95$dist.matrix[c(tree_nodes$tip.label, tree_nodes$node.label),c(tree_nodes95$tip.label, tree_nodes95$node.label)]
#List of trees
trees<-list("tips"=tree_tips, "nodes"=tree_nodes, "nodes95"=tree_nodes95)
######################
#PCO
######################
# Principal coordinates analysis; add=T applies cmdscale()'s additive
# (Cailliez) constant to avoid negative eigenvalues; keep n-1 axes
pco_data_tips<-cmdscale(trimmed_max_data_tips$dist.matrix, k=nrow(trimmed_max_data_tips$dist.matrix) - 1, add=T)$points
pco_data_nodes<-cmdscale(trimmed_max_data_nodes$dist.matrix, k=nrow(trimmed_max_data_nodes$dist.matrix) - 1, add=T)$points
pco_data_nodes95<-cmdscale(trimmed_max_data_nodes95$dist.matrix, k=nrow(trimmed_max_data_nodes95$dist.matrix) - 1, add=T)$points
#Storing as a list
pco_data<-list("tips"=pco_data_tips, "nodes"=pco_data_nodes, "nodes95"=pco_data_nodes95)
######################
#Disparity
######################
# Optional rarefaction (kept for reference, not run):
#rarefaction_median<-disparity(pco_data, rarefaction=TRUE, verbose=TRUE, central_tendency=median)
# ---- Interval-based subsets of the PCO spaces ----
# int.pco(..., diversity=TRUE) returns list(pco-per-interval, diversity);
# the two parts are split into separate objects below.
pco_int_tips <- int.pco(pco_data_tips, tree_tips, int_breaks, include.nodes=FALSE, FAD_LAD=FADLAD, diversity=TRUE)
int_tips_div <- pco_int_tips[[2]]
pco_int_tips <- pco_int_tips[[1]]
pco_int_nodes <- int.pco(pco_data_nodes, tree_nodes, int_breaks, include.nodes=TRUE, FAD_LAD=FADLAD, diversity=TRUE)
int_nodes_div <- pco_int_nodes[[2]]
pco_int_nodes <- pco_int_nodes[[1]]
pco_int_nodes95 <- int.pco(pco_data_nodes95, tree_nodes95, int_breaks, include.nodes=TRUE, FAD_LAD=FADLAD, diversity=TRUE)
int_nodes95_div <- pco_int_nodes95[[2]]
pco_int_nodes95 <- pco_int_nodes95[[1]]
# Disparity per interval (rm.last.axis=TRUE variants kept for reference):
#disp_int_tips_95axis<-time.disparity(pco_int_tips, verbose=TRUE, rm.last.axis=TRUE)
disp_int_tips <- time.disparity(pco_int_tips, verbose=TRUE)
disp_int_nodes <- time.disparity(pco_int_nodes, verbose=TRUE)
disp_int_nodes95 <- time.disparity(pco_int_nodes95, verbose=TRUE)
# ---- Time-slice subsets ----
# Four ways of resolving a lineage's state at a slice time
methods <- c("random", "acctran", "deltran", "proximity")
# nodes: one PCO-slice set and one diversity curve per method
# (seq_along() instead of 1:length() -- safe if 'methods' were ever empty)
pco_slices_nodes <- vector("list", length(methods))
slices_nodes_div <- vector("list", length(methods))
for (type in seq_along(methods)) {
	sliced <- slice.pco(pco_data_nodes, tree_nodes, slices, method=methods[[type]], FAD_LAD=FADLAD, verbose=TRUE, diversity=TRUE)
	slices_nodes_div[[type]] <- sliced[[2]]
	pco_slices_nodes[[type]] <- sliced[[1]]
}
names(pco_slices_nodes) <- names(slices_nodes_div) <- methods
# nodes95
pco_slices_nodes95 <- vector("list", length(methods))
slices_nodes95_div <- vector("list", length(methods))
for (type in seq_along(methods)) {
	sliced <- slice.pco(pco_data_nodes95, tree_nodes95, slices, method=methods[[type]], FAD_LAD=FADLAD, verbose=TRUE, diversity=TRUE)
	slices_nodes95_div[[type]] <- sliced[[2]]
	pco_slices_nodes95[[type]] <- sliced[[1]]
}
names(pco_slices_nodes95) <- names(slices_nodes95_div) <- paste0(methods, "95")
# Disparity per slice, per method
disp_slices_nodes <- vector("list", length(methods))
for (type in seq_along(methods)) {
	disp_slices_nodes[[type]] <- time.disparity(pco_slices_nodes[[type]], verbose=TRUE)
}
names(disp_slices_nodes) <- names(pco_slices_nodes)
disp_slices_nodes95 <- vector("list", length(methods))
for (type in seq_along(methods)) {
	disp_slices_nodes95[[type]] <- time.disparity(pco_slices_nodes95[[type]], verbose=TRUE)
}
names(disp_slices_nodes95) <- names(pco_slices_nodes95)
######################
#Plot the disparity
######################
# 5 x 11 panel figure: one row per disparity measure, one column per time
# sub-sampling scheme (3 interval schemes, 4 slicing methods on the node
# data, and the same 4 methods on the 95% node data).
# The 55 near-identical plot.disparity()/abline() pairs are generated by two
# loops; the argument set of every call is identical to the original code.
# NOTE(review): quartz() is macOS-only -- use dev.new() for portability.
quartz(width = 22.4, height = 15.6) #A5 landscape
#Windows dimensions
op<-par(mfrow=c(5, 11), bty="l")# oma=c(bottom, left, top, right)
# Column descriptors: disparity table, matching diversity curve, K-T boundary
# position (bin units for intervals, slice units for slices) and panel title
# (titles are drawn on the first row only).
panels <- list(
	list(disp = disp_int_tips, div = int_tips_div, kt = KT_bin, main = "Intervals: tips"),
	list(disp = disp_int_nodes, div = int_nodes_div, kt = KT_bin, main = "Intervals: nodes"),
	list(disp = disp_int_nodes95, div = int_nodes95_div, kt = KT_bin, main = "Intervals: nodes(95)"),
	list(disp = disp_slices_nodes$random, div = slices_nodes_div$random, kt = KT_sli, main = "Slices: random"),
	list(disp = disp_slices_nodes$acctran, div = slices_nodes_div$acctran, kt = KT_sli, main = "Slices: acctran"),
	list(disp = disp_slices_nodes$deltran, div = slices_nodes_div$deltran, kt = KT_sli, main = "Slices: deltran"),
	list(disp = disp_slices_nodes$proximity, div = slices_nodes_div$proximity, kt = KT_sli, main = "Slices: proximity"),
	list(disp = disp_slices_nodes95$random, div = slices_nodes95_div$random, kt = KT_sli, main = "Slices: random (95)"),
	list(disp = disp_slices_nodes95$acctran, div = slices_nodes95_div$acctran, kt = KT_sli, main = "Slices: acctran (95)"),
	list(disp = disp_slices_nodes95$deltran, div = slices_nodes95_div$deltran, kt = KT_sli, main = "Slices: deltran (95)"),
	list(disp = disp_slices_nodes95$proximity, div = slices_nodes95_div$proximity, kt = KT_sli, main = "Slices: proximity (95)"))
# Row descriptors: measure column in the disparity tables + y-axis label
measures <- c("Cent.dist", "Sum.range", "Sum.var", "Prod.range", "Prod.var")
ylabs <- c("Distance from centroid", "Sum of ranges", "Sum of variance",
           "Product of range", "Product of variance")
for (i in seq_along(measures)) {
	for (j in seq_along(panels)) {
		plot_args <- list(panels[[j]]$disp,
		                  rarefaction = FALSE,
		                  # x label only on the bottom row, y label only in
		                  # the first column -- exactly as in the original
		                  xlab = if (i == length(measures)) "Mya" else "",
		                  ylab = if (j == 1) ylabs[i] else "",
		                  measure = measures[i],
		                  diversity = panels[[j]]$div)
		# titles only on the first row; 'main' is omitted entirely elsewhere
		# (not passed as ""), matching the original calls
		if (i == 1) {
			plot_args$main <- panels[[j]]$main
		}
		do.call(plot.disparity, plot_args)
		# mark the K-T boundary
		abline(v = panels[[j]]$kt, col = "red")
	}
}
par(op)
|
574844fd38b88831eba74e5458dd179d0d0574fe
|
094bb4b008075c6c89d437f6a753e553ee01b03e
|
/man/aggregate_taxa.Rd
|
bef480472630194197c6719c353f887a92c8bc98
|
[] |
no_license
|
TommasoCanc/biomonitoR
|
025264f6a6d69fae0e896a9f0a7874da7b184003
|
26136a8becdeb052adc5f8ff08d585024d50d223
|
refs/heads/master
| 2022-02-03T18:31:19.541891
| 2022-01-24T13:19:42
| 2022-01-24T13:19:42
| 141,422,404
| 0
| 0
| null | 2018-07-18T10:57:02
| 2018-07-18T10:57:02
| null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
aggregate_taxa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggregate_taxa.R
\name{aggregate_taxa}
\alias{aggregate_taxa}
\title{aggregate_taxa}
\usage{
aggregate_taxa(x, FUN = sum)
}
\arguments{
\item{x}{Result of as_biomonitor}
\item{FUN}{the function to be applied for aggregating to higher taxonomic levels.
Must be sum for both abundances and presence-absence data.
Default to \code{sum}.}
}
\description{
This function prepares data for further calculations.
}
\examples{
data(macro_ex)
data_bio <- as_biomonitor(macro_ex)
data_agr <- aggregate_taxa(data_bio)
# example for macrophytes
data(oglio)
oglio_asb <- as_biomonitor(oglio, group = "mf")
oglio_agg <- aggregate_taxa(oglio_asb)
richness(oglio_agg, tax_lev = "Species")
richness(oglio_agg, tax_lev = "Genus")
richness(oglio_agg, tax_lev = "Family")
}
\seealso{
\link{as_biomonitor}
}
\keyword{aggregate_taxa}
|
2a9ee7edd51262b2895c9cf097216ff3ee8bfa7c
|
ec0bc03a32995226cbb8d314e6c74539f5ff691d
|
/ATTEDII_Base/AGRIS/ATTEDII_EnrichemntOfTFsTargetGenes.R
|
117c51d5aefd2759847949ccfc912254b239e98d
|
[] |
no_license
|
YKeito/PCCofPCC
|
104411b7e3de4186a8a00584b684a62641f89447
|
32973801734523e9641ff4dbe47bc04d2dfbb0c0
|
refs/heads/master
| 2022-12-24T00:06:26.909714
| 2020-09-28T05:29:39
| 2020-09-28T05:29:39
| 299,197,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,444
|
r
|
ATTEDII_EnrichemntOfTFsTargetGenes.R
|
#"~/Nakano_RNAseq/network_analysis/script/PCCofPCC/ATTEDII_Base/ATTEDII_EnrichemntOfTFsTargetGenes.R"
# Hypergeometric enrichment of each AGRIS/AtRegNet transcription factor's
# target-gene set within every MCL cluster of the ATTED-II network.
# Output: one row per (TF, cluster) with raw p and BH-adjusted q values.
before <- proc.time()
# NOTE(review): removed install.packages("tidyr") -- a script should never
# install packages at run time; install dependencies once, interactively.
#package----
library(stringr)
library(dplyr)
library(tidyr)
#input data----
MasterTable <- readRDS("~/bigdata/yasue/PCCOfPCC/ATTEDII/RDS/Table/20200311ATTEDII_MCLNumNodeTable.rds")
AtRegNet <- read.table("~/bigdata/yasue/AGRIS/AtRegNet/20190729AtRegNet_modified.txt", sep = "\t", quote = "", fill = TRUE, stringsAsFactors = FALSE, header = TRUE)
# keep only well-formed 9-character AGI locus codes on both ends of an edge
AtRegNet <- AtRegNet[nchar(AtRegNet$TFLocus) == 9 & nchar(AtRegNet$TargetLocus) == 9, ]
TAIR10 <- readRDS("~/bigdata/yasue/TAIR10_ShortName.rds")
AGRIS <- read.table("~/bigdata/yasue/AGRIS/AGRIS_TFLIST/AGRIS_TFList_modified.txt", sep = "\t", quote = "", fill = TRUE, stringsAsFactors = FALSE)
AGRIS$V2 <- toupper(AGRIS$V2)
#processing data----
# normalise locus ids to upper case so the tables can be matched
AtRegNet$TFLocus <- toupper(AtRegNet$TFLocus)
AtRegNet$TargetLocus <- toupper(AtRegNet$TargetLocus)
#Enrichment of TFs Target genes----
T.MCLNum <- max(as.numeric(MasterTable$MCLNum), na.rm = TRUE)
TF.AGI <- unique(AtRegNet$TFLocus)
names(TF.AGI) <- TAIR10$annotation[match(TF.AGI, TAIR10$AGI)]
allgenes <- MasterTable$AGI
N <- length(allgenes)   # population size for the hypergeometric test
# Collect one data.frame per TF and bind once at the end: rbind() inside the
# loop (as before) is O(n^2). Loop counters are managed by for() itself; the
# old manual j <- j+1 / k <- k+1 increments were redundant and removed.
T.data.list <- vector("list", length(TF.AGI))
for (j in seq_along(TF.AGI)) {
  T.target <- unique(AtRegNet$TargetLocus[AtRegNet$TFLocus == TF.AGI[j]])
  M <- length(T.target)          # "successes" in the population
  T.pvalue <- numeric(T.MCLNum)  # preallocated, no c() growth
  for (k in seq_len(T.MCLNum)) {
    T.Node <- MasterTable %>% filter(MCLNum == k) %>% select("AGI") %>% unlist(use.names = FALSE)
    n <- length(T.Node)                       # cluster size (number of draws)
    x <- length(intersect(T.Node, T.target))  # targets observed in cluster
    # upper-tail P(X >= x) under the hypergeometric null
    T.pvalue[k] <- phyper(x - 1, M, N - M, n, lower.tail = FALSE)
  }
  T.data.list[[j]] <- data.frame(MCLNum = 1:T.MCLNum,
                                 AGI = rep(TF.AGI[j], times = T.MCLNum),
                                 TF = rep(names(TF.AGI)[j], times = T.MCLNum),
                                 family = rep(AGRIS$V1[match(TF.AGI[j], AGRIS$V2)], times = T.MCLNum),
                                 pvalue = T.pvalue,
                                 qvalue = p.adjust(T.pvalue, method = "BH"),
                                 stringsAsFactors = FALSE
                                 )
  print(length(TF.AGI) - j)  # countdown progress indicator
}
T.data <- do.call(rbind, T.data.list)
#save data----
saveRDS(T.data, "~/bigdata/yasue/PCCOfPCC/ATTEDII/RDS/Table/ATTEDII_EnrichemntOfTFsTargetList.rds")
#elapsed time------------------------------------------
after <- proc.time()
print(after - before)#3240.220 sec, 54 min
#remove object----
rm(list = ls())
|
b7d74545f175e48b78d2622f92218c8697a231b3
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.compute/man/ec2_deregister_image.Rd
|
4c2009bdc15973a927f5c4e31fd9c49843fa6685
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,320
|
rd
|
ec2_deregister_image.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_deregister_image}
\alias{ec2_deregister_image}
\title{Deregisters the specified AMI}
\usage{
ec2_deregister_image(ImageId, DryRun)
}
\arguments{
\item{ImageId}{[required] The ID of the AMI.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\value{
An empty list.
}
\description{
Deregisters the specified AMI. After you deregister an AMI, it can't be
used to launch new instances; however, it doesn't affect any instances
that you've already launched from the AMI. You'll continue to incur
usage costs for those instances until you terminate them.
When you deregister an Amazon EBS-backed AMI, it doesn't affect the
snapshot that was created for the root volume of the instance during the
AMI creation process. When you deregister an instance store-backed AMI,
it doesn't affect the files that you uploaded to Amazon S3 when you
created the AMI.
}
\section{Request syntax}{
\preformatted{svc$deregister_image(
ImageId = "string",
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
8b36e5a4a7c4ad00cf8f80402f8a480efb1afa4e
|
54619814c48f9f41c427ced3da8159c47316e601
|
/annotating_cell_cycle_genes.R
|
b91335096c8eeae746a51327bbb58fcc180b5487
|
[] |
no_license
|
amarseg/RNA_seq
|
f40f11d97c3a64a18ba9e413bbdaec3453214c53
|
3d07eb8b77e0f03a6c8ef610798f3af3a434c03e
|
refs/heads/master
| 2021-05-04T10:56:34.146765
| 2017-08-14T09:39:11
| 2017-08-14T09:39:11
| 36,794,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,350
|
r
|
annotating_cell_cycle_genes.R
|
###Annotating genes in cell cycle phases (using Sam's GO final table)
# NOTE(review): rm(list = ls()) and setwd() at the top of a script are
# discouraged -- they clobber the caller's session; kept as-is for parity.
rm(list = ls())
setwd('C:/Users/am4613/Documents/Summaries_as_timecourses/')
# per-gene peak times -- loaded but not used in the visible code; confirm
peak_times <- read.delim('C:/Users/am4613/Documents/Summaries_as_timecourses/fission_timecourses/Peaktimes all genes.txt', header= T, strings = F)
# presumably defines GOfinal (gene x GO-term 0/1 table) used below -- confirm
load('../GitHub/Misc/GO.analysis.110914.rda')
# plotting helpers; not called in the code visible here
source('../GitHub/RNA_seq/cell_cycle_plotter.R')
# Extract the gene ids annotated with a given GO term.
#
# term_name : name of a column in the annotation table.
# go_table  : genes-x-terms 0/1 table (row names = gene ids). Defaults to
#             the GOfinal object loaded above, so existing one-argument
#             calls behave exactly as before.
# Returns   : character vector of row names where the term column equals 1.
go_extractor <- function(term_name, go_table = GOfinal)
{
	toDo <- go_table[, colnames(go_table) == term_name]
	genes <- row.names(go_table[which(toDo == 1), ])
	return(genes)
}
# Columns of GOfinal holding the Rustici et al. cell-cycle phase terms
colnames_cell_cycle <- grep(colnames(GOfinal), pattern = 'Rustici', value = TRUE)
cell_cycle_genes <- lapply(colnames_cell_cycle, FUN = go_extractor)
names(cell_cycle_genes) <- colnames_cell_cycle
# Gene lists of interest: an RNA cluster and a protein correlation list
# (spell out stringsAsFactors -- 'strings' relied on partial matching)
rna_list <- read.delim('both_clustering/Cluster_2.txt', header = TRUE, stringsAsFactors = FALSE)
prot_list <- read.delim('protein_isx_correlation/negative_correlation_length.txt', header = TRUE, stringsAsFactors = FALSE)
gene_list <- c(rna_list[, 1], row.names(prot_list))
gene_list <- as.data.frame(unique(gene_list))
gene_list$cell_cycle <- NA
# Label each gene with its phase. seq_along() replaces the hard-coded 1:5 so
# the loop follows however many Rustici terms were actually found.
# NOTE: if a gene belongs to several phases, the last phase wins (as before).
for (i in seq_along(cell_cycle_genes)) {
	x <- cell_cycle_genes[[i]]
	gene_list$cell_cycle[gene_list[, 1] %in% x] <- names(cell_cycle_genes)[i]
}
# Keep only genes with a phase; shorten labels to their first two characters
only_cell_cycle <- na.omit(gene_list)
only_cell_cycle$cell_cycle <- substr(only_cell_cycle$cell_cycle, start = 1, stop = 2)
barplot(table(only_cell_cycle[, 2]), las = 2)
|
a7c030b3957348b5f70dab70b3cb2bbfb844a36b
|
665b491ee5cc3af40c02a49e9ac6277a5eeaca02
|
/inst/tinytest/test-formula.r
|
5be73aaf9e62bb2ee1329c2b164f4f20face0e0e
|
[
"MIT"
] |
permissive
|
USCbiostats/aphylo
|
49ac5286c5b69350b85d11a4d23dc7422ef3c26c
|
0a2f61558723c3a3db95e7f5b4e5edc4bf87a368
|
refs/heads/master
| 2023-08-16T19:57:29.976058
| 2023-08-09T20:20:32
| 2023-08-09T20:20:32
| 77,094,049
| 10
| 1
|
NOASSERTION
| 2020-06-07T02:24:17
| 2016-12-21T23:37:46
|
R
|
UTF-8
|
R
| false
| false
| 1,231
|
r
|
test-formula.r
|
# Tests for aphylo_formula(): model construction from formulas plus input
# validation. testthat wrappers are commented out; runs flat under tinytest.
# context("Formulae")
# test_that("Formulas create the right model", {
set.seed(0772)
# random 50-tip annotated tree with ~50% of annotations dropped
x <- rdrop_annotations(raphylo(50), .5)
# equivalent formulas must build the same model regardless of term order
m_mu <- aphylo_formula(x ~ mu_d)
m_mu_psi0 <- aphylo_formula(x ~ mu_d + psi)
m_mu_psi1 <- aphylo_formula(x ~ psi)
m_mu_psi2 <- aphylo_formula(x ~ psi + mu_d)
# [-1] drops the first deparsed line -- presumably the part that legitimately
# differs between the objects; confirm against deparse() output
expect_equal(deparse(m_mu_psi0)[-1], deparse(m_mu_psi1)[-1])
expect_equal(deparse(m_mu_psi0)[-1], deparse(m_mu_psi2)[-1])
set.seed(071)
x <- rdrop_annotations(raphylo(50), .5)
# LHS symbol that does not exist in scope
expect_error(aphylo_formula(y ~ psi), "be found")
y <- 1
# LHS object exists but has the wrong class
expect_error(aphylo_formula(y ~ psi), "should be either")
# parameter vectors that over- or under-specify the requested model
expect_error(aphylo_formula(x ~ mu_d, c(mu_d0=1, mu_d1=1, psi1=0)), "overspecified")
expect_error(aphylo_formula(x ~ mu_d, c(mu_d0=1)), "missing")
# malformed formula terms
expect_error(aphylo_formula(x~mu_d(9)), "Arguments passed to")
expect_error(aphylo_formula(x~mu_d + I(x)), "supported")
set.seed(121)
x <- raphylo(30)
# 2 x 7 start-parameter matrix without dimnames -> parameters can only be
# matched by position, which should raise the warning checked below
p <- matrix(runif(7*2), nrow=2, dimnames = NULL)
# deliberately short MCMC run; convergence checker disabled to keep it fast
ctrl <- list(nchains=2, nsteps=500, burnin=10, conv_checker = NULL)
expect_warning(
ans <- aphylo_mcmc(x ~ psi+ mu_d + eta + Pi, params = p, control = ctrl),
"matched by position"
)
expect_identical(class(ans), "aphylo_estimates")
# aphylo_formula(x ~ psi + mu_d + mu_s + eta)$fun
|
02a5d600bfe91e50a63bf5b602e4e3db2e01a250
|
c5320cac9d3591ea2fa7319b3c304d59ad143420
|
/plot4.R
|
a3d3e551d839b0b5d8664607e18ee49ffaee0119
|
[] |
no_license
|
saame7w/ExData_Plotting1
|
66b00b5fbafcdd5a3e738ce0d9ece73c032de60e
|
be2d40127c6ab01a02b757c3131d2cda85d84625
|
refs/heads/master
| 2021-07-17T04:08:26.255580
| 2017-10-22T12:47:30
| 2017-10-22T12:47:30
| 107,858,493
| 0
| 0
| null | 2017-10-22T11:05:59
| 2017-10-22T11:05:58
| null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
plot4.R
|
# plot4.R -- 2x2 grid of household power-consumption time series for
# 2007-02-01/02, saved to plot4.png (UCI "Individual household electric
# power consumption" data set).
dataall <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# keep only the two target days (Date is still a d/m/Y character here)
data <- dataall[dataall$Date=="1/2/2007" | dataall$Date=="2/2/2007",]
# the raw file encodes missing values as "?"
data[data=="?"] <- NA
# build full POSIXlt datetimes before Date is converted away from character
data$Time <- strptime(paste(data$Date,data$Time,sep = " "), "%d/%m/%Y %H:%M:%S")
data$Date <- as.Date.character(data$Date,"%d/%m/%Y")
# numeric conversions (columns were read as character because of the "?" codes)
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Global_reactive_power <- as.numeric(data$Global_reactive_power)
data$Voltage <- as.numeric(data$Voltage)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
png("plot4.png")
par(mfrow=c(2,2), oma=c(2,0,1,1), mar=c(4,6,2,0))
with(data,
{
     # top-left: global active power; top-right: voltage
     plot(Time,Global_active_power, type="l",xlab = "",ylab = "Global Active Power")
     plot(Time,Voltage, type="l",xlab = "datetime")
     # bottom-left: the three sub-metering series overlaid
     plot(Time,Sub_metering_1, type="l",xlab = "",ylab = "Energy sub metering")
     lines(Time,Sub_metering_2, col="red")
     lines(Time,Sub_metering_3, col="blue")
     # NOTE(review): pch = 151 is outside the documented pch range (0-25
     # symbols, 32-127 characters); lty = 1 is the conventional choice for a
     # line legend. Kept as-is to preserve the original output -- confirm.
     legend("topright", pch = 151, legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"))
     # bottom-right: global reactive power
     plot(Time,Global_reactive_power, type="l",xlab = "datetime")
})
dev.off()
|
c593f43476497ee23595e56535b2d79db1d3c13d
|
8d46815595f25a7b6217ce62dfc7f9c33b31da12
|
/quantmod-master/man/getSymbols.oanda.Rd
|
27c35ea62652ce444c78c1db1decd83698f1101d
|
[
"MIT"
] |
permissive
|
Sdoof/PyFinTech
|
43e4c6ed1cb4b092cebd2fb1e033988ff6fb106d
|
4cd7efa2df52fc9e14aca1bb548abd0fd8ba9278
|
refs/heads/master
| 2020-04-26T11:17:04.987441
| 2019-02-28T15:22:33
| 2019-02-28T15:22:33
| 173,511,290
| 1
| 0
| null | 2019-03-02T23:46:27
| 2019-03-02T23:46:27
| null |
UTF-8
|
R
| false
| false
| 2,370
|
rd
|
getSymbols.oanda.Rd
|
\name{getSymbols.oanda}
\alias{getSymbols.oanda}
\alias{oanda.currencies}
\title{ Download Currency and Metals Data from Oanda.com }
\description{
Access to 191 currency and metal prices, downloadable
  as more than 36,000 currency pairs from Oanda.com.
Downloads \code{Symbols} to specified \code{env} from
\url{www.oanda.com} historical currency database.
This method is not meant to be called directly, instead
a call to \code{getSymbols("x",src="oanda")} will
in turn call this method. It is documented for the
sole purpose of highlighting the arguments accepted,
and to serve as a guide to creating additional
getSymbols 'methods'.
}
\usage{
getSymbols.oanda(Symbols,
env,
return.class = "xts",
from = Sys.Date() - 179,
to = Sys.Date(),
...)
}
\arguments{
\item{Symbols}{ a character vector specifying the names
of each symbol to be loaded - expressed as a currency pair.
(e.g. U.S. Dollar to Euro rate would be expressed as
a string \dQuote{USD/EUR}. The naming convention follows from
Oanda.com, and a table of possible values is
available by calling \code{oanda.currencies} }
\item{env}{ where to create objects. }
\item{return.class}{ class of returned object }
\item{from}{ Start of series expressed as "CCYY-MM-DD" }
       \item{to}{ End of series expressed as "CCYY-MM-DD" }
\item{\dots}{ additional parameters }
}
\details{
Meant to be called internally by getSymbols only.
Oanda data is 7 day daily average price data, that is Monday through Sunday.
Oanda only provides historical data for the past 180 days. getSymbols will
return as much data as possible, and warn when the \code{from} date is more
than 180 days ago.
}
\value{
A call to getSymbols(Symbols,src="oanda") will load into the specified
environment one object for each 'Symbol' specified, with class
defined by 'return.class'. Presently this may be 'ts',
'zoo', 'xts', or 'timeSeries'.
}
\note{
Oanda rates are quoted as one unit of the base currency to the
equivalent amount of the foreign
currency.
}
\references{ Oanda.com \url{http://www.oanda.com} }
\author{ Jeffrey A. Ryan }
\seealso{ Currencies: \code{\link{getSymbols.FRED}},
\code{\link{getSymbols}} }
\examples{
\dontrun{
getSymbols("USD/EUR",src="oanda")
getSymbols("USD/EUR",src="oanda",from="2005-01-01")
}
}
\keyword{ datasets }
|
8e325d7b36f9d5fd5c73b19d7288a53d6ae48afb
|
c746b5f40c118fb4f41a2d7cb88024738476d40f
|
/Model_Application/Testing/OS_Lasso_Plus_single.R
|
1373a31a50aabbb94653a79c0cc837627f8acb95
|
[] |
no_license
|
multach87/Dissertation
|
5548375dac9059d5d582a3775adf83b5bc6c0be7
|
d20b4c6d3087fd878a1af9bc6e8543d2b94925df
|
refs/heads/master
| 2023-06-25T20:09:25.902225
| 2021-07-23T18:51:07
| 2021-07-23T18:51:07
| 281,465,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,071
|
r
|
OS_Lasso_Plus_single.R
|
# OS-lasso testing script: loads one simulated data set and defines the
# k-fold subsetter used by the cross-validation code below.
#libraries
library(glmnet)
#load data
# (earlier data locations kept for reference)
#data.full <- readRDS()
#full.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/")
debug.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/debug_data_091720.RData")
#load data
# use the 10th simulated data set; each element presumably holds a design
# matrix X and response Y -- confirm structure of debug_data_091720.RData
single.data <- debug.data[[10]]
X <- single.data[["X"]]
Y <- single.data[["Y"]]
#KFold subsetter function
# Assign observations to k (near-)equal folds.
#
# data   : a matrix/data.frame (rows = observations) or an atomic vector.
# k      : number of folds; when the observation count is not a multiple of
#          k, the first (n %% k) folds receive one extra observation.
# seed   : RNG seed for the fold assignment; pass 0/FALSE to skip seeding.
# list   : if TRUE, return the data split into a list by fold (fold column
#          dropped); if FALSE, return the data with a fold-id column bound on
#          (named "subset" for 2-D input; the 1-D result is an unnamed
#          2-column matrix, matching the original behaviour).
# random : if TRUE folds are assigned at random, otherwise in sorted
#          (contiguous) order.
#
# Fixes vs the original: the 1-D branch previously ignored 'random' and
# always sampled; `x <- return(y)` misuse removed; four unused size
# variables removed; unsupported input dimensions now raise an error instead
# of silently returning NULL; the duplicated 1-D/2-D logic is shared.
kfold_subsetter <- function(data , k , seed = 7 , list = FALSE , random = TRUE) {
        n.dim <- length(dim(data))
        if (n.dim == 2) {
                n.obs <- nrow(data)
        } else if (n.dim == 0) {
                n.obs <- length(data)
        } else {
                stop("'data' must be an atomic vector or a two-dimensional object")
        }
        # fold ids: every fold gets floor(n/k) observations, and the first
        # (n %% k) folds get one extra (seq_len(0) is empty, so this also
        # covers the evenly-divisible case)
        n.extra <- n.obs %% k
        subset.indicator <- c(rep(seq_len(k) , n.obs %/% k) ,
                              seq_len(n.extra))
        # fix the random assignment process (seed = 0/FALSE skips seeding)
        if (seed) {
                set.seed(seed)
        }
        fold <- if (random) {
                sample(subset.indicator)
        } else {
                sort(subset.indicator)
        }
        if (n.dim == 2) {
                newdata <- cbind(data , subset = fold)
        } else {
                # matrix() drops the column names, as the original did
                newdata <- matrix(cbind(data , subset = fold) , ncol = 2)
        }
        if (list) {
                # drop the fold column and split the remainder by fold id
                return(split(newdata[ , -ncol(newdata)] ,
                             f = newdata[ , ncol(newdata)]))
        } else {
                return(newdata)
        }
}
# ---------------------------------------------------------------------------
# Scratch/debug block: builds the objects used inside OS.lasso() below, once
# with the custom kfold_subsetter() and once with a plain sequential split.
# Wrapped in { } so the whole setup can be executed as one unit.
# NOTE(review): lambda.gamma.try is referenced before any visible definition
# (in the rep(NA, length(lambda.gamma.try)) lines) -- it must be created
# before this block is run.
{
#with custom function
n<-length(Y)
n
Y.orgn<- Y
Y.orgn
# candidate lasso penalties, log-spaced over [0.01, 1400]
lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
set.seed(501)
model.for.cv<- cv.glmnet(X, Y, family="gaussian",lambda=lambda.try)
lambda.lasso.opt<- model.for.cv$lambda.min
lambda.lasso.opt
model.est<- glmnet(X,Y,family="gaussian",lambda=lambda.lasso.opt)
fit.lasso<- predict(model.est,X,s=lambda.lasso.opt)
fit.lasso
res.lasso<- Y - fit.lasso
# robust residual scale estimate (median absolute deviation)
sigma.est<- mad(Y-fit.lasso)
beta.est<- as.numeric(model.est$beta)
gamma.est<-rep(0,n)
K <- 5
# fold assignment via the custom subsetter; "subset" column = fold id
X.new <- kfold_subsetter(X , k = K)
Y.new <- cbind(Y , X.new[ , "subset"])
n.cv <- n/ K
CV.error2<-CV.error<-rep(NA,length(lambda.gamma.try))
Y.pred.cv<-matrix(NA,nrow=length(Y),ncol=length(lambda.gamma.try))
#without custom function
# (repeats the setup above but leaves Y unsplit; folds are taken as
# contiguous chunks inside OS.lasso() instead)
n<-length(Y)
n
Y.orgn<- Y
Y.orgn
set.seed(501)
model.for.cv<- cv.glmnet(X, Y, family="gaussian",lambda=lambda.try)
lambda.lasso.opt<- model.for.cv$lambda.min
lambda.lasso.opt
model.est<- glmnet(X,Y,family="gaussian",lambda=lambda.lasso.opt)
fit.lasso<- predict(model.est,X,s=lambda.lasso.opt)
fit.lasso
res.lasso<- Y - fit.lasso
sigma.est<- mad(Y-fit.lasso)
beta.est<- as.numeric(model.est$beta)
gamma.est<-rep(0,n)
n.fold<- 5
n.cv <- n/n.fold
CV.error2<-CV.error<-rep(NA,length(lambda.gamma.try))
Y.pred.cv<-matrix(NA,nrow=length(Y),ncol=length(lambda.gamma.try))
Y.new<- Y
}
#Without custom function
# OS.lasso: outlier-shifted lasso regression.
# Fits an initial lasso to (X, Y), estimates the noise scale as the MAD of
# the residuals, then alternates between (a) flagging observations whose
# residual exceeds sigma.est * lambda.gamma as outliers (stored in
# gamma.est) and (b) refitting the lasso on the outlier-adjusted response.
# The outlier threshold lambda.gamma is chosen by 5-fold cross-validation
# over lambda.gamma.try, using sequential (unshuffled) folds.
#
# Arguments:
#   X                - numeric predictor matrix (n x p).
#   Y                - numeric response vector of length n
#                      (assumed divisible by 5 for the sequential folds).
#   lambda.lasso.try - candidate lasso penalties passed to cv.glmnet().
#   lambda.gamma.try - candidate outlier-threshold multipliers.
# Returns (invisibly) a list with the final coefficients, the initial lasso
# fit, iteration count, noise-scale estimate, CV errors, number of detected
# outliers, the outlier effects and the selected lambda.gamma.
OS.lasso<- function(X,Y,lambda.lasso.try,lambda.gamma.try){
n<-length(Y)
Y.orgn<- Y
# initial lasso: choose the penalty by CV, then refit at lambda.min
model.for.cv<- cv.glmnet(X, Y, family="gaussian",lambda=lambda.lasso.try)
lambda.lasso.opt<- model.for.cv$lambda.min
model.est<- glmnet(X,Y,family="gaussian",lambda=lambda.lasso.opt)
fit.lasso<- predict(model.est,X,s=lambda.lasso.opt)
res.lasso<- Y - fit.lasso
# robust noise-scale estimate used to set the outlier threshold
sigma.est<- mad(Y-fit.lasso)
beta.est<- as.numeric(model.est$beta)
gamma.est<-rep(0,n)
n.fold<- 5
n.cv <- n/n.fold
CV.error2<-CV.error<-rep(NA,length(lambda.gamma.try))
Y.pred.cv<-matrix(NA,nrow=length(Y),ncol=length(lambda.gamma.try))
Y.new<- Y
# cross-validate the outlier threshold lambda.gamma
for (tt in 1:length(lambda.gamma.try))
{
# NOTE(review): gamma.est.cv is reset per threshold but not per fold, so
# shifts flagged in one fold carry over into later folds -- confirm intended.
gamma.est.cv<-rep(0,n-n.cv)
for (jj in 1:n.fold)
{
# sequential hold-out fold jj of size n.cv
sample.out.index<- (1+n.cv*(jj-1)):(n.cv*(jj))
cat("sample indices = " , sample.out.index , "\n")
X.train<- X[-sample.out.index,]
#cat("dim(X.train) = " , dim(X.train) , "\n")
Y.train<- Y[-sample.out.index]
#cat("dim(Y.train) = " , dim(Y.train) , "\n")
X.test<- X[sample.out.index,]
#cat("X.test = " , X.test , "\n")
model.train.temp<- glmnet(X.train,Y.train,family="gaussian",lambda=lambda.lasso.opt)
# coefficient vector includes the intercept (a0) in position 1
beta.pre<-beta.post<- c(model.train.temp$a0,as.numeric(model.train.temp$beta))
#cat("beta.pre = " , beta.pre , "\n")
tol<-100; n.iter <- 0
# alternate outlier flagging and lasso refitting until the coefficients
# stabilise (squared change < 1e-6) or 100 iterations
while(tol>1e-6 & n.iter<100)
{
resid.temp<- Y.train-cbind(rep(1,n-n.cv),X.train)%*%beta.pre
nonzero<-which(abs(resid.temp)>=sigma.est*lambda.gamma.try[tt])
#cat("nonzero = " , nonzero , "\n")
gamma.est.cv[nonzero]<- resid.temp[nonzero]
Y.train.new <- Y.train - gamma.est.cv
model.train.temp<- glmnet(X.train,Y.train.new,family="gaussian",lambda=lambda.lasso.opt)
beta.post <- c(model.train.temp$a0,as.numeric(model.train.temp$beta))
tol<- sum((beta.pre-beta.post)^2)
n.iter<- n.iter+1
beta.pre<-beta.post
}
# predict the held-out fold with the converged coefficients
Y.pred.cv[sample.out.index,tt] <-cbind(rep(1,n.cv),X.test)%*%beta.post
}
# squared-error and absolute-error CV criteria for this threshold
CV.error2[tt]<- mean((Y.pred.cv[,tt]-Y.orgn)^2)
CV.error[tt]<- mean(abs(Y.pred.cv[,tt]-Y.orgn))
}
lambda.gamma.opt<- lambda.gamma.try[which.min(CV.error2)]
# final fit on the full data at the selected threshold
model.opt<- glmnet(X,Y.orgn,family="gaussian",lambda=lambda.lasso.opt)
beta.pre<- beta.post<- c(model.opt$a0,as.numeric(model.opt$beta))
#cat("beta.pre2 = " , beta.pre , "\n")
tol<-100; n.iter <- 0
while(tol>1e-6 & n.iter<100)
{
resid.opt<- Y.orgn-cbind(rep(1,n),X)%*%beta.pre
#cat("resid.opt = " , resid.opt , "\n")
nonzero<-which(abs(resid.opt)>=sigma.est*lambda.gamma.opt)
#cat("nonzero = " , nonzero , "\n")
gamma.est[nonzero]<- resid.opt[nonzero]
Y.new <- Y.orgn - gamma.est
model.opt<- glmnet(X,Y.new,family="gaussian",lambda=lambda.lasso.opt)
beta.post <- c(model.opt$a0,as.numeric(model.opt$beta))
# note: convergence here uses the MEAN squared change (CV loop used sum)
tol<- mean((beta.pre-beta.post)^2)
n.iter<- n.iter+1
beta.pre<-beta.post
}
Y.fit<- cbind(rep(1,n),X)%*%beta.post
# NOTE(review): 'fit' returns the INITIAL lasso fit (fit.lasso), not the
# final outlier-adjusted fit Y.fit computed above -- confirm intended.
object<- list(coefficient=beta.post,fit=fit.lasso,iter = n.iter, sigma.est = sigma.est,CV.error=CV.error2, n.outlier=length(which(gamma.est!=0)),
gamma.est = gamma.est, lambda.opt=lambda.gamma.opt)
}
#OS lasso+
# OS.lassoPLUS: outlier-shifted lasso using the custom kfold_subsetter()
# helper for the cross-validation split.
# Fits an initial lasso to (X, Y), estimates the noise scale as the MAD of
# the residuals, then alternates between (a) flagging observations whose
# residual exceeds sigma.est * lambda.gamma as outliers (gamma.est) and
# (b) refitting the lasso on the outlier-adjusted response.  The threshold
# lambda.gamma is chosen by 5-fold cross-validation over lambda.gamma.try.
#
# Arguments:
#   X                - numeric predictor matrix (n x p).
#   Y                - numeric response vector of length n.
#   lambda.lasso.try - candidate lasso penalties passed to cv.glmnet().
#   lambda.gamma.try - candidate outlier-threshold multipliers.
# Returns (invisibly) a list with the final coefficients, fitted values,
# iteration count, noise-scale estimate, MSE summaries, number of detected
# outliers, the outlier effects and the selected lambda.gamma.
OS.lassoPLUS<- function(X,Y,lambda.lasso.try,lambda.gamma.try){
#create simulation tracker
#tracker <- as.vector(unlist(data$conditions))
#print tracker of status
# NOTE(review): only the first line of this cat() call had been commented
# out, leaving its continuation lines as bare comma-separated expressions
# and making the file fail to parse; the whole call is now commented out.
#cat("n = " , tracker[1] , " , p = " , tracker[2] ,
#    " , eta.x = " , tracker[3] , " , eta.y = " , tracker[4] ,
#    " , g = " , tracker[5] , " , h = " , tracker[6] ,
#    ";\n")
#load X, Y, p, n
#X <- data$X
#Y <- data$Y
#p <- data$conditions$p
#n <- length(Y)
n<-length(Y)
n
Y.orgn<- Y
Y.orgn
# initial lasso: choose the penalty by CV, then refit at lambda.min
model.for.cv<- cv.glmnet(X, Y, family="gaussian",lambda=lambda.lasso.try)
lambda.lasso.opt<- model.for.cv$lambda.min
model.est<- glmnet(X,Y,family="gaussian",lambda=lambda.lasso.opt)
fit.lasso<- predict(model.est,X,s=lambda.lasso.opt)
res.lasso<- Y - fit.lasso
# robust noise-scale estimate used to set the outlier threshold
sigma.est<- mad(Y-fit.lasso)
beta.est<- as.numeric(model.est$beta)
gamma.est<-rep(0,n)
K <- 5
# deterministic (non-random) k-fold split; the extra "subset" column of
# X.new tags each row with its fold
X.new <- kfold_subsetter(X , k = K , random = FALSE)
Y.new <- cbind(Y , X.new[ , "subset"])
#cat("subsets = " , X.new[ , "subset"] , "\n")#
n.cv <- n/ K
CV.error2<-CV.error<-rep(NA,length(lambda.gamma.try))
Y.pred.cv<-matrix(NA,nrow=length(Y),ncol=length(lambda.gamma.try))
# cross-validate the outlier threshold lambda.gamma
for (tt in 1:length(lambda.gamma.try))
{
# NOTE(review): gamma.est.cv is reset per threshold but not per fold, so
# shifts flagged in one fold carry over into later folds -- confirm intended.
gamma.est.cv<-rep(0,n-n.cv)
for (jj in 1:K)
{
# NOTE(review): 'subset' comes from unique() while sample.out.index
# compares against the literal fold number jj; these only agree when the
# fold labels are exactly 1..K in first-appearance order -- verify against
# kfold_subsetter().
subset <- unique(X.new[ , "subset"])[jj]
sample.out.index <- which(X.new[ , "subset"] == jj)
if(FALSE %in% (which(X.new[ , "subset"] == jj) == which(Y.new[ , 2] == jj))) {
stop("X and Y subsets do not match")
} ##return error if the x and y subset indices do not match
#cat("sample indices = " , sample.out.index , "\n")
# training rows = all folds except 'subset'; drop the fold-tag column
X.train<- X.new[X.new[ , "subset"] != subset , -ncol(X.new)]
#cat("dim(X.train) = " , dim(X.train) , "\n")
Y.train<- Y.new[Y.new[ , 2] != subset , 1]
#cat("dim(Y.train) = " , dim(Y.train) , "\n")
X.test<- X.new[X.new[ , "subset"] == subset , -ncol(X.new)]
#cat("X.test = " , X.test , "\n")
model.train.temp<- glmnet(X.train,Y.train,family="gaussian",lambda=lambda.lasso.opt)
# NOTE(review): unlike OS.lasso, the intercept (a0) is excluded here, so
# the residuals below ignore glmnet's fitted intercept -- confirm intended.
beta.pre<-beta.post<- as.numeric(model.train.temp$beta)
#cat("beta.pre = " , beta.pre , "\n")
tol<-100; n.iter <- 0
# alternate outlier flagging and lasso refitting until the coefficients
# stabilise (squared change < 1e-6) or 100 iterations
while(tol>1e-6 & n.iter<100)
{
resid.temp<- Y.train-X.train%*%beta.pre
nonzero<-which(abs(resid.temp)>=sigma.est*lambda.gamma.try[tt])
#cat("nonzero = " , nonzero , "\n")
gamma.est.cv[nonzero]<- resid.temp[nonzero]
Y.train.new <- Y.train - gamma.est.cv
model.train.temp<- glmnet(X.train,Y.train.new,family="gaussian",lambda=lambda.lasso.opt)
beta.post <- as.numeric(model.train.temp$beta)
tol<- sum((beta.pre-beta.post)^2)
n.iter<- n.iter+1
beta.pre<-beta.post
}
# predict the held-out fold with the converged coefficients
Y.pred.cv[sample.out.index,tt] <-X.test%*%beta.post
}
# squared-error and absolute-error CV criteria for this threshold
CV.error2[tt]<- mean((Y.pred.cv[,tt]-Y.orgn)^2)
CV.error[tt]<- mean(abs(Y.pred.cv[,tt]-Y.orgn))
}
lambda.gamma.opt<- lambda.gamma.try[which.min(CV.error2)]
# final fit on the full data at the selected threshold
model.opt<- glmnet(X,Y.orgn,family="gaussian",lambda=lambda.lasso.opt)
beta.pre<- beta.post<- as.numeric(model.opt$beta)
#cat("beta.pre2 = " , beta.pre , "\n")
tol<-100; n.iter <- 0
while(tol>1e-6 & n.iter<100)
{
cat("dim(X) = " , dim(X) , "\n")
cat("length(beta.pre) = " , length(beta.pre) , "\n")
resid.opt<- Y.orgn-X%*%beta.pre
#cat("resid.opt = " , resid.opt , "\n")
nonzero<-which(abs(resid.opt)>=sigma.est*lambda.gamma.opt)
#cat("nonzero = " , nonzero , "\n")
gamma.est[nonzero]<- resid.opt[nonzero]
Y.new2 <- Y.orgn - gamma.est
model.opt<- glmnet(X,Y.new2,family="gaussian",lambda=lambda.lasso.opt)
beta.post <- as.numeric(model.opt$beta)
tol<- mean((beta.pre-beta.post)^2)
n.iter<- n.iter+1
beta.pre<-beta.post
}
Y.fit<- X%*%beta.post
#store number of nonzero coefs
# fix: previously sum(beta.post), which summed the coefficient VALUES;
# the inline comment and the degrees-of-freedom use below require a COUNT
st.lad <- sum(beta.post != 0) # number nonzero
#generate MSE and sd(MSE) for model
mse.lad <- sum((Y - Y.fit) ^ 2) / (n - st.lad - 1)
sd.mse.lad <- sd((Y - Y.fit) ^ 2 / (n - st.lad - 1))
object<- list(coefficient = beta.post ,
fit = Y.fit ,
iter = n.iter ,
sigma.est = sigma.est ,
mpe = mse.lad ,
mpe.sd = sd.mse.lad ,
n.outlier = length(which(gamma.est != 0)) ,
gamma.est = gamma.est ,
lambda.opt = lambda.gamma.opt)
}
#test runs
# Exercise both estimators on the data already in the workspace.
# NOTE(review): relies on X, Y, OS.lasso and OS.lassoPLUS being defined.
# log-spaced lasso-penalty grid and linear outlier-threshold grid
lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
lambda.gamma.try <- seq(1 , 4 , length.out = 50)
set.seed(501)
OS.model.plus <- OS.lassoPLUS(X = X , Y = Y , lambda.lasso.try = lambda.try ,
                              lambda.gamma.try = lambda.gamma.try)
OS.model.plus
set.seed(501)
OS.model <- OS.lasso(X = X , Y = Y , lambda.lasso.try = lambda.try ,
                     lambda.gamma.try = lambda.gamma.try)
OS.model
#run OS lasso
# repeat with a second, linearly-spaced lasso-penalty grid for comparison
lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
lambda.lasso.try <- seq(0.01 , 0.6 , length.out = 100)
lambda.gamma.try <- seq(1 , 4 , length.out = 50)
set.seed(501)
OS.model.plus <- OS.lassoPLUS(X = X , Y = Y , lambda.lasso.try = lambda.try ,
                              lambda.gamma.try = lambda.gamma.try)
OS.model.plus
set.seed(501)
OS.model.plus2 <- OS.lassoPLUS(X = X , Y = Y , lambda.lasso.try = lambda.lasso.try ,
                               lambda.gamma.try = lambda.gamma.try)
OS.model.plus2
set.seed(501)
OS.model.plus3 <- OS.lasso(X = X , Y = Y , lambda.lasso.try = lambda.lasso.try ,
                           lambda.gamma.try = lambda.gamma.try)
OS.model.plus3
|
ef04f45d9023a86b19385e1b29306a6b4338326d
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/GDAdata/man/MexLJ.Rd
|
e9aa7aad8074c33d7acd6b592913de082d765e6b
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 710
|
rd
|
MexLJ.Rd
|
\name{MexLJ}
\alias{MexLJ}
\docType{data}
\title{
Data from the longjump final in the 1968 Mexico Olympics.
}
\description{
The best longjumps by the 16 finalists in the 1968 Mexico Olympics. Each athlete jumped up to six times, though the winner of the Gold Medal, Bob Beamon, only jumped twice.
}
\usage{data(MexLJ)}
\format{
A data frame with 16 observations on the following variable.
\describe{
\item{\code{Jump}}{Distance jumped measured in metres}
}
}
\source{
\url{http://en.wikipedia.org/wiki/Athletics_at_the_1968_Summer_Olympics_-_Men's_long_jump}
}
\examples{
data(MexLJ, package="GDAdata")
with(MexLJ, summary(Jump))
with(MexLJ, hist(Jump,breaks=seq(7.25,9,0.25)))
}
\keyword{datasets}
|
0fa15258d7fa123feff165d7a79ad2fc0ebbff3f
|
3a8bbd4346f813ba29725ebb5a3bf13e7504767d
|
/run_analysis.R
|
92a3d38756f9716caa4123f3a588dc8f10f24647
|
[] |
no_license
|
Grrrusti/Analysis.R
|
b18f9f6d01a0c2db481562bdf5f79308c6067e28
|
6d0b12899e64e1b994dcf83159754a00e501e78d
|
refs/heads/master
| 2020-05-18T12:54:31.006004
| 2014-12-21T21:38:49
| 2014-12-21T21:38:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,381
|
r
|
run_analysis.R
|
library(plyr)
# run_analysis.R: builds a tidy summary of the UCI HAR smartphone dataset.
# Downloads and unzips the raw data, merges train/test sets, keeps the
# mean/std measurements, attaches activity names, cleans variable names,
# and writes per-subject/per-activity averages to IndependentData.txt.
##1st PART
##if directory doesn't exist, creates new
if(!file.exists("./data")){
dir.create("./data")}
fileUrl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile="./data/project.zip")
# NOTE(review): setwd() changes the session's working directory as a side
# effect; all paths below are relative to ./data
setwd("./data")
unzip("project.zip", files=NULL, list=FALSE, overwrite=TRUE, junkpaths=FALSE, exdir=".", unzip="internal", setTimes=FALSE)
##reads train and test sets
trainX<- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt", header=FALSE)
trainY<- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/train/Y_train.txt", header=FALSE)
trainSubject<- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", header=FALSE)
testX<- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt", header=FALSE)
testY<- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/test/Y_test.txt", header=FALSE)
testSubject<- read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt", header=FALSE)
##reads other files
activityLabels<-read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt", col.names= c("Label", "Activity"), colClasses = c('numeric', 'character'))
features<-read.table("./getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/features.txt", colClasses = c("character"))
##Merges train and test sets to one data set
# columns: 561 features, then Subject, then ActivityId
trainSet<-cbind(cbind(trainX, trainSubject), trainY)
testSet<-cbind(cbind(testX, testSubject), testY)
fullSet<- rbind(trainSet, testSet)
##Labels two last columns
fullSetLabels<- rbind(rbind(features, c(562, "Subject")), c(563, "ActivityId"))[,2]
names(fullSet) <-fullSetLabels
##2nd PART
##Extracts measurements on the mean and standard deviation for each measurement
fullSet_Mean_Std<- fullSet[,grepl("mean|std|Subject|ActivityId", names(fullSet))]
##3rd PART
##Uses descriptive activity names to name the activities in the data set
fullSet_Mean_Std <- merge(fullSet_Mean_Std, activityLabels, by.x= 'ActivityId', by.y='Label')
##Remove ActivityId column
fullSet_Mean_Std<- fullSet_Mean_Std[,!(names(fullSet_Mean_Std)%in%c('ActivityId'))]
##4th PART
##Appropriately labels the data set with descriptive variable names.
names(fullSet_Mean_Std) <- gsub('Acc',"Acceleration",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('GyroJerk',"AngularAcceleration",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('Gyro',"AngularSpeed",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('Mag',"Magnitude",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('^t',"TimeDomain.",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('^f',"FrequencyDomain.",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('-mean()',".Mean",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('-std()',".StandardDeviation",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('Freq\\.',"Frequency.",names(fullSet_Mean_Std))
names(fullSet_Mean_Std) <- gsub('Freq$',"Frequency",names(fullSet_Mean_Std))
##5th PART
##Creates a second, independent tidy data set with the average of each variable for each activity and each subject
IndependentData <- ddply(fullSet_Mean_Std, c("Subject","Activity"), numcolwise(mean))
write.table(IndependentData, file = "IndependentData.txt")
|
4b3a5d189e1ae753eea34776500eca4eaa9f60c4
|
ae0f095b97b0481e39614267529d4feb5b3cee58
|
/man/plot.CatDynData.Rd
|
6444743b6b77ef3e6a5da6088feeee2722b7bd7a
|
[] |
no_license
|
santucofs/CatDyn
|
c1c5e66b1939c145606f06c827fc31031ed932b9
|
0c706a84f1fbfc39abf208b8e5558a67d79f52ac
|
refs/heads/master
| 2023-03-16T18:41:43.893781
| 2019-01-02T11:30:07
| 2019-01-02T11:30:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,346
|
rd
|
plot.CatDynData.Rd
|
\name{plot.CatDynData}
\alias{plot.CatDynData}
\title{
Exploratory Analysis of Catch and Effort Fisheries Data
}
\description{
Allows examining the relation between catch and effort, the marginal distributions
of catch and effort, and the time series of catch, effort, the catch spike
statistic, and mean body weight in the catch.
}
\usage{
\method{plot}{CatDynData}(x, mark, offset, hem, \dots)
}
\arguments{
\item{x}{
An object of class CatDynData.
}
\item{mark}{
Logical. If TRUE then the time step is posted on top of each point of the time
series of catch, effort, and the catch spike statistic.
}
\item{offset}{
Numeric. A vector of length 3 that positions the mark above a given distance
over the point.
}
\item{hem}{
Character. Either N (northern hemisphere) or S (southern hemisphere).
}
\item{\dots}{
Further arguments to be passed to plot(), hist().
}
}
\details{
Use NA to cancel the mark over the points of any of the three time series that can
be marked.
In the case of two-fleet models, the plot will display the data for the first
fleet, then the user needs to hit Enter to display the data for the second fleet.
}
\value{
A seven panel plot.
}
\author{
Ruben H. Roa-Ureta (ORCID ID 0000-0002-9620-5224)
}
\examples{
#See examples for CatDynFit().
}
\keyword{ ~iplot }
|
be60d17feb976a2d11d2af5b43c6a01e5fe46f6d
|
8a6c123dce13e5a0a96f258cd0fd78f32f166014
|
/man/FLNaiveBayes.Rd
|
9dfabdb12922d3a9284262dd96155b891a0eb4c4
|
[] |
no_license
|
mcetraro/AdapteR
|
c707cd5bd4918cee093e5f151248e0ed047eacda
|
4ea6ba9226ad187b0a61ab95286043c1eb5fbec3
|
refs/heads/master
| 2021-01-18T12:45:14.416907
| 2015-07-30T14:41:48
| 2015-07-30T14:41:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,122
|
rd
|
FLNaiveBayes.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/FLNaiveBayes.R
\name{FLNaiveBayes}
\alias{FLNaiveBayes}
\title{Naive Bayes Classifier}
\usage{
FLNaiveBayes(table, primary_key, response, laplace = 0, exclude = c(),
class_spec = list(), where_clause = "",
note = "From RWrapper For DBLytix")
}
\arguments{
\item{table}{an object of class \code{FLTable}}
\item{primary_key}{name of primary key column of the table mapped to \code{table}}
\item{response}{name of the dependent variable column}
\item{laplace}{indicates whether Laplacian Correction is to be used (1 for
true and 0 for false)}
\item{exclude}{vector of names of the columns which are to be excluded}
\item{class_spec}{list that identifies the value of the categorical variable
which is to be used as reference when converting to dummy binary variables}
\item{where_clause}{condition to filter out data from the table}
\item{note}{free form string that will be stored with the results, typically
used to document the purpose of the analysis}
}
\description{
Naive Bayes is a simple probabilistic classifier that applies Bayes'
theorem to compute conditional a posteriori probabilities of a categorical
class variable under the independence assumption -- the presence
(or absence) of a particular feature of a class is unrelated to the
presence (or absence) of any other feature.
}
\details{
Laplacian Correction is used to avoid the issue of zero probability
for a given attribute by adding 1 to the numerator. In order to compensate
for this addition, the denominator is also incremented by the total number
of discrete values for the attribute
}
\examples{
\dontrun{
connection <- odbcConnect("Gandalf")
db_name <- "FL_R_WRAP"
table_name <- "tblCancerData"
# Create FLTable object
Tbl <- FLTable(connection, db_name, table_name)
# Build Naive Bayes Model
NBModel <- FLNaiveBayes(Tbl, "ObsID", "BenignOrMalignant", laplace = 1, exclude = c("SampleCode"), note = "Training NB Model with Laplacian Correction")
#Fetch Model
NBModel <- FLFetch(NBModel)
#Show Model
slot(NBModel, "NBModel")
}
}
|
01e38d8e288da3655b1a9437b7422ab5b32ef34f
|
625407d36cc192d1df51593899bf7514ba9dc6ee
|
/plot4.R
|
404dcba14752059a820b8c38f60d004e8a1af5b4
|
[] |
no_license
|
SergeyBykov1/ExData_Plotting1
|
6dadb9a3fb342dd66ae9e6f2c9dd4048c717de93
|
16f248a8f822d887cc925e5acaf1daaf60e993e3
|
refs/heads/master
| 2021-01-22T14:46:16.190904
| 2014-10-15T02:28:17
| 2014-10-15T02:28:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
plot4.R
|
#
# Exploratory Data Analysis
# Course Project 1 Extension
# Plot 4
#
# Reads the UCI household power consumption data, subsets the two target
# days (2007-02-01 and 2007-02-02) and writes a 2x2 panel of time series
# directly to plot4.png.
# needs data.table package for fast reading; library() errors immediately
# if the package is missing (require() would only warn)
library(data.table)
full_data <- fread('household_power_consumption.txt', na.strings="?")
target_dates <- as.Date(c("2007-02-01", "2007-02-02"))
target_data <- full_data[as.Date(full_data$Date, format='%d/%m/%Y') %in% target_dates]
# combine Date and Time into a single POSIXct timestamp for the x axis
datetime <- as.POSIXct(
    paste(target_data$Date, target_data$Time), format = "%d/%m/%Y %T")
# construct png directly, as dev.copy results in cropped legend
png("plot4.png", width = 480, height = 480)
par(mfcol = c(2, 2)) # columnwise
# 1: global active power over time
plot(datetime, target_data$Global_active_power, type='l',
     xlab = '', ylab = 'Global Active Power')
# 2: sub-metering series. Sub_metering_1 is drawn black, _2 red, _3 blue,
# so the legend colours must follow the same order (black, red, blue);
# the previous c("black", "blue", "red") mislabelled meterings 2 and 3.
plot(datetime, target_data$Sub_metering_1, type='l',
     xlab = '', ylab = 'Energy sub metering')
lines(datetime, target_data$Sub_metering_2, type='l', col='red')
lines(datetime, target_data$Sub_metering_3, type='l', col='blue')
legend("topright", col = c("black", "red", "blue"), lwd=1, bty='n',
       legend = colnames(target_data)[7:9])
# 3: voltage over time
with(target_data, plot(datetime, Voltage, type='l'))
# 4: global reactive power over time
with(target_data, plot(datetime, Global_reactive_power, type='l'))
dev.off()
|
d5595864fe967d9ae3ee74fb62962ab0c4c2f7cb
|
f87fa55b0b30efa11806c0e0f217f89ba8aab5ab
|
/cachematrix.R
|
1724c72b5b915dadff306448ef09851c17746be7
|
[] |
no_license
|
stbnjenkins/ProgrammingAssignment2
|
1b5558fb3869e02c0b9da03179e2cc175b0f0cb7
|
140ec4f90411b7bc604da0097635eb95a6de0945
|
refs/heads/master
| 2021-01-17T22:29:14.562893
| 2015-03-12T06:10:36
| 2015-03-12T06:10:36
| 32,058,530
| 0
| 0
| null | 2015-03-12T05:18:55
| 2015-03-12T05:18:55
| null |
UTF-8
|
R
| false
| false
| 2,087
|
r
|
cachematrix.R
|
## Since calculating the inverse of a matrix can be potentially
## a time-consuming task, if the matrix is not changing and we
## are calculating its inverse many times, it might be a good
## idea to calculate the inverse only once and cached it. So
## we only recalculate the inverse matrix when the matrix is
## changed.
## The approach used in this example is creating an R object
## that can save the state of the matrix and its inverse. This
## object allows us to get and set those values. Then we have
## a function CacheSolve that works with that R object to get
## the inverse if it is already calculated, or to calculate if
## not
## Build a cache-aware wrapper around a matrix.
## The returned value is a list of four accessor closures -- get/set for
## the matrix itself and get_inverse/set_inverse for its cached inverse.
## Storing a new matrix via set() invalidates any cached inverse, so the
## cache can never go stale.
makeCacheMatrix <- function(x = matrix()) {
    # cached inverse; stays NULL until set_inverse() stores a value
    cached_inv <- NULL
    # replace the wrapped matrix and drop the now-invalid cached inverse
    set <- function(new_matrix) {
        x <<- new_matrix
        cached_inv <<- NULL
    }
    # read back the wrapped matrix
    get <- function() {
        x
    }
    # store a computed inverse in the cache
    set_inverse <- function(inv) {
        cached_inv <<- inv
    }
    # read the cached inverse (NULL if nothing cached yet)
    get_inverse <- function() {
        cached_inv
    }
    # expose the accessors; these element names are the public interface
    list(set = set,
         get = get,
         set_inverse = set_inverse,
         get_inverse = get_inverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously computed inverse is reused (announced with a message);
## otherwise the inverse is computed with solve(), stored back into the
## cache, and returned.
cacheSolve <- function(x, ...) {
    cached <- x$get_inverse()
    if (is.null(cached)) {
        # cache miss: compute the inverse, remember it for next time
        cached <- solve(x$get())
        x$set_inverse(cached)
    } else {
        # cache hit: reuse the stored inverse
        message("Getting cached data")
    }
    ## Return a matrix that is the inverse of 'x'
    cached
}
|
f5fd1b001ca2da3dc3db3e5b982156681218e15e
|
125a18c7eba0ca722425fadbfd5e7c1e1692ae86
|
/man/ker.Rd
|
d254951d71b25b7eb9c28c87d37b6e4558c5f5ff
|
[] |
no_license
|
cran/bbemkr
|
8a41414f3161d48028bfbaf4480c894d9357e6bf
|
376a966dfd17a52129d4cbaaa092d1684477674a
|
refs/heads/master
| 2020-04-17T07:51:10.950435
| 2014-04-05T00:00:00
| 2014-04-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
rd
|
ker.Rd
|
\name{ker}
\alias{ker}
\title{
Type of kernel function
}
\description{
For data that have infinite support, Gaussian kernel is suggested. For data that have [-1, 1] support, other types of kernel can be used.
}
\usage{
ker(u, kerntype = c("Gaussian", "Epanechnikov", "Quartic",
"Triweight", "Triangular", "Uniform"))
}
\arguments{
\item{u}{A numeric object}
\item{kerntype}{Type of kernel function}
}
\details{
Oftentimes we deal with numeric values of infinite support, for which the Gaussian kernel is commonly used.
However, the Epanechnikov kernel is the optimal kernel as measured by Mean Integrated Squared Error.
The difference among kernel functions is minor, but the influence of the bandwidths is vital.
}
\value{
Kernel value
}
\references{
J. Fan and I. Gijbels (1996) Local Polynomial Modelling and Its Application. Chapman and Hall, London.
Q. Li and J. Racine (2007) Nonparametric Econometrics: Theory and Practice. Princeton University Press, New Jersey.
}
\author{
Han Lin Shang
}
\seealso{
\code{\link[bbemkr]{np_gibbs}}, \code{\link[bbemkr]{gibbs_admkr_nw}}, \code{\link[bbemkr]{gibbs_admkr_erro}}
}
\keyword{methods}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.