blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e95cd72d51222c783495af2872a36574e6408755 | c2e4f288cdd6ac789bb61aa1fd99eaf83944b139 | /app.R | 08b39f346299b4932df66260384500e083eca9b6 | [] | no_license | Josh-Myers/WAI-Norms | 0d749ef51a5f2af3ab33c2500e5746d0a0dafceb | e7cfca68dd01918b496c233fea3c3b945d4fbb2b | refs/heads/master | 2020-04-06T17:21:08.627796 | 2018-11-15T05:39:39 | 2018-11-15T05:39:39 | 157,655,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,520 | r | app.R | library(shiny)
library(dplyr)
library(ggplot2)
library(DT)
# load data frames
# median by age for each wai measure
# (one file per frequency resolution; wai.list keeps them ordered finest to
# coarsest -- the server's freq.res switch selects by list position)
wai.24 = readRDS("wai.24.median.rds")
wai.12 = readRDS("wai.12.median.rds")
wai.6 = readRDS("wai.6.median.rds")
wai.3 = readRDS("wai.3.median.rds")
wai.2 = readRDS("wai.2.median.rds")
wai.list = list(wai.24, wai.12, wai.6, wai.3, wai.2)
# ears that were seen at all ages
# (columns renamed to the codes S1L..S8L referenced by the
# Individual Results tab of the server)
abs.all.3 = readRDS("abs.all.3.rds")
mag.all.3 = readRDS("mag.all.3.rds")
pha.all.3 = readRDS("pha.all.3.rds")
names.all = c("age", "frequency", "S1L", "S2L", "S3R", "S4R", "S5R", "S6L", "S7R", "S8L")
names(abs.all.3) = names.all
names(mag.all.3) = names.all
names(pha.all.3) = names.all
# normative tables
# (each .rds holds a two-element list: element 1 pooled, element 2 ethnic
# specific -- see the norms.df reactive in the server)
norms.24 = readRDS("norms.list.24.rds")
norms.12 = readRDS("norms.list.12.rds")
norms.6 = readRDS("norms.list.6.rds")
norms.3 = readRDS("norms.list.3.rds")
norms.2 = readRDS("norms.list.2.rds")
# Global ggplot theme shared by all plots in the app.
theme_set(theme_bw(base_size = 16))
# ui ----
# Shiny user interface: four tabs -- group medians, individual longitudinal
# results, downloadable normative tables, and an About page.
ui <- fluidPage(
  headerPanel("WAI Norms"),
  tabsetPanel(
    # Median WAI results by age, for a chosen measure and frequency resolution.
    tabPanel("Group Averages", fluid = TRUE,
             sidebarLayout(
               sidebarPanel(
                 selectInput(inputId = "wai", label = "WAI measure",
                             choices = c("A", "|Y|", "Y phase", "g", "b", "|Z|", "Z phase", "r", "x", "normalized |Y|", "normalized |Z|")),
                 selectInput(inputId = "freq.res", label = "Frequency resolution",
                             choices = c("1/2 octave", "1/3 octave", "1/6 octave", "1/12 octave", "1/24 octave"))
               ),
               mainPanel(br(), plotOutput("group.plot"), br())
             )),
    # Longitudinal results for the eight infants seen at every follow-up.
    tabPanel("Individual Results", fluid = TRUE,
             sidebarLayout(
               sidebarPanel(
                 selectInput(inputId = "sub", label = "Subject", choices = c(1:8)),
                 selectInput(inputId = "measure", label = "WAI measure",
                             choices = c("A", "|Y|", "Y phase"))
               ),
               mainPanel(br(), plotOutput("ind.plot"), br())
             )
    ),
    # Percentile tables, optionally ethnic specific, with CSV download.
    tabPanel("Normative Data", fluid = TRUE,
             sidebarLayout(
               sidebarPanel(selectInput(inputId = "f.res", label = "Frequency resolution",
                                        choices = c("1/2 octave", "1/3 octave", "1/6 octave", "1/12 octave", "1/24 octave")),
                            selectInput(inputId = "ethnicity", label = "Ethnic Specific", choices = c("No",
                                                                                                      "Yes")),
                            downloadButton("download", label = "Download data")),
               mainPanel(br(), dataTableOutput("table"), br())
             )
    ),
    # Static background text, references and abbreviations.
    tabPanel("About", fluid = TRUE,
             br(),
             p("This interactive web application accompanies the article 'Longitudinal Development of Wideband
               Absorbance and Admittance Through Infancy' (Myers et al., 2018). The study longitudinally measured wideband acoustic immittance (WAI) on
               218 infants at birth, 6 months, 12 months, and 18 months of age. Results from ears with normal middle ear function were included in the study,
               assessed using distortion product otoacoustic emissions and tympanometry. High-frequency (1000 Hz) tympanometry was used at birth and
               6 months, and low-frequency (226 Hz) tympanometry at 12 and 18 months. Refer to the article for further information about the study."),
             p("The 'Group Averages' tab shows median WAI results by age.
               You can choose the frequency resolution and the WAI measure to display from the drop down menus."),
             p("The 'Individual Results' tab presents results from individual infants who attended all four of the follow up appointments.
               Select the subject and WAI measure from the menus. These results are presented at 1/3 octave frequency resolution."),
             p("The 'Normative Data' tab provides the group 2.5, 5, 25, 50, 75, 95, and 97.5 percentile results at each frequency for the various WAI
               measures. You can choose the frequency resolution and whether or not you want the results to be ethnic specific (Caucasian and non-Caucasian).
               The selected results can be downloaded as a spreadsheet with the 'Download data' button."),
             p("If you have any questions or experience issues using the app please email myers.josh@gmail.com."),
             br(),
             h4("References"),
             p("Myers, J., Kei, J., Aithal, S., Aithal, V., Driscoll, C., Khan, A., Manuel, A., Joseph, A., Malicka, A. N. (2018).
               Longitudinal development of wideband absorbance and admittance through infancy. Manuscript submitted for publication."),
             br(),
             h4("Abbreviations"),
             p("A = absorbance"),
             p("b = susceptance"),
             p("g = conductance"),
             p("r = resistance"),
             p("WAI = wideband acoustic immittance"),
             p("x = reactance"),
             p("|Y| = admittance magnitude"),
             p("Y phase = admittance phase angle"),
             p("|Z| = impedance magnitude"),
             p("Z phase = impedance phase angle"),
             br()
    )
  )
)
# Shiny server: builds the three reactive data pipelines (group medians,
# individual longitudinal results, normative tables) and their outputs.
server <- function(input, output) {
# ---- Group Averages tab ----
# Median data frame for the selected frequency resolution; wai.list is
# ordered finest to coarsest (1/24, 1/12, 1/6, 1/3, 1/2 octave).
df = reactive({
switch(input$freq.res,
"1/24 octave" = as.data.frame(wai.list[1]),
"1/12 octave" = as.data.frame(wai.list[2]),
"1/6 octave" = as.data.frame(wai.list[3]),
"1/3 octave" = as.data.frame(wai.list[4]),
"1/2 octave" = as.data.frame(wai.list[5])
)
})
# Plotting data for the selected WAI measure: the chosen column plus
# frequency and age.
wai = reactive({
wai =
switch(input$wai,
"A" = df()$A,
"|Y|" = df()$Y,
"Y phase" = df()$Ypha,
"g" = df()$G,
"b" = df()$B,
"|Z|" = df()$Z,
"Z phase" = df()$Zpha,
"r" = df()$R,
"x" = df()$X,
"normalized |Y|" = df()$Ynorm,
"normalized |Z|" = df()$Znorm
)
frequency = df()$Frequency
age = df()$age
cbind.data.frame(wai, frequency, age)
})
# y-axis label (measure name plus unit) for the selected measure.
wai.names = reactive({
switch(input$wai,
"A" = "A",
"|Y|" = "|Y|, millimho",
"Y phase" = "Y phase, degrees",
"g" = "g, millimho",
"b" = "b, millimho",
"|Z|" = "|Z|, milliohm",
"Z phase" = "Z phase, degrees",
"r" = "r, milliohm",
"x" = "x, milliohm",
"normalized |Y|" = "Normalized |Y|",
"normalized |Z|" = "Normalized |Z|"
)
})
# One line per age group, frequency on a log10 x axis.
output$group.plot <- renderPlot({
ggplot(wai(), aes(x=frequency, y=wai, group=age, colour=age)) +
geom_line() +
xlab("Frequency, Hz") +
ylab(wai.names()) +
scale_x_log10(expand=c(0, 0), breaks=c(250, 500, 1000, 2000, 4000, 8000)) +
theme(aspect.ratio=1/2) +
theme(legend.title=element_blank())
})
# ---- Individual Results tab ----
# input$sub ("1".."8") selects one column of the per-subject data frames
# (column names like "S1L" appear to encode subject number and ear -- see
# names.all at the top of the file).  One reactive per WAI measure.
abs.ind = reactive({
wai =
switch(input$sub,
"1" = abs.all.3$S1L,
"2" = abs.all.3$S2L,
"3" = abs.all.3$S3R,
"4" = abs.all.3$S4R,
"5" = abs.all.3$S5R,
"6" = abs.all.3$S6L,
"7" = abs.all.3$S7R,
"8" = abs.all.3$S8L
)
frequency = abs.all.3$frequency
age = abs.all.3$age
cbind.data.frame(wai, frequency, age)
})
mag.ind = reactive({
wai =
switch(input$sub,
"1" = mag.all.3$S1L,
"2" = mag.all.3$S2L,
"3" = mag.all.3$S3R,
"4" = mag.all.3$S4R,
"5" = mag.all.3$S5R,
"6" = mag.all.3$S6L,
"7" = mag.all.3$S7R,
"8" = mag.all.3$S8L
)
frequency = mag.all.3$frequency
age = mag.all.3$age
cbind.data.frame(wai, frequency, age)
})
pha.ind = reactive({
wai =
switch(input$sub,
"1" = pha.all.3$S1L,
"2" = pha.all.3$S2L,
"3" = pha.all.3$S3R,
"4" = pha.all.3$S4R,
"5" = pha.all.3$S5R,
"6" = pha.all.3$S6L,
"7" = pha.all.3$S7R,
"8" = pha.all.3$S8L
)
frequency = pha.all.3$frequency
age = pha.all.3$age
cbind.data.frame(wai, frequency, age)
})
# Dispatch to the data frame of the selected individual measure.
measure = reactive({
switch(input$measure,
"A" = abs.ind(),
"|Y|" = mag.ind(),
"Y phase" = pha.ind()
)
})
wai.names.ind = reactive({
switch(input$measure,
"A" = "A",
"|Y|" = "|Y|, millimho",
"Y phase" = "Y phase, degrees")
})
output$ind.plot <- renderPlot({
ggplot(measure(), aes(x=frequency, y=wai, group=age, colour=age)) +
geom_line() +
xlab("Frequency, Hz") +
ylab(wai.names.ind()) +
scale_x_log10(expand=c(0, 0), breaks=c(250, 500, 1000, 2000, 4000, 8000)) +
theme(aspect.ratio=1/2) +
theme(legend.title=element_blank())
})
# ---- Normative Data tab ----
# Norms list for the selected frequency resolution.
norms.list = reactive({
switch(input$f.res,
"1/24 octave" = norms.24,
"1/12 octave" = norms.12,
"1/6 octave" = norms.6,
"1/3 octave" = norms.3,
"1/2 octave" = norms.2
)
})
# Element 1 of each norms list is pooled, element 2 is ethnic specific.
norms.df = reactive({
switch(input$ethnicity,
"No" = as.data.frame(norms.list()[1]),
"Yes" = as.data.frame(norms.list()[2])
)
})
# Interactive table: Percentile column to 3 significant figures, all other
# numeric columns (everything past the first two) rounded to 2 decimals.
output$table = renderDataTable({
datatable(norms.df(), options = list(lengthMenu = list(c(50, 100, 200, -1), c('50', '100', '200', 'All')), pageLength = 100), rownames = FALSE,
class = 'white-space: nowrap stripe hover') %>% formatSignif("Percentile", 3) %>% formatRound( -c(1:2), 2)
})
# CSV download of whatever norms table is currently selected.
output$download <- downloadHandler(
filename = function(){"WAI_Norms.csv"},
content = function(fname){
write.csv(norms.df(), fname, row.names = F)
}
)
}
# Run the application
# (standard Shiny entry point combining the ui and server defined above)
shinyApp(ui = ui, server = server)
|
981f2102412610d4943cddfc7acee4f794995e46 | b64fdb45e11c5d8717727b89d1361cbfd3943f1e | /man/condense.Rd | 7ab5e523cd765317de448a1f6709448dc979be25 | [] | no_license | garrettgman/ggplyr | 4c11194432d96557a02cab69bb579f791e4105d5 | 34a2861f875a9d30949a6fed737b40b742d1fba8 | refs/heads/master | 2016-09-06T02:47:39.162635 | 2012-06-21T16:33:04 | 2012-06-21T16:33:04 | 4,385,779 | 21 | 5 | null | 2012-06-21T16:39:19 | 2012-05-20T16:24:04 | R | UTF-8 | R | false | false | 332 | rd | condense.Rd | \name{condense}
\alias{condense}
\title{Reduce a single-valued vector to a single element}
\usage{
condense(var)
}
\arguments{
\item{var}{a vector}
}
\value{
a vector of length one if var only contains one unique
value, var otherwise
}
\description{
Reduce a single-valued vector to a single element
}
\keyword{internal}
|
8d7c5cd4473144dffd43c32f95095185149fa5ac | 3ed24cf1d44b746f91b04e79547975a8d52ceeae | /R/class-feature.R | b393a93cfdd418e7eac1233f80e7618c84b8b176 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | 52North/sensorweb4R | 2a420c6a86554da44ef0917543dac862475b968b | b4f9d19df2421284ce83f975232b1b0c133ec924 | refs/heads/master | 2021-07-20T12:54:07.561001 | 2020-03-24T18:48:36 | 2020-03-24T18:48:36 | 24,712,323 | 8 | 7 | Apache-2.0 | 2021-02-02T16:13:50 | 2014-10-02T08:43:07 | R | UTF-8 | R | false | false | 1,002 | r | class-feature.R | #' @include virtual-class-domain-resource.R
NULL
#' Feature
#'
#' Represents a feature.
#'
#' @family API Resources
#' @author Christian Autermann \email{c.autermann@@52north.org}
#' @rdname Feature-class
#' @name Feature-class
#' @export
setClass("Feature",
contains = "DomainResource")
#' @export
#' @describeIn Feature-class Checks whether \code{x} is a \code{Feature}.
is.Feature <- function(x) is(x, "Feature")
#' @export
#' @describeIn Feature-class Coerces \code{x} into a \code{Feature}.
as.Feature <- function(x) as(x, "Feature")
# Class unions allowing a value to be either a Feature or a plain character
# id / NULL (presumably used for slot types elsewhere in the package --
# not visible in this file).
setClassUnion("Feature_or_characters",
c("Feature", "character"))
setClassUnion("Feature_or_NULL",
c("Feature", "NULL"))
#' @inheritParams DomainResource
#' @export
#' @describeIn Feature-class Constructs a new \code{Feature}.
Feature <- function(...)
DomainResource(type = "Feature", ...)
# Coercions: a character is treated as a Feature id; a list is merged via
# concat.list (defined elsewhere in the package).
setAs("character", "Feature", function(from) Feature(id = from))
setAs("list", "Feature", function(from) concat.list(from))
da92d37868c9ab33d94d1acd6a298df21a28ae11 | 448dcc7700ae0fef08d60272c607d51cbb7eec80 | /src/model/NumberOfOscars2016.R | 5a39df85fbc56a3be7c9f4eda7d377fcfaab6d2f | [] | no_license | LeslieTanner/OscarPredictor | 1fc18d494cc1099375f4d03606f4050bafebcb0b | 4f5968385fd64ff6e72c9d5906fb990c3038062d | refs/heads/master | 2020-04-06T06:34:51.531510 | 2016-06-10T16:28:19 | 2016-06-10T16:28:19 | 47,720,106 | 0 | 0 | null | 2015-12-09T21:30:00 | 2015-12-09T21:30:00 | null | UTF-8 | R | false | false | 18,365 | r | NumberOfOscars2016.R | library(leaps)
library(stringr)
library(lubridate)
#library(ROCR)
##############
# Load the cleaned movie data sets into the global environment:
# `movieDFClean` (historical films used for modelling) and
# `movieDFClean2016` (the 2016 candidates to score later in the script).
dataDir <- "../../data/"
load(paste0(dataDir,"movieDFClean.RData"))
load(paste0(dataDir,"movieDFClean2016.RData"))
#TODO
# Fit a binomial logistic regression on `train.data` and return the F1 score
# of its thresholded class predictions on `test.data`.
#
#   train.data  data.frame used to fit the model
#   test.data   data.frame the fitted model is evaluated on
#   regressors  character vector of predictor column names
#   response    name of the 0/1 response column
#   threshold   predicted probabilities >= threshold are classified as 1
#
# Returns the F1 score; NaN when precision and recall are both undefined
# (e.g. no positive predictions and no positive actuals).
F1Score <- function(train.data, test.data, regressors, response, threshold = 0.5) {
  # Backtick-quote the regressor names so non-syntactic column names survive
  # formula construction.
  model.formula <- as.formula(paste0(response, " ~ `", paste0(regressors, collapse = "` + `"), "`"))
  # NOTE: local variable renamed from `glm` to `fit` -- the original name
  # shadowed stats::glm.
  fit <- glm(model.formula, data = train.data, family = "binomial")
  predictions <- ifelse(predict(fit, newdata = test.data, type = "response") >= threshold, 1, 0)
  actuals <- test.data[, response]
  # Confusion-matrix counts (true negatives are not needed for F1).
  FP <- sum(actuals == 0 & predictions == 1)
  FN <- sum(actuals == 1 & predictions == 0)
  TP <- sum(actuals == 1 & predictions == 1)
  Pre <- TP / (TP + FP)  # precision
  Rec <- TP / (TP + FN)  # recall
  2 * ((Pre * Rec) / (Pre + Rec))
}
# Fit a binomial logistic regression on trainData and score its thresholded
# predictions on testData with the F1 measure.  Probabilities strictly
# greater than `threshold` are classified as positive (note: F1Score above
# uses >=, this function uses >).
F1Leslie <- function(trainData, testData, regressors, response, threshold=0.5) {
  rhs <- paste0("`", regressors, "`", collapse = " + ")
  model <- glm(as.formula(paste0(response, " ~ ", rhs)),
               data = trainData, family = "binomial")
  predicted <- as.integer(predict(model, newdata = testData, type = "response") > threshold)
  observed <- testData[, response]
  tp <- sum(predicted == 1 & observed == 1)
  fp <- sum(predicted == 1 & observed == 0)
  fn <- sum(predicted == 0 & observed == 1)
  precision <- tp / (tp + fp)
  recall <- tp / (tp + fn)
  2 * ((precision * recall) / (precision + recall))
}
#####
# Binary response: 1 if the film won at least one Oscar.
movieDFClean$WonOscar <- ifelse(movieDFClean$NumberOfOscars>0,1,0)
#######
# add a factor for quarters
# ReleaseDate strings may span several lines; keep only the first line,
# then try progressively coarser date formats (day/month/year, month/year,
# year only), filling NAs from each fallback in turn.
dates <- movieDFClean$ReleaseDate
dates <- str_trim(str_replace(dates,"([^\n]+)\\n.*","\\1"))
dates1 <- as.Date(strptime(dates, "%d %B %Y"))
dates2 <- as.Date(strptime(dates, "%B %Y"))
dates3 <- as.Date(strptime(dates, "%Y"))
dates1[is.na(dates1)] <- dates2[is.na(dates1)]
dates1[is.na(dates1)] <- dates3[is.na(dates1)]
quarters <- quarter(dates1)
movieDFClean$ReleaseQuarter = as.factor(quarters)
######
# somehow we ended up with NAs for pg13
movieDFClean$RatingPG13[is.na(movieDFClean$RatingPG13)] = 0
#####
# names for regressor-response pair plots
# NOTE(review): `names` shadows base::names for the rest of the script
# (function calls still resolve to the base function, but it is confusing).
names <- names(movieDFClean)
names <- setdiff(names, c("MovieURL","ReleaseYear","MovieTitle",
"ReleaseDate","NumberOfOscars","Nominations",
"RatingNA","AspectRatio.NA","BoxOfficeGross2012"))
response = "WonOscar"
#movieDFClean <- movieDFClean[,setdiff(names(movieDFClean), names)]
#######
# add in squares of numerical variables
# (only non-factor columns whose range exceeds 2, which skips 0/1
# indicator columns)
for(name in names){
if(!is.factor(movieDFClean[[name]])){
r = range(movieDFClean[[name]], na.rm = TRUE)
if(r[2]- r[1] > 2){
movieDFClean[[paste0(name,"Sq")]] = movieDFClean[[name]]^2
}
}
}
names(movieDFClean)
#######
# Take out NA's
# Keep only rows complete in the candidate regressors.
summary(movieDFClean)
completeIndices <- complete.cases(movieDFClean[,names])
sum(complete.cases(movieDFClean[,names]))
movieDFClean <- movieDFClean[completeIndices,]
dim(movieDFClean)
#shorts ended up all gone
movieDFClean <- movieDFClean[,setdiff(names(movieDFClean), "Short")]
#######
# Separate Data into train and test
# Hold out the 2012 releases as the test set.
indices <- movieDFClean$ReleaseYear==2012 #| movieDFClean$ReleaseYear==2011
trainData <- movieDFClean[!indices,]
testData <- movieDFClean[indices,]
#######
# Run Full Model
# Logistic regression of WonOscar on every remaining candidate regressor.
# NOTE(review): the models below are fit on ALL of movieDFClean (train and
# test rows), so the variable selection itself has seen the 2012 test year;
# F1Score refits on trainData only, but selection may still be optimistic.
names <- setdiff(names,c(response,"Short"))
fullModelFormula <- as.formula(paste0(response," ~ `",paste0(names, collapse = "` + `"),'`'))
fullModel <- glm(fullModelFormula,data=movieDFClean,family="binomial")
summary(fullModel)
F1Score(trainData,testData,names,response,threshold = 0.6)
#F1Score = 0.6153846, threshold = 0.6 , above and below this decreased F1 score.
#Analysis of Deviance Residuals
res.dev <- fullModel$deviance / fullModel$df.residual
res.dev #0.2156
p <- fullModel$deviance / qchisq(0.975,fullModel$df.residual)
p #0.2063
# Given that the ratio of deviance/df is not close to unity although the p-value is higher than 0.05, we do have cause to question the adequacy of the model
#######
# Extract Significant regressors from full model + OpeningWeekend2012
# Keep terms with p < .05, strip the backticks added during formula
# construction, drop the intercept (element 1), and force in opening
# weekend gross.
table <- summary(fullModel)$coefficients
namesSignifFullModel <- rownames(table)[table[,"Pr(>|z|)"] < .05]
namesSignifFullModel <- str_replace_all(namesSignifFullModel, '`', "")
namesSignifFullModel <- c(namesSignifFullModel[2:length(namesSignifFullModel)],"BoxOfficeOpeningWeekend2012")
namesSignifFullModel
######
# Logistic Regression only on regressors significant on previous model
response = "WonOscar"
signifFullModelFormula <- as.formula(paste0(response," ~ `",paste0(namesSignifFullModel, collapse = "` + `"),'`'))
signifFullModel <- glm(signifFullModelFormula,data=movieDFClean,family="binomial")
summary(signifFullModel)
F1Score(trainData,testData,namesSignifFullModel,response, threshold = 0.6)
#F1Score = 0.72, threshold = 0.6
#Analysis of Deviance Residuals
res.dev <- signifFullModel$deviance / signifFullModel$df.residual
res.dev #0.2276
p <- signifFullModel$deviance / qchisq(0.975,signifFullModel$df.residual)
p #0.2179
# Given that the ratio of deviance/df is not close to unity although the p-value is higher than 0.05, we do have cause to question the adequacy of the model
###########
# Extract Significant regressors from previous model
# Second pruning pass: again keep p < .05 terms, minus the intercept.
table <- summary(signifFullModel)$coefficients
namesSignifFullModel2 <- rownames(table)[table[,"Pr(>|z|)"] < .05]
namesSignifFullModel2 <- str_replace_all(namesSignifFullModel2, '`', "")
namesSignifFullModel2 <- namesSignifFullModel2[2:length(namesSignifFullModel2)]
namesSignifFullModel2
######
# Logistic Regression on the significant variables
response = "WonOscar"
signifFullModelFormula2 <- as.formula(paste0(response," ~ `",paste0(namesSignifFullModel2, collapse = "` + `"),'`'))
signifFullModel2<- glm(signifFullModelFormula2,data=movieDFClean,family="binomial")
summary(signifFullModel2)
F1Score(trainData,testData,namesSignifFullModel2,response, threshold = 0.6)
# F1Score = 0.72, we simplified the model, but didnt lose any predictive power.
#Analysis of Deviance Residuals
res.dev <- signifFullModel2$deviance / signifFullModel2$df.residual
res.dev #0.2281
p <- signifFullModel2$deviance / qchisq(0.975,signifFullModel2$df.residual)
p #0.2183
# Given that the ratio of deviance/df is not close to unity although the p-value is higher than 0.05, we do have cause to question the adequacy of the model
#######
# Predictions for Oscars in 2012
# Score the held-out 2012 films with the reduced model, rank by predicted
# win probability, then build a confusion matrix at a 0.5 cutoff.
predictions <- predict(signifFullModel2,newdata=testData,type="response")
testData$predictions <- predictions
testData <- testData[order(testData$predictions,decreasing=TRUE),]
testData[1:20,c("MovieTitle","predictions")]
testData$binaryPrediction <- ifelse(testData$predictions>=0.5,1,0)
testData$TN <- ifelse(testData$WonOscar==0 & testData$binaryPrediction==0, 1, 0)
testData$FP <- ifelse(testData$WonOscar==0 & testData$binaryPrediction==1, 1, 0)
testData$FN <- ifelse(testData$WonOscar==1 & testData$binaryPrediction==0, 1, 0)
testData$TP <- ifelse(testData$WonOscar==1 & testData$binaryPrediction==1, 1, 0)
sum(testData$TN) #215
sum(testData$FN) #2
sum(testData$TP) #10
sum(testData$FP) #5
Precision = sum(testData$TP)/(sum(testData$TP)+sum(testData$FP))
# 0.6666667
Recall = sum(testData$TP)/(sum(testData$TP)+sum(testData$FN))
# 0.8333333
Accuracy = (sum(testData$TP) + sum(testData$TN))/(sum(testData$TP)+sum(testData$FP)+sum(testData$TN)+sum(testData$FN))
# 0.9698276
#######
# Legit regressors
# Refit without IMDBRating and NumberOfUsersRated (presumably because they
# are only known after release -- NOTE(review): confirm intent), then
# repeat the 2012 evaluation with this model.
response = "WonOscar"
namesSignifFullModel3 <- setdiff(namesSignifFullModel2,c("IMDBRating","NumberOfUsersRated"))
signifFullModelFormula3 <- as.formula(paste0(response," ~ `",paste0(namesSignifFullModel3, collapse = "` + `"),'`'))
signifFullModel3<- glm(signifFullModelFormula3,data=movieDFClean,family="binomial")
summary(signifFullModel3)
F1Score(trainData,testData,namesSignifFullModel3,response, threshold = 0.6)
# Predictions for Oscars in 2012
predictions <- predict(signifFullModel3,newdata=testData,type="response")
testData$predictions <- predictions
testData <- testData[order(testData$predictions,decreasing=TRUE),]
testData[1:20,c("MovieTitle","predictions")]
testData$binaryPrediction <- ifelse(testData$predictions>=0.5,1,0)
testData$TN <- ifelse(testData$WonOscar==0 & testData$binaryPrediction==0, 1, 0)
testData$FP <- ifelse(testData$WonOscar==0 & testData$binaryPrediction==1, 1, 0)
testData$FN <- ifelse(testData$WonOscar==1 & testData$binaryPrediction==0, 1, 0)
testData$TP <- ifelse(testData$WonOscar==1 & testData$binaryPrediction==1, 1, 0)
sum(testData$TN) #211
sum(testData$FN) #5
sum(testData$TP) #7
sum(testData$FP) #9
Precision = sum(testData$TP)/(sum(testData$TP)+sum(testData$FP))
# 0.4375
Recall = sum(testData$TP)/(sum(testData$TP)+sum(testData$FN))
# 0.5833333
Accuracy = (sum(testData$TP) + sum(testData$TN))/(sum(testData$TP)+sum(testData$FP)+sum(testData$TN)+sum(testData$FN))
# 0.9396552
##############
# Compare with Actual Oscar Winners of 2012. See what our prediction was for actual winners:
winners2012 <- c("Lincoln", "Les Misérables", "Django Unchained", "Zero Dark Thirty", "Skyfall",
"Argo", "Silver Linings Playbook", "Amour", "Life of Pi", "Anna Karenina", "Brave",
"Curfew", "Inocente", "Paper Man", "Searching for Sugar Man")
# which(testData$MovieTitle == winners2012[1])
# NOTE(review): the row indices below are hard-coded positions of the 2012
# winners within the probability-sorted testData and will silently break if
# the ordering (or the model upstream) changes.
testData[c(1,3,4,5,7,8,13,17,19,27,33,49), c("MovieTitle", "predictions")]
## Logistic Regression figure
plot(predictions)
######### Oscar winners for 2016.... Guess:
# Musical column is filled with zeros before predicting, presumably because
# the model formula references it and movieDFClean2016 lacks it -- TODO confirm.
movieDFClean2016$Musical <- 0
predictions <- predict(signifFullModel3,newdata=movieDFClean2016,type="response")
movieDFClean2016$predictions <- predictions
movieDFClean2016 <- movieDFClean2016[order(movieDFClean2016$predictions,decreasing=TRUE),]
movieDFClean2016 <- as.data.frame(movieDFClean2016)
movieDFClean2016[, c("MovieTitle","predictions")]
# Next, categorize by actual nomination category:
names(movieDFClean2016)
# Collect titles, URLs and model predictions as plain character columns
# (the original cbind/as.data.frame/as.character round trip coerced all
# three to character, so we build the character columns directly).
df <- data.frame(
  MovieTitle  = as.character(movieDFClean2016$MovieTitle),
  MovieURL    = as.character(movieDFClean2016$MovieURL),
  predictions = as.character(movieDFClean2016$predictions),
  stringsAsFactors = FALSE
)
# 2016 Oscar nominees keyed by award category.  One 0/1 indicator column
# per category is appended to df below; the LIST ORDER fixes the column
# order, which downstream code relies on (names(df[, 4:27])).
nominees2016 <- list(
  LeadActor = c("Trumbo", "The Martian", "The Revenant", "Steve Jobs", "The Danish Girl"),
  SupportActor = c("The Big Short", "The Revenant", "Spotlight", "Bridge of Spies", "Creed"),
  LeadActress = c("Carol", "Room", "Joy", "45 Years", "Brooklyn"),
  SupportActress = c("The Hateful Eight", "Carol", "Spotlight", "The Danish Girl", "Steve Jobs"),
  Animated = c("Anomalisa", "Boy & the World", "Inside Out", "Shaun the Sheep Movie", "Omoide no Mânî"),
  BestPicture = c("The Big Short", "Bridge of Spies", "Brooklyn", "Mad Max: Fury Road",
                  "The Martian", "The Revenant", "Room", "Spotlight"),
  Cinematography = c("Carol", "The Hateful Eight", "Mad Max: Fury Road", "The Revenant", "Sicario"),
  Costume = c("Carol", "Cinderella", "The Danish Girl", "Mad Max: Fury Road", "The Revenant"),
  Director = c("The Big Short", "Mad Max: Fury Road", "The Revenant", "Room", "Spotlight"),
  Editing = c("The Big Short", "Mad Max: Fury Road", "The Revenant", "Spotlight",
              "Star Wars: Episode VII - The Force Awakens"),
  # Foreign language film
  Foreign = c("Embrace of the Serpent", "Mustang", "Saul fia", "Theeb", "Krigen"),
  # Makeup and hairstyling
  MakeupHair = c("Mad Max: Fury Road",
                 "The 100-Year-Old Man Who Climbed Out the Window and Disappeared",
                 "The Revenant"),
  ProductionDesign = c("Bridge of Spies", "The Danish Girl", "Mad Max: Fury Road",
                       "The Martian", "The Revenant"),
  SoundEditing = c("Mad Max: Fury Road", "The Martian", "The Revenant", "Sicario",
                   "Star Wars: Episode VII - The Force Awakens"),
  SoundMixing = c("Bridge of Spies", "Mad Max: Fury Road", "The Martian", "The Revenant",
                  "Star Wars: Episode VII - The Force Awakens"),
  VisualEffects = c("Ex Machina", "Mad Max: Fury Road", "The Martian", "The Revenant",
                    "Star Wars: Episode VII - The Force Awakens"),
  Music = c("Bridge of Spies", "Carol", "The Hateful Eight", "Sicario",
            "Star Wars: Episode VII - The Force Awakens"),
  # Original screenplay
  Screenplay = c("Bridge of Spies", "Ex Machina", "Inside Out", "Spotlight", "Straight Outta Compton"),
  # Documentary (short)
  DocShort = c("Body Team 12", "Chau, beyond the lines", "Claude Lanzmann: Spectres of the Shoah",
               "A Girl in the River: The Price of Forgiveness", "Last Day of Freedom"),
  # Documentary (feature)
  Documentary = c("Amy", "Cartel Land", "The Look of Silence", "What Happened, Miss Simone?",
                  "Winter on Fire: Ukraine's Fight for Freedom"),
  # Short film (animated)
  ShortAnimated = c("Historia de un oso", "Prologue", "Sanjay's Super Team",
                    "Mi ne mozhem zhit bez kosmosa", "World of Tomorrow"),
  # Short film (live action)
  ShortLive = c("Ave Maria", "Day One", "Alles wird gut", "Shok", "Stutterer"),
  Song = c("Fifty Shades of Grey", "Racing Extinction", "Youth", "The Hunting Ground", "Spectre"),
  # Adapted screenplay
  Adaptation = c("The Big Short", "Brooklyn", "Carol", "The Martian", "Room")
)
# Append one 0/1 indicator column per category.
for (category in names(nominees2016)) {
  df[[category]] <- ifelse(df$MovieTitle %in% nominees2016[[category]], 1, 0)
}
# The 24 award-category indicator columns (columns 4 through 27 of df).
n <- names(df[,4:27])
# # Factorize Oscar Categories
# for (i in 1:length(n)) {
# print(paste("Factorizing:", n[i]))
# df[ ,n[i]] <- factor(df[ ,n[i]])
# }
# df$MovieTitle[which((df$LeadActor) == 1)]
#file1 <- paste0(dataDir, "2016CategoryPreds.txt")
#sink(file = file1, append = FALSE)
# For each award category, print the nominated titles together with the
# model's win-probability predictions.
for (i in 1:length(n)) {
print(paste("Oscar Category:", n[i]))
a <- df$MovieTitle[which(df[ , n[i]] == 1)]
b <- df$predictions[which(df[ , n[i]] == 1)]
df2 <- as.data.frame(cbind(a, b))
names(df2) <- c("Movie Title", "Oscar Prediction")
print(df2)
cat("\n")
}
#sink()
|
607e2814c210653201cb6480cb4023fdd1a62904 | 6293deff6ed3b70fcbfb23bf14dd39e947816a66 | /complete.R | 9f73ec405830514cf2e02ca84b5eff1ffb2dba15 | [] | no_license | lspinheiro/datasciencecoursera | 9f18ec3e14449e3881de6a490fad2ca1231acdbe | 60f6ac0a4510d08b73fa61c68acd817eba4615d3 | refs/heads/master | 2021-01-19T05:44:15.410547 | 2014-05-14T18:27:07 | 2014-05-14T18:27:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 420 | r | complete.R | complete <- function(directory, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'id' is an integer vector of monitor IDs to summarise.
  ## Returns a data frame with one row per requested monitor: the monitor
  ## 'id' and 'nobs', the count of completely observed (no-NA) rows in
  ## that monitor's CSV file.
  complete <- data.frame()
  for (i in id){
    # Build "~/<directory>/<id>.csv", zero-padding the monitor ID to 3 digits.
    file <- paste("~/", directory, "/",sprintf("%03d", i), ".csv", sep = "")
    x <- read.csv(file)
    # complete.cases() flags rows with no missing value in any column.
    # NOTE(review): growing a data frame with rbind inside a loop is O(n^2);
    # fine for ~332 files, but building a list and combining once would scale.
    complete <- rbind.data.frame(complete, c(i, nrow(x[complete.cases(x),])))
  }
  names(complete) <- c("id", "nobs")
  complete
}
|
cee8a4abb43ed0cb2fc0a8f1d60afc12f7e94376 | 23eee42f80b97d092ab181b3f407bb910ac1217a | /man/ColumnMap.Rd | ebc64dcc9d7f52027dbe505db66f7968ab87cff5 | [
"MIT"
] | permissive | Marchen/moni1000 | fe2761fee81fecf563e693edb82679e78cfcfff0 | 596fafba4fa829e7ac287554f3c3b724f2c079dc | refs/heads/master | 2022-11-30T21:26:57.899235 | 2020-08-16T04:47:38 | 2020-08-16T04:47:38 | 287,552,034 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,177 | rd | ColumnMap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/column.map.r
\name{ColumnMap}
\alias{ColumnMap}
\title{Create column map}
\usage{
ColumnMap(
mesh_xcord = "mesh_xcord",
mesh_ycord = "mesh_ycord",
tag_no = "tag_no",
indv_no = "indv_no",
stem_xcord = "stem_xcord",
stem_ycord = "stem_ycord",
spc_japan = "spc_japan",
gbh = "gbh",
note = "note",
s_date = "s_date",
dead = "dead",
year = "year"
)
}
\arguments{
\item{mesh_xcord}{x coordinate of grid mesh.}
\item{mesh_ycord}{y coordinate of grid mesh.}
\item{tag_no}{stem id, currently should be unique for all stems.}
\item{indv_no}{individual id.}
\item{stem_xcord}{x coordinate of stems.}
\item{stem_ycord}{y coordinate of stems.}
\item{spc_japan}{species name in Japanese.}
\item{gbh}{girth at breast height.}
\item{note}{remarks for each stem in each observation.}
\item{s_date}{census date.}
\item{dead}{logical indicating dead stems.}
\item{year}{observation year.}
}
\description{
Create column name mapping between source data and Moni1000 format.
Names of arguments represent Moni1000 data names and argument values
represent column names of source data.
}
|
26120352aa64ee33fe03ddb04efb473c1351ed43 | 9724647fa32760a171fd7cba463f027c5fac4a3a | /cachematrix.R | d98148ebbdcc77dfd35b9e4092089f071dc53a03 | [] | no_license | pilic/ProgrammingAssignment2 | 1420391060f8b97d46a190d02549327f055b056b | d72821a754d3c2c8610ad8659441312c9feeb980 | refs/heads/master | 2021-01-14T08:55:53.793567 | 2016-08-09T13:26:35 | 2016-08-09T13:26:35 | 65,293,855 | 0 | 0 | null | 2016-08-09T12:39:58 | 2016-08-09T12:39:57 | null | UTF-8 | R | false | false | 637 | r | cachematrix.R | ## makeCacheMatrix: creates a special matrix object
# makeCacheMatrix: build a matrix wrapper that can memoise its inverse.
#
# Returns a list of four accessor closures sharing one environment:
#   set(y)          -- replace the stored matrix and drop any cached inverse
#   get()           -- return the stored matrix
#   setinverse(inv) -- store a computed inverse
#   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # stored matrix changed, so the cache is stale
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## object, computing it at most once and caching the result.
##
## x   -- a cache object exposing get/getinverse/setinverse closures
## ... -- additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  hit <- x$getinverse()
  if (is.null(hit)) {
    # Cache miss: invert the stored matrix and remember the result.
    hit <- solve(x$get(), ...)
    x$setinverse(hit)
  }
  hit
}
|
de9294b8683c25f1d4fc35b870535e26f7bc4315 | 99be362c89a70717b228641b5ccc6d18f4fe2e95 | /man/txClassifier.Rd | e85343c6491b5944fe8656bf0cb54303e6ee4b11 | [] | no_license | lppcom/genericClassifier | 291a233f6115c65c2f37571cd1d83f8a79527b15 | 15af4041e13a5e2eb732d1383f3e751eaaac01ce | refs/heads/master | 2021-05-31T20:25:17.496851 | 2016-05-06T22:04:13 | 2016-05-06T22:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,423 | rd | txClassifier.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/txClassifierObject.R
\name{txClassifier}
\alias{txClassifier}
\title{txClassifier Constructor Function}
\usage{
txClassifier(classes, data_set, svm_cost = 1, svm_gamma = "def",
type = "PAX")
}
\arguments{
\item{classes}{character vector of 2 elements that specifies the groups of samples to
keep in the data frame}
\item{data_set}{data frame of RMA values for training set with samples as columns and
probesets as rows (default: NULL).}
\item{svm_cost}{numeric value representing the cost parameter when training an svm model
(default: 1)}
\item{svm_gamma}{numeric value (or "def") representing the gamma parameter when
training an svm model (default: "def")}
\item{type}{character of value "PAX" or "BX" specifying the type of samples to be
tested (default: PAX).}
}
\value{
object of class txClassifier
}
\description{
\code{\link{txClassifier}} returns an object of class txClassifier.
}
\details{
This function is a constructor for building a txClassifier object. The txClassifier
object is a list of parameters for making and using a trained svm model based on
gene expression data from microarrays.
}
\examples{
\dontrun{
txClassifier <- txClassifier(classes=c("notTX", "TX"), data_set="", sample_labels="",
svm_cost=1, svm_gamma=0.05, type="PAX")
}
}
|
5b4cebd6345a4a3f4aa31348967bdfc9bb03ff06 | 8c1333fb9fbaac299285dfdad34236ffdac6f839 | /equity-valuation/ch2/solution-02b.R | 864e866f2d7293e9f605a7ec0b9e19e983533ee4 | [
"MIT"
] | permissive | cassiopagnoncelli/datacamp-courses | 86b4c2a6d19918fc7c6bbf12c51966ad6aa40b07 | d05b74a1e42b119efbbf74da3dfcf71569c8ec85 | refs/heads/master | 2021-07-15T03:24:50.629181 | 2020-06-07T04:44:58 | 2020-06-07T04:44:58 | 138,947,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 89 | r | solution-02b.R | # Calculate expected growth rate
# Expected growth rate: retention ratio times ke. Both inputs are defined
# upstream by the exercise's workspace scaffold -- not visible in this file.
exp_growth_rate <- retention_ratio * ke
# Bare name at top level auto-prints the computed rate.
exp_growth_rate
|
72d2d529e6b0b8a3a157a0a31ffec53de3190869 | 05ba1ac8a4ad8695d7d9cf72cbf1e068ae46dd7a | /scripts/R/correlateList.R | bc6c7cd9089e8eecc22a9fe922bd2acb6af4938d | [
"Apache-2.0",
"Artistic-2.0"
] | permissive | sergpolly/cworld-dekker | 2985788a5e14df9d8be26e2b58ecf4944cd77a95 | 7557bbe873e623e9059482722922faca4e784ad0 | refs/heads/master | 2020-04-21T13:39:34.622698 | 2019-06-07T16:37:37 | 2019-06-07T16:37:37 | 169,606,468 | 0 | 0 | Apache-2.0 | 2019-02-07T16:50:59 | 2019-02-07T16:50:58 | null | UTF-8 | R | false | false | 377 | r | correlateList.R | options(bitmapType='cairo')
# Command-line usage: Rscript correlateList.R <inputFile> [correlationMode]
#
# Reads a tab-separated table with header columns score_1 and score_2 and
# prints their correlation to stdout, or "NA" when the input file is empty.
args <- commandArgs(TRUE)
inputFile <- args[1]
correlationMode <- args[2]
# BUG FIX: the original tested `!exists(correlationMode)`, which looks up a
# variable NAMED by the argument's value -- it errors when the second
# argument is missing (exists(NA)) and silently reset a user-supplied method
# (e.g. "spearman") to "pearson" whenever no variable of that name existed.
# The intended check is simply "was a second argument supplied?".
if (is.na(correlationMode)) {
  correlationMode <- "pearson"
}
# An empty file has no rows to correlate; emit "NA" as a sentinel instead.
if (file.info(inputFile)$size > 0) {
  tmpData <- read.table(inputFile, header = TRUE, sep = "\t")
  r <- cor(tmpData$score_1, tmpData$score_2, method = correlationMode)
  cat(r)
} else {
  cat("NA")
}
da8406e81769d036a4ccdb4d5a443eea916466e8 | 50272cf92b22f6436abd5e18a31a45f4fe9efc01 | /man/get_covariance_matrix.ModelTree.Rd | 4a833ab6c07457ee0750c8f452b3839d5ca976a5 | [] | no_license | mattdneal/gaussianProcess | 906154584b3c84184f0d9afd2df8865c286d4718 | 9e740bf0948569a8ea24d0311a1cf0ea83544533 | refs/heads/master | 2021-01-10T07:28:42.254970 | 2019-01-20T00:06:34 | 2019-01-20T00:09:04 | 50,181,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 823 | rd | get_covariance_matrix.ModelTree.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covMatUtils.R
\name{get_covariance_matrix.ModelTree}
\alias{get_covariance_matrix.ModelTree}
\title{Get the Covariance Matrix for a Kernel ModelTree}
\usage{
\method{get_covariance_matrix}{ModelTree}(kernel, x, sigma.n, hyper.params,
additional.params = list(), cache = NULL)
}
\arguments{
\item{kernel}{A ModelTree object}
\item{x}{A matrix of data to find the covariance matrix of}
\item{sigma.n}{The standard deviation of the noise to add to the kernel - used to regularise the resulting covariance matrix}
\item{hyper.params}{The hyperparameters of \code{kernel}}
\item{additional.params}{an empty list (not used for ModelTree objects)}
}
\value{
A covariance matrix
}
\description{
Get the Covariance Matrix for a Kernel ModelTree
}
|
b36dec7874df9e6e989716bab6785bee22a68aa2 | 998160ded8ff6f2efadc35655a7b7382ec960aac | /Graph with 3T3 and BCC Split-Cobi.R | d435b53b61b147784b896b4b5346bdf8d2246a75 | [] | no_license | phalas25/ph-future-phd | 4953f9a708cd1a08e0b46e619a9936862ea3d282 | f1953cb8c8d05ee3d0a81fd20b8d36d44534a231 | refs/heads/master | 2020-09-05T16:26:11.316173 | 2020-04-03T17:34:00 | 2020-04-03T17:34:00 | 220,156,010 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,787 | r | Graph with 3T3 and BCC Split-Cobi.R | #clear current environment
rm(list = ls(all.names = TRUE))
#install packages as needed--unmarkdown if you do not have these on the console
#install.packages("RColorBrewer")
#install.packages("ggplot2")
#install.packages("dplyr")
#install.packages("readxl")
#load packages that have been installed (always run these commands)
library(RColorBrewer)
library(ggplot2)
library(dplyr)
#FIX: read_excel() below comes from readxl, which was never attached, so the
#script only worked if readxl happened to be loaded interactively.
library(readxl)
#Import Excel data (or use RStudio's Import Dataset helper and copy the call
#it prints to the console), then rename the object to something simpler
X12_3_19_Cobi_Combo <- read_excel("12_3_19_Cobi_Combo.xlsx")
View(X12_3_19_Cobi_Combo)
#Paiges laptop location
#X11_18_DCAA_Combo <- read_excel("ph-future-phd/ph-future-phd/11_18_DCAA_Combo.xlsx")
qPCR_raw <- X12_3_19_Cobi_Combo
#Check that the file is correct
print(qPCR_raw)
#Look at current column names
colnames(qPCR_raw)
#rename column names to match the instrument's export layout
colnames(qPCR_raw) <- c("Well", "Sample.Name", "Target.Name", "Task", "Reporter", "Quencher", "RQ", "RQ Min", "RQ Max", "Cт")
colnames(qPCR_raw)
#cut out the first 6 rows and the last 5 rows that have no data (header and
#footer junk written by the instrument output file)
qPCR_c <- qPCR_raw[7:nrow(qPCR_raw), ]
head(qPCR_c)
qPCR_c1 <- qPCR_c[1:(nrow(qPCR_c) - 5), ]
#make a new table with Sample Name, RQ, RQ Min and RQ Max
colnames(qPCR_c1)
qPCR_c2 <- qPCR_c1[, 1:10]
qPCR_cut <- select(qPCR_c2, "Sample.Name", "RQ", "RQ Min", "RQ Max")
print(qPCR_cut)
#look at the class of each of the variables
sapply(qPCR_cut, class)
#Convert RQ to numeric (non-numeric entries become NA) and Sample.Name to a
#factor so it can be reordered later for plotting
qPCR_cut$RQ <- as.numeric(as.character(qPCR_cut$RQ))
qPCR_cut$Sample.Name <- as.factor(qPCR_cut$Sample.Name)
#look to see that the variable types were changed
sapply(qPCR_cut, class)
#filter the file based on type of cell
#(Sample.Name embeds the cell line, so a substring match is enough)
qPCR_cut %>% filter(grepl('3T3', Sample.Name)) -> qPCR_wt
qPCR_cut %>% filter(grepl('BCC', Sample.Name)) -> qPCR_bcc
#Group by the sample name and then take avg of RQ for each group (accounting for the drop of anything with NA--Reference Gene RQs)
colnames(qPCR_wt)
qPCR_wt %>%
  group_by(Sample.Name) %>%
  summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> AverageWT
qPCR_bcc %>%
  group_by(Sample.Name) %>%
  summarise(avgRQ = mean(RQ, na.rm = TRUE)) -> AverageBCC
#Merge back together
Merged = rbind(AverageWT, AverageBCC)
#Put Averages and RQ Min and Max from original plot on one graph
#(many-to-one merge: each replicate row of qPCR_cut gains its group's avgRQ)
C <- merge(Merged, qPCR_cut, by = "Sample.Name")
#Remove NAs (so that you have a cleaner table)
#NOTE(review): RQ Min/RQ Max are still character here, so na.omit mainly
#drops rows whose numeric RQ is NA (e.g. reference-gene rows) -- confirm
#that is the intent.
C_Num <- na.omit(C)
#Remove duplicates (because when you graph things get weird so you need it to be 1-15 where theres no repeat values)
C_Num_2 <- distinct(C_Num)
#Convert columns to numeric for RQ Min and RQ Max
#(each sapply call just echoes the column classes before/after conversion)
sapply(C_Num_2, class)
C_Num_2$`RQ Min` <- as.numeric(as.character(C_Num_2$`RQ Min`))
sapply(C_Num_2, class)
C_Num_2$`RQ Max` <- as.numeric(as.character(C_Num_2$`RQ Max`))
sapply(C_Num_2, class)
#Name Merged plot
qPCR_DCAA <- C_Num_2
#Split Merged Plot out into BCC versus 3T3
qPCR_DCAA %>% filter(grepl('3T3', Sample.Name)) -> WT
qPCR_DCAA %>% filter(grepl('BCC', Sample.Name)) -> BCC
#WT HERE FORWARD
#This section builds the 3T3 (wild-type) figure in layered steps, printing
#each intermediate so it can be checked before moving on. `Plot` is
#deliberately overwritten at each refinement step.
#Follow the Below to Make a Basic Plot with Sample name versus RQ where the fill is based on Sample Name
#Generate Basic Plot
#NOTE(review): using WT$... inside aes() works for a single data frame but
#bypasses ggplot2's data masking; aes(Sample.Name, avgRQ, fill = Sample.Name)
#would be the conventional form.
Plot <- ggplot() + geom_col(data = WT, aes(x = WT$Sample.Name, y = WT$avgRQ, fill = WT$Sample.Name))
print(Plot)
#Add titles to axis (and format them to be readable) as well as add title to graph
Plot <- Plot + ggtitle("GLI1 Expression") +
  xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph
#(error bars come straight from the instrument's RQ Min / RQ Max columns)
Plot_SD <- Plot + geom_errorbar(data = WT, mapping=aes(x=WT$Sample.Name, ymin=WT$`RQ Min`, ymax=WT$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Rearrange sample names if necessary
#(re-level the factor so bars appear in dose order rather than alphabetically;
#any sample name not listed below becomes NA -- keep the list in sync)
colnames(WT)
print(WT$Sample.Name)
qPCR_DCAA_2 <- select(WT, "Sample.Name", "avgRQ")
qPCR_DCAA_2$Sample.Name <- factor(WT$Sample.Name, levels = c("3T3 SS 0 nM Cobi", "3T3 SS 4.8 nM Cobi", "3T3 SS 12.8 nM Cobi", "3T3 SS 34.9 nM Cobi", "3T3 SS 100 nM Cobi", "3T3 SSH 0 nM Cobi", "3T3 SSH 4.8 nM Cobi", "3T3 SSH 12.8 nM Cobi", "3T3 SSH 34.9 nM Cobi", "3T3 SSH 100 nM Cobi"))
print(qPCR_DCAA_2)
#Follow this if you needed to rearrange the sample names as above
#Generate Basic Plot with rearrange
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = qPCR_DCAA_2$Sample.Name, y = qPCR_DCAA_2$avgRQ, fill = qPCR_DCAA_2$Sample.Name))
print(Plot)
#Add titles to axis as well as graph (for rearrange)
Plot <- Plot + ggtitle("GLI1 Expression") +
  xlab("Concentration Cobimetinib") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange)
#(error-bar data still comes from WT; ggplot aligns the bars to the x scale
#by matching sample names)
Plot_SD <- Plot + geom_errorbar(data = WT, mapping=aes(x=WT$Sample.Name, ymin=WT$`RQ Min`, ymax=WT$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Follow this if you need to filter by Cell Type and want to color your plot by that
#Generate new column in table based on cell type and color by it
#(CellType becomes 1 for samples matching "3T3 SSH" -- shown as "HH SS" in
#the final legend below -- and 0 otherwise)
qPCR_DCAA_2$CellType <- NA
print(qPCR_DCAA_2)
sapply(qPCR_DCAA_2, class)
qPCR_DCAA_2$CellType <- as.numeric(as.character(qPCR_DCAA_2$CellType))
sapply(qPCR_DCAA_2, class)
qPCR_DCAA_2$CellType <- as.numeric(grepl("\\b3T3 SSH\\b", qPCR_DCAA_2$Sample.Name))
print(qPCR_DCAA_2)
#Generate Basic Plot with rearrange and color
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = qPCR_DCAA_2$Sample.Name, y = qPCR_DCAA_2$avgRQ, fill = as.factor(qPCR_DCAA_2$CellType)))
print(Plot)
#Add titles to axis, remove legend, as well as title graph (for rearrange and color)
Plot <- Plot + theme_classic() + ggtitle("WT GLI1 Expression with MAP2K1/MEK1 Inhibitor") + theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Concentration Cobimetinib") + ylab("Fold Change") + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90), axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange and color)
Plot_SD <- Plot + geom_errorbar(data = WT, mapping=aes(x=WT$Sample.Name, ymin=WT$`RQ Min`, ymax=WT$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Shorten the x tick labels to just the dose (order matches the factor levels)
Plot_SD_E <- Plot_SD + scale_x_discrete(labels=c("0 nM", "4.8 nM", "12.8 nM", "34.9 nM", "100 nM", "0 nM", "4.8 nM", "12.8 nM", "34.9 nM", "100 nM"))
print(Plot_SD_E)
#Relabel the fill legend with the media names
Plot_SD_E_L <- Plot_SD_E + scale_fill_discrete(name = "Media Type", labels = c("SS", "HH SS"))
print(Plot_SD_E_L)
#BCC Plot Here
#Same layered workflow as the WT section above, but for the BCC samples
#(only SS-media levels are listed below, so no CellType colouring step).
#Follow the Below to Make a Basic Plot with Sample name versus RQ where the fill is based on Sample Name
#Generate Basic Plot
Plot <- ggplot() + geom_col(data = BCC, aes(x = BCC$Sample.Name, y = BCC$avgRQ, fill = BCC$Sample.Name))
print(Plot)
#Add titles to axis (and format them to be readable) as well as add title to graph
Plot <- Plot + ggtitle("GLI1 Expression") +
  xlab("Sample Name") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph
Plot_SD <- Plot + geom_errorbar(data = BCC, mapping=aes(x=BCC$Sample.Name, ymin=BCC$`RQ Min`, ymax=BCC$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Rearrange sample names if necessary
#(re-level so bars run in increasing dose order; unlisted names become NA)
colnames(BCC)
qPCR_DCAA_2 <- select(BCC, "Sample.Name", "avgRQ")
qPCR_DCAA_2$Sample.Name <- factor(BCC$Sample.Name, levels = c("BCC SS 0 nM Cobi", "BCC SS 4.8 nM Cobi", "BCC SS 12.8 nM Cobi", "BCC SS 34.9 nM Cobi", "BCC SS 100 nM Cobi"))
print(qPCR_DCAA_2)
#(duplicated print kept as-is; it just echoes the same table twice)
print(qPCR_DCAA_2)
#Follow this if you needed to rearrange the sample names as above
#Generate Basic Plot with rearrange
Plot <- ggplot() + geom_col(data = qPCR_DCAA_2, aes(x = qPCR_DCAA_2$Sample.Name, y = qPCR_DCAA_2$avgRQ, fill = qPCR_DCAA_2$Sample.Name))
print(Plot)
#Add titles to axis as well as graph (for rearrange)
#(legend.position = "none" drops the fill legend since it repeats the x labels)
Plot <- Plot + ggtitle("BCC GLI1 Expression with MAP2K1/MEK1 Inhibitor") +
  xlab("Concentration Cobimetinib") + ylab("Fold Change") + theme_classic() + labs(fill = "Sample Name") + theme(legend.position = "none") + theme(axis.text.x = element_text(size=10, angle=90),axis.title=element_text(size=12,face="bold"))
print(Plot)
#Add the RQ Max and Min to the graph (for rearrange)
Plot_SD <- Plot + geom_errorbar(data = BCC, mapping=aes(x=BCC$Sample.Name, ymin=BCC$`RQ Min`, ymax=BCC$`RQ Max`), width=0.2, size=0.5, color="black")
print(Plot_SD)
#Rename legend
#NOTE(review): this actually relabels the x-axis ticks, not the legend (the
#legend was removed above) -- the comment above looks stale.
Plot_SD_E <- Plot_SD + scale_x_discrete(labels=c("SS 0 nM", "SS 4.8 nM", "SS 12.8 nM", "SS 34.9 nM", "SS 100 nM"))
print(Plot_SD_E)
|
eccc3f06fb5f2e7b9cc1466bdeb981131071e522 | 44cd9609cc0181fb22b4933d4b86f4ff718478a1 | /Modeling Kit(code)/Functional_Framework/modeltools.R | adad1e12300a3311c1c3646b37befb7b3e63343b | [] | no_license | chipbuster/ParkinsonsProject | 19657daf1f245f628cc7d4b9802af3b8ef2f0abe | d3f88b315125b180050b4bcbd16c27892240f64f | refs/heads/master | 2021-01-10T19:57:57.999790 | 2014-01-14T22:26:07 | 2014-01-14T22:26:07 | 15,917,582 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,340 | r | modeltools.R | ##Useful functions for datanalysis. Creates and tests models using tools from
##UsefulTools.R.
##YOU NEED TO MAINTAIN THE FIRST TWO FUNCTIONS!!!
##Edit them where there are comments to ensure that things don't break.
#load or source statement here
# Make a model from a given dataset.
# Call with the data frame to base predictions off of; it returns a fitted
# lm object. The predictor set is hard-coded on purpose: edit the formula
# below to try a different model (one model, one run).
#
# datain -- data frame with (at least) motor_UPDRS, age, DFA and HNR columns
makemodel <- function(datain) {
  # Quartic polynomial in age plus the DFA and HNR voice measures.
  lm(motor_UPDRS ~ age + I(age^2) + I(age^3) + I(age^4) + DFA + HNR,
     data = datain)
}
#Test a model that you have made
#The second argument must be of type string (encapsulate in quotes)
#This function returns a data frame with the modelname and In-sample/Out-of-sample
#MSE and MAE (mean).
testmodel <- function(model, modelname, x = x, y = y, xtest = xtest, ytest = ytest){
z = matrix(ncol = 4, nrow = 1) #change numcol as needed to accomodate for #tests, save one col for name
z = data.frame(z, row.names = c(modelname))
colnames(z) <- c("ISE", "OSE", "IAE", "OAE")
z[1] = msein(model)
z[2] = mseout(model)
z[3] = maein(model)
z[4] = maeout(model)
return(z)
}
#Tested Models
#age + Shimmer.APQ11 + RDPE + PPE
# model = lm(motor_UPDRS ~ age + DFA + Shimmer.APQ11 + HNR, data=datain)
|
3d048395a11a4cc0f55d8c9f163b6e86223d8acb | 2724f6e874c9500b0df726c2d6903fe486b8cc84 | /man/get_bioplex_list.Rd | d0d94a1c88f1d5b3be17b2fd5795bec3f8b2a7c5 | [
"MIT"
] | permissive | lagelab/Genoppi | 62006e9f123a1857580e5ebd88885c945483f6e3 | f78e7b3e523650e7ae211fc93b79dff3e28dd03f | refs/heads/master | 2023-08-31T09:51:59.007937 | 2022-10-12T14:17:17 | 2022-10-12T14:17:17 | 83,465,982 | 23 | 10 | null | null | null | null | UTF-8 | R | false | true | 816 | rd | get_bioplex_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_bioplex_list.R
\name{get_bioplex_list}
\alias{get_bioplex_list}
\title{Retrieve Bioplex 3.0 interactors for a given bait}
\usage{
get_bioplex_list(bait, p = 0.9)
}
\arguments{
\item{bait}{string. name of bait protein}
\item{p}{numeric. Probability of the protein being an interactor with the bait. See \code{?bioplex_table}.}
}
\value{
data.frame containing gene and significant columns for all non-bait bioplex genes
(significant=T for bioplex 3.0 interactors of bait). NULL if bait not found in bioplex.
}
\description{
Use bioplex_table data to get bioplex interactors and non-interactors of bait.
See \code{?bioplex_table} for more details about the data set.
}
\examples{
\dontrun{
df1 <- get_bioplex_list('BCL2',p = 0.5)
}
}
|
bffb509d98239939430f9351456223b9a2aa36b8 | 4624a2d7848bc7624088b13e06fd20bd905a0e84 | /man/plot_climate.Rd | 59b64580f3fbcb30f74629d813d0bedac9c6dce7 | [
"MIT"
] | permissive | special-uor/smpds | 8ad44b09653c77239a8b637ea754d453ef0e8945 | 90396cec8f1f5b01d30192b91b39c4b74a50915f | refs/heads/main | 2023-04-18T07:31:41.945588 | 2022-08-07T10:25:12 | 2022-08-07T10:25:12 | 391,122,978 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 12,095 | rd | plot_climate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-plot.R
\name{plot_climate}
\alias{plot_climate}
\alias{plot_climate_countour}
\alias{plot_climate_tiles}
\alias{plot_gdd}
\alias{plot_mat}
\alias{plot_mi}
\alias{plot_mtco}
\alias{plot_mtwa}
\title{Plot climate variable}
\usage{
plot_climate(
.data,
var = "mat",
units = NA,
fill_scale = ggplot2::scale_fill_viridis_d(name = toupper(var)),
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
elevation_cut = NULL,
land_borders = rnaturalearth::ne_countries(scale = "small", returnclass = "sf"),
land_borders_colour = "black",
land_borders_size = 0.25,
fill_land = "white",
fill_sea = "#CFE2F3",
contour = FALSE,
...
)
plot_climate_countour(
.data,
var = "mat",
units = NA,
fill_scale = ggplot2::scale_fill_gradientn(name = toupper(var), colours =
wesanderson::wes_palette("Zissou1", 100, type = "continuous")),
size = 1,
stroke = 0.5,
legend.key.width = ggplot2::unit(2, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
elevation_cut = NULL,
land_borders = rnaturalearth::ne_countries(scale = "small", returnclass = "sf"),
land_borders_colour = "black",
land_borders_size = 0.25,
fill_land = "white",
fill_sea = "#CFE2F3",
resolution = 0.5,
...
)
plot_climate_tiles(
.data,
var = "mat",
units = NA,
fill_scale = ggplot2::scale_fill_manual(name = toupper(var), values =
wesanderson::wes_palette("Zissou1", 9, type = "continuous")),
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
elevation_cut = NULL,
land_borders = rnaturalearth::ne_countries(scale = "small", returnclass = "sf"),
land_borders_colour = "black",
land_borders_size = 0.25,
fill_land = "white",
fill_sea = "#CFE2F3",
.overlay_data = NULL,
continuous = FALSE,
...
)
plot_gdd(
.data,
baseline = 0,
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
...
)
plot_mat(
.data,
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
...
)
plot_mi(
.data,
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
...
)
plot_mtco(
.data,
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
...
)
plot_mtwa(
.data,
size = 1,
stroke = 0.1,
legend.key.width = ggplot2::unit(1, "cm"),
legend.position = "bottom",
xlim = c(-180, 180),
ylim = c(-60, 90),
show_plot = TRUE,
...
)
}
\arguments{
\item{.data}{Data frame with spatial and climate data. The following are
expected:
\itemize{
\item \strong{Latitude}, named: \code{latitude}, \code{lat} or \code{y}.
\item \strong{Longitude}, named: \code{longitude}, \code{long}, \code{lon}
or \code{y}.
\item \strong{Main variable}, named: value of \code{var}.
}}
\item{var}{String with the name of the climate variable to plot. Default:
\code{"mat"} (Mean Annual Temperature).}
\item{units}{String with the units to display next to the legend tittle.
Default: \code{NA}.}
\item{fill_scale}{\code{ggplot2} compatible object with information on how to
fill the individual points for the climate variable. Default:
\code{ggplot2::scale_fill_viridis_c(name = toupper(var))}.}
\item{size}{Numeric value for the \code{size} aesthetic.}
\item{stroke}{Numeric value for the \code{stroke} aesthetic.}
\item{legend.position}{the position of legends ("none", "left", "right",
"bottom", "top", or two-element numeric vector)}
\item{xlim, ylim}{Limits for the x and y axes. These limits are specified
in the units of the default CRS. By default, this means projected coordinates
(\code{default_crs = NULL}). How limit specifications translate into the exact
region shown on the plot can be confusing when non-linear or rotated coordinate
systems are used as the default crs. First, different methods can be preferable
under different conditions. See parameter \code{lims_method} for details. Second,
specifying limits along only one direction can affect the automatically generated
limits along the other direction. Therefore, it is best to always specify limits
for both x and y. Third, specifying limits via position scales or \code{xlim()}/\code{ylim()}
is strongly discouraged, as it can result in data points being dropped from the plot even
though they would be visible in the final plot region.}
\item{show_plot}{Boolean flag to indicate whether or not the graphic should
be displayed, if \code{FALSE}, it only returns the \code{ggplot2} object.
Default: \code{TRUE}.}
\item{elevation_cut}{Numeric value to use as the threshold of the elevation
at which the sites will be represented with different shapes.}
\item{land_borders}{Data frame with polygons to represent land borders
(e.g. continents, countries, counties, etc.).
Default: \code{rnaturalearth::ne_countries}.}
\item{land_borders_colour}{String containing a colour code (HEX value) or
colour name, to be used as colour for the land borders.
Default: \code{"black"}.}
\item{land_borders_size}{Numeric value of the line size to draw the land
borders. Default: \code{0.25}}
\item{fill_land}{String containing a colour code (HEX value) or colour
name, to be used as filling for the countries.
Default: \code{NA} (transparent).}
\item{fill_sea}{String containing a colour code (HEX value) or colour
name, to be used as filling for the seas
Default: \code{NA} (transparent).}
\item{contour}{Boolean flag to indicate if a contour should be overlaid
on the individual sites/points. It uses the function \code{\link[akima:interp]{akima::interp()}}
to interpolate values from the nearest available points.}
\item{...}{
Arguments passed on to \code{\link[ggplot2:ggsf]{ggplot2::coord_sf}}
\describe{
\item{\code{expand}}{If \code{TRUE}, the default, adds a small expansion factor to
the limits to ensure that data and axes don't overlap. If \code{FALSE},
limits are taken exactly from the data or \code{xlim}/\code{ylim}.}
\item{\code{crs}}{The coordinate reference system (CRS) into which all data should
be projected before plotting. If not specified, will use the CRS defined
in the first sf layer of the plot.}
\item{\code{default_crs}}{The default CRS to be used for non-sf layers (which
don't carry any CRS information) and scale limits. The default value of
\code{NULL} means that the setting for \code{crs} is used. This implies that all
non-sf layers and scale limits are assumed to be specified in projected
coordinates. A useful alternative setting is \code{default_crs = sf::st_crs(4326)},
which means x and y positions are interpreted as longitude and latitude,
respectively, in the World Geodetic System 1984 (WGS84).}
\item{\code{datum}}{CRS that provides datum to use when generating graticules.}
\item{\code{label_graticule}}{Character vector indicating which graticule lines should be labeled
where. Meridians run north-south, and the letters \code{"N"} and \code{"S"} indicate that
they should be labeled on their north or south end points, respectively.
Parallels run east-west, and the letters \code{"E"} and \code{"W"} indicate that they
should be labeled on their east or west end points, respectively. Thus,
\code{label_graticule = "SW"} would label meridians at their south end and parallels at
their west end, whereas \code{label_graticule = "EW"} would label parallels at both
ends and meridians not at all. Because meridians and parallels can in general
intersect with any side of the plot panel, for any choice of \code{label_graticule} labels
are not guaranteed to reside on only one particular side of the plot panel. Also,
\code{label_graticule} can cause labeling artifacts, in particular if a graticule line
coincides with the edge of the plot panel. In such circumstances, \code{label_axes} will
generally yield better results and should be used instead.
This parameter can be used alone or in combination with \code{label_axes}.}
\item{\code{label_axes}}{Character vector or named list of character values
specifying which graticule lines (meridians or parallels) should be labeled on
which side of the plot. Meridians are indicated by \code{"E"} (for East) and
parallels by \code{"N"} (for North). Default is \code{"--EN"}, which specifies
(clockwise from the top) no labels on the top, none on the right, meridians
on the bottom, and parallels on the left. Alternatively, this setting could have been
specified with \code{list(bottom = "E", left = "N")}.
This parameter can be used alone or in combination with \code{label_graticule}.}
\item{\code{lims_method}}{Method specifying how scale limits are converted into
limits on the plot region. Has no effect when \code{default_crs = NULL}.
For a very non-linear CRS (e.g., a perspective centered
around the North pole), the available methods yield widely differing results, and
you may want to try various options. Methods currently implemented include \code{"cross"}
(the default), \code{"box"}, \code{"orthogonal"}, and \code{"geometry_bbox"}. For method \code{"cross"},
limits along one direction (e.g., longitude) are applied at the midpoint of the
other direction (e.g., latitude). This method avoids excessively large limits for
rotated coordinate systems but means that sometimes limits need to be expanded a
little further if extreme data points are to be included in the final plot region.
By contrast, for method \code{"box"}, a box is generated out of the limits along both directions,
and then limits in projected coordinates are chosen such that the entire box is
visible. This method can yield plot regions that are too large. Finally, method
\code{"orthogonal"} applies limits separately along each axis, and method
\code{"geometry_bbox"} ignores all limit information except the bounding boxes of any
objects in the \code{geometry} aesthetic.}
\item{\code{ndiscr}}{Number of segments to use for discretising graticule lines;
try increasing this number when graticules look incorrect.}
\item{\code{default}}{Is this the default coordinate system? If \code{FALSE} (the default),
then replacing this coordinate system with another one creates a message alerting
the user that the coordinate system is being replaced. If \code{TRUE}, that warning
is suppressed.}
\item{\code{clip}}{Should drawing be clipped to the extent of the plot panel? A
setting of \code{"on"} (the default) means yes, and a setting of \code{"off"}
means no. In most cases, the default of \code{"on"} should not be changed,
as setting \code{clip = "off"} can cause unexpected results. It allows
drawing of data points anywhere on the plot, including in the plot margins. If
limits are set via \code{xlim} and \code{ylim} and some data points fall outside those
limits, then those data points may show up in places such as the axes, the
legend, the plot title, or the plot margins.}
}}
\item{resolution}{Numeric value for the grid resolution.}
\item{.overlay_data}{(Optional) Data frame with original observations, to be
to be overlaid on top of the tiles in \code{.data}.}
\item{continuous}{(Optional) Boolean flag to indicate whether or not to use
a continuous (\code{TRUE}) fill scale. Default: \code{FALSE}.}
\item{baseline}{Numeric value to be used as the baseline for the calculation
of the Growing Degree Days, default: \code{0}.}
}
\value{
\code{ggplot} object with the plot.
}
\description{
Plot climate variable (like \code{MAT}, \code{MTCO}, etc.), in addition to
\code{latitude} and \code{longitude}, a column with the value of \code{var}
is required.
}
|
b50c3278b7bf8b050bd34adfd288801f59e5ae86 | 908af1ed68362beadb13640c2951ddf8561280aa | /bin/graphDiagram.R | ba62dac24a5c6915b3b3ce220eb10d692fdb347d | [] | no_license | DuhaimeLab/Hannigan_ConjunctisViribus_mSystems_2017 | 8045ffac2121d986c727150426a9c27c85bd8a06 | af354bd53084a70861ab96dab244e9945e48c424 | refs/heads/master | 2021-01-13T05:36:57.651491 | 2017-06-01T11:58:22 | 2017-06-01T11:58:22 | 95,045,759 | 1 | 0 | null | 2017-06-21T20:59:42 | 2017-06-21T20:59:42 | null | UTF-8 | R | false | false | 1,229 | r | graphDiagram.R | library(DiagrammeR)
# Create a simple NDF
nodevector <- c(
"Study",
"Disease",
"Subject ID",
"Phage Sample ID",
"Bacterial Sample ID",
"Time Point",
"Phage OGU",
"Bacteria OGU")
nodes <-
create_node_df(
n = length(nodevector),
label = nodevector,
fixedsize = FALSE,
fillcolor = "white",
fontcolor = "black",
shape = "oval",
fontname = "Helvetica",
penwidth = 0.25,
color = "black",
fontsize = 7)
# Create a simple EDF (edge data frame). `from`/`to` are positions in
# nodevector above, e.g. the first edge 7 -> 8 is "Phage OGU" -> "Bacteria OGU".
# NOTE(review): `label` supplies 3 values for 12 edges, so R recycles them
# across every edge (edges 4, 7, 10 also get "Predicted Infection", etc.).
# Confirm this repetition is intended rather than labels for only the first
# three edges.
edges <-
  create_edge_df(
    from = c(
      7,
      4,
      5,
      3,
      3,
      2,
      2,
      1,
      1,
      1,
      6,
      6
    ),
    to = c(
      8,
      7,
      8,
      4,
      5,
      4,
      5,
      4,
      5,
      2,
      4,
      5
    ),
    color = "black",
    penwidth = 0.25,
    label = c("Predicted Infection", "Abundance", "Abundance"),
    fontsize = 7,
    fontname = "Helvetica"
  )
# Create the graph object,
# incorporating the NDF and
# the EDF, providing
# some global attributes
graph <-
  create_graph(
    nodes_df = nodes,
    edges_df = edges)
# render_graph(graph)  # uncomment to preview interactively
# Write the diagram to the figures directory as a PDF.
export_graph(
  graph,
  file_name = "./figures/graphdatabasediagram.pdf",
  file_type = "pdf"
  )
|
525fa995d318647e8b5a6899970b1fd09706bebc | 5c28ce9305f2e0bf0b71bde1c6e7c6fdcb1ad495 | /R/zzz.R | 9036e8f4cd4d14758b1c0ea7c64f90f2b25e4c30 | [] | no_license | GenomicsNX/HeatmapR | 032a63f886a23cdbf806eee7c133a1874b2ec42d | e852dcc7f28cf2487aeb271aba577b3c3b35dfe9 | refs/heads/master | 2022-11-19T10:46:16.506688 | 2020-07-17T00:51:36 | 2020-07-17T00:51:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 478 | r | zzz.R | ## UPDATE GLOBAL OPTIONS ON LOAD -----------------------------------------------
.onLoad <- function(libname, pkgname) {
  # Reset every heat-map-related global option to its default whenever the
  # package is loaded. `libname`/`pkgname` are required by the .onLoad hook
  # signature but are not used here.
  options(list(
    heat_map_save = NULL,        # HEAT_MAP_SAVE
    heat_map_custom = FALSE,     # HEAT_MAP_CUSTOM
    heat_map_margins = NULL,     # HEAT_MAP_MARGINS
    heat_map_copy = NULL,        # HEAT_MAP_COPY
    heat_map_temp_files = NULL,  # HEAT_MAP_FILES
    heat_map_device = NULL       # HEAT_MAP_DEVICE
  ))
}
e4473911c5b2d4cced4caa12382943c961f0cd76 | 2de11afae0cc001256eab563565a86978634a458 | /man/e_step.Rd | f9d239ea01a0a9196326a7cc9313391f28e951ed | [
"Apache-2.0"
] | permissive | JohnCoene/echarts4r | 169726af7e118b2388af7a1453a3240e9b810026 | 2ee457a4541240a7996d332823c0cc5be60b21c1 | refs/heads/master | 2023-09-03T16:48:34.900692 | 2023-08-22T15:01:24 | 2023-08-22T15:01:24 | 124,915,957 | 564 | 99 | NOASSERTION | 2023-08-28T07:43:12 | 2018-03-12T16:08:27 | JavaScript | UTF-8 | R | false | true | 1,731 | rd | e_step.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R, R/add_.R
\name{e_step}
\alias{e_step}
\alias{e_step_}
\title{Step}
\usage{
e_step(
e,
serie,
bind,
step = c("start", "middle", "end"),
fill = FALSE,
name = NULL,
legend = TRUE,
y_index = 0,
x_index = 0,
coord_system = "cartesian2d",
...
)
e_step_(
e,
serie,
bind = NULL,
step = c("start", "middle", "end"),
fill = FALSE,
name = NULL,
legend = TRUE,
y_index = 0,
x_index = 0,
coord_system = "cartesian2d",
...
)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{serie}{Column name of serie to plot.}
\item{bind}{Binding between datasets, namely for use of \code{\link{e_brush}}.}
\item{step}{Step type, one of \code{start}, \code{middle} or \code{end}.}
\item{fill}{Set to fill as area.}
\item{name}{name of the serie.}
\item{legend}{Whether to add serie to legend.}
\item{x_index, y_index}{Indexes of x and y axis.}
\item{coord_system}{Coordinate system to plot against.}
\item{...}{Any other option to pass, check See Also section.}
}
\description{
Add step serie.
}
\examples{
USArrests |>
tibble::rownames_to_column("State") |>
e_charts(State) |>
e_step(Murder, name = "Start", step = "start", fill = TRUE) |>
e_step(Rape, name = "Middle", step = "middle") |>
e_step(Assault, name = "End", step = "end") |>
e_tooltip(trigger = "axis")
# timeline
iris |>
group_by(Species) |>
e_charts(Sepal.Length, timeline = TRUE) |>
e_step(Sepal.Width) |>
e_tooltip(trigger = "axis")
}
\seealso{
\href{https://echarts.apache.org/en/option.html#series-line}{Additional arguments}
}
|
de519cf2b2f25db915fb7171f0d4c13743d9f3b3 | 5b2f016f1298c790224d83c1e17a425640fc777d | /array/ld/MethBlockByChrosome.R | b30054cf6068d884c366057e8c396537563d778d | [] | no_license | Shicheng-Guo/methylation2020 | b77017a1fc3629fe126bf4adbb8f21f3cc9738a0 | 90273b1120316864477dfcf71d0a5a273f279ef9 | refs/heads/master | 2023-01-15T20:07:53.853771 | 2020-02-28T03:48:13 | 2020-02-28T03:48:13 | 243,668,721 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 153 | r | MethBlockByChrosome.R | #!/usr/bin/R
# Load the merged 450k methylation table for this chromosome: tab-delimited,
# header row present, first column used as row names, strings read as factors.
setwd("/home/gsc/Dropbox/Project/methylation/array/ld")
data<-read.table("Chr24_450kMerge.txt.trans",head=T,row.names=1,sep="\t",as.is=F)
|
60f36f53c86e237f613a8bddcbba4dc3c3c25593 | e69d3476863aa2f73cdf3787b6f078618f133bbb | /week_2/week_2/RCrawler_example/CaseStudies/Case1YahooKimoStockMajor/tryWriteIntoDB.R | 3321974cb7c33f57e6c9c2e263bacffbdafff792 | [] | no_license | pumpkinlinlin/CSX_RProject_Spring_2018 | 08d714d8ed9318c2c4aafefeba5ce5a5416a92f5 | ef3d5abe1ba3351c0a9daef7a71fb3ceb1c725b5 | refs/heads/master | 2020-04-24T07:21:47.641885 | 2018-10-01T09:45:53 | 2018-10-01T09:45:53 | 171,796,950 | 1 | 0 | null | 2019-02-21T03:57:13 | 2019-02-21T03:57:13 | null | UTF-8 | R | false | false | 471 | r | tryWriteIntoDB.R | rm(list=ls(all.names = TRUE))
# Fetch the stock-major-holders table for ticker 2330 via the helper defined
# in packIntoFunction.R, append it to a local SQLite database, then reopen the
# database and read the table back to verify the write.
source("packIntoFunction.R")
Data_Table = getStockMajorData(2330)
View(Data_Table)
library("RSQLite")
# Write: append this scrape to the StockMajor table (created if absent).
drv <- dbDriver("SQLite")
conn <- dbConnect(drv,"yahooStockMajor.sqlite")
dbListTables(conn)
dbWriteTable(conn, "StockMajor", Data_Table,row.names=FALSE,append=TRUE)
dbDisconnect(conn)
# Read back: reconnect and inspect the accumulated table.
drv <- dbDriver("SQLite")
conn <- dbConnect(drv,"yahooStockMajor.sqlite")
dbListTables(conn)
View(dbReadTable(conn,"StockMajor"))
dbDisconnect(conn)
|
91ac85566a9803fd90925dfec98db758d4c8ee7c | ca5000dd3e18e4e3bc43d5d90e77194031503204 | /Validacion/man/ValPruebaF.Rd | 3c0f4e1fb27d66c717757533d78e9f805d9300f5 | [] | no_license | mablan/Validacion | 2d5bd0e7c664708d542749752234bff96f74082f | cbbf64062219ca82ceecb1ffb1aeccac7086530b | refs/heads/master | 2021-01-23T08:33:55.109739 | 2019-11-29T23:22:51 | 2019-11-29T23:22:51 | 102,534,412 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,519 | rd | ValPruebaF.Rd | \name{ValPruebaF}
\alias{ValPruebaF}
\title{
Prueba F de Fisher
}
\description{
Es una prueba que compara los valores de los datos contra los modelos de prediccion.
Esta funcion calcula una prueba F que tiene como hipotesis nula que el intercepto es 0 y la pendiente es 1.
}
\usage{
ValPruebaF(x, y)
}
\arguments{
\item{x}{
vector, matriz o data frame correspondiente a los valores simulados.
}
\item{y}{
vector, matriz o data frame correspondiente a los valores reales u observados.
}
}
\value{
\item{F}{Estadistico F}
\item{p}{Probabilidad del estadistico F}
}
\details{
Nota sobre los datos de entrada: En el caso de que los datos ingresados por el usuario esten incompletos (cuando falten algunas observaciones) las posiciones en las cuales deberian estar los elementos faltantes deben catalogarse como NA.
Las filas en las cuales existan valores NA son eliminadas al momento de realizar la comparacion entre datos de una observacion en particular y un determinado modelo.
}
\references{
HAEFNER, James W. (2005) Modeling Biological Systems Principles and Applications. Springer.
}
\author{
Magdiel Ablan, Renny Marquez, Yuraima Rivas, Jose Querales
}
\examples{
## Ejemplo1
data(Mixteque)
x<-data.frame(Mixteque$Mod1SimulaBaja,Mixteque$Mod2SimulaBaja,Mixteque$Mod3SimulaBaja)
y<-data.frame(Mixteque$Mod1DatosBaja)
ValPruebaF(x,y)
## Ejemplo2
data(Lacteos)
x<-Lacteos$InventarioGrutaBacterianaSimula
y<-Lacteos$InventarioGrutaBacterianaDatos
ValPruebaF(x, y)
}
\keyword{Prueba F de Fisher}
|
d1c2296b08e113a893752be4184272cbc939c017 | 148b70d53e28be4bdd4f272dbcfce3f56127f755 | /6Run_PAML/4Calc_LRT.r | 6c7c3168e4565bc5d4a27f692fe9698651ccd4fc | [
"BSD-2-Clause"
] | permissive | Bioinfo2015/Transcriptome_pipeline | c6239db770c4c1c041affec251d5e3914053c8b8 | 9c223cdc0c604e63440382a01fa29a0392b9e128 | refs/heads/master | 2021-01-14T08:51:14.025679 | 2015-10-21T21:59:23 | 2015-10-21T21:59:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,365 | r | 4Calc_LRT.r | # calculate LRT from codeml results, grab gene ontology categories, NMDS
# null = Model A1 (NSsites = 2, model = 2, fix_omega = 1)
# test = Model A (NSsites = 2, model = 2, fix_omega = 0)
# Marisa Lim (c)2015
# load libraries
library(biomaRt)
library("GO.db")
library(vegan)
library(dplyr)
library(tidyr)
# set working directory
MLwd = "C:/Users/mcwlim/Dropbox/Marisacompfiles/Transcriptome files"
setwd(MLwd)
# ----------------- Read in lnL values for each model -----------------
# Per-gene log-likelihoods from codeml: null model (Model A1, fix_omega = 1)
# and test model (Model A, fix_omega = 0). Each file has one row per gene
# with the gene's file path and its lnL value.
nullmod <- read.table("lnL_null.txt")
testmod <- read.table("lnL_pos.txt")
head(nullmod)
head(testmod)
# give column names
colnames(nullmod) <- c("Gene", "lnL")
colnames(testmod) <- c("Gene", "lnL")
# check dimensions
dim(nullmod)
dim(testmod)
# get gene name
# The Gene field is a path: take the 3rd "/"-separated token...
replacebreak <- gsub(pattern="/", replacement=" ", x=nullmod$Gene)
splitbreak <- strsplit(replacebreak, split=" ")
genename1 <- sapply(splitbreak, function(x){
  paste(x[[3]])
})
head(genename1)
# ...then keep only the part before the first "_" as the bare gene ID.
replacebreak2 <- gsub(pattern="_", replacement=" ", x=genename1)
splitbreak2 <- strsplit(replacebreak2, split=" ")
genename <- sapply(splitbreak2, function(x){
  paste(x[[1]])
})
head(genename)
# merge the information for both models
# NOTE(review): rows are matched by position, which assumes both lnL files
# list genes in the same order — confirm upstream.
LRTdat <- data.frame("Gene"=genename, "Gene_nullmod"=nullmod$Gene, "lnL_nullmod"=nullmod$lnL,
                     "Gene_testmod"=testmod$Gene, "lnL_testmod"=testmod$lnL)
# ----------------- calculate LRT -----------------
# Likelihood ratio test statistic: 2 * (lnL_test - lnL_null). Compared to a
# chi-square distribution with 1 df, so the cutoffs below (3.84 / 6.64 /
# 10.83) correspond to p = 0.05 / 0.01 / 0.001.
LRTdat$LRT <- 2*(LRTdat$lnL_testmod - LRTdat$lnL_nullmod)
head(LRTdat)
# plot LRT and significance cutoffs
# NOTE(review): hard-coded to 163 genes; assumes LRTdat has exactly 163 rows.
rownums <- c(1:163)
jpeg("LRTplot.jpg", height=5, width=5, units="in", res=500)
plot(rownums, LRTdat$LRT, pch=20, cex=1.5, xlab="Gene ID", ylab="LRT")
abline(h=3.84, lwd=2, col="tomato")
abline(h=6.64, lty=2, lwd=2, col="deepskyblue2")
abline(h=10.83, lty=4, lwd=2, col="seagreen3")
dev.off()
# Which columns have significant LRT? (df = 1)
dim(LRTdat[LRTdat$LRT >= 3.84,]) # p=0.05
dim(LRTdat[LRTdat$LRT >= 6.64,]) # p=0.01
dim(LRTdat[LRTdat$LRT >= 10.83,]) # p=0.001
# ----------------- search GO terms/info -----------------
# Query Ensembl (zebra finch dataset) for GO annotations of every gene in
# LRTdat, split them by GO namespace, and reshape into gene x GO-term 0/1
# matrices used by the NMDS below.
ensembl = useMart("ensembl", dataset="tguttata_gene_ensembl")
filters = listFilters(ensembl)
attributes = listAttributes(ensembl)
alldata_go <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol', 'name_1006', 'namespace_1003'), filters='ensembl_peptide_id', values=LRTdat$Gene, mart=ensembl)
head(alldata_go) #i don't think HBB is here because it doesn't have a zebra finch ensembl id
alldata_BP <- alldata_go[alldata_go$namespace_1003 == "biological_process",] # not all ensembl ids have BP
alldata_MF <- alldata_go[alldata_go$namespace_1003 == "molecular_function",] # not all ensembl ids have MF
alldata_CC <- alldata_go[alldata_go$namespace_1003 == "cellular_component",] # not all ensembl ids have MF
head(alldata_MF)
head(alldata_BP)
head(alldata_CC)
# Indicator used by spread() below to mark gene/term presence.
alldata_MF$value <- 1
alldata_BP$value <- 1
alldata_CC$value <- 1
dim(alldata_MF)
dim(alldata_BP)
dim(alldata_CC)
# Flag significance at the three chi-square (df = 1) cutoffs.
LRTdat$signif_grp_05 <- ifelse(LRTdat$LRT > 3.84, "signif", "nonsig")
LRTdat$signif_grp_01 <- ifelse(LRTdat$LRT > 6.64, "signif", "nonsig")
LRTdat$signif_grp_001 <- ifelse(LRTdat$LRT > 10.83, "signif", "nonsig")
head(LRTdat)
# Attach the LRT/significance columns to each namespace's annotations.
MFmerge <- merge(x=alldata_MF, y=LRTdat, by.x="ensembl_peptide_id", by.y="Gene")
dim(MFmerge)
head(MFmerge)
BPmerge <- merge(x=alldata_BP, y=LRTdat, by.x="ensembl_peptide_id", by.y="Gene")
dim(BPmerge)
head(BPmerge)
CCmerge <- merge(x=alldata_CC, y=LRTdat, by.x="ensembl_peptide_id", by.y="Gene")
dim(CCmerge)
head(CCmerge)
# Wide gene x GO-term presence/absence matrices (NA -> 0 after spreading).
MFdat <- spread(data=MFmerge, key=name_1006, value=value)
MFdat[is.na(MFdat)] <- 0
BPdat <- spread(data=BPmerge, key=name_1006, value=value)
BPdat[is.na(BPdat)] <- 0
CCdat <- spread(data=CCmerge, key=name_1006, value=value)
CCdat[is.na(CCdat)] <- 0
head(CCdat)
# ----------------- Non-metric multidimensional scaling -----------------
# 2-D NMDS on the GO-term indicator columns. Columns 12+ hold the 0/1 term
# indicators; columns 1-11 are the merged metadata and are excluded.
MF_NMDS = metaMDS(MFdat[12:ncol(MFdat)], k=2)
BP_NMDS = metaMDS(BPdat[12:ncol(BPdat)], k=2)
CC_NMDS = metaMDS(CCdat[12:ncol(CCdat)], k=2)
# not converging - doesn't matter if i add trymax=100, still doesn't converge
# test different distance metrics
stressplot(MF_NMDS)
stressplot(BP_NMDS)
stressplot(CC_NMDS)
# 3x3 panel: each GO namespace (rows) at each significance cutoff (columns);
# grey hull = non-significant genes, red dashed hull = significant genes.
jpeg("all_NMDS.jpg", height=10, width=10, units="in", res=500)
par(mfrow=c(3,3))
ordiplot(MF_NMDS,type="n", main="MF, p=0.05")
ordihull(MF_NMDS, groups=MFdat$signif_grp_05, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(MF_NMDS, groups=MFdat$signif_grp_05, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(MF_NMDS,type="n", main="MF, p=0.01")
ordihull(MF_NMDS, groups=MFdat$signif_grp_01, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(MF_NMDS, groups=MFdat$signif_grp_01, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(MF_NMDS,type="n", main="MF, p=0.001")
ordihull(MF_NMDS, groups=MFdat$signif_grp_001, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(MF_NMDS, groups=MFdat$signif_grp_001, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(BP_NMDS,type="n", main="BP, p=0.05")
ordihull(BP_NMDS, groups=BPdat$signif_grp_05, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(BP_NMDS, groups=BPdat$signif_grp_05, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(BP_NMDS,type="n", main="BP, p=0.01")
ordihull(BP_NMDS, groups=BPdat$signif_grp_01, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(BP_NMDS, groups=BPdat$signif_grp_01, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(BP_NMDS,type="n", main="BP, p=0.001")
ordihull(BP_NMDS, groups=BPdat$signif_grp_001, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(BP_NMDS, groups=BPdat$signif_grp_001, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(CC_NMDS,type="n", main="CC, p=0.05")
ordihull(CC_NMDS, groups=CCdat$signif_grp_05, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(CC_NMDS, groups=CCdat$signif_grp_05, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(CC_NMDS,type="n", main="CC, p=0.01")
ordihull(CC_NMDS, groups=CCdat$signif_grp_01, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(CC_NMDS, groups=CCdat$signif_grp_01, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
ordiplot(CC_NMDS,type="n", main="CC, p=0.001")
ordihull(CC_NMDS, groups=CCdat$signif_grp_001, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(CC_NMDS, groups=CCdat$signif_grp_001, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
dev.off()
# 1x3 summary panel at p = 0.05 only, without hull labels.
jpeg("0.05_NMDS.jpg", height=4, width=6, units="in", res=500)
par(mfrow=c(1,3))
ordiplot(MF_NMDS,type="n", main="Molecular function")
ordihull(MF_NMDS, groups=MFdat$signif_grp_05, draw="polygon", label=F, show.groups="nonsig", col="darkgrey")
ordihull(MF_NMDS, groups=MFdat$signif_grp_05, draw="polygon", label=F, show.groups="signif", col="red", lty=2)
ordiplot(BP_NMDS,type="n", main="Biological process")
ordihull(BP_NMDS, groups=BPdat$signif_grp_05, draw="polygon", label=F, show.groups="nonsig", col="darkgrey")
ordihull(BP_NMDS, groups=BPdat$signif_grp_05, draw="polygon", label=F, show.groups="signif", col="red", lty=2)
ordiplot(CC_NMDS,type="n", main="Cellular component")
ordihull(CC_NMDS, groups=CCdat$signif_grp_05, draw="polygon", label=F, show.groups="nonsig", col="darkgrey")
ordihull(CC_NMDS, groups=CCdat$signif_grp_05, draw="polygon", label=F, show.groups="signif", col="red", lty=2)
dev.off()
# individual plots for p=0.05
# Full-size per-namespace plots, with species/site labels added via orditorp.
jpeg("0.05MF_NMDS.jpg", height=10, width=10, units="in", res=500)
ordiplot(MF_NMDS,type="n", main="MF, p=0.05")
orditorp(MF_NMDS,display="species",cex=1.5,col="tomato",air=0.01)
orditorp(MF_NMDS,display="sites",cex=1,air=0.1)
ordihull(MF_NMDS, groups=MFdat$signif_grp_05, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(MF_NMDS, groups=MFdat$signif_grp_05, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
#legend("topright", pch=c(16, 16), cex=c(1.5, 1.5), col=c("grey", "red"), c("Non-significant", "Significant"), bty="n")
dev.off()
jpeg("0.05BP_NMDS.jpg", height=10, width=10, units="in", res=500)
ordiplot(BP_NMDS,type="n", main="BP, p=0.05")
orditorp(BP_NMDS,display="species",cex=1.5,col="tomato",air=0.01)
orditorp(BP_NMDS,display="sites",cex=1,air=0.1)
ordihull(BP_NMDS, groups=BPdat$signif_grp_05, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(BP_NMDS, groups=BPdat$signif_grp_05, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
#legend("topright", pch=c(16, 16), cex=c(1.5, 1.5), col=c("grey", "red"), c("Non-significant", "Significant"), bty="n")
dev.off()
jpeg("0.05CC_NMDS.jpg", height=10, width=10, units="in", res=500)
ordiplot(CC_NMDS,type="n", main="CC, p=0.05")
orditorp(CC_NMDS,display="species",cex=1.5,col="tomato",air=0.01)
orditorp(CC_NMDS,display="sites",cex=1,air=0.1)
ordihull(CC_NMDS, groups=CCdat$signif_grp_05, draw="polygon", label=T, show.groups="nonsig", col="darkgrey")
ordihull(CC_NMDS, groups=CCdat$signif_grp_05, draw="polygon", label=T, show.groups="signif", col="red", lty=2)
#legend("topright", pch=c(16, 16), cex=c(1.5, 1.5), col=c("grey", "red"), c("Non-significant", "Significant"), bty="n")
dev.off()
# ----------------- GO terms for each signif level -----------------
# Subset the LRT table at each conventional chi-square (df = 1) cutoff and
# pull GO annotations for the significant genes via biomaRt.
# new dataframes for each level of significance
p0.05 <- LRTdat[LRTdat$LRT >= 3.84,]
p0.01 <- LRTdat[LRTdat$LRT >= 6.64,]
p0.001 <- LRTdat[LRTdat$LRT >= 10.83,]
# genes not significant in codeml (at any level)
nonsig_genes <- LRTdat[LRTdat$LRT < 3.84,]
# FIX: the original queried only peptide id + symbol here, then subset an
# undefined object `p0.05_go2` on `namespace_1003`. Request the GO columns in
# the same query (as done for `alldata_go` above) and subset that result.
p0.05_go <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol', 'name_1006', 'namespace_1003'), filters='ensembl_peptide_id', values=p0.05$Gene, mart=ensembl)
head(p0.05_go)
dim(p0.05_go)
# keep only biological-process annotations
p0.05_BP <- p0.05_go[p0.05_go$namespace_1003 == "biological_process",]
head(p0.05_BP)
# gene x GO-term incidence table for the significant genes
dat_tab <- table(p0.05_BP$ensembl_peptide_id, p0.05_BP$name_1006)
dat_tab[1:5, 1:5]
# #first column is an unidentified gene - that's why the colname is blank
# #out of 80, there are only 60 with known biological_process functions
# write.csv(dat_tab, "dat_tab.csv")
# p0.01_go <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol'), filters='ensembl_peptide_id', values=p0.01$Gene, mart=ensembl)
# head(p0.01_go)
# dim(p0.01_go)
# p0.01_go2 <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol', 'go_id'), filters='ensembl_peptide_id', values=p0.01$Gene, mart=ensembl)
# p0.01goterms <- data.frame("go_description"=Term(p0.01_go2$go_id))
# head(p0.01goterms)
#
# p0.001_go <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol'), filters='ensembl_peptide_id', values=p0.001$Gene, mart=ensembl)
# head(p0.001_go)
# dim(p0.001_go)
# p0.001_go2 <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol', 'go_id'), filters='ensembl_peptide_id', values=p0.001$Gene, mart=ensembl)
# p0.001goterms <- data.frame("go_description"=Term(p0.001_go2$go_id))
# head(p0.001goterms)
# nonsig_go <- getBM(attributes=c('ensembl_peptide_id', 'hgnc_symbol', 'name_1006', 'namespace_1003'), filters='ensembl_peptide_id', values=nonsig_genes$Gene, mart=ensembl)
# head(nonsig_go)
# dim(nonsig_go)
# nonsig_BP <- nonsig_go[nonsig_go$namespace_1003 == "biological_process",]
# nonsig_tab <- table(nonsig_BP$ensembl_peptide_id, nonsig_BP$name_1006)
# nonsig_tab[1:5, 1:5]
|
970fc17f26f8ff4468eb9fa36d54042f7c4c50c0 | 779632d7e64f27db6fcc2e6e0bd880684385869f | /p70.R | db84c685ea0d944e6eb23067f2328121b1007e94 | [] | no_license | benjamin22-314/Project-Euler-in-R | 347c82a5b571f93c84bf8b890d3d9189ed60eafc | 071e5f50d6c8fbb2c9d156a7d83d70f0e147462a | refs/heads/master | 2022-04-29T23:49:04.630837 | 2019-01-13T05:11:48 | 2019-01-13T05:11:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,132 | r | p70.R | # [1] "minRatio = 1.00070905112481 n = 8319823"
# user system elapsed
# 1.001 0.000 0.998
# Search for the composite n = p1 * p2 < 1e7 whose totient phi(n) is a digit
# permutation of n, minimising n / phi(n) (a Project Euler 70 style search,
# per the filename p70.R). Candidate primes are taken from a window around
# sqrt(1e7), and phi is computed with the two-prime shortcut sPhi().
# Prints the best ratio / n found and the elapsed time.
main <- function(){
  pmt <- proc.time()
  library(numbers)   # provides Primes(); not part of base R
  num <- 1e7
  # candidate primes in a +/-1000 window around sqrt(1e7)
  upper <- ceiling(sqrt(1e7))+1000
  lower <- upper-2000
  x <- Primes(lower, upper)
  nSave <- 0
  minRatio <- 10
  # iterate over all unordered pairs of distinct candidate primes
  # NOTE(review): seq(length(x)-1) misbehaves if Primes() returns < 2 primes —
  # fine for this fixed window, but confirm before reusing.
  for(i in seq(length(x)-1)){
    for(j in seq(i+1,length(x))){
      if( x[i]*x[j] < num){
        # equal sorted digits <=> phi(n) is a permutation of the digits of n
        if( sepSortDig(x[i]*x[j]) == sepSortDig(sPhi(x[i],x[j])) ){
          # keep the pair with the smallest n / phi(n)
          if( x[i]*x[j]/sPhi(x[i],x[j]) < minRatio){
            nSave <- x[i]*x[j]
            minRatio <- x[i]*x[j]/sPhi(x[i],x[j])
            #print(paste("minRatio =", minRatio,"n =",nSave))
          }
        }
      }
    }
  }
  print(paste("minRatio =", minRatio,"n =",nSave))
  print(proc.time()-pmt)
}
# Euler totient for n = p1 * p2 where p1 and p2 are distinct primes:
# phi(p1 * p2) = (p1 - 1)(p2 - 1). Problem-specific shortcut, not generalised.
sPhi <- function(p1,p2){
  totient <- (p1 - 1) * (p2 - 1)
  as.integer(totient)
}
# Sort the decimal digits of x in descending order and return them glued back
# together as a single number (e.g. 10234 -> 43210). Used to test whether two
# numbers are digit permutations of each other.
sepSortDig <- function(x){
  digit_chars <- unlist(strsplit(as.character(x), split = ""))
  ordered <- sort(digit_chars, decreasing = TRUE)
  as.numeric(paste(ordered, collapse = ""))
}
33baeb927a7c3f77bc8f8b2132a8117eb91e1716 | c24399a671dce7a80777e35d4107f6095f32f6be | /REnv_test4.R | cf7fb06afae14fd83a259cb5e0d91953bbf2c69c | [] | no_license | jiangweiyao/renv_test4 | 278b2a7443316669926858018ee03a19066ccf10 | d3678e7efa13dbec94f88628abeed9325b9833c2 | refs/heads/main | 2023-03-25T10:33:47.408479 | 2021-01-26T21:41:47 | 2021-01-26T21:41:47 | 333,224,772 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 161 | r | REnv_test4.R | #install.packages("renv")
#install.packages("tidyverse")
#install.packages("BiocManager")
#BiocManager::install("DESeq2")
library("tidyverse")
library("DESeq2") |
a1a2ff619fad1c8fed860b472f9bc96a919bebc9 | 11727cee6c7faba955f50eba0b86d828b98d3dc4 | /man/cc_read_file.Rd | 1d9a19c2b5cade60f3ccb31cb695430b38978470 | [] | no_license | sriramab/cchaid | 25a68915ad9cf1223ba59ce7c04a31cf789ef4fe | edd0348ae1fe1cc4645f478eefe1a27414d77325 | refs/heads/master | 2020-06-13T19:17:04.539489 | 2017-10-22T06:16:38 | 2017-10-22T06:16:38 | 75,615,748 | 0 | 1 | null | 2017-02-14T09:57:21 | 2016-12-05T10:50:11 | R | UTF-8 | R | false | true | 402 | rd | cc_read_file.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cchaid.R
\name{cc_read_file}
\alias{cc_read_file}
\title{This function reads user file}
\usage{
cc_read_file(x)
}
\arguments{
\item{x}{A file name}
}
\value{
A data frame containing the user-selected fields read from a tab-delimited text file.
}
\description{
This function reads user file
}
\examples{
my.df<-cc_read_file("filename.txt")
}
|
0591c8ec1dbedc44243c7112f7f63c281db426cd | 082bbd3b3e173d802cdc8f4ee090aad0fb821760 | /HW1/hw1-2-7.R | 5445fb1c36cd7efced354b48e7cc81ebe90efd02 | [] | no_license | JohnnyBarber/Investment | 2357b9004e1efdd771aabd931eda6b810369e106 | 67043859e6d61ed6d07570d9fff394977aa964f2 | refs/heads/master | 2020-04-27T19:25:22.307239 | 2019-03-08T22:09:49 | 2019-03-08T22:09:49 | 174,617,142 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,948 | r | hw1-2-7.R | # Problem Set 1 Solution Template
# mysoln collects the answers for Problem Set 1. Its first element is the
# vector of student names in the group (these must match the names shown on
# the pdf document provided by the MFE office).
mysoln <- list(student = c("Huanyu Liu", "Jiaqi Li", "Hyeuk Jung", "Xichen Luo"))

# Disable scientific notation and print four significant digits.
options(scipen=999)
options(digits=4)

# ---- Question 1: future value of $10,000 over 3 years at 6% ----
principal <- 10000
horizon <- 3
rate <- 0.06
fv_annual    <- principal * (1 + rate)^horizon               # annual compounding (EAR)
fv_quarterly <- principal * (1 + rate / 4)^(horizon * 4)     # quarterly APR compounding
fv_monthly   <- principal * (1 + rate / 12)^(horizon * 12)   # monthly APR compounding
mysoln[["Q1"]] <- c(fv_annual, fv_quarterly, fv_monthly)

# ---- Question 2: payments received every three years ----
payment_3y <- 500000 * 0.1
marriage_horizon <- 30   # stated horizon; not needed in the closed-form below
rate <- 0.05
# Three-year discount factor, reflecting that Charlotte receives the payment
# every three years.
df3 <- 1 / (1 + rate)^3
# a. PV of 10 payments spaced three years apart.
pv_payments <- payment_3y / ((1 + rate)^3 - 1) * (1 - df3^10)
# b. Solve 700k = cf / ((1+r)^3 - 1) * (1 - df3^10) for the payment cf.
cf_needed <- 700000 * ((1 + rate)^3 - 1) / (1 - df3^10)
mysoln[["Q2"]] <- c(pv_payments, cf_needed)

# ---- Question 3: retirement savings ----
annual_saving <- 200000 * 0.3
work_years <- 35
rate <- 0.04
# a. FV at retirement of 35 end-of-year deposits (vectorised annuity sum).
fv_at_retirement <- sum(annual_saving * (1 + rate)^(work_years - seq_len(work_years)))
# b. Level consumption over 20 retirement years:
#    fv = consumption / r * (1 - 1/(1+r)^n), solved for consumption.
retirement_years <- 20
annual_consumption <- fv_at_retirement * rate / (1 - 1 / (1 + rate)^retirement_years)
mysoln[["Q3"]] <- c(fv_at_retirement, annual_consumption)

# ---- Question 4: 30-year $400k mortgage at 7% APR, monthly compounding ----
loan <- 400000
term_years <- 30
apr <- 0.07
# a. Effective annual rate.
ear <- (1 + apr / 12)^12 - 1
# b. Monthly payment: loan = pmt / r_m * (1 - 1/(1+r_m)^n), solved for pmt.
r_m <- apr / 12
n_payments <- term_years * 12
monthly_payment <- loan * r_m / (1 - 1 / (1 + r_m)^n_payments)
# c. Outstanding balance after the 20th monthly payment = PV of the
#    remaining payments (vectorised).
months_remaining <- term_years * 12 - 20
balance_after_20 <- sum(monthly_payment / (1 + r_m)^seq_len(months_remaining))
mysoln[["Q4"]] <- c(ear, monthly_payment, balance_after_20)

# ---- Question 5: IRR pathologies ----
# No IRR: cash flows never change sign,
# e.g. CF0 = 200, CF1 = 600, CF2 = 500.
no_irr <- c(200, 600, 500)
# Multiple IRRs: cash flows change sign more than once,
# e.g. CF0 = -100, CF1 = 600, CF2 = -800 has IRR1 = 1 and IRR2 = 3.
multi_irr <- c(-100, 600, -800)
mysoln[["Q5"]] <- list(a = no_irr, b = multi_irr)

#return my solution
mysoln
|
4ad61f830a8cfee4beb6d8d563b58d0beb1d0a6a | 7c2e136a2ce427597843354ef1bab5797f8ebbc2 | /check_germline_artefact_functions.R | ee164e5259c70a618f73591de24435a9771d9c54 | [] | no_license | shulp2211/InfiniteSites | 4d6515562f70b20c474976b4e39a7eee2d171ddb | 17514d03786ad3237260b823c31cef37f9493b7a | refs/heads/master | 2023-06-24T00:12:20.609385 | 2021-07-22T09:21:14 | 2021-07-22T09:21:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,563 | r | check_germline_artefact_functions.R | #### additional checks to exclude contamination by germline calls
#
#### function to annotate PCAWG vcf with the additional QC metrics (carry this along to the end)
## get allele counts in matched normal
# Pile up the matched-normal BAM at every SNV locus and annotate `snvs_vcf`
# (a VCF/GRanges-like object with REF/ALT columns) with normal read counts:
#   n_ref_count  - reads supporting the REF allele
#   n_alt_count  - reads supporting the (first) ALT allele
#   n_alt_count2 - reads supporting the second ALT allele (only added when
#                  biallelic-divergent variants are present)
#   n_total_cov  - total A/C/G/T coverage at the locus
# Counted reads must be proper pairs, non-duplicates, passing QC, with base
# quality >= minbq and mapping quality >= minmq. If the BAM file does not
# exist, the three count columns are set to NA and the object is returned.
annot_normal_read_counts <- function(normalbam, snvs_vcf, minbq = 20, minmq = 35, ncores = NCORES) {
  if (!file.exists(normalbam)) {
    print("Normal bam not found")
    snvs_vcf$n_ref_count <- NA
    snvs_vcf$n_alt_count <- NA
    snvs_vcf$n_total_cov <- NA
    return(snvs_vcf)
  }
  # pile up parameters
  flag <- scanBamFlag(isNotPassingQualityControls = F, isDuplicate = F, isProperPair = T)
  pupparam <- PileupParam(max_depth=250, min_base_quality=minbq, min_mapq=minmq, min_nucleotide_depth=0,
                          min_minor_allele_depth=0, distinguish_strands=F, distinguish_nucleotides=TRUE,
                          ignore_query_Ns=TRUE, include_deletions=F, include_insertions=F, left_bins=NULL, query_bins=NULL, cycle_bins=NULL)
  # pile up per chromosome; drop chromosomes that carry no variants
  vcflocils <- split(x = granges(snvs_vcf), f = seqnames(snvs_vcf))
  vcflocils <- vcflocils[sapply(X = vcflocils, FUN = length) > 0]
  # serial for small inputs, otherwise parallel across chromosomes
  if (ncores == 1 || length(snvs_vcf) < 1000) {
    pupout <- lapply(X = vcflocils, FUN = function(vcfpos, flag, normalbam, pupparam) {
      scbamparam <- ScanBamParam(which = vcfpos, flag = flag)
      pileup(file = normalbam, index = normalbam, scanBamParam = scbamparam, pileupParam = pupparam)
    }, flag = flag, normalbam = normalbam, pupparam = pupparam)
  } else {
    pupout <- mclapply(X = vcflocils, FUN = function(vcfpos, flag, normalbam, pupparam) {
      scbamparam <- ScanBamParam(which = vcfpos, flag = flag)
      pileup(file = normalbam, index = normalbam, scanBamParam = scbamparam, pileupParam = pupparam)
      # FIX: was mc.cores = ceiling(ncores)/2, which yields a fractional
      # worker count for odd ncores; the intent is half the cores, rounded up.
    }, flag = flag, normalbam = normalbam, pupparam = pupparam, mc.cores = ceiling(ncores / 2), mc.preschedule = F)
  }
  # reshape output: one row per locus, one column per observed nucleotide
  pupout <- dcast(data = do.call(rbind, pupout), formula = seqnames + pos ~ nucleotide, value.var = "count", fill = 0)
  pupout_gr <- GRanges(seqnames = pupout$seqnames, ranges = IRanges(pupout$pos, width = 1))
  # guarantee all four base columns exist even if a base was never observed
  sharedcols <- intersect(x = c("A", "C", "G", "T"), colnames(pupout))
  mcols(pupout_gr)[, c("A", "C", "G", "T")] <- 0
  mcols(pupout_gr)[, sharedcols] <- pupout[, sharedcols]
  # total coverage; the branches guard against data.frame dimension dropping
  # when only a single row or a single base column is present
  if (nrow(pupout) == 1) {
    pupout_gr$total <- sum(pupout[, -c(1,2)])
  } else if (length(sharedcols) == 1) {
    pupout_gr$total <- pupout[, sharedcols]
  } else {
    pupout_gr$total <- rowSums(pupout[, -c(1,2)])
  }
  # transfer the counts back onto the SNVs
  matchidxs <- nearest(x = snvs_vcf, subject = pupout_gr)
  snvs_vcf$n_ref_count <- 0
  snvs_vcf$n_alt_count <- 0
  snvs_vcf$n_total_cov <- 0
  # nearest() can return NA when no read counts were collected at a locus
  goodidxs <- which(!is.na(matchidxs))
  matchidxs <- na.omit(matchidxs)
  # modified to take in biallelic divergent variants
  snvs_vcf$n_ref_count[goodidxs] <- ifelse(snvs_vcf[goodidxs]$REF == "A", pupout_gr[matchidxs]$A,
                                           ifelse(snvs_vcf[goodidxs]$REF == "C", pupout_gr[matchidxs]$C,
                                                  ifelse(snvs_vcf[goodidxs]$REF == "G", pupout_gr[matchidxs]$G, pupout_gr[matchidxs]$'T')))
  if (any(lengths(snvs_vcf[goodidxs]$ALT) > 1)) {
    alt1 <- unlist(DNAStringSetList(sapply(snvs_vcf[goodidxs]$ALT, "[", 1)))
    alt2 <- unlist(DNAStringSetList(sapply(snvs_vcf[goodidxs]$ALT, "[", 2)))
    snvs_vcf$n_alt_count[goodidxs] <- ifelse(alt1 == "A", pupout_gr[matchidxs]$A,
                                             ifelse(alt1 == "C", pupout_gr[matchidxs]$C,
                                                    ifelse(alt1 == "G", pupout_gr[matchidxs]$G, pupout_gr[matchidxs]$'T')))
    snvs_vcf$n_alt_count2 <- 0
    snvs_vcf$n_alt_count2[goodidxs] <- ifelse(alt2 == "A", pupout_gr[matchidxs]$A,
                                              ifelse(alt2 == "C", pupout_gr[matchidxs]$C,
                                                     ifelse(alt2 == "G", pupout_gr[matchidxs]$G, pupout_gr[matchidxs]$'T')))
  } else {
    snvs_vcf$n_alt_count[goodidxs] <- ifelse(snvs_vcf[goodidxs]$ALT == "A", pupout_gr[matchidxs]$A,
                                             ifelse(snvs_vcf[goodidxs]$ALT == "C", pupout_gr[matchidxs]$C,
                                                    ifelse(snvs_vcf[goodidxs]$ALT == "G", pupout_gr[matchidxs]$G, pupout_gr[matchidxs]$'T')))
  }
  snvs_vcf$n_total_cov[goodidxs] <- pupout_gr[matchidxs]$total
  return(snvs_vcf)
}
# already annotated with 1000G AF even
# annot_1kG_matches <- function(refalleles_gr, snvs_vcf) {
#
# refoverlaps <- findOverlaps(query = snvs_vcf, subject = refalleles_gr, type = "equal")
# snvs_vcf$in1kG <- F
# snvs_vcf[queryHits(refoverlaps)]$in1kG <- snvs_vcf[queryHits(refoverlaps)]$ALT == refalleles_gr[subjectHits(refoverlaps)]$alt
#
# return(snvs_vcf)
# }
# Lift the hg19 SNV loci over to GRCh38 and flag, per variant, whether the
# recorded REF allele matches the GRCh38 reference base (`hg38clean`).
# Variants that lift to zero or multiple GRCh38 loci keep hg38clean = FALSE.
annot_hg38_consistency <- function(snvs_vcf, hg19tohg38) {
  allsnvs_gr <- snvs_vcf
  seqlevelsStyle(allsnvs_gr) <- "UCSC"
  # FIX: use the chain passed in as `hg19tohg38`; the original ignored this
  # argument and read the global `hg19tohg38chain` instead.
  allsnvs_grhg38 <- liftOver(chain = hg19tohg38, x = allsnvs_gr)
  # keep only loci with a unique hg38 mapping
  allsnvs_grhg38 <- unlist(allsnvs_grhg38[names(which(lengths(allsnvs_grhg38) == 1))], use.names = T)
  # fix names if appended - package version differences
  names(allsnvs_grhg38) <- unlist(sapply(X = strsplit(x = names(allsnvs_grhg38), split = ".", fixed = T), FUN = "[", 1))
  seqlevelsStyle(allsnvs_grhg38) <- "ensembl"
  # GRCh38 reference base at each lifted-over locus
  allsnvs_grhg38$REF38 <- getSeq(x = BSgenome.Hsapiens.NCBI.GRCh38, names = allsnvs_grhg38)
  # flag variants whose hg19 REF equals the GRCh38 reference base, matching
  # back to the original object by variant name
  snvs_vcf$hg38clean <- F
  refmatchidxs <- which(allsnvs_grhg38$REF38 == allsnvs_grhg38$REF)
  if (length(refmatchidxs) > 0) {
    mcols(snvs_vcf)[names(allsnvs_grhg38)[refmatchidxs], "hg38clean"] <- T
  }
  return(snvs_vcf)
}
# if "Validation_status field available, USE!!!
# Ensure the `Validation_status` metadata column exists on the SNV object;
# when it is absent, create it filled with NA. Existing values are untouched.
annot_validation_status <- function(snvs_vcf) {
  existing_cols <- colnames(mcols(snvs_vcf))
  if (!("Validation_status" %in% existing_cols)) {
    snvs_vcf$Validation_status <- NA
  }
  snvs_vcf
}
library(rtracklayer)
library(BSgenome.Hsapiens.NCBI.GRCh38)
library(reshape2)
add_snv_qc <- function(sampleid, sampledir, releasetable, snvs_vcf, hg19tohg38, normalbam, minbq = 20, minmq = 35, ncores = NCORES, bamdir = BAMDIR, checkbam = T) {
  # Run (or load from cache) the per-SNV QC annotation for one PCAWG sample:
  # normal-BAM allele counts, validation status and GRCh38 REF consistency.
  # Results are cached as an .RDS next to the sample's consensus calls; if the
  # cache exists it is loaded and all recomputation is skipped.
  # NOTE(review): the `normalbam` argument is overwritten below whenever
  # checkbam = TRUE, so callers' values are only honoured via `bamdir` —
  # confirm this is intended.
  annotfile <- file.path(sampledir, paste0(sampleid, ".consensus.20160830.somatic.snv_mnv.annot.gr.RDS"))
  if (file.exists(annotfile)) {
    snvs_vcf <- readRDS(file = annotfile)
  } else {
    if (checkbam) {
      # annotate matched normal read counts/alt alleles
      # The normal BAM path layout differs between the shared ICGC mirror and
      # the local realignment directory.
      if (grepl(pattern = "/srv/shared/vanloo/ICGC/", x = bamdir, fixed = T)) {
        normalbam <- file.path(bamdir, paste0(releasetable[releasetable$tumor_wgs_aliquot_id == sampleid, c("dcc_project_code", "normal_wgs_bwa_alignment_bam_file_name")], collapse = "/WGS/"))
      } else {
        normalbam <- file.path(bamdir, releasetable[releasetable$tumor_wgs_aliquot_id == sampleid, "dcc_project_code"], sampleid, paste0(sampleid, "_normal.aln.recal.bam"))
      }
      snvs_vcf <- annot_normal_read_counts(normalbam = normalbam, snvs_vcf = snvs_vcf, minbq = minbq, minmq = minmq, ncores = ncores)
    } else {
      # no BAM check requested: count columns are present but uninformative
      snvs_vcf$n_ref_count <- NA
      snvs_vcf$n_alt_count <- NA
      snvs_vcf$n_total_cov <- NA
    }
    # annotate validation status
    snvs_vcf <- annot_validation_status(snvs_vcf = snvs_vcf)
    # annotate hg38 consistency
    snvs_vcf <- annot_hg38_consistency(snvs_vcf = snvs_vcf, hg19tohg38 = hg19tohg38)
    # check for presence of QC columns
    if (!all(c("n_ref_count", "n_alt_count", "n_total_cov",
               "hg38clean", "Validation_status") %in% colnames(mcols(snvs_vcf)))) {
      print("Some QC columns are missing")
    }
    # cache the annotated object for future calls
    saveRDS(object = snvs_vcf, file = annotfile)
  }
  return(snvs_vcf)
}
|
846ed2c35cd071e80424d952a19479ccf17610f8 | 73e10a36d8b651fc9365b270f530c4f4be56aa4f | /plot4.R | 086697df09c57bcc0dc17edda240d0b48ef32ce9 | [] | no_license | sveston/ExData_Plotting1 | 5b7854efd2a104127a802a1242a98d60b508e496 | 96432c7598d16027ed514290a882f3fd15a742bd | refs/heads/master | 2021-01-09T06:46:44.042209 | 2014-05-11T16:37:15 | 2014-05-11T16:37:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,779 | r | plot4.R | ##Read the entire table from folder
## plot4.R -- draws a 2x2 panel of power-consumption time series for
## 2007-02-01 and 2007-02-02 from the UCI household power data set.

## Read the entire table from folder.
## BUGFIX: the raw file encodes missing values as "?"; without
## na.strings = "?" the measurement columns are read as factors, and the
## original as.numeric() calls silently returned factor level codes
## instead of the measured values.
hpc <- read.table("E:/R/exdata-data-household_power_consumption/hpc.txt",
                  sep = ";", quote = "\"", header = TRUE,
                  na.strings = "?", stringsAsFactors = FALSE)
## Convert the Dates to Date class
hpc[, "Date"] <- as.Date(hpc[, "Date"], format = '%d/%m/%Y')
## Subset Dates falling on 2007-02-01 and 2007-02-02
data <- subset(hpc, hpc$Date == "2007-02-01" | hpc$Date == "2007-02-02")
## Ensure the measurement columns are numeric (a no-op when na.strings is
## honoured, but keeps the script robust to pre-cleaned input files).
measure_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
for (col in measure_cols) {
  data[, col] <- as.numeric(data[, col])
}
## Build a POSIXct date-time column from the separate Date and Time fields
data$DateTime <- do.call(paste, c(data[c("Date", "Time")], sep = " "))
data[, "DateTime"] <- as.POSIXct(data[, "DateTime"], format = "%Y-%m-%d %H:%M:%S")
## Set the margin of the plot and divide the canvas to fit 4 plots
par(mar = c(4, 4, 1, 1))
par(mfrow = c(2, 2))
## Top-left: global active power (already in kilowatts).
## BUGFIX: dropped the spurious /1000 scaling present in the original.
plot(data$DateTime, data$Global_active_power, lwd = 1, type = "l",
     xlab = "", ylab = "Global Active Power")
## Top-right: voltage over time
plot(data$DateTime, data$Voltage, lwd = 1, type = "l",
     xlab = "datetime", ylab = "Voltage")
## Bottom-left: three sub-metering series overlaid, with legend
plot(data$DateTime, data$Sub_metering_1, ylim = c(0, 35), lwd = 1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(data$DateTime, data$Sub_metering_2, lwd = 1, col = "red")
lines(data$DateTime, data$Sub_metering_3, lwd = 1, col = "blue")
legend("topright", pch = "", lwd = 1, bty = "n",
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Bottom-right: global reactive power
plot(data$DateTime, data$Global_reactive_power, lwd = 1, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")
|
ddc2c6f2c4a883da69143268b888700d542c6cd9 | 7180123ce9d90a5642d8595eff3bce61f3bdb3e5 | /common/Logging.R | f281178302edf6d5ea896fbce3d6ad05ac8e892f | [] | no_license | isantabarbara/bcldndc | 33f00b4893b9eed4c8406fe10b404ee23b076811 | 1a1b0e7aee54f7d70a46d846f33d9926bf99bee6 | refs/heads/master | 2020-07-16T02:00:52.005235 | 2019-11-24T13:15:41 | 2019-11-24T13:15:41 | 205,696,419 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 968 | r | Logging.R | #Responsability:
#Logging initialization
library(logging)
Logging <- setRefClass(
"logging"
, fields = list(
logPath="character",
logFile="character",
logLevel="character"
)
, methods = list(
#
# Constructor
#
initialize = function(..., logPath="",
logFile="",
logLevel="")
{
callSuper(..., logPath=logPath,
logFile=logFile,
logLevel=logLevel)
},
#
#
# Initialize logging
#
#
ini = function() {
print(logPath)
print(logFile)
print(logLevel)
logReset()
#Show milliseconds
options(digits.secs = 3)
file <- file.path(logPath,logFile)
basicConfig(level='FINEST')
addHandler(writeToFile, file=file, level=logLevel)
with(getLogger(), names(handlers))
},
testLogging = function(text) {
loginfo(text)
}
)
)
|
c59f70bf844a946440de15a528b0fb82b78573ec | fccac07ce25a237f9deeede62323444a0475cd4f | /man/scale_y_numeric.Rd | 5d71123c3d56089bb492433ccb510207cab03d81 | [] | no_license | mark-me/graydon.package | fa47c4b3cf620a67cc66e5809466b041e884a593 | 36630083ea01dc3a8bee54a0d78e5b60144197b7 | refs/heads/master | 2022-08-03T20:59:20.573179 | 2022-07-21T07:09:38 | 2022-07-21T07:09:38 | 192,348,397 | 0 | 3 | null | 2022-07-21T07:09:40 | 2019-06-17T13:03:45 | R | UTF-8 | R | false | true | 412 | rd | scale_y_numeric.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot_tools.R
\name{scale_y_numeric}
\alias{scale_y_numeric}
\title{A function to create a continuous y-axis for formatted numbers}
\usage{
scale_y_numeric(number_decimals = 0, format_EN = FALSE,
scale = c("normal", "k", "M"), ...)
}
\description{
A function to create a continuous y-axis for formatted numbers
}
\keyword{ggplot2}
|
cafdc141a0842dd7bcda3f1584c32aec719ef275 | 3e53500da3f91211ecfbcec0de87c84ef1183f91 | /FREELANCER FILES/TWITTER AN ANALYSIS/vehicle_twiter analysis.R | f0d79d75e615f96fa65d4b3faecec5bc0d88804c | [] | no_license | watex95/R-FOR-DATA-SCIENCE | b773d9d2f365a0792d78f5c19e3d7c2ebf6dc0e3 | 3635b5a25f059ab9f1a6900c85042144a5c7a4b5 | refs/heads/master | 2023-03-12T07:40:16.791781 | 2023-02-09T19:39:25 | 2023-02-09T19:39:25 | 187,982,806 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,830 | r | vehicle_twiter analysis.R | install.packages("twitteR")
#loading libraries
library(dplyr)
library(tidytext)
library(tidyverse)
library(twitteR)
library(tm)
#to get your consumerKey and consumerSecret see the twitteR documentation for instructions
consumer_key<-'2nSlqvVYhQGUxUMY8ATwOSE6v'
consumer_secret<-'RM9xtXqMZXrNyBjMDx1X1ScUvx1AEhmIO0m9FXAlKAr89IszCK'
access_token<- '2758475550-AtHCc2rolkr2ld78aEzoav2BL8efGFbJ6dfqDci'
access_secret<-'GJFJep5OnqqVpO6emhXWVIHfP6R37rupK1SmOGnfSjO9z'
# ?searchTwitteR # STOP AND EXPLAIN TO STUDENTS!
setup_twitter_oauth(consumer_key, consumer_secret,
access_token=access_token, access_secret=access_secret)
tesla <- twitteR::searchTwitter('#Tesla', n = 1000, lang = 'en', since = '2015-06-01', retryOnRateLimit = 1e3)
d = twitteR::twListToDF(tesla)
head(d)
ford <- twitteR::searchTwitter('#Ford', n = 1000, lang = 'en', since = '2015-06-01', retryOnRateLimit = 1e3)
e = twitteR::twListToDF(ford)
head(e)
mercedes <- twitteR::searchTwitter('#Mercedes', n = 1000, lang = 'en', since = '2015-06-01', retryOnRateLimit = 1e3)
a = twitteR::twListToDF(mercedes)
head(a)
# Cleaning the datasets
# Remove http and https elements manually
d$text <- gsub("http[^[:space:]]*","", d$text) # For http
d$text <- gsub("http[^[:space:]]*","", d$text) # For https
e$text <- gsub("http[^[:space:]]*","", e$text) # For http
e$text <- gsub("http[^[:space:]]*","", e$text) # For https
a$text <- gsub("http[^[:space:]]*","", a$text) # For http
a$text <- gsub("http[^[:space:]]*","", a$text) # For https
#tokenizing all 3 datasets from twitter
tidy_tesla <- d %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
head(tidy_tesla)
tidy_ford <- e %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
tidy_mercedes <- a %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
#################################################
### Combining all 3 tidy data frames and creating correlograms
#################################################
library(tidyr)
frequency <- bind_rows(mutate(tidy_tesla, author="Tesla"),
mutate(tidy_ford, author= "Ford"),
mutate(tidy_mercedes, author="Mercedes")) %>% #closing bind_rows
mutate(word=str_extract(word, "[a-z']+")) %>%
count(author, word) %>%
group_by(author) %>%
mutate(proportion = n/sum(n))%>%
select(-n) %>%
spread(author, proportion) %>%
gather(author, proportion, `Ford`, `Mercedes`)
head(frequency)
#let's plot the correlograms:
library(scales)
ggplot(frequency, aes(x=proportion, y=`Tesla`,
color = abs(`Tesla`- proportion)))+
geom_abline(color="grey40", lty=2)+
geom_jitter(alpha=.1, size=2.5, width=0.3, height=0.3)+
geom_text(aes(label=word), check_overlap = TRUE, vjust=1.5) +
scale_x_log10(labels = percent_format())+
scale_y_log10(labels= percent_format())+
scale_color_gradient(limits = c(0,0.001), low = "darkslategray4", high = "gray75")+
facet_wrap(~author, ncol=2)+
theme(legend.position = "none")+
labs(y= "Tesla", x=NULL)
#Taking a look at correlation coefficients
cor.test(data=frequency[frequency$author == "Ford",],
~proportion + `Tesla`)
cor.test(data=frequency[frequency$author == "Mercedes",],
~proportion + `Tesla`)
############################################
## Sentiment analysis
#############################################
# NOTE(review): install.packages() inside a script re-installs on every run.
install.packages("textdata")
library(textdata)
library(tidytext)
get_sentiments('afinn') # Show example of the table
# pulling in sentiment for these 3 tokenized datasets: AFINN assigns each
# word an integer value; summing per tweet id gives a per-tweet score
tidy_tesla %>%
  inner_join(get_sentiments("afinn"))%>%
  group_by(id) %>% #if you remove the group_by it will calculate sentiment for all the data
  summarise(sentiment=sum(value)) %>%
  mutate(method="AFINN")
tidy_ford %>%
  inner_join(get_sentiments("afinn"))%>%
  group_by(id) %>% #if you remove the group_by it will calculate sentiment for all the data
  summarise(sentiment=sum(value)) %>%
  mutate(method="AFINN")
tidy_mercedes %>%
  inner_join(get_sentiments("afinn"))%>%
  group_by(id) %>% #if you remove the group_by it will calculate sentiment for all the data
  summarise(sentiment=sum(value)) %>%
  mutate(method="AFINN")
#let's take a look at the most positive and most negative tokens in the tesla dataset
#(bing lexicon: each word labelled positive or negative)
tidy_tesla_sentiment <- tidy_tesla %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort=T)
print(tidy_tesla_sentiment)
# Bar chart of the top 10 contributing words per sentiment class
tidy_tesla_sentiment %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ungroup() %>%
  mutate(word=reorder(word, n)) %>%
  ggplot(aes(word, n, fill=sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scales = "free_y")+
  labs(y="Contribution to sentiment", x=NULL)+
  coord_flip()
############################################
## TF-IDF analysis
#############################################
# Treat each brand's tweets as one "document" and score words by tf-idf to
# surface the terms most characteristic of each brand.
combined_cars <- bind_rows(mutate(d, make="Tesla"),
                           mutate(e, make= "Ford"),
                           mutate(a, make="Mercedes")
)
# Word counts per brand (stop words deliberately kept for the tf-idf demo)
tesla_modif <- combined_cars %>%
  unnest_tokens(word, text) %>%
  count(make, word, sort=TRUE) %>%
  ungroup()
# Total word count per brand, joined back for later reference
tesla_modif2 <- tesla_modif %>%
  group_by(make) %>%
  summarize(total=sum(n))
tesla_leftjoined <- left_join(tesla_modif, tesla_modif2)
tidy_tesla_tfidf <- tesla_leftjoined %>%
  bind_tf_idf(word, make, n)
tidy_tesla_tfidf # we get all the zeros because we are looking at stop words ... too common
tidy_tesla_tfidf %>%
  arrange(desc(tf_idf))
#what can we say about these words?
#############
# looking at the graphical approach: top 15 tf-idf words per brand
tidy_tesla_tfidf %>%
  arrange(desc(tf_idf)) %>%
  mutate(word=factor(word, levels=rev(unique(word)))) %>%
  group_by(make) %>%
  top_n(15) %>%
  ungroup %>%
  ggplot(aes(word, tf_idf, fill=make))+
  geom_col(show.legend=FALSE)+
  labs(x=NULL, y="tf-idf")+
  facet_wrap(~make, ncol=2, scales="free")+
  coord_flip()
|
e82f3ceb09322e207593c25d4d682d870b9a581f | 49ff29e6edfa8a56e67c7be6dff6cbece1022391 | /ui.R | a65a12cd323aaed7a55b39f56797d65ce1219fc2 | [] | no_license | yxie27/Project3 | 8aedac8090603db303fafd90e8ac0b4f867c08af | 15020699abe9a16293b402fb83a5cf7a83ae09bb | refs/heads/master | 2022-11-24T21:47:44.964230 | 2020-07-28T00:30:48 | 2020-07-28T00:30:48 | 281,841,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,766 | r | ui.R | library(shiny)
library(shinydashboard)
library(dplyr)
library(ggplot2)
library(readr)
library(tree)
#library(randomForest)
library(caret)
library(httr)
library(plotly)
library(RCurl)
#Read in data from my Github url address
url1<-'https://raw.githubusercontent.com/yxie27/Project3/master/World%20Happiness%20Report.csv'
GET(url1, write_disk(tf <- tempfile(fileext = ".csv")))
Data <- read.csv(tf)
Data2 <- Data %>% select(-Overall.rank, -Country.or.region,-Year)
#Data <- read.csv("World Happiness Report.csv")
# Shiny UI: a shinydashboard page with five tabs (information, data
# exploration, PCA, raw data table, modelling).  The input/output ids used
# here must match the objects rendered by the matching server function.
ui <- dashboardPage(
  skin = "red",
  #add title
  dashboardHeader(
    title = "World Happiness Report 2018-2019",
    titleWidth = 750),
  #define sidebar items -- tabName values link each menu item to a tabItem
  dashboardSidebar(
    sidebarMenu(
      menuItem(tabName = "info", "Information", icon = icon("dashboard")),
      menuItem(tabName = "data", "Data Exploration", icon = icon("table")),
      menuItem(tabName = "pca", "Principal Components Analysis", icon = icon("archive")),
      menuItem(tabName = "subdata", "Scroll through the Data", icon = icon("th")),
      menuItem(tabName = "model", "Modeling", icon = icon("laptop"))
    )),
  #define the body of the app
  dashboardBody(
    tabItems(
      #Information tab: static description of the data set and the app
      tabItem(tabName = "info",
              fluidRow(
                #two columns for each of the two items
                column(6,
                       #description of data
                       h1("Data Description"),
                       #box to contain the description
                       box(background = "red", width = 12,
                           em(h4(strong("The World Happiness Report")," is a point of interest survey of the state of worldwide bliss. ")),
                           h4("The report proceeds to pick up worldwide acknowledgment as governments, organizations and respectful society progressively utilize joy pointers to educate their policy-making choices. 
                           Driving specialists over areas – financial matters, brain research, overview investigation, national insights, wellbeing, open approach and more – depict how estimations of well-being can be used effectively to evaluate the advance of countries. 
                           The reports survey the state of bliss within the world nowadays and appear how the modern science of bliss clarifies individual and national varieties in bliss."),
                           h4("This file contains the Happiness Score for 153 countries along with the factors used to explain the score."),
                           h4("The Happiness Score is a national average of the responses to the main life evaluation question asked in the Gallup World Poll (GWP), which uses the Cantril Ladder."),
                           h4("The Happiness Score is explained by the following factors:"),
                           em(h5("- GDP per capita")),
                           em(h5("- Social support")),
                           em(h5("- Healthy Life Expectancy")),
                           em(h5("- Freedom to make life choices")),
                           em(h5("- Generosity")),
                           em(h5("- Perceptions of corruption")),
                           helpText(a(strong("Click here to get more information"), href = "https://www.kaggle.com/mathurinache/world-happiness-report/data"))
                       )
                ),
                column(6,
                       #ability of the APP
                       h1("Ability of the APP"),
                       #box to contain the ability
                       box(background = "red", width = 12,
                           h4("The tabs across the top of the applet allow for navigation between the marginal order statistic distribution visualization, the joint order statistic distribution visualization, and the theory underlying order statistics."),
                           h4("The controls for the visualization sections of the applet are located to the left and the visualizations are available on the right."),
                           h4("Within this app, you can:"),
                           em(h5("- Explore the common numeric and graphical summaries of the data")),
                           em(h5("- Apply Principal Components Analysis (PCA)")),
                           em(h5("- Choose model for certain variables in the dataset and make prediction")),
                           em(h5("- Scroll through the data"))
                       )
                )
              )
      ),
      #Data Exploration tab: year filter, scatterplot with click info,
      #numeric summaries and a downloadable per-year table
      tabItem(tabName = "data",
              fluidPage(
                #title
                titlePanel(
                  uiOutput("title")
                ),
                # Sidebar with options for the data set
                sidebarLayout(
                  sidebarPanel(
                    h3("Select the Year:"),
                    selectizeInput("Year", "Year", selected = 2018, choices = levels(as.factor(Data$Year))),
                    br(),
                    sliderInput("size", "Size of Points on Graph",
                                min = 1, max = 10, value = 5, step = 1),
                    #DT::dataTableOutput('dataByYear'),
                    downloadButton("download_DataTable3", "Download the Dataset by Selected Year")
                  ),
                  # Show output
                  mainPanel(
                    h3("The ggplot of World Happiness Score V.S. GDP : "),
                    plotOutput("Plot",click = "plot_click"),
                    downloadButton("download_ggPlot", "Save image"),
                    h3("Click on plot to get the value of x and y: "),
                    verbatimTextOutput("clicktext"),
                    #plotlyOutput("Plot2"),
                    h3("The numeric summaries: "),
                    textOutput("info"),
                    #tableOutput("table")
                    DT::dataTableOutput('dataByYear')
                  )
                )
              )
      ),
      #Unsupervised Learning tab: PCA intro, algorithm, and interactive plots
      tabItem(tabName = "pca",
              fluidPage(
                #title
                headerPanel(h1("Principal Components Analysis (PCA)")),
                mainPanel(
                  tabsetPanel(
                    tabPanel("Introduction", h4("Principal Components Analysis (PCA) is a dimension reduction technique."),
                             h4("If you have variables, they contain some joint variability/correlation, 
                                PCA looks for linear combination of those variables that account for most of the variability.")
                    ),
                    tabPanel("Algorithm", uiOutput('MathJax')),
                    tabPanel("Biplot & Scree Plot & PCs Values", 
                             sidebarLayout(
                               sidebarPanel(
                                 checkboxGroupInput("Var", "Please select variables for Principal Component Analysis:", choices = names(Data2),selected = list("GDP.per.capita","Social.support"))
                               ),
                               mainPanel(
                                 h3("The Biplot for the Selected Variables:"),
                                 plotOutput("BiPlot"),
                                 downloadButton("download_BiPlot", "Save image"),
                                 h3("The Scree Plot for the Selected Variables:"),
                                 plotOutput("Scree_Plot"),
                                 downloadButton("download_ScreePlot", "Save image"),
                                 h3("The PCs Values for the Selected Variables:"),
                                 verbatimTextOutput("PCsValue")
                               )
                             )
                    )
                  )
                )
              )
      ),
      #Scroll through the Data tab: full data table with download button
      tabItem(tabName = "subdata",
              mainPanel(
                h3("In order to avoid the loss of this data page (because Random Forest model takes a long time to operate), I put the data page before the modeling."),
                DT::dataTableOutput('Data_table'),
                downloadButton("download_DataTable", "Download the Dataset")
              )
      ),
      #Modeling tab: three sub-tabs -- linear regression, regression tree,
      #and random forest, each with its own controls and outputs
      tabItem(tabName = "model",
              tabsetPanel(
                tabPanel("Linear Regression Model",
                         fluidPage(
                           titlePanel("Regression Model"),
                           sidebarLayout(
                             sidebarPanel(
                               selectInput("outcome", label = h3("Outcome"),
                                           choices = list("Score","GDP.per.capita","Social.support","Healthy.life.expectancy","Freedom.to.make.life.choices","Generosity","Perceptions.of.corruption"), selected = 1),
                               selectInput("indepvar", label = h3("Explanatory variable"),
                                           choices = list("Score","GDP.per.capita","Social.support","Healthy.life.expectancy","Freedom.to.make.life.choices","Generosity","Perceptions.of.corruption"), selected = 1)
                             ),
                             mainPanel(
                               tabsetPanel(type = "tabs",
                                           tabPanel("Scatterplot", 
                                                    plotOutput("scatterplot"),
                                                    downloadButton("download_scatterplot", "Save image")
                                           ), 
                                           tabPanel("Distribution", # Plots of distributions
                                                    fluidRow(
                                                      column(6, plotOutput("distribution1"),downloadButton("download_histogram1", "Save image")),
                                                      column(6, plotOutput("distribution2"),downloadButton("download_histogram2", "Save image")))
                                           ),
                                           tabPanel("Model Summary", verbatimTextOutput("summary")), # Regression output
                                           tabPanel("Data", DT::dataTableOutput('tbl'),downloadButton("download_DataTable2", "Download the Dataset")) # Data as datatable
                               )
                             )
                           ))
                ),
                tabPanel("Regression Tree Model",
                         fluidPage(
                           titlePanel("Regression Tree Model"),
                           sidebarLayout(
                             sidebarPanel(
                               sliderInput("pred", "Select the Values of the GDP", min = 0, max = 3, value = 0.001, step = 0.001)
                             ),
                             mainPanel(
                               plotOutput("Regre_tree"),
                               #verbatimTextOutput("CVtree"),
                               h3("The value of root MSE about the prediction is: "),
                               verbatimTextOutput("prediction1"),
                               h3("The Predicted World Happiness Score is:"),
                               verbatimTextOutput("prediction1_GDP")
                             )
                           )
                         )
                ),
                tabPanel("Random Forests Model",
                         fluidPage(
                           titlePanel("Random Forests Model"),
                           sidebarLayout(
                             sidebarPanel(
                               sliderInput("numberTree","Select the Number of Trees: ", min = 1, max = 12, value = 6, step = 1),
                               sliderInput("pred2", "Select the Values of the GDP", min = 0, max = 3, value = 0.001, step = 0.001)
                             ),
                             mainPanel(
                               h3("The value of root MSE about the prediction is (Slow...Please wait): "),
                               verbatimTextOutput("prediction2"),
                               h3("The Results of Random Forest Model (Slow...Please wait): "),
                               verbatimTextOutput("rf_output"),
                               h3("The Misclassificatoon Rate is (Slow...Please wait): "),
                               verbatimTextOutput("rf_output2"),
                               h3("The Predicted World Happiness Score is (Slow...Please wait):"),
                               verbatimTextOutput("prediction2_GDP")
                             )
                           )
                         )
                )
              ))
    )
  )
)
6215582d72c6f46c1abd5593b194544b429376fa | f9cc312bb527138f4c2d6607d9bd98374ae9cf0a | /man/summary.hmclearn.Rd | c2e5f1a3b229a434b15c79c9a005fd24d0a73617 | [] | no_license | be-green/hmclearn | ad1526f0d982af11a3315333cd0d7e760a045198 | 6a83877ab03cc7a76da8c113084fa7f084f9b784 | refs/heads/master | 2021-05-20T09:31:34.646091 | 2020-04-01T15:55:11 | 2020-04-01T15:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,042 | rd | summary.hmclearn.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{summary.hmclearn}
\alias{summary.hmclearn}
\title{Summarizing HMC Model Fits}
\usage{
\method{summary}{hmclearn}(object, burnin = NULL, probs = c(0.05, 0.25, 0.5, 0.75, 0.95), ...)
}
\arguments{
\item{object}{an object of class \code{hmclearn}, usually a result of a call to \code{mh} or \code{hmc}}
\item{burnin}{optional numeric parameter for the number of initial MCMC samples to omit from the summary}
\item{probs}{quantiles to summarize the posterior distribution}
\item{...}{additional arguments to pass to \code{quantile}}
}
\value{
Returns a matrix with posterior quantiles and the posterior scale reduction factor statistic for each parameter.
}
\description{
summary method for class \code{hmclearn}
}
\references{
Gelman, A., et. al. (2013) \emph{Bayesian Data Analysis}. Chapman and Hall/CRC.
Gelman, A. and Rubin, D. (1992) \emph{Inference from Iterative Simulation Using Multiple Sequences}. Statistical Science 7(4) 457-472.
}
|
cc43623f2ac0ecd0054d3c6502d48410d58317e8 | 62a864c706cb41c5ec0a86e456c9b6d674e4fa03 | /Code.R | 8a63d878210fa5126f782afbcb190334bd8c9aa9 | [] | no_license | rakeshsarma/LoanDefault-Indessa | 9c6ff604332526872f114d1fa82f0325e7eea6ff | 52128335f690d9a0b341e7f45983adbe2435670b | refs/heads/master | 2021-01-23T01:07:54.068448 | 2017-04-04T18:17:24 | 2017-04-04T18:17:24 | 85,876,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,769 | r | Code.R | rm(list = ls(all.names = T))
#import data
train_file = "/Users/raki/Data Science/Haker Earth/Loan Default/Data/train_indessa.csv"
test_file = "/Users/raki/Data Science/Haker Earth/Loan Default/Data/test_indessa.csv"
train_dataset <- read.csv(train_file, header = T)
test_dataset <- read.csv(test_file, header = T, nrows = 1000)
dim(train_dataset)
dim(test_dataset)
View(head(train_dataset))
unique(train_dataset$emp_length)
levels(train_dataset$emp_length)
# Data cleaning helpers ---------------------------------------------------
# Substitute every match of the regular expression `replace_what` in
# `column` with `replace_with`.  Vectorised over `column`.
# NOTE: deliberately shadows base::replace(), as the rest of this script
# relies on this name.
replace <- function(column, replace_with, replace_what) {
  gsub(pattern = replace_what, replacement = replace_with, x = column)
}
# Normalise the raw Indessa loan columns into numeric / factor form.
# Relies on the script's `replace()` helper (a gsub wrapper).  Note that
# gsub coerces factor columns to character, so the subsequent as.numeric()
# calls operate on character vectors, not factor level codes.
#
# Columns transformed:
#   emp_length          "<1 year".."10+ years" -> numeric 0..10
#   term                "36 months"            -> numeric 36
#   last_week_pay       "26th week"            -> numeric 26
#   zip_code            "123xx"                -> "123"
#   application_type    INDIVIDUAL/JOINT       -> factor "1"/"2"
#   verification_status Not/Source/Verified    -> factor "0"/"1"
dataclean<- function(dataset){
  col_nbr_emp_length = which(colnames(dataset)=="emp_length")
  #paste(col_nbr_emp_length)
  # Collapse all spaces
  dataset[,col_nbr_emp_length] <- replace(column = dataset[,col_nbr_emp_length], replace_what = " ", replace_with = "")
  # Now replace the string "years" with nothing
  dataset[,col_nbr_emp_length] <- replace(column = dataset[,col_nbr_emp_length], replace_what = "years", replace_with = "")
  # Now replace the string "year" with nothing
  dataset[,col_nbr_emp_length] <- replace(column = dataset[,col_nbr_emp_length], replace_what = "year", replace_with = "")
  #replacing <1 to zero and 10+to 10 (the "+" must be escaped in the regex)
  dataset[,col_nbr_emp_length] <- replace(column = dataset[,col_nbr_emp_length], replace_what = "<1", replace_with = "0")
  dataset[,col_nbr_emp_length] <- replace(column = dataset[,col_nbr_emp_length], replace_what = "\\+", replace_with = "")
  #changing the column to numbers
  dataset[,col_nbr_emp_length] <- as.numeric(dataset[,col_nbr_emp_length])
  #change term "NN months" to numeric NN
  col_nbr_term = which(colnames(dataset)=="term")
  dataset[,col_nbr_term] <- replace(column = dataset[,col_nbr_term], replace_what = " months", replace_with = "")
  dataset[,col_nbr_term] <- as.numeric(dataset[,col_nbr_term])
  #similarly changning last_week_pay
  col_nbr_last_week_pay = which(colnames(dataset)=="last_week_pay")
  dataset[,col_nbr_last_week_pay] <- replace(column = dataset[,col_nbr_last_week_pay], replace_what = "th week", replace_with = "")
  dataset[,col_nbr_last_week_pay] <- as.numeric(dataset[,col_nbr_last_week_pay] )
  #Similarly removing last two characters in zip_code
  col_nbr_zip_code = which(colnames(dataset)=="zip_code")
  dataset[,col_nbr_zip_code] <- replace(column = dataset[,col_nbr_zip_code], replace_what = "xx", replace_with = "")
  #Replacing applicaiotn type with individual or joint
  col_nbr_app_type = which(colnames(dataset)=="application_type")
  dataset[,col_nbr_app_type] <- replace(column = dataset[,col_nbr_app_type], replace_what = "INDIVIDUAL", replace_with = "1")
  dataset[,col_nbr_app_type] <- replace(column = dataset[,col_nbr_app_type], replace_what = "JOINT", replace_with = "2")
  dataset[,col_nbr_app_type] <- as.factor(as.character(dataset[,col_nbr_app_type] ))
  #Replacing Verification status too with Not Verified =0, Source Verified
  # Order matters here: "Not Verified" and "Source Verified" must be rewritten
  # before the bare "Verified" pattern, which would otherwise match inside them.
  col_nbr_ver_status = which(colnames(dataset)=="verification_status")
  dataset[,col_nbr_ver_status] <- replace(column = dataset[,col_nbr_ver_status], replace_what = "Not Verified", replace_with = "0")
  dataset[,col_nbr_ver_status] <- replace(column = dataset[,col_nbr_ver_status], replace_what = "Source Verified", replace_with = "1")
  dataset[,col_nbr_ver_status] <- replace(column = dataset[,col_nbr_ver_status], replace_what = "Verified", replace_with = "1")
  dataset[,col_nbr_ver_status] <- as.factor(as.character(dataset[,col_nbr_ver_status] ))
  #returning the dataset
  return (dataset)
}
# NOTE(review): the next line looks buggy on two counts -- it assigns a value
# derived from the `term` column into `emp_length`, and the pattern "+ " is
# an invalid regular expression (leading "+"), so gsub() will raise an error
# unless fixed matching is used.  dataclean() already normalises emp_length;
# confirm whether this line is leftover scratch work.
train_dataset$emp_length<-replace(column = train_dataset$term, replace_what = "+ ", replace_with = "10")
# Apply the full cleaning pipeline and inspect the result
train_2<-dataclean(train_dataset)
View(train_2[1:10,])
unique(train_dataset$verification_status)
ec058b23cf13906b6d874b5fa358e672146571f9 | 31da9633913672a623a1635b9691a09e8dee52da | /R/github.R | 49d5ada4505f9dfbf7bb5ee813fac41073a5311a | [] | no_license | ms609/MorphoBank | eecdeaf3de338a8e62c369cdbfbb7948a843c88e | 6dd019a5be3d93b1e3301a5837f4f93966642db6 | refs/heads/master | 2023-04-28T10:01:49.483968 | 2023-04-17T10:01:03 | 2023-04-17T10:01:03 | 141,433,172 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,166 | r | github.R | #' GitHub Link
#'
#' Generates a link to a file on a specified GitHub repository.
#'
#' @param path Character string specifying path to file in specified GitHub repository
#' @param alt Character string specifying link text to display if
#' knitr output is in HTML format.
#' @param raw Logical specifying whether to link to the raw file at
#' raw.githubusercontent.com.
#' If `FALSE`, links to the file at github.com.
#' @template gitHubParams
#'
#' @return A link to a file hosted on GitHub, formatted appropriately for HTML
#' or Latex output
#' @export
#'
#' @author Martin R. Smith
#' @importFrom knitr is_html_output
#' @examples {
#' GitLink('ms609', 'MorphoBank')
#' options(GitHubUser="ms609", GitHubRepo="MorphoBank")
#' GitLink()
#' GitHubPages()
#' }
GitLink <- function (path='', alt=NULL, raw=TRUE, user=getOption('GitHubUser'),
repo=getOption('GitHubRepo')) {
if (is.null(user)) warning("Specify GitHub Username with options(GitHubUser='USERNAME')")
if (is.null(repo)) warning("Specify GitHub repository with options(GitHubRepo='REPONAME')")
rawGit <- paste0("https://raw.githubusercontent.com/", user, '/', repo, "/master/")
gitHubUrl <- paste0("https://github.com/", user, '/', repo, "/tree/master/")
paste0(" [",
if (!is_html_output() || is.null(alt)) {
paste0(gsub("https://", "", ifelse(raw, rawGit, gitHubUrl), fixed=TRUE), path)
} else alt,
"](", ifelse(raw, rawGit, gitHubUrl), path, ")")
}
#' @describeIn GitLink A link to `user`.github.io/`repo`/
#' @importFrom knitr is_html_output
#' @export
GitHubPages <- function (path='', alt=NULL, user=getOption('GitHubUser'),
                         repo=getOption('GitHubRepo')) {
  if (is.null(user)) warning("Specify GitHub Username with options(GitHubUser='USERNAME')")
  if (is.null(repo)) warning("Specify GitHub repository with options(GitHubRepo='REPONAME')")
  # GitHub Pages URL for this user/repo plus the requested path
  gitPageUrl <- paste0("https://", user, '.github.io/', repo, '/', path)
  # Display text: the bare URL (scheme stripped) unless we are producing
  # HTML output and an explicit `alt` was supplied.
  display <- if (!is_html_output() || is.null(alt)) {
    gsub("https://", "", gitPageUrl, fixed = TRUE)
  } else {
    alt
  }
  paste0(" [", display, "](", gitPageUrl, ")")
}
|
3fdd30902d4f9b59824dd05463335d6c2b3d8fb4 | 1eb6f2998035f9884a86b54c61641157b8fa6d2f | /3-2_hashChain/mod.R | 0ad6f9e2844576f011f3f333a4631f111f0fa501 | [] | no_license | markculp0/dataStruct | 90de9f0ab70bf9f8d93253dd5ffa0ce9e093bef7 | 5c82fc8c70e3f10b384206f1eb9b420385ea1bef | refs/heads/master | 2020-05-28T08:26:25.606863 | 2019-07-05T15:20:49 | 2019-07-05T15:20:49 | 188,937,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 398 | r | mod.R | # =====================
# Hash computation in R
# =====================
# "Hell0", m=5 => 4
# "world", m=5 => 4
# "GooD", m=5 => 2
# "luck", m=5 => 2
options(scipen = 999)
m <- 5
word <- "GooD"
word <- sapply(strsplit(word, NULL)[[1L]], utf8ToInt)
i <- 0
for (d in word ) {
word[i + 1] <- word[i + 1] * 263^i
i <- i + 1
}
total <- sum(word)
hash <- total %% 1000000007
hash <- hash %% m
|
dd26c8ecd98f0392138c0c7ba0a2219379394510 | 00cfbc3c4a6f234116eaafa97c8efb3906ae28cd | /R/lhc_plotting.R | 614d938ded13ad7112cc651eb85663aa371f4d8a | [] | no_license | cran/spartan | ff805de8b6c09f0adf38191ee8d80c260ac576ae | 048ba049c3876bcda91b5ab3cb4777abb0a89b36 | refs/heads/master | 2020-04-15T02:06:48.834239 | 2018-11-19T17:20:03 | 2018-11-19T17:20:03 | 17,699,861 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,933 | r | lhc_plotting.R | #' Generates parameter/measure plot for each pairing in the analysis
#'
#' Produces a graph for each parameter, and each output measure, showing
#' the simulation output achieved when that parameter was assigned that value.
#' Eases identification of any non-linear effects.
#'
#' @inheritParams lhc_generateLHCSummary
#' @param MEASURE_SCALE Scale in which each of the output responses is
#' measured. Used to label plots
#' @param CORCOEFFSOUTPUTFILE File produced by spartan containing the Partial
#' Rank Correlation Coefficients for each parameter/measure pairing
#' @param OUTPUT_TYPE Type of graph to plot. Can be PDF, PNG, TIFF, BMP, etc,
#' all formats supported by ggplot2
#' @param GRAPHTIME The timepoint being processed, if any. NULL if not.
#' @param check_done For multiple timepoints, whether input has been checked
#' @param corcoeffs_output_object Correlation coefficients can be input as an
#' R object as well as CSV file. In this case, CORCOEFFSOUTPUTFILE will be NULL
#' @param lhc_summary_object If not specified in a CSV file, results can be specified in an
#' R object. In this case LHCSUMMARYFILENAME will be NULL
#'
#' @export
#'
lhc_graphMeasuresForParameterChange <-
function(FILEPATH, PARAMETERS, MEASURES, MEASURE_SCALE, CORCOEFFSOUTPUTFILE,
LHCSUMMARYFILENAME, OUTPUT_TYPE = c("PDF"), TIMEPOINTS = NULL,
TIMEPOINTSCALE = NULL, GRAPHTIME = NULL, check_done=FALSE,
corcoeffs_output_object=NULL, lhc_summary_object=NULL) {
input_check <- list("arguments"=as.list(match.call()),"names"=names(match.call())[-1])
# Run if all checks pass:
if(check_input_args(input_check$names, input_check$arguments)) {
if (is.null(TIMEPOINTS)) {
if(!is.null(CORCOEFFSOUTPUTFILE))
{
corcoeffs <- read_from_csv(file.path(FILEPATH, CORCOEFFSOUTPUTFILE))
lhcresult <- read_from_csv(file.path(FILEPATH, LHCSUMMARYFILENAME))
}
else if(!is.null(corcoeffs_output_object))
{
corcoeffs <- corcoeffs_output_object
lhcresult <- lhc_summary_object
}
message ("Generating output graphs for LHC Parameter Analysis")
# CREATE A GRAPH FOR EACH PARAMETER, FOR EACH MEASURE
for (p in 1:length(PARAMETERS)) {
for (m in 1:length(MEASURES)) {
# Get the PRCC value for this pairing
corr_result <- corcoeffs[
p, paste(MEASURES[m], "_Estimate", sep = "")]
# In some instances, the correlation coefficient has been reported as NA
# especially in cases where the output result is the same for all parameter
# values. This needs detecting and no graph plotted if that occurs
if(!is.na(corr_result))
{
# Make filename, titles, and labels
titles <- make_graph_title(FILEPATH, PARAMETERS[p], GRAPHTIME, MEASURES[m],
MEASURE_SCALE[m],corr_result, TIMEPOINTSCALE)
# Filter the data to plot
data_to_plot <- data.frame(lhcresult[, PARAMETERS[p]],
lhcresult[, MEASURES[m]])
# Create graphs
output_ggplot_graph(titles$file, OUTPUT_TYPE,
make_lhc_plot(data_to_plot, titles))
} else {
message(paste0("For Parameter ",PARAMETERS[p], " Measure ",MEASURES[m], " Pairing, Correlation Coefficient was reported as NA. Excluded from plotting."))
}
}
}
message("LHC Graphs Complete")
} else {
# Process each timepoint
lhc_graphMeasuresForParameterChange_overTime(
FILEPATH, PARAMETERS, MEASURES, MEASURE_SCALE, CORCOEFFSOUTPUTFILE,
LHCSUMMARYFILENAME, OUTPUT_TYPE, TIMEPOINTS,
TIMEPOINTSCALE, GRAPHTIME)
}
}
}
#' Generates parameter/measure plot for each pairing in the analysis, from results stored in a database
#'
#' Produces a graph for each parameter, and each output measure, showing
#' the simulation output achieved when that parameter was assigned that value.
#' Eases identification of any non-linear effects. This method uses simulation
#' results stored in a database by spartanDB
#'
#' @param db_results Results for a specified experiment mined from the database
#' @param corcoeffs Correlation coefficients calculated for those results,
#' held in the databae
#' @param parameters Parameters included in this analysis
#' @param measures Simulation output measures
#' @param MEASURE_SCALE Scale in which each of the output responses is
#' measured. Used to label plots
#' @param output_directory Folder where the graphs should be stored
#' @param OUTPUT_TYPE Type of graph to plot. Can be PDF, PNG, TIFF, BMP, etc,
#' all formats supported by ggplot2
#'
#' @export
#'
lhc_graphMeasuresForParameterChange_from_db <-
  function(db_results, corcoeffs, parameters, measures, MEASURE_SCALE, output_directory,
           OUTPUT_TYPE = c("PDF")) {

    message ("Generating output graphs for LHC Parameter Analysis")

    # CREATE A GRAPH FOR EACH PARAMETER, FOR EACH MEASURE
    # BUGFIX: seq_along() is safe on empty parameter/measure vectors
    for (p in seq_along(parameters)) {
      for (m in seq_along(measures)) {

        # Get the PRCC value for this pairing.
        # BUGFIX: extract the column as a vector with [rows, "col"] rather
        # than coercing a one-column data.frame with as.numeric(), which
        # errors whenever more than one row matches the pairing.
        pair_rows <- corcoeffs$parameter == parameters[p] &
          corcoeffs$measure == measures[m]
        corr_result <- as.numeric(corcoeffs[pair_rows, "statistic_1"])

        # Guard against pairings with no coefficient or an NA coefficient,
        # consistent with lhc_graphMeasuresForParameterChange
        if (length(corr_result) == 0 || is.na(corr_result[1])) {
          message(paste0("For Parameter ", parameters[p], " Measure ", measures[m],
                         " Pairing, no valid Correlation Coefficient was found. Excluded from plotting."))
        } else {
          # Make filename, titles, and labels
          titles <- make_graph_title(output_directory, parameters[p], NULL, measures[m],
                                     MEASURE_SCALE[m], corr_result[1], NULL)

          # Filter the data to plot
          data_to_plot <- data.frame(as.numeric(db_results[, parameters[p]]),
                                     as.numeric(db_results[, measures[m]]))

          # Create graphs
          output_ggplot_graph(titles$file, OUTPUT_TYPE,
                              make_lhc_plot(data_to_plot, titles))
        }
      }
    }
    message("LHC Graphs Complete")
  }
#' Wrapper for graphing LHC results for multiple timepoints
#'
#' Appends each timepoint to the coefficient and summary file names, then
#' delegates to \code{lhc_graphMeasuresForParameterChange} for that timepoint.
#' @inheritParams lhc_graphMeasuresForParameterChange
lhc_graphMeasuresForParameterChange_overTime <-
  function(FILEPATH, PARAMETERS, MEASURES, MEASURE_SCALE, CORCOEFFSOUTPUTFILE,
           LHCSUMMARYFILENAME, OUTPUT_TYPE = c("PDF"), TIMEPOINTS = NULL,
           TIMEPOINTSCALE = NULL, GRAPHTIME = NULL) {
  # seq_along() rather than 1:length() so a zero-length TIMEPOINTS is a no-op
  for (n in seq_along(TIMEPOINTS)) {
    current_time <- TIMEPOINTS[n]
    message(paste0("Processing Timepoint: ", current_time))
    # Insert "_<time>" before the file extension of each per-timepoint file
    corcoeffs_output_full <- append_time_to_argument(
      CORCOEFFSOUTPUTFILE, current_time,
      check_file_extension(CORCOEFFSOUTPUTFILE))
    lhcsummary_full <- append_time_to_argument(
      LHCSUMMARYFILENAME, current_time,
      check_file_extension(LHCSUMMARYFILENAME))
    # TIMEPOINTS is NULL here so the delegate takes its single-timepoint
    # branch; GRAPHTIME labels the output files for this timepoint.
    # OUTPUT_TYPE is now forwarded explicitly: previously it was dropped, so
    # non-PDF formats requested by the caller were silently ignored.
    lhc_graphMeasuresForParameterChange(
      FILEPATH, PARAMETERS, MEASURES, MEASURE_SCALE, corcoeffs_output_full,
      lhcsummary_full, OUTPUT_TYPE = OUTPUT_TYPE, TIMEPOINTS = NULL,
      TIMEPOINTSCALE = TIMEPOINTSCALE,
      GRAPHTIME = current_time, check_done = TRUE)
  }
}
#' Make graph title, sub title, and file name
#'
#' Builds the output file path, plot title, subtitle (measure, optional
#' timepoint, and correlation coefficient), and axis labels for an LHC
#' parameter/measure plot.
#'
#' @param filepath Directory to output graph to
#' @param parameter Current parameter being processed
#' @param graph_time Timepoint, if multiple timepoints (NULL otherwise)
#' @param measure Current measure being processed
#' @param measure_scale Scale of the measure being processed
#' @param corr_stat The PRCC for this parameter-measure pair
#' @param timepointscale Scale of timepoints, if multiple
#' @return List containing file, title, and subtitle, and axes labels
make_graph_title <- function(filepath, parameter, graph_time, measure,
                             measure_scale, corr_stat, timepointscale) {
  # Coefficient rendered to 3 significant figures for the subtitle
  coeff_text <- toString(signif(corr_stat, 3))
  if (is.null(graph_time)) {
    # Single-timepoint analysis: no time component in file name or subtitle
    file_stub <- paste(parameter, measure, sep = "_")
    subtitle_text <- paste0("Measure: ", measure,
                            "\nCorrelation Coefficient: ", coeff_text)
  } else {
    # Multi-timepoint analysis: include the timepoint in both
    file_stub <- paste(parameter, measure, graph_time, sep = "_")
    subtitle_text <- paste0("Measure: ", measure, ". Timepoint: ", graph_time,
                            " ", timepointscale,
                            "\nCorrelation Coefficient: ", coeff_text)
  }
  list(
    title = paste0("LHC Analysis for Parameter: ", parameter),
    file = file.path(filepath, file_stub),
    sub_title = subtitle_text,
    xlabel = "Parameter Value",
    ylabel = paste0("Median Value Across Runs (", measure_scale, ")")
  )
}
#' Make the LHC output plot
#'
#' Builds a scatter plot of simulation responses against the values a
#' parameter was assigned, with y-axis limits snapped to whole numbers.
#'
#' @param data_to_plot Two-column data frame: parameter values (column 1)
#' and the corresponding measure responses (column 2)
#' @param titles Object containing graph title, subtitle, and axis labels,
#' as produced by \code{make_graph_title}
#' @return Created graph object
make_lhc_plot <- function(data_to_plot, titles) {
  # Give the two columns stable names so aes() can map them directly.
  # Indexing the data frame inside aes() (the previous approach) is
  # discouraged by ggplot2 and fragile under later layer operations.
  names(data_to_plot) <- c("parameter_value", "measure_value")
  measure_values <- as.numeric(data_to_plot$measure_value)
  output_graph <- ggplot(data_to_plot,
                         aes(x = parameter_value,
                             y = measure_value)) +
    geom_point(size = 0.5) +
    # Round the y range outwards to whole numbers, as before
    scale_y_continuous(limits = c(
      floor(min(measure_values)), ceiling(max(measure_values)))) +
    labs(x = titles$xlabel, y = titles$ylabel,
         title = titles$title, subtitle = titles$sub_title) +
    theme(axis.title = element_text(size = 7),
          axis.text = element_text(size = 7),
          plot.title = element_text(size = 9, hjust = 0.5),
          plot.subtitle = element_text(size = 8, hjust = 0.5))
  return(output_graph)
}
#' Deprecated. Use \code{lhc_graphMeasuresForParameterChange} instead
#'
#' Retained only so existing callers do not break; emits a message directing
#' users to the replacement function and performs no other work.
#'
#' @inheritParams lhc_graphMeasuresForParameterChange
lhc_netlogo_graphMeasuresForParameterChange <- function(
    FILEPATH, PARAMETERS, MEASURES, MEASURE_SCALE, CORCOEFFSOUTPUTFILE,
    LHCSUMMARYFILENAME, TIMEPOINTS, TIMEPOINTSCALE) {
  # Deliberately a no-op beyond the deprecation notice
  message("Deprecated. Use the lhc_graphMeasuresForParameterChange method instead")
  invisible(NULL)
}
#' Plots the PRCC coefficients against each other for ease of comparison
#'
#' Plots the Partial Rank Correlation Coefficients for either all measures
#' or for one individual measure, for all simulation parameters. Output is
#' written as PDF bar charts in FILEPATH; parameters are numbered on the
#' x-axis with a legend mapping numbers to parameter names.
#'
#' @param FILEPATH Location of the LHC result set
#' @param CORCOEFFSOUTPUTFILE Name of the CSV file in FILEPATH containing
#' the Partial Rank Correlation Coefficients
#' @param MEASURES Names of the simulation responses
#' @param PRINTOPT Used in plotting Partial Rank Correlation Coefficients,
#' should be either "ALL" or "INDIVIDUAL"
#' @param TIMEPOINTS Implemented so this method can be used when analysing
#' multiple simulation timepoints. If only analysing one timepoint, this
#' should be set to NULL. If not, this should be an array of timepoints,
#' e.g. c(12,36,48,60)
#' @param TIMEPOINTSCALE Implemented so this method can be used when
#' analysing multiple simulation timepoints. Sets the scale of the timepoints
#' being analysed, e.g. "Hours"
#'
#' @export
#'
#' @importFrom graphics barplot
lhc_plotCoEfficients <- function(FILEPATH, CORCOEFFSOUTPUTFILE, MEASURES,
                                 PRINTOPT, TIMEPOINTS = NULL,
                                 TIMEPOINTSCALE = NULL) {
  # Single-timepoint branch; multiple timepoints are handled recursively below
  if (is.null(TIMEPOINTS) || length(TIMEPOINTS) == 1) {
    # NOTE(review): if FILEPATH does not exist this silently does nothing
    if (file.exists(FILEPATH)) {
      # READ IN THE COEFFICIENTS FILE
      # read_from_csv is a package-internal helper defined elsewhere
      COEFFS <- read_from_csv(file.path(FILEPATH,CORCOEFFSOUTPUTFILE))
      # COLUMN 1 HAS PARAMETER NAMES, THEN FOLLOWS FOR EACH MEASURE -
      # THE PRCC AND THE P VALUE
      # WE'RE GOING TO GRAPH ALL THE PRCC'S ON ONE GRAPH
      if (PRINTOPT == "INDIVIDUAL") {
        # INDIVIDUAL PLOTS FOR EACH MEASURE (one PDF per measure)
        message("Producing Partial Rank Correlation Coefficient Plots for each measure")
        for (i in 1:length(MEASURES)) {
          # File name and title carry the timepoint when one is given
          if (is.null(TIMEPOINTS)) {
            GRAPHFILE <- paste(FILEPATH, "/PRCC_Measure_", MEASURES[i], ".pdf",
                               sep = "")
            GRAPHTITLE <- paste("PRCC Values for Measure: ", MEASURES[i],
                                sep = "")
          } else {
            GRAPHFILE <- paste(FILEPATH, "/PRCC_Measure_", MEASURES[i], "_",
                               TIMEPOINTS, ".pdf", sep = "")
            GRAPHTITLE <- paste("PRCC Values for Measure: ", MEASURES[i],
                                "\nTimepoint: ", TIMEPOINTS, sep = "")
          }
          pdf(GRAPHFILE, width = 9, height = 5)
          # Wide right margin (17 lines) leaves room for the parameter legend
          par(xpd = NA, mar = c(2, 4, 2, 17))
          # Generate the heading of the CSV file - the measure plus _Estimate
          M <- paste(MEASURES[i], "_Estimate", sep = "")
          # We can now use this to get the column out the dataset
          barplot(COEFFS[, M], ylim = c(-1, 1), col = "black",
                  main = GRAPHTITLE,
                  ylab = "Partial Rank Correlation Coefficient",
                  names.arg = seq(1, nrow(COEFFS), by = 1))
          # Legend entries: "<bar number> <parameter name>"
          thelabels <- paste(1:nrow(COEFFS), " ", COEFFS[, 1], sep = "")
          par(xpd = TRUE)
          # Legend placed just to the right of the last bar
          legend_size <- nrow(COEFFS) + 1.5
          legend(legend_size, 1.0, legend = thelabels, pch = "",
                 cex = 0.6, ncol = 1)
          par(xpd = FALSE)
          dev.off()
        }
      } else if (PRINTOPT == "ALL") {
        # NOTE(review): any PRINTOPT other than "INDIVIDUAL"/"ALL" is a
        # silent no-op here
        message("Producing Partial Rank Correlation Coefficient Summary Plot of All Measures")
        # ALL PRCCS FOR ALL MEASURES, ON ONE PLOT
        # Make the data frame for the plot
        # FIRST OF ALL WE NEED TO REMOVE THE P VALUES SO WE CAN AUTOMATE THIS
        if (is.null(TIMEPOINTS)) {
          GRAPHFILE <- paste(FILEPATH, "/PRCC_AllMeasures.pdf", sep = "")
          GRAPHTITLE <- "PRCC Values for All Measures"
        } else {
          GRAPHFILE <- paste(FILEPATH, "/PRCC_AllMeasures_", TIMEPOINTS,
                             ".pdf", sep = "")
          GRAPHTITLE <- paste("PRCC Values for All Measures\nTimepoint: ",
                              TIMEPOINTS, sep = "")
        }
        pdf(GRAPHFILE, width = 10, height = 5)
        par(xpd = NA, mar = c(2, 4, 2, 9))
        # Estimates sit in the even-numbered columns (p-values in the odd
        # ones after column 1), so step through by 2 to collect the PRCCs
        PRCCS <- NULL
        for (p in seq(2, ncol(COEFFS), by = 2)) {
          PRCCS <- cbind(PRCCS, COEFFS[, p])
        }
        # NOW MAKE THE DATA FRAME
        # NOTE(review): levels(COEFFS[, 1]) assumes column 1 was read as a
        # factor (pre-R-4.0 read.csv default) -- verify under R >= 4.0
        d <- data.frame(row.names = levels(COEFFS[, 1]), PRCCS,
                        check.names = FALSE)
        colnames(d) <- MEASURES
        # Transpose into a matrix of measures x parameters for grouped bars
        d <- do.call(rbind, d)
        barplot(d, beside = TRUE, ylim = c(-1, 1.4),
                legend.text = rownames(d),
                args.legend = list(x = "topright", bty = "n"),
                names.arg = seq(1, nrow(COEFFS), by = 1),
                main = GRAPHTITLE,
                ylab = "Partial Rank Correlation Coefficient")
        thelabels <- paste(1:nrow(COEFFS), " ", COEFFS[, 1], sep = "")
        par(xpd = TRUE)
        legend_size <- nrow(COEFFS)*3
        #legend(legend_size, 1.0, legend = thelabels, pch = "",
        #       cex = 0.7, ncol = 1)
        # Parameter-name legend drawn outside the right plot edge
        legend("topright", inset=c(-0.2,0), 1.0, legend = thelabels, pch = "",
               cex = 0.7, ncol = 1)
        par(xpd = FALSE)
        dev.off()
      }
    }
  } else {
    # PROCESS EACH TIMEPOINT, AMENDING THE FILENAMES, RECALLING THIS FUNCTION
    for (n in 1:length(TIMEPOINTS)) {
      current_time <- TIMEPOINTS[n]
      message(paste("Processing Timepoint ", current_time, sep = ""))
      # Build "<name>_<time>.<ext>" from the base coefficients file name.
      # NOTE(review): substr(..., nchar - 4) assumes a three-letter extension
      CORCOEFFSOUTPUTFILE_FORMAT <- check_file_extension(CORCOEFFSOUTPUTFILE)
      CORCOEFFSOUTPUTFILE_FULL <- paste(substr(CORCOEFFSOUTPUTFILE, 0,
                                               nchar(CORCOEFFSOUTPUTFILE) - 4),
                                        "_", current_time, ".",
                                        CORCOEFFSOUTPUTFILE_FORMAT, sep = "")
      # NOTE(review): TIMEPOINTSCALE is passed as NULL in this recursive call,
      # so the scale label is not forwarded -- confirm whether intentional
      lhc_plotCoEfficients(FILEPATH, CORCOEFFSOUTPUTFILE_FULL, MEASURES,
                           PRINTOPT, current_time, NULL)
    }
  }
}
#' Creates a polar plot for each response, showing PRCC for each parameter
#'
#' Added in Spartan 3.0. Provides a means of plotting the partial rank
#' correlation coefficients as a polar plot, to ease comparison of these values.
#' One plot is produced per measure, in both PNG and PDF formats; line colour
#' encodes the coefficient's sign (red positive, blue negative) while the
#' radial distance shows its absolute value.
#'
#' @inheritParams lhc_graphMeasuresForParameterChange
#'
#' @export
#'
#' @import plotrix
lhc_polarplot <- function(FILEPATH, PARAMETERS, MEASURES, CORCOEFFSOUTPUTFILE,
                          TIMEPOINTS = NULL, TIMEPOINTSCALE = NULL) {
  # Produce a polar plot that shows all the PRCCs for the PARAMETERS
  # in a latin-hypercube analysis
  if (is.null(TIMEPOINTS) || length(TIMEPOINTS) == 1) {
    # Check the FILEPATH exists (silently does nothing otherwise)
    if (file.exists(FILEPATH)) {
      # Check the coefficient file exists
      if (file.exists(paste(FILEPATH, "/", CORCOEFFSOUTPUTFILE, sep = ""))) {
        # Read in the file; row names are the parameter names (column 1)
        CORCOEFFS <- read.csv(paste(FILEPATH, "/", CORCOEFFSOUTPUTFILE,
                                    sep = ""),
                              header = TRUE, check.names = FALSE,
                              row.names = 1)
        # Plot set up:
        # convert 360 degrees to radians (approximation of 2*pi)
        circle_in_radians <- 6.28319
        # outputs: each plot is written in both of these formats
        output_forms <- c("png", "pdf")
        # Now create a plot for all simulation MEASURES
        for (m in 1:length(MEASURES)) {
          # Need to exclude any parameters that are NA prior to plotting
          na_corrs <- which(is.na(CORCOEFFS[,paste0(MEASURES[m],"_Estimate")]))
          plot_parameters<-PARAMETERS
          if(length(na_corrs)>0)
          {
            plot_parameters<-PARAMETERS[!(PARAMETERS %in% PARAMETERS[na_corrs])]
            message(paste0("For Measure ",MEASURES[m],", Parameter(s) ",toString(PARAMETERS[na_corrs])," reported correlation coefficients of NA. Excluded from Plot. Check calculation"))
          }
          # Check there are still parameters left to plot after those removed!
          if(length(plot_parameters)>0)
          {
            # Angular spacing between consecutive parameters on the circle
            degree <- circle_in_radians / length(plot_parameters)
            # Create the angles at which the PARAMETERS will be shown on the
            # plot, as well as the colours (blue negative, red positive)
            angle <- c()
            colours <- c()
            # Make the header for this measure (column "<measure>_Estimate")
            col_head <- paste(MEASURES[m], "_Estimate",
                              sep = "")
            #for (i in 1:length(PARAMETERS)) {
            for (i in 1:length(plot_parameters))
            {
              angle <- c(angle, degree * i)
              # Now see if the correlation is positive or negative
              #if (CORCOEFFS[PARAMETERS[i], col_head] < 0)
              if (CORCOEFFS[plot_parameters[i], col_head] < 0)
              {
                colours <- c(colours, "blue")
              } else {
                colours <- c(colours, "red")
              }
            }
            # Output base name; the timepoint is appended when one is given
            graph_name <- paste(FILEPATH, "/polarPlot_", MEASURES[m],sep="")
            if(!is.null(TIMEPOINTS))
              graph_name<-paste(graph_name,"_",TIMEPOINTS,sep="")
            # Now plot the graph, once per output format:
            for (o in 1:length(output_forms)) {
              if (output_forms[o] == "pdf")
                pdf(paste(graph_name, ".pdf", sep = ""), width = 12)
              if (output_forms[o] == "png")
                png(filename = paste(graph_name,".png",sep = ""), width = 800)
              # Sets the size of the labels on the outside of the polar plot
              par(cex.axis = 1.5)
              # readjust the parameter list to align with the correct angles.
              # NOTE(review): "1:length(x) - 1" parses as (1:length(x)) - 1,
              # i.e. 0:(n-1); the 0 index is dropped, so this rotates the
              # names one position -- appears intentional, worth confirming
              #PARAM_NAMES <- c(PARAMETERS[length(PARAMETERS)],
              #                 PARAMETERS[1:length(PARAMETERS) - 1])
              PARAM_NAMES <- c(plot_parameters[length(plot_parameters)],
                               plot_parameters[1:length(plot_parameters) - 1])
              # Note we use absolute values as plot goes from 0 to 1, it is the
              # colour which shows if it is positive or negative
              radial.plot(abs(CORCOEFFS[plot_parameters, col_head]),
                          angle, rp.type = "r",
                          lwd = 4, line.col = colours,
                          labels = seq(1, length(plot_parameters), by = 1),
                          radial.lim = c(0, 1), #range of grid circle
                          main = paste("Partial Rank Correlation Coefficient
                   Values for ",
                                       MEASURES[m], sep = ""),
                          show.grid.labels = 2,
                          #put the concentric circle labels going down
                          show.radial.grid = TRUE,
                          cex.lab = 0.7
              )
              # Sign legend (colour key)
              legend("topleft", 1, c("Positive", "Negative"), lty = 1, lwd = 1:2,
                     col = c("red", "blue"), cex = 0.9, pt.cex = 1)
              par(xpd = TRUE)
              # Number-to-parameter-name key, drawn outside the plot region
              legend(1, 1, pch = as.character(c(1:length(plot_parameters))),
                     PARAM_NAMES, cex = 0.9, pt.cex = 1)
              par(xpd = FALSE)
              dev.off()
            }
          }
        }
      }
    }
  } else {
    # PROCESS EACH TIMEPOINT, AMENDING FILENAMES AND RECALLING THIS FUNCTION
    for (n in 1:length(TIMEPOINTS)) {
      current_time <- TIMEPOINTS[n]
      message(paste("Processing Timepoint: ", current_time, sep = ""))
      # Build "<name>_<time>.<ext>"; assumes a three-letter file extension
      CORCOEFFSOUTPUTFILE_FORMAT <- check_file_extension(CORCOEFFSOUTPUTFILE)
      CORCOEFFSOUTPUTFILE_FULL <- paste(substr(CORCOEFFSOUTPUTFILE, 0,
                                               nchar(CORCOEFFSOUTPUTFILE) - 4),
                                        "_", current_time, ".",
                                        CORCOEFFSOUTPUTFILE_FORMAT, sep = "")
      lhc_polarplot(FILEPATH, PARAMETERS, MEASURES, CORCOEFFSOUTPUTFILE_FULL,
                    TIMEPOINTS = current_time,
                    TIMEPOINTSCALE = TIMEPOINTSCALE)
    }
  }
}
#' Plots Graphs for Partial Rank Correlation Coefficients Over Time
#'
#' Produces plots to show how the impact of a parameter changes over time,
#' measured by the change in PRCC. One PDF is produced per parameter, with a
#' line per measure; coefficients are gathered from one timepoint-suffixed
#' coefficients file per entry in TIMEPOINTS.
#'
#' @inheritParams lhc_graphMeasuresForParameterChange
#' @param CORCOEFFSFILENAME Name of the CSV file containining the correlation
#' coefficients
#' @param DISPLAYPVALS Boolean stating whether PRCC p-values should be printed
#' on the graph
#'
#' @export
#' @importFrom grDevices png
plotPRCCSFromTimepointFiles <- function(FILEPATH, PARAMETERS, MEASURES,
                                        CORCOEFFSFILENAME, TIMEPOINTS,
                                        TIMEPOINTSCALE, DISPLAYPVALS = FALSE) {
  message("Plotting Graphs for Partial Rank Correlation Coefficients Over Time")
  # NOTE(review): if plotrix is not installed, this silently does nothing
  # (no plots and no completion message)
  if (requireNamespace("plotrix", quietly = TRUE)) {
    # One plot for each parameter
    for (PARAM in 1:length(PARAMETERS)) {
      # PRCCS for this parameter, one row per timepoint
      FULLPARAMRESULTS <- NULL
      # P-Values for this parameter
      PARAMPVALS <- data.frame()
      # Now to gather the data for each hour from the relevant result files
      for (i in 1:length(TIMEPOINTS)) {
        hour <- TIMEPOINTS[i]
        # Add the timepoint to the correlation coefficient results file
        # (assumes a three-letter file extension)
        CORCOEFFSOUTPUTFILE_FORMAT <- check_file_extension(CORCOEFFSFILENAME)
        CORCOEFFSOUTPUTFILE_FULL <- paste(substr(CORCOEFFSFILENAME, 0,
                                                 nchar(CORCOEFFSFILENAME) - 4),
                                          "_", hour, ".",
                                          CORCOEFFSOUTPUTFILE_FORMAT, sep = "")
        # Read in the coefficients (read_from_csv is a package helper)
        LHCResults <- read_from_csv(file.path(FILEPATH,CORCOEFFSOUTPUTFILE_FULL))
        # Get the PRCCS.
        # NOTE(review): columns 2 and 4 (estimates) and 3 and 5 (p-values)
        # are hard-coded, i.e. this assumes exactly two measures -- confirm
        results <- c(hour, LHCResults[PARAM, 2], LHCResults[PARAM, 4])
        # Get the P-Values
        pvals.d <- data.frame(LHCResults[PARAM, 3], LHCResults[PARAM, 5])
        # Append the PRCCS for this timepoint to those of all timepoints
        FULLPARAMRESULTS <- rbind(FULLPARAMRESULTS, results)
        # Append the P-Values for this timepoint to those of all timepoints
        PARAMPVALS <- rbind(PARAMPVALS, pvals.d)
      }
      # Set the row and column names of the P-Values data frame
      rownames(PARAMPVALS) <- TIMEPOINTS
      colnames(PARAMPVALS) <- MEASURES
      # Now to make the plot
      GRAPHFILE <- file.path(FILEPATH, paste(PARAMETERS[PARAM], "_OverTime.pdf",sep=""))
      pdf(GRAPHFILE, width = 7, height = 7)
      # Title, with parameter name
      GRAPHTITLE <- paste("Partial Rank Correlation Coefficients Over Simulation
                      Time\nParameter: ", PARAMETERS[PARAM], sep = "")
      # Plot the first measure (axes suppressed; drawn manually below)
      plot(FULLPARAMRESULTS[, 1], FULLPARAMRESULTS[, 2], type = "o",
           main = GRAPHTITLE, lty = 1, xlab = "", ylim = c(-1, 1),
           ylab = "Partial Rank Correlation Coefficient",
           xaxt = "n", yaxt = "n", bty = "n")
      # Now add the rest (second measure as a dashed line, triangle markers)
      lines(FULLPARAMRESULTS[, 1], FULLPARAMRESULTS[, 3], type = "o",
            lty = 5, pch = 2)
      axis(2, at = seq(-1, 1, by = 0.25))
      # Horizontal axis drawn at y = 0 with ticks at each timepoint interval
      axis(1, pos = 0, at = seq(as.numeric(min(TIMEPOINTS)),
                                as.numeric(max(TIMEPOINTS)),
                                by = (as.numeric(max(TIMEPOINTS)) /
                                        length(TIMEPOINTS))),
           tck = 0.015, labels = FALSE)
      # Add the axis at 0
      abline(h = 0)
      # Add the labels to the axis (placed just above the zero line)
      for (h in 1:length(TIMEPOINTS)) {
        text(as.numeric(TIMEPOINTS[h]), 0.08, TIMEPOINTS[h])
      }
      max_time <- max(TIMEPOINTS)
      num_max_time <- as.numeric(max_time)
      min_time <- min(TIMEPOINTS)
      num_min_time <- as.numeric(min_time)
      total_time <- num_max_time + num_min_time
      # Add the X axis label, centred between min and max timepoints
      text(total_time / 2, 0.18, TIMEPOINTSCALE)
      # P-Values Table, if the user wants this displayed
      if (DISPLAYPVALS == TRUE) {
        # Table anchored at 71% of the way across the timepoint range
        xaxis_loc <- (((as.numeric(max(TIMEPOINTS)) -
                          as.numeric(min(TIMEPOINTS))) / 100) * 71) +
          as.numeric(min(TIMEPOINTS))
        plotrix::addtable2plot(xaxis_loc, 0.7, signif(PARAMPVALS, digits = 3),
                               cex = 0.7,
                               display.rownames = TRUE,
                               title = "p-Values",
                               display.colnames = TRUE, bty = "o",
                               hlines = TRUE)
      }
      # Graph legend
      legend("topleft", inset = .025, title = "Measures", MEASURES,
             pch = 1:length(MEASURES))
      # Output graph
      dev.off()
    }
    message(paste("Complete. Check for output in the directory ", FILEPATH,
                  sep = ""))
  }
}
|
924bdce9dac1543a77b4cfccc6f08f4a3346f551 | e1af74e6ad7ac2fde5ad3b2c44aac29c54b0120d | /knn_structbreak_toe2.R | 4ed2d1284229a537854ae8e566969bd9f5d7d200 | [] | no_license | ulabialonczyk/WTUM | e18604819af81f1823c873693c8d5f1eae8897ca | ce419a6ac4e958ddc82527bde3b28b4bc6b73362 | refs/heads/main | 2023-04-13T23:38:31.680799 | 2021-04-28T23:25:24 | 2021-04-28T23:25:24 | 360,322,343 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 1,992 | r | knn_structbreak_toe2.R | source('/Users/Ula/Desktop/wtum/funkcje_pomocnicze2.R')
#----------------------------------- Load data ------------------------------------------------#
# NOTE(review): this section header previously said "gunpoint" (in Polish,
# "wczytanie danych: gunpoint"), but the files loaded below are the
# ToeSegmentation2 train/test splits -- presumably a copy-paste leftover
# from a sibling script. Paths are absolute and user-specific; edit before
# re-running on another machine.
coffee_train <- read.table('/Users/Ula/Desktop/wtum/ToeSegmentation2_TRAIN.txt')
coffee_test <- read.table('/Users/Ula/Desktop/wtum/ToeSegmentation2_TEST.txt')
#----------------------------------- Extract series values and class labels -----------------------------------#
# GetData() comes from the sourced helper file funkcje_pomocnicze2.R; column 1
# of each table appears to hold the class label -- TODO confirm against the
# helper's definition.
coffee_train_val = GetData(coffee_train)
coffee_train_labels = as.factor(as.character(coffee_train[, 1]))
coffee_test_val = GetData(coffee_test)
coffee_test_labels = coffee_test[, 1]
# NOTE(review): "coffe" (sic) -- this misspelt name is used consistently below
coffe_labels_factor = as.factor(as.character(coffee_test[, 1]))
#------------------------------- Plain KNN (no structural-break features) ------------------------------------- #
# Fit k-nearest-neighbour classifiers on the raw series for odd k from 1 to 9,
# recording the test-set accuracy of each. knn() is presumably class::knn,
# made available via the sourced helpers -- TODO confirm.
knn_zwykle_coffee1 = knn(t(coffee_train_val), t(coffee_test_val), coffee_train_labels, k=1)
acc1z=sum(knn_zwykle_coffee1==coffe_labels_factor)/length(coffe_labels_factor)
# k=3
knn_zwykle_coffee3= knn(t(coffee_train_val), t(coffee_test_val), coffee_train_labels, k=3)
acc3z=sum(knn_zwykle_coffee3==coffe_labels_factor)/length(coffe_labels_factor)
# k=5
knn_zwykle_coffee5= knn(t(coffee_train_val), t(coffee_test_val), coffee_train_labels, k=5)
acc5z=sum(knn_zwykle_coffee5==coffe_labels_factor)/length(coffe_labels_factor)
# k=7
knn_zwykle_coffee7= knn(t(coffee_train_val), t(coffee_test_val), coffee_train_labels, k=7)
acc7z=sum(knn_zwykle_coffee7==coffe_labels_factor)/length(coffe_labels_factor)
# k=9
knn_zwykle_coffee9= knn(t(coffee_train_val), t(coffee_test_val), coffee_train_labels, k=9)
acc9z=sum(knn_zwykle_coffee9==coffe_labels_factor)/length(coffe_labels_factor)
# Committee of the 3 best classifiers (majority vote over k=3, k=5, k=1).
# getLabels() is defined in the sourced helpers; the "- 1" presumably shifts
# the factor codes back to the dataset's 0/1 label space -- TODO confirm.
komitetz = cbind(knn_zwykle_coffee3, knn_zwykle_coffee5, knn_zwykle_coffee1)
predykcje_komitet_z = getLabels(komitetz) - 1
acckomz = sum(predykcje_komitet_z==coffe_labels_factor)/length(coffe_labels_factor)
# Confusion matrix for the committee predictions vs. the true test labels
table(predykcje_komitet_z, coffe_labels_factor)
|
f5912f38ef15aa1c3d1dc583133b837b469ba342 | 436ace74a695893aad73229b723fac6be6814129 | /R/weightTSA.R | 089ef2f117d78492a4afbcedfeb408ef974ffa82 | [] | no_license | cran/sensitivity | 18657169c915857dcde8af872e0048fef77107f4 | 2b2cbcb7f1bebecfd05e589e459fdf4334df3af1 | refs/heads/master | 2023-04-06T05:36:54.290801 | 2023-03-19T18:10:02 | 2023-03-19T18:10:02 | 17,699,584 | 17 | 17 | null | 2021-04-07T00:57:30 | 2014-03-13T06:16:44 | R | UTF-8 | R | false | false | 1,243 | r | weightTSA.R | # Author : Bertrand Iooss (2020)
weightTSA <- function(Y, c, upper = TRUE, type = "indicTh", param = 1) {
  # Compute a weight vector from model output Y relative to a threshold c,
  # for threshold-oriented sensitivity analysis.
  #
  # Y     : the output vector (a data frame is coerced to a matrix)
  # c     : the threshold
  # upper : TRUE for an upper threshold, FALSE for a lower threshold
  # type  : the weight function type:
  #         "indicTh"  - indicator-thresholding
  #         "zeroTh"   - zero-thresholding
  #         "logistic" - logistic transformation at the threshold
  #                      (Spagnol & Da Veiga)
  #         "exp1side" - exponential transformation below the threshold
  #                      (Raguet & Marrel)
  # param : the parameter value for the "logistic" and "exp1side" types
  #
  # Returns a plain numeric vector of weights, the same length as Y.
  if (is.data.frame(Y)) {
    Y <- as.matrix(Y)
  }
  # Validate 'type' up front so an unsupported value fails with a clear
  # message instead of the cryptic "object 'wY' not found" the original
  # if-chain produced.
  valid_types <- c("indicTh", "zeroTh", "logistic", "exp1side")
  if (!(type %in% valid_types)) {
    stop("'type' must be one of: ", paste(valid_types, collapse = ", "),
         call. = FALSE)
  }
  if (upper) {
    wY <- switch(type,
      indicTh  = as.numeric(Y > c),
      zeroTh   = Y * (Y > c),
      logistic = 1 / (1 + exp(-param * (Y - c) / abs(c))),
      # Only outputs below the threshold are down-weighted; (c-Y)>0 masks them
      exp1side = exp(-(c - Y) * ((c - Y) > 0) / (param * sd(Y) / 5))
    )
  } else {
    # Lower threshold: same transformations mirrored around c
    wY <- switch(type,
      indicTh  = as.numeric(Y < c),
      zeroTh   = Y * (Y < c),
      logistic = 1 / (1 + exp(-param * (c - Y) / abs(c))),
      exp1side = exp(-(Y - c) * ((Y - c) > 0) / (param * sd(Y) / 5))
    )
  }
  return(as.vector(wY))
}
|
7384808aacf8eb937b69577b996e0d8ec53ace08 | 476aacd596bbf0fcc847521f0b8b25e11aa46d09 | /auth_creds.R | 1fd0729d9932a18cd68360700c4abcfa7d723ae6 | [] | no_license | laurakwiley/AMIA_Twitter | df7f759061b3a5ae09585d0213fe178963682e53 | f1d79773807aaa557c4744d7dab1ab00751e9f7c | refs/heads/master | 2021-01-10T19:38:48.829612 | 2015-02-27T19:18:26 | 2015-02-27T19:18:28 | 18,364,872 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | auth_creds.R | api_key<-"z05yaDCux8wiaGFltYLHOoHLI"
# SECURITY NOTE(review): live-looking Twitter API credentials are hard-coded
# and committed to version control. They should be revoked/regenerated and
# loaded from environment variables (e.g. Sys.getenv()) or an untracked
# config file instead of being stored in source.
api_secret<-"wD0SXaVAmMb3zprXs5HEcwdKt4dYIT8dGwBw3tKsJtSItAVIQb"
access_token<-"388595556-6cesMCw3nsnge9LfIoRLHeEJx5cNN2SdiYABMjpy"
access_token_secret<-"TjNjvSQuVRU9xTVtocj7dzNwyuTXQr1AWiL3tqNLN2F1l"
678b8b21a788878963deacb91384c0b5ba884878 | d0108c3f8d99cf84d0227c57b10dbd8236585192 | /Fig.S3A.S3B.diffbind.best.reps.R | a4e474dc07b61a7f245ba68004cf45d16ce70415 | [] | no_license | Webb-Laboratory/Maybury-Lewis_et_al_2021 | f2ef2f9e4571427571bfbf83f5242cc88254b51a | 77f353d6c5ae91fa0cb260abe0d674fbed946bc4 | refs/heads/main | 2023-06-05T10:24:03.584835 | 2021-06-30T19:55:13 | 2021-06-30T19:55:13 | 346,836,008 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,859 | r | Fig.S3A.S3B.diffbind.best.reps.R | # ==========================================================
# Postnatal ATAC-seq analysis
# Adding only reactivated samples SML2 and SML3
# Based on highest FRiP scores (diffbind.react.reps.R)
# Repeating analysis after downsampling Q, A, R libraries
# ~ 40,000,000 reads and peaks called
# DiffBind
# September 2019
# R version 3.6.1
# DiffBind version 2.12.0
# ==========================================================
# NOTE(review): this script uses setwd() with absolute, user-specific paths;
# edit these before re-running on another machine.
setwd("~/Dropbox/Brown_Webb/Desktop/ATAC-seq-PEAKS/DiffBind_downsample")
library(DiffBind)
library(rtracklayer)
# library(rgl)
# ========================================================
# Comparing across quiescent, activated, reactivated
# ========================================================
# Load data. These one-off steps are commented out because the resulting DBA
# objects were saved with dba.save() and are reloaded below with dba.load().
# all <- dba(sampleSheet="best.reps.analysis.csv")
# all
# Recorded console output:
# 6 Samples, 74475 sites in matrix (106677 total):
# ID Tissue Factor Condition Treatment Replicate Caller Intervals
# 1 rNSC_rep2 Postnatal Chromatin Reactivated Full-Media 4 narrow 106586
# 2 rNSC_rep3 Postnatal Chromatin Reactivated Full-Media 4 narrow 107137
# 3 aNSC_rep2 Postnatal Chromatin Activated Full-Media 2 narrow 51918
# 4 aNSC_rep3 Postnatal Chromatin Activated Full-Media 3 narrow 41480
# 5 qNSC_rep1 Postnatal Chromatin Quiescent BMP4-Media 1 narrow 45874
# 6 qNSC_rep2 Postnatal Chromatin Quiescent BMP4-Media 2 narrow 37113
# =====================================
# Save peaks as DBA object
# savefile <- dba.save(all, 'bestPeaks')
# =====================================
# Count reads (one-off step; saved below as 'bestReads')
# all <- dba.count(all, minOverlap=2)
# all
# Recorded console output:
# 6 Samples, 74475 sites in matrix:
# ID Tissue Factor Condition Treatment Replicate Caller Intervals FRiP
# 1 rNSC_rep2 Postnatal Chromatin Reactivated Full-Media 4 counts 74475 0.17
# 2 rNSC_rep3 Postnatal Chromatin Reactivated Full-Media 4 counts 74475 0.16
# 3 aNSC_rep2 Postnatal Chromatin Activated Full-Media 2 counts 74475 0.09
# 4 aNSC_rep3 Postnatal Chromatin Activated Full-Media 3 counts 74475 0.07
# 5 qNSC_rep1 Postnatal Chromatin Quiescent BMP4-Media 1 counts 74475 0.07
# 6 qNSC_rep2 Postnatal Chromatin Quiescent BMP4-Media 2 counts 74475 0.07
# ========================================
# Save reads as DBA object
# savefile <- dba.save(all, 'bestReads')
# ========================================
# Correlation heatmap based on read count data
# plot(all)
# Get pearson's correlation across conditions
# values <- dba.plotHeatmap(all) ## Fig S3A
# values
# Recorded console output:
# qNSC_rep1 qNSC_rep2 aNSC_rep3 aNSC_rep2 rNSC_rep2 rNSC_rep3
# qNSC_rep1 1.00 0.91 0.74 0.80 0.77 0.80
# qNSC_rep2 0.91 1.00 0.74 0.80 0.77 0.80
# aNSC_rep3 0.74 0.74 1.00 0.87 0.84 0.83
# aNSC_rep2 0.80 0.80 0.87 1.00 0.88 0.88
# rNSC_rep2 0.77 0.77 0.84 0.88 1.00 0.95
# rNSC_rep3 0.80 0.80 0.83 0.88 0.95 1.00
# ========================================================================================
# Load the saved read-count DBA object
all <- dba.load('bestReads')
# Establish contrast across conditions (all pairwise contrasts between
# Condition groups with at least two replicates)
all <- dba.contrast(all, categories=DBA_CONDITION, minMembers=2)
all
# Recorded console output:
# 3 Contrasts:
# Group1 Members1 Group2 Members2
# 1 Reactivated 2 Activated 2
# 2 Reactivated 2 Quiescent 2
# 3 Activated 2 Quiescent 2
# Perform differential analysis
all <- dba.analyze(all)
# Generate plots
dba.plotPCA(all, label=DBA_ID) ## Fig S3B
# dba.plotPCA(all, label=DBA_ID, b3D=T)
# dba.plotPCA(all, label=DBA_ID, components=2,3)
# dba.plotPCA(all, label=DBA_ID, bLog=F, b3D=T)
# Volcano plots per contrast; numbering follows the contrast table above
# (3 = Activated vs Quiescent, 1 = Reactivated vs Activated,
#  2 = Reactivated vs Quiescent)
dba.plotVolcano(all, contrast=3)
dba.plotVolcano(all, contrast=1)
dba.plotVolcano(all, contrast=2)
# ========================================================================================
# Retrieve differentially accessible and stable sites.
# Each comparison below is run twice from a fresh dba.load(): once through
# dba.report() for the differentially bound sites, and once through
# dba.overlap() for the sites shared (stable) between the two conditions.
############
# A vs Q
############
# Differential
all <- dba.load('bestReads')
all <- dba.contrast(all, group1=all$masks$Activated, group2=all$masks$Quiescent, name1="Activated", name2="Quiescent")
all <- dba.analyze(all)
all <- dba.report(all)
# write.csv(all, "AvsQ.DB.analysis.best.csv", row.names=FALSE, quote=FALSE)
# write.table(all, "AvsQ.DB.analysis.best.txt", sep="\t", row.names=FALSE, quote=FALSE)
# Stable
all <- dba.load('bestReads')
all <- dba.contrast(all, group1=all$masks$Activated, group2=all$masks$Quiescent, name1="Activated", name2="Quiescent")
all <- dba.analyze(all)
df <- dba.overlap(all, all$masks$Activated | all$masks$Quiescent)
df$inAll
export(df$inAll, "AvsQ.shared.bed")
############
# R vs A
############
# Differential
all <- dba.load('bestReads')
all <- dba.contrast(all, group1=all$masks$Reactivated, group2=all$masks$Activated, name1="Reactivated", name2="Activated")
all <- dba.analyze(all)
all <- dba.report(all)
# write.csv(all, "RvsA.DB.analysis.best.csv", row.names=FALSE, quote=FALSE)
# write.table(all, "RvsA.DB.analysis.best.txt", sep="\t", row.names=FALSE, quote=FALSE)
# Stable
all <- dba.load('bestReads')
all <- dba.contrast(all, group1=all$masks$Reactivated, group2=all$masks$Activated, name1="Reactivated", name2="Activated")
all <- dba.analyze(all)
df <- dba.overlap(all, all$masks$Reactivated | all$masks$Activated)
df$inAll
export(df$inAll, "RvsA.shared.bed")
############
# R vs Q
############
# Differential
all <- dba.load('bestReads')
all <- dba.contrast(all, group1=all$masks$Reactivated, group2=all$masks$Quiescent, name1="Reactivated", name2="Quiescent")
all <- dba.analyze(all)
all <- dba.report(all)
# write.csv(all, "RvsQ.DB.analysis.best.csv", row.names=FALSE, quote=FALSE)
# write.table(all, "RvsQ.DB.analysis.best.txt", sep="\t", row.names=FALSE)
# Stable
all <- dba.load('bestReads')
all <- dba.contrast(all, group1=all$masks$Reactivated, group2=all$masks$Quiescent, name1="Reactivated", name2="Quiescent")
all <- dba.analyze(all)
df <- dba.overlap(all, all$masks$Reactivated | all$masks$Quiescent)
df$inAll
export(df$inAll, "RvsQ.shared.bed")
# ========================================================================================
# Format output files for further analysis. The *.nondiff.bed inputs are
# produced from the *.shared.bed exports above by the shell commands recorded
# below (sort by chromosome/start, then keep only the first three columns).
setwd("~/Dropbox/Desktop/ATAC-seq-PEAKS/DiffBind_downsample/results/best_react_reps")
###########################################################
# in bash
# sort -k1,1 -k2,2n AvsQ.shared.bed > AvsQ.shared.sorted.bed
# cut -f1-3 AvsQ.shared.sorted.bed > AvsQ.nondiff.bed
# sort -k1,1 -k2,2n RvsA.shared.bed > RvsA.shared.sorted.bed
# cut -f1-3 RvsA.shared.sorted.bed > RvsA.nondiff.bed
# sort -k1,1 -k2,2n RvsQ.shared.bed > RvsQ.shared.sorted.bed
# cut -f1-3 RvsQ.shared.sorted.bed > RvsQ.nondiff.bed
###########################################################
# For each comparison: append a "siteN" identifier column and write a
# four-column BED (chr, start, end, stable_site) for downstream analysis.
AQ <- read.table(file="AvsQ.nondiff.bed", sep="\t", header=FALSE, stringsAsFactors=FALSE)
AQ_stable <- cbind(AQ, 1:nrow(AQ))
colnames(AQ_stable) <- c("chr", "start", "end", "stable_site")
# sub("^", "site", n) prefixes each row number with "site" (site1, site2, ...)
AQ_stable$stable_site <- sub("^", "site", AQ_stable$stable_site)
write.table(AQ_stable, file="AvsQ.stable.analysis.bed", sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
RA <- read.table(file="RvsA.nondiff.bed", sep="\t", header=FALSE, stringsAsFactors=FALSE)
RA_stable <- cbind(RA, 1:nrow(RA))
colnames(RA_stable) <- c("chr", "start", "end", "stable_site")
RA_stable$stable_site <- sub("^", "site", RA_stable$stable_site)
write.table(RA_stable, file="RvsA.stable.analysis.bed", sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
RQ <- read.table(file="RvsQ.nondiff.bed", sep="\t", header=FALSE, stringsAsFactors=FALSE)
RQ_stable <- cbind(RQ, 1:nrow(RQ))
colnames(RQ_stable) <- c("chr", "start", "end", "stable_site")
RQ_stable$stable_site <- sub("^", "site", RQ_stable$stable_site)
write.table(RQ_stable, file="RvsQ.stable.analysis.bed", sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE)
# ========================================================================================
# Get consensus peaks for each condition (peaks present in at least 2
# replicates of that condition), exported as BED via rtracklayer::export
setwd("~/Dropbox/Desktop/ATAC-seq-PEAKS/DiffBind_downsample/results/best_react_reps")
# Load the saved peak-level DBA object
peaks <- dba.load('bestPeaks')
# Get consensus peak sets (trailing comments record the peak counts obtained)
Qpeaks <- dba(peaks, mask=peaks$masks$Quiescent, minOverlap=2) #28559
Qsites <- dba.peakset(Qpeaks, bRetrieve=TRUE)
export(Qsites, "Quies.consPeaks.bed")
Apeaks <- dba(peaks, mask=peaks$masks$Activated, minOverlap=2) #32729
Asites <- dba.peakset(Apeaks, bRetrieve=TRUE)
export(Asites, "Act.consPeaks.bed")
Rpeaks <- dba(peaks, mask=peaks$masks$Reactivated, minOverlap=2) #69957
Rsites <- dba.peakset(Rpeaks, bRetrieve=TRUE)
export(Rsites, "React.consPeaks.bed")
# ========================================================================================
# Add consensus peak name for each condition: each BED written above is
# re-read, given a "<Condition>_siteN" identifier column, and rewritten
# in place with four columns (chr, start, end, site).
setwd("~/Dropbox/Desktop/ATAC-seq-PEAKS/DiffBind_downsample/results/best_react_reps")
# Quiescent
df <- read.table("Quies.consPeaks.bed", sep="\t")
dfnew <- cbind(df, 1:nrow(df))
colnames(dfnew) <- c("chr", "start", "end", "site")
dfnew$site <- sub("^", "Quies_site", dfnew$site)
write.table(dfnew, file="Quies.consPeaks.bed", sep="\t", quote=F, row.names=F, col.names=F)
# Activated
df <- read.table("Act.consPeaks.bed", sep="\t")
dfnew <- cbind(df, 1:nrow(df))
colnames(dfnew) <- c("chr", "start", "end", "site")
dfnew$site <- sub("^", "Act_site", dfnew$site)
write.table(dfnew, file="Act.consPeaks.bed", sep="\t", quote=F, row.names=F, col.names=F)
# Reactivated
df <- read.table("React.consPeaks.bed", sep="\t")
dfnew <- cbind(df, 1:nrow(df))
colnames(dfnew) <- c("chr", "start", "end", "site")
dfnew$site <- sub("^", "React_site", dfnew$site)
write.table(dfnew, file="React.consPeaks.bed", sep="\t", quote=F, row.names=F, col.names=F) |
4c3b0dde5feab8357ae6c86a3567d52927bf781a | c118908b1c8bad0914e38e43f1148b58364accc2 | /man/Bchronology.Rd | e52bc9664e6f5d7afee691feaae0da1ad3d21ea3 | [] | no_license | andrewcparnell/Bchron | baf98d6642a328ba3c83e8fcf2e04b6c0af86974 | faa14f54444e7ec417e0e389596014a1c7645349 | refs/heads/master | 2023-06-27T02:01:46.417288 | 2023-06-08T11:17:34 | 2023-06-08T11:17:34 | 40,361,984 | 30 | 12 | null | 2022-04-05T20:46:28 | 2015-08-07T13:33:16 | R | UTF-8 | R | false | true | 8,104 | rd | Bchronology.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bchronology.R
\name{Bchronology}
\alias{Bchronology}
\title{Runs the Compound Poisson-Gamma chronology model of Haslett and Parnell (2008)}
\usage{
Bchronology(
ages,
ageSds,
positions,
positionThicknesses = rep(0, length(ages)),
calCurves = rep("intcal20", length(ages)),
ids = NULL,
outlierProbs = rep(0.01, length(ages)),
predictPositions = seq(min(positions), max(positions), length = 100),
pathToCalCurves = system.file("data", package = "Bchron"),
artificialThickness = 0.01,
allowOutside = FALSE,
iterations = 10000,
burn = 2000,
thin = 8,
extractDate = 1950 - as.numeric(format(Sys.time(), "\%Y")),
maxExtrap = 1000,
thetaStart = NULL,
thetaMhSd = 0.5,
muMhSd = 0.1,
psiMhSd = 0.1,
ageScaleVal = 1000,
positionEps = 1e-05,
positionNormalise = TRUE
)
}
\arguments{
\item{ages}{A vector of ages provided in years before 1950.}
\item{ageSds}{A vector of 1-sigma values for the ages given above}
\item{positions}{Position values (e.g. depths) for each age. In the case of layers of non-zero thickness, this should be the middle value of the slice}
\item{positionThicknesses}{Thickness values for each of the positions. The thickness value should be the full thickness value of the slice. By default set to zero.}
\item{calCurves}{A vector of values containing either \code{intcal20}, \code{shcal20}, \code{marine20}, or \code{normal} (older calibration curves such as intcal13 are also supported). Should be the same length as the number of ages supplied. Non-standard calibration curves can be used provided they are supplied in the same format as those previously mentioned and are placed in the same directory. Normal indicates a normally-distributed (non-14C) age.}
\item{ids}{ID names for each age}
\item{outlierProbs}{A vector of prior outlier probabilities, one for each age. Defaults to 0.01}
\item{predictPositions}{A vector of positions (e.g. depths) at which predicted age values are required. Defaults to a sequence of length 100 from the top position to the bottom position}
\item{pathToCalCurves}{File path to where the calibration curves are located. Defaults to the system directory where the 3 standard calibration curves are stored.}
\item{artificialThickness}{Amount to add to the thickness values in the case of equal positions with no \code{positionThicknesses}. Bchron may fail if \code{positionThicknesses} are zero and some positions are repeated. This value is added on to the zero thicknesses (only in the case of repeated positions) to stop this failure.}
\item{allowOutside}{Whether to allow calibrations to run outside the range of the calibration curve. By default this is turned off as calibrations outside of the range of the calibration curve can cause severe issues with probability ranges of calibrated dates}
\item{iterations}{The number of iterations to run the procedure for}
\item{burn}{The number of starting iterations to discard}
\item{thin}{The step size for every iteration to keep beyond the burn-in}
\item{extractDate}{The top age of the core. Used for extrapolation purposes so that no extrapolated ages go beyond the top age of the core. Defaults to the current year}
\item{maxExtrap}{The maximum number of extrapolations to perform before giving up and setting the predicted ages to NA. Useful for when large amounts of extrapolation are required, i.e. some of the \code{predictPositions} are a long way from the dated positions}
\item{thetaStart}{A set of starting values for the calendar ages estimated by Bchron. If NULL uses a function to estimate the ages. These should be in the same units as the posterior ages required. See example below for usage.}
\item{thetaMhSd}{The Metropolis-Hastings standard deviation for the age parameters}
\item{muMhSd}{The Metropolis-Hastings standard deviation for the Compound Poisson-Gamma mean}
\item{psiMhSd}{The Metropolis-Hastings standard deviation for the Compound Poisson-Gamma scale}
\item{ageScaleVal}{A scale value for the ages. \code{Bchronology} works best when the ages are scaled to be approximately between 0 and 100. The default value is thus 1000 for ages given in years.}
\item{positionEps}{A small value used to check whether simulated positions are far enough apart to avoid numerical underflow errors. If errors occur in model runs (e.g. \code{missing value where TRUE/FALSE needed} increase this value)}
\item{positionNormalise}{Whether to normalise the position values. \code{Bchronology} works best when the positions are normalised to be between 0 and 1 The default value is \code{TRUE}}
}
\value{
A list of class \code{BchronologyRun} which include elements:
\item{theta}{The posterior estimated values of the ages}
\item{phi}{The posterior estimated outlier values (1=outlier, 2=not outlier). The means of this parameter give the posterior estimated outlier probabilities}
\item{mu}{The posterior values of the Compound Poisson-Gamma mean}
\item{psi}{The posterior values of the Compound Poisson-Gamma scale}
\item{thetaPredict}{The posterior estimated ages for each of the values in predictPosition}
\item{predictPositions}{The positions at which estimated ages were required}
\item{calAges}{The calibrated ages as output from \code{\link{BchronCalibrate}}}
\item{inputVals}{All of the input values to the \code{Bchronology} run}
}
\description{
Fits a non-parametric chronology model to age/position data according to the Compound Poisson-Gamma model defined by Haslett and Parnell (2008) <DOI:10.1111/j.1467-9876.2008.00623.x>. This version uses a slightly modified Markov chain Monte Carlo fitting algorithm which aims to converge quicker and requires fewer iterations. It also uses a slightly modified procedure for identifying outliers.
}
\details{
The \code{Bchronology} function fits a compound Poisson-Gamma distribution to the increments between the dated levels. This involves a stochastic linear interpolation step where the age gaps are Gamma distributed, and the position gaps are Exponential. Radiocarbon and non-radiocarbon dates (including outliers) are updated within the function also by MCMC.
}
\examples{
\donttest{
# Data from Glendalough
data(Glendalough)
# Run in Bchronology - all but first age uses intcal20
GlenOut <- with(
Glendalough,
Bchronology(
ages = ages,
ageSds = ageSds,
calCurves = calCurves,
positions = position,
positionThicknesses = thickness,
ids = id,
predictPositions = seq(0, 1500, by = 10)
)
)
# Summarise it a few different ways
summary(GlenOut) # Default is for quantiles of ages at predictPosition values
summary(GlenOut, type = "convergence") # Check model convergence
summary(GlenOut, type = "outliers") # Look at outlier probabilities
# Predict for some new positions
predictAges <- predict(GlenOut,
newPositions = c(150, 725, 1500),
newPositionThicknesses = c(5, 0, 20)
)
# Plot the output
plot(GlenOut) +
ggplot2::labs(
title = "Glendalough",
xlab = "Age (cal years BP)",
ylab = "Depth (cm)"
)
# If you need to specify your own starting values
startingAges <- c(0, 2000, 10000, 11000, 13000, 13500)
GlenOut <- with(
Glendalough,
Bchronology(
ages = ages,
ageSds = ageSds,
calCurves = calCurves,
positions = position,
positionThicknesses = thickness,
ids = id,
predictPositions = seq(0, 1500, by = 10),
thetaStart = startingAges
)
)
}
}
\references{
Haslett, J., and Parnell, A. C. (2008). A simple monotone process with application to radiocarbon-dated depth chronologies. Journal of the Royal Statistical Society, Series C, 57, 399-418. DOI:10.1111/j.1467-9876.2008.00623.x
Parnell, A. C., Haslett, J., Allen, J. R. M., Buck, C. E., and Huntley, B. (2008). A flexible approach to assessing synchroneity of past events using Bayesian reconstructions of sedimentation history. Quaternary Science Reviews, 27(19-20), 1872-1885. DOI:10.1016/j.quascirev.2008.07.009
}
\seealso{
\code{\link{BchronCalibrate}}, \code{\link{BchronRSL}}, \code{\link{BchronDensity}}, \code{\link{BchronDensityFast}}
}
|
70785d17a8339d8b7ce83d06960f103bcbc40d2e | 1cd1e1ff5714feb29ccb325fedf5cfdeab6011fe | /final car price prediction model.R | a4f62f7b2c3d5c83afbe8d2add165cdaf369e8ab | [] | no_license | nishthajalan/car-price-prediction-model | 8f3e723b772a1844939e90a7b186f15874a78527 | 58d3f9d87e24dd89fafeba315633d09828dcd9d8 | refs/heads/master | 2022-12-10T15:37:21.061490 | 2020-08-25T11:02:44 | 2020-08-25T11:02:44 | 290,187,378 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,982 | r | final car price prediction model.R |
library(readxl)
# Load the raw car-price workbook (path is machine-specific).
data <- read_excel("C:/Users/lokesh/Desktop/iaqs/iaqs R/carpriceprediction.xlsx")
head(data)
summary(data)
str(data)
help("separate")
# ---- Data cleaning and preparation ----
# Fix invalid values in CarName
data$CarName <- as.character(data$CarName)
# Split CarName into manufacturer and model name
library(dplyr)
library(tidyr)
head(unique(data$CarName))
car <- separate(data, CarName, into = c("CompanyName", "carname"), sep = " ")
head(car)
unique(car$CompanyName)
# Correct misspelled manufacturer names.
# Fixed: the original mapped the *correct* spellings onto the typos
# ("mazda" -> "maxda", "nissan" -> "Nissan"); the dataset's typos are
# "maxda" and "Nissan", so map those onto the correct lowercase names,
# consistent with the porsche/toyota/volkswagen fixes below.
car$CompanyName[car$CompanyName == "maxda"] <- "mazda"
car$CompanyName[car$CompanyName == "Nissan"] <- "nissan"
car$CompanyName[car$CompanyName == "porcshce"] <- "porsche"
car$CompanyName[car$CompanyName == "toyouta"] <- "toyota"
car$CompanyName[car$CompanyName == "vw"] <- "volkswagen"
car$CompanyName[car$CompanyName == "vokswagen"] <- "volkswagen"
car$CompanyName <- as.factor(car$CompanyName)
help("filter")
# Verify the corrections took effect.
# Fixed: the original tested CompanyName == "mazda" & CompanyName == "maxda",
# which can never match any row; use %in% to look for leftover misspellings.
filter(car, CompanyName %in% c("maxda", "Nissan"))  # should return zero rows
filter(car, CompanyName == "vokswagen")             # should return zero rows
summary(car$carname)
car$carname
# Check for duplicate IDs
car$car_ID[duplicated(car$car_ID)]
# Check for missing values
any(is.na(car))
any(is.na(car$carname))
is.na(car$carname)  # only two missing values
# Replace the missing model names.
# NOTE(review): 0 is coerced to the string "0" in this character column;
# an explicit "" or "unknown" placeholder may be clearer — confirm intent.
car$carname[is.na(car$carname)] <- 0
any(is.na(car))
# ---- Exploratory data visualization ----
# Price distribution: density curve and boxplot side by side.
par(mfrow = c(1, 2))
plot(density(car$price, from = 0), main = "Car Price Distribution plot")
boxplot(car$price, main = "Car Price spread")
summary(car$price)
quantile(car$price)

library(ggplot2)
car$CompanyName <- as.factor(car$CompanyName)
class(car$CompanyName)
# Counts per manufacturer, fuel type and body style.
ggplot(car, aes(x = CompanyName)) + geom_bar()
ggplot(car, aes(x = fueltype)) + geom_bar(fill = "blue")
ggplot(car, aes(x = carbody)) + geom_bar(aes(fill = CompanyName))
car$CompanyName <- as.character(car$CompanyName)
# Engine type versus price.
ggplot(car, aes(enginetype, price)) + geom_boxplot(fill = "brown")
par(mfrow = c(1, 1))
# Average price per manufacturer.
mean.price.by.company <- car %>%
  group_by(CompanyName) %>%
  summarise(mean = mean(price))
barplot(mean.price.by.company$mean, names.arg = mean.price.by.company$CompanyName)
ggplot(car, aes(doornumber, price)) + geom_boxplot()
# A similar boxplot could be drawn for aspiration versus price.
help("countplot")
# Numerical variables: scatter plots of size/weight measures against price.
plot(car$carlength, car$price)
plot(car$carwidth, car$price, col = "red")
par(mfrow = c(1, 2))
plot(car$carheight, car$price)
plot(car$curbweight, car$price)
# ---- Feature engineering ----
# Combined fuel economy: 55/45 weighted mix of city and highway mileage.
car$fueleconomy <- (0.55 * car$citympg) + (0.45 * car$highwaympg)
# Bivariate analysis: fuel economy vs price, colored by drive-wheel type.
pl6 <- ggplot(car, aes(x = fueleconomy, y = price))
pl6 + geom_point(aes(color = factor(drivewheel)))
# Fixed: install packages only when they are missing, and install *before*
# attaching. The original called library(corrgram) before its
# install.packages("corrgram") line and re-installed corrgram on every run.
if (!requireNamespace("corrgram", quietly = TRUE)) install.packages("corrgram")
if (!requireNamespace("corrplot", quietly = TRUE)) install.packages("corrplot")
library(corrgram)
library(corrplot)
# Correlation between the numeric fields, shown as a color matrix.
num.cols <- vapply(car, is.numeric, logical(1))
cor.data <- cor(car[, num.cols])
round(cor.data, 4)
corrplot(cor.data, method = "color")
# Note in particular the correlation of each variable with price.
# corrgram draws shaded lower panels and pie upper panels over the same structure.
corrgram(car, order = TRUE, lower.panel = panel.shade, upper.panel = panel.pie,
         text.panel = panel.txt)
# ---- Model building ----
library(caTools)
# Candidate predictors plus the response (price).
fields <- c('wheelbase', 'curbweight', 'enginesize', 'boreratio',
            'horsepower', 'fueleconomy', 'carlength', 'carwidth', 'price', 'enginetype',
            'fueltype', 'carbody', 'aspiration', 'cylindernumber', 'drivewheel')
use <- car[, fields]
head(use)
num.cols <- vapply(use, is.numeric, logical(1))
cor.data <- cor(use[, num.cols])
round(cor.data, 4)
corrplot(cor.data, method = "color")
set.seed(101)
# Logical mask: ~70% of rows flagged TRUE for training.
sample <- sample.split(use$price, SplitRatio = 0.7)
# Fixed: the original used subset(use, sample=TRUE) / subset(use, sample=FALSE),
# which passes 'sample' as an (ignored) named argument rather than a filter,
# so train and test were both the FULL data set. Filter on the split mask.
train <- subset(use, sample == TRUE)
test <- subset(use, sample == FALSE)
# Linear model of price on all remaining columns.
model <- lm(price ~ ., data = train)
summary(model)
res <- residuals(model)
res <- as.data.frame(res)
ggplot(res, aes(res)) + geom_histogram(fill = "blue", alpha = 0.5)
# Predict on the hold-out data and compare with the actual prices.
price.predictions <- predict(model, test)
results <- cbind(price.predictions, test$price)
colnames(results) <- c('Predicted', 'Actual')
results <- as.data.frame(results)
plot(results$Actual, results$Predicted)
head(results, 10)
tail(results)
# Errors can also be computed directly
# (renamed from 'residuals' to avoid shadowing base::residuals).
prediction.errors <- results$Actual - results$Predicted
hist(prediction.errors)
#tests for assumptions
#Durbin-Watson test (autocorrelation of residuals)
# NOTE(review): install.packages() runs unconditionally on every execution;
# consider guarding with requireNamespace().
install.packages("car",dependencies = TRUE)
library(car)
durbinWatsonTest(model)
#test for heteroskedasticity
#Breusch pagan test
install.packages("lmtest")
library(lmtest)
bptest(model)
#test for normality of residuals
#shapiro-wilk test
shapiro.test(model$residuals)
#model 2 using AIC and step()
help("step")
# Backward stepwise selection starting from the full model.
step.model=step(model,scale=0,direction="backward",trace=FALSE)
summary(step.model)
res1=residuals(step.model)
res1=as.data.frame(res1)
ggplot(res1,aes(res1))+geom_histogram(fill="blue",alpha=0.5)
#tests for assumptions
#Durbin-Watson test
install.packages("car",dependencies = TRUE)
library(car)
durbinWatsonTest(step.model)
#test for heteroskedasticity
#Breusch pagan test
install.packages("lmtest")
library(lmtest)
bptest(step.model)
#test for normality of residuals
#shapiro-wilk test
shapiro.test(step.model$residuals)
#boxcox transformation is used to change the scales of the variables
#applied to satisfy breusch-pagan test
boxcox=use[,num.cols]
colnames(boxcox)
install.packages("car",dependencies = TRUE)
library(car)
# NOTE(review): car::bcPower() applies one lambda per column/vector; passing
# lambda = seq(-2, 2, by = 0.5) (length 9) to a single column looks like a
# length mismatch and likely errors or recycles unexpectedly — confirm the
# intended lambda (e.g. a scalar estimated via powerTransform()).
# Also note these assignments overwrite the original columns in 'use'.
use$wheelbase=bcPower(use$wheelbase,lambda=seq(-2,2,by=0.5))
use$curbweight=bcPower(use$curbweight,lambda=seq(-2,2,by=0.5))
use$enginesize=bcPower(use$enginesize,lambda = seq(-2,2,by=0.5))
use$boreratio=bcPower(use$boreratio,lambda=seq(-2,2,by=0.5))
use$horsepower=bcPower(use$horsepower,lambda=seq(-2,2,by=0.5))
use$fueleconomy=bcPower(use$fueleconomy,lambda=seq(-2,2,by=0.5))
use$carlength=bcPower(use$carlength,lambda=seq(-2,2,by=0.5))
use$carwidth=bcPower(use$carwidth,lambda=seq(-2,2,by=0.5))
use$price=bcPower(use$price,lambda = seq(-2,2,by=0.5))
|
9dfa9de478243a1fc01b3a2acdcf5b286522f973 | 76371ca11e03f754f0613c953a539612c56aabcb | /model/RandomForest_ベースライン_CVあり.R | 330115d509b40a31c9abace17976b0c77590519a | [] | no_license | you1025/probspace_salary | cc26b152d152b098cd764f7cdbf5c8e4eb83b824 | e78b62397cce1585d81bcb039a9237145a1f94dd | refs/heads/master | 2022-04-01T00:44:33.099781 | 2019-12-20T04:15:58 | 2019-12-20T04:15:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,986 | r | RandomForest_ベースライン_CVあり.R | # Test MAE: 47.72200, 37.69149, 33.37720, 29.13506, 26.98884
library(tidyverse)
library(tidymodels)
library(furrr)
set.seed(1025)
source("functions.R", encoding = "utf-8")
# Data Load ---------------------------------------------------------------
df.train_data <- load_train_data("data/input/train_data.csv")
df.cv <- rsample::vfold_cv(df.train_data, v = 5)
# Feature Engineering -----------------------------------------------------
recipe <- recipes::recipe(salary ~ ., data = df.train_data) %>%
recipes::step_dummy(recipes::all_nominal())
# Model Definition --------------------------------------------------------
model <- parsnip::rand_forest(
mode = "regression",
mtry = parsnip::varying(),
min_n = parsnip::varying(),
trees = parsnip::varying()
) %>%
parsnip::set_engine(engine = "ranger")
# Hyper Parameter ---------------------------------------------------------
df.grid.params <- dials::grid_regular(
dials::mtry(c(11, 15)),
dials::min_n(c(2, 3)),
dials::trees(c(500, 1000)),
levels = 3
)
df.grid.params
future::plan(future::multisession)
# NOTE(review): furrr::future_map() below runs without
# furrr_options(seed = TRUE); random-forest fits on parallel workers are
# therefore not reproducible across runs — confirm whether that matters.
df.results <-
  # Apply each hyperparameter combination to the model
  # merge(df.grid.params, model) %>%
  purrr::pmap(df.grid.params, function(mtry, min_n, trees) {
    parsnip::set_args(
      model,
      mtry = mtry,
      min_n = min_n,
      trees = trees
    )
  }) %>%
  # Loop over the hyperparameter combinations (in parallel)
  # purrr::map(function(model.applied) {
  furrr::future_map(function(model.applied) {
    # Loop over the cross-validation splits
    purrr::map(df.cv$splits, model = model.applied, function(df.split, model) {
      # Build the preprocessed data sets.
      # NOTE(review): prep() with no 'training' argument preps the recipe on
      # the data it was defined with (the FULL training set), not on this
      # split's analysis portion — a possible source of information leakage.
      df.train <- recipe %>%
        recipes::prep() %>%
        recipes::bake(rsample::analysis(df.split))
      df.test <- recipe %>%
        recipes::prep() %>%
        recipes::bake(rsample::assessment(df.split))
      model %>%
        # Fit the model on the analysis set
        {
          model <- (.)
          parsnip::fit(model, salary ~ ., df.train)
        } %>%
        # Predict with the fitted model on both sets
        {
          fit <- (.)
          list(
            train = predict(fit, df.train, type = "numeric")[[1]],
            test = predict(fit, df.test, type = "numeric")[[1]]
          )
        } %>%
        # Evaluate
        {
          lst.predicted <- (.)
          # Define the set of evaluation metrics (MAE only)
          metrics <- yardstick::metric_set(
            yardstick::mae
          )
          # Evaluate the model on the train data
          df.result.train <- df.train %>%
            dplyr::mutate(
              predicted = lst.predicted$train
            ) %>%
            metrics(truth = salary, estimate = predicted) %>%
            dplyr::select(-.estimator) %>%
            dplyr::mutate(
              .metric = stringr::str_c("train", .metric, sep = "_")
            ) %>%
            tidyr::spread(key = .metric, value = .estimate)
          # Evaluate the model on the test data
          df.result.test <- df.test %>%
            dplyr::mutate(
              predicted = lst.predicted$test
            ) %>%
            metrics(truth = salary, estimate = predicted) %>%
            dplyr::select(-.estimator) %>%
            dplyr::mutate(
              .metric = stringr::str_c("test", .metric, sep = "_")
            ) %>%
            tidyr::spread(key = .metric, value = .estimate)
          # One row per split: train_mae and test_mae side by side
          dplyr::bind_cols(
            df.result.train,
            df.result.test
          )
        }
    }) %>%
      # Use the mean over all CV splits as the evaluation score
      purrr::reduce(dplyr::bind_rows) %>%
      dplyr::summarise_all(mean)
  }) %>%
  # Combine the evaluation results with their parameters
  purrr::reduce(dplyr::bind_rows) %>%
  dplyr::bind_cols(df.grid.params) %>%
  # Sort by evaluation score (ascending)
  dplyr::arrange(
    test_mae
  ) %>%
  dplyr::select(
    mtry,
    min_n,
    trees,
    train_mae,
    test_mae
  )
|
13696b8e8a4206e4c6579e40d62369a63328ff16 | 99db93c104dabf35fb38be7a3150e0b95f7ba517 | /ECMPride_parallel.R | 3cdf3b57ef3fd73383d824bcf8495ef34da163f9 | [] | no_license | ms5302kpum/ECMPride | a493390ad3ca75a59a2eb592bf0722f2ed0232a0 | 9eeb238d7913c7760ced51f10cf3deb13054f111 | refs/heads/master | 2023-03-17T14:41:52.158838 | 2020-02-12T12:40:46 | 2020-02-12T12:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,508 | r | ECMPride_parallel.R | ####################################################################################
# author: Binghui Liu
# creation date: 2020-02-12
# file description: Predict whether a specified human protein is ECM protein,
#                   and calculate the corresponding probability.
#                   Run from the command line.
# version: R-3.6.1
####################################################################################
########## Receive command line arguments ##########
# Recover this script's own path from Rscript's --file= argument so the
# package's unzip directory can become the working directory.
initial.options <- commandArgs(trailingOnly = FALSE)
file.arg.name <- "--file="
script.name <- sub(file.arg.name, "", initial.options[grep(file.arg.name, initial.options)])
script.basename <- dirname(script.name)
# NOTE(review): splitting on "\\ECMPride_parallel.R" assumes Windows-style
# backslash path separators; on macOS/Linux the split never matches and
# absolute.path stays equal to the full script path — confirm the tool is
# intended to be Windows-only.
absolute.path <- strsplit(script.name, "\\\\ECMPride_parallel.R")[[1]][1] # Get the unzip path for the ECMPride package
setwd(absolute.path) # Set the unzip path of the ECMPride package to the current working path
# Everything after --args is user input; the first token is the path of the
# CSV listing the UniProt IDs to predict.
args.arg.name <- "--args"
where.args.begin <- grep(args.arg.name, initial.options)
where.proteins.to.be.predicted <- initial.options[where.args.begin + 1]
proteins.to.be.predicted <- read.csv(file = where.proteins.to.be.predicted) # Read the ID of the protein to be predicted
result.path <- dirname(where.proteins.to.be.predicted)
cat("\n\n\nThe prediction is in progress. Please Press Enter once and wait for the result...\n\n")
########## Load required packages ##########
# Attach a package, installing it from CRAN first when it is missing.
# Replaces seven near-identical copies of the require/install/require pattern.
EnsurePackage <- function(pkg) {
  isInstalled <- suppressMessages(require(pkg, character.only = TRUE))
  if (!isInstalled) {
    install.packages(pkg)
    suppressMessages(require(pkg, character.only = TRUE))
  }
}
# Same packages, attached in the same order as before.
for (pkg in c("randomForest", "plyr", "dplyr", "xlsx", "mRMRe", "caret", "parallel")) {
  EnsurePackage(pkg)
}
########## load required function
# Reduce a probability pair c(p.ECM, p.nonECM) to the winning class.
# Returns c(probability, label); c() coerces the probability to character.
# Ties favour "nonECM". The value is returned invisibly, matching the
# original implementation (whose last expression was an assignment).
GetPredictionResult <- function(x) {
  winner <- if (x[1] > x[2]) c(x[1], "ECM") else c(x[2], "nonECM")
  invisible(winner)
}
GenerateSubModel <- function(training.sub.models, test.data.feature)
{
  # Predicts class-membership probabilities for the proteins in
  # test.data.feature using one pre-trained random-forest sub-model.
  #
  # Args:
  #   training.sub.models: a single fitted randomForest model (one element of
  #     the ensemble's sub-model list, as passed in by parLapply below).
  #   test.data.feature: data.frame holding the selected features of the
  #     proteins to be predicted; columns must match the training features.
  #
  # Returns:
  #   The probability matrix from predict(..., type = "prob"): one row per
  #   protein, one column per class.
  #
  # Fixed: the previous header documented parameters that do not exist
  # (part.of.data.feature.ECM / .nonECM), and dead commented-out code was
  # removed. library() must stay inside this function because it is shipped
  # to fresh parallel workers via clusterExport(), and those workers do not
  # inherit the master session's attached packages.
  library(randomForest)
  # NOTE(review): predict() on an already-fitted forest should be
  # deterministic, making this seed redundant; kept for parity with the
  # original pipeline.
  set.seed(1)
  predict(training.sub.models, test.data.feature, type = "prob")
}
########## Generate the dataset for training (part.of.data.feature)##########
# training.sub.models (list of fitted random forests) comes from this .Rdata.
load(file = "materials\\training_features\\training_sub_models.Rdata")
num.sub.model <- 99
cores <- detectCores(logical = TRUE)
########## Generate the dataset for testing (part.of.test.data.feature)##########
# Pre-computed feature tables for the full human Swiss-Prot proteome.
load(file = "materials\\full_human_proteins_features\\record_of_PP_human_sp.Rdata")
load(file = "materials\\full_human_proteins_features\\record_of_InterPro_human_sp.Rdata")
load(file = "materials\\full_human_proteins_features\\record_of_GreyPSSM_human_sp.Rdata")
##########load annotations of human proteins
annotation.human.sp <- read.csv(file = "materials\\full_human_proteins_features\\human_full_protein_annotation.csv")
# Accumulators for the rows matching the requested protein IDs.
# NOTE(review): growing data.frames with rbind() inside the loop is O(n^2);
# acceptable for small ID lists, but worth vectorizing for large inputs.
record.of.PP.human.sp.to.be.predicted <- data.frame()
record.of.InterPro.human.sp.to.be.predicted <- data.frame()
record.of.GreyPSSM.human.sp.to.be.predicted <- data.frame()
record.of.annotation.to.be.predicted <- data.frame()
is.successful.prediction <- TRUE
unmatched.ID <- data.frame()
# Look up each requested UniProt ID (column 1 of the feature tables).
for (i in 1:dim(proteins.to.be.predicted)[1]) {
  temp <- which(as.character(record.of.PP.human.sp[,1]) == as.character(proteins.to.be.predicted[i,1]))
  if (length(temp)!=0) {
    # The three feature tables share row order, so the same index applies.
    record.of.PP.human.sp.to.be.predicted <- rbind(record.of.PP.human.sp.to.be.predicted, record.of.PP.human.sp[temp,])
    record.of.InterPro.human.sp.to.be.predicted <- rbind(record.of.InterPro.human.sp.to.be.predicted, record.of.InterPro.human.sp[temp,])
    record.of.GreyPSSM.human.sp.to.be.predicted <- rbind(record.of.GreyPSSM.human.sp.to.be.predicted, record.of.GreyPSSM.human.sp[temp,])
    temp.annotation <- which(as.character(annotation.human.sp[,1]) == as.character(proteins.to.be.predicted[i,1]))
    if (length(temp.annotation) != 0){
      record.of.annotation.to.be.predicted <- rbind(record.of.annotation.to.be.predicted, annotation.human.sp[temp.annotation, 2:6])
    } else {
      # NOTE(review): when a protein has features but no annotation row, the
      # annotation of row 1 is substituted as a placeholder — confirm this is
      # intended rather than filling with NA.
      record.of.annotation.to.be.predicted <- rbind(record.of.annotation.to.be.predicted, annotation.human.sp[1, 2:6])
    }
  } else {
    # ID not found in the proteome tables: remember it and flag the run.
    is.successful.prediction <- FALSE
    unmatched.ID <- rbind(unmatched.ID, as.character(proteins.to.be.predicted[i,1]))
    names(unmatched.ID) <- "unmatched.ID"
  }
}
# Full feature table for the matched proteins: PP + InterPro + GreyPSSM.
test.data.feature <- cbind(record.of.PP.human.sp.to.be.predicted,
                           record.of.InterPro.human.sp.to.be.predicted,
                           record.of.GreyPSSM.human.sp.to.be.predicted)
load("materials\\training_features\\IGR_after_selection_mRMR.Rdata")
# Column 2 of IGR holds the names of the features retained by the
# IGR + mRMR feature-selection step during training.
feature.choose <- IGR[, 2]
# Locate each selected feature in the full feature table with one vectorized
# lookup (replaces the element-by-element which() loop). match() takes the
# first hit per name; feature names are assumed unique.
where.feature <- match(feature.choose, names(test.data.feature))
# Fail loudly if any selected feature is absent from the feature table
# (the original loop also stopped here, but with a cryptic error).
stopifnot(!anyNA(where.feature))
feature.collection <- test.data.feature[, where.feature]
# Keep the data.frame name so the resulting column names
# ("feature.collection.*") match those used at training time.
part.of.test.data.feature <- data.frame(feature.collection = feature.collection)
########## Uses training.sub.models to predict the proteins in part.of.test.data.feature.
# Run every sub-model's prediction on its own worker process.
cl <- makeCluster(cores)
clusterExport(cl, c("GenerateSubModel","training.sub.models", "part.of.test.data.feature"))
len <- num.sub.model
prediction.result.this.fold.list <- parLapply(cl, 1:len, function(x) {GenerateSubModel(training.sub.models[[x]], part.of.test.data.feature)})
stopCluster(cl)
# prediction.result.submodel.df <- prediction.result.submodel.list[[1]][, 1]
# Bind the 2-column probability matrices of all sub-models side by side, so
# that odd columns hold one class's probabilities and even columns the other's.
# NOTE(review): growing via cbind() in a loop; fine for 99 sub-models.
prediction.result.submodel.df.prob <- prediction.result.this.fold.list[[1]][, 1:2]
for (i in 2:length(prediction.result.this.fold.list)) {
  # prediction.result.submodel.df <- cbind(prediction.result.submodel.df, prediction.result.submodel.list[[i]][, 1])
  prediction.result.submodel.df.prob <- cbind(prediction.result.submodel.df.prob, prediction.result.this.fold.list[[i]][, 1:2])
}
# Ensemble average: mean of the odd columns (first class) and even columns
# (second class) across the 99 sub-models.
# NOTE(review): this assumes predict() returns the class columns in the order
# (ECM, nonECM) for every sub-model — confirm against the training factor levels.
temp.ncol <- ncol(prediction.result.submodel.df.prob)
temp1.mean <- apply(as.matrix(prediction.result.submodel.df.prob[,seq(1, temp.ncol, 2)]),1,mean)
temp2.mean <- apply(as.matrix(prediction.result.submodel.df.prob[,seq(2, temp.ncol, 2)]),1,mean)
temp <- cbind(temp1.mean, temp2.mean)
# Per protein: winning class and its probability (character matrix, one
# column per protein before transposition).
prediction.result <- apply(temp, 1, GetPredictionResult)
prediction.result <- t(prediction.result)
ID <- record.of.PP.human.sp.to.be.predicted[,1]
# Assemble ID / label / numeric probability into a data.frame.
prediction.result <- cbind(ID,
                           data.frame(prediction.result[,2]),
                           data.frame(as.numeric(prediction.result[,1])))
names(prediction.result) <- c("ID", "prediction.result", "prediction.result.prob")
# Attach the five annotation columns gathered earlier.
prediction.result.annotation <- cbind(prediction.result, record.of.annotation.to.be.predicted)
names(prediction.result.annotation) <- c("ID", "prediction.result", "prediction.result.prob",
                                         "Gene.name", "label.human.protein.atlas",
                                         "label.ExoCarta", "label.GO", "label.GO.term")
# Persist the full annotated predictions.
# NOTE(review): the file is always written to this fixed relative path, while
# the messages below report a path derived from the input file's directory;
# these only agree when the input CSV lives in 'proteins_to_be_predicted\\'
# under the package root — confirm intended.
write.csv(prediction.result.annotation, file = "proteins_to_be_predicted\\prediction_result.csv", row.names = FALSE)
if (is.successful.prediction)
{
  # Every requested ID was found: show the results and where they were saved.
  cat("\n\n\nPrediction Succeed.\n")
  print(prediction.result)
  result.path <- file.path(result.path, "prediction_result.csv")
  cat(paste("You can find the full results in the file of '", result.path, "'",sep = ""),"\n")
} else {
  # Some IDs could not be matched: list them, then report the rest.
  cat("\n\n\nSome proteins are not successfully predicted:\n")
  for (i in seq_len(nrow(unmatched.ID)))
  {
    cat(as.character(unmatched.ID[i,1]), "\n")
  }
  cat("Possible reason: Not the normal UniProt ID.\n")
  cat("Please make sure all protein names have been converted into UniProt ID.\n")
  cat("\n\nThe other proteins are successfully predicted:\n")
  print(prediction.result)
  # Fixed: this branch previously reported 'PredictionResult.csv', which does
  # not match the 'prediction_result.csv' file actually written above.
  result.path <- file.path(result.path, "prediction_result.csv")
  cat(paste("\nYou can find the full results in the file of '", result.path, "'",sep = ""),"\n")
}
b69b7075c5a3ff085de81b12f4ea9b62b1aecb5b | e9f8dfe7ba3a684260610934e08d2570ca578744 | /source/7_R/04_R프로그래밍.R | a6f39ca4ad7cfa54017e378c490673018b216e7b | [] | no_license | cherryuki/bigdata | 87864d6058ae23d854c3b146d11dc7fc2bc3f8eb | 928a1a302977017c47724edb5b5a722484cdcfb4 | refs/heads/main | 2023-04-16T20:32:14.017057 | 2021-04-30T12:37:57 | 2021-04-30T12:37:57 | 317,099,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,999 | r | 04_R프로그래밍.R | # 2021-02-03 R_R프로그래밍 ⓒcherryuki(ji) #
# # # 4장. R 프로그래밍; 제어문, function # # #
# 1. 제어문
# 1.1 조건문
# (1) if문
num <- 9
if(num%%2==0) {
print(paste(num, "은 짝수"))
} else {
cat(num, "은 홀수")
}
# (2) ifelse() 함수
# "num%%2==0" "짝수":"홀수"
ifelse(num%%2==0, "짝수", "홀수")
(nums <-c(10, 9, 17, 5, 20))
result <- ifelse(nums%%2==0, "짝수","홀수")
result
m <-matrix(c(nums, result),ncol=5, byrow=TRUE,
dimnames=list(c('수', '홀짝'), c('1회', '2회', '3회', '4회', '5회')))
m
# (3) switch() 함수
switch(2, "red", "green", "blue")
x <- switch(4, "red", "green", "blue")
x #NULL (0넣어도 NULL)
#사용자로부터 color값을 입력받아 해당 color 출력
?readline
color <- as.integer(readline(prompt="원하는 색(red:1, green:2, blue:3)?"))
class(color) #as.numeric() or as.integer()
switch(color, "red", "green", "blue")
color <-readline(prompt = "원하는 색(red, green, blue)?")
colorValue <-switch(color, "red"=1, "green"=2, "blue"=3)
cat('선택한 색상: ', color, '의 색상 값은 ',colorValue)
#Q. 점수(50~100)를 입력받아 학점을 계산하는 프로그램 구현
# 100: perfect, 90점대: A, 80점대: B, 70점대: c, 60점대: D, 50점대: F
score <- as.numeric(readline(prompt="점수(50~100)를 입력하세요: "))
grade <-switch(as.integer(score/10)-4, "F", "D", "C", "B", "A", "Perfect")
grade
getGrade <-function() {
score <- as.numeric(readline(prompt="점수(50~100)를 입력하세요: "))
grade <-switch(as.integer(score/10)-4, "F", "D", "C", "B", "A", "Perfect")
cat('점수는 ', score, '점, 학점은 ', grade)
}
getGrade()
# 1.2 반복문; for, while, repeat
# (1) for문
1:10
seq(1:10)
seq(10)
x<-c(2, 5, -8, 10, 3)
for(val in x) {
cat(val, '\t')
}
count <-0
for(val in x) {
if(val%%2==0) {
count = count+1;
}
}
count
#10회 반복
for(val in 1:10) {
cat(val, "안녕 ")
}
#Q. factorial 계산 함수를 작성
#결과: 5!=120
factorial(5) #5*4*3*2*1
fact <-function(n) {
if(n<0) {
print("양수를 입력해주세요")
} else if(n==0) {
cat("0 ! = 1")
} else {
result <-1
for(val in n:1) {
result=result*val
}
cat(n, "! =", result)
}
}
fact(-2)
fact(0)
fact(3)
fact(5)
rm(list=ls()) #만든 변수 전부 삭제
#getwd() #현재 working 디렉토리
#setwd("C:/Bigdata/source/7_R")
#getwd()
source('fact.R', encoding='utf-8')
fact(3)
# (2) while
i<-1
while(i<6) {
+print(i)
i=i+1
}
# (3) repeat: 반복
i<-1
repeat{
if(i>6) break
print(i)
i<-i+1
}
# (4) break, next(자바에서의 continue)
x <-1
while(x<10) {
x <-x+1
#if(x==5) break;
if(x==3) next;
cat(x,'\t')
}
# 2. Operators
# Logical operators &, &&, |, ||
TRUE & TRUE
TRUE && TRUE
x <-c(TRUE, FALSE, T, T, F)
y <-c(T, T, F, T, F)
x&y # element-wise: TRUE FALSE FALSE TRUE FALSE
# NOTE(review): '&&' on vectors longer than 1 compares only the first
# elements in older R and is an error in R >= 4.3 — confirm R version.
x&&y # first elements only: TRUE
x | y
x || y
# Infix operators
5+8
'+'(5, 8) # the operator called as an ordinary function
# User-defined infix operator %add%
'%add%' <-function(x,y) {
  return(x+y)
}
'%add%'(5,2)
5%add%2
c <-c(10, 23, 30)
10 %in% c
strings <-c("Hello", "world", "R")
'R' %in% strings #TRUE
strings %in% 'R' #FALSE FALSE TRUE
# %o% : outer product of vectors (outer)
# %*% : inner (dot) product of vectors / matrix multiplication
a <-c(2,3,4)
b <-c(10,20,30)
a %o% b
a <-c('1', '2', '3')
b <-c('a', 'b', 'c')
outer(a,b, FUN=paste)
matrix.a <- matrix(1:6, nrow=3) # 3 rows x 2 columns
(matrix.b <-matrix(1:6, nrow=3, ncol=2)) # 3 rows x 2 columns
#matrix.a %*% matrix.b # 3x2 %*% 3x2: non-conformable, multiplication impossible
#solve(matrix.a) # not a square matrix, so no inverse exists
# 3. Functions
# pow(x, y): x raised to the power y.
pow <- function(x, y) x^y
pow(2,5)
# Variadic function: total(), total(1), total(2, 3, 4) all work.
# Sums its arguments left to right starting from 0, exactly like the
# original accumulator loop (vector arguments are added element-wise).
total <- function(...) {
  Reduce(`+`, list(...), 0)
}
total()
total(1)
total(2, 3, 5)
# Recursive call: a function that calls itself -> recursive function.
# fact(num) = num * fact(num - 1) for num > 1; fact(0) = fact(1) = 1.
# A negative argument yields NULL (via the bare return()).
fact <- function(num) {
  if (num < 0) {
    return()
  }
  if (num == 0 || num == 1) {
    return(1)
  }
  num * fact(num - 1)
}
# fact(3) -> returns 6 (3!)
# fact(3) = 3*fact(2) = 3*2*fact(1) = 3*2*1
fact(-1)
fact(0)
fact(1)
fact(3)
# 4. R environments
environment() # <environment: R_GlobalEnv>
ls()
# Nested functions: each call gets its own evaluation environment.
# The Korean strings printed below mean "inside g" / "inside f" and
# "variables in g's / f's scope".
f <-function(f_x) {
  g <-function(g_x) {
    print('g함수 안')
    print(environment())
    print(paste('g함수 영역에서의 변수들', ls()))
  }
  g(5)
  print('f함수 안')
  print(environment())
  cat('f함수 영역에서의 변수들', ls())
}
f(1)
f() # still runs: f_x is never evaluated, so the missing argument is not an error
#Q. 소수 체크
#if문과 for문을 이용하여 매개변수가 소수인지 아닌지
#TRUE, FALSE를 return하는 함수를 작성하고 호출
# Primality test by divisor counting.
#
# num : a single number to test.
# Returns TRUE when `num` has exactly two positive divisors (1 and itself),
# FALSE otherwise -- including everything below 2.
#
# Improvements over the original:
#   * the divisor count is one vectorized expression instead of an O(n) loop
#     that grew `count` one iteration at a time;
#   * inputs below 2 are rejected up front.  The old `num:1` loop stepped by 1
#     from a possibly fractional start, which made non-integers such as 1.5
#     count two zero-remainder "divisors" and be reported as prime.
is.primeNum <- function(num) {
  if (num < 2) {
    return(FALSE)
  }
  sum(num %% seq_len(num) == 0) == 2
}
is.primeNum(-3)
is.primeNum(0)
is.primeNum(1)
is.primeNum(2)
is.primeNum(5)
is.primeNum(23)
|
fbc755c87b3acf280d95eb073701f5e9b56a9ca9 | 92f1abd9a4a7383ddf41b089f5aea6b8caf43b03 | /man/proportionality.Rd | 3b4e35adba864b70adeb5a7415506548fe649575 | [] | no_license | samleenz/propr | 29cdf5cd5b9da62416206b3d6605d392e1df695e | 922b8a01bc009aa1f469138e9c72d79033ddb443 | refs/heads/master | 2021-08-23T13:35:29.566988 | 2017-11-29T08:06:01 | 2017-11-29T08:06:01 | 111,872,374 | 0 | 0 | null | 2017-11-24T03:34:21 | 2017-11-24T03:34:21 | null | UTF-8 | R | false | true | 3,658 | rd | proportionality.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propr.R
\docType{methods}
\name{proportionality}
\alias{proportionality}
\alias{initialize,propr-method}
\alias{phit}
\alias{perb}
\alias{phis}
\alias{corr}
\title{Calculate Proportionality}
\usage{
\S4method{initialize}{propr}(.Object, counts, ivar, select)
phit(counts, ivar = 0, select, symmetrize = TRUE)
perb(counts, ivar = 0, select)
phis(counts, ivar = 0, select)
corr(counts, ivar = 0, select)
}
\arguments{
\item{.Object}{Missing. Ignore. Leftover from the generic
method definition.}
\item{counts}{A data.frame or matrix. A "count matrix" with
subjects as rows and features as columns.}
\item{ivar}{A numeric scalar. Specifies reference feature(s)
for additive log-ratio transformation. The argument will also
accept feature name(s) instead of the index position(s).
Set to "iqlr" to use inter-quartile log-ratio transformation.
Ignore to use centered log-ratio transformation.}
\item{select}{Subsets via \code{object@counts[, select]}.
Optional. Use this argument to subset the proportionality
matrix before building without altering the final result.}
\item{symmetrize}{A logical. If \code{TRUE}, forces symmetry
by reflecting the "lower left triangle".}
}
\value{
Returns a \code{propr} object.
}
\description{
Let D represent any number of features measured across N biological replicates
exposed to a binary or continuous event E. For example, E could indicate case-control
status, treatment status, treatment dose, or time. This function converts a "count matrix"
with N rows and D columns into a proportionality matrix of D rows and D columns.
For phi, the result of \code{phit}, one can think of the resultant matrix as
analogous to a distance matrix, except that it has no symmetry unless forced.
For rho, the result of \code{perb}, one can think of the resultant matrix as
analogous to a correlation matrix.
For phs, the result of \code{phis}, one can think of the resultant matrix as
either a naturally symmetric variant of phi or a monotonic variant of rho.
Another function, \code{corr}, calculates Pearson's correlation using
log-ratio transformed data.
These methods all use the centered log-ratio transformation by default,
but will use an additive log-ratio transformation instead if a scalar
\code{ivar} is provided. When using an additive log-ratio transformation,
this function will return \code{rho = 0} for the column and row in the
\code{@matrix} slot that would contain the reference feature.
Setting \code{ivar} to a numeric or character vector will transform
data using the geometric mean of only the indexed features.
Alternatively, setting \code{ivar} to "iqlr" will transform data using
the geometric mean of only the features with variances that fall in
the inter-quartile range of all per-feature variances. We base this
"iqlr" transformation on the \code{ALDEx2} package.
Log-ratio transformation, by its nature, fails if the input data
contain any zero values. To avoid an error in this case, these
methods automatically replace all zero values with 1. However,
the topic of zero replacement is controversial. Proceed carefully
when analyzing data that contain any zero values.
The \code{select} argument subsets the feature matrix
after log-ratio transformation but before calculating
proportionality. This reduces the run-time and RAM
overhead without impacting the final result. Removing
lowly abundant features prior to log-ratio transformation
could otherwise change the proportionality measure.
}
\examples{
library(propr)
data(mail)
phi <- phit(mail)
rho <- perb(mail)
phs <- phis(mail)
}
|
54f4a4307893b0e1eeb8e1f15783dba2b8ae12f9 | b0231a0c8c675dcac07fc9b18d6bb84e66944f61 | /man/listMean.Rd | 70a1253d700b4bb17486a880d459e737188e72ec | [] | no_license | Adamoso/L4 | 569ca20243a627b707fbdd0d448d33596debf2ab | 80668d9884817dff5c09dfd83b1be7c21c747544 | refs/heads/master | 2020-08-27T07:06:16.177331 | 2019-10-24T11:32:59 | 2019-10-24T11:32:59 | 217,278,751 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 190 | rd | listMean.Rd | \name{listMean}
\alias{listMean}
\title{Mean of list}
\usage{
listMean(x)
}
\description{
Computes the mean value of each element in a list.
}
\examples{
x<-list(c(5,6),c(3,2))
listMean(x)
}
|
c0796dbadd4b13e64b69a9532990c0d5cb48acd2 | 3270487664d61509b5235184f2130c47d00d11ed | /man/radiation.Rd | 6431b16b525ba37d80b0937c217f3a84c6383a7c | [] | no_license | cran/meteor | 97903459a6020dccc017a855a77348b44b094ce3 | 3b1fa5d21cd393674ec7d254e06ec97b9185c670 | refs/heads/master | 2023-07-22T08:29:54.369133 | 2023-07-16T18:00:02 | 2023-07-16T19:30:41 | 236,625,464 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,212 | rd | radiation.Rd | \name{ExtraTerrestrialRadiation}
\alias{ExtraTerrestrialRadiation}
\title{Extra-terrestrial Radiation}
\description{
Compute incoming radiation (J day^-1 m^-2) at the top of the atmosphere and photoperiod (daylength, sunshine duration).
}
\usage{
ExtraTerrestrialRadiation(doy, latitude, sc=1367.7, FAO=FALSE)
}
\arguments{
\item{doy}{integer. Day of the year}
\item{latitude}{numeric. Latitude}
\item{sc}{numeric. The solar constant}
\item{FAO}{logical. If \code{TRUE} the algorithm described by Allen et al (1998) is used. If \code{FALSE} the approach by Goudriaan and Van Laar (1995) is used}
}
\value{
A matrix with incoming radiation (J day^-1 m^-2) and day length (photoperiod).
}
\references{
Goudriaan and Van Laar, 1995.
R.G. Allen, L.S. Pereira, D. Raes and M. Smith (1998). Crop Evaporation - Guidelines for computing crop water requirements. Irrigation and drainage paper 56. FAO, Rome, Italy. \url{https://www.fao.org/3/x0490e/x0490e07.htm}
}
\author{
Robert Hijmans, based on Python meteolib by Maarten J. Waterloo and J. Delsman \url{http://python.hydrology-amsterdam.nl/}
}
\examples{
ExtraTerrestrialRadiation(50, 60)
ExtraTerrestrialRadiation(50, 60, FAO=TRUE)
}
|
b97230a2daf89f75bd3a73a68d5939ffa509ece4 | 0ea2d1c3642b5859151f6a237943ceff31122fc1 | /tests/testthat/setup.R | 923fdb031e48a1e70450e17374e7eda25e4ca673 | [
"MIT"
] | permissive | ropensci/rb3 | 1d6e1eee091a3783993d210177e597963534f30a | 41235da13782ffb78899a0f3a2e2cbc7b0f13c6d | refs/heads/main | 2023-05-24T03:31:37.003326 | 2023-04-14T09:09:16 | 2023-04-14T09:09:16 | 28,033,021 | 5 | 3 | NOASSERTION | 2023-09-11T00:00:47 | 2014-12-15T11:04:39 | R | UTF-8 | R | false | false | 237 | r | setup.R | local_cachedir <- file.path(tempdir(), "rb3-cache")
# Remove the temporary cache directory once the whole test suite finishes;
# teardown_env() scopes the deferred action to testthat's teardown phase.
withr::defer(unlink(local_cachedir, recursive = TRUE), teardown_env())

# Point the rb3 cache at the throw-away directory and silence its messages.
# options() returns the *previous* values, kept in `op` for restoration.
op <- options(
  rb3.cachedir = local_cachedir,
  rb3.silent = TRUE
)
# Restore the caller's original options at teardown as well.
withr::defer(options(op), teardown_env())
|
944e111116d6da5422fe0a47ae5a88318e03e4a2 | 987b70c629be0536922bf60482ed33a299d32013 | /package_build_nolibraries.R | 784b6d7b28fdd84b20469a6081943787766ebb7a | [] | no_license | vppatil/GEISHA_phytoplankton | 90530dc590110af4f1f9624a37dbd1a11f227e2b | 33b371cf1feb444ce9a02ad5e6c4cc66ce8a7a86 | refs/heads/master | 2023-04-13T15:00:06.243178 | 2020-10-06T07:05:45 | 2020-10-06T07:05:45 | 120,331,556 | 1 | 3 | null | 2018-11-01T22:40:35 | 2018-02-05T16:34:45 | R | UTF-8 | R | false | false | 2,487 | r | package_build_nolibraries.R | ###############################################################################
# package creation & maintenance file
# M.J. Lajeunesse, R course 7/5/16
###############################################################################
# define the package name
thePackage <- "algaeClassify"
# set directory to the package development folder
setwd("~/gleon/Geisha/phyto_package/GEISHA_phytoplankton_github_shared/package builds/")
# create new bare-bones package in development folder
# NOTE: only run package creation script once
#install.packages("devtools"); # make sure your have the most recent version
#install.packages("roxygen2"); # make sure your have the most recent version
library(devtools); library(roxygen2)
create(thePackage)
roxygenize(thePackage)
# STEP 2, add functionality ###################################################
# (1) update DESCRIPTION file (package description, authors, dependencies)
# (2) add functions (*.R) in R folder of package
# (3) within *.R files should include roxygen2 markdown of function and
# parameter txt (e.g., what you see in manual)
###############################################################################
# STEP 3, error check and compile package for CRAN ############################
# (1) update DESCRIPTION file
# (2) add functions (*.R) in R folder of package
###############################################################################
# creates bundle to submit to CRAN (*.tar.zip file found in development folder)
build(thePackage, manual = FALSE)
# error checks prior to submission (all errors and warnings need to be addressed)
# Error-check the package prior to submission (all errors and warnings need
# to be addressed).
check(thePackage)
# Review the warnings accumulated during the check.  The original called
# warning() with no arguments, which *signals* a new, empty warning instead
# of displaying the stored ones; warnings() is the inspection function.
warnings()
library(devtools);
document(thePackage)
path <- find.package(thePackage)
system(paste(shQuote(file.path(R.home("bin"), "R")), "CMD", "Rd2pdf", shQuote(path)))
##vijay note- need to @export all function in the roxygen header
##also @examples works but @examples does not.
#with no function definition above, it will try to execute and hit an error otherwise.
# STEP 4, Build example PDF manual (will not be same as CRAN, but close) ######
###############################################################################
# BEFORE STARTING##############################################################
# 1) Download & install MiKTeX to generate manual & vignettes
# 2) through MikTeX /Maintenace/packages install: url, inconsolata, upquote
###############################################################################
# # constructs binaries
|
c1400d0701c791db162caa29c5586fc39a37a3d0 | 897c8c4ba631a410e3b711e5a18108e3e839e03a | /tests/testthat/testConvert.R | 8de407f347f662d35774e1b313240325454fd218 | [] | no_license | llaniewski/rtemplate | 32539cffcccfd233f3af76b768f61c13e507e561 | 0bc32b4050af9bb2c2c8d6d5eb6e804b51c0307e | refs/heads/master | 2023-08-16T21:38:46.068984 | 2023-08-07T03:24:48 | 2023-08-07T03:24:48 | 25,915,678 | 1 | 3 | null | 2023-08-06T14:10:05 | 2014-10-29T10:29:44 | R | UTF-8 | R | false | false | 1,405 | r | testConvert.R | library(rtemplate)
context("RT tests")
# Test helper: run a template through RTconvert(), evaluate the generated R
# code, and return what it printed as a character vector (one element per
# output line).
#
# Output is captured by sink()ing into a text connection that writes to the
# local variable `str`.  The connection must be closed *before* `str` is read
# so any pending partial line is flushed; likewise the sink is restored before
# any error is re-raised, so a failing template does not leave the console
# redirected for later tests.
RTeval = function(x) {
  exp = RTconvert(x)
  exp = parse(text = exp)
  con = textConnection("str", "w")
  sink(con)
  ret = try(eval(exp), silent = TRUE)
  sink()
  close(con)
  # inherits() is the idiomatic class test (the original used
  # '"try-error" %in% class(ret)').
  if (inherits(ret, "try-error")) stop(ret)
  str
}
# Templates with no <?R ... ?> chunk must pass through untouched.
test_that("Sanity 0", {
  expect_equal(RTeval("a"),"a")
  expect_equal(RTeval(c("a","b","c")),c("a","b","c"))
})
# A single code chunk is replaced by whatever it cat()s, whether the chunk
# sits on one input line or spans several.
test_that("Sanity 1", {
  expect_equal(RTeval("<?R cat('a') ?>"),"a")
  expect_equal(RTeval(c("<?R","cat('a')","?>")),"a")
})
# Unbalanced markers and runtime errors must surface as R errors, while code
# that merely *looks* like R outside a chunk stays plain text.
test_that("Sanity 2", {
  expect_error(RTeval("<?R"), 'Non matching')
  expect_error(RTeval("?>"), 'Non matching')
  expect_error(RTeval("<?R stop('error') ?>"), 'error')
  expect_equal(RTeval("stop('aa')"), "stop('aa')")
})
# Variables assigned in one chunk remain visible in later chunks; literal text
# between chunks becomes its own output line.
test_that("Var", {
  expect_equal(RTeval(c("<?R","a='a'","cat(a)","?>")),"a")
  expect_equal(RTeval(c("<?R","a='a'","?><?R","cat(a)","?>")),c("a"))
  expect_equal(RTeval(c("<?R","a='a'","?>","<?R","cat(a)","?>")),c("","a"))
})
# R control flow inside chunks can wrap literal template text.
test_that("If", {
  expect_equal(RTeval(c("<?R","b=3","if (b == 3) {","cat('a')","} else {","cat('b')","}","?>")),"a")
  expect_equal(RTeval(c("<?R","b=4","if (b == 3) {","cat('a')","} else {","cat('b')","}","?>")),"b")
  expect_equal(RTeval(c("<?R","b=3","if (b == 3) { ?>","a","<?R } else {?>","b","<?R }","?>")),c("","a",""))
  expect_equal(RTeval(c("<?R","b=4","if (b == 3) { ?>","a","<?R } else {?>","b","<?R }","?>")),c("","b",""))
})
|
b0fa47023e28f092b995b866999a3dde4444f2ee | a2e5317944ad8050c0820d9380421947da919d08 | /NeuralNet.R | ddde97f5eb4b192f04bbe48d7f215011d0603b88 | [] | no_license | furkangursoy/MIS331-Fall18 | cc9523bf8f77ace760e9417a17497512d04ac33f | 20542d2452ea1322987e698e68b51b9c17cb423d | refs/heads/master | 2021-06-27T01:34:38.882014 | 2020-10-07T09:04:58 | 2020-10-07T09:04:58 | 150,029,914 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,362 | r | NeuralNet.R | #install.packages("neuralnet")
library(neuralnet)
setwd("C:/Users/admin2/Desktop/lab") #set the working directory
# read the dataset and split it into train and test sets #
auto.df <- read.table("autompg1.csv", header=TRUE, sep=",")
auto.df$isUSA <- ifelse(auto.df$origin == 1, 1, 0) #differently from the previous models, we only want to predict whether produced in US or not. so, a new variable is created both for training and test sets.
auto.df$isEUR <- ifelse(auto.df$origin == 2, 1, 0) #differently from the previous models, we only want to predict whether produced in US or not. so, a new variable is created both for training and test sets.
auto.df$isJAP <- ifelse(auto.df$origin == 3, 1, 0) #differently from the previous models, we only want to predict whether produced in US or not. so, a new variable is created both for training and test sets.
auto.df$isUSA
auto.formula <- isUSA + isEUR + isJAP ~ mpg + cylinders + displacement + horsepower + weight #we only want to use these predictors and not all
names(auto.df)
# Per-column minima/maxima of the first five columns, used below for the
# min-max (range) normalisation.  vapply replaces the original loop that grew
# `max`/`min` one element at a time; unname() keeps the results as unnamed
# numeric vectors exactly like the loop produced.
# NOTE: these bindings shadow base::max()/base::min() as data for the rest of
# the script, matching the original's behavior.
max <- unname(vapply(auto.df[1:5], max, numeric(1)))
min <- unname(vapply(auto.df[1:5], min, numeric(1)))
autoRT <- cbind(auto.df) #RT: range transformed. a name we give.
for(i in 1:5){
autoRT[i] <- (auto.df[i] - min[i])/(max[i] - min[i])
}
summary(autoRT)
# Draw a 1/2 label for every row: ~70% get 1 (training) and ~30% get 2 (test).
# The original abbreviated the argument names (`r =`, `pr =`) and relied on
# partial matching; `replace =` and `prob =` are spelled out here.
ind <- sample(1:2, nrow(autoRT), replace = TRUE, prob = c(0.7, 0.3))
auto.train <- autoRT[ind==1,] #select approx 70% of the rows and create the training set
auto.test <- autoRT[ind==2,] #select remaining approx 30% of the rows and create the test set
# NEURAL NETWORKS #
auto.nn22 <- neuralnet(auto.formula, auto.train, hidden =c(2,2), linear.output = FALSE, stepmax = 1e+06) #build a neural network with 2 hidden layers and 2 nodes in each hidden layer
plot(auto.nn22) #visualize the neural networks
compute(auto.nn22, auto.test[,1:5])
nn22.pred <- max.col(compute(auto.nn22, auto.test[,1:5])$net.result) #neural network predictions (continuous values)
nn22.pred #our class predictions
nn22.test.accuracy <- sum(nn22.pred == auto.test$origin) / nrow(auto.test) #overall accuracy for the test set
nn22.test.accuracy #compare with the other neural network's accuracy
nn22.pred.train <- max.col(compute(auto.nn22, auto.train[,1:5])$net.result) #neural network predictions (continuous values)
nn22.train.accuracy <- sum(nn22.pred.train == auto.train$origin) / nrow(auto.train) #overall accuracy for the test set
nn22.train.accuracy
auto.nn232 <- neuralnet(auto.formula, auto.train, hidden =c(2,3,2), linear.output = FALSE, stepmax = 1e+08) #build a neural network with 2 hidden layers and 2 nodes in each hidden layer
plot(auto.nn232) #visualize the neural networks
compute(auto.nn232, auto.test[,1:5])
nn232.pred <- max.col(compute(auto.nn232, auto.test[,1:5])$net.result) #neural network predictions (continuous values)
nn232.pred #our class predictions
nn232.test.accuracy <- sum(nn232.pred == auto.test$origin) / nrow(auto.test) #overall accuracy for the test set
nn232.test.accuracy #compare with the other neural network's accuracy
nn232.pred.train <- max.col(compute(auto.nn232, auto.train[,1:5])$net.result) #neural network predictions (continuous values)
nn232.train.accuracy <- sum(nn232.pred.train == auto.train$origin) / nrow(auto.train) #overall accuracy for the test set
nn232.train.accuracy
## neural network for numeric prediction ##
# this example does not make use of training and test sets. you can see how to use training and test sets in previous sections of this document.
auto.df2 <- read.table("autompg1.csv", header=TRUE, sep=",")
auto.data <- as.data.frame(scale(auto.df2[,1:4])) #choosing only the variables we want and standardizing the variables. we previously transformed inputs to a fixed range but standardization is usually considered an acceptable method as well.
names(auto.data)
auto.formula2 <- mpg ~ cylinders + displacement + horsepower #we only want to use these predictors and not all
mpg.nn <- neuralnet(auto.formula2, auto.data, hidden =c(3,3), stepmax = 1e+08) #build a neural network with 2 hidden layers and 2 nodes in each hidden layer
plot(mpg.nn)
mpg.nn$net.result #predictions
sum((auto.data$mpg - mpg.nn$net.result[[1]])^2) #sum of squared errors. note that it is for standardized values.
|
aa43c554709b986581a6a3aabd25df66bce30573 | 6d6b25fbdcb4a0c03150c21fa10714e05b6a857f | /code/utils.R | 69886f174e597e1075b43036075f6e7c0d3b894e | [] | no_license | jtleek/replication_paper | 997109b37aa70c0c7ae7a2f0d7e354a61c4176fd | 02a05e209a62f6174c3a83c19ff7aa1362fe1374 | refs/heads/gh-pages | 2020-03-30T00:13:48.000074 | 2016-02-08T20:44:16 | 2016-02-08T20:44:16 | 41,891,336 | 3 | 7 | null | 2016-02-08T20:44:25 | 2015-09-04T00:59:36 | HTML | UTF-8 | R | false | false | 1,843 | r | utils.R | gg.theme <- function(type=c("clean","noax")[1],useArial = F, afmPATH="~/Dropbox"){
require(ggplot2)
if(useArial){
set.Arial(afmPATH)
bf_font="Arial"
} else {bf_font="Helvetica"}
switch(type,
clean = theme_bw(base_size = 16, base_family=bf_font) +
theme(axis.text.x = element_text(size = 14),
axis.title.y = element_text(vjust = +1.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.background = element_blank(),
legend.key = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black")),
noax = theme(line = element_blank(),
text = element_blank(),
title = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
)
}
# Return an empty ggplot object (blank placeholder panel) with every theme
# element blanked out -- useful as a spacer in multi-panel layouts.
#
# useArial/afmPATH are accepted only for signature symmetry with gg.theme()
# and are not used here.  The default for useArial was the reassignable `F`;
# it is now the literal FALSE.
plotHolder <- function(useArial = FALSE,afmPATH="~/Dropbox"){
  require(ggplot2)  # attaches ggplot2; callers may rely on this side effect
  ggplot() +
    geom_blank(aes(1,1)) +
    theme(line = element_blank(),
          text = element_blank(),
          title = element_blank(),
          plot.background = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()
    )
}
53d4fb94476c1a0ef80a9c5b2c40a01a4b4a62dc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DLASSO/examples/plot.dlasso.Rd.R | 9a59dbb8fbfedafa04d63d3246d6cff28e7d9988 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 146 | r | plot.dlasso.Rd.R | library(DLASSO)
### Name: plot.dlasso
### Title: Plot method for dlasso objects
### Aliases: plot.dlasso
### ** Examples
example(dlasso)
|
9dc31399fbe27ed552ff65d5e03fe83a23a2050b | 3b4487365133323379bcd64a98db7a46e36a1a15 | /examples/SigNetA/examplecode/uploadbiofile.R | d5fec41577a07c5cb00bf3549aa85e423b840b81 | [
"Apache-2.0"
] | permissive | Saadman/r-cytoscape.js.2 | e998c0472372a59a4261e443a35182f99e005176 | 5f07f20c48e13b0b85f42333e7cb5843489ef15b | refs/heads/master | 2021-01-17T16:00:27.917867 | 2017-02-23T15:52:52 | 2017-02-23T15:52:52 | 82,944,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,307 | r | uploadbiofile.R | output$contents <- renderPrint({
# input$file1 will be NULL initially. After the user selects
# and uploads a file, it will be a data frame with 'name',
# 'size', 'type', and 'datapath' columns. The 'datapath'
# column will contain the local filenames where the data can
# be found.
inFile <- input$file1
if (is.null(inFile))
return(NULL)
##TRYING TO USE FILE DATA##
logic<-read.csv(inFile$datapath)
#logic<-logic[is.finite(logic$P_Value) & is.finite(logic$Diff_Exp), ]
#getting gene information from server##
source("/opt/raid10/genomics/software/RPrograms/source/geneInfoFromPortals.R")
geninfo<-geneInfoFromPortals(geneList=as.character(logic$geneID),symbol=T,names=F)
geneLabels<-apply(geninfo,1,function(x) paste(x[2],"(",as.integer(x[1]),")",sep=""))
#end of gene info and gene label generation#
# for(i in 1:length(logic[2]))
pval<-as.numeric(logic$P_Value)
names(pval)<-geneLabels
logFC<-as.numeric(logic$Diff_Exp)
names(logFC)<-geneLabels
subnet <- subNetwork(geneLabels, interactome)
subnet <- rmSelfLoops(subnet)
pdf("/opt/raid10/genomics/rashid/cytoproject/NetworkAnalysis/scripts/fitbum2.pdf")
system.time( fb <- fitBumModel(pval, plot = TRUE))
dev.off()
system.time(scores <- scoreNodes(subnet, fb, fdr = 0.001))
system.time(module <- runFastHeinz(subnet, scores))
#a problem:some files give "-Inf" values.#
colorNet<-rashidplotmodule(module, scores = scores, diff.expr = logFC)
library(rcytoscapejs)
# id <- c("Jerry", "Elaine", "Kramer", "George")
id <- nodes(module)
name <- id
nodeData <- data.frame(id, name, stringsAsFactors=FALSE)
nodeData$color<- rep("#00FF0F",nrow(nodeData)) #changed color of nodes
nodeData$shape <- "none" #default shape
nodeData$href <- paste0("http://www.google.com/search?q=Seinfeld%20", nodeData$name)
for(i in 1:length(name)){
nodeData[i,3]<-colorNet$c[i];
}
for(i in 1:length(name)){
if(colorNet$s[i]=="csquare")
colorNet$s[i]<-"rectangle"
else
colorNet$s[i]<-"ellipse"
nodeData[i,4]<-colorNet$s[i];
}
ltn<-unlist(lapply(edgeL(module),function(x) length(x[[1]])))
source<-unlist(lapply(1:length(ltn),function(x) rep(id[x],ltn[x])))
target<-unlist(lapply(edgeL(module), function(x) id[unlist(x)]))
vect<-c()
for(i in 1:length(target)) #extracting the value from the key value pair
vect[i]<-target[[i]]
# source <- c("Jerry", "Jerry", "Jerry", "Elaine", "Elaine", "Kramer", "Kramer", "Kramer", "George")
# target <- c("Elaine", "Kramer", "George", "Jerry", "Kramer", "Jerry", "Elaine", "George", "Jerry")
edgeData <- data.frame(source, target, stringsAsFactors=FALSE)
network <- createCytoscapeJsNetwork(nodeData, edgeData)
for(i in 1:length(target)){
network$edges[[i]]$data$edgeTargetShape="none" #making undirected graphss
}
for(i in 1:length(target)){
for(j in i:length(target)){
if(network$edges[[i]]$data$source == network$edges[[j]]$data$target)
network$edges[[j]]$data$target= "none"
}
}
source("/opt/raid10/genomics/rashid/cytoproject/NetworkAnalysis/scripts/rashidcytoscapejs.R")
rashidcytoscapejs(network$nodes, network$edges,showPanzoom=TRUE)
return(source)
##END DATA GENERATION##
})
print(5) |
2014559011d7734636f595c10ea0f385b8bc4359 | 31a9ec3a9ebfbc6f9667e6233ebef7abeefc5113 | /run_analysis.R | f5d21e9c1b9eb3c85e5060864ebd526e487936a9 | [] | no_license | haurhi/Getting-Cleaning-Data | 2d1691eabe5bf43218528f0ebc97a465077a89f7 | 8836a4a400e603dcb319950843a4e7f3dced9959 | refs/heads/master | 2016-09-10T01:05:44.652146 | 2015-02-12T15:14:20 | 2015-02-12T15:14:20 | 29,330,823 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,968 | r | run_analysis.R | library(dplyr)
## Read filepaths and variable names
test_path <- 'UCI HAR Dataset/test'
train_path <- 'UCI HAR Dataset/train'
test_set <- list.files(test_path)
train_set <- list.files(train_path)
feature <- read.table('UCI HAR Dataset/features.txt', stringsAsFactors = F)
## Test datasets
subject_test <- read.table(paste(test_path, '/', test_set[2], sep = ''))
X_test <- read.table(paste(test_path, '/', test_set[3], sep = ''))
y_test <- read.table(paste(test_path, '/', test_set[4], sep = ''))
test_all <- cbind(subject_test, y_test, X_test)
names(test_all) <- c('subject', 'activity', feature[, 2])
## Train datasets
subject_train <- read.table(paste(train_path, '/', train_set[2], sep = ''))
X_train <- read.table(paste(train_path, '/', train_set[3], sep = ''))
y_train <- read.table(paste(train_path, '/', train_set[4], sep = ''))
train_all <- cbind(subject_train, y_train, X_train)
names(train_all) <- c('subject', 'activity', feature[, 2])
## Merges the training and the test sets
dataset <- rbind(train_all, test_all)
## Extracts mean & std
locations <- grep('mean\\(\\)|std\\(\\)', names(dataset))
subdataset <- dataset[, c(1, 2, locations)]
## Read activity names
act_lab <- read.table('UCI HAR Dataset/activity_labels.txt', stringsAsFactors = F)
subdataset[, 2] <- as.factor(subdataset[, 2])
levels(subdataset[, 2]) <- act_lab[, 2]
## Correct column names
names(subdataset) <- gsub('(-|\\(\\))', '', names(subdataset))
names(subdataset) <- gsub('^t', 'time', names(subdataset))
names(subdataset) <- gsub('^f', 'frequency', names(subdataset))
## Summarize the average of each variable grouped by subject and activity
attach(subdataset)
tidy_set <- aggregate(subdataset, by = list(subject, activity), mean)
tidy_set <- tidy_set[, -c(3, 4)]
names(tidy_set)[1:2] <- c('subject', 'activity')
## Alternative
## group_by(subdataset, subject, activity) %>% summarise_each(funs(mean))
## Save as .txt file
write.table(tidy_set, 'tidy_set.txt', row.name = F) |
b419e9e336a67857f92bfe9d5fba0e3c2b87732a | 5d0bc9fa9c48a468d115e9930f5eac66a0764789 | /man/Balsawood.Rd | 5d1ae91a4f9e8a4cdd69e63d133b4bcd03fb77aa | [] | no_license | rpruim/ISIwithR | a48aac902c9a25b857d2fd9c81cb2fc0eb0e848e | 7703172a2d854516348267c87319ace046508eef | refs/heads/master | 2020-04-15T20:36:55.171770 | 2015-05-21T09:20:21 | 2015-05-21T09:20:21 | 21,158,247 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 354 | rd | Balsawood.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Balsawood}
\alias{Balsawood}
\title{Balsawood}
\format{A dataset with 44 observations on the following 2 variables.
\describe{
\item{treatment}{character}
\item{elasticity}{numeric}
}}
\description{
A data set
}
\keyword{datasets}
|
f03771a1a9ed6ef9a828483942b796195feca8a9 | bd02199b6c185c241aa07e731340100a9eea24e7 | /LexicalDiversity.R | 0728ac09bba37a11edac44cc6c1da95c7cc1b991 | [] | no_license | WernerDJ/Patents-and-Lexical-diversity | 17f418e106cb66f44c46a1f5d3ea1ec1ce955be2 | 4d62c9571ff5bc6e165e42c1a660614fbde698be | refs/heads/master | 2021-08-29T10:28:07.272522 | 2017-11-20T09:38:33 | 2017-11-20T09:38:33 | 110,742,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,593 | r | LexicalDiversity.R | #Text minig the patent abstracts
#
# First load the package quanteda
#
library(quanteda)
#
# load the data from the dierectory where it was saved (remember to change the \ in the path string by / if you copy paste the path)
# Cluster sampling was used to avoid over representation from more prolifict patenting countries affecting the lexical diversity measures
# 50 aleatory patents from 9 European non English speaking Cuntries
#
resultlist <- read.csv("C:/Users/Chupacabras/Dropbox/datamining/EPO_Language/resultlist.csv", sep=";")
#
# Change the column names that have uncofortable names in the downloaded GPI csv file
#
names(resultlist)[1]<- "Patent"
names(resultlist)[2]<- "Inventors"
names(resultlist)[3]<- "ApplicantCountry"
names(resultlist)[4]<- "Abstract"
names(resultlist)[5]<- "CPCs"
names(resultlist)[6]<- "Granted"
#
# Create a new data frame where the Inventors names will be substituted by a the number of inventors per patent, and Granted will be 1 or 0 instead of YES or NO
# and the lexical variety indexes will be stored
# The lexical variety indexes are Type-Token Ratio (TTR), Herand C, Giraud's Root and Dugast's Uber Index
#
n <- nrow(resultlist)
x <- data.frame("Granted" = rep(0, n), "NInventors" = rep(0, n), "TTR"=rep(0, n), "HerdanC" = rep(0, n), "GuiraudR" = rep(0, n), "UberIndex" = rep(0, n))
#
LexicalDiv <- cbind(resultlist, x)
LexicalDiv <- LexicalDiv[,-2]
LexicalDiv <- LexicalDiv[,-5]
names(LexicalDiv)[5]<- "Granted"
#
# Obtain the data on lexical diversity
#
# For each patent row: encode the Granted flag, count inventors, and compute
# lexical-diversity indices of the abstract with quanteda.
# NOTE(review): row-by-row corpus/dfm construction is slow for large inputs,
# and `statis[1]`/[2]/[3]/[5] select measures by position in the
# textstat_lexdiv output -- assumed to be TTR, C (Herdan), R (Guiraud) and
# U (Uber) in that order; confirm against the quanteda version in use.
for (fila in 1:n) {
  # Granted was initialized to 0; only flip to 1 for "YES".
  if (resultlist$Granted[fila] == "YES")
  {
    LexicalDiv$Granted[fila] <- 1
  }
  # Inventors column (column 2) holds one name per line; count the lines.
  LexicalDiv$NInventors[fila]<- length(strsplit(as.character(resultlist[[2]][fila]), '\n')[[1]])
  # Build a one-document corpus from this row's abstract.
  mydfm <- as.vector(LexicalDiv$Abstract[fila])
  myCorpus <- corpus(mydfm)
  myDfm <- dfm(myCorpus)   # NOTE(review): myDfm is never used afterwards
  # Stemmed dfm without English stopwords or punctuation.
  myStemMat <- dfm(myCorpus, remove = stopwords("english"), stem = TRUE, remove_punct = TRUE)
  statis <-textstat_lexdiv(myStemMat, measure = c("all", "TTR", "C", "R", "CTTR", "U", "S","Maas"), log.base = 10, drop = TRUE)
  LexicalDiv$TTR[fila] <- as.double(statis[1])
  LexicalDiv$HerdanC[fila] <- as.double(statis[2])
  LexicalDiv$GuiraudR[fila] <- as.double(statis[3])
  LexicalDiv$UberIndex[fila] <- as.double(statis[5])
}
# The decimal separator "." might give problems in computers with an american decimal system in which "," is the standard
# If that is the case, just delete (, dec = ".") from the following line
write.table(LexicalDiv,file = "c:/Users/Chupacabras/Dropbox/datamining/EPO_Language/LexicalDiv.csv",row.names=FALSE, dec = ".")
|
8b603d555d7430917743f0bb4ac5bc645066e722 | b07124a50c70ec5a396ead4e13d033f79220a63c | /man/ejscreen.download.local.Rd | 8bfca5513bcfe47f60bcf2b6f646b007a9833b7b | [
"MIT"
] | permissive | rtoole13/EJSCREENBatch | ff2e04d5b55b1f4e807fd00d69cdb3c4e480d22a | 0f9adfbf42f69001fc4c6ccf30bb1a31e67319d5 | refs/heads/main | 2023-06-20T09:57:34.517801 | 2021-07-19T04:56:07 | 2021-07-19T04:56:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 595 | rd | ejscreen.download.local.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ejscreen.download.gdb.R
\name{ejscreen.download.local}
\alias{ejscreen.download.local}
\title{EJSCREEN data download}
\usage{
ejscreen.download.local(
folder = "EJSCREEN data",
file,
yr = NULL,
ftpurlbase = "https://gaftp.epa.gov/EJSCREEN/",
justreadname = NULL,
addflag = FALSE,
cutoff = 80
)
}
\arguments{
\item{cutoff}{}
}
\value{
}
\description{
Downloads most recent data from EJSCREEN. Code for this function was borrowed
from Mark Corrales' ejscreen function on github at ejanalysis/ejscreen.
}
|
8c8293f76a7dc568f6abeac4d1c93236bba441fa | a845065b2b31e2fde530ad5eb192780340c5f481 | /man/ex16.41.Rd | 0d90df32c29ab945d4166bbc76724e030adbb6f9 | [] | no_license | cran/Devore7 | 319c8a7bd4daca50ba1b7f7acdd53e40957a55bb | 9592edf605e0a7927bdce1e0081796b183b1b5ad | refs/heads/master | 2021-01-13T16:10:38.867717 | 2014-03-25T00:00:00 | 2014-03-25T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 748 | rd | ex16.41.Rd | \name{ex16.41}
\alias{ex16.41}
\docType{data}
\title{R Data set: ex16.41}
\description{
The \code{ex16.41} data frame has 22 rows and 3 columns.
}
\usage{data(ex16.41)}
\format{
A data frame with 22 observations on the following 3 variables.
\describe{
\item{\code{C1}}{a numeric vector}
\item{\code{C2}}{a numeric vector}
\item{\code{C3}}{a numeric vector}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex16.41)
str(ex16.41)
}
\keyword{datasets}
|
95a4f6921a6878435d8bd00f1c8b082652dd1908 | 3514107ceb718704aafba74847cc805a8115a9b8 | /man/stage_1_evaluation.Rd | f64d26ec66e95b487cc9a74aa283fef3ec1b5a86 | [] | no_license | cran/GSED | b85061bdef113c3d30e07adbd63c7201ba668b56 | f49ef8817f794872e792e3b3703e70b0739e073c | refs/heads/master | 2021-01-12T17:32:10.560665 | 2019-08-28T10:20:02 | 2019-08-28T10:20:02 | 71,594,112 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,336 | rd | stage_1_evaluation.Rd | \name{stage_1_evaluation}
\alias{stage_1_evaluation}
\title{Stage 1-evaluation step of GSED}
\description{
\code{stage_1_evaluation} is used to evaluate the efficacy of the subgroup selected at the end of the first stage of GSED.
}
\usage{
stage_1_evaluation(keep, Z_1j, f, u)
}
\arguments{
\item{keep}{Vector containing the indices of the subgroups selected at stage 1.}
\item{Z_1j}{Vector containing the Z-statistics (standard normal under H0) for each subgroup. Must be of length \code{N_subsets}.}
\item{f}{Vector containing the prevalence rates of each subgroup.}
\item{u}{Vector containing the upper boundaries for stagewise decisions.}
}
\value{
A list is returned, consisting of:
\item{stage}{Integer containing the current step. Value is 1 by default, or -1 if the trial stops earlier for efficacy after this evaluation step.}
\item{S}{Vector containing the indices of the subgroups selected at stage 1 (=\code{keep}).}
}
\references{
Baldur P. Magnusson and Bruce W. Turnbull. Group sequential enrichment design incorporating subgroup selection. Statistics in Medicine, 2013. <doi:10.1002/sim.5738>
}
\author{Marie-Karelle Riviere-Jourdan \email{eldamjh@gmail.com}}
\examples{
stage_1_evaluation(keep=c(2,3), Z_1j=c(-0.49,1.07,1.44), f=c(0.6,0.2,0.2), u=c(2.7625,2.5204))
}
|
34584b5efe8cdf5156f8af87c6de4d3f7d9ea501 | d443d4c8b323b61c157fae64ca22610ab836823a | /code/mergeNewStormExporter.R | 65c5d7dadd5674a6c8093cec9b56f7e4d45e88e5 | [] | no_license | zeyuecon/cycloneDave | 4fef296dd35dddffafad3c70cf8eb0acad7b1e3c | 4d3d79cfad69fc00ad6a8a0874749e316f8d757a | refs/heads/master | 2020-04-02T19:55:51.060231 | 2016-07-06T00:16:54 | 2016-07-06T00:16:54 | 61,346,842 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,532 | r | mergeNewStormExporter.R |
# mergeNewStormExporter.R
# Merges storm-exposure measures (1990-2014) with export data and writes
# the combined panels out as Stata .dta files.
# NOTE(review): the path below is machine-specific.
setwd("/Volumes/SAMSUNG/DataSharing")

# Storm measures for all countries; column 13 holds the ISO country code,
# renamed so it can serve as a merge key
all.country <- read.dta13("Storm_measures_1990_2014_all_countries.dta")
colnames(all.country)[13] <- "iso"
# Storm measures restricted to storm-affected countries (read but not used
# below -- kept for interactive inspection)
storm.country <- read.dta13("Storm_measures_1990_2014_storm_countries.dta")

export <- read.dta13("export.dta")
# value arrives as a factor; convert via character before taking logs
export$values <- log(as.numeric(as.character(export$value)))

# Exporter-to-importer (i to j) flows
xij <- read.dta13("export.ij.dta")

# From i to j: storm measures merged onto both exporter and importer sides
all.ij <- merge(all.country, xij, by = c("year", "iso"), all = TRUE)
all.ij <- na.omit(all.ij)
all.ij$iso <- factor(all.ij$iso)
all.ij$year <- factor(all.ij$year)
save.dta13(all.ij, file = "all.country.export.storm.both.dta")

# Only the importer side matters
all <- merge(all.country, export, by = c("year", "iso"), all = TRUE)
all <- data.frame(all)
all <- all[, -3]
all$iso <- factor(all$iso)
all$year <- factor(all$year)

# Drop territories with no usable data.  A single %in% filter replaces the
# thirteen repeated `all <- all[-which(all$iso == "XXX"), ]` lines; besides
# being shorter, it is safe when a code is absent from the data (with the
# old idiom, which() returning integer(0) would silently drop *every* row).
drop_iso <- c("ATA", "ATF", "CCK", "BVT", "CUW", "CXR", "ESH",
              "HMD", "IOT", "MAF", "MNE", "UMI", "VAT")
all <- all[!(all$iso %in% drop_iso), ]
# 5815 rows expected
save.dta13(all, file = "all.country.export.storm.dta")
|
aef6997cd60a5293220ccaf21b29aea04cf7c4a3 | 3bdfe2471525cd05f04156fa465471a44950c626 | /Training a self-driving car through supervised learning/UnityProject_Continuous/NN_log.R | 110c4b0e94179f49935abf86b48d6212a7405d34 | [] | no_license | xavi2525/autonomouscar.github.io | 386185d748d4d2fbe1d0690601b1d19ed8273a48 | 5c707cbce2a34d631a3e5cdfba1394dfd9c6a041 | refs/heads/master | 2020-06-17T05:47:01.937277 | 2019-07-08T13:32:13 | 2019-07-08T13:32:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 846 | r | NN_log.R | logdata <- read.csv('log.txt', header=F)
# NN_log.R -- train and evaluate a small feed-forward neural network on
# logged sensor readings (five sensors predicting a "turn" target).
logdata <- read.csv("log.txt", header = FALSE)
colnames(logdata) <- c("sensor0", "sensor1", "sensor2", "sensor3", "sensor4", "turn")

# 75/25 train/test split
# NOTE(review): the split is unseeded, so results vary between runs --
# add set.seed() before sample() if reproducibility is needed.
index <- sample(seq_len(nrow(logdata)), round(0.75 * nrow(logdata)))
train_ <- logdata[index, ]
test_ <- logdata[-index, ]

# Train a single-hidden-layer (5 units) network on the five sensors.
# (The unused `n <- names(train_)` from the original has been removed.)
library(neuralnet)
f <- as.formula(turn ~ sensor0 + sensor1 + sensor2 + sensor3 + sensor4)
nn <- neuralnet(f, data = train_, hidden = 5, linear.output = TRUE,
                stepmax = 1e6, threshold = 0.1)
plot(nn)

# Test set: raw predictions, 0/1 labels (threshold 0.5), and mean squared error
pr.nn <- compute(nn, test_[, 1:5])
pr.nn_ <- as.numeric(pr.nn$net.result > 0.5)
test.r <- test_[, "turn"]
MSE.nn <- sum((test.r - pr.nn$net.result)^2) / nrow(test_)

# Same metrics on the training set
pr.nn.tr <- compute(nn, train_[, 1:5])
pr.nn.tr_ <- as.numeric(pr.nn.tr$net.result > 0.5)
train.r <- train_[, "turn"]
MSE.nn.tr <- sum((train.r - pr.nn.tr$net.result)^2) / nrow(train_)
3df132f5b55af59fcf6a381ed2a73e7a984758c0 | 61e9c7564b6581941afbaf3e4a0296ee22cd34dd | /transform.R | 124ca7d330f5765e5ebfa63fe62b43407c213001 | [] | no_license | mitchthorson/r-data-science | 98954823ae74bd0a7717cf8810509fa7c4996833 | 80d8a0967cd37087289498e34b6fbab881da7e01 | refs/heads/master | 2020-04-05T18:40:23.001475 | 2018-11-12T01:03:21 | 2018-11-12T01:03:21 | 157,108,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,268 | r | transform.R | library(nycflights13)
# transform.R -- dplyr transformation exercises on the nycflights13 data.
library(nycflights13)
library(tidyverse)

# Flights on January 1st
jan_first <- filter(flights, month == 1, day == 1)

# Arrived two or more hours late
(delayed_two_hrs <- filter(flights, arr_delay >= 120))

# Flew to Houston; %in% replaces the chained ==/| tests
(to_houston <- filter(flights, dest %in% c("HOU", "IAH")))

# Operated by American, Delta, or United
(major_airlines <- filter(flights, carrier %in% c("AA", "DL", "UA")))

# Departed between midnight and 6am
# NOTE(review): a dep_time of 2400 (midnight) is not captured -- this
# matches the original behavior.
(redeyes <- filter(flights, dep_time <= 600))

# Departed in summer (July, August, and September)
(summer_flights <- filter(flights, month %in% 7:9))

# Summer again, using between()
(summer_flights_again <- filter(flights, between(month, 7, 9)))

# No departure time recorded
(no_dep_time <- filter(flights, is.na(dep_time)))

# Sort so flights with a missing departure time come first
(missing <- arrange(flights, desc(is.na(dep_time))))

# Keep only the departure/arrival columns
(sm_flights <- select(
  flights,
  starts_with("dep_"),
  starts_with("arr_")
))

# Select by a character vector of names; all_of() replaces the
# superseded one_of() and errors loudly on misspelled names
vars <- c("year", "month", "day", "dep_delay", "arr_delay")
select(flights, all_of(vars))

# Average departure delay per day, worst first (nested-call form...)
arrange(summarise(group_by(flights, year, month, day), delay = mean(dep_delay, na.rm = TRUE)), desc(delay))

# ...and the equivalent pipeline
flights %>%
  group_by(year, month, day) %>%
  summarise(delay = mean(dep_delay, na.rm = TRUE)) %>%
  arrange(desc(delay))
|
0d57e1aed19de3ce9931bdcb1d02a788c922247e | fc8b4f69821d433a3a5976a6b772dfb38eae31bd | /man/estdiweibull.Rd | aceeefc4d35d0129517b9970396efc81848e60d6 | [] | no_license | cran/DiscreteInverseWeibull | 73ee734dc9b5b4653f68fa7b571bb6d3faa14f96 | d43d2c6765383f73a90e75c762c0be131b8d61ac | refs/heads/master | 2021-01-18T21:46:15.030737 | 2016-05-01T00:44:40 | 2016-05-01T00:44:40 | 17,678,830 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,812 | rd | estdiweibull.Rd | \name{estdiweibull}
\alias{estdiweibull}
\title{
Estimation of parameters
}
\description{
Sample estimation of the parameters of the discrete inverse Weibull distribution
}
\usage{
estdiweibull(x, method="P", control=list())
}
\arguments{
\item{x}{
a vector of sample values
}
\item{method}{
the estimation method that will be carried out: \code{"P"} method of proportion, \code{"M"} method of moments, \code{"H"} heuristic-maximum likelihood method, \code{"PP"} graphical method-probability plot
}
\item{control}{
a list of additional parameters:
\code{eps}, \code{nmax} for the method of moments;
\code{beta1}, \code{z}, \code{r}, \code{Leps} for the heuristic method
}
}
\details{For a description of the methods, have a look at the reference. Note that they may be not applicable to some specific samples. For examples, the method of proportion cannot be applied if there are no 1s in the samples; it cannot be applied for estimating \eqn{\beta} if all the sample values are \eqn{\leq 2}. The method of moments cannot be applied for estimating \eqn{\beta} if all the sample values are \eqn{\leq 2}; besides, it may return unreliable results since the first and second moments can be computed only if \eqn{\beta>2}. The heuristic method cannot be applied for estimating \eqn{\beta} if all the sample values are \eqn{\leq 2}.
}
\value{
a vector containing the two estimates of \eqn{q} and \eqn{\beta}
}
\seealso{
\code{\link{heuristic}}, \code{\link{Ediweibull}}
}
\examples{
n<-100
q<-0.5
beta<-2.5
# generation of a sample
x<-rdiweibull(n, q, beta)
# sample estimation through each of the implemented methods
estdiweibull(x, method="P")
estdiweibull(x, method="M")
estdiweibull(x, method="H")
estdiweibull(x, method="PP")
}
\keyword{distribution, htest}
|
2aa7bbac1e546ea2e6a3324ec86c590557b065c1 | c48de8038decdaaa7375a102ee248b94e699bbd1 | /man/fullFact.Rd | 40ea0f4d9961143ace4ee19d93badf2a1bf28aa4 | [] | no_license | DBoegner/userfriendlyscience | 717f4452fac8d99da59481398267df8dd45aee71 | 0d670e706d40cc9f565260696ab181f65e483378 | refs/heads/master | 2023-09-04T11:03:48.681323 | 2018-09-24T17:30:03 | 2018-09-24T17:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,052 | rd | fullFact.Rd | \name{fullFact}
\alias{fullFact}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
fullFact
}
\description{
This function provides a user-friendly interface to a number of advanced factor
analysis functions in the \code{\link{psych}} package.
}
\usage{
fullFact(dat = NULL, items = NULL, rotate = "oblimin")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
Datafile to analyse; if NULL, a pop-up is provided to select a file.
}
\item{items}{
Which variables (items) to factor-analyse. If NULL, all are selected.
}
\item{rotate}{
Which rotation to use (see \code{\link{psych}} package).
}
}
\value{
The outcomes, which are printed to the screen unless assigned.
}
\author{
Gjalt-Jorn Peters
Maintainer: Gjalt-Jorn Peters <gjalt-jorn@userfriendlyscience.com>
}
\seealso{
\code{\link{fa.parallel}}, \code{\link{vss}}
}
\examples{
\dontrun{
### Not run to save processing during package testing
fullFact(attitude);
}
}
\keyword{ univariate }
|
498a757305c2046e244dc9f95d3ec78cff35175a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mombf/examples/mombf.Rd.R | fbe3ba17af90159a30a60fbdeb1dd3b810a521ef | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 927 | r | mombf.Rd.R | library(mombf)
### Name: mombf
### Title: Moment and inverse moment Bayes factors for linear models.
### Aliases: mombf mombf.lm imombf imombf.lm zellnerbf zellnerbf.lm
### Keywords: models htest
### ** Examples
library(mombf)
## Compute Bayes factors for Hald's data
data(hald)
lm1 <- lm(hald[, 1] ~ hald[, 2] + hald[, 3] + hald[, 4] + hald[, 5])
V <- summary(lm1)$cov.unscaled  # unscaled covariance (not used below)
# Set g so that the prior mode for the standardized effect size is at 0.2
prior.mode <- .2^2
gmom <- mode2g(prior.mode, prior = "normalMom")
gimom <- mode2g(prior.mode, prior = "iMom")
# Add a second g so that the interval (-0.2, 0.2) has 5% prior probability
# (in the standardized effect size scale)
priorp <- .05; q <- .2
gmom <- c(gmom, priorp2g(priorp = priorp, q = q, prior = "normalMom"))
# Bug fix: append the iMom value to gimom; the original appended it to
# gmom, so the inverse-moment vector never received its second g.
gimom <- c(gimom, priorp2g(priorp = priorp, q = q, prior = "iMom"))
mombf(lm1, coef = 2, g = gmom)              # moment BF
imombf(lm1, coef = 2, g = gimom, B = 10^5)  # inverse moment BF
zellnerbf(lm1, coef = 2, g = 1)             # BF based on Zellner's g-prior
|
83c5aad3c4677c46c53485264492d0bc9a0563e3 | 26dd2e27dc53c82ab3d48f6eaf78e2c330dfb1da | /run_analysis.R | bae546d751e54520af9e03143619006817283c3a | [] | no_license | cbeaudrie/Getting-and-Cleaning-Data-course-project | 8dfc8c7c754130acf59c48e9ca9b7f34cc156868 | 7b793b881b27cfe3c74390d76a5aff7d692ae495 | refs/heads/master | 2021-09-08T01:36:02.367977 | 2018-03-05T10:17:28 | 2018-03-05T10:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,685 | r | run_analysis.R | #run_analysis.r
# run_analysis.R -- Getting and Cleaning Data course project.
# Builds a tidy data set from the UCI HAR smartphone recordings.

# Download the raw archive.  The zip must be extracted into the working
# directory set below before the read.table() calls will work.
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, "courseprojectdata.zip")

setwd("~/edu/Getting and Cleaning data/course_project/data")

# Load the required packages (plyr before dplyr, as in the original)
library(data.table)
library(plyr)
library(dplyr)

# Read the lookup tables and the raw test/train pieces
activity_labels <- setDT(read.table("activity_labels.txt", header = FALSE))
setnames(activity_labels, c("V1", "V2"), c("activity_id", "activity"))
features <- read.table("features.txt", header = FALSE)

subject_test <- read.table("test/subject_test.txt", header = FALSE)
x_test <- read.table("test/X_test.txt", header = FALSE)
# lowercase y: the UCI archive ships y_test.txt, and "Y_test.txt" fails
# on case-sensitive file systems
y_test <- read.table("test/y_test.txt", header = FALSE)
subject_train <- read.table("train/subject_train.txt", header = FALSE)
x_train <- read.table("train/X_train.txt", header = FALSE)
y_train <- read.table("train/y_train.txt", header = FALSE)

# 1. Merge the training and the test sets into one data set
datax <- rbind(x_test, x_train)
datay <- rbind(y_test, y_train)
subject <- rbind(subject_test, subject_train)

# Label the columns.  The y files hold the activity codes and the subject
# files hold the participant ids.  (The original script named the subject
# column "activity_id", so step 3's merge joined participant ids against
# activity codes and mislabelled every row.)
setnames(subject, "V1", "subject")
colnames(datax) <- features[, 2]
colnames(datay) <- "activity_id"
data <- cbind(datax, datay, subject)

# 2. Extract only the mean and standard-deviation measurements,
#    keeping the activity and subject id columns for the later steps
meanandstd <- grep("activity_id|subject|mean|std", colnames(data))
meanandstddata <- data[, meanandstd]

# 3. Use descriptive activity names: join the label text, drop the code
meanandstddata <- merge(x = meanandstddata, y = activity_labels, by = "activity_id")
setDT(meanandstddata)
meanandstddata <- select(meanandstddata, -activity_id)

# 4. Appropriately label the data set with descriptive variable names
#    (renamed the temp variable so it no longer shadows base::colnames)
cn <- colnames(meanandstddata)
cn <- gsub("_", " ", cn)
cn <- gsub("-", " ", cn)
cn <- gsub("fBody", "body", cn)
cn <- gsub("tBody", "body", cn)
cn <- gsub("bodyBody", "body", cn)
cn <- gsub("tGravity", "gravity", cn)
cn <- gsub("\\()", " ", cn)
colnames(meanandstddata) <- cn

# 5. Create a second, independent tidy data set with the average of each
#    variable for each activity and each subject (the original grouped by
#    activity only, despite the assignment asking for both)
seconddata <- aggregate(. ~ activity + subject, meanandstddata, mean)
write.table(seconddata, file = "tidydata.txt", row.name = FALSE)
|
f31af8fff74aae24cb6d9ff9291b6b91fe801b97 | b5821b891a69e18e9d7ed96267ff598463c4e9e7 | /Chapter08/codeCluster.R | 480274e5fdd822a47fb42225a53fa44ff4f0012c | [
"MIT"
] | permissive | PacktPublishing/Mastering-Machine-Learning-with-R-Second-Edition | 2284b6e78f2335b3da0bf41a36a3c139d596d71d | 7e4eda202c920890ecf08bb0bb77a7f08a5a7708 | refs/heads/master | 2023-02-14T16:22:27.925602 | 2023-01-30T09:38:55 | 2023-01-30T09:38:55 | 88,627,022 | 34 | 37 | null | null | null | null | UTF-8 | R | false | false | 2,682 | r | codeCluster.R | library(cluster) #conduct cluster analysis
library(compareGroups) #build descriptive statistic tables
library(HDclassif) #contains the dataset
library(NbClust) #cluster validity measures
library(sparcl) #colored dendrogram
# (the cluster package, loaded at the top of the file, provides daisy() and pam())

# ---- Data preparation ----
# Wine data from HDclassif: chemical measurements plus a cultivar Class label
data(wine)
str(wine)
# Give the measurement columns readable names
names(wine) <- c("Class", "Alcohol", "MalicAcid", "Ash", "Alk_ash",
                 "magnesium", "T_phenols", "Flavanoids", "Non_flav",
                 "Proantho", "C_Intensity", "Hue", "OD280_315", "Proline")
names(wine)
# Standardize the predictors (drop Class) so no variable dominates distances
df <- as.data.frame(scale(wine[, -1]))
str(df)
table(wine$Class)

# ---- Hierarchical clustering, complete linkage ----
# Let NbClust vote on the best number of clusters (2..6)
numComplete <- NbClust(df, distance = "euclidean",
                       min.nc = 2, max.nc = 6,
                       method = "complete", index = "all")
numComplete$Best.nc
dis <- dist(df, method = "euclidean")
hc <- hclust(dis, method = "complete")
plot(hc, hang = -1,labels = FALSE, main = "Complete-Linkage")
# Cut the dendrogram into 3 clusters and color it by cluster
comp3 <- cutree(hc, 3)
ColorDendrogram(hc, y = comp3, main = "Complete", branchlength = 50)
table(comp3)
# Cross-tabulate clusters against the true cultivar labels
table(comp3, wine$Class)
# Agreement rate hard-coded from the cross-tab above (out of 178 wines)
(51+50+48)/178

# ---- Hierarchical clustering, Ward's linkage ----
numWard <- NbClust(df, diss = NULL, distance = "euclidean",
        min.nc = 2,
        max.nc = 6,
        method= "ward.D2",
        index = "all")
hcWard <- hclust(dis, method = "ward.D2")
plot(hcWard, hang = -1, labels = FALSE, main = "Ward's-Linkage")
ward3 <- cutree(hcWard, 3)
table(ward3, wine$Class)
# Compare the two hierarchical solutions to each other
table(comp3, ward3)
# Cluster profiles on the original (unscaled) variables
aggregate(wine[, -1], list(comp3), mean)
aggregate(wine[, -1], list(ward3), mean)
# Side-by-side boxplots of Proline by cluster
par(mfrow = c(1, 2))
boxplot(wine$Proline ~ comp3,
        main = "Proline by Complete Linkage")
boxplot(wine$Proline ~ ward3,
        main = "Proline by Ward's Linkage")

# ---- K-means ----
numKMeans <- NbClust(df, min.nc = 2, max.nc = 15, method = "kmeans")
set.seed(1234)
# 25 random starts to avoid a poor local optimum
km <- kmeans(df, 3, nstart = 25)
table(km$cluster)
km$centers
# wine$km_cluster <- km$cluster
boxplot(wine$Alcohol ~ km$cluster,
        main = "Alcohol Content, K-Means")
boxplot(wine$Alcohol ~ ward3,
        main = "Alcohol Content, Ward's")
table(km$cluster, wine$Class)

# ---- PAM on Gower distance (mixed-type data) ----
# Recode Alcohol as High/Low (relative to the standardized mean) so the
# data are mixed-type.  NOTE: this overwrites the numeric column.
wine$Alcohol <- as.factor(ifelse(df$Alcohol > 0, "High", "Low"))
disMatrix <- daisy(wine[, -1], metric = "gower")
set.seed(123)
pamFit <- pam(disMatrix, k = 3)
table(pamFit$clustering)
table(pamFit$clustering, wine$Class)
# Descriptive per-cluster table with compareGroups, exported to CSV
wine$cluster <- pamFit$clustering
group <- compareGroups(cluster ~ ., data = wine)
clustab <- createTable(group)
clustab
export2csv(clustab, file = "wine_clusters.csv")

# ---- Random-forest proximity + PAM ----
library(randomForest)
set.seed(1)
# x-only call: unsupervised forest; the proximity matrix records how often
# pairs of wines share a terminal node.
# NOTE(review): wine[, -1] at this point still includes the recoded Alcohol
# and the `cluster` column added above -- confirm that is intended.
rf <- randomForest(x = wine[, -1], ntree = 2000, proximity = T)
rf
dim(rf$proximity)
rf$proximity[1:5, 1:5]
importance(rf)
# Turn proximities into a dissimilarity matrix and cluster it with PAM
dissMat <- sqrt(1 - rf$proximity)
dissMat[1:2, 1:2]
set.seed(123)
pamRF <- pam(dissMat, k = 3)
table(pamRF$clustering)
table(pamRF$clustering, wine$Class)
|
931f5f65ec0a53fae546ddf23d45f4a2919c80ff | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pointdexter/examples/community_areas_spdf.Rd.R | 63e3b53fdea0aebaf615fadf788e9b7736ab4ce3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 489 | r | community_areas_spdf.Rd.R | library(pointdexter)
### Name: community_areas_spdf
### Title: A SpatialPolygonsDataFrame of Chicago's 77 community areas
### Aliases: community_areas_spdf
### Keywords: datasets
### ** Examples
# load necessary packages ----
library(sp)
# load necessary data ----
# (community_areas_spdf ships with the pointdexter package loaded above)
data("community_areas_spdf")
# plot all 77 community areas -----
# shrink the margins, leaving room only for the title at the top
par(mar = c(0, 0, 1, 0))
plot(community_areas_spdf
     , main = "Chicago's 77 community areas"
     , col = "gray85"
     , border = "dodgerblue4")
|
954daa28143fa4c7ed8ae7e87cd472cac15c50cc | 243afbf525ea85bccb3b707dc0e043d954cc8d53 | /src/Site_tree_hierarchical.R | b496069e9133c930ceeced3482ac02193c2bc4c5 | [] | no_license | avahoffman/blue-grama-diversity | e9e26943a77b51bf6a14c062827de4a47a235b7b | 5fb3a76ad3be76545ad758a19969880735633815 | refs/heads/master | 2020-04-20T13:41:10.389578 | 2020-03-17T15:46:15 | 2020-03-17T15:46:15 | 132,947,858 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,686 | r | Site_tree_hierarchical.R | ###########################################################################################
##
## Create phylogenetic trees for genomic data, both total sites and regional
##
###########################################################################################
# Load libraries
library(adegenet) ## deal with genind objects
library(ade4)
library(ape)
library(phytools)
library(poppr)
library(ggplot2)
library(reshape2)
library(gridExtra)
library(ggrepel)
library(phangorn)
# BiocManager::install("ggtree")
library(ggtree)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
create_total_site_tree <-
  function() {
    # Build a bootstrapped UPGMA tree of ALL sites from the project's
    # SNP (genind) data, cache the tree on disk, and save a colored
    # ggtree figure.  Relies on get_genind_data(), defined elsewhere in
    # the project, and writes into genomics_output/.
    ## remove clones altogether, it messes with the sample size
    # Equalize sample sizes
    # genind.obj <- get_genind_data(impute_mean = F)[c(13:283)]
    # Hard-coded index ranges select 13 individuals from each of 17 sites
    # (presumably excluding clones) -- TODO confirm against the sample sheet.
    genind.obj <- get_genind_data(impute_mean = F)[c(
      13:25,
      30:42,
      46:58,
      63:75,
      80:92,
      97:109,
      113:125,
      130:142,
      145:157,
      158:170,
      174:186,
      189:201,
      204:216,
      221:233,
      237:249,
      252:264,
      268:280
    )]
    pop(genind.obj)  # printed for interactive inspection only
    # Fill missing allele counts with column means
    genind.obj$tab <- tab(genind.obj, NA.method = "mean")
    strata(genind.obj) <- data.frame(pop(genind.obj))
    nameStrata(genind.obj) <- ~ Pop
    # Reuse the cached bootstrap tree if present (load() restores the
    # `poptree` object saved below); otherwise compute and cache it.
    if (file.exists("genomics_output/Site_hierarchy_2.R")) {
      load("genomics_output/Site_hierarchy_2.R")
    } else {
      set.seed(999)
      # 10,000 bootstrap replicates over Nei's distance, UPGMA clustering
      poptree <- genind.obj %>%
        genind2genpop(pop = ~ Pop) %>%
        aboot(
          sample = 10000,
          distance = nei.dist,
          cutoff = 0,
          quiet = F,
          tree = "upgma"
        )
      write.tree(poptree, file = "genomics_output/Site_hierarchy_2.nexus")
      save(poptree, file = "genomics_output/Site_hierarchy_2.R")
    }
    # One color per site (17 entries, matching the 17 index ranges above)
    colz <- c(
      #"black",
      "#859D59",
      "#CBB0CE",
      "#6BAD9E",
      "#F88A89",
      "#E19B78",
      "#A889C1",
      "#217b7e",
      "#426081",
      "#72ADD1",
      "#795199",
      "#399F2F",
      "#A2D48E",
      "#E73233",
      "#FDB35A",
      "#5EB54C",
      "#3386AE",
      "#A6CEE3"
    )
    colz <- as.vector(colz)
    ## each pop individual
    # Group the tree tips by population so ggtree can color them
    cls <- as.list(unique(genind.obj$pop))
    tree <- groupOTU(poptree, cls)
    #tree$node.label[4] <- ""
    # Rectangular tree colored by site; node labels drawn with ggrepel
    ggtree(tree, aes(color = group), layout = "rectangular") +
      geom_label_repel(
        aes(label = label),
        force = 0,
        nudge_x = 0,
        nudge_y = 0
      ) +
      theme_tree() +
      scale_color_manual(values = c(colz)) +
      theme(legend.position = "none")
    # ggsave() picks up the plot built above as the "last plot"
    ggsave(file = "genomics_output/figures/Site_heirarchy_2.jpg",
           height = 7,
           width = 4)
  }
create_reigonal_site_tree <-
  function() {
    # Regional counterpart of create_total_site_tree(): builds a
    # bootstrapped UPGMA tree for seven selected sites, caches it, and
    # saves a colored ggtree figure.  (The "reigonal" typo in the name is
    # kept so existing callers keep working.)
    ## remove clones altogether, it messes with the sample size
    genind.obj <- get_genind_data(impute_mean = F)[c(13:283)]
    pop(genind.obj)  # printed for interactive inspection only
    # Keep only the seven regional sites of interest
    genind.obj <- genind.obj[(
      genind.obj@pop %in%
        c(
          'Cibola',
          'Sevilleta',
          'SGS',
          'Comanche',
          'Konza',
          'Cedar Point',
          'Buffalo Gap'
        )
    ),]
    # Fill missing allele counts with column means
    genind.obj$tab <- tab(genind.obj, NA.method = "mean")
    strata(genind.obj) <- data.frame(pop(genind.obj))
    nameStrata(genind.obj) <- ~ Pop
    # Analysis
    # Reuse the cached bootstrap tree if present (load() restores the
    # `regionaltree` object saved below); otherwise compute and cache it.
    if (file.exists("genomics_output/Site_hierarchy_regional.R")) {
      load("genomics_output/Site_hierarchy_regional.R")
    } else {
      set.seed(999)
      # 10,000 bootstrap replicates over Nei's distance, UPGMA clustering
      regionaltree <- genind.obj %>%
        genind2genpop(pop = ~ Pop) %>%
        aboot(
          sample = 10000,
          distance = nei.dist,
          cutoff = 0,
          quiet = F,
          tree = 'upgma'
        )
      write.tree(regionaltree, file = "genomics_output/Site_hierarchy_regional.nexus")
      save(regionaltree, file = "genomics_output/Site_hierarchy_regional.R")
    }
    # load("genomics_output/Site_hierarchy_regional.R")
    # Color palette for the regional plot ('black' plus seven site colors)
    colz <- c(
      'black',
      "#CBB0CE",
      "#F88A89",
      "#E19B78",
      "#A889C1",
      "#795199",
      "#E73233",
      "#FDB35A"
    )
    colz <- as.vector(colz)
    ## each pop individual
    # Group the tree tips by population so ggtree can color them
    localcls <- as.list(unique(genind.obj$pop))
    regionaltree <- groupOTU(regionaltree, localcls)
    #regionaltree$node.label[4] <- ""
    # Rectangular tree colored by site; node labels drawn with ggrepel
    ggtree(regionaltree, aes(color = group), layout = "rectangular") +
      geom_label_repel(
        aes(label = label),
        force = 0,
        nudge_x = 0,
        nudge_y = 0
      ) +
      theme_tree() +
      scale_color_manual(values = c(colz)) +
      theme(legend.position = "none")
    # ggsave() picks up the plot built above as the "last plot"
    ggsave(file = "genomics_output/figures/Site_heirarchy_regional.jpg",
           height = 7,
           width = 3)
  }
|
da1e4b2f331daf95087f59001fee3bcced47d324 | ba3e338caea37535f443dca9bc814ec32b7dcac6 | /Reg_bed_to_bw.R | 1829841ea1139f35e834e3579997f72029958aae | [] | no_license | elsasserlab/G4 | 7b77d186c85212ac4db9ba8eb6c033e1379f50c1 | d355e3284176b8504839a004c4971213919de1f7 | refs/heads/main | 2023-04-15T16:54:12.223390 | 2022-07-23T17:47:23 | 2022-07-23T17:47:23 | 360,643,618 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,590 | r | Reg_bed_to_bw.R | library(rtracklayer)
library(GenomicRanges)
readPQS <- function(file_path)
{
  # Read one PQS BED-like table (tab-separated, no header) and return its
  # coordinates as a GRanges object restricted to canonical chromosomes.
  pqs <- read.table(
    file_path,
    header = FALSE, sep = "\t",
    col.names = c("seqnames", "start", "end", "name", "length", "strand", "seq")
  )
  # Strip anything after the chromosome name (e.g. "chr1 N...")
  pqs$seqnames <- gsub("(.*)\\sN.*", "\\1", pqs$seqnames)
  # Keep only chr1..chr100, chrX and chrY
  canonical <- paste0("chr", c(1:100, "X", "Y"))
  pqs <- pqs[pqs$seqnames %in% canonical, ]
  makeGRangesFromDataFrame(pqs)
}
bed_to_bw <- function(file_path, .name, genome = NULL) {
  # Convert one or more PQS BED files into two bigWig tracks:
  #   <.name>_stack.bw  - per-base coverage counting overlapping regions
  #   <.name>_binary.bw - merged (reduced) regions, each with score 1
  #
  # file_path: character vector of BED file paths (concatenated if > 1)
  # .name:     output file-name prefix
  # genome:    "hg19" or "mm9" to attach chromosome lengths (from the
  #            matching BSgenome package) to the binary track.  The NULL
  #            default is new but backward compatible: it lets callers
  #            that omitted the argument run instead of crashing with a
  #            missing-argument error at the `genome ==` comparison.
  if (length(file_path) > 1) {
    # lapply (not sapply) guarantees a list regardless of input shape
    gr <- Reduce("c", lapply(file_path, readPQS))
  } else {
    gr <- readPQS(file_path)
  }
  export.bw(coverage(gr), paste0(.name, "_stack.bw"))

  # Collapse overlapping/adjacent regions into a strand-agnostic set
  gr <- reduce(gr, ignore.strand = TRUE)
  gr$score <- 1
  if (!is.null(genome) && genome == "hg19") {
    seqlengths(gr) <- seqlengths(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19)[seqlevels(gr)]
  }
  if (!is.null(genome) && genome == "mm9") {
    seqlengths(gr) <- seqlengths(BSgenome.Mmusculus.UCSC.mm9::BSgenome.Mmusculus.UCSC.mm9)[seqlevels(gr)]
  }
  export.bw(gr, paste0(.name, "_binary.bw"))
}
# hg19 PQS tracks.  The first three calls originally omitted the genome
# argument, which made bed_to_bw() error at the `if (genome == "hg19")`
# check; the file names make the intended genome unambiguous.
bed_to_bw("data/hg19/canonical_PQS_hg19.bed", "canonical_PQS_hg19", "hg19")
bed_to_bw("data/hg19/extended_canonical_PQS_hg19.bed", "extended_canonical_PQS_hg19", "hg19")
bed_to_bw("data/hg19/two_tetrads_PQS_hg19.bed", "two_tetrads_PQS_hg19", "hg19")
bed_to_bw("data/hg19/AAAA_hg19.bed", "AAAA_hg19", "hg19")
bed_to_bw("data/hg19/AAAB_hg19.bed", "AAAB_hg19", "hg19")
bed_to_bw("data/hg19/AABB_hg19.bed", "AABB_hg19", "hg19")
bed_to_bw("data/hg19/ABBB_hg19.bed", "ABBB_hg19", "hg19")
bed_to_bw("data/hg19/ABAB_hg19.bed", "ABAB_hg19", "hg19")
bed_to_bw("data/hg19/ABAA_hg19.bed", "ABAA_hg19", "hg19")
bed_to_bw("data/hg19/ABBA_hg19.bed", "ABBA_hg19", "hg19")
bed_to_bw("data/hg19/AABA_hg19.bed", "AABA_hg19", "hg19")
# Combined track over every AA*/AB* pattern file
bed_to_bw(list.files("data/hg19", pattern = "(AA|AB)", full.names = TRUE), "tranStrand_hg19", "hg19")

# mm9 PQS tracks
bed_to_bw("data/mm9/canonical_PQS_mm9.bed", "canonical_PQS_mm9", "mm9")
bed_to_bw("data/mm9/extended_canonical_PQS_mm9.bed", "extended_canonical_PQS_mm9", "mm9")
bed_to_bw("data/mm9/two_tetrads_PQS_mm9.bed", "two_tetrads_PQS_mm9", "mm9")
bed_to_bw("data/mm9/AAAA_mm9.bed", "AAAA_mm9", "mm9")
bed_to_bw("data/mm9/AAAB_mm9.bed", "AAAB_mm9", "mm9")
bed_to_bw("data/mm9/AABB_mm9.bed", "AABB_mm9", "mm9")
bed_to_bw("data/mm9/ABBB_mm9.bed", "ABBB_mm9", "mm9")
bed_to_bw("data/mm9/ABAB_mm9.bed", "ABAB_mm9", "mm9")
bed_to_bw("data/mm9/ABAA_mm9.bed", "ABAA_mm9", "mm9")
bed_to_bw("data/mm9/ABBA_mm9.bed", "ABBA_mm9", "mm9")
bed_to_bw("data/mm9/AABA_mm9.bed", "AABA_mm9", "mm9")
bed_to_bw(list.files("data/mm9", pattern = "(AA|AB)", full.names = TRUE), "tranStrand_mm9", "mm9")
9b5a14aada930fe8441b6cf628acfe4822b63bae | 9beb6005d6581bb534b6eef49ed82296499518a7 | /15_Faceting_Mas_Graficas_R.R | 9663fc56bb8adcd4ac158c4f24ecfde48ceaf9ee | [] | no_license | BidartMG/R-Mas-Scripts-Practicas | 68ca1c635d235cfcbe932afdba4e3235299cc6e8 | af53bff823d372206cfcc6b51867b1d25a6ef980 | refs/heads/master | 2022-12-25T06:52:48.642663 | 2020-09-29T00:22:18 | 2020-09-29T00:22:18 | 297,231,643 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,068 | r | 15_Faceting_Mas_Graficas_R.R | library(tidyverse)
library(ggplot2)
library(dplyr)
# The gapminder data set is used below but was never loaded in the
# original script; make the dependency explicit.
library(gapminder)

# Scatter plot of GDP per capita vs life expectancy for 2007,
# colored by continent, with a custom legend title
gapminder %>%
  filter(year == 2007) %>%
  ggplot(aes(x = gdpPercap,
             y = lifeExp,
             color = continent)) +
  geom_point() +
  labs(color = 'LEYENDA')

# Faceting: the same plot split into one panel per continent.
# (year is numeric, so compare against 2007, not the string '2007'
# as the original did.)
gapminder %>%
  filter(year == 2007) %>%
  ggplot(aes(x = gdpPercap,
             y = lifeExp,
             color = continent)) +
  geom_point() +
  facet_wrap(~continent)

# How to combine two data frames
x <- data.frame(col1 = c(1, 2, 3),
                col2 = c('Ana', 'Pedro', 'Juan'))
y <- data.frame(col1 = c(2, 3, 4),
                col2 = c('Pedro', 'Juan', 'Carol'))

# Stack both data frames, keeping duplicated rows
bind_rows(x, y)

# Combine both data frames, collapsing rows that appear in both
union(x, y)

# Store the result of a dplyr pipeline in a variable
gapminder %>%
  filter(year == 2007)

gapminder_2007 <- gapminder %>%
  filter(year == 2007)
|
3668325202b03092d22174abc54cc8a43719e0f9 | d3ded694353a2023d96e980189772e1114ef648c | /R Studio Labs/Test 1.R | ce3a0616306880a3425ba37f1574774fdcf5435f | [] | no_license | speed785/R-Projects | 6374ad8512c33daa0f688983615e60492bab64af | 9a142a513a00c0955ca6c1ef94a1b51990e36ba7 | refs/heads/master | 2020-04-22T14:38:30.119102 | 2019-06-14T03:55:50 | 2019-06-14T03:55:50 | 170,450,777 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,183 | r | Test 1.R | # comments can be made using the #
2 + 2 #must equal 4 if not the universe exploded
#The [1] in brackets means that it's in the first position.
vectorA <- c(1,2,3,4,5,6)
vectorB <- c(7,8,9,10,11,12)
# ( <- ) the arrow is the operator
hist(vectorA)
hist(vectorB)
1:50 # This prints out a vector 1 through 50
d <- 1:50
# 'functionname()' for functions
print(a)
#is the same as
a
help(a)
#Can remove variables
rm()
#Packages
help(install.packages)
install.packages("psych")
#Once you install, you need to see if it's in the library
library()
search()
install.packages("QuantPsyc")
Make.Z(a)
QuantPsyc::Make.Z(a)
## Pacman
install.packages("pacman")
pacman::p_load(prettyR)
# can install/load many packages at once with pacman
# Fix: the package name was misspelled 'QuantPysc'; it is 'QuantPsyc'.
pacman::p_load(prettyR, psych, QuantPsyc)
age<-c(25,30,56)
gender<-c(160,110,220)
###################
result <- age+weight
result
myFirstDataset<- data.frame(age,gender,weight)
myFirstDataset
######################### Set Working Directory ####################
getwd()
# NOTE(review): hard-coded, machine-specific path — adjust per machine.
setwd("/Macintosh HD/Users/James/R_works")
# Fix: file names must be quoted strings. The bare token
# 203LabSurvey_Januaary22_2019.csv was a syntax error (R identifiers
# cannot start with a digit). Read once, assign, then print.
mydata <- read.csv("203LabSurvey_Januaary22_2019.csv")
mydata
|
dc502e1fe4cc2d17622b75963e7c9257d6003c04 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.security.identity/R/accessanalyzer_interfaces.R | 4912c41a8b40b214bd5042060184c0dd12922d7b | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 45,689 | r | accessanalyzer_interfaces.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include accessanalyzer_service.R
NULL
# Build the ApplyArchiveRule request shape and populate it with the
# caller-supplied arguments (generated by make.paws; do not edit by hand).
.accessanalyzer$apply_archive_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(type = "string")), ruleName = structure(logical(0), tags = list(type = "string")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# ApplyArchiveRule returns an empty response body, so no shape is declared.
.accessanalyzer$apply_archive_rule_output <- function(...) {
list()
}
# Build the CancelPolicyGeneration request shape; jobId is bound into the
# URI path (generated by make.paws; do not edit by hand).
.accessanalyzer$cancel_policy_generation_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(jobId = structure(logical(0), tags = list(location = "uri", locationName = "jobId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CancelPolicyGeneration's response is an empty structure (no fields).
.accessanalyzer$cancel_policy_generation_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the CreateAccessPreview request shape: a map of per-resource-type
# proposed configurations (EBS, ECR, IAM, EFS, KMS, RDS, Secrets Manager,
# S3, SNS, SQS) plus an idempotency clientToken (generated by make.paws).
.accessanalyzer$create_access_preview_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(type = "string")), configurations = structure(list(structure(list(ebsSnapshot = structure(list(userIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), groups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), kmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ecrRepository = structure(list(repositoryPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), iamRole = structure(list(trustPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), efsFileSystem = structure(list(fileSystemPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), kmsKey = structure(list(keyPolicies = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), grants = structure(list(structure(list(operations = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), granteePrincipal = structure(logical(0), tags = list(type = "string")), retiringPrincipal = structure(logical(0), tags = list(type = "string")), constraints = structure(list(encryptionContextEquals = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), encryptionContextSubset = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), issuingAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), rdsDbClusterSnapshot = structure(list(attributes = structure(list(structure(list(accountIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = 
list(type = "structure", union = TRUE))), tags = list(type = "map")), kmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), rdsDbSnapshot = structure(list(attributes = structure(list(structure(list(accountIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "map")), kmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), secretsManagerSecret = structure(list(kmsKeyId = structure(logical(0), tags = list(type = "string")), secretPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), s3Bucket = structure(list(bucketPolicy = structure(logical(0), tags = list(type = "string")), bucketAclGrants = structure(list(structure(list(permission = structure(logical(0), tags = list(type = "string")), grantee = structure(list(id = structure(logical(0), tags = list(type = "string")), uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), bucketPublicAccessBlock = structure(list(ignorePublicAcls = structure(logical(0), tags = list(type = "boolean", box = TRUE)), restrictPublicBuckets = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure")), accessPoints = structure(list(structure(list(accessPointPolicy = structure(logical(0), tags = list(type = "string")), publicAccessBlock = structure(list(ignorePublicAcls = structure(logical(0), tags = list(type = "boolean", box = TRUE)), restrictPublicBuckets = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure")), networkOrigin = structure(list(vpcConfiguration = structure(list(vpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), internetConfiguration = 
structure(list(), tags = list(type = "structure"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "structure"))), tags = list(type = "map"))), tags = list(type = "structure")), snsTopic = structure(list(topicPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), sqsQueue = structure(list(queuePolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "map")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateAccessPreview response shape: only the new preview's id.
.accessanalyzer$create_access_preview_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(id = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the CreateAnalyzer request shape: analyzer name/type, optional
# archive rules (filter criteria maps), resource tags and an idempotency
# clientToken (generated by make.paws; do not edit by hand).
.accessanalyzer$create_analyzer_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string")), archiveRules = structure(list(structure(list(ruleName = structure(logical(0), tags = list(type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateAnalyzer response shape: the ARN of the created analyzer.
.accessanalyzer$create_analyzer_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(arn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the CreateArchiveRule request shape; analyzerName is bound into
# the URI path, filter is a map of criteria (generated by make.paws).
.accessanalyzer$create_archive_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string")), ruleName = structure(logical(0), tags = list(type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateArchiveRule returns an empty response body, so no shape is declared.
.accessanalyzer$create_archive_rule_output <- function(...) {
list()
}
# Build the DeleteAnalyzer request shape; analyzerName goes into the URI
# path and clientToken into the query string (generated by make.paws).
.accessanalyzer$delete_analyzer_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, location = "querystring", locationName = "clientToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# DeleteAnalyzer returns an empty response body, so no shape is declared.
.accessanalyzer$delete_analyzer_output <- function(...) {
list()
}
# Build the DeleteArchiveRule request shape; analyzerName and ruleName go
# into the URI path, clientToken into the query string (generated code).
.accessanalyzer$delete_archive_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string")), ruleName = structure(logical(0), tags = list(location = "uri", locationName = "ruleName", type = "string")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, location = "querystring", locationName = "clientToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# DeleteArchiveRule returns an empty response body, so no shape is declared.
.accessanalyzer$delete_archive_rule_output <- function(...) {
list()
}
# Build the GetAccessPreview request shape; accessPreviewId goes into the
# URI path and analyzerArn into the query string (generated by make.paws).
.accessanalyzer$get_access_preview_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(accessPreviewId = structure(logical(0), tags = list(location = "uri", locationName = "accessPreviewId", type = "string")), analyzerArn = structure(logical(0), tags = list(location = "querystring", locationName = "analyzerArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# GetAccessPreview response shape: the full accessPreview structure,
# mirroring the per-resource-type configuration map used by
# create_access_preview_input plus status fields (generated by make.paws).
.accessanalyzer$get_access_preview_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(accessPreview = structure(list(id = structure(logical(0), tags = list(type = "string")), analyzerArn = structure(logical(0), tags = list(type = "string")), configurations = structure(list(structure(list(ebsSnapshot = structure(list(userIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), groups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), kmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ecrRepository = structure(list(repositoryPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), iamRole = structure(list(trustPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), efsFileSystem = structure(list(fileSystemPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), kmsKey = structure(list(keyPolicies = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), grants = structure(list(structure(list(operations = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), granteePrincipal = structure(logical(0), tags = list(type = "string")), retiringPrincipal = structure(logical(0), tags = list(type = "string")), constraints = structure(list(encryptionContextEquals = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), encryptionContextSubset = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), issuingAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), rdsDbClusterSnapshot = structure(list(attributes = structure(list(structure(list(accountIds = 
structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "map")), kmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), rdsDbSnapshot = structure(list(attributes = structure(list(structure(list(accountIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "map")), kmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), secretsManagerSecret = structure(list(kmsKeyId = structure(logical(0), tags = list(type = "string")), secretPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), s3Bucket = structure(list(bucketPolicy = structure(logical(0), tags = list(type = "string")), bucketAclGrants = structure(list(structure(list(permission = structure(logical(0), tags = list(type = "string")), grantee = structure(list(id = structure(logical(0), tags = list(type = "string")), uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), bucketPublicAccessBlock = structure(list(ignorePublicAcls = structure(logical(0), tags = list(type = "boolean", box = TRUE)), restrictPublicBuckets = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure")), accessPoints = structure(list(structure(list(accessPointPolicy = structure(logical(0), tags = list(type = "string")), publicAccessBlock = structure(list(ignorePublicAcls = structure(logical(0), tags = list(type = "boolean", box = TRUE)), restrictPublicBuckets = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure")), networkOrigin = structure(list(vpcConfiguration = structure(list(vpcId = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), internetConfiguration = structure(list(), tags = list(type = "structure"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "structure"))), tags = list(type = "map"))), tags = list(type = "structure")), snsTopic = structure(list(topicPolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), sqsQueue = structure(list(queuePolicy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "map")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), status = structure(logical(0), tags = list(type = "string")), statusReason = structure(list(code = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the GetAnalyzedResource request shape; both parameters are sent
# in the query string (generated by make.paws; do not edit by hand).
.accessanalyzer$get_analyzed_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(location = "querystring", locationName = "analyzerArn", type = "string")), resourceArn = structure(logical(0), tags = list(location = "querystring", locationName = "resourceArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# GetAnalyzedResource response shape: a single analyzed-resource record
# (ARN, type, iso8601 timestamps, sharing/status fields).
.accessanalyzer$get_analyzed_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resource = structure(list(resourceArn = structure(logical(0), tags = list(type = "string")), resourceType = structure(logical(0), tags = list(type = "string")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), analyzedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), updatedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), isPublic = structure(logical(0), tags = list(type = "boolean", box = TRUE)), actions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), sharedVia = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), status = structure(logical(0), tags = list(type = "string")), resourceOwnerAccount = structure(logical(0), tags = list(type = "string")), error = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the GetAnalyzer request shape; analyzerName is bound into the URI.
.accessanalyzer$get_analyzer_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# GetAnalyzer response shape: one analyzer summary record (ARN, name,
# type, timestamps, tags, status and statusReason).
.accessanalyzer$get_analyzer_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzer = structure(list(arn = structure(logical(0), tags = list(type = "string")), name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), lastResourceAnalyzed = structure(logical(0), tags = list(type = "string")), lastResourceAnalyzedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), status = structure(logical(0), tags = list(type = "string")), statusReason = structure(list(code = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the GetArchiveRule request shape; analyzerName and ruleName are
# bound into the URI path (generated by make.paws; do not edit by hand).
.accessanalyzer$get_archive_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string")), ruleName = structure(logical(0), tags = list(location = "uri", locationName = "ruleName", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# GetArchiveRule response shape: one archive rule (name, filter criteria
# map, created/updated iso8601 timestamps).
.accessanalyzer$get_archive_rule_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(archiveRule = structure(list(ruleName = structure(logical(0), tags = list(type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), updatedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the GetFinding request shape; analyzerArn goes into the query
# string and the finding id into the URI path (generated by make.paws).
.accessanalyzer$get_finding_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(location = "querystring", locationName = "analyzerArn", type = "string")), id = structure(logical(0), tags = list(location = "uri", locationName = "id", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# GetFinding response shape: one finding record (principal/action maps,
# resource identity, timestamps, status and finding sources).
.accessanalyzer$get_finding_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(finding = structure(list(id = structure(logical(0), tags = list(type = "string")), principal = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), action = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), resource = structure(logical(0), tags = list(type = "string")), isPublic = structure(logical(0), tags = list(type = "boolean", box = TRUE)), resourceType = structure(logical(0), tags = list(type = "string")), condition = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), analyzedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), updatedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), status = structure(logical(0), tags = list(type = "string")), resourceOwnerAccount = structure(logical(0), tags = list(type = "string")), error = structure(logical(0), tags = list(type = "string")), sources = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), detail = structure(list(accessPointArn = structure(logical(0), tags = list(type = "string")), accessPointAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the GetGeneratedPolicy request shape; jobId is a URI path part
# and the two boolean flags are query-string parameters (generated code).
.accessanalyzer$get_generated_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(jobId = structure(logical(0), tags = list(location = "uri", locationName = "jobId", type = "string")), includeResourcePlaceholders = structure(logical(0), tags = list(location = "querystring", locationName = "includeResourcePlaceholders", type = "boolean", box = TRUE)), includeServiceLevelTemplate = structure(logical(0), tags = list(location = "querystring", locationName = "includeServiceLevelTemplate", type = "boolean", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# GetGeneratedPolicy response shape: jobDetails (status/timestamps/error)
# plus generatedPolicyResult (CloudTrail properties and policy texts).
.accessanalyzer$get_generated_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(jobDetails = structure(list(jobId = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), startedOn = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), completedOn = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), jobError = structure(list(code = structure(logical(0), tags = list(type = "string")), message = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), generatedPolicyResult = structure(list(properties = structure(list(isComplete = structure(logical(0), tags = list(type = "boolean", box = TRUE)), principalArn = structure(logical(0), tags = list(type = "string")), cloudTrailProperties = structure(list(trailProperties = structure(list(structure(list(cloudTrailArn = structure(logical(0), tags = list(type = "string")), regions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), allRegions = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), startTime = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), endTime = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601"))), tags = list(type = "structure"))), tags = list(type = "structure")), generatedPolicies = structure(list(structure(list(policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the ListAccessPreviewFindings request shape; accessPreviewId is a
# URI path part, the rest (filter map, pagination) go in the body.
.accessanalyzer$list_access_preview_findings_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(accessPreviewId = structure(logical(0), tags = list(location = "uri", locationName = "accessPreviewId", type = "string")), analyzerArn = structure(logical(0), tags = list(type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map")), nextToken = structure(logical(0), tags = list(type = "string")), maxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# ListAccessPreviewFindings response shape: a list of preview findings
# plus a nextToken for pagination (generated by make.paws).
.accessanalyzer$list_access_preview_findings_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(findings = structure(list(structure(list(id = structure(logical(0), tags = list(type = "string")), existingFindingId = structure(logical(0), tags = list(type = "string")), existingFindingStatus = structure(logical(0), tags = list(type = "string")), principal = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), action = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), condition = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), resource = structure(logical(0), tags = list(type = "string")), isPublic = structure(logical(0), tags = list(type = "boolean", box = TRUE)), resourceType = structure(logical(0), tags = list(type = "string")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), changeType = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), resourceOwnerAccount = structure(logical(0), tags = list(type = "string")), error = structure(logical(0), tags = list(type = "string")), sources = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), detail = structure(list(accessPointArn = structure(logical(0), tags = list(type = "string")), accessPointAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the ListAccessPreviews request shape; all parameters are sent in
# the query string (generated by make.paws; do not edit by hand).
.accessanalyzer$list_access_previews_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(location = "querystring", locationName = "analyzerArn", type = "string")), nextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string")), maxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# ListAccessPreviews response shape: preview summaries plus a nextToken.
.accessanalyzer$list_access_previews_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(accessPreviews = structure(list(structure(list(id = structure(logical(0), tags = list(type = "string")), analyzerArn = structure(logical(0), tags = list(type = "string")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), status = structure(logical(0), tags = list(type = "string")), statusReason = structure(list(code = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the ListAnalyzedResources request shape (body parameters only).
.accessanalyzer$list_analyzed_resources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(type = "string")), resourceType = structure(logical(0), tags = list(type = "string")), nextToken = structure(logical(0), tags = list(type = "string")), maxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# ListAnalyzedResources response shape: resource summaries plus nextToken.
.accessanalyzer$list_analyzed_resources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzedResources = structure(list(structure(list(resourceArn = structure(logical(0), tags = list(type = "string")), resourceOwnerAccount = structure(logical(0), tags = list(type = "string")), resourceType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Build the ListAnalyzers request shape; pagination and type filter are
# query-string parameters (generated by make.paws; do not edit by hand).
.accessanalyzer$list_analyzers_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(nextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string")), maxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer", box = TRUE)), type = structure(logical(0), tags = list(location = "querystring", locationName = "type", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# ListAnalyzers response shape: analyzer summaries plus a nextToken.
.accessanalyzer$list_analyzers_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzers = structure(list(structure(list(arn = structure(logical(0), tags = list(type = "string")), name = structure(logical(0), tags = list(type = "string")), type = structure(logical(0), tags = list(type = "string")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), lastResourceAnalyzed = structure(logical(0), tags = list(type = "string")), lastResourceAnalyzedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), status = structure(logical(0), tags = list(type = "string")), statusReason = structure(list(code = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListArchiveRules request: analyzerName
# in the URI plus pagination querystring parameters.
.accessanalyzer$list_archive_rules_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string")), nextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string")), maxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListArchiveRules response: archive rules
# (name, filter criteria map, created/updated timestamps) plus a nextToken.
.accessanalyzer$list_archive_rules_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(archiveRules = structure(list(structure(list(ruleName = structure(logical(0), tags = list(type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), updatedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListFindings request: analyzerArn,
# optional filter map, sort criteria and pagination fields.
.accessanalyzer$list_findings_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map")), sort = structure(list(attributeName = structure(logical(0), tags = list(type = "string")), orderBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), nextToken = structure(logical(0), tags = list(type = "string")), maxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListFindings response: finding summaries
# (principal, action, resource, status, timestamps, sources) plus a nextToken.
.accessanalyzer$list_findings_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(findings = structure(list(structure(list(id = structure(logical(0), tags = list(type = "string")), principal = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), action = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), resource = structure(logical(0), tags = list(type = "string")), isPublic = structure(logical(0), tags = list(type = "boolean", box = TRUE)), resourceType = structure(logical(0), tags = list(type = "string")), condition = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), createdAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), analyzedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), updatedAt = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), status = structure(logical(0), tags = list(type = "string")), resourceOwnerAccount = structure(logical(0), tags = list(type = "string")), error = structure(logical(0), tags = list(type = "string")), sources = structure(list(structure(list(type = structure(logical(0), tags = list(type = "string")), detail = structure(list(accessPointArn = structure(logical(0), tags = list(type = "string")), accessPointAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListPolicyGenerations request:
# querystring parameters principalArn, maxResults and nextToken.
.accessanalyzer$list_policy_generations_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(principalArn = structure(logical(0), tags = list(location = "querystring", locationName = "principalArn", type = "string")), maxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer", box = TRUE)), nextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListPolicyGenerations response: policy
# generation jobs (jobId, principalArn, status, timestamps) plus a nextToken.
.accessanalyzer$list_policy_generations_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(policyGenerations = structure(list(structure(list(jobId = structure(logical(0), tags = list(type = "string")), principalArn = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), startedOn = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), completedOn = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListTagsForResource request: the
# resourceArn is carried in the URI.
.accessanalyzer$list_tags_for_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ListTagsForResource response: a simple
# string-to-string tag map.
.accessanalyzer$list_tags_for_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the StartPolicyGeneration request: policy
# generation details, optional CloudTrail details (trails, access role, time
# window) and an idempotency clientToken.
.accessanalyzer$start_policy_generation_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(policyGenerationDetails = structure(list(principalArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), cloudTrailDetails = structure(list(trails = structure(list(structure(list(cloudTrailArn = structure(logical(0), tags = list(type = "string")), regions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), allRegions = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), accessRole = structure(logical(0), tags = list(type = "string")), startTime = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601")), endTime = structure(logical(0), tags = list(type = "timestamp", timestampFormat = "iso8601"))), tags = list(type = "structure")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the StartPolicyGeneration response: only the
# jobId of the started generation.
.accessanalyzer$start_policy_generation_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(jobId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the StartResourceScan request: analyzerArn,
# resourceArn and owning account.
.accessanalyzer$start_resource_scan_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(type = "string")), resourceArn = structure(logical(0), tags = list(type = "string")), resourceOwnerAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# StartResourceScan returns no payload, so the output shape is empty.
.accessanalyzer$start_resource_scan_output <- function(...) {
list()
}
# Generated shape declaration for the TagResource request: resourceArn in the
# URI plus the tag map to apply.
.accessanalyzer$tag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string")), tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the TagResource response: an empty structure.
.accessanalyzer$tag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the UntagResource request: resourceArn in
# the URI and the tag keys to remove in the querystring.
.accessanalyzer$untag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string")), tagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "tagKeys", type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the UntagResource response: an empty structure.
.accessanalyzer$untag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the UpdateArchiveRule request: analyzerName
# and ruleName in the URI, the new filter criteria map, and an idempotency
# clientToken.
.accessanalyzer$update_archive_rule_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerName = structure(logical(0), tags = list(location = "uri", locationName = "analyzerName", type = "string")), ruleName = structure(logical(0), tags = list(location = "uri", locationName = "ruleName", type = "string")), filter = structure(list(structure(list(eq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), neq = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), contains = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), exists = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "map")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# UpdateArchiveRule returns no payload, so the output shape is empty.
.accessanalyzer$update_archive_rule_output <- function(...) {
list()
}
# Generated shape declaration for the UpdateFindings request: analyzerArn,
# the target status, finding ids or a resourceArn, and an idempotency
# clientToken.
.accessanalyzer$update_findings_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(analyzerArn = structure(logical(0), tags = list(type = "string")), status = structure(logical(0), tags = list(type = "string")), ids = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), resourceArn = structure(logical(0), tags = list(type = "string")), clientToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# UpdateFindings returns no payload, so the output shape is empty.
.accessanalyzer$update_findings_output <- function(...) {
list()
}
# Generated shape declaration for the ValidatePolicy request: locale,
# pagination fields, the policy document and its type, and the resource type
# to validate against.
.accessanalyzer$validate_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(locale = structure(logical(0), tags = list(type = "string")), maxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer", box = TRUE)), nextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string")), policyDocument = structure(logical(0), tags = list(type = "string")), policyType = structure(logical(0), tags = list(type = "string")), validatePolicyResourceType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Generated shape declaration for the ValidatePolicy response: validation
# findings with nested locations (path segments and start/end spans) plus a
# pagination nextToken.
.accessanalyzer$validate_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(findings = structure(list(structure(list(findingDetails = structure(logical(0), tags = list(type = "string")), findingType = structure(logical(0), tags = list(type = "string")), issueCode = structure(logical(0), tags = list(type = "string")), learnMoreLink = structure(logical(0), tags = list(type = "string")), locations = structure(list(structure(list(path = structure(list(structure(list(index = structure(logical(0), tags = list(type = "integer", box = TRUE)), key = structure(logical(0), tags = list(type = "string")), substring = structure(list(start = structure(logical(0), tags = list(type = "integer", box = TRUE)), length = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", union = TRUE))), tags = list(type = "list")), span = structure(list(start = structure(list(line = structure(logical(0), tags = list(type = "integer", box = TRUE)), column = structure(logical(0), tags = list(type = "integer", box = TRUE)), offset = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), end = structure(list(line = structure(logical(0), tags = list(type = "integer", box = TRUE)), column = structure(logical(0), tags = list(type = "integer", box = TRUE)), offset = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), nextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
|
13f474f000bdcef0ec556aa68d910691612e9969 | 810b768094cff034dae61172abeecc33374a72f5 | /function/submit/train_4.R | 406600b3c95be6584773a147f8c31dd6a228d763 | [] | no_license | yahcong/diabetes | 1c0e7667b9385a0d0062515fa9de8d3056b797db | b5171dafcddd218174a701a964995ccc8aee4843 | refs/heads/master | 2021-05-12T17:18:53.932092 | 2018-01-18T04:09:26 | 2018-01-18T04:09:26 | 117,042,177 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,172 | r | train_4.R | rm(list=ls())
# train_4.R: fit a random forest on the reduced diabetes feature set and
# compare training vs. held-out RMSE for blood-glucose (GLU) prediction.
library(data.table)
library(mice)
library(randomForest)
library(caret)

setwd("F:/DataMining/R/diabetes")
load("data/output/local_data_select.rda")

# Inspect the missing-data pattern of the selected features.
md.pattern(local_data_select)

# Split into training (70%) and inner test (30%) sets. Seed *before* sampling
# so the split itself is reproducible (the original only seeded later, after
# the split had already been drawn).
set.seed(100)
sub <- sample(seq_len(nrow(local_data_select)),
              round(nrow(local_data_select) * 0.7))
Training_Inner <- local_data_select[sub, ]
Test_Inner <- local_data_select[-sub, ]
local_data <- Training_Inner

# Train ----
str(local_data)
set.seed(100)
model_randomForest_NORM_select <- randomForest(GLU ~ ., data = local_data,
                                               importance = TRUE, ntree = 80)
save(model_randomForest_NORM_select,
     file = "model/model_randomForest_NORM_select.rda")

# Training-set predictions and RMSE ----
local_data$predict_GLU <- round(
  predict(model_randomForest_NORM_select, subset(local_data, select = -GLU)), 3)
# Keep the training score in its own variable: the original reused
# `score_rmse` and silently overwrote this value with the test score.
score_rmse_train <- RMSE(local_data$predict_GLU, local_data$GLU)
# ~0.6104 on the author's run

# Test-set predictions and RMSE ----
Test_Inner$predict_GLU <- round(
  predict(model_randomForest_NORM_select, subset(Test_Inner, select = -GLU)), 3)
score_rmse <- RMSE(Test_Inner$predict_GLU, Test_Inner$GLU)
# ~1.5600 on the author's run
# Conclusion: after dropping some attributes the performance barely changes?
|
185294bd24dbaf54fb1b44e1917f80d49dcb9e14 | a1c8c3488c4131c445c6f5e4fcb0cfa7fa9ddcc9 | /man/as.geojson.Rd | bc507ae4ec13ee088609a00afa97dfb09c880b38 | [
"MIT"
] | permissive | ropensci/chirps | 998124edbd8e2ed8b3034b6bcf7685dff63a97a5 | 72500a72ee1de48f048d6959d87ea5f7d70289b4 | refs/heads/master | 2023-05-24T01:28:34.605349 | 2023-01-06T11:38:27 | 2023-01-06T11:38:27 | 225,693,680 | 22 | 10 | NOASSERTION | 2021-09-20T08:20:09 | 2019-12-03T18:57:12 | R | UTF-8 | R | false | true | 1,709 | rd | as.geojson.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.geojson.R
\name{as.geojson}
\alias{as.geojson}
\alias{as.geojson.default}
\alias{as.geojson.sf}
\title{Methods to coerce geographical coordinates into a geojson polygon}
\usage{
as.geojson(lonlat, dist = 1e-05, nQuadSegs = 2L, ...)
\method{as.geojson}{default}(lonlat, dist = 1e-05, nQuadSegs = 2L, ...)
\method{as.geojson}{sf}(lonlat, dist = 1e-05, nQuadSegs = 2L, ...)
}
\arguments{
\item{lonlat}{a data.frame or matrix with geographical coordinates lonlat, in
that order, or an object of class 'sf' with geometry type 'POINT' or 'POLYGON'}
\item{dist}{numeric, buffer distance for all \code{lonlat}}
\item{nQuadSegs}{integer, number of segments per quadrant}
\item{...}{further arguments passed to \code{\link[sf]{sf}} methods}
}
\value{
An object of class 'geojson' for each row in \code{lonlat}
}
\description{
Take single points from geographical coordinates and coerce into a
geojson of geometry 'Polygon'
}
\examples{
\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# Default S3 Method
# random geographic points within bbox(10, 12, 45, 47)
library("sf")
set.seed(123)
lonlat <- data.frame(lon = runif(1, 10, 12),
lat = runif(1, 45, 47))
gjson <- as.geojson(lonlat)
#################
# S3 Method for objects of class 'sf'
# random geographic points within bbox(10, 12, 45, 47)
library("sf")
set.seed(123)
lonlat <- data.frame(lon = runif(5, 10, 12),
lat = runif(5, 45, 47))
lonlat <- st_as_sf(lonlat, coords = c("lon","lat"))
gjson <- as.geojson(lonlat)
\dontshow{\}) # examplesIf}
}
\concept{utility functions}
|
42be3ead41386e7f4e61b38472798581240032fd | 96146580da01780fe539427e4b5c58f12d759e85 | /stat_piechart.r | c3ab948d77e38d5b3059e3a3c682cb5dfe49c9bd | [] | no_license | timothychoi/rcodes | a462e13ac1ed4ce4ffd1c6eb379c087a3b173754 | a6c3771740f8cde91d5ec1d168fe00d17b925b00 | refs/heads/master | 2020-04-17T04:12:45.780724 | 2019-01-17T12:10:45 | 2019-01-17T12:10:45 | 166,219,524 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 533 | r | stat_piechart.r | bloodtypes <- c("O", "O", "A", "A", "O", "B", "O", "B", "A", "O", "AB", "B", "A", "B", "AB", "O", "O", "A", "A", "O", "AB", "O", "A", "B", "A", "O", "A", "A", "O", "A", "O", "A", "O", "AB", "A", "O", "B", "A", "A", "O", "O", "O", "O", "A", "O", "O", "A", "O", "A", "O")
# Tabulate blood-type frequencies and label each pie slice as "TYPE ( P% )",
# where P is the type's share of the 50 patients, rounded to a whole percent.
counts <- table(bloodtypes)
pct <- round(100 * counts / sum(counts))
lbls <- paste0(names(counts), " ( ", pct, "% )")
pie(counts, labels = lbls, main = "Blood Types of 50 Patients")
|
54a960ba78ad6f278d8ffecccf0be1b12609ceb4 | d125c7eaeeaf2258637a91cdf344f48409fa927a | /R/data.R | f99820e042c9a405bb945b981e525c4eb7fe433d | [
"MIT"
] | permissive | KuldeepSJadon/climcropr | e9d36b48423a029013bed96362e3b6d916ab82eb | 67680d4568d367e022ad3bd34e88b78f60023b5e | refs/heads/master | 2020-05-25T22:58:19.185195 | 2018-02-12T08:40:37 | 2018-02-12T08:40:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 233 | r | data.R |
#' df_mirca
#'
#' Lookup table linking MIRCA2000 crop codes to their MIRCA and ecocrop
#' crop names.
#'
#' \describe{
#'   \item{mirca_code}{MIRCA2000 Crop Code}
#'   \item{mirca_name}{Mirca Name in English, \emph{e.g.}, "Potatoes"}
#'   \item{ecocrop_name}{ecocrop Name in English, \emph{e.g.}, "Potato"}
#' }
"df_mirca"
|
2036bc3c3209d6a8d307a55c82861ce4d1ba44bd | 3c7231d7e902c1a7dea015144cbdb280f16453f0 | /scripts/summarize_paired_vcf.R | 65e222b031d0a7229084d63a61cbc560b03f8143 | [
"MIT"
] | permissive | hhuang2018/WGS_analysis | 9532cf14e4793624ea7af6458127d29e6c24653c | a05d5bd90a4066fff951ebd993811edf3637d324 | refs/heads/master | 2021-03-22T05:00:45.458090 | 2018-09-30T02:00:45 | 2018-09-30T02:00:45 | 62,680,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,542 | r | summarize_paired_vcf.R | source('util.R', echo = FALSE)
load("../Data/GRCh38_gene_list.RData")
if (!require("vcfR", quietly=TRUE, warn.conflicts = FALSE)) {
install.packages("vcfR", dependencies = TRUE)
library("vcfR", verbose=F, warn.conflicts =F)
}
vcf_fp <- "../"
vcf_file <- "test_stats_DtoR.vcf"
vcf_info <- read.vcfR(paste0(vcf_fp, vcf_file), verbose = FALSE)
if(is.null(colnames(vcf_info@gt))){
if(dim(vcf_info@gt)[2] == 2){
colnames(vcf_info@gt) <- c("FORMAT", "SAMPLE")
}else{
colnames(vcf_info@gt) <- c("FORMAT", sapply(1:(dim(vcf_info@gt)[2]-1), function(x) paste0("SAMPLE", x)))
}
}
vcf_gt <- extract.gt(vcf_info, element = "GT")
CHROMs <- unique(vcf_info@fix[,1])
num_CHROMs <- length(CHROMs)
summary_table <- data.frame(CHROM = character(num_CHROMs),
NumVariants = numeric(num_CHROMs),
NumGenes = character(num_CHROMs),
# ALT = character(num_rows),
# GT = numeric(num_rows),
# groupID = character(num_rows),
stringsAsFactors = FALSE)
Gene_list <- list()
for(chr in 1:num_CHROMs){
ptm <- proc.time()
chrom <- CHROMs[chr]
chr_index <- which(vcf_info@fix[, 1] == chrom)
vcf_gt <- extract.gt(vcf_info, element = "GT")
vcf_gt <- vcf_gt[chr_index, ]
num_rows <- length(chr_index)
summary_table$CHROM[chr] <- chrom
summary_table$NumVariants[chr] <- num_rows
#######
# Check all the genes in the list
#######
GRCh38_gene_list_chr <- GRCh38_gene_list[which(GRCh38_gene_list$chrom == chrom), ]
gene_names <- unique(GRCh38_gene_list_chr$GeneName)
num_genes <- length(gene_names)
Gene_summary <- data.frame(GeneName = vector(mode = "character", length = num_genes),
total_num = vector(mode = "integer", length = num_genes),
intron_variants_num = vector(mode = "integer", length = num_genes),
exon_variants_num = vector(mode = "integer", length = num_genes),
utr_variants_num = vector(mode = "integer", length = num_genes),
promoter_variants_num = vector(mode = "integer", length = num_genes),
stringsAsFactors = F)
geneVariants <- as.data.frame(vcf_info@fix[vcf_info@fix[, 1] == chrom, ], stringsAsFactors = FALSE)
names(geneVariants) <- c("CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO")
geneVariants$POS <- as.integer(geneVariants$POS)
counter <- 0
for (id in 1:num_genes){
GeneName <- as.character(gene_names[id])
geneInfo <- GRCh38_gene_list_chr[which(GRCh38_gene_list_chr$GeneName == GeneName)[1], ]
gene_region_index <- which((geneVariants$POS >= (geneInfo$txStart-999)) & (geneVariants$POS <= (geneInfo$txEnd+999)))
if(length(gene_region_index) > 0){
counter <- counter + 1
gene_chr <- geneVariants[gene_region_index, ]
gene_stats <- gene_variant_stats(geneInfo, gene_chr)
Gene_summary$GeneName[counter] <- GeneName
Gene_summary[counter, 2:6] <- gene_stats$variants_stats[1:5]
}
}
summary_table$NumGenes[chr] <- counter
Gene_summary <- Gene_summary[-which(Gene_summary$GeneName == ""), ]
Gene_list <- append(Gene_list, list(Gene_summary))
proc.time() - ptm
}
save(summary_table, Gene_list, file = "../Output/paired_VCF_summary/test_stats_DtoR.RData")
load("../Output/paired_VCF_summary/test_stats_DtoR.RData")
|
bb83eb0959e47ee4c34c47a16eaa9b859e095552 | bbb57e538148adec3cb6e80c4eb7d9fc2fd3b079 | /code/gender.R | 5d1b67639a1412956d6247b6686993aaa5684ab3 | [] | no_license | amanjuna/patent_impact | 1d0690d0c6d91783e89f98af05a19cb7bc8ae11a | eec1278acb50f6e912ae2e1d3d82be4e290658af | refs/heads/master | 2022-12-19T15:27:42.958413 | 2020-10-04T06:28:42 | 2020-10-04T06:28:42 | 296,167,577 | 9 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,112 | r | gender.R | """
File: gender.R
Function utilizes the gender R package to detemine the gender for all first names in a dataframe.
"""
library(gender)
library(tidyverse)

#' Infer gender for the first names in a data frame column.
#'
#' BUG FIX: the original used a Python-style triple-quoted docstring inside
#' the function body, which is a syntax error in R ("unexpected string
#' constant"); it is replaced by roxygen comments here.
#'
#' @param df data frame containing columns `pmid` (paper id), `year`
#'   (publication year) and the first-name column named by `var`.
#' @param var string: name of the column in `df` holding the first names.
#' @return data frame with columns `pmid` and `<var>_gender` ("Male"/"Female").
get_gender <- function(df, var) {
  # Assume authors were 15-75 years old at publication, and clamp the
  # resulting birth-year window to 1880-2012 (the range used by gender_df).
  gender <- df %>%
    select(pmid, all_of(var), year) %>%
    mutate(min_year = year - 75, max_year = year - 15)
  gender$min_year <- pmax(1880, pmin(gender$min_year, 2012))
  gender$max_year <- pmax(1880, pmin(gender$max_year, 2012))
  # Drop single-character names (mostly initials, which cannot be gendered).
  first <- gender %>% filter(nchar(gender[, var]) > 1)
  output <- gender_df(first, name_col = var, year_col = c("min_year", "max_year"))
  output <- unique(output)
  done <- merge(first, output,
                by.x = c("min_year", "max_year", var),
                by.y = c("year_min", "year_max", "name"))
  done$gender <- factor(done$gender, levels = c("male", "female"),
                        labels = c("Male", "Female"))
  done <- done %>% select("pmid", "gender")
  colnames(done) <- c("pmid", paste0(var, "_gender"))
  done
}
05db7bca5b9effd86b2d446b8b967fbaddc85e3f | 16b00705a7464228786d82c9fd1d4379468cd4f7 | /plot2.R | ddc70750e715ce6b3a21fc4cf8b923af83d19d8d | [] | no_license | muditsharma1234/Exploratory-Data-Analysis | 8e401fd5565a4a069b760289471da547aa66003f | 89d6701efa9a123fe953d1cb9f513a1d6260277d | refs/heads/master | 2021-08-30T21:48:17.025275 | 2017-12-19T14:58:36 | 2017-12-19T14:58:36 | 114,639,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 484 | r | plot2.R |
##PLOT 2
png(file.path(plot_dir,"plot2.png"),width=600,height=480)
plot(data_hh_required$DateTimeFormat, data_hh_required$Global_active_power,
type="l",
ylab = "Global Active Power (kilowatts)", xlab = "",
xlim=c(min(data_hh_required$DateTimeFormat),max(data_hh_required$DateTimeFormat)))
axis(1,c(data_hh_required$DateTimeFormat[1],mean(data_hh_required$DateTimeFormat),data_hh_required$DateTimeFormat[2880]),
labels=c("Thu","Fri","Sat"))
dev.off() |
fa14bacc8493efa25a57b0d37f3a988836878d0f | dd4ffdf2ce845d69b1690e689f7d38576289e279 | /test-get-stability-paths.R | f872757f20a486fc093bece83e70cb6e2edc4dce | [] | no_license | fort-w2021/stability-ex-lisa-wm | 3a155f940f3569eb6f04bcc43cd2b5ec0d093347 | 1b042789fa6f0cdf7603385d7121d79239d55884 | refs/heads/main | 2023-02-26T10:00:55.959292 | 2021-01-27T19:05:10 | 2021-01-27T19:05:10 | 317,600,265 | 0 | 0 | null | 2021-02-04T11:11:46 | 2020-12-01T16:23:44 | R | UTF-8 | R | false | false | 539 | r | test-get-stability-paths.R | # ---- test-get-stability-paths ----
library(MASS)
# ElemStatLearn is only available as an archived package now, unfortunately:
# remotes::install_version("ElemStatLearn")
data(prostate, package = "ElemStatLearn")
data <- prostate
# Full model: predict lpsa from everything except the train indicator column.
max_formula <- lpsa ~ (. - train)
# Best-subset selection: best model for each size up to 8 predictors.
model <- leaps::regsubsets(max_formula,
data = data, nbest = 1, nvmax = 8,
really.big = TRUE
)
set.seed(20141020)
# Selection frequencies over 1000 resamples (helper defined elsewhere in repo).
stability_paths <- get_stability_paths(model, data, reps = 1000)
stability_paths
plot_stability_paths(stability_paths) |
1fc93781ac49f89caf2be7b88ba29829e4bc298a | 1957338935d192aeb0b9a7b2e9c36bc497dfa9d7 | /plot3.R | 95c833b9c9121b4ccef3a291586ef860574b785e | [] | no_license | lindaholderwho/Exploratory-Data-Analysis-course-project-2 | 7bba82bd649ec88c186deeccf45e28278ad8d836 | b8ad97448ff4c59f08697bb120ef01f7328dd151 | refs/heads/master | 2020-03-20T20:23:03.774595 | 2018-06-17T20:28:37 | 2018-06-17T20:28:37 | 137,684,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 577 | r | plot3.R | ## Load datas:
# Plot 3: Baltimore City (fips 24510) PM2.5 emissions by source type and year,
# with a linear trend per type.
# BUG FIX: ggplot2 was used below but never loaded, so the script failed at
# the first ggplot() call.
library(ggplot2)

# Load data:
NEI <- readRDS("G:\\data science\\course 4\\assignment\\summarySCC_PM25.rds")
SCC <- readRDS("G:\\data science\\course 4\\assignment\\Source_Classification_Code.rds")

# Baltimore City subset; total emissions per year and source type.
Bal <- subset(NEI, fips == "24510")
total_emission_Bal <- aggregate(Emissions ~ year + type, data = Bal, sum)

p <- ggplot(data = total_emission_Bal, aes(year, Emissions)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, lwd = 2, color = "purple") +
  facet_grid(. ~ type) +
  labs(x = "Year", y = "Baltimore Total Emission", title = "Baltimore Total Emission")
print(p)  # render on the screen device before copying it to PNG
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off() |
eea7d73de6f26fd2c0ac9686c2beecf94a4d23e5 | 84eb18fbde5ce6ca6347f7ab7eb4b017b633feb9 | /lib/Rcolortable.r | acf257fb36d7dc1985736f15dd5153ef4867234c | [] | no_license | stakahama/libPy | 8202a6fd793416c748b71d4707280e79466309f9 | 0e00553d6959c6485e1c1a111c2080d5e22eca7b | refs/heads/master | 2021-05-28T18:59:57.985115 | 2014-07-02T11:58:02 | 2014-07-02T11:58:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 117 | r | Rcolortable.r | colortable <- `rownames<-`(cbind(t(col2rgb(colors()))/255,alpha=1),colors())
write.csv(colortable,"Rcolortable.csv")
|
a0c69459a7edef06542144fcc09609938b9370b8 | 47a8dff9177da5f79cc602c6d7842c0ec0854484 | /man/ElbowPlot.Rd | 9af853894b256045d190ca7ce1e7ebedfe73966c | [
"MIT"
] | permissive | satijalab/seurat | 8949973cc7026d3115ebece016fca16b4f67b06c | 763259d05991d40721dee99c9919ec6d4491d15e | refs/heads/master | 2023-09-01T07:58:33.052836 | 2022-12-05T22:49:37 | 2022-12-05T22:49:37 | 35,927,665 | 2,057 | 1,049 | NOASSERTION | 2023-09-01T19:26:02 | 2015-05-20T05:23:02 | R | UTF-8 | R | false | true | 800 | rd | ElbowPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ElbowPlot}
\alias{ElbowPlot}
\title{Quickly Pick Relevant Dimensions}
\usage{
ElbowPlot(object, ndims = 20, reduction = "pca")
}
\arguments{
\item{object}{Seurat object}
\item{ndims}{Number of dimensions to plot standard deviation for}
\item{reduction}{Reduction technique to plot standard deviation for}
}
\value{
A ggplot object
}
\description{
Plots the standard deviations (or approximate singular values if running PCAFast)
of the principal components for easy identification of an elbow in the graph.
This elbow often corresponds well with the significant dimensions and is much
faster to run than JackStraw.
}
\examples{
data("pbmc_small")
ElbowPlot(object = pbmc_small)
}
\concept{visualization}
|
169a6224f3e9d09495ec61e21f026ad6d716a208 | d068156778a636d7f60c0c6950d1d6520b8b01d8 | /Project2-BankCreditCardDefaultPrediction/Bank Credit Card Default Prediction.R | 9cc2e44cdf2a874846bb41354add8294e8dd45a1 | [
"Apache-2.0"
] | permissive | imkushwaha/Data_Science_Project_Portfolio | 9ee970743969248342e4bc3d4fb118950c7e91a8 | 55dbd9ca4dd23a9d1928fdb7d56d98f8c6a6d264 | refs/heads/master | 2023-06-10T09:32:57.672165 | 2021-06-24T07:34:45 | 2021-06-24T07:34:45 | 323,677,622 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,597 | r | Bank Credit Card Default Prediction.R | #
#
# ****Bank Credit Card Default Prediction****
#
#
# Problem Statement
#
# With the advent of credit cards, banks focused on growing the number of customers
# using their credit service, but the risk of customers failing to pay back their
# credit on time soon became an issue. A system was needed to effectively decide the
# credit limit to be allowed to a person based on their previous credit history.
#
# We are going to build a classification model using logistic regression to predict the credibility
# of the customer,in order to minimize the risk and maximize the profit of German Credit Bank.
# ****Start****
# Initialise the random number generator for reproducibility.
set.seed(123)
# Set the working directory so the CSVs can be read with relative paths.
# NOTE(review): an absolute local path makes the script non-portable;
# consider project-relative paths instead.
setwd("C://Users//USER//Desktop//R Studio")
# Remove all objects from the workspace.
# NOTE(review): rm(list = ls()) in scripts is an anti-pattern; it clears the
# caller's globals but does not reset options or loaded packages.
rm(list = ls())
# Trigger garbage collection to release memory that is no longer referenced.
gc()
# Read in the training dataset; empty strings are treated as NA.
cr_train<-read.csv("Credit_Risk_Train_data.csv",na.strings = "")
# Open the dataset in the RStudio viewer.
View(cr_train)
# Check the structure of the dataset.
str(cr_train)
# Check the summary of the dataset.
summary(cr_train)
# First 10 rows of the dataset.
head(cr_train,10)
# Read in the validation dataset and save it to an object named cr_valid.
cr_valid<-read.csv("Credit_Risk_validate_data.csv",na.strings = "")
# Read in the test dataset and save it to an object named cr_test
# (the original comment here wrongly said cr_valid).
cr_test<-read.csv("Credit_Risk_Test_data.csv",na.strings = "")
summary(cr_test)
# Missing-value exploration ----
# Count how many columns contain at least one NA, then visualise the
# per-column NA totals as a labelled bar plot and as Amelia missingness maps.
# (The original recomputed colSums(is.na(...)) four times and called the S3
# method is.na.data.frame() directly; compute it once and use generic is.na.)
na_counts <- colSums(is.na(cr_train))
table(na_counts > 0)
na_counts  # column-wise total missing values
b <- barplot(na_counts, cex.names = .7, col = "orange", las = 2)
text(b, 0, na_counts, cex = .6, pos = 3, col = "red")
# install.packages("Amelia")  # missmap() from Amelia shows missingness maps
library(Amelia)
missmap(cr_train, col = c("blue", "orange"))
missmap(cr_test, col = c("black", "orange"))
# ---- Imputation: training set ----
# Categorical columns are filled with their mode (most frequent level);
# numeric columns with the median, which is robust to the skew shown below.
colnames(cr_train)
# Gender: mode is "Male"
table(cr_train$Gender) #to find the mode
summary(cr_train$Gender)
cr_train[which(is.na(cr_train$Gender)),"Gender"]<-"Male"
barplot(table(cr_train$Gender),col = c("pink","blue"))
# Marital status: mode is "Yes"
table(cr_train$Married)
table(is.na(cr_train$Married))
cr_train[which(is.na(cr_train$Married)),"Married"]<-"Yes"
barplot(table(cr_train$Married),col = c("red","green"),ylim=c(0,450))
# Dependents: mode is "0"
table(cr_train$Dependents)
summary(cr_train$Dependents)
table(is.na(cr_train$Dependents))
cr_train[which(is.na(cr_train$Dependents)),"Dependents"]<-"0"
barplot(table(cr_train$Dependents),col = c("green","yellow","orange","red"))
# Education: mode is "Graduate"
missmap(cr_train)
table(cr_train$Education)
table(is.na(cr_train$Education))
cr_train[which(is.na(cr_train$Education)),"Education"]<-"Graduate"
barplot(table(cr_train$Education),col = c("orange","brown"))
# Self employed: mode is "No"
table(cr_train$Self_Employed)
table(is.na(cr_train$Self_Employed))
cr_train[which(is.na(cr_train$Self_Employed)),"Self_Employed"]<-"No"
barplot(table(cr_train$Self_Employed),col = c("yellow","green"))
# Credit history: mode is 1 (note: assigning the character "1" coerces the
# column to character; it is converted back to numeric further below)
summary(cr_train$Credit_History)
table(is.na(cr_train$Credit_History))
table(cr_train$Credit_History)
cr_train[which(is.na(cr_train$Credit_History)),"Credit_History"]<-"1"
barplot(table(cr_train$Credit_History),col = c("orange","blue"))
# ---- Numeric columns: inspect the distribution to pick the central value ----
# FIX: install 'moments' only when it is missing instead of reinstalling on
# every run of the script
if (!requireNamespace("moments", quietly = TRUE)) {
  install.packages("moments")
}
library(moments)
library(lattice)
# Positive skewness => right tail is longer; negative => left tail is longer.
# For skewed data the median is the appropriate imputation value.
skewness(cr_train$ApplicantIncome)
densityplot(cr_train$ApplicantIncome)
hist(cr_train$ApplicantIncome,main = "Histogram of Loan Applicant Income",
     xlab = "Applicant Income",ylab = "Frequency",col = "orange")
hist(cr_train$LoanAmount,col = "green") # skew present => impute with median
table(is.na.data.frame(cr_train$LoanAmount))
cr_train[which(is.na(cr_train$LoanAmount)),"LoanAmount"]<-median(cr_train$LoanAmount,na.rm = TRUE)
# Loan amount term: also imputed with the median
hist(cr_train$Loan_Amount_Term)
table(is.na.data.frame(cr_train$Loan_Amount_Term))
cr_train[which(is.na(cr_train$Loan_Amount_Term)),"Loan_Amount_Term"]<-median(cr_train$Loan_Amount_Term,na.rm = TRUE)
View(cr_train)
# ---- Imputation: test set ----
# Categorical columns are filled with the same modes used for the training
# set, so all three datasets are treated consistently
cr_test[which(is.na(cr_test$Gender)),"Gender"]<-"Male"
cr_test[which(is.na(cr_test$Married)),"Married"]<-"Yes"
cr_test[which(is.na(cr_test$Dependents)),"Dependents"]<-"0"
cr_test[which(is.na(cr_test$Education)),"Education"]<-"Graduate"
cr_test[which(is.na(cr_test$Self_Employed)),"Self_Employed"]<-"No"
cr_test[which(is.na(cr_test$Credit_History)),"Credit_History"]<-"1"
# Numeric columns: imputed with the test set's own median
cr_test[which(is.na(cr_test$LoanAmount)),"LoanAmount"]<-median(cr_test$LoanAmount,na.rm = T)
cr_test[which(is.na(cr_test$Loan_Amount_Term)),"Loan_Amount_Term"]<-median(cr_test$Loan_Amount_Term,na.rm = T)
# Visual check: no missing values should remain in the test set
missmap(cr_test)
# ---- Imputation: validation set ----
# Categorical columns: fill NAs with the same modes used for the training set
# so the three datasets are treated consistently.
# FIX: removed a duplicated Gender-imputation line (it was executed twice).
cr_valid[which(is.na(cr_valid$Gender)),"Gender"]<-"Male"
cr_valid[which(is.na(cr_valid$Married)),"Married"]<-"Yes"
cr_valid[which(is.na(cr_valid$Dependents)),"Dependents"]<-"0"
cr_valid[which(is.na(cr_valid$Education)),"Education"]<-"Graduate"
cr_valid[which(is.na(cr_valid$Self_Employed)),"Self_Employed"]<-"No"
cr_valid[which(is.na(cr_valid$Credit_History)),"Credit_History"]<-"1"
# Numeric columns: imputed with the validation set's own median (robust to skew)
cr_valid[which(is.na(cr_valid$LoanAmount)),"LoanAmount"]<-median(cr_valid$LoanAmount,na.rm = TRUE)
cr_valid[which(is.na(cr_valid$Loan_Amount_Term)),"Loan_Amount_Term"]<-median(cr_valid$Loan_Amount_Term,na.rm = TRUE)
str(cr_train)
# All missing values should have been imputed by now -- no NAs expected below
summary(cr_train)
summary(cr_test)
summary(cr_valid)
# Convert Credit_History back to numeric in all three datasets (it was filled
# with the character "1" during imputation, which coerced the column)
cr_train$Credit_History<-as.numeric(cr_train$Credit_History)
cr_valid$Credit_History<-as.numeric(cr_valid$Credit_History)
cr_test$Credit_History<-as.numeric(cr_test$Credit_History)
# Re-check the summaries: there should be no NAs in any dataset now
summary(cr_train)
summary(cr_valid)
summary(cr_test)
# Missing-value maps for all three datasets (should be empty)
missmap(cr_train)
missmap(cr_test)
missmap(cr_valid)
# Per-column NA counts after imputation (all labels should read 0).
# FIX: renamed the midpoint variables (b/c/v -> bp_*) so base::c() is no
# longer shadowed by a local object.
# Training data
bp_train=barplot(colSums(is.na.data.frame(cr_train)),cex.names = .7,col = "orange",las=2)
text(bp_train, 0,colSums(is.na(cr_train)),cex=.6,pos=3,col = "red")
# Testing data
bp_test=barplot(colSums(is.na.data.frame(cr_test)),cex.names = .7,col = "orange",las=2)
text(bp_test, 0,colSums(is.na(cr_test)),cex=.6,pos=3,col = "red")
# Validation data
bp_valid=barplot(colSums(is.na.data.frame(cr_valid)),cex.names = .7,col = "orange",las=2)
# BUG FIX: the labels previously came from cr_train; use cr_valid's NA counts
text(bp_valid, 0,colSums(is.na(cr_valid)),cex=.6,pos=3,col = "red")
# ---- Exploratory plots: each input variable against the Loan_Status target ----
str(cr_train)
names(cr_train)
# Mosaic plots (plot on a 2-way table) for the categorical predictors
plot(table(cr_train$Gender,cr_train$Loan_Status),
     main="Gender vs Loan Status",col=c("yellow","green"))
plot(table(cr_train$Married,cr_train$Loan_Status),
     main="Marital status vs Loan Status",col=c("red","green"))
summary(cr_train$Loan_Amount_Term)
plot(table(cr_train$Dependents,cr_train$Loan_Status),
     main="No of Dependents vs Loan Status",col=c("orange","blue"))
plot(table(cr_train$Education,cr_train$Loan_Status),
     main="Education vs Loan Status",col=c("red","green"))
plot(table(cr_train$Self_Employed,cr_train$Loan_Status),
     main="Self_Employed vs Loan Status",col=c("blue","orange"))
plot(table(as.factor(cr_train$Loan_Amount_Term),cr_train$Loan_Status)
     ,main="Loan Amount Term vs Loan Status",col=c("red","green"))
table(cr_train$Loan_Amount_Term)
# Numeric predictors: spread and central tendency
range(cr_train$CoapplicantIncome)
fivenum(cr_train$CoapplicantIncome)
summary(cr_train$CoapplicantIncome)
boxplot(cr_train$CoapplicantIncome,col = "orange")
median(cr_train$CoapplicantIncome)
median(cr_train$ApplicantIncome)
fivenum(cr_train$ApplicantIncome)
# Numeric predictors split by loan outcome (ylim trims extreme outliers)
boxplot(cr_train$ApplicantIncome~cr_train$Loan_Status,
        main="Applicant Income vs Loan Status",col=c("red","blue"),ylim=c(0,15000))
boxplot(cr_train$CoapplicantIncome~cr_train$Loan_Status,
        main="Co-Applicant income vs Loan Status",
        col = c("red","green"),ylim=c(0,4000))
boxplot(cr_train$LoanAmount~cr_train$Loan_Status,
        main="Loan Amount vs Loan Status",col=c("yellow","orange"))
plot(table(as.factor(cr_train$Loan_Amount_Term),
           cr_train$Loan_Status),xlab="Loan Amount Term",
     ylab="Loan status", main = "Loan Amount Term vs Loan Status",
     col=c("red","green"))
plot(table(as.factor(cr_train$Credit_History),
           cr_train$Loan_Status),xlab="Credit History",
     ylab="Loan status", main = "Credit History vs Loan Status",
     col=c("red","green"))
plot(table(cr_train$Property_Area,cr_train$Loan_Status),xlab="Rural to Urban",
     main="Property Area vs Loan Approval",col=c("red","green","blue"))
# ---- Recode predictors to numeric dummies for glm() ----
#checking the nature of variables in the training dataset
str(cr_train)
# Training set: binary 0/1 indicators for each categorical predictor
cr_train$Gender<-as.numeric(ifelse(cr_train$Gender=="Male",1,0))
cr_train$Married<-as.numeric(ifelse(cr_train$Married=="Yes",1,0))
# NOTE(review): Education below lacks the as.numeric() wrapper used elsewhere;
# harmless (ifelse already returns numeric) but inconsistent
cr_train$Education<-(ifelse(cr_train$Education=="Graduate",1,0))
cr_train$Self_Employed<-as.numeric(ifelse(cr_train$Self_Employed=="Yes",1,0))
# Property_Area (3 levels) becomes two dummies; "Semiurban" is the baseline
cr_train$Property_Area_Rural<-as.numeric(ifelse(cr_train$Property_Area=="Rural",1,0))
cr_train$Property_Area_Urban<-as.numeric(ifelse(cr_train$Property_Area=="Urban",1,0))
View(cr_train)
cr_train$Loan_Status<-as.numeric(ifelse(cr_train$Loan_Status=="Y",1,0))
# NOTE(review): if Dependents is a factor (pre-R-4.0 read.csv default), the
# FALSE branch of ifelse() yields the factor's integer codes, not the labels
# "0"/"1"/"2" -- confirm Dependents is character before relying on this
cr_train$Dependents<-as.numeric(ifelse(cr_train$Dependents=="3+",3,cr_train$Dependents))
cr_train$Loan_ID<-NULL #Dropping Loan_ID column as it is not useful for our model
cr_train$Property_Area<-NULL #Dropping Property_Area column (replaced by dummies)
str(cr_train)
# Validation set: same recoding as the training set
head(cr_valid)
cr_valid$Gender<-as.numeric(ifelse(cr_valid$Gender=="Male",1,0))
cr_valid$Married<-as.numeric(ifelse(cr_valid$Married=="Yes",1,0))
cr_valid$Education<-as.numeric(ifelse(cr_valid$Education=="Graduate",1,0))
cr_valid$Self_Employed<-as.numeric(ifelse(cr_valid$Self_Employed=="Yes",1,0))
cr_valid$Property_Area_Rural<-as.numeric(ifelse(cr_valid$Property_Area=="Rural",1,0))
cr_valid$Property_Area_Urban<-as.numeric(ifelse(cr_valid$Property_Area=="Urban",1,0))
cr_valid$Dependents<-as.numeric(ifelse(cr_valid$Dependents=="3+",3,cr_valid$Dependents))
names(cr_valid)
cr_valid$Loan_ID<-NULL
cr_valid$Property_Area<-NULL
# Test set: same recoding as the training set
cr_test$Gender<-as.numeric(ifelse(cr_test$Gender=="Male",1,0))
cr_test$Married<-as.numeric(ifelse(cr_test$Married=="Yes",1,0))
cr_test$Education<-as.numeric(ifelse(cr_test$Education=="Graduate",1,0))
cr_test$Self_Employed<-as.numeric(ifelse(cr_test$Self_Employed=="Yes",1,0))
cr_test$Property_Area_Rural<-as.numeric(ifelse(cr_test$Property_Area=="Rural",1,0))
cr_test$Property_Area_Urban<-as.numeric(ifelse(cr_test$Property_Area=="Urban",1,0))
cr_test$Dependents<-as.numeric(ifelse(cr_test$Dependents=="3+",3,cr_test$Dependents))
cr_test$Loan_ID<-NULL
cr_test$Property_Area<-NULL
names(cr_valid)
names(cr_train)
# Final missing-value check after recoding
missmap(cr_test)
missmap(cr_train)
missmap(cr_valid)
# ---- Model fitting: logistic regression on the training set ----
log_model<-glm(Loan_Status ~ .,data = cr_train,family = "binomial")
summary(log_model)
# ---- Model assessment on the validation set ----
# Predicted probabilities for the positive class
predic_on_valid<-predict(log_model,newdata = cr_valid,type = "response")
predic_on_valid[1:10]
cr_valid$outcome[1:10]
# Helper: classification accuracy at a given probability threshold.
# FIX: the accuracies were previously computed from hard-coded confusion
# matrix cell counts (e.g. (58+289)/(58+19+1+289)), which silently break
# whenever the data change.  sum(diag(.)) counts the agreeing pairs instead.
# NOTE(review): assumes the first level of cr_valid$outcome is the negative
# class (paired with FALSE) -- the same pairing the hard-coded counts assumed.
accuracy_at <- function(threshold) {
  conf <- table(cr_valid$outcome, predic_on_valid > threshold)
  sum(diag(conf)) / sum(conf)
}
# Accuracy at a 0.5 threshold
table(cr_valid$outcome,predic_on_valid>0.5)
acc_log_model<-accuracy_at(0.5)
print(acc_log_model)
# Accuracy at a 0.75 threshold
table(cr_valid$outcome,predic_on_valid>0.75)
acc_log_model_1<-accuracy_at(0.75)
print(acc_log_model_1)
# Accuracy at a 0.65 threshold
table(cr_valid$outcome,predic_on_valid>0.65)
acc_log_model_2<-accuracy_at(0.65)
print(acc_log_model_2)
# ---- Prediction on the unseen test set ----
predicttest<-predict(log_model,newdata = cr_test,type = "response")
predicttest
# ---- ROC curve and AUC on the validation set ----
cr_valid$outcome
table(cr_valid$outcome)
#install.packages("ROCR")
#install.packages("gplots")
library(ROCR)
library(gplots)
# BUG FIX: the ROC curve must pair predictions with labels from the SAME
# dataset.  The original passed the test-set predictions (predicttest) with
# the validation-set labels; use the validation-set predictions instead.
ROCRpred<-prediction(predic_on_valid,cr_valid$outcome)
ROCRpred
# True-positive rate vs false-positive rate
ROCRpref<-performance(ROCRpred,"tpr","fpr")
plot(ROCRpref)
# Same curve with the probability cut-offs annotated along it
plot(ROCRpref,print.cutoffs.at=seq(0,1,.1),colorize=T)
# Area under the ROC curve
as.numeric(performance(ROCRpred,"auc")@y.values)
# Classify the test set with a 0.5 probability threshold and inspect the
# resulting class ("Y"/"N") for each client
cr_test$Status<-ifelse(predicttest>0.5,"Y","N")
table(cr_test$Status)
View(cr_test)
# Sensitivity is the same as TPR (true positive rate)
# Specificity is the same as TNR (true negative rate)
# FPR = (1 - Specificity)
# ---- Hosmer-Lemeshow goodness-of-fit test ----
# Null hypothesis: the model fits the data well.
# Alternative hypothesis: the model does not fit the data well.
#install.packages("ResourceSelection")
library(ResourceSelection)
# Significance level: 5% or 10%
log_model$y
# g = 10 groups of fitted probabilities
hl<-hoslem.test(log_model$y,fitted(log_model),g=10)
hl
# Expected vs observed counts per group
cbind(hl$expected,hl$observed)
# Did the IRLS fitting algorithm converge?
log_model$converged
# Notes on the glm() summary output:
# - The null deviance shows how well the response is predicted by a model
#   with nothing but an intercept.
# - The residual deviance shows how well the response is predicted once the
#   predictors are included.
# - "Fisher Scoring iterations" is the number of iterations the iterative
#   maximum-likelihood algorithm needed to fit the model.
|
c66cada508f733590c14db6a82f7bfe0a8a84a51 | 2b98b47f51bc4c76f5646ee9077c3bd9144d748a | /R/data.R | c66d62a009da3b2731cc78fae84bff83c2fcd524 | [
"MIT"
] | permissive | nickreich/scoringutils | 4429a04d7397ff6b4ae2f8f9993b2afd12d9e61a | e42bf3ecacc82549f8e00d039532519b929d7713 | refs/heads/master | 2023-02-10T04:08:56.072567 | 2021-01-06T17:42:00 | 2021-01-06T17:42:00 | 318,857,163 | 0 | 0 | NOASSERTION | 2020-12-05T18:12:46 | 2020-12-05T18:12:45 | null | UTF-8 | R | false | false | 4,410 | r | data.R | #' Binary Example Data
#'
#' A toy dataset for a probability forecast of a binary outcome variable
#'
#' @format A data.table with 120 rows and 5 variables:
#' \describe{
#'   \item{id}{unique identifier for true observed values}
#'   \item{true_value}{true observed values}
#'   \item{model}{name of the model that generated the forecasts}
#'   \item{horizon}{forecast horizon (e.g. 1 day ahead forecast)}
#'   \item{prediction}{predicted probability that the corresponding true
#'   value will be 1}
#' }
"binary_example_data"
#' Continuous Example Data
#'
#' A toy dataset for a probabilistic forecast of a continuous outcome variable
#'
#' @format A data frame with 6000 rows and 6 variables:
#' \describe{
#'   \item{id}{unique identifier for true observed values}
#'   \item{model}{name of the model that generated the forecasts}
#'   \item{horizon}{forecast horizon (e.g. 1 day ahead forecast)}
#'   \item{true_value}{true observed values}
#'   \item{sample}{number that identifies the predictive sample generated by
#'   a specific model for a specific observed value}
#'   \item{prediction}{predictive sample for the corresponding true
#'   value}
#' }
"continuous_example_data"
#' Integer Example Data
#'
#' A toy dataset for a probabilistic forecast of an integer outcome variable
#'
#' @format A data frame with 6000 rows and 5 variables:
#' \describe{
#'   \item{id}{unique identifier for true observed values}
#'   \item{model}{name of the model that generated the forecasts}
#'   \item{horizon}{forecast horizon (e.g. 1 day ahead forecast)}
#'   \item{true_value}{true observed values}
#'   \item{sample}{number that identifies the predictive sample generated by
#'   a specific model for a specific observed value}
#'   \item{prediction}{predictive sample for the corresponding true
#'   value}
#' }
"integer_example_data"
#' Quantile Example Data - Wide Format
#'
#' A toy dataset for quantile forecasts of an outcome variable
#'
#' @format A data frame with 120 rows and 10 variables:
#' \describe{
#'   \item{true_value}{true observed values}
#'   \item{id}{unique identifier for true observed values}
#'   \item{model}{name of the model that generated the forecasts}
#'   \item{horizon}{forecast horizon (e.g. 1 day ahead forecast)}
#'   \item{lower_90}{prediction for the lower value of the 90\% interval range
#'   (corresponding to the 5\% quantile)}
#'   \item{lower_50}{prediction for the lower value of the 50\% interval range
#'   (corresponding to the 25\% quantile)}
#'   \item{lower_0}{prediction for the lower value of the 0\% interval range
#'   (corresponding to the 50\% quantile, i.e. the median. For computational
#'   reasons there needs to be a column with \code{lower_0} and \code{upper_0})}
#'   \item{upper_0}{prediction for the upper value of the 0\% interval range
#'   (corresponding to the 50\% quantile, i.e. the median)}
#'   \item{upper_50}{prediction for the upper value of the 50\% interval range
#'   (corresponding to the 75\% quantile)}
#'   \item{upper_90}{prediction for the upper value of the 90\% interval range
#'   (corresponding to the 95\% quantile)}
#' }
"quantile_example_data_wide"
#' Quantile Example Data - Long Format
#'
#' A toy dataset for quantile forecasts of an outcome variable
#'
#' @format A data frame with 720 rows and 7 variables:
#' \describe{
#'   \item{true_value}{true observed values}
#'   \item{id}{unique identifier for true observed values}
#'   \item{horizon}{forecast horizon (e.g. 1 day ahead forecast)}
#'   \item{model}{name of the model that generated the forecasts}
#'   \item{prediction}{quantile predictions}
#'   \item{boundary}{lower or upper bound of an interval range}
#'   \item{range}{interval range for which the quantile forecast was made}
#' }
"quantile_example_data_long"
#' Quantile Example Data - Plain Quantile Format
#'
#' A toy dataset for quantile forecasts of an outcome variable in a format
#' that uses plain quantiles instead of interval ranges
#'
#' @format A data frame with 600 rows and 6 variables:
#' \describe{
#'   \item{true_value}{true observed values}
#'   \item{id}{unique identifier for true observed values}
#'   \item{horizon}{forecast horizon (e.g. 1 day ahead forecast)}
#'   \item{model}{name of the model that generated the forecasts}
#'   \item{prediction}{quantile predictions}
#'   \item{quantile}{quantile of the corresponding prediction}
#' }
"quantile_example_data_plain"
|
4d87df739dd37ece5f9e15dacd812a606aba8bcc | 58f991069ca784990717c0efc802a6acf8e62c11 | /man/fisherExact.Rd | ff23375ac4e0cc6dfebe7d2889ea07dfae40e819 | [] | no_license | matthewcarlucci/DiscoRhythm | 5055e5c5085a673da371d57acbb18cfca990bb14 | a0d23a818093278663313e707e86127348e3bf2e | refs/heads/master | 2022-04-28T07:44:42.765988 | 2022-03-07T22:28:37 | 2022-03-07T22:28:37 | 179,596,217 | 10 | 4 | null | 2019-08-09T13:52:32 | 2019-04-05T00:21:05 | R | UTF-8 | R | false | true | 377 | rd | fisherExact.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{fisherExact}
\alias{fisherExact}
\title{Extract key values from stats::fisher.test results}
\usage{
fisherExact(var1, var2)
}
\arguments{
\item{var1}{First input passed to \code{\link[stats]{fisher.test}}.}

\item{var2}{Second input passed to \code{\link[stats]{fisher.test}}.}
}
\value{
modified output of \link[stats]{fisher.test}
}
\description{
Set p-values of 0 to < 2.2e-16 and reformat odds ratio using formatC
}
\keyword{internal}
|
007c7938d951be3e270e0885ec874dff6b0c4484 | 19e3dad991d3a507dbddd70391ae26fcf5262e2d | /Projects/Colin/Gene_Queries/Methylation_Extract.R | 43157d233c923b3369f669ce46de3838f073acd8 | [] | no_license | AndrewSkelton/Old-Code | be1f967e50d5c3ceaa32886c1b950dc439f56c88 | d518f70a6d615bea5bc903ee0183b24a251d0e36 | refs/heads/master | 2021-01-20T15:53:54.745063 | 2016-06-27T09:54:13 | 2016-06-27T09:54:13 | 62,044,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,457 | r | Methylation_Extract.R | #-------------------------------------------------------------------------------------------#
# Author : Andrew J Skelton |
# Language : R |
# Study : MCF2L Data from 450K |
# Data Owner : Louise Reynard |
# Description : Visualise the methylation of MCF2L - Requires 450K data |
#-------------------------------------------------------------------------------------------#
##'Set the Working Directory, load essential packages, and import files
##'-----------------------------------------------------------------------------------------#
library(ggplot2)
library(biomaRt)
library(gridExtra)
library(biovizBase)
library(GenomicFeatures)
library(ggbio)
# BUG FIX: biocLite() was called unconditionally, but it is only defined after
# sourcing Bioconductor's installer, so the script stopped here with
# "could not find function 'biocLite'" -- and packages should not be
# (re)installed on every run anyway.  Run these two lines once, manually:
# source("http://bioconductor.org/biocLite.R")
# biocLite(c("biovizBase", "GenomicFeatures", "ggbio"))
setwd("~/Colin/Gene_Queries/")
# Gene symbol whose 450K methylation will be visualised
cpg_of_interest <- "MCF2L" #"ALDH1A2"
##'-----------------------------------------------------------------------------------------#
##'Build TxDb: transcript annotation database from the Ensembl GRCh37 GTF
##'-----------------------------------------------------------------------------------------#
transcriptdb <- makeTxDbFromGFF(file = "/data/genomes/GRCh37_Ensembl_Homo_Sapiens/genome.gtf",
                                format = 'gtf',
                                organism = 'Homo sapiens')
##'-----------------------------------------------------------------------------------------#
##'Extract Probe Data
##'-----------------------------------------------------------------------------------------#
# Probes annotated to the gene of interest: the regex matches "GENE;" (gene
# inside a multi-gene annotation) or "GENE" at the end of the string.
# NOTE(review): annotation_other and hm450.hg19 are not created in this
# script -- presumably loaded from a saved 450K workspace; confirm upstream.
MCF2L_Anno <- annotation_other[grep(paste(paste0(cpg_of_interest, ";"),
                                          paste0(cpg_of_interest, "$"),
                                          sep="|"),
                                    annotation_other$UCSC_RefGene_Name),]
MCF2L_Probes <- hm450.hg19[rownames(hm450.hg19) %in% rownames(MCF2L_Anno),]
##'-----------------------------------------------------------------------------------------#
##'Query GRCh37 via biomaRt: Ensembl gene id / name / description for the gene
##'-----------------------------------------------------------------------------------------#
ensembl <- useMart("ENSEMBL_MART_ENSEMBL",
                   dataset="hsapiens_gene_ensembl",
                   host="grch37.ensembl.org",
                   path="/biomart/martservice")
annotation <- getBM(attributes=c("ensembl_gene_id",
                                 "external_gene_name",
                                 "description"),
                    filters="external_gene_name",
                    values=cpg_of_interest,
                    ensembl)
##'-----------------------------------------------------------------------------------------#
##'Get Betas: convert normalised M-values to beta values for the gene's probes
##'-----------------------------------------------------------------------------------------#
# NOTE(review): lumi.norm and m2beta() also come from the 450K workspace
MCF2L_Beta <- m2beta(exprs(lumi.norm[rownames(lumi.norm) %in% rownames(MCF2L_Probes),]))
##'-----------------------------------------------------------------------------------------#
##'Create Data Structures: long-format table of per-sample betas + annotation
##'-----------------------------------------------------------------------------------------#
# Melt the beta matrix to long format: Var1 = probe (CpG), Var2 = sample
df_in <- melt(MCF2L_Beta)
# Attach phenotype columns 1 and 6 for each sample
df_in <- cbind(df_in,
               pData(lumi.norm)[match(df_in$Var2,
                                      rownames(pData(lumi.norm))),][,c(1,6)])
rownames(df_in) <- 1:nrow(df_in)
# df_in <- cbind(df_in, pData(lumi.norm)[,c(1,6)])
# Attach probe annotation; columns 1:3 become Chr/Start/Stop below
anno_base_in <- hm450.hg19[match(df_in$Var1, rownames(hm450.hg19)),]
anno_other_in <- MCF2L_Anno[match(df_in$Var1, rownames(MCF2L_Anno)),]
df_in <- cbind(df_in, anno_base_in[,c(1:3)])
# df_in <- cbind(df_in, anno_other_in[,c(5,12:13)])
# NOTE(review): two columns are both named "Sample" here -- duplicated name
colnames(df_in) <- c("CpG", "Sample", "Beta", "Sample",
                     "Type", "Chr", "Start", "Stop")
# Order rows by genomic position
df_in <- df_in[with(df_in, order(Start)),]
##'-----------------------------------------------------------------------------------------#
##'Plot Magic: one PDF page per batch of up to 8 CpGs
##'-----------------------------------------------------------------------------------------#
pdf(paste0(cpg_of_interest, "_Methylation.pdf"), paper="a4r")
# Iterate over the CpGs in batches of 8.
# NOTE(review): i:(i+7) indexes past the end when the CpG count is not a
# multiple of 8 (yields NAs); confirm the intended behaviour for the last page
for(i in seq(1, length(unique(df_in$CpG)), 8)) {
  df_sub <- df_in[df_in$CpG %in% as.vector(unique(df_in$CpG)[c(i:(i+7))]),]
  df_sub <- df_sub[with(df_sub, order(Start)),]
  sort_df <- data.frame(CpG = unique(df_sub$CpG),
                        Start = unique(df_sub$Start))
  # gg_a: beta vs genomic position, coloured by Type, with per-Type mean lines
  gg_a <- ggplot(data=df_sub, aes(x=Start, y=Beta, group=Type, colour=Type)) +
    scale_y_continuous(limits = c(0, 1)) +
    scale_x_continuous(limits = c(min(df_sub$Start),
                                  max(df_sub$Start))) +
    geom_point(alpha = 0.4) +
    # geom_jitter() +
    # geom_boxplot() +
    theme_bw() +
    scale_fill_brewer(palette="Set1") +
    scale_color_brewer(palette="Set1") +
    stat_summary(fun.y = mean,
                 geom="line") +
    ggtitle(cpg_of_interest)
  # Re-level CpG by genomic position so the boxplot is ordered along the gene
  df_sub$CpG <- factor(df_sub$CpG,
                       levels=sort_df$CpG)
  # gg_b: per-CpG boxplots of beta, split by Type
  gg_b <- ggplot(data=df_sub, aes(x=CpG, y=Beta)) +
    scale_y_continuous(limits = c(0, 1)) +
    geom_boxplot(aes(fill = Type),
                 notch=T) +
    geom_point(aes(shape=Type,
                   fill=Type),
               position=position_dodge(width=.75),
               pch=21,
               size=2) +
    theme_bw() +
    scale_fill_brewer(palette="Set1") +
    theme(axis.text.x = element_text(angle = 90,
                                     hjust = 1))
  # Transcript track for the gene, with the plotted region marked in red
  genename <- annotation$ensembl_gene_id
  for_intersect <- transcriptsBy(transcriptdb,
                                 by = "gene")[genename]
  trans <- crunch(transcriptdb,
                  which=for_intersect)
  gr1 <- split(trans,
               trans$tx_name)
  p1 <- autoplot(gr1) +
    theme_bw() +
    geom_vline(xintercept=c(min(df_sub$Start),
                            max(df_sub$Start)),
               colour="red")
  p2 <- p1@ggplot
  # NOTE(review): the remaining calls in this loop look experimental/left
  # over -- multiplot() and plot_grid() are not defined/loaded in this
  # script, and foo is assigned twice; confirm which layout is intended
  multiplot(p2, gg_a)
  fixed(p1) <- T
  fixed(gg_a) <- T
  fixed(gg_b) <- T
  labeled(p1) <- F
  labeled(gg_a) <- F
  labeled(gg_b) <- F
  foo <- list(A1=p1, A2=gg_a, A3=gg_b)
  foo <- list(p1, gg_a)
  tracks(foo)
  plot_grid(gg_a, gg_b, p2, labels=c("A","B","C"))
  multiplot(gg_a, gg_b, cols=1)
}
dev.off()
##'-----------------------------------------------------------------------------------------#
##'Save Outputs: beta values, phenotype data and both annotation tables as CSV
##'-----------------------------------------------------------------------------------------#
write.csv(MCF2L_Beta, file=paste0(cpg_of_interest, "_Betas.csv"))
write.csv(pData(lumi.norm), file=paste0(cpg_of_interest, "_Pheno_Data.csv"))
write.csv(anno_base_in, file="Annotation_1.csv")
write.csv(anno_other_in, file="Annotation_2.csv")
##'-----------------------------------------------------------------------------------------#
# Ad-hoc extraction of four specific probes of interest (betas + annotation)
cpg <- c("cg13979708","cg19254793","cg20913747","cg18551225")
norm_betas <- m2beta(exprs(lumi.norm[rownames(lumi.norm) %in% cpg,]))
anno <- hm450.hg19[match(cpg, rownames(hm450.hg19)),]
# Plot mean transcript expression for a gene in two groups as two
# colour-filled transcript tracks (undiff vs diff) sharing one colour scale.
# NOTE(review): this function reads `trans` and `genename` from the calling
# environment rather than from its arguments, and rm(trans) targets the
# function's own environment where no `trans` exists; `gg_color_hue` must
# also be defined elsewhere.  norm_gene and sample are currently unused.
# Confirm these dependencies before reusing the function.
plot_transcript_expression = function(norm_trans, norm_gene, gene_id, transcriptdb, pdata, sample=NA, ...) {
  # One copy of the transcript model per group (first/second level of pdata)
  trans1 = trans
  trans2 = trans
  rm(trans)
  trans1$celltype = levels(pdata)[1]
  # Mean expression per transcript across the samples of each group
  trans1$expression = apply(norm_trans[match(trans1$tx_name,rownames(norm_trans)),which(pdata==levels(pdata)[1])], 1, mean)
  trans2$celltype = levels(pdata)[2]
  trans2$expression = apply(norm_trans[match(trans2$tx_name,rownames(norm_trans)),which(pdata==levels(pdata)[2])], 1, mean)
  # Split features by transcript name for track plotting
  gr1 = split(trans1, trans1$tx_name)
  gr2 = split(trans2, trans2$tx_name)
  # Shared fill-colour limits across both tracks
  all_exp = c(trans1$expression, trans2$expression)
  p1 = autoplot(gr1, aes(fill=expression), colour=alpha('black', 0.2)) +
    scale_fill_gradient(low="blue", high="red", limits=c(0,max(all_exp))) + theme_clear()
  p2 = autoplot(gr2, aes(fill=expression), colour=alpha('black', 0.2)) +
    scale_fill_gradient(low="blue", high="red", limits=c(0,max(all_exp))) + theme_clear()
  track_col = gg_color_hue(2)
  track_col = c("white", track_col, "white")
  # Stack the two tracks with a combined title
  t = tracks(undiff=p1, diff=p2, title=paste0(gene_id, " (", genename, ")"),
             label.bg.fill=track_col)
  return(t)
}
|
16eea0218600fc699e427f36d7f89517f68c3fa4 | b547ef1ced687e9fb7b7ae08c914b199011460f0 | /tests/testthat/test-selectormodule.R | 0961bccce16f7fdaa0435b601cd117bd2bc42bdd | [] | no_license | cselefendi/marineAppTested | 69c29690d7345587398ea97d651e9125bfe381a8 | 3e3cdadbf4c258ca4c4576b74011ceb5d183ac51 | refs/heads/master | 2023-04-29T05:08:23.533465 | 2021-05-21T10:10:30 | 2021-05-21T10:10:30 | 369,482,278 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,357 | r | test-selectormodule.R | context("selector module")
# Fixture values shared by the module-server tests below
type_sample <- c('Tug', 'Tanker', 'Other')
please_select_sample <- "Please select"

testServer(
  selectorServer,
  args = list(temp_label = 'LABEL',
              please_select = please_select_sample,
              all_choices = type_sample,
              selected_value = NULL),
  {
    test_that('A valid element can be selected', {
      # FIX: corrected the misspelled local name (value_shoud_be)
      expected_value <- type_sample[1]
      session$setInputs(selectInputElem = expected_value)
      expect_equal(
        session$returned$selectInputElem(),
        expected_value)
    })

    # Could not implement test 'successfully'
    # test_that('An invalid element can NOT be selected', {
    #
    #   session$setInputs(selectInputElem = "Something invalid")
    #   expect_equal(
    #     session$returned$selectInputElem(),
    #     please_select_sample)
    #
    # })

    test_that('Valid elements can be selected after each other', {
      expected_value <- type_sample[1]
      session$setInputs(selectInputElem = expected_value)
      expect_equal(
        session$returned$selectInputElem(),
        expected_value)

      # A second valid selection must overwrite the first
      expected_value <- type_sample[2]
      session$setInputs(selectInputElem = expected_value)
      expect_equal(
        session$returned$selectInputElem(),
        expected_value)
    })
  })
8f8040666d618c14e04050cf88541a834f28e7ec | 99397fb5516c425df9a41335849097e78b1a5727 | /RIPPER.R | 49305992e3d5a75afd5ede70aa77a3b1273444c7 | [] | no_license | ChandanVerma/General-R-Scripts | 92fa31afdeb5a8d672b7b5020ea6718bfe69d47f | 0a05baf400a3d54b26fa6f06caa9f3da1047bd79 | refs/heads/master | 2020-06-29T03:38:10.737810 | 2017-07-10T09:07:55 | 2017-07-10T09:07:55 | 74,449,649 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 325 | r | RIPPER.R | rm(list = ls())
# Load the mushroom dataset; RWeka learners need the class column as a factor
mushrooms <- read.csv('mushrooms.csv', stringsAsFactors = TRUE)
str(mushrooms)
# Drop veil_type (removed from the feature set before modelling)
mushrooms$veil_type <- NULL
# FIX: install RWeka only when it is missing instead of reinstalling on every run
if (!requireNamespace("RWeka", quietly = TRUE)) {
  install.packages("RWeka")
}
library(RWeka)
# 1R baseline: single-attribute rule learner
mushroom_1R <- OneR(type ~ ., data = mushrooms)
summary(mushroom_1R)
# JRip: the RIPPER rule-induction algorithm
mushrooms_Jrip <- JRip(type ~ ., data = mushrooms)
summary(mushrooms_Jrip)
|
faf224e0fbe6ab3586a65ddbc255ad5f4cbe4c9b | b652323ddbe8a06f0cbd219477daf11d841fb034 | /man/preference.Rd | 4cedbcf00ac727fd3244358dbe036eeae76580a6 | [] | no_license | laynelv/isenso | 9112047f41591f62c28da21a027f9eb0e0edb306 | 73137eca2707dda60ab991d85210f85177f3dc74 | refs/heads/master | 2020-12-25T11:06:14.396441 | 2016-07-27T05:23:37 | 2016-07-27T05:23:37 | 61,980,879 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,070 | rd | preference.Rd | \name{preference}
\alias{preference}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ consumer preference
%% ~~function to do ... ~~
}
\description{
1. Draw a barplot to show which product is the favorite of consumers. Compute the means of consumers' liking scores, then perform LSD multiple comparisons to identify whether the means are significantly different. Visualize the results with a barplot of the means, using different colors.
2. Draw a biplot to show the relations between products and consumers, and whether there are separate groups of consumers, using the K-means method.
Preference Mapping is a set of statistical techniques that can be used to analyze data collected during a Product Guidance or Product Optimization test.
The Preference Mapping analysis provides additional understanding of consumer preference data, providing greater actionability to the consumer test.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
preference(data=NULL,ncluster=3)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{data}{a data frame containing consumers in rows and products in columns.
%% ~~Describe \code{x} here~~
}
  \item{ncluster}{the number of clusters to form; the default is 3.
%% ~~Describe \code{x} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{Layne lv (layne.lv@outlook.com)
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
ggplot2
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(choc_preference)
preference(choc_preference)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~preference }
\keyword{ ~cluster }% __ONLY ONE__ keyword per line
|
2d96da7b3dd102a78732b6b235551c3cc5a5394e | c4f065fcdc73605b9944b69bc981b339d8356ef4 | /tests/testthat/test_compare_thresholds.R | ba85d09bbe2091a0c5b1a560c01a21047bf27dae | [] | no_license | saskiaotto/INDperform | a9c566a774bdfd58480e0493eb564761abef8621 | dffa5a2af4f2a10ba53e2622da9e540fd9dbcdde | refs/heads/master | 2021-11-11T11:26:54.013687 | 2021-10-23T19:13:44 | 2021-10-23T19:13:44 | 106,686,773 | 7 | 2 | null | null | null | null | UTF-8 | R | false | false | 740 | r | test_compare_thresholds.R | context("test thresh_gam")
# Fixture: first training pressure series from the package's example data
press2 <- ind_init_ex$press_train[[1]]
nthd <- length(press2)
# Candidate thresholds: evenly spaced between the 20% and 80% quantiles
lower <- stats::quantile(press2, prob = 0.2, na.rm = TRUE)
upper <- stats::quantile(press2, prob = 0.8, na.rm = TRUE)
t_val <- seq(from = lower, to = upper, by = (upper - lower) / nthd)
test <- compare_thresholds(t_val, press2)
# Index of the first threshold flagged as giving no change in the data split
x <- min(which(test$change == FALSE))
test_that("test compare_thresholds", {
  # Column classes of the returned data.
  # FIX: replaced the deprecated testthat expect_is() with explicit checks.
  expect_true(is.logical(test$change))
  expect_true(is.numeric(test$t_val))
  # The first threshold always counts as a change
  expect_true(test$change[1])
  # Thresholds x and x - 1 select the same subset of press2, i.e. the
  # differing threshold produced no change in the split (hence change == FALSE)
  expect_equal(press2[press2 < test$t_val[x]],
               press2[press2 < test$t_val[x - 1]])
})
6db5caa8c216637a1453f2605951470f5057c45a | 3faf413c32f2ebd71d35ccb00e7212e636ea1d16 | /code/calculate_mutation_profiles.R | 1caf6e4e79da355d6dd528884cc45beff3610713 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | daniel-wells/somatic-selection | bdbd1a19bebbb04923cf31a04420748bdb74f82d | 98bcfed738feeed4a81635ba4105b5f450663d7a | refs/heads/master | 2021-03-24T10:32:04.716439 | 2019-10-16T10:26:45 | 2019-10-16T10:26:45 | 50,685,074 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,640 | r | calculate_mutation_profiles.R | # Start writing to an output file
# calculate_mutation_profiles.R
# Builds per-project trinucleotide mutation-context profiles from ICGC
# single-base coding substitutions, then converts motif counts into per-site
# mutation "probabilities" (observed count / trinucleotide sites available
# across all donors of a project).
# Side effects: writes a QC PDF, saves data/motif.probabilities.rds, and
# redirects all console and message output to a timestamped log file.
logfile <- file(paste("logs/calculate_mutation_profiles.R.log",format(Sys.time(), "%Y-%m-%d.%H-%M-%S"), "txt", sep = "."))
sink(logfile)
sink(logfile, type="message")
# archive.file() used below is presumably defined in code/functions.R -- TODO confirm.
source("code/functions.R")
# source("http://bioconductor.org/biocLite.R")
# biocLite("SomaticSignatures")
# biocLite("BSgenome.Hsapiens.UCSC.hg19")
library(SomaticSignatures)
library(data.table)
library(ggplot2)
## Genomic sequences
library(BSgenome.Hsapiens.UCSC.hg19)
genome <- BSgenome.Hsapiens.UCSC.hg19
### LOAD ICGC MUTATION DATA ###
single.base.coding.substitutions <- readRDS("data/single.base.coding.substitutions.rds")
### CONTEXTUALISE MUTATIONS & CALCULATE MUTATION PROFILE & PROBABILITIES ###
# Quick composition summaries; printed values go to the log file only.
single.base.coding.substitutions[,.N,by=project_code]
single.base.coding.substitutions[,.N,by=mutation_type]
single.base.coding.substitutions[,.N,by=variant.class]
single.base.coding.substitutions[,.N,by=sequencing_strategy]
# Remove duplicate annotations (1mut:>1annot due to multiple transcripts and overlapping exons! e.g. PCDHG, PCDHA, RP11, UGT1A, PRAME, CTC-)
# Warning, 1 base can mutate to 2 different bases in same patient - use mut_id not position
setkey(single.base.coding.substitutions,icgc_donor_id,icgc_mutation_id)
single.base.coding.substitutions <- unique(single.base.coding.substitutions)
# Make VRanges object
vr = VRanges(
seqnames = single.base.coding.substitutions$chromosome,
ranges = IRanges(single.base.coding.substitutions$chromosome_start,single.base.coding.substitutions$chromosome_end),
ref = single.base.coding.substitutions$reference_genome_allele,
alt = single.base.coding.substitutions$mutated_to_allele,
sampleNames = single.base.coding.substitutions$icgc_donor_id,
study = single.base.coding.substitutions$project_code,
sequencing_strategy = single.base.coding.substitutions$sequencing_strategy)
# add "chr" to work with UCSC.hg19
vr <- ucsc(vr)
# remove non SNV variants (alt coded as 1 at time of writing in NBL)
idx_snv = ref(vr) %in% DNA_BASES & alt(vr) %in% DNA_BASES
# Show (in the log) the variants being dropped before subsetting.
vr[!idx_snv]
vr <- vr[idx_snv]
## Annotate variants with context
vr_context <- mutationContext(vr, genome)
# Raw (unnormalised) motif count matrix, one column per ICGC project.
motif.matrix.count = motifMatrix(vr_context, group = "study", normalize = FALSE)
# number of donors per project
setkey(single.base.coding.substitutions,icgc_donor_id)
ICGCdonors <- unique(single.base.coding.substitutions)
donor.count <- ICGCdonors[,.("donor.count"=.N),by=project_code][order(donor.count)]
# Cartesian (cross) join of two data.tables: tags both sides with a constant
# key k, joins on it with allow.cartesian, then drops the helper column.
CJ.dt = function(X,Y) {
stopifnot(is.data.table(X),is.data.table(Y))
k = NULL
X = X[, c(k=1, .SD)]
setkey(X, k)
Y = Y[, c(k=1, .SD)]
setkey(Y, NULL)
X[Y, allow.cartesian=TRUE][, k := NULL][]
}
coding.trimer.counts <- readRDS("data/coding.trimer.counts.rds")
# cross join with donor counts
trimer.count.by.project <- CJ.dt(coding.trimer.counts,donor.count)
# overall counts of trinucleotides over all donors in a project
trimer.count.by.project$total.count <- trimer.count.by.project$coding.trimer.counts * trimer.count.by.project$donor.count
# Convert mutation motif counts to data table
motif.probabilities <- as.data.table(melt(motif.matrix.count))
setnames(motif.probabilities,c("mutation","project_code","mutation_count"))
# add base motif column
# The motif string starts with the ref/alt pair followed by the context
# (chars 4-6); rebuild the reference trinucleotide by substituting the
# reference base (char 1) into the centre of the context.
motif.probabilities$mutation <- as.character(motif.probabilities$mutation)
motif.probabilities$base_motif <- subseq(motif.probabilities$mutation, 4, 6)
subseq(motif.probabilities$base_motif, 2, 2) <- subseq(motif.probabilities$mutation, 1, 1)
# all rows in motif probabilities with trimer counts added
setkey(trimer.count.by.project,project_code,base_motif)
setkey(motif.probabilities,project_code,base_motif)
motif.probabilities <- trimer.count.by.project[motif.probabilities]
# Calculate mutation "probability"
motif.probabilities$mutation.probability <- motif.probabilities$mutation_count / motif.probabilities$total.count
w_df = melt(motif.matrix.count, varnames = c("motif", "sample"))
w_df$alteration = sub("([ACGTN])([ACGTN]) .+", "\\1>\\2", w_df$motif)
w_df$context = sub("[ACGTN][ACGTN] (.+)", "\\1", w_df$motif)
archive.file("results/mutation_profiles_QC.pdf")
pdf("results/mutation_profiles_QC.pdf",width=20, height=60, onefile = TRUE)
# number of somatic coding mutations per donor
hist(single.base.coding.substitutions[,.N,by=icgc_donor_id][order(N)]$N,breaks=3000,xlim=c(0,500))
ggplot(w_df) + geom_bar(aes_string(x = "context", y = "value"), stat = "identity", position = "identity") + facet_grid(sample ~ alteration,scales="free_y")
dev.off()
archive.file("data/motif.probabilities.rds")
saveRDS(motif.probabilities, "data/motif.probabilities.rds")
sessionInfo()
sink(type="message")
sink()
0d49547eace8d1c0b9a0ef65fee6427ed6911aea | 4f7175f4610cdaddeb499b3b2a1428cf806af64a | /R/tables-phase1-cost-estimate.R | ecec9bccabe32190efb25a08a78870a765587a9a | [
"Apache-2.0"
] | permissive | NewGraphEnvironment/fish_passage_elk_2020_reporting_cwf | adcfcda949a928363dac0180eda217caab01e767 | 6c07e4941e6ba596cadc52f9c87a97a518f61b24 | refs/heads/master | 2023-07-14T15:02:36.414050 | 2021-08-26T19:12:14 | 2021-08-26T19:12:14 | 321,383,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,934 | r | tables-phase1-cost-estimate.R | source('R/packages.R')
# Phase 1 cost estimates: join PSCIS field assessments to bcfishpass modelled
# road/habitat attributes and build the cost-estimate report tables.
# Relies on objects created by the sourced scripts (pscis, bcfishpass_phase2,
# xref_pscis_my_crossing_modelled, xref_structure_fix, phase1_priorities).
source('R/functions.R')
source('R/tables.R')
source('R/tables-phase2.R')
## we need to screen out the crossings that are not matched well
# make_tab_cost_est_phase1 <- function(dat = pscis){
# Keep only modelled crossings matched within 100 m of a PSCIS point.
bcfishpass_rd <- bcfishpass_phase2 %>%
select(pscis_crossing_id, my_crossing_reference, crossing_id, distance, road_name_full,
road_class, road_name_full, road_surface, file_type_description, forest_file_id,
client_name, client_name_abb, map_label, owner_name, admin_area_abbreviation,
wct_network_km, wct_belowupstrbarriers_network_km,distance) %>%
# mutate(uphab_net_sub22 = rowSums(select(., uphab_l_net_inf_000_030:uphab_l_net_obs_150_220))) %>%
filter(distance < 100) %>%
select(pscis_crossing_id:admin_area_abbreviation, wct_network_km, wct_belowupstrbarriers_network_km)
##trying to fix the missed matches
# Crossings whose automatic match was missed; re-match via the xref table.
dups <- c(4600183, 4600069, 4600367, 4605732, 4600070)
match_this <- xref_pscis_my_crossing_modelled %>%
filter(my_crossing_reference %in% dups) %>%
purrr::set_names(nm = paste0('x_', names(.))) %>%
mutate(x_my_crossing_reference = as.numeric(x_my_crossing_reference))
# Backfill my_crossing_reference from the xref match where it is missing.
# NOTE(review): prefer TRUE over T in case_when() defaults (T is reassignable).
bcfishpass_rd <- left_join(
bcfishpass_rd,
match_this,
by = c('pscis_crossing_id' = 'x_stream_crossing_id')
) %>%
mutate(my_crossing_reference =
case_when(is.na(my_crossing_reference) & !is.na(x_my_crossing_reference) ~ x_my_crossing_reference,
T ~ my_crossing_reference)) %>%
select(-x_my_crossing_reference, -x_misc_point_id, -x_crossing_id)
# bcfishpass_rd <- bcfishpass_rd %>%
## updated to new bcfishpass info
# bcfishpass_rd <- bcfishpass_phase2 %>%
# select(pscis_crossing_id, my_crossing_reference, crossing_id, distance, road_name_full,
# road_class, road_name_full, road_surface, file_type_description, forest_file_id,
# client_name, client_name_abb, map_label, owner_name, admin_area_abbreviation,
# uphab_l_net_inf_000_030:uphab_gross_sub22, distance) %>%
# mutate(uphab_net_sub22 = rowSums(select(., uphab_l_net_inf_000_030:uphab_l_net_obs_150_220))) %>%
# filter(distance < 100) %>%
# select(my_crossing_reference:admin_area_abbreviation, uphab_gross_sub22, uphab_net_sub22)
###note that some of the rd info is not likely correct if the distance is >100m
# Join field (PSCIS) crossings to the matched bcfishpass road attributes and
# derive "my_" road class/surface columns, falling back to the forest-tenure
# file type, or to railway ownership, where the road atlas values are missing.
# Fix: use TRUE instead of T in case_when()/distinct() -- T is an ordinary,
# reassignable binding and silently breaks if shadowed.
pscis_rd <- left_join(
  pscis,
  bcfishpass_rd,
  by = c('my_crossing_reference')
) %>%
  # Fall back to the tenure file type when the road atlas class is missing.
  dplyr::mutate(my_road_class = case_when(is.na(road_class) & !is.na(file_type_description) ~
                                            file_type_description,
                                          TRUE ~ road_class)) %>%
  # Crossings with an owner but still no class are treated as rail.
  dplyr::mutate(my_road_class = case_when(is.na(my_road_class) & !is.na(owner_name) ~
                                            'rail',
                                          TRUE ~ my_road_class)) %>%
  dplyr::mutate(my_road_surface = case_when(is.na(road_surface) & !is.na(file_type_description) ~
                                              'loose',
                                            TRUE ~ road_surface)) %>%
  dplyr::mutate(my_road_surface = case_when(is.na(my_road_surface) & !is.na(owner_name) ~
                                              'rail',
                                            TRUE ~ my_road_surface)) %>%
  select(rowname:road_name, file_type_description, owner_name, road_surface, road_class, my_road_class, everything())
# test <- pscis_rd %>% filter(my_crossing_reference == 4605732)
####----tab cost multipliers for road surface-----
# Cost multiplier lookup: one row per distinct class/surface combination.
# cost_m_1000s_bridge is $K per metre of bridge span; cost_embed_cv is the
# $K cost of an embedded (streambed simulation) culvert.
tab_cost_rd_mult <- pscis_rd %>%
  select(my_road_class, my_road_surface) %>%
  # mutate(road_surface_mult = NA_real_, road_class_mult = NA_real_) %>%
  mutate(road_class_mult = case_when(my_road_class == 'local' ~ 4,
                                     my_road_class == 'collector' ~ 4,
                                     my_road_class == 'arterial' ~ 20,
                                     my_road_class == 'highway' ~ 20,
                                     my_road_class == 'rail' ~ 20,
                                     TRUE ~ 1)) %>%
  # Surfaces not listed below intentionally yield an NA multiplier.
  mutate(road_surface_mult = case_when(my_road_surface == 'loose' ~ 1,
                                       my_road_surface == 'rough' ~ 1,
                                       my_road_surface == 'rail' ~ 2,
                                       my_road_surface == 'paved' ~ 2)) %>%
  mutate(cost_m_1000s_bridge = road_surface_mult * road_class_mult * 12.5,
         cost_embed_cv = road_surface_mult * road_class_mult * 25) %>%
  # mutate(cost_1000s_for_10m_bridge = 10 * cost_m_1000s_bridge) %>%
  distinct(.keep_all = TRUE) %>%
  arrange(cost_m_1000s_bridge, my_road_class)
# readr::write_csv(file = paste0(getwd(), '/data/raw_input/tab_cost_rd_mult.csv')) %>%
# kable() %>%
# kable_styling(latex_options = c("striped", "scale_down")) %>%
# kableExtra::save_kable("fig/tab_cost_rd_mult.png")
####-----------report table--------------------
# Pretty-printed multiplier table for the report; bridge cost is rescaled
# from $K per metre to $K per 10 m span.
tab_cost_rd_mult_report <- tab_cost_rd_mult %>%
mutate(cost_m_1000s_bridge = cost_m_1000s_bridge * 10) %>%
rename(
Class = my_road_class,
Surface = my_road_surface,
`Class Multiplier` = road_class_mult,
`Surface Multiplier` = road_surface_mult,
`Bridge $K/10m` = cost_m_1000s_bridge,
`Streambed Simulation $K` = cost_embed_cv
) %>%
filter(!is.na(Class)) %>%
mutate(Class = stringr::str_to_title(Class),
Surface = stringr::str_to_title(Surface)
)
##make the cost estimates
# Attach the class/surface cost multipliers to each assessed crossing.
tab_cost_est_prep <- left_join(
select(pscis_rd, my_crossing_reference, stream_name, road_name, habitat_value, my_road_class,
my_road_surface, downstream_channel_width_meters, barrier_result, final_score,
fill_depth_meters, crossing_fix, habitat_value, recommended_diameter_or_span_meters),
select(tab_cost_rd_mult, my_road_class, my_road_surface, cost_m_1000s_bridge, cost_embed_cv),
by = c('my_road_class','my_road_surface')
)
# tab_cost_est_prep <- left_join(
# select(pscis_rd, my_crossing_reference, stream_name, road_name, my_road_class,
# my_road_surface, downstream_channel_width_meters, barrier_result,
# fill_depth_meters, crossing_fix, , habitat_value, recommended_diameter_or_span_meters),
# select(tab_cost_rd_mult, my_road_class, my_road_surface, cost_m_1000s_bridge, cost_embed_cv),
# by = c('my_road_class','my_road_surface')
# )
# Cost rule: streambed simulation (SS-CBS) is a flat cost; open-bottom
# structures (OBS) scale with the recommended span.
tab_cost_est_prep2 <- left_join(
tab_cost_est_prep,
select(xref_structure_fix, crossing_fix, crossing_fix_code),
by = c('crossing_fix')
) %>%
mutate(cost_est_1000s = case_when(
crossing_fix_code == 'SS-CBS' ~ cost_embed_cv,
crossing_fix_code == 'OBS' ~ cost_m_1000s_bridge * recommended_diameter_or_span_meters)
) %>%
mutate(cost_est_1000s = round(cost_est_1000s, 0))
##add in the model data. This is a good reason for the data to be input first so that we can use the net distance!!
# Cost-benefit: metres (and m2 = width x length) of modelled wct habitat
# (presumably westslope cutthroat trout -- confirm) gained per $K.
tab_cost_est_prep3 <- left_join(
tab_cost_est_prep2,
select(bcfishpass_rd, my_crossing_reference, wct_network_km, wct_belowupstrbarriers_network_km),
by = c('my_crossing_reference' = 'my_crossing_reference')
) %>%
mutate(cost_net = round(wct_belowupstrbarriers_network_km * 1000/cost_est_1000s, 1),
cost_gross = round(wct_network_km * 1000/cost_est_1000s, 1),
cost_area_net = round((wct_belowupstrbarriers_network_km * 1000 * downstream_channel_width_meters)/cost_est_1000s, 1), ##this is a triangle area!
cost_area_gross = round((wct_network_km * 1000 * downstream_channel_width_meters)/cost_est_1000s, 1)) ##this is a triangle area!
##add the xref stream_crossing_id
tab_cost_est_prep4 <- left_join(
tab_cost_est_prep3,
xref_pscis_my_crossing_modelled,
by = 'my_crossing_reference'
)
##add the priority info
# ID prefers the PSCIS id; unmatched crossings get a starred field reference.
# NOTE(review): prefer TRUE over T in case_when() defaults (T is reassignable).
tab_cost_est <- left_join(
tab_cost_est_prep4,
select(phase1_priorities, my_crossing_reference, priority_phase1),
by = 'my_crossing_reference'
) %>%
mutate(wct_network_km = round(wct_network_km,2)) %>%
arrange(stream_crossing_id) %>%
mutate(stream_crossing_id = as.character(stream_crossing_id),
my_crossing_reference = as.character(my_crossing_reference)) %>%
mutate(ID = case_when(
!is.na(stream_crossing_id) ~ stream_crossing_id,
T ~ paste0('*', my_crossing_reference
))) %>%
mutate(barrier_result_score = paste0(barrier_result, ' (', final_score, ')')) %>%
mutate(`Habitat Value (priority)` = case_when(is.na(habitat_value) | is.na(priority_phase1) ~ '--',
T ~ paste0(habitat_value, ' (' , priority_phase1, ')'))) %>%
select(stream_crossing_id, my_crossing_reference, crossing_id, ID, stream_name, road_name, `Habitat Value (priority)`, habitat_value, barrier_result_score, downstream_channel_width_meters, priority_phase1,
crossing_fix_code, cost_est_1000s, wct_network_km,
cost_gross, cost_area_gross)
# Final report table: drop id columns, rename to presentation headers, and
# render every cell as character with "--" in place of NA.
tab_cost_est_phase1 <- tab_cost_est %>%
select(-stream_crossing_id:-crossing_id, -habitat_value, -priority_phase1) %>%
rename(
# `Habitat Value` = habitat_value,
# Priority = priority_phase1,
Stream = stream_name,
Road = road_name,
`Result (Score)` = barrier_result_score,
`Stream Width (m)` = downstream_channel_width_meters,
Fix = crossing_fix_code,
`Cost Est ( $K)` = cost_est_1000s,
`Habitat Upstream (km)` = wct_network_km,
`Cost Benefit (m / $K)` = cost_gross,
`Cost Benefit (m2 / $K)` = cost_area_gross) %>%
mutate(across(everything(), as.character)) %>%
replace(., is.na(.), "--")
# tab_cost_est_phase1 <- left_join(
# tab_cost_est,
# select(bcfishpass_phase2, my_crossing_reference, stream_crossing_id),
# by = 'my_crossing_reference'
# ) %>%
# mutate(ID = case_when())
# select(`PSCIS ID` = stream_crossing_id, everything(), -my_crossing_reference) %>%
# arrange(`PSCIS ID`)
##clean up workspace
# Remove intermediate objects, keeping tab_cost_rd_mult(_report),
# tab_cost_est and tab_cost_est_phase1 for downstream reporting.
# Fix: the list previously named `ab_cost_est`, which never exists, so rm()
# always warned. NOTE(review): `tab_cost_est` may have been the intended
# name -- left in the workspace here to preserve current behaviour; confirm.
rm(tab_cost_est_prep, tab_cost_est_prep2, tab_cost_est_prep3,
   bcfishpass_rd, pscis_rd)
|
8996782b09c68657fd53bb21f2f26ee80acc0513 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/varbvs/examples/summary.varbvs.Rd.R | b1a879e1839b9302b648a2cd26ec7dfb0821a58b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 225 | r | summary.varbvs.Rd.R | library(varbvs)
### Name: summary.varbvs,print.summary.varbvs
### Title: Summarize a fitted variable selection model.
### Aliases: summary.varbvs print.summary.varbvs
### ** Examples
# See help(varbvs) for examples.
|
d3a1faae71ddb5dbab3188be1d6cb6ce5446fb6b | 76954f2237d0511feca066ab13d6706cde4b3475 | /pathways/Phase_II.R | 77fc5700c4096a3bc0332a5ee027f88a12cfe6c1 | [] | no_license | scheng87/evidence-based | f118196b002f44f5e803d0502cbf2aefbe965e75 | 7d05332838a9bb4fdec8ef73d906b675dea4bad3 | refs/heads/master | 2021-01-10T04:08:02.081643 | 2017-06-23T20:56:17 | 2017-06-23T20:56:17 | 49,231,535 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 13,862 | r | Phase_II.R | ##Phase II Analyses
library(dplyr)
library(tidyr)
setwd("~/Documents/github/evidence-based/pathways/")
load("evidence_based_2_11_16.RData")
##Conceptual models - exploring trends and informing pathways
models <- filter(data.pathways,Concept_mod == 1)
models <- models %>% select(aid,Concept_mod,Concept_mod_name) %>% distinct()
model_types <- count(models,Concept_mod_name)
model_aids <- select(models,aid)
model_aids <- distinct(model_aids)
nm_aids <- data.biblio %>% select(aid)
nm_aids <- nm_aids %>% anti_join(model_aids,by="aid") %>% distinct()
##Disaggregating dataset into models and non-models
# Split every coded data table into the articles that used a conceptual
# model (m.data.*) and those that did not (nm.data.*) by joining each table
# against the two article-id lists built above. Produces the same fourteen
# objects as fourteen explicit left_join() assignments would.
for (tbl_name in c("biblio", "biomes", "interv", "outcome", "outhwb", "pathways", "study")) {
  full_tbl <- get(paste0("data.", tbl_name))
  assign(paste0("m.data.", tbl_name), left_join(model_aids, full_tbl, by = "aid"))
  assign(paste0("nm.data.", tbl_name), left_join(nm_aids, full_tbl, by = "aid"))
}
rm(tbl_name, full_tbl)
#Comparing models and non-models - data needs to be normalized (present percentages)
#1. Linkages examined
#2a. Biomes
#2b.Regions
##Plot countries
#load in full country list
# Master country list with region grouping and ISO3 code; Code may be blank
# for non-country rows (those are dropped later).
country <- read.csv("~/Documents/CI_Projects_LA/Knowledge_Base/country_list2.csv", head=TRUE, sep=",")
names(country)<- c("Study_country", "Region", "Code")
regions <- arrange(country,Region)
##Count number of studies for all countries and arrange by region
# Character matrix: country name plus distinct-article counts for the
# model (m_counts) and non-model (nm_counts) study sets.
country_count <- matrix(nrow=nrow(regions), ncol=3)
rownames(country_count) <- regions$Study_country
colnames(country_count) <- c("Study_country", "m_counts","nm_counts")
#Calculate in for loop and write to blank matrix
for (c in regions$Study_country){
subset <- filter(m.data.study, Study_country == c)
country_count[c,1] <- c
country_count[c,2] <- as.numeric(n_distinct(subset$aid))
subset <- filter(nm.data.study, Study_country == c)
country_count[c,3] <- as.numeric(n_distinct(subset$aid))
}
#Remove rownames and reformat data types
rownames(country_count) = NULL
country_count <- as.data.frame(country_count, stringsAsFactors=FALSE)
# Keep real countries only (blank Code rows are non-country entries).
countries_only <- inner_join(country_count,regions,by="Study_country")
countries_only <- filter(countries_only, Code != "")
countries_only <- arrange(countries_only,Region)
# Counts came out of the character matrix above, so convert back to numeric.
countries_only$m_counts <- as.numeric(countries_only$m_counts)
countries_only$nm_counts <- as.numeric(countries_only$nm_counts)
countries_only <- as.data.frame(countries_only)
#Calculate percent of overall studies that include a model
# perc_mod = m / (m + nm) * 100; countries with zero studies yield NaN (0/0)
# and are split out below as the "no data" layer.
c_perc_mod <- matrix(nrow=nrow(countries_only),ncol=1)
rownames(c_perc_mod) <- countries_only$Study_country
colnames(c_perc_mod) <- c("perc_mod")
d <- countries_only
rownames(d) <- d$Study_country
for (c in countries_only$Study_country){
perc <- ((d[c,2])/(d[c,2]+d[c,3]))*100
c_perc_mod[c,1] <- as.numeric(perc)
}
c_perc_mod <- as.data.frame(c_perc_mod)
c_perc_mod$perc_mod <- as.numeric(c_perc_mod$perc_mod)
countries_only <- bind_cols(countries_only,c_perc_mod)
countries_data <- as.data.frame(filter(countries_only, perc_mod != "NaN"))
countries_nodata <- as.data.frame(filter(countries_only,perc_mod == "NaN"))
countries_nodata <- as.data.frame(select(countries_nodata,Study_country, Code))
#Plot distribution to PDF
library(rgeos)
library(maptools)
library(gpclib)
library(ggplot2)
library(RColorBrewer)
# Choropleth layers: data countries filled by perc_mod, OECD countries in
# light grey, countries with no data in darker grey.
oecd <- read.csv("~/Documents/CI_Projects_LA/Knowledge_Base/oecd.csv",header=TRUE)
map <- readShapeSpatial("~/Documents/CI_Projects_LA/Knowledge_Base/TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp")
plot(map)
map <- fortify(map, region="ISO3")
pdf(file="Percent_Models_Country_Map.pdf", width=16, height=8.5)
ggplot() + geom_map(data=countries_data, aes(map_id=Code, fill=perc_mod),map=map) + geom_map(data=oecd, aes(map_id=CODE),fill="#f0f0f0",map=map) + geom_map(data=countries_nodata, aes(map_id=Code),fill="#bbbbbb",map=map) + expand_limits(x=map$long,y=map$lat) + scale_fill_gradient2(low="#d9f0a3",mid="#41ab5d",high="#004529",midpoint=50,limits=c(0,100))
dev.off()
#3. Study design/quality
#4. Researcher/affiliations
#5. Outcomes examined and complexity of outcomes
##Generate term document matrix (as standard matrix) of article IDs (aids) and outcome types
# Rows = 12 outcome domains, columns = non-model article ids; cell is 1 if
# the article reported that outcome type, else 0.
outcome_tdm <- matrix(nrow=12,ncol=n_distinct(nm_aids$aid))
colnames(outcome_tdm) <- as.character(nm_aids$aid)
out_type = c("env", "mat_liv_std", "eco_liv_std", "health", "education", "soc_rel", "sec_saf", "gov", "sub_well", "culture", "free_choice", "other")
rownames(outcome_tdm) <- out_type
m_aid <- as.vector(as.character(nm_aids$aid))
for (n in nm_aids$aid){
sub <- filter(nm.data.outcome,aid == n)
for (o in out_type){
if (o %in% sub$Outcome == TRUE){
state <- as.numeric(1)
} else
state <- as.numeric(0)
outcome_tdm[as.character(o),as.character(n)] <- state
}
}
##Calculate number of outcomes studies
# Column sums = number of outcome types reported per article.
nm_to <- as.data.frame(colSums(outcome_tdm))
colnames(nm_to) <- c("tot_outcome")
nm_outcome_avg <- nm_to %>% filter(tot_outcome != 0) %>% summarise(avg = mean(tot_outcome))
# NOTE(review): outcome_tdm_models is never created in this script --
# presumably the model-article analogue of outcome_tdm built elsewhere;
# confirm before sourcing this file top to bottom.
m_to <- as.data.frame(colSums(outcome_tdm_models))
colnames(m_to) <- c("tot_outcome")
m_outcome_avg <- m_to %>% filter(tot_outcome != 0) %>% summarise(avg = mean(tot_outcome))
#Compare means with t-test - first testing for equal variances
nm_to <- filter(nm_to, tot_outcome !=0)
m_to <- filter(m_to, tot_outcome !=0)
var.test(nm_to$tot_outcome,m_to$tot_outcome)
t.test(m_to$tot_outcome,nm_to$tot_outcome)
#Compare distributions
par(mfrow=c(1,2))
hist(m_to$tot_outcome,main="Studies employing a conceptual model", xlab="Total number of outcomes studied")
hist(nm_to$tot_outcome,main="Studies without a conceptual model", xlab="Total number of outcomes studied")
##Check tdm for missing data
class(outcome_tdm) <- "numeric"
outcome_tdm <- as.data.frame(outcome_tdm)
# Articles whose column is all zeros have no recorded outcome at all.
missing_data <- outcome_tdm[, colSums(outcome_tdm) == 0]
##13 articles w/ models with NO outcome information - need to go back into data and check
##32 articles w/ no models with NO outcome information
#Build term-term adjacency matrix
# Binarise, then the cross-product gives outcome-by-outcome co-occurrence counts.
outcome_tdm <- as.matrix(outcome_tdm)
outcome_tdm[outcome_tdm>=1] <- 1
outcome_term_matrix <- outcome_tdm %*% t(outcome_tdm)
#Build graph of adjacent terms (co-occurence of two terms)
library(igraph)
# NOTE(review): prefer TRUE over T (T is an ordinary, reassignable binding).
g <- graph.adjacency(outcome_term_matrix,weighted=T,mode="undirected")
g <- simplify(g)
V(g)$label <- V(g)$name
V(g)$degree <- degree(g)
set.seed(3952)
layout1 <- layout.fruchterman.reingold(g)
plot(g,layout=layout1)
##Scale based on degrees to make common terms standout
V(g)$label.cex <- 2.2 * V(g)$degree / max(V(g)$degree)+ .2
V(g)$label.color <- rgb(0, 0, .2, .8)
V(g)$frame.color <- NA
egam <- (log(E(g)$weight)+.4) / max(log(E(g)$weight)+.4)
E(g)$color <- rgb(.5, .5, 0, egam)
E(g)$width <- egam
plot(g, layout=layout1)
##Building actual term document matrix
library(tm)
library(SnowballC)
# NOTE(review): `dist` shadows stats::dist() from this assignment onward.
dist <- dist(scale(outcome_tdm))
fit <- hclust(dist,method="ward.D")
plot(fit, cex=0.9,hang=1)
#Comparing within models
#1. Complexity - # outcomes, #interventions
#Distribution of # of outcomes and # interventions per study
#2. Robustness of data - is this biased based on the scientific capacity of the country?
#3. Types of models - standard vs. novel - different linkages? different complexity?
#Separate out novel vs standard models
# "in situ" models are treated as novel; everything else is standard.
nov_mod <- filter(models, Concept_mod_name == "in situ")
nov_mod <- distinct(nov_mod)
std_mod <- anti_join(models,nov_mod,by="aid")
std_mod <- distinct(std_mod)
std_mod_aids <- distinct(as.data.frame(std_mod$aid))
nov_mod_aids <- distinct(as.data.frame(nov_mod$aid))
colnames(std_mod_aids) <- c("aid")
colnames(nov_mod_aids) <- c("aid")
# Attach intervention and outcome codings to each article set.
std_mod_int_out <- left_join(std_mod_aids,data.interv,by="aid")
std_mod_int_out <- left_join(std_mod_int_out,data.outcome,by="aid")
nov_mod_int_out <- left_join(nov_mod_aids,data.interv,by="aid")
nov_mod_int_out <- left_join(nov_mod_int_out,data.outcome,by="aid")
std_mod_int_out <- std_mod_int_out %>% select(aid,Int_type,Outcome) %>% distinct()
nov_mod_int_out <- nov_mod_int_out %>% select(aid,Int_type,Outcome) %>% distinct()
library(gplots)
library(RColorBrewer)
#Create new dataframe with intervention and outcome data
# The heatmap below is built from the standard-model set only.
int_out <- std_mod_int_out
#Heatmap of linkages - UNCONDENSED
int_type = c("area_protect", "area_mgmt", "res_mgmt", "sp_control", "restoration", "sp_mgmt", "sp_recov", "sp_reint", "ex_situ", "form_ed", "training", "aware_comm", "legis", "pol_reg", "priv_codes", "compl_enfor", "liv_alt", "sub", "market", "non_mon", "inst_civ_dev", "part_dev", "cons_fin", "sus_use", "other")
out_type = c("env", "mat_liv_std", "eco_liv_std", "health", "education", "soc_rel", "sec_saf", "gov", "sub_well", "culture", "free_choice", "other")
# 12 outcome domains x 25 intervention types; cell = distinct article count.
io_counts = matrix(nrow=12, ncol=25)
rownames(io_counts) <- out_type
colnames(io_counts) <- int_type
#Calculate number of unique studies for each linkage cell between intervention and outcome
#Calculate in for loop and write to blank matrix
for (i in int_type){
for (j in out_type){
subset <- filter(int_out, Outcome == j, Int_type == i)
io_counts[j,i] <- n_distinct(subset$aid)
}
}
#Relabel rows and columns
int_labels = c("Area protection", "Area management", "Resource management/protection", "Species control", "Restoration", "Species management", "Species recovery", "Species reintroduction", "Ex-situ conservation", "Formal education", "Training", "Awareness & Communications", "Legislation", "Policies & Regulations", "Private sector standards and codes", "Compliance & enforcement", "Enterprises & livelihood alternatives", "Substitution", "Market-based forces", "Non-monetary values", "Institutional & civil society development", "Alliance & partnership development", "Conservation finance", "Sustainable use", "Other")
out_labels = c("Environmental", "Material living standards", "Economic living standards", "Health", "Education", "Social relations", "Security & safety", "Governance & empowerment", "Subjective well-being", "Culture & Spiritual", "Freedom of choice/action", "Other")
rownames(io_counts) <- out_labels
colnames(io_counts) <- int_labels
#Define color palette for heatmap
palette_final <- colorRampPalette(c("#e5f5f9", "#d9f0a3","#41ab5d", "#004529")) (n=50)
#Write heatmap and legend to PDF
pdf(file="Interventions_Outcomes_Standard_models.pdf", width=11, height=8.5)
heatmap.2(io_counts, Colv=NA, dendrogram="none", col=palette_final, cellnote=io_counts, notecol="black", notecex=1.0, trace="none", cexRow=1.5, cexCol=1.5, key=TRUE, Rowv=NA)
dev.off()
#Create new matrix for intervention groups
# Collapse detailed intervention types into broader intervention groups.
# Types without a mapping (e.g. area_protect, res_mgmt, sus_use, other)
# keep their original label via the final else branch; an NA Int_type now
# also falls through to the else branch (%in% is NA-safe, whereas the
# previous `==`/`|` chains made `if` error on NA).
rows <- seq_len(nrow(int_out)) # seq_len() is safe for zero rows, unlike 1:nrow()
int_groups <- matrix(nrow=nrow(int_out),ncol=1)
rownames(int_groups) <- rows
colnames(int_groups) <- c("int_group")
#Assign intervention groups
for (i in rows){
  int <- int_out$Int_type[i]
  if (int %in% c("area_mgmt", "sp_control", "restoration")) {
    group <- "land_wat_mgmt"
  } else if (int %in% c("sp_mgmt", "sp_recov", "sp_reint", "ex_situ")) {
    group <- "species_mgmt"
  } else if (int %in% c("form_ed", "training", "aware_comm")) {
    group <- "education"
  } else if (int %in% c("legis", "pol_reg", "priv_codes", "compl_enfor")) {
    group <- "law_policy"
  } else if (int %in% c("liv_alt", "sub", "market", "non_mon")) {
    group <- "liv_eco_inc"
  } else if (int %in% c("inst_civ_dev", "part_dev", "cons_fin")) {
    group <- "ext_cap_build"
  } else {
    group <- int
  }
  int_groups[i,"int_group"] <- group
}
int_groups <- as.data.frame(int_groups)
int_out_final <- bind_cols(int_out,int_groups)
# Bug fix: the assigned column is named "int_group"; filtering on
# "int_groups" referenced a nonexistent column and fell through to the
# environment data.frame of that name via data masking.
int_out_final <- filter(int_out_final,!is.na(int_group))
#Create blank data matrix with labeled rows and columns
# Condensed heatmap: 11 outcome domains (env dropped) x 10 intervention
# groups, restricted to articles that were part of a systematic review.
int_groups_type = c("area_protect", "land_wat_mgmt", "res_mgmt", "species_mgmt", "education", "law_policy", "liv_eco_inc", "ext_cap_build", "sus_use", "other")
out_type = c("eco_liv_std", "mat_liv_std", "health", "education", "soc_rel", "sec_saf", "gov", "sub_well", "culture", "free_choice", "other")
ios_counts = matrix(nrow=11, ncol=10)
rownames(ios_counts) <- out_type
colnames(ios_counts) <- int_groups_type
#Calculate number of unique studies for each linkage cell between intervention and outcome
#Calculate in for loop and write to blank matrix
# NOTE(review): int_out_sys_final and its Part_sys_rev column are not created
# anywhere in this script -- presumably prepared in another script/session;
# confirm before sourcing this file top to bottom.
for (i in int_groups_type){
for (j in out_type){
subset <- distinct(filter(int_out_sys_final, Outcome == j, int_group == i))
sys_rev <- distinct(filter(subset, Part_sys_rev == 1))
ios_counts[j,i] <- n_distinct(sys_rev$aid)
}
}
#Relabel rows and columns
int_group_labels = c("Area protection", "Land/Water management", "Resource management", "Species management", "Education", "Law & Policy", "Livelihood, economic & other incentives", "External capacity building", "Sustainable use", "Other")
out_labels = c("Economic living standards", "Material living standards", "Health", "Education", "Social relations", "Security & safety", "Governance & empowerment", "Subjective well-being", "Culture & Spiritual", "Freedom of choice/action", "Other")
rownames(ios_counts) <- out_labels
colnames(ios_counts) <- int_group_labels
#Write heatmap and legend to PDF
# Reuses palette_final defined for the uncondensed heatmap above.
pdf(file="Interventions_Outcomes_Condensed_Sys_Rev_Heatmap.pdf", width=11, height=8.5)
heatmap.2(ios_counts, Colv=NA, dendrogram="none", col=palette_final, cellnote=ios_counts, notecol="black", notecex=1.0, trace="none", cexRow=1.5, cexCol=1.5, key=TRUE, Rowv=NA)
dev.off()
#4. Looking at models versus examining explanatory variables/factors
|
27f604538dc4c5be57fe0a68534c2a81f2e6fa40 | cf8622557c2d10b6424b17e694da9fa2b13b47ec | /sim_axis/simulate_axis_results_36.R | af4a19d8c4545653c051d4749377d2b0e8d7f88d | [
"MIT"
] | permissive | silastittes/lasthenia_curves | 0c0c7cb645abe27a2b58aa8e7aa01405e630dc58 | d7bed64203f23a028e1ce737b395cecbc228b50d | refs/heads/master | 2021-09-26T22:31:51.831345 | 2018-11-03T20:50:49 | 2018-11-03T20:50:49 | 140,039,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 39,224 | r | simulate_axis_results_36.R | run_x <-
structure(list(lm_df = structure(list(term = c("maxima_sc", "stretch_sc",
"x_max_sc", "x_min_sc"), coef = c(0.0228075058404137, 0.0570561389841266,
-0.0231594713585525, -0.0150030470914907), p_value = c(0.137892669622365,
0.00438356544664703, 0.15043404655076, 0.131479279280423), rand_var = c(1.41586353557413,
1.41586353557413, 1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583),
sim_n = c(36L, 36L, 36L, 36L)), .Names = c("term", "coef",
"p_value", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), lasso_df = structure(list(
var = c("maxima", "stretch", "x_max", "x_min"), mean = c(0.127795607814222,
0.155130110452087, -0.0240184429899352, -0.140623093881239
), prop = c(0.0675, 0.165, 0.04, 0.01), rand_var = c(1.41586353557413,
1.41586353557413, 1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583
), sim_n = c(36L, 36L, 36L, 36L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), summ_stan_df = structure(list(
shape1 = 3.47629288671328, shape2 = 4.2222851166933, stretch = 0.842219732095997,
x_min = -0.168317380246451, x_max = 3.86762451739167, maxima = 2.23149250350202,
rep = 36L), .Names = c("shape1", "shape2", "stretch", "x_min",
"x_max", "maxima", "rep"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -1L))), .Names = c("lm_df", "lasso_df",
"summ_stan_df"))
run_x <-
structure(list(lm_df = structure(list(term = c("maxima_sc", "stretch_sc",
"x_max_sc", "x_min_sc"), coef = c(0.0228075058404137, 0.0570561389841266,
-0.0231594713585525, -0.0150030470914907), p_value = c(0.137892669622365,
0.00438356544664703, 0.15043404655076, 0.131479279280423), rand_var = c(1.41586353557413,
1.41586353557413, 1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583),
sim_n = c(36L, 36L, 36L, 36L)), .Names = c("term", "coef",
"p_value", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), lasso_df = structure(list(
var = c("maxima", "stretch", "x_max", "x_min"), mean = c(0.127795607814222,
0.155130110452087, -0.0240184429899352, -0.140623093881239
), prop = c(0.0675, 0.165, 0.04, 0.01), rand_var = c(1.41586353557413,
1.41586353557413, 1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583
), sim_n = c(36L, 36L, 36L, 36L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), summ_stan_df = structure(list(
shape1 = 3.47629288671328, shape2 = 4.2222851166933, stretch = 0.842219732095997,
x_min = -0.168317380246451, x_max = 3.86762451739167, maxima = 2.23149250350202,
rep = 36L), .Names = c("shape1", "shape2", "stretch", "x_min",
"x_max", "maxima", "rep"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -1L))), .Names = c("lm_df", "lasso_df",
"summ_stan_df"))
run_x <-
structure(list(lasso_df = structure(list(var = c("maxima", "stretch",
"x_max", "x_min"), mean = c(0.252420310505223, 0.204970796345933,
-0.0198357644186884, -0.197934950800058), prop = c(0.1175, 0.2025,
0.09, 0.0275), rand_var = c(1.41586353557413, 1.41586353557413,
1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583),
sim_n = c(36L, 36L, 36L, 36L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), lasso_depth = structure(list(
draw = 1:400, `(Intercept)` = c(-0.0785653877310065, -0.0737534367396876,
-0.0736948766255932, -0.0448191402526735, -0.0832789204351927,
-0.0666130952380953, -0.0764071356003434, -0.0666130952380953,
-0.0666130952380953, -0.0646467020093678, -0.0666130952380953,
-0.064601948950259, -0.0881730057497709, -0.0761211930106068,
-0.0458130294316729, -0.0468751286030183, -0.0555146396868829,
-0.0682868381078246, -0.0719632432181006, -0.0675974722316181,
-0.063224461791935, -0.0789374338754166, -0.0666130952380953,
-0.0775030415082687, -0.0700232706441188, -0.0666130952380953,
-0.0702722789161689, -0.0666130952380953, -0.0742533476393076,
-0.0786394774757599, -0.0600069932520435, -0.0666130952380953,
-0.0771788835131426, -0.0649330640652903, -0.0735882836549315,
-0.0666130952380953, -0.0780962893927143, -0.0666130952380953,
-0.0673554650539484, -0.0790209722866368, -0.0581960367118326,
-0.0706909988532572, -0.0741791008484825, -0.0755628360743915,
-0.0633744555202206, -0.0666130952380953, -0.080670424306732,
-0.0808044593918928, -0.0666130952380953, -0.0667586959745283,
-0.0666130952380953, -0.0728690505714217, -0.0758234350108377,
-0.0666130952380953, -0.0682217400399282, -0.0666130952380953,
-0.0666130952380953, -0.0686087847841562, -0.0695227310926897,
-0.061809472081219, -0.0841004655046351, -0.0673932223623619,
-0.0630825775460082, -0.0902772906604765, -0.0721148083843461,
-0.0666130952380953, -0.0666130952380953, -0.0752596007209752,
-0.0693739433587734, -0.0418411496333327, -0.0839137765541549,
-0.0604526704571982, -0.0622328134650288, -0.0666130952380953,
-0.0709766030571346, -0.0503298963194644, -0.0346328164013502,
-0.0620547086959025, -0.0630814929709889, -0.0551073625557703,
-0.0666130952380953, -0.0589660485767279, -0.0649432846187843,
-0.0653004281213084, -0.0731976091459174, -0.0889872419048375,
-0.0672269891541937, -0.0699021348899725, -0.0654819494717717,
-0.0777397738998365, -0.0739597891557739, -0.0668921295652804,
-0.0682255730850399, -0.0666130952380953, -0.0666130952380953,
-0.078372739493877, -0.0741523829496321, -0.0666130952380953,
-0.0725644876611507, -0.0939858500376469, -0.0458963047464187,
-0.0722638463433415, -0.0572646249512219, -0.0666130952380953,
-0.0582902178914756, -0.0710021228758685, -0.0152730276903612,
-0.0422773578747004, -0.0609239822290653, -0.0608180375968576,
-0.0632895360942793, -0.0758800103766418, -0.069044271404148,
-0.0611731231906813, -0.0582895401932352, -0.0644980929236107,
-0.0657572590846491, -0.071010220032233, -0.0667174636969485,
-0.0628271504539795, -0.0582742403107793, -0.0498377780398965,
-0.0782027155420368, -0.0666130952380953, -0.0615345204461313,
-0.0688149343607618, -0.0418128121350079, -0.0446289717638794,
-0.0539449441849652, -0.0658678118831725, -0.0572212145714174,
-0.0666130952380953, -0.046242867519462, -0.0559218332913681,
-0.0648453386557442, -0.0672386497721482, -0.0229983956576887,
-0.0656807406003591, -0.0664402962371828, -0.0594179038349185,
-0.0666130952380953, -0.0421721924317512, -0.0664931520964895,
-0.0437351876484362, -0.0505272585809839, -0.0682039290561697,
-0.0546089872630482, -0.0790037952811893, -0.0593841359414855,
-0.0632285157416797, -0.0238434139986897, -0.0680707997730117,
-0.0666130952380953, -0.0502121138421067, -0.0497430655355535,
-0.0676593197714559, -0.0440323985282432, -0.0516392072976645,
-0.0511911490006368, -0.051964794560571, -0.0653186490877546,
-0.0595839194485399, -0.0464884343167314, -0.0662709477795787,
-0.0592532819608149, -0.0437524164969002, -0.0631760607477182,
-0.0671162454512188, -0.0569152666098943, -0.0616850196994015,
-0.0560444329257893, -0.0625839276353523, -0.0259471811335321,
-0.04075242057113, -0.0620554427580666, -0.0666130952380953,
-0.0710588856114543, -0.0563155497594817, -0.0467944326608943,
-0.0547684807495657, -0.0638764690743796, -0.0333896600100587,
-0.0637350620875441, -0.0979853582911615, -0.0518073972584689,
-0.0641696615126442, -0.0709225974782667, -0.0666130952380953,
-0.0679999275239609, -0.0665582681340564, -0.0566280332663904,
-0.0694148449086677, -0.0659279958417957, -0.0532487861890747,
-0.056383300224607, -0.0661321532195266, -0.067636832928044,
-0.0664720269319548, -0.0565567946597707, -0.0612978979696921,
-0.0674220373209549, -0.0609086277853211, -0.0521639596391776,
-0.0707582714332701, -0.0441039758070953, -0.0684179394600345,
-0.0662245282294469, -0.057748477594682, -0.0839468874657525,
-0.0572565212959164, -0.0657664100376211, -0.0658518401129678,
-0.0564928789774472, -0.0697459731979912, -0.0694161145303583,
-0.063575246599818, -0.0593727229872294, -0.0112069846862159,
-0.0631614766523654, -0.0673386915240652, -0.0587341353960616,
-0.0828442856244294, -0.0637319622352164, -0.0688163999817983,
-0.0692044567240665, -0.0571084941886545, -0.0775878172902471,
-0.0488785028007894, -0.057272194179074, -0.0401154533275052,
-0.069123181719856, -0.0585562665894249, -0.0730952991714897,
-0.0666130952380953, -0.0636743514962766, -0.0714625212912789,
-0.0538413270338978, -0.068754516508895, -0.0670503914536422,
-0.061962887050067, -0.0350772916379022, -0.0827096816010734,
-0.0666130952380953, -0.0815951469815881, -0.0591684993803164,
-0.0639246190979653, -0.0350745285619913, -0.0596140603148968,
-0.0591963076956752, -0.0476032378710879, -0.0655160095565184,
-0.0666130952380953, -0.0537146898934726, 0.0162124217990334,
-0.0616759943114012, -0.0729434214544584, -0.0614479614199412,
-0.0357561329818425, -0.0665751840407207, -0.0784802759161941,
-0.0671054621644115, -0.0575809626859827, -0.0713549295474797,
-0.0645543294806095, -0.0520501805766765, -0.0688292433897259,
-0.0423603969734812, -0.0666130952380953, -0.0666130952380953,
-0.0394538878536485, -0.0521393035403543, -0.0629059335419995,
-0.0451162910957789, -0.0516613605715132, -0.069059649809896,
-0.0609776880031022, -0.0623272854298471, -0.0578499064800275,
-0.0742301720870974, -0.0658313903102946, -0.0546583902195905,
-0.0666130952380953, -0.0598877222608012, -0.0571766935468374,
-0.045787470646931, -0.0751488806774287, -0.0645893630281485,
-0.0563577969239294, -0.070798610954127, -0.0657786082920658,
-0.0597365863196131, -0.0650709596029749, -0.0603238348234694,
-0.0623749911299781, -0.0527543350325773, -0.0526239008365142,
-0.0551833437781836, -0.0598572209324157, -0.0579544417422629,
-0.0693311491490387, -0.0596906669408649, -0.0708343794499972,
-0.0666130952380953, -0.0728735460334244, -0.0982595250631079,
-0.0564953039416041, -0.0625568342751564, -0.0652582610085664,
-0.0541062858370048, -0.0633743800624807, -0.075994630619699,
-0.0603348576918057, -0.0666130952380953, -0.0606159962506919,
-0.0654282305451249, -0.0646301123125592, -0.0683498904775949,
-0.0720685725682885, -0.0664456713605656, -0.0820158076834625,
-0.0713055598348794, -0.0480576200019331, -0.0687922493025547,
-0.0836682686919087, -0.0654781147051306, -0.063840774740535,
-0.051085962776741, -0.0613459537086141, -0.0666130952380953,
-0.0663627997581147, -0.0802518883851518, -0.0392944395105437,
-0.0541218784644882, -0.0709578336041794, -0.0607102988416362,
-0.0450050487661239, -0.0539549284638688, -0.0665093365539857,
-0.0546455720805663, -0.0467202342850464, -0.0547611549521904,
-0.0578295297320703, -0.0568642587714582, -0.0697979630929315,
-0.0646740159981864, -0.0505875716804193, -0.0561486955883406,
-0.0666130952380953, -0.0534082317206074, -0.0446722200186679,
-0.0632648656541472, -0.0622572354818417, -0.0661569166748389,
-0.0398559010271972, -0.0666130952380953, -0.0792584058781229,
-0.0666130952380953, -0.0679677658421505, -0.0702271748092287,
-0.0659433537784436, -0.0545853158585778, -0.0562020696975121,
-0.0731469617780032, -0.062280245307846, -0.0666130952380953,
-0.0498919506369602, -0.0573244199608091, -0.0598533369781337,
-0.0599064634322288, -0.0673114900412133, -0.0232389122309002,
-0.064238080799883, -0.0672792393584211, -0.0710187130207668,
-0.0656524853272704, -0.0592834801534269, -0.0557058749768686,
-0.0629510518176042, -0.0545273128803198, -0.0666130952380953,
-0.0616577031760391, -0.0616017231015464, 0.00471302916201938,
-0.0446857177285505, -0.0676597150760336, -0.0510938155008977,
-0.0569942886804155, -0.0664543157177189, -0.0559056760309896,
-0.0689914020885078, -0.0617989097556568, -0.0458225824798689,
-0.0565126289878998, -0.0701125689965104, -0.0668835878143043,
-0.0726266037623769, -0.0566955257281633, -0.0667997370237194,
-0.0518294537862317, -0.0740144131591469), stretch_sc = c(0,
0.010712552793687, 0.030305507771342, 0.0291777974100318,
0.0121114450567444, 0, 0.0286590343050757, 0, 0, 0, 0, 0.0347237395706643,
0.00523470992566278, 0.0193801961840816, 0.00789206190485717,
-0.0108374340755648, 0.0475967346872699, 0.0346878888119248,
0.0222144083788635, 0.0204822484333428, 0.0549675528404303,
0.0470649509677231, 0, 0, 0, 0, 0.0365344266142901, 0, 0.0179389305870372,
0, 0.0314594845990231, 0, 0.0343338381751884, 0.0196868676030028,
0, 0, 0.0264520683162785, 0, 0.021588177723625, 0.0393486166707394,
0.0134541437958671, 0.0329622199116492, 0, 0, 0.0445753825183569,
0, 0.0223399232359856, 0.0287866459056312, 0, 0.0422585624998843,
0, 0.0384959663838311, 0.0294090614413109, 0, 0.0382767697176093,
0, 0, 0, 0.0130808791195063, 0.0582793593124656, 0.0167264396913211,
0.0293773493736255, 0.0201572029471515, 0.0296129699114062,
0, 0, 0, 0.0552141061556481, 0.0444453697437142, 0.0123043310248808,
0.0346528758102693, 0.016030038220527, 0.0319655194023042,
0, 0.0342042978707451, 0.0794728274142922, 0.0681147715244254,
0.0468424894784834, 0.00519536071642741, -0.032649491110188,
0, 0.053265453564357, 0.0182547398235379, 0.0549545383822924,
0.0472319427760616, 0.01023937584271, 0.0156923487293115,
0.0604749364414498, 0, 0.0183386448200304, 0.0129541579366008,
0.0117381770887053, 0, 0, 0, 0, 0.0502252508635525, 0, 0.0418618817747504,
0, 0.0663438203283314, 0.0338783838171674, 0.0535161033349041,
0, 0.0334063781994868, 0.021969801072835, 0.103364360684351,
0.0516986753077737, 0.0492546450656526, 0.0282108867799603,
0.0460227569869802, 0.0731490599212701, 0.0316961344494408,
0.0376760883395232, 0.0272041059707333, 0.0317116778192143,
0.0478646784636681, 0.0508318867855728, 0.0219361090450446,
0.0249162074166424, 0.11801260775704, 0.100286697343934,
0.0365528205607739, 0, 0.0382698915103799, 0.0509915579560317,
0.0481750456735742, 0.131283288697785, 0.0571101342182962,
0.0499909450038254, 0.047378551180847, 0, 0.0466201206066392,
0.0427752065397539, 0.0305080766948277, 0.0561881612768846,
0.0937596138315798, 0.0384956231339378, 0.0330024371972032,
0.0385322175366311, 0, 0.0861658192201194, 0.0366408526517955,
0.0589034458186427, 0.0446452586591299, 0.0334744886327146,
0.0687953800094684, 0.0618309049882769, 0.0366092768364357,
0.0338619236144384, 0.0412889298487122, 0.0216777279400637,
0, 0.084595121558149, 0.0634231697285, 0.0617995009189871,
0.0239748203450707, 0.0653655138271167, 0.0277581053007859,
0.0569828169728362, 0.0642569152017297, 0.0385978072453576,
0.0635361257394237, 0.0281566961727623, 0.0418873390519106,
0.0748707693142184, 0.0758963375549598, 0.0470773112292584,
0.0493483377361508, 0.0603692606019216, 0.0962487426430013,
0.0600252688420147, 0.0716035381435828, 0.0674831442321956,
0.037113290056433, 0, 0.0514389649383865, 0.0410552227977531,
0.0735631241759307, 0.0575067657110819, 0.0262354588150255,
0.0773386795334879, 0.0358883673548289, 0.0332658952081844,
0.0437422580746452, 0.0338532054321128, 0.0634198110593037,
0, 0.0334394459761714, 0.0266788234581108, 0.0477376472148228,
0.0502220576065856, 0.0472861679485688, 0.0300619740528548,
0.0740739690020685, 0.00922328438054768, 0.0577424695067317,
0.0479472769398223, 0.0597050353228643, 0.0404414159118109,
0.0294301352583194, 0.0509382529793653, 0.0479327093893747,
0.0286622422907276, 0.0807044483229501, 0.0272321368655714,
0.0327543785514226, 0.0592459401955545, 0, 0.0524279964546362,
0.0120744394145197, 0.0388158546655944, 0.0488134344555817,
0.0455565468677652, 0.0288529331576318, 0.0345513749084874,
0.057321163374162, 0.0277743453156285, 0.0192941143183214,
0.020225958436554, 0.048183897494407, 0.0659704898069531,
0.0181917555671571, 0.0482891619321894, 0.0707153855437075,
0.0366807810359953, 0.042337517390057, 0.0925023831753942,
0.0351779877667554, 0.0687582998251476, 0.0113660737878327,
0.0399175981312222, 0.0647865944114795, 0, 0.0206513007928582,
0.0477315488057113, 0.0538148851854549, 0.0133646757254749,
0.0290964565603116, 0.0471046166937369, 0.0654615457291351,
0.0425084337257644, 0, 0.0437855136062966, 0.010134866745323,
0.0592505907337767, 0.145329645009079, 0.0525484293579456,
0.0482935016773756, 0, 0.0324762093079304, 0, 0.0575391852736072,
0.0451328327401902, 0.0418992586761507, 0.0476117772142512,
0.0447662959826333, 0.0774276601365214, 0.0475639735331897,
0.048852926717522, 0.029029385147396, 0.0382187292965633,
0.0233852474940637, 0.09162229269106, 0.0606167911568372,
0.0326744904751437, 0.0505078546505011, 0, 0, 0.0562304640169903,
0.080931211688372, 0.0509942319583524, 0.0837230122527424,
0.0668204100035781, 0.0131275995993918, 0.0613925627890109,
0.0143690785022208, 0.0409928637887149, 0.0895641709349991,
0.00458402786141142, 0.0401143044775431, 0, 0.0399616271359792,
0.0363504930153114, 0.0597156366175702, 0.0476740451519719,
0.0347605996845489, 0.0387764173877908, 0.0498069631943095,
0.0379205524597568, 0.0261636049159682, 0.0208356242581979,
0.0482986966863916, 0.0239191868871862, 0.047163194926721,
0.0501052484712598, 0.0467518654016212, 0.0362450243358945,
0.0417631239114098, 0.0433567887144534, 0.0762113413808253,
0.0413241417073798, 0, 0.0377715078131342, 0.0394370079395239,
0.0495394129362723, 0.0335080103940972, 0.0101426161541824,
0.059335519921754, 0, 0.0450313973675383, 0.0495058156907279,
0, 0.041771477017568, 0.0195806321657018, 0.0353729403524358,
0.0395206678883288, 0.0466083714452797, 0.0205524452766147,
0.0443641923746719, 0.0409305510449349, 0.0809996745466028,
0.0406896729788617, 0.0532492543149396, 0.0142632493541697,
0.0410371404482752, 0.0399679734813915, 0.0446271803713085,
0, 0.0313908428854142, 0.0582906509605625, 0.087629965259937,
0.0694976825533012, 0.0237161877118006, 0.0421343837835051,
0.0648023934258207, 0.039942735072132, 0.0284139203127136,
0.0479042940980402, 0.0882484569458266, 0.0399432034956507,
0.042232220519562, 0.0336942266348107, 0.0297194138224271,
0.0215713868033411, 0.0843977535980013, 0.0879873886163903,
0, 0.0365455279316457, 0.0173340055682553, 0.0387619471409617,
0.0385462106916138, 0.0224946246053325, 0.0365647689159963,
0, 0.0570566749194842, 0, 0.0128589502724916, 0.0147897775809753,
0.0551794093684855, 0.0450461730063858, 0.070565838788763,
0.022673095156903, 0.0368010657137533, 0, 0.0546757954243484,
0.0640289996198511, 0.0348587754827972, 0.0495619174233487,
0.044674769350674, 0.0662586266273443, 0.0498903585656333,
0.035783829487976, 0.0345528707652255, 0.0441737342649839,
0.0391553354081407, 0.0627718129565852, 0.0328181766974383,
0.0567799328241164, 0, 0.0488799527345956, 0.0425607050495635,
0.10565326003009, 0.0751689589370907, 0.0337544828043444,
0.0563846051227193, 0.0380509765246799, 0.032061917951112,
0.0430771075824421, 0.0381230503584178, 0.0255259710240085,
0.0479346731978349, 0.0577059422583316, 0.0393986715723445,
0.0344394432739855, 0.0239820171505721, 0.07543876819341,
0.0505382152340173, 0.041189101618023, 0.0471093699806476
), maxima_sc = c(-0.0239292453270398, -0.00019794575182078,
0, -0.00924965704931748, 0.062587209350936, 0, -0.0161695454536572,
0, 0, -0.0318171509265791, 0, 0.0259852665269849, -0.00858067387934364,
-0.00362084023973579, 0, -0.247311025114267, -0.02761610169241,
0, 0, -0.00698782547341414, 0, -0.0113744287791474, 0, -0.00490422252643302,
0.018287909729204, 0, 0, 0, 0, -0.027026236760773, -0.00328675610621131,
0, -0.00976120614135593, 0, -0.0163442087781774, 0, -0.0159831932714987,
0, -0.0345596873336376, 0, 0.0297649966909723, -0.017766910100309,
-0.0298687878220593, 0, -0.00632820838599876, 0, -0.0316057085956885,
-0.00401113422577802, 0, 0, 0, -0.00263332633484772, -0.0168263877474006,
0, -0.00361002402727545, 0, 0, -0.017341926094018, -0.0136295707119172,
0.0882571031979493, -0.0410029712852804, -0.0137667565504178,
-0.00469302830324556, -0.00214935573314229, -0.018515810185365,
0, 0, 0, 0, 0, -0.0412613679261718, 0, -0.0109905744390918,
0, 0, 0, 0.00671206092047526, -0.0079530273154372, 0, -0.0974452743913715,
0, 0.020085020479752, 0, -0.0116345849864547, 0, 0, -0.0176221604253814,
0.0366978865956316, -0.0221616859737991, 0, -0.0207007363194524,
0, -0.0153451261850571, 0, 0, -0.0238338942478711, 0.0288792382786655,
0, 0, -0.0531076820758905, 0.0306215016395184, 0.0102516452177965,
0, 0, 0, 0, 0, 0.0537191255981443, 0.0117548390365059, 0,
0, 0.0387869619410127, 0, 0, 0, 0, 0, 0.0195174049644177,
0, 0, -0.00727495323310363, 0, 0, 0, 0, 0.0165730316447287,
0, 0.0518107129624027, 0, 0, 0, 0, 0, 0, 0, 0.103951016123222,
0.043394975981835, 0, 0, 0, 0, 0.0497001259116414, 0, 0,
-0.0163839458772354, 0, 0.014452894135565, 0.0213011748030821,
0, 0, -0.0304358293966873, 0, 0, 0.00969331492794717, 0,
0.0451243572847759, -0.0165418793826666, -0.000470482764117231,
-0.00886923213690257, 0, 0.049208708887767, 0, 0, 0, 0, 0.0252406806710699,
0, 0, 0, 0, 0.0648382134458741, 0, 0, 0, 0, 0, 0.0132721087653842,
0, 0, 0.0470465557422522, 0, 0.0350547829478382, 0, 0.122756064384152,
-0.0140872701168571, 0, 0, 0, 0, 0, -0.00502478530496103,
0.00847221294152181, 0, 0, 0.0513477027175027, -0.00326872666695351,
0.10452363273321, 0.00698099438649424, 0, 0, 0, 0, 0.096040505014404,
0, 0.0584394777215803, 0, 0, -0.0124678494907781, 0.0189828201161865,
0, 0, 0, 0.0410188327302439, 0.00292288841516183, 0, 0, 0.0336502273440397,
0.0478301060631606, 0, 0, 0, 0.0334637019565799, 0, 0.0360085101701031,
0.0439110258562996, 0.022914154597828, 0, 0.109817228561945,
0, 0.0443902361288078, 0, 0, -0.00935218981502514, 0, 0,
0, 0.0190741057973207, 0, 0, 0, 0.057761105817893, 0, 0,
0.00889421444264476, 0.00347395779045794, -0.00211253203347319,
0.0495548025449435, 0.0201286361505717, 0.0246999473086631,
0, 0, 0, 0, 0.101914698320516, 0, 0.0164681283014127, 0.0967262525469087,
0.0233609675928886, 0, 0, 0, 0, -0.00200350831811605, 0,
0.0159639927526578, 0, 0.00286567239854221, 0, 0, 0.0606042898323818,
0.0610629387099892, 0, 0.0582454427553054, 0.0133753772278906,
0, 0.00358937837937023, 0, 0, 0.0266641767777797, 0, 0, 0,
0, 0, 0.0492511687669325, 0.049261926808871, 0, 0, 0, 0,
0, 0, 0, -0.0132704828019872, 0.0416070097662177, 0, 0.0486714405099354,
0, 0, 0, -0.0295811567124512, 0, 0, 0, 0.0658437739994626,
0, 0, 0, 0, -0.000708462533872565, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.0211450236299149, 0, 0.0199209149769144, 0, 0.0383504164841045,
0, 0, 0, 0, 0, 0, 0.0687149173617133, 0.0136200065507323,
0, 0, 0, 0, 0, 0, 0, 0.0742429552684458, -0.0113240365414917,
0.0108261183306536, -0.0135323531427381, 0, 0, 0.00828092451406187,
0.0474798944653947, 0, 0.0345776817567852, 0.0470294214122361,
0, 0, 0, -0.0913860533585726, 0, 0.0267945154040907, 0, 0,
0, 0, 0, -0.00400216645696432, 0, 0, 0, 0.0509795340771594,
0.00965588029083106, 0.0406272128864671, 0, 0, -0.024735218799564,
0, 0, 0, 0.00609724879606387, 0, 0.0186424770594976, 0, -0.00174414425845346,
0, 0, 0.0163463531030912, 0.151922559227221, -0.0230183313149595,
0, 0.00415659511299312, 0, 0, 0, -0.000608095144920848, 0,
0.0567781725857347, 0.0245680257063121, 0, 0, 0, 0.040756602134445,
0, 0, 0.0214784525004875), x_max_sc = c(0, 0, 0, -0.0567779969825567,
-0.0862023412437928, 0, -0.021037225169643, 0, 0, 0, 0, -0.0741943200884178,
-0.0396436222266138, -0.0285982179409369, -0.0507811985400196,
0.194865601853359, -0.0158525951798532, -0.00204655189899361,
-0.0430083089948564, 0, 0, -0.010538808034754, 0, -0.0451632554018735,
-0.0782514007733716, 0, -0.0168721313838889, 0, 0, 0, 0,
0, -0.018434831487833, 0, 0, 0, -0.0190247571288056, 0, 0,
-0.00811758995853774, -0.0674302072173483, 0, 0, -0.0307552762501201,
-0.0227997769720171, 0, -0.0231285603161688, -0.0241215154743764,
0, -0.0111734286512058, 0, 0, -0.0106861332749773, 0, -0.0234785560415487,
0, 0, 0, 0, -0.0584458351704236, -2.23915620176253e-05, 0,
0, -0.0177026360965861, 0, 0, 0, -0.0243650299850149, -0.00879557540092108,
-0.0526693974397552, -0.00518718484559592, -0.0209156983084677,
-0.0271597969446037, 0, -0.00103701339292906, -0.0241212262993604,
-0.00673386209554871, -0.0259884954612567, -0.0432957643896803,
0.0696005754767316, 0, -0.00775382820252859, 0, -0.0329164553485527,
-0.00567515449227432, -0.0584906055953839, 0, -0.0512540849730802,
-0.00912941646405399, -0.0195512218759606, 0, 0, 0, 0, 0,
0, -0.0439025027201751, 0, 0, -0.00633133504702436, 0, 0,
-0.000701114249046435, 0, 0, 0, -0.000277230487571007, -0.0831648181551467,
0, 0, 0, -0.0287298227796585, 0, 0, 0, -0.00112346688628203,
0, 0.0138489123685721, 0, 0, 0.00651903131724483, 0.00654663355229552,
0.0279219664314897, 0, 0, 0, -0.0184960250238411, -0.000619705436062813,
0, -0.0233904189903605, 0, 0, -0.0227145226771516, 0, 0,
-0.0926183494349799, -0.0283590080597651, 0, 0, 0, 0, -0.0707606219596321,
0, -0.0104614968533075, 0, 0, -0.0166422490403392, -0.00482293749454318,
0, -0.00718247433995904, -0.0204092206118299, 0, 0, -0.0453825052937442,
-0.0310812020737777, -0.0560879047153081, -0.0118798351804928,
-0.000691447967731702, -0.0251412476599006, -0.00795210700962483,
-0.0522733389678674, 0, -0.00709929831938429, 0, 0, -0.0293476787705635,
0, 0, 0, 0, -0.0784293010711612, -0.0073061028267132, -0.0397933431937639,
-0.018538689656254, -0.00103834061628362, 0, 0, 0, -0.0159759672409575,
-0.0459899564650113, 0, -0.050881875891693, 0, -0.149964969504073,
0, 0, -0.0279036620776292, 0, 0, -0.00324279986335286, -0.00720193913187133,
0, 0, 0, -0.0544375287242868, 0, -0.0919709696933843, 0,
0, 0, 0, 0, -0.0241025628989342, 0, 1.72472661356868e-05,
0.0231016709901464, 0, -0.0223859493474425, -0.133603574866534,
-0.00288822667243481, 0, 0, 0, -0.00818034291682672, 0, -0.00825970868610955,
0, -0.129156316311668, 0, 0, 0, -0.0467024412487457, 0, 0,
-0.0177481753943586, 0, 0, -0.0468696760656328, 0, 0, 0,
0, -0.0300308970933871, 0, 0, 0, 0.0065660187985606, 0, 0,
0, 0, -0.00300141821334048, 0, 0, 0, -0.0279428181198231,
-0.0333733467624327, 0, 0, 0.0286907622200812, 0, 0, 0, -0.127600870336952,
0, 0, -0.0782913558971221, 0, -0.0400625146303588, 0, 0,
0, 0, 0, 0, -4.77705399500034e-05, 0.0029012555065602, 0,
0, 0.0203609456121936, -0.00289393808894082, -0.0396437629317893,
-0.0120834564794146, 0, 0, -0.00959404805024734, 0.0149987904589571,
0, 0, 0, 0, 0, 0, 0, 0, -0.024065654874794, 0, 0, 0, 0, 0.0177446992508566,
0, -0.00177893907701003, 0, 0, 0, -0.0245368210009612, 0,
0, 0, 0, 0, 0, -0.0116201114059357, -0.107215466376995, 0,
0, 0, 0, -0.0432802673890524, 0, -0.0112690175686156, 0,
-0.000293606361633582, 0, 0, -0.00740357809133759, 0, 0,
0, 0, -0.0278885737902714, 0, -0.10672420737002, 0.00983215906805172,
-0.0114770099712569, 0, 0, 0, 0, -0.0184516667893859, -0.0235992532948167,
0, 0, 0, -0.00790139258172696, 0, 0, -6.47309295320972e-05,
-0.0939672820160154, 0, 0, 0, 0, 0, -0.0279894406642658,
-0.0187473020811115, 0, 0, -0.088204423075571, 0, 0, 0, 0.124632341684952,
0, -0.00725735905045398, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00594497223503625,
0, -0.0790534318010108, 0, 0, 0.0467674244260703, 0, 0, 0,
0.0135666602568411, 0, -0.0147171945411359, 0, -0.0127817278113969,
0, 0, 0.00223243270111326, 0.0455832421129776, 0.0147358652081134,
0, -0.00963549969244065, 0, 0, 0, -0.01218030230882, 0, 0,
0, 0, -0.00135143209370424, 0, -0.0138654841139972, -0.00278456984503607,
0, 0), x_min_sc = c(0, 0, 0, -0.0493163167779942, -0.0849924441569146,
0, -0.00498435815552371, 0, 0, 0, 0, -0.0617748810206883,
-0.0243371070637263, -0.0206238638639379, -0.0684208938010228,
0.115014940429425, 0.000765840677360543, 0, -0.0159216205331603,
0, 0, 0, 0, -0.0436864325279749, -0.0786538096722533, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.003390071099538, 0, 0, 0,
-0.0718064612847861, 0, -0.00605733003648087, -0.0396418414710554,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0434433501056119,
0, 0, 0, 0, 0, 0, 0, 0.0089899250858484, -0.00124329036550566,
-0.0580599100768195, 0.018136296638311, -0.0353496820785111,
-0.00954246531089019, 0, 0, 0, 0.00690013535359092, 0, -0.0465127288551156,
0.0252959264547797, 0, 0.0037190780911635, 0, 0, 0, -0.0335832724466242,
0, -0.0254816167453556, -0.0237226785431966, -0.0104226817553536,
0, 0, 0, 0, 0, 0, -0.0247633614979125, 0, 0, 0, 0.00285341630113898,
-0.00219725472384276, 0, 0, 0, 0, 0.051104598079593, -0.0809978731665916,
0, -0.0116593805238943, 0, 0, 0, 0, 0, 0, 0, -0.000336374606507736,
0, 0, 0.0601787737237385, 0.0392455913671408, 0, 0, 0, 0,
0, 0.0520650302490248, 0, 0, 0, 0, 0, 0, 0, -0.0736428019215622,
0, 0, 0, 0, 0, -0.0300249165365179, 0, 0, 0, 0, 0, 0.00752534775727956,
0, 0, 0, -0.0290175075722305, 0, 0, 0, -0.0342201669828395,
0, 0.0145380756660058, 0, -0.00190232564248471, -0.0905844404877704,
0, 0, -0.00154758262435123, 0, 0, 0.0198197867901304, 0,
0, 0.0203337480951741, -0.017998958142854, 0, 0, 0, 0, 0,
0.00784664124372762, 0, 0.0125076104553423, -0.0282738918252159,
0, -0.045059371742143, 0, -0.110037353919043, 0, 0, 0.0124782968923941,
0, 0, 0, 0, 0, 0, 0, -0.0183903965486154, 0, -0.0716904458781262,
0, 0, 0, 0, 0, -0.0528345833493119, 0, 0, 0, 0, 0, -0.109474679232307,
0, -0.00827934619706751, 0, 0, 0, 0, 0, 0, -0.136417945124761,
0, 0, 0, -0.0200113342947698, 0, 0, 0, -0.00829354608737962,
0, -0.018457925339933, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, -0.0216542020131527, 0, 0.0264117623201696, 0,
-0.00413057056744108, 0, 0, 0, 0, -0.138877101717116, 0,
0, -0.0808364386785243, 0, -0.0104123476690789, 0, 0, 0,
0, 0.0381954896282095, 0, 0, 0, 0, 0, 0, 0.00758426399457844,
-0.0252998191860898, 0, 0.00850536723294313, 0, 0.00401259296574426,
0, 0, 0.0124715001000991, 0, 0, 0, 0, 0, -0.00198226797540236,
-0.0206125078006893, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.024838323678084,
0, 0, 0, 0.0193456336590759, 0, 0, 0, -0.0669928735782567,
0, 0, 0, 0, -0.0636317463837024, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.00258556171307422, 0, 0, 0, -0.0559758417643389, 0,
0, 0, 0, 0, 0, 0, 0.00378125519608552, 0.0168500748630436,
0, 0, 0, 0, 0, 0, -0.0445961313161898, 0, 0, 0, 0, 0, 0.00803170468924242,
0, 0, -0.0287606595989194, -0.104691186192197, 0, 0, 0, 0.0838388277437149,
0, 0.00477959557389007, 0, -0.0218506808716724, 0, 0, 0,
0.0151520224913038, -0.00477483240075493, 0, 0, 0, 0, -0.0739944051175982,
0, 0, 0.0396021166482059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.00772102130380992,
0, 0.0299438494297185, 0, 0, 0, 0, 0, 0, 0, -0.0184448432690344,
0, 0, 0, 0, 0, 0, 0, 0)), row.names = c(NA, -400L), class = c("grouped_df",
"tbl_df", "tbl", "data.frame"), .Names = c("draw", "(Intercept)",
"stretch_sc", "maxima_sc", "x_max_sc", "x_min_sc"), vars = "draw", drop = TRUE, indices = list(
0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L,
14L, 15L, 16L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L,
26L, 27L, 28L, 29L, 30L, 31L, 32L, 33L, 34L, 35L, 36L, 37L,
38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L, 46L, 47L, 48L, 49L,
50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L, 59L, 60L, 61L,
62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L, 73L,
74L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L,
86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 97L,
98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 106L, 107L,
108L, 109L, 110L, 111L, 112L, 113L, 114L, 115L, 116L, 117L,
118L, 119L, 120L, 121L, 122L, 123L, 124L, 125L, 126L, 127L,
128L, 129L, 130L, 131L, 132L, 133L, 134L, 135L, 136L, 137L,
138L, 139L, 140L, 141L, 142L, 143L, 144L, 145L, 146L, 147L,
148L, 149L, 150L, 151L, 152L, 153L, 154L, 155L, 156L, 157L,
158L, 159L, 160L, 161L, 162L, 163L, 164L, 165L, 166L, 167L,
168L, 169L, 170L, 171L, 172L, 173L, 174L, 175L, 176L, 177L,
178L, 179L, 180L, 181L, 182L, 183L, 184L, 185L, 186L, 187L,
188L, 189L, 190L, 191L, 192L, 193L, 194L, 195L, 196L, 197L,
198L, 199L, 200L, 201L, 202L, 203L, 204L, 205L, 206L, 207L,
208L, 209L, 210L, 211L, 212L, 213L, 214L, 215L, 216L, 217L,
218L, 219L, 220L, 221L, 222L, 223L, 224L, 225L, 226L, 227L,
228L, 229L, 230L, 231L, 232L, 233L, 234L, 235L, 236L, 237L,
238L, 239L, 240L, 241L, 242L, 243L, 244L, 245L, 246L, 247L,
248L, 249L, 250L, 251L, 252L, 253L, 254L, 255L, 256L, 257L,
258L, 259L, 260L, 261L, 262L, 263L, 264L, 265L, 266L, 267L,
268L, 269L, 270L, 271L, 272L, 273L, 274L, 275L, 276L, 277L,
278L, 279L, 280L, 281L, 282L, 283L, 284L, 285L, 286L, 287L,
288L, 289L, 290L, 291L, 292L, 293L, 294L, 295L, 296L, 297L,
298L, 299L, 300L, 301L, 302L, 303L, 304L, 305L, 306L, 307L,
308L, 309L, 310L, 311L, 312L, 313L, 314L, 315L, 316L, 317L,
318L, 319L, 320L, 321L, 322L, 323L, 324L, 325L, 326L, 327L,
328L, 329L, 330L, 331L, 332L, 333L, 334L, 335L, 336L, 337L,
338L, 339L, 340L, 341L, 342L, 343L, 344L, 345L, 346L, 347L,
348L, 349L, 350L, 351L, 352L, 353L, 354L, 355L, 356L, 357L,
358L, 359L, 360L, 361L, 362L, 363L, 364L, 365L, 366L, 367L,
368L, 369L, 370L, 371L, 372L, 373L, 374L, 375L, 376L, 377L,
378L, 379L, 380L, 381L, 382L, 383L, 384L, 385L, 386L, 387L,
388L, 389L, 390L, 391L, 392L, 393L, 394L, 395L, 396L, 397L,
398L, 399L), group_sizes = c(1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L), biggest_group_size = 1L, labels = structure(list(
draw = 1:400), row.names = c(NA, -400L), class = "data.frame", vars = "draw", drop = TRUE, .Names = "draw")),
summ_stan_df = structure(list(shape1 = 3.61642238398219,
shape2 = 4.28941055469916, stretch = 0.808421740859661,
x_min = -0.128367350816079, x_max = 3.62958934710432,
maxima = 2.1423842458729, rep = 36L), .Names = c("shape1",
"shape2", "stretch", "x_min", "x_max", "maxima", "rep"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -1L))), .Names = c("lasso_df",
"lasso_depth", "summ_stan_df"))
run_x <-
structure(list(lasso_df = structure(list(var = c("maxima", "stretch",
"x_max", "x_min"), mean = c(0.252420310505223, 0.204970796345933,
-0.0198357644186884, -0.197934950800058), prop = c(0.1175, 0.2025,
0.09, 0.0275), rand_var = c(1.41586353557413, 1.41586353557413,
1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583),
sim_n = c(36L, 36L, 36L, 36L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), lasso_depth = structure(list(
var = c("maxima", "stretch", "x_max", "x_min"), mean = c(0.00585341699645272,
0.037186470054669, -0.00906809580465606, -0.00476687498799465
), prop = c(0.24, 0.8425, 0.055, 0.0875), rand_var = c(1.41586353557413,
1.41586353557413, 1.41586353557413, 1.41586353557413), mean_rand = c(-0.442041621464583,
-0.442041621464583, -0.442041621464583, -0.442041621464583
), sim_n = c(36L, 36L, 36L, 36L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), summ_stan_df = structure(list(
shape1 = 3.61642238398219, shape2 = 4.28941055469916, stretch = 0.808421740859661,
x_min = -0.128367350816079, x_max = 3.62958934710432, maxima = 2.1423842458729,
rep = 36L), .Names = c("shape1", "shape2", "stretch", "x_min",
"x_max", "maxima", "rep"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -1L))), .Names = c("lasso_df", "lasso_depth",
"summ_stan_df"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.